repository_name stringclasses 316 values | func_path_in_repository stringlengths 6 223 | func_name stringlengths 1 134 | language stringclasses 1 value | func_code_string stringlengths 57 65.5k | func_documentation_string stringlengths 1 46.3k | split_name stringclasses 1 value | func_code_url stringlengths 91 315 | called_functions listlengths 1 156 ⌀ | enclosing_scope stringlengths 2 1.48M |
|---|---|---|---|---|---|---|---|---|---|
jedie/PyHardLinkBackup | PyHardLinkBackup/phlb/filesystem_walk.py | PathLibFilter.iter | python | def iter(self, dir_entries):
filter = self.filter
for entry in dir_entries:
path = filter(Path2(entry.path))
if path != False:
yield path | :param dir_entries: list of os.DirEntry() instances | train | https://github.com/jedie/PyHardLinkBackup/blob/be28666834d2d9e3d8aac1b661cb2d5bd4056c29/PyHardLinkBackup/phlb/filesystem_walk.py#L74-L82 | null | class PathLibFilter:
def __init__(self, filter):
"""
:param filter: callable to filter in self.iter()
"""
assert callable(filter)
self.filter = filter
|
jedie/PyHardLinkBackup | PyHardLinkBackup/phlb/add.py | add_dir_entry | python | def add_dir_entry(backup_run, dir_entry_path, process_bar, result):
# print(dir_entry_path.pformat())
# print(dir_entry_path.stat.st_nlink)
backup_entry = Path2(dir_entry_path.path)
filesize = dir_entry_path.stat.st_size
hash_filepath = Path2("%s%s%s" % (backup_entry.path, os.extsep, phlb_config.hash_name))
if hash_filepath.is_file():
with hash_filepath.open("r") as hash_file:
hash_hexdigest = hash_file.read().strip()
if filesize > 0:
process_bar.update(filesize)
else:
with hash_filepath.open("w") as hash_file:
callback = HashCallback(process_bar)
with backup_entry.open("rb") as f:
hash = calculate_hash(f, callback)
hash_hexdigest = hash.hexdigest()
hash_file.write(hash_hexdigest)
old_backup_entry = deduplicate(backup_entry, hash_hexdigest)
if old_backup_entry is None:
result.add_new_file(filesize)
else:
result.add_stined_file(filesize)
BackupEntry.objects.create(
backup_run, dir_entry_path.path_instance, hash_hexdigest=hash_hexdigest # Path2() instance
) | :param backup_run:
:param dir_entry_path: filesystem_walk.DirEntryPath() instance
:param process_bar: | train | https://github.com/jedie/PyHardLinkBackup/blob/be28666834d2d9e3d8aac1b661cb2d5bd4056c29/PyHardLinkBackup/phlb/add.py#L86-L120 | [
"def deduplicate(backup_entry, hash_hexdigest):\n abs_dst_root = Path2(phlb_config.backup_path)\n\n try:\n backup_entry.relative_to(abs_dst_root)\n except ValueError as err:\n raise ValueError(\"Backup entry not in backup root path: %s\" % err)\n\n assert backup_entry.is_file(), \"Is not a... | """
Python HardLink Backup
~~~~~~~~~~~~~~~~~~~~~~
:copyleft: 2016 by Jens Diemer
:license: GNU GPL v3 or above, see LICENSE for more details.
"""
import datetime
import hashlib
import logging
import os
# time.clock() on windows and time.time() on linux
from click._compat import strip_ansi
try:
# https://github.com/tqdm/tqdm
from tqdm import tqdm
except ImportError as err:
raise ImportError("Please install 'tqdm': %s" % err)
# os.environ["DJANGO_SETTINGS_MODULE"] = "PyHardLinkBackup.django_project.settings"
import django
from pathlib_revised import Path2 # https://github.com/jedie/pathlib revised/
from PyHardLinkBackup.phlb import BACKUP_RUN_CONFIG_FILENAME
from PyHardLinkBackup.phlb.deduplicate import deduplicate
from PyHardLinkBackup.phlb.phlb_main import scan_dir_tree
from PyHardLinkBackup.phlb.traceback_plus import exc_plus
from PyHardLinkBackup.phlb.filesystem_walk import scandir_limited
from PyHardLinkBackup.phlb.config import phlb_config
from PyHardLinkBackup.phlb.human import human_filesize, to_percent
from PyHardLinkBackup.backup_app.models import BackupEntry, BackupRun
log = logging.getLogger("phlb.%s" % __name__)
def calculate_hash(f, callback):
# TODO: merge code!
hash = hashlib.new(phlb_config.hash_name)
f.seek(0)
while True:
data = f.read(phlb_config.chunk_size)
if not data:
break
hash.update(data)
callback(data)
return hash
class HashCallback:
def __init__(self, process_bar):
self.process_bar = process_bar
def __call__(self, data):
self.process_bar.update(len(data))
class DeduplicateResult:
def __init__(self):
self.total_stined_file_count = 0
self.total_stined_bytes = 0
self.total_new_file_count = 0
self.total_new_bytes = 0
self.total_fast_backup = 0
def add_new_file(self, size):
self.total_new_file_count += 1
self.total_new_bytes += size
def add_stined_file(self, size):
self.total_stined_file_count += 1
self.total_stined_bytes += size
def get_total_size(self):
return self.total_new_bytes + self.total_stined_bytes
def add_dir_entries(backup_run, filtered_dir_entries, result):
total_size = sum([entry.stat.st_size for entry in filtered_dir_entries])
print("total size:", human_filesize(total_size))
path_iterator = sorted(
filtered_dir_entries,
key=lambda x: x.stat.st_mtime, # sort by last modify time
reverse=True, # sort from newest to oldes
)
# FIXME: The process bar will stuck if many small/null byte files are processed
# Maybe: Change from bytes to file count and use a second bar if a big file
# hash will be calculated.
with tqdm(total=total_size, unit="B", unit_scale=True) as process_bar:
for dir_entry in path_iterator:
try:
add_dir_entry(backup_run, dir_entry, process_bar, result)
except Exception as err:
# A unexpected error occurred.
# Print and add traceback to summary
log.error("Can't backup %s: %s" % (dir_entry, err))
for line in exc_plus():
log.error(strip_ansi(line))
def add_backup_entries(backup_run, result):
backup_path = backup_run.path_part()
filtered_dir_entries = scan_dir_tree(
backup_path,
extra_skip_patterns=(
"*.%s" % phlb_config.hash_name, # skip all existing hash files
BACKUP_RUN_CONFIG_FILENAME, # skip phlb_config.ini
),
)
add_dir_entries(backup_run, filtered_dir_entries, result)
def add_backup_run(backup_run_path):
print("*** add backup run: %s" % backup_run_path.path)
backup_name = backup_run_path.parent.stem
date_part = backup_run_path.stem
try:
backup_datetime = datetime.datetime.strptime(date_part, phlb_config.sub_dir_formatter)
except ValueError as err:
print("\nERROR parsing datetime from given path: %s" % err)
print(" * Is the given path right?")
print()
return
backup_run = BackupRun.objects.create(name=backup_name, backup_datetime=backup_datetime, completed=False)
result = DeduplicateResult()
add_backup_entries(backup_run, result)
print("*** backup run %s - %s added:" % (backup_name, date_part))
total_size = result.get_total_size()
print(
" * new content saved: %i files (%s %.1f%%)"
% (
result.total_new_file_count,
human_filesize(result.total_new_bytes),
to_percent(result.total_new_bytes, total_size),
)
)
print(
" * stint space via hardlinks: %i files (%s %.1f%%)"
% (
result.total_stined_file_count,
human_filesize(result.total_stined_bytes),
to_percent(result.total_stined_bytes, total_size),
)
)
def add_backup_name(backup_name_path):
backup_runs = scandir_limited(backup_name_path.path, limit=1)
for dir_entry in backup_runs:
backup_run_path = Path2(dir_entry.path)
print(" * %s" % backup_run_path.stem)
try:
backup_run = BackupRun.objects.get_from_config_file(backup_run_path)
except (FileNotFoundError, BackupRun.DoesNotExist) as err:
print("Error: %s" % err)
# no phlb_config.ini
add_backup_run(backup_run_path)
else:
print("\tBackup exists:", backup_run)
def add_all_backups():
abs_dst_root = Path2(phlb_config.backup_path)
backup_names = scandir_limited(abs_dst_root.path, limit=1)
for dir_entry in backup_names:
backup_name_path = Path2(dir_entry.path)
print("_" * 79)
print("'%s' (path: %s)" % (backup_name_path.stem, backup_name_path.path))
add_backup_name(backup_name_path)
def add_backups():
"""
Scan all existing backup and add missing ones to database.
"""
django.setup()
add_all_backups()
|
jedie/PyHardLinkBackup | PyHardLinkBackup/backup_app/migrations/0004_BackupRun_ini_file_20160203_1415.py | forwards_func | python | def forwards_func(apps, schema_editor):
print("\n")
create_count = 0
BackupRun = apps.get_model("backup_app", "BackupRun") # historical version of BackupRun
backup_runs = BackupRun.objects.all()
for backup_run in backup_runs:
# Use the origin BackupRun model to get access to write_config()
temp = OriginBackupRun(name=backup_run.name, backup_datetime=backup_run.backup_datetime)
try:
temp.write_config()
except OSError as err:
print("ERROR creating config file: %s" % err)
else:
create_count += 1
# print("%r created." % config_path.path)
print("%i config files created.\n" % create_count) | manage migrate backup_app 0004_BackupRun_ini_file_20160203_1415 | train | https://github.com/jedie/PyHardLinkBackup/blob/be28666834d2d9e3d8aac1b661cb2d5bd4056c29/PyHardLinkBackup/backup_app/migrations/0004_BackupRun_ini_file_20160203_1415.py#L9-L27 | null | # coding: utf-8
from django.db import migrations, models
from PyHardLinkBackup.backup_app.models import BackupRun as OriginBackupRun
def reverse_func(apps, schema_editor):
"""
manage migrate backup_app 0003_auto_20160127_2002
"""
print("\n")
remove_count = 0
BackupRun = apps.get_model("backup_app", "BackupRun")
backup_runs = BackupRun.objects.all()
for backup_run in backup_runs:
# Use the origin BackupRun model to get access to get_config_path()
temp = OriginBackupRun(name=backup_run.name, backup_datetime=backup_run.backup_datetime)
config_path = temp.get_config_path()
try:
config_path.unlink()
except OSError as err:
print("ERROR removing config file: %s" % err)
else:
remove_count += 1
# print("%r removed." % config_path.path)
print("%i config files removed.\n" % remove_count)
class Migration(migrations.Migration):
dependencies = [("backup_app", "0003_auto_20160127_2002")]
operations = [migrations.RunPython(forwards_func, reverse_func)]
|
jedie/PyHardLinkBackup | PyHardLinkBackup/backup_app/migrations/0004_BackupRun_ini_file_20160203_1415.py | reverse_func | python | def reverse_func(apps, schema_editor):
print("\n")
remove_count = 0
BackupRun = apps.get_model("backup_app", "BackupRun")
backup_runs = BackupRun.objects.all()
for backup_run in backup_runs:
# Use the origin BackupRun model to get access to get_config_path()
temp = OriginBackupRun(name=backup_run.name, backup_datetime=backup_run.backup_datetime)
config_path = temp.get_config_path()
try:
config_path.unlink()
except OSError as err:
print("ERROR removing config file: %s" % err)
else:
remove_count += 1
# print("%r removed." % config_path.path)
print("%i config files removed.\n" % remove_count) | manage migrate backup_app 0003_auto_20160127_2002 | train | https://github.com/jedie/PyHardLinkBackup/blob/be28666834d2d9e3d8aac1b661cb2d5bd4056c29/PyHardLinkBackup/backup_app/migrations/0004_BackupRun_ini_file_20160203_1415.py#L30-L50 | null | # coding: utf-8
from django.db import migrations, models
from PyHardLinkBackup.backup_app.models import BackupRun as OriginBackupRun
def forwards_func(apps, schema_editor):
"""
manage migrate backup_app 0004_BackupRun_ini_file_20160203_1415
"""
print("\n")
create_count = 0
BackupRun = apps.get_model("backup_app", "BackupRun") # historical version of BackupRun
backup_runs = BackupRun.objects.all()
for backup_run in backup_runs:
# Use the origin BackupRun model to get access to write_config()
temp = OriginBackupRun(name=backup_run.name, backup_datetime=backup_run.backup_datetime)
try:
temp.write_config()
except OSError as err:
print("ERROR creating config file: %s" % err)
else:
create_count += 1
# print("%r created." % config_path.path)
print("%i config files created.\n" % create_count)
class Migration(migrations.Migration):
dependencies = [("backup_app", "0003_auto_20160127_2002")]
operations = [migrations.RunPython(forwards_func, reverse_func)]
|
uyar/pygenstub | pygenstub.py | get_fields | python | def get_fields(node, fields_tag="field_list"):
fields_nodes = [c for c in node.children if c.tagname == fields_tag]
if len(fields_nodes) == 0:
return {}
assert len(fields_nodes) == 1, "multiple nodes with tag " + fields_tag
fields_node = fields_nodes[0]
fields = [
{f.tagname: f.rawsource.strip() for f in n.children}
for n in fields_node.children
if n.tagname == "field"
]
return {f["field_name"]: f["field_body"] for f in fields} | Get the field names and their values from a node.
:sig: (Document, str) -> Dict[str, str]
:param node: Node to get the fields from.
:param fields_tag: Tag of child node that contains the fields.
:return: Names and values of fields. | train | https://github.com/uyar/pygenstub/blob/a6b18a823382d3c6be29c411fb33c58b6090d22c/pygenstub.py#L81-L99 | null | # Copyright (C) 2016-2019 H. Turgut Uyar <uyar@tekir.org>
#
# pygenstub is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pygenstub is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pygenstub. If not, see <http://www.gnu.org/licenses/>.
"""pygenstub is a utility for generating stub files from docstrings in source files.
It takes a source file as input and creates a stub file with the same base name
and the ``.pyi`` extension.
For more information, please refer to the documentation:
https://pygenstub.tekir.org/
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import ast
import inspect
import logging
import os
import re
import sys
import textwrap
from argparse import ArgumentParser
from bisect import bisect
from collections import OrderedDict
from importlib import import_module
from io import StringIO
from pkgutil import get_loader, walk_packages
from docutils.core import publish_doctree
__version__ = "1.4.0" # sig: str
PY3 = sys.version_info >= (3, 0)
if not PY3:
import __builtin__ as builtins
from pathlib2 import Path
else:
import builtins
from pathlib import Path
# sigalias: Document = docutils.nodes.document
BUILTIN_TYPES = {k for k, t in builtins.__dict__.items() if isinstance(t, type)}
BUILTIN_TYPES.add("None")
SIG_FIELD = "sig" # sig: str
SIG_COMMENT = "# sig:" # sig: str
SIG_ALIAS = "# sigalias:" # sig: str
DECORATORS = {"property", "staticmethod", "classmethod"} # sig: Set[str]
LINE_LENGTH_LIMIT = 79
INDENT = 4 * " "
EDIT_WARNING = "THIS FILE IS AUTOMATICALLY GENERATED, DO NOT EDIT MANUALLY."
_RE_QUALIFIED_TYPES = re.compile(r"\w+(?:\.\w+)*")
_RE_COMMENT_IN_STRING = re.compile(r"""['"]\s*%(text)s\s*.*['"]""" % {"text": SIG_COMMENT})
_logger = logging.getLogger(__name__)
def extract_signature(docstring):
"""Extract the signature from a docstring.
:sig: (str) -> Optional[str]
:param docstring: Docstring to extract the signature from.
:return: Extracted signature, or ``None`` if there's no signature.
"""
root = publish_doctree(docstring, settings_overrides={"report_level": 5})
fields = get_fields(root)
return fields.get(SIG_FIELD)
def get_signature(node):
"""Get the signature of a function or a class.
:sig: (Union[ast.FunctionDef, ast.AsyncFunctionDef, ast.ClassDef]) -> Optional[str]
:param node: Node to get the signature from.
:return: Value of signature field in node docstring, or ``None`` if there's no signature.
"""
docstring = ast.get_docstring(node)
if docstring is None:
return None
return extract_signature(docstring)
def split_parameter_types(parameters):
"""Split a parameter types declaration into individual types.
The input is the left hand side of a signature (the part before the arrow),
excluding the parentheses.
:sig: (str) -> List[str]
:param parameters: Comma separated parameter types.
:return: Parameter types.
"""
if parameters == "":
return []
# only consider the top level commas, ignore the ones in []
commas = []
bracket_depth = 0
for i, char in enumerate(parameters):
if (char == ",") and (bracket_depth == 0):
commas.append(i)
elif char == "[":
bracket_depth += 1
elif char == "]":
bracket_depth -= 1
types = []
last_i = 0
for i in commas:
types.append(parameters[last_i:i].strip())
last_i = i + 1
else:
types.append(parameters[last_i:].strip())
return types
def parse_signature(signature):
"""Parse a signature into its input and return parameter types.
This will also collect the types that are required by any of the input
and return types.
:sig: (str) -> Tuple[List[str], str, Set[str]]
:param signature: Signature to parse.
:return: Input parameter types, return type, and all required types.
"""
if " -> " not in signature:
# signature comment: no parameters, treat variable type as return type
param_types, return_type = None, signature.strip()
else:
lhs, return_type = [s.strip() for s in signature.split(" -> ")]
csv = lhs[1:-1].strip() # remove the parentheses around the parameter type list
param_types = split_parameter_types(csv)
requires = set(_RE_QUALIFIED_TYPES.findall(signature))
return param_types, return_type, requires
class StubNode:
"""A node in a stub tree."""
def __init__(self):
"""Initialize this stub node.
:sig: () -> None
"""
self.variables = [] # sig: List[VariableNode]
self.variable_names = set() # sig: Set[str]
self.children = [] # sig: List[Union[FunctionNode, ClassNode]]
self.parent = None # sig: Optional[StubNode]
def add_variable(self, node):
"""Add a variable node to this node.
:sig: (VariableNode) -> None
:param node: Variable node to add.
"""
if node.name not in self.variable_names:
self.variables.append(node)
self.variable_names.add(node.name)
node.parent = self
def add_child(self, node):
"""Add a function/method or class node to this node.
:sig: (Union[FunctionNode, ClassNode]) -> None
:param node: Function or class node to add.
"""
self.children.append(node)
node.parent = self
def get_code(self):
"""Get the stub code for this node.
The stub code for a node consists of the type annotations of its variables,
followed by the prototypes of its functions/methods and classes.
:sig: () -> List[str]
:return: Lines of stub code for this node.
"""
stub = []
for child in self.variables:
stub.extend(child.get_code())
if (
(len(self.variables) > 0)
and (len(self.children) > 0)
and (not isinstance(self, ClassNode))
):
stub.append("")
for child in self.children:
stub.extend(child.get_code())
return stub
class VariableNode(StubNode):
"""A node representing an assignment in a stub tree."""
def __init__(self, name, type_):
"""Initialize this variable node.
:sig: (str, str) -> None
:param name: Name of variable that is being assigned to.
:param type_: Type of variable.
"""
if not PY3:
StubNode.__init__(self)
else:
super().__init__()
self.name = name # sig: str
self.type_ = type_ # sig: str
def get_code(self):
"""Get the type annotation for this variable.
:sig: () -> List[str]
:return: Lines of stub code for this variable.
"""
return ["%(n)s = ... # type: %(t)s" % {"n": self.name, "t": self.type_}]
class FunctionNode(StubNode):
"""A node representing a function in a stub tree."""
def __init__(self, name, parameters, rtype, decorators=None):
"""Initialize this function node.
The parameters have to given as a list of triples where each item specifies
the name of the parameter, its type, and whether it has a default value or not.
:sig: (str, Sequence[Tuple[str, str, bool]], str, Optional[Sequence[str]]) -> None
:param name: Name of function.
:param parameters: List of parameter triples (name, type, has_default).
:param rtype: Type of return value.
:param decorators: Decorators of function.
"""
if not PY3:
StubNode.__init__(self)
else:
super().__init__()
self.name = name # sig: str
self.parameters = parameters # sig: Sequence[Tuple[str, str, bool]]
self.rtype = rtype # sig: str
self.decorators = decorators if decorators is not None else [] # sig: Sequence[str]
self._async = False # sig: bool
def get_code(self):
"""Get the stub code for this function.
:sig: () -> List[str]
:return: Lines of stub code for this function.
"""
stub = []
for deco in self.decorators:
if (deco in DECORATORS) or deco.endswith(".setter"):
stub.append("@" + deco)
parameters = []
for name, type_, has_default in self.parameters:
decl = "%(n)s%(t)s%(d)s" % {
"n": name,
"t": ": " + type_ if type_ else "",
"d": " = ..." if has_default else "",
}
parameters.append(decl)
slots = {
"a": "async " if self._async else "",
"n": self.name,
"p": ", ".join(parameters),
"r": self.rtype,
}
prototype = "%(a)sdef %(n)s(%(p)s) -> %(r)s: ..." % slots
if len(prototype) <= LINE_LENGTH_LIMIT:
stub.append(prototype)
elif len(INDENT + slots["p"]) <= LINE_LENGTH_LIMIT:
stub.append("%(a)sdef %(n)s(" % slots)
stub.append(INDENT + slots["p"])
stub.append(") -> %(r)s: ..." % slots)
else:
stub.append("%(a)sdef %(n)s(" % slots)
for param in parameters:
stub.append(INDENT + param + ",")
stub.append(") -> %(r)s: ..." % slots)
return stub
class ClassNode(StubNode):
"""A node representing a class in a stub tree."""
def __init__(self, name, bases, signature=None):
"""Initialize this class node.
:sig: (str, Sequence[str], Optional[str]) -> None
:param name: Name of class.
:param bases: Base classes of class.
:param signature: Signature of class, to be used in __init__ method.
"""
if not PY3:
StubNode.__init__(self)
else:
super().__init__()
self.name = name # sig: str
self.bases = bases # sig: Sequence[str]
self.signature = signature # sig: Optional[str]
def get_code(self):
"""Get the stub code for this class.
:sig: () -> List[str]
:return: Lines of stub code for this class.
"""
stub = []
bases = ("(" + ", ".join(self.bases) + ")") if len(self.bases) > 0 else ""
slots = {"n": self.name, "b": bases}
if (len(self.children) == 0) and (len(self.variables) == 0):
stub.append("class %(n)s%(b)s: ..." % slots)
else:
stub.append("class %(n)s%(b)s:" % slots)
super_code = super().get_code() if PY3 else StubNode.get_code(self)
for line in super_code:
stub.append(INDENT + line)
return stub
def get_aliases(lines):
"""Get the type aliases in the source.
:sig: (Sequence[str]) -> Dict[str, str]
:param lines: Lines of the source code.
:return: Aliases and their their definitions.
"""
aliases = {}
for line in lines:
line = line.strip()
if len(line) > 0 and line.startswith(SIG_ALIAS):
_, content = line.split(SIG_ALIAS)
alias, signature = [t.strip() for t in content.split("=")]
aliases[alias] = signature
return aliases
class StubGenerator(ast.NodeVisitor):
"""A transformer that generates stub declarations from a source code."""
def __init__(self, source, generic=False):
"""Initialize this stub generator.
:sig: (str, bool) -> None
:param source: Source code to generate the stub for.
:param generic: Whether to produce generic stubs.
"""
self.root = StubNode() # sig: StubNode
self.generic = generic # sig: bool
self.imported_namespaces = OrderedDict() # sig: OrderedDict[str, str]
self.imported_names = OrderedDict() # sig: OrderedDict[str, str]
self.defined_types = set() # sig: Set[str]
self.required_types = set() # sig: Set[str]
self.aliases = OrderedDict() # sig: OrderedDict[str, str]
self._parents = [self.root] # sig: List[StubNode]
self._code_lines = source.splitlines() # sig: List[str]
self.collect_aliases()
ast_tree = ast.parse(source)
self.visit(ast_tree)
def collect_aliases(self):
"""Collect the type aliases in the source.
:sig: () -> None
"""
self.aliases = get_aliases(self._code_lines)
for alias, signature in self.aliases.items():
_, _, requires = parse_signature(signature)
self.required_types |= requires
self.defined_types |= {alias}
def visit_Import(self, node):
"""Visit an import node."""
line = self._code_lines[node.lineno - 1]
module_name = line.split("import")[0].strip()
for name in node.names:
imported_name = name.name
if name.asname:
imported_name = name.asname + "::" + imported_name
self.imported_namespaces[imported_name] = module_name
def visit_ImportFrom(self, node):
"""Visit an from-import node."""
line = self._code_lines[node.lineno - 1]
module_name = line.split("from")[1].split("import")[0].strip()
for name in node.names:
imported_name = name.name
if name.asname:
imported_name = name.asname + "::" + imported_name
self.imported_names[imported_name] = module_name
def visit_Assign(self, node):
"""Visit an assignment node."""
line = self._code_lines[node.lineno - 1]
if SIG_COMMENT in line:
line = _RE_COMMENT_IN_STRING.sub("", line)
if (SIG_COMMENT not in line) and (not self.generic):
return
if SIG_COMMENT in line:
_, signature = line.split(SIG_COMMENT)
_, return_type, requires = parse_signature(signature)
self.required_types |= requires
parent = self._parents[-1]
for var in node.targets:
if isinstance(var, ast.Name):
name, p = var.id, parent
elif (
isinstance(var, ast.Attribute)
and isinstance(var.value, ast.Name)
and (var.value.id == "self")
):
name, p = var.attr, parent.parent
else:
name, p = None, None
if name is not None:
if self.generic:
return_type = "Any"
self.required_types.add(return_type)
stub_node = VariableNode(name, return_type)
p.add_variable(stub_node)
def get_function_node(self, node):
"""Process a function node.
:sig: (Union[ast.FunctionDef, ast.AsyncFunctionDef]) -> FunctionNode
:param node: Node to process.
:return: Generated function node in stub tree.
"""
decorators = []
for d in node.decorator_list:
if hasattr(d, "id"):
decorators.append(d.id)
elif hasattr(d, "func"):
decorators.append(d.func.id)
elif hasattr(d, "value"):
decorators.append(d.value.id + "." + d.attr)
signature = get_signature(node)
if signature is None:
parent = self._parents[-1]
if isinstance(parent, ClassNode) and (node.name == "__init__"):
signature = parent.signature
if (signature is None) and (not self.generic):
return None
param_names = [arg.arg if PY3 else arg.id for arg in node.args.args]
n_args = len(param_names)
if signature is None:
param_types, rtype, requires = ["Any"] * n_args, "Any", {"Any"}
else:
_logger.debug("parsing signature for %s", node.name)
param_types, rtype, requires = parse_signature(signature)
# TODO: only in classes
if ((n_args > 0) and (param_names[0] == "self")) or (
(n_args > 0) and (param_names[0] == "cls") and ("classmethod" in decorators)
):
if signature is None:
param_types[0] = ""
else:
param_types.insert(0, "")
_logger.debug("parameter types: %s", param_types)
_logger.debug("return type: %s", rtype)
_logger.debug("required types: %s", requires)
self.required_types |= requires
if node.args.vararg is not None:
param_names.append("*" + (node.args.vararg.arg if PY3 else node.args.vararg))
param_types.append("")
if node.args.kwarg is not None:
param_names.append("**" + (node.args.kwarg.arg if PY3 else node.args.kwarg))
param_types.append("")
kwonly_args = getattr(node.args, "kwonlyargs", [])
if len(kwonly_args) > 0:
param_names.extend([arg.arg for arg in kwonly_args])
if signature is None:
param_types.extend(["Any"] * len(kwonly_args))
if len(param_types) != len(param_names):
raise ValueError("Parameter names and types don't match: " + node.name)
param_locs = [(a.lineno, a.col_offset) for a in (node.args.args + kwonly_args)]
param_defaults = {
bisect(param_locs, (d.lineno, d.col_offset)) - 1 for d in node.args.defaults
}
kwonly_defaults = getattr(node.args, "kw_defaults", [])
for i, d in enumerate(kwonly_defaults):
if d is not None:
param_defaults.add(n_args + i)
params = [
(name, type_, i in param_defaults)
for i, (name, type_) in enumerate(zip(param_names, param_types))
]
if len(kwonly_args) > 0:
params.insert(n_args, ("*", "", False))
stub_node = FunctionNode(
node.name, parameters=params, rtype=rtype, decorators=decorators
)
self._parents[-1].add_child(stub_node)
self._parents.append(stub_node)
self.generic_visit(node)
del self._parents[-1]
return stub_node
def visit_FunctionDef(self, node):
"""Visit a function node."""
node = self.get_function_node(node)
if node is not None:
node._async = False
def visit_AsyncFunctionDef(self, node):
"""Visit an async function node."""
node = self.get_function_node(node)
if node is not None:
node._async = True
def visit_ClassDef(self, node):
"""Visit a class node."""
self.defined_types.add(node.name)
bases = []
for n in node.bases:
base_parts = []
while True:
if not isinstance(n, ast.Attribute):
base_parts.append(n.id)
break
else:
base_parts.append(n.attr)
n = n.value
bases.append(".".join(base_parts[::-1]))
self.required_types |= set(bases)
signature = get_signature(node)
stub_node = ClassNode(node.name, bases=bases, signature=signature)
self._parents[-1].add_child(stub_node)
self._parents.append(stub_node)
self.generic_visit(node)
del self._parents[-1]
@staticmethod
def generate_import_from(module_, names):
"""Generate an import line.
:sig: (str, Set[str]) -> str
:param module_: Name of module to import the names from.
:param names: Names to import.
:return: Import line in stub code.
"""
regular_names = [n for n in names if "::" not in n]
as_names = [n for n in names if "::" in n]
line = ""
if len(regular_names) > 0:
slots = {"m": module_, "n": ", ".join(sorted(regular_names))}
line = "from %(m)s import %(n)s" % slots
if len(line) > LINE_LENGTH_LIMIT:
slots["n"] = INDENT + (",\n" + INDENT).join(sorted(regular_names)) + ","
line = "from %(m)s import (\n%(n)s\n)" % slots
if len(as_names) > 0:
line += "\n"
for as_name in as_names:
a, n = as_name.split("::")
line += "from %(m)s import %(n)s as %(a)s" % {"m": module_, "n": n, "a": a}
return line
def generate_stub(self):
"""Generate the stub code for this source.
:sig: () -> str
:return: Generated stub code.
"""
needed_types = self.required_types - BUILTIN_TYPES
needed_types -= self.defined_types
_logger.debug("defined types: %s", self.defined_types)
module_vars = {v.name for v in self.root.variables}
_logger.debug("module variables: %s", module_vars)
qualified_types = {n for n in needed_types if "." in n}
qualified_namespaces = {".".join(n.split(".")[:-1]) for n in qualified_types}
needed_namespaces = qualified_namespaces - module_vars
needed_types -= qualified_types
_logger.debug("needed namespaces: %s", needed_namespaces)
imported_names = {n.split("::")[0] for n in self.imported_names}
imported_types = imported_names & (needed_types | needed_namespaces)
needed_types -= imported_types
needed_namespaces -= imported_names
_logger.debug("used imported types: %s", imported_types)
try:
typing_mod = __import__("typing")
typing_types = {n for n in needed_types if hasattr(typing_mod, n)}
needed_types -= typing_types
_logger.debug("types from typing module: %s", typing_types)
except ImportError:
typing_types = set()
_logger.warn("typing module not installed")
if len(needed_types) > 0:
raise ValueError("Unknown types: " + ", ".join(needed_types))
out = StringIO()
started = False
if len(typing_types) > 0:
line = self.generate_import_from("typing", typing_types)
out.write(line + "\n")
started = True
if len(imported_types) > 0:
if started:
out.write("\n")
# preserve the import order in the source file
for name in self.imported_names:
if name.split("::")[0] in imported_types:
line = self.generate_import_from(self.imported_names[name], {name})
out.write(line + "\n")
started = True
if len(needed_namespaces) > 0:
if started:
out.write("\n")
as_names = {n.split("::")[0]: n for n in self.imported_namespaces if "::" in n}
for module_ in sorted(needed_namespaces):
if module_ in as_names:
a, n = as_names[module_].split("::")
out.write("import " + n + " as " + a + "\n")
else:
out.write("import " + module_ + "\n")
started = True
if len(self.aliases) > 0:
if started:
out.write("\n")
for alias, signature in self.aliases.items():
out.write("%s = %s\n" % (alias, signature))
started = True
if started:
out.write("\n")
stub_lines = self.root.get_code()
n_lines = len(stub_lines)
for line_no in range(n_lines):
prev_line = stub_lines[line_no - 1] if line_no > 0 else None
line = stub_lines[line_no]
next_line = stub_lines[line_no + 1] if line_no < (n_lines - 1) else None
if (
line.startswith("class ")
and (prev_line is not None)
and (
(not prev_line.startswith("class "))
or (next_line and next_line.startswith(" "))
)
):
out.write("\n")
if (
line.startswith("def ")
and (prev_line is not None)
and (prev_line.startswith((" ", "class ")))
):
out.write("\n")
out.write(line + "\n")
line_no += 1
return out.getvalue()
def get_stub(source, generic=False):
    """Generate the stub code for the given source code.

    :sig: (str, bool) -> str
    :param source: Source code to generate the stub for.
    :param generic: Whether to produce generic stubs.
    :return: Generated stub code.
    """
    return StubGenerator(source, generic=generic).generate_stub()
def get_mod_paths(mod_name, out_dir):
    """Get source and stub paths for a module.

    :param mod_name: Dotted name of the module to locate.
    :param out_dir: Root directory for the generated stub files.
    :return: List of (source path, stub destination path) pairs.
    """
    paths = []
    try:
        mod = get_loader(mod_name)
        source = Path(mod.path)
        # only plain Python sources can produce a stub (skips extension modules)
        if source.name.endswith(".py"):
            source_rel = Path(*mod_name.split("."))
            if source.name == "__init__.py":
                source_rel = source_rel.joinpath("__init__.py")
            destination = Path(out_dir, source_rel.with_suffix(".pyi"))
            paths.append((source, destination))
    except Exception as e:
        # best-effort: log and skip modules that cannot be introspected
        _logger.debug(e)
        _logger.warning("cannot handle module, skipping: %s", mod_name)
    return paths
def get_pkg_paths(pkg_name, out_dir):
    """Recursively get all source and stub paths for a package.

    :param pkg_name: Dotted name of the package to walk.
    :param out_dir: Root directory for the generated stub files.
    :return: List of (source path, stub destination path) pairs.
    """
    paths = []
    try:
        pkg = import_module(pkg_name)
        if not hasattr(pkg, "__path__"):
            # not a package, just a plain module
            return get_mod_paths(pkg_name, out_dir)
        for mod_info in walk_packages(pkg.__path__, pkg.__name__ + "."):
            mod_paths = get_mod_paths(mod_info.name, out_dir)
            paths.extend(mod_paths)
    except Exception as e:
        # best-effort: log and skip packages that fail to import or walk
        _logger.debug(e)
        _logger.warning("cannot handle package, skipping: %s", pkg_name)
    return paths
############################################################
# SPHINX
############################################################
def process_docstring(app, what, name, obj, options, lines):
    """Modify the docstring before generating documentation.

    This will insert type declarations for parameters and return type
    into the docstring, and remove the signature field so that it will
    be excluded from the generated document.
    """
    # module-level type aliases are cached on the Sphinx app object
    aliases = getattr(app, "_sigaliases", None)
    if aliases is None:
        if what == "module":
            aliases = get_aliases(inspect.getsource(obj).splitlines())
            app._sigaliases = aliases
    # NOTE(review): aliases can still be None below if a non-module entry is
    # processed before its module — verify against Sphinx's processing order
    sig_marker = ":" + SIG_FIELD + ":"
    is_class = what in ("class", "exception")
    signature = extract_signature("\n".join(lines))
    if signature is None:
        if not is_class:
            return
        # for classes, fall back to the signature in the __init__ docstring
        init_method = getattr(obj, "__init__")
        init_doc = init_method.__doc__
        init_lines = init_doc.splitlines()[1:]
        if len(init_lines) > 1:
            init_doc = textwrap.dedent("\n".join(init_lines[1:]))
            init_lines = init_doc.splitlines()
        if sig_marker not in init_doc:
            return
        # copy the signature field lines from __init__ into the class docstring
        sig_started = False
        for line in init_lines:
            if line.lstrip().startswith(sig_marker):
                sig_started = True
            if sig_started:
                lines.append(line)
        signature = extract_signature("\n".join(lines))
    if is_class:
        # NOTE(review): init_method is only bound in the fallback branch above;
        # a class carrying its own :sig: field would hit a NameError here — confirm
        obj = init_method
    param_types, rtype, _ = parse_signature(signature)
    param_names = [p for p in inspect.signature(obj).parameters]
    if is_class and (param_names[0] == "self"):
        del param_names[0]
    # if something goes wrong, don't insert parameter types
    if len(param_names) == len(param_types):
        for name, type_ in zip(param_names, param_types):
            find = ":param %(name)s:" % {"name": name}
            alias = aliases.get(type_)
            if alias is not None:
                # render the aliased definition with the alias as a superscript
                type_ = "*%(type)s* :sup:`%(alias)s`" % {"type": type_, "alias": alias}
            for i, line in enumerate(lines):
                if line.startswith(find):
                    lines.insert(i, ":type %(name)s: %(type)s" % {"name": name, "type": type_})
                    break
    if not is_class:
        for i, line in enumerate(lines):
            if line.startswith((":return:", ":returns:")):
                lines.insert(i, ":rtype: " + rtype)
                break
    # remove the signature field
    sig_start = 0
    while sig_start < len(lines):
        if lines[sig_start].startswith(sig_marker):
            break
        sig_start += 1
    # the field ends at the first non-indented line after the marker
    sig_end = sig_start + 1
    while sig_end < len(lines):
        if (not lines[sig_end]) or (lines[sig_end][0] != " "):
            break
        sig_end += 1
    # delete in reverse so earlier indexes stay valid
    for i in reversed(range(sig_start, sig_end)):
        del lines[i]
def setup(app):
    """Register to Sphinx.

    :param app: Sphinx application instance.
    :return: Extension metadata for Sphinx.
    """
    # hook into autodoc so :sig: fields become type declarations
    app.connect("autodoc-process-docstring", process_docstring)
    return {"version": __version__}
############################################################
# MAIN
############################################################
def main(argv=None):
    """Start the command line interface.

    Parses the command line arguments, collects (source, destination) path
    pairs for the requested files and modules, and writes one ``.pyi`` stub
    for each pair.

    :param argv: Command line arguments to parse (defaults to ``sys.argv``).
    """
    parser = ArgumentParser(prog="pygenstub")
    parser.add_argument("--version", action="version", version="%(prog)s " + __version__)
    parser.add_argument("files", nargs="*", help="generate stubs for given files")
    parser.add_argument(
        "-m",
        "--module",
        action="append",
        metavar="MODULE",
        dest="modules",
        default=[],
        help="generate stubs for given modules",
    )
    parser.add_argument(
        "-o", "--output", metavar="PATH", dest="out_dir", help="change the output directory"
    )
    parser.add_argument(
        "--generic", action="store_true", default=False, help="generate generic stubs"
    )
    parser.add_argument("--debug", action="store_true", help="enable debug messages")
    argv = argv if argv is not None else sys.argv
    arguments = parser.parse_args(argv[1:])

    # set debug mode
    if arguments.debug:
        logging.basicConfig(level=logging.DEBUG)
        _logger.debug("running in debug mode")

    out_dir = arguments.out_dir if arguments.out_dir is not None else ""
    if (out_dir == "") and (len(arguments.modules) > 0):
        # module stubs cannot be written next to installed sources
        print("Output directory must be given when generating stubs for modules.")
        sys.exit(1)

    modules = []
    for path in arguments.files:
        paths = Path(path).glob("**/*.py") if Path(path).is_dir() else [Path(path)]
        for source in paths:
            if str(source).startswith(os.path.pardir):
                source = source.absolute().resolve()
            if (out_dir != "") and source.is_absolute():
                # strip the root so the path can be anchored under out_dir
                source = source.relative_to(source.root)
            destination = Path(out_dir, source.with_suffix(".pyi"))
            modules.append((source, destination))
    for mod_name in arguments.modules:
        modules.extend(get_pkg_paths(mod_name, out_dir))

    for source, destination in modules:
        _logger.info("generating stub for %s to path %s", source, destination)
        with source.open() as f:
            code = f.read()
        try:
            stub = get_stub(code, generic=arguments.generic)
        except Exception as e:
            # report the failing file and continue with the remaining ones
            print(source, "-", e, file=sys.stderr)
            continue
        if stub != "":
            if not destination.parent.exists():
                destination.parent.mkdir(parents=True)
            with destination.open("w") as f:
                f.write("# " + EDIT_WARNING + "\n\n" + stub)
# script entry point
if __name__ == "__main__":
    main()
|
uyar/pygenstub | pygenstub.py | extract_signature | python | def extract_signature(docstring):
root = publish_doctree(docstring, settings_overrides={"report_level": 5})
fields = get_fields(root)
return fields.get(SIG_FIELD) | Extract the signature from a docstring.
:sig: (str) -> Optional[str]
:param docstring: Docstring to extract the signature from.
:return: Extracted signature, or ``None`` if there's no signature. | train | https://github.com/uyar/pygenstub/blob/a6b18a823382d3c6be29c411fb33c58b6090d22c/pygenstub.py#L102-L111 | [
"def get_fields(node, fields_tag=\"field_list\"):\n \"\"\"Get the field names and their values from a node.\n\n :sig: (Document, str) -> Dict[str, str]\n :param node: Node to get the fields from.\n :param fields_tag: Tag of child node that contains the fields.\n :return: Names and values of fields.\n... | # Copyright (C) 2016-2019 H. Turgut Uyar <uyar@tekir.org>
#
# pygenstub is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pygenstub is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pygenstub. If not, see <http://www.gnu.org/licenses/>.
"""pygenstub is a utility for generating stub files from docstrings in source files.
It takes a source file as input and creates a stub file with the same base name
and the ``.pyi`` extension.
For more information, please refer to the documentation:
https://pygenstub.tekir.org/
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import ast
import inspect
import logging
import os
import re
import sys
import textwrap
from argparse import ArgumentParser
from bisect import bisect
from collections import OrderedDict
from importlib import import_module
from io import StringIO
from pkgutil import get_loader, walk_packages
from docutils.core import publish_doctree
__version__ = "1.4.0" # sig: str
PY3 = sys.version_info >= (3, 0)
if not PY3:
import __builtin__ as builtins
from pathlib2 import Path
else:
import builtins
from pathlib import Path
# sigalias: Document = docutils.nodes.document
# names of all built-in types; these never need an import in generated stubs
BUILTIN_TYPES = {k for k, t in builtins.__dict__.items() if isinstance(t, type)}
BUILTIN_TYPES.add("None")

# markers that introduce signature information in docstrings and comments
SIG_FIELD = "sig"  # sig: str
SIG_COMMENT = "# sig:"  # sig: str
SIG_ALIAS = "# sigalias:"  # sig: str

# decorators that are kept in generated stubs
DECORATORS = {"property", "staticmethod", "classmethod"}  # sig: Set[str]

LINE_LENGTH_LIMIT = 79
INDENT = 4 * " "
EDIT_WARNING = "THIS FILE IS AUTOMATICALLY GENERATED, DO NOT EDIT MANUALLY."

# matches dotted type names such as "docutils.nodes.document"
_RE_QUALIFIED_TYPES = re.compile(r"\w+(?:\.\w+)*")
# matches a sig comment that occurs inside a string literal (to be ignored)
_RE_COMMENT_IN_STRING = re.compile(r"""['"]\s*%(text)s\s*.*['"]""" % {"text": SIG_COMMENT})

_logger = logging.getLogger(__name__)
def get_fields(node, fields_tag="field_list"):
    """Get the field names and their values from a node.

    :sig: (Document, str) -> Dict[str, str]
    :param node: Node to get the fields from.
    :param fields_tag: Tag of child node that contains the fields.
    :return: Names and values of fields.
    """
    containers = [child for child in node.children if child.tagname == fields_tag]
    if not containers:
        return {}
    assert len(containers) == 1, "multiple nodes with tag " + fields_tag
    result = {}
    for entry in containers[0].children:
        if entry.tagname != "field":
            continue
        parts = {part.tagname: part.rawsource.strip() for part in entry.children}
        result[parts["field_name"]] = parts["field_body"]
    return result
def get_signature(node):
    """Get the signature of a function or a class.

    :sig: (Union[ast.FunctionDef, ast.AsyncFunctionDef, ast.ClassDef]) -> Optional[str]
    :param node: Node to get the signature from.
    :return: Value of signature field in node docstring, or ``None`` if there's no signature.
    """
    docstring = ast.get_docstring(node)
    return extract_signature(docstring) if docstring is not None else None
def split_parameter_types(parameters):
    """Split a parameter types declaration into individual types.

    The input is the left hand side of a signature (the part before the arrow),
    excluding the parentheses.

    :sig: (str) -> List[str]
    :param parameters: Comma separated parameter types.
    :return: Parameter types.
    """
    if parameters == "":
        return []

    # Split on top-level commas only; commas inside [] belong to a nested
    # generic type (e.g. "Dict[str, int]") and must be ignored.
    # (The original used a dangling ``for...else`` that always executed its
    # ``else`` branch; this form makes the trailing append explicit.)
    types = []
    bracket_depth = 0
    start = 0
    for i, char in enumerate(parameters):
        if (char == ",") and (bracket_depth == 0):
            types.append(parameters[start:i].strip())
            start = i + 1
        elif char == "[":
            bracket_depth += 1
        elif char == "]":
            bracket_depth -= 1
    # the remainder after the last top-level comma is the final type
    types.append(parameters[start:].strip())
    return types
def parse_signature(signature):
    """Parse a signature into its input and return parameter types.

    This will also collect the types that are required by any of the input
    and return types.

    :sig: (str) -> Tuple[List[str], str, Set[str]]
    :param signature: Signature to parse.
    :return: Input parameter types, return type, and all required types.
    """
    if " -> " not in signature:
        # signature comment: no parameters, treat variable type as return type
        param_types = None
        return_type = signature.strip()
    else:
        lhs, rhs = signature.split(" -> ")
        return_type = rhs.strip()
        # drop the parentheses around the parameter type list
        csv = lhs.strip()[1:-1].strip()
        param_types = split_parameter_types(csv)
    requires = set(_RE_QUALIFIED_TYPES.findall(signature))
    return param_types, return_type, requires
class StubNode:
    """A node in a stub tree."""

    def __init__(self):
        """Initialize this stub node.

        :sig: () -> None
        """
        self.variables = []  # sig: List[VariableNode]
        self.variable_names = set()  # sig: Set[str]
        self.children = []  # sig: List[Union[FunctionNode, ClassNode]]
        self.parent = None  # sig: Optional[StubNode]

    def add_variable(self, node):
        """Add a variable node to this node.

        :sig: (VariableNode) -> None
        :param node: Variable node to add.
        """
        # repeated assignments to the same name are recorded only once
        if node.name not in self.variable_names:
            self.variables.append(node)
            self.variable_names.add(node.name)
            node.parent = self

    def add_child(self, node):
        """Add a function/method or class node to this node.

        :sig: (Union[FunctionNode, ClassNode]) -> None
        :param node: Function or class node to add.
        """
        self.children.append(node)
        node.parent = self

    def get_code(self):
        """Get the stub code for this node.

        The stub code for a node consists of the type annotations of its variables,
        followed by the prototypes of its functions/methods and classes.

        :sig: () -> List[str]
        :return: Lines of stub code for this node.
        """
        stub = []
        for child in self.variables:
            stub.extend(child.get_code())
        # at module level, separate variables from the definitions with a
        # blank line; class bodies handle their own spacing
        if (
            (len(self.variables) > 0)
            and (len(self.children) > 0)
            and (not isinstance(self, ClassNode))
        ):
            stub.append("")
        for child in self.children:
            stub.extend(child.get_code())
        return stub
class VariableNode(StubNode):
    """A node representing an assignment in a stub tree."""

    def __init__(self, name, type_):
        """Initialize this variable node.

        :sig: (str, str) -> None
        :param name: Name of variable that is being assigned to.
        :param type_: Type of variable.
        """
        # explicit base-class call needed under Python 2
        if not PY3:
            StubNode.__init__(self)
        else:
            super().__init__()
        self.name = name  # sig: str
        self.type_ = type_  # sig: str

    def get_code(self):
        """Get the type annotation for this variable.

        :sig: () -> List[str]
        :return: Lines of stub code for this variable.
        """
        # stubs use the comment-style annotation form
        return ["%(n)s = ... # type: %(t)s" % {"n": self.name, "t": self.type_}]
class FunctionNode(StubNode):
    """A node representing a function in a stub tree."""

    def __init__(self, name, parameters, rtype, decorators=None):
        """Initialize this function node.

        The parameters have to given as a list of triples where each item specifies
        the name of the parameter, its type, and whether it has a default value or not.

        :sig: (str, Sequence[Tuple[str, str, bool]], str, Optional[Sequence[str]]) -> None
        :param name: Name of function.
        :param parameters: List of parameter triples (name, type, has_default).
        :param rtype: Type of return value.
        :param decorators: Decorators of function.
        """
        # explicit base-class call needed under Python 2
        if not PY3:
            StubNode.__init__(self)
        else:
            super().__init__()
        self.name = name  # sig: str
        self.parameters = parameters  # sig: Sequence[Tuple[str, str, bool]]
        self.rtype = rtype  # sig: str
        self.decorators = decorators if decorators is not None else []  # sig: Sequence[str]
        self._async = False  # sig: bool

    def get_code(self):
        """Get the stub code for this function.

        :sig: () -> List[str]
        :return: Lines of stub code for this function.
        """
        stub = []
        # keep only decorators that are meaningful in a stub
        for deco in self.decorators:
            if (deco in DECORATORS) or deco.endswith(".setter"):
                stub.append("@" + deco)
        parameters = []
        for name, type_, has_default in self.parameters:
            decl = "%(n)s%(t)s%(d)s" % {
                "n": name,
                "t": ": " + type_ if type_ else "",
                "d": " = ..." if has_default else "",
            }
            parameters.append(decl)
        slots = {
            "a": "async " if self._async else "",
            "n": self.name,
            "p": ", ".join(parameters),
            "r": self.rtype,
        }
        prototype = "%(a)sdef %(n)s(%(p)s) -> %(r)s: ..." % slots
        if len(prototype) <= LINE_LENGTH_LIMIT:
            # everything fits on a single line
            stub.append(prototype)
        elif len(INDENT + slots["p"]) <= LINE_LENGTH_LIMIT:
            # all parameters fit on one indented continuation line
            stub.append("%(a)sdef %(n)s(" % slots)
            stub.append(INDENT + slots["p"])
            stub.append(") -> %(r)s: ..." % slots)
        else:
            # otherwise, one parameter per line
            stub.append("%(a)sdef %(n)s(" % slots)
            for param in parameters:
                stub.append(INDENT + param + ",")
            stub.append(") -> %(r)s: ..." % slots)
        return stub
class ClassNode(StubNode):
    """A node representing a class in a stub tree."""

    def __init__(self, name, bases, signature=None):
        """Initialize this class node.

        :sig: (str, Sequence[str], Optional[str]) -> None
        :param name: Name of class.
        :param bases: Base classes of class.
        :param signature: Signature of class, to be used in __init__ method.
        """
        # explicit base-class call needed under Python 2
        if not PY3:
            StubNode.__init__(self)
        else:
            super().__init__()
        self.name = name  # sig: str
        self.bases = bases  # sig: Sequence[str]
        self.signature = signature  # sig: Optional[str]

    def get_code(self):
        """Get the stub code for this class.

        :sig: () -> List[str]
        :return: Lines of stub code for this class.
        """
        stub = []
        bases = ("(" + ", ".join(self.bases) + ")") if len(self.bases) > 0 else ""
        slots = {"n": self.name, "b": bases}
        if (len(self.children) == 0) and (len(self.variables) == 0):
            # an empty class body collapses to an ellipsis
            stub.append("class %(n)s%(b)s: ..." % slots)
        else:
            stub.append("class %(n)s%(b)s:" % slots)
            super_code = super().get_code() if PY3 else StubNode.get_code(self)
            for line in super_code:
                stub.append(INDENT + line)
        return stub
def get_aliases(lines):
    """Get the type aliases in the source.

    :sig: (Sequence[str]) -> Dict[str, str]
    :param lines: Lines of the source code.
    :return: Aliases and their definitions.
    """
    aliases = {}
    for raw_line in lines:
        stripped = raw_line.strip()
        if stripped.startswith(SIG_ALIAS):
            _, definition = stripped.split(SIG_ALIAS)
            name, signature = [part.strip() for part in definition.split("=")]
            aliases[name] = signature
    return aliases
class StubGenerator(ast.NodeVisitor):
    """A transformer that generates stub declarations from a source code."""

    def __init__(self, source, generic=False):
        """Initialize this stub generator.

        :sig: (str, bool) -> None
        :param source: Source code to generate the stub for.
        :param generic: Whether to produce generic stubs.
        """
        self.root = StubNode()  # sig: StubNode
        self.generic = generic  # sig: bool
        self.imported_namespaces = OrderedDict()  # sig: OrderedDict[str, str]
        self.imported_names = OrderedDict()  # sig: OrderedDict[str, str]
        self.defined_types = set()  # sig: Set[str]
        self.required_types = set()  # sig: Set[str]
        self.aliases = OrderedDict()  # sig: OrderedDict[str, str]
        self._parents = [self.root]  # sig: List[StubNode]
        self._code_lines = source.splitlines()  # sig: List[str]
        # aliases must be known before the tree walk resolves types
        self.collect_aliases()
        ast_tree = ast.parse(source)
        self.visit(ast_tree)

    def collect_aliases(self):
        """Collect the type aliases in the source.

        :sig: () -> None
        """
        self.aliases = get_aliases(self._code_lines)
        for alias, signature in self.aliases.items():
            _, _, requires = parse_signature(signature)
            self.required_types |= requires
            self.defined_types |= {alias}

    def visit_Import(self, node):
        """Visit an import node."""
        line = self._code_lines[node.lineno - 1]
        # text before the "import" keyword (empty for a plain "import x" line)
        module_name = line.split("import")[0].strip()
        for name in node.names:
            imported_name = name.name
            if name.asname:
                # record "alias::original" so the alias can be regenerated
                imported_name = name.asname + "::" + imported_name
            self.imported_namespaces[imported_name] = module_name

    def visit_ImportFrom(self, node):
        """Visit an from-import node."""
        line = self._code_lines[node.lineno - 1]
        # keep the module name exactly as written in the source
        module_name = line.split("from")[1].split("import")[0].strip()
        for name in node.names:
            imported_name = name.name
            if name.asname:
                # record "alias::original" so the alias can be regenerated
                imported_name = name.asname + "::" + imported_name
            self.imported_names[imported_name] = module_name

    def visit_Assign(self, node):
        """Visit an assignment node."""
        line = self._code_lines[node.lineno - 1]
        if SIG_COMMENT in line:
            # ignore sig comments that actually occur inside string literals
            line = _RE_COMMENT_IN_STRING.sub("", line)
        if (SIG_COMMENT not in line) and (not self.generic):
            return
        if SIG_COMMENT in line:
            _, signature = line.split(SIG_COMMENT)
            # for a variable, the whole signature is its type
            _, return_type, requires = parse_signature(signature)
            self.required_types |= requires
        parent = self._parents[-1]
        for var in node.targets:
            if isinstance(var, ast.Name):
                name, p = var.id, parent
            elif (
                isinstance(var, ast.Attribute)
                and isinstance(var.value, ast.Name)
                and (var.value.id == "self")
            ):
                # "self.attr = ..." belongs to the enclosing class, not the method
                name, p = var.attr, parent.parent
            else:
                name, p = None, None
            if name is not None:
                if self.generic:
                    return_type = "Any"
                    self.required_types.add(return_type)
                stub_node = VariableNode(name, return_type)
                p.add_variable(stub_node)

    def get_function_node(self, node):
        """Process a function node.

        :sig: (Union[ast.FunctionDef, ast.AsyncFunctionDef]) -> FunctionNode
        :param node: Node to process.
        :return: Generated function node in stub tree.
        """
        decorators = []
        for d in node.decorator_list:
            if hasattr(d, "id"):
                decorators.append(d.id)
            elif hasattr(d, "func"):
                decorators.append(d.func.id)
            elif hasattr(d, "value"):
                decorators.append(d.value.id + "." + d.attr)
        signature = get_signature(node)
        if signature is None:
            parent = self._parents[-1]
            # __init__ may take its signature from the class docstring
            if isinstance(parent, ClassNode) and (node.name == "__init__"):
                signature = parent.signature
        if (signature is None) and (not self.generic):
            return None
        param_names = [arg.arg if PY3 else arg.id for arg in node.args.args]
        n_args = len(param_names)
        if signature is None:
            # generic mode: everything becomes Any
            param_types, rtype, requires = ["Any"] * n_args, "Any", {"Any"}
        else:
            _logger.debug("parsing signature for %s", node.name)
            param_types, rtype, requires = parse_signature(signature)
        # TODO: only in classes
        if ((n_args > 0) and (param_names[0] == "self")) or (
            (n_args > 0) and (param_names[0] == "cls") and ("classmethod" in decorators)
        ):
            # "self"/"cls" is not declared in the signature; give it an empty type
            if signature is None:
                param_types[0] = ""
            else:
                param_types.insert(0, "")
        _logger.debug("parameter types: %s", param_types)
        _logger.debug("return type: %s", rtype)
        _logger.debug("required types: %s", requires)
        self.required_types |= requires
        if node.args.vararg is not None:
            param_names.append("*" + (node.args.vararg.arg if PY3 else node.args.vararg))
            param_types.append("")
        if node.args.kwarg is not None:
            param_names.append("**" + (node.args.kwarg.arg if PY3 else node.args.kwarg))
            param_types.append("")
        kwonly_args = getattr(node.args, "kwonlyargs", [])
        if len(kwonly_args) > 0:
            param_names.extend([arg.arg for arg in kwonly_args])
            if signature is None:
                param_types.extend(["Any"] * len(kwonly_args))
        if len(param_types) != len(param_names):
            raise ValueError("Parameter names and types don't match: " + node.name)
        # match default value nodes to parameters by their source position
        param_locs = [(a.lineno, a.col_offset) for a in (node.args.args + kwonly_args)]
        param_defaults = {
            bisect(param_locs, (d.lineno, d.col_offset)) - 1 for d in node.args.defaults
        }
        kwonly_defaults = getattr(node.args, "kw_defaults", [])
        for i, d in enumerate(kwonly_defaults):
            if d is not None:
                param_defaults.add(n_args + i)
        params = [
            (name, type_, i in param_defaults)
            for i, (name, type_) in enumerate(zip(param_names, param_types))
        ]
        if len(kwonly_args) > 0:
            # bare "*" marks the start of keyword-only parameters
            params.insert(n_args, ("*", "", False))
        stub_node = FunctionNode(
            node.name, parameters=params, rtype=rtype, decorators=decorators
        )
        self._parents[-1].add_child(stub_node)
        # descend so nested definitions attach to this function
        self._parents.append(stub_node)
        self.generic_visit(node)
        del self._parents[-1]
        return stub_node

    def visit_FunctionDef(self, node):
        """Visit a function node."""
        node = self.get_function_node(node)
        if node is not None:
            node._async = False

    def visit_AsyncFunctionDef(self, node):
        """Visit an async function node."""
        node = self.get_function_node(node)
        if node is not None:
            node._async = True

    def visit_ClassDef(self, node):
        """Visit a class node."""
        self.defined_types.add(node.name)
        bases = []
        for n in node.bases:
            base_parts = []
            # walk attribute chains to rebuild dotted base names
            while True:
                if not isinstance(n, ast.Attribute):
                    base_parts.append(n.id)
                    break
                else:
                    base_parts.append(n.attr)
                    n = n.value
            bases.append(".".join(base_parts[::-1]))
        self.required_types |= set(bases)
        signature = get_signature(node)
        stub_node = ClassNode(node.name, bases=bases, signature=signature)
        self._parents[-1].add_child(stub_node)
        # descend so members attach to this class
        self._parents.append(stub_node)
        self.generic_visit(node)
        del self._parents[-1]

    @staticmethod
    def generate_import_from(module_, names):
        """Generate an import line.

        :sig: (str, Set[str]) -> str
        :param module_: Name of module to import the names from.
        :param names: Names to import.
        :return: Import line in stub code.
        """
        # "alias::original" entries need an "as" clause
        regular_names = [n for n in names if "::" not in n]
        as_names = [n for n in names if "::" in n]
        line = ""
        if len(regular_names) > 0:
            slots = {"m": module_, "n": ", ".join(sorted(regular_names))}
            line = "from %(m)s import %(n)s" % slots
            if len(line) > LINE_LENGTH_LIMIT:
                # too long: switch to a parenthesized multi-line import
                slots["n"] = INDENT + (",\n" + INDENT).join(sorted(regular_names)) + ","
                line = "from %(m)s import (\n%(n)s\n)" % slots
        if len(as_names) > 0:
            line += "\n"
            for as_name in as_names:
                a, n = as_name.split("::")
                line += "from %(m)s import %(n)s as %(a)s" % {"m": module_, "n": n, "a": a}
        return line

    def generate_stub(self):
        """Generate the stub code for this source.

        :sig: () -> str
        :return: Generated stub code.
        """
        # start from all required types, drop those that need no import
        needed_types = self.required_types - BUILTIN_TYPES
        needed_types -= self.defined_types
        _logger.debug("defined types: %s", self.defined_types)
        module_vars = {v.name for v in self.root.variables}
        _logger.debug("module variables: %s", module_vars)
        # dotted names need their namespace imported rather than the name itself
        qualified_types = {n for n in needed_types if "." in n}
        qualified_namespaces = {".".join(n.split(".")[:-1]) for n in qualified_types}
        needed_namespaces = qualified_namespaces - module_vars
        needed_types -= qualified_types
        _logger.debug("needed namespaces: %s", needed_namespaces)
        imported_names = {n.split("::")[0] for n in self.imported_names}
        imported_types = imported_names & (needed_types | needed_namespaces)
        needed_types -= imported_types
        needed_namespaces -= imported_names
        _logger.debug("used imported types: %s", imported_types)
        try:
            typing_mod = __import__("typing")
            typing_types = {n for n in needed_types if hasattr(typing_mod, n)}
            needed_types -= typing_types
            _logger.debug("types from typing module: %s", typing_types)
        except ImportError:
            typing_types = set()
            # NOTE(review): Logger.warn is deprecated; warning() is the modern spelling
            _logger.warn("typing module not installed")
        if len(needed_types) > 0:
            raise ValueError("Unknown types: " + ", ".join(needed_types))
        out = StringIO()
        # "started" tracks whether a blank separator line is needed
        started = False
        if len(typing_types) > 0:
            line = self.generate_import_from("typing", typing_types)
            out.write(line + "\n")
            started = True
        if len(imported_types) > 0:
            if started:
                out.write("\n")
            # preserve the import order in the source file
            for name in self.imported_names:
                if name.split("::")[0] in imported_types:
                    line = self.generate_import_from(self.imported_names[name], {name})
                    out.write(line + "\n")
            started = True
        if len(needed_namespaces) > 0:
            if started:
                out.write("\n")
            as_names = {n.split("::")[0]: n for n in self.imported_namespaces if "::" in n}
            for module_ in sorted(needed_namespaces):
                if module_ in as_names:
                    a, n = as_names[module_].split("::")
                    out.write("import " + n + " as " + a + "\n")
                else:
                    out.write("import " + module_ + "\n")
            started = True
        if len(self.aliases) > 0:
            if started:
                out.write("\n")
            for alias, signature in self.aliases.items():
                out.write("%s = %s\n" % (alias, signature))
            started = True
        if started:
            out.write("\n")
        # write the stub lines, inserting blank lines between definitions
        stub_lines = self.root.get_code()
        n_lines = len(stub_lines)
        for line_no in range(n_lines):
            prev_line = stub_lines[line_no - 1] if line_no > 0 else None
            line = stub_lines[line_no]
            next_line = stub_lines[line_no + 1] if line_no < (n_lines - 1) else None
            if (
                line.startswith("class ")
                and (prev_line is not None)
                and (
                    (not prev_line.startswith("class "))
                    or (next_line and next_line.startswith(" "))
                )
            ):
                out.write("\n")
            if (
                line.startswith("def ")
                and (prev_line is not None)
                and (prev_line.startswith((" ", "class ")))
            ):
                out.write("\n")
            out.write(line + "\n")
            line_no += 1  # NOTE(review): no effect inside a for loop; leftover from a while loop?
        return out.getvalue()
def get_stub(source, generic=False):
    """Generate the stub code for the given source code.

    :sig: (str, bool) -> str
    :param source: Source code to generate the stub for.
    :param generic: Whether to produce generic stubs.
    :return: Generated stub code.
    """
    return StubGenerator(source, generic=generic).generate_stub()
def get_mod_paths(mod_name, out_dir):
    """Get source and stub paths for a module.

    :param mod_name: Dotted name of the module to locate.
    :param out_dir: Root directory for the generated stub files.
    :return: List of (source path, stub destination path) pairs.
    """
    paths = []
    try:
        mod = get_loader(mod_name)
        source = Path(mod.path)
        # only plain Python sources can produce a stub (skips extension modules)
        if source.name.endswith(".py"):
            source_rel = Path(*mod_name.split("."))
            if source.name == "__init__.py":
                source_rel = source_rel.joinpath("__init__.py")
            destination = Path(out_dir, source_rel.with_suffix(".pyi"))
            paths.append((source, destination))
    except Exception as e:
        # best-effort: log and skip modules that cannot be introspected
        _logger.debug(e)
        _logger.warning("cannot handle module, skipping: %s", mod_name)
    return paths
def get_pkg_paths(pkg_name, out_dir):
    """Recursively get all source and stub paths for a package.

    :param pkg_name: Dotted name of the package to walk.
    :param out_dir: Root directory for the generated stub files.
    :return: List of (source path, stub destination path) pairs.
    """
    paths = []
    try:
        pkg = import_module(pkg_name)
        if not hasattr(pkg, "__path__"):
            # not a package, just a plain module
            return get_mod_paths(pkg_name, out_dir)
        for mod_info in walk_packages(pkg.__path__, pkg.__name__ + "."):
            mod_paths = get_mod_paths(mod_info.name, out_dir)
            paths.extend(mod_paths)
    except Exception as e:
        # best-effort: log and skip packages that fail to import or walk
        _logger.debug(e)
        _logger.warning("cannot handle package, skipping: %s", pkg_name)
    return paths
############################################################
# SPHINX
############################################################
def process_docstring(app, what, name, obj, options, lines):
    """Modify the docstring before generating documentation.

    This will insert type declarations for parameters and return type
    into the docstring, and remove the signature field so that it will
    be excluded from the generated document.
    """
    # module-level type aliases are cached on the Sphinx app object
    aliases = getattr(app, "_sigaliases", None)
    if aliases is None:
        if what == "module":
            aliases = get_aliases(inspect.getsource(obj).splitlines())
            app._sigaliases = aliases
    # NOTE(review): aliases can still be None below if a non-module entry is
    # processed before its module — verify against Sphinx's processing order
    sig_marker = ":" + SIG_FIELD + ":"
    is_class = what in ("class", "exception")
    signature = extract_signature("\n".join(lines))
    if signature is None:
        if not is_class:
            return
        # for classes, fall back to the signature in the __init__ docstring
        init_method = getattr(obj, "__init__")
        init_doc = init_method.__doc__
        init_lines = init_doc.splitlines()[1:]
        if len(init_lines) > 1:
            init_doc = textwrap.dedent("\n".join(init_lines[1:]))
            init_lines = init_doc.splitlines()
        if sig_marker not in init_doc:
            return
        # copy the signature field lines from __init__ into the class docstring
        sig_started = False
        for line in init_lines:
            if line.lstrip().startswith(sig_marker):
                sig_started = True
            if sig_started:
                lines.append(line)
        signature = extract_signature("\n".join(lines))
    if is_class:
        # NOTE(review): init_method is only bound in the fallback branch above;
        # a class carrying its own :sig: field would hit a NameError here — confirm
        obj = init_method
    param_types, rtype, _ = parse_signature(signature)
    param_names = [p for p in inspect.signature(obj).parameters]
    if is_class and (param_names[0] == "self"):
        del param_names[0]
    # if something goes wrong, don't insert parameter types
    if len(param_names) == len(param_types):
        for name, type_ in zip(param_names, param_types):
            find = ":param %(name)s:" % {"name": name}
            alias = aliases.get(type_)
            if alias is not None:
                # render the aliased definition with the alias as a superscript
                type_ = "*%(type)s* :sup:`%(alias)s`" % {"type": type_, "alias": alias}
            for i, line in enumerate(lines):
                if line.startswith(find):
                    lines.insert(i, ":type %(name)s: %(type)s" % {"name": name, "type": type_})
                    break
    if not is_class:
        for i, line in enumerate(lines):
            if line.startswith((":return:", ":returns:")):
                lines.insert(i, ":rtype: " + rtype)
                break
    # remove the signature field
    sig_start = 0
    while sig_start < len(lines):
        if lines[sig_start].startswith(sig_marker):
            break
        sig_start += 1
    # the field ends at the first non-indented line after the marker
    sig_end = sig_start + 1
    while sig_end < len(lines):
        if (not lines[sig_end]) or (lines[sig_end][0] != " "):
            break
        sig_end += 1
    # delete in reverse so earlier indexes stay valid
    for i in reversed(range(sig_start, sig_end)):
        del lines[i]
def setup(app):
    """Register to Sphinx.

    :param app: Sphinx application instance.
    :return: Extension metadata for Sphinx.
    """
    # hook into autodoc so :sig: fields become type declarations
    app.connect("autodoc-process-docstring", process_docstring)
    return {"version": __version__}
############################################################
# MAIN
############################################################
def main(argv=None):
    """Start the command line interface.

    Parses the command line options, collects the source files to process,
    and writes a ``.pyi`` stub for every source that yields a non-empty stub.

    :param argv: Command line arguments (defaults to ``sys.argv``).
    """
    parser = ArgumentParser(prog="pygenstub")
    parser.add_argument("--version", action="version", version="%(prog)s " + __version__)
    parser.add_argument("files", nargs="*", help="generate stubs for given files")
    parser.add_argument(
        "-m",
        "--module",
        action="append",
        metavar="MODULE",
        dest="modules",
        default=[],
        help="generate stubs for given modules",
    )
    parser.add_argument(
        "-o", "--output", metavar="PATH", dest="out_dir", help="change the output directory"
    )
    parser.add_argument(
        "--generic", action="store_true", default=False, help="generate generic stubs"
    )
    parser.add_argument("--debug", action="store_true", help="enable debug messages")
    argv = argv if argv is not None else sys.argv
    arguments = parser.parse_args(argv[1:])
    # set debug mode
    if arguments.debug:
        logging.basicConfig(level=logging.DEBUG)
        _logger.debug("running in debug mode")
    out_dir = arguments.out_dir if arguments.out_dir is not None else ""
    # module stubs have no natural destination, so -o is mandatory for them
    if (out_dir == "") and (len(arguments.modules) > 0):
        print("Output directory must be given when generating stubs for modules.")
        sys.exit(1)
    # collect (source, destination) path pairs for every stub to generate
    modules = []
    for path in arguments.files:
        paths = Path(path).glob("**/*.py") if Path(path).is_dir() else [Path(path)]
        for source in paths:
            # paths reaching outside the current directory are normalized first
            if str(source).startswith(os.path.pardir):
                source = source.absolute().resolve()
            # when writing under an output directory, re-root absolute paths
            if (out_dir != "") and source.is_absolute():
                source = source.relative_to(source.root)
            destination = Path(out_dir, source.with_suffix(".pyi"))
            modules.append((source, destination))
    for mod_name in arguments.modules:
        modules.extend(get_pkg_paths(mod_name, out_dir))
    # generate and write stubs; failures are reported but do not abort the run
    for source, destination in modules:
        _logger.info("generating stub for %s to path %s", source, destination)
        with source.open() as f:
            code = f.read()
        try:
            stub = get_stub(code, generic=arguments.generic)
        except Exception as e:
            print(source, "-", e, file=sys.stderr)
            continue
        # empty stubs (no sig fields found) are not written
        if stub != "":
            if not destination.parent.exists():
                destination.parent.mkdir(parents=True)
            with destination.open("w") as f:
                f.write("# " + EDIT_WARNING + "\n\n" + stub)
# invoke the CLI entry point when run as a script
if __name__ == "__main__":
    main()
|
uyar/pygenstub | pygenstub.py | split_parameter_types | python | def split_parameter_types(parameters):
if parameters == "":
return []
# only consider the top level commas, ignore the ones in []
commas = []
bracket_depth = 0
for i, char in enumerate(parameters):
if (char == ",") and (bracket_depth == 0):
commas.append(i)
elif char == "[":
bracket_depth += 1
elif char == "]":
bracket_depth -= 1
types = []
last_i = 0
for i in commas:
types.append(parameters[last_i:i].strip())
last_i = i + 1
else:
types.append(parameters[last_i:].strip())
return types | Split a parameter types declaration into individual types.
The input is the left hand side of a signature (the part before the arrow),
excluding the parentheses.
:sig: (str) -> List[str]
:param parameters: Comma separated parameter types.
:return: Parameter types. | train | https://github.com/uyar/pygenstub/blob/a6b18a823382d3c6be29c411fb33c58b6090d22c/pygenstub.py#L127-L158 | null | # Copyright (C) 2016-2019 H. Turgut Uyar <uyar@tekir.org>
#
# pygenstub is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pygenstub is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pygenstub. If not, see <http://www.gnu.org/licenses/>.
"""pygenstub is a utility for generating stub files from docstrings in source files.
It takes a source file as input and creates a stub file with the same base name
and the ``.pyi`` extension.
For more information, please refer to the documentation:
https://pygenstub.tekir.org/
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import ast
import inspect
import logging
import os
import re
import sys
import textwrap
from argparse import ArgumentParser
from bisect import bisect
from collections import OrderedDict
from importlib import import_module
from io import StringIO
from pkgutil import get_loader, walk_packages
from docutils.core import publish_doctree
__version__ = "1.4.0" # sig: str
PY3 = sys.version_info >= (3, 0)
if not PY3:
import __builtin__ as builtins
from pathlib2 import Path
else:
import builtins
from pathlib import Path
# sigalias: Document = docutils.nodes.document
BUILTIN_TYPES = {k for k, t in builtins.__dict__.items() if isinstance(t, type)}
BUILTIN_TYPES.add("None")
SIG_FIELD = "sig" # sig: str
SIG_COMMENT = "# sig:" # sig: str
SIG_ALIAS = "# sigalias:" # sig: str
DECORATORS = {"property", "staticmethod", "classmethod"} # sig: Set[str]
LINE_LENGTH_LIMIT = 79
INDENT = 4 * " "
EDIT_WARNING = "THIS FILE IS AUTOMATICALLY GENERATED, DO NOT EDIT MANUALLY."
_RE_QUALIFIED_TYPES = re.compile(r"\w+(?:\.\w+)*")
_RE_COMMENT_IN_STRING = re.compile(r"""['"]\s*%(text)s\s*.*['"]""" % {"text": SIG_COMMENT})
_logger = logging.getLogger(__name__)
def get_fields(node, fields_tag="field_list"):
    """Collect the field names and values found under a node.

    :sig: (Document, str) -> Dict[str, str]
    :param node: Node to get the fields from.
    :param fields_tag: Tag of child node that contains the fields.
    :return: Names and values of fields.
    """
    containers = [child for child in node.children if child.tagname == fields_tag]
    if not containers:
        return {}
    assert len(containers) == 1, "multiple nodes with tag " + fields_tag
    result = {}
    for field in containers[0].children:
        if field.tagname != "field":
            continue
        parts = {part.tagname: part.rawsource.strip() for part in field.children}
        result[parts["field_name"]] = parts["field_body"]
    return result
def extract_signature(docstring):
    """Pull the signature field out of a docstring.

    :sig: (str) -> Optional[str]
    :param docstring: Docstring to extract the signature from.
    :return: Extracted signature, or ``None`` if there's no signature.
    """
    doctree = publish_doctree(docstring, settings_overrides={"report_level": 5})
    return get_fields(doctree).get(SIG_FIELD)
def get_signature(node):
    """Read the signature field from a definition's docstring.

    :sig: (Union[ast.FunctionDef, ast.AsyncFunctionDef, ast.ClassDef]) -> Optional[str]
    :param node: Node to get the signature from.
    :return: Value of signature field in node docstring, or ``None`` if there's no signature.
    """
    doc = ast.get_docstring(node)
    return extract_signature(doc) if doc is not None else None
def parse_signature(signature):
    """Parse a signature into its input and return parameter types.

    This will also collect the types that are required by any of the input
    and return types.

    :sig: (str) -> Tuple[List[str], str, Set[str]]
    :param signature: Signature to parse.
    :return: Input parameter types, return type, and all required types.
    """
    requires = set(_RE_QUALIFIED_TYPES.findall(signature))
    if " -> " not in signature:
        # signature comment: no parameters, treat variable type as return type
        return None, signature.strip(), requires
    lhs, rhs = [part.strip() for part in signature.split(" -> ")]
    # drop the parentheses around the parameter type list
    param_types = split_parameter_types(lhs[1:-1].strip())
    return param_types, rhs, requires
class StubNode:
    """A node in a stub tree."""

    def __init__(self):
        """Initialize this stub node.

        :sig: () -> None
        """
        self.variables = []  # sig: List[VariableNode]
        self.variable_names = set()  # sig: Set[str]
        self.children = []  # sig: List[Union[FunctionNode, ClassNode]]
        self.parent = None  # sig: Optional[StubNode]

    def add_variable(self, node):
        """Add a variable node to this node.

        :sig: (VariableNode) -> None
        :param node: Variable node to add.
        """
        # ignore duplicates so each variable is declared only once
        if node.name in self.variable_names:
            return
        self.variables.append(node)
        self.variable_names.add(node.name)
        node.parent = self

    def add_child(self, node):
        """Add a function/method or class node to this node.

        :sig: (Union[FunctionNode, ClassNode]) -> None
        :param node: Function or class node to add.
        """
        node.parent = self
        self.children.append(node)

    def get_code(self):
        """Get the stub code for this node.

        The stub code for a node consists of the type annotations of its variables,
        followed by the prototypes of its functions/methods and classes.

        :sig: () -> List[str]
        :return: Lines of stub code for this node.
        """
        stub = [line for var in self.variables for line in var.get_code()]
        # at module level, separate the variables from the definitions
        needs_separator = (
            self.variables and self.children and not isinstance(self, ClassNode)
        )
        if needs_separator:
            stub.append("")
        for node in self.children:
            stub.extend(node.get_code())
        return stub
class VariableNode(StubNode):
    """A node representing an assignment in a stub tree."""

    def __init__(self, name, type_):
        """Initialize this variable node.

        :sig: (str, str) -> None
        :param name: Name of variable that is being assigned to.
        :param type_: Type of variable.
        """
        if PY3:
            super().__init__()
        else:
            StubNode.__init__(self)
        self.name = name  # sig: str
        self.type_ = type_  # sig: str

    def get_code(self):
        """Get the type annotation for this variable.

        :sig: () -> List[str]
        :return: Lines of stub code for this variable.
        """
        annotation = "%(n)s = ... # type: %(t)s" % {"n": self.name, "t": self.type_}
        return [annotation]
class FunctionNode(StubNode):
    """A node representing a function in a stub tree."""
    def __init__(self, name, parameters, rtype, decorators=None):
        """Initialize this function node.

        The parameters have to be given as a list of triples where each item
        specifies the name of the parameter, its type, and whether it has a
        default value or not.

        :sig: (str, Sequence[Tuple[str, str, bool]], str, Optional[Sequence[str]]) -> None
        :param name: Name of function.
        :param parameters: List of parameter triples (name, type, has_default).
        :param rtype: Type of return value.
        :param decorators: Decorators of function.
        """
        if not PY3:
            StubNode.__init__(self)
        else:
            super().__init__()
        self.name = name  # sig: str
        self.parameters = parameters  # sig: Sequence[Tuple[str, str, bool]]
        self.rtype = rtype  # sig: str
        # fresh list built here to avoid a shared mutable default argument
        self.decorators = decorators if decorators is not None else []  # sig: Sequence[str]
        # set by the visitor for "async def" definitions
        self._async = False  # sig: bool
    def get_code(self):
        """Get the stub code for this function.

        :sig: () -> List[str]
        :return: Lines of stub code for this function.
        """
        stub = []
        # only decorators relevant to type checkers are kept in the stub
        for deco in self.decorators:
            if (deco in DECORATORS) or deco.endswith(".setter"):
                stub.append("@" + deco)
        # render each parameter as "name[: type][ = ...]"
        parameters = []
        for name, type_, has_default in self.parameters:
            decl = "%(n)s%(t)s%(d)s" % {
                "n": name,
                "t": ": " + type_ if type_ else "",
                "d": " = ..." if has_default else "",
            }
            parameters.append(decl)
        slots = {
            "a": "async " if self._async else "",
            "n": self.name,
            "p": ", ".join(parameters),
            "r": self.rtype,
        }
        # prefer a one-line prototype; fall back to the whole parameter list
        # on one indented line, then to one parameter per line
        prototype = "%(a)sdef %(n)s(%(p)s) -> %(r)s: ..." % slots
        if len(prototype) <= LINE_LENGTH_LIMIT:
            stub.append(prototype)
        elif len(INDENT + slots["p"]) <= LINE_LENGTH_LIMIT:
            stub.append("%(a)sdef %(n)s(" % slots)
            stub.append(INDENT + slots["p"])
            stub.append(") -> %(r)s: ..." % slots)
        else:
            stub.append("%(a)sdef %(n)s(" % slots)
            for param in parameters:
                stub.append(INDENT + param + ",")
            stub.append(") -> %(r)s: ..." % slots)
        return stub
class ClassNode(StubNode):
    """A node representing a class in a stub tree."""

    def __init__(self, name, bases, signature=None):
        """Initialize this class node.

        :sig: (str, Sequence[str], Optional[str]) -> None
        :param name: Name of class.
        :param bases: Base classes of class.
        :param signature: Signature of class, to be used in __init__ method.
        """
        if PY3:
            super().__init__()
        else:
            StubNode.__init__(self)
        self.name = name  # sig: str
        self.bases = bases  # sig: Sequence[str]
        self.signature = signature  # sig: Optional[str]

    def get_code(self):
        """Get the stub code for this class.

        :sig: () -> List[str]
        :return: Lines of stub code for this class.
        """
        slots = {
            "n": self.name,
            "b": ("(" + ", ".join(self.bases) + ")") if len(self.bases) > 0 else "",
        }
        if not (self.children or self.variables):
            # a class with no members collapses to a one-liner
            return ["class %(n)s%(b)s: ..." % slots]
        stub = ["class %(n)s%(b)s:" % slots]
        body = super().get_code() if PY3 else StubNode.get_code(self)
        stub.extend(INDENT + line for line in body)
        return stub
def get_aliases(lines):
    """Get the type aliases in the source.

    :sig: (Sequence[str]) -> Dict[str, str]
    :param lines: Lines of the source code.
    :return: Aliases and their definitions.
    """
    aliases = {}
    for raw in lines:
        stripped = raw.strip()
        if not stripped.startswith(SIG_ALIAS):
            continue
        _, content = stripped.split(SIG_ALIAS)
        alias, signature = [token.strip() for token in content.split("=")]
        aliases[alias] = signature
    return aliases
class StubGenerator(ast.NodeVisitor):
    """A transformer that generates stub declarations from a source code."""

    def __init__(self, source, generic=False):
        """Initialize this stub generator.

        :sig: (str, bool) -> None
        :param source: Source code to generate the stub for.
        :param generic: Whether to produce generic stubs.
        """
        self.root = StubNode()  # sig: StubNode
        self.generic = generic  # sig: bool
        self.imported_namespaces = OrderedDict()  # sig: OrderedDict[str, str]
        self.imported_names = OrderedDict()  # sig: OrderedDict[str, str]
        self.defined_types = set()  # sig: Set[str]
        self.required_types = set()  # sig: Set[str]
        self.aliases = OrderedDict()  # sig: OrderedDict[str, str]
        self._parents = [self.root]  # sig: List[StubNode]
        self._code_lines = source.splitlines()  # sig: List[str]
        self.collect_aliases()
        ast_tree = ast.parse(source)
        self.visit(ast_tree)

    def collect_aliases(self):
        """Collect the type aliases in the source.

        :sig: () -> None
        """
        self.aliases = get_aliases(self._code_lines)
        for alias, signature in self.aliases.items():
            _, _, requires = parse_signature(signature)
            self.required_types |= requires
            self.defined_types |= {alias}

    def visit_Import(self, node):
        """Visit an import node."""
        line = self._code_lines[node.lineno - 1]
        module_name = line.split("import")[0].strip()
        for name in node.names:
            imported_name = name.name
            # "asname::name" encodes an "import name as asname" binding
            if name.asname:
                imported_name = name.asname + "::" + imported_name
            self.imported_namespaces[imported_name] = module_name

    def visit_ImportFrom(self, node):
        """Visit an from-import node."""
        line = self._code_lines[node.lineno - 1]
        module_name = line.split("from")[1].split("import")[0].strip()
        for name in node.names:
            imported_name = name.name
            if name.asname:
                imported_name = name.asname + "::" + imported_name
            self.imported_names[imported_name] = module_name

    def visit_Assign(self, node):
        """Visit an assignment node."""
        line = self._code_lines[node.lineno - 1]
        if SIG_COMMENT in line:
            # ignore sig comments that are actually inside string literals
            line = _RE_COMMENT_IN_STRING.sub("", line)
        if (SIG_COMMENT not in line) and (not self.generic):
            return
        if SIG_COMMENT in line:
            _, signature = line.split(SIG_COMMENT)
            _, return_type, requires = parse_signature(signature)
            self.required_types |= requires
        parent = self._parents[-1]
        for var in node.targets:
            if isinstance(var, ast.Name):
                name, p = var.id, parent
            elif (
                isinstance(var, ast.Attribute)
                and isinstance(var.value, ast.Name)
                and (var.value.id == "self")
            ):
                # "self.attr = ..." inside a method belongs to the class
                name, p = var.attr, parent.parent
            else:
                name, p = None, None
            if name is not None:
                if self.generic:
                    return_type = "Any"
                    self.required_types.add(return_type)
                stub_node = VariableNode(name, return_type)
                p.add_variable(stub_node)

    def get_function_node(self, node):
        """Process a function node.

        :sig: (Union[ast.FunctionDef, ast.AsyncFunctionDef]) -> FunctionNode
        :param node: Node to process.
        :return: Generated function node in stub tree.
        """
        decorators = []
        for d in node.decorator_list:
            if hasattr(d, "id"):
                decorators.append(d.id)
            elif hasattr(d, "func"):
                decorators.append(d.func.id)
            elif hasattr(d, "value"):
                decorators.append(d.value.id + "." + d.attr)
        signature = get_signature(node)
        if signature is None:
            parent = self._parents[-1]
            # __init__ can inherit the signature given on the class docstring
            if isinstance(parent, ClassNode) and (node.name == "__init__"):
                signature = parent.signature
        if (signature is None) and (not self.generic):
            return None
        param_names = [arg.arg if PY3 else arg.id for arg in node.args.args]
        n_args = len(param_names)
        if signature is None:
            param_types, rtype, requires = ["Any"] * n_args, "Any", {"Any"}
        else:
            _logger.debug("parsing signature for %s", node.name)
            param_types, rtype, requires = parse_signature(signature)
        # TODO: only in classes
        if ((n_args > 0) and (param_names[0] == "self")) or (
            (n_args > 0) and (param_names[0] == "cls") and ("classmethod" in decorators)
        ):
            # self/cls is never annotated in the stub
            if signature is None:
                param_types[0] = ""
            else:
                param_types.insert(0, "")
        _logger.debug("parameter types: %s", param_types)
        _logger.debug("return type: %s", rtype)
        _logger.debug("required types: %s", requires)
        self.required_types |= requires
        if node.args.vararg is not None:
            param_names.append("*" + (node.args.vararg.arg if PY3 else node.args.vararg))
            param_types.append("")
        if node.args.kwarg is not None:
            param_names.append("**" + (node.args.kwarg.arg if PY3 else node.args.kwarg))
            param_types.append("")
        kwonly_args = getattr(node.args, "kwonlyargs", [])
        if len(kwonly_args) > 0:
            param_names.extend([arg.arg for arg in kwonly_args])
            if signature is None:
                param_types.extend(["Any"] * len(kwonly_args))
        if len(param_types) != len(param_names):
            raise ValueError("Parameter names and types don't match: " + node.name)
        # mark parameters that carry defaults by locating each default's
        # position relative to the parameter locations
        param_locs = [(a.lineno, a.col_offset) for a in (node.args.args + kwonly_args)]
        param_defaults = {
            bisect(param_locs, (d.lineno, d.col_offset)) - 1 for d in node.args.defaults
        }
        kwonly_defaults = getattr(node.args, "kw_defaults", [])
        for i, d in enumerate(kwonly_defaults):
            if d is not None:
                param_defaults.add(n_args + i)
        params = [
            (name, type_, i in param_defaults)
            for i, (name, type_) in enumerate(zip(param_names, param_types))
        ]
        if len(kwonly_args) > 0:
            # the bare "*" separator before keyword-only parameters
            params.insert(n_args, ("*", "", False))
        stub_node = FunctionNode(
            node.name, parameters=params, rtype=rtype, decorators=decorators
        )
        self._parents[-1].add_child(stub_node)
        self._parents.append(stub_node)
        self.generic_visit(node)
        del self._parents[-1]
        return stub_node

    def visit_FunctionDef(self, node):
        """Visit a function node."""
        node = self.get_function_node(node)
        if node is not None:
            node._async = False

    def visit_AsyncFunctionDef(self, node):
        """Visit an async function node."""
        node = self.get_function_node(node)
        if node is not None:
            node._async = True

    def visit_ClassDef(self, node):
        """Visit a class node."""
        self.defined_types.add(node.name)
        bases = []
        for n in node.bases:
            # walk attribute chains ("a.b.C") down to the root name
            base_parts = []
            while True:
                if not isinstance(n, ast.Attribute):
                    base_parts.append(n.id)
                    break
                else:
                    base_parts.append(n.attr)
                    n = n.value
            bases.append(".".join(base_parts[::-1]))
        self.required_types |= set(bases)
        signature = get_signature(node)
        stub_node = ClassNode(node.name, bases=bases, signature=signature)
        self._parents[-1].add_child(stub_node)
        self._parents.append(stub_node)
        self.generic_visit(node)
        del self._parents[-1]

    @staticmethod
    def generate_import_from(module_, names):
        """Generate an import line.

        :sig: (str, Set[str]) -> str
        :param module_: Name of module to import the names from.
        :param names: Names to import.
        :return: Import line in stub code.
        """
        regular_names = [n for n in names if "::" not in n]
        as_names = [n for n in names if "::" in n]
        line = ""
        if len(regular_names) > 0:
            slots = {"m": module_, "n": ", ".join(sorted(regular_names))}
            line = "from %(m)s import %(n)s" % slots
            if len(line) > LINE_LENGTH_LIMIT:
                # wrap long import lists in parentheses, one name per line
                slots["n"] = INDENT + (",\n" + INDENT).join(sorted(regular_names)) + ","
                line = "from %(m)s import (\n%(n)s\n)" % slots
        if len(as_names) > 0:
            line += "\n"
            for as_name in as_names:
                a, n = as_name.split("::")
                line += "from %(m)s import %(n)s as %(a)s" % {"m": module_, "n": n, "a": a}
        return line

    def generate_stub(self):
        """Generate the stub code for this source.

        :sig: () -> str
        :return: Generated stub code.
        """
        needed_types = self.required_types - BUILTIN_TYPES
        needed_types -= self.defined_types
        _logger.debug("defined types: %s", self.defined_types)
        module_vars = {v.name for v in self.root.variables}
        _logger.debug("module variables: %s", module_vars)
        qualified_types = {n for n in needed_types if "." in n}
        qualified_namespaces = {".".join(n.split(".")[:-1]) for n in qualified_types}
        needed_namespaces = qualified_namespaces - module_vars
        needed_types -= qualified_types
        _logger.debug("needed namespaces: %s", needed_namespaces)
        imported_names = {n.split("::")[0] for n in self.imported_names}
        imported_types = imported_names & (needed_types | needed_namespaces)
        needed_types -= imported_types
        needed_namespaces -= imported_names
        _logger.debug("used imported types: %s", imported_types)
        try:
            typing_mod = __import__("typing")
            typing_types = {n for n in needed_types if hasattr(typing_mod, n)}
            needed_types -= typing_types
            _logger.debug("types from typing module: %s", typing_types)
        except ImportError:
            typing_types = set()
            # fixed: use ``warning``; ``warn`` is a deprecated alias
            _logger.warning("typing module not installed")
        if len(needed_types) > 0:
            raise ValueError("Unknown types: " + ", ".join(needed_types))
        out = StringIO()
        started = False
        if len(typing_types) > 0:
            line = self.generate_import_from("typing", typing_types)
            out.write(line + "\n")
            started = True
        if len(imported_types) > 0:
            if started:
                out.write("\n")
            # preserve the import order in the source file
            for name in self.imported_names:
                if name.split("::")[0] in imported_types:
                    line = self.generate_import_from(self.imported_names[name], {name})
                    out.write(line + "\n")
            started = True
        if len(needed_namespaces) > 0:
            if started:
                out.write("\n")
            as_names = {n.split("::")[0]: n for n in self.imported_namespaces if "::" in n}
            for module_ in sorted(needed_namespaces):
                if module_ in as_names:
                    a, n = as_names[module_].split("::")
                    out.write("import " + n + " as " + a + "\n")
                else:
                    out.write("import " + module_ + "\n")
            started = True
        if len(self.aliases) > 0:
            if started:
                out.write("\n")
            for alias, signature in self.aliases.items():
                out.write("%s = %s\n" % (alias, signature))
            started = True
        if started:
            out.write("\n")
        stub_lines = self.root.get_code()
        n_lines = len(stub_lines)
        for line_no in range(n_lines):
            prev_line = stub_lines[line_no - 1] if line_no > 0 else None
            line = stub_lines[line_no]
            next_line = stub_lines[line_no + 1] if line_no < (n_lines - 1) else None
            # blank line before a class that follows other code, or before
            # a one-liner class that is followed by an indented body
            if (
                line.startswith("class ")
                and (prev_line is not None)
                and (
                    (not prev_line.startswith("class "))
                    or (next_line and next_line.startswith(" "))
                )
            ):
                out.write("\n")
            if (
                line.startswith("def ")
                and (prev_line is not None)
                and (prev_line.startswith((" ", "class ")))
            ):
                out.write("\n")
            out.write(line + "\n")
            # removed a dead "line_no += 1": the for loop rebinds line_no
            # on every iteration, so the increment had no effect
        return out.getvalue()
def get_stub(source, generic=False):
    """Get the stub code for a source code.

    :sig: (str, bool) -> str
    :param source: Source code to generate the stub for.
    :param generic: Whether to produce generic stubs.
    :return: Generated stub code.
    """
    return StubGenerator(source, generic=generic).generate_stub()
def get_mod_paths(mod_name, out_dir):
    """Get source and stub paths for a module."""
    try:
        loader = get_loader(mod_name)
        source = Path(loader.path)
        if not source.name.endswith(".py"):
            # no stub can be generated for extension/compiled modules
            return []
        rel = Path(*mod_name.split("."))
        if source.name == "__init__.py":
            rel = rel.joinpath("__init__.py")
        return [(source, Path(out_dir, rel.with_suffix(".pyi")))]
    except Exception as e:
        # best effort: log and skip modules that cannot be resolved
        _logger.debug(e)
        _logger.warning("cannot handle module, skipping: %s", mod_name)
        return []
def get_pkg_paths(pkg_name, out_dir):
    """Recursively get all source and stub paths for a package."""
    paths = []
    try:
        pkg = import_module(pkg_name)
        if not hasattr(pkg, "__path__"):
            # plain module, not a package: delegate to the module handler
            return get_mod_paths(pkg_name, out_dir)
        for info in walk_packages(pkg.__path__, pkg.__name__ + "."):
            paths.extend(get_mod_paths(info.name, out_dir))
    except Exception as e:
        # best effort: log and skip packages that fail to import or walk
        _logger.debug(e)
        _logger.warning("cannot handle package, skipping: %s", pkg_name)
    return paths
############################################################
# SPHINX
############################################################
def process_docstring(app, what, name, obj, options, lines):
    """Modify the docstring before generating documentation.

    This will insert type declarations for parameters and return type
    into the docstring, and remove the signature field so that it will
    be excluded from the generated document.
    """
    aliases = getattr(app, "_sigaliases", None)
    if aliases is None:
        if what == "module":
            aliases = get_aliases(inspect.getsource(obj).splitlines())
            app._sigaliases = aliases
        else:
            # fixed: the module may not have been processed yet; fall back
            # to an empty alias table instead of crashing on aliases.get()
            aliases = {}
    sig_marker = ":" + SIG_FIELD + ":"
    is_class = what in ("class", "exception")
    # fixed: bind __init__ up front so it is available even when the class
    # docstring itself carries the signature (previously it was only bound
    # in the no-signature branch, raising NameError otherwise)
    init_method = getattr(obj, "__init__") if is_class else None
    signature = extract_signature("\n".join(lines))
    if signature is None:
        if not is_class:
            return
        # fall back to the signature documented on __init__
        init_doc = init_method.__doc__
        init_lines = init_doc.splitlines()[1:]
        if len(init_lines) > 1:
            init_doc = textwrap.dedent("\n".join(init_lines[1:]))
            init_lines = init_doc.splitlines()
        if sig_marker not in init_doc:
            return
        # copy the sig field (and everything after it) into the class doc
        sig_started = False
        for line in init_lines:
            if line.lstrip().startswith(sig_marker):
                sig_started = True
            if sig_started:
                lines.append(line)
        signature = extract_signature("\n".join(lines))
    if is_class:
        # document the parameters of __init__, not of the class object
        obj = init_method
    param_types, rtype, _ = parse_signature(signature)
    param_names = [p for p in inspect.signature(obj).parameters]
    # fixed: guard against an empty parameter list before peeking at [0]
    if is_class and param_names and (param_names[0] == "self"):
        del param_names[0]
    # if something goes wrong, don't insert parameter types
    if len(param_names) == len(param_types):
        for name, type_ in zip(param_names, param_types):
            find = ":param %(name)s:" % {"name": name}
            alias = aliases.get(type_)
            if alias is not None:
                type_ = "*%(type)s* :sup:`%(alias)s`" % {"type": type_, "alias": alias}
            for i, line in enumerate(lines):
                if line.startswith(find):
                    lines.insert(i, ":type %(name)s: %(type)s" % {"name": name, "type": type_})
                    break
    if not is_class:
        for i, line in enumerate(lines):
            if line.startswith((":return:", ":returns:")):
                lines.insert(i, ":rtype: " + rtype)
                break
    # remove the signature field
    sig_start = 0
    while sig_start < len(lines):
        if lines[sig_start].startswith(sig_marker):
            break
        sig_start += 1
    if sig_start == len(lines):
        # fixed: no line starts with the marker (e.g. it is indented);
        # previously this deleted past the end of the list
        return
    sig_end = sig_start + 1
    while sig_end < len(lines):
        if (not lines[sig_end]) or (lines[sig_end][0] != " "):
            break
        sig_end += 1
    for i in reversed(range(sig_start, sig_end)):
        del lines[i]
def setup(app):
    """Hook the docstring processor into Sphinx.

    :param app: Sphinx application object.
    :return: Extension metadata for Sphinx.
    """
    app.connect("autodoc-process-docstring", process_docstring)
    info = {"version": __version__}
    return info
############################################################
# MAIN
############################################################
def main(argv=None):
    """Start the command line interface.

    Parses the command line options, collects the source files to process,
    and writes a ``.pyi`` stub for every source that yields a non-empty stub.

    :param argv: Command line arguments (defaults to ``sys.argv``).
    """
    parser = ArgumentParser(prog="pygenstub")
    parser.add_argument("--version", action="version", version="%(prog)s " + __version__)
    parser.add_argument("files", nargs="*", help="generate stubs for given files")
    parser.add_argument(
        "-m",
        "--module",
        action="append",
        metavar="MODULE",
        dest="modules",
        default=[],
        help="generate stubs for given modules",
    )
    parser.add_argument(
        "-o", "--output", metavar="PATH", dest="out_dir", help="change the output directory"
    )
    parser.add_argument(
        "--generic", action="store_true", default=False, help="generate generic stubs"
    )
    parser.add_argument("--debug", action="store_true", help="enable debug messages")
    argv = argv if argv is not None else sys.argv
    arguments = parser.parse_args(argv[1:])
    # set debug mode
    if arguments.debug:
        logging.basicConfig(level=logging.DEBUG)
        _logger.debug("running in debug mode")
    out_dir = arguments.out_dir if arguments.out_dir is not None else ""
    # module stubs have no natural destination, so -o is mandatory for them
    if (out_dir == "") and (len(arguments.modules) > 0):
        print("Output directory must be given when generating stubs for modules.")
        sys.exit(1)
    # collect (source, destination) path pairs for every stub to generate
    modules = []
    for path in arguments.files:
        paths = Path(path).glob("**/*.py") if Path(path).is_dir() else [Path(path)]
        for source in paths:
            # paths reaching outside the current directory are normalized first
            if str(source).startswith(os.path.pardir):
                source = source.absolute().resolve()
            # when writing under an output directory, re-root absolute paths
            if (out_dir != "") and source.is_absolute():
                source = source.relative_to(source.root)
            destination = Path(out_dir, source.with_suffix(".pyi"))
            modules.append((source, destination))
    for mod_name in arguments.modules:
        modules.extend(get_pkg_paths(mod_name, out_dir))
    # generate and write stubs; failures are reported but do not abort the run
    for source, destination in modules:
        _logger.info("generating stub for %s to path %s", source, destination)
        with source.open() as f:
            code = f.read()
        try:
            stub = get_stub(code, generic=arguments.generic)
        except Exception as e:
            print(source, "-", e, file=sys.stderr)
            continue
        # empty stubs (no sig fields found) are not written
        if stub != "":
            if not destination.parent.exists():
                destination.parent.mkdir(parents=True)
            with destination.open("w") as f:
                f.write("# " + EDIT_WARNING + "\n\n" + stub)
# invoke the CLI entry point when run as a script
if __name__ == "__main__":
    main()
|
uyar/pygenstub | pygenstub.py | parse_signature | python | def parse_signature(signature):
if " -> " not in signature:
# signature comment: no parameters, treat variable type as return type
param_types, return_type = None, signature.strip()
else:
lhs, return_type = [s.strip() for s in signature.split(" -> ")]
csv = lhs[1:-1].strip() # remove the parentheses around the parameter type list
param_types = split_parameter_types(csv)
requires = set(_RE_QUALIFIED_TYPES.findall(signature))
return param_types, return_type, requires | Parse a signature into its input and return parameter types.
This will also collect the types that are required by any of the input
and return types.
:sig: (str) -> Tuple[List[str], str, Set[str]]
:param signature: Signature to parse.
:return: Input parameter types, return type, and all required types. | train | https://github.com/uyar/pygenstub/blob/a6b18a823382d3c6be29c411fb33c58b6090d22c/pygenstub.py#L161-L179 | [
"def split_parameter_types(parameters):\n \"\"\"Split a parameter types declaration into individual types.\n\n The input is the left hand side of a signature (the part before the arrow),\n excluding the parentheses.\n\n :sig: (str) -> List[str]\n :param parameters: Comma separated parameter types.\n ... | # Copyright (C) 2016-2019 H. Turgut Uyar <uyar@tekir.org>
#
# pygenstub is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pygenstub is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pygenstub. If not, see <http://www.gnu.org/licenses/>.
"""pygenstub is a utility for generating stub files from docstrings in source files.
It takes a source file as input and creates a stub file with the same base name
and the ``.pyi`` extension.
For more information, please refer to the documentation:
https://pygenstub.tekir.org/
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import ast
import inspect
import logging
import os
import re
import sys
import textwrap
from argparse import ArgumentParser
from bisect import bisect
from collections import OrderedDict
from importlib import import_module
from io import StringIO
from pkgutil import get_loader, walk_packages
from docutils.core import publish_doctree
__version__ = "1.4.0" # sig: str
PY3 = sys.version_info >= (3, 0)
if not PY3:
import __builtin__ as builtins
from pathlib2 import Path
else:
import builtins
from pathlib import Path
# sigalias: Document = docutils.nodes.document
BUILTIN_TYPES = {k for k, t in builtins.__dict__.items() if isinstance(t, type)}
BUILTIN_TYPES.add("None")
SIG_FIELD = "sig" # sig: str
SIG_COMMENT = "# sig:" # sig: str
SIG_ALIAS = "# sigalias:" # sig: str
DECORATORS = {"property", "staticmethod", "classmethod"} # sig: Set[str]
LINE_LENGTH_LIMIT = 79
INDENT = 4 * " "
EDIT_WARNING = "THIS FILE IS AUTOMATICALLY GENERATED, DO NOT EDIT MANUALLY."
_RE_QUALIFIED_TYPES = re.compile(r"\w+(?:\.\w+)*")
_RE_COMMENT_IN_STRING = re.compile(r"""['"]\s*%(text)s\s*.*['"]""" % {"text": SIG_COMMENT})
_logger = logging.getLogger(__name__)
def get_fields(node, fields_tag="field_list"):
    """Get the field names and their values from a node.

    :sig: (Document, str) -> Dict[str, str]
    :param node: Node to get the fields from.
    :param fields_tag: Tag of child node that contains the fields.
    :return: Names and values of fields.
    """
    containers = [child for child in node.children if child.tagname == fields_tag]
    if not containers:
        return {}
    assert len(containers) == 1, "multiple nodes with tag " + fields_tag
    result = {}
    for field in containers[0].children:
        if field.tagname != "field":
            continue
        parts = {part.tagname: part.rawsource.strip() for part in field.children}
        result[parts["field_name"]] = parts["field_body"]
    return result
def extract_signature(docstring):
    """Extract the signature from a docstring.

    :sig: (str) -> Optional[str]
    :param docstring: Docstring to extract the signature from.
    :return: Extracted signature, or ``None`` if there's no signature.
    """
    # report_level 5 silences docutils diagnostics for malformed docstrings.
    tree = publish_doctree(docstring, settings_overrides={"report_level": 5})
    return get_fields(tree).get(SIG_FIELD)
def get_signature(node):
    """Get the signature of a function or a class.

    :sig: (Union[ast.FunctionDef, ast.AsyncFunctionDef, ast.ClassDef]) -> Optional[str]
    :param node: Node to get the signature from.
    :return: Value of signature field in node docstring, or ``None`` if there's no signature.
    """
    docstring = ast.get_docstring(node)
    return extract_signature(docstring) if docstring is not None else None
def split_parameter_types(parameters):
    """Split a parameter types declaration into individual types.

    The input is the left hand side of a signature (the part before the arrow),
    excluding the parentheses.

    :sig: (str) -> List[str]
    :param parameters: Comma separated parameter types.
    :return: Parameter types.
    """
    if parameters == "":
        return []
    # Split only on top-level commas; commas nested inside brackets
    # (e.g. in "Dict[str, int]") separate type arguments, not parameters.
    # The original collected comma positions first and then sliced in a
    # second pass ending in a misleading "for...else" (the "else" always
    # ran since the loop had no "break"); a single pass is equivalent.
    types = []
    start = 0
    bracket_depth = 0
    for i, char in enumerate(parameters):
        if (char == ",") and (bracket_depth == 0):
            types.append(parameters[start:i].strip())
            start = i + 1
        elif char == "[":
            bracket_depth += 1
        elif char == "]":
            bracket_depth -= 1
    # Whatever follows the last top-level comma is the final type.
    types.append(parameters[start:].strip())
    return types
class StubNode:
    """A node in a stub tree."""

    def __init__(self):
        """Initialize this stub node.

        :sig: () -> None
        """
        self.variables = []  # sig: List[VariableNode]
        self.variable_names = set()  # sig: Set[str]
        self.children = []  # sig: List[Union[FunctionNode, ClassNode]]
        self.parent = None  # sig: Optional[StubNode]

    def add_variable(self, node):
        """Add a variable node to this node.

        Duplicate variable names are silently ignored.

        :sig: (VariableNode) -> None
        :param node: Variable node to add.
        """
        if node.name not in self.variable_names:
            self.variables.append(node)
            self.variable_names.add(node.name)
            node.parent = self

    def add_child(self, node):
        """Add a function/method or class node to this node.

        :sig: (Union[FunctionNode, ClassNode]) -> None
        :param node: Function or class node to add.
        """
        self.children.append(node)
        node.parent = self

    def get_code(self):
        """Get the stub code for this node.

        The stub code for a node consists of the type annotations of its
        variables, followed by the prototypes of its functions/methods
        and classes.

        :sig: () -> List[str]
        :return: Lines of stub code for this node.
        """
        lines = []
        for variable in self.variables:
            lines.extend(variable.get_code())
        has_both = (len(self.variables) > 0) and (len(self.children) > 0)
        if has_both and not isinstance(self, ClassNode):
            # Separate module-level variables from the definitions below.
            lines.append("")
        for member in self.children:
            lines.extend(member.get_code())
        return lines
class VariableNode(StubNode):
    """A node representing an assignment in a stub tree."""

    def __init__(self, name, type_):
        """Initialize this variable node.

        :sig: (str, str) -> None
        :param name: Name of variable that is being assigned to.
        :param type_: Type of variable.
        """
        if PY3:
            super().__init__()
        else:
            StubNode.__init__(self)
        self.name = name  # sig: str
        self.type_ = type_  # sig: str

    def get_code(self):
        """Get the type annotation for this variable.

        :sig: () -> List[str]
        :return: Lines of stub code for this variable.
        """
        annotation = "%(n)s = ... # type: %(t)s" % {"n": self.name, "t": self.type_}
        return [annotation]
class FunctionNode(StubNode):
    """A node representing a function in a stub tree."""

    def __init__(self, name, parameters, rtype, decorators=None):
        """Initialize this function node.

        The parameters have to given as a list of triples where each item specifies
        the name of the parameter, its type, and whether it has a default value or not.

        :sig: (str, Sequence[Tuple[str, str, bool]], str, Optional[Sequence[str]]) -> None
        :param name: Name of function.
        :param parameters: List of parameter triples (name, type, has_default).
        :param rtype: Type of return value.
        :param decorators: Decorators of function.
        """
        if not PY3:
            StubNode.__init__(self)
        else:
            super().__init__()
        self.name = name  # sig: str
        self.parameters = parameters  # sig: Sequence[Tuple[str, str, bool]]
        self.rtype = rtype  # sig: str
        self.decorators = decorators if decorators is not None else []  # sig: Sequence[str]
        # Set to True by the visitor when this is an "async def" function.
        self._async = False  # sig: bool

    def get_code(self):
        """Get the stub code for this function.

        :sig: () -> List[str]
        :return: Lines of stub code for this function.
        """
        stub = []
        # Keep only the decorators that are relevant for type checking.
        for deco in self.decorators:
            if (deco in DECORATORS) or deco.endswith(".setter"):
                stub.append("@" + deco)
        parameters = []
        for name, type_, has_default in self.parameters:
            # An empty type means "no annotation" (self, *args, **kwargs);
            # default values are always elided as "...".
            decl = "%(n)s%(t)s%(d)s" % {
                "n": name,
                "t": ": " + type_ if type_ else "",
                "d": " = ..." if has_default else "",
            }
            parameters.append(decl)
        slots = {
            "a": "async " if self._async else "",
            "n": self.name,
            "p": ", ".join(parameters),
            "r": self.rtype,
        }
        prototype = "%(a)sdef %(n)s(%(p)s) -> %(r)s: ..." % slots
        if len(prototype) <= LINE_LENGTH_LIMIT:
            # Everything fits on a single line.
            stub.append(prototype)
        elif len(INDENT + slots["p"]) <= LINE_LENGTH_LIMIT:
            # All parameters fit on one indented continuation line.
            stub.append("%(a)sdef %(n)s(" % slots)
            stub.append(INDENT + slots["p"])
            stub.append(") -> %(r)s: ..." % slots)
        else:
            # Fall back to one parameter per line.
            stub.append("%(a)sdef %(n)s(" % slots)
            for param in parameters:
                stub.append(INDENT + param + ",")
            stub.append(") -> %(r)s: ..." % slots)
        return stub
class ClassNode(StubNode):
    """A node representing a class in a stub tree."""

    def __init__(self, name, bases, signature=None):
        """Initialize this class node.

        :sig: (str, Sequence[str], Optional[str]) -> None
        :param name: Name of class.
        :param bases: Base classes of class.
        :param signature: Signature of class, to be used in __init__ method.
        """
        if PY3:
            super().__init__()
        else:
            StubNode.__init__(self)
        self.name = name  # sig: str
        self.bases = bases  # sig: Sequence[str]
        self.signature = signature  # sig: Optional[str]

    def get_code(self):
        """Get the stub code for this class.

        :sig: () -> List[str]
        :return: Lines of stub code for this class.
        """
        base_list = ("(" + ", ".join(self.bases) + ")") if len(self.bases) > 0 else ""
        header = "class " + self.name + base_list
        if not (self.children or self.variables):
            # An empty class body collapses onto the header line.
            return [header + ": ..."]
        lines = [header + ":"]
        body = super().get_code() if PY3 else StubNode.get_code(self)
        lines.extend(INDENT + line for line in body)
        return lines
def get_aliases(lines):
    """Get the type aliases in the source.

    :sig: (Sequence[str]) -> Dict[str, str]
    :param lines: Lines of the source code.
    :return: Aliases and their definitions.
    """
    aliases = {}
    for line in lines:
        line = line.strip()
        # startswith already handles the empty string, so no length guard
        # is needed here.
        if line.startswith(SIG_ALIAS):
            # Split on the first occurrences only: a definition that itself
            # contains the marker or an "=" (e.g. a callable type with a
            # default) would otherwise raise ValueError on unpacking.
            _, content = line.split(SIG_ALIAS, 1)
            alias, signature = [t.strip() for t in content.split("=", 1)]
            aliases[alias] = signature
    return aliases
class StubGenerator(ast.NodeVisitor):
    """A transformer that generates stub declarations from a source code."""

    def __init__(self, source, generic=False):
        """Initialize this stub generator.

        :sig: (str, bool) -> None
        :param source: Source code to generate the stub for.
        :param generic: Whether to produce generic stubs.
        """
        self.root = StubNode()  # sig: StubNode
        self.generic = generic  # sig: bool
        # "name::target" keys record "as" aliases; plain keys are direct imports.
        self.imported_namespaces = OrderedDict()  # sig: OrderedDict[str, str]
        self.imported_names = OrderedDict()  # sig: OrderedDict[str, str]
        self.defined_types = set()  # sig: Set[str]
        self.required_types = set()  # sig: Set[str]
        self.aliases = OrderedDict()  # sig: OrderedDict[str, str]
        # Stack of enclosing scopes; the top is the current parent node.
        self._parents = [self.root]  # sig: List[StubNode]
        self._code_lines = source.splitlines()  # sig: List[str]
        self.collect_aliases()
        # Visiting the tree populates self.root as a side effect.
        ast_tree = ast.parse(source)
        self.visit(ast_tree)

    def collect_aliases(self):
        """Collect the type aliases in the source.

        :sig: () -> None
        """
        self.aliases = get_aliases(self._code_lines)
        for alias, signature in self.aliases.items():
            _, _, requires = parse_signature(signature)
            self.required_types |= requires
            self.defined_types |= {alias}

    def visit_Import(self, node):
        """Visit an import node.

        Records each imported module (keyed with "asname::name" when aliased).
        """
        line = self._code_lines[node.lineno - 1]
        module_name = line.split("import")[0].strip()
        for name in node.names:
            imported_name = name.name
            if name.asname:
                imported_name = name.asname + "::" + imported_name
            self.imported_namespaces[imported_name] = module_name

    def visit_ImportFrom(self, node):
        """Visit an from-import node.

        Records each imported name together with the module it came from.
        """
        line = self._code_lines[node.lineno - 1]
        module_name = line.split("from")[1].split("import")[0].strip()
        for name in node.names:
            imported_name = name.name
            if name.asname:
                imported_name = name.asname + "::" + imported_name
            self.imported_names[imported_name] = module_name

    def visit_Assign(self, node):
        """Visit an assignment node.

        Creates a variable stub for assignments that carry a "# sig:"
        comment (or for all assignments in generic mode).
        """
        line = self._code_lines[node.lineno - 1]
        if SIG_COMMENT in line:
            # A marker inside a string literal is not a signature comment.
            line = _RE_COMMENT_IN_STRING.sub("", line)
        if (SIG_COMMENT not in line) and (not self.generic):
            return
        if SIG_COMMENT in line:
            _, signature = line.split(SIG_COMMENT)
            _, return_type, requires = parse_signature(signature)
            self.required_types |= requires
        parent = self._parents[-1]
        for var in node.targets:
            if isinstance(var, ast.Name):
                name, p = var.id, parent
            elif (
                isinstance(var, ast.Attribute)
                and isinstance(var.value, ast.Name)
                and (var.value.id == "self")
            ):
                # "self.attr = ..." belongs to the enclosing class,
                # not to the method currently on top of the stack.
                name, p = var.attr, parent.parent
            else:
                name, p = None, None
            if name is not None:
                if self.generic:
                    return_type = "Any"
                    self.required_types.add(return_type)
                stub_node = VariableNode(name, return_type)
                p.add_variable(stub_node)

    def get_function_node(self, node):
        """Process a function node.

        :sig: (Union[ast.FunctionDef, ast.AsyncFunctionDef]) -> FunctionNode
        :param node: Node to process.
        :return: Generated function node in stub tree.
        """
        decorators = []
        for d in node.decorator_list:
            if hasattr(d, "id"):
                decorators.append(d.id)
            elif hasattr(d, "func"):
                decorators.append(d.func.id)
            elif hasattr(d, "value"):
                decorators.append(d.value.id + "." + d.attr)
        signature = get_signature(node)
        if signature is None:
            # An __init__ without its own signature inherits the one
            # declared on the class docstring.
            parent = self._parents[-1]
            if isinstance(parent, ClassNode) and (node.name == "__init__"):
                signature = parent.signature
        if (signature is None) and (not self.generic):
            return None
        param_names = [arg.arg if PY3 else arg.id for arg in node.args.args]
        n_args = len(param_names)
        if signature is None:
            # Generic mode: everything is Any.
            param_types, rtype, requires = ["Any"] * n_args, "Any", {"Any"}
        else:
            _logger.debug("parsing signature for %s", node.name)
            param_types, rtype, requires = parse_signature(signature)
        # TODO: only in classes
        # "self"/"cls" are not part of the declared signature; give them
        # an empty (unannotated) type slot.
        if ((n_args > 0) and (param_names[0] == "self")) or (
            (n_args > 0) and (param_names[0] == "cls") and ("classmethod" in decorators)
        ):
            if signature is None:
                param_types[0] = ""
            else:
                param_types.insert(0, "")
        _logger.debug("parameter types: %s", param_types)
        _logger.debug("return type: %s", rtype)
        _logger.debug("required types: %s", requires)
        self.required_types |= requires
        if node.args.vararg is not None:
            param_names.append("*" + (node.args.vararg.arg if PY3 else node.args.vararg))
            param_types.append("")
        if node.args.kwarg is not None:
            param_names.append("**" + (node.args.kwarg.arg if PY3 else node.args.kwarg))
            param_types.append("")
        kwonly_args = getattr(node.args, "kwonlyargs", [])
        if len(kwonly_args) > 0:
            param_names.extend([arg.arg for arg in kwonly_args])
            if signature is None:
                param_types.extend(["Any"] * len(kwonly_args))
        if len(param_types) != len(param_names):
            raise ValueError("Parameter names and types don't match: " + node.name)
        # Map default-value positions back to parameter indexes by source
        # location, since ast stores defaults in a separate list.
        param_locs = [(a.lineno, a.col_offset) for a in (node.args.args + kwonly_args)]
        param_defaults = {
            bisect(param_locs, (d.lineno, d.col_offset)) - 1 for d in node.args.defaults
        }
        kwonly_defaults = getattr(node.args, "kw_defaults", [])
        for i, d in enumerate(kwonly_defaults):
            if d is not None:
                param_defaults.add(n_args + i)
        params = [
            (name, type_, i in param_defaults)
            for i, (name, type_) in enumerate(zip(param_names, param_types))
        ]
        if len(kwonly_args) > 0:
            # Bare "*" separator before the keyword-only parameters.
            params.insert(n_args, ("*", "", False))
        stub_node = FunctionNode(
            node.name, parameters=params, rtype=rtype, decorators=decorators
        )
        self._parents[-1].add_child(stub_node)
        self._parents.append(stub_node)
        self.generic_visit(node)
        del self._parents[-1]
        return stub_node

    def visit_FunctionDef(self, node):
        """Visit a function node."""
        node = self.get_function_node(node)
        if node is not None:
            node._async = False

    def visit_AsyncFunctionDef(self, node):
        """Visit an async function node."""
        node = self.get_function_node(node)
        if node is not None:
            node._async = True

    def visit_ClassDef(self, node):
        """Visit a class node."""
        self.defined_types.add(node.name)
        bases = []
        for n in node.bases:
            # Walk attribute chains (e.g. "pkg.mod.Base") back to the root
            # name, collecting the parts in reverse.
            base_parts = []
            while True:
                if not isinstance(n, ast.Attribute):
                    base_parts.append(n.id)
                    break
                else:
                    base_parts.append(n.attr)
                    n = n.value
            bases.append(".".join(base_parts[::-1]))
        self.required_types |= set(bases)
        signature = get_signature(node)
        stub_node = ClassNode(node.name, bases=bases, signature=signature)
        self._parents[-1].add_child(stub_node)
        self._parents.append(stub_node)
        self.generic_visit(node)
        del self._parents[-1]

    @staticmethod
    def generate_import_from(module_, names):
        """Generate an import line.

        :sig: (str, Set[str]) -> str
        :param module_: Name of module to import the names from.
        :param names: Names to import.
        :return: Import line in stub code.
        """
        regular_names = [n for n in names if "::" not in n]
        as_names = [n for n in names if "::" in n]
        line = ""
        if len(regular_names) > 0:
            slots = {"m": module_, "n": ", ".join(sorted(regular_names))}
            line = "from %(m)s import %(n)s" % slots
            if len(line) > LINE_LENGTH_LIMIT:
                # Too long for one line: use a parenthesized import list.
                slots["n"] = INDENT + (",\n" + INDENT).join(sorted(regular_names)) + ","
                line = "from %(m)s import (\n%(n)s\n)" % slots
        if len(as_names) > 0:
            line += "\n"
            for as_name in as_names:
                a, n = as_name.split("::")
                line += "from %(m)s import %(n)s as %(a)s" % {"m": module_, "n": n, "a": a}
        return line

    def generate_stub(self):
        """Generate the stub code for this source.

        :sig: () -> str
        :return: Generated stub code.
        """
        # Start from everything that was referenced and subtract what needs
        # no import: builtins, locally defined types, qualified names, ...
        needed_types = self.required_types - BUILTIN_TYPES
        needed_types -= self.defined_types
        _logger.debug("defined types: %s", self.defined_types)
        module_vars = {v.name for v in self.root.variables}
        _logger.debug("module variables: %s", module_vars)
        qualified_types = {n for n in needed_types if "." in n}
        qualified_namespaces = {".".join(n.split(".")[:-1]) for n in qualified_types}
        needed_namespaces = qualified_namespaces - module_vars
        needed_types -= qualified_types
        _logger.debug("needed namespaces: %s", needed_namespaces)
        imported_names = {n.split("::")[0] for n in self.imported_names}
        imported_types = imported_names & (needed_types | needed_namespaces)
        needed_types -= imported_types
        needed_namespaces -= imported_names
        _logger.debug("used imported types: %s", imported_types)
        try:
            # Whatever remains might come from the typing module.
            typing_mod = __import__("typing")
            typing_types = {n for n in needed_types if hasattr(typing_mod, n)}
            needed_types -= typing_types
            _logger.debug("types from typing module: %s", typing_types)
        except ImportError:
            typing_types = set()
            # NOTE(review): logging's "warn" is deprecated; prefer "warning".
            _logger.warn("typing module not installed")
        if len(needed_types) > 0:
            raise ValueError("Unknown types: " + ", ".join(needed_types))
        out = StringIO()
        started = False
        if len(typing_types) > 0:
            line = self.generate_import_from("typing", typing_types)
            out.write(line + "\n")
            started = True
        if len(imported_types) > 0:
            if started:
                out.write("\n")
            # preserve the import order in the source file
            for name in self.imported_names:
                if name.split("::")[0] in imported_types:
                    line = self.generate_import_from(self.imported_names[name], {name})
                    out.write(line + "\n")
            started = True
        if len(needed_namespaces) > 0:
            if started:
                out.write("\n")
            as_names = {n.split("::")[0]: n for n in self.imported_namespaces if "::" in n}
            for module_ in sorted(needed_namespaces):
                if module_ in as_names:
                    a, n = as_names[module_].split("::")
                    out.write("import " + n + " as " + a + "\n")
                else:
                    out.write("import " + module_ + "\n")
            started = True
        if len(self.aliases) > 0:
            if started:
                out.write("\n")
            for alias, signature in self.aliases.items():
                out.write("%s = %s\n" % (alias, signature))
            started = True
        if started:
            out.write("\n")
        stub_lines = self.root.get_code()
        n_lines = len(stub_lines)
        # Insert blank separator lines between top-level definitions.
        for line_no in range(n_lines):
            prev_line = stub_lines[line_no - 1] if line_no > 0 else None
            line = stub_lines[line_no]
            next_line = stub_lines[line_no + 1] if line_no < (n_lines - 1) else None
            if (
                line.startswith("class ")
                and (prev_line is not None)
                and (
                    (not prev_line.startswith("class "))
                    or (next_line and next_line.startswith(" "))
                )
            ):
                out.write("\n")
            if (
                line.startswith("def ")
                and (prev_line is not None)
                and (prev_line.startswith((" ", "class ")))
            ):
                out.write("\n")
            out.write(line + "\n")
            # NOTE(review): this increment has no effect; "for" rebinds
            # line_no on each iteration.
            line_no += 1
        return out.getvalue()
def get_stub(source, generic=False):
    """Get the stub code for a source code.

    :sig: (str, bool) -> str
    :param source: Source code to generate the stub for.
    :param generic: Whether to produce generic stubs.
    :return: Generated stub code.
    """
    return StubGenerator(source, generic=generic).generate_stub()
def get_mod_paths(mod_name, out_dir):
    """Get source and stub paths for a module."""
    paths = []
    try:
        loader = get_loader(mod_name)
        source = Path(loader.path)
        # Only pure-Python sources get stubs; extension modules are skipped.
        if source.name.endswith(".py"):
            relative = Path(*mod_name.split("."))
            if source.name == "__init__.py":
                relative = relative.joinpath("__init__.py")
            destination = Path(out_dir, relative.with_suffix(".pyi"))
            paths.append((source, destination))
    except Exception as e:
        # Best effort: log and skip modules that cannot be loaded.
        _logger.debug(e)
        _logger.warning("cannot handle module, skipping: %s", mod_name)
    return paths
def get_pkg_paths(pkg_name, out_dir):
    """Recursively get all source and stub paths for a package."""
    paths = []
    try:
        pkg = import_module(pkg_name)
        if not hasattr(pkg, "__path__"):
            # A plain module, not a package: no recursion needed.
            return get_mod_paths(pkg_name, out_dir)
        for mod_info in walk_packages(pkg.__path__, pkg.__name__ + "."):
            paths.extend(get_mod_paths(mod_info.name, out_dir))
    except Exception as e:
        # Best effort: log and skip packages that fail to import.
        _logger.debug(e)
        _logger.warning("cannot handle package, skipping: %s", pkg_name)
    return paths
############################################################
# SPHINX
############################################################
def process_docstring(app, what, name, obj, options, lines):
    """Modify the docstring before generating documentation.

    This will insert type declarations for parameters and return type
    into the docstring, and remove the signature field so that it will
    be excluded from the generated document.
    """
    # Cache the module's type aliases on the Sphinx app object.
    # NOTE(review): aliases stays None until a "module" event is seen;
    # aliases.get below would fail before that — TODO confirm event order.
    aliases = getattr(app, "_sigaliases", None)
    if aliases is None:
        if what == "module":
            aliases = get_aliases(inspect.getsource(obj).splitlines())
            app._sigaliases = aliases
    sig_marker = ":" + SIG_FIELD + ":"
    is_class = what in ("class", "exception")
    signature = extract_signature("\n".join(lines))
    if signature is None:
        if not is_class:
            return
        # For classes, fall back to the signature in the __init__ docstring:
        # copy its ":sig:" field (and continuation lines) into the class doc.
        init_method = getattr(obj, "__init__")
        init_doc = init_method.__doc__
        init_lines = init_doc.splitlines()[1:]
        if len(init_lines) > 1:
            init_doc = textwrap.dedent("\n".join(init_lines[1:]))
            init_lines = init_doc.splitlines()
        if sig_marker not in init_doc:
            return
        sig_started = False
        for line in init_lines:
            if line.lstrip().startswith(sig_marker):
                sig_started = True
            if sig_started:
                lines.append(line)
        signature = extract_signature("\n".join(lines))
    if is_class:
        # Parameter names are taken from __init__, not the class itself.
        # NOTE(review): init_method is only bound in the branch above;
        # a class carrying its own ":sig:" would hit a NameError here —
        # TODO confirm against upstream.
        obj = init_method
    param_types, rtype, _ = parse_signature(signature)
    param_names = [p for p in inspect.signature(obj).parameters]
    if is_class and (param_names[0] == "self"):
        del param_names[0]
    # if something goes wrong, don't insert parameter types
    if len(param_names) == len(param_types):
        # NOTE(review): this loop variable shadows the "name" parameter.
        for name, type_ in zip(param_names, param_types):
            find = ":param %(name)s:" % {"name": name}
            alias = aliases.get(type_)
            if alias is not None:
                type_ = "*%(type)s* :sup:`%(alias)s`" % {"type": type_, "alias": alias}
            # Insert the ":type:" line directly above the ":param:" line.
            for i, line in enumerate(lines):
                if line.startswith(find):
                    lines.insert(i, ":type %(name)s: %(type)s" % {"name": name, "type": type_})
                    break
    if not is_class:
        for i, line in enumerate(lines):
            if line.startswith((":return:", ":returns:")):
                lines.insert(i, ":rtype: " + rtype)
                break
    # remove the signature field
    sig_start = 0
    while sig_start < len(lines):
        if lines[sig_start].startswith(sig_marker):
            break
        sig_start += 1
    # The field ends at the first line that is empty or not indented.
    sig_end = sig_start + 1
    while sig_end < len(lines):
        if (not lines[sig_end]) or (lines[sig_end][0] != " "):
            break
        sig_end += 1
    # Delete back-to-front so earlier indexes stay valid.
    for i in reversed(range(sig_start, sig_end)):
        del lines[i]
def setup(app):
    """Register to Sphinx.

    :param app: Sphinx application to register the docstring hook with.
    :return: Extension metadata expected by Sphinx.
    """
    app.connect("autodoc-process-docstring", process_docstring)
    return {"version": __version__}
############################################################
# MAIN
############################################################
def main(argv=None):
    """Start the command line interface.

    :param argv: Command line arguments; defaults to ``sys.argv``.
    """
    parser = ArgumentParser(prog="pygenstub")
    parser.add_argument("--version", action="version", version="%(prog)s " + __version__)
    parser.add_argument("files", nargs="*", help="generate stubs for given files")
    parser.add_argument(
        "-m",
        "--module",
        action="append",
        metavar="MODULE",
        dest="modules",
        default=[],
        help="generate stubs for given modules",
    )
    parser.add_argument(
        "-o", "--output", metavar="PATH", dest="out_dir", help="change the output directory"
    )
    parser.add_argument(
        "--generic", action="store_true", default=False, help="generate generic stubs"
    )
    parser.add_argument("--debug", action="store_true", help="enable debug messages")
    argv = argv if argv is not None else sys.argv
    arguments = parser.parse_args(argv[1:])
    # set debug mode
    if arguments.debug:
        logging.basicConfig(level=logging.DEBUG)
        _logger.debug("running in debug mode")
    out_dir = arguments.out_dir if arguments.out_dir is not None else ""
    if (out_dir == "") and (len(arguments.modules) > 0):
        # Module stubs would otherwise overwrite files inside the library.
        print("Output directory must be given when generating stubs for modules.")
        sys.exit(1)
    # Collect (source, destination) pairs for all requested files/modules.
    modules = []
    for path in arguments.files:
        paths = Path(path).glob("**/*.py") if Path(path).is_dir() else [Path(path)]
        for source in paths:
            if str(source).startswith(os.path.pardir):
                # Paths starting with ".." are normalized to absolute form.
                source = source.absolute().resolve()
            if (out_dir != "") and source.is_absolute():
                # Re-root absolute paths under the output directory.
                source = source.relative_to(source.root)
            destination = Path(out_dir, source.with_suffix(".pyi"))
            modules.append((source, destination))
    for mod_name in arguments.modules:
        modules.extend(get_pkg_paths(mod_name, out_dir))
    for source, destination in modules:
        _logger.info("generating stub for %s to path %s", source, destination)
        with source.open() as f:
            code = f.read()
        try:
            stub = get_stub(code, generic=arguments.generic)
        except Exception as e:
            # Report the failing file and keep processing the rest.
            print(source, "-", e, file=sys.stderr)
            continue
        if stub != "":
            if not destination.parent.exists():
                destination.parent.mkdir(parents=True)
            with destination.open("w") as f:
                f.write("# " + EDIT_WARNING + "\n\n" + stub)
# Allow running this module directly as a script.
if __name__ == "__main__":
    main()
|
uyar/pygenstub | pygenstub.py | get_aliases | python | def get_aliases(lines):
aliases = {}
for line in lines:
line = line.strip()
if len(line) > 0 and line.startswith(SIG_ALIAS):
_, content = line.split(SIG_ALIAS)
alias, signature = [t.strip() for t in content.split("=")]
aliases[alias] = signature
return aliases | Get the type aliases in the source.
:sig: (Sequence[str]) -> Dict[str, str]
:param lines: Lines of the source code.
:return: Aliases and their definitions. | train | https://github.com/uyar/pygenstub/blob/a6b18a823382d3c6be29c411fb33c58b6090d22c/pygenstub.py#L372-L386 | null | # Copyright (C) 2016-2019 H. Turgut Uyar <uyar@tekir.org>
#
# pygenstub is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pygenstub is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pygenstub. If not, see <http://www.gnu.org/licenses/>.
"""pygenstub is a utility for generating stub files from docstrings in source files.
It takes a source file as input and creates a stub file with the same base name
and the ``.pyi`` extension.
For more information, please refer to the documentation:
https://pygenstub.tekir.org/
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import ast
import inspect
import logging
import os
import re
import sys
import textwrap
from argparse import ArgumentParser
from bisect import bisect
from collections import OrderedDict
from importlib import import_module
from io import StringIO
from pkgutil import get_loader, walk_packages
from docutils.core import publish_doctree
__version__ = "1.4.0" # sig: str

# True when running under Python 3; selects the compatibility imports below.
PY3 = sys.version_info >= (3, 0)
if not PY3:
    import __builtin__ as builtins
    from pathlib2 import Path
else:
    import builtins
    from pathlib import Path

# sigalias: Document = docutils.nodes.document

# Names of all built-in types; signatures using these need no extra import.
BUILTIN_TYPES = {k for k, t in builtins.__dict__.items() if isinstance(t, type)}
BUILTIN_TYPES.add("None")

# Docstring field name that carries a signature declaration.
SIG_FIELD = "sig" # sig: str
# Comment marker for signatures attached to assignments.
SIG_COMMENT = "# sig:" # sig: str
# Comment marker for type alias definitions.
SIG_ALIAS = "# sigalias:" # sig: str

# Decorators that are preserved in generated stubs.
DECORATORS = {"property", "staticmethod", "classmethod"} # sig: Set[str]

LINE_LENGTH_LIMIT = 79  # maximum length of a generated stub line
INDENT = 4 * " "
EDIT_WARNING = "THIS FILE IS AUTOMATICALLY GENERATED, DO NOT EDIT MANUALLY."

# Matches (possibly dotted) type names such as "docutils.nodes.document".
_RE_QUALIFIED_TYPES = re.compile(r"\w+(?:\.\w+)*")
# Matches a signature comment marker that occurs inside a string literal.
_RE_COMMENT_IN_STRING = re.compile(r"""['"]\s*%(text)s\s*.*['"]""" % {"text": SIG_COMMENT})

_logger = logging.getLogger(__name__)
def get_fields(node, fields_tag="field_list"):
    """Get the field names and their values from a node.

    :sig: (Document, str) -> Dict[str, str]
    :param node: Node to get the fields from.
    :param fields_tag: Tag of child node that contains the fields.
    :return: Names and values of fields.
    """
    containers = [child for child in node.children if child.tagname == fields_tag]
    if not containers:
        return {}
    assert len(containers) == 1, "multiple nodes with tag " + fields_tag
    result = {}
    for field in containers[0].children:
        if field.tagname != "field":
            continue
        parts = {part.tagname: part.rawsource.strip() for part in field.children}
        result[parts["field_name"]] = parts["field_body"]
    return result
def extract_signature(docstring):
    """Extract the signature from a docstring.

    :sig: (str) -> Optional[str]
    :param docstring: Docstring to extract the signature from.
    :return: Extracted signature, or ``None`` if there's no signature.
    """
    # report_level 5 silences docutils diagnostics for malformed docstrings.
    tree = publish_doctree(docstring, settings_overrides={"report_level": 5})
    return get_fields(tree).get(SIG_FIELD)
def get_signature(node):
    """Get the signature of a function or a class.

    :sig: (Union[ast.FunctionDef, ast.AsyncFunctionDef, ast.ClassDef]) -> Optional[str]
    :param node: Node to get the signature from.
    :return: Value of signature field in node docstring, or ``None`` if there's no signature.
    """
    docstring = ast.get_docstring(node)
    return extract_signature(docstring) if docstring is not None else None
def split_parameter_types(parameters):
    """Split a parameter types declaration into individual types.

    The input is the left hand side of a signature (the part before the arrow),
    excluding the parentheses.

    :sig: (str) -> List[str]
    :param parameters: Comma separated parameter types.
    :return: Parameter types.
    """
    if parameters == "":
        return []
    # Split only on top-level commas; commas nested inside brackets
    # (e.g. in "Dict[str, int]") separate type arguments, not parameters.
    # The original collected comma positions first and then sliced in a
    # second pass ending in a misleading "for...else" (the "else" always
    # ran since the loop had no "break"); a single pass is equivalent.
    types = []
    start = 0
    bracket_depth = 0
    for i, char in enumerate(parameters):
        if (char == ",") and (bracket_depth == 0):
            types.append(parameters[start:i].strip())
            start = i + 1
        elif char == "[":
            bracket_depth += 1
        elif char == "]":
            bracket_depth -= 1
    # Whatever follows the last top-level comma is the final type.
    types.append(parameters[start:].strip())
    return types
def parse_signature(signature):
    """Parse a signature into its input and return parameter types.

    This will also collect the types that are required by any of the input
    and return types.

    :sig: (str) -> Tuple[List[str], str, Set[str]]
    :param signature: Signature to parse.
    :return: Input parameter types, return type, and all required types.
    """
    if " -> " in signature:
        lhs, return_type = [part.strip() for part in signature.split(" -> ")]
        # Drop the parentheses around the parameter type list before splitting.
        param_types = split_parameter_types(lhs[1:-1].strip())
    else:
        # A signature comment has no parameters; the text is the variable type.
        param_types, return_type = None, signature.strip()
    requires = set(_RE_QUALIFIED_TYPES.findall(signature))
    return param_types, return_type, requires
class StubNode:
    """A node in a stub tree."""

    def __init__(self):
        """Initialize this stub node.

        :sig: () -> None
        """
        self.variables = []  # sig: List[VariableNode]
        self.variable_names = set()  # sig: Set[str]
        self.children = []  # sig: List[Union[FunctionNode, ClassNode]]
        self.parent = None  # sig: Optional[StubNode]

    def add_variable(self, node):
        """Add a variable node to this node.

        Duplicate variable names are silently ignored.

        :sig: (VariableNode) -> None
        :param node: Variable node to add.
        """
        if node.name not in self.variable_names:
            self.variables.append(node)
            self.variable_names.add(node.name)
            node.parent = self

    def add_child(self, node):
        """Add a function/method or class node to this node.

        :sig: (Union[FunctionNode, ClassNode]) -> None
        :param node: Function or class node to add.
        """
        self.children.append(node)
        node.parent = self

    def get_code(self):
        """Get the stub code for this node.

        The stub code for a node consists of the type annotations of its
        variables, followed by the prototypes of its functions/methods
        and classes.

        :sig: () -> List[str]
        :return: Lines of stub code for this node.
        """
        lines = []
        for variable in self.variables:
            lines.extend(variable.get_code())
        has_both = (len(self.variables) > 0) and (len(self.children) > 0)
        if has_both and not isinstance(self, ClassNode):
            # Separate module-level variables from the definitions below.
            lines.append("")
        for member in self.children:
            lines.extend(member.get_code())
        return lines
class VariableNode(StubNode):
    """A node representing an assignment in a stub tree."""

    def __init__(self, name, type_):
        """Initialize this variable node.

        :sig: (str, str) -> None
        :param name: Name of variable that is being assigned to.
        :param type_: Type of variable.
        """
        if PY3:
            super().__init__()
        else:
            StubNode.__init__(self)
        self.name = name  # sig: str
        self.type_ = type_  # sig: str

    def get_code(self):
        """Get the type annotation for this variable.

        :sig: () -> List[str]
        :return: Lines of stub code for this variable.
        """
        annotation = "%(n)s = ... # type: %(t)s" % {"n": self.name, "t": self.type_}
        return [annotation]
class FunctionNode(StubNode):
    """A node representing a function in a stub tree."""

    def __init__(self, name, parameters, rtype, decorators=None):
        """Initialize this function node.

        The parameters have to given as a list of triples where each item specifies
        the name of the parameter, its type, and whether it has a default value or not.

        :sig: (str, Sequence[Tuple[str, str, bool]], str, Optional[Sequence[str]]) -> None
        :param name: Name of function.
        :param parameters: List of parameter triples (name, type, has_default).
        :param rtype: Type of return value.
        :param decorators: Decorators of function.
        """
        if not PY3:
            StubNode.__init__(self)
        else:
            super().__init__()
        self.name = name  # sig: str
        self.parameters = parameters  # sig: Sequence[Tuple[str, str, bool]]
        self.rtype = rtype  # sig: str
        self.decorators = decorators if decorators is not None else []  # sig: Sequence[str]
        # Set to True by the visitor when this is an "async def" function.
        self._async = False  # sig: bool

    def get_code(self):
        """Get the stub code for this function.

        :sig: () -> List[str]
        :return: Lines of stub code for this function.
        """
        stub = []
        # Keep only the decorators that are relevant for type checking.
        for deco in self.decorators:
            if (deco in DECORATORS) or deco.endswith(".setter"):
                stub.append("@" + deco)
        parameters = []
        for name, type_, has_default in self.parameters:
            # An empty type means "no annotation" (self, *args, **kwargs);
            # default values are always elided as "...".
            decl = "%(n)s%(t)s%(d)s" % {
                "n": name,
                "t": ": " + type_ if type_ else "",
                "d": " = ..." if has_default else "",
            }
            parameters.append(decl)
        slots = {
            "a": "async " if self._async else "",
            "n": self.name,
            "p": ", ".join(parameters),
            "r": self.rtype,
        }
        prototype = "%(a)sdef %(n)s(%(p)s) -> %(r)s: ..." % slots
        if len(prototype) <= LINE_LENGTH_LIMIT:
            # Everything fits on a single line.
            stub.append(prototype)
        elif len(INDENT + slots["p"]) <= LINE_LENGTH_LIMIT:
            # All parameters fit on one indented continuation line.
            stub.append("%(a)sdef %(n)s(" % slots)
            stub.append(INDENT + slots["p"])
            stub.append(") -> %(r)s: ..." % slots)
        else:
            # Fall back to one parameter per line.
            stub.append("%(a)sdef %(n)s(" % slots)
            for param in parameters:
                stub.append(INDENT + param + ",")
            stub.append(") -> %(r)s: ..." % slots)
        return stub
class ClassNode(StubNode):
    """A stub tree node for a class."""

    def __init__(self, name, bases, signature=None):
        """Create a class node.

        :sig: (str, Sequence[str], Optional[str]) -> None
        :param name: Name of class.
        :param bases: Base classes of class.
        :param signature: Signature of class, to be used in __init__ method.
        """
        if PY3:
            super().__init__()
        else:
            StubNode.__init__(self)
        self.name = name  # sig: str
        self.bases = bases  # sig: Sequence[str]
        self.signature = signature  # sig: Optional[str]

    def get_code(self):
        """Produce the stub lines for this class.

        :sig: () -> List[str]
        :return: Lines of stub code for this class.
        """
        base_list = ""
        if len(self.bases) > 0:
            base_list = "(" + ", ".join(self.bases) + ")"
        header = "class " + self.name + base_list
        if (len(self.variables) == 0) and (len(self.children) == 0):
            # an empty class body collapses to an ellipsis
            return [header + ": ..."]
        lines = [header + ":"]
        body = super().get_code() if PY3 else StubNode.get_code(self)
        lines.extend(INDENT + body_line for body_line in body)
        return lines
class StubGenerator(ast.NodeVisitor):
    """A transformer that generates stub declarations from a source code.

    Walks the AST of the source, recording imports, type aliases, and the
    signatures embedded in docstrings/comments into a stub tree rooted at
    ``self.root``.
    """

    def __init__(self, source, generic=False):
        """Initialize this stub generator.

        :sig: (str, bool) -> None
        :param source: Source code to generate the stub for.
        :param generic: Whether to produce generic stubs.
        """
        self.root = StubNode()  # sig: StubNode
        self.generic = generic  # sig: bool

        self.imported_namespaces = OrderedDict()  # sig: OrderedDict[str, str]
        self.imported_names = OrderedDict()  # sig: OrderedDict[str, str]
        self.defined_types = set()  # sig: Set[str]
        self.required_types = set()  # sig: Set[str]
        self.aliases = OrderedDict()  # sig: OrderedDict[str, str]

        # stack of enclosing stub nodes; the last entry is the current parent
        self._parents = [self.root]  # sig: List[StubNode]
        self._code_lines = source.splitlines()  # sig: List[str]

        self.collect_aliases()

        ast_tree = ast.parse(source)
        self.visit(ast_tree)

    def collect_aliases(self):
        """Collect the type aliases in the source.

        :sig: () -> None
        """
        self.aliases = get_aliases(self._code_lines)
        for alias, signature in self.aliases.items():
            _, _, requires = parse_signature(signature)
            self.required_types |= requires
            self.defined_types |= {alias}

    def visit_Import(self, node):
        """Visit an import node."""
        # read the raw source line to keep the original spelling
        line = self._code_lines[node.lineno - 1]
        module_name = line.split("import")[0].strip()
        for name in node.names:
            imported_name = name.name
            # "alias::realname" encodes an "import x as y" binding
            if name.asname:
                imported_name = name.asname + "::" + imported_name
            self.imported_namespaces[imported_name] = module_name

    def visit_ImportFrom(self, node):
        """Visit an from-import node."""
        line = self._code_lines[node.lineno - 1]
        module_name = line.split("from")[1].split("import")[0].strip()
        for name in node.names:
            imported_name = name.name
            if name.asname:
                imported_name = name.asname + "::" + imported_name
            self.imported_names[imported_name] = module_name

    def visit_Assign(self, node):
        """Visit an assignment node."""
        line = self._code_lines[node.lineno - 1]
        if SIG_COMMENT in line:
            # ignore a "# sig:" that only appears inside a string literal
            line = _RE_COMMENT_IN_STRING.sub("", line)

        if (SIG_COMMENT not in line) and (not self.generic):
            return

        if SIG_COMMENT in line:
            _, signature = line.split(SIG_COMMENT)
            _, return_type, requires = parse_signature(signature)
            self.required_types |= requires

        parent = self._parents[-1]
        for var in node.targets:
            if isinstance(var, ast.Name):
                # module- or class-level name
                name, p = var.id, parent
            elif (
                isinstance(var, ast.Attribute)
                and isinstance(var.value, ast.Name)
                and (var.value.id == "self")
            ):
                # "self.attr = ..." belongs to the enclosing class
                name, p = var.attr, parent.parent
            else:
                name, p = None, None

            if name is not None:
                if self.generic:
                    return_type = "Any"
                    self.required_types.add(return_type)
                stub_node = VariableNode(name, return_type)
                p.add_variable(stub_node)

    def get_function_node(self, node):
        """Process a function node.

        :sig: (Union[ast.FunctionDef, ast.AsyncFunctionDef]) -> FunctionNode
        :param node: Node to process.
        :return: Generated function node in stub tree.
        """
        # collect decorator names: plain names, calls, and dotted attributes
        decorators = []
        for d in node.decorator_list:
            if hasattr(d, "id"):
                decorators.append(d.id)
            elif hasattr(d, "func"):
                decorators.append(d.func.id)
            elif hasattr(d, "value"):
                decorators.append(d.value.id + "." + d.attr)

        signature = get_signature(node)
        if signature is None:
            # an __init__ may take its signature from the class docstring
            parent = self._parents[-1]
            if isinstance(parent, ClassNode) and (node.name == "__init__"):
                signature = parent.signature

        if (signature is None) and (not self.generic):
            return None

        param_names = [arg.arg if PY3 else arg.id for arg in node.args.args]
        n_args = len(param_names)
        if signature is None:
            param_types, rtype, requires = ["Any"] * n_args, "Any", {"Any"}
        else:
            _logger.debug("parsing signature for %s", node.name)
            param_types, rtype, requires = parse_signature(signature)

        # TODO: only in classes
        # "self"/"cls" gets an empty type slot; signatures don't declare it
        if ((n_args > 0) and (param_names[0] == "self")) or (
            (n_args > 0) and (param_names[0] == "cls") and ("classmethod" in decorators)
        ):
            if signature is None:
                param_types[0] = ""
            else:
                param_types.insert(0, "")

        _logger.debug("parameter types: %s", param_types)
        _logger.debug("return type: %s", rtype)
        _logger.debug("required types: %s", requires)

        self.required_types |= requires

        if node.args.vararg is not None:
            param_names.append("*" + (node.args.vararg.arg if PY3 else node.args.vararg))
            param_types.append("")
        if node.args.kwarg is not None:
            param_names.append("**" + (node.args.kwarg.arg if PY3 else node.args.kwarg))
            param_types.append("")

        kwonly_args = getattr(node.args, "kwonlyargs", [])
        if len(kwonly_args) > 0:
            param_names.extend([arg.arg for arg in kwonly_args])
            if signature is None:
                param_types.extend(["Any"] * len(kwonly_args))

        if len(param_types) != len(param_names):
            raise ValueError("Parameter names and types don't match: " + node.name)

        # map default-value nodes back to parameter indexes by source position
        param_locs = [(a.lineno, a.col_offset) for a in (node.args.args + kwonly_args)]
        param_defaults = {
            bisect(param_locs, (d.lineno, d.col_offset)) - 1 for d in node.args.defaults
        }

        kwonly_defaults = getattr(node.args, "kw_defaults", [])
        for i, d in enumerate(kwonly_defaults):
            if d is not None:
                param_defaults.add(n_args + i)

        params = [
            (name, type_, i in param_defaults)
            for i, (name, type_) in enumerate(zip(param_names, param_types))
        ]

        if len(kwonly_args) > 0:
            # the bare "*" separator before keyword-only parameters
            params.insert(n_args, ("*", "", False))

        stub_node = FunctionNode(
            node.name, parameters=params, rtype=rtype, decorators=decorators
        )
        self._parents[-1].add_child(stub_node)

        self._parents.append(stub_node)
        self.generic_visit(node)
        del self._parents[-1]
        return stub_node

    def visit_FunctionDef(self, node):
        """Visit a function node."""
        node = self.get_function_node(node)
        if node is not None:
            node._async = False

    def visit_AsyncFunctionDef(self, node):
        """Visit an async function node."""
        node = self.get_function_node(node)
        if node is not None:
            node._async = True

    def visit_ClassDef(self, node):
        """Visit a class node."""
        self.defined_types.add(node.name)

        bases = []
        for n in node.bases:
            # rebuild a possibly dotted base name from the attribute chain
            base_parts = []
            while True:
                if not isinstance(n, ast.Attribute):
                    base_parts.append(n.id)
                    break
                else:
                    base_parts.append(n.attr)
                    n = n.value
            bases.append(".".join(base_parts[::-1]))
        self.required_types |= set(bases)

        signature = get_signature(node)
        stub_node = ClassNode(node.name, bases=bases, signature=signature)
        self._parents[-1].add_child(stub_node)

        self._parents.append(stub_node)
        self.generic_visit(node)
        del self._parents[-1]

    @staticmethod
    def generate_import_from(module_, names):
        """Generate an import line.

        :sig: (str, Set[str]) -> str
        :param module_: Name of module to import the names from.
        :param names: Names to import.
        :return: Import line in stub code.
        """
        regular_names = [n for n in names if "::" not in n]
        as_names = [n for n in names if "::" in n]
        line = ""
        if len(regular_names) > 0:
            slots = {"m": module_, "n": ", ".join(sorted(regular_names))}
            line = "from %(m)s import %(n)s" % slots
            if len(line) > LINE_LENGTH_LIMIT:
                # wrap a long import into a parenthesized multi-line form
                slots["n"] = INDENT + (",\n" + INDENT).join(sorted(regular_names)) + ","
                line = "from %(m)s import (\n%(n)s\n)" % slots
        if len(as_names) > 0:
            line += "\n"
            for as_name in as_names:
                a, n = as_name.split("::")
                line += "from %(m)s import %(n)s as %(a)s" % {"m": module_, "n": n, "a": a}
        return line

    def generate_stub(self):
        """Generate the stub code for this source.

        :sig: () -> str
        :return: Generated stub code.
        """
        needed_types = self.required_types - BUILTIN_TYPES

        needed_types -= self.defined_types
        _logger.debug("defined types: %s", self.defined_types)

        module_vars = {v.name for v in self.root.variables}
        _logger.debug("module variables: %s", module_vars)

        # dotted names are satisfied by importing their namespace module
        qualified_types = {n for n in needed_types if "." in n}
        qualified_namespaces = {".".join(n.split(".")[:-1]) for n in qualified_types}
        needed_namespaces = qualified_namespaces - module_vars
        needed_types -= qualified_types
        _logger.debug("needed namespaces: %s", needed_namespaces)

        imported_names = {n.split("::")[0] for n in self.imported_names}
        imported_types = imported_names & (needed_types | needed_namespaces)
        needed_types -= imported_types
        needed_namespaces -= imported_names
        _logger.debug("used imported types: %s", imported_types)

        try:
            typing_mod = __import__("typing")
            typing_types = {n for n in needed_types if hasattr(typing_mod, n)}
            needed_types -= typing_types
            _logger.debug("types from typing module: %s", typing_types)
        except ImportError:
            typing_types = set()
            # NOTE(review): logger.warn is a deprecated alias of logger.warning
            _logger.warn("typing module not installed")

        if len(needed_types) > 0:
            raise ValueError("Unknown types: " + ", ".join(needed_types))

        out = StringIO()
        started = False

        if len(typing_types) > 0:
            line = self.generate_import_from("typing", typing_types)
            out.write(line + "\n")
            started = True

        if len(imported_types) > 0:
            if started:
                out.write("\n")
            # preserve the import order in the source file
            for name in self.imported_names:
                if name.split("::")[0] in imported_types:
                    line = self.generate_import_from(self.imported_names[name], {name})
                    out.write(line + "\n")
            started = True

        if len(needed_namespaces) > 0:
            if started:
                out.write("\n")
            as_names = {n.split("::")[0]: n for n in self.imported_namespaces if "::" in n}
            for module_ in sorted(needed_namespaces):
                if module_ in as_names:
                    a, n = as_names[module_].split("::")
                    out.write("import " + n + " as " + a + "\n")
                else:
                    out.write("import " + module_ + "\n")
            started = True

        if len(self.aliases) > 0:
            if started:
                out.write("\n")
            for alias, signature in self.aliases.items():
                out.write("%s = %s\n" % (alias, signature))
            started = True

        if started:
            out.write("\n")

        stub_lines = self.root.get_code()
        n_lines = len(stub_lines)
        # insert blank lines between top-level definitions for readability
        for line_no in range(n_lines):
            prev_line = stub_lines[line_no - 1] if line_no > 0 else None
            line = stub_lines[line_no]
            next_line = stub_lines[line_no + 1] if line_no < (n_lines - 1) else None

            if (
                line.startswith("class ")
                and (prev_line is not None)
                and (
                    (not prev_line.startswith("class "))
                    or (next_line and next_line.startswith(" "))
                )
            ):
                out.write("\n")

            if (
                line.startswith("def ")
                and (prev_line is not None)
                and (prev_line.startswith((" ", "class ")))
            ):
                out.write("\n")

            out.write(line + "\n")
            # NOTE(review): reassigning the loop variable of a range-for has
            # no effect on the iteration; this statement is a no-op
            line_no += 1

        return out.getvalue()
def get_stub(source, generic=False):
    """Generate the stub code for a piece of source code.

    :sig: (str, bool) -> str
    :param source: Source code to generate the stub for.
    :param generic: Whether to produce generic stubs.
    :return: Generated stub code.
    """
    return StubGenerator(source, generic=generic).generate_stub()
def get_mod_paths(mod_name, out_dir):
    """Get the source and stub paths for a module.

    :param mod_name: Dotted name of the module.
    :param out_dir: Directory where the stub will be written.
    :return: List holding one (source, destination) pair, or empty on failure.
    """
    result = []
    try:
        loader = get_loader(mod_name)
        src = Path(loader.path)
        if src.name.endswith(".py"):
            rel = Path(*mod_name.split("."))
            if src.name == "__init__.py":
                # packages keep their __init__ file name in the stub layout
                rel = rel.joinpath("__init__.py")
            result.append((src, Path(out_dir, rel.with_suffix(".pyi"))))
    except Exception as err:
        # best effort: log and skip modules that cannot be located
        _logger.debug(err)
        _logger.warning("cannot handle module, skipping: %s", mod_name)
    return result
def get_pkg_paths(pkg_name, out_dir):
    """Recursively get all source and stub paths for a package.

    Falls back to single-module handling when the name refers to a
    plain module rather than a package.
    """
    collected = []
    try:
        pkg = import_module(pkg_name)
        if not hasattr(pkg, "__path__"):
            # not a package: treat it as a single module
            return get_mod_paths(pkg_name, out_dir)
        for mod_info in walk_packages(pkg.__path__, pkg.__name__ + "."):
            collected.extend(get_mod_paths(mod_info.name, out_dir))
    except Exception as err:
        # best effort: log and skip packages that fail to import
        _logger.debug(err)
        _logger.warning("cannot handle package, skipping: %s", pkg_name)
    return collected
############################################################
# SPHINX
############################################################
def process_docstring(app, what, name, obj, options, lines):
    """Modify the docstring before generating documentation.

    This will insert type declarations for parameters and return type
    into the docstring, and remove the signature field so that it will
    be excluded from the generated document.  ``lines`` is mutated in
    place, as required by the Sphinx autodoc-process-docstring event.
    """
    # cache the module's type aliases on the Sphinx app object
    aliases = getattr(app, "_sigaliases", None)
    if aliases is None:
        if what == "module":
            aliases = get_aliases(inspect.getsource(obj).splitlines())
            app._sigaliases = aliases

    sig_marker = ":" + SIG_FIELD + ":"

    is_class = what in ("class", "exception")

    signature = extract_signature("\n".join(lines))
    if signature is None:
        if not is_class:
            return
        # for classes, fall back to the signature in the __init__ docstring
        init_method = getattr(obj, "__init__")
        init_doc = init_method.__doc__
        init_lines = init_doc.splitlines()[1:]
        if len(init_lines) > 1:
            init_doc = textwrap.dedent("\n".join(init_lines[1:]))
            init_lines = init_doc.splitlines()
        if sig_marker not in init_doc:
            return
        # append the signature field lines from __init__ to this docstring
        sig_started = False
        for line in init_lines:
            if line.lstrip().startswith(sig_marker):
                sig_started = True
            if sig_started:
                lines.append(line)
        signature = extract_signature("\n".join(lines))

    if is_class:
        # NOTE(review): init_method is only bound in the fallback branch
        # above; a class docstring that carries its own :sig: field would
        # reach this line with init_method unset — confirm against callers
        obj = init_method

    param_types, rtype, _ = parse_signature(signature)
    param_names = [p for p in inspect.signature(obj).parameters]
    if is_class and (param_names[0] == "self"):
        del param_names[0]

    # if something goes wrong, don't insert parameter types
    if len(param_names) == len(param_types):
        # insert a ":type name:" line before each ":param name:" line
        for name, type_ in zip(param_names, param_types):
            find = ":param %(name)s:" % {"name": name}
            alias = aliases.get(type_)
            if alias is not None:
                type_ = "*%(type)s* :sup:`%(alias)s`" % {"type": type_, "alias": alias}
            for i, line in enumerate(lines):
                if line.startswith(find):
                    lines.insert(i, ":type %(name)s: %(type)s" % {"name": name, "type": type_})
                    break

        if not is_class:
            for i, line in enumerate(lines):
                if line.startswith((":return:", ":returns:")):
                    lines.insert(i, ":rtype: " + rtype)
                    break

    # remove the signature field
    sig_start = 0
    while sig_start < len(lines):
        if lines[sig_start].startswith(sig_marker):
            break
        sig_start += 1
    # the field body continues over the following indented lines
    sig_end = sig_start + 1
    while sig_end < len(lines):
        if (not lines[sig_end]) or (lines[sig_end][0] != " "):
            break
        sig_end += 1
    # delete back-to-front so earlier indexes stay valid
    for i in reversed(range(sig_start, sig_end)):
        del lines[i]
def setup(app):
    """Register this extension with Sphinx."""
    extension_info = {"version": __version__}
    app.connect("autodoc-process-docstring", process_docstring)
    return extension_info
############################################################
# MAIN
############################################################
def main(argv=None):
    """Start the command line interface.

    Parses the command line, collects (source, destination) pairs from
    the given files/directories and modules, and writes a ``.pyi`` stub
    for each source that yields one.
    """
    parser = ArgumentParser(prog="pygenstub")
    parser.add_argument("--version", action="version", version="%(prog)s " + __version__)
    parser.add_argument("files", nargs="*", help="generate stubs for given files")
    parser.add_argument(
        "-m",
        "--module",
        action="append",
        metavar="MODULE",
        dest="modules",
        default=[],
        help="generate stubs for given modules",
    )
    parser.add_argument(
        "-o", "--output", metavar="PATH", dest="out_dir", help="change the output directory"
    )
    parser.add_argument(
        "--generic", action="store_true", default=False, help="generate generic stubs"
    )
    parser.add_argument("--debug", action="store_true", help="enable debug messages")

    argv = argv if argv is not None else sys.argv
    arguments = parser.parse_args(argv[1:])

    # set debug mode
    if arguments.debug:
        logging.basicConfig(level=logging.DEBUG)
        _logger.debug("running in debug mode")

    out_dir = arguments.out_dir if arguments.out_dir is not None else ""
    if (out_dir == "") and (len(arguments.modules) > 0):
        # module stubs cannot be written next to their sources
        print("Output directory must be given when generating stubs for modules.")
        sys.exit(1)

    # collect (source, destination) pairs from file/directory arguments
    modules = []
    for path in arguments.files:
        paths = Path(path).glob("**/*.py") if Path(path).is_dir() else [Path(path)]
        for source in paths:
            if str(source).startswith(os.path.pardir):
                source = source.absolute().resolve()
            if (out_dir != "") and source.is_absolute():
                # rebase absolute sources under the output directory
                source = source.relative_to(source.root)
            destination = Path(out_dir, source.with_suffix(".pyi"))
            modules.append((source, destination))

    for mod_name in arguments.modules:
        modules.extend(get_pkg_paths(mod_name, out_dir))

    for source, destination in modules:
        _logger.info("generating stub for %s to path %s", source, destination)
        with source.open() as f:
            code = f.read()
        try:
            stub = get_stub(code, generic=arguments.generic)
        except Exception as e:
            # report the failing file and keep processing the rest
            print(source, "-", e, file=sys.stderr)
            continue
        if stub != "":
            if not destination.parent.exists():
                destination.parent.mkdir(parents=True)
            with destination.open("w") as f:
                f.write("# " + EDIT_WARNING + "\n\n" + stub)
# Run the command-line interface when executed as a script.
if __name__ == "__main__":
    main()
|
uyar/pygenstub | pygenstub.py | get_stub | python | def get_stub(source, generic=False):
generator = StubGenerator(source, generic=generic)
stub = generator.generate_stub()
return stub | Get the stub code for a source code.
:sig: (str, bool) -> str
:param source: Source code to generate the stub for.
:param generic: Whether to produce generic stubs.
:return: Generated stub code. | train | https://github.com/uyar/pygenstub/blob/a6b18a823382d3c6be29c411fb33c58b6090d22c/pygenstub.py#L745-L755 | [
"def generate_stub(self):\n \"\"\"Generate the stub code for this source.\n\n :sig: () -> str\n :return: Generated stub code.\n \"\"\"\n needed_types = self.required_types - BUILTIN_TYPES\n\n needed_types -= self.defined_types\n _logger.debug(\"defined types: %s\", self.defined_types)\n\n mo... | # Copyright (C) 2016-2019 H. Turgut Uyar <uyar@tekir.org>
#
# pygenstub is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pygenstub is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pygenstub. If not, see <http://www.gnu.org/licenses/>.
"""pygenstub is a utility for generating stub files from docstrings in source files.
It takes a source file as input and creates a stub file with the same base name
and the ``.pyi`` extension.
For more information, please refer to the documentation:
https://pygenstub.tekir.org/
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import ast
import inspect
import logging
import os
import re
import sys
import textwrap
from argparse import ArgumentParser
from bisect import bisect
from collections import OrderedDict
from importlib import import_module
from io import StringIO
from pkgutil import get_loader, walk_packages
from docutils.core import publish_doctree
__version__ = "1.4.0" # sig: str
PY3 = sys.version_info >= (3, 0)
if not PY3:
import __builtin__ as builtins
from pathlib2 import Path
else:
import builtins
from pathlib import Path
# sigalias: Document = docutils.nodes.document
# Names that never need an import in generated stubs.
BUILTIN_TYPES = {k for k, t in builtins.__dict__.items() if isinstance(t, type)}
BUILTIN_TYPES.add("None")

# Markers used in docstrings and comments to carry type information.
SIG_FIELD = "sig"  # sig: str
SIG_COMMENT = "# sig:"  # sig: str
SIG_ALIAS = "# sigalias:"  # sig: str

# Decorators that are kept in generated stubs.
DECORATORS = {"property", "staticmethod", "classmethod"}  # sig: Set[str]

LINE_LENGTH_LIMIT = 79
INDENT = 4 * " "

EDIT_WARNING = "THIS FILE IS AUTOMATICALLY GENERATED, DO NOT EDIT MANUALLY."

# Matches (possibly dotted) type names inside a signature.
_RE_QUALIFIED_TYPES = re.compile(r"\w+(?:\.\w+)*")

# Matches a "# sig:" occurrence that is inside a string literal.
_RE_COMMENT_IN_STRING = re.compile(r"""['"]\s*%(text)s\s*.*['"]""" % {"text": SIG_COMMENT})

_logger = logging.getLogger(__name__)
def get_fields(node, fields_tag="field_list"):
    """Get the field names and their values from a node.

    :sig: (Document, str) -> Dict[str, str]
    :param node: Node to get the fields from.
    :param fields_tag: Tag of child node that contains the fields.
    :return: Names and values of fields.
    """
    matching = [child for child in node.children if child.tagname == fields_tag]
    if not matching:
        return {}
    assert len(matching) == 1, "multiple nodes with tag " + fields_tag
    result = {}
    for field in matching[0].children:
        if field.tagname != "field":
            continue
        # each field node carries a field_name child and a field_body child
        parts = {part.tagname: part.rawsource.strip() for part in field.children}
        result[parts["field_name"]] = parts["field_body"]
    return result
def extract_signature(docstring):
    """Extract the signature field from a docstring.

    :sig: (str) -> Optional[str]
    :param docstring: Docstring to extract the signature from.
    :return: Extracted signature, or ``None`` if there's no signature.
    """
    # suppress docutils reports while parsing arbitrary docstrings
    tree = publish_doctree(docstring, settings_overrides={"report_level": 5})
    return get_fields(tree).get(SIG_FIELD)
def get_signature(node):
    """Get the signature of a function or a class.

    :sig: (Union[ast.FunctionDef, ast.AsyncFunctionDef, ast.ClassDef]) -> Optional[str]
    :param node: Node to get the signature from.
    :return: Value of signature field in node docstring, or ``None``.
    """
    docstring = ast.get_docstring(node)
    return None if docstring is None else extract_signature(docstring)
def split_parameter_types(parameters):
    """Split a parameter types declaration into individual types.

    The input is the left hand side of a signature (the part before
    the arrow), excluding the parentheses.

    :sig: (str) -> List[str]
    :param parameters: Comma separated parameter types.
    :return: Parameter types.
    """
    if parameters == "":
        return []

    # split at top-level commas only, ignoring those nested in brackets
    types = []
    current = []
    depth = 0
    for char in parameters:
        if (char == ",") and (depth == 0):
            types.append("".join(current).strip())
            current = []
            continue
        if char == "[":
            depth += 1
        elif char == "]":
            depth -= 1
        current.append(char)
    types.append("".join(current).strip())
    return types
def parse_signature(signature):
    """Parse a signature into its input and return parameter types.

    This also collects the types that are required by any of the input
    and return types.

    :sig: (str) -> Tuple[List[str], str, Set[str]]
    :param signature: Signature to parse.
    :return: Input parameter types, return type, and all required types.
    """
    if " -> " not in signature:
        # signature comment: no parameters, treat variable type as return type
        param_types = None
        return_type = signature.strip()
    else:
        lhs, return_type = [part.strip() for part in signature.split(" -> ")]
        # drop the parentheses around the parameter type list
        inner = lhs[1:-1].strip()
        param_types = split_parameter_types(inner)
    required = set(_RE_QUALIFIED_TYPES.findall(signature))
    return param_types, return_type, required
class StubNode:
    """A node in a stub tree."""

    def __init__(self):
        """Initialize this stub node.

        :sig: () -> None
        """
        self.variables = []  # sig: List[VariableNode]
        self.variable_names = set()  # sig: Set[str]
        self.children = []  # sig: List[Union[FunctionNode, ClassNode]]
        self.parent = None  # sig: Optional[StubNode]

    def add_variable(self, node):
        """Add a variable node to this node.

        :sig: (VariableNode) -> None
        :param node: Variable node to add.
        """
        # ignore repeated assignments to the same name
        if node.name not in self.variable_names:
            self.variables.append(node)
            self.variable_names.add(node.name)
            node.parent = self

    def add_child(self, node):
        """Add a function/method or class node to this node.

        :sig: (Union[FunctionNode, ClassNode]) -> None
        :param node: Function or class node to add.
        """
        self.children.append(node)
        node.parent = self

    def get_code(self):
        """Get the stub code for this node.

        The stub code for a node consists of the type annotations of its
        variables, followed by the prototypes of its functions/methods
        and classes.

        :sig: () -> List[str]
        :return: Lines of stub code for this node.
        """
        stub = []
        for child in self.variables:
            stub.extend(child.get_code())
        # separate variables from definitions with a blank line,
        # except inside class bodies
        if (
            (len(self.variables) > 0)
            and (len(self.children) > 0)
            and (not isinstance(self, ClassNode))
        ):
            stub.append("")
        for child in self.children:
            stub.extend(child.get_code())
        return stub
class VariableNode(StubNode):
    """A node representing an assignment in a stub tree."""

    def __init__(self, name, type_):
        """Initialize this variable node.

        :sig: (str, str) -> None
        :param name: Name of variable that is being assigned to.
        :param type_: Type of variable.
        """
        if not PY3:
            StubNode.__init__(self)
        else:
            super().__init__()
        self.name = name  # sig: str
        self.type_ = type_  # sig: str

    def get_code(self):
        """Get the type annotation for this variable.

        :sig: () -> List[str]
        :return: Lines of stub code for this variable.
        """
        # emitted in comment-annotation ("# type:") style
        return ["%(n)s = ... # type: %(t)s" % {"n": self.name, "t": self.type_}]
class FunctionNode(StubNode):
    """A node representing a function in a stub tree."""

    def __init__(self, name, parameters, rtype, decorators=None):
        """Initialize this function node.

        The parameters have to be given as a list of triples where each
        item specifies the name of the parameter, its type, and whether
        it has a default value or not.

        :sig: (str, Sequence[Tuple[str, str, bool]], str, Optional[Sequence[str]]) -> None
        :param name: Name of function.
        :param parameters: List of parameter triples (name, type, has_default).
        :param rtype: Type of return value.
        :param decorators: Decorators of function.
        """
        if not PY3:
            StubNode.__init__(self)
        else:
            super().__init__()
        self.name = name  # sig: str
        self.parameters = parameters  # sig: Sequence[Tuple[str, str, bool]]
        self.rtype = rtype  # sig: str
        self.decorators = decorators if decorators is not None else []  # sig: Sequence[str]
        self._async = False  # sig: bool

    def get_code(self):
        """Get the stub code for this function.

        :sig: () -> List[str]
        :return: Lines of stub code for this function.
        """
        stub = []
        # keep only the decorators that matter in a stub
        for deco in self.decorators:
            if (deco in DECORATORS) or deco.endswith(".setter"):
                stub.append("@" + deco)
        parameters = []
        for name, type_, has_default in self.parameters:
            decl = "%(n)s%(t)s%(d)s" % {
                "n": name,
                "t": ": " + type_ if type_ else "",
                "d": " = ..." if has_default else "",
            }
            parameters.append(decl)
        slots = {
            "a": "async " if self._async else "",
            "n": self.name,
            "p": ", ".join(parameters),
            "r": self.rtype,
        }
        prototype = "%(a)sdef %(n)s(%(p)s) -> %(r)s: ..." % slots
        if len(prototype) <= LINE_LENGTH_LIMIT:
            # everything fits on a single line
            stub.append(prototype)
        elif len(INDENT + slots["p"]) <= LINE_LENGTH_LIMIT:
            # all parameters fit on one continuation line
            stub.append("%(a)sdef %(n)s(" % slots)
            stub.append(INDENT + slots["p"])
            stub.append(") -> %(r)s: ..." % slots)
        else:
            # one parameter per line
            stub.append("%(a)sdef %(n)s(" % slots)
            for param in parameters:
                stub.append(INDENT + param + ",")
            stub.append(") -> %(r)s: ..." % slots)
        return stub
class ClassNode(StubNode):
    """A node representing a class in a stub tree."""

    def __init__(self, name, bases, signature=None):
        """Initialize this class node.

        :sig: (str, Sequence[str], Optional[str]) -> None
        :param name: Name of class.
        :param bases: Base classes of class.
        :param signature: Signature of class, to be used in __init__ method.
        """
        if not PY3:
            StubNode.__init__(self)
        else:
            super().__init__()
        self.name = name  # sig: str
        self.bases = bases  # sig: Sequence[str]
        self.signature = signature  # sig: Optional[str]

    def get_code(self):
        """Get the stub code for this class.

        :sig: () -> List[str]
        :return: Lines of stub code for this class.
        """
        stub = []
        bases = ("(" + ", ".join(self.bases) + ")") if len(self.bases) > 0 else ""
        slots = {"n": self.name, "b": bases}
        if (len(self.children) == 0) and (len(self.variables) == 0):
            # an empty class body collapses to an ellipsis
            stub.append("class %(n)s%(b)s: ..." % slots)
        else:
            stub.append("class %(n)s%(b)s:" % slots)
            super_code = super().get_code() if PY3 else StubNode.get_code(self)
            for line in super_code:
                stub.append(INDENT + line)
        return stub
def get_aliases(lines):
    """Get the type aliases in the source.

    :sig: (Sequence[str]) -> Dict[str, str]
    :param lines: Lines of the source code.
    :return: Aliases and their definitions.
    """
    aliases = {}
    for line in lines:
        line = line.strip()
        # startswith on an empty string is simply False, so no length guard needed
        if line.startswith(SIG_ALIAS):
            _, content = line.split(SIG_ALIAS, 1)
            # split on the first "=" only, so that a definition which itself
            # contains an "=" doesn't raise a ValueError on unpacking
            alias, signature = [t.strip() for t in content.split("=", 1)]
            aliases[alias] = signature
    return aliases
class StubGenerator(ast.NodeVisitor):
"""A transformer that generates stub declarations from a source code."""
def __init__(self, source, generic=False):
"""Initialize this stub generator.
:sig: (str, bool) -> None
:param source: Source code to generate the stub for.
:param generic: Whether to produce generic stubs.
"""
self.root = StubNode() # sig: StubNode
self.generic = generic # sig: bool
self.imported_namespaces = OrderedDict() # sig: OrderedDict[str, str]
self.imported_names = OrderedDict() # sig: OrderedDict[str, str]
self.defined_types = set() # sig: Set[str]
self.required_types = set() # sig: Set[str]
self.aliases = OrderedDict() # sig: OrderedDict[str, str]
self._parents = [self.root] # sig: List[StubNode]
self._code_lines = source.splitlines() # sig: List[str]
self.collect_aliases()
ast_tree = ast.parse(source)
self.visit(ast_tree)
def collect_aliases(self):
"""Collect the type aliases in the source.
:sig: () -> None
"""
self.aliases = get_aliases(self._code_lines)
for alias, signature in self.aliases.items():
_, _, requires = parse_signature(signature)
self.required_types |= requires
self.defined_types |= {alias}
def visit_Import(self, node):
"""Visit an import node."""
line = self._code_lines[node.lineno - 1]
module_name = line.split("import")[0].strip()
for name in node.names:
imported_name = name.name
if name.asname:
imported_name = name.asname + "::" + imported_name
self.imported_namespaces[imported_name] = module_name
def visit_ImportFrom(self, node):
"""Visit an from-import node."""
line = self._code_lines[node.lineno - 1]
module_name = line.split("from")[1].split("import")[0].strip()
for name in node.names:
imported_name = name.name
if name.asname:
imported_name = name.asname + "::" + imported_name
self.imported_names[imported_name] = module_name
def visit_Assign(self, node):
"""Visit an assignment node."""
line = self._code_lines[node.lineno - 1]
if SIG_COMMENT in line:
line = _RE_COMMENT_IN_STRING.sub("", line)
if (SIG_COMMENT not in line) and (not self.generic):
return
if SIG_COMMENT in line:
_, signature = line.split(SIG_COMMENT)
_, return_type, requires = parse_signature(signature)
self.required_types |= requires
parent = self._parents[-1]
for var in node.targets:
if isinstance(var, ast.Name):
name, p = var.id, parent
elif (
isinstance(var, ast.Attribute)
and isinstance(var.value, ast.Name)
and (var.value.id == "self")
):
name, p = var.attr, parent.parent
else:
name, p = None, None
if name is not None:
if self.generic:
return_type = "Any"
self.required_types.add(return_type)
stub_node = VariableNode(name, return_type)
p.add_variable(stub_node)
def get_function_node(self, node):
    """Process a function node.

    :sig: (Union[ast.FunctionDef, ast.AsyncFunctionDef]) -> FunctionNode

    :param node: Node to process.
    :return: Generated function node in the stub tree, or ``None`` when
        the function has no signature and generic mode is off.
    """
    # Collect decorator names: plain names, calls, and dotted
    # attributes (e.g. "x.setter").
    decorators = []
    for d in node.decorator_list:
        if hasattr(d, "id"):
            decorators.append(d.id)
        elif hasattr(d, "func"):
            decorators.append(d.func.id)
        elif hasattr(d, "value"):
            decorators.append(d.value.id + "." + d.attr)
    signature = get_signature(node)
    if signature is None:
        # An __init__ may take its signature from the class docstring.
        parent = self._parents[-1]
        if isinstance(parent, ClassNode) and (node.name == "__init__"):
            signature = parent.signature
    if (signature is None) and (not self.generic):
        return None
    # ast arg objects differ between Python 2 and 3.
    param_names = [arg.arg if PY3 else arg.id for arg in node.args.args]
    n_args = len(param_names)
    if signature is None:
        # Generic mode: everything is Any.
        param_types, rtype, requires = ["Any"] * n_args, "Any", {"Any"}
    else:
        _logger.debug("parsing signature for %s", node.name)
        param_types, rtype, requires = parse_signature(signature)
    # self/cls do not appear in the declared signature; give them an
    # empty type.  TODO: only in classes
    if ((n_args > 0) and (param_names[0] == "self")) or (
        (n_args > 0) and (param_names[0] == "cls") and ("classmethod" in decorators)
    ):
        if signature is None:
            param_types[0] = ""
        else:
            param_types.insert(0, "")
    _logger.debug("parameter types: %s", param_types)
    _logger.debug("return type: %s", rtype)
    _logger.debug("required types: %s", requires)
    self.required_types |= requires
    # *args / **kwargs get no type annotation in the stub.
    if node.args.vararg is not None:
        param_names.append("*" + (node.args.vararg.arg if PY3 else node.args.vararg))
        param_types.append("")
    if node.args.kwarg is not None:
        param_names.append("**" + (node.args.kwarg.arg if PY3 else node.args.kwarg))
        param_types.append("")
    # Keyword-only arguments (Python 3 only; absent attr on Python 2).
    kwonly_args = getattr(node.args, "kwonlyargs", [])
    if len(kwonly_args) > 0:
        param_names.extend([arg.arg for arg in kwonly_args])
        if signature is None:
            param_types.extend(["Any"] * len(kwonly_args))
    if len(param_types) != len(param_names):
        raise ValueError("Parameter names and types don't match: " + node.name)
    # Match default values to parameters by source position: bisect
    # finds the last parameter located before each default expression.
    param_locs = [(a.lineno, a.col_offset) for a in (node.args.args + kwonly_args)]
    param_defaults = {
        bisect(param_locs, (d.lineno, d.col_offset)) - 1 for d in node.args.defaults
    }
    kwonly_defaults = getattr(node.args, "kw_defaults", [])
    for i, d in enumerate(kwonly_defaults):
        if d is not None:
            param_defaults.add(n_args + i)
    params = [
        (name, type_, i in param_defaults)
        for i, (name, type_) in enumerate(zip(param_names, param_types))
    ]
    if len(kwonly_args) > 0:
        # Bare "*" separator before the keyword-only block.
        params.insert(n_args, ("*", "", False))
    stub_node = FunctionNode(
        node.name, parameters=params, rtype=rtype, decorators=decorators
    )
    self._parents[-1].add_child(stub_node)
    # Recurse with this function as the current scope, then pop it.
    self._parents.append(stub_node)
    self.generic_visit(node)
    del self._parents[-1]
    return stub_node
def visit_FunctionDef(self, node):
    """Visit a (synchronous) function definition node."""
    stub_node = self.get_function_node(node)
    if stub_node is None:
        return
    stub_node._async = False
def visit_AsyncFunctionDef(self, node):
    """Visit an async function definition node."""
    stub_node = self.get_function_node(node)
    if stub_node is None:
        return
    stub_node._async = True
def visit_ClassDef(self, node):
    """Visit a class definition node.

    Registers the class as a defined type, reconstructs the dotted
    names of its base classes, and recurses into the class body with
    the class as the current scope.
    """
    self.defined_types.add(node.name)
    bases = []
    for n in node.bases:
        # Walk an attribute chain (a.b.C) from the outside in, then
        # reverse the parts to rebuild the dotted name.
        base_parts = []
        while True:
            if not isinstance(n, ast.Attribute):
                base_parts.append(n.id)
                break
            else:
                base_parts.append(n.attr)
                n = n.value
        bases.append(".".join(base_parts[::-1]))
    self.required_types |= set(bases)
    signature = get_signature(node)
    stub_node = ClassNode(node.name, bases=bases, signature=signature)
    self._parents[-1].add_child(stub_node)
    self._parents.append(stub_node)
    self.generic_visit(node)
    del self._parents[-1]
@staticmethod
def generate_import_from(module_, names):
    """Generate an import line for the stub.

    :sig: (str, Set[str]) -> str

    :param module_: Name of module to import the names from.
    :param names: Names to import; ``"alias::name"`` entries become
        ``... import name as alias``.
    :return: Import line in stub code.
    """
    plain, aliased = [], []
    for item in names:
        (aliased if "::" in item else plain).append(item)
    line = ""
    if len(plain) > 0:
        slots = {"m": module_, "n": ", ".join(sorted(plain))}
        line = "from %(m)s import %(n)s" % slots
        if len(line) > LINE_LENGTH_LIMIT:
            # Too long for one line: switch to a parenthesized,
            # one-name-per-line form.
            slots["n"] = INDENT + (",\n" + INDENT).join(sorted(plain)) + ","
            line = "from %(m)s import (\n%(n)s\n)" % slots
    if len(aliased) > 0:
        line += "\n"
    for item in aliased:
        alias, real = item.split("::")
        line += "from %(m)s import %(n)s as %(a)s" % {"m": module_, "n": real, "a": alias}
    return line
def generate_stub(self):
    """Generate the stub code for this source.

    :sig: () -> str

    :return: Generated stub code.
    :raise ValueError: When a required type cannot be resolved to a
        builtin, a defined type, an import, or the typing module.
    """
    # Start from every referenced type and peel away the ones that
    # need no import.
    needed_types = self.required_types - BUILTIN_TYPES
    needed_types -= self.defined_types
    _logger.debug("defined types: %s", self.defined_types)
    module_vars = {v.name for v in self.root.variables}
    _logger.debug("module variables: %s", module_vars)
    # Dotted names only need their namespace (module) imported.
    qualified_types = {n for n in needed_types if "." in n}
    qualified_namespaces = {".".join(n.split(".")[:-1]) for n in qualified_types}
    needed_namespaces = qualified_namespaces - module_vars
    needed_types -= qualified_types
    _logger.debug("needed namespaces: %s", needed_namespaces)
    imported_names = {n.split("::")[0] for n in self.imported_names}
    imported_types = imported_names & (needed_types | needed_namespaces)
    needed_types -= imported_types
    needed_namespaces -= imported_names
    _logger.debug("used imported types: %s", imported_types)
    try:
        typing_mod = __import__("typing")
        typing_types = {n for n in needed_types if hasattr(typing_mod, n)}
        needed_types -= typing_types
        _logger.debug("types from typing module: %s", typing_types)
    except ImportError:
        typing_types = set()
        # fixed: Logger.warn is a deprecated alias of Logger.warning
        _logger.warning("typing module not installed")
    if len(needed_types) > 0:
        raise ValueError("Unknown types: " + ", ".join(needed_types))
    out = StringIO()
    started = False
    if len(typing_types) > 0:
        line = self.generate_import_from("typing", typing_types)
        out.write(line + "\n")
        started = True
    if len(imported_types) > 0:
        if started:
            out.write("\n")
        # preserve the import order in the source file
        for name in self.imported_names:
            if name.split("::")[0] in imported_types:
                line = self.generate_import_from(self.imported_names[name], {name})
                out.write(line + "\n")
        started = True
    if len(needed_namespaces) > 0:
        if started:
            out.write("\n")
        as_names = {n.split("::")[0]: n for n in self.imported_namespaces if "::" in n}
        for module_ in sorted(needed_namespaces):
            if module_ in as_names:
                a, n = as_names[module_].split("::")
                out.write("import " + n + " as " + a + "\n")
            else:
                out.write("import " + module_ + "\n")
        started = True
    if len(self.aliases) > 0:
        if started:
            out.write("\n")
        for alias, signature in self.aliases.items():
            out.write("%s = %s\n" % (alias, signature))
        started = True
    if started:
        out.write("\n")
    # Emit the stub body, inserting blank lines before class and
    # def lines where readability requires them.
    stub_lines = self.root.get_code()
    n_lines = len(stub_lines)
    for line_no in range(n_lines):
        prev_line = stub_lines[line_no - 1] if line_no > 0 else None
        line = stub_lines[line_no]
        next_line = stub_lines[line_no + 1] if line_no < (n_lines - 1) else None
        if (
            line.startswith("class ")
            and (prev_line is not None)
            and (
                (not prev_line.startswith("class "))
                or (next_line and next_line.startswith(" "))
            )
        ):
            out.write("\n")
        if (
            line.startswith("def ")
            and (prev_line is not None)
            and (prev_line.startswith((" ", "class ")))
        ):
            out.write("\n")
        out.write(line + "\n")
        # fixed: removed dead "line_no += 1" — the for statement rebinds
        # line_no on every iteration, so the increment had no effect.
    return out.getvalue()
def get_mod_paths(mod_name, out_dir):
    """Get the (source, stub) path pair for a single module.

    Returns a list with at most one pair; any failure (module not
    found, no loader, extension module, ...) is logged and yields an
    empty list so callers can keep going.
    """
    result = []
    try:
        loader = get_loader(mod_name)
        src = Path(loader.path)
        if src.name.endswith(".py"):
            rel = Path(*mod_name.split("."))
            if src.name == "__init__.py":
                rel = rel.joinpath("__init__.py")
            dst = Path(out_dir, rel.with_suffix(".pyi"))
            result.append((src, dst))
    except Exception as e:
        _logger.debug(e)
        _logger.warning("cannot handle module, skipping: %s", mod_name)
    return result
def get_pkg_paths(pkg_name, out_dir):
    """Recursively get all (source, stub) path pairs for a package.

    A plain module (no ``__path__``) is delegated to
    :func:`get_mod_paths`; failures are logged and skipped.
    """
    collected = []
    try:
        pkg = import_module(pkg_name)
        if not hasattr(pkg, "__path__"):
            # Not a package, just a single module.
            return get_mod_paths(pkg_name, out_dir)
        for info in walk_packages(pkg.__path__, pkg.__name__ + "."):
            collected.extend(get_mod_paths(info.name, out_dir))
    except Exception as e:
        _logger.debug(e)
        _logger.warning("cannot handle package, skipping: %s", pkg_name)
    return collected
############################################################
# SPHINX
############################################################
def process_docstring(app, what, name, obj, options, lines):
    """Modify the docstring before generating documentation.

    Sphinx ``autodoc-process-docstring`` hook.  Inserts ``:type ...:``
    and ``:rtype:`` fields derived from the ``:sig:`` field, then
    removes the ``:sig:`` field itself.  ``lines`` is mutated in place,
    as the Sphinx API requires.
    """
    # Aliases are harvested once, from the first module processed, and
    # cached on the app object.
    # NOTE(review): if a class/function is processed before any module,
    # aliases stays None and aliases.get() below would fail — confirm
    # that autodoc always emits the module first.
    aliases = getattr(app, "_sigaliases", None)
    if aliases is None:
        if what == "module":
            aliases = get_aliases(inspect.getsource(obj).splitlines())
            app._sigaliases = aliases
    sig_marker = ":" + SIG_FIELD + ":"
    is_class = what in ("class", "exception")
    signature = extract_signature("\n".join(lines))
    if signature is None:
        if not is_class:
            return
        # For classes without a signature in the class docstring, fall
        # back to the signature in the __init__ docstring: copy its
        # :sig: field into the lines and re-extract.
        init_method = getattr(obj, "__init__")
        init_doc = init_method.__doc__
        init_lines = init_doc.splitlines()[1:]
        if len(init_lines) > 1:
            init_doc = textwrap.dedent("\n".join(init_lines[1:]))
            init_lines = init_doc.splitlines()
        if sig_marker not in init_doc:
            return
        sig_started = False
        for line in init_lines:
            if line.lstrip().startswith(sig_marker):
                sig_started = True
            if sig_started:
                lines.append(line)
        signature = extract_signature("\n".join(lines))
        if is_class:
            # Parameter names must come from __init__, not the class.
            obj = init_method
    param_types, rtype, _ = parse_signature(signature)
    param_names = [p for p in inspect.signature(obj).parameters]
    if is_class and (param_names[0] == "self"):
        del param_names[0]
    # if something goes wrong, don't insert parameter types
    if len(param_names) == len(param_types):
        # Note: this loop variable shadows the "name" parameter, which
        # is not used after this point.
        for name, type_ in zip(param_names, param_types):
            find = ":param %(name)s:" % {"name": name}
            alias = aliases.get(type_)
            if alias is not None:
                # Show the alias definition as a superscript.
                type_ = "*%(type)s* :sup:`%(alias)s`" % {"type": type_, "alias": alias}
            for i, line in enumerate(lines):
                if line.startswith(find):
                    lines.insert(i, ":type %(name)s: %(type)s" % {"name": name, "type": type_})
                    break
    if not is_class:
        for i, line in enumerate(lines):
            if line.startswith((":return:", ":returns:")):
                lines.insert(i, ":rtype: " + rtype)
                break
    # remove the signature field
    sig_start = 0
    while sig_start < len(lines):
        if lines[sig_start].startswith(sig_marker):
            break
        sig_start += 1
    # The field body continues over the following indented lines.
    sig_end = sig_start + 1
    while sig_end < len(lines):
        if (not lines[sig_end]) or (lines[sig_end][0] != " "):
            break
        sig_end += 1
    # Delete back-to-front so the indexes stay valid.
    for i in reversed(range(sig_start, sig_end)):
        del lines[i]
def setup(app):
    """Register the autodoc hook with Sphinx.

    :return: Extension metadata containing the version number.
    """
    app.connect("autodoc-process-docstring", process_docstring)
    metadata = {"version": __version__}
    return metadata
############################################################
# MAIN
############################################################
def main(argv=None):
    """Start the command line interface.

    :param argv: Command line arguments (defaults to ``sys.argv``).
    """
    parser = ArgumentParser(prog="pygenstub")
    parser.add_argument("--version", action="version", version="%(prog)s " + __version__)
    parser.add_argument("files", nargs="*", help="generate stubs for given files")
    parser.add_argument(
        "-m",
        "--module",
        action="append",
        metavar="MODULE",
        dest="modules",
        default=[],
        help="generate stubs for given modules",
    )
    parser.add_argument(
        "-o", "--output", metavar="PATH", dest="out_dir", help="change the output directory"
    )
    parser.add_argument(
        "--generic", action="store_true", default=False, help="generate generic stubs"
    )
    parser.add_argument("--debug", action="store_true", help="enable debug messages")
    argv = argv if argv is not None else sys.argv
    arguments = parser.parse_args(argv[1:])
    # set debug mode
    if arguments.debug:
        logging.basicConfig(level=logging.DEBUG)
        _logger.debug("running in debug mode")
    out_dir = arguments.out_dir if arguments.out_dir is not None else ""
    # Module stubs have no natural destination next to a source file,
    # so an output directory is mandatory with -m.
    if (out_dir == "") and (len(arguments.modules) > 0):
        print("Output directory must be given when generating stubs for modules.")
        sys.exit(1)
    # Collect (source, destination) pairs; directories are globbed
    # recursively for .py files.
    modules = []
    for path in arguments.files:
        paths = Path(path).glob("**/*.py") if Path(path).is_dir() else [Path(path)]
        for source in paths:
            if str(source).startswith(os.path.pardir):
                source = source.absolute().resolve()
            # With an output directory, mirror absolute paths under it.
            if (out_dir != "") and source.is_absolute():
                source = source.relative_to(source.root)
            destination = Path(out_dir, source.with_suffix(".pyi"))
            modules.append((source, destination))
    for mod_name in arguments.modules:
        modules.extend(get_pkg_paths(mod_name, out_dir))
    for source, destination in modules:
        _logger.info("generating stub for %s to path %s", source, destination)
        with source.open() as f:
            code = f.read()
        try:
            # get_stub is defined elsewhere in this module (not in this
            # view); presumably wraps StubGenerator — confirm.
            stub = get_stub(code, generic=arguments.generic)
        except Exception as e:
            # Report the failing file and keep processing the rest.
            print(source, "-", e, file=sys.stderr)
            continue
        if stub != "":
            if not destination.parent.exists():
                destination.parent.mkdir(parents=True)
            with destination.open("w") as f:
                f.write("# " + EDIT_WARNING + "\n\n" + stub)
# Script entry point.
if __name__ == "__main__":
    main()
|
uyar/pygenstub | pygenstub.py | get_mod_paths | python | def get_mod_paths(mod_name, out_dir):
paths = []
try:
mod = get_loader(mod_name)
source = Path(mod.path)
if source.name.endswith(".py"):
source_rel = Path(*mod_name.split("."))
if source.name == "__init__.py":
source_rel = source_rel.joinpath("__init__.py")
destination = Path(out_dir, source_rel.with_suffix(".pyi"))
paths.append((source, destination))
except Exception as e:
_logger.debug(e)
_logger.warning("cannot handle module, skipping: %s", mod_name)
return paths | Get source and stub paths for a module. | train | https://github.com/uyar/pygenstub/blob/a6b18a823382d3c6be29c411fb33c58b6090d22c/pygenstub.py#L758-L773 | null | # Copyright (C) 2016-2019 H. Turgut Uyar <uyar@tekir.org>
#
# pygenstub is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pygenstub is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pygenstub. If not, see <http://www.gnu.org/licenses/>.
"""pygenstub is a utility for generating stub files from docstrings in source files.
It takes a source file as input and creates a stub file with the same base name
and the ``.pyi`` extension.
For more information, please refer to the documentation:
https://pygenstub.tekir.org/
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import ast
import inspect
import logging
import os
import re
import sys
import textwrap
from argparse import ArgumentParser
from bisect import bisect
from collections import OrderedDict
from importlib import import_module
from io import StringIO
from pkgutil import get_loader, walk_packages
from docutils.core import publish_doctree
__version__ = "1.4.0"  # sig: str

PY3 = sys.version_info >= (3, 0)

# Compatibility shims: pathlib2 backports pathlib to Python 2.
if not PY3:
    import __builtin__ as builtins
    from pathlib2 import Path
else:
    import builtins
    from pathlib import Path

# sigalias: Document = docutils.nodes.document

# Names of all built-in types plus "None"; these never need an import.
BUILTIN_TYPES = {k for k, t in builtins.__dict__.items() if isinstance(t, type)}
BUILTIN_TYPES.add("None")

# Markers recognized in docstrings and comments.
SIG_FIELD = "sig"  # sig: str
SIG_COMMENT = "# sig:"  # sig: str
SIG_ALIAS = "# sigalias:"  # sig: str

# Decorators that are kept in generated stubs.
DECORATORS = {"property", "staticmethod", "classmethod"}  # sig: Set[str]

LINE_LENGTH_LIMIT = 79
INDENT = 4 * " "

EDIT_WARNING = "THIS FILE IS AUTOMATICALLY GENERATED, DO NOT EDIT MANUALLY."

# Matches (possibly dotted) type names in a signature.
_RE_QUALIFIED_TYPES = re.compile(r"\w+(?:\.\w+)*")
# Matches a signature marker that appears inside a string literal.
_RE_COMMENT_IN_STRING = re.compile(r"""['"]\s*%(text)s\s*.*['"]""" % {"text": SIG_COMMENT})

_logger = logging.getLogger(__name__)
def get_fields(node, fields_tag="field_list"):
    """Get the field names and their values from a docutils node.

    :sig: (Document, str) -> Dict[str, str]

    :param node: Node to get the fields from.
    :param fields_tag: Tag of the child node that contains the fields.
    :return: Mapping of field names to field values.
    """
    containers = [child for child in node.children if child.tagname == fields_tag]
    if not containers:
        return {}
    assert len(containers) == 1, "multiple nodes with tag " + fields_tag
    result = {}
    for field in containers[0].children:
        if field.tagname != "field":
            continue
        parts = {part.tagname: part.rawsource.strip() for part in field.children}
        result[parts["field_name"]] = parts["field_body"]
    return result
def extract_signature(docstring):
    """Extract the signature from a docstring.

    :sig: (str) -> Optional[str]

    :param docstring: Docstring to extract the signature from.
    :return: Extracted signature, or ``None`` if there's no signature.
    """
    # report_level 5 silences docutils messages about malformed reST.
    root = publish_doctree(docstring, settings_overrides={"report_level": 5})
    fields = get_fields(root)
    return fields.get(SIG_FIELD)
def get_signature(node):
    """Get the signature of a function or a class.

    :sig: (Union[ast.FunctionDef, ast.AsyncFunctionDef, ast.ClassDef]) -> Optional[str]

    :param node: Node to get the signature from.
    :return: Value of the signature field in the node's docstring, or
        ``None`` when there is no docstring or no signature field.
    """
    docstring = ast.get_docstring(node)
    return extract_signature(docstring) if docstring is not None else None
def split_parameter_types(parameters):
    """Split a parameter types declaration into individual types.

    The input is the left hand side of a signature (the part before
    the arrow), excluding the parentheses.

    :sig: (str) -> List[str]

    :param parameters: Comma separated parameter types.
    :return: Parameter types.
    """
    if parameters == "":
        return []
    types = []
    current = []
    depth = 0  # nesting depth of square brackets
    for char in parameters:
        if (char == ",") and (depth == 0):
            # Top-level comma: the current type is complete; commas
            # inside brackets (e.g. Dict[str, int]) are part of a type.
            types.append("".join(current).strip())
            current = []
            continue
        if char == "[":
            depth += 1
        elif char == "]":
            depth -= 1
        current.append(char)
    types.append("".join(current).strip())
    return types
def parse_signature(signature):
    """Parse a signature into its input and return parameter types.

    This also collects every type name referenced anywhere in the
    signature.

    :sig: (str) -> Tuple[List[str], str, Set[str]]

    :param signature: Signature to parse.
    :return: Input parameter types, return type, and all required types.
    """
    if " -> " not in signature:
        # A signature comment has no parameter list; the whole text is
        # the type of a variable, reported via the return slot.
        param_types = None
        return_type = signature.strip()
    else:
        lhs, return_type = [part.strip() for part in signature.split(" -> ")]
        inner = lhs[1:-1].strip()  # drop the parentheses around the parameters
        param_types = split_parameter_types(inner)
    required = set(_RE_QUALIFIED_TYPES.findall(signature))
    return param_types, return_type, required
class StubNode:
    """A node in a stub tree."""

    def __init__(self):
        """Initialize this stub node.

        :sig: () -> None
        """
        self.variables = []  # sig: List[VariableNode]
        self.variable_names = set()  # sig: Set[str]
        self.children = []  # sig: List[Union[FunctionNode, ClassNode]]
        self.parent = None  # sig: Optional[StubNode]

    def add_variable(self, node):
        """Add a variable node to this node.

        Duplicate variable names are silently ignored.

        :sig: (VariableNode) -> None

        :param node: Variable node to add.
        """
        if node.name in self.variable_names:
            return
        self.variables.append(node)
        self.variable_names.add(node.name)
        node.parent = self

    def add_child(self, node):
        """Add a function/method or class node to this node.

        :sig: (Union[FunctionNode, ClassNode]) -> None

        :param node: Function or class node to add.
        """
        self.children.append(node)
        node.parent = self

    def get_code(self):
        """Get the stub code for this node.

        The stub code for a node consists of the type annotations of
        its variables, followed by the prototypes of its
        functions/methods and classes.

        :sig: () -> List[str]

        :return: Lines of stub code for this node.
        """
        lines = []
        for variable in self.variables:
            lines.extend(variable.get_code())
        has_both = (len(self.variables) > 0) and (len(self.children) > 0)
        if has_both and (not isinstance(self, ClassNode)):
            # Separate module-level variables from the definitions.
            lines.append("")
        for child in self.children:
            lines.extend(child.get_code())
        return lines
class VariableNode(StubNode):
    """A node representing an assignment in a stub tree."""

    def __init__(self, name, type_):
        """Initialize this variable node.

        :sig: (str, str) -> None

        :param name: Name of variable that is being assigned to.
        :param type_: Type of variable.
        """
        if PY3:
            super().__init__()
        else:
            StubNode.__init__(self)
        self.name = name  # sig: str
        self.type_ = type_  # sig: str

    def get_code(self):
        """Get the type annotation line for this variable.

        :sig: () -> List[str]

        :return: Lines of stub code for this variable.
        """
        annotation = "%(n)s = ... # type: %(t)s" % {"n": self.name, "t": self.type_}
        return [annotation]
class FunctionNode(StubNode):
    """A node representing a function in a stub tree."""

    def __init__(self, name, parameters, rtype, decorators=None):
        """Initialize this function node.

        The parameters have to be given as a list of triples where each
        item specifies the name of the parameter, its type, and whether
        it has a default value or not.

        :sig: (str, Sequence[Tuple[str, str, bool]], str, Optional[Sequence[str]]) -> None

        :param name: Name of function.
        :param parameters: List of parameter triples (name, type, has_default).
        :param rtype: Type of return value.
        :param decorators: Decorators of function.
        """
        if not PY3:
            StubNode.__init__(self)
        else:
            super().__init__()
        self.name = name  # sig: str
        self.parameters = parameters  # sig: Sequence[Tuple[str, str, bool]]
        self.rtype = rtype  # sig: str
        self.decorators = decorators if decorators is not None else []  # sig: Sequence[str]
        self._async = False  # sig: bool

    def get_code(self):
        """Get the stub code for this function.

        The prototype is emitted on one line if it fits; otherwise the
        parameter list is wrapped, either as a whole or one parameter
        per line.

        :sig: () -> List[str]

        :return: Lines of stub code for this function.
        """
        stub = []
        # Only stub-relevant decorators are kept.
        for deco in self.decorators:
            if (deco in DECORATORS) or deco.endswith(".setter"):
                stub.append("@" + deco)
        parameters = []
        for name, type_, has_default in self.parameters:
            decl = "%(n)s%(t)s%(d)s" % {
                "n": name,
                "t": ": " + type_ if type_ else "",
                "d": " = ..." if has_default else "",
            }
            parameters.append(decl)
        slots = {
            "a": "async " if self._async else "",
            "n": self.name,
            "p": ", ".join(parameters),
            "r": self.rtype,
        }
        prototype = "%(a)sdef %(n)s(%(p)s) -> %(r)s: ..." % slots
        if len(prototype) <= LINE_LENGTH_LIMIT:
            # Everything fits on a single line.
            stub.append(prototype)
        elif len(INDENT + slots["p"]) <= LINE_LENGTH_LIMIT:
            # Wrap with the whole parameter list on its own line.
            stub.append("%(a)sdef %(n)s(" % slots)
            stub.append(INDENT + slots["p"])
            stub.append(") -> %(r)s: ..." % slots)
        else:
            # Wrap with one parameter per line.
            stub.append("%(a)sdef %(n)s(" % slots)
            for param in parameters:
                stub.append(INDENT + param + ",")
            stub.append(") -> %(r)s: ..." % slots)
        return stub
class ClassNode(StubNode):
    """A node representing a class in a stub tree."""

    def __init__(self, name, bases, signature=None):
        """Initialize this class node.

        :sig: (str, Sequence[str], Optional[str]) -> None

        :param name: Name of class.
        :param bases: Base classes of class.
        :param signature: Signature of class, to be used in the
            ``__init__`` method.
        """
        if PY3:
            super().__init__()
        else:
            StubNode.__init__(self)
        self.name = name  # sig: str
        self.bases = bases  # sig: Sequence[str]
        self.signature = signature  # sig: Optional[str]

    def get_code(self):
        """Get the stub code for this class.

        :sig: () -> List[str]

        :return: Lines of stub code for this class.
        """
        base_list = "(" + ", ".join(self.bases) + ")" if len(self.bases) > 0 else ""
        slots = {"n": self.name, "b": base_list}
        if (len(self.children) == 0) and (len(self.variables) == 0):
            # An empty class collapses to a single-line stub.
            return ["class %(n)s%(b)s: ..." % slots]
        lines = ["class %(n)s%(b)s:" % slots]
        body = super().get_code() if PY3 else StubNode.get_code(self)
        for line in body:
            lines.append(INDENT + line)
        return lines
def get_aliases(lines):
    """Get the type aliases in the source.

    :sig: (Sequence[str]) -> Dict[str, str]

    :param lines: Lines of the source code.
    :return: Aliases and their definitions.
    """
    aliases = {}
    for raw in lines:
        stripped = raw.strip()
        if stripped and stripped.startswith(SIG_ALIAS):
            _, declaration = stripped.split(SIG_ALIAS)
            name, definition = [part.strip() for part in declaration.split("=")]
            aliases[name] = definition
    return aliases
class StubGenerator(ast.NodeVisitor):
"""A transformer that generates stub declarations from a source code."""
def __init__(self, source, generic=False):
    """Initialize this stub generator.

    Parses the source, collects aliases, and immediately walks the AST
    to build the stub tree rooted at ``self.root``.

    :sig: (str, bool) -> None

    :param source: Source code to generate the stub for.
    :param generic: Whether to produce generic stubs (``Any`` for
        everything, even without signature comments).
    """
    self.root = StubNode()  # sig: StubNode
    self.generic = generic  # sig: bool
    self.imported_namespaces = OrderedDict()  # sig: OrderedDict[str, str]
    self.imported_names = OrderedDict()  # sig: OrderedDict[str, str]
    self.defined_types = set()  # sig: Set[str]
    self.required_types = set()  # sig: Set[str]
    self.aliases = OrderedDict()  # sig: OrderedDict[str, str]
    self._parents = [self.root]  # sig: List[StubNode]
    self._code_lines = source.splitlines()  # sig: List[str]
    self.collect_aliases()
    ast_tree = ast.parse(source)
    self.visit(ast_tree)
def collect_aliases(self):
    """Collect the type aliases declared via ``# sigalias:`` comments.

    :sig: () -> None

    Each alias is registered as a defined type; the types its
    definition references are added to the required types.
    """
    self.aliases = get_aliases(self._code_lines)
    for alias, signature in self.aliases.items():
        _, _, requires = parse_signature(signature)
        self.required_types |= requires
        self.defined_types |= {alias}
def visit_Import(self, node):
    """Visit an ``import`` statement node.

    Records each imported module in ``self.imported_namespaces``;
    ``as`` imports are keyed as ``"alias::name"``.
    """
    # NOTE(review): the text before the "import" keyword is normally
    # empty, so the stored value is usually ""; later stages appear to
    # use only the keys — confirm.
    source_line = self._code_lines[node.lineno - 1]
    prefix = source_line.split("import")[0].strip()
    for alias in node.names:
        key = alias.name
        if alias.asname:
            key = alias.asname + "::" + key
        self.imported_namespaces[key] = prefix
def visit_ImportFrom(self, node):
    """Visit a ``from ... import ...`` statement node.

    Records every imported name in ``self.imported_names``; ``as``
    imports are keyed as ``"alias::name"``.
    """
    # Use the AST fields instead of re-parsing the source line: the old
    # line.split("import") approach broke whenever the module name
    # contained the substring "import" (e.g. "from importlib import x"
    # produced an empty module name).  node.level encodes the leading
    # dots of a relative import, node.module is None for "from . import".
    module_name = "." * node.level + (node.module or "")
    for name in node.names:
        imported_name = name.name
        if name.asname:
            imported_name = name.asname + "::" + imported_name
        self.imported_names[imported_name] = module_name
def visit_Assign(self, node):
    """Visit an assignment node.

    Records a variable annotation when the line carries a ``# sig:``
    comment (or when generating generic stubs).  ``self.attr`` targets
    are attached to the enclosing class; plain names to the current
    scope; other targets are ignored.
    """
    line = self._code_lines[node.lineno - 1]
    if SIG_COMMENT in line:
        # Drop markers that are actually inside a string literal.
        line = _RE_COMMENT_IN_STRING.sub("", line)
    if (SIG_COMMENT not in line) and (not self.generic):
        return
    if SIG_COMMENT in line:
        _, signature = line.split(SIG_COMMENT)
        # Variable signatures have no "->"; the text is the type.
        _, return_type, requires = parse_signature(signature)
        self.required_types |= requires
    parent = self._parents[-1]
    for var in node.targets:
        if isinstance(var, ast.Name):
            name, p = var.id, parent
        elif (
            isinstance(var, ast.Attribute)
            and isinstance(var.value, ast.Name)
            and (var.value.id == "self")
        ):
            # self.attr in a method: document it on the class.
            name, p = var.attr, parent.parent
        else:
            name, p = None, None
        if name is not None:
            if self.generic:
                return_type = "Any"
            self.required_types.add(return_type)
            stub_node = VariableNode(name, return_type)
            p.add_variable(stub_node)
def get_function_node(self, node):
    """Process a function node.

    :sig: (Union[ast.FunctionDef, ast.AsyncFunctionDef]) -> FunctionNode

    :param node: Node to process.
    :return: Generated function node in the stub tree, or ``None`` when
        the function has no signature and generic mode is off.
    """
    # Collect decorator names: plain names, calls, dotted attributes.
    decorators = []
    for d in node.decorator_list:
        if hasattr(d, "id"):
            decorators.append(d.id)
        elif hasattr(d, "func"):
            decorators.append(d.func.id)
        elif hasattr(d, "value"):
            decorators.append(d.value.id + "." + d.attr)
    signature = get_signature(node)
    if signature is None:
        # An __init__ may take its signature from the class docstring.
        parent = self._parents[-1]
        if isinstance(parent, ClassNode) and (node.name == "__init__"):
            signature = parent.signature
    if (signature is None) and (not self.generic):
        return None
    param_names = [arg.arg if PY3 else arg.id for arg in node.args.args]
    n_args = len(param_names)
    if signature is None:
        # Generic mode: everything is Any.
        param_types, rtype, requires = ["Any"] * n_args, "Any", {"Any"}
    else:
        _logger.debug("parsing signature for %s", node.name)
        param_types, rtype, requires = parse_signature(signature)
    # self/cls are not part of the declared signature.  TODO: only in classes
    if ((n_args > 0) and (param_names[0] == "self")) or (
        (n_args > 0) and (param_names[0] == "cls") and ("classmethod" in decorators)
    ):
        if signature is None:
            param_types[0] = ""
        else:
            param_types.insert(0, "")
    _logger.debug("parameter types: %s", param_types)
    _logger.debug("return type: %s", rtype)
    _logger.debug("required types: %s", requires)
    self.required_types |= requires
    # *args / **kwargs carry no type annotation in the stub.
    if node.args.vararg is not None:
        param_names.append("*" + (node.args.vararg.arg if PY3 else node.args.vararg))
        param_types.append("")
    if node.args.kwarg is not None:
        param_names.append("**" + (node.args.kwarg.arg if PY3 else node.args.kwarg))
        param_types.append("")
    # Keyword-only arguments (attribute absent on Python 2).
    kwonly_args = getattr(node.args, "kwonlyargs", [])
    if len(kwonly_args) > 0:
        param_names.extend([arg.arg for arg in kwonly_args])
        if signature is None:
            param_types.extend(["Any"] * len(kwonly_args))
    if len(param_types) != len(param_names):
        raise ValueError("Parameter names and types don't match: " + node.name)
    # Match defaults to parameters by source position via bisect.
    param_locs = [(a.lineno, a.col_offset) for a in (node.args.args + kwonly_args)]
    param_defaults = {
        bisect(param_locs, (d.lineno, d.col_offset)) - 1 for d in node.args.defaults
    }
    kwonly_defaults = getattr(node.args, "kw_defaults", [])
    for i, d in enumerate(kwonly_defaults):
        if d is not None:
            param_defaults.add(n_args + i)
    params = [
        (name, type_, i in param_defaults)
        for i, (name, type_) in enumerate(zip(param_names, param_types))
    ]
    if len(kwonly_args) > 0:
        # Bare "*" separator before the keyword-only block.
        params.insert(n_args, ("*", "", False))
    stub_node = FunctionNode(
        node.name, parameters=params, rtype=rtype, decorators=decorators
    )
    self._parents[-1].add_child(stub_node)
    # Recurse with this function as the current scope, then pop it.
    self._parents.append(stub_node)
    self.generic_visit(node)
    del self._parents[-1]
    return stub_node
def visit_FunctionDef(self, node):
    """Visit a (synchronous) function definition node."""
    stub_node = self.get_function_node(node)
    if stub_node is None:
        return
    stub_node._async = False
def visit_AsyncFunctionDef(self, node):
    """Visit an async function definition node."""
    stub_node = self.get_function_node(node)
    if stub_node is None:
        return
    stub_node._async = True
def visit_ClassDef(self, node):
    """Visit a class definition node.

    Registers the class as a defined type, reconstructs the dotted
    names of its bases, and recurses into the body with the class as
    the current scope.
    """
    self.defined_types.add(node.name)
    bases = []
    for n in node.bases:
        # Walk an attribute chain (a.b.C) outside-in, then reverse to
        # rebuild the dotted name.
        base_parts = []
        while True:
            if not isinstance(n, ast.Attribute):
                base_parts.append(n.id)
                break
            else:
                base_parts.append(n.attr)
                n = n.value
        bases.append(".".join(base_parts[::-1]))
    self.required_types |= set(bases)
    signature = get_signature(node)
    stub_node = ClassNode(node.name, bases=bases, signature=signature)
    self._parents[-1].add_child(stub_node)
    self._parents.append(stub_node)
    self.generic_visit(node)
    del self._parents[-1]
@staticmethod
def generate_import_from(module_, names):
    """Generate an import line for the stub.

    :sig: (str, Set[str]) -> str

    :param module_: Name of module to import the names from.
    :param names: Names to import; ``"alias::name"`` entries become
        ``... import name as alias``.
    :return: Import line in stub code.
    """
    plain, aliased = [], []
    for item in names:
        (aliased if "::" in item else plain).append(item)
    line = ""
    if len(plain) > 0:
        slots = {"m": module_, "n": ", ".join(sorted(plain))}
        line = "from %(m)s import %(n)s" % slots
        if len(line) > LINE_LENGTH_LIMIT:
            # Too long: switch to the parenthesized multi-line form.
            slots["n"] = INDENT + (",\n" + INDENT).join(sorted(plain)) + ","
            line = "from %(m)s import (\n%(n)s\n)" % slots
    if len(aliased) > 0:
        line += "\n"
    for item in aliased:
        alias, real = item.split("::")
        line += "from %(m)s import %(n)s as %(a)s" % {"m": module_, "n": real, "a": alias}
    return line
def generate_stub(self):
"""Generate the stub code for this source.
:sig: () -> str
:return: Generated stub code.
"""
needed_types = self.required_types - BUILTIN_TYPES
needed_types -= self.defined_types
_logger.debug("defined types: %s", self.defined_types)
module_vars = {v.name for v in self.root.variables}
_logger.debug("module variables: %s", module_vars)
qualified_types = {n for n in needed_types if "." in n}
qualified_namespaces = {".".join(n.split(".")[:-1]) for n in qualified_types}
needed_namespaces = qualified_namespaces - module_vars
needed_types -= qualified_types
_logger.debug("needed namespaces: %s", needed_namespaces)
imported_names = {n.split("::")[0] for n in self.imported_names}
imported_types = imported_names & (needed_types | needed_namespaces)
needed_types -= imported_types
needed_namespaces -= imported_names
_logger.debug("used imported types: %s", imported_types)
try:
typing_mod = __import__("typing")
typing_types = {n for n in needed_types if hasattr(typing_mod, n)}
needed_types -= typing_types
_logger.debug("types from typing module: %s", typing_types)
except ImportError:
typing_types = set()
_logger.warn("typing module not installed")
if len(needed_types) > 0:
raise ValueError("Unknown types: " + ", ".join(needed_types))
out = StringIO()
started = False
if len(typing_types) > 0:
line = self.generate_import_from("typing", typing_types)
out.write(line + "\n")
started = True
if len(imported_types) > 0:
if started:
out.write("\n")
# preserve the import order in the source file
for name in self.imported_names:
if name.split("::")[0] in imported_types:
line = self.generate_import_from(self.imported_names[name], {name})
out.write(line + "\n")
started = True
if len(needed_namespaces) > 0:
if started:
out.write("\n")
as_names = {n.split("::")[0]: n for n in self.imported_namespaces if "::" in n}
for module_ in sorted(needed_namespaces):
if module_ in as_names:
a, n = as_names[module_].split("::")
out.write("import " + n + " as " + a + "\n")
else:
out.write("import " + module_ + "\n")
started = True
if len(self.aliases) > 0:
if started:
out.write("\n")
for alias, signature in self.aliases.items():
out.write("%s = %s\n" % (alias, signature))
started = True
if started:
out.write("\n")
stub_lines = self.root.get_code()
n_lines = len(stub_lines)
for line_no in range(n_lines):
prev_line = stub_lines[line_no - 1] if line_no > 0 else None
line = stub_lines[line_no]
next_line = stub_lines[line_no + 1] if line_no < (n_lines - 1) else None
if (
line.startswith("class ")
and (prev_line is not None)
and (
(not prev_line.startswith("class "))
or (next_line and next_line.startswith(" "))
)
):
out.write("\n")
if (
line.startswith("def ")
and (prev_line is not None)
and (prev_line.startswith((" ", "class ")))
):
out.write("\n")
out.write(line + "\n")
line_no += 1
return out.getvalue()
def get_stub(source, generic=False):
    """Get the stub code for a source code.

    :sig: (str, bool) -> str
    :param source: Source code to generate the stub for.
    :param generic: Whether to produce generic stubs.
    :return: Generated stub code.
    """
    # The generator parses the source in its constructor; generation is
    # a separate step so callers could inspect the tree if needed.
    return StubGenerator(source, generic=generic).generate_stub()
def get_pkg_paths(pkg_name, out_dir):
    """Recursively get all source and stub paths for a package."""
    collected = []
    try:
        pkg = import_module(pkg_name)
        if not hasattr(pkg, "__path__"):
            # A plain module, not a package: delegate directly.
            return get_mod_paths(pkg_name, out_dir)
        for mod_info in walk_packages(pkg.__path__, pkg.__name__ + "."):
            collected.extend(get_mod_paths(mod_info.name, out_dir))
    except Exception as e:
        # Best effort: log and skip packages that fail to import or walk.
        _logger.debug(e)
        _logger.warning("cannot handle package, skipping: %s", pkg_name)
    return collected
############################################################
# SPHINX
############################################################
def process_docstring(app, what, name, obj, options, lines):
    """Modify the docstring before generating documentation.
    This will insert type declarations for parameters and return type
    into the docstring, and remove the signature field so that it will
    be excluded from the generated document.
    """
    # Alias table is computed once (from the first module seen) and cached
    # on the Sphinx app object under a private attribute.
    aliases = getattr(app, "_sigaliases", None)
    if aliases is None:
        if what == "module":
            aliases = get_aliases(inspect.getsource(obj).splitlines())
            app._sigaliases = aliases
    # NOTE(review): if the first documented object is not a module, `aliases`
    # stays None here and `aliases.get(...)` below would raise -- confirm
    # that autodoc always emits the module event first.
    sig_marker = ":" + SIG_FIELD + ":"
    is_class = what in ("class", "exception")
    signature = extract_signature("\n".join(lines))
    if signature is None:
        # No :sig: field on the object's own docstring.  For classes, fall
        # back to __init__'s docstring: copy its :sig: field into `lines`
        # so the normal processing below applies.
        if not is_class:
            return
        init_method = getattr(obj, "__init__")
        init_doc = init_method.__doc__
        init_lines = init_doc.splitlines()[1:]
        if len(init_lines) > 1:
            init_doc = textwrap.dedent("\n".join(init_lines[1:]))
            init_lines = init_doc.splitlines()
        if sig_marker not in init_doc:
            return
        sig_started = False
        for line in init_lines:
            if line.lstrip().startswith(sig_marker):
                sig_started = True
            if sig_started:
                lines.append(line)
        signature = extract_signature("\n".join(lines))
        if is_class:
            obj = init_method
    param_types, rtype, _ = parse_signature(signature)
    param_names = [p for p in inspect.signature(obj).parameters]
    # NOTE(review): assumes the parameter list is non-empty for classes;
    # an __init__ taking only *args would raise IndexError here.
    if is_class and (param_names[0] == "self"):
        del param_names[0]
    # if something goes wrong, don't insert parameter types
    if len(param_names) == len(param_types):
        for name, type_ in zip(param_names, param_types):
            find = ":param %(name)s:" % {"name": name}
            alias = aliases.get(type_)
            if alias is not None:
                # Render aliased types with the alias as a superscript.
                type_ = "*%(type)s* :sup:`%(alias)s`" % {"type": type_, "alias": alias}
            # Insert the :type: field directly above the matching :param: field.
            for i, line in enumerate(lines):
                if line.startswith(find):
                    lines.insert(i, ":type %(name)s: %(type)s" % {"name": name, "type": type_})
                    break
    if not is_class:
        # Insert the :rtype: field directly above the :return(s): field.
        for i, line in enumerate(lines):
            if line.startswith((":return:", ":returns:")):
                lines.insert(i, ":rtype: " + rtype)
                break
    # remove the signature field
    sig_start = 0
    while sig_start < len(lines):
        if lines[sig_start].startswith(sig_marker):
            break
        sig_start += 1
    # The field body ends at the first empty or non-indented line.
    sig_end = sig_start + 1
    while sig_end < len(lines):
        if (not lines[sig_end]) or (lines[sig_end][0] != " "):
            break
        sig_end += 1
    # Delete in reverse so earlier indices stay valid.
    for i in reversed(range(sig_start, sig_end)):
        del lines[i]
def setup(app):
    """Register this extension with a Sphinx application."""
    metadata = {"version": __version__}
    app.connect("autodoc-process-docstring", process_docstring)
    return metadata
############################################################
# MAIN
############################################################
def main(argv=None):
    """Start the command line interface."""
    parser = ArgumentParser(prog="pygenstub")
    parser.add_argument("--version", action="version", version="%(prog)s " + __version__)
    parser.add_argument("files", nargs="*", help="generate stubs for given files")
    parser.add_argument(
        "-m",
        "--module",
        action="append",
        metavar="MODULE",
        dest="modules",
        default=[],
        help="generate stubs for given modules",
    )
    parser.add_argument(
        "-o", "--output", metavar="PATH", dest="out_dir", help="change the output directory"
    )
    parser.add_argument(
        "--generic", action="store_true", default=False, help="generate generic stubs"
    )
    parser.add_argument("--debug", action="store_true", help="enable debug messages")
    argv = argv if argv is not None else sys.argv
    arguments = parser.parse_args(argv[1:])
    # set debug mode
    if arguments.debug:
        logging.basicConfig(level=logging.DEBUG)
        _logger.debug("running in debug mode")
    out_dir = arguments.out_dir if arguments.out_dir is not None else ""
    # Module stubs have no natural location next to a source file,
    # so -o is mandatory when -m is used.
    if (out_dir == "") and (len(arguments.modules) > 0):
        print("Output directory must be given when generating stubs for modules.")
        sys.exit(1)
    # Collect (source, destination) pairs for every requested file/module.
    modules = []
    for path in arguments.files:
        # A directory argument means "all .py files under it".
        paths = Path(path).glob("**/*.py") if Path(path).is_dir() else [Path(path)]
        for source in paths:
            if str(source).startswith(os.path.pardir):
                # Paths above the CWD are normalized to absolute form first.
                source = source.absolute().resolve()
            if (out_dir != "") and source.is_absolute():
                # Re-root absolute sources under the output directory.
                source = source.relative_to(source.root)
            destination = Path(out_dir, source.with_suffix(".pyi"))
            modules.append((source, destination))
    for mod_name in arguments.modules:
        modules.extend(get_pkg_paths(mod_name, out_dir))
    for source, destination in modules:
        _logger.info("generating stub for %s to path %s", source, destination)
        with source.open() as f:
            code = f.read()
        try:
            stub = get_stub(code, generic=arguments.generic)
        except Exception as e:
            # Report per-file failures but keep processing the rest.
            print(source, "-", e, file=sys.stderr)
            continue
        if stub != "":
            if not destination.parent.exists():
                destination.parent.mkdir(parents=True)
            with destination.open("w") as f:
                f.write("# " + EDIT_WARNING + "\n\n" + stub)
# Script entry point: `python pygenstub.py ...`.
if __name__ == "__main__":
    main()
|
uyar/pygenstub | pygenstub.py | get_pkg_paths | python | def get_pkg_paths(pkg_name, out_dir):
paths = []
try:
pkg = import_module(pkg_name)
if not hasattr(pkg, "__path__"):
return get_mod_paths(pkg_name, out_dir)
for mod_info in walk_packages(pkg.__path__, pkg.__name__ + "."):
mod_paths = get_mod_paths(mod_info.name, out_dir)
paths.extend(mod_paths)
except Exception as e:
_logger.debug(e)
_logger.warning("cannot handle package, skipping: %s", pkg_name)
return paths | Recursively get all source and stub paths for a package. | train | https://github.com/uyar/pygenstub/blob/a6b18a823382d3c6be29c411fb33c58b6090d22c/pygenstub.py#L776-L789 | [
"def get_mod_paths(mod_name, out_dir):\n \"\"\"Get source and stub paths for a module.\"\"\"\n paths = []\n try:\n mod = get_loader(mod_name)\n source = Path(mod.path)\n if source.name.endswith(\".py\"):\n source_rel = Path(*mod_name.split(\".\"))\n if source.name... | # Copyright (C) 2016-2019 H. Turgut Uyar <uyar@tekir.org>
#
# pygenstub is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pygenstub is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pygenstub. If not, see <http://www.gnu.org/licenses/>.
"""pygenstub is a utility for generating stub files from docstrings in source files.
It takes a source file as input and creates a stub file with the same base name
and the ``.pyi`` extension.
For more information, please refer to the documentation:
https://pygenstub.tekir.org/
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import ast
import inspect
import logging
import os
import re
import sys
import textwrap
from argparse import ArgumentParser
from bisect import bisect
from collections import OrderedDict
from importlib import import_module
from io import StringIO
from pkgutil import get_loader, walk_packages
from docutils.core import publish_doctree
__version__ = "1.4.0" # sig: str
PY3 = sys.version_info >= (3, 0)
if not PY3:
import __builtin__ as builtins
from pathlib2 import Path
else:
import builtins
from pathlib import Path
# sigalias: Document = docutils.nodes.document
BUILTIN_TYPES = {k for k, t in builtins.__dict__.items() if isinstance(t, type)}
BUILTIN_TYPES.add("None")
SIG_FIELD = "sig" # sig: str
SIG_COMMENT = "# sig:" # sig: str
SIG_ALIAS = "# sigalias:" # sig: str
DECORATORS = {"property", "staticmethod", "classmethod"} # sig: Set[str]
LINE_LENGTH_LIMIT = 79
INDENT = 4 * " "
EDIT_WARNING = "THIS FILE IS AUTOMATICALLY GENERATED, DO NOT EDIT MANUALLY."
_RE_QUALIFIED_TYPES = re.compile(r"\w+(?:\.\w+)*")
_RE_COMMENT_IN_STRING = re.compile(r"""['"]\s*%(text)s\s*.*['"]""" % {"text": SIG_COMMENT})
_logger = logging.getLogger(__name__)
def get_fields(node, fields_tag="field_list"):
    """Get the field names and their values from a node.

    :sig: (Document, str) -> Dict[str, str]
    :param node: Node to get the fields from.
    :param fields_tag: Tag of child node that contains the fields.
    :return: Names and values of fields.
    """
    containers = [child for child in node.children if child.tagname == fields_tag]
    if not containers:
        return {}
    assert len(containers) == 1, "multiple nodes with tag " + fields_tag
    result = {}
    for field in containers[0].children:
        if field.tagname != "field":
            continue
        # Each field node carries a field_name child and a field_body child.
        parts = {part.tagname: part.rawsource.strip() for part in field.children}
        result[parts["field_name"]] = parts["field_body"]
    return result
def extract_signature(docstring):
"""Extract the signature from a docstring.
:sig: (str) -> Optional[str]
:param docstring: Docstring to extract the signature from.
:return: Extracted signature, or ``None`` if there's no signature.
"""
root = publish_doctree(docstring, settings_overrides={"report_level": 5})
fields = get_fields(root)
return fields.get(SIG_FIELD)
def get_signature(node):
"""Get the signature of a function or a class.
:sig: (Union[ast.FunctionDef, ast.AsyncFunctionDef, ast.ClassDef]) -> Optional[str]
:param node: Node to get the signature from.
:return: Value of signature field in node docstring, or ``None`` if there's no signature.
"""
docstring = ast.get_docstring(node)
if docstring is None:
return None
return extract_signature(docstring)
def split_parameter_types(parameters):
    """Split a parameter types declaration into individual types.

    The input is the left hand side of a signature (the part before the arrow),
    excluding the parentheses.

    :sig: (str) -> List[str]
    :param parameters: Comma separated parameter types.
    :return: Parameter types.
    """
    if parameters == "":
        return []
    types = []
    current = []
    depth = 0  # bracket nesting level; commas inside [] are not separators
    for char in parameters:
        if (char == ",") and (depth == 0):
            types.append("".join(current).strip())
            current = []
            continue
        if char == "[":
            depth += 1
        elif char == "]":
            depth -= 1
        current.append(char)
    types.append("".join(current).strip())
    return types
def parse_signature(signature):
"""Parse a signature into its input and return parameter types.
This will also collect the types that are required by any of the input
and return types.
:sig: (str) -> Tuple[List[str], str, Set[str]]
:param signature: Signature to parse.
:return: Input parameter types, return type, and all required types.
"""
if " -> " not in signature:
# signature comment: no parameters, treat variable type as return type
param_types, return_type = None, signature.strip()
else:
lhs, return_type = [s.strip() for s in signature.split(" -> ")]
csv = lhs[1:-1].strip() # remove the parentheses around the parameter type list
param_types = split_parameter_types(csv)
requires = set(_RE_QUALIFIED_TYPES.findall(signature))
return param_types, return_type, requires
class StubNode:
"""A node in a stub tree."""
def __init__(self):
"""Initialize this stub node.
:sig: () -> None
"""
self.variables = [] # sig: List[VariableNode]
self.variable_names = set() # sig: Set[str]
self.children = [] # sig: List[Union[FunctionNode, ClassNode]]
self.parent = None # sig: Optional[StubNode]
def add_variable(self, node):
"""Add a variable node to this node.
:sig: (VariableNode) -> None
:param node: Variable node to add.
"""
if node.name not in self.variable_names:
self.variables.append(node)
self.variable_names.add(node.name)
node.parent = self
def add_child(self, node):
"""Add a function/method or class node to this node.
:sig: (Union[FunctionNode, ClassNode]) -> None
:param node: Function or class node to add.
"""
self.children.append(node)
node.parent = self
def get_code(self):
"""Get the stub code for this node.
The stub code for a node consists of the type annotations of its variables,
followed by the prototypes of its functions/methods and classes.
:sig: () -> List[str]
:return: Lines of stub code for this node.
"""
stub = []
for child in self.variables:
stub.extend(child.get_code())
if (
(len(self.variables) > 0)
and (len(self.children) > 0)
and (not isinstance(self, ClassNode))
):
stub.append("")
for child in self.children:
stub.extend(child.get_code())
return stub
class VariableNode(StubNode):
"""A node representing an assignment in a stub tree."""
def __init__(self, name, type_):
"""Initialize this variable node.
:sig: (str, str) -> None
:param name: Name of variable that is being assigned to.
:param type_: Type of variable.
"""
if not PY3:
StubNode.__init__(self)
else:
super().__init__()
self.name = name # sig: str
self.type_ = type_ # sig: str
def get_code(self):
"""Get the type annotation for this variable.
:sig: () -> List[str]
:return: Lines of stub code for this variable.
"""
return ["%(n)s = ... # type: %(t)s" % {"n": self.name, "t": self.type_}]
class FunctionNode(StubNode):
"""A node representing a function in a stub tree."""
def __init__(self, name, parameters, rtype, decorators=None):
"""Initialize this function node.
The parameters have to given as a list of triples where each item specifies
the name of the parameter, its type, and whether it has a default value or not.
:sig: (str, Sequence[Tuple[str, str, bool]], str, Optional[Sequence[str]]) -> None
:param name: Name of function.
:param parameters: List of parameter triples (name, type, has_default).
:param rtype: Type of return value.
:param decorators: Decorators of function.
"""
if not PY3:
StubNode.__init__(self)
else:
super().__init__()
self.name = name # sig: str
self.parameters = parameters # sig: Sequence[Tuple[str, str, bool]]
self.rtype = rtype # sig: str
self.decorators = decorators if decorators is not None else [] # sig: Sequence[str]
self._async = False # sig: bool
def get_code(self):
"""Get the stub code for this function.
:sig: () -> List[str]
:return: Lines of stub code for this function.
"""
stub = []
for deco in self.decorators:
if (deco in DECORATORS) or deco.endswith(".setter"):
stub.append("@" + deco)
parameters = []
for name, type_, has_default in self.parameters:
decl = "%(n)s%(t)s%(d)s" % {
"n": name,
"t": ": " + type_ if type_ else "",
"d": " = ..." if has_default else "",
}
parameters.append(decl)
slots = {
"a": "async " if self._async else "",
"n": self.name,
"p": ", ".join(parameters),
"r": self.rtype,
}
prototype = "%(a)sdef %(n)s(%(p)s) -> %(r)s: ..." % slots
if len(prototype) <= LINE_LENGTH_LIMIT:
stub.append(prototype)
elif len(INDENT + slots["p"]) <= LINE_LENGTH_LIMIT:
stub.append("%(a)sdef %(n)s(" % slots)
stub.append(INDENT + slots["p"])
stub.append(") -> %(r)s: ..." % slots)
else:
stub.append("%(a)sdef %(n)s(" % slots)
for param in parameters:
stub.append(INDENT + param + ",")
stub.append(") -> %(r)s: ..." % slots)
return stub
class ClassNode(StubNode):
"""A node representing a class in a stub tree."""
def __init__(self, name, bases, signature=None):
"""Initialize this class node.
:sig: (str, Sequence[str], Optional[str]) -> None
:param name: Name of class.
:param bases: Base classes of class.
:param signature: Signature of class, to be used in __init__ method.
"""
if not PY3:
StubNode.__init__(self)
else:
super().__init__()
self.name = name # sig: str
self.bases = bases # sig: Sequence[str]
self.signature = signature # sig: Optional[str]
def get_code(self):
"""Get the stub code for this class.
:sig: () -> List[str]
:return: Lines of stub code for this class.
"""
stub = []
bases = ("(" + ", ".join(self.bases) + ")") if len(self.bases) > 0 else ""
slots = {"n": self.name, "b": bases}
if (len(self.children) == 0) and (len(self.variables) == 0):
stub.append("class %(n)s%(b)s: ..." % slots)
else:
stub.append("class %(n)s%(b)s:" % slots)
super_code = super().get_code() if PY3 else StubNode.get_code(self)
for line in super_code:
stub.append(INDENT + line)
return stub
def get_aliases(lines):
    """Get the type aliases in the source.

    :sig: (Sequence[str]) -> Dict[str, str]
    :param lines: Lines of the source code.
    :return: Aliases and their definitions.
    """
    aliases = {}
    for line in lines:
        line = line.strip()
        if line.startswith(SIG_ALIAS):
            # FIX: split at most once on the marker and on "=" so that a
            # definition containing a second "=" (or a repeated marker)
            # does not raise ValueError during tuple unpacking.
            _, content = line.split(SIG_ALIAS, 1)
            alias, signature = [t.strip() for t in content.split("=", 1)]
            aliases[alias] = signature
    return aliases
class StubGenerator(ast.NodeVisitor):
"""A transformer that generates stub declarations from a source code."""
def __init__(self, source, generic=False):
"""Initialize this stub generator.
:sig: (str, bool) -> None
:param source: Source code to generate the stub for.
:param generic: Whether to produce generic stubs.
"""
self.root = StubNode() # sig: StubNode
self.generic = generic # sig: bool
self.imported_namespaces = OrderedDict() # sig: OrderedDict[str, str]
self.imported_names = OrderedDict() # sig: OrderedDict[str, str]
self.defined_types = set() # sig: Set[str]
self.required_types = set() # sig: Set[str]
self.aliases = OrderedDict() # sig: OrderedDict[str, str]
self._parents = [self.root] # sig: List[StubNode]
self._code_lines = source.splitlines() # sig: List[str]
self.collect_aliases()
ast_tree = ast.parse(source)
self.visit(ast_tree)
def collect_aliases(self):
"""Collect the type aliases in the source.
:sig: () -> None
"""
self.aliases = get_aliases(self._code_lines)
for alias, signature in self.aliases.items():
_, _, requires = parse_signature(signature)
self.required_types |= requires
self.defined_types |= {alias}
def visit_Import(self, node):
"""Visit an import node."""
line = self._code_lines[node.lineno - 1]
module_name = line.split("import")[0].strip()
for name in node.names:
imported_name = name.name
if name.asname:
imported_name = name.asname + "::" + imported_name
self.imported_namespaces[imported_name] = module_name
def visit_ImportFrom(self, node):
"""Visit an from-import node."""
line = self._code_lines[node.lineno - 1]
module_name = line.split("from")[1].split("import")[0].strip()
for name in node.names:
imported_name = name.name
if name.asname:
imported_name = name.asname + "::" + imported_name
self.imported_names[imported_name] = module_name
def visit_Assign(self, node):
"""Visit an assignment node."""
line = self._code_lines[node.lineno - 1]
if SIG_COMMENT in line:
line = _RE_COMMENT_IN_STRING.sub("", line)
if (SIG_COMMENT not in line) and (not self.generic):
return
if SIG_COMMENT in line:
_, signature = line.split(SIG_COMMENT)
_, return_type, requires = parse_signature(signature)
self.required_types |= requires
parent = self._parents[-1]
for var in node.targets:
if isinstance(var, ast.Name):
name, p = var.id, parent
elif (
isinstance(var, ast.Attribute)
and isinstance(var.value, ast.Name)
and (var.value.id == "self")
):
name, p = var.attr, parent.parent
else:
name, p = None, None
if name is not None:
if self.generic:
return_type = "Any"
self.required_types.add(return_type)
stub_node = VariableNode(name, return_type)
p.add_variable(stub_node)
def get_function_node(self, node):
"""Process a function node.
:sig: (Union[ast.FunctionDef, ast.AsyncFunctionDef]) -> FunctionNode
:param node: Node to process.
:return: Generated function node in stub tree.
"""
decorators = []
for d in node.decorator_list:
if hasattr(d, "id"):
decorators.append(d.id)
elif hasattr(d, "func"):
decorators.append(d.func.id)
elif hasattr(d, "value"):
decorators.append(d.value.id + "." + d.attr)
signature = get_signature(node)
if signature is None:
parent = self._parents[-1]
if isinstance(parent, ClassNode) and (node.name == "__init__"):
signature = parent.signature
if (signature is None) and (not self.generic):
return None
param_names = [arg.arg if PY3 else arg.id for arg in node.args.args]
n_args = len(param_names)
if signature is None:
param_types, rtype, requires = ["Any"] * n_args, "Any", {"Any"}
else:
_logger.debug("parsing signature for %s", node.name)
param_types, rtype, requires = parse_signature(signature)
# TODO: only in classes
if ((n_args > 0) and (param_names[0] == "self")) or (
(n_args > 0) and (param_names[0] == "cls") and ("classmethod" in decorators)
):
if signature is None:
param_types[0] = ""
else:
param_types.insert(0, "")
_logger.debug("parameter types: %s", param_types)
_logger.debug("return type: %s", rtype)
_logger.debug("required types: %s", requires)
self.required_types |= requires
if node.args.vararg is not None:
param_names.append("*" + (node.args.vararg.arg if PY3 else node.args.vararg))
param_types.append("")
if node.args.kwarg is not None:
param_names.append("**" + (node.args.kwarg.arg if PY3 else node.args.kwarg))
param_types.append("")
kwonly_args = getattr(node.args, "kwonlyargs", [])
if len(kwonly_args) > 0:
param_names.extend([arg.arg for arg in kwonly_args])
if signature is None:
param_types.extend(["Any"] * len(kwonly_args))
if len(param_types) != len(param_names):
raise ValueError("Parameter names and types don't match: " + node.name)
param_locs = [(a.lineno, a.col_offset) for a in (node.args.args + kwonly_args)]
param_defaults = {
bisect(param_locs, (d.lineno, d.col_offset)) - 1 for d in node.args.defaults
}
kwonly_defaults = getattr(node.args, "kw_defaults", [])
for i, d in enumerate(kwonly_defaults):
if d is not None:
param_defaults.add(n_args + i)
params = [
(name, type_, i in param_defaults)
for i, (name, type_) in enumerate(zip(param_names, param_types))
]
if len(kwonly_args) > 0:
params.insert(n_args, ("*", "", False))
stub_node = FunctionNode(
node.name, parameters=params, rtype=rtype, decorators=decorators
)
self._parents[-1].add_child(stub_node)
self._parents.append(stub_node)
self.generic_visit(node)
del self._parents[-1]
return stub_node
def visit_FunctionDef(self, node):
"""Visit a function node."""
node = self.get_function_node(node)
if node is not None:
node._async = False
def visit_AsyncFunctionDef(self, node):
"""Visit an async function node."""
node = self.get_function_node(node)
if node is not None:
node._async = True
def visit_ClassDef(self, node):
"""Visit a class node."""
self.defined_types.add(node.name)
bases = []
for n in node.bases:
base_parts = []
while True:
if not isinstance(n, ast.Attribute):
base_parts.append(n.id)
break
else:
base_parts.append(n.attr)
n = n.value
bases.append(".".join(base_parts[::-1]))
self.required_types |= set(bases)
signature = get_signature(node)
stub_node = ClassNode(node.name, bases=bases, signature=signature)
self._parents[-1].add_child(stub_node)
self._parents.append(stub_node)
self.generic_visit(node)
del self._parents[-1]
@staticmethod
def generate_import_from(module_, names):
"""Generate an import line.
:sig: (str, Set[str]) -> str
:param module_: Name of module to import the names from.
:param names: Names to import.
:return: Import line in stub code.
"""
regular_names = [n for n in names if "::" not in n]
as_names = [n for n in names if "::" in n]
line = ""
if len(regular_names) > 0:
slots = {"m": module_, "n": ", ".join(sorted(regular_names))}
line = "from %(m)s import %(n)s" % slots
if len(line) > LINE_LENGTH_LIMIT:
slots["n"] = INDENT + (",\n" + INDENT).join(sorted(regular_names)) + ","
line = "from %(m)s import (\n%(n)s\n)" % slots
if len(as_names) > 0:
line += "\n"
for as_name in as_names:
a, n = as_name.split("::")
line += "from %(m)s import %(n)s as %(a)s" % {"m": module_, "n": n, "a": a}
return line
def generate_stub(self):
"""Generate the stub code for this source.
:sig: () -> str
:return: Generated stub code.
"""
needed_types = self.required_types - BUILTIN_TYPES
needed_types -= self.defined_types
_logger.debug("defined types: %s", self.defined_types)
module_vars = {v.name for v in self.root.variables}
_logger.debug("module variables: %s", module_vars)
qualified_types = {n for n in needed_types if "." in n}
qualified_namespaces = {".".join(n.split(".")[:-1]) for n in qualified_types}
needed_namespaces = qualified_namespaces - module_vars
needed_types -= qualified_types
_logger.debug("needed namespaces: %s", needed_namespaces)
imported_names = {n.split("::")[0] for n in self.imported_names}
imported_types = imported_names & (needed_types | needed_namespaces)
needed_types -= imported_types
needed_namespaces -= imported_names
_logger.debug("used imported types: %s", imported_types)
try:
typing_mod = __import__("typing")
typing_types = {n for n in needed_types if hasattr(typing_mod, n)}
needed_types -= typing_types
_logger.debug("types from typing module: %s", typing_types)
except ImportError:
typing_types = set()
_logger.warn("typing module not installed")
if len(needed_types) > 0:
raise ValueError("Unknown types: " + ", ".join(needed_types))
out = StringIO()
started = False
if len(typing_types) > 0:
line = self.generate_import_from("typing", typing_types)
out.write(line + "\n")
started = True
if len(imported_types) > 0:
if started:
out.write("\n")
# preserve the import order in the source file
for name in self.imported_names:
if name.split("::")[0] in imported_types:
line = self.generate_import_from(self.imported_names[name], {name})
out.write(line + "\n")
started = True
if len(needed_namespaces) > 0:
if started:
out.write("\n")
as_names = {n.split("::")[0]: n for n in self.imported_namespaces if "::" in n}
for module_ in sorted(needed_namespaces):
if module_ in as_names:
a, n = as_names[module_].split("::")
out.write("import " + n + " as " + a + "\n")
else:
out.write("import " + module_ + "\n")
started = True
if len(self.aliases) > 0:
if started:
out.write("\n")
for alias, signature in self.aliases.items():
out.write("%s = %s\n" % (alias, signature))
started = True
if started:
out.write("\n")
stub_lines = self.root.get_code()
n_lines = len(stub_lines)
for line_no in range(n_lines):
prev_line = stub_lines[line_no - 1] if line_no > 0 else None
line = stub_lines[line_no]
next_line = stub_lines[line_no + 1] if line_no < (n_lines - 1) else None
if (
line.startswith("class ")
and (prev_line is not None)
and (
(not prev_line.startswith("class "))
or (next_line and next_line.startswith(" "))
)
):
out.write("\n")
if (
line.startswith("def ")
and (prev_line is not None)
and (prev_line.startswith((" ", "class ")))
):
out.write("\n")
out.write(line + "\n")
line_no += 1
return out.getvalue()
def get_stub(source, generic=False):
"""Get the stub code for a source code.
:sig: (str, bool) -> str
:param source: Source code to generate the stub for.
:param generic: Whether to produce generic stubs.
:return: Generated stub code.
"""
generator = StubGenerator(source, generic=generic)
stub = generator.generate_stub()
return stub
def get_mod_paths(mod_name, out_dir):
"""Get source and stub paths for a module."""
paths = []
try:
mod = get_loader(mod_name)
source = Path(mod.path)
if source.name.endswith(".py"):
source_rel = Path(*mod_name.split("."))
if source.name == "__init__.py":
source_rel = source_rel.joinpath("__init__.py")
destination = Path(out_dir, source_rel.with_suffix(".pyi"))
paths.append((source, destination))
except Exception as e:
_logger.debug(e)
_logger.warning("cannot handle module, skipping: %s", mod_name)
return paths
############################################################
# SPHINX
############################################################
def process_docstring(app, what, name, obj, options, lines):
"""Modify the docstring before generating documentation.
This will insert type declarations for parameters and return type
into the docstring, and remove the signature field so that it will
be excluded from the generated document.
"""
aliases = getattr(app, "_sigaliases", None)
if aliases is None:
if what == "module":
aliases = get_aliases(inspect.getsource(obj).splitlines())
app._sigaliases = aliases
sig_marker = ":" + SIG_FIELD + ":"
is_class = what in ("class", "exception")
signature = extract_signature("\n".join(lines))
if signature is None:
if not is_class:
return
init_method = getattr(obj, "__init__")
init_doc = init_method.__doc__
init_lines = init_doc.splitlines()[1:]
if len(init_lines) > 1:
init_doc = textwrap.dedent("\n".join(init_lines[1:]))
init_lines = init_doc.splitlines()
if sig_marker not in init_doc:
return
sig_started = False
for line in init_lines:
if line.lstrip().startswith(sig_marker):
sig_started = True
if sig_started:
lines.append(line)
signature = extract_signature("\n".join(lines))
if is_class:
obj = init_method
param_types, rtype, _ = parse_signature(signature)
param_names = [p for p in inspect.signature(obj).parameters]
if is_class and (param_names[0] == "self"):
del param_names[0]
# if something goes wrong, don't insert parameter types
if len(param_names) == len(param_types):
for name, type_ in zip(param_names, param_types):
find = ":param %(name)s:" % {"name": name}
alias = aliases.get(type_)
if alias is not None:
type_ = "*%(type)s* :sup:`%(alias)s`" % {"type": type_, "alias": alias}
for i, line in enumerate(lines):
if line.startswith(find):
lines.insert(i, ":type %(name)s: %(type)s" % {"name": name, "type": type_})
break
if not is_class:
for i, line in enumerate(lines):
if line.startswith((":return:", ":returns:")):
lines.insert(i, ":rtype: " + rtype)
break
# remove the signature field
sig_start = 0
while sig_start < len(lines):
if lines[sig_start].startswith(sig_marker):
break
sig_start += 1
sig_end = sig_start + 1
while sig_end < len(lines):
if (not lines[sig_end]) or (lines[sig_end][0] != " "):
break
sig_end += 1
for i in reversed(range(sig_start, sig_end)):
del lines[i]
def setup(app):
    """Register this extension with Sphinx.

    Hooks ``process_docstring`` into autodoc so that signature fields in
    docstrings are turned into type declarations during documentation builds.
    """
    event = "autodoc-process-docstring"
    app.connect(event, process_docstring)
    return {"version": __version__}
############################################################
# MAIN
############################################################
def main(argv=None):
    """Start the command line interface.

    :param argv: Command line arguments (defaults to ``sys.argv``).
    """
    parser = ArgumentParser(prog="pygenstub")
    parser.add_argument("--version", action="version", version="%(prog)s " + __version__)
    parser.add_argument("files", nargs="*", help="generate stubs for given files")
    parser.add_argument(
        "-m",
        "--module",
        action="append",
        metavar="MODULE",
        dest="modules",
        default=[],
        help="generate stubs for given modules",
    )
    parser.add_argument(
        "-o", "--output", metavar="PATH", dest="out_dir", help="change the output directory"
    )
    parser.add_argument(
        "--generic", action="store_true", default=False, help="generate generic stubs"
    )
    parser.add_argument("--debug", action="store_true", help="enable debug messages")
    argv = argv if argv is not None else sys.argv
    arguments = parser.parse_args(argv[1:])
    # set debug mode
    if arguments.debug:
        logging.basicConfig(level=logging.DEBUG)
        _logger.debug("running in debug mode")
    out_dir = arguments.out_dir if arguments.out_dir is not None else ""
    # module stubs have no natural source location, so -o is mandatory with -m
    if (out_dir == "") and (len(arguments.modules) > 0):
        print("Output directory must be given when generating stubs for modules.")
        sys.exit(1)
    # collect (source, destination) pairs for all requested files and modules
    modules = []
    for path in arguments.files:
        # a directory argument means every .py file under it, recursively
        paths = Path(path).glob("**/*.py") if Path(path).is_dir() else [Path(path)]
        for source in paths:
            if str(source).startswith(os.path.pardir):
                source = source.absolute().resolve()
            # keep destinations inside out_dir even for absolute source paths
            if (out_dir != "") and source.is_absolute():
                source = source.relative_to(source.root)
            destination = Path(out_dir, source.with_suffix(".pyi"))
            modules.append((source, destination))
    for mod_name in arguments.modules:
        modules.extend(get_pkg_paths(mod_name, out_dir))
    for source, destination in modules:
        _logger.info("generating stub for %s to path %s", source, destination)
        with source.open() as f:
            code = f.read()
        try:
            stub = get_stub(code, generic=arguments.generic)
        except Exception as e:
            # report the failure but keep processing the remaining files
            print(source, "-", e, file=sys.stderr)
            continue
        if stub != "":
            if not destination.parent.exists():
                destination.parent.mkdir(parents=True)
            with destination.open("w") as f:
                f.write("# " + EDIT_WARNING + "\n\n" + stub)
# run the command line interface when executed as a script
if __name__ == "__main__":
    main()
|
uyar/pygenstub | pygenstub.py | process_docstring | python | def process_docstring(app, what, name, obj, options, lines):
aliases = getattr(app, "_sigaliases", None)
if aliases is None:
if what == "module":
aliases = get_aliases(inspect.getsource(obj).splitlines())
app._sigaliases = aliases
sig_marker = ":" + SIG_FIELD + ":"
is_class = what in ("class", "exception")
signature = extract_signature("\n".join(lines))
if signature is None:
if not is_class:
return
init_method = getattr(obj, "__init__")
init_doc = init_method.__doc__
init_lines = init_doc.splitlines()[1:]
if len(init_lines) > 1:
init_doc = textwrap.dedent("\n".join(init_lines[1:]))
init_lines = init_doc.splitlines()
if sig_marker not in init_doc:
return
sig_started = False
for line in init_lines:
if line.lstrip().startswith(sig_marker):
sig_started = True
if sig_started:
lines.append(line)
signature = extract_signature("\n".join(lines))
if is_class:
obj = init_method
param_types, rtype, _ = parse_signature(signature)
param_names = [p for p in inspect.signature(obj).parameters]
if is_class and (param_names[0] == "self"):
del param_names[0]
# if something goes wrong, don't insert parameter types
if len(param_names) == len(param_types):
for name, type_ in zip(param_names, param_types):
find = ":param %(name)s:" % {"name": name}
alias = aliases.get(type_)
if alias is not None:
type_ = "*%(type)s* :sup:`%(alias)s`" % {"type": type_, "alias": alias}
for i, line in enumerate(lines):
if line.startswith(find):
lines.insert(i, ":type %(name)s: %(type)s" % {"name": name, "type": type_})
break
if not is_class:
for i, line in enumerate(lines):
if line.startswith((":return:", ":returns:")):
lines.insert(i, ":rtype: " + rtype)
break
# remove the signature field
sig_start = 0
while sig_start < len(lines):
if lines[sig_start].startswith(sig_marker):
break
sig_start += 1
sig_end = sig_start + 1
while sig_end < len(lines):
if (not lines[sig_end]) or (lines[sig_end][0] != " "):
break
sig_end += 1
for i in reversed(range(sig_start, sig_end)):
del lines[i] | Modify the docstring before generating documentation.
This will insert type declarations for parameters and return type
into the docstring, and remove the signature field so that it will
be excluded from the generated document. | train | https://github.com/uyar/pygenstub/blob/a6b18a823382d3c6be29c411fb33c58b6090d22c/pygenstub.py#L797-L874 | [
"def extract_signature(docstring):\n \"\"\"Extract the signature from a docstring.\n\n :sig: (str) -> Optional[str]\n :param docstring: Docstring to extract the signature from.\n :return: Extracted signature, or ``None`` if there's no signature.\n \"\"\"\n root = publish_doctree(docstring, setting... | # Copyright (C) 2016-2019 H. Turgut Uyar <uyar@tekir.org>
#
# pygenstub is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pygenstub is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pygenstub. If not, see <http://www.gnu.org/licenses/>.
"""pygenstub is a utility for generating stub files from docstrings in source files.
It takes a source file as input and creates a stub file with the same base name
and the ``.pyi`` extension.
For more information, please refer to the documentation:
https://pygenstub.tekir.org/
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import ast
import inspect
import logging
import os
import re
import sys
import textwrap
from argparse import ArgumentParser
from bisect import bisect
from collections import OrderedDict
from importlib import import_module
from io import StringIO
from pkgutil import get_loader, walk_packages
from docutils.core import publish_doctree
__version__ = "1.4.0" # sig: str
PY3 = sys.version_info >= (3, 0)
if not PY3:
import __builtin__ as builtins
from pathlib2 import Path
else:
import builtins
from pathlib import Path
# sigalias: Document = docutils.nodes.document
BUILTIN_TYPES = {k for k, t in builtins.__dict__.items() if isinstance(t, type)}
BUILTIN_TYPES.add("None")
SIG_FIELD = "sig" # sig: str
SIG_COMMENT = "# sig:" # sig: str
SIG_ALIAS = "# sigalias:" # sig: str
DECORATORS = {"property", "staticmethod", "classmethod"} # sig: Set[str]
LINE_LENGTH_LIMIT = 79
INDENT = 4 * " "
EDIT_WARNING = "THIS FILE IS AUTOMATICALLY GENERATED, DO NOT EDIT MANUALLY."
_RE_QUALIFIED_TYPES = re.compile(r"\w+(?:\.\w+)*")
_RE_COMMENT_IN_STRING = re.compile(r"""['"]\s*%(text)s\s*.*['"]""" % {"text": SIG_COMMENT})
_logger = logging.getLogger(__name__)
def get_fields(node, fields_tag="field_list"):
    """Collect the field names and values found under a node.

    :sig: (Document, str) -> Dict[str, str]
    :param node: Node to get the fields from.
    :param fields_tag: Tag of child node that contains the fields.
    :return: Names and values of fields.
    """
    containers = [child for child in node.children if child.tagname == fields_tag]
    if not containers:
        return {}
    assert len(containers) == 1, "multiple nodes with tag " + fields_tag
    result = {}
    for field in containers[0].children:
        if field.tagname != "field":
            continue
        # each field node carries a name child and a body child
        parts = {part.tagname: part.rawsource.strip() for part in field.children}
        result[parts["field_name"]] = parts["field_body"]
    return result
def extract_signature(docstring):
    """Extract the signature from a docstring.

    :sig: (str) -> Optional[str]
    :param docstring: Docstring to extract the signature from.
    :return: Extracted signature, or ``None`` if there's no signature.
    """
    # report level 5 keeps docutils quiet about docstrings that are not valid reST
    tree = publish_doctree(docstring, settings_overrides={"report_level": 5})
    return get_fields(tree).get(SIG_FIELD)
def get_signature(node):
    """Get the signature of a function or a class.

    :sig: (Union[ast.FunctionDef, ast.AsyncFunctionDef, ast.ClassDef]) -> Optional[str]
    :param node: Node to get the signature from.
    :return: Value of signature field in node docstring, or ``None`` if there's no signature.
    """
    docstring = ast.get_docstring(node)
    return extract_signature(docstring) if docstring is not None else None
def split_parameter_types(parameters):
    """Split a parameter types declaration into individual types.

    The input is the left hand side of a signature (the part before the arrow),
    excluding the parentheses.

    :sig: (str) -> List[str]
    :param parameters: Comma separated parameter types.
    :return: Parameter types.
    """
    if parameters == "":
        return []
    types = []
    bracket_depth = 0
    start = 0
    # Split on top-level commas only; commas nested inside brackets
    # (e.g. ``Dict[str, int]``) belong to a single type expression.
    # Single pass: append each segment as soon as its comma is seen,
    # instead of collecting comma positions first.
    for i, char in enumerate(parameters):
        if (char == ",") and (bracket_depth == 0):
            types.append(parameters[start:i].strip())
            start = i + 1
        elif char == "[":
            bracket_depth += 1
        elif char == "]":
            bracket_depth -= 1
    # the final segment has no trailing comma
    types.append(parameters[start:].strip())
    return types
def parse_signature(signature):
    """Parse a signature into its input and return parameter types.

    This will also collect the types that are required by any of the input
    and return types.

    :sig: (str) -> Tuple[List[str], str, Set[str]]
    :param signature: Signature to parse.
    :return: Input parameter types, return type, and all required types.
    """
    requires = set(_RE_QUALIFIED_TYPES.findall(signature))
    if " -> " not in signature:
        # signature comment on a variable: no parameters,
        # the whole text is the variable's type
        return None, signature.strip(), requires
    lhs, rhs = signature.split(" -> ")
    # drop the parentheses around the parameter type list
    inner = lhs.strip()[1:-1].strip()
    return split_parameter_types(inner), rhs.strip(), requires
class StubNode:
    """A node in a stub tree.

    Serves as the root node of a stub tree and as the base class for
    variable, function, and class nodes.
    """
    def __init__(self):
        """Initialize this stub node.

        :sig: () -> None
        """
        self.variables = [] # sig: List[VariableNode]
        # names already registered, used to suppress duplicate variable entries
        self.variable_names = set() # sig: Set[str]
        self.children = [] # sig: List[Union[FunctionNode, ClassNode]]
        self.parent = None # sig: Optional[StubNode]
    def add_variable(self, node):
        """Add a variable node to this node.

        :sig: (VariableNode) -> None
        :param node: Variable node to add.
        """
        # repeated assignments to the same name are recorded only once
        if node.name not in self.variable_names:
            self.variables.append(node)
            self.variable_names.add(node.name)
            node.parent = self
    def add_child(self, node):
        """Add a function/method or class node to this node.

        :sig: (Union[FunctionNode, ClassNode]) -> None
        :param node: Function or class node to add.
        """
        self.children.append(node)
        node.parent = self
    def get_code(self):
        """Get the stub code for this node.

        The stub code for a node consists of the type annotations of its variables,
        followed by the prototypes of its functions/methods and classes.

        :sig: () -> List[str]
        :return: Lines of stub code for this node.
        """
        stub = []
        for child in self.variables:
            stub.extend(child.get_code())
        # separate variables from defs with a blank line, except inside a class
        if (
            (len(self.variables) > 0)
            and (len(self.children) > 0)
            and (not isinstance(self, ClassNode))
        ):
            stub.append("")
        for child in self.children:
            stub.extend(child.get_code())
        return stub
class VariableNode(StubNode):
    """A node representing an assignment in a stub tree."""
    def __init__(self, name, type_):
        """Initialize this variable node.

        :sig: (str, str) -> None
        :param name: Name of variable that is being assigned to.
        :param type_: Type of variable.
        """
        if PY3:
            super().__init__()
        else:
            StubNode.__init__(self)
        self.name = name # sig: str
        self.type_ = type_ # sig: str
    def get_code(self):
        """Get the type annotation for this variable.

        :sig: () -> List[str]
        :return: Lines of stub code for this variable.
        """
        slots = {"n": self.name, "t": self.type_}
        return ["%(n)s = ... # type: %(t)s" % slots]
class FunctionNode(StubNode):
"""A node representing a function in a stub tree."""
def __init__(self, name, parameters, rtype, decorators=None):
"""Initialize this function node.
The parameters have to given as a list of triples where each item specifies
the name of the parameter, its type, and whether it has a default value or not.
:sig: (str, Sequence[Tuple[str, str, bool]], str, Optional[Sequence[str]]) -> None
:param name: Name of function.
:param parameters: List of parameter triples (name, type, has_default).
:param rtype: Type of return value.
:param decorators: Decorators of function.
"""
if not PY3:
StubNode.__init__(self)
else:
super().__init__()
self.name = name # sig: str
self.parameters = parameters # sig: Sequence[Tuple[str, str, bool]]
self.rtype = rtype # sig: str
self.decorators = decorators if decorators is not None else [] # sig: Sequence[str]
self._async = False # sig: bool
def get_code(self):
"""Get the stub code for this function.
:sig: () -> List[str]
:return: Lines of stub code for this function.
"""
stub = []
for deco in self.decorators:
if (deco in DECORATORS) or deco.endswith(".setter"):
stub.append("@" + deco)
parameters = []
for name, type_, has_default in self.parameters:
decl = "%(n)s%(t)s%(d)s" % {
"n": name,
"t": ": " + type_ if type_ else "",
"d": " = ..." if has_default else "",
}
parameters.append(decl)
slots = {
"a": "async " if self._async else "",
"n": self.name,
"p": ", ".join(parameters),
"r": self.rtype,
}
prototype = "%(a)sdef %(n)s(%(p)s) -> %(r)s: ..." % slots
if len(prototype) <= LINE_LENGTH_LIMIT:
stub.append(prototype)
elif len(INDENT + slots["p"]) <= LINE_LENGTH_LIMIT:
stub.append("%(a)sdef %(n)s(" % slots)
stub.append(INDENT + slots["p"])
stub.append(") -> %(r)s: ..." % slots)
else:
stub.append("%(a)sdef %(n)s(" % slots)
for param in parameters:
stub.append(INDENT + param + ",")
stub.append(") -> %(r)s: ..." % slots)
return stub
class ClassNode(StubNode):
"""A node representing a class in a stub tree."""
def __init__(self, name, bases, signature=None):
"""Initialize this class node.
:sig: (str, Sequence[str], Optional[str]) -> None
:param name: Name of class.
:param bases: Base classes of class.
:param signature: Signature of class, to be used in __init__ method.
"""
if not PY3:
StubNode.__init__(self)
else:
super().__init__()
self.name = name # sig: str
self.bases = bases # sig: Sequence[str]
self.signature = signature # sig: Optional[str]
def get_code(self):
"""Get the stub code for this class.
:sig: () -> List[str]
:return: Lines of stub code for this class.
"""
stub = []
bases = ("(" + ", ".join(self.bases) + ")") if len(self.bases) > 0 else ""
slots = {"n": self.name, "b": bases}
if (len(self.children) == 0) and (len(self.variables) == 0):
stub.append("class %(n)s%(b)s: ..." % slots)
else:
stub.append("class %(n)s%(b)s:" % slots)
super_code = super().get_code() if PY3 else StubNode.get_code(self)
for line in super_code:
stub.append(INDENT + line)
return stub
def get_aliases(lines):
    """Get the type aliases in the source.

    :sig: (Sequence[str]) -> Dict[str, str]
    :param lines: Lines of the source code.
    :return: Aliases and their definitions.
    """
    aliases = {}
    for raw in lines:
        stripped = raw.strip()
        # an alias comment looks like: "# sigalias: Name = definition"
        if stripped.startswith(SIG_ALIAS):
            _, remainder = stripped.split(SIG_ALIAS)
            name, definition = [part.strip() for part in remainder.split("=")]
            aliases[name] = definition
    return aliases
class StubGenerator(ast.NodeVisitor):
"""A transformer that generates stub declarations from a source code."""
def __init__(self, source, generic=False):
"""Initialize this stub generator.
:sig: (str, bool) -> None
:param source: Source code to generate the stub for.
:param generic: Whether to produce generic stubs.
"""
self.root = StubNode() # sig: StubNode
self.generic = generic # sig: bool
self.imported_namespaces = OrderedDict() # sig: OrderedDict[str, str]
self.imported_names = OrderedDict() # sig: OrderedDict[str, str]
self.defined_types = set() # sig: Set[str]
self.required_types = set() # sig: Set[str]
self.aliases = OrderedDict() # sig: OrderedDict[str, str]
self._parents = [self.root] # sig: List[StubNode]
self._code_lines = source.splitlines() # sig: List[str]
self.collect_aliases()
ast_tree = ast.parse(source)
self.visit(ast_tree)
def collect_aliases(self):
"""Collect the type aliases in the source.
:sig: () -> None
"""
self.aliases = get_aliases(self._code_lines)
for alias, signature in self.aliases.items():
_, _, requires = parse_signature(signature)
self.required_types |= requires
self.defined_types |= {alias}
def visit_Import(self, node):
"""Visit an import node."""
line = self._code_lines[node.lineno - 1]
module_name = line.split("import")[0].strip()
for name in node.names:
imported_name = name.name
if name.asname:
imported_name = name.asname + "::" + imported_name
self.imported_namespaces[imported_name] = module_name
def visit_ImportFrom(self, node):
"""Visit an from-import node."""
line = self._code_lines[node.lineno - 1]
module_name = line.split("from")[1].split("import")[0].strip()
for name in node.names:
imported_name = name.name
if name.asname:
imported_name = name.asname + "::" + imported_name
self.imported_names[imported_name] = module_name
def visit_Assign(self, node):
"""Visit an assignment node."""
line = self._code_lines[node.lineno - 1]
if SIG_COMMENT in line:
line = _RE_COMMENT_IN_STRING.sub("", line)
if (SIG_COMMENT not in line) and (not self.generic):
return
if SIG_COMMENT in line:
_, signature = line.split(SIG_COMMENT)
_, return_type, requires = parse_signature(signature)
self.required_types |= requires
parent = self._parents[-1]
for var in node.targets:
if isinstance(var, ast.Name):
name, p = var.id, parent
elif (
isinstance(var, ast.Attribute)
and isinstance(var.value, ast.Name)
and (var.value.id == "self")
):
name, p = var.attr, parent.parent
else:
name, p = None, None
if name is not None:
if self.generic:
return_type = "Any"
self.required_types.add(return_type)
stub_node = VariableNode(name, return_type)
p.add_variable(stub_node)
def get_function_node(self, node):
"""Process a function node.
:sig: (Union[ast.FunctionDef, ast.AsyncFunctionDef]) -> FunctionNode
:param node: Node to process.
:return: Generated function node in stub tree.
"""
decorators = []
for d in node.decorator_list:
if hasattr(d, "id"):
decorators.append(d.id)
elif hasattr(d, "func"):
decorators.append(d.func.id)
elif hasattr(d, "value"):
decorators.append(d.value.id + "." + d.attr)
signature = get_signature(node)
if signature is None:
parent = self._parents[-1]
if isinstance(parent, ClassNode) and (node.name == "__init__"):
signature = parent.signature
if (signature is None) and (not self.generic):
return None
param_names = [arg.arg if PY3 else arg.id for arg in node.args.args]
n_args = len(param_names)
if signature is None:
param_types, rtype, requires = ["Any"] * n_args, "Any", {"Any"}
else:
_logger.debug("parsing signature for %s", node.name)
param_types, rtype, requires = parse_signature(signature)
# TODO: only in classes
if ((n_args > 0) and (param_names[0] == "self")) or (
(n_args > 0) and (param_names[0] == "cls") and ("classmethod" in decorators)
):
if signature is None:
param_types[0] = ""
else:
param_types.insert(0, "")
_logger.debug("parameter types: %s", param_types)
_logger.debug("return type: %s", rtype)
_logger.debug("required types: %s", requires)
self.required_types |= requires
if node.args.vararg is not None:
param_names.append("*" + (node.args.vararg.arg if PY3 else node.args.vararg))
param_types.append("")
if node.args.kwarg is not None:
param_names.append("**" + (node.args.kwarg.arg if PY3 else node.args.kwarg))
param_types.append("")
kwonly_args = getattr(node.args, "kwonlyargs", [])
if len(kwonly_args) > 0:
param_names.extend([arg.arg for arg in kwonly_args])
if signature is None:
param_types.extend(["Any"] * len(kwonly_args))
if len(param_types) != len(param_names):
raise ValueError("Parameter names and types don't match: " + node.name)
param_locs = [(a.lineno, a.col_offset) for a in (node.args.args + kwonly_args)]
param_defaults = {
bisect(param_locs, (d.lineno, d.col_offset)) - 1 for d in node.args.defaults
}
kwonly_defaults = getattr(node.args, "kw_defaults", [])
for i, d in enumerate(kwonly_defaults):
if d is not None:
param_defaults.add(n_args + i)
params = [
(name, type_, i in param_defaults)
for i, (name, type_) in enumerate(zip(param_names, param_types))
]
if len(kwonly_args) > 0:
params.insert(n_args, ("*", "", False))
stub_node = FunctionNode(
node.name, parameters=params, rtype=rtype, decorators=decorators
)
self._parents[-1].add_child(stub_node)
self._parents.append(stub_node)
self.generic_visit(node)
del self._parents[-1]
return stub_node
def visit_FunctionDef(self, node):
"""Visit a function node."""
node = self.get_function_node(node)
if node is not None:
node._async = False
def visit_AsyncFunctionDef(self, node):
"""Visit an async function node."""
node = self.get_function_node(node)
if node is not None:
node._async = True
def visit_ClassDef(self, node):
"""Visit a class node."""
self.defined_types.add(node.name)
bases = []
for n in node.bases:
base_parts = []
while True:
if not isinstance(n, ast.Attribute):
base_parts.append(n.id)
break
else:
base_parts.append(n.attr)
n = n.value
bases.append(".".join(base_parts[::-1]))
self.required_types |= set(bases)
signature = get_signature(node)
stub_node = ClassNode(node.name, bases=bases, signature=signature)
self._parents[-1].add_child(stub_node)
self._parents.append(stub_node)
self.generic_visit(node)
del self._parents[-1]
@staticmethod
def generate_import_from(module_, names):
"""Generate an import line.
:sig: (str, Set[str]) -> str
:param module_: Name of module to import the names from.
:param names: Names to import.
:return: Import line in stub code.
"""
regular_names = [n for n in names if "::" not in n]
as_names = [n for n in names if "::" in n]
line = ""
if len(regular_names) > 0:
slots = {"m": module_, "n": ", ".join(sorted(regular_names))}
line = "from %(m)s import %(n)s" % slots
if len(line) > LINE_LENGTH_LIMIT:
slots["n"] = INDENT + (",\n" + INDENT).join(sorted(regular_names)) + ","
line = "from %(m)s import (\n%(n)s\n)" % slots
if len(as_names) > 0:
line += "\n"
for as_name in as_names:
a, n = as_name.split("::")
line += "from %(m)s import %(n)s as %(a)s" % {"m": module_, "n": n, "a": a}
return line
def generate_stub(self):
"""Generate the stub code for this source.
:sig: () -> str
:return: Generated stub code.
"""
needed_types = self.required_types - BUILTIN_TYPES
needed_types -= self.defined_types
_logger.debug("defined types: %s", self.defined_types)
module_vars = {v.name for v in self.root.variables}
_logger.debug("module variables: %s", module_vars)
qualified_types = {n for n in needed_types if "." in n}
qualified_namespaces = {".".join(n.split(".")[:-1]) for n in qualified_types}
needed_namespaces = qualified_namespaces - module_vars
needed_types -= qualified_types
_logger.debug("needed namespaces: %s", needed_namespaces)
imported_names = {n.split("::")[0] for n in self.imported_names}
imported_types = imported_names & (needed_types | needed_namespaces)
needed_types -= imported_types
needed_namespaces -= imported_names
_logger.debug("used imported types: %s", imported_types)
try:
typing_mod = __import__("typing")
typing_types = {n for n in needed_types if hasattr(typing_mod, n)}
needed_types -= typing_types
_logger.debug("types from typing module: %s", typing_types)
except ImportError:
typing_types = set()
_logger.warn("typing module not installed")
if len(needed_types) > 0:
raise ValueError("Unknown types: " + ", ".join(needed_types))
out = StringIO()
started = False
if len(typing_types) > 0:
line = self.generate_import_from("typing", typing_types)
out.write(line + "\n")
started = True
if len(imported_types) > 0:
if started:
out.write("\n")
# preserve the import order in the source file
for name in self.imported_names:
if name.split("::")[0] in imported_types:
line = self.generate_import_from(self.imported_names[name], {name})
out.write(line + "\n")
started = True
if len(needed_namespaces) > 0:
if started:
out.write("\n")
as_names = {n.split("::")[0]: n for n in self.imported_namespaces if "::" in n}
for module_ in sorted(needed_namespaces):
if module_ in as_names:
a, n = as_names[module_].split("::")
out.write("import " + n + " as " + a + "\n")
else:
out.write("import " + module_ + "\n")
started = True
if len(self.aliases) > 0:
if started:
out.write("\n")
for alias, signature in self.aliases.items():
out.write("%s = %s\n" % (alias, signature))
started = True
if started:
out.write("\n")
stub_lines = self.root.get_code()
n_lines = len(stub_lines)
for line_no in range(n_lines):
prev_line = stub_lines[line_no - 1] if line_no > 0 else None
line = stub_lines[line_no]
next_line = stub_lines[line_no + 1] if line_no < (n_lines - 1) else None
if (
line.startswith("class ")
and (prev_line is not None)
and (
(not prev_line.startswith("class "))
or (next_line and next_line.startswith(" "))
)
):
out.write("\n")
if (
line.startswith("def ")
and (prev_line is not None)
and (prev_line.startswith((" ", "class ")))
):
out.write("\n")
out.write(line + "\n")
line_no += 1
return out.getvalue()
def get_stub(source, generic=False):
    """Get the stub code for a source code.

    :sig: (str, bool) -> str
    :param source: Source code to generate the stub for.
    :param generic: Whether to produce generic stubs.
    :return: Generated stub code.
    """
    # StubGenerator walks the AST on construction; generation is a second pass
    return StubGenerator(source, generic=generic).generate_stub()
def get_mod_paths(mod_name, out_dir):
    """Get source and stub paths for a module.

    :param mod_name: Dotted name of the module.
    :param out_dir: Directory to write the stub file into.
    :return: List of (source path, stub destination path) pairs.
    """
    paths = []
    try:
        mod = get_loader(mod_name)
        source = Path(mod.path)
        # only pure-Python sources can be stubbed; extension modules are skipped
        if source.name.endswith(".py"):
            # mirror the package layout under the output directory
            source_rel = Path(*mod_name.split("."))
            if source.name == "__init__.py":
                source_rel = source_rel.joinpath("__init__.py")
            destination = Path(out_dir, source_rel.with_suffix(".pyi"))
            paths.append((source, destination))
    except Exception as e:
        # best-effort: log and skip modules that cannot be loaded
        _logger.debug(e)
        _logger.warning("cannot handle module, skipping: %s", mod_name)
    return paths
def get_pkg_paths(pkg_name, out_dir):
    """Recursively get all source and stub paths for a package.

    :param pkg_name: Dotted name of the package.
    :param out_dir: Directory to write the stub files into.
    :return: List of (source path, stub destination path) pairs.
    """
    paths = []
    try:
        pkg = import_module(pkg_name)
        # a module without __path__ is not a package; handle it directly
        if not hasattr(pkg, "__path__"):
            return get_mod_paths(pkg_name, out_dir)
        for mod_info in walk_packages(pkg.__path__, pkg.__name__ + "."):
            mod_paths = get_mod_paths(mod_info.name, out_dir)
            paths.extend(mod_paths)
    except Exception as e:
        # best-effort: log and skip packages that fail to import
        _logger.debug(e)
        _logger.warning("cannot handle package, skipping: %s", pkg_name)
    return paths
############################################################
# SPHINX
############################################################
def setup(app):
"""Register to Sphinx."""
app.connect("autodoc-process-docstring", process_docstring)
return {"version": __version__}
############################################################
# MAIN
############################################################
def main(argv=None):
"""Start the command line interface."""
parser = ArgumentParser(prog="pygenstub")
parser.add_argument("--version", action="version", version="%(prog)s " + __version__)
parser.add_argument("files", nargs="*", help="generate stubs for given files")
parser.add_argument(
"-m",
"--module",
action="append",
metavar="MODULE",
dest="modules",
default=[],
help="generate stubs for given modules",
)
parser.add_argument(
"-o", "--output", metavar="PATH", dest="out_dir", help="change the output directory"
)
parser.add_argument(
"--generic", action="store_true", default=False, help="generate generic stubs"
)
parser.add_argument("--debug", action="store_true", help="enable debug messages")
argv = argv if argv is not None else sys.argv
arguments = parser.parse_args(argv[1:])
# set debug mode
if arguments.debug:
logging.basicConfig(level=logging.DEBUG)
_logger.debug("running in debug mode")
out_dir = arguments.out_dir if arguments.out_dir is not None else ""
if (out_dir == "") and (len(arguments.modules) > 0):
print("Output directory must be given when generating stubs for modules.")
sys.exit(1)
modules = []
for path in arguments.files:
paths = Path(path).glob("**/*.py") if Path(path).is_dir() else [Path(path)]
for source in paths:
if str(source).startswith(os.path.pardir):
source = source.absolute().resolve()
if (out_dir != "") and source.is_absolute():
source = source.relative_to(source.root)
destination = Path(out_dir, source.with_suffix(".pyi"))
modules.append((source, destination))
for mod_name in arguments.modules:
modules.extend(get_pkg_paths(mod_name, out_dir))
for source, destination in modules:
_logger.info("generating stub for %s to path %s", source, destination)
with source.open() as f:
code = f.read()
try:
stub = get_stub(code, generic=arguments.generic)
except Exception as e:
print(source, "-", e, file=sys.stderr)
continue
if stub != "":
if not destination.parent.exists():
destination.parent.mkdir(parents=True)
with destination.open("w") as f:
f.write("# " + EDIT_WARNING + "\n\n" + stub)
if __name__ == "__main__":
main()
|
uyar/pygenstub | pygenstub.py | main | python | def main(argv=None):
parser = ArgumentParser(prog="pygenstub")
parser.add_argument("--version", action="version", version="%(prog)s " + __version__)
parser.add_argument("files", nargs="*", help="generate stubs for given files")
parser.add_argument(
"-m",
"--module",
action="append",
metavar="MODULE",
dest="modules",
default=[],
help="generate stubs for given modules",
)
parser.add_argument(
"-o", "--output", metavar="PATH", dest="out_dir", help="change the output directory"
)
parser.add_argument(
"--generic", action="store_true", default=False, help="generate generic stubs"
)
parser.add_argument("--debug", action="store_true", help="enable debug messages")
argv = argv if argv is not None else sys.argv
arguments = parser.parse_args(argv[1:])
# set debug mode
if arguments.debug:
logging.basicConfig(level=logging.DEBUG)
_logger.debug("running in debug mode")
out_dir = arguments.out_dir if arguments.out_dir is not None else ""
if (out_dir == "") and (len(arguments.modules) > 0):
print("Output directory must be given when generating stubs for modules.")
sys.exit(1)
modules = []
for path in arguments.files:
paths = Path(path).glob("**/*.py") if Path(path).is_dir() else [Path(path)]
for source in paths:
if str(source).startswith(os.path.pardir):
source = source.absolute().resolve()
if (out_dir != "") and source.is_absolute():
source = source.relative_to(source.root)
destination = Path(out_dir, source.with_suffix(".pyi"))
modules.append((source, destination))
for mod_name in arguments.modules:
modules.extend(get_pkg_paths(mod_name, out_dir))
for source, destination in modules:
_logger.info("generating stub for %s to path %s", source, destination)
with source.open() as f:
code = f.read()
try:
stub = get_stub(code, generic=arguments.generic)
except Exception as e:
print(source, "-", e, file=sys.stderr)
continue
if stub != "":
if not destination.parent.exists():
destination.parent.mkdir(parents=True)
with destination.open("w") as f:
f.write("# " + EDIT_WARNING + "\n\n" + stub) | Start the command line interface. | train | https://github.com/uyar/pygenstub/blob/a6b18a823382d3c6be29c411fb33c58b6090d22c/pygenstub.py#L888-L951 | [
"def get_stub(source, generic=False):\n \"\"\"Get the stub code for a source code.\n\n :sig: (str, bool) -> str\n :param source: Source code to generate the stub for.\n :param generic: Whether to produce generic stubs.\n :return: Generated stub code.\n \"\"\"\n generator = StubGenerator(source,... | # Copyright (C) 2016-2019 H. Turgut Uyar <uyar@tekir.org>
#
# pygenstub is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pygenstub is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pygenstub. If not, see <http://www.gnu.org/licenses/>.
"""pygenstub is a utility for generating stub files from docstrings in source files.
It takes a source file as input and creates a stub file with the same base name
and the ``.pyi`` extension.
For more information, please refer to the documentation:
https://pygenstub.tekir.org/
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import ast
import inspect
import logging
import os
import re
import sys
import textwrap
from argparse import ArgumentParser
from bisect import bisect
from collections import OrderedDict
from importlib import import_module
from io import StringIO
from pkgutil import get_loader, walk_packages
from docutils.core import publish_doctree
__version__ = "1.4.0" # sig: str
PY3 = sys.version_info >= (3, 0)
if not PY3:
import __builtin__ as builtins
from pathlib2 import Path
else:
import builtins
from pathlib import Path
# sigalias: Document = docutils.nodes.document
BUILTIN_TYPES = {k for k, t in builtins.__dict__.items() if isinstance(t, type)}
BUILTIN_TYPES.add("None")
SIG_FIELD = "sig" # sig: str
SIG_COMMENT = "# sig:" # sig: str
SIG_ALIAS = "# sigalias:" # sig: str
DECORATORS = {"property", "staticmethod", "classmethod"} # sig: Set[str]
LINE_LENGTH_LIMIT = 79
INDENT = 4 * " "
EDIT_WARNING = "THIS FILE IS AUTOMATICALLY GENERATED, DO NOT EDIT MANUALLY."
_RE_QUALIFIED_TYPES = re.compile(r"\w+(?:\.\w+)*")
_RE_COMMENT_IN_STRING = re.compile(r"""['"]\s*%(text)s\s*.*['"]""" % {"text": SIG_COMMENT})
_logger = logging.getLogger(__name__)
def get_fields(node, fields_tag="field_list"):
"""Get the field names and their values from a node.
:sig: (Document, str) -> Dict[str, str]
:param node: Node to get the fields from.
:param fields_tag: Tag of child node that contains the fields.
:return: Names and values of fields.
"""
fields_nodes = [c for c in node.children if c.tagname == fields_tag]
if len(fields_nodes) == 0:
return {}
assert len(fields_nodes) == 1, "multiple nodes with tag " + fields_tag
fields_node = fields_nodes[0]
fields = [
{f.tagname: f.rawsource.strip() for f in n.children}
for n in fields_node.children
if n.tagname == "field"
]
return {f["field_name"]: f["field_body"] for f in fields}
def extract_signature(docstring):
"""Extract the signature from a docstring.
:sig: (str) -> Optional[str]
:param docstring: Docstring to extract the signature from.
:return: Extracted signature, or ``None`` if there's no signature.
"""
root = publish_doctree(docstring, settings_overrides={"report_level": 5})
fields = get_fields(root)
return fields.get(SIG_FIELD)
def get_signature(node):
"""Get the signature of a function or a class.
:sig: (Union[ast.FunctionDef, ast.AsyncFunctionDef, ast.ClassDef]) -> Optional[str]
:param node: Node to get the signature from.
:return: Value of signature field in node docstring, or ``None`` if there's no signature.
"""
docstring = ast.get_docstring(node)
if docstring is None:
return None
return extract_signature(docstring)
def split_parameter_types(parameters):
"""Split a parameter types declaration into individual types.
The input is the left hand side of a signature (the part before the arrow),
excluding the parentheses.
:sig: (str) -> List[str]
:param parameters: Comma separated parameter types.
:return: Parameter types.
"""
if parameters == "":
return []
# only consider the top level commas, ignore the ones in []
commas = []
bracket_depth = 0
for i, char in enumerate(parameters):
if (char == ",") and (bracket_depth == 0):
commas.append(i)
elif char == "[":
bracket_depth += 1
elif char == "]":
bracket_depth -= 1
types = []
last_i = 0
for i in commas:
types.append(parameters[last_i:i].strip())
last_i = i + 1
else:
types.append(parameters[last_i:].strip())
return types
def parse_signature(signature):
"""Parse a signature into its input and return parameter types.
This will also collect the types that are required by any of the input
and return types.
:sig: (str) -> Tuple[List[str], str, Set[str]]
:param signature: Signature to parse.
:return: Input parameter types, return type, and all required types.
"""
if " -> " not in signature:
# signature comment: no parameters, treat variable type as return type
param_types, return_type = None, signature.strip()
else:
lhs, return_type = [s.strip() for s in signature.split(" -> ")]
csv = lhs[1:-1].strip() # remove the parentheses around the parameter type list
param_types = split_parameter_types(csv)
requires = set(_RE_QUALIFIED_TYPES.findall(signature))
return param_types, return_type, requires
class StubNode:
"""A node in a stub tree."""
def __init__(self):
"""Initialize this stub node.
:sig: () -> None
"""
self.variables = [] # sig: List[VariableNode]
self.variable_names = set() # sig: Set[str]
self.children = [] # sig: List[Union[FunctionNode, ClassNode]]
self.parent = None # sig: Optional[StubNode]
def add_variable(self, node):
"""Add a variable node to this node.
:sig: (VariableNode) -> None
:param node: Variable node to add.
"""
if node.name not in self.variable_names:
self.variables.append(node)
self.variable_names.add(node.name)
node.parent = self
def add_child(self, node):
"""Add a function/method or class node to this node.
:sig: (Union[FunctionNode, ClassNode]) -> None
:param node: Function or class node to add.
"""
self.children.append(node)
node.parent = self
def get_code(self):
"""Get the stub code for this node.
The stub code for a node consists of the type annotations of its variables,
followed by the prototypes of its functions/methods and classes.
:sig: () -> List[str]
:return: Lines of stub code for this node.
"""
stub = []
for child in self.variables:
stub.extend(child.get_code())
if (
(len(self.variables) > 0)
and (len(self.children) > 0)
and (not isinstance(self, ClassNode))
):
stub.append("")
for child in self.children:
stub.extend(child.get_code())
return stub
class VariableNode(StubNode):
"""A node representing an assignment in a stub tree."""
def __init__(self, name, type_):
"""Initialize this variable node.
:sig: (str, str) -> None
:param name: Name of variable that is being assigned to.
:param type_: Type of variable.
"""
if not PY3:
StubNode.__init__(self)
else:
super().__init__()
self.name = name # sig: str
self.type_ = type_ # sig: str
def get_code(self):
"""Get the type annotation for this variable.
:sig: () -> List[str]
:return: Lines of stub code for this variable.
"""
return ["%(n)s = ... # type: %(t)s" % {"n": self.name, "t": self.type_}]
class FunctionNode(StubNode):
"""A node representing a function in a stub tree."""
def __init__(self, name, parameters, rtype, decorators=None):
"""Initialize this function node.
The parameters have to given as a list of triples where each item specifies
the name of the parameter, its type, and whether it has a default value or not.
:sig: (str, Sequence[Tuple[str, str, bool]], str, Optional[Sequence[str]]) -> None
:param name: Name of function.
:param parameters: List of parameter triples (name, type, has_default).
:param rtype: Type of return value.
:param decorators: Decorators of function.
"""
if not PY3:
StubNode.__init__(self)
else:
super().__init__()
self.name = name # sig: str
self.parameters = parameters # sig: Sequence[Tuple[str, str, bool]]
self.rtype = rtype # sig: str
self.decorators = decorators if decorators is not None else [] # sig: Sequence[str]
self._async = False # sig: bool
def get_code(self):
"""Get the stub code for this function.
:sig: () -> List[str]
:return: Lines of stub code for this function.
"""
stub = []
for deco in self.decorators:
if (deco in DECORATORS) or deco.endswith(".setter"):
stub.append("@" + deco)
parameters = []
for name, type_, has_default in self.parameters:
decl = "%(n)s%(t)s%(d)s" % {
"n": name,
"t": ": " + type_ if type_ else "",
"d": " = ..." if has_default else "",
}
parameters.append(decl)
slots = {
"a": "async " if self._async else "",
"n": self.name,
"p": ", ".join(parameters),
"r": self.rtype,
}
prototype = "%(a)sdef %(n)s(%(p)s) -> %(r)s: ..." % slots
if len(prototype) <= LINE_LENGTH_LIMIT:
stub.append(prototype)
elif len(INDENT + slots["p"]) <= LINE_LENGTH_LIMIT:
stub.append("%(a)sdef %(n)s(" % slots)
stub.append(INDENT + slots["p"])
stub.append(") -> %(r)s: ..." % slots)
else:
stub.append("%(a)sdef %(n)s(" % slots)
for param in parameters:
stub.append(INDENT + param + ",")
stub.append(") -> %(r)s: ..." % slots)
return stub
class ClassNode(StubNode):
"""A node representing a class in a stub tree."""
def __init__(self, name, bases, signature=None):
"""Initialize this class node.
:sig: (str, Sequence[str], Optional[str]) -> None
:param name: Name of class.
:param bases: Base classes of class.
:param signature: Signature of class, to be used in __init__ method.
"""
if not PY3:
StubNode.__init__(self)
else:
super().__init__()
self.name = name # sig: str
self.bases = bases # sig: Sequence[str]
self.signature = signature # sig: Optional[str]
def get_code(self):
"""Get the stub code for this class.
:sig: () -> List[str]
:return: Lines of stub code for this class.
"""
stub = []
bases = ("(" + ", ".join(self.bases) + ")") if len(self.bases) > 0 else ""
slots = {"n": self.name, "b": bases}
if (len(self.children) == 0) and (len(self.variables) == 0):
stub.append("class %(n)s%(b)s: ..." % slots)
else:
stub.append("class %(n)s%(b)s:" % slots)
super_code = super().get_code() if PY3 else StubNode.get_code(self)
for line in super_code:
stub.append(INDENT + line)
return stub
def get_aliases(lines):
"""Get the type aliases in the source.
:sig: (Sequence[str]) -> Dict[str, str]
:param lines: Lines of the source code.
:return: Aliases and their their definitions.
"""
aliases = {}
for line in lines:
line = line.strip()
if len(line) > 0 and line.startswith(SIG_ALIAS):
_, content = line.split(SIG_ALIAS)
alias, signature = [t.strip() for t in content.split("=")]
aliases[alias] = signature
return aliases
class StubGenerator(ast.NodeVisitor):
"""A transformer that generates stub declarations from a source code."""
def __init__(self, source, generic=False):
"""Initialize this stub generator.
:sig: (str, bool) -> None
:param source: Source code to generate the stub for.
:param generic: Whether to produce generic stubs.
"""
self.root = StubNode() # sig: StubNode
self.generic = generic # sig: bool
self.imported_namespaces = OrderedDict() # sig: OrderedDict[str, str]
self.imported_names = OrderedDict() # sig: OrderedDict[str, str]
self.defined_types = set() # sig: Set[str]
self.required_types = set() # sig: Set[str]
self.aliases = OrderedDict() # sig: OrderedDict[str, str]
self._parents = [self.root] # sig: List[StubNode]
self._code_lines = source.splitlines() # sig: List[str]
self.collect_aliases()
ast_tree = ast.parse(source)
self.visit(ast_tree)
def collect_aliases(self):
"""Collect the type aliases in the source.
:sig: () -> None
"""
self.aliases = get_aliases(self._code_lines)
for alias, signature in self.aliases.items():
_, _, requires = parse_signature(signature)
self.required_types |= requires
self.defined_types |= {alias}
def visit_Import(self, node):
"""Visit an import node."""
line = self._code_lines[node.lineno - 1]
module_name = line.split("import")[0].strip()
for name in node.names:
imported_name = name.name
if name.asname:
imported_name = name.asname + "::" + imported_name
self.imported_namespaces[imported_name] = module_name
def visit_ImportFrom(self, node):
"""Visit an from-import node."""
line = self._code_lines[node.lineno - 1]
module_name = line.split("from")[1].split("import")[0].strip()
for name in node.names:
imported_name = name.name
if name.asname:
imported_name = name.asname + "::" + imported_name
self.imported_names[imported_name] = module_name
def visit_Assign(self, node):
"""Visit an assignment node."""
line = self._code_lines[node.lineno - 1]
if SIG_COMMENT in line:
line = _RE_COMMENT_IN_STRING.sub("", line)
if (SIG_COMMENT not in line) and (not self.generic):
return
if SIG_COMMENT in line:
_, signature = line.split(SIG_COMMENT)
_, return_type, requires = parse_signature(signature)
self.required_types |= requires
parent = self._parents[-1]
for var in node.targets:
if isinstance(var, ast.Name):
name, p = var.id, parent
elif (
isinstance(var, ast.Attribute)
and isinstance(var.value, ast.Name)
and (var.value.id == "self")
):
name, p = var.attr, parent.parent
else:
name, p = None, None
if name is not None:
if self.generic:
return_type = "Any"
self.required_types.add(return_type)
stub_node = VariableNode(name, return_type)
p.add_variable(stub_node)
def get_function_node(self, node):
"""Process a function node.
:sig: (Union[ast.FunctionDef, ast.AsyncFunctionDef]) -> FunctionNode
:param node: Node to process.
:return: Generated function node in stub tree.
"""
decorators = []
for d in node.decorator_list:
if hasattr(d, "id"):
decorators.append(d.id)
elif hasattr(d, "func"):
decorators.append(d.func.id)
elif hasattr(d, "value"):
decorators.append(d.value.id + "." + d.attr)
signature = get_signature(node)
if signature is None:
parent = self._parents[-1]
if isinstance(parent, ClassNode) and (node.name == "__init__"):
signature = parent.signature
if (signature is None) and (not self.generic):
return None
param_names = [arg.arg if PY3 else arg.id for arg in node.args.args]
n_args = len(param_names)
if signature is None:
param_types, rtype, requires = ["Any"] * n_args, "Any", {"Any"}
else:
_logger.debug("parsing signature for %s", node.name)
param_types, rtype, requires = parse_signature(signature)
# TODO: only in classes
if ((n_args > 0) and (param_names[0] == "self")) or (
(n_args > 0) and (param_names[0] == "cls") and ("classmethod" in decorators)
):
if signature is None:
param_types[0] = ""
else:
param_types.insert(0, "")
_logger.debug("parameter types: %s", param_types)
_logger.debug("return type: %s", rtype)
_logger.debug("required types: %s", requires)
self.required_types |= requires
if node.args.vararg is not None:
param_names.append("*" + (node.args.vararg.arg if PY3 else node.args.vararg))
param_types.append("")
if node.args.kwarg is not None:
param_names.append("**" + (node.args.kwarg.arg if PY3 else node.args.kwarg))
param_types.append("")
kwonly_args = getattr(node.args, "kwonlyargs", [])
if len(kwonly_args) > 0:
param_names.extend([arg.arg for arg in kwonly_args])
if signature is None:
param_types.extend(["Any"] * len(kwonly_args))
if len(param_types) != len(param_names):
raise ValueError("Parameter names and types don't match: " + node.name)
param_locs = [(a.lineno, a.col_offset) for a in (node.args.args + kwonly_args)]
param_defaults = {
bisect(param_locs, (d.lineno, d.col_offset)) - 1 for d in node.args.defaults
}
kwonly_defaults = getattr(node.args, "kw_defaults", [])
for i, d in enumerate(kwonly_defaults):
if d is not None:
param_defaults.add(n_args + i)
params = [
(name, type_, i in param_defaults)
for i, (name, type_) in enumerate(zip(param_names, param_types))
]
if len(kwonly_args) > 0:
params.insert(n_args, ("*", "", False))
stub_node = FunctionNode(
node.name, parameters=params, rtype=rtype, decorators=decorators
)
self._parents[-1].add_child(stub_node)
self._parents.append(stub_node)
self.generic_visit(node)
del self._parents[-1]
return stub_node
def visit_FunctionDef(self, node):
"""Visit a function node."""
node = self.get_function_node(node)
if node is not None:
node._async = False
def visit_AsyncFunctionDef(self, node):
"""Visit an async function node."""
node = self.get_function_node(node)
if node is not None:
node._async = True
def visit_ClassDef(self, node):
"""Visit a class node."""
self.defined_types.add(node.name)
bases = []
for n in node.bases:
base_parts = []
while True:
if not isinstance(n, ast.Attribute):
base_parts.append(n.id)
break
else:
base_parts.append(n.attr)
n = n.value
bases.append(".".join(base_parts[::-1]))
self.required_types |= set(bases)
signature = get_signature(node)
stub_node = ClassNode(node.name, bases=bases, signature=signature)
self._parents[-1].add_child(stub_node)
self._parents.append(stub_node)
self.generic_visit(node)
del self._parents[-1]
@staticmethod
def generate_import_from(module_, names):
"""Generate an import line.
:sig: (str, Set[str]) -> str
:param module_: Name of module to import the names from.
:param names: Names to import.
:return: Import line in stub code.
"""
regular_names = [n for n in names if "::" not in n]
as_names = [n for n in names if "::" in n]
line = ""
if len(regular_names) > 0:
slots = {"m": module_, "n": ", ".join(sorted(regular_names))}
line = "from %(m)s import %(n)s" % slots
if len(line) > LINE_LENGTH_LIMIT:
slots["n"] = INDENT + (",\n" + INDENT).join(sorted(regular_names)) + ","
line = "from %(m)s import (\n%(n)s\n)" % slots
if len(as_names) > 0:
line += "\n"
for as_name in as_names:
a, n = as_name.split("::")
line += "from %(m)s import %(n)s as %(a)s" % {"m": module_, "n": n, "a": a}
return line
def generate_stub(self):
"""Generate the stub code for this source.
:sig: () -> str
:return: Generated stub code.
"""
needed_types = self.required_types - BUILTIN_TYPES
needed_types -= self.defined_types
_logger.debug("defined types: %s", self.defined_types)
module_vars = {v.name for v in self.root.variables}
_logger.debug("module variables: %s", module_vars)
qualified_types = {n for n in needed_types if "." in n}
qualified_namespaces = {".".join(n.split(".")[:-1]) for n in qualified_types}
needed_namespaces = qualified_namespaces - module_vars
needed_types -= qualified_types
_logger.debug("needed namespaces: %s", needed_namespaces)
imported_names = {n.split("::")[0] for n in self.imported_names}
imported_types = imported_names & (needed_types | needed_namespaces)
needed_types -= imported_types
needed_namespaces -= imported_names
_logger.debug("used imported types: %s", imported_types)
try:
typing_mod = __import__("typing")
typing_types = {n for n in needed_types if hasattr(typing_mod, n)}
needed_types -= typing_types
_logger.debug("types from typing module: %s", typing_types)
except ImportError:
typing_types = set()
_logger.warn("typing module not installed")
if len(needed_types) > 0:
raise ValueError("Unknown types: " + ", ".join(needed_types))
out = StringIO()
started = False
if len(typing_types) > 0:
line = self.generate_import_from("typing", typing_types)
out.write(line + "\n")
started = True
if len(imported_types) > 0:
if started:
out.write("\n")
# preserve the import order in the source file
for name in self.imported_names:
if name.split("::")[0] in imported_types:
line = self.generate_import_from(self.imported_names[name], {name})
out.write(line + "\n")
started = True
if len(needed_namespaces) > 0:
if started:
out.write("\n")
as_names = {n.split("::")[0]: n for n in self.imported_namespaces if "::" in n}
for module_ in sorted(needed_namespaces):
if module_ in as_names:
a, n = as_names[module_].split("::")
out.write("import " + n + " as " + a + "\n")
else:
out.write("import " + module_ + "\n")
started = True
if len(self.aliases) > 0:
if started:
out.write("\n")
for alias, signature in self.aliases.items():
out.write("%s = %s\n" % (alias, signature))
started = True
if started:
out.write("\n")
stub_lines = self.root.get_code()
n_lines = len(stub_lines)
for line_no in range(n_lines):
prev_line = stub_lines[line_no - 1] if line_no > 0 else None
line = stub_lines[line_no]
next_line = stub_lines[line_no + 1] if line_no < (n_lines - 1) else None
if (
line.startswith("class ")
and (prev_line is not None)
and (
(not prev_line.startswith("class "))
or (next_line and next_line.startswith(" "))
)
):
out.write("\n")
if (
line.startswith("def ")
and (prev_line is not None)
and (prev_line.startswith((" ", "class ")))
):
out.write("\n")
out.write(line + "\n")
line_no += 1
return out.getvalue()
def get_stub(source, generic=False):
"""Get the stub code for a source code.
:sig: (str, bool) -> str
:param source: Source code to generate the stub for.
:param generic: Whether to produce generic stubs.
:return: Generated stub code.
"""
generator = StubGenerator(source, generic=generic)
stub = generator.generate_stub()
return stub
def get_mod_paths(mod_name, out_dir):
"""Get source and stub paths for a module."""
paths = []
try:
mod = get_loader(mod_name)
source = Path(mod.path)
if source.name.endswith(".py"):
source_rel = Path(*mod_name.split("."))
if source.name == "__init__.py":
source_rel = source_rel.joinpath("__init__.py")
destination = Path(out_dir, source_rel.with_suffix(".pyi"))
paths.append((source, destination))
except Exception as e:
_logger.debug(e)
_logger.warning("cannot handle module, skipping: %s", mod_name)
return paths
def get_pkg_paths(pkg_name, out_dir):
"""Recursively get all source and stub paths for a package."""
paths = []
try:
pkg = import_module(pkg_name)
if not hasattr(pkg, "__path__"):
return get_mod_paths(pkg_name, out_dir)
for mod_info in walk_packages(pkg.__path__, pkg.__name__ + "."):
mod_paths = get_mod_paths(mod_info.name, out_dir)
paths.extend(mod_paths)
except Exception as e:
_logger.debug(e)
_logger.warning("cannot handle package, skipping: %s", pkg_name)
return paths
############################################################
# SPHINX
############################################################
def process_docstring(app, what, name, obj, options, lines):
"""Modify the docstring before generating documentation.
This will insert type declarations for parameters and return type
into the docstring, and remove the signature field so that it will
be excluded from the generated document.
"""
aliases = getattr(app, "_sigaliases", None)
if aliases is None:
if what == "module":
aliases = get_aliases(inspect.getsource(obj).splitlines())
app._sigaliases = aliases
sig_marker = ":" + SIG_FIELD + ":"
is_class = what in ("class", "exception")
signature = extract_signature("\n".join(lines))
if signature is None:
if not is_class:
return
init_method = getattr(obj, "__init__")
init_doc = init_method.__doc__
init_lines = init_doc.splitlines()[1:]
if len(init_lines) > 1:
init_doc = textwrap.dedent("\n".join(init_lines[1:]))
init_lines = init_doc.splitlines()
if sig_marker not in init_doc:
return
sig_started = False
for line in init_lines:
if line.lstrip().startswith(sig_marker):
sig_started = True
if sig_started:
lines.append(line)
signature = extract_signature("\n".join(lines))
if is_class:
obj = init_method
param_types, rtype, _ = parse_signature(signature)
param_names = [p for p in inspect.signature(obj).parameters]
if is_class and (param_names[0] == "self"):
del param_names[0]
# if something goes wrong, don't insert parameter types
if len(param_names) == len(param_types):
for name, type_ in zip(param_names, param_types):
find = ":param %(name)s:" % {"name": name}
alias = aliases.get(type_)
if alias is not None:
type_ = "*%(type)s* :sup:`%(alias)s`" % {"type": type_, "alias": alias}
for i, line in enumerate(lines):
if line.startswith(find):
lines.insert(i, ":type %(name)s: %(type)s" % {"name": name, "type": type_})
break
if not is_class:
for i, line in enumerate(lines):
if line.startswith((":return:", ":returns:")):
lines.insert(i, ":rtype: " + rtype)
break
# remove the signature field
sig_start = 0
while sig_start < len(lines):
if lines[sig_start].startswith(sig_marker):
break
sig_start += 1
sig_end = sig_start + 1
while sig_end < len(lines):
if (not lines[sig_end]) or (lines[sig_end][0] != " "):
break
sig_end += 1
for i in reversed(range(sig_start, sig_end)):
del lines[i]
def setup(app):
"""Register to Sphinx."""
app.connect("autodoc-process-docstring", process_docstring)
return {"version": __version__}
############################################################
# MAIN
############################################################
if __name__ == "__main__":
main()
|
uyar/pygenstub | pygenstub.py | StubNode.add_variable | python | def add_variable(self, node):
if node.name not in self.variable_names:
self.variables.append(node)
self.variable_names.add(node.name)
node.parent = self | Add a variable node to this node.
:sig: (VariableNode) -> None
:param node: Variable node to add. | train | https://github.com/uyar/pygenstub/blob/a6b18a823382d3c6be29c411fb33c58b6090d22c/pygenstub.py#L195-L204 | null | class StubNode:
"""A node in a stub tree."""
def __init__(self):
"""Initialize this stub node.
:sig: () -> None
"""
self.variables = [] # sig: List[VariableNode]
self.variable_names = set() # sig: Set[str]
self.children = [] # sig: List[Union[FunctionNode, ClassNode]]
self.parent = None # sig: Optional[StubNode]
def add_child(self, node):
"""Add a function/method or class node to this node.
:sig: (Union[FunctionNode, ClassNode]) -> None
:param node: Function or class node to add.
"""
self.children.append(node)
node.parent = self
def get_code(self):
"""Get the stub code for this node.
The stub code for a node consists of the type annotations of its variables,
followed by the prototypes of its functions/methods and classes.
:sig: () -> List[str]
:return: Lines of stub code for this node.
"""
stub = []
for child in self.variables:
stub.extend(child.get_code())
if (
(len(self.variables) > 0)
and (len(self.children) > 0)
and (not isinstance(self, ClassNode))
):
stub.append("")
for child in self.children:
stub.extend(child.get_code())
return stub
|
uyar/pygenstub | pygenstub.py | StubNode.get_code | python | def get_code(self):
stub = []
for child in self.variables:
stub.extend(child.get_code())
if (
(len(self.variables) > 0)
and (len(self.children) > 0)
and (not isinstance(self, ClassNode))
):
stub.append("")
for child in self.children:
stub.extend(child.get_code())
return stub | Get the stub code for this node.
The stub code for a node consists of the type annotations of its variables,
followed by the prototypes of its functions/methods and classes.
:sig: () -> List[str]
:return: Lines of stub code for this node. | train | https://github.com/uyar/pygenstub/blob/a6b18a823382d3c6be29c411fb33c58b6090d22c/pygenstub.py#L215-L235 | null | class StubNode:
"""A node in a stub tree."""
def __init__(self):
"""Initialize this stub node.
:sig: () -> None
"""
self.variables = [] # sig: List[VariableNode]
self.variable_names = set() # sig: Set[str]
self.children = [] # sig: List[Union[FunctionNode, ClassNode]]
self.parent = None # sig: Optional[StubNode]
def add_variable(self, node):
"""Add a variable node to this node.
:sig: (VariableNode) -> None
:param node: Variable node to add.
"""
if node.name not in self.variable_names:
self.variables.append(node)
self.variable_names.add(node.name)
node.parent = self
def add_child(self, node):
"""Add a function/method or class node to this node.
:sig: (Union[FunctionNode, ClassNode]) -> None
:param node: Function or class node to add.
"""
self.children.append(node)
node.parent = self
|
uyar/pygenstub | pygenstub.py | FunctionNode.get_code | python | def get_code(self):
stub = []
for deco in self.decorators:
if (deco in DECORATORS) or deco.endswith(".setter"):
stub.append("@" + deco)
parameters = []
for name, type_, has_default in self.parameters:
decl = "%(n)s%(t)s%(d)s" % {
"n": name,
"t": ": " + type_ if type_ else "",
"d": " = ..." if has_default else "",
}
parameters.append(decl)
slots = {
"a": "async " if self._async else "",
"n": self.name,
"p": ", ".join(parameters),
"r": self.rtype,
}
prototype = "%(a)sdef %(n)s(%(p)s) -> %(r)s: ..." % slots
if len(prototype) <= LINE_LENGTH_LIMIT:
stub.append(prototype)
elif len(INDENT + slots["p"]) <= LINE_LENGTH_LIMIT:
stub.append("%(a)sdef %(n)s(" % slots)
stub.append(INDENT + slots["p"])
stub.append(") -> %(r)s: ..." % slots)
else:
stub.append("%(a)sdef %(n)s(" % slots)
for param in parameters:
stub.append(INDENT + param + ",")
stub.append(") -> %(r)s: ..." % slots)
return stub | Get the stub code for this function.
:sig: () -> List[str]
:return: Lines of stub code for this function. | train | https://github.com/uyar/pygenstub/blob/a6b18a823382d3c6be29c411fb33c58b6090d22c/pygenstub.py#L290-L331 | null | class FunctionNode(StubNode):
"""A node representing a function in a stub tree."""
def __init__(self, name, parameters, rtype, decorators=None):
"""Initialize this function node.
The parameters have to given as a list of triples where each item specifies
the name of the parameter, its type, and whether it has a default value or not.
:sig: (str, Sequence[Tuple[str, str, bool]], str, Optional[Sequence[str]]) -> None
:param name: Name of function.
:param parameters: List of parameter triples (name, type, has_default).
:param rtype: Type of return value.
:param decorators: Decorators of function.
"""
if not PY3:
StubNode.__init__(self)
else:
super().__init__()
self.name = name # sig: str
self.parameters = parameters # sig: Sequence[Tuple[str, str, bool]]
self.rtype = rtype # sig: str
self.decorators = decorators if decorators is not None else [] # sig: Sequence[str]
self._async = False # sig: bool
|
uyar/pygenstub | pygenstub.py | ClassNode.get_code | python | def get_code(self):
stub = []
bases = ("(" + ", ".join(self.bases) + ")") if len(self.bases) > 0 else ""
slots = {"n": self.name, "b": bases}
if (len(self.children) == 0) and (len(self.variables) == 0):
stub.append("class %(n)s%(b)s: ..." % slots)
else:
stub.append("class %(n)s%(b)s:" % slots)
super_code = super().get_code() if PY3 else StubNode.get_code(self)
for line in super_code:
stub.append(INDENT + line)
return stub | Get the stub code for this class.
:sig: () -> List[str]
:return: Lines of stub code for this class. | train | https://github.com/uyar/pygenstub/blob/a6b18a823382d3c6be29c411fb33c58b6090d22c/pygenstub.py#L353-L369 | [
"def get_code(self):\n \"\"\"Get the stub code for this node.\n\n The stub code for a node consists of the type annotations of its variables,\n followed by the prototypes of its functions/methods and classes.\n\n :sig: () -> List[str]\n :return: Lines of stub code for this node.\n \"\"\"\n stub... | class ClassNode(StubNode):
"""A node representing a class in a stub tree."""
def __init__(self, name, bases, signature=None):
"""Initialize this class node.
:sig: (str, Sequence[str], Optional[str]) -> None
:param name: Name of class.
:param bases: Base classes of class.
:param signature: Signature of class, to be used in __init__ method.
"""
if not PY3:
StubNode.__init__(self)
else:
super().__init__()
self.name = name # sig: str
self.bases = bases # sig: Sequence[str]
self.signature = signature # sig: Optional[str]
|
uyar/pygenstub | pygenstub.py | StubGenerator.collect_aliases | python | def collect_aliases(self):
self.aliases = get_aliases(self._code_lines)
for alias, signature in self.aliases.items():
_, _, requires = parse_signature(signature)
self.required_types |= requires
self.defined_types |= {alias} | Collect the type aliases in the source.
:sig: () -> None | train | https://github.com/uyar/pygenstub/blob/a6b18a823382d3c6be29c411fb33c58b6090d22c/pygenstub.py#L417-L426 | [
"def parse_signature(signature):\n \"\"\"Parse a signature into its input and return parameter types.\n\n This will also collect the types that are required by any of the input\n and return types.\n\n :sig: (str) -> Tuple[List[str], str, Set[str]]\n :param signature: Signature to parse.\n :return:... | class StubGenerator(ast.NodeVisitor):
"""A transformer that generates stub declarations from a source code."""
def __init__(self, source, generic=False):
"""Initialize this stub generator.
:sig: (str, bool) -> None
:param source: Source code to generate the stub for.
:param generic: Whether to produce generic stubs.
"""
self.root = StubNode() # sig: StubNode
self.generic = generic # sig: bool
self.imported_namespaces = OrderedDict() # sig: OrderedDict[str, str]
self.imported_names = OrderedDict() # sig: OrderedDict[str, str]
self.defined_types = set() # sig: Set[str]
self.required_types = set() # sig: Set[str]
self.aliases = OrderedDict() # sig: OrderedDict[str, str]
self._parents = [self.root] # sig: List[StubNode]
self._code_lines = source.splitlines() # sig: List[str]
self.collect_aliases()
ast_tree = ast.parse(source)
self.visit(ast_tree)
def visit_Import(self, node):
"""Visit an import node."""
line = self._code_lines[node.lineno - 1]
module_name = line.split("import")[0].strip()
for name in node.names:
imported_name = name.name
if name.asname:
imported_name = name.asname + "::" + imported_name
self.imported_namespaces[imported_name] = module_name
def visit_ImportFrom(self, node):
"""Visit an from-import node."""
line = self._code_lines[node.lineno - 1]
module_name = line.split("from")[1].split("import")[0].strip()
for name in node.names:
imported_name = name.name
if name.asname:
imported_name = name.asname + "::" + imported_name
self.imported_names[imported_name] = module_name
def visit_Assign(self, node):
"""Visit an assignment node."""
line = self._code_lines[node.lineno - 1]
if SIG_COMMENT in line:
line = _RE_COMMENT_IN_STRING.sub("", line)
if (SIG_COMMENT not in line) and (not self.generic):
return
if SIG_COMMENT in line:
_, signature = line.split(SIG_COMMENT)
_, return_type, requires = parse_signature(signature)
self.required_types |= requires
parent = self._parents[-1]
for var in node.targets:
if isinstance(var, ast.Name):
name, p = var.id, parent
elif (
isinstance(var, ast.Attribute)
and isinstance(var.value, ast.Name)
and (var.value.id == "self")
):
name, p = var.attr, parent.parent
else:
name, p = None, None
if name is not None:
if self.generic:
return_type = "Any"
self.required_types.add(return_type)
stub_node = VariableNode(name, return_type)
p.add_variable(stub_node)
def get_function_node(self, node):
"""Process a function node.
:sig: (Union[ast.FunctionDef, ast.AsyncFunctionDef]) -> FunctionNode
:param node: Node to process.
:return: Generated function node in stub tree.
"""
decorators = []
for d in node.decorator_list:
if hasattr(d, "id"):
decorators.append(d.id)
elif hasattr(d, "func"):
decorators.append(d.func.id)
elif hasattr(d, "value"):
decorators.append(d.value.id + "." + d.attr)
signature = get_signature(node)
if signature is None:
parent = self._parents[-1]
if isinstance(parent, ClassNode) and (node.name == "__init__"):
signature = parent.signature
if (signature is None) and (not self.generic):
return None
param_names = [arg.arg if PY3 else arg.id for arg in node.args.args]
n_args = len(param_names)
if signature is None:
param_types, rtype, requires = ["Any"] * n_args, "Any", {"Any"}
else:
_logger.debug("parsing signature for %s", node.name)
param_types, rtype, requires = parse_signature(signature)
# TODO: only in classes
if ((n_args > 0) and (param_names[0] == "self")) or (
(n_args > 0) and (param_names[0] == "cls") and ("classmethod" in decorators)
):
if signature is None:
param_types[0] = ""
else:
param_types.insert(0, "")
_logger.debug("parameter types: %s", param_types)
_logger.debug("return type: %s", rtype)
_logger.debug("required types: %s", requires)
self.required_types |= requires
if node.args.vararg is not None:
param_names.append("*" + (node.args.vararg.arg if PY3 else node.args.vararg))
param_types.append("")
if node.args.kwarg is not None:
param_names.append("**" + (node.args.kwarg.arg if PY3 else node.args.kwarg))
param_types.append("")
kwonly_args = getattr(node.args, "kwonlyargs", [])
if len(kwonly_args) > 0:
param_names.extend([arg.arg for arg in kwonly_args])
if signature is None:
param_types.extend(["Any"] * len(kwonly_args))
if len(param_types) != len(param_names):
raise ValueError("Parameter names and types don't match: " + node.name)
param_locs = [(a.lineno, a.col_offset) for a in (node.args.args + kwonly_args)]
param_defaults = {
bisect(param_locs, (d.lineno, d.col_offset)) - 1 for d in node.args.defaults
}
kwonly_defaults = getattr(node.args, "kw_defaults", [])
for i, d in enumerate(kwonly_defaults):
if d is not None:
param_defaults.add(n_args + i)
params = [
(name, type_, i in param_defaults)
for i, (name, type_) in enumerate(zip(param_names, param_types))
]
if len(kwonly_args) > 0:
params.insert(n_args, ("*", "", False))
stub_node = FunctionNode(
node.name, parameters=params, rtype=rtype, decorators=decorators
)
self._parents[-1].add_child(stub_node)
self._parents.append(stub_node)
self.generic_visit(node)
del self._parents[-1]
return stub_node
def visit_FunctionDef(self, node):
"""Visit a function node."""
node = self.get_function_node(node)
if node is not None:
node._async = False
def visit_AsyncFunctionDef(self, node):
"""Visit an async function node."""
node = self.get_function_node(node)
if node is not None:
node._async = True
def visit_ClassDef(self, node):
"""Visit a class node."""
self.defined_types.add(node.name)
bases = []
for n in node.bases:
base_parts = []
while True:
if not isinstance(n, ast.Attribute):
base_parts.append(n.id)
break
else:
base_parts.append(n.attr)
n = n.value
bases.append(".".join(base_parts[::-1]))
self.required_types |= set(bases)
signature = get_signature(node)
stub_node = ClassNode(node.name, bases=bases, signature=signature)
self._parents[-1].add_child(stub_node)
self._parents.append(stub_node)
self.generic_visit(node)
del self._parents[-1]
@staticmethod
def generate_import_from(module_, names):
"""Generate an import line.
:sig: (str, Set[str]) -> str
:param module_: Name of module to import the names from.
:param names: Names to import.
:return: Import line in stub code.
"""
regular_names = [n for n in names if "::" not in n]
as_names = [n for n in names if "::" in n]
line = ""
if len(regular_names) > 0:
slots = {"m": module_, "n": ", ".join(sorted(regular_names))}
line = "from %(m)s import %(n)s" % slots
if len(line) > LINE_LENGTH_LIMIT:
slots["n"] = INDENT + (",\n" + INDENT).join(sorted(regular_names)) + ","
line = "from %(m)s import (\n%(n)s\n)" % slots
if len(as_names) > 0:
line += "\n"
for as_name in as_names:
a, n = as_name.split("::")
line += "from %(m)s import %(n)s as %(a)s" % {"m": module_, "n": n, "a": a}
return line
def generate_stub(self):
"""Generate the stub code for this source.
:sig: () -> str
:return: Generated stub code.
"""
needed_types = self.required_types - BUILTIN_TYPES
needed_types -= self.defined_types
_logger.debug("defined types: %s", self.defined_types)
module_vars = {v.name for v in self.root.variables}
_logger.debug("module variables: %s", module_vars)
qualified_types = {n for n in needed_types if "." in n}
qualified_namespaces = {".".join(n.split(".")[:-1]) for n in qualified_types}
needed_namespaces = qualified_namespaces - module_vars
needed_types -= qualified_types
_logger.debug("needed namespaces: %s", needed_namespaces)
imported_names = {n.split("::")[0] for n in self.imported_names}
imported_types = imported_names & (needed_types | needed_namespaces)
needed_types -= imported_types
needed_namespaces -= imported_names
_logger.debug("used imported types: %s", imported_types)
try:
typing_mod = __import__("typing")
typing_types = {n for n in needed_types if hasattr(typing_mod, n)}
needed_types -= typing_types
_logger.debug("types from typing module: %s", typing_types)
except ImportError:
typing_types = set()
_logger.warn("typing module not installed")
if len(needed_types) > 0:
raise ValueError("Unknown types: " + ", ".join(needed_types))
out = StringIO()
started = False
if len(typing_types) > 0:
line = self.generate_import_from("typing", typing_types)
out.write(line + "\n")
started = True
if len(imported_types) > 0:
if started:
out.write("\n")
# preserve the import order in the source file
for name in self.imported_names:
if name.split("::")[0] in imported_types:
line = self.generate_import_from(self.imported_names[name], {name})
out.write(line + "\n")
started = True
if len(needed_namespaces) > 0:
if started:
out.write("\n")
as_names = {n.split("::")[0]: n for n in self.imported_namespaces if "::" in n}
for module_ in sorted(needed_namespaces):
if module_ in as_names:
a, n = as_names[module_].split("::")
out.write("import " + n + " as " + a + "\n")
else:
out.write("import " + module_ + "\n")
started = True
if len(self.aliases) > 0:
if started:
out.write("\n")
for alias, signature in self.aliases.items():
out.write("%s = %s\n" % (alias, signature))
started = True
if started:
out.write("\n")
stub_lines = self.root.get_code()
n_lines = len(stub_lines)
for line_no in range(n_lines):
prev_line = stub_lines[line_no - 1] if line_no > 0 else None
line = stub_lines[line_no]
next_line = stub_lines[line_no + 1] if line_no < (n_lines - 1) else None
if (
line.startswith("class ")
and (prev_line is not None)
and (
(not prev_line.startswith("class "))
or (next_line and next_line.startswith(" "))
)
):
out.write("\n")
if (
line.startswith("def ")
and (prev_line is not None)
and (prev_line.startswith((" ", "class ")))
):
out.write("\n")
out.write(line + "\n")
line_no += 1
return out.getvalue()
|
uyar/pygenstub | pygenstub.py | StubGenerator.visit_Import | python | def visit_Import(self, node):
line = self._code_lines[node.lineno - 1]
module_name = line.split("import")[0].strip()
for name in node.names:
imported_name = name.name
if name.asname:
imported_name = name.asname + "::" + imported_name
self.imported_namespaces[imported_name] = module_name | Visit an import node. | train | https://github.com/uyar/pygenstub/blob/a6b18a823382d3c6be29c411fb33c58b6090d22c/pygenstub.py#L428-L436 | null | class StubGenerator(ast.NodeVisitor):
"""A transformer that generates stub declarations from a source code."""
def __init__(self, source, generic=False):
"""Initialize this stub generator.
:sig: (str, bool) -> None
:param source: Source code to generate the stub for.
:param generic: Whether to produce generic stubs.
"""
self.root = StubNode() # sig: StubNode
self.generic = generic # sig: bool
self.imported_namespaces = OrderedDict() # sig: OrderedDict[str, str]
self.imported_names = OrderedDict() # sig: OrderedDict[str, str]
self.defined_types = set() # sig: Set[str]
self.required_types = set() # sig: Set[str]
self.aliases = OrderedDict() # sig: OrderedDict[str, str]
self._parents = [self.root] # sig: List[StubNode]
self._code_lines = source.splitlines() # sig: List[str]
self.collect_aliases()
ast_tree = ast.parse(source)
self.visit(ast_tree)
def collect_aliases(self):
"""Collect the type aliases in the source.
:sig: () -> None
"""
self.aliases = get_aliases(self._code_lines)
for alias, signature in self.aliases.items():
_, _, requires = parse_signature(signature)
self.required_types |= requires
self.defined_types |= {alias}
def visit_ImportFrom(self, node):
"""Visit an from-import node."""
line = self._code_lines[node.lineno - 1]
module_name = line.split("from")[1].split("import")[0].strip()
for name in node.names:
imported_name = name.name
if name.asname:
imported_name = name.asname + "::" + imported_name
self.imported_names[imported_name] = module_name
def visit_Assign(self, node):
"""Visit an assignment node."""
line = self._code_lines[node.lineno - 1]
if SIG_COMMENT in line:
line = _RE_COMMENT_IN_STRING.sub("", line)
if (SIG_COMMENT not in line) and (not self.generic):
return
if SIG_COMMENT in line:
_, signature = line.split(SIG_COMMENT)
_, return_type, requires = parse_signature(signature)
self.required_types |= requires
parent = self._parents[-1]
for var in node.targets:
if isinstance(var, ast.Name):
name, p = var.id, parent
elif (
isinstance(var, ast.Attribute)
and isinstance(var.value, ast.Name)
and (var.value.id == "self")
):
name, p = var.attr, parent.parent
else:
name, p = None, None
if name is not None:
if self.generic:
return_type = "Any"
self.required_types.add(return_type)
stub_node = VariableNode(name, return_type)
p.add_variable(stub_node)
def get_function_node(self, node):
"""Process a function node.
:sig: (Union[ast.FunctionDef, ast.AsyncFunctionDef]) -> FunctionNode
:param node: Node to process.
:return: Generated function node in stub tree.
"""
decorators = []
for d in node.decorator_list:
if hasattr(d, "id"):
decorators.append(d.id)
elif hasattr(d, "func"):
decorators.append(d.func.id)
elif hasattr(d, "value"):
decorators.append(d.value.id + "." + d.attr)
signature = get_signature(node)
if signature is None:
parent = self._parents[-1]
if isinstance(parent, ClassNode) and (node.name == "__init__"):
signature = parent.signature
if (signature is None) and (not self.generic):
return None
param_names = [arg.arg if PY3 else arg.id for arg in node.args.args]
n_args = len(param_names)
if signature is None:
param_types, rtype, requires = ["Any"] * n_args, "Any", {"Any"}
else:
_logger.debug("parsing signature for %s", node.name)
param_types, rtype, requires = parse_signature(signature)
# TODO: only in classes
if ((n_args > 0) and (param_names[0] == "self")) or (
(n_args > 0) and (param_names[0] == "cls") and ("classmethod" in decorators)
):
if signature is None:
param_types[0] = ""
else:
param_types.insert(0, "")
_logger.debug("parameter types: %s", param_types)
_logger.debug("return type: %s", rtype)
_logger.debug("required types: %s", requires)
self.required_types |= requires
if node.args.vararg is not None:
param_names.append("*" + (node.args.vararg.arg if PY3 else node.args.vararg))
param_types.append("")
if node.args.kwarg is not None:
param_names.append("**" + (node.args.kwarg.arg if PY3 else node.args.kwarg))
param_types.append("")
kwonly_args = getattr(node.args, "kwonlyargs", [])
if len(kwonly_args) > 0:
param_names.extend([arg.arg for arg in kwonly_args])
if signature is None:
param_types.extend(["Any"] * len(kwonly_args))
if len(param_types) != len(param_names):
raise ValueError("Parameter names and types don't match: " + node.name)
param_locs = [(a.lineno, a.col_offset) for a in (node.args.args + kwonly_args)]
param_defaults = {
bisect(param_locs, (d.lineno, d.col_offset)) - 1 for d in node.args.defaults
}
kwonly_defaults = getattr(node.args, "kw_defaults", [])
for i, d in enumerate(kwonly_defaults):
if d is not None:
param_defaults.add(n_args + i)
params = [
(name, type_, i in param_defaults)
for i, (name, type_) in enumerate(zip(param_names, param_types))
]
if len(kwonly_args) > 0:
params.insert(n_args, ("*", "", False))
stub_node = FunctionNode(
node.name, parameters=params, rtype=rtype, decorators=decorators
)
self._parents[-1].add_child(stub_node)
self._parents.append(stub_node)
self.generic_visit(node)
del self._parents[-1]
return stub_node
def visit_FunctionDef(self, node):
"""Visit a function node."""
node = self.get_function_node(node)
if node is not None:
node._async = False
def visit_AsyncFunctionDef(self, node):
"""Visit an async function node."""
node = self.get_function_node(node)
if node is not None:
node._async = True
def visit_ClassDef(self, node):
"""Visit a class node."""
self.defined_types.add(node.name)
bases = []
for n in node.bases:
base_parts = []
while True:
if not isinstance(n, ast.Attribute):
base_parts.append(n.id)
break
else:
base_parts.append(n.attr)
n = n.value
bases.append(".".join(base_parts[::-1]))
self.required_types |= set(bases)
signature = get_signature(node)
stub_node = ClassNode(node.name, bases=bases, signature=signature)
self._parents[-1].add_child(stub_node)
self._parents.append(stub_node)
self.generic_visit(node)
del self._parents[-1]
@staticmethod
def generate_import_from(module_, names):
"""Generate an import line.
:sig: (str, Set[str]) -> str
:param module_: Name of module to import the names from.
:param names: Names to import.
:return: Import line in stub code.
"""
regular_names = [n for n in names if "::" not in n]
as_names = [n for n in names if "::" in n]
line = ""
if len(regular_names) > 0:
slots = {"m": module_, "n": ", ".join(sorted(regular_names))}
line = "from %(m)s import %(n)s" % slots
if len(line) > LINE_LENGTH_LIMIT:
slots["n"] = INDENT + (",\n" + INDENT).join(sorted(regular_names)) + ","
line = "from %(m)s import (\n%(n)s\n)" % slots
if len(as_names) > 0:
line += "\n"
for as_name in as_names:
a, n = as_name.split("::")
line += "from %(m)s import %(n)s as %(a)s" % {"m": module_, "n": n, "a": a}
return line
def generate_stub(self):
"""Generate the stub code for this source.
:sig: () -> str
:return: Generated stub code.
"""
needed_types = self.required_types - BUILTIN_TYPES
needed_types -= self.defined_types
_logger.debug("defined types: %s", self.defined_types)
module_vars = {v.name for v in self.root.variables}
_logger.debug("module variables: %s", module_vars)
qualified_types = {n for n in needed_types if "." in n}
qualified_namespaces = {".".join(n.split(".")[:-1]) for n in qualified_types}
needed_namespaces = qualified_namespaces - module_vars
needed_types -= qualified_types
_logger.debug("needed namespaces: %s", needed_namespaces)
imported_names = {n.split("::")[0] for n in self.imported_names}
imported_types = imported_names & (needed_types | needed_namespaces)
needed_types -= imported_types
needed_namespaces -= imported_names
_logger.debug("used imported types: %s", imported_types)
try:
typing_mod = __import__("typing")
typing_types = {n for n in needed_types if hasattr(typing_mod, n)}
needed_types -= typing_types
_logger.debug("types from typing module: %s", typing_types)
except ImportError:
typing_types = set()
_logger.warn("typing module not installed")
if len(needed_types) > 0:
raise ValueError("Unknown types: " + ", ".join(needed_types))
out = StringIO()
started = False
if len(typing_types) > 0:
line = self.generate_import_from("typing", typing_types)
out.write(line + "\n")
started = True
if len(imported_types) > 0:
if started:
out.write("\n")
# preserve the import order in the source file
for name in self.imported_names:
if name.split("::")[0] in imported_types:
line = self.generate_import_from(self.imported_names[name], {name})
out.write(line + "\n")
started = True
if len(needed_namespaces) > 0:
if started:
out.write("\n")
as_names = {n.split("::")[0]: n for n in self.imported_namespaces if "::" in n}
for module_ in sorted(needed_namespaces):
if module_ in as_names:
a, n = as_names[module_].split("::")
out.write("import " + n + " as " + a + "\n")
else:
out.write("import " + module_ + "\n")
started = True
if len(self.aliases) > 0:
if started:
out.write("\n")
for alias, signature in self.aliases.items():
out.write("%s = %s\n" % (alias, signature))
started = True
if started:
out.write("\n")
stub_lines = self.root.get_code()
n_lines = len(stub_lines)
for line_no in range(n_lines):
prev_line = stub_lines[line_no - 1] if line_no > 0 else None
line = stub_lines[line_no]
next_line = stub_lines[line_no + 1] if line_no < (n_lines - 1) else None
if (
line.startswith("class ")
and (prev_line is not None)
and (
(not prev_line.startswith("class "))
or (next_line and next_line.startswith(" "))
)
):
out.write("\n")
if (
line.startswith("def ")
and (prev_line is not None)
and (prev_line.startswith((" ", "class ")))
):
out.write("\n")
out.write(line + "\n")
line_no += 1
return out.getvalue()
|
uyar/pygenstub | pygenstub.py | StubGenerator.visit_ImportFrom | python | def visit_ImportFrom(self, node):
line = self._code_lines[node.lineno - 1]
module_name = line.split("from")[1].split("import")[0].strip()
for name in node.names:
imported_name = name.name
if name.asname:
imported_name = name.asname + "::" + imported_name
self.imported_names[imported_name] = module_name | Visit an from-import node. | train | https://github.com/uyar/pygenstub/blob/a6b18a823382d3c6be29c411fb33c58b6090d22c/pygenstub.py#L438-L446 | null | class StubGenerator(ast.NodeVisitor):
"""A transformer that generates stub declarations from a source code."""
def __init__(self, source, generic=False):
"""Initialize this stub generator.
:sig: (str, bool) -> None
:param source: Source code to generate the stub for.
:param generic: Whether to produce generic stubs.
"""
self.root = StubNode() # sig: StubNode
self.generic = generic # sig: bool
self.imported_namespaces = OrderedDict() # sig: OrderedDict[str, str]
self.imported_names = OrderedDict() # sig: OrderedDict[str, str]
self.defined_types = set() # sig: Set[str]
self.required_types = set() # sig: Set[str]
self.aliases = OrderedDict() # sig: OrderedDict[str, str]
self._parents = [self.root] # sig: List[StubNode]
self._code_lines = source.splitlines() # sig: List[str]
self.collect_aliases()
ast_tree = ast.parse(source)
self.visit(ast_tree)
def collect_aliases(self):
"""Collect the type aliases in the source.
:sig: () -> None
"""
self.aliases = get_aliases(self._code_lines)
for alias, signature in self.aliases.items():
_, _, requires = parse_signature(signature)
self.required_types |= requires
self.defined_types |= {alias}
def visit_Import(self, node):
"""Visit an import node."""
line = self._code_lines[node.lineno - 1]
module_name = line.split("import")[0].strip()
for name in node.names:
imported_name = name.name
if name.asname:
imported_name = name.asname + "::" + imported_name
self.imported_namespaces[imported_name] = module_name
def visit_Assign(self, node):
"""Visit an assignment node."""
line = self._code_lines[node.lineno - 1]
if SIG_COMMENT in line:
line = _RE_COMMENT_IN_STRING.sub("", line)
if (SIG_COMMENT not in line) and (not self.generic):
return
if SIG_COMMENT in line:
_, signature = line.split(SIG_COMMENT)
_, return_type, requires = parse_signature(signature)
self.required_types |= requires
parent = self._parents[-1]
for var in node.targets:
if isinstance(var, ast.Name):
name, p = var.id, parent
elif (
isinstance(var, ast.Attribute)
and isinstance(var.value, ast.Name)
and (var.value.id == "self")
):
name, p = var.attr, parent.parent
else:
name, p = None, None
if name is not None:
if self.generic:
return_type = "Any"
self.required_types.add(return_type)
stub_node = VariableNode(name, return_type)
p.add_variable(stub_node)
def get_function_node(self, node):
"""Process a function node.
:sig: (Union[ast.FunctionDef, ast.AsyncFunctionDef]) -> FunctionNode
:param node: Node to process.
:return: Generated function node in stub tree.
"""
decorators = []
for d in node.decorator_list:
if hasattr(d, "id"):
decorators.append(d.id)
elif hasattr(d, "func"):
decorators.append(d.func.id)
elif hasattr(d, "value"):
decorators.append(d.value.id + "." + d.attr)
signature = get_signature(node)
if signature is None:
parent = self._parents[-1]
if isinstance(parent, ClassNode) and (node.name == "__init__"):
signature = parent.signature
if (signature is None) and (not self.generic):
return None
param_names = [arg.arg if PY3 else arg.id for arg in node.args.args]
n_args = len(param_names)
if signature is None:
param_types, rtype, requires = ["Any"] * n_args, "Any", {"Any"}
else:
_logger.debug("parsing signature for %s", node.name)
param_types, rtype, requires = parse_signature(signature)
# TODO: only in classes
if ((n_args > 0) and (param_names[0] == "self")) or (
(n_args > 0) and (param_names[0] == "cls") and ("classmethod" in decorators)
):
if signature is None:
param_types[0] = ""
else:
param_types.insert(0, "")
_logger.debug("parameter types: %s", param_types)
_logger.debug("return type: %s", rtype)
_logger.debug("required types: %s", requires)
self.required_types |= requires
if node.args.vararg is not None:
param_names.append("*" + (node.args.vararg.arg if PY3 else node.args.vararg))
param_types.append("")
if node.args.kwarg is not None:
param_names.append("**" + (node.args.kwarg.arg if PY3 else node.args.kwarg))
param_types.append("")
kwonly_args = getattr(node.args, "kwonlyargs", [])
if len(kwonly_args) > 0:
param_names.extend([arg.arg for arg in kwonly_args])
if signature is None:
param_types.extend(["Any"] * len(kwonly_args))
if len(param_types) != len(param_names):
raise ValueError("Parameter names and types don't match: " + node.name)
param_locs = [(a.lineno, a.col_offset) for a in (node.args.args + kwonly_args)]
param_defaults = {
bisect(param_locs, (d.lineno, d.col_offset)) - 1 for d in node.args.defaults
}
kwonly_defaults = getattr(node.args, "kw_defaults", [])
for i, d in enumerate(kwonly_defaults):
if d is not None:
param_defaults.add(n_args + i)
params = [
(name, type_, i in param_defaults)
for i, (name, type_) in enumerate(zip(param_names, param_types))
]
if len(kwonly_args) > 0:
params.insert(n_args, ("*", "", False))
stub_node = FunctionNode(
node.name, parameters=params, rtype=rtype, decorators=decorators
)
self._parents[-1].add_child(stub_node)
self._parents.append(stub_node)
self.generic_visit(node)
del self._parents[-1]
return stub_node
def visit_FunctionDef(self, node):
"""Visit a function node."""
node = self.get_function_node(node)
if node is not None:
node._async = False
def visit_AsyncFunctionDef(self, node):
"""Visit an async function node."""
node = self.get_function_node(node)
if node is not None:
node._async = True
def visit_ClassDef(self, node):
"""Visit a class node."""
self.defined_types.add(node.name)
bases = []
for n in node.bases:
base_parts = []
while True:
if not isinstance(n, ast.Attribute):
base_parts.append(n.id)
break
else:
base_parts.append(n.attr)
n = n.value
bases.append(".".join(base_parts[::-1]))
self.required_types |= set(bases)
signature = get_signature(node)
stub_node = ClassNode(node.name, bases=bases, signature=signature)
self._parents[-1].add_child(stub_node)
self._parents.append(stub_node)
self.generic_visit(node)
del self._parents[-1]
@staticmethod
def generate_import_from(module_, names):
"""Generate an import line.
:sig: (str, Set[str]) -> str
:param module_: Name of module to import the names from.
:param names: Names to import.
:return: Import line in stub code.
"""
regular_names = [n for n in names if "::" not in n]
as_names = [n for n in names if "::" in n]
line = ""
if len(regular_names) > 0:
slots = {"m": module_, "n": ", ".join(sorted(regular_names))}
line = "from %(m)s import %(n)s" % slots
if len(line) > LINE_LENGTH_LIMIT:
slots["n"] = INDENT + (",\n" + INDENT).join(sorted(regular_names)) + ","
line = "from %(m)s import (\n%(n)s\n)" % slots
if len(as_names) > 0:
line += "\n"
for as_name in as_names:
a, n = as_name.split("::")
line += "from %(m)s import %(n)s as %(a)s" % {"m": module_, "n": n, "a": a}
return line
def generate_stub(self):
"""Generate the stub code for this source.
:sig: () -> str
:return: Generated stub code.
"""
needed_types = self.required_types - BUILTIN_TYPES
needed_types -= self.defined_types
_logger.debug("defined types: %s", self.defined_types)
module_vars = {v.name for v in self.root.variables}
_logger.debug("module variables: %s", module_vars)
qualified_types = {n for n in needed_types if "." in n}
qualified_namespaces = {".".join(n.split(".")[:-1]) for n in qualified_types}
needed_namespaces = qualified_namespaces - module_vars
needed_types -= qualified_types
_logger.debug("needed namespaces: %s", needed_namespaces)
imported_names = {n.split("::")[0] for n in self.imported_names}
imported_types = imported_names & (needed_types | needed_namespaces)
needed_types -= imported_types
needed_namespaces -= imported_names
_logger.debug("used imported types: %s", imported_types)
try:
typing_mod = __import__("typing")
typing_types = {n for n in needed_types if hasattr(typing_mod, n)}
needed_types -= typing_types
_logger.debug("types from typing module: %s", typing_types)
except ImportError:
typing_types = set()
_logger.warn("typing module not installed")
if len(needed_types) > 0:
raise ValueError("Unknown types: " + ", ".join(needed_types))
out = StringIO()
started = False
if len(typing_types) > 0:
line = self.generate_import_from("typing", typing_types)
out.write(line + "\n")
started = True
if len(imported_types) > 0:
if started:
out.write("\n")
# preserve the import order in the source file
for name in self.imported_names:
if name.split("::")[0] in imported_types:
line = self.generate_import_from(self.imported_names[name], {name})
out.write(line + "\n")
started = True
if len(needed_namespaces) > 0:
if started:
out.write("\n")
as_names = {n.split("::")[0]: n for n in self.imported_namespaces if "::" in n}
for module_ in sorted(needed_namespaces):
if module_ in as_names:
a, n = as_names[module_].split("::")
out.write("import " + n + " as " + a + "\n")
else:
out.write("import " + module_ + "\n")
started = True
if len(self.aliases) > 0:
if started:
out.write("\n")
for alias, signature in self.aliases.items():
out.write("%s = %s\n" % (alias, signature))
started = True
if started:
out.write("\n")
stub_lines = self.root.get_code()
n_lines = len(stub_lines)
for line_no in range(n_lines):
prev_line = stub_lines[line_no - 1] if line_no > 0 else None
line = stub_lines[line_no]
next_line = stub_lines[line_no + 1] if line_no < (n_lines - 1) else None
if (
line.startswith("class ")
and (prev_line is not None)
and (
(not prev_line.startswith("class "))
or (next_line and next_line.startswith(" "))
)
):
out.write("\n")
if (
line.startswith("def ")
and (prev_line is not None)
and (prev_line.startswith((" ", "class ")))
):
out.write("\n")
out.write(line + "\n")
line_no += 1
return out.getvalue()
|
uyar/pygenstub | pygenstub.py | StubGenerator.visit_Assign | python | def visit_Assign(self, node):
line = self._code_lines[node.lineno - 1]
if SIG_COMMENT in line:
line = _RE_COMMENT_IN_STRING.sub("", line)
if (SIG_COMMENT not in line) and (not self.generic):
return
if SIG_COMMENT in line:
_, signature = line.split(SIG_COMMENT)
_, return_type, requires = parse_signature(signature)
self.required_types |= requires
parent = self._parents[-1]
for var in node.targets:
if isinstance(var, ast.Name):
name, p = var.id, parent
elif (
isinstance(var, ast.Attribute)
and isinstance(var.value, ast.Name)
and (var.value.id == "self")
):
name, p = var.attr, parent.parent
else:
name, p = None, None
if name is not None:
if self.generic:
return_type = "Any"
self.required_types.add(return_type)
stub_node = VariableNode(name, return_type)
p.add_variable(stub_node) | Visit an assignment node. | train | https://github.com/uyar/pygenstub/blob/a6b18a823382d3c6be29c411fb33c58b6090d22c/pygenstub.py#L448-L480 | [
"def parse_signature(signature):\n \"\"\"Parse a signature into its input and return parameter types.\n\n This will also collect the types that are required by any of the input\n and return types.\n\n :sig: (str) -> Tuple[List[str], str, Set[str]]\n :param signature: Signature to parse.\n :return:... | class StubGenerator(ast.NodeVisitor):
"""A transformer that generates stub declarations from a source code."""
def __init__(self, source, generic=False):
"""Initialize this stub generator.
:sig: (str, bool) -> None
:param source: Source code to generate the stub for.
:param generic: Whether to produce generic stubs.
"""
self.root = StubNode() # sig: StubNode
self.generic = generic # sig: bool
self.imported_namespaces = OrderedDict() # sig: OrderedDict[str, str]
self.imported_names = OrderedDict() # sig: OrderedDict[str, str]
self.defined_types = set() # sig: Set[str]
self.required_types = set() # sig: Set[str]
self.aliases = OrderedDict() # sig: OrderedDict[str, str]
self._parents = [self.root] # sig: List[StubNode]
self._code_lines = source.splitlines() # sig: List[str]
self.collect_aliases()
ast_tree = ast.parse(source)
self.visit(ast_tree)
def collect_aliases(self):
"""Collect the type aliases in the source.
:sig: () -> None
"""
self.aliases = get_aliases(self._code_lines)
for alias, signature in self.aliases.items():
_, _, requires = parse_signature(signature)
self.required_types |= requires
self.defined_types |= {alias}
def visit_Import(self, node):
"""Visit an import node."""
line = self._code_lines[node.lineno - 1]
module_name = line.split("import")[0].strip()
for name in node.names:
imported_name = name.name
if name.asname:
imported_name = name.asname + "::" + imported_name
self.imported_namespaces[imported_name] = module_name
def visit_ImportFrom(self, node):
"""Visit an from-import node."""
line = self._code_lines[node.lineno - 1]
module_name = line.split("from")[1].split("import")[0].strip()
for name in node.names:
imported_name = name.name
if name.asname:
imported_name = name.asname + "::" + imported_name
self.imported_names[imported_name] = module_name
def get_function_node(self, node):
"""Process a function node.
:sig: (Union[ast.FunctionDef, ast.AsyncFunctionDef]) -> FunctionNode
:param node: Node to process.
:return: Generated function node in stub tree.
"""
decorators = []
for d in node.decorator_list:
if hasattr(d, "id"):
decorators.append(d.id)
elif hasattr(d, "func"):
decorators.append(d.func.id)
elif hasattr(d, "value"):
decorators.append(d.value.id + "." + d.attr)
signature = get_signature(node)
if signature is None:
parent = self._parents[-1]
if isinstance(parent, ClassNode) and (node.name == "__init__"):
signature = parent.signature
if (signature is None) and (not self.generic):
return None
param_names = [arg.arg if PY3 else arg.id for arg in node.args.args]
n_args = len(param_names)
if signature is None:
param_types, rtype, requires = ["Any"] * n_args, "Any", {"Any"}
else:
_logger.debug("parsing signature for %s", node.name)
param_types, rtype, requires = parse_signature(signature)
# TODO: only in classes
if ((n_args > 0) and (param_names[0] == "self")) or (
(n_args > 0) and (param_names[0] == "cls") and ("classmethod" in decorators)
):
if signature is None:
param_types[0] = ""
else:
param_types.insert(0, "")
_logger.debug("parameter types: %s", param_types)
_logger.debug("return type: %s", rtype)
_logger.debug("required types: %s", requires)
self.required_types |= requires
if node.args.vararg is not None:
param_names.append("*" + (node.args.vararg.arg if PY3 else node.args.vararg))
param_types.append("")
if node.args.kwarg is not None:
param_names.append("**" + (node.args.kwarg.arg if PY3 else node.args.kwarg))
param_types.append("")
kwonly_args = getattr(node.args, "kwonlyargs", [])
if len(kwonly_args) > 0:
param_names.extend([arg.arg for arg in kwonly_args])
if signature is None:
param_types.extend(["Any"] * len(kwonly_args))
if len(param_types) != len(param_names):
raise ValueError("Parameter names and types don't match: " + node.name)
param_locs = [(a.lineno, a.col_offset) for a in (node.args.args + kwonly_args)]
param_defaults = {
bisect(param_locs, (d.lineno, d.col_offset)) - 1 for d in node.args.defaults
}
kwonly_defaults = getattr(node.args, "kw_defaults", [])
for i, d in enumerate(kwonly_defaults):
if d is not None:
param_defaults.add(n_args + i)
params = [
(name, type_, i in param_defaults)
for i, (name, type_) in enumerate(zip(param_names, param_types))
]
if len(kwonly_args) > 0:
params.insert(n_args, ("*", "", False))
stub_node = FunctionNode(
node.name, parameters=params, rtype=rtype, decorators=decorators
)
self._parents[-1].add_child(stub_node)
self._parents.append(stub_node)
self.generic_visit(node)
del self._parents[-1]
return stub_node
def visit_FunctionDef(self, node):
"""Visit a function node."""
node = self.get_function_node(node)
if node is not None:
node._async = False
def visit_AsyncFunctionDef(self, node):
"""Visit an async function node."""
node = self.get_function_node(node)
if node is not None:
node._async = True
def visit_ClassDef(self, node):
"""Visit a class node."""
self.defined_types.add(node.name)
bases = []
for n in node.bases:
base_parts = []
while True:
if not isinstance(n, ast.Attribute):
base_parts.append(n.id)
break
else:
base_parts.append(n.attr)
n = n.value
bases.append(".".join(base_parts[::-1]))
self.required_types |= set(bases)
signature = get_signature(node)
stub_node = ClassNode(node.name, bases=bases, signature=signature)
self._parents[-1].add_child(stub_node)
self._parents.append(stub_node)
self.generic_visit(node)
del self._parents[-1]
@staticmethod
def generate_import_from(module_, names):
"""Generate an import line.
:sig: (str, Set[str]) -> str
:param module_: Name of module to import the names from.
:param names: Names to import.
:return: Import line in stub code.
"""
regular_names = [n for n in names if "::" not in n]
as_names = [n for n in names if "::" in n]
line = ""
if len(regular_names) > 0:
slots = {"m": module_, "n": ", ".join(sorted(regular_names))}
line = "from %(m)s import %(n)s" % slots
if len(line) > LINE_LENGTH_LIMIT:
slots["n"] = INDENT + (",\n" + INDENT).join(sorted(regular_names)) + ","
line = "from %(m)s import (\n%(n)s\n)" % slots
if len(as_names) > 0:
line += "\n"
for as_name in as_names:
a, n = as_name.split("::")
line += "from %(m)s import %(n)s as %(a)s" % {"m": module_, "n": n, "a": a}
return line
def generate_stub(self):
"""Generate the stub code for this source.
:sig: () -> str
:return: Generated stub code.
"""
needed_types = self.required_types - BUILTIN_TYPES
needed_types -= self.defined_types
_logger.debug("defined types: %s", self.defined_types)
module_vars = {v.name for v in self.root.variables}
_logger.debug("module variables: %s", module_vars)
qualified_types = {n for n in needed_types if "." in n}
qualified_namespaces = {".".join(n.split(".")[:-1]) for n in qualified_types}
needed_namespaces = qualified_namespaces - module_vars
needed_types -= qualified_types
_logger.debug("needed namespaces: %s", needed_namespaces)
imported_names = {n.split("::")[0] for n in self.imported_names}
imported_types = imported_names & (needed_types | needed_namespaces)
needed_types -= imported_types
needed_namespaces -= imported_names
_logger.debug("used imported types: %s", imported_types)
try:
typing_mod = __import__("typing")
typing_types = {n for n in needed_types if hasattr(typing_mod, n)}
needed_types -= typing_types
_logger.debug("types from typing module: %s", typing_types)
except ImportError:
typing_types = set()
_logger.warn("typing module not installed")
if len(needed_types) > 0:
raise ValueError("Unknown types: " + ", ".join(needed_types))
out = StringIO()
started = False
if len(typing_types) > 0:
line = self.generate_import_from("typing", typing_types)
out.write(line + "\n")
started = True
if len(imported_types) > 0:
if started:
out.write("\n")
# preserve the import order in the source file
for name in self.imported_names:
if name.split("::")[0] in imported_types:
line = self.generate_import_from(self.imported_names[name], {name})
out.write(line + "\n")
started = True
if len(needed_namespaces) > 0:
if started:
out.write("\n")
as_names = {n.split("::")[0]: n for n in self.imported_namespaces if "::" in n}
for module_ in sorted(needed_namespaces):
if module_ in as_names:
a, n = as_names[module_].split("::")
out.write("import " + n + " as " + a + "\n")
else:
out.write("import " + module_ + "\n")
started = True
if len(self.aliases) > 0:
if started:
out.write("\n")
for alias, signature in self.aliases.items():
out.write("%s = %s\n" % (alias, signature))
started = True
if started:
out.write("\n")
stub_lines = self.root.get_code()
n_lines = len(stub_lines)
for line_no in range(n_lines):
prev_line = stub_lines[line_no - 1] if line_no > 0 else None
line = stub_lines[line_no]
next_line = stub_lines[line_no + 1] if line_no < (n_lines - 1) else None
if (
line.startswith("class ")
and (prev_line is not None)
and (
(not prev_line.startswith("class "))
or (next_line and next_line.startswith(" "))
)
):
out.write("\n")
if (
line.startswith("def ")
and (prev_line is not None)
and (prev_line.startswith((" ", "class ")))
):
out.write("\n")
out.write(line + "\n")
line_no += 1
return out.getvalue()
|
uyar/pygenstub | pygenstub.py | StubGenerator.get_function_node | python | def get_function_node(self, node):
decorators = []
for d in node.decorator_list:
if hasattr(d, "id"):
decorators.append(d.id)
elif hasattr(d, "func"):
decorators.append(d.func.id)
elif hasattr(d, "value"):
decorators.append(d.value.id + "." + d.attr)
signature = get_signature(node)
if signature is None:
parent = self._parents[-1]
if isinstance(parent, ClassNode) and (node.name == "__init__"):
signature = parent.signature
if (signature is None) and (not self.generic):
return None
param_names = [arg.arg if PY3 else arg.id for arg in node.args.args]
n_args = len(param_names)
if signature is None:
param_types, rtype, requires = ["Any"] * n_args, "Any", {"Any"}
else:
_logger.debug("parsing signature for %s", node.name)
param_types, rtype, requires = parse_signature(signature)
# TODO: only in classes
if ((n_args > 0) and (param_names[0] == "self")) or (
(n_args > 0) and (param_names[0] == "cls") and ("classmethod" in decorators)
):
if signature is None:
param_types[0] = ""
else:
param_types.insert(0, "")
_logger.debug("parameter types: %s", param_types)
_logger.debug("return type: %s", rtype)
_logger.debug("required types: %s", requires)
self.required_types |= requires
if node.args.vararg is not None:
param_names.append("*" + (node.args.vararg.arg if PY3 else node.args.vararg))
param_types.append("")
if node.args.kwarg is not None:
param_names.append("**" + (node.args.kwarg.arg if PY3 else node.args.kwarg))
param_types.append("")
kwonly_args = getattr(node.args, "kwonlyargs", [])
if len(kwonly_args) > 0:
param_names.extend([arg.arg for arg in kwonly_args])
if signature is None:
param_types.extend(["Any"] * len(kwonly_args))
if len(param_types) != len(param_names):
raise ValueError("Parameter names and types don't match: " + node.name)
param_locs = [(a.lineno, a.col_offset) for a in (node.args.args + kwonly_args)]
param_defaults = {
bisect(param_locs, (d.lineno, d.col_offset)) - 1 for d in node.args.defaults
}
kwonly_defaults = getattr(node.args, "kw_defaults", [])
for i, d in enumerate(kwonly_defaults):
if d is not None:
param_defaults.add(n_args + i)
params = [
(name, type_, i in param_defaults)
for i, (name, type_) in enumerate(zip(param_names, param_types))
]
if len(kwonly_args) > 0:
params.insert(n_args, ("*", "", False))
stub_node = FunctionNode(
node.name, parameters=params, rtype=rtype, decorators=decorators
)
self._parents[-1].add_child(stub_node)
self._parents.append(stub_node)
self.generic_visit(node)
del self._parents[-1]
return stub_node | Process a function node.
:sig: (Union[ast.FunctionDef, ast.AsyncFunctionDef]) -> FunctionNode
:param node: Node to process.
:return: Generated function node in stub tree. | train | https://github.com/uyar/pygenstub/blob/a6b18a823382d3c6be29c411fb33c58b6090d22c/pygenstub.py#L482-L575 | [
"def get_signature(node):\n \"\"\"Get the signature of a function or a class.\n\n :sig: (Union[ast.FunctionDef, ast.AsyncFunctionDef, ast.ClassDef]) -> Optional[str]\n :param node: Node to get the signature from.\n :return: Value of signature field in node docstring, or ``None`` if there's no signature.... | class StubGenerator(ast.NodeVisitor):
"""A transformer that generates stub declarations from a source code."""
def __init__(self, source, generic=False):
"""Initialize this stub generator.
:sig: (str, bool) -> None
:param source: Source code to generate the stub for.
:param generic: Whether to produce generic stubs.
"""
self.root = StubNode() # sig: StubNode
self.generic = generic # sig: bool
self.imported_namespaces = OrderedDict() # sig: OrderedDict[str, str]
self.imported_names = OrderedDict() # sig: OrderedDict[str, str]
self.defined_types = set() # sig: Set[str]
self.required_types = set() # sig: Set[str]
self.aliases = OrderedDict() # sig: OrderedDict[str, str]
self._parents = [self.root] # sig: List[StubNode]
self._code_lines = source.splitlines() # sig: List[str]
self.collect_aliases()
ast_tree = ast.parse(source)
self.visit(ast_tree)
def collect_aliases(self):
"""Collect the type aliases in the source.
:sig: () -> None
"""
self.aliases = get_aliases(self._code_lines)
for alias, signature in self.aliases.items():
_, _, requires = parse_signature(signature)
self.required_types |= requires
self.defined_types |= {alias}
def visit_Import(self, node):
"""Visit an import node."""
line = self._code_lines[node.lineno - 1]
module_name = line.split("import")[0].strip()
for name in node.names:
imported_name = name.name
if name.asname:
imported_name = name.asname + "::" + imported_name
self.imported_namespaces[imported_name] = module_name
def visit_ImportFrom(self, node):
"""Visit an from-import node."""
line = self._code_lines[node.lineno - 1]
module_name = line.split("from")[1].split("import")[0].strip()
for name in node.names:
imported_name = name.name
if name.asname:
imported_name = name.asname + "::" + imported_name
self.imported_names[imported_name] = module_name
def visit_Assign(self, node):
"""Visit an assignment node."""
line = self._code_lines[node.lineno - 1]
if SIG_COMMENT in line:
line = _RE_COMMENT_IN_STRING.sub("", line)
if (SIG_COMMENT not in line) and (not self.generic):
return
if SIG_COMMENT in line:
_, signature = line.split(SIG_COMMENT)
_, return_type, requires = parse_signature(signature)
self.required_types |= requires
parent = self._parents[-1]
for var in node.targets:
if isinstance(var, ast.Name):
name, p = var.id, parent
elif (
isinstance(var, ast.Attribute)
and isinstance(var.value, ast.Name)
and (var.value.id == "self")
):
name, p = var.attr, parent.parent
else:
name, p = None, None
if name is not None:
if self.generic:
return_type = "Any"
self.required_types.add(return_type)
stub_node = VariableNode(name, return_type)
p.add_variable(stub_node)
def visit_FunctionDef(self, node):
"""Visit a function node."""
node = self.get_function_node(node)
if node is not None:
node._async = False
def visit_AsyncFunctionDef(self, node):
"""Visit an async function node."""
node = self.get_function_node(node)
if node is not None:
node._async = True
def visit_ClassDef(self, node):
"""Visit a class node."""
self.defined_types.add(node.name)
bases = []
for n in node.bases:
base_parts = []
while True:
if not isinstance(n, ast.Attribute):
base_parts.append(n.id)
break
else:
base_parts.append(n.attr)
n = n.value
bases.append(".".join(base_parts[::-1]))
self.required_types |= set(bases)
signature = get_signature(node)
stub_node = ClassNode(node.name, bases=bases, signature=signature)
self._parents[-1].add_child(stub_node)
self._parents.append(stub_node)
self.generic_visit(node)
del self._parents[-1]
@staticmethod
def generate_import_from(module_, names):
"""Generate an import line.
:sig: (str, Set[str]) -> str
:param module_: Name of module to import the names from.
:param names: Names to import.
:return: Import line in stub code.
"""
regular_names = [n for n in names if "::" not in n]
as_names = [n for n in names if "::" in n]
line = ""
if len(regular_names) > 0:
slots = {"m": module_, "n": ", ".join(sorted(regular_names))}
line = "from %(m)s import %(n)s" % slots
if len(line) > LINE_LENGTH_LIMIT:
slots["n"] = INDENT + (",\n" + INDENT).join(sorted(regular_names)) + ","
line = "from %(m)s import (\n%(n)s\n)" % slots
if len(as_names) > 0:
line += "\n"
for as_name in as_names:
a, n = as_name.split("::")
line += "from %(m)s import %(n)s as %(a)s" % {"m": module_, "n": n, "a": a}
return line
def generate_stub(self):
"""Generate the stub code for this source.
:sig: () -> str
:return: Generated stub code.
"""
needed_types = self.required_types - BUILTIN_TYPES
needed_types -= self.defined_types
_logger.debug("defined types: %s", self.defined_types)
module_vars = {v.name for v in self.root.variables}
_logger.debug("module variables: %s", module_vars)
qualified_types = {n for n in needed_types if "." in n}
qualified_namespaces = {".".join(n.split(".")[:-1]) for n in qualified_types}
needed_namespaces = qualified_namespaces - module_vars
needed_types -= qualified_types
_logger.debug("needed namespaces: %s", needed_namespaces)
imported_names = {n.split("::")[0] for n in self.imported_names}
imported_types = imported_names & (needed_types | needed_namespaces)
needed_types -= imported_types
needed_namespaces -= imported_names
_logger.debug("used imported types: %s", imported_types)
try:
typing_mod = __import__("typing")
typing_types = {n for n in needed_types if hasattr(typing_mod, n)}
needed_types -= typing_types
_logger.debug("types from typing module: %s", typing_types)
except ImportError:
typing_types = set()
_logger.warn("typing module not installed")
if len(needed_types) > 0:
raise ValueError("Unknown types: " + ", ".join(needed_types))
out = StringIO()
started = False
if len(typing_types) > 0:
line = self.generate_import_from("typing", typing_types)
out.write(line + "\n")
started = True
if len(imported_types) > 0:
if started:
out.write("\n")
# preserve the import order in the source file
for name in self.imported_names:
if name.split("::")[0] in imported_types:
line = self.generate_import_from(self.imported_names[name], {name})
out.write(line + "\n")
started = True
if len(needed_namespaces) > 0:
if started:
out.write("\n")
as_names = {n.split("::")[0]: n for n in self.imported_namespaces if "::" in n}
for module_ in sorted(needed_namespaces):
if module_ in as_names:
a, n = as_names[module_].split("::")
out.write("import " + n + " as " + a + "\n")
else:
out.write("import " + module_ + "\n")
started = True
if len(self.aliases) > 0:
if started:
out.write("\n")
for alias, signature in self.aliases.items():
out.write("%s = %s\n" % (alias, signature))
started = True
if started:
out.write("\n")
stub_lines = self.root.get_code()
n_lines = len(stub_lines)
for line_no in range(n_lines):
prev_line = stub_lines[line_no - 1] if line_no > 0 else None
line = stub_lines[line_no]
next_line = stub_lines[line_no + 1] if line_no < (n_lines - 1) else None
if (
line.startswith("class ")
and (prev_line is not None)
and (
(not prev_line.startswith("class "))
or (next_line and next_line.startswith(" "))
)
):
out.write("\n")
if (
line.startswith("def ")
and (prev_line is not None)
and (prev_line.startswith((" ", "class ")))
):
out.write("\n")
out.write(line + "\n")
line_no += 1
return out.getvalue()
|
uyar/pygenstub | pygenstub.py | StubGenerator.visit_FunctionDef | python | def visit_FunctionDef(self, node):
node = self.get_function_node(node)
if node is not None:
node._async = False | Visit a function node. | train | https://github.com/uyar/pygenstub/blob/a6b18a823382d3c6be29c411fb33c58b6090d22c/pygenstub.py#L577-L581 | [
"def get_function_node(self, node):\n \"\"\"Process a function node.\n\n :sig: (Union[ast.FunctionDef, ast.AsyncFunctionDef]) -> FunctionNode\n :param node: Node to process.\n :return: Generated function node in stub tree.\n \"\"\"\n decorators = []\n for d in node.decorator_list:\n if h... | class StubGenerator(ast.NodeVisitor):
"""A transformer that generates stub declarations from a source code."""
def __init__(self, source, generic=False):
"""Initialize this stub generator.
:sig: (str, bool) -> None
:param source: Source code to generate the stub for.
:param generic: Whether to produce generic stubs.
"""
self.root = StubNode() # sig: StubNode
self.generic = generic # sig: bool
self.imported_namespaces = OrderedDict() # sig: OrderedDict[str, str]
self.imported_names = OrderedDict() # sig: OrderedDict[str, str]
self.defined_types = set() # sig: Set[str]
self.required_types = set() # sig: Set[str]
self.aliases = OrderedDict() # sig: OrderedDict[str, str]
self._parents = [self.root] # sig: List[StubNode]
self._code_lines = source.splitlines() # sig: List[str]
self.collect_aliases()
ast_tree = ast.parse(source)
self.visit(ast_tree)
def collect_aliases(self):
"""Collect the type aliases in the source.
:sig: () -> None
"""
self.aliases = get_aliases(self._code_lines)
for alias, signature in self.aliases.items():
_, _, requires = parse_signature(signature)
self.required_types |= requires
self.defined_types |= {alias}
def visit_Import(self, node):
"""Visit an import node."""
line = self._code_lines[node.lineno - 1]
module_name = line.split("import")[0].strip()
for name in node.names:
imported_name = name.name
if name.asname:
imported_name = name.asname + "::" + imported_name
self.imported_namespaces[imported_name] = module_name
def visit_ImportFrom(self, node):
"""Visit an from-import node."""
line = self._code_lines[node.lineno - 1]
module_name = line.split("from")[1].split("import")[0].strip()
for name in node.names:
imported_name = name.name
if name.asname:
imported_name = name.asname + "::" + imported_name
self.imported_names[imported_name] = module_name
def visit_Assign(self, node):
"""Visit an assignment node."""
line = self._code_lines[node.lineno - 1]
if SIG_COMMENT in line:
line = _RE_COMMENT_IN_STRING.sub("", line)
if (SIG_COMMENT not in line) and (not self.generic):
return
if SIG_COMMENT in line:
_, signature = line.split(SIG_COMMENT)
_, return_type, requires = parse_signature(signature)
self.required_types |= requires
parent = self._parents[-1]
for var in node.targets:
if isinstance(var, ast.Name):
name, p = var.id, parent
elif (
isinstance(var, ast.Attribute)
and isinstance(var.value, ast.Name)
and (var.value.id == "self")
):
name, p = var.attr, parent.parent
else:
name, p = None, None
if name is not None:
if self.generic:
return_type = "Any"
self.required_types.add(return_type)
stub_node = VariableNode(name, return_type)
p.add_variable(stub_node)
def get_function_node(self, node):
"""Process a function node.
:sig: (Union[ast.FunctionDef, ast.AsyncFunctionDef]) -> FunctionNode
:param node: Node to process.
:return: Generated function node in stub tree.
"""
decorators = []
for d in node.decorator_list:
if hasattr(d, "id"):
decorators.append(d.id)
elif hasattr(d, "func"):
decorators.append(d.func.id)
elif hasattr(d, "value"):
decorators.append(d.value.id + "." + d.attr)
signature = get_signature(node)
if signature is None:
parent = self._parents[-1]
if isinstance(parent, ClassNode) and (node.name == "__init__"):
signature = parent.signature
if (signature is None) and (not self.generic):
return None
param_names = [arg.arg if PY3 else arg.id for arg in node.args.args]
n_args = len(param_names)
if signature is None:
param_types, rtype, requires = ["Any"] * n_args, "Any", {"Any"}
else:
_logger.debug("parsing signature for %s", node.name)
param_types, rtype, requires = parse_signature(signature)
# TODO: only in classes
if ((n_args > 0) and (param_names[0] == "self")) or (
(n_args > 0) and (param_names[0] == "cls") and ("classmethod" in decorators)
):
if signature is None:
param_types[0] = ""
else:
param_types.insert(0, "")
_logger.debug("parameter types: %s", param_types)
_logger.debug("return type: %s", rtype)
_logger.debug("required types: %s", requires)
self.required_types |= requires
if node.args.vararg is not None:
param_names.append("*" + (node.args.vararg.arg if PY3 else node.args.vararg))
param_types.append("")
if node.args.kwarg is not None:
param_names.append("**" + (node.args.kwarg.arg if PY3 else node.args.kwarg))
param_types.append("")
kwonly_args = getattr(node.args, "kwonlyargs", [])
if len(kwonly_args) > 0:
param_names.extend([arg.arg for arg in kwonly_args])
if signature is None:
param_types.extend(["Any"] * len(kwonly_args))
if len(param_types) != len(param_names):
raise ValueError("Parameter names and types don't match: " + node.name)
param_locs = [(a.lineno, a.col_offset) for a in (node.args.args + kwonly_args)]
param_defaults = {
bisect(param_locs, (d.lineno, d.col_offset)) - 1 for d in node.args.defaults
}
kwonly_defaults = getattr(node.args, "kw_defaults", [])
for i, d in enumerate(kwonly_defaults):
if d is not None:
param_defaults.add(n_args + i)
params = [
(name, type_, i in param_defaults)
for i, (name, type_) in enumerate(zip(param_names, param_types))
]
if len(kwonly_args) > 0:
params.insert(n_args, ("*", "", False))
stub_node = FunctionNode(
node.name, parameters=params, rtype=rtype, decorators=decorators
)
self._parents[-1].add_child(stub_node)
self._parents.append(stub_node)
self.generic_visit(node)
del self._parents[-1]
return stub_node
def visit_AsyncFunctionDef(self, node):
"""Visit an async function node."""
node = self.get_function_node(node)
if node is not None:
node._async = True
def visit_ClassDef(self, node):
"""Visit a class node."""
self.defined_types.add(node.name)
bases = []
for n in node.bases:
base_parts = []
while True:
if not isinstance(n, ast.Attribute):
base_parts.append(n.id)
break
else:
base_parts.append(n.attr)
n = n.value
bases.append(".".join(base_parts[::-1]))
self.required_types |= set(bases)
signature = get_signature(node)
stub_node = ClassNode(node.name, bases=bases, signature=signature)
self._parents[-1].add_child(stub_node)
self._parents.append(stub_node)
self.generic_visit(node)
del self._parents[-1]
@staticmethod
def generate_import_from(module_, names):
"""Generate an import line.
:sig: (str, Set[str]) -> str
:param module_: Name of module to import the names from.
:param names: Names to import.
:return: Import line in stub code.
"""
regular_names = [n for n in names if "::" not in n]
as_names = [n for n in names if "::" in n]
line = ""
if len(regular_names) > 0:
slots = {"m": module_, "n": ", ".join(sorted(regular_names))}
line = "from %(m)s import %(n)s" % slots
if len(line) > LINE_LENGTH_LIMIT:
slots["n"] = INDENT + (",\n" + INDENT).join(sorted(regular_names)) + ","
line = "from %(m)s import (\n%(n)s\n)" % slots
if len(as_names) > 0:
line += "\n"
for as_name in as_names:
a, n = as_name.split("::")
line += "from %(m)s import %(n)s as %(a)s" % {"m": module_, "n": n, "a": a}
return line
def generate_stub(self):
"""Generate the stub code for this source.
:sig: () -> str
:return: Generated stub code.
"""
needed_types = self.required_types - BUILTIN_TYPES
needed_types -= self.defined_types
_logger.debug("defined types: %s", self.defined_types)
module_vars = {v.name for v in self.root.variables}
_logger.debug("module variables: %s", module_vars)
qualified_types = {n for n in needed_types if "." in n}
qualified_namespaces = {".".join(n.split(".")[:-1]) for n in qualified_types}
needed_namespaces = qualified_namespaces - module_vars
needed_types -= qualified_types
_logger.debug("needed namespaces: %s", needed_namespaces)
imported_names = {n.split("::")[0] for n in self.imported_names}
imported_types = imported_names & (needed_types | needed_namespaces)
needed_types -= imported_types
needed_namespaces -= imported_names
_logger.debug("used imported types: %s", imported_types)
try:
typing_mod = __import__("typing")
typing_types = {n for n in needed_types if hasattr(typing_mod, n)}
needed_types -= typing_types
_logger.debug("types from typing module: %s", typing_types)
except ImportError:
typing_types = set()
_logger.warn("typing module not installed")
if len(needed_types) > 0:
raise ValueError("Unknown types: " + ", ".join(needed_types))
out = StringIO()
started = False
if len(typing_types) > 0:
line = self.generate_import_from("typing", typing_types)
out.write(line + "\n")
started = True
if len(imported_types) > 0:
if started:
out.write("\n")
# preserve the import order in the source file
for name in self.imported_names:
if name.split("::")[0] in imported_types:
line = self.generate_import_from(self.imported_names[name], {name})
out.write(line + "\n")
started = True
if len(needed_namespaces) > 0:
if started:
out.write("\n")
as_names = {n.split("::")[0]: n for n in self.imported_namespaces if "::" in n}
for module_ in sorted(needed_namespaces):
if module_ in as_names:
a, n = as_names[module_].split("::")
out.write("import " + n + " as " + a + "\n")
else:
out.write("import " + module_ + "\n")
started = True
if len(self.aliases) > 0:
if started:
out.write("\n")
for alias, signature in self.aliases.items():
out.write("%s = %s\n" % (alias, signature))
started = True
if started:
out.write("\n")
stub_lines = self.root.get_code()
n_lines = len(stub_lines)
for line_no in range(n_lines):
prev_line = stub_lines[line_no - 1] if line_no > 0 else None
line = stub_lines[line_no]
next_line = stub_lines[line_no + 1] if line_no < (n_lines - 1) else None
if (
line.startswith("class ")
and (prev_line is not None)
and (
(not prev_line.startswith("class "))
or (next_line and next_line.startswith(" "))
)
):
out.write("\n")
if (
line.startswith("def ")
and (prev_line is not None)
and (prev_line.startswith((" ", "class ")))
):
out.write("\n")
out.write(line + "\n")
line_no += 1
return out.getvalue()
|
uyar/pygenstub | pygenstub.py | StubGenerator.visit_AsyncFunctionDef | python | def visit_AsyncFunctionDef(self, node):
node = self.get_function_node(node)
if node is not None:
node._async = True | Visit an async function node. | train | https://github.com/uyar/pygenstub/blob/a6b18a823382d3c6be29c411fb33c58b6090d22c/pygenstub.py#L583-L587 | [
"def get_function_node(self, node):\n \"\"\"Process a function node.\n\n :sig: (Union[ast.FunctionDef, ast.AsyncFunctionDef]) -> FunctionNode\n :param node: Node to process.\n :return: Generated function node in stub tree.\n \"\"\"\n decorators = []\n for d in node.decorator_list:\n if h... | class StubGenerator(ast.NodeVisitor):
"""A transformer that generates stub declarations from a source code."""
def __init__(self, source, generic=False):
"""Initialize this stub generator.
:sig: (str, bool) -> None
:param source: Source code to generate the stub for.
:param generic: Whether to produce generic stubs.
"""
self.root = StubNode() # sig: StubNode
self.generic = generic # sig: bool
self.imported_namespaces = OrderedDict() # sig: OrderedDict[str, str]
self.imported_names = OrderedDict() # sig: OrderedDict[str, str]
self.defined_types = set() # sig: Set[str]
self.required_types = set() # sig: Set[str]
self.aliases = OrderedDict() # sig: OrderedDict[str, str]
self._parents = [self.root] # sig: List[StubNode]
self._code_lines = source.splitlines() # sig: List[str]
self.collect_aliases()
ast_tree = ast.parse(source)
self.visit(ast_tree)
def collect_aliases(self):
"""Collect the type aliases in the source.
:sig: () -> None
"""
self.aliases = get_aliases(self._code_lines)
for alias, signature in self.aliases.items():
_, _, requires = parse_signature(signature)
self.required_types |= requires
self.defined_types |= {alias}
def visit_Import(self, node):
"""Visit an import node."""
line = self._code_lines[node.lineno - 1]
module_name = line.split("import")[0].strip()
for name in node.names:
imported_name = name.name
if name.asname:
imported_name = name.asname + "::" + imported_name
self.imported_namespaces[imported_name] = module_name
def visit_ImportFrom(self, node):
"""Visit an from-import node."""
line = self._code_lines[node.lineno - 1]
module_name = line.split("from")[1].split("import")[0].strip()
for name in node.names:
imported_name = name.name
if name.asname:
imported_name = name.asname + "::" + imported_name
self.imported_names[imported_name] = module_name
def visit_Assign(self, node):
"""Visit an assignment node."""
line = self._code_lines[node.lineno - 1]
if SIG_COMMENT in line:
line = _RE_COMMENT_IN_STRING.sub("", line)
if (SIG_COMMENT not in line) and (not self.generic):
return
if SIG_COMMENT in line:
_, signature = line.split(SIG_COMMENT)
_, return_type, requires = parse_signature(signature)
self.required_types |= requires
parent = self._parents[-1]
for var in node.targets:
if isinstance(var, ast.Name):
name, p = var.id, parent
elif (
isinstance(var, ast.Attribute)
and isinstance(var.value, ast.Name)
and (var.value.id == "self")
):
name, p = var.attr, parent.parent
else:
name, p = None, None
if name is not None:
if self.generic:
return_type = "Any"
self.required_types.add(return_type)
stub_node = VariableNode(name, return_type)
p.add_variable(stub_node)
def get_function_node(self, node):
"""Process a function node.
:sig: (Union[ast.FunctionDef, ast.AsyncFunctionDef]) -> FunctionNode
:param node: Node to process.
:return: Generated function node in stub tree.
"""
decorators = []
for d in node.decorator_list:
if hasattr(d, "id"):
decorators.append(d.id)
elif hasattr(d, "func"):
decorators.append(d.func.id)
elif hasattr(d, "value"):
decorators.append(d.value.id + "." + d.attr)
signature = get_signature(node)
if signature is None:
parent = self._parents[-1]
if isinstance(parent, ClassNode) and (node.name == "__init__"):
signature = parent.signature
if (signature is None) and (not self.generic):
return None
param_names = [arg.arg if PY3 else arg.id for arg in node.args.args]
n_args = len(param_names)
if signature is None:
param_types, rtype, requires = ["Any"] * n_args, "Any", {"Any"}
else:
_logger.debug("parsing signature for %s", node.name)
param_types, rtype, requires = parse_signature(signature)
# TODO: only in classes
if ((n_args > 0) and (param_names[0] == "self")) or (
(n_args > 0) and (param_names[0] == "cls") and ("classmethod" in decorators)
):
if signature is None:
param_types[0] = ""
else:
param_types.insert(0, "")
_logger.debug("parameter types: %s", param_types)
_logger.debug("return type: %s", rtype)
_logger.debug("required types: %s", requires)
self.required_types |= requires
if node.args.vararg is not None:
param_names.append("*" + (node.args.vararg.arg if PY3 else node.args.vararg))
param_types.append("")
if node.args.kwarg is not None:
param_names.append("**" + (node.args.kwarg.arg if PY3 else node.args.kwarg))
param_types.append("")
kwonly_args = getattr(node.args, "kwonlyargs", [])
if len(kwonly_args) > 0:
param_names.extend([arg.arg for arg in kwonly_args])
if signature is None:
param_types.extend(["Any"] * len(kwonly_args))
if len(param_types) != len(param_names):
raise ValueError("Parameter names and types don't match: " + node.name)
param_locs = [(a.lineno, a.col_offset) for a in (node.args.args + kwonly_args)]
param_defaults = {
bisect(param_locs, (d.lineno, d.col_offset)) - 1 for d in node.args.defaults
}
kwonly_defaults = getattr(node.args, "kw_defaults", [])
for i, d in enumerate(kwonly_defaults):
if d is not None:
param_defaults.add(n_args + i)
params = [
(name, type_, i in param_defaults)
for i, (name, type_) in enumerate(zip(param_names, param_types))
]
if len(kwonly_args) > 0:
params.insert(n_args, ("*", "", False))
stub_node = FunctionNode(
node.name, parameters=params, rtype=rtype, decorators=decorators
)
self._parents[-1].add_child(stub_node)
self._parents.append(stub_node)
self.generic_visit(node)
del self._parents[-1]
return stub_node
def visit_FunctionDef(self, node):
"""Visit a function node."""
node = self.get_function_node(node)
if node is not None:
node._async = False
def visit_ClassDef(self, node):
"""Visit a class node."""
self.defined_types.add(node.name)
bases = []
for n in node.bases:
base_parts = []
while True:
if not isinstance(n, ast.Attribute):
base_parts.append(n.id)
break
else:
base_parts.append(n.attr)
n = n.value
bases.append(".".join(base_parts[::-1]))
self.required_types |= set(bases)
signature = get_signature(node)
stub_node = ClassNode(node.name, bases=bases, signature=signature)
self._parents[-1].add_child(stub_node)
self._parents.append(stub_node)
self.generic_visit(node)
del self._parents[-1]
@staticmethod
def generate_import_from(module_, names):
"""Generate an import line.
:sig: (str, Set[str]) -> str
:param module_: Name of module to import the names from.
:param names: Names to import.
:return: Import line in stub code.
"""
regular_names = [n for n in names if "::" not in n]
as_names = [n for n in names if "::" in n]
line = ""
if len(regular_names) > 0:
slots = {"m": module_, "n": ", ".join(sorted(regular_names))}
line = "from %(m)s import %(n)s" % slots
if len(line) > LINE_LENGTH_LIMIT:
slots["n"] = INDENT + (",\n" + INDENT).join(sorted(regular_names)) + ","
line = "from %(m)s import (\n%(n)s\n)" % slots
if len(as_names) > 0:
line += "\n"
for as_name in as_names:
a, n = as_name.split("::")
line += "from %(m)s import %(n)s as %(a)s" % {"m": module_, "n": n, "a": a}
return line
def generate_stub(self):
"""Generate the stub code for this source.
:sig: () -> str
:return: Generated stub code.
"""
needed_types = self.required_types - BUILTIN_TYPES
needed_types -= self.defined_types
_logger.debug("defined types: %s", self.defined_types)
module_vars = {v.name for v in self.root.variables}
_logger.debug("module variables: %s", module_vars)
qualified_types = {n for n in needed_types if "." in n}
qualified_namespaces = {".".join(n.split(".")[:-1]) for n in qualified_types}
needed_namespaces = qualified_namespaces - module_vars
needed_types -= qualified_types
_logger.debug("needed namespaces: %s", needed_namespaces)
imported_names = {n.split("::")[0] for n in self.imported_names}
imported_types = imported_names & (needed_types | needed_namespaces)
needed_types -= imported_types
needed_namespaces -= imported_names
_logger.debug("used imported types: %s", imported_types)
try:
typing_mod = __import__("typing")
typing_types = {n for n in needed_types if hasattr(typing_mod, n)}
needed_types -= typing_types
_logger.debug("types from typing module: %s", typing_types)
except ImportError:
typing_types = set()
_logger.warn("typing module not installed")
if len(needed_types) > 0:
raise ValueError("Unknown types: " + ", ".join(needed_types))
out = StringIO()
started = False
if len(typing_types) > 0:
line = self.generate_import_from("typing", typing_types)
out.write(line + "\n")
started = True
if len(imported_types) > 0:
if started:
out.write("\n")
# preserve the import order in the source file
for name in self.imported_names:
if name.split("::")[0] in imported_types:
line = self.generate_import_from(self.imported_names[name], {name})
out.write(line + "\n")
started = True
if len(needed_namespaces) > 0:
if started:
out.write("\n")
as_names = {n.split("::")[0]: n for n in self.imported_namespaces if "::" in n}
for module_ in sorted(needed_namespaces):
if module_ in as_names:
a, n = as_names[module_].split("::")
out.write("import " + n + " as " + a + "\n")
else:
out.write("import " + module_ + "\n")
started = True
if len(self.aliases) > 0:
if started:
out.write("\n")
for alias, signature in self.aliases.items():
out.write("%s = %s\n" % (alias, signature))
started = True
if started:
out.write("\n")
stub_lines = self.root.get_code()
n_lines = len(stub_lines)
for line_no in range(n_lines):
prev_line = stub_lines[line_no - 1] if line_no > 0 else None
line = stub_lines[line_no]
next_line = stub_lines[line_no + 1] if line_no < (n_lines - 1) else None
if (
line.startswith("class ")
and (prev_line is not None)
and (
(not prev_line.startswith("class "))
or (next_line and next_line.startswith(" "))
)
):
out.write("\n")
if (
line.startswith("def ")
and (prev_line is not None)
and (prev_line.startswith((" ", "class ")))
):
out.write("\n")
out.write(line + "\n")
line_no += 1
return out.getvalue()
|
uyar/pygenstub | pygenstub.py | StubGenerator.visit_ClassDef | python | def visit_ClassDef(self, node):
self.defined_types.add(node.name)
bases = []
for n in node.bases:
base_parts = []
while True:
if not isinstance(n, ast.Attribute):
base_parts.append(n.id)
break
else:
base_parts.append(n.attr)
n = n.value
bases.append(".".join(base_parts[::-1]))
self.required_types |= set(bases)
signature = get_signature(node)
stub_node = ClassNode(node.name, bases=bases, signature=signature)
self._parents[-1].add_child(stub_node)
self._parents.append(stub_node)
self.generic_visit(node)
del self._parents[-1] | Visit a class node. | train | https://github.com/uyar/pygenstub/blob/a6b18a823382d3c6be29c411fb33c58b6090d22c/pygenstub.py#L589-L612 | [
"def get_signature(node):\n \"\"\"Get the signature of a function or a class.\n\n :sig: (Union[ast.FunctionDef, ast.AsyncFunctionDef, ast.ClassDef]) -> Optional[str]\n :param node: Node to get the signature from.\n :return: Value of signature field in node docstring, or ``None`` if there's no signature.... | class StubGenerator(ast.NodeVisitor):
"""A transformer that generates stub declarations from a source code."""
def __init__(self, source, generic=False):
"""Initialize this stub generator.
:sig: (str, bool) -> None
:param source: Source code to generate the stub for.
:param generic: Whether to produce generic stubs.
"""
self.root = StubNode() # sig: StubNode
self.generic = generic # sig: bool
self.imported_namespaces = OrderedDict() # sig: OrderedDict[str, str]
self.imported_names = OrderedDict() # sig: OrderedDict[str, str]
self.defined_types = set() # sig: Set[str]
self.required_types = set() # sig: Set[str]
self.aliases = OrderedDict() # sig: OrderedDict[str, str]
self._parents = [self.root] # sig: List[StubNode]
self._code_lines = source.splitlines() # sig: List[str]
self.collect_aliases()
ast_tree = ast.parse(source)
self.visit(ast_tree)
def collect_aliases(self):
"""Collect the type aliases in the source.
:sig: () -> None
"""
self.aliases = get_aliases(self._code_lines)
for alias, signature in self.aliases.items():
_, _, requires = parse_signature(signature)
self.required_types |= requires
self.defined_types |= {alias}
def visit_Import(self, node):
"""Visit an import node."""
line = self._code_lines[node.lineno - 1]
module_name = line.split("import")[0].strip()
for name in node.names:
imported_name = name.name
if name.asname:
imported_name = name.asname + "::" + imported_name
self.imported_namespaces[imported_name] = module_name
def visit_ImportFrom(self, node):
"""Visit an from-import node."""
line = self._code_lines[node.lineno - 1]
module_name = line.split("from")[1].split("import")[0].strip()
for name in node.names:
imported_name = name.name
if name.asname:
imported_name = name.asname + "::" + imported_name
self.imported_names[imported_name] = module_name
def visit_Assign(self, node):
"""Visit an assignment node."""
line = self._code_lines[node.lineno - 1]
if SIG_COMMENT in line:
line = _RE_COMMENT_IN_STRING.sub("", line)
if (SIG_COMMENT not in line) and (not self.generic):
return
if SIG_COMMENT in line:
_, signature = line.split(SIG_COMMENT)
_, return_type, requires = parse_signature(signature)
self.required_types |= requires
parent = self._parents[-1]
for var in node.targets:
if isinstance(var, ast.Name):
name, p = var.id, parent
elif (
isinstance(var, ast.Attribute)
and isinstance(var.value, ast.Name)
and (var.value.id == "self")
):
name, p = var.attr, parent.parent
else:
name, p = None, None
if name is not None:
if self.generic:
return_type = "Any"
self.required_types.add(return_type)
stub_node = VariableNode(name, return_type)
p.add_variable(stub_node)
def get_function_node(self, node):
"""Process a function node.
:sig: (Union[ast.FunctionDef, ast.AsyncFunctionDef]) -> FunctionNode
:param node: Node to process.
:return: Generated function node in stub tree.
"""
decorators = []
for d in node.decorator_list:
if hasattr(d, "id"):
decorators.append(d.id)
elif hasattr(d, "func"):
decorators.append(d.func.id)
elif hasattr(d, "value"):
decorators.append(d.value.id + "." + d.attr)
signature = get_signature(node)
if signature is None:
parent = self._parents[-1]
if isinstance(parent, ClassNode) and (node.name == "__init__"):
signature = parent.signature
if (signature is None) and (not self.generic):
return None
param_names = [arg.arg if PY3 else arg.id for arg in node.args.args]
n_args = len(param_names)
if signature is None:
param_types, rtype, requires = ["Any"] * n_args, "Any", {"Any"}
else:
_logger.debug("parsing signature for %s", node.name)
param_types, rtype, requires = parse_signature(signature)
# TODO: only in classes
if ((n_args > 0) and (param_names[0] == "self")) or (
(n_args > 0) and (param_names[0] == "cls") and ("classmethod" in decorators)
):
if signature is None:
param_types[0] = ""
else:
param_types.insert(0, "")
_logger.debug("parameter types: %s", param_types)
_logger.debug("return type: %s", rtype)
_logger.debug("required types: %s", requires)
self.required_types |= requires
if node.args.vararg is not None:
param_names.append("*" + (node.args.vararg.arg if PY3 else node.args.vararg))
param_types.append("")
if node.args.kwarg is not None:
param_names.append("**" + (node.args.kwarg.arg if PY3 else node.args.kwarg))
param_types.append("")
kwonly_args = getattr(node.args, "kwonlyargs", [])
if len(kwonly_args) > 0:
param_names.extend([arg.arg for arg in kwonly_args])
if signature is None:
param_types.extend(["Any"] * len(kwonly_args))
if len(param_types) != len(param_names):
raise ValueError("Parameter names and types don't match: " + node.name)
param_locs = [(a.lineno, a.col_offset) for a in (node.args.args + kwonly_args)]
param_defaults = {
bisect(param_locs, (d.lineno, d.col_offset)) - 1 for d in node.args.defaults
}
kwonly_defaults = getattr(node.args, "kw_defaults", [])
for i, d in enumerate(kwonly_defaults):
if d is not None:
param_defaults.add(n_args + i)
params = [
(name, type_, i in param_defaults)
for i, (name, type_) in enumerate(zip(param_names, param_types))
]
if len(kwonly_args) > 0:
params.insert(n_args, ("*", "", False))
stub_node = FunctionNode(
node.name, parameters=params, rtype=rtype, decorators=decorators
)
self._parents[-1].add_child(stub_node)
self._parents.append(stub_node)
self.generic_visit(node)
del self._parents[-1]
return stub_node
def visit_FunctionDef(self, node):
"""Visit a function node."""
node = self.get_function_node(node)
if node is not None:
node._async = False
def visit_AsyncFunctionDef(self, node):
"""Visit an async function node."""
node = self.get_function_node(node)
if node is not None:
node._async = True
@staticmethod
def generate_import_from(module_, names):
"""Generate an import line.
:sig: (str, Set[str]) -> str
:param module_: Name of module to import the names from.
:param names: Names to import.
:return: Import line in stub code.
"""
regular_names = [n for n in names if "::" not in n]
as_names = [n for n in names if "::" in n]
line = ""
if len(regular_names) > 0:
slots = {"m": module_, "n": ", ".join(sorted(regular_names))}
line = "from %(m)s import %(n)s" % slots
if len(line) > LINE_LENGTH_LIMIT:
slots["n"] = INDENT + (",\n" + INDENT).join(sorted(regular_names)) + ","
line = "from %(m)s import (\n%(n)s\n)" % slots
if len(as_names) > 0:
line += "\n"
for as_name in as_names:
a, n = as_name.split("::")
line += "from %(m)s import %(n)s as %(a)s" % {"m": module_, "n": n, "a": a}
return line
def generate_stub(self):
"""Generate the stub code for this source.
:sig: () -> str
:return: Generated stub code.
"""
needed_types = self.required_types - BUILTIN_TYPES
needed_types -= self.defined_types
_logger.debug("defined types: %s", self.defined_types)
module_vars = {v.name for v in self.root.variables}
_logger.debug("module variables: %s", module_vars)
qualified_types = {n for n in needed_types if "." in n}
qualified_namespaces = {".".join(n.split(".")[:-1]) for n in qualified_types}
needed_namespaces = qualified_namespaces - module_vars
needed_types -= qualified_types
_logger.debug("needed namespaces: %s", needed_namespaces)
imported_names = {n.split("::")[0] for n in self.imported_names}
imported_types = imported_names & (needed_types | needed_namespaces)
needed_types -= imported_types
needed_namespaces -= imported_names
_logger.debug("used imported types: %s", imported_types)
try:
typing_mod = __import__("typing")
typing_types = {n for n in needed_types if hasattr(typing_mod, n)}
needed_types -= typing_types
_logger.debug("types from typing module: %s", typing_types)
except ImportError:
typing_types = set()
_logger.warn("typing module not installed")
if len(needed_types) > 0:
raise ValueError("Unknown types: " + ", ".join(needed_types))
out = StringIO()
started = False
if len(typing_types) > 0:
line = self.generate_import_from("typing", typing_types)
out.write(line + "\n")
started = True
if len(imported_types) > 0:
if started:
out.write("\n")
# preserve the import order in the source file
for name in self.imported_names:
if name.split("::")[0] in imported_types:
line = self.generate_import_from(self.imported_names[name], {name})
out.write(line + "\n")
started = True
if len(needed_namespaces) > 0:
if started:
out.write("\n")
as_names = {n.split("::")[0]: n for n in self.imported_namespaces if "::" in n}
for module_ in sorted(needed_namespaces):
if module_ in as_names:
a, n = as_names[module_].split("::")
out.write("import " + n + " as " + a + "\n")
else:
out.write("import " + module_ + "\n")
started = True
if len(self.aliases) > 0:
if started:
out.write("\n")
for alias, signature in self.aliases.items():
out.write("%s = %s\n" % (alias, signature))
started = True
if started:
out.write("\n")
stub_lines = self.root.get_code()
n_lines = len(stub_lines)
for line_no in range(n_lines):
prev_line = stub_lines[line_no - 1] if line_no > 0 else None
line = stub_lines[line_no]
next_line = stub_lines[line_no + 1] if line_no < (n_lines - 1) else None
if (
line.startswith("class ")
and (prev_line is not None)
and (
(not prev_line.startswith("class "))
or (next_line and next_line.startswith(" "))
)
):
out.write("\n")
if (
line.startswith("def ")
and (prev_line is not None)
and (prev_line.startswith((" ", "class ")))
):
out.write("\n")
out.write(line + "\n")
line_no += 1
return out.getvalue()
|
uyar/pygenstub | pygenstub.py | StubGenerator.generate_import_from | python | def generate_import_from(module_, names):
regular_names = [n for n in names if "::" not in n]
as_names = [n for n in names if "::" in n]
line = ""
if len(regular_names) > 0:
slots = {"m": module_, "n": ", ".join(sorted(regular_names))}
line = "from %(m)s import %(n)s" % slots
if len(line) > LINE_LENGTH_LIMIT:
slots["n"] = INDENT + (",\n" + INDENT).join(sorted(regular_names)) + ","
line = "from %(m)s import (\n%(n)s\n)" % slots
if len(as_names) > 0:
line += "\n"
for as_name in as_names:
a, n = as_name.split("::")
line += "from %(m)s import %(n)s as %(a)s" % {"m": module_, "n": n, "a": a}
return line | Generate an import line.
:sig: (str, Set[str]) -> str
:param module_: Name of module to import the names from.
:param names: Names to import.
:return: Import line in stub code. | train | https://github.com/uyar/pygenstub/blob/a6b18a823382d3c6be29c411fb33c58b6090d22c/pygenstub.py#L615-L639 | null | class StubGenerator(ast.NodeVisitor):
"""A transformer that generates stub declarations from a source code."""
def __init__(self, source, generic=False):
"""Initialize this stub generator.
:sig: (str, bool) -> None
:param source: Source code to generate the stub for.
:param generic: Whether to produce generic stubs.
"""
self.root = StubNode() # sig: StubNode
self.generic = generic # sig: bool
self.imported_namespaces = OrderedDict() # sig: OrderedDict[str, str]
self.imported_names = OrderedDict() # sig: OrderedDict[str, str]
self.defined_types = set() # sig: Set[str]
self.required_types = set() # sig: Set[str]
self.aliases = OrderedDict() # sig: OrderedDict[str, str]
self._parents = [self.root] # sig: List[StubNode]
self._code_lines = source.splitlines() # sig: List[str]
self.collect_aliases()
ast_tree = ast.parse(source)
self.visit(ast_tree)
def collect_aliases(self):
"""Collect the type aliases in the source.
:sig: () -> None
"""
self.aliases = get_aliases(self._code_lines)
for alias, signature in self.aliases.items():
_, _, requires = parse_signature(signature)
self.required_types |= requires
self.defined_types |= {alias}
def visit_Import(self, node):
"""Visit an import node."""
line = self._code_lines[node.lineno - 1]
module_name = line.split("import")[0].strip()
for name in node.names:
imported_name = name.name
if name.asname:
imported_name = name.asname + "::" + imported_name
self.imported_namespaces[imported_name] = module_name
def visit_ImportFrom(self, node):
"""Visit an from-import node."""
line = self._code_lines[node.lineno - 1]
module_name = line.split("from")[1].split("import")[0].strip()
for name in node.names:
imported_name = name.name
if name.asname:
imported_name = name.asname + "::" + imported_name
self.imported_names[imported_name] = module_name
def visit_Assign(self, node):
"""Visit an assignment node."""
line = self._code_lines[node.lineno - 1]
if SIG_COMMENT in line:
line = _RE_COMMENT_IN_STRING.sub("", line)
if (SIG_COMMENT not in line) and (not self.generic):
return
if SIG_COMMENT in line:
_, signature = line.split(SIG_COMMENT)
_, return_type, requires = parse_signature(signature)
self.required_types |= requires
parent = self._parents[-1]
for var in node.targets:
if isinstance(var, ast.Name):
name, p = var.id, parent
elif (
isinstance(var, ast.Attribute)
and isinstance(var.value, ast.Name)
and (var.value.id == "self")
):
name, p = var.attr, parent.parent
else:
name, p = None, None
if name is not None:
if self.generic:
return_type = "Any"
self.required_types.add(return_type)
stub_node = VariableNode(name, return_type)
p.add_variable(stub_node)
def get_function_node(self, node):
"""Process a function node.
:sig: (Union[ast.FunctionDef, ast.AsyncFunctionDef]) -> FunctionNode
:param node: Node to process.
:return: Generated function node in stub tree.
"""
decorators = []
for d in node.decorator_list:
if hasattr(d, "id"):
decorators.append(d.id)
elif hasattr(d, "func"):
decorators.append(d.func.id)
elif hasattr(d, "value"):
decorators.append(d.value.id + "." + d.attr)
signature = get_signature(node)
if signature is None:
parent = self._parents[-1]
if isinstance(parent, ClassNode) and (node.name == "__init__"):
signature = parent.signature
if (signature is None) and (not self.generic):
return None
param_names = [arg.arg if PY3 else arg.id for arg in node.args.args]
n_args = len(param_names)
if signature is None:
param_types, rtype, requires = ["Any"] * n_args, "Any", {"Any"}
else:
_logger.debug("parsing signature for %s", node.name)
param_types, rtype, requires = parse_signature(signature)
# TODO: only in classes
if ((n_args > 0) and (param_names[0] == "self")) or (
(n_args > 0) and (param_names[0] == "cls") and ("classmethod" in decorators)
):
if signature is None:
param_types[0] = ""
else:
param_types.insert(0, "")
_logger.debug("parameter types: %s", param_types)
_logger.debug("return type: %s", rtype)
_logger.debug("required types: %s", requires)
self.required_types |= requires
if node.args.vararg is not None:
param_names.append("*" + (node.args.vararg.arg if PY3 else node.args.vararg))
param_types.append("")
if node.args.kwarg is not None:
param_names.append("**" + (node.args.kwarg.arg if PY3 else node.args.kwarg))
param_types.append("")
kwonly_args = getattr(node.args, "kwonlyargs", [])
if len(kwonly_args) > 0:
param_names.extend([arg.arg for arg in kwonly_args])
if signature is None:
param_types.extend(["Any"] * len(kwonly_args))
if len(param_types) != len(param_names):
raise ValueError("Parameter names and types don't match: " + node.name)
param_locs = [(a.lineno, a.col_offset) for a in (node.args.args + kwonly_args)]
param_defaults = {
bisect(param_locs, (d.lineno, d.col_offset)) - 1 for d in node.args.defaults
}
kwonly_defaults = getattr(node.args, "kw_defaults", [])
for i, d in enumerate(kwonly_defaults):
if d is not None:
param_defaults.add(n_args + i)
params = [
(name, type_, i in param_defaults)
for i, (name, type_) in enumerate(zip(param_names, param_types))
]
if len(kwonly_args) > 0:
params.insert(n_args, ("*", "", False))
stub_node = FunctionNode(
node.name, parameters=params, rtype=rtype, decorators=decorators
)
self._parents[-1].add_child(stub_node)
self._parents.append(stub_node)
self.generic_visit(node)
del self._parents[-1]
return stub_node
def visit_FunctionDef(self, node):
"""Visit a function node."""
node = self.get_function_node(node)
if node is not None:
node._async = False
def visit_AsyncFunctionDef(self, node):
"""Visit an async function node."""
node = self.get_function_node(node)
if node is not None:
node._async = True
def visit_ClassDef(self, node):
"""Visit a class node."""
self.defined_types.add(node.name)
bases = []
for n in node.bases:
base_parts = []
while True:
if not isinstance(n, ast.Attribute):
base_parts.append(n.id)
break
else:
base_parts.append(n.attr)
n = n.value
bases.append(".".join(base_parts[::-1]))
self.required_types |= set(bases)
signature = get_signature(node)
stub_node = ClassNode(node.name, bases=bases, signature=signature)
self._parents[-1].add_child(stub_node)
self._parents.append(stub_node)
self.generic_visit(node)
del self._parents[-1]
@staticmethod
def generate_stub(self):
"""Generate the stub code for this source.
:sig: () -> str
:return: Generated stub code.
"""
needed_types = self.required_types - BUILTIN_TYPES
needed_types -= self.defined_types
_logger.debug("defined types: %s", self.defined_types)
module_vars = {v.name for v in self.root.variables}
_logger.debug("module variables: %s", module_vars)
qualified_types = {n for n in needed_types if "." in n}
qualified_namespaces = {".".join(n.split(".")[:-1]) for n in qualified_types}
needed_namespaces = qualified_namespaces - module_vars
needed_types -= qualified_types
_logger.debug("needed namespaces: %s", needed_namespaces)
imported_names = {n.split("::")[0] for n in self.imported_names}
imported_types = imported_names & (needed_types | needed_namespaces)
needed_types -= imported_types
needed_namespaces -= imported_names
_logger.debug("used imported types: %s", imported_types)
try:
typing_mod = __import__("typing")
typing_types = {n for n in needed_types if hasattr(typing_mod, n)}
needed_types -= typing_types
_logger.debug("types from typing module: %s", typing_types)
except ImportError:
typing_types = set()
_logger.warn("typing module not installed")
if len(needed_types) > 0:
raise ValueError("Unknown types: " + ", ".join(needed_types))
out = StringIO()
started = False
if len(typing_types) > 0:
line = self.generate_import_from("typing", typing_types)
out.write(line + "\n")
started = True
if len(imported_types) > 0:
if started:
out.write("\n")
# preserve the import order in the source file
for name in self.imported_names:
if name.split("::")[0] in imported_types:
line = self.generate_import_from(self.imported_names[name], {name})
out.write(line + "\n")
started = True
if len(needed_namespaces) > 0:
if started:
out.write("\n")
as_names = {n.split("::")[0]: n for n in self.imported_namespaces if "::" in n}
for module_ in sorted(needed_namespaces):
if module_ in as_names:
a, n = as_names[module_].split("::")
out.write("import " + n + " as " + a + "\n")
else:
out.write("import " + module_ + "\n")
started = True
if len(self.aliases) > 0:
if started:
out.write("\n")
for alias, signature in self.aliases.items():
out.write("%s = %s\n" % (alias, signature))
started = True
if started:
out.write("\n")
stub_lines = self.root.get_code()
n_lines = len(stub_lines)
for line_no in range(n_lines):
prev_line = stub_lines[line_no - 1] if line_no > 0 else None
line = stub_lines[line_no]
next_line = stub_lines[line_no + 1] if line_no < (n_lines - 1) else None
if (
line.startswith("class ")
and (prev_line is not None)
and (
(not prev_line.startswith("class "))
or (next_line and next_line.startswith(" "))
)
):
out.write("\n")
if (
line.startswith("def ")
and (prev_line is not None)
and (prev_line.startswith((" ", "class ")))
):
out.write("\n")
out.write(line + "\n")
line_no += 1
return out.getvalue()
|
uyar/pygenstub | pygenstub.py | StubGenerator.generate_stub | python | def generate_stub(self):
needed_types = self.required_types - BUILTIN_TYPES
needed_types -= self.defined_types
_logger.debug("defined types: %s", self.defined_types)
module_vars = {v.name for v in self.root.variables}
_logger.debug("module variables: %s", module_vars)
qualified_types = {n for n in needed_types if "." in n}
qualified_namespaces = {".".join(n.split(".")[:-1]) for n in qualified_types}
needed_namespaces = qualified_namespaces - module_vars
needed_types -= qualified_types
_logger.debug("needed namespaces: %s", needed_namespaces)
imported_names = {n.split("::")[0] for n in self.imported_names}
imported_types = imported_names & (needed_types | needed_namespaces)
needed_types -= imported_types
needed_namespaces -= imported_names
_logger.debug("used imported types: %s", imported_types)
try:
typing_mod = __import__("typing")
typing_types = {n for n in needed_types if hasattr(typing_mod, n)}
needed_types -= typing_types
_logger.debug("types from typing module: %s", typing_types)
except ImportError:
typing_types = set()
_logger.warn("typing module not installed")
if len(needed_types) > 0:
raise ValueError("Unknown types: " + ", ".join(needed_types))
out = StringIO()
started = False
if len(typing_types) > 0:
line = self.generate_import_from("typing", typing_types)
out.write(line + "\n")
started = True
if len(imported_types) > 0:
if started:
out.write("\n")
# preserve the import order in the source file
for name in self.imported_names:
if name.split("::")[0] in imported_types:
line = self.generate_import_from(self.imported_names[name], {name})
out.write(line + "\n")
started = True
if len(needed_namespaces) > 0:
if started:
out.write("\n")
as_names = {n.split("::")[0]: n for n in self.imported_namespaces if "::" in n}
for module_ in sorted(needed_namespaces):
if module_ in as_names:
a, n = as_names[module_].split("::")
out.write("import " + n + " as " + a + "\n")
else:
out.write("import " + module_ + "\n")
started = True
if len(self.aliases) > 0:
if started:
out.write("\n")
for alias, signature in self.aliases.items():
out.write("%s = %s\n" % (alias, signature))
started = True
if started:
out.write("\n")
stub_lines = self.root.get_code()
n_lines = len(stub_lines)
for line_no in range(n_lines):
prev_line = stub_lines[line_no - 1] if line_no > 0 else None
line = stub_lines[line_no]
next_line = stub_lines[line_no + 1] if line_no < (n_lines - 1) else None
if (
line.startswith("class ")
and (prev_line is not None)
and (
(not prev_line.startswith("class "))
or (next_line and next_line.startswith(" "))
)
):
out.write("\n")
if (
line.startswith("def ")
and (prev_line is not None)
and (prev_line.startswith((" ", "class ")))
):
out.write("\n")
out.write(line + "\n")
line_no += 1
return out.getvalue() | Generate the stub code for this source.
:sig: () -> str
:return: Generated stub code. | train | https://github.com/uyar/pygenstub/blob/a6b18a823382d3c6be29c411fb33c58b6090d22c/pygenstub.py#L641-L742 | [
"def generate_import_from(module_, names):\n \"\"\"Generate an import line.\n\n :sig: (str, Set[str]) -> str\n :param module_: Name of module to import the names from.\n :param names: Names to import.\n :return: Import line in stub code.\n \"\"\"\n regular_names = [n for n in names if \"::\" no... | class StubGenerator(ast.NodeVisitor):
"""A transformer that generates stub declarations from a source code."""
def __init__(self, source, generic=False):
"""Initialize this stub generator.
:sig: (str, bool) -> None
:param source: Source code to generate the stub for.
:param generic: Whether to produce generic stubs.
"""
self.root = StubNode() # sig: StubNode
self.generic = generic # sig: bool
self.imported_namespaces = OrderedDict() # sig: OrderedDict[str, str]
self.imported_names = OrderedDict() # sig: OrderedDict[str, str]
self.defined_types = set() # sig: Set[str]
self.required_types = set() # sig: Set[str]
self.aliases = OrderedDict() # sig: OrderedDict[str, str]
self._parents = [self.root] # sig: List[StubNode]
self._code_lines = source.splitlines() # sig: List[str]
self.collect_aliases()
ast_tree = ast.parse(source)
self.visit(ast_tree)
def collect_aliases(self):
"""Collect the type aliases in the source.
:sig: () -> None
"""
self.aliases = get_aliases(self._code_lines)
for alias, signature in self.aliases.items():
_, _, requires = parse_signature(signature)
self.required_types |= requires
self.defined_types |= {alias}
def visit_Import(self, node):
"""Visit an import node."""
line = self._code_lines[node.lineno - 1]
module_name = line.split("import")[0].strip()
for name in node.names:
imported_name = name.name
if name.asname:
imported_name = name.asname + "::" + imported_name
self.imported_namespaces[imported_name] = module_name
def visit_ImportFrom(self, node):
"""Visit an from-import node."""
line = self._code_lines[node.lineno - 1]
module_name = line.split("from")[1].split("import")[0].strip()
for name in node.names:
imported_name = name.name
if name.asname:
imported_name = name.asname + "::" + imported_name
self.imported_names[imported_name] = module_name
def visit_Assign(self, node):
"""Visit an assignment node."""
line = self._code_lines[node.lineno - 1]
if SIG_COMMENT in line:
line = _RE_COMMENT_IN_STRING.sub("", line)
if (SIG_COMMENT not in line) and (not self.generic):
return
if SIG_COMMENT in line:
_, signature = line.split(SIG_COMMENT)
_, return_type, requires = parse_signature(signature)
self.required_types |= requires
parent = self._parents[-1]
for var in node.targets:
if isinstance(var, ast.Name):
name, p = var.id, parent
elif (
isinstance(var, ast.Attribute)
and isinstance(var.value, ast.Name)
and (var.value.id == "self")
):
name, p = var.attr, parent.parent
else:
name, p = None, None
if name is not None:
if self.generic:
return_type = "Any"
self.required_types.add(return_type)
stub_node = VariableNode(name, return_type)
p.add_variable(stub_node)
def get_function_node(self, node):
"""Process a function node.
:sig: (Union[ast.FunctionDef, ast.AsyncFunctionDef]) -> FunctionNode
:param node: Node to process.
:return: Generated function node in stub tree.
"""
decorators = []
for d in node.decorator_list:
if hasattr(d, "id"):
decorators.append(d.id)
elif hasattr(d, "func"):
decorators.append(d.func.id)
elif hasattr(d, "value"):
decorators.append(d.value.id + "." + d.attr)
signature = get_signature(node)
if signature is None:
parent = self._parents[-1]
if isinstance(parent, ClassNode) and (node.name == "__init__"):
signature = parent.signature
if (signature is None) and (not self.generic):
return None
param_names = [arg.arg if PY3 else arg.id for arg in node.args.args]
n_args = len(param_names)
if signature is None:
param_types, rtype, requires = ["Any"] * n_args, "Any", {"Any"}
else:
_logger.debug("parsing signature for %s", node.name)
param_types, rtype, requires = parse_signature(signature)
# TODO: only in classes
if ((n_args > 0) and (param_names[0] == "self")) or (
(n_args > 0) and (param_names[0] == "cls") and ("classmethod" in decorators)
):
if signature is None:
param_types[0] = ""
else:
param_types.insert(0, "")
_logger.debug("parameter types: %s", param_types)
_logger.debug("return type: %s", rtype)
_logger.debug("required types: %s", requires)
self.required_types |= requires
if node.args.vararg is not None:
param_names.append("*" + (node.args.vararg.arg if PY3 else node.args.vararg))
param_types.append("")
if node.args.kwarg is not None:
param_names.append("**" + (node.args.kwarg.arg if PY3 else node.args.kwarg))
param_types.append("")
kwonly_args = getattr(node.args, "kwonlyargs", [])
if len(kwonly_args) > 0:
param_names.extend([arg.arg for arg in kwonly_args])
if signature is None:
param_types.extend(["Any"] * len(kwonly_args))
if len(param_types) != len(param_names):
raise ValueError("Parameter names and types don't match: " + node.name)
param_locs = [(a.lineno, a.col_offset) for a in (node.args.args + kwonly_args)]
param_defaults = {
bisect(param_locs, (d.lineno, d.col_offset)) - 1 for d in node.args.defaults
}
kwonly_defaults = getattr(node.args, "kw_defaults", [])
for i, d in enumerate(kwonly_defaults):
if d is not None:
param_defaults.add(n_args + i)
params = [
(name, type_, i in param_defaults)
for i, (name, type_) in enumerate(zip(param_names, param_types))
]
if len(kwonly_args) > 0:
params.insert(n_args, ("*", "", False))
stub_node = FunctionNode(
node.name, parameters=params, rtype=rtype, decorators=decorators
)
self._parents[-1].add_child(stub_node)
self._parents.append(stub_node)
self.generic_visit(node)
del self._parents[-1]
return stub_node
def visit_FunctionDef(self, node):
"""Visit a function node."""
node = self.get_function_node(node)
if node is not None:
node._async = False
def visit_AsyncFunctionDef(self, node):
"""Visit an async function node."""
node = self.get_function_node(node)
if node is not None:
node._async = True
def visit_ClassDef(self, node):
"""Visit a class node."""
self.defined_types.add(node.name)
bases = []
for n in node.bases:
base_parts = []
while True:
if not isinstance(n, ast.Attribute):
base_parts.append(n.id)
break
else:
base_parts.append(n.attr)
n = n.value
bases.append(".".join(base_parts[::-1]))
self.required_types |= set(bases)
signature = get_signature(node)
stub_node = ClassNode(node.name, bases=bases, signature=signature)
self._parents[-1].add_child(stub_node)
self._parents.append(stub_node)
self.generic_visit(node)
del self._parents[-1]
@staticmethod
def generate_import_from(module_, names):
"""Generate an import line.
:sig: (str, Set[str]) -> str
:param module_: Name of module to import the names from.
:param names: Names to import.
:return: Import line in stub code.
"""
regular_names = [n for n in names if "::" not in n]
as_names = [n for n in names if "::" in n]
line = ""
if len(regular_names) > 0:
slots = {"m": module_, "n": ", ".join(sorted(regular_names))}
line = "from %(m)s import %(n)s" % slots
if len(line) > LINE_LENGTH_LIMIT:
slots["n"] = INDENT + (",\n" + INDENT).join(sorted(regular_names)) + ","
line = "from %(m)s import (\n%(n)s\n)" % slots
if len(as_names) > 0:
line += "\n"
for as_name in as_names:
a, n = as_name.split("::")
line += "from %(m)s import %(n)s as %(a)s" % {"m": module_, "n": n, "a": a}
return line
|
rfverbruggen/rachiopy | rachiopy/__init__.py | Rachio._request | python | def _request(self, path, method, body=None):
url = '/'.join([_SERVER, path])
(resp, content) = _HTTP.request(url, method,
headers=self._headers, body=body)
content_type = resp.get('content-type')
if content_type and content_type.startswith('application/json'):
content = json.loads(content.decode('UTF-8'))
return (resp, content) | Make a request from the API. | train | https://github.com/rfverbruggen/rachiopy/blob/c91abc9984f0f453e60fa905285c1b640c3390ae/rachiopy/__init__.py#L32-L42 | null | class Rachio(object):
"""Represent the Rachio API."""
def __init__(self, authtoken):
"""Rachio class initializer."""
self._headers = {'Content-Type': 'application/json',
'Authorization': 'Bearer %s' % authtoken}
self.person = Person(self)
self.device = Device(self)
self.zone = Zone(self)
self.schedulerule = Schedulerule(self)
self.flexschedulerule = FlexSchedulerule(self)
self.notification = Notification(self)
def get(self, path):
"""Make a GET request from the API."""
return self._request(path, 'GET')
def delete(self, path):
"""Make a DELETE request from the API."""
return self._request(path, 'DELETE')
def put(self, path, payload):
"""Make a PUT request from the API."""
body = json.dumps(payload)
return self._request(path, 'PUT', body)
def post(self, path, payload):
"""Make a POST request from the API."""
body = json.dumps(payload)
return self._request(path, 'POST', body)
|
rfverbruggen/rachiopy | rachiopy/__init__.py | Rachio.put | python | def put(self, path, payload):
body = json.dumps(payload)
return self._request(path, 'PUT', body) | Make a PUT request from the API. | train | https://github.com/rfverbruggen/rachiopy/blob/c91abc9984f0f453e60fa905285c1b640c3390ae/rachiopy/__init__.py#L52-L55 | [
"def _request(self, path, method, body=None):\n \"\"\"Make a request from the API.\"\"\"\n url = '/'.join([_SERVER, path])\n (resp, content) = _HTTP.request(url, method,\n headers=self._headers, body=body)\n\n content_type = resp.get('content-type')\n if content_typ... | class Rachio(object):
"""Represent the Rachio API."""
def __init__(self, authtoken):
"""Rachio class initializer."""
self._headers = {'Content-Type': 'application/json',
'Authorization': 'Bearer %s' % authtoken}
self.person = Person(self)
self.device = Device(self)
self.zone = Zone(self)
self.schedulerule = Schedulerule(self)
self.flexschedulerule = FlexSchedulerule(self)
self.notification = Notification(self)
def _request(self, path, method, body=None):
"""Make a request from the API."""
url = '/'.join([_SERVER, path])
(resp, content) = _HTTP.request(url, method,
headers=self._headers, body=body)
content_type = resp.get('content-type')
if content_type and content_type.startswith('application/json'):
content = json.loads(content.decode('UTF-8'))
return (resp, content)
def get(self, path):
"""Make a GET request from the API."""
return self._request(path, 'GET')
def delete(self, path):
"""Make a DELETE request from the API."""
return self._request(path, 'DELETE')
def post(self, path, payload):
"""Make a POST request from the API."""
body = json.dumps(payload)
return self._request(path, 'POST', body)
|
rfverbruggen/rachiopy | rachiopy/__init__.py | Rachio.post | python | def post(self, path, payload):
body = json.dumps(payload)
return self._request(path, 'POST', body) | Make a POST request from the API. | train | https://github.com/rfverbruggen/rachiopy/blob/c91abc9984f0f453e60fa905285c1b640c3390ae/rachiopy/__init__.py#L57-L60 | [
"def _request(self, path, method, body=None):\n \"\"\"Make a request from the API.\"\"\"\n url = '/'.join([_SERVER, path])\n (resp, content) = _HTTP.request(url, method,\n headers=self._headers, body=body)\n\n content_type = resp.get('content-type')\n if content_typ... | class Rachio(object):
"""Represent the Rachio API."""
def __init__(self, authtoken):
"""Rachio class initializer."""
self._headers = {'Content-Type': 'application/json',
'Authorization': 'Bearer %s' % authtoken}
self.person = Person(self)
self.device = Device(self)
self.zone = Zone(self)
self.schedulerule = Schedulerule(self)
self.flexschedulerule = FlexSchedulerule(self)
self.notification = Notification(self)
def _request(self, path, method, body=None):
"""Make a request from the API."""
url = '/'.join([_SERVER, path])
(resp, content) = _HTTP.request(url, method,
headers=self._headers, body=body)
content_type = resp.get('content-type')
if content_type and content_type.startswith('application/json'):
content = json.loads(content.decode('UTF-8'))
return (resp, content)
def get(self, path):
"""Make a GET request from the API."""
return self._request(path, 'GET')
def delete(self, path):
"""Make a DELETE request from the API."""
return self._request(path, 'DELETE')
def put(self, path, payload):
"""Make a PUT request from the API."""
body = json.dumps(payload)
return self._request(path, 'PUT', body)
|
rfverbruggen/rachiopy | rachiopy/schedulerule.py | Schedulerule.skip | python | def skip(self, sched_rule_id):
path = 'schedulerule/skip'
payload = {'id': sched_rule_id}
return self.rachio.put(path, payload) | Skip a schedule rule (watering time). | train | https://github.com/rfverbruggen/rachiopy/blob/c91abc9984f0f453e60fa905285c1b640c3390ae/rachiopy/schedulerule.py#L11-L15 | null | class Schedulerule(object):
"""Schedulerule class with methods for /schedulerule/ API calls."""
def __init__(self, rachio):
"""Schedulerule class initializer."""
self.rachio = rachio
def start(self, sched_rule_id):
"""Start a schedule rule (watering time)."""
path = 'schedulerule/start'
payload = {'id': sched_rule_id}
return self.rachio.put(path, payload)
def seasonalAdjustment(self, sched_rule_id, adjustment):
"""Seasonal adjustment for a schedule rule (watering time).
This adjustment amount will be applied to the overall run time of the
selected schedule while overriding any current adjustments.
"""
path = 'schedulerule/seasonal_adjustment'
payload = {'id': sched_rule_id, 'adjustment': adjustment}
return self.rachio.put(path, payload)
def get(self, sched_rule_id):
"""Retrieve the information for a scheduleRule entity."""
path = '/'.join(['schedulerule', sched_rule_id])
return self.rachio.get(path)
|
rfverbruggen/rachiopy | rachiopy/schedulerule.py | Schedulerule.start | python | def start(self, sched_rule_id):
path = 'schedulerule/start'
payload = {'id': sched_rule_id}
return self.rachio.put(path, payload) | Start a schedule rule (watering time). | train | https://github.com/rfverbruggen/rachiopy/blob/c91abc9984f0f453e60fa905285c1b640c3390ae/rachiopy/schedulerule.py#L17-L21 | null | class Schedulerule(object):
"""Schedulerule class with methods for /schedulerule/ API calls."""
def __init__(self, rachio):
"""Schedulerule class initializer."""
self.rachio = rachio
def skip(self, sched_rule_id):
"""Skip a schedule rule (watering time)."""
path = 'schedulerule/skip'
payload = {'id': sched_rule_id}
return self.rachio.put(path, payload)
def seasonalAdjustment(self, sched_rule_id, adjustment):
"""Seasonal adjustment for a schedule rule (watering time).
This adjustment amount will be applied to the overall run time of the
selected schedule while overriding any current adjustments.
"""
path = 'schedulerule/seasonal_adjustment'
payload = {'id': sched_rule_id, 'adjustment': adjustment}
return self.rachio.put(path, payload)
def get(self, sched_rule_id):
"""Retrieve the information for a scheduleRule entity."""
path = '/'.join(['schedulerule', sched_rule_id])
return self.rachio.get(path)
|
rfverbruggen/rachiopy | rachiopy/schedulerule.py | Schedulerule.seasonalAdjustment | python | def seasonalAdjustment(self, sched_rule_id, adjustment):
path = 'schedulerule/seasonal_adjustment'
payload = {'id': sched_rule_id, 'adjustment': adjustment}
return self.rachio.put(path, payload) | Seasonal adjustment for a schedule rule (watering time).
This adjustment amount will be applied to the overall run time of the
selected schedule while overriding any current adjustments. | train | https://github.com/rfverbruggen/rachiopy/blob/c91abc9984f0f453e60fa905285c1b640c3390ae/rachiopy/schedulerule.py#L23-L31 | null | class Schedulerule(object):
"""Schedulerule class with methods for /schedulerule/ API calls."""
def __init__(self, rachio):
"""Schedulerule class initializer."""
self.rachio = rachio
def skip(self, sched_rule_id):
"""Skip a schedule rule (watering time)."""
path = 'schedulerule/skip'
payload = {'id': sched_rule_id}
return self.rachio.put(path, payload)
def start(self, sched_rule_id):
"""Start a schedule rule (watering time)."""
path = 'schedulerule/start'
payload = {'id': sched_rule_id}
return self.rachio.put(path, payload)
def get(self, sched_rule_id):
"""Retrieve the information for a scheduleRule entity."""
path = '/'.join(['schedulerule', sched_rule_id])
return self.rachio.get(path)
|
rfverbruggen/rachiopy | rachiopy/schedulerule.py | Schedulerule.get | python | def get(self, sched_rule_id):
path = '/'.join(['schedulerule', sched_rule_id])
return self.rachio.get(path) | Retrieve the information for a scheduleRule entity. | train | https://github.com/rfverbruggen/rachiopy/blob/c91abc9984f0f453e60fa905285c1b640c3390ae/rachiopy/schedulerule.py#L33-L36 | null | class Schedulerule(object):
"""Schedulerule class with methods for /schedulerule/ API calls."""
def __init__(self, rachio):
"""Schedulerule class initializer."""
self.rachio = rachio
def skip(self, sched_rule_id):
"""Skip a schedule rule (watering time)."""
path = 'schedulerule/skip'
payload = {'id': sched_rule_id}
return self.rachio.put(path, payload)
def start(self, sched_rule_id):
"""Start a schedule rule (watering time)."""
path = 'schedulerule/start'
payload = {'id': sched_rule_id}
return self.rachio.put(path, payload)
def seasonalAdjustment(self, sched_rule_id, adjustment):
"""Seasonal adjustment for a schedule rule (watering time).
This adjustment amount will be applied to the overall run time of the
selected schedule while overriding any current adjustments.
"""
path = 'schedulerule/seasonal_adjustment'
payload = {'id': sched_rule_id, 'adjustment': adjustment}
return self.rachio.put(path, payload)
|
rfverbruggen/rachiopy | rachiopy/zone.py | Zone.start | python | def start(self, zone_id, duration):
path = 'zone/start'
payload = {'id': zone_id, 'duration': duration}
return self.rachio.put(path, payload) | Start a zone. | train | https://github.com/rfverbruggen/rachiopy/blob/c91abc9984f0f453e60fa905285c1b640c3390ae/rachiopy/zone.py#L11-L15 | null | class Zone(object):
"""Zone class with methods for /zone/ API calls."""
def __init__(self, rachio):
"""Zone class initializer."""
self.rachio = rachio
def startMultiple(self, zones):
"""Start multiple zones."""
path = 'zone/start_multiple'
payload = {'zones': zones}
return self.rachio.put(path, payload)
def schedule(self):
"""Create an empty zone schedule."""
return ZoneSchedule(self)
def get(self, zone_id):
"""Retrieve the information for a zone entity."""
path = '/'.join(['zone', zone_id])
return self.rachio.get(path)
|
rfverbruggen/rachiopy | rachiopy/zone.py | Zone.startMultiple | python | def startMultiple(self, zones):
path = 'zone/start_multiple'
payload = {'zones': zones}
return self.rachio.put(path, payload) | Start multiple zones. | train | https://github.com/rfverbruggen/rachiopy/blob/c91abc9984f0f453e60fa905285c1b640c3390ae/rachiopy/zone.py#L17-L21 | null | class Zone(object):
"""Zone class with methods for /zone/ API calls."""
def __init__(self, rachio):
"""Zone class initializer."""
self.rachio = rachio
def start(self, zone_id, duration):
"""Start a zone."""
path = 'zone/start'
payload = {'id': zone_id, 'duration': duration}
return self.rachio.put(path, payload)
def schedule(self):
"""Create an empty zone schedule."""
return ZoneSchedule(self)
def get(self, zone_id):
"""Retrieve the information for a zone entity."""
path = '/'.join(['zone', zone_id])
return self.rachio.get(path)
|
rfverbruggen/rachiopy | rachiopy/zone.py | Zone.get | python | def get(self, zone_id):
path = '/'.join(['zone', zone_id])
return self.rachio.get(path) | Retrieve the information for a zone entity. | train | https://github.com/rfverbruggen/rachiopy/blob/c91abc9984f0f453e60fa905285c1b640c3390ae/rachiopy/zone.py#L27-L30 | null | class Zone(object):
"""Zone class with methods for /zone/ API calls."""
def __init__(self, rachio):
"""Zone class initializer."""
self.rachio = rachio
def start(self, zone_id, duration):
"""Start a zone."""
path = 'zone/start'
payload = {'id': zone_id, 'duration': duration}
return self.rachio.put(path, payload)
def startMultiple(self, zones):
"""Start multiple zones."""
path = 'zone/start_multiple'
payload = {'zones': zones}
return self.rachio.put(path, payload)
def schedule(self):
"""Create an empty zone schedule."""
return ZoneSchedule(self)
|
rfverbruggen/rachiopy | rachiopy/zone.py | ZoneSchedule.start | python | def start(self):
zones = [{"id": data[0], "duration": data[1], "sortOrder": count}
for (count, data) in enumerate(self._zones, 1)]
self._api.startMultiple(zones) | Start the schedule. | train | https://github.com/rfverbruggen/rachiopy/blob/c91abc9984f0f453e60fa905285c1b640c3390ae/rachiopy/zone.py#L45-L49 | null | class ZoneSchedule(object):
"""Help with starting multiple zones."""
def __init__(self, zone_api):
"""Zoneschedule class initializer."""
self._api = zone_api
self._zones = []
def enqueue(self, zone_id, duration):
"""Add a zone and duration to the schedule."""
self._zones.append((zone_id, duration))
def __enter__(self):
"""Allow a schedule to be created in a with block."""
return self
def __exit__(self, exc_type, exc_val, exc_tb):
"""Allow the schedule to be executed by leaving with block."""
self.start()
|
rfverbruggen/rachiopy | rachiopy/flexschedulerule.py | FlexSchedulerule.get | python | def get(self, flex_sched_rule_id):
path = '/'.join(['flexschedulerule', flex_sched_rule_id])
return self.rachio.get(path) | Retrieve the information for a flexscheduleRule entity. | train | https://github.com/rfverbruggen/rachiopy/blob/c91abc9984f0f453e60fa905285c1b640c3390ae/rachiopy/flexschedulerule.py#L11-L14 | null | class FlexSchedulerule(object):
"""FlexSchedulerule class with methods for /flexschedulerule/ calls."""
def __init__(self, rachio):
"""Flexschedulerule class initializer."""
self.rachio = rachio
|
rfverbruggen/rachiopy | rachiopy/device.py | Device.get | python | def get(self, dev_id):
path = '/'.join(['device', dev_id])
return self.rachio.get(path) | Retrieve the information for a device entity. | train | https://github.com/rfverbruggen/rachiopy/blob/c91abc9984f0f453e60fa905285c1b640c3390ae/rachiopy/device.py#L11-L14 | null | class Device(object):
"""Device class with /device/ API calls."""
def __init__(self, rachio):
"""Device class initializer."""
self.rachio = rachio
def getCurrentSchedule(self, dev_id):
"""Retrieve current schedule running, if any."""
path = '/'.join(['device', dev_id, 'current_schedule'])
return self.rachio.get(path)
def getEvent(self, dev_id, starttime, endtime):
"""Retrieve events for a device entity."""
path = 'device/%s/event?startTime=%s&endTime=%s' % \
(dev_id, starttime, endtime)
return self.rachio.get(path)
def getScheduleItem(self, dev_id):
"""Retrieve the next two weeks of schedule items for a device."""
path = '/'.join(['device', dev_id, 'scheduleitem'])
return self.rachio.get(path)
def getForecast(self, dev_id, units):
"""Retrieve current and predicted forecast."""
assert units in ['US', 'METRIC'], 'units must be either US or METRIC'
path = 'device/%s/forecast?units=%s' % (dev_id, units)
return self.rachio.get(path)
def stopWater(self, dev_id):
"""Stop all watering on device."""
path = 'device/stop_water'
payload = {'id': dev_id}
return self.rachio.put(path, payload)
def rainDelay(self, dev_id, duration):
"""Rain delay device."""
path = 'device/rain_delay'
payload = {'id': dev_id, 'duration': duration}
return self.rachio.put(path, payload)
def on(self, dev_id):
"""Turn ON all features of the device.
schedules, weather intelligence, water budget, etc.
"""
path = 'device/on'
payload = {'id': dev_id}
return self.rachio.put(path, payload)
def off(self, dev_id):
"""Turn OFF all features of the device.
schedules, weather intelligence, water budget, etc.
"""
path = 'device/off'
payload = {'id': dev_id}
return self.rachio.put(path, payload)
|
rfverbruggen/rachiopy | rachiopy/device.py | Device.getEvent | python | def getEvent(self, dev_id, starttime, endtime):
path = 'device/%s/event?startTime=%s&endTime=%s' % \
(dev_id, starttime, endtime)
return self.rachio.get(path) | Retrieve events for a device entity. | train | https://github.com/rfverbruggen/rachiopy/blob/c91abc9984f0f453e60fa905285c1b640c3390ae/rachiopy/device.py#L21-L25 | null | class Device(object):
"""Device class with /device/ API calls."""
def __init__(self, rachio):
"""Device class initializer."""
self.rachio = rachio
def get(self, dev_id):
"""Retrieve the information for a device entity."""
path = '/'.join(['device', dev_id])
return self.rachio.get(path)
def getCurrentSchedule(self, dev_id):
"""Retrieve current schedule running, if any."""
path = '/'.join(['device', dev_id, 'current_schedule'])
return self.rachio.get(path)
def getScheduleItem(self, dev_id):
"""Retrieve the next two weeks of schedule items for a device."""
path = '/'.join(['device', dev_id, 'scheduleitem'])
return self.rachio.get(path)
def getForecast(self, dev_id, units):
"""Retrieve current and predicted forecast."""
assert units in ['US', 'METRIC'], 'units must be either US or METRIC'
path = 'device/%s/forecast?units=%s' % (dev_id, units)
return self.rachio.get(path)
def stopWater(self, dev_id):
"""Stop all watering on device."""
path = 'device/stop_water'
payload = {'id': dev_id}
return self.rachio.put(path, payload)
def rainDelay(self, dev_id, duration):
"""Rain delay device."""
path = 'device/rain_delay'
payload = {'id': dev_id, 'duration': duration}
return self.rachio.put(path, payload)
def on(self, dev_id):
"""Turn ON all features of the device.
schedules, weather intelligence, water budget, etc.
"""
path = 'device/on'
payload = {'id': dev_id}
return self.rachio.put(path, payload)
def off(self, dev_id):
"""Turn OFF all features of the device.
schedules, weather intelligence, water budget, etc.
"""
path = 'device/off'
payload = {'id': dev_id}
return self.rachio.put(path, payload)
|
rfverbruggen/rachiopy | rachiopy/device.py | Device.getForecast | python | def getForecast(self, dev_id, units):
assert units in ['US', 'METRIC'], 'units must be either US or METRIC'
path = 'device/%s/forecast?units=%s' % (dev_id, units)
return self.rachio.get(path) | Retrieve current and predicted forecast. | train | https://github.com/rfverbruggen/rachiopy/blob/c91abc9984f0f453e60fa905285c1b640c3390ae/rachiopy/device.py#L32-L36 | null | class Device(object):
"""Device class with /device/ API calls."""
def __init__(self, rachio):
"""Device class initializer."""
self.rachio = rachio
def get(self, dev_id):
"""Retrieve the information for a device entity."""
path = '/'.join(['device', dev_id])
return self.rachio.get(path)
def getCurrentSchedule(self, dev_id):
"""Retrieve current schedule running, if any."""
path = '/'.join(['device', dev_id, 'current_schedule'])
return self.rachio.get(path)
def getEvent(self, dev_id, starttime, endtime):
"""Retrieve events for a device entity."""
path = 'device/%s/event?startTime=%s&endTime=%s' % \
(dev_id, starttime, endtime)
return self.rachio.get(path)
def getScheduleItem(self, dev_id):
"""Retrieve the next two weeks of schedule items for a device."""
path = '/'.join(['device', dev_id, 'scheduleitem'])
return self.rachio.get(path)
def stopWater(self, dev_id):
"""Stop all watering on device."""
path = 'device/stop_water'
payload = {'id': dev_id}
return self.rachio.put(path, payload)
def rainDelay(self, dev_id, duration):
"""Rain delay device."""
path = 'device/rain_delay'
payload = {'id': dev_id, 'duration': duration}
return self.rachio.put(path, payload)
def on(self, dev_id):
"""Turn ON all features of the device.
schedules, weather intelligence, water budget, etc.
"""
path = 'device/on'
payload = {'id': dev_id}
return self.rachio.put(path, payload)
def off(self, dev_id):
"""Turn OFF all features of the device.
schedules, weather intelligence, water budget, etc.
"""
path = 'device/off'
payload = {'id': dev_id}
return self.rachio.put(path, payload)
|
rfverbruggen/rachiopy | rachiopy/device.py | Device.stopWater | python | def stopWater(self, dev_id):
path = 'device/stop_water'
payload = {'id': dev_id}
return self.rachio.put(path, payload) | Stop all watering on device. | train | https://github.com/rfverbruggen/rachiopy/blob/c91abc9984f0f453e60fa905285c1b640c3390ae/rachiopy/device.py#L38-L42 | null | class Device(object):
"""Device class with /device/ API calls."""
def __init__(self, rachio):
"""Device class initializer."""
self.rachio = rachio
def get(self, dev_id):
"""Retrieve the information for a device entity."""
path = '/'.join(['device', dev_id])
return self.rachio.get(path)
def getCurrentSchedule(self, dev_id):
"""Retrieve current schedule running, if any."""
path = '/'.join(['device', dev_id, 'current_schedule'])
return self.rachio.get(path)
def getEvent(self, dev_id, starttime, endtime):
"""Retrieve events for a device entity."""
path = 'device/%s/event?startTime=%s&endTime=%s' % \
(dev_id, starttime, endtime)
return self.rachio.get(path)
def getScheduleItem(self, dev_id):
"""Retrieve the next two weeks of schedule items for a device."""
path = '/'.join(['device', dev_id, 'scheduleitem'])
return self.rachio.get(path)
def getForecast(self, dev_id, units):
"""Retrieve current and predicted forecast."""
assert units in ['US', 'METRIC'], 'units must be either US or METRIC'
path = 'device/%s/forecast?units=%s' % (dev_id, units)
return self.rachio.get(path)
def rainDelay(self, dev_id, duration):
"""Rain delay device."""
path = 'device/rain_delay'
payload = {'id': dev_id, 'duration': duration}
return self.rachio.put(path, payload)
def on(self, dev_id):
"""Turn ON all features of the device.
schedules, weather intelligence, water budget, etc.
"""
path = 'device/on'
payload = {'id': dev_id}
return self.rachio.put(path, payload)
def off(self, dev_id):
"""Turn OFF all features of the device.
schedules, weather intelligence, water budget, etc.
"""
path = 'device/off'
payload = {'id': dev_id}
return self.rachio.put(path, payload)
|
rfverbruggen/rachiopy | rachiopy/device.py | Device.rainDelay | python | def rainDelay(self, dev_id, duration):
path = 'device/rain_delay'
payload = {'id': dev_id, 'duration': duration}
return self.rachio.put(path, payload) | Rain delay device. | train | https://github.com/rfverbruggen/rachiopy/blob/c91abc9984f0f453e60fa905285c1b640c3390ae/rachiopy/device.py#L44-L48 | null | class Device(object):
"""Device class with /device/ API calls."""
def __init__(self, rachio):
"""Device class initializer."""
self.rachio = rachio
def get(self, dev_id):
"""Retrieve the information for a device entity."""
path = '/'.join(['device', dev_id])
return self.rachio.get(path)
def getCurrentSchedule(self, dev_id):
"""Retrieve current schedule running, if any."""
path = '/'.join(['device', dev_id, 'current_schedule'])
return self.rachio.get(path)
def getEvent(self, dev_id, starttime, endtime):
"""Retrieve events for a device entity."""
path = 'device/%s/event?startTime=%s&endTime=%s' % \
(dev_id, starttime, endtime)
return self.rachio.get(path)
def getScheduleItem(self, dev_id):
"""Retrieve the next two weeks of schedule items for a device."""
path = '/'.join(['device', dev_id, 'scheduleitem'])
return self.rachio.get(path)
def getForecast(self, dev_id, units):
"""Retrieve current and predicted forecast."""
assert units in ['US', 'METRIC'], 'units must be either US or METRIC'
path = 'device/%s/forecast?units=%s' % (dev_id, units)
return self.rachio.get(path)
def stopWater(self, dev_id):
"""Stop all watering on device."""
path = 'device/stop_water'
payload = {'id': dev_id}
return self.rachio.put(path, payload)
def on(self, dev_id):
"""Turn ON all features of the device.
schedules, weather intelligence, water budget, etc.
"""
path = 'device/on'
payload = {'id': dev_id}
return self.rachio.put(path, payload)
def off(self, dev_id):
"""Turn OFF all features of the device.
schedules, weather intelligence, water budget, etc.
"""
path = 'device/off'
payload = {'id': dev_id}
return self.rachio.put(path, payload)
|
rfverbruggen/rachiopy | rachiopy/device.py | Device.on | python | def on(self, dev_id):
path = 'device/on'
payload = {'id': dev_id}
return self.rachio.put(path, payload) | Turn ON all features of the device.
schedules, weather intelligence, water budget, etc. | train | https://github.com/rfverbruggen/rachiopy/blob/c91abc9984f0f453e60fa905285c1b640c3390ae/rachiopy/device.py#L50-L57 | null | class Device(object):
"""Device class with /device/ API calls."""
def __init__(self, rachio):
"""Device class initializer."""
self.rachio = rachio
def get(self, dev_id):
"""Retrieve the information for a device entity."""
path = '/'.join(['device', dev_id])
return self.rachio.get(path)
def getCurrentSchedule(self, dev_id):
"""Retrieve current schedule running, if any."""
path = '/'.join(['device', dev_id, 'current_schedule'])
return self.rachio.get(path)
def getEvent(self, dev_id, starttime, endtime):
"""Retrieve events for a device entity."""
path = 'device/%s/event?startTime=%s&endTime=%s' % \
(dev_id, starttime, endtime)
return self.rachio.get(path)
def getScheduleItem(self, dev_id):
"""Retrieve the next two weeks of schedule items for a device."""
path = '/'.join(['device', dev_id, 'scheduleitem'])
return self.rachio.get(path)
def getForecast(self, dev_id, units):
"""Retrieve current and predicted forecast."""
assert units in ['US', 'METRIC'], 'units must be either US or METRIC'
path = 'device/%s/forecast?units=%s' % (dev_id, units)
return self.rachio.get(path)
def stopWater(self, dev_id):
"""Stop all watering on device."""
path = 'device/stop_water'
payload = {'id': dev_id}
return self.rachio.put(path, payload)
def rainDelay(self, dev_id, duration):
"""Rain delay device."""
path = 'device/rain_delay'
payload = {'id': dev_id, 'duration': duration}
return self.rachio.put(path, payload)
def off(self, dev_id):
"""Turn OFF all features of the device.
schedules, weather intelligence, water budget, etc.
"""
path = 'device/off'
payload = {'id': dev_id}
return self.rachio.put(path, payload)
|
class Device(object):
    """Thin wrapper around the Rachio public API /device/ endpoints.

    Every method delegates the HTTP work to the shared ``rachio`` client
    (which exposes ``get``/``put``) and only builds the path and payload.
    """

    def __init__(self, rachio):
        """Keep a reference to the shared low-level API client."""
        self.rachio = rachio

    def get(self, dev_id):
        """Retrieve the information for a device entity."""
        return self.rachio.get('/'.join(['device', dev_id]))

    def getCurrentSchedule(self, dev_id):
        """Retrieve the currently running schedule, if any."""
        return self.rachio.get('/'.join(['device', dev_id, 'current_schedule']))

    def getEvent(self, dev_id, starttime, endtime):
        """Retrieve events for a device entity between two timestamps."""
        return self.rachio.get('device/%s/event?startTime=%s&endTime=%s'
                               % (dev_id, starttime, endtime))

    def getScheduleItem(self, dev_id):
        """Retrieve the next two weeks of schedule items for a device."""
        return self.rachio.get('/'.join(['device', dev_id, 'scheduleitem']))

    def getForecast(self, dev_id, units):
        """Retrieve current and predicted forecast in US or METRIC units."""
        assert units in ['US', 'METRIC'], 'units must be either US or METRIC'
        return self.rachio.get('device/%s/forecast?units=%s' % (dev_id, units))

    def stopWater(self, dev_id):
        """Stop all watering on the device."""
        return self.rachio.put('device/stop_water', {'id': dev_id})

    def rainDelay(self, dev_id, duration):
        """Put the device into rain-delay mode for the given duration."""
        return self.rachio.put('device/rain_delay',
                               {'id': dev_id, 'duration': duration})

    def off(self, dev_id):
        """Turn OFF all features of the device.

        schedules, weather intelligence, water budget, etc.
        """
        return self.rachio.put('device/off', {'id': dev_id})

    def on(self, dev_id):
        """Turn ON all features of the device.

        schedules, weather intelligence, water budget, etc.
        """
        return self.rachio.put('device/on', {'id': dev_id})
|
class Notification(object):
    """Wrapper for the Rachio /notification/ (webhook) endpoints."""

    def __init__(self, rachio):
        """Keep a reference to the shared low-level API client."""
        self.rachio = rachio

    def getWebhookEventType(self):
        """Retrieve the list of event types available for webhook subscription."""
        return self.rachio.get('notification/webhook_event_type')

    def getDeviceWebhook(self, dev_id):
        """Retrieve all webhooks registered for a device."""
        return self.rachio.get('/'.join(['notification', dev_id, 'webhook']))

    def postWebhook(self, dev_id, external_id, url, event_types):
        """Add a webhook to a device.

        externalId is opaque data tied to your company, echoed back in
        each webhook event response.
        """
        body = {'device': {'id': dev_id}, 'externalId': external_id,
                'url': url, 'eventTypes': event_types}
        return self.rachio.post('notification/webhook', body)

    def putWebhook(self, hook_id, external_id, url, event_types):
        """Update an existing webhook."""
        body = {'id': hook_id, 'externalId': external_id,
                'url': url, 'eventTypes': event_types}
        return self.rachio.put('notification/webhook', body)

    def deleteWebhook(self, hook_id):
        """Remove a webhook."""
        return self.rachio.delete('/'.join(['notification', 'webhook', hook_id]))

    def get(self, hook_id):
        """Fetch a single webhook by id."""
        return self.rachio.get('/'.join(['notification', 'webhook', hook_id]))
|
def putWebhook(self, hook_id, external_id, url, event_types):
    """Update an existing webhook (id, opaque external id, target URL, event list)."""
    body = {'id': hook_id, 'externalId': external_id,
            'url': url, 'eventTypes': event_types}
    return self.rachio.put('notification/webhook', body)
"""Notification class with methods for /notification/ API calls."""
def __init__(self, rachio):
"""Notification class initializer."""
self.rachio = rachio
def getWebhookEventType(self):
"""Retrieve the list of events types.
Event types that are available to any webhook for subscription.
"""
path = 'notification/webhook_event_type'
return self.rachio.get(path)
def getDeviceWebhook(self, dev_id):
"""Retrieve all webhooks for a device."""
path = '/'.join(['notification', dev_id, 'webhook'])
return self.rachio.get(path)
def postWebhook(self, dev_id, external_id, url, event_types):
"""Add a webhook to a device.
externalId can be used as opaque data that
is tied to your company, and passed back in each webhook event
response.
"""
path = 'notification/webhook'
payload = {'device': {'id': dev_id}, 'externalId': external_id,
'url': url, 'eventTypes': event_types}
return self.rachio.post(path, payload)
def deleteWebhook(self, hook_id):
"""Remove a webhook."""
path = '/'.join(['notification', 'webhook', hook_id])
return self.rachio.delete(path)
def get(self, hook_id):
"""Get a webhook."""
path = '/'.join(['notification', 'webhook', hook_id])
return self.rachio.get(path)
|
def deleteWebhook(self, hook_id):
    """Remove a webhook by its id."""
    return self.rachio.delete('/'.join(['notification', 'webhook', hook_id]))
"""Notification class with methods for /notification/ API calls."""
def __init__(self, rachio):
"""Notification class initializer."""
self.rachio = rachio
def getWebhookEventType(self):
"""Retrieve the list of events types.
Event types that are available to any webhook for subscription.
"""
path = 'notification/webhook_event_type'
return self.rachio.get(path)
def getDeviceWebhook(self, dev_id):
"""Retrieve all webhooks for a device."""
path = '/'.join(['notification', dev_id, 'webhook'])
return self.rachio.get(path)
def postWebhook(self, dev_id, external_id, url, event_types):
"""Add a webhook to a device.
externalId can be used as opaque data that
is tied to your company, and passed back in each webhook event
response.
"""
path = 'notification/webhook'
payload = {'device': {'id': dev_id}, 'externalId': external_id,
'url': url, 'eventTypes': event_types}
return self.rachio.post(path, payload)
def putWebhook(self, hook_id, external_id, url, event_types):
"""Update a webhook."""
path = 'notification/webhook'
payload = {'id': hook_id, 'externalId': external_id,
'url': url, 'eventTypes': event_types}
return self.rachio.put(path, payload)
def get(self, hook_id):
"""Get a webhook."""
path = '/'.join(['notification', 'webhook', hook_id])
return self.rachio.get(path)
|
def get(self, hook_id):
    """Fetch a single webhook by its id."""
    return self.rachio.get('/'.join(['notification', 'webhook', hook_id]))
"""Notification class with methods for /notification/ API calls."""
def __init__(self, rachio):
"""Notification class initializer."""
self.rachio = rachio
def getWebhookEventType(self):
"""Retrieve the list of events types.
Event types that are available to any webhook for subscription.
"""
path = 'notification/webhook_event_type'
return self.rachio.get(path)
def getDeviceWebhook(self, dev_id):
"""Retrieve all webhooks for a device."""
path = '/'.join(['notification', dev_id, 'webhook'])
return self.rachio.get(path)
def postWebhook(self, dev_id, external_id, url, event_types):
"""Add a webhook to a device.
externalId can be used as opaque data that
is tied to your company, and passed back in each webhook event
response.
"""
path = 'notification/webhook'
payload = {'device': {'id': dev_id}, 'externalId': external_id,
'url': url, 'eventTypes': event_types}
return self.rachio.post(path, payload)
def putWebhook(self, hook_id, external_id, url, event_types):
"""Update a webhook."""
path = 'notification/webhook'
payload = {'id': hook_id, 'externalId': external_id,
'url': url, 'eventTypes': event_types}
return self.rachio.put(path, payload)
def deleteWebhook(self, hook_id):
"""Remove a webhook."""
path = '/'.join(['notification', 'webhook', hook_id])
return self.rachio.delete(path)
|
class Person(object):
    """Wrapper for the Rachio /person/ endpoints."""

    def __init__(self, rachio):
        """Keep a reference to the shared low-level API client."""
        self.rachio = rachio

    def getInfo(self):
        """Retrieve the id of the person entity currently logged in."""
        return self.rachio.get('person/info')

    def get(self, user_id):
        """Retrieve the information for a person entity."""
        return self.rachio.get('/'.join(['person', user_id]))
|
def parse_json(json_file, include_date=False):
    """Yield (screen_name, text[, created_at]) tuples from a JSON tweet file.

    The file may be gzip-compressed (detected by a 'gz' suffix). Each line
    holds either one tweet dict or a list of tweet dicts; malformed lines
    are skipped with a note on stderr. Screen names are lowercased and the
    untruncated 'full_text' is preferred over 'text' when present.
    """
    opener = gzip.open(json_file, 'rt') if json_file.endswith('gz') else \
        io.open(json_file, mode='rt', encoding='utf8')
    # BUG FIX: the handle was opened but never closed; both gzip.open and
    # io.open return context managers, so `with` guarantees closure even if
    # the consumer abandons the generator early.
    with opener as fh:
        for line in fh:
            try:
                parsed = json.loads(line)
                records = parsed if type(parsed) is list else [parsed]
                for j in records:
                    name = j['user']['screen_name'].lower()
                    if include_date:
                        yield (name, j['text'], j['created_at'])
                    elif 'full_text' in j:  # untruncated text if available
                        yield (name, j['full_text'])
                    else:
                        yield (name, j['text'])
            except Exception as e:
                sys.stderr.write('skipping json error: %s\n' % e)
# -*- coding: utf-8 -*-
"""Analyze social and linguistic brand data.
usage:
brandelion analyze --text --brand-tweets <file> --exemplar-tweets <file> --sample-tweets <file> --output <file> [--text-method <string>]
brandelion analyze --network --brand-followers <file> --exemplar-followers <file> --output <file> [--network-method <string> --min-followers <n> --max-followers <n> --sample-exemplars <p> --seed <s>]
Options
-h, --help
--brand-followers <file> File containing follower data for brand accounts.
--brand-tweets <file> File containing tweets from brand accounts.
--exemplar-followers <file> File containing follower data for exemplar accounts.
--exemplar-tweets <file> File containing tweets from exemplar accounts.
--sample-tweets <file> File containing tweets from representative sample of Twitter.
--text-method <string> Method to do text analysis [default: chi2]
--network-method <string>  Method to do network analysis [default: jaccard]
-o, --output <file> File to store results
-t, --text Analyze text of tweets.
-n, --network Analyze followers.
--min-followers <n> Ignore exemplars that don't have at least n followers [default: 0]
--max-followers <n>  Ignore exemplars that have more than n followers [default: 1e10]
--sample-exemplars <p> Sample p percent of the exemplars, uniformly at random. [default: 100]
--seed <s> Seed for random sampling. [default: 12345]
"""
from collections import Counter, defaultdict
from docopt import docopt
import io
from itertools import groupby
import gzip
import json
import math
import numpy as np
import os
import re
import random
from scipy.sparse import vstack
import sys
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_selection import chi2 as skchi2
from sklearn import linear_model
from . import report
### TEXT ANALYSIS ###
def extract_tweets(json_file):
    """Yield (screen_name, text) tuples, where text is the concatenation of
    all tweets in each consecutive run of one user's tweets."""
    grouped = groupby(parse_json(json_file), key=lambda pair: pair[0])
    for screen_name, pairs in grouped:
        yield screen_name, ' '.join(text for _, text in pairs)
def preprocess(s):
"""
>>> preprocess('#hi there http://www.foo.com @you isn"t RT <>')
'hashtaghi hashtaghi there isn"t'
"""
# s = re.sub('@\S+', 'thisisamention', s) # map all mentions to thisisamention
s = re.sub(r'@\S+', ' ', s) # map all mentions to thisisamention
# s = re.sub('http\S+', 'http', s) # keep only http from urls
s = re.sub(r'http\S+', ' ', s) # keep only http from urls
s = re.sub(r'#(\S+)', r'hashtag\1 hashtag\1', s) # #foo -> hashtagfoo hashtagfoo (for retaining hashtags even using bigrams)
# s = re.sub(r'[0-9]+', '9', s) # 1234 -> 9
s = re.sub(r'\bRT\b', ' ', s, re.IGNORECASE)
s = re.sub(r'&[a-z]+;', ' ', s, re.IGNORECASE)
s = re.sub(r'\s+', ' ', s).strip()
return s.lower()
def vectorize(json_file, vec, dofit=True):
    """Return (screen_names, X): one row of term counts per Twitter account.

    `vec` is a CountVectorizer-like object; it is fitted when `dofit` is
    True and only applied otherwise.
    """
    # PERF FIX: the original called extract_tweets() twice (once for names,
    # once for texts), parsing the whole JSON file a second time.
    screen_names = []
    texts = []
    for name, text in extract_tweets(json_file):
        screen_names.append(name)
        texts.append(text)
    X = vec.fit_transform(texts) if dofit else vec.transform(texts)
    return screen_names, X
def chi2(exemplars, samples, n=300):
    """Chi-squared feature scores for exemplar-vs-sample discrimination.

    Keeps the chi2 statistic for the top `n` features that also carry a
    positive logistic-regression weight (i.e. indicate the exemplar class);
    every other coefficient is zeroed.
    """
    y = np.array([1.] * exemplars.shape[0] + [0.] * samples.shape[0])
    X = vstack((exemplars, samples)).tocsr()
    clf = linear_model.LogisticRegression(penalty='l2')
    clf.fit(X, y)
    weights = clf.coef_[0]
    chis, _ = skchi2(X, y)
    positive = [i for i in chis.argsort()[::-1] if weights[i] > 0]
    scores = np.zeros_like(weights)
    for i in positive[:n]:
        scores[i] = chis[i]
    return scores
def do_score(vec, coef):
    """Score one account: fraction of the total coefficient mass covered by
    the account's nonzero features."""
    _, cols = vec.nonzero()
    return np.sum(coef[cols]) / np.sum(coef)
def write_top_words(fname, vocab, scores):
    """Write 'term score' lines to fname, highest score first, positives only."""
    with io.open(fname, 'w', encoding='utf8') as out:
        for idx in np.argsort(scores)[::-1]:
            if scores[idx] > 0:
                out.write('%s %g\n' % (vocab[idx], scores[idx]))
def analyze_text(brand_tweets_file, exemplar_tweets_file, sample_tweets_file, outfile, analyze_fn):
    """Score each brand account by its ngram overlap with exemplar language.

    Fits a bigram vectorizer on the exemplar tweets, scores the vocabulary
    with `analyze_fn` (the name of a function in this module, e.g. 'chi2'),
    writes the top terms to `outfile + '.topwords'` and one 'brand score'
    line per brand to `outfile`.
    """
    analyze = getattr(sys.modules[__name__], analyze_fn)
    vec = CountVectorizer(min_df=3, preprocessor=preprocess, ngram_range=(2, 2), binary=True)
    _, exemplar_vectors = vectorize(exemplar_tweets_file, vec, dofit=True)
    print('read tweets for %d exemplar accounts' % exemplar_vectors.shape[0])
    brands, brand_vectors = vectorize(brand_tweets_file, vec, dofit=False)
    print('read tweets for %d brand accounts' % brand_vectors.shape[0])
    _, sample_vectors = vectorize(sample_tweets_file, vec, dofit=False)
    print('read tweets for %d sample accounts' % sample_vectors.shape[0])
    scores = analyze(exemplar_vectors, sample_vectors)
    vocab = vec.get_feature_names()
    write_top_words(outfile + '.topwords', vocab, scores)
    print('top 10 ngrams:\n', '\n'.join(['%s=%.4g' % (vocab[i], scores[i])
                                         for i in np.argsort(scores)[::-1][:10]]))
    # BUG FIX: the output file was flushed per line but never closed.
    with open(outfile, 'wt') as outf:
        for bi, brand_vec in enumerate(brand_vectors):
            outf.write('%s %g\n' % (brands[bi], do_score(brand_vec, scores)))
### FOLLOWER ANALYSIS ###
def get_twitter_handles(fname):
    """Return the set of lowercased first tokens (handles), one per line of fname.

    Only the first 90 characters of each line are considered.
    """
    with open(fname, 'rt') as fh:
        return {line[:90].split()[0].lower() for line in fh}
def read_follower_file(fname, min_followers=0, max_followers=1e10, blacklist=set()):
    """Map screen_name -> set(follower ids) from a follower file.

    Lines look like '<timestamp> <screen_name> <id> <id> ...'. Accounts in
    `blacklist` are skipped silently; accounts whose follower count is not
    in (min_followers, max_followers] are skipped with a message.
    """
    result = {}
    with open(fname, 'rt') as fh:
        for line in fh:
            tokens = line.split()
            if len(tokens) <= 3:
                continue
            name = tokens[1].lower()
            if name in blacklist:
                continue
            followers = set(int(t) for t in tokens[2:])
            if min_followers < len(followers) <= max_followers:
                result[name] = followers
            else:
                print('skipping exemplar', name)
    return result
def iter_follower_file(fname):
    """Yield (screen_name, follower_id_set) tuples from a follower file.

    File format: '<iso timestamp> <screen_name> <follower_id1> ...'.
    Lines with fewer than four tokens are skipped.
    """
    with open(fname, 'rt') as fh:
        for line in fh:
            tokens = line.split()
            if len(tokens) > 3:
                yield tokens[1].lower(), {int(t) for t in tokens[2:]}
# JACCARD
def _jaccard(a, b):
""" Return the Jaccard similarity between two sets a and b. """
return 1. * len(a & b) / len(a | b)
def jaccard(brands, exemplars, weighted_avg=False, sqrt=False):
""" Return the average Jaccard similarity between a brand's followers and the
followers of each exemplar. """
scores = {}
for brand, followers in brands:
if weighted_avg:
scores[brand] = np.average([_jaccard(followers, others) for others in exemplars.values()],
weights=[1. / len(others) for others in exemplars.values()])
else:
scores[brand] = 1. * sum(_jaccard(followers, others) for others in exemplars.values()) / len(exemplars)
# limit to exemplars with less than 40k followers: scores[brand] = 1. * sum(_jaccard(brands[brand], others) for others in exemplars.itervalues() if len(others) < 40000) / len(exemplars)
if sqrt:
scores = dict([(b, math.sqrt(s)) for b, s in scores.items()])
return scores
def jaccard_weighted_avg(brands, exemplars):
    """Jaccard score with exemplars weighted by 1/|followers|."""
    return jaccard(brands, exemplars, weighted_avg=True, sqrt=False)


def jaccard_sqrt_no_weighted_avg(brands, exemplars):
    """Square root of the unweighted Jaccard score."""
    return jaccard(brands, exemplars, weighted_avg=False, sqrt=True)


def jaccard_sqrt(brands, exemplars):
    """Square root of the weighted Jaccard score."""
    return jaccard(brands, exemplars, weighted_avg=True, sqrt=True)
def jaccard_merge(brands, exemplars):
    """Jaccard similarity of each brand's followers against the union of all
    exemplar followers (one merged pseudo-account)."""
    merged = set().union(*exemplars.values())
    return {name: 1. * len(f & merged) / len(f | merged) for name, f in brands}
def compute_log_degrees(brands, exemplars):
    """Return a Counter mapping follower id -> 1 / log(Z + 1), where Z is
    the number of brand accounts that follower follows.

    `exemplars` is accepted for interface symmetry but deliberately unused:
    following many exemplars should not be penalized.
    """
    counts = Counter()
    for follower_set in brands.values():
        counts.update(follower_set)
    # Counter preserves the original missing-key -> 0 behavior.
    return Counter({uid: 1. / math.log(z + 1) for uid, z in counts.items()})
# PROPORTION
def _proportion(a, b):
""" Return the len(a & b) / len(a) """
return 1. * len(a & b) / len(a)
def proportion(brands, exemplars, weighted_avg=False, sqrt=False):
"""
Return the proportion of a brand's followers who also follow an exemplar.
"""
scores = {}
for brand, followers in brands:
if weighted_avg:
scores[brand] = np.average([_proportion(followers, others) for others in exemplars.values()],
weights=[1. / len(others) for others in exemplars.values()])
else:
scores[brand] = 1. * sum(_proportion(followers, others) for others in exemplars.values()) / len(exemplars)
if sqrt:
scores = dict([(b, math.sqrt(s)) for b, s in scores.items()])
return scores
def proportion_weighted_avg(brands, exemplars):
    """Proportion score with exemplars weighted by 1/|followers|."""
    return proportion(brands, exemplars, weighted_avg=True, sqrt=False)


def proportion_sqrt_no_weighted_avg(brands, exemplars):
    """Square root of the unweighted proportion score."""
    return proportion(brands, exemplars, weighted_avg=False, sqrt=True)


def proportion_sqrt(brands, exemplars):
    """Square root of the weighted proportion score."""
    return proportion(brands, exemplars, weighted_avg=True, sqrt=True)
def proportion_merge(brands, exemplars):
    """Share of each brand's followers found in the union of all exemplar
    followers (one merged pseudo-account)."""
    merged = set().union(*exemplars.values())
    return {name: 1. * len(f & merged) / len(f) for name, f in brands}
# COSINE SIMILARITY
def _cosine(a, b):
""" Return the len(a & b) / len(a) """
return 1. * len(a & b) / (math.sqrt(len(a)) * math.sqrt(len(b)))
def cosine(brands, exemplars, weighted_avg=False, sqrt=False):
"""
Return the cosine similarity betwee a brand's followers and the exemplars.
"""
scores = {}
for brand, followers in brands:
if weighted_avg:
scores[brand] = np.average([_cosine(followers, others) for others in exemplars.values()],
weights=[1. / len(others) for others in exemplars.values()])
else:
scores[brand] = 1. * sum(_cosine(followers, others) for others in exemplars.values()) / len(exemplars)
if sqrt:
scores = dict([(b, math.sqrt(s)) for b, s in scores.items()])
return scores
def cosine_weighted_avg(brands, exemplars):
    """Cosine score with exemplars weighted by 1/|followers|."""
    return cosine(brands, exemplars, weighted_avg=True, sqrt=False)


def cosine_sqrt_no_weighted_avg(brands, exemplars):
    """Square root of the unweighted cosine score."""
    return cosine(brands, exemplars, weighted_avg=False, sqrt=True)


def cosine_sqrt(brands, exemplars):
    """Square root of the weighted cosine score."""
    return cosine(brands, exemplars, weighted_avg=True, sqrt=True)
def cosine_merge(brands, exemplars):
    """Cosine similarity of each brand's followers against the union of all
    exemplar followers (one merged pseudo-account)."""
    merged = set().union(*exemplars.values())
    return {name: 1. * len(f & merged) / (math.sqrt(len(f)) * math.sqrt(len(merged)))
            for name, f in brands}
def adamic(brands, exemplars):
    """Deprecated: Adamic/Adar similarity between brand and exemplar followers.

    The historical implementation required loading every brand's follower
    set into memory at once, which no longer fits the streaming `brands`
    iterator interface, so the method is disabled and returns None.
    (The unreachable legacy body that followed the early return has been
    removed.)
    """
    print('adamic deprecated...requires loading all brands in memory.')
    return
def compute_rarity_scores(exemplars):
    """Score each follower by sum_i (1 / n_i) over the exemplars they
    follow, where n_i is that exemplar's follower count.

    DOC FIX: the doctest showed Python-2 dict.items() output (a list in
    insertion order) and failed under Python 3; it now sorts the items.

    >>> sorted(compute_rarity_scores({'e1': {1, 2, 3, 4}, 'e2': {4, 5}}).items())
    [(1, 0.25), (2, 0.25), (3, 0.25), (4, 0.75), (5, 0.5)]
    """
    scores = defaultdict(lambda: 0.)
    for followers in exemplars.values():
        weight = 1. / len(followers)
        for follower in followers:
            scores[follower] += weight
    return scores


def rarity(brands, exemplars):
    """Brand score = mean rarity of its followers (see compute_rarity_scores)."""
    follower_rarity = compute_rarity_scores(exemplars)
    return {name: sum(follower_rarity[f] for f in followers) / len(followers)
            for name, followers in brands}
def compute_rarity_scores_log(exemplars):
    """Score each follower by sum_i (1 / log(n_i)) over the exemplars they
    follow, where n_i is that exemplar's follower count.

    DOC FIX: the previous docstring and doctest were copy-pasted from
    compute_rarity_scores (wrong function, wrong expected values).

    NOTE: an exemplar with a single follower gives log(1) == 0 and raises
    ZeroDivisionError; callers must filter those out (see the
    min_followers option of read_follower_file / analyze_followers).
    """
    scores = defaultdict(lambda: 0.)
    for followers in exemplars.values():
        weight = 1. / math.log(len(followers))
        for follower in followers:
            scores[follower] += weight
    return scores


def rarity_log(brands, exemplars):
    """Brand score = mean log-rarity of its followers
    (see compute_rarity_scores_log)."""
    follower_rarity = compute_rarity_scores_log(exemplars)
    return {name: sum(follower_rarity[f] for f in followers) / len(followers)
            for name, followers in brands}
def mkdirs(filename):
    """Ensure the parent directory of `filename` exists (via report.mkdirs)."""
    report.mkdirs(os.path.dirname(filename))
def analyze_followers(brand_follower_file, exemplar_follower_file, outfile, analyze_fn,
                      min_followers, max_followers, sample_exemplars):
    """Score every brand account against the exemplar accounts by follower overlap.

    `analyze_fn` is the name of a scoring function in this module (e.g.
    'jaccard'); `sample_exemplars` is a percentage (0-100] of exemplars to
    keep, sampled uniformly at random. One 'brand score' line per brand is
    written to `outfile` (sorted by brand name).
    """
    brands = iter_follower_file(brand_follower_file)
    exemplars = read_follower_file(exemplar_follower_file, min_followers=min_followers,
                                   max_followers=max_followers,
                                   blacklist=get_twitter_handles(brand_follower_file))
    print('read follower data for %d exemplars' % (len(exemplars)))
    if sample_exemplars < 100:  # sample a subset of exemplars
        # BUG FIX: random.sample() requires a sequence; passing dict.keys()
        # raises TypeError on Python 3.11+.
        kept = random.sample(list(exemplars), int(len(exemplars) * sample_exemplars / 100.))
        exemplars = {name: exemplars[name] for name in kept}
        print('sampled %d exemplars' % (len(exemplars)))
    analyze = getattr(sys.modules[__name__], analyze_fn)
    scores = analyze(brands, exemplars)
    mkdirs(outfile)
    with open(outfile, 'wt') as outf:
        for brand in sorted(scores):
            outf.write('%s %g\n' % (brand, scores[brand]))
    print('results written to', outfile)
def main():
    """Command-line entry point: dispatch to network and/or text analysis."""
    args = docopt(__doc__)
    print(args)
    if '--seed' in args:
        random.seed(args['--seed'])
    if args['--network']:
        analyze_followers(args['--brand-followers'], args['--exemplar-followers'],
                          args['--output'], args['--network-method'],
                          int(args['--min-followers']),
                          int(float(args['--max-followers'])),
                          float(args['--sample-exemplars']))
    if args['--text']:
        analyze_text(args['--brand-tweets'], args['--exemplar-tweets'],
                     args['--sample-tweets'], args['--output'], args['--text-method'])


if __name__ == '__main__':
    main()
|
def extract_tweets(json_file):
    """Yield (screen_name, text) tuples, where text is the concatenation of
    all tweets in each consecutive run of one user's tweets."""
    for name, run in groupby(parse_json(json_file), lambda rec: rec[0]):
        yield name, ' '.join(rec[1] for rec in run)
"def parse_json(json_file, include_date=False):\n \"\"\" Yield screen_name, text tuples from a json file. \"\"\"\n if json_file[-2:] == 'gz':\n fh = gzip.open(json_file, 'rt')\n else:\n fh = io.open(json_file, mode='rt', encoding='utf8')\n for line in fh:\n try:\n jj = js... | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Analyze social and linguistic brand data.
usage:
brandelion analyze --text --brand-tweets <file> --exemplar-tweets <file> --sample-tweets <file> --output <file> [--text-method <string>]
brandelion analyze --network --brand-followers <file> --exemplar-followers <file> --output <file> [--network-method <string> --min-followers <n> --max-followers <n> --sample-exemplars <p> --seed <s>]
Options
-h, --help
--brand-followers <file> File containing follower data for brand accounts.
--brand-tweets <file> File containing tweets from brand accounts.
--exemplar-followers <file> File containing follower data for exemplar accounts.
--exemplar-tweets <file> File containing tweets from exemplar accounts.
--sample-tweets <file> File containing tweets from representative sample of Twitter.
--text-method <string> Method to do text analysis [default: chi2]
--network-method <string> Method to do text analysis [default: jaccard]
-o, --output <file> File to store results
-t, --text Analyze text of tweets.
-n, --network Analyze followers.
--min-followers <n> Ignore exemplars that don't have at least n followers [default: 0]
--max-followers <n> Ignore exemplars that have more than least n followers [default: 1e10]
--sample-exemplars <p> Sample p percent of the exemplars, uniformly at random. [default: 100]
--seed <s> Seed for random sampling. [default: 12345]
"""
from collections import Counter, defaultdict
from docopt import docopt
import io
from itertools import groupby
import gzip
import json
import math
import numpy as np
import os
import re
import random
from scipy.sparse import vstack
import sys
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_selection import chi2 as skchi2
from sklearn import linear_model
from . import report
### TEXT ANALYSIS ###
def parse_json(json_file, include_date=False):
""" Yield screen_name, text tuples from a json file. """
if json_file[-2:] == 'gz':
fh = gzip.open(json_file, 'rt')
else:
fh = io.open(json_file, mode='rt', encoding='utf8')
for line in fh:
try:
jj = json.loads(line)
if type(jj) is not list:
jj = [jj]
for j in jj:
if include_date:
yield (j['user']['screen_name'].lower(), j['text'], j['created_at'])
else:
if 'full_text' in j: # get untruncated text if available.
yield (j['user']['screen_name'].lower(), j['full_text'])
else:
yield (j['user']['screen_name'].lower(), j['text'])
except Exception as e:
sys.stderr.write('skipping json error: %s\n' % e)
def preprocess(s):
"""
>>> preprocess('#hi there http://www.foo.com @you isn"t RT <>')
'hashtaghi hashtaghi there isn"t'
"""
# s = re.sub('@\S+', 'thisisamention', s) # map all mentions to thisisamention
s = re.sub(r'@\S+', ' ', s) # map all mentions to thisisamention
# s = re.sub('http\S+', 'http', s) # keep only http from urls
s = re.sub(r'http\S+', ' ', s) # keep only http from urls
s = re.sub(r'#(\S+)', r'hashtag\1 hashtag\1', s) # #foo -> hashtagfoo hashtagfoo (for retaining hashtags even using bigrams)
# s = re.sub(r'[0-9]+', '9', s) # 1234 -> 9
s = re.sub(r'\bRT\b', ' ', s, re.IGNORECASE)
s = re.sub(r'&[a-z]+;', ' ', s, re.IGNORECASE)
s = re.sub(r'\s+', ' ', s).strip()
return s.lower()
def vectorize(json_file, vec, dofit=True):
""" Return a matrix where each row corresponds to a Twitter account, and
each column corresponds to the number of times a term is used by that
account. """
## CountVectorizer, efficiently.
screen_names = [x[0] for x in extract_tweets(json_file)]
if dofit:
X = vec.fit_transform(x[1] for x in extract_tweets(json_file))
else:
X = vec.transform(x[1] for x in extract_tweets(json_file))
return screen_names, X
def chi2(exemplars, samples, n=300):
y = np.array(([1.] * exemplars.shape[0]) + ([.0] * samples.shape[0]))
X = vstack((exemplars, samples)).tocsr()
clf = linear_model.LogisticRegression(penalty='l2')
clf.fit(X, y)
coef = clf.coef_[0]
chis, pvals = skchi2(X, y)
top_indices = chis.argsort()[::-1]
top_indices = [i for i in top_indices if coef[i] > 0]
for idx in range(len(coef)):
coef[idx] = 0.
for idx in top_indices[:n]:
coef[idx] = chis[idx]
return coef
def do_score(vec, coef):
return np.sum(coef[vec.nonzero()[1]]) / np.sum(coef)
def write_top_words(fname, vocab, scores):
outf = io.open(fname, 'w', encoding='utf8')
for i in np.argsort(scores)[::-1]:
if scores[i] > 0:
outf.write('%s %g\n' % (vocab[i], scores[i]))
outf.close()
def analyze_text(brand_tweets_file, exemplar_tweets_file, sample_tweets_file, outfile, analyze_fn):
analyze = getattr(sys.modules[__name__], analyze_fn)
vec = CountVectorizer(min_df=3, preprocessor=preprocess, ngram_range=(2, 2), binary=True)
_, exemplar_vectors = vectorize(exemplar_tweets_file, vec, dofit=True)
print('read tweets for %d exemplar accounts' % exemplar_vectors.shape[0])
brands, brand_vectors = vectorize(brand_tweets_file, vec, dofit=False)
print('read tweets for %d brand accounts' % brand_vectors.shape[0])
_, sample_vectors = vectorize(sample_tweets_file, vec, dofit=False)
print('read tweets for %d sample accounts' % sample_vectors.shape[0])
scores = analyze(exemplar_vectors, sample_vectors)
vocab = vec.get_feature_names()
write_top_words(outfile + '.topwords', vocab, scores)
print('top 10 ngrams:\n', '\n'.join(['%s=%.4g' % (vocab[i], scores[i]) for i in np.argsort(scores)[::-1][:10]]))
outf = open(outfile, 'wt')
for bi, brand_vec in enumerate(brand_vectors):
outf.write('%s %g\n' % (brands[bi], do_score(brand_vec, scores)))
outf.flush()
### FOLLOWER ANALYSIS ###
def get_twitter_handles(fname):
handles = set()
with open(fname, 'rt') as f:
for line in f:
handles.add(line[:90].split()[0].lower())
return handles
def read_follower_file(fname, min_followers=0, max_followers=1e10, blacklist=set()):
""" Read a file of follower information and return a dictionary mapping screen_name to a set of follower ids. """
result = {}
with open(fname, 'rt') as f:
for line in f:
parts = line.split()
if len(parts) > 3:
if parts[1].lower() not in blacklist:
followers = set(int(x) for x in parts[2:])
if len(followers) > min_followers and len(followers) <= max_followers:
result[parts[1].lower()] = followers
else:
print('skipping exemplar', parts[1].lower())
return result
def iter_follower_file(fname):
""" Iterator from a file of follower information and return a tuple of screen_name, follower ids.
File format is:
<iso timestamp> <screen_name> <follower_id1> <follower_ids2> ...
"""
with open(fname, 'rt') as f:
for line in f:
parts = line.split()
if len(parts) > 3:
yield parts[1].lower(), set(int(x) for x in parts[2:])
# JACCARD
def _jaccard(a, b):
""" Return the Jaccard similarity between two sets a and b. """
return 1. * len(a & b) / len(a | b)
def jaccard(brands, exemplars, weighted_avg=False, sqrt=False):
""" Return the average Jaccard similarity between a brand's followers and the
followers of each exemplar. """
scores = {}
for brand, followers in brands:
if weighted_avg:
scores[brand] = np.average([_jaccard(followers, others) for others in exemplars.values()],
weights=[1. / len(others) for others in exemplars.values()])
else:
scores[brand] = 1. * sum(_jaccard(followers, others) for others in exemplars.values()) / len(exemplars)
# limit to exemplars with less than 40k followers: scores[brand] = 1. * sum(_jaccard(brands[brand], others) for others in exemplars.itervalues() if len(others) < 40000) / len(exemplars)
if sqrt:
scores = dict([(b, math.sqrt(s)) for b, s in scores.items()])
return scores
def jaccard_weighted_avg(brands, exemplars):
return jaccard(brands, exemplars, True, False)
def jaccard_sqrt_no_weighted_avg(brands, exemplars):
return jaccard(brands, exemplars, False, True)
def jaccard_sqrt(brands, exemplars):
return jaccard(brands, exemplars, weighted_avg=True, sqrt=True)
def jaccard_merge(brands, exemplars):
""" Return the average Jaccard similarity between a brand's followers and
the followers of each exemplar. We merge all exemplar followers into one
big pseudo-account."""
scores = {}
exemplar_followers = set()
for followers in exemplars.values():
exemplar_followers |= followers
for brand, followers in brands:
scores[brand] = _jaccard(followers, exemplar_followers)
return scores
def compute_log_degrees(brands, exemplars):
""" For each follower, let Z be the total number of brands they follow.
Return a dictionary of 1. / log(Z), for each follower.
"""
counts = Counter()
for followers in brands.values(): # + exemplars.values(): # Include exemplars in these counts? No, don't want to penalize people who follow many exemplars.
counts.update(followers)
counts.update(counts.keys()) # Add 1 to each count.
for k in counts:
counts[k] = 1. / math.log(counts[k])
return counts
# PROPORTION
def _proportion(a, b):
""" Return the len(a & b) / len(a) """
return 1. * len(a & b) / len(a)
def proportion(brands, exemplars, weighted_avg=False, sqrt=False):
"""
Return the proportion of a brand's followers who also follow an exemplar.
"""
scores = {}
for brand, followers in brands:
if weighted_avg:
scores[brand] = np.average([_proportion(followers, others) for others in exemplars.values()],
weights=[1. / len(others) for others in exemplars.values()])
else:
scores[brand] = 1. * sum(_proportion(followers, others) for others in exemplars.values()) / len(exemplars)
if sqrt:
scores = dict([(b, math.sqrt(s)) for b, s in scores.items()])
return scores
def proportion_weighted_avg(brands, exemplars):
return proportion(brands, exemplars, weighted_avg=True, sqrt=False)
def proportion_sqrt_no_weighted_avg(brands, exemplars):
return proportion(brands, exemplars, weighted_avg=False, sqrt=True)
def proportion_sqrt(brands, exemplars):
return proportion(brands, exemplars, weighted_avg=True, sqrt=True)
def proportion_merge(brands, exemplars):
""" Return the proportion of a brand's followers who also follower an
exemplar. We merge all exemplar followers into one big pseudo-account."""
scores = {}
exemplar_followers = set()
for followers in exemplars.values():
exemplar_followers |= followers
for brand, followers in brands:
scores[brand] = _proportion(followers, exemplar_followers)
return scores
# COSINE SIMILARITY
def _cosine(a, b):
""" Return the len(a & b) / len(a) """
return 1. * len(a & b) / (math.sqrt(len(a)) * math.sqrt(len(b)))
def cosine(brands, exemplars, weighted_avg=False, sqrt=False):
"""
Return the cosine similarity betwee a brand's followers and the exemplars.
"""
scores = {}
for brand, followers in brands:
if weighted_avg:
scores[brand] = np.average([_cosine(followers, others) for others in exemplars.values()],
weights=[1. / len(others) for others in exemplars.values()])
else:
scores[brand] = 1. * sum(_cosine(followers, others) for others in exemplars.values()) / len(exemplars)
if sqrt:
scores = dict([(b, math.sqrt(s)) for b, s in scores.items()])
return scores
def cosine_weighted_avg(brands, exemplars):
return cosine(brands, exemplars, weighted_avg=True, sqrt=False)
def cosine_sqrt_no_weighted_avg(brands, exemplars):
return cosine(brands, exemplars, weighted_avg=False, sqrt=True)
def cosine_sqrt(brands, exemplars):
return cosine(brands, exemplars, weighted_avg=True, sqrt=True)
def cosine_merge(brands, exemplars):
""" Return the proportion of a brand's followers who also follower an
exemplar. We merge all exemplar followers into one big pseudo-account."""
scores = {}
exemplar_followers = set()
for followers in exemplars.values():
exemplar_followers |= followers
for brand, followers in brands:
scores[brand] = _cosine(followers, exemplar_followers)
return scores
def adamic(brands, exemplars):
""" Return the average Adamic/Adar similarity between a brand's followers
and the followers of each exemplar. We approximate the number of followed
accounts per user by only considering those in our brand set."""
print('adamic deprecated...requires loading all brands in memory.')
return
degrees = compute_log_degrees(brands, exemplars)
scores = {}
exemplar_sums = dict([(exemplar, sum(degrees[z] for z in exemplars[exemplar])) for exemplar in exemplars])
for brand in sorted(brands):
brand_sum = sum(degrees[z] for z in brands[brand])
total = 0.
for exemplar in exemplars:
total += sum(degrees[z] for z in brands[brand] & exemplars[exemplar]) / (brand_sum + exemplar_sums[exemplar])
scores[brand] = total / len(exemplars)
return scores
def compute_rarity_scores(exemplars):
""" Compute a score for each follower that is sum_i (1/n_i), where n_i is
the degree of the ith exemplar they follow.
>>> compute_rarity_scores({'e1':{1,2,3,4}, 'e2':{4,5}}).items()
[(1, 0.25), (2, 0.25), (3, 0.25), (4, 0.75), (5, 0.5)]
"""
scores = defaultdict(lambda: 0.)
for followers in exemplars.values():
score = 1. / len(followers)
for f in followers:
scores[f] += score
return scores
def rarity(brands, exemplars):
""" Compute a score for each follower that is sum_i (1/n_i), where n_i is the degree of the ith exemplar they follow.
The score for a brand is then the average of their follower scores."""
rarity = compute_rarity_scores(exemplars)
scores = {}
for brand, followers in brands:
scores[brand] = sum(rarity[f] for f in followers) / len(followers)
return scores
def compute_rarity_scores_log(exemplars):
""" Compute a score for each follower that is sum_i (1/n_i), where n_i is
the degree of the ith exemplar they follow.
>>> compute_rarity_scores({'e1':{1,2,3,4}, 'e2':{4,5}}).items()
[(1, 0.25), (2, 0.25), (3, 0.25), (4, 0.75), (5, 0.5)]
"""
scores = defaultdict(lambda: 0.)
for followers in exemplars.values():
score = 1. / math.log(len(followers))
for f in followers:
scores[f] += score
return scores
def rarity_log(brands, exemplars):
""" Compute a score for each follower that is sum_i (1/log(n_i)), where n_i is the degree of the ith exemplar they follow.
The score for a brand is then the average of their follower scores."""
rarity = compute_rarity_scores_log(exemplars)
scores = {}
for brand, followers in brands:
scores[brand] = sum(rarity[f] for f in followers) / len(followers)
return scores
def mkdirs(filename):
report.mkdirs(os.path.dirname(filename))
def analyze_followers(brand_follower_file, exemplar_follower_file, outfile, analyze_fn,
min_followers, max_followers, sample_exemplars):
brands = iter_follower_file(brand_follower_file)
exemplars = read_follower_file(exemplar_follower_file, min_followers=min_followers, max_followers=max_followers, blacklist=get_twitter_handles(brand_follower_file))
print('read follower data for %d exemplars' % (len(exemplars)))
if sample_exemplars < 100: # sample a subset of exemplars.
exemplars = dict([(k, exemplars[k]) for k in random.sample(exemplars.keys(), int(len(exemplars) * sample_exemplars / 100.))])
print('sampled %d exemplars' % (len(exemplars)))
analyze = getattr(sys.modules[__name__], analyze_fn)
scores = analyze(brands, exemplars)
mkdirs(outfile)
outf = open(outfile, 'wt')
for brand in sorted(scores):
outf.write('%s %g\n' % (brand, scores[brand]))
outf.flush()
outf.close()
print('results written to', outfile)
def main():
args = docopt(__doc__)
print(args)
if '--seed' in args:
random.seed(args['--seed'])
if args['--network']:
analyze_followers(args['--brand-followers'], args['--exemplar-followers'], args['--output'], args['--network-method'],
int(args['--min-followers']), int(float(args['--max-followers'])), float(args['--sample-exemplars']))
if args['--text']:
analyze_text(args['--brand-tweets'], args['--exemplar-tweets'], args['--sample-tweets'], args['--output'], args['--text-method'])
if __name__ == '__main__':
main()
|
tapilab/brandelion | brandelion/cli/analyze.py | preprocess | python | def preprocess(s):
# s = re.sub('@\S+', 'thisisamention', s) # map all mentions to thisisamention
s = re.sub(r'@\S+', ' ', s) # map all mentions to thisisamention
# s = re.sub('http\S+', 'http', s) # keep only http from urls
s = re.sub(r'http\S+', ' ', s) # keep only http from urls
s = re.sub(r'#(\S+)', r'hashtag\1 hashtag\1', s) # #foo -> hashtagfoo hashtagfoo (for retaining hashtags even using bigrams)
# s = re.sub(r'[0-9]+', '9', s) # 1234 -> 9
s = re.sub(r'\bRT\b', ' ', s, re.IGNORECASE)
s = re.sub(r'&[a-z]+;', ' ', s, re.IGNORECASE)
s = re.sub(r'\s+', ' ', s).strip()
return s.lower() | >>> preprocess('#hi there http://www.foo.com @you isn"t RT <>')
'hashtaghi hashtaghi there isn"t' | train | https://github.com/tapilab/brandelion/blob/40a5a5333cf704182c8666d1fbbbdadc7ff88546/brandelion/cli/analyze.py#L82-L96 | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Analyze social and linguistic brand data.
usage:
brandelion analyze --text --brand-tweets <file> --exemplar-tweets <file> --sample-tweets <file> --output <file> [--text-method <string>]
brandelion analyze --network --brand-followers <file> --exemplar-followers <file> --output <file> [--network-method <string> --min-followers <n> --max-followers <n> --sample-exemplars <p> --seed <s>]
Options
-h, --help
--brand-followers <file> File containing follower data for brand accounts.
--brand-tweets <file> File containing tweets from brand accounts.
--exemplar-followers <file> File containing follower data for exemplar accounts.
--exemplar-tweets <file> File containing tweets from exemplar accounts.
--sample-tweets <file> File containing tweets from representative sample of Twitter.
--text-method <string> Method to do text analysis [default: chi2]
--network-method <string> Method to do text analysis [default: jaccard]
-o, --output <file> File to store results
-t, --text Analyze text of tweets.
-n, --network Analyze followers.
--min-followers <n> Ignore exemplars that don't have at least n followers [default: 0]
--max-followers <n> Ignore exemplars that have more than least n followers [default: 1e10]
--sample-exemplars <p> Sample p percent of the exemplars, uniformly at random. [default: 100]
--seed <s> Seed for random sampling. [default: 12345]
"""
from collections import Counter, defaultdict
from docopt import docopt
import io
from itertools import groupby
import gzip
import json
import math
import numpy as np
import os
import re
import random
from scipy.sparse import vstack
import sys
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_selection import chi2 as skchi2
from sklearn import linear_model
from . import report
### TEXT ANALYSIS ###
def parse_json(json_file, include_date=False):
""" Yield screen_name, text tuples from a json file. """
if json_file[-2:] == 'gz':
fh = gzip.open(json_file, 'rt')
else:
fh = io.open(json_file, mode='rt', encoding='utf8')
for line in fh:
try:
jj = json.loads(line)
if type(jj) is not list:
jj = [jj]
for j in jj:
if include_date:
yield (j['user']['screen_name'].lower(), j['text'], j['created_at'])
else:
if 'full_text' in j: # get untruncated text if available.
yield (j['user']['screen_name'].lower(), j['full_text'])
else:
yield (j['user']['screen_name'].lower(), j['text'])
except Exception as e:
sys.stderr.write('skipping json error: %s\n' % e)
def extract_tweets(json_file):
""" Yield screen_name, string tuples, where the string is the
concatenation of all tweets of this user. """
for screen_name, tweet_iter in groupby(parse_json(json_file), lambda x: x[0]):
tweets = [t[1] for t in tweet_iter]
yield screen_name, ' '.join(tweets)
def vectorize(json_file, vec, dofit=True):
""" Return a matrix where each row corresponds to a Twitter account, and
each column corresponds to the number of times a term is used by that
account. """
## CountVectorizer, efficiently.
screen_names = [x[0] for x in extract_tweets(json_file)]
if dofit:
X = vec.fit_transform(x[1] for x in extract_tweets(json_file))
else:
X = vec.transform(x[1] for x in extract_tweets(json_file))
return screen_names, X
def chi2(exemplars, samples, n=300):
y = np.array(([1.] * exemplars.shape[0]) + ([.0] * samples.shape[0]))
X = vstack((exemplars, samples)).tocsr()
clf = linear_model.LogisticRegression(penalty='l2')
clf.fit(X, y)
coef = clf.coef_[0]
chis, pvals = skchi2(X, y)
top_indices = chis.argsort()[::-1]
top_indices = [i for i in top_indices if coef[i] > 0]
for idx in range(len(coef)):
coef[idx] = 0.
for idx in top_indices[:n]:
coef[idx] = chis[idx]
return coef
def do_score(vec, coef):
return np.sum(coef[vec.nonzero()[1]]) / np.sum(coef)
def write_top_words(fname, vocab, scores):
outf = io.open(fname, 'w', encoding='utf8')
for i in np.argsort(scores)[::-1]:
if scores[i] > 0:
outf.write('%s %g\n' % (vocab[i], scores[i]))
outf.close()
def analyze_text(brand_tweets_file, exemplar_tweets_file, sample_tweets_file, outfile, analyze_fn):
analyze = getattr(sys.modules[__name__], analyze_fn)
vec = CountVectorizer(min_df=3, preprocessor=preprocess, ngram_range=(2, 2), binary=True)
_, exemplar_vectors = vectorize(exemplar_tweets_file, vec, dofit=True)
print('read tweets for %d exemplar accounts' % exemplar_vectors.shape[0])
brands, brand_vectors = vectorize(brand_tweets_file, vec, dofit=False)
print('read tweets for %d brand accounts' % brand_vectors.shape[0])
_, sample_vectors = vectorize(sample_tweets_file, vec, dofit=False)
print('read tweets for %d sample accounts' % sample_vectors.shape[0])
scores = analyze(exemplar_vectors, sample_vectors)
vocab = vec.get_feature_names()
write_top_words(outfile + '.topwords', vocab, scores)
print('top 10 ngrams:\n', '\n'.join(['%s=%.4g' % (vocab[i], scores[i]) for i in np.argsort(scores)[::-1][:10]]))
outf = open(outfile, 'wt')
for bi, brand_vec in enumerate(brand_vectors):
outf.write('%s %g\n' % (brands[bi], do_score(brand_vec, scores)))
outf.flush()
### FOLLOWER ANALYSIS ###
def get_twitter_handles(fname):
handles = set()
with open(fname, 'rt') as f:
for line in f:
handles.add(line[:90].split()[0].lower())
return handles
def read_follower_file(fname, min_followers=0, max_followers=1e10, blacklist=set()):
""" Read a file of follower information and return a dictionary mapping screen_name to a set of follower ids. """
result = {}
with open(fname, 'rt') as f:
for line in f:
parts = line.split()
if len(parts) > 3:
if parts[1].lower() not in blacklist:
followers = set(int(x) for x in parts[2:])
if len(followers) > min_followers and len(followers) <= max_followers:
result[parts[1].lower()] = followers
else:
print('skipping exemplar', parts[1].lower())
return result
def iter_follower_file(fname):
""" Iterator from a file of follower information and return a tuple of screen_name, follower ids.
File format is:
<iso timestamp> <screen_name> <follower_id1> <follower_ids2> ...
"""
with open(fname, 'rt') as f:
for line in f:
parts = line.split()
if len(parts) > 3:
yield parts[1].lower(), set(int(x) for x in parts[2:])
# JACCARD
def _jaccard(a, b):
""" Return the Jaccard similarity between two sets a and b. """
return 1. * len(a & b) / len(a | b)
def jaccard(brands, exemplars, weighted_avg=False, sqrt=False):
""" Return the average Jaccard similarity between a brand's followers and the
followers of each exemplar. """
scores = {}
for brand, followers in brands:
if weighted_avg:
scores[brand] = np.average([_jaccard(followers, others) for others in exemplars.values()],
weights=[1. / len(others) for others in exemplars.values()])
else:
scores[brand] = 1. * sum(_jaccard(followers, others) for others in exemplars.values()) / len(exemplars)
# limit to exemplars with less than 40k followers: scores[brand] = 1. * sum(_jaccard(brands[brand], others) for others in exemplars.itervalues() if len(others) < 40000) / len(exemplars)
if sqrt:
scores = dict([(b, math.sqrt(s)) for b, s in scores.items()])
return scores
def jaccard_weighted_avg(brands, exemplars):
return jaccard(brands, exemplars, True, False)
def jaccard_sqrt_no_weighted_avg(brands, exemplars):
return jaccard(brands, exemplars, False, True)
def jaccard_sqrt(brands, exemplars):
return jaccard(brands, exemplars, weighted_avg=True, sqrt=True)
def jaccard_merge(brands, exemplars):
""" Return the average Jaccard similarity between a brand's followers and
the followers of each exemplar. We merge all exemplar followers into one
big pseudo-account."""
scores = {}
exemplar_followers = set()
for followers in exemplars.values():
exemplar_followers |= followers
for brand, followers in brands:
scores[brand] = _jaccard(followers, exemplar_followers)
return scores
def compute_log_degrees(brands, exemplars):
""" For each follower, let Z be the total number of brands they follow.
Return a dictionary of 1. / log(Z), for each follower.
"""
counts = Counter()
for followers in brands.values(): # + exemplars.values(): # Include exemplars in these counts? No, don't want to penalize people who follow many exemplars.
counts.update(followers)
counts.update(counts.keys()) # Add 1 to each count.
for k in counts:
counts[k] = 1. / math.log(counts[k])
return counts
# PROPORTION
def _proportion(a, b):
""" Return the len(a & b) / len(a) """
return 1. * len(a & b) / len(a)
def proportion(brands, exemplars, weighted_avg=False, sqrt=False):
"""
Return the proportion of a brand's followers who also follow an exemplar.
"""
scores = {}
for brand, followers in brands:
if weighted_avg:
scores[brand] = np.average([_proportion(followers, others) for others in exemplars.values()],
weights=[1. / len(others) for others in exemplars.values()])
else:
scores[brand] = 1. * sum(_proportion(followers, others) for others in exemplars.values()) / len(exemplars)
if sqrt:
scores = dict([(b, math.sqrt(s)) for b, s in scores.items()])
return scores
def proportion_weighted_avg(brands, exemplars):
return proportion(brands, exemplars, weighted_avg=True, sqrt=False)
def proportion_sqrt_no_weighted_avg(brands, exemplars):
return proportion(brands, exemplars, weighted_avg=False, sqrt=True)
def proportion_sqrt(brands, exemplars):
return proportion(brands, exemplars, weighted_avg=True, sqrt=True)
def proportion_merge(brands, exemplars):
""" Return the proportion of a brand's followers who also follower an
exemplar. We merge all exemplar followers into one big pseudo-account."""
scores = {}
exemplar_followers = set()
for followers in exemplars.values():
exemplar_followers |= followers
for brand, followers in brands:
scores[brand] = _proportion(followers, exemplar_followers)
return scores
# COSINE SIMILARITY
def _cosine(a, b):
""" Return the len(a & b) / len(a) """
return 1. * len(a & b) / (math.sqrt(len(a)) * math.sqrt(len(b)))
def cosine(brands, exemplars, weighted_avg=False, sqrt=False):
"""
Return the cosine similarity betwee a brand's followers and the exemplars.
"""
scores = {}
for brand, followers in brands:
if weighted_avg:
scores[brand] = np.average([_cosine(followers, others) for others in exemplars.values()],
weights=[1. / len(others) for others in exemplars.values()])
else:
scores[brand] = 1. * sum(_cosine(followers, others) for others in exemplars.values()) / len(exemplars)
if sqrt:
scores = dict([(b, math.sqrt(s)) for b, s in scores.items()])
return scores
def cosine_weighted_avg(brands, exemplars):
return cosine(brands, exemplars, weighted_avg=True, sqrt=False)
def cosine_sqrt_no_weighted_avg(brands, exemplars):
return cosine(brands, exemplars, weighted_avg=False, sqrt=True)
def cosine_sqrt(brands, exemplars):
return cosine(brands, exemplars, weighted_avg=True, sqrt=True)
def cosine_merge(brands, exemplars):
""" Return the proportion of a brand's followers who also follower an
exemplar. We merge all exemplar followers into one big pseudo-account."""
scores = {}
exemplar_followers = set()
for followers in exemplars.values():
exemplar_followers |= followers
for brand, followers in brands:
scores[brand] = _cosine(followers, exemplar_followers)
return scores
def adamic(brands, exemplars):
""" Return the average Adamic/Adar similarity between a brand's followers
and the followers of each exemplar. We approximate the number of followed
accounts per user by only considering those in our brand set."""
print('adamic deprecated...requires loading all brands in memory.')
return
degrees = compute_log_degrees(brands, exemplars)
scores = {}
exemplar_sums = dict([(exemplar, sum(degrees[z] for z in exemplars[exemplar])) for exemplar in exemplars])
for brand in sorted(brands):
brand_sum = sum(degrees[z] for z in brands[brand])
total = 0.
for exemplar in exemplars:
total += sum(degrees[z] for z in brands[brand] & exemplars[exemplar]) / (brand_sum + exemplar_sums[exemplar])
scores[brand] = total / len(exemplars)
return scores
def compute_rarity_scores(exemplars):
""" Compute a score for each follower that is sum_i (1/n_i), where n_i is
the degree of the ith exemplar they follow.
>>> compute_rarity_scores({'e1':{1,2,3,4}, 'e2':{4,5}}).items()
[(1, 0.25), (2, 0.25), (3, 0.25), (4, 0.75), (5, 0.5)]
"""
scores = defaultdict(lambda: 0.)
for followers in exemplars.values():
score = 1. / len(followers)
for f in followers:
scores[f] += score
return scores
def rarity(brands, exemplars):
""" Compute a score for each follower that is sum_i (1/n_i), where n_i is the degree of the ith exemplar they follow.
The score for a brand is then the average of their follower scores."""
rarity = compute_rarity_scores(exemplars)
scores = {}
for brand, followers in brands:
scores[brand] = sum(rarity[f] for f in followers) / len(followers)
return scores
def compute_rarity_scores_log(exemplars):
""" Compute a score for each follower that is sum_i (1/n_i), where n_i is
the degree of the ith exemplar they follow.
>>> compute_rarity_scores({'e1':{1,2,3,4}, 'e2':{4,5}}).items()
[(1, 0.25), (2, 0.25), (3, 0.25), (4, 0.75), (5, 0.5)]
"""
scores = defaultdict(lambda: 0.)
for followers in exemplars.values():
score = 1. / math.log(len(followers))
for f in followers:
scores[f] += score
return scores
def rarity_log(brands, exemplars):
""" Compute a score for each follower that is sum_i (1/log(n_i)), where n_i is the degree of the ith exemplar they follow.
The score for a brand is then the average of their follower scores."""
rarity = compute_rarity_scores_log(exemplars)
scores = {}
for brand, followers in brands:
scores[brand] = sum(rarity[f] for f in followers) / len(followers)
return scores
def mkdirs(filename):
report.mkdirs(os.path.dirname(filename))
def analyze_followers(brand_follower_file, exemplar_follower_file, outfile, analyze_fn,
min_followers, max_followers, sample_exemplars):
brands = iter_follower_file(brand_follower_file)
exemplars = read_follower_file(exemplar_follower_file, min_followers=min_followers, max_followers=max_followers, blacklist=get_twitter_handles(brand_follower_file))
print('read follower data for %d exemplars' % (len(exemplars)))
if sample_exemplars < 100: # sample a subset of exemplars.
exemplars = dict([(k, exemplars[k]) for k in random.sample(exemplars.keys(), int(len(exemplars) * sample_exemplars / 100.))])
print('sampled %d exemplars' % (len(exemplars)))
analyze = getattr(sys.modules[__name__], analyze_fn)
scores = analyze(brands, exemplars)
mkdirs(outfile)
outf = open(outfile, 'wt')
for brand in sorted(scores):
outf.write('%s %g\n' % (brand, scores[brand]))
outf.flush()
outf.close()
print('results written to', outfile)
def main():
args = docopt(__doc__)
print(args)
if '--seed' in args:
random.seed(args['--seed'])
if args['--network']:
analyze_followers(args['--brand-followers'], args['--exemplar-followers'], args['--output'], args['--network-method'],
int(args['--min-followers']), int(float(args['--max-followers'])), float(args['--sample-exemplars']))
if args['--text']:
analyze_text(args['--brand-tweets'], args['--exemplar-tweets'], args['--sample-tweets'], args['--output'], args['--text-method'])
if __name__ == '__main__':
main()
|
tapilab/brandelion | brandelion/cli/analyze.py | vectorize | python | def vectorize(json_file, vec, dofit=True):
## CountVectorizer, efficiently.
screen_names = [x[0] for x in extract_tweets(json_file)]
if dofit:
X = vec.fit_transform(x[1] for x in extract_tweets(json_file))
else:
X = vec.transform(x[1] for x in extract_tweets(json_file))
return screen_names, X | Return a matrix where each row corresponds to a Twitter account, and
each column corresponds to the number of times a term is used by that
account. | train | https://github.com/tapilab/brandelion/blob/40a5a5333cf704182c8666d1fbbbdadc7ff88546/brandelion/cli/analyze.py#L99-L109 | [
"def extract_tweets(json_file):\n \"\"\" Yield screen_name, string tuples, where the string is the\n concatenation of all tweets of this user. \"\"\"\n for screen_name, tweet_iter in groupby(parse_json(json_file), lambda x: x[0]):\n tweets = [t[1] for t in tweet_iter]\n yield screen_name, ' '... | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Analyze social and linguistic brand data.
usage:
brandelion analyze --text --brand-tweets <file> --exemplar-tweets <file> --sample-tweets <file> --output <file> [--text-method <string>]
brandelion analyze --network --brand-followers <file> --exemplar-followers <file> --output <file> [--network-method <string> --min-followers <n> --max-followers <n> --sample-exemplars <p> --seed <s>]
Options
-h, --help
--brand-followers <file> File containing follower data for brand accounts.
--brand-tweets <file> File containing tweets from brand accounts.
--exemplar-followers <file> File containing follower data for exemplar accounts.
--exemplar-tweets <file> File containing tweets from exemplar accounts.
--sample-tweets <file> File containing tweets from representative sample of Twitter.
--text-method <string> Method to do text analysis [default: chi2]
--network-method <string> Method to do text analysis [default: jaccard]
-o, --output <file> File to store results
-t, --text Analyze text of tweets.
-n, --network Analyze followers.
--min-followers <n> Ignore exemplars that don't have at least n followers [default: 0]
--max-followers <n> Ignore exemplars that have more than least n followers [default: 1e10]
--sample-exemplars <p> Sample p percent of the exemplars, uniformly at random. [default: 100]
--seed <s> Seed for random sampling. [default: 12345]
"""
from collections import Counter, defaultdict
from docopt import docopt
import io
from itertools import groupby
import gzip
import json
import math
import numpy as np
import os
import re
import random
from scipy.sparse import vstack
import sys
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_selection import chi2 as skchi2
from sklearn import linear_model
from . import report
### TEXT ANALYSIS ###
def parse_json(json_file, include_date=False):
""" Yield screen_name, text tuples from a json file. """
if json_file[-2:] == 'gz':
fh = gzip.open(json_file, 'rt')
else:
fh = io.open(json_file, mode='rt', encoding='utf8')
for line in fh:
try:
jj = json.loads(line)
if type(jj) is not list:
jj = [jj]
for j in jj:
if include_date:
yield (j['user']['screen_name'].lower(), j['text'], j['created_at'])
else:
if 'full_text' in j: # get untruncated text if available.
yield (j['user']['screen_name'].lower(), j['full_text'])
else:
yield (j['user']['screen_name'].lower(), j['text'])
except Exception as e:
sys.stderr.write('skipping json error: %s\n' % e)
def extract_tweets(json_file):
""" Yield screen_name, string tuples, where the string is the
concatenation of all tweets of this user. """
for screen_name, tweet_iter in groupby(parse_json(json_file), lambda x: x[0]):
tweets = [t[1] for t in tweet_iter]
yield screen_name, ' '.join(tweets)
def preprocess(s):
"""
>>> preprocess('#hi there http://www.foo.com @you isn"t RT <>')
'hashtaghi hashtaghi there isn"t'
"""
# s = re.sub('@\S+', 'thisisamention', s) # map all mentions to thisisamention
s = re.sub(r'@\S+', ' ', s) # map all mentions to thisisamention
# s = re.sub('http\S+', 'http', s) # keep only http from urls
s = re.sub(r'http\S+', ' ', s) # keep only http from urls
s = re.sub(r'#(\S+)', r'hashtag\1 hashtag\1', s) # #foo -> hashtagfoo hashtagfoo (for retaining hashtags even using bigrams)
# s = re.sub(r'[0-9]+', '9', s) # 1234 -> 9
s = re.sub(r'\bRT\b', ' ', s, re.IGNORECASE)
s = re.sub(r'&[a-z]+;', ' ', s, re.IGNORECASE)
s = re.sub(r'\s+', ' ', s).strip()
return s.lower()
def chi2(exemplars, samples, n=300):
y = np.array(([1.] * exemplars.shape[0]) + ([.0] * samples.shape[0]))
X = vstack((exemplars, samples)).tocsr()
clf = linear_model.LogisticRegression(penalty='l2')
clf.fit(X, y)
coef = clf.coef_[0]
chis, pvals = skchi2(X, y)
top_indices = chis.argsort()[::-1]
top_indices = [i for i in top_indices if coef[i] > 0]
for idx in range(len(coef)):
coef[idx] = 0.
for idx in top_indices[:n]:
coef[idx] = chis[idx]
return coef
def do_score(vec, coef):
return np.sum(coef[vec.nonzero()[1]]) / np.sum(coef)
def write_top_words(fname, vocab, scores):
outf = io.open(fname, 'w', encoding='utf8')
for i in np.argsort(scores)[::-1]:
if scores[i] > 0:
outf.write('%s %g\n' % (vocab[i], scores[i]))
outf.close()
def analyze_text(brand_tweets_file, exemplar_tweets_file, sample_tweets_file, outfile, analyze_fn):
analyze = getattr(sys.modules[__name__], analyze_fn)
vec = CountVectorizer(min_df=3, preprocessor=preprocess, ngram_range=(2, 2), binary=True)
_, exemplar_vectors = vectorize(exemplar_tweets_file, vec, dofit=True)
print('read tweets for %d exemplar accounts' % exemplar_vectors.shape[0])
brands, brand_vectors = vectorize(brand_tweets_file, vec, dofit=False)
print('read tweets for %d brand accounts' % brand_vectors.shape[0])
_, sample_vectors = vectorize(sample_tweets_file, vec, dofit=False)
print('read tweets for %d sample accounts' % sample_vectors.shape[0])
scores = analyze(exemplar_vectors, sample_vectors)
vocab = vec.get_feature_names()
write_top_words(outfile + '.topwords', vocab, scores)
print('top 10 ngrams:\n', '\n'.join(['%s=%.4g' % (vocab[i], scores[i]) for i in np.argsort(scores)[::-1][:10]]))
outf = open(outfile, 'wt')
for bi, brand_vec in enumerate(brand_vectors):
outf.write('%s %g\n' % (brands[bi], do_score(brand_vec, scores)))
outf.flush()
### FOLLOWER ANALYSIS ###
def get_twitter_handles(fname):
handles = set()
with open(fname, 'rt') as f:
for line in f:
handles.add(line[:90].split()[0].lower())
return handles
def read_follower_file(fname, min_followers=0, max_followers=1e10, blacklist=set()):
""" Read a file of follower information and return a dictionary mapping screen_name to a set of follower ids. """
result = {}
with open(fname, 'rt') as f:
for line in f:
parts = line.split()
if len(parts) > 3:
if parts[1].lower() not in blacklist:
followers = set(int(x) for x in parts[2:])
if len(followers) > min_followers and len(followers) <= max_followers:
result[parts[1].lower()] = followers
else:
print('skipping exemplar', parts[1].lower())
return result
def iter_follower_file(fname):
""" Iterator from a file of follower information and return a tuple of screen_name, follower ids.
File format is:
<iso timestamp> <screen_name> <follower_id1> <follower_ids2> ...
"""
with open(fname, 'rt') as f:
for line in f:
parts = line.split()
if len(parts) > 3:
yield parts[1].lower(), set(int(x) for x in parts[2:])
# JACCARD
def _jaccard(a, b):
""" Return the Jaccard similarity between two sets a and b. """
return 1. * len(a & b) / len(a | b)
def jaccard(brands, exemplars, weighted_avg=False, sqrt=False):
    """Score each brand by the mean Jaccard similarity between its
    followers and the followers of every exemplar.

    brands: iterable of (name, follower_set) pairs.
    exemplars: dict mapping exemplar name -> follower_set.
    weighted_avg: weight each exemplar by 1/|followers| instead of equally.
    sqrt: return the square root of each score.
    """
    def sim(x, y):
        # Jaccard similarity, inlined from the module helper.
        return 1. * len(x & y) / len(x | y)

    exemplar_sets = list(exemplars.values())
    scores = {}
    for name, followers in brands:
        sims = [sim(followers, other) for other in exemplar_sets]
        if weighted_avg:
            scores[name] = np.average(sims, weights=[1. / len(other) for other in exemplar_sets])
        else:
            scores[name] = 1. * sum(sims) / len(exemplars)
    if sqrt:
        scores = {name: math.sqrt(val) for name, val in scores.items()}
    return scores
def jaccard_weighted_avg(brands, exemplars):
    """Jaccard scores with exemplars weighted by 1/|followers|, no sqrt."""
    return jaccard(brands, exemplars, True, False)
def jaccard_sqrt_no_weighted_avg(brands, exemplars):
    """Square root of the unweighted average Jaccard scores."""
    return jaccard(brands, exemplars, False, True)
def jaccard_sqrt(brands, exemplars):
    """Square root of the 1/|followers|-weighted average Jaccard scores."""
    return jaccard(brands, exemplars, weighted_avg=True, sqrt=True)
def jaccard_merge(brands, exemplars):
    """Score each brand by Jaccard similarity against a single
    pseudo-account formed by merging the followers of all exemplars."""
    merged = set()
    for follower_set in exemplars.values():
        merged.update(follower_set)
    scores = {}
    for name, followers in brands:
        # Jaccard similarity against the merged pseudo-account.
        scores[name] = 1. * len(followers & merged) / len(followers | merged)
    return scores
def compute_log_degrees(brands, exemplars):
    """Return {follower_id: 1 / log(Z + 1)}, where Z is the number of brand
    accounts that follower follows.

    *exemplars* is accepted for interface compatibility but deliberately
    not counted, so following many exemplars is not penalized.
    """
    degree = Counter()
    for follower_set in brands.values():
        degree.update(follower_set)
    # Add-one smoothing: a degree of 1 would otherwise give 1/log(1) == 1/0.
    degree.update(degree.keys())
    return Counter({fid: 1. / math.log(cnt) for fid, cnt in degree.items()})
# PROPORTION
def _proportion(a, b):
""" Return the len(a & b) / len(a) """
return 1. * len(a & b) / len(a)
def proportion(brands, exemplars, weighted_avg=False, sqrt=False):
    """Score each brand by the mean fraction of its followers that also
    follow each exemplar.

    brands: iterable of (name, follower_set) pairs.
    exemplars: dict mapping exemplar name -> follower_set.
    weighted_avg: weight each exemplar by 1/|followers| instead of equally.
    sqrt: return the square root of each score.
    """
    exemplar_sets = list(exemplars.values())
    scores = {}
    for name, followers in brands:
        # Inlined proportion: |followers & other| / |followers|.
        overlaps = [1. * len(followers & other) / len(followers) for other in exemplar_sets]
        if weighted_avg:
            scores[name] = np.average(overlaps, weights=[1. / len(other) for other in exemplar_sets])
        else:
            scores[name] = 1. * sum(overlaps) / len(exemplars)
    if sqrt:
        scores = {name: math.sqrt(val) for name, val in scores.items()}
    return scores
def proportion_weighted_avg(brands, exemplars):
    """Proportion scores with exemplars weighted by 1/|followers|, no sqrt."""
    return proportion(brands, exemplars, weighted_avg=True, sqrt=False)
def proportion_sqrt_no_weighted_avg(brands, exemplars):
    """Square root of the unweighted average proportion scores."""
    return proportion(brands, exemplars, weighted_avg=False, sqrt=True)
def proportion_sqrt(brands, exemplars):
    """Square root of the 1/|followers|-weighted average proportion scores."""
    return proportion(brands, exemplars, weighted_avg=True, sqrt=True)
def proportion_merge(brands, exemplars):
    """Score each brand by the fraction of its followers that follow at
    least one exemplar (all exemplar followers merged into one set)."""
    merged = set()
    for follower_set in exemplars.values():
        merged.update(follower_set)
    return {name: 1. * len(followers & merged) / len(followers)
            for name, followers in brands}
# COSINE SIMILARITY
def _cosine(a, b):
""" Return the len(a & b) / len(a) """
return 1. * len(a & b) / (math.sqrt(len(a)) * math.sqrt(len(b)))
def cosine(brands, exemplars, weighted_avg=False, sqrt=False):
    """Score each brand by the mean cosine similarity between its follower
    set and each exemplar's follower set.

    brands: iterable of (name, follower_set) pairs.
    exemplars: dict mapping exemplar name -> follower_set.
    weighted_avg: weight each exemplar by 1/|followers| instead of equally.
    sqrt: return the square root of each score.
    """
    exemplar_sets = list(exemplars.values())
    scores = {}
    for name, followers in brands:
        # Inlined cosine: |x & y| / (sqrt(|x|) * sqrt(|y|)).
        sims = [1. * len(followers & other) / (math.sqrt(len(followers)) * math.sqrt(len(other)))
                for other in exemplar_sets]
        if weighted_avg:
            scores[name] = np.average(sims, weights=[1. / len(other) for other in exemplar_sets])
        else:
            scores[name] = 1. * sum(sims) / len(exemplars)
    if sqrt:
        scores = {name: math.sqrt(val) for name, val in scores.items()}
    return scores
def cosine_weighted_avg(brands, exemplars):
    """Cosine scores with exemplars weighted by 1/|followers|, no sqrt."""
    return cosine(brands, exemplars, weighted_avg=True, sqrt=False)
def cosine_sqrt_no_weighted_avg(brands, exemplars):
    """Square root of the unweighted average cosine scores."""
    return cosine(brands, exemplars, weighted_avg=False, sqrt=True)
def cosine_sqrt(brands, exemplars):
    """Square root of the 1/|followers|-weighted average cosine scores."""
    return cosine(brands, exemplars, weighted_avg=True, sqrt=True)
def cosine_merge(brands, exemplars):
    """Score each brand by cosine similarity against the union of all
    exemplar followers treated as a single pseudo-account."""
    merged = set()
    for follower_set in exemplars.values():
        merged.update(follower_set)
    scores = {}
    for name, followers in brands:
        scores[name] = 1. * len(followers & merged) / (math.sqrt(len(followers)) * math.sqrt(len(merged)))
    return scores
def adamic(brands, exemplars):
    """Deprecated: formerly the average Adamic/Adar similarity between a
    brand's followers and the followers of each exemplar.

    The old computation required loading every brand's follower set into
    memory at once (it indexed `brands` as a dict), which does not fit the
    streaming (name, followers) iterator now passed in, so this function
    only warns and returns None. The unreachable implementation that
    followed the early return has been removed.
    """
    print('adamic deprecated...requires loading all brands in memory.')
    return None
def compute_rarity_scores(exemplars):
    """Score each follower as the sum, over the exemplars they follow, of
    1 / (that exemplar's follower count): followers of small ("rare")
    exemplars score higher.

    >>> sorted(compute_rarity_scores({'e1': {1, 2, 3, 4}, 'e2': {4, 5}}).items())
    [(1, 0.25), (2, 0.25), (3, 0.25), (4, 0.75), (5, 0.5)]
    """
    scores = defaultdict(float)
    for follower_set in exemplars.values():
        weight = 1. / len(follower_set)
        for fid in follower_set:
            scores[fid] += weight
    return scores
def rarity(brands, exemplars):
    """ Compute a score for each follower that is sum_i (1/n_i), where n_i is the degree of the ith exemplar they follow.
    The score for a brand is then the average of their follower scores."""
    # Follower id -> rarity weight, derived from the exemplar follower sets.
    rarity = compute_rarity_scores(exemplars)
    scores = {}
    # brands is an iterable of (screen_name, follower_id_set) pairs.
    for brand, followers in brands:
        # Followers unseen in any exemplar contribute 0 (rarity is a defaultdict).
        scores[brand] = sum(rarity[f] for f in followers) / len(followers)
    return scores
def compute_rarity_scores_log(exemplars):
    """Score each follower as the sum, over the exemplars they follow, of
    1 / log(n_i), where n_i is that exemplar's follower count.

    Exemplars with fewer than two followers are skipped: log(1) == 0 would
    otherwise cause a ZeroDivisionError (an effectively infinite weight).
    The previous docstring/doctest was copy-pasted from
    compute_rarity_scores and described the non-log formula.

    >>> sorted(compute_rarity_scores_log({'e1': {1, 2}, 'e2': {2, 3}}).items())[0]
    (1, 1.4426950408889634)
    """
    scores = defaultdict(lambda: 0.)
    for followers in exemplars.values():
        if len(followers) < 2:
            continue  # avoid dividing by log(1) == 0
        score = 1. / math.log(len(followers))
        for f in followers:
            scores[f] += score
    return scores
def rarity_log(brands, exemplars):
    """ Compute a score for each follower that is sum_i (1/log(n_i)), where n_i is the degree of the ith exemplar they follow.
    The score for a brand is then the average of their follower scores."""
    # Follower id -> 1/log(degree) weight from the exemplar follower sets.
    rarity = compute_rarity_scores_log(exemplars)
    scores = {}
    # brands is an iterable of (screen_name, follower_id_set) pairs.
    for brand, followers in brands:
        # Followers unseen in any exemplar contribute 0 (rarity is a defaultdict).
        scores[brand] = sum(rarity[f] for f in followers) / len(followers)
    return scores
def mkdirs(filename):
    """Create the parent directory of *filename* (delegates to report.mkdirs)."""
    report.mkdirs(os.path.dirname(filename))
def analyze_followers(brand_follower_file, exemplar_follower_file, outfile, analyze_fn,
                      min_followers, max_followers, sample_exemplars):
    """Score every brand account against the exemplar accounts and write
    '<brand> <score>' lines (sorted by brand name) to *outfile*.

    analyze_fn: name of a scoring function in this module (e.g. 'jaccard').
    sample_exemplars: percentage of exemplars to keep; when < 100, that
    fraction is sampled uniformly at random.
    """
    brands = iter_follower_file(brand_follower_file)
    exemplars = read_follower_file(exemplar_follower_file, min_followers=min_followers,
                                   max_followers=max_followers,
                                   blacklist=get_twitter_handles(brand_follower_file))
    print('read follower data for %d exemplars' % (len(exemplars)))
    if sample_exemplars < 100:  # sample a subset of exemplars.
        # Fix: random.sample requires a sequence; passing dict.keys() raises
        # TypeError on Python >= 3.11, so materialize the keys first.
        keep = random.sample(list(exemplars), int(len(exemplars) * sample_exemplars / 100.))
        exemplars = dict([(k, exemplars[k]) for k in keep])
        print('sampled %d exemplars' % (len(exemplars)))
    analyze = getattr(sys.modules[__name__], analyze_fn)
    scores = analyze(brands, exemplars)
    mkdirs(outfile)
    # Context manager guarantees the output is flushed and closed.
    with open(outfile, 'wt') as outf:
        for brand in sorted(scores):
            outf.write('%s %g\n' % (brand, scores[brand]))
    print('results written to', outfile)
def main():
    """Command-line entry point: parse docopt arguments and dispatch to the
    network (follower) and/or text analysis pipelines."""
    args = docopt(__doc__)
    print(args)
    # NOTE(review): docopt always includes the '--seed' key (it has a
    # default), so this condition is always true and seeding is unconditional.
    if '--seed' in args:
        random.seed(args['--seed'])
    if args['--network']:
        analyze_followers(args['--brand-followers'], args['--exemplar-followers'], args['--output'], args['--network-method'],
                          int(args['--min-followers']), int(float(args['--max-followers'])), float(args['--sample-exemplars']))
    if args['--text']:
        analyze_text(args['--brand-tweets'], args['--exemplar-tweets'], args['--sample-tweets'], args['--output'], args['--text-method'])
if __name__ == '__main__':
    main()
|
tapilab/brandelion | brandelion/cli/analyze.py | read_follower_file | python | def read_follower_file(fname, min_followers=0, max_followers=1e10, blacklist=set()):
result = {}
with open(fname, 'rt') as f:
for line in f:
parts = line.split()
if len(parts) > 3:
if parts[1].lower() not in blacklist:
followers = set(int(x) for x in parts[2:])
if len(followers) > min_followers and len(followers) <= max_followers:
result[parts[1].lower()] = followers
else:
print('skipping exemplar', parts[1].lower())
return result | Read a file of follower information and return a dictionary mapping screen_name to a set of follower ids. | train | https://github.com/tapilab/brandelion/blob/40a5a5333cf704182c8666d1fbbbdadc7ff88546/brandelion/cli/analyze.py#L171-L184 | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Analyze social and linguistic brand data.
usage:
brandelion analyze --text --brand-tweets <file> --exemplar-tweets <file> --sample-tweets <file> --output <file> [--text-method <string>]
brandelion analyze --network --brand-followers <file> --exemplar-followers <file> --output <file> [--network-method <string> --min-followers <n> --max-followers <n> --sample-exemplars <p> --seed <s>]
Options
-h, --help
--brand-followers <file> File containing follower data for brand accounts.
--brand-tweets <file> File containing tweets from brand accounts.
--exemplar-followers <file> File containing follower data for exemplar accounts.
--exemplar-tweets <file> File containing tweets from exemplar accounts.
--sample-tweets <file> File containing tweets from representative sample of Twitter.
--text-method <string> Method to do text analysis [default: chi2]
--network-method <string> Method to do text analysis [default: jaccard]
-o, --output <file> File to store results
-t, --text Analyze text of tweets.
-n, --network Analyze followers.
--min-followers <n> Ignore exemplars that don't have at least n followers [default: 0]
--max-followers <n> Ignore exemplars that have more than least n followers [default: 1e10]
--sample-exemplars <p> Sample p percent of the exemplars, uniformly at random. [default: 100]
--seed <s> Seed for random sampling. [default: 12345]
"""
from collections import Counter, defaultdict
from docopt import docopt
import io
from itertools import groupby
import gzip
import json
import math
import numpy as np
import os
import re
import random
from scipy.sparse import vstack
import sys
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_selection import chi2 as skchi2
from sklearn import linear_model
from . import report
### TEXT ANALYSIS ###
def parse_json(json_file, include_date=False):
    """Yield (screen_name, text) or (screen_name, text, created_at) tuples
    from a JSON tweet file.

    Handles plain and gzip files (detected by a .gz suffix) and lines
    holding either a single tweet object or a list of them. Prefers the
    untruncated 'full_text' field when present (and include_date is False).
    Malformed lines are skipped with a note on stderr.
    """
    if json_file[-2:] == 'gz':
        opener = gzip.open(json_file, 'rt')
    else:
        opener = io.open(json_file, mode='rt', encoding='utf8')
    # Fix: the original never closed the handle; the context manager closes
    # it even if the consumer abandons the generator early.
    with opener as fh:
        for line in fh:
            try:
                jj = json.loads(line)
                if type(jj) is not list:
                    jj = [jj]
                for j in jj:
                    if include_date:
                        yield (j['user']['screen_name'].lower(), j['text'], j['created_at'])
                    elif 'full_text' in j:  # get untruncated text if available.
                        yield (j['user']['screen_name'].lower(), j['full_text'])
                    else:
                        yield (j['user']['screen_name'].lower(), j['text'])
            except Exception as e:
                sys.stderr.write('skipping json error: %s\n' % e)
def extract_tweets(json_file):
    """ Yield screen_name, string tuples, where the string is the
    concatenation of all tweets of this user. """
    # groupby only merges *consecutive* tweets with the same screen_name,
    # so this assumes the file is grouped/ordered by account.
    for screen_name, tweet_iter in groupby(parse_json(json_file), lambda x: x[0]):
        tweets = [t[1] for t in tweet_iter]
        yield screen_name, ' '.join(tweets)
def preprocess(s):
"""
>>> preprocess('#hi there http://www.foo.com @you isn"t RT <>')
'hashtaghi hashtaghi there isn"t'
"""
# s = re.sub('@\S+', 'thisisamention', s) # map all mentions to thisisamention
s = re.sub(r'@\S+', ' ', s) # map all mentions to thisisamention
# s = re.sub('http\S+', 'http', s) # keep only http from urls
s = re.sub(r'http\S+', ' ', s) # keep only http from urls
s = re.sub(r'#(\S+)', r'hashtag\1 hashtag\1', s) # #foo -> hashtagfoo hashtagfoo (for retaining hashtags even using bigrams)
# s = re.sub(r'[0-9]+', '9', s) # 1234 -> 9
s = re.sub(r'\bRT\b', ' ', s, re.IGNORECASE)
s = re.sub(r'&[a-z]+;', ' ', s, re.IGNORECASE)
s = re.sub(r'\s+', ' ', s).strip()
return s.lower()
def vectorize(json_file, vec, dofit=True):
    """ Return a matrix where each row corresponds to a Twitter account, and
    each column corresponds to the number of times a term is used by that
    account. """
    ## CountVectorizer, efficiently.
    # NOTE(review): the tweet file is parsed twice (once for names, once for
    # the matrix); row order aligns because both passes iterate in file order.
    screen_names = [x[0] for x in extract_tweets(json_file)]
    if dofit:
        # Learn the vocabulary from this corpus, then transform it.
        X = vec.fit_transform(x[1] for x in extract_tweets(json_file))
    else:
        # Reuse the vocabulary learned from a previous fit.
        X = vec.transform(x[1] for x in extract_tweets(json_file))
    return screen_names, X
def chi2(exemplars, samples, n=300):
    """Return a per-term weight vector scoring how strongly each vocabulary
    term distinguishes exemplar accounts (label 1) from sample accounts
    (label 0).

    Fits an L2 logistic regression and keeps the chi2 statistic for the top
    *n* terms whose regression coefficient is positive (i.e. indicative of
    exemplars); every other term's weight is set to 0.
    """
    y = np.array(([1.] * exemplars.shape[0]) + ([.0] * samples.shape[0]))
    X = vstack((exemplars, samples)).tocsr()
    clf = linear_model.LogisticRegression(penalty='l2')
    clf.fit(X, y)
    coef = clf.coef_[0]
    chis, pvals = skchi2(X, y)
    top_indices = chis.argsort()[::-1]  # term indices by descending chi2
    top_indices = [i for i in top_indices if coef[i] > 0]  # exemplar-indicative only
    for idx in range(len(coef)):
        coef[idx] = 0.
    for idx in top_indices[:n]:
        coef[idx] = chis[idx]
    return coef
def do_score(vec, coef):
    """Return the fraction of the total term weight in *coef* captured by
    the nonzero columns of the (1 x V) sparse row *vec*."""
    active_columns = vec.nonzero()[1]
    return np.sum(coef[active_columns]) / np.sum(coef)
def write_top_words(fname, vocab, scores):
    """Write '<term> <score>' lines to *fname* for every positively scored
    term, ordered from highest to lowest score."""
    with io.open(fname, 'w', encoding='utf8') as outf:
        for idx in np.argsort(scores)[::-1]:
            if scores[idx] > 0:
                outf.write('%s %g\n' % (vocab[idx], scores[idx]))
def analyze_text(brand_tweets_file, exemplar_tweets_file, sample_tweets_file, outfile, analyze_fn):
    """Score each brand account by the textual similarity of its tweets to
    the exemplar tweets and write '<brand> <score>' lines to *outfile*.

    analyze_fn: name of a term-weighting function in this module (e.g.
    'chi2'); the learned term weights are also written to
    '<outfile>.topwords'.
    """
    analyze = getattr(sys.modules[__name__], analyze_fn)
    vec = CountVectorizer(min_df=3, preprocessor=preprocess, ngram_range=(2, 2), binary=True)
    _, exemplar_vectors = vectorize(exemplar_tweets_file, vec, dofit=True)
    print('read tweets for %d exemplar accounts' % exemplar_vectors.shape[0])
    brands, brand_vectors = vectorize(brand_tweets_file, vec, dofit=False)
    print('read tweets for %d brand accounts' % brand_vectors.shape[0])
    _, sample_vectors = vectorize(sample_tweets_file, vec, dofit=False)
    print('read tweets for %d sample accounts' % sample_vectors.shape[0])
    scores = analyze(exemplar_vectors, sample_vectors)
    vocab = vec.get_feature_names()
    write_top_words(outfile + '.topwords', vocab, scores)
    print('top 10 ngrams:\n', '\n'.join(['%s=%.4g' % (vocab[i], scores[i]) for i in np.argsort(scores)[::-1][:10]]))
    # Fix: the output file was flushed but never closed; the context manager
    # guarantees the buffer is written out and the handle released.
    with open(outfile, 'wt') as outf:
        for bi, brand_vec in enumerate(brand_vectors):
            outf.write('%s %g\n' % (brands[bi], do_score(brand_vec, scores)))
### FOLLOWER ANALYSIS ###
def get_twitter_handles(fname):
handles = set()
with open(fname, 'rt') as f:
for line in f:
handles.add(line[:90].split()[0].lower())
return handles
def iter_follower_file(fname):
""" Iterator from a file of follower information and return a tuple of screen_name, follower ids.
File format is:
<iso timestamp> <screen_name> <follower_id1> <follower_ids2> ...
"""
with open(fname, 'rt') as f:
for line in f:
parts = line.split()
if len(parts) > 3:
yield parts[1].lower(), set(int(x) for x in parts[2:])
# JACCARD
def _jaccard(a, b):
""" Return the Jaccard similarity between two sets a and b. """
return 1. * len(a & b) / len(a | b)
def jaccard(brands, exemplars, weighted_avg=False, sqrt=False):
""" Return the average Jaccard similarity between a brand's followers and the
followers of each exemplar. """
scores = {}
for brand, followers in brands:
if weighted_avg:
scores[brand] = np.average([_jaccard(followers, others) for others in exemplars.values()],
weights=[1. / len(others) for others in exemplars.values()])
else:
scores[brand] = 1. * sum(_jaccard(followers, others) for others in exemplars.values()) / len(exemplars)
# limit to exemplars with less than 40k followers: scores[brand] = 1. * sum(_jaccard(brands[brand], others) for others in exemplars.itervalues() if len(others) < 40000) / len(exemplars)
if sqrt:
scores = dict([(b, math.sqrt(s)) for b, s in scores.items()])
return scores
def jaccard_weighted_avg(brands, exemplars):
return jaccard(brands, exemplars, True, False)
def jaccard_sqrt_no_weighted_avg(brands, exemplars):
return jaccard(brands, exemplars, False, True)
def jaccard_sqrt(brands, exemplars):
return jaccard(brands, exemplars, weighted_avg=True, sqrt=True)
def jaccard_merge(brands, exemplars):
""" Return the average Jaccard similarity between a brand's followers and
the followers of each exemplar. We merge all exemplar followers into one
big pseudo-account."""
scores = {}
exemplar_followers = set()
for followers in exemplars.values():
exemplar_followers |= followers
for brand, followers in brands:
scores[brand] = _jaccard(followers, exemplar_followers)
return scores
def compute_log_degrees(brands, exemplars):
""" For each follower, let Z be the total number of brands they follow.
Return a dictionary of 1. / log(Z), for each follower.
"""
counts = Counter()
for followers in brands.values(): # + exemplars.values(): # Include exemplars in these counts? No, don't want to penalize people who follow many exemplars.
counts.update(followers)
counts.update(counts.keys()) # Add 1 to each count.
for k in counts:
counts[k] = 1. / math.log(counts[k])
return counts
# PROPORTION
def _proportion(a, b):
""" Return the len(a & b) / len(a) """
return 1. * len(a & b) / len(a)
def proportion(brands, exemplars, weighted_avg=False, sqrt=False):
"""
Return the proportion of a brand's followers who also follow an exemplar.
"""
scores = {}
for brand, followers in brands:
if weighted_avg:
scores[brand] = np.average([_proportion(followers, others) for others in exemplars.values()],
weights=[1. / len(others) for others in exemplars.values()])
else:
scores[brand] = 1. * sum(_proportion(followers, others) for others in exemplars.values()) / len(exemplars)
if sqrt:
scores = dict([(b, math.sqrt(s)) for b, s in scores.items()])
return scores
def proportion_weighted_avg(brands, exemplars):
return proportion(brands, exemplars, weighted_avg=True, sqrt=False)
def proportion_sqrt_no_weighted_avg(brands, exemplars):
return proportion(brands, exemplars, weighted_avg=False, sqrt=True)
def proportion_sqrt(brands, exemplars):
return proportion(brands, exemplars, weighted_avg=True, sqrt=True)
def proportion_merge(brands, exemplars):
""" Return the proportion of a brand's followers who also follower an
exemplar. We merge all exemplar followers into one big pseudo-account."""
scores = {}
exemplar_followers = set()
for followers in exemplars.values():
exemplar_followers |= followers
for brand, followers in brands:
scores[brand] = _proportion(followers, exemplar_followers)
return scores
# COSINE SIMILARITY
def _cosine(a, b):
""" Return the len(a & b) / len(a) """
return 1. * len(a & b) / (math.sqrt(len(a)) * math.sqrt(len(b)))
def cosine(brands, exemplars, weighted_avg=False, sqrt=False):
"""
Return the cosine similarity betwee a brand's followers and the exemplars.
"""
scores = {}
for brand, followers in brands:
if weighted_avg:
scores[brand] = np.average([_cosine(followers, others) for others in exemplars.values()],
weights=[1. / len(others) for others in exemplars.values()])
else:
scores[brand] = 1. * sum(_cosine(followers, others) for others in exemplars.values()) / len(exemplars)
if sqrt:
scores = dict([(b, math.sqrt(s)) for b, s in scores.items()])
return scores
def cosine_weighted_avg(brands, exemplars):
return cosine(brands, exemplars, weighted_avg=True, sqrt=False)
def cosine_sqrt_no_weighted_avg(brands, exemplars):
return cosine(brands, exemplars, weighted_avg=False, sqrt=True)
def cosine_sqrt(brands, exemplars):
return cosine(brands, exemplars, weighted_avg=True, sqrt=True)
def cosine_merge(brands, exemplars):
""" Return the proportion of a brand's followers who also follower an
exemplar. We merge all exemplar followers into one big pseudo-account."""
scores = {}
exemplar_followers = set()
for followers in exemplars.values():
exemplar_followers |= followers
for brand, followers in brands:
scores[brand] = _cosine(followers, exemplar_followers)
return scores
def adamic(brands, exemplars):
""" Return the average Adamic/Adar similarity between a brand's followers
and the followers of each exemplar. We approximate the number of followed
accounts per user by only considering those in our brand set."""
print('adamic deprecated...requires loading all brands in memory.')
return
degrees = compute_log_degrees(brands, exemplars)
scores = {}
exemplar_sums = dict([(exemplar, sum(degrees[z] for z in exemplars[exemplar])) for exemplar in exemplars])
for brand in sorted(brands):
brand_sum = sum(degrees[z] for z in brands[brand])
total = 0.
for exemplar in exemplars:
total += sum(degrees[z] for z in brands[brand] & exemplars[exemplar]) / (brand_sum + exemplar_sums[exemplar])
scores[brand] = total / len(exemplars)
return scores
def compute_rarity_scores(exemplars):
""" Compute a score for each follower that is sum_i (1/n_i), where n_i is
the degree of the ith exemplar they follow.
>>> compute_rarity_scores({'e1':{1,2,3,4}, 'e2':{4,5}}).items()
[(1, 0.25), (2, 0.25), (3, 0.25), (4, 0.75), (5, 0.5)]
"""
scores = defaultdict(lambda: 0.)
for followers in exemplars.values():
score = 1. / len(followers)
for f in followers:
scores[f] += score
return scores
def rarity(brands, exemplars):
""" Compute a score for each follower that is sum_i (1/n_i), where n_i is the degree of the ith exemplar they follow.
The score for a brand is then the average of their follower scores."""
rarity = compute_rarity_scores(exemplars)
scores = {}
for brand, followers in brands:
scores[brand] = sum(rarity[f] for f in followers) / len(followers)
return scores
def compute_rarity_scores_log(exemplars):
""" Compute a score for each follower that is sum_i (1/n_i), where n_i is
the degree of the ith exemplar they follow.
>>> compute_rarity_scores({'e1':{1,2,3,4}, 'e2':{4,5}}).items()
[(1, 0.25), (2, 0.25), (3, 0.25), (4, 0.75), (5, 0.5)]
"""
scores = defaultdict(lambda: 0.)
for followers in exemplars.values():
score = 1. / math.log(len(followers))
for f in followers:
scores[f] += score
return scores
def rarity_log(brands, exemplars):
""" Compute a score for each follower that is sum_i (1/log(n_i)), where n_i is the degree of the ith exemplar they follow.
The score for a brand is then the average of their follower scores."""
rarity = compute_rarity_scores_log(exemplars)
scores = {}
for brand, followers in brands:
scores[brand] = sum(rarity[f] for f in followers) / len(followers)
return scores
def mkdirs(filename):
report.mkdirs(os.path.dirname(filename))
def analyze_followers(brand_follower_file, exemplar_follower_file, outfile, analyze_fn,
min_followers, max_followers, sample_exemplars):
brands = iter_follower_file(brand_follower_file)
exemplars = read_follower_file(exemplar_follower_file, min_followers=min_followers, max_followers=max_followers, blacklist=get_twitter_handles(brand_follower_file))
print('read follower data for %d exemplars' % (len(exemplars)))
if sample_exemplars < 100: # sample a subset of exemplars.
exemplars = dict([(k, exemplars[k]) for k in random.sample(exemplars.keys(), int(len(exemplars) * sample_exemplars / 100.))])
print('sampled %d exemplars' % (len(exemplars)))
analyze = getattr(sys.modules[__name__], analyze_fn)
scores = analyze(brands, exemplars)
mkdirs(outfile)
outf = open(outfile, 'wt')
for brand in sorted(scores):
outf.write('%s %g\n' % (brand, scores[brand]))
outf.flush()
outf.close()
print('results written to', outfile)
def main():
args = docopt(__doc__)
print(args)
if '--seed' in args:
random.seed(args['--seed'])
if args['--network']:
analyze_followers(args['--brand-followers'], args['--exemplar-followers'], args['--output'], args['--network-method'],
int(args['--min-followers']), int(float(args['--max-followers'])), float(args['--sample-exemplars']))
if args['--text']:
analyze_text(args['--brand-tweets'], args['--exemplar-tweets'], args['--sample-tweets'], args['--output'], args['--text-method'])
if __name__ == '__main__':
main()
|
tapilab/brandelion | brandelion/cli/analyze.py | iter_follower_file | python | def iter_follower_file(fname):
with open(fname, 'rt') as f:
for line in f:
parts = line.split()
if len(parts) > 3:
yield parts[1].lower(), set(int(x) for x in parts[2:]) | Iterator from a file of follower information and return a tuple of screen_name, follower ids.
File format is:
<iso timestamp> <screen_name> <follower_id1> <follower_ids2> ... | train | https://github.com/tapilab/brandelion/blob/40a5a5333cf704182c8666d1fbbbdadc7ff88546/brandelion/cli/analyze.py#L187-L196 | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Analyze social and linguistic brand data.
usage:
brandelion analyze --text --brand-tweets <file> --exemplar-tweets <file> --sample-tweets <file> --output <file> [--text-method <string>]
brandelion analyze --network --brand-followers <file> --exemplar-followers <file> --output <file> [--network-method <string> --min-followers <n> --max-followers <n> --sample-exemplars <p> --seed <s>]
Options
-h, --help
--brand-followers <file> File containing follower data for brand accounts.
--brand-tweets <file> File containing tweets from brand accounts.
--exemplar-followers <file> File containing follower data for exemplar accounts.
--exemplar-tweets <file> File containing tweets from exemplar accounts.
--sample-tweets <file> File containing tweets from representative sample of Twitter.
--text-method <string> Method to do text analysis [default: chi2]
--network-method <string> Method to do text analysis [default: jaccard]
-o, --output <file> File to store results
-t, --text Analyze text of tweets.
-n, --network Analyze followers.
--min-followers <n> Ignore exemplars that don't have at least n followers [default: 0]
--max-followers <n> Ignore exemplars that have more than least n followers [default: 1e10]
--sample-exemplars <p> Sample p percent of the exemplars, uniformly at random. [default: 100]
--seed <s> Seed for random sampling. [default: 12345]
"""
from collections import Counter, defaultdict
from docopt import docopt
import io
from itertools import groupby
import gzip
import json
import math
import numpy as np
import os
import re
import random
from scipy.sparse import vstack
import sys
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_selection import chi2 as skchi2
from sklearn import linear_model
from . import report
### TEXT ANALYSIS ###
def parse_json(json_file, include_date=False):
""" Yield screen_name, text tuples from a json file. """
if json_file[-2:] == 'gz':
fh = gzip.open(json_file, 'rt')
else:
fh = io.open(json_file, mode='rt', encoding='utf8')
for line in fh:
try:
jj = json.loads(line)
if type(jj) is not list:
jj = [jj]
for j in jj:
if include_date:
yield (j['user']['screen_name'].lower(), j['text'], j['created_at'])
else:
if 'full_text' in j: # get untruncated text if available.
yield (j['user']['screen_name'].lower(), j['full_text'])
else:
yield (j['user']['screen_name'].lower(), j['text'])
except Exception as e:
sys.stderr.write('skipping json error: %s\n' % e)
def extract_tweets(json_file):
""" Yield screen_name, string tuples, where the string is the
concatenation of all tweets of this user. """
for screen_name, tweet_iter in groupby(parse_json(json_file), lambda x: x[0]):
tweets = [t[1] for t in tweet_iter]
yield screen_name, ' '.join(tweets)
def preprocess(s):
"""
>>> preprocess('#hi there http://www.foo.com @you isn"t RT <>')
'hashtaghi hashtaghi there isn"t'
"""
# s = re.sub('@\S+', 'thisisamention', s) # map all mentions to thisisamention
s = re.sub(r'@\S+', ' ', s) # map all mentions to thisisamention
# s = re.sub('http\S+', 'http', s) # keep only http from urls
s = re.sub(r'http\S+', ' ', s) # keep only http from urls
s = re.sub(r'#(\S+)', r'hashtag\1 hashtag\1', s) # #foo -> hashtagfoo hashtagfoo (for retaining hashtags even using bigrams)
# s = re.sub(r'[0-9]+', '9', s) # 1234 -> 9
s = re.sub(r'\bRT\b', ' ', s, re.IGNORECASE)
s = re.sub(r'&[a-z]+;', ' ', s, re.IGNORECASE)
s = re.sub(r'\s+', ' ', s).strip()
return s.lower()
def vectorize(json_file, vec, dofit=True):
""" Return a matrix where each row corresponds to a Twitter account, and
each column corresponds to the number of times a term is used by that
account. """
## CountVectorizer, efficiently.
screen_names = [x[0] for x in extract_tweets(json_file)]
if dofit:
X = vec.fit_transform(x[1] for x in extract_tweets(json_file))
else:
X = vec.transform(x[1] for x in extract_tweets(json_file))
return screen_names, X
def chi2(exemplars, samples, n=300):
y = np.array(([1.] * exemplars.shape[0]) + ([.0] * samples.shape[0]))
X = vstack((exemplars, samples)).tocsr()
clf = linear_model.LogisticRegression(penalty='l2')
clf.fit(X, y)
coef = clf.coef_[0]
chis, pvals = skchi2(X, y)
top_indices = chis.argsort()[::-1]
top_indices = [i for i in top_indices if coef[i] > 0]
for idx in range(len(coef)):
coef[idx] = 0.
for idx in top_indices[:n]:
coef[idx] = chis[idx]
return coef
def do_score(vec, coef):
return np.sum(coef[vec.nonzero()[1]]) / np.sum(coef)
def write_top_words(fname, vocab, scores):
outf = io.open(fname, 'w', encoding='utf8')
for i in np.argsort(scores)[::-1]:
if scores[i] > 0:
outf.write('%s %g\n' % (vocab[i], scores[i]))
outf.close()
def analyze_text(brand_tweets_file, exemplar_tweets_file, sample_tweets_file, outfile, analyze_fn):
analyze = getattr(sys.modules[__name__], analyze_fn)
vec = CountVectorizer(min_df=3, preprocessor=preprocess, ngram_range=(2, 2), binary=True)
_, exemplar_vectors = vectorize(exemplar_tweets_file, vec, dofit=True)
print('read tweets for %d exemplar accounts' % exemplar_vectors.shape[0])
brands, brand_vectors = vectorize(brand_tweets_file, vec, dofit=False)
print('read tweets for %d brand accounts' % brand_vectors.shape[0])
_, sample_vectors = vectorize(sample_tweets_file, vec, dofit=False)
print('read tweets for %d sample accounts' % sample_vectors.shape[0])
scores = analyze(exemplar_vectors, sample_vectors)
vocab = vec.get_feature_names()
write_top_words(outfile + '.topwords', vocab, scores)
print('top 10 ngrams:\n', '\n'.join(['%s=%.4g' % (vocab[i], scores[i]) for i in np.argsort(scores)[::-1][:10]]))
outf = open(outfile, 'wt')
for bi, brand_vec in enumerate(brand_vectors):
outf.write('%s %g\n' % (brands[bi], do_score(brand_vec, scores)))
outf.flush()
### FOLLOWER ANALYSIS ###
def get_twitter_handles(fname):
handles = set()
with open(fname, 'rt') as f:
for line in f:
handles.add(line[:90].split()[0].lower())
return handles
def read_follower_file(fname, min_followers=0, max_followers=1e10, blacklist=set()):
""" Read a file of follower information and return a dictionary mapping screen_name to a set of follower ids. """
result = {}
with open(fname, 'rt') as f:
for line in f:
parts = line.split()
if len(parts) > 3:
if parts[1].lower() not in blacklist:
followers = set(int(x) for x in parts[2:])
if len(followers) > min_followers and len(followers) <= max_followers:
result[parts[1].lower()] = followers
else:
print('skipping exemplar', parts[1].lower())
return result
# JACCARD
def _jaccard(a, b):
""" Return the Jaccard similarity between two sets a and b. """
return 1. * len(a & b) / len(a | b)
def jaccard(brands, exemplars, weighted_avg=False, sqrt=False):
    """Score each brand by the mean Jaccard similarity between its follower
    set and each exemplar's follower set.

    Args:
        brands: iterable of (screen_name, follower_id_set) pairs.
        exemplars: dict mapping exemplar screen_name -> follower_id_set.
        weighted_avg: weight each exemplar by 1/|its followers| instead of equally.
        sqrt: return the square root of each score.

    Returns:
        dict mapping brand screen_name -> similarity score.
    """
    exemplar_sets = list(exemplars.values())
    scores = {}
    for name, followers in brands:
        sims = [_jaccard(followers, ex) for ex in exemplar_sets]
        if weighted_avg:
            scores[name] = np.average(sims, weights=[1. / len(ex) for ex in exemplar_sets])
        else:
            scores[name] = sum(sims) / float(len(exemplars))
    if sqrt:
        scores = {name: math.sqrt(s) for name, s in scores.items()}
    return scores
def jaccard_weighted_avg(brands, exemplars):
    """jaccard() with per-exemplar 1/|followers| weighting, no sqrt."""
    return jaccard(brands, exemplars, weighted_avg=True, sqrt=False)
def jaccard_sqrt_no_weighted_avg(brands, exemplars):
    """Square root of the unweighted jaccard() score."""
    return jaccard(brands, exemplars, weighted_avg=False, sqrt=True)
def jaccard_sqrt(brands, exemplars):
    """Square root of the 1/|followers|-weighted jaccard() score."""
    return jaccard(brands, exemplars, True, True)
def jaccard_merge(brands, exemplars):
    """Score each brand by the Jaccard similarity between its follower set
    and the union of all exemplar followers (one big pseudo-account)."""
    pooled = set().union(*exemplars.values())
    return {name: _jaccard(followers, pooled) for name, followers in brands}
def compute_log_degrees(brands, exemplars):
    """For each follower id seen in *brands*, return 1 / log(Z), where Z is
    one plus the number of brand accounts that follower follows.

    Exemplar follows are deliberately excluded from the counts so that people
    who follow many exemplars are not penalized. (*exemplars* is accepted for
    interface symmetry but unused.) The +1 smoothing keeps log(Z) > 0 for
    followers of a single brand.
    """
    degree = Counter()
    for follower_ids in brands.values():
        degree.update(follower_ids)
    degree.update(degree.keys())  # add 1 to every count so log() is never 0
    return Counter({f: 1. / math.log(z) for f, z in degree.items()})
# PROPORTION
def _proportion(a, b):
""" Return the len(a & b) / len(a) """
return 1. * len(a & b) / len(a)
def proportion(brands, exemplars, weighted_avg=False, sqrt=False):
    """Score each brand by the mean fraction of its followers who also follow
    each exemplar.

    Args:
        brands: iterable of (screen_name, follower_id_set) pairs.
        exemplars: dict mapping exemplar screen_name -> follower_id_set.
        weighted_avg: weight each exemplar by 1/|its followers| instead of equally.
        sqrt: return the square root of each score.
    """
    exemplar_sets = list(exemplars.values())
    scores = {}
    for name, followers in brands:
        fracs = [_proportion(followers, ex) for ex in exemplar_sets]
        if weighted_avg:
            scores[name] = np.average(fracs, weights=[1. / len(ex) for ex in exemplar_sets])
        else:
            scores[name] = sum(fracs) / float(len(exemplars))
    if sqrt:
        scores = {name: math.sqrt(s) for name, s in scores.items()}
    return scores
def proportion_weighted_avg(brands, exemplars):
    """proportion() with per-exemplar 1/|followers| weighting, no sqrt."""
    return proportion(brands, exemplars, True, False)
def proportion_sqrt_no_weighted_avg(brands, exemplars):
    """Square root of the unweighted proportion() score."""
    return proportion(brands, exemplars, False, True)
def proportion_sqrt(brands, exemplars):
    """Square root of the 1/|followers|-weighted proportion() score."""
    return proportion(brands, exemplars, True, True)
def proportion_merge(brands, exemplars):
    """Score each brand by the fraction of its followers who follow at least
    one exemplar, pooling all exemplar followers into one pseudo-account."""
    pooled = set().union(*exemplars.values())
    return {name: _proportion(followers, pooled) for name, followers in brands}
# COSINE SIMILARITY
def _cosine(a, b):
    """ Return the cosine similarity between sets a and b:
    |a & b| / (sqrt(|a|) * sqrt(|b|)).
    (Previous docstring was copy-pasted from _proportion and described the
    wrong formula.) """
    return 1. * len(a & b) / (math.sqrt(len(a)) * math.sqrt(len(b)))
def cosine(brands, exemplars, weighted_avg=False, sqrt=False):
    """Score each brand by the mean cosine similarity between its follower set
    and each exemplar's follower set.

    Args:
        brands: iterable of (screen_name, follower_id_set) pairs.
        exemplars: dict mapping exemplar screen_name -> follower_id_set.
        weighted_avg: weight each exemplar by 1/|its followers| instead of equally.
        sqrt: return the square root of each score.
    """
    exemplar_sets = list(exemplars.values())
    scores = {}
    for name, followers in brands:
        sims = [_cosine(followers, ex) for ex in exemplar_sets]
        if weighted_avg:
            scores[name] = np.average(sims, weights=[1. / len(ex) for ex in exemplar_sets])
        else:
            scores[name] = sum(sims) / float(len(exemplars))
    if sqrt:
        scores = {name: math.sqrt(s) for name, s in scores.items()}
    return scores
def cosine_weighted_avg(brands, exemplars):
    """cosine() with per-exemplar 1/|followers| weighting, no sqrt."""
    return cosine(brands, exemplars, True, False)
def cosine_sqrt_no_weighted_avg(brands, exemplars):
    """Square root of the unweighted cosine() score."""
    return cosine(brands, exemplars, False, True)
def cosine_sqrt(brands, exemplars):
    """Square root of the 1/|followers|-weighted cosine() score."""
    return cosine(brands, exemplars, True, True)
def cosine_merge(brands, exemplars):
    """Score each brand by the cosine similarity between its follower set and
    the union of all exemplar followers (one big pseudo-account).
    (Previous docstring said "proportion"; the code computes cosine.)"""
    pooled = set().union(*exemplars.values())
    return {name: _cosine(followers, pooled) for name, followers in brands}
def adamic(brands, exemplars):
    """Deprecated: Adamic/Adar similarity between brand and exemplar followers.

    The original implementation required every brand's follower set in memory
    at once, which is incompatible with the streaming brand iterator now used,
    so the computation was disabled: this function only prints a warning and
    returns None. The legacy implementation that sat unreachably after the
    early return has been removed.
    """
    print('adamic deprecated...requires loading all brands in memory.')
    return None
def compute_rarity_scores(exemplars):
    """ Compute a score for each follower that is sum_i (1/n_i), where n_i is
    the follower count of the ith exemplar that this user follows.
    Returns a defaultdict mapping follower id -> score (0. for unseen ids).
    >>> sorted(compute_rarity_scores({'e1': {1, 2, 3, 4}, 'e2': {4, 5}}).items())
    [(1, 0.25), (2, 0.25), (3, 0.25), (4, 0.75), (5, 0.5)]
    """
    scores = defaultdict(lambda: 0.)
    for followers in exemplars.values():
        # Each exemplar contributes 1/|followers| to every one of its followers.
        score = 1. / len(followers)
        for f in followers:
            scores[f] += score
    return scores
def rarity(brands, exemplars):
    """Score each brand by the mean rarity of its followers, where a follower's
    rarity is sum over followed exemplars of 1/|that exemplar's followers|.

    NOTE(review): divides by len(followers); a brand with an empty follower set
    would raise ZeroDivisionError -- verify upstream filtering prevents this.
    """
    follower_rarity = compute_rarity_scores(exemplars)
    return {name: sum(follower_rarity[f] for f in followers) / len(followers)
            for name, followers in brands}
def compute_rarity_scores_log(exemplars):
    """ Compute a score for each follower that is sum_i (1/log(n_i)), where
    n_i is the follower count of the ith exemplar that this user follows.
    Log-damped variant of compute_rarity_scores. (The previous docstring and
    doctest were copy-pasted from the unlogged version and were wrong.)
    >>> s = compute_rarity_scores_log({'e1': {1, 2}, 'e2': {2, 3}})
    >>> [round(s[f], 4) for f in sorted(s)]
    [1.4427, 2.8854, 1.4427]
    """
    scores = defaultdict(lambda: 0.)
    for followers in exemplars.values():
        # NOTE(review): raises ZeroDivisionError if an exemplar has exactly one
        # follower (log(1) == 0); presumably min_followers filtering upstream
        # prevents this -- confirm.
        score = 1. / math.log(len(followers))
        for f in followers:
            scores[f] += score
    return scores
def rarity_log(brands, exemplars):
    """Score each brand by the mean log-damped rarity of its followers, where
    a follower's rarity is sum over followed exemplars of
    1/log(|that exemplar's followers|)."""
    follower_rarity = compute_rarity_scores_log(exemplars)
    return {name: sum(follower_rarity[f] for f in followers) / len(followers)
            for name, followers in brands}
def mkdirs(filename):
    # Ensure the parent directory of `filename` exists before writing to it.
    # Delegates to report.mkdirs -- presumably a mkdir -p style helper;
    # confirm in brandelion.cli.report.
    report.mkdirs(os.path.dirname(filename))
def analyze_followers(brand_follower_file, exemplar_follower_file, outfile, analyze_fn,
                      min_followers, max_followers, sample_exemplars):
    """Score each brand account against the exemplar accounts by follower overlap.

    Streams brands from *brand_follower_file*, loads exemplar follower sets
    (excluding any screen name that also appears in the brand file), optionally
    keeps a random *sample_exemplars* percent of the exemplars, applies the
    module-level scoring function named by *analyze_fn* (e.g. 'jaccard'), and
    writes "<screen_name> <score>" lines to *outfile*.
    """
    brands = iter_follower_file(brand_follower_file)
    exemplars = read_follower_file(exemplar_follower_file, min_followers=min_followers,
                                   max_followers=max_followers,
                                   blacklist=get_twitter_handles(brand_follower_file))
    print('read follower data for %d exemplars' % (len(exemplars)))
    if sample_exemplars < 100:  # sample a subset of exemplars.
        # random.sample requires a sequence; passing dict_keys directly raises
        # TypeError on Python 3.11+, so materialize the keys first.
        kept = random.sample(list(exemplars), int(len(exemplars) * sample_exemplars / 100.))
        exemplars = dict([(k, exemplars[k]) for k in kept])
        print('sampled %d exemplars' % (len(exemplars)))
    analyze = getattr(sys.modules[__name__], analyze_fn)
    scores = analyze(brands, exemplars)
    mkdirs(outfile)
    # Context manager replaces the manual flush()/close() pair.
    with open(outfile, 'wt') as outf:
        for brand in sorted(scores):
            outf.write('%s %g\n' % (brand, scores[brand]))
    print('results written to', outfile)
def main():
    """CLI entry point: parse arguments with docopt and dispatch to the
    requested analysis (network and/or text)."""
    args = docopt(__doc__)
    print(args)
    # NOTE(review): docopt always populates the '--seed' key (default 12345),
    # so this membership test is always true and the RNG is seeded on every
    # run -- confirm that unconditional seeding is intended.
    if '--seed' in args:
        random.seed(args['--seed'])
    if args['--network']:
        analyze_followers(args['--brand-followers'], args['--exemplar-followers'], args['--output'], args['--network-method'],
                          int(args['--min-followers']), int(float(args['--max-followers'])), float(args['--sample-exemplars']))
    if args['--text']:
        analyze_text(args['--brand-tweets'], args['--exemplar-tweets'], args['--sample-tweets'], args['--output'], args['--text-method'])
# Standard script guard: run the CLI only when executed directly, so the
# module can be imported without side effects.
if __name__ == '__main__':
    main()
|
tapilab/brandelion | brandelion/cli/analyze.py | jaccard_merge | python | def jaccard_merge(brands, exemplars):
scores = {}
exemplar_followers = set()
for followers in exemplars.values():
exemplar_followers |= followers
for brand, followers in brands:
scores[brand] = _jaccard(followers, exemplar_followers)
return scores | Return the average Jaccard similarity between a brand's followers and
the followers of each exemplar. We merge all exemplar followers into one
big pseudo-account. | train | https://github.com/tapilab/brandelion/blob/40a5a5333cf704182c8666d1fbbbdadc7ff88546/brandelion/cli/analyze.py#L235-L246 | [
"def _jaccard(a, b):\n \"\"\" Return the Jaccard similarity between two sets a and b. \"\"\"\n return 1. * len(a & b) / len(a | b)\n"
] | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Analyze social and linguistic brand data.
usage:
brandelion analyze --text --brand-tweets <file> --exemplar-tweets <file> --sample-tweets <file> --output <file> [--text-method <string>]
brandelion analyze --network --brand-followers <file> --exemplar-followers <file> --output <file> [--network-method <string> --min-followers <n> --max-followers <n> --sample-exemplars <p> --seed <s>]
Options
-h, --help
--brand-followers <file> File containing follower data for brand accounts.
--brand-tweets <file> File containing tweets from brand accounts.
--exemplar-followers <file> File containing follower data for exemplar accounts.
--exemplar-tweets <file> File containing tweets from exemplar accounts.
--sample-tweets <file> File containing tweets from representative sample of Twitter.
--text-method <string> Method to do text analysis [default: chi2]
--network-method <string> Method to do text analysis [default: jaccard]
-o, --output <file> File to store results
-t, --text Analyze text of tweets.
-n, --network Analyze followers.
--min-followers <n> Ignore exemplars that don't have at least n followers [default: 0]
--max-followers <n> Ignore exemplars that have more than least n followers [default: 1e10]
--sample-exemplars <p> Sample p percent of the exemplars, uniformly at random. [default: 100]
--seed <s> Seed for random sampling. [default: 12345]
"""
from collections import Counter, defaultdict
from docopt import docopt
import io
from itertools import groupby
import gzip
import json
import math
import numpy as np
import os
import re
import random
from scipy.sparse import vstack
import sys
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_selection import chi2 as skchi2
from sklearn import linear_model
from . import report
### TEXT ANALYSIS ###
def parse_json(json_file, include_date=False):
""" Yield screen_name, text tuples from a json file. """
if json_file[-2:] == 'gz':
fh = gzip.open(json_file, 'rt')
else:
fh = io.open(json_file, mode='rt', encoding='utf8')
for line in fh:
try:
jj = json.loads(line)
if type(jj) is not list:
jj = [jj]
for j in jj:
if include_date:
yield (j['user']['screen_name'].lower(), j['text'], j['created_at'])
else:
if 'full_text' in j: # get untruncated text if available.
yield (j['user']['screen_name'].lower(), j['full_text'])
else:
yield (j['user']['screen_name'].lower(), j['text'])
except Exception as e:
sys.stderr.write('skipping json error: %s\n' % e)
def extract_tweets(json_file):
""" Yield screen_name, string tuples, where the string is the
concatenation of all tweets of this user. """
for screen_name, tweet_iter in groupby(parse_json(json_file), lambda x: x[0]):
tweets = [t[1] for t in tweet_iter]
yield screen_name, ' '.join(tweets)
def preprocess(s):
"""
>>> preprocess('#hi there http://www.foo.com @you isn"t RT <>')
'hashtaghi hashtaghi there isn"t'
"""
# s = re.sub('@\S+', 'thisisamention', s) # map all mentions to thisisamention
s = re.sub(r'@\S+', ' ', s) # map all mentions to thisisamention
# s = re.sub('http\S+', 'http', s) # keep only http from urls
s = re.sub(r'http\S+', ' ', s) # keep only http from urls
s = re.sub(r'#(\S+)', r'hashtag\1 hashtag\1', s) # #foo -> hashtagfoo hashtagfoo (for retaining hashtags even using bigrams)
# s = re.sub(r'[0-9]+', '9', s) # 1234 -> 9
s = re.sub(r'\bRT\b', ' ', s, re.IGNORECASE)
s = re.sub(r'&[a-z]+;', ' ', s, re.IGNORECASE)
s = re.sub(r'\s+', ' ', s).strip()
return s.lower()
def vectorize(json_file, vec, dofit=True):
""" Return a matrix where each row corresponds to a Twitter account, and
each column corresponds to the number of times a term is used by that
account. """
## CountVectorizer, efficiently.
screen_names = [x[0] for x in extract_tweets(json_file)]
if dofit:
X = vec.fit_transform(x[1] for x in extract_tweets(json_file))
else:
X = vec.transform(x[1] for x in extract_tweets(json_file))
return screen_names, X
def chi2(exemplars, samples, n=300):
y = np.array(([1.] * exemplars.shape[0]) + ([.0] * samples.shape[0]))
X = vstack((exemplars, samples)).tocsr()
clf = linear_model.LogisticRegression(penalty='l2')
clf.fit(X, y)
coef = clf.coef_[0]
chis, pvals = skchi2(X, y)
top_indices = chis.argsort()[::-1]
top_indices = [i for i in top_indices if coef[i] > 0]
for idx in range(len(coef)):
coef[idx] = 0.
for idx in top_indices[:n]:
coef[idx] = chis[idx]
return coef
def do_score(vec, coef):
return np.sum(coef[vec.nonzero()[1]]) / np.sum(coef)
def write_top_words(fname, vocab, scores):
outf = io.open(fname, 'w', encoding='utf8')
for i in np.argsort(scores)[::-1]:
if scores[i] > 0:
outf.write('%s %g\n' % (vocab[i], scores[i]))
outf.close()
def analyze_text(brand_tweets_file, exemplar_tweets_file, sample_tweets_file, outfile, analyze_fn):
analyze = getattr(sys.modules[__name__], analyze_fn)
vec = CountVectorizer(min_df=3, preprocessor=preprocess, ngram_range=(2, 2), binary=True)
_, exemplar_vectors = vectorize(exemplar_tweets_file, vec, dofit=True)
print('read tweets for %d exemplar accounts' % exemplar_vectors.shape[0])
brands, brand_vectors = vectorize(brand_tweets_file, vec, dofit=False)
print('read tweets for %d brand accounts' % brand_vectors.shape[0])
_, sample_vectors = vectorize(sample_tweets_file, vec, dofit=False)
print('read tweets for %d sample accounts' % sample_vectors.shape[0])
scores = analyze(exemplar_vectors, sample_vectors)
vocab = vec.get_feature_names()
write_top_words(outfile + '.topwords', vocab, scores)
print('top 10 ngrams:\n', '\n'.join(['%s=%.4g' % (vocab[i], scores[i]) for i in np.argsort(scores)[::-1][:10]]))
outf = open(outfile, 'wt')
for bi, brand_vec in enumerate(brand_vectors):
outf.write('%s %g\n' % (brands[bi], do_score(brand_vec, scores)))
outf.flush()
### FOLLOWER ANALYSIS ###
def get_twitter_handles(fname):
handles = set()
with open(fname, 'rt') as f:
for line in f:
handles.add(line[:90].split()[0].lower())
return handles
def read_follower_file(fname, min_followers=0, max_followers=1e10, blacklist=set()):
""" Read a file of follower information and return a dictionary mapping screen_name to a set of follower ids. """
result = {}
with open(fname, 'rt') as f:
for line in f:
parts = line.split()
if len(parts) > 3:
if parts[1].lower() not in blacklist:
followers = set(int(x) for x in parts[2:])
if len(followers) > min_followers and len(followers) <= max_followers:
result[parts[1].lower()] = followers
else:
print('skipping exemplar', parts[1].lower())
return result
def iter_follower_file(fname):
""" Iterator from a file of follower information and return a tuple of screen_name, follower ids.
File format is:
<iso timestamp> <screen_name> <follower_id1> <follower_ids2> ...
"""
with open(fname, 'rt') as f:
for line in f:
parts = line.split()
if len(parts) > 3:
yield parts[1].lower(), set(int(x) for x in parts[2:])
# JACCARD
def _jaccard(a, b):
""" Return the Jaccard similarity between two sets a and b. """
return 1. * len(a & b) / len(a | b)
def jaccard(brands, exemplars, weighted_avg=False, sqrt=False):
""" Return the average Jaccard similarity between a brand's followers and the
followers of each exemplar. """
scores = {}
for brand, followers in brands:
if weighted_avg:
scores[brand] = np.average([_jaccard(followers, others) for others in exemplars.values()],
weights=[1. / len(others) for others in exemplars.values()])
else:
scores[brand] = 1. * sum(_jaccard(followers, others) for others in exemplars.values()) / len(exemplars)
# limit to exemplars with less than 40k followers: scores[brand] = 1. * sum(_jaccard(brands[brand], others) for others in exemplars.itervalues() if len(others) < 40000) / len(exemplars)
if sqrt:
scores = dict([(b, math.sqrt(s)) for b, s in scores.items()])
return scores
def jaccard_weighted_avg(brands, exemplars):
return jaccard(brands, exemplars, True, False)
def jaccard_sqrt_no_weighted_avg(brands, exemplars):
return jaccard(brands, exemplars, False, True)
def jaccard_sqrt(brands, exemplars):
return jaccard(brands, exemplars, weighted_avg=True, sqrt=True)
def compute_log_degrees(brands, exemplars):
""" For each follower, let Z be the total number of brands they follow.
Return a dictionary of 1. / log(Z), for each follower.
"""
counts = Counter()
for followers in brands.values(): # + exemplars.values(): # Include exemplars in these counts? No, don't want to penalize people who follow many exemplars.
counts.update(followers)
counts.update(counts.keys()) # Add 1 to each count.
for k in counts:
counts[k] = 1. / math.log(counts[k])
return counts
# PROPORTION
def _proportion(a, b):
""" Return the len(a & b) / len(a) """
return 1. * len(a & b) / len(a)
def proportion(brands, exemplars, weighted_avg=False, sqrt=False):
"""
Return the proportion of a brand's followers who also follow an exemplar.
"""
scores = {}
for brand, followers in brands:
if weighted_avg:
scores[brand] = np.average([_proportion(followers, others) for others in exemplars.values()],
weights=[1. / len(others) for others in exemplars.values()])
else:
scores[brand] = 1. * sum(_proportion(followers, others) for others in exemplars.values()) / len(exemplars)
if sqrt:
scores = dict([(b, math.sqrt(s)) for b, s in scores.items()])
return scores
def proportion_weighted_avg(brands, exemplars):
return proportion(brands, exemplars, weighted_avg=True, sqrt=False)
def proportion_sqrt_no_weighted_avg(brands, exemplars):
return proportion(brands, exemplars, weighted_avg=False, sqrt=True)
def proportion_sqrt(brands, exemplars):
return proportion(brands, exemplars, weighted_avg=True, sqrt=True)
def proportion_merge(brands, exemplars):
""" Return the proportion of a brand's followers who also follower an
exemplar. We merge all exemplar followers into one big pseudo-account."""
scores = {}
exemplar_followers = set()
for followers in exemplars.values():
exemplar_followers |= followers
for brand, followers in brands:
scores[brand] = _proportion(followers, exemplar_followers)
return scores
# COSINE SIMILARITY
def _cosine(a, b):
""" Return the len(a & b) / len(a) """
return 1. * len(a & b) / (math.sqrt(len(a)) * math.sqrt(len(b)))
def cosine(brands, exemplars, weighted_avg=False, sqrt=False):
"""
Return the cosine similarity betwee a brand's followers and the exemplars.
"""
scores = {}
for brand, followers in brands:
if weighted_avg:
scores[brand] = np.average([_cosine(followers, others) for others in exemplars.values()],
weights=[1. / len(others) for others in exemplars.values()])
else:
scores[brand] = 1. * sum(_cosine(followers, others) for others in exemplars.values()) / len(exemplars)
if sqrt:
scores = dict([(b, math.sqrt(s)) for b, s in scores.items()])
return scores
def cosine_weighted_avg(brands, exemplars):
return cosine(brands, exemplars, weighted_avg=True, sqrt=False)
def cosine_sqrt_no_weighted_avg(brands, exemplars):
return cosine(brands, exemplars, weighted_avg=False, sqrt=True)
def cosine_sqrt(brands, exemplars):
return cosine(brands, exemplars, weighted_avg=True, sqrt=True)
def cosine_merge(brands, exemplars):
""" Return the proportion of a brand's followers who also follower an
exemplar. We merge all exemplar followers into one big pseudo-account."""
scores = {}
exemplar_followers = set()
for followers in exemplars.values():
exemplar_followers |= followers
for brand, followers in brands:
scores[brand] = _cosine(followers, exemplar_followers)
return scores
def adamic(brands, exemplars):
""" Return the average Adamic/Adar similarity between a brand's followers
and the followers of each exemplar. We approximate the number of followed
accounts per user by only considering those in our brand set."""
print('adamic deprecated...requires loading all brands in memory.')
return
degrees = compute_log_degrees(brands, exemplars)
scores = {}
exemplar_sums = dict([(exemplar, sum(degrees[z] for z in exemplars[exemplar])) for exemplar in exemplars])
for brand in sorted(brands):
brand_sum = sum(degrees[z] for z in brands[brand])
total = 0.
for exemplar in exemplars:
total += sum(degrees[z] for z in brands[brand] & exemplars[exemplar]) / (brand_sum + exemplar_sums[exemplar])
scores[brand] = total / len(exemplars)
return scores
def compute_rarity_scores(exemplars):
""" Compute a score for each follower that is sum_i (1/n_i), where n_i is
the degree of the ith exemplar they follow.
>>> compute_rarity_scores({'e1':{1,2,3,4}, 'e2':{4,5}}).items()
[(1, 0.25), (2, 0.25), (3, 0.25), (4, 0.75), (5, 0.5)]
"""
scores = defaultdict(lambda: 0.)
for followers in exemplars.values():
score = 1. / len(followers)
for f in followers:
scores[f] += score
return scores
def rarity(brands, exemplars):
""" Compute a score for each follower that is sum_i (1/n_i), where n_i is the degree of the ith exemplar they follow.
The score for a brand is then the average of their follower scores."""
rarity = compute_rarity_scores(exemplars)
scores = {}
for brand, followers in brands:
scores[brand] = sum(rarity[f] for f in followers) / len(followers)
return scores
def compute_rarity_scores_log(exemplars):
""" Compute a score for each follower that is sum_i (1/n_i), where n_i is
the degree of the ith exemplar they follow.
>>> compute_rarity_scores({'e1':{1,2,3,4}, 'e2':{4,5}}).items()
[(1, 0.25), (2, 0.25), (3, 0.25), (4, 0.75), (5, 0.5)]
"""
scores = defaultdict(lambda: 0.)
for followers in exemplars.values():
score = 1. / math.log(len(followers))
for f in followers:
scores[f] += score
return scores
def rarity_log(brands, exemplars):
""" Compute a score for each follower that is sum_i (1/log(n_i)), where n_i is the degree of the ith exemplar they follow.
The score for a brand is then the average of their follower scores."""
rarity = compute_rarity_scores_log(exemplars)
scores = {}
for brand, followers in brands:
scores[brand] = sum(rarity[f] for f in followers) / len(followers)
return scores
def mkdirs(filename):
report.mkdirs(os.path.dirname(filename))
def analyze_followers(brand_follower_file, exemplar_follower_file, outfile, analyze_fn,
min_followers, max_followers, sample_exemplars):
brands = iter_follower_file(brand_follower_file)
exemplars = read_follower_file(exemplar_follower_file, min_followers=min_followers, max_followers=max_followers, blacklist=get_twitter_handles(brand_follower_file))
print('read follower data for %d exemplars' % (len(exemplars)))
if sample_exemplars < 100: # sample a subset of exemplars.
exemplars = dict([(k, exemplars[k]) for k in random.sample(exemplars.keys(), int(len(exemplars) * sample_exemplars / 100.))])
print('sampled %d exemplars' % (len(exemplars)))
analyze = getattr(sys.modules[__name__], analyze_fn)
scores = analyze(brands, exemplars)
mkdirs(outfile)
outf = open(outfile, 'wt')
for brand in sorted(scores):
outf.write('%s %g\n' % (brand, scores[brand]))
outf.flush()
outf.close()
print('results written to', outfile)
def main():
args = docopt(__doc__)
print(args)
if '--seed' in args:
random.seed(args['--seed'])
if args['--network']:
analyze_followers(args['--brand-followers'], args['--exemplar-followers'], args['--output'], args['--network-method'],
int(args['--min-followers']), int(float(args['--max-followers'])), float(args['--sample-exemplars']))
if args['--text']:
analyze_text(args['--brand-tweets'], args['--exemplar-tweets'], args['--sample-tweets'], args['--output'], args['--text-method'])
if __name__ == '__main__':
main()
|
tapilab/brandelion | brandelion/cli/analyze.py | compute_log_degrees | python | def compute_log_degrees(brands, exemplars):
counts = Counter()
for followers in brands.values(): # + exemplars.values(): # Include exemplars in these counts? No, don't want to penalize people who follow many exemplars.
counts.update(followers)
counts.update(counts.keys()) # Add 1 to each count.
for k in counts:
counts[k] = 1. / math.log(counts[k])
return counts | For each follower, let Z be the total number of brands they follow.
Return a dictionary of 1. / log(Z), for each follower. | train | https://github.com/tapilab/brandelion/blob/40a5a5333cf704182c8666d1fbbbdadc7ff88546/brandelion/cli/analyze.py#L249-L259 | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Analyze social and linguistic brand data.
usage:
brandelion analyze --text --brand-tweets <file> --exemplar-tweets <file> --sample-tweets <file> --output <file> [--text-method <string>]
brandelion analyze --network --brand-followers <file> --exemplar-followers <file> --output <file> [--network-method <string> --min-followers <n> --max-followers <n> --sample-exemplars <p> --seed <s>]
Options
-h, --help
--brand-followers <file> File containing follower data for brand accounts.
--brand-tweets <file> File containing tweets from brand accounts.
--exemplar-followers <file> File containing follower data for exemplar accounts.
--exemplar-tweets <file> File containing tweets from exemplar accounts.
--sample-tweets <file> File containing tweets from representative sample of Twitter.
--text-method <string> Method to do text analysis [default: chi2]
--network-method <string> Method to do text analysis [default: jaccard]
-o, --output <file> File to store results
-t, --text Analyze text of tweets.
-n, --network Analyze followers.
--min-followers <n> Ignore exemplars that don't have at least n followers [default: 0]
--max-followers <n> Ignore exemplars that have more than least n followers [default: 1e10]
--sample-exemplars <p> Sample p percent of the exemplars, uniformly at random. [default: 100]
--seed <s> Seed for random sampling. [default: 12345]
"""
from collections import Counter, defaultdict
from docopt import docopt
import io
from itertools import groupby
import gzip
import json
import math
import numpy as np
import os
import re
import random
from scipy.sparse import vstack
import sys
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_selection import chi2 as skchi2
from sklearn import linear_model
from . import report
### TEXT ANALYSIS ###
def parse_json(json_file, include_date=False):
""" Yield screen_name, text tuples from a json file. """
if json_file[-2:] == 'gz':
fh = gzip.open(json_file, 'rt')
else:
fh = io.open(json_file, mode='rt', encoding='utf8')
for line in fh:
try:
jj = json.loads(line)
if type(jj) is not list:
jj = [jj]
for j in jj:
if include_date:
yield (j['user']['screen_name'].lower(), j['text'], j['created_at'])
else:
if 'full_text' in j: # get untruncated text if available.
yield (j['user']['screen_name'].lower(), j['full_text'])
else:
yield (j['user']['screen_name'].lower(), j['text'])
except Exception as e:
sys.stderr.write('skipping json error: %s\n' % e)
def extract_tweets(json_file):
""" Yield screen_name, string tuples, where the string is the
concatenation of all tweets of this user. """
for screen_name, tweet_iter in groupby(parse_json(json_file), lambda x: x[0]):
tweets = [t[1] for t in tweet_iter]
yield screen_name, ' '.join(tweets)
def preprocess(s):
"""
>>> preprocess('#hi there http://www.foo.com @you isn"t RT <>')
'hashtaghi hashtaghi there isn"t'
"""
# s = re.sub('@\S+', 'thisisamention', s) # map all mentions to thisisamention
s = re.sub(r'@\S+', ' ', s) # map all mentions to thisisamention
# s = re.sub('http\S+', 'http', s) # keep only http from urls
s = re.sub(r'http\S+', ' ', s) # keep only http from urls
s = re.sub(r'#(\S+)', r'hashtag\1 hashtag\1', s) # #foo -> hashtagfoo hashtagfoo (for retaining hashtags even using bigrams)
# s = re.sub(r'[0-9]+', '9', s) # 1234 -> 9
s = re.sub(r'\bRT\b', ' ', s, re.IGNORECASE)
s = re.sub(r'&[a-z]+;', ' ', s, re.IGNORECASE)
s = re.sub(r'\s+', ' ', s).strip()
return s.lower()
def vectorize(json_file, vec, dofit=True):
""" Return a matrix where each row corresponds to a Twitter account, and
each column corresponds to the number of times a term is used by that
account. """
## CountVectorizer, efficiently.
screen_names = [x[0] for x in extract_tweets(json_file)]
if dofit:
X = vec.fit_transform(x[1] for x in extract_tweets(json_file))
else:
X = vec.transform(x[1] for x in extract_tweets(json_file))
return screen_names, X
def chi2(exemplars, samples, n=300):
y = np.array(([1.] * exemplars.shape[0]) + ([.0] * samples.shape[0]))
X = vstack((exemplars, samples)).tocsr()
clf = linear_model.LogisticRegression(penalty='l2')
clf.fit(X, y)
coef = clf.coef_[0]
chis, pvals = skchi2(X, y)
top_indices = chis.argsort()[::-1]
top_indices = [i for i in top_indices if coef[i] > 0]
for idx in range(len(coef)):
coef[idx] = 0.
for idx in top_indices[:n]:
coef[idx] = chis[idx]
return coef
def do_score(vec, coef):
return np.sum(coef[vec.nonzero()[1]]) / np.sum(coef)
def write_top_words(fname, vocab, scores):
outf = io.open(fname, 'w', encoding='utf8')
for i in np.argsort(scores)[::-1]:
if scores[i] > 0:
outf.write('%s %g\n' % (vocab[i], scores[i]))
outf.close()
def analyze_text(brand_tweets_file, exemplar_tweets_file, sample_tweets_file, outfile, analyze_fn):
analyze = getattr(sys.modules[__name__], analyze_fn)
vec = CountVectorizer(min_df=3, preprocessor=preprocess, ngram_range=(2, 2), binary=True)
_, exemplar_vectors = vectorize(exemplar_tweets_file, vec, dofit=True)
print('read tweets for %d exemplar accounts' % exemplar_vectors.shape[0])
brands, brand_vectors = vectorize(brand_tweets_file, vec, dofit=False)
print('read tweets for %d brand accounts' % brand_vectors.shape[0])
_, sample_vectors = vectorize(sample_tweets_file, vec, dofit=False)
print('read tweets for %d sample accounts' % sample_vectors.shape[0])
scores = analyze(exemplar_vectors, sample_vectors)
vocab = vec.get_feature_names()
write_top_words(outfile + '.topwords', vocab, scores)
print('top 10 ngrams:\n', '\n'.join(['%s=%.4g' % (vocab[i], scores[i]) for i in np.argsort(scores)[::-1][:10]]))
outf = open(outfile, 'wt')
for bi, brand_vec in enumerate(brand_vectors):
outf.write('%s %g\n' % (brands[bi], do_score(brand_vec, scores)))
outf.flush()
### FOLLOWER ANALYSIS ###
def get_twitter_handles(fname):
handles = set()
with open(fname, 'rt') as f:
for line in f:
handles.add(line[:90].split()[0].lower())
return handles
def read_follower_file(fname, min_followers=0, max_followers=1e10, blacklist=set()):
""" Read a file of follower information and return a dictionary mapping screen_name to a set of follower ids. """
result = {}
with open(fname, 'rt') as f:
for line in f:
parts = line.split()
if len(parts) > 3:
if parts[1].lower() not in blacklist:
followers = set(int(x) for x in parts[2:])
if len(followers) > min_followers and len(followers) <= max_followers:
result[parts[1].lower()] = followers
else:
print('skipping exemplar', parts[1].lower())
return result
def iter_follower_file(fname):
    """Yield (screen_name, follower_id_set) tuples from a follower file.

    File format: ``<iso timestamp> <screen_name> <follower_id1> <follower_id2> ...``
    Lines with fewer than two follower ids are skipped.
    """
    with open(fname, 'rt') as fh:
        for line in fh:
            fields = line.split()
            if len(fields) <= 3:
                continue
            yield fields[1].lower(), {int(tok) for tok in fields[2:]}
# JACCARD
def _jaccard(a, b):
""" Return the Jaccard similarity between two sets a and b. """
return 1. * len(a & b) / len(a | b)
def jaccard(brands, exemplars, weighted_avg=False, sqrt=False):
    """Score each brand by its average Jaccard similarity to the exemplars.

    *brands* is an iterable of (name, follower_set); *exemplars* maps
    exemplar name -> follower_set.  With *weighted_avg*, exemplars are
    weighted by the inverse of their follower count; with *sqrt*, the square
    root of each brand score is returned.
    """
    exemplar_sets = list(exemplars.values())
    scores = {}
    for name, followers in brands:
        sims = [_jaccard(followers, other) for other in exemplar_sets]
        if weighted_avg:
            scores[name] = np.average(sims, weights=[1. / len(other) for other in exemplar_sets])
        else:
            scores[name] = float(sum(sims)) / len(exemplars)
    if sqrt:
        return dict((name, math.sqrt(val)) for name, val in scores.items())
    return scores
def jaccard_weighted_avg(brands, exemplars):
    """Jaccard scores averaged with inverse-follower-count exemplar weights."""
    return jaccard(brands, exemplars, weighted_avg=True, sqrt=False)
def jaccard_sqrt_no_weighted_avg(brands, exemplars):
    """Square root of the unweighted average Jaccard scores."""
    return jaccard(brands, exemplars, weighted_avg=False, sqrt=True)
def jaccard_sqrt(brands, exemplars):
    """Square root of the inverse-size-weighted average Jaccard scores."""
    return jaccard(brands, exemplars, weighted_avg=True, sqrt=True)
def jaccard_merge(brands, exemplars):
    """Score each brand by the Jaccard similarity between its followers and
    the union of all exemplar followers (every exemplar merged into a single
    pseudo-account)."""
    merged = set().union(*exemplars.values())
    return dict((name, _jaccard(followers, merged)) for name, followers in brands)
# PROPORTION
def _proportion(a, b):
""" Return the len(a & b) / len(a) """
return 1. * len(a & b) / len(a)
def proportion(brands, exemplars, weighted_avg=False, sqrt=False):
    """Score each brand by the average fraction of its followers that also
    follow each exemplar.

    With *weighted_avg*, exemplars are weighted by the inverse of their
    follower count; with *sqrt*, the square root of each score is returned.
    """
    exemplar_sets = list(exemplars.values())
    scores = {}
    for name, followers in brands:
        overlaps = [_proportion(followers, other) for other in exemplar_sets]
        if weighted_avg:
            scores[name] = np.average(overlaps, weights=[1. / len(other) for other in exemplar_sets])
        else:
            scores[name] = float(sum(overlaps)) / len(exemplars)
    if sqrt:
        return dict((name, math.sqrt(val)) for name, val in scores.items())
    return scores
def proportion_weighted_avg(brands, exemplars):
    """Follower-overlap proportion, averaged with inverse-size exemplar weights."""
    return proportion(brands, exemplars, weighted_avg=True, sqrt=False)
def proportion_sqrt_no_weighted_avg(brands, exemplars):
    """Square root of the unweighted average overlap proportion."""
    return proportion(brands, exemplars, weighted_avg=False, sqrt=True)
def proportion_sqrt(brands, exemplars):
    """Square root of the inverse-size-weighted average overlap proportion."""
    return proportion(brands, exemplars, weighted_avg=True, sqrt=True)
def proportion_merge(brands, exemplars):
    """Score each brand by the fraction of its followers that follow at least
    one exemplar (all exemplar followers merged into one pseudo-account)."""
    merged = set().union(*exemplars.values())
    return dict((name, _proportion(followers, merged)) for name, followers in brands)
# COSINE SIMILARITY
def _cosine(a, b):
""" Return the len(a & b) / len(a) """
return 1. * len(a & b) / (math.sqrt(len(a)) * math.sqrt(len(b)))
def cosine(brands, exemplars, weighted_avg=False, sqrt=False):
    """Score each brand by its average set-cosine similarity to the exemplar
    follower sets.

    With *weighted_avg*, exemplars are weighted by the inverse of their
    follower count; with *sqrt*, the square root of each score is returned.
    """
    exemplar_sets = list(exemplars.values())
    scores = {}
    for name, followers in brands:
        sims = [_cosine(followers, other) for other in exemplar_sets]
        if weighted_avg:
            scores[name] = np.average(sims, weights=[1. / len(other) for other in exemplar_sets])
        else:
            scores[name] = float(sum(sims)) / len(exemplars)
    if sqrt:
        return dict((name, math.sqrt(val)) for name, val in scores.items())
    return scores
def cosine_weighted_avg(brands, exemplars):
    """Cosine similarity averaged with inverse-size exemplar weights."""
    return cosine(brands, exemplars, weighted_avg=True, sqrt=False)
def cosine_sqrt_no_weighted_avg(brands, exemplars):
    """Square root of the unweighted average cosine similarity."""
    return cosine(brands, exemplars, weighted_avg=False, sqrt=True)
def cosine_sqrt(brands, exemplars):
    """Square root of the inverse-size-weighted average cosine similarity."""
    return cosine(brands, exemplars, weighted_avg=True, sqrt=True)
def cosine_merge(brands, exemplars):
    """Score each brand by the cosine similarity between its followers and the
    union of all exemplar followers (exemplars merged into one pseudo-account).

    (The previous docstring was copied from ``proportion_merge``; this
    function computes cosine similarity, not a proportion.)
    """
    merged = set().union(*exemplars.values())
    scores = {}
    for name, followers in brands:
        scores[name] = _cosine(followers, merged)
    return scores
def adamic(brands, exemplars):
    """ Return the average Adamic/Adar similarity between a brand's followers
    and the followers of each exemplar. We approximate the number of followed
    accounts per user by only considering those in our brand set."""
    # Deprecated: the implementation below needs every brand's follower set in
    # memory at once, which is incompatible with the streaming `brands`
    # iterator used by analyze_followers.
    print('adamic deprecated...requires loading all brands in memory.')
    return
    # NOTE(review): everything below is unreachable dead code, kept for
    # reference only. It also indexes `brands` like a dict (brands[brand]),
    # which does not match the (name, follower_set) tuples yielded by
    # iter_follower_file.
    degrees = compute_log_degrees(brands, exemplars)
    scores = {}
    exemplar_sums = dict([(exemplar, sum(degrees[z] for z in exemplars[exemplar])) for exemplar in exemplars])
    for brand in sorted(brands):
        brand_sum = sum(degrees[z] for z in brands[brand])
        total = 0.
        for exemplar in exemplars:
            total += sum(degrees[z] for z in brands[brand] & exemplars[exemplar]) / (brand_sum + exemplar_sums[exemplar])
        scores[brand] = total / len(exemplars)
    return scores
def compute_rarity_scores(exemplars):
    """Assign each follower a rarity score: the sum over the exemplars they
    follow of 1/n_i, where n_i is that exemplar's follower count.

    BUG FIX: the old doctest printed ``dict.items()`` directly, whose repr
    and ordering are not stable under Python 3; sort for a deterministic
    doctest.

    >>> sorted(compute_rarity_scores({'e1': {1, 2, 3, 4}, 'e2': {4, 5}}).items())
    [(1, 0.25), (2, 0.25), (3, 0.25), (4, 0.75), (5, 0.5)]
    """
    scores = defaultdict(lambda: 0.)
    for followers in exemplars.values():
        weight = 1. / len(followers)
        for follower in followers:
            scores[follower] += weight
    return scores
def rarity(brands, exemplars):
    """Score each brand by the mean rarity of its followers, where a
    follower's rarity is sum_i 1/n_i over the exemplars they follow
    (n_i = that exemplar's follower count)."""
    follower_rarity = compute_rarity_scores(exemplars)
    scores = {}
    for name, followers in brands:
        total = sum(follower_rarity[f] for f in followers)
        scores[name] = total / len(followers)
    return scores
def compute_rarity_scores_log(exemplars):
    """Assign each follower a rarity score: the sum over the exemplars they
    follow of 1/log(n_i), where n_i is that exemplar's follower count.

    (The previous docstring and doctest were copied from the non-log
    ``compute_rarity_scores`` and did not match this implementation.)

    Note: an exemplar with exactly one follower gives log(1) == 0 and raises
    ZeroDivisionError; callers filter exemplars by minimum follower count.

    >>> scores = compute_rarity_scores_log({'e1': {1, 2}, 'e2': {2, 3}})
    >>> round(scores[2], 4)
    2.8854
    """
    scores = defaultdict(lambda: 0.)
    for followers in exemplars.values():
        weight = 1. / math.log(len(followers))
        for follower in followers:
            scores[follower] += weight
    return scores
def rarity_log(brands, exemplars):
    """Score each brand by the mean log-rarity of its followers, where a
    follower's rarity is sum_i 1/log(n_i) over the exemplars they follow."""
    follower_scores = compute_rarity_scores_log(exemplars)
    scores = {}
    for name, followers in brands:
        scores[name] = sum(follower_scores[f] for f in followers) / len(followers)
    return scores
def mkdirs(filename):
    """Create the parent directory of *filename* if needed (delegates to report.mkdirs)."""
    report.mkdirs(os.path.dirname(filename))
def analyze_followers(brand_follower_file, exemplar_follower_file, outfile, analyze_fn,
                      min_followers, max_followers, sample_exemplars):
    """Score every brand account against the exemplar follower sets.

    Streams brands from *brand_follower_file*, loads the exemplars (excluding
    any account that is itself a brand), optionally keeps a uniform random
    *sample_exemplars* percent of them, applies the module-level scoring
    function named by *analyze_fn*, and writes one ``<brand> <score>`` line
    per brand (sorted by name) to *outfile*.
    """
    brands = iter_follower_file(brand_follower_file)
    exemplars = read_follower_file(exemplar_follower_file, min_followers=min_followers,
                                   max_followers=max_followers,
                                   blacklist=get_twitter_handles(brand_follower_file))
    print('read follower data for %d exemplars' % (len(exemplars)))
    if sample_exemplars < 100:  # sample a subset of exemplars.
        # BUG FIX: random.sample requires a sequence; passing a dict-keys view
        # raises TypeError on Python >= 3.11, so materialize the keys first.
        kept = random.sample(list(exemplars.keys()), int(len(exemplars) * sample_exemplars / 100.))
        exemplars = dict((k, exemplars[k]) for k in kept)
        print('sampled %d exemplars' % (len(exemplars)))
    analyze = getattr(sys.modules[__name__], analyze_fn)
    scores = analyze(brands, exemplars)
    mkdirs(outfile)
    # BUG FIX: use a context manager so the output file is always closed.
    with open(outfile, 'wt') as outf:
        for brand in sorted(scores):
            outf.write('%s %g\n' % (brand, scores[brand]))
    print('results written to', outfile)
def main():
    """Command-line entry point: parse docopt arguments and dispatch to the
    network (follower) and/or text analysis pipelines."""
    args = docopt(__doc__)
    print(args)
    # NOTE(review): docopt returns a dict containing every declared option,
    # so '--seed' is presumably always a key and this guard is always true;
    # random.seed is called with the option's string value — confirm intended.
    if '--seed' in args:
        random.seed(args['--seed'])
    if args['--network']:
        analyze_followers(args['--brand-followers'], args['--exemplar-followers'], args['--output'], args['--network-method'],
                          int(args['--min-followers']), int(float(args['--max-followers'])), float(args['--sample-exemplars']))
    if args['--text']:
        analyze_text(args['--brand-tweets'], args['--exemplar-tweets'], args['--sample-tweets'], args['--output'], args['--text-method'])
if __name__ == '__main__':
main()
|
tapilab/brandelion | brandelion/cli/analyze.py | proportion_merge | python | def proportion_merge(brands, exemplars):
scores = {}
exemplar_followers = set()
for followers in exemplars.values():
exemplar_followers |= followers
for brand, followers in brands:
scores[brand] = _proportion(followers, exemplar_followers)
return scores | Return the proportion of a brand's followers who also follower an
exemplar. We merge all exemplar followers into one big pseudo-account. | train | https://github.com/tapilab/brandelion/blob/40a5a5333cf704182c8666d1fbbbdadc7ff88546/brandelion/cli/analyze.py#L298-L308 | [
"def _proportion(a, b):\n \"\"\" Return the len(a & b) / len(a) \"\"\"\n return 1. * len(a & b) / len(a)\n"
] | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Analyze social and linguistic brand data.
usage:
brandelion analyze --text --brand-tweets <file> --exemplar-tweets <file> --sample-tweets <file> --output <file> [--text-method <string>]
brandelion analyze --network --brand-followers <file> --exemplar-followers <file> --output <file> [--network-method <string> --min-followers <n> --max-followers <n> --sample-exemplars <p> --seed <s>]
Options
-h, --help
--brand-followers <file> File containing follower data for brand accounts.
--brand-tweets <file> File containing tweets from brand accounts.
--exemplar-followers <file> File containing follower data for exemplar accounts.
--exemplar-tweets <file> File containing tweets from exemplar accounts.
--sample-tweets <file> File containing tweets from representative sample of Twitter.
--text-method <string> Method to do text analysis [default: chi2]
--network-method <string> Method to do text analysis [default: jaccard]
-o, --output <file> File to store results
-t, --text Analyze text of tweets.
-n, --network Analyze followers.
--min-followers <n> Ignore exemplars that don't have at least n followers [default: 0]
--max-followers <n> Ignore exemplars that have more than least n followers [default: 1e10]
--sample-exemplars <p> Sample p percent of the exemplars, uniformly at random. [default: 100]
--seed <s> Seed for random sampling. [default: 12345]
"""
from collections import Counter, defaultdict
from docopt import docopt
import io
from itertools import groupby
import gzip
import json
import math
import numpy as np
import os
import re
import random
from scipy.sparse import vstack
import sys
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_selection import chi2 as skchi2
from sklearn import linear_model
from . import report
### TEXT ANALYSIS ###
def parse_json(json_file, include_date=False):
    """Yield (screen_name, text) tuples -- or (screen_name, text, created_at)
    when *include_date* is True -- from a file of JSON tweets.

    Accepts plain or gzip-compressed files (detected by a ``gz`` suffix); each
    line holds one JSON object or a JSON list of objects.  The untruncated
    ``full_text`` field is preferred over ``text`` when present (and
    *include_date* is False).  Malformed lines are reported to stderr and
    skipped.
    """
    if json_file[-2:] == 'gz':
        fh = gzip.open(json_file, 'rt')
    else:
        fh = io.open(json_file, mode='rt', encoding='utf8')
    try:
        for line in fh:
            try:
                jj = json.loads(line)
                if type(jj) is not list:
                    jj = [jj]
                for j in jj:
                    if include_date:
                        yield (j['user']['screen_name'].lower(), j['text'], j['created_at'])
                    else:
                        if 'full_text' in j:  # get untruncated text if available.
                            yield (j['user']['screen_name'].lower(), j['full_text'])
                        else:
                            yield (j['user']['screen_name'].lower(), j['text'])
            except Exception as e:
                sys.stderr.write('skipping json error: %s\n' % e)
    finally:
        # BUG FIX: the file handle was previously never closed.
        fh.close()
def extract_tweets(json_file):
    """Yield (screen_name, text) tuples where *text* is the space-joined
    concatenation of all consecutive tweets by that user in the file."""
    for name, group in groupby(parse_json(json_file), key=lambda pair: pair[0]):
        yield name, ' '.join(item[1] for item in group)
def preprocess(s):
    """Normalize tweet text: strip @mentions, URLs, RT markers and HTML
    entities, duplicate hashtags (so they survive bigram tokenization), and
    lower-case the result.

    >>> preprocess('#hi there http://www.foo.com @you isn"t RT &lt;&gt;')
    'hashtaghi hashtaghi there isn"t'
    """
    s = re.sub(r'@\S+', ' ', s)     # drop @mentions
    s = re.sub(r'http\S+', ' ', s)  # drop URLs
    # #foo -> hashtagfoo hashtagfoo so the hashtag appears in bigram features
    s = re.sub(r'#(\S+)', r'hashtag\1 hashtag\1', s)
    # BUG FIX: re.IGNORECASE was passed as re.sub's positional `count`
    # argument, which capped replacements at 2 and silently ignored the flag;
    # pass it as the `flags` keyword instead.
    s = re.sub(r'\bRT\b', ' ', s, flags=re.IGNORECASE)
    s = re.sub(r'&[a-z]+;', ' ', s, flags=re.IGNORECASE)
    s = re.sub(r'\s+', ' ', s).strip()
    return s.lower()
def vectorize(json_file, vec, dofit=True):
    """Return (screen_names, X): one row per Twitter account, one column per
    vocabulary term, where X[i, j] counts term j in account i's tweets.

    If *dofit* is True the vectorizer's vocabulary is fitted on this file;
    otherwise the existing vocabulary is reused.
    """
    # Read the tweet file once, keeping names and documents aligned (the
    # previous implementation iterated the whole file twice).
    screen_names = []
    documents = []
    for name, doc in extract_tweets(json_file):
        screen_names.append(name)
        documents.append(doc)
    if dofit:
        X = vec.fit_transform(documents)
    else:
        X = vec.transform(documents)
    return screen_names, X
def chi2(exemplars, samples, n=300):
    """Select the top-*n* ngram features that distinguish exemplar accounts
    from background sample accounts.

    Labels exemplar rows 1 and sample rows 0, fits an L2 logistic regression
    to get per-feature coefficient signs, computes chi-squared statistics per
    feature, and returns a vector in which only the *n* highest-chi2 features
    with a positive logistic coefficient keep their chi2 score; every other
    entry is zeroed.
    """
    y = np.array(([1.] * exemplars.shape[0]) + ([.0] * samples.shape[0]))
    X = vstack((exemplars, samples)).tocsr()
    clf = linear_model.LogisticRegression(penalty='l2')
    clf.fit(X, y)
    coef = clf.coef_[0]
    chis, pvals = skchi2(X, y)
    # Features by descending chi2, keeping only positively-weighted ones
    # (terms indicative of the exemplar class rather than the background).
    top_indices = chis.argsort()[::-1]
    top_indices = [i for i in top_indices if coef[i] > 0]
    for idx in range(len(coef)):
        coef[idx] = 0.
    for idx in top_indices[:n]:
        coef[idx] = chis[idx]
    return coef
def do_score(vec, coef):
    """Return the normalized score of one account: the sum of coefficients at
    the account's nonzero feature columns, divided by the total coefficient mass."""
    _, cols = vec.nonzero()
    return np.sum(coef[cols]) / np.sum(coef)
def write_top_words(fname, vocab, scores):
outf = io.open(fname, 'w', encoding='utf8')
for i in np.argsort(scores)[::-1]:
if scores[i] > 0:
outf.write('%s %g\n' % (vocab[i], scores[i]))
outf.close()
def analyze_text(brand_tweets_file, exemplar_tweets_file, sample_tweets_file, outfile, analyze_fn):
analyze = getattr(sys.modules[__name__], analyze_fn)
vec = CountVectorizer(min_df=3, preprocessor=preprocess, ngram_range=(2, 2), binary=True)
_, exemplar_vectors = vectorize(exemplar_tweets_file, vec, dofit=True)
print('read tweets for %d exemplar accounts' % exemplar_vectors.shape[0])
brands, brand_vectors = vectorize(brand_tweets_file, vec, dofit=False)
print('read tweets for %d brand accounts' % brand_vectors.shape[0])
_, sample_vectors = vectorize(sample_tweets_file, vec, dofit=False)
print('read tweets for %d sample accounts' % sample_vectors.shape[0])
scores = analyze(exemplar_vectors, sample_vectors)
vocab = vec.get_feature_names()
write_top_words(outfile + '.topwords', vocab, scores)
print('top 10 ngrams:\n', '\n'.join(['%s=%.4g' % (vocab[i], scores[i]) for i in np.argsort(scores)[::-1][:10]]))
outf = open(outfile, 'wt')
for bi, brand_vec in enumerate(brand_vectors):
outf.write('%s %g\n' % (brands[bi], do_score(brand_vec, scores)))
outf.flush()
### FOLLOWER ANALYSIS ###
def get_twitter_handles(fname):
handles = set()
with open(fname, 'rt') as f:
for line in f:
handles.add(line[:90].split()[0].lower())
return handles
def read_follower_file(fname, min_followers=0, max_followers=1e10, blacklist=set()):
""" Read a file of follower information and return a dictionary mapping screen_name to a set of follower ids. """
result = {}
with open(fname, 'rt') as f:
for line in f:
parts = line.split()
if len(parts) > 3:
if parts[1].lower() not in blacklist:
followers = set(int(x) for x in parts[2:])
if len(followers) > min_followers and len(followers) <= max_followers:
result[parts[1].lower()] = followers
else:
print('skipping exemplar', parts[1].lower())
return result
def iter_follower_file(fname):
""" Iterator from a file of follower information and return a tuple of screen_name, follower ids.
File format is:
<iso timestamp> <screen_name> <follower_id1> <follower_ids2> ...
"""
with open(fname, 'rt') as f:
for line in f:
parts = line.split()
if len(parts) > 3:
yield parts[1].lower(), set(int(x) for x in parts[2:])
# JACCARD
def _jaccard(a, b):
""" Return the Jaccard similarity between two sets a and b. """
return 1. * len(a & b) / len(a | b)
def jaccard(brands, exemplars, weighted_avg=False, sqrt=False):
""" Return the average Jaccard similarity between a brand's followers and the
followers of each exemplar. """
scores = {}
for brand, followers in brands:
if weighted_avg:
scores[brand] = np.average([_jaccard(followers, others) for others in exemplars.values()],
weights=[1. / len(others) for others in exemplars.values()])
else:
scores[brand] = 1. * sum(_jaccard(followers, others) for others in exemplars.values()) / len(exemplars)
# limit to exemplars with less than 40k followers: scores[brand] = 1. * sum(_jaccard(brands[brand], others) for others in exemplars.itervalues() if len(others) < 40000) / len(exemplars)
if sqrt:
scores = dict([(b, math.sqrt(s)) for b, s in scores.items()])
return scores
def jaccard_weighted_avg(brands, exemplars):
return jaccard(brands, exemplars, True, False)
def jaccard_sqrt_no_weighted_avg(brands, exemplars):
return jaccard(brands, exemplars, False, True)
def jaccard_sqrt(brands, exemplars):
return jaccard(brands, exemplars, weighted_avg=True, sqrt=True)
def jaccard_merge(brands, exemplars):
""" Return the average Jaccard similarity between a brand's followers and
the followers of each exemplar. We merge all exemplar followers into one
big pseudo-account."""
scores = {}
exemplar_followers = set()
for followers in exemplars.values():
exemplar_followers |= followers
for brand, followers in brands:
scores[brand] = _jaccard(followers, exemplar_followers)
return scores
def compute_log_degrees(brands, exemplars):
    """ For each follower, let Z be the total number of brands they follow.
    Return a dictionary of 1. / log(Z), for each follower.
    """
    # NOTE(review): expects `brands` to be a dict of name -> follower set;
    # this does not match the streaming (name, followers) iterator used
    # elsewhere, and the function is only reachable from the deprecated
    # `adamic` scorer.
    counts = Counter()
    for followers in brands.values(): # + exemplars.values(): # Include exemplars in these counts? No, don't want to penalize people who follow many exemplars.
        counts.update(followers)
    counts.update(counts.keys()) # Add 1 to each count.
    # 1/log(Z); a follower seen exactly once gets Z=2 after the +1 above, so
    # log never receives 1 and the division is safe.
    for k in counts:
        counts[k] = 1. / math.log(counts[k])
    return counts
# PROPORTION
def _proportion(a, b):
""" Return the len(a & b) / len(a) """
return 1. * len(a & b) / len(a)
def proportion(brands, exemplars, weighted_avg=False, sqrt=False):
"""
Return the proportion of a brand's followers who also follow an exemplar.
"""
scores = {}
for brand, followers in brands:
if weighted_avg:
scores[brand] = np.average([_proportion(followers, others) for others in exemplars.values()],
weights=[1. / len(others) for others in exemplars.values()])
else:
scores[brand] = 1. * sum(_proportion(followers, others) for others in exemplars.values()) / len(exemplars)
if sqrt:
scores = dict([(b, math.sqrt(s)) for b, s in scores.items()])
return scores
def proportion_weighted_avg(brands, exemplars):
return proportion(brands, exemplars, weighted_avg=True, sqrt=False)
def proportion_sqrt_no_weighted_avg(brands, exemplars):
return proportion(brands, exemplars, weighted_avg=False, sqrt=True)
def proportion_sqrt(brands, exemplars):
return proportion(brands, exemplars, weighted_avg=True, sqrt=True)
# COSINE SIMILARITY
def _cosine(a, b):
""" Return the len(a & b) / len(a) """
return 1. * len(a & b) / (math.sqrt(len(a)) * math.sqrt(len(b)))
def cosine(brands, exemplars, weighted_avg=False, sqrt=False):
"""
Return the cosine similarity betwee a brand's followers and the exemplars.
"""
scores = {}
for brand, followers in brands:
if weighted_avg:
scores[brand] = np.average([_cosine(followers, others) for others in exemplars.values()],
weights=[1. / len(others) for others in exemplars.values()])
else:
scores[brand] = 1. * sum(_cosine(followers, others) for others in exemplars.values()) / len(exemplars)
if sqrt:
scores = dict([(b, math.sqrt(s)) for b, s in scores.items()])
return scores
def cosine_weighted_avg(brands, exemplars):
return cosine(brands, exemplars, weighted_avg=True, sqrt=False)
def cosine_sqrt_no_weighted_avg(brands, exemplars):
return cosine(brands, exemplars, weighted_avg=False, sqrt=True)
def cosine_sqrt(brands, exemplars):
return cosine(brands, exemplars, weighted_avg=True, sqrt=True)
def cosine_merge(brands, exemplars):
""" Return the proportion of a brand's followers who also follower an
exemplar. We merge all exemplar followers into one big pseudo-account."""
scores = {}
exemplar_followers = set()
for followers in exemplars.values():
exemplar_followers |= followers
for brand, followers in brands:
scores[brand] = _cosine(followers, exemplar_followers)
return scores
def adamic(brands, exemplars):
""" Return the average Adamic/Adar similarity between a brand's followers
and the followers of each exemplar. We approximate the number of followed
accounts per user by only considering those in our brand set."""
print('adamic deprecated...requires loading all brands in memory.')
return
degrees = compute_log_degrees(brands, exemplars)
scores = {}
exemplar_sums = dict([(exemplar, sum(degrees[z] for z in exemplars[exemplar])) for exemplar in exemplars])
for brand in sorted(brands):
brand_sum = sum(degrees[z] for z in brands[brand])
total = 0.
for exemplar in exemplars:
total += sum(degrees[z] for z in brands[brand] & exemplars[exemplar]) / (brand_sum + exemplar_sums[exemplar])
scores[brand] = total / len(exemplars)
return scores
def compute_rarity_scores(exemplars):
""" Compute a score for each follower that is sum_i (1/n_i), where n_i is
the degree of the ith exemplar they follow.
>>> compute_rarity_scores({'e1':{1,2,3,4}, 'e2':{4,5}}).items()
[(1, 0.25), (2, 0.25), (3, 0.25), (4, 0.75), (5, 0.5)]
"""
scores = defaultdict(lambda: 0.)
for followers in exemplars.values():
score = 1. / len(followers)
for f in followers:
scores[f] += score
return scores
def rarity(brands, exemplars):
""" Compute a score for each follower that is sum_i (1/n_i), where n_i is the degree of the ith exemplar they follow.
The score for a brand is then the average of their follower scores."""
rarity = compute_rarity_scores(exemplars)
scores = {}
for brand, followers in brands:
scores[brand] = sum(rarity[f] for f in followers) / len(followers)
return scores
def compute_rarity_scores_log(exemplars):
""" Compute a score for each follower that is sum_i (1/n_i), where n_i is
the degree of the ith exemplar they follow.
>>> compute_rarity_scores({'e1':{1,2,3,4}, 'e2':{4,5}}).items()
[(1, 0.25), (2, 0.25), (3, 0.25), (4, 0.75), (5, 0.5)]
"""
scores = defaultdict(lambda: 0.)
for followers in exemplars.values():
score = 1. / math.log(len(followers))
for f in followers:
scores[f] += score
return scores
def rarity_log(brands, exemplars):
""" Compute a score for each follower that is sum_i (1/log(n_i)), where n_i is the degree of the ith exemplar they follow.
The score for a brand is then the average of their follower scores."""
rarity = compute_rarity_scores_log(exemplars)
scores = {}
for brand, followers in brands:
scores[brand] = sum(rarity[f] for f in followers) / len(followers)
return scores
def mkdirs(filename):
report.mkdirs(os.path.dirname(filename))
def analyze_followers(brand_follower_file, exemplar_follower_file, outfile, analyze_fn,
min_followers, max_followers, sample_exemplars):
brands = iter_follower_file(brand_follower_file)
exemplars = read_follower_file(exemplar_follower_file, min_followers=min_followers, max_followers=max_followers, blacklist=get_twitter_handles(brand_follower_file))
print('read follower data for %d exemplars' % (len(exemplars)))
if sample_exemplars < 100: # sample a subset of exemplars.
exemplars = dict([(k, exemplars[k]) for k in random.sample(exemplars.keys(), int(len(exemplars) * sample_exemplars / 100.))])
print('sampled %d exemplars' % (len(exemplars)))
analyze = getattr(sys.modules[__name__], analyze_fn)
scores = analyze(brands, exemplars)
mkdirs(outfile)
outf = open(outfile, 'wt')
for brand in sorted(scores):
outf.write('%s %g\n' % (brand, scores[brand]))
outf.flush()
outf.close()
print('results written to', outfile)
def main():
args = docopt(__doc__)
print(args)
if '--seed' in args:
random.seed(args['--seed'])
if args['--network']:
analyze_followers(args['--brand-followers'], args['--exemplar-followers'], args['--output'], args['--network-method'],
int(args['--min-followers']), int(float(args['--max-followers'])), float(args['--sample-exemplars']))
if args['--text']:
analyze_text(args['--brand-tweets'], args['--exemplar-tweets'], args['--sample-tweets'], args['--output'], args['--text-method'])
if __name__ == '__main__':
main()
|
tapilab/brandelion | brandelion/cli/analyze.py | _cosine | python | def _cosine(a, b):
return 1. * len(a & b) / (math.sqrt(len(a)) * math.sqrt(len(b))) | Return the len(a & b) / len(a) | train | https://github.com/tapilab/brandelion/blob/40a5a5333cf704182c8666d1fbbbdadc7ff88546/brandelion/cli/analyze.py#L314-L316 | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Analyze social and linguistic brand data.
usage:
brandelion analyze --text --brand-tweets <file> --exemplar-tweets <file> --sample-tweets <file> --output <file> [--text-method <string>]
brandelion analyze --network --brand-followers <file> --exemplar-followers <file> --output <file> [--network-method <string> --min-followers <n> --max-followers <n> --sample-exemplars <p> --seed <s>]
Options
-h, --help
--brand-followers <file> File containing follower data for brand accounts.
--brand-tweets <file> File containing tweets from brand accounts.
--exemplar-followers <file> File containing follower data for exemplar accounts.
--exemplar-tweets <file> File containing tweets from exemplar accounts.
--sample-tweets <file> File containing tweets from representative sample of Twitter.
--text-method <string> Method to do text analysis [default: chi2]
--network-method <string> Method to do text analysis [default: jaccard]
-o, --output <file> File to store results
-t, --text Analyze text of tweets.
-n, --network Analyze followers.
--min-followers <n> Ignore exemplars that don't have at least n followers [default: 0]
--max-followers <n> Ignore exemplars that have more than least n followers [default: 1e10]
--sample-exemplars <p> Sample p percent of the exemplars, uniformly at random. [default: 100]
--seed <s> Seed for random sampling. [default: 12345]
"""
from collections import Counter, defaultdict
from docopt import docopt
import io
from itertools import groupby
import gzip
import json
import math
import numpy as np
import os
import re
import random
from scipy.sparse import vstack
import sys
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_selection import chi2 as skchi2
from sklearn import linear_model
from . import report
### TEXT ANALYSIS ###
def parse_json(json_file, include_date=False):
""" Yield screen_name, text tuples from a json file. """
if json_file[-2:] == 'gz':
fh = gzip.open(json_file, 'rt')
else:
fh = io.open(json_file, mode='rt', encoding='utf8')
for line in fh:
try:
jj = json.loads(line)
if type(jj) is not list:
jj = [jj]
for j in jj:
if include_date:
yield (j['user']['screen_name'].lower(), j['text'], j['created_at'])
else:
if 'full_text' in j: # get untruncated text if available.
yield (j['user']['screen_name'].lower(), j['full_text'])
else:
yield (j['user']['screen_name'].lower(), j['text'])
except Exception as e:
sys.stderr.write('skipping json error: %s\n' % e)
def extract_tweets(json_file):
""" Yield screen_name, string tuples, where the string is the
concatenation of all tweets of this user. """
for screen_name, tweet_iter in groupby(parse_json(json_file), lambda x: x[0]):
tweets = [t[1] for t in tweet_iter]
yield screen_name, ' '.join(tweets)
def preprocess(s):
    """Normalize tweet text: strip @mentions, URLs, RT markers and HTML
    entities, duplicate hashtags (so they survive bigram tokenization), and
    lower-case the result.

    >>> preprocess('#hi there http://www.foo.com @you isn"t RT &lt;&gt;')
    'hashtaghi hashtaghi there isn"t'
    """
    s = re.sub(r'@\S+', ' ', s)     # drop @mentions
    s = re.sub(r'http\S+', ' ', s)  # drop URLs
    # #foo -> hashtagfoo hashtagfoo so the hashtag appears in bigram features
    s = re.sub(r'#(\S+)', r'hashtag\1 hashtag\1', s)
    # BUG FIX: re.IGNORECASE was passed as re.sub's positional `count`
    # argument, which capped replacements at 2 and silently ignored the flag;
    # pass it as the `flags` keyword instead.
    s = re.sub(r'\bRT\b', ' ', s, flags=re.IGNORECASE)
    s = re.sub(r'&[a-z]+;', ' ', s, flags=re.IGNORECASE)
    s = re.sub(r'\s+', ' ', s).strip()
    return s.lower()
def vectorize(json_file, vec, dofit=True):
""" Return a matrix where each row corresponds to a Twitter account, and
each column corresponds to the number of times a term is used by that
account. """
## CountVectorizer, efficiently.
screen_names = [x[0] for x in extract_tweets(json_file)]
if dofit:
X = vec.fit_transform(x[1] for x in extract_tweets(json_file))
else:
X = vec.transform(x[1] for x in extract_tweets(json_file))
return screen_names, X
def chi2(exemplars, samples, n=300):
y = np.array(([1.] * exemplars.shape[0]) + ([.0] * samples.shape[0]))
X = vstack((exemplars, samples)).tocsr()
clf = linear_model.LogisticRegression(penalty='l2')
clf.fit(X, y)
coef = clf.coef_[0]
chis, pvals = skchi2(X, y)
top_indices = chis.argsort()[::-1]
top_indices = [i for i in top_indices if coef[i] > 0]
for idx in range(len(coef)):
coef[idx] = 0.
for idx in top_indices[:n]:
coef[idx] = chis[idx]
return coef
def do_score(vec, coef):
return np.sum(coef[vec.nonzero()[1]]) / np.sum(coef)
def write_top_words(fname, vocab, scores):
outf = io.open(fname, 'w', encoding='utf8')
for i in np.argsort(scores)[::-1]:
if scores[i] > 0:
outf.write('%s %g\n' % (vocab[i], scores[i]))
outf.close()
def analyze_text(brand_tweets_file, exemplar_tweets_file, sample_tweets_file, outfile, analyze_fn):
analyze = getattr(sys.modules[__name__], analyze_fn)
vec = CountVectorizer(min_df=3, preprocessor=preprocess, ngram_range=(2, 2), binary=True)
_, exemplar_vectors = vectorize(exemplar_tweets_file, vec, dofit=True)
print('read tweets for %d exemplar accounts' % exemplar_vectors.shape[0])
brands, brand_vectors = vectorize(brand_tweets_file, vec, dofit=False)
print('read tweets for %d brand accounts' % brand_vectors.shape[0])
_, sample_vectors = vectorize(sample_tweets_file, vec, dofit=False)
print('read tweets for %d sample accounts' % sample_vectors.shape[0])
scores = analyze(exemplar_vectors, sample_vectors)
vocab = vec.get_feature_names()
write_top_words(outfile + '.topwords', vocab, scores)
print('top 10 ngrams:\n', '\n'.join(['%s=%.4g' % (vocab[i], scores[i]) for i in np.argsort(scores)[::-1][:10]]))
outf = open(outfile, 'wt')
for bi, brand_vec in enumerate(brand_vectors):
outf.write('%s %g\n' % (brands[bi], do_score(brand_vec, scores)))
outf.flush()
### FOLLOWER ANALYSIS ###
def get_twitter_handles(fname):
handles = set()
with open(fname, 'rt') as f:
for line in f:
handles.add(line[:90].split()[0].lower())
return handles
def read_follower_file(fname, min_followers=0, max_followers=1e10, blacklist=set()):
""" Read a file of follower information and return a dictionary mapping screen_name to a set of follower ids. """
result = {}
with open(fname, 'rt') as f:
for line in f:
parts = line.split()
if len(parts) > 3:
if parts[1].lower() not in blacklist:
followers = set(int(x) for x in parts[2:])
if len(followers) > min_followers and len(followers) <= max_followers:
result[parts[1].lower()] = followers
else:
print('skipping exemplar', parts[1].lower())
return result
def iter_follower_file(fname):
""" Iterator from a file of follower information and return a tuple of screen_name, follower ids.
File format is:
<iso timestamp> <screen_name> <follower_id1> <follower_ids2> ...
"""
with open(fname, 'rt') as f:
for line in f:
parts = line.split()
if len(parts) > 3:
yield parts[1].lower(), set(int(x) for x in parts[2:])
# JACCARD
def _jaccard(a, b):
""" Return the Jaccard similarity between two sets a and b. """
return 1. * len(a & b) / len(a | b)
def jaccard(brands, exemplars, weighted_avg=False, sqrt=False):
    """Average Jaccard similarity between each brand's followers and the
    followers of every exemplar.

    *weighted_avg* weights exemplars by the inverse of their follower
    count; *sqrt* applies a square root to each final score.
    """
    scores = {}
    for brand, followers in brands:
        sims = [_jaccard(followers, others) for others in exemplars.values()]
        if weighted_avg:
            inverse_sizes = [1. / len(others) for others in exemplars.values()]
            scores[brand] = np.average(sims, weights=inverse_sizes)
        else:
            scores[brand] = sum(sims) / float(len(exemplars))
    if sqrt:
        return {name: math.sqrt(value) for name, value in scores.items()}
    return scores
def jaccard_weighted_avg(brands, exemplars):
    """Jaccard score averaged with inverse-follower-count weights."""
    # Consistency fix: pass flags by keyword like the other wrapper variants.
    return jaccard(brands, exemplars, weighted_avg=True, sqrt=False)


def jaccard_sqrt_no_weighted_avg(brands, exemplars):
    """Square-rooted Jaccard score with a plain (unweighted) average."""
    return jaccard(brands, exemplars, weighted_avg=False, sqrt=True)


def jaccard_sqrt(brands, exemplars):
    """Square-rooted Jaccard score with inverse-follower-count weights."""
    return jaccard(brands, exemplars, weighted_avg=True, sqrt=True)
def jaccard_merge(brands, exemplars):
    """Jaccard similarity between each brand's followers and the union of
    all exemplar followers (exemplars merged into one pseudo-account)."""
    merged = set()
    for ids in exemplars.values():
        merged.update(ids)
    return {brand: _jaccard(ids, merged) for brand, ids in brands}
def compute_log_degrees(brands, exemplars):
    """Map each follower id to 1 / log(Z + 1), where Z is how many brand
    accounts that follower follows.

    Exemplar audiences are deliberately excluded from the counts so that
    people who follow many exemplars are not penalized.
    """
    degree = Counter()
    for ids in brands.values():
        degree.update(ids)
    # +1 matches the original's "add one to each count" before the log.
    return Counter({uid: 1. / math.log(cnt + 1) for uid, cnt in degree.items()})
# PROPORTION
def _proportion(a, b):
""" Return the len(a & b) / len(a) """
return 1. * len(a & b) / len(a)
def proportion(brands, exemplars, weighted_avg=False, sqrt=False):
    """Average, over exemplars, of the fraction of a brand's followers who
    also follow that exemplar.

    *weighted_avg* weights exemplars by inverse follower count; *sqrt*
    square-roots each final score.
    """
    scores = {}
    for brand, followers in brands:
        overlaps = [_proportion(followers, others) for others in exemplars.values()]
        if weighted_avg:
            inverse_sizes = [1. / len(others) for others in exemplars.values()]
            scores[brand] = np.average(overlaps, weights=inverse_sizes)
        else:
            scores[brand] = sum(overlaps) / float(len(exemplars))
    if sqrt:
        return {name: math.sqrt(value) for name, value in scores.items()}
    return scores
def proportion_weighted_avg(brands, exemplars):
    """Proportion score with inverse-follower-count weighting."""
    return proportion(brands, exemplars, weighted_avg=True)


def proportion_sqrt_no_weighted_avg(brands, exemplars):
    """Square-rooted proportion score with a plain average."""
    return proportion(brands, exemplars, sqrt=True)


def proportion_sqrt(brands, exemplars):
    """Square-rooted proportion score with weighting."""
    return proportion(brands, exemplars, weighted_avg=True, sqrt=True)
def proportion_merge(brands, exemplars):
    """Fraction of each brand's followers who follow at least one exemplar
    (all exemplar audiences merged into a single pseudo-account)."""
    merged = set()
    for ids in exemplars.values():
        merged.update(ids)
    return {brand: _proportion(ids, merged) for brand, ids in brands}
# COSINE SIMILARITY
def cosine(brands, exemplars, weighted_avg=False, sqrt=False):
    """Average set-cosine similarity between each brand's followers and the
    followers of every exemplar.

    *weighted_avg* weights exemplars by inverse follower count; *sqrt*
    square-roots each final score.
    """
    scores = {}
    for brand, followers in brands:
        sims = [_cosine(followers, others) for others in exemplars.values()]
        if weighted_avg:
            inverse_sizes = [1. / len(others) for others in exemplars.values()]
            scores[brand] = np.average(sims, weights=inverse_sizes)
        else:
            scores[brand] = sum(sims) / float(len(exemplars))
    if sqrt:
        return {name: math.sqrt(value) for name, value in scores.items()}
    return scores
def cosine_weighted_avg(brands, exemplars):
    """Cosine score with inverse-follower-count weighting."""
    return cosine(brands, exemplars, weighted_avg=True)


def cosine_sqrt_no_weighted_avg(brands, exemplars):
    """Square-rooted cosine score with a plain average."""
    return cosine(brands, exemplars, sqrt=True)


def cosine_sqrt(brands, exemplars):
    """Square-rooted cosine score with weighting."""
    return cosine(brands, exemplars, weighted_avg=True, sqrt=True)
def cosine_merge(brands, exemplars):
    """Cosine similarity between each brand's followers and the union of
    all exemplar followers (exemplars merged into one pseudo-account)."""
    merged = set()
    for ids in exemplars.values():
        merged.update(ids)
    return {brand: _cosine(ids, merged) for brand, ids in brands}
def adamic(brands, exemplars):
    """ Return the average Adamic/Adar similarity between a brand's followers
    and the followers of each exemplar. We approximate the number of followed
    accounts per user by only considering those in our brand set."""
    # Deprecated: the implementation below requires `brands` to be a fully
    # materialized dict, but callers now stream brands from disk, so we bail
    # out early.  Everything after `return` is dead code kept for reference.
    print('adamic deprecated...requires loading all brands in memory.')
    return
    degrees = compute_log_degrees(brands, exemplars)
    scores = {}
    # Precompute each exemplar's denominator contribution once.
    exemplar_sums = dict([(exemplar, sum(degrees[z] for z in exemplars[exemplar])) for exemplar in exemplars])
    for brand in sorted(brands):
        brand_sum = sum(degrees[z] for z in brands[brand])
        total = 0.
        for exemplar in exemplars:
            total += sum(degrees[z] for z in brands[brand] & exemplars[exemplar]) / (brand_sum + exemplar_sums[exemplar])
        scores[brand] = total / len(exemplars)
    return scores
def compute_rarity_scores(exemplars):
    """Score each follower as sum_i (1 / n_i) over the exemplars they
    follow, where n_i is exemplar i's follower count — followers of small
    (rare) exemplars score higher.

    >>> sorted(compute_rarity_scores({'e1': {1, 2, 3, 4}, 'e2': {4, 5}}).items())
    [(1, 0.25), (2, 0.25), (3, 0.25), (4, 0.75), (5, 0.5)]
    """
    scores = defaultdict(lambda: 0.)
    for followers in exemplars.values():
        score = 1. / len(followers)
        for f in followers:
            scores[f] += score
    return scores
def rarity(brands, exemplars):
    """Score each brand as the mean rarity (sum of 1/n_i over followed
    exemplars) of its followers."""
    follower_score = compute_rarity_scores(exemplars)
    return {brand: sum(follower_score[f] for f in ids) / len(ids)
            for brand, ids in brands}
def compute_rarity_scores_log(exemplars):
    """Score each follower as sum_i (1 / log(n_i)) over the exemplars they
    follow, where n_i is exemplar i's follower count.

    NOTE(review): the original docstring and doctest were copied from
    compute_rarity_scores and described 1/n_i; this variant dampens the
    size penalty with a log.  An exemplar with exactly one follower would
    make log(n_i) zero — assumed not to occur; TODO confirm with callers.
    """
    scores = defaultdict(lambda: 0.)
    for followers in exemplars.values():
        score = 1. / math.log(len(followers))
        for f in followers:
            scores[f] += score
    return scores
def rarity_log(brands, exemplars):
    """Score each brand as the mean log-damped rarity (sum of 1/log(n_i)
    over followed exemplars) of its followers."""
    follower_score = compute_rarity_scores_log(exemplars)
    return {brand: sum(follower_score[f] for f in ids) / len(ids)
            for brand, ids in brands}
def mkdirs(filename):
    """Ensure the parent directory of *filename* exists (via report.mkdirs)."""
    report.mkdirs(os.path.dirname(filename))
def analyze_followers(brand_follower_file, exemplar_follower_file, outfile, analyze_fn,
                      min_followers, max_followers, sample_exemplars):
    """Score every brand against the exemplar set and write the results.

    Streams brands from *brand_follower_file*, loads exemplar followers
    (excluding brand handles and accounts outside the follower-count
    bounds), optionally keeps a uniform random *sample_exemplars* percent
    of exemplars, then applies the scoring function named by *analyze_fn*
    (resolved from this module, e.g. 'jaccard') and writes one
    "<brand> <score>" line per brand to *outfile*.
    """
    brands = iter_follower_file(brand_follower_file)
    exemplars = read_follower_file(exemplar_follower_file, min_followers=min_followers,
                                   max_followers=max_followers,
                                   blacklist=get_twitter_handles(brand_follower_file))
    print('read follower data for %d exemplars' % (len(exemplars)))
    if sample_exemplars < 100:  # sample a subset of exemplars.
        # BUG FIX: random.sample() requires a sequence; passing dict.keys()
        # raises TypeError on Python 3.11+.
        kept = random.sample(list(exemplars), int(len(exemplars) * sample_exemplars / 100.))
        exemplars = dict([(k, exemplars[k]) for k in kept])
        print('sampled %d exemplars' % (len(exemplars)))
    analyze = getattr(sys.modules[__name__], analyze_fn)
    scores = analyze(brands, exemplars)
    mkdirs(outfile)
    # BUG FIX: use a context manager instead of flush() without close().
    with open(outfile, 'wt') as outf:
        for brand in sorted(scores):
            outf.write('%s %g\n' % (brand, scores[brand]))
    print('results written to', outfile)
def main():
    """CLI entry point: parse docopt arguments and dispatch the analyses."""
    args = docopt(__doc__)
    print(args)
    # NOTE(review): docopt always includes '--seed' (it has a default), so
    # this check is always true; the RNG is seeded with the string value.
    if '--seed' in args:
        random.seed(args['--seed'])
    if args['--network']:
        analyze_followers(args['--brand-followers'], args['--exemplar-followers'], args['--output'], args['--network-method'],
                          int(args['--min-followers']), int(float(args['--max-followers'])), float(args['--sample-exemplars']))
    if args['--text']:
        analyze_text(args['--brand-tweets'], args['--exemplar-tweets'], args['--sample-tweets'], args['--output'], args['--text-method'])
if __name__ == '__main__':  # support both script execution and module import
    main()
|
tapilab/brandelion | brandelion/cli/analyze.py | cosine | python | def cosine(brands, exemplars, weighted_avg=False, sqrt=False):
scores = {}
for brand, followers in brands:
if weighted_avg:
scores[brand] = np.average([_cosine(followers, others) for others in exemplars.values()],
weights=[1. / len(others) for others in exemplars.values()])
else:
scores[brand] = 1. * sum(_cosine(followers, others) for others in exemplars.values()) / len(exemplars)
if sqrt:
scores = dict([(b, math.sqrt(s)) for b, s in scores.items()])
return scores | Return the cosine similarity betwee a brand's followers and the exemplars. | train | https://github.com/tapilab/brandelion/blob/40a5a5333cf704182c8666d1fbbbdadc7ff88546/brandelion/cli/analyze.py#L319-L332 | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Analyze social and linguistic brand data.
usage:
brandelion analyze --text --brand-tweets <file> --exemplar-tweets <file> --sample-tweets <file> --output <file> [--text-method <string>]
brandelion analyze --network --brand-followers <file> --exemplar-followers <file> --output <file> [--network-method <string> --min-followers <n> --max-followers <n> --sample-exemplars <p> --seed <s>]
Options
-h, --help
--brand-followers <file> File containing follower data for brand accounts.
--brand-tweets <file> File containing tweets from brand accounts.
--exemplar-followers <file> File containing follower data for exemplar accounts.
--exemplar-tweets <file> File containing tweets from exemplar accounts.
--sample-tweets <file> File containing tweets from representative sample of Twitter.
--text-method <string> Method to do text analysis [default: chi2]
--network-method <string> Method to do text analysis [default: jaccard]
-o, --output <file> File to store results
-t, --text Analyze text of tweets.
-n, --network Analyze followers.
--min-followers <n> Ignore exemplars that don't have at least n followers [default: 0]
--max-followers <n> Ignore exemplars that have more than least n followers [default: 1e10]
--sample-exemplars <p> Sample p percent of the exemplars, uniformly at random. [default: 100]
--seed <s> Seed for random sampling. [default: 12345]
"""
from collections import Counter, defaultdict
from docopt import docopt
import io
from itertools import groupby
import gzip
import json
import math
import numpy as np
import os
import re
import random
from scipy.sparse import vstack
import sys
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_selection import chi2 as skchi2
from sklearn import linear_model
from . import report
### TEXT ANALYSIS ###
def parse_json(json_file, include_date=False):
    """Yield (screen_name, text[, created_at]) tuples from a tweet file.

    Handles plain or gzipped files, lines holding either a single tweet
    object or a list of them, and prefers the untruncated 'full_text' over
    'text' when dates are not requested.  Malformed lines are reported to
    stderr and skipped.
    """
    if json_file[-2:] == 'gz':
        fh = gzip.open(json_file, 'rt')
    else:
        fh = io.open(json_file, mode='rt', encoding='utf8')
    # BUG FIX: the handle was never closed; the with-block closes it even
    # when the consumer abandons this generator early.
    with fh:
        for line in fh:
            try:
                jj = json.loads(line)
                if type(jj) is not list:
                    jj = [jj]
                for j in jj:
                    if include_date:
                        yield (j['user']['screen_name'].lower(), j['text'], j['created_at'])
                    else:
                        if 'full_text' in j:  # get untruncated text if available.
                            yield (j['user']['screen_name'].lower(), j['full_text'])
                        else:
                            yield (j['user']['screen_name'].lower(), j['text'])
            except Exception as e:
                sys.stderr.write('skipping json error: %s\n' % e)
def extract_tweets(json_file):
    """Yield (screen_name, all_tweets_joined) per account, assuming each
    account's tweets appear contiguously in the file."""
    for name, group in groupby(parse_json(json_file), key=lambda pair: pair[0]):
        yield name, ' '.join(text for _, text in group)
def preprocess(s):
    """Normalize tweet text for vectorization.

    Drops @mentions and URLs, doubles each #hashtag into
    'hashtagfoo hashtagfoo' (so hashtags survive bigram features), strips
    RT markers and HTML entities, collapses whitespace and lower-cases.

    >>> preprocess('#hi there http://www.foo.com @you isn"t RT &lt;&gt;')
    'hashtaghi hashtaghi there isn"t'
    """
    s = re.sub(r'@\S+', ' ', s)  # drop @mentions entirely
    s = re.sub(r'http\S+', ' ', s)  # drop urls entirely
    s = re.sub(r'#(\S+)', r'hashtag\1 hashtag\1', s)  # #foo -> hashtagfoo hashtagfoo
    # BUG FIX: re.sub's 4th positional argument is `count`, not `flags`;
    # the original passed re.IGNORECASE (== 2) as a replacement limit,
    # which capped substitutions at 2 and left matching case-sensitive.
    s = re.sub(r'\bRT\b', ' ', s, flags=re.IGNORECASE)
    s = re.sub(r'&[a-z]+;', ' ', s, flags=re.IGNORECASE)
    s = re.sub(r'\s+', ' ', s).strip()
    return s.lower()
def vectorize(json_file, vec, dofit=True):
    """Return (screen_names, X) where row i of the sparse matrix X counts
    the terms used by screen_names[i].

    Fits *vec* when *dofit* is True; otherwise only transforms with the
    already-fitted vocabulary.
    """
    # One pass collects the account names; a second streams the raw text
    # into the vectorizer without materializing it.
    screen_names = [name for name, _ in extract_tweets(json_file)]
    documents = (doc for _, doc in extract_tweets(json_file))
    if dofit:
        X = vec.fit_transform(documents)
    else:
        X = vec.transform(documents)
    return screen_names, X
def chi2(exemplars, samples, n=300):
    """Return a per-feature weight vector: the chi^2 statistic for the top
    *n* features that are both discriminative and positively associated
    with the exemplar class; every other feature gets weight 0."""
    labels = np.array(([1.] * exemplars.shape[0]) + ([0.] * samples.shape[0]))
    X = vstack((exemplars, samples)).tocsr()
    model = linear_model.LogisticRegression(penalty='l2')
    model.fit(X, labels)
    coef = model.coef_[0]
    chis, _pvals = skchi2(X, labels)
    # Best chi^2 first, restricted to features the classifier weights positively.
    positive = [i for i in chis.argsort()[::-1] if coef[i] > 0]
    coef[:] = 0.
    for i in positive[:n]:
        coef[i] = chis[i]
    return coef
def do_score(vec, coef):
    """Fraction of total feature weight captured by *vec*'s nonzero terms."""
    active_columns = vec.nonzero()[1]
    return np.sum(coef[active_columns]) / np.sum(coef)
def write_top_words(fname, vocab, scores):
    """Write "term score" lines for every positively-scored term, highest
    score first, UTF-8 encoded."""
    with io.open(fname, 'w', encoding='utf8') as out:
        for idx in np.argsort(scores)[::-1]:
            if scores[idx] > 0:
                out.write('%s %g\n' % (vocab[idx], scores[idx]))
def analyze_text(brand_tweets_file, exemplar_tweets_file, sample_tweets_file, outfile, analyze_fn):
    """Score each brand by the fraction of top discriminative-ngram weight
    its tweets capture.

    Fits a binary bigram vectorizer on exemplar tweets, scores ngrams with
    the module-level function named by *analyze_fn* (e.g. 'chi2') against
    a background sample, and writes "<brand> <score>" lines to *outfile*
    plus the ranked ngram weights to "<outfile>.topwords".
    """
    analyze = getattr(sys.modules[__name__], analyze_fn)
    vec = CountVectorizer(min_df=3, preprocessor=preprocess, ngram_range=(2, 2), binary=True)
    _, exemplar_vectors = vectorize(exemplar_tweets_file, vec, dofit=True)
    print('read tweets for %d exemplar accounts' % exemplar_vectors.shape[0])
    brands, brand_vectors = vectorize(brand_tweets_file, vec, dofit=False)
    print('read tweets for %d brand accounts' % brand_vectors.shape[0])
    _, sample_vectors = vectorize(sample_tweets_file, vec, dofit=False)
    print('read tweets for %d sample accounts' % sample_vectors.shape[0])
    scores = analyze(exemplar_vectors, sample_vectors)
    vocab = vec.get_feature_names()
    write_top_words(outfile + '.topwords', vocab, scores)
    print('top 10 ngrams:\n', '\n'.join(['%s=%.4g' % (vocab[i], scores[i]) for i in np.argsort(scores)[::-1][:10]]))
    # BUG FIX: the output file was flushed but never closed.
    with open(outfile, 'wt') as outf:
        for bi, brand_vec in enumerate(brand_vectors):
            outf.write('%s %g\n' % (brands[bi], do_score(brand_vec, scores)))
### FOLLOWER ANALYSIS ###
def get_twitter_handles(fname):
    """Return the set of lower-cased Twitter handles listed in *fname*.

    Each non-blank line is expected to start with a screen name; only the
    first ~90 characters of a line are scanned so very long lines stay cheap.
    """
    handles = set()
    with open(fname, 'rt') as f:
        for line in f:
            parts = line[:90].split()
            if parts:  # BUG FIX: blank lines used to raise IndexError
                handles.add(parts[0].lower())
    return handles
def read_follower_file(fname, min_followers=0, max_followers=1e10, blacklist=set()):
    """Map each screen_name in a follower file to its set of follower ids.

    Lines look like ``<timestamp> <name> <id> <id> ...``.  Names in
    *blacklist* are reported and skipped; accounts whose follower count
    falls outside (min_followers, max_followers] are silently dropped.
    """
    accounts = {}
    with open(fname, 'rt') as fh:
        for raw in fh:
            fields = raw.split()
            if len(fields) <= 3:
                continue
            name = fields[1].lower()
            if name in blacklist:
                print('skipping exemplar', name)
                continue
            ids = {int(tok) for tok in fields[2:]}
            if min_followers < len(ids) <= max_followers:
                accounts[name] = ids
    return accounts
def iter_follower_file(fname):
    """Yield (screen_name, follower_id_set) pairs from a follower file.

    File format is ``<iso timestamp> <screen_name> <id1> <id2> ...``;
    lines with fewer than two follower ids are ignored.
    """
    with open(fname, 'rt') as fh:
        for raw in fh:
            fields = raw.split()
            if len(fields) <= 3:
                continue
            yield fields[1].lower(), {int(tok) for tok in fields[2:]}
# JACCARD
def _jaccard(a, b):
""" Return the Jaccard similarity between two sets a and b. """
return 1. * len(a & b) / len(a | b)
def jaccard(brands, exemplars, weighted_avg=False, sqrt=False):
    """Average Jaccard similarity between each brand's followers and the
    followers of every exemplar.

    *weighted_avg* weights exemplars by the inverse of their follower
    count; *sqrt* applies a square root to each final score.
    """
    scores = {}
    for brand, followers in brands:
        sims = [_jaccard(followers, others) for others in exemplars.values()]
        if weighted_avg:
            inverse_sizes = [1. / len(others) for others in exemplars.values()]
            scores[brand] = np.average(sims, weights=inverse_sizes)
        else:
            scores[brand] = sum(sims) / float(len(exemplars))
    if sqrt:
        return {name: math.sqrt(value) for name, value in scores.items()}
    return scores
def jaccard_weighted_avg(brands, exemplars):
    """Jaccard score averaged with inverse-follower-count weights."""
    # Consistency fix: pass flags by keyword like the other wrapper variants.
    return jaccard(brands, exemplars, weighted_avg=True, sqrt=False)


def jaccard_sqrt_no_weighted_avg(brands, exemplars):
    """Square-rooted Jaccard score with a plain (unweighted) average."""
    return jaccard(brands, exemplars, weighted_avg=False, sqrt=True)


def jaccard_sqrt(brands, exemplars):
    """Square-rooted Jaccard score with inverse-follower-count weights."""
    return jaccard(brands, exemplars, weighted_avg=True, sqrt=True)
def jaccard_merge(brands, exemplars):
    """Jaccard similarity between each brand's followers and the union of
    all exemplar followers (exemplars merged into one pseudo-account)."""
    merged = set()
    for ids in exemplars.values():
        merged.update(ids)
    return {brand: _jaccard(ids, merged) for brand, ids in brands}
def compute_log_degrees(brands, exemplars):
    """Map each follower id to 1 / log(Z + 1), where Z is how many brand
    accounts that follower follows.

    Exemplar audiences are deliberately excluded from the counts so that
    people who follow many exemplars are not penalized.
    """
    degree = Counter()
    for ids in brands.values():
        degree.update(ids)
    # +1 matches the original's "add one to each count" before the log.
    return Counter({uid: 1. / math.log(cnt + 1) for uid, cnt in degree.items()})
# PROPORTION
def _proportion(a, b):
""" Return the len(a & b) / len(a) """
return 1. * len(a & b) / len(a)
def proportion(brands, exemplars, weighted_avg=False, sqrt=False):
    """Average, over exemplars, of the fraction of a brand's followers who
    also follow that exemplar.

    *weighted_avg* weights exemplars by inverse follower count; *sqrt*
    square-roots each final score.
    """
    scores = {}
    for brand, followers in brands:
        overlaps = [_proportion(followers, others) for others in exemplars.values()]
        if weighted_avg:
            inverse_sizes = [1. / len(others) for others in exemplars.values()]
            scores[brand] = np.average(overlaps, weights=inverse_sizes)
        else:
            scores[brand] = sum(overlaps) / float(len(exemplars))
    if sqrt:
        return {name: math.sqrt(value) for name, value in scores.items()}
    return scores
def proportion_weighted_avg(brands, exemplars):
    """Proportion score with inverse-follower-count weighting."""
    return proportion(brands, exemplars, weighted_avg=True)


def proportion_sqrt_no_weighted_avg(brands, exemplars):
    """Square-rooted proportion score with a plain average."""
    return proportion(brands, exemplars, sqrt=True)


def proportion_sqrt(brands, exemplars):
    """Square-rooted proportion score with weighting."""
    return proportion(brands, exemplars, weighted_avg=True, sqrt=True)
def proportion_merge(brands, exemplars):
    """Fraction of each brand's followers who follow at least one exemplar
    (all exemplar audiences merged into a single pseudo-account)."""
    merged = set()
    for ids in exemplars.values():
        merged.update(ids)
    return {brand: _proportion(ids, merged) for brand, ids in brands}
# COSINE SIMILARITY
def _cosine(a, b):
    """Set cosine similarity: |a & b| / (sqrt(|a|) * sqrt(|b|)).

    NOTE(review): the original docstring said "len(a & b) / len(a)",
    which describes _proportion, not this function.
    """
    return 1. * len(a & b) / (math.sqrt(len(a)) * math.sqrt(len(b)))
def cosine_weighted_avg(brands, exemplars):
    """Cosine score with inverse-follower-count weighting."""
    return cosine(brands, exemplars, weighted_avg=True)


def cosine_sqrt_no_weighted_avg(brands, exemplars):
    """Square-rooted cosine score with a plain average."""
    return cosine(brands, exemplars, sqrt=True)


def cosine_sqrt(brands, exemplars):
    """Square-rooted cosine score with weighting."""
    return cosine(brands, exemplars, weighted_avg=True, sqrt=True)
def cosine_merge(brands, exemplars):
    """Cosine similarity between each brand's followers and the union of
    all exemplar followers (exemplars merged into one pseudo-account)."""
    merged = set()
    for ids in exemplars.values():
        merged.update(ids)
    return {brand: _cosine(ids, merged) for brand, ids in brands}
def adamic(brands, exemplars):
    """ Return the average Adamic/Adar similarity between a brand's followers
    and the followers of each exemplar. We approximate the number of followed
    accounts per user by only considering those in our brand set."""
    # Deprecated: the implementation below requires `brands` to be a fully
    # materialized dict, but callers now stream brands from disk, so we bail
    # out early.  Everything after `return` is dead code kept for reference.
    print('adamic deprecated...requires loading all brands in memory.')
    return
    degrees = compute_log_degrees(brands, exemplars)
    scores = {}
    # Precompute each exemplar's denominator contribution once.
    exemplar_sums = dict([(exemplar, sum(degrees[z] for z in exemplars[exemplar])) for exemplar in exemplars])
    for brand in sorted(brands):
        brand_sum = sum(degrees[z] for z in brands[brand])
        total = 0.
        for exemplar in exemplars:
            total += sum(degrees[z] for z in brands[brand] & exemplars[exemplar]) / (brand_sum + exemplar_sums[exemplar])
        scores[brand] = total / len(exemplars)
    return scores
def compute_rarity_scores(exemplars):
    """Score each follower as sum_i (1 / n_i) over the exemplars they
    follow, where n_i is exemplar i's follower count — followers of small
    (rare) exemplars score higher.

    >>> sorted(compute_rarity_scores({'e1': {1, 2, 3, 4}, 'e2': {4, 5}}).items())
    [(1, 0.25), (2, 0.25), (3, 0.25), (4, 0.75), (5, 0.5)]
    """
    scores = defaultdict(lambda: 0.)
    for followers in exemplars.values():
        score = 1. / len(followers)
        for f in followers:
            scores[f] += score
    return scores
def rarity(brands, exemplars):
    """Score each brand as the mean rarity (sum of 1/n_i over followed
    exemplars) of its followers."""
    follower_score = compute_rarity_scores(exemplars)
    return {brand: sum(follower_score[f] for f in ids) / len(ids)
            for brand, ids in brands}
def compute_rarity_scores_log(exemplars):
    """Score each follower as sum_i (1 / log(n_i)) over the exemplars they
    follow, where n_i is exemplar i's follower count.

    NOTE(review): the original docstring and doctest were copied from
    compute_rarity_scores and described 1/n_i; this variant dampens the
    size penalty with a log.  An exemplar with exactly one follower would
    make log(n_i) zero — assumed not to occur; TODO confirm with callers.
    """
    scores = defaultdict(lambda: 0.)
    for followers in exemplars.values():
        score = 1. / math.log(len(followers))
        for f in followers:
            scores[f] += score
    return scores
def rarity_log(brands, exemplars):
    """Score each brand as the mean log-damped rarity (sum of 1/log(n_i)
    over followed exemplars) of its followers."""
    follower_score = compute_rarity_scores_log(exemplars)
    return {brand: sum(follower_score[f] for f in ids) / len(ids)
            for brand, ids in brands}
def mkdirs(filename):
    """Ensure the parent directory of *filename* exists (via report.mkdirs)."""
    report.mkdirs(os.path.dirname(filename))
def analyze_followers(brand_follower_file, exemplar_follower_file, outfile, analyze_fn,
                      min_followers, max_followers, sample_exemplars):
    """Score every brand against the exemplar set and write the results.

    Streams brands from *brand_follower_file*, loads exemplar followers
    (excluding brand handles and accounts outside the follower-count
    bounds), optionally keeps a uniform random *sample_exemplars* percent
    of exemplars, then applies the scoring function named by *analyze_fn*
    (resolved from this module, e.g. 'jaccard') and writes one
    "<brand> <score>" line per brand to *outfile*.
    """
    brands = iter_follower_file(brand_follower_file)
    exemplars = read_follower_file(exemplar_follower_file, min_followers=min_followers,
                                   max_followers=max_followers,
                                   blacklist=get_twitter_handles(brand_follower_file))
    print('read follower data for %d exemplars' % (len(exemplars)))
    if sample_exemplars < 100:  # sample a subset of exemplars.
        # BUG FIX: random.sample() requires a sequence; passing dict.keys()
        # raises TypeError on Python 3.11+.
        kept = random.sample(list(exemplars), int(len(exemplars) * sample_exemplars / 100.))
        exemplars = dict([(k, exemplars[k]) for k in kept])
        print('sampled %d exemplars' % (len(exemplars)))
    analyze = getattr(sys.modules[__name__], analyze_fn)
    scores = analyze(brands, exemplars)
    mkdirs(outfile)
    # BUG FIX: use a context manager instead of flush() + close().
    with open(outfile, 'wt') as outf:
        for brand in sorted(scores):
            outf.write('%s %g\n' % (brand, scores[brand]))
    print('results written to', outfile)
def main():
    """CLI entry point: parse docopt arguments and dispatch the analyses."""
    args = docopt(__doc__)
    print(args)
    # NOTE(review): docopt always includes '--seed' (it has a default), so
    # this check is always true; the RNG is seeded with the string value.
    if '--seed' in args:
        random.seed(args['--seed'])
    if args['--network']:
        analyze_followers(args['--brand-followers'], args['--exemplar-followers'], args['--output'], args['--network-method'],
                          int(args['--min-followers']), int(float(args['--max-followers'])), float(args['--sample-exemplars']))
    if args['--text']:
        analyze_text(args['--brand-tweets'], args['--exemplar-tweets'], args['--sample-tweets'], args['--output'], args['--text-method'])
if __name__ == '__main__':  # support both script execution and module import
    main()
|
tapilab/brandelion | brandelion/cli/analyze.py | cosine_merge | python | def cosine_merge(brands, exemplars):
scores = {}
exemplar_followers = set()
for followers in exemplars.values():
exemplar_followers |= followers
for brand, followers in brands:
scores[brand] = _cosine(followers, exemplar_followers)
return scores | Return the proportion of a brand's followers who also follower an
exemplar. We merge all exemplar followers into one big pseudo-account. | train | https://github.com/tapilab/brandelion/blob/40a5a5333cf704182c8666d1fbbbdadc7ff88546/brandelion/cli/analyze.py#L347-L357 | [
"def _cosine(a, b):\n \"\"\" Return the len(a & b) / len(a) \"\"\"\n return 1. * len(a & b) / (math.sqrt(len(a)) * math.sqrt(len(b)))\n"
] | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Analyze social and linguistic brand data.
usage:
brandelion analyze --text --brand-tweets <file> --exemplar-tweets <file> --sample-tweets <file> --output <file> [--text-method <string>]
brandelion analyze --network --brand-followers <file> --exemplar-followers <file> --output <file> [--network-method <string> --min-followers <n> --max-followers <n> --sample-exemplars <p> --seed <s>]
Options
-h, --help
--brand-followers <file> File containing follower data for brand accounts.
--brand-tweets <file> File containing tweets from brand accounts.
--exemplar-followers <file> File containing follower data for exemplar accounts.
--exemplar-tweets <file> File containing tweets from exemplar accounts.
--sample-tweets <file> File containing tweets from representative sample of Twitter.
--text-method <string> Method to do text analysis [default: chi2]
--network-method <string> Method to do text analysis [default: jaccard]
-o, --output <file> File to store results
-t, --text Analyze text of tweets.
-n, --network Analyze followers.
--min-followers <n> Ignore exemplars that don't have at least n followers [default: 0]
--max-followers <n> Ignore exemplars that have more than least n followers [default: 1e10]
--sample-exemplars <p> Sample p percent of the exemplars, uniformly at random. [default: 100]
--seed <s> Seed for random sampling. [default: 12345]
"""
from collections import Counter, defaultdict
from docopt import docopt
import io
from itertools import groupby
import gzip
import json
import math
import numpy as np
import os
import re
import random
from scipy.sparse import vstack
import sys
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_selection import chi2 as skchi2
from sklearn import linear_model
from . import report
### TEXT ANALYSIS ###
def parse_json(json_file, include_date=False):
    """Yield (screen_name, text[, created_at]) tuples from a tweet file.

    Handles plain or gzipped files, lines holding either a single tweet
    object or a list of them, and prefers the untruncated 'full_text' over
    'text' when dates are not requested.  Malformed lines are reported to
    stderr and skipped.
    """
    if json_file[-2:] == 'gz':
        fh = gzip.open(json_file, 'rt')
    else:
        fh = io.open(json_file, mode='rt', encoding='utf8')
    # BUG FIX: the handle was never closed; the with-block closes it even
    # when the consumer abandons this generator early.
    with fh:
        for line in fh:
            try:
                jj = json.loads(line)
                if type(jj) is not list:
                    jj = [jj]
                for j in jj:
                    if include_date:
                        yield (j['user']['screen_name'].lower(), j['text'], j['created_at'])
                    else:
                        if 'full_text' in j:  # get untruncated text if available.
                            yield (j['user']['screen_name'].lower(), j['full_text'])
                        else:
                            yield (j['user']['screen_name'].lower(), j['text'])
            except Exception as e:
                sys.stderr.write('skipping json error: %s\n' % e)
def extract_tweets(json_file):
    """Yield (screen_name, all_tweets_joined) per account, assuming each
    account's tweets appear contiguously in the file."""
    for name, group in groupby(parse_json(json_file), key=lambda pair: pair[0]):
        yield name, ' '.join(text for _, text in group)
def preprocess(s):
    """Normalize tweet text for vectorization.

    Drops @mentions and URLs, doubles each #hashtag into
    'hashtagfoo hashtagfoo' (so hashtags survive bigram features), strips
    RT markers and HTML entities, collapses whitespace and lower-cases.

    >>> preprocess('#hi there http://www.foo.com @you isn"t RT &lt;&gt;')
    'hashtaghi hashtaghi there isn"t'
    """
    s = re.sub(r'@\S+', ' ', s)  # drop @mentions entirely
    s = re.sub(r'http\S+', ' ', s)  # drop urls entirely
    s = re.sub(r'#(\S+)', r'hashtag\1 hashtag\1', s)  # #foo -> hashtagfoo hashtagfoo
    # BUG FIX: re.sub's 4th positional argument is `count`, not `flags`;
    # the original passed re.IGNORECASE (== 2) as a replacement limit,
    # which capped substitutions at 2 and left matching case-sensitive.
    s = re.sub(r'\bRT\b', ' ', s, flags=re.IGNORECASE)
    s = re.sub(r'&[a-z]+;', ' ', s, flags=re.IGNORECASE)
    s = re.sub(r'\s+', ' ', s).strip()
    return s.lower()
def vectorize(json_file, vec, dofit=True):
    """Return (screen_names, X) where row i of the sparse matrix X counts
    the terms used by screen_names[i].

    Fits *vec* when *dofit* is True; otherwise only transforms with the
    already-fitted vocabulary.
    """
    # One pass collects the account names; a second streams the raw text
    # into the vectorizer without materializing it.
    screen_names = [name for name, _ in extract_tweets(json_file)]
    documents = (doc for _, doc in extract_tweets(json_file))
    if dofit:
        X = vec.fit_transform(documents)
    else:
        X = vec.transform(documents)
    return screen_names, X
def chi2(exemplars, samples, n=300):
    """Return a per-feature weight vector: the chi^2 statistic for the top
    *n* features that are both discriminative and positively associated
    with the exemplar class; every other feature gets weight 0."""
    labels = np.array(([1.] * exemplars.shape[0]) + ([0.] * samples.shape[0]))
    X = vstack((exemplars, samples)).tocsr()
    model = linear_model.LogisticRegression(penalty='l2')
    model.fit(X, labels)
    coef = model.coef_[0]
    chis, _pvals = skchi2(X, labels)
    # Best chi^2 first, restricted to features the classifier weights positively.
    positive = [i for i in chis.argsort()[::-1] if coef[i] > 0]
    coef[:] = 0.
    for i in positive[:n]:
        coef[i] = chis[i]
    return coef
def do_score(vec, coef):
    """Fraction of total feature weight captured by *vec*'s nonzero terms."""
    active_columns = vec.nonzero()[1]
    return np.sum(coef[active_columns]) / np.sum(coef)
def write_top_words(fname, vocab, scores):
    """Write "term score" lines for every positively-scored term, highest
    score first, UTF-8 encoded."""
    with io.open(fname, 'w', encoding='utf8') as out:
        for idx in np.argsort(scores)[::-1]:
            if scores[idx] > 0:
                out.write('%s %g\n' % (vocab[idx], scores[idx]))
def analyze_text(brand_tweets_file, exemplar_tweets_file, sample_tweets_file, outfile, analyze_fn):
    """Score each brand by the fraction of top discriminative-ngram weight
    its tweets capture.

    Fits a binary bigram vectorizer on exemplar tweets, scores ngrams with
    the module-level function named by *analyze_fn* (e.g. 'chi2') against
    a background sample, and writes "<brand> <score>" lines to *outfile*
    plus the ranked ngram weights to "<outfile>.topwords".
    """
    analyze = getattr(sys.modules[__name__], analyze_fn)
    vec = CountVectorizer(min_df=3, preprocessor=preprocess, ngram_range=(2, 2), binary=True)
    _, exemplar_vectors = vectorize(exemplar_tweets_file, vec, dofit=True)
    print('read tweets for %d exemplar accounts' % exemplar_vectors.shape[0])
    brands, brand_vectors = vectorize(brand_tweets_file, vec, dofit=False)
    print('read tweets for %d brand accounts' % brand_vectors.shape[0])
    _, sample_vectors = vectorize(sample_tweets_file, vec, dofit=False)
    print('read tweets for %d sample accounts' % sample_vectors.shape[0])
    scores = analyze(exemplar_vectors, sample_vectors)
    vocab = vec.get_feature_names()
    write_top_words(outfile + '.topwords', vocab, scores)
    print('top 10 ngrams:\n', '\n'.join(['%s=%.4g' % (vocab[i], scores[i]) for i in np.argsort(scores)[::-1][:10]]))
    # BUG FIX: the output file was flushed but never closed.
    with open(outfile, 'wt') as outf:
        for bi, brand_vec in enumerate(brand_vectors):
            outf.write('%s %g\n' % (brands[bi], do_score(brand_vec, scores)))
### FOLLOWER ANALYSIS ###
def get_twitter_handles(fname):
    """Return the set of lower-cased Twitter handles listed in *fname*.

    Each non-blank line is expected to start with a screen name; only the
    first ~90 characters of a line are scanned so very long lines stay cheap.
    """
    handles = set()
    with open(fname, 'rt') as f:
        for line in f:
            parts = line[:90].split()
            if parts:  # BUG FIX: blank lines used to raise IndexError
                handles.add(parts[0].lower())
    return handles
def read_follower_file(fname, min_followers=0, max_followers=1e10, blacklist=set()):
    """Map each screen_name in a follower file to its set of follower ids.

    Lines look like ``<timestamp> <name> <id> <id> ...``.  Names in
    *blacklist* are reported and skipped; accounts whose follower count
    falls outside (min_followers, max_followers] are silently dropped.
    """
    accounts = {}
    with open(fname, 'rt') as fh:
        for raw in fh:
            fields = raw.split()
            if len(fields) <= 3:
                continue
            name = fields[1].lower()
            if name in blacklist:
                print('skipping exemplar', name)
                continue
            ids = {int(tok) for tok in fields[2:]}
            if min_followers < len(ids) <= max_followers:
                accounts[name] = ids
    return accounts
def iter_follower_file(fname):
    """Yield (screen_name, follower_id_set) pairs from a follower file.

    File format is ``<iso timestamp> <screen_name> <id1> <id2> ...``;
    lines with fewer than two follower ids are ignored.
    """
    with open(fname, 'rt') as fh:
        for raw in fh:
            fields = raw.split()
            if len(fields) <= 3:
                continue
            yield fields[1].lower(), {int(tok) for tok in fields[2:]}
# JACCARD
def _jaccard(a, b):
""" Return the Jaccard similarity between two sets a and b. """
return 1. * len(a & b) / len(a | b)
def jaccard(brands, exemplars, weighted_avg=False, sqrt=False):
""" Return the average Jaccard similarity between a brand's followers and the
followers of each exemplar. """
scores = {}
for brand, followers in brands:
if weighted_avg:
scores[brand] = np.average([_jaccard(followers, others) for others in exemplars.values()],
weights=[1. / len(others) for others in exemplars.values()])
else:
scores[brand] = 1. * sum(_jaccard(followers, others) for others in exemplars.values()) / len(exemplars)
# limit to exemplars with less than 40k followers: scores[brand] = 1. * sum(_jaccard(brands[brand], others) for others in exemplars.itervalues() if len(others) < 40000) / len(exemplars)
if sqrt:
scores = dict([(b, math.sqrt(s)) for b, s in scores.items()])
return scores
def jaccard_weighted_avg(brands, exemplars):
return jaccard(brands, exemplars, True, False)
def jaccard_sqrt_no_weighted_avg(brands, exemplars):
return jaccard(brands, exemplars, False, True)
def jaccard_sqrt(brands, exemplars):
return jaccard(brands, exemplars, weighted_avg=True, sqrt=True)
def jaccard_merge(brands, exemplars):
""" Return the average Jaccard similarity between a brand's followers and
the followers of each exemplar. We merge all exemplar followers into one
big pseudo-account."""
scores = {}
exemplar_followers = set()
for followers in exemplars.values():
exemplar_followers |= followers
for brand, followers in brands:
scores[brand] = _jaccard(followers, exemplar_followers)
return scores
def compute_log_degrees(brands, exemplars):
""" For each follower, let Z be the total number of brands they follow.
Return a dictionary of 1. / log(Z), for each follower.
"""
counts = Counter()
for followers in brands.values(): # + exemplars.values(): # Include exemplars in these counts? No, don't want to penalize people who follow many exemplars.
counts.update(followers)
counts.update(counts.keys()) # Add 1 to each count.
for k in counts:
counts[k] = 1. / math.log(counts[k])
return counts
# PROPORTION
def _proportion(a, b):
""" Return the len(a & b) / len(a) """
return 1. * len(a & b) / len(a)
def proportion(brands, exemplars, weighted_avg=False, sqrt=False):
"""
Return the proportion of a brand's followers who also follow an exemplar.
"""
scores = {}
for brand, followers in brands:
if weighted_avg:
scores[brand] = np.average([_proportion(followers, others) for others in exemplars.values()],
weights=[1. / len(others) for others in exemplars.values()])
else:
scores[brand] = 1. * sum(_proportion(followers, others) for others in exemplars.values()) / len(exemplars)
if sqrt:
scores = dict([(b, math.sqrt(s)) for b, s in scores.items()])
return scores
def proportion_weighted_avg(brands, exemplars):
return proportion(brands, exemplars, weighted_avg=True, sqrt=False)
def proportion_sqrt_no_weighted_avg(brands, exemplars):
return proportion(brands, exemplars, weighted_avg=False, sqrt=True)
def proportion_sqrt(brands, exemplars):
return proportion(brands, exemplars, weighted_avg=True, sqrt=True)
def proportion_merge(brands, exemplars):
""" Return the proportion of a brand's followers who also follower an
exemplar. We merge all exemplar followers into one big pseudo-account."""
scores = {}
exemplar_followers = set()
for followers in exemplars.values():
exemplar_followers |= followers
for brand, followers in brands:
scores[brand] = _proportion(followers, exemplar_followers)
return scores
# COSINE SIMILARITY
def _cosine(a, b):
""" Return the len(a & b) / len(a) """
return 1. * len(a & b) / (math.sqrt(len(a)) * math.sqrt(len(b)))
def cosine(brands, exemplars, weighted_avg=False, sqrt=False):
"""
Return the cosine similarity betwee a brand's followers and the exemplars.
"""
scores = {}
for brand, followers in brands:
if weighted_avg:
scores[brand] = np.average([_cosine(followers, others) for others in exemplars.values()],
weights=[1. / len(others) for others in exemplars.values()])
else:
scores[brand] = 1. * sum(_cosine(followers, others) for others in exemplars.values()) / len(exemplars)
if sqrt:
scores = dict([(b, math.sqrt(s)) for b, s in scores.items()])
return scores
def cosine_weighted_avg(brands, exemplars):
return cosine(brands, exemplars, weighted_avg=True, sqrt=False)
def cosine_sqrt_no_weighted_avg(brands, exemplars):
return cosine(brands, exemplars, weighted_avg=False, sqrt=True)
def cosine_sqrt(brands, exemplars):
return cosine(brands, exemplars, weighted_avg=True, sqrt=True)
def adamic(brands, exemplars):
    """ Return the average Adamic/Adar similarity between a brand's followers
    and the followers of each exemplar. We approximate the number of followed
    accounts per user by only considering those in our brand set.

    Deprecated: requires loading all brands into memory, so this prints a
    warning and returns None. Everything after the early ``return`` is
    unreachable dead code, kept for reference only.
    """
    print('adamic deprecated...requires loading all brands in memory.')
    return
    # --- unreachable below this point: original implementation retained ---
    degrees = compute_log_degrees(brands, exemplars)
    scores = {}
    exemplar_sums = dict([(exemplar, sum(degrees[z] for z in exemplars[exemplar])) for exemplar in exemplars])
    for brand in sorted(brands):
        brand_sum = sum(degrees[z] for z in brands[brand])
        total = 0.
        for exemplar in exemplars:
            total += sum(degrees[z] for z in brands[brand] & exemplars[exemplar]) / (brand_sum + exemplar_sums[exemplar])
        scores[brand] = total / len(exemplars)
    return scores
def compute_rarity_scores(exemplars):
""" Compute a score for each follower that is sum_i (1/n_i), where n_i is
the degree of the ith exemplar they follow.
>>> compute_rarity_scores({'e1':{1,2,3,4}, 'e2':{4,5}}).items()
[(1, 0.25), (2, 0.25), (3, 0.25), (4, 0.75), (5, 0.5)]
"""
scores = defaultdict(lambda: 0.)
for followers in exemplars.values():
score = 1. / len(followers)
for f in followers:
scores[f] += score
return scores
def rarity(brands, exemplars):
""" Compute a score for each follower that is sum_i (1/n_i), where n_i is the degree of the ith exemplar they follow.
The score for a brand is then the average of their follower scores."""
rarity = compute_rarity_scores(exemplars)
scores = {}
for brand, followers in brands:
scores[brand] = sum(rarity[f] for f in followers) / len(followers)
return scores
def compute_rarity_scores_log(exemplars):
""" Compute a score for each follower that is sum_i (1/n_i), where n_i is
the degree of the ith exemplar they follow.
>>> compute_rarity_scores({'e1':{1,2,3,4}, 'e2':{4,5}}).items()
[(1, 0.25), (2, 0.25), (3, 0.25), (4, 0.75), (5, 0.5)]
"""
scores = defaultdict(lambda: 0.)
for followers in exemplars.values():
score = 1. / math.log(len(followers))
for f in followers:
scores[f] += score
return scores
def rarity_log(brands, exemplars):
""" Compute a score for each follower that is sum_i (1/log(n_i)), where n_i is the degree of the ith exemplar they follow.
The score for a brand is then the average of their follower scores."""
rarity = compute_rarity_scores_log(exemplars)
scores = {}
for brand, followers in brands:
scores[brand] = sum(rarity[f] for f in followers) / len(followers)
return scores
def mkdirs(filename):
report.mkdirs(os.path.dirname(filename))
def analyze_followers(brand_follower_file, exemplar_follower_file, outfile, analyze_fn,
                      min_followers, max_followers, sample_exemplars):
    """Score brands against exemplars by follower overlap and write
    '<brand> <score>' lines to ``outfile``.

    Args:
      brand_follower_file: follower file for the brands to score.
      exemplar_follower_file: follower file for the exemplar accounts.
      outfile: destination path (parent directories are created).
      analyze_fn: name of a scoring function in this module (e.g. 'jaccard').
      min_followers: exemplars must have strictly more followers than this.
      max_followers: exemplars must have at most this many followers.
      sample_exemplars: percentage of exemplars to keep (sampled at random).
    """
    brands = iter_follower_file(brand_follower_file)
    exemplars = read_follower_file(exemplar_follower_file, min_followers=min_followers,
                                   max_followers=max_followers,
                                   blacklist=get_twitter_handles(brand_follower_file))
    print('read follower data for %d exemplars' % (len(exemplars)))
    if sample_exemplars < 100:  # sample a subset of exemplars.
        # Fix: random.sample requires a sequence; passing dict.keys() raises
        # TypeError on Python 3.9+ (deprecated) / 3.11+ (removed).
        sampled = random.sample(list(exemplars), int(len(exemplars) * sample_exemplars / 100.))
        exemplars = dict((k, exemplars[k]) for k in sampled)
        print('sampled %d exemplars' % (len(exemplars)))
    analyze = getattr(sys.modules[__name__], analyze_fn)  # dispatch by name
    scores = analyze(brands, exemplars)
    mkdirs(outfile)
    with open(outfile, 'wt') as outf:  # context manager guarantees close
        for brand in sorted(scores):
            outf.write('%s %g\n' % (brand, scores[brand]))
    print('results written to', outfile)
def main():
    """Command-line entry point: dispatch to network and/or text analysis."""
    args = docopt(__doc__)
    print(args)
    # Fix: docopt dicts always contain the '--seed' key, so the original
    # `'--seed' in args` check was always true; test the value instead.
    if args['--seed'] is not None:
        random.seed(args['--seed'])
    if args['--network']:
        analyze_followers(args['--brand-followers'], args['--exemplar-followers'], args['--output'],
                          args['--network-method'], int(args['--min-followers']),
                          int(float(args['--max-followers'])), float(args['--sample-exemplars']))
    if args['--text']:
        analyze_text(args['--brand-tweets'], args['--exemplar-tweets'], args['--sample-tweets'],
                     args['--output'], args['--text-method'])
if __name__ == '__main__':
main()
|
tapilab/brandelion | brandelion/cli/analyze.py | adamic | python | def adamic(brands, exemplars):
print('adamic deprecated...requires loading all brands in memory.')
return
degrees = compute_log_degrees(brands, exemplars)
scores = {}
exemplar_sums = dict([(exemplar, sum(degrees[z] for z in exemplars[exemplar])) for exemplar in exemplars])
for brand in sorted(brands):
brand_sum = sum(degrees[z] for z in brands[brand])
total = 0.
for exemplar in exemplars:
total += sum(degrees[z] for z in brands[brand] & exemplars[exemplar]) / (brand_sum + exemplar_sums[exemplar])
scores[brand] = total / len(exemplars)
return scores | Return the average Adamic/Adar similarity between a brand's followers
and the followers of each exemplar. We approximate the number of followed
accounts per user by only considering those in our brand set. | train | https://github.com/tapilab/brandelion/blob/40a5a5333cf704182c8666d1fbbbdadc7ff88546/brandelion/cli/analyze.py#L360-L376 | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Analyze social and linguistic brand data.
usage:
brandelion analyze --text --brand-tweets <file> --exemplar-tweets <file> --sample-tweets <file> --output <file> [--text-method <string>]
brandelion analyze --network --brand-followers <file> --exemplar-followers <file> --output <file> [--network-method <string> --min-followers <n> --max-followers <n> --sample-exemplars <p> --seed <s>]
Options
-h, --help
--brand-followers <file> File containing follower data for brand accounts.
--brand-tweets <file> File containing tweets from brand accounts.
--exemplar-followers <file> File containing follower data for exemplar accounts.
--exemplar-tweets <file> File containing tweets from exemplar accounts.
--sample-tweets <file> File containing tweets from representative sample of Twitter.
--text-method <string> Method to do text analysis [default: chi2]
--network-method <string> Method to do text analysis [default: jaccard]
-o, --output <file> File to store results
-t, --text Analyze text of tweets.
-n, --network Analyze followers.
--min-followers <n> Ignore exemplars that don't have at least n followers [default: 0]
--max-followers <n> Ignore exemplars that have more than least n followers [default: 1e10]
--sample-exemplars <p> Sample p percent of the exemplars, uniformly at random. [default: 100]
--seed <s> Seed for random sampling. [default: 12345]
"""
from collections import Counter, defaultdict
from docopt import docopt
import io
from itertools import groupby
import gzip
import json
import math
import numpy as np
import os
import re
import random
from scipy.sparse import vstack
import sys
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_selection import chi2 as skchi2
from sklearn import linear_model
from . import report
### TEXT ANALYSIS ###
def parse_json(json_file, include_date=False):
    """Yield (screen_name, text[, created_at]) tuples from a file of JSON tweets.

    Args:
      json_file: path to a file with one JSON object (or list of objects)
        per line; a name ending in 'gz' triggers gzip decompression.
      include_date: if True, yield 3-tuples that also carry ``created_at``.

    Malformed lines are reported to stderr and skipped.
    """
    if json_file[-2:] == 'gz':
        fh = gzip.open(json_file, 'rt')
    else:
        fh = io.open(json_file, mode='rt', encoding='utf8')
    # Fix: the handle was never closed; `with` closes it when the generator
    # is exhausted or garbage-collected.
    with fh:
        for line in fh:
            try:
                jj = json.loads(line)
                if type(jj) is not list:
                    jj = [jj]
                for j in jj:
                    name = j['user']['screen_name'].lower()
                    if include_date:
                        yield (name, j['text'], j['created_at'])
                    elif 'full_text' in j:  # get untruncated text if available.
                        yield (name, j['full_text'])
                    else:
                        yield (name, j['text'])
            except Exception as e:
                yield from ()  # keep generator semantics explicit
                sys.stderr.write('skipping json error: %s\n' % e)
def extract_tweets(json_file):
    """Yield (screen_name, text) pairs, where ``text`` is all of that user's
    tweets (consecutive in the file) joined with single spaces."""
    grouped = groupby(parse_json(json_file), key=lambda row: row[0])
    for name, rows in grouped:
        yield name, ' '.join(row[1] for row in rows)
def preprocess(s):
    """Normalize a tweet for vectorization: strip @mentions and URLs, expand
    hashtags (#foo -> 'hashtagfoo hashtagfoo', doubled so hashtags survive
    bigram tokenization), drop RT markers and HTML entities, collapse
    whitespace, and lowercase.

    >>> preprocess('#hi there http://www.foo.com @you isn"t RT <>')
    'hashtaghi hashtaghi there isn"t <>'
    """
    s = re.sub(r'@\S+', ' ', s)  # drop @mentions entirely
    s = re.sub(r'http\S+', ' ', s)  # drop URLs entirely
    s = re.sub(r'#(\S+)', r'hashtag\1 hashtag\1', s)  # #foo -> hashtagfoo hashtagfoo
    # Fix: re.sub's 4th positional argument is *count*, not *flags*; the
    # original passed re.IGNORECASE positionally, which silently capped the
    # number of substitutions at 2 and did NOT ignore case.
    s = re.sub(r'\bRT\b', ' ', s, flags=re.IGNORECASE)
    s = re.sub(r'&[a-z]+;', ' ', s, flags=re.IGNORECASE)  # HTML entities, e.g. &amp;
    s = re.sub(r'\s+', ' ', s).strip()
    return s.lower()
def vectorize(json_file, vec, dofit=True):
    """Return (screen_names, X) where row i of the matrix X is the term-count
    vector for screen_names[i].

    Args:
      json_file: path to the tweets file (see extract_tweets).
      vec: a scikit-learn-style vectorizer with fit_transform/transform.
      dofit: fit the vectorizer's vocabulary (True) or reuse it (False).
    """
    # Fix: the original iterated extract_tweets twice (parsing the whole
    # file twice) -- once for names, once for texts. Read it once.
    screen_names = []
    texts = []
    for name, text in extract_tweets(json_file):
        screen_names.append(name)
        texts.append(text)
    if dofit:
        X = vec.fit_transform(texts)
    else:
        X = vec.transform(texts)
    return screen_names, X
def chi2(exemplars, samples, n=300):
    """Score each vocabulary term by chi-squared association with the
    exemplar class, keeping only the top ``n`` terms that a logistic
    regression also weights positively; all other entries are zero.

    Args:
      exemplars: sparse matrix of exemplar-account term counts (positive class).
      samples: sparse matrix of background-account term counts (negative class).
      n: number of top terms to retain.

    Returns:
      1-d array of per-term scores (length == vocabulary size).
    """
    y = np.array(([1.] * exemplars.shape[0]) + ([.0] * samples.shape[0]))
    X = vstack((exemplars, samples)).tocsr()
    clf = linear_model.LogisticRegression(penalty='l2')
    clf.fit(X, y)
    coef = clf.coef_[0]
    chis, pvals = skchi2(X, y)
    # Rank terms by chi2 and keep only those with positive LR weight, i.e.
    # terms indicative of the exemplar class rather than merely discriminative.
    top_indices = [i for i in chis.argsort()[::-1] if coef[i] > 0]
    scores = np.zeros_like(coef)  # vectorized; original zeroed coef in a Python loop
    for idx in top_indices[:n]:
        scores[idx] = chis[idx]
    return scores
def do_score(vec, coef):
    """Sum the coefficients of the terms present in ``vec``, normalized by
    the total coefficient mass."""
    present_cols = vec.nonzero()[1]
    return np.sum(coef[present_cols]) / np.sum(coef)
def write_top_words(fname, vocab, scores):
    """Write '<term> <score>' lines to ``fname``, highest score first,
    omitting terms with non-positive scores."""
    ranked = np.argsort(scores)[::-1]
    with io.open(fname, 'w', encoding='utf8') as outf:
        for idx in ranked:
            if scores[idx] > 0:
                outf.write('%s %g\n' % (vocab[idx], scores[idx]))
def analyze_text(brand_tweets_file, exemplar_tweets_file, sample_tweets_file, outfile, analyze_fn):
    """Score each brand by the similarity of its tweet language to the
    exemplar accounts, relative to a background sample.

    Args:
      brand_tweets_file: tweets for the brands to score.
      exemplar_tweets_file: tweets for the exemplar (reference) accounts.
      sample_tweets_file: tweets for a representative Twitter sample.
      outfile: destination for '<brand> <score>' lines; ranked ngrams are
        also written to ``outfile + '.topwords'``.
      analyze_fn: name of a scoring function in this module (e.g. 'chi2').
    """
    analyze = getattr(sys.modules[__name__], analyze_fn)  # dispatch by name
    vec = CountVectorizer(min_df=3, preprocessor=preprocess, ngram_range=(2, 2), binary=True)
    _, exemplar_vectors = vectorize(exemplar_tweets_file, vec, dofit=True)
    print('read tweets for %d exemplar accounts' % exemplar_vectors.shape[0])
    brands, brand_vectors = vectorize(brand_tweets_file, vec, dofit=False)
    print('read tweets for %d brand accounts' % brand_vectors.shape[0])
    _, sample_vectors = vectorize(sample_tweets_file, vec, dofit=False)
    print('read tweets for %d sample accounts' % sample_vectors.shape[0])
    scores = analyze(exemplar_vectors, sample_vectors)
    # NOTE(review): get_feature_names() was removed in scikit-learn 1.2 in
    # favor of get_feature_names_out(); left unchanged to match the sklearn
    # version this project uses -- confirm before upgrading.
    vocab = vec.get_feature_names()
    write_top_words(outfile + '.topwords', vocab, scores)
    print('top 10 ngrams:\n', '\n'.join(['%s=%.4g' % (vocab[i], scores[i]) for i in np.argsort(scores)[::-1][:10]]))
    # Fix: the output file was previously flushed but never closed.
    with open(outfile, 'wt') as outf:
        for bi, brand_vec in enumerate(brand_vectors):
            outf.write('%s %g\n' % (brands[bi], do_score(brand_vec, scores)))
### FOLLOWER ANALYSIS ###
def get_twitter_handles(fname):
    """Return the set of lowercased first whitespace-separated tokens of each
    line in ``fname``; used as a blacklist in analyze_followers.

    NOTE(review): follower files are '<timestamp> <screen_name> <ids...>'
    (see iter_follower_file), so the *first* token here is the timestamp,
    not the handle -- parts[1] may have been intended. Confirm the input
    file format before relying on this blacklist.
    """
    handles = set()
    with open(fname, 'rt') as f:
        for line in f:
            # line[:90] bounds the split cost on very long follower lines.
            handles.add(line[:90].split()[0].lower())
    return handles
def read_follower_file(fname, min_followers=0, max_followers=1e10, blacklist=frozenset()):
    """Read follower data and return {screen_name: set(follower_ids)}.

    Lines look like '<timestamp> <screen_name> <id> <id> ...'; lines with
    fewer than two follower ids are ignored.

    Args:
      fname: path to the follower file.
      min_followers: keep accounts with strictly more than this many followers.
      max_followers: keep accounts with at most this many followers.
      blacklist: lowercase screen names to exclude.
        (Fix: default was a mutable ``set()``; frozenset avoids the
        shared-mutable-default anti-pattern without changing behavior.)
    """
    result = {}
    with open(fname, 'rt') as f:
        for line in f:
            parts = line.split()
            if len(parts) > 3:
                name = parts[1].lower()
                if name not in blacklist:
                    followers = set(int(x) for x in parts[2:])
                    if min_followers < len(followers) <= max_followers:
                        result[name] = followers
                    else:
                        print('skipping exemplar', name)
    return result
def iter_follower_file(fname):
    """Generate (screen_name, follower_id_set) pairs from a follower file.

    File format is:
      <iso timestamp> <screen_name> <follower_id1> <follower_id2> ...
    Lines with fewer than two follower ids are skipped.
    """
    with open(fname, 'rt') as fh:
        for raw in fh:
            fields = raw.split()
            if len(fields) <= 3:
                continue
            yield fields[1].lower(), {int(tok) for tok in fields[2:]}
# JACCARD
def _jaccard(a, b):
""" Return the Jaccard similarity between two sets a and b. """
return 1. * len(a & b) / len(a | b)
def jaccard(brands, exemplars, weighted_avg=False, sqrt=False):
    """Score each brand by the average Jaccard similarity between its
    follower set and each exemplar's follower set.

    Args:
      brands: iterable of (name, follower_set) pairs.
      exemplars: dict mapping exemplar name -> follower_set.
      weighted_avg: weight each exemplar by 1/|followers|.
      sqrt: return the square root of each score.
    """
    exemplar_sets = list(exemplars.values())
    scores = {}
    for name, followers in brands:
        sims = [_jaccard(followers, other) for other in exemplar_sets]
        if weighted_avg:
            scores[name] = np.average(sims, weights=[1. / len(other) for other in exemplar_sets])
        else:
            scores[name] = sum(sims) / float(len(exemplar_sets))
    if sqrt:
        scores = {name: math.sqrt(val) for name, val in scores.items()}
    return scores
def jaccard_weighted_avg(brands, exemplars):
    # jaccard with per-exemplar 1/|followers| weighting, no sqrt.
    return jaccard(brands, exemplars, True, False)
def jaccard_sqrt_no_weighted_avg(brands, exemplars):
    # unweighted jaccard, sqrt-transformed scores.
    return jaccard(brands, exemplars, False, True)
def jaccard_sqrt(brands, exemplars):
    # weighted jaccard, sqrt-transformed scores.
    return jaccard(brands, exemplars, weighted_avg=True, sqrt=True)
def jaccard_merge(brands, exemplars):
    """Score each brand by the Jaccard similarity between its followers and
    the union of all exemplar followers (one merged pseudo-account)."""
    merged = set()
    for follower_set in exemplars.values():
        merged.update(follower_set)
    return {name: _jaccard(followers, merged) for name, followers in brands}
def compute_log_degrees(brands, exemplars):
    """Map each follower id to 1 / log(Z + 1), where Z is the number of brand
    accounts that follower follows.

    ``exemplars`` is accepted for signature compatibility but unused:
    following many exemplars should not be penalized. Returns a Counter,
    so unseen follower ids look up as 0.
    """
    degree = Counter()
    for follower_ids in brands.values():
        degree.update(follower_ids)
    # Add-one smoothing so a degree of 1 does not hit log(1) == 0.
    for follower_id in list(degree):
        degree[follower_id] = 1. / math.log(degree[follower_id] + 1)
    return degree
# PROPORTION
def _proportion(a, b):
""" Return the len(a & b) / len(a) """
return 1. * len(a & b) / len(a)
def proportion(brands, exemplars, weighted_avg=False, sqrt=False):
    """Score each brand by the average fraction of its followers that also
    follow each exemplar.

    Args:
      brands: iterable of (name, follower_set) pairs.
      exemplars: dict mapping exemplar name -> follower_set.
      weighted_avg: weight each exemplar by 1/|followers|.
      sqrt: return the square root of each score.
    """
    others = list(exemplars.values())
    scores = {}
    for name, followers in brands:
        values = [_proportion(followers, other) for other in others]
        if weighted_avg:
            scores[name] = np.average(values, weights=[1. / len(other) for other in others])
        else:
            scores[name] = sum(values) / float(len(others))
    if sqrt:
        scores = {k: math.sqrt(v) for k, v in scores.items()}
    return scores
def proportion_weighted_avg(brands, exemplars):
    # proportion with per-exemplar 1/|followers| weighting, no sqrt.
    return proportion(brands, exemplars, weighted_avg=True, sqrt=False)
def proportion_sqrt_no_weighted_avg(brands, exemplars):
    # unweighted proportion, sqrt-transformed scores.
    return proportion(brands, exemplars, weighted_avg=False, sqrt=True)
def proportion_sqrt(brands, exemplars):
    # weighted proportion, sqrt-transformed scores.
    return proportion(brands, exemplars, weighted_avg=True, sqrt=True)
def proportion_merge(brands, exemplars):
    """ Return the proportion of a brand's followers who also follow an
    exemplar. We merge all exemplar followers into one big pseudo-account."""
    scores = {}
    exemplar_followers = set()
    # Union of all exemplar follower ids: one merged pseudo-account.
    for followers in exemplars.values():
        exemplar_followers |= followers
    for brand, followers in brands:
        scores[brand] = _proportion(followers, exemplar_followers)
    return scores
# COSINE SIMILARITY
def _cosine(a, b):
    """ Set-based cosine similarity: len(a & b) / (sqrt(len(a)) * sqrt(len(b))). """
    return 1. * len(a & b) / (math.sqrt(len(a)) * math.sqrt(len(b)))
def cosine(brands, exemplars, weighted_avg=False, sqrt=False):
    """Score each brand by the average set-cosine similarity between its
    follower set and each exemplar's follower set.

    Args:
      brands: iterable of (name, follower_set) pairs.
      exemplars: dict mapping exemplar name -> follower_set.
      weighted_avg: weight each exemplar by 1/|followers|.
      sqrt: return the square root of each score.
    """
    others = list(exemplars.values())
    scores = {}
    for name, followers in brands:
        sims = [_cosine(followers, other) for other in others]
        if weighted_avg:
            scores[name] = np.average(sims, weights=[1. / len(other) for other in others])
        else:
            scores[name] = sum(sims) / float(len(others))
    if sqrt:
        scores = {k: math.sqrt(v) for k, v in scores.items()}
    return scores
def cosine_weighted_avg(brands, exemplars):
    # cosine with per-exemplar 1/|followers| weighting, no sqrt.
    return cosine(brands, exemplars, weighted_avg=True, sqrt=False)
def cosine_sqrt_no_weighted_avg(brands, exemplars):
    # unweighted cosine, sqrt-transformed scores.
    return cosine(brands, exemplars, weighted_avg=False, sqrt=True)
def cosine_sqrt(brands, exemplars):
    # weighted cosine, sqrt-transformed scores.
    return cosine(brands, exemplars, weighted_avg=True, sqrt=True)
def cosine_merge(brands, exemplars):
    """Score each brand by the set-cosine similarity between its followers
    and the union of all exemplar followers (one merged pseudo-account)."""
    merged = set()
    for follower_set in exemplars.values():
        merged |= follower_set
    return {name: _cosine(followers, merged) for name, followers in brands}
def compute_rarity_scores(exemplars):
    """Score each follower as sum_i (1/n_i), where n_i is the follower count
    of the i-th exemplar that user follows (rarer exemplars count more).

    >>> sorted(compute_rarity_scores({'e1': {1, 2, 3, 4}, 'e2': {4, 5}}).items())
    [(1, 0.25), (2, 0.25), (3, 0.25), (4, 0.75), (5, 0.5)]
    """
    # Fix: the original doctest called .items() without sorting, which is a
    # view with no guaranteed order in Python 3.
    scores = defaultdict(lambda: 0.)
    for followers in exemplars.values():
        share = 1. / len(followers)  # followers of a small exemplar score higher
        for f in followers:
            scores[f] += share
    return scores
def rarity(brands, exemplars):
    """Score each brand by the mean rarity of its followers, where a
    follower's rarity is the sum of 1/n_i over the exemplars they follow."""
    follower_score = compute_rarity_scores(exemplars)
    return {name: sum(follower_score[f] for f in followers) / len(followers)
            for name, followers in brands}
def compute_rarity_scores_log(exemplars):
    """Score each follower as sum_i (1/log(n_i)), where n_i is the follower
    count of the i-th exemplar that user follows.

    Note: an exemplar with exactly one follower would divide by log(1) == 0;
    exemplars are filtered by ``min_followers`` upstream.
    (The original docstring/doctest were copy-pasted from the non-log
    compute_rarity_scores and did not match this function.)
    """
    scores = defaultdict(lambda: 0.)
    for followers in exemplars.values():
        share = 1. / math.log(len(followers))
        for f in followers:
            scores[f] += share
    return scores
def rarity_log(brands, exemplars):
    """Score each brand by the mean log-rarity of its followers, where a
    follower's log-rarity is the sum of 1/log(n_i) over the exemplars they
    follow."""
    follower_score = compute_rarity_scores_log(exemplars)
    return {name: sum(follower_score[f] for f in followers) / len(followers)
            for name, followers in brands}
def mkdirs(filename):
    """Create the parent directory of ``filename`` (delegates to report.mkdirs)."""
    report.mkdirs(os.path.dirname(filename))
def analyze_followers(brand_follower_file, exemplar_follower_file, outfile, analyze_fn,
                      min_followers, max_followers, sample_exemplars):
    """Score brands against exemplars by follower overlap and write
    '<brand> <score>' lines to ``outfile``.

    Args:
      brand_follower_file: follower file for the brands to score.
      exemplar_follower_file: follower file for the exemplar accounts.
      outfile: destination path (parent directories are created).
      analyze_fn: name of a scoring function in this module (e.g. 'jaccard').
      min_followers: exemplars must have strictly more followers than this.
      max_followers: exemplars must have at most this many followers.
      sample_exemplars: percentage of exemplars to keep (sampled at random).
    """
    brands = iter_follower_file(brand_follower_file)
    exemplars = read_follower_file(exemplar_follower_file, min_followers=min_followers,
                                   max_followers=max_followers,
                                   blacklist=get_twitter_handles(brand_follower_file))
    print('read follower data for %d exemplars' % (len(exemplars)))
    if sample_exemplars < 100:  # sample a subset of exemplars.
        # Fix: random.sample requires a sequence; passing dict.keys() raises
        # TypeError on Python 3.9+ (deprecated) / 3.11+ (removed).
        sampled = random.sample(list(exemplars), int(len(exemplars) * sample_exemplars / 100.))
        exemplars = dict((k, exemplars[k]) for k in sampled)
        print('sampled %d exemplars' % (len(exemplars)))
    analyze = getattr(sys.modules[__name__], analyze_fn)  # dispatch by name
    scores = analyze(brands, exemplars)
    mkdirs(outfile)
    with open(outfile, 'wt') as outf:  # context manager guarantees close
        for brand in sorted(scores):
            outf.write('%s %g\n' % (brand, scores[brand]))
    print('results written to', outfile)
def main():
    """Command-line entry point: dispatch to network and/or text analysis."""
    args = docopt(__doc__)
    print(args)
    # Fix: docopt dicts always contain the '--seed' key, so the original
    # `'--seed' in args` check was always true; test the value instead.
    if args['--seed'] is not None:
        random.seed(args['--seed'])
    if args['--network']:
        analyze_followers(args['--brand-followers'], args['--exemplar-followers'], args['--output'],
                          args['--network-method'], int(args['--min-followers']),
                          int(float(args['--max-followers'])), float(args['--sample-exemplars']))
    if args['--text']:
        analyze_text(args['--brand-tweets'], args['--exemplar-tweets'], args['--sample-tweets'],
                     args['--output'], args['--text-method'])
if __name__ == '__main__':
main()
|
tapilab/brandelion | brandelion/cli/analyze.py | compute_rarity_scores | python | def compute_rarity_scores(exemplars):
scores = defaultdict(lambda: 0.)
for followers in exemplars.values():
score = 1. / len(followers)
for f in followers:
scores[f] += score
return scores | Compute a score for each follower that is sum_i (1/n_i), where n_i is
the degree of the ith exemplar they follow.
>>> compute_rarity_scores({'e1':{1,2,3,4}, 'e2':{4,5}}).items()
[(1, 0.25), (2, 0.25), (3, 0.25), (4, 0.75), (5, 0.5)] | train | https://github.com/tapilab/brandelion/blob/40a5a5333cf704182c8666d1fbbbdadc7ff88546/brandelion/cli/analyze.py#L379-L390 | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Analyze social and linguistic brand data.
usage:
brandelion analyze --text --brand-tweets <file> --exemplar-tweets <file> --sample-tweets <file> --output <file> [--text-method <string>]
brandelion analyze --network --brand-followers <file> --exemplar-followers <file> --output <file> [--network-method <string> --min-followers <n> --max-followers <n> --sample-exemplars <p> --seed <s>]
Options
-h, --help
--brand-followers <file> File containing follower data for brand accounts.
--brand-tweets <file> File containing tweets from brand accounts.
--exemplar-followers <file> File containing follower data for exemplar accounts.
--exemplar-tweets <file> File containing tweets from exemplar accounts.
--sample-tweets <file> File containing tweets from representative sample of Twitter.
--text-method <string> Method to do text analysis [default: chi2]
--network-method <string> Method to do text analysis [default: jaccard]
-o, --output <file> File to store results
-t, --text Analyze text of tweets.
-n, --network Analyze followers.
--min-followers <n> Ignore exemplars that don't have at least n followers [default: 0]
--max-followers <n> Ignore exemplars that have more than least n followers [default: 1e10]
--sample-exemplars <p> Sample p percent of the exemplars, uniformly at random. [default: 100]
--seed <s> Seed for random sampling. [default: 12345]
"""
from collections import Counter, defaultdict
from docopt import docopt
import io
from itertools import groupby
import gzip
import json
import math
import numpy as np
import os
import re
import random
from scipy.sparse import vstack
import sys
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_selection import chi2 as skchi2
from sklearn import linear_model
from . import report
### TEXT ANALYSIS ###
def parse_json(json_file, include_date=False):
""" Yield screen_name, text tuples from a json file. """
if json_file[-2:] == 'gz':
fh = gzip.open(json_file, 'rt')
else:
fh = io.open(json_file, mode='rt', encoding='utf8')
for line in fh:
try:
jj = json.loads(line)
if type(jj) is not list:
jj = [jj]
for j in jj:
if include_date:
yield (j['user']['screen_name'].lower(), j['text'], j['created_at'])
else:
if 'full_text' in j: # get untruncated text if available.
yield (j['user']['screen_name'].lower(), j['full_text'])
else:
yield (j['user']['screen_name'].lower(), j['text'])
except Exception as e:
sys.stderr.write('skipping json error: %s\n' % e)
def extract_tweets(json_file):
""" Yield screen_name, string tuples, where the string is the
concatenation of all tweets of this user. """
for screen_name, tweet_iter in groupby(parse_json(json_file), lambda x: x[0]):
tweets = [t[1] for t in tweet_iter]
yield screen_name, ' '.join(tweets)
def preprocess(s):
    """Normalize a tweet for vectorization: strip @mentions and URLs, expand
    hashtags (#foo -> 'hashtagfoo hashtagfoo', doubled so hashtags survive
    bigram tokenization), drop RT markers and HTML entities, collapse
    whitespace, and lowercase.

    >>> preprocess('#hi there http://www.foo.com @you isn"t RT <>')
    'hashtaghi hashtaghi there isn"t <>'
    """
    s = re.sub(r'@\S+', ' ', s)  # drop @mentions entirely
    s = re.sub(r'http\S+', ' ', s)  # drop URLs entirely
    s = re.sub(r'#(\S+)', r'hashtag\1 hashtag\1', s)  # #foo -> hashtagfoo hashtagfoo
    # Fix: re.sub's 4th positional argument is *count*, not *flags*; the
    # original passed re.IGNORECASE positionally, which silently capped the
    # number of substitutions at 2 and did NOT ignore case.
    s = re.sub(r'\bRT\b', ' ', s, flags=re.IGNORECASE)
    s = re.sub(r'&[a-z]+;', ' ', s, flags=re.IGNORECASE)  # HTML entities, e.g. &amp;
    s = re.sub(r'\s+', ' ', s).strip()
    return s.lower()
def vectorize(json_file, vec, dofit=True):
""" Return a matrix where each row corresponds to a Twitter account, and
each column corresponds to the number of times a term is used by that
account. """
## CountVectorizer, efficiently.
screen_names = [x[0] for x in extract_tweets(json_file)]
if dofit:
X = vec.fit_transform(x[1] for x in extract_tweets(json_file))
else:
X = vec.transform(x[1] for x in extract_tweets(json_file))
return screen_names, X
def chi2(exemplars, samples, n=300):
y = np.array(([1.] * exemplars.shape[0]) + ([.0] * samples.shape[0]))
X = vstack((exemplars, samples)).tocsr()
clf = linear_model.LogisticRegression(penalty='l2')
clf.fit(X, y)
coef = clf.coef_[0]
chis, pvals = skchi2(X, y)
top_indices = chis.argsort()[::-1]
top_indices = [i for i in top_indices if coef[i] > 0]
for idx in range(len(coef)):
coef[idx] = 0.
for idx in top_indices[:n]:
coef[idx] = chis[idx]
return coef
def do_score(vec, coef):
return np.sum(coef[vec.nonzero()[1]]) / np.sum(coef)
def write_top_words(fname, vocab, scores):
outf = io.open(fname, 'w', encoding='utf8')
for i in np.argsort(scores)[::-1]:
if scores[i] > 0:
outf.write('%s %g\n' % (vocab[i], scores[i]))
outf.close()
def analyze_text(brand_tweets_file, exemplar_tweets_file, sample_tweets_file, outfile, analyze_fn):
analyze = getattr(sys.modules[__name__], analyze_fn)
vec = CountVectorizer(min_df=3, preprocessor=preprocess, ngram_range=(2, 2), binary=True)
_, exemplar_vectors = vectorize(exemplar_tweets_file, vec, dofit=True)
print('read tweets for %d exemplar accounts' % exemplar_vectors.shape[0])
brands, brand_vectors = vectorize(brand_tweets_file, vec, dofit=False)
print('read tweets for %d brand accounts' % brand_vectors.shape[0])
_, sample_vectors = vectorize(sample_tweets_file, vec, dofit=False)
print('read tweets for %d sample accounts' % sample_vectors.shape[0])
scores = analyze(exemplar_vectors, sample_vectors)
vocab = vec.get_feature_names()
write_top_words(outfile + '.topwords', vocab, scores)
print('top 10 ngrams:\n', '\n'.join(['%s=%.4g' % (vocab[i], scores[i]) for i in np.argsort(scores)[::-1][:10]]))
outf = open(outfile, 'wt')
for bi, brand_vec in enumerate(brand_vectors):
outf.write('%s %g\n' % (brands[bi], do_score(brand_vec, scores)))
outf.flush()
### FOLLOWER ANALYSIS ###
def get_twitter_handles(fname):
handles = set()
with open(fname, 'rt') as f:
for line in f:
handles.add(line[:90].split()[0].lower())
return handles
def read_follower_file(fname, min_followers=0, max_followers=1e10, blacklist=set()):
""" Read a file of follower information and return a dictionary mapping screen_name to a set of follower ids. """
result = {}
with open(fname, 'rt') as f:
for line in f:
parts = line.split()
if len(parts) > 3:
if parts[1].lower() not in blacklist:
followers = set(int(x) for x in parts[2:])
if len(followers) > min_followers and len(followers) <= max_followers:
result[parts[1].lower()] = followers
else:
print('skipping exemplar', parts[1].lower())
return result
def iter_follower_file(fname):
""" Iterator from a file of follower information and return a tuple of screen_name, follower ids.
File format is:
<iso timestamp> <screen_name> <follower_id1> <follower_ids2> ...
"""
with open(fname, 'rt') as f:
for line in f:
parts = line.split()
if len(parts) > 3:
yield parts[1].lower(), set(int(x) for x in parts[2:])
# JACCARD
def _jaccard(a, b):
""" Return the Jaccard similarity between two sets a and b. """
return 1. * len(a & b) / len(a | b)
def jaccard(brands, exemplars, weighted_avg=False, sqrt=False):
    """Score each brand by its average Jaccard similarity to the exemplars.

    brands: iterable of (screen_name, follower_set) pairs.
    exemplars: dict mapping screen_name -> follower_set.
    weighted_avg: weight each exemplar by 1 / |followers| instead of equally.
    sqrt: return the square root of each score.
    """
    exemplar_sets = list(exemplars.values())
    scores = {}
    for name, followers in brands:
        sims = [_jaccard(followers, other) for other in exemplar_sets]
        if weighted_avg:
            scores[name] = np.average(sims, weights=[1. / len(o) for o in exemplar_sets])
        else:
            scores[name] = sum(sims) / float(len(exemplars))
    if sqrt:
        return {name: math.sqrt(val) for name, val in scores.items()}
    return scores
def jaccard_weighted_avg(brands, exemplars):
    """Jaccard scores with exemplars weighted by 1 / follower count."""
    # Keyword arguments for consistency with the proportion_*/cosine_* wrappers.
    return jaccard(brands, exemplars, weighted_avg=True, sqrt=False)


def jaccard_sqrt_no_weighted_avg(brands, exemplars):
    """Square-rooted Jaccard scores with equally weighted exemplars."""
    return jaccard(brands, exemplars, weighted_avg=False, sqrt=True)


def jaccard_sqrt(brands, exemplars):
    """Square-rooted Jaccard scores with weighted exemplar averaging."""
    return jaccard(brands, exemplars, weighted_avg=True, sqrt=True)
def jaccard_merge(brands, exemplars):
    """Score each brand against one pseudo-account that merges every exemplar.

    All exemplar follower sets are unioned first; each brand is then scored
    by its Jaccard similarity to that single merged set.
    """
    merged = set().union(*exemplars.values())
    return {name: _jaccard(followers, merged) for name, followers in brands}
def compute_log_degrees(brands, exemplars):
    """Return {follower_id: 1 / log(Z + 1)} where Z is the number of brands followed.

    Only `brands` contributes to the degree counts; exemplars are deliberately
    excluded so that following many exemplars is not penalized.
    """
    degree = Counter()
    for follower_set in brands.values():
        degree.update(follower_set)
    for follower_id in degree:
        # The +1 reproduces the original smoothing (every count bumped by one),
        # so a degree-1 follower gets 1 / log(2) rather than 1 / log(1).
        degree[follower_id] = 1. / math.log(degree[follower_id] + 1)
    return degree
# PROPORTION
def _proportion(a, b):
""" Return the len(a & b) / len(a) """
return 1. * len(a & b) / len(a)
def proportion(brands, exemplars, weighted_avg=False, sqrt=False):
    """Score each brand by the average fraction of its followers who also
    follow each exemplar.

    brands: iterable of (screen_name, follower_set) pairs.
    exemplars: dict mapping screen_name -> follower_set.
    weighted_avg: weight each exemplar by 1 / |followers| instead of equally.
    sqrt: return the square root of each score.
    """
    exemplar_sets = list(exemplars.values())
    scores = {}
    for name, followers in brands:
        overlaps = [_proportion(followers, other) for other in exemplar_sets]
        if weighted_avg:
            scores[name] = np.average(overlaps, weights=[1. / len(o) for o in exemplar_sets])
        else:
            scores[name] = sum(overlaps) / float(len(exemplars))
    if sqrt:
        return {name: math.sqrt(val) for name, val in scores.items()}
    return scores
def proportion_weighted_avg(brands, exemplars):
    """Proportion scores with exemplars weighted by 1 / follower count."""
    return proportion(brands, exemplars, sqrt=False, weighted_avg=True)


def proportion_sqrt_no_weighted_avg(brands, exemplars):
    """Square-rooted proportion scores with equally weighted exemplars."""
    return proportion(brands, exemplars, sqrt=True, weighted_avg=False)


def proportion_sqrt(brands, exemplars):
    """Square-rooted proportion scores with weighted exemplar averaging."""
    return proportion(brands, exemplars, sqrt=True, weighted_avg=True)
def proportion_merge(brands, exemplars):
    """Score each brand by the fraction of its followers found in one merged
    exemplar pseudo-account (the union of all exemplar follower sets)."""
    merged = set().union(*exemplars.values())
    return {name: _proportion(followers, merged) for name, followers in brands}
# COSINE SIMILARITY
def _cosine(a, b):
""" Return the len(a & b) / len(a) """
return 1. * len(a & b) / (math.sqrt(len(a)) * math.sqrt(len(b)))
def cosine(brands, exemplars, weighted_avg=False, sqrt=False):
    """Score each brand by its average cosine similarity to the exemplars.

    brands: iterable of (screen_name, follower_set) pairs.
    exemplars: dict mapping screen_name -> follower_set.
    weighted_avg: weight each exemplar by 1 / |followers| instead of equally.
    sqrt: return the square root of each score.
    """
    exemplar_sets = list(exemplars.values())
    scores = {}
    for name, followers in brands:
        sims = [_cosine(followers, other) for other in exemplar_sets]
        if weighted_avg:
            scores[name] = np.average(sims, weights=[1. / len(o) for o in exemplar_sets])
        else:
            scores[name] = sum(sims) / float(len(exemplars))
    if sqrt:
        return {name: math.sqrt(val) for name, val in scores.items()}
    return scores
def cosine_weighted_avg(brands, exemplars):
    """Cosine scores with exemplars weighted by 1 / follower count."""
    return cosine(brands, exemplars, sqrt=False, weighted_avg=True)


def cosine_sqrt_no_weighted_avg(brands, exemplars):
    """Square-rooted cosine scores with equally weighted exemplars."""
    return cosine(brands, exemplars, sqrt=True, weighted_avg=False)


def cosine_sqrt(brands, exemplars):
    """Square-rooted cosine scores with weighted exemplar averaging."""
    return cosine(brands, exemplars, sqrt=True, weighted_avg=True)
def cosine_merge(brands, exemplars):
    """Score each brand by cosine similarity to one merged exemplar
    pseudo-account.

    All exemplar follower sets are unioned into a single set, and each brand
    is scored by the cosine similarity between its followers and that set.
    (The previous docstring incorrectly described this as a proportion —
    copy-pasted from proportion_merge.)
    """
    scores = {}
    exemplar_followers = set()
    for followers in exemplars.values():
        exemplar_followers |= followers
    for brand, followers in brands:
        scores[brand] = _cosine(followers, exemplar_followers)
    return scores
def adamic(brands, exemplars):
    """Deprecated: Adamic/Adar similarity between brand and exemplar followers.

    The implementation required loading every brand's follower set into
    memory at once, so it has been disabled: it prints a warning and returns
    None.  (The dead implementation that previously sat unreachable after the
    early `return` has been removed.)
    """
    print('adamic deprecated...requires loading all brands in memory.')
    return None
def rarity(brands, exemplars):
    """Average per-follower rarity score for each brand.

    A follower's rarity is sum_i (1 / n_i) over the exemplars they follow,
    where n_i is that exemplar's follower count; a brand's score is the mean
    rarity over its followers.
    """
    follower_scores = compute_rarity_scores(exemplars)
    return {name: sum(follower_scores[f] for f in followers) / len(followers)
            for name, followers in brands}
def compute_rarity_scores_log(exemplars):
    """Compute a score for each follower that is sum_i (1 / log(n_i)), where
    n_i is the follower count of the ith exemplar they follow.

    Each exemplar must have more than one follower: log(1) == 0 would cause
    a ZeroDivisionError.  (The previous docstring and doctest were copied
    from compute_rarity_scores and gave the non-log values.)

    >>> s = compute_rarity_scores_log({'e1': {1, 2}, 'e2': {2, 3}})
    >>> round(s[2] / s[1], 6)
    2.0
    """
    scores = defaultdict(lambda: 0.)
    for followers in exemplars.values():
        score = 1. / math.log(len(followers))
        for f in followers:
            scores[f] += score
    return scores
def rarity_log(brands, exemplars):
    """Average per-follower log-rarity score for each brand.

    A follower's score is sum_i (1 / log(n_i)) over the exemplars they
    follow, where n_i is that exemplar's follower count; a brand's score is
    the mean over its followers.
    """
    follower_scores = compute_rarity_scores_log(exemplars)
    return {name: sum(follower_scores[f] for f in followers) / len(followers)
            for name, followers in brands}
def mkdirs(filename):
    """Create the parent directory of `filename` if needed (delegates to report.mkdirs)."""
    report.mkdirs(os.path.dirname(filename))
def analyze_followers(brand_follower_file, exemplar_follower_file, outfile, analyze_fn,
                      min_followers, max_followers, sample_exemplars):
    """Score every brand against the exemplar accounts and write the results.

    analyze_fn: name of a scoring function in this module (e.g. 'jaccard',
    'cosine_merge'); it is resolved with getattr and called with
    (brands, exemplars).  One "<brand> <score>" pair per line is written to
    `outfile`.
    """
    brands = iter_follower_file(brand_follower_file)
    exemplars = read_follower_file(exemplar_follower_file, min_followers=min_followers,
                                   max_followers=max_followers,
                                   blacklist=get_twitter_handles(brand_follower_file))
    print('read follower data for %d exemplars' % (len(exemplars)))
    if sample_exemplars < 100:  # sample a subset of exemplars.
        # random.sample requires a sequence; passing the dict view directly
        # raises TypeError on Python 3.11+ (set support was removed).
        sampled_names = random.sample(list(exemplars), int(len(exemplars) * sample_exemplars / 100.))
        exemplars = dict([(k, exemplars[k]) for k in sampled_names])
        print('sampled %d exemplars' % (len(exemplars)))
    analyze = getattr(sys.modules[__name__], analyze_fn)
    scores = analyze(brands, exemplars)
    mkdirs(outfile)
    # Context manager guarantees the output file is flushed and closed even on error.
    with open(outfile, 'wt') as outf:
        for brand in sorted(scores):
            outf.write('%s %g\n' % (brand, scores[brand]))
    print('results written to', outfile)
def main():
    """Entry point: parse command-line arguments and dispatch to the requested analysis."""
    args = docopt(__doc__)
    print(args)
    # NOTE(review): docopt returns a dict containing every documented option,
    # so this membership test is always true; the seed is also passed as a
    # string (random.seed accepts any hashable) — confirm this is intended.
    if '--seed' in args:
        random.seed(args['--seed'])
    if args['--network']:
        # --max-followers may be given in scientific notation (default 1e10),
        # hence float() before int().
        analyze_followers(args['--brand-followers'], args['--exemplar-followers'], args['--output'], args['--network-method'],
                          int(args['--min-followers']), int(float(args['--max-followers'])), float(args['--sample-exemplars']))
    if args['--text']:
        analyze_text(args['--brand-tweets'], args['--exemplar-tweets'], args['--sample-tweets'], args['--output'], args['--text-method'])
if __name__ == '__main__':
main()
|
tapilab/brandelion | brandelion/cli/analyze.py | rarity | python | def rarity(brands, exemplars):
rarity = compute_rarity_scores(exemplars)
scores = {}
for brand, followers in brands:
scores[brand] = sum(rarity[f] for f in followers) / len(followers)
return scores | Compute a score for each follower that is sum_i (1/n_i), where n_i is the degree of the ith exemplar they follow.
The score for a brand is then the average of their follower scores. | train | https://github.com/tapilab/brandelion/blob/40a5a5333cf704182c8666d1fbbbdadc7ff88546/brandelion/cli/analyze.py#L393-L400 | [
"def compute_rarity_scores(exemplars):\n \"\"\" Compute a score for each follower that is sum_i (1/n_i), where n_i is\n the degree of the ith exemplar they follow.\n >>> compute_rarity_scores({'e1':{1,2,3,4}, 'e2':{4,5}}).items()\n [(1, 0.25), (2, 0.25), (3, 0.25), (4, 0.75), (5, 0.5)]\n \"\"\"\n ... | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Analyze social and linguistic brand data.
usage:
brandelion analyze --text --brand-tweets <file> --exemplar-tweets <file> --sample-tweets <file> --output <file> [--text-method <string>]
brandelion analyze --network --brand-followers <file> --exemplar-followers <file> --output <file> [--network-method <string> --min-followers <n> --max-followers <n> --sample-exemplars <p> --seed <s>]
Options
-h, --help
--brand-followers <file> File containing follower data for brand accounts.
--brand-tweets <file> File containing tweets from brand accounts.
--exemplar-followers <file> File containing follower data for exemplar accounts.
--exemplar-tweets <file> File containing tweets from exemplar accounts.
--sample-tweets <file> File containing tweets from representative sample of Twitter.
--text-method <string> Method to do text analysis [default: chi2]
--network-method <string> Method to do text analysis [default: jaccard]
-o, --output <file> File to store results
-t, --text Analyze text of tweets.
-n, --network Analyze followers.
--min-followers <n> Ignore exemplars that don't have at least n followers [default: 0]
--max-followers <n> Ignore exemplars that have more than least n followers [default: 1e10]
--sample-exemplars <p> Sample p percent of the exemplars, uniformly at random. [default: 100]
--seed <s> Seed for random sampling. [default: 12345]
"""
from collections import Counter, defaultdict
from docopt import docopt
import io
from itertools import groupby
import gzip
import json
import math
import numpy as np
import os
import re
import random
from scipy.sparse import vstack
import sys
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_selection import chi2 as skchi2
from sklearn import linear_model
from . import report
### TEXT ANALYSIS ###
def parse_json(json_file, include_date=False):
""" Yield screen_name, text tuples from a json file. """
if json_file[-2:] == 'gz':
fh = gzip.open(json_file, 'rt')
else:
fh = io.open(json_file, mode='rt', encoding='utf8')
for line in fh:
try:
jj = json.loads(line)
if type(jj) is not list:
jj = [jj]
for j in jj:
if include_date:
yield (j['user']['screen_name'].lower(), j['text'], j['created_at'])
else:
if 'full_text' in j: # get untruncated text if available.
yield (j['user']['screen_name'].lower(), j['full_text'])
else:
yield (j['user']['screen_name'].lower(), j['text'])
except Exception as e:
sys.stderr.write('skipping json error: %s\n' % e)
def extract_tweets(json_file):
""" Yield screen_name, string tuples, where the string is the
concatenation of all tweets of this user. """
for screen_name, tweet_iter in groupby(parse_json(json_file), lambda x: x[0]):
tweets = [t[1] for t in tweet_iter]
yield screen_name, ' '.join(tweets)
def preprocess(s):
"""
>>> preprocess('#hi there http://www.foo.com @you isn"t RT <>')
'hashtaghi hashtaghi there isn"t'
"""
# s = re.sub('@\S+', 'thisisamention', s) # map all mentions to thisisamention
s = re.sub(r'@\S+', ' ', s) # map all mentions to thisisamention
# s = re.sub('http\S+', 'http', s) # keep only http from urls
s = re.sub(r'http\S+', ' ', s) # keep only http from urls
s = re.sub(r'#(\S+)', r'hashtag\1 hashtag\1', s) # #foo -> hashtagfoo hashtagfoo (for retaining hashtags even using bigrams)
# s = re.sub(r'[0-9]+', '9', s) # 1234 -> 9
s = re.sub(r'\bRT\b', ' ', s, re.IGNORECASE)
s = re.sub(r'&[a-z]+;', ' ', s, re.IGNORECASE)
s = re.sub(r'\s+', ' ', s).strip()
return s.lower()
def vectorize(json_file, vec, dofit=True):
""" Return a matrix where each row corresponds to a Twitter account, and
each column corresponds to the number of times a term is used by that
account. """
## CountVectorizer, efficiently.
screen_names = [x[0] for x in extract_tweets(json_file)]
if dofit:
X = vec.fit_transform(x[1] for x in extract_tweets(json_file))
else:
X = vec.transform(x[1] for x in extract_tweets(json_file))
return screen_names, X
def chi2(exemplars, samples, n=300):
y = np.array(([1.] * exemplars.shape[0]) + ([.0] * samples.shape[0]))
X = vstack((exemplars, samples)).tocsr()
clf = linear_model.LogisticRegression(penalty='l2')
clf.fit(X, y)
coef = clf.coef_[0]
chis, pvals = skchi2(X, y)
top_indices = chis.argsort()[::-1]
top_indices = [i for i in top_indices if coef[i] > 0]
for idx in range(len(coef)):
coef[idx] = 0.
for idx in top_indices[:n]:
coef[idx] = chis[idx]
return coef
def do_score(vec, coef):
return np.sum(coef[vec.nonzero()[1]]) / np.sum(coef)
def write_top_words(fname, vocab, scores):
outf = io.open(fname, 'w', encoding='utf8')
for i in np.argsort(scores)[::-1]:
if scores[i] > 0:
outf.write('%s %g\n' % (vocab[i], scores[i]))
outf.close()
def analyze_text(brand_tweets_file, exemplar_tweets_file, sample_tweets_file, outfile, analyze_fn):
analyze = getattr(sys.modules[__name__], analyze_fn)
vec = CountVectorizer(min_df=3, preprocessor=preprocess, ngram_range=(2, 2), binary=True)
_, exemplar_vectors = vectorize(exemplar_tweets_file, vec, dofit=True)
print('read tweets for %d exemplar accounts' % exemplar_vectors.shape[0])
brands, brand_vectors = vectorize(brand_tweets_file, vec, dofit=False)
print('read tweets for %d brand accounts' % brand_vectors.shape[0])
_, sample_vectors = vectorize(sample_tweets_file, vec, dofit=False)
print('read tweets for %d sample accounts' % sample_vectors.shape[0])
scores = analyze(exemplar_vectors, sample_vectors)
vocab = vec.get_feature_names()
write_top_words(outfile + '.topwords', vocab, scores)
print('top 10 ngrams:\n', '\n'.join(['%s=%.4g' % (vocab[i], scores[i]) for i in np.argsort(scores)[::-1][:10]]))
outf = open(outfile, 'wt')
for bi, brand_vec in enumerate(brand_vectors):
outf.write('%s %g\n' % (brands[bi], do_score(brand_vec, scores)))
outf.flush()
### FOLLOWER ANALYSIS ###
def get_twitter_handles(fname):
handles = set()
with open(fname, 'rt') as f:
for line in f:
handles.add(line[:90].split()[0].lower())
return handles
def read_follower_file(fname, min_followers=0, max_followers=1e10, blacklist=set()):
""" Read a file of follower information and return a dictionary mapping screen_name to a set of follower ids. """
result = {}
with open(fname, 'rt') as f:
for line in f:
parts = line.split()
if len(parts) > 3:
if parts[1].lower() not in blacklist:
followers = set(int(x) for x in parts[2:])
if len(followers) > min_followers and len(followers) <= max_followers:
result[parts[1].lower()] = followers
else:
print('skipping exemplar', parts[1].lower())
return result
def iter_follower_file(fname):
""" Iterator from a file of follower information and return a tuple of screen_name, follower ids.
File format is:
<iso timestamp> <screen_name> <follower_id1> <follower_ids2> ...
"""
with open(fname, 'rt') as f:
for line in f:
parts = line.split()
if len(parts) > 3:
yield parts[1].lower(), set(int(x) for x in parts[2:])
# JACCARD
def _jaccard(a, b):
""" Return the Jaccard similarity between two sets a and b. """
return 1. * len(a & b) / len(a | b)
def jaccard(brands, exemplars, weighted_avg=False, sqrt=False):
""" Return the average Jaccard similarity between a brand's followers and the
followers of each exemplar. """
scores = {}
for brand, followers in brands:
if weighted_avg:
scores[brand] = np.average([_jaccard(followers, others) for others in exemplars.values()],
weights=[1. / len(others) for others in exemplars.values()])
else:
scores[brand] = 1. * sum(_jaccard(followers, others) for others in exemplars.values()) / len(exemplars)
# limit to exemplars with less than 40k followers: scores[brand] = 1. * sum(_jaccard(brands[brand], others) for others in exemplars.itervalues() if len(others) < 40000) / len(exemplars)
if sqrt:
scores = dict([(b, math.sqrt(s)) for b, s in scores.items()])
return scores
def jaccard_weighted_avg(brands, exemplars):
return jaccard(brands, exemplars, True, False)
def jaccard_sqrt_no_weighted_avg(brands, exemplars):
return jaccard(brands, exemplars, False, True)
def jaccard_sqrt(brands, exemplars):
return jaccard(brands, exemplars, weighted_avg=True, sqrt=True)
def jaccard_merge(brands, exemplars):
""" Return the average Jaccard similarity between a brand's followers and
the followers of each exemplar. We merge all exemplar followers into one
big pseudo-account."""
scores = {}
exemplar_followers = set()
for followers in exemplars.values():
exemplar_followers |= followers
for brand, followers in brands:
scores[brand] = _jaccard(followers, exemplar_followers)
return scores
def compute_log_degrees(brands, exemplars):
""" For each follower, let Z be the total number of brands they follow.
Return a dictionary of 1. / log(Z), for each follower.
"""
counts = Counter()
for followers in brands.values(): # + exemplars.values(): # Include exemplars in these counts? No, don't want to penalize people who follow many exemplars.
counts.update(followers)
counts.update(counts.keys()) # Add 1 to each count.
for k in counts:
counts[k] = 1. / math.log(counts[k])
return counts
# PROPORTION
def _proportion(a, b):
""" Return the len(a & b) / len(a) """
return 1. * len(a & b) / len(a)
def proportion(brands, exemplars, weighted_avg=False, sqrt=False):
"""
Return the proportion of a brand's followers who also follow an exemplar.
"""
scores = {}
for brand, followers in brands:
if weighted_avg:
scores[brand] = np.average([_proportion(followers, others) for others in exemplars.values()],
weights=[1. / len(others) for others in exemplars.values()])
else:
scores[brand] = 1. * sum(_proportion(followers, others) for others in exemplars.values()) / len(exemplars)
if sqrt:
scores = dict([(b, math.sqrt(s)) for b, s in scores.items()])
return scores
def proportion_weighted_avg(brands, exemplars):
return proportion(brands, exemplars, weighted_avg=True, sqrt=False)
def proportion_sqrt_no_weighted_avg(brands, exemplars):
return proportion(brands, exemplars, weighted_avg=False, sqrt=True)
def proportion_sqrt(brands, exemplars):
return proportion(brands, exemplars, weighted_avg=True, sqrt=True)
def proportion_merge(brands, exemplars):
""" Return the proportion of a brand's followers who also follower an
exemplar. We merge all exemplar followers into one big pseudo-account."""
scores = {}
exemplar_followers = set()
for followers in exemplars.values():
exemplar_followers |= followers
for brand, followers in brands:
scores[brand] = _proportion(followers, exemplar_followers)
return scores
# COSINE SIMILARITY
def _cosine(a, b):
""" Return the len(a & b) / len(a) """
return 1. * len(a & b) / (math.sqrt(len(a)) * math.sqrt(len(b)))
def cosine(brands, exemplars, weighted_avg=False, sqrt=False):
"""
Return the cosine similarity betwee a brand's followers and the exemplars.
"""
scores = {}
for brand, followers in brands:
if weighted_avg:
scores[brand] = np.average([_cosine(followers, others) for others in exemplars.values()],
weights=[1. / len(others) for others in exemplars.values()])
else:
scores[brand] = 1. * sum(_cosine(followers, others) for others in exemplars.values()) / len(exemplars)
if sqrt:
scores = dict([(b, math.sqrt(s)) for b, s in scores.items()])
return scores
def cosine_weighted_avg(brands, exemplars):
return cosine(brands, exemplars, weighted_avg=True, sqrt=False)
def cosine_sqrt_no_weighted_avg(brands, exemplars):
return cosine(brands, exemplars, weighted_avg=False, sqrt=True)
def cosine_sqrt(brands, exemplars):
return cosine(brands, exemplars, weighted_avg=True, sqrt=True)
def cosine_merge(brands, exemplars):
""" Return the proportion of a brand's followers who also follower an
exemplar. We merge all exemplar followers into one big pseudo-account."""
scores = {}
exemplar_followers = set()
for followers in exemplars.values():
exemplar_followers |= followers
for brand, followers in brands:
scores[brand] = _cosine(followers, exemplar_followers)
return scores
def adamic(brands, exemplars):
""" Return the average Adamic/Adar similarity between a brand's followers
and the followers of each exemplar. We approximate the number of followed
accounts per user by only considering those in our brand set."""
print('adamic deprecated...requires loading all brands in memory.')
return
degrees = compute_log_degrees(brands, exemplars)
scores = {}
exemplar_sums = dict([(exemplar, sum(degrees[z] for z in exemplars[exemplar])) for exemplar in exemplars])
for brand in sorted(brands):
brand_sum = sum(degrees[z] for z in brands[brand])
total = 0.
for exemplar in exemplars:
total += sum(degrees[z] for z in brands[brand] & exemplars[exemplar]) / (brand_sum + exemplar_sums[exemplar])
scores[brand] = total / len(exemplars)
return scores
def compute_rarity_scores(exemplars):
""" Compute a score for each follower that is sum_i (1/n_i), where n_i is
the degree of the ith exemplar they follow.
>>> compute_rarity_scores({'e1':{1,2,3,4}, 'e2':{4,5}}).items()
[(1, 0.25), (2, 0.25), (3, 0.25), (4, 0.75), (5, 0.5)]
"""
scores = defaultdict(lambda: 0.)
for followers in exemplars.values():
score = 1. / len(followers)
for f in followers:
scores[f] += score
return scores
def compute_rarity_scores_log(exemplars):
""" Compute a score for each follower that is sum_i (1/n_i), where n_i is
the degree of the ith exemplar they follow.
>>> compute_rarity_scores({'e1':{1,2,3,4}, 'e2':{4,5}}).items()
[(1, 0.25), (2, 0.25), (3, 0.25), (4, 0.75), (5, 0.5)]
"""
scores = defaultdict(lambda: 0.)
for followers in exemplars.values():
score = 1. / math.log(len(followers))
for f in followers:
scores[f] += score
return scores
def rarity_log(brands, exemplars):
""" Compute a score for each follower that is sum_i (1/log(n_i)), where n_i is the degree of the ith exemplar they follow.
The score for a brand is then the average of their follower scores."""
rarity = compute_rarity_scores_log(exemplars)
scores = {}
for brand, followers in brands:
scores[brand] = sum(rarity[f] for f in followers) / len(followers)
return scores
def mkdirs(filename):
report.mkdirs(os.path.dirname(filename))
def analyze_followers(brand_follower_file, exemplar_follower_file, outfile, analyze_fn,
min_followers, max_followers, sample_exemplars):
brands = iter_follower_file(brand_follower_file)
exemplars = read_follower_file(exemplar_follower_file, min_followers=min_followers, max_followers=max_followers, blacklist=get_twitter_handles(brand_follower_file))
print('read follower data for %d exemplars' % (len(exemplars)))
if sample_exemplars < 100: # sample a subset of exemplars.
exemplars = dict([(k, exemplars[k]) for k in random.sample(exemplars.keys(), int(len(exemplars) * sample_exemplars / 100.))])
print('sampled %d exemplars' % (len(exemplars)))
analyze = getattr(sys.modules[__name__], analyze_fn)
scores = analyze(brands, exemplars)
mkdirs(outfile)
outf = open(outfile, 'wt')
for brand in sorted(scores):
outf.write('%s %g\n' % (brand, scores[brand]))
outf.flush()
outf.close()
print('results written to', outfile)
def main():
args = docopt(__doc__)
print(args)
if '--seed' in args:
random.seed(args['--seed'])
if args['--network']:
analyze_followers(args['--brand-followers'], args['--exemplar-followers'], args['--output'], args['--network-method'],
int(args['--min-followers']), int(float(args['--max-followers'])), float(args['--sample-exemplars']))
if args['--text']:
analyze_text(args['--brand-tweets'], args['--exemplar-tweets'], args['--sample-tweets'], args['--output'], args['--text-method'])
if __name__ == '__main__':
main()
|
tapilab/brandelion | brandelion/cli/analyze.py | compute_rarity_scores_log | python | def compute_rarity_scores_log(exemplars):
scores = defaultdict(lambda: 0.)
for followers in exemplars.values():
score = 1. / math.log(len(followers))
for f in followers:
scores[f] += score
return scores | Compute a score for each follower that is sum_i (1/n_i), where n_i is
the degree of the ith exemplar they follow.
>>> compute_rarity_scores({'e1':{1,2,3,4}, 'e2':{4,5}}).items()
[(1, 0.25), (2, 0.25), (3, 0.25), (4, 0.75), (5, 0.5)] | train | https://github.com/tapilab/brandelion/blob/40a5a5333cf704182c8666d1fbbbdadc7ff88546/brandelion/cli/analyze.py#L403-L414 | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Analyze social and linguistic brand data.
usage:
brandelion analyze --text --brand-tweets <file> --exemplar-tweets <file> --sample-tweets <file> --output <file> [--text-method <string>]
brandelion analyze --network --brand-followers <file> --exemplar-followers <file> --output <file> [--network-method <string> --min-followers <n> --max-followers <n> --sample-exemplars <p> --seed <s>]
Options
-h, --help
--brand-followers <file> File containing follower data for brand accounts.
--brand-tweets <file> File containing tweets from brand accounts.
--exemplar-followers <file> File containing follower data for exemplar accounts.
--exemplar-tweets <file> File containing tweets from exemplar accounts.
--sample-tweets <file> File containing tweets from representative sample of Twitter.
--text-method <string> Method to do text analysis [default: chi2]
--network-method <string> Method to do text analysis [default: jaccard]
-o, --output <file> File to store results
-t, --text Analyze text of tweets.
-n, --network Analyze followers.
--min-followers <n> Ignore exemplars that don't have at least n followers [default: 0]
--max-followers <n> Ignore exemplars that have more than least n followers [default: 1e10]
--sample-exemplars <p> Sample p percent of the exemplars, uniformly at random. [default: 100]
--seed <s> Seed for random sampling. [default: 12345]
"""
from collections import Counter, defaultdict
from docopt import docopt
import io
from itertools import groupby
import gzip
import json
import math
import numpy as np
import os
import re
import random
from scipy.sparse import vstack
import sys
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_selection import chi2 as skchi2
from sklearn import linear_model
from . import report
### TEXT ANALYSIS ###
def parse_json(json_file, include_date=False):
""" Yield screen_name, text tuples from a json file. """
if json_file[-2:] == 'gz':
fh = gzip.open(json_file, 'rt')
else:
fh = io.open(json_file, mode='rt', encoding='utf8')
for line in fh:
try:
jj = json.loads(line)
if type(jj) is not list:
jj = [jj]
for j in jj:
if include_date:
yield (j['user']['screen_name'].lower(), j['text'], j['created_at'])
else:
if 'full_text' in j: # get untruncated text if available.
yield (j['user']['screen_name'].lower(), j['full_text'])
else:
yield (j['user']['screen_name'].lower(), j['text'])
except Exception as e:
sys.stderr.write('skipping json error: %s\n' % e)
def extract_tweets(json_file):
""" Yield screen_name, string tuples, where the string is the
concatenation of all tweets of this user. """
for screen_name, tweet_iter in groupby(parse_json(json_file), lambda x: x[0]):
tweets = [t[1] for t in tweet_iter]
yield screen_name, ' '.join(tweets)
def preprocess(s):
"""
>>> preprocess('#hi there http://www.foo.com @you isn"t RT <>')
'hashtaghi hashtaghi there isn"t'
"""
# s = re.sub('@\S+', 'thisisamention', s) # map all mentions to thisisamention
s = re.sub(r'@\S+', ' ', s) # map all mentions to thisisamention
# s = re.sub('http\S+', 'http', s) # keep only http from urls
s = re.sub(r'http\S+', ' ', s) # keep only http from urls
s = re.sub(r'#(\S+)', r'hashtag\1 hashtag\1', s) # #foo -> hashtagfoo hashtagfoo (for retaining hashtags even using bigrams)
# s = re.sub(r'[0-9]+', '9', s) # 1234 -> 9
s = re.sub(r'\bRT\b', ' ', s, re.IGNORECASE)
s = re.sub(r'&[a-z]+;', ' ', s, re.IGNORECASE)
s = re.sub(r'\s+', ' ', s).strip()
return s.lower()
def vectorize(json_file, vec, dofit=True):
""" Return a matrix where each row corresponds to a Twitter account, and
each column corresponds to the number of times a term is used by that
account. """
## CountVectorizer, efficiently.
screen_names = [x[0] for x in extract_tweets(json_file)]
if dofit:
X = vec.fit_transform(x[1] for x in extract_tweets(json_file))
else:
X = vec.transform(x[1] for x in extract_tweets(json_file))
return screen_names, X
def chi2(exemplars, samples, n=300):
y = np.array(([1.] * exemplars.shape[0]) + ([.0] * samples.shape[0]))
X = vstack((exemplars, samples)).tocsr()
clf = linear_model.LogisticRegression(penalty='l2')
clf.fit(X, y)
coef = clf.coef_[0]
chis, pvals = skchi2(X, y)
top_indices = chis.argsort()[::-1]
top_indices = [i for i in top_indices if coef[i] > 0]
for idx in range(len(coef)):
coef[idx] = 0.
for idx in top_indices[:n]:
coef[idx] = chis[idx]
return coef
def do_score(vec, coef):
return np.sum(coef[vec.nonzero()[1]]) / np.sum(coef)
def write_top_words(fname, vocab, scores):
outf = io.open(fname, 'w', encoding='utf8')
for i in np.argsort(scores)[::-1]:
if scores[i] > 0:
outf.write('%s %g\n' % (vocab[i], scores[i]))
outf.close()
def analyze_text(brand_tweets_file, exemplar_tweets_file, sample_tweets_file, outfile, analyze_fn):
analyze = getattr(sys.modules[__name__], analyze_fn)
vec = CountVectorizer(min_df=3, preprocessor=preprocess, ngram_range=(2, 2), binary=True)
_, exemplar_vectors = vectorize(exemplar_tweets_file, vec, dofit=True)
print('read tweets for %d exemplar accounts' % exemplar_vectors.shape[0])
brands, brand_vectors = vectorize(brand_tweets_file, vec, dofit=False)
print('read tweets for %d brand accounts' % brand_vectors.shape[0])
_, sample_vectors = vectorize(sample_tweets_file, vec, dofit=False)
print('read tweets for %d sample accounts' % sample_vectors.shape[0])
scores = analyze(exemplar_vectors, sample_vectors)
vocab = vec.get_feature_names()
write_top_words(outfile + '.topwords', vocab, scores)
print('top 10 ngrams:\n', '\n'.join(['%s=%.4g' % (vocab[i], scores[i]) for i in np.argsort(scores)[::-1][:10]]))
outf = open(outfile, 'wt')
for bi, brand_vec in enumerate(brand_vectors):
outf.write('%s %g\n' % (brands[bi], do_score(brand_vec, scores)))
outf.flush()
### FOLLOWER ANALYSIS ###
def get_twitter_handles(fname):
handles = set()
with open(fname, 'rt') as f:
for line in f:
handles.add(line[:90].split()[0].lower())
return handles
def read_follower_file(fname, min_followers=0, max_followers=1e10, blacklist=set()):
""" Read a file of follower information and return a dictionary mapping screen_name to a set of follower ids. """
result = {}
with open(fname, 'rt') as f:
for line in f:
parts = line.split()
if len(parts) > 3:
if parts[1].lower() not in blacklist:
followers = set(int(x) for x in parts[2:])
if len(followers) > min_followers and len(followers) <= max_followers:
result[parts[1].lower()] = followers
else:
print('skipping exemplar', parts[1].lower())
return result
def iter_follower_file(fname):
""" Iterator from a file of follower information and return a tuple of screen_name, follower ids.
File format is:
<iso timestamp> <screen_name> <follower_id1> <follower_ids2> ...
"""
with open(fname, 'rt') as f:
for line in f:
parts = line.split()
if len(parts) > 3:
yield parts[1].lower(), set(int(x) for x in parts[2:])
# JACCARD
def _jaccard(a, b):
""" Return the Jaccard similarity between two sets a and b. """
return 1. * len(a & b) / len(a | b)
def jaccard(brands, exemplars, weighted_avg=False, sqrt=False):
""" Return the average Jaccard similarity between a brand's followers and the
followers of each exemplar. """
scores = {}
for brand, followers in brands:
if weighted_avg:
scores[brand] = np.average([_jaccard(followers, others) for others in exemplars.values()],
weights=[1. / len(others) for others in exemplars.values()])
else:
scores[brand] = 1. * sum(_jaccard(followers, others) for others in exemplars.values()) / len(exemplars)
# limit to exemplars with less than 40k followers: scores[brand] = 1. * sum(_jaccard(brands[brand], others) for others in exemplars.itervalues() if len(others) < 40000) / len(exemplars)
if sqrt:
scores = dict([(b, math.sqrt(s)) for b, s in scores.items()])
return scores
def jaccard_weighted_avg(brands, exemplars):
return jaccard(brands, exemplars, True, False)
def jaccard_sqrt_no_weighted_avg(brands, exemplars):
return jaccard(brands, exemplars, False, True)
def jaccard_sqrt(brands, exemplars):
return jaccard(brands, exemplars, weighted_avg=True, sqrt=True)
def jaccard_merge(brands, exemplars):
""" Return the average Jaccard similarity between a brand's followers and
the followers of each exemplar. We merge all exemplar followers into one
big pseudo-account."""
scores = {}
exemplar_followers = set()
for followers in exemplars.values():
exemplar_followers |= followers
for brand, followers in brands:
scores[brand] = _jaccard(followers, exemplar_followers)
return scores
def compute_log_degrees(brands, exemplars):
""" For each follower, let Z be the total number of brands they follow.
Return a dictionary of 1. / log(Z), for each follower.
"""
counts = Counter()
for followers in brands.values(): # + exemplars.values(): # Include exemplars in these counts? No, don't want to penalize people who follow many exemplars.
counts.update(followers)
counts.update(counts.keys()) # Add 1 to each count.
for k in counts:
counts[k] = 1. / math.log(counts[k])
return counts
# PROPORTION
def _proportion(a, b):
""" Return the len(a & b) / len(a) """
return 1. * len(a & b) / len(a)
def proportion(brands, exemplars, weighted_avg=False, sqrt=False):
"""
Return the proportion of a brand's followers who also follow an exemplar.
"""
scores = {}
for brand, followers in brands:
if weighted_avg:
scores[brand] = np.average([_proportion(followers, others) for others in exemplars.values()],
weights=[1. / len(others) for others in exemplars.values()])
else:
scores[brand] = 1. * sum(_proportion(followers, others) for others in exemplars.values()) / len(exemplars)
if sqrt:
scores = dict([(b, math.sqrt(s)) for b, s in scores.items()])
return scores
def proportion_weighted_avg(brands, exemplars):
return proportion(brands, exemplars, weighted_avg=True, sqrt=False)
def proportion_sqrt_no_weighted_avg(brands, exemplars):
return proportion(brands, exemplars, weighted_avg=False, sqrt=True)
def proportion_sqrt(brands, exemplars):
return proportion(brands, exemplars, weighted_avg=True, sqrt=True)
def proportion_merge(brands, exemplars):
""" Return the proportion of a brand's followers who also follower an
exemplar. We merge all exemplar followers into one big pseudo-account."""
scores = {}
exemplar_followers = set()
for followers in exemplars.values():
exemplar_followers |= followers
for brand, followers in brands:
scores[brand] = _proportion(followers, exemplar_followers)
return scores
# COSINE SIMILARITY
def _cosine(a, b):
""" Return the len(a & b) / len(a) """
return 1. * len(a & b) / (math.sqrt(len(a)) * math.sqrt(len(b)))
def cosine(brands, exemplars, weighted_avg=False, sqrt=False):
"""
Return the cosine similarity betwee a brand's followers and the exemplars.
"""
scores = {}
for brand, followers in brands:
if weighted_avg:
scores[brand] = np.average([_cosine(followers, others) for others in exemplars.values()],
weights=[1. / len(others) for others in exemplars.values()])
else:
scores[brand] = 1. * sum(_cosine(followers, others) for others in exemplars.values()) / len(exemplars)
if sqrt:
scores = dict([(b, math.sqrt(s)) for b, s in scores.items()])
return scores
def cosine_weighted_avg(brands, exemplars):
return cosine(brands, exemplars, weighted_avg=True, sqrt=False)
def cosine_sqrt_no_weighted_avg(brands, exemplars):
return cosine(brands, exemplars, weighted_avg=False, sqrt=True)
def cosine_sqrt(brands, exemplars):
return cosine(brands, exemplars, weighted_avg=True, sqrt=True)
def cosine_merge(brands, exemplars):
""" Return the proportion of a brand's followers who also follower an
exemplar. We merge all exemplar followers into one big pseudo-account."""
scores = {}
exemplar_followers = set()
for followers in exemplars.values():
exemplar_followers |= followers
for brand, followers in brands:
scores[brand] = _cosine(followers, exemplar_followers)
return scores
def adamic(brands, exemplars):
""" Return the average Adamic/Adar similarity between a brand's followers
and the followers of each exemplar. We approximate the number of followed
accounts per user by only considering those in our brand set."""
print('adamic deprecated...requires loading all brands in memory.')
return
degrees = compute_log_degrees(brands, exemplars)
scores = {}
exemplar_sums = dict([(exemplar, sum(degrees[z] for z in exemplars[exemplar])) for exemplar in exemplars])
for brand in sorted(brands):
brand_sum = sum(degrees[z] for z in brands[brand])
total = 0.
for exemplar in exemplars:
total += sum(degrees[z] for z in brands[brand] & exemplars[exemplar]) / (brand_sum + exemplar_sums[exemplar])
scores[brand] = total / len(exemplars)
return scores
def compute_rarity_scores(exemplars):
""" Compute a score for each follower that is sum_i (1/n_i), where n_i is
the degree of the ith exemplar they follow.
>>> compute_rarity_scores({'e1':{1,2,3,4}, 'e2':{4,5}}).items()
[(1, 0.25), (2, 0.25), (3, 0.25), (4, 0.75), (5, 0.5)]
"""
scores = defaultdict(lambda: 0.)
for followers in exemplars.values():
score = 1. / len(followers)
for f in followers:
scores[f] += score
return scores
def rarity(brands, exemplars):
""" Compute a score for each follower that is sum_i (1/n_i), where n_i is the degree of the ith exemplar they follow.
The score for a brand is then the average of their follower scores."""
rarity = compute_rarity_scores(exemplars)
scores = {}
for brand, followers in brands:
scores[brand] = sum(rarity[f] for f in followers) / len(followers)
return scores
def rarity_log(brands, exemplars):
""" Compute a score for each follower that is sum_i (1/log(n_i)), where n_i is the degree of the ith exemplar they follow.
The score for a brand is then the average of their follower scores."""
rarity = compute_rarity_scores_log(exemplars)
scores = {}
for brand, followers in brands:
scores[brand] = sum(rarity[f] for f in followers) / len(followers)
return scores
def mkdirs(filename):
report.mkdirs(os.path.dirname(filename))
def analyze_followers(brand_follower_file, exemplar_follower_file, outfile, analyze_fn,
min_followers, max_followers, sample_exemplars):
brands = iter_follower_file(brand_follower_file)
exemplars = read_follower_file(exemplar_follower_file, min_followers=min_followers, max_followers=max_followers, blacklist=get_twitter_handles(brand_follower_file))
print('read follower data for %d exemplars' % (len(exemplars)))
if sample_exemplars < 100: # sample a subset of exemplars.
exemplars = dict([(k, exemplars[k]) for k in random.sample(exemplars.keys(), int(len(exemplars) * sample_exemplars / 100.))])
print('sampled %d exemplars' % (len(exemplars)))
analyze = getattr(sys.modules[__name__], analyze_fn)
scores = analyze(brands, exemplars)
mkdirs(outfile)
outf = open(outfile, 'wt')
for brand in sorted(scores):
outf.write('%s %g\n' % (brand, scores[brand]))
outf.flush()
outf.close()
print('results written to', outfile)
def main():
args = docopt(__doc__)
print(args)
if '--seed' in args:
random.seed(args['--seed'])
if args['--network']:
analyze_followers(args['--brand-followers'], args['--exemplar-followers'], args['--output'], args['--network-method'],
int(args['--min-followers']), int(float(args['--max-followers'])), float(args['--sample-exemplars']))
if args['--text']:
analyze_text(args['--brand-tweets'], args['--exemplar-tweets'], args['--sample-tweets'], args['--output'], args['--text-method'])
if __name__ == '__main__':
main()
|
tapilab/brandelion | brandelion/cli/analyze.py | rarity_log | python | def rarity_log(brands, exemplars):
rarity = compute_rarity_scores_log(exemplars)
scores = {}
for brand, followers in brands:
scores[brand] = sum(rarity[f] for f in followers) / len(followers)
return scores | Compute a score for each follower that is sum_i (1/log(n_i)), where n_i is the degree of the ith exemplar they follow.
The score for a brand is then the average of their follower scores. | train | https://github.com/tapilab/brandelion/blob/40a5a5333cf704182c8666d1fbbbdadc7ff88546/brandelion/cli/analyze.py#L417-L424 | [
"def compute_rarity_scores_log(exemplars):\n \"\"\" Compute a score for each follower that is sum_i (1/n_i), where n_i is\n the degree of the ith exemplar they follow.\n >>> compute_rarity_scores({'e1':{1,2,3,4}, 'e2':{4,5}}).items()\n [(1, 0.25), (2, 0.25), (3, 0.25), (4, 0.75), (5, 0.5)]\n \"\"\"\n... | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Analyze social and linguistic brand data.
usage:
brandelion analyze --text --brand-tweets <file> --exemplar-tweets <file> --sample-tweets <file> --output <file> [--text-method <string>]
brandelion analyze --network --brand-followers <file> --exemplar-followers <file> --output <file> [--network-method <string> --min-followers <n> --max-followers <n> --sample-exemplars <p> --seed <s>]
Options
-h, --help
--brand-followers <file> File containing follower data for brand accounts.
--brand-tweets <file> File containing tweets from brand accounts.
--exemplar-followers <file> File containing follower data for exemplar accounts.
--exemplar-tweets <file> File containing tweets from exemplar accounts.
--sample-tweets <file> File containing tweets from representative sample of Twitter.
--text-method <string> Method to do text analysis [default: chi2]
--network-method <string> Method to do text analysis [default: jaccard]
-o, --output <file> File to store results
-t, --text Analyze text of tweets.
-n, --network Analyze followers.
--min-followers <n> Ignore exemplars that don't have at least n followers [default: 0]
--max-followers <n> Ignore exemplars that have more than least n followers [default: 1e10]
--sample-exemplars <p> Sample p percent of the exemplars, uniformly at random. [default: 100]
--seed <s> Seed for random sampling. [default: 12345]
"""
from collections import Counter, defaultdict
from docopt import docopt
import io
from itertools import groupby
import gzip
import json
import math
import numpy as np
import os
import re
import random
from scipy.sparse import vstack
import sys
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_selection import chi2 as skchi2
from sklearn import linear_model
from . import report
### TEXT ANALYSIS ###
def parse_json(json_file, include_date=False):
    """ Yield screen_name, text tuples from a json file.

    json_file: path to a JSON-lines file of tweets; a name ending in 'gz'
        selects gzip decompression. Each line may hold a single tweet object
        or a list of tweet objects.
    include_date: when True, yield (screen_name, text, created_at) triples
        instead of (screen_name, text) pairs.
    Lines that fail to parse are skipped with a note written to stderr.
    """
    if json_file[-2:] == 'gz':
        fh = gzip.open(json_file, 'rt')
    else:
        fh = io.open(json_file, mode='rt', encoding='utf8')
    for line in fh:
        try:
            jj = json.loads(line)
            # Normalize: treat a single object as a one-element list.
            if type(jj) is not list:
                jj = [jj]
            for j in jj:
                if include_date:
                    yield (j['user']['screen_name'].lower(), j['text'], j['created_at'])
                else:
                    if 'full_text' in j:  # get untruncated text if available.
                        yield (j['user']['screen_name'].lower(), j['full_text'])
                    else:
                        yield (j['user']['screen_name'].lower(), j['text'])
        except Exception as e:
            # Best-effort parsing: report and continue on malformed lines.
            sys.stderr.write('skipping json error: %s\n' % e)
def extract_tweets(json_file):
    """ Yield screen_name, string tuples, where the string is the
    concatenation of all tweets of this user.

    NOTE(review): groupby only merges *consecutive* runs, so this assumes the
    file is grouped by screen_name; an interleaved file would yield the same
    user more than once.
    """
    for screen_name, tweet_iter in groupby(parse_json(json_file), lambda x: x[0]):
        tweets = [t[1] for t in tweet_iter]
        yield screen_name, ' '.join(tweets)
def preprocess(s):
    """Normalize a tweet string for bag-of-words vectorization.

    Strips @-mentions and URLs, duplicates each hashtag as
    ``hashtagfoo hashtagfoo`` (so hashtags survive bigram tokenization),
    removes RT markers and HTML entities case-insensitively, collapses
    whitespace, and lowercases the result.

    >>> preprocess('#hi there http://www.foo.com @you isn"t RT <>')
    'hashtaghi hashtaghi there isn"t <>'
    """
    s = re.sub(r'@\S+', ' ', s)    # drop @-mentions
    s = re.sub(r'http\S+', ' ', s)  # drop URLs
    # #foo -> hashtagfoo hashtagfoo (duplicated so hashtags survive bigrams)
    s = re.sub(r'#(\S+)', r'hashtag\1 hashtag\1', s)
    # FIX: re.IGNORECASE was previously passed as the positional *count*
    # argument of re.sub, so the flag was silently ignored; pass flags= instead.
    s = re.sub(r'\bRT\b', ' ', s, flags=re.IGNORECASE)
    s = re.sub(r'&[a-z]+;', ' ', s, flags=re.IGNORECASE)  # drop HTML entities
    s = re.sub(r'\s+', ' ', s).strip()
    return s.lower()
def vectorize(json_file, vec, dofit=True):
    """ Return a matrix where each row corresponds to a Twitter account, and
    each column corresponds to the number of times a term is used by that
    account.

    json_file: tweets in JSON-lines format (optionally gzipped).
    vec: a sklearn CountVectorizer; its vocabulary is fit when dofit is True,
        otherwise the existing vocabulary is reused.
    Returns (screen_names, X) with screen_names[i] matching row i of X.
    """
    ## CountVectorizer, efficiently.
    # The file is streamed twice (names first, then text) so the concatenated
    # tweet strings never all live in memory at once.
    screen_names = [x[0] for x in extract_tweets(json_file)]
    if dofit:
        X = vec.fit_transform(x[1] for x in extract_tweets(json_file))
    else:
        X = vec.transform(x[1] for x in extract_tweets(json_file))
    return screen_names, X
def chi2(exemplars, samples, n=300):
    """Select the n terms most associated with exemplars (vs. samples) by the
    chi-squared statistic, restricted to terms whose logistic-regression
    coefficient is positive (i.e. indicative of the exemplar class).

    exemplars, samples: sparse term-count matrices with identical columns.
    Returns a dense vector holding the chi2 score for the selected terms and
    0 everywhere else.
    """
    # Labels: 1 for exemplar rows, 0 for sample rows.
    y = np.array(([1.] * exemplars.shape[0]) + ([.0] * samples.shape[0]))
    X = vstack((exemplars, samples)).tocsr()
    # The classifier is fit only to obtain the *sign* of each term's weight.
    clf = linear_model.LogisticRegression(penalty='l2')
    clf.fit(X, y)
    coef = clf.coef_[0]
    chis, pvals = skchi2(X, y)
    top_indices = chis.argsort()[::-1]
    # Drop terms anti-correlated with the exemplar class.
    top_indices = [i for i in top_indices if coef[i] > 0]
    # Reuse `coef` as the output vector: zero it, then fill in chi2 scores.
    for idx in range(len(coef)):
        coef[idx] = 0.
    for idx in top_indices[:n]:
        coef[idx] = chis[idx]
    return coef
def do_score(vec, coef):
    """Score one account: the fraction of total coefficient mass covered by
    the account's nonzero terms."""
    cols = vec.nonzero()[1]
    return coef[cols].sum() / coef.sum()
def write_top_words(fname, vocab, scores):
    """Write "term score" lines to *fname*, highest-scoring terms first;
    terms with non-positive scores are omitted."""
    with io.open(fname, 'w', encoding='utf8') as fh:
        for idx in np.argsort(scores)[::-1]:
            if scores[idx] > 0:
                fh.write('%s %g\n' % (vocab[idx], scores[idx]))
def analyze_text(brand_tweets_file, exemplar_tweets_file, sample_tweets_file, outfile, analyze_fn):
    """Score each brand account by the textual similarity of its tweets to the
    exemplar tweets, relative to a background sample.

    brand_tweets_file / exemplar_tweets_file / sample_tweets_file: JSON-lines
        tweet files (optionally gzipped).
    outfile: destination for "screen_name score" lines; the selected ngrams
        are also written to outfile + '.topwords'.
    analyze_fn: name of a scoring function in this module (e.g. 'chi2').
    """
    analyze = getattr(sys.modules[__name__], analyze_fn)
    vec = CountVectorizer(min_df=3, preprocessor=preprocess, ngram_range=(2, 2), binary=True)
    _, exemplar_vectors = vectorize(exemplar_tweets_file, vec, dofit=True)
    print('read tweets for %d exemplar accounts' % exemplar_vectors.shape[0])
    brands, brand_vectors = vectorize(brand_tweets_file, vec, dofit=False)
    print('read tweets for %d brand accounts' % brand_vectors.shape[0])
    _, sample_vectors = vectorize(sample_tweets_file, vec, dofit=False)
    print('read tweets for %d sample accounts' % sample_vectors.shape[0])
    scores = analyze(exemplar_vectors, sample_vectors)
    vocab = vec.get_feature_names()
    write_top_words(outfile + '.topwords', vocab, scores)
    print('top 10 ngrams:\n', '\n'.join(['%s=%.4g' % (vocab[i], scores[i]) for i in np.argsort(scores)[::-1][:10]]))
    outf = open(outfile, 'wt')
    for bi, brand_vec in enumerate(brand_vectors):
        outf.write('%s %g\n' % (brands[bi], do_score(brand_vec, scores)))
    outf.flush()
    outf.close()  # FIX: the output file was previously left open
### FOLLOWER ANALYSIS ###
def get_twitter_handles(fname):
    """Return the set of lowercased first tokens, one per line of *fname*.

    NOTE(review): only the first 90 characters of each line are examined and
    the first whitespace token is taken as the handle — presumably *fname* is
    a list of handles, one per line. If it is a follower file (whose lines
    begin with a timestamp) this would collect timestamps instead; verify
    against callers. A blank line would raise IndexError here.
    """
    handles = set()
    with open(fname, 'rt') as f:
        for line in f:
            handles.add(line[:90].split()[0].lower())
    return handles
def read_follower_file(fname, min_followers=0, max_followers=1e10, blacklist=set()):
    """Read a follower file and return {screen_name: follower_id_set}.

    Each useful line looks like '<timestamp> <screen_name> <id1> <id2> ...';
    lines with fewer than four fields are ignored. Accounts in *blacklist*
    are reported and skipped, and only accounts whose follower count lies in
    (min_followers, max_followers] are kept.
    """
    result = {}
    with open(fname, 'rt') as fh:
        for line in fh:
            tokens = line.split()
            if len(tokens) <= 3:
                continue
            name = tokens[1].lower()
            if name in blacklist:
                print('skipping exemplar', name)
                continue
            follower_ids = set(int(t) for t in tokens[2:])
            if min_followers < len(follower_ids) <= max_followers:
                result[name] = follower_ids
    return result
def iter_follower_file(fname):
    """Yield (screen_name, follower_id_set) tuples from a follower file.

    File format is:
    <iso timestamp> <screen_name> <follower_id1> <follower_ids2> ...
    Lines with fewer than four whitespace-separated fields are skipped.
    """
    with open(fname, 'rt') as fh:
        for line in fh:
            fields = line.split()
            if len(fields) <= 3:
                continue
            yield fields[1].lower(), {int(f) for f in fields[2:]}
# JACCARD
def _jaccard(a, b):
""" Return the Jaccard similarity between two sets a and b. """
return 1. * len(a & b) / len(a | b)
def jaccard(brands, exemplars, weighted_avg=False, sqrt=False):
    """Average Jaccard similarity between each brand's followers and every
    exemplar's followers.

    brands: iterable of (screen_name, follower_set) pairs.
    exemplars: dict mapping exemplar name -> follower set.
    weighted_avg: weight each exemplar by 1/|followers| instead of uniformly.
    sqrt: return the square root of each brand's score.
    Returns {brand_screen_name: score}.
    """
    scores = {}
    exemplar_sets = list(exemplars.values())
    for name, follower_set in brands:
        sims = [_jaccard(follower_set, ex) for ex in exemplar_sets]
        if weighted_avg:
            scores[name] = np.average(sims, weights=[1. / len(ex) for ex in exemplar_sets])
        else:
            scores[name] = 1. * sum(sims) / len(exemplars)
    if sqrt:
        scores = {name: math.sqrt(val) for name, val in scores.items()}
    return scores
def jaccard_weighted_avg(brands, exemplars):
    # Jaccard score with exemplars weighted by 1/|followers|.
    return jaccard(brands, exemplars, True, False)
def jaccard_sqrt_no_weighted_avg(brands, exemplars):
    # Square-rooted Jaccard score, uniform exemplar weighting.
    return jaccard(brands, exemplars, False, True)
def jaccard_sqrt(brands, exemplars):
    # Square-rooted Jaccard score with 1/|followers| exemplar weighting.
    return jaccard(brands, exemplars, weighted_avg=True, sqrt=True)
def jaccard_merge(brands, exemplars):
    """Jaccard similarity of each brand's followers against the union of all
    exemplar followers (exemplars merged into one big pseudo-account)."""
    merged = set()
    for ex_followers in exemplars.values():
        merged.update(ex_followers)
    scores = {}
    for name, follower_set in brands:
        # inline Jaccard: |intersection| / |union|
        scores[name] = 1. * len(follower_set & merged) / len(follower_set | merged)
    return scores
def compute_log_degrees(brands, exemplars):
    """Return {follower_id: 1/log(Z+1)} where Z is the number of brands the
    follower follows.

    Only brand followings are counted — exemplar followings are deliberately
    excluded so that following many exemplars is not penalized. One is added
    to every count so the logarithm is never log(1) = 0.
    """
    degree = Counter()
    for follower_set in brands.values():
        degree.update(follower_set)
    degree.update(degree.keys())  # +1 smoothing for every follower
    for follower_id in degree:
        degree[follower_id] = 1. / math.log(degree[follower_id])
    return degree
# PROPORTION
def _proportion(a, b):
""" Return the len(a & b) / len(a) """
return 1. * len(a & b) / len(a)
def proportion(brands, exemplars, weighted_avg=False, sqrt=False):
    """
    Return the proportion of a brand's followers who also follow an exemplar.

    brands: iterable of (screen_name, follower_set) pairs.
    exemplars: dict mapping exemplar name -> follower set.
    weighted_avg: if True, weight each exemplar by 1/|followers|.
    sqrt: if True, return the square root of each score.
    Returns {brand_screen_name: score}.
    """
    scores = {}
    for brand, followers in brands:
        if weighted_avg:
            # Exemplars with fewer followers receive proportionally more weight.
            scores[brand] = np.average([_proportion(followers, others) for others in exemplars.values()],
                                       weights=[1. / len(others) for others in exemplars.values()])
        else:
            scores[brand] = 1. * sum(_proportion(followers, others) for others in exemplars.values()) / len(exemplars)
    if sqrt:
        scores = dict([(b, math.sqrt(s)) for b, s in scores.items()])
    return scores
def proportion_weighted_avg(brands, exemplars):
    # Proportion score with exemplars weighted by 1/|followers|.
    return proportion(brands, exemplars, weighted_avg=True, sqrt=False)
def proportion_sqrt_no_weighted_avg(brands, exemplars):
    # Square-rooted proportion score, uniform exemplar weighting.
    return proportion(brands, exemplars, weighted_avg=False, sqrt=True)
def proportion_sqrt(brands, exemplars):
    # Square-rooted proportion score with 1/|followers| exemplar weighting.
    return proportion(brands, exemplars, weighted_avg=True, sqrt=True)
def proportion_merge(brands, exemplars):
    """ Return the proportion of a brand's followers who also follow an
    exemplar. We merge all exemplar followers into one big pseudo-account."""
    scores = {}
    exemplar_followers = set()
    # Union of every exemplar's follower set.
    for followers in exemplars.values():
        exemplar_followers |= followers
    for brand, followers in brands:
        scores[brand] = _proportion(followers, exemplar_followers)
    return scores
# COSINE SIMILARITY
def _cosine(a, b):
    """ Return the set cosine similarity:
    len(a & b) / (sqrt(len(a)) * sqrt(len(b))). """
    return 1. * len(a & b) / (math.sqrt(len(a)) * math.sqrt(len(b)))
def cosine(brands, exemplars, weighted_avg=False, sqrt=False):
    """
    Return the cosine similarity between a brand's followers and the exemplars.

    brands: iterable of (screen_name, follower_set) pairs.
    exemplars: dict mapping exemplar name -> follower set.
    weighted_avg: if True, weight each exemplar by 1/|followers|.
    sqrt: if True, return the square root of each score.
    Returns {brand_screen_name: score}.
    """
    scores = {}
    for brand, followers in brands:
        if weighted_avg:
            # Exemplars with fewer followers receive proportionally more weight.
            scores[brand] = np.average([_cosine(followers, others) for others in exemplars.values()],
                                       weights=[1. / len(others) for others in exemplars.values()])
        else:
            scores[brand] = 1. * sum(_cosine(followers, others) for others in exemplars.values()) / len(exemplars)
    if sqrt:
        scores = dict([(b, math.sqrt(s)) for b, s in scores.items()])
    return scores
def cosine_weighted_avg(brands, exemplars):
    # Cosine score with exemplars weighted by 1/|followers|.
    return cosine(brands, exemplars, weighted_avg=True, sqrt=False)
def cosine_sqrt_no_weighted_avg(brands, exemplars):
    # Square-rooted cosine score, uniform exemplar weighting.
    return cosine(brands, exemplars, weighted_avg=False, sqrt=True)
def cosine_sqrt(brands, exemplars):
    # Square-rooted cosine score with 1/|followers| exemplar weighting.
    return cosine(brands, exemplars, weighted_avg=True, sqrt=True)
def cosine_merge(brands, exemplars):
    """ Return the cosine similarity between each brand's followers and the
    union of all exemplar followers, merged into one big pseudo-account."""
    scores = {}
    exemplar_followers = set()
    # Union of every exemplar's follower set.
    for followers in exemplars.values():
        exemplar_followers |= followers
    for brand, followers in brands:
        scores[brand] = _cosine(followers, exemplar_followers)
    return scores
def adamic(brands, exemplars):
    """ Return the average Adamic/Adar similarity between a brand's followers
    and the followers of each exemplar. We approximate the number of followed
    accounts per user by only considering those in our brand set.

    Deprecated: prints a notice and returns None immediately; everything
    below the early return is dead code kept for reference (it indexes
    `brands` like a dict, which no longer fits the streaming iterator).
    """
    print('adamic deprecated...requires loading all brands in memory.')
    return
    # --- unreachable below this point ---
    degrees = compute_log_degrees(brands, exemplars)
    scores = {}
    exemplar_sums = dict([(exemplar, sum(degrees[z] for z in exemplars[exemplar])) for exemplar in exemplars])
    for brand in sorted(brands):
        brand_sum = sum(degrees[z] for z in brands[brand])
        total = 0.
        for exemplar in exemplars:
            total += sum(degrees[z] for z in brands[brand] & exemplars[exemplar]) / (brand_sum + exemplar_sums[exemplar])
        scores[brand] = total / len(exemplars)
    return scores
def compute_rarity_scores(exemplars):
    """Assign each follower the score sum_i (1/n_i) over the exemplars they
    follow, where n_i is exemplar i's follower count.

    >>> compute_rarity_scores({'e1':{1,2,3,4}, 'e2':{4,5}}).items()
    [(1, 0.25), (2, 0.25), (3, 0.25), (4, 0.75), (5, 0.5)]
    """
    totals = defaultdict(lambda: 0.)
    for follower_set in exemplars.values():
        weight = 1. / len(follower_set)
        for follower_id in follower_set:
            totals[follower_id] += weight
    return totals
def rarity(brands, exemplars):
    """ Compute a score for each follower that is sum_i (1/n_i), where n_i is the degree of the ith exemplar they follow.
    The score for a brand is then the average of their follower scores."""
    rarity = compute_rarity_scores(exemplars)
    scores = {}
    for brand, followers in brands:
        # Followers absent from every exemplar contribute 0 (rarity is a
        # defaultdict). A brand with an empty follower set would divide by zero.
        scores[brand] = sum(rarity[f] for f in followers) / len(followers)
    return scores
def compute_rarity_scores_log(exemplars):
    """Assign each follower the score sum_i (1/log(n_i)) over the exemplars
    they follow, where n_i is exemplar i's follower count.

    FIX: the docstring previously duplicated compute_rarity_scores (1/n_i),
    and an exemplar with exactly one follower crashed with ZeroDivisionError
    because log(1) == 0. Such exemplars are now skipped.

    >>> sorted(compute_rarity_scores_log({'e': {1, 2}}).items())[0][0]
    1
    """
    scores = defaultdict(lambda: 0.)
    for followers in exemplars.values():
        if len(followers) < 2:
            # guard: log(1) == 0 would divide by zero
            continue
        score = 1. / math.log(len(followers))
        for f in followers:
            scores[f] += score
    return scores
def mkdirs(filename):
    # Ensure the parent directory of *filename* exists (delegates to report.mkdirs).
    report.mkdirs(os.path.dirname(filename))
def analyze_followers(brand_follower_file, exemplar_follower_file, outfile, analyze_fn,
                      min_followers, max_followers, sample_exemplars):
    """Score brands by follower overlap with exemplar accounts and write the
    results to *outfile* as "screen_name score" lines.

    analyze_fn: name of a scoring function in this module (e.g. 'jaccard').
    min_followers/max_followers: keep exemplars with a follower count in
        (min_followers, max_followers].
    sample_exemplars: percentage of exemplars to keep (uniform sample when
        below 100).
    """
    brands = iter_follower_file(brand_follower_file)
    exemplars = read_follower_file(exemplar_follower_file, min_followers=min_followers, max_followers=max_followers, blacklist=get_twitter_handles(brand_follower_file))
    print('read follower data for %d exemplars' % (len(exemplars)))
    if sample_exemplars < 100:  # sample a subset of exemplars.
        # FIX: random.sample requires a sequence; passing the dict keys view
        # raises TypeError on Python 3.11+.
        kept = random.sample(list(exemplars), int(len(exemplars) * sample_exemplars / 100.))
        exemplars = dict([(k, exemplars[k]) for k in kept])
        print('sampled %d exemplars' % (len(exemplars)))
    analyze = getattr(sys.modules[__name__], analyze_fn)
    scores = analyze(brands, exemplars)
    mkdirs(outfile)
    outf = open(outfile, 'wt')
    for brand in sorted(scores):
        outf.write('%s %g\n' % (brand, scores[brand]))
    outf.flush()
    outf.close()
    print('results written to', outfile)
def main():
    """Command-line entry point: dispatch to network and/or text analysis
    according to the docopt arguments."""
    args = docopt(__doc__)
    print(args)
    if '--seed' in args:
        # NOTE(review): docopt always includes the '--seed' key (it has a
        # default), so this condition is always true and seeding happens
        # unconditionally with the string value — presumably intended; confirm.
        random.seed(args['--seed'])
    if args['--network']:
        analyze_followers(args['--brand-followers'], args['--exemplar-followers'], args['--output'], args['--network-method'],
                          int(args['--min-followers']), int(float(args['--max-followers'])), float(args['--sample-exemplars']))
    if args['--text']:
        analyze_text(args['--brand-tweets'], args['--exemplar-tweets'], args['--sample-tweets'], args['--output'], args['--text-method'])
if __name__ == '__main__':
main()
|
tapilab/brandelion | brandelion/cli/collect.py | iter_lines | python | def iter_lines(filename):
with open(filename, 'rt') as idfile:
for line in idfile:
screen_name = line.strip()
if len(screen_name) > 0:
yield screen_name.split()[0] | Iterate over screen names in a file, one per line. | train | https://github.com/tapilab/brandelion/blob/40a5a5333cf704182c8666d1fbbbdadc7ff88546/brandelion/cli/collect.py#L47-L53 | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Collect Twitter data for brands.
usage:
brandelion collect --tweets --input <file> --output <file> --max=<N>
brandelion collect --followers --input <file> --output <file> --max=<N> [--loop]
brandelion collect --exemplars --query <string> --output <file>
Options
-h, --help
-i, --input <file> File containing list of Twitter accounts, one per line.
-l, --loop If true, keep looping to collect more data continuously.
-o, --output <file> File to store results
-t, --tweets Fetch tweets.
-f, --followers Fetch followers
-e, --exemplars Fetch exemplars from Twitter lists
-q, --query <string> A single string used to search for Twitter lists.
-m, --max=<N> Maximum number of followers or tweets to collect per account [default: 50000].
"""
from collections import Counter
import datetime
from docopt import docopt
import gzip
import io
import json
import re
import requests
import sys
import time
import traceback
import requests
import twutil
import json
import time
##import config from init.py:
from .. import config
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError as GoogleHttpError
def fetch_followers(account_file, outfile, limit, do_loop):
    """ Fetch up to limit followers for each Twitter account in
    account_file and write one gzipped line per account in the format:
    <iso timestamp> <screen_name> <follower_id_1> <follower_id_2> ...

    When do_loop is True, collection repeats indefinitely, each pass writing
    to a new file with a .1, .2, ... numeric suffix.
    """
    print('Fetching followers for accounts in %s' % account_file)
    niters = 1
    while True:
        outf = gzip.open(outfile, 'wt')
        for screen_name in iter_lines(account_file):
            timestamp = datetime.datetime.now().isoformat()
            print('collecting followers for', screen_name)
            followers = twutil.collect.followers_for_screen_name(screen_name, limit)
            if len(followers) > 0:
                outf.write('%s %s %s\n' % (timestamp, screen_name, ' '.join(followers)))
                outf.flush()
            else:
                # Empty follower list is treated as an unknown/suspended user.
                print('unknown user', screen_name)
        outf.close()
        if not do_loop:
            return
        else:
            # First pass appends '.1'; later passes swap the numeric suffix.
            if niters == 1:
                outfile = '%s.%d' % (outfile, niters)
            else:
                outfile = outfile[:outfile.rindex('.')] + '.%d' % niters
            niters += 1
def fetch_tweets(account_file, outfile, limit):
    """ Fetch up to limit tweets for each account in account_file and write
    them to outfile, one JSON object per line.

    FIX: the output file is now closed deterministically via a context
    manager; it was previously left open.
    """
    print('fetching tweets for accounts in', account_file)
    with io.open(outfile, 'wt') as outf:
        for screen_name in iter_lines(account_file):
            print('\nFetching tweets for %s' % screen_name)
            for tweet in twutil.collect.tweets_for_user(screen_name, limit):
                tweet['user']['screen_name'] = screen_name
                outf.write('%s\n' % json.dumps(tweet, ensure_ascii=False))
                outf.flush()
#DEPRECATED
# def fetch_lists(keyword, max_results=20):
# """
# Fetch the urls of up to max_results Twitter lists that match the provided keyword.
# >>> len(fetch_lists('politics', max_results=4))
# 4
# """
# res_per_page = 8
# start = 0
# results = []
# while len(results) < max_results:
# url = 'http://ajax.googleapis.com/ajax/services/search/web?v=1.0&q=site:twitter.com+inurl:lists+%s&rsz=%d&start=%d' % (keyword,
# res_per_page,
# start)
# js = json.loads(requests.get(url).text)
# if not js['responseData']:
# print('something went wrong in google search:\n', js)
# return results[:max_results]
# else:
# for r in js['responseData']['results']:
# results.append(r['url'])
# start += res_per_page
# time.sleep(.4)
# return results[:max_results]
#NEW FETCH lists
def google_search(search_term, api_key, cse_id, **kwargs):
    """Run one Google Custom Search query and return the formatted result URLs.

    search_term: query string passed to the CSE API.
    api_key / cse_id: Google Custom Search credentials.
    kwargs: forwarded to cse().list() (e.g. num=, start=).
    Returns the list of 'formattedUrl' values, or whatever was collected
    before a GoogleHttpError (an empty list, since the single API call is
    the first thing that can fail).
    """
    final_urls = []
    try:
        service = build("customsearch", "v1", developerKey=api_key)
        res = service.cse().list(q=search_term, cx=cse_id, **kwargs).execute()
        for item in res['items']:
            final_urls.append(item['formattedUrl'])
        return final_urls
    except GoogleHttpError:
        # FIX: corrected the 'Errpr' typo in the diagnostic message.
        print("something wrong with Google HTTP Error")
        return final_urls
def fetch_lists(keyword,max_results=20):
    """
    Fetch the urls of up to max_results Twitter lists that match the provided keyword.
    Pages through Google Custom Search ('inurl:lists' query) 10 results at a
    time; returns whatever was gathered so far if the CSE call errors out.
    >>> len(fetch_lists('politics', max_results=4))
    4
    """
    #CONFIG FILE READ
    api_key=config.get('GOOGLE_CSE_KEYS','API_KEY')
    cse_id=config.get('GOOGLE_CSE_KEYS','CSE_ID')
    results = []
    start_c = 1
    search_term = "inurl:lists + "+keyword
    while len(results)<max_results:
        temp_res = google_search(search_term,api_key,cse_id,num=10,start=start_c)
        if len(temp_res) == 0:
            # google_search returns [] on API errors; bail with partial results.
            print("Google API Error, returning retrieved results")
            return results
        results.extend(temp_res)
        start_c += 10
    return results[:max_results]
def fetch_list_members(list_url):
    """ Get all members of the list specified by the given url. E.g., https://twitter.com/lore77/lists/libri-cultura-education """
    parsed = re.match(r'.+twitter\.com\/(.+)\/lists\/(.+)', list_url)
    if parsed is None:
        print('cannot parse list url %s' % list_url)
        return []
    owner, slug = parsed.groups()
    print('collecting list %s/%s' % (owner, slug))
    return twutil.collect.list_members(slug, owner)
def fetch_exemplars(keyword, outfile, n=50):
    """ Fetch top lists matching this keyword, then write the Twitter screen
    names along with the number of different lists on which each appears,
    tab-separated, one per line of outfile. """
    list_urls = fetch_lists(keyword, n)
    print('found %d lists for %s' % (len(list_urls), keyword))
    # Count how many distinct lists each handle appears on.
    counts = Counter()
    for list_url in list_urls:
        counts.update(fetch_list_members(list_url))
    # Write to file.
    outf = io.open(outfile, 'wt')
    for handle in sorted(counts):
        outf.write('%s\t%d\n' % (handle, counts[handle]))
    outf.close()
    print('saved exemplars to', outfile)
def main():
    """Command-line entry point: dispatch to follower, tweet, or exemplar
    collection according to the docopt arguments."""
    args = docopt(__doc__)
    if args['--followers']:
        fetch_followers(args['--input'], args['--output'], int(args['--max']), args['--loop'])
    elif args['--tweets']:
        fetch_tweets(args['--input'], args['--output'], int(args['--max']))
    else:
        fetch_exemplars(args['--query'], args['--output'])
if __name__ == '__main__':
main()
|
tapilab/brandelion | brandelion/cli/collect.py | fetch_followers | python | def fetch_followers(account_file, outfile, limit, do_loop):
print('Fetching followers for accounts in %s' % account_file)
niters = 1
while True:
outf = gzip.open(outfile, 'wt')
for screen_name in iter_lines(account_file):
timestamp = datetime.datetime.now().isoformat()
print('collecting followers for', screen_name)
followers = twutil.collect.followers_for_screen_name(screen_name, limit)
if len(followers) > 0:
outf.write('%s %s %s\n' % (timestamp, screen_name, ' '.join(followers)))
outf.flush()
else:
print('unknown user', screen_name)
outf.close()
if not do_loop:
return
else:
if niters == 1:
outfile = '%s.%d' % (outfile, niters)
else:
outfile = outfile[:outfile.rindex('.')] + '.%d' % niters
niters += 1 | Fetch up to limit followers for each Twitter account in
account_file. Write results to outfile file in format:
screen_name user_id follower_id_1 follower_id_2 ... | train | https://github.com/tapilab/brandelion/blob/40a5a5333cf704182c8666d1fbbbdadc7ff88546/brandelion/cli/collect.py#L56-L82 | [
"def iter_lines(filename):\n \"\"\" Iterate over screen names in a file, one per line.\"\"\"\n with open(filename, 'rt') as idfile:\n for line in idfile:\n screen_name = line.strip()\n if len(screen_name) > 0:\n yield screen_name.split()[0]\n"
] | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Collect Twitter data for brands.
usage:
brandelion collect --tweets --input <file> --output <file> --max=<N>
brandelion collect --followers --input <file> --output <file> --max=<N> [--loop]
brandelion collect --exemplars --query <string> --output <file>
Options
-h, --help
-i, --input <file> File containing list of Twitter accounts, one per line.
-l, --loop If true, keep looping to collect more data continuously.
-o, --output <file> File to store results
-t, --tweets Fetch tweets.
-f, --followers Fetch followers
-e, --exemplars Fetch exemplars from Twitter lists
-q, --query <string> A single string used to search for Twitter lists.
-m, --max=<N> Maximum number of followers or tweets to collect per account [default: 50000].
"""
from collections import Counter
import datetime
from docopt import docopt
import gzip
import io
import json
import re
import requests
import sys
import time
import traceback
import requests
import twutil
import json
import time
##import config from init.py:
from .. import config
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError as GoogleHttpError
def iter_lines(filename):
""" Iterate over screen names in a file, one per line."""
with open(filename, 'rt') as idfile:
for line in idfile:
screen_name = line.strip()
if len(screen_name) > 0:
yield screen_name.split()[0]
def fetch_tweets(account_file, outfile, limit):
""" Fetch up to limit tweets for each account in account_file and write to
outfile. """
print('fetching tweets for accounts in', account_file)
outf = io.open(outfile, 'wt')
for screen_name in iter_lines(account_file):
print('\nFetching tweets for %s' % screen_name)
for tweet in twutil.collect.tweets_for_user(screen_name, limit):
tweet['user']['screen_name'] = screen_name
outf.write('%s\n' % json.dumps(tweet, ensure_ascii=False))
outf.flush()
#DEPRECATED
# def fetch_lists(keyword, max_results=20):
# """
# Fetch the urls of up to max_results Twitter lists that match the provided keyword.
# >>> len(fetch_lists('politics', max_results=4))
# 4
# """
# res_per_page = 8
# start = 0
# results = []
# while len(results) < max_results:
# url = 'http://ajax.googleapis.com/ajax/services/search/web?v=1.0&q=site:twitter.com+inurl:lists+%s&rsz=%d&start=%d' % (keyword,
# res_per_page,
# start)
# js = json.loads(requests.get(url).text)
# if not js['responseData']:
# print('something went wrong in google search:\n', js)
# return results[:max_results]
# else:
# for r in js['responseData']['results']:
# results.append(r['url'])
# start += res_per_page
# time.sleep(.4)
# return results[:max_results]
#NEW FETCH lists
def google_search(search_term, api_key, cse_id, **kwargs):
final_urls = []
try:
service = build("customsearch", "v1", developerKey=api_key)
res = service.cse().list(q=search_term, cx=cse_id, **kwargs).execute()
for item in res['items']:
final_urls.append(item['formattedUrl'])
return final_urls
except GoogleHttpError:
print("something wrong with Google HTTP Errpr")
return final_urls
def fetch_lists(keyword,max_results=20):
"""
Fetch the urls of up to max_results Twitter lists that match the provided keyword.
>>> len(fetch_lists('politics', max_results=4))
4
"""
#CONFIG FILE READ
api_key=config.get('GOOGLE_CSE_KEYS','API_KEY')
cse_id=config.get('GOOGLE_CSE_KEYS','CSE_ID')
results = []
start_c = 1
search_term = "inurl:lists + "+keyword
while len(results)<max_results:
temp_res = google_search(search_term,api_key,cse_id,num=10,start=start_c)
if len(temp_res) == 0:
print("Google API Error, returning retrieved results")
return results
results.extend(temp_res)
start_c += 10
return results[:max_results]
def fetch_list_members(list_url):
""" Get all members of the list specified by the given url. E.g., https://twitter.com/lore77/lists/libri-cultura-education """
match = re.match(r'.+twitter\.com\/(.+)\/lists\/(.+)', list_url)
if not match:
print('cannot parse list url %s' % list_url)
return []
screen_name, slug = match.groups()
print('collecting list %s/%s' % (screen_name, slug))
return twutil.collect.list_members(slug, screen_name)
def fetch_exemplars(keyword, outfile, n=50):
""" Fetch top lists matching this keyword, then return Twitter screen
names along with the number of different lists on which each appers.. """
list_urls = fetch_lists(keyword, n)
print('found %d lists for %s' % (len(list_urls), keyword))
counts = Counter()
for list_url in list_urls:
counts.update(fetch_list_members(list_url))
# Write to file.
outf = io.open(outfile, 'wt')
for handle in sorted(counts):
outf.write('%s\t%d\n' % (handle, counts[handle]))
outf.close()
print('saved exemplars to', outfile)
def main():
args = docopt(__doc__)
if args['--followers']:
fetch_followers(args['--input'], args['--output'], int(args['--max']), args['--loop'])
elif args['--tweets']:
fetch_tweets(args['--input'], args['--output'], int(args['--max']))
else:
fetch_exemplars(args['--query'], args['--output'])
if __name__ == '__main__':
main()
|
tapilab/brandelion | brandelion/cli/collect.py | fetch_tweets | python | def fetch_tweets(account_file, outfile, limit):
print('fetching tweets for accounts in', account_file)
outf = io.open(outfile, 'wt')
for screen_name in iter_lines(account_file):
print('\nFetching tweets for %s' % screen_name)
for tweet in twutil.collect.tweets_for_user(screen_name, limit):
tweet['user']['screen_name'] = screen_name
outf.write('%s\n' % json.dumps(tweet, ensure_ascii=False))
outf.flush() | Fetch up to limit tweets for each account in account_file and write to
outfile. | train | https://github.com/tapilab/brandelion/blob/40a5a5333cf704182c8666d1fbbbdadc7ff88546/brandelion/cli/collect.py#L85-L95 | [
"def iter_lines(filename):\n \"\"\" Iterate over screen names in a file, one per line.\"\"\"\n with open(filename, 'rt') as idfile:\n for line in idfile:\n screen_name = line.strip()\n if len(screen_name) > 0:\n yield screen_name.split()[0]\n"
] | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Collect Twitter data for brands.
usage:
brandelion collect --tweets --input <file> --output <file> --max=<N>
brandelion collect --followers --input <file> --output <file> --max=<N> [--loop]
brandelion collect --exemplars --query <string> --output <file>
Options
-h, --help
-i, --input <file> File containing list of Twitter accounts, one per line.
-l, --loop If true, keep looping to collect more data continuously.
-o, --output <file> File to store results
-t, --tweets Fetch tweets.
-f, --followers Fetch followers
-e, --exemplars Fetch exemplars from Twitter lists
-q, --query <string> A single string used to search for Twitter lists.
-m, --max=<N> Maximum number of followers or tweets to collect per account [default: 50000].
"""
from collections import Counter
import datetime
from docopt import docopt
import gzip
import io
import json
import re
import requests
import sys
import time
import traceback
import requests
import twutil
import json
import time
##import config from init.py:
from .. import config
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError as GoogleHttpError
def iter_lines(filename):
""" Iterate over screen names in a file, one per line."""
with open(filename, 'rt') as idfile:
for line in idfile:
screen_name = line.strip()
if len(screen_name) > 0:
yield screen_name.split()[0]
def fetch_followers(account_file, outfile, limit, do_loop):
""" Fetch up to limit followers for each Twitter account in
account_file. Write results to outfile file in format:
screen_name user_id follower_id_1 follower_id_2 ..."""
print('Fetching followers for accounts in %s' % account_file)
niters = 1
while True:
outf = gzip.open(outfile, 'wt')
for screen_name in iter_lines(account_file):
timestamp = datetime.datetime.now().isoformat()
print('collecting followers for', screen_name)
followers = twutil.collect.followers_for_screen_name(screen_name, limit)
if len(followers) > 0:
outf.write('%s %s %s\n' % (timestamp, screen_name, ' '.join(followers)))
outf.flush()
else:
print('unknown user', screen_name)
outf.close()
if not do_loop:
return
else:
if niters == 1:
outfile = '%s.%d' % (outfile, niters)
else:
outfile = outfile[:outfile.rindex('.')] + '.%d' % niters
niters += 1
#DEPRECATED
# def fetch_lists(keyword, max_results=20):
# """
# Fetch the urls of up to max_results Twitter lists that match the provided keyword.
# >>> len(fetch_lists('politics', max_results=4))
# 4
# """
# res_per_page = 8
# start = 0
# results = []
# while len(results) < max_results:
# url = 'http://ajax.googleapis.com/ajax/services/search/web?v=1.0&q=site:twitter.com+inurl:lists+%s&rsz=%d&start=%d' % (keyword,
# res_per_page,
# start)
# js = json.loads(requests.get(url).text)
# if not js['responseData']:
# print('something went wrong in google search:\n', js)
# return results[:max_results]
# else:
# for r in js['responseData']['results']:
# results.append(r['url'])
# start += res_per_page
# time.sleep(.4)
# return results[:max_results]
#NEW FETCH lists
def google_search(search_term, api_key, cse_id, **kwargs):
final_urls = []
try:
service = build("customsearch", "v1", developerKey=api_key)
res = service.cse().list(q=search_term, cx=cse_id, **kwargs).execute()
for item in res['items']:
final_urls.append(item['formattedUrl'])
return final_urls
except GoogleHttpError:
print("something wrong with Google HTTP Errpr")
return final_urls
def fetch_lists(keyword,max_results=20):
"""
Fetch the urls of up to max_results Twitter lists that match the provided keyword.
>>> len(fetch_lists('politics', max_results=4))
4
"""
#CONFIG FILE READ
api_key=config.get('GOOGLE_CSE_KEYS','API_KEY')
cse_id=config.get('GOOGLE_CSE_KEYS','CSE_ID')
results = []
start_c = 1
search_term = "inurl:lists + "+keyword
while len(results)<max_results:
temp_res = google_search(search_term,api_key,cse_id,num=10,start=start_c)
if len(temp_res) == 0:
print("Google API Error, returning retrieved results")
return results
results.extend(temp_res)
start_c += 10
return results[:max_results]
def fetch_list_members(list_url):
""" Get all members of the list specified by the given url. E.g., https://twitter.com/lore77/lists/libri-cultura-education """
match = re.match(r'.+twitter\.com\/(.+)\/lists\/(.+)', list_url)
if not match:
print('cannot parse list url %s' % list_url)
return []
screen_name, slug = match.groups()
print('collecting list %s/%s' % (screen_name, slug))
return twutil.collect.list_members(slug, screen_name)
def fetch_exemplars(keyword, outfile, n=50):
""" Fetch top lists matching this keyword, then return Twitter screen
names along with the number of different lists on which each appers.. """
list_urls = fetch_lists(keyword, n)
print('found %d lists for %s' % (len(list_urls), keyword))
counts = Counter()
for list_url in list_urls:
counts.update(fetch_list_members(list_url))
# Write to file.
outf = io.open(outfile, 'wt')
for handle in sorted(counts):
outf.write('%s\t%d\n' % (handle, counts[handle]))
outf.close()
print('saved exemplars to', outfile)
def main():
args = docopt(__doc__)
if args['--followers']:
fetch_followers(args['--input'], args['--output'], int(args['--max']), args['--loop'])
elif args['--tweets']:
fetch_tweets(args['--input'], args['--output'], int(args['--max']))
else:
fetch_exemplars(args['--query'], args['--output'])
if __name__ == '__main__':
main()
|
tapilab/brandelion | brandelion/cli/collect.py | fetch_lists | python | def fetch_lists(keyword,max_results=20):
#CONFIG FILE READ
api_key=config.get('GOOGLE_CSE_KEYS','API_KEY')
cse_id=config.get('GOOGLE_CSE_KEYS','CSE_ID')
results = []
start_c = 1
search_term = "inurl:lists + "+keyword
while len(results)<max_results:
temp_res = google_search(search_term,api_key,cse_id,num=10,start=start_c)
if len(temp_res) == 0:
print("Google API Error, returning retrieved results")
return results
results.extend(temp_res)
start_c += 10
return results[:max_results] | Fetch the urls of up to max_results Twitter lists that match the provided keyword.
>>> len(fetch_lists('politics', max_results=4))
4 | train | https://github.com/tapilab/brandelion/blob/40a5a5333cf704182c8666d1fbbbdadc7ff88546/brandelion/cli/collect.py#L138-L158 | [
"def google_search(search_term, api_key, cse_id, **kwargs):\n final_urls = []\n try:\n service = build(\"customsearch\", \"v1\", developerKey=api_key)\n res = service.cse().list(q=search_term, cx=cse_id, **kwargs).execute()\n for item in res['items']:\n final_urls.append(item['... | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Collect Twitter data for brands.
usage:
brandelion collect --tweets --input <file> --output <file> --max=<N>
brandelion collect --followers --input <file> --output <file> --max=<N> [--loop]
brandelion collect --exemplars --query <string> --output <file>
Options
-h, --help
-i, --input <file> File containing list of Twitter accounts, one per line.
-l, --loop If true, keep looping to collect more data continuously.
-o, --output <file> File to store results
-t, --tweets Fetch tweets.
-f, --followers Fetch followers
-e, --exemplars Fetch exemplars from Twitter lists
-q, --query <string> A single string used to search for Twitter lists.
-m, --max=<N> Maximum number of followers or tweets to collect per account [default: 50000].
"""
from collections import Counter
import datetime
from docopt import docopt
import gzip
import io
import json
import re
import requests
import sys
import time
import traceback
import requests
import twutil
import json
import time
##import config from init.py:
from .. import config
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError as GoogleHttpError
def iter_lines(filename):
""" Iterate over screen names in a file, one per line."""
with open(filename, 'rt') as idfile:
for line in idfile:
screen_name = line.strip()
if len(screen_name) > 0:
yield screen_name.split()[0]
def fetch_followers(account_file, outfile, limit, do_loop):
""" Fetch up to limit followers for each Twitter account in
account_file. Write results to outfile file in format:
screen_name user_id follower_id_1 follower_id_2 ..."""
print('Fetching followers for accounts in %s' % account_file)
niters = 1
while True:
outf = gzip.open(outfile, 'wt')
for screen_name in iter_lines(account_file):
timestamp = datetime.datetime.now().isoformat()
print('collecting followers for', screen_name)
followers = twutil.collect.followers_for_screen_name(screen_name, limit)
if len(followers) > 0:
outf.write('%s %s %s\n' % (timestamp, screen_name, ' '.join(followers)))
outf.flush()
else:
print('unknown user', screen_name)
outf.close()
if not do_loop:
return
else:
if niters == 1:
outfile = '%s.%d' % (outfile, niters)
else:
outfile = outfile[:outfile.rindex('.')] + '.%d' % niters
niters += 1
def fetch_tweets(account_file, outfile, limit):
""" Fetch up to limit tweets for each account in account_file and write to
outfile. """
print('fetching tweets for accounts in', account_file)
outf = io.open(outfile, 'wt')
for screen_name in iter_lines(account_file):
print('\nFetching tweets for %s' % screen_name)
for tweet in twutil.collect.tweets_for_user(screen_name, limit):
tweet['user']['screen_name'] = screen_name
outf.write('%s\n' % json.dumps(tweet, ensure_ascii=False))
outf.flush()
#DEPRECATED
# def fetch_lists(keyword, max_results=20):
# """
# Fetch the urls of up to max_results Twitter lists that match the provided keyword.
# >>> len(fetch_lists('politics', max_results=4))
# 4
# """
# res_per_page = 8
# start = 0
# results = []
# while len(results) < max_results:
# url = 'http://ajax.googleapis.com/ajax/services/search/web?v=1.0&q=site:twitter.com+inurl:lists+%s&rsz=%d&start=%d' % (keyword,
# res_per_page,
# start)
# js = json.loads(requests.get(url).text)
# if not js['responseData']:
# print('something went wrong in google search:\n', js)
# return results[:max_results]
# else:
# for r in js['responseData']['results']:
# results.append(r['url'])
# start += res_per_page
# time.sleep(.4)
# return results[:max_results]
#NEW FETCH lists
def google_search(search_term, api_key, cse_id, **kwargs):
final_urls = []
try:
service = build("customsearch", "v1", developerKey=api_key)
res = service.cse().list(q=search_term, cx=cse_id, **kwargs).execute()
for item in res['items']:
final_urls.append(item['formattedUrl'])
return final_urls
except GoogleHttpError:
print("something wrong with Google HTTP Errpr")
return final_urls
def fetch_list_members(list_url):
""" Get all members of the list specified by the given url. E.g., https://twitter.com/lore77/lists/libri-cultura-education """
match = re.match(r'.+twitter\.com\/(.+)\/lists\/(.+)', list_url)
if not match:
print('cannot parse list url %s' % list_url)
return []
screen_name, slug = match.groups()
print('collecting list %s/%s' % (screen_name, slug))
return twutil.collect.list_members(slug, screen_name)
def fetch_exemplars(keyword, outfile, n=50):
""" Fetch top lists matching this keyword, then return Twitter screen
names along with the number of different lists on which each appers.. """
list_urls = fetch_lists(keyword, n)
print('found %d lists for %s' % (len(list_urls), keyword))
counts = Counter()
for list_url in list_urls:
counts.update(fetch_list_members(list_url))
# Write to file.
outf = io.open(outfile, 'wt')
for handle in sorted(counts):
outf.write('%s\t%d\n' % (handle, counts[handle]))
outf.close()
print('saved exemplars to', outfile)
def main():
args = docopt(__doc__)
if args['--followers']:
fetch_followers(args['--input'], args['--output'], int(args['--max']), args['--loop'])
elif args['--tweets']:
fetch_tweets(args['--input'], args['--output'], int(args['--max']))
else:
fetch_exemplars(args['--query'], args['--output'])
if __name__ == '__main__':
main()
|
tapilab/brandelion | brandelion/cli/collect.py | fetch_list_members | python | def fetch_list_members(list_url):
match = re.match(r'.+twitter\.com\/(.+)\/lists\/(.+)', list_url)
if not match:
print('cannot parse list url %s' % list_url)
return []
screen_name, slug = match.groups()
print('collecting list %s/%s' % (screen_name, slug))
return twutil.collect.list_members(slug, screen_name) | Get all members of the list specified by the given url. E.g., https://twitter.com/lore77/lists/libri-cultura-education | train | https://github.com/tapilab/brandelion/blob/40a5a5333cf704182c8666d1fbbbdadc7ff88546/brandelion/cli/collect.py#L160-L168 | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Collect Twitter data for brands.
usage:
brandelion collect --tweets --input <file> --output <file> --max=<N>
brandelion collect --followers --input <file> --output <file> --max=<N> [--loop]
brandelion collect --exemplars --query <string> --output <file>
Options
-h, --help
-i, --input <file> File containing list of Twitter accounts, one per line.
-l, --loop If true, keep looping to collect more data continuously.
-o, --output <file> File to store results
-t, --tweets Fetch tweets.
-f, --followers Fetch followers
-e, --exemplars Fetch exemplars from Twitter lists
-q, --query <string> A single string used to search for Twitter lists.
-m, --max=<N> Maximum number of followers or tweets to collect per account [default: 50000].
"""
from collections import Counter
import datetime
from docopt import docopt
import gzip
import io
import json
import re
import requests
import sys
import time
import traceback
import requests
import twutil
import json
import time
##import config from init.py:
from .. import config
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError as GoogleHttpError
def iter_lines(filename):
""" Iterate over screen names in a file, one per line."""
with open(filename, 'rt') as idfile:
for line in idfile:
screen_name = line.strip()
if len(screen_name) > 0:
yield screen_name.split()[0]
def fetch_followers(account_file, outfile, limit, do_loop):
""" Fetch up to limit followers for each Twitter account in
account_file. Write results to outfile file in format:
screen_name user_id follower_id_1 follower_id_2 ..."""
print('Fetching followers for accounts in %s' % account_file)
niters = 1
while True:
outf = gzip.open(outfile, 'wt')
for screen_name in iter_lines(account_file):
timestamp = datetime.datetime.now().isoformat()
print('collecting followers for', screen_name)
followers = twutil.collect.followers_for_screen_name(screen_name, limit)
if len(followers) > 0:
outf.write('%s %s %s\n' % (timestamp, screen_name, ' '.join(followers)))
outf.flush()
else:
print('unknown user', screen_name)
outf.close()
if not do_loop:
return
else:
if niters == 1:
outfile = '%s.%d' % (outfile, niters)
else:
outfile = outfile[:outfile.rindex('.')] + '.%d' % niters
niters += 1
def fetch_tweets(account_file, outfile, limit):
""" Fetch up to limit tweets for each account in account_file and write to
outfile. """
print('fetching tweets for accounts in', account_file)
outf = io.open(outfile, 'wt')
for screen_name in iter_lines(account_file):
print('\nFetching tweets for %s' % screen_name)
for tweet in twutil.collect.tweets_for_user(screen_name, limit):
tweet['user']['screen_name'] = screen_name
outf.write('%s\n' % json.dumps(tweet, ensure_ascii=False))
outf.flush()
#DEPRECATED
# def fetch_lists(keyword, max_results=20):
# """
# Fetch the urls of up to max_results Twitter lists that match the provided keyword.
# >>> len(fetch_lists('politics', max_results=4))
# 4
# """
# res_per_page = 8
# start = 0
# results = []
# while len(results) < max_results:
# url = 'http://ajax.googleapis.com/ajax/services/search/web?v=1.0&q=site:twitter.com+inurl:lists+%s&rsz=%d&start=%d' % (keyword,
# res_per_page,
# start)
# js = json.loads(requests.get(url).text)
# if not js['responseData']:
# print('something went wrong in google search:\n', js)
# return results[:max_results]
# else:
# for r in js['responseData']['results']:
# results.append(r['url'])
# start += res_per_page
# time.sleep(.4)
# return results[:max_results]
#NEW FETCH lists
def google_search(search_term, api_key, cse_id, **kwargs):
final_urls = []
try:
service = build("customsearch", "v1", developerKey=api_key)
res = service.cse().list(q=search_term, cx=cse_id, **kwargs).execute()
for item in res['items']:
final_urls.append(item['formattedUrl'])
return final_urls
except GoogleHttpError:
print("something wrong with Google HTTP Errpr")
return final_urls
def fetch_lists(keyword,max_results=20):
"""
Fetch the urls of up to max_results Twitter lists that match the provided keyword.
>>> len(fetch_lists('politics', max_results=4))
4
"""
#CONFIG FILE READ
api_key=config.get('GOOGLE_CSE_KEYS','API_KEY')
cse_id=config.get('GOOGLE_CSE_KEYS','CSE_ID')
results = []
start_c = 1
search_term = "inurl:lists + "+keyword
while len(results)<max_results:
temp_res = google_search(search_term,api_key,cse_id,num=10,start=start_c)
if len(temp_res) == 0:
print("Google API Error, returning retrieved results")
return results
results.extend(temp_res)
start_c += 10
return results[:max_results]
def fetch_exemplars(keyword, outfile, n=50):
""" Fetch top lists matching this keyword, then return Twitter screen
names along with the number of different lists on which each appers.. """
list_urls = fetch_lists(keyword, n)
print('found %d lists for %s' % (len(list_urls), keyword))
counts = Counter()
for list_url in list_urls:
counts.update(fetch_list_members(list_url))
# Write to file.
outf = io.open(outfile, 'wt')
for handle in sorted(counts):
outf.write('%s\t%d\n' % (handle, counts[handle]))
outf.close()
print('saved exemplars to', outfile)
def main():
args = docopt(__doc__)
if args['--followers']:
fetch_followers(args['--input'], args['--output'], int(args['--max']), args['--loop'])
elif args['--tweets']:
fetch_tweets(args['--input'], args['--output'], int(args['--max']))
else:
fetch_exemplars(args['--query'], args['--output'])
if __name__ == '__main__':
main()
|
tapilab/brandelion | brandelion/cli/collect.py | fetch_exemplars | python | def fetch_exemplars(keyword, outfile, n=50):
list_urls = fetch_lists(keyword, n)
print('found %d lists for %s' % (len(list_urls), keyword))
counts = Counter()
for list_url in list_urls:
counts.update(fetch_list_members(list_url))
# Write to file.
outf = io.open(outfile, 'wt')
for handle in sorted(counts):
outf.write('%s\t%d\n' % (handle, counts[handle]))
outf.close()
print('saved exemplars to', outfile) | Fetch top lists matching this keyword, then return Twitter screen
names along with the number of different lists on which each appers.. | train | https://github.com/tapilab/brandelion/blob/40a5a5333cf704182c8666d1fbbbdadc7ff88546/brandelion/cli/collect.py#L171-L184 | [
"def fetch_lists(keyword,max_results=20):\n \"\"\"\n Fetch the urls of up to max_results Twitter lists that match the provided keyword.\n >>> len(fetch_lists('politics', max_results=4))\n 4\n \"\"\"\n #CONFIG FILE READ\n api_key=config.get('GOOGLE_CSE_KEYS','API_KEY')\n cse_id=config.get('GO... | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Collect Twitter data for brands.
usage:
brandelion collect --tweets --input <file> --output <file> --max=<N>
brandelion collect --followers --input <file> --output <file> --max=<N> [--loop]
brandelion collect --exemplars --query <string> --output <file>
Options
-h, --help
-i, --input <file> File containing list of Twitter accounts, one per line.
-l, --loop If true, keep looping to collect more data continuously.
-o, --output <file> File to store results
-t, --tweets Fetch tweets.
-f, --followers Fetch followers
-e, --exemplars Fetch exemplars from Twitter lists
-q, --query <string> A single string used to search for Twitter lists.
-m, --max=<N> Maximum number of followers or tweets to collect per account [default: 50000].
"""
from collections import Counter
import datetime
from docopt import docopt
import gzip
import io
import json
import re
import requests
import sys
import time
import traceback
import requests
import twutil
import json
import time
##import config from init.py:
from .. import config
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError as GoogleHttpError
def iter_lines(filename):
""" Iterate over screen names in a file, one per line."""
with open(filename, 'rt') as idfile:
for line in idfile:
screen_name = line.strip()
if len(screen_name) > 0:
yield screen_name.split()[0]
def fetch_followers(account_file, outfile, limit, do_loop):
""" Fetch up to limit followers for each Twitter account in
account_file. Write results to outfile file in format:
screen_name user_id follower_id_1 follower_id_2 ..."""
print('Fetching followers for accounts in %s' % account_file)
niters = 1
while True:
outf = gzip.open(outfile, 'wt')
for screen_name in iter_lines(account_file):
timestamp = datetime.datetime.now().isoformat()
print('collecting followers for', screen_name)
followers = twutil.collect.followers_for_screen_name(screen_name, limit)
if len(followers) > 0:
outf.write('%s %s %s\n' % (timestamp, screen_name, ' '.join(followers)))
outf.flush()
else:
print('unknown user', screen_name)
outf.close()
if not do_loop:
return
else:
if niters == 1:
outfile = '%s.%d' % (outfile, niters)
else:
outfile = outfile[:outfile.rindex('.')] + '.%d' % niters
niters += 1
def fetch_tweets(account_file, outfile, limit):
""" Fetch up to limit tweets for each account in account_file and write to
outfile. """
print('fetching tweets for accounts in', account_file)
outf = io.open(outfile, 'wt')
for screen_name in iter_lines(account_file):
print('\nFetching tweets for %s' % screen_name)
for tweet in twutil.collect.tweets_for_user(screen_name, limit):
tweet['user']['screen_name'] = screen_name
outf.write('%s\n' % json.dumps(tweet, ensure_ascii=False))
outf.flush()
#DEPRECATED
# def fetch_lists(keyword, max_results=20):
# """
# Fetch the urls of up to max_results Twitter lists that match the provided keyword.
# >>> len(fetch_lists('politics', max_results=4))
# 4
# """
# res_per_page = 8
# start = 0
# results = []
# while len(results) < max_results:
# url = 'http://ajax.googleapis.com/ajax/services/search/web?v=1.0&q=site:twitter.com+inurl:lists+%s&rsz=%d&start=%d' % (keyword,
# res_per_page,
# start)
# js = json.loads(requests.get(url).text)
# if not js['responseData']:
# print('something went wrong in google search:\n', js)
# return results[:max_results]
# else:
# for r in js['responseData']['results']:
# results.append(r['url'])
# start += res_per_page
# time.sleep(.4)
# return results[:max_results]
#NEW FETCH lists
def google_search(search_term, api_key, cse_id, **kwargs):
final_urls = []
try:
service = build("customsearch", "v1", developerKey=api_key)
res = service.cse().list(q=search_term, cx=cse_id, **kwargs).execute()
for item in res['items']:
final_urls.append(item['formattedUrl'])
return final_urls
except GoogleHttpError:
print("something wrong with Google HTTP Errpr")
return final_urls
def fetch_lists(keyword,max_results=20):
"""
Fetch the urls of up to max_results Twitter lists that match the provided keyword.
>>> len(fetch_lists('politics', max_results=4))
4
"""
#CONFIG FILE READ
api_key=config.get('GOOGLE_CSE_KEYS','API_KEY')
cse_id=config.get('GOOGLE_CSE_KEYS','CSE_ID')
results = []
start_c = 1
search_term = "inurl:lists + "+keyword
while len(results)<max_results:
temp_res = google_search(search_term,api_key,cse_id,num=10,start=start_c)
if len(temp_res) == 0:
print("Google API Error, returning retrieved results")
return results
results.extend(temp_res)
start_c += 10
return results[:max_results]
def fetch_list_members(list_url):
""" Get all members of the list specified by the given url. E.g., https://twitter.com/lore77/lists/libri-cultura-education """
match = re.match(r'.+twitter\.com\/(.+)\/lists\/(.+)', list_url)
if not match:
print('cannot parse list url %s' % list_url)
return []
screen_name, slug = match.groups()
print('collecting list %s/%s' % (screen_name, slug))
return twutil.collect.list_members(slug, screen_name)
def main():
args = docopt(__doc__)
if args['--followers']:
fetch_followers(args['--input'], args['--output'], int(args['--max']), args['--loop'])
elif args['--tweets']:
fetch_tweets(args['--input'], args['--output'], int(args['--max']))
else:
fetch_exemplars(args['--query'], args['--output'])
if __name__ == '__main__':
main()
|
tapilab/brandelion | brandelion/cli/diagnose.py | correlation_by_exemplar | python | def correlation_by_exemplar(brands, exemplars, validation_scores, analyze_fn_str, outf):
analyze_fn = getattr(analyze, analyze_fn_str)
keys = sorted(k for k in validation_scores.keys() if k in set(x[0] for x in brands))
truth = [validation_scores[k] for k in keys]
result = {}
outf.write('exemplar\tcorr\tn_followers\n')
outf.flush()
for exemplar in exemplars:
single_exemplar = {exemplar: exemplars[exemplar]}
social_scores = analyze_fn(brands, single_exemplar)
predicted = [social_scores[k] for k in keys]
outf.write('%s\t%g\t%d\n' % (exemplar, scistat.pearsonr(predicted, truth)[0], len(exemplars[exemplar])))
outf.flush()
result[exemplar] = scistat.pearsonr(predicted, truth)[0]
outf.close()
return result | Report the overall correlation with the validation scores using each exemplar in isolation. | train | https://github.com/tapilab/brandelion/blob/40a5a5333cf704182c8666d1fbbbdadc7ff88546/brandelion/cli/diagnose.py#L50-L66 | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Run diagnostics on a dataset.
usage:
brandelion diagnose --network --brand-followers <file> --exemplar-followers <file> --validation <file> --output <file> [--network-method <string>]
Options
-h, --help
--brand-followers <file> File containing follower data for brand accounts.
--exemplar-followers <file> File containing follower data for exemplar accounts.
--network-method <string> Method to do text analysis [default: jaccard]
-n, --network Analyze followers.
-o, --output <file> File to store results
-t, --text Analyze text.
-v, --validation <file> File containing third-party scores for each brand by Twitter name, (e.g., surveys), for comparison.
"""
from docopt import docopt
import os
import scipy.stats as scistat
import random
from . import analyze, report
random.seed(123)
def read_scores(fname):
scores = {}
for line in open(fname):
parts = line.strip().lower().split()
if len(parts) > 1:
scores[parts[0]] = float(parts[1])
return scores
def validate(scores, validation):
keys = sorted(validation.keys())
predicted = [scores[k] for k in keys]
truth = [validation[k] for k in keys]
print('Pearson:', scistat.pearsonr(predicted, truth))
def diagnose_text(brand_tweets_file, exemplar_tweets_file, sample_tweets_file, validation_file):
pass
def mkdirs(filename):
report.mkdirs(os.path.dirname(filename))
def diagnose_followers(brand_follower_file, exemplar_follower_file, validation_file, analyze_fn, output_file):
mkdirs(output_file)
outf = open(output_file, 'wt')
brands = analyze.read_follower_file(brand_follower_file).items()
exemplars = analyze.read_follower_file(exemplar_follower_file, blacklist=analyze.get_twitter_handles(brand_follower_file))
print('read follower data for %d exemplars' % (len(exemplars)))
scores = report.read_scores(validation_file)
return correlation_by_exemplar(brands, exemplars, scores, analyze_fn, outf)
def main():
args = docopt(__doc__)
print(args)
if '--network' in args:
diagnose_followers(args['--brand-followers'], args['--exemplar-followers'], args['--validation'], args['--network-method'], args['--output'])
if '--text' in args:
diagnose_text(args['--brand-tweets'], args['--exemplar-tweets'], args['--sample-tweets'], args['--validation'], args['--text-method'])
if __name__ == '__main__':
main()
|
pyQode/pyqode.cobol | pyqode/cobol/widgets/code_edit.py | CobolCodeEdit._do_home_key | python | def _do_home_key(self, event=None, select=False):
# get nb char to first significative char
min_column = self.indenter_mode.min_column
text = api.TextHelper(self).current_line_text()[min_column:]
indent = len(text) - len(text.lstrip())
delta = (self.textCursor().positionInBlock() - indent - min_column)
cursor = self.textCursor()
move = QtGui.QTextCursor.MoveAnchor
if select:
move = QtGui.QTextCursor.KeepAnchor
if delta > 0:
cursor.movePosition(QtGui.QTextCursor.Left, move, delta)
else:
cursor.movePosition(QtGui.QTextCursor.StartOfBlock, move)
cursor.movePosition(QtGui.QTextCursor.Right, cursor.MoveAnchor, min_column)
self.setTextCursor(cursor)
if event:
event.accept() | Performs home key action | train | https://github.com/pyQode/pyqode.cobol/blob/eedae4e320a4b2d0c44abb2c3061091321648fb7/pyqode/cobol/widgets/code_edit.py#L206-L224 | null | class CobolCodeEdit(api.CodeEdit):
"""
CodeEdit specialized for cobol source code editing.
"""
class CobolFileManager(FileManager):
def _get_icon(self):
return QtGui.QIcon(icons.ICON_MIMETYPE)
mimetypes = ['text/x-cobol']
extensions = [".COB", ".CBL", ".PCO", ".CPY", ".SCB", ".SQB"]
@classmethod
def all_extensions(cls):
return cls.extensions + [ext.lower() for ext in cls.extensions]
@property
def free_format(self):
return self._free_format
@free_format.setter
def free_format(self, free_fmt):
if free_fmt != self._free_format:
self._free_format = free_fmt
self.margins.enabled = not free_fmt
self.syntax_highlighter.rehighlight()
self._update_backend_format()
@property
def lower_case_keywords(self):
return self._lower_case_keywords
@lower_case_keywords.setter
def lower_case_keywords(self, value):
self._lower_case_keywords = value
self._update_backend_proposed_kw_case()
@property
def comment_indicator(self):
return self._comment_indicator
@comment_indicator.setter
def comment_indicator(self, value):
self._comment_indicator = value
def __init__(self, parent=None, color_scheme='qt', free_format=False):
super().__init__(parent)
self.file = self.CobolFileManager(self)
self._lower_case_keywords = False
self._free_format = None
self._comment_indicator = '*> '
self.word_separators.remove('-')
self._start_server()
self._setup_panels()
self._setup_modes(color_scheme)
self.encoding_panel = self.panels.append(
panels.EncodingPanel(), api.Panel.Position.TOP
)
self.read_only_panel = self.panels.append(
panels.ReadOnlyPanel(), api.Panel.Position.TOP
)
self.free_format = free_format
def _start_server(self):
if hasattr(sys, "frozen"):
cwd = os.path.dirname(sys.executable)
base = 'cobol-backend'
srv = base + '.exe' if sys.platform == 'win32' else base
srv = os.path.join(cwd, srv)
self.backend.start(srv)
else:
self.backend.start(server.__file__)
def close(self, *args, **kwargs):
self.cursor_history = None
self.extended_selection_mode = None
self.case_converter = None
self.auto_complete = None
self.outline_mode = None
self.add_separator()
self.goto_def_mode = None
self.code_completion_mode = None
self.file_watcher = None
self.auto_indent_mode = None
self.caret_line_mode = None
self.zoom_mode = None
self.indenter_mode = None
self.auto_indent_mode = None
self.left_margin = None
self.right_margin = None
self.comments_mode = None
self.offset_calculator = None
self.occurences_highlighter_mode = None
self.backspace_mode = None
self.search_panel = None
self.folding_panel = None
self.line_nbr_panel = None
self.checker_panel = None
self.global_checker_panel = None
self.encoding_panel = None
self.read_only_panel = None
self.margins = None
super().close(*args, **kwargs)
def _setup_modes(self, color_scheme):
self.cursor_history = self.modes.append(modes.CursorHistoryMode())
self.extended_selection_mode = self.modes.append(
modes.ExtendedSelectionMode()
)
self.case_converter = self.modes.append(
modes.CaseConverterMode()
)
self.auto_complete = self.modes.append(
modes.AutoCompleteMode())
self.outline_mode = self.modes.append(
modes.OutlineMode(get_outline))
self.add_separator()
self.goto_def_mode = self.modes.append(
cobmodes.GoToDefinitionMode()
)
self.code_completion_mode = self.modes.append(
modes.CodeCompletionMode()
)
self.code_completion_mode.trigger_symbols[:] = []
self.file_watcher = self.modes.append(
modes.FileWatcherMode()
)
self.auto_indent_mode = self.modes.append(
cobmodes.CobolAutoIndentMode()
)
self.caret_line_mode = self.modes.append(
modes.CaretLineHighlighterMode()
)
self.zoom_mode = self.modes.append(
modes.ZoomMode()
)
self.indenter_mode = self.modes.append(
cobmodes.IndenterMode()
)
self.auto_indent_mode = self.modes.append(
modes.AutoIndentMode()
)
self.modes.append(cobmodes.CobolSyntaxHighlighter(
self.document(), color_scheme=api.ColorScheme(color_scheme)))
self.syntax_highlighter.fold_detector = CobolFoldDetector()
self.margins = self.modes.append(cobmodes.MarginsMode())
self.comments_mode = self.modes.append(cobmodes.CommentsMode())
self.offset_calculator = self.modes.append(
cobmodes.OffsetCalculatorMode())
self.occurences_highlighter_mode = self.modes.append(
modes.OccurrencesHighlighterMode()
)
self.occurences_highlighter_mode.case_sensitive = False
self.backspace_mode = self.modes.append(
cobmodes.SmartBackSpaceMode()
)
def _setup_panels(self):
self.search_panel = self.panels.append(
panels.SearchAndReplacePanel(), api.Panel.Position.BOTTOM
)
self.folding_panel = self.panels.append(
panels.FoldingPanel(), api.Panel.Position.LEFT
)
self.line_nbr_panel = self.panels.append(
panels.LineNumberPanel(), api.Panel.Position.LEFT
)
self.checker_panel = self.panels.append(
panels.CheckerPanel(), api.Panel.Position.LEFT
)
self.global_checker_panel = self.panels.append(
panels.GlobalCheckerPanel(), api.Panel.Position.RIGHT)
def _update_backend_format(self):
from pyqode.cobol.backend.workers import set_free_format
try:
self.backend.send_request(set_free_format, self.free_format)
except NotRunning:
QtCore.QTimer.singleShot(100, self._update_backend_format)
def _update_backend_proposed_kw_case(self):
from pyqode.cobol.backend.workers import set_lower_case_keywords
try:
self.backend.send_request(set_lower_case_keywords,
self.lower_case_keywords)
except NotRunning:
QtCore.QTimer.singleShot(
100, self._update_backend_proposed_kw_case)
|
pyQode/pyqode.cobol | pyqode/cobol/modes/goto.py | GoToDefinitionMode.on_state_changed | python | def on_state_changed(self, state):
super(GoToDefinitionMode, self).on_state_changed(state)
if state:
self.editor.mouse_moved.connect(self._on_mouse_moved)
self.editor.mouse_released.connect(self._on_mouse_released)
self.editor.add_action(self.action_goto, sub_menu='COBOL')
self.editor.mouse_double_clicked.connect(
self._timer.cancel_requests)
else:
self.editor.mouse_moved.disconnect(self._on_mouse_moved)
self.editor.mouse_released.disconnect(self._on_mouse_released)
self.editor.remove_action(self.action_goto, sub_menu='Python')
self.editor.mouse_double_clicked.disconnect(
self._timer.cancel_requests) | Connects/disconnects slots to/from signals when the mode state
changed. | train | https://github.com/pyQode/pyqode.cobol/blob/eedae4e320a4b2d0c44abb2c3061091321648fb7/pyqode/cobol/modes/goto.py#L29-L46 | null | class GoToDefinitionMode(Mode, QObject):
"""
Go to the definition of the symbol under the word cursor.
"""
#: Signal emitted when a word is clicked. The parameter is a
#: QTextCursor with the clicked word set as the selected text.
word_clicked = Signal(QTextCursor)
def __init__(self):
QObject.__init__(self)
Mode.__init__(self)
self._previous_cursor_start = -1
self._previous_cursor_end = -1
self._definition = None
self._deco = None
self._pending = False
self.action_goto = QAction(_("Go to assignments"), self)
self.action_goto.setShortcut('F7')
self.action_goto.triggered.connect(self.request_goto)
self.word_clicked.connect(self.request_goto)
self._timer = DelayJobRunner(delay=200)
def _select_word_under_mouse_cursor(self):
""" Selects the word under the mouse cursor. """
cursor = TextHelper(self.editor).word_under_mouse_cursor()
if (self._previous_cursor_start != cursor.selectionStart() and
self._previous_cursor_end != cursor.selectionEnd()):
self._remove_decoration()
self._add_decoration(cursor)
self._previous_cursor_start = cursor.selectionStart()
self._previous_cursor_end = cursor.selectionEnd()
def _on_mouse_moved(self, event):
""" mouse moved callback """
if event.modifiers() & Qt.ControlModifier:
self._select_word_under_mouse_cursor()
else:
self._remove_decoration()
self.editor.set_mouse_cursor(Qt.IBeamCursor)
self._previous_cursor_start = -1
self._previous_cursor_end = -1
def _on_mouse_released(self, event):
""" mouse pressed callback """
if event.button() == 1 and self._deco:
cursor = TextHelper(self.editor).word_under_mouse_cursor()
if cursor and cursor.selectedText():
self._timer.request_job(self.word_clicked.emit, cursor)
def find_definition(self, symbol, definition):
if symbol.lower() == definition.name.lower().replace(" section", "").replace(" division", ""):
return definition
for ch in definition.children:
d = self.find_definition(symbol, ch)
if d is not None:
return d
return None
def select_word(self, cursor):
symbol = cursor.selectedText()
analyser = self.editor.outline_mode
for definition in analyser.definitions:
node = self.find_definition(symbol, definition)
if node is not None:
break
else:
node = None
self._definition = None
if node and node.line != cursor.block().blockNumber():
self._definition = node
if self._deco is None:
if cursor.selectedText():
self._deco = TextDecoration(cursor)
self._deco.set_foreground(Qt.blue)
self._deco.set_as_underlined()
self.editor.decorations.append(self._deco)
return True
return False
def _add_decoration(self, cursor):
"""
Adds a decoration for the word under ``cursor``.
"""
if self.select_word(cursor):
self.editor.set_mouse_cursor(Qt.PointingHandCursor)
else:
self.editor.set_mouse_cursor(Qt.IBeamCursor)
def _remove_decoration(self):
"""
Removes the word under cursor's decoration
"""
if self._deco is not None:
self.editor.decorations.remove(self._deco)
self._deco = None
def request_goto(self, tc=None):
"""
Request a go to assignment.
:param tc: Text cursor which contains the text that we must look for
its assignment. Can be None to go to the text that is under
the text cursor.
:type tc: QtGui.QTextCursor
"""
if not tc:
tc = TextHelper(self.editor).word_under_cursor(
select_whole_word=True)
if not self._definition or isinstance(self.sender(), QAction):
self.select_word(tc)
if self._definition is not None:
QTimer.singleShot(100, self._goto_def)
def _goto_def(self):
if self._definition:
line = self._definition.line
col = self._definition.column
TextHelper(self.editor).goto_line(line, move=True, column=col)
|
pyQode/pyqode.cobol | pyqode/cobol/modes/goto.py | GoToDefinitionMode._on_mouse_moved | python | def _on_mouse_moved(self, event):
if event.modifiers() & Qt.ControlModifier:
self._select_word_under_mouse_cursor()
else:
self._remove_decoration()
self.editor.set_mouse_cursor(Qt.IBeamCursor)
self._previous_cursor_start = -1
self._previous_cursor_end = -1 | mouse moved callback | train | https://github.com/pyQode/pyqode.cobol/blob/eedae4e320a4b2d0c44abb2c3061091321648fb7/pyqode/cobol/modes/goto.py#L58-L66 | null | class GoToDefinitionMode(Mode, QObject):
"""
Go to the definition of the symbol under the word cursor.
"""
#: Signal emitted when a word is clicked. The parameter is a
#: QTextCursor with the clicked word set as the selected text.
word_clicked = Signal(QTextCursor)
def __init__(self):
QObject.__init__(self)
Mode.__init__(self)
self._previous_cursor_start = -1
self._previous_cursor_end = -1
self._definition = None
self._deco = None
self._pending = False
self.action_goto = QAction(_("Go to assignments"), self)
self.action_goto.setShortcut('F7')
self.action_goto.triggered.connect(self.request_goto)
self.word_clicked.connect(self.request_goto)
self._timer = DelayJobRunner(delay=200)
def on_state_changed(self, state):
"""
Connects/disconnects slots to/from signals when the mode state
changed.
"""
super(GoToDefinitionMode, self).on_state_changed(state)
if state:
self.editor.mouse_moved.connect(self._on_mouse_moved)
self.editor.mouse_released.connect(self._on_mouse_released)
self.editor.add_action(self.action_goto, sub_menu='COBOL')
self.editor.mouse_double_clicked.connect(
self._timer.cancel_requests)
else:
self.editor.mouse_moved.disconnect(self._on_mouse_moved)
self.editor.mouse_released.disconnect(self._on_mouse_released)
self.editor.remove_action(self.action_goto, sub_menu='Python')
self.editor.mouse_double_clicked.disconnect(
self._timer.cancel_requests)
def _select_word_under_mouse_cursor(self):
""" Selects the word under the mouse cursor. """
cursor = TextHelper(self.editor).word_under_mouse_cursor()
if (self._previous_cursor_start != cursor.selectionStart() and
self._previous_cursor_end != cursor.selectionEnd()):
self._remove_decoration()
self._add_decoration(cursor)
self._previous_cursor_start = cursor.selectionStart()
self._previous_cursor_end = cursor.selectionEnd()
def _on_mouse_released(self, event):
""" mouse pressed callback """
if event.button() == 1 and self._deco:
cursor = TextHelper(self.editor).word_under_mouse_cursor()
if cursor and cursor.selectedText():
self._timer.request_job(self.word_clicked.emit, cursor)
def find_definition(self, symbol, definition):
if symbol.lower() == definition.name.lower().replace(" section", "").replace(" division", ""):
return definition
for ch in definition.children:
d = self.find_definition(symbol, ch)
if d is not None:
return d
return None
def select_word(self, cursor):
symbol = cursor.selectedText()
analyser = self.editor.outline_mode
for definition in analyser.definitions:
node = self.find_definition(symbol, definition)
if node is not None:
break
else:
node = None
self._definition = None
if node and node.line != cursor.block().blockNumber():
self._definition = node
if self._deco is None:
if cursor.selectedText():
self._deco = TextDecoration(cursor)
self._deco.set_foreground(Qt.blue)
self._deco.set_as_underlined()
self.editor.decorations.append(self._deco)
return True
return False
def _add_decoration(self, cursor):
"""
Adds a decoration for the word under ``cursor``.
"""
if self.select_word(cursor):
self.editor.set_mouse_cursor(Qt.PointingHandCursor)
else:
self.editor.set_mouse_cursor(Qt.IBeamCursor)
def _remove_decoration(self):
"""
Removes the word under cursor's decoration
"""
if self._deco is not None:
self.editor.decorations.remove(self._deco)
self._deco = None
def request_goto(self, tc=None):
"""
Request a go to assignment.
:param tc: Text cursor which contains the text that we must look for
its assignment. Can be None to go to the text that is under
the text cursor.
:type tc: QtGui.QTextCursor
"""
if not tc:
tc = TextHelper(self.editor).word_under_cursor(
select_whole_word=True)
if not self._definition or isinstance(self.sender(), QAction):
self.select_word(tc)
if self._definition is not None:
QTimer.singleShot(100, self._goto_def)
def _goto_def(self):
if self._definition:
line = self._definition.line
col = self._definition.column
TextHelper(self.editor).goto_line(line, move=True, column=col)
|
pyQode/pyqode.cobol | pyqode/cobol/modes/goto.py | GoToDefinitionMode._add_decoration | python | def _add_decoration(self, cursor):
if self.select_word(cursor):
self.editor.set_mouse_cursor(Qt.PointingHandCursor)
else:
self.editor.set_mouse_cursor(Qt.IBeamCursor) | Adds a decoration for the word under ``cursor``. | train | https://github.com/pyQode/pyqode.cobol/blob/eedae4e320a4b2d0c44abb2c3061091321648fb7/pyqode/cobol/modes/goto.py#L105-L112 | null | class GoToDefinitionMode(Mode, QObject):
"""
Go to the definition of the symbol under the word cursor.
"""
#: Signal emitted when a word is clicked. The parameter is a
#: QTextCursor with the clicked word set as the selected text.
word_clicked = Signal(QTextCursor)
def __init__(self):
QObject.__init__(self)
Mode.__init__(self)
self._previous_cursor_start = -1
self._previous_cursor_end = -1
self._definition = None
self._deco = None
self._pending = False
self.action_goto = QAction(_("Go to assignments"), self)
self.action_goto.setShortcut('F7')
self.action_goto.triggered.connect(self.request_goto)
self.word_clicked.connect(self.request_goto)
self._timer = DelayJobRunner(delay=200)
def on_state_changed(self, state):
"""
Connects/disconnects slots to/from signals when the mode state
changed.
"""
super(GoToDefinitionMode, self).on_state_changed(state)
if state:
self.editor.mouse_moved.connect(self._on_mouse_moved)
self.editor.mouse_released.connect(self._on_mouse_released)
self.editor.add_action(self.action_goto, sub_menu='COBOL')
self.editor.mouse_double_clicked.connect(
self._timer.cancel_requests)
else:
self.editor.mouse_moved.disconnect(self._on_mouse_moved)
self.editor.mouse_released.disconnect(self._on_mouse_released)
self.editor.remove_action(self.action_goto, sub_menu='Python')
self.editor.mouse_double_clicked.disconnect(
self._timer.cancel_requests)
def _select_word_under_mouse_cursor(self):
""" Selects the word under the mouse cursor. """
cursor = TextHelper(self.editor).word_under_mouse_cursor()
if (self._previous_cursor_start != cursor.selectionStart() and
self._previous_cursor_end != cursor.selectionEnd()):
self._remove_decoration()
self._add_decoration(cursor)
self._previous_cursor_start = cursor.selectionStart()
self._previous_cursor_end = cursor.selectionEnd()
def _on_mouse_moved(self, event):
""" mouse moved callback """
if event.modifiers() & Qt.ControlModifier:
self._select_word_under_mouse_cursor()
else:
self._remove_decoration()
self.editor.set_mouse_cursor(Qt.IBeamCursor)
self._previous_cursor_start = -1
self._previous_cursor_end = -1
def _on_mouse_released(self, event):
""" mouse pressed callback """
if event.button() == 1 and self._deco:
cursor = TextHelper(self.editor).word_under_mouse_cursor()
if cursor and cursor.selectedText():
self._timer.request_job(self.word_clicked.emit, cursor)
def find_definition(self, symbol, definition):
if symbol.lower() == definition.name.lower().replace(" section", "").replace(" division", ""):
return definition
for ch in definition.children:
d = self.find_definition(symbol, ch)
if d is not None:
return d
return None
def select_word(self, cursor):
symbol = cursor.selectedText()
analyser = self.editor.outline_mode
for definition in analyser.definitions:
node = self.find_definition(symbol, definition)
if node is not None:
break
else:
node = None
self._definition = None
if node and node.line != cursor.block().blockNumber():
self._definition = node
if self._deco is None:
if cursor.selectedText():
self._deco = TextDecoration(cursor)
self._deco.set_foreground(Qt.blue)
self._deco.set_as_underlined()
self.editor.decorations.append(self._deco)
return True
return False
def _remove_decoration(self):
"""
Removes the word under cursor's decoration
"""
if self._deco is not None:
self.editor.decorations.remove(self._deco)
self._deco = None
def request_goto(self, tc=None):
"""
Request a go to assignment.
:param tc: Text cursor which contains the text that we must look for
its assignment. Can be None to go to the text that is under
the text cursor.
:type tc: QtGui.QTextCursor
"""
if not tc:
tc = TextHelper(self.editor).word_under_cursor(
select_whole_word=True)
if not self._definition or isinstance(self.sender(), QAction):
self.select_word(tc)
if self._definition is not None:
QTimer.singleShot(100, self._goto_def)
def _goto_def(self):
if self._definition:
line = self._definition.line
col = self._definition.column
TextHelper(self.editor).goto_line(line, move=True, column=col)
|
pyQode/pyqode.cobol | pyqode/cobol/modes/goto.py | GoToDefinitionMode.request_goto | python | def request_goto(self, tc=None):
if not tc:
tc = TextHelper(self.editor).word_under_cursor(
select_whole_word=True)
if not self._definition or isinstance(self.sender(), QAction):
self.select_word(tc)
if self._definition is not None:
QTimer.singleShot(100, self._goto_def) | Request a go to assignment.
:param tc: Text cursor which contains the text that we must look for
its assignment. Can be None to go to the text that is under
the text cursor.
:type tc: QtGui.QTextCursor | train | https://github.com/pyQode/pyqode.cobol/blob/eedae4e320a4b2d0c44abb2c3061091321648fb7/pyqode/cobol/modes/goto.py#L122-L137 | null | class GoToDefinitionMode(Mode, QObject):
"""
Go to the definition of the symbol under the word cursor.
"""
#: Signal emitted when a word is clicked. The parameter is a
#: QTextCursor with the clicked word set as the selected text.
word_clicked = Signal(QTextCursor)
def __init__(self):
QObject.__init__(self)
Mode.__init__(self)
self._previous_cursor_start = -1
self._previous_cursor_end = -1
self._definition = None
self._deco = None
self._pending = False
self.action_goto = QAction(_("Go to assignments"), self)
self.action_goto.setShortcut('F7')
self.action_goto.triggered.connect(self.request_goto)
self.word_clicked.connect(self.request_goto)
self._timer = DelayJobRunner(delay=200)
def on_state_changed(self, state):
"""
Connects/disconnects slots to/from signals when the mode state
changed.
"""
super(GoToDefinitionMode, self).on_state_changed(state)
if state:
self.editor.mouse_moved.connect(self._on_mouse_moved)
self.editor.mouse_released.connect(self._on_mouse_released)
self.editor.add_action(self.action_goto, sub_menu='COBOL')
self.editor.mouse_double_clicked.connect(
self._timer.cancel_requests)
else:
self.editor.mouse_moved.disconnect(self._on_mouse_moved)
self.editor.mouse_released.disconnect(self._on_mouse_released)
self.editor.remove_action(self.action_goto, sub_menu='Python')
self.editor.mouse_double_clicked.disconnect(
self._timer.cancel_requests)
def _select_word_under_mouse_cursor(self):
""" Selects the word under the mouse cursor. """
cursor = TextHelper(self.editor).word_under_mouse_cursor()
if (self._previous_cursor_start != cursor.selectionStart() and
self._previous_cursor_end != cursor.selectionEnd()):
self._remove_decoration()
self._add_decoration(cursor)
self._previous_cursor_start = cursor.selectionStart()
self._previous_cursor_end = cursor.selectionEnd()
def _on_mouse_moved(self, event):
""" mouse moved callback """
if event.modifiers() & Qt.ControlModifier:
self._select_word_under_mouse_cursor()
else:
self._remove_decoration()
self.editor.set_mouse_cursor(Qt.IBeamCursor)
self._previous_cursor_start = -1
self._previous_cursor_end = -1
def _on_mouse_released(self, event):
""" mouse pressed callback """
if event.button() == 1 and self._deco:
cursor = TextHelper(self.editor).word_under_mouse_cursor()
if cursor and cursor.selectedText():
self._timer.request_job(self.word_clicked.emit, cursor)
def find_definition(self, symbol, definition):
if symbol.lower() == definition.name.lower().replace(" section", "").replace(" division", ""):
return definition
for ch in definition.children:
d = self.find_definition(symbol, ch)
if d is not None:
return d
return None
def select_word(self, cursor):
symbol = cursor.selectedText()
analyser = self.editor.outline_mode
for definition in analyser.definitions:
node = self.find_definition(symbol, definition)
if node is not None:
break
else:
node = None
self._definition = None
if node and node.line != cursor.block().blockNumber():
self._definition = node
if self._deco is None:
if cursor.selectedText():
self._deco = TextDecoration(cursor)
self._deco.set_foreground(Qt.blue)
self._deco.set_as_underlined()
self.editor.decorations.append(self._deco)
return True
return False
def _add_decoration(self, cursor):
"""
Adds a decoration for the word under ``cursor``.
"""
if self.select_word(cursor):
self.editor.set_mouse_cursor(Qt.PointingHandCursor)
else:
self.editor.set_mouse_cursor(Qt.IBeamCursor)
def _remove_decoration(self):
"""
Removes the word under cursor's decoration
"""
if self._deco is not None:
self.editor.decorations.remove(self._deco)
self._deco = None
def _goto_def(self):
if self._definition:
line = self._definition.line
col = self._definition.column
TextHelper(self.editor).goto_line(line, move=True, column=col)
|
pyQode/pyqode.cobol | pyqode/cobol/api/folding.py | CobolFoldDetector.normalize_text | python | def normalize_text(self, text):
if not self.editor.free_format:
text = ' ' * 6 + text[6:]
return text.upper() | Normalize text, when fixed format is ON, replace the first 6 chars by a space. | train | https://github.com/pyQode/pyqode.cobol/blob/eedae4e320a4b2d0c44abb2c3061091321648fb7/pyqode/cobol/api/folding.py#L43-L49 | null | class CobolFoldDetector(FoldDetector):
def __init__(self):
super().__init__()
self.proc_division = None
self._proc_div_txt = ""
self.data_division = None
self._data_div_txt = ""
self.variables = set()
self.divisions = []
def is_valid(self, block):
return block is not None and block.isValid()
def stripped_texts(self, block, prev_block):
ctext = block.text().rstrip().upper()
if self.is_valid(prev_block):
ptext = prev_block.text().rstrip().upper()
else:
ptext = ''
return ctext, ptext
def is_in_data_division(self, block):
for div_block, div_type in reversed(self.divisions):
if div_block.blockNumber() < block.blockNumber():
return div_type == 'data'
return False
def is_in_proc_division(self, block):
for div_block, div_type in reversed(self.divisions):
if div_block.blockNumber() < block.blockNumber():
return div_type == 'procedure'
return False
def get_indent(self, normalized_text):
indent = len(normalized_text) - len(normalized_text.lstrip())
return indent + indent % 2
def detect_fold_level(self, prev_block, block):
ctext, ptext = self.stripped_texts(block, prev_block)
if not self.editor.free_format:
ctext = self.normalize_text(ctext)
ptext = self.normalize_text(ptext)
if regex.DIVISION.indexIn(ctext) != -1 and not ctext.lstrip().startswith('*'):
return OFFSET_DIVISION
elif regex.SECTION.indexIn(ctext) != -1 and not ctext.lstrip().startswith('*'):
return OFFSET_SECTION
else:
# anywhere else, folding is mostly based on the indentation level
indent = self.get_indent(ctext)
pindent = self.get_indent(ptext)
if ctext.strip().upper().startswith('END-') and self.is_valid(prev_block) and pindent > indent:
# find previous block with the same indent, use it's fold level + 1 to include
# the end-branch statement in the fold scope
pblock = prev_block
while self.is_valid(pblock) and (pindent != indent or len(ptext.strip()) == 0):
pblock = pblock.previous()
ptext = self.normalize_text(pblock.text())
pindent = self.get_indent(ptext)
lvl = TextBlockHelper.get_fold_lvl(pblock.next())
else:
lvl = OFFSET_OTHER + indent
# if not self.editor.free_format and (ctext.lstrip().startswith('-') or ctext.lstrip().startswith('*')):
if not self.editor.free_format and (ctext.lstrip().startswith('-')):
# use previous fold level
lvl = TextBlockHelper.get_fold_lvl(prev_block)
if not self.editor.free_format and ctext.strip().startswith('*'):
if regex.DIVISION.indexIn(ptext) != -1 and not ptext.lstrip().startswith('*'):
lvl = OFFSET_SECTION
elif regex.SECTION.indexIn(ptext) != -1 and not ptext.lstrip().startswith('*'):
return OFFSET_SECTION + 2
else:
lvl = TextBlockHelper.get_fold_lvl(prev_block)
return lvl
|
pyQode/pyqode.cobol | pyqode/cobol/api/pic.py | _clean_code | python | def _clean_code(code):
lines = []
# cleanup lines, the parser is very sensitive to extra spaces,...
for l in code.splitlines():
# remove last .
if l.endswith('.'):
l = l[:-1]
# the parser doe not like VALUE xxx.
if "VALUE" in l:
l = l[:l.find("VALUE")]
# the parser does not like extra spaces between "PIC X(xxx)" and "."
indent = len(l) - len(l.lstrip())
tokens = l.split(" ")
while "" in tokens:
tokens.remove("")
if tokens and not tokens[-1].endswith("."):
tokens[-1] += "."
lines.append(" " * indent + " ".join(tokens))
return lines | Cleans the received code (the parser does not like extra spaces not a VALUE
statement). Returns the cleaned code as a list of lines.
:param code: The COBOL code to clean
:return The list of code lines (cleaned) | train | https://github.com/pyQode/pyqode.cobol/blob/eedae4e320a4b2d0c44abb2c3061091321648fb7/pyqode/cobol/api/pic.py#L17-L44 | null | from .parsers.pic import process_cobol
class PicFieldInfo(object):
"""
This structure holds the information about a PIC field.
"""
offset = 0
name = ""
level = 0
pic = ""
occurs = None
redefines = None
indexed_by = None
def get_field_infos(code, free_format):
"""
Gets the list of pic fields information from line |start| to line |end|.
:param code: code to parse
:returns: the list of pic fields info found in the specified text.
"""
offset = 0
field_infos = []
lines = _clean_code(code)
previous_offset = 0
for row in process_cobol(lines, free_format):
fi = PicFieldInfo()
fi.name = row["name"]
fi.level = row["level"]
fi.pic = row["pic"]
fi.occurs = row["occurs"]
fi.redefines = row["redefines"]
fi.indexed_by = row["indexed_by"]
# find item that was redefined and use its offset
if fi.redefines:
for fib in field_infos:
if fib.name == fi.redefines:
offset = fib.offset
# level 1 should have their offset set to 1
if fi.level == 1:
offset = 1
# level 78 have no offset
if fi.level == 78:
offset = 0
# level 77 have offset always to 1
if fi.level == 77:
offset = 1
# set item offset
fi.offset = offset
# special case: level 88 have the same level as its parent
if fi.level == 88:
fi.offset = previous_offset
else:
previous_offset = offset
field_infos.append(fi)
# compute offset of next PIC field.
if row['pic']:
offset += row['pic_info']['length']
return field_infos
|
pyQode/pyqode.cobol | pyqode/cobol/api/pic.py | get_field_infos | python | def get_field_infos(code, free_format):
offset = 0
field_infos = []
lines = _clean_code(code)
previous_offset = 0
for row in process_cobol(lines, free_format):
fi = PicFieldInfo()
fi.name = row["name"]
fi.level = row["level"]
fi.pic = row["pic"]
fi.occurs = row["occurs"]
fi.redefines = row["redefines"]
fi.indexed_by = row["indexed_by"]
# find item that was redefined and use its offset
if fi.redefines:
for fib in field_infos:
if fib.name == fi.redefines:
offset = fib.offset
# level 1 should have their offset set to 1
if fi.level == 1:
offset = 1
# level 78 have no offset
if fi.level == 78:
offset = 0
# level 77 have offset always to 1
if fi.level == 77:
offset = 1
# set item offset
fi.offset = offset
# special case: level 88 have the same level as its parent
if fi.level == 88:
fi.offset = previous_offset
else:
previous_offset = offset
field_infos.append(fi)
# compute offset of next PIC field.
if row['pic']:
offset += row['pic_info']['length']
return field_infos | Gets the list of pic fields information from line |start| to line |end|.
:param code: code to parse
:returns: the list of pic fields info found in the specified text. | train | https://github.com/pyQode/pyqode.cobol/blob/eedae4e320a4b2d0c44abb2c3061091321648fb7/pyqode/cobol/api/pic.py#L47-L103 | [
"def process_cobol(lines, free_format):\n return clean_names(denormalize_cobol(parse_cobol(clean_cobol(lines, free_format))),\n ensure_unique_names=False, strip_prefix=False,\n make_database_safe=False)\n",
"def _clean_code(code):\n \"\"\"\n Cleans the received... | from .parsers.pic import process_cobol
class PicFieldInfo(object):
"""
This structure holds the information about a PIC field.
"""
offset = 0
name = ""
level = 0
pic = ""
occurs = None
redefines = None
indexed_by = None
def _clean_code(code):
"""
Cleans the received code (the parser does not like extra spaces nor a VALUE
statement). Returns the cleaned code as a list of lines.
:param code: The COBOL code to clean
:return The list of code lines (cleaned)
"""
lines = []
# cleanup lines, the parser is very sensitive to extra spaces,...
for l in code.splitlines():
# remove last .
if l.endswith('.'):
l = l[:-1]
# the parser does not like VALUE xxx.
if "VALUE" in l:
l = l[:l.find("VALUE")]
# the parser does not like extra spaces between "PIC X(xxx)" and "."
indent = len(l) - len(l.lstrip())
tokens = l.split(" ")
while "" in tokens:
tokens.remove("")
if tokens and not tokens[-1].endswith("."):
tokens[-1] += "."
lines.append(" " * indent + " ".join(tokens))
return lines
|
pyQode/pyqode.cobol | pyqode/cobol/modes/indenter.py | IndenterMode.unindent_selection | python | def unindent_selection(self, cursor):
doc = self.editor.document()
tab_len = self.editor.tab_length
nb_lines = len(cursor.selection().toPlainText().splitlines())
if nb_lines == 0:
nb_lines = 1
block = doc.findBlock(cursor.selectionStart())
assert isinstance(block, QtGui.QTextBlock)
i = 0
_logger().debug('unindent selection: %d lines', nb_lines)
while i < nb_lines:
txt = block.text()[self.min_column:]
_logger().debug('line to unindent: %s', txt)
_logger().debug('self.editor.use_spaces_instead_of_tabs: %r',
self.editor.use_spaces_instead_of_tabs)
if self.editor.use_spaces_instead_of_tabs:
indentation = len(txt) - len(txt.lstrip())
else:
indentation = len(txt) - len(txt.replace('\t', ''))
_logger().debug('unindent line %d: %d spaces (min indent=%d)', i, indentation, self.min_column)
if indentation > 0:
c = QtGui.QTextCursor(block)
c.movePosition(c.StartOfLine, cursor.MoveAnchor)
c.movePosition(c.Right, cursor.MoveAnchor, indentation + self.min_column)
max_spaces = indentation % tab_len
if max_spaces == 0:
max_spaces = tab_len
spaces = self.count_deletable_spaces(c, max_spaces)
for _ in range(spaces):
c.deletePreviousChar()
block = block.next()
i += 1
return cursor | Un-indents selected text
:param cursor: QTextCursor | train | https://github.com/pyQode/pyqode.cobol/blob/eedae4e320a4b2d0c44abb2c3061091321648fb7/pyqode/cobol/modes/indenter.py#L70-L107 | [
"def _logger():\n return logging.getLogger(__name__)\n"
] | class IndenterMode(Mode):
""" Implements classic indentation/tabulation (Tab/Shift+Tab)
It inserts/removes tabulations (a series of spaces defined by the
tabLength settings) at the cursor position if there is no selection,
otherwise it fully indents/un-indents selected lines.
To trigger an indentation/un-indentation programatically, you must emit
:attr:`pyqode.core.api.CodeEdit.indent_requested` or
:attr:`pyqode.core.api.CodeEdit.unindent_requested`.
"""
@property
def min_column(self):
return 0 if self.editor.free_format else 7
def __init__(self):
super(IndenterMode, self).__init__()
def on_state_changed(self, state):
if state:
self.editor.indent_requested.connect(self.indent)
self.editor.unindent_requested.connect(self.unindent)
else:
self.editor.indent_requested.disconnect(self.indent)
self.editor.unindent_requested.disconnect(self.unindent)
def indent_selection(self, cursor):
"""
Indent selected text
:param cursor: QTextCursor
"""
doc = self.editor.document()
tab_len = self.editor.tab_length
cursor.beginEditBlock()
nb_lines = len(cursor.selection().toPlainText().splitlines())
if (cursor.atBlockStart() and cursor.position() == cursor.selectionEnd()):
nb_lines += 1
block = doc.findBlock(cursor.selectionStart())
i = 0
# indent every lines
while i < nb_lines:
nb_space_to_add = tab_len
cursor = QtGui.QTextCursor(block)
cursor.movePosition(cursor.StartOfLine, cursor.MoveAnchor)
cursor.movePosition(cursor.Right, cursor.MoveAnchor, self.min_column)
if self.editor.use_spaces_instead_of_tabs:
for _ in range(nb_space_to_add):
cursor.insertText(" ")
else:
cursor.insertText('\t')
block = block.next()
i += 1
cursor.endEditBlock()
def indent(self):
"""
Indents text at cursor position.
"""
cursor = self.editor.textCursor()
assert isinstance(cursor, QtGui.QTextCursor)
if cursor.hasSelection():
self.indent_selection(cursor)
else:
# simply insert indentation at the cursor position
tab_len = self.editor.tab_length
if cursor.positionInBlock() < self.min_column and not cursor.atBlockEnd():
cursor.movePosition(cursor.Right, cursor.MoveAnchor, self.min_column)
cursor.beginEditBlock()
if self.editor.use_spaces_instead_of_tabs:
nb_space_to_add = tab_len - (cursor.positionInBlock() - self.min_column) % tab_len
cursor.insertText(nb_space_to_add * " ")
else:
cursor.insertText('\t')
cursor.endEditBlock()
self.editor.setTextCursor(cursor)
def count_deletable_spaces(self, cursor, max_spaces):
# count the number of spaces deletable, stop at tab len
max_spaces = abs(max_spaces)
if max_spaces > self.editor.tab_length:
max_spaces = self.editor.tab_length
spaces = 0
trav_cursor = QtGui.QTextCursor(cursor)
while spaces < max_spaces or trav_cursor.atBlockStart():
pos = trav_cursor.position()
trav_cursor.movePosition(cursor.Left, cursor.KeepAnchor)
char = trav_cursor.selectedText()
if char == " ":
spaces += 1
else:
break
trav_cursor.setPosition(pos - 1)
return spaces
def unindent(self):
"""
Un-indents text at cursor position.
"""
_logger().debug('unindent')
cursor = self.editor.textCursor()
_logger().debug('cursor has selection %r', cursor.hasSelection())
if cursor.hasSelection():
cursor.beginEditBlock()
self.unindent_selection(cursor)
cursor.endEditBlock()
self.editor.setTextCursor(cursor)
else:
tab_len = self.editor.tab_length
indentation = cursor.positionInBlock()
indentation -= self.min_column
if indentation == 0:
return
max_spaces = indentation % tab_len
if max_spaces == 0:
max_spaces = tab_len
spaces = self.count_deletable_spaces(cursor, max_spaces)
_logger().info('deleting %d space before cursor' % spaces)
cursor.beginEditBlock()
for _ in range(spaces):
cursor.deletePreviousChar()
cursor.endEditBlock()
self.editor.setTextCursor(cursor)
_logger().debug(cursor.block().text())
|
pyQode/pyqode.cobol | pyqode/cobol/modes/indenter.py | IndenterMode.indent | python | def indent(self):
cursor = self.editor.textCursor()
assert isinstance(cursor, QtGui.QTextCursor)
if cursor.hasSelection():
self.indent_selection(cursor)
else:
# simply insert indentation at the cursor position
tab_len = self.editor.tab_length
if cursor.positionInBlock() < self.min_column and not cursor.atBlockEnd():
cursor.movePosition(cursor.Right, cursor.MoveAnchor, self.min_column)
cursor.beginEditBlock()
if self.editor.use_spaces_instead_of_tabs:
nb_space_to_add = tab_len - (cursor.positionInBlock() - self.min_column) % tab_len
cursor.insertText(nb_space_to_add * " ")
else:
cursor.insertText('\t')
cursor.endEditBlock()
self.editor.setTextCursor(cursor) | Indents text at cursor position. | train | https://github.com/pyQode/pyqode.cobol/blob/eedae4e320a4b2d0c44abb2c3061091321648fb7/pyqode/cobol/modes/indenter.py#L109-L129 | null | class IndenterMode(Mode):
""" Implements classic indentation/tabulation (Tab/Shift+Tab)
It inserts/removes tabulations (a series of spaces defined by the
tabLength settings) at the cursor position if there is no selection,
otherwise it fully indents/un-indents selected lines.
To trigger an indentation/un-indentation programatically, you must emit
:attr:`pyqode.core.api.CodeEdit.indent_requested` or
:attr:`pyqode.core.api.CodeEdit.unindent_requested`.
"""
@property
def min_column(self):
return 0 if self.editor.free_format else 7
def __init__(self):
super(IndenterMode, self).__init__()
def on_state_changed(self, state):
if state:
self.editor.indent_requested.connect(self.indent)
self.editor.unindent_requested.connect(self.unindent)
else:
self.editor.indent_requested.disconnect(self.indent)
self.editor.unindent_requested.disconnect(self.unindent)
def indent_selection(self, cursor):
"""
Indent selected text
:param cursor: QTextCursor
"""
doc = self.editor.document()
tab_len = self.editor.tab_length
cursor.beginEditBlock()
nb_lines = len(cursor.selection().toPlainText().splitlines())
if (cursor.atBlockStart() and cursor.position() == cursor.selectionEnd()):
nb_lines += 1
block = doc.findBlock(cursor.selectionStart())
i = 0
# indent every lines
while i < nb_lines:
nb_space_to_add = tab_len
cursor = QtGui.QTextCursor(block)
cursor.movePosition(cursor.StartOfLine, cursor.MoveAnchor)
cursor.movePosition(cursor.Right, cursor.MoveAnchor, self.min_column)
if self.editor.use_spaces_instead_of_tabs:
for _ in range(nb_space_to_add):
cursor.insertText(" ")
else:
cursor.insertText('\t')
block = block.next()
i += 1
cursor.endEditBlock()
def unindent_selection(self, cursor):
"""
Un-indents selected text
:param cursor: QTextCursor
"""
doc = self.editor.document()
tab_len = self.editor.tab_length
nb_lines = len(cursor.selection().toPlainText().splitlines())
if nb_lines == 0:
nb_lines = 1
block = doc.findBlock(cursor.selectionStart())
assert isinstance(block, QtGui.QTextBlock)
i = 0
_logger().debug('unindent selection: %d lines', nb_lines)
while i < nb_lines:
txt = block.text()[self.min_column:]
_logger().debug('line to unindent: %s', txt)
_logger().debug('self.editor.use_spaces_instead_of_tabs: %r',
self.editor.use_spaces_instead_of_tabs)
if self.editor.use_spaces_instead_of_tabs:
indentation = len(txt) - len(txt.lstrip())
else:
indentation = len(txt) - len(txt.replace('\t', ''))
_logger().debug('unindent line %d: %d spaces (min indent=%d)', i, indentation, self.min_column)
if indentation > 0:
c = QtGui.QTextCursor(block)
c.movePosition(c.StartOfLine, cursor.MoveAnchor)
c.movePosition(c.Right, cursor.MoveAnchor, indentation + self.min_column)
max_spaces = indentation % tab_len
if max_spaces == 0:
max_spaces = tab_len
spaces = self.count_deletable_spaces(c, max_spaces)
for _ in range(spaces):
c.deletePreviousChar()
block = block.next()
i += 1
return cursor
def count_deletable_spaces(self, cursor, max_spaces):
# count the number of spaces deletable, stop at tab len
max_spaces = abs(max_spaces)
if max_spaces > self.editor.tab_length:
max_spaces = self.editor.tab_length
spaces = 0
trav_cursor = QtGui.QTextCursor(cursor)
while spaces < max_spaces or trav_cursor.atBlockStart():
pos = trav_cursor.position()
trav_cursor.movePosition(cursor.Left, cursor.KeepAnchor)
char = trav_cursor.selectedText()
if char == " ":
spaces += 1
else:
break
trav_cursor.setPosition(pos - 1)
return spaces
def unindent(self):
"""
Un-indents text at cursor position.
"""
_logger().debug('unindent')
cursor = self.editor.textCursor()
_logger().debug('cursor has selection %r', cursor.hasSelection())
if cursor.hasSelection():
cursor.beginEditBlock()
self.unindent_selection(cursor)
cursor.endEditBlock()
self.editor.setTextCursor(cursor)
else:
tab_len = self.editor.tab_length
indentation = cursor.positionInBlock()
indentation -= self.min_column
if indentation == 0:
return
max_spaces = indentation % tab_len
if max_spaces == 0:
max_spaces = tab_len
spaces = self.count_deletable_spaces(cursor, max_spaces)
_logger().info('deleting %d space before cursor' % spaces)
cursor.beginEditBlock()
for _ in range(spaces):
cursor.deletePreviousChar()
cursor.endEditBlock()
self.editor.setTextCursor(cursor)
_logger().debug(cursor.block().text())
|
pyQode/pyqode.cobol | pyqode/cobol/modes/indenter.py | IndenterMode.unindent | python | def unindent(self):
_logger().debug('unindent')
cursor = self.editor.textCursor()
_logger().debug('cursor has selection %r', cursor.hasSelection())
if cursor.hasSelection():
cursor.beginEditBlock()
self.unindent_selection(cursor)
cursor.endEditBlock()
self.editor.setTextCursor(cursor)
else:
tab_len = self.editor.tab_length
indentation = cursor.positionInBlock()
indentation -= self.min_column
if indentation == 0:
return
max_spaces = indentation % tab_len
if max_spaces == 0:
max_spaces = tab_len
spaces = self.count_deletable_spaces(cursor, max_spaces)
_logger().info('deleting %d space before cursor' % spaces)
cursor.beginEditBlock()
for _ in range(spaces):
cursor.deletePreviousChar()
cursor.endEditBlock()
self.editor.setTextCursor(cursor)
_logger().debug(cursor.block().text()) | Un-indents text at cursor position. | train | https://github.com/pyQode/pyqode.cobol/blob/eedae4e320a4b2d0c44abb2c3061091321648fb7/pyqode/cobol/modes/indenter.py#L149-L178 | [
"def _logger():\n return logging.getLogger(__name__)\n"
] | class IndenterMode(Mode):
""" Implements classic indentation/tabulation (Tab/Shift+Tab)
It inserts/removes tabulations (a series of spaces defined by the
tabLength settings) at the cursor position if there is no selection,
otherwise it fully indents/un-indents selected lines.
To trigger an indentation/un-indentation programatically, you must emit
:attr:`pyqode.core.api.CodeEdit.indent_requested` or
:attr:`pyqode.core.api.CodeEdit.unindent_requested`.
"""
@property
def min_column(self):
return 0 if self.editor.free_format else 7
def __init__(self):
super(IndenterMode, self).__init__()
def on_state_changed(self, state):
if state:
self.editor.indent_requested.connect(self.indent)
self.editor.unindent_requested.connect(self.unindent)
else:
self.editor.indent_requested.disconnect(self.indent)
self.editor.unindent_requested.disconnect(self.unindent)
def indent_selection(self, cursor):
"""
Indent selected text
:param cursor: QTextCursor
"""
doc = self.editor.document()
tab_len = self.editor.tab_length
cursor.beginEditBlock()
nb_lines = len(cursor.selection().toPlainText().splitlines())
if (cursor.atBlockStart() and cursor.position() == cursor.selectionEnd()):
nb_lines += 1
block = doc.findBlock(cursor.selectionStart())
i = 0
# indent every lines
while i < nb_lines:
nb_space_to_add = tab_len
cursor = QtGui.QTextCursor(block)
cursor.movePosition(cursor.StartOfLine, cursor.MoveAnchor)
cursor.movePosition(cursor.Right, cursor.MoveAnchor, self.min_column)
if self.editor.use_spaces_instead_of_tabs:
for _ in range(nb_space_to_add):
cursor.insertText(" ")
else:
cursor.insertText('\t')
block = block.next()
i += 1
cursor.endEditBlock()
def unindent_selection(self, cursor):
"""
Un-indents selected text
:param cursor: QTextCursor
"""
doc = self.editor.document()
tab_len = self.editor.tab_length
nb_lines = len(cursor.selection().toPlainText().splitlines())
if nb_lines == 0:
nb_lines = 1
block = doc.findBlock(cursor.selectionStart())
assert isinstance(block, QtGui.QTextBlock)
i = 0
_logger().debug('unindent selection: %d lines', nb_lines)
while i < nb_lines:
txt = block.text()[self.min_column:]
_logger().debug('line to unindent: %s', txt)
_logger().debug('self.editor.use_spaces_instead_of_tabs: %r',
self.editor.use_spaces_instead_of_tabs)
if self.editor.use_spaces_instead_of_tabs:
indentation = len(txt) - len(txt.lstrip())
else:
indentation = len(txt) - len(txt.replace('\t', ''))
_logger().debug('unindent line %d: %d spaces (min indent=%d)', i, indentation, self.min_column)
if indentation > 0:
c = QtGui.QTextCursor(block)
c.movePosition(c.StartOfLine, cursor.MoveAnchor)
c.movePosition(c.Right, cursor.MoveAnchor, indentation + self.min_column)
max_spaces = indentation % tab_len
if max_spaces == 0:
max_spaces = tab_len
spaces = self.count_deletable_spaces(c, max_spaces)
for _ in range(spaces):
c.deletePreviousChar()
block = block.next()
i += 1
return cursor
def indent(self):
"""
Indents text at cursor position.
"""
cursor = self.editor.textCursor()
assert isinstance(cursor, QtGui.QTextCursor)
if cursor.hasSelection():
self.indent_selection(cursor)
else:
# simply insert indentation at the cursor position
tab_len = self.editor.tab_length
if cursor.positionInBlock() < self.min_column and not cursor.atBlockEnd():
cursor.movePosition(cursor.Right, cursor.MoveAnchor, self.min_column)
cursor.beginEditBlock()
if self.editor.use_spaces_instead_of_tabs:
nb_space_to_add = tab_len - (cursor.positionInBlock() - self.min_column) % tab_len
cursor.insertText(nb_space_to_add * " ")
else:
cursor.insertText('\t')
cursor.endEditBlock()
self.editor.setTextCursor(cursor)
def count_deletable_spaces(self, cursor, max_spaces):
# count the number of spaces deletable, stop at tab len
max_spaces = abs(max_spaces)
if max_spaces > self.editor.tab_length:
max_spaces = self.editor.tab_length
spaces = 0
trav_cursor = QtGui.QTextCursor(cursor)
while spaces < max_spaces or trav_cursor.atBlockStart():
pos = trav_cursor.position()
trav_cursor.movePosition(cursor.Left, cursor.KeepAnchor)
char = trav_cursor.selectedText()
if char == " ":
spaces += 1
else:
break
trav_cursor.setPosition(pos - 1)
return spaces
|
pyQode/pyqode.cobol | pyqode/cobol/widgets/pic_offsets.py | PicOffsetsTable.set_editor | python | def set_editor(self, editor):
if self._editor is not None:
try:
self._editor.offset_calculator.pic_infos_available.disconnect(
self._update)
except (AttributeError, RuntimeError, ReferenceError):
# see https://github.com/OpenCobolIDE/OpenCobolIDE/issues/89
pass
self._editor = weakref.proxy(editor) if editor else editor
try:
self._editor.offset_calculator.pic_infos_available.connect(
self._update)
except AttributeError:
pass | Sets the associated editor. When the editor's offset calculator mode
emits the signal pic_infos_available, the table is automatically
refreshed.
You can also refresh manually by calling :meth:`update_pic_infos`. | train | https://github.com/pyQode/pyqode.cobol/blob/eedae4e320a4b2d0c44abb2c3061091321648fb7/pyqode/cobol/widgets/pic_offsets.py#L25-L45 | null | class PicOffsetsTable(QtWidgets.QTableWidget):
"""
Displays the pic field offsets.
"""
#: signal emitted when the widget should be shown (i.e. when the pic info
#: has been updated)
show_requested = QtCore.Signal()
def __init__(self, parent=None):
super().__init__(parent)
self._update([])
self._editor = None
self.verticalHeader().setVisible(False)
self.setColumnCount(4)
self.setHorizontalHeaderLabels(
['Level', 'Name', 'Offset', 'PIC'])
self.setEditTriggers(self.NoEditTriggers)
self.setSelectionBehavior(self.SelectRows)
self.setSelectionMode(self.SingleSelection)
def update_pic_infos(self, infos):
"""
Update the pic field information shown in the table.
"""
self._update(infos)
def _update(self, infos):
self.clearContents()
self.setRowCount(len(infos))
# process each info in a separate row
for i, info in enumerate(infos):
self.setItem(
i, 0, QtWidgets.QTableWidgetItem("%s" % info.level))
self.setItem(
i, 1, QtWidgets.QTableWidgetItem(info.name))
self.setItem(
i, 2, QtWidgets.QTableWidgetItem("%s" % info.offset))
self.setItem(
i, 3, QtWidgets.QTableWidgetItem(info.pic))
self.setSortingEnabled(False)
self.show_requested.emit()
|
pyQode/pyqode.cobol | pyqode/cobol/api/parsers/names.py | cmp_name | python | def cmp_name(first_node, second_node):
if len(first_node.children) == len(second_node.children):
for first_child, second_child in zip(first_node.children,
second_node.children):
for key in first_child.__dict__.keys():
if key.startswith('_'):
continue
if first_child.__dict__[key] != second_child.__dict__[key]:
return 1
ret_val = cmp_name(first_child, second_child)
if ret_val != 0:
return 1
else:
return 1
return 0 | Compare two names recursively.
:param first_node: First node
:param second_node: Second node
:return: 0 if same name, 1 if names are different.
"def cmp_name(first_node, second_node):\n \"\"\"\n Compare two name recursively.\n\n :param first_node: First node\n\n :param second_node: Second state\n\n :return: 0 if same name, 1 if names are differents.\n \"\"\"\n if len(first_node.children) == len(second_node.children):\n for first... | """
This parser parses the defined names in a cobol program and stores them
under the appropriate division/section.
The code comes from OpenCobolIDE and has been left mostly intact.
"""
import logging
import re
from pyqode.cobol.api import icons
from pyqode.cobol.api.keywords import ALL_KEYWORDS
from pyqode.core.share import Definition
from pyqode.cobol.api import regex
def _logger():
return logging.getLogger(__name__)
class Name(object):
"""
A Name is a node in the simplified abstract syntax tree.
"""
class Type:
"""
Enumerates the possible name types (div, section, paragraph,...)
"""
Root = -1
Division = 0
Section = 1
Variable = 2
Paragraph = 3
def __init__(self, node_type, line, column, name, description=None):
self.node_type = node_type
self.line = line
self.column = column
self.end_line = -1
self.name = name
if description is None:
description = name
self.description = description.replace(".", "")
self.children = []
def add_child(self, child):
"""
Add a child to the node
:param child: The child node to add
"""
self.children.append(child)
child.parent = self
def find(self, name):
"""
Finds a possible child whose name match the name parameter.
:param name: name of the child node to look up
:type name: str
:return: DocumentNode or None
"""
for c in self.children:
if c.name == name:
return c
result = c.find(name)
if result:
return result
def __repr__(self):
type_names = {
self.Type.Root: "Root",
self.Type.Division: "Division",
self.Type.Paragraph: "Paragraph",
self.Type.Section: "Section",
self.Type.Variable: "Variable"
}
return "%s(name=%s, line=%s, end_line=%s)" % (
type_names[self.node_type], self.name, self.line, self.end_line)
def to_definition(self):
"""
Converts the name instance to a pyqode.core.share.Definition
"""
icon = {
Name.Type.Root: icons.ICON_MIMETYPE,
Name.Type.Division: icons.ICON_DIVISION,
Name.Type.Section: icons.ICON_SECTION,
Name.Type.Variable: icons.ICON_VAR,
Name.Type.Paragraph: icons.ICON_FUNC
}[self.node_type]
d = Definition(self.name, self.line, self.column, icon, self.description)
for ch in self.children:
d.add_child(ch.to_definition())
return d
def parse_division(l, c, line, root_node, last_section_node):
"""
Extracts a division node from a line
:param l: The line number (starting from 0)
:param c: The column number
:param line: The line string (without indentation)
:param root_node: The document root node.
:return: tuple(last_div_node, last_section_node)
"""
name = line
name = name.replace(".", "")
# trim whitespaces/tabs between XXX and DIVISION
tokens = [t for t in name.split(' ') if t]
node = Name(Name.Type.Division, l, c, '%s %s' % (tokens[0], tokens[1]))
root_node.add_child(node)
last_div_node = node
# do not take previous sections into account
if last_section_node:
last_section_node.end_line = l
last_section_node = None
return last_div_node, last_section_node
def parse_section(l, c, last_div_node, last_vars, line):
"""
Extracts a section node from a line.
:param l: The line number (starting from 0)
:param last_div_node: The last div node found
:param last_vars: The last vars dict
:param line: The line string (without indentation)
:return: last_section_node
"""
name = line
name = name.replace(".", "")
node = Name(Name.Type.Section, l, c, name)
last_div_node.add_child(node)
last_section_node = node
# do not take previous var into account
last_vars.clear()
return last_section_node
def parse_pic_field(l, c, last_section_node, last_vars, line):
"""
Parse a pic field line. Return a VariableNode or None in case of malformed
code.
:param l: The line number (starting from 0)
:param c: The column number (starting from 0)
:param last_section_node: The last section node found
:param last_vars: The last vars dict
:param line: The line string (without indentation)
:return: The extracted variable node
"""
parent_node = None
raw_tokens = line.split(" ")
tokens = []
for t in raw_tokens:
if not t.isspace() and t != "":
tokens.append(t)
try:
if tokens[0].upper() == "FD":
lvl = 1
else:
lvl = int(tokens[0], 16)
name = tokens[1]
except ValueError:
return None
except IndexError:
# line not complete
return None
name = name.replace(".", "")
if name in ALL_KEYWORDS or name in ['-', '/']:
return None
m = re.findall(r'pic.*\.', line, re.IGNORECASE)
if m:
description = ' '.join([t for t in m[0].split(' ') if t])
else:
description = line
try:
index = description.lower().index('value')
except ValueError:
description = description.replace('.', '')
else:
description = description[index:].replace('value', '')[:80]
if lvl == int('78', 16):
lvl = 1
if lvl == 1:
parent_node = last_section_node
last_vars.clear()
else:
# find parent level
levels = sorted(last_vars.keys(), reverse=True)
for lv in levels:
if lv < lvl:
parent_node = last_vars[lv]
break
if not parent_node:
# malformed code
return None
# todo: enabled this with an option in pyqode 3.0
# if lvl == int('88', 16):
# return None
if not name or name.upper().strip() == 'PIC':
name = 'FILLER'
node = Name(Name.Type.Variable, l, c, name, description)
parent_node.add_child(node)
last_vars[lvl] = node
# remove closed variables
levels = sorted(last_vars.keys(), reverse=True)
for l in levels:
if l > lvl:
last_vars.pop(l)
return node
def parse_paragraph(l, c, last_div_node, last_section_node, line):
"""
Extracts a paragraph node
:param l: The line number (starting from 0)
:param last_div_node: The last div node found
:param last_section_node: The last section node found
:param line: The line string (without indentation)
:return: The extracted paragraph node
"""
if not line.endswith('.'):
return None
name = line.replace(".", "")
if name.strip() == '':
return None
if name.upper() in ALL_KEYWORDS:
return None
parent_node = last_div_node
if last_section_node is not None:
parent_node = last_section_node
node = Name(Name.Type.Paragraph, l, c, name)
parent_node.add_child(node)
return node
def defined_names(code, free_format=False):
"""
Parses a cobol document and builds a name tree.
For convenience, it also returns the list of variables (PIC) and
procedures (paragraphs).
:param code: cobol code to parse. Default is None.
:param free_format: True if the source code must be considered as coded
in free format.
:return: A tuple made up of the name tree root node, the list of variables
and the list of paragraphs.
:rtype: Name, list of Name, list of Name
"""
root_node = Name(Name.Type.Root, 0, 0, 'root')
variables = []
paragraphs = []
lines = code.splitlines()
last_div_node = None
last_section_node = None
last_vars = {}
last_par = None
for i, line in enumerate(lines):
if not free_format:
if len(line) >= 6:
line = 6 * " " + line[6:]
column = len(line) - len(line.lstrip())
if not line.isspace() and not line.strip().startswith("*"):
line = line.strip()
# DIVISIONS
if regex.DIVISION.indexIn(line.upper()) != -1 and 'EXIT' not in line.upper():
# remember
if last_div_node is not None:
last_div_node.end_line = i
last_div_node, last_section_node = parse_division(
i, column, line, root_node, last_section_node)
# SECTIONS
elif regex.SECTION.indexIn(line.upper()) != -1 and 'EXIT' not in line.upper():
if last_section_node:
last_section_node.end_line = i
if last_div_node is None:
name = 'PROCEDURE DIVISION'
for to_check in ['WORKING-STORAGE', 'LOCAL-STORAGE', 'LINKAGE', 'REPORT ', 'SCREEN']:
if to_check in line.upper():
name = 'DATA DIVISION'
last_div_node = Name(Name.Type.Division, -1, -1, name, name)
root_node.add_child(last_div_node)
last_section_node = parse_section(
i, column, last_div_node, last_vars, line)
# VARIABLES
# PARAGRAPHS
elif (last_div_node is not None and
"PROCEDURE DIVISION" in last_div_node.name.upper()):
tokens = line.upper().split(" ")
if len(tokens) == 1 and not tokens[0] in ALL_KEYWORDS:
p = parse_paragraph(
i, column, last_div_node, last_section_node, line)
if p:
paragraphs.append(p)
if last_par:
last_par.end_line = i
last_par = p
elif regex.VAR_PATTERN.indexIn(line.upper()) != -1 or line.upper().lstrip().startswith('FD'):
if last_div_node is None:
last_div_node = Name(Name.Type.Division, -1, -1, 'DATA DIVISION', '')
root_node.add_child(last_div_node)
if last_section_node is None:
last_section_node = Name(Name.Type.Section, -1, -1, 'WORKING-STORAGE SECTION', '')
last_div_node.add_child(last_section_node)
v = parse_pic_field(
i, column, last_section_node, last_vars, line)
if v:
variables.append(v)
# close last div
if last_par:
last_par.end_line = len(lines) - 1
if last_div_node:
last_div_node.end_line = len(lines)
if root_node and last_div_node:
root_node.end_line = last_div_node.end_line
return root_node, variables, paragraphs
if __name__ == '__main__':
with open('/Users/Colin/test.cbl') as f:
root, variables, paragraphs = defined_names(f.read())
print(root)
|
pyQode/pyqode.cobol | pyqode/cobol/api/parsers/names.py | parse_division | python | def parse_division(l, c, line, root_node, last_section_node):
name = line
name = name.replace(".", "")
# trim whitespaces/tabs between XXX and DIVISION
tokens = [t for t in name.split(' ') if t]
node = Name(Name.Type.Division, l, c, '%s %s' % (tokens[0], tokens[1]))
root_node.add_child(node)
last_div_node = node
# do not take previous sections into account
if last_section_node:
last_section_node.end_line = l
last_section_node = None
return last_div_node, last_section_node | Extracts a division node from a line
:param l: The line number (starting from 0)
:param c: The column number
:param line: The line string (without indentation)
:param root_node: The document root node.
:return: tuple(last_div_node, last_section_node) | train | https://github.com/pyQode/pyqode.cobol/blob/eedae4e320a4b2d0c44abb2c3061091321648fb7/pyqode/cobol/api/parsers/names.py#L125-L150 | null | """
This parser parses the defined names in a cobol program and stores them
under the appropriate division/section.
The code comes from OpenCobolIDE and has been left mostly intact.
"""
import logging
import re
from pyqode.cobol.api import icons
from pyqode.cobol.api.keywords import ALL_KEYWORDS
from pyqode.core.share import Definition
from pyqode.cobol.api import regex
def _logger():
return logging.getLogger(__name__)
class Name(object):
"""
A Name is a node in the simplified abstract syntax tree.
"""
class Type:
"""
Enumerates the possible name types (div, section, paragraph,...)
"""
Root = -1
Division = 0
Section = 1
Variable = 2
Paragraph = 3
def __init__(self, node_type, line, column, name, description=None):
self.node_type = node_type
self.line = line
self.column = column
self.end_line = -1
self.name = name
if description is None:
description = name
self.description = description.replace(".", "")
self.children = []
def add_child(self, child):
"""
Add a child to the node
:param child: The child node to add
"""
self.children.append(child)
child.parent = self
def find(self, name):
"""
Finds a possible child whose name match the name parameter.
:param name: name of the child node to look up
:type name: str
:return: DocumentNode or None
"""
for c in self.children:
if c.name == name:
return c
result = c.find(name)
if result:
return result
def __repr__(self):
type_names = {
self.Type.Root: "Root",
self.Type.Division: "Division",
self.Type.Paragraph: "Paragraph",
self.Type.Section: "Section",
self.Type.Variable: "Variable"
}
return "%s(name=%s, line=%s, end_line=%s)" % (
type_names[self.node_type], self.name, self.line, self.end_line)
def to_definition(self):
"""
Converts the name instance to a pyqode.core.share.Definition
"""
icon = {
Name.Type.Root: icons.ICON_MIMETYPE,
Name.Type.Division: icons.ICON_DIVISION,
Name.Type.Section: icons.ICON_SECTION,
Name.Type.Variable: icons.ICON_VAR,
Name.Type.Paragraph: icons.ICON_FUNC
}[self.node_type]
d = Definition(self.name, self.line, self.column, icon, self.description)
for ch in self.children:
d.add_child(ch.to_definition())
return d
def cmp_name(first_node, second_node):
"""
Compare two names recursively.
:param first_node: First node
:param second_node: Second node
:return: 0 if same name, 1 if names are different.
"""
if len(first_node.children) == len(second_node.children):
for first_child, second_child in zip(first_node.children,
second_node.children):
for key in first_child.__dict__.keys():
if key.startswith('_'):
continue
if first_child.__dict__[key] != second_child.__dict__[key]:
return 1
ret_val = cmp_name(first_child, second_child)
if ret_val != 0:
return 1
else:
return 1
return 0
def parse_section(l, c, last_div_node, last_vars, line):
"""
Extracts a section node from a line.
:param l: The line number (starting from 0)
:param last_div_node: The last div node found
:param last_vars: The last vars dict
:param line: The line string (without indentation)
:return: last_section_node
"""
name = line
name = name.replace(".", "")
node = Name(Name.Type.Section, l, c, name)
last_div_node.add_child(node)
last_section_node = node
# do not take previous var into account
last_vars.clear()
return last_section_node
def parse_pic_field(l, c, last_section_node, last_vars, line):
"""
Parse a pic field line. Return A VariableNode or None in case of malformed
code.
:param l: The line number (starting from 0)
:param c: The column number (starting from 0)
:param last_section_node: The last section node found
:param last_vars: The last vars dict
:param line: The line string (without indentation)
:return: The extracted variable node
"""
parent_node = None
raw_tokens = line.split(" ")
tokens = []
for t in raw_tokens:
if not t.isspace() and t != "":
tokens.append(t)
try:
if tokens[0].upper() == "FD":
lvl = 1
else:
lvl = int(tokens[0], 16)
name = tokens[1]
except ValueError:
return None
except IndexError:
# line not complete
return None
name = name.replace(".", "")
if name in ALL_KEYWORDS or name in ['-', '/']:
return None
m = re.findall(r'pic.*\.', line, re.IGNORECASE)
if m:
description = ' '.join([t for t in m[0].split(' ') if t])
else:
description = line
try:
index = description.lower().index('value')
except ValueError:
description = description.replace('.', '')
else:
description = description[index:].replace('value', '')[:80]
if lvl == int('78', 16):
lvl = 1
if lvl == 1:
parent_node = last_section_node
last_vars.clear()
else:
# find parent level
levels = sorted(last_vars.keys(), reverse=True)
for lv in levels:
if lv < lvl:
parent_node = last_vars[lv]
break
if not parent_node:
# malformed code
return None
# todo: enabled this with an option in pyqode 3.0
# if lvl == int('88', 16):
# return None
if not name or name.upper().strip() == 'PIC':
name = 'FILLER'
node = Name(Name.Type.Variable, l, c, name, description)
parent_node.add_child(node)
last_vars[lvl] = node
# remove closed variables
levels = sorted(last_vars.keys(), reverse=True)
for l in levels:
if l > lvl:
last_vars.pop(l)
return node
def parse_paragraph(l, c, last_div_node, last_section_node, line):
"""
Extracts a paragraph node
:param l: The line number (starting from 0)
:param last_div_node: The last div node found
:param last_section_node: The last section node found
:param line: The line string (without indentation)
:return: The extracted paragraph node
"""
if not line.endswith('.'):
return None
name = line.replace(".", "")
if name.strip() == '':
return None
if name.upper() in ALL_KEYWORDS:
return None
parent_node = last_div_node
if last_section_node is not None:
parent_node = last_section_node
node = Name(Name.Type.Paragraph, l, c, name)
parent_node.add_child(node)
return node
def defined_names(code, free_format=False):
"""
Parses a cobol document and build a name tree.
For convenience, it also returns the list of variables (PIC) and
procedures (paragraphs).
:param code: cobol code to parse. Default is None.
:param free_format: True if the source code must be considered as coded
in free format.
:return: A tuple made up of the name tree root node, the list of variables
and the list of paragraphs.
:rtype: Name, list of Name, list of Name
"""
root_node = Name(Name.Type.Root, 0, 0, 'root')
variables = []
paragraphs = []
lines = code.splitlines()
last_div_node = None
last_section_node = None
last_vars = {}
last_par = None
for i, line in enumerate(lines):
if not free_format:
if len(line) >= 6:
line = 6 * " " + line[6:]
column = len(line) - len(line.lstrip())
if not line.isspace() and not line.strip().startswith("*"):
line = line.strip()
# DIVISIONS
if regex.DIVISION.indexIn(line.upper()) != -1 and 'EXIT' not in line.upper():
# remember
if last_div_node is not None:
last_div_node.end_line = i
last_div_node, last_section_node = parse_division(
i, column, line, root_node, last_section_node)
# SECTIONS
elif regex.SECTION.indexIn(line.upper()) != -1 and 'EXIT' not in line.upper():
if last_section_node:
last_section_node.end_line = i
if last_div_node is None:
name = 'PROCEDURE DIVISION'
for to_check in ['WORKING-STORAGE', 'LOCAL-STORAGE', 'LINKAGE', 'REPORT ', 'SCREEN']:
if to_check in line.upper():
name = 'DATA DIVISION'
last_div_node = Name(Name.Type.Division, -1, -1, name, name)
root_node.add_child(last_div_node)
last_section_node = parse_section(
i, column, last_div_node, last_vars, line)
# VARIABLES
# PARAGRAPHS
elif (last_div_node is not None and
"PROCEDURE DIVISION" in last_div_node.name.upper()):
tokens = line.upper().split(" ")
if len(tokens) == 1 and not tokens[0] in ALL_KEYWORDS:
p = parse_paragraph(
i, column, last_div_node, last_section_node, line)
if p:
paragraphs.append(p)
if last_par:
last_par.end_line = i
last_par = p
elif regex.VAR_PATTERN.indexIn(line.upper()) != -1 or line.upper().lstrip().startswith('FD'):
if last_div_node is None:
last_div_node = Name(Name.Type.Division, -1, -1, 'DATA DIVISION', '')
root_node.add_child(last_div_node)
if last_section_node is None:
last_section_node = Name(Name.Type.Section, -1, -1, 'WORKING-STORAGE SECTION', '')
last_div_node.add_child(last_section_node)
v = parse_pic_field(
i, column, last_section_node, last_vars, line)
if v:
variables.append(v)
# close last div
if last_par:
last_par.end_line = len(lines) - 1
if last_div_node:
last_div_node.end_line = len(lines)
if root_node and last_div_node:
root_node.end_line = last_div_node.end_line
return root_node, variables, paragraphs
if __name__ == '__main__':
with open('/Users/Colin/test.cbl') as f:
root, variables, paragraphs = defined_names(f.read())
print(root)
|
pyQode/pyqode.cobol | pyqode/cobol/api/parsers/names.py | parse_section | python | def parse_section(l, c, last_div_node, last_vars, line):
name = line
name = name.replace(".", "")
node = Name(Name.Type.Section, l, c, name)
last_div_node.add_child(node)
last_section_node = node
# do not take previous var into account
last_vars.clear()
return last_section_node | Extracts a section node from a line.
:param l: The line number (starting from 0)
:param last_div_node: The last div node found
:param last_vars: The last vars dict
:param line: The line string (without indentation)
:return: last_section_node | train | https://github.com/pyQode/pyqode.cobol/blob/eedae4e320a4b2d0c44abb2c3061091321648fb7/pyqode/cobol/api/parsers/names.py#L153-L174 | null | """
This parser parses the defined names in a cobol program and store them
under the appropriate division/section.
The code comes from OpenCobolIDE and has been left mostly intact.
"""
import logging
import re
from pyqode.cobol.api import icons
from pyqode.cobol.api.keywords import ALL_KEYWORDS
from pyqode.core.share import Definition
from pyqode.cobol.api import regex
def _logger():
return logging.getLogger(__name__)
class Name(object):
"""
A Name is a node in the simplified abstract syntax tree.
"""
class Type:
"""
Enumerates the possible name types (div, section, paragraph,...)
"""
Root = -1
Division = 0
Section = 1
Variable = 2
Paragraph = 3
def __init__(self, node_type, line, column, name, description=None):
self.node_type = node_type
self.line = line
self.column = column
self.end_line = -1
self.name = name
if description is None:
description = name
self.description = description.replace(".", "")
self.children = []
def add_child(self, child):
"""
Add a child to the node
:param child: The child node to add
"""
self.children.append(child)
child.parent = self
def find(self, name):
"""
Finds a possible child whose name match the name parameter.
:param name: name of the child node to look up
:type name: str
:return: DocumentNode or None
"""
for c in self.children:
if c.name == name:
return c
result = c.find(name)
if result:
return result
def __repr__(self):
type_names = {
self.Type.Root: "Root",
self.Type.Division: "Division",
self.Type.Paragraph: "Paragraph",
self.Type.Section: "Section",
self.Type.Variable: "Variable"
}
return "%s(name=%s, line=%s, end_line=%s)" % (
type_names[self.node_type], self.name, self.line, self.end_line)
def to_definition(self):
"""
Converts the name instance to a pyqode.core.share.Definition
"""
icon = {
Name.Type.Root: icons.ICON_MIMETYPE,
Name.Type.Division: icons.ICON_DIVISION,
Name.Type.Section: icons.ICON_SECTION,
Name.Type.Variable: icons.ICON_VAR,
Name.Type.Paragraph: icons.ICON_FUNC
}[self.node_type]
d = Definition(self.name, self.line, self.column, icon, self.description)
for ch in self.children:
d.add_child(ch.to_definition())
return d
def cmp_name(first_node, second_node):
"""
Compare two name recursively.
:param first_node: First node
:param second_node: Second state
:return: 0 if same name, 1 if names are differents.
"""
if len(first_node.children) == len(second_node.children):
for first_child, second_child in zip(first_node.children,
second_node.children):
for key in first_child.__dict__.keys():
if key.startswith('_'):
continue
if first_child.__dict__[key] != second_child.__dict__[key]:
return 1
ret_val = cmp_name(first_child, second_child)
if ret_val != 0:
return 1
else:
return 1
return 0
def parse_division(l, c, line, root_node, last_section_node):
"""
Extracts a division node from a line
:param l: The line number (starting from 0)
:param c: The column number
:param line: The line string (without indentation)
:param root_node: The document root node.
:return: tuple(last_div_node, last_section_node)
"""
name = line
name = name.replace(".", "")
# trim whitespaces/tabs between XXX and DIVISION
tokens = [t for t in name.split(' ') if t]
node = Name(Name.Type.Division, l, c, '%s %s' % (tokens[0], tokens[1]))
root_node.add_child(node)
last_div_node = node
# do not take previous sections into account
if last_section_node:
last_section_node.end_line = l
last_section_node = None
return last_div_node, last_section_node
def parse_pic_field(l, c, last_section_node, last_vars, line):
"""
Parse a pic field line. Return A VariableNode or None in case of malformed
code.
:param l: The line number (starting from 0)
:param c: The column number (starting from 0)
:param last_section_node: The last section node found
:param last_vars: The last vars dict
:param line: The line string (without indentation)
:return: The extracted variable node
"""
parent_node = None
raw_tokens = line.split(" ")
tokens = []
for t in raw_tokens:
if not t.isspace() and t != "":
tokens.append(t)
try:
if tokens[0].upper() == "FD":
lvl = 1
else:
lvl = int(tokens[0], 16)
name = tokens[1]
except ValueError:
return None
except IndexError:
# line not complete
return None
name = name.replace(".", "")
if name in ALL_KEYWORDS or name in ['-', '/']:
return None
m = re.findall(r'pic.*\.', line, re.IGNORECASE)
if m:
description = ' '.join([t for t in m[0].split(' ') if t])
else:
description = line
try:
index = description.lower().index('value')
except ValueError:
description = description.replace('.', '')
else:
description = description[index:].replace('value', '')[:80]
if lvl == int('78', 16):
lvl = 1
if lvl == 1:
parent_node = last_section_node
last_vars.clear()
else:
# find parent level
levels = sorted(last_vars.keys(), reverse=True)
for lv in levels:
if lv < lvl:
parent_node = last_vars[lv]
break
if not parent_node:
# malformed code
return None
# todo: enabled this with an option in pyqode 3.0
# if lvl == int('88', 16):
# return None
if not name or name.upper().strip() == 'PIC':
name = 'FILLER'
node = Name(Name.Type.Variable, l, c, name, description)
parent_node.add_child(node)
last_vars[lvl] = node
# remove closed variables
levels = sorted(last_vars.keys(), reverse=True)
for l in levels:
if l > lvl:
last_vars.pop(l)
return node
def parse_paragraph(l, c, last_div_node, last_section_node, line):
"""
Extracts a paragraph node
:param l: The line number (starting from 0)
:param last_div_node: The last div node found
:param last_section_node: The last section node found
:param line: The line string (without indentation)
:return: The extracted paragraph node
"""
if not line.endswith('.'):
return None
name = line.replace(".", "")
if name.strip() == '':
return None
if name.upper() in ALL_KEYWORDS:
return None
parent_node = last_div_node
if last_section_node is not None:
parent_node = last_section_node
node = Name(Name.Type.Paragraph, l, c, name)
parent_node.add_child(node)
return node
def defined_names(code, free_format=False):
"""
Parses a cobol document and build a name tree.
For convenience, it also returns the list of variables (PIC) and
procedures (paragraphs).
:param code: cobol code to parse. Default is None.
:param free_format: True if the source code must be considered as coded
in free format.
:return: A tuple made up of the name tree root node, the list of variables
and the list of paragraphs.
:rtype: Name, list of Name, list of Name
"""
root_node = Name(Name.Type.Root, 0, 0, 'root')
variables = []
paragraphs = []
lines = code.splitlines()
last_div_node = None
last_section_node = None
last_vars = {}
last_par = None
for i, line in enumerate(lines):
if not free_format:
if len(line) >= 6:
line = 6 * " " + line[6:]
column = len(line) - len(line.lstrip())
if not line.isspace() and not line.strip().startswith("*"):
line = line.strip()
# DIVISIONS
if regex.DIVISION.indexIn(line.upper()) != -1 and 'EXIT' not in line.upper():
# remember
if last_div_node is not None:
last_div_node.end_line = i
last_div_node, last_section_node = parse_division(
i, column, line, root_node, last_section_node)
# SECTIONS
elif regex.SECTION.indexIn(line.upper()) != -1 and 'EXIT' not in line.upper():
if last_section_node:
last_section_node.end_line = i
if last_div_node is None:
name = 'PROCEDURE DIVISION'
for to_check in ['WORKING-STORAGE', 'LOCAL-STORAGE', 'LINKAGE', 'REPORT ', 'SCREEN']:
if to_check in line.upper():
name = 'DATA DIVISION'
last_div_node = Name(Name.Type.Division, -1, -1, name, name)
root_node.add_child(last_div_node)
last_section_node = parse_section(
i, column, last_div_node, last_vars, line)
# VARIABLES
# PARAGRAPHS
elif (last_div_node is not None and
"PROCEDURE DIVISION" in last_div_node.name.upper()):
tokens = line.upper().split(" ")
if len(tokens) == 1 and not tokens[0] in ALL_KEYWORDS:
p = parse_paragraph(
i, column, last_div_node, last_section_node, line)
if p:
paragraphs.append(p)
if last_par:
last_par.end_line = i
last_par = p
elif regex.VAR_PATTERN.indexIn(line.upper()) != -1 or line.upper().lstrip().startswith('FD'):
if last_div_node is None:
last_div_node = Name(Name.Type.Division, -1, -1, 'DATA DIVISION', '')
root_node.add_child(last_div_node)
if last_section_node is None:
last_section_node = Name(Name.Type.Section, -1, -1, 'WORKING-STORAGE SECTION', '')
last_div_node.add_child(last_section_node)
v = parse_pic_field(
i, column, last_section_node, last_vars, line)
if v:
variables.append(v)
# close last div
if last_par:
last_par.end_line = len(lines) - 1
if last_div_node:
last_div_node.end_line = len(lines)
if root_node and last_div_node:
root_node.end_line = last_div_node.end_line
return root_node, variables, paragraphs
if __name__ == '__main__':
with open('/Users/Colin/test.cbl') as f:
root, variables, paragraphs = defined_names(f.read())
print(root)
|
pyQode/pyqode.cobol | pyqode/cobol/api/parsers/names.py | parse_pic_field | python | def parse_pic_field(l, c, last_section_node, last_vars, line):
parent_node = None
raw_tokens = line.split(" ")
tokens = []
for t in raw_tokens:
if not t.isspace() and t != "":
tokens.append(t)
try:
if tokens[0].upper() == "FD":
lvl = 1
else:
lvl = int(tokens[0], 16)
name = tokens[1]
except ValueError:
return None
except IndexError:
# line not complete
return None
name = name.replace(".", "")
if name in ALL_KEYWORDS or name in ['-', '/']:
return None
m = re.findall(r'pic.*\.', line, re.IGNORECASE)
if m:
description = ' '.join([t for t in m[0].split(' ') if t])
else:
description = line
try:
index = description.lower().index('value')
except ValueError:
description = description.replace('.', '')
else:
description = description[index:].replace('value', '')[:80]
if lvl == int('78', 16):
lvl = 1
if lvl == 1:
parent_node = last_section_node
last_vars.clear()
else:
# find parent level
levels = sorted(last_vars.keys(), reverse=True)
for lv in levels:
if lv < lvl:
parent_node = last_vars[lv]
break
if not parent_node:
# malformed code
return None
# todo: enabled this with an option in pyqode 3.0
# if lvl == int('88', 16):
# return None
if not name or name.upper().strip() == 'PIC':
name = 'FILLER'
node = Name(Name.Type.Variable, l, c, name, description)
parent_node.add_child(node)
last_vars[lvl] = node
# remove closed variables
levels = sorted(last_vars.keys(), reverse=True)
for l in levels:
if l > lvl:
last_vars.pop(l)
return node | Parse a pic field line. Return A VariableNode or None in case of malformed
code.
:param l: The line number (starting from 0)
:param c: The column number (starting from 0)
:param last_section_node: The last section node found
:param last_vars: The last vars dict
:param line: The line string (without indentation)
:return: The extracted variable node | train | https://github.com/pyQode/pyqode.cobol/blob/eedae4e320a4b2d0c44abb2c3061091321648fb7/pyqode/cobol/api/parsers/names.py#L177-L248 | null | """
This parser parses the defined names in a cobol program and store them
under the appropriate division/section.
The code comes from OpenCobolIDE and has been left mostly intact.
"""
import logging
import re
from pyqode.cobol.api import icons
from pyqode.cobol.api.keywords import ALL_KEYWORDS
from pyqode.core.share import Definition
from pyqode.cobol.api import regex
def _logger():
return logging.getLogger(__name__)
class Name(object):
"""
A Name is a node in the simplified abstract syntax tree.
"""
class Type:
"""
Enumerates the possible name types (div, section, paragraph,...)
"""
Root = -1
Division = 0
Section = 1
Variable = 2
Paragraph = 3
def __init__(self, node_type, line, column, name, description=None):
self.node_type = node_type
self.line = line
self.column = column
self.end_line = -1
self.name = name
if description is None:
description = name
self.description = description.replace(".", "")
self.children = []
def add_child(self, child):
"""
Add a child to the node
:param child: The child node to add
"""
self.children.append(child)
child.parent = self
def find(self, name):
"""
Finds a possible child whose name match the name parameter.
:param name: name of the child node to look up
:type name: str
:return: DocumentNode or None
"""
for c in self.children:
if c.name == name:
return c
result = c.find(name)
if result:
return result
def __repr__(self):
type_names = {
self.Type.Root: "Root",
self.Type.Division: "Division",
self.Type.Paragraph: "Paragraph",
self.Type.Section: "Section",
self.Type.Variable: "Variable"
}
return "%s(name=%s, line=%s, end_line=%s)" % (
type_names[self.node_type], self.name, self.line, self.end_line)
def to_definition(self):
"""
Converts the name instance to a pyqode.core.share.Definition
"""
icon = {
Name.Type.Root: icons.ICON_MIMETYPE,
Name.Type.Division: icons.ICON_DIVISION,
Name.Type.Section: icons.ICON_SECTION,
Name.Type.Variable: icons.ICON_VAR,
Name.Type.Paragraph: icons.ICON_FUNC
}[self.node_type]
d = Definition(self.name, self.line, self.column, icon, self.description)
for ch in self.children:
d.add_child(ch.to_definition())
return d
def cmp_name(first_node, second_node):
"""
Compare two name recursively.
:param first_node: First node
:param second_node: Second state
:return: 0 if same name, 1 if names are differents.
"""
if len(first_node.children) == len(second_node.children):
for first_child, second_child in zip(first_node.children,
second_node.children):
for key in first_child.__dict__.keys():
if key.startswith('_'):
continue
if first_child.__dict__[key] != second_child.__dict__[key]:
return 1
ret_val = cmp_name(first_child, second_child)
if ret_val != 0:
return 1
else:
return 1
return 0
def parse_division(l, c, line, root_node, last_section_node):
"""
Extracts a division node from a line
:param l: The line number (starting from 0)
:param c: The column number
:param line: The line string (without indentation)
:param root_node: The document root node.
:return: tuple(last_div_node, last_section_node)
"""
name = line
name = name.replace(".", "")
# trim whitespaces/tabs between XXX and DIVISION
tokens = [t for t in name.split(' ') if t]
node = Name(Name.Type.Division, l, c, '%s %s' % (tokens[0], tokens[1]))
root_node.add_child(node)
last_div_node = node
# do not take previous sections into account
if last_section_node:
last_section_node.end_line = l
last_section_node = None
return last_div_node, last_section_node
def parse_section(l, c, last_div_node, last_vars, line):
"""
Extracts a section node from a line.
:param l: The line number (starting from 0)
:param last_div_node: The last div node found
:param last_vars: The last vars dict
:param line: The line string (without indentation)
:return: last_section_node
"""
name = line
name = name.replace(".", "")
node = Name(Name.Type.Section, l, c, name)
last_div_node.add_child(node)
last_section_node = node
# do not take previous var into account
last_vars.clear()
return last_section_node
def parse_paragraph(l, c, last_div_node, last_section_node, line):
"""
Extracts a paragraph node
:param l: The line number (starting from 0)
:param last_div_node: The last div node found
:param last_section_node: The last section node found
:param line: The line string (without indentation)
:return: The extracted paragraph node
"""
if not line.endswith('.'):
return None
name = line.replace(".", "")
if name.strip() == '':
return None
if name.upper() in ALL_KEYWORDS:
return None
parent_node = last_div_node
if last_section_node is not None:
parent_node = last_section_node
node = Name(Name.Type.Paragraph, l, c, name)
parent_node.add_child(node)
return node
def defined_names(code, free_format=False):
"""
Parses a cobol document and build a name tree.
For convenience, it also returns the list of variables (PIC) and
procedures (paragraphs).
:param code: cobol code to parse. Default is None.
:param free_format: True if the source code must be considered as coded
in free format.
:return: A tuple made up of the name tree root node, the list of variables
and the list of paragraphs.
:rtype: Name, list of Name, list of Name
"""
root_node = Name(Name.Type.Root, 0, 0, 'root')
variables = []
paragraphs = []
lines = code.splitlines()
last_div_node = None
last_section_node = None
last_vars = {}
last_par = None
for i, line in enumerate(lines):
if not free_format:
if len(line) >= 6:
line = 6 * " " + line[6:]
column = len(line) - len(line.lstrip())
if not line.isspace() and not line.strip().startswith("*"):
line = line.strip()
# DIVISIONS
if regex.DIVISION.indexIn(line.upper()) != -1 and 'EXIT' not in line.upper():
# remember
if last_div_node is not None:
last_div_node.end_line = i
last_div_node, last_section_node = parse_division(
i, column, line, root_node, last_section_node)
# SECTIONS
elif regex.SECTION.indexIn(line.upper()) != -1 and 'EXIT' not in line.upper():
if last_section_node:
last_section_node.end_line = i
if last_div_node is None:
name = 'PROCEDURE DIVISION'
for to_check in ['WORKING-STORAGE', 'LOCAL-STORAGE', 'LINKAGE', 'REPORT ', 'SCREEN']:
if to_check in line.upper():
name = 'DATA DIVISION'
last_div_node = Name(Name.Type.Division, -1, -1, name, name)
root_node.add_child(last_div_node)
last_section_node = parse_section(
i, column, last_div_node, last_vars, line)
# VARIABLES
# PARAGRAPHS
elif (last_div_node is not None and
"PROCEDURE DIVISION" in last_div_node.name.upper()):
tokens = line.upper().split(" ")
if len(tokens) == 1 and not tokens[0] in ALL_KEYWORDS:
p = parse_paragraph(
i, column, last_div_node, last_section_node, line)
if p:
paragraphs.append(p)
if last_par:
last_par.end_line = i
last_par = p
elif regex.VAR_PATTERN.indexIn(line.upper()) != -1 or line.upper().lstrip().startswith('FD'):
if last_div_node is None:
last_div_node = Name(Name.Type.Division, -1, -1, 'DATA DIVISION', '')
root_node.add_child(last_div_node)
if last_section_node is None:
last_section_node = Name(Name.Type.Section, -1, -1, 'WORKING-STORAGE SECTION', '')
last_div_node.add_child(last_section_node)
v = parse_pic_field(
i, column, last_section_node, last_vars, line)
if v:
variables.append(v)
# close last div
if last_par:
last_par.end_line = len(lines) - 1
if last_div_node:
last_div_node.end_line = len(lines)
if root_node and last_div_node:
root_node.end_line = last_div_node.end_line
return root_node, variables, paragraphs
if __name__ == '__main__':
with open('/Users/Colin/test.cbl') as f:
root, variables, paragraphs = defined_names(f.read())
print(root)
|
pyQode/pyqode.cobol | pyqode/cobol/api/parsers/names.py | parse_paragraph | python | def parse_paragraph(l, c, last_div_node, last_section_node, line):
if not line.endswith('.'):
return None
name = line.replace(".", "")
if name.strip() == '':
return None
if name.upper() in ALL_KEYWORDS:
return None
parent_node = last_div_node
if last_section_node is not None:
parent_node = last_section_node
node = Name(Name.Type.Paragraph, l, c, name)
parent_node.add_child(node)
return node | Extracts a paragraph node
:param l: The line number (starting from 0)
:param last_div_node: The last div node found
:param last_section_node: The last section node found
:param line: The line string (without indentation)
:return: The extracted paragraph node | train | https://github.com/pyQode/pyqode.cobol/blob/eedae4e320a4b2d0c44abb2c3061091321648fb7/pyqode/cobol/api/parsers/names.py#L251-L273 | null | """
This parser parses the defined names in a cobol program and store them
under the appropriate division/section.
The code comes from OpenCobolIDE and has been left mostly intact.
"""
import logging
import re
from pyqode.cobol.api import icons
from pyqode.cobol.api.keywords import ALL_KEYWORDS
from pyqode.core.share import Definition
from pyqode.cobol.api import regex
def _logger():
return logging.getLogger(__name__)
class Name(object):
"""
A Name is a node in the simplified abstract syntax tree.
"""
class Type:
"""
Enumerates the possible name types (div, section, paragraph,...)
"""
Root = -1
Division = 0
Section = 1
Variable = 2
Paragraph = 3
def __init__(self, node_type, line, column, name, description=None):
self.node_type = node_type
self.line = line
self.column = column
self.end_line = -1
self.name = name
if description is None:
description = name
self.description = description.replace(".", "")
self.children = []
def add_child(self, child):
"""
Add a child to the node
:param child: The child node to add
"""
self.children.append(child)
child.parent = self
def find(self, name):
"""
Finds a possible child whose name match the name parameter.
:param name: name of the child node to look up
:type name: str
:return: DocumentNode or None
"""
for c in self.children:
if c.name == name:
return c
result = c.find(name)
if result:
return result
def __repr__(self):
type_names = {
self.Type.Root: "Root",
self.Type.Division: "Division",
self.Type.Paragraph: "Paragraph",
self.Type.Section: "Section",
self.Type.Variable: "Variable"
}
return "%s(name=%s, line=%s, end_line=%s)" % (
type_names[self.node_type], self.name, self.line, self.end_line)
def to_definition(self):
"""
Converts the name instance to a pyqode.core.share.Definition
"""
icon = {
Name.Type.Root: icons.ICON_MIMETYPE,
Name.Type.Division: icons.ICON_DIVISION,
Name.Type.Section: icons.ICON_SECTION,
Name.Type.Variable: icons.ICON_VAR,
Name.Type.Paragraph: icons.ICON_FUNC
}[self.node_type]
d = Definition(self.name, self.line, self.column, icon, self.description)
for ch in self.children:
d.add_child(ch.to_definition())
return d
def cmp_name(first_node, second_node):
"""
Compare two name recursively.
:param first_node: First node
:param second_node: Second state
:return: 0 if same name, 1 if names are differents.
"""
if len(first_node.children) == len(second_node.children):
for first_child, second_child in zip(first_node.children,
second_node.children):
for key in first_child.__dict__.keys():
if key.startswith('_'):
continue
if first_child.__dict__[key] != second_child.__dict__[key]:
return 1
ret_val = cmp_name(first_child, second_child)
if ret_val != 0:
return 1
else:
return 1
return 0
def parse_division(l, c, line, root_node, last_section_node):
"""
Extracts a division node from a line
:param l: The line number (starting from 0)
:param c: The column number
:param line: The line string (without indentation)
:param root_node: The document root node.
:return: tuple(last_div_node, last_section_node)
"""
name = line
name = name.replace(".", "")
# trim whitespaces/tabs between XXX and DIVISION
tokens = [t for t in name.split(' ') if t]
node = Name(Name.Type.Division, l, c, '%s %s' % (tokens[0], tokens[1]))
root_node.add_child(node)
last_div_node = node
# do not take previous sections into account
if last_section_node:
last_section_node.end_line = l
last_section_node = None
return last_div_node, last_section_node
def parse_section(l, c, last_div_node, last_vars, line):
"""
Extracts a section node from a line.
:param l: The line number (starting from 0)
:param last_div_node: The last div node found
:param last_vars: The last vars dict
:param line: The line string (without indentation)
:return: last_section_node
"""
name = line
name = name.replace(".", "")
node = Name(Name.Type.Section, l, c, name)
last_div_node.add_child(node)
last_section_node = node
# do not take previous var into account
last_vars.clear()
return last_section_node
def parse_pic_field(l, c, last_section_node, last_vars, line):
"""
Parse a pic field line. Return A VariableNode or None in case of malformed
code.
:param l: The line number (starting from 0)
:param c: The column number (starting from 0)
:param last_section_node: The last section node found
:param last_vars: The last vars dict
:param line: The line string (without indentation)
:return: The extracted variable node
"""
parent_node = None
raw_tokens = line.split(" ")
tokens = []
for t in raw_tokens:
if not t.isspace() and t != "":
tokens.append(t)
try:
if tokens[0].upper() == "FD":
lvl = 1
else:
lvl = int(tokens[0], 16)
name = tokens[1]
except ValueError:
return None
except IndexError:
# line not complete
return None
name = name.replace(".", "")
if name in ALL_KEYWORDS or name in ['-', '/']:
return None
m = re.findall(r'pic.*\.', line, re.IGNORECASE)
if m:
description = ' '.join([t for t in m[0].split(' ') if t])
else:
description = line
try:
index = description.lower().index('value')
except ValueError:
description = description.replace('.', '')
else:
description = description[index:].replace('value', '')[:80]
if lvl == int('78', 16):
lvl = 1
if lvl == 1:
parent_node = last_section_node
last_vars.clear()
else:
# find parent level
levels = sorted(last_vars.keys(), reverse=True)
for lv in levels:
if lv < lvl:
parent_node = last_vars[lv]
break
if not parent_node:
# malformed code
return None
# todo: enabled this with an option in pyqode 3.0
# if lvl == int('88', 16):
# return None
if not name or name.upper().strip() == 'PIC':
name = 'FILLER'
node = Name(Name.Type.Variable, l, c, name, description)
parent_node.add_child(node)
last_vars[lvl] = node
# remove closed variables
levels = sorted(last_vars.keys(), reverse=True)
for l in levels:
if l > lvl:
last_vars.pop(l)
return node
def defined_names(code, free_format=False):
    """Parse a COBOL document and build a name tree.

    For convenience, also returns the list of variables (PIC fields) and
    procedures (paragraphs).

    :param code: COBOL source code to parse.
    :param free_format: True when the source must be treated as free format.
    :return: (root node, list of variables, list of paragraphs)
    :rtype: Name, list of Name, list of Name
    """
    root_node = Name(Name.Type.Root, 0, 0, 'root')
    variables = []
    paragraphs = []
    lines = code.splitlines()
    current_div = None
    current_section = None
    open_vars = {}
    current_par = None
    for i, line in enumerate(lines):
        if not free_format and len(line) >= 6:
            # blank out the fixed-format sequence-number area (cols 1-6)
            line = 6 * " " + line[6:]
        column = len(line) - len(line.lstrip())
        if line.isspace() or line.strip().startswith("*"):
            # blank line or comment line
            continue
        line = line.strip()
        upper = line.upper()
        # DIVISIONS
        if regex.DIVISION.indexIn(upper) != -1 and 'EXIT' not in upper:
            # a new division closes the previous one
            if current_div is not None:
                current_div.end_line = i
            current_div, current_section = parse_division(
                i, column, line, root_node, current_section)
        # SECTIONS
        elif regex.SECTION.indexIn(upper) != -1 and 'EXIT' not in upper:
            if current_section:
                current_section.end_line = i
            if current_div is None:
                # implicit division: guess it from the section keyword
                div_name = 'PROCEDURE DIVISION'
                for marker in ['WORKING-STORAGE', 'LOCAL-STORAGE',
                               'LINKAGE', 'REPORT ', 'SCREEN']:
                    if marker in upper:
                        div_name = 'DATA DIVISION'
                current_div = Name(Name.Type.Division, -1, -1,
                                   div_name, div_name)
                root_node.add_child(current_div)
            current_section = parse_section(
                i, column, current_div, open_vars, line)
        # PARAGRAPHS: a single non-keyword token inside PROCEDURE DIVISION
        elif (current_div is not None and
                "PROCEDURE DIVISION" in current_div.name.upper()):
            tokens = upper.split(" ")
            if len(tokens) == 1 and not tokens[0] in ALL_KEYWORDS:
                par = parse_paragraph(
                    i, column, current_div, current_section, line)
                if par:
                    paragraphs.append(par)
                    if current_par:
                        current_par.end_line = i
                    current_par = par
        # VARIABLES
        elif (regex.VAR_PATTERN.indexIn(upper) != -1 or
                upper.lstrip().startswith('FD')):
            # create implicit parents when the variable appears before any
            # explicit division/section
            if current_div is None:
                current_div = Name(Name.Type.Division, -1, -1,
                                   'DATA DIVISION', '')
                root_node.add_child(current_div)
            if current_section is None:
                current_section = Name(Name.Type.Section, -1, -1,
                                       'WORKING-STORAGE SECTION', '')
                current_div.add_child(current_section)
            var = parse_pic_field(
                i, column, current_section, open_vars, line)
            if var:
                variables.append(var)
    # close the scopes still open at end of document
    if current_par:
        current_par.end_line = len(lines) - 1
    if current_div:
        current_div.end_line = len(lines)
    if root_node and current_div:
        root_node.end_line = current_div.end_line
    return root_node, variables, paragraphs
if __name__ == '__main__':
with open('/Users/Colin/test.cbl') as f:
root, variables, paragraphs = defined_names(f.read())
print(root)
|
pyQode/pyqode.cobol | pyqode/cobol/api/parsers/names.py | defined_names | python | def defined_names(code, free_format=False):
root_node = Name(Name.Type.Root, 0, 0, 'root')
variables = []
paragraphs = []
lines = code.splitlines()
last_div_node = None
last_section_node = None
last_vars = {}
last_par = None
for i, line in enumerate(lines):
if not free_format:
if len(line) >= 6:
line = 6 * " " + line[6:]
column = len(line) - len(line.lstrip())
if not line.isspace() and not line.strip().startswith("*"):
line = line.strip()
# DIVISIONS
if regex.DIVISION.indexIn(line.upper()) != -1 and 'EXIT' not in line.upper():
# remember
if last_div_node is not None:
last_div_node.end_line = i
last_div_node, last_section_node = parse_division(
i, column, line, root_node, last_section_node)
# SECTIONS
elif regex.SECTION.indexIn(line.upper()) != -1 and 'EXIT' not in line.upper():
if last_section_node:
last_section_node.end_line = i
if last_div_node is None:
name = 'PROCEDURE DIVISION'
for to_check in ['WORKING-STORAGE', 'LOCAL-STORAGE', 'LINKAGE', 'REPORT ', 'SCREEN']:
if to_check in line.upper():
name = 'DATA DIVISION'
last_div_node = Name(Name.Type.Division, -1, -1, name, name)
root_node.add_child(last_div_node)
last_section_node = parse_section(
i, column, last_div_node, last_vars, line)
# VARIABLES
# PARAGRAPHS
elif (last_div_node is not None and
"PROCEDURE DIVISION" in last_div_node.name.upper()):
tokens = line.upper().split(" ")
if len(tokens) == 1 and not tokens[0] in ALL_KEYWORDS:
p = parse_paragraph(
i, column, last_div_node, last_section_node, line)
if p:
paragraphs.append(p)
if last_par:
last_par.end_line = i
last_par = p
elif regex.VAR_PATTERN.indexIn(line.upper()) != -1 or line.upper().lstrip().startswith('FD'):
if last_div_node is None:
last_div_node = Name(Name.Type.Division, -1, -1, 'DATA DIVISION', '')
root_node.add_child(last_div_node)
if last_section_node is None:
last_section_node = Name(Name.Type.Section, -1, -1, 'WORKING-STORAGE SECTION', '')
last_div_node.add_child(last_section_node)
v = parse_pic_field(
i, column, last_section_node, last_vars, line)
if v:
variables.append(v)
# close last div
if last_par:
last_par.end_line = len(lines) - 1
if last_div_node:
last_div_node.end_line = len(lines)
if root_node and last_div_node:
root_node.end_line = last_div_node.end_line
return root_node, variables, paragraphs | Parses a cobol document and build a name tree.
For convenience, it also returns the list of variables (PIC) and
procedures (paragraphs).
:param code: cobol code to parse. Default is None.
:param free_format: True if the source code must be considered as coded
in free format.
:return: A tuple made up of the name tree root node, the list of variables
and the list of paragraphs.
:rtype: Name, list of Name, list of Name | train | https://github.com/pyQode/pyqode.cobol/blob/eedae4e320a4b2d0c44abb2c3061091321648fb7/pyqode/cobol/api/parsers/names.py#L276-L360 | null | """
This parser parses the defined names in a cobol program and store them
under the appropriate division/section.
The code comes from OpenCobolIDE and has been left mostly intact.
"""
import logging
import re
from pyqode.cobol.api import icons
from pyqode.cobol.api.keywords import ALL_KEYWORDS
from pyqode.core.share import Definition
from pyqode.cobol.api import regex
def _logger():
return logging.getLogger(__name__)
class Name(object):
    """A node in the simplified abstract syntax tree of a COBOL program."""

    class Type:
        """Enumerates the possible name types (division, section, ...)."""
        Root = -1
        Division = 0
        Section = 1
        Variable = 2
        Paragraph = 3

    def __init__(self, node_type, line, column, name, description=None):
        #: one of Name.Type
        self.node_type = node_type
        self.line = line
        self.column = column
        # unknown until the parser closes the scope
        self.end_line = -1
        self.name = name
        # description defaults to the name; trailing periods are dropped
        if description is None:
            description = name
        self.description = description.replace(".", "")
        self.children = []

    def add_child(self, child):
        """Append *child* to this node and set its parent back-reference.

        :param child: the child node to add
        """
        self.children.append(child)
        child.parent = self

    def find(self, name):
        """Depth-first search for a descendant named *name*.

        :param name: name of the child node to look up
        :type name: str
        :return: the matching node or None
        """
        for child in self.children:
            if child.name == name:
                return child
            match = child.find(name)
            if match:
                return match

    def __repr__(self):
        labels = {
            self.Type.Root: "Root",
            self.Type.Division: "Division",
            self.Type.Paragraph: "Paragraph",
            self.Type.Section: "Section",
            self.Type.Variable: "Variable"
        }
        return "%s(name=%s, line=%s, end_line=%s)" % (
            labels[self.node_type], self.name, self.line, self.end_line)

    def to_definition(self):
        """Convert this node (and its subtree) to a
        pyqode.core.share.Definition instance.
        """
        icon_map = {
            Name.Type.Root: icons.ICON_MIMETYPE,
            Name.Type.Division: icons.ICON_DIVISION,
            Name.Type.Section: icons.ICON_SECTION,
            Name.Type.Variable: icons.ICON_VAR,
            Name.Type.Paragraph: icons.ICON_FUNC
        }
        definition = Definition(self.name, self.line, self.column,
                                icon_map[self.node_type], self.description)
        for child in self.children:
            definition.add_child(child.to_definition())
        return definition
def cmp_name(first_node, second_node):
    """Recursively compare two name trees.

    :param first_node: first tree root
    :param second_node: second tree root
    :return: 0 when the trees are identical, 1 otherwise.
    """
    # differing child counts means the trees differ
    if len(first_node.children) != len(second_node.children):
        return 1
    for left, right in zip(first_node.children, second_node.children):
        # compare every public attribute of the paired children
        for attr in left.__dict__.keys():
            if attr.startswith('_'):
                continue
            if left.__dict__[attr] != right.__dict__[attr]:
                return 1
        if cmp_name(left, right) != 0:
            return 1
    return 0
def parse_division(l, c, line, root_node, last_section_node):
    """Extract a Division node from *line* and attach it to *root_node*.

    :param l: line number (starting from 0)
    :param c: column number
    :param line: the stripped source line
    :param root_node: the document root node
    :return: tuple(last_div_node, last_section_node)
    """
    cleaned = line.replace(".", "")
    # collapse any run of whitespace/tabs between 'XXX' and 'DIVISION'
    words = [w for w in cleaned.split(' ') if w]
    division = Name(Name.Type.Division, l, c, '%s %s' % (words[0], words[1]))
    root_node.add_child(division)
    # a new division closes the previous section, if any
    if last_section_node:
        last_section_node.end_line = l
        last_section_node = None
    return division, last_section_node
def parse_section(l, c, last_div_node, last_vars, line):
    """Extract a Section node from *line* and attach it to the current
    division.

    :param l: line number (starting from 0)
    :param last_div_node: the last division node found
    :param last_vars: the open-variables dict
    :param line: the stripped source line
    :return: the new section node
    """
    section = Name(Name.Type.Section, l, c, line.replace(".", ""))
    last_div_node.add_child(section)
    # a new section invalidates the previously open variables
    last_vars.clear()
    return section
def parse_pic_field(l, c, last_section_node, last_vars, line):
"""
Parse a pic field line. Return A VariableNode or None in case of malformed
code.
:param l: The line number (starting from 0)
:param c: The column number (starting from 0)
:param last_section_node: The last section node found
:param last_vars: The last vars dict
:param line: The line string (without indentation)
:return: The extracted variable node
"""
parent_node = None
raw_tokens = line.split(" ")
tokens = []
for t in raw_tokens:
if not t.isspace() and t != "":
tokens.append(t)
try:
if tokens[0].upper() == "FD":
lvl = 1
else:
lvl = int(tokens[0], 16)
name = tokens[1]
except ValueError:
return None
except IndexError:
# line not complete
return None
name = name.replace(".", "")
if name in ALL_KEYWORDS or name in ['-', '/']:
return None
m = re.findall(r'pic.*\.', line, re.IGNORECASE)
if m:
description = ' '.join([t for t in m[0].split(' ') if t])
else:
description = line
try:
index = description.lower().index('value')
except ValueError:
description = description.replace('.', '')
else:
description = description[index:].replace('value', '')[:80]
if lvl == int('78', 16):
lvl = 1
if lvl == 1:
parent_node = last_section_node
last_vars.clear()
else:
# find parent level
levels = sorted(last_vars.keys(), reverse=True)
for lv in levels:
if lv < lvl:
parent_node = last_vars[lv]
break
if not parent_node:
# malformed code
return None
# todo: enabled this with an option in pyqode 3.0
# if lvl == int('88', 16):
# return None
if not name or name.upper().strip() == 'PIC':
name = 'FILLER'
node = Name(Name.Type.Variable, l, c, name, description)
parent_node.add_child(node)
last_vars[lvl] = node
# remove closed variables
levels = sorted(last_vars.keys(), reverse=True)
for l in levels:
if l > lvl:
last_vars.pop(l)
return node
def parse_paragraph(l, c, last_div_node, last_section_node, line):
    """Extract a Paragraph node from *line*.

    :param l: line number (starting from 0)
    :param last_div_node: the last division node found
    :param last_section_node: the last section node found
    :param line: the stripped source line
    :return: the new paragraph node, or None when the line is not a
        paragraph definition
    """
    # a paragraph definition must be terminated by a period
    if not line.endswith('.'):
        return None
    name = line.replace(".", "")
    if not name.strip():
        return None
    if name.upper() in ALL_KEYWORDS:
        return None
    # attach to the innermost open scope: the section if any, else the
    # division
    parent = last_div_node
    if last_section_node is not None:
        parent = last_section_node
    paragraph = Name(Name.Type.Paragraph, l, c, name)
    parent.add_child(paragraph)
    return paragraph
if __name__ == '__main__':
with open('/Users/Colin/test.cbl') as f:
root, variables, paragraphs = defined_names(f.read())
print(root)
|
pyQode/pyqode.cobol | pyqode/cobol/api/parsers/names.py | Name.find | python | def find(self, name):
for c in self.children:
if c.name == name:
return c
result = c.find(name)
if result:
return result | Finds a possible child whose name match the name parameter.
:param name: name of the child node to look up
:type name: str
:return: DocumentNode or None | train | https://github.com/pyQode/pyqode.cobol/blob/eedae4e320a4b2d0c44abb2c3061091321648fb7/pyqode/cobol/api/parsers/names.py#L55-L69 | null | class Name(object):
"""
A Name is a node in the simplified abstract syntax tree.
"""
class Type:
"""
Enumerates the possible name types (div, section, paragraph,...)
"""
Root = -1
Division = 0
Section = 1
Variable = 2
Paragraph = 3
def __init__(self, node_type, line, column, name, description=None):
self.node_type = node_type
self.line = line
self.column = column
self.end_line = -1
self.name = name
if description is None:
description = name
self.description = description.replace(".", "")
self.children = []
def add_child(self, child):
"""
Add a child to the node
:param child: The child node to add
"""
self.children.append(child)
child.parent = self
def __repr__(self):
type_names = {
self.Type.Root: "Root",
self.Type.Division: "Division",
self.Type.Paragraph: "Paragraph",
self.Type.Section: "Section",
self.Type.Variable: "Variable"
}
return "%s(name=%s, line=%s, end_line=%s)" % (
type_names[self.node_type], self.name, self.line, self.end_line)
def to_definition(self):
"""
Converts the name instance to a pyqode.core.share.Definition
"""
icon = {
Name.Type.Root: icons.ICON_MIMETYPE,
Name.Type.Division: icons.ICON_DIVISION,
Name.Type.Section: icons.ICON_SECTION,
Name.Type.Variable: icons.ICON_VAR,
Name.Type.Paragraph: icons.ICON_FUNC
}[self.node_type]
d = Definition(self.name, self.line, self.column, icon, self.description)
for ch in self.children:
d.add_child(ch.to_definition())
return d
|
pyQode/pyqode.cobol | pyqode/cobol/api/parsers/names.py | Name.to_definition | python | def to_definition(self):
icon = {
Name.Type.Root: icons.ICON_MIMETYPE,
Name.Type.Division: icons.ICON_DIVISION,
Name.Type.Section: icons.ICON_SECTION,
Name.Type.Variable: icons.ICON_VAR,
Name.Type.Paragraph: icons.ICON_FUNC
}[self.node_type]
d = Definition(self.name, self.line, self.column, icon, self.description)
for ch in self.children:
d.add_child(ch.to_definition())
return d | Converts the name instance to a pyqode.core.share.Definition | train | https://github.com/pyQode/pyqode.cobol/blob/eedae4e320a4b2d0c44abb2c3061091321648fb7/pyqode/cobol/api/parsers/names.py#L82-L96 | null | class Name(object):
"""
A Name is a node in the simplified abstract syntax tree.
"""
class Type:
"""
Enumerates the possible name types (div, section, paragraph,...)
"""
Root = -1
Division = 0
Section = 1
Variable = 2
Paragraph = 3
def __init__(self, node_type, line, column, name, description=None):
self.node_type = node_type
self.line = line
self.column = column
self.end_line = -1
self.name = name
if description is None:
description = name
self.description = description.replace(".", "")
self.children = []
def add_child(self, child):
"""
Add a child to the node
:param child: The child node to add
"""
self.children.append(child)
child.parent = self
def find(self, name):
"""
Finds a possible child whose name match the name parameter.
:param name: name of the child node to look up
:type name: str
:return: DocumentNode or None
"""
for c in self.children:
if c.name == name:
return c
result = c.find(name)
if result:
return result
def __repr__(self):
type_names = {
self.Type.Root: "Root",
self.Type.Division: "Division",
self.Type.Paragraph: "Paragraph",
self.Type.Section: "Section",
self.Type.Variable: "Variable"
}
return "%s(name=%s, line=%s, end_line=%s)" % (
type_names[self.node_type], self.name, self.line, self.end_line)
|
pyQode/pyqode.cobol | pyqode/cobol/api/parsers/pic.py | parse_cobol | python | def parse_cobol(lines):
output = []
intify = ["level", "occurs"]
# All in 1 line now, let's parse
for row in lines:
match = CobolPatterns.row_pattern.match(row.strip())
if not match:
_logger().warning("Found unmatched row %s" % row.strip())
continue
match = match.groupdict()
for i in intify:
match[i] = int(match[i]) if match[i] is not None else None
if match['pic'] is not None:
match['pic_info'] = parse_pic_string(match['pic'])
output.append(match)
return output | Parses the COBOL
- converts the COBOL line into a dictionary containing the information
- parses the pic information into type, length, precision
- ~~handles redefines~~ -> our implementation does not do that anymore
because we want to display item that was redefined. | train | https://github.com/pyQode/pyqode.cobol/blob/eedae4e320a4b2d0c44abb2c3061091321648fb7/pyqode/cobol/api/parsers/pic.py#L114-L143 | [
"def _logger():\n return logging.getLogger(__name__)\n",
"def parse_pic_string(pic_str):\n # Expand repeating chars\n while True:\n match = CobolPatterns.pic_pattern_repeats.search(pic_str)\n\n if not match:\n break\n\n expanded_str = match.group(1) * int(match.group(2))\n... | """
This module contains the Python-Cobol pic parser written by
Brian Peterson (https://github.com/bpeterso2000/pycobol ) and enhanced
by Paulus Schoutsen
(https://github.com/balloob/Python-COBOL/blob/master/cobol.py ), licensed
under the GPL v3.
We slightly modified this module to:
- add python3 support
- use the pyqode logger mechanism
- fix a few PEP8 issues.
"""
import logging
import re
def _logger():
return logging.getLogger(__name__)
class CobolPatterns:
    """Regular expressions used to parse a cleaned COBOL data line."""

    # wraps a fragment so it becomes optional in the full row pattern
    opt_pattern_format = "({})?"
    # mandatory prefix: two-digit level number followed by the field name
    row_pattern_base = r'^(?P<level>\d{2})\s*(?P<name>\S+)'
    row_pattern_redefines = r"\s+REDEFINES\s(?P<redefines>\S+)"
    row_pattern_pic = r'\s+(COMP-\d\s+)?PIC\s+(?P<pic>\S+)'
    row_pattern_occurs = r'\s+OCCURS (?P<occurs>\d+) TIMES'
    row_pattern_indexed_by = r"\s+INDEXED BY\s(?P<indexed_by>\S+)"
    # every statement ends with a period
    row_pattern_end = r'\.$'

    # full row: base + each optional clause + terminator
    row_pattern = re.compile(
        "".join([
            row_pattern_base,
            opt_pattern_format.format(row_pattern_redefines),
            opt_pattern_format.format(row_pattern_pic),
            opt_pattern_format.format(row_pattern_occurs),
            opt_pattern_format.format(row_pattern_indexed_by),
            row_pattern_end,
        ]),
        re.IGNORECASE,
    )

    # shorthand repeat, e.g. 9(3); group 1 = char, group 2 = count
    pic_pattern_repeats = re.compile(r'(.)\((\d+)\)')
    pic_pattern_float = re.compile(r'S?[9Z]*[.V][9Z]+')
    pic_pattern_integer = re.compile(r'S?[9Z]+')
# Parse the pic string
def parse_pic_string(pic_str):
    """Parse a PIC clause string into type / length / precision info.

    :param pic_str: the raw picture string, e.g. ``S9(3)V99``
    :return: dict with keys ``type``, ``length`` and ``precision``
    """
    # expand shorthand repeats, e.g. 9(3) -> 999
    while True:
        repeat = CobolPatterns.pic_pattern_repeats.search(pic_str)
        if repeat is None:
            break
        expanded = repeat.group(1) * int(repeat.group(2))
        pic_str = CobolPatterns.pic_pattern_repeats.sub(expanded, pic_str, 1)
    # classify the picture into one of the base types
    if CobolPatterns.pic_pattern_float.match(pic_str):
        data_type = 'Float'
    elif CobolPatterns.pic_pattern_integer.match(pic_str):
        data_type = 'Integer'
    else:
        data_type = 'Char'
    # a leading S marks a signed numeric picture
    if pic_str[0] == "S":
        data_type = "Signed " + data_type
    # 'V' is the implied decimal point: everything after it is precision
    decimal_pos = 0
    if 'V' in pic_str:
        decimal_pos = len(pic_str[pic_str.index('V') + 1:])
        pic_str = pic_str.replace('V', '')
    return {
        'type': data_type,
        'length': len(pic_str),
        'precision': decimal_pos
    }
# Cleans the COBOL by converting the cobol information to single lines
def clean_cobol(lines, free_format):
    """Join continued COBOL statements into single '.'-terminated lines.

    In fixed format only columns 7-72 are significant; blank lines and
    comment lines (starting with '*' or '/') are dropped.

    :param lines: raw source lines
    :param free_format: True when the source is free format
    :return: list of joined statement strings
    """
    pending = []
    output = []
    for raw in lines:
        row = raw if free_format else raw[6:72].rstrip()
        if not row or row[0] in ('*', '/'):
            continue
        # keep the first fragment verbatim, strip continuations
        pending.append(row if not pending else row.strip())
        if row[-1] == ".":
            output.append(" ".join(pending))
            pending = []
    if pending:
        _logger().warning(
            "Probably invalid COBOL - found unfinished line: %s" %
            " ".join(pending))
    return output
# Helper function
# Gets all the lines that have a higher level then the parent_level until
# a line with equal or lower level then parent_level is encountered
def get_subgroup(parent_level, lines):
    """Collect the rows nested under *parent_level*.

    Gathers rows whose level is strictly greater than *parent_level*,
    stopping at the first row at or above it.

    :param parent_level: level number of the enclosing group
    :param lines: rows (dicts with a "level" key) following the parent
    :return: list of the nested rows
    """
    collected = []
    for row in lines:
        if row["level"] <= parent_level:
            break
        collected.append(row)
    return collected
def denormalize_cobol(lines):
    """Flatten all OCCURS groups so every field appears exactly once."""
    return handle_occurs(lines, 1)
# Helper function
# Will go ahead and denormalize the COBOL
# Beacuse the OCCURS are removed the INDEXED BY will also be removed
def handle_occurs(lines, occurs, level_diff=0, name_postfix=""):
"""Expand a group of rows ``occurs`` times, flattening nested OCCURS.

Because the OCCURS clauses are removed, INDEXED BY is dropped as well.

:param lines: parsed rows (dicts) belonging to one group
:param occurs: how many copies of the group to emit
:param level_diff: level offset applied to every emitted row
:param name_postfix: suffix accumulated from enclosing OCCURS groups
:return: flat list of denormalized rows
"""
output = []
for i in range(1, occurs + 1):
skip_till = 0
# only append "-<i>" when the group really repeats
new_name_postfix = (name_postfix if occurs == 1 else
name_postfix + '-' + str(i))
for index, row in enumerate(lines):
if index < skip_till:
continue
new_row = row.copy()
new_row['level'] += level_diff
# Not needed when flattened
new_row['indexed_by'] = None
if row['occurs'] is None:
# First time occurs is just 1, we don't want to add _1 after
# *every* field
new_row['name'] = row['name'] + new_name_postfix
# + "-" + str(i) if occurs > 1 else row['name'] + name_postfix
output.append(new_row)
else:
if row["pic"] is not None:
# If it has occurs and pic just repeat the same line
# multiple times
new_row['occurs'] = None
for j in range(1, row["occurs"] + 1):
row_to_add = new_row.copy()
# First time occurs is just 1, we don't want to add
# 1 after *every* field
row_to_add["name"] = (row['name'] + new_name_postfix +
'-' + str(j))
# + "-" + str(i) + "-" + str(j) if occurs > 1 else
# row['name'] + name_postfix + "-" + str(j)
output.append(row_to_add)
else:
# Get all the lines that have to occur
occur_lines = get_subgroup(row['level'], lines[index + 1:])
# Calculate the new level difference that has to be applied
new_level_diff = level_diff + row['level'] - occur_lines[
0]['level']
# recurse into the subgroup, then skip past the rows it consumed
output += handle_occurs(occur_lines, row['occurs'],
new_level_diff, new_name_postfix)
skip_till = index + len(occur_lines) + 1
return output
def clean_names(lines, ensure_unique_names=False, strip_prefix=False,
                make_database_safe=False):
    """Normalize the field names in-place and return the row list.

    Options to:
    - strip prefixes on names (everything up to the first '-')
    - enforce unique names (duplicates get a -2, -3, ... suffix)
    - make database safe names by converting - to _
    """
    seen = {}
    for row in lines:
        if strip_prefix:
            row['name'] = row['name'][row['name'].find('-') + 1:]
            if row['indexed_by'] is not None:
                row['indexed_by'] = row['indexed_by'][
                    row['indexed_by'].find('-') + 1:]
        if ensure_unique_names:
            suffix = 1
            while (row['name'] if suffix == 1
                   else row['name'] + "-" + str(suffix)) in seen:
                suffix += 1
            seen[row['name'] if suffix == 1
                 else row['name'] + "-" + str(suffix)] = 1
            if suffix > 1:
                row['name'] = row['name'] + "-" + str(suffix)
        if make_database_safe:
            row['name'] = row['name'].replace("-", "_")
    return lines
def process_cobol(lines, free_format):
    """Run the full pipeline: clean -> parse -> denormalize -> name cleanup."""
    parsed = parse_cobol(clean_cobol(lines, free_format))
    flattened = denormalize_cobol(parsed)
    return clean_names(flattened, ensure_unique_names=False,
                       strip_prefix=False, make_database_safe=False)
|
pyQode/pyqode.cobol | pyqode/cobol/api/parsers/pic.py | clean_names | python | def clean_names(lines, ensure_unique_names=False, strip_prefix=False,
make_database_safe=False):
names = {}
for row in lines:
if strip_prefix:
row['name'] = row['name'][row['name'].find('-') + 1:]
if row['indexed_by'] is not None:
row['indexed_by'] = row['indexed_by'][row['indexed_by'].find(
'-') + 1:]
if ensure_unique_names:
i = 1
while (row['name'] if i == 1 else
row['name'] + "-" + str(i)) in names:
i += 1
names[row['name'] if i == 1 else row['name'] + "-" + str(i)] = 1
if i > 1:
row['name'] = row['name'] + "-" + str(i)
if make_database_safe:
row['name'] = row['name'].replace("-", "_")
return lines | Clean the names.
Options to:
- strip prefixes on names
- enforce unique names
- make database safe names by converting - to _ | train | https://github.com/pyQode/pyqode.cobol/blob/eedae4e320a4b2d0c44abb2c3061091321648fb7/pyqode/cobol/api/parsers/pic.py#L230-L263 | null | """
This module contains the Python-Cobol pic parser written by
Brian Peterson (https://github.com/bpeterso2000/pycobol ) and enhanced
by Paulus Schoutsen
(https://github.com/balloob/Python-COBOL/blob/master/cobol.py ), licensed
under the GPL v3.
We slightly modified this module to:
- add python3 support
- use the pyqode logger mechanism
- fix a few PEP8 issues.
"""
import logging
import re
def _logger():
return logging.getLogger(__name__)
class CobolPatterns:
opt_pattern_format = "({})?"
row_pattern_base = r'^(?P<level>\d{2})\s*(?P<name>\S+)'
row_pattern_redefines = r"\s+REDEFINES\s(?P<redefines>\S+)"
row_pattern_pic = r'\s+(COMP-\d\s+)?PIC\s+(?P<pic>\S+)'
row_pattern_occurs = r'\s+OCCURS (?P<occurs>\d+) TIMES'
row_pattern_indexed_by = r"\s+INDEXED BY\s(?P<indexed_by>\S+)"
row_pattern_end = r'\.$'
row_pattern = re.compile(
row_pattern_base +
opt_pattern_format.format(row_pattern_redefines) +
opt_pattern_format.format(row_pattern_pic) +
opt_pattern_format.format(row_pattern_occurs) +
opt_pattern_format.format(row_pattern_indexed_by) +
row_pattern_end, re.IGNORECASE
)
pic_pattern_repeats = re.compile(r'(.)\((\d+)\)')
pic_pattern_float = re.compile(r'S?[9Z]*[.V][9Z]+')
pic_pattern_integer = re.compile(r'S?[9Z]+')
# Parse the pic string
def parse_pic_string(pic_str):
# Expand repeating chars
while True:
match = CobolPatterns.pic_pattern_repeats.search(pic_str)
if not match:
break
expanded_str = match.group(1) * int(match.group(2))
pic_str = CobolPatterns.pic_pattern_repeats.sub(
expanded_str, pic_str, 1)
# Match to types
if CobolPatterns.pic_pattern_float.match(pic_str):
data_type = 'Float'
elif CobolPatterns.pic_pattern_integer.match(pic_str):
data_type = 'Integer'
else:
data_type = 'Char'
# Handle signed
if pic_str[0] == "S":
data_type = "Signed " + data_type
# Handle precision
decimal_pos = 0
if 'V' in pic_str:
decimal_pos = len(pic_str[pic_str.index('V') + 1:])
pic_str = pic_str.replace('V', '')
return {
'type': data_type,
'length': len(pic_str),
'precision': decimal_pos
}
# Cleans the COBOL by converting the cobol information to single lines
def clean_cobol(lines, free_format):
holder = []
output = []
for row in lines:
if not free_format:
row = row[6:72].rstrip()
if row == "" or row[0] in ('*', '/'):
continue
holder.append(row if len(holder) == 0 else row.strip())
if row[-1] == ".":
output.append(" ".join(holder))
holder = []
if len(holder) > 0:
_logger().warning(
"Probably invalid COBOL - found unfinished line: %s" %
" ".join(holder))
return output
def parse_cobol(lines):
    """Turn cleaned COBOL lines into dictionaries of field information.

    Each line is matched against ``CobolPatterns.row_pattern``; the
    ``level`` and ``occurs`` groups are converted to int and any PIC
    clause is parsed into ``pic_info``. Unmatched rows are logged and
    skipped.

    :param lines: cleaned, one-statement-per-entry COBOL lines
    :return: list of row dicts
    """
    output = []
    for row in lines:
        match = CobolPatterns.row_pattern.match(row.strip())
        if match is None:
            _logger().warning("Found unmatched row %s" % row.strip())
            continue
        info = match.groupdict()
        for key in ("level", "occurs"):
            info[key] = None if info[key] is None else int(info[key])
        if info['pic'] is not None:
            info['pic_info'] = parse_pic_string(info['pic'])
        output.append(info)
    return output
# Helper function
# Gets all the lines that have a higher level then the parent_level until
# a line with equal or lower level then parent_level is encountered
def get_subgroup(parent_level, lines):
output = []
for row in lines:
if row["level"] > parent_level:
output.append(row)
else:
return output
return output
def denormalize_cobol(lines):
return handle_occurs(lines, 1)
# Helper function
# Will go ahead and denormalize the COBOL
# Beacuse the OCCURS are removed the INDEXED BY will also be removed
def handle_occurs(lines, occurs, level_diff=0, name_postfix=""):
output = []
for i in range(1, occurs + 1):
skip_till = 0
new_name_postfix = (name_postfix if occurs == 1 else
name_postfix + '-' + str(i))
for index, row in enumerate(lines):
if index < skip_till:
continue
new_row = row.copy()
new_row['level'] += level_diff
# Not needed when flattened
new_row['indexed_by'] = None
if row['occurs'] is None:
# First time occurs is just 1, we don't want to add _1 after
# *every* field
new_row['name'] = row['name'] + new_name_postfix
# + "-" + str(i) if occurs > 1 else row['name'] + name_postfix
output.append(new_row)
else:
if row["pic"] is not None:
# If it has occurs and pic just repeat the same line
# multiple times
new_row['occurs'] = None
for j in range(1, row["occurs"] + 1):
row_to_add = new_row.copy()
# First time occurs is just 1, we don't want to add
# 1 after *every* field
row_to_add["name"] = (row['name'] + new_name_postfix +
'-' + str(j))
# + "-" + str(i) + "-" + str(j) if occurs > 1 else
# row['name'] + name_postfix + "-" + str(j)
output.append(row_to_add)
else:
# Get all the lines that have to occur
occur_lines = get_subgroup(row['level'], lines[index + 1:])
# Calculate the new level difference that has to be applied
new_level_diff = level_diff + row['level'] - occur_lines[
0]['level']
output += handle_occurs(occur_lines, row['occurs'],
new_level_diff, new_name_postfix)
skip_till = index + len(occur_lines) + 1
return output
def process_cobol(lines, free_format):
return clean_names(denormalize_cobol(parse_cobol(clean_cobol(lines, free_format))),
ensure_unique_names=False, strip_prefix=False,
make_database_safe=False)
|
pyQode/pyqode.cobol | pyqode/cobol/modes/margins.py | MarginsMode.on_state_changed | python | def on_state_changed(self, state):
if state:
self.editor.painted.connect(self._paint_margins)
self.editor.key_pressed.connect(self._on_key_pressed)
self.editor.repaint()
else:
self.editor.painted.disconnect(self._paint_margins)
self.editor.key_pressed.disconnect(self._on_key_pressed)
self.editor.repaint() | Connects/Disconnects to the painted event of the editor.
:param state: Enable state | train | https://github.com/pyQode/pyqode.cobol/blob/eedae4e320a4b2d0c44abb2c3061091321648fb7/pyqode/cobol/modes/margins.py#L63-L76 | null | class MarginsMode(Mode):
"""
Displays four margins at the specified columns.
"""
@property
def colors(self):
"""
Gets/sets the colors of the 4 margins
"""
return self._colors
@colors.setter
def colors(self, value):
self._colors = value
self._pens = [QtGui.QPen(c) for c in value]
TextHelper(self.editor).mark_whole_doc_dirty()
self.editor.repaint()
if self.editor:
for clone in self.editor.clones:
try:
clone.modes.get(self.__class__).colors = value
except KeyError:
# this should never happen since we're working with clones
pass
@property
def positions(self):
"""
Gets/sets the 4 position of the margins (tuple made up from 4
integers).
The positions are 0 based (use 0 for column 1,...).
"""
return self._positions
@positions.setter
def positions(self, value):
self._positions = value
if self.editor:
for clone in self.editor.clones:
try:
clone.modes.get(self.__class__).position = value
except KeyError:
# this should never happen since we're working with clones
pass
def __init__(self):
super(MarginsMode, self).__init__()
self._positions = [7, 11, 72, 79]
self._colors = [QtGui.QColor('red') for _ in range(4)]
self._pens = [QtGui.QPen(c) for c in self._colors]
def _paint_margins(self, event):
""" Paints the right margin after editor paint event. """
font = QtGui.QFont(self.editor.font_name, self.editor.font_size +
self.editor.zoom_level)
metrics = QtGui.QFontMetricsF(font)
painter = QtGui.QPainter(self.editor.viewport())
for pos, pen in zip(self._positions, self._pens):
if pos < 0:
# margin not visible
continue
offset = self.editor.contentOffset().x() + \
self.editor.document().documentMargin()
x_pos = round(metrics.width(' ') * pos) + offset
painter.setPen(pen)
painter.drawLine(x_pos, 0, x_pos, 2 ** 16)
def _on_key_pressed(self, event):
Qt = QtCore.Qt
modifier_match = int(event.modifiers()) == \
Qt.ControlModifier + Qt.AltModifier
if event.key() == Qt.Key_Left and modifier_match:
self._go_to_previous_margin()
event.accept()
elif event.key() == Qt.Key_Right and modifier_match:
self._go_to_next_margin()
event.accept()
def _go_to_next_margin(self):
    """Move the cursor to the first margin right of the current column
    (or to the end of the line if no margin lies beyond it)."""
    cursor = self.editor.textCursor()
    max_column = len(cursor.block().text())
    current = TextHelper(self.editor).current_column_nbr()
    target = max_column
    for position in self._positions:
        if position < 0:
            # hidden margin
            continue
        if position > current:
            # first margin past the cursor, clamped to the line length
            target = min(position, max_column)
            break
    cursor.movePosition(cursor.Right, cursor.MoveAnchor, target - current)
    self.editor.setTextCursor(cursor)
def _go_to_previous_margin(self):
    """Move the cursor to the first margin left of the current column
    (or to column 1 if no margin lies before it)."""
    cursor = self.editor.textCursor()
    min_column = 1
    current = TextHelper(self.editor).current_column_nbr()
    target = min_column
    for position in reversed(self._positions):
        if position < 0:
            # hidden margin
            continue
        if position < current:
            # first margin before the cursor, clamped to column 1
            target = max(position, min_column)
            break
    cursor.movePosition(cursor.Left, cursor.MoveAnchor, current - target)
    self.editor.setTextCursor(cursor)
def clone_settings(self, original):
    """Copy the margin colors and positions from *original*."""
    for attr in ('colors', 'positions'):
        setattr(self, attr, getattr(original, attr))
|
def _paint_margins(self, event):
    """ Paints the right margin after editor paint event. """
    editor = self.editor
    font = QtGui.QFont(editor.font_name,
                       editor.font_size + editor.zoom_level)
    metrics = QtGui.QFontMetricsF(font)
    painter = QtGui.QPainter(editor.viewport())
    # Horizontal origin of the text area; constant for one paint pass.
    offset = editor.contentOffset().x() + \
        editor.document().documentMargin()
    char_width = metrics.width(' ')
    for position, pen in zip(self._positions, self._pens):
        if position < 0:
            # margin not visible
            continue
        x = round(char_width * position) + offset
        painter.setPen(pen)
        # 2 ** 16 is simply "taller than any viewport".
        painter.drawLine(x, 0, x, 2 ** 16)


class MarginsMode(Mode):
    """
    Displays four margins at the specified columns.
    """
@property
def colors(self):
    """
    Gets/sets the colors of the 4 margins.

    :return: list of 4 QColor instances, one per margin.
    """
    return self._colors

@colors.setter
def colors(self, value):
    self._colors = value
    # Rebuild the cached pens so _paint_margins does not have to create
    # them on every paint event.
    self._pens = [QtGui.QPen(c) for c in value]
    if self.editor:
        # BUGFIX: the dirty/repaint calls used to run unconditionally and
        # raised AttributeError when the mode was not attached to an
        # editor yet; guard them like the ``positions`` setter does.
        TextHelper(self.editor).mark_whole_doc_dirty()
        self.editor.repaint()
        # Propagate the new colors to split views of the same document.
        for clone in self.editor.clones:
            try:
                clone.modes.get(self.__class__).colors = value
            except KeyError:
                # this should never happen since we're working with clones
                pass
@property
def positions(self):
    """
    Gets/sets the 4 positions of the margins (tuple made up from 4
    integers).

    The positions are 0 based (use 0 for column 1,...). A negative
    position hides the corresponding margin.
    """
    return self._positions

@positions.setter
def positions(self, value):
    self._positions = value
    if self.editor:
        for clone in self.editor.clones:
            try:
                # BUGFIX: this used to assign ``.position`` (a
                # non-existent attribute), silently leaving clones with
                # stale margin positions. ``positions`` is the real
                # property, mirroring how the ``colors`` setter works.
                clone.modes.get(self.__class__).positions = value
            except KeyError:
                # this should never happen since we're working with clones
                pass
def __init__(self):
    """Set up the default margin positions, colors and pens."""
    super(MarginsMode, self).__init__()
    # 0-based columns of the standard COBOL areas.
    self._positions = [7, 11, 72, 79]
    # One red color (and matching pen) per margin.
    self._colors = [QtGui.QColor('red') for _ in range(4)]
    self._pens = [QtGui.QPen(c) for c in self._colors]
def on_state_changed(self, state):
    """
    Connects/Disconnects to the painted event of the editor.

    :param state: Enable state
    """
    editor = self.editor
    if state:
        editor.painted.connect(self._paint_margins)
        editor.key_pressed.connect(self._on_key_pressed)
    else:
        editor.painted.disconnect(self._paint_margins)
        editor.key_pressed.disconnect(self._on_key_pressed)
    # Repaint in both cases so the margins appear/disappear immediately.
    editor.repaint()
def _on_key_pressed(self, event):
    """Handle Ctrl+Alt+Left/Right margin navigation shortcuts."""
    Qt = QtCore.Qt
    if int(event.modifiers()) != Qt.ControlModifier + Qt.AltModifier:
        return
    # Dispatch table keeps the key handling declarative.
    handlers = {
        Qt.Key_Left: self._go_to_previous_margin,
        Qt.Key_Right: self._go_to_next_margin,
    }
    handler = handlers.get(event.key())
    if handler is not None:
        handler()
        event.accept()
def _go_to_next_margin(self):
    """Jump to the first margin column to the right of the cursor,
    falling back to the end of the current line."""
    cursor = self.editor.textCursor()
    line_length = len(cursor.block().text())
    column = TextHelper(self.editor).current_column_nbr()
    # First visible margin past the cursor, clamped to the line length.
    candidates = (p for p in self._positions if p >= 0 and p > column)
    target = min(next(candidates, line_length), line_length)
    cursor.movePosition(cursor.Right, cursor.MoveAnchor, target - column)
    self.editor.setTextCursor(cursor)
def _go_to_previous_margin(self):
    """Jump to the first margin column to the left of the cursor,
    falling back to column 1."""
    cursor = self.editor.textCursor()
    column = TextHelper(self.editor).current_column_nbr()
    # First visible margin before the cursor, clamped to column 1.
    candidates = (p for p in reversed(self._positions)
                  if p >= 0 and p < column)
    target = max(next(candidates, 1), 1)
    cursor.movePosition(cursor.Left, cursor.MoveAnchor, column - target)
    self.editor.setTextCursor(cursor)
def clone_settings(self, original):
    """Mirror the margin colors and positions of *original*."""
    # Tuple assignment preserves the original order of the two setter
    # invocations (colors first, then positions).
    self.colors, self.positions = original.colors, original.positions
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.