text stringlengths 0 1.05M | meta dict |
|---|---|
''' A collection of unit tests for the classes of xmlnode.py.
'''
import unittest
from collections import namedtuple
import xmlnode
class BufferedIterTests(unittest.TestCase):
    """Unit tests for ``xmlnode.BufferedIter``."""

    def test_once(self):
        """A BufferedIter yields the wrapped iterable's values once through."""
        wrapped = xmlnode.BufferedIter(iter(range(5)))
        self.assertEqual([0, 1, 2, 3, 4], list(wrapped))

    def test_twice(self):
        """Iterating twice works only if consumed values are buffered."""
        wrapped = xmlnode.BufferedIter(iter(range(5)))
        for _ in range(2):
            self.assertEqual([0, 1, 2, 3, 4], list(wrapped))

    def test_index(self):
        """Indexing pulls values from the iterable on demand."""
        wrapped = xmlnode.BufferedIter(iter(range(5)))
        self.assertEqual(2, wrapped[2])

    def test_empty(self):
        """An empty iterable produces an empty BufferedIter."""
        wrapped = xmlnode.BufferedIter(iter([]))
        self.assertEqual([], list(wrapped))

    def test_infinite(self):
        """Indexing works on an endless iterable without exhausting it."""
        def counter():
            value = 0
            while True:
                yield value
                value += 1
        wrapped = xmlnode.BufferedIter(iter(counter()))
        self.assertEqual(100, wrapped[100])

    def test_index_twice(self):
        """Consecutive index accesses both succeed."""
        wrapped = xmlnode.BufferedIter(iter(range(5)))
        self.assertEqual(2, wrapped[2])
        self.assertEqual(3, wrapped[3])
class XMLNodeCollectionTests(unittest.TestCase):
    ''' Test cases for the XMLNodeCollection class.
    '''
    # Lightweight stand-in for real nodes; only the ``tag`` field matters here.
    XMLNode = namedtuple('XMLNode', ['tag'])

    def test_iterable(self):
        ''' Test that the XMLNodeCollection iterates its nodes.
        '''
        nodes = [self.XMLNode(tag) for tag in range(5)]
        collection = xmlnode.XMLNodeCollection(nodes)
        self.assertEqual(nodes, list(collection))

    def test_filter(self):
        ''' Test that the XMLNodeCollection can be filtered.
        '''
        nodes = [self.XMLNode(tag) for tag in ['in', 'out', 'in', 'out', 'in']]
        collection = xmlnode.XMLNodeCollection(nodes)
        filtered = collection['in']
        self.assertEqual(nodes[::2], list(filtered))

    def test_index(self):
        ''' Test that the XMLNodeCollection can be indexed.
        '''
        nodes = [self.XMLNode(tag) for tag in range(5)]
        collection = xmlnode.XMLNodeCollection(nodes)
        self.assertEqual(nodes[2], collection[2])

    def test_filtered_index(self):
        ''' Test that a filtered XMLNodeCollection can be indexed.

        Renamed from ``test_index``: the duplicate method name shadowed the
        integer-indexing test above, so that test never ran.
        '''
        nodes = [self.XMLNode(tag) for tag in ['in', 'out', 'in', 'out', 'in']]
        collection = xmlnode.XMLNodeCollection(nodes)
        filtered = collection['in']
        self.assertEqual(nodes[2], filtered[1])
class XMLNodeTests(unittest.TestCase):
    """Unit tests for the XMLNode class."""

    # Comparable stand-in matching the parsed node's fields.
    XMLNode = namedtuple('XMLNode', ['tag', 'text', 'children'])

    def _expected_tree(self):
        """The tree every parsing test expects: a root with one child element."""
        return self.XMLNode('root', None,
                            [self.XMLNode('element', 'Element 1', [])])

    def test_feed_all(self):
        """Parsing succeeds when all XML data arrives in a single chunk."""
        feed = iter([b'<root><element>Element 1</element></root>'])
        parsed = xmlnode.XMLNode.parse(feed)
        self.assertEqual(parsed, self._expected_tree())

    def test_feed_many(self):
        """Parsing succeeds when the XML data arrives over three chunks."""
        feed = iter([b'<root><element>', b'Element 1</element>', b'</root>'])
        parsed = xmlnode.XMLNode.parse(feed)
        self.assertEqual(parsed, self._expected_tree())

    def test_feed_xml_version(self):
        """An <?xml ...?> declaration before the root node is accepted."""
        feed = iter([b'<?xml version="1.0" encoding="UTF-8"?>',
                     b'<root><element>', b'Element 1</element>'])
        parsed = xmlnode.XMLNode.parse(feed)
        self.assertEqual(parsed, self._expected_tree())

    def test_feed_parent_unclosed(self):
        """Children are still iterable when the parent tag is never closed."""
        feed = iter([b'<root><element>', b'Element 1</element>'])
        parsed = xmlnode.XMLNode.parse(feed)
        self.assertEqual(parsed, self._expected_tree())

    def test_feed_invalid_xml(self):
        """Invalid leading content raises once the node is inspected."""
        feed = iter([b'not valid<root><element>', b'Element 1</element>'])
        parsed = xmlnode.XMLNode.parse(feed)
        with self.assertRaises(Exception):
            parsed.tag

    def test_feed_raises_exception(self):
        """An error in the underlying feed surfaces lazily, not at parse time."""
        def feed():
            yield b'<root>'
            raise Exception
        parsed = xmlnode.XMLNode.parse(feed())
        self.assertEqual('root', parsed.tag)
        with self.assertRaises(Exception):
            for _child in parsed.children:
                pass

    def test_attributes_present(self):
        """Tag attributes are parsed into the ``attributes`` mapping."""
        feed = iter([b'<root key="value"/>'])
        parsed = xmlnode.XMLNode.parse(feed)
        self.assertEqual({'key': 'value'}, parsed.attributes)
if __name__ == '__main__':
unittest.main() | {
"repo_name": "foldr/helpers-py",
"path": "xmlnode_tests.py",
"copies": "1",
"size": "6206",
"license": "mit",
"hash": -1920977800921125000,
"line_mean": 27.5619047619,
"line_max": 108,
"alpha_frac": 0.5659039639,
"autogenerated": false,
"ratio": 4.268225584594223,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5334129548494223,
"avg_score": null,
"num_lines": null
} |
# A collection of useful functions and objects
# Creates a corpus in reverse: i.e. keys are features, values are a set of docs
def reverse_corpus(corpus):
    """Invert a corpus mapping.

    Parameters
    ----------
    corpus : dict
        Maps doc -> spectrum, where a spectrum maps feature -> intensity.

    Returns
    -------
    dict
        Maps feature -> set of docs whose spectrum contains that feature.
    """
    # Renamed the accumulator: the original shadowed the function's own name.
    inverted = {}
    for doc, spectrum in corpus.items():
        for feature in spectrum:  # intensities are unused; iterate keys only
            inverted.setdefault(feature, set()).add(doc)
    return inverted
# Counts how many docs each feature appears in
def count_docs(reverse_corpus, corpus, min_percent=0.0, max_percent=100.0):
    """Count the document frequency of each feature and flag outliers.

    Parameters
    ----------
    reverse_corpus : dict
        feature -> set of docs (as produced by ``reverse_corpus``).
    corpus : dict
        doc -> spectrum; only its length is used.
    min_percent, max_percent : float
        Features present in fewer than ``min_percent`` or more than
        ``max_percent`` percent of docs are flagged for removal.

    Returns
    -------
    (doc_counts, to_remove)
        ``doc_counts`` maps feature -> (count, percent); ``to_remove`` is a
        set of flagged features.
    """
    n_docs = len(corpus)
    doc_counts = {}
    for feature, docs in reverse_corpus.items():
        doc_counts[feature] = (len(docs), (100.0 * len(docs)) / n_docs)
    # Materialise the pairs once: the original built a one-shot zip iterator,
    # which in Python 3 was exhausted by the first filter pass, so the second
    # pass silently matched nothing; zip(*...) also crashed on empty matches.
    pairs = list(doc_counts.items())
    to_remove = []
    if min_percent > 0.0:
        to_remove += [f for f, (_, pct) in pairs if pct < min_percent]
    if max_percent < 100.0:
        to_remove += [f for f, (_, pct) in pairs if pct > max_percent]
    return doc_counts, set(to_remove)
# remove a particular set of features from the corpus
def remove_features(corpus, to_remove):
    """Delete every feature in ``to_remove`` from each spectrum, in place.

    Returns the same (mutated) corpus for convenience.
    """
    for spectrum in corpus.values():
        # Intersect first so we never delete while iterating the spectrum.
        for feature in set(spectrum) & set(to_remove):
            del spectrum[feature]
    return corpus
def bin_diff(diff, bin_width=0.005):
    """Name the centre of the bin that a mass difference falls into.

    Bins of width ``bin_width`` are centred on multiples of ``bin_width``;
    the result is the bin centre formatted to four decimal places.
    """
    import numpy as np
    half_width = bin_width / 2.0
    # Shift by half a bin so flooring assigns values to the nearest centre.
    shifted = diff + half_width
    bin_number = np.floor(shifted / bin_width)
    lower_edge = bin_number * bin_width
    upper_edge = lower_edge + bin_width
    # Midpoint of the shifted bin, then undo the shift.
    centre = (lower_edge + upper_edge) / 2.0 - half_width
    return "{:.4f}".format(centre)
def convert_corpus_to_counts(corpus):
    """Collapse a nested corpus into per-file m/z occurrence counts.

    Parameters
    ----------
    corpus : dict
        file -> {molecule -> {mz -> intensity}}.

    Returns
    -------
    dict
        file -> {mz -> number of molecules in that file containing mz}.
    """
    # Dropped the dead ``n_files`` local and the builtin-shadowing ``file``
    # name; counting now uses the dict.get idiom.
    counts = {}
    for filename, spectra in corpus.items():
        file_counts = {}
        for spectrum in spectra.values():
            for mz in spectrum:  # intensities are ignored; only presence counts
                file_counts[mz] = file_counts.get(mz, 0) + 1
        counts[filename] = file_counts
    return counts
def make_count_matrix(counts):
    """Build a dense (features x samples) count matrix plus index mappings.

    Returns
    -------
    (matrix, sample_index, feature_index, sample_list, feature_list)
        ``matrix`` rows are features, columns are samples; the two dicts map
        name -> position and the two lists map position -> name.
    """
    import numpy as np
    from scipy.sparse import coo_matrix
    sample_index = {}
    feature_index = {}
    entries = []
    # Samples are assigned columns in sorted filename order.
    for sample_pos, filename in enumerate(sorted(counts)):
        sample_index[filename] = sample_pos
        for feature, count in counts[filename].items():
            if feature not in feature_index:
                feature_index[feature] = len(feature_index)
            entries.append((sample_pos, feature_index[feature], count))
    sample_pos_list, feature_pos_list, data = zip(*entries)
    # Feature positions as rows, sample positions as columns.
    sparse = coo_matrix((data, (feature_pos_list, sample_pos_list)))
    sample_list = ['' for _ in sample_index]
    for name, pos in sample_index.items():
        sample_list[pos] = name
    feature_list = ['' for _ in feature_index]
    for name, pos in feature_index.items():
        feature_list[pos] = name
    return np.array(sparse.todense()), sample_index, feature_index, sample_list, feature_list
| {
"repo_name": "sdrogers/ms2ldaviz",
"path": "lda/code/ms2lda_feature_extraction_utilities.py",
"copies": "2",
"size": "2826",
"license": "mit",
"hash": -1804940334618937000,
"line_mean": 28.7473684211,
"line_max": 82,
"alpha_frac": 0.6818825195,
"autogenerated": false,
"ratio": 2.869035532994924,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45509180524949244,
"avg_score": null,
"num_lines": null
} |
"""A collection of useful functions for LazySusan."""
import traceback
from functools import wraps
from lazysusan.plugins import CommandPlugin
def admin_required(function):
    """A command decorator that requires an admin to run.

    Admin users are listed in lazysusan.ini under admin_ids, one per line.
    Non-admin senders receive a private message saying the command is
    restricted."""
    @wraps(function)
    def wrapper(cls, *args, **kwargs):  # pylint: disable-msg=C0111
        # Plugins keep the bot on an attribute; built-in commands pass it directly.
        bot = cls.bot if isinstance(cls, CommandPlugin) else cls
        sender = get_sender_id(args[1])
        if sender in bot.config['admin_ids']:
            return function(cls, *args, **kwargs)
        message = 'You must be an admin to execute that command.'
        return bot.api.pm(message, sender)
    # func_dict is the Python 2 spelling of __dict__; the flag lets the
    # framework detect permission-decorated commands.
    wrapper.func_dict['admin_required'] = True
    return wrapper
def admin_or_moderator_required(function):
    """A command decorator that requires either an admin or a moderator to run.

    If the sending user is neither an admin, nor a moderator, a private message
    will be returned to them indicating such.
    """
    @wraps(function)
    def wrapper(cls, *args, **kwargs):  # pylint: disable-msg=C0111
        # Plugins keep the bot on an attribute; built-in commands pass it directly.
        bot = cls.bot if isinstance(cls, CommandPlugin) else cls
        sender = get_sender_id(args[1])
        allowed = (sender in bot.moderator_ids
                   or sender in bot.config['admin_ids'])
        if not allowed:
            message = ('You must be either an admin or a moderator to execute '
                       'that command.')
            return bot.api.pm(message, sender)
        return function(cls, *args, **kwargs)
    wrapper.func_dict['admin_or_moderator_required'] = True
    return wrapper
def display_exceptions(function):
    """Print a traceback for any exception raised by the wrapped function.

    The exception is swallowed after being printed, so callers see ``None``
    instead of a crash. (The previous docstring, "Expand the arguments to
    the functions", was a copy-paste error describing a different decorator.)
    """
    @wraps(function)
    def wrapper(*args, **kwargs):  # pylint: disable-msg=C0111
        try:
            return function(*args, **kwargs)
        except:  # deliberate catch-all: report, never propagate -- pylint: disable-msg=W0702
            traceback.print_exc()
    return wrapper
def dynamic_permissions(admin=False, mod=False):
    """A command decorator generator whose permissions can be altered."""
    def generator(function):
        """Return a generator that dynamically decorates the function."""
        @wraps(function)
        def wrapper(*args, **kwargs):  # pylint:disable-msg=C0111
            # Delegate to the DynamicPermissions instance so the effective
            # permission check can be swapped out after decoration time.
            return dyn(*args, **kwargs)
        # Bound after ``wrapper`` is defined; the closure resolves ``dyn``
        # at call time, so this late assignment is safe.
        dyn = DynamicPermissions(function, mod=mod, admin=admin)
        # ``func_dict`` is the Python 2 spelling of ``__dict__``; the flag
        # lets the framework discover dynamically-permissioned commands.
        wrapper.func_dict['dynamic_permissions'] = True
        return wrapper
    return generator
def get_sender_id(data):
    """Return the userid of the user from the message data."""
    command = data['command']
    # Public chat and private messages store the sender under different keys.
    if command == 'speak':
        return data['userid']
    if command == 'pmmed':
        return data['senderid']
    raise Exception('Unrecognized command type `{0}`'.format(command))
def moderator_required(function):
    """A command decorator that requires a moderator to run.

    Only use this on commands that genuinely need the bot's moderator
    privileges; prefer `admin_or_moderator_required` for plain access
    control. Non-moderator senders receive a private message indicating
    the restriction.
    """
    @wraps(function)
    def wrapper(cls, *args, **kwargs):  # pylint: disable-msg=C0111
        # Plugins keep the bot on an attribute; built-in commands pass it directly.
        bot = cls.bot if isinstance(cls, CommandPlugin) else cls
        sender = get_sender_id(args[1])
        if sender in bot.moderator_ids:
            return function(cls, *args, **kwargs)
        message = 'You must be a moderator to execute that command.'
        return bot.api.pm(message, sender)
    wrapper.func_dict['moderator_required'] = True
    return wrapper
def no_arg_command(function):
    """Indicate that the command does not have a message."""
    @wraps(function)
    def wrapper(cls, message, *args, **kwargs):  # pylint: disable-msg=C0111
        # Silently ignore invocations that carry a message payload.
        if not message:
            return function(cls, *args, **kwargs)
    return wrapper
def single_arg_command(function):
    """Indicate that the command takes a message with a single argument."""
    @wraps(function)
    def wrapper(cls, *args, **kwargs):  # pylint: disable-msg=C0111
        message = args[0]
        # Reject empty messages and anything with more than one token
        # (input only ever contains spaces as separators).
        if message and ' ' not in message:
            return function(cls, *args, **kwargs)
    return wrapper
class DynamicPermissions(object):
    """Responsible for altering dynamic_permission decorated functions."""
    # Maps (admin, mod) requirement flags to the decorator enforcing them.
    PERM_MAPPING = {(True, True): admin_or_moderator_required,
                    (True, False): admin_required,
                    (False, True): moderator_required}
    # Class-level registry shared by ALL instances: wrapped callable -> original.
    decorated = {}
    def __init__(self, function, admin, mod):
        # Wrap with the matching permission decorator, or store the function
        # unchanged when no permissions are required.
        if admin or mod:
            self.wrapped = self.PERM_MAPPING[(admin, mod)](function)
        else:
            self.wrapped = function
        self.decorated[self.wrapped] = function
    def __call__(self, *args, **kwargs):
        # Delegate calls straight to the (possibly permission-checked) wrapper.
        return self.wrapped(*args, **kwargs)
| {
"repo_name": "bboe/LazySusan",
"path": "lazysusan/helpers.py",
"copies": "1",
"size": "5504",
"license": "bsd-2-clause",
"hash": -7857479673755309000,
"line_mean": 33.835443038,
"line_max": 79,
"alpha_frac": 0.6299055233,
"autogenerated": false,
"ratio": 4.153962264150944,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00009041591320072333,
"num_lines": 158
} |
"""A collection of useful functions related to paths, ST- and non-ST-related.
Also has some ST-specific file extensions as "constants".
"""
import os
import re
import inspect
from collections import namedtuple
import sublime
# Public API of this module.
__all__ = (
    "FTYPE_EXT_KEYMAP",
    "FTYPE_EXT_COMPLETIONS",
    "FTYPE_EXT_SNIPPET",
    "FTYPE_EXT_BUILD",
    "FTYPE_EXT_SETTINGS",
    "FTYPE_EXT_TMPREFERENCES",
    "FTYPE_EXT_TMLANGUAGE",
    "root_at_packages",
    "data_path",
    "root_at_data",
    "file_path_tuple",
    "get_module_path",
    "get_package_name"
)
# File extensions of the various Sublime Text resource file types.
FTYPE_EXT_KEYMAP = ".sublime-keymap"
FTYPE_EXT_COMPLETIONS = ".sublime-completions"
FTYPE_EXT_SNIPPET = ".sublime-snippet"
FTYPE_EXT_BUILD = ".sublime-build"
FTYPE_EXT_SETTINGS = ".sublime-settings"
FTYPE_EXT_TMPREFERENCES = ".tmPreferences"
FTYPE_EXT_TMLANGUAGE = ".tmLanguage"
def root_at_packages(*leafs):
    """Join ``leafs`` onto Sublime's Packages directory.

    Requires the API to finish loading on ST3.
    """
    # If ever needed, the packages path could also be extracted from
    # sys.path on ST3.
    packages = sublime.packages_path()
    return os.path.join(packages, *leafs)
def data_path():
    """Derive Sublime Text's Data directory from the packages path.

    Requires the API to finish loading on ST3.
    """
    # Packages lives directly inside Data, so its parent is what we want.
    return os.path.dirname(sublime.packages_path())
def root_at_data(*leafs):
    """Join ``leafs`` onto Sublime's ``Data`` directory.

    Requires the API to finish loading on ST3.
    """
    base = data_path()
    return os.path.join(base, *leafs)
# Container for the components of a file path.
FilePath = namedtuple("FilePath", "file_path path file_name base_name ext no_ext")


def file_path_tuple(file_path):
    """Split ``file_path`` into a FilePath namedtuple of its components."""
    directory, file_name = os.path.split(file_path)
    stem, extension = os.path.splitext(file_name)
    return FilePath(
        file_path=file_path,
        path=directory,
        file_name=file_name,
        base_name=stem,
        ext=extension,
        no_ext=os.path.join(directory, stem),
    )
def get_module_path(_file_=None):
    """Return a tuple with the normalized module path plus a boolean.

    * _file_ (optional)
      The value of `__file__` in your module; when omitted,
      `get_caller_frame()` is consulted instead, which usually works.

    Return: (normalized_module_path, archived) where the path is what you
    usually refer to in the Sublime API (without `.sublime-package`) and
    ``archived`` is True when the module lives inside an archive.
    """
    if _file_ is None:
        _file_ = get_caller_frame().f_globals['__file__']
    dir_name = os.path.dirname(os.path.abspath(_file_))
    # ST2, or an on-disk package: nothing to normalise.
    in_archive = (int(sublime.version()) >= 3000
                  and dir_name.endswith(".sublime-package"))
    if not in_archive:
        return dir_name, False
    # Inside a .sublime-package archive: rewrite to the virtual Packages path.
    virtual_path = re.sub(r"(?:Installed )?Packages([\\/][^\\/]+)\.sublime-package(?=[\\/]|$)",
                          r"Packages\1", dir_name)
    return virtual_path, True
def get_package_path(_file_=None):
    """Get the path to the current Sublime Text package.

    Parameters are the same as for `get_module_path`. Returns None when the
    module is not inside a top-level plugin.
    """
    if _file_ is None:
        _file_ = get_caller_frame().f_globals['__file__']
    current = get_module_path(_file_)[0]
    # Walk upward until the parent directory is 'Packages'; give up once the
    # path is too short to contain one (e.g. reduced to a drive root).
    while not os.path.dirname(current).endswith('Packages'):
        if len(current) <= 3:
            return None
        current = os.path.dirname(current)
    return current
def get_package_name(_file_=None):
    """`return os.path.split(get_package_path(_file_))[1]`."""
    if _file_ is None:
        _file_ = get_caller_frame().f_globals['__file__']
    package_path = get_package_path(_file_)
    return os.path.split(package_path)[1]
def get_caller_frame(i=1):
    """Get the caller's frame (utilizing the inspect module).

    You can adjust `i` to find the i-th caller, default is 1.
    """
    # ST rewrites file names for zipped packages, so inspect.stack()'s
    # reported paths are unreliable; return the frame object itself instead.
    stack = inspect.stack()
    return stack[i + 1][0]
| {
"repo_name": "FichteFoll/CSScheme",
"path": "my_sublime_lib/path.py",
"copies": "1",
"size": "4229",
"license": "mit",
"hash": 4696471100354160000,
"line_mean": 28.3680555556,
"line_max": 95,
"alpha_frac": 0.6306455427,
"autogenerated": false,
"ratio": 3.372408293460925,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4503053836160925,
"avg_score": null,
"num_lines": null
} |
"""A collection of useful functions that didn't really belong anywhere else"""
from data_interrogator.interrogators import Allowable
from django.conf import settings
from django.apps import apps
from django.db.models import Model
from typing import Tuple, Union
import logging
# Module-level logger; the debug line fires once at import time and is only
# visible when DEBUG logging is configured.
logger = logging.getLogger(__name__)
logger.debug(f"Logging started for {__name__}")
def get_human_readable_model_name(model: Model) -> str:
    """Get the optimal model name from a model.

    Resolution order: the ``INTERROGATOR_NAME_OVERRIDES`` setting, then a
    model-level ``interrogator_name`` attribute, then the model's verbose
    name. Plain strings are returned unchanged.
    """
    # isinstance instead of ``type(...) == str`` (handles str subclasses and
    # is the idiomatic type check).
    if isinstance(model, str):
        return model
    name = f'{model._meta.app_label}:{model.__name__}'
    if hasattr(settings, 'INTERROGATOR_NAME_OVERRIDES') and name in settings.INTERROGATOR_NAME_OVERRIDES:
        return settings.INTERROGATOR_NAME_OVERRIDES[name]
    elif hasattr(model, 'interrogator_name'):
        return getattr(model, 'interrogator_name')
    else:
        return model._meta.verbose_name.title()
def append_to_group(app_group, app_model_pair) -> Tuple:
    """Return ``app_group`` (a tuple) with ``app_model_pair`` appended."""
    return tuple(app_group) + (app_model_pair,)
def get_model_name(model: Union[str, Model]):
    """Return the class name of ``model``, or ``model`` itself if already a str."""
    # isinstance instead of ``type(...) != str`` -- idiomatic and subclass-safe.
    if isinstance(model, str):
        return model
    return model.__name__
def get_all_base_models(bases):
    """From a beginning list of base_models, produce all reportable models.

    Parameters
    ----------
    bases
        Either ``Allowable.ALL_MODELS`` / ``Allowable.ALL_APPS`` (include
        everything), or a list whose entries are ``(app_name,)`` (all models
        of that app) or ``(app_name, model_name)`` pairs.

    Returns
    -------
    list
        ``(app_verbose_name, ((qualified_name, human_name), ...))`` pairs,
        suitable for grouped choice fields.

    Bug fixed: the single-app branch stored a bare ``(name, human)`` pair
    instead of a one-element tuple OF pairs (missing trailing comma), which
    corrupted the group structure on the next ``append_to_group`` call.
    """
    all_models = {}

    def _register(qualifier, verbose_name, model):
        # Append one (qualified name, human readable name) pair to the app's
        # group, creating a one-element group on first sight.
        model_name = get_model_name(model)
        human_readable_name = get_human_readable_model_name(model)
        pair = (f'{qualifier}:{model_name}', human_readable_name)
        if verbose_name in all_models:
            all_models[verbose_name] = append_to_group(all_models[verbose_name], pair)
        else:
            all_models[verbose_name] = (pair,)

    if bases in [Allowable.ALL_MODELS, Allowable.ALL_APPS]:
        for app in apps.get_app_configs():
            for model in app.models:
                _register(app.name, app.verbose_name, model)
        return list(all_models.items())
    for base in bases:
        if len(base) == 1:
            # Entry is a bare (app_name,): include every model of the app.
            app_name = base[0]
            app = apps.get_app_config(app_name)
            for model in app.models:
                _register(app_name, app.verbose_name, model)
        else:
            # Entry is an (app_name, model_name) pair: include just that model.
            app_name, model = base[:2]
            app = apps.get_app_config(app_name)
            model = app.get_model(model)
            _register(app_name, app.verbose_name, model)
    return list(all_models.items())
| {
"repo_name": "LegoStormtroopr/django-data-interrogator",
"path": "data_interrogator/utils.py",
"copies": "1",
"size": "3709",
"license": "mit",
"hash": 4714996468626158000,
"line_mean": 34.3238095238,
"line_max": 105,
"alpha_frac": 0.5610676732,
"autogenerated": false,
"ratio": 3.9042105263157896,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4965278199515789,
"avg_score": null,
"num_lines": null
} |
# a collection of useful helper functions for the processors
import re
import io
import csv
# derp.ext -> derp_ext_
def makeFilenameSafe(filename):
    """Replace every '.' with '_' and append a trailing '_'.

    Uses a raw string for the regex: the old ``"\\."`` relied on Python
    passing the unknown escape through, which raises a DeprecationWarning
    and becomes an error in future versions.
    """
    return re.sub(r"\.", "_", filename) + "_"
def clustersToStandardJSON(clusters, assignment, filename, helpers):
    # Serialise only the clusters that show evidence of cheating and write
    # them to the postprocessed results for this assignment.
    # NOTE(review): ``io`` here must be a project-local module -- the stdlib
    # ``io`` has no ``getJSONString``; confirm the intended import.
    results = []
    for cluster in clusters:
        if cluster.hasCheating():
            results.append(cluster.toJSON())
    json = io.getJSONString(results, True)
    helpers.writeToPostprocessed(json, assignment, filename)
# only use with a preprocessor or processor
# only use with a preprocessor or processor
def getPartner(student, assignment, semester, helpers):
    """Return the student's partner for ``assignment``, or None.

    Reads the student's partners.txt, normalises ','/':' separators to
    newlines, and returns the first listed student (other than the student
    themself) who is enrolled in the same semester.

    Cleanups: ``is (not) None`` instead of ``== None`` / ``!= None``, and
    guard clauses instead of three levels of nesting.
    """
    if student is None:
        return None
    partnerText = helpers.readFromAssignment(student, assignment, "partners.txt")
    if partnerText is None:
        return None
    # Treat commas and colons as line separators.
    partnerText = re.sub(",", "\n", partnerText)
    partnerText = re.sub(":", "\n", partnerText)
    for line in partnerText.strip().split("\n"):
        candidate = line.strip().split(" ")[0]
        if len(candidate) > 1 and candidate != student:
            # The partner must be enrolled in the same semester.
            otherSemester = helpers.getSemester(candidate)
            if otherSemester is not None and otherSemester == semester:
                return candidate
    return None
# PairResults take up less space on disk than Corpus results.
# If used properly, can rebuild a corpus in RAM from these.
class PairResult:
    """A single similarity score between two students."""

    def __init__(self, student1, student2, score):
        self.pair = [student1, student2]
        self.score = score

    def toJSON(self):
        """Return a JSON-serialisable dict of this result."""
        return {"score": self.score, "pair": self.pair}
class PairResults:
    """Streams pair results to/from a small CSV file on disk.

    Takes far less space than keeping whole Corpus results in RAM; used
    properly, a corpus can be rebuilt from these records.
    """
    def __init__(self, assignment, filename, helpers):
        # Full path of the backing file inside the processed results tree.
        self.filename = helpers.config.corpusPath + '__algae__/processed/' + assignment + '/' + filename
        self.assignment = assignment
        self.end = filename
        # Lazily-opened write handle; stays None until the first add().
        self.handle = None
        self.helpers = helpers
    # write a line to disk
    def add(self, pair):
        if self.handle == None:
            # create a blank file (ensures the directory entry exists)
            self.helpers.writeToProcessed("", self.assignment, self.end)
            self.handle = open(self.filename, "w+")
        string = "{},{},{}\n".format(pair.pair[0], pair.pair[1], pair.score)
        self.handle.write(string)
    # closes the handle
    # IF WE'VE CALLED ADD(), CALL THIS BEFORE ITERATE()
    def finish(self):
        if self.handle != None:
            self.handle.close()
            self.handle = None
    # Generator that allows iteration through all results
    def iterate(self):
        handle = open(self.filename, "r")
        line = handle.readline()
        while line != "":
            parts = line.strip().split(",")
            # create the pair result from "student1,student2,score"
            pair = PairResult(parts[0], parts[1], float(parts[2]))
            yield pair
            # get the next line
            line = handle.readline()
        # all done
        handle.close()
    # Sends data to JSON. Can use lots of RAM.
    def toJSON(self):
        # return JSON serializable form (materialises every result)
        results = []
        for pair in self.iterate():
            results.append(pair.toJSON())
        return results
class Member:
    """One student in a cluster, with semester and (same-semester) partner."""

    def __init__(self, student, assignment, helpers):
        self.student = student
        self.semester = helpers.getSemester(student)
        # Partner lookup is restricted to the member's own semester.
        self.partner = getPartner(student, assignment, self.semester, helpers)

    def toJSON(self):
        """Return a JSON-serialisable dict of this member."""
        return {
            "student": self.student,
            "partner": self.partner,
            "semester": self.semester,
        }
class Cluster:
    """A group of members flagged as similar on a particular file."""

    def __init__(self, allowPartners, filename, score):
        self.members = []
        self.allowPartners = allowPartners
        self.file = filename
        self.score = score
        # When True, (a,b),(b,) situations count as an honest mistake.
        self.allowMistakes = True

    def add(self, newMember):
        """Add ``newMember`` unless a member with the same student exists."""
        if any(member.student == newMember.student for member in self.members):
            return
        self.members.append(newMember)

    def hasCheating(self):
        """Decide whether this cluster constitutes cheating."""
        if len(self.members) < 2:
            # Cheating takes at least two people.
            return False
        if len(self.members) == 2 and self.allowPartners:
            first, second = self.members
            if first.partner is None or second.partner is None:
                if self.allowMistakes:
                    # One side forgot to file the partner form but the other
                    # names them: treat as a mistake, not cheating.
                    mistake = ((first.partner is None and second.partner == first.student)
                               or (second.partner is None and first.partner == second.student))
                    return not mistake
                # Otherwise assume both must have listed a partner.
                return True
            # Legitimate only when both list each other as partners.
            mutual = (first.student == second.partner and second.student == first.partner)
            return not mutual
        # Three or more members is always cheating.
        return True

    def toJSON(self):
        """Return a JSON-serialisable dict of this cluster."""
        return {
            "allowPartners": self.allowPartners,
            "file": self.file,
            "score": self.score,
            "members": [member.toJSON() for member in self.members],
        }
# Groups pair clusters into larger connected components
def groupPairClusters(clusters, top):
    # ``top`` selects how merged group scores combine: True keeps the max
    # pair score, False the min.
    # NOTE(review): this is a single greedy pass -- a cluster joins only the
    # FIRST matching group, so groups that later turn out to be transitively
    # connected are not re-merged.
    groups = []
    for cluster in clusters:
        # Everyone implicated by this pair: both members plus their partners.
        studentList = [cluster.members[0].student, cluster.members[1].student]
        if cluster.members[0].partner != None:
            studentList.append(cluster.members[0].partner)
        if cluster.members[1].partner != None:
            studentList.append(cluster.members[1].partner)
        # collect the groups
        foundMatch = False
        for group in groups:
            for member in group.members:
                # look for a matching member
                if member.student in studentList or member.partner in studentList:
                    # some shared member, group together
                    group.add(cluster.members[0])
                    group.add(cluster.members[1])
                    # Adjust the score appropriately
                    if top == True:
                        group.score = max(group.score, cluster.score)
                    else:
                        group.score = min(group.score, cluster.score)
                    # all done here
                    foundMatch = True
                    break
            # stop looking for groups if we found one
            if foundMatch == True:
                break
        # add the cluster as its own group if need be
        if foundMatch == False:
            groups.append(cluster)
    # all done
    return groups
| {
"repo_name": "JonathanPierce/Algae",
"path": "helpers/common.py",
"copies": "1",
"size": "5735",
"license": "mit",
"hash": 5968645274133617000,
"line_mean": 27.2512315271,
"line_max": 147,
"alpha_frac": 0.6915431561,
"autogenerated": false,
"ratio": 3.3265661252900234,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45181092813900237,
"avg_score": null,
"num_lines": null
} |
"""A Collection of useful miscellaneous functions.
misc.py:
Collection of useful miscellaneous functions.
:Author: Hannes Breytenbach (hannes@saao.ac.za)
"""
from __future__ import absolute_import, division, print_function
import collections
import itertools
import operator
from ...extern.six.moves import zip, map, filter
def first_true_index(iterable, pred=None, default=None):
    """Find the first index position for which the callable pred returns True.

    Parameters
    ----------
    iterable : iterable
    pred : callable, optional
        Predicate applied to each item; item truthiness when omitted.
    default : optional
        Returned when no item matches.
    """
    if pred is None:
        func = operator.itemgetter(1)
    else:
        func = lambda x: pred(x[1])
    # Use a sentinel for the no-match case: the old ``ii[0] if ii else
    # default`` subscripted any truthy non-tuple ``default`` (e.g. a string
    # or positive int) when the iterable was empty.
    sentinel = object()
    ii = next(filter(func, enumerate(iterable)), sentinel)
    return default if ii is sentinel else ii[0]
def first_false_index(iterable, pred=None, default=None):
    """Find the first index position for which the callable pred returns False."""
    # Negate the predicate and reuse the True-index search.
    if pred is None:
        negated = operator.not_
    else:
        negated = lambda x: not pred(x)
    return first_true_index(iterable, negated, default)
def sortmore(*args, **kw):
    """
    Sorts any number of lists according to:
    optionally given item sorting key function(s) and/or a global sorting key function.
    Parameters
    ----------
    One or more lists
    Keywords
    --------
    globalkey : None
        revert to sorting by key function
    globalkey : callable
        Sort by evaluated value for all items in the lists
        (call signature of this function needs to be such that it accepts an
        argument tuple of items from each list.
        eg.: globalkey = lambda *l: sum(l) will order all the lists by the
        sum of the items from each list
    if key: None
        sorting done by value of first input list
        (in this case the objects in the first iterable need the comparison
        methods __lt__ etc...)
    if key: callable
        sorting done by value of key(item) for items in first iterable
    if key: tuple
        sorting done by value of (key(item_0), ..., key(item_n)) for items in
        the first n iterables (where n is the length of the key tuple)
        i.e. the first callable is the primary sorting criterion, and the
        rest act as tie-breakers.
    Returns
    -------
    Sorted lists
    Examples
    --------
    Capture sorting indeces:
    l = list('CharacterS')
    In [1]: sortmore( l, range(len(l)) )
    Out[1]: (['C', 'S', 'a', 'a', 'c', 'e', 'h', 'r', 'r', 't'],
             [0, 9, 2, 4, 5, 7, 1, 3, 8, 6])
    In [2]: sortmore( l, range(len(l)), key=str.lower )
    Out[2]: (['a', 'a', 'C', 'c', 'e', 'h', 'r', 'r', 'S', 't'],
             [2, 4, 0, 5, 7, 1, 3, 8, 9, 6])
    """
    first = list(args[0])
    # Nothing to sort: return the inputs untouched.
    if not len(first):
        return args
    globalkey = kw.get('globalkey')
    key = kw.get('key')
    if key is None:
        if globalkey:
            # if global sort function given and no local (secondary) key given, ==> no tiebreakers
            key = lambda x: 0
        else:
            key = lambda x: x  # if no global sort and no local sort keys given, sort by item values
    if globalkey is None:
        globalkey = lambda *x: 0
    if not isinstance(globalkey, collections.Callable):
        raise ValueError('globalkey needs to be callable')
    # Build the composite sort key: the global criterion ranks first, the
    # local key(s) applied to the leading iterable(s) act as tie-breakers.
    if isinstance(key, collections.Callable):
        k = lambda x: (globalkey(*x), key(x[0]))
    elif isinstance(key, tuple):
        # None entries in the key tuple become no-op tie-breakers.
        key = (k if k else lambda x: 0 for k in key)
        k = lambda x: (globalkey(*x),) + tuple(f(z) for (f, z) in zip(key, x))
    else:
        raise KeyError(
            "kw arg 'key' should be None, callable, or a sequence of callables, not {}"
            .format(type(key)))
    # Sort the zipped rows together, then unzip back into per-input lists.
    res = sorted(list(zip(*args)), key=k)
    if 'order' in kw:
        if kw['order'].startswith(('descend', 'reverse')):
            res = reversed(res)
    return tuple(map(list, zip(*res)))
def groupmore(func=None, *its):
    """Extends the itertools.groupby functionality to arbitrary number of iterators."""
    if not func:
        func = lambda x: x
    # Co-sort all iterables on func of the first so groupby sees runs.
    its = sortmore(*its, key=func)
    keyfunc = lambda tup: func(tup[0])
    zipper = itertools.groupby(zip(*its), keyfunc)
    # Unzip each group back into per-iterable tuples, lazily.
    return ((key, zip(*groups)) for key, groups in zipper)
| {
"repo_name": "joergdietrich/astropy",
"path": "astropy/io/ascii/misc.py",
"copies": "4",
"size": "4232",
"license": "bsd-3-clause",
"hash": 3542416152728907000,
"line_mean": 31.8062015504,
"line_max": 100,
"alpha_frac": 0.6034971645,
"autogenerated": false,
"ratio": 3.72863436123348,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.633213152573348,
"avg_score": null,
"num_lines": null
} |
"""A Collection of useful miscellaneous functions.
misc.py:
Collection of useful miscellaneous functions.
:Author: Hannes Breytenbach (hannes@saao.ac.za)
"""
import collections.abc
import itertools
import operator
def first_true_index(iterable, pred=None, default=None):
    """Find the first index position for which the callable pred returns True.

    Parameters
    ----------
    iterable : iterable
    pred : callable, optional
        Predicate applied to each item; item truthiness when omitted.
    default : optional
        Returned when no item matches.
    """
    if pred is None:
        func = operator.itemgetter(1)
    else:
        func = lambda x: pred(x[1])
    # Use a sentinel for the no-match case: the old ``ii[0] if ii else
    # default`` subscripted any truthy non-tuple ``default`` (e.g. a string
    # or positive int) when the iterable was empty.
    sentinel = object()
    ii = next(filter(func, enumerate(iterable)), sentinel)
    return default if ii is sentinel else ii[0]
def first_false_index(iterable, pred=None, default=None):
    """Find the first index position for which the callable pred returns False."""
    # Negate the predicate and reuse the True-index search.
    if pred is None:
        negated = operator.not_
    else:
        negated = lambda x: not pred(x)
    return first_true_index(iterable, negated, default)
def sortmore(*args, **kw):
    """
    Sorts any number of lists according to:
    optionally given item sorting key function(s) and/or a global sorting key function.

    Parameters
    ----------
    One or more lists

    Keywords
    --------
    globalkey : None
        revert to sorting by key function
    globalkey : callable
        Sort by evaluated value for all items in the lists
        (call signature of this function needs to be such that it accepts an
        argument tuple of items from each list.
        eg.: ``globalkey = lambda *l: sum(l)`` will order all the lists by the
        sum of the items from each list
    if key: None
        sorting done by value of first input list
        (in this case the objects in the first iterable need the comparison
        methods __lt__ etc...)
    if key: callable
        sorting done by value of key(item) for items in first iterable
    if key: tuple
        sorting done by value of (key(item_0), ..., key(item_n)) for items in
        the first n iterables (where n is the length of the key tuple)
        i.e. the first callable is the primary sorting criterion, and the
        rest act as tie-breakers.

    Returns
    -------
    Sorted lists

    Examples
    --------
    Capture sorting indices::

        l = list('CharacterS')
        In [1]: sortmore( l, range(len(l)) )
        Out[1]: (['C', 'S', 'a', 'a', 'c', 'e', 'h', 'r', 'r', 't'],
                 [0, 9, 2, 4, 5, 7, 1, 3, 8, 6])
        In [2]: sortmore( l, range(len(l)), key=str.lower )
        Out[2]: (['a', 'a', 'C', 'c', 'e', 'h', 'r', 'r', 'S', 't'],
                 [2, 4, 0, 5, 7, 1, 3, 8, 9, 6])
    """
    first = list(args[0])
    if not len(first):
        # Nothing to sort; hand back the inputs untouched.
        return args
    globalkey = kw.get('globalkey')
    key = kw.get('key')
    if key is None:
        if globalkey:
            # Global sort function given and no local (secondary) key given
            # ==> no tiebreakers.
            key = lambda x: 0
        else:
            # No global and no local sort keys given: sort by item values.
            key = lambda x: x
    if globalkey is None:
        globalkey = lambda *x: 0
    if not isinstance(globalkey, collections.abc.Callable):
        raise ValueError('globalkey needs to be callable')
    if isinstance(key, collections.abc.Callable):
        k = lambda x: (globalkey(*x), key(x[0]))
    elif isinstance(key, tuple):
        # BUGFIX: this must be a *tuple*, not a generator expression.  A
        # generator would be exhausted after the very first sort-key
        # evaluation, so every subsequent item would lose all its
        # tie-breaker components and sort incorrectly.
        key = tuple((k if k else (lambda x: 0)) for k in key)
        k = lambda x: (globalkey(*x),) + tuple(f(z) for (f, z) in zip(key, x))
    else:
        raise KeyError(
            "kw arg 'key' should be None, callable, or a sequence of callables, not {}"
            .format(type(key)))
    # Sort row-tuples (one item from each list) together, then un-zip.
    res = sorted(list(zip(*args)), key=k)
    if 'order' in kw:
        if kw['order'].startswith(('descend', 'reverse')):
            res = reversed(res)
    return tuple(map(list, zip(*res)))
def groupmore(func=None, *its):
    """Extend ``itertools.groupby`` to any number of parallel iterators.

    All iterators are co-sorted via ``sortmore`` using *func* on items of
    the first iterator, then grouped by that same key.  Yields
    ``(key, zipped-group)`` pairs through a generator expression.
    """
    if not func:
        func = lambda x: x
    cosorted = sortmore(*its, key=func)
    rowkey = lambda row: func(row[0])
    groups = itertools.groupby(zip(*cosorted), rowkey)
    return ((gkey, zip(*members)) for gkey, members in groups)
| {
"repo_name": "pllim/astropy",
"path": "astropy/io/ascii/misc.py",
"copies": "8",
"size": "4135",
"license": "bsd-3-clause",
"hash": -8283620445521436000,
"line_mean": 31.5590551181,
"line_max": 100,
"alpha_frac": 0.5987908102,
"autogenerated": false,
"ratio": 3.725225225225225,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8324016035425226,
"avg_score": null,
"num_lines": null
} |
"""A Collection of useful miscellaneous functions.
misc.py:
Collection of useful miscellaneous functions.
:Author: Hannes Breytenbach (hannes@saao.ac.za)
"""
import collections
import itertools
import operator
def first_true_index(iterable, pred=None, default=None):
    """Return the index of the first item for which *pred* evaluates True.

    Falls back to item truthiness when *pred* is None; yields *default*
    when nothing matches.
    """
    if pred is None:
        check = operator.itemgetter(1)
    else:
        def check(pair):
            return pred(pair[1])
    hit = next(filter(check, enumerate(iterable)), default)
    return hit[0] if hit else default
def first_false_index(iterable, pred=None, default=None):
    """Return the index of the first item for which *pred* evaluates False.

    Falls back to item falsiness when *pred* is None; yields *default*
    when every item tests True.
    """
    if pred is None:
        inverted = operator.not_
    else:
        def inverted(item):
            return not pred(item)
    return first_true_index(iterable, inverted, default)
def sortmore(*args, **kw):
    """
    Sorts any number of lists according to:
    optionally given item sorting key function(s) and/or a global sorting key function.

    Parameters
    ----------
    One or more lists

    Keywords
    --------
    globalkey : None
        revert to sorting by key function
    globalkey : callable
        Sort by evaluated value for all items in the lists
        (call signature of this function needs to be such that it accepts an
        argument tuple of items from each list.
        eg.: globalkey = lambda *l: sum(l) will order all the lists by the
        sum of the items from each list
    if key: None
        sorting done by value of first input list
        (in this case the objects in the first iterable need the comparison
        methods __lt__ etc...)
    if key: callable
        sorting done by value of key(item) for items in first iterable
    if key: tuple
        sorting done by value of (key(item_0), ..., key(item_n)) for items in
        the first n iterables (where n is the length of the key tuple)
        i.e. the first callable is the primary sorting criterion, and the
        rest act as tie-breakers.

    Returns
    -------
    Sorted lists

    Examples
    --------
    Capture sorting indices:
        l = list('CharacterS')
        In [1]: sortmore( l, range(len(l)) )
        Out[1]: (['C', 'S', 'a', 'a', 'c', 'e', 'h', 'r', 'r', 't'],
                 [0, 9, 2, 4, 5, 7, 1, 3, 8, 6])
        In [2]: sortmore( l, range(len(l)), key=str.lower )
        Out[2]: (['a', 'a', 'C', 'c', 'e', 'h', 'r', 'r', 'S', 't'],
                 [2, 4, 0, 5, 7, 1, 3, 8, 9, 6])
    """
    first = list(args[0])
    if not len(first):
        # Nothing to sort; hand back the inputs untouched.
        return args
    globalkey = kw.get('globalkey')
    key = kw.get('key')
    if key is None:
        if globalkey:
            # Global sort function given and no local (secondary) key given
            # ==> no tiebreakers.
            key = lambda x: 0
        else:
            # No global and no local sort keys given: sort by item values.
            key = lambda x: x
    if globalkey is None:
        globalkey = lambda *x: 0
    # BUGFIX: use the builtin callable() -- the `collections.Callable`
    # alias used previously was removed in Python 3.10.
    if not callable(globalkey):
        raise ValueError('globalkey needs to be callable')
    if callable(key):
        k = lambda x: (globalkey(*x), key(x[0]))
    elif isinstance(key, tuple):
        # BUGFIX: this must be a *tuple*, not a generator expression.  A
        # generator would be exhausted after the very first sort-key
        # evaluation, so every subsequent item would lose all its
        # tie-breaker components and sort incorrectly.
        key = tuple((k if k else (lambda x: 0)) for k in key)
        k = lambda x: (globalkey(*x),) + tuple(f(z) for (f, z) in zip(key, x))
    else:
        raise KeyError(
            "kw arg 'key' should be None, callable, or a sequence of callables, not {}"
            .format(type(key)))
    # Sort row-tuples (one item from each list) together, then un-zip.
    res = sorted(list(zip(*args)), key=k)
    if 'order' in kw:
        if kw['order'].startswith(('descend', 'reverse')):
            res = reversed(res)
    return tuple(map(list, zip(*res)))
def groupmore(func=None, *its):
    """Extend itertools.groupby to an arbitrary number of iterators.

    Co-sorts all iterators with ``sortmore`` (keyed by *func* on the
    first iterator's items), then groups rows on that key and returns an
    iterator of ``(key, zipped-group)`` pairs.
    """
    keyfn = (lambda x: x) if not func else func
    ordered = sortmore(*its, key=keyfn)
    pairkey = lambda tup: keyfn(tup[0])
    return (
        (gkey, zip(*grp))
        for gkey, grp in itertools.groupby(zip(*ordered), pairkey)
    )
| {
"repo_name": "funbaker/astropy",
"path": "astropy/io/ascii/misc.py",
"copies": "2",
"size": "4118",
"license": "bsd-3-clause",
"hash": 5931937301773830000,
"line_mean": 31.4251968504,
"line_max": 100,
"alpha_frac": 0.599077222,
"autogenerated": false,
"ratio": 3.7300724637681157,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5329149685768115,
"avg_score": null,
"num_lines": null
} |
"""A collection of utilities for dealing with Molecular Fragments"""
import itertools
import numpy as np
from typing import Any, List, Iterable, Optional, Sequence, Set, Tuple, Union
from deepchem.utils.typing import RDKitAtom, RDKitMol
from deepchem.utils.geometry_utils import compute_pairwise_distances
from deepchem.utils.rdkit_utils import compute_charges
class AtomShim(object):
    """Lightweight stand-in for an RDKit atom.

    Manipulating large numbers of raw RDKit atoms has been observed to
    segfault; copying the needed fields into this shim avoids that while
    keeping an RDKit-like accessor API.
    """

    def __init__(self, atomic_num: int, partial_charge: float,
                 atom_coords: np.ndarray):
        """Store the atom's basic properties.

        Parameters
        ----------
        atomic_num: int
            Atomic number for this atom.
        partial_charge: float
            The partial Gasteiger charge for this atom.
        atom_coords: np.ndarray
            Of shape (3,) with the coordinates of this atom.
        """
        self.atomic_num = atomic_num
        self.partial_charge = partial_charge
        self.coords = atom_coords

    def GetAtomicNum(self) -> int:
        """Return the atomic number stored for this atom."""
        return self.atomic_num

    def GetPartialCharge(self) -> float:
        """Return the partial Gasteiger charge stored for this atom."""
        return self.partial_charge

    def GetCoords(self) -> np.ndarray:
        """Return the shape-(3,) coordinate array stored for this atom."""
        return self.coords
class MolecularFragment(object):
    """A class that represents a fragment of a molecule.

    It is often convenient to work with a subset of a molecule -- for
    example, the atoms of each partner in a molecular complex that lie in
    the contact region.  RDKit itself does not readily support building
    such sub-molecules, so this class holds the atom data (as `AtomShim`
    objects) and coordinates directly.

    Examples
    --------
    >>> import numpy as np
    >>> from rdkit import Chem
    >>> mol = Chem.MolFromSmiles("C")
    >>> coords = np.array([[0.0, 0.0, 0.0]])
    >>> atom = mol.GetAtoms()[0]
    >>> fragment = MolecularFragment([atom], coords)
    """

    def __init__(self, atoms: Sequence[RDKitAtom], coords: np.ndarray):
        """Build the fragment from atoms and their coordinates.

        Parameters
        ----------
        atoms: Iterable[RDKit Atom]
            Each entry in this list should be a RDKit Atom.
        coords: np.ndarray
            Array of locations for atoms of shape `(N, 3)` where `N ==
            len(atoms)`.
        """
        if not isinstance(coords, np.ndarray):
            raise ValueError("Coords must be a numpy array of shape (N, 3)")
        if coords.shape != (len(atoms), 3):
            raise ValueError(
                "Coords must be a numpy array of shape `(N, 3)` where `N == len(atoms)`."
            )
        # Copy atom data into shims to avoid holding raw RDKit atoms.
        shims = []
        for ind, atom in enumerate(atoms):
            shims.append(
                AtomShim(atom.GetAtomicNum(), get_partial_charge(atom),
                         coords[ind]))
        self.atoms = shims
        self.coords = coords

    def GetAtoms(self) -> List[AtomShim]:
        """Return the list of `AtomShim` atoms in this fragment."""
        return self.atoms

    def GetCoords(self) -> np.ndarray:
        """Return the `(N, 3)` coordinate array for this fragment."""
        return self.coords
def get_partial_charge(atom: Union[RDKitAtom, AtomShim]) -> float:
    """Return the partial Gasteiger charge of *atom*.

    Parameters
    ----------
    atom: RDKit Atom or AtomShim
        Either a rdkit.Atom object or `AtomShim`

    Returns
    -------
    float
        A partial Gasteiger charge of a given atom.

    Note
    ----
    This function requires RDKit to be installed.

    Examples
    --------
    >>> from rdkit import Chem
    >>> mol = Chem.MolFromSmiles("CC")
    >>> atom = mol.GetAtoms()[0]
    >>> get_partial_charge(atom)
    0.0
    """
    try:
        from rdkit import Chem
    except ModuleNotFoundError:
        raise ValueError("This function requires RDKit to be installed.")
    if not isinstance(atom, Chem.Atom):
        # An AtomShim stores the charge directly.
        return atom.GetPartialCharge()
    try:
        value = atom.GetProp(str("_GasteigerCharge"))
    except KeyError:
        # Charges were never computed on this molecule.
        return 0.0
    # RDKit encodes failed charge computation as the string '-nan'.
    return 0.0 if value == '-nan' else float(value)
def merge_molecular_fragments(
    molecules: List[MolecularFragment]) -> Optional[MolecularFragment]:
    """Merge a list of molecular fragments into a single fragment.

    Parameters
    ----------
    molecules: List[MolecularFragment]
        List of `MolecularFragment` objects.

    Returns
    -------
    Optional[MolecularFragment]
        None for an empty list, the sole element for a singleton list,
        otherwise a new merged `MolecularFragment`.
    """
    if not molecules:
        return None
    if len(molecules) == 1:
        return molecules[0]
    merged_atoms = []
    coord_arrays = []
    for fragment in molecules:
        merged_atoms.extend(fragment.GetAtoms())
        coord_arrays.append(fragment.GetCoords())
    return MolecularFragment(merged_atoms, np.concatenate(coord_arrays))
def get_mol_subset(
    coords: np.ndarray, mol: Union[RDKitMol, MolecularFragment],
    atom_indices_to_keep: List[int]) -> Tuple[np.ndarray, MolecularFragment]:
    """Extract the subset of *mol* given by *atom_indices_to_keep*.

    Parameters
    ----------
    coords: np.ndarray
        Must be of shape (N, 3) and correspond to coordinates of mol.
    mol: RDKit Mol or MolecularFragment
        The molecule to strip
    atom_indices_to_keep: list
        List of the indices of the atoms to keep. Each index is a unique
        number between `[0, N)`.

    Returns
    -------
    Tuple[np.ndarray, MolecularFragment]
        A tuple of `(coords, mol_frag)` where `coords` holds the kept
        coordinates and `mol_frag` is a `MolecularFragment`.

    Note
    ----
    This function requires RDKit to be installed.
    """
    try:
        from rdkit import Chem
    except ModuleNotFoundError:
        raise ValueError("This function requires RDKit to be installed.")
    # A raw RDKit Mol needs Gasteiger charges computed before the atoms
    # are wrapped in AtomShims; a MolecularFragment already has them.
    if isinstance(mol, Chem.Mol):
        compute_charges(mol)
    all_atoms = list(mol.GetAtoms())
    kept_indices = list(atom_indices_to_keep)
    kept_atoms = [all_atoms[i] for i in kept_indices]
    subset_coords = coords[kept_indices]
    return subset_coords, MolecularFragment(kept_atoms, subset_coords)
def strip_hydrogens(coords: np.ndarray, mol: Union[RDKitMol, MolecularFragment]
                   ) -> Tuple[np.ndarray, MolecularFragment]:
    """Remove all hydrogen atoms from *mol* and its coordinate array.

    Parameters
    ----------
    coords: np.ndarray
        The coords must be of shape (N, 3) and correspond to coordinates of mol.
    mol: RDKit Mol or MolecularFragment
        The molecule to strip

    Returns
    -------
    Tuple[np.ndarray, MolecularFragment]
        A tuple of `(coords, mol_frag)` with hydrogens removed from both.

    Note
    ----
    This function requires RDKit to be installed.
    """
    # Keep every atom whose atomic number is not 1 (hydrogen).
    heavy_indices = [
        ind for ind, atom in enumerate(mol.GetAtoms())
        if atom.GetAtomicNum() != 1
    ]
    return get_mol_subset(coords, mol, heavy_indices)
def get_contact_atom_indices(fragments: List[Tuple[np.ndarray, RDKitMol]],
                             cutoff: float = 4.5) -> List[List[int]]:
    """Find, per fragment, the atoms within *cutoff* of any other fragment.

    Pairwise distances are computed between every pair of fragments in
    the complex; an atom within *cutoff* of any atom of another fragment
    is kept, everything else is trimmed.

    Parameters
    ----------
    fragments: List[Tuple[np.ndarray, RDKit Mol]]
        As returned by `rdkit_utils.load_complex`, a list of tuples of
        `(coords, mol)` where `coords` is a `(N_atoms, 3)` array and `mol`
        is the rdkit molecule object.
    cutoff: float, optional (default 4.5)
        The cutoff distance in angstroms.

    Returns
    -------
    List[List[int]]
        One sorted list of kept atom indices per fragment.
    """
    # One set of contact-atom indices per fragment.
    contact_sets: List[Set[int]] = [set() for _ in fragments]
    for ind1, ind2 in itertools.combinations(range(len(fragments)), 2):
        coords1 = fragments[ind1][0]
        coords2 = fragments[ind2][0]
        distances = compute_pairwise_distances(coords1, coords2)
        # Rows index atoms of fragment ind1, columns atoms of fragment ind2.
        rows, cols = np.nonzero(distances < cutoff)
        contact_sets[ind1].update(int(r) for r in rows.tolist())
        contact_sets[ind2].update(int(c) for c in cols.tolist())
    return [sorted(keep) for keep in contact_sets]
def reduce_molecular_complex_to_contacts(
    fragments: List[Tuple[np.ndarray, RDKitMol]],
    cutoff: float = 4.5) -> List[Tuple[np.ndarray, MolecularFragment]]:
    """Trim every fragment of a complex down to its contact atoms.

    Contact atoms are determined via `get_contact_atom_indices`; each
    fragment is then reduced with `get_mol_subset`.

    Parameters
    ----------
    fragments: List[Tuple[np.ndarray, RDKit Mol]]
        As returned by `rdkit_utils.load_complex`, a list of tuples of
        `(coords, mol)` where `coords` is a `(N_atoms, 3)` array and `mol`
        is the rdkit molecule object.
    cutoff: float
        The cutoff distance in angstroms.

    Returns
    -------
    List[Tuple[np.ndarray, MolecularFragment]]
        One `(coords, MolecularFragment)` tuple per input fragment, with
        coords reduced to `(N_contact_atoms, 3)`.  `MolecularFragment` is
        used since it's tricky to make a RDKit sub-molecule.
    """
    keep_lists = get_contact_atom_indices(fragments, cutoff)
    return [
        get_mol_subset(frag_coords, frag_mol, keep)
        for (frag_coords, frag_mol), keep in zip(fragments, keep_lists)
    ]
| {
"repo_name": "miaecle/deepchem",
"path": "deepchem/utils/fragment_utils.py",
"copies": "1",
"size": "11354",
"license": "mit",
"hash": 5449972734824394000,
"line_mean": 30.6267409471,
"line_max": 83,
"alpha_frac": 0.6758851506,
"autogenerated": false,
"ratio": 3.7031963470319633,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9850621379357323,
"avg_score": 0.005692023654928136,
"num_lines": 359
} |
"""A collection of utilities for dealing with Molecular Fragments"""
import itertools
import numpy as np
from typing import List, Optional, Sequence, Set, Tuple, Union
import logging
from deepchem.utils.typing import RDKitAtom, RDKitMol
from deepchem.utils.geometry_utils import compute_pairwise_distances
logger = logging.getLogger(__name__)
class MoleculeLoadException(Exception):
    """Raised when a molecule cannot be loaded or processed."""

    def __init__(self, *args, **kwargs):
        # BUGFIX: the original called ``Exception.__init__(*args, **kwargs)``
        # without passing ``self``, so the first positional argument was
        # consumed as the instance and every construction attempt raised a
        # TypeError instead of producing the exception.
        super().__init__(*args, **kwargs)
class AtomShim(object):
    """Minimal wrapper carrying an atom's data without holding an RDKit atom.

    Working with many raw RDKit ``Atom`` objects has been observed to
    segfault; this shim copies out the needed fields and mimics the
    RDKit accessor names.
    """

    def __init__(self, atomic_num: int, partial_charge: float,
                 atom_coords: np.ndarray):
        """Record the atom's properties.

        Parameters
        ----------
        atomic_num: int
            Atomic number for this atom.
        partial_charge: float
            The partial Gasteiger charge for this atom.
        atom_coords: np.ndarray
            Of shape (3,) with the coordinates of this atom.
        """
        self.atomic_num = atomic_num
        self.partial_charge = partial_charge
        self.coords = atom_coords

    def GetAtomicNum(self) -> int:
        """Return this atom's atomic number."""
        return self.atomic_num

    def GetPartialCharge(self) -> float:
        """Return this atom's partial Gasteiger charge."""
        return self.partial_charge

    def GetCoords(self) -> np.ndarray:
        """Return this atom's coordinates as a shape-(3,) numpy array."""
        return self.coords
class MolecularFragment(object):
    """A class that represents a fragment of a molecule.

    Useful for working with subsets of molecules, e.g. the contact-region
    atoms of each partner in a molecular complex.  RDKit does not readily
    support building such sub-molecules, so atom data is held directly as
    `AtomShim` objects together with a coordinate array.

    Examples
    --------
    >>> import numpy as np
    >>> from rdkit import Chem
    >>> mol = Chem.MolFromSmiles("C")
    >>> coords = np.array([[0.0, 0.0, 0.0]])
    >>> atom = mol.GetAtoms()[0]
    >>> fragment = MolecularFragment([atom], coords)
    """

    def __init__(self, atoms: Sequence[RDKitAtom], coords: np.ndarray):
        """Build the fragment from atoms and their coordinates.

        Parameters
        ----------
        atoms: Iterable[rdkit.Chem.rdchem.Atom]
            Each entry in this list should be a RDKit Atom.
        coords: np.ndarray
            Array of locations for atoms of shape `(N, 3)` where `N ==
            len(atoms)`.
        """
        if not isinstance(coords, np.ndarray):
            raise ValueError("Coords must be a numpy array of shape (N, 3)")
        if coords.shape != (len(atoms), 3):
            raise ValueError(
                "Coords must be a numpy array of shape `(N, 3)` where `N == len(atoms)`."
            )
        # Copy atom data into shims to avoid holding raw RDKit atoms.
        shims = []
        for ind, atom in enumerate(atoms):
            shims.append(
                AtomShim(atom.GetAtomicNum(), get_partial_charge(atom),
                         coords[ind]))
        self.atoms = shims
        self.coords = coords

    def GetAtoms(self) -> List[AtomShim]:
        """Return the list of `AtomShim` atoms in this fragment."""
        return self.atoms

    def GetNumAtoms(self) -> int:
        """Return the number of atoms in this fragment."""
        return len(self.atoms)

    def GetCoords(self) -> np.ndarray:
        """Return the `(N, 3)` coordinate array for this fragment."""
        return self.coords
def get_partial_charge(atom: Union[RDKitAtom, AtomShim]) -> float:
    """Return the partial Gasteiger charge of *atom*.

    Parameters
    ----------
    atom: rdkit.Chem.rdchem.Atom or AtomShim
        Either a rdkit.Atom object or `AtomShim`

    Returns
    -------
    float
        A partial Gasteiger charge of a given atom.

    Notes
    -----
    This function requires RDKit to be installed.

    Examples
    --------
    >>> from rdkit import Chem
    >>> mol = Chem.MolFromSmiles("CC")
    >>> atom = mol.GetAtoms()[0]
    >>> get_partial_charge(atom)
    0.0
    """
    try:
        from rdkit import Chem
    except ModuleNotFoundError:
        raise ImportError("This function requires RDKit to be installed.")
    if not isinstance(atom, Chem.Atom):
        # An AtomShim stores the charge directly.
        return atom.GetPartialCharge()
    try:
        value = atom.GetProp(str("_GasteigerCharge"))
    except KeyError:
        # Charges were never computed on this molecule.
        return 0.0
    # RDKit encodes failed charge computation as the string '-nan'.
    return 0.0 if value == '-nan' else float(value)
def merge_molecular_fragments(
    molecules: List[MolecularFragment]) -> Optional[MolecularFragment]:
    """Merge a list of molecular fragments into one fragment.

    Parameters
    ----------
    molecules: List[MolecularFragment]
        List of `MolecularFragment` objects.

    Returns
    -------
    Optional[MolecularFragment]
        None for an empty list, the sole element for a singleton list,
        otherwise a new merged `MolecularFragment`.
    """
    if not molecules:
        return None
    if len(molecules) == 1:
        return molecules[0]
    combined_atoms = []
    combined_coords = []
    for fragment in molecules:
        combined_atoms.extend(fragment.GetAtoms())
        combined_coords.append(fragment.GetCoords())
    return MolecularFragment(combined_atoms, np.concatenate(combined_coords))
def get_mol_subset(
    coords: np.ndarray, mol: Union[RDKitMol, MolecularFragment],
    atom_indices_to_keep: List[int]) -> Tuple[np.ndarray, MolecularFragment]:
    """Extract the subset of *mol* given by *atom_indices_to_keep*.

    Parameters
    ----------
    coords: np.ndarray
        Must be of shape (N, 3) and correspond to coordinates of mol.
    mol: rdkit.Chem.rdchem.Mol or MolecularFragment
        The molecule to strip
    atom_indices_to_keep: list
        List of the indices of the atoms to keep. Each index is a unique
        number between `[0, N)`.

    Returns
    -------
    Tuple[np.ndarray, MolecularFragment]
        A tuple of `(coords, mol_frag)` where `coords` holds the kept
        coordinates and `mol_frag` is a `MolecularFragment`.

    Notes
    -----
    This function requires RDKit to be installed.
    """
    try:
        from rdkit import Chem
    except ModuleNotFoundError:
        raise ImportError("This function requires RDKit to be installed.")
    # A raw RDKit Mol needs Gasteiger charges computed before the atoms
    # are wrapped in AtomShims; a MolecularFragment already has them.
    if isinstance(mol, Chem.Mol):
        compute_charges(mol)
    all_atoms = list(mol.GetAtoms())
    kept_indices = list(atom_indices_to_keep)
    kept_atoms = [all_atoms[i] for i in kept_indices]
    subset_coords = coords[kept_indices]
    return subset_coords, MolecularFragment(kept_atoms, subset_coords)
def strip_hydrogens(coords: np.ndarray, mol: Union[RDKitMol, MolecularFragment]
                   ) -> Tuple[np.ndarray, MolecularFragment]:
    """Remove all hydrogen atoms from *mol* and its coordinate array.

    Parameters
    ----------
    coords: np.ndarray
        The coords must be of shape (N, 3) and correspond to coordinates of mol.
    mol: rdkit.Chem.rdchem.Mol or MolecularFragment
        The molecule to strip

    Returns
    -------
    Tuple[np.ndarray, MolecularFragment]
        A tuple of `(coords, mol_frag)` with hydrogens removed from both.

    Notes
    -----
    This function requires RDKit to be installed.
    """
    # Keep every atom whose atomic number is not 1 (hydrogen).
    heavy_indices = [
        ind for ind, atom in enumerate(mol.GetAtoms())
        if atom.GetAtomicNum() != 1
    ]
    return get_mol_subset(coords, mol, heavy_indices)
def get_contact_atom_indices(fragments: List[Tuple[np.ndarray, RDKitMol]],
                             cutoff: float = 4.5) -> List[List[int]]:
    """Find, per fragment, the atoms within *cutoff* of any other fragment.

    Pairwise distances are computed between every pair of fragments of
    the complex; an atom within *cutoff* of any atom of another fragment
    counts as a contact atom, everything else is trimmed.

    Parameters
    ----------
    fragments: List[Tuple[np.ndarray, rdkit.Chem.rdchem.Mol]]
        As returned by `rdkit_utils.load_complex`, a list of tuples of
        `(coords, mol)` where `coords` is a `(N_atoms, 3)` array and `mol`
        is the rdkit molecule object.
    cutoff: float, optional (default 4.5)
        The cutoff distance in angstroms.

    Returns
    -------
    List[List[int]]
        One sorted list of kept atom indices per fragment.
    """
    # One set of contact-atom indices per fragment.
    contact_sets: List[Set[int]] = [set() for _ in fragments]
    for ind1, ind2 in itertools.combinations(range(len(fragments)), 2):
        coords1 = fragments[ind1][0]
        coords2 = fragments[ind2][0]
        distances = compute_pairwise_distances(coords1, coords2)
        # Rows index atoms of fragment ind1, columns atoms of fragment ind2.
        rows, cols = np.nonzero(distances < cutoff)
        contact_sets[ind1].update(int(r) for r in rows.tolist())
        contact_sets[ind2].update(int(c) for c in cols.tolist())
    return [sorted(keep) for keep in contact_sets]
def reduce_molecular_complex_to_contacts(
    fragments: List[Tuple[np.ndarray, RDKitMol]],
    cutoff: float = 4.5) -> List[Tuple[np.ndarray, MolecularFragment]]:
    """Trim every fragment of a complex down to its contact atoms.

    Contact atoms are determined with `get_contact_atom_indices`; each
    fragment is then reduced via `get_mol_subset`.

    Parameters
    ----------
    fragments: List[Tuple[np.ndarray, rdkit.Chem.rdchem.Mol]]
        As returned by `rdkit_utils.load_complex`, a list of tuples of
        `(coords, mol)` where `coords` is a `(N_atoms, 3)` array and `mol`
        is the rdkit molecule object.
    cutoff: float
        The cutoff distance in angstroms.

    Returns
    -------
    List[Tuple[np.ndarray, MolecularFragment]]
        One `(coords, MolecularFragment)` tuple per input fragment, with
        coords reduced to `(N_contact_atoms, 3)`.  `MolecularFragment` is
        used since it's tricky to make a RDKit sub-molecule.
    """
    keep_lists = get_contact_atom_indices(fragments, cutoff)
    return [
        get_mol_subset(frag_coords, frag_mol, keep)
        for (frag_coords, frag_mol), keep in zip(fragments, keep_lists)
    ]
# TODO: This is duplicated! Clean up
def compute_charges(mol):
    """Compute Gasteiger charges on *mol* in place.

    The molecule passed in must already have been sanitized.

    Parameters
    ----------
    mol: rdkit molecule

    Returns
    -------
    None; charges are stored on the molecule itself.

    Note
    ----
    This function requires RDKit to be installed.
    """
    from rdkit.Chem import AllChem
    try:
        # Updates charges in place on the molecule's atoms.
        AllChem.ComputeGasteigerCharges(mol)
    except Exception as e:
        logger.exception("Unable to compute charges for mol")
        raise MoleculeLoadException(e)
| {
"repo_name": "deepchem/deepchem",
"path": "deepchem/utils/fragment_utils.py",
"copies": "2",
"size": "12339",
"license": "mit",
"hash": -6616561351259699000,
"line_mean": 29.5420792079,
"line_max": 83,
"alpha_frac": 0.6761487965,
"autogenerated": false,
"ratio": 3.7109774436090226,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5387126240109023,
"avg_score": null,
"num_lines": null
} |
"""A collection of utilities for handling (mainly subsurface ocean) observations"""
import copy
import gsw
import numpy as np
def t48tot68(t48):
    """Convert temperature from the IPTS-48 to the IPTS-68 scale.

    Conversion as specified in the CF Standard Name information for
    sea_water_temperature
    http://cfconventions.org/Data/cf-standard-names/27/build/cf-standard-name-table.html
    Temperatures are in degrees C.
    """
    return t48 - 4.4e-6 * t48 * (100 - t48)
def t68tot90(t68):
    """Convert temperature from the IPTS-68 to the ITS-90 scale.

    Conversion as specified in the CF Standard Name information for
    sea_water_temperature
    http://cfconventions.org/Data/cf-standard-names/27/build/cf-standard-name-table.html
    Temperatures are in degrees C.
    """
    return 0.99976 * t68
def pottem(T, S, dStart, dEnd=0.0, pressure=False, lat=0.0):
    """Calculate the temperature of a water parcel moved adiabatically
    from depth ``dStart`` to ``dEnd``.

    Parameters
    ----------
    T : initial temperature of the water (degrees C).
    S : salinity of the water.
    dStart : depth (or pressure, see below) the parcel starts at.
    dEnd : depth (or pressure) the parcel ends up at.
    pressure : set to True if dStart and dEnd are pressures (db) rather
        than depths (m).
    lat : if pressure is False, latitude (deg) used for the
        depth-to-pressure conversion.

    NOTE(review): the loop below appears to integrate an empirical
    adiabatic lapse-rate polynomial in T, S and P in fixed 1-db pressure
    steps, finishing with a linear interpolation between the last two
    temperatures -- confirm against the original Fortran/reference
    before modifying any statement order: T, TB and P are updated in a
    deliberate leapfrog sequence.
    """
    if pressure:
        P0 = dStart
        P1 = dEnd
    else:
        # Convert depths to pressures (db) for the integration.
        P0 = depth_to_pressure(dStart, lat)
        P1 = depth_to_pressure(dEnd, lat)
    assert P0 <= 20000 and P1 <= 20000 and P0 >= 0 and P1 >= 0, 'Pressure out of range'
    # Integration step of 1 db, signed toward the target pressure.
    dpp = 1.0e0
    if P1 >= P0:
        DP = dpp
    else:
        DP = -dpp
    P = P0
    # Salinity anomaly relative to 35 PSU, used by the polynomial.
    DS = S - 35e0
    # Half-step start value: temperature minus one lapse-rate increment
    # evaluated at the starting pressure P0.
    TB = (T-((((-2.1687e-16*T+1.8676e-14)*T-4.6206e-13)*P0
              + ((2.7759e-12*T-1.1351e-10)*DS+((-5.4481e-14*T
              + 8.733e-12)*T-6.7795e-10)*T+1.8741e-8))*P0
              + (-4.2393e-8*T+1.8932e-6)*DS
              + ((6.6228e-10*T-6.836e-8)*T+8.5258e-6)*T+3.5803e-5)*DP)
    test = 1
    while test > 0.0:
        # Leapfrog step: new temperature from the value two steps back
        # plus twice the lapse-rate increment at the current pressure.
        TA = (TB+2.0e0*((((-2.1687e-16*T+1.8676e-14)*T-4.6206e-13)*P
              + ((2.7759e-12*T-1.1351e-10)*DS+((-5.4481e-14*T
              + 8.733e-12)*T-6.7795e-10)*T+1.8741e-8))*P
              + (-4.2393e-8*T+1.8932e-6)*DS
              + ((6.6228e-10*T-6.836e-8)*T+8.5258e-6)*T+3.5803e-5)*DP)
        P = P + DP
        TB = T
        T = TA
        # Negative once P has stepped past the target pressure P1.
        test = (P-P1)*(P-DP-P1)
    # Linear interpolation between the last two temperatures to land
    # exactly on P1.
    POTTEM = ((P1-P+DP)*T+(P-P1)*TB)/DP
    return POTTEM
def depth_to_pressure(z, lat):
    """Convert depth to sea pressure using the gsw toolbox.

    z: scalar or numpy array of depth (m).
    lat: scalar or numpy array of latitude (deg).
    """
    assert np.array(lat).size > 0 and np.array(z).size > 0, 'No value provided for z or lat'
    # gsw uses height (positive up), hence the sign flip on depth.
    return gsw.p_from_z(-z, lat)
def pressure_to_depth(p, lat):
    """Convert ocean pressure to depth (wrapper around ``gsw.z_from_p``)."""
    # gsw returns height (positive up); negate to express as depth.
    height = gsw.z_from_p(p, lat)
    return -height
def density(t, s, l, latitude=None):
    """Calculate the density/densities of sea water.

    t - potential temperature(s) in degC.
    s - salinity(s) in PSU.
    l - level(s) (either pressure or depth) in m or db.
    latitude - only set if l contains depths (can be array or scalar) in deg.

    Ported from Ops_OceanRhoEOS25 from NEMOQC, which uses the 25 term
    expression given by McDougall et al (2003; JAOT 20, #5), an accurate fit
    to the Feistel and Hagen (1995) equation of state.  That code is in turn
    based on the UM routine in RHO_EOS25 (constants in STATE), but with
    salinity in PSU and density in kg m**-3, as in McDougall.
    Test values from McDougall et al are:
        t = 25C, s = 35psu, p = 2000 db => rho = 1031.654229 kg/m^3.
            20       20         1000             1017.726743
            12       40         8000             1062.928258
    This function is not properly tested for anything other than basic usage.
    """
    # Small constant guarding against division by zero in the rational fit.
    epsln = 1.E-20
    # 25 coefficients in the realistic equation of state
    a0 = 9.99843699e+02
    a1 = 7.35212840e+00
    a2 = -5.45928211e-02
    a3 = 3.98476704e-04
    a4 = 2.96938239e+00
    a5 = -7.23268813e-03
    a6 = 2.12382341e-03
    a7 = 1.04004591e-02
    a8 = 1.03970529e-07
    a9 = 5.18761880e-06
    a10 = -3.24041825e-08
    a11 = -1.23869360e-11
    b0 = 1.00000000e+00
    b1 = 7.28606739e-03
    b2 = -4.60835542e-05
    b3 = 3.68390573e-07
    b4 = 1.80809186e-10
    b5 = 2.14691708e-03
    b6 = -9.27062484e-06
    b7 = -1.78343643e-10
    b8 = 4.76534122e-06
    b9 = 1.63410736e-09
    b10 = 5.30848875e-06
    b11 = -3.03175128e-16
    b12 = -1.27934137e-17
    # Convert depth to pressure when a latitude is supplied.
    if latitude is not None:
        p = depth_to_pressure(l, latitude)
    else:
        p = l
    # Input validation disabled as it does not work properly with masked
    # arrays.
    #assert np.count_nonzero(s <= 0.0) == 0, 'Negative salinity values detected'
    #assert np.count_nonzero(p < 0.0) == 0, 'Negative depths detected'
    # Evaluate the rational function rho = num / den.
    tsq = t * t
    sqrt_s = np.sqrt(s)
    pt = p * t
    num = (a0 + t*(a1 + t*(a2+a3*t))
           + s*(a4 + a5*t + a6*s)
           + p*(a7 + a8*tsq + a9*s + p*(a10+a11*tsq)))
    den = (b0 + t*(b1 + t*(b2 + t*(b3 + t*b4)))
           + s*(b5 + t*(b6 + b7*tsq) + sqrt_s*(b8 + b9*tsq))
           + p*(b10 + pt*(b11*tsq + b12*p)))
    denr = 1.0/(epsln+den)
    rho = num * denr
    return rho
| {
"repo_name": "IQuOD/AutoQC",
"path": "util/obs_utils.py",
"copies": "4",
"size": "5745",
"license": "mit",
"hash": -3809380547231274500,
"line_mean": 29.7219251337,
"line_max": 92,
"alpha_frac": 0.5603133159,
"autogenerated": false,
"ratio": 2.721459024159166,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.016492990410630216,
"num_lines": 187
} |
"""A collection of utilities to make testing with ros less painful.
"""
import os
import functools
import random
import socket
import subprocess
import time
import psutil
import rosgraph
import rosnode
def my_get_node_names(namespace=None, uri='http://localhost:11311'):
    """Monkeypatches get_node_names with a non-default ROS_MASTER_URI.

    :param namespace: optional namespace to scope the lookup to.
    :param uri: ROS master URI to query (default is the standard local one).
    :return: the list of node names reported by rosnode.
    """
    old_master = rosgraph.Master
    rosgraph.Master = functools.partial(rosgraph.Master, master_uri=uri)
    try:
        return rosnode.get_node_names(namespace=namespace)
    finally:
        # Always undo the monkeypatch, even if get_node_names raises;
        # the original code would leave rosgraph.Master patched on error.
        rosgraph.Master = old_master
def rand_port():
    """Pick a random port number for a private ROS master.

    This is potentially unsafe (the port is not reserved atomically), but
    shouldn't generally be a problem.
    """
    low, high = 10311, 12311
    return random.randint(low, high)
class RosTestMeta(type):
    """Metaclass for RosTest that adds the setup/teardown we want.

    Wraps any user-defined ``setUp``/``tearDown`` so that a private roscore
    is launched on a random port before each test and killed (together with
    all of its child processes) afterwards.
    """
    def __new__(mcs, name, bases, dct):
        def noop(_):
            """Do nothing function.

            This is injected if there is no user-defined setUp or tearDown
            method on an instance of RosTest.
            """
            pass
        old_setup = dct.get('setUp', noop)
        old_teardown = dct.get('tearDown', noop)
        def new_setup(self):
            """Wrapper around the user-defined setUp method that runs roscore.
            """
            self.port = rand_port()
            self.rosmaster_uri = 'http://{}:{}'.format(socket.gethostname(),
                    self.port)
            # dict(os.environ) works on both Python 2 and 3; the previous
            # os.environ.iteritems() is Python-2-only.
            env = dict(os.environ)
            env.update({'ROS_MASTER_URI': self.rosmaster_uri})
            roscore_initialized = False
            while not roscore_initialized:
                self.roscore = subprocess.Popen(
                        ['roscore', '-p', str(self.port)], env=env)
                time.sleep(1)
                # poll() returns None only while the process is still
                # running; any returncode (including 0) means roscore has
                # already exited, so retry. The old `not poll()` check
                # wrongly accepted a clean immediate exit as success.
                if self.roscore.poll() is None:
                    roscore_initialized = True
                else:
                    self.roscore.kill()
                    self.roscore = None
            old_setup(self)
        def new_teardown(self):
            """Wrapper around the user-defined tearDown method to end roscore.
            """
            # Snapshot roscore's children before killing it so they can be
            # terminated even after the parent is gone.
            proc = psutil.Process(self.roscore.pid)
            children = proc.children(recursive=True)
            old_teardown(self)
            self.roscore.kill()
            self.roscore.wait()
            self.roscore = None
            for child in children:
                try:
                    child.terminate()
                    child.wait()
                except psutil.NoSuchProcess:
                    # its possible the process has already been killed
                    pass
        dct['setUp'] = new_setup
        dct['tearDown'] = new_teardown
        dct['setUp'].__name__ = 'setUp'
        dct['tearDown'].__name__ = 'tearDown'
        return super(RosTestMeta, mcs).__new__(mcs, name, bases, dct)
| {
"repo_name": "gtagency/pyrostest",
"path": "pyrostest/rostest_utils.py",
"copies": "1",
"size": "3300",
"license": "mit",
"hash": -6761907423034320000,
"line_mean": 30.4285714286,
"line_max": 78,
"alpha_frac": 0.56,
"autogenerated": false,
"ratio": 4.336399474375821,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.010281049974871734,
"num_lines": 105
} |
"""A collection of utilities used across the project."""
import os
import importlib
import prettytable
from oslo_log import log as logging
from asciipic.common import exception
LOG = logging.getLogger(__name__)
def get_attribute(root, attribute):
    """Search for the received attribute name in the object tree.

    Walks from *root* up the chain of ``parent`` references until an object
    carrying *attribute* is found.

    :param root: the root object
    :param attribute: the name of the required attribute
    :raises exception.AsciipicException: if no object in the chain has it.
    """
    pending = [root]
    while pending:
        candidate = pending.pop()
        if hasattr(candidate, attribute):
            return getattr(candidate, attribute)
        parent = getattr(candidate, "parent", None)
        if parent:
            pending.append(parent)
    raise exception.AsciipicException("The %(attribute)r attribute is "
                                      "missing from the object tree.",
                                      attribute=attribute)
def join_with_space(items, glue=', '):
    """Join the string forms of *items*, separated by *glue*.

    :param items: The items we want to join
    :param glue: the separator placed between items (default ``", "``)
    """
    return glue.join(str(item) for item in items)
class BaseFactory(object):
    """Base class for all Factories."""

    # The base class for all the items
    BASE_CLASS = None
    # The prefix patch for all the items
    PREFIX = ""
    # A list with all the items
    ITEMS = []
    # The name of the factory
    NAME = ""

    @classmethod
    def get_items(cls):
        """Return a list with all the items."""
        collected = []
        for item_module in cls.ITEMS:
            qualified = "{}.{}".format(cls.PREFIX, item_module)
            module = importlib.import_module(qualified)
            for attr_name in dir(module):
                candidate = getattr(module, attr_name)
                try:
                    # Keep strict subclasses of BASE_CLASS only; TypeError
                    # filters out non-class attributes.
                    is_item = (issubclass(candidate, cls.BASE_CLASS)
                               and candidate != cls.BASE_CLASS)
                except (exception.AsciipicException, TypeError):
                    continue
                if is_item:
                    collected.append(candidate)
        return collected
def empty_table():
    """Return an empty table."""
    headers = ["No items in this namespace."]
    return prettytable.PrettyTable(headers)
def get_unicode_string_type():
    """Return the type used for unicode text on this interpreter.

    ``unicode`` on Python 2, ``str`` on Python 3 (where the name
    ``unicode`` does not exist and raises NameError).
    """
    try:
        return unicode
    except NameError:
        return str
def get_resource_path(resource):
    """Get resource path from asciipic resources."""
    # Resources live one directory above this module.
    package_dir = os.path.abspath(os.path.dirname(__file__))
    raw_path = os.path.join(package_dir, "..", resource)
    return os.path.normpath(raw_path)
| {
"repo_name": "micumatei/asciipic",
"path": "asciipic/common/util.py",
"copies": "1",
"size": "2621",
"license": "mit",
"hash": -6946474802750318000,
"line_mean": 26.0206185567,
"line_max": 71,
"alpha_frac": 0.5860358642,
"autogenerated": false,
"ratio": 4.317957166392092,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 97
} |
"""A collection of utilities used internally by pynads. By no means are they
off limits for playing with, however, they aren't exported by pynads.
"""
from collections import Iterable, Mapping
from inspect import isfunction
# Names deliberately exported despite the leading underscores; keep this
# tuple in sync with the definitions below.
__all__ = ('_iter_but_not_str_or_map', '_propagate_self',
           '_single_value_iter', 'with_metaclass', '_get_names',
           '_get_name', 'iscallable', 'chain_dict_update', 'Instance')
def _iter_but_not_str_or_map(maybe_iter):
    """Return True for iterables that are neither strings nor mappings.

    Used by pynads.concrete.List to decide whether an iterable should be
    consumed or placed into a single value tuple.
    """
    if not isinstance(maybe_iter, Iterable):
        return False
    return not isinstance(maybe_iter, (str, Mapping))
def _propagate_self(self, *args, **kwargs):
    """Ignore all input and return *self* unchanged.

    Used where a method should simply propagate its object along: Just's
    or_else/or_call methods already hold a value so they ignore their
    inputs, while Nothing (and Left) use this for filter/fmap/apply/bind to
    signal failure and pass the original object down the chain.
    """
    return self
def _single_value_iter(x):
    """Yield *x* as the sole element of an iteration.

    Helper for pynads.concrete.list.Generator to put a single value into an
    iteration context.
    """
    yield x
def with_metaclass(meta, bases=(object,), name=None):
    """Create an anonymous base class with a metaclass.

    Allows compatibility between Python2 and Python3.

    >>> class MyThing(with_metaclass(type)):
    ...     pass
    >>> MyThing.__mro__
    ... (MyThing, typeBase, object)
    """
    if not name:
        # Default to e.g. "typeBase" for meta == type.
        name = "{!s}Base".format(meta.__name__)
    return meta(name, bases, {})
def iscallable(func):
    """Return True when *func* appears callable (defines ``__call__``).

    Exists because some versions of Python 3 (3.0 and 3.1) lack the
    ``callable`` builtin.  A True result only means the object *looks*
    callable; actually calling it may still fail.
    """
    return hasattr(func, '__call__')
def _get_name(obj):
    """Best-effort extraction of a human-readable name from *obj*.

    Recognizes functools.partial-style wrappers (anything with a ``func``
    attribute that itself has a name), callable objects that are not plain
    functions (their class name is used), and ordinary functions.  Returns
    the empty string when no name can be found.
    """
    try:
        # interop with functools.partial and objects that emulate it
        if hasattr(obj, 'func') and hasattr(obj.func, '__name__'):
            return "partialed {!s}".format(obj.func.__name__)
        # callable object that isn't a function
        if not isfunction(obj) and hasattr(obj, '__class__'):
            return obj.__class__.__name__
        # must be just a regular function
        return obj.__name__
    except AttributeError:
        return ''
def _get_names(*objs):
    """Extract readable names from the passed callables.

    Helper for pynads.funcs.compose: recurses into previously composed
    groups of functions (objects exposing an ``fs`` attribute) and defers to
    _get_name for partially applied functions and callable objects.
    """
    names = []
    for obj in objs:
        if hasattr(obj, 'fs'):
            # a previously composed group of functions: flatten its parts
            names += _get_names(*obj.fs)
        else:
            names += [_get_name(obj)]
    return names
def chain_dict_update(*ds):
    """Merge any number of dictionaries into a single new dictionary.

    If the same key appears multiple times, then the last appearance wins.

    >>> m, n, o = {'a':10}, {'b':7}, {'a':4}
    >>> chain_dict_update(m, n, o)
    ... {'b': 7, 'a': 4}
    """
    merged = {}
    for mapping in ds:
        merged.update(mapping)
    return merged
class Instance(object):
    """Descriptor that exposes an instance of a class as a class attribute.

    .. code-block:: python

        class Thing(object):
            thing = Instance()

    ``Thing.thing`` is an instance of ``Thing`` itself.  This is useful for
    monoids whose mempty is just an empty instance of the class.  Any
    constructor arguments can be supplied when the descriptor is created:

    .. code-block:: python

        class Thing(object):
            thing = Instance(hello="world")
            def __init__(self, hello):
                self.hello = hello

    The instance is built lazily on first access and cached inside the
    descriptor, so it is only created once per class.
    """
    def __init__(self, *args, **kwargs):
        self.args = args
        self.kwargs = kwargs
        self._inst = None

    def __get__(self, _, cls):
        inst = self._inst
        if inst is None:
            inst = self._inst = cls(*self.args, **self.kwargs)
        return inst
| {
"repo_name": "justanr/pynads",
"path": "pynads/utils/internal.py",
"copies": "1",
"size": "4931",
"license": "mit",
"hash": 5957453975872053000,
"line_mean": 32.0939597315,
"line_max": 80,
"alpha_frac": 0.6430744271,
"autogenerated": false,
"ratio": 4.171742808798647,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5314817235898647,
"avg_score": null,
"num_lines": null
} |
"""A collection of utility functions and classes related to IO streams. For internal use only."""
import io
import typing
def read_exact(stream: typing.BinaryIO, byte_count: int) -> bytes:
    """Read exactly byte_count bytes from the stream.

    :param stream: The stream to read from.
    :param byte_count: The number of bytes to read.
    :return: The read data, which is exactly ``byte_count`` bytes long.
    :raise EOFError: If not enough data could be read from the stream
        (i. e. EOF was hit prematurely).
    """
    data = stream.read(byte_count)
    if len(data) == byte_count:
        return data
    raise EOFError(f"Attempted to read {byte_count} bytes of data, but only got {len(data)} bytes")
# Defined only for static type checkers; this Protocol has no runtime use.
if typing.TYPE_CHECKING:
    class PeekableIO(typing.Protocol):
        """Minimal protocol for binary IO streams that support the peek method.

        The peek method is supported by various standard Python binary IO streams, such as io.BufferedReader. If a stream does not natively support the peek method, it may be wrapped using the custom helper function make_peekable.
        """

        def readable(self) -> bool:
            ...

        def read(self, size: typing.Optional[int] = ...) -> bytes:
            ...

        def peek(self, size: int = ...) -> bytes:
            ...
class _PeekableIOWrapper(object):
    """Wrapper adding peek support to an existing stream.

    Do not instantiate this class directly, use the make_peekable function
    instead.

    Python's io.BufferedReader supports peek, but per its documentation only
    wraps io.RawIOBase subclasses, not streams that are already otherwise
    buffered.

    Warning: this class performs no buffering of its own beyond what peek
    requires.  It is strongly recommended to only wrap streams that are
    already buffered or otherwise fast to read from; raw streams
    (io.RawIOBase subclasses) should be wrapped using io.BufferedReader
    instead.
    """
    _wrapped: typing.BinaryIO
    _readahead: bytes

    def __init__(self, wrapped: typing.BinaryIO) -> None:
        super().__init__()
        self._wrapped = wrapped
        self._readahead = b""

    def readable(self) -> bool:
        return self._wrapped.readable()

    def read(self, size: typing.Optional[int] = None) -> bytes:
        buffered = self._readahead
        if size is None or size < 0:
            # Drain everything: the readahead plus the rest of the stream.
            self._readahead = b""
            return buffered + self._wrapped.read()
        if size <= len(buffered):
            # Serve the request entirely from the readahead buffer.
            self._readahead = buffered[size:]
            return buffered[:size]
        # Readahead too short - consume it and read the remainder.
        self._readahead = b""
        return buffered + self._wrapped.read(size - len(buffered))

    def peek(self, size: int = -1) -> bytes:
        if not self._readahead:
            request = io.DEFAULT_BUFFER_SIZE if size < 0 else size
            self._readahead = self._wrapped.read(request)
        return self._readahead
def make_peekable(stream: typing.BinaryIO) -> "PeekableIO":
    """Wrap an arbitrary binary IO stream so that it supports the peek method.

    The wrapping is as cheap as possible (none at all when the stream
    already has peek).  In the worst case a custom wrapper class is used,
    which may not be particularly efficient and only supports a very
    minimal interface: the only methods guaranteed on the returned stream
    are readable, read, and peek.
    """
    if hasattr(stream, "peek"):
        # Already peekable - return it unchanged.
        return typing.cast("PeekableIO", stream)
    if not typing.TYPE_CHECKING and isinstance(stream, io.RawIOBase):
        # This branch is skipped when type checking - mypy incorrectly warns
        # about this code being unreachable, because it thinks that a
        # typing.BinaryIO cannot be an instance of io.RawIOBase.
        # Raw IO streams can be wrapped efficiently using BufferedReader.
        return io.BufferedReader(stream)
    # Other streams need to be wrapped using our custom wrapper class.
    return _PeekableIOWrapper(stream)
| {
"repo_name": "dgelessus/python-rsrcfork",
"path": "rsrcfork/_io_utils.py",
"copies": "1",
"size": "3928",
"license": "mit",
"hash": -5628069400250655000,
"line_mean": 41.2365591398,
"line_max": 356,
"alpha_frac": 0.7309063136,
"autogenerated": false,
"ratio": 3.773294908741595,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.904559353653712,
"avg_score": 0.19172153716089485,
"num_lines": 93
} |
"""A collection of utility functions for dealing with sensors and sensor data.
More sophisticated operations call for the use of a full-fledged sensor
package, such as Open3D or PCL.
Sensor transforms
=================
The :func:`get_sensor_xform`/:func:`set_sensor_xform` functions are used to
interface cleanly with the klampt :mod:`klampt.math.se3` transform
representation.
Getting images and point clouds
===============================
The :func:`camera_to_images`, :func:`camera_to_points`, and
:func:`camera_to_points_world` functions convert raw CameraSensor outputs to
Python objects that are more easily operated upon, e.g., images and point
clouds. Use these to retrieve images as Numpy arrays.
The :func:`image_to_points` function converts a depth / color image to a
point cloud, given camera intrinsic information.
Working with cameras
====================
The :func:`camera_to_viewport` and :func:`viewport_to_camera` functions help
with converting to and from the :class:`klampt.vis.glprogram.GLViewport` class
used in :mod:`klampt.vis`.
The :func:`camera_to_intrinsics` and :func:`intrinsics_to_camera` functions
convert between intrinsics definitions.
:func:`camera_ray`, and :func:`camera_project` convert to/from image points.
:func:`visible` determines whether a point or object is visible from a camera.
"""
from ..robotsim import *
from ..io import loader
from . import coordinates
import math
import sys
from ..math import vectorops,so3,se3
import time
# Lazy-import state: np/sp stay None until _try_numpy_import /
# _try_scipy_import populate them, so this module can be imported without
# the optional dependencies installed.
_has_numpy = False
_tried_numpy_import = False
np = None
_has_scipy = False
_tried_scipy_import = False
sp = None
def _try_numpy_import():
    """Lazily import numpy, remembering the outcome of the first attempt.

    Populates the module-level ``np`` alias and returns True when numpy is
    available; emits an ImportWarning and returns False otherwise.
    """
    global _has_numpy, _tried_numpy_import, np
    if not _tried_numpy_import:
        _tried_numpy_import = True
        try:
            import numpy as np
            _has_numpy = True
        except ImportError:
            import warnings
            warnings.warn("klampt.model.sensing.py: numpy not available.",ImportWarning)
            _has_numpy = False
    return _has_numpy
def _try_scipy_import():
    """Lazily import scipy, remembering the outcome of the first attempt.

    Populates the module-level ``sp`` alias and returns True when scipy is
    available; emits an ImportWarning and returns False otherwise.
    """
    global _has_scipy, _tried_scipy_import, sp
    if not _tried_scipy_import:
        _tried_scipy_import = True
        try:
            import scipy as sp
            _has_scipy = True
        except ImportError:
            import warnings
            warnings.warn("klampt.model.sensing.py: scipy not available.",ImportWarning)
            _has_scipy = False
    return _has_scipy
def get_sensor_xform(sensor,robot=None):
    """Extracts the transform of a SimRobotSensor.  The sensor must be of a
    link-mounted type, e.g., a CameraSensor or ForceSensor.

    Args:
        sensor (SimRobotSensor)
        robot (RobotModel, optional): if provided, returns the world
            coordinates of the sensor.  Otherwise, returns the local
            coordinates on the link to which it is mounted.

    Returns:
        klampt.se3 object: the sensor transform
    """
    Tsensor = loader.readSe3(sensor.getSetting("Tsensor"))
    if robot is None:
        return Tsensor
    link_index = int(sensor.getSetting("link"))
    if link_index < 0:
        # Negative link index: return the stored transform unchanged.
        return Tsensor
    return se3.mul(robot.link(link_index).getTransform(),Tsensor)
def set_sensor_xform(sensor,T,link=None):
    """Given a link-mounted sensor (e.g., CameraSensor or ForceSensor), sets
    its link-local transform to T.

    Args:
        sensor (SimRobotSensor)
        T (se3 element or coordinates.Frame): desired local coordinates of the
            sensor on its link.
        link (int or RobotModelLink, optional): if provided, the link of the
            sensor is modified.

    Another way to set a sensor is to give a coordinates.Frame object. This
    frame must either be associated with a RobotModelLink or its parent should
    be associated with one.

    (the reason why you should use this is that the Tsensor attribute has a
    particular format using the loader.writeSe3 function.)
    """
    if isinstance(T,coordinates.Frame):
        if isinstance(T._data,RobotModelLink):
            # The frame IS a link: attach the sensor directly to that link
            # with an identity local transform.
            return set_sensor_xform(sensor,se3.identity(),T._data)
        parent = None
        if T.parent() is None:
            # No parent frame: use link index -1.
            parent = -1
        else:
            # assume its parent is a link?  NOTE(review): unchecked; a
            # non-link parent is passed through and would fail below.
            parent = T.parent()._data
        return set_sensor_xform(sensor,T.relativeCoordinates(),parent)
    try:
        # Probe only: verifies the sensor has a Tsensor setting; the value
        # itself is unused.
        s = sensor.getSetting("Tsensor")
    except Exception:
        raise ValueError("Sensor does not have a Tsensor attribute")
    sensor.setSetting("Tsensor",loader.writeSe3(T))
    if link != None:
        if isinstance(link,RobotModelLink):
            sensor.setSetting("link",str(link.index))
        else:
            assert isinstance(link,int),"Can only set a sensor transform to a RobotModelLink or an integer link index"
            sensor.setSetting("link",str(link))
def camera_to_images(camera,image_format='numpy',color_format='channels'):
    """Given a SimRobotSensor that is a CameraSensor, returns either the RGB
    image, the depth image, or both.

    Args:
        camera (SimRobotSensor): a sensor that is of 'CameraSensor' type
        image_format (str): governs the return type. Can be:

            * 'numpy' (default): returns numpy arrays. Depending on the
              value of color_format, the RGB image either has shape (h,w,3)
              and dtype uint8 or (h,w) and dtype uint32. Depth images are
              numpy arrays with shape (h,w). Will fall back to 'native' if
              numpy is not available.
            * 'native': returns list-of-lists arrays in the same format as
              above

        color_format (str): governs how pixels in the RGB result are packed.
            Can be:

            * 'channels' (default): returns a 3D array with 3 channels
              corresponding to R, G, B values in the range [0,255].
            * 'rgb' returns a 2D array with a 32-bit integer channel, with
              R,G,B channels packed in hex format 0xrrggbb.
            * 'bgr': similar to 'rgb' but with hex order 0xbbggrr.

    (Note that image_format='native' takes up a lot of extra memory, especially
    with color_format='channels')

    Returns:
        tuple: (rgb, depth), which are either numpy arrays or list-of-lists
        format, as specified by image_format.  If the sensor has only one of
        the rgb/depth channels enabled, just that single image is returned.

            * rgb: the RGB result (packed as specified by color_format)
            * depth: the depth result (floats)
    """
    assert isinstance(camera,SimRobotSensor),"Must provide a SimRobotSensor instance"
    assert camera.type() == 'CameraSensor',"Must provide a camera sensor instance"
    w = int(camera.getSetting('xres'))
    h = int(camera.getSetting('yres'))
    has_rgb = int(camera.getSetting('rgb'))
    has_depth = int(camera.getSetting('depth'))
    # Flat list: w*h packed ARGB values (if rgb), then w*h depths (if depth).
    measurements = camera.getMeasurements()
    if image_format == 'numpy':
        if not _try_numpy_import():
            image_format = 'native'
    rgb = None
    depth = None
    if has_rgb:
        if image_format == 'numpy':
            argb = np.array(measurements[0:w*h]).reshape(h,w).astype(np.uint32)
            if color_format == 'rgb':
                rgb = argb
            elif color_format == 'bgr':
                # Swap the R and B bytes of each 0xrrggbb pixel to 0xbbggrr.
                # BUGFIX: the previous np.bitwise_or.reduce((a,b), c) call
                # passed the third array as the ufunc's `axis` argument,
                # which raises a TypeError; combine with `|` instead.
                rgb = (np.left_shift(np.bitwise_and(argb,0x00000ff), 16)
                       | np.bitwise_and(argb,0x000ff00)
                       | np.right_shift(np.bitwise_and(argb,0x0ff0000), 16))
            else:
                # Unpack 0xrrggbb into separate uint8 channels.
                rgb = np.zeros((h,w,3),dtype=np.uint8)
                rgb[:,:,0] = np.right_shift(np.bitwise_and(argb,0x0ff0000), 16)
                rgb[:,:,1] = np.right_shift(np.bitwise_and(argb,0x00ff00), 8)
                rgb[:,:,2] = np.bitwise_and(argb,0x00000ff)
        else:
            if color_format == 'rgb':
                rgb = []
                for i in range(h):
                    rgb.append([int(v) for v in measurements[i*w:(i+1)*w]])
            elif color_format == 'bgr':
                def bgr_to_rgb(pixel):
                    return ((pixel & 0x0000ff) << 16) | (pixel & 0x00ff00) | ((pixel & 0xff0000) >> 16)
                rgb = []
                for i in range(h):
                    rgb.append([bgr_to_rgb(int(v)) for v in measurements[i*w:(i+1)*w]])
            else:
                rgb = []
                for i in range(h):
                    start = i*w
                    row = []
                    for j in range(w):
                        pixel = int(measurements[start+j])
                        row.append([(pixel>>16)&0xff,(pixel>>8)&0xff,pixel&0xff])
                    rgb.append(row)
    if has_depth:
        # Depth values follow the color block (if any) in the flat list.
        start = (w*h if has_rgb else 0)
        if image_format == 'numpy':
            depth = np.array(measurements[start:start+w*h]).reshape(h,w)
        else:
            depth = []
            for i in range(h):
                depth.append(measurements[start+i*w:start+(i+1)*w])
    if has_rgb and has_depth:
        return rgb,depth
    elif has_rgb:
        return rgb
    elif has_depth:
        return depth
    return None
def image_to_points(depth,color,xfov,yfov=None,depth_scale=None,depth_range=None,color_format='auto',points_format='numpy',all_points=False):
    """Given a depth and optionally color image, returns a point cloud
    representing the depth or RGB-D scene.

    Args:
        depth (list of lists or numpy array): the w x h depth image (rectified).
        color (list of lists or numpy array, optional): the w x h color image.
            Assumed that color maps directly onto depth pixels. If None,
            an uncolored point cloud will be produced.
        xfov (float): horizontal field of view, in radians.
        yfov (float, optional): vertical field of view, in radians. If not
            given, square pixels are assumed.
        depth_scale (float, optional): a scaling from depth image values to
            absolute depth values.
        depth_range (pair of floats, optional): if given, only points within
            this depth range (non-inclusive) will be extracted. If
            all_points=False, points that fail the range test will be stripped
            from the output. E.g., (0.5,8.0) only extracts points with
            z > 0.5 and z < 8 units.
        color_format (str): governs how pixels in the RGB result are packed.
            Ignored when color is None. Can be:

            * 'auto' (default): if it's a 3D array, it assumes elements are in
              'channels' format, otherwise it assumes 'rgb'.
            * 'channels': a 3D array with 3 channels corresponding to R, G,
              B values in the range [0,255] if uint8 type, otherwise in the
              range [0,1].
            * 'rgb' a 2D array with a 32-bit integer channel, with
              R,G,B channels packed in hex format 0xrrggbb.

        points_format (str, optional): configures the format of the return
            value. Can be:

            * 'numpy' (default): either an Nx3, Nx4, or Nx6 numpy array,
              depending on whether color is requested (and its format). Will
              fall back to 'native' if numpy is not available.
            * 'native': same as numpy, but in list-of-lists format rather than
              numpy arrays.
            * 'PointCloud': a Klampt PointCloud object
            * 'Geometry3D': a Klampt Geometry3D point cloud object

        all_points (bool, optional): configures whether bad points should be
            stripped out. If False (default), this strips out all pixels that
            don't have a good depth reading (i.e., the camera sensor's maximum
            reading.) If True, these pixels are all set to (0,0,0).

    Returns:
        numpy ndarray or Geometry3D: the point cloud. Represented as being
        local to the standard camera frame with +x to the right, +y down,
        +z forward.
    """
    has_numpy = _try_numpy_import()
    if not has_numpy:
        raise NotImplementedError("TODO image_to_points without numpy")
    depth = np.asarray(depth)
    assert len(depth.shape)==2
    h,w = depth.shape
    if color is None:
        # BUGFIX: the original code left color_format at its default and
        # later dereferenced color.shape on None; with no color image,
        # never attach color channels.
        color_format = None
    else:
        color = np.asarray(color)
        if h != color.shape[0] or w != color.shape[1]:
            raise ValueError("color and depth need to have same dimensions")
        if color_format == 'auto':
            if len(color.shape)==3:
                color_format = 'channels'
            else:
                assert len(color.shape)==2
                color_format = 'rgb'
    if depth_scale is not None:
        # Out-of-place multiply so the caller's array is never modified
        # (the original `depth *= depth_scale` mutated it in place).
        depth = depth * depth_scale
    if depth_range is not None:
        valid = np.logical_and((depth > depth_range[0]),(depth < depth_range[1]))
        if all_points:
            # Copy before zeroing so the caller's array is not clobbered.
            depth = depth.copy()
            depth[~valid] = 0
            valid = (depth > 0)
    else:
        valid = (depth > 0)
    # Pinhole model: per-column / per-row ray direction scaling factors.
    xshift = -w*0.5
    yshift = -h*0.5
    xscale = math.tan(xfov*0.5)/(w*0.5)
    if yfov is not None:
        yscale = math.tan(yfov*0.5)/(h*0.5)
    else:
        yscale = xscale  # square pixels are assumed
    xs = [(j+xshift)*xscale for j in range(w)]
    ys = [(i+yshift)*yscale for i in range(h)]
    if color_format == 'channels' and color.dtype == np.uint8:
        # scale byte colors to the range [0,1]
        color = color*(1.0/255.0)
    xgrid = np.repeat(np.array(xs).reshape((1,w)),h,0)
    ygrid = np.repeat(np.array(ys).reshape((h,1)),w,1)
    assert xgrid.shape == (h,w)
    assert ygrid.shape == (h,w)
    pts = np.dstack((np.multiply(xgrid,depth),np.multiply(ygrid,depth),depth))
    assert pts.shape == (h,w,3)
    if color_format is not None:
        if len(color.shape) == 2:
            color = color.reshape(color.shape[0],color.shape[1],1)
        pts = np.concatenate((pts,color),2)
    # now have a nice array containing all points, shaped h x w x (3+c);
    # extract out the valid points from this array
    if all_points:
        pts = pts.reshape(w*h,pts.shape[2])
    else:
        pts = pts[valid]
    if points_format == 'native':
        return pts.tolist()
    elif points_format == 'numpy':
        return pts
    elif points_format == 'PointCloud' or points_format == 'Geometry3D':
        res = PointCloud()
        if all_points:
            res.setSetting('width',str(w))
            res.setSetting('height',str(h))
        res.setPoints(pts.shape[0],pts[:,0:3].flatten().tolist())
        if color_format == 'rgb':
            res.addProperty('rgb')
            res.setProperties(pts[:,3].flatten().tolist())
        elif color_format == 'channels':
            res.addProperty('r')
            res.addProperty('g')
            res.addProperty('b')
            res.setProperties(pts[:,3:6].flatten().tolist())
        if points_format == 'PointCloud':
            return res
        else:
            from klampt import Geometry3D
            g = Geometry3D()
            g.setPointCloud(res)
            return g
    else:
        raise ValueError("Invalid points_format, must be either native, numpy, PointCloud, or Geometry3D")
def camera_to_points(camera,points_format='numpy',all_points=False,color_format='channels'):
    """Given a SimRobotSensor that is a CameraSensor, returns a point cloud
    associated with the current measurements.

    Points are triangulated with respect to the camera's intrinsic
    coordinates, and are returned in the camera local frame (+z backward,
    +x toward the right, +y toward up).

    Args:
        camera (SimRobotSensor): a 'CameraSensor' with its depth channel
            enabled.
        points_format (str, optional): configures the format of the return
            value. Can be:

            * 'numpy' (default): either an Nx3, Nx4, or Nx6 numpy array,
              depending on whether color is requested (and its format). Will
              fall back to 'native' if numpy is not available.
            * 'native': same as numpy, but in list-of-lists format rather than
              numpy arrays.
            * 'PointCloud': a Klampt PointCloud object
            * 'Geometry3D': a Klampt Geometry3D point cloud object

        all_points (bool, optional): configures whether bad points should be
            stripped out. If False (default), this strips out all pixels that
            don't have a good depth reading (i.e., the camera sensor's maximum
            reading.) If True, these pixels are all set to (0,0,0).
        color_format (str): If the sensor has an RGB component, then color
            channels may be produced. This value configures the output format,
            and can take on the values:

            * 'channels': produces individual R,G,B channels in the range
              [0,1]. (note this is different from the interpretation of
              camera_to_images)
            * 'rgb': produces a single 32-bit integer channel packing the
              8-bit color channels together in the format 0xrrggbb.
            * None: no color is produced.

    Returns:
        object: the point cloud in the requested format.
    """
    assert isinstance(camera,SimRobotSensor),"Must provide a SimRobotSensor instance"
    assert camera.type() == 'CameraSensor',"Must provide a camera sensor instance"
    assert int(camera.getSetting('depth'))==1,"Camera sensor must have a depth channel"
    has_numpy = _try_numpy_import()
    if points_format == 'numpy' and not has_numpy:
        points_format = 'native'
    images = camera_to_images(camera,'numpy',color_format)
    assert images is not None
    rgb,depth = None,None
    if int(camera.getSetting('rgb'))==0:
        # Depth-only sensor: camera_to_images returned just the depth image.
        depth = images
        color_format = None
    else:
        rgb,depth = images
    w = int(camera.getSetting('xres'))
    h = int(camera.getSetting('yres'))
    xfov = float(camera.getSetting('xfov'))
    yfov = float(camera.getSetting('yfov'))
    zmin = float(camera.getSetting('zmin'))
    zmax = float(camera.getSetting('zmax'))
    # Pinhole model: per-column / per-row ray direction scaling factors.
    xshift = -w*0.5
    yshift = -h*0.5
    xscale = math.tan(xfov*0.5)/(w*0.5)
    yscale = xscale  # square pixels are assumed (yfov is not used)
    xs = [(j+xshift)*xscale for j in range(w)]
    ys = [(i+yshift)*yscale for i in range(h)]
    if has_numpy:
        if all_points:
            # Zero out max-range readings but keep their pixels.
            depth[depth >= zmax] = 0
        if color_format == 'channels':
            # scale byte colors to range [0,1]
            rgb = rgb*(1.0/255.0)
        xgrid = np.repeat(np.array(xs).reshape((1,w)),h,0)
        ygrid = np.repeat(np.array(ys).reshape((h,1)),w,1)
        assert xgrid.shape == (h,w)
        assert ygrid.shape == (h,w)
        pts = np.dstack((np.multiply(xgrid,depth),np.multiply(ygrid,depth),depth))
        assert pts.shape == (h,w,3)
        if color_format is not None:
            if len(rgb.shape) == 2:
                rgb = rgb.reshape(rgb.shape[0],rgb.shape[1],1)
            pts = np.concatenate((pts,rgb),2)
        # now have a nice array containing all points, shaped h x w x (3+c);
        # extract out the valid points from this array
        if all_points:
            pts = pts.reshape(w*h,pts.shape[2])
        else:
            pts = pts[depth < zmax]
        if points_format == 'native':
            return pts.tolist()
        elif points_format == 'numpy':
            return pts
        elif points_format == 'PointCloud' or points_format == 'Geometry3D':
            res = PointCloud()
            if all_points:
                res.setSetting('width',str(w))
                res.setSetting('height',str(h))
            res.setPoints(pts.shape[0],pts[:,0:3].flatten().tolist())
            if color_format == 'rgb':
                res.addProperty('rgb')
                res.setProperties(pts[:,3].flatten().tolist())
            elif color_format == 'channels':
                res.addProperty('r')
                res.addProperty('g')
                res.addProperty('b')
                res.setProperties(pts[:,3:6].flatten().tolist())
            elif color_format == 'bgr':
                raise ValueError("bgr color format not supported with PointCloud output")
            if points_format == 'PointCloud':
                return res
            else:
                from klampt import Geometry3D
                g = Geometry3D()
                g.setPointCloud(res)
                return g
        else:
            # BUGFIX: an unreachable `return Nnoe` (NameError if ever
            # executed) used to follow this raise; the dead line is removed.
            raise ValueError("Invalid points_format "+points_format)
    else:
        raise NotImplementedError("Native format depth image processing not done yet")
def camera_to_points_world(camera,robot,points_format='numpy',color_format='channels'):
    """Same as :meth:`camera_to_points`, but converts to the world coordinate
    system given the robot to which the camera is attached.

    Points that have no reading are stripped out.

    Args:
        camera (SimRobotSensor): a 'CameraSensor' type sensor with a depth channel.
        robot (RobotModel): the robot on which the camera is mounted.
        points_format (str, optional): see :meth:`camera_to_points`.
        color_format (str, optional): see :meth:`camera_to_points`.

    Returns:
        object: the world-space point cloud, in the requested format.
    """
    assert isinstance(camera,SimRobotSensor),"Must provide a SimRobotSensor instance"
    assert camera.type() == 'CameraSensor',"Must provide a camera sensor instance"
    #world transform of the camera (link transform composed with local sensor transform)
    #fixed: removed dead code that re-read and parsed the 'Tsensor' setting by hand
    Tworld = get_sensor_xform(camera,robot)
    #camera-local points, with invalid-depth pixels stripped out
    pts = camera_to_points(camera,points_format,all_points=False,color_format=color_format)
    if points_format == 'numpy':
        #transform the xyz columns in one vectorized operation; color columns untouched
        Rw = np.array(so3.matrix(Tworld[0]))
        tw = np.array(Tworld[1])
        pts[:,0:3] = np.dot(pts[:,0:3],Rw.T) + tw
    elif points_format == 'native':
        for p in pts:
            p[0:3] = se3.apply(Tworld,p[0:3])
    elif points_format == 'PointCloud' or points_format == 'Geometry3D':
        pts.transform(*Tworld)
    else:
        raise ValueError("Invalid format "+str(points_format))
    return pts
def camera_to_viewport(camera,robot):
    """Returns a GLViewport instance corresponding to the camera's view.

    See :mod:`klampt.vis.glprogram` and :mod:`klampt.vis.visualization` for
    information about how to use the object with the visualization, e.g.
    ``vis.setViewport(vp)``.

    Args:
        camera (SimRobotSensor): the camera instance.
        robot (RobotModel): the robot on which the camera is located, which
            should be set to the robot's current configuration. This could be
            set to None, in which case the camera's transform is in its link's
            local coordinates.

    Returns:
        :class:`GLViewport`: matches the camera's viewport.
    """
    assert isinstance(camera,SimRobotSensor),"Must provide a SimRobotSensor instance"
    assert camera.type() == 'CameraSensor',"Must provide a camera sensor instance"
    from ..vis.glviewport import GLViewport
    get = camera.getSetting
    width,height = int(get('xres')),int(get('yres'))
    fov_x = float(get('xfov'))
    fov_y = float(get('yfov'))   #read for parity with the other settings
    near,far = float(get('zmin')),float(get('zmax'))
    cam_xform = get_sensor_xform(camera,robot)
    vp = GLViewport()
    vp.w, vp.h = width,height
    vp.fov = math.degrees(fov_x)
    vp.camera.dist = 1.0
    #look-at target one unit along the camera's viewing axis
    vp.camera.tgt = se3.apply(cam_xform,[0,0,vp.camera.dist])
    #axes corresponding to right, down, fwd in camera view
    vp.camera.set_orientation(cam_xform[0],['x','y','z'])
    vp.clippingplanes = (near,far)
    return vp
def viewport_to_camera(viewport,camera,robot):
    """Fills in a simulated camera's settings to match a GLViewport specifying
    the camera's view.

    Args:
        viewport (GLViewport): the viewport to match
        camera (SimRobotSensor): the viewport will be output to this sensor
        robot (RobotModel): the robot on which the camera is located, which
            should be set to the robot's current configuration. This could be
            set to None, in which case the camera's transform is in its link's
            local coordinates.

    Returns:
        SimRobotSensor: the same ``camera`` object, with its settings updated.
    """
    from ..vis.glprogram import GLViewport
    assert isinstance(viewport,GLViewport)
    assert isinstance(camera,SimRobotSensor),"Must provide a SimRobotSensor instance"
    assert camera.type() == 'CameraSensor',"Must provide a camera sensor instance"
    #mount the camera at the viewport's pose, on the sensor's configured link
    link_index = int(camera.getSetting('link'))
    mount_link = None if (robot is None or link_index < 0) else robot.link(link_index)
    set_sensor_xform(camera,viewport.getTransform(),mount_link)
    near,far = viewport.clippingplanes
    fov_x = math.radians(viewport.fov)
    #square pixels assumed: y FOV scales with the aspect ratio
    fov_y = fov_x*viewport.h/viewport.w
    for key,value in [('xres',viewport.w),('yres',viewport.h),
                      ('xfov',fov_x),('yfov',fov_y),
                      ('zmin',near),('zmax',far)]:
        camera.setSetting(key,str(value))
    return camera
def camera_to_intrinsics(camera,format='opencv',fn=None):
    """Returns the camera's intrinsics and/or saves them to a file under the
    given format.

    Args:
        camera (SimRobotSensor): the camera instance.
        format (str): either 'opencv', 'numpy', 'ros', or 'json' describing the
            desired type
        fn (str, optional): the file to save to (must be .json, .xml, or .yml).

    Returns:
        varies: If format='opencv', the (projection, distortion) matrix is
        returned. If format='numpy', just the projection matrix is returned.
        If format=='json', a dict of the fx, fy, cx, cy values is returned
    """
    assert isinstance(camera,SimRobotSensor),"Must provide a SimRobotSensor instance"
    assert camera.type() == 'CameraSensor',"Must provide a camera sensor instance"
    w = int(camera.getSetting('xres'))
    h = int(camera.getSetting('yres'))
    xfov = float(camera.getSetting('xfov'))
    yfov = float(camera.getSetting('yfov'))
    #pinhole model: focal lengths in pixels derived from the fields of view
    fx = 0.5*w/math.tan(xfov*0.5)
    fy = 0.5*h/math.tan(yfov*0.5)
    #principal point is assumed to lie at the image center
    cx = w*0.5
    cy = h*0.5
    if format == 'json':
        #fixed: the original stored 'cx':cy and omitted 'cy' entirely, which
        #intrinsics_to_camera (reading both 'cx' and 'cy') could not consume
        jsonobj = {'fx':fx,'fy':fy,'cx':cx,'cy':cy,'model':None,'coeffs':[]}
        if fn is not None:
            import json
            with open(fn,'w') as f:
                json.dump(jsonobj,f)
        return jsonobj
    elif format == 'numpy':
        import numpy as np
        res = np.zeros((3,3))
        res[0,0] = fx
        res[1,1] = fy
        res[0,2] = cx
        res[1,2] = cy
        res[2,2] = 1
        if fn is not None:
            np.save(fn,res)
        return res
    elif format == 'ros':
        from ..io import ros
        return ros.to_CameraInfo(camera)
    elif format == 'opencv':
        import numpy as np
        res = np.zeros((3,3))
        dist = np.zeros(5)
        res[0,0] = fx
        res[1,1] = fy
        res[0,2] = cx
        res[1,2] = cy
        res[2,2] = 1
        if fn is not None:
            if fn.endswith('yml'):
                #write as YAML
                with open(fn,'w') as f:
                    #fixed: the original called w.write (w is the int x-resolution);
                    #also fixed the YAML so OpenCV can parse it: nested matrix keys
                    #are indented and flow sequences are comma-separated
                    f.write("""%YAML:1.0
image_width: {}
image_height: {}
camera_matrix: !!opencv-matrix
   rows: 3
   cols: 3
   dt: d
   data: [ {}, 0., {}, 0.,
       {}, {}, 0., 0., 1. ]
distortion_coefficients: !!opencv-matrix
   rows: 1
   cols: 5
   dt: d
   data: [ 0., 0., 0., 0., 0. ]""".format(w,h,fx,cx,fy,cy))
            else:
                #write as XML
                with open(fn,'w') as f:
                    #fixed: same w.write -> f.write bug as the YAML branch, and
                    #close the <opencv_storage> root element
                    f.write("""<opencv_storage>
<cameraResolution>
{} {}</cameraResolution>
<cameraMatrix type_id="opencv-matrix">
<rows>3</rows>
<cols>3</cols>
<dt>d</dt>
<data>
{} 0 {} 0 {} {} 0 0 1</data></cameraMatrix>
<dist_coeffs type_id="opencv-matrix">
<rows>1</rows>
<cols>5</cols>
<dt>d</dt>
<data>
0 0 0. 0. 0</data></dist_coeffs>
</opencv_storage>""".format(w,h,fx,cx,fy,cy))
        return res,dist
    else:
        raise ValueError("Invalid format, only opencv, numpy, ros, and json are supported")
def intrinsics_to_camera(data,camera,format='opencv'):
    """Fills in a simulated camera's settings to match given intrinsics. Note:
    all distortions are dropped.

    Args:
        data: the file or data to set. Interpretation varies depending on format.
        camera (SimRobotSensor): the viewport will be output to this sensor
        format (str): either 'opencv', 'numpy', 'ros', or 'json'

    Returns:
        SimRobotSensor: the same ``camera`` object, with its settings updated.
    """
    assert isinstance(camera,SimRobotSensor),"Must provide a SimRobotSensor instance"
    assert camera.type() == 'CameraSensor',"Must provide a camera sensor instance"
    if isinstance(data,str):
        #data is a filename: load its contents and recurse with the parsed data.
        #fixed: the original wrapped these branches in an unused open(data)
        #handle (and the json branch then opened the file a second time)
        if format == 'opencv':
            raise NotImplementedError("TODO: read from OpenCV calibrations")
        elif format == 'numpy':
            import numpy as np
            return intrinsics_to_camera(np.load(data),camera,format)
        elif format == 'json':
            import json
            with open(data,'r') as f:
                jsonobj = json.load(f)
            return intrinsics_to_camera(jsonobj,camera,format)
        else:
            raise ValueError("Invalid format, only opencv, numpy, and json are supported to load from disk")
    if format == 'ros':
        from ..io import ros
        return ros.from_CameraInfo(data,camera)
    elif format == 'numpy':
        if data.shape != (3,3):
            raise ValueError("data must be a 3x3 numpy matrix")
        fx = data[0,0]
        fy = data[1,1]
        cx = data[0,2]
        cy = data[1,2]
    elif format == 'opencv':
        proj,dist = data
        if proj.shape != (3,3):
            raise ValueError("projection matrix must be a 3x3 numpy matrix")
        fx = proj[0,0]
        fy = proj[1,1]
        cx = proj[0,2]
        cy = proj[1,2]
    elif format == 'json':
        fx = data['fx']
        fy = data['fy']
        cx = data['cx']
        cy = data['cy']
    else:
        raise ValueError("Invalid format, only opencv, numpy, ros, and json are supported")
    #resolution follows from assuming the principal point is the image center
    w = int(cx*2)
    h = int(cy*2)
    #invert fx = 0.5*w/tan(xfov/2) => xfov = 2*atan(w/(2*fx)).  fixed: the
    #original computed atan(2*fx/w)*2 (the reciprocal argument), which did not
    #round-trip the values produced by camera_to_intrinsics
    xfov = 2.0*math.atan(0.5*w/fx)
    yfov = 2.0*math.atan(0.5*h/fy)
    camera.setSetting('xres',str(w))
    camera.setSetting('yres',str(h))
    camera.setSetting('xfov',str(xfov))
    camera.setSetting('yfov',str(yfov))
    return camera
def camera_ray(camera,robot,x,y):
    """Returns the (source,direction) of a ray emanating from the
    SimRobotSensor at pixel coordinates (x,y).

    If you are doing this multiple times, it's faster to convert the camera
    to GLViewport and use GLViewport.click_ray.

    Arguments:
        camera (SimRobotSensor): the camera
        robot (RobotModel): the robot on which the camera is mounted.
        x (int/float): x pixel coordinates
        y (int/float): y pixel coordinates

    Returns:
        (source,direction): world-space ray source/direction.
    """
    #one-off query: go through the GLViewport conversion
    viewport = camera_to_viewport(camera,robot)
    return viewport.click_ray(x,y)
def camera_project(camera,robot,pt,clip=True):
    """Given a point in world space, returns the (x,y,z) coordinates of the
    projected pixel.  z is given in absolute coordinates, while x,y are given
    in pixel values.

    If clip=True and the point is out of the viewing volume, then None is
    returned. Otherwise, if the point is exactly at the focal plane then the
    middle of the viewport is returned.

    If you are doing this multiple times, it's faster to convert the camera
    to GLViewport and use GLViewport.project.

    Arguments:
        camera (SimRobotSensor): the camera
        robot (RobotModel): the robot on which the camera is mounted.
        pt (3-vector): world coordinates of point
        clip (bool, optional): if true, None will be returned if the point is
            outside of the viewing volume.

    Returns:
        tuple: (x,y,z), where x,y are pixel value of image, z is depth.
    """
    #one-off query: go through the GLViewport conversion
    viewport = camera_to_viewport(camera,robot)
    return viewport.project(pt,clip)
def visible(camera,object,full=True,robot=None):
    """Tests whether the given object is visible in a SimRobotSensor or a
    GLViewport.

    If you are doing this multiple times, first convert to GLViewport.

    Args:
        camera (SimRobotSensor or GLViewport): the camera.
        object: a 3-vector, a (center,radius) pair indicating a sphere, an
            axis-aligned bounding box (bmin,bmax), a Geometry3D, or an object
            that has a geometry() method, e.g., RigidObjectModel, RobotModelLink.
        full (bool, optional): if True, the entire object must be in the
            viewing frustum for it to be considered visible. If False, any
            part of the object can be in the viewing frustum.
        robot (RobotModel): if camera is a SimRobotSensor, this will be used to
            derive the transform.

    Returns:
        bool: True if the object is (fully or partially, per ``full``) visible.
    """
    if isinstance(camera,SimRobotSensor):
        #normalize to a GLViewport so all frustum tests below are uniform
        camera = camera_to_viewport(camera,robot)
    if hasattr(object,'geometry'):
        #model objects (RigidObjectModel, RobotModelLink, ...): test their geometry
        return visible(camera,object.geometry(),full,robot)
    if hasattr(object,'__iter__'):
        if not hasattr(object[0],'__iter__'):
            #vector
            if len(object) != 3:
                raise ValueError("Object must be a 3-vector")
            #project() returns None when the point is clipped out of view
            return camera.project(object) != None
        elif hasattr(object[1],'__iter__'):
            #(bmin,bmax) axis-aligned bounding box
            if len(object[0]) != 3 or len(object[1]) != 3:
                raise ValueError("Object must be a bounding box")
            bmin,bmax = object
            if not full:
                #test whether center is in bmin,bmax
                center = vectorops.interpolate(bmin,bmax,0.5)
                cproj = camera.project(center)
                if cproj is not None:
                    return True
                #the camera origin lying inside the box also counts as visible
                if all(a <= v <= b for (a,b,v) in zip(bmin,bmax,camera.getTransform()[1])):
                    return True
            #project all 8 corners of the box (clip=full)
            points = [camera.project(bmin,full),camera.project(bmax,full)]
            pt = [bmin[0],bmin[1],bmax[2]]
            points.append(camera.project(pt,full))
            pt = [bmin[0],bmax[1],bmax[2]]
            points.append(camera.project(pt,full))
            pt = [bmin[0],bmax[1],bmin[2]]
            points.append(camera.project(pt,full))
            pt = [bmax[0],bmin[1],bmin[2]]
            points.append(camera.project(pt,full))
            pt = [bmax[0],bmin[1],bmax[2]]
            points.append(camera.project(pt,full))
            pt = [bmax[0],bmax[1],bmin[2]]
            points.append(camera.project(pt,full))
            if any(p is None for p in points):
                #with full=True a clipped corner means not fully visible
                return False
            if full:
                #every corner projected inside the frustum
                return True
            #partial visibility: reject boxes entirely before/behind the clip planes
            if min(p[2] for p in points) > camera.clippingplanes[1]:
                return False
            if max(p[2] for p in points) < camera.clippingplanes[0]:
                return False
            #keep corners in front of the camera and test against the viewport
            points = [p for p in points if p[2] > 0]
            for p in points:
                if 0 <= p[0] <= camera.w and 0 <= p[1] <= camera.h:
                    return True
            #TODO: intersection of projected polygon
            return False
        else:
            #sphere
            if len(object[0]) != 3:
                raise ValueError("Object must be a sphere")
            c,r = object
            if full:
                cproj = camera.project(c,True)
                if cproj is None: return False
                #NOTE(review): approximate projected radius in pixels; assumes
                #the focal length is comparable to camera.w -- confirm against
                #GLViewport.project's conventions
                rproj = camera.w/cproj[2]*r
                if cproj[2] - r < camera.clippingplanes[0] or cproj[2] + r > camera.clippingplanes[1]: return False
                return 0 <= cproj[0] - rproj and cproj[0] + rproj <= camera.w and 0 <= cproj[1] - rproj and cproj[1] + rproj <= camera.h
            else:
                cproj = camera.project(c,False)
                if cproj is None:
                    #center doesn't project; visible only if the sphere's surface
                    #reaches past the near plane toward the camera
                    dist = r - vectorops.distance(camera.getTransform()[1],c)
                    if dist >= camera.clippingplanes[0]:
                        return True
                    return False
                if 0 <= cproj[0] <= camera.w and 0 <= cproj[1] <= camera.h:
                    #center inside the viewport: check depth overlap with the frustum
                    if cproj[2] + r > camera.clippingplanes[0] and cproj[2] - r < camera.clippingplanes[1]:
                        return True
                    return False
                #center outside the viewport: compare the distance from the nearest
                #viewport point to the projected radius
                rproj = camera.w/cproj[2]*r
                xclosest = max(min(cproj[0],camera.w),0)
                yclosest = max(min(cproj[1],camera.h),0)
                zclosest = max(min(cproj[2],camera.clippingplanes[1]),camera.clippingplanes[0])
                return vectorops.distance((xclosest,yclosest),cproj[0:2]) <= rproj
    from klampt import Geometry3D
    if not isinstance(object,Geometry3D):
        raise ValueError("Object must be a point, sphere, bounding box, or Geometry3D")
    #fall back to the geometry's axis-aligned bounding box
    return visible(camera,object.getBB(),full,robot)
| {
"repo_name": "krishauser/Klampt",
"path": "Python/klampt/model/sensing.py",
"copies": "1",
"size": "36836",
"license": "bsd-3-clause",
"hash": -5739434249685445000,
"line_mean": 38.3967914439,
"line_max": 141,
"alpha_frac": 0.5947442719,
"autogenerated": false,
"ratio": 3.72834008097166,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9770429675772009,
"avg_score": 0.010530935419930235,
"num_lines": 935
} |
"""A collection of utility functions for dealing with sensors and sensor data.
The get/set_sensor_xform functions are used to interface cleanly with the klampt se3 functions.
The camera_to_X functions convert raw CameraSensor outputs to Python objects that are more easily
operated upon, e.g., images and point clouds.
"""
from ..robotsim import *
from ..io import loader
import coordinates
import math
import sys
from ..math import vectorops,so3,se3
import time
_has_numpy = False
_tried_numpy_import = False
np = None
def _try_numpy_import():
    """Attempts to import numpy exactly once, caching the result.

    Returns:
        bool: True if numpy is available.  On success the module-level ``np``
        alias is populated; on failure a warning is printed (once).
    """
    global _has_numpy,_tried_numpy_import
    global np
    if _tried_numpy_import:
        return _has_numpy
    _tried_numpy_import = True
    try:
        import numpy as np
        _has_numpy = True
        #sys.modules['numpy'] = numpy
    except ImportError:
        #parenthesized single-argument print behaves identically under
        #Python 2 and 3, unlike the original print statement
        print("klampt.model.sensing.py: Warning, numpy not available.")
        _has_numpy = False
    return _has_numpy
def get_sensor_xform(sensor,robot=None):
    """Extracts the transform of a SimRobotSensor.  The sensor must be
    of a link-mounted type, e.g., a CameraSensor or ForceSensor.

    Args:
        sensor (SimRobotSensor)
        robot (RobotModel, optional): if provided, returns the world
            coordinates of the sensor.  Otherwise, returns the local
            coordinates on the link to which it is mounted.

    Returns:
        klampt.se3 object: the sensor transform
    """
    #the Tsensor setting is serialized via loader.writeSe3
    local_xform = loader.readSe3(sensor.getSetting("Tsensor"))
    if robot is None:
        return local_xform
    link_index = int(sensor.getSetting("link"))
    if link_index < 0:
        #not mounted on a link: the local transform already is the result
        return local_xform
    return se3.mul(robot.link(link_index).getTransform(),local_xform)
def set_sensor_xform(sensor,T,link=None):
    """Given a link-mounted sensor (e.g., CameraSensor or ForceSensor), sets
    its link-local transform to T.

    Args:
        sensor (SimRobotSensor)
        T (se3 element or coordinates.Frame): desired local coordinates of the sensor
            on its link.
        link (int or RobotModelLink, optional): if provided, the link of the
            sensor is modified.

    Raises:
        ValueError: if the sensor has no "Tsensor" setting (i.e., it is not a
            link-mounted type).

    Another way to set a sensor is to give a coordinates.Frame object.  This
    frame must either be associated with a RobotModelLink or its parent should
    be associated with one.

    (the reason why you should use this is that the Tsensor attribute has a
    particular format using the loader.writeSe3 function.)
    """
    if isinstance(T,coordinates.Frame):
        if isinstance(T._data,RobotModelLink):
            #attach it directly to the link
            return set_sensor_xform(sensor,se3.identity(),T._data)
        parent = None
        if T.parent() is None:
            #no parent frame: treat as world coordinates (link index -1)
            parent = -1
        else:
            #assume its parent is a link?
            parent = T.parent()._data
        #recurse with the frame's transform relative to its parent
        return set_sensor_xform(sensor,T.relativeCoordinates(),parent)
    try:
        #probe for the setting first so a clearer error can be raised
        s = sensor.getSetting("Tsensor")
    except Exception:
        raise ValueError("Sensor does not have a Tsensor attribute")
    sensor.setSetting("Tsensor",loader.writeSe3(T))
    if link != None:
        if isinstance(link,RobotModelLink):
            sensor.setSetting("link",str(link.index))
        else:
            assert isinstance(link,int),"Can only set a sensor transform to a RobotModelLink or an integer link index"
            sensor.setSetting("link",str(link))
def camera_to_images(camera,image_format='numpy',color_format='channels'):
    """Given a SimRobotSensor that is a CameraSensor, returns either the RGB
    image, the depth image, or both.

    Args:
        camera (SimRobotSensor): a sensor that is of 'CameraSensor' type
        image_format (str): governs the return type. Can be:
            * 'numpy' (default): returns numpy arrays. Depending on the value of
              color_format, the RGB image either has shape (h,w,3) and dtype uint8
              or (h,w) and dtype uint32. Depth images as numpy arrays with shape
              (h,w). Will fall back to 'native' if numpy is not available.
            * 'native': returns list-of-lists arrays in the same format as above
        color_format (str): governs how pixels in the RGB result are packed. Can be:
            * 'channels' (default): returns a 3D array with 3 channels corresponding
              to R, G, B values in the range [0,255].
            * 'rgb' returns a 2D array with a 32-bit integer channel, with R,G,B
              channels packed in order XRGB.
            * 'bgr': similar to 'rgb' but with order XBGR.

    (Note that image_format='native' takes up a lot of extra memory, especially
    with color_format='channels')

    Returns:
        tuple: (rgb, depth), which are either numpy arrays or list-of-lists
        format, as specified by image_format.  If the sensor has only one of
        the two channels enabled, just that single image is returned.

        * rgb: the RGB result (packed as specified by color_format)
        * depth: the depth result (floats)
    """
    assert isinstance(camera,SimRobotSensor),"Must provide a SimRobotSensor instance"
    assert camera.type() == 'CameraSensor',"Must provide a camera sensor instance"
    w = int(camera.getSetting('xres'))
    h = int(camera.getSetting('yres'))
    has_rgb = int(camera.getSetting('rgb'))
    has_depth = int(camera.getSetting('depth'))
    #flat measurement vector: w*h packed color values (if rgb enabled),
    #followed by w*h depth values (if depth enabled)
    measurements = camera.getMeasurements()
    if image_format == 'numpy':
        if not _try_numpy_import():
            image_format = 'native'
    rgb = None
    depth = None
    if has_rgb:
        if image_format == 'numpy':
            #each pixel is a packed 32-bit value; the low byte is the red
            #channel (see the 'channels' unpacking below)
            abgr = np.array(measurements[0:w*h]).reshape(h,w).astype(np.uint32)
            if color_format == 'bgr':
                #native packed order: return as-is
                rgb = abgr
            elif color_format == 'rgb':
                #swap the low (red) and third (blue) bytes -> XRGB packing
                rgb = np.bitwise_or(np.bitwise_or(np.left_shift(np.bitwise_and(abgr,0x00000ff),16),
                                                  np.bitwise_and(abgr,0x000ff00)),
                                    np.right_shift(np.bitwise_and(abgr,0x0ff0000), 16))
            else:
                #'channels': split into an (h,w,3) uint8 array
                rgb = np.zeros((h,w,3),dtype=np.uint8)
                rgb[:,:,0] = np.bitwise_and(abgr,0x00000ff)
                rgb[:,:,1] = np.right_shift(np.bitwise_and(abgr,0x00ff00), 8)
                rgb[:,:,2] = np.right_shift(np.bitwise_and(abgr,0x0ff0000), 16)
        else:
            if color_format == 'bgr':
                #packed values, one row of the image at a time
                rgb = []
                for i in xrange(h):
                    rgb.append([int(v) for v in measurements[i*w:(i+1)*w]])
            elif color_format == 'rgb':
                def bgr_to_rgb(pixel):
                    #swap the red and blue bytes of a packed pixel
                    return ((pixel & 0x0000ff) << 16) | (pixel & 0x00ff00) | ((pixel & 0xff0000) >> 16)
                rgb = []
                for i in xrange(h):
                    rgb.append([bgr_to_rgb(int(v)) for v in measurements[i*w:(i+1)*w]])
            else:
                #'channels': each pixel becomes an [r,g,b] triple
                rgb = []
                for i in xrange(h):
                    start = i*w
                    row = []
                    for j in xrange(w):
                        pixel = int(measurements[start+j])
                        row.append([pixel&0xff,(pixel>>8)&0xff,(pixel>>16)&0xff])
                    rgb.append(row)
    if has_depth:
        #depth values follow the color block (if any) in the measurement vector
        start = (w*h if has_rgb else 0)
        if image_format == 'numpy':
            depth = np.array(measurements[start:start+w*h]).reshape(h,w)
        else:
            depth = []
            for i in xrange(h):
                depth.append(measurements[start+i*w:start+(i+1)*w])
    if has_rgb and has_depth:
        return rgb,depth
    elif has_rgb:
        return rgb
    elif has_depth:
        return depth
    return None
def camera_to_points(camera,points_format='numpy',all_points=False,color_format='channels'):
    """Given a SimRobotSensor that is a CameraSensor, returns a point cloud
    associated with the current measurements.

    Points are triangulated with respect to the camera's intrinsic coordinates,
    and are returned in the camera local frame (+z backward, +x toward the
    right, +y toward up).

    Args:
        points_format (str, optional): configures the format of the return value. Can be:
            * 'numpy' (default): either an Nx3, Nx4, or Nx6 numpy array, depending on
              whether color is requested (and its format). Will fall back to 'native'
              if numpy is not available.
            * 'native': same as numpy, but in list-of-lists format rather than numpy arrays.
            * 'PointCloud': a Klampt PointCloud object
            * 'Geometry3D': a Klampt Geometry3D point cloud object
        all_points (bool, optional): configures whether bad points should be stripped
            out. If False (default), this strips out all pixels that don't have a good
            depth reading (i.e., the camera sensor's maximum reading.)  If True, these
            pixels are all set to (0,0,0).
        color_format (str): If the sensor has an RGB component, then color channels may
            be produced. This value configures the output format, and can take on the values:
            * 'channels': produces individual R,G,B channels in the range [0,1]. (note
              this is different from the interpretation of camera_to_images)
            * 'rgb': produces a single 32-bit integer channel packing the 8-bit color
              channels together (actually BGR)
            * None: no color is produced.

    Returns:
        object: the point cloud in the requested format.
    """
    assert isinstance(camera,SimRobotSensor),"Must provide a SimRobotSensor instance"
    assert camera.type() == 'CameraSensor',"Must provide a camera sensor instance"
    assert int(camera.getSetting('depth'))==1,"Camera sensor must have a depth channel"
    has_numpy = _try_numpy_import()
    if points_format == 'numpy' and not has_numpy:
        points_format = 'native'
    images = camera_to_images(camera,'numpy',color_format)
    assert images != None
    rgb,depth = None,None
    if int(camera.getSetting('rgb'))==0:
        depth = images
        color_format = None
    else:
        rgb,depth = images
    w = int(camera.getSetting('xres'))
    h = int(camera.getSetting('yres'))
    xfov = float(camera.getSetting('xfov'))
    yfov = float(camera.getSetting('yfov'))
    zmin = float(camera.getSetting('zmin'))
    zmax = float(camera.getSetting('zmax'))
    #per-pixel ray scaling about the image center
    xshift = -w*0.5
    yshift = -h*0.5
    xscale = math.tan(xfov*0.5)/(w*0.5)
    #yscale = -1.0/(math.tan(yfov*0.5)*h/2)
    yscale = xscale #square pixels are assumed
    xs = [(j+xshift)*xscale for j in range(w)]
    ys = [(i+yshift)*yscale for i in range(h)]
    if has_numpy:
        if all_points:
            #keep every pixel, but zero out those at/beyond the max range
            depth[depth >= zmax] = 0
        if color_format == 'channels':
            #scale to range [0,1]
            rgb = rgb*(1.0/255.0)
        xgrid = np.repeat(np.array(xs).reshape((1,w)),h,0)
        ygrid = np.repeat(np.array(ys).reshape((h,1)),w,1)
        assert xgrid.shape == (h,w)
        assert ygrid.shape == (h,w)
        pts = np.dstack((np.multiply(xgrid,depth),np.multiply(ygrid,depth),depth))
        assert pts.shape == (h,w,3)
        if color_format is not None:
            if len(rgb.shape) == 2:
                rgb = rgb.reshape(rgb.shape[0],rgb.shape[1],1)
            pts = np.concatenate((pts,rgb),2)
        #now have a nice array containing all points, shaped h x w x (3+c)
        #extract out the valid points from this array
        if all_points:
            pts = pts.reshape(w*h,pts.shape[2])
        else:
            pts = pts[depth < zmax]
        if points_format == 'native':
            return pts.tolist()
        elif points_format == 'numpy':
            return pts
        elif points_format == 'PointCloud' or points_format == 'Geometry3D':
            res = PointCloud()
            if all_points:
                #structured cloud: record the original image dimensions
                res.setSetting('width',str(w))
                res.setSetting('height',str(h))
            res.setPoints(pts.shape[0],pts[:,0:3].flatten().tolist())
            if color_format == 'rgb':
                res.addProperty('rgb')
                res.setProperties(pts[:,3].flatten().tolist())
            elif color_format == 'channels':
                res.addProperty('r')
                res.addProperty('g')
                res.addProperty('b')
                res.setProperties(pts[:,3:6].flatten().tolist())
            elif color_format == 'bgr':
                raise ValueError("bgr color format not supported with PointCloud output")
            if points_format == 'PointCloud':
                return res
            else:
                g = Geometry3D()
                g.setPointCloud(res)
                return g
        else:
            raise ValueError("Invalid points_format "+points_format)
        #fixed: removed unreachable "return Nnoe" (a NameError typo for None);
        #every branch above either returns or raises
    else:
        raise NotImplementedError("Native format depth image processing not done yet")
def camera_to_points_world(camera,robot,points_format='numpy',color_format='channels'):
    """Same as :meth:`camera_to_points`, but converts to the world coordinate
    system given the robot to which the camera is attached.

    Points that have no reading are stripped out.

    Args:
        camera (SimRobotSensor): a 'CameraSensor' type sensor with a depth channel.
        robot (RobotModel): the robot on which the camera is mounted.
        points_format (str, optional): see :meth:`camera_to_points`.
        color_format (str, optional): see :meth:`camera_to_points`.

    Returns:
        object: the world-space point cloud, in the requested format.
    """
    assert isinstance(camera,SimRobotSensor),"Must provide a SimRobotSensor instance"
    assert camera.type() == 'CameraSensor',"Must provide a camera sensor instance"
    #world transform of the camera (link transform composed with local sensor transform)
    #fixed: removed dead code that re-read and parsed the 'Tsensor' setting by hand
    Tworld = get_sensor_xform(camera,robot)
    #camera-local points, with invalid-depth pixels stripped out
    pts = camera_to_points(camera,points_format,all_points=False,color_format=color_format)
    if points_format == 'numpy':
        #transform the xyz columns in one vectorized operation; color columns untouched
        Rw = np.array(so3.matrix(Tworld[0]))
        tw = np.array(Tworld[1])
        pts[:,0:3] = np.dot(pts[:,0:3],Rw.T) + tw
    elif points_format == 'native':
        for p in pts:
            p[0:3] = se3.apply(Tworld,p[0:3])
    elif points_format == 'PointCloud' or points_format == 'Geometry3D':
        pts.transform(*Tworld)
    else:
        raise ValueError("Invalid format "+str(points_format))
    return pts
def camera_to_viewport(camera,robot):
    """Returns a GLViewport instance corresponding to the camera's view.

    See klampt.vis.glprogram and klampt.vis.visualization for information about
    how to use the object with the visualization, e.g. `vis.setViewport(vp)`.

    Returns:
        GLViewport: the camera's current viewport.
    """
    assert isinstance(camera,SimRobotSensor),"Must provide a SimRobotSensor instance"
    assert camera.type() == 'CameraSensor',"Must provide a camera sensor instance"
    from ..vis.glprogram import GLViewport
    get = camera.getSetting
    width,height = int(get('xres')),int(get('yres'))
    fov_x = float(get('xfov'))
    fov_y = float(get('yfov'))   #read for parity with the other settings
    near,far = float(get('zmin')),float(get('zmax'))
    cam_xform = get_sensor_xform(camera,robot)
    vp = GLViewport()
    vp.w, vp.h = width,height
    vp.fov = math.degrees(fov_x)
    vp.camera.dist = 1.0
    #look-at target one unit along the camera's viewing axis
    vp.camera.tgt = se3.apply(cam_xform,[0,0,vp.camera.dist])
    #axes corresponding to right, down, fwd in camera view
    vp.camera.set_orientation(cam_xform[0],['x','y','z'])
    vp.clippingplanes = (near,far)
    return vp
| {
"repo_name": "krishauser/Klampt",
"path": "Python/python2_version/klampt/model/sensing.py",
"copies": "1",
"size": "15248",
"license": "bsd-3-clause",
"hash": 2966842061521503000,
"line_mean": 40.5476839237,
"line_max": 122,
"alpha_frac": 0.6081453305,
"autogenerated": false,
"ratio": 3.8053406538557524,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4913485984355752,
"avg_score": null,
"num_lines": null
} |
"""A collection of utility functions for Flow."""
import csv
import errno
import os
from lxml import etree
from xml.etree import ElementTree
def makexml(name, nsl):
    """Create an xml root element with an xsi:noNamespaceSchemaLocation.

    Parameters
    ----------
    name : str
        tag name of the root element
    nsl : str
        value of the noNamespaceSchemaLocation attribute

    Returns
    -------
    lxml element
        the new root element
    """
    xsi_uri = "http://www.w3.org/2001/XMLSchema-instance"
    location_key = "{%s}noNamespaceSchemaLocation" % xsi_uri
    return etree.Element(
        name,
        attrib={location_key: nsl},
        nsmap={"xsi": xsi_uri})
def printxml(t, fn):
    """Write the xml element ``t`` to file ``fn``, pretty-printed with an
    XML declaration."""
    document = etree.ElementTree(t)
    document.write(
        fn, pretty_print=True, encoding='UTF-8', xml_declaration=True)
def ensure_dir(path):
    """Ensure that the directory specified exists, and if not, create it."""
    try:
        os.makedirs(path)
    except OSError as err:
        # an already-existing directory is fine; anything else propagates
        if err.errno != errno.EEXIST:
            raise
    return path
def emission_to_csv(emission_path, output_path=None):
    """Convert an emission file generated by sumo into a csv file.

    Note that the emission file contains information generated by sumo, not
    flow. This means that some data, such as absolute position, is not
    immediately available from the emission file, but can be recreated.

    Parameters
    ----------
    emission_path : str
        path to the emission file that should be converted
    output_path : str
        path to the csv file that will be generated, default is the same
        directory as the emission file, with the same name
    """
    #recover=True lets the parser tolerate minor malformations in sumo output
    parser = etree.XMLParser(recover=True)
    tree = ElementTree.parse(emission_path, parser=parser)
    root = tree.getroot()

    # parse the xml data into a dict
    out_data = []
    for time in root.findall('timestep'):
        t = float(time.attrib['time'])
        for car in time:
            out_data.append(dict())
            try:
                out_data[-1]['time'] = t
                out_data[-1]['CO'] = float(car.attrib['CO'])
                out_data[-1]['y'] = float(car.attrib['y'])
                out_data[-1]['CO2'] = float(car.attrib['CO2'])
                out_data[-1]['electricity'] = float(car.attrib['electricity'])
                out_data[-1]['type'] = car.attrib['type']
                out_data[-1]['id'] = car.attrib['id']
                out_data[-1]['eclass'] = car.attrib['eclass']
                out_data[-1]['waiting'] = float(car.attrib['waiting'])
                out_data[-1]['NOx'] = float(car.attrib['NOx'])
                out_data[-1]['fuel'] = float(car.attrib['fuel'])
                out_data[-1]['HC'] = float(car.attrib['HC'])
                out_data[-1]['x'] = float(car.attrib['x'])
                out_data[-1]['route'] = car.attrib['route']
                out_data[-1]['relative_position'] = float(car.attrib['pos'])
                out_data[-1]['noise'] = float(car.attrib['noise'])
                out_data[-1]['angle'] = float(car.attrib['angle'])
                out_data[-1]['PMx'] = float(car.attrib['PMx'])
                out_data[-1]['speed'] = float(car.attrib['speed'])
                #the sumo lane id has the form "<edge>_<lane number>"
                out_data[-1]['edge_id'] = car.attrib['lane'].rpartition('_')[0]
                out_data[-1]['lane_number'] = car.attrib['lane'].\
                    rpartition('_')[-1]
            except KeyError:
                #vehicles missing any expected attribute are dropped entirely
                del out_data[-1]

    # sort the elements of the dictionary by the vehicle id
    out_data = sorted(out_data, key=lambda k: k['id'])

    # default output path
    if output_path is None:
        output_path = emission_path[:-3] + 'csv'

    # output the dict data into a csv file
    keys = out_data[0].keys()
    with open(output_path, 'w') as output_file:
        dict_writer = csv.DictWriter(output_file, keys)
        dict_writer.writeheader()
        dict_writer.writerows(out_data)
| {
"repo_name": "cathywu/flow",
"path": "flow/core/util.py",
"copies": "1",
"size": "3706",
"license": "mit",
"hash": 7110366608710657000,
"line_mean": 36.4343434343,
"line_max": 79,
"alpha_frac": 0.5715056665,
"autogenerated": false,
"ratio": 3.7208835341365463,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9792389200636546,
"avg_score": 0,
"num_lines": 99
} |
"""A collection of utility functions"""
import json
import os
def parse_quantity(q):
    """Convert an Onshape units definition into a unit-engine string.

    Args:
        q: a dict with a numeric "value" and a "unitToPower" list of
           {"key": <UNIT NAME>, "value": <power>} entries, e.g.
           {'typeTag': '', 'value': 0.0868, 'unitToPower': [{'key': 'METER', 'value': 1}]}

    Returns:
        A string such as '0.1414213562373095*millimeter**3' that can be
        handed to any other unit engine.

    >>> d = {'value': 0.1414213562373095, 'unitToPower': [{'value': 1, 'key': 'METER'}], 'typeTag': ''}
    >>> parse_quantity(d)
    '0.1414213562373095*meter'
    >>> d = {'value': 0.1414213562373095, 'unitToPower': [{'value': 3, 'key': 'MILLIMETER'}], 'typeTag': ''}
    >>> parse_quantity(d)
    '0.1414213562373095*millimeter**3'
    """
    parts = [str(q["value"])]
    for unit in q["unitToPower"]:
        parts.append("*" + unit["key"].lower())
        exponent = unit["value"]
        # Powers of 1 are implicit; anything else is spelled out.
        if exponent != 1:
            parts.append("**" + str(exponent))
    return "".join(parts)
def get_field(response, field):
    """Return a single field from a JSON HTTP response body."""
    parsed = load_json(response)
    return parsed[field]
def load_json(response):
    """Decode an HTTP response body (UTF-8 bytes) into a Python object."""
    return json.loads(response.data.decode("UTF-8"))
def write_to_file(data_uri):
    """Decode a base64 data URI and write it to ./tmp/<name>.

    The file name is taken from the ``name=`` field of the URI header;
    '+' characters in the name are turned back into spaces.

    Returns:
        The path of the written file (cwd + '/tmp/' + name).
    """
    from base64 import b64decode
    import re
    header, encoded = data_uri.split(",", 1)
    data = b64decode(encoded)
    pattern = re.compile(r"""name=([^;]*)""")
    # NOTE(review): assumes the header always carries a name= field;
    # pattern.search() returning None would raise AttributeError here.
    name = pattern.search(header).groups()[0]
    name = name.replace("+", " ")
    tmp_path = "tmp/"
    # Create the target directory if missing, without swallowing unrelated
    # errors (the original caught BaseException, hiding even Ctrl-C).
    os.makedirs(tmp_path, exist_ok=True)
    file_path = os.getcwd() + "/" + tmp_path + name
    # 'with' guarantees the handle is closed even if the write fails.
    with open(file_path, "wb") as f:
        f.write(data)
    return file_path
| {
"repo_name": "onshape-public/onshape-clients",
"path": "python/onshape_client/utility.py",
"copies": "1",
"size": "1882",
"license": "mit",
"hash": 304836531274704060,
"line_mean": 25.5070422535,
"line_max": 108,
"alpha_frac": 0.537194474,
"autogenerated": false,
"ratio": 3.453211009174312,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4490405483174312,
"avg_score": null,
"num_lines": null
} |
"""A collection of utility functions."""
import importlib
import logging
def string_to_class(class_string):
    """Convert a string to a python class.

    The string is first split into a module and a class name. For
    example, 'dummy.package.FakeClass' would be split into
    'dummy.package' and 'FakeClass'. The package/module are then
    imported, and the class is returned.

    Args:
        class_string (str):
            The full string name of the class to import. This should
            include the package and module if applicable.

    Returns:
        Class:
            If the path exists, the python class in the location given
            by `class_string` is returned.

    Raises:
        ImportError: If the path `class_string` doesn't exist.
        ValueError: If `class_string` is not a fully qualified name.
            eg: `DummyClass` instead of `module.DummyClass`.
    """
    logger = logging.getLogger(__name__)
    if '.' not in class_string:
        # Fixed typo in both messages: "qualifed" -> "qualified".
        logger.error(
            "'{}' is not a fully qualified class name".format(class_string))
        raise ValueError("'class_string' must be a fully qualified name.")
    module_name, class_name = class_string.rsplit('.', 1)
    try:
        module = importlib.import_module(module_name)
    except ImportError:
        logger.error(
            "Could not import '{}'".format(module_name), exc_info=True)
        raise
    try:
        class_obj = getattr(module, class_name)
    except AttributeError:
        error_msg = "Could not import '{}' from '{}'.".format(
            class_name, module_name)
        logger.error(error_msg, exc_info=True)
        raise ImportError(error_msg)
    # Fixed typo: "Succesfully" -> "Successfully".
    logger.debug("Successfully imported '{}' from '{}'.".format(
        class_name, module_name))
    return class_obj
| {
"repo_name": "cdriehuys/django_helpcenter",
"path": "helpcenter/utils.py",
"copies": "2",
"size": "1804",
"license": "mit",
"hash": 4892123378920879000,
"line_mean": 28.5737704918,
"line_max": 75,
"alpha_frac": 0.6247228381,
"autogenerated": false,
"ratio": 4.214953271028038,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5839676109128038,
"avg_score": null,
"num_lines": null
} |
"""A collection of utility functions."""
import json
from os import path
import click
from docker.client import Client
from docker.utils import kwargs_from_env
_LOG_LEVEL = 0  # current verbosity; mutated only via set_log_level()
# Styled prompt prefixes, keyed by log level (0=default, 1=info, 2=debug).
LOG_PREFIXES = {
    0: click.style('>', fg='white', bold=True),
    1: click.style('>>', fg='cyan', bold=True),
    2: click.style('>>>', fg='blue', bold=True),
}
_DOCKER_CLIENT = None  # lazily-created singleton, see docker_client()
def set_log_level(log_level):
    """Set the module-wide log level.

    Log levels are provided as an integer:
        0: The default log level
        1: Info level logging
        2: Debug level logging

    Returns True when the level was applied, False for any other value.
    """
    global _LOG_LEVEL  # pylint: disable = global-statement
    if log_level in (0, 1, 2):
        _LOG_LEVEL = log_level
        return True
    return False
def get_log_level():
    """Get the current log level (0, 1 or 2; see set_log_level)."""
    return _LOG_LEVEL
def _print(message, log_level, prefix, **kwargs):
    """Emit *message* via click when *log_level* is within the current verbosity.

    Returns None when the message is suppressed.
    """
    if log_level > get_log_level():
        return None
    if prefix:
        # The styled prefix is written first, on the same line.
        click.secho('{} '.format(LOG_PREFIXES[log_level]), nl=False)
    return click.secho(message, **kwargs)
def log(message, prefix=True, **kwargs):
    """Print a message at the default (always visible) level."""
    return _print(message, 0, prefix, **kwargs)
def info(message, prefix=True, **kwargs):
    """Print a message at the info level (shown when verbosity >= 1)."""
    return _print(message, 1, prefix, **kwargs)
def debug(message, prefix=True, **kwargs):
    """Print a message at the debug level (shown when verbosity >= 2)."""
    return _print(message, 2, prefix, **kwargs)
def docker_client():
    """Return the shared Docker client, creating it on first use."""
    global _DOCKER_CLIENT  # pylint: disable = global-statement
    if not _DOCKER_CLIENT:
        # assert_hostname=False is required when using boot2docker, it's taken as a hint from Fig: https://github.com/docker/fig/blob/master/compose/cli/docker_client.py#L29
        _DOCKER_CLIENT = Client(**kwargs_from_env(assert_hostname=False))
    return _DOCKER_CLIENT
def pull_image(wanted_image):
    """Download Docker image if it isn't already local.

    Returns True when the image is (or becomes) available locally,
    False when the pull stream reported an error.
    """
    # Docker treats a missing tag as ":latest"; normalize so the RepoTags
    # comparison below can match exactly.
    if ':' not in wanted_image:
        wanted_image = '{}:latest'.format(wanted_image)
    debug('Checking if "{}" is among the local images in Docker.'.format(wanted_image))
    for image in docker_client().images(name=wanted_image.split(':')[0]):
        if wanted_image in image['RepoTags']:
            info('Docker image found.')
            return True
    log('The Docker image "{}" was not found locally, pulling it: '.format(wanted_image), nl=False)
    # The pull API streams JSON progress lines; only a line carrying an
    # 'error' key aborts the pull.
    for data in docker_client().pull(wanted_image, stream=True):
        data = data.decode('utf-8')
        line = json.loads(data)
        if not line.get('error'):
            continue
        log('Failed! Error message follows:', fg='red', bold=True, prefix=False)
        log('    {}'.format(line['error']), err=True, prefix=False)
        return False
    log('Done.', fg='green', prefix=False)
    return True
def convert_volumes_list(volumes):
    """Map local volume paths (made absolute) to their container paths.

    Returns a dict in the binds format Docker loves.
    """
    return {path.abspath(local): container
            for local, container in volumes.items()}
| {
"repo_name": "FalconSocial/berth",
"path": "berth/utils.py",
"copies": "1",
"size": "3301",
"license": "mit",
"hash": 7364309166968798000,
"line_mean": 29.8504672897,
"line_max": 173,
"alpha_frac": 0.6419266889,
"autogenerated": false,
"ratio": 3.746878547105562,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4888805236005562,
"avg_score": null,
"num_lines": null
} |
""" A collection of utility methods
:Authors: Sana dev team
:Version: 1.1
"""
import os, sys, traceback
import time
import logging
import cjson
from django.conf import settings
from django.http import HttpResponse
from sana import handler
from sana.mrs.models import RequestLog
LOGGING_ENABLE_ATTR = 'LOGGING_ENABLE'
LOGGING_START_TIME_ATTR = 'LOGGING_START_TIME'
def enable_logging(f):
    """ Decorator to enable logging on a Django request method.

    Sets the LOGGING_ENABLE_ATTR flag on the incoming request so that
    LoggingMiddleware.process_response persists a RequestLog entry for it.
    """
    def new_f(*args, **kwargs):
        # By Django convention the request is the first positional argument.
        request = args[0]
        setattr(request, LOGGING_ENABLE_ATTR, True)
        return f(*args, **kwargs)
    # Python 2 style: carry over the wrapped function's name.
    new_f.func_name = f.func_name
    return new_f
def trace(f):
    """Decorator to add traces to a method.

    Logs an ENTER line before and an EXIT line after every call.
    """
    def new_f(*args, **kwargs):
        # 'mac'/'type' are extra record fields expected by the log handlers.
        extra = {'mac':'', 'type':''}
        logging.info("TRACE %s ENTER" % f.func_name,extra=extra)
        result = f(*args, **kwargs)
        logging.info("TRACE %s EXIT" % f.func_name,extra=extra)
        return result
    # Python 2 style: carry over the wrapped function's name.
    new_f.func_name = f.func_name
    return new_f
def log_json_detail(request, log_id):
    """Return the stored RequestLog entry *log_id* as a JSON HTTP response.

    The stored message is itself JSON text, so it is decoded and re-wrapped
    together with its id before being re-encoded for the response.
    """
    log = RequestLog.objects.get(pk=log_id)
    message = {'id': log_id,
               'data': cjson.decode(log.message,True)}
    return HttpResponse(cjson.encode(message))
class LoggingMiddleware(object):
    """Logs exceptions with tracebacks with the standard logging module.

    Buffers every log record emitted during a request (via a
    thread-buffered handler) and persists them as one RequestLog row,
    either on unhandled exception or — for requests flagged with
    LOGGING_ENABLE_ATTR — on response.
    """
    def __init__(self):
        # Root-level handler so records from all loggers are captured.
        self._handler = handler.ThreadBufferedHandler()
        logging.root.setLevel(logging.NOTSET)
        logging.root.addHandler(self._handler)

    def process_exception(self, request, exception):
        """Persist all buffered records when an unhandled exception occurs."""
        extra = {'mac':'', 'type':''}
        logging.error("An unhandled exception occurred: %s" % repr(exception),
                      extra=extra)
        # -1 marks an unknown duration (start time attribute missing).
        time_taken = -1
        if hasattr(request, LOGGING_START_TIME_ATTR):
            start = getattr(request, LOGGING_START_TIME_ATTR)
            time_taken = time.time() - start
        records = self._handler.get_records()
        # The first record anchors the per-record time deltas.
        first = records[0] if len(records) > 0 else None
        records = [self._record_to_json(record, first) for record in records]
        message = cjson.encode(records)
        log_entry = RequestLog(uri=request.path,
                               message=message,
                               duration=time_taken)
        log_entry.save()

    def process_request(self, request):
        """Stamp the start time and reset the record buffer for this request."""
        setattr(request, LOGGING_START_TIME_ATTR, time.time())
        self._handler.clear_records()
        return None

    def _time_humanize(self, seconds):
        # e.g. 0.123s
        return "%.3fs" % seconds

    def _record_delta(self, this, first):
        # Offset of a record's timestamp from the first record's.
        return self._time_humanize(this - first)

    def _record_to_json(self, record, first):
        """Flatten one LogRecord into a JSON-serializable dict."""
        return {'filename': record.filename,
                'timestamp': record.created,
                'level_name': record.levelname,
                'level_number': record.levelno,
                'module': record.module,
                'function_name': record.funcName,
                'line_number': record.lineno,
                'message': record.msg,
                'delta': self._record_delta(record.created, first.created)
                }

    def process_response(self, request, response):
        """Persist buffered records for requests flagged by @enable_logging."""
        if not hasattr(request, LOGGING_ENABLE_ATTR):
            return response
        time_taken = -1
        if hasattr(request, LOGGING_START_TIME_ATTR):
            start = getattr(request, LOGGING_START_TIME_ATTR)
            time_taken = time.time() - start
        records = self._handler.get_records()
        first = records[0] if len(records) > 0 else None
        records = [self._record_to_json(record, first) for record in records]
        message = cjson.encode(records)
        log_entry = RequestLog(uri=request.path,
                               message = message,
                               duration=time_taken)
        log_entry.save()
        return response
def log_traceback(logging):
    """Prints the traceback for the most recently caught exception to the log
    and returns a nicely formatted message.

    :logging: a logging module or logger exposing .error()
    """
    et, val, tb = sys.exc_info()
    trace = traceback.format_tb(tb)
    stack = traceback.extract_tb(tb)
    # BUG FIX: the original passed each extracted stack entry back into
    # traceback.format_tb(), which expects a traceback object and raises.
    # format_list() is the correct API for pre-extracted entries.
    for line in traceback.format_list(stack):
        logging.error(line)
    return "Exception : %s %s %s" % (et, val, trace[0])
def flush(flushable):
    """ Removes data stored for a model instance cached in this servers data
    stores

    flushable => a instance of a class which provides a flush method
    """
    # Per-class switch, e.g. FLUSH_PATIENT for a Patient instance.
    flush_setting = 'FLUSH_'+flushable.__class__.__name__.upper()
    # NOTE(review): getattr without a default raises AttributeError when the
    # setting is undefined -- confirm every flushable class has its setting.
    if getattr(settings, flush_setting):
        flushable.flush()
def mark(module, line, *args):
    """In-code tracing helper for debugging: prints a location plus values."""
    message = 'Mark %s.%s: %s' % (module, line, args)
    print(message)
#-------------------------------------------------------------------------------
# File utilities
#-------------------------------------------------------------------------------
kilobytes = 1024
megabytes = 1000 * kilobytes  # NOTE(review): mixes 1000/1024 factors -- confirm intended
chunksize = 100 * kilobytes   # default chunk size used by split()
# Default binary input/output locations under the Django media root.
_ipath = settings.MEDIA_ROOT + 'binary/'
_opath = settings.MEDIA_ROOT + 'binary/chunk/'
def split(fin, path, chunksize=chunksize):
    """ Splits a file into a number of smaller chunks

    fin => path of the file to split
    path => directory receiving the chunk files (created if missing)
    chunksize => maximum size in bytes of each chunk

    The chunk count is returned (see the trailing return statement).
    """
    # Debug echo of the arguments.
    print (fin, path, chunksize)
    if not os.path.exists(path):
        os.mkdir(path)
    partnum = 0
    instream = open(fin, "rb")
    while True:
        chunk = instream.read(chunksize)
        if not chunk:
            # EOF reached.
            break
        partnum += 1
        # Chunks are named chunk1, chunk2, ... in write order.
        outfile = os.path.join(path,('chunk%s' % partnum))
        fobj = open(outfile, 'wb')
        fobj.write(chunk)
        fobj.close()
    instream.close()
return partnum | {
"repo_name": "SanaMobile/middleware_mds_v1",
"path": "src/mds/mrs/util.py",
"copies": "1",
"size": "5691",
"license": "bsd-3-clause",
"hash": 8058107212505114000,
"line_mean": 30.6222222222,
"line_max": 80,
"alpha_frac": 0.58214725,
"autogenerated": false,
"ratio": 3.9658536585365853,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9998572186197535,
"avg_score": 0.009885744467810034,
"num_lines": 180
} |
""" A collection of utility methods
:Authors: Sana dev team
:Version: 1.1
"""
import os, sys, traceback
import time
import logging
import cjson
from django.conf import settings
LOGGING_ENABLED = 'LOGGING_ENABLE'
LOGGING_START = 'LOGGING_START_TIME'
def trace(f):
    """Decorator to add traces to a method.

    Logs an ENTER line before and an EXIT line after every call.
    """
    def new_f(*args, **kwargs):
        # 'mac'/'type' are extra record fields expected by the log handlers.
        extra = {'mac':'', 'type':''}
        logging.info("TRACE %s ENTER" % f.func_name,extra=extra)
        result = f(*args, **kwargs)
        logging.info("TRACE %s EXIT" % f.func_name,extra=extra)
        return result
    # Python 2 style: carry over the wrapped function's name.
    new_f.func_name = f.func_name
    return new_f
def log_traceback(logging):
    """Prints the traceback for the most recently caught exception to the log
    and returns a nicely formatted message.

    :logging: a logging module or logger exposing .error()
    """
    et, val, tb = sys.exc_info()
    trace = traceback.format_tb(tb)
    stack = traceback.extract_tb(tb)
    # BUG FIX: the original passed each extracted stack entry back into
    # traceback.format_tb(), which expects a traceback object and raises.
    # format_list() is the correct API for pre-extracted entries.
    for line in traceback.format_list(stack):
        logging.error(line)
    return "Exception : %s %s %s" % (et, val, trace[0])
def flush(flushable):
    """ Removes data stored for a model instance cached in this servers data
    stores

    flushable => a instance of a class which provides a flush method
    """
    # Per-class switch, e.g. FLUSH_PATIENT for a Patient instance.
    flush_setting = 'FLUSH_'+flushable.__class__.__name__.upper()
    # NOTE(review): getattr without a default raises AttributeError when the
    # setting is undefined -- confirm every flushable class has its setting.
    if getattr(settings, flush_setting):
        flushable.flush()
def mark(module, line, *args):
    """In-code tracing helper for debugging: prints a location plus values."""
    message = 'Mark %s.%s: %s' % (module, line, args)
    print(message)
| {
"repo_name": "dekatzenel/team-k",
"path": "mds/api/v1/util.py",
"copies": "3",
"size": "1518",
"license": "bsd-3-clause",
"hash": -4011071215848655400,
"line_mean": 23.4838709677,
"line_max": 78,
"alpha_frac": 0.628458498,
"autogenerated": false,
"ratio": 3.5886524822695036,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5717110980269503,
"avg_score": null,
"num_lines": null
} |
"""A collection of utility methods used by various parts of the bidon package."""
import json
import re
import sys
from contextlib import contextmanager
from uuid import UUID
from . import convert
from . import date
_JSON_DEFAULTS = []  # (predicate, converter) pairs added via register_json_default

# pylint: disable=invalid-name
# Failure-tolerant parser wrappers built from the convert/date helpers;
# presumably each returns a fallback instead of raising -- see try_wrapper.
try_parse_int = convert.try_wrapper(convert.to_int)
try_parse_float = convert.try_wrapper(convert.to_float)
try_parse_decimal = convert.try_wrapper(convert.to_decimal)
try_parse_date = convert.try_wrapper(date.parse_date)
try_parse_time = convert.try_wrapper(date.parse_time)
try_parse_datetime = convert.try_wrapper(date.parse_datetime)
# pylint: enable=invalid-name
# Matches the canonical hyphenated lowercase form of a UUID.
IS_UUID_RE = re.compile(r"^[a-z0-9]{8}(-[a-z0-9]{4}){3}-[a-z0-9]{12}$")


def is_uuid(val):
    """Checks whether the value is either an instance of UUID, or if it matches a uuid regex.

    :val: the value to check (falsy values are returned unchanged)"""
    if not val:
        return val
    if isinstance(val, UUID):
        return True
    return IS_UUID_RE.match(val) is not None
def exclude(source, keys, *, transform=None):
    """Returns a dictionary excluding keys from a source dictionary.

    :source: a dictionary
    :keys: a set of keys, or a predicate function accepting a key
    :transform: an optional function applied to each kept value
    """
    excluded = keys if callable(keys) else (lambda key: key in keys)
    result = {}
    for key, value in source.items():
        if excluded(key):
            continue
        result[key] = transform(value) if transform else value
    return result
def pick(source, keys, *, transform=None):
    """Returns a dictionary with only the specified keys of a source dictionary.

    :source: a dictionary
    :keys: a set of keys, or a predicate function accepting a key
    :transform: an optional function applied to each kept value
    """
    wanted = keys if callable(keys) else (lambda key: key in keys)
    picked = {}
    for key in source:
        if wanted(key):
            picked[key] = transform(source[key]) if transform else source[key]
    return picked
def register_json_default(pred, convert_):
    """Register a predicate and converter function for json defaults.

    Registered pairs are consulted by json_default() in registration order.

    :pred: returns True when a value should be converted using :convert_:
    :convert_: maps the value to something JSON-serializable
    """
    entry = (pred, convert_)
    _JSON_DEFAULTS.append(entry)
def json_default(obj):
    """Convert an object to JSON via the defaults set with register_json_default.

    :obj: the object to convert
    """
    for pred, converter in _JSON_DEFAULTS:
        if pred(obj):
            return converter(obj)
    raise TypeError(repr(obj) + " is not JSON serializable")
def to_json(obj, pretty=False):
    """Converts an object to JSON, using the defaults from register_json_default.

    :obj: the object to convert to JSON
    :pretty: True for readable output, or a (sort_keys, indent, separators) tuple
    """
    if isinstance(pretty, tuple):
        sort_keys, indent, separators = pretty
    elif pretty is True:
        sort_keys, indent, separators = True, 2, (", ", ": ")
    else:
        sort_keys, indent, separators = False, None, (",", ":")
    return json.dumps(obj, sort_keys=sort_keys, indent=indent, separators=separators,
                      default=json_default)
def has_value(obj, name):
    """A flexible method for getting values from objects by name.

    returns:
      - obj is None: (False, None)
      - obj is dict: (name in obj, obj.get(name))
      - obj hasattr(name): (True, getattr(obj, name))
      - else: (False, None)

    :obj: the object to pull values from
    :name: the name to use when getting the value
    """
    if obj is None:
        return (False, None)
    if isinstance(obj, dict):
        return (name in obj, obj.get(name))
    if hasattr(obj, name):
        return (True, getattr(obj, name))
    if hasattr(obj, "__getitem__") and hasattr(obj, "__contains__") and name in obj:
        return (True, obj[name])
    return (False, None)
def get_value(obj, name, fallback=None):
    """Return has_value(obj, name)[1] when present, otherwise the fallback.

    :obj: the object to pull values from
    :name: the name to use when getting the value
    :fallback: returned (or called, if callable) when the name is absent
    """
    present, value = has_value(obj, name)
    if not present:
        return fallback() if callable(fallback) else fallback
    return value
def set_value(obj, name, value):
    """A flexible method for setting a value on an object.

    Uses item assignment when obj supports __setitem__ (e.g. a dict),
    attribute assignment otherwise.

    :obj: the object to set the value on
    :name: the name to assign the value to
    :value: the value to assign
    """
    if hasattr(obj, "__setitem__"):
        obj[name] = value
        return
    setattr(obj, name, value)
def with_defaults(method, nparams, defaults=None):
    """Call method with nparams positional args, padding missing ones with None.

    :method: the method to call
    :nparams: the number of parameters the function expects
    :defaults: values for the leading len(defaults) parameters
    """
    if defaults:
        args = list(defaults) + [None] * max(nparams - len(defaults), 0)
    else:
        args = [None] * nparams
    return method(*args)
def namedtuple_with_defaults(ntup, defaults=None):
    """Wraps with_defaults for a named tuple.

    :ntup: the namedtuple constructor
    :defaults: values for the leading fields; remaining fields become None
    """
    field_count = len(ntup._fields)
    return with_defaults(ntup, field_count, defaults)
def delegate(from_owner, to_owner, methods):
    """Create pass-through methods on from_owner for each name in methods.

    Each generated method forwards to the same-named method on the
    attribute named to_owner.
    """
    for method_name in methods:
        _delegate(from_owner, to_owner, method_name)
def _delegate(from_owner, to_owner, method):
"""Creates a method on from_owner to calls through to the same method on to_owner.
:from_owner: the object to delegate to
:to_owner: the owner on which to delegate from
:methods: the method to delegate
"""
dgate = lambda self, *args, **kwargs: getattr(getattr(self, to_owner), method)(*args, **kwargs)
dgate.__name__ = method
dgate.__doc__ = "Delegates to {0}.{1}: {2}".format(to_owner, method, method.__doc__)
setattr(from_owner, method, dgate)
def flatten_dict(source, ancestors=None):
    """Flattens a dictionary into (key, value) tuples, where key is a tuple
    of ancestor keys.

    :source: the root dictionary
    :ancestors: the tuple of ancestors for every key in :source:"""
    prefix = ancestors or ()
    for key, value in source.items():
        if isinstance(value, dict):
            yield from flatten_dict(value, prefix + (key,))
        else:
            yield (prefix + (key,), value)
def esc_split(text, delimiter=" ", maxsplit=-1, escape="\\", *, ignore_empty=False):
    """Split *text* on *delimiter*, treating escape-prefixed delimiters as
    literal characters.

    :maxsplit: stop splitting after this many yields (-1 for unlimited)
    :ignore_empty: drop empty pieces produced by adjacent delimiters
    """
    pending_escape = False
    splits_done = 0
    piece = []
    for ch in text:
        if pending_escape:
            pending_escape = False
            piece.append(ch)
        elif ch == escape:
            pending_escape = True
        elif ch in delimiter and splits_done != maxsplit:
            if piece or not ignore_empty:
                yield "".join(piece)
                splits_done += 1
            piece = []
        else:
            piece.append(ch)
    yield "".join(piece)
def esc_join(iterable, delimiter=" ", escape="\\"):
    """Join an iterable by a delimiter, replacing each delimiter occurrence
    inside an item with escape + delimiter.
    """
    escaped = escape + delimiter
    pieces = [item.replace(delimiter, escaped) for item in iterable]
    return delimiter.join(pieces)
@contextmanager
def get_file_object(filename, mode="r"):
    """Context manager yielding an open file, or a standard stream when
    filename is None (stdin for read modes, stdout otherwise).

    :filename: the name of the file, or None for a standard stream
    :mode: the mode to open the file with
    """
    if filename is not None:
        with open(filename, mode) as fobj:
            yield fobj
    elif mode.startswith("r"):
        yield sys.stdin
    else:
        yield sys.stdout
| {
"repo_name": "treycucco/bidon",
"path": "bidon/util/__init__.py",
"copies": "1",
"size": "8228",
"license": "mit",
"hash": 5888479120542731000,
"line_mean": 29.8164794007,
"line_max": 98,
"alpha_frac": 0.6861934857,
"autogenerated": false,
"ratio": 3.71802982376864,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.490422330946864,
"avg_score": null,
"num_lines": null
} |
"""A collection of utility methods used by various parts of the idb package."""
import json
import re
import sys
from contextlib import contextmanager
from datetime import datetime, timezone
from . import convert
_JSON_DEFAULTS=[]  # (predicate, converter) pairs added via register_json_default
# Failure-tolerant parser wrappers built from the convert helpers;
# presumably each returns a fallback instead of raising -- see try_wrapper.
try_parse_int = convert.try_wrapper(convert.to_int)
try_parse_float = convert.try_wrapper(convert.to_float)
try_parse_decimal = convert.try_wrapper(convert.to_decimal)
try_parse_date = convert.try_wrapper(convert.to_date)
try_parse_datetime = convert.try_wrapper(convert.to_datetime)
def local_now():
    """Return the current local time as a naive datetime."""
    now = datetime.now()
    return now
def utc_now():
    """Return the current time as a timezone-aware UTC datetime.

    Uses datetime.now(timezone.utc) directly: datetime.utcnow() is
    deprecated (Python 3.12) and produced a naive value that then had to
    be patched with .replace(tzinfo=...). The result is identical.
    """
    return datetime.now(timezone.utc)
# Matches the canonical hyphenated lowercase form of a UUID.
IS_UUID_RE = re.compile(r"^[a-z0-9]{8}(-[a-z0-9]{4}){3}-[a-z0-9]{12}$")
def is_uuid(s):
    """Return truthy when *s* is a uuid.UUID instance or a matching UUID string.

    Falsy inputs are returned unchanged (short-circuit of ``and``).
    """
    # BUG FIX: UUID was referenced but never imported in this module, so
    # every truthy input raised NameError.
    from uuid import UUID
    return s and (isinstance(s, UUID) or IS_UUID_RE.match(s) is not None)
def exclude(d, keys, *, transform=None):
    """Copy of *d* without the given keys (set or predicate); optional value transform."""
    drop = keys if callable(keys) else (lambda k: k in keys)
    result = {}
    for k in d:
        if drop(k):
            continue
        result[k] = transform(d[k]) if transform else d[k]
    return result
def pick(d, keys, *, transform=None):
    """Copy of *d* restricted to the given keys (set or predicate); optional value transform."""
    keep = keys if callable(keys) else (lambda k: k in keys)
    result = {}
    for k in d:
        if keep(k):
            result[k] = transform(d[k]) if transform else d[k]
    return result
def register_json_default(pred, convert):
    """Register a (predicate, converter) pair consulted by json_default.

    NOTE(review): the parameter name ``convert`` shadows the ``convert``
    module imported at the top of this file.
    """
    _JSON_DEFAULTS.append((pred, convert))
def json_default(obj):
    """Serialize *obj* using the first matching registered default converter."""
    for pred, converter in _JSON_DEFAULTS:
        if pred(obj):
            return converter(obj)
    raise TypeError(repr(obj) + " is not JSON serializable")
def to_json(obj, pretty=False):
    """Serialize *obj* to JSON; pretty=True adds key sorting and indentation."""
    if pretty:
        opts = dict(sort_keys=True, indent=2, separators=(", ", ": "))
    else:
        opts = dict(sort_keys=False, indent=None, separators=(",", ":"))
    return json.dumps(obj, default=json_default, **opts)
def has_value(obj, name):
    """Return (present, value) for *name* looked up on *obj*.

    Handles None, dicts, attribute access and item access, in that order.
    """
    # Use an identity check: '== None' invokes __eq__, which arbitrary
    # objects (e.g. numpy arrays, ORM columns) may override.
    if obj is None:
        return (False, None)
    elif isinstance(obj, dict):
        return (name in obj, obj.get(name))
    elif hasattr(obj, name):
        return (True, getattr(obj, name))
    elif hasattr(obj, "__getitem__") and hasattr(obj, "__contains__") and name in obj:
        return (True, obj[name])
    else:
        return (False, None)
def get_value(obj, name, fallback=None):
    """Return the value for *name* on *obj*, else fallback (called if callable)."""
    present, value = has_value(obj, name)
    if not present:
        return fallback() if callable(fallback) else fallback
    return value
def set_value(obj, name, value):
    """Set *name* on *obj* via item assignment when supported, else setattr."""
    if hasattr(obj, "__setitem__"):
        obj[name] = value
        return
    setattr(obj, name, value)
def with_defaults(method, n, defaults=None):
    """Call method with n positional parameters, all non-specified defaults are passed None"""
    if defaults:
        args = list(defaults) + [None] * max(n - len(defaults), 0)
    else:
        args = [None] * n
    return method(*args)
def namedtuple_with_defaults(nt, defaults=None):
    """Build *nt* with the given leading defaults; remaining fields become None."""
    field_count = len(nt._fields)
    return with_defaults(nt, field_count, defaults)
def delegate(f, t, methods):
    """Create pass-through methods on *f* forwarding to the attribute named *t*."""
    for method_name in methods:
        _delegate(f, t, method_name)
def _delegate(f, t, method):
d = lambda self, *args, **kwargs: getattr(getattr(self, t), method)(*args, **kwargs)
d.__name__ = method
d.__doc__ = "Delegates to {0}.{1}: {2}".format(t, method, method.__doc__)
setattr(f, method, d)
def flatten_dict(d, ancestors=None):
    """Flattens a dictionary into (key, value) tuples. Where key is a tuple of ancestor keys."""
    if not ancestors:
        ancestors = ()
    for k in d:
        if isinstance(d[k], dict):
            # BUG FIX: the recursive call passed an undefined name 'joiner',
            # raising NameError for any nested dict.
            yield from flatten_dict(d[k], ancestors + (k, ))
        else:
            yield (ancestors + (k, ), d[k])
@contextmanager
def get_file_object(filename, mode="r"):
    """Context manager for a file object. If filename is present, this is the
    same as with open(filename, mode): ...
    If filename is None, yields sys.stdin for read modes, sys.stdout otherwise.
    """
    if filename is not None:
        with open(filename, mode) as f:
            yield f
    elif mode.startswith("r"):
        yield sys.stdin
    else:
        yield sys.stdout
from .data_table import DataTable
from .field_mapping import FieldMapping
from . import terminal
from . import xmlu
from . import jsonu
| {
"repo_name": "treycucco/py-utils",
"path": "idb/util/__init__.py",
"copies": "1",
"size": "3990",
"license": "bsd-3-clause",
"hash": -7418890542203724000,
"line_mean": 25.0784313725,
"line_max": 105,
"alpha_frac": 0.6691729323,
"autogenerated": false,
"ratio": 3.3277731442869056,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44969460765869057,
"avg_score": null,
"num_lines": null
} |
"""A collection of utils for ReGraph library."""
import copy
from regraph.command_parser import parser
from regraph.exceptions import ReGraphError, ParsingError, RewritingError
from regraph.attribute_sets import AttributeSet, FiniteSet
def set_attrs(old_attrs, attrs, normalize=True, update=True):
    """Overwrite *old_attrs* with *attrs* in place and return it.

    :normalize: normalize *attrs* first (see normalize_attrs)
    :update: when True, keys absent from *attrs* are removed
    """
    if normalize:
        normalize_attrs(attrs)
    for key, value in attrs.items():
        old_attrs[key] = value
    if update:
        stale = [key for key in old_attrs if key not in attrs]
        for key in stale:
            del old_attrs[key]
    return old_attrs
def add_attrs(old_attrs, attrs, normalize=True):
    """Merge *attrs* into *old_attrs* in place, unioning overlapping keys."""
    if normalize:
        normalize_attrs(attrs)
    for key, value in attrs.items():
        if key in old_attrs:
            old_attrs[key] = old_attrs[key].union(value)
        else:
            old_attrs[key] = value
def remove_attrs(old_attrs, attrs, normalize=True):
    """Subtract *attrs* from *old_attrs* in place; drop keys left empty."""
    if normalize:
        normalize_attrs(attrs)
    for key, value in attrs.items():
        if key not in old_attrs:
            continue
        remaining = old_attrs[key].difference(value)
        if remaining:
            old_attrs[key] = remaining
        else:
            del old_attrs[key]
def assign_attrs(element, attrs):
    """Copy every key/value pair of *attrs* onto *element* in place."""
    for key, value in attrs.items():
        element[key] = value
def merge_attrs(original_dict, attrs):
    """Add attrs to the container.

    Mutates *original_dict* in place, unioning values on shared keys, and
    returns the merged dictionary.

    BUG FIX: the original returned None, so when *original_dict* was None
    the freshly built result was silently lost to the caller.
    """
    if attrs is not None:
        normalize_attrs(attrs)
    else:
        attrs = dict()
    if original_dict is None:
        original_dict = attrs
    else:
        for key in attrs:
            if key in original_dict:
                original_dict[key] = original_dict[key].union(attrs[key])
            else:
                original_dict[key] = attrs[key]
    return original_dict
def safe_deepcopy_dict(d):
    """Util for safe deepcopy of a dict.

    Falls back to copying each value's items() list when copy.deepcopy
    fails with "TypeError: can't pickle dict_items objects".
    """
    try:
        return copy.deepcopy(d)
    except TypeError:
        return {key: copy.deepcopy(list(value.items()))
                for key, value in d.items()}
def generate_new_id(collection, basename):
    """Generate a unique node id by suffixing basename with _1, _2, ..."""
    candidate = basename
    suffix = 1
    while candidate in collection:
        candidate = "{}_{}".format(basename, suffix)
        suffix += 1
    return candidate
def attrs_to_json(attrs):
    """Convert attributes to a json-serializable dict via each value's to_json()."""
    normalize_attrs(attrs)
    if attrs is None:
        return dict()
    return {key: value.to_json() for key, value in attrs.items()}
def attrs_from_json(json_data):
    """Retrieve attrs from a json-like dict, rebuilding each AttributeSet."""
    return {key: AttributeSet.from_json(value)
            for key, value in json_data.items()}
def relation_to_json(rel):
    """Convert a relation to a json-serializable dict (sets become lists)."""
    return {key: list(values) for key, values in rel.items()}
def load_nodes_from_json(j_data):
    """Load nodes from json-like dict.

    Returns a list of (node_id, attrs) pairs; attrs is None when absent.
    Raises ReGraphError when the payload or a node id is missing.
    """
    if "nodes" not in j_data.keys():
        raise ReGraphError(
            "Error loading graph: no nodes specified!")
    loaded_nodes = []
    for node in j_data["nodes"]:
        if "id" not in node.keys():
            raise ReGraphError(
                "Error loading graph: node id is not specified!")
        attrs = None
        if "attrs" in node.keys():
            attrs = json_dict_to_attrs(node["attrs"])
        loaded_nodes.append((node["id"], attrs))
    return loaded_nodes
def load_edges_from_json(j_data):
    """Load edges from json-like dict.

    Returns (source, target) or (source, target, attrs) tuples; an absent
    "edges" key yields an empty list. Raises ReGraphError on a missing
    edge endpoint.
    """
    loaded_edges = []
    for edge in j_data.get("edges", []):
        if "from" not in edge.keys():
            raise ReGraphError(
                "Error loading graph: edge source is not specified!")
        if "to" not in edge.keys():
            raise ReGraphError(
                "Error loading graph: edge target is not specified!")
        if "attrs" in edge.keys():
            loaded_edges.append(
                (edge["from"], edge["to"], json_dict_to_attrs(edge["attrs"])))
        else:
            loaded_edges.append((edge["from"], edge["to"]))
    return loaded_edges
def json_dict_to_attrs(d):
    """Convert json dictionary to attributes.

    Legacy entries carrying "strSet"/"numSet" payloads are first rewritten
    into the {"type": "FiniteSet", "data": [...]} form AttributeSet expects.
    """
    attrs = {}
    for k, v in d.items():
        if "strSet" in v.keys() or "numSet" in v.keys():
            new_v = {
                "type": "FiniteSet",
                "data": []
            }
            # BUG FIX: the original indexed v["strSet"] and v["numSet"]
            # unconditionally, raising KeyError when only one was present.
            str_set = v.get("strSet", {})
            if "pos_list" in str_set.keys():
                new_v["data"].append(str_set["pos_list"])
            num_set = v.get("numSet", {})
            if "pos_list" in num_set.keys():
                new_v["data"].append(num_set["pos_list"])
            v = new_v
        attrs[k] = AttributeSet.from_json(v)
    return attrs
def valid_attributes(source, target):
    """Test that every source key exists in target with a superset value set."""
    return all(
        key in target and value.issubset(target[key])
        for key, value in source.items())
def is_subdict(small_dict, big_dict):
    """Check if the dictionary is a subset of the other.

    Empty or None *small_dict* (or one with only empty value sets) is
    trivially a subset; a non-empty one can never fit in an empty/None
    *big_dict*.
    """
    normalize_attrs(small_dict)
    normalize_attrs(big_dict)
    if small_dict is None or len(small_dict) == 0:
        return True
    if all(len(value) == 0 for value in small_dict.values()):
        return True
    # small_dict is non-trivial from here on.
    if big_dict is None or len(big_dict) == 0:
        return False
    for key, value in small_dict.items():
        if key not in big_dict.keys():
            return False
        if not value.issubset(big_dict[key]):
            return False
    return True
def attrs_intersection(attrs1, attrs2):
    """Intersect two attribute dictionaries, dropping keys left empty."""
    if attrs1 is None or attrs2 is None:
        return {}
    result = dict()
    for key, left in attrs1.items():
        if key not in attrs2:
            continue
        common = left.intersection(attrs2[key])
        if common:
            result[key] = common
    return result
def attrs_union(attrs1, attrs2):
    """Find a union of two dictionaries with attrs.

    A None argument is treated as empty; overlapping keys union their values.
    """
    if attrs1 is None:
        return attrs2 if attrs2 is not None else {}
    if attrs2 is None:
        return attrs1
    union = dict()
    for key, value in attrs1.items():
        union[key] = value.union(attrs2[key]) if key in attrs2 else value
    for key, value in attrs2.items():
        if key not in attrs1:
            union[key] = value
    return union
def keys_by_value(dictionary, val):
    """Get keys of a dictionary by a value (in insertion order)."""
    return [key for key, value in dictionary.items() if value == val]
def fold_left(f, init, l):
    """Left fold over *l*.

    f : a -> b -> b
    init : b
    Returns f(l[-1], ... f(l[1], f(l[0], init)))
    """
    accumulator = init
    for element in l:
        accumulator = f(element, accumulator)
    return accumulator
def to_set(value):
    """Convert a value to set.

    Sets and lists are converted element-wise; any other value becomes
    a singleton set.  The check is an exact type match, so subclasses
    of set/list are wrapped rather than expanded.
    """
    if type(value) in (set, list):
        return set(value)
    return {value}
def to_list(value):
    """Convert a value to list.

    Sets and lists are converted element-wise; any other value is
    wrapped into a singleton list.
    """
    # The original condition ``type(value) == set | type(value == list)``
    # was a typo: it compared the value's type against a type-union
    # built from ``set`` and a bool's type, which never matches, so
    # sets and lists were wrapped into a nested singleton list.
    if type(value) == set or type(value) == list:
        return list(value)
    else:
        return [value]
def normalize_attrs(attrs):
    """Normalize node attributes in place.

    Wraps every plain value into a `FiniteSet` and drops attributes
    whose value set ends up empty.  A `None` argument is left alone.
    """
    if attrs is None:
        return
    for key in list(attrs.keys()):
        if not isinstance(attrs[key], AttributeSet):
            attrs[key] = FiniteSet(attrs[key])
        # Drop attributes with no values left.
        if attrs[key].is_empty():
            del attrs[key]
def normalize_relation(relation):
    """Normalize a relation dictionary to a dict of sets.

    Every value is converted to a set: strings become singleton sets
    (they are iterable but must stay atomic), other iterables are
    expanded element-wise, and non-iterable values are wrapped into
    singleton sets.  Returns a new dictionary; the input is untouched.
    """
    new_relation_dict = dict()
    for key, values in relation.items():
        if type(values) == set:
            new_relation_dict[key] = values
        elif type(values) == str:
            # Strings must not be expanded character by character.
            new_relation_dict[key] = {values}
        else:
            try:
                new_relation_dict[key] = {v for v in values}
            except TypeError:
                # Non-iterable scalar value.
                new_relation_dict[key] = {values}
    # NOTE: a dead rebinding of the local name ``relation`` was removed
    # here -- it had no effect for the caller.
    return new_relation_dict
def merge_attributes(attr1, attr2, method="union"):
    """Merge two dictionaries of attributes.

    `method` selects the merge strategy: "union" or "intersection".

    Raises
    ------
    ReGraphError
        If `method` is not one of the supported strategies.
    """
    mergers = {
        "union": attrs_union,
        "intersection": attrs_intersection,
    }
    if method not in mergers:
        raise ReGraphError("Merging method %s is not defined!" % method)
    return mergers[method](attr1, attr2)
def dict_sub(attrs1, attrs2):
    """Remove attributes `attrs2` from `attrs1`.

    Returns a new dictionary; keys whose value set becomes empty after
    the subtraction are omitted from the result.
    """
    result = {}
    for key, value in attrs1.items():
        if key not in attrs2:
            result[key] = value
            continue
        remainder = value.difference(attrs2[key])
        if remainder:
            result[key] = remainder
    return result
def simplify_commands(commands, di=False):
    """Simplify a list of graph transformation commands.

    Parses the newline-separated `commands`, drops the operations made
    redundant by later deletions, and returns the simplified commands
    as a single newline-joined string.  `di` tells whether the graph
    is directed; when it is False edges are matched as unordered pairs.

    Raises
    ------
    ParsingError
        If one of the command lines cannot be parsed.
    """
    command_strings = [c for c in commands.splitlines() if len(c) > 0]
    actions = []
    for command in command_strings:
        try:
            parsed = parser.parseString(command).asDict()
            actions.append(parsed)
        except Exception:
            # Narrowed from a bare ``except`` (which would also catch
            # KeyboardInterrupt/SystemExit); a stray debug ``print``
            # of every command was removed as well.
            raise ParsingError("Cannot parse command '%s'" % command)
    # We keep updated a list of the element we added, the lines of
    # transformations that added them or added attributes to them
    # and the type of addition we did (node or edge)
    added = []
    ad_index = []
    ad_type = []
    # We keep updated a list of the element we deleted and the lines of
    # transformation that deleted them or deleted attributes from them
    deleted = []
    del_index = []
    # We keep updated a list of the element we cloned and the line of
    # transformation that cloned them
    cloned = []
    clone_index = []
    # List of elements to remove at the end
    elements_to_remove = []
    # For each line of command we change what to remove and what to keep
    # We update the lists at each step, the only operations that actually
    # do simplify the commands are the deletion of nodes and edges and the
    # merges. They try to find the all the operations they can remove
    # without changing the behaviour
    for i in range(len(actions)):
        action = actions[i]
        if action["keyword"] == "add_node":
            added.append(action["node"])
            ad_index.append([i])
            ad_type.append("node")
        elif action["keyword"] == "delete_node":
            if action["node"] not in cloned:
                # If the node haven't been cloned before
                rem_el = []
                for j in range(len(added)):
                    el = added[j]
                    if (type(el) == tuple and (el[0] == action["node"] or
                                               el[1] == action["node"])) or\
                            el == action["node"]:
                        # If the node have been involved in an addition
                        # we remove that addition since it has been
                        # deleted now, if there are not more lines that
                        # refers to the addition of that node, we can
                        # remove the deletion of the node
                        # Finding the node in added is not enough to
                        # remove the deletion since it can be an
                        # addition of an edge, we have to check if it
                        # the node itself that we added
                        if el == action["node"]:
                            elements_to_remove.append(i)
                        for k in ad_index[j]:
                            elements_to_remove.append(k)
                        rem_el.append(j)
                k = 0
                for j in rem_el:
                    del added[j - k]
                    del ad_index[j - k]
                    del ad_type[j - k]
                    k += 1
                rem_el = []
                for j in range(len(deleted)):
                    el = deleted[j]
                    if (type(el) == tuple and (el[0] == action["node"] or
                                               el[1] == action["node"])) or\
                            el == action["node"]:
                        # If the node have been involved in a deletion
                        # we can remove that deletion since the deletion
                        # of the node itself will delete what the deletion
                        # would have deleted
                        for k in del_index[j]:
                            elements_to_remove.append(k)
                        rem_el.append(j)
                k = 0
                for j in rem_el:
                    del deleted[j - k]
                    del del_index[j - k]
                    k += 1
            else:
                # If the node have been cloned before, we can't delete the
                # transformations that happened before the cloning since
                # they affected the clones too. We do so by comparing the
                # line of the transformation we are looking at and the line
                # of the last cloning operation that happened
                rem_el = []
                ind = max([clone_index[i] for i in range(
                    len(cloned)) if cloned[i] == action["node"]])
                for j in range(len(added)):
                    el = added[j]
                    if (type(el) == tuple and (el[0] == action["node"] or
                                               el[1] == action["node"])) or\
                            el == action["node"]:
                        rem_ind = []
                        for k in ad_index[j]:
                            if k > ind:
                                elements_to_remove.append(k)
                                rem_ind.append(k)
                        if ad_index[j] == rem_ind:
                            rem_el.append(j)
                        else:
                            for k in rem_ind:
                                ad_index[j].remove(k)
                m = 0
                for j in rem_el:
                    del added[j - m]
                    del ad_index[j - m]
                    del ad_type[j - m]
                    m += 1
                rem_el = []
                for j in range(len(deleted)):
                    el = deleted[j]
                    if (type(el) == tuple and (el[0] == action["node"] or
                                               el[1] == action["node"])) or\
                            el == action["node"]:
                        rem_ind = []
                        for k in del_index[j]:
                            if k > ind:
                                elements_to_remove.append(k)
                                rem_ind.append(k)
                        if del_index[j] == rem_ind:
                            rem_el.append(j)
                        else:
                            for k in rem_ind:
                                del_index[j].remove(k)
                m = 0
                for j in rem_el:
                    del deleted[j - m]
                    del del_index[j - m]
                    m += 1
                ind = clone_index.index(ind)
                del cloned[ind]
                del clone_index[ind]
            deleted.append(action["node"])
            del_index.append([i])
        elif action["keyword"] == "add_node_attrs":
            if action["node"] in added:
                j = added.index(action["node"])
                ad_index[j].append(i)
            else:
                added.append(action["node"])
                ad_index.append([i])
                ad_type.append("node_attrs")
        elif action["keyword"] == "delete_node_attrs":
            if action["node"] in deleted:
                j = deleted.index(action["node"])
                del_index[j].append(i)
            else:
                deleted.append(action["node"])
                del_index.append([i])
        elif action["keyword"] == "add_edge":
            e = (action["node_1"], action["node_2"])
            added.append(e)
            ad_index.append([i])
            ad_type.append("edge")
        elif action["keyword"] == "delete_edge":
            # It is the same idea as in the delete_node function, but with
            # a little bit more complexity since we have two nodes that
            # can possibly be cloned.
            # This time, finding the edge in the added list automatically
            # means we have to remove the deletion and the addition in the
            # case we didn't clone any of our nodes
            e = (action["node_1"], action["node_2"])
            if e[0] not in cloned and e[1] not in cloned:
                rem_el = []
                for j in range(len(added)):
                    el = added[j]
                    if type(el) == tuple and\
                       (el == e or (not di and el == (e[1], e[0]))):
                        elements_to_remove.append(i)
                        for k in ad_index[j]:
                            elements_to_remove.append(k)
                        rem_el.append(j)
                k = 0
                for j in rem_el:
                    del added[j - k]
                    del ad_index[j - k]
                    del ad_type[j - k]
                    k += 1
                rem_el = []
                for j in range(len(deleted)):
                    el = deleted[j]
                    if type(el) == tuple and\
                       (el == e or (not di and el == (e[1], e[0]))):
                        for k in del_index[j]:
                            elements_to_remove.append(k)
                        rem_el.append(j)
                k = 0
                for j in rem_el:
                    del deleted[j - k]
                    del del_index[j - k]
                    k += 1
            else:
                # Same idea as before if one of the nodes have been cloned,
                # but we have to take the max of the line number of all the
                # cloning operation on node 0 and node 1
                ind = 0
                if e[0] in cloned:
                    ind = max([clone_index[i]
                               for i in range(len(cloned)) if cloned[i] == e[0]])
                if e[1] in cloned:
                    ind = max([ind] + [clone_index[i]
                                       for i in range(len(cloned)) if cloned[i] == e[1]])
                ind = clone_index.index(ind)
                if e[0] in cloned:
                    rem_el = []
                    for j in range(len(added)):
                        el = added[j]
                        if type(el) == tuple and\
                           (el == e or (not di and el == (e[1], e[0]))):
                            rem_ind = []
                            for k in ad_index[j]:
                                if k > clone_index[ind]:
                                    elements_to_remove.append(k)
                                    # We remove the delete_edge operation
                                    # iff the same edge have been added
                                    # after the last cloning operation
                                    if ad_type[j] == "edge":
                                        elements_to_remove.append(i)
                                    rem_ind.append(k)
                            if ad_index[j] == rem_ind:
                                rem_el.append(j)
                            else:
                                for k in rem_ind:
                                    ad_index[j].remove(k)
                    m = 0
                    for j in rem_el:
                        del added[j - m]
                        del ad_index[j - m]
                        del ad_type[j - m]
                        m += 1
                    rem_el = []
                    for j in range(len(deleted)):
                        el = deleted[j]
                        if type(el) == tuple and\
                           (el == e or (not di and el == (e[1], e[0]))):
                            rem_ind = []
                            for k in del_index[j]:
                                if k > clone_index[ind]:
                                    elements_to_remove.append(k)
                                    rem_ind.append(k)
                            if del_index[j] == rem_ind:
                                rem_el.append(j)
                            else:
                                for k in rem_ind:
                                    del_index[j].remove(k)
                    m = 0
                    for j in rem_el:
                        del deleted[j - m]
                        del del_index[j - m]
                        m += 1
                if e[1] in cloned:
                    rem_el = []
                    for j in range(len(added)):
                        el = added[j]
                        if type(el) == tuple and\
                           (el == e or (not di and el == (e[1], e[0]))):
                            rem_ind = []
                            for k in ad_index[j]:
                                if k > clone_index[ind]:
                                    elements_to_remove.append(k)
                                    if ad_type[j] == "edge":
                                        elements_to_remove.append(i)
                                    rem_ind.append(k)
                            if ad_index[j] == rem_ind:
                                rem_el.append(j)
                            else:
                                for k in rem_ind:
                                    ad_index[j].remove(k)
                    m = 0
                    for j in rem_el:
                        del added[j - m]
                        del ad_index[j - m]
                        del ad_type[j - m]
                        m += 1
                    rem_el = []
                    for j in range(len(deleted)):
                        el = deleted[j]
                        if type(el) == tuple and\
                           (el == e or (not di and el == (e[1], e[0]))):
                            rem_ind = []
                            for k in del_index[j]:
                                if k > clone_index[ind]:
                                    elements_to_remove.append(k)
                                    rem_ind.append(k)
                            if del_index[j] == rem_ind:
                                rem_el.append(j)
                            else:
                                for k in rem_ind:
                                    del_index[j].remove(k)
                    m = 0
                    for j in rem_el:
                        del deleted[j - m]
                        del del_index[j - m]
                        m += 1
            deleted.append(e)
            del_index.append([i])
        elif action["keyword"] == "add_edge_attrs":
            e = (action["node_1"], action["node_2"])
            if e in added:
                j = added.index(e)
                ad_index[j].append(i)
            elif not di and (e[1], e[0]) in added:
                j = added.index((e[1], e[0]))
                ad_index[j].append(i)
            else:
                added.append(e)
                ad_index.append([i])
                ad_type.append("edge_attrs")
        elif action["keyword"] == "delete_edge_attrs":
            e = (action["node_1"], action["node_2"])
            if e in deleted:
                j = deleted.index(e)
                del_index[j].append(i)
            elif not di and (e[1], e[0]) in deleted:
                j = deleted.index((e[1], e[0]))
                del_index[j].append(i)
            else:
                deleted.append(e)
                del_index.append([i])
        elif action["keyword"] == "clone":
            if "node_name" in action.keys():
                added.append(action["node_name"])
                ad_index.append([i])
                ad_type.append("node")
            cloned.append(action["node"])
            clone_index.append(i)
        elif action["keyword"] == "merge":
            if "node_name" in action.keys():
                node_name = action["node_name"]
            else:
                node_name = "_".join(action["nodes"])
            added.append(node_name)
            ad_index.append([i])
            ad_type.append("node")
    return "\n".join(
        [command_strings[i]
         for i in range(len(actions))
         if i not in elements_to_remove])
def make_canonical_commands(g, commands, di=False):
    """Convert commands to the canonical form.

    Takes commands and the graph `g` they refer to and returns a list
    of canonical transformation steps that have the same behaviour.
    The canonical form of a transformation follows this pattern :
    DELETIONS (DELETE_NODE, DELETE_NODE_ATTRS, DELETE_EDGE,
    DELETE_EDGE_ATTRS) CLONING (CLONE)
    ADDING and MERGING (ADD_NODE, ADD_NODE_ATTRS, ADD_EDGE,
    ADD_EDGE_ATTRS, MERGE)
    `di` tells whether the graph is directed; when False, edges are
    also tracked in their reversed orientation.

    Raises
    ------
    ParsingError
        If a command line cannot be parsed.
    ReGraphError
        If a pass makes no progress while commands remain.
    """
    res = []
    # We do multiple steps of simplification, until we found a fixed-point
    aux = commands
    next_step = simplify_commands(commands, di)
    while next_step != aux:
        aux = next_step
        next_step = simplify_commands(aux, di)
    # We keep updated an environment with our nodes and our edges
    env_nodes = [n for n in g.nodes()]
    env_edges = [e for e in g.edges()]
    if not di:
        for e in g.edges():
            if not (e[1], e[0]) in env_edges:
                env_edges.append((e[1], e[0]))
    # For each transformation we choose if we do it in this step or if we
    # keep it for later
    while next_step != '':
        command_strings = [c for c in next_step.splitlines() if len(c) > 0]
        actions = []
        for command in command_strings:
            try:
                parsed = parser.parseString(command).asDict()
                actions.append(parsed)
            # NOTE(review): bare ``except`` also swallows
            # KeyboardInterrupt/SystemExit; narrowing it would be safer.
            except:
                raise ParsingError("Cannot parse command '%s'" % command)
        next_step = ''
        # We have 3 strings for each line of the canonical pattern
        add_step = ''
        del_step = ''
        clone_step = ''
        # Added is the list of elements we will add at to our environment
        # at the end of the step, we add them at the end so they are not
        # taken into account in the current step
        added = []
        cloned = []
        # If a node is in clone_wait, every cloning operation on it will
        # be delayed to next step. Same for other lists
        clone_wait = []
        merge_wait = []
        del_wait = []
        ad_wait = []
        # If we can't add a node with name n in this step, we don't want
        # another node with the same name to be added before it
        protected_names = []
        # For each action we update our lists and we chose what to do
        for i in range(len(actions)):
            action = actions[i]
            if action["keyword"] == "add_node":
                if action["node"] not in protected_names:
                    add_step += command_strings[i] + "\n"
                    added.append(action["node"])
            elif action["keyword"] == "delete_node":
                if action["node"] in env_nodes and\
                   action["node"] not in del_wait:
                    del_step += command_strings[i] + "\n"
                    env_nodes.remove(action["node"])
                else:
                    next_step += command_strings[i] + "\n"
                    ad_wait.append(action["node"])
            elif action["keyword"] == "add_node_attrs":
                if action["node"] in env_nodes and\
                   action["node"] not in ad_wait:
                    add_step += command_strings[i] + "\n"
                    added.append(action["node"])
                    clone_wait.append(action["node"])
                else:
                    next_step += command_strings[i] + "\n"
                    ad_wait.append(action["node"])
                    clone_wait.append(action["node"])
            elif action["keyword"] == "delete_node_attrs":
                if action["node"] in env_nodes and\
                   action["node"] not in del_wait:
                    del_step += command_strings[i] + "\n"
                else:
                    next_step += command_strings[i] + "\n"
                    clone_wait.append(action["node"])
                    ad_wait.append(action["node"])
            elif action["keyword"] == "add_edge":
                e = (action["node_1"], action["node_2"])
                if e[0] in env_nodes and\
                   e[1] in env_nodes and\
                   e[0] not in ad_wait and\
                   e[1] not in ad_wait:
                    add_step += command_strings[i] + "\n"
                    added.append(e)
                    if not di:
                        added.append((e[1], e[0]))
                    clone_wait.append(action["node_1"])
                    clone_wait.append(action["node_2"])
                else:
                    next_step += command_strings[i] + "\n"
                    clone_wait.append(action["node_1"])
                    clone_wait.append(action["node_2"])
                    merge_wait.append(action["node_1"])
                    merge_wait.append(action["node_2"])
            elif action["keyword"] == "delete_edge":
                e = (action["node_1"], action["node_2"])
                if (e in env_edges or
                    (not di and (e[1], e[0]) in env_edges)) and\
                   e[0] not in del_wait and\
                   e[1] not in del_wait:
                    is_cloned = False
                    for l in cloned:
                        if e[0] in l:
                            next_step += command_strings[i] + "\n"
                            clone_wait.append(action["node_1"])
                            clone_wait.append(action["node_2"])
                            merge_wait.append(action["node_1"])
                            merge_wait.append(action["node_2"])
                            is_cloned = True
                            break
                    if not is_cloned:
                        del_step += command_strings[i] + "\n"
                        clone_wait.append(action["node_1"])
                        clone_wait.append(action["node_2"])
                        env_edges.remove(e)
                        if not di:
                            env_edges.remove((e[1], e[0]))
                else:
                    next_step += command_strings[i] + "\n"
                    clone_wait.append(action["node_1"])
                    clone_wait.append(action["node_2"])
                    merge_wait.append(action["node_1"])
                    merge_wait.append(action["node_2"])
            elif action["keyword"] == "add_edge_attrs":
                e = (action["node_1"], action["node_2"])
                if (e in env_edges or
                    (not di and (e[1], e[0]) in env_edges)) and\
                   e[0] not in ad_wait and\
                   e[1] not in ad_wait:
                    add_step += command_strings[i] + "\n"
                    added.append(e)
                    if not di:
                        added.append((e[1], e[0]))
                    clone_wait.append(action["node_1"])
                    clone_wait.append(action["node_2"])
                else:
                    next_step += command_strings[i] + "\n"
                    clone_wait.append(action["node_1"])
                    clone_wait.append(action["node_2"])
                    merge_wait.append(action["node_1"])
                    merge_wait.append(action["node_2"])
            elif action["keyword"] == "delete_edge_attrs":
                e = (action["node_1"], action["node_2"])
                if (e in env_edges or
                    (not di and (e[1], e[0]) in env_edges)) and\
                   e[0] not in del_wait and\
                   e[1] not in del_wait:
                    is_cloned = False
                    for l in cloned:
                        if e[0] in l:
                            next_step += command_strings[i] + "\n"
                            clone_wait.append(action["node_1"])
                            clone_wait.append(action["node_2"])
                            merge_wait.append(action["node_1"])
                            merge_wait.append(action["node_2"])
                            is_cloned = True
                        elif e[1] in l:
                            next_step += command_strings[i] + "\n"
                            clone_wait.append(action["node_1"])
                            clone_wait.append(action["node_2"])
                            merge_wait.append(action["node_1"])
                            merge_wait.append(action["node_2"])
                            is_cloned = True
                    if not is_cloned:
                        del_step += command_strings[i] + "\n"
                        clone_wait.append(action["node_1"])
                        clone_wait.append(action["node_2"])
                else:
                    next_step += command_strings[i] + "\n"
                    clone_wait.append(action["node_1"])
                    clone_wait.append(action["node_2"])
                    merge_wait.append(action["node_1"])
                    merge_wait.append(action["node_2"])
            elif action["keyword"] == "clone":
                node = action["node"]
                if "node_name" in action.keys():
                    new_node = action["node_name"]
                else:
                    j = 1
                    new_node = str(node) + str(j)
                    while new_node in env_nodes or new_node in added:
                        j += 1
                        new_node = str(node) + str(j)
                if node in env_nodes and\
                   node not in clone_wait and\
                   new_node not in protected_names and\
                   fold_left(lambda e, acc: (e != node or
                                             (type(e) == tuple and
                                              e[1] != node and
                                              e[0] != node)) and
                             acc,
                             True,
                             added):
                    clone_step += command_strings[i] + "\n"
                    added.append(new_node)
                    del_wait.append(node)
                    found = False
                    # NOTE(review): this loop reuses the outer loop
                    # variable ``i``; ``i`` is not read again in this
                    # branch afterwards, but renaming it would be safer.
                    for i in range(len(cloned)):
                        if node in cloned[i]:
                            cloned[i].append(new_node)
                            found = True
                    if not found:
                        cloned.append([new_node, node])
                    to_add = []
                    for e in env_edges:
                        if e[0] == node:
                            to_add.append((new_node, e[1]))
                        elif e[1] == node:
                            to_add.append((e[0], new_node))
                    for e in added:
                        if type(e) == tuple:
                            if e[0] == node and\
                               e[1] != node:
                                to_add.append((new_node, e[1]))
                            elif e[1] == node and e[0] != node:
                                to_add.append((e[0], new_node))
                    for e in to_add:
                        added.append(e)
                else:
                    next_step += command_strings[i] + "\n"
                    del_wait.append(node)
                    merge_wait.append(node)
                    ad_wait.append(node)
                    protected_names.append(new_node)
            elif action["keyword"] == "merge":
                if "node_name" in actions[i].keys():
                    node_name = actions[i]["node_name"]
                else:
                    node_name = "_".join(actions[i]["nodes"])
                if fold_left(lambda n, acc: (n in env_nodes and
                                             n not in merge_wait) and
                             acc,
                             True,
                             action["nodes"]) and\
                   node_name not in protected_names:
                    add_step += command_strings[i] + "\n"
                    added.append(node_name)
                    clone_wait.append(node_name)
                    rem_el = []
                    for e in env_edges:
                        if e[0] in action["nodes"] and\
                           e[1] in action["nodes"]:
                            if e not in rem_el:
                                rem_el.append(e)
                        if e[0] in action["nodes"]:
                            if e not in rem_el:
                                rem_el.append(e)
                            if e[1] not in action["nodes"]:
                                added.append((node_name, e[1]))
                        elif e[1] in action["nodes"]:
                            if e not in rem_el:
                                rem_el.append(e)
                            if e[0] not in action["nodes"]:
                                added.append((e[0], node_name))
                    for e in rem_el:
                        while e in env_edges:
                            env_edges.remove(e)
                        # NOTE(review): for an undirected self-loop the
                        # reversed pair equals ``e`` and has already been
                        # removed by the loop above, so this remove could
                        # raise ValueError; confirm self-loops cannot
                        # occur here.
                        if not di:
                            env_edges.remove((e[1], e[0]))
                    rem_el = []
                    for e in added:
                        if type(e) == tuple:
                            if e[0] in action["nodes"] and\
                               e[1] in action["nodes"]:
                                if e not in rem_el:
                                    rem_el.append(e)
                            if e[0] in action["nodes"]:
                                if e not in rem_el:
                                    rem_el.append(e)
                                if e[1] not in action["nodes"]:
                                    added.append((node_name, e[1]))
                            elif e[1] in action["nodes"]:
                                if e not in rem_el:
                                    rem_el.append(e)
                                if e[0] not in action["nodes"]:
                                    added.append((e[0], node_name))
                    for e in rem_el:
                        while e in added:
                            added.remove(e)
                        if not di:
                            added.remove((e[1], e[0]))
                else:
                    next_step += command_strings[i] + "\n"
                    protected_names.append(node_name)
        # Commit the additions of this step to the environment.
        for el in added:
            if type(el) == tuple:
                env_edges.append(el)
            else:
                env_nodes.append(el)
        if len(next_step) != 0 and len(del_step + clone_step + add_step) == 0:
            raise ReGraphError(
                "Cannot find any new transformations and" +
                "the sequence of actions is non-empty : {}".format(next_step)
            )
        res.append(del_step + clone_step + add_step)
    return res
# def assert_nx_graph_eq(g1, g2):
# """Assertion function for graph equality."""
# assert(set(g1.nodes()) == set(g2.nodes()))
# assert(set(g1.edges()) == set(g2.edges()))
# for n in g1.nodes():
# assert(g1.get_node(n) == g2.get_node(n))
# for e1, e2 in g1.edges():
#         assert(g1.get_edge(e1, e2) == g2.adj[e1][e2])
# return
def format_typing(typing):
    """Normalize a typing specification into a dictionary.

    `None` becomes an empty dict.  Dict values are deep-copied as-is;
    sized two-element values are deep-copied whole, while one-element
    sequences are unwrapped to their single element (values of any
    other length are silently skipped, preserving the historical
    behaviour).

    Raises
    ------
    ReGraphError
        If a value is neither a dict nor an indexable sized sequence.
    """
    if typing is None:
        typing = dict()
    new_typing = dict()
    for key, value in typing.items():
        if type(value) == dict:
            new_typing[key] = copy.deepcopy(value)
        else:
            try:
                if len(value) == 2:
                    new_typing[key] = copy.deepcopy(value)
                elif len(value) == 1:
                    new_typing[key] = copy.deepcopy(value[0])
            except (TypeError, IndexError, KeyError):
                # Narrowed from a bare ``except`` so that unrelated
                # errors are not masked as format errors.
                raise ReGraphError("Typing format is not valid!")
    return new_typing
def normalize_typing_relation(typing_rel):
    """Normalize a typing relation so that every mapped value is a set.

    The input is first normalized with `format_typing`; then, within
    each per-graph mapping, strings become singleton sets, iterables
    are expanded element-wise and scalars are wrapped.  Keys whose
    value set would be empty are left unchanged.
    """
    new_typing_rel = format_typing(typing_rel)
    # Renamed the inner loop variable (it used to shadow the parameter).
    for graph_id, mapping in new_typing_rel.items():
        for key, values in mapping.items():
            if type(values) == str:
                value_set = {values}
            else:
                try:
                    value_set = set(values)
                except TypeError:
                    value_set = {values}
            if len(value_set) > 0:
                new_typing_rel[graph_id][key] = value_set
    return new_typing_rel
def replace_source(n1, n2, mapping):
    """Rename the source key `n1` of `mapping` to `n2` (in place)."""
    value = mapping[n1]
    mapping[n2] = value
    del mapping[n1]
def replace_target(n1, n2, mapping):
    """Replace every occurrence of target value `n1` with `n2` (in place)."""
    for key in mapping:
        if mapping[key] == n1:
            mapping[key] = n2
def id_of(elements):
    """Return the identity mapping over `elements`."""
    return dict((element, element) for element in elements)
def restrict_mapping(nodes, mapping):
    """Restrict `mapping` to the given `nodes` (KeyError if one is absent)."""
    return {node: mapping[node] for node in nodes}
def reverse_image(mapping, nodes):
    """Return all keys of `mapping` whose image lies in `nodes`."""
    preimage = []
    for node in mapping:
        if mapping[node] in nodes:
            preimage.append(node)
    return preimage
def union_mappings(map1, map2):
    """Combine two mappings into a new one.

    Raises
    ------
    ReGraphError
        If the mappings assign conflicting targets to the same source.
    """
    combined = copy.deepcopy(map1)
    for source, target in map2.items():
        if source not in combined:
            combined[source] = target
        elif combined[source] != target:
            raise ReGraphError("merging uncompatible mappings")
    return combined
def recursive_merge(dict1, dict2):
    """Recursively merge `dict2` into `dict1` (in place).

    Nested dicts present on both sides are merged; any other value
    from `dict2` overwrites the one in `dict1`.
    """
    for key, value in dict2.items():
        both_dicts = (key in dict1.keys() and
                      isinstance(dict1[key], dict) and
                      isinstance(value, dict))
        if both_dicts:
            recursive_merge(dict1[key], value)
        else:
            dict1[key] = value
def remove_forbidden(string):
    """Replace forbidden characters (space, '-', ',', '/', '.') with '_'."""
    return string.translate(str.maketrans(" -,/.", "_____"))
def test_strictness(hierarchy, origin_id, rule, instance, p_typing, rhs_typing):
    """Test strictness of rewriting in a hierarchy.

    Verifies that rewriting the graph `origin_id` of `hierarchy` with
    `rule` at the match `instance` requires no propagation:

    * to the ancestors of `origin_id`: removed nodes/edges/attributes
      must have no instances upstream, and every cloned node with
      upstream instances must be unambiguously typed by P through
      `p_typing`;
    * to the descendants of `origin_id`: merges must not merge distinct
      types, and every added node/edge/attribute must be typed (via
      `rhs_typing`) by an existing element of the descendant graph.

    Raises
    ------
    RewritingError
        If any strictness condition is violated.
    """
    # --- Checks against the ancestors (upward propagation). ---
    ancestors = hierarchy.get_ancestors(origin_id).keys()
    for anc in ancestors:
        typing = hierarchy.get_typing(anc, origin_id)
        # Removed nodes must have no instances in the ancestor.
        for lhs_n in rule.removed_nodes():
            graph_node = instance[lhs_n]
            anc_nodes = keys_by_value(typing, graph_node)
            if len(anc_nodes) > 0:
                raise RewritingError(
                    "Rewriting is strict (no propagation of removals is "
                    "allowed), the removed node '{}' from '{}' ".format(
                        graph_node, origin_id) +
                    "has instances '{}' in '{}'".format(
                        anc_nodes, anc))
        # Cloned nodes with upstream instances must be disambiguated
        # through `p_typing`.
        if len(rule.cloned_nodes()) > 0:
            for lhs_node, p_nodes in rule.cloned_nodes().items():
                graph_node = instance[lhs_node]
                anc_nodes = keys_by_value(typing, graph_node)
                if len(anc_nodes) > 0:
                    if anc not in p_typing:
                        raise RewritingError(
                            "Rewriting is strict (no propagation of clones is "
                            "allowed), the cloned node '{}' in '{}' ".format(
                                graph_node, origin_id) +
                            "has instances '{}' in '{}' and ".format(
                                anc_nodes, anc) +
                            "their typing by P is not specified")
                    else:
                        for anc_node in anc_nodes:
                            if anc_node not in p_typing[anc] or\
                               len(p_typing[anc][anc_node]) != 1:
                                raise RewritingError(
                                    "Rewriting is strict (no propagation of clones is "
                                    "allowed), typing by a clone in P of the "
                                    "node '{}' in '{}' is required".format(
                                        anc_nodes, anc))
        anc_graph = hierarchy.get_graph(anc)
        # Removed edges must have no instances in the ancestor graph.
        for p_s, p_t in rule.removed_edges():
            graph_s = instance[rule.p_lhs[p_s]]
            # NOTE(review): the target is resolved through ``p_rhs``
            # while the source uses ``p_lhs`` -- confirm this asymmetry
            # is intended.
            graph_t = instance[rule.p_rhs[p_t]]
            anc_ss = keys_by_value(typing, graph_s)
            anc_ts = keys_by_value(typing, graph_t)
            for anc_s in anc_ss:
                for anc_t in anc_ts:
                    if anc_graph.exists_edge(anc_s, anc_t):
                        raise RewritingError(
                            "Rewriting is strict (no propagation of removals is "
                            "allowed), the removed edge '{}->{}' from '{}' ".format(
                                graph_s, graph_t, origin_id) +
                            "has an instance ('{}->{}') in '{}'".format(
                                anc_s, anc_t, anc))
        # Removed node attributes must not exist on ancestor instances.
        for lhs_node, attrs in rule.removed_node_attrs().items():
            graph_node = instance[lhs_node]
            anc_nodes = keys_by_value(typing, graph_node)
            for anc_node in anc_nodes:
                if valid_attributes(attrs, anc_graph.get_node(anc_node)):
                    raise RewritingError(
                        "Rewriting is strict (no propagation of removals is "
                        "allowed), the removed attributes '{}' from '{}' in '{}' ".format(
                            attrs, graph_node, origin_id) +
                        "have instances in '{}' from '{}'".format(
                            anc_node, anc))
        # Removed edge attributes must not exist on ancestor edges.
        for p_s, p_t, attrs in rule.removed_edge_attrs():
            graph_s = instance[rule.p_lhs[p_s]]
            # NOTE(review): same ``p_rhs`` lookup as above -- verify.
            graph_t = instance[rule.p_rhs[p_t]]
            anc_ss = keys_by_value(typing, graph_s)
            anc_ts = keys_by_value(typing, graph_t)
            for anc_s in anc_ss:
                for anc_t in anc_ts:
                    if anc_graph.exists_edge(anc_s, anc_t):
                        if valid_attributes(
                                attrs, anc_graph.get_edge(anc_s, anc_t)):
                            raise RewritingError(
                                "Rewriting is strict (no propagation of removals is "
                                "allowed), the removed edge attributes '{}' ".format(
                                    attrs) +
                                "from '{}->{}' in '{}' ".format(
                                    graph_s, graph_t, origin_id) +
                                "have instances in '{}->{}' from '{}'".format(
                                    anc_s, anc_t, anc))
    # --- Checks against the descendants (downward propagation). ---
    descendants = hierarchy.get_descendants(origin_id).keys()
    for desc in descendants:
        typing = hierarchy.get_typing(origin_id, desc)
        # Merged nodes must all have the same type downstream.
        for rhs_node, p_nodes in rule.merged_nodes().items():
            lhs_nodes = [rule.p_lhs[n] for n in p_nodes]
            graph_nodes = [instance[n] for n in lhs_nodes]
            types = set([
                typing[n]
                for n in graph_nodes
            ])
            if len(types) > 1:
                raise RewritingError(
                    "Rewriting is strict (no merging of types is "
                    "allowed), merged nodes '{}' from '{}' ".format(
                        graph_nodes, origin_id) +
                    "induces merging of '{}' from '{}'".format(
                        types, desc))
        # Every added node must be typed (uniquely) by `rhs_typing`.
        if len(rule.added_nodes()) > 0:
            if desc not in rhs_typing:
                raise RewritingError(
                    "Rewriting is strict (no propagation of types is "
                    "allowed), typing of the added nodes '{}' ".format(
                        rule.added_nodes()) +
                    "by '{}' is required".format(desc))
            else:
                for rhs_n in rule.added_nodes():
                    if rhs_n not in rhs_typing[desc] or\
                       len(rhs_typing[desc][rhs_n]) != 1:
                        raise RewritingError(
                            "Rewriting is strict (no propagation of "
                            "types is allowed), typing of the added "
                            "node '{}' by '{}' is required".format(
                                rhs_n, desc))
        desc_graph = hierarchy.get_graph(desc)
        # Added node attributes must already exist on the typed node.
        for rhs_node, attrs in rule.added_node_attrs().items():
            if rhs_node in rule.added_nodes():
                desc_node = list(rhs_typing[desc][rhs_node])[0]
                if not valid_attributes(attrs, desc_graph.get_node(desc_node)):
                    raise RewritingError(
                        "Rewriting is strict (no propagation of attribute "
                        "addition allowed), rule adds new attributes '{}' ".format(
                            attrs) +
                        "to the node '{}' from '{}'".format(desc_node, desc))
            else:
                lhs_nodes = [
                    rule.p_lhs[n]
                    for n in keys_by_value(rule.p_rhs, rhs_node)
                ]
                graph_nodes = [instance[n] for n in lhs_nodes]
                # There is only one type, otherwise it would fail before
                desc_node = [
                    typing[n]
                    for n in graph_nodes
                ][0]
                if not valid_attributes(attrs, desc_graph.get_node(desc_node)):
                    raise RewritingError(
                        "Rewriting is strict (no propagation of attribute "
                        "addition is allowed), rule adds new attributes "
                        "'{}' ".format(attrs) +
                        "to the node '{}' from '{}'".format(desc_node, desc))
        # Added edges must already exist between the typed endpoints.
        for rhs_s, rhs_t in rule.added_edges():
            if rhs_s in rule.added_nodes():
                desc_s = list(rhs_typing[desc][rhs_s])[0]
            else:
                lhs_nodes = [
                    rule.p_lhs[n]
                    for n in keys_by_value(rule.p_rhs, rhs_s)
                ]
                graph_nodes = [instance[n] for n in lhs_nodes]
                # There is only one type, otherwise it would fail before
                desc_s = [
                    typing[n]
                    for n in graph_nodes
                ][0]
            if rhs_t in rule.added_nodes():
                desc_t = list(rhs_typing[desc][rhs_t])[0]
            else:
                lhs_nodes = [
                    rule.p_lhs[n]
                    for n in keys_by_value(rule.p_rhs, rhs_t)
                ]
                graph_nodes = [instance[n] for n in lhs_nodes]
                # There is only one type, otherwise it would fail before
                desc_t = [
                    typing[n]
                    for n in graph_nodes
                ][0]
            if not desc_graph.exists_edge(desc_s, desc_t):
                raise RewritingError(
                    "Rewriting is strict (no propagation of edge "
                    "addition is allowed), rule adds new edge "
                    "'{}->{}' ".format(desc_s, desc_t) +
                    "in '{}'".format(desc))
        # Added edge attributes must already exist on the typed edge.
        # NOTE(review): unpacking three values from ``.items()`` only
        # works if added_edge_attrs() flattens its keys into triples --
        # confirm the return shape of that method.
        for rhs_s, rhs_t, attrs in rule.added_edge_attrs().items():
            if rhs_s in rule.added_nodes():
                desc_s = list(rhs_typing[desc][rhs_s])[0]
            else:
                lhs_nodes = [
                    rule.p_lhs[n]
                    for n in keys_by_value(rule.p_rhs, rhs_s)
                ]
                graph_nodes = [instance[n] for n in lhs_nodes]
                # There is only one type, otherwise it would fail before
                desc_s = [
                    typing[n]
                    for n in graph_nodes
                ][0]
            if rhs_t in rule.added_nodes():
                desc_t = list(rhs_typing[desc][rhs_t])[0]
            else:
                lhs_nodes = [
                    rule.p_lhs[n]
                    for n in keys_by_value(rule.p_rhs, rhs_t)
                ]
                graph_nodes = [instance[n] for n in lhs_nodes]
                # There is only one type, otherwise it would fail before
                desc_t = [
                    typing[n]
                    for n in graph_nodes
                ][0]
            desc_attrs = desc_graph.get_edge(desc_s, desc_t)
            if not valid_attributes(attrs, desc_attrs):
                raise RewritingError(
                    "Rewriting is strict (no propagation of attribute "
                    "addition is allowed), rule adds new attributes "
                    "'{}' ".format(attrs) +
                    "to the edge '{}->{}' from '{}'".format(desc_s, desc_t, desc))
| {
"repo_name": "Kappa-Dev/ReGraph",
"path": "regraph/utils.py",
"copies": "1",
"size": "53507",
"license": "mit",
"hash": 3794238635125315600,
"line_mean": 38.1992673993,
"line_max": 90,
"alpha_frac": 0.4360924739,
"autogenerated": false,
"ratio": 4.361865166707426,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5297957640607427,
"avg_score": null,
"num_lines": null
} |
"""A collection of utils for ReGraph library."""
import copy
from regraph.parser import parser
from regraph.exceptions import ReGraphError, ParsingError
from regraph.attribute_sets import AttributeSet, FiniteSet
def json_dict_to_attrs(d):
    """Convert a json dictionary to attributes.

    Values carrying "strSet"/"numSet" payloads are repackaged into a
    "FiniteSet" json representation before being deserialized with
    `AttributeSet.from_json`; any other value is passed through as-is.
    """
    attrs = {}
    for k, v in d.items():
        if "strSet" in v.keys() or "numSet" in v.keys():
            new_v = {
                "type": "FiniteSet",
                "data": []
            }
            # Use .get so that a payload containing only one of the two
            # sub-dictionaries does not raise a KeyError (the original
            # accessed both unconditionally after an OR membership test).
            if "pos_list" in v.get("strSet", {}):
                new_v["data"].append(v["strSet"]["pos_list"])
            if "pos_list" in v.get("numSet", {}):
                new_v["data"].append(v["numSet"]["pos_list"])
            v = new_v
        attrs[k] = AttributeSet.from_json(v)
    return attrs
def valid_attributes(source, target):
    """Test the validity of attributes.

    `source` is valid w.r.t. `target` when each of its keys is present
    in `target` with a superset of the corresponding values.
    """
    for key in source:
        if key not in target:
            return False
        if not source[key].issubset(target[key]):
            return False
    return True
def is_subdict(small_dict, big_dict):
    """Check if the dictionary is a subset of the other.

    Both arguments are normalized in place first.  An absent or
    (effectively) empty `small_dict` is always a subdictionary; a
    non-trivial `small_dict` can never fit into an absent or empty
    `big_dict`.
    """
    normalize_attrs(small_dict)
    normalize_attrs(big_dict)
    # Trivial cases: nothing to check on the small side.
    if small_dict is None:
        return True
    if len(small_dict) == 0:
        return True
    if all([len(v) == 0 for k, v in small_dict.items()]):
        return True
    # A non-trivial small dict needs a non-empty big dict.
    if big_dict is None and len(small_dict) != 0:
        return False
    if len(big_dict) == 0 and len(small_dict) != 0:
        return False
    # Key-wise containment check.
    for key, value in small_dict.items():
        if key not in big_dict.keys():
            return False
        else:
            if not value.issubset(big_dict[key]):
                return False
    return True
def attrs_intersection(attrs1, attrs2):
    """Intersect two dictionaries with attributes.

    A `None` argument makes the intersection empty.  Keys whose
    intersected value set is empty are left out of the result.
    """
    if attrs1 is None or attrs2 is None:
        return {}
    res = dict()
    for key in attrs1:
        if key in attrs2:
            # NOTE(review): another revision of this helper calls
            # ``.intersection`` here -- confirm which method the
            # AttributeSet API actually provides.
            new_set = attrs1[key].intersect(attrs2[key])
            if new_set:
                res[key] = new_set
    return res
def attrs_union(attrs1, attrs2):
    """Find a union of two dictionaries with attrs.

    A `None` argument is treated as empty: the other argument (or an
    empty dict when both are `None`) is returned unchanged.
    """
    if attrs1 is None:
        if attrs2 is not None:
            return attrs2
        else:
            return {}
    if attrs2 is None:
        return attrs1
    res = dict()
    # Keys of attrs1: merged with attrs2's value when shared.
    for key in attrs1:
        if key in attrs2:
            res[key] = attrs1[key].union(attrs2[key])
        else:
            res[key] = attrs1[key]
    # Keys only present in attrs2 are copied as-is.
    for key in attrs2:
        if key not in attrs1:
            res[key] = attrs2[key]
    return res
def keys_by_value(dictionary, val):
    """Get keys of a dictionary by a value.

    Returns the keys (in insertion order) whose value equals `val`.
    """
    matching = []
    for key in dictionary:
        if dictionary[key] == val:
            matching.append(key)
    return matching
def fold_left(f, init, l):
    """Left fold of the list `l`.

    f : a -> b -> b
    init : b
    l : a list
    Returns f(l[n-1], ... f(l[1], f(l[0], init)) ...).
    """
    res = init
    for x in l:
        res = f(x, res)
    return res
def to_set(value):
    """Convert a value to set.

    Sets and lists are converted element-wise; any other value becomes
    a singleton set.  The check is an exact type match, so subclasses
    of set/list are wrapped rather than expanded.
    """
    if type(value) == set or type(value) == list:
        return set(value)
    else:
        return set([value])
def to_list(value):
    """Convert a value to list.

    Sets and lists are converted element-wise; any other value is
    wrapped into a singleton list.
    """
    # The original condition ``type(value) == set | type(value == list)``
    # was a typo: it compared the value's type against a type-union
    # built from ``set`` and a bool's type, which never matches, so
    # sets and lists were wrapped into a nested singleton list.
    if type(value) == set or type(value) == list:
        return list(value)
    else:
        return [value]
def normalize_attrs(attrs_):
    """Normalize node attributes in place.

    Plain values are wrapped into `FiniteSet`; attributes whose value
    set ends up empty are dropped.  A `None` argument is left alone.
    """
    if attrs_ is not None:
        for k, v in list(attrs_.items()):
            if not isinstance(v, AttributeSet):
                attrs_[k] = FiniteSet(v)
            # Drop attributes with no values left.
            if attrs_[k].is_empty():
                del attrs_[k]
    return
def merge_attributes(attr1, attr2, method="union"):
    """Merge two dictionaries of attributes.

    `method` selects the merge strategy: "union" or "intersection".

    Raises
    ------
    ReGraphError
        If `method` is not one of the supported strategies.
    """
    if method == "union":
        return attrs_union(attr1, attr2)
    elif method == "intersection":
        return attrs_intersection(attr1, attr2)
    else:
        raise ReGraphError("Merging method %s is not defined!" % method)
def dict_sub(attrs1, attrs2):
    """Key-wise difference: subtract the sets of *attrs2* from *attrs1*.

    Keys absent from *attrs2* are kept unchanged; keys whose difference
    is empty are dropped from the result.
    """
    result = {}
    for key, value_set in attrs1.items():
        if key not in attrs2:
            result[key] = value_set
        else:
            remaining = value_set.difference(attrs2[key])
            if remaining:
                result[key] = remaining
    return result
def simplify_commands(commands, di=False):
    """Simplify a list of graph transformation commands.

    Parses the newline-separated *commands*, then walks through them
    while tracking which elements were added, deleted and cloned.
    Deletions of nodes and edges cancel out earlier additions (and fold
    redundant deletions), so the returned script has the same effect as
    the input with the superfluous lines removed.

    :param commands: newline-separated command string
    :param di: True if the commands refer to a directed graph
    :returns: the simplified newline-separated command string
    :raises ParsingError: if one of the command lines cannot be parsed
    """
    command_strings = [c for c in commands.splitlines() if len(c) > 0]
    actions = []
    for command in command_strings:
        try:
            parsed = parser.parseString(command).asDict()
            actions.append(parsed)
        # Narrowed from a bare 'except:' so SystemExit/KeyboardInterrupt
        # propagate; the parser failure is kept as the cause.
        except Exception as e:
            raise ParsingError("Cannot parse command '%s'" % command) from e
    # We keep updated a list of the element we added, the lines of
    # transformations that added them or added attributes to them
    # and the type of addition we did (node or edge)
    added = []
    ad_index = []
    ad_type = []
    # We keep updated a list of the element we deleted and the lines of
    # transformation that deleted them or deleted attributes from them
    deleted = []
    del_index = []
    # We keep updated a list of the element we cloned and the line of
    # transformation that cloned them
    cloned = []
    clone_index = []
    # List of elements to remove at the end
    elements_to_remove = []
    # For each line of command we change what to remove and what to keep
    # We update the lists at each step, the only operations that actually
    # do simplify the commands are the deletion of nodes and edges and the
    # merges. They try to find the all the operations they can remove
    # without changing the behaviour
    for i in range(len(actions)):
        action = actions[i]
        if action["keyword"] == "add_node":
            added.append(action["node"])
            ad_index.append([i])
            ad_type.append("node")
        elif action["keyword"] == "delete_node":
            if action["node"] not in cloned:
                # If the node haven't been cloned before
                rem_el = []
                for j in range(len(added)):
                    el = added[j]
                    if (type(el) == tuple and (el[0] == action["node"] or
                                               el[1] == action["node"])) or\
                            el == action["node"]:
                        # If the node have been involved in an addition
                        # we remove that addition since it has been
                        # deleted now, if there are not more lines that
                        # refers to the addition of that node, we can
                        # remove the deletion of the node
                        # Finding the node in added is not enough to
                        # remove the deletion since it can be an
                        # addition of an edge, we have to check if it
                        # the node itself that we added
                        if el == action["node"]:
                            elements_to_remove.append(i)
                        for k in ad_index[j]:
                            elements_to_remove.append(k)
                        rem_el.append(j)
                k = 0
                for j in rem_el:
                    del added[j - k]
                    del ad_index[j - k]
                    del ad_type[j - k]
                    k += 1
                rem_el = []
                for j in range(len(deleted)):
                    el = deleted[j]
                    if (type(el) == tuple and (el[0] == action["node"] or
                                               el[1] == action["node"])) or\
                            el == action["node"]:
                        # If the node have been involved in a deletion
                        # we can remove that deletion since the deletion
                        # of the node itself will delete what the deletion
                        # would have deleted
                        for k in del_index[j]:
                            elements_to_remove.append(k)
                        rem_el.append(j)
                k = 0
                for j in rem_el:
                    del deleted[j - k]
                    del del_index[j - k]
                    k += 1
            else:
                # If the node have been cloned before, we can't delete the
                # transformations that happened before the cloning since
                # they affected the clones too. We do so by comparing the
                # line of the transformation we are looking at and the line
                # of the last cloning operation that happened
                rem_el = []
                ind = max([clone_index[i] for i in range(
                    len(cloned)) if cloned[i] == action["node"]])
                for j in range(len(added)):
                    el = added[j]
                    if (type(el) == tuple and (el[0] == action["node"] or
                                               el[1] == action["node"])) or\
                            el == action["node"]:
                        rem_ind = []
                        for k in ad_index[j]:
                            if k > ind:
                                elements_to_remove.append(k)
                                rem_ind.append(k)
                        if ad_index[j] == rem_ind:
                            rem_el.append(j)
                        else:
                            for k in rem_ind:
                                ad_index[j].remove(k)
                m = 0
                for j in rem_el:
                    del added[j - m]
                    del ad_index[j - m]
                    del ad_type[j - m]
                    m += 1
                rem_el = []
                for j in range(len(deleted)):
                    el = deleted[j]
                    if (type(el) == tuple and (el[0] == action["node"] or
                                               el[1] == action["node"])) or\
                            el == action["node"]:
                        rem_ind = []
                        for k in del_index[j]:
                            if k > ind:
                                elements_to_remove.append(k)
                                rem_ind.append(k)
                        if del_index[j] == rem_ind:
                            rem_el.append(j)
                        else:
                            for k in rem_ind:
                                del_index[j].remove(k)
                m = 0
                for j in rem_el:
                    del deleted[j - m]
                    del del_index[j - m]
                    m += 1
                ind = clone_index.index(ind)
                del cloned[ind]
                del clone_index[ind]
            deleted.append(action["node"])
            del_index.append([i])
        elif action["keyword"] == "add_node_attrs":
            if action["node"] in added:
                j = added.index(action["node"])
                ad_index[j].append(i)
            else:
                added.append(action["node"])
                ad_index.append([i])
                ad_type.append("node_attrs")
        elif action["keyword"] == "delete_node_attrs":
            if action["node"] in deleted:
                j = deleted.index(action["node"])
                del_index[j].append(i)
            else:
                deleted.append(action["node"])
                del_index.append([i])
        elif action["keyword"] == "add_edge":
            e = (action["node_1"], action["node_2"])
            added.append(e)
            ad_index.append([i])
            ad_type.append("edge")
        elif action["keyword"] == "delete_edge":
            # It is the same idea as in the delete_node function, but with
            # a little bit more complexity since we have two nodes that
            # can possibly be cloned.
            # This time, finding the edge in the added list automatically
            # means we have to remove the deletion and the addition in the
            # case we didn't clone any of our nodes
            e = (action["node_1"], action["node_2"])
            if e[0] not in cloned and e[1] not in cloned:
                rem_el = []
                for j in range(len(added)):
                    el = added[j]
                    if type(el) == tuple and\
                       (el == e or (not di and el == (e[1], e[0]))):
                        elements_to_remove.append(i)
                        for k in ad_index[j]:
                            elements_to_remove.append(k)
                        rem_el.append(j)
                k = 0
                for j in rem_el:
                    del added[j - k]
                    del ad_index[j - k]
                    del ad_type[j - k]
                    k += 1
                rem_el = []
                for j in range(len(deleted)):
                    el = deleted[j]
                    if type(el) == tuple and\
                       (el == e or (not di and el == (e[1], e[0]))):
                        for k in del_index[j]:
                            elements_to_remove.append(k)
                        rem_el.append(j)
                k = 0
                for j in rem_el:
                    del deleted[j - k]
                    del del_index[j - k]
                    k += 1
            else:
                # Same idea as before if one of the nodes have been cloned,
                # but we have to take the max of the line number of all the
                # cloning operation on node 0 and node 1
                ind = 0
                if e[0] in cloned:
                    ind = max([clone_index[i]
                               for i in range(len(cloned)) if cloned[i] == e[0]])
                if e[1] in cloned:
                    ind = max([ind] + [clone_index[i]
                                       for i in range(len(cloned)) if cloned[i] == e[1]])
                ind = clone_index.index(ind)
                if e[0] in cloned:
                    rem_el = []
                    for j in range(len(added)):
                        el = added[j]
                        if type(el) == tuple and\
                           (el == e or (not di and el == (e[1], e[0]))):
                            rem_ind = []
                            for k in ad_index[j]:
                                if k > clone_index[ind]:
                                    elements_to_remove.append(k)
                                    # We remove the delete_edge operation
                                    # iff the same edge have been added
                                    # after the last cloning operation
                                    if ad_type[j] == "edge":
                                        elements_to_remove.append(i)
                                    rem_ind.append(k)
                            if ad_index[j] == rem_ind:
                                rem_el.append(j)
                            else:
                                for k in rem_ind:
                                    ad_index[j].remove(k)
                    m = 0
                    for j in rem_el:
                        del added[j - m]
                        del ad_index[j - m]
                        del ad_type[j - m]
                        m += 1
                    rem_el = []
                    for j in range(len(deleted)):
                        el = deleted[j]
                        if type(el) == tuple and\
                           (el == e or (not di and el == (e[1], e[0]))):
                            rem_ind = []
                            for k in del_index[j]:
                                if k > clone_index[ind]:
                                    elements_to_remove.append(k)
                                    rem_ind.append(k)
                            if del_index[j] == rem_ind:
                                rem_el.append(j)
                            else:
                                for k in rem_ind:
                                    del_index[j].remove(k)
                    m = 0
                    for j in rem_el:
                        del deleted[j - m]
                        del del_index[j - m]
                        m += 1
                if e[1] in cloned:
                    rem_el = []
                    for j in range(len(added)):
                        el = added[j]
                        if type(el) == tuple and\
                           (el == e or (not di and el == (e[1], e[0]))):
                            rem_ind = []
                            for k in ad_index[j]:
                                if k > clone_index[ind]:
                                    elements_to_remove.append(k)
                                    if ad_type[j] == "edge":
                                        elements_to_remove.append(i)
                                    rem_ind.append(k)
                            if ad_index[j] == rem_ind:
                                rem_el.append(j)
                            else:
                                for k in rem_ind:
                                    ad_index[j].remove(k)
                    m = 0
                    for j in rem_el:
                        del added[j - m]
                        del ad_index[j - m]
                        del ad_type[j - m]
                        m += 1
                    rem_el = []
                    for j in range(len(deleted)):
                        el = deleted[j]
                        if type(el) == tuple and\
                           (el == e or (not di and el == (e[1], e[0]))):
                            rem_ind = []
                            for k in del_index[j]:
                                if k > clone_index[ind]:
                                    elements_to_remove.append(k)
                                    rem_ind.append(k)
                            if del_index[j] == rem_ind:
                                rem_el.append(j)
                            else:
                                for k in rem_ind:
                                    del_index[j].remove(k)
                    m = 0
                    for j in rem_el:
                        del deleted[j - m]
                        del del_index[j - m]
                        m += 1
            deleted.append(e)
            del_index.append([i])
        elif action["keyword"] == "add_edge_attrs":
            e = (action["node_1"], action["node_2"])
            if e in added:
                j = added.index(e)
                ad_index[j].append(i)
            elif not di and (e[1], e[0]) in added:
                j = added.index((e[1], e[0]))
                ad_index[j].append(i)
            else:
                added.append(e)
                ad_index.append([i])
                ad_type.append("edge_attrs")
        elif action["keyword"] == "delete_edge_attrs":
            e = (action["node_1"], action["node_2"])
            if e in deleted:
                j = deleted.index(e)
                del_index[j].append(i)
            elif not di and (e[1], e[0]) in deleted:
                j = deleted.index((e[1], e[0]))
                del_index[j].append(i)
            else:
                deleted.append(e)
                del_index.append([i])
        elif action["keyword"] == "clone":
            if "node_name" in action.keys():
                added.append(action["node_name"])
                ad_index.append([i])
                ad_type.append("node")
            cloned.append(action["node"])
            clone_index.append(i)
        elif action["keyword"] == "merge":
            if "node_name" in action.keys():
                node_name = action["node_name"]
            else:
                node_name = "_".join(action["nodes"])
            added.append(node_name)
            ad_index.append([i])
            ad_type.append("node")
    return "\n".join(
        [command_strings[i] for i in range(len(actions)) if i not in elements_to_remove])
def make_canonical_commands(g, commands, di=False):
    """Takes commands and the graph it refers to and returns a list of
    canonical transformations that have the same behaviour.

    The canonical form of a transformation follows this pattern :
    DELETIONS (DELETE_NODE, DELETE_NODE_ATTRS, DELETE_EDGE, DELETE_EDGE_ATTRS)
    CLONING (CLONE)
    ADDING and MERGING (ADD_NODE, ADD_NODE_ATTRS, ADD_EDGE, ADD_EDGE_ATTRS, MERGE)

    :param g: the graph the commands apply to
    :param commands: newline-separated command string
    :param di: True if *g* is directed
    :returns: list of canonical command strings, one per step
    :raises ParsingError: if a command cannot be parsed
    :raises ReGraphError: if no progress can be made on a non-empty script
    """
    res = []
    # We do multiple steps of simplification, until we found a fixed-point
    aux = commands
    next_step = simplify_commands(commands, di)
    while next_step != aux:
        aux = next_step
        next_step = simplify_commands(aux, di)
    # We keep updated an environment with our nodes and our edges
    env_nodes = [n for n in g.nodes()]
    env_edges = [e for e in g.edges()]
    if not di:
        for e in g.edges():
            if not (e[1], e[0]) in env_edges:
                env_edges.append((e[1], e[0]))
    # For each transformation we choose if we do it in this step or if we
    # keep it for later
    while next_step != '':
        command_strings = [c for c in next_step.splitlines() if len(c) > 0]
        actions = []
        for command in command_strings:
            try:
                parsed = parser.parseString(command).asDict()
                actions.append(parsed)
            # Narrowed from a bare 'except:' so SystemExit/KeyboardInterrupt
            # propagate; the parser failure is kept as the cause.
            except Exception as e:
                raise ParsingError("Cannot parse command '%s'" % command) from e
        next_step = ''
        # We have 3 strings for each line of the canonical pattern
        add_step = ''
        del_step = ''
        clone_step = ''
        # Added is the list of elements we will add at to our environment
        # at the end of the step, we add them at the end so they are not
        # taken into account in the current step
        added = []
        cloned = []
        # If a node is in clone_wait, every cloning operation on it will
        # be delayed to next step. Same for other lists
        clone_wait = []
        merge_wait = []
        del_wait = []
        ad_wait = []
        # If we can't add a node with name n in this step, we don't want
        # another node with the same name to be added before it
        protected_names = []
        # For each action we update our lists and we chose what to do
        for i in range(len(actions)):
            action = actions[i]
            if action["keyword"] == "add_node":
                if action["node"] not in protected_names:
                    add_step += command_strings[i] + "\n"
                    added.append(action["node"])
            elif action["keyword"] == "delete_node":
                if action["node"] in env_nodes and\
                   action["node"] not in del_wait:
                    del_step += command_strings[i] + "\n"
                    env_nodes.remove(action["node"])
                else:
                    next_step += command_strings[i] + "\n"
                    ad_wait.append(action["node"])
            elif action["keyword"] == "add_node_attrs":
                if action["node"] in env_nodes and\
                   action["node"] not in ad_wait:
                    add_step += command_strings[i] + "\n"
                    added.append(action["node"])
                    clone_wait.append(action["node"])
                else:
                    next_step += command_strings[i] + "\n"
                    ad_wait.append(action["node"])
                    clone_wait.append(action["node"])
            elif action["keyword"] == "delete_node_attrs":
                if action["node"] in env_nodes and\
                   action["node"] not in del_wait:
                    del_step += command_strings[i] + "\n"
                else:
                    next_step += command_strings[i] + "\n"
                    clone_wait.append(action["node"])
                    ad_wait.append(action["node"])
            elif action["keyword"] == "add_edge":
                e = (action["node_1"], action["node_2"])
                if e[0] in env_nodes and\
                   e[1] in env_nodes and\
                   e[0] not in ad_wait and\
                   e[1] not in ad_wait:
                    add_step += command_strings[i] + "\n"
                    added.append(e)
                    if not di:
                        added.append((e[1], e[0]))
                    clone_wait.append(action["node_1"])
                    clone_wait.append(action["node_2"])
                else:
                    next_step += command_strings[i] + "\n"
                    clone_wait.append(action["node_1"])
                    clone_wait.append(action["node_2"])
                    merge_wait.append(action["node_1"])
                    merge_wait.append(action["node_2"])
            elif action["keyword"] == "delete_edge":
                e = (action["node_1"], action["node_2"])
                if (e in env_edges or
                        (not di and (e[1], e[0]) in env_edges)) and\
                   e[0] not in del_wait and\
                   e[1] not in del_wait:
                    is_cloned = False
                    for l in cloned:
                        if e[0] in l:
                            next_step += command_strings[i] + "\n"
                            clone_wait.append(action["node_1"])
                            clone_wait.append(action["node_2"])
                            merge_wait.append(action["node_1"])
                            merge_wait.append(action["node_2"])
                            is_cloned = True
                            break
                    if not is_cloned:
                        del_step += command_strings[i] + "\n"
                        clone_wait.append(action["node_1"])
                        clone_wait.append(action["node_2"])
                        env_edges.remove(e)
                        if not di:
                            env_edges.remove((e[1], e[0]))
                else:
                    next_step += command_strings[i] + "\n"
                    clone_wait.append(action["node_1"])
                    clone_wait.append(action["node_2"])
                    merge_wait.append(action["node_1"])
                    merge_wait.append(action["node_2"])
            elif action["keyword"] == "add_edge_attrs":
                e = (action["node_1"], action["node_2"])
                if (e in env_edges or
                        (not di and (e[1], e[0]) in env_edges)) and\
                   e[0] not in ad_wait and\
                   e[1] not in ad_wait:
                    add_step += command_strings[i] + "\n"
                    added.append(e)
                    if not di:
                        added.append((e[1], e[0]))
                    clone_wait.append(action["node_1"])
                    clone_wait.append(action["node_2"])
                else:
                    next_step += command_strings[i] + "\n"
                    clone_wait.append(action["node_1"])
                    clone_wait.append(action["node_2"])
                    merge_wait.append(action["node_1"])
                    merge_wait.append(action["node_2"])
            elif action["keyword"] == "delete_edge_attrs":
                e = (action["node_1"], action["node_2"])
                if (e in env_edges or
                        (not di and (e[1], e[0]) in env_edges)) and\
                   e[0] not in del_wait and\
                   e[1] not in del_wait:
                    is_cloned = False
                    for l in cloned:
                        if e[0] in l:
                            next_step += command_strings[i] + "\n"
                            clone_wait.append(action["node_1"])
                            clone_wait.append(action["node_2"])
                            merge_wait.append(action["node_1"])
                            merge_wait.append(action["node_2"])
                            is_cloned = True
                        elif e[1] in l:
                            next_step += command_strings[i] + "\n"
                            clone_wait.append(action["node_1"])
                            clone_wait.append(action["node_2"])
                            merge_wait.append(action["node_1"])
                            merge_wait.append(action["node_2"])
                            is_cloned = True
                    if not is_cloned:
                        del_step += command_strings[i] + "\n"
                        clone_wait.append(action["node_1"])
                        clone_wait.append(action["node_2"])
                else:
                    next_step += command_strings[i] + "\n"
                    clone_wait.append(action["node_1"])
                    clone_wait.append(action["node_2"])
                    merge_wait.append(action["node_1"])
                    merge_wait.append(action["node_2"])
            elif action["keyword"] == "clone":
                node = action["node"]
                if "node_name" in action.keys():
                    new_node = action["node_name"]
                else:
                    j = 1
                    new_node = str(node) + str(j)
                    while new_node in env_nodes or new_node in added:
                        j += 1
                        new_node = str(node) + str(j)
                if node in env_nodes and\
                   node not in clone_wait and\
                   new_node not in protected_names and\
                   fold_left(lambda e, acc: (e != node or
                                             (type(e) == tuple and
                                              e[1] != node and
                                              e[0] != node)) and
                             acc,
                             True,
                             added):
                    clone_step += command_strings[i] + "\n"
                    added.append(new_node)
                    del_wait.append(node)
                    found = False
                    # Fixed: the original loop reused 'i' here, shadowing
                    # the action index of the enclosing loop.
                    for idx in range(len(cloned)):
                        if node in cloned[idx]:
                            cloned[idx].append(new_node)
                            found = True
                    if not found:
                        cloned.append([new_node, node])
                    to_add = []
                    for e in env_edges:
                        if e[0] == node:
                            to_add.append((new_node, e[1]))
                        elif e[1] == node:
                            to_add.append((e[0], new_node))
                    for e in added:
                        if type(e) == tuple:
                            if e[0] == node and\
                               e[1] != node:
                                to_add.append((new_node, e[1]))
                            elif e[1] == node and e[0] != node:
                                to_add.append((e[0], new_node))
                    for e in to_add:
                        added.append(e)
                else:
                    next_step += command_strings[i] + "\n"
                    del_wait.append(node)
                    merge_wait.append(node)
                    ad_wait.append(node)
                    protected_names.append(new_node)
            elif action["keyword"] == "merge":
                if "node_name" in actions[i].keys():
                    node_name = actions[i]["node_name"]
                else:
                    node_name = "_".join(actions[i]["nodes"])
                if fold_left(lambda n, acc: (n in env_nodes and
                                             n not in merge_wait) and
                             acc,
                             True,
                             action["nodes"]) and\
                   node_name not in protected_names:
                    add_step += command_strings[i] + "\n"
                    added.append(node_name)
                    clone_wait.append(node_name)
                    rem_el = []
                    for e in env_edges:
                        if e[0] in action["nodes"] and\
                           e[1] in action["nodes"]:
                            if not e in rem_el:
                                rem_el.append(e)
                        if e[0] in action["nodes"]:
                            if not e in rem_el:
                                rem_el.append(e)
                            if e[1] not in action["nodes"]:
                                added.append((node_name, e[1]))
                        elif e[1] in action["nodes"]:
                            if not e in rem_el:
                                rem_el.append(e)
                            if e[0] not in action["nodes"]:
                                added.append((e[0], node_name))
                    for e in rem_el:
                        while e in env_edges:
                            env_edges.remove(e)
                            if not di:
                                env_edges.remove((e[1], e[0]))
                    rem_el = []
                    for e in added:
                        if type(e) == tuple:
                            if e[0] in action["nodes"] and\
                               e[1] in action["nodes"]:
                                if e not in rem_el:
                                    rem_el.append(e)
                            if e[0] in action["nodes"]:
                                if e not in rem_el:
                                    rem_el.append(e)
                                if e[1] not in action["nodes"]:
                                    added.append((node_name, e[1]))
                            elif e[1] in action["nodes"]:
                                if e not in rem_el:
                                    rem_el.append(e)
                                if e[0] not in action["nodes"]:
                                    added.append((e[0], node_name))
                    for e in rem_el:
                        while e in added:
                            added.remove(e)
                            if not di:
                                added.remove((e[1], e[0]))
                else:
                    next_step += command_strings[i] + "\n"
                    protected_names.append(node_name)
        # Flush the elements added during this step into the environment.
        for el in added:
            if type(el) == tuple:
                env_edges.append(el)
            else:
                env_nodes.append(el)
        if del_step + clone_step + add_step == '':
            raise ReGraphError(
                "Can't find any new transformations and actions is non-empty :\n%s" %
                next_step
            )
        res.append(del_step + clone_step + add_step)
    return res
def assert_graph_eq(g1, g2):
    """Assert that two graphs are equal.

    Compares node sets, edge sets (ignoring direction when either graph
    is undirected), node attributes and edge attributes; raises
    AssertionError on the first mismatch.
    """
    assert set(g1.nodes()) == set(g2.nodes())
    if g1.is_directed() and g2.is_directed():
        assert set(g1.edges()) == set(g2.edges())
    else:
        # Undirected comparison: accept either orientation.
        for s, t in g1.edges():
            assert (s, t) in g2.edges() or (t, s) in g2.edges()
    for n in g1.nodes():
        assert g1.node[n] == g2.node[n]
    for u, v in g1.edges():
        assert g1.edge[u][v] == g2.edge[u][v]
    return
def format_typing(typing):
    """Normalize a typing specification into a dictionary of deep copies.

    Dict values are deep-copied as-is; length-2 sequences are deep-copied
    as-is; length-1 sequences are unwrapped to their single element.
    ``None`` yields an empty dict.

    :raises ReGraphError: if a value is neither a dict nor a sized
        sequence (narrowed from a bare ``except`` to ``TypeError``, which
        is what ``len()`` raises on unsized values).
    """
    if typing is None:
        typing = dict()
    new_typing = dict()
    for key, value in typing.items():
        if isinstance(value, dict):
            new_typing[key] = copy.deepcopy(value)
        else:
            try:
                if len(value) == 2:
                    new_typing[key] = copy.deepcopy(value)
                elif len(value) == 1:
                    new_typing[key] = copy.deepcopy(value[0])
            except TypeError as e:
                raise ReGraphError("Typing format is not valid!") from e
    return new_typing
def replace_source(n1, n2, mapping):
    """Rename key *n1* to *n2* in *mapping*, in place."""
    value = mapping[n1]
    mapping[n2] = value
    del mapping[n1]
def replace_target(n1, n2, mapping):
    """Replace every occurrence of value *n1* in *mapping* with *n2*, in place."""
    for key in mapping:
        if mapping[key] == n1:
            mapping[key] = n2
def id_of(elements):
    """Return the identity mapping over *elements*."""
    return dict((e, e) for e in elements)
def restrict_mapping(nodes, mapping):
    """Restrict *mapping* to the keys listed in *nodes*."""
    return {node: mapping[node] for node in nodes}
def reverse_image(mapping, nodes):
    """Return the keys of *mapping* whose image lies in *nodes* (preimage)."""
    sources = []
    for source in mapping:
        if mapping[source] in nodes:
            sources.append(source)
    return sources
def union_mappings(map1, map2):
    """Union of two mappings as a new dict.

    Raises ``ReGraphError`` if the same source is mapped to two
    different targets.
    """
    merged = copy.deepcopy(map1)
    for source, target in map2.items():
        if source not in merged:
            merged[source] = target
        elif merged[source] != target:
            raise ReGraphError("merging uncompatible mappings")
    return merged
def recursive_merge(dict1, dict2):
    """Merge *dict2* into *dict1* in place, recursing into nested dicts.

    When both sides hold a dict under the same key, the dicts are merged
    recursively; otherwise *dict2*'s value overwrites *dict1*'s.
    """
    for key, value in dict2.items():
        both_dicts = (key in dict1 and
                      isinstance(dict1[key], dict) and
                      isinstance(value, dict))
        if both_dicts:
            recursive_merge(dict1[key], value)
        else:
            dict1[key] = value
| {
"repo_name": "eugeniashurko/ReGraph",
"path": "regraph/utils.py",
"copies": "1",
"size": "37108",
"license": "mit",
"hash": 9146462481449485000,
"line_mean": 38.8154506438,
"line_max": 90,
"alpha_frac": 0.4202867306,
"autogenerated": false,
"ratio": 4.435572555582118,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0004126819807981237,
"num_lines": 932
} |
"""A collection of various helper functions and utility functions."""
import asyncio
import functools
import aiohttp
import websockets
from discord.utils import maybe_coroutine
from discord.errors import HTTPException, GatewayNotFound, ConnectionClosed
def estimate_reading_time(text):
    """Estimate how long a user needs to read *text*, in seconds.

    Assumes 0.9 seconds to react and start reading, plus a reading
    speed of 15 characters per second.  The result is rounded to one
    decimal place and never drops below the 2.4-second minimum.

    Parameters
    ----------
    text : str
        The text the reading time should be estimated for.
    """
    seconds = round(0.9 + len(text) / 15, 1)
    return max(seconds, 2.4)
def autorestart(delay_start=None, pause=None, restart_check=None):
    """Decorator that automatically restarts the decorated
    coroutine function when a connection issue occurs.

    NOTE(review): built on generator-based coroutines
    (``@asyncio.coroutine`` / ``yield from``), which were removed in
    Python 3.11 — this only runs on older Pythons.

    Parameters
    ----------
    delay_start : Callable
        Will be yielded from before starting the
        execution of the decorated coroutine function.
    pause : Callable
        Will be yielded from before restarting the
        execution of the decorated coroutine function.
        (As written, it also runs before the very first attempt,
        since it sits at the top of the retried body.)
    restart_check : Callable
        A callable that checks whether the decorated
        coroutine function should be restarted if it
        has been cancelled. Should return a truth value.
        May be a coroutine function.
    """
    # Validate the hooks eagerly so misuse fails at decoration time.
    if not (delay_start is None or callable(delay_start)):
        raise TypeError("delay_start must be a callable")
    if not (pause is None or callable(pause)):
        raise TypeError("pause must be a callable")
    if not (restart_check is None or callable(restart_check)):
        raise TypeError("restart_check must be a callable")
    def wrapper(coro):
        if not asyncio.iscoroutinefunction(coro):
            raise TypeError("decorated function must be a coroutine function")
        @functools.wraps(coro)
        @asyncio.coroutine
        def wrapped(*args, **kwargs):
            # delay_start runs once per call of wrapped (including the
            # recursive restart calls below).
            if delay_start is not None:
                yield from maybe_coroutine(delay_start)
            try:
                if pause is not None:
                    yield from maybe_coroutine(pause)
                return (yield from coro(*args, **kwargs))
            except asyncio.CancelledError:
                # Cancellation is re-raised unless restart_check asks
                # for a restart.
                if restart_check is not None and (yield from maybe_coroutine(restart_check)):
                    yield from wrapped(*args, **kwargs)
                else:
                    raise
            # catch connection issues
            except (OSError,
                    HTTPException,
                    GatewayNotFound,
                    ConnectionClosed,
                    aiohttp.ClientError,
                    asyncio.TimeoutError,
                    websockets.InvalidHandshake,
                    websockets.WebSocketProtocolError) as ex:
                # Restart on any connection error except a non-clean
                # websocket close (ConnectionClosed with code != 1000).
                # NOTE(review): restarts recurse into wrapped; a long
                # outage grows the coroutine stack — confirm acceptable.
                if any((isinstance(ex, ConnectionClosed) and ex.code == 1000,  # clean disconnect
                        not isinstance(ex, ConnectionClosed))):
                    yield from wrapped(*args, **kwargs)
                else:
                    raise
        return wrapped
    return wrapper
| {
"repo_name": "Dwarf-Community/Dwarf",
"path": "utils.py",
"copies": "1",
"size": "3222",
"license": "mit",
"hash": -5625878838442534000,
"line_mean": 35.6136363636,
"line_max": 97,
"alpha_frac": 0.6160769708,
"autogenerated": false,
"ratio": 5.034375,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.61504519708,
"avg_score": null,
"num_lines": null
} |
""" A collection of "worlds" suitable by solution by a CSP.
Each world has a make_XXX_SCP function that creates a new
CSP object, and some auxiliary utilities.
"""
import re, math
from collections import defaultdict
from types import StringTypes
from csplib import CSP
#----------------------------------------------------------------
#
# Map coloring
#
def unequal_vals_constraint(A, a, B, b):
    """Simple binary constraint: the two neighboring variables A and B
    must always be assigned different values (a and b).
    """
    return a != b
def make_map_coloring_CSP(colors, neighbors):
    """Build a map-coloring CSP.

    *neighbors* is either an adjacency dict or a string in the
    'X: Y Z; Y: Z' format accepted by parse_neighbors_graph.
    Every region may take any of *colors*; adjacent regions must differ.
    """
    if isinstance(neighbors, StringTypes):
        graph = parse_neighbors_graph(neighbors)
    else:
        graph = neighbors
    return CSP(
        vars=graph.keys(),
        domains=defaultdict(lambda: colors),
        neighbors=graph,
        binary_constraint=unequal_vals_constraint)
def parse_neighbors_graph(neighbors, vars=()):
    """A utility for converting a string of the form
    'X: Y Z; Y: Z' into a dict mapping variables to their
    neighbors. The syntax is a vertex name followed by a
    ':' followed by zero or more vertex names, followed by
    ';', repeated for each vertex. Neighborhood is
    commutative.

    'vars' may contain vertices that have no neighbors.
    (Its default was changed from a mutable ``[]`` to an immutable
    ``()`` — it is only iterated, so behaviour is unchanged, but the
    shared-mutable-default pitfall is gone.)
    """
    graph = defaultdict(list)
    for var in vars:
        graph[var] = []
    specs = [spec.split(':') for spec in neighbors.split(';')]
    for (v, v_neighbors) in specs:
        v = v.strip()
        graph.setdefault(v, [])
        for u in v_neighbors.split():
            graph[v].append(u)
            graph[u].append(v)
    return graph
def make_australia_CSP():
    """Classic Australia map-coloring CSP with colors R, G, B.

    Tasmania (T) has no neighbors, so it can take any color.
    """
    #
    # WA---NT---Q
    #   \   |  / \
    #    \  | /   \
    #     \ |/     \
    #      SA------NSW
    #        \      /
    #         \    /
    #          \  /
    #           V
    #
    #
    # T
    #
    return make_map_coloring_CSP(
        list('RGB'),
        'SA: WA NT Q NSW V; NT: WA Q; NSW: Q V; T: ')
def make_USA_CSP():
    """Map-coloring CSP for the contiguous US states (plus DC, AK, HI)
    with four colors R, G, B, Y. HI and AK have no neighbors.
    """
    return make_map_coloring_CSP(list('RGBY'),
        """WA: OR ID; OR: ID NV CA; CA: NV AZ; NV: ID UT AZ; ID: MT WY UT;
        UT: WY CO AZ; MT: ND SD WY; WY: SD NE CO; CO: NE KA OK NM; NM: OK TX;
        ND: MN SD; SD: MN IA NE; NE: IA MO KA; KA: MO OK; OK: MO AR TX;
        TX: AR LA; MN: WI IA; IA: WI IL MO; MO: IL KY TN AR; AR: MS TN LA;
        LA: MS; WI: MI IL; IL: IN; IN: KY; MS: TN AL; AL: TN GA FL; MI: OH;
        OH: PA WV KY; KY: WV VA TN; TN: VA NC GA; GA: NC SC FL;
        PA: NY NJ DE MD WV; WV: MD VA; VA: MD DC NC; NC: SC; NY: VT MA CT NJ;
        NJ: DE; DE: MD; MD: DC; VT: NH MA; MA: NH RI CT; CT: RI; ME: NH;
        HI: ; AK: """)
#----------------------------------------------------------------
#
# N-Queens
#
# (var, val) is (column, row)
# The domain is 0..N-1
#
def queens_constraint(A, a, B, b):
    """Constraint for N-Queens, where (var, val) is (column, row).

    Trivially satisfied when A and B are the same column; otherwise the
    two queens must not share a row or a diagonal.
    """
    if A == B:
        return True
    same_row = a == b
    same_diagonal = (A + a == B + b) or (A - a == B - b)
    return not (same_row or same_diagonal)
class NQueensCSP(CSP):
    """CSP specialization for N-Queens that adds a board pretty-printer."""
    def to_str(self, assignment):
        """Render *assignment* (column -> row) as a board: '*' marks a
        queen, 'o' an empty square.

        NOTE(review): iterates ``self.domains`` directly for the row
        values — assumes the CSP base class keeps it iterable over rows;
        confirm against csplib.
        """
        s = ''
        for row in self.domains:
            for col in self.vars:
                if assignment[col] == row:
                    s += '*'
                else:
                    s += 'o'
            s += '\n'
        return s
def make_NQueens_CSP(n):
    """Creates a N-Queens CSP problem for a given N.

    Variables are the board columns, values are the rows; every column
    is a neighbor of every other column.  Note that this isn't a
    particularly efficient representation.
    """
    columns = list(range(n))
    rows = list(range(n))
    neighbors = {}
    for col in columns:
        others = columns[:]
        others.remove(col)
        neighbors[col] = others
    return NQueensCSP(
        vars=columns,
        domains=defaultdict(lambda: rows),
        neighbors=neighbors,
        binary_constraint=queens_constraint)
#----------------------------------------------------------------
#
# Sudoku
#
# Vars are (row, col) pairs.
# Values are 1..9
#
class SudokuCSP(CSP):
    """CSP specialization for 9x9 Sudoku that adds a board pretty-printer."""
    def to_str(self, assignment):
        """Render *assignment* as an ASCII board; '_' marks an
        unassigned cell.
        """
        border = '+-------+-------+-------+\n'
        board = ''
        for row in range(9):
            if row % 3 == 0:
                board += border
            board += '| '
            for col in range(9):
                if (row, col) in assignment:
                    board += str(assignment[(row, col)])
                else:
                    board += '_'
                board += ' | ' if col % 3 == 2 else ' '
            board += '\n'
        return board + border
def cross(A, B):
    """Cartesian product of A and B as a list of (a, b) pairs."""
    pairs = []
    for left in A:
        for right in B:
            pairs.append((left, right))
    return pairs
def parse_sudoku_assignment(grid):
    """Given a string of 81 digits, return the assignment it
    represents as a {(row, col): digit} dict. 0 means unassigned.
    Whitespace is ignored.

    Fixes: uses the built-in ``next(digit)`` instead of the Python-2-only
    ``digit.next()`` (works on Python 2.6+ as well), and a raw string for
    the ``\\s`` regex.
    """
    digits = re.sub(r'\s', '', grid)
    assert len(digits) == 81
    digit = iter(digits)
    asg = {}
    for row in range(9):
        for col in range(9):
            d = int(next(digit))
            if d > 0:
                asg[(row, col)] = d
    return asg
def make_sudoku_CSP():
    """A regular 9x9 Sudoku puzzle. Note that it's an 'empty'
    Sudoku board. Solving partially filled boards is done
    by passing an initial assignment (obtained with
    parse_sudoku_assignment) to the solve_search method of
    the CSP.
    """
    rows = range(9)
    cols = range(9)
    # All (row, col) cells
    cells = cross(rows, cols)
    # Each cell may hold a digit 1..9
    domains = defaultdict(lambda: range(1, 10))
    boxes = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
    units = ([cross(rows, [c]) for c in cols] +
             [cross([r], cols) for r in rows] +
             [cross(rs, cs) for rs in boxes for cs in boxes])
    # Two cells are neighbors iff they share a row, a column or a
    # 3x3 box.  Sets are fine here: the CSP only needs neighbors to
    # be iterable.
    neighbors = defaultdict(lambda: set([]))
    for unit in units:
        for cell in unit:
            neighbors[cell].update(unit)
            neighbors[cell].remove(cell)
    return SudokuCSP(
        vars=cells,
        domains=domains,
        neighbors=neighbors,
        binary_constraint=unequal_vals_constraint)
#----------------------------------------------------------------
#
# Magic squares
#
class MagicSquareCSP(CSP):
    """CSP specialization for NxN magic squares with a pretty-printer."""
    def to_str(self, assignment):
        """Render *assignment* as an n x n grid; '_' marks an
        unassigned cell.  The side length is recovered from the
        number of variables.
        """
        size = int(math.sqrt(len(self.vars)))
        lines = []
        for row in range(size):
            line = ''
            for col in range(size):
                if (row, col) in assignment:
                    line += str(assignment[(row, col)])
                else:
                    line += '_'
                line += ' '
            lines.append(line + '\n')
        return ''.join(lines)
def make_magic_square_CSP(n):
    """ A NxN additive magic square

    A sample solution for 3x3:
    2 7 6
    9 5 1
    4 3 8
    (row, column and diagonal sum = 15)

    Variables are (row, col) cells, values are 1..n*n; an all-different
    binary constraint plus a global sum constraint on every row, column
    and the two diagonals.
    """
    rows = range(n)
    cols = range(n)
    vars = cross(rows, cols)
    domains = defaultdict(lambda: range(1, n*n + 1))
    # NOTE(review): '/' is Python-2 integer division here; under
    # Python 3 this becomes a float -- use '//' when porting.
    magic_sum = n * (n*n + 1) / 2
    # All cells are different --> neighbors of one another.
    #
    neighbors = {}
    for v in vars:
        neighbors[v] = vars[:]
        neighbors[v].remove(v)
    def check_sum(values):
        # A partial line is acceptable as long as its sum doesn't
        # exceed the magic sum; a complete line must hit it exactly.
        s = sum(values)
        if s > magic_sum:
            return False
        return not (len(values) == n and s != magic_sum)
    def sum_constraint(new_asgn, cur_asgn):
        # Global constraint: checks only the rows/columns/diagonals
        # touched by the newly assigned cells.
        square = {}
        square.update(new_asgn)
        square.update(cur_asgn)
        # Only new assignments can cause conflicts...
        #
        # NOTE(review): dict.iterkeys() is Python-2 only.
        for (vrow, vcol) in new_asgn.iterkeys():
            #~ if check_sum([square.get((vrow, col), 0) for col in cols]) == False:
            if check_sum([square[(vrow, col)] for col in cols if (vrow, col) in square]) == False:
                return False
            #~ if check_sum([square.get((row, vcol), 0) for row in rows]) == False:
            if check_sum([square[(row, vcol)] for row in rows if (row, vcol) in square]) == False:
                return False
            # \ diagonal
            if (    vrow == vcol and
                    check_sum([square[(row, row)] for row in rows if (row, row) in square]) == False):
                return False
            # / diagonal
            if (    vrow == n - 1 - vcol and
                    check_sum([square[(n - 1 - row, row)]
                                for row in rows
                                if (n - 1 - row, row) in square]) == False):
                return False
        return True
    return MagicSquareCSP(
        vars=vars,
        domains=domains,
        neighbors=neighbors,
        binary_constraint=unequal_vals_constraint,
        global_constraint=sum_constraint)
#----------------------------------------------------------------
#
# Magic gons (Project Euler problem 68)
#
class Magic3gonCSP(CSP):
    """CSP specialization for the magic 3-gon ring; adds a pretty-printer."""
    def to_str(self, assignment):
        """Render *assignment* as a rough picture of the 3-gon.
        Unassigned positions show as '*'.
        """
        asgn = defaultdict(lambda: '*')
        asgn.update(assignment)
        s = ''
        s += ' %s\n' % asgn[2]
        s += '\n'
        s += ' %s\n' % asgn[4]
        s += '\n'
        s += ' %s %s %s\n' % (asgn[3], asgn[5], asgn[6])
        s += '\n'
        s += '%s\n' % asgn[1]
        return s
def make_magic_3gon_CSP():
    """Build the CSP for a magic 3-gon ring (Project Euler 68 style):
    six positions holding distinct values 1..6 such that the three
    lines of the ring all sum to the same total.
    """
    vars = range(1, 7)
    domains = defaultdict(lambda: vars)
    # All positions must be pairwise different --> full neighborhood.
    neighbors = {}
    for v in vars:
        neighbors[v] = vars[:]
        neighbors[v].remove(v)
    # The three lines of the ring; each must have the same total.
    groups = [[1, 3, 4], [2, 4, 5], [6, 5, 3]]
    def sum_constraint(new_asgn, cur_asgn):
        # Unassigned positions default to 999, so any incomplete
        # line (total >= 1000) is skipped by the check below.
        asgn = defaultdict(lambda: 999)
        asgn.update(new_asgn)
        asgn.update(cur_asgn)
        expected = None
        for group in groups:
            total = sum(asgn[pos] for pos in group)
            if total < 1000:
                if expected is None:
                    expected = total
                elif expected != total:
                    return False
        return True
    return Magic3gonCSP(
        vars=vars,
        domains=domains,
        neighbors=neighbors,
        binary_constraint=unequal_vals_constraint,
        global_constraint=sum_constraint)
class Magic5gonCSP(CSP):
    """CSP specialization for the magic 5-gon ring; adds a pretty-printer."""
    def to_str(self, assignment):
        """Render *assignment* as a rough picture of the 5-gon.
        Unassigned positions show as '*'.
        """
        asgn = defaultdict(lambda: '*')
        asgn.update(assignment)
        s = ''
        s += ' %s %s\n' % (asgn[2], asgn[9])
        s += ' %s\n' % asgn[5]
        s += ' %s %s\n' % (asgn[3], asgn[8])
        s += '%s\n' % (asgn[1])
        s += ' %s %s %s\n' % (asgn[4], asgn[7], asgn[10])
        s += '\n'
        s += ' %s\n' % asgn[6]
        return s
def make_magic_5gon_CSP():
    """Build the CSP for a magic 5-gon ring (Project Euler problem 68):
    ten positions holding distinct values 1..10 such that the five
    lines of the ring all sum to the same total.
    """
    vars = range(1, 11)
    domains = defaultdict(lambda: vars)
    # All positions must be pairwise different --> full neighborhood.
    neighbors = {}
    for v in vars:
        neighbors[v] = vars[:]
        neighbors[v].remove(v)
    # The five lines of the ring; each must have the same total.
    groups = [[1, 3, 5], [2, 5, 8], [9, 8, 7], [10, 7, 4], [6, 4, 3]]
    def sum_constraint(new_asgn, cur_asgn):
        # Unassigned positions default to 999, so any incomplete
        # line (total >= 1000) is skipped by the check below.
        asgn = defaultdict(lambda: 999)
        asgn.update(new_asgn)
        asgn.update(cur_asgn)
        expected = None
        for group in groups:
            total = sum(asgn[pos] for pos in group)
            if total < 1000:
                if expected is None:
                    expected = total
                elif expected != total:
                    return False
        return True
    return Magic5gonCSP(
        vars=vars,
        domains=domains,
        neighbors=neighbors,
        binary_constraint=unequal_vals_constraint,
        global_constraint=sum_constraint)
| {
"repo_name": "sunilthorat09/code-for-blog",
"path": "2009/csp_for_euler68/csp_sample_problems.py",
"copies": "13",
"size": "12407",
"license": "unlicense",
"hash": 730510484253290400,
"line_mean": 25.6837416481,
"line_max": 102,
"alpha_frac": 0.4577254776,
"autogenerated": false,
"ratio": 3.6816023738872405,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
"""A collection of wrappers around subprocess."""
import os
import subprocess # nosec
def swallow(command):
    """Execute a command, discarding stdout and stderr.

    :param command: command to run, either an argv list or a
        whitespace-separated string
    :return: the command's integer exit status
    """
    argv = command.split() if isinstance(command, str) else command
    with open(os.devnull, 'w') as sink:
        return subprocess.call(argv, stdout=sink, stderr=sink)
def stdout(command):
    """Execute a command, discard its stderr, and return its stdout.

    :param command: command to run, either an argv list or a
        whitespace-separated string
    :return: the raw bytes the command wrote to standard output
    """
    argv = command.split() if isinstance(command, str) else command
    with open(os.devnull, 'w') as sink:
        proc = subprocess.Popen(argv, stdout=subprocess.PIPE, stderr=sink)
        return proc.communicate()[0]
def call_input(command, input_):
    """Execute a command, feeding *input_* to its standard input.

    :param command: command to run, either an argv list or a
        whitespace-separated string
    :param input_: bytes to write to the command's stdin
    :return: the command's integer exit status
    """
    argv = command.split() if isinstance(command, str) else command
    proc = subprocess.Popen(argv, stdin=subprocess.PIPE)
    proc.communicate(input=input_)
    return proc.returncode
def execute(command):
    """Execute a command, capturing both stdout and stderr.

    :param command: command to run, either an argv list or a
        whitespace-separated string
    :return: tuple of (stdout bytes, stderr bytes, integer exit status)
    """
    argv = command.split() if isinstance(command, str) else command
    proc = subprocess.Popen(argv, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = proc.communicate()
    return out, err, proc.returncode
def check_output(command):
    """Execute a command and return its stdout, raising on non-zero exit.

    :param command: command to run, either an argv list or a
        whitespace-separated string
    :return: the raw bytes the command wrote to standard output
    :raises subprocess.CalledProcessError: if the command exits non-zero
    """
    argv = command.split() if isinstance(command, str) else command
    return subprocess.check_output(argv)
def call(command):
    """Execute a command with inherited stdio and return its exit status.

    :param command: command to run, either an argv list or a
        whitespace-separated string
    :return: the command's integer exit status
    """
    argv = command.split() if isinstance(command, str) else command
    return subprocess.call(argv)
def pipe(command1, command2):
    """Pipe the stdout of *command1* into the stdin of *command2*.

    :param command1: producer command, either an argv list or a
        whitespace-separated string
    :param command2: consumer command, either an argv list or a
        whitespace-separated string
    """
    # Accept whitespace-separated strings for consistency with the other
    # helpers in this module (previously only lists were supported here).
    if isinstance(command1, str):
        command1 = command1.split()
    if isinstance(command2, str):
        command2 = command2.split()
    producer = subprocess.Popen(command1, stdout=subprocess.PIPE)
    subprocess.call(command2, stdin=producer.stdout)
    # Close our handle on the pipe so the producer is not kept writable
    # by this process and can terminate cleanly before we reap it.
    producer.stdout.close()
    producer.wait()
| {
"repo_name": "Brickstertwo/git-commands",
"path": "bin/commands/utils/execute.py",
"copies": "1",
"size": "1712",
"license": "mit",
"hash": -5812327511511106000,
"line_mean": 27.5333333333,
"line_max": 97,
"alpha_frac": 0.6933411215,
"autogenerated": false,
"ratio": 4.066508313539193,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00036614645858343335,
"num_lines": 60
} |
"""A collection of ZeroMQ servers
test_notification_service - send out notifications of test status changes
- Registers a PULL socket that model.py sends notifications of tests to.
- Registers a PUB socket that broadcasts notifications to cluster_api websocket subscribers.
console_monitor_service - monitor the console out of a cluster
"""
import zmq
from daemonize import Daemonize
import argparse
import logging
import traceback
import threading
import time
from collections import defaultdict, deque
from functools import partial
import json
import datetime
log = logging.getLogger(__name__)

# Producers PUSH messages to the *_PUSH ports; the services rebroadcast on
# the *_SUB ports, which clients SUBscribe to.
TEST_NOTIFICATION_PORT_PUSH = 5556
TEST_NOTIFICATION_PORT_SUB = 5557
CONSOLE_MONITOR_PORT_PUSH = 5558
CONSOLE_MONITOR_PORT_SUB = 5559
def test_notification_service(port_pull=TEST_NOTIFICATION_PORT_PUSH, port_pub=TEST_NOTIFICATION_PORT_SUB, ip='127.0.0.1'):
    """Relay test status notifications.

    Binds a PULL socket that model.py pushes notifications to, and a PUB
    socket that rebroadcasts every received message to subscribers.
    Runs until an unhandled error occurs, which is logged.

    :param port_pull: port to bind the PULL (receiving) socket on
    :param port_pub: port to bind the PUB (broadcast) socket on
    :param ip: interface address to bind both sockets on
    """
    url_pull = "tcp://{ip}:{port_pull}".format(**locals())
    url_pub = "tcp://{ip}:{port_pub}".format(**locals())
    try:
        # Fixed typo in log message ('staring' -> 'starting').
        log.info('test_notification_service starting')
        log.info('test_notification_service pull url: {url_pull}'.format(**locals()))
        log.info('test_notification_service pub url: {url_pub}'.format(**locals()))
        context = zmq.Context()
        receiver = context.socket(zmq.PULL)
        receiver.bind(url_pull)
        publisher = context.socket(zmq.PUB)
        publisher.bind(url_pub)
        while True:
            data = receiver.recv_string()
            log.info('notification: {data}'.format(data=data))
            publisher.send_string(data)
    except Exception:
        # Log every error. If we're not running in the foreground, we
        # won't see the errors any other way.  (The bound exception was
        # unused, and 'except Exception, e' is Python-2-only syntax.)
        log.error(traceback.format_exc())
    log.info("test_notification_service shutdown")
def console_monitor_service(port_pull=CONSOLE_MONITOR_PORT_PUSH, port_pub=CONSOLE_MONITOR_PORT_SUB, ip='127.0.0.1'):
    """Relay cluster console output to subscribers, replaying history.

    Binds a PULL socket that console producers push to, and an XPUB socket
    so subscription events are visible: when a client subscribes to a
    cluster's 'console' topic, the cached backlog (up to 100 messages) is
    replayed to it marked realtime=False, after which live messages arrive
    marked realtime=True.

    :param port_pull: port to bind the PULL (receiving) socket on
    :param port_pub: port to bind the XPUB (broadcast) socket on
    :param ip: interface address to bind both sockets on
    """
    url_pull = "tcp://{ip}:{port_pull}".format(**locals())
    url_pub = "tcp://{ip}:{port_pub}".format(**locals())
    try:
        log.info('console_monitor_service staring')
        log.info('console_monitor_service pull url: {url_pull}'.format(**locals()))
        log.info('console_monitor_service pub url: {url_pub}'.format(**locals()))
        context = zmq.Context()
        receiver = context.socket(zmq.PULL)
        receiver.bind(url_pull)
        # XPUB (rather than PUB) so subscribe/unsubscribe events arrive as
        # readable messages on this socket.
        publisher = context.socket(zmq.XPUB)
        publisher.bind(url_pub)
        poller = zmq.Poller()
        poller.register(receiver, zmq.POLLIN)
        poller.register(publisher, zmq.POLLIN)
        # Cache the last 100 messages per cluster:
        cache = defaultdict(partial(deque, maxlen=100)) # cluster_name -> deque
        while True:
            events = dict(poller.poll(1000))
            if receiver in events:
                data = receiver.recv()
                # Wire format: "<topic> <cluster> <json payload>"
                topic, cluster, msg = data.split(' ', 2)
                cache[cluster].append(msg)
                # Mark message as realtime:
                msg = json.loads(msg)
                msg['realtime'] = True
                msg = json.dumps(msg)
                data = " ".join([topic, cluster, msg])
                log.debug("PUB - {msg}".format(msg=data))
                publisher.send(data)
            if publisher in events:
                event = publisher.recv()
                # Subscription events are one byte: 0=unsub or 1=sub,
                # followed by topic:
                if event[0] == b'\x01':
                    topic, cluster = event[1:].strip().split(" ")
                    log.debug("SUBSCRIBE - {sub}".format(sub=event[1:]))
                    if topic == 'console':
                        # Client subscribed, send out previous messages:
                        log.debug("Sending backlog:")
                        for msg in cache[cluster]:
                            # Mark messages as non-realtime:
                            data = json.loads(msg)
                            data['realtime'] = False
                            msg = json.dumps(data)
                            data = "console {cluster} {msg}".format(cluster=cluster, msg=msg)
                            log.debug(data)
                            publisher.send(data)
                elif event[0] == b'\x00':
                    log.debug("UNSUBSCRIBE - {sub}".format(sub=event[1:]))
    except Exception, e:
        # Log every error. If we're not running in the foreground, we
        # won't see the errors any other way:
        log.error(traceback.format_exc())
    log.info("console_monitor_service shutdown")
def multi_service():
    """Start all the services in separate threads"""
    threads = []
    for service in [test_notification_service, console_monitor_service]:
        threads.append(threading.Thread(target=service))
    for thread in threads:
        # Daemon threads die with the main thread, so Ctrl-C exits cleanly
        # without joining.
        thread.daemon = True
        thread.start()
    # Keep the main thread alive; poll in short sleeps so KeyboardInterrupt
    # is delivered promptly.
    while threading.active_count() > 0:
        try:
            time.sleep(0.1)
        except KeyboardInterrupt:
            exit()
def zmq_socket_subscribe(url, topic='', timeout=5000):
    """Return a SUB socket connected to *url* and subscribed to *topic*.

    :param url: ZeroMQ endpoint, e.g. 'tcp://localhost:5559'
    :param topic: subscription prefix filter ('' subscribes to everything)
    :param timeout: receive timeout in milliseconds
    :return: the connected zmq SUB socket
    """
    zmq_context = zmq.Context()
    zmq_socket = zmq_context.socket(zmq.SUB)
    zmq_socket.connect(url)
    # NOTE(review): 'unicode' makes this function Python-2-only.
    zmq_socket.setsockopt_string(
        zmq.SUBSCRIBE,
        unicode(topic))
    # Timeout:
    zmq_socket.setsockopt(zmq.RCVTIMEO, timeout)
    return zmq_socket
def console_publish(cluster_name, data):
    """Publish a console message or control message

    cluster_name - the name of the cluster the data came from
    data - a dictionary containing the following:
      job_id - the job id the cluster is currently working on
      msg    - a message shown on the console
      ctl    - A control message indicating cluster status START, DONE, IDLE
    A 'timestamp' (seconds since the Unix epoch, UTC) is added if absent.
    """
    zmq_context = zmq.Context()
    zmq_socket = zmq_context.socket(zmq.PUSH)
    zmq_socket.connect("tcp://127.0.0.1:{port}".format(port=CONSOLE_MONITOR_PORT_PUSH))
    # dict.has_key() is deprecated (and removed in Python 3); use 'in'.
    if 'timestamp' not in data:
        data['timestamp'] = (datetime.datetime.utcnow() - datetime.datetime(1970,1,1)).total_seconds()
    zmq_socket.send_string("console {cluster_name} {data}".format(
        cluster_name=cluster_name,
        data=json.dumps(data)))
def console_subscribe(cluster_name):
    """Return a SUB socket receiving console messages for *cluster_name*.

    The trailing space in the topic filter prevents prefix collisions
    between cluster names (e.g. 'foo' vs 'foobar').
    """
    return zmq_socket_subscribe(
        'tcp://localhost:{port}'.format(port=CONSOLE_MONITOR_PORT_SUB),
        'console {cluster_name} '.format(cluster_name=cluster_name))
def main():
    """Parse arguments, configure file (and optional console) logging, and
    run the notification services, in the foreground or daemonized."""
    parser = argparse.ArgumentParser(description='cstar_perf_notifications')
    parser.add_argument('-F', '--foreground', dest='foreground',
                        action='store_true', help='Run in the foreground instead of daemonizing')
    parser.add_argument('--pid', default="/tmp/cstar_perf_notifications.pid",
                        help='PID file for daemon', dest='pid')
    parser.add_argument('-l', '--log', default='/tmp/cstar_perf_notifications.log',
                        help='File to log to', dest='logfile')
    parser.add_argument('-v', '--verbose', action='store_true',
                        help='Print log messages', dest='verbose')
    args = parser.parse_args()
    # Always log DEBUG and above to the log file.
    log.setLevel(logging.DEBUG)
    log.propagate = False
    fh = logging.FileHandler(args.logfile, "a")
    formatter = logging.Formatter("%(levelname)s:%(funcName)s:%(asctime) -8s %(message)s")
    fh.setFormatter(formatter)
    fh.setLevel(logging.DEBUG)
    log.addHandler(fh)
    if args.verbose:
        # Mirror log output to stderr when running interactively.
        sh = logging.StreamHandler()
        sh.setFormatter(formatter)
        sh.setLevel(logging.DEBUG)
        log.addHandler(sh)
    # Keep the log file descriptor open across daemonization.
    keep_fds = [fh.stream.fileno()]
    if args.foreground:
        multi_service()
    else:
        daemon = Daemonize(app="notifications", pid=args.pid, action=multi_service, keep_fds=keep_fds)
        daemon.start()
| {
"repo_name": "datastax/cstar_perf",
"path": "frontend/cstar_perf/frontend/server/notifications.py",
"copies": "2",
"size": "7873",
"license": "apache-2.0",
"hash": 5139459249352286000,
"line_mean": 37.0338164251,
"line_max": 122,
"alpha_frac": 0.6037088784,
"autogenerated": false,
"ratio": 4.045734840698869,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5649443719098869,
"avg_score": null,
"num_lines": null
} |
"""A colored output console.
Example
-------
from cmt.ui.widgets.outputconsole import OutputConsole
widget = OutputConsole()
widget.add_color("^Error", 217, 83, 79)
widget.add_color("^Fail", 240, 173, 78)
widget.add_color("^Success", 92, 184, 92)
widget.show()
widget.write("And now some text\n")
widget.write("ERROR: This is an error\n")
widget.write("FAIL: We have failed\n")
widget.write("SUCCESS: We did it!\n")
"""
import re
from PySide2.QtGui import *
from PySide2.QtCore import *
from PySide2.QtWidgets import *
class OutputConsole(QTextEdit):
    """Colored text output console.

    A read-only QTextEdit that colors each written chunk by matching it
    against registered regular expressions. Implements write()/flush() so
    it can stand in for a stream object (e.g. sys.stdout).
    """
    # Default text color used when no registered regex matches.
    normal_color = QColor(200, 200, 200)
    def __init__(self, parent=None):
        """Constructor

        :param parent: Parent QWidget.
        """
        super(OutputConsole, self).__init__(parent)
        self.setReadOnly(True)
        # Maps compiled regex -> QColor for text matching that regex.
        self.color_regex = {}
        self.setTextColor(OutputConsole.normal_color)
    def add_color(self, regex, r, g, b):
        """Add a regex with associated color.

        :param regex: Regular expression pattern (matched case-insensitively)
        :param r: Red 0-255
        :param g: Green 0-255
        :param b: Blue 0-255
        """
        regex = re.compile(regex, re.IGNORECASE)
        self.color_regex[regex] = QColor(r, g, b)
    def write(self, text):
        """Write text into the QTextEdit, colored by the first regex that
        matches (dict iteration order; insertion order on Python 3.7+)."""
        # Color the output if it matches any regex
        for regex, color in self.color_regex.items():
            if regex.search(text):
                self.setTextColor(color)
                break
        self.insertPlainText(text)
        # Restore the default color for subsequent writes.
        self.setTextColor(OutputConsole.normal_color)
    def flush(self):
        """Required for stream purposes"""
        pass
| {
"repo_name": "chadmv/cmt",
"path": "scripts/cmt/ui/widgets/outputconsole.py",
"copies": "1",
"size": "1696",
"license": "mit",
"hash": -4744314602680346000,
"line_mean": 26.3548387097,
"line_max": 54,
"alpha_frac": 0.6208726415,
"autogenerated": false,
"ratio": 3.711159737417943,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9832032378917943,
"avg_score": 0,
"num_lines": 62
} |
"""A Column represents a deferred Tensor computation in a DataFrame."""
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from abc import ABCMeta
class Column(object):
    """A single output column.

    Represents the deferred construction of a graph that computes the column
    values.

    Note every `Column` should be a `TransformedColumn`, except when mocked.
    """
    # Python 2 style abstract-base-class declaration (ignored on Python 3).
    __metaclass__ = ABCMeta

    def build(self, cache):
        """Returns a Tensor."""
        # Subclasses must override; see TransformedColumn.build.
        raise NotImplementedError()
class TransformedColumn(Column):
    """A `Column` that results from applying a `Transform` to a list of inputs."""

    def __init__(self, input_columns, transform, output_name):
        """Record the inputs and validate them against the transform.

        Args:
          input_columns: the input `Column`s consumed by `transform`
          transform: the `Transform` whose output this column represents
          output_name: the name of the specific `Transform` output to select

        Raises:
          ValueError: if `output_name` is None, or the number of inputs does
            not match `transform.input_valency`.
        """
        super(TransformedColumn, self).__init__()
        self._input_columns = input_columns
        self._transform = transform
        self._output_name = output_name
        if output_name is None:
            raise ValueError("output_name must be provided")
        if len(input_columns) != transform.input_valency:
            raise ValueError("Expected %s input Columns but received %s." %
                             (transform.input_valency, len(input_columns)))
        self._repr = TransformedColumn.make_repr(
            self._input_columns, self._transform, self._output_name)

    def build(self, cache=None):
        """Apply the transform and return the Tensor for this column."""
        tensor_cache = {} if cache is None else cache
        outputs = self._transform.apply_transform(self._input_columns,
                                                  tensor_cache)
        return getattr(outputs, self._output_name)

    def __repr__(self):
        return self._repr

    # Note we need to generate column reprs from Transform, without needing the
    # columns themselves. So we just make this public. Alternatively we could
    # create throwaway columns just in order to call repr() on them.
    @staticmethod
    def make_repr(input_columns, transform, output_name):
        """Generate a key for caching Tensors produced for a TransformedColumn.

        The key is a deterministic, unique string identifying which transform
        was applied to which inputs, and which output was selected.

        Args:
          input_columns: the input `Columns` for the `Transform`
          transform: the `Transform` being applied
          output_name: the name of the specific output from the `Transform`
            that is to be cached

        Returns:
          A string suitable for use as a cache key for Tensors produced via a
          TransformedColumn
        """
        joined_inputs = ", ".join(repr(column) for column in input_columns)
        return "%s(%s)[%s]" % (repr(transform), joined_inputs, output_name)
| {
"repo_name": "ivano666/tensorflow",
"path": "tensorflow/contrib/learn/python/learn/dataframe/column.py",
"copies": "2",
"size": "3187",
"license": "apache-2.0",
"hash": -333943839374474700,
"line_mean": 33.2688172043,
"line_max": 80,
"alpha_frac": 0.7053655475,
"autogenerated": false,
"ratio": 4.193421052631579,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.015509869351332015,
"num_lines": 93
} |
"""Acolyte models"""
from acolyte.database import db
class School(db.Model):
    """School SqlAlchemy model

    A school of magic; its spells are reachable via the ``spells`` backref
    declared on the Spell relationship.
    """
    # Surrogate primary key.
    id = db.Column(db.Integer, primary_key=True)
    # Display name of the school.
    name = db.Column(db.String(50))
    def __init__(self, *args, **kwargs):
        """Initialise model.

        Keyword Arguments:
            name {str} -- School name
        """
        # Positional args are accepted but ignored.
        self.name = kwargs.get('name')
class Spell(db.Model):
    """Spell SqlAlchemy model

    A single spell belonging to a School (many-to-one via ``school_id``).
    """
    # Surrogate primary key.
    id = db.Column(db.Integer, primary_key=True)
    # Display name of the spell.
    name = db.Column(db.String(100))
    # Minimum dice roll required to cast the spell.
    required = db.Column(db.Integer)
    # What the spell targets.
    target = db.Column(db.String(50))
    # Long-form description / rules text.
    description = db.Column(db.Text)
    # Owning school; deleting a school deletes its spells (delete-orphan).
    school_id = db.Column(db.Integer, db.ForeignKey('school.id'))
    school = db.relationship('School', backref=db.backref(
        'spells',
        order_by=name,
        cascade="all, delete-orphan"))
    def __init__(self, **kwargs):
        """Initialise model

        Arguments:
            **kwargs {list} -- Keyword arguments

        Keyword Arguments:
            name {str} -- Spell name
            school_id {int} -- Spell school ID
            required {int} -- Required dice roll
            target {str} -- Spell target
            description {str} -- Spell description
        """
        self.name = kwargs.get('name')
        self.school_id = kwargs.get('school_id')
        self.required = kwargs.get('required')
        self.target = kwargs.get('target')
        self.description = kwargs.get('description')
| {
"repo_name": "rabramley/frostgrave_acolyte",
"path": "acolyte/models.py",
"copies": "1",
"size": "1392",
"license": "mit",
"hash": 7205725785750463000,
"line_mean": 25.7692307692,
"line_max": 65,
"alpha_frac": 0.5804597701,
"autogenerated": false,
"ratio": 3.5876288659793816,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46680886360793816,
"avg_score": null,
"num_lines": null
} |
"""Acolyte spell importer"""
import yaml
from acolyte.models import Spell, School
from acolyte.database import db
def import_spells(app, spell_path):
    """Import spells from a YAML file

    Creates any missing School rows, then any missing Spell rows for each
    school, committing once at the end.

    Arguments:
        app {obj} -- Acolyte application
        spell_path {str} -- Path to spells YAML file
    """
    app.logger.info('Importing spells from {}'.format(spell_path))
    with open(spell_path, "r", encoding="utf-8") as f:
        # safe_load_all: plain yaml.load_all can construct arbitrary Python
        # objects from the document; the spell file only needs plain data.
        spells = yaml.safe_load_all(f)
        for school_details in spells:
            school = School.query.filter(
                School.name == school_details['name']).first()
            if not school:
                app.logger.info('New School: {}'.format(
                    school_details['name'].title()))
                school = School(
                    name=school_details['name']
                )
                db.session.add(school)
                # Flush so the new school is assigned its primary key now;
                # otherwise school.id below would still be None and every
                # spell of a brand-new school would get a NULL school_id.
                db.session.flush()
            for spell_details in school_details['spells']:
                spell = Spell.query.filter(
                    Spell.name == spell_details['name'].title()).first()
                if not spell:
                    app.logger.info('New Spell: {}'.format(
                        spell_details['name'].title()))
                    spell = Spell(
                        name=spell_details['name'].title(),
                        school_id=school.id,
                        required=spell_details['required'],
                        target=spell_details['target'],
                        description=spell_details['description']
                    )
                    db.session.add(spell)
        db.session.commit()
    app.logger.info('Importing spells completed')
| {
"repo_name": "rabramley/frostgrave_acolyte",
"path": "acolyte/importer.py",
"copies": "1",
"size": "1676",
"license": "mit",
"hash": -7347754990143360000,
"line_mean": 29.4727272727,
"line_max": 72,
"alpha_frac": 0.5089498807,
"autogenerated": false,
"ratio": 4.097799511002445,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5106749391702445,
"avg_score": null,
"num_lines": null
} |
# A combination of both the DHT and pihole scripts
# Requires adafruit python DHT11, DHT22 Library. See other DHT script
# Bradley Gillap 2016
import lcddriver #Driver
import socket #For host and IP
import time #For general timers. Sleep etc.
import fcntl #For host and IP
import struct
import json #For pihole API
import urllib2 #For pihole API
import Adafruit_DHT as dht #Arguments dht instead of Adafruit_DHT, DHT11 device, GPIO26
#Initialize IP Address Check
def get_ip_address(ifname):
    """Return the IPv4 address bound to network interface *ifname*.

    Uses the SIOCGIFADDR ioctl on a throwaway UDP socket, so it works
    without external connectivity (Linux only).

    :param ifname: interface name, e.g. 'eth0' (only first 15 chars used)
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    # Bytes 20..24 of the returned ifreq struct hold the in_addr.
    return socket.inet_ntoa(fcntl.ioctl(
        s.fileno(),
        0x8915, # SIOCGIFADDR
        struct.pack('256s', ifname[:15])
    )[20:24])
#Initialize Hostname Check
socket.gethostbyname(socket.gethostname())
#VARIABLES
# If you use something from the driver library use the "display." prefix first
pin = 26 #GPIO pin we are communicating on CHANGE THIS
h,t = dht.read_retry(dht.DHT11, pin) #Initial DHT read. ARG DHT11 or DHT22 sensor
temp = 'Temp : {0:0.1f}C'.format(t) #Store temp string info
humid = 'Humid: {1:0.1f}%'.format(t,h) #Positional {1} selects h, so both t and h are passed
display = lcddriver.lcd() #Load lcddriver and set it to display
ipaddy = get_ip_address('eth0') #Define Ip address variable
url = ("http://" + str(ipaddy) + "/admin/api.php") #pihole API endpoint on this host
#INIT FUNCTIONS KILLING KITTENS
def pihole_hit(): #Function to poll the API for pihole.
    """Poll the local pihole admin API and refresh the global stats.

    Updates globals: data, blocked, percent, queries, domains.
    """
    global data
    global blocked
    global percent
    global queries
    global domains
    data = json.load(urllib2.urlopen(url)) #Store pihole api data.
    blocked = data['ads_blocked_today']
    percent = data['ads_percentage_today']
    queries = data['dns_queries_today']
    domains = data['domains_being_blocked']
def temp_hit(): #Poll the temp sensor and update vars
    """Re-read the DHT sensor and refresh the global temp/humid strings."""
    global h,t
    global temp
    global humid
    h,t = dht.read_retry(dht.DHT11, pin) #Change sensor here DHT11, DHT22 etc
    temp = 'Temp : {0:0.1f}C'.format(t) #Formatting
    humid = 'Humid: {1:0.1f}%'.format(t,h) #Positional {1} selects h, so both args are needed
#DISPLAY FUNCTIONS to output to the lcd.
def net_info():
    """Show hostname and IP address on the 16x2 LCD."""
    display.lcd_clear()
    display.lcd_display_string("Host:" + str((socket.gethostname())), 1) #Show host
    display.lcd_display_string("IP:" + str(ipaddy), 2) #Show IP address
def pi_info1():
    """Show ads blocked today and blocked percentage on the LCD."""
    display.lcd_clear()
    display.lcd_display_string("Blocked: " + str(blocked), 1) #Show sites blocked on screen max 16 chars
    display.lcd_display_string("Percent: " + str(percent)+"%", 2) #Show percentage of sites blocked max 16 chars
def pi_info2():
    """Show total DNS queries and blocklist domain count on the LCD."""
    display.lcd_clear()
    display.lcd_display_string("Queries: " + str(queries), 1) #Show total queries on screen max 16 chars
    display.lcd_display_string("Domains: " + str(domains), 2) #Show total domains in blocklist max 16 chars
def temp_info():
    """Show the most recent temperature and humidity readings on the LCD."""
    display.lcd_clear()
    display.lcd_display_string(temp, 1) #write temp to screen
    display.lcd_display_string(humid, 2) #write humdity to screen
#Infinite Loop to Screen
lcdloop = 1
while lcdloop == 1 : # This constructs an infinite loop
    # Refresh data sources, then cycle through the four screens at
    # 4 seconds each (full cycle ~16s plus sensor/API latency).
    pihole_hit()
    temp_hit()
    net_info()
    time.sleep(4)
    pi_info2()
    time.sleep(4)
    pi_info1()
    time.sleep(4)
    temp_info()
    time.sleep(4)
| {
"repo_name": "bradgillap/I2C-LCD-Display",
"path": "16x2LCD/dht11pluspihole.py",
"copies": "1",
"size": "3371",
"license": "apache-2.0",
"hash": -629108071137612200,
"line_mean": 36.043956044,
"line_max": 122,
"alpha_frac": 0.6701275586,
"autogenerated": false,
"ratio": 3.050678733031674,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9048562717411086,
"avg_score": 0.03444871484411746,
"num_lines": 91
} |
"""A combined model combines multiple models."""
import itertools
import logging
from label_microservice import models
class CombinedLabelModels(models.IssueLabelModel):
    """Generate predictions with multiple models and then combine the results"""

    def __init__(self, models=None):
        """
        Args:
          models: list of models used to generate predictions
        """
        # A list of models to generate predictions
        self._models = models

    def predict_issue_labels(self, org: str, repo: str,
                             title: str, text: str, context=None):
        """Return a dictionary of label probabilities.

        Runs every configured model and keeps, for each label, the maximum
        probability any model assigned to it.

        Args:
          org: GitHub organization of the issue's repo
          repo: repository name
          title: The title for the issue
          text: The text for the issue
          context: optional extra context forwarded to each model

        Return
        ------
        dict: Dictionary of label to probability of that label for the
        the issue str -> float

        Raises:
          ValueError: if no models were supplied at construction time.
        """
        if not self._models:
            raise ValueError("Can't generate predictions; no models loaded")

        combined = {}
        for index, current_model in enumerate(self._models):
            logging.info(f"Generating predictions with model {index}")
            latest = current_model.predict_issue_labels(
                org, repo, title, text, context=context)
            combined = self._combine_predictions(combined, latest)
        return combined

    @staticmethod
    def _combine_predictions(left, right):
        """Combine two sets of predictions by taking the max probability."""
        merged = dict(left)
        for label, probability in right.items():
            # Absent labels just take the new probability; present ones keep
            # the larger of the two.
            merged[label] = max(merged.get(label, probability), probability)
        return merged
| {
"repo_name": "kubeflow/code-intelligence",
"path": "py/label_microservice/combined_model.py",
"copies": "1",
"size": "1511",
"license": "mit",
"hash": 4814102127452941000,
"line_mean": 26.9814814815,
"line_max": 78,
"alpha_frac": 0.6704169424,
"autogenerated": false,
"ratio": 4.497023809523809,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.007264865097084072,
"num_lines": 54
} |
"""A command line interface (CLI) to the main PUDL ETL functionality.
This script generates datapacakges based on the datapackage settings enumerated
in the settings_file which is given as an argument to this script. If the
settings has empty datapackage parameters (meaning there are no years or
tables included), no datapacakges will be generated. If the settings include a
datapackage that has empty parameters, the other valid datatpackages will be
generated, but not the empty one. If there are invalid parameters (meaning a
partition that is not included in the pudl.constant.working_partitions), the
build will fail early on in the process.
The datapackages will be stored in "PUDL_OUT" in the "datapackge" subdirectory.
Currently, this function only uses default directories for "PUDL_IN" and
"PUDL_OUT" (meaning those stored in $HOME/.pudl.yml). To setup your default
pudl directories see the pudl_setup script (pudl_setup --help for more details).
"""
import argparse
import logging
import pathlib
import sys
import coloredlogs
import yaml
import pudl
logger = logging.getLogger(__name__)
def parse_command_line(argv):
    """
    Parse script command line arguments. See the -h option.

    Args:
        argv (list): command line arguments including caller file name.

    Returns:
        argparse.Namespace: parsed command line arguments and their values.
    """
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument(
        dest='settings_file',
        type=str,
        default='',
        help="path to YAML datapackage settings file.")
    parser.add_argument(
        '-c',
        '--clobber',
        default=False,
        action='store_true',
        help="""Clobber existing datapackages if they exist. If clobber is not
        included but the datapackage bundle directory already exists the _build
        will fail. Either the datapkg_bundle_name in the settings_file needs to
        be unique or you need to include --clobber""")
    parser.add_argument(
        "--sandbox", action="store_true", default=False,
        help="Use the Zenodo sandbox rather than production")
    parser.add_argument(
        "--logfile", default=None,
        help="If specified, write logs to this file.")
    parser.add_argument(
        "--gcs-cache-path",
        type=str,
        help="Load datastore resources from Google Cloud Storage. Should be gs://bucket[/path_prefix]")
    parser.add_argument(
        "--bypass-local-cache",
        action="store_true",
        default=False,
        help="If enabled, the local file cache for datastore will not be used.")
    # Skip argv[0] (the program name) when parsing.
    return parser.parse_args(argv[1:])
def main():
    """Parse command line and initialize PUDL DB."""
    # Display logged output from the PUDL package:
    pudl_logger = logging.getLogger("pudl")
    log_format = '%(asctime)s [%(levelname)8s] %(name)s:%(lineno)s %(message)s'
    coloredlogs.install(fmt=log_format, level='INFO', logger=pudl_logger)

    args = parse_command_line(sys.argv)
    if args.logfile:
        file_logger = logging.FileHandler(args.logfile)
        file_logger.setFormatter(logging.Formatter(log_format))
        pudl_logger.addHandler(file_logger)
    with pathlib.Path(args.settings_file).open() as f:
        script_settings = yaml.safe_load(f)

    # Fall back to the default workspace paths ($HOME/.pudl.yml) when the
    # settings file does not override them.
    try:
        pudl_in = script_settings["pudl_in"]
    except KeyError:
        pudl_in = pudl.workspace.setup.get_defaults()["pudl_in"]
    try:
        pudl_out = script_settings["pudl_out"]
    except KeyError:
        pudl_out = pudl.workspace.setup.get_defaults()["pudl_out"]
    pudl_settings = pudl.workspace.setup.derive_paths(
        pudl_in=pudl_in, pudl_out=pudl_out)
    pudl_settings["sandbox"] = args.sandbox

    try:
        datapkg_bundle_doi = script_settings["datapkg_bundle_doi"]
        if not pudl.helpers.is_doi(datapkg_bundle_doi):
            raise ValueError(
                f"Found invalid bundle DOI: {datapkg_bundle_doi} "
                # BUG FIX: key was misspelled 'datpkg_bundle_name', which
                # raised a KeyError here; that KeyError was then swallowed by
                # the handler below, silently masking the invalid-DOI error.
                f"in bundle {script_settings['datapkg_bundle_name']}."
            )
    except KeyError:
        # No DOI supplied in the settings file.
        datapkg_bundle_doi = None

    _ = pudl.etl.generate_datapkg_bundle(
        script_settings['datapkg_bundle_settings'],
        pudl_settings,
        datapkg_bundle_name=script_settings['datapkg_bundle_name'],
        datapkg_bundle_doi=datapkg_bundle_doi,
        clobber=args.clobber,
        use_local_cache=not args.bypass_local_cache,
        gcs_cache_path=args.gcs_cache_path)
| {
"repo_name": "catalyst-cooperative/pudl",
"path": "src/pudl/cli.py",
"copies": "1",
"size": "4538",
"license": "mit",
"hash": 5877586881318611000,
"line_mean": 35.0158730159,
"line_max": 103,
"alpha_frac": 0.6729836933,
"autogenerated": false,
"ratio": 3.753515301902399,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9925137616526205,
"avg_score": 0.0002722757352386982,
"num_lines": 126
} |
""" A command line interface to start a Flask service for classifying handwritten digits. """
import argparse
import constants
import cv2
from flask import Flask, jsonify, request, render_template, abort
import io
from keras.models import load_model
import logging
from logging import handlers
import numpy as np
import os
import tensorflow as tf
# Command line options for the service.
parser = argparse.ArgumentParser(description='Starts a classification service.')
parser.add_argument('--host',
                    default=constants.DEFAULT_HOST,
                    help='Host for the classification service')
parser.add_argument('--port', type=int,
                    default=constants.DEFAULT_PORT,
                    help='port on which to run the service')
parser.add_argument('--model',
                    default=constants.CNN_MODEL_FILENAME,
                    help='Model filename to use for prediction. Needs to refer to a HDF5 file')
# we need to make graph a global variable
# see https://github.com/fchollet/keras/issues/2397#issuecomment-254919212
# similarly we make model global, so we don't have to pass it as argument to the classification function
# NOTE(review): 'global' at module scope is a no-op; graph/model are simply
# module-level names assigned in the __main__ block below.
global graph, model
app = Flask(__name__)
# Accepted upload extensions and a 16 MB request size cap.
app.config['ALLOWED_EXTENSIONS'] = {'png', 'jpg', 'jpeg'}
app.config['MAX_CONTENT_LENGTH'] = 16 * 1024 * 1024 # 16 MB
def allowed_file(filename):
    """
    Validate the file extension.
    :param filename: filename to check
    :return: (boolean) whether the file is allowed
    """
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1]
    return extension in app.config['ALLOWED_EXTENSIONS']
def validate_img_data(data):
    """
    Validate the processed image data. Checks number of rows and columns.
    :param data: input numpy array
    :return: (boolean) whether the processed image conforms to the expected format
    """
    if not isinstance(data, np.ndarray):
        return False
    try:
        rows_ok = len(data) == constants.IMG_ROWS
        cols_ok = all(len(row) == constants.IMG_COLS for row in data)
    except TypeError:
        # len() fails on scalar rows / zero-dimensional arrays.
        return False
    return rows_ok and cols_ok
def convert_to_mnist_format(input_file):
    """
    Convert input file to the MNIST format. Uses OpenCV for decoding and transforming the image.
    Aborts the request with 404 on any processing or validation failure.
    :param input_file: input file, as received by the server in the request files
    :return: (np.ndarray) processed image as a numpy array
    """
    try:
        # Buffer the upload in memory, then decode it as a grayscale image.
        in_memory_file = io.BytesIO()
        input_file.save(in_memory_file)
        data = np.fromstring(in_memory_file.getvalue(), dtype=np.uint8)
        grey_img = cv2.imdecode(data, cv2.IMREAD_GRAYSCALE)
        # MNIST digits are light-on-dark, so invert the usual dark-on-light.
        grey_inv_img = cv2.bitwise_not(grey_img)
        # Otsu thresholding picks the binarization threshold automatically.
        (thresh, im_bw) = cv2.threshold(grey_inv_img, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
        # NOTE(review): 28x28 is hard-coded here; presumably should use
        # constants.IMG_ROWS/IMG_COLS like the rest of the module — confirm.
        img = cv2.resize(im_bw, (28, 28), interpolation=cv2.INTER_AREA)
    except Exception as e:
        app.logger.error("Image processing failed. Exception: %s", e)
        abort(404)
        return
    if not validate_img_data(img):
        app.logger.error("Image format validation failed")
        abort(404)
    return img
@app.route('/mnist/classify', methods=['POST'])
def make_predict_image():
    """
    Hold the main logic of request parsing and response.

    Expects a multipart form upload under the 'image' key; responds with a
    JSON body {"classification": "<digit>"} or aborts with 404 on failure.
    """
    # Check that there is data in the upload form 'image'
    if 'image' not in request.files or not request.files['image']:
        app.logger.error("Please upload an image in the 'image' form member.")
        abort(404)
    input_image_file = request.files['image']
    # Validate image extension
    if not allowed_file(input_image_file.filename):
        app.logger.error("File upload error. File extension should be png or jpg.")
        abort(404)
    # Convert the file to the mnist format (numpy array, 28 * 28 pixels, pixel: 0 <> background, 255 <> max signal)
    img = convert_to_mnist_format(input_image_file)
    try:
        # Attempt to classify the given example; reshape to the
        # (batch, rows, cols, channels) layout the model expects.
        predict_request = img.reshape(1, constants.IMG_ROWS, constants.IMG_COLS, 1)
        # Use the module-level graph so prediction works across Flask threads.
        with graph.as_default():
            preds = model.predict_classes(predict_request, verbose=0)
        # jsonify is the safe way to generate a JSON file to return
        # see http://flask.pocoo.org/docs/0.10/security/#json-security
        return jsonify({"classification": str(preds[0])})
    except Exception as e:
        app.logger.error("Model prediction error. Exception: %s", e)
        abort(404)
@app.errorhandler(404)
def page_not_found(error):
    """ Render neat template when page not found """
    return render_template('404.html', planet_ascii_art=constants.PLANET_ASCII_ART), 404
@app.errorhandler(405)
def method_not_allowed(error):
    """ Render neat template when method not allowed """
    return render_template('405.html', planet_ascii_art=constants.PLANET_ASCII_ART), 405
if __name__ == "__main__":
    # Log to a rotating file so long-running deployments don't grow unbounded.
    handler = handlers.RotatingFileHandler('classify.log', maxBytes=10000, backupCount=1)
    handler.setLevel(logging.INFO)
    app.logger.addHandler(handler)
    args = parser.parse_args()
    try:
        filepath = os.path.join(constants.MODELS_DIR, args.model)
        model = load_model(filepath)
    except OSError as e:
        # Bug fix: the original referenced args.filename here, which does not
        # exist (the option is --model, i.e. args.model) and would raise
        # AttributeError inside the error handler.  Report the actual path.
        msg = "Model file could not be found at {0} or was invalid. Error: {1}".format(filepath, e)
        logging.error(msg)
        exit(1)
    graph = tf.get_default_graph()
    app.run(host=args.host, port=args.port)
| {
"repo_name": "thomas-legrand/mnist-classification",
"path": "app.py",
"copies": "1",
"size": "5437",
"license": "mit",
"hash": -9070020211253986000,
"line_mean": 36.2397260274,
"line_max": 115,
"alpha_frac": 0.6663601251,
"autogenerated": false,
"ratio": 3.802097902097902,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4968458027197902,
"avg_score": null,
"num_lines": null
} |
"""A command line search utility. Command line options are flagged as '-t' for table,
'-f' for format, '-o-' for output, and '-q' for query. Enter these as arguments and
the program will output a plain text file with the relevant data."""
import MySQLdb
import optparse
opt = optparse.OptionParser()
opt.add_option("-d", "--database",
action = "store",
type = "string",
dest = "database")
opt.add_option("-c", "--column",
action = "store",
type = "string",
dest = "column")
opt.add_option("-q", "--query",
action = "store",
type = "string",
dest = "term")
opt.add_option("-t", "--table",
action = "store",
type = "string",
dest = "table")
opt.add_option("-f", "--format",
action="store_true",
dest="format")
opt.add_option("-o", "--output",
action = "store",
type = "string",
dest = "outfile")
opt, args = opt.parse_args()
database = opt.database
try:
mydb = MySQLdb.connect(host='localhost',
user='root',
db=database)
except: print "That database doesn't exist here."
cur = mydb.cursor()
format = opt.format
table = opt.table
column = opt.column
term = opt.term
statement = """SELECT * FROM %s WHERE %s like '%s'""" %(table, column, term)
try:
command = cur.execute(statement)
results = cur.fetchall()
except: print "Something's wrong with the db/table/col combo...it doesn't exist."
column_list = []
for record in results:
column_list.append(record[0:])
if format is True:
columns_query = """DESCRIBE %s""" %(table)
columns_command = cur.execute(columns_query)
headers = cur.fetchall()
column_list = []
for record in headers:
column_list.append(record[0])
output=""
for record in results:
output = output + "<><<>><><<>><><<>><><<>><><<>><><<>>\n\n"
for field_no in range(0, len(column_list)):
output = output + column_list[field_no]+": " + str(record[field_no]) + "\n"
output = output + "\n"
else:
output=[]
for record in range(0, len(results)):
output.append(results[record])
output = ''.join(output)
if opt.outfile:
outfile = opt.outfile
out = open(outfile, 'w')
out.write(output)
out.close()
else:
print output
mydb.close()
| {
"repo_name": "EricAlanMack/MySQL-search",
"path": "searchme.py",
"copies": "1",
"size": "2204",
"license": "mit",
"hash": 6051278752320407000,
"line_mean": 21.7216494845,
"line_max": 86,
"alpha_frac": 0.6161524501,
"autogenerated": false,
"ratio": 3.010928961748634,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8604033220251186,
"avg_score": 0.10460963831948959,
"num_lines": 97
} |
"""A command line tool for classifying tweets collected by get_twitter.py
take care, will overwrite output_filename
Run this using:
python classifier.py input_filename output_filename
(obviously replacing the last two)
If output_filename exists, this will load that file's contents and rewrite it.
Be careful!!!
This functionality allows you to quit the program and restart at a later time,
without losing your progress. However if the file is not legitimate, what
will likely happen is that you will lose that file.
"""
import sys
import os
import csv
# Positional args: the file of tweets to label, and the CSV of
# "<tweet text>,<0|1>" labels that will be rewritten in full each run.
input_filename = sys.argv[1]
output_filename = sys.argv[2]
# Map of tweet text -> label from a previous session, so labelling can be
# resumed without repeating work (see module docstring).
existing_lines = dict()
if os.path.exists(output_filename):
    with open(output_filename) as inf:
        for line in inf:
            if len(line.strip()) == 0:
                continue
            # The label is the final comma-separated field; everything
            # before it (embedded commas included) is the tweet text.
            data = line.split(",")
            y = int(data[-1])
            text = ",".join(data[:-1])
            existing_lines[text] = y
    print("Loaded {} existing entries".format(len(existing_lines)))
# Rewrite the output from scratch: first replay the already-labelled
# entries, then interactively label the remaining input lines, appending
# each new label as soon as it is entered.
with open(input_filename) as inf, open(output_filename, 'w') as outf:
    for line, prediction in existing_lines.items():
        outf.write("{},{}\n".format(line, prediction))
    for line in inf:
        line = line.strip()
        print("\n\n\n")
        print("num {}".format(len(existing_lines)))
        print(line)
        if line in existing_lines:
            # Already labelled in a previous run; it was re-written above,
            # so skip straight to the next tweet.
            print("Already found: {}".format(existing_lines[line]))
            prediction = existing_lines[line]
            continue
        else:
            # raw_input: this script targets Python 2.
            a = raw_input("\nIs this spam? (enter for 'no')")
            prediction = 0
            if a.lower() == 'y':
                prediction = 1
            existing_lines[line] = prediction
            print("Prediction: {}".format(prediction))
            outf.write("{},{}\n".format(line, prediction))
| {
"repo_name": "robertlayton/authorship_tutorials",
"path": "pyconau2014/classifier.py",
"copies": "1",
"size": "1644",
"license": "bsd-3-clause",
"hash": 8298524792455471000,
"line_mean": 26.8644067797,
"line_max": 78,
"alpha_frac": 0.6885644769,
"autogenerated": false,
"ratio": 3.334685598377282,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45232500752772814,
"avg_score": null,
"num_lines": null
} |
"""A commandline tool to convert an EXR image to a .npz dictionary.
Useful when you have problems installing OpenEXR for Python 3. This uses
Python 2 and writes to disk, so that you can later load it with Python 3.
Xiuming Zhang, MIT CSAIL.
Feburary 2018.
"""
from argparse import ArgumentParser
from os import makedirs
from os.path import exists, abspath, dirname
import numpy as np
import OpenEXR
import Imath
# Parse variables
parser = ArgumentParser(
    description="Load OpenEXR image as dictionary of NumPy arrays")
parser.add_argument('input', metavar='i', type=str,
                    help="input .exr file")
parser.add_argument('outpath', metavar='o', type=str,
                    help="output .npz file")
args = parser.parse_args()
inpath = args.input
outpath = abspath(args.outpath)
# Normalise the extension so np.savez doesn't append a second one.
if not outpath.endswith('.npz'):
    outpath += '.npz'
# Make directory
outdir = dirname(outpath)
if not exists(outdir):
    makedirs(outdir)
# Decode every channel as float32, shaped like the EXR data window
# (height, width).
f = OpenEXR.InputFile(inpath)
pix_type = Imath.PixelType(Imath.PixelType.FLOAT)
data_win = f.header()['dataWindow']
win_size = (data_win.max.y - data_win.min.y + 1,
            data_win.max.x - data_win.min.x + 1)
imgs = {}
for c in f.header()['channels']:
    # Fix: np.fromstring is deprecated for binary data; np.frombuffer is
    # the drop-in replacement (and avoids an extra copy).
    arr = np.frombuffer(f.channel(c, pix_type), dtype=np.float32)
    imgs[c] = arr.reshape(win_size)
np.savez(outpath, **imgs)
| {
"repo_name": "google/neural-light-transport",
"path": "third_party/xiuminglib/cli/exr2npz.py",
"copies": "2",
"size": "1336",
"license": "apache-2.0",
"hash": 8755690576642390000,
"line_mean": 25.72,
"line_max": 73,
"alpha_frac": 0.6938622754,
"autogenerated": false,
"ratio": 3.1658767772511847,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9859739052651185,
"avg_score": 0,
"num_lines": 50
} |
"""A commandline tool to extract frames from a video.
Xiuming Zhang, MIT CSAIL.
May 2017.
"""
from argparse import ArgumentParser
from os import makedirs
from os.path import exists, join, abspath
from cv2 import imwrite, VideoCapture
from xiuminglib import log
logger, thisfile = log.create_logger(abspath(__file__))
logger.name = thisfile

# Command-line interface
parser = ArgumentParser(description="Extract frames from a video file")
parser.add_argument('videopath', metavar='i', type=str, help="input video file")
parser.add_argument('outdir', metavar='o', type=str, help="output directory")
parser.add_argument('--every', metavar='n', type=int, default=1,
                    help="sample one frame every n frame(s) (default: 1)")
parser.add_argument('--outlen', metavar='l', type=int, default=4,
                    help="length of output filenames (default: 4)")
cli = parser.parse_args()
videopath = cli.videopath
outdir = abspath(cli.outdir)
every = cli.every
outlen = cli.outlen

# Ensure the destination directory exists
if not exists(outdir):
    makedirs(outdir)

# Walk the video, writing every `every`-th frame as a zero-padded PNG
reader = VideoCapture(videopath)
n_seen = 0
n_written = 1
while reader.isOpened():
    ok, frame = reader.read()
    if not ok:
        break
    if n_seen % every == 0:
        outpath = join(outdir, str(n_written).zfill(outlen) + '.png')
        logger.info("Frame %d saved as %s", n_seen, outpath)
        imwrite(outpath, frame)
        n_written += 1
    n_seen += 1
reader.release()
| {
"repo_name": "google/neural-light-transport",
"path": "third_party/xiuminglib/cli/extract_frames.py",
"copies": "2",
"size": "1486",
"license": "apache-2.0",
"hash": -8378440978144276000,
"line_mean": 28.72,
"line_max": 80,
"alpha_frac": 0.6843876178,
"autogenerated": false,
"ratio": 3.496470588235294,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0002469135802469136,
"num_lines": 50
} |
"""A commandline tool to make annotated GIF from image-text pairs.
Xiuming Zhang, MIT CSAIL.
July 2017.
"""
from argparse import ArgumentParser
from os import makedirs
from os.path import exists, join, abspath, dirname, basename
from shutil import rmtree
from subprocess import call
from random import choice
from string import ascii_letters, digits
import cv2
from xiuminglib import log
logger, thisfile = log.create_logger(abspath(__file__))
logger.name = thisfile

# Command-line interface
parser = ArgumentParser(description="Make annotated GIF from image-text pairs")
parser.add_argument('input', metavar='i', type=str, nargs='+',
                    help="input image-text pairs, e.g., im.png,'foo bar' or im.png")
parser.add_argument('outpath', metavar='o', type=str, help="output GIF")
parser.add_argument('--cropbox', metavar='b', type=str, default='0,0,-1,-1',
                    help=("top left corner, height and width of the cropping rectangle; "
                          "use -1 for \"to the end\"; default: 0,0,-1,-1 (no cropping)"))
parser.add_argument('--delay', metavar='d', type=int, default=200, help="delay parameter; default: 200")
parser.add_argument('--width', metavar='w', type=int, default=1080, help="output GIF width; default: 1080")
parser.add_argument('--fontscale', metavar='s', type=int, default=4, help="font scale; default: 4")
parser.add_argument('--fontthick', metavar='t', type=int, default=5, help="font thickness; default: 5")
parser.add_argument('--fontbgr', metavar='c', type=str, default='0,0,255', help="font BGR; default: 0,0,255 (red)")
parser.add_argument('--anchor', metavar='a', type=str, default='50,50',
                    help="bottom left corner of text box; default: 50,50")
opts = parser.parse_args()
pairs = opts.input
outpath = abspath(opts.outpath)
cropbox = tuple(int(v) for v in opts.cropbox.split(','))
gif_delay = opts.delay
gif_width = opts.width
font_scale = opts.fontscale
font_thick = opts.fontthick
font_bgr = tuple(int(v) for v in opts.fontbgr.split(','))
bottom_left_corner = tuple(int(v) for v in opts.anchor.split(','))

# Output directory, plus a throwaway working directory (random suffix to
# avoid collisions)
outdir = dirname(outpath)
if not exists(outdir):
    makedirs(outdir)
rand_tag = ''.join(choice(ascii_letters + digits) for _ in range(10))
tmpdir = join(outdir, 'tmp_make_gif_' + rand_tag)
if not exists(tmpdir):
    makedirs(tmpdir)

for pair in pairs:
    fields = pair.split(',')
    impath = fields[0]
    im = cv2.imread(impath)
    assert im is not None, "%s not found" % impath
    # Optional crop (-1 means "to the end"), then resize to the GIF width
    if cropbox != (0, 0, -1, -1):
        y_min, x_min, h, w = cropbox
        if h == -1:
            h = im.shape[0] - y_min
        if w == -1:
            w = im.shape[1] - x_min
        im = im[y_min:(y_min + h), x_min:(x_min + w), ...]
    im = cv2.resize(im, (gif_width, int(im.shape[0] * gif_width / im.shape[1])))
    # Optional annotation from the text half of the pair
    if len(fields) > 1 and fields[1] != '':
        cv2.putText(im, fields[1], bottom_left_corner, cv2.FONT_HERSHEY_SIMPLEX,
                    font_scale, font_bgr, font_thick)
    # Stage the processed frame in the working directory
    cv2.imwrite(join(tmpdir, basename(impath)), im)

# Assemble the GIF with ImageMagick, then discard the working directory
call(['convert', '-delay', str(gif_delay), '-loop', '0',
      join(tmpdir, '*'), outpath])
rmtree(tmpdir)
logger.info("Generated %s", outpath)
| {
"repo_name": "google/nerfactor",
"path": "third_party/xiuminglib/cli/make_gif.py",
"copies": "2",
"size": "3368",
"license": "apache-2.0",
"hash": -8794342738874370000,
"line_mean": 36.010989011,
"line_max": 115,
"alpha_frac": 0.6445961995,
"autogenerated": false,
"ratio": 3.1359404096834265,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4780536609183426,
"avg_score": null,
"num_lines": null
} |
""" A command model that can be easily transformed into jobs.
author: Brian Schrader
since: 2015-12-21
"""
from .tokens import Input, Output, FileToken, PathToken, alias_pattern
class Command(object):
    """ A command template binding an alias to an ordered list of parts
    (plain strings plus file/path tokens) that evaluates to a shell line.
    """

    def __init__(self, alias, parts=None):
        """
        :param alias: unique name used to wire commands together.
        :param parts: ordered command fragments (tokens or strings).
            Fix: the original used a mutable default ``parts=[]``, which
            is shared across every call; default to a fresh list instead.
        """
        self.alias = alias
        self.parts = [] if parts is None else parts
        # Give each output token an alias so later commands can depend on it;
        # multiple outputs get numbered aliases.
        if len(self.output_parts) > 1:
            for i, output in enumerate(self.output_parts):
                output.alias = alias_pattern.format(command=self.alias,
                                                    output_number=i+1)
        else:
            for output in self.output_parts:
                output.alias = self.alias

    def __repr__(self):
        return '<Command: {}>'.format(self.alias)

    @property
    def depends_on(self):
        """ Returns a list of command template aliases that the given command
        template depends on.
        """
        return [part.command_alias for part in self.input_parts
                if part.command_alias is not None]

    @property
    def input_parts(self):
        """ Returns a list of the input tokens in the list of parts. """
        return [part for part in self.file_parts
                if isinstance(part, Input)]

    @property
    def output_parts(self):
        """ Returns a list of the output tokens in the list of parts. """
        return [part for part in self.file_parts
                if isinstance(part, Output)]

    @property
    def file_parts(self):
        """ Returns a list of the file tokens in the list of parts. """
        file_parts = []
        for part in self.parts:
            try:
                # A part may itself be an iterable of sub-parts.
                for sub_part in part:
                    if isinstance(sub_part, FileToken):
                        file_parts.append(sub_part)
            except TypeError:
                if isinstance(part, FileToken):
                    file_parts.append(part)
        return file_parts

    @property
    def path_parts(self):
        """ Returns a list of the path tokens in the list of parts. """
        return [part for part in self.parts
                if isinstance(part, PathToken)]

    def update_dependent_files(self, prev_commands=None):
        """ Update the command's dependencies based on the evaluated input and
        output of previous commands.

        :param prev_commands: commands whose outputs may feed this command's
            inputs.  Fix: the original used a mutable default list.
        """
        for command in (prev_commands or []):
            for my_input in self.input_parts:
                for their_output in command.output_parts:
                    if their_output == my_input:
                        my_input.filename = their_output.eval()

    def eval(self):
        """ Evaluate the given job and return a complete shell script to be run
        by the job manager.
        """
        evaluated = []  # renamed from `eval`, which shadowed the builtin
        for part in self.parts:
            try:
                result = part.eval()
            except AttributeError:
                # Plain-string parts have no eval(); use them verbatim.
                result = part
            # Separate parts with a space unless one already ends a line.
            # Fix: guard the empty string, which raised IndexError on [-1].
            if not result or result[-1] != '\n':
                result += ' '
            evaluated.append(result)
        return ''.join(evaluated).strip()
| {
"repo_name": "TorkamaniLab/metapipe",
"path": "metapipe/models/command.py",
"copies": "2",
"size": "2935",
"license": "mit",
"hash": -943302913037680500,
"line_mean": 31.2527472527,
"line_max": 79,
"alpha_frac": 0.5567291312,
"autogenerated": false,
"ratio": 4.48776758409786,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.01229708942874086,
"num_lines": 91
} |
"""A command to generate or load a subset of data from the live
database.
The `load` operation truncates all existing data.
"""
import json
import os
import tempfile
import subprocess
from django.core.management import BaseCommand
from django.db import connection
from django.template import Context, Engine, Template
# Tables for which we should copy all the data
# NOTE(review): the ordering appears significant for loading (TRUNCATE ...
# CASCADE plus COPY with FK constraints) -- preserve it when adding tables.
copy_all = [
    'django_migrations',
    'auth_user',
    'frontend_chemical',
    'frontend_genericcodemapping',
    'frontend_importlog',
    'frontend_measure',
    'frontend_measureglobal',
    'frontend_regionalteam',
    'frontend_stp',
    'frontend_pct',
    'frontend_practice',
    'frontend_orgbookmark',
    'frontend_practiceisdispensing',
    'frontend_presentation',
    'frontend_product',
    'frontend_profile',
    'frontend_qofprevalence',
    'frontend_searchbookmark',
    'frontend_section',
    'pipeline_tasklog',
    'spatial_ref_sys',
    'dmd_lookup_availability_restriction',
    'dmd_lookup_basis_of_name',
    'dmd_lookup_basis_of_strnth',
    'dmd_lookup_colour',
    'dmd_lookup_combination_pack_ind',
    'dmd_lookup_combination_prod_ind',
    'dmd_lookup_control_drug_category',
    'dmd_lookup_df_indicator',
    'dmd_lookup_discontinued_ind',
    'dmd_lookup_dnd',
    'dmd_lookup_dt_payment_category',
    'dmd_lookup_flavour',
    'dmd_lookup_form',
    'dmd_lookup_legal_category',
    'dmd_lookup_licensing_authority',
    'dmd_lookup_licensing_authority_change_reason',
    'dmd_lookup_namechange_reason',
    'dmd_lookup_ont_form_route',
    'dmd_lookup_price_basis',
    'dmd_lookup_reimbursement_status',
    'dmd_lookup_route',
    'dmd_lookup_spec_cont',
    'dmd_lookup_supplier',
    'dmd_lookup_unit_of_measure',
    'dmd_lookup_virtual_product_non_avail',
    'dmd_lookup_virtual_product_pres_status',
    'dmd_amp',
    'dmd_ampp',
    'dmd_ap_info',
    'dmd_ap_ing',
    'dmd_ccontent',
    'dmd_control_info',
    'dmd_dform',
    'dmd_droute',
    'dmd_dtinfo',
    'dmd_gtin',
    'dmd_ing',
    'dmd_lic_route',
    'dmd_ncsoconcession',
    'dmd_ont',
    'dmd_pack_info',
    'dmd_prescrib_info',
    'dmd_price_info',
    'dmd_product',
    'dmd_product_temp',
    'dmd_reimb_info',
    'dmd_tariffprice',
    'dmd_vmp',
    'dmd_vmpp',
    'dmd_vpi',
    'dmd_vtm',
    'vw__practice_summary',
    'vw__presentation_summary',
]
# tables with WHERE clauses
# Each value is a WHERE-clause template; '{}' is filled with the sampled
# CCG code when dumping.
copy_sample = {
    'frontend_measurevalue': "pct_id = '{}'",
    'frontend_prescription': "pct_id = '{}'",
    'frontend_practicestatistics': "pct_id = '{}'",
    'frontend_ppusaving': "pct_id = '{}'",
    'vw__ccgstatistics': "pct_id = '{}'",
    'vw__chemical_summary_by_ccg': "pct_id = '{}'",
    'vw__chemical_summary_by_practice': (
        "practice_id IN "
        "(SELECT code FROM frontend_practice WHERE ccg_id = '{}')"),
    'vw__presentation_summary_by_ccg': "pct_id = '{}'",
}
# Very large tables: only a 1% TABLESAMPLE of matching rows is dumped.
tables_to_sample = ['frontend_prescription']
def dump_create_table(table, dest_dir):
    """
    Save an array of column names in the order they'll be dumped.
    This allows us to recreate them on loading.

    Writes `<dest_dir>/<table>.json` containing a JSON list of the
    table's column names, as reported by information_schema.
    """
    dest = os.path.join(dest_dir, table + '.json')
    with connection.cursor() as cursor:
        # information_schema is queried (rather than the ORM) so views
        # and non-Django tables are covered too.  `table` comes from the
        # fixed lists above, so interpolation is safe here.
        sql = ("SELECT column_name FROM information_schema.columns "
               "WHERE table_schema = 'public' AND table_name = '{}'"
               .format(table))
        # Fix: the original bound the (unused) return value to `res`.
        cursor.execute(sql)
        fields = cursor.fetchall()
    with open(dest, 'wb') as f:
        json.dump([x[0] for x in fields], f)
def quote_cols(cols):
    """Wrap each SQL column name in double quotes (needed because dm+d
    uses the reserved word `desc`).
    """
    return ['"%s"' % col for col in cols]
class Command(BaseCommand):
    """Dump a CCG-sized sample of the live database, or load such a dump.

    `dump` writes one COPY-format file (plus a .json of column names) per
    table; `load` recreates views/dm+d tables, then truncates and refills
    every dumped table.
    """

    def dump(self, path, ccg):
        """Write every table of interest under `path`, restricting the
        per-CCG tables to `ccg` and sampling the largest ones."""
        with connection.cursor() as cursor:
            for table in copy_all:
                with open(os.path.join(path, table), 'wb') as f:
                    # Raw string so Postgres receives the literal \N null
                    # marker; in Python 3 the non-raw '\N' is a syntax
                    # error, and in Python 2 it is byte-identical.
                    sql = r"copy (SELECT * FROM {}) TO STDOUT WITH NULL '\N'"
                    sql = sql.format(table)
                    dump_create_table(table, path)
                    cursor.copy_expert(sql, f)
            for table, where in copy_sample.items():
                where = where.format(ccg)
                with open(os.path.join(path, table), 'wb') as f:
                    # Only the very large tables get a 1% sample.
                    sample = 'TABLESAMPLE SYSTEM (1)' if table in tables_to_sample else ''
                    sql = (r"copy (SELECT * FROM {} {} WHERE {}) "
                           r"TO STDOUT WITH NULL '\N'")
                    sql = sql.format(table, sample, where)
                    dump_create_table(table, path)
                    cursor.copy_expert(sql, f)

    def _refill_table(self, cursor, path, table):
        """Truncate `table` and reload it from its dump files in `path`.

        Extracted because load() repeated this body verbatim for both
        table groups.
        """
        with open(os.path.join(path, table), 'rb') as f:
            cursor.execute("TRUNCATE TABLE {} CASCADE".format(table))
            with open(os.path.join(path, table + '.json'), 'rb') as f2:
                cols = json.load(f2)
            cursor.copy_from(
                f, table, null=r'\N', columns=quote_cols(cols))

    def load(self, path):
        """Recreate supporting schema, then refill all dumped tables."""
        with connection.cursor() as cursor:
            # Create empty views
            view_sql = os.path.join(
                'frontend', 'management', 'commands', 'replace_matviews.sql')
            with open(view_sql, 'rb') as f:
                cursor.execute(f.read())
            # Create DMD tables
            view_sql = os.path.join(
                'dmd', 'dmd_structure.sql')
            with open(view_sql, 'rb') as f:
                cursor.execute(f.read())
            # Now fill other (existing) tables
            for table in copy_all:
                self._refill_table(cursor, path, table)
            # The WHERE clauses only matter when dumping.
            for table in copy_sample:
                self._refill_table(cursor, path, table)

    def add_arguments(self, parser):
        parser.add_argument('operation', nargs=1, choices=['load', 'dump'])
        parser.add_argument(
            '--dir',
            help="directory containing previously dumped files",
            default=tempfile.gettempdir())
        parser.add_argument(
            '--ccg', help="CCG to sample data for", default='09X')

    def handle(self, *args, **options):
        if 'load' in options['operation']:
            self.load(options['dir'])
        else:
            self.dump(options['dir'], options['ccg'])
| {
"repo_name": "annapowellsmith/openpresc",
"path": "openprescribing/frontend/management/commands/sample_data.py",
"copies": "1",
"size": "6653",
"license": "mit",
"hash": -3386556577246731300,
"line_mean": 32.601010101,
"line_max": 79,
"alpha_frac": 0.5618517962,
"autogenerated": false,
"ratio": 3.523834745762712,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9585686541962712,
"avg_score": 0,
"num_lines": 198
} |
"""A command to generate or load a subset of data from the live
database.
The `load` operation truncates all existing data.
"""
import json
import os
import tempfile
from django.apps import apps
from django.core.management import BaseCommand
from django.db import connection
# Tables for which we should copy all the data
# NOTE(review): the ordering appears significant for loading (TRUNCATE ...
# CASCADE plus COPY with FK constraints) -- preserve it when adding tables.
copy_all = [
    "django_migrations",
    "auth_user",
    "frontend_chemical",
    "frontend_importlog",
    "frontend_measure",
    "frontend_measureglobal",
    "frontend_ncsoconcession",
    "frontend_orgbookmark",
    "frontend_pct",
    "frontend_practice",
    "frontend_practiceisdispensing",
    "frontend_presentation",
    "frontend_product",
    "frontend_profile",
    "frontend_qofprevalence",
    "frontend_regionalteam",
    "frontend_searchbookmark",
    "frontend_section",
    "frontend_stp",
    "frontend_tariffprice",
    "pipeline_tasklog",
    "spatial_ref_sys",
]
# Every dm+d model's table is copied in full as well.
for m in apps.get_app_config("dmd").get_models():
    copy_all.append(m._meta.db_table)
# tables with WHERE clauses
# Each value is a WHERE-clause template; "{}" is filled with the sampled
# CCG code when dumping.
copy_sample = {
    "frontend_measurevalue": "pct_id = '{}'",
    "frontend_prescription": "pct_id = '{}'",
    "frontend_practicestatistics": "pct_id = '{}'",
}
# Very large tables: only a 1% TABLESAMPLE of matching rows is dumped.
tables_to_sample = ["frontend_prescription"]
def dump_create_table(table, dest_dir):
    """
    Save an array of column names in the order they'll be dumped.
    This allows us to recreate them on loading.

    Writes `<dest_dir>/<table>.json` containing a JSON list of the
    table's column names, as reported by information_schema.
    """
    dest = os.path.join(dest_dir, table + ".json")
    with connection.cursor() as cursor:
        # `table` comes from the fixed lists above, so interpolation is safe.
        sql = (
            "SELECT column_name FROM information_schema.columns "
            "WHERE table_schema = 'public' AND table_name = '{}'".format(table)
        )
        cursor.execute(sql)
        fields = cursor.fetchall()
    # Fix: json.dump writes `str`, so the file must be opened in text
    # mode -- opening it "wb" raises TypeError on Python 3.
    with open(dest, "w") as f:
        json.dump([x[0] for x in fields], f)
def quote_cols(cols):
    """Wrap each SQL column name in double quotes (needed because dm+d
    uses the reserved word `desc`)."""
    return ['"' + col + '"' for col in cols]
class Command(BaseCommand):
    """Dump a CCG-sized sample of the live database, or load such a dump.

    `dump` writes one COPY-format file (plus a .json of column names) per
    table; `load` recreates the dm+d schema, then truncates and refills
    every dumped table.
    """

    def dump(self, path, ccg):
        """Write every table of interest under `path`, restricting the
        per-CCG tables to `ccg` and sampling the largest ones."""
        with connection.cursor() as cursor:
            for table in copy_all:
                with open(os.path.join(path, table), "wb") as f:
                    sql = r"copy (SELECT * FROM {}) TO STDOUT WITH NULL '\N'"
                    sql = sql.format(table)
                    dump_create_table(table, path)
                    cursor.copy_expert(sql, f)
            for table, where in copy_sample.items():
                where = where.format(ccg)
                with open(os.path.join(path, table), "wb") as f:
                    # Only the very large tables get a 1% sample.
                    sample = "TABLESAMPLE SYSTEM (1)" if table in tables_to_sample else ""
                    sql = (
                        r"copy (SELECT * FROM {} {} WHERE {}) "
                        r"TO STDOUT WITH NULL '\N'"
                    )
                    sql = sql.format(table, sample, where)
                    dump_create_table(table, path)
                    cursor.copy_expert(sql, f)

    def _refill_table(self, cursor, path, table):
        """Truncate `table` and reload it from its dump files in `path`.

        Extracted because load() repeated this body verbatim for both
        table groups.
        """
        with open(os.path.join(path, table), "rb") as f:
            cursor.execute("TRUNCATE TABLE {} CASCADE".format(table))
            with open(os.path.join(path, table + ".json"), "rb") as f2:
                cols = json.load(f2)
            cursor.copy_from(f, table, null=r"\N", columns=quote_cols(cols))

    def load(self, path):
        """Recreate the dm+d schema, then refill all dumped tables."""
        with connection.cursor() as cursor:
            # Create DMD tables
            view_sql = os.path.join("dmd", "dmd_structure.sql")
            with open(view_sql, "rb") as f:
                cursor.execute(f.read())
            # Now fill other (existing) tables
            for table in copy_all:
                self._refill_table(cursor, path, table)
            # The WHERE clauses only matter when dumping.
            for table in copy_sample:
                self._refill_table(cursor, path, table)

    def add_arguments(self, parser):
        parser.add_argument("operation", nargs=1, choices=["load", "dump"])
        parser.add_argument(
            "--dir",
            help="directory containing previously dumped files",
            default=tempfile.gettempdir(),
        )
        parser.add_argument("--ccg", help="CCG to sample data for", default="09X")

    def handle(self, *args, **options):
        if "load" in options["operation"]:
            self.load(options["dir"])
        else:
            self.dump(options["dir"], options["ccg"])
| {
"repo_name": "ebmdatalab/openprescribing",
"path": "openprescribing/frontend/management/commands/sample_data.py",
"copies": "1",
"size": "4704",
"license": "mit",
"hash": -41128886794764780,
"line_mean": 33.8444444444,
"line_max": 84,
"alpha_frac": 0.5542091837,
"autogenerated": false,
"ratio": 3.884393063583815,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4938602247283815,
"avg_score": null,
"num_lines": null
} |
"""A command to generate the SQL required to aggregate statistics
stashed in the JSON column for star_pus in the practice_statistics
table.
When the keys in the JSON change, replace
`views_sql/ccgstatistics.sql` with the output of running this command
"""
from django.core.management import BaseCommand
from django.template import Context, Engine, Template
class Command(BaseCommand):
    # Prints the generated SQL to stdout; paste it over
    # views_sql/ccgstatistics.sql whenever `keys` changes (see module
    # docstring).
    def handle(self, *args, **kwargs):
        # STAR-PU / ADQ metric names stored in the practice_statistics
        # JSON column; each becomes one SUM(...) argument below.
        keys = [
            'analgesics_cost',
            'antidepressants_adq',
            'antidepressants_cost',
            'antiepileptic_drugs_cost',
            'antiplatelet_drugs_cost',
            'benzodiazepine_caps_and_tabs_cost',
            'bisphosphonates_and_other_drugs_cost',
            'bronchodilators_cost',
            'calcium-channel_blockers_cost',
            'cox-2_inhibitors_cost',
            'drugs_acting_on_benzodiazepine_receptors_cost',
            'drugs_affecting_the_renin_angiotensin_system_cost',
            'drugs_for_dementia_cost',
            'drugs_used_in_parkinsonism_and_related_disorders_cost',
            'hypnotics_adq',
            'inhaled_corticosteroids_cost',
            'laxatives_cost',
            'lipid-regulating_drugs_cost',
            'omega-3_fatty_acid_compounds_adq',
            'oral_antibacterials_cost',
            'oral_antibacterials_item',
            'oral_nsaids_cost',
            'proton_pump_inhibitors_cost',
            'statins_cost',
            'ulcer_healing_drugs_cost'
        ]
        # Django-template source of the SQL (with a JS UDF -- presumably
        # BigQuery's CREATE TEMPORARY FUNCTION ... LANGUAGE js; confirm
        # against where the output is run).  Whitespace is significant.
        sql = """
        -- This SQL is generated by `generate_ccg_statistics_sql.py`
        CREATE TEMPORARY FUNCTION
          jsonify_starpu({% for safe_key in safe_keys %}
            {{ safe_key }} FLOAT64{% if not forloop.last %},{% endif %}{% endfor %}
          )
          RETURNS STRING
          LANGUAGE js AS '''
          var obj = {};{% for key, safe_key in zipped_keys %}
          obj['{{ key }}'] = {{ safe_key }};{% endfor %}
          return JSON.stringify(obj);
        ''';
        SELECT
          month AS date,
          practices.ccg_id AS pct_id,
          ccgs.name AS name,
          SUM(total_list_size) AS total_list_size,
          SUM(astro_pu_items) AS astro_pu_items,
          SUM(astro_pu_cost) AS astro_pu_cost,
          jsonify_starpu({% for key in keys %}
            SUM(CAST(JSON_EXTRACT_SCALAR(star_pu, '$.{{ key }}') AS FLOAT64)){% if not forloop.last %},{% endif %}{% endfor %}
          ) AS star_pu
        FROM {hscic}.practice_statistics
        INNER JOIN {hscic}.practices
          ON practice_statistics.practice = practices.code
        INNER JOIN {hscic}.ccgs ccgs
          ON practices.ccg_id = ccgs.code AND ccgs.org_type = 'CCG'
        WHERE month > TIMESTAMP(DATE_SUB(DATE "{this_month}", INTERVAL 5 YEAR))
        GROUP BY
          month,
          practices.ccg_id,
          name
        """.strip()
        template = Template(sql)
        # '-' is valid in a JSON key but not in a JS identifier, hence the
        # sanitised parallel list used for the UDF parameters.
        safe_keys = [key.replace('-', '_') for key in keys]
        zipped_keys = zip(keys, safe_keys)
        ctx = Context({
            'keys': keys,
            'safe_keys': safe_keys,
            'zipped_keys': zipped_keys,
        },
            autoescape=False
        )
        # Python 2 print statement: this management command predates py3.
        print template.render(ctx)
| {
"repo_name": "annapowellsmith/openpresc",
"path": "openprescribing/frontend/management/commands/generate_ccg_statistics_sql.py",
"copies": "1",
"size": "2988",
"license": "mit",
"hash": 1791663267181318100,
"line_mean": 31.8351648352,
"line_max": 118,
"alpha_frac": 0.6064257028,
"autogenerated": false,
"ratio": 3.2337662337662336,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43401919365662334,
"avg_score": null,
"num_lines": null
} |
"""A command to list events."""
from baseCmd import *
from baseResponse import *
class listEventsCmd (baseCmd):
    """Request object for the CloudStack `listEvents` API call.

    Each attribute is one API parameter; `typeInfo` maps the parameter
    name to its wire type.
    """
    # Kept so code reading the class attribute still finds a dict, but
    # instances no longer share (and mutate) it -- see __init__.
    typeInfo = {}

    def __init__(self):
        self.isAsync = "false"
        # Fix: the original wrote into the class-level `typeInfo`, so every
        # instance (and the class itself) shared one mutated dict.  Bind a
        # fresh per-instance dict instead.
        self.typeInfo = {}
        # list resources by account. Must be used with the domainId parameter.
        self.account = None
        self.typeInfo['account'] = 'string'
        # list only resources belonging to the domain specified
        self.domainid = None
        self.typeInfo['domainid'] = 'uuid'
        # the duration of the event
        self.duration = None
        self.typeInfo['duration'] = 'integer'
        # the end date range of the list you want to retrieve (use format "yyyy-MM-dd" or the new format "yyyy-MM-dd HH:mm:ss")
        self.enddate = None
        self.typeInfo['enddate'] = 'date'
        # the time the event was entered
        self.entrytime = None
        self.typeInfo['entrytime'] = 'integer'
        # the ID of the event
        self.id = None
        self.typeInfo['id'] = 'uuid'
        # defaults to false, but if true, lists all resources from the parent specified by the domainId till leaves.
        self.isrecursive = None
        self.typeInfo['isrecursive'] = 'boolean'
        # List by keyword
        self.keyword = None
        self.typeInfo['keyword'] = 'string'
        # the event level (INFO, WARN, ERROR)
        self.level = None
        self.typeInfo['level'] = 'string'
        # If set to false, list only resources belonging to the command's caller; if set to true - list resources that the caller is authorized to see. Default value is false
        self.listall = None
        self.typeInfo['listall'] = 'boolean'
        # (no description in the API definition)
        self.page = None
        self.typeInfo['page'] = 'integer'
        # (no description in the API definition)
        self.pagesize = None
        self.typeInfo['pagesize'] = 'integer'
        # list objects by project
        self.projectid = None
        self.typeInfo['projectid'] = 'uuid'
        # the start date range of the list you want to retrieve (use format "yyyy-MM-dd" or the new format "yyyy-MM-dd HH:mm:ss")
        self.startdate = None
        self.typeInfo['startdate'] = 'date'
        # the event type (see event types)
        self.type = None
        self.typeInfo['type'] = 'string'
        # parameters the server requires; none for listEvents
        self.required = []
class listEventsResponse (baseResponse):
    """Response object for the CloudStack `listEvents` API call.

    Each attribute is one field of a returned event record; `typeInfo`
    maps the field name to its wire type.
    """
    # Kept so code reading the class attribute still finds a dict, but
    # instances no longer share (and mutate) it -- see __init__.
    typeInfo = {}

    def __init__(self):
        # Fix: the original wrote into the class-level `typeInfo`, so every
        # instance (and the class itself) shared one mutated dict.  Bind a
        # fresh per-instance dict instead.
        self.typeInfo = {}
        # the ID of the event
        self.id = None
        self.typeInfo['id'] = 'string'
        # the account name for the account that owns the object being acted on in the event (e.g. the owner of the virtual machine, ip address, or security group)
        self.account = None
        self.typeInfo['account'] = 'string'
        # the date the event was created
        self.created = None
        self.typeInfo['created'] = 'date'
        # a brief description of the event
        self.description = None
        self.typeInfo['description'] = 'string'
        # the name of the account's domain
        self.domain = None
        self.typeInfo['domain'] = 'string'
        # the id of the account's domain
        self.domainid = None
        self.typeInfo['domainid'] = 'string'
        # the event level (INFO, WARN, ERROR)
        self.level = None
        self.typeInfo['level'] = 'string'
        # whether the event is parented
        self.parentid = None
        self.typeInfo['parentid'] = 'string'
        # the project name of the address
        self.project = None
        self.typeInfo['project'] = 'string'
        # the project id of the ipaddress
        self.projectid = None
        self.typeInfo['projectid'] = 'string'
        # the state of the event
        self.state = None
        self.typeInfo['state'] = 'state'
        # the type of the event (see event types)
        self.type = None
        self.typeInfo['type'] = 'string'
        # the name of the user who performed the action (can be different from the account if an admin is performing an action for a user, e.g. starting/stopping a user's virtual machine)
        self.username = None
        self.typeInfo['username'] = 'string'
| {
"repo_name": "MissionCriticalCloud/marvin",
"path": "marvin/cloudstackAPI/listEvents.py",
"copies": "1",
"size": "4188",
"license": "apache-2.0",
"hash": 8110051326058380000,
"line_mean": 40.0588235294,
"line_max": 191,
"alpha_frac": 0.5859598854,
"autogenerated": false,
"ratio": 4.069970845481049,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5155930730881049,
"avg_score": null,
"num_lines": null
} |
"""A command to run the tests specified in settings.TEST_APPS
Created on Aug 22, 2010
@author: jnaous
"""
import os
import pkg_resources
from django.core.management.base import NoArgsCommand
MAKEFILE = \
r"""
##
## Makefile to keep the hash symlinks in SSLCACertificatePath up to date
## Copyright (c) 1998-2001 Ralf S. Engelschall, All Rights Reserved.
##
SSL_PROGRAM=
update: clean
-@ssl_program="$(SSL_PROGRAM)"; \
if [ ".$$ssl_program" = . ]; then \
for dir in . `echo $$PATH | sed -e 's/:/ /g'`; do \
for program in openssl ssleay; do \
if [ -f "$$dir/$$program" ]; then \
if [ -x "$$dir/$$program" ]; then \
ssl_program="$$dir/$$program"; \
break; \
fi; \
fi; \
done; \
if [ ".$$ssl_program" != . ]; then \
break; \
fi; \
done; \
fi; \
if [ ".$$ssl_program" = . ]; then \
echo "Error: neither 'openssl' nor 'ssleay' program found" 1>&2; \
exit 1; \
fi; \
for file in *.crt; do \
if [ ".`grep SKIPME $$file`" != . ]; then \
echo dummy |\
awk '{ printf("%-15s ... Skipped\n", file); }' \
"file=$$file"; \
else \
n=0; \
while [ 1 ]; do \
hash="`$$ssl_program x509 -noout -hash <$$file`"; \
if [ -r "$$hash.$$n" ]; then \
n=`expr $$n + 1`; \
else \
echo dummy |\
awk '{ printf("%-15s ... %s\n", file, hash); }' \
"file=$$file" "hash=$$hash.$$n"; \
ln -s $$file $$hash.$$n; \
break; \
fi; \
done; \
fi; \
done
clean:
-@rm -f [0-9a-fA-F]*.[0-9]*
"""
class Command(NoArgsCommand):
    # Bug fix: the previous help text ("runs the tests...") was copy-pasted
    # from another command; this command installs the cert Makefile.
    help = "Writes the SSL CA certificate hash Makefile into " \
           "settings.XMLRPC_TRUSTED_CA_PATH."

    def handle_noargs(self, **options):
        """Write the MAKEFILE template into the trusted CA directory.

        Creates the directory (and parents) if needed, then writes the
        Makefile that maintains the certificate hash symlinks.
        """
        from django.conf import settings
        # Renamed from `dir` to avoid shadowing the builtin.
        target_dir = os.path.abspath(settings.XMLRPC_TRUSTED_CA_PATH)
        makefile_path = os.path.join(target_dir, "Makefile")
        # Parenthesized single-arg print works on both Python 2 and 3.
        print("Writing the Makefile into directory %s..." % target_dir)
        pkg_resources.ensure_directory(makefile_path)
        # `with` guarantees the handle is closed even if the write fails.
        with open(makefile_path, mode="w") as makefile:
            makefile.write(MAKEFILE)
        print("Done.")
| {
"repo_name": "dana-i2cat/felix",
"path": "expedient/src/python/expedient/clearinghouse/commands/management/commands/install_cert_makefile.py",
"copies": "8",
"size": "2388",
"license": "apache-2.0",
"hash": -5947025016118221000,
"line_mean": 28.85,
"line_max": 74,
"alpha_frac": 0.4568676717,
"autogenerated": false,
"ratio": 3.4310344827586206,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.788790215445862,
"avg_score": null,
"num_lines": null
} |
# A common base page object to be used by all the page object
import time
from selenium.common.exceptions import TimeoutException, StaleElementReferenceException, NoSuchElementException
from selenium.webdriver.support.expected_conditions import visibility_of_element_located, element_to_be_clickable
from selenium.webdriver.support.wait import WebDriverWait
class PageNotLoaded(Exception):
    """Signals that an expected page did not finish loading."""
    # NOTE(review): not raised within this module -- presumably intended
    # for page-object subclasses; confirm before removing.
    pass
class PageObject(object):
    """
    The base page object, all pages will extend this page
    """

    def __init__(self, webdriver, base_url=None):
        """
        :param webdriver: the selenium webdriver instance driving the browser
        :param base_url: optional base url shared by the site's pages
        """
        self._webdriver = webdriver
        self._base_url = base_url

    def get(self, url=None):
        """retrieve the page or visit a link within the page

        :param url: a relative url to the page, if left empty, the
            method will just attempt to get the page using the base url
        :raises ValueError: if both base_url and url are None
        """
        if self._base_url is None and url is None:
            raise ValueError("Both base_url and url are None. "
                             "At least one of the base_url or url should be a valid url string")
        # if only the url is present then go to that url
        elif self._base_url is None:
            self._webdriver.get(url)
        # only base url is present, go to the base url
        elif url is None:
            self._webdriver.get(self._base_url)
        # both are present, combine them and then go to that url
        else:
            self._webdriver.get(self._base_url + url)

    @property
    def base_url(self):
        """The base url this page object navigates relative to."""
        return self._base_url

    @base_url.setter
    def base_url(self, new_url):
        self._base_url = new_url

    @property
    def title(self):
        """The title of the page currently loaded in the browser."""
        return self._webdriver.title

    def _is_loaded_helper(self, element_locator):
        """A helper method that determines if a pages is loaded based on the presence of an element

        :param element_locator: the element locator, this should be some pivotal element in that page
        :return True: if the page is loaded, i.e. element is found and visible. False otherwise
        """
        try:
            # Poll every 1s, for up to 10s, for the pivotal element.
            WebDriverWait(self._webdriver, 10, 1).until(visibility_of_element_located(element_locator))
            return True
        except TimeoutException:
            return False

    def is_loaded(self):
        """A method that would determine if the current page object is visible on the browser

        :returns: True if the page is loaded, False otherwise
        :raises NotImplementedError: subclasses must override this method
        """
        # Bug fix: the original raised `NotImplemented`, which is a constant
        # (not an exception class) and fails with a TypeError when "raised".
        raise NotImplementedError(
            "This method needs to be implemented in the specific page object instance")

    def find_element(self, locator, context=None):
        """Find an element , will filter out only those that are actually visible by the user

        :param locator: a selenium locator that can be used to find the element, e.g. (By.ID, "some_id")
        :param context: can be used if user needs to find a nested element. in that case pass in the element
            as a context, if left as None, the method will just use the webdriver ( top of the html tree )
            to find the element
        :return WebElement: if there is atleast one displayed element matching locator pattern. If there are more
            than one, then method will return the first.
            If no elements are found, or none are displayed, the method will return None.
        """
        elements = self.find_multi_elements(locator=locator, context=context)
        # return only the element that is displayed and enabled, i.e. one
        # that is actually clickable
        for element in elements:
            try:
                # return the first element that is displayed and is enabled.
                if element.is_displayed() and element.is_enabled():
                    return element
            except StaleElementReferenceException:
                # element went stale between lookup and inspection; skip it
                pass
        # either no matching elements were found at all, or none were clickable
        return None

    def find_multi_elements(self, locator, context=None):
        """Find a list of elements with same locator within this page

        :param locator: a selenium web element locator, of the format (By.<locator_type>, "search pattern")
        :param context: if a context is passed, find_element will use the context to find the element within it.
            This is useful if the page needs to use a web element as a context to find a nested element for
            example.
        :returns: the list of the matching web elements. note if no match is found , the function will return
            an empty list
        """
        if context is None:
            context = self._webdriver
        return context.find_elements(*locator)

    def click_element(self, locator, context=None, retries=1, delay=1):
        """Find the element then click it.

        Retries up to ``retries`` times, sleeping ``delay`` seconds between
        attempts, before giving up with a TimeoutException.
        """
        if context is None:
            context = self._webdriver
        screen = None
        stacktrace = None
        for _ in range(retries):
            # wait until element is clickable before attempting to click
            try:
                WebDriverWait(context, 2, 0.5).until(element_to_be_clickable(locator)).click()
                return
            except (TimeoutException, NoSuchElementException, StaleElementReferenceException):
                time.sleep(delay)
        raise TimeoutException("could not click button", screen, stacktrace)

    def send_keys(self, locator, text, context=None):
        """Find an editable element and change the text"""
        WebDriverWait(self._webdriver, 5, 0.5).until(element_to_be_clickable(locator))
        element = self.find_element(locator=locator, context=context)
        # clear first then send the new text
        element.clear()
        element.send_keys(text)
| {
"repo_name": "aelnahas/automation-quandl",
"path": "common/page_object.py",
"copies": "1",
"size": "5827",
"license": "mit",
"hash": -303010983064721100,
"line_mean": 41.2246376812,
"line_max": 117,
"alpha_frac": 0.6349751158,
"autogenerated": false,
"ratio": 4.650438946528332,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.009009575657578043,
"num_lines": 138
} |
"""A community galerka site
Usage:
galerka [options]
Options:
-h, --help Show help
--hostname=HOSTNAME Hostname to run on [default: localhost]
-p, --port=NUM Port to listen on [default: 4000]
-d, --debug Enable debugging
--redis-url=URL Redis URL [default: redis://localhost:6379/#galerka]
--postgres-dsn=DSN Postgres DSN [default: dbname=galerka user=galerka]
--postgres-prefix=PREFIX Postgres table name prefix [default: galerka_]
The Redis URL can be in the form:
redis://[db-number[:password]@]host:port[?option=value][#prefix]
A ':' will be appended to the prefix if it's not already there
Options are:
poolsize: Connections to allocate [default: 20]
"""
from werkzeug.serving import run_simple
import docopt
from galerka.app import application
from galerka.middleware import galerka_app_context
def main(options):
    """Run the galerka development server.

    :param options: dict of command-line options as parsed by docopt
    """
    print(options)
    debug = options['--debug']
    # Build the WSGI app inside its context manager so that app resources
    # are set up before serving and torn down on shutdown.
    with galerka_app_context(application,
                             redis_url=options['--redis-url'],
                             postgres_dsn=options['--postgres-dsn'],
                             postgres_prefix=options['--postgres-prefix'],
                             debug=debug) as app:
        run_simple(hostname=options['--hostname'],
                   port=int(options['--port']),
                   application=app,
                   use_reloader=debug,
                   use_debugger=debug,
                   extra_files=app.extra_files)
# Only start the server when executed directly -- the original ran the
# server unconditionally, i.e. even on a mere `import`.
if __name__ == '__main__':
    main(docopt.docopt(__doc__))
| {
"repo_name": "encukou/galerka",
"path": "galerka/__main__.py",
"copies": "1",
"size": "1512",
"license": "mit",
"hash": 7548781897419135000,
"line_mean": 28.0769230769,
"line_max": 80,
"alpha_frac": 0.6223544974,
"autogenerated": false,
"ratio": 3.7241379310344827,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48464924284344824,
"avg_score": null,
"num_lines": null
} |
"""A compact GUI application for optical distortion calibration of endoscopes.
See:
https://github.com/gift-surg/endocal
"""
from setuptools import setup
# To use a consistent encoding
from codecs import open
from os import path
# Directory holding the documentation sources shipped with the project.
doc_dir = path.abspath(path.join(path.dirname(__file__), 'doc'))
# Get the summary
summary = 'A cross-platform, compact GUI application for the optical' +\
          ' distortion calibration of fluid-immersed endoscopes.'
# Get the long description
# (read from doc/description.rst as UTF-8; used verbatim on PyPI)
with open(path.join(doc_dir, 'description.rst'), encoding='utf-8') as f:
    long_description = f.read()
setup(
    name='endocal',
    # Versions should comply with PEP440. For a discussion on single-sourcing
    # the version across setup.py and the project code, see
    # https://packaging.python.org/en/latest/single_source_version.html
    version='18.02.13',
    description=summary,
    long_description=long_description,
    # The project's main homepage.
    url='https://github.com/gift-surg/endocal',
    # Author details
    author='Dzhoshkun Ismail Shakir',
    author_email='d.shakir@ucl.ac.uk',
    # Choose your license
    license='BSD-3-Clause',
    # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        # How mature is this project? Common values are
        #   3 - Alpha
        #   4 - Beta
        #   5 - Production/Stable
        'Development Status :: 3 - Alpha',
        # Indicate who your project is intended for
        'Intended Audience :: Science/Research',
        'Intended Audience :: Healthcare Industry',
        'Topic :: Scientific/Engineering :: Medical Science Apps.',
        'Topic :: Scientific/Engineering :: Image Recognition',
        'Topic :: Multimedia :: Graphics',
        'Topic :: Multimedia :: Video :: Capture',
        # Pick your license as you wish (should match "license" above)
        'License :: OSI Approved :: BSD License',
        # Specify the Python versions you support here. In particular, ensure
        # that you indicate whether you support Python 2, Python 3 or both.
        'Programming Language :: Python',
        'Operating System :: POSIX :: Linux',
        'Operating System :: MacOS :: MacOS X',
        'Operating System :: Microsoft :: Windows',
    ],
    # What does your project relate to?
    # Bug fix: adjacent string literals concatenate with no separator; the
    # original yielded "medical imaging,image processing" and
    # "medical physics,image-guided interventions". Trailing spaces added.
    keywords='optical distortion calibration, endoscope, endoscopy, medical imaging, '
             'image processing, biomedical engineering, medical physics, '
             'image-guided interventions',
    # You can just specify the packages manually here if your project is
    # simple. Or you can use find_packages().
    packages=['endocal', 'cad'],
    # As recommended in
    # https://docs.python.org/2/distutils/setupscript.html#installing-package-data
    package_dir={'endocal': 'endocal', 'cad': 'cad'},
    py_modules=['endocal.calibration', 'cad.dxf'],
    # List run-time dependencies here. These will be installed by pip when
    # your project is installed. For an analysis of "install_requires" vs pip's
    # requirements files see:
    # https://packaging.python.org/en/latest/requirements.html
    install_requires=['PyYAML', 'numpy'],
    # If there are data files included in your packages that need to be
    # installed, specify them here. If using Python 2.6 or less, then these
    # have to be included in MANIFEST.in as well.
    package_data={'endocal': ['data/sample_001/*', 'data/sample_002/*'],
                  'cad': ['data/dxf/header.dxf', 'data/dxf/footer.dxf',
                          'data/dxf/polyline.dxf', 'data/dxf/seqend.dxf']},
    # To provide executable scripts, use entry points in preference to the
    # "scripts" keyword. Entry points provide cross-platform support and allow
    # pip to create the appropriate form of executable for the target platform.
    entry_points={
        'console_scripts': [
            'endocal=endocal:main',
            'endocal-test=endocal:test',
            'dxf=cad:generate_dxf'
        ],
    },
)
| {
"repo_name": "gift-surg/endocal",
"path": "setup.py",
"copies": "1",
"size": "4008",
"license": "bsd-3-clause",
"hash": -1158979411797983000,
"line_mean": 35.7706422018,
"line_max": 85,
"alpha_frac": 0.6551896208,
"autogenerated": false,
"ratio": 3.89126213592233,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.504645175672233,
"avg_score": null,
"num_lines": null
} |
# A complete rewrite of the CERN@school particle recognition and classification algorithm,
# for ease of integration with existing LUCID data and libraries.
# Can be imported and called from anywhere to identify particle types based on their attributes
# Author (code): Cal Hewitt, based on an algorithm from http://github.com/cernatschool/cluster-sorter
import numpy as np
from scipy.optimize import leastsq
import json
import os
from collections import OrderedDict
try:
import common
except ImportError:
from . import common
# Load up the types file
# `types` maps particle name -> subtype name -> {property: [lower, upper]}.
# NOTE(review): the file handle from open() is never closed; harmless at
# import time but worth tidying up.
types = json.loads(open(os.path.dirname(os.path.realpath(__file__)) + "/types/old_algorithm.json").read())
# A list of bounds of properties of various particle types, adapted from http://github.com/cernatschool/cluster-sorter
# Stores and calculates the attributes of a single cluster ('blob') of pixels
class Blob(common.Blob):
    def classify(self):
        """Return the particle-type name whose bounds match this blob.

        Each entry in `types` maps subtype names to property bounds of
        the form {property: (lower, upper)}.  The first particle type
        with a subtype whose every bound contains the blob's value wins;
        "other" is returned when nothing matches.
        """
        # Set up a dictionary of the blob's own values
        blob_values = {"num_pixels": self.num_pixels,
                       "radius": self.radius,
                       "density": self.density,
                       "squiggliness": self.squiggliness}
        # Loop through each potential particle type, looking for a match
        for particle_name, subtypes in types.items():
            for properties in subtypes.values():
                # A subtype matches iff every property lies within its
                # [lower, upper] bounds.  all() short-circuits on the first
                # out-of-range property (the original kept iterating after
                # a mismatch was already known).
                if all(bounds[0] <= blob_values[prop] <= bounds[1]
                       for prop, bounds in properties.items()):
                    return particle_name
        # By this point, all potential particles have been checked,
        # so the blob must be something else
        return "other"
def classify(blob):
    """Convenience wrapper: build a Blob and return its classification."""
    return Blob(blob).classify()
def classify_multiple(blobs):
    """Classify every blob in *blobs*.

    :param blobs: iterable of raw blob data accepted by `classify`
    :returns: list of particle-type names, one per input blob
    """
    # Comprehension instead of the original append loop (same order/result).
    return [classify(blob) for blob in blobs]
def classify_masked(blob):
    """Classify a blob from early LUCID data where half of the pixels are
    masked: pixel count and density are doubled to compensate before the
    standard classification runs."""
    compensated = Blob(blob)
    compensated.num_pixels *= 2
    compensated.density *= 2
    return compensated.classify()
| {
"repo_name": "calhewitt/lucid_utils",
"path": "lucid_utils/classification/old_algorithm.py",
"copies": "2",
"size": "2638",
"license": "mit",
"hash": 2514214697699761000,
"line_mean": 42.2459016393,
"line_max": 124,
"alpha_frac": 0.6595905989,
"autogenerated": false,
"ratio": 4.331691297208539,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.004543997511715716,
"num_lines": 61
} |
# A complete working Python program to demonstrate all
# stack operations using a doubly linked list
class Node:
    """A single element of a doubly linked list."""

    def __init__(self, data):
        self.data = data  # Assign data
        self.next = None  # Initialize next as null
        self.prev = None  # Initialize prev as null
class Stack:
    """A LIFO stack backed by a doubly linked list; `head` is the top.

    >>> stack = Stack()
    >>> stack.is_empty()
    True
    >>> stack.print_stack()
    stack elements are:
    >>> for i in range(4):
    ...     stack.push(i)
    ...
    >>> stack.is_empty()
    False
    >>> stack.print_stack()
    stack elements are:
    3->2->1->0->
    >>> stack.top()
    3
    >>> len(stack)
    4
    >>> stack.pop()
    3
    >>> stack.print_stack()
    stack elements are:
    2->1->0->
    """

    def __init__(self):
        self.head = None  # top of the stack; None when empty

    def push(self, data):
        """add a Node to the stack"""
        if self.head is None:
            self.head = Node(data)
        else:
            new_node = Node(data)
            self.head.prev = new_node
            new_node.next = self.head
            new_node.prev = None
            self.head = new_node

    def pop(self):
        """pop the top element off the stack; returns None when empty"""
        if self.head is None:
            return None
        temp = self.head.data
        self.head = self.head.next
        # Bug fix: the original unconditionally did `self.head.prev = None`,
        # which raised AttributeError when popping the last element (the
        # new head is None).  Guard against the now-empty case.
        if self.head is not None:
            self.head.prev = None
        return temp

    def top(self):
        """return the top element of the stack
        (raises AttributeError on an empty stack, as before)"""
        return self.head.data

    def __len__(self):
        # Walk the list counting nodes; O(n).
        temp = self.head
        count = 0
        while temp is not None:
            count += 1
            temp = temp.next
        return count

    def is_empty(self):
        return self.head is None

    def print_stack(self):
        print("stack elements are:")
        temp = self.head
        while temp is not None:
            print(temp.data, end="->")
            temp = temp.next
# Code execution starts here
if __name__ == "__main__":
    # Demonstrate the stack operations interactively.
    demo = Stack()
    print("Stack operations using Doubly LinkedList")
    # Push 4, 5, 6 and 7; the stack grows towards the head.
    for value in (4, 5, 6, 7):
        demo.push(value)
    # Show the full stack: 7->6->5->4->
    demo.print_stack()
    # Peek at the head element without removing it.
    print("\nTop element is ", demo.top())
    # Report the number of stored elements.
    print("Size of the stack is ", len(demo))
    # Remove the two most recently pushed elements (7, then 6).
    demo.pop()
    demo.pop()
    # Two pops leave 5->4->
    demo.print_stack()
    # Print True if the stack is empty else False
    print("\nstack is empty:", demo.is_empty())
| {
"repo_name": "TheAlgorithms/Python",
"path": "data_structures/stacks/stack_using_dll.py",
"copies": "1",
"size": "2960",
"license": "mit",
"hash": -3943922271114758000,
"line_mean": 22.0650406504,
"line_max": 66,
"alpha_frac": 0.5233108108,
"autogenerated": false,
"ratio": 4.060356652949245,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5083667463749245,
"avg_score": null,
"num_lines": null
} |
"""A component registry, similar to nlp_saft::RegisteredClass<>.
Like nlp_saft::RegisteredClass<>, one does not need to explicitly import the
module containing each subclass. It is sufficient to add subclasses as build
dependencies.
Unlike nlp_saft::RegisteredClass<>, which allows subclasses to be registered
under arbitrary names, subclasses must be looked up based on their type name.
This restriction allows the registry to dynamically import the module containing
the desired subclass.
Example usage:
# In basepackage/base.py...
@registry.RegisteredClass
class MyBase:
def my_method(self):
pass
# In implpackage/impl.py...
class MyImpl(MyBase):
def my_method(self):
...
# In userpackage/user.py...
  try:
impl = MyBase.Create("implpackage.impl.MyImpl")
except ValueError as error:
...
Note that there is no registration statement in impl.py. For convenience, if
the base class and subclass share a package prefix, the shared portion of the
package path may be omitted in the call to Create(). For example, if the base
class is 'foo.bar.Base' and the subclass is 'foo.bar.baz.Impl', then these are
all equivalent:
Base.Create('foo.bar.baz.Impl')
Base.Create('bar.baz.Impl')
Base.Create('baz.Impl')
Name resolution happens in inside-out fashion, so if there is also a subclass
'foo.baz.Impl', then
Base.Create('baz.Impl') # returns foo.bar.baz.Impl
Base.Create('bar.baz.Impl') # returns foo.bar.baz.Impl
Base.Create('foo.baz.Impl') # returns foo.baz.Impl
NB: Care is required when moving code, because config files may refer to the
classes being moved by their type name, which may include the package path. To
preserve existing names, leave a stub in the original location that imports the
class from its new location. For example,
# Before move, in oldpackage/old.py...
class Foo(Base):
...
# After move, in newpackage/new.py...
class Bar(Base):
...
# After move, in oldpackage/old.py...
from newpackage import new
Foo = new.Bar
"""
import inspect
import sys
from tensorflow.python.platform import tf_logging as logging
def _GetClass(name):
"""Looks up a class by name.
Args:
name: The fully-qualified type name of the class to return.
Returns:
The class associated with the |name|, or None on error.
"""
elements = name.split('.')
# Need at least "module.Class".
if len(elements) < 2:
logging.debug('Malformed type: "%s"', name)
return None
module_path = '.'.join(elements[:-1])
class_name = elements[-1]
# Import the module.
try:
__import__(module_path)
except ImportError as e:
logging.debug('Unable to find module "%s": "%s"', module_path, e)
return None
module = sys.modules[module_path]
# Look up the class.
if not hasattr(module, class_name):
logging.debug('Name "%s" not found in module: "%s"', class_name,
module_path)
return None
class_obj = getattr(module, class_name)
# Check that it is actually a class.
if not inspect.isclass(class_obj):
logging.debug('Name does not refer to a class: "%s"', name)
return None
return class_obj
def _Create(baseclass, subclass_name, *args, **kwargs):
  """Creates an instance of a named subclass.

  Args:
    baseclass: The expected base class.
    subclass_name: The fully-qualified type name of the subclass to create.
    *args: Passed to the subclass constructor.
    **kwargs: Passed to the subclass constructor.

  Returns:
    An instance of the named subclass, or None on error.
  """
  subclass = _GetClass(subclass_name)
  if subclass is None:
    # _GetClass() already logged the failure.
    return None
  if issubclass(subclass, baseclass):
    return subclass(*args, **kwargs)
  logging.debug('Class "%s" is not a subclass of "%s"', subclass_name,
                baseclass.__name__)
  return None
def _ResolveAndCreate(baseclass, path, subclass_name, *args, **kwargs):
  """Resolves the name of a subclass and creates an instance of it.

  Resolution happens inside-out with respect to |path|: for path
  'google3.foo.bar' and subclass name 'baz.ClassName' the candidate
  names tried, in order, are:

    'google3.foo.bar.baz.ClassName'
    'google3.foo.baz.ClassName'
    'google3.baz.ClassName'
    'baz.ClassName'

  Args:
    baseclass: The expected base class.
    path: The package path used to resolve the subclass.
    subclass_name: The name of the subclass to create.
    *args: Passed to the subclass constructor.
    **kwargs: Passed to the subclass constructor.

  Returns:
    An instance of the named subclass corresponding to the inner-most
    successful name resolution.

  Raises:
    ValueError: If no candidate name can be resolved and created.
  """
  prefix = path.split('.')
  while True:
    candidate = '.'.join(prefix + [subclass_name])
    instance = _Create(baseclass, candidate, *args, **kwargs)
    # Truthiness test preserved from the original (`if subclass:`).
    if instance:
      return instance  # success
    if not prefix:
      break  # no more paths to try
    prefix.pop()  # try resolving against the next-outer path
  raise ValueError(
      'Failed to create subclass "%s" of base class %s using path %s' %
      (subclass_name, baseclass.__name__, path))
def RegisteredClass(baseclass):
  """Decorates the |baseclass| with a static Create() method."""
  # Refuse to clobber an existing Create attribute.
  assert not hasattr(baseclass, 'Create')

  def Create(subclass_name, *args, **kwargs):
    """A wrapper around _Create() that curries the |baseclass|."""
    base_path = inspect.getmodule(baseclass).__name__
    return _ResolveAndCreate(baseclass, base_path, subclass_name, *args,
                             **kwargs)

  baseclass.Create = staticmethod(Create)
  return baseclass
| {
"repo_name": "jmhsi/justin_tinker",
"path": "data_science/courses/learning_dl_packages/models/research/syntaxnet/syntaxnet/util/registry.py",
"copies": "15",
"size": "5847",
"license": "apache-2.0",
"hash": -8933790108381755000,
"line_mean": 30.6054054054,
"line_max": 80,
"alpha_frac": 0.7063451343,
"autogenerated": false,
"ratio": 3.801690507152146,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
"""A component that allows creates the source for the glyphs and
handle transformation.
"""
# Author: KK Rai (kk.rai [at] iitb.ac.in)
# R. Ambareesha (ambareesha [at] iitb.ac.in)
# Prabhu Ramachandran <prabhu_r@users.sf.net>
# Enthought library imports.
from traits.api import (Instance, List, Trait, Bool,
TraitPrefixList, Property, Dict)
from traitsui.api import View, Group, Item, InstanceEditor
from tvtk.api import tvtk
from tvtk.common import camel2enthought, configure_outputs
from apptools.persistence.state_pickler import set_state
# Local imports.
from mayavi.core.common import handle_children_state
from mayavi.core.component import Component
######################################################################
# `GlyphSource` class.
######################################################################
class GlyphSource(Component):
    """Manages the source geometry for glyphs (2D glyph, arrow, cone,
    cylinder, sphere, cube or axes) together with a transform filter
    that positions the chosen glyph relative to its data point.
    """

    # The version of this class. Used for persistence.
    __version__ = 1

    # Glyph position. This can be one of ['head', 'tail', 'center'],
    # and indicates the position of the glyph with respect to the
    # input point data. Please note that this will work correctly
    # only if you do not mess with the source glyph's basic size. For
    # example if you use a ConeSource and set its height != 1, then the
    # 'head' and 'tail' options will not work correctly.
    glyph_position = Trait('center', TraitPrefixList(['head', 'tail',
                                                      'center']),
                           desc='position of glyph w.r.t. data point')

    # The Source to use for the glyph. This is chosen from
    # `self._glyph_list` or `self.glyph_dict`.
    glyph_source = Instance(tvtk.Object, allow_none=False, record=True)

    # A dict of glyphs to use.
    glyph_dict = Dict(desc='the glyph sources to select from',
                      record=False)

    # A list of predefined glyph sources that can be used.
    glyph_list = Property(List(tvtk.Object), record=False)

    ########################################
    # Private traits.

    # The transformation to use to place glyph appropriately.
    _trfm = Instance(tvtk.TransformFilter, args=())

    # Used for optimization.  While True, the trait handlers below are
    # no-ops, so a cascade of trait changes triggers only one update.
    _updating = Bool(False)

    ########################################
    # View related traits.

    view = View(Group(Group(Item(name='glyph_position')),
                      Group(Item(name='glyph_source',
                                 style='custom',
                                 resizable=True,
                                 editor=InstanceEditor(name='glyph_list'),
                                 ),
                            label='Glyph Source',
                            show_labels=False)
                      ),
                resizable=True)

    ######################################################################
    # `Base` interface
    ######################################################################
    def __get_pure_state__(self):
        # Drop non-persistable attributes from the pickled state:
        # `_updating` is a transient flag and `glyph_list` is a Property
        # derived from `glyph_dict`.
        d = super(GlyphSource, self).__get_pure_state__()
        for attr in ('_updating', 'glyph_list'):
            d.pop(attr, None)
        return d

    def __set_pure_state__(self, state):
        # Restores pickled state; supports two formats: newer states carry
        # `glyph_dict` directly, legacy states only carry `glyph_list`.
        if 'glyph_dict' in state:
            # Set their state.
            set_state(self, state, first=['glyph_dict'], ignore=['*'])
            ignore = ['glyph_dict']
        else:
            # Set the dict state using the persisted list.
            gd = self.glyph_dict
            gl = self.glyph_list
            handle_children_state(gl, state.glyph_list)
            for g, gs in zip(gl, state.glyph_list):
                name = camel2enthought(g.__class__.__name__)
                if name not in gd:
                    gd[name] = g
                # Set the glyph source's state.
                set_state(g, gs)
            ignore = ['glyph_list']
        g_name = state.glyph_source.__metadata__['class_name']
        name = camel2enthought(g_name)
        # Set the correct glyph_source.
        self.glyph_source = self.glyph_dict[name]
        set_state(self, state, ignore=ignore)

    ######################################################################
    # `Component` interface
    ######################################################################
    def setup_pipeline(self):
        """Override this method so that it *creates* the tvtk
        pipeline.

        This method is invoked when the object is initialized via
        `__init__`. Note that at the time this method is called, the
        tvtk data pipeline will *not* yet be setup. So upstream data
        will not be available. The idea is that you simply create the
        basic objects and setup those parts of the pipeline not
        dependent on upstream sources and filters. You should also
        set the `actors` attribute up at this point.
        """
        self._trfm.transform = tvtk.Transform()
        # Setup the glyphs.
        self.glyph_source = self.glyph_dict['glyph_source2d']

    def update_pipeline(self):
        """Override this method so that it *updates* the tvtk pipeline
        when data upstream is known to have changed.

        This method is invoked (automatically) when any of the inputs
        sends a `pipeline_changed` event.
        """
        # Re-apply the positioning transform for the current position.
        self._glyph_position_changed(self.glyph_position)
        self.pipeline_changed = True

    def update_data(self):
        """Override this method so that it flushes the vtk pipeline if
        that is necessary.

        This method is invoked (automatically) when any of the inputs
        sends a `data_changed` event.
        """
        self.data_changed = True

    def render(self):
        # Skip rendering while a batched trait update is in progress.
        if not self._updating:
            super(GlyphSource, self).render()

    ######################################################################
    # Non-public methods.
    ######################################################################
    def _glyph_source_changed(self, value):
        # Trait handler: a new glyph source object was assigned/selected.
        if self._updating == True:
            return
        gd = self.glyph_dict
        value_cls = camel2enthought(value.__class__.__name__)
        # Register externally-supplied sources in the dict so they appear
        # in `glyph_list` and can be persisted/recorded.
        if value not in gd.values():
            gd[value_cls] = value
        # Now change the glyph's source trait.
        self._updating = True
        recorder = self.recorder
        if recorder is not None:
            # Record the change as a script statement for later replay.
            name = recorder.get_script_id(self)
            lhs = '%s.glyph_source'%name
            rhs = '%s.glyph_dict[%r]'%(name, value_cls)
            recorder.record('%s = %s'%(lhs, rhs))
        name = value.__class__.__name__
        if name == 'GlyphSource2D':
            # The 2D glyph is wired up directly, without the transform.
            configure_outputs(self, value)
        else:
            # Other sources are routed through the transform filter.
            self.configure_input(self._trfm, value)
            configure_outputs(self, self._trfm)
        value.on_trait_change(self.render)
        self._updating = False
        # Now update the glyph position since the transformation might
        # be different.
        self._glyph_position_changed(self.glyph_position)

    def _glyph_position_changed(self, value):
        # Trait handler: move the glyph to 'head'/'tail'/'center' relative
        # to the data point by adjusting the source center/transform.
        if self._updating == True:
            return
        self._updating = True
        tr = self._trfm.transform
        tr.identity()
        g = self.glyph_source
        name = g.__class__.__name__
        # Compute transformation factor
        # NOTE(review): tr_factor appears to be half the glyph's extent
        # along its main axis (full radius for spheres) -- confirm per
        # source type before relying on this.
        if name == 'CubeSource':
            tr_factor = g.x_length/2.0
        elif name == 'CylinderSource':
            tr_factor = -g.height/2.0
        elif name == 'ConeSource':
            tr_factor = g.height/2.0
        elif name == 'SphereSource':
            tr_factor = g.radius
        else:
            tr_factor = 1.
        # Translate the glyph
        if value == 'tail':
            if name == 'GlyphSource2D':
                g.center = 0.5, 0.0, 0.0
            elif name == 'ArrowSource':
                pass
            elif name == 'CylinderSource':
                g.center = 0, tr_factor, 0.0
            elif hasattr(g, 'center'):
                g.center = tr_factor, 0.0, 0.0
        elif value == 'head':
            if name == 'GlyphSource2D':
                g.center = -0.5, 0.0, 0.0
            elif name == 'ArrowSource':
                tr.translate(-1, 0, 0)
            elif name == 'CylinderSource':
                g.center = 0,-tr_factor, 0.0
            else:
                g.center = -tr_factor, 0.0, 0.0
        else:
            # 'center' (the default position).
            if name == 'ArrowSource':
                tr.translate(-0.5, 0, 0)
            elif name != 'Axes':
                g.center = 0.0, 0.0, 0.0
        # NOTE(review): the rotation presumably aligns the cylinder's axis
        # with the orientation of the other glyphs -- confirm.
        if name == 'CylinderSource':
            tr.rotate_z(90)
        self._updating = False
        self.render()

    def _get_glyph_list(self):
        # Property getter for `glyph_list`.
        # Return the glyph list as per the original order in earlier
        # implementation.
        order = ['glyph_source2d', 'arrow_source', 'cone_source',
                 'cylinder_source', 'sphere_source', 'cube_source',
                 'axes']
        gd = self.glyph_dict
        # Any user-registered sources are appended after the predefined ones.
        for key in gd:
            if key not in order:
                order.append(key)
        return [gd[key] for key in order]

    def _glyph_dict_default(self):
        # Default trait initializer: the predefined glyph sources.
        g = {'glyph_source2d': tvtk.GlyphSource2D(glyph_type='arrow', filled=False),
             'arrow_source': tvtk.ArrowSource(),
             'cone_source': tvtk.ConeSource(height=1.0, radius=0.2, resolution=15),
             'cylinder_source': tvtk.CylinderSource(height=1.0, radius=0.15,
                                                    resolution=10),
             'sphere_source': tvtk.SphereSource(),
             'cube_source': tvtk.CubeSource(),
             'axes': tvtk.Axes(symmetric=1)}
        return g
| {
"repo_name": "alexandreleroux/mayavi",
"path": "mayavi/components/glyph_source.py",
"copies": "3",
"size": "9623",
"license": "bsd-3-clause",
"hash": -8443530728403129000,
"line_mean": 36.737254902,
"line_max": 84,
"alpha_frac": 0.5183414736,
"autogenerated": false,
"ratio": 4.2959821428571425,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.001032891734554322,
"num_lines": 255
} |
"""A component that provides a selection of implicit widgets
to be used by various modules.
"""
# Author: Suyog Dutt Jain <suyog.jain@aero.iitb.ac.in>
# Prabhu Ramachandran <prabhu at aero.iitb.ac.in>
# Copyright (c) 2009-2015, Enthought, Inc.
# License: BSD Style.
import pickle
from traits.api import (Instance, Trait, Bool, TraitMap, Enum, Dict,
Str, Int)
from traitsui.api import View, Group, Item
from tvtk.api import tvtk
from apptools.persistence.state_pickler import set_state
from mayavi.core.component import Component
######################################################################
# `ImplicitWidgets` class.
######################################################################
class ImplicitWidgets(Component):
    """Manage a family of tvtk 3D widgets (Box, Sphere, Plane and
    ImplicitPlane) and keep the `implicit_function` trait synchronized
    with whichever widget is currently selected via `widget_mode`.
    """
    # The version of this class. Used for persistence.
    __version__ = 0
    # The widget type to use.
    widget_mode = Enum('Box', 'Sphere', 'Plane','ImplicitPlane',
                       desc='the implicit widget to use')
    # The actual poly data source widget.
    widget = Instance(tvtk.ThreeDWidget, record=True)
    # Maps the public update mode to the VTK event name observed on the
    # widget: 'interactive' -> InteractionEvent (update continuously),
    # 'semi-interactive' -> EndInteractionEvent (update when done).
    update_mode = Trait('semi-interactive',
                        TraitMap({'interactive':'InteractionEvent',
                                  'semi-interactive': 'EndInteractionEvent'}),
                        desc='speed at which the data should be updated')
    # The implicit function defined by the currently active widget.
    implicit_function = Instance(tvtk.ImplicitFunction, allow_none=False)
    ########################################
    # Private traits.
    # True until the active widget has been placed on the input once.
    _first = Bool(True)
    # Re-entrancy guard for the widget trait handlers.
    _busy = Bool(False)
    # VTK observer id installed on the active widget (-1 when unset).
    _observer_id = Int(-1)
    # The actual widgets.
    _widget_dict = Dict(Str, Instance(tvtk.ThreeDWidget,
                        allow_none=False))
    # The actual implicit functions.
    _implicit_function_dict = Dict(Str, Instance(tvtk.ImplicitFunction,
                                   allow_none=False))
    ########################################
    # View related traits.
    ########################################
    # Create the UI for the traits.
    view = View(Group(Item(name='widget_mode'), Item(name='widget',
                      style='custom', resizable=True),
                      label='Widget Source', show_labels=False),
                resizable=True)
    #####################################################################
    # `object` interface
    ######################################################################
    def __init__(self, **traits):
        # Call parent class' init.
        super(ImplicitWidgets, self).__init__(**traits)
        # Initialize the source to the default widget's instance from
        # the dictionary if needed.
        if 'widget_mode' not in traits:
            self._widget_mode_changed(self.widget_mode)
    ######################################################################
    # `Base` interface
    ######################################################################
    def __get_pure_state__(self):
        """Return the picklable state, dropping transient traits (they
        are recreated on restore) and adding the box widget's transform
        matrix, which the traits alone do not capture.
        """
        d = super(ImplicitWidgets, self).__get_pure_state__()
        for attr in ('_first', '_busy', '_observer_id', 'widget',
                     'implicit_function'):
            d.pop(attr, None)
        # The box widget requires a transformation matrix to be pickled.
        tfm = tvtk.Transform()
        w = self._widget_dict['Box']
        w.get_transform(tfm)
        d['matrix'] = pickle.dumps(tfm.matrix)
        return d
    def __set_pure_state__(self, state):
        """Restore the state produced by `__get_pure_state__`."""
        # Pop the transformation matrix for the box widget.
        mat = state.pop('matrix')
        # Now set their state.
        set_state(self, state, first=['widget_mode'], ignore=['*'])
        # Set state of rest of the attributes ignoring the widget_mode.
        set_state(self, state, ignore=['widget_mode'])
        # Set the transformation for Box widget.
        tfm = tvtk.Transform()
        tfm.set_matrix(pickle.loads(mat))
        w = self._widget_dict['Box']
        w.set_transform(tfm)
        # Some widgets need some cajoling to get their setup right.
        w = self.widget
        # Set the input.
        if len(self.inputs) > 0:
            w.input = self.inputs[0].outputs[0]
        w.update_traits()
        mode = self.widget_mode
        if mode == 'Plane':
            # NOTE(review): attribute access on the restored state's
            # widget dict -- presumably what `set_state` yields; verify.
            wd = state._widget_dict[mode]
            w.origin = wd.origin
            w.normal = wd.normal
            w.update_placement()
        self.update_implicit_function()
        # Set the widgets trait so that the widget is rendered if needed.
        self.widgets = [w]
    ######################################################################
    # `Component` interface
    ######################################################################
    def setup_pipeline(self):
        """Override this method so that it *creates* the tvtk
        pipeline.
        """
        # Setup the widgets.
        self.widgets = [self.widget]
    def update_pipeline(self):
        """Override this method so that it *updates* the tvtk pipeline
        when data upstream is known to have changed.
        This method is invoked (automatically) when any of the inputs
        sends a `pipeline_changed` event.
        """
        if len(self.inputs) == 0:
            return
        inp = self.inputs[0].outputs[0]
        w = self.widget
        w.input = inp
        # Place the widget on the data only the first time so the
        # user's positioning survives later upstream changes.
        if self._first:
            w.place_widget()
            self._first = False
        # Set our output.
        if self.outputs != [inp]:
            self.outputs = [inp]
        else:
            self.data_changed = True
        self.pipeline_changed = True
    def update_data(self):
        """Override this method so that it flushes the vtk pipeline if
        that is necessary.
        This method is invoked (automatically) when any of the inputs
        sends a `data_changed` event.
        """
        self.data_changed = True
    ######################################################################
    # `SourceWidget` interface
    ######################################################################
    def update_implicit_function(self):
        """Update the implicit_function from the widget data.
        """
        # Each widget type exposes a different getter for its function.
        dispatch = {'Sphere': 'get_sphere', 'Box': 'get_planes',
                    'Plane': 'get_plane', 'ImplicitPlane': 'get_plane'}
        method = getattr(self.widget, dispatch[self.widget_mode])
        method(self.implicit_function)
    ######################################################################
    # Non-public traits.
    ######################################################################
    def _widget_changed(self, old, value):
        """Static trait handler: wire up the newly selected widget and
        swap `implicit_function` to the matching function instance."""
        if len(self.inputs) > 0:
            value.input = self.inputs[0].outputs[0]
            value.place_widget()
        self.implicit_function = self._implicit_function_dict[self.widget_mode]
        if old is not None:
            self._connect(old, remove=True)
        self._connect(value, remove=False)
        self.widgets = [value]
    def _connect(self, value, remove=False):
        """Wire up event handlers or tear them down given a widget
        `value`. If `remove` is True, then tear them down."""
        if remove and self._observer_id > 0:
            value.remove_observer(self._observer_id)
        else:
            # `self.update_mode_` is the mapped trait value, i.e. the
            # VTK event name for the chosen update mode.
            self._observer_id = value.add_observer(self.update_mode_,
                                                   self._on_interaction_event)
        if isinstance(value, tvtk.PlaneWidget) or \
               isinstance(value, tvtk.ImplicitPlaneWidget):
            value.on_trait_change(self._on_alignment_set,
                                  'normal_to_x_axis', remove=remove)
            value.on_trait_change(self._on_alignment_set,
                                  'normal_to_y_axis', remove=remove)
            value.on_trait_change(self._on_alignment_set,
                                  'normal_to_z_axis', remove=remove)
        value.on_trait_change(self._on_widget_trait_changed,
                              remove=remove)
        value.on_trait_change(self.render, remove=remove)
    def _on_interaction_event(self, obj, event):
        # VTK observer callback: refresh the implicit function.
        self.update_implicit_function()
    def _update_mode_changed(self, old, new):
        """Re-register the interaction observer under the event mapped
        to the new update mode."""
        w = self.widget
        if w is not None:
            w.remove_observer(self._observer_id)
            self._observer_id = w.add_observer(self.update_mode_,
                                               self._on_interaction_event)
            w.on_trait_change(self.render)
            self.render()
    def _on_widget_trait_changed(self):
        # Refresh the implicit function unless we are re-entering or
        # updates are explicitly disabled.
        if (not self._busy) and (self.update_mode != 'non-interactive'):
            self._busy = True
            self.implicit_function = self._implicit_function_dict[self.widget_mode]
            self.update_implicit_function()
            self.render()
            self._busy = False
    def _on_alignment_set(self):
        """Event handler when the widget's normal is reset (if
        applicable)."""
        w = self.widget
        w.place_widget()
        w.update_traits()
        self.render()
    def _scene_changed(self, old, new):
        """Propagate the new scene's foreground color to the widgets."""
        super(ImplicitWidgets, self)._scene_changed(old, new)
        # NOTE(review): `_foreground_changed_for_scene` is not defined
        # in this class; presumably inherited from Component -- verify.
        self._foreground_changed_for_scene(None, new.foreground)
    def _widget_mode_changed(self, value):
        """This method is invoked (automatically) when the
        `widget_mode` trait is changed.
        """
        self.widget = self._widget_dict[self.widget_mode]
    def __widget_dict_default(self):
        """Default value for the widget dict."""
        w = {'Box':tvtk.BoxWidget(place_factor = 0.9),
             'Sphere':tvtk.SphereWidget(place_factor = 0.9),
             'Plane':tvtk.PlaneWidget(place_factor = 0.9),
             'ImplicitPlane':
             tvtk.ImplicitPlaneWidget(place_factor=0.9,
                                      draw_plane=False)}
        return w
    def __implicit_function_dict_default(self):
        """Default value for the implicit function dict."""
        ip = {'Box':tvtk.Planes(),
              'Sphere':tvtk.Sphere(),
              'Plane':tvtk.Plane(),
              'ImplicitPlane': tvtk.Plane()}
        return ip
| {
"repo_name": "dmsurti/mayavi",
"path": "mayavi/components/implicit_widgets.py",
"copies": "1",
"size": "10112",
"license": "bsd-3-clause",
"hash": 3947001194242373600,
"line_mean": 36.872659176,
"line_max": 83,
"alpha_frac": 0.5146360759,
"autogenerated": false,
"ratio": 4.40994330571304,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5424579381613039,
"avg_score": null,
"num_lines": null
} |
"""A component that provides a selection of poly data source widgets
to be used by various modules.
"""
# Author: Prabhu Ramachandran <prabhu_r@users.sf.net>
# Copyright (c) 2005, Enthought, Inc.
# License: BSD Style.
# Enthought library imports.
from traits.api import Event, Instance, List, Trait, Bool, TraitPrefixList
from traitsui.api import View, Group, Item, InstanceEditor
from tvtk.api import tvtk
from tvtk.common import configure_input_data
from apptools.persistence.state_pickler import set_state
# Local imports.
from mayavi.core.common import handle_children_state
from mayavi.core.component import Component
######################################################################
# `SourceWidget` class.
######################################################################
class SourceWidget(Component):
    """A component that manages a set of interactive poly data source
    widgets (sphere, line, plane and point) and publishes the poly
    data described by the active widget through the `poly_data` trait.
    """
    # The version of this class. Used for persistence.
    __version__ = 0
    # The actual poly data source widget.
    widget = Instance(tvtk.ThreeDWidget, record=True)
    # Specifies the updation mode of the poly_data attribute. There
    # are three modes: 1) 'interactive' -- the poly_data attribute is
    # updated as the widget is interacted with, 2) 'semi-interactive'
    # -- poly_data attribute is updated when the traits of the widget
    # change and when the widget interaction is complete, 3)
    # 'non-interactive' -- poly_data is updated only explicitly at
    # users request by calling `object.update_poly_data`.
    update_mode = Trait('interactive', TraitPrefixList(['interactive',
                                                        'semi-interactive',
                                                        'non-interactive']),
                        desc='the speed at which the poly data is updated')
    # A list of predefined glyph sources that can be used.
    widget_list = List(tvtk.Object, record=False)
    # The poly data that the widget manages.
    poly_data = Instance(tvtk.PolyData, args=())
    ########################################
    # Private traits.
    # True until the active widget has been placed on the input once.
    _first = Bool(True)
    # Re-entrancy guard for the widget event handlers.
    _busy = Bool(False)
    # True while `__set_pure_state__` runs; suppresses trait handlers.
    _unpickling = Bool(False)
    ########################################
    # View related traits.
    view = View(Group(Item(name='widget', style='custom', resizable=True,
                           editor=InstanceEditor(name='widget_list')),
                      label='Source Widget',
                      show_labels=False,
                      ),
                resizable=True,
                )
    ######################################################################
    # `Base` interface
    ######################################################################
    def __get_pure_state__(self):
        """Return the picklable state, dropping transient traits that
        are rebuilt on restore."""
        d = super(SourceWidget, self).__get_pure_state__()
        for attr in ('poly_data', '_unpickling', '_first', '_busy'):
            d.pop(attr, None)
        return d
    def __set_pure_state__(self, state):
        """Restore the state produced by `__get_pure_state__`,
        recreating the widget list and re-selecting the saved widget.
        """
        self._unpickling = True
        # First create all the allowed widgets in the widget_list attr.
        handle_children_state(self.widget_list, state.widget_list)
        # Now set their state.
        set_state(self, state, first=['widget_list'], ignore=['*'])
        # Set the widget attr depending on value saved.
        m = [x.__class__.__name__ for x in self.widget_list]
        w_c_name = state.widget.__metadata__['class_name']
        w = self.widget = self.widget_list[m.index(w_c_name)]
        # Set the input.
        if len(self.inputs) > 0:
            self.configure_input_data(w, self.inputs[0].outputs[0])
        # Fix for the point widget.
        if w_c_name == 'PointWidget':
            w.place_widget()
        # Set state of rest of the attributes ignoring the widget_list.
        set_state(self, state, ignore=['widget_list'])
        # Some widgets need some cajoling to get their setup right.
        w.update_traits()
        if w_c_name == 'PlaneWidget':
            w.origin = state.widget.origin
            w.normal = state.widget.normal
            w.update_placement()
            w.get_poly_data(self.poly_data)
        elif w_c_name == 'SphereWidget':
            # XXX: This hack is necessary because the sphere widget
            # does not update its poly data even when its ivars are
            # set (plus it does not have an update_placement method
            # which is a bug). So we force this by creating a similar
            # sphere source and copy its output.
            s = tvtk.SphereSource(center=w.center, radius=w.radius,
                                  theta_resolution=w.theta_resolution,
                                  phi_resolution=w.phi_resolution,
                                  lat_long_tessellation=True)
            s.update()
            self.poly_data.shallow_copy(s.output)
        else:
            w.get_poly_data(self.poly_data)
        self._unpickling = False
        # Set the widgets trait so that the widget is rendered if needed.
        self.widgets = [w]
    ######################################################################
    # `Component` interface
    ######################################################################
    def setup_pipeline(self):
        """Override this method so that it *creates* the tvtk
        pipeline.
        This method is invoked when the object is initialized via
        `__init__`. Note that at the time this method is called, the
        tvtk data pipeline will *not* yet be setup. So upstream data
        will not be available. The idea is that you simply create the
        basic objects and setup those parts of the pipeline not
        dependent on upstream sources and filters. You should also
        set the `actors` attribute up at this point.
        """
        # Setup the glyphs.
        sources = [tvtk.SphereWidget(theta_resolution=8, phi_resolution=6),
                   tvtk.LineWidget(clamp_to_bounds=False),
                   tvtk.PlaneWidget(),
                   tvtk.PointWidget(outline=False, x_shadows=False,
                                    y_shadows=False, z_shadows=False),
                   ]
        self.widget_list = sources
        # The 'widgets' trait is set in the '_widget_changed' handler.
        self.widget = sources[0]
        for s in sources:
            self._connect(s)
    def update_pipeline(self):
        """Override this method so that it *updates* the tvtk pipeline
        when data upstream is known to have changed.
        This method is invoked (automatically) when any of the inputs
        sends a `pipeline_changed` event.
        """
        if len(self.inputs) == 0:
            return
        inp = self.inputs[0].outputs[0]
        w = self.widget
        self.configure_input(w, inp)
        # Place the widget on the data only the first time around so
        # the user's positioning survives later upstream changes.
        if self._first:
            w.place_widget()
            self._first = False
        # If the dataset is effectively 2D switch to using the line
        # widget since that works best.
        b = inp.bounds
        l = [(b[1]-b[0]), (b[3]-b[2]), (b[5]-b[4])]
        max_l = max(l)
        # A near-zero extent along any axis marks a degenerate (flat)
        # dataset; align the line widget along the collapsed direction.
        for i, x in enumerate(l):
            if x/max_l < 1.0e-6:
                w = self.widget = self.widget_list[1]
                w.clamp_to_bounds = True
                w.align = ['z_axis', 'z_axis', 'y_axis'][i]
                break
        # Set our output.
        w.get_poly_data(self.poly_data)
        self.outputs = [self.poly_data]
        self.pipeline_changed = True
    def update_data(self):
        """Override this method so that it flushes the vtk pipeline if
        that is necessary.
        This method is invoked (automatically) when any of the inputs
        sends a `data_changed` event.
        """
        self.data_changed = True
    ######################################################################
    # `SourceWidget` interface
    ######################################################################
    def update_poly_data(self):
        """Explicitly refresh `poly_data` from the active widget."""
        self.widget.get_poly_data(self.poly_data)
    ######################################################################
    # Non-public traits.
    ######################################################################
    def _widget_changed(self, value):
        """Static trait handler: register a newly selected widget,
        adding it to `widget_list` if it is a new kind."""
        # If we are being unpickled do nothing.
        if self._unpickling:
            return
        if value not in self.widget_list:
            classes = [o.__class__ for o in self.widget_list]
            vc = value.__class__
            self._connect(value)
            # Replace an existing widget of the same class, otherwise
            # grow the list.
            if vc in classes:
                self.widget_list[classes.index(vc)] = value
            else:
                self.widget_list.append(value)
        recorder = self.recorder
        if recorder is not None:
            idx = self.widget_list.index(value)
            name = recorder.get_script_id(self)
            lhs = '%s.widget'%name
            rhs = '%s.widget_list[%d]'%(name, idx)
            recorder.record('%s = %s'%(lhs, rhs))
        if len(self.inputs) > 0:
            configure_input_data(value, self.inputs[0].outputs[0])
            value.place_widget()
        value.on_trait_change(self.render)
        self.widgets = [value]
    def _update_mode_changed(self, value):
        """Refresh the poly data immediately when switching into a
        mode that keeps it up to date."""
        if value in ['interactive', 'semi-interactive']:
            self.update_poly_data()
            self.render()
    def _on_interaction_event(self, obj, event):
        # VTK observer callback: refresh the poly data while the user
        # drags the widget (interactive mode only).
        if (not self._busy) and (self.update_mode == 'interactive'):
            self._busy = True
            self.update_poly_data()
            self._busy = False
    def _on_widget_trait_changed(self):
        if (not self._busy) and (self.update_mode != 'non-interactive'):
            self._busy = True
            # This render call forces any changes to the trait to be
            # rendered only then will updating the poly data make
            # sense.
            self.render()
            self.update_poly_data()
            self._busy = False
    def _on_alignment_set(self):
        """Re-place the widget after its axis alignment is changed."""
        w = self.widget
        w.place_widget()
        w.update_traits()
    def _connect(self, obj):
        """Wires up all the event handlers."""
        obj.add_observer('InteractionEvent',
                         self._on_interaction_event)
        if isinstance(obj, tvtk.PlaneWidget):
            obj.on_trait_change(self._on_alignment_set, 'normal_to_x_axis')
            obj.on_trait_change(self._on_alignment_set, 'normal_to_y_axis')
            obj.on_trait_change(self._on_alignment_set, 'normal_to_z_axis')
        elif isinstance(obj, tvtk.LineWidget):
            obj.on_trait_change(self._on_alignment_set, 'align')
        # Setup the widgets colors.
        fg = (1,1,1)
        if self.scene is not None:
            fg = self.scene.foreground
        self._setup_widget_colors(obj, fg)
        obj.on_trait_change(self._on_widget_trait_changed)
        obj.on_trait_change(self.render)
    def _setup_widget_colors(self, widget, color):
        """Apply `color` to the widget's normal properties and red to
        its selected-state properties (both with line width 2)."""
        trait_names = widget.trait_names()
        props = [x for x in trait_names
                 if 'property' in x and 'selected' not in x]
        sel_props = [x for x in trait_names
                     if 'property' in x and 'selected' in x]
        for p in props:
            setattr(getattr(widget, p), 'color', color)
            setattr(getattr(widget, p), 'line_width', 2)
        for p in sel_props:
            # Set the selected color to 'red'.
            setattr(getattr(widget, p), 'color', (1,0,0))
            setattr(getattr(widget, p), 'line_width', 2)
        self.render()
    def _foreground_changed_for_scene(self, old, new):
        # Change the default color for the actor.
        for w in self.widget_list:
            self._setup_widget_colors(w, new)
        self.render()
    def _scene_changed(self, old, new):
        """Recolor all widgets to match the new scene's foreground."""
        super(SourceWidget, self)._scene_changed(old, new)
        self._foreground_changed_for_scene(None, new.foreground)
| {
"repo_name": "liulion/mayavi",
"path": "mayavi/components/source_widget.py",
"copies": "2",
"size": "11844",
"license": "bsd-3-clause",
"hash": -1627766990019474400,
"line_mean": 38.8787878788,
"line_max": 76,
"alpha_frac": 0.5371496116,
"autogenerated": false,
"ratio": 4.22095509622238,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.000839693465704938,
"num_lines": 297
} |
"""A component to manage an implicit plane widget.
"""
# Author: Prabhu Ramachandran <prabhu_r@users.sf.net>
# Copyright (c) 2005, Enthought, Inc.
# License: BSD Style.
# Enthought library imports.
from traits.api import Instance, Bool, Property
from traitsui.api import View, Group, Item, InstanceEditor
from tvtk.api import tvtk
# Local imports.
from mayavi.core.component import Component
# Cached VTK version string, inspected below to pick a widget UI group
# compatible with older VTK 4.x releases.
VTK_VER = tvtk.Version().vtk_version
######################################################################
# `ImplicitPlane` class.
######################################################################
class ImplicitPlane(Component):
    """A component that manages a tvtk `ImplicitPlaneWidget` and keeps
    an associated `tvtk.Plane` (plus convenience `normal`/`origin`
    properties) synchronized with it.
    """
    # The version of this class. Used for persistence.
    __version__ = 0
    # The widget that controls the plane.
    widget = Instance(tvtk.ImplicitPlaneWidget, args=(),
                      kw={'key_press_activation': False,
                          'place_factor':1.2,
                          'draw_plane':False,
                          'outline_translation':False},
                      record=True)
    # The plane that the widget controls. Do not change the
    # attributes of the plane, do it via the widget.
    plane = Instance(tvtk.Plane, args=(),
                     kw={'origin':(0.0, 0.0, 0.0),
                         'normal':(0,0,1)},
                     record=True)
    # Convenience property for the normal delegated to the widget.
    normal = Property
    # Convenience property for the origin delegated to the widget.
    origin = Property
    ########################################
    # Private traits
    # True until the widget has been placed on the input data once.
    _first = Bool(True)
    # Re-entrancy guard for the interaction handler.
    _busy = Bool(False)
    ########################################
    # View related traits.
    # Older VTK releases (4.2/4.4) get a reduced UI group; the newer
    # group additionally exposes `scale_enabled`.
    if VTK_VER[:3] in ['4.2', '4.4']:
        _widget_group = Group(Item(name='enabled'),
                              Item(name='normal_to_x_axis'),
                              Item(name='normal_to_y_axis'),
                              Item(name='normal_to_z_axis'),
                              Item(name='outline_translation'),
                              Item(name='tubing'),
                              Item(name='draw_plane'),
                              Item(name='normal'),
                              Item(name='origin')
                              )
    else:
        _widget_group = Group(Item(name='enabled'),
                              Item(name='normal_to_x_axis'),
                              Item(name='normal_to_y_axis'),
                              Item(name='normal_to_z_axis'),
                              Item(name='outline_translation'),
                              Item(name='scale_enabled'),
                              Item(name='tubing'),
                              Item(name='draw_plane'),
                              Item(name='normal'),
                              Item(name='origin')
                              )
    view = View(Group(Item(name='widget', style='custom',
                           editor=InstanceEditor(view=View(_widget_group))),
                      show_labels=False)
                )
    ######################################################################
    # `Component` interface
    ######################################################################
    def setup_pipeline(self):
        """Override this method so that it *creates* its tvtk
        pipeline.
        This method is invoked when the object is initialized via
        `__init__`. Note that at the time this method is called, the
        tvtk data pipeline will *not* yet be setup. So upstream data
        will not be available. The idea is that you simply create the
        basic objects and setup those parts of the pipeline not
        dependent on upstream sources and filters.
        """
        # Setup our widgets and hook up all handlers.
        self.widgets = [self.widget]
        self._connect()
    def update_pipeline(self):
        """Override this method so that it *updates* the tvtk pipeline
        when data upstream is known to have changed.
        This method is invoked (automatically) when the input fires a
        `pipeline_changed` event.
        """
        if len(self.inputs) == 0:
            return
        inp = self.inputs[0].outputs[0]
        w = self.widget
        self.configure_input_data(w, inp)
        if self._first:
            w.place_widget()
            self.origin = inp.center
            self._first = False
        else:
            n = self.normal
            # A hack to update the widget when data changes upstream.
            # This is perhaps a VTK bug, not sure.
            self.normal = n[0], n[1], n[2] + 0.001
            self.normal = n
        # Just pass the inputs back out. This may trigger a pipeline
        # changed downstream if it does not then fire a data_changed.
        if self.outputs != [inp]:
            self.outputs = [inp]
        else:
            self.data_changed = True
    def update_data(self):
        """Override this method to do what is necessary when upstream
        data changes.
        This method is invoked (automatically) when any of the inputs
        sends a `data_changed` event.
        """
        self.data_changed = True
    def update_plane(self):
        """Convenience method to update the plane once the widget is
        changed.
        """
        self.widget.get_plane(self.plane)
    ######################################################################
    # Non-public interface.
    ######################################################################
    def _get_normal(self):
        """Property getter: delegate `normal` to the widget."""
        return self.widget.normal
    def _set_normal(self, value):
        """Property setter: forward `normal` to the widget, fire the
        change notification and resync the plane."""
        w = self.widget
        old = w.normal
        w.normal = value
        self.trait_property_changed('normal', old, value)
        self.update_plane()
    def _get_origin(self):
        """Property getter: delegate `origin` to the widget."""
        return self.widget.origin
    def _set_origin(self, value):
        """Property setter: forward `origin` to the widget via the raw
        VTK object, then resync the plane."""
        # Ugly, but needed.
        # NOTE(review): going through `to_vtk` presumably sidesteps
        # tvtk trait machinery on origin -- confirm why this is needed.
        w = tvtk.to_vtk(self.widget)
        old = w.GetOrigin()
        w.SetOrigin(list(value))
        self.trait_property_changed('origin', old, value)
        self.update_plane()
    def _on_interaction_event(self, obj, event):
        # VTK observer callback: resync the plane, guarding against
        # re-entrancy.
        if not self._busy:
            self._busy = True
            self.update_plane()
            self._busy = False
    def _on_normal_set(self):
        """Re-place the widget after an axis-alignment trait fires."""
        w = self.widget
        w.place_widget()
        w.update_traits()
    def _connect(self):
        """Wires up all the event handlers."""
        w = self.widget
        w.add_observer('InteractionEvent',
                       self._on_interaction_event)
        w.on_trait_change(self._on_normal_set, 'normal_to_x_axis')
        w.on_trait_change(self._on_normal_set, 'normal_to_y_axis')
        w.on_trait_change(self._on_normal_set, 'normal_to_z_axis')
        w.on_trait_change(self._on_interaction_event)
        for obj in (self.plane, w):
            obj.on_trait_change(self.render)
| {
"repo_name": "liulion/mayavi",
"path": "mayavi/components/implicit_plane.py",
"copies": "3",
"size": "6933",
"license": "bsd-3-clause",
"hash": -3656126923366633500,
"line_mean": 35.109375,
"line_max": 76,
"alpha_frac": 0.496610414,
"autogenerated": false,
"ratio": 4.516612377850163,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6513222791850164,
"avg_score": null,
"num_lines": null
} |
"""A Composite Map class
Author: `Keith Hughitt <keith.hughitt@nasa.gov>`
"""
from __future__ import absolute_import
import matplotlib.pyplot as plt
from sunpy.map import Map
__author__ = "Keith Hughitt"
__email__ = "keith.hughitt@nasa.gov"
class CompositeMap:
    """
    CompositeMap(map1 [,map2,..])
    Parameters
    ----------
    args : [sunpy.map | string]
        One or more map of filepaths
    Methods
    -------
    add_map(map, zorder=None, alpha=1, levels=False)
        Adds a map to the CompositeMap
    remove_map(index)
        Removes and returns the map with the given index
    list_maps()
        Prints a list of the currently included maps
    get_alpha(index=None)
        Gets the alpha-channel value for a layer in the composite image
    get_zorder(index=None)
        Gets the layering preference (z-order) for a map within the composite.
    get_colors(index=None)
        Gets the colors for a map within the CompositeMap.
    get_norm(index=None)
        Gets the normalization for a map within the CompositeMap.
    get_levels(index=None)
        Gets the list of contour levels for a map within the CompositeMap
    set_norm(self, index, norm)
        Sets the norm for a layer in the composite image
    set_levels(index, levels, percent=False)
        Sets the contour levels for a layer in the CompositeMap
    set_colors(index=None, cm)
        Sets the color map for a layer in the CompositeMap
    set_alpha(index=None, alpha)
        Sets the alpha-channel value for a layer in the CompositeMap
    set_zorder(index=None, zorder)
        Set the layering preference (z-order) for a map within the CompositeMap
    plot(figure=None, overlays=None, draw_limb=False, gamma=1.0,
    draw_grid=False, colorbar=True, basic_plot=False,title="SunPy Plot",
    matplot_args)
        Plots the composite map object using matplotlib
    Examples
    --------
    >>> import sunpy
    >>> sunpy.CompositeMap(sunpy.AIA_171_IMAGE, sunpy.RHESSI_IMAGE).show()
    >>> comp_map = sunpy.CompositeMap(sunpy.AIA_171_IMAGE, sunpy.EIT_195_IMAGE)
    >>> comp_map.add_map(sunpy.RHESSI_IMAGE)
    >>> comp_map.show()
    """
    def __init__(self, *args):
        self._maps = []
        # Default alpha and zorder values
        alphas = [1] * len(args)
        zorders = range(0, 10 * len(args), 10)
        levels = [False] * len(args)
        # Parse input Maps/filepaths
        for i, item in enumerate(args):
            # Accept either a Map instance or a filepath.
            if isinstance(item, Map):
                m = item
            else:
                m = Map.read(item)
            # Set z-order and alpha values for the map
            m.zorder = zorders[i]
            m.alpha = alphas[i]
            m.levels = levels[i]
            # Add map
            self._maps.append(m)
    def add_map(self, input_, zorder=None, alpha=1, levels=False):
        """Adds a map to the CompositeMap
        Parameters
        ----------
        input_ : {sunpy.map, string}
            Map instance or filepath to map to be added
        zorder : int
            The index to use when determining where the map should lie along
            the z-axis; maps with higher z-orders appear above maps with lower
            z-orders.
        alpha : float
            Opacity at which the map should be displayed. An alpha value of 0
            results in a fully transparent image while an alpha value of 1
            results in a fully opaque image. Values between result in semi-
            transparent images.
        levels : list or False
            Contour levels for the layer, or False to display it as an
            image rather than as contours.
        """
        if zorder is None:
            # Place the new layer above all existing ones. An empty
            # composite starts at z-order 0 (max() of an empty list
            # would raise ValueError).
            if self._maps:
                zorder = max([m.zorder for m in self._maps]) + 10
            else:
                zorder = 0
        # Accept either a Map instance or a filepath, mirroring
        # __init__ (previously a Map instance was incorrectly passed
        # straight to Map.read).
        if isinstance(input_, Map):
            m = input_
        else:
            m = Map.read(input_)
        m.zorder = zorder
        m.alpha = alpha
        m.levels = levels
        self._maps.append(m)
    def remove_map(self, index):
        """Removes and returns the map with the given index"""
        return self._maps.pop(index)
    def list_maps(self):
        """Prints a list of the currently included maps"""
        # Parenthesized form prints identically under Python 2's print
        # statement and is also valid Python 3.
        print([m.__class__ for m in self._maps])
    def get_alpha(self, index=None):
        """Gets the alpha-channel value for a layer in the composite
        image, or for all layers when `index` is None."""
        if index is None:
            return [_map.alpha for _map in self._maps]
        else:
            return self._maps[index].alpha
    def get_zorder(self, index=None):
        """Gets the layering preference (z-order) for a map within the
        composite, or for all layers when `index` is None.
        """
        if index is None:
            return [_map.zorder for _map in self._maps]
        else:
            return self._maps[index].zorder
    def get_colors(self, index=None):
        """Gets the colors (colormap) for a map within the composite,
        or for all layers when `index` is None."""
        if index is None:
            return [_map.cmap for _map in self._maps]
        else:
            return self._maps[index].cmap
    def get_norm(self, index=None):
        """Gets the normalization for a map within the composite, or
        for all layers when `index` is None.
        """
        if index is None:
            return [_map.norm for _map in self._maps]
        else:
            return self._maps[index].norm
    def get_levels(self, index=None):
        """Gets the list of contour levels for a map within the
        composite, or for all layers when `index` is None.
        """
        if index is None:
            return [_map.levels for _map in self._maps]
        else:
            return self._maps[index].levels
    def set_norm(self, index, norm):
        """Sets the norm for a layer in the composite image"""
        self._maps[index].norm = norm
    def set_levels(self, index, levels, percent=False):
        """Sets the contour levels for a layer in the composite image.
        When `percent` is True the values are interpreted as a
        percentage of the layer's maximum."""
        if percent is False:
            self._maps[index].levels = levels
        else:
            self._maps[index].levels = [self._maps[index].max()*level/100.0 for level in levels]
    def set_colors(self, index, cm):
        """Sets the color map for a layer in the composite image"""
        self._maps[index].cmap = cm
    def set_alpha(self, index, alpha):
        """Sets the alpha-channel value for a layer in the composite
        image.  Raises `OutOfRangeAlphaValue` for values outside
        [0, 1]."""
        if 0 <= alpha <= 1:
            self._maps[index].alpha = alpha
        else:
            raise OutOfRangeAlphaValue("Alpha value must be between 0 and 1.")
    def set_zorder(self, index, zorder):
        """Set the layering preference (z-order) for a map within the
        composite.
        """
        self._maps[index].zorder = zorder
    def plot(self, figure=None, overlays=None, draw_limb=False, gamma=1.0, # pylint: disable=W0613
             draw_grid=False, colorbar=True, basic_plot=False, # pylint: disable=W0613
             title="SunPy Plot", **matplot_args):
        """Plots the composite map object using matplotlib
        Parameters
        ----------
        title : string
            Title to use for the plot
        overlays : list
            List of overlays to include in the plot
        **matplot_args : dict
            Matplotlib Any additional imshow arguments that should be used
            when plotting the image.
        Returns
        -------
        out : matplotlib.figure.Figure
            A Matplotlib figure instance representing the composite map plot
        """
        if overlays is None:
            overlays = []
        # Create a figure and add title and axes
        if figure is None:
            figure = plt.figure()
        axes = figure.add_subplot(111)
        axes.set_title(title)
        axes.set_xlabel('X-position [' + self._maps[0].units['x'] + ']')
        axes.set_ylabel('Y-position [' + self._maps[0].units['y'] + ']')
        # Plot layers of composite map
        for m in self._maps:
            # Parameters for plotting
            params = {
                "origin": "lower",
                "extent": m.xrange + m.yrange,
                "cmap": m.cmap,
                # NOTE(review): get_norm() returns the `norm` attribute
                # directly, while here it is called -- confirm which is
                # intended for the Map API in use.
                "norm": m.norm(),
                "alpha": m.alpha,
                "zorder": m.zorder,
            }
            params.update(matplot_args)
            # Use contour for contour data, and imshow otherwise
            if m.levels is False:
                plt.imshow(m, **params)
            else:
                # Set data with values <= 0 to transparent
                # contour_data = np.ma.masked_array(m, mask=(m <= 0))
                plt.contour(m, m.levels, **params)
        # Adjust axes extents to include all data
        axes.axis('image')
        for overlay in overlays:
            figure, axes = overlay(figure, axes)
        return figure
    def show(self, figure=None, overlays=None, draw_limb=False, gamma=1.0,
             draw_grid=False, colorbar=True, basic_plot=False,
             title="SunPy Plot", **matplot_args):
        """Displays the composite map on the screen.
        Parameters
        ----------
        title : string
            Title to use for the plot
        overlays : list
            List of overlays to include in the plot
        **matplot_args : dict
            Matplotlib Any additional imshow arguments that should be used
            when plotting the image.
        """
        self.plot(figure, overlays, draw_limb, gamma, draw_grid, colorbar,
                  basic_plot, title, **matplot_args).show()
class OutOfRangeAlphaValue(ValueError):
    """Raised when an alpha value outside of the range 0-1 is requested."""
| {
"repo_name": "jslhs/sunpy",
"path": "sunpy/map/compositemap.py",
"copies": "1",
"size": "9605",
"license": "bsd-2-clause",
"hash": 4316360777746123300,
"line_mean": 33.5503597122,
"line_max": 98,
"alpha_frac": 0.5626236335,
"autogenerated": false,
"ratio": 4.141871496334627,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.01161866995771135,
"num_lines": 278
} |
"""A Composite Map class
Author: `Keith Hughitt <keith.hughitt@nasa.gov>`
"""
from __future__ import absolute_import
import matplotlib.pyplot as plt
from sunpy.map import Map
__all__ = ['CompositeMap']
__author__ = "Keith Hughitt"
__email__ = "keith.hughitt@nasa.gov"
class CompositeMap:
    """
    CompositeMap(map1 [,map2,..])

    A composite of several layered maps.  Each layer carries its own
    z-order (stacking position), alpha (opacity) and optional contour
    levels.

    Parameters
    ----------
    args : [sunpy.map | string]
        One or more maps or filepaths to maps.

    Examples
    --------
    >>> import sunpy
    >>> sunpy.CompositeMap(sunpy.AIA_171_IMAGE, sunpy.RHESSI_IMAGE).peek()
    >>> comp_map = sunpy.CompositeMap(sunpy.AIA_171_IMAGE, sunpy.EIT_195_IMAGE)
    >>> comp_map.add_map(sunpy.RHESSI_IMAGE)
    >>> comp_map.peek()
    """
    def __init__(self, *args):
        self._maps = []

        # Default alpha and zorder values: layers are fully opaque and
        # spaced 10 z-levels apart so new layers can slot in between.
        alphas = [1] * len(args)
        zorders = range(0, 10 * len(args), 10)
        levels = [False] * len(args)

        # Parse input Maps/filepaths
        for i, item in enumerate(args):
            # Accept either a ready-made Map or a filepath to read.
            if isinstance(item, Map):
                m = item
            else:
                m = Map.read(item)

            # Set z-order, alpha and contour levels for the layer
            m.zorder = zorders[i]
            m.alpha = alphas[i]
            m.levels = levels[i]

            self._maps.append(m)

    def add_map(self, input_, zorder=None, alpha=1, levels=False):
        """Adds a map to the CompositeMap

        Parameters
        ----------
        input_ : {sunpy.map, string}
            Map instance or filepath to map to be added
        zorder : int or None
            The index to use when determining where the map should lie
            along the z-axis; maps with higher z-orders appear above maps
            with lower z-orders.  Defaults to 10 above the topmost layer.
        alpha : float
            Opacity at which the map should be displayed. An alpha value of 0
            results in a fully transparent image while an alpha value of 1
            results in a fully opaque image. Values between result in semi-
            transparent images.
        levels : list or False
            Contour levels for the layer, or False to render it with imshow.
        """
        if zorder is None:
            # Stack on top of the current layers.  Guard against an empty
            # composite: max() of an empty sequence raises ValueError.
            zorder = max([m.zorder for m in self._maps]) + 10 if self._maps else 0

        # Honour the documented contract: accept a pre-constructed Map
        # (as __init__ does) and only read filepaths from disk.
        if isinstance(input_, Map):
            m = input_
        else:
            m = Map.read(input_)
        m.zorder = zorder
        m.alpha = alpha
        m.levels = levels

        self._maps.append(m)

    def remove_map(self, index):
        """Removes and returns the map with the given index"""
        return self._maps.pop(index)

    def list_maps(self):
        """Prints a list of the currently included maps"""
        # print(...) behaves identically under Python 2 and 3 here,
        # unlike the original Python-2-only print statement.
        print([m.__class__ for m in self._maps])

    def get_map(self, index):
        """Returns the map with the given index"""
        return self._maps[index]

    def get_alpha(self, index=None):
        """Gets the alpha-channel value for a layer in the composite image.
        When index is None the alphas of all layers are returned as a list.
        """
        if index is None:
            return [_map.alpha for _map in self._maps]
        else:
            return self._maps[index].alpha

    def get_zorder(self, index=None):
        """Gets the layering preference (z-order) for a map within the
        composite.  When index is None all z-orders are returned as a list.
        """
        if index is None:
            return [_map.zorder for _map in self._maps]
        else:
            return self._maps[index].zorder

    def get_colors(self, index=None):
        """Gets the colors for a map within the CompositeMap.
        When index is None all colormaps are returned as a list.
        """
        if index is None:
            return [_map.cmap for _map in self._maps]
        else:
            return self._maps[index].cmap

    def get_norm(self, index=None):
        """Gets the normalization for a map within the composite.
        When index is None all normalizations are returned as a list.
        """
        if index is None:
            return [_map.norm for _map in self._maps]
        else:
            return self._maps[index].norm

    def get_levels(self, index=None):
        """Gets the list of contour levels for a map within the composite.
        When index is None the levels of all layers are returned as a list.
        """
        if index is None:
            return [_map.levels for _map in self._maps]
        else:
            return self._maps[index].levels

    def set_norm(self, index, norm):
        """Sets the norm for a layer in the composite image"""
        self._maps[index].norm = norm

    def set_levels(self, index, levels, percent=False):
        """Sets the contour levels for a layer in the composite image.

        When percent is True the input levels are interpreted as
        percentages of the layer's maximum data value.
        """
        if percent is False:
            self._maps[index].levels = levels
        else:
            self._maps[index].levels = [self._maps[index].max() * level / 100.0
                                        for level in levels]

    def set_colors(self, index, cm):
        """Sets the color map for a layer in the composite image"""
        self._maps[index].cmap = cm

    def set_alpha(self, index, alpha):
        """Sets the alpha-channel value for a layer in the composite image.

        Raises
        ------
        OutOfRangeAlphaValue
            If alpha lies outside the closed interval [0, 1].
        """
        if 0 <= alpha <= 1:
            self._maps[index].alpha = alpha
        else:
            raise OutOfRangeAlphaValue("Alpha value must be between 0 and 1.")

    def set_zorder(self, index, zorder):
        """Set the layering preference (z-order) for a map within the
        composite.
        """
        self._maps[index].zorder = zorder

    def draw_limb(self, index=None, axes=None):
        """Draws a circle representing the solar limb

        Parameters
        ----------
        index : integer or None
            Map index to use to plot limb.  When None the first layer
            exposing 'rsun_arcseconds' is used.
        axes : matplotlib.axes object or None
            Axes to plot limb on or None to use current axes.

        Returns
        -------
        matplotlib.axes object

        Raises
        ------
        ValueError
            If no suitable layer can be found.
        """
        if index is None:
            for i, amap in enumerate(self._maps):
                if hasattr(amap, 'rsun_arcseconds'):
                    index = i
                    break
        # Test index for None *before* using it to index self._maps; the
        # original order raised TypeError (list index None) instead of
        # the intended ValueError when no layer matched.
        if index is None or not hasattr(self._maps[index], 'rsun_arcseconds'):
            raise ValueError("Specified index does not have all the required attributes to draw limb.")
        return self._maps[index].draw_limb(axes=axes)

    def draw_grid(self, index=None, axes=None, grid_spacing=20):
        """Draws a grid over the surface of the Sun

        Parameters
        ----------
        index : integer or None
            Index to determine which map to use to draw grid.  When None
            the first layer with all the required attributes is used.
        axes : matplotlib.axes object or None
            Axes to plot limb on or None to use current axes.
        grid_spacing : float
            Spacing (in degrees) for longitude and latitude grid.

        Returns
        -------
        matplotlib.axes object

        Raises
        ------
        ValueError
            If no suitable layer can be found.
        """
        # Hoisted out of the 'index is None' branch: the original only
        # defined needed_attrs there, so an explicit index argument hit
        # a NameError at the validation step below.
        needed_attrs = ['rsun_meters', 'dsun', 'heliographic_latitude',
                        'heliographic_longitude']
        if index is None:
            for i, amap in enumerate(self._maps):
                if all([hasattr(amap, k) for k in needed_attrs]):
                    index = i
                    break
        # Test index for None *before* using it to index self._maps (see
        # draw_limb for the same fix).
        if index is None or not all([hasattr(self._maps[index], k)
                                     for k in needed_attrs]):
            raise ValueError("Specified index does not have all the required attributes to draw grid.")
        return self._maps[index].draw_grid(axes=axes, grid_spacing=grid_spacing)

    def plot(self, axes=None, gamma=None, annotate=True,  # pylint: disable=W0613
             title="SunPy Composite Plot", **matplot_args):
        """Plots the composite map object using matplotlib

        Parameters
        ----------
        axes : matplotlib.axes object or None
            If provided the image will be plotted on the given axes. Else
            the current matplotlib axes will be used.
        gamma : float
            Accepted for API compatibility; not used by this method.
        annotate : bool
            If true, the data is plotted at its natural scale, with title
            and axis labels.
        **matplot_args : dict
            Any additional imshow/contour arguments that should be used
            when plotting the image.

        Returns
        -------
        ret : list
            List of axes images or quad contour sets that have been plotted.
        """
        # Get current axes
        if not axes:
            axes = plt.gca()

        if annotate:
            # x-axis label
            if self._maps[0].coordinate_system['x'] == 'HG':
                xlabel = 'Longitude [%s]' % self._maps[0].units['x']
            else:
                xlabel = 'X-position [%s]' % self._maps[0].units['x']

            # y-axis label
            if self._maps[0].coordinate_system['y'] == 'HG':
                ylabel = 'Latitude [%s]' % self._maps[0].units['y']
            else:
                ylabel = 'Y-position [%s]' % self._maps[0].units['y']

            axes.set_xlabel(xlabel)
            axes.set_ylabel(ylabel)
            axes.set_title(title)

        # Define a list of plotted objects
        ret = []

        # Plot layers of composite map
        for m in self._maps:
            # Parameters for plotting
            params = {
                "origin": "lower",
                "extent": m.xrange + m.yrange,
                "cmap": m.cmap,
                "norm": m.norm(),
                "alpha": m.alpha,
                "zorder": m.zorder,
            }
            params.update(matplot_args)

            # Use contour for layers with contour data, imshow otherwise.
            if m.levels is False:
                ret.append(axes.imshow(m, **params))
            else:
                # Set data with values <= 0 to transparent
                # contour_data = np.ma.masked_array(m, mask=(m <= 0))
                ret.append(axes.contour(m, m.levels, **params))
                # Set the label of the first line so a legend can be created
                ret[-1].collections[0].set_label(m.name)

        # Adjust axes extents to include all data
        axes.axis('image')

        # Set current image (makes colorbar work)
        plt.sci(ret[0])
        return ret

    def peek(self, gamma=None, colorbar=True, basic_plot=False, draw_limb=True,
             draw_grid=False, **matplot_args):
        """Displays the map in a new figure

        Parameters
        ----------
        gamma : float
            Accepted for API compatibility; not used by this method.
        colorbar : bool or int
            Whether to display a colorbar next to the plot.
            If specified as an integer a colorbar is plotted for that index.
        basic_plot : bool
            If true, the data is plotted by itself at its natural scale; no
            title, labels, or axes are shown.
        draw_limb : bool
            Whether to draw the solar limb.
        draw_grid : bool or number
            Whether to draw a heliographic grid; a number selects the
            grid spacing in degrees.
        **matplot_args : dict
            Any additional imshow arguments that should be used
            when plotting the image.

        Returns
        -------
        matplotlib.figure.Figure
            The figure the composite map was drawn into.
        """
        # Create a figure and add title and axes
        figure = plt.figure(frameon=not basic_plot)

        # Basic plot
        if basic_plot:
            axes = plt.Axes(figure, [0., 0., 1., 1.])
            axes.set_axis_off()
            figure.add_axes(axes)
            matplot_args.update({'annotate': False})
        else:
            axes = figure.add_subplot(111)

        ret = self.plot(axes=axes, **matplot_args)

        if not isinstance(colorbar, bool) and isinstance(colorbar, int):
            figure.colorbar(ret[colorbar])
        elif colorbar:
            plt.colorbar()

        if draw_limb:
            self.draw_limb(axes=axes)

        if isinstance(draw_grid, bool):
            if draw_grid:
                self.draw_grid(axes=axes)
        # NOTE: 'long' exists only on Python 2; this module targets Python 2.
        elif isinstance(draw_grid, (int, long, float)):
            self.draw_grid(axes=axes, grid_spacing=draw_grid)
        else:
            raise TypeError("draw_grid should be bool, int, long or float")

        figure.show()
        return figure
class OutOfRangeAlphaValue(ValueError):
    """Raised when an alpha value outside of the range 0-1 is requested."""
| {
"repo_name": "mjm159/sunpy",
"path": "sunpy/map/compositemap.py",
"copies": "1",
"size": "13808",
"license": "bsd-2-clause",
"hash": 5842915076980323000,
"line_mean": 33.0938271605,
"line_max": 103,
"alpha_frac": 0.5437427578,
"autogenerated": false,
"ratio": 4.28287841191067,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.532662116971067,
"avg_score": null,
"num_lines": null
} |
"""A Composite Map class
Author: `Keith Hughitt <keith.hughitt@nasa.gov>`
"""
from __future__ import absolute_import, print_function, division
import matplotlib.pyplot as plt
import astropy.units as u
from sunpy.map import GenericMap
from sunpy.util import expand_list
from sunpy.extern import six
from sunpy.extern.six.moves import range
__all__ = ['CompositeMap']
__author__ = "Keith Hughitt"
__email__ = "keith.hughitt@nasa.gov"
class CompositeMap(object):
    """
    CompositeMap(map1 [,map2,..])

    A composite of several layered maps.  Each layer carries its own
    z-order (stacking position), alpha (opacity) and optional contour
    levels.

    Parameters
    ----------
    args : [`~sunpy.map.Map` | string]
        One or more maps or filepaths.

    Examples
    --------
    >>> import sunpy.map
    >>> import sunpy.data
    >>> sunpy.data.download_sample_data(overwrite=False)   # doctest: +SKIP
    >>> import sunpy.data.sample
    >>> comp_map = sunpy.map.Map(sunpy.data.sample.AIA_171_IMAGE,
    ...                          sunpy.data.sample.EIT_195_IMAGE,
    ...                          composite=True)
    >>> comp_map.add_map(sunpy.map.Map(sunpy.data.sample.RHESSI_IMAGE))
    >>> comp_map.peek()   # doctest: +SKIP
    """
    def __init__(self, *args, **kwargs):
        self._maps = expand_list(args)

        # Only pre-constructed GenericMap instances may be layered.
        for m in self._maps:
            if not isinstance(m, GenericMap):
                raise ValueError(
                    'CompositeMap expects pre-constructed map objects.')

        # Default alpha and zorder values: fully opaque layers spaced
        # ten z-levels apart so new layers can slot in between.
        alphas = [1] * len(self._maps)
        zorders = list(range(0, 10 * len(self._maps), 10))
        levels = [False] * len(self._maps)

        # Set z-order, alpha and contour levels for each layer
        for i, m in enumerate(self._maps):
            m.zorder = zorders[i]
            m.alpha = alphas[i]
            m.levels = levels[i]

    def add_map(self, amap, zorder=None, alpha=1, levels=False):
        """Adds a map to the CompositeMap.

        Parameters
        ----------
        amap : `~sunpy.map.GenericMap` or subclass
            Map instance to be added.
        zorder : `int` or None
            Stacking position; maps with higher z-orders appear above
            maps with lower z-orders.  Defaults to 10 above the current
            topmost layer.
        alpha : `float`
            Opacity in the range 0 (fully transparent) to 1 (fully
            opaque).
        levels : `list` or False
            Contour levels for the layer, or False to render it with
            imshow.
        """
        if zorder is None:
            # Stack on top of the current layers.  Guard against an empty
            # composite: max() of an empty sequence raises ValueError.
            zorder = max([m.zorder for m in self._maps]) + 10 if self._maps else 0

        amap.zorder = zorder
        amap.alpha = alpha
        amap.levels = levels

        self._maps.append(amap)

    def remove_map(self, index):
        """Removes and returns the map with the given index.

        Parameters
        ----------
        index : `int`
            The index of the map in the composite map.

        Returns
        -------
        `~sunpy.map.GenericMap`
            The layer removed from the composite map.
        """
        return self._maps.pop(index)

    def list_maps(self):
        """Prints a list of the currently included maps."""
        print([m.__class__ for m in self._maps])

    def get_map(self, index):
        """Returns the map with the given index."""
        return self._maps[index]

    def get_alpha(self, index=None):
        """Returns the alpha-channel value for a layer in the composite
        image, or the alphas of all layers as a list when index is None.
        """
        if index is None:
            return [_map.alpha for _map in self._maps]
        else:
            return self._maps[index].alpha

    def get_zorder(self, index=None):
        """Returns the layering preference (z-order) for a map within
        the composite, or all z-orders as a list when index is None.
        """
        if index is None:
            return [_map.zorder for _map in self._maps]
        else:
            return self._maps[index].zorder

    def get_colors(self, index=None):
        """Returns the colormap for a map within the composite, or all
        colormaps as a list when index is None.
        """
        if index is None:
            return [_map.plot_settings['cmap'] for _map in self._maps]
        else:
            return self._maps[index].plot_settings['cmap']

    def get_mpl_color_normalizer(self, index=None):
        """Returns the color normalizer for a map within the composite,
        or all normalizers as a list when index is None.
        """
        if index is None:
            return [_map.mpl_color_normalizer for _map in self._maps]
        else:
            return self._maps[index].mpl_color_normalizer

    def get_levels(self, index=None):
        """Returns the contour levels for a map within the composite, or
        the levels of all layers (a list of lists) when index is None.
        """
        if index is None:
            return [_map.levels for _map in self._maps]
        else:
            return self._maps[index].levels

    def set_mpl_color_normalizer(self, index, norm):
        """Sets the color normalizer for a layer in the composite image.

        Parameters
        ----------
        index : `int`
            The index of the map in the composite map.
        norm : a color normalizer
            The function used to stretch the color table.
        """
        self._maps[index].mpl_color_normalizer = norm

    def set_levels(self, index, levels, percent=False):
        """Sets the contour levels for a layer in the composite image.

        Parameters
        ----------
        index : `int`
            The index of the map in the composite map.
        levels : array-like
            The contour levels.
        percent : `bool`
            If True, 'levels' are interpreted as percentages relative to
            the maximum data value of layer 'index'; otherwise they are
            used directly.
        """
        if percent is False:
            self._maps[index].levels = levels
        else:
            self._maps[index].levels = [self._maps[index].max() * level / 100.0
                                        for level in levels]

    def set_colors(self, index, cm):
        """Sets the colormap 'cm' for the layer at 'index'."""
        self._maps[index].plot_settings['cmap'] = cm

    def set_alpha(self, index, alpha):
        """Sets the alpha-channel value for a layer in the composite image.

        Parameters
        ----------
        index : `int`
            The index of the map in the composite map.
        alpha : `float`
            A float in the range 0 to 1.

        Raises
        ------
        OutOfRangeAlphaValue
            If alpha lies outside the closed interval [0, 1].
        """
        if 0 <= alpha <= 1:
            self._maps[index].alpha = alpha
        else:
            raise OutOfRangeAlphaValue("Alpha value must be between 0 and 1.")

    def set_zorder(self, index, zorder):
        """Set the layering order (z-order) for the layer at 'index'."""
        self._maps[index].zorder = zorder

    def draw_limb(self, index=None, axes=None):
        """Draws a circle representing the solar limb.

        Parameters
        ----------
        index : `int` or None
            Map index to use to plot limb; when None the first layer
            exposing 'rsun_obs' is used.
        axes : `matplotlib.axes.Axes` or None
            Axes to plot limb on or None to use current axes.

        Returns
        -------
        `matplotlib.axes.Axes`

        Raises
        ------
        ValueError
            If no suitable layer can be found.
        """
        if index is None:
            for i, amap in enumerate(self._maps):
                if hasattr(amap, 'rsun_obs'):
                    index = i
                    break
        # Test index for None *before* using it to index self._maps; the
        # original order raised TypeError (list index None) instead of
        # the intended ValueError when no layer matched.
        if index is None or not hasattr(self._maps[index], 'rsun_obs'):
            raise ValueError("Specified index does not have all the required attributes to draw limb.")
        return self._maps[index].draw_limb(axes=axes)

    @u.quantity_input(grid_spacing=u.deg)
    def draw_grid(self, index=None, axes=None, grid_spacing=20*u.deg):
        """Draws a grid over the surface of the Sun.

        Parameters
        ----------
        index : int or None
            Index to determine which map to use to draw grid; when None
            the first layer with all the required attributes is used.
        axes : `~matplotlib.axes.Axes` or None
            Axes to plot limb on or None to use current axes.
        grid_spacing : `~astropy.units.Quantity`
            Spacing (in degrees) for longitude and latitude grid.

        Returns
        -------
        `matplotlib.axes.Axes` object

        Raises
        ------
        ValueError
            If no suitable layer can be found.
        """
        needed_attrs = ['rsun_meters', 'dsun', 'heliographic_latitude',
                        'heliographic_longitude']
        if index is None:
            for i, amap in enumerate(self._maps):
                if all([hasattr(amap, k) for k in needed_attrs]):
                    index = i
                    break
        # Test index for None *before* using it to index self._maps (see
        # draw_limb for the same fix).
        if index is None or not all([hasattr(self._maps[index], k)
                                     for k in needed_attrs]):
            raise ValueError("Specified index does not have all the required attributes to draw grid.")
        return self._maps[index].draw_grid(axes=axes, grid_spacing=grid_spacing)

    def plot(self, axes=None, annotate=True,  # pylint: disable=W0613
             title="SunPy Composite Plot", **matplot_args):
        """Plots the composite map object using matplotlib

        Parameters
        ----------
        axes : `~matplotlib.axes.Axes` or None
            If provided the image will be plotted on the given axes. Else
            the current matplotlib axes will be used.
        annotate : `bool`
            If true, the data is plotted at its natural scale, with title
            and axis labels.
        **matplot_args : `dict`
            Any additional imshow/contour arguments that should be used
            when plotting.

        Returns
        -------
        ret : `list`
            List of axes images or quad contour sets that have been plotted.
        """
        # Get current axes
        if not axes:
            axes = plt.gca()

        if annotate:
            # x-axis label
            if self._maps[0].coordinate_system.x == 'HG':
                xlabel = 'Longitude [{lon}]'.format(lon=self._maps[0].spatial_units.x)
            else:
                xlabel = 'X-position [{solx}]'.format(solx=self._maps[0].spatial_units.x)

            # y-axis label
            if self._maps[0].coordinate_system.y == 'HG':
                ylabel = 'Latitude [{lat}]'.format(lat=self._maps[0].spatial_units.y)
            else:
                ylabel = 'Y-position [{soly}]'.format(soly=self._maps[0].spatial_units.y)

            axes.set_xlabel(xlabel)
            axes.set_ylabel(ylabel)
            axes.set_title(title)

        # Define a list of plotted objects
        ret = []

        # Plot layers of composite map
        for m in self._maps:
            # Parameters for plotting
            params = {
                "origin": "lower",
                "extent": list(m.xrange.value) + list(m.yrange.value),
                "cmap": m.plot_settings['cmap'],
                "norm": m.plot_settings['norm'],
                "alpha": m.alpha,
                "zorder": m.zorder,
            }
            params.update(matplot_args)

            # Use contour for layers with contour data, imshow otherwise.
            if m.levels is False:
                ret.append(axes.imshow(m.data, **params))
            else:
                # Set data with values <= 0 to transparent
                # contour_data = np.ma.masked_array(m, mask=(m <= 0))
                ret.append(axes.contour(m.data, m.levels, **params))
                # Set the label of the first line so a legend can be created
                ret[-1].collections[0].set_label(m.name)

        # Adjust axes extents to include all data
        axes.axis('image')

        # Set current image (makes colorbar work)
        plt.sci(ret[0])
        return ret

    def peek(self, colorbar=True, basic_plot=False, draw_limb=True,
             draw_grid=False, **matplot_args):
        """Displays the map in a new figure.

        Parameters
        ----------
        colorbar : `bool` or `int`
            Whether to display a colorbar next to the plot.
            If specified as an integer a colorbar is plotted for that index.
        basic_plot : `bool`
            If true, the data is plotted by itself at its natural scale; no
            title, labels, or axes are shown.
        draw_limb : `bool`
            Whether to draw the solar limb.
        draw_grid : `bool` or number
            Whether to draw a heliographic grid; a number selects the
            grid spacing in degrees.
        **matplot_args : dict
            Any additional imshow arguments that should be used
            when plotting.
        """
        # Create a figure and add title and axes
        figure = plt.figure(frameon=not basic_plot)

        # Basic plot
        if basic_plot:
            axes = plt.Axes(figure, [0., 0., 1., 1.])
            axes.set_axis_off()
            figure.add_axes(axes)
            matplot_args.update({'annotate': False})
        else:
            axes = figure.add_subplot(111)

        ret = self.plot(axes=axes, **matplot_args)

        if not isinstance(colorbar, bool) and isinstance(colorbar, int):
            figure.colorbar(ret[colorbar])
        elif colorbar:
            plt.colorbar()

        if draw_limb:
            self.draw_limb(axes=axes)

        if isinstance(draw_grid, bool):
            if draw_grid:
                self.draw_grid(axes=axes)
        elif isinstance(draw_grid, six.integer_types + (float,)):
            self.draw_grid(axes=axes, grid_spacing=draw_grid)
        else:
            raise TypeError("draw_grid should be bool, int, long or float")

        figure.show()
class OutOfRangeAlphaValue(ValueError):
    """Raised when an alpha value outside of the range 0-1 is requested."""
| {
"repo_name": "Alex-Ian-Hamilton/sunpy",
"path": "sunpy/map/compositemap.py",
"copies": "1",
"size": "17540",
"license": "bsd-2-clause",
"hash": -3728552030891562500,
"line_mean": 31.4814814815,
"line_max": 103,
"alpha_frac": 0.5591790194,
"autogenerated": false,
"ratio": 4.189156914258419,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5248335933658419,
"avg_score": null,
"num_lines": null
} |
"""A Composite Map class
Author: `Keith Hughitt <keith.hughitt@nasa.gov>`
"""
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import astropy.units as u
from sunpy.map import GenericMap
from sunpy.visualization import axis_labels_from_ctype, peek_show
from sunpy.util import expand_list
__all__ = ['CompositeMap']
__author__ = "Keith Hughitt"
__email__ = "keith.hughitt@nasa.gov"
class CompositeMap(object):
"""
CompositeMap(map1 [,map2,..])
A Composite Map class
Parameters
----------
args : [`~sunpy.map.Map` | string]
One or more map of filepaths
Methods
-------
add_map(map, zorder=None, alpha=1, levels=False)
Adds a map to the CompositeMap
remove_map(index)
Removes and returns the map with the given index.
list_maps()
Prints a list of the currently included maps.
get_alpha(index=None)
Returns the alpha-channel value for a layer in the composite image
get_levels(index=None)
Returns the list of contour levels for a map within the CompositeMap.
get_plot_settings(index=None)
Returns the plot settings for a map within the CompositeMap.
get_zorder(index=None)
Returns the layering preference (z-order) for a map within the composite.
set_alpha(index, alpha)
Sets the alpha-channel value for a layer in the CompositeMap.
set_levels(index, levels, percent=False)
Sets the contour levels for a layer in the CompositeMap.
set_plot_settings(index, plot_settings)
Set the plot settings for a map with the CompositeMap.
set_zorder(index, zorder)
Set the layering preference (z-order) for a map within the CompositeMap.
plot(figure=None, overlays=None, draw_limb=False,
draw_grid=False, colorbar=True, basic_plot=False,title="SunPy Plot",
matplot_args)
Plots the composite map object using matplotlib
Examples
--------
>>> import sunpy.map
>>> import sunpy.data.sample # doctest: +REMOTE_DATA
>>> comp_map = sunpy.map.Map(sunpy.data.sample.AIA_171_IMAGE,
... sunpy.data.sample.EIT_195_IMAGE,
... composite=True) # doctest: +REMOTE_DATA
>>> comp_map.add_map(sunpy.map.Map(sunpy.data.sample.RHESSI_IMAGE)) # doctest: +REMOTE_DATA
>>> comp_map.peek() # doctest: +SKIP
"""
def __init__(self, *args, **kwargs):
    """Build the composite from pre-constructed map objects."""
    self._maps = expand_list(args)

    # Only GenericMap (or subclass) instances may be layered.
    for layer in self._maps:
        if not isinstance(layer, GenericMap):
            raise ValueError(
                'CompositeMap expects pre-constructed map objects.')

    # Per-layer defaults: fully opaque, no contour levels, and z-orders
    # spaced ten apart (0, 10, 20, ...) so inserts can slot in between.
    for position, layer in enumerate(self._maps):
        layer.zorder = 10 * position
        layer.alpha = 1
        layer.levels = False
def add_map(self, amap, zorder=None, alpha=1, levels=False):
    """Adds a map to the CompositeMap.

    Parameters
    ----------
    amap : `~sunpy.map.GenericMap` or subclass
        Map instance to be added.
    zorder : `int` or None
        Stacking position; maps with higher z-orders appear above maps
        with lower z-orders.  Defaults to 10 above the topmost layer.
    alpha : `float`
        Opacity in the range 0 (fully transparent) to 1 (fully opaque).
    levels : `list` or False
        Contour levels for the layer, or False to render it with imshow.
    """
    if zorder is None:
        # Stack on top of the current layers.  Guard against an empty
        # composite: max() of an empty sequence raises ValueError.
        zorder = max([m.zorder for m in self._maps]) + 10 if self._maps else 0

    amap.zorder = zorder
    amap.alpha = alpha
    amap.levels = levels

    self._maps.append(amap)
def remove_map(self, index):
    """Removes and returns the map with the given index.

    Parameters
    ----------
    index : `int`
        The index of the map in the composite map.

    Returns
    -------
    `~sunpy.map.GenericMap`
        The layer that was removed from the composite map.
    """
    removed_layer = self._maps.pop(index)
    return removed_layer
def list_maps(self):
    """Prints a list of the currently included maps."""
    layer_classes = [layer.__class__ for layer in self._maps]
    print(layer_classes)
def get_map(self, index):
    """Returns the layer at position *index* without removing it."""
    return self._maps[index]
def get_alpha(self, index=None):
    """Return the alpha (opacity) of layer *index*, or a list of the
    alphas of every layer when *index* is None.
    """
    if index is not None:
        return self._maps[index].alpha
    return [layer.alpha for layer in self._maps]
def get_levels(self, index=None):
    """Return the contour levels of layer *index*, or a list of the
    levels of every layer (a list of lists) when *index* is None.

    Parameters
    ----------
    index : {`int` | None}
        The index of the map in the composite map.
    """
    if index is not None:
        return self._maps[index].levels
    return [layer.levels for layer in self._maps]
def get_plot_settings(self, index=None):
    """Return the plot settings of layer *index*, or a list of the
    settings of every layer when *index* is None.

    Parameters
    ----------
    index : {`int` | None}
        The index of the map in the composite map.
    """
    if index is not None:
        return self._maps[index].plot_settings
    return [layer.plot_settings for layer in self._maps]
def get_zorder(self, index=None):
    """Return the layering order (z-order) of layer *index*, or a list
    of the z-orders of every layer when *index* is None.

    Parameters
    ----------
    index : {`int` | None}
        The index of the map in the composite map.
    """
    if index is not None:
        return self._maps[index].zorder
    return [layer.zorder for layer in self._maps]
def set_alpha(self, index, alpha):
    """Set the alpha (opacity) of the layer at *index*.

    Parameters
    ----------
    index : `int`
        The index of the map in the composite map.
    alpha : `float`
        A value in [0, 1]; 0 is fully transparent, 1 fully opaque.

    Raises
    ------
    OutOfRangeAlphaValue
        If *alpha* lies outside the closed interval [0, 1].
    """
    # Reject invalid values up front (guard clause).
    if not 0 <= alpha <= 1:
        raise OutOfRangeAlphaValue("Alpha value must be between 0 and 1.")
    self._maps[index].alpha = alpha
def set_levels(self, index, levels, percent=False):
    """Set the contour levels for the layer at *index*.

    Parameters
    ----------
    index : `int`
        The index of the map in the composite map.
    levels : array-like
        The contour levels.
    percent : `bool`
        If True, *levels* are interpreted as percentages relative to the
        maximum data value of layer *index*; otherwise they are stored
        directly.
    """
    if percent is False:
        new_levels = levels
    else:
        peak = self._maps[index].max()
        new_levels = [peak * level / 100.0 for level in levels]
    self._maps[index].levels = new_levels
def set_plot_settings(self, index, plot_settings):
"""Sets the plot settings for a layer in the composite image.
Parameters
----------
index : `int`
The index of the map in the composite map.
plot_settings : `dict`
A dictionary of the form
Returns
-------
`~sunpy.map.CompositeMap`
A composite map with plot settings 'plot_settings' at layer
'index'.
"""
self._maps[index].plot_settings = plot_settings
def set_zorder(self, index, zorder):
"""Set the layering order (z-order) for a map within the
composite.
Parameters
----------
index : `int`
The index of the map in the composite map.
zorder : `int`
The layer order.
Returns
-------
`~sunpy.map.CompositeMap`
A composite map with the map at layer 'index' having layering order
'zorder'.
"""
self._maps[index].zorder = zorder
def draw_limb(self, index=None, axes=None, **kwargs):
"""Draws a circle representing the solar limb.
Parameters
----------
index : `int`
Map index to use to plot limb.
axes : `matplotlib.axes.Axes` or None
Axes to plot limb on or None to use current axes.
Returns
-------
`matplotlib.axes.Axes`
Notes
-----
Keyword arguments are passed onto `sunpy.map.mapbase.GenericMap.draw_limb`.
"""
if index is None:
for i, amap in enumerate(self._maps):
if hasattr(amap, 'rsun_obs'):
index = i
break
index_check = hasattr(self._maps[index], 'rsun_obs')
if not index_check or index is None:
raise ValueError("Specified index does not have all"
" the required attributes to draw limb.")
return self._maps[index].draw_limb(axes=axes, **kwargs)
@u.quantity_input
def draw_grid(self, index=None, axes=None, grid_spacing: u.deg=20*u.deg, **kwargs):
"""Draws a grid over the surface of the Sun.
Parameters
----------
index: int
Index to determine which map to use to draw grid.
axes: `~matplotlib.axes.Axes` or None
Axes to plot limb on or None to use current axes.
grid_spacing : `float`
Spacing (in degrees) for longitude and latitude grid.
Returns
-------
`matplotlib.axes.Axes` object
Notes
-----
Keyword arguments are passed onto `sunpy.map.mapbase.GenericMap.draw_grid`.
"""
needed_attrs = ['rsun_meters', 'dsun', 'heliographic_latitude',
'heliographic_longitude']
if index is None:
for i, amap in enumerate(self._maps):
if all([hasattr(amap, k) for k in needed_attrs]):
index = i
break
index_check = all([hasattr(self._maps[index], k) for k in needed_attrs])
if not index_check or index is None:
raise ValueError("Specified index does not have all"
" the required attributes to draw grid.")
ax = self._maps[index].draw_grid(axes=axes, grid_spacing=grid_spacing, **kwargs)
return ax
    def plot(self, axes=None, annotate=True, # pylint: disable=W0613
             title="SunPy Composite Plot", **matplot_args):
        """Plots the composite map object using matplotlib

        Parameters
        ----------
        axes: `~matplotlib.axes.Axes` or None
            If provided the image will be plotted on the given axes. Else the
            current matplotlib axes will be used.
        annotate : `bool`
            If true, the data is plotted at its natural scale; with
            title and axis labels.
        title : `str`
            Title of the composite map.
        **matplot_args : `dict`
            Any additional imshow/contour arguments that should be used
            when plotting.

        Returns
        -------
        ret : `list`
            List of axes image or quad contour sets that have been plotted.
        """
        # Get current axes
        if not axes:
            axes = plt.gca()
        if annotate:
            # NOTE(review): axis labels are derived from the first layer only;
            # presumably all layers share the same coordinate system -- confirm.
            axes.set_xlabel(axis_labels_from_ctype(self._maps[0].coordinate_system[0],
                                                   self._maps[0].spatial_units[0]))
            axes.set_ylabel(axis_labels_from_ctype(self._maps[0].coordinate_system[1],
                                                   self._maps[0].spatial_units[1]))
            axes.set_title(title)
        # Define a list of plotted objects
        ret = []
        # Plot layers of composite map
        for m in self._maps:
            # Parameters for plotting: convert the map corners to plain
            # numbers in the map's own spatial units for imshow's extent.
            bl = m._get_lon_lat(m.bottom_left_coord)
            tr = m._get_lon_lat(m.top_right_coord)
            x_range = list(u.Quantity([bl[0], tr[0]]).to(m.spatial_units[0]).value)
            y_range = list(u.Quantity([bl[1], tr[1]]).to(m.spatial_units[1]).value)
            params = {
                "origin": "lower",
                "extent": x_range + y_range,
                "cmap": m.plot_settings['cmap'],
                "norm": m.plot_settings['norm'],
                "alpha": m.alpha,
                "zorder": m.zorder,
            }
            # Caller-supplied kwargs override the per-layer defaults above.
            params.update(matplot_args)
            # The request to show a map layer rendered as a contour is indicated by a
            # non False levels property. If levels is False, then the layer is
            # rendered using imshow.
            if m.levels is False:
                # Check for the presence of masked map data
                if m.mask is None:
                    ret.append(axes.imshow(m.data, **params))
                else:
                    ret.append(axes.imshow(np.ma.array(np.asarray(m.data), mask=m.mask), **params))
            else:
                # Check for the presence of masked map data
                if m.mask is None:
                    ret.append(axes.contour(m.data, m.levels, **params))
                else:
                    ret.append(axes.contour(np.ma.array(np.asarray(m.data), mask=m.mask), m.levels, **params))
                # Set the label of the first line so a legend can be created
                ret[-1].collections[0].set_label(m.name)
        # Adjust axes extents to include all data
        axes.axis('image')
        # Set current image (makes colorbar work)
        plt.sci(ret[0])
        return ret
    @peek_show
    def peek(self, colorbar=True, basic_plot=False, draw_limb=True,
             draw_grid=False, **matplot_args):
        """
        Displays a graphical overview of the data in this object for user evaluation.
        For the creation of plots, users should instead use the `~sunpy.map.CompositeMap.plot`
        method and Matplotlib's pyplot framework.

        Parameters
        ----------
        colorbar : `bool` or `int`
            Whether to display a colorbar next to the plot.
            If specified as an integer a colorbar is plotted for that index.
        basic_plot : `bool`
            If true, the data is plotted by itself at its natural scale; no
            title, labels, or axes are shown.
        draw_limb : `bool`
            If true, draws a circle representing the solar limb.
        draw_grid : `bool` or number
            If true, draws a grid over the surface of the Sun. A numeric value
            is used as the grid spacing in degrees.
        **matplot_args : dict
            Any additional imshow arguments that should be used
            when plotting.
        """
        # Create a figure and add title and axes
        figure = plt.figure(frameon=not basic_plot)
        # Basic plot
        if basic_plot:
            # Frameless, axis-less canvas that fills the whole figure.
            axes = plt.Axes(figure, [0., 0., 1., 1.])
            axes.set_axis_off()
            figure.add_axes(axes)
            matplot_args.update({'annotate': False})
        else:
            axes = figure.add_subplot(111)
        ret = self.plot(axes=axes, **matplot_args)
        # An integer colorbar selects one plotted layer by index; a plain
        # True attaches a colorbar to the current image.
        if not isinstance(colorbar, bool) and isinstance(colorbar, int):
            figure.colorbar(ret[colorbar])
        elif colorbar:
            plt.colorbar()
        if draw_limb:
            self.draw_limb(axes=axes)
        if isinstance(draw_grid, bool):
            if draw_grid:
                self.draw_grid(axes=axes)
        elif isinstance(draw_grid, (int, float)):
            # Numeric draw_grid doubles as the grid spacing (degrees).
            self.draw_grid(axes=axes, grid_spacing=draw_grid)
        else:
            raise TypeError("draw_grid should be bool, int, long or float")
        return figure
class OutOfRangeAlphaValue(ValueError):
    """Raised when a layer's alpha-channel value is set outside 0-1."""
| {
"repo_name": "dpshelio/sunpy",
"path": "sunpy/map/compositemap.py",
"copies": "1",
"size": "17464",
"license": "bsd-2-clause",
"hash": 5628462146645484000,
"line_mean": 32.3282442748,
"line_max": 110,
"alpha_frac": 0.557833257,
"autogenerated": false,
"ratio": 4.20616570327553,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0005007362514581017,
"num_lines": 524
} |
''' A CompoundStim class represents a combination of constituent Stim classes.
'''
from pliers.utils import listify
from .base import _get_stim_class
from .audio import AudioStim
from .text import ComplexTextStim
class CompoundStim:
    ''' A container for an arbitrary set of Stim elements.

    Args:
        elements (Stim or list): a single Stim (of any type) or a list of
            elements.
    '''

    # Tuple of Stim classes accepted as elements (None means any class).
    _allowed_types = None
    # Whether more than one element of the same class may be stored.
    _allow_multiple = True
    # Stim class whose name/filename this compound adopts (None derives a
    # name by joining the element names instead).
    _primary = None

    def __init__(self, elements):
        self.elements = []
        self.history = None
        # Tracks element classes seen so far, to enforce _allow_multiple.
        _type_dict = {}
        for s in elements:
            stim_cl, self_cl = s.__class__.__name__, self.__class__.__name__
            if self._allowed_types and not isinstance(s, self._allowed_types):
                raise ValueError("A stim of class %s was passed, but the %s "
                                 "class does not allow elements of this "
                                 "type." % (stim_cl, self_cl))
            if self._allow_multiple or stim_cl not in _type_dict:
                _type_dict[stim_cl] = 1
                self.elements.append(s)
            else:
                msg = "Multiple components of same type not allowed, and " + \
                      "a stim of type {} already exists in this {}.".format(stim_cl, self_cl)
                raise ValueError(msg)
        if self._primary is not None:
            # Adopt name/filename from the designated primary element.
            primary = self.get_stim(self._primary)
            self.name = primary.name
            self.filename = primary.filename
        else:
            # Synthesize a name from the components, capped at 255 chars.
            self.name = '&'.join([s.name for s in self.elements])[:255]
            self.filename = None

    def __iter__(self):
        """ Element iteration. """
        yield from self.elements

    def get_stim(self, type_, return_all=False):
        ''' Returns component elements of the specified type.

        Args:
            type_ (str or Stim class): the desired Stim subclass to return.
            return_all (bool): when True, returns all elements that matched the
                specified type as a list. When False (default), returns only
                the first matching Stim.

        Returns:
            If return_all is True, a list of matching elements (or an empty
            list if no elements match). If return_all is False, returns the
            first matching Stim, or None if no elements match.
        '''
        if isinstance(type_, str):
            type_ = _get_stim_class(type_)
        matches = []
        for s in self.elements:
            if isinstance(s, type_):
                if not return_all:
                    return s
                matches.append(s)
        if not matches:
            return [] if return_all else None
        return matches

    def get_types(self):
        ''' Return tuple of types of all available Stims. '''
        return tuple({e.__class__ for e in self.elements})

    def has_types(self, types, all_=True):
        ''' Check whether the current component list matches all Stim types
        in the types argument.

        Args:
            types (Stim, list): a Stim class or iterable of Stim classes.
            all_ (bool): if True, all input types must match; if False, at
                least one input type must match.

        Returns:
            True if all passed types match at least one Stim in the component
            list, otherwise False.
        '''
        func = all if all_ else any
        return func([self.get_stim(t) for t in listify(types)])

    def __getattr__(self, attr):
        # Fallback attribute chain: if attr names a Stim class, return that
        # component; otherwise delegate to the primary element; otherwise
        # fall back to normal lookup (which raises AttributeError).
        try:
            stim = _get_stim_class(attr)
        except Exception as e:
            try:
                primary = self.get_stim(self._primary)
                return getattr(primary, attr)
            except Exception as e:
                return self.__getattribute__(attr)
        return self.get_stim(stim)
class TranscribedAudioCompoundStim(CompoundStim):
    ''' An AudioStim paired with its text transcription.

    Args:
        audio (AudioStim): An AudioStim containing the audio content.
        text (ComplexTextStim): A ComplexTextStim containing the transcribed
            text (and associated timing information).
    '''
    _allowed_types = (AudioStim, ComplexTextStim)
    _allow_multiple = False
    _primary = AudioStim

    def __init__(self, audio, text):
        super().__init__(elements=[audio, text])
| {
"repo_name": "tyarkoni/pliers",
"path": "pliers/stimuli/compound.py",
"copies": "2",
"size": "4463",
"license": "bsd-3-clause",
"hash": 4642588919070486000,
"line_mean": 33.5968992248,
"line_max": 93,
"alpha_frac": 0.5673313914,
"autogenerated": false,
"ratio": 4.375490196078432,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5942821587478432,
"avg_score": null,
"num_lines": null
} |
"""A COM Server which exposes the NT Performance monitor in a very rudimentary way
Usage from VB:
set ob = CreateObject("Python.PerfmonQuery")
freeBytes = ob.Query("Memory", "Available Bytes")
"""
from win32com.server import exception, register
import pythoncom, win32pdhutil, winerror
class PerfMonQuery:
    # COM registration metadata consumed by win32com.server.register.
    _reg_verprogid_ = "Python.PerfmonQuery.1"
    _reg_progid_ = "Python.PerfmonQuery"
    _reg_desc_ = "Python Performance Monitor query object"
    _reg_clsid_ = "{64cef7a0-8ece-11d1-a65a-00aa00125a98}"
    _reg_class_spec_ = "win32com.servers.perfmon.PerfMonQuery"
    _public_methods_ = [ 'Query' ]
    def Query(self, object, counter, instance = None, machine = None):
        """Return the current value of the given performance counter.

        NOTE(review): ``object`` shadows the builtin; left as-is because COM
        clients may pass it by name.
        """
        try:
            return win32pdhutil.GetPerformanceAttributes(object, counter, instance, machine=machine)
        except win32pdhutil.error as exc:
            # Surface PDH failures to COM clients with the OS error text.
            raise exception.Exception(desc=exc.strerror)
        except TypeError as desc:
            raise exception.Exception(desc=desc,scode=winerror.DISP_E_TYPEMISMATCH)
if __name__=='__main__':
    # Running the module as a script registers the class as a COM server.
    print("Registering COM server...")
    register.UseCommandLine(PerfMonQuery)
| {
"repo_name": "Microsoft/PTVS",
"path": "Python/Product/Miniconda/Miniconda3-x64/Lib/site-packages/win32com/servers/perfmon.py",
"copies": "11",
"size": "1050",
"license": "apache-2.0",
"hash": -4354104318837345000,
"line_mean": 37.8888888889,
"line_max": 91,
"alpha_frac": 0.7523809524,
"autogenerated": false,
"ratio": 3.0346820809248554,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9287063033324856,
"avg_score": null,
"num_lines": null
} |
"""A COM Server which exposes the NT Performance monitor in a very rudimentary way
Usage from VB:
set ob = CreateObject("Python.PerfmonQuery")
freeBytes = ob.Query("Memory", "Available Bytes")
"""
from win32com.server import exception, register
import pythoncom, win32pdhutil, winerror
class PerfMonQuery:
    # COM registration metadata consumed by win32com.server.register.
    # NOTE: this copy uses Python 2 syntax ("except E, name").
    _reg_verprogid_ = "Python.PerfmonQuery.1"
    _reg_progid_ = "Python.PerfmonQuery"
    _reg_desc_ = "Python Performance Monitor query object"
    _reg_clsid_ = "{64cef7a0-8ece-11d1-a65a-00aa00125a98}"
    _reg_class_spec_ = "win32com.servers.perfmon.PerfMonQuery"
    _public_methods_ = [ 'Query' ]
    def Query(self, object, counter, instance = None, machine = None):
        """Return the current value of the given performance counter."""
        try:
            return win32pdhutil.GetPerformanceAttributes(object, counter, instance, machine=machine)
        except win32pdhutil.error, exc:
            # Surface PDH failures to COM clients with the OS error text.
            raise exception.Exception(desc=exc.strerror)
        except TypeError, desc:
            raise exception.Exception(desc=desc,scode=winerror.DISP_E_TYPEMISMATCH)
if __name__=='__main__':
    # Running the module as a script registers the class as a COM server.
    print "Registering COM server..."
    register.UseCommandLine(PerfMonQuery)
| {
"repo_name": "ntuecon/server",
"path": "pyenv/Lib/site-packages/win32com/servers/perfmon.py",
"copies": "4",
"size": "1072",
"license": "bsd-3-clause",
"hash": 6846595764353661000,
"line_mean": 37.7037037037,
"line_max": 91,
"alpha_frac": 0.7332089552,
"autogenerated": false,
"ratio": 3.0716332378223496,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.580484219302235,
"avg_score": null,
"num_lines": null
} |
# The MIT License (MIT)
# Copyright (c) 2014 Emilio Daniel Gonzalez
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import errno
from .. import apoll
class DumbContext(object):
    """Minimal context object exposing only a logger."""

    def __init__(self):
        # Imported lazily, matching the project's import style.
        from context_logger import ContextLogger
        self.log = ContextLogger()
class ExecutionContext(object):
    """A poll-based event loop dispatching I/O events to registered
    connector handlers.
    """
    # Process-wide singleton created lazily by instance().
    _instance = None

    @classmethod
    def instance(cls):
        """Return the shared ExecutionContext, creating it on first use."""
        if not cls._instance:
            # BUG FIX: the original instantiated an undefined name
            # ``Context`` (NameError); the singleton must be this class.
            cls._instance = cls()
        return cls._instance

    def __init__(self):
        from context_logger import ContextLogger
        self.log = ContextLogger()
        self.running = False
        self._queue = apoll.instantiate()
        self._handlers = {}  # fd -> handler callable
        self._events = {}    # fd -> pending event mask

    def attach(self, connector):
        """Register a connector's fd and handler with the loop's poller."""
        from iconnector import IConnector
        assert isinstance(connector, IConnector)
        connector._attach_to(self)
        fileno = connector.fileno()
        self._handlers[fileno] = connector.handler
        self._queue.register(fileno, connector.events())

    def detach(self, fd):
        """Forget a file descriptor and unregister it from the poller."""
        self._handlers.pop(fd, None)
        self._events.pop(fd, None)
        self._queue.unregister(fd)

    def run(self):
        """Poll for I/O events and dispatch them until stopped.

        KeyboardInterrupt stops the loop cleanly; when the loop exits
        normally the process is terminated via sys.exit().
        """
        self.running = True
        try:
            while self.running:
                try:
                    event_pairs = self._queue.poll(60)
                    if not event_pairs:
                        continue
                except Exception as e:
                    # Depending on python version and IOLoop implementation,
                    # different exception types may be thrown and there are
                    # two ways EINTR might be signaled:
                    # * e.errno == errno.EINTR
                    # * e.args is like (errno.EINTR, 'Interrupted system call')
                    if (getattr(e, 'errno', None) == errno.EINTR or
                        (isinstance(getattr(e, 'args', None), tuple) and
                        len(e.args) == 2 and e.args[0] == errno.EINTR)):
                        continue
                    else:
                        raise
                self._events.update(event_pairs)
                while self._events:
                    fh, events = self._events.popitem()
                    try:
                        self._handlers[fh](fh, events)
                    except (OSError, IOError) as e:
                        if e.args[0] == errno.EPIPE:
                            # Happens when the client closes the connection
                            pass
                        else:
                            self.log.error("something wrong happend with the handlers for %i" % fh)
                    except Exception:
                        # NOTE(review): this catches *any* handler failure,
                        # not only a missing handler -- message may mislead.
                        self.log.warning("There are no handlers for %i" % fh)
                        raise
            # NOTE(review): terminating the process when the loop stops
            # normally looks intentional but is surprising -- confirm.
            sys.exit()
        except KeyboardInterrupt:
            self.running = False
| {
"repo_name": "aconcagua/aconcagua-python",
"path": "aconcagua/context/execution_context.py",
"copies": "1",
"size": "3935",
"license": "mit",
"hash": 1428667394615251500,
"line_mean": 35.4351851852,
"line_max": 99,
"alpha_frac": 0.5786531131,
"autogenerated": false,
"ratio": 4.758162031438936,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5836815144538936,
"avg_score": null,
"num_lines": null
} |
### For practical reasons, a big part of this module was inspired (copied &
### pasted) from Tornado's ioloop module, so the credit for the goodness of
### this code goes to its team.
### You should take a look at that awesome project at:
### http://www.tornadoweb.org/ & https://github.com/facebook/tornado/
class APoll(object):
    """Abstract polling interface whose event constants mirror epoll's."""

    # Constants lifted from the epoll module.
    _EPOLLIN = 0x001
    _EPOLLPRI = 0x002
    _EPOLLOUT = 0x004
    _EPOLLERR = 0x008
    _EPOLLHUP = 0x010
    _EPOLLRDHUP = 0x2000
    _EPOLLONESHOT = (1 << 30)
    _EPOLLET = (1 << 31)

    # Public event masks map one-to-one onto the epoll values above.
    NONE = 0
    READ = _EPOLLIN
    WRITE = _EPOLLOUT
    ERROR = _EPOLLERR | _EPOLLHUP

    def fileno(self):
        """Return the poller's own file descriptor."""
        raise NotImplementedError()

    def register(self, fd, events):
        """Begin watching *fd* for the given event mask."""
        raise NotImplementedError()

    def unregister(self, fd):
        """Stop watching *fd*."""
        raise NotImplementedError()

    def modify(self, fd, events):
        """Change the event mask watched for *fd*."""
        raise NotImplementedError()

    def poll(self, timeout):
        """Wait up to *timeout* and return ready (fd, events) pairs."""
        raise NotImplementedError()

    def close(self):
        """Release any resources held by the poller."""
        raise NotImplementedError()
| {
"repo_name": "aconcagua/aconcagua-python",
"path": "aconcagua/apoll/apoll.py",
"copies": "1",
"size": "2265",
"license": "mit",
"hash": 4915662321024771000,
"line_mean": 32.8059701493,
"line_max": 80,
"alpha_frac": 0.7090507726,
"autogenerated": false,
"ratio": 3.9528795811518322,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5161930353751832,
"avg_score": null,
"num_lines": null
} |
# A configuration file for the fileprocess. We could do a .ini, but everybody
# knows python here
import logging
import os
from logging import handlers
# Base configuration; environment-specific dicts below are merged on top.
# NOTE(review): live credentials (S3 keys, API secrets, DB passwords) are
# hardcoded in this file -- they should be moved to environment variables
# or a secrets store, and any exposed keys rotated.
config = {
    'port': 48260,
    'S3.accesskey': '17G635SNK33G1Y7NZ2R2',
    'S3.secret': 'PHDzFig4NYRJoKKW/FerfhojljL+sbNyYB9bEpHs',
    'S3.music_bucket': 'music.rubiconmusicplayer.com',
    'S3.upload': True,
    'sqlalchemy.default.convert_unicode': True,
    'upload_dir': '../masterapp/tmp',
    'media_dir': '../masterapp/media',
    'pyfacebook.callbackpath': None,
    'pyfacebook.apikey': 'cec673d0ef3fbc12395d0d3500cd72f9',
    'pyfacebook.secret': 'a08f822bf3d7f80ee25c47414fe98be1',
    'pyfacebook.appid': '2364724122',
    'musicdns.key': 'ffa7339e1b6bb1d26593776b4257fce1',
    'maxkbps': 192000,
    'sqlalchemy.default.url': 'sqlite:///../masterapp/music.db',
    'cache_dir': '../masterapp/cache'
}
# Overrides for local development.
dev_config = {
    'S3.upload': False,
    'tagshelf': '../masterapp/tags.archive',
    'amqp_vhost': '/dev'
}
# Overrides for the test suite (in-memory DB, local fixture dirs).
test_config = {
    'sqlalchemy.default.url': 'sqlite:///:memory:',
    'sqlalchemy.reflect.url': 'sqlite:///../../masterapp/music.db',
    'upload_dir': './test/testuploaddir',
    'media_dir': './test/teststagingdir',
    'tagshelf': './test/tagshelf'
}
# Overrides for the staging/production deployment.
production_config = {
    'S3.upload': True,
    'sqlalchemy.default.url': \
        'mysql://webappuser:gravelbits@localhost:3306/rubicon',
    'sqlalchemy.default.pool_recycle': 3600,
    'upload_dir': '/var/opt/stage_uploads',
    'media_dir': os.environ.get('MEDIA'),
    'tagshelf': '/var/opt/tagshelf.archive',
    'cache_dir': '/tmp/stage_cache',
    'amqp_vhost': '/staging'
}
# Overrides for the live deployment.
live_config = {
    'port': 48262,
    'upload_dir': '/var/opt/uploads',
    'sqlalchemy.default.url': \
        'mysql://webappuser:gravelbits@localhost:3306/harmonize',
    'cache_dir': '/tmp/live_cache',
    'amqp_vhost': '/live'
}
# Base logging configuration; per-environment dicts below override it.
base_logging = {
    'level': logging.INFO,
    'format':'%(asctime)s,%(msecs)03d %(levelname)-5.5s [%(name)s] %(message)s',
    'datefmt': '%H:%M:%S',
    'handler': logging.StreamHandler,
    'handler_args': ()
}
dev_logging = {
    'level': logging.DEBUG
}
production_logging = {
    'level': logging.INFO,
    'handler': handlers.TimedRotatingFileHandler,
    'handler_args': ('/var/log/rubicon/filepipe', 'midnight', 0, 7)
}
live_logging = {
    'handler_args': ('/var/log/harmonize/filepipe', 'midnight', 0, 7)
}
def update_config(nconfig):
    """Merge *nconfig* into the module-level ``config`` dict in place."""
    # No ``global`` needed: the dict is mutated, not rebound.
    config.update(nconfig)
def lupdate_config(nconfig):
    """Merge *nconfig* into the module-level ``base_logging`` dict in place.

    BUG FIX: the original ignored its *nconfig* argument and instead merged
    the unrelated ``config`` dict into ``base_logging``.
    """
    base_logging.update(nconfig)
| {
"repo_name": "JustinTulloss/harmonize.fm",
"path": "filemq/filemq/configuration.py",
"copies": "1",
"size": "2548",
"license": "mit",
"hash": 2688927516315322400,
"line_mean": 27,
"line_max": 80,
"alpha_frac": 0.6456043956,
"autogenerated": false,
"ratio": 2.837416481069042,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3983020876669042,
"avg_score": null,
"num_lines": null
} |
"""A configuration management class built on top of etcd
See: http://python-etcd.readthedocs.org/
It provides a read-only access and just exposes a nested dict
"""
import functools
import time
import etcd3
from conman.conman_base import ConManBase
def thrice(delay=0.5):
    """Decorator factory retrying a failed call up to 3 times total.

    The wrapped function is attempted at most three times; *delay* seconds
    elapse between consecutive attempts, and the final failure's exception
    propagates unchanged.
    """
    def decorated(f):
        @functools.wraps(f)
        def wrapped(*args, **kwargs):
            attempt = 0
            while True:
                try:
                    return f(*args, **kwargs)
                except Exception:
                    attempt += 1
                    if attempt >= 3:
                        raise
                    time.sleep(delay)
        return wrapped
    return decorated
class ConManEtcd(ConManBase):
    """Read-only configuration source backed by an etcd3 cluster.

    Keys fetched from etcd are exposed through the nested dict that
    ConManBase maintains.
    """

    def __init__(self,
                 host='127.0.0.1',
                 port=2379,
                 ca_cert=None,
                 cert_key=None,
                 cert_cert=None,
                 timeout=None,
                 user=None,
                 password=None,
                 grpc_options=None,
                 on_change=lambda e: None):
        ConManBase.__init__(self)
        self.on_change = on_change
        self.client = etcd3.client(
            host=host,
            port=port,
            ca_cert=ca_cert,
            cert_key=cert_key,
            cert_cert=cert_cert,
            timeout=timeout,
            user=user,
            password=password,
            grpc_options=grpc_options,
        )

    def _add_key_recursively(self, etcd_result):
        """Store each (value, metadata) pair from *etcd_result* into the
        nested configuration dict, splitting the etcd key on '/'.
        """
        found = False
        for item in etcd_result:
            found = True
            value = item[0].decode()
            path = item[1].key.decode().split('/')
            node = self._conf
            for part in path[:-1]:
                node = node.setdefault(part, {})
            node[path[-1]] = value
        if not found:
            raise Exception('Empty result')

    def watch(self, key):
        """Register on_change as a change callback for *key*; return its id."""
        return self.client.add_watch_callback(key, self.on_change)

    def watch_prefix(self, key):
        """Return a watcher covering every key under the given prefix."""
        return self.client.watch_prefix(key)

    def cancel(self, watch_id):
        """Cancel a watch previously created by watch()."""
        self.client.cancel_watch(watch_id)

    def add_key(self, key, watch=False):
        """Add a key to managed etcd keys and store its data

        :param str key: the etcd path
        :param bool watch: determine if need to watch the key

        When a key is added all its data is stored as a dict
        """
        result = self.client.get_prefix(key, sort_order='ascend')
        self._add_key_recursively(result)
        if watch:
            self.watch(key)

    def refresh(self, key=None):
        """Refresh an existing key or all keys

        :param key: the key to refresh (if None refresh all keys)

        If the key parameter doesn't exist an exception will be raised.
        No need to watch again the conf keys.
        """
        targets = [key] if key else self._conf.keys()
        for k in targets:
            if k in self._conf:
                del self._conf[k]
            self.add_key(k, watch=False)
| {
"repo_name": "the-gigi/conman",
"path": "conman/conman_etcd.py",
"copies": "1",
"size": "3240",
"license": "mit",
"hash": -2054108859139198700,
"line_mean": 27.6725663717,
"line_max": 72,
"alpha_frac": 0.525617284,
"autogenerated": false,
"ratio": 4.10126582278481,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.512688310678481,
"avg_score": null,
"num_lines": null
} |
"""A connection adapter that tries to use the best polling method for the
platform pika is running on.
"""
import abc
import collections
import errno
import heapq
import logging
import select
import time
import threading
import pika.compat
from pika.adapters.utils import nbio_interface
from pika.adapters.base_connection import BaseConnection
from pika.adapters.utils.selector_ioloop_adapter import (
SelectorIOServicesAdapter, AbstractSelectorIOLoop)
# Module-level logger for this adapter.
LOGGER = logging.getLogger(__name__)

# One of select, epoll, kqueue or poll
SELECT_TYPE = None

# Reason for this unconventional dict initialization is the fact that on some
# platforms select.error is an alias for OSError. We don't want the lambda
# for select.error to win over one for OSError.
_SELECT_ERROR_CHECKERS = {}
if pika.compat.PY3:
    # InterruptedError is undefined in PY2
    # pylint: disable=E0602
    _SELECT_ERROR_CHECKERS[InterruptedError] = lambda e: True
_SELECT_ERROR_CHECKERS[select.error] = lambda e: e.args[0] == errno.EINTR
_SELECT_ERROR_CHECKERS[IOError] = lambda e: e.errno == errno.EINTR
_SELECT_ERROR_CHECKERS[OSError] = lambda e: e.errno == errno.EINTR

# We can reduce the number of elements in the list by looking at super-sub
# class relationship because only the most generic ones need to be caught.
# For now the optimization is left out.
# Following is better but still incomplete.
# _SELECT_ERRORS = tuple(filter(lambda e: not isinstance(e, OSError),
#                               _SELECT_ERROR_CHECKERS.keys())
#                        + [OSError])
_SELECT_ERRORS = tuple(_SELECT_ERROR_CHECKERS.keys())
def _is_resumable(exc):
    """Check if caught exception represents EINTR error.

    :param exc: exception; must be one of classes in _SELECT_ERRORS
    """
    checker = _SELECT_ERROR_CHECKERS.get(type(exc))
    return False if checker is None else checker(exc)
class SelectConnection(BaseConnection):
    """An asynchronous connection adapter that attempts to use the fastest
    event loop adapter for the given platform.
    """

    def __init__(
            self, # pylint: disable=R0913
            parameters=None,
            on_open_callback=None,
            on_open_error_callback=None,
            on_close_callback=None,
            custom_ioloop=None,
            internal_connection_workflow=True):
        """Create a new instance of the Connection object.

        :param pika.connection.Parameters parameters: Connection parameters
        :param callable on_open_callback: Method to call on connection open
        :param None | method on_open_error_callback: Called if the connection
            can't be established or connection establishment is interrupted by
            `Connection.close()`: on_open_error_callback(Connection, exception).
        :param None | method on_close_callback: Called when a previously fully
            open connection is closed:
            `on_close_callback(Connection, exception)`, where `exception` is
            either an instance of `exceptions.ConnectionClosed` if closed by
            user or broker or exception of another type that describes the cause
            of connection failure.
        :param None | IOLoop | nbio_interface.AbstractIOServices custom_ioloop:
            Provide a custom I/O Loop object.
        :param bool internal_connection_workflow: True for autonomous connection
            establishment which is default; False for externally-managed
            connection workflow via the `create_connection()` factory.
        :raises: RuntimeError
        """
        # Accept a ready-made async-services object, or wrap a selector-based
        # IOLoop (creating a default one when none is supplied).
        if isinstance(custom_ioloop, nbio_interface.AbstractIOServices):
            nbio = custom_ioloop
        else:
            nbio = SelectorIOServicesAdapter(custom_ioloop or IOLoop())
        super(SelectConnection, self).__init__(
            parameters,
            on_open_callback,
            on_open_error_callback,
            on_close_callback,
            nbio,
            internal_connection_workflow=internal_connection_workflow)

    @classmethod
    def create_connection(cls,
                          connection_configs,
                          on_done,
                          custom_ioloop=None,
                          workflow=None):
        """Implement
        :py:classmethod:`pika.adapters.BaseConnection.create_connection()`.
        """
        nbio = SelectorIOServicesAdapter(custom_ioloop or IOLoop())

        def connection_factory(params):
            """Connection factory."""
            if params is None:
                raise ValueError('Expected pika.connection.Parameters '
                                 'instance, but got None in params arg.')
            # Workflow-managed connections are built with
            # internal_connection_workflow=False.
            return cls(
                parameters=params,
                custom_ioloop=nbio,
                internal_connection_workflow=False)

        return cls._start_connection_workflow(
            connection_configs=connection_configs,
            connection_factory=connection_factory,
            nbio=nbio,
            workflow=workflow,
            on_done=on_done)

    def _get_write_buffer_size(self):
        """
        :returns: Current size of output data buffered by the transport
        :rtype: int
        """
        return self._transport.get_write_buffer_size()
class _Timeout(object):
"""Represents a timeout"""
__slots__ = (
'deadline',
'callback',
)
def __init__(self, deadline, callback):
"""
:param float deadline: timer expiration as non-negative epoch number
:param callable callback: callback to call when timeout expires
:raises ValueError, TypeError:
"""
if deadline < 0:
raise ValueError(
'deadline must be non-negative epoch number, but got %r' %
(deadline,))
if not callable(callback):
raise TypeError(
'callback must be a callable, but got %r' % (callback,))
self.deadline = deadline
self.callback = callback
def __eq__(self, other):
"""NOTE: not supporting sort stability"""
if isinstance(other, _Timeout):
return self.deadline == other.deadline
return NotImplemented
def __ne__(self, other):
"""NOTE: not supporting sort stability"""
result = self.__eq__(other)
if result is not NotImplemented:
return not result
return NotImplemented
def __lt__(self, other):
"""NOTE: not supporting sort stability"""
if isinstance(other, _Timeout):
return self.deadline < other.deadline
return NotImplemented
def __gt__(self, other):
"""NOTE: not supporting sort stability"""
if isinstance(other, _Timeout):
return self.deadline > other.deadline
return NotImplemented
def __le__(self, other):
"""NOTE: not supporting sort stability"""
if isinstance(other, _Timeout):
return self.deadline <= other.deadline
return NotImplemented
def __ge__(self, other):
"""NOTE: not supporting sort stability"""
if isinstance(other, _Timeout):
return self.deadline >= other.deadline
return NotImplemented
class _Timer(object):
"""Manage timeouts for use in ioloop"""
# Cancellation count threshold for triggering garbage collection of
# cancelled timers
_GC_CANCELLATION_THRESHOLD = 1024
def __init__(self):
self._timeout_heap = []
# Number of canceled timeouts on heap; for scheduling garbage
# collection of canceled timeouts
self._num_cancellations = 0
def close(self):
"""Release resources. Don't use the `_Timer` instance after closing
it
"""
# Eliminate potential reference cycles to aid garbage-collection
if self._timeout_heap is not None:
for timeout in self._timeout_heap:
timeout.callback = None
self._timeout_heap = None
def call_later(self, delay, callback):
"""Schedule a one-shot timeout given delay seconds.
NOTE: you may cancel the timer before dispatch of the callback. Timer
Manager cancels the timer upon dispatch of the callback.
:param float delay: Non-negative number of seconds from now until
expiration
:param callable callback: The callback method, having the signature
`callback()`
:rtype: _Timeout
:raises ValueError, TypeError
"""
if self._timeout_heap is None:
raise ValueError("Timeout closed before call")
if delay < 0:
raise ValueError(
'call_later: delay must be non-negative, but got %r' % (delay,))
now = pika.compat.time_now()
timeout = _Timeout(now + delay, callback)
heapq.heappush(self._timeout_heap, timeout)
LOGGER.debug(
'call_later: added timeout %r with deadline=%r and '
'callback=%r; now=%s; delay=%s', timeout, timeout.deadline,
timeout.callback, now, delay)
return timeout
def remove_timeout(self, timeout):
"""Cancel the timeout
:param _Timeout timeout: The timer to cancel
"""
# NOTE removing from the heap is difficult, so we just deactivate the
# timeout and garbage-collect it at a later time; see discussion
# in http://docs.python.org/library/heapq.html
if timeout.callback is None:
LOGGER.debug(
'remove_timeout: timeout was already removed or called %r',
timeout)
else:
LOGGER.debug(
'remove_timeout: removing timeout %r with deadline=%r '
'and callback=%r', timeout, timeout.deadline, timeout.callback)
timeout.callback = None
self._num_cancellations += 1
def get_remaining_interval(self):
    """Return the time until the earliest scheduled timer expires.

    :returns: non-negative number of seconds until next timer expiration;
        None if there are no timers
    :rtype: float
    """
    if not self._timeout_heap:
        return None
    # Clamp at zero: an overdue deadline means "expire immediately".
    return max(0, self._timeout_heap[0].deadline - pika.compat.time_now())
def process_timeouts(self):
    """Process pending timeouts, invoking callbacks for those whose time has
    come
    """
    if self._timeout_heap:
        now = pika.compat.time_now()

        # Remove ready timeouts from the heap now to prevent IO starvation
        # from timeouts added during callback processing
        ready_timeouts = []

        while self._timeout_heap and self._timeout_heap[0].deadline <= now:
            timeout = heapq.heappop(self._timeout_heap)
            if timeout.callback is not None:
                ready_timeouts.append(timeout)
            else:
                # A cancelled entry just fell off the heap, so it no longer
                # counts toward the pending-GC tally.
                self._num_cancellations -= 1

        # Invoke ready timeout callbacks
        for timeout in ready_timeouts:
            if timeout.callback is None:
                # Must have been canceled from a prior callback
                self._num_cancellations -= 1
                continue

            timeout.callback()
            # Mark as dispatched so a later remove_timeout() on this handle
            # becomes a no-op.
            timeout.callback = None

        # Garbage-collect canceled timeouts if they exceed threshold
        # (and also outnumber half the heap, keeping compaction amortized)
        if (self._num_cancellations >= self._GC_CANCELLATION_THRESHOLD and
                self._num_cancellations > (len(self._timeout_heap) >> 1)):
            self._num_cancellations = 0
            self._timeout_heap = [
                t for t in self._timeout_heap if t.callback is not None
            ]
            heapq.heapify(self._timeout_heap)
class PollEvents(object):
    """Event flags for I/O"""
    # Mirror epoll's flag values so masks translate directly to the
    # underlying pollers; fall back to fixed constants on platforms whose
    # `select` module lacks the POLL* attributes (e.g. Windows).
    READ = getattr(select, 'POLLIN', 0x01)  # available for read
    WRITE = getattr(select, 'POLLOUT', 0x04)  # available for write
    ERROR = getattr(select, 'POLLERR', 0x08)  # error on associated fd
class IOLoop(AbstractSelectorIOLoop):
    """I/O loop implementation that picks a suitable poller (`select`,
    `poll`, `epoll`, `kqueue`) to use based on platform.

    Implements the
    `pika.adapters.utils.selector_ioloop_adapter.AbstractSelectorIOLoop`
    interface.
    """
    # READ/WRITE/ERROR per `AbstractSelectorIOLoop` requirements
    READ = PollEvents.READ
    WRITE = PollEvents.WRITE
    ERROR = PollEvents.ERROR

    def __init__(self):
        # Timer abstraction backing call_later()/remove_timeout()
        self._timer = _Timer()

        # Callbacks requested via `add_callback`
        self._callbacks = collections.deque()

        # Platform-specific poller; it drives IO waits and is wired back to
        # our timer/callback processing.
        self._poller = self._get_poller(self._get_remaining_interval,
                                        self.process_timeouts)

    def close(self):
        """Release IOLoop's resources.

        `IOLoop.close` is intended to be called by the application or test code
        only after `IOLoop.start()` returns. After calling `close()`, no other
        interaction with the closed instance of `IOLoop` should be performed.
        """
        # `_callbacks` doubles as the "not yet closed" sentinel
        if self._callbacks is not None:
            self._poller.close()
            self._timer.close()
            # Set _callbacks to empty list rather than None so that race from
            # another thread calling add_callback_threadsafe() won't result in
            # AttributeError
            self._callbacks = []

    @staticmethod
    def _get_poller(get_wait_seconds, process_timeouts):
        """Determine the best poller to use for this environment and instantiate
        it.

        :param get_wait_seconds: Function for getting the maximum number of
                                 seconds to wait for IO for use by the poller
        :param process_timeouts: Function for processing timeouts for use by the
                                 poller
        :returns: The instantiated poller instance supporting `_PollerBase` API
        :rtype: object
        """
        poller = None

        kwargs = dict(
            get_wait_seconds=get_wait_seconds,
            process_timeouts=process_timeouts)

        # Preference order: epoll, kqueue, poll, select. SELECT_TYPE, when
        # set, forces the named mechanism.
        if hasattr(select, 'epoll'):
            if not SELECT_TYPE or SELECT_TYPE == 'epoll':
                LOGGER.debug('Using EPollPoller')
                poller = EPollPoller(**kwargs)
        if not poller and hasattr(select, 'kqueue'):
            if not SELECT_TYPE or SELECT_TYPE == 'kqueue':
                LOGGER.debug('Using KQueuePoller')
                poller = KQueuePoller(**kwargs)
        if (not poller and hasattr(select, 'poll') and
                hasattr(select.poll(), 'modify')):  # pylint: disable=E1101
            if not SELECT_TYPE or SELECT_TYPE == 'poll':
                LOGGER.debug('Using PollPoller')
                poller = PollPoller(**kwargs)
        if not poller:
            # select() is the universal fallback
            LOGGER.debug('Using SelectPoller')
            poller = SelectPoller(**kwargs)

        return poller

    def call_later(self, delay, callback):
        """Add the callback to the IOLoop timer to be called after delay seconds
        from the time of call on best-effort basis. Returns a handle to the
        timeout.

        :param float delay: The number of seconds to wait to call callback
        :param callable callback: The callback method
        :returns: handle to the created timeout that may be passed to
            `remove_timeout()`
        :rtype: object
        """
        return self._timer.call_later(delay, callback)

    def remove_timeout(self, timeout_handle):
        """Remove a timeout

        :param timeout_handle: Handle of timeout to remove
        """
        self._timer.remove_timeout(timeout_handle)

    def add_callback_threadsafe(self, callback):
        """Requests a call to the given function as soon as possible in the
        context of this IOLoop's thread.

        NOTE: This is the only thread-safe method in IOLoop. All other
        manipulations of IOLoop must be performed from the IOLoop's thread.

        For example, a thread may request a call to the `stop` method of an
        ioloop that is running in a different thread via
        `ioloop.add_callback_threadsafe(ioloop.stop)`

        :param callable callback: The callback method
        """
        if not callable(callback):
            raise TypeError(
                'callback must be a callable, but got %r' % (callback,))

        # NOTE: `deque.append` is atomic
        self._callbacks.append(callback)
        # Wake up the IOLoop which may be running in another thread
        self._poller.wake_threadsafe()

        LOGGER.debug('add_callback_threadsafe: added callback=%r', callback)

    # To satisfy `AbstractSelectorIOLoop` requirement
    add_callback = add_callback_threadsafe

    def process_timeouts(self):
        """[Extension] Process pending callbacks and timeouts, invoking those
        whose time has come. Internal use only.
        """
        # Avoid I/O starvation by postponing new callbacks to the next iteration
        for _ in pika.compat.xrange(len(self._callbacks)):
            callback = self._callbacks.popleft()
            LOGGER.debug('process_timeouts: invoking callback=%r', callback)
            callback()

        self._timer.process_timeouts()

    def _get_remaining_interval(self):
        """Get the remaining interval to the next callback or timeout
        expiration.

        :returns: non-negative number of seconds until next callback or timer
            expiration; None if there are no callbacks and timers
        :rtype: float
        """
        # Pending callbacks demand an immediate pass through the loop
        if self._callbacks:
            return 0

        return self._timer.get_remaining_interval()

    def add_handler(self, fd, handler, events):
        """Start watching the given file descriptor for events

        :param int fd: The file descriptor
        :param callable handler: When requested event(s) occur,
            `handler(fd, events)` will be called.
        :param int events: The event mask using READ, WRITE, ERROR.
        """
        self._poller.add_handler(fd, handler, events)

    def update_handler(self, fd, events):
        """Changes the events we watch for

        :param int fd: The file descriptor
        :param int events: The event mask using READ, WRITE, ERROR
        """
        self._poller.update_handler(fd, events)

    def remove_handler(self, fd):
        """Stop watching the given file descriptor for events

        :param int fd: The file descriptor
        """
        self._poller.remove_handler(fd)

    def start(self):
        """[API] Start the main poller loop. It will loop until requested to
        exit. See `IOLoop.stop`.
        """
        self._poller.start()

    def stop(self):
        """[API] Request exit from the ioloop. The loop is NOT guaranteed to
        stop before this method returns.

        To invoke `stop()` safely from a thread other than this IOLoop's
        thread, call it via `add_callback_threadsafe`; e.g.,
        `ioloop.add_callback_threadsafe(ioloop.stop)`
        """
        self._poller.stop()

    def activate_poller(self):
        """[Extension] Activate the poller
        """
        self._poller.activate_poller()

    def deactivate_poller(self):
        """[Extension] Deactivate the poller
        """
        self._poller.deactivate_poller()

    def poll(self):
        """[Extension] Wait for events of interest on registered file
        descriptors until an event of interest occurs or next timer deadline or
        `_PollerBase._MAX_POLL_TIMEOUT`, whichever is sooner, and dispatch the
        corresponding event handlers.
        """
        self._poller.poll()
class _PollerBase(pika.compat.AbstractBase):  # pylint: disable=R0902
    """Base class for select-based IOLoop implementations"""

    # Drop out of the poll loop every _MAX_POLL_TIMEOUT secs as a worst case;
    # this is only a backstop value; we will run timeouts when they are
    # scheduled.
    _MAX_POLL_TIMEOUT = 5

    # if the poller uses MS override with 1000
    POLL_TIMEOUT_MULT = 1

    def __init__(self, get_wait_seconds, process_timeouts):
        """
        :param get_wait_seconds: Function for getting the maximum number of
                                 seconds to wait for IO for use by the poller
        :param process_timeouts: Function for processing timeouts for use by the
                                 poller
        """
        self._get_wait_seconds = get_wait_seconds
        self._process_timeouts = process_timeouts

        # We guard access to the waking file descriptors to avoid races from
        # closing them while another thread is calling our `wake()` method.
        self._waking_mutex = threading.Lock()

        # fd-to-handler function mappings
        self._fd_handlers = dict()

        # event-to-fdset mappings
        self._fd_events = {
            PollEvents.READ: set(),
            PollEvents.WRITE: set(),
            PollEvents.ERROR: set()
        }

        # Map of fds/events currently being dispatched; entries are removed
        # from it when a handler unregisters an fd mid-dispatch (see
        # _dispatch_fd_events()).
        self._processing_fd_event_map = {}

        # Reentrancy tracker of the `start` method
        self._running = False
        self._stopping = False

        # Create ioloop-interrupt socket pair and register read handler.
        self._r_interrupt, self._w_interrupt = self._get_interrupt_pair()
        self.add_handler(self._r_interrupt.fileno(), self._read_interrupt,
                         PollEvents.READ)

    def close(self):
        """Release poller's resources.

        `close()` is intended to be called after the poller's `start()` method
        returns. After calling `close()`, no other interaction with the closed
        poller instance should be performed.
        """
        # Unregister and close ioloop-interrupt socket pair; mutual exclusion is
        # necessary to avoid race condition with `wake_threadsafe` executing in
        # another thread's context
        assert not self._running, 'Cannot call close() before start() unwinds.'

        with self._waking_mutex:
            if self._w_interrupt is not None:
                self.remove_handler(self._r_interrupt.fileno())  # pylint: disable=E1101
                self._r_interrupt.close()
                self._r_interrupt = None
                self._w_interrupt.close()
                self._w_interrupt = None

        self.deactivate_poller()

        # Eliminate potential reference cycles to aid garbage collection
        self._fd_handlers = None
        self._fd_events = None
        self._processing_fd_event_map = None

    def wake_threadsafe(self):
        """Wake up the poller as soon as possible. As the name indicates, this
        method is thread-safe.
        """
        with self._waking_mutex:
            if self._w_interrupt is None:
                # Poller already closed; nothing to wake.
                return

            try:
                # Send byte to interrupt the poll loop, use send() instead of
                # os.write for Windows compatibility
                self._w_interrupt.send(b'X')
            except pika.compat.SOCKET_ERROR as err:
                if err.errno != errno.EWOULDBLOCK:
                    raise
                # EWOULDBLOCK means unread wake byte(s) are already pending,
                # so the loop will wake anyway.
            except Exception as err:
                # There's nothing sensible to do here, we'll exit the interrupt
                # loop after POLL_TIMEOUT secs in worst case anyway.
                LOGGER.warning("Failed to send interrupt to poller: %s", err)
                raise

    def _get_max_wait(self):
        """Get the interval to the next timeout event, or a default interval

        :returns: maximum number of self.POLL_TIMEOUT_MULT-scaled time units
                  to wait for IO events
        :rtype: int
        """
        delay = self._get_wait_seconds()
        if delay is None:
            delay = self._MAX_POLL_TIMEOUT
        else:
            delay = min(delay, self._MAX_POLL_TIMEOUT)

        return delay * self.POLL_TIMEOUT_MULT

    def add_handler(self, fileno, handler, events):
        """Add a new fileno to the set to be monitored

        :param int fileno: The file descriptor
        :param callable handler: What is called when an event happens
        :param int events: The event mask using READ, WRITE, ERROR
        """
        self._fd_handlers[fileno] = handler
        self._set_handler_events(fileno, events)

        # Inform the derived class
        self._register_fd(fileno, events)

    def update_handler(self, fileno, events):
        """Set the events to the current events

        :param int fileno: The file descriptor
        :param int events: The event mask using READ, WRITE, ERROR
        """
        # Record the change
        events_cleared, events_set = self._set_handler_events(fileno, events)

        # Inform the derived class
        self._modify_fd_events(
            fileno,
            events=events,
            events_to_clear=events_cleared,
            events_to_set=events_set)

    def remove_handler(self, fileno):
        """Remove a file descriptor from the set

        :param int fileno: The file descriptor
        """
        # Suppress the remaining dispatch for this fd if removal happens
        # while its events are being processed; see _dispatch_fd_events().
        try:
            del self._processing_fd_event_map[fileno]
        except KeyError:
            pass

        events_cleared, _ = self._set_handler_events(fileno, 0)
        del self._fd_handlers[fileno]

        # Inform the derived class
        self._unregister_fd(fileno, events_to_clear=events_cleared)

    def _set_handler_events(self, fileno, events):
        """Set the handler's events to the given events; internal to
        `_PollerBase`.

        :param int fileno: The file descriptor
        :param int events: The event mask (READ, WRITE, ERROR)
        :returns: a 2-tuple (events_cleared, events_set)
        :rtype: tuple
        """
        events_cleared = 0
        events_set = 0

        for evt in (PollEvents.READ, PollEvents.WRITE, PollEvents.ERROR):
            if events & evt:
                if fileno not in self._fd_events[evt]:
                    self._fd_events[evt].add(fileno)
                    events_set |= evt
            else:
                if fileno in self._fd_events[evt]:
                    self._fd_events[evt].discard(fileno)
                    events_cleared |= evt

        return events_cleared, events_set

    def activate_poller(self):
        """Activate the poller
        """
        # Activate the underlying poller and register current events
        self._init_poller()

        # Replay current fd registrations into the freshly-created poller
        fd_to_events = collections.defaultdict(int)
        for event, file_descriptors in self._fd_events.items():
            for fileno in file_descriptors:
                fd_to_events[fileno] |= event

        for fileno, events in fd_to_events.items():
            self._register_fd(fileno, events)

    def deactivate_poller(self):
        """Deactivate the poller
        """
        self._uninit_poller()

    def start(self):
        """Start the main poller loop. It will loop until requested to exit.

        This method is not reentrant and will raise an error if called
        recursively (pika/pika#1095)

        :raises: RuntimeError
        """
        if self._running:
            raise RuntimeError('IOLoop is not reentrant and is already running')

        LOGGER.debug('Entering IOLoop')
        self._running = True
        self.activate_poller()

        try:
            # Run event loop
            while not self._stopping:
                self.poll()
                self._process_timeouts()
        finally:
            try:
                LOGGER.debug('Deactivating poller')
                self.deactivate_poller()
            finally:
                # Reset flags even on failure so the loop can be restarted.
                self._stopping = False
                self._running = False

    def stop(self):
        """Request exit from the ioloop. The loop is NOT guaranteed to stop
        before this method returns.
        """
        LOGGER.debug('Stopping IOLoop')
        self._stopping = True
        self.wake_threadsafe()

    @abc.abstractmethod
    def poll(self):
        """Wait for events on interested filedescriptors.
        """
        raise NotImplementedError

    @abc.abstractmethod
    def _init_poller(self):
        """Notify the implementation to allocate the poller resource"""
        raise NotImplementedError

    @abc.abstractmethod
    def _uninit_poller(self):
        """Notify the implementation to release the poller resource"""
        raise NotImplementedError

    @abc.abstractmethod
    def _register_fd(self, fileno, events):
        """The base class invokes this method to notify the implementation to
        register the file descriptor with the polling object. The request must
        be ignored if the poller is not activated.

        :param int fileno: The file descriptor
        :param int events: The event mask (READ, WRITE, ERROR)
        """
        raise NotImplementedError

    @abc.abstractmethod
    def _modify_fd_events(self, fileno, events, events_to_clear, events_to_set):
        """The base class invokes this method to notify the implementation to
        modify an already registered file descriptor. The request must be
        ignored if the poller is not activated.

        :param int fileno: The file descriptor
        :param int events: absolute events (READ, WRITE, ERROR)
        :param int events_to_clear: The events to clear (READ, WRITE, ERROR)
        :param int events_to_set: The events to set (READ, WRITE, ERROR)
        """
        raise NotImplementedError

    @abc.abstractmethod
    def _unregister_fd(self, fileno, events_to_clear):
        """The base class invokes this method to notify the implementation to
        unregister the file descriptor being tracked by the polling object. The
        request must be ignored if the poller is not activated.

        :param int fileno: The file descriptor
        :param int events_to_clear: The events to clear (READ, WRITE, ERROR)
        """
        raise NotImplementedError

    def _dispatch_fd_events(self, fd_event_map):
        """ Helper to dispatch callbacks for file descriptors that received
        events.

        Before doing so we re-calculate the event mask based on what is
        currently set in case it has been changed under our feet by a
        previous callback. We also store a reference to the fd_event_map
        so that we can detect removal of a fileno during processing of
        another callback and not generate spurious callbacks on it.

        :param dict fd_event_map: Map of fds to events received on them.
        """
        # Reset the prior map; if the call is nested, this will suppress the
        # remaining dispatch in the earlier call.
        self._processing_fd_event_map.clear()
        self._processing_fd_event_map = fd_event_map

        for fileno in pika.compat.dictkeys(fd_event_map):
            if fileno not in fd_event_map:
                # the fileno has been removed from the map under our feet.
                continue

            events = fd_event_map[fileno]
            # Drop event bits no longer being watched (a prior handler may
            # have called update_handler()).
            for evt in [PollEvents.READ, PollEvents.WRITE, PollEvents.ERROR]:
                if fileno not in self._fd_events[evt]:
                    events &= ~evt

            if events:
                handler = self._fd_handlers[fileno]
                handler(fileno, events)

    @staticmethod
    def _get_interrupt_pair():
        """ Use a socketpair to be able to interrupt the ioloop if called
        from another thread. Socketpair() is not supported on some OS (Win)
        so use a pair of simple TCP sockets instead. The sockets will be
        closed and garbage collected by python when the ioloop itself is.
        """
        return pika.compat._nonblocking_socketpair()  # pylint: disable=W0212

    def _read_interrupt(self, _interrupt_fd, _events):
        """ Read the interrupt byte(s). We ignore the event mask as we can only
        get here if there's data to be read on our fd.

        :param int _interrupt_fd: (unused) The file descriptor to read from
        :param int _events: (unused) The events generated for this fd
        """
        try:
            # NOTE Use recv instead of os.read for windows compatibility
            self._r_interrupt.recv(512)  # pylint: disable=E1101
        except pika.compat.SOCKET_ERROR as err:
            if err.errno != errno.EAGAIN:
                raise
            # EAGAIN just means another reader drained the socket first.
class SelectPoller(_PollerBase):
    """Poller implemented on top of `select.select`, the most widely
    supported mechanism; it also has all the methods needed by the child
    classes. Subclasses generally only need to override a few hooks to use
    a faster facility.
    """
    # if the poller uses MS specify 1000
    POLL_TIMEOUT_MULT = 1

    def poll(self):
        """Wait for events of interest on registered file descriptors until an
        event of interest occurs or next timer deadline or _MAX_POLL_TIMEOUT,
        whichever is sooner, and dispatch the corresponding event handlers.
        """
        while True:
            try:
                watched = self._fd_events
                if (watched[PollEvents.READ] or watched[PollEvents.WRITE] or
                        watched[PollEvents.ERROR]):
                    rlist, wlist, xlist = select.select(
                        watched[PollEvents.READ], watched[PollEvents.WRITE],
                        watched[PollEvents.ERROR], self._get_max_wait())
                else:
                    # NOTE When called without any FDs, select fails on
                    # Windows with error 10022, 'An invalid argument was
                    # supplied'; just sleep out the interval instead.
                    time.sleep(self._get_max_wait())
                    rlist, wlist, xlist = [], [], []
                break
            except _SELECT_ERRORS as exc:
                if _is_resumable(exc):
                    continue
                raise

        # Fold the three result lists into an fd -> event-bitmask map
        fd_event_map = collections.defaultdict(int)
        for evt, ready_fds in ((PollEvents.READ, rlist),
                               (PollEvents.WRITE, wlist),
                               (PollEvents.ERROR, xlist)):
            for fileno in ready_fds:
                fd_event_map[fileno] |= evt

        self._dispatch_fd_events(fd_event_map)

    def _init_poller(self):
        """Notify the implementation to allocate the poller resource"""
        # select() needs no persistent state, so there is nothing to allocate.

    def _uninit_poller(self):
        """Notify the implementation to release the poller resource"""
        # Nothing was allocated, so there is nothing to release.

    def _register_fd(self, fileno, events):
        """The base class invokes this method to notify the implementation to
        register the file descriptor with the polling object. The request must
        be ignored if the poller is not activated.

        :param int fileno: The file descriptor
        :param int events: The event mask using READ, WRITE, ERROR
        """
        # The base class's fd/event bookkeeping is all that select() needs.

    def _modify_fd_events(self, fileno, events, events_to_clear, events_to_set):
        """The base class invokes this method to notify the implementation to
        modify an already registered file descriptor. The request must be
        ignored if the poller is not activated.

        :param int fileno: The file descriptor
        :param int events: absolute events (READ, WRITE, ERROR)
        :param int events_to_clear: The events to clear (READ, WRITE, ERROR)
        :param int events_to_set: The events to set (READ, WRITE, ERROR)
        """
        # The base class's fd/event bookkeeping is all that select() needs.

    def _unregister_fd(self, fileno, events_to_clear):
        """The base class invokes this method to notify the implementation to
        unregister the file descriptor being tracked by the polling object. The
        request must be ignored if the poller is not activated.

        :param int fileno: The file descriptor
        :param int events_to_clear: The events to clear (READ, WRITE, ERROR)
        """
        # The base class's fd/event bookkeeping is all that select() needs.
class KQueuePoller(_PollerBase):
    # pylint: disable=E1101
    """KQueuePoller works on BSD based systems and is faster than select"""

    def __init__(self, get_wait_seconds, process_timeouts):
        """Create an instance of the KQueuePoller
        """
        # The kqueue object is allocated lazily by _init_poller()
        self._kqueue = None
        super(KQueuePoller, self).__init__(get_wait_seconds, process_timeouts)

    @staticmethod
    def _map_event(kevent):
        """return the event type associated with a kevent object

        :param kevent kevent: a kevent object as returned by kqueue.control()
        """
        mask = 0

        if kevent.filter == select.KQ_FILTER_READ:
            mask = PollEvents.READ
        elif kevent.filter == select.KQ_FILTER_WRITE:
            mask = PollEvents.WRITE
            if kevent.flags & select.KQ_EV_EOF:
                # May be set when the peer reader disconnects. We don't check
                # KQ_EV_EOF for KQ_FILTER_READ because in that case it may be
                # set before the remaining data is consumed from sockbuf.
                mask |= PollEvents.ERROR
        elif kevent.flags & select.KQ_EV_ERROR:
            mask = PollEvents.ERROR
        else:
            # Unknown filter/flag combination; log loudly but return 0 so the
            # event is simply ignored.
            LOGGER.critical('Unexpected kevent: %s', kevent)

        return mask

    def poll(self):
        """Wait for events of interest on registered file descriptors until an
        event of interest occurs or next timer deadline or _MAX_POLL_TIMEOUT,
        whichever is sooner, and dispatch the corresponding event handlers.
        """
        while True:
            try:
                # Retrieve up to 1000 ready kevents per pass
                kevents = self._kqueue.control(None, 1000,
                                               self._get_max_wait())
                break
            except _SELECT_ERRORS as error:
                # Retry on EINTR-equivalent interruptions
                if _is_resumable(error):
                    continue
                else:
                    raise

        fd_event_map = collections.defaultdict(int)
        for event in kevents:
            fd_event_map[event.ident] |= self._map_event(event)

        self._dispatch_fd_events(fd_event_map)

    def _init_poller(self):
        """Notify the implementation to allocate the poller resource"""
        assert self._kqueue is None

        self._kqueue = select.kqueue()

    def _uninit_poller(self):
        """Notify the implementation to release the poller resource"""
        if self._kqueue is not None:
            self._kqueue.close()
            self._kqueue = None

    def _register_fd(self, fileno, events):
        """The base class invokes this method to notify the implementation to
        register the file descriptor with the polling object. The request must
        be ignored if the poller is not activated.

        :param int fileno: The file descriptor
        :param int events: The event mask using READ, WRITE, ERROR
        """
        # Registration is just a modification that only sets bits.
        self._modify_fd_events(
            fileno, events=events, events_to_clear=0, events_to_set=events)

    def _modify_fd_events(self, fileno, events, events_to_clear, events_to_set):
        """The base class invokes this method to notify the implementation to
        modify an already registered file descriptor. The request must be
        ignored if the poller is not activated.

        :param int fileno: The file descriptor
        :param int events: absolute events (READ, WRITE, ERROR)
        :param int events_to_clear: The events to clear (READ, WRITE, ERROR)
        :param int events_to_set: The events to set (READ, WRITE, ERROR)
        """
        if self._kqueue is None:
            # Poller is not activated; ignore the request.
            return

        # Translate event-mask deltas into kevent add/delete operations
        kevents = list()
        if events_to_clear & PollEvents.READ:
            kevents.append(
                select.kevent(
                    fileno,
                    filter=select.KQ_FILTER_READ,
                    flags=select.KQ_EV_DELETE))
        if events_to_set & PollEvents.READ:
            kevents.append(
                select.kevent(
                    fileno,
                    filter=select.KQ_FILTER_READ,
                    flags=select.KQ_EV_ADD))
        if events_to_clear & PollEvents.WRITE:
            kevents.append(
                select.kevent(
                    fileno,
                    filter=select.KQ_FILTER_WRITE,
                    flags=select.KQ_EV_DELETE))
        if events_to_set & PollEvents.WRITE:
            kevents.append(
                select.kevent(
                    fileno,
                    filter=select.KQ_FILTER_WRITE,
                    flags=select.KQ_EV_ADD))

        self._kqueue.control(kevents, 0)

    def _unregister_fd(self, fileno, events_to_clear):
        """The base class invokes this method to notify the implementation to
        unregister the file descriptor being tracked by the polling object. The
        request must be ignored if the poller is not activated.

        :param int fileno: The file descriptor
        :param int events_to_clear: The events to clear (READ, WRITE, ERROR)
        """
        # Unregistration is just a modification that only clears bits.
        self._modify_fd_events(
            fileno, events=0, events_to_clear=events_to_clear, events_to_set=0)
class PollPoller(_PollerBase):
    """Poll works on Linux and can have better performance than EPoll in
    certain scenarios. Both are faster than select.
    """
    # poll() takes its timeout in milliseconds
    POLL_TIMEOUT_MULT = 1000

    def __init__(self, get_wait_seconds, process_timeouts):
        """Create an instance of the PollPoller

        :param get_wait_seconds: Function for getting the maximum number of
            seconds to wait for IO for use by the poller
        :param process_timeouts: Function for processing timeouts for use by
            the poller
        """
        # The poll object is allocated lazily by _init_poller()
        self._poll = None
        super(PollPoller, self).__init__(get_wait_seconds, process_timeouts)

    @staticmethod
    def _create_poller():
        """Instantiate the underlying polling object.

        :rtype: `select.poll`
        """
        return select.poll()  # pylint: disable=E1101

    def poll(self):
        """Wait for events of interest on registered file descriptors until an
        event of interest occurs or next timer deadline or _MAX_POLL_TIMEOUT,
        whichever is sooner, and dispatch the corresponding event handlers.
        """
        while True:
            try:
                raw_events = self._poll.poll(self._get_max_wait())
                break
            except _SELECT_ERRORS as exc:
                if not _is_resumable(exc):
                    raise

        fd_event_map = collections.defaultdict(int)
        for fileno, event in raw_events:
            # NOTE: On OS X, when poll() sets POLLHUP, it's mutually-exclusive
            # with POLLOUT and it doesn't seem to set POLLERR along with
            # POLLHUP when socket connection fails, for example. So, we need
            # to at least add POLLERR when we see POLLHUP
            if pika.compat.ON_OSX and (event & select.POLLHUP):
                event |= select.POLLERR
            fd_event_map[fileno] |= event

        self._dispatch_fd_events(fd_event_map)

    def _init_poller(self):
        """Notify the implementation to allocate the poller resource"""
        assert self._poll is None

        self._poll = self._create_poller()

    def _uninit_poller(self):
        """Notify the implementation to release the poller resource"""
        if self._poll is not None:
            # Some implementations (e.g. epoll) hold an fd that must be
            # closed explicitly; plain poll objects have no close().
            if hasattr(self._poll, "close"):
                self._poll.close()

            self._poll = None

    def _register_fd(self, fileno, events):
        """Register `fileno` with the poll object; the request is ignored
        while the poller is deactivated.

        :param int fileno: The file descriptor
        :param int events: The event mask using READ, WRITE, ERROR
        """
        if self._poll is not None:
            self._poll.register(fileno, events)

    def _modify_fd_events(self, fileno, events, events_to_clear, events_to_set):
        """Update the events watched for an already-registered `fileno`; the
        request is ignored while the poller is deactivated.

        :param int fileno: The file descriptor
        :param int events: absolute events (READ, WRITE, ERROR)
        :param int events_to_clear: The events to clear (READ, WRITE, ERROR)
        :param int events_to_set: The events to set (READ, WRITE, ERROR)
        """
        if self._poll is not None:
            # poll's modify() takes the absolute mask; the deltas are unused.
            self._poll.modify(fileno, events)

    def _unregister_fd(self, fileno, events_to_clear):
        """Stop watching `fileno`; the request is ignored while the poller is
        deactivated.

        :param int fileno: The file descriptor
        :param int events_to_clear: The events to clear (READ, WRITE, ERROR)
        """
        if self._poll is not None:
            self._poll.unregister(fileno)
class EPollPoller(PollPoller):
    """EPoll works on Linux and can have better performance than Poll in
    certain scenarios. Both are faster than select.
    """
    # epoll takes its timeout in seconds (unlike poll's milliseconds)
    POLL_TIMEOUT_MULT = 1

    @staticmethod
    def _create_poller():
        """Instantiate the underlying polling object.

        :rtype: `select.epoll`
        """
        # Fix: the docstring previously claimed `select.poll`, but this
        # override returns an epoll object.
        return select.epoll()  # pylint: disable=E1101
| {
"repo_name": "pika/pika",
"path": "pika/adapters/select_connection.py",
"copies": "1",
"size": "45161",
"license": "bsd-3-clause",
"hash": -8919746841868658000,
"line_mean": 34.6440410418,
"line_max": 88,
"alpha_frac": 0.6040388831,
"autogenerated": false,
"ratio": 4.567253236245954,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0002228886947278618,
"num_lines": 1267
} |
"""A connection adapter that tries to use the best polling method for the
platform pika is running on.
"""
import abc
import os
import logging
import socket
import select
import errno
import time
from collections import defaultdict
import threading
import pika.compat
from pika.compat import dictkeys
from pika.adapters.base_connection import BaseConnection
LOGGER = logging.getLogger(__name__)

# One of select, epoll, kqueue or poll; None lets IOLoop pick the best
# available mechanism, a string forces that specific one.
SELECT_TYPE = None

# Use epoll's constants to keep life easy
READ = 0x0001
WRITE = 0x0004
ERROR = 0x0008

# Reason for this unconventional dict initialization is the fact that on some
# platforms select.error is an alias for OSError. We don't want the lambda
# for select.error to win over one for OSError.
_SELECT_ERROR_CHECKERS = {}
if pika.compat.PY3:
    # InterruptedError is undefined in PY2
    # pylint: disable=E0602
    _SELECT_ERROR_CHECKERS[InterruptedError] = lambda e: True
_SELECT_ERROR_CHECKERS[select.error] = lambda e: e.args[0] == errno.EINTR
_SELECT_ERROR_CHECKERS[IOError] = lambda e: e.errno == errno.EINTR
_SELECT_ERROR_CHECKERS[OSError] = lambda e: e.errno == errno.EINTR

# We can reduce the number of elements in the list by looking at super-sub
# class relationship because only the most generic ones needs to be caught.
# For now the optimization is left out.
# Following is better but still incomplete.
#_SELECT_ERRORS = tuple(filter(lambda e: not isinstance(e, OSError),
#                              _SELECT_ERROR_CHECKERS.keys())
#                       + [OSError])
_SELECT_ERRORS = tuple(_SELECT_ERROR_CHECKERS.keys())
def _is_resumable(exc):
    '''Check whether the caught exception represents an EINTR error that may
    simply be retried.

    :param exc: exception; must be one of classes in _SELECT_ERRORS '''
    checker = _SELECT_ERROR_CHECKERS.get(exc.__class__)
    # Unknown exception classes are never considered resumable.
    return False if checker is None else checker(exc)
class SelectConnection(BaseConnection):
    """An asynchronous connection adapter that attempts to use the fastest
    event loop adapter for the given platform.
    """

    def __init__(self,  # pylint: disable=R0913
                 parameters=None,
                 on_open_callback=None,
                 on_open_error_callback=None,
                 on_close_callback=None,
                 stop_ioloop_on_close=True,
                 custom_ioloop=None):
        """Create a new instance of the Connection object.

        :param pika.connection.Parameters parameters: Connection parameters
        :param method on_open_callback: Method to call on connection open
        :param method on_open_error_callback: Called if the connection can't
            be established: on_open_error_callback(connection, str|exception)
        :param method on_close_callback: Called when the connection is closed:
            on_close_callback(connection, reason_code, reason_text)
        :param bool stop_ioloop_on_close: Call ioloop.stop() if disconnected
        :param custom_ioloop: Override using the global IOLoop in Tornado
        :raises: RuntimeError
        """
        # Use the caller's ioloop if supplied, otherwise construct a
        # platform-appropriate one.
        event_loop = custom_ioloop or IOLoop()
        super(SelectConnection, self).__init__(parameters, on_open_callback,
                                               on_open_error_callback,
                                               on_close_callback, event_loop,
                                               stop_ioloop_on_close)

    def _adapter_connect(self):
        """Connect to the RabbitMQ broker, returning True on success, False
        on failure.

        :rtype: bool
        """
        err = super(SelectConnection, self)._adapter_connect()
        if not err:
            # Only start watching the socket once the transport is up.
            self.ioloop.add_handler(self.socket.fileno(), self._handle_events,
                                    self.event_state)
        return err

    def _adapter_disconnect(self):
        """Disconnect from the RabbitMQ broker"""
        if self.socket:
            # Stop watching before the base class tears the socket down.
            self.ioloop.remove_handler(self.socket.fileno())
        super(SelectConnection, self)._adapter_disconnect()
class IOLoop(object):
    """Singleton wrapper that decides which type of poller to use, creates an
    instance of it in start_poller and keeps the invoking application in a
    blocking state by calling the pollers start method. Poller should keep
    looping until IOLoop.instance().stop() is called or there is a socket
    error.

    Passes through all operations to the loaded poller object.
    """

    def __init__(self):
        # Choose and instantiate the best available poller; every public
        # method below simply delegates to it.
        self._poller = self._get_poller()
@staticmethod
def _get_poller():
    """Determine the best poller to use for this environment and return a
    new instance of it."""

    def _acceptable(name):
        # Honor a forced SELECT_TYPE; otherwise any mechanism is fine.
        return not SELECT_TYPE or SELECT_TYPE == name

    poller = None
    if hasattr(select, 'epoll') and _acceptable('epoll'):
        LOGGER.debug('Using EPollPoller')
        poller = EPollPoller()
    if poller is None and hasattr(select, 'kqueue') and _acceptable('kqueue'):
        LOGGER.debug('Using KQueuePoller')
        poller = KQueuePoller()
    if (poller is None and hasattr(select, 'poll') and
            hasattr(select.poll(), 'modify') and  # pylint: disable=E1101
            _acceptable('poll')):
        LOGGER.debug('Using PollPoller')
        poller = PollPoller()
    if poller is None:
        # select() is the universal fallback
        LOGGER.debug('Using SelectPoller')
        poller = SelectPoller()
    return poller
def add_timeout(self, deadline, callback_method):
"""[API] Add the callback_method to the IOLoop timer to fire after
deadline seconds. Returns a handle to the timeout. Do not confuse with
Tornado's timeout where you pass in the time you want to have your
callback called. Only pass in the seconds until it's to be called.
:param int deadline: The number of seconds to wait to call callback
:param method callback_method: The callback method
:rtype: str
"""
return self._poller.add_timeout(deadline, callback_method)
def remove_timeout(self, timeout_id):
"""[API] Remove a timeout if it's still in the timeout stack
:param str timeout_id: The timeout id to remove
"""
self._poller.remove_timeout(timeout_id)
def add_handler(self, fileno, handler, events):
"""[API] Add a new fileno to the set to be monitored
:param int fileno: The file descriptor
:param method handler: What is called when an event happens
:param int events: The event mask using READ, WRITE, ERROR
"""
self._poller.add_handler(fileno, handler, events)
def update_handler(self, fileno, events):
"""[API] Set the events to the current events
:param int fileno: The file descriptor
:param int events: The event mask using READ, WRITE, ERROR
"""
self._poller.update_handler(fileno, events)
def remove_handler(self, fileno):
"""[API] Remove a file descriptor from the set
:param int fileno: The file descriptor
"""
self._poller.remove_handler(fileno)
def start(self):
"""[API] Start the main poller loop. It will loop until requested to
exit. See `IOLoop.stop`.
"""
self._poller.start()
def stop(self):
"""[API] Request exit from the ioloop. The loop is NOT guaranteed to
stop before this method returns. This is the only method that may be
called from another thread.
"""
self._poller.stop()
def process_timeouts(self):
"""[Extension] Process pending timeouts, invoking callbacks for those
whose time has come
"""
self._poller.process_timeouts()
def activate_poller(self):
"""[Extension] Activate the poller
"""
self._poller.activate_poller()
def deactivate_poller(self):
"""[Extension] Deactivate the poller
"""
self._poller.deactivate_poller()
def poll(self):
"""[Extension] Wait for events of interest on registered file
descriptors until an event of interest occurs or next timer deadline or
`_PollerBase._MAX_POLL_TIMEOUT`, whichever is sooner, and dispatch the
corresponding event handlers.
"""
self._poller.poll()
# Define a base class for deriving abstract base classes for compatibility
# between python 2 and 3 (metaclass syntax changed in Python 3). Ideally, would
# use `@six.add_metaclass` or `six.with_metaclass`, but pika traditionally has
# resisted external dependencies in its core code.
if pika.compat.PY2:
    class _AbstractBase(object): # pylint: disable=R0903
        """PY2 Abstract base for _PollerBase class"""
        __metaclass__ = abc.ABCMeta
else:
    # NOTE: Wrapping in exec, because
    # `class _AbstractBase(metaclass=abc.ABCMeta)` fails to load on python 2:
    # the keyword-argument class header is a SyntaxError there, so the PY3
    # spelling must be hidden from the PY2 parser inside a string.
    exec('class _AbstractBase(metaclass=abc.ABCMeta): pass') # pylint: disable=W0122
class _PollerBase(_AbstractBase): # pylint: disable=R0902
    """Base class for select-based IOLoop implementations"""

    # Drop out of the poll loop every _MAX_POLL_TIMEOUT secs as a worst case;
    # this is only a backstop value; we will run timeouts when they are
    # scheduled.
    _MAX_POLL_TIMEOUT = 5

    # if the poller uses MS override with 1000
    POLL_TIMEOUT_MULT = 1

    def __init__(self):
        # fd-to-handler function mappings
        self._fd_handlers = dict()

        # event-to-fdset mappings
        self._fd_events = {READ: set(), WRITE: set(), ERROR: set()}

        # Reference to the fd/event map being dispatched right now; cleared
        # entries suppress callbacks for handlers removed mid-dispatch (see
        # _dispatch_fd_events and remove_handler)
        self._processing_fd_event_map = {}

        # Reentrancy tracker of the `start` method
        self._start_nesting_levels = 0

        # timeout_id -> {'deadline': <abs time>, 'callback': <method>}
        self._timeouts = {}
        # Cached earliest deadline; None means "recompute from _timeouts"
        self._next_timeout = None

        self._stopping = False

        # Mutex for controlling critical sections where ioloop-interrupt sockets
        # are created, used, and destroyed. Needed in case `stop()` is called
        # from a thread.
        self._mutex = threading.Lock()

        # ioloop-interrupt socket pair; initialized in start()
        self._r_interrupt = None
        self._w_interrupt = None

    def add_timeout(self, deadline, callback_method):
        """Add the callback_method to the IOLoop timer to fire after deadline
        seconds. Returns a handle to the timeout. Do not confuse with
        Tornado's timeout where you pass in the time you want to have your
        callback called. Only pass in the seconds until it's to be called.

        :param int deadline: The number of seconds to wait to call callback
        :param method callback_method: The callback method
        :rtype: str

        """
        timeout_at = time.time() + deadline
        value = {'deadline': timeout_at, 'callback': callback_method}
        # NOTE: the id is a hash of the (deadline, callback) pair, so two
        # identical registrations would collide and share a single entry
        timeout_id = hash(frozenset(value.items()))
        self._timeouts[timeout_id] = value

        if not self._next_timeout or timeout_at < self._next_timeout:
            self._next_timeout = timeout_at

        LOGGER.debug('add_timeout: added timeout %s; deadline=%s at %s',
                     timeout_id, deadline, timeout_at)
        return timeout_id

    def remove_timeout(self, timeout_id):
        """Remove a timeout if it's still in the timeout stack

        :param str timeout_id: The timeout id to remove

        """
        try:
            timeout = self._timeouts.pop(timeout_id)
        except KeyError:
            LOGGER.warning('remove_timeout: %s not found', timeout_id)
        else:
            if timeout['deadline'] == self._next_timeout:
                # Invalidate the cached deadline; _get_next_deadline will
                # recompute it from the remaining timeouts
                self._next_timeout = None

            LOGGER.debug('remove_timeout: removed %s', timeout_id)

    def _get_next_deadline(self):
        """Get the interval to the next timeout event, or a default interval

        """
        if self._next_timeout:
            timeout = max((self._next_timeout - time.time(), 0))

        elif self._timeouts:
            # Cache was invalidated; recompute the earliest deadline
            deadlines = [t['deadline'] for t in self._timeouts.values()]
            self._next_timeout = min(deadlines)
            timeout = max((self._next_timeout - time.time(), 0))

        else:
            # No timeouts pending; wake up after the backstop interval
            timeout = self._MAX_POLL_TIMEOUT

        timeout = min((timeout, self._MAX_POLL_TIMEOUT))
        return timeout * self.POLL_TIMEOUT_MULT

    def process_timeouts(self):
        """Process pending timeouts, invoking callbacks for those whose time has
        come

        """
        now = time.time()
        # Run the timeouts in order of deadlines. Although this shouldn't
        # be strictly necessary it preserves old behaviour when timeouts
        # were only run periodically.
        to_run = sorted([(k, timer) for (k, timer) in self._timeouts.items()
                         if timer['deadline'] <= now],
                        key=lambda item: item[1]['deadline'])

        for k, timer in to_run:

            if k not in self._timeouts:
                # Previous invocation(s) should have deleted the timer.
                continue
            try:
                timer['callback']()
            finally:
                # Don't do 'del self._timeout[k]' as the key might
                # have been deleted just now.
                if self._timeouts.pop(k, None) is not None:
                    self._next_timeout = None

    def add_handler(self, fileno, handler, events):
        """Add a new fileno to the set to be monitored

        :param int fileno: The file descriptor
        :param method handler: What is called when an event happens
        :param int events: The event mask using READ, WRITE, ERROR

        """
        self._fd_handlers[fileno] = handler
        self._set_handler_events(fileno, events)

        # Inform the derived class
        self._register_fd(fileno, events)

    def update_handler(self, fileno, events):
        """Set the events to the current events

        :param int fileno: The file descriptor
        :param int events: The event mask using READ, WRITE, ERROR

        """
        # Record the change
        events_cleared, events_set = self._set_handler_events(fileno, events)

        # Inform the derived class
        self._modify_fd_events(fileno,
                               events=events,
                               events_to_clear=events_cleared,
                               events_to_set=events_set)

    def remove_handler(self, fileno):
        """Remove a file descriptor from the set

        :param int fileno: The file descriptor

        """
        # Suppress any still-pending dispatch for this fd if we are currently
        # inside _dispatch_fd_events
        try:
            del self._processing_fd_event_map[fileno]
        except KeyError:
            pass

        events_cleared, _ = self._set_handler_events(fileno, 0)
        del self._fd_handlers[fileno]

        # Inform the derived class
        self._unregister_fd(fileno, events_to_clear=events_cleared)

    def _set_handler_events(self, fileno, events):
        """Set the handler's events to the given events; internal to
        `_PollerBase`.

        :param int fileno: The file descriptor
        :param int events: The event mask (READ, WRITE, ERROR)

        :returns: a 2-tuple (events_cleared, events_set)

        """
        events_cleared = 0
        events_set = 0

        for evt in (READ, WRITE, ERROR):
            if events & evt:
                if fileno not in self._fd_events[evt]:
                    self._fd_events[evt].add(fileno)
                    events_set |= evt
            else:
                if fileno in self._fd_events[evt]:
                    self._fd_events[evt].discard(fileno)
                    events_cleared |= evt

        return events_cleared, events_set

    def activate_poller(self):
        """Activate the poller

        """
        # Activate the underlying poller and register current events
        self._init_poller()
        fd_to_events = defaultdict(int)
        for event, file_descriptors in self._fd_events.items():
            for fileno in file_descriptors:
                fd_to_events[fileno] |= event

        for fileno, events in fd_to_events.items():
            self._register_fd(fileno, events)

    def deactivate_poller(self):
        """Deactivate the poller

        """
        self._uninit_poller()

    def start(self):
        """Start the main poller loop. It will loop until requested to exit

        """
        self._start_nesting_levels += 1

        if self._start_nesting_levels == 1:
            LOGGER.debug('Entering IOLoop')
            self._stopping = False

            # Activate the underlying poller and register current events
            self.activate_poller()

            # Create ioloop-interrupt socket pair and register read handler.
            # NOTE: we defer their creation because some users (e.g.,
            # BlockingConnection adapter) don't use the event loop and these
            # sockets would get reported as leaks
            with self._mutex:
                assert self._r_interrupt is None
                self._r_interrupt, self._w_interrupt = self._get_interrupt_pair()
                self.add_handler(self._r_interrupt.fileno(),
                                 self._read_interrupt,
                                 READ)
        else:
            LOGGER.debug('Reentering IOLoop at nesting level=%s',
                         self._start_nesting_levels)

        try:
            # Run event loop
            while not self._stopping:
                self.poll()
                self.process_timeouts()

        finally:
            self._start_nesting_levels -= 1

            if self._start_nesting_levels == 0:
                LOGGER.debug('Cleaning up IOLoop')
                # Unregister and close ioloop-interrupt socket pair
                with self._mutex:
                    self.remove_handler(self._r_interrupt.fileno())
                    self._r_interrupt.close()
                    self._r_interrupt = None
                    self._w_interrupt.close()
                    self._w_interrupt = None

                # Deactivate the underlying poller
                self.deactivate_poller()
            else:
                LOGGER.debug('Leaving IOLoop with %s nesting levels remaining',
                             self._start_nesting_levels)

    def stop(self):
        """Request exit from the ioloop. The loop is NOT guaranteed to stop
        before this method returns. This is the only method that may be called
        from another thread.

        """
        LOGGER.debug('Stopping IOLoop')
        self._stopping = True

        with self._mutex:
            if self._w_interrupt is None:
                # Event loop not running (or already cleaned up); the flag
                # alone is enough
                return

            try:
                # Send byte to interrupt the poll loop, use write() for
                # consistency.
                os.write(self._w_interrupt.fileno(), b'X')
            except OSError as err:
                if err.errno != errno.EWOULDBLOCK:
                    raise
            except Exception as err:
                # There's nothing sensible to do here, we'll exit the interrupt
                # loop after POLL_TIMEOUT secs in worst case anyway.
                LOGGER.warning("Failed to send ioloop interrupt: %s", err)
                raise

    @abc.abstractmethod
    def poll(self):
        """Wait for events on interested filedescriptors.

        """
        raise NotImplementedError

    @abc.abstractmethod
    def _init_poller(self):
        """Notify the implementation to allocate the poller resource"""
        raise NotImplementedError

    @abc.abstractmethod
    def _uninit_poller(self):
        """Notify the implementation to release the poller resource"""
        raise NotImplementedError

    @abc.abstractmethod
    def _register_fd(self, fileno, events):
        """The base class invokes this method to notify the implementation to
        register the file descriptor with the polling object. The request must
        be ignored if the poller is not activated.

        :param int fileno: The file descriptor
        :param int events: The event mask (READ, WRITE, ERROR)
        """
        raise NotImplementedError

    @abc.abstractmethod
    def _modify_fd_events(self, fileno, events, events_to_clear, events_to_set):
        """The base class invokes this method to notify the implementation to
        modify an already registered file descriptor. The request must be
        ignored if the poller is not activated.

        :param int fileno: The file descriptor
        :param int events: absolute events (READ, WRITE, ERROR)
        :param int events_to_clear: The events to clear (READ, WRITE, ERROR)
        :param int events_to_set: The events to set (READ, WRITE, ERROR)
        """
        raise NotImplementedError

    @abc.abstractmethod
    def _unregister_fd(self, fileno, events_to_clear):
        """The base class invokes this method to notify the implementation to
        unregister the file descriptor being tracked by the polling object. The
        request must be ignored if the poller is not activated.

        :param int fileno: The file descriptor
        :param int events_to_clear: The events to clear (READ, WRITE, ERROR)
        """
        raise NotImplementedError

    def _dispatch_fd_events(self, fd_event_map):
        """ Helper to dispatch callbacks for file descriptors that received
        events.

        Before doing so we re-calculate the event mask based on what is
        currently set in case it has been changed under our feet by a
        previous callback. We also store a reference to the
        fd_event_map so that we can detect removal of a
        fileno during processing of another callback and not generate
        spurious callbacks on it.

        :param dict fd_event_map: Map of fds to events received on them.
        """
        # Reset the prior map; if the call is nested, this will suppress the
        # remaining dispatch in the earlier call.
        self._processing_fd_event_map.clear()
        self._processing_fd_event_map = fd_event_map

        for fileno in dictkeys(fd_event_map):
            if fileno not in fd_event_map:
                # the fileno has been removed from the map under our feet.
                continue

            events = fd_event_map[fileno]
            for evt in [READ, WRITE, ERROR]:
                if fileno not in self._fd_events[evt]:
                    # Mask out events the handler no longer wants
                    events &= ~evt

            if events:
                handler = self._fd_handlers[fileno]
                handler(fileno, events)

    @staticmethod
    def _get_interrupt_pair():
        """ Use a socketpair to be able to interrupt the ioloop if called
        from another thread. Socketpair() is not supported on some OS (Win)
        so use a pair of simple UDP sockets instead. The sockets will be
        closed and garbage collected by python when the ioloop itself is.
        """
        try:
            read_sock, write_sock = socket.socketpair()

        except AttributeError:
            # Platform without socketpair(): emulate with connected UDP pair
            LOGGER.debug("Using custom socketpair for interrupt")
            read_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            read_sock.bind(('localhost', 0))
            write_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            write_sock.connect(read_sock.getsockname())

        read_sock.setblocking(0)
        write_sock.setblocking(0)
        return read_sock, write_sock

    @staticmethod
    def _read_interrupt(interrupt_fd, events): # pylint: disable=W0613
        """ Read the interrupt byte(s). We ignore the event mask as we can only
        get here if there's data to be read on our fd.

        :param int interrupt_fd: The file descriptor to read from
        :param int events: (unused) The events generated for this fd
        """
        try:
            os.read(interrupt_fd, 512)
        except OSError as err:
            if err.errno != errno.EAGAIN:
                raise
class SelectPoller(_PollerBase):
    """Poller backed by plain ``select.select``, the most widely supported
    mechanism. Subclasses only need to override the hooks whose behavior
    differs for their polling syscall.

    """
    # if the poller uses MS specify 1000
    POLL_TIMEOUT_MULT = 1

    def __init__(self):
        """Create an instance of the SelectPoller

        """
        super(SelectPoller, self).__init__()

    def poll(self):
        """Wait for events of interest on registered file descriptors until an
        event of interest occurs or next timer deadline or _MAX_POLL_TIMEOUT,
        whichever is sooner, and dispatch the corresponding event handlers.

        """
        while True:
            try:
                rlist, wlist, xlist = select.select(self._fd_events[READ],
                                                    self._fd_events[WRITE],
                                                    self._fd_events[ERROR],
                                                    self._get_next_deadline())
            except _SELECT_ERRORS as error:
                # Retry on interrupted system calls; anything else is fatal
                if _is_resumable(error):
                    continue
                raise
            else:
                break

        # Collapse the three ready-lists into a fileno -> event-mask map
        fd_event_map = defaultdict(int)
        for evt, ready in ((READ, rlist), (WRITE, wlist), (ERROR, xlist)):
            for fileno in ready:
                fd_event_map[fileno] |= evt

        self._dispatch_fd_events(fd_event_map)

    def _init_poller(self):
        """No poller resource to allocate: select() has no persistent state."""

    def _uninit_poller(self):
        """No poller resource to release: select() has no persistent state."""

    def _register_fd(self, fileno, events):
        """No-op: select() reads the base class fd sets on every poll.

        :param int fileno: The file descriptor
        :param int events: The event mask using READ, WRITE, ERROR
        """

    def _modify_fd_events(self, fileno, events, events_to_clear, events_to_set):
        """No-op: select() reads the base class fd sets on every poll.

        :param int fileno: The file descriptor
        :param int events: absolute events (READ, WRITE, ERROR)
        :param int events_to_clear: The events to clear (READ, WRITE, ERROR)
        :param int events_to_set: The events to set (READ, WRITE, ERROR)
        """

    def _unregister_fd(self, fileno, events_to_clear):
        """No-op: select() reads the base class fd sets on every poll.

        :param int fileno: The file descriptor
        :param int events_to_clear: The events to clear (READ, WRITE, ERROR)
        """
class KQueuePoller(_PollerBase):
    """KQueuePoller works on BSD based systems and is faster than select"""

    def __init__(self):
        """Create an instance of the KQueuePoller

        """
        super(KQueuePoller, self).__init__()
        # Allocated lazily in _init_poller; None while deactivated
        self._kqueue = None

    @staticmethod
    def _map_event(kevent):
        """Translate a kevent's filter/flags into a READ/WRITE/ERROR constant.

        :param kevent kevent: a kevent object as returned by kqueue.control()

        """
        if kevent.filter == select.KQ_FILTER_READ:
            return READ
        if kevent.filter == select.KQ_FILTER_WRITE:
            return WRITE
        if kevent.flags & select.KQ_EV_ERROR:
            return ERROR

    def poll(self):
        """Wait for events of interest on registered file descriptors until an
        event of interest occurs or next timer deadline or _MAX_POLL_TIMEOUT,
        whichever is sooner, and dispatch the corresponding event handlers.

        """
        while True:
            try:
                kevents = self._kqueue.control(None, 1000,
                                               self._get_next_deadline())
            except _SELECT_ERRORS as error:
                # Retry on interrupted system calls; anything else is fatal
                if _is_resumable(error):
                    continue
                raise
            else:
                break

        # Fold the kevent list into a fileno -> event-mask map
        fd_event_map = defaultdict(int)
        for kev in kevents:
            fd_event_map[kev.ident] |= self._map_event(kev)

        self._dispatch_fd_events(fd_event_map)

    def _init_poller(self):
        """Notify the implementation to allocate the poller resource"""
        assert self._kqueue is None
        self._kqueue = select.kqueue()

    def _uninit_poller(self):
        """Notify the implementation to release the poller resource"""
        self._kqueue.close()
        self._kqueue = None

    def _register_fd(self, fileno, events):
        """Register the file descriptor with kqueue. The request is ignored
        if the poller is not activated.

        :param int fileno: The file descriptor
        :param int events: The event mask using READ, WRITE, ERROR
        """
        # Registration is just a modification from an empty mask
        self._modify_fd_events(fileno,
                               events=events,
                               events_to_clear=0,
                               events_to_set=events)

    def _modify_fd_events(self, fileno, events, events_to_clear, events_to_set):
        """Apply the event-mask delta to kqueue. The request is ignored if the
        poller is not activated.

        :param int fileno: The file descriptor
        :param int events: absolute events (READ, WRITE, ERROR)
        :param int events_to_clear: The events to clear (READ, WRITE, ERROR)
        :param int events_to_set: The events to set (READ, WRITE, ERROR)
        """
        if self._kqueue is None:
            return

        # Translate the READ/WRITE deltas into kqueue change records
        changes = list()
        for mask, kq_filter in ((READ, select.KQ_FILTER_READ),
                                (WRITE, select.KQ_FILTER_WRITE)):
            if events_to_clear & mask:
                changes.append(select.kevent(fileno,
                                             filter=kq_filter,
                                             flags=select.KQ_EV_DELETE))
            if events_to_set & mask:
                changes.append(select.kevent(fileno,
                                             filter=kq_filter,
                                             flags=select.KQ_EV_ADD))

        self._kqueue.control(changes, 0)

    def _unregister_fd(self, fileno, events_to_clear):
        """Unregister the file descriptor from kqueue. The request is ignored
        if the poller is not activated.

        :param int fileno: The file descriptor
        :param int events_to_clear: The events to clear (READ, WRITE, ERROR)
        """
        # Unregistration is a modification down to an empty mask
        self._modify_fd_events(fileno,
                               events=0,
                               events_to_clear=events_to_clear,
                               events_to_set=0)
class PollPoller(_PollerBase):
    """Poll works on Linux and can have better performance than EPoll in
    certain scenarios.  Both are faster than select.

    """
    # poll() takes its timeout in milliseconds
    POLL_TIMEOUT_MULT = 1000

    def __init__(self):
        """Create an instance of the PollPoller

        """
        # Allocated lazily in _init_poller; None while deactivated
        self._poll = None
        super(PollPoller, self).__init__()

    @staticmethod
    def _create_poller():
        """
        :rtype: `select.poll`
        """
        return select.poll() # pylint: disable=E1101

    def poll(self):
        """Wait for events of interest on registered file descriptors until an
        event of interest occurs or next timer deadline or _MAX_POLL_TIMEOUT,
        whichever is sooner, and dispatch the corresponding event handlers.

        """
        while True:
            try:
                raw_events = self._poll.poll(self._get_next_deadline())
            except _SELECT_ERRORS as error:
                # Retry on interrupted system calls; anything else is fatal
                if _is_resumable(error):
                    continue
                raise
            else:
                break

        # Merge per-fd events into a fileno -> event-mask map
        fd_event_map = defaultdict(int)
        for fileno, event in raw_events:
            fd_event_map[fileno] |= event

        self._dispatch_fd_events(fd_event_map)

    def _init_poller(self):
        """Notify the implementation to allocate the poller resource"""
        assert self._poll is None
        self._poll = self._create_poller()

    def _uninit_poller(self):
        """Notify the implementation to release the poller resource"""
        # epoll objects (see EPollPoller subclass) expose close(); plain
        # select.poll objects do not
        if hasattr(self._poll, "close"):
            self._poll.close()
        self._poll = None

    def _register_fd(self, fileno, events):
        """Register the file descriptor with the polling object. The request
        is ignored if the poller is not activated.

        :param int fileno: The file descriptor
        :param int events: The event mask using READ, WRITE, ERROR
        """
        if self._poll is None:
            return
        self._poll.register(fileno, events)

    def _modify_fd_events(self, fileno, events, events_to_clear, events_to_set):
        """Update the registered event mask for an fd. The request is ignored
        if the poller is not activated.

        :param int fileno: The file descriptor
        :param int events: absolute events (READ, WRITE, ERROR)
        :param int events_to_clear: The events to clear (READ, WRITE, ERROR)
        :param int events_to_set: The events to set (READ, WRITE, ERROR)
        """
        if self._poll is None:
            return
        self._poll.modify(fileno, events)

    def _unregister_fd(self, fileno, events_to_clear):
        """Unregister the file descriptor from the polling object. The request
        is ignored if the poller is not activated.

        :param int fileno: The file descriptor
        :param int events_to_clear: The events to clear (READ, WRITE, ERROR)
        """
        if self._poll is None:
            return
        self._poll.unregister(fileno)
class EPollPoller(PollPoller):
    """EPoll works on Linux and can have better performance than Poll in
    certain scenarios.  Both are faster than select.

    """
    # epoll takes its timeout in seconds, unlike poll's milliseconds
    POLL_TIMEOUT_MULT = 1

    @staticmethod
    def _create_poller():
        """
        :rtype: `select.epoll`
        """
        return select.epoll() # pylint: disable=E1101
| {
"repo_name": "knowsis/pika",
"path": "pika/adapters/select_connection.py",
"copies": "1",
"size": "35777",
"license": "bsd-3-clause",
"hash": -3033070405437947000,
"line_mean": 34.9929577465,
"line_max": 85,
"alpha_frac": 0.5993515387,
"autogenerated": false,
"ratio": 4.5316022799240026,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0003459913780859819,
"num_lines": 994
} |
"""A connection adapter that tries to use the best polling method for the
platform pika is running on.
"""
import logging
import select
import time
from pika.adapters.base_connection import BaseConnection
LOGGER = logging.getLogger(__name__)

# One of select, epoll, kqueue or poll; None lets the IOLoop pick the best
# available mechanism for the platform
SELECT_TYPE = None

# Use epoll's constants to keep life easy
READ = 0x0001
WRITE = 0x0004
ERROR = 0x0008
class SelectConnection(BaseConnection):
    """An asynchronous connection adapter that attempts to use the fastest
    event loop adapter for the given platform.

    """

    def _adapter_connect(self):
        """Connect to the RabbitMQ broker, returning True on success, False
        on failure.

        :rtype: bool

        """
        if not super(SelectConnection, self)._adapter_connect():
            return False

        # Socket is up: hand it to a fresh IOLoop/poller pair
        self.ioloop = IOLoop(self._manage_event_state)
        self.ioloop.start_poller(self._handle_events,
                                 self.event_state,
                                 self.socket.fileno())
        return True

    def _flush_outbound(self):
        """Call the state manager who will figure out that we need to write then
        call the poller's poll function to force it to process events.

        """
        self.ioloop.poller._manage_event_state()
        # Force our poller to come up for air, but in write only mode
        # write only mode prevents messages from coming in and kicking off
        # events through the consumer
        self.ioloop.poller.poll(write_only=True)
class IOLoop(object):
    """Singleton wrapper that decides which type of poller to use, creates an
    instance of it in start_poller and keeps the invoking application in a
    blocking state by calling the pollers start method. Poller should keep
    looping until IOLoop.instance().stop() is called or there is a socket
    error.

    Also provides a convenient pass-through for add_timeout and set_events

    """

    def __init__(self, state_manager):
        """Create an instance of the IOLoop object.

        :param method state_manager: The method to manage state

        """
        # The concrete poller; None until start_poller is invoked
        self.poller = None
        self._manage_event_state = state_manager

    def add_timeout(self, deadline, callback_method):
        """Add the callback_method to the IOLoop timer to fire after deadline
        seconds. Returns a handle to the timeout. Do not confuse with
        Tornado's timeout where you pass in the time you want to have your
        callback called. Only pass in the seconds until it's to be called.

        :param int deadline: The number of seconds to wait to call callback
        :param method callback_method: The callback method
        :rtype: str

        """
        return self.poller.add_timeout(deadline, callback_method)

    @property
    def poller_type(self):
        """Return the type of poller.

        :rtype: str

        """
        return self.poller.__class__.__name__

    def remove_timeout(self, timeout_id):
        """Remove a timeout if it's still in the timeout stack of the poller

        :param str timeout_id: The timeout id to remove

        """
        self.poller.remove_timeout(timeout_id)

    def start(self):
        """Start the IOLoop, waiting for a Poller to take over."""
        LOGGER.debug('Starting IOLoop')
        while not self.poller:
            time.sleep(SelectPoller.TIMEOUT)
        self.poller.start()
        self.poller.flush_pending_timeouts()

    def start_poller(self, handler, events, fileno):
        """Start the Poller, once started will take over for IOLoop.start()

        :param method handler: The method to call to handle events
        :param int events: The events to handle
        :param int fileno: The file descriptor to poll for

        """
        LOGGER.debug('Starting the Poller')
        self.poller = None
        # BUGFIX: probe a poll *instance* for `modify`. The previous check,
        # hasattr(select.poll, 'modify'), inspected the poll factory function,
        # which never has a `modify` attribute, so PollPoller could never be
        # selected.
        if hasattr(select, 'poll') and hasattr(select.poll(), 'modify'):
            if not SELECT_TYPE or SELECT_TYPE == 'poll':
                LOGGER.debug('Using PollPoller')
                self.poller = PollPoller(fileno, handler, events,
                                         self._manage_event_state)
        if not self.poller and hasattr(select, 'epoll'):
            if not SELECT_TYPE or SELECT_TYPE == 'epoll':
                LOGGER.debug('Using EPollPoller')
                self.poller = EPollPoller(fileno, handler, events,
                                          self._manage_event_state)
        if not self.poller and hasattr(select, 'kqueue'):
            if not SELECT_TYPE or SELECT_TYPE == 'kqueue':
                LOGGER.debug('Using KQueuePoller')
                self.poller = KQueuePoller(fileno, handler, events,
                                           self._manage_event_state)
        if not self.poller:
            # select() is the universally available fallback
            LOGGER.debug('Using SelectPoller')
            self.poller = SelectPoller(fileno, handler, events,
                                       self._manage_event_state)

    def stop(self):
        """Stop the poller's event loop"""
        LOGGER.debug('Stopping the poller event loop')
        self.poller.open = False

    def update_handler(self, fileno, events):
        """Pass in the events to process for the given file descriptor.

        :param int fileno: The file descriptor to poll for
        :param int events: The events to handle

        """
        self.poller.update_handler(fileno, events)
class SelectPoller(object):
"""Default behavior is to use Select since it's the widest supported and has
all of the methods we need for child classes as well. One should only need
to override the update_handler and start methods for additional types.
"""
TIMEOUT = 1
def __init__(self, fileno, handler, events, state_manager):
"""Create an instance of the SelectPoller
:param int fileno: The file descriptor to check events for
:param method handler: What is called when an event happens
:param int events: The events to look for
:param method state_manager: The method to manage state
"""
self.fileno = fileno
self.events = events
self.open = True
self._handler = handler
self._timeouts = dict()
self._manage_event_state = state_manager
def add_timeout(self, deadline, callback_method):
"""Add the callback_method to the IOLoop timer to fire after deadline
seconds. Returns a handle to the timeout. Do not confuse with
Tornado's timeout where you pass in the time you want to have your
callback called. Only pass in the seconds until it's to be called.
:param int deadline: The number of seconds to wait to call callback
:param method callback_method: The callback method
:rtype: str
"""
value = {'deadline': time.time() + deadline,
'callback': callback_method}
timeout_id = hash(frozenset(value.items()))
self._timeouts[timeout_id] = value
return timeout_id
def flush_pending_timeouts(self):
"""
"""
if len(self._timeouts) > 0:
time.sleep(SelectPoller.TIMEOUT)
self.process_timeouts()
def poll(self, write_only=False):
"""Check to see if the events that are cared about have fired.
:param bool write_only: Don't look at self.events, just look to see if
the adapter can write.
"""
# Build our values to pass into select
input_fileno, output_fileno, error_fileno = [], [], []
if self.events & READ:
input_fileno = [self.fileno]
if self.events & WRITE:
output_fileno = [self.fileno]
if self.events & ERROR:
error_fileno = [self.fileno]
# Wait on select to let us know what's up
try:
read, write, error = select.select(input_fileno,
output_fileno,
error_fileno,
SelectPoller.TIMEOUT)
except select.error, error:
return self._handler(self.fileno, ERROR, error)
# Build our events bit mask
events = 0
if read:
events |= READ
if write:
events |= WRITE
if error:
events |= ERROR
if events:
self._handler(self.fileno, events, write_only=write_only)
def process_timeouts(self):
"""Process the self._timeouts event stack"""
start_time = time.time()
for timeout_id in self._timeouts.keys():
if timeout_id not in self._timeouts:
continue
if self._timeouts[timeout_id]['deadline'] <= start_time:
callback = self._timeouts[timeout_id]['callback']
del self._timeouts[timeout_id]
callback()
def remove_timeout(self, timeout_id):
"""Remove a timeout if it's still in the timeout stack
:param str timeout_id: The timeout id to remove
"""
if timeout_id in self._timeouts:
del self._timeouts[timeout_id]
    def start(self):
        """Start the main poller loop. It will loop here until self.closed"""
        # Each pass: wait for fd events (bounded by SelectPoller.TIMEOUT),
        # fire any due timer callbacks, then let the connection adjust the
        # event mask it is interested in via the state manager.
        while self.open:
            self.poll()
            self.process_timeouts()
            self._manage_event_state()
    def update_handler(self, fileno, events):
        """Set the events to the current events
        :param int fileno: The file descriptor
        :param int events: The event mask
        """
        # The select()-based poller only watches a single fd, so fileno is
        # unused here; subclasses wrapping poll/epoll/kqueue override this
        # to (re)register the descriptor with the kernel.
        self.events = events
class KQueuePoller(SelectPoller):
    """KQueuePoller works on BSD based systems and is faster than select"""
    def __init__(self, fileno, handler, events, state_manager):
        """Create an instance of the KQueuePoller
        :param int fileno: The file descriptor to check events for
        :param method handler: What is called when an event happens
        :param int events: The events to look for
        :param method state_manager: The method to manage state
        """
        super(KQueuePoller, self).__init__(fileno, handler, events,
                                           state_manager)
        # Reset the mask so update_handler() below sees a clean slate and
        # registers every requested filter with the new kqueue.
        self.events = 0
        self._kqueue = select.kqueue()
        self.update_handler(fileno, events)
        # Fixed: dropped the redundant `self._manage_event_state =
        # state_manager` -- the base class __init__ already assigns it.
    def update_handler(self, fileno, events):
        """Set the events to the current events
        :param int fileno: The file descriptor
        :param int events: The event mask
        """
        # No need to update if our events are the same
        if self.events == events:
            return
        # Translate the delta between the old and new masks into kqueue
        # ADD/DELETE operations, one kevent per changed filter.
        kevents = list()
        if not events & READ:
            if self.events & READ:
                kevents.append(select.kevent(fileno,
                                             filter=select.KQ_FILTER_READ,
                                             flags=select.KQ_EV_DELETE))
        else:
            if not self.events & READ:
                kevents.append(select.kevent(fileno,
                                             filter=select.KQ_FILTER_READ,
                                             flags=select.KQ_EV_ADD))
        if not events & WRITE:
            if self.events & WRITE:
                kevents.append(select.kevent(fileno,
                                             filter=select.KQ_FILTER_WRITE,
                                             flags=select.KQ_EV_DELETE))
        else:
            if not self.events & WRITE:
                kevents.append(select.kevent(fileno,
                                             filter=select.KQ_FILTER_WRITE,
                                             flags=select.KQ_EV_ADD))
        for event in kevents:
            self._kqueue.control([event], 0)
        self.events = events
    def start(self):
        """Start the main poller loop. It will loop here until self.closed"""
        while self.open:
            self.poll()
            self.process_timeouts()
            self._manage_event_state()
    def poll(self, write_only=False):
        """Check to see if the events that are cared about have fired.
        :param bool write_only: Don't look at self.events, just look to see if
            the adapter can write.
        """
        events = 0
        try:
            kevents = self._kqueue.control(None, 1000, SelectPoller.TIMEOUT)
        # Fixed: "except OSError, error:" is a SyntaxError on Python 3;
        # the "as" form works on Python 2.6+ and 3.
        except OSError as error:
            return self._handler(self.fileno, ERROR, error)
        for event in kevents:
            if event.filter == select.KQ_FILTER_READ and READ & self.events:
                events |= READ
            if event.filter == select.KQ_FILTER_WRITE and WRITE & self.events:
                events |= WRITE
            if event.flags & select.KQ_EV_ERROR and ERROR & self.events:
                events |= ERROR
        if events:
            LOGGER.debug("Calling %s(%i)", self._handler, events)
            self._handler(self.fileno, events, write_only=write_only)
class PollPoller(SelectPoller):
    """Poll works on Linux and can have better performance than EPoll in
    certain scenarios. Both are faster than select.
    """
    def __init__(self, fileno, handler, events, state_manager):
        """Create an instance of the PollPoller
        :param int fileno: The file descriptor to check events for
        :param method handler: What is called when an event happens
        :param int events: The events to look for
        :param method state_manager: The method to manage state
        """
        super(PollPoller, self).__init__(fileno, handler, events, state_manager)
        self._poll = select.poll()
        self._poll.register(fileno, self.events)
    def update_handler(self, fileno, events):
        """Set the events to the current events
        :param int fileno: The file descriptor
        :param int events: The event mask
        """
        self.events = events
        self._poll.modify(fileno, self.events)
    def start(self):
        """Start the main poller loop. It will loop here until self.closed,
        then unregister the descriptor from the poll object."""
        was_open = self.open
        while self.open:
            self.poll()
            self.process_timeouts()
            self._manage_event_state()
        # Nothing to unregister if the poller was never open.
        if not was_open:
            return
        try:
            # Fixed: pass fileno as a lazy logging argument instead of
            # eager %-formatting inside the message string.
            LOGGER.info("Unregistering poller on fd %d", self.fileno)
            self.update_handler(self.fileno, 0)
            self._poll.unregister(self.fileno)
        # Fixed: "except IOError, err:" is a SyntaxError on Python 3;
        # the "as" form works on Python 2.6+ and 3.
        except IOError as err:
            LOGGER.debug("Got IOError while shutting down poller: %s", err)
    def poll(self, write_only=False):
        """Poll until TIMEOUT waiting for an event
        :param bool write_only: Only process write events
        """
        # poll() takes milliseconds; TIMEOUT is expressed in seconds.
        events = self._poll.poll(int(SelectPoller.TIMEOUT * 1000))
        if events:
            LOGGER.debug("Calling %s with %d events",
                         self._handler, len(events))
            for fileno, event in events:
                self._handler(fileno, event, write_only=write_only)
class EPollPoller(PollPoller):
    """EPoll works on Linux and can have better performance than Poll in
    certain scenarios. Both are faster than select.
    """
    def __init__(self, fileno, handler, events, state_manager):
        """Create an instance of the EPollPoller
        :param int fileno: The file descriptor to check events for
        :param method handler: What is called when an event happens
        :param int events: The events to look for
        :param method state_manager: The method to manage state
        """
        super(EPollPoller, self).__init__(fileno, handler, events,
                                          state_manager)
        # Replace the plain poll object created by PollPoller.__init__
        # with an epoll instance and register the descriptor on it.
        self._poll = select.epoll()
        self._poll.register(fileno, self.events)
    def poll(self, write_only=False):
        """Poll until TIMEOUT waiting for an event
        :param bool write_only: Only process write events
        """
        # Unlike poll(), epoll's timeout argument is in seconds.
        ready = self._poll.poll(SelectPoller.TIMEOUT)
        if not ready:
            return
        LOGGER.debug("Calling %s", self._handler)
        for fd, evt in ready:
            self._handler(fd, evt, write_only=write_only)
| {
"repo_name": "godotgildor/Suns",
"path": "src/pika/adapters/select_connection.py",
"copies": "2",
"size": "16227",
"license": "bsd-3-clause",
"hash": 6523432751432935000,
"line_mean": 34.9800443459,
"line_max": 80,
"alpha_frac": 0.5856288901,
"autogenerated": false,
"ratio": 4.59038189533239,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00008212203334154553,
"num_lines": 451
} |
"""A connection adapter that tries to use the best polling method for the
platform pika is running on.
"""
import os
import logging
import socket
import select
import errno
import time
from collections import defaultdict
import threading
from servicebus.pika import compat
from servicebus.pika.compat import dictkeys
from servicebus.pika.adapters.base_connection import BaseConnection
LOGGER = logging.getLogger(__name__)
# One of select, epoll, kqueue or poll; leave as None to let IOLoop pick
# the best poller available on this platform.
SELECT_TYPE = None
# Use epoll's constants to keep life easy
READ = 0x0001
WRITE = 0x0004
ERROR = 0x0008
# Exception type raised by the polling syscalls on this Python version.
if compat.PY2:
    _SELECT_ERROR = select.error
else:
    # select.error was deprecated and replaced by OSError in python 3.3
    _SELECT_ERROR = OSError
def _get_select_errno(error):
    """Return the errno carried by an exception raised from the polling
    syscalls, accounting for the py2 (select.error) / py3 (OSError) split.
    """
    if not compat.PY2:
        assert isinstance(error, OSError), repr(error)
        return error.errno
    assert isinstance(error, select.error), repr(error)
    return error.args[0]
class SelectConnection(BaseConnection):
    """An asynchronous connection adapter that attempts to use the fastest
    event loop adapter for the given platform.
    """
    def __init__(self,
                 parameters=None,
                 on_open_callback=None,
                 on_open_error_callback=None,
                 on_close_callback=None,
                 stop_ioloop_on_close=True,
                 custom_ioloop=None):
        """Create a new instance of the Connection object.
        :param pika.connection.Parameters parameters: Connection parameters
        :param method on_open_callback: Method to call on connection open
        :param on_open_error_callback: Method to call if the connection cant
            be opened
        :type on_open_error_callback: method
        :param method on_close_callback: Method to call on connection close
        :param bool stop_ioloop_on_close: Call ioloop.stop() if disconnected
        :param custom_ioloop: Override using the global IOLoop in Tornado
        :raises: RuntimeError
        """
        # Fall back to a fresh IOLoop (which selects the best poller for
        # this platform) unless the caller supplied one.
        ioloop = custom_ioloop or IOLoop()
        super(SelectConnection, self).__init__(parameters, on_open_callback,
                                               on_open_error_callback,
                                               on_close_callback, ioloop,
                                               stop_ioloop_on_close)
    def _adapter_connect(self):
        """Connect to the RabbitMQ broker, returning True on success, False
        on failure.
        NOTE(review): despite the docstring above, this actually returns
        the error value from the base class (falsy on success).
        :rtype: bool
        """
        error = super(SelectConnection, self)._adapter_connect()
        # Only start watching the socket once the connect succeeded.
        if not error:
            self.ioloop.add_handler(self.socket.fileno(), self._handle_events,
                                    self.event_state)
        return error
    def _adapter_disconnect(self):
        """Disconnect from the RabbitMQ broker"""
        # Stop watching the socket before the base class tears it down.
        if self.socket:
            self.ioloop.remove_handler(self.socket.fileno())
        super(SelectConnection, self)._adapter_disconnect()
class IOLoop(object):
    """Facade that picks the most capable poller available on this platform
    and delegates every attribute access to it. The chosen poller keeps
    looping until stop() is called on it or a socket error occurs.
    """
    def __init__(self):
        # Resolve the concrete poller implementation once, up front.
        self._poller = self._get_poller()
    def __getattr__(self, attr):
        # Transparent pass-through: IOLoop exposes whatever the selected
        # poller implements.
        return getattr(self._poller, attr)
    def _get_poller(self):
        """Determine the best poller to use for this enviroment."""
        def wanted(name):
            # A SELECT_TYPE of None means "no preference".
            return not SELECT_TYPE or SELECT_TYPE == name
        poller = None
        if hasattr(select, 'epoll') and wanted('epoll'):
            LOGGER.debug('Using EPollPoller')
            poller = EPollPoller()
        if poller is None and hasattr(select, 'kqueue') and wanted('kqueue'):
            LOGGER.debug('Using KQueuePoller')
            poller = KQueuePoller()
        if (poller is None and hasattr(select, 'poll') and
                hasattr(select.poll(), 'modify') and  # pylint: disable=E1101
                wanted('poll')):
            LOGGER.debug('Using PollPoller')
            poller = PollPoller()
        if poller is None:
            LOGGER.debug('Using SelectPoller')
            poller = SelectPoller()
        return poller
class SelectPoller(object):
    """Default behavior is to use Select since it's the widest supported and has
    all of the methods we need for child classes as well. One should only need
    to override the update_handler and start methods for additional types.
    """
    # Drop out of the poll loop every POLL_TIMEOUT secs as a worst case, this
    # is only a backstop value. We will run timeouts when they are scheduled.
    POLL_TIMEOUT = 5
    # if the poller uses MS specify 1000
    POLL_TIMEOUT_MULT = 1
    def __init__(self):
        """Create an instance of the SelectPoller
        """
        # fd-to-handler function mappings
        self._fd_handlers = dict()
        # event-to-fdset mappings
        self._fd_events = {READ: set(), WRITE: set(), ERROR: set()}
        self._stopping = False
        # timeout-id -> {'deadline': absolute time, 'callback': method}
        self._timeouts = {}
        # Cached earliest deadline; None means "recompute lazily"
        self._next_timeout = None
        # Events currently being dispatched; see _process_fd_events()
        self._processing_fd_event_map = {}
        # Mutex for controlling critical sections where ioloop-interrupt sockets
        # are created, used, and destroyed. Needed in case `stop()` is called
        # from a thread.
        self._mutex = threading.Lock()
        # ioloop-interrupt socket pair; initialized in start()
        self._r_interrupt = None
        self._w_interrupt = None
    def get_interrupt_pair(self):
        """ Use a socketpair to be able to interrupt the ioloop if called
        from another thread. Socketpair() is not supported on some OS (Win)
        so use a pair of simple UDP sockets instead. The sockets will be
        closed and garbage collected by python when the ioloop itself is.
        """
        try:
            read_sock, write_sock = socket.socketpair()
        except AttributeError:
            # No socketpair() on this platform: emulate it with a pair of
            # loopback UDP sockets bound/connected to each other.
            LOGGER.debug("Using custom socketpair for interrupt")
            read_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            read_sock.bind(('localhost', 0))
            write_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            write_sock.connect(read_sock.getsockname())
        # Non-blocking so the ioloop can never stall on the interrupt fds
        read_sock.setblocking(0)
        write_sock.setblocking(0)
        return read_sock, write_sock
    def read_interrupt(self, interrupt_sock,
                       events, write_only):  # pylint: disable=W0613
        """ Read the interrupt byte(s). We ignore the event mask and write_only
        flag as we can ony get here if there's data to be read on our fd.
        :param int interrupt_sock: The file descriptor to read from
        :param int events: (unused) The events generated for this fd
        :param bool write_only: (unused) True if poll was called to trigger a
            write
        """
        try:
            os.read(interrupt_sock, 512)
        except OSError as err:
            # EAGAIN just means the non-blocking fd had nothing (left) to
            # read; anything else is a real error.
            if err.errno != errno.EAGAIN:
                raise
    def add_timeout(self, deadline, callback_method):
        """Add the callback_method to the IOLoop timer to fire after deadline
        seconds. Returns a handle to the timeout. Do not confuse with
        Tornado's timeout where you pass in the time you want to have your
        callback called. Only pass in the seconds until it's to be called.
        :param int deadline: The number of seconds to wait to call callback
        :param method callback_method: The callback method
        :rtype: str
        """
        timeout_at = time.time() + deadline
        value = {'deadline': timeout_at, 'callback': callback_method}
        # NOTE(review): identical (deadline, callback) pairs hash to the
        # same id and would overwrite each other -- confirm acceptable.
        timeout_id = hash(frozenset(value.items()))
        self._timeouts[timeout_id] = value
        # Keep the cached earliest deadline current
        if not self._next_timeout or timeout_at < self._next_timeout:
            self._next_timeout = timeout_at
        return timeout_id
    def remove_timeout(self, timeout_id):
        """Remove a timeout if it's still in the timeout stack
        :param str timeout_id: The timeout id to remove
        """
        try:
            timeout = self._timeouts.pop(timeout_id)
            # Invalidate the cache if we removed the earliest deadline;
            # get_next_deadline() recomputes it lazily.
            if timeout['deadline'] == self._next_timeout:
                self._next_timeout = None
        except KeyError:
            pass
    def get_next_deadline(self):
        """Get the interval to the next timeout event, or a default interval
        """
        if self._next_timeout:
            timeout = max((self._next_timeout - time.time(), 0))
        elif self._timeouts:
            # Cache was invalidated: recompute the earliest deadline
            deadlines = [t['deadline'] for t in self._timeouts.values()]
            self._next_timeout = min(deadlines)
            timeout = max((self._next_timeout - time.time(), 0))
        else:
            timeout = SelectPoller.POLL_TIMEOUT
        # Never wait longer than the backstop interval
        timeout = min((timeout, SelectPoller.POLL_TIMEOUT))
        return timeout * SelectPoller.POLL_TIMEOUT_MULT
    def process_timeouts(self):
        """Process the self._timeouts event stack"""
        now = time.time()
        # Run the timeouts in order of deadlines. Although this shouldn't
        # be strictly necessary it preserves old behaviour when timeouts
        # were only run periodically.
        to_run = sorted([(k, timer) for (k, timer) in self._timeouts.items()
                         if timer['deadline'] <= now],
                        key=lambda item: item[1]['deadline'])
        for k, timer in to_run:
            if k not in self._timeouts:
                # Previous invocation(s) should have deleted the timer.
                continue
            try:
                timer['callback']()
            finally:
                # Don't do 'del self._timeout[k]' as the key might
                # have been deleted just now.
                if self._timeouts.pop(k, None) is not None:
                    self._next_timeout = None
    def add_handler(self, fileno, handler, events):
        """Add a new fileno to the set to be monitored
        :param int fileno: The file descriptor
        :param method handler: What is called when an event happens
        :param int events: The event mask
        """
        self._fd_handlers[fileno] = handler
        self.update_handler(fileno, events)
    def update_handler(self, fileno, events):
        """Set the events to the current events
        :param int fileno: The file descriptor
        :param int events: The event mask
        """
        # Membership of fileno in each per-event fd set mirrors whether
        # that event bit is requested in the mask.
        for ev in (READ, WRITE, ERROR):
            if events & ev:
                self._fd_events[ev].add(fileno)
            else:
                self._fd_events[ev].discard(fileno)
    def remove_handler(self, fileno):
        """Remove a file descriptor from the set
        :param int fileno: The file descriptor
        """
        # Forget any event already queued for dispatch on this fd so
        # _process_fd_events() won't call a removed handler.
        try:
            del self._processing_fd_event_map[fileno]
        except KeyError:
            pass
        self.update_handler(fileno, 0)
        del self._fd_handlers[fileno]
    def start(self):
        """Start the main poller loop. It will loop here until self._stopping"""
        LOGGER.debug('Starting IOLoop')
        self._stopping = False
        with self._mutex:
            # Watch out for reentry
            if self._r_interrupt is None:
                # Create ioloop-interrupt socket pair and register read handler.
                # NOTE: we defer their creation because some users (e.g.,
                # BlockingConnection adapter) don't use the event loop and these
                # sockets would get reported as leaks
                self._r_interrupt, self._w_interrupt = self.get_interrupt_pair()
                self.add_handler(self._r_interrupt.fileno(),
                                 self.read_interrupt,
                                 READ)
                interrupt_sockets_created = True
            else:
                # Reentrant start(): the pair already exists and is owned
                # by the outer invocation.
                interrupt_sockets_created = False
        try:
            # Run event loop
            while not self._stopping:
                self.poll()
                self.process_timeouts()
        finally:
            # Unregister and close ioloop-interrupt socket pair
            if interrupt_sockets_created:
                with self._mutex:
                    self.remove_handler(self._r_interrupt.fileno())
                    self._r_interrupt.close()
                    self._r_interrupt = None
                    self._w_interrupt.close()
                    self._w_interrupt = None
    def stop(self):
        """Request exit from the ioloop."""
        LOGGER.debug('Stopping IOLoop')
        self._stopping = True
        with self._mutex:
            if self._w_interrupt is None:
                return
            try:
                # Send byte to interrupt the poll loop, use write() for
                # consitency.
                os.write(self._w_interrupt.fileno(), b'X')
            except OSError as err:
                # EWOULDBLOCK means a previous interrupt byte is still
                # unread, which serves the same purpose.
                if err.errno != errno.EWOULDBLOCK:
                    raise
            except Exception as err:
                # There's nothing sensible to do here, we'll exit the interrupt
                # loop after POLL_TIMEOUT secs in worst case anyway.
                LOGGER.warning("Failed to send ioloop interrupt: %s", err)
                raise
    def poll(self, write_only=False):
        """Wait for events on interested filedescriptors.
        :param bool write_only: Passed through to the hadnlers to indicate
            that they should only process write events.
        """
        while True:
            try:
                read, write, error = select.select(self._fd_events[READ],
                                                   self._fd_events[WRITE],
                                                   self._fd_events[ERROR],
                                                   self.get_next_deadline())
                break
            except _SELECT_ERROR as error:
                # Retry when interrupted by a signal (EINTR)
                if _get_select_errno(error) == errno.EINTR:
                    continue
                else:
                    raise
        # Build an event bit mask for each fileno we've recieved an event for
        fd_event_map = defaultdict(int)
        for fd_set, ev in zip((read, write, error), (READ, WRITE, ERROR)):
            for fileno in fd_set:
                fd_event_map[fileno] |= ev
        self._process_fd_events(fd_event_map, write_only)
    def _process_fd_events(self, fd_event_map, write_only):
        """ Processes the callbacks for each fileno we've recieved events.
            Before doing so we re-calculate the event mask based on what is
            currently set in case it has been changed under our feet by a
            previous callback. We also take a store a refernce to the
            fd_event_map in the class so that we can detect removal of an
            fileno during processing of another callback and not generate
            spurious callbacks on it.
            :param dict fd_event_map: Map of fds to events recieved on them.
        """
        self._processing_fd_event_map = fd_event_map
        for fileno in dictkeys(fd_event_map):
            if fileno not in fd_event_map:
                # the fileno has been removed from the map under our feet.
                continue
            events = fd_event_map[fileno]
            # Mask out events the loop no longer watches for this fd
            for ev in [READ, WRITE, ERROR]:
                if fileno not in self._fd_events[ev]:
                    events &= ~ev
            if events:
                handler = self._fd_handlers[fileno]
                handler(fileno, events, write_only=write_only)
class KQueuePoller(SelectPoller):
    """KQueuePoller works on BSD based systems and is faster than select"""
    def __init__(self):
        """Create an instance of the KQueuePoller
        :param int fileno: The file descriptor to check events for
        :param method handler: What is called when an event happens
        :param int events: The events to look for
        """
        # The kqueue object must exist before any handler registration,
        # since update_handler() pushes kevents into it.
        self._kqueue = select.kqueue()
        super(KQueuePoller, self).__init__()
    def update_handler(self, fileno, events):
        """Set the events to the current events
        :param int fileno: The file descriptor
        :param int events: The event mask
        """
        # Translate the difference between the currently-registered fd
        # sets and the requested mask into kqueue ADD/DELETE operations.
        kevents = list()
        if not events & READ:
            if fileno in self._fd_events[READ]:
                kevents.append(select.kevent(fileno,
                                             filter=select.KQ_FILTER_READ,
                                             flags=select.KQ_EV_DELETE))
        else:
            if fileno not in self._fd_events[READ]:
                kevents.append(select.kevent(fileno,
                                             filter=select.KQ_FILTER_READ,
                                             flags=select.KQ_EV_ADD))
        if not events & WRITE:
            if fileno in self._fd_events[WRITE]:
                kevents.append(select.kevent(fileno,
                                             filter=select.KQ_FILTER_WRITE,
                                             flags=select.KQ_EV_DELETE))
        else:
            if fileno not in self._fd_events[WRITE]:
                kevents.append(select.kevent(fileno,
                                             filter=select.KQ_FILTER_WRITE,
                                             flags=select.KQ_EV_ADD))
        for event in kevents:
            self._kqueue.control([event], 0)
        # Keep the base class's fd-set bookkeeping in sync
        super(KQueuePoller, self).update_handler(fileno, events)
    def _map_event(self, kevent):
        """return the event type associated with a kevent object
        :param kevent kevent: a kevent object as returned by kqueue.control()
        """
        if kevent.filter == select.KQ_FILTER_READ:
            return READ
        elif kevent.filter == select.KQ_FILTER_WRITE:
            return WRITE
        elif kevent.flags & select.KQ_EV_ERROR:
            return ERROR
        # NOTE(review): falls through returning None for any other filter,
        # which would make poll()'s `|=` raise. Presumably unreachable since
        # only READ/WRITE filters are ever registered -- confirm.
    def poll(self, write_only=False):
        """Check to see if the events that are cared about have fired.
        :param bool write_only: Don't look at self.events, just look to see if
            the adapter can write.
        """
        while True:
            try:
                # Fetch at most 1000 pending kevents per iteration
                kevents = self._kqueue.control(None, 1000,
                                               self.get_next_deadline())
                break
            except _SELECT_ERROR as error:
                # Retry when interrupted by a signal (EINTR)
                if _get_select_errno(error) == errno.EINTR:
                    continue
                else:
                    raise
        fd_event_map = defaultdict(int)
        for event in kevents:
            fileno = event.ident
            fd_event_map[fileno] |= self._map_event(event)
        self._process_fd_events(fd_event_map, write_only)
class PollPoller(SelectPoller):
    """Poll works on Linux and can have better performance than EPoll in
    certain scenarios. Both are faster than select.
    """
    # poll() takes its timeout in milliseconds
    POLL_TIMEOUT_MULT = 1000
    def __init__(self):
        """Create an instance of the PollPoller
        :param int fileno: The file descriptor to check events for
        :param method handler: What is called when an event happens
        :param int events: The events to look for
        """
        # The poll object must exist before any handler registration.
        self._poll = self.create_poller()
        super(PollPoller, self).__init__()
    def create_poller(self):
        # Override point: EPollPoller substitutes select.epoll() here.
        return select.poll()  # pylint: disable=E1101
    def add_handler(self, fileno, handler, events):
        """Add a file descriptor to the poll set
        :param int fileno: The file descriptor to check events for
        :param method handler: What is called when an event happens
        :param int events: The events to look for
        """
        self._poll.register(fileno, events)
        super(PollPoller, self).add_handler(fileno, handler, events)
    def update_handler(self, fileno, events):
        """Set the events to the current events
        :param int fileno: The file descriptor
        :param int events: The event mask
        """
        super(PollPoller, self).update_handler(fileno, events)
        self._poll.modify(fileno, events)
    def remove_handler(self, fileno):
        """Remove a fileno to the set
        :param int fileno: The file descriptor
        """
        # Base removal zeroes the mask via update_handler(), which needs
        # the fd to still be registered -- unregister afterwards.
        super(PollPoller, self).remove_handler(fileno)
        self._poll.unregister(fileno)
    def poll(self, write_only=False):
        """Poll until the next timeout waiting for an event
        :param bool write_only: Only process write events
        """
        while True:
            try:
                events = self._poll.poll(self.get_next_deadline())
                break
            except _SELECT_ERROR as error:
                # Retry when interrupted by a signal (EINTR)
                if _get_select_errno(error) == errno.EINTR:
                    continue
                else:
                    raise
        fd_event_map = defaultdict(int)
        for fileno, event in events:
            fd_event_map[fileno] |= event
        self._process_fd_events(fd_event_map, write_only)
class EPollPoller(PollPoller):
    """EPoll works on Linux and can have better performance than Poll in
    certain scenarios. Both are faster than select.
    """
    # epoll takes its timeout in seconds, not milliseconds
    POLL_TIMEOUT_MULT = 1
    def create_poller(self):
        # Overrides PollPoller.create_poller to supply an epoll object
        return select.epoll()  # pylint: disable=E1101
| {
"repo_name": "blacktear23/py-servicebus",
"path": "servicebus/pika/adapters/select_connection.py",
"copies": "1",
"size": "21495",
"license": "bsd-3-clause",
"hash": -8185252918028387000,
"line_mean": 34.1800327332,
"line_max": 80,
"alpha_frac": 0.5735287276,
"autogenerated": false,
"ratio": 4.551132754605124,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5624661482205124,
"avg_score": null,
"num_lines": null
} |
"""A connection adapter that tries to use the best polling method for the
platform pika is running on.
"""
import os
import logging
import socket
import select
import errno
import time
from operator import itemgetter
from collections import defaultdict
import threading
import pika.compat
from pika.compat import dictkeys
from pika.adapters.base_connection import BaseConnection
LOGGER = logging.getLogger(__name__)
# One of select, epoll, kqueue or poll; leave as None to let IOLoop pick
# the best poller available on this platform.
SELECT_TYPE = None
# Use epoll's constants to keep life easy
READ = 0x0001
WRITE = 0x0004
ERROR = 0x0008
# Exception type raised by the polling syscalls on this Python version.
if pika.compat.PY2:
    _SELECT_ERROR = select.error
else:
    # select.error was deprecated and replaced by OSError in python 3.3
    _SELECT_ERROR = OSError
def _get_select_errno(error):
    """Return the errno carried by an exception raised from the polling
    syscalls, accounting for the py2 (select.error) / py3 (OSError) split.
    """
    if not pika.compat.PY2:
        assert isinstance(error, OSError), repr(error)
        return error.errno
    assert isinstance(error, select.error), repr(error)
    return error.args[0]
class SelectConnection(BaseConnection):
    """An asynchronous connection adapter that attempts to use the fastest
    event loop adapter for the given platform.
    """
    def __init__(self,
                 parameters=None,
                 on_open_callback=None,
                 on_open_error_callback=None,
                 on_close_callback=None,
                 stop_ioloop_on_close=True,
                 custom_ioloop=None):
        """Create a new instance of the Connection object.
        :param pika.connection.Parameters parameters: Connection parameters
        :param method on_open_callback: Method to call on connection open
        :param on_open_error_callback: Method to call if the connection cant
            be opened
        :type on_open_error_callback: method
        :param method on_close_callback: Method to call on connection close
        :param bool stop_ioloop_on_close: Call ioloop.stop() if disconnected
        :param custom_ioloop: Override using the global IOLoop in Tornado
        :raises: RuntimeError
        """
        # Fall back to a fresh IOLoop (which selects the best poller for
        # this platform) unless the caller supplied one.
        ioloop = custom_ioloop or IOLoop()
        super(SelectConnection, self).__init__(parameters, on_open_callback,
                                               on_open_error_callback,
                                               on_close_callback, ioloop,
                                               stop_ioloop_on_close)
    def _adapter_connect(self):
        """Connect to the RabbitMQ broker, returning True on success, False
        on failure.
        NOTE(review): despite the docstring above, this actually returns
        the error value from the base class (falsy on success).
        :rtype: bool
        """
        error = super(SelectConnection, self)._adapter_connect()
        # Only start watching the socket once the connect succeeded.
        if not error:
            self.ioloop.add_handler(self.socket.fileno(), self._handle_events,
                                    self.event_state)
        return error
    def _adapter_disconnect(self):
        """Disconnect from the RabbitMQ broker"""
        # Stop watching the socket before the base class tears it down.
        if self.socket:
            self.ioloop.remove_handler(self.socket.fileno())
        super(SelectConnection, self)._adapter_disconnect()
class IOLoop(object):
    """Facade that picks the most capable poller available on this platform
    and delegates every attribute access to it. The chosen poller keeps
    looping until stop() is called on it or a socket error occurs.
    """
    def __init__(self):
        # Resolve the concrete poller implementation once, up front.
        self._poller = self._get_poller()
    def __getattr__(self, attr):
        # Transparent pass-through: IOLoop exposes whatever the selected
        # poller implements.
        return getattr(self._poller, attr)
    def _get_poller(self):
        """Determine the best poller to use for this enviroment."""
        def wanted(name):
            # A SELECT_TYPE of None means "no preference".
            return not SELECT_TYPE or SELECT_TYPE == name
        poller = None
        if hasattr(select, 'epoll') and wanted('epoll'):
            LOGGER.debug('Using EPollPoller')
            poller = EPollPoller()
        if poller is None and hasattr(select, 'kqueue') and wanted('kqueue'):
            LOGGER.debug('Using KQueuePoller')
            poller = KQueuePoller()
        if (poller is None and hasattr(select, 'poll') and
                hasattr(select.poll(), 'modify') and  # pylint: disable=E1101
                wanted('poll')):
            LOGGER.debug('Using PollPoller')
            poller = PollPoller()
        if poller is None:
            LOGGER.debug('Using SelectPoller')
            poller = SelectPoller()
        return poller
class SelectPoller(object):
"""Default behavior is to use Select since it's the widest supported and has
all of the methods we need for child classes as well. One should only need
to override the update_handler and start methods for additional types.
"""
# Drop out of the poll loop every POLL_TIMEOUT secs as a worst case, this
# is only a backstop value. We will run timeouts when they are scheduled.
POLL_TIMEOUT = 5
# if the poller uses MS specify 1000
POLL_TIMEOUT_MULT = 1
    def __init__(self):
        """Create an instance of the SelectPoller
        """
        # fd-to-handler function mappings
        self._fd_handlers = dict()
        # event-to-fdset mappings
        self._fd_events = {READ: set(), WRITE: set(), ERROR: set()}
        self._stopping = False
        # timeout-id -> {'deadline': absolute time, 'callback': method}
        self._timeouts = {}
        # Cached earliest deadline; None means "recompute lazily"
        self._next_timeout = None
        # Events currently being dispatched; see _process_fd_events()
        self._processing_fd_event_map = {}
        # Mutex for controlling critical sections where ioloop-interrupt sockets
        # are created, used, and destroyed. Needed in case `stop()` is called
        # from a thread.
        self._mutex = threading.Lock()
        # ioloop-interrupt socket pair; initialized in start()
        self._r_interrupt = None
        self._w_interrupt = None
def get_interrupt_pair(self):
""" Use a socketpair to be able to interrupt the ioloop if called
from another thread. Socketpair() is not supported on some OS (Win)
so use a pair of simple UDP sockets instead. The sockets will be
closed and garbage collected by python when the ioloop itself is.
"""
try:
read_sock, write_sock = socket.socketpair()
except AttributeError:
LOGGER.debug("Using custom socketpair for interrupt")
read_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
read_sock.bind(('localhost', 0))
write_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
write_sock.connect(read_sock.getsockname())
read_sock.setblocking(0)
write_sock.setblocking(0)
return read_sock, write_sock
def read_interrupt(self, interrupt_sock,
events, write_only): # pylint: disable=W0613
""" Read the interrupt byte(s). We ignore the event mask and write_only
flag as we can ony get here if there's data to be read on our fd.
:param int interrupt_sock: The file descriptor to read from
:param int events: (unused) The events generated for this fd
:param bool write_only: (unused) True if poll was called to trigger a
write
"""
try:
os.read(interrupt_sock, 512)
except OSError as err:
if err.errno != errno.EAGAIN:
raise
def add_timeout(self, deadline, callback_method):
"""Add the callback_method to the IOLoop timer to fire after deadline
seconds. Returns a handle to the timeout. Do not confuse with
Tornado's timeout where you pass in the time you want to have your
callback called. Only pass in the seconds until it's to be called.
:param int deadline: The number of seconds to wait to call callback
:param method callback_method: The callback method
:rtype: str
"""
timeout_at = time.time() + deadline
value = {'deadline': timeout_at, 'callback': callback_method}
timeout_id = hash(frozenset(value.items()))
self._timeouts[timeout_id] = value
if not self._next_timeout or timeout_at < self._next_timeout:
self._next_timeout = timeout_at
return timeout_id
def remove_timeout(self, timeout_id):
"""Remove a timeout if it's still in the timeout stack
:param str timeout_id: The timeout id to remove
"""
try:
timeout = self._timeouts.pop(timeout_id)
if timeout['deadline'] == self._next_timeout:
self._next_timeout = None
except KeyError:
pass
def get_next_deadline(self):
"""Get the interval to the next timeout event, or a default interval
"""
if self._next_timeout:
timeout = max((self._next_timeout - time.time(), 0))
elif self._timeouts:
deadlines = [t['deadline'] for t in self._timeouts.values()]
self._next_timeout = min(deadlines)
timeout = max((self._next_timeout - time.time(), 0))
else:
timeout = SelectPoller.POLL_TIMEOUT
timeout = min((timeout, SelectPoller.POLL_TIMEOUT))
return timeout * SelectPoller.POLL_TIMEOUT_MULT
def process_timeouts(self):
"""Process the self._timeouts event stack"""
now = time.time()
to_run = [timer for timer in self._timeouts.values()
if timer['deadline'] <= now]
# Run the timeouts in order of deadlines. Although this shouldn't
# be strictly necessary it preserves old behaviour when timeouts
# were only run periodically.
for t in sorted(to_run, key=itemgetter('deadline')):
t['callback']()
del self._timeouts[hash(frozenset(t.items()))]
self._next_timeout = None
    def add_handler(self, fileno, handler, events):
        """Add a new fileno to the set to be monitored
        :param int fileno: The file descriptor
        :param method handler: What is called when an event happens
        :param int events: The event mask
        """
        # Record the callback first, then delegate the per-event fd-set
        # bookkeeping (and any subclass kernel registration).
        self._fd_handlers[fileno] = handler
        self.update_handler(fileno, events)
def update_handler(self, fileno, events):
"""Set the events to the current events
:param int fileno: The file descriptor
:param int events: The event mask
"""
for ev in (READ, WRITE, ERROR):
if events & ev:
self._fd_events[ev].add(fileno)
else:
self._fd_events[ev].discard(fileno)
    def remove_handler(self, fileno):
        """Remove a file descriptor from the set
        :param int fileno: The file descriptor
        """
        # Forget any event already queued for dispatch on this fd so
        # _process_fd_events() won't call a removed handler.
        try:
            del self._processing_fd_event_map[fileno]
        except KeyError:
            pass
        self.update_handler(fileno, 0)
        del self._fd_handlers[fileno]
def start(self):
    """Start the main poller loop. It will loop here until self._stopping"""
    LOGGER.debug('Starting IOLoop')
    self._stopping = False

    with self._mutex:
        # Watch out for reentry
        if self._r_interrupt is None:
            # Create ioloop-interrupt socket pair and register read handler.
            # NOTE: we defer their creation because some users (e.g.,
            # BlockingConnection adapter) don't use the event loop and these
            # sockets would get reported as leaks
            self._r_interrupt, self._w_interrupt = self.get_interrupt_pair()
            self.add_handler(self._r_interrupt.fileno(),
                             self.read_interrupt,
                             READ)
            # This invocation owns the sockets and must tear them down.
            interrupt_sockets_created = True
        else:
            # Re-entrant start(): the outer invocation owns the interrupt
            # sockets, so skip teardown in the finally block below.
            interrupt_sockets_created = False
    try:
        # Run event loop
        while not self._stopping:
            self.poll()
            self.process_timeouts()
    finally:
        # Unregister and close ioloop-interrupt socket pair
        if interrupt_sockets_created:
            with self._mutex:
                self.remove_handler(self._r_interrupt.fileno())
                self._r_interrupt.close()
                self._r_interrupt = None
                self._w_interrupt.close()
                self._w_interrupt = None
def stop(self):
    """Request exit from the ioloop."""
    LOGGER.debug('Stopping IOLoop')
    self._stopping = True

    with self._mutex:
        if self._w_interrupt is None:
            # The loop was never started (interrupt sockets don't exist),
            # so there is nothing to wake up.
            return

        try:
            # Send byte to interrupt the poll loop, use write() for
            # consistency.
            os.write(self._w_interrupt.fileno(), b'X')
        except OSError as err:
            # EWOULDBLOCK means a wake-up byte is already pending, which
            # is just as good; anything else is a real failure.
            if err.errno != errno.EWOULDBLOCK:
                raise
        except Exception as err:
            # There's nothing sensible to do here, we'll exit the interrupt
            # loop after POLL_TIMEOUT secs in worst case anyway.
            LOGGER.warning("Failed to send ioloop interrupt: %s", err)
            raise
def poll(self, write_only=False):
    """Wait for events on the file descriptors of interest.

    :param bool write_only: Passed through to the handlers to indicate
        that they should only process write events.
    """
    # select() may be interrupted by a signal; retry on EINTR only.
    while True:
        try:
            read, write, error = select.select(self._fd_events[READ],
                                               self._fd_events[WRITE],
                                               self._fd_events[ERROR],
                                               self.get_next_deadline())
        except _SELECT_ERROR as error:
            if _get_select_errno(error) != errno.EINTR:
                raise
        else:
            break

    # Collapse the three fd lists into one fileno -> event-bitmask map.
    fd_event_map = defaultdict(int)
    for mask, ready in ((READ, read), (WRITE, write), (ERROR, error)):
        for fileno in ready:
            fd_event_map[fileno] |= mask

    self._process_fd_events(fd_event_map, write_only)
def _process_fd_events(self, fd_event_map, write_only):
    """Dispatch handler callbacks for every fileno with pending events.

    The event mask is re-checked against self._fd_events immediately
    before dispatch, because an earlier callback may have changed the
    registrations under our feet. A reference to fd_event_map is also
    stored on the instance so remove_handler() can delete entries for
    filenos unregistered mid-iteration, preventing spurious callbacks.

    :param dict fd_event_map: Map of fds to events received on them.
    """
    self._processing_fd_event_map = fd_event_map

    # Iterate over a snapshot of the keys: callbacks may remove entries.
    for fileno in list(fd_event_map):
        if fileno not in fd_event_map:
            # Removed from the map under our feet by a prior callback.
            continue

        events = fd_event_map[fileno]
        # Mask out event types the fd is no longer registered for.
        for mask in (READ, WRITE, ERROR):
            if fileno not in self._fd_events[mask]:
                events &= ~mask

        if events:
            self._fd_handlers[fileno](fileno, events, write_only=write_only)
class KQueuePoller(SelectPoller):
    """KQueuePoller works on BSD based systems and is faster than select"""

    def __init__(self):
        """Create an instance of the KQueuePoller."""
        self._kqueue = select.kqueue()
        super(KQueuePoller, self).__init__()

    def update_handler(self, fileno, events):
        """Set the events to the current events

        :param int fileno: The file descriptor
        :param int events: The event mask
        """
        # Work out which kernel registrations must change so the kqueue
        # state matches the requested mask, using the parent's interest
        # sets (updated last, below) as the record of current state.
        changes = []
        for mask, kq_filter in ((READ, select.KQ_FILTER_READ),
                                (WRITE, select.KQ_FILTER_WRITE)):
            registered = fileno in self._fd_events[mask]
            if events & mask:
                if not registered:
                    changes.append(select.kevent(fileno,
                                                 filter=kq_filter,
                                                 flags=select.KQ_EV_ADD))
            elif registered:
                changes.append(select.kevent(fileno,
                                             filter=kq_filter,
                                             flags=select.KQ_EV_DELETE))
        for change in changes:
            self._kqueue.control([change], 0)
        super(KQueuePoller, self).update_handler(fileno, events)

    def _map_event(self, kevent):
        """return the event type associated with a kevent object

        :param kevent kevent: a kevent object as returned by kqueue.control()
        """
        if kevent.filter == select.KQ_FILTER_READ:
            return READ
        if kevent.filter == select.KQ_FILTER_WRITE:
            return WRITE
        if kevent.flags & select.KQ_EV_ERROR:
            return ERROR
        # NOTE(review): falls through to None for unmatched kevents, which
        # would break the bitmask OR in poll() — verify against upstream.

    def poll(self, write_only=False):
        """Check to see if the events that are cared about have fired.

        :param bool write_only: Don't look at self.events, just look to see
            if the adapter can write.
        """
        # kqueue.control() may be interrupted by a signal; retry on EINTR.
        while True:
            try:
                kevents = self._kqueue.control(None, 1000,
                                               self.get_next_deadline())
            except _SELECT_ERROR as error:
                if _get_select_errno(error) != errno.EINTR:
                    raise
            else:
                break

        # Fold the kevents into a fileno -> event-bitmask map.
        fd_event_map = defaultdict(int)
        for kevent in kevents:
            fd_event_map[kevent.ident] |= self._map_event(kevent)

        self._process_fd_events(fd_event_map, write_only)
class PollPoller(SelectPoller):
    """Poll works on Linux and can have better performance than EPoll in
    certain scenarios. Both are faster than select.
    """
    # poll() takes its timeout in milliseconds.
    POLL_TIMEOUT_MULT = 1000

    def __init__(self):
        """Create an instance of the PollPoller."""
        self._poll = self.create_poller()
        super(PollPoller, self).__init__()

    def create_poller(self):
        """Return the poll object this class dispatches through; overridden
        by EPollPoller to substitute an epoll object."""
        return select.poll()  # pylint: disable=E1101

    def add_handler(self, fileno, handler, events):
        """Add a file descriptor to the poll set

        :param int fileno: The file descriptor to check events for
        :param method handler: What is called when an event happens
        :param int events: The events to look for
        """
        self._poll.register(fileno, events)
        super(PollPoller, self).add_handler(fileno, handler, events)

    def update_handler(self, fileno, events):
        """Set the events to the current events

        :param int fileno: The file descriptor
        :param int events: The event mask
        """
        super(PollPoller, self).update_handler(fileno, events)
        self._poll.modify(fileno, events)

    def remove_handler(self, fileno):
        """Remove a file descriptor from the poll set

        :param int fileno: The file descriptor
        """
        super(PollPoller, self).remove_handler(fileno)
        self._poll.unregister(fileno)

    def poll(self, write_only=False):
        """Poll until the next timeout waiting for an event

        :param bool write_only: Only process write events
        """
        # poll() may be interrupted by a signal; retry on EINTR only.
        while True:
            try:
                events = self._poll.poll(self.get_next_deadline())
            except _SELECT_ERROR as error:
                if _get_select_errno(error) != errno.EINTR:
                    raise
            else:
                break

        # poll() already reports a bitmask per fd; just accumulate it.
        fd_event_map = defaultdict(int)
        for fileno, event in events:
            fd_event_map[fileno] |= event

        self._process_fd_events(fd_event_map, write_only)
class EPollPoller(PollPoller):
    """EPoll works on Linux and can have better performance than Poll in
    certain scenarios. Both are faster than select.
    """
    # epoll.poll() takes its timeout in seconds (unlike poll's
    # milliseconds), so get_next_deadline() must not scale the interval.
    POLL_TIMEOUT_MULT = 1

    def create_poller(self):
        # Override PollPoller to dispatch through an epoll object.
        return select.epoll()  # pylint: disable=E1101
| {
"repo_name": "reddec/pika",
"path": "pika/adapters/select_connection.py",
"copies": "3",
"size": "21137",
"license": "bsd-3-clause",
"hash": 2467033644874818000,
"line_mean": 33.9950331126,
"line_max": 80,
"alpha_frac": 0.5762407153,
"autogenerated": false,
"ratio": 4.539733676975945,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00021490335803582886,
"num_lines": 604
} |
"""A Connection is a list of links. Propagates data to and from Nodes."""
import Packet
class Connection(object):
    """Container for Link objects.

    Propagates packets between start_node and end_node across one or more
    links per direction, optionally exposing traffic to tapping nodes.
    """

    def __init__(self, start_node, end_node, tapping_nodes=None):
        """Create a connection between two nodes.

        :param start_node: node at the "downstream" origin of the connection
        :param end_node: node at the "upstream" origin of the connection
        :param tapping_nodes: optional nodes that observe traffic
        """
        # links[Packet.DOWNSTREAM] / links[Packet.UPSTREAM] each hold the
        # list of Link objects carrying traffic in that direction.
        self.links = [[], []]
        self.start_node = start_node
        self.end_node = end_node
        self.tapping_nodes = tapping_nodes
        self.buffer = []
        self.packets_in_buffer = []

    def tick(self, tick=1):
        """Update every link in this connection by 'ticking' time by
        'tick' units, delivering any packet a link surfaces."""
        for _counter in range(tick):
            for link in self.links[Packet.DOWNSTREAM] + self.links[Packet.UPSTREAM]:
                link.tick()
                self.pass_to_node(link.data_bubble)

    def send_packet(self, origin, packet):
        """Route a packet onto the least busy link.

        :param origin: address of the node the packet came from; determines
            whether the packet travels downstream or upstream.
        :param packet: the Packet to send
        """
        if origin == self.start_node.address:
            packet.direction = Packet.DOWNSTREAM
            packet.log(self.end_node.address)
        else:
            packet.direction = Packet.UPSTREAM
            packet.log(self.start_node.address)
        self.best_link(packet.direction).recieve_packet(packet)

    def best_link(self, direction):
        """Return the least busy link carrying traffic in `direction`.

        A link whose buffer_sum() is None is treated as idle and returned
        immediately; otherwise the link with the smallest buffered time
        wins. BUG FIX: the original confused buffer times with list
        indices and indexed self.links with the axes inverted
        (links[index][direction] instead of links[direction][index]),
        relying on a bare except to paper over the resulting errors.
        """
        links = self.links[direction]
        best_index = 0
        best_load = None
        for index, load in enumerate(link.buffer_sum() for link in links):
            if load is None:
                # Idle link: nothing buffered, cannot do better.
                return links[index]
            if best_load is None or load < best_load:
                best_load = load
                best_index = index
        return links[best_index]

    def add_link(self, link=None, links=None):
        """Add a link and/or a list of links to this connection."""
        # Work on a copy so the caller's list is never mutated, and only
        # include `link` when one was actually supplied (the original
        # appended None unconditionally).
        links = list(links) if links else []
        if link is not None:
            links.append(link)
        for new_link in links:
            # BUG FIX: the original assigned Packet.UPSTREAM on both
            # branches of the conditional. Links starting at end_node
            # carry upstream traffic; all others carry downstream.
            if new_link.start_node == self.end_node:
                pos = Packet.UPSTREAM
            else:
                pos = Packet.DOWNSTREAM
            self.links[pos].append(new_link)

    def pass_to_node(self, packet):
        """Give data to the node that the data did not come from.

        Also pass to tapping nodes (not yet implemented)."""
        # TODO: deliver copies to self.tapping_nodes once taps are wired up:
        # for tap in self.tapping_nodes:
        #     tap.recieve_packet.append(packet)
        if packet:
            if packet.destination == self.start_node.address:
                self.start_node.recieve_packet(packet)
                return
            if packet.destination == self.end_node.address:
                self.end_node.recieve_packet(packet)
                return
            # Destination is beyond this connection: forward it onward in
            # its direction of travel.
            if packet.direction == Packet.DOWNSTREAM:
                self.end_node.recieve_packet(packet)
            if packet.direction == Packet.UPSTREAM:
                self.start_node.recieve_packet(packet)

    def end_point(self, node_address):
        """Checks if a given address is a start or end point of this
        Connection.

        NOTE(review): compares the given address against the node objects
        themselves — presumably relies on Node.__eq__ accepting addresses;
        verify against the Node class.
        """
        return node_address == self.end_node or node_address == self.start_node

    def connected(self, node1_address, node2_address):
        """Checks if this connection connects node1 to node2."""
        return self.end_point(node1_address) and self.end_point(node2_address)
| {
"repo_name": "SamuelDoud/tomography",
"path": "tomography/src/connection.py",
"copies": "1",
"size": "3321",
"license": "mit",
"hash": -6292368498254758000,
"line_mean": 39.5,
"line_max": 94,
"alpha_frac": 0.5916892502,
"autogenerated": false,
"ratio": 4.010869565217392,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5102558815417392,
"avg_score": null,
"num_lines": null
} |
"""A connector for Facebook Messenger."""
import json
import logging
import aiohttp
from voluptuous import Required
from opsdroid.connector import Connector, register_event
from opsdroid.events import Message
_LOGGER = logging.getLogger(__name__)

# Graph API endpoint used to push replies back to Messenger; the page
# access token is interpolated at send time by send_message().
_FACEBOOK_SEND_URL = "https://graph.facebook.com/v2.6/me/messages" "?access_token={}"

# Schema used by opsdroid to validate this connector's configuration.
CONFIG_SCHEMA = {
    Required("verify-token"): str,
    Required("page-access-token"): str,
    "bot-name": str,
}
class ConnectorFacebook(Connector):
    """A connector for Facebook Messenger.

    Receives webhook events from Facebook Messenger, forwards user
    messages to opsdroid for parsing, and answers the platform's
    authentication challenge by verifying the token.

    Attributes:
        config: The config for this connector specified in the
            `configuration.yaml` file.
        name: String name of the connector.
        opsdroid: opsdroid instance.
        default_target: String name of default room for chat messages.
        bot_name: String name for bot.

    """

    def __init__(self, config, opsdroid=None):
        """Connector Setup."""
        super().__init__(config, opsdroid=opsdroid)
        _LOGGER.debug(_("Starting Facebook Connector."))
        self.name = self.config.get("name", "facebook")
        self.bot_name = config.get("bot-name", "opsdroid")

    async def connect(self):
        """Connect to the chat service."""
        # Messenger POSTs events and GETs the verification challenge on
        # the same route.
        route = "/connector/{}".format(self.name)
        router = self.opsdroid.web_server.web_app.router
        router.add_post(route, self.facebook_message_handler)
        router.add_get(route, self.facebook_challenge_handler)

    async def facebook_message_handler(self, request):
        """Handle incoming message.

        Checks each entry of the request for `messaging` payloads and
        processes every incoming message found.

        Return:
            A 200 OK response. The Messenger Platform will resend the
            webhook event every 20 seconds, until a 200 OK response is
            received. Failing to return a 200 OK may cause your webhook
            to be unsubscribed by the Messenger Platform.

        """
        req_data = await request.json()

        if req_data.get("object") == "page":
            for entry in req_data["entry"]:
                for fb_msg in entry["messaging"]:
                    _LOGGER.debug(fb_msg)
                    try:
                        sender = fb_msg["sender"]["id"]
                        message = Message(
                            sender,
                            sender,
                            self,
                            fb_msg["message"]["text"],
                        )
                        await self.opsdroid.parse(message)
                    except KeyError as error:
                        # Non-message events (delivery receipts, etc.)
                        # lack the keys above; skip them.
                        _LOGGER.error(
                            "Unable to process message. Invalid payload. See the debug log for more information."
                        )
                        _LOGGER.debug(error)

        return aiohttp.web.Response(text=json.dumps("Received"), status=200)

    async def facebook_challenge_handler(self, request):
        """Handle auth challenge.

        Return:
            A response if challenge is a success or failure.

        """
        _LOGGER.debug(request.query)
        token_ok = request.query["hub.verify_token"] == self.config.get(
            "verify-token"
        )
        if not token_ok:
            return aiohttp.web.Response(text=json.dumps("Bad verify token"), status=403)
        # Echo the challenge back to prove we own the endpoint.
        return aiohttp.web.Response(text=request.query["hub.challenge"], status=200)

    async def listen(self):
        """Listen for new message.

        Listening is handled by the aiohttp web server, so there is
        nothing to do here.

        """

    @register_event(Message)
    async def send_message(self, message):
        """Respond with a message."""
        _LOGGER.debug(_("Responding to Facebook."))

        url = _FACEBOOK_SEND_URL.format(self.config.get("page-access-token"))
        headers = {"content-type": "application/json"}
        payload = {
            "recipient": {"id": message.target},
            "message": {"text": message.text},
        }
        async with aiohttp.ClientSession(trust_env=True) as session:
            resp = await session.post(url, data=json.dumps(payload), headers=headers)
            if resp.status >= 300:
                _LOGGER.debug(resp.status)
                _LOGGER.debug(await resp.text())
                _LOGGER.error(_("Unable to respond to Facebook."))
            else:
                _LOGGER.info(_("Responded with: %s."), message.text)
| {
"repo_name": "jacobtomlinson/opsdroid",
"path": "opsdroid/connector/facebook/__init__.py",
"copies": "3",
"size": "4627",
"license": "apache-2.0",
"hash": -3694544177897171500,
"line_mean": 35.4330708661,
"line_max": 113,
"alpha_frac": 0.5856926734,
"autogenerated": false,
"ratio": 4.394112060778728,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0005305045962907729,
"num_lines": 127
} |
"""A connector for Gitter."""
import logging
import aiohttp
import asyncio
import json
import urllib
from voluptuous import Required
from opsdroid.connector import Connector, register_event
from opsdroid.events import Message
_LOGGER = logging.getLogger(__name__)

# Streaming endpoint delivering room messages as chunked JSON lines.
GITTER_STREAM_API = "https://stream.gitter.im/v1/rooms"
# REST endpoint for posting messages back into a room.
GITTER_MESSAGE_BASE_API = "https://api.gitter.im/v1/rooms"
# REST endpoint identifying the account that owns the access token.
CURRENT_USER_API = "https://api.gitter.im/v1/user/me"

# Schema used by opsdroid to validate this connector's configuration.
CONFIG_SCHEMA = {Required("token"): str, Required("room-id"): str, "bot-name": str}
class ConnectorGitter(Connector):
    """A connector for Gitter."""

    def __init__(self, config, opsdroid=None):
        """Create the connector."""
        super().__init__(config, opsdroid=opsdroid)
        _LOGGER.debug(_("Starting Gitter Connector."))
        self.name = "gitter"
        self.bot_name = None  # set at connection time
        self.session = None  # aiohttp.ClientSession, created in connect()
        self.response = None  # long-lived streaming response, see connect()
        self.room_id = self.config.get("room-id")
        self.access_token = self.config.get("token")
        self.update_interval = 1  # seconds to sleep between stream reads
        self.opsdroid = opsdroid
        self.listening = True

    async def connect(self):
        """Create the connection."""
        # Create connection object with chat library
        _LOGGER.debug(_("Connecting with Gitter stream."))
        self.session = aiohttp.ClientSession()

        # Gitter figures out who we are based on just our access token, but we
        # need to additionally know our own user ID in order to know which
        # messages comes from us.
        current_user_url = self.build_url(
            CURRENT_USER_API,
            access_token=self.access_token,
        )
        response = await self.session.get(current_user_url, timeout=None)
        # We cannot continue without a user ID, so raise if this failed.
        response.raise_for_status()
        response_json = await response.json()
        self.bot_gitter_id = response_json["id"]
        self.bot_name = response_json["username"]
        _LOGGER.debug(
            _("Successfully obtained bot's gitter id, %s."), self.bot_gitter_id
        )

        # Open the long-lived streaming request for the room's messages;
        # _get_messages() reads chunks from this response.
        message_stream_url = self.build_url(
            GITTER_STREAM_API,
            self.room_id,
            "chatMessages",
            access_token=self.access_token,
        )
        self.response = await self.session.get(message_stream_url, timeout=None)
        self.response.raise_for_status()

    def build_url(self, base_url, *res, **params):
        """Build the url. args ex:(base_url,p1,p2=1,p2=2)."""
        # Append each positional segment as a path component, then encode
        # the keyword arguments as the query string.
        url = base_url
        for r in res:
            url = "{}/{}".format(url, r)
        if params:
            url = "{}?{}".format(url, urllib.parse.urlencode(params))
        return url

    async def listen(self):
        """Keep listening to the gitter channel."""
        _LOGGER.debug(_("Listening with Gitter stream."))
        while self.listening:
            try:
                await self._get_messages()
            except AttributeError:
                # self.response was never set (connect() failed or was
                # skipped), so there is no stream to read; stop listening.
                break

    async def _get_messages(self):
        """Message listener."""
        await asyncio.sleep(self.update_interval)
        # NOTE(review): each 1024-byte chunk is assumed to contain a whole
        # JSON message (Gitter keep-alives are just "\r\n") — verify for
        # messages larger than one chunk.
        async for data in self.response.content.iter_chunked(1024):
            message = await self.parse_message(data)
            # Do not parse messages that we ourselves sent.
            if message is not None and message.user_id != self.bot_gitter_id:
                await self.opsdroid.parse(message)

    async def parse_message(self, message):
        """Parse response from gitter to send message."""
        # Strip the keep-alive framing; anything longer than one character
        # is expected to be a JSON-encoded message payload.
        message = message.decode("utf-8").rstrip("\r\n")
        if len(message) > 1:
            message = json.loads(message)
            _LOGGER.debug(message)
            try:
                return Message(
                    text=message["text"],
                    user=message["fromUser"]["username"],
                    user_id=message["fromUser"]["id"],
                    target=self.room_id,
                    connector=self,
                )
            except KeyError as err:
                # Payload without the expected fields (e.g. system events).
                _LOGGER.error(_("Unable to parse message %r."), err)

    @register_event(Message)
    async def send_message(self, message):
        """Received parsed message and send it back to gitter room."""
        # Send message.text back to the chat service
        url = self.build_url(GITTER_MESSAGE_BASE_API, message.target, "chatMessages")
        headers = {
            "Authorization": "Bearer " + self.access_token,
            "Content-Type": "application/json",
            "Accept": "application/json",
        }
        payload = {"text": message.text}
        resp = await self.session.post(url, json=payload, headers=headers)
        if resp.status == 200:
            _LOGGER.info(_("Successfully responded."))
        else:
            _LOGGER.error(_("Unable to respond."))

    async def disconnect(self):
        """Disconnect the gitter."""
        # Disconnect from the chat service
        self.listening = False
        await self.session.close()
| {
"repo_name": "jacobtomlinson/opsdroid",
"path": "opsdroid/connector/gitter/__init__.py",
"copies": "3",
"size": "5012",
"license": "apache-2.0",
"hash": -5535807783010239000,
"line_mean": 35.8529411765,
"line_max": 85,
"alpha_frac": 0.5883878691,
"autogenerated": false,
"ratio": 4.091428571428572,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6179816440528572,
"avg_score": null,
"num_lines": null
} |
"""A connector for Mattermost."""
import logging
import json
from mattermostdriver import Driver, Websocket
from voluptuous import Required
from opsdroid.connector import Connector, register_event
from opsdroid.events import Message
_LOGGER = logging.getLogger(__name__)

# Schema used by opsdroid to validate this connector's configuration.
CONFIG_SCHEMA = {
    Required("token"): str,
    Required("url"): str,
    Required("team-name"): str,
    "scheme": str,
    "port": int,
    "ssl-verify": bool,
    "connect-timeout": int,
}
class ConnectorMattermost(Connector):
    """A connector for Mattermost."""

    def __init__(self, config, opsdroid=None):
        """Create the connector."""
        super().__init__(config, opsdroid=opsdroid)
        _LOGGER.debug(_("Starting Mattermost connector"))
        self.name = "mattermost"
        self.token = config["token"]
        self.url = config["url"]
        self.team_name = config["team-name"]
        self.scheme = config.get("scheme", "https")
        self.port = config.get("port", 8065)
        self.verify = config.get("ssl-verify", True)
        self.timeout = config.get("connect-timeout", 30)
        self.request_timeout = None
        self.mfa_token = None
        self.debug = False
        self.listening = True
        self.bot_id = None

        # The driver options simply mirror the attributes above.
        driver_options = {
            "url": self.url,
            "token": self.token,
            "scheme": self.scheme,
            "port": self.port,
            "verify": self.verify,
            "timeout": self.timeout,
            "request_timeout": self.request_timeout,
            "mfa_token": self.mfa_token,
            "debug": self.debug,
        }
        self.mm_driver = Driver(driver_options)

    async def connect(self):
        """Log in to Mattermost and open the websocket."""
        _LOGGER.info(_("Connecting to Mattermost"))

        login_response = self.mm_driver.login()

        _LOGGER.info(login_response)

        if "id" in login_response:
            self.bot_id = login_response["id"]
        if "username" in login_response:
            self.bot_name = login_response["username"]
        _LOGGER.info(_("Connected as %s"), self.bot_name)

        self.mm_driver.websocket = Websocket(
            self.mm_driver.options, self.mm_driver.client.token
        )

        _LOGGER.info(_("Connected successfully"))

    async def disconnect(self):
        """Disconnect from Mattermost."""
        self.listening = False
        self.mm_driver.logout()

    async def listen(self):
        """Listen for and parse new messages."""
        await self.mm_driver.websocket.connect(self.process_message)

    async def process_message(self, raw_message):
        """Process a raw message and pass it to the parser."""
        _LOGGER.info(raw_message)

        message = json.loads(raw_message)
        if message.get("event") != "posted":
            return
        data = message["data"]
        post = json.loads(data["post"])
        # if connected to Mattermost, don't parse our own messages
        # (https://github.com/opsdroid/opsdroid/issues/1775)
        if self.bot_id is not None and self.bot_id == post["user_id"]:
            return
        await self.opsdroid.parse(
            Message(
                text=post["message"],
                user=data["sender_name"],
                target=data["channel_name"],
                connector=self,
                raw_event=message,
            )
        )

    @register_event(Message)
    async def send_message(self, message):
        """Respond with a message."""
        _LOGGER.debug(
            _("Responding with: '%s' in room %s"), message.text, message.target
        )

        channel = self.mm_driver.channels.get_channel_by_name_and_team_name(
            self.team_name, message.target
        )
        self.mm_driver.posts.create_post(
            options={"channel_id": channel["id"], "message": message.text}
        )
| {
"repo_name": "jacobtomlinson/opsdroid",
"path": "opsdroid/connector/mattermost/__init__.py",
"copies": "2",
"size": "3963",
"license": "apache-2.0",
"hash": 372010124581057000,
"line_mean": 32.025,
"line_max": 80,
"alpha_frac": 0.5561443351,
"autogenerated": false,
"ratio": 4.068788501026694,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5624932836126695,
"avg_score": null,
"num_lines": null
} |
"""A connector for Webex Teams."""
import json
import logging
import uuid
import os
import aiohttp
from webexteamssdk import WebexTeamsAPI
from voluptuous import Required, Url
from opsdroid.connector import Connector, register_event
from opsdroid.events import Message
_LOGGER = logging.getLogger(__name__)

# Schema used by opsdroid to validate this connector's configuration.
CONFIG_SCHEMA = {Required("webhook-url"): Url(), Required("token"): str}
class ConnectorWebexTeams(Connector):
    """A connector for Webex Teams."""

    def __init__(self, config, opsdroid=None):
        """Create a connector."""
        _LOGGER.debug(_("Loaded WebEx Teams Connector."))
        super().__init__(config, opsdroid=opsdroid)
        self.name = "webexteams"
        self.config = config
        self.opsdroid = opsdroid
        self.default_target = None
        self.bot_name = config.get("bot-name", "opsdroid")
        self.bot_webex_id = None
        # Random secret attached to webhook registrations.
        self.secret = uuid.uuid4().hex
        # Cache of people already fetched from the API, keyed by person id.
        self.people = {}

    async def connect(self):
        """Connect to the chat service."""
        try:
            proxies = {
                "http": os.environ.get("HTTP_PROXY"),
                "https": os.environ.get("HTTPS_PROXY"),
            }
            self.api = WebexTeamsAPI(
                access_token=self.config["token"], proxies=proxies
            )
        except KeyError:
            # "token" missing from the configuration.
            _LOGGER.error(_("Must set access-token for WebEx Teams Connector."))
            return

        await self.clean_up_webhooks()
        await self.subscribe_to_rooms()
        await self.set_own_id()

    async def webexteams_message_handler(self, request):
        """Handle webhooks from the Webex Teams api."""
        _LOGGER.debug(_("Handling message from WebEx Teams."))
        req_data = await request.json()

        _LOGGER.debug(req_data)

        msg = self.api.messages.get(req_data["data"]["id"])
        sender_id = req_data["data"]["personId"]

        # Ignore the events produced by our own replies.
        if sender_id != self.bot_webex_id:
            person = await self.get_person(sender_id)

            try:
                message = Message(
                    text=msg.text,
                    user=person.displayName,
                    target={"id": msg.roomId, "type": msg.roomType},
                    connector=self,
                )
                await self.opsdroid.parse(message)
            except KeyError as error:
                _LOGGER.error(error)

        return aiohttp.web.Response(text=json.dumps("Received"), status=201)

    async def clean_up_webhooks(self):
        """Remove all existing webhooks."""
        for webhook in self.api.webhooks.list():
            self.api.webhooks.delete(webhook.id)

    async def subscribe_to_rooms(self):
        """Create webhooks for all rooms."""
        _LOGGER.debug(_("Creating Webex Teams webhook."))
        webhook_endpoint = "/connector/webexteams"
        self.opsdroid.web_server.web_app.router.add_post(
            webhook_endpoint, self.webexteams_message_handler
        )

        self.api.webhooks.create(
            name="opsdroid",
            targetUrl="{}{}".format(self.config.get("webhook-url"), webhook_endpoint),
            resource="messages",
            event="created",
            secret=self.secret,
        )

    async def get_person(self, personId):
        """Get a person's info from the api or cache."""
        if personId not in self.people:
            self.people[personId] = self.api.people.get(personId)
        return self.people[personId]

    async def set_own_id(self):
        """Get the bot id and set it in the class."""
        self.bot_webex_id = self.api.people.me().id

    async def listen(self):
        """Listen for and parse new messages."""
        pass  # Listening is handled by the aiohttp web server

    @register_event(Message)
    async def send_message(self, message):
        """Respond with a message."""
        self.api.messages.create(message.target["id"], text=message.text)
| {
"repo_name": "FabioRosado/opsdroid",
"path": "opsdroid/connector/webexteams/__init__.py",
"copies": "3",
"size": "3910",
"license": "apache-2.0",
"hash": 3141325656245777400,
"line_mean": 31.8571428571,
"line_max": 86,
"alpha_frac": 0.5851662404,
"autogenerated": false,
"ratio": 4.014373716632443,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00020033556206646132,
"num_lines": 119
} |
"""A connector to send messages using the command line."""
import logging
import os
import sys
import platform
import asyncio
from opsdroid.connector import Connector, register_event
from opsdroid.events import Message
_LOGGER = logging.getLogger(__name__)

# Schema used by opsdroid to validate this connector's configuration.
CONFIG_SCHEMA = {"bot-name": str}
class ConnectorShell(Connector):
    """A connector to send messages using the command line."""

    def __init__(self, config, opsdroid=None):
        """Create the connector.

        :param dict config: connector settings from `configuration.yaml`
        :param opsdroid: the running opsdroid instance
        """
        _LOGGER.debug(_("Loaded shell Connector."))
        super().__init__(config, opsdroid=opsdroid)
        self.name = "shell"
        self.config = config
        self.bot_name = config.get("bot-name", "opsdroid")
        self.prompt_length = None
        self.listening = True
        self.reader = None
        self._closing = asyncio.Event()
        self.loop = asyncio.get_event_loop()

        # BUG FIX: self.user was previously never assigned when none of
        # these environment variables were set, causing an AttributeError
        # in parseloop(). Default to None (anonymous user) first.
        self.user = None
        for name in ("LOGNAME", "USER", "LNAME", "USERNAME"):
            user = os.environ.get(name)
            if user:
                # Deliberately no break: the last variable that is set
                # wins, preserving the original lookup semantics.
                self.user = user

    @property
    def is_listening(self):
        """Get listening status."""
        return self.listening

    @is_listening.setter
    def is_listening(self, val):
        """Set listening status."""
        self.listening = val

    async def read_stdin(self):
        """Create a stream reader to read stdin asynchronously.

        Returns:
            class: asyncio.streams.StreamReader

        """
        self.reader = asyncio.StreamReader(loop=self.loop)
        reader_protocol = asyncio.StreamReaderProtocol(self.reader)

        await self.loop.connect_read_pipe(lambda: reader_protocol, sys.stdin)

        return self.reader

    async def async_input(self):
        """Read user input asynchronously from stdin.

        Returns:
            string: A decoded string from user input.

        """
        if not self.reader:
            self.reader = await self.read_stdin()
        line = await self.reader.readline()

        # Strip the line terminator(s) regardless of platform convention.
        return line.decode("utf8").replace("\r", "").replace("\n", "")

    def draw_prompt(self):
        """Draw the user input prompt."""
        prompt = self.bot_name + "> "
        self.prompt_length = len(prompt)
        print(prompt, end="", flush=True)

    def clear_prompt(self):
        """Clear the prompt."""
        print("\r" + (" " * self.prompt_length) + "\r", end="", flush=True)

    async def parseloop(self):
        """Parseloop moved out for testing."""
        self.draw_prompt()
        user_input = await self.async_input()
        message = Message(text=user_input, user=self.user, target=None, connector=self)
        await self.opsdroid.parse(message)

    async def _parse_message(self):
        """Parse user input."""
        while self.is_listening:
            await self.parseloop()

    async def connect(self):
        """Connect to the shell.

        There is nothing to do here since stdin is already available.

        Since this is the first method called when opsdroid starts, a logging
        message is shown if the user is using windows.
        """
        if platform.system() == "Windows":
            _LOGGER.warning(
                "The shell connector does not work on windows. Please install the Opsdroid Desktop App."
            )

    async def listen(self):
        """Listen for and parse new user input."""
        _LOGGER.debug(_("Connecting to shell."))
        message_processor = self.loop.create_task(self._parse_message())
        # Block until disconnect() sets the closing event, then stop the
        # background input task.
        await self._closing.wait()
        message_processor.cancel()

    @register_event(Message)
    async def respond(self, message):
        """Respond with a message.

        Args:
            message (object): An instance of Message

        """
        _LOGGER.debug(_("Responding with: %s."), message.text)
        self.clear_prompt()
        print(message.text)

    async def disconnect(self):
        """Disconnects the connector."""
        self._closing.set()
| {
"repo_name": "opsdroid/opsdroid",
"path": "opsdroid/connector/shell/__init__.py",
"copies": "3",
"size": "3948",
"license": "apache-2.0",
"hash": -8783304690829939000,
"line_mean": 29.1374045802,
"line_max": 104,
"alpha_frac": 0.5987841945,
"autogenerated": false,
"ratio": 4.258899676375404,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6357683870875404,
"avg_score": null,
"num_lines": null
} |
"""A connector which allows websocket connections."""
import logging
import json
import uuid
from datetime import datetime
import aiohttp
import aiohttp.web
from aiohttp import WSCloseCode
from opsdroid.connector import Connector, register_event
from opsdroid.events import Message
_LOGGER = logging.getLogger(__name__)

# CORS header attached to every HTTP response from this connector.
HEADERS = {"Access-Control-Allow-Origin": "*"}

# Schema used by opsdroid to validate this connector's configuration.
CONFIG_SCHEMA = {"bot-name": str, "max-connections": int, "connection-timeout": int}
class ConnectorWebsocket(Connector):
    """A connector which allows websocket connections."""

    def __init__(self, config, opsdroid=None):
        """Create the connector."""
        super().__init__(config, opsdroid=opsdroid)
        _LOGGER.debug(_("Starting Websocket connector."))
        self.name = "websocket"
        # Cap on simultaneous sockets (pending requests + live connections).
        self.max_connections = self.config.get("max-connections", 10)
        # Seconds a reserved socket id stays claimable before it expires.
        self.connection_timeout = self.config.get("connection-timeout", 60)
        self.accepting_connections = True
        # Maps socket id -> live aiohttp WebSocketResponse.
        self.active_connections = {}
        # Socket ids handed out by new_websocket_handler() but not yet
        # upgraded; each entry is {"id": ..., "date": ...}.
        self.available_connections = []
        self.bot_name = self.config.get("bot-name", "opsdroid")

    async def connect(self):
        """Connect to the chat service."""
        self.accepting_connections = True

        # GET upgrades a previously reserved socket id to a websocket;
        # POST reserves a new socket id.
        self.opsdroid.web_server.web_app.router.add_get(
            "/connector/websocket/{socket}", self.websocket_handler
        )

        self.opsdroid.web_server.web_app.router.add_post(
            "/connector/websocket", self.new_websocket_handler
        )

    async def disconnect(self):
        """Disconnect from current sessions."""
        self.accepting_connections = False
        # Iterate a copy: closing a socket makes its handler remove the
        # entry from active_connections while we loop.
        connections_to_close = self.active_connections.copy()
        for connection in connections_to_close:
            await connections_to_close[connection].close(
                code=WSCloseCode.GOING_AWAY, message="Server shutdown"
            )

    async def new_websocket_handler(self, request):
        """Handle for aiohttp creating websocket connections."""
        if (
            len(self.active_connections) + len(self.available_connections)
            < self.max_connections
            and self.accepting_connections
        ):
            # Reserve a socket id; the client must claim it via a GET to
            # websocket_handler() before connection_timeout elapses.
            socket = {"id": str(uuid.uuid1()), "date": datetime.now()}
            self.available_connections.append(socket)
            return aiohttp.web.Response(
                text=json.dumps({"socket": socket["id"]}), headers=HEADERS, status=200
            )
        return aiohttp.web.Response(
            text=json.dumps("No connections available"), headers=HEADERS, status=429
        )

    async def websocket_handler(self, request):
        """Handle for aiohttp handling websocket connections."""
        socket = request.match_info.get("socket")
        available = [
            item for item in self.available_connections if item["id"] == socket
        ]
        if len(available) != 1:
            # Unknown or already-claimed socket id.
            return aiohttp.web.Response(
                text=json.dumps("Please request a socket first"),
                headers=HEADERS,
                status=400,
            )
        if (
            datetime.now() - available[0]["date"]
        ).total_seconds() > self.connection_timeout:
            # Reservation expired: drop it and make the client start over.
            self.available_connections.remove(available[0])
            return aiohttp.web.Response(
                text=json.dumps("Socket request timed out"), headers=HEADERS, status=408
            )
        # Claim the reservation and promote it to a live connection.
        self.available_connections.remove(available[0])
        _LOGGER.debug(_("User connected to %s."), socket)

        websocket = aiohttp.web.WebSocketResponse()
        await websocket.prepare(request)

        self.active_connections[socket] = websocket

        async for msg in websocket:
            if msg.type == aiohttp.WSMsgType.TEXT:
                message = Message(text=msg.data, user=None, target=None, connector=self)
                await self.opsdroid.parse(message)
            elif msg.type == aiohttp.WSMsgType.ERROR:
                _LOGGER.error(
                    _("Websocket connection closed with exception %s."),
                    websocket.exception(),
                )

        _LOGGER.info(_("websocket connection closed"))
        self.active_connections.pop(socket, None)

        return websocket

    async def listen(self):
        """Listen for and parse new messages.

        Listening is handled by the aiohttp web server so
        we don't need to do anything here.

        """

    @register_event(Message)
    async def send_message(self, message):
        """Respond with a message."""
        try:
            if message.target is None:
                # Default to the first (oldest) live connection.
                message.target = next(iter(self.active_connections))
            _LOGGER.debug(
                _("Responding with: '%s' in target %s"), message.text, message.target
            )
            await self.active_connections[message.target].send_str(message.text)
        except KeyError:
            _LOGGER.error(_("No active socket for target %s"), message.target)
| {
"repo_name": "opsdroid/opsdroid",
"path": "opsdroid/connector/websocket/__init__.py",
"copies": "3",
"size": "4945",
"license": "apache-2.0",
"hash": -2900391089878215000,
"line_mean": 36.4621212121,
"line_max": 88,
"alpha_frac": 0.6107178969,
"autogenerated": false,
"ratio": 4.450945094509451,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0006171906116645341,
"num_lines": 132
} |
"""A console interface to the Ubuntu IRC logs using an Algolia backend."""
# pylint: disable=invalid-name
from threading import Thread
from algoliahelper import algoliahelper
from turwidal import turwidal
# Run Lola Run!
if __name__ == '__main__':
    # Init with a quick sanity check.
    class Ubunolia(turwidal.Interaction):
        """Extend the Interaction class."""

        def do_connect(self):
            """Connect to the pretend server."""
            # Instantiate the Algolia object. Come at me, Pythonistas.
            self.algolia = algoliahelper.AlgoliaHelper()  # pylint: disable=attribute-defined-outside-init
            # Start querying and dumping logs.
            import time

            def run():
                """This is the thread that injects IRC logs into the window."""
                while True:
                    # Ideally we could set which day we wanted to replay. :P
                    day = '2017-05-16T'
                    hhmm = time.strftime('%H:%M')
                    datestamp = day + hhmm
                    # Ideally we would be able to switch channels. :P
                    logs = self.algolia.get_irc_logs(datestamp, 'ubuntu')
                    for line in logs:
                        terminal.output(line)
                    # Simulate a running conversation by breaking up each
                    # minute-block by the number of log lines from that
                    # minute. BUG FIX: a minute with no log lines used to
                    # raise ZeroDivisionError; just wait out the minute.
                    time.sleep(60 / len(logs) if logs else 60)

            thread = Thread(target=run)
            thread.daemon = True
            thread.start()
            return 'Connected to irc://irc.ubuntu.com/#ubuntu'

        def do_list(self):
            """List the channels."""
            channels = self.algolia.get_channels()
            obj = 'Channel list:\n'
            for channel in channels:
                obj = obj + '#' + channel + '\n'
            return obj

        def do_whois(self, username):
            """Get info about a username."""
            whois = self.algolia.get_userinfo(username)
            obj = username + ' was first seen on ' + whois['firstseen'] + \
                '. Since then they have sent ' + str(whois['messages']) + \
                ' in the following channels: '
            for channel in whois['channels']:
                obj = obj + channel + ' '
            return obj

        def do_seen(self, username):
            """Do a "last seen" on a username."""
            lastseen = self.algolia.get_most_recent_user_stamp(username)
            obj = username + ' was last seen on: ' + lastseen
            return obj

    caption = 'Tab to switch focus to upper frame.'
    terminal = turwidal.Terminal(title='Ubunolia', cap=caption, cmd=Ubunolia())
    # Ok go forilla.
    terminal.loop()
| {
"repo_name": "phrawzty/ubunolia",
"path": "ubunolia.py",
"copies": "1",
"size": "2928",
"license": "mpl-2.0",
"hash": 2609665577469510700,
"line_mean": 32.6551724138,
"line_max": 105,
"alpha_frac": 0.5338114754,
"autogenerated": false,
"ratio": 4.363636363636363,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5397447839036363,
"avg_score": null,
"num_lines": null
} |
"""A container for a Pico-8 game, and routines to load and save game files."""
__all__ = [
'DEFAULT_VERSION',
'Game',
'InvalidP8HeaderError',
'InvalidP8SectionError'
]
import os
import re
import tempfile
from .. import util
from ..lua.lua import Lua
from ..lua.lua import PICO8_LUA_CHAR_LIMIT
from ..lua.lua import PICO8_LUA_TOKEN_LIMIT
from ..gfx.gfx import Gfx
from ..gff.gff import Gff
from ..map.map import Map
from ..sfx.sfx import Sfx
from ..music.music import Music
HEADER_TITLE_STR = b'pico-8 cartridge // http://www.pico-8.com\n'
HEADER_VERSION_RE = re.compile(br'version (\d+)\n')
SECTION_DELIM_RE = re.compile(br'__(\w+)__\n')
DEFAULT_VERSION = 8
EMPTY_LABEL_FNAME = os.path.join(os.path.dirname(__file__), 'empty_018.p8.png')
COMPRESSED_LUA_CHAR_TABLE = list(b'#\n 0123456789abcdefghijklmnopqrstuvwxyz!#%(){}[]<>+=/*:;.,~_')
# Pico-8 adds this automatically to compressed code and removes it
# automatically from decompressed code to maintain compatibility with Pico-8
# 0.1.7.
PICO8_FUTURE_CODE1 = (b'if(_update60)_update=function()'
b'_update60()_update60()end')
PICO8_FUTURE_CODE2 = (b'if(_update60)_update=function()'
b'_update60()_update_buttons()_update60()end')
class InvalidP8HeaderError(util.InvalidP8DataError):
    """Exception for invalid .p8 file header."""

    def __str__(self):
        # The header must be the title line followed by a "version N" line.
        return 'Invalid .p8: missing or corrupt header'
class InvalidP8SectionError(util.InvalidP8DataError):
    """Exception for invalid .p8 file section delimiter."""

    def __init__(self, bad_delim):
        """Initializer.

        Args:
            bad_delim: The unrecognized section name found in the file.
        """
        self.bad_delim = bad_delim

    def __str__(self):
        return 'Invalid .p8: bad section delimiter {}'.format(
            repr(self.bad_delim))
class InvalidP8PNGError(util.InvalidP8DataError):
    """Exception for PNG parsing errors."""
    # Raised when pypng cannot parse the cart image.
    pass
class Game():
    """A Pico-8 game."""

    def __init__(self, filename=None, compressed_size=None):
        """Initializer.

        Prefer factory functions such as Game.from_p8_file().

        Args:
            filename: The filename, if any, for tool messages.
            compressed_size: The byte size of the compressed Lua data region,
                or None if the Lua region was not compressed (.p8 or v0 .p8.png).
        """
        self.filename = filename
        self.compressed_size = compressed_size
        # Data regions; populated by the from_* factory methods.
        self.lua = None
        self.gfx = None
        self.gff = None
        self.map = None
        self.sfx = None
        self.music = None
        self.label = None
        self.version = None
    @classmethod
    def make_empty_game(cls, filename=None, version=DEFAULT_VERSION):
        """Create an empty game.

        Args:
            filename: An optional filename to use with error messages.
            version: The version ID of the empty game.

        Returns:
            A Game instance with valid but empty data regions.
        """
        g = cls(filename=filename)
        g.lua = Lua(version=version)
        g.lua.update_from_lines([])
        g.gfx = Gfx.empty(version=version)
        g.gff = Gff.empty(version=version)
        # The map is constructed with a reference to the gfx region.
        g.map = Map.empty(version=version, gfx=g.gfx)
        g.sfx = Sfx.empty(version=version)
        g.music = Music.empty(version=version)
        g.label = Gfx.empty(version=version)
        g.version = version
        return g
@classmethod
def from_filename(cls, filename):
"""Loads a game from a named file.
Args:
filename: The name of the file. Must end in either ".p8" or ".p8.png".
Returns:
A Game containing the game data.
Raises:
lexer.LexerError
parser.ParserError
InvalidP8HeaderError
"""
assert filename.endswith('.p8.png') or filename.endswith('.p8')
if filename.endswith('.p8'):
with open(filename, 'rb') as fh:
g = Game.from_p8_file(fh, filename=filename)
else:
with open(filename, 'rb') as fh:
g = Game.from_p8png_file(fh, filename=filename)
return g
    @classmethod
    def get_raw_data_from_p8_file(cls, instr, filename=None):
        """Read the header and the raw lines of each section from a .p8 file.

        Args:
            instr: The binary input stream.
            filename: The filename, if any, for tool messages.

        Returns:
            An object with a .version attribute (int) and a .section_lines
            attribute (dict mapping section name to a list of raw byte lines).

        Raises:
            InvalidP8HeaderError
        """
        header_title_str = instr.readline()
        if header_title_str != HEADER_TITLE_STR:
            raise InvalidP8HeaderError()
        header_version_str = instr.readline()
        version_m = HEADER_VERSION_RE.match(header_version_str)
        if version_m is None:
            raise InvalidP8HeaderError()
        version = int(version_m.group(1))

        # (section is a text str.)
        section = None
        section_lines = {}
        while True:
            line = instr.readline()
            if not line:
                break
            section_delim_m = SECTION_DELIM_RE.match(line)
            if section_delim_m:
                # A "__name__" delimiter line starts a new section.
                section = str(section_delim_m.group(1), encoding='ascii')
                section_lines[section] = []
            elif section:
                section_lines[section].append(line)

        class P8Data(object):
            pass
        data = P8Data()
        data.version = version
        data.section_lines = section_lines

        return data
    @classmethod
    def from_p8_file(cls, instr, filename=None):
        """Loads a game from a .p8 file.

        Args:
            instr: The binary input stream.
            filename: The filename, if any, for tool messages.

        Returns:
            A Game containing the game data.

        Raises:
            InvalidP8HeaderError
            InvalidP8SectionError
        """
        data = cls.get_raw_data_from_p8_file(instr, filename=filename)

        new_game = cls.make_empty_game(filename=filename)
        # Discard empty label until one is found in the file.
        new_game.label = None
        new_game.version = data.version
        for section in data.section_lines:
            if section == 'lua':
                new_game.lua = Lua.from_lines(
                    data.section_lines[section], version=data.version)
            elif section == 'gfx':
                new_game.gfx = Gfx.from_lines(
                    data.section_lines[section], version=data.version)
                # Keep the map's gfx reference in sync with the new gfx.
                my_map = getattr(new_game, 'map')
                if my_map is not None:
                    my_map._gfx = new_game.gfx
            elif section == 'gff':
                new_game.gff = Gff.from_lines(
                    data.section_lines[section], version=data.version)
            elif section == 'map':
                my_gfx = getattr(new_game, 'gfx')
                new_game.map = Map.from_lines(
                    data.section_lines[section], version=data.version, gfx=my_gfx)
            elif section == 'sfx':
                new_game.sfx = Sfx.from_lines(
                    data.section_lines[section], version=data.version)
            elif section == 'music':
                new_game.music = Music.from_lines(
                    data.section_lines[section], version=data.version)
            elif section == 'label':
                new_game.label = Gfx.from_lines(
                    data.section_lines[section], version=data.version)
            else:
                raise InvalidP8SectionError(section)

        return new_game
@classmethod
def get_picodata_from_pngdata(cls, width, height, pngdata, attrs):
"""Extracts Pico-8 bytes from a .p8.png's PNG data.
The arguments are expected in the format returned by png.Reader's
read() method.
Args:
width: The PNG width.
height: The PNG height.
pngdata: The PNG data region, an iterable of 'height' rows, where
each row is an indexable 'width' * 'attrs['planes']' long.
attrs: The PNG attrs.
Returns:
The Pico-8 data, a list of width * height (0x8000) byte-size numbers.
"""
picodata = [0] * width * height
row_i = 0
for row in pngdata:
for col_i in range(width):
picodata[row_i * width + col_i] |= (
(row[col_i * attrs['planes'] + 2] & 3) << (0 * 2))
picodata[row_i * width + col_i] |= (
(row[col_i * attrs['planes'] + 1] & 3) << (1 * 2))
picodata[row_i * width + col_i] |= (
(row[col_i * attrs['planes'] + 0] & 3) << (2 * 2))
picodata[row_i * width + col_i] |= (
(row[col_i * attrs['planes'] + 3] & 3) << (3 * 2))
row_i += 1
return picodata
    @classmethod
    def get_pngdata_from_picodata(cls, picodata, pngdata, attrs):
        """Encodes Pico-8 bytes into a given PNG's image data.

        Inverse of get_picodata_from_pngdata(): each Pico-8 byte is hidden
        in the low two bits of the four planes of one pixel.

        Args:
            picodata: The Pico-8 data, a bytearray of 0x8000 bytes.
            pngdata: The PNG image data of the original cart image,
                as an iterable of rows as returned by pypng.
            attrs: The attrs of the original PNG image, as returned by pypng.

        Returns:
            New PNG image data, as an iterable of rows, suitable for writing
            by pypng.
        """
        new_rows = []
        planes = attrs['planes']
        for row_i, row in enumerate(pngdata):
            width = int(len(row) / planes)
            new_row = bytearray(width * planes)
            for col_i in range(width):
                if (row_i * width + col_i) < len(picodata):
                    # Replace the low two bits of each plane with two bits
                    # of the Pico-8 byte, keeping the upper bits of the
                    # original label image intact.
                    picobyte = picodata[row_i * width + col_i]
                    new_row[col_i * planes + 2] = (
                        (row[col_i * planes + 2] & ~3) |
                        (picobyte & 3))
                    new_row[col_i * planes + 1] = (
                        (row[col_i * planes + 1] & ~3) |
                        ((picobyte >> 2) & 3))
                    new_row[col_i * planes + 0] = (
                        (row[col_i * planes + 0] & ~3) |
                        ((picobyte >> 4) & 3))
                    new_row[col_i * planes + 3] = (
                        (row[col_i * planes + 3] & ~3) |
                        ((picobyte >> 6) & 3))
                else:
                    # Past the end of the cart data: copy the pixel unchanged.
                    for n in range(4):
                        new_row[col_i * planes + n] = (
                            row[col_i * planes + n])
            new_rows.append(new_row)
        return new_rows
@classmethod
def _find_repeatable_block(cls, dat, pos):
"""Find a repeatable block in the data.
Part of the literal port of the Pico-8 compression routine. See
compress_code().
Args:
dat: Array of data bytes.
pos: Starting index in dat.
Returns:
A tuple: (best_len, block_offset)
"""
max_block_len = 17
max_hist_len = (255 - len(COMPRESSED_LUA_CHAR_TABLE)) * 16
best_len = 0
best_i = -100000
max_len = min(max_block_len, len(dat) - pos)
max_hist_len = min(max_hist_len, pos);
i = pos - max_hist_len
while i < pos:
j = i
while (j - i) < max_len and j < pos and dat[j] == dat[pos + j - i]:
j += 1
if (j - i) > best_len:
best_len = j - i
best_i = i
i += 1
block_offset = pos - best_i
return best_len, block_offset
    @classmethod
    def compress_code(cls, in_p):
        """A literal port of the Pico-8 C compression routine.

        TODO: The original algorithm uses a brute force search for blocks
        (_find_repeatable_block()), which makes the overall algorithm O(n^2).
        I had a previous implementation that was faster but did not produce
        the same compressed result. It should be possible to optimize the
        working implementation using Python features without changing its result.
        (A quick attempt at memoization did not result in a speed increase.)

        Args:
            in_p: The code to compress, as a bytestring.

        Returns:
            The compressed code, as a bytearray. The compressed result is
            returned even if it is longer than in_p. The caller is responsible
            for comparing it to the original and acting accordingly.
        """
        PICO8_CODE_ALLOC_SIZE = (0x10000 + 1)

        pos = 0

        # literal_index maps a byte value to its one-byte literal code, or 0
        # for bytes that must be escaped (a 0x00 followed by the raw byte).
        literal_index = [0] * 256
        for i in range(1, len(COMPRESSED_LUA_CHAR_TABLE)):
            literal_index[COMPRESSED_LUA_CHAR_TABLE[i]] = i

        # Append the Pico-8 0.1.7 compatibility shim, as Pico-8 does.
        if b'_update60' in in_p and len(in_p) < PICO8_CODE_ALLOC_SIZE - (
                len(PICO8_FUTURE_CODE2) + 1):
            if in_p[-1] != b' '[0] and in_p[-1] != b'\n'[0]:
                in_p += b'\n'
            in_p += PICO8_FUTURE_CODE2

        out = bytearray()

        # The Pico-8 C code adds the preamble here, but we do it in
        # get_bytes_from_code().
        #out += b':c:\x00'
        #out.append(len(in_p) // 256)
        #out.append(len(in_p) % 256)
        #out += b'\x00\x00'

        while pos < len(in_p):
            block_len, block_offset = cls._find_repeatable_block(in_p, pos)
            if block_len >= 3:
                # Encode a back-reference as a two-byte (offset, length) pair.
                out.append(
                    (block_offset // 16) + len(COMPRESSED_LUA_CHAR_TABLE))
                out.append((block_offset % 16) + (block_len - 2) * 16)
                pos += block_len
            else:
                # Encode a literal; index 0 escapes bytes outside the table.
                out.append(literal_index[in_p[pos]])
                if literal_index[in_p[pos]] == 0:
                    out.append(in_p[pos])
                pos += 1

        return out
    @classmethod
    def decompress_code(cls, codedata):
        """Decompresses compressed code data.

        Args:
            codedata: The bytes of the code region (0x4300:0x8000).

        Returns:
            The tuple (code_length, code, compressed_size). code is a bytestring.
        """
        # Bytes 4-5 of the region store the uncompressed length, big-endian.
        code_length = (codedata[4] << 8) | codedata[5]
        assert bytes(codedata[6:8]) == b'\x00\x00'

        out = [0] * code_length
        in_i = 8
        out_i = 0
        while out_i < code_length and in_i < len(codedata):
            if codedata[in_i] == 0x00:
                # 0x00 escapes a raw byte.
                in_i += 1
                out[out_i] = codedata[in_i]
                out_i += 1
            elif codedata[in_i] <= 0x3b:
                # One-byte literal from the char table.
                out[out_i] = COMPRESSED_LUA_CHAR_TABLE[codedata[in_i]]
                out_i += 1
            else:
                # Two-byte back-reference: (offset, length) into the output.
                in_i += 1
                offset = ((codedata[in_i - 1] - 0x3c) * 16 +
                          (codedata[in_i] & 0xf))
                length = (codedata[in_i] >> 4) + 2
                out[out_i:out_i + length] = \
                    out[out_i - offset:out_i - offset + length]
                out_i += length
            in_i += 1

        # Strip the 0.1.7 compatibility shim (and the newline added before
        # it) if Pico-8 appended it; see compress_code().
        code = bytes(out).strip(b'\x00')
        if code.endswith(PICO8_FUTURE_CODE1):
            code = code[:-len(PICO8_FUTURE_CODE1)]
            if code[-1] == b'\n'[0]:
                code = code[:-1]
        if code.endswith(PICO8_FUTURE_CODE2):
            code = code[:-len(PICO8_FUTURE_CODE2)]
            if code[-1] == b'\n'[0]:
                code = code[:-1]
        compressed_size = in_i
        return code_length, code, compressed_size
    @classmethod
    def get_code_from_bytes(cls, codedata, version):
        """Gets the code text from the byte data.

        Args:
            codedata: The bytes of the code region (0x4300:0x8000).
            version: The version of the cart data.

        Returns:
            The tuple (code_length, code, compressed_size). compressed_size is
            None if the code data was not compressed. code is a bytestring.
        """
        if version == 0 or bytes(codedata[:4]) != b':c:\x00':
            # code is ASCII
            try:
                code_length = codedata.index(0)
            except ValueError:
                # Edge case: uncompressed code completely fills the code area.
                code_length = 0x8000 - 0x4300
            code = bytes(codedata[:code_length]) + b'\n'
            compressed_size = None
        else:
            # code is compressed
            code_length, code, compressed_size = cls.decompress_code(codedata)
        # Replace carriage returns with spaces.
        code = code.replace(b'\r', b' ')
        return code_length, code, compressed_size
@classmethod
def get_bytes_from_code(cls, code):
"""Gets the byte data for code text.
Args:
code: The code text.
Returns:
The bytes for the code, possibly compressed.
"""
compressed_bytes = cls.compress_code(code)
if len(compressed_bytes) < len(code):
# Use compressed.
code_length_bytes = bytes([len(code) >> 8, len(code) & 255])
code_bytes = b''.join([b':c:\0', code_length_bytes, b'\0\0',
compressed_bytes])
else:
# Use uncompressed.
code_bytes = bytes(code, 'ascii')
byte_array = bytearray(0x8000-0x4300)
byte_array[:len(code_bytes)] = code_bytes
return byte_array
    @classmethod
    def get_raw_data_from_p8png_file(cls, instr, filename=None):
        """Read and unpack raw section data from a .p8.png file.

        Args:
            instr: The input stream.
            filename: The filename, if any, for tool messages.

        Returns:
            An object with properties of raw data: gfx, p8map, gfx_props,
            song, sfx, codedata, version, code_length, code,
            compressed_size.

        Raises:
            InvalidP8PNGError
        """
        # To install: python3 -m pip install pypng
        import png

        try:
            r = png.Reader(file=instr)
            (width, height, data, attrs) = r.read()
            data = list(data)
        except png.Error:
            raise InvalidP8PNGError()

        picodata = cls.get_picodata_from_pngdata(width, height, data, attrs)

        class ParsedData(object):
            pass
        data = ParsedData()

        # Slice the unpacked bytes into the cart memory regions.
        data.gfx = picodata[0x0:0x2000]
        data.p8map = picodata[0x2000:0x3000]
        data.gfx_props = picodata[0x3000:0x3100]
        data.song = picodata[0x3100:0x3200]
        data.sfx = picodata[0x3200:0x4300]
        data.codedata = picodata[0x4300:0x8000]
        data.version = picodata[0x8000]

        # TODO: Extract new_game.label from data

        (data.code_length, data.code, data.compressed_size) = \
            cls.get_code_from_bytes(data.codedata, data.version)

        return data
    @classmethod
    def from_p8png_file(cls, instr, filename=None):
        """Loads a game from a .p8.png file.

        Args:
            instr: The input stream.
            filename: The filename, if any, for tool messages.

        Returns:
            A Game containing the game data.
        """
        data = cls.get_raw_data_from_p8png_file(instr, filename=filename)

        new_game = cls(filename=filename, compressed_size=data.compressed_size)
        new_game.version = data.version
        new_game.lua = Lua.from_lines(
            [data.code], version=data.version)
        new_game.gfx = Gfx.from_bytes(
            data.gfx, version=data.version)
        new_game.gff = Gff.from_bytes(
            data.gfx_props, version=data.version)
        # The map shares a reference to the gfx region.
        new_game.map = Map.from_bytes(
            data.p8map, version=data.version, gfx=new_game.gfx)
        new_game.sfx = Sfx.from_bytes(
            data.sfx, version=data.version)
        new_game.music = Music.from_bytes(
            data.song, version=data.version)
        return new_game
def get_compressed_size(self):
"""Gets the compressed code size.
If the code was not already stored compressed, this runs the
compression routine to determine the size it would be if compressed.
Returns:
The compressed code size, as a number of bytes.
"""
if self.compressed_size is not None:
return self.compressed_size
comp_result = self.compress_code(b''.join(self.lua.to_lines()))
return len(comp_result)
    def to_p8_file(self, outstr, lua_writer_cls=None, lua_writer_args=None,
                   filename=None):
        """Write the game data as a .p8 file.

        Args:
            outstr: The output stream.
            lua_writer_cls: The Lua writer class to use. If None, defaults to
                LuaEchoWriter.
            lua_writer_args: Args to pass to the Lua writer.
            filename: The output filename, for error messages.
        """
        outstr.write(HEADER_TITLE_STR)

        # NOTE(review): the header always declares version 8 regardless of
        # self.version -- presumably this is the only .p8 dialect the writer
        # emits; confirm before changing.
        outstr.write(b'version 8\n')

        # Sanity-check the Lua written by the writer.
        transformed_lua = Lua.from_lines(
            self.lua.to_lines(writer_cls=lua_writer_cls,
                              writer_args=lua_writer_args),
            version=(self.version or 0))
        if transformed_lua.get_char_count() > PICO8_LUA_CHAR_LIMIT:
            if filename is not None:
                util.error('{}: '.format(filename))
            util.error('warning: character count {} exceeds the Pico-8 '
                       'limit of {}\n'.format(
                           transformed_lua.get_char_count(),
                           PICO8_LUA_CHAR_LIMIT))
        if transformed_lua.get_token_count() > PICO8_LUA_TOKEN_LIMIT:
            if filename is not None:
                util.error('{}: '.format(filename))
            util.error('warning: token count {} exceeds the Pico-8 '
                       'limit of {}\n'.format(
                           transformed_lua.get_token_count(),
                           PICO8_LUA_TOKEN_LIMIT))

        outstr.write(b'__lua__\n')
        ended_in_newline = None
        for l in self.lua.to_lines(writer_cls=lua_writer_cls,
                                   writer_args=lua_writer_args):
            outstr.write(l)
            ended_in_newline = l.endswith(b'\n')
        if not ended_in_newline:
            outstr.write(b'\n')

        outstr.write(b'__gfx__\n')
        for l in self.gfx.to_lines():
            outstr.write(l)

        # The label section is optional.
        if self.label:
            outstr.write(b'__label__\n')
            for l in self.label.to_lines():
                outstr.write(l)

        # Pico-8 emits an extra newline before __gff__ for no good reason, as
        # of 0.1.10c. Pico-8 doesn't care whether we do, but our tests want to
        # match the test cart data exactly.
        outstr.write(b'\n')

        outstr.write(b'__gff__\n')
        for l in self.gff.to_lines():
            outstr.write(l)

        outstr.write(b'__map__\n')
        for l in self.map.to_lines():
            outstr.write(l)

        outstr.write(b'__sfx__\n')
        for l in self.sfx.to_lines():
            outstr.write(l)

        outstr.write(b'__music__\n')
        for l in self.music.to_lines():
            outstr.write(l)

        outstr.write(b'\n')
    def to_p8png_file(self, outstr, label_fname=None, lua_writer_cls=None,
                      lua_writer_args=None, filename=None):
        """Write the game data as a .p8.png file.

        Args:
            outstr: The output stream.
            label_fname: The .p8.png file (or appropriately spec'd .png file)
                to use for the label. If None, uses a Pico-8-generated empty label.
            lua_writer_cls: The Lua writer class to use. If None, defaults to
                LuaEchoWriter.
            lua_writer_args: Args to pass to the Lua writer.
            filename: The output filename, for error messages.

        Raises:
            InvalidP8PNGError
        """
        # To install: python3 -m pip install pypng
        import png

        # TODO: If self.label, use EMPTY_LABEL_FNAME and substitute the appropriate img_data

        label_fname = label_fname or EMPTY_LABEL_FNAME
        try:
            with open(label_fname, 'rb') as label_fh:
                r = png.Reader(file=label_fh)
                (width, height, img_data, attrs) = r.read()
                img_data = list(img_data)
        except png.Error:
            raise InvalidP8PNGError()

        cart_lua = self.lua.to_lines(writer_cls=lua_writer_cls,
                                     writer_args=lua_writer_args)
        code_bytes = self.get_bytes_from_code(b''.join(cart_lua))

        # Pack the regions in cart memory order, then append the version byte.
        picodata = b''.join((self.gfx.to_bytes(),
                             self.map.to_bytes(),
                             self.gff.to_bytes(),
                             self.music.to_bytes(),
                             self.sfx.to_bytes(),
                             code_bytes,
                             bytes((self.version,))))

        new_rows = self.get_pngdata_from_picodata(picodata, img_data, attrs)

        wr = png.Writer(width, height, **attrs)
        wr.write(outstr, new_rows)
    def to_file(self, filename=None, *args, **kwargs):
        """Write the game data to a file, based on a filename.

        If filename ends with .png, the output is a .p8.png file. If the
        output file exists, its label is reused, otherwise an empty label is
        used. The label can be overridden by the caller with the
        'label_fname' argument.

        If filename does not end with .png, then the output is a .p8 file.

        Args:
            filename: The filename.
        """
        file_args = {'mode':'wb+'}

        # Write to a temporary file first, presumably so the existing cart
        # can still be read as the label source before being overwritten.
        with tempfile.TemporaryFile(**file_args) as outfh:
            if filename.endswith('.png'):
                if kwargs.get('label_fname', None) is None:
                    if os.path.exists(filename):
                        # Reuse the existing cart's image as the label.
                        kwargs['label_fname'] = filename
                self.to_p8png_file(outfh, filename=filename, *args, **kwargs)
            else:
                self.to_p8_file(outfh, *args, **kwargs)
            outfh.seek(0)
            with open(filename, **file_args) as finalfh:
                finalfh.write(outfh.read())
    def write_cart_data(self, data, start_addr=0):
        """Write binary data to an arbitrary cart address.

        Args:
            data: The data to write, as a byte string or bytearray.
            start_addr: The address to start writing.

        Raises:
            ValueError: If the data would extend past the end of the
                writable region (0x4300).
        """
        if start_addr + len(data) > 0x4300:
            raise ValueError('Data too large: {} bytes starting at {} exceeds '
                             '0x4300'.format(len(data), start_addr))
        # Cart memory map: (start address, end address, backing array).
        memmap = ((0x0,0x2000,self.gfx._data),
                  (0x2000,0x3000,self.map._data),
                  (0x3000,0x3100,self.gff._data),
                  (0x3100,0x3200,self.music._data),
                  (0x3200,0x4300,self.sfx._data))
        for start_a, end_a, section_data in memmap:
            # Skip regions that don't overlap [start_addr, start_addr+len).
            if (start_addr > end_a or
                start_addr + len(data) < start_a):
                continue
            # Overlap bounds in region-local coordinates. NOTE(review):
            # when the write reaches past a region, data_end_a is the
            # *absolute* end_a; this still writes the full region only
            # because Python slice bounds clamp -- verify intent.
            data_start_a = (start_addr - start_a
                            if start_addr > start_a
                            else 0)
            data_end_a = (start_addr + len(data) - start_a
                          if start_addr + len(data) < end_a
                          else end_a)
            # Corresponding bounds within `data`. NOTE(review): when the
            # data ends exactly at end_a, text_end_a is -0 == 0, which
            # yields an empty slice -- confirm this boundary case.
            text_start_a = (0 if start_addr > start_a
                            else start_a - start_addr)
            text_end_a = (len(data)
                          if start_addr + len(data) < end_a
                          else -(start_addr + len(data) - end_a))
            section_data[data_start_a:data_end_a] = \
                data[text_start_a:text_end_a]
| {
"repo_name": "dansanderson/picotool",
"path": "pico8/game/game.py",
"copies": "1",
"size": "26969",
"license": "mit",
"hash": -1126616443399691900,
"line_mean": 34.4388961892,
"line_max": 98,
"alpha_frac": 0.5355037265,
"autogenerated": false,
"ratio": 3.704024172503777,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.972689200681687,
"avg_score": 0.0025271784373814827,
"num_lines": 761
} |
"""A container for a Pico-8 game, and routines to load and save game files."""
__all__ = [
'Game',
'InvalidP8HeaderError',
'InvalidP8SectionError'
]
import re
from .. import util
from ..lua.lua import Lua
from ..lua.lua import PICO8_LUA_CHAR_LIMIT
from ..lua.lua import PICO8_LUA_TOKEN_LIMIT
from ..gfx.gfx import Gfx
from ..gff.gff import Gff
from ..map.map import Map
from ..sfx.sfx import Sfx
from ..music.music import Music
HEADER_TITLE_STR = 'pico-8 cartridge // http://www.pico-8.com\n'
HEADER_VERSION_RE = re.compile('version (\d+)\n')
HEADER_VERSION_PAT = 'version {}\n'
SECTION_DELIM_RE = re.compile('__(\w+)__\n')
SECTION_DELIM_PAT = '__{}__\n'
class InvalidP8HeaderError(util.InvalidP8DataError):
    """Exception for invalid .p8 file header."""

    def __str__(self):
        # The header must be the title line followed by a "version N" line.
        return 'Invalid .p8: missing or corrupt header'
class InvalidP8SectionError(util.InvalidP8DataError):
    """Exception for invalid .p8 file section delimiter."""

    def __init__(self, bad_delim):
        """Initializer.

        Args:
            bad_delim: The unrecognized section name found in the file.
        """
        self.bad_delim = bad_delim

    def __str__(self):
        return 'Invalid .p8: bad section delimiter {}'.format(
            repr(self.bad_delim))
class Game():
    """A Pico-8 game."""

    def __init__(self, filename=None, compressed_size=None):
        """Initializer.

        Prefer factory functions such as Game.from_p8_file().

        Args:
            filename: The filename, if any, for tool messages.
            compressed_size: The byte size of the compressed Lua data region,
                or None if the Lua region was not compressed (.p8 or v0 .p8.png).
        """
        self.filename = filename
        self.compressed_size = compressed_size
        # Data regions; populated by the from_* factory methods.
        self.lua = None
        self.gfx = None
        self.gff = None
        self.map = None
        self.sfx = None
        self.music = None
        self.version = None
    @classmethod
    def make_empty_game(cls, filename=None):
        """Create an empty game.

        Args:
            filename: An optional filename to use with error messages.

        Returns:
            A Game instance with valid but empty data regions.
        """
        g = cls(filename=filename)
        # Version 5 is hard-coded: this module only knows how to write the
        # v5 .p8 format (see to_p8_file).
        g.lua = Lua(version=5)
        g.lua.update_from_lines([])
        g.gfx = Gfx.empty(version=5)
        g.gff = Gff.empty(version=5)
        # The map is constructed with a reference to the gfx region.
        g.map = Map.empty(version=5, gfx=g.gfx)
        g.sfx = Sfx.empty(version=5)
        g.music = Music.empty(version=5)
        g.version = 5
        return g
    @classmethod
    def from_filename(cls, filename):
        """Loads a game from a named file.

        Args:
            filename: The name of the file. Must end in either ".p8" or ".p8.png".

        Returns:
            A Game containing the game data.

        Raises:
            lexer.LexerError
            parser.ParserError
            InvalidP8HeaderError
        """
        assert filename.endswith('.p8.png') or filename.endswith('.p8')
        if filename.endswith('.p8'):
            # .p8 is read as UTF-8 text; .p8.png is a binary image.
            with open(filename, 'r', encoding='utf-8') as fh:
                g = Game.from_p8_file(fh, filename=filename)
        else:
            with open(filename, 'rb') as fh:
                g = Game.from_p8png_file(fh, filename=filename)
        return g
    @classmethod
    def from_p8_file(cls, instr, filename=None):
        """Loads a game from a .p8 file.

        Args:
            instr: The input stream.
            filename: The filename, if any, for tool messages.

        Returns:
            A Game containing the game data.

        Raises:
            InvalidP8HeaderError
            InvalidP8SectionError
        """
        header_title_str = instr.readline()
        if header_title_str != HEADER_TITLE_STR:
            raise InvalidP8HeaderError()
        header_version_str = instr.readline()
        version_m = HEADER_VERSION_RE.match(header_version_str)
        if version_m is None:
            raise InvalidP8HeaderError()
        version = int(version_m.group(1))

        # Collect the raw lines of each "__name__" section.
        section = None
        section_lines = {}
        while True:
            line = instr.readline()
            if not line:
                break
            section_delim_m = SECTION_DELIM_RE.match(line)
            if section_delim_m:
                section = section_delim_m.group(1)
                section_lines[section] = []
            elif section:
                section_lines[section].append(line)

        new_game = cls.make_empty_game(filename=filename)
        new_game.version = version
        for section in section_lines:
            if section == 'lua':
                new_game.lua = Lua.from_lines(
                    section_lines[section], version=version)
            elif section == 'gfx':
                new_game.gfx = Gfx.from_lines(
                    section_lines[section], version=version)
                # Keep the map's gfx reference in sync with the new gfx.
                my_map = getattr(new_game, 'map')
                if my_map is not None:
                    my_map._gfx = new_game.gfx
            elif section == 'gff':
                new_game.gff = Gff.from_lines(
                    section_lines[section], version=version)
            elif section == 'map':
                my_gfx = getattr(new_game, 'gfx')
                new_game.map = Map.from_lines(
                    section_lines[section], version=version, gfx=my_gfx)
            elif section == 'sfx':
                new_game.sfx = Sfx.from_lines(
                    section_lines[section], version=version)
            elif section == 'music':
                new_game.music = Music.from_lines(
                    section_lines[section], version=version)
            else:
                raise InvalidP8SectionError(section)
        return new_game
    @classmethod
    def from_p8png_file(cls, instr, filename=None):
        """Loads a game from a .p8.png file.

        Args:
            instr: The input stream.
            filename: The filename, if any, for tool messages.

        Returns:
            A Game containing the game data.
        """
        # To install: python3 -m pip install pypng
        import png

        r = png.Reader(file=instr)
        (width, height, data, attrs) = r.read()
        picodata = [0] * width * height

        # Each Pico-8 byte is stored in the low two bits of the four planes
        # of one pixel: plane +2 holds bits 0-1, +1 bits 2-3, +0 bits 4-5,
        # +3 bits 6-7.
        row_i = 0
        for row in data:
            for col_i in range(width):
                picodata[row_i * width + col_i] |= (
                    (row[col_i * attrs['planes'] + 2] & 3) << (0 * 2))
                picodata[row_i * width + col_i] |= (
                    (row[col_i * attrs['planes'] + 1] & 3) << (1 * 2))
                picodata[row_i * width + col_i] |= (
                    (row[col_i * attrs['planes'] + 0] & 3) << (2 * 2))
                picodata[row_i * width + col_i] |= (
                    (row[col_i * attrs['planes'] + 3] & 3) << (3 * 2))
            row_i += 1

        # Slice the unpacked bytes into the cart memory regions.
        gfx = picodata[0x0:0x2000]
        p8map = picodata[0x2000:0x3000]
        gfx_props = picodata[0x3000:0x3100]
        song = picodata[0x3100:0x3200]
        sfx = picodata[0x3200:0x4300]
        code = picodata[0x4300:0x8000]
        version = picodata[0x8000]

        compressed_size = None
        if version == 0 or bytes(code[:4]) != b':c:\x00':
            # code is ASCII

            # (I assume this fails if uncompressed code completely
            # fills the code area, in which case code_length =
            # 0x8000-0x4300.)
            code_length = code.index(0)
            code = ''.join(chr(c) for c in code[:code_length]) + '\n'
        elif version == 1 or version == 5:
            # code is compressed. Bytes 4-5 hold the uncompressed length.
            code_length = (code[4] << 8) | code[5]
            assert bytes(code[6:8]) == b'\x00\x00'

            chars = list(b'#\n 0123456789abcdefghijklmnopqrstuvwxyz!#%(){}[]<>+=/*:;.,~_')
            out = [0] * code_length
            in_i = 8
            out_i = 0
            while out_i < code_length and in_i < len(code):
                if code[in_i] == 0x00:
                    # 0x00 escapes a raw byte.
                    in_i += 1
                    out[out_i] = code[in_i]
                    out_i += 1
                elif code[in_i] <= 0x3b:
                    # One-byte literal from the char table.
                    out[out_i] = chars[code[in_i]]
                    out_i += 1
                else:
                    # Two-byte back-reference into the output.
                    in_i += 1
                    offset = (code[in_i - 1] - 0x3c) * 16 + (code[in_i] & 0xf)
                    length = (code[in_i] >> 4) + 2
                    out[out_i:out_i + length] = out[out_i - offset:out_i - offset + length]
                    out_i += length
                in_i += 1
            code = ''.join(chr(c) for c in out) + '\n'
            compressed_size = in_i
        # NOTE(review): for any other version with a ':c:' preamble, `code`
        # is left as a raw byte list and passed to Lua.from_lines below --
        # verify whether additional versions need handling here.

        new_game = cls(filename=filename, compressed_size=compressed_size)
        new_game.version = version
        new_game.lua = Lua.from_lines(
            [code], version=version)
        new_game.gfx = Gfx.from_bytes(
            gfx, version=version)
        new_game.gff = Gff.from_bytes(
            gfx_props, version=version)
        new_game.map = Map.from_bytes(
            p8map, version=version, gfx=new_game.gfx)
        new_game.sfx = Sfx.from_bytes(
            sfx, version=version)
        new_game.music = Music.from_bytes(
            song, version=version)
        return new_game
def to_p8_file(self, outstr, lua_writer_cls=None, lua_writer_args=None,
filename=None):
"""Write the game data as a .p8 file.
Args:
outstr: The output stream.
lua_writer_cls: The Lua writer class to use. If None, defaults to
LuaEchoWriter.
lua_writer_args: Args to pass to the Lua writer.
filename: The output filename, for error messages.
"""
outstr.write(HEADER_TITLE_STR)
# Even though we can get the original cart version, we
# hard-code version 5 for output because we only know how to
# write v5 .p8 files. There are minor changes from previous
# versions of .p8 that don't apply to .p8.png (such as the gff
# section).
outstr.write(HEADER_VERSION_PAT.format(5))
# Sanity-check the Lua written by the writer.
transformed_lua = Lua.from_lines(
self.lua.to_lines(writer_cls=lua_writer_cls,
writer_args=lua_writer_args),
version=(self.version or 0))
if transformed_lua.get_char_count() > PICO8_LUA_CHAR_LIMIT:
if filename is not None:
util.error('{}: '.format(filename))
util.error('warning: character count {} exceeds the Pico-8 '
'limit of {}'.format(
transformed_lua.get_char_count(),
PICO8_LUA_CHAR_LIMIT))
if transformed_lua.get_token_count() > PICO8_LUA_TOKEN_LIMIT:
if filename is not None:
util.error('{}: '.format(filename))
util.error('warning: token count {} exceeds the Pico-8 '
'limit of {}'.format(
transformed_lua.get_char_count(),
PICO8_LUA_CHAR_LIMIT))
outstr.write(SECTION_DELIM_PAT.format('lua'))
ended_in_newline = None
for l in self.lua.to_lines(writer_cls=lua_writer_cls,
writer_args=lua_writer_args):
outstr.write(l)
ended_in_newline = l.endswith('\n')
if not ended_in_newline:
outstr.write('\n')
outstr.write(SECTION_DELIM_PAT.format('gfx'))
for l in self.gfx.to_lines():
outstr.write(l)
outstr.write(SECTION_DELIM_PAT.format('gff'))
for l in self.gff.to_lines():
outstr.write(l)
outstr.write(SECTION_DELIM_PAT.format('map'))
for l in self.map.to_lines():
outstr.write(l)
outstr.write(SECTION_DELIM_PAT.format('sfx'))
for l in self.sfx.to_lines():
outstr.write(l)
outstr.write(SECTION_DELIM_PAT.format('music'))
for l in self.music.to_lines():
outstr.write(l)
outstr.write('\n')
def write_cart_data(self, data, start_addr=0):
"""Write binary data to an arbitrary cart address.
Args:
data: The data to write, as a byte string or bytearray.
start_addr: The address to start writing.
"""
if start_addr + len(data) > 0x4300:
raise ValueError('Data too large: {} bytes starting at {} exceeds '
'0x4300'.format(len(data), start_addr))
memmap = ((0x0,0x2000,self.gfx._data),
(0x2000,0x3000,self.map._data),
(0x3000,0x3100,self.gff._data),
(0x3100,0x3200,self.music._data),
(0x3200,0x4300,self.sfx._data))
for start_a, end_a, section_data in memmap:
if (start_addr > end_a or
start_addr + len(data) < start_a):
continue
data_start_a = (start_addr - start_a
if start_addr > start_a
else 0)
data_end_a = (start_addr + len(data) - start_a
if start_addr + len(data) < end_a
else end_a)
text_start_a = (0 if start_addr > start_a
else start_a - start_addr)
text_end_a = (len(data)
if start_addr + len(data) < end_a
else -(start_addr + len(data) - end_a))
section_data[data_start_a:data_end_a] = \
data[text_start_a:text_end_a]
| {
"repo_name": "andmatand/midi-to-pico8",
"path": "pico8/game/game.py",
"copies": "1",
"size": "13402",
"license": "mit",
"hash": 4676517104761051000,
"line_mean": 34.1758530184,
"line_max": 91,
"alpha_frac": 0.5248470378,
"autogenerated": false,
"ratio": 3.649782135076253,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46746291728762523,
"avg_score": null,
"num_lines": null
} |
"""A container for Moya code tracebacks"""
from __future__ import print_function
from .console import Console, Cell
from .template.errors import TemplateError
from .context.expression import ExpressionError
from .context.errors import SubstitutionError
from .moyaexceptions import MoyaException
from .compat import implements_to_string, text_type
from .traceframe import Frame
import io
import sys
from operator import attrgetter
import traceback as pytraceback
_PYTHON_ERROR_TEXT = """A Python Exception may indicate either a bug in a Python extension, or Moya itself.
Consider reporting this to the Moya developers."""
@implements_to_string
class Traceback(object):
    """A container for a Moya code traceback.

    Collects a stack of Frame objects plus metadata about the original
    exception, and renders itself to a Console (text or HTML).
    """

    # Key used to detect duplicate adjacent stack frames.
    _get_frame_compare = attrgetter("_location", "lineno", "libid")

    def __init__(self, url=None, method=None, handler=None, exc=None):
        """Initialize an (initially empty) traceback.

        Args:
            url: The request URL, if any.
            method: The HTTP method, if any.
            handler: The handler that was processing the request.
            exc: The exception being reported.
        """
        self.url = url
        self.method = method
        self.handler = handler
        self.stack = []
        self.exception = None
        self.tb = None
        self.error_message = None
        self.exc = exc
        self.exc_info = None
        self.msg = None
        self.error_type = "internal error"
        self._displayed = False
        # Bug fix: this previously called getattr("exc", "diagnosis", None)
        # on the string literal "exc", so the exception's diagnosis
        # attribute was always dropped.
        self.diagnosis = getattr(exc, "diagnosis", None)

    @property
    def console_error(self):
        """HTML render of the exception's console output, or None when the
        exception only produces the default error message."""
        if hasattr(self.exc, "__moyaconsole__") and getattr(
            self.exc.__moyaconsole__, "is_default_error_message", False
        ):
            return None
        console = Console(html=True)
        console.obj(None, self.exc)
        return console.get_text()

    def remove_duplicates(self):
        """Collapse adjacent stack frames with identical location/line/lib."""
        current = None
        out = []
        for frame in self.stack:
            if current is None or self._get_frame_compare(
                frame
            ) != self._get_frame_compare(current):
                out.append(frame)
                current = frame
        self.stack = out

    def add_frame(self, frame):
        """Append a Frame to the traceback stack."""
        self.stack.append(frame)

    def __str__(self):
        # Render via the console protocol to plain text.
        console = Console(text=True)
        self.__moyaconsole__(console)
        return console.get_text()

    def __moyaconsole__(self, console):
        """Render this traceback to *console*, one snippet per frame."""
        stack = self.stack
        console.div("Logic Error", bold=True, fg="red")
        for frame in stack:
            console.wraptext(frame.location)
            if frame.one_line:
                # Only a single extracted line is available; pad with blank
                # lines so the snippet's line numbering is still correct.
                console.pysnippet(
                    "\n" * (frame.lineno - 1) + frame.code, frame.lineno, extralines=0
                )
            elif frame.code:
                if frame.format == "xml":
                    console.xmlsnippet(frame.code, frame.lineno, extralines=3)
                elif frame.format == "moyatemplate":
                    start, end = frame.cols
                    console.templatesnippet(
                        frame.code, lineno=frame.lineno, colno=start, endcolno=end
                    )
                else:
                    console.pysnippet(frame.code, frame.lineno, extralines=3)
            console.nl()
        if self.tb:
            console.exception(self.tb, tb=True)
        else:
            console.error(self.msg)
        if self.diagnosis:
            console.table([[Cell(self.diagnosis, italic=True)]])
        console.div()
def build(context, stack, node, exc, exc_info, request, py_traceback=True):
    """Build a Traceback object from a Moya call stack and an exception.

    Args:
        context: The Moya context (read for ._callstack, .sys.base and
            .develop).
        stack: The Moya call stack, or None to read it from the context.
        node: The node being executed (or an object with a `node` attribute),
            used as a fallback frame source.
        exc: The exception instance being reported.
        exc_info: A (type, value, traceback) tuple as from sys.exc_info(),
            or None.
        request: The current request (for URL/method metadata), or None.
        py_traceback: If False, never append Python-level frames.

    Returns:
        A populated Traceback instance.
    """
    add_pytraceback = True
    if node is not None:
        node = getattr(node, "node", node)
    if stack is None:
        stack = context.get("._callstack", [])
    if request is not None:
        traceback = Traceback(request.path_info, request.method, exc=exc)
    else:
        traceback = Traceback(exc=exc)
    traceback.diagnosis = getattr(exc, "diagnosis", None)
    add_pytraceback = not getattr(exc, "hide_py_traceback", False)
    traceback.error_type = getattr(exc, "error_type", "internal error")
    base = context.get(".sys.base", "")

    def relativefrom(base, path):
        # Render paths below the project base as './relative' paths.
        if base and path.startswith(base):
            path = "./" + path[len(base) :]
        return path

    # One frame per call-stack entry that carries element source code.
    for s in stack:
        e = getattr(s, "element", None)
        if e and e._code:
            frame = Frame(
                e._code,
                e._location,
                e.source_line or 1,
                obj=text_type(e),
                libid=e.libid,
            )
            traceback.add_frame(frame)
    element = getattr(exc, "element", None)
    if element is not None and hasattr(element.document, "structure"):
        # The exception points at a specific element; add a frame for it and
        # suppress the Python traceback (the Moya location is sufficient).
        frame = Frame(
            element.document.structure.xml,
            element._location,
            element.source_line or 1,
            obj=text_type(element),
            libid=element.libid,
        )
        traceback.add_frame(frame)
        add_pytraceback = False
    elif hasattr(node, "_location") and hasattr(node, "source_line"):
        if node._code:
            frame = Frame(
                node._code,
                node._location,
                node.source_line or 1,
                obj=text_type(node),
                libid=node.libid,
            )
            traceback.add_frame(frame)
    # Classify well-known Moya exception types; these describe errors in
    # Moya code, so the Python traceback adds no value for them.
    if isinstance(exc, MoyaException):
        traceback.error_type = "Moya Exception"
        traceback.moya_exception_type = exc.type
        add_pytraceback = False
    elif isinstance(exc, ExpressionError):
        traceback.error_type = "Expression Error"
        add_pytraceback = False
    elif isinstance(exc, SubstitutionError):
        traceback.error_type = "Substitution Error"
        add_pytraceback = False
    elif isinstance(exc, TemplateError):
        traceback.error_type = "Template Error"
    traceback.exception = exc
    traceback.msg = text_type(exc)
    traceback.diagnosis = traceback.diagnosis or getattr(exc, "diagnosis", None)
    if hasattr(exc, "get_moya_frames"):
        mf = exc.get_moya_frames()
        traceback.stack.extend(mf)
    if context.get(".develop", False):
        # In development mode, always include the Python traceback.
        add_pytraceback = True
    if add_pytraceback and exc_info and py_traceback:
        traceback.error_type = "Python Exception"
        tb_type, tb_value, tb = exc_info
        traceback.tb = "".join(pytraceback.format_exception(tb_type, tb_value, tb))
        pyframes = pytraceback.extract_tb(tb)
        # Drop outer frames up to the innermost 'logic' frame, so the
        # traceback starts where Moya handed control to Python.
        for i, f in enumerate(reversed(pyframes)):
            if f[2] == "logic":
                pyframes = pyframes[len(pyframes) - i - 1 :]
                break
        for (filename, line_number, function_name, text) in pyframes:
            one_line = False
            try:
                with io.open(filename, "rt") as f:
                    code = f.read()
            except Exception:
                # Bug fix: was a bare `except:`, which would also swallow
                # KeyboardInterrupt/SystemExit. Reading the source file is
                # best-effort; fall back to the single extracted line.
                code = text
                one_line = True
            code_path = relativefrom(base, filename)
            frame = Frame(
                code,
                code_path,
                line_number,
                one_line=one_line,
                obj=function_name,
                format="python",
            )
            traceback.add_frame(frame)
        traceback.msg = text_type(exc)
        if traceback.diagnosis is None:
            traceback.diagnosis = _PYTHON_ERROR_TEXT
    traceback.remove_duplicates()
    return traceback
def format_trace(context, stack, node, exc_info=None):
    """Return a rendered text traceback for the current exception.

    Builds a Traceback without the Python-level frames and renders it
    through its text representation.
    """
    if exc_info is None:
        exc_info = sys.exc_info()
    current_request = context.get(".request", None)
    # NOTE(review): `node` is passed as build()'s `exc` argument here
    # (build's own `node` parameter receives None) -- confirm intended.
    moya_traceback = build(
        context,
        stack,
        None,
        node,
        exc_info,
        current_request,
        py_traceback=False,
    )
    return text_type(moya_traceback)
| {
"repo_name": "moyaproject/moya",
"path": "moya/trace.py",
"copies": "1",
"size": "7410",
"license": "mit",
"hash": -8463510254811693000,
"line_mean": 30.2658227848,
"line_max": 107,
"alpha_frac": 0.5701754386,
"autogenerated": false,
"ratio": 4.064728469555678,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0006167631273048015,
"num_lines": 237
} |
"""A container that contains information to generate documentation files"""
from __future__ import unicode_literals
from __future__ import print_function
from ..html import slugify
from ..compat import PY2
from json import dump
class Doc(object):
    """A container for the information used to generate one documentation file."""

    def __init__(self, namespace, name, doc_class="document"):
        """Create a doc in *namespace* named *name* of kind *doc_class*."""
        self.doc_namespace = namespace
        self.id = self.make_id(name)
        self.name = name
        self.doc_class = doc_class
        self.data = {}
        self.references = []

    def __repr__(self):
        return "<doc '{}'>".format(self.id)

    @classmethod
    def from_dict(cls, d):
        """Recreate a Doc from a dict previously produced by `package`."""
        doc = cls(d["doc_namespace"], d["name"], d["doc_class"])
        doc.id = d["id"]
        doc.data = d["data"]
        doc.references = d["references"]
        return doc

    def make_id(self, name):
        """Return the fully qualified doc id, i.e. '<namespace>.<name>'."""
        return "{}.{}".format(self.doc_namespace, name)

    def add_reference(self, name):
        """Record a reference to another doc, qualifying bare names with
        this doc's namespace."""
        doc_id = self.make_id(name) if "." not in name else name
        self.references.append(doc_id)

    @property
    def package(self):
        """A JSON-serializable dict of this doc, omitting private ('_'-prefixed)
        data keys."""
        # Bug fix: this previously used dict.iteritems(), which does not
        # exist on Python 3 even though this module explicitly supports
        # both Pythons (see the PY2 branch in write()). items() works on
        # both.
        data = {k: v for k, v in self.data.items() if not k.startswith("_")}
        doc_package = {
            "id": self.id,
            "name": self.name,
            "doc_class": self.doc_class,
            "references": self.references,
            "data": data,
            "doc_namespace": self.doc_namespace,
        }
        return doc_package

    def _process_docmap(self, docmap):
        """Convert a flat (level, heading-text) docmap into a nested doctree."""
        # Sentinel root node at level 0; its children become the result.
        doctree = [{"title": "Document", "level": 0, "children": []}]
        stack = [doctree[0]]
        for level, text in docmap:
            current_level = stack[-1]["level"]
            node = {
                "title": text.strip(),
                "slug": slugify(text),
                "level": level,
                "children": [],
            }
            if level > current_level:
                # Deeper heading: child of the current node.
                stack[-1]["children"].append(node)
                stack.append(node)
            elif level == current_level:
                # Sibling heading: child of the parent; replaces stack top.
                stack[-2]["children"].append(node)
                stack[-1] = node
            else:
                # Shallower heading: pop back up to the matching level.
                while level < stack[-1]["level"]:
                    stack.pop()
                stack[-2]["children"].append(node)
                stack[-1] = node
        doctree = doctree[0]["children"]
        return doctree

    @property
    def doctree(self):
        """Nested heading tree built from self.data['docmap'], or None."""
        if "docmap" in self.data:
            doctree = self._process_docmap(self.data["docmap"])
        else:
            doctree = None
        return doctree

    def write(self, fs):
        """Write a JSON file containing the doc info to filesystem *fs*.

        (Doc fix: the original docstring said "pickle", but the data is
        serialized with json.dump.)
        """
        doc_package = self.package
        filename = "{}.json".format(self.name).replace("/", "_")
        with fs.open(filename, "wb" if PY2 else "wt") as f:
            dump(doc_package, f, indent=4, separators=(",", ": "))
| {
"repo_name": "moyaproject/moya",
"path": "moya/docgen/doc.py",
"copies": "1",
"size": "3022",
"license": "mit",
"hash": -4733038168351256000,
"line_mean": 29.22,
"line_max": 80,
"alpha_frac": 0.5086035738,
"autogenerated": false,
"ratio": 3.950326797385621,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9958313087235003,
"avg_score": 0.0001234567901234568,
"num_lines": 100
} |
"""A contents manager that combine multiple content managers."""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from .compat import JUPYTER
if JUPYTER:
from notebook.services.contents.manager import ContentsManager
from notebook.services.contents.filemanager import FileContentsManager
from traitlets.traitlets import List
from traitlets import import_item
else:
from IPython.html.services.contents.manager import ContentsManager
from IPython.html.services.contents.filemanager import FileContentsManager
from IPython.utils.traitlets import List
from IPython.utils.importstring import import_item
#make pyflakes happy
FileContentsManager
def _split_path(path):
"""split a path return by the api
return
- the sentinel:
- the rest of the path as a list.
- the original path stripped of / for normalisation.
"""
path = path.strip('/')
list_path = path.split('/')
sentinel = list_path.pop(0)
return sentinel, list_path, path
class MixedContentsManager(ContentsManager):
    """A contents manager that combines multiple contents managers.

    The first component of an API path (the "sentinel") selects which
    mounted contents manager handles the request; the remainder of the
    path is forwarded to that manager. Paths with no known sentinel fall
    through to the undecorated method bodies below.
    """

    filesystem_scheme = List([
        {
            'root':'local',
            'contents':"IPython.html.services.contents.filemanager.FileContentsManager"
        },
        {
            'root': 'gdrive',
            'contents': 'jupyterdrive.clientsidenbmanager.ClientSideContentsManager'
        }
    ],
        help="""
        List of virtual mount point name and corresponding contents manager
        """, config=True)

    def __init__(self, **kwargs):
        super(MixedContentsManager, self).__init__(**kwargs)
        self.managers = {}

        # Check consistency of scheme: mount point names must be unique.
        if not len(set(map(lambda x: x['root'], self.filesystem_scheme))) == len(self.filesystem_scheme):
            raise ValueError('Scheme should not mount two contents manager on the same mountpoint')

        kwargs.update({'parent': self})
        for scheme in self.filesystem_scheme:
            manager_class = import_item(scheme['contents'])
            self.managers[scheme['root']] = manager_class(**kwargs)

    def path_dispatch1(method):
        """Decorator: dispatch on a path passed as the first positional arg."""
        def _wrapper_method(self, path, *args, **kwargs):
            sentinel, _path, path = _split_path(path)
            man = self.managers.get(sentinel, None)
            if man is not None:
                meth = getattr(man, method.__name__)
                sub = meth('/'.join(_path), *args, **kwargs)
                return sub
            else:
                # No mounted manager for this sentinel; use the fallback body.
                return method(self, path, *args, **kwargs)
        return _wrapper_method

    def path_dispatch2(method):
        """Decorator: dispatch on a path passed as the second positional arg."""
        def _wrapper_method(self, other, path, *args, **kwargs):
            sentinel, _path, path = _split_path(path)
            man = self.managers.get(sentinel, None)
            if man is not None:
                meth = getattr(man, method.__name__)
                sub = meth(other, '/'.join(_path), *args, **kwargs)
                return sub
            else:
                return method(self, other, path, *args, **kwargs)
        return _wrapper_method

    def path_dispatch_kwarg(method):
        """Decorator: dispatch on a path passed as the `path=` keyword arg."""
        def _wrapper_method(self, path=''):
            sentinel, _path, path = _split_path(path)
            man = self.managers.get(sentinel, None)
            if man is not None:
                meth = getattr(man, method.__name__)
                sub = meth(path='/'.join(_path))
                return sub
            else:
                return method(self, path=path)
        return _wrapper_method

    # ContentsManager API part 1: methods that must be
    # implemented in subclasses.

    @path_dispatch1
    def dir_exists(self, path):
        # The virtual root and the mount points themselves always exist.
        if len(path) == 0:
            return True
        if path in self.managers.keys():
            return True
        return False

    @path_dispatch1
    def is_hidden(self, path):
        # The virtual root and mount points are never hidden.
        if (len(path) == 0) or path in self.managers.keys():
            return False
        raise NotImplementedError('....' + path)

    @path_dispatch_kwarg
    def file_exists(self, path=''):
        # The virtual root is a directory, not a file.
        if len(path) == 0:
            return False
        raise NotImplementedError('NotImplementedError')

    @path_dispatch1
    def exists(self, path):
        if len(path) == 0:
            return True
        raise NotImplementedError('NotImplementedError')

    @path_dispatch1
    def get(self, path, **kwargs):
        if len(path) == 0:
            return [{'type': 'directory'}]
        raise NotImplementedError('NotImplementedError')

    @path_dispatch2
    def save(self, model, path):
        raise NotImplementedError('NotImplementedError')

    def update(self, model, path):
        """Update a model (e.g. rename); model and path must share a mountpoint."""
        sentinel, listpath, path = _split_path(path)
        m_sentinel, m_listpath, orig_path = _split_path(model['path'])
        if sentinel != m_sentinel:
            raise ValueError('Does not know how to move model across mountpoints')

        model['path'] = '/'.join(m_listpath)

        man = self.managers.get(sentinel, None)
        if man is not None:
            meth = getattr(man, 'update')
            sub = meth(model, '/'.join(listpath))
            return sub
        else:
            # NOTE(review): `self.method` does not exist on this class and
            # unconditionally raises AttributeError; this fallback for an
            # unknown mountpoint looks broken -- confirm intended behavior.
            return self.method(model, path)

    @path_dispatch1
    def delete(self, path):
        raise NotImplementedError('NotImplementedError')

    @path_dispatch1
    def create_checkpoint(self, path):
        raise NotImplementedError('NotImplementedError')

    @path_dispatch1
    def list_checkpoints(self, path):
        raise NotImplementedError('NotImplementedError')

    @path_dispatch2
    def restore_checkpoint(self, checkpoint_id, path):
        raise NotImplementedError('NotImplementedError')

    @path_dispatch2
    def delete_checkpoint(self, checkpoint_id, path):
        raise NotImplementedError('NotImplementedError')

    # ContentsManager API part 2: methods that have useable default
    # implementations, but can be overridden in subclasses.

    # TODO (route optional methods too)

    ## Path dispatch on args 2 and 3 for rename.
    def path_dispatch_rename(rename_like_method):
        """
        Decorator for rename-like functions that need dispatch on 2 arguments.
        """
        def _wrapper_method(self, old_path, new_path):
            # Bug fix: _split_path returns (sentinel, list_path, path) but
            # the original unpacked the tuple in reverse order, so the
            # sentinel comparison below compared full paths and the manager
            # lookup always failed.
            old_sentinel, _old_path, old_path = _split_path(old_path)
            new_sentinel, _new_path, new_path = _split_path(new_path)
            if old_sentinel != new_sentinel:
                raise ValueError('Does not know how to move things across contents manager mountpoints')
            else:
                sentinel = new_sentinel

            man = self.managers.get(sentinel, None)
            if man is not None:
                rename_meth = getattr(man, rename_like_method.__name__)
                sub = rename_meth('/'.join(_old_path), '/'.join(_new_path))
                return sub
            else:
                # Bug fix: the original referenced `rename_meth` here, which
                # is undefined on this branch; fall back to the wrapped method.
                return rename_like_method(self, old_path, new_path)
        return _wrapper_method

    @path_dispatch_rename
    def rename_file(self, old_path, new_path):
        """Rename a file."""
        raise NotImplementedError('must be implemented in a subclass')

    @path_dispatch_rename
    def rename(self, old_path, new_path):
        """Rename a file."""
        raise NotImplementedError('must be implemented in a subclass')
| {
"repo_name": "jupyter/jupyter-drive",
"path": "jupyterdrive/mixednbmanager.py",
"copies": "1",
"size": "7371",
"license": "bsd-2-clause",
"hash": -3464623326680197600,
"line_mean": 32.5045454545,
"line_max": 104,
"alpha_frac": 0.6022249356,
"autogenerated": false,
"ratio": 4.405857740585774,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5508082676185774,
"avg_score": null,
"num_lines": null
} |
"""A contents manager that uses HDFS file system for storage."""
# Copyright (c) A
# Distributed under the terms of the Modified BSD License.
from hdfs3 import HDFileSystem
from hdfscontents.hdfsio import HDFSManagerMixin
from hdfscontents.hdfscheckpoints import HDFSCheckpoints
from notebook.services.contents.manager import ContentsManager
from notebook.utils import to_os_path
try: # new notebook
from notebook import _tz as tz
except ImportError: # old notebook
from notebook.services.contents import tz
from tornado import web
from tornado.web import HTTPError
import mimetypes
import nbformat
from traitlets import Instance, Integer, Unicode, default
try: # PY3
from base64 import encodebytes, decodebytes
except ImportError: # PY2
from base64 import encodestring as encodebytes, decodestring as decodebytes
class HDFSContentsManager(ContentsManager, HDFSManagerMixin):
    """
    ContentsManager that persists notebooks and files to an HDFS filesystem.
    """
    hdfs_namenode_host = Unicode(u'localhost', config=True, help='The HDFS namenode host')
    hdfs_namenode_port = Integer(9000, config=True, help='The HDFS namenode port')
    hdfs_user = Unicode(None, allow_none=True, config=True, help='The HDFS user name')
    root_dir = Unicode(u'/', config=True, help='The HDFS root directory to use')

    # The HDFS3 object used to interact with the HDFS cluster.
    hdfs = Instance(HDFileSystem, config=True)

    @default('hdfs')
    def _default_hdfs(self):
        return HDFileSystem(host=self.hdfs_namenode_host, port=self.hdfs_namenode_port, user=self.hdfs_user)

    def _checkpoints_class_default(self):
        # TODO: a better way to pass hdfs and root_dir?
        HDFSCheckpoints.hdfs = self.hdfs
        HDFSCheckpoints.root_dir = self.root_dir
        return HDFSCheckpoints

    # ContentsManager API part 1: methods that must be
    # implemented in subclasses.

    def dir_exists(self, path):
        """Does a directory exist at the given path?
        Like os.path.isdir

        Parameters
        ----------
        path : string
            The relative API style path to check

        Returns
        -------
        exists : bool
            Whether the path does indeed exist.
        """
        path = path.strip('/')
        hdfs_path = to_os_path(path, self.root_dir)
        return self._hdfs_dir_exists(hdfs_path)

    def is_hidden(self, path):
        """Is path a hidden directory or file?

        Parameters
        ----------
        path : string
            The path to check. This is an API path (`/` separated,
            relative to root dir).

        Returns
        -------
        hidden : bool
            Whether the path is hidden.
        """
        path = path.strip('/')
        hdfs_path = to_os_path(path, self.root_dir)
        return self._hdfs_is_hidden(hdfs_path)

    def file_exists(self, path=''):
        """Does a file exist at the given path?
        Like os.path.isfile

        Parameters
        ----------
        path : string
            The API path of a file to check for.

        Returns
        -------
        exists : bool
            Whether the file exists.
        """
        path = path.strip('/')
        hdfs_path = to_os_path(path, self.root_dir)
        return self._hdfs_file_exists(hdfs_path)

    def exists(self, path):
        """Does a file or directory exist at the given path?
        Like os.path.exists

        Parameters
        ----------
        path : string
            The API path of a file or directory to check for.

        Returns
        -------
        exists : bool
            Whether the target exists.
        """
        path = path.strip('/')
        hdfs_path = to_os_path(path, self.root_dir)
        return self.hdfs.exists(hdfs_path)

    def _base_model(self, path):
        """Build the common base of a hdfscontents model"""
        hdfs_path = to_os_path(path, self.root_dir)
        info = self.hdfs.info(hdfs_path)
        last_modified = tz.utcfromtimestamp(info.get(u'last_mod'))
        # TODO: don't have time created! now storing last accessed instead
        created = tz.utcfromtimestamp(info.get(u'last_access'))

        # Create the base model.
        model = {}
        model['name'] = path.rsplit('/', 1)[-1]
        model['path'] = path
        model['last_modified'] = last_modified
        model['created'] = created
        model['content'] = None
        model['format'] = None
        model['mimetype'] = None
        # TODO: Now just checking if user have write permission in HDFS. Need to cover all cases and check the user & group
        try:
            model['writable'] = (info.get(u'permissions') & 0o0200) > 0
        except OSError:
            self.log.error("Failed to check write permissions on %s", hdfs_path)
            model['writable'] = False
        return model

    def _dir_model(self, path, content=True):
        """Build a model for a directory
        if content is requested, will include a listing of the directory
        """
        hdfs_path = to_os_path(path, self.root_dir)
        four_o_four = u'directory does not exist: %r' % path
        if not self.dir_exists(path):
            raise web.HTTPError(404, four_o_four)
        elif self.is_hidden(path):
            self.log.info("Refusing to serve hidden directory %r, via 404 Error",
                          hdfs_path
                          )
            raise web.HTTPError(404, four_o_four)

        model = self._base_model(path)
        model['type'] = 'directory'
        if content:
            model['content'] = contents = []
            for subpath in self.hdfs.ls(hdfs_path, detail=False):
                name = subpath.strip('/').rsplit('/', 1)[-1]
                if self.should_list(name) and not self._hdfs_is_hidden(subpath):
                    contents.append(self.get(
                        path='%s/%s' % (path, name),
                        content=False)
                    )
            model['format'] = 'json'
        return model

    def _file_model(self, path, content=True, format=None):
        """Build a model for a file
        if content is requested, include the file hdfscontents.
        format:
          If 'text', the hdfscontents will be decoded as UTF-8.
          If 'base64', the raw bytes hdfscontents will be encoded as base64.
          If not specified, try to decode as UTF-8, and fall back to base64
        """
        model = self._base_model(path)
        model['type'] = 'file'

        hdfs_path = to_os_path(path, self.root_dir)
        model['mimetype'] = mimetypes.guess_type(hdfs_path)[0]

        if content:
            content, format = self._read_file(hdfs_path, format)
            if model['mimetype'] is None:
                # Fall back to a sensible mimetype for the detected format.
                default_mime = {
                    'text': 'text/plain',
                    'base64': 'application/octet-stream'
                }[format]
                model['mimetype'] = default_mime

            model.update(
                content=content,
                format=format,
            )
        return model

    def _notebook_model(self, path, content=True):
        """Build a notebook model
        if content is requested, the notebook content will be populated
        as a JSON structure (not double-serialized)
        """
        model = self._base_model(path)
        model['type'] = 'notebook'
        if content:
            hdfs_path = to_os_path(path, self.root_dir)
            nb = self._read_notebook(hdfs_path, as_version=4)
            self.mark_trusted_cells(nb, path)
            model['content'] = nb
            model['format'] = 'json'
            self.validate_notebook_model(model)
        return model

    def _save_directory(self, hdfs_path, model, path=''):
        """create a directory"""
        if self._hdfs_is_hidden(hdfs_path):
            raise HTTPError(400, u'Cannot create hidden directory %r' % hdfs_path)
        if not self.hdfs.exists(hdfs_path):
            try:
                self.hdfs.mkdir(hdfs_path)
            except Exception:
                # Bug fix: was a bare `except:` (would also swallow
                # KeyboardInterrupt/SystemExit); report as permission denied.
                raise HTTPError(403, u'Permission denied: %s' % path)
        elif not self._hdfs_dir_exists(hdfs_path):
            raise HTTPError(400, u'Not a directory: %s' % (hdfs_path))
        else:
            self.log.debug("Directory %r already exists", hdfs_path)

    def get(self, path, content=True, type=None, format=None):
        """Get a file or directory model.

        Takes a path for an entity and returns its model.

        Parameters
        ----------
        path : str
            the API path that describes the relative path for the target
        content : bool
            Whether to include the hdfscontents in the reply
        type : str, optional
            The requested type - 'file', 'notebook', or 'directory'.
            Will raise HTTPError 400 if the content doesn't match.
        format : str, optional
            The requested format for file contents. 'text' or 'base64'.
            Ignored if this returns a notebook or directory model.

        Returns
        -------
        model : dict
            the contents model. If content=True, returns the contents
            of the file or directory as well.
        """
        path = path.strip('/')

        if not self.exists(path):
            raise web.HTTPError(404, u'No such file or directory: %s' % path)

        if self.dir_exists(path):
            if type not in (None, 'directory'):
                raise web.HTTPError(400,
                                    u'%s is a directory, not a %s' % (path, type), reason='bad type')
            model = self._dir_model(path, content=content)
        elif type == 'notebook' or (type is None and path.endswith('.ipynb')):
            model = self._notebook_model(path, content=content)
        else:
            if type == 'directory':
                raise web.HTTPError(400,
                                    u'%s is not a directory' % path, reason='bad type')
            model = self._file_model(path, content=content, format=format)
        return model

    def save(self, model, path=''):
        """
        Save a file or directory model to path.
        Should return the saved model with no content.  Save implementations
        should call self.run_pre_save_hook(model=model, path=path) prior to
        writing any data.
        """
        # Note: a second, redundant `path = path.strip('/')` was removed here.
        path = path.strip('/')

        if 'type' not in model:
            raise web.HTTPError(400, u'No file type provided')
        if 'content' not in model and model['type'] != 'directory':
            raise web.HTTPError(400, u'No file content provided')

        hdfs_path = to_os_path(path, self.root_dir)
        self.log.debug("Saving %s", hdfs_path)

        self.run_pre_save_hook(model=model, path=path)

        try:
            if model['type'] == 'notebook':
                nb = nbformat.from_dict(model['content'])
                self.check_and_sign(nb, path)
                self._save_notebook(hdfs_path, nb)
                # One checkpoint should always exist for notebooks.
                if not self.checkpoints.list_checkpoints(path):
                    self.create_checkpoint(path)
            elif model['type'] == 'file':
                # Missing format will be handled internally by _save_file.
                self._save_file(hdfs_path, model['content'], model.get('format'))
            elif model['type'] == 'directory':
                self._save_directory(hdfs_path, model, path)
            else:
                raise web.HTTPError(400, "Unhandled hdfscontents type: %s" % model['type'])
        except web.HTTPError:
            raise
        except Exception as e:
            self.log.error(u'Error while saving file: %s %s', path, e, exc_info=True)
            raise web.HTTPError(500, u'Unexpected error while saving file: %s %s' % (path, e))

        validation_message = None
        if model['type'] == 'notebook':
            self.validate_notebook_model(model)
            validation_message = model.get('message', None)

        model = self.get(path, content=False)
        if validation_message:
            model['message'] = validation_message

        #self.run_post_save_hook(model=model, os_path=hdfs_path)
        return model

    def delete_file(self, path):
        """Delete file at path."""
        path = path.strip('/')
        hdfs_path = to_os_path(path, self.root_dir)
        if self._hdfs_dir_exists(hdfs_path):
            listing = self.hdfs.ls(hdfs_path, detail=False)
            # Don't delete non-empty directories.
            # A directory containing only leftover checkpoints is
            # considered empty.
            cp_dir = getattr(self.checkpoints, 'checkpoint_dir', None)
            for longentry in listing:
                entry = longentry.strip('/').rsplit('/', 1)[-1]
                if entry != cp_dir:
                    raise web.HTTPError(400, u'Directory %s not empty' % hdfs_path)
        elif not self._hdfs_file_exists(hdfs_path):
            raise web.HTTPError(404, u'File does not exist: %s' % hdfs_path)

        if self._hdfs_dir_exists(hdfs_path):
            self.log.debug("Removing directory %s", hdfs_path)
            try:
                self.hdfs.rm(hdfs_path, recursive=True)
            except Exception:
                # Bug fix: was a bare `except:`; see _save_directory.
                raise HTTPError(403, u'Permission denied: %s' % path)
        else:
            self.log.debug("Removing file %s", hdfs_path)
            try:
                self.hdfs.rm(hdfs_path, recursive=False)
            except Exception:
                # Bug fix: was a bare `except:`; see _save_directory.
                raise HTTPError(403, u'Permission denied: %s' % path)

    def rename_file(self, old_path, new_path):
        """Rename a file."""
        old_path = old_path.strip('/')
        new_path = new_path.strip('/')
        if new_path == old_path:
            return

        new_hdfs_path = to_os_path(new_path, self.root_dir)
        old_hdfs_path = to_os_path(old_path, self.root_dir)

        # Should we proceed with the move?
        if self.hdfs.exists(new_hdfs_path):
            raise web.HTTPError(409, u'File already exists: %s' % new_path)

        # Move the file
        try:
            self._hdfs_move_file(old_hdfs_path, new_hdfs_path)
        except Exception as e:
            raise web.HTTPError(500, u'Unknown error renaming file: %s %s' % (old_path, e))

    def info_string(self):
        return "Serving notebooks from HDFS directory: %s" % self.root_dir
| {
"repo_name": "alshishtawy/hdfscontents",
"path": "hdfscontents/hdfsmanager.py",
"copies": "1",
"size": "14840",
"license": "apache-2.0",
"hash": 4596108433531908000,
"line_mean": 37.1491002571,
"line_max": 123,
"alpha_frac": 0.5573450135,
"autogenerated": false,
"ratio": 4.20873511060692,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0013329985777972822,
"num_lines": 389
} |
"""A contents manager that uses the ambry database for storage"""
import nbformat
import os
from ipython_genutils.importstring import import_item
from ipython_genutils.py3compat import getcwd, string_types
from notebook.services.contents.checkpoints import Checkpoints, GenericCheckpointsMixin
from notebook.services.contents.manager import ContentsManager
from tornado import web
from traitlets import Any, Unicode, Bool, TraitError
_script_exporter = None
# FIXME. Should actually implement checkpoints
class AmbryCheckpoints(Checkpoints, GenericCheckpointsMixin):
    """No-op checkpoint manager: checkpoints are not actually persisted."""

    def create_checkpoint(self, contents_mgr, path):
        """Return a stub checkpoint record without storing anything."""
        from datetime import datetime
        return {'id': '0', 'last_modified': datetime.now()}

    def list_checkpoints(self, path):
        """No checkpoints are ever stored, so the list is always empty."""
        return []

    def restore_checkpoint(self, contents_mgr, checkpoint_id, path):
        """Nothing to restore."""
        pass

    def rename_checkpoint(self, checkpoint_id, old_path, new_path):
        """Nothing to rename."""
        pass

    def delete_checkpoint(self, checkpoint_id, path):
        """Nothing to delete."""
        pass
class AmbryContentsManager(ContentsManager):
    """Jupyter contents manager backed by an Ambry library database.

    API paths map onto the library by '/'-depth of the stripped path:
    '' is the root, ``<source>`` (no slash) a source directory,
    ``<source>/<bundle>`` (one slash) a bundle directory, and
    ``<source>/<bundle>/<file_name>`` (two slashes) a build source file.
    """
    def __init__(self, *args, **kwargs):
        super(AmbryContentsManager, self).__init__(*args, **kwargs)
        # Remember how the app's library was constructed so library_context
        # can open an independent connection per operation.
        self._library_args = self.parent._library.ctor_args
    @property
    def library_context(self):
        # Fresh context manager on each access; `with self.library_context
        # as l:` yields an open Ambry library.
        from ambry.library import LibraryContext
        return LibraryContext(self._library_args)
    root_dir = Unicode(config=True)  # kept for notebook-app compatibility
    def _root_dir_default(self):
        # Fall back to the process CWD when not hosted by a notebook app.
        try:
            return self.parent.notebook_dir
        except AttributeError:
            return getcwd()
    save_script = Bool(False, config=True, help='DEPRECATED, use post_save_hook')
    def _save_script_changed(self):
        self.log.warn("""
        `--script` is deprecated. You can trigger nbconvert via pre- or post-save hooks:
            ContentsManager.pre_save_hook
            FileContentsManager.post_save_hook
        A post-save hook has been registered that calls:
            ipython nbconvert --to script [notebook]
        which behaves similarly to `--script`.
        """)
        pass
    post_save_hook = Any(None, config=True,
        help="""Python callable or importstring thereof
        to be called on the path of a file just saved.
        This can be used to process the file on disk,
        such as converting the notebook to a script or HTML via nbconvert.
        It will be called as (all arguments passed by keyword)::
            hook(os_path=os_path, model=model, contents_manager=instance)
        - path: the filesystem path to the file just written
        - model: the model representing the file
        - contents_manager: this ContentsManager instance
        """
    )
    def _post_save_hook_changed(self, name, old, new):
        # Accept an import string and resolve it to the actual callable; the
        # re-assignment re-enters this handler with the resolved object.
        if new and isinstance(new, string_types):
            self.post_save_hook = import_item(self.post_save_hook)
        elif new:
            if not callable(new):
                raise TraitError("post_save_hook must be callable")
    def run_post_save_hook(self, model, os_path):
        """Run the post-save hook if defined, and log errors"""
        if self.post_save_hook:
            try:
                self.log.debug("Running post-save hook on %s", os_path)
                self.post_save_hook(os_path=os_path, model=model, contents_manager=self)
            except Exception:
                # Best-effort: a failing hook must not break the save itself.
                self.log.error("Post-save hook failed on %s", os_path, exc_info=True)
    def _root_dir_changed(self, name, old, new):
        """Do a bit of validation of the root_dir."""
        if not os.path.isabs(new):
            # If we receive a non-absolute path, make it absolute.
            # Re-assignment re-triggers this validator with the abs path.
            self.root_dir = os.path.abspath(new)
            return
        if not os.path.isdir(new):
            raise TraitError("%r is not a directory" % new)
    def _checkpoints_class_default(self):
        # Checkpoints are stubbed out; see AmbryCheckpoints.
        return AmbryCheckpoints
    def file_exists(self, path):
        """Returns True if the file exists, else returns False.
        API-style wrapper for os.path.isfile
        Parameters
        ----------
        path : string
            The relative path to the file (with '/' as separator)
        Returns
        -------
        exists : bool
            Whether the file exists.
        """
        from ambry.orm.exc import NotFoundError
        path = path.strip('/')
        # cache_key/file_name are only meaningful for two-slash paths but
        # are computed unconditionally; harmless for the early returns.
        parts = path.split('/')
        cache_key = os.path.join(*parts[:2])
        file_name = parts[-1]
        if path == '':
            # Root
            return False # Isn't a file
        elif path.count('/') == 0:
            return False # Isn't a file
        elif path.count('/') == 1:
            return False # Isn't a file
        elif path.count('/') == 2:
            with self.library_context as l:
                b = l.bundle_by_cache_key(cache_key)
                try:
                    bs = b.dataset.bsfile(file_name)
                    return True
                except NotFoundError:
                    return False
        else:
            return False
    def is_hidden(self, path):
        return False # We have nothing to hide
    def dir_exists(self, path):
        """Return whether *path* names a directory-like level (root, source
        or bundle) of the library hierarchy."""
        path = path.strip('/')
        if path == '':
            # Root
            return True # It always exists
        elif path.count('/') == 0:
            # Source
            return True # HACK, just assume it does exist
        elif path.count('/') == 1:
            # Bundle
            return True # Isn't a file
        elif path.count('/') == 2:
            return False # A bundle file, isn't a directory
        # NOTE(review): paths with more than two '/' fall through and return
        # None (falsy); callers treat that like False — confirm intended.
    def _root_model(self):
        """Build the directory model for the library root, listing one
        directory entry per bundle source."""
        from datetime import datetime
        model = {'name': '',
            'path': '',
            'type': 'directory',
            'format': 'json',
            'mimetype': None,
            'last_modified': datetime.now(),
            'created': datetime.now(),
            'writable': False}
        content = {}
        with self.library_context as l:
            for b in l.bundles:
                cm = {'name': b.identity.source,
                    'path': b.identity.source,
                    'type': 'directory',
                    'format': None,
                    'mimetype': None,
                    'content': None,
                    'writable': True}
                # Keyed by source so duplicate sources collapse to one entry.
                content[b.identity.source] = cm
        # NOTE(review): sorting a list of dicts relies on Python 2's
        # arbitrary-but-total ordering; it raises TypeError on Python 3.
        model['content'] = sorted(content.values())
        return model
    def _source_model(self, source):
        """Build the directory model for one *source*, listing its bundles."""
        from datetime import datetime
        model = {'name': source,
            'path': '/' + source,
            'type': 'directory',
            'format': 'json',
            'mimetype': None,
            'last_modified': datetime.now(),
            'created': datetime.now(),
            'writable': False}
        content = []
        with self.library_context as l:
            for b in l.bundles:
                if b.identity.source == source:
                    # NOTE(review): this rebinds the loop's comparison
                    # variable ``source``; later iterations compare against
                    # the rebound value — confirm intended.
                    source, name = b.identity.cache_key.split('/')
                    cm = {'name': name,
                        'path': b.identity.cache_key,
                        'type': 'directory',
                        'format': 'json',
                        'mimetype': None,
                        'content': None,
                        'writable': False}
                    content.append(cm)
        model['content'] = sorted(content)
        return model
    def _bundle_model(self, cache_key):
        """Build the directory model for a bundle, listing its build source
        files; notebook files are typed 'notebook', everything else 'file'."""
        from datetime import datetime
        from ambry.orm import File
        with self.library_context as l:
            b = l.bundle_by_cache_key(cache_key)
            model = {'name': b.identity.vname,
                'path': '/' + cache_key,
                'type': 'directory',
                'format': 'json',
                'mimetype': None,
                'last_modified': datetime.now(),
                'created': datetime.now(),
                'writable': False}
            content = []
            for f in b.build_source_files.list_records():
                cm = {'name': f.file_name,
                    'path': '/{}/{}'.format(cache_key, f.file_name),
                    'type': 'notebook' if f.file_const == File.BSFILE.NOTEBOOK else 'file',
                    'format': 'json',
                    'mimetype': f.record.mime_type,
                    'content': None,
                    'writable': False}
                content.append(cm)
            model['content'] = sorted(content)
            return model
    def _file_model(self, path, content=True, format=None):
        """Build a model for a file
        if content is requested, include the file contents.
        format:
            If 'text', the contents will be decoded as UTF-8.
            If 'base64', the raw bytes contents will be encoded as base64.
            If not specified, try to decode as UTF-8, and fall back to base64
        """
        # NOTE(review): the ``content`` and ``format`` parameters are accepted
        # but ignored below — contents are always loaded and returned as text.
        parts = path.split('/')
        cache_key = os.path.join(*parts[:2])
        file_name = parts[-1]
        with self.library_context as l:
            b = l.bundle_by_cache_key(cache_key)
            f = b.build_source_files.instance_from_name(file_name)
            model = {'name': file_name,
                'path': path,
                'type': 'file',
                'format': 'text',
                'mimetype': 'text/plain',
                'last_modified': f.record.modified_datetime,
                'created': f.record.modified_datetime,
                'writable': False}
            model['content'] = f.getcontent()
            return model
    def _file_from_path(self, l, path):
        """Resolve an API *path* to its (bundle, build-source-file) pair."""
        parts = path.split('/')
        cache_key = os.path.join(*parts[:2])
        file_name = parts[-1]
        b = l.bundle_by_cache_key(cache_key)
        f = b.build_source_files.instance_from_name(file_name)
        if not f.record.modified:
            # Backfill a modification time so modified_datetime is usable.
            import time
            f.record.modified = int(time.time())
        assert f.record.modified
        return b, f
    def _notebook_model(self, path, content=True):
        """Build a notebook model
        if content is requested, the notebook content will be populated
        as a JSON structure (not double-serialized)
        """
        with self.library_context as l:
            b, f = self._file_from_path(l, path)
            model = {'name': f.file_name,
                'path': path,
                'type': 'notebook',
                'format': None,
                'mimetype': None,
                'last_modified': f.record.modified_datetime,
                'created': f.record.modified_datetime,
                'writable': True,
                'content': None
                }
            if content:
                # Python 2 only (cStringIO): render the stored record into an
                # in-memory file and parse it with nbformat.
                from cStringIO import StringIO
                sio = StringIO()
                f.record_to_fh(sio)
                sio.seek(0)
                nb = nbformat.read(sio, as_version=4)
                self.mark_trusted_cells(nb, path)
                model['content'] = nb
                model['format'] = 'json'
                self.validate_notebook_model(model)
                pass
            return model
    def get(self, path, content=True, type=None, format=None):
        """ Takes a path for an entity and returns its model
        Parameters
        ----------
        path : str
            the API path that describes the relative path for the target
        content : bool
            Whether to include the contents in the reply
        type : str, optional
            The requested type - 'file', 'notebook', or 'directory'.
            Will raise HTTPError 400 if the content doesn't match.
        format : str, optional
            The requested format for file contents. 'text' or 'base64'.
            Ignored if this returns a notebook or directory model.
        Returns
        -------
        model : dict
            the contents model. If content=True, returns the contents
            of the file or directory as well.
        """
        path = path.strip('/')
        # Dispatch on path depth: root, source, bundle, then files.
        if path == '':
            model = self._root_model()
        elif path.count('/') == 0:
            model = self._source_model(path.strip('/'))
        elif path.count('/') == 1:
            model = self._bundle_model(path)
        elif type == 'notebook' or (type is None and path.endswith('.ipynb')):
            model = self._notebook_model(path, content=content)
        else:
            if type == 'directory':
                raise web.HTTPError(400,
                    u'%s is not a directory' % path, reason='bad type')
            model = self._file_model(path, content=content, format=format)
        return model
    def save(self, model, path=''):
        """Save the file model and return the model with no content."""
        import json
        path = path.strip('/')
        with self.library_context as l:
            b, f = self._file_from_path(l, path)
            if 'type' not in model:
                raise web.HTTPError(400, u'No file type provided')
            if 'content' not in model and model['type'] != 'directory':
                raise web.HTTPError(400, u'No file content provided')
            # NOTE(review): the pre-save hook receives the file record id as
            # its ``path`` argument, not the API path — confirm intended.
            self.run_pre_save_hook(model=model, path=f.record.id)
            if not f.record.size:
                # NOTE(review): an empty record is seeded with its default
                # contents; the submitted model content is discarded here.
                f.record.update_contents(f.default, 'application/json')
            else:
                f.record.update_contents(json.dumps(model['content']), 'application/json')
            try:
                if model['type'] == 'notebook':
                    nb = nbformat.from_dict(model['content'])
                    self.check_and_sign(nb, path)
                    # One checkpoint should always exist for notebooks.
                    if not self.checkpoints.list_checkpoints(path):
                        self.create_checkpoint(path)
                elif model['type'] == 'file':
                    pass
                elif model['type'] == 'directory':
                    pass
                else:
                    raise web.HTTPError(400, "Unhandled contents type: %s" % model['type'])
            except web.HTTPError:
                raise
            except Exception as e:
                self.log.error(u'Error while saving file: %s %s', path, e, exc_info=True)
                raise web.HTTPError(500, u'Unexpected error while saving file: %s %s' % (path, e))
            validation_message = None
            if model['type'] == 'notebook':
                self.validate_notebook_model(model)
                validation_message = model.get('message', None)
            model = self.get(path, content=False)
            if validation_message:
                model['message'] = validation_message
            return model
    def delete_file(self, path):
        """Delete file at path."""
        from ambry.orm.exc import NotFoundError
        path = path.strip('/')
        # Only bundle files (two-slash paths) are deletable.
        if path == '':
            raise web.HTTPError(400, u"Not deletable")
        elif path.count('/') == 0:
            raise web.HTTPError(400, u"Not deletable")
        elif path.count('/') == 1:
            raise web.HTTPError(400, u"Not deletable")
        with self.library_context as l:
            from ambry.orm.exc import CommitTrap
            # Guard against stray commits while removing the record.
            l.database._raise_on_commit = True
            try:
                b, f = self._file_from_path(l, path)
                f.remove()
                f.remove_record()
            except CommitTrap:
                raise
            except NotFoundError:
                raise web.HTTPError(404, u"Bundle does not exist: {}".format(path))
            finally:
                l.database._raise_on_commit = False
    def rename_file(self, old_path, new_path):
        """Rename a file."""
        from ambry.orm.exc import NotFoundError
        old_path = old_path.strip('/')
        new_path = new_path.strip('/')
        if new_path == old_path:
            return
        with self.library_context as l:
            b, f_old = self._file_from_path(l, old_path)
            parts = new_path.split('/')
            file_name = parts[-1]
            try:
                # A file of that name already existing in the bundle is a
                # conflict; NotFoundError is the go-ahead.
                bs = b.dataset.bsfile(file_name)
                raise web.HTTPError(409, u'File already exists: %s' % new_path)
            except NotFoundError:
                pass
            # NOTE(review): only the bare file name is stored; moving a file
            # between bundles is not supported by this rename.
            f_old.record.path = file_name
    def info_string(self):
        # No meaningful filesystem location for a database-backed manager.
        return ""
    def get_kernel_path(self, path, model=None):
        """Return the initial API path of a kernel associated with a given notebook"""
        if '/' in path:
            parent_dir = path.rsplit('/', 1)[0]
        else:
            parent_dir = ''
        return parent_dir
| {
"repo_name": "CivicKnowledge/ambry-ui",
"path": "ambry_ui/jupyter.py",
"copies": "1",
"size": "16884",
"license": "bsd-3-clause",
"hash": 755308177526119300,
"line_mean": 28.9893428064,
"line_max": 98,
"alpha_frac": 0.5182421227,
"autogenerated": false,
"ratio": 4.4525316455696204,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0036218459445241354,
"num_lines": 563
} |
"""A contents manager that uses the local file system for storage."""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import base64
import errno
import io
import os
import shutil
from contextlib import contextmanager
import mimetypes
from tornado import web
from .manager import ContentsManager
from IPython import nbformat
from IPython.utils.io import atomic_writing
from IPython.utils.path import ensure_dir_exists
from IPython.utils.traitlets import Unicode, Bool, TraitError
from IPython.utils.py3compat import getcwd, str_to_unicode
from IPython.utils import tz
from IPython.html.utils import is_hidden, to_os_path, to_api_path
class FileContentsManager(ContentsManager):
    """Contents manager that stores notebooks and files on the local disk.

    All public methods take API paths ('/'-separated, relative to
    ``root_dir``) and translate them to OS paths via ``_get_os_path``.
    Permission errors are surfaced to clients as HTTP 403.
    """

    root_dir = Unicode(config=True)

    def _root_dir_default(self):
        # Serve from the notebook app's directory, else the process CWD.
        try:
            return self.parent.notebook_dir
        except AttributeError:
            return getcwd()

    @contextmanager
    def perm_to_403(self, os_path=''):
        """context manager for turning permission errors into 403"""
        try:
            yield
        except OSError as e:
            if e.errno in {errno.EPERM, errno.EACCES}:
                # make 403 error message without root prefix
                # this may not work perfectly on unicode paths on Python 2,
                # but nobody should be doing that anyway.
                if not os_path:
                    os_path = str_to_unicode(e.filename or 'unknown file')
                path = to_api_path(os_path, self.root_dir)
                raise web.HTTPError(403, u'Permission denied: %s' % path)
            else:
                raise

    @contextmanager
    def open(self, os_path, *args, **kwargs):
        """wrapper around io.open that turns permission errors into 403"""
        with self.perm_to_403(os_path):
            with io.open(os_path, *args, **kwargs) as f:
                yield f

    @contextmanager
    def atomic_writing(self, os_path, *args, **kwargs):
        """wrapper around atomic_writing that turns permission errors into 403"""
        with self.perm_to_403(os_path):
            with atomic_writing(os_path, *args, **kwargs) as f:
                yield f

    save_script = Bool(False, config=True, help='DEPRECATED, IGNORED')

    def _save_script_changed(self):
        """Deprecation notice for the removed ``--script`` behavior."""
        self.log.warn("""
        Automatically saving notebooks as scripts has been removed.
        Use `ipython nbconvert --to python [notebook]` instead.
        """)

    def _root_dir_changed(self, name, old, new):
        """Do a bit of validation of the root_dir."""
        if not os.path.isabs(new):
            # If we receive a non-absolute path, make it absolute.
            # Re-assignment re-triggers this validator with the abs path.
            self.root_dir = os.path.abspath(new)
            return
        if not os.path.isdir(new):
            raise TraitError("%r is not a directory" % new)

    checkpoint_dir = Unicode('.ipynb_checkpoints', config=True,
        help="""The directory name in which to keep file checkpoints
        This is a path relative to the file's own directory.
        By default, it is .ipynb_checkpoints
        """
    )

    def _copy(self, src, dest):
        """copy src to dest

        like shutil.copy2, but log errors in copystat
        """
        shutil.copyfile(src, dest)
        try:
            shutil.copystat(src, dest)
        except OSError:
            # Metadata copy is best-effort (some filesystems can't take
            # permission bits); the content copy above already succeeded.
            self.log.debug("copystat on %s failed", dest, exc_info=True)

    def _get_os_path(self, path):
        """Given an API path, return its file system path.

        Parameters
        ----------
        path : string
            The relative API path to the named file.

        Returns
        -------
        path : string
            Native, absolute OS path to for a file.
        """
        return to_os_path(path, self.root_dir)

    def dir_exists(self, path):
        """Does the API-style path refer to an extant directory?

        API-style wrapper for os.path.isdir

        Parameters
        ----------
        path : string
            The path to check. This is an API path (`/` separated,
            relative to root_dir).

        Returns
        -------
        exists : bool
            Whether the path is indeed a directory.
        """
        path = path.strip('/')
        os_path = self._get_os_path(path=path)
        return os.path.isdir(os_path)

    def is_hidden(self, path):
        """Does the API style path correspond to a hidden directory or file?

        Parameters
        ----------
        path : string
            The path to check. This is an API path (`/` separated,
            relative to root_dir).

        Returns
        -------
        hidden : bool
            Whether the path exists and is hidden.
        """
        path = path.strip('/')
        os_path = self._get_os_path(path=path)
        return is_hidden(os_path, self.root_dir)

    def file_exists(self, path):
        """Returns True if the file exists, else returns False.

        API-style wrapper for os.path.isfile

        Parameters
        ----------
        path : string
            The relative path to the file (with '/' as separator)

        Returns
        -------
        exists : bool
            Whether the file exists.
        """
        path = path.strip('/')
        os_path = self._get_os_path(path)
        return os.path.isfile(os_path)

    def exists(self, path):
        """Returns True if the path exists, else returns False.

        API-style wrapper for os.path.exists

        Parameters
        ----------
        path : string
            The API path to the file (with '/' as separator)

        Returns
        -------
        exists : bool
            Whether the target exists.
        """
        path = path.strip('/')
        os_path = self._get_os_path(path=path)
        return os.path.exists(os_path)

    def _base_model(self, path):
        """Build the common base of a contents model"""
        os_path = self._get_os_path(path)
        info = os.stat(os_path)
        last_modified = tz.utcfromtimestamp(info.st_mtime)
        created = tz.utcfromtimestamp(info.st_ctime)
        # Create the base model.
        model = {}
        model['name'] = path.rsplit('/', 1)[-1]
        model['path'] = path
        model['last_modified'] = last_modified
        model['created'] = created
        model['content'] = None
        model['format'] = None
        model['mimetype'] = None
        try:
            model['writable'] = os.access(os_path, os.W_OK)
        except OSError:
            self.log.error("Failed to check write permissions on %s", os_path)
            model['writable'] = False
        return model

    def _dir_model(self, path, content=True):
        """Build a model for a directory

        if content is requested, will include a listing of the directory
        """
        os_path = self._get_os_path(path)
        four_o_four = u'directory does not exist: %r' % path
        if not os.path.isdir(os_path):
            raise web.HTTPError(404, four_o_four)
        elif is_hidden(os_path, self.root_dir):
            self.log.info("Refusing to serve hidden directory %r, via 404 Error",
                os_path
            )
            raise web.HTTPError(404, four_o_four)
        model = self._base_model(path)
        model['type'] = 'directory'
        if content:
            model['content'] = contents = []
            os_dir = self._get_os_path(path)
            for name in os.listdir(os_dir):
                os_path = os.path.join(os_dir, name)
                # skip over broken symlinks in listing
                if not os.path.exists(os_path):
                    self.log.warn("%s doesn't exist", os_path)
                    continue
                elif not os.path.isfile(os_path) and not os.path.isdir(os_path):
                    # FIFOs, sockets, devices: not servable content.
                    self.log.debug("%s not a regular file", os_path)
                    continue
                if self.should_list(name) and not is_hidden(os_path, self.root_dir):
                    contents.append(self.get(
                        path='%s/%s' % (path, name),
                        content=False)
                    )
            model['format'] = 'json'
        return model

    def _file_model(self, path, content=True, format=None):
        """Build a model for a file

        if content is requested, include the file contents.

        format:
          If 'text', the contents will be decoded as UTF-8.
          If 'base64', the raw bytes contents will be encoded as base64.
          If not specified, try to decode as UTF-8, and fall back to base64
        """
        model = self._base_model(path)
        model['type'] = 'file'
        os_path = self._get_os_path(path)
        model['mimetype'] = mimetypes.guess_type(os_path)[0] or 'text/plain'
        if content:
            if not os.path.isfile(os_path):
                # could be FIFO
                raise web.HTTPError(
                    400, "Cannot get content of non-file %s" % os_path)
            with self.open(os_path, 'rb') as f:
                bcontent = f.read()
            if format != 'base64':
                try:
                    model['content'] = bcontent.decode('utf8')
                except UnicodeError:
                    # Not valid UTF-8; only an error if text was demanded,
                    # otherwise fall through to base64 below.
                    if format == 'text':
                        raise web.HTTPError(
                            400, "%s is not UTF-8 encoded" % path)
                else:
                    model['format'] = 'text'
            if model['content'] is None:
                model['content'] = base64.encodestring(
                    bcontent).decode('ascii')
                model['format'] = 'base64'
        return model

    def _notebook_model(self, path, content=True):
        """Build a notebook model

        if content is requested, the notebook content will be populated
        as a JSON structure (not double-serialized)
        """
        model = self._base_model(path)
        model['type'] = 'notebook'
        if content:
            os_path = self._get_os_path(path)
            with self.open(os_path, 'r', encoding='utf-8') as f:
                try:
                    nb = nbformat.read(f, as_version=4)
                except Exception as e:
                    raise web.HTTPError(
                        400, u"Unreadable Notebook: %s %r" % (os_path, e))
            self.mark_trusted_cells(nb, path)
            model['content'] = nb
            model['format'] = 'json'
            self.validate_notebook_model(model)
        return model

    def get(self, path, content=True, type_=None, format=None):
        """ Takes a path for an entity and returns its model

        Parameters
        ----------
        path : str
            the API path that describes the relative path for the target
        content : bool
            Whether to include the contents in the reply
        type_ : str, optional
            The requested type - 'file', 'notebook', or 'directory'.
            Will raise HTTPError 400 if the content doesn't match.
        format : str, optional
            The requested format for file contents. 'text' or 'base64'.
            Ignored if this returns a notebook or directory model.

        Returns
        -------
        model : dict
            the contents model. If content=True, returns the contents
            of the file or directory as well.
        """
        path = path.strip('/')
        if not self.exists(path):
            raise web.HTTPError(404, u'No such file or directory: %s' % path)
        os_path = self._get_os_path(path)
        if os.path.isdir(os_path):
            if type_ not in (None, 'directory'):
                raise web.HTTPError(400,
                    u'%s is a directory, not a %s' % (path, type_))
            model = self._dir_model(path, content=content)
        elif type_ == 'notebook' or (type_ is None and path.endswith('.ipynb')):
            model = self._notebook_model(path, content=content)
        else:
            if type_ == 'directory':
                # BUG FIX: the '%s' placeholder was never interpolated;
                # format the path in as the sibling raise above does.
                raise web.HTTPError(400,
                    u'%s is not a directory' % path)
            model = self._file_model(path, content=content, format=format)
        return model

    def _save_notebook(self, os_path, model, path=''):
        """save a notebook file"""
        # Save the notebook file
        nb = nbformat.from_dict(model['content'])
        self.check_and_sign(nb, path)
        with self.atomic_writing(os_path, encoding='utf-8') as f:
            nbformat.write(nb, f, version=nbformat.NO_CONVERT)

    def _save_file(self, os_path, model, path=''):
        """save a non-notebook file"""
        fmt = model.get('format', None)
        if fmt not in {'text', 'base64'}:
            raise web.HTTPError(
                400, "Must specify format of file contents as 'text' or 'base64'")
        try:
            content = model['content']
            if fmt == 'text':
                bcontent = content.encode('utf8')
            else:
                b64_bytes = content.encode('ascii')
                bcontent = base64.decodestring(b64_bytes)
        except Exception as e:
            raise web.HTTPError(
                400, u'Encoding error saving %s: %s' % (os_path, e))
        with self.atomic_writing(os_path, text=False) as f:
            f.write(bcontent)

    def _save_directory(self, os_path, model, path=''):
        """create a directory"""
        if is_hidden(os_path, self.root_dir):
            raise web.HTTPError(
                400, u'Cannot create hidden directory %r' % os_path)
        if not os.path.exists(os_path):
            with self.perm_to_403():
                os.mkdir(os_path)
        elif not os.path.isdir(os_path):
            raise web.HTTPError(400, u'Not a directory: %s' % (os_path))
        else:
            # Creating an already-existing directory is a no-op.
            self.log.debug("Directory %r already exists", os_path)

    def save(self, model, path=''):
        """Save the file model and return the model with no content."""
        path = path.strip('/')

        if 'type' not in model:
            raise web.HTTPError(400, u'No file type provided')
        if 'content' not in model and model['type'] != 'directory':
            raise web.HTTPError(400, u'No file content provided')

        # One checkpoint should always exist
        if self.file_exists(path) and not self.list_checkpoints(path):
            self.create_checkpoint(path)

        os_path = self._get_os_path(path)
        self.log.debug("Saving %s", os_path)
        try:
            if model['type'] == 'notebook':
                self._save_notebook(os_path, model, path)
            elif model['type'] == 'file':
                self._save_file(os_path, model, path)
            elif model['type'] == 'directory':
                self._save_directory(os_path, model, path)
            else:
                raise web.HTTPError(
                    400, "Unhandled contents type: %s" % model['type'])
        except web.HTTPError:
            raise
        except Exception as e:
            self.log.error(
                u'Error while saving file: %s %s', path, e, exc_info=True)
            raise web.HTTPError(
                500, u'Unexpected error while saving file: %s %s' % (path, e))

        validation_message = None
        if model['type'] == 'notebook':
            self.validate_notebook_model(model)
            validation_message = model.get('message', None)

        # Re-read the model so clients get the on-disk state, content-free.
        model = self.get(path, content=False)
        if validation_message:
            model['message'] = validation_message
        return model

    def update(self, model, path):
        """Update the file's path

        For use in PATCH requests, to enable renaming a file without
        re-uploading its contents. Only used for renaming at the moment.
        """
        path = path.strip('/')
        new_path = model.get('path', path).strip('/')
        if path != new_path:
            self.rename(path, new_path)
        model = self.get(new_path, content=False)
        return model

    def delete(self, path):
        """Delete file at path."""
        path = path.strip('/')
        os_path = self._get_os_path(path)
        rm = os.unlink
        if os.path.isdir(os_path):
            listing = os.listdir(os_path)
            # don't delete non-empty directories (checkpoints dir doesn't
            # count)
            if listing and listing != [self.checkpoint_dir]:
                raise web.HTTPError(400, u'Directory %s not empty' % os_path)
        elif not os.path.isfile(os_path):
            raise web.HTTPError(404, u'File does not exist: %s' % os_path)
        # clear checkpoints
        for checkpoint in self.list_checkpoints(path):
            checkpoint_id = checkpoint['id']
            cp_path = self.get_checkpoint_path(checkpoint_id, path)
            if os.path.isfile(cp_path):
                self.log.debug("Unlinking checkpoint %s", cp_path)
                with self.perm_to_403():
                    rm(cp_path)
        if os.path.isdir(os_path):
            self.log.debug("Removing directory %s", os_path)
            with self.perm_to_403():
                shutil.rmtree(os_path)
        else:
            self.log.debug("Unlinking file %s", os_path)
            with self.perm_to_403():
                rm(os_path)

    def rename(self, old_path, new_path):
        """Rename a file."""
        old_path = old_path.strip('/')
        new_path = new_path.strip('/')
        if new_path == old_path:
            return
        new_os_path = self._get_os_path(new_path)
        old_os_path = self._get_os_path(old_path)
        # Should we proceed with the move?
        if os.path.exists(new_os_path):
            raise web.HTTPError(409, u'File already exists: %s' % new_path)
        # Move the file
        try:
            with self.perm_to_403():
                shutil.move(old_os_path, new_os_path)
        except web.HTTPError:
            raise
        except Exception as e:
            raise web.HTTPError(
                500, u'Unknown error renaming file: %s %s' % (old_path, e))
        # Move the checkpoints
        old_checkpoints = self.list_checkpoints(old_path)
        for cp in old_checkpoints:
            checkpoint_id = cp['id']
            old_cp_path = self.get_checkpoint_path(checkpoint_id, old_path)
            new_cp_path = self.get_checkpoint_path(checkpoint_id, new_path)
            if os.path.isfile(old_cp_path):
                self.log.debug(
                    "Renaming checkpoint %s -> %s", old_cp_path, new_cp_path)
                with self.perm_to_403():
                    shutil.move(old_cp_path, new_cp_path)

    # Checkpoint-related utilities

    def get_checkpoint_path(self, checkpoint_id, path):
        """find the path to a checkpoint"""
        path = path.strip('/')
        parent, name = ('/' + path).rsplit('/', 1)
        parent = parent.strip('/')
        basename, ext = os.path.splitext(name)
        filename = u"{name}-{checkpoint_id}{ext}".format(
            name=basename,
            checkpoint_id=checkpoint_id,
            ext=ext,
        )
        os_path = self._get_os_path(path=parent)
        cp_dir = os.path.join(os_path, self.checkpoint_dir)
        with self.perm_to_403():
            ensure_dir_exists(cp_dir)
        cp_path = os.path.join(cp_dir, filename)
        return cp_path

    def get_checkpoint_model(self, checkpoint_id, path):
        """construct the info dict for a given checkpoint"""
        path = path.strip('/')
        cp_path = self.get_checkpoint_path(checkpoint_id, path)
        stats = os.stat(cp_path)
        last_modified = tz.utcfromtimestamp(stats.st_mtime)
        info = dict(
            id=checkpoint_id,
            last_modified=last_modified,
        )
        return info

    # public checkpoint API

    def create_checkpoint(self, path):
        """Create a checkpoint from the current state of a file"""
        path = path.strip('/')
        if not self.file_exists(path):
            raise web.HTTPError(404)
        src_path = self._get_os_path(path)
        # only the one checkpoint ID:
        checkpoint_id = u"checkpoint"
        cp_path = self.get_checkpoint_path(checkpoint_id, path)
        self.log.debug("creating checkpoint for %s", path)
        with self.perm_to_403():
            self._copy(src_path, cp_path)
        # return the checkpoint info
        return self.get_checkpoint_model(checkpoint_id, path)

    def list_checkpoints(self, path):
        """list the checkpoints for a given file

        This contents manager currently only supports one checkpoint per file.
        """
        path = path.strip('/')
        checkpoint_id = "checkpoint"
        os_path = self.get_checkpoint_path(checkpoint_id, path)
        if not os.path.exists(os_path):
            return []
        else:
            return [self.get_checkpoint_model(checkpoint_id, path)]

    def restore_checkpoint(self, checkpoint_id, path):
        """restore a file to a checkpointed state"""
        path = path.strip('/')
        self.log.info("restoring %s from checkpoint %s", path, checkpoint_id)
        nb_path = self._get_os_path(path)
        cp_path = self.get_checkpoint_path(checkpoint_id, path)
        if not os.path.isfile(cp_path):
            self.log.debug("checkpoint file does not exist: %s", cp_path)
            raise web.HTTPError(404,
                u'checkpoint does not exist: %s@%s' % (
                    path, checkpoint_id)
            )
        # ensure notebook is readable (never restore from an unreadable
        # notebook)
        if cp_path.endswith('.ipynb'):
            with self.open(cp_path, 'r', encoding='utf-8') as f:
                nbformat.read(f, as_version=4)
        self.log.debug("copying %s -> %s", cp_path, nb_path)
        with self.perm_to_403():
            self._copy(cp_path, nb_path)

    def delete_checkpoint(self, checkpoint_id, path):
        """delete a file's checkpoint"""
        path = path.strip('/')
        cp_path = self.get_checkpoint_path(checkpoint_id, path)
        if not os.path.isfile(cp_path):
            raise web.HTTPError(404,
                u'Checkpoint does not exist: %s@%s' % (
                    path, checkpoint_id)
            )
        self.log.debug("unlinking %s", cp_path)
        os.unlink(cp_path)

    def info_string(self):
        return "Serving notebooks from local directory: %s" % self.root_dir

    def get_kernel_path(self, path, model=None):
        """Return the initial API path of a kernel associated with a given notebook"""
        if '/' in path:
            parent_dir = path.rsplit('/', 1)[0]
        else:
            parent_dir = ''
        return parent_dir
| {
"repo_name": "mattvonrocketstein/smash",
"path": "smashlib/ipy3x/html/services/contents/filemanager.py",
"copies": "1",
"size": "22850",
"license": "mit",
"hash": 5275885692462031000,
"line_mean": 35.0410094637,
"line_max": 89,
"alpha_frac": 0.544726477,
"autogenerated": false,
"ratio": 4.108973206257867,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5153699683257867,
"avg_score": null,
"num_lines": null
} |
"""A contents manager that uses the local file system for storage."""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import io
import os
import shutil
import mimetypes
from tornado import web
from .filecheckpoints import FileCheckpoints
from .fileio import FileManagerMixin
from .manager import ContentsManager
from IPython import nbformat
from IPython.utils.importstring import import_item
from IPython.utils.traitlets import Any, Unicode, Bool, TraitError
from IPython.utils.py3compat import getcwd, string_types
from IPython.utils import tz
from IPython.html.utils import (
is_hidden,
to_api_path,
)
_script_exporter = None
def _post_save_script(model, os_path, contents_manager, **kwargs):
"""convert notebooks to Python script after save with nbconvert
replaces `ipython notebook --script`
"""
from IPython.nbconvert.exporters.script import ScriptExporter
if model['type'] != 'notebook':
return
global _script_exporter
if _script_exporter is None:
_script_exporter = ScriptExporter(parent=contents_manager)
log = contents_manager.log
base, ext = os.path.splitext(os_path)
py_fname = base + '.py'
script, resources = _script_exporter.from_filename(os_path)
script_fname = base + resources.get('output_extension', '.txt')
log.info("Saving script /%s", to_api_path(script_fname, contents_manager.root_dir))
with io.open(script_fname, 'w', encoding='utf-8') as f:
f.write(script)
class FileContentsManager(FileManagerMixin, ContentsManager):
root_dir = Unicode(config=True)
def _root_dir_default(self):
try:
return self.parent.notebook_dir
except AttributeError:
return getcwd()
save_script = Bool(False, config=True, help='DEPRECATED, use post_save_hook')
    def _save_script_changed(self):
        """Deprecation shim: ``--script`` now registers a post-save nbconvert hook."""
        self.log.warn("""
        `--script` is deprecated. You can trigger nbconvert via pre- or post-save hooks:
            ContentsManager.pre_save_hook
            FileContentsManager.post_save_hook
        A post-save hook has been registered that calls:
            ipython nbconvert --to script [notebook]
        which behaves similarly to `--script`.
        """)
        # Keep the old flag working by wiring up the equivalent hook.
        self.post_save_hook = _post_save_script
post_save_hook = Any(None, config=True,
help="""Python callable or importstring thereof
to be called on the path of a file just saved.
This can be used to process the file on disk,
such as converting the notebook to a script or HTML via nbconvert.
It will be called as (all arguments passed by keyword)::
hook(os_path=os_path, model=model, contents_manager=instance)
- path: the filesystem path to the file just written
- model: the model representing the file
- contents_manager: this ContentsManager instance
"""
)
def _post_save_hook_changed(self, name, old, new):
if new and isinstance(new, string_types):
self.post_save_hook = import_item(self.post_save_hook)
elif new:
if not callable(new):
raise TraitError("post_save_hook must be callable")
def run_post_save_hook(self, model, os_path):
"""Run the post-save hook if defined, and log errors"""
if self.post_save_hook:
try:
self.log.debug("Running post-save hook on %s", os_path)
self.post_save_hook(os_path=os_path, model=model, contents_manager=self)
except Exception:
self.log.error("Post-save hook failed on %s", os_path, exc_info=True)
# Old-style trait-change handler: normalize root_dir to an absolute path
# (the re-assignment re-triggers this handler) and require it to exist.
def _root_dir_changed(self, name, old, new):
"""Do a bit of validation of the root_dir."""
if not os.path.isabs(new):
# If we receive a non-absolute path, make it absolute.
self.root_dir = os.path.abspath(new)
return
if not os.path.isdir(new):
raise TraitError("%r is not a directory" % new)
# Checkpoints live on the local filesystem alongside the notebooks.
def _checkpoints_class_default(self):
return FileCheckpoints
def is_hidden(self, path):
"""Does the API style path correspond to a hidden directory or file?
Parameters
----------
path : string
The path to check. This is an API path (`/` separated,
relative to root_dir).
Returns
-------
hidden : bool
Whether the path exists and is hidden.
"""
# API paths carry no leading/trailing slash; normalize before converting.
path = path.strip('/')
os_path = self._get_os_path(path=path)
# Delegates to the module-level is_hidden helper imported above this chunk.
return is_hidden(os_path, self.root_dir)
def file_exists(self, path):
"""Returns True if the file exists, else returns False.
API-style wrapper for os.path.isfile
Parameters
----------
path : string
The relative path to the file (with '/' as separator)
Returns
-------
exists : bool
Whether the file exists.
"""
path = path.strip('/')
os_path = self._get_os_path(path)
return os.path.isfile(os_path)
def dir_exists(self, path):
"""Does the API-style path refer to an extant directory?
API-style wrapper for os.path.isdir
Parameters
----------
path : string
The path to check. This is an API path (`/` separated,
relative to root_dir).
Returns
-------
exists : bool
Whether the path is indeed a directory.
"""
path = path.strip('/')
os_path = self._get_os_path(path=path)
return os.path.isdir(os_path)
def exists(self, path):
"""Returns True if the path exists, else returns False.
API-style wrapper for os.path.exists
Parameters
----------
path : string
The API path to the file (with '/' as separator)
Returns
-------
exists : bool
Whether the target exists.
"""
path = path.strip('/')
os_path = self._get_os_path(path=path)
return os.path.exists(os_path)
def _base_model(self, path):
    """Build the common base of a contents model.

    Stats the target once and fills the fields shared by file,
    notebook and directory models; content/format/mimetype are left
    as None for the specialized builders to fill in.
    """
    os_path = self._get_os_path(path)
    info = os.stat(os_path)
    # Robustness fix (matches the newer filemanager in this repo): files can
    # rarely carry an invalid mtime/ctime (e.g. on some network filesystems),
    # in which case utcfromtimestamp raises and the whole request 500s.
    # Fall back to the Unix epoch instead of crashing.
    try:
        last_modified = tz.utcfromtimestamp(info.st_mtime)
    except (ValueError, OSError):
        self.log.warning('Invalid mtime %s for %s', info.st_mtime, os_path)
        last_modified = tz.utcfromtimestamp(0)
    try:
        created = tz.utcfromtimestamp(info.st_ctime)
    except (ValueError, OSError):
        self.log.warning('Invalid ctime %s for %s', info.st_ctime, os_path)
        created = tz.utcfromtimestamp(0)
    # Create the base model.
    model = {}
    model['name'] = path.rsplit('/', 1)[-1]
    model['path'] = path
    model['last_modified'] = last_modified
    model['created'] = created
    model['content'] = None
    model['format'] = None
    model['mimetype'] = None
    try:
        model['writable'] = os.access(os_path, os.W_OK)
    except OSError:
        self.log.error("Failed to check write permissions on %s", os_path)
        model['writable'] = False
    return model
def _dir_model(self, path, content=True):
"""Build a model for a directory
if content is requested, will include a listing of the directory
"""
os_path = self._get_os_path(path)
# Use the same 404 for "missing" and "hidden" so hidden dirs aren't revealed.
four_o_four = u'directory does not exist: %r' % path
if not os.path.isdir(os_path):
raise web.HTTPError(404, four_o_four)
elif is_hidden(os_path, self.root_dir):
self.log.info("Refusing to serve hidden directory %r, via 404 Error",
os_path
)
raise web.HTTPError(404, four_o_four)
model = self._base_model(path)
model['type'] = 'directory'
if content:
model['content'] = contents = []
os_dir = self._get_os_path(path)
for name in os.listdir(os_dir):
# NOTE: rebinds os_path to each child entry below.
os_path = os.path.join(os_dir, name)
# skip over broken symlinks in listing
if not os.path.exists(os_path):
self.log.warn("%s doesn't exist", os_path)
continue
elif not os.path.isfile(os_path) and not os.path.isdir(os_path):
self.log.debug("%s not a regular file", os_path)
continue
# Only list entries the manager wants shown and that are not hidden;
# child models are fetched without content (shallow listing).
if self.should_list(name) and not is_hidden(os_path, self.root_dir):
contents.append(self.get(
path='%s/%s' % (path, name),
content=False)
)
model['format'] = 'json'
return model
def _file_model(self, path, content=True, format=None):
"""Build a model for a file
if content is requested, include the file contents.
format:
If 'text', the contents will be decoded as UTF-8.
If 'base64', the raw bytes contents will be encoded as base64.
If not specified, try to decode as UTF-8, and fall back to base64
"""
model = self._base_model(path)
model['type'] = 'file'
os_path = self._get_os_path(path)
if content:
# _read_file resolves the actual format used ('text' or 'base64').
content, format = self._read_file(os_path, format)
# Guess the mimetype from the filename; fall back to a generic type
# matching the format the file was actually read with.
default_mime = {
'text': 'text/plain',
'base64': 'application/octet-stream'
}[format]
model.update(
content=content,
format=format,
mimetype=mimetypes.guess_type(os_path)[0] or default_mime,
)
return model
def _notebook_model(self, path, content=True):
    """Build a notebook model; when content is requested, attach the
    notebook body as a parsed JSON structure (not double-serialized)."""
    model = self._base_model(path)
    model['type'] = 'notebook'
    if content:
        nb = self._read_notebook(self._get_os_path(path), as_version=4)
        self.mark_trusted_cells(nb, path)
        model.update(content=nb, format='json')
        self.validate_notebook_model(model)
    return model
def get(self, path, content=True, type=None, format=None):
""" Takes a path for an entity and returns its model
Parameters
----------
path : str
the API path that describes the relative path for the target
content : bool
Whether to include the contents in the reply
type : str, optional
The requested type - 'file', 'notebook', or 'directory'.
Will raise HTTPError 400 if the content doesn't match.
format : str, optional
The requested format for file contents. 'text' or 'base64'.
Ignored if this returns a notebook or directory model.
Returns
-------
model : dict
the contents model. If content=True, returns the contents
of the file or directory as well.
"""
path = path.strip('/')
if not self.exists(path):
raise web.HTTPError(404, u'No such file or directory: %s' % path)
os_path = self._get_os_path(path)
# Dispatch on what's actually on disk, honoring an explicit ``type``:
# directories first, then notebooks (by extension), then plain files.
if os.path.isdir(os_path):
if type not in (None, 'directory'):
raise web.HTTPError(400,
u'%s is a directory, not a %s' % (path, type), reason='bad type')
model = self._dir_model(path, content=content)
elif type == 'notebook' or (type is None and path.endswith('.ipynb')):
model = self._notebook_model(path, content=content)
else:
if type == 'directory':
raise web.HTTPError(400,
u'%s is not a directory' % path, reason='bad type')
model = self._file_model(path, content=content, format=format)
return model
def _save_directory(self, os_path, model, path=''):
    """create a directory"""
    if is_hidden(os_path, self.root_dir):
        raise web.HTTPError(400, u'Cannot create hidden directory %r' % os_path)
    if os.path.exists(os_path):
        # Creating over an existing *file* is an error; over an existing
        # directory it is a harmless no-op.
        if not os.path.isdir(os_path):
            raise web.HTTPError(400, u'Not a directory: %s' % (os_path))
        self.log.debug("Directory %r already exists", os_path)
    else:
        with self.perm_to_403():
            os.mkdir(os_path)
def save(self, model, path=''):
"""Save the file model and return the model with no content."""
path = path.strip('/')
# Reject models that cannot possibly be saved.
if 'type' not in model:
raise web.HTTPError(400, u'No file type provided')
if 'content' not in model and model['type'] != 'directory':
raise web.HTTPError(400, u'No file content provided')
os_path = self._get_os_path(path)
self.log.debug("Saving %s", os_path)
# Give the configured pre-save hook a chance to mutate the model.
self.run_pre_save_hook(model=model, path=path)
try:
if model['type'] == 'notebook':
nb = nbformat.from_dict(model['content'])
# Sign trusted notebooks so they remain trusted on reload.
self.check_and_sign(nb, path)
self._save_notebook(os_path, nb)
# One checkpoint should always exist for notebooks.
if not self.checkpoints.list_checkpoints(path):
self.create_checkpoint(path)
elif model['type'] == 'file':
# Missing format will be handled internally by _save_file.
self._save_file(os_path, model['content'], model.get('format'))
elif model['type'] == 'directory':
self._save_directory(os_path, model, path)
else:
raise web.HTTPError(400, "Unhandled contents type: %s" % model['type'])
except web.HTTPError:
raise
except Exception as e:
self.log.error(u'Error while saving file: %s %s', path, e, exc_info=True)
raise web.HTTPError(500, u'Unexpected error while saving file: %s %s' % (path, e))
# Notebook validation may attach a 'message'; carry it over onto the
# content-free model returned to the caller.
validation_message = None
if model['type'] == 'notebook':
self.validate_notebook_model(model)
validation_message = model.get('message', None)
model = self.get(path, content=False)
if validation_message:
model['message'] = validation_message
self.run_post_save_hook(model=model, os_path=os_path)
return model
def delete_file(self, path):
    """Delete file at path."""
    os_path = self._get_os_path(path.strip('/'))
    if os.path.isdir(os_path):
        # Refuse to remove non-empty directories; a directory whose only
        # entry is the leftover checkpoint dir still counts as empty.
        cp_dir = getattr(self.checkpoints, 'checkpoint_dir', None)
        if any(entry != cp_dir for entry in os.listdir(os_path)):
            raise web.HTTPError(400, u'Directory %s not empty' % os_path)
        self.log.debug("Removing directory %s", os_path)
        with self.perm_to_403():
            shutil.rmtree(os_path)
    elif not os.path.isfile(os_path):
        raise web.HTTPError(404, u'File does not exist: %s' % os_path)
    else:
        self.log.debug("Unlinking file %s", os_path)
        with self.perm_to_403():
            os.unlink(os_path)
def rename_file(self, old_path, new_path):
    """Rename a file."""
    old_path, new_path = old_path.strip('/'), new_path.strip('/')
    if new_path == old_path:
        # Renaming onto itself is a no-op.
        return
    old_os_path = self._get_os_path(old_path)
    new_os_path = self._get_os_path(new_path)
    # Refuse to clobber an existing target.
    if os.path.exists(new_os_path):
        raise web.HTTPError(409, u'File already exists: %s' % new_path)
    try:
        with self.perm_to_403():
            shutil.move(old_os_path, new_os_path)
    except web.HTTPError:
        raise
    except Exception as e:
        raise web.HTTPError(500, u'Unknown error renaming file: %s %s' % (old_path, e))
def info_string(self):
    """One-line startup banner naming the serving directory."""
    return "Serving notebooks from local directory: %s" % self.root_dir

def get_kernel_path(self, path, model=None):
    """Return the initial API path of a kernel associated with a given notebook"""
    # Everything before the last '/', or '' for a top-level notebook.
    parent_dir, _sep, _name = path.rpartition('/')
    return parent_dir
| {
"repo_name": "martydill/url_shortener",
"path": "code/venv/lib/python2.7/site-packages/IPython/html/services/contents/filemanager.py",
"copies": "4",
"size": "16487",
"license": "mit",
"hash": 7917994969353552000,
"line_mean": 33.8562367865,
"line_max": 97,
"alpha_frac": 0.5669921757,
"autogenerated": false,
"ratio": 4.079930710220243,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0011870429206084685,
"num_lines": 473
} |
"""A contents manager that uses the local file system for storage."""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
from datetime import datetime
import errno
import io
import os
import shutil
import stat
import sys
import warnings
import mimetypes
import nbformat
from send2trash import send2trash
from tornado import web
from .filecheckpoints import FileCheckpoints
from .fileio import FileManagerMixin
from .manager import ContentsManager
from ...utils import exists
from ipython_genutils.importstring import import_item
from traitlets import Any, Unicode, Bool, TraitError, observe, default, validate
from ipython_genutils.py3compat import getcwd, string_types
from notebook import _tz as tz
from notebook.utils import (
is_hidden, is_file_hidden,
to_api_path,
)
from notebook.base.handlers import AuthenticatedFileHandler
from notebook.transutils import _
from os.path import samefile
_script_exporter = None


def _post_save_script(model, os_path, contents_manager, **kwargs):
    """convert notebooks to Python script after save with nbconvert

    replaces `jupyter notebook --script`
    """
    from nbconvert.exporters.script import ScriptExporter

    warnings.warn("`_post_save_script` is deprecated and will be removed in Notebook 5.0", DeprecationWarning)
    if model['type'] != 'notebook':
        # Only notebooks get converted; plain files and directories are ignored.
        return

    global _script_exporter
    if _script_exporter is None:
        # Build the exporter lazily, once, and reuse it across saves.
        _script_exporter = ScriptExporter(parent=contents_manager)

    log = contents_manager.log
    root, _ext = os.path.splitext(os_path)
    script, resources = _script_exporter.from_filename(os_path)
    script_fname = root + resources.get('output_extension', '.txt')
    log.info("Saving script /%s", to_api_path(script_fname, contents_manager.root_dir))
    with io.open(script_fname, 'w', encoding='utf-8') as f:
        f.write(script)
# NOTE(review): leading indentation in this chunk was stripped by the export;
# code lines below are reproduced byte-identical to the original.
class FileContentsManager(FileManagerMixin, ContentsManager):
# Contents manager storing notebooks/files on the local filesystem,
# addressed by API paths ('/'-separated, relative to ``root_dir``).
root_dir = Unicode(config=True)
# Dynamic default: the parent app's notebook_dir, else the process cwd.
@default('root_dir')
def _default_root_dir(self):
try:
return self.parent.notebook_dir
except AttributeError:
return getcwd()
save_script = Bool(False, config=True, help='DEPRECATED, use post_save_hook. Will be removed in Notebook 5.0')
# traitlets calls @observe handlers as handler(change); only act when the
# deprecated flag is actually switched on.
@observe('save_script')
def _update_save_script(self, change):
if not change['new']:
return
self.log.warning("""
`--script` is deprecated and will be removed in notebook 5.0.
You can trigger nbconvert via pre- or post-save hooks:
ContentsManager.pre_save_hook
FileContentsManager.post_save_hook
A post-save hook has been registered that calls:
jupyter nbconvert --to script [notebook]
which behaves similarly to `--script`.
""")
self.post_save_hook = _post_save_script
# Configurable callback invoked by run_post_save_hook after each save;
# validated/resolved by _validate_post_save_hook below.
post_save_hook = Any(None, config=True, allow_none=True,
help="""Python callable or importstring thereof
to be called on the path of a file just saved.
This can be used to process the file on disk,
such as converting the notebook to a script or HTML via nbconvert.
It will be called as (all arguments passed by keyword)::
hook(os_path=os_path, model=model, contents_manager=instance)
- path: the filesystem path to the file just written
- model: the model representing the file
- contents_manager: this ContentsManager instance
"""
)
@validate('post_save_hook')
def _validate_post_save_hook(self, proposal):
    """Cross-validate ``post_save_hook``: resolve importstrings and
    require the final value to be callable."""
    hook = proposal['value']
    if isinstance(hook, string_types):
        # "pkg.module.callable" -> the callable itself
        hook = import_item(hook)
    if not callable(hook):
        raise TraitError("post_save_hook must be callable")
    return hook
def run_post_save_hook(self, model, os_path):
    """Run the post-save hook if defined, and log errors"""
    if self.post_save_hook:
        try:
            self.log.debug("Running post-save hook on %s", os_path)
            self.post_save_hook(os_path=os_path, model=model, contents_manager=self)
        except Exception as e:
            # Fix garbled log message: "failed o-n %s" -> "failed on %s".
            self.log.error("Post-save hook failed on %s", os_path, exc_info=True)
            # Surface the failure to the client as a 500, chaining the cause.
            raise web.HTTPError(500, u'Unexpected error while running post hook save: %s'
                                % e) from e
@validate('root_dir')
def _validate_root_dir(self, proposal):
    """Do a bit of validation of the root_dir."""
    root = proposal['value']
    if not os.path.isabs(root):
        # If we receive a non-absolute path, make it absolute.
        root = os.path.abspath(root)
    if not os.path.isdir(root):
        raise TraitError("%r is not a directory" % root)
    return root
# Checkpoints live on the local filesystem alongside the notebooks.
@default('checkpoints_class')
def _checkpoints_class_default(self):
return FileCheckpoints
# When enabled, deletions go through send2trash so users can recover files.
delete_to_trash = Bool(True, config=True,
help="""If True (default), deleting files will send them to the
platform's trash/recycle bin, where they can be recovered. If False,
deleting files really deletes them.""")
# Serve raw files over HTTP only to authenticated users.
@default('files_handler_class')
def _files_handler_class_default(self):
return AuthenticatedFileHandler
@default('files_handler_params')
def _files_handler_params_default(self):
return {'path': self.root_dir}
def is_hidden(self, path):
"""Does the API style path correspond to a hidden directory or file?
Parameters
----------
path : string
The path to check. This is an API path (`/` separated,
relative to root_dir).
Returns
-------
hidden : bool
Whether the path exists and is hidden.
"""
# API paths carry no leading/trailing slash; normalize before converting.
path = path.strip('/')
os_path = self._get_os_path(path=path)
# Delegates to notebook.utils.is_hidden (imported at module level).
return is_hidden(os_path, self.root_dir)
def file_exists(self, path):
"""Returns True if the file exists, else returns False.
API-style wrapper for os.path.isfile
Parameters
----------
path : string
The relative path to the file (with '/' as separator)
Returns
-------
exists : bool
Whether the file exists.
"""
path = path.strip('/')
os_path = self._get_os_path(path)
return os.path.isfile(os_path)
def dir_exists(self, path):
"""Does the API-style path refer to an extant directory?
API-style wrapper for os.path.isdir
Parameters
----------
path : string
The path to check. This is an API path (`/` separated,
relative to root_dir).
Returns
-------
exists : bool
Whether the path is indeed a directory.
"""
path = path.strip('/')
os_path = self._get_os_path(path=path)
return os.path.isdir(os_path)
def exists(self, path):
"""Returns True if the path exists, else returns False.
API-style wrapper for os.path.exists
Parameters
----------
path : string
The API path to the file (with '/' as separator)
Returns
-------
exists : bool
Whether the target exists.
"""
path = path.strip('/')
os_path = self._get_os_path(path=path)
# Uses the ``exists`` helper imported from ...utils at module level.
return exists(os_path)
def _base_model(self, path):
"""Build the common base of a contents model"""
os_path = self._get_os_path(path)
# lstat: report on symlinks themselves rather than their targets.
info = os.lstat(os_path)
try:
# size of file
size = info.st_size
except (ValueError, OSError):
self.log.warning('Unable to get size.')
size = None
try:
last_modified = tz.utcfromtimestamp(info.st_mtime)
except (ValueError, OSError):
# Files can rarely have an invalid timestamp
# https://github.com/jupyter/notebook/issues/2539
# https://github.com/jupyter/notebook/issues/2757
# Use the Unix epoch as a fallback so we don't crash.
self.log.warning('Invalid mtime %s for %s', info.st_mtime, os_path)
last_modified = datetime(1970, 1, 1, 0, 0, tzinfo=tz.UTC)
try:
created = tz.utcfromtimestamp(info.st_ctime)
except (ValueError, OSError): # See above
self.log.warning('Invalid ctime %s for %s', info.st_ctime, os_path)
created = datetime(1970, 1, 1, 0, 0, tzinfo=tz.UTC)
# Create the base model.
model = {}
model['name'] = path.rsplit('/', 1)[-1]
model['path'] = path
model['last_modified'] = last_modified
model['created'] = created
# content/format/mimetype are filled in by the specialized builders.
model['content'] = None
model['format'] = None
model['mimetype'] = None
model['size'] = size
try:
model['writable'] = os.access(os_path, os.W_OK)
except OSError:
self.log.error("Failed to check write permissions on %s", os_path)
model['writable'] = False
return model
def _dir_model(self, path, content=True):
"""Build a model for a directory
if content is requested, will include a listing of the directory
"""
os_path = self._get_os_path(path)
# Same 404 for "missing" and "hidden" so hidden dirs aren't revealed.
four_o_four = u'directory does not exist: %r' % path
if not os.path.isdir(os_path):
raise web.HTTPError(404, four_o_four)
elif is_hidden(os_path, self.root_dir) and not self.allow_hidden:
self.log.info("Refusing to serve hidden directory %r, via 404 Error",
os_path
)
raise web.HTTPError(404, four_o_four)
model = self._base_model(path)
model['type'] = 'directory'
model['size'] = None
if content:
model['content'] = contents = []
os_dir = self._get_os_path(path)
for name in os.listdir(os_dir):
try:
# NOTE: rebinds os_path to each child entry below.
os_path = os.path.join(os_dir, name)
except UnicodeDecodeError as e:
self.log.warning(
"failed to decode filename '%s': %s", name, e)
continue
try:
st = os.lstat(os_path)
except OSError as e:
# skip over broken symlinks in listing
if e.errno == errno.ENOENT:
self.log.warning("%s doesn't exist", os_path)
else:
self.log.warning("Error stat-ing %s: %s", os_path, e)
continue
# Only regular files, directories, and symlinks are listable.
if (not stat.S_ISLNK(st.st_mode)
and not stat.S_ISREG(st.st_mode)
and not stat.S_ISDIR(st.st_mode)):
self.log.debug("%s not a regular file", os_path)
continue
try:
if self.should_list(name):
if self.allow_hidden or not is_file_hidden(os_path, stat_res=st):
# Child models are fetched without content (shallow listing).
contents.append(
self.get(path='%s/%s' % (path, name), content=False)
)
except OSError as e:
# ELOOP: recursive symlink
if e.errno != errno.ELOOP:
self.log.warning(
"Unknown error checking if file %r is hidden",
os_path,
exc_info=True,
)
model['format'] = 'json'
return model
def _file_model(self, path, content=True, format=None):
"""Build a model for a file
if content is requested, include the file contents.
format:
If 'text', the contents will be decoded as UTF-8.
If 'base64', the raw bytes contents will be encoded as base64.
If not specified, try to decode as UTF-8, and fall back to base64
"""
model = self._base_model(path)
model['type'] = 'file'
os_path = self._get_os_path(path)
# Guess from the filename first; only fall back to a format-derived
# generic type when content is read and no guess was possible.
model['mimetype'] = mimetypes.guess_type(os_path)[0]
if content:
# _read_file resolves the actual format used ('text' or 'base64').
content, format = self._read_file(os_path, format)
if model['mimetype'] is None:
default_mime = {
'text': 'text/plain',
'base64': 'application/octet-stream'
}[format]
model['mimetype'] = default_mime
model.update(
content=content,
format=format,
)
return model
def _notebook_model(self, path, content=True):
    """Build a notebook model.

    When ``content`` is requested, the notebook body is attached as a
    parsed JSON structure (not double-serialized).
    """
    model = self._base_model(path)
    model['type'] = 'notebook'
    os_path = self._get_os_path(path)
    if not content:
        return model
    nb = self._read_notebook(os_path, as_version=4)
    self.mark_trusted_cells(nb, path)
    model['content'] = nb
    model['format'] = 'json'
    self.validate_notebook_model(model)
    return model
def get(self, path, content=True, type=None, format=None):
""" Takes a path for an entity and returns its model
Parameters
----------
path : str
the API path that describes the relative path for the target
content : bool
Whether to include the contents in the reply
type : str, optional
The requested type - 'file', 'notebook', or 'directory'.
Will raise HTTPError 400 if the content doesn't match.
format : str, optional
The requested format for file contents. 'text' or 'base64'.
Ignored if this returns a notebook or directory model.
Returns
-------
model : dict
the contents model. If content=True, returns the contents
of the file or directory as well.
"""
path = path.strip('/')
if not self.exists(path):
raise web.HTTPError(404, u'No such file or directory: %s' % path)
os_path = self._get_os_path(path)
# Dispatch on what's actually on disk, honoring an explicit ``type``:
# directories first, then notebooks (by extension), then plain files.
if os.path.isdir(os_path):
if type not in (None, 'directory'):
raise web.HTTPError(400,
u'%s is a directory, not a %s' % (path, type), reason='bad type')
model = self._dir_model(path, content=content)
elif type == 'notebook' or (type is None and path.endswith('.ipynb')):
model = self._notebook_model(path, content=content)
else:
if type == 'directory':
raise web.HTTPError(400,
u'%s is not a directory' % path, reason='bad type')
model = self._file_model(path, content=content, format=format)
return model
def _save_directory(self, os_path, model, path=''):
    """create a directory"""
    if is_hidden(os_path, self.root_dir) and not self.allow_hidden:
        raise web.HTTPError(400, u'Cannot create hidden directory %r' % os_path)
    if os.path.exists(os_path):
        # Creating over an existing *file* is an error; over an existing
        # directory it is a harmless no-op.
        if not os.path.isdir(os_path):
            raise web.HTTPError(400, u'Not a directory: %s' % (os_path))
        self.log.debug("Directory %r already exists", os_path)
    else:
        with self.perm_to_403():
            os.mkdir(os_path)
def save(self, model, path=''):
"""Save the file model and return the model with no content."""
path = path.strip('/')
# Reject models that cannot possibly be saved.
if 'type' not in model:
raise web.HTTPError(400, u'No file type provided')
if 'content' not in model and model['type'] != 'directory':
raise web.HTTPError(400, u'No file content provided')
os_path = self._get_os_path(path)
self.log.debug("Saving %s", os_path)
# Give the configured pre-save hook a chance to mutate the model.
self.run_pre_save_hook(model=model, path=path)
try:
if model['type'] == 'notebook':
nb = nbformat.from_dict(model['content'])
# Sign trusted notebooks so they remain trusted on reload.
self.check_and_sign(nb, path)
self._save_notebook(os_path, nb)
# One checkpoint should always exist for notebooks.
if not self.checkpoints.list_checkpoints(path):
self.create_checkpoint(path)
elif model['type'] == 'file':
# Missing format will be handled internally by _save_file.
self._save_file(os_path, model['content'], model.get('format'))
elif model['type'] == 'directory':
self._save_directory(os_path, model, path)
else:
raise web.HTTPError(400, "Unhandled contents type: %s" % model['type'])
except web.HTTPError:
raise
except Exception as e:
self.log.error(u'Error while saving file: %s %s', path, e, exc_info=True)
raise web.HTTPError(500, u'Unexpected error while saving file: %s %s' %
(path, e)) from e
# Notebook validation may attach a 'message'; carry it over onto the
# content-free model returned to the caller.
validation_message = None
if model['type'] == 'notebook':
self.validate_notebook_model(model)
validation_message = model.get('message', None)
model = self.get(path, content=False)
if validation_message:
model['message'] = validation_message
self.run_post_save_hook(model=model, os_path=os_path)
return model
def delete_file(self, path):
"""Delete file at path."""
path = path.strip('/')
os_path = self._get_os_path(path)
rm = os.unlink
if not os.path.exists(os_path):
raise web.HTTPError(404, u'File or directory does not exist: %s' % os_path)
# Whether send2trash can be trusted for this path on this platform.
def _check_trash(os_path):
if sys.platform in {'win32', 'darwin'}:
return True
# It's a bit more nuanced than this, but until we can better
# distinguish errors from send2trash, assume that we can only trash
# files on the same partition as the home directory.
file_dev = os.stat(os_path).st_dev
home_dev = os.stat(os.path.expanduser('~')).st_dev
return file_dev == home_dev
def is_non_empty_dir(os_path):
if os.path.isdir(os_path):
# A directory containing only leftover checkpoints is
# considered empty.
cp_dir = getattr(self.checkpoints, 'checkpoint_dir', None)
if set(os.listdir(os_path)) - {cp_dir}:
return True
return False
if self.delete_to_trash:
if sys.platform == 'win32' and is_non_empty_dir(os_path):
# send2trash can really delete files on Windows, so disallow
# deleting non-empty files. See Github issue 3631.
raise web.HTTPError(400, u'Directory %s not empty' % os_path)
if _check_trash(os_path):
self.log.debug("Sending %s to trash", os_path)
# Looking at the code in send2trash, I don't think the errors it
# raises let us distinguish permission errors from other errors in
# code. So for now, just let them all get logged as server errors.
send2trash(os_path)
return
else:
# Fall through to a real deletion below.
self.log.warning("Skipping trash for %s, on different device "
"to home directory", os_path)
if os.path.isdir(os_path):
# Don't permanently delete non-empty directories.
if is_non_empty_dir(os_path):
raise web.HTTPError(400, u'Directory %s not empty' % os_path)
self.log.debug("Removing directory %s", os_path)
with self.perm_to_403():
shutil.rmtree(os_path)
else:
self.log.debug("Unlinking file %s", os_path)
with self.perm_to_403():
rm(os_path)
def rename_file(self, old_path, new_path):
"""Rename a file."""
old_path = old_path.strip('/')
new_path = new_path.strip('/')
if new_path == old_path:
return
# Perform path validation prior to converting to os-specific value since this
# is still relative to root_dir.
self._validate_path(new_path)
new_os_path = self._get_os_path(new_path)
old_os_path = self._get_os_path(old_path)
# Should we proceed with the move?
# samefile: allow case-only renames on case-insensitive filesystems.
if os.path.exists(new_os_path) and not samefile(old_os_path, new_os_path):
raise web.HTTPError(409, u'File already exists: %s' % new_path)
# Move the file
try:
with self.perm_to_403():
shutil.move(old_os_path, new_os_path)
except web.HTTPError:
raise
except Exception as e:
raise web.HTTPError(500, u'Unknown error renaming file: %s %s' %
(old_path, e)) from e
def info_string(self):
    """One-line startup banner (translated) naming the serving directory."""
    return _("Serving notebooks from local directory: %s") % self.root_dir

def get_kernel_path(self, path, model=None):
    """Return the initial API path of a kernel associated with a given notebook"""
    if self.dir_exists(path):
        # Directories launch kernels in themselves.
        return path
    # Otherwise: everything before the last '/', or '' at top level.
    parent_dir, _sep, _name = path.rpartition('/')
    return parent_dir
@staticmethod
def _validate_path(path):
    """Checks if the path contains invalid characters relative to the current platform"""
    if sys.platform == 'win32':
        # On Windows, a colon creates an Alternate Data Stream and the other
        # characters are rejected by the filesystem with a confusing
        # '[Errno 22] Invalid Argument' (see
        # https://github.com/jupyter/notebook/issues/5190), so refuse them
        # all up front for consistent behavior.
        invalid_chars = '?:><*"|'
    else:
        # On non-windows systems, let the underlying file creation perform
        # enforcement when appropriate.
        invalid_chars = ''
    if any(char in path for char in invalid_chars):
        raise web.HTTPError(400, "Path '{}' contains characters that are invalid for the filesystem. "
                                 "Path names on this filesystem cannot contain any of the following "
                                 "characters: {}".format(path, invalid_chars))
| {
"repo_name": "sserrot/champion_relationships",
"path": "venv/Lib/site-packages/notebook/services/contents/filemanager.py",
"copies": "1",
"size": "22852",
"license": "mit",
"hash": -7042987752445893000,
"line_mean": 35.6805778491,
"line_max": 114,
"alpha_frac": 0.5625765797,
"autogenerated": false,
"ratio": 4.2061476164181855,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.001894538194616128,
"num_lines": 623
} |
"""A contents manager that uses the local file system for storage."""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
import errno
import io
import os
import shutil
import stat
import warnings
import mimetypes
import nbformat
from tornado import web
from .filecheckpoints import FileCheckpoints
from .fileio import FileManagerMixin
from .manager import ContentsManager
from ipython_genutils.importstring import import_item
from traitlets import Any, Unicode, Bool, TraitError, observe, default, validate
from ipython_genutils.py3compat import getcwd, string_types
from notebook import _tz as tz
from notebook.utils import (
is_hidden, is_file_hidden,
to_api_path,
)
try:
from os.path import samefile
except ImportError:
# windows + py2
from notebook.utils import samefile_simple as samefile
_script_exporter = None


def _post_save_script(model, os_path, contents_manager, **kwargs):
    """convert notebooks to Python script after save with nbconvert

    replaces `jupyter notebook --script`
    """
    from nbconvert.exporters.script import ScriptExporter

    warnings.warn("`_post_save_script` is deprecated and will be removed in Notebook 5.0", DeprecationWarning)
    if model['type'] != 'notebook':
        # Only notebooks get converted; plain files and directories are ignored.
        return

    global _script_exporter
    if _script_exporter is None:
        # Lazily build one exporter and reuse it across saves.
        _script_exporter = ScriptExporter(parent=contents_manager)

    base, _ext = os.path.splitext(os_path)
    script, resources = _script_exporter.from_filename(os_path)
    script_fname = base + resources.get('output_extension', '.txt')
    contents_manager.log.info(
        "Saving script /%s", to_api_path(script_fname, contents_manager.root_dir))
    with io.open(script_fname, 'w', encoding='utf-8') as f:
        f.write(script)
# NOTE(review): leading indentation in this chunk was stripped by the export;
# code lines below are reproduced byte-identical to the original.
class FileContentsManager(FileManagerMixin, ContentsManager):
# Contents manager storing notebooks/files on the local filesystem,
# addressed by API paths ('/'-separated, relative to ``root_dir``).
root_dir = Unicode(config=True)
# Dynamic default: the parent app's notebook_dir, else the process cwd.
@default('root_dir')
def _default_root_dir(self):
try:
return self.parent.notebook_dir
except AttributeError:
return getcwd()
save_script = Bool(False, config=True, help='DEPRECATED, use post_save_hook. Will be removed in Notebook 5.0')

@observe('save_script')
def _update_save_script(self, change):
    """Observer for the deprecated ``save_script`` flag.

    Bug fix: traitlets invokes @observe handlers as ``handler(change)``,
    but the original signature took no ``change`` argument, so any
    assignment to ``save_script`` raised TypeError. Accept the change
    dict, and (matching the sibling version of this file) only install
    the hook when the flag is actually switched on.
    """
    if not change['new']:
        return
    self.log.warning("""
`--script` is deprecated and will be removed in notebook 5.0.
You can trigger nbconvert via pre- or post-save hooks:
    ContentsManager.pre_save_hook
    FileContentsManager.post_save_hook
A post-save hook has been registered that calls:
    jupyter nbconvert --to script [notebook]
which behaves similarly to `--script`.
""")
    self.post_save_hook = _post_save_script
# Configurable callback invoked by run_post_save_hook after each save;
# validated/resolved by _validate_post_save_hook below.
post_save_hook = Any(None, config=True, allow_none=True,
help="""Python callable or importstring thereof
to be called on the path of a file just saved.
This can be used to process the file on disk,
such as converting the notebook to a script or HTML via nbconvert.
It will be called as (all arguments passed by keyword)::
hook(os_path=os_path, model=model, contents_manager=instance)
- path: the filesystem path to the file just written
- model: the model representing the file
- contents_manager: this ContentsManager instance
"""
)
@validate('post_save_hook')
def _validate_post_save_hook(self, proposal):
    """Accept a callable, or an importstring naming one."""
    value = proposal['value']
    if isinstance(value, string_types):
        value = import_item(value)
    if callable(value):
        return value
    raise TraitError("post_save_hook must be callable")
def run_post_save_hook(self, model, os_path):
    """Run the post-save hook if defined, and log errors"""
    if self.post_save_hook:
        try:
            self.log.debug("Running post-save hook on %s", os_path)
            self.post_save_hook(os_path=os_path, model=model, contents_manager=self)
        except Exception as e:
            # Fix garbled log message: "failed o-n %s" -> "failed on %s".
            # (No ``raise ... from e`` here: this version of the file still
            # supports Python 2 — see the py3compat imports at the top.)
            self.log.error("Post-save hook failed on %s", os_path, exc_info=True)
            raise web.HTTPError(500, u'Unexpected error while running post hook save: %s' % e)
@validate('root_dir')
def _validate_root_dir(self, proposal):
"""Do a bit of validation of the root_dir."""
value = proposal['value']
if not os.path.isabs(value):
# If we receive a non-absolute path, make it absolute.
value = os.path.abspath(value)
if not os.path.isdir(value):
raise TraitError("%r is not a directory" % value)
return value
    @default('checkpoints_class')
    def _checkpoints_class_default(self):
        """Use file-based checkpoints by default for this manager."""
        return FileCheckpoints
def is_hidden(self, path):
"""Does the API style path correspond to a hidden directory or file?
Parameters
----------
path : string
The path to check. This is an API path (`/` separated,
relative to root_dir).
Returns
-------
hidden : bool
Whether the path exists and is hidden.
"""
path = path.strip('/')
os_path = self._get_os_path(path=path)
return is_hidden(os_path, self.root_dir)
def file_exists(self, path):
"""Returns True if the file exists, else returns False.
API-style wrapper for os.path.isfile
Parameters
----------
path : string
The relative path to the file (with '/' as separator)
Returns
-------
exists : bool
Whether the file exists.
"""
path = path.strip('/')
os_path = self._get_os_path(path)
return os.path.isfile(os_path)
def dir_exists(self, path):
"""Does the API-style path refer to an extant directory?
API-style wrapper for os.path.isdir
Parameters
----------
path : string
The path to check. This is an API path (`/` separated,
relative to root_dir).
Returns
-------
exists : bool
Whether the path is indeed a directory.
"""
path = path.strip('/')
os_path = self._get_os_path(path=path)
return os.path.isdir(os_path)
def exists(self, path):
"""Returns True if the path exists, else returns False.
API-style wrapper for os.path.exists
Parameters
----------
path : string
The API path to the file (with '/' as separator)
Returns
-------
exists : bool
Whether the target exists.
"""
path = path.strip('/')
os_path = self._get_os_path(path=path)
return os.path.exists(os_path)
def _base_model(self, path):
"""Build the common base of a contents model"""
os_path = self._get_os_path(path)
info = os.stat(os_path)
last_modified = tz.utcfromtimestamp(info.st_mtime)
created = tz.utcfromtimestamp(info.st_ctime)
# Create the base model.
model = {}
model['name'] = path.rsplit('/', 1)[-1]
model['path'] = path
model['last_modified'] = last_modified
model['created'] = created
model['content'] = None
model['format'] = None
model['mimetype'] = None
try:
model['writable'] = os.access(os_path, os.W_OK)
except OSError:
self.log.error("Failed to check write permissions on %s", os_path)
model['writable'] = False
return model
def _dir_model(self, path, content=True):
"""Build a model for a directory
if content is requested, will include a listing of the directory
"""
os_path = self._get_os_path(path)
four_o_four = u'directory does not exist: %r' % path
if not os.path.isdir(os_path):
raise web.HTTPError(404, four_o_four)
elif is_hidden(os_path, self.root_dir):
self.log.info("Refusing to serve hidden directory %r, via 404 Error",
os_path
)
raise web.HTTPError(404, four_o_four)
model = self._base_model(path)
model['type'] = 'directory'
if content:
model['content'] = contents = []
os_dir = self._get_os_path(path)
for name in os.listdir(os_dir):
try:
os_path = os.path.join(os_dir, name)
except UnicodeDecodeError as e:
self.log.warning(
"failed to decode filename '%s': %s", name, e)
continue
try:
st = os.stat(os_path)
except OSError as e:
# skip over broken symlinks in listing
if e.errno == errno.ENOENT:
self.log.warning("%s doesn't exist", os_path)
else:
self.log.warning("Error stat-ing %s: %s", (os_path, e))
continue
if not stat.S_ISREG(st.st_mode) and not stat.S_ISDIR(st.st_mode):
self.log.debug("%s not a regular file", os_path)
continue
if self.should_list(name) and not is_file_hidden(os_path, stat_res=st):
contents.append(self.get(
path='%s/%s' % (path, name),
content=False)
)
model['format'] = 'json'
return model
def _file_model(self, path, content=True, format=None):
"""Build a model for a file
if content is requested, include the file contents.
format:
If 'text', the contents will be decoded as UTF-8.
If 'base64', the raw bytes contents will be encoded as base64.
If not specified, try to decode as UTF-8, and fall back to base64
"""
model = self._base_model(path)
model['type'] = 'file'
os_path = self._get_os_path(path)
model['mimetype'] = mimetypes.guess_type(os_path)[0]
if content:
content, format = self._read_file(os_path, format)
if model['mimetype'] is None:
default_mime = {
'text': 'text/plain',
'base64': 'application/octet-stream'
}[format]
model['mimetype'] = default_mime
model.update(
content=content,
format=format,
)
return model
def _notebook_model(self, path, content=True):
"""Build a notebook model
if content is requested, the notebook content will be populated
as a JSON structure (not double-serialized)
"""
model = self._base_model(path)
model['type'] = 'notebook'
if content:
os_path = self._get_os_path(path)
nb = self._read_notebook(os_path, as_version=4)
self.mark_trusted_cells(nb, path)
model['content'] = nb
model['format'] = 'json'
self.validate_notebook_model(model)
return model
    def get(self, path, content=True, type=None, format=None):
        """ Takes a path for an entity and returns its model

        Parameters
        ----------
        path : str
            the API path that describes the relative path for the target
        content : bool
            Whether to include the contents in the reply
        type : str, optional
            The requested type - 'file', 'notebook', or 'directory'.
            Will raise HTTPError 400 if the content doesn't match.
        format : str, optional
            The requested format for file contents. 'text' or 'base64'.
            Ignored if this returns a notebook or directory model.

        Returns
        -------
        model : dict
            the contents model. If content=True, returns the contents
            of the file or directory as well.
        """
        path = path.strip('/')
        if not self.exists(path):
            raise web.HTTPError(404, u'No such file or directory: %s' % path)
        os_path = self._get_os_path(path)
        if os.path.isdir(os_path):
            # Directories win: requesting any other type at a directory
            # path is a client error.
            if type not in (None, 'directory'):
                raise web.HTTPError(400,
                    u'%s is a directory, not a %s' % (path, type), reason='bad type')
            model = self._dir_model(path, content=content)
        elif type == 'notebook' or (type is None and path.endswith('.ipynb')):
            # Explicit notebook requests, or .ipynb files by default.
            model = self._notebook_model(path, content=content)
        else:
            if type == 'directory':
                raise web.HTTPError(400,
                    u'%s is not a directory' % path, reason='bad type')
            model = self._file_model(path, content=content, format=format)
        return model
def _save_directory(self, os_path, model, path=''):
"""create a directory"""
if is_hidden(os_path, self.root_dir):
raise web.HTTPError(400, u'Cannot create hidden directory %r' % os_path)
if not os.path.exists(os_path):
with self.perm_to_403():
os.mkdir(os_path)
elif not os.path.isdir(os_path):
raise web.HTTPError(400, u'Not a directory: %s' % (os_path))
else:
self.log.debug("Directory %r already exists", os_path)
    def save(self, model, path=''):
        """Save the file model and return the model with no content.

        Dispatches on ``model['type']`` ('notebook', 'file' or
        'directory'), running the pre-save hook before writing and the
        post-save hook after a successful save.  Raises HTTPError 400
        for malformed models and 500 for unexpected write failures.
        """
        path = path.strip('/')
        if 'type' not in model:
            raise web.HTTPError(400, u'No file type provided')
        if 'content' not in model and model['type'] != 'directory':
            raise web.HTTPError(400, u'No file content provided')
        os_path = self._get_os_path(path)
        self.log.debug("Saving %s", os_path)
        self.run_pre_save_hook(model=model, path=path)
        try:
            if model['type'] == 'notebook':
                nb = nbformat.from_dict(model['content'])
                self.check_and_sign(nb, path)
                self._save_notebook(os_path, nb)
                # One checkpoint should always exist for notebooks.
                if not self.checkpoints.list_checkpoints(path):
                    self.create_checkpoint(path)
            elif model['type'] == 'file':
                # Missing format will be handled internally by _save_file.
                self._save_file(os_path, model['content'], model.get('format'))
            elif model['type'] == 'directory':
                self._save_directory(os_path, model, path)
            else:
                raise web.HTTPError(400, "Unhandled contents type: %s" % model['type'])
        except web.HTTPError:
            # Deliberate HTTP errors propagate untouched.
            raise
        except Exception as e:
            self.log.error(u'Error while saving file: %s %s', path, e, exc_info=True)
            raise web.HTTPError(500, u'Unexpected error while saving file: %s %s' % (path, e))
        validation_message = None
        if model['type'] == 'notebook':
            # Re-validate after the round trip; surface any message.
            self.validate_notebook_model(model)
            validation_message = model.get('message', None)
        model = self.get(path, content=False)
        if validation_message:
            model['message'] = validation_message
        self.run_post_save_hook(model=model, os_path=os_path)
        return model
    def delete_file(self, path):
        """Delete file at path.

        Directories must be empty to be deleted; a directory whose only
        entry is the leftover checkpoints directory is treated as empty.
        Raises HTTPError 400 for non-empty directories and 404 for
        missing files.
        """
        path = path.strip('/')
        os_path = self._get_os_path(path)
        rm = os.unlink
        if os.path.isdir(os_path):
            listing = os.listdir(os_path)
            # Don't delete non-empty directories.
            # A directory containing only leftover checkpoints is
            # considered empty.
            cp_dir = getattr(self.checkpoints, 'checkpoint_dir', None)
            for entry in listing:
                if entry != cp_dir:
                    raise web.HTTPError(400, u'Directory %s not empty' % os_path)
        elif not os.path.isfile(os_path):
            raise web.HTTPError(404, u'File does not exist: %s' % os_path)
        if os.path.isdir(os_path):
            self.log.debug("Removing directory %s", os_path)
            with self.perm_to_403():
                # rmtree also removes the checkpoint dir, if present.
                shutil.rmtree(os_path)
        else:
            self.log.debug("Unlinking file %s", os_path)
            with self.perm_to_403():
                rm(os_path)
def rename_file(self, old_path, new_path):
"""Rename a file."""
old_path = old_path.strip('/')
new_path = new_path.strip('/')
if new_path == old_path:
return
new_os_path = self._get_os_path(new_path)
old_os_path = self._get_os_path(old_path)
# Should we proceed with the move?
if os.path.exists(new_os_path) and not samefile(old_os_path, new_os_path):
raise web.HTTPError(409, u'File already exists: %s' % new_path)
# Move the file
try:
with self.perm_to_403():
shutil.move(old_os_path, new_os_path)
except web.HTTPError:
raise
except Exception as e:
raise web.HTTPError(500, u'Unknown error renaming file: %s %s' % (old_path, e))
def info_string(self):
return "Serving notebooks from local directory: %s" % self.root_dir
def get_kernel_path(self, path, model=None):
"""Return the initial API path of a kernel associated with a given notebook"""
if self.dir_exists(path):
return path
if '/' in path:
parent_dir = path.rsplit('/', 1)[0]
else:
parent_dir = ''
return parent_dir
| {
"repo_name": "unnikrishnankgs/va",
"path": "venv/lib/python3.5/site-packages/notebook/services/contents/filemanager.py",
"copies": "3",
"size": "17764",
"license": "bsd-2-clause",
"hash": -7572116872360113000,
"line_mean": 33.6953125,
"line_max": 114,
"alpha_frac": 0.5660887188,
"autogenerated": false,
"ratio": 4.100646352723915,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6166735071523914,
"avg_score": null,
"num_lines": null
} |
"""A contents manager that uses the local file system for storage."""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
import io
import os
import shutil
import mimetypes
import nbformat
from tornado import web
from .filecheckpoints import FileCheckpoints
from .fileio import FileManagerMixin
from .manager import ContentsManager
from ipython_genutils.importstring import import_item
from traitlets import Any, Unicode, Bool, TraitError
from ipython_genutils.py3compat import getcwd, string_types
from . import tz
from notebook.utils import (
is_hidden,
to_api_path,
)
# Lazily-created ScriptExporter shared by all _post_save_script calls.
_script_exporter = None

def _post_save_script(model, os_path, contents_manager, **kwargs):
    """convert notebooks to Python script after save with nbconvert

    replaces `ipython notebook --script`
    """
    from nbconvert.exporters.script import ScriptExporter
    if model['type'] != 'notebook':
        # Only notebooks are exported; plain files/directories are skipped.
        return
    global _script_exporter
    if _script_exporter is None:
        _script_exporter = ScriptExporter(parent=contents_manager)
    log = contents_manager.log
    base, ext = os.path.splitext(os_path)
    py_fname = base + '.py'
    script, resources = _script_exporter.from_filename(os_path)
    # The exporter decides the real extension (e.g. '.py' for Python).
    script_fname = base + resources.get('output_extension', '.txt')
    log.info("Saving script /%s", to_api_path(script_fname, contents_manager.root_dir))
    with io.open(script_fname, 'w', encoding='utf-8') as f:
        f.write(script)
class FileContentsManager(FileManagerMixin, ContentsManager):
    """A ContentsManager that persists contents on the local filesystem."""

    # Base directory that all API paths are resolved against.
    root_dir = Unicode(config=True)

    def _root_dir_default(self):
        # Prefer the application's configured notebook_dir; fall back to
        # the process working directory when there is no parent app.
        try:
            return self.parent.notebook_dir
        except AttributeError:
            return getcwd()
    # Legacy flag; flipping it installs the nbconvert post-save hook.
    save_script = Bool(False, config=True, help='DEPRECATED, use post_save_hook')

    def _save_script_changed(self):
        """Warn that `--script` is deprecated and register the
        equivalent nbconvert post-save hook."""
        self.log.warn("""
`--script` is deprecated. You can trigger nbconvert via pre- or post-save hooks:
ContentsManager.pre_save_hook
FileContentsManager.post_save_hook
A post-save hook has been registered that calls:
ipython nbconvert --to script [notebook]
which behaves similarly to `--script`.
""")
        self.post_save_hook = _post_save_script
    # Optional user hook run after each successful save; may be given as
    # a dotted import string, resolved by the change handler below.
    post_save_hook = Any(None, config=True,
        help="""Python callable or importstring thereof
to be called on the path of a file just saved.
This can be used to process the file on disk,
such as converting the notebook to a script or HTML via nbconvert.
It will be called as (all arguments passed by keyword)::
hook(os_path=os_path, model=model, contents_manager=instance)
- path: the filesystem path to the file just written
- model: the model representing the file
- contents_manager: this ContentsManager instance
"""
    )

    def _post_save_hook_changed(self, name, old, new):
        """Resolve import strings to callables; reject non-callables."""
        if new and isinstance(new, string_types):
            # Re-assigning re-triggers this handler with the resolved
            # callable, which then passes the callable() check.
            self.post_save_hook = import_item(self.post_save_hook)
        elif new:
            if not callable(new):
                raise TraitError("post_save_hook must be callable")
    def run_post_save_hook(self, model, os_path):
        """Run the post-save hook if defined, and log errors"""
        if self.post_save_hook:
            try:
                self.log.debug("Running post-save hook on %s", os_path)
                self.post_save_hook(os_path=os_path, model=model, contents_manager=self)
            except Exception:
                # Best-effort: a failing hook is logged but never breaks
                # the save that triggered it.
                self.log.error("Post-save hook failed on %s", os_path, exc_info=True)
    def _root_dir_changed(self, name, old, new):
        """Do a bit of validation of the root_dir."""
        if not os.path.isabs(new):
            # If we receive a non-absolute path, make it absolute.
            # Re-assigning the trait re-triggers this handler with the
            # absolute value, which then reaches the isdir check below.
            self.root_dir = os.path.abspath(new)
            return
        if not os.path.isdir(new):
            raise TraitError("%r is not a directory" % new)
    def _checkpoints_class_default(self):
        """Use file-based checkpoints by default."""
        return FileCheckpoints
def is_hidden(self, path):
"""Does the API style path correspond to a hidden directory or file?
Parameters
----------
path : string
The path to check. This is an API path (`/` separated,
relative to root_dir).
Returns
-------
hidden : bool
Whether the path exists and is hidden.
"""
path = path.strip('/')
os_path = self._get_os_path(path=path)
return is_hidden(os_path, self.root_dir)
def file_exists(self, path):
"""Returns True if the file exists, else returns False.
API-style wrapper for os.path.isfile
Parameters
----------
path : string
The relative path to the file (with '/' as separator)
Returns
-------
exists : bool
Whether the file exists.
"""
path = path.strip('/')
os_path = self._get_os_path(path)
return os.path.isfile(os_path)
def dir_exists(self, path):
"""Does the API-style path refer to an extant directory?
API-style wrapper for os.path.isdir
Parameters
----------
path : string
The path to check. This is an API path (`/` separated,
relative to root_dir).
Returns
-------
exists : bool
Whether the path is indeed a directory.
"""
path = path.strip('/')
os_path = self._get_os_path(path=path)
return os.path.isdir(os_path)
def exists(self, path):
"""Returns True if the path exists, else returns False.
API-style wrapper for os.path.exists
Parameters
----------
path : string
The API path to the file (with '/' as separator)
Returns
-------
exists : bool
Whether the target exists.
"""
path = path.strip('/')
os_path = self._get_os_path(path=path)
return os.path.exists(os_path)
def _base_model(self, path):
"""Build the common base of a contents model"""
os_path = self._get_os_path(path)
info = os.stat(os_path)
last_modified = tz.utcfromtimestamp(info.st_mtime)
created = tz.utcfromtimestamp(info.st_ctime)
# Create the base model.
model = {}
model['name'] = path.rsplit('/', 1)[-1]
model['path'] = path
model['last_modified'] = last_modified
model['created'] = created
model['content'] = None
model['format'] = None
model['mimetype'] = None
try:
model['writable'] = os.access(os_path, os.W_OK)
except OSError:
self.log.error("Failed to check write permissions on %s", os_path)
model['writable'] = False
return model
    def _dir_model(self, path, content=True):
        """Build a model for a directory

        if content is requested, will include a listing of the directory
        """
        os_path = self._get_os_path(path)
        four_o_four = u'directory does not exist: %r' % path
        if not os.path.isdir(os_path):
            raise web.HTTPError(404, four_o_four)
        elif is_hidden(os_path, self.root_dir):
            # Hidden directories masquerade as missing to avoid leaking
            # their existence.
            self.log.info("Refusing to serve hidden directory %r, via 404 Error",
                os_path
            )
            raise web.HTTPError(404, four_o_four)
        model = self._base_model(path)
        model['type'] = 'directory'
        if content:
            model['content'] = contents = []
            os_dir = self._get_os_path(path)
            for name in os.listdir(os_dir):
                os_path = os.path.join(os_dir, name)
                # skip over broken symlinks in listing
                if not os.path.exists(os_path):
                    # NOTE(review): log.warn is a deprecated alias of
                    # log.warning in the stdlib logging module.
                    self.log.warn("%s doesn't exist", os_path)
                    continue
                elif not os.path.isfile(os_path) and not os.path.isdir(os_path):
                    self.log.debug("%s not a regular file", os_path)
                    continue
                if self.should_list(name) and not is_hidden(os_path, self.root_dir):
                    contents.append(self.get(
                        path='%s/%s' % (path, name),
                        content=False)
                    )
            model['format'] = 'json'
        return model
    def _file_model(self, path, content=True, format=None):
        """Build a model for a file

        if content is requested, include the file contents.

        format:
          If 'text', the contents will be decoded as UTF-8.
          If 'base64', the raw bytes contents will be encoded as base64.
          If not specified, try to decode as UTF-8, and fall back to base64
        """
        model = self._base_model(path)
        model['type'] = 'file'
        if content:
            os_path = self._get_os_path(path)
            content, format = self._read_file(os_path, format)
            # Fall back to a generic mimetype matching the read format
            # when none can be guessed from the filename.
            default_mime = {
                'text': 'text/plain',
                'base64': 'application/octet-stream'
            }[format]
            model.update(
                content=content,
                format=format,
                mimetype=mimetypes.guess_type(os_path)[0] or default_mime,
            )
        return model
def _notebook_model(self, path, content=True):
"""Build a notebook model
if content is requested, the notebook content will be populated
as a JSON structure (not double-serialized)
"""
model = self._base_model(path)
model['type'] = 'notebook'
if content:
os_path = self._get_os_path(path)
nb = self._read_notebook(os_path, as_version=4)
self.mark_trusted_cells(nb, path)
model['content'] = nb
model['format'] = 'json'
self.validate_notebook_model(model)
return model
    def get(self, path, content=True, type=None, format=None):
        """ Takes a path for an entity and returns its model

        Parameters
        ----------
        path : str
            the API path that describes the relative path for the target
        content : bool
            Whether to include the contents in the reply
        type : str, optional
            The requested type - 'file', 'notebook', or 'directory'.
            Will raise HTTPError 400 if the content doesn't match.
        format : str, optional
            The requested format for file contents. 'text' or 'base64'.
            Ignored if this returns a notebook or directory model.

        Returns
        -------
        model : dict
            the contents model. If content=True, returns the contents
            of the file or directory as well.
        """
        path = path.strip('/')
        if not self.exists(path):
            raise web.HTTPError(404, u'No such file or directory: %s' % path)
        os_path = self._get_os_path(path)
        if os.path.isdir(os_path):
            # Directories win: requesting any other type at a directory
            # path is a client error.
            if type not in (None, 'directory'):
                raise web.HTTPError(400,
                    u'%s is a directory, not a %s' % (path, type), reason='bad type')
            model = self._dir_model(path, content=content)
        elif type == 'notebook' or (type is None and path.endswith('.ipynb')):
            # Explicit notebook requests, or .ipynb files by default.
            model = self._notebook_model(path, content=content)
        else:
            if type == 'directory':
                raise web.HTTPError(400,
                    u'%s is not a directory' % path, reason='bad type')
            model = self._file_model(path, content=content, format=format)
        return model
def _save_directory(self, os_path, model, path=''):
"""create a directory"""
if is_hidden(os_path, self.root_dir):
raise web.HTTPError(400, u'Cannot create hidden directory %r' % os_path)
if not os.path.exists(os_path):
with self.perm_to_403():
os.mkdir(os_path)
elif not os.path.isdir(os_path):
raise web.HTTPError(400, u'Not a directory: %s' % (os_path))
else:
self.log.debug("Directory %r already exists", os_path)
    def save(self, model, path=''):
        """Save the file model and return the model with no content.

        Dispatches on ``model['type']`` ('notebook', 'file' or
        'directory'), running the pre-save hook before writing and the
        post-save hook after a successful save.  Raises HTTPError 400
        for malformed models and 500 for unexpected write failures.
        """
        path = path.strip('/')
        if 'type' not in model:
            raise web.HTTPError(400, u'No file type provided')
        if 'content' not in model and model['type'] != 'directory':
            raise web.HTTPError(400, u'No file content provided')
        os_path = self._get_os_path(path)
        self.log.debug("Saving %s", os_path)
        self.run_pre_save_hook(model=model, path=path)
        try:
            if model['type'] == 'notebook':
                nb = nbformat.from_dict(model['content'])
                self.check_and_sign(nb, path)
                self._save_notebook(os_path, nb)
                # One checkpoint should always exist for notebooks.
                if not self.checkpoints.list_checkpoints(path):
                    self.create_checkpoint(path)
            elif model['type'] == 'file':
                # Missing format will be handled internally by _save_file.
                self._save_file(os_path, model['content'], model.get('format'))
            elif model['type'] == 'directory':
                self._save_directory(os_path, model, path)
            else:
                raise web.HTTPError(400, "Unhandled contents type: %s" % model['type'])
        except web.HTTPError:
            # Deliberate HTTP errors propagate untouched.
            raise
        except Exception as e:
            self.log.error(u'Error while saving file: %s %s', path, e, exc_info=True)
            raise web.HTTPError(500, u'Unexpected error while saving file: %s %s' % (path, e))
        validation_message = None
        if model['type'] == 'notebook':
            # Re-validate after the round trip; surface any message.
            self.validate_notebook_model(model)
            validation_message = model.get('message', None)
        model = self.get(path, content=False)
        if validation_message:
            model['message'] = validation_message
        self.run_post_save_hook(model=model, os_path=os_path)
        return model
    def delete_file(self, path):
        """Delete file at path.

        Directories must be empty to be deleted; a directory whose only
        entry is the leftover checkpoints directory is treated as empty.
        Raises HTTPError 400 for non-empty directories and 404 for
        missing files.
        """
        path = path.strip('/')
        os_path = self._get_os_path(path)
        rm = os.unlink
        if os.path.isdir(os_path):
            listing = os.listdir(os_path)
            # Don't delete non-empty directories.
            # A directory containing only leftover checkpoints is
            # considered empty.
            cp_dir = getattr(self.checkpoints, 'checkpoint_dir', None)
            for entry in listing:
                if entry != cp_dir:
                    raise web.HTTPError(400, u'Directory %s not empty' % os_path)
        elif not os.path.isfile(os_path):
            raise web.HTTPError(404, u'File does not exist: %s' % os_path)
        if os.path.isdir(os_path):
            self.log.debug("Removing directory %s", os_path)
            with self.perm_to_403():
                # rmtree also removes the checkpoint dir, if present.
                shutil.rmtree(os_path)
        else:
            self.log.debug("Unlinking file %s", os_path)
            with self.perm_to_403():
                rm(os_path)
def rename_file(self, old_path, new_path):
"""Rename a file."""
old_path = old_path.strip('/')
new_path = new_path.strip('/')
if new_path == old_path:
return
new_os_path = self._get_os_path(new_path)
old_os_path = self._get_os_path(old_path)
# Should we proceed with the move?
if os.path.exists(new_os_path):
raise web.HTTPError(409, u'File already exists: %s' % new_path)
# Move the file
try:
with self.perm_to_403():
shutil.move(old_os_path, new_os_path)
except web.HTTPError:
raise
except Exception as e:
raise web.HTTPError(500, u'Unknown error renaming file: %s %s' % (old_path, e))
def info_string(self):
return "Serving notebooks from local directory: %s" % self.root_dir
def get_kernel_path(self, path, model=None):
"""Return the initial API path of a kernel associated with a given notebook"""
if '/' in path:
parent_dir = path.rsplit('/', 1)[0]
else:
parent_dir = ''
return parent_dir
| {
"repo_name": "bdh1011/wau",
"path": "venv/lib/python2.7/site-packages/notebook/services/contents/filemanager.py",
"copies": "1",
"size": "16443",
"license": "mit",
"hash": 1926732610543299000,
"line_mean": 33.6898734177,
"line_max": 97,
"alpha_frac": 0.5661375661,
"autogenerated": false,
"ratio": 4.075092936802974,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.001184538610649379,
"num_lines": 474
} |
'''A Context Free Grammar class along with peripheral classes, data structures,
and algorithms.'''
import itertools
import cgi
from util.tree import Tree
from util.mixin import Comparable, Keyed, Subscripted, Primed
class Symbol(Comparable, Keyed):
    '''A base class for symbols which appear in a grammar. Terminal and
    Nonterminal classes derive from this.

    Equality requires both the concrete class and the identifier to
    match; ordering comes from the Comparable/Keyed mixins via __key__.
    '''

    def __init__(self, identifier):
        '''Initialize the symbol with a string used to distinguish it.'''
        assert isinstance(identifier, str)
        self._identifier = identifier

    @property
    def name(self):
        '''Return the symbol's name or identifier.'''
        return self._identifier

    def is_nonterminal(self):
        '''Tell whether this is a nonterminal symbol.'''
        raise NotImplementedError()

    def is_terminal(self):
        '''Tell whether this is a terminal symbol.'''
        raise NotImplementedError()

    def __repr__(self):
        return '%s(%r)' % (self.__class__.__name__, self._identifier)

    def __eq__(self, y):
        '''Symbols must be of the same class and have the same identifier to be
        considered equal.'''
        return (
            self.__class__ == y.__class__ and
            self._identifier == y._identifier)

    def __key__(self):
        # Sort primarily by symbol category (see sort_num), then name.
        return (self.sort_num(), self.name)

    def sort_num(self):
        # Category rank used for ordering; subclasses override.
        return 0

    def html(self):
        '''Return a suitable representation of the object in html.'''
        return '<i>%s</i>' % cgi.escape(self.name)

    def dot_html(self):
        # Graphviz HTML-like labels use the same markup by default.
        return self.html()
class Nonterminal(Symbol):
    '''A class for nonterminal symbols, or variables, in a grammar.'''

    def __str__(self):
        '''The nonterminal's name appears in angle brackets unless it is a
        single capital letter.'''
        if len(self.name) != 1 or not self.name.isupper():
            return '<%s>' % self.name
        return self.name

    def is_nonterminal(self):
        return True

    def is_terminal(self):
        return False

    def sort_num(self):
        # Nonterminals sort before all terminal categories.
        return -1

    def html(self):
        return self._html_interp()

    def html_insert(self, html):
        # Place extra markup inside the variable tag (e.g. primes).
        return self._html_interp(insert=html)

    def html_after(self, html):
        # Place extra markup after the variable tag (e.g. subscripts).
        return self._html_interp(after=html)

    def dot_html(self):
        return self._html_interp(dot=True)

    def _html_interp(self, insert = '', after = '', dot = False):
        # Multi-character names are wrapped in angle brackets;
        # single-character names use a <var> (or <i> for dot) tag.
        if len(self.name) != 1:
            return '⟨%s⟩%s%s' % (cgi.escape(self.name), insert, after)
        tag = 'i' if dot else 'var'
        return '<%s>%s%s</%s>%s' % (tag, cgi.escape(self.name), insert, tag, after)
def _next_unused(original, taken, start, type_):
while True:
result = type_(original, start)
if result in taken: start += 1
else: break
return result
class SubscriptedNonterminal(Subscripted, Nonterminal):
    '''A nonterminal with a subscript.'''

    def __init__(self, name, subscript):
        Subscripted.__init__(self, subscript)
        Nonterminal.__init__(self, name)

    def __repr__(self):
        return 'SubscriptedNonterminal(%r, %r)' % (self.name, self.subscript)

    def html(self):
        return self.html_after('<sub>%s</sub>' % cgi.escape(str(self.subscript)))

    @staticmethod
    def next_unused(name, nonterminals):
        # Pick the smallest subscript >= 1 not already present in
        # `nonterminals`.
        return _next_unused(name, nonterminals, 1, SubscriptedNonterminal)
class PrimedNonterminal(Primed, Nonterminal):
    '''A nonterminal with some number of "prime" marks.'''

    def __init__(self, name, num_primes):
        Primed.__init__(self, num_primes)
        Nonterminal.__init__(self, name)

    def __repr__(self):
        return 'PrimedNonterminal(%r, %r)' % (self.name, self.num_primes)

    def html(self):
        '''Render the nonterminal with typographically correct prime
        characters (double/triple/quadruple primes where available).'''
        if self.num_primes == 2:
            primestr = '″' # double prime
        elif self.num_primes == 3:
            primestr = '‴' # triple prime
        elif self.num_primes == 4:
            # BUG FIX: this branch assigned to a misspelled name
            # ("primtstr"), so four primes raised NameError below.
            primestr = '⁗' # quadruple prime
        else:
            primestr = '′' * self.num_primes
        return self.html_insert(primestr)

    def dot_html(self):
        # BUG FIX: the insert argument was a garbled literal that did not
        # parse; repeat the single prime mark as html() does above.
        return self._html_interp(insert='′' * self.num_primes, dot=True)

    @staticmethod
    def next_unused(name, nonterminals):
        return _next_unused(name, nonterminals, 1, PrimedNonterminal)
class Terminal(Symbol):
    '''A class for terminal symbols in a grammar.'''

    def __str__(self):
        '''The terminal's identifier appears in double quotes unless it is a
        single lowercase letter.'''
        if len(self.name) != 1 or self.name.isupper():
            return '"%s"' % self.name
        return self.name

    def is_nonterminal(self):
        return False

    def is_terminal(self):
        return True

    def html(self):
        # An empty-named terminal renders as empty quotation marks.
        return '<code>%s</code>' % (cgi.escape(self.name)) if self.name else '“”'

    def dot_html(self):
        return '<b>%s</b>' % cgi.escape(self.name)

    def sort_num(self):
        return 1
class Marker(Terminal):
    '''A class for special marker symbols e.g. at the bottom of stacks, the ends
    of input tapes, etc. Traditionally represented as $, but may be initialized
    with any identifier. It is equal to no terminal symbol.'''

    def html(self):
        if len(self.name) == 1:
            return cgi.escape(self.name)
        # NOTE(review): delegates to Nonterminal.html (not Terminal.html)
        # for multi-character names, rendering the marker like a
        # variable -- confirm this is intentional.
        return Nonterminal.html(self)

    def sort_num(self):
        # Markers sort after every other symbol category.
        return 3
class Epsilon(Terminal):
    '''The empty-string terminal, rendered as an italic epsilon.'''

    def __init__(self):
        super(Epsilon, self).__init__('')

    def html(self):
        return '<i>ε</i>'

    def sort_num(self):
        # Sorts after ordinary terminals (1) but before markers (3).
        return 2
class ProductionRule(object):
    '''A class for production rules in a context-free grammar. The left side
    must consist of a single variable, and the right side must be a string of
    terminals or nonterminals.'''

    def __init__(self, left_side, right_side):
        '''Initialize the rule with a variable for the left side and a sequence
        of symbols for the right side.'''
        assert isinstance(left_side, Symbol)
        for s in right_side:
            assert isinstance(s, Symbol)
        self.left_side = left_side
        # Stored as a tuple so rules are hashable and immutable.
        self.right_side = tuple(right_side)

    def __str__(self):
        '''Represented with arrow notation. The symbols on the right side are
        separated by spaces unless all of their string representations are a
        single character long.'''
        if all(map(lambda x: len(str(x)) == 1, self.right_side)):
            sep = ''
        else:
            sep = ' '
        return '%s -> %s' % (self.left_side, sep.join([str(s) for s in \
            self.right_side]))

    def __repr__(self):
        return '%s(%s, %s)' % (self.__class__.__name__, repr(self.left_side), \
            repr(self.right_side))

    def __eq__(self, o):
        '''Tests if the left and right side are equal.'''
        return (
            isinstance(o, ProductionRule) and
            self.left_side == o.left_side and
            self.right_side == o.right_side)

    def __hash__(self):
        return hash((self.left_side, self.right_side))

    def _html(self, tohtml):
        # Shared renderer for html()/dot_html(); `tohtml` maps a symbol
        # to its markup. An empty right side renders as epsilon.
        if self.right_side:
            if any(filter(lambda X: isinstance(X, Terminal) and len(X.name) != 1, self.right_side)):
                sep = ' '
            else:
                sep = ''
            right = sep.join([tohtml(X) for X in self.right_side])
        else:
            right = '<i>ε</i>'
        return '%s → %s' % (tohtml(self.left_side), right)

    def html(self):
        return self._html(lambda x: x.html())

    def dot_html(self):
        return self._html(lambda x: x.dot_html())
class ParseTree(Tree(Symbol)):
    '''A class for parse trees or syntax trees.'''

    @property
    def symbol(self):
        '''Return the symbol object at this node.'''
        # Alias for the underlying Tree node value.
        return self.value
class ContextFreeGrammar(object):
    def __init__(self, *args):
        '''Initialize a context-free grammar in one of three ways:

        1. ContextFreeGrammar(string)
        The CFG is built from a string listing its production rules which
        follows a familiar syntax that allows test CFGs to be specified
        quickly and easily. The names of all symbols are one letter long,
        and all capital letters are treated as variables. Each line of the
        string is of the form
        A -> X1 | X2 | ... | Xn
        where A is a variable and the Xi are sentential forms. The CFG's
        terminal and nonterminal symbols are inferred from the productions
        given. The left side of the first rule is made the start variable.

        2. ContextFreeGrammar(list)
        The CFG is built from a list of production rule objects. The
        nonterminals, terminals, and start variable are all inferred from
        this listing.

        3. ContextFreeGrammar(Nu, Sigma, P, S)
        The CFG's nonterminals (Nu), terminals (Sigma), production rules
        (P), and start variable (S) are explicitly given and checked for
        correctness.'''
        if len(args) == 1:
            if isinstance(args[0], str):
                # Form 1: grammar described by a string.
                self._init_string(*args)
            else:
                # Form 2: an iterable of ProductionRule objects.
                self._check_productions(*args)
                self._init_productions(*args)
        elif len(args) == 4:
            # Form 3: explicit (nonterminals, terminals, productions, start).
            self._check_tuple(*args)
            self._init_tuple(*args)
        else:
            raise TypeError('ContextFreeGrammar takes 1 or 4 arguments')
@property
def nonterminals(self):
'''Return a set of the nonterminal symbols which appear in the grammar.
'''
return self._get_symbols_of_type(Nonterminal) | \
set(p.left_side for p in self._productions) | \
self._extra_nonterminals
@property
def terminals(self):
'''Return a set of the terminal symbols which appear in the grammar.'''
return self._get_symbols_of_type(Terminal) | \
self._extra_terminals
@property
def productions(self):
'''Return a list of the productions of the grammar in order.'''
return self._productions
@property
def start(self):
'''Return the grammar's start variable.'''
return self._start
@property
def symbols(self):
'''Return a list of the grammar's nonterminals and terminals.'''
return self.nonterminals | self.terminals
def productions_with_left_side(self, left_side):
    '''Lazily yield the grammar's rules whose left side equals the given
    symbol (returned as a filter over self.productions).'''
    def has_left_side(rule):
        return rule.left_side == left_side
    return filter(has_left_side, self.productions)
def production_dict(self):
    '''Map each nonterminal to the list of right sides it produces, in
    rule order. Nonterminals with no rules map to an empty list.'''
    mapping = {}
    for nonterminal in self.nonterminals:
        mapping[nonterminal] = []
    for rule in self.productions:
        mapping[rule.left_side].append(rule.right_side)
    return mapping
def _get_symbols_of_type(self, T):
    '''Collect every symbol of type T appearing on the right side of any
    production rule.'''
    found = set()
    for rule in self._productions:
        for symbol in rule.right_side:
            if isinstance(symbol, T):
                found.add(symbol)
    return found
def _init_string(self, string):
    '''Initialize the grammar from its string notation.

    Each non-empty line must read "A -> alternatives", where A is a
    single uppercase letter and alternatives are |-separated sentential
    forms whose uppercase characters become Nonterminals and all other
    characters become Terminals. The first rule's left side becomes the
    start variable.

    Raises ValueError if a line is malformed, a left side is invalid,
    or no rules are given.
    '''
    lines = filter(None, string.split('\n'))
    split_sides = [[w.strip() for w in line.split('->', 1)] for line in lines]
    # Validate every line before building anything.
    for split_rule in split_sides:
        if len(split_rule) != 2:
            raise ValueError('line is not formatted as a rule')
        left, right = split_rule
        if not (len(left) == 1 and left.isupper()):
            raise ValueError('%r is not valid on the left side of a production rule' % left)
    self._extra_nonterminals = set()
    self._extra_terminals = set()
    self._productions = []
    for left, right in split_sides:
        left_side = Nonterminal(left)
        # Each |-separated alternative yields its own production rule.
        for symbol_string in right.split('|'):
            right_side = []
            for c in symbol_string.strip():
                if c.isupper():
                    right_side.append(Nonterminal(c))
                else:
                    right_side.append(Terminal(c))
            self._productions.append(ProductionRule(left_side, right_side))
    if not self._productions:
        # BUG FIX: message previously read 'not production rules were
        # given'; corrected to match _check_productions/_check_tuple.
        raise ValueError('no production rules were given')
    self._start = self._productions[0].left_side
def _check_productions(self, productions):
    '''Validate a production-rule listing: it must be non-empty and
    contain only ProductionRule instances.'''
    if not productions:
        raise ValueError('no production rules were given')
    for rule in productions:
        if not isinstance(rule, ProductionRule):
            raise TypeError('production rules must be instances of ProductionRule')
def _init_productions(self, productions):
    '''Initialize the grammar from a validated rule listing; the first
    rule's left side becomes the start variable.'''
    self._extra_nonterminals, self._extra_terminals = set(), set()
    self._productions = list(productions)
    self._start = productions[0].left_side
def _check_tuple(self, nonterminals, terminals, productions, start):
    '''Validate an explicit (Nu, Sigma, P, S) grammar description,
    raising TypeError or ValueError on the first inconsistency found.'''
    # Nonterminal symbols must be Nonterminal instances.
    for n in nonterminals:
        if not isinstance(n, Nonterminal):
            raise TypeError('%r is not an instance of Nonterminal' % n)
    # Terminal symbols must be Terminal instances.
    for t in terminals:
        if not isinstance(t, Terminal):
            raise TypeError('%r is not an instance of Terminal' % t)
    # Rules must be non-empty, well-typed, and use only declared symbols.
    if not productions:
        raise ValueError('no production rules were given')
    for p in productions:
        if not isinstance(p, ProductionRule):
            raise TypeError('%r is not an instance of ProductionRule' % p)
        if p.left_side not in nonterminals:
            raise ValueError('%r is on the left side of a production rule but is not a nonterminal in the grammar' % p.left_side)
        for s in p.right_side:
            if s not in terminals and s not in nonterminals:
                raise ValueError('%r is on the right side of a production rule but is not a symbol in the grammar' % s)
    # The start symbol must be a declared nonterminal.
    if not isinstance(start, Nonterminal):
        raise TypeError('start variable %r is not an instance of Nonterminal' % start)
    if start not in nonterminals:
        raise ValueError('start variable %r is not a nonterminal in the grammar' % start)
def _init_tuple(self, nonterminals, terminals, productions, start):
    '''Initialize the grammar from an explicit (Nu, Sigma, P, S) tuple,
    recording only the symbols not already implied by the rules.'''
    # _productions must be assigned first: _get_symbols_of_type reads it.
    self._productions = list(productions)
    used_nonterminals = self._get_symbols_of_type(Nonterminal)
    used_terminals = self._get_symbols_of_type(Terminal)
    self._extra_nonterminals = set(nonterminals) - used_nonterminals
    self._extra_terminals = set(terminals) - used_terminals
    self._start = start
def __str__(self):
    '''Render the grammar as its rules, one per line.'''
    rule_lines = [str(rule) for rule in self.productions]
    return '\n'.join(rule_lines)
def __repr__(self):
    '''Render the grammar as a constructor call on its string form.'''
    class_name = type(self).__name__
    return "%s('''\n%s\n''')" % (class_name, self)
def _html(self, tohtml):
    # Render the grammar as an HTML <table> with one production per row.
    # `tohtml` maps a production rule to its HTML string; html() and
    # dot_html() supply the two variants.
    rows = ['<tr><td>%s</td></tr>' % tohtml(p) for p in self.productions]
    return '''\
<table>
%s
</table>
''' % '\n '.join(rows)
def html(self):
    '''Return an HTML table rendering of the grammar's rules.'''
    def render(rule):
        return rule.html()
    return self._html(render)
def dot_html(self):
    '''Return a Graphviz-dot-flavored HTML rendering of the rules.'''
    def render(rule):
        return rule.dot_html()
    return self._html(render)
| {
"repo_name": "bdusell/pycfg",
"path": "src/cfg/core.py",
"copies": "1",
"size": "15263",
"license": "mit",
"hash": 5842546802059050000,
"line_mean": 34.4953488372,
"line_max": 133,
"alpha_frac": 0.5860577868,
"autogenerated": false,
"ratio": 4.08866863112778,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.004867405612513916,
"num_lines": 430
} |
"""A context is similar to a table, but doesn't have a specific name.
It is the basic container for intermediate data when evaluating a query.
"""
from __future__ import absolute_import
import collections
import itertools
import logging
import six
from tinyquery import repeated_util
from tinyquery import tq_modes
class Context(object):
    """Holds the column data visible while an expression is evaluated.

    Attributes:
        num_rows: Row count shared by every column in this context.
        columns: OrderedDict mapping (table_name, column_name) to Column;
            table_name may be None. Must agree with the matching
            TypeContext.
        aggregate_context: None when aggregate functions are disallowed;
            otherwise the Context to switch to inside an aggregate call.
    """
    def __init__(self, num_rows, columns, aggregate_context):
        assert isinstance(columns, collections.OrderedDict)
        # Every column must carry exactly num_rows values.
        for key, column in columns.items():
            assert len(column.values) == num_rows, (
                'Column %s had %s rows, expected %s.' % (
                    key, len(column.values), num_rows))
        if aggregate_context is not None:
            assert isinstance(aggregate_context, Context)
        self.num_rows = num_rows
        self.columns = columns
        self.aggregate_context = aggregate_context

    def column_from_ref(self, column_ref):
        """Look up the Column addressed by a ColumnRef."""
        key = (column_ref.table, column_ref.column)
        return self.columns[key]

    def __repr__(self):
        return 'Context({}, {}, {})'.format(
            self.num_rows, self.columns, self.aggregate_context)

    def __eq__(self, other):
        mine = (self.num_rows, self.columns, self.aggregate_context)
        theirs = (other.num_rows, other.columns, other.aggregate_context)
        return mine == theirs

    def __hash__(self):
        # Column values are lists, so freeze them into tuples for hashing.
        frozen_values = tuple(
            tuple(column.values) for column in self.columns.values())
        return hash((self.num_rows, frozen_values, self.aggregate_context))
class Column(collections.namedtuple('Column', ['type', 'mode', 'values'])):
    """A single column of data.

    Fields:
        type: A constant from the tq_types module.
        mode: A constant from the tq_modes module (e.g. nullable or
            repeated).
        values: The raw cell values, one entry per row.
    """
def context_from_table(table, type_context):
    """Build a Context holding a table's values, named per type_context.

    The iteration order of type_context.columns must line up with the
    order of table.columns.
    """
    # Any column works for the row count; grab the first.
    first_column = table.columns[next(iter(table.columns))]
    renamed_columns = collections.OrderedDict(
        zip(type_context.columns, table.columns.values()))
    return Context(len(first_column.values), renamed_columns, None)
def context_with_overlayed_type_context(context, type_context):
    """Rebuild a context so its columns take their names from type_context."""
    # Any column works for the row count; grab the first.
    first_column = context.columns[next(iter(context.columns))]
    renamed_columns = collections.OrderedDict(
        zip(type_context.columns, context.columns.values()))
    return Context(len(first_column.values), renamed_columns, None)
def empty_context_from_type_context(type_context):
    """Create a zero-row Context whose columns mirror type_context."""
    assert type_context.aggregate_context is None
    columns = collections.OrderedDict()
    # TODO(Samantha): Fix this. Mode is not always nullable.
    for col_name, col_type in type_context.columns.items():
        columns[col_name] = Column(
            type=col_type, mode=tq_modes.NULLABLE, values=[])
    return Context(0, columns, None)
def mask_context(context, mask):
    """Apply a row filter to a given context.

    Arguments:
        context: A Context to filter.
        mask: A column of type bool. Each row in this column should be True
            if the row should be kept for the whole context and False
            otherwise.

    Returns:
        A new Context containing only the rows selected by the mask.
    """
    assert context.aggregate_context is None, (
        'Cannot mask a context with an aggregate context.')
    # If the mask column is repeated, we need to handle it specially.
    # There's several possibilities here, which are described inline.
    # TODO(colin): these have the same subtle differences from bigquery's
    # behavior as function evaluation on repeated fields. Fix.
    if mask.mode == tq_modes.REPEATED:
        # A row survives when any entry of its repeated mask row is truthy.
        num_rows = sum(1 for row in mask.values if any(row))
        new_columns = collections.OrderedDict()
        for col_name, col in context.columns.items():
            if col.mode == tq_modes.REPEATED:
                allowable = True
                new_values = []
                for mask_row, col_row in zip(mask.values, col.values):
                    if not any(mask_row):
                        # No matter any of the other conditions, if there's
                        # no truthy value in the mask row we skip the whole
                        # row.
                        continue
                    if len(mask_row) == 1:
                        # The single mask value must be truthy, or we'd have
                        # hit the previous branch; pass the whole row on.
                        new_values.append(
                            repeated_util.normalize_repeated_null(col_row))
                    elif len(mask_row) == len(col_row):
                        # As for function evaluation, when the number of
                        # values in a row matches across columns, we match
                        # them up individually.
                        new_values.append(
                            repeated_util.normalize_repeated_null(
                                list(itertools.compress(col_row, mask_row))))
                    elif len(col_row) in (0, 1):
                        # If the column has 0 or 1 values, fill it out to
                        # the length of the mask before filtering.
                        norm_row = repeated_util.normalize_column_to_length(
                            col_row, len(mask_row))
                        new_values.append(
                            repeated_util.normalize_repeated_null(
                                list(itertools.compress(norm_row, mask_row))))
                    else:
                        # We can't match up the number of values in the mask
                        # and this column. This *may* be ok, since this might
                        # be a column that won't be selected in the final
                        # result anyway. Since we can't do anything sensible,
                        # discard it from the output and log a warning: a
                        # missing column is a hard error, whereas a strange
                        # number of values might let a query succeed while
                        # doing something weird.
                        allowable = False
                        break
                if not allowable:
                    # BUG FIX: logging.warn is a deprecated alias of
                    # logging.warning; use the supported name.
                    logging.warning(
                        'Ignoring unselectable repeated column %s' % (
                            col_name,))
                    continue
            else:
                # For non-repeated columns, we retain the row if any of the
                # items in the mask row will be retained.
                new_values = list(itertools.compress(
                    col.values,
                    (any(mask_row) for mask_row in mask.values)))
            new_columns[col_name] = Column(
                type=col.type,
                mode=col.mode,
                values=new_values)
    else:
        # Non-repeated mask: a simple per-row compress of every column.
        num_rows = sum(1 for v in mask.values if v)
        new_columns = collections.OrderedDict(
            (name, Column(type=col.type,
                          mode=col.mode,
                          values=list(itertools.compress(col.values,
                                                         mask.values))))
            for name, col in context.columns.items())
    return Context(
        num_rows,
        new_columns,
        None)
def empty_context_from_template(context):
    """Return a new zero-row context with the same columns as `context`."""
    columns = collections.OrderedDict(
        (name, empty_column_from_template(column))
        for name, column in context.columns.items())
    return Context(num_rows=0, columns=columns, aggregate_context=None)
def empty_column_from_template(column):
    """Return an empty column sharing the given column's type and mode."""
    col_type, col_mode = column.type, column.mode
    return Column(type=col_type, mode=col_mode, values=[])
def append_row_to_context(src_context, index, dest_context):
    """Copy row `index` from src_context onto the end of dest_context.

    The schemas of the two contexts must match.
    """
    dest_context.num_rows += 1
    for name, dest_column in dest_context.columns.items():
        value = src_context.columns[name].values[index]
        dest_column.values.append(value)
def append_partial_context_to_context(src_context, dest_context):
    """Append every row of src_context to dest_context.

    src_context's columns need only be a subset of dest_context's; any
    column missing from the source is padded with None. The destination
    is assumed to key its columns by short (unqualified) names only.
    """
    dest_context.num_rows += src_context.num_rows
    # Drop the table-name half of the key for this operation.
    src_values_by_name = {
        col_name: column.values
        for (_, col_name), column in src_context.columns.items()}
    for (_, col_name), dest_column in dest_context.columns.items():
        values = src_values_by_name.get(col_name)
        if values is None:
            dest_column.values.extend([None] * src_context.num_rows)
        else:
            dest_column.values.extend(values)
def append_context_to_context(src_context, dest_context):
    """Append every row of src_context to dest_context.

    src_context's columns must be a subset of dest_context's, matched by
    fully-qualified name; columns missing from the source are padded
    with None.
    """
    dest_context.num_rows += src_context.num_rows
    for key, dest_column in dest_context.columns.items():
        src_column = src_context.columns.get(key)
        if src_column is None:
            dest_column.values.extend([None] * src_context.num_rows)
        else:
            dest_column.values.extend(src_column.values)
def row_context_from_context(src_context, index):
    """Extract row `index` of src_context as its own one-row Context."""
    assert src_context.aggregate_context is None
    columns = collections.OrderedDict()
    for col_name, col in src_context.columns.items():
        columns[col_name] = Column(type=col.type, mode=col.mode,
                                   values=[col.values[index]])
    return Context(1, columns, None)
def cross_join_contexts(context1, context2):
    """Return the cartesian product of the rows of two contexts."""
    assert context1.aggregate_context is None
    assert context2.aggregate_context is None
    # Start with empty columns covering both inputs' schemas.
    result_columns = collections.OrderedDict()
    for col_name, col in context1.columns.items():
        result_columns[col_name] = Column(type=col.type, mode=col.mode,
                                          values=[])
    for col_name, col in context2.columns.items():
        result_columns[col_name] = Column(type=col.type, mode=col.mode,
                                          values=[])
    # Emit one output row per (row1, row2) pair, row1-major.
    for index1 in six.moves.xrange(context1.num_rows):
        for index2 in six.moves.xrange(context2.num_rows):
            for col_name, column in context1.columns.items():
                result_columns[col_name].values.append(column.values[index1])
            for col_name, column in context2.columns.items():
                result_columns[col_name].values.append(column.values[index2])
    return Context(context1.num_rows * context2.num_rows, result_columns, None)
def truncate_context(context, limit):
    """Trim the context in place so it has at most `limit` rows."""
    assert context.aggregate_context is None
    # BigQuery accepts non-int limits, so floats are allowed up to here.
    limit = int(limit)
    if context.num_rows <= limit:
        return
    context.num_rows = limit
    for column in context.columns.values():
        del column.values[limit:]
| {
"repo_name": "Khan/tinyquery",
"path": "tinyquery/context.py",
"copies": "1",
"size": "13038",
"license": "mit",
"hash": 1012359320069804300,
"line_mean": 41.0580645161,
"line_max": 79,
"alpha_frac": 0.595413407,
"autogenerated": false,
"ratio": 4.346,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5441413407,
"avg_score": null,
"num_lines": null
} |
"""A context is similar to a table, but doesn't have a specific name.
It is the basic container for intermediate data when evaluating a query.
"""
import collections
import itertools
class Context(object):
    """The set of columns visible while evaluating an expression.

    Fields:
        num_rows: Row count shared by every column in this context.
        columns: OrderedDict from (table_name, column_name) to Column;
            table_name may be None. Must agree with the matching
            TypeContext.
        aggregate_context: None when aggregate functions are disallowed;
            otherwise the Context used inside aggregate function calls.
    """
    def __init__(self, num_rows, columns, aggregate_context):
        assert isinstance(columns, collections.OrderedDict)
        # Every column must carry exactly num_rows values.
        for key, column in columns.iteritems():
            assert len(column.values) == num_rows, (
                'Column %s had %s rows, expected %s.' % (
                    key, len(column.values), num_rows))
        if aggregate_context is not None:
            assert isinstance(aggregate_context, Context)
        self.num_rows = num_rows
        self.columns = columns
        self.aggregate_context = aggregate_context

    def column_from_ref(self, column_ref):
        """Look up the Column addressed by a ColumnRef."""
        key = (column_ref.table, column_ref.column)
        return self.columns[key]

    def __repr__(self):
        return 'Context({}, {}, {})'.format(
            self.num_rows, self.columns, self.aggregate_context)

    def __eq__(self, other):
        mine = (self.num_rows, self.columns, self.aggregate_context)
        theirs = (other.num_rows, other.columns, other.aggregate_context)
        return mine == theirs

    def __hash__(self):
        # Column values are lists, so freeze them into tuples for hashing.
        frozen_values = tuple(
            tuple(column.values) for column in self.columns.values())
        return hash((self.num_rows, frozen_values, self.aggregate_context))
class Column(collections.namedtuple('Column', ['type', 'values'])):
    """One column of data.

    Fields:
        type: A constant from the tq_types module.
        values: The raw cell values, one entry per row.
    """
def context_from_table(table, type_context):
    """Build a Context holding a table's values, named per type_context.

    The column order of type_context must match the table's column order.
    """
    # Any column works for the row count; grab the first.
    first_column = table.columns.itervalues().next()
    renamed_columns = collections.OrderedDict(
        zip(type_context.columns.iterkeys(), table.columns.itervalues()))
    return Context(len(first_column.values), renamed_columns, None)
def context_with_overlayed_type_context(context, type_context):
    """Rebuild a context so its columns take their names from type_context."""
    # Any column works for the row count; grab the first.
    first_column = context.columns.itervalues().next()
    renamed_columns = collections.OrderedDict(
        zip(type_context.columns.iterkeys(), context.columns.itervalues()))
    return Context(len(first_column.values), renamed_columns, None)
def empty_context_from_type_context(type_context):
    """Create a zero-row Context whose columns mirror type_context."""
    assert type_context.aggregate_context is None
    columns = collections.OrderedDict()
    for col_name, col_type in type_context.columns.iteritems():
        columns[col_name] = Column(col_type, [])
    return Context(0, columns, None)
def mask_context(context, mask):
    """Apply a row filter to a given context.

    Arguments:
        context: A Context to filter.
        mask: A column of type bool; truthy entries keep the matching row
            across the whole context.
    """
    assert context.aggregate_context is None, (
        'Cannot mask a context with an aggregate context.')
    filtered_columns = collections.OrderedDict()
    for column_name, column in context.columns.iteritems():
        kept = list(itertools.compress(column.values, mask))
        filtered_columns[column_name] = Column(column.type, kept)
    return Context(sum(mask), filtered_columns, None)
def empty_context_from_template(context):
    """Return a new zero-row context with the same columns as `context`."""
    columns = collections.OrderedDict(
        (name, empty_column_from_template(column))
        for name, column in context.columns.iteritems())
    return Context(num_rows=0, columns=columns, aggregate_context=None)
def empty_column_from_template(column):
    """Return an empty column sharing the given column's type."""
    col_type = column.type
    return Column(col_type, [])
def append_row_to_context(src_context, index, dest_context):
    """Copy row `index` from src_context onto the end of dest_context.

    The schemas of the two contexts must match.
    """
    dest_context.num_rows += 1
    for name, dest_column in dest_context.columns.iteritems():
        value = src_context.columns[name].values[index]
        dest_column.values.append(value)
def append_partial_context_to_context(src_context, dest_context):
    """Append every row of src_context to dest_context.

    src_context's columns need only be a subset of dest_context's; any
    column missing from the source is padded with None. The destination
    is assumed to key its columns by short (unqualified) names only.
    """
    dest_context.num_rows += src_context.num_rows
    # Drop the table-name half of the key for this operation.
    src_values_by_name = {}
    for (_, col_name), column in src_context.columns.iteritems():
        src_values_by_name[col_name] = column.values
    for (_, col_name), dest_column in dest_context.columns.iteritems():
        values = src_values_by_name.get(col_name)
        if values is None:
            dest_column.values.extend([None] * src_context.num_rows)
        else:
            dest_column.values.extend(values)
def append_context_to_context(src_context, dest_context):
    """Append every row of src_context to dest_context.

    src_context's columns must be a subset of dest_context's, matched by
    fully-qualified name; columns missing from the source are padded
    with None.
    """
    dest_context.num_rows += src_context.num_rows
    for key, dest_column in dest_context.columns.iteritems():
        src_column = src_context.columns.get(key)
        if src_column is None:
            dest_column.values.extend([None] * src_context.num_rows)
        else:
            dest_column.values.extend(src_column.values)
def row_context_from_context(src_context, index):
    """Extract row `index` of src_context as its own one-row Context."""
    assert src_context.aggregate_context is None
    columns = collections.OrderedDict()
    for col_name, col in src_context.columns.iteritems():
        columns[col_name] = Column(col.type, [col.values[index]])
    return Context(1, columns, None)
def cross_join_contexts(context1, context2):
    """Return the cartesian product of the rows of two contexts."""
    assert context1.aggregate_context is None
    assert context2.aggregate_context is None
    # Start with empty columns covering both inputs' schemas.
    result_columns = collections.OrderedDict()
    for col_name, col in context1.columns.iteritems():
        result_columns[col_name] = Column(col.type, [])
    for col_name, col in context2.columns.iteritems():
        result_columns[col_name] = Column(col.type, [])
    # Emit one output row per (row1, row2) pair, row1-major.
    for index1 in xrange(context1.num_rows):
        for index2 in xrange(context2.num_rows):
            for col_name, column in context1.columns.iteritems():
                result_columns[col_name].values.append(column.values[index1])
            for col_name, column in context2.columns.iteritems():
                result_columns[col_name].values.append(column.values[index2])
    return Context(context1.num_rows * context2.num_rows, result_columns, None)
def truncate_context(context, limit):
    """Trim the context in place so it has at most `limit` rows."""
    assert context.aggregate_context is None
    # BigQuery accepts non-int limits, so floats are allowed up to here.
    limit = int(limit)
    if context.num_rows <= limit:
        return
    context.num_rows = limit
    for column in context.columns.itervalues():
        del column.values[limit:]
| {
"repo_name": "burnhamup/tinyquery",
"path": "tinyquery/context.py",
"copies": "1",
"size": "8385",
"license": "mit",
"hash": -4483320483931194000,
"line_mean": 37.6405529954,
"line_max": 79,
"alpha_frac": 0.6553369112,
"autogenerated": false,
"ratio": 4.11432777232581,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.526966468352581,
"avg_score": null,
"num_lines": null
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.