class ENoteAuth():
def __init__(self):
pass
def get_token(self):
        token = raw_input('Request a token from https://www.evernote.com/api/DeveloperToken.action and paste it here: ')
return token
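# Example usage (hypothetical): prompt the user interactively for a token.
#   token = ENoteAuth().get_token()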
|
{
"content_hash": "70c2256a5f0671b72678ddfcd3ba14a6",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 117,
"avg_line_length": 31.571428571428573,
"alnum_prop": 0.6289592760180995,
"repo_name": "tkjacobsen/enote",
"id": "2395b001e2ebabe2c7f63eba6f3629d309161fc6",
"size": "316",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "enote/auth.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "15120"
}
],
"symlink_target": ""
}
|
from time import time
import numpy as np
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.INFO)
def load_cifar10(path=None):
if path is None:
# DATASET_DIR = '/Users/Zhang/Research/Deep Learning Dataset/CIFAR/cifar-10-batches-py/'
DATASET_DIR = '/home/ubuntu/datasets/cifar-10-batches-py/'
else:
DATASET_DIR = path
TRAINSET_NAME = ['data_batch_1', 'data_batch_2', 'data_batch_3', 'data_batch_4', 'data_batch_5']
# TRAINSET_NAME = ['data_batch_1']
TESTSET_NAME = 'test_batch'
_train_batch_files = []
for name in TRAINSET_NAME:
_train_batch_files.append('{}{}'.format(DATASET_DIR, name))
test_batch = '{}{}'.format(DATASET_DIR, TESTSET_NAME)
import cPickle
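    # NOTE: cPickle exists only on Python 2; on Python 3 use `import pickle`
    # and `pickle.load(f, encoding='bytes')` (the dict keys then become bytes).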
_train_batches = []
for batch_file in _train_batch_files:
with open(batch_file, 'rb') as f:
_train_batches.append(cPickle.load(f))
with open(test_batch, 'rb') as f:
test_batch = cPickle.load(f)
    train_images = np.vstack([batch['data'] for batch in _train_batches])
    train_ys = np.hstack([batch['labels'] for batch in _train_batches])
test_images = test_batch['data']
test_ys = np.array(test_batch['labels'])
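    # Convert the integer class ids (0-9) into one-hot float32 label vectors.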
train_labels = np.zeros(shape=(len(train_ys), 10), dtype=np.float32)
train_labels[np.arange(len(train_ys)), train_ys] = 1
test_labels = np.zeros(shape=(len(test_ys), 10), dtype=np.float32)
test_labels[np.arange(len(test_ys)), test_ys] = 1
return train_images.astype(np.float32), train_labels, test_images.astype(np.float32), test_labels
def AlexNet_model_fn():
NUM_IMAGE_WIDTH = 32
NUM_IMAGE_HEIGHT = 32
NUM_IMAGE_CHANNEL = 3
NUM_CLASS = 10
with tf.variable_scope('input'):
x_raw = tf.placeholder(dtype=tf.float32, shape=[None, NUM_IMAGE_WIDTH * NUM_IMAGE_HEIGHT * NUM_IMAGE_CHANNEL],
name='raw_images')
x = tf.reshape(x_raw, shape=[-1, NUM_IMAGE_CHANNEL, NUM_IMAGE_WIDTH, NUM_IMAGE_HEIGHT], name='input_images')
data_format = 'NCHW'
        # one-hot labels must be float32 for softmax_cross_entropy_with_logits
        y = tf.placeholder(dtype=tf.float32, shape=[None, NUM_CLASS], name='input_onehot_labels')
    if not tf.test.is_built_with_cuda():
        # Without CUDA we run on the CPU, whose conv kernels only support
        # channels_last (NHWC), so transpose from channels_first (NCHW).
        # See https://www.tensorflow.org/performance/performance_guide#data_formats
        data_format = 'NHWC'
        x = tf.transpose(x, [0, 2, 3, 1])
with tf.variable_scope('conv1') as scope:
kernel = tf.get_variable('W', shape=[3, 3, 3, 48],
initializer=tf.truncated_normal_initializer(stddev=0.05, dtype=tf.float32),
dtype=tf.float32)
bias = tf.get_variable('b', [48], initializer=tf.constant_initializer(0.1))
        # Stride 2 in height and width; the strides layout follows data_format.
        strides = [1, 1, 2, 2] if data_format == 'NCHW' else [1, 2, 2, 1]
        conv = tf.nn.conv2d(x, kernel, strides, 'SAME', data_format=data_format)
pre_activation = tf.nn.bias_add(conv, bias, data_format=data_format)
conv1 = tf.nn.relu(pre_activation, name=scope.name)
tf.summary.histogram('Convolution_layers/conv1', conv1)
tf.summary.scalar('Convolution_layers/conv1', tf.nn.zero_fraction(conv1))
# norm1 = tf.nn.lrn(conv1, depth_radius=4, bias=2.0, alpha=0.001 / 9.0, beta=0.75, name='norm1')
if data_format == 'NCHW':
pool1 = tf.nn.max_pool(conv1, [1, 1, 3, 3], [1, 1, 2, 2], padding='SAME', data_format=data_format, name='pool1')
else:
pool1 = tf.nn.max_pool(conv1, [1, 3, 3, 1], [1, 2, 2, 1], padding='SAME', data_format=data_format, name='pool1')
with tf.variable_scope('conv2') as scope:
kernel = tf.get_variable('W', shape=[3, 3, 48, 128],
initializer=tf.truncated_normal_initializer(stddev=0.05, dtype=tf.float32),
dtype=tf.float32)
bias = tf.get_variable('b', [128], initializer=tf.constant_initializer(0.1))
conv = tf.nn.conv2d(pool1, kernel, [1, 1, 1, 1], 'SAME', data_format=data_format)
pre_activation = tf.nn.bias_add(conv, bias, data_format=data_format)
conv2 = tf.nn.relu(pre_activation, name=scope.name)
tf.summary.histogram('Convolution_layers/conv2', conv2)
tf.summary.scalar('Convolution_layers/conv2', tf.nn.zero_fraction(conv2))
# norm2 = tf.nn.lrn(conv2, depth_radius=4, bias=2.0, alpha=0.001 / 9.0, beta=0.75, name='norm2')
if data_format == 'NCHW':
pool2 = tf.nn.max_pool(conv2, [1, 1, 3, 3], [1, 1, 2, 2], padding='SAME', data_format=data_format, name='pool2')
else:
pool2 = tf.nn.max_pool(conv2, [1, 3, 3, 1], [1, 2, 2, 1], padding='SAME', data_format=data_format, name='pool2')
with tf.variable_scope('conv3') as scope:
kernel = tf.get_variable('W', shape=[3, 3, 128, 128],
initializer=tf.truncated_normal_initializer(stddev=0.05, dtype=tf.float32),
dtype=tf.float32)
bias = tf.get_variable('b', [128], initializer=tf.constant_initializer(0.1))
conv = tf.nn.conv2d(pool2, kernel, [1, 1, 1, 1], 'SAME', data_format=data_format)
pre_activation = tf.nn.bias_add(conv, bias, data_format=data_format)
conv3 = tf.nn.relu(pre_activation, name=scope.name)
tf.summary.histogram('Convolution_layers/conv3', conv3)
tf.summary.scalar('Convolution_layers/conv3', tf.nn.zero_fraction(conv3))
with tf.variable_scope('conv4') as scope:
kernel = tf.get_variable('W', shape=[3, 3, 128, 64],
initializer=tf.truncated_normal_initializer(stddev=0.05, dtype=tf.float32),
dtype=tf.float32)
bias = tf.get_variable('b', [64], initializer=tf.constant_initializer(0.1))
conv = tf.nn.conv2d(conv3, kernel, [1, 1, 1, 1], 'SAME', data_format=data_format)
pre_activation = tf.nn.bias_add(conv, bias, data_format=data_format)
conv4 = tf.nn.relu(pre_activation, name=scope.name)
tf.summary.histogram('Convolution_layers/conv4', conv4)
tf.summary.scalar('Convolution_layers/conv4', tf.nn.zero_fraction(conv4))
if data_format == 'NCHW':
pool3 = tf.nn.max_pool(conv4, [1, 1, 3, 3], [1, 1, 2, 2], padding='SAME', data_format=data_format, name='pool3')
else:
pool3 = tf.nn.max_pool(conv4, [1, 3, 3, 1], [1, 2, 2, 1], padding='SAME', data_format=data_format, name='pool3')
with tf.variable_scope('fully_connected1') as scope:
pool3_flat = tf.reshape(pool3, [-1, 2 * 2 * 64])
weights = tf.get_variable('W', shape=[2 * 2 * 64, 64],
initializer=tf.truncated_normal_initializer(stddev=0.04, dtype=tf.float32),
dtype=tf.float32)
weight_decay = tf.multiply(tf.nn.l2_loss(weights), 0.004, name='weight_loss')
tf.add_to_collection('losses', weight_decay)
biases = tf.get_variable('b', [64], initializer=tf.constant_initializer(0.1))
fc1 = tf.nn.relu(tf.matmul(pool3_flat, weights) + biases, name=scope.name)
tf.summary.histogram('Fully_connected_layers/fc1', fc1)
tf.summary.scalar('Fully_connected_layers/fc1', tf.nn.zero_fraction(fc1))
with tf.variable_scope('fully_connected2') as scope:
weights = tf.get_variable('W', shape=[64, 32],
initializer=tf.truncated_normal_initializer(stddev=0.04, dtype=tf.float32),
dtype=tf.float32)
weight_decay = tf.multiply(tf.nn.l2_loss(weights), 0.004, name='weight_loss')
tf.add_to_collection('losses', weight_decay)
biases = tf.get_variable('b', [32], initializer=tf.constant_initializer(0.1))
fc2 = tf.nn.relu(tf.matmul(fc1, weights) + biases, name=scope.name)
tf.summary.histogram('Fully_connected_layers/fc2', fc2)
tf.summary.scalar('Fully_connected_layers/fc2', tf.nn.zero_fraction(fc2))
with tf.variable_scope('fully_connected3') as scope:
weights = tf.get_variable('W', shape=[32, NUM_CLASS],
initializer=tf.truncated_normal_initializer(stddev=1 / 192.0, dtype=tf.float32),
dtype=tf.float32)
weight_decay = tf.multiply(tf.nn.l2_loss(weights), 0.004, name='weight_loss')
tf.add_to_collection('losses', weight_decay)
biases = tf.get_variable('b', [NUM_CLASS], initializer=tf.constant_initializer(0.001))
softmax_linear = tf.add(tf.matmul(fc2, weights), biases, name=scope.name)
tf.summary.histogram('Fully_connected_layers/fc3', softmax_linear)
global_step = tf.Variable(initial_value=0, name='global_step', trainable=False)
y_pred_cls = tf.argmax(softmax_linear, axis=1)
return x_raw, y, softmax_linear, global_step, y_pred_cls
if __name__ == "__main__":
x, y, output, global_step, y_pred_cls = AlexNet_model_fn()
with tf.variable_scope('loss'):
cross_entropy_mean = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y,
logits=output),
name='cross_entropy')
tf.add_to_collection('losses', cross_entropy_mean)
loss = tf.add_n(tf.get_collection('losses'), name='total_loss')
optimizer = tf.train.RMSPropOptimizer(0.0001).minimize(loss, global_step=global_step)
with tf.variable_scope('accuracy'):
correct_prediction = tf.equal(y_pred_cls, tf.argmax(y, axis=1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
tf.summary.scalar('Accuracy/train', accuracy)
merged = tf.summary.merge_all()
saver = tf.train.Saver()
sess = tf.Session()
    tf_train_writer = tf.summary.FileWriter('./tensorboard/alexnet', sess.graph)
sess.run(tf.global_variables_initializer())
train_images, train_labels, test_images, test_labels = load_cifar10()
NUM_ITERATION = 60000
SIZE_BATCH = 64
for iter in range(NUM_ITERATION):
randidx = np.random.randint(len(train_images), size=SIZE_BATCH)
batch_train_images = train_images[randidx]
batch_train_labels = train_labels[randidx]
start_time = time()
_global_step, _ = sess.run([global_step, optimizer],
feed_dict={x: batch_train_images,
y: batch_train_labels})
duration = time() - start_time
if (_global_step % 10 == 0) or (iter == NUM_ITERATION - 1):
_loss, batch_acc = sess.run([loss, accuracy], feed_dict={x: batch_train_images, y: batch_train_labels})
msg = "Global Step: {0:>6}, accuracy: {1:>6.1%}, loss = {2:.2f} ({3:.1f} examples/sec, {4:.2f} sec/batch)"
print(msg.format(_global_step, batch_acc, _loss, SIZE_BATCH / duration, duration))
if (_global_step % 100 == 0) or (iter == NUM_ITERATION - 1):
data_merged, global_step_iter = sess.run([merged, global_step],
feed_dict={x: batch_train_images,
y: batch_train_labels})
predicted_class = sess.run(y_pred_cls, feed_dict={x: test_images, y: test_labels})
correct = (np.argmax(test_labels, axis=1) == predicted_class)
acc = correct.mean() * 100
correct_numbers = correct.sum()
print("Accuracy on Test-Set: {0:.2f}% ({1} / {2})".format(acc, correct_numbers, len(test_labels)))
summary = tf.Summary(value=[
tf.Summary.Value(tag="Accuracy/test", simple_value=acc/100.0),
])
tf_train_writer.add_summary(data_merged, global_step_iter)
tf_train_writer.add_summary(summary, global_step_iter)
            saver.save(sess, save_path='./tensorboard/alexnet/', global_step=global_step)
print("Saved checkpoint.")
sess.close()
|
{
"content_hash": "58293735e4f922568750e391c846c6c5",
"timestamp": "",
"source": "github",
"line_count": 245,
"max_line_length": 120,
"avg_line_length": 49.08571428571429,
"alnum_prop": 0.5965408282055547,
"repo_name": "zhangjunpeng9354/Learning-Tensorflow-by-Models",
"id": "f416a3005f79f42406acf3fdfd4627e0cb0d0729",
"size": "12026",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "alexnet-cifar10.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "111994"
}
],
"symlink_target": ""
}
|
"""
WSGI config for djdan project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "djdan.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
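# A minimal sketch (hypothetical, not part of this project) of delegating
# middleware as described in the module docstring:
#
#     class LowercasePathMiddleware(object):
#         def __init__(self, wrapped):
#             self.wrapped = wrapped
#         def __call__(self, environ, start_response):
#             environ["PATH_INFO"] = environ["PATH_INFO"].lower()
#             return self.wrapped(environ, start_response)
#
#     application = LowercasePathMiddleware(application)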
|
{
"content_hash": "f4a68cd220766091a777f454e477889a",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 79,
"avg_line_length": 40.42857142857143,
"alnum_prop": 0.799469964664311,
"repo_name": "BL-Labs/annotator_demonstrator",
"id": "3e779158329f8301096bf1430822566717eb5ae4",
"size": "1132",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "djdan/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "18496"
},
{
"name": "JavaScript",
"bytes": "1888"
},
{
"name": "Python",
"bytes": "25726"
}
],
"symlink_target": ""
}
|
"""Registration facilities for DOM. This module should not be used
directly. Instead, the functions getDOMImplementation and
registerDOMImplementation should be imported from xml.dom."""
# This is a list of well-known implementations. Well-known names
# should be published by posting to xml-sig@python.org, and are
# subsequently recorded in this file.
import sys
well_known_implementations = {
'minidom':'xml.dom.minidom',
'4DOM': 'xml.dom.DOMImplementation',
}
# DOM implementations not officially registered should register
# themselves with their name via registerDOMImplementation().
registered = {}
def registerDOMImplementation(name, factory):
"""registerDOMImplementation(name, factory)
Register the factory function with the name. The factory function
should return an object which implements the DOMImplementation
interface. The factory function can either return the same object,
or a new one (e.g. if that implementation supports some
customization)."""
registered[name] = factory
def _good_enough(dom, features):
"_good_enough(dom, features) -> Return 1 if the dom offers the features"
for f,v in features:
if not dom.hasFeature(f,v):
return 0
return 1
def getDOMImplementation(name=None, features=()):
"""getDOMImplementation(name = None, features = ()) -> DOM implementation.
Return a suitable DOM implementation. The name is either
well-known, the module name of a DOM implementation, or None. If
it is not None, imports the corresponding module and returns
DOMImplementation object if the import succeeds.
If name is not given, consider the available implementations to
find one with the required feature set. If no implementation can
be found, raise an ImportError. The features list must be a sequence
of (feature, version) pairs which are passed to hasFeature."""
import os
creator = None
mod = well_known_implementations.get(name)
if mod:
mod = __import__(mod, {}, {}, ['getDOMImplementation'])
return mod.getDOMImplementation()
elif name:
return registered[name]()
elif not sys.flags.ignore_environment and "PYTHON_DOM" in os.environ:
return getDOMImplementation(name = os.environ["PYTHON_DOM"])
# User did not specify a name, try implementations in arbitrary
# order, returning the one that has the required features
if isinstance(features, str):
features = _parse_feature_string(features)
for creator in registered.values():
dom = creator()
if _good_enough(dom, features):
return dom
for creator in well_known_implementations.keys():
try:
dom = getDOMImplementation(name = creator)
except Exception: # typically ImportError, or AttributeError
continue
if _good_enough(dom, features):
return dom
raise ImportError("no suitable DOM implementation found")
def _parse_feature_string(s):
features = []
parts = s.split()
i = 0
length = len(parts)
while i < length:
feature = parts[i]
if feature[0] in "0123456789":
raise ValueError("bad feature name: %r" % (feature,))
i = i + 1
version = None
if i < length:
v = parts[i]
if v[0] in "0123456789":
i = i + 1
version = v
features.append((feature, version))
return tuple(features)
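# Example (hypothetical usage): request any implementation supporting the
# DOM Core 2.0 feature, falling back over the well-known implementations:
#   impl = getDOMImplementation(features=[("core", "2.0")])
#   # or, equivalently, with a feature string:
#   impl = getDOMImplementation(features="core 2.0")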
|
{
"content_hash": "4c78d07410705910edffc39c5925cd19",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 78,
"avg_line_length": 34.85858585858586,
"alnum_prop": 0.6708200521587946,
"repo_name": "huguesv/PTVS",
"id": "69c17eebb265daad3243e62018409d72942a93af",
"size": "3451",
"binary": false,
"copies": "41",
"ref": "refs/heads/master",
"path": "Python/Product/Miniconda/Miniconda3-x64/Lib/xml/dom/domreg.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ASP",
"bytes": "109"
},
{
"name": "Batchfile",
"bytes": "10898"
},
{
"name": "C",
"bytes": "23236"
},
{
"name": "C#",
"bytes": "12464429"
},
{
"name": "C++",
"bytes": "211838"
},
{
"name": "CSS",
"bytes": "7025"
},
{
"name": "HTML",
"bytes": "34251"
},
{
"name": "JavaScript",
"bytes": "87257"
},
{
"name": "PowerShell",
"bytes": "25220"
},
{
"name": "Python",
"bytes": "913395"
},
{
"name": "Rich Text Format",
"bytes": "260880"
},
{
"name": "Smarty",
"bytes": "8156"
},
{
"name": "Tcl",
"bytes": "24968"
}
],
"symlink_target": ""
}
|
"""
Code to manage the creation and SQL rendering of 'where' constraints.
"""
import operator
from functools import reduce
from django.core.exceptions import EmptyResultSet
from django.db.models.expressions import Case, When
from django.db.models.lookups import Exact
from django.utils import tree
from django.utils.functional import cached_property
# Connection types
AND = "AND"
OR = "OR"
XOR = "XOR"
class WhereNode(tree.Node):
"""
An SQL WHERE clause.
The class is tied to the Query class that created it (in order to create
the correct SQL).
A child is usually an expression producing boolean values. Most likely the
expression is a Lookup instance.
    However, a child could also be any class with an as_sql() method, and
    either a relabeled_clone() method or relabel_aliases() and clone()
    methods, plus a contains_aggregate attribute.
"""
default = AND
resolved = False
conditional = True
def split_having_qualify(self, negated=False, must_group_by=False):
"""
Return three possibly None nodes: one for those parts of self that
should be included in the WHERE clause, one for those parts of self
that must be included in the HAVING clause, and one for those parts
that refer to window functions.
"""
if not self.contains_aggregate and not self.contains_over_clause:
return self, None, None
in_negated = negated ^ self.negated
        # Whether or not children must be connected in the same filtering
        # clause (WHERE > HAVING > QUALIFY) to maintain logical semantics.
must_remain_connected = (
(in_negated and self.connector == AND)
or (not in_negated and self.connector == OR)
or self.connector == XOR
)
if (
must_remain_connected
and self.contains_aggregate
and not self.contains_over_clause
):
            # It's much cheaper to short-circuit and stash everything in the
            # HAVING clause than to split the children, when possible.
return None, self, None
where_parts = []
having_parts = []
qualify_parts = []
for c in self.children:
if hasattr(c, "split_having_qualify"):
where_part, having_part, qualify_part = c.split_having_qualify(
in_negated, must_group_by
)
if where_part is not None:
where_parts.append(where_part)
if having_part is not None:
having_parts.append(having_part)
if qualify_part is not None:
qualify_parts.append(qualify_part)
elif c.contains_over_clause:
qualify_parts.append(c)
elif c.contains_aggregate:
having_parts.append(c)
else:
where_parts.append(c)
if must_remain_connected and qualify_parts:
# Disjunctive heterogeneous predicates can be pushed down to
# qualify as long as no conditional aggregation is involved.
if not where_parts or (where_parts and not must_group_by):
return None, None, self
elif where_parts:
# In theory this should only be enforced when dealing with
# where_parts containing predicates against multi-valued
# relationships that could affect aggregation results but this
# is complex to infer properly.
raise NotImplementedError(
"Heterogeneous disjunctive predicates against window functions are "
"not implemented when performing conditional aggregation."
)
where_node = (
self.create(where_parts, self.connector, self.negated)
if where_parts
else None
)
having_node = (
self.create(having_parts, self.connector, self.negated)
if having_parts
else None
)
qualify_node = (
self.create(qualify_parts, self.connector, self.negated)
if qualify_parts
else None
)
return where_node, having_node, qualify_node
def as_sql(self, compiler, connection):
"""
Return the SQL version of the where clause and the value to be
substituted in. Return '', [] if this node matches everything,
None, [] if this node is empty, and raise EmptyResultSet if this
node can't match anything.
"""
result = []
result_params = []
if self.connector == AND:
full_needed, empty_needed = len(self.children), 1
else:
full_needed, empty_needed = 1, len(self.children)
if self.connector == XOR and not connection.features.supports_logical_xor:
# Convert if the database doesn't support XOR:
# a XOR b XOR c XOR ...
# to:
# (a OR b OR c OR ...) AND (a + b + c + ...) == 1
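            # For two children this reads: a XOR b -> (a OR b) AND (a + b) == 1.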
lhs = self.__class__(self.children, OR)
rhs_sum = reduce(
operator.add,
(Case(When(c, then=1), default=0) for c in self.children),
)
rhs = Exact(1, rhs_sum)
return self.__class__([lhs, rhs], AND, self.negated).as_sql(
compiler, connection
)
for child in self.children:
try:
sql, params = compiler.compile(child)
except EmptyResultSet:
empty_needed -= 1
else:
if sql:
result.append(sql)
result_params.extend(params)
else:
full_needed -= 1
            # Check if this node matches nothing or everything, using the
            # counts of full and empty children computed above.
if empty_needed == 0:
if self.negated:
return "", []
else:
raise EmptyResultSet
if full_needed == 0:
if self.negated:
raise EmptyResultSet
else:
return "", []
conn = " %s " % self.connector
sql_string = conn.join(result)
if sql_string:
if self.negated:
# Some backends (Oracle at least) need parentheses
# around the inner SQL in the negated case, even if the
# inner SQL contains just a single expression.
sql_string = "NOT (%s)" % sql_string
elif len(result) > 1 or self.resolved:
sql_string = "(%s)" % sql_string
return sql_string, result_params
def get_group_by_cols(self, alias=None):
cols = []
for child in self.children:
cols.extend(child.get_group_by_cols())
return cols
def get_source_expressions(self):
return self.children[:]
def set_source_expressions(self, children):
assert len(children) == len(self.children)
self.children = children
def relabel_aliases(self, change_map):
"""
Relabel the alias values of any children. 'change_map' is a dictionary
mapping old (current) alias values to the new values.
"""
for pos, child in enumerate(self.children):
if hasattr(child, "relabel_aliases"):
# For example another WhereNode
child.relabel_aliases(change_map)
elif hasattr(child, "relabeled_clone"):
self.children[pos] = child.relabeled_clone(change_map)
def clone(self):
clone = self.create(connector=self.connector, negated=self.negated)
for child in self.children:
if hasattr(child, "clone"):
child = child.clone()
clone.children.append(child)
return clone
def relabeled_clone(self, change_map):
clone = self.clone()
clone.relabel_aliases(change_map)
return clone
def replace_expressions(self, replacements):
if replacement := replacements.get(self):
return replacement
clone = self.create(connector=self.connector, negated=self.negated)
for child in self.children:
clone.children.append(child.replace_expressions(replacements))
return clone
@classmethod
def _contains_aggregate(cls, obj):
if isinstance(obj, tree.Node):
return any(cls._contains_aggregate(c) for c in obj.children)
return obj.contains_aggregate
@cached_property
def contains_aggregate(self):
return self._contains_aggregate(self)
@classmethod
def _contains_over_clause(cls, obj):
if isinstance(obj, tree.Node):
return any(cls._contains_over_clause(c) for c in obj.children)
return obj.contains_over_clause
@cached_property
def contains_over_clause(self):
return self._contains_over_clause(self)
@staticmethod
def _resolve_leaf(expr, query, *args, **kwargs):
if hasattr(expr, "resolve_expression"):
expr = expr.resolve_expression(query, *args, **kwargs)
return expr
@classmethod
def _resolve_node(cls, node, query, *args, **kwargs):
if hasattr(node, "children"):
for child in node.children:
cls._resolve_node(child, query, *args, **kwargs)
if hasattr(node, "lhs"):
node.lhs = cls._resolve_leaf(node.lhs, query, *args, **kwargs)
if hasattr(node, "rhs"):
node.rhs = cls._resolve_leaf(node.rhs, query, *args, **kwargs)
def resolve_expression(self, *args, **kwargs):
clone = self.clone()
clone._resolve_node(clone, *args, **kwargs)
clone.resolved = True
return clone
@cached_property
def output_field(self):
from django.db.models import BooleanField
return BooleanField()
@property
def _output_field_or_none(self):
return self.output_field
def select_format(self, compiler, sql, params):
# Wrap filters with a CASE WHEN expression if a database backend
# (e.g. Oracle) doesn't support boolean expression in SELECT or GROUP
# BY list.
if not compiler.connection.features.supports_boolean_expr_in_select_clause:
sql = f"CASE WHEN {sql} THEN 1 ELSE 0 END"
return sql, params
def get_db_converters(self, connection):
return self.output_field.get_db_converters(connection)
def get_lookup(self, lookup):
return self.output_field.get_lookup(lookup)
def leaves(self):
for child in self.children:
if isinstance(child, WhereNode):
yield from child.leaves()
else:
yield child
class NothingNode:
"""A node that matches nothing."""
contains_aggregate = False
contains_over_clause = False
def as_sql(self, compiler=None, connection=None):
raise EmptyResultSet
class ExtraWhere:
# The contents are a black box - assume no aggregates or windows are used.
contains_aggregate = False
contains_over_clause = False
def __init__(self, sqls, params):
self.sqls = sqls
self.params = params
def as_sql(self, compiler=None, connection=None):
sqls = ["(%s)" % sql for sql in self.sqls]
return " AND ".join(sqls), list(self.params or ())
class SubqueryConstraint:
# Even if aggregates or windows would be used in a subquery,
# the outer query isn't interested about those.
contains_aggregate = False
contains_over_clause = False
def __init__(self, alias, columns, targets, query_object):
self.alias = alias
self.columns = columns
self.targets = targets
query_object.clear_ordering(clear_default=True)
self.query_object = query_object
def as_sql(self, compiler, connection):
query = self.query_object
query.set_values(self.targets)
query_compiler = query.get_compiler(connection=connection)
return query_compiler.as_subquery_condition(self.alias, self.columns, compiler)
|
{
"content_hash": "445df33c93329ed633986545cba44a51",
"timestamp": "",
"source": "github",
"line_count": 342,
"max_line_length": 88,
"avg_line_length": 36.187134502923975,
"alnum_prop": 0.5889625080801552,
"repo_name": "MarkusH/django",
"id": "e2af46a30927763c578706d6afe3a5d5e3f66d54",
"size": "12376",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "django/db/models/sql/where.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "89800"
},
{
"name": "HTML",
"bytes": "238228"
},
{
"name": "JavaScript",
"bytes": "147868"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Procfile",
"bytes": "47"
},
{
"name": "Python",
"bytes": "16079477"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "392"
}
],
"symlink_target": ""
}
|
'''
This file holds all of the forms for the cleaning and validation of
the parameters being used for friends.
Created on Dec 20, 2013
@author: Andrew Oberlin, Jake Gregg
'''
from django import forms
from rememerme.friends.models import Friends
from rememerme.friends.rest.exceptions import FriendsListNotFoundException, FriendNotFoundException
from rememerme.friends.serializers import FriendsSerializer
from uuid import UUID
from rememerme.users.client import UserClient
from pycassa.cassandra.ttypes import NotFoundException as CassaNotFoundException
import json
'''
Submits this form and returns the friends of the current user.
@return: The friends matching the query with the given offset/limit
'''
class FriendsGetListForm(forms.Form):
'''
Submits this form to retrieve the correct information requested by the user.
Searches by user_id
@return: A list of friends with the given offset/limit
'''
def submit(self, request):
try:
ans = Friends.getByID(request.user.pk)
except CassaNotFoundException:
ans = Friends(user_id=request.user.pk, friends_list={})
return FriendsSerializer(ans).data
'''
Submits this form and deletes the friend from the user's friend list.
'''
class FriendsDeleteForm(forms.Form):
user_id = forms.CharField(required=True)
def clean(self):
try:
self.cleaned_data['user_id'] = str(UUID(self.cleaned_data['user_id']))
return self.cleaned_data
except ValueError:
raise FriendNotFoundException()
    '''
    Submits this form and deletes the given user from the current user's
    friends list.
    @return: The deleted friend's user profile
    '''
def submit(self, request):
try:
ans = Friends.getByID(request.user.pk)
if not ans:
raise FriendNotFoundException()
except CassaNotFoundException:
raise FriendNotFoundException()
        # guard against a KeyError when the id is not in the friends list
        if self.cleaned_data['user_id'] not in ans.friends_list:
            raise FriendNotFoundException()
        del ans.friends_list[self.cleaned_data['user_id']]
ans.save()
return UserClient(request.auth).get(self.cleaned_data['user_id'])
|
{
"content_hash": "7c223558d56bb5234b2c6fdc0b9d61cf",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 99,
"avg_line_length": 31.056338028169016,
"alnum_prop": 0.6580498866213151,
"repo_name": "rememerme/friends-api",
"id": "20480ca045a6509189943227f656ab6f573e3a37",
"size": "2205",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rememerme/friends/rest/friends/forms.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "34876"
},
{
"name": "JavaScript",
"bytes": "251351"
},
{
"name": "Python",
"bytes": "24965"
}
],
"symlink_target": ""
}
|
import numpy as np
import pyflux as pf
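# Simulate a Gaussian random walk of length 200 to fit the local level models on.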
noise = np.random.normal(0,1,200)
data = np.zeros(200)
for i in range(1,len(data)):
data[i] = 1.0*data[i-1] + noise[i]
countdata = np.random.poisson(3,200)
def test_skewt_couple_terms():
"""
Tests latent variable list length is correct, and that the estimated
latent variables are not nan
"""
model = pf.GASLLEV(data=data, family=pf.Skewt())
x = model.fit()
assert(len(model.latent_variables.z_list) == 4)
lvs = np.array([i.value for i in model.latent_variables.z_list])
assert(len(lvs[np.isnan(lvs)]) == 0)
def test_skewt_couple_terms_integ():
"""
Tests latent variable list length is correct, and that the estimated
latent variables are not nan
"""
model = pf.GASLLEV(data=data, integ=1, family=pf.Skewt())
x = model.fit()
assert(len(model.latent_variables.z_list) == 4)
lvs = np.array([i.value for i in model.latent_variables.z_list])
assert(len(lvs[np.isnan(lvs)]) == 0)
def test_skewt_bbvi():
"""
    Tests a GAS model estimated with BBVI and that the length of the latent variable
list is correct, and that the estimated latent variables are not nan
"""
model = pf.GASLLEV(data=data, family=pf.Skewt())
x = model.fit('BBVI',iterations=100)
assert(len(model.latent_variables.z_list) == 4)
lvs = np.array([i.value for i in model.latent_variables.z_list])
assert(len(lvs[np.isnan(lvs)]) == 0)
def test_skewt_bbvi_mini_batch():
"""
    Tests a GAS model estimated with BBVI (mini-batch) and that the length of the latent variable
list is correct, and that the estimated latent variables are not nan
"""
model = pf.GASLLEV(data=data, family=pf.Skewt())
x = model.fit('BBVI',iterations=100, mini_batch=32)
assert(len(model.latent_variables.z_list) == 4)
lvs = np.array([i.value for i in model.latent_variables.z_list])
assert(len(lvs[np.isnan(lvs)]) == 0)
def test_skewt_bbvi_elbo():
"""
Tests that the ELBO increases
"""
model = pf.GASLLEV(data=data, family=pf.Skewt())
x = model.fit('BBVI',iterations=100, record_elbo=True)
assert(x.elbo_records[-1]>x.elbo_records[0])
def test_skewt_bbvi_mini_batch_elbo():
"""
Tests that the ELBO increases
"""
model = pf.GASLLEV(data=data, family=pf.Skewt())
x = model.fit('BBVI',iterations=100, mini_batch=32, record_elbo=True)
assert(x.elbo_records[-1]>x.elbo_records[0])
def test_skewt_mh():
"""
    Tests a GAS model estimated with Metropolis-Hastings and that the length of the
latent variable list is correct, and that the estimated latent variables are not nan
"""
model = pf.GASLLEV(data=data, family=pf.Skewt())
x = model.fit('M-H',nsims=300)
assert(len(model.latent_variables.z_list) == 4)
lvs = np.array([i.value for i in model.latent_variables.z_list])
assert(len(lvs[np.isnan(lvs)]) == 0)
""" Uncomment in future if Skewt becomes more robust
def test_skewt_laplace():
    Tests a GAS model estimated with Laplace approximation and that the length of the
latent variable list is correct, and that the estimated latent variables are not nan
model = pf.GASLLEV(data=data, family=pf.Skewt())
x = model.fit('Laplace')
assert(len(model.latent_variables.z_list) == 4)
lvs = np.array([i.value for i in model.latent_variables.z_list])
assert(len(lvs[np.isnan(lvs)]) == 0)
"""
def test_skewt_pml():
"""
Tests a PML model estimated with Laplace approximation and that the length of the
latent variable list is correct, and that the estimated latent variables are not nan
"""
model = pf.GASLLEV(data=data, family=pf.Skewt())
x = model.fit('PML')
assert(len(model.latent_variables.z_list) == 4)
lvs = np.array([i.value for i in model.latent_variables.z_list])
assert(len(lvs[np.isnan(lvs)]) == 0)
def test_skewt_predict_length():
"""
Tests that the prediction dataframe length is equal to the number of steps h
"""
model = pf.GASLLEV(data=data, family=pf.Skewt())
x = model.fit()
x.summary()
assert(model.predict(h=5).shape[0] == 5)
def test_skewt_predict_is_length():
"""
Tests that the prediction IS dataframe length is equal to the number of steps h
"""
model = pf.GASLLEV(data=data, family=pf.Skewt())
x = model.fit()
assert(model.predict_is(h=5).shape[0] == 5)
def test_skewt_predict_nans():
"""
Tests that the predictions are not nans
model = pf.GASLLEV(data=data, family=pf.Skewt())
"""
model = pf.GASLLEV(data=data, family=pf.Skewt())
x = model.fit()
x.summary()
assert(len(model.predict(h=5).values[np.isnan(model.predict(h=5).values)]) == 0)
"""
def test_skewt_predict_is_nans():
Tests that the in-sample predictions are not nans
model = pf.GASLLEV(data=data, family=pf.Skewt())
x = model.fit()
x.summary()
assert(len(model.predict_is(h=5).values[np.isnan(model.predict_is(h=5).values)]) == 0)
"""
|
{
"content_hash": "eb1c847ff11a8025cd33799304365649",
"timestamp": "",
"source": "github",
"line_count": 142,
"max_line_length": 87,
"avg_line_length": 33.429577464788736,
"alnum_prop": 0.6928586475668843,
"repo_name": "RJT1990/pyflux",
"id": "0ff925beaf5ef2567704c3c1920f77502dc58969",
"size": "4747",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyflux/gas/tests/gas_llev_tests_skewt.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "1918616"
}
],
"symlink_target": ""
}
|
from django.db import models
class Organization(models.Model):
owner = models.ForeignKey('auth.User',related_name="organizations")
name = models.CharField(max_length=128)
created = models.DateTimeField(auto_now_add=True)
class Household(models.Model):
organization = models.ForeignKey(Organization,related_name="households")
name = models.CharField(max_length=128)
address = models.CharField(max_length=256)
notes = models.TextField(blank=True)
created = models.DateTimeField(auto_now_add=True)
moved = models.BooleanField(default=False)
class Member(models.Model):
household = models.ForeignKey(Household, related_name="members")
first_name = models.CharField(max_length=128)
last_name = models.CharField(max_length=128)
    # null=True is required so an optional date can actually be stored as NULL
    birthday = models.DateField(blank=True, null=True)
phone = models.CharField(blank=True, max_length=28)
email = models.CharField(max_length=128)
is_home_teacher = models.BooleanField(default=False)
created = models.DateTimeField(auto_now_add=True)
class District(models.Model):
organization = models.ForeignKey(Organization, related_name="districts")
leader = models.ForeignKey(Member, related_name="districts")
name = models.CharField(max_length=128)
created = models.DateTimeField(auto_now_add=True)
class Companionship(models.Model):
district = models.ForeignKey(District,related_name="companionships")
created = models.DateTimeField(auto_now_add=True)
class Companion(models.Model):
member = models.ForeignKey(Member, related_name="companionships")
companionship = models.ForeignKey(Companionship, related_name="companions")
created = models.DateTimeField(auto_now_add=True)
class Assignment(models.Model):
companionship = models.ForeignKey(Companionship, related_name="assignments")
household = models.ForeignKey(Household, related_name="active_assignment")
created = models.DateTimeField(auto_now_add=True)
class AssignmentHistory(models.Model):
household = models.ForeignKey(Household, related_name="past_assignments")
date = models.DateField()
class Visit(models.Model):
household = models.ForeignKey(Household, related_name="visits")
visitor1 = models.CharField(max_length=128)
visitor2 = models.CharField(max_length=128, blank=True)
date = models.DateField()
notes = models.TextField(blank=True)
|
{
"content_hash": "c98d58007dd90ef1c6cadcaaffad61c5",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 77,
"avg_line_length": 39.70175438596491,
"alnum_prop": 0.7843570481661511,
"repo_name": "ryanrborn/lds-leader-tool-suite-backend",
"id": "b7ac02e10cdec85afd4ab29eadd226733841c16e",
"size": "2263",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "llts/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "23587"
}
],
"symlink_target": ""
}
|
from msrest.service_client import ServiceClient
from msrest import Configuration, Serializer, Deserializer
from .operations.byte import Byte
from . import models
class AutoRestSwaggerBATByteServiceConfiguration(Configuration):
def __init__(
self, base_url=None, filepath=None):
if not base_url:
base_url = 'http://localhost'
super(AutoRestSwaggerBATByteServiceConfiguration, self).__init__(base_url, filepath)
self.user_agent = 'auto_rest_swagger_bat_byte_service/1.0.0'
class AutoRestSwaggerBATByteService(object):
def __init__(self, config):
self._client = ServiceClient(None, config)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer()
self._deserialize = Deserializer(client_models)
self.config = config
self.byte = Byte(
self._client, self.config, self._serialize, self._deserialize)
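# Example usage (hypothetical):
#   config = AutoRestSwaggerBATByteServiceConfiguration()
#   client = AutoRestSwaggerBATByteService(config)
#   # the byte-payload operations are exposed on `client.byte`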
|
{
"content_hash": "9729ed212e384422e59bfc87fe56a8c3",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 92,
"avg_line_length": 30.5625,
"alnum_prop": 0.6738241308793456,
"repo_name": "vulcansteel/autorest",
"id": "4b4f5b9727cb332a70d92897c36c4ed944ff5d4a",
"size": "1452",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "AutoRest/Generators/Python/Python.Tests/Expected/AcceptanceTests/BodyByte/auto_rest_swagger_bat_byte_service/api_client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "819"
},
{
"name": "C#",
"bytes": "8857811"
},
{
"name": "CSS",
"bytes": "110"
},
{
"name": "HTML",
"bytes": "274"
},
{
"name": "Java",
"bytes": "3171512"
},
{
"name": "JavaScript",
"bytes": "4063363"
},
{
"name": "PowerShell",
"bytes": "8003"
},
{
"name": "Puppet",
"bytes": "145"
},
{
"name": "Python",
"bytes": "1831874"
},
{
"name": "Ruby",
"bytes": "218212"
},
{
"name": "TypeScript",
"bytes": "158339"
}
],
"symlink_target": ""
}
|
from pprint import pformat
import netifaces
from miniworld.errors import NetworkBackendErrorReset
from miniworld.model.singletons.Singletons import singletons
from miniworld.model.network.backends import AbstractSwitch
from miniworld.model.network.backends.bridged.iproute2 import IPRoute2Commands
from miniworld.model.singletons import Resetable
__author__ = "Nils Schmidt"
# TODO: #54,#55: DOC
class Bridge(AbstractSwitch.AbstractSwitch, Resetable.Resetable):
"""
Attributes
----------
id : str
Name of the bridge.
    bridge
        The underlying bridge device; ``None`` until created.
See Also
--------
http://baturin.org/docs/iproute2/#Create%20a%20bridge%20interface
http://lists.openwall.net/netdev/2015/06/16/44
"""
def run(self, cmd):
return singletons.shell_helper.run_shell(self.id, cmd, prefixes=["bridge"])
def __init__(self, id, interface):
super(Bridge, self).__init__(id, interface)
# we want the shortened id due to the limitation of the tap device name length
self.id = id
self.bridge = None
def __repr__(self):
return '%s(%s)' % (self.__class__.__name__, self.id)
###############################################
# Subclass stuff
###############################################
# TODO: #54,#55: arguments needed for abstract start() ??
# TODO: only allow starting once!
def _start(self, bridge_dev_name=None, switch=False):
"""
Parameters
----------
bridge_dev_name : str, optional (default is None)
switch : bool, optional (default is False)
Returns
-------
Raises
------
NetworkBackendStartError
"""
self.bridge_dev_name = bridge_dev_name
# TODO: #54,#55: exceptions around all networkbackends!
# TODO: #54,#55: recognize or delete if_up
def add_if(self, _if_name, if_up=True):
"""
Parameters
----------
_if_name
Returns
-------
Raises
------
NetworkBackendBridgedBridgeError
"""
pass
def reset(self):
"""
Raises
------
NetworkBackendErrorReset
Returns
-------
"""
try:
if self.started and self.bridge_dev_name:
self.run(IPRoute2Commands.get_link_del_cmd(self.bridge_dev_name))
except Exception as e:
raise NetworkBackendErrorReset("""Could not shutdown the bridge '%s'
Interface dump:
%s
""" % (self, pformat(self.get_interfaces())), caused_by=e)
@staticmethod
def get_interfaces():
# ip.by_name.keys()
# return [x.get_attr('IFLA_IFNAME') for x in ipr.get_links()]
return ', '.join(netifaces.interfaces())
|
{
"content_hash": "5064ba3eae39526d54ddab48697e0eb0",
"timestamp": "",
"source": "github",
"line_count": 108,
"max_line_length": 86,
"avg_line_length": 25.59259259259259,
"alnum_prop": 0.566931982633864,
"repo_name": "miniworld-project/miniworld_core",
"id": "a86407a97391fea8ec18b2b3c5c04a249fb39576",
"size": "2764",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "miniworld/model/network/backends/bridged/Bridge.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "696934"
},
{
"name": "Shell",
"bytes": "1770"
}
],
"symlink_target": ""
}
|
from collections import deque
from datetime import datetime
import errno
from functools import partial
import os
import operator
import socket
import ssl
import sys
import time
from .. import http
from ..http import wsgi
from .. import util
from . import base
from .. import six
try:
import concurrent.futures as futures
except ImportError:
raise RuntimeError("""
You need 'concurrent' installed to use this worker with this python
version.
""")
try:
from asyncio import selectors
except ImportError:
try:
from trollius import selectors
except ImportError:
raise RuntimeError("""
You need 'trollius' installed to use this worker with this python
version.
""")
class TConn(object):
def __init__(self, cfg, listener, sock, addr):
self.cfg = cfg
self.listener = listener
self.sock = sock
self.addr = addr
self.timeout = None
self.parser = None
# set the socket to non blocking
self.sock.setblocking(False)
def init(self):
self.sock.setblocking(True)
if self.parser is None:
# wrap the socket if needed
if self.cfg.is_ssl:
                self.sock = ssl.wrap_socket(self.sock, server_side=True,
                                            **self.cfg.ssl_options)
# initialize the parser
self.parser = http.RequestParser(self.cfg, self.sock)
return True
return False
def set_timeout(self):
# set the timeout
self.timeout = time.time() + self.cfg.keepalive
def __lt__(self, other):
return self.timeout < other.timeout
__cmp__ = __lt__
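    # Connections expire in insertion order (keepalive is a fixed offset),
    # so murder_keepalived() only ever needs to inspect the left end.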
class ThreadWorker(base.Worker):
def __init__(self, *args, **kwargs):
super(ThreadWorker, self).__init__(*args, **kwargs)
self.worker_connections = self.cfg.worker_connections
# initialise the pool
self.tpool = None
self.poller = None
self.futures = deque()
self._keep = deque()
def _wrap_future(self, fs, conn):
fs.conn = conn
self.futures.append(fs)
fs.add_done_callback(self.finish_request)
def init_process(self):
self.tpool = futures.ThreadPoolExecutor(max_workers=self.cfg.threads)
self.poller = selectors.DefaultSelector()
super(ThreadWorker, self).init_process()
def accept(self, listener):
try:
client, addr = listener.accept()
conn = TConn(self.cfg, listener, client, addr)
# wait for the read event to handle the connection
self.poller.register(client, selectors.EVENT_READ,
partial(self.handle_client, conn))
except socket.error as e:
if e.args[0] not in (errno.EAGAIN,
errno.ECONNABORTED, errno.EWOULDBLOCK):
raise
def handle_client(self, conn, client):
# unregister the client from the poller
self.poller.unregister(client)
# submit the connection to a worker
fs = self.tpool.submit(self.handle, conn)
self._wrap_future(fs, conn)
def murder_keepalived(self):
now = time.time()
while True:
try:
conn = self._keep.popleft()
except IndexError:
break
delta = conn.timeout - now
if delta > 0:
self._keep.appendleft(conn)
break
else:
                # the connection was already popped from the queue above
# remove the socket from the poller
self.poller.unregister(conn.sock)
# close the socket
util.close(conn.sock)
def run(self):
# init listeners, add them to the event loop
for s in self.sockets:
s.setblocking(False)
self.poller.register(s, selectors.EVENT_READ, self.accept)
timeout = self.cfg.timeout or 0.5
while self.alive:
# If our parent changed then we shut down.
if self.ppid != os.getppid():
self.log.info("Parent changed, shutting down: %s", self)
return
# notify the arbiter we are alive
self.notify()
events = self.poller.select(0.2)
for key, mask in events:
callback = key.data
callback(key.fileobj)
            # handle keepalive timeouts
self.murder_keepalived()
            # if we have more connections than the max number of connections
            # accepted on a worker, wait until some complete or exit.
if len(self.futures) >= self.worker_connections:
res = futures.wait(self.futures, timeout=timeout)
if not res:
self.log.info("max requests achieved")
break
# shutdown the pool
self.poller.close()
self.tpool.shutdown(False)
# wait for the workers
futures.wait(self.futures, timeout=self.cfg.graceful_timeout)
        # if we still have futures running, try to cancel them
while True:
try:
fs = self.futures.popleft()
except IndexError:
break
sock = fs.conn.sock
# the future is not running, cancel it
if not fs.done() and not fs.running():
fs.cancel()
# make sure we close the sockets after the graceful timeout
util.close(sock)
def finish_request(self, fs):
try:
(keepalive, conn) = fs.result()
            # if the connection should be kept alive, add it back
            # to the event loop and record it
            if keepalive:
                # flag the socket as non-blocking
                conn.sock.setblocking(False)
# register the connection
conn.set_timeout()
self._keep.append(conn)
# add the socket to the event loop
self.poller.register(conn.sock, selectors.EVENT_READ,
partial(self.handle_client, conn))
else:
util.close(conn.sock)
except:
# an exception happened, make sure to close the
# socket.
util.close(fs.conn.sock)
finally:
# remove the future from our list
try:
self.futures.remove(fs)
except ValueError:
pass
def handle(self, conn):
if not conn.init():
# connection kept alive
try:
self._keep.remove(conn)
except ValueError:
pass
keepalive = False
req = None
try:
req = six.next(conn.parser)
if not req:
return (False, conn)
# handle the request
keepalive = self.handle_request(req, conn)
if keepalive:
return (keepalive, conn)
except http.errors.NoMoreData as e:
self.log.debug("Ignored premature client disconnection. %s", e)
except StopIteration as e:
self.log.debug("Closing connection. %s", e)
except ssl.SSLError as e:
if e.args[0] == ssl.SSL_ERROR_EOF:
self.log.debug("ssl connection closed")
conn.sock.close()
else:
self.log.debug("Error processing SSL request.")
self.handle_error(req, conn.sock, conn.addr, e)
except socket.error as e:
if e.args[0] not in (errno.EPIPE, errno.ECONNRESET):
self.log.exception("Socket error processing request.")
else:
if e.args[0] == errno.ECONNRESET:
self.log.debug("Ignoring connection reset")
else:
self.log.debug("Ignoring connection epipe")
except Exception as e:
self.handle_error(req, conn.sock, conn.addr, e)
return (False, conn)
def handle_request(self, req, conn):
environ = {}
resp = None
try:
self.cfg.pre_request(self, req)
request_start = datetime.now()
resp, environ = wsgi.create(req, conn.sock, conn.addr,
conn.listener.getsockname(), self.cfg)
environ["wsgi.multithread"] = True
self.nr += 1
if self.alive and self.nr >= self.max_requests:
self.log.info("Autorestarting worker after current request.")
resp.force_close()
self.alive = False
if not self.cfg.keepalive:
resp.force_close()
respiter = self.wsgi(environ, resp.start_response)
try:
if isinstance(respiter, environ['wsgi.file_wrapper']):
resp.write_file(respiter)
else:
for item in respiter:
resp.write(item)
resp.close()
request_time = datetime.now() - request_start
self.log.access(resp, req, environ, request_time)
finally:
if hasattr(respiter, "close"):
respiter.close()
if resp.should_close():
self.log.debug("Closing connection.")
return False
except socket.error:
exc_info = sys.exc_info()
# pass to next try-except level
six.reraise(exc_info[0], exc_info[1], exc_info[2])
except Exception:
if resp and resp.headers_sent:
# If the requests have already been sent, we should close the
# connection to indicate the error.
self.log.exception("Error handling request")
try:
conn.sock.shutdown(socket.SHUT_RDWR)
conn.sock.close()
except socket.error:
pass
raise StopIteration()
raise
finally:
try:
self.cfg.post_request(self, req, environ, resp)
except Exception:
self.log.exception("Exception in post_request hook")
return True
|
{
"content_hash": "fdc1d6d0a814199d2492ff38d310f312",
"timestamp": "",
"source": "github",
"line_count": 333,
"max_line_length": 77,
"avg_line_length": 31.075075075075077,
"alnum_prop": 0.5373018940858136,
"repo_name": "wong2/gunicorn",
"id": "122d923e4df632ea4ba07abb5c52ef592ffdd13b",
"size": "10775",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "gunicorn/workers/gthread.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "6671"
},
{
"name": "JavaScript",
"bytes": "1479"
},
{
"name": "Python",
"bytes": "559076"
},
{
"name": "Shell",
"bytes": "5327"
}
],
"symlink_target": ""
}
|
import os
import sys
from unittest import TestCase
sys.modules.pop('twiggy', None)
os.environ.pop('TWIGGY_UNDER_TEST', None) # we need globals!!
from twiggy import logging_compat, add_emitters, log
from twiggy.outputs import ListOutput
from twiggy.logging_compat import (hijack, restore, basicConfig,
getLogger, root, DEBUG, INFO, ERROR,
LoggingBridgeOutput, LoggingBridgeFormat,
orig_logging)
class HijackTest(TestCase):
def compare_modules(self, m1, m2):
self.failUnlessEqual(m1.__name__, m2.__name__)
def verify_orig(self):
import logging
self.compare_modules(logging, orig_logging)
def verify_comp(self):
import logging
self.compare_modules(logging, logging_compat)
def tearDown(self):
sys.modules.pop('logging', None)
def test_hijack(self):
self.verify_orig()
hijack()
self.verify_comp()
def test_restore(self):
hijack()
restore()
self.verify_orig()
class TestGetLogger(TestCase):
def test_name(self):
self.failUnlessEqual(getLogger("spam")._logger._fields["name"], "spam")
def test_root(self):
self.failUnlessEqual(getLogger(), root)
def test_cache(self):
eggs = getLogger("eggs")
self.failUnless(getLogger("eggs") is eggs)
class TestFakeLogger(TestCase):
def setUp(self):
self.logger = getLogger("spam")
self.logger.setLevel(DEBUG)
self.list_output = ListOutput()
self.messages = self.list_output.messages
add_emitters(("spam", DEBUG, None, self.list_output))
def test_level(self):
for level in [INFO, ERROR]:
self.logger.setLevel(level)
self.failUnlessEqual(self.logger.level, level)
def test_percent(self):
self.failUnlessEqual(self.logger._logger._options["style"], "percent")
def test_exception(self):
try:
1/0
except:
self.logger.exception("spam")
self.failUnless("ZeroDivisionError" in self.messages[0].traceback)
def test_isEnabledFor(self):
self.logger.setLevel(INFO)
self.failIf(self.logger.isEnabledFor(DEBUG))
self.logger.setLevel(DEBUG)
self.failUnless(self.logger.isEnabledFor(DEBUG))
def test_log_no_exc_info(self):
self.logger.info("nothing", exc_info=True)
self.failUnlessEqual(self.messages[0].traceback, None)
def test_log_exc_info(self):
try:
1/0
except:
self.logger.error("exception", exc_info=True)
self.failUnless("ZeroDivisionError" in self.messages[0].traceback)
def test_basicConfig(self):
self.failUnlessRaises(RuntimeError, basicConfig)
def test_log(self):
for index, level in enumerate((INFO, ERROR)):
self.logger.log(level, "spam")
self.failUnlessEqual(self.messages[index].text, "spam")
self.failUnlessEqual(self.messages[index].level, level)
def test_log_bad_level(self):
self.failUnlessRaises(ValueError, self.logger.log, "illegal level", "eggs")
class TestLoggingBridge(TestCase):
def test_format(self):
logger = log.name("spam")
list_output = ListOutput(format=LoggingBridgeFormat())
messages = list_output.messages
add_emitters(("spam", DEBUG, None, list_output))
logger.error("eggs")
self.failUnlessEqual(messages[0], ('|eggs\n', ERROR, 'spam'))
def test_sanity(self):
logger = log.name("decoy")
add_emitters(("decoy", DEBUG, None, LoggingBridgeOutput()))
logger.error("spam")
logger.notice("eggs")
|
{
"content_hash": "5d8b379e60861c59e943cad59d9d60ec",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 83,
"avg_line_length": 32.40677966101695,
"alnum_prop": 0.6111401673640168,
"repo_name": "alessandrod/twiggy",
"id": "2bd0b95bec9bb7d3039d980016d8476dbfd6ef7e",
"size": "3824",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_logging_compat.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "97214"
},
{
"name": "Shell",
"bytes": "317"
}
],
"symlink_target": ""
}
|
from setuptools import setup
VERSION = '0.0.1'
setup(
name='pymarketo',
version=VERSION,
packages=['pymarketo'],
install_requires=['requests>=2.8.1'],
description='Python interface to the Marketo REST API',
author='Jeremy Swinarton',
author_email='jeremy@swinarton.com',
license='MIT',
)
|
{
"content_hash": "7c8ed2dd21e86adbee2c76462abf5846",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 59,
"avg_line_length": 21.533333333333335,
"alnum_prop": 0.6656346749226006,
"repo_name": "jswinarton/pymarketo",
"id": "6702142eed3efd1b07159cd8e7a70110602a6e8d",
"size": "323",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7305"
}
],
"symlink_target": ""
}
|
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from mmcv.runner import BaseModule
from ..builder import NECKS
class Transition(BaseModule):
"""Base class for transition.
Args:
in_channels (int): Number of input channels.
out_channels (int): Number of output channels.
"""
def __init__(self, in_channels, out_channels, init_cfg=None):
super().__init__(init_cfg)
self.in_channels = in_channels
self.out_channels = out_channels
    def forward(self, x):
        pass
class UpInterpolationConv(Transition):
"""A transition used for up-sampling.
Up-sample the input by interpolation then refines the feature by
a convolution layer.
Args:
in_channels (int): Number of input channels.
out_channels (int): Number of output channels.
scale_factor (int): Up-sampling factor. Default: 2.
        mode (str): Interpolation mode. Default: 'nearest'.
align_corners (bool): Whether align corners when interpolation.
Default: None.
kernel_size (int): Kernel size for the conv. Default: 3.
"""
def __init__(self,
in_channels,
out_channels,
scale_factor=2,
mode='nearest',
align_corners=None,
kernel_size=3,
init_cfg=None,
**kwargs):
super().__init__(in_channels, out_channels, init_cfg)
self.mode = mode
self.scale_factor = scale_factor
self.align_corners = align_corners
self.conv = ConvModule(
in_channels,
out_channels,
kernel_size,
padding=(kernel_size - 1) // 2,
**kwargs)
def forward(self, x):
x = F.interpolate(
x,
scale_factor=self.scale_factor,
mode=self.mode,
align_corners=self.align_corners)
x = self.conv(x)
return x
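# Example (hypothetical): 2x nearest-neighbour up-sampling of a 256-channel map.
#   up = UpInterpolationConv(256, 256, scale_factor=2, mode='nearest')
#   y = up(x)  # x: (N, 256, H, W) -> y: (N, 256, 2H, 2W)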
class LastConv(Transition):
"""A transition used for refining the output of the last stage.
Args:
in_channels (int): Number of input channels.
out_channels (int): Number of output channels.
num_inputs (int): Number of inputs of the FPN features.
kernel_size (int): Kernel size for the conv. Default: 3.
"""
def __init__(self,
in_channels,
out_channels,
num_inputs,
kernel_size=3,
init_cfg=None,
**kwargs):
super().__init__(in_channels, out_channels, init_cfg)
self.num_inputs = num_inputs
self.conv_out = ConvModule(
in_channels,
out_channels,
kernel_size,
padding=(kernel_size - 1) // 2,
**kwargs)
def forward(self, inputs):
assert len(inputs) == self.num_inputs
return self.conv_out(inputs[-1])
@NECKS.register_module()
class FPG(BaseModule):
"""FPG.
Implementation of `Feature Pyramid Grids (FPG)
<https://arxiv.org/abs/2004.03580>`_.
    This implementation only gives the basic structure stated in the paper.
    But users can implement different types of transitions to fully explore
    the potential power of the structure of FPG.
Args:
in_channels (int): Number of input channels (feature maps of all levels
should have the same channels).
out_channels (int): Number of output channels (used at each scale)
num_outs (int): Number of output scales.
stack_times (int): The number of times the pyramid architecture will
be stacked.
paths (list[str]): Specify the path order of each stack level.
Each element in the list should be either 'bu' (bottom-up) or
'td' (top-down).
inter_channels (int): Number of inter channels.
        same_up_trans (dict): Transition that moves one pyramid level up
            (spatial downsampling) within the same stage, used on
            bottom-up paths.
        same_down_trans (dict): Transition that moves one pyramid level down
            (spatial upsampling) within the same stage, used on
            top-down paths.
        across_lateral_trans (dict): Across-pathway same-stage connection.
        across_down_trans (dict): Across-pathway connection from the level
            above (information flows top-down).
        across_up_trans (dict): Across-pathway connection from the level
            below (information flows bottom-up).
across_skip_trans (dict): Across-pathway skip connection.
output_trans (dict): Transition that trans the output of the
last stage.
start_level (int): Index of the start input backbone level used to
build the feature pyramid. Default: 0.
end_level (int): Index of the end input backbone level (exclusive) to
build the feature pyramid. Default: -1, which means the last level.
        add_extra_convs (bool): Whether to use convolution layers (instead
            of max pooling) to generate the extra feature levels on top of
            the original feature maps. Default: False.
norm_cfg (dict): Config dict for normalization layer. Default: None.
init_cfg (dict or list[dict], optional): Initialization config dict.
"""
transition_types = {
'conv': ConvModule,
'interpolation_conv': UpInterpolationConv,
'last_conv': LastConv,
}
def __init__(self,
in_channels,
out_channels,
num_outs,
stack_times,
paths,
inter_channels=None,
same_down_trans=None,
same_up_trans=dict(
type='conv', kernel_size=3, stride=2, padding=1),
across_lateral_trans=dict(type='conv', kernel_size=1),
across_down_trans=dict(type='conv', kernel_size=3),
across_up_trans=None,
across_skip_trans=dict(type='identity'),
output_trans=dict(type='last_conv', kernel_size=3),
start_level=0,
end_level=-1,
add_extra_convs=False,
norm_cfg=None,
skip_inds=None,
init_cfg=[
dict(type='Caffe2Xavier', layer='Conv2d'),
dict(
type='Constant',
layer=[
'_BatchNorm', '_InstanceNorm', 'GroupNorm',
'LayerNorm'
],
val=1.0)
]):
super(FPG, self).__init__(init_cfg)
assert isinstance(in_channels, list)
self.in_channels = in_channels
self.out_channels = out_channels
self.num_ins = len(in_channels)
self.num_outs = num_outs
if inter_channels is None:
self.inter_channels = [out_channels for _ in range(num_outs)]
elif isinstance(inter_channels, int):
self.inter_channels = [inter_channels for _ in range(num_outs)]
else:
assert isinstance(inter_channels, list)
assert len(inter_channels) == num_outs
self.inter_channels = inter_channels
self.stack_times = stack_times
self.paths = paths
assert isinstance(paths, list) and len(paths) == stack_times
for d in paths:
assert d in ('bu', 'td')
self.same_down_trans = same_down_trans
self.same_up_trans = same_up_trans
self.across_lateral_trans = across_lateral_trans
self.across_down_trans = across_down_trans
self.across_up_trans = across_up_trans
self.output_trans = output_trans
self.across_skip_trans = across_skip_trans
self.with_bias = norm_cfg is None
        # skip_inds must be specified if across_skip_trans is not None
        if self.across_skip_trans is not None:
            assert skip_inds is not None
self.skip_inds = skip_inds
assert len(self.skip_inds[0]) <= self.stack_times
if end_level == -1 or end_level == self.num_ins - 1:
self.backbone_end_level = self.num_ins
assert num_outs >= self.num_ins - start_level
else:
# if end_level is not the last level, no extra level is allowed
self.backbone_end_level = end_level + 1
assert end_level < self.num_ins
assert num_outs == end_level - start_level + 1
self.start_level = start_level
self.end_level = end_level
self.add_extra_convs = add_extra_convs
# build lateral 1x1 convs to reduce channels
self.lateral_convs = nn.ModuleList()
for i in range(self.start_level, self.backbone_end_level):
l_conv = nn.Conv2d(self.in_channels[i],
self.inter_channels[i - self.start_level], 1)
self.lateral_convs.append(l_conv)
extra_levels = num_outs - self.backbone_end_level + self.start_level
self.extra_downsamples = nn.ModuleList()
for i in range(extra_levels):
if self.add_extra_convs:
fpn_idx = self.backbone_end_level - self.start_level + i
extra_conv = nn.Conv2d(
self.inter_channels[fpn_idx - 1],
self.inter_channels[fpn_idx],
3,
stride=2,
padding=1)
self.extra_downsamples.append(extra_conv)
else:
self.extra_downsamples.append(nn.MaxPool2d(1, stride=2))
self.fpn_transitions = nn.ModuleList() # stack times
for s in range(self.stack_times):
stage_trans = nn.ModuleList() # num of feature levels
for i in range(self.num_outs):
# same, across_lateral, across_down, across_up
trans = nn.ModuleDict()
if s in self.skip_inds[i]:
stage_trans.append(trans)
continue
                # build same-stage up trans (used in bottom-up paths)
if i == 0 or self.same_up_trans is None:
same_up_trans = None
else:
same_up_trans = self.build_trans(
self.same_up_trans, self.inter_channels[i - 1],
self.inter_channels[i])
trans['same_up'] = same_up_trans
                # build same-stage down trans (used in top-down paths)
if i == self.num_outs - 1 or self.same_down_trans is None:
same_down_trans = None
else:
same_down_trans = self.build_trans(
self.same_down_trans, self.inter_channels[i + 1],
self.inter_channels[i])
trans['same_down'] = same_down_trans
# build across lateral trans
across_lateral_trans = self.build_trans(
self.across_lateral_trans, self.inter_channels[i],
self.inter_channels[i])
trans['across_lateral'] = across_lateral_trans
# build across down trans
if i == self.num_outs - 1 or self.across_down_trans is None:
across_down_trans = None
else:
across_down_trans = self.build_trans(
self.across_down_trans, self.inter_channels[i + 1],
self.inter_channels[i])
trans['across_down'] = across_down_trans
# build across up trans
if i == 0 or self.across_up_trans is None:
across_up_trans = None
else:
across_up_trans = self.build_trans(
self.across_up_trans, self.inter_channels[i - 1],
self.inter_channels[i])
trans['across_up'] = across_up_trans
                # build across skip trans
                if self.across_skip_trans is None:
                    across_skip_trans = None
                else:
                    across_skip_trans = self.build_trans(
                        self.across_skip_trans, self.inter_channels[i - 1],
                        self.inter_channels[i])
                trans['across_skip'] = across_skip_trans
stage_trans.append(trans)
self.fpn_transitions.append(stage_trans)
self.output_transition = nn.ModuleList() # output levels
for i in range(self.num_outs):
trans = self.build_trans(
self.output_trans,
self.inter_channels[i],
self.out_channels,
num_inputs=self.stack_times + 1)
self.output_transition.append(trans)
self.relu = nn.ReLU(inplace=True)
def build_trans(self, cfg, in_channels, out_channels, **extra_args):
cfg_ = cfg.copy()
trans_type = cfg_.pop('type')
trans_cls = self.transition_types[trans_type]
return trans_cls(in_channels, out_channels, **cfg_, **extra_args)
def fuse(self, fuse_dict):
out = None
for item in fuse_dict.values():
if item is not None:
if out is None:
out = item
else:
out = out + item
return out
def forward(self, inputs):
assert len(inputs) == len(self.in_channels)
# build all levels from original feature maps
feats = [
lateral_conv(inputs[i + self.start_level])
for i, lateral_conv in enumerate(self.lateral_convs)
]
for downsample in self.extra_downsamples:
feats.append(downsample(feats[-1]))
outs = [feats]
for i in range(self.stack_times):
current_outs = outs[-1]
next_outs = []
direction = self.paths[i]
for j in range(self.num_outs):
if i in self.skip_inds[j]:
next_outs.append(outs[-1][j])
continue
# feature level
if direction == 'td':
lvl = self.num_outs - j - 1
else:
lvl = j
# get transitions
if direction == 'td':
same_trans = self.fpn_transitions[i][lvl]['same_down']
else:
same_trans = self.fpn_transitions[i][lvl]['same_up']
across_lateral_trans = self.fpn_transitions[i][lvl][
'across_lateral']
across_down_trans = self.fpn_transitions[i][lvl]['across_down']
across_up_trans = self.fpn_transitions[i][lvl]['across_up']
across_skip_trans = self.fpn_transitions[i][lvl]['across_skip']
# init output
to_fuse = dict(
same=None, lateral=None, across_up=None, across_down=None)
# same downsample/upsample
if same_trans is not None:
to_fuse['same'] = same_trans(next_outs[-1])
# across lateral
if across_lateral_trans is not None:
to_fuse['lateral'] = across_lateral_trans(
current_outs[lvl])
                # across up (fuse the level below; spatially a downsample)
                if lvl > 0 and across_up_trans is not None:
                    to_fuse['across_up'] = across_up_trans(
                        current_outs[lvl - 1])
                # across down (fuse the level above; spatially an upsample)
                if lvl < self.num_outs - 1 and across_down_trans is not None:
                    to_fuse['across_down'] = across_down_trans(
                        current_outs[lvl + 1])
if across_skip_trans is not None:
to_fuse['across_skip'] = across_skip_trans(outs[0][lvl])
x = self.fuse(to_fuse)
next_outs.append(x)
if direction == 'td':
outs.append(next_outs[::-1])
else:
outs.append(next_outs)
# output trans
final_outs = []
for i in range(self.num_outs):
lvl_out_list = []
for s in range(len(outs)):
lvl_out_list.append(outs[s][i])
lvl_out = self.output_transition[i](lvl_out_list)
final_outs.append(lvl_out)
return final_outs
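# Usage sketch (not part of the original file; this module uses relative
# imports, so a snippet like this would live in user code). The channel
# sizes, feature shapes and transition configs below are illustrative
# assumptions, not values from any mmdetection config.
#
#   import torch
#   from mmdet.models.necks import FPG
#   neck = FPG(
#       in_channels=[256, 512, 1024],
#       out_channels=64,
#       num_outs=5,
#       stack_times=3,
#       paths=['bu', 'td', 'bu'],
#       across_down_trans=dict(
#           type='interpolation_conv', mode='nearest', kernel_size=3),
#       across_skip_trans=dict(type='conv', kernel_size=1),
#       skip_inds=[[], [], [], [], []])
#   feats = [torch.rand(1, 256, 64, 64),
#            torch.rand(1, 512, 32, 32),
#            torch.rand(1, 1024, 16, 16)]
#   outs = neck(feats)  # five maps, each with 64 output channels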
|
{
"content_hash": "24ef3b56b83a0e4bf9454f0a905a5e85",
"timestamp": "",
"source": "github",
"line_count": 405,
"max_line_length": 79,
"avg_line_length": 40.34320987654321,
"alnum_prop": 0.5288573352102331,
"repo_name": "open-mmlab/mmdetection",
"id": "a6a2a12ed415bbb517b056d01172a83f6e30833d",
"size": "16387",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mmdet/models/necks/fpg.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2540"
},
{
"name": "Python",
"bytes": "4811377"
},
{
"name": "Shell",
"bytes": "47911"
}
],
"symlink_target": ""
}
|
from django.db import models
from server.models import *
# Create your models here.
class Catalog(models.Model):
machine_group = models.ForeignKey(MachineGroup)
content = models.TextField()
name = models.CharField(max_length=253)
sha256hash = models.CharField(max_length=64)
class Meta:
ordering = ['name', 'machine_group']
|
{
"content_hash": "90ce5d8edc278dc55a2233e278d1dcce",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 51,
"avg_line_length": 32.09090909090909,
"alnum_prop": 0.7110481586402266,
"repo_name": "erikng/sal",
"id": "8558c737776ffc27b948a7551823ea7386474378",
"size": "353",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "catalog/models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "254975"
},
{
"name": "HTML",
"bytes": "248381"
},
{
"name": "JavaScript",
"bytes": "1148377"
},
{
"name": "Makefile",
"bytes": "2208"
},
{
"name": "Nginx",
"bytes": "1946"
},
{
"name": "Python",
"bytes": "757954"
},
{
"name": "Shell",
"bytes": "5922"
}
],
"symlink_target": ""
}
|
"""
Python Interchangeable Virtual Instrument Library
Copyright (c) 2012-2014 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .agilent2000A import *
class agilentMSOX2002A(agilent2000A):
"Agilent InfiniiVision MSOX2002A IVI oscilloscope driver"
def __init__(self, *args, **kwargs):
self.__dict__.setdefault('_instrument_id', 'MSO-X 2002A')
super(agilentMSOX2002A, self).__init__(*args, **kwargs)
self._analog_channel_count = 2
self._digital_channel_count = 16
self._channel_count = self._analog_channel_count + self._digital_channel_count
self._bandwidth = 70e6
self._init_channels()
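# Usage sketch (not part of the original file): instantiating the driver
# through python-ivi with a VISA resource string; the address below is a
# placeholder, not a real instrument.
#
#   import ivi
#   scope = ivi.agilent.agilentMSOX2002A("TCPIP0::192.168.1.100::INSTR")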
|
{
"content_hash": "9f28162442652a810b173a40b2ad63e2",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 86,
"avg_line_length": 38.5,
"alnum_prop": 0.7367178276269185,
"repo_name": "getzze/python-ivi",
"id": "8666bcdcdf64d097850854000fafc35af180dc64",
"size": "1694",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "ivi/agilent/agilentMSOX2002A.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1739388"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0026_order_reimbursements_by_year_and_issue_date'),
]
operations = [
migrations.AlterModelOptions(
name='historicalreimbursement',
options={'get_latest_by': 'history_date', 'ordering': ('-history_date', '-history_id'), 'verbose_name': 'historical reembolso'},
),
migrations.AlterModelOptions(
name='reimbursement',
options={'ordering': ('-year', '-issue_date'), 'verbose_name': 'reembolso', 'verbose_name_plural': 'reembolsos'},
),
migrations.AlterField(
model_name='historicalreimbursement',
name='applicant_id',
field=models.IntegerField(db_index=True, verbose_name='Identificador do Solicitante'),
),
migrations.AlterField(
model_name='historicalreimbursement',
name='available_in_latest_dataset',
field=models.BooleanField(default=True, verbose_name='Disponível na Câmara dos Deputados'),
),
migrations.AlterField(
model_name='historicalreimbursement',
name='batch_number',
field=models.IntegerField(verbose_name='Número do Lote'),
),
migrations.AlterField(
model_name='historicalreimbursement',
name='cnpj_cpf',
field=models.CharField(blank=True, db_index=True, max_length=14, null=True, verbose_name='CNPJ ou CPF'),
),
migrations.AlterField(
model_name='historicalreimbursement',
name='congressperson_document',
field=models.IntegerField(blank=True, null=True, verbose_name='Número da Carteira Parlamentar'),
),
migrations.AlterField(
model_name='historicalreimbursement',
name='congressperson_id',
field=models.IntegerField(blank=True, db_index=True, null=True, verbose_name='Identificador Único do Parlamentar'),
),
migrations.AlterField(
model_name='historicalreimbursement',
name='congressperson_name',
field=models.CharField(blank=True, db_index=True, max_length=140, null=True, verbose_name='Nome do Parlamentar'),
),
migrations.AlterField(
model_name='historicalreimbursement',
name='document_id',
field=models.IntegerField(db_index=True, verbose_name='Número do Reembolso'),
),
migrations.AlterField(
model_name='historicalreimbursement',
name='document_number',
field=models.CharField(blank=True, max_length=140, null=True, verbose_name='Número do Documento'),
),
migrations.AlterField(
model_name='historicalreimbursement',
name='document_type',
field=models.IntegerField(verbose_name='Indicativo de Tipo de Documento Fiscal'),
),
migrations.AlterField(
model_name='historicalreimbursement',
name='document_value',
field=models.DecimalField(decimal_places=3, max_digits=10, verbose_name='Valor do Documento'),
),
migrations.AlterField(
model_name='historicalreimbursement',
name='installment',
field=models.IntegerField(blank=True, null=True, verbose_name='Número da Parcela'),
),
migrations.AlterField(
model_name='historicalreimbursement',
name='issue_date',
field=models.DateField(verbose_name='Data de Emissão'),
),
migrations.AlterField(
model_name='historicalreimbursement',
name='last_update',
field=models.DateTimeField(blank=True, db_index=True, editable=False, verbose_name='Atualizado no Jarbas em'),
),
migrations.AlterField(
model_name='historicalreimbursement',
name='leg_of_the_trip',
field=models.CharField(blank=True, max_length=140, null=True, verbose_name='Trecho'),
),
migrations.AlterField(
model_name='historicalreimbursement',
name='month',
field=models.IntegerField(db_index=True, verbose_name='Mês'),
),
migrations.AlterField(
model_name='historicalreimbursement',
name='net_values',
field=models.CharField(max_length=140, verbose_name='Valores Líquidos dos Ressarcimentos'),
),
migrations.AlterField(
model_name='historicalreimbursement',
name='party',
field=models.CharField(blank=True, db_index=True, max_length=7, null=True, verbose_name='Partido'),
),
migrations.AlterField(
model_name='historicalreimbursement',
name='passenger',
field=models.CharField(blank=True, max_length=140, null=True, verbose_name='Passageiro'),
),
migrations.AlterField(
model_name='historicalreimbursement',
name='probability',
field=models.DecimalField(blank=True, decimal_places=5, max_digits=6, null=True, verbose_name='Probabilidade'),
),
migrations.AlterField(
model_name='historicalreimbursement',
name='receipt_fetched',
            field=models.BooleanField(db_index=True, default=False, verbose_name='Tentamos acessar a URL do documento fiscal?'),
),
migrations.AlterField(
model_name='historicalreimbursement',
name='receipt_url',
field=models.CharField(blank=True, max_length=140, null=True, verbose_name='URL do Documento Fiscal'),
),
migrations.AlterField(
model_name='historicalreimbursement',
name='reimbursement_numbers',
field=models.CharField(max_length=140, verbose_name='Números dos Ressarcimentos'),
),
migrations.AlterField(
model_name='historicalreimbursement',
name='reimbursement_values',
field=models.CharField(blank=True, max_length=140, null=True, verbose_name='Valores dos Ressarcimentos'),
),
migrations.AlterField(
model_name='historicalreimbursement',
name='remark_value',
field=models.DecimalField(blank=True, decimal_places=3, max_digits=10, null=True, verbose_name='Valor da Glosa'),
),
migrations.AlterField(
model_name='historicalreimbursement',
name='state',
field=models.CharField(blank=True, db_index=True, max_length=2, null=True, verbose_name='UF'),
),
migrations.AlterField(
model_name='historicalreimbursement',
name='subquota_description',
field=models.CharField(max_length=140, verbose_name='Descrição da Subcota'),
),
migrations.AlterField(
model_name='historicalreimbursement',
name='subquota_group_description',
field=models.CharField(blank=True, max_length=140, null=True, verbose_name='Descrição da Especificação da Subcota'),
),
migrations.AlterField(
model_name='historicalreimbursement',
name='subquota_group_id',
field=models.IntegerField(blank=True, null=True, verbose_name='Número da Especificação da Subcota'),
),
migrations.AlterField(
model_name='historicalreimbursement',
name='subquota_id',
field=models.IntegerField(db_index=True, verbose_name='Número da Subcota'),
),
migrations.AlterField(
model_name='historicalreimbursement',
name='supplier',
field=models.CharField(max_length=140, verbose_name='Fornecedor'),
),
migrations.AlterField(
model_name='historicalreimbursement',
name='suspicions',
field=django.contrib.postgres.fields.jsonb.JSONField(blank=True, null=True, verbose_name='Suspeitas'),
),
migrations.AlterField(
model_name='historicalreimbursement',
name='term',
field=models.IntegerField(blank=True, null=True, verbose_name='Número da Legislatura'),
),
migrations.AlterField(
model_name='historicalreimbursement',
name='term_id',
field=models.IntegerField(blank=True, null=True, verbose_name='Código da Legislatura'),
),
migrations.AlterField(
model_name='historicalreimbursement',
name='total_net_value',
field=models.DecimalField(db_index=True, decimal_places=3, max_digits=10, verbose_name='Valor Líquido'),
),
migrations.AlterField(
model_name='historicalreimbursement',
name='total_reimbursement_value',
field=models.DecimalField(blank=True, decimal_places=3, max_digits=10, null=True, verbose_name='Valor da Restituição'),
),
migrations.AlterField(
model_name='historicalreimbursement',
name='year',
field=models.IntegerField(db_index=True, verbose_name='Ano'),
),
migrations.AlterField(
model_name='reimbursement',
name='applicant_id',
field=models.IntegerField(db_index=True, verbose_name='Identificador do Solicitante'),
),
migrations.AlterField(
model_name='reimbursement',
name='available_in_latest_dataset',
field=models.BooleanField(default=True, verbose_name='Disponível na Câmara dos Deputados'),
),
migrations.AlterField(
model_name='reimbursement',
name='batch_number',
field=models.IntegerField(verbose_name='Número do Lote'),
),
migrations.AlterField(
model_name='reimbursement',
name='cnpj_cpf',
field=models.CharField(blank=True, db_index=True, max_length=14, null=True, verbose_name='CNPJ ou CPF'),
),
migrations.AlterField(
model_name='reimbursement',
name='congressperson_document',
field=models.IntegerField(blank=True, null=True, verbose_name='Número da Carteira Parlamentar'),
),
migrations.AlterField(
model_name='reimbursement',
name='congressperson_id',
field=models.IntegerField(blank=True, db_index=True, null=True, verbose_name='Identificador Único do Parlamentar'),
),
migrations.AlterField(
model_name='reimbursement',
name='congressperson_name',
field=models.CharField(blank=True, db_index=True, max_length=140, null=True, verbose_name='Nome do Parlamentar'),
),
migrations.AlterField(
model_name='reimbursement',
name='document_id',
field=models.IntegerField(db_index=True, unique=True, verbose_name='Número do Reembolso'),
),
migrations.AlterField(
model_name='reimbursement',
name='document_number',
field=models.CharField(blank=True, max_length=140, null=True, verbose_name='Número do Documento'),
),
migrations.AlterField(
model_name='reimbursement',
name='document_type',
field=models.IntegerField(verbose_name='Indicativo de Tipo de Documento Fiscal'),
),
migrations.AlterField(
model_name='reimbursement',
name='document_value',
field=models.DecimalField(decimal_places=3, max_digits=10, verbose_name='Valor do Documento'),
),
migrations.AlterField(
model_name='reimbursement',
name='installment',
field=models.IntegerField(blank=True, null=True, verbose_name='Número da Parcela'),
),
migrations.AlterField(
model_name='reimbursement',
name='issue_date',
field=models.DateField(verbose_name='Data de Emissão'),
),
migrations.AlterField(
model_name='reimbursement',
name='last_update',
field=models.DateTimeField(auto_now=True, db_index=True, verbose_name='Atualizado no Jarbas em'),
),
migrations.AlterField(
model_name='reimbursement',
name='leg_of_the_trip',
field=models.CharField(blank=True, max_length=140, null=True, verbose_name='Trecho'),
),
migrations.AlterField(
model_name='reimbursement',
name='month',
field=models.IntegerField(db_index=True, verbose_name='Mês'),
),
migrations.AlterField(
model_name='reimbursement',
name='net_values',
field=models.CharField(max_length=140, verbose_name='Valores Líquidos dos Ressarcimentos'),
),
migrations.AlterField(
model_name='reimbursement',
name='party',
field=models.CharField(blank=True, db_index=True, max_length=7, null=True, verbose_name='Partido'),
),
migrations.AlterField(
model_name='reimbursement',
name='passenger',
field=models.CharField(blank=True, max_length=140, null=True, verbose_name='Passageiro'),
),
migrations.AlterField(
model_name='reimbursement',
name='probability',
field=models.DecimalField(blank=True, decimal_places=5, max_digits=6, null=True, verbose_name='Probabilidade'),
),
migrations.AlterField(
model_name='reimbursement',
name='receipt_fetched',
            field=models.BooleanField(db_index=True, default=False, verbose_name='Tentamos acessar a URL do documento fiscal?'),
),
migrations.AlterField(
model_name='reimbursement',
name='receipt_url',
field=models.CharField(blank=True, max_length=140, null=True, verbose_name='URL do Documento Fiscal'),
),
migrations.AlterField(
model_name='reimbursement',
name='reimbursement_numbers',
field=models.CharField(max_length=140, verbose_name='Números dos Ressarcimentos'),
),
migrations.AlterField(
model_name='reimbursement',
name='reimbursement_values',
field=models.CharField(blank=True, max_length=140, null=True, verbose_name='Valores dos Ressarcimentos'),
),
migrations.AlterField(
model_name='reimbursement',
name='remark_value',
field=models.DecimalField(blank=True, decimal_places=3, max_digits=10, null=True, verbose_name='Valor da Glosa'),
),
migrations.AlterField(
model_name='reimbursement',
name='state',
field=models.CharField(blank=True, db_index=True, max_length=2, null=True, verbose_name='UF'),
),
migrations.AlterField(
model_name='reimbursement',
name='subquota_description',
field=models.CharField(max_length=140, verbose_name='Descrição da Subcota'),
),
migrations.AlterField(
model_name='reimbursement',
name='subquota_group_description',
field=models.CharField(blank=True, max_length=140, null=True, verbose_name='Descrição da Especificação da Subcota'),
),
migrations.AlterField(
model_name='reimbursement',
name='subquota_group_id',
field=models.IntegerField(blank=True, null=True, verbose_name='Número da Especificação da Subcota'),
),
migrations.AlterField(
model_name='reimbursement',
name='subquota_id',
field=models.IntegerField(db_index=True, verbose_name='Número da Subcota'),
),
migrations.AlterField(
model_name='reimbursement',
name='supplier',
field=models.CharField(max_length=140, verbose_name='Fornecedor'),
),
migrations.AlterField(
model_name='reimbursement',
name='suspicions',
field=django.contrib.postgres.fields.jsonb.JSONField(blank=True, null=True, verbose_name='Suspeitas'),
),
migrations.AlterField(
model_name='reimbursement',
name='term',
field=models.IntegerField(blank=True, null=True, verbose_name='Número da Legislatura'),
),
migrations.AlterField(
model_name='reimbursement',
name='term_id',
field=models.IntegerField(blank=True, null=True, verbose_name='Código da Legislatura'),
),
migrations.AlterField(
model_name='reimbursement',
name='total_net_value',
field=models.DecimalField(db_index=True, decimal_places=3, max_digits=10, verbose_name='Valor Líquido'),
),
migrations.AlterField(
model_name='reimbursement',
name='total_reimbursement_value',
field=models.DecimalField(blank=True, decimal_places=3, max_digits=10, null=True, verbose_name='Valor da Restituição'),
),
migrations.AlterField(
model_name='reimbursement',
name='year',
field=models.IntegerField(db_index=True, verbose_name='Ano'),
),
]
|
{
"content_hash": "dd9cbfa2e763c8c097d18b36ee8a0e86",
"timestamp": "",
"source": "github",
"line_count": 392,
"max_line_length": 140,
"avg_line_length": 44.566326530612244,
"alnum_prop": 0.6045792787635947,
"repo_name": "datasciencebr/serenata-de-amor",
"id": "74c8790f187db5467145776d77dca27e0cb5147b",
"size": "17597",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "jarbas/core/migrations/0027_translate_verbose_names.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "31926816"
},
{
"name": "Jupyter Notebook",
"bytes": "31547878"
},
{
"name": "Python",
"bytes": "424995"
}
],
"symlink_target": ""
}
|
"""HTML sanitizing service."""
import logging
import urlparse
import bleach
from core.domain import rte_component_registry
def filter_a(name, value):
"""Returns whether the described attribute of an anchor ('a') tag should be
whitelisted.
Args:
name: str. The name of the attribute.
value: str. The value of the attribute.
Returns:
bool. Whether the given attribute should be whitelisted.
"""
if name in ('title', 'target'):
return True
if name == 'href':
url_components = urlparse.urlsplit(value)
if url_components[0] in ['http', 'https']:
return True
logging.error('Found invalid URL href: %s' % value)
return False
ATTRS_WHITELIST = {
'a': filter_a,
'b': [],
'blockquote': [],
'br': [],
'code': [],
'div': [],
'em': [],
'hr': [],
'i': [],
'li': [],
'ol': [],
'p': [],
'pre': [],
'span': [],
'strong': [],
'table': ['border'],
'tbody': [],
'td': [],
'tr': [],
'u': [],
'ul': [],
}
def clean(user_submitted_html):
"""Cleans a piece of user submitted HTML.
This only allows HTML from a restricted set of tags, attrs and styles.
Args:
user_submitted_html: str. An untrusted HTML string.
Returns:
str. The HTML string that results after stripping out unrecognized tags
and attributes.
"""
oppia_custom_tags = (
rte_component_registry.Registry.get_tag_list_with_attrs())
core_tags = ATTRS_WHITELIST.copy()
core_tags.update(oppia_custom_tags)
tag_names = core_tags.keys()
# TODO(sll): Alert the caller if the input was changed due to this call.
# TODO(sll): Add a log message if bad HTML is detected.
return bleach.clean(
user_submitted_html, tags=tag_names, attributes=core_tags, strip=True)
def strip_html_tags(html):
"""Strips all HTML markup from an HTML string.
Args:
html: str. An HTML string.
Returns:
str. The HTML string that results after all the tags and attributes are
stripped out.
"""
return bleach.clean(html, tags=[], attributes={}, strip=True)
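# Usage sketch (not part of the original module; importing this module needs
# the Oppia application context, so a snippet like this would run from
# application code). The sample markup is illustrative.
#
#   from core.domain import html_cleaner
#   html_cleaner.strip_html_tags('<p onclick="x()">Hello <b>world</b>!</p>')
#   # -> 'Hello world!'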
|
{
"content_hash": "4cdbf3d6646b8a87c996f976c0e5da5c",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 79,
"avg_line_length": 23.591397849462364,
"alnum_prop": 0.5870556061987238,
"repo_name": "amgowano/oppia",
"id": "42f8275d2db3fc5cb9abb32852172e8653f64348",
"size": "2817",
"binary": false,
"copies": "3",
"ref": "refs/heads/develop",
"path": "core/domain/html_cleaner.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "90367"
},
{
"name": "HTML",
"bytes": "762503"
},
{
"name": "JavaScript",
"bytes": "2389490"
},
{
"name": "Python",
"bytes": "2710544"
},
{
"name": "Shell",
"bytes": "44950"
}
],
"symlink_target": ""
}
|
import os
import argparse
import django
from datetime import datetime
import arrow
from django.contrib.auth import get_user_model
from django.db.models import Count
from django.utils import timezone
def get_number_of_deliveries(from_datetime, to_datetime):
"""
Get the number of deliveries made.
Simply counts the number of comments posted by students with files on
all FeedbackSets with deadlines within the from and to datetime arguments.
"""
from devilry.devilry_group.models import GroupComment, FeedbackSet
#: Get all `FeedbackSets` with deadlines within the from and to datetime range.
feedbackset_queryset = FeedbackSet.objects\
.filter(deadline_datetime__gte=from_datetime,
deadline_datetime__lte=to_datetime)
#: UNCOMMENT THIS IF YOU WANT TO:
#:
#: Filter only the last FeedbackSet for the AssignmentGroup.
# feedbackset_queryset = feedbackset_queryset \
# .filter(group__cached_data__last_feedbackset_id=models.F('id'))
# Get all comments for all `FeedbackSet`s with deadline within the
# from and to datetime posted by a student.
group_comment_queryset = GroupComment.objects\
.filter(user_role=GroupComment.USER_ROLE_STUDENT)\
.filter(feedback_set_id__in=feedbackset_queryset.values_list('id', flat=True))
#: UNCOMMENT THIS IF YOU WANT TO:
#:
#: Filter only comments posted before the deadline expired on the
#: feedbackset the comment belongs to.
# group_comment_queryset = group_comment_queryset\
# .filter(published_datetime__gte=models.F('feedback_set__deadline_datetime'))
#: Annotate with file count on each comment (a delivery).
group_comment_queryset = group_comment_queryset.annotate(file_num=Count('commentfile'))
return group_comment_queryset.filter(file_num__gt=0).count()
def get_unique_logins(from_datetime):
"""
Get the number of unique logins since a specified datetime.
"""
unique_logins = get_user_model().objects\
.filter(last_login__gte=from_datetime)
return unique_logins.count()
def populate_arguments_and_get_parser():
parser = argparse.ArgumentParser(description='Set up department permission groups for missing subjects.')
parser.add_argument(
'--from-date',
dest='from_date',
default='1900-01-01',
help='A %%Y-%%m-%%d formatted from-date. Defaults to 1900-01-01.')
parser.add_argument(
'--to-date',
dest='to_date',
default='5999-12-31',
help='A %%Y-%%m-%%d formatted to-date. Defaults to 5999-12-31.')
return parser
if __name__ == "__main__":
# For development:
os.environ.setdefault('DJANGOENV', 'develop')
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "devilry.project.settingsproxy")
django.setup()
# For production: Specify python path to your settings file here
# os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'devilry_settings')
# django.setup()
parser = populate_arguments_and_get_parser()
args = parser.parse_args()
arguments_dict = vars(args)
from_datetime = timezone.make_aware(datetime.strptime(arguments_dict['from_date'], '%Y-%m-%d')).replace(
hour=0, minute=0, second=0)
to_datetime = timezone.make_aware(datetime.strptime(arguments_dict['to_date'], '%Y-%m-%d')).replace(
hour=23, minute=59, second=59)
# Get unique logins
unique_login_count = get_unique_logins(from_datetime=from_datetime)
print('Unique logins since {}: {}'.format(
arrow.get(from_datetime).format('MMM D. YYYY HH:mm:ss'),
unique_login_count))
# Get number of deliveries
delivery_count = get_number_of_deliveries(from_datetime, to_datetime)
print('Deliveries made between {} and {}: {}'.format(
arrow.get(from_datetime).format('MMM D. YYYY HH:mm:ss'),
arrow.get(to_datetime).format('MMM D. YYYY HH:mm:ss'),
delivery_count
))
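# Example invocation (hypothetical dates; run from the repository root with
# the virtualenv active):
#
#   python not_for_deploy/sysadmin_example_scripts/usage_statistics.py \
#       --from-date 2018-01-01 --to-date 2018-12-31
#
# This prints the number of unique logins since the from-date and the number
# of deliveries made between the two dates.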
|
{
"content_hash": "95e7ba043e3506b95f42f39c8721fc7d",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 109,
"avg_line_length": 37.339622641509436,
"alnum_prop": 0.67988883274381,
"repo_name": "devilry/devilry-django",
"id": "bfc9ef3f91dcbef3f2127aea4d7573925368cb8c",
"size": "3980",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "not_for_deploy/sysadmin_example_scripts/usage_statistics.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "513510"
},
{
"name": "Dockerfile",
"bytes": "211"
},
{
"name": "HTML",
"bytes": "421969"
},
{
"name": "JavaScript",
"bytes": "756713"
},
{
"name": "Less",
"bytes": "166670"
},
{
"name": "PLpgSQL",
"bytes": "397986"
},
{
"name": "Python",
"bytes": "6507968"
},
{
"name": "Shell",
"bytes": "10328"
}
],
"symlink_target": ""
}
|
hiddenimports = ['k_exceptions', 'services', 'typeconv_naked',
'typeconv_backcompat', 'typeconv_23plus',
'typeconv_datetime_stdlib', 'typeconv_datetime_mx',
'typeconv_datetime_naked', 'typeconv_fixed_fixedpoint',
'typeconv_fixed_stdlib', 'typeconv_text_unicode',
'typeconv_util_isinstance', '_kinterbasdb', '_kiservices']
|
{
"content_hash": "7a3cd9edce37f4c371c300a00206254e",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 75,
"avg_line_length": 68,
"alnum_prop": 0.5931372549019608,
"repo_name": "deandunbar/bitwave",
"id": "bad5e65b3cf4297144b3a6fed7cbf7d4219ce76c",
"size": "827",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "hackathon_version/venv/lib/python2.7/site-packages/PyInstaller/hooks/hook-kinterbasdb.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "6018"
},
{
"name": "CSS",
"bytes": "53871"
},
{
"name": "HTML",
"bytes": "1159945"
},
{
"name": "JavaScript",
"bytes": "531995"
},
{
"name": "Makefile",
"bytes": "120304"
},
{
"name": "Python",
"bytes": "9014662"
},
{
"name": "Shell",
"bytes": "3833"
},
{
"name": "TeX",
"bytes": "164573"
}
],
"symlink_target": ""
}
|
"""Script that initializes the database."""
import datetime
import os
import sys
import transaction
from pyramid.paster import (
get_appsettings,
setup_logging,
)
from pyramid.scripts.common import parse_vars
from ..models import (
get_engine,
get_session_factory,
get_tm_session,
BlogPost
)
from ..models.meta import Base
def usage(argv):
"""Error print function."""
cmd = os.path.basename(argv[0])
print('usage: %s <config_uri> [var=value]\n'
'(example: "%s development.ini")' % (cmd, cmd))
sys.exit(1)
def main(argv=sys.argv):
"""Main db creation function."""
if len(argv) < 2:
usage(argv)
config_uri = argv[1]
options = parse_vars(argv[2:])
setup_logging(config_uri)
settings = get_appsettings(config_uri, options=options)
sql_url = os.environ.get('DATABASE_URL', 'sqlite:///BlogPostDB.sqlite')
settings["sqlalchemy.url"] = sql_url
engine = get_engine(settings)
Base.metadata.create_all(engine)
session_factory = get_session_factory(engine)
with transaction.manager:
dbsession = get_tm_session(session_factory, transaction.manager)
model = BlogPost(
title='Test Title',
body='Test Body',
html='<p>Test Body</p>',
date=datetime.date.today()
)
dbsession.add(model)
|
{
"content_hash": "2870f50e8271eba9346edb7a2bc2256e",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 75,
"avg_line_length": 24.78181818181818,
"alnum_prop": 0.632428466617755,
"repo_name": "amosboldor/Portfolio-Site",
"id": "d79bb1fdf52c4903e05dd9f70ca9afbe18f0107b",
"size": "1363",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "portfolio/scripts/initializedb.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "25063"
},
{
"name": "Python",
"bytes": "24163"
},
{
"name": "Shell",
"bytes": "91"
}
],
"symlink_target": ""
}
|
from django.contrib import admin
from .models import ThreadedComments
@admin.register(ThreadedComments)
class ThreadedCommentsAdmin(admin.ModelAdmin):
pass
# def save_model(self, request, obj, form, change):
# super().save_model(request, obj, form, change)
|
{
"content_hash": "23eb148574ef716c2d9030a8a66ef8bf",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 59,
"avg_line_length": 27.9,
"alnum_prop": 0.7311827956989247,
"repo_name": "hakancelik96/coogger",
"id": "1dae2977c34e3f5121ede42164e43bc73b32095a",
"size": "279",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "core/threaded_comment/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4942"
},
{
"name": "HTML",
"bytes": "102923"
},
{
"name": "JavaScript",
"bytes": "9135"
},
{
"name": "Python",
"bytes": "92335"
}
],
"symlink_target": ""
}
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# pip install CppHeaderParser ply
#
#
# how to use api_reference.py
#
# 1. run `./tools/api_reference.py`
#
# 2. check documents
# - documentation/PluginXX/api-reference-cpp.md
# - documentation/PluginXX/api-reference-lua.md
# - documentation/PluginXX/api-reference-js.md
#
# how to config
# 1. manual binding function
# - edit `plugins/pluginx/lua/config/binding.ini`
# - add section `manual_binding_function = PluginXX::[setListener,otherFunction]`
#
# 2. rename callback function name (optional)
# - edit `plugins/pluginx/lua/config/binding.ini`
# - add section `rename_script_callback = PluginXX::[onCallbackLongFunction=onCallbackFunction]`
# before `[headers]`
#
import ConfigParser
import os
import sys
import CppHeaderParser
from argparse import ArgumentParser
from sets import Set
CURR_DIR = os.path.split(os.path.realpath(__file__))[0]
OUT_DIR = os.path.realpath(CURR_DIR + "./..")
PLUGIN_PREFIX = "Plugin"
def get_doxygen(comment):
if '\r\n' in comment:
cs = comment.split('\r\n')
else:
cs = comment.split('\n')
ret = ""
has_more_comment = False
for line in cs:
if line.startswith("/*") or line.endswith("*/") or line.startswith("* @param"):
continue
elif line == "*": # brief end with empty line
if not has_more_comment:
ret += '\n<pre>\n'
has_more_comment = True
else:
if line.startswith("* "):
line = line[2:]
ret += line + '\n'
if has_more_comment:
ret += '</pre>\n'
ret = ret.replace("@brief ", "").replace('*', '>').replace('\n<pre>\n</pre>\n', '')
return ret
def format_function(function_string):
"""
static void some_function(int param1, int param2);
=>
static void some_function(int param1,
int param2)
"""
ret = function_string
if len(ret) <= 80:
return ret
ret = ret.split(',')
index = ret[0].rfind('(')
for x in xrange(1, len(ret)):
ret[x] = " %s%s" % (index * " ", ret[x])
ret = ',\n'.join(ret)
return ret
class APIRefGenerator(object):
"""APIRefGenerator("/path/to/PluginTune.h", "Tune")"""
def __init__(self, header, plugin_name, ini_config):
self.header = header
self.pluginName = plugin_name
section = plugin_name.lower()
self.target_namespace = ini_config.get(section, "target_namespace")
self.className = ini_config.get(section, "classes")
self.skipFunction = []
if "skip" in ini_config.options(section) and ini_config.get(section, "skip") != '':
self.skipFunction = ini_config.get(section, "skip").split("::")[1].replace("[", "").replace("]", "").replace("^", "").replace("$", "").split(" ")
self.manual_binding_function = []
if "manual_binding_function" in ini_config.options(section) and ini_config.get(section, "manual_binding_function") != '':
self.manual_binding_function = ini_config.get(section, "manual_binding_function").split("::")[1].replace("[", "").replace("]", "").split(",")
self.skipFunction = Set(self.skipFunction) - (Set(self.skipFunction) & Set(self.manual_binding_function))
print plugin_name, self.skipFunction
self.renameCallBackName = {}
if "rename_script_callback" in ini_config.options(section):
callback_names = ini_config.get(section, "rename_script_callback").split("::")[1].replace("[", "").replace("]", "").split(",")
for name in callback_names:
v = name.split("=")
self.renameCallBackName[v[0]] = v[1]
def run(self):
try:
cppHeader = CppHeaderParser.CppHeader(self.header)
except CppHeaderParser.CppParseError as e:
print(e)
sys.exit(1)
#
# Generate plugin api reference
#
cpp_ref = lua_fef = js_ref = cpp_l_ref = lua_l_ref = js_l_ref = ""
classes = cppHeader.classes
plugin_name = self.className
if plugin_name not in classes:
return False, cpp_ref, cpp_l_ref, lua_fef, lua_l_ref, js_ref, js_l_ref
plugin_class = classes[plugin_name]
methods = plugin_class["methods"]["public"]
for m in methods:
if m["debug"][0] == "~":
continue
cpp_ref += "```cpp\n%s\n```\n" % format_function(m["debug"])
doxygen = ""
if "doxygen" in m.keys():
doxygen = "> " + get_doxygen(m["doxygen"])
if len(doxygen):
cpp_ref += doxygen
cpp_ref += '\n'
function_name = m["name"]
if function_name in self.skipFunction:
continue
pa = "("
i = 0
for p in m["parameters"]:
if i > 0:
pa += ", "
pa += p["name"]
i += 1
pa += ")"
f = "%s.%s." % (self.target_namespace, plugin_name) + function_name + pa
js_ref += "```javascript\n%s;\n```\n" % (format_function(f))
if len(doxygen):
js_ref += doxygen
js_ref += '\n'
f = "%s.%s:" % (self.target_namespace, plugin_name) + function_name + pa
lua_fef += "```lua\n%s\n```\n" % (format_function(f))
if len(doxygen):
lua_fef += doxygen
lua_fef += '\n'
#
# Generate listener api reference
#
listener_class = None
if self.pluginName + "Listener" in classes.keys():
listener_class = classes[self.pluginName + "Listener"]
elif self.pluginName.upper() + "Listener" in classes.keys():
listener_class = classes[self.pluginName.upper() + "Listener"]
if listener_class:
methods = listener_class["methods"]["public"]
for m in methods:
if m["debug"][0] == "~":
continue
cpp_l_ref += "```cpp\n%s\n```\n" % format_function(m["debug"].replace("virtual ", "").replace(" = 0 ", "").replace("{", ""))
doxygen = ""
if "doxygen" in m.keys():
doxygen = "> " + get_doxygen(m["doxygen"])
if len(doxygen):
cpp_l_ref += doxygen
cpp_l_ref += '\n'
function_name = m["name"]
if function_name in self.renameCallBackName.keys():
function_name = self.renameCallBackName[function_name]
pa = "("
i = 0
for p in m["parameters"]:
if i > 0:
pa += ", "
pa += p["name"]
i += 1
pa += ")"
f = function_name + pa
js_l_ref += "```javascript\n%s;\n```\n" % (format_function(f))
if len(doxygen):
js_l_ref += doxygen
js_l_ref += '\n'
f = function_name + pa
lua_l_ref += "```lua\n%s\n```\n" % (format_function(f))
if len(doxygen):
lua_l_ref += doxygen
lua_l_ref += '\n'
return True, cpp_ref, cpp_l_ref, lua_fef, lua_l_ref, js_ref, js_l_ref
class Walker(object):
"""docstring for Walker"""
def __init__(self, arg):
super(Walker, self).__init__()
self.plugin = arg.plugin
self.out_dir = arg.out_dir
self.src_dir = arg.src
def run(self):
join = os.path.join
ret = ""
document_output_dir = self.out_dir or OUT_DIR
found_special_plugin = not self.plugin
for d in os.listdir(self.src_dir):
if d in ["core", "shared"] or not os.path.isdir(join(self.src_dir, d)):
continue
if self.plugin and d != self.plugin:
continue
if self.plugin and d == self.plugin:
found_special_plugin = True
name = ""
header_file_path = ""
header_dir = join(self.src_dir, d, "share")
if not os.path.exists(header_dir):
continue
for f in os.listdir(header_dir):
if f.startswith(PLUGIN_PREFIX) and f.endswith(".h"):
header_file_path = join(header_dir, f)
name = f.replace(PLUGIN_PREFIX, "").replace(".h", "")
break
ini_file_path = join(self.src_dir, d, "lua/config", "binding.ini")
config = ConfigParser.ConfigParser()
config.read(ini_file_path)
ok, cpp, cppl, lua, lual, js, jsl = APIRefGenerator(header_file_path, name, config).run()
if ok:
ret += "%s ok\n" % name
if not os.path.exists(join(document_output_dir, d)):
os.makedirs(join(document_output_dir, d))
template = "## API Reference\n\n### Methods\n%s\n### Listeners\n%s\n"
data = template % (cpp, cppl)
doc_path = join(document_output_dir, 'src', d, 'v3-cpp')
if not os.path.exists(doc_path):
os.makedirs(doc_path)
open(join(doc_path, 'api-reference.md'), 'w').write(data)
data = template % (lua, lual)
doc_path = join(document_output_dir, 'src', d, 'v3-lua')
if not os.path.exists(doc_path):
os.makedirs(doc_path)
open(join(doc_path, 'api-reference.md'), 'w').write(data)
data = template % (js, jsl)
doc_path = join(document_output_dir, 'src', d, 'v3-js')
if not os.path.exists(doc_path):
os.makedirs(doc_path)
open(join(doc_path, 'api-reference.md'), 'w').write(data)
else:
ret += "%s missing\n" % name
if not found_special_plugin:
            ret = "\nPlugin %s not found\n" % self.plugin
print ret
if __name__ == '__main__':
    parser = ArgumentParser(description="Generate API Reference for SDKBox. Example usage: tools/api_reference.py -s ~/Projects/store/csc/plugins -o . -g sdkboxplay")
parser.add_argument('-s', dest='src',
help='source code directory')
parser.add_argument('-g', dest='plugin',
help='Plugin name.')
parser.add_argument('-o', '--output', dest='out_dir',
help='Where to save Api Reference documentation. Default to src folder.')
(args, unknown) = parser.parse_known_args()
if len(unknown) > 0:
print("unknown arguments: %s" % unknown)
parser.print_help()
sys.exit(1)
if args.src is None:
print 'Please specify source code directory with -s flag'
parser.print_help()
sys.exit(1)
Walker(args).run()
|
{
"content_hash": "4c31f2da6a6aad4ed378a6ebfc4b0aa6",
"timestamp": "",
"source": "github",
"line_count": 328,
"max_line_length": 166,
"avg_line_length": 33.67987804878049,
"alnum_prop": 0.512537340454422,
"repo_name": "yinjimmy/en",
"id": "3a14596aea39a4c5c9be87e599ea0f36b315a5d6",
"size": "11047",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tools/api_reference.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "29112"
},
{
"name": "HTML",
"bytes": "9252"
},
{
"name": "JavaScript",
"bytes": "1022"
},
{
"name": "Python",
"bytes": "17152"
},
{
"name": "Shell",
"bytes": "828"
}
],
"symlink_target": ""
}
|
"""Functions that prepare GAE user code for running in a GCE VM."""
import functools
import json
import logging
import math
import os
import re
import typing
import wsgiref.util
from google.appengine.runtime import thread_hooks
import six
def GetTraceAndSpanId():
"""Gets the Trace and Span ID for the given request via environment variables.
  Checks and parses the environment variable "HTTP_X_CLOUD_TRACE_CONTEXT",
which is a header provided by AppEngine (and is set as an environment
variable as a part of the WSGI Specification).
Returns:
A tuple of Trace ID and the Span ID
"""
ctx = os.getenv('HTTP_X_CLOUD_TRACE_CONTEXT')
if ctx:
m = re.search(r'^(\w+)/(\d+)(?:;o=[01])?$', ctx)
if not m:
return (None, None)
trace_id = m.group(1)
span_id = m.group(2)
project_id = os.getenv('GOOGLE_CLOUD_PROJECT')
return ('projects/{}/traces/{}'.format(project_id, trace_id), span_id)
return (None, None)
def GetRequestUrl():
"""Constructs the full request url from WSGI environment variables."""
try:
environ = typing.cast(typing.Dict[str, typing.Any], os.environ)
return wsgiref.util.request_uri(environ)
except KeyError:
return None
class JsonFormatter(logging.Formatter):
"""Class for logging to the cloud logging api with json metadata."""
def format(self, record):
"""Format the record as json the cloud logging agent understands.
Args:
record: A logging.LogRecord to format.
Returns:
A json string to log.
"""
float_frac_sec, float_sec = math.modf(record.created)
message = record.getMessage()
if record.exc_info:
message = '%s\n%s' % (message, self.formatException(record.exc_info))
data = {
'message': message,
'thread': record.thread,
'severity': record.levelname,
'timestamp': {
'seconds': int(float_sec),
'nanos': int(float_frac_sec * 1000000000)
},
'logging.googleapis.com/sourceLocation': {
'file': record.pathname,
'line': str(record.lineno),
'function': record.funcName,
},
'serviceContext': {
'version': os.getenv('GAE_VERSION'),
'service': os.getenv('GAE_SERVICE'),
},
'context': {
'user': os.getenv('USER_NICKNAME'),
'httpRequest': {
'url': GetRequestUrl(),
'userAgent': os.getenv('HTTP_USER_AGENT'),
'requestMethod': os.getenv('REQUEST_METHOD'),
'protocol': os.getenv('SERVER_PROTOCOL')
}
},
}
trace_id, span_id = GetTraceAndSpanId()
if trace_id:
data['logging.googleapis.com/trace'] = trace_id
data['logging.googleapis.com/spanId'] = span_id
return json.dumps(data)
class SplitLogHandler(logging.StreamHandler):
"""Class for splitting large logs into chunks."""
def emit(self, record):
"""Emit a record.
If the message is larger than the max size of a log entry of 256KB
(https://cloud.google.com/logging/quotas#log-limits), it's split in
chunks to prevent it from being lost
Args:
record: an instance of logging.LogRecord
"""
message = str(record.msg)
max_message_size = 256000
if len(message) <= max_message_size or six.PY2:
super(SplitLogHandler, self).emit(record)
else:
chunks = [
message[i:i + max_message_size]
for i in range(0, len(message), max_message_size)
]
for idx, chunk in enumerate(chunks):
record.msg = 'Part {}/{}: {}'.format(str(idx + 1), len(chunks), chunk)
super().emit(record)
def InitializeLogging(custom_json_formatter=None):
"""Helper called from CreateAndRunService() to set up syslog logging."""
logging.basicConfig()
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
  clear_logging_handlers = os.environ.get(
      'TITANOBOA_CLEAR_LOGGING_HANDLERS', '1') != '0'
  if clear_logging_handlers:
if len(logger.handlers) > 1:
logger.warning(
'Removing more than one logging handler. '
'This implies that a user-added logging handler is being removed!')
logger.handlers.clear()
logging_handler = SplitLogHandler()
json_formatter = custom_json_formatter or JsonFormatter()
logging_handler.setFormatter(json_formatter)
logger.addHandler(logging_handler)
@functools.lru_cache(maxsize=None)
def InitializeThreadingApis():
"""Helper to monkey-patch various threading APIs."""
thread_hooks.PatchStartNewThread(
hooks=[thread_hooks.RequestEnvironmentThreadHook])
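# Usage sketch (not part of the original module): exercises the trace-header
# parsing above with a fabricated 'TRACE_ID/SPAN_ID;o=OPTIONS' value; the
# project id and trace id below are placeholders.
if __name__ == '__main__':
  os.environ['HTTP_X_CLOUD_TRACE_CONTEXT'] = '0123456789abcdef/42;o=1'
  os.environ['GOOGLE_CLOUD_PROJECT'] = 'demo-project'
  # Expected output: ('projects/demo-project/traces/0123456789abcdef', '42')
  print(GetTraceAndSpanId())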
|
{
"content_hash": "ab288ec6b3a208e65930760be6ccaf04",
"timestamp": "",
"source": "github",
"line_count": 167,
"max_line_length": 80,
"avg_line_length": 27.84431137724551,
"alnum_prop": 0.6412903225806451,
"repo_name": "GoogleCloudPlatform/appengine-python-standard",
"id": "2306d2e82554286c50c61e0f3d8eab4cf7930a34",
"size": "5250",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "src/google/appengine/runtime/initialize.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "3778254"
}
],
"symlink_target": ""
}
|
import synapse.exc as s_exc
import synapse.lib.stormlib.json as s_json
import synapse.tests.utils as s_test
class JsonTest(s_test.SynTest):
async def test_stormlib_json(self):
async with self.getTestCore() as core:
self.eq(((1, 2, 3)), await core.callStorm('return($lib.json.load("[1, 2, 3]"))'))
self.eq(('["foo", "bar", "baz"]'), await core.callStorm('return($lib.json.save((foo, bar, baz)))'))
with self.raises(s_exc.BadJsonText):
await core.callStorm('return($lib.json.load(foo))')
with self.raises(s_exc.MustBeJsonSafe):
await core.callStorm('return($lib.json.save($lib.print))')
# jsonschema tests
self.true(s_json.compileJsSchema(s_test.test_schema))
resp = s_json.runJsSchema(s_test.test_schema, {'key:integer': 137})
self.eq(137, resp.get('key:integer'))
self.eq('Default string!', resp.get('key:string'))
opts = {'vars': {'schema': s_test.test_schema}}
q = '''$schemaObj = $lib.json.schema($schema)
$item=$lib.dict()
$item."key:integer"=(4)
return ( $schemaObj.validate($item) )
'''
isok, valu = await core.callStorm(q, opts=opts)
self.true(isok)
self.eq(4, valu.get('key:integer'))
self.eq('Default string!', valu.get('key:string'))
q = '''$schemaObj = $lib.json.schema($schema)
$item=$lib.dict()
$item."key:integer"=4
return ( $schemaObj.validate($item) )
'''
isok, valu = await core.callStorm(q, opts=opts)
self.false(isok)
self.eq('data.key:integer must be integer', valu.get('mesg'))
with self.raises(s_exc.StormRuntimeError):
q = '$schemaObj=$lib.json.schema((foo, bar))'
await core.callStorm(q)
q = '''
$schemaObj = $lib.json.schema($schema, use_default=$lib.false)
$item = ({"key:integer": 4})
return($schemaObj.validate($item))
'''
isok, valu = await core.callStorm(q, opts={'vars': {'schema': s_test.test_schema}})
self.true(isok)
self.eq(4, valu.get('key:integer'))
self.notin('key:string', valu)
# Print a json schema obj
q = "$schemaObj = $lib.json.schema($schema) $lib.print('schema={s}', s=$schemaObj)"
msgs = await core.stormlist(q, opts=opts)
self.stormIsInPrint('storm:json:schema: {', msgs)
q = "$schemaObj = $lib.json.schema($schema) return ( $schemaObj.schema() )"
schema = await core.callStorm(q, opts=opts)
self.eq(schema, s_test.test_schema)
|
{
"content_hash": "07a35a1ee10102544e75fc283b79c9fc",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 111,
"avg_line_length": 40.507246376811594,
"alnum_prop": 0.5413237924865831,
"repo_name": "vertexproject/synapse",
"id": "78a48afbf2160151b332084a44215f993fb195a0",
"size": "2795",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "synapse/tests/test_lib_stormlib_json.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "4010"
},
{
"name": "HTML",
"bytes": "3"
},
{
"name": "Python",
"bytes": "5894053"
},
{
"name": "Shell",
"bytes": "10776"
}
],
"symlink_target": ""
}
|
import unittest
import numpy as np
from numpy.testing import assert_array_equal
from graphs.base.pairs import EdgePairGraph, SymmEdgePairGraph
PAIRS = np.array([[0,1],[0,2],[1,1],[2,1],[3,3]])
ADJ = [[0,1,1,0],
[0,1,0,0],
[0,1,0,0],
[0,0,0,1]]
class TestEdgePairGraph(unittest.TestCase):
def setUp(self):
self.epg = EdgePairGraph(PAIRS)
def test_pairs(self):
self.assert_(self.epg.pairs(copy=False) is PAIRS)
P = self.epg.pairs(copy=True)
self.assert_(P is not PAIRS)
assert_array_equal(P, PAIRS)
# test the directed case
P = self.epg.pairs(directed=False)
assert_array_equal(P, [[0,1],[0,2],[1,1],[1,2],[3,3]])
def test_matrix(self):
M = self.epg.matrix()
assert_array_equal(M.toarray(), ADJ)
M = self.epg.matrix('dense')
assert_array_equal(M, ADJ)
M = self.epg.matrix('csr')
self.assertEqual(M.format, 'csr')
assert_array_equal(M.toarray(), ADJ)
def test_self_edges(self):
self.epg.add_self_edges()
expected = self.epg.pairs()
# Ensure that calling it again does the right thing.
self.epg.add_self_edges()
assert_array_equal(self.epg.pairs(), expected)
def test_symmetrize(self):
# Check that copy=True doesn't change anything
self.epg.symmetrize(copy=True)
assert_array_equal(self.epg.matrix('dense'), ADJ)
class TestSymmEdgePairGraph(unittest.TestCase):
def setUp(self):
self.G = SymmEdgePairGraph(PAIRS)
def test_copy(self):
gg = self.G.copy()
self.assertIsNot(gg, self.G)
assert_array_equal(gg.matrix('dense'), self.G.matrix('dense'))
assert_array_equal(gg.pairs(), self.G.pairs())
def test_pairs(self):
expected = [[0,1], [0,2], [1,0], [1,1], [1,2], [2,0], [2,1], [3,3]]
P = self.G.pairs()
assert_array_equal(sorted(P.tolist()), expected)
# test the directed case
P = self.G.pairs(directed=False)
assert_array_equal(P, [[0,1],[0,2],[1,1],[1,2],[3,3]])
def test_symmetrize(self):
self.assertIs(self.G.symmetrize(copy=False), self.G)
S = self.G.symmetrize(copy=True)
self.assertIsNot(S, self.G)
assert_array_equal(S.matrix('dense'), self.G.matrix('dense'))
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "a9e34b1e6f17165ad79db7797b91c4c5",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 71,
"avg_line_length": 29.89189189189189,
"alnum_prop": 0.6360759493670886,
"repo_name": "all-umass/graphs",
"id": "7a3348f76508425a2513d2f63adfb0dc813e8c90",
"size": "2212",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "graphs/base/tests/test_pairs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "217808"
},
{
"name": "Shell",
"bytes": "153"
}
],
"symlink_target": ""
}
|
"""
Contains the protobufs for RTS communication.
"""
|
{
"content_hash": "a404b8e53a438a7b682e26dbc482d96a",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 45,
"avg_line_length": 18,
"alnum_prop": 0.7222222222222222,
"repo_name": "SCAII/SCAII",
"id": "7de4b66ca72a5228dbeb7e06053e09376b1f3a93",
"size": "54",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "backends/sky-rts/glue/python/sky_rts/protos/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "9134"
},
{
"name": "HTML",
"bytes": "10345"
},
{
"name": "JavaScript",
"bytes": "456557"
},
{
"name": "Lua",
"bytes": "22488"
},
{
"name": "PowerShell",
"bytes": "1230"
},
{
"name": "Python",
"bytes": "224320"
},
{
"name": "Rust",
"bytes": "384423"
},
{
"name": "Shell",
"bytes": "940"
}
],
"symlink_target": ""
}
|
from __future__ import print_function, division
import numpy as np
from ciabatta.meta import make_repr_str
from ahoy import measurers
from ahoy.dc_dx_measurers import TemporalDcDxMeasurer, dc_dx_factory
class NoiseMeasurer(measurers.Measurer):
def __init__(self, noise_0, *args, **kwargs):
self.noise_0 = noise_0
def get_noise(self):
return self.noise_0
def __repr__(self):
fs = [('noise_0', self.noise_0)]
return make_repr_str(self, fs)
class ChemoNoiseMeasurer(NoiseMeasurer):
def __init__(self, noise_0, chi, dc_dx_measurer):
NoiseMeasurer.__init__(self, noise_0)
self.chi = chi
self.dc_dx_measurer = dc_dx_measurer
@property
def is_temporal(self):
return isinstance(self.dc_dx_measurer, TemporalDcDxMeasurer)
def get_noise(self):
dc_dxs = self.dc_dx_measurer.get_dc_dxs()
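        # Linear chemotaxis model: base noise is reduced in proportion to the
        # measured gradient, so motion up-gradient is less noisy.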
return self.noise_0 * (1.0 - self.chi * dc_dxs)
def __repr__(self):
fs = [('noise_0', self.noise_0), ('chi', self.chi),
('dc_dx_measurer', self.dc_dx_measurer)]
return make_repr_str(self, fs)
class OneSidedChemoNoiseMeasurer(ChemoNoiseMeasurer):
def get_noise(self):
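        # One-sided response: the noise is capped at noise_0, so an adverse
        # gradient can never raise the noise above its base value.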
noise_two_sided = super(OneSidedChemoNoiseMeasurer, self).get_noise()
return np.minimum(self.noise_0, noise_two_sided)
def chemo_noise_measurer_factory(onesided_flag, *args, **kwargs):
if onesided_flag:
return OneSidedChemoNoiseMeasurer(*args, **kwargs)
else:
return ChemoNoiseMeasurer(*args, **kwargs)
def noise_measurer_factory(chemo_flag,
noise_0,
onesided_flag, chi,
temporal_chemo_flag,
ds,
ps, v_0, dt_mem, t_mem, t_rot_0, time,
c_field_flag, c_field):
if chemo_flag:
dc_dx_measurer = dc_dx_factory(temporal_chemo_flag,
ds,
ps, v_0, dt_mem, t_mem, t_rot_0, time,
c_field_flag, c_field)
return chemo_noise_measurer_factory(onesided_flag, noise_0, chi,
dc_dx_measurer)
else:
return NoiseMeasurer(noise_0)
|
{
"content_hash": "4ac6e55622c5b96a0fcd7ed7b4929fec",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 77,
"avg_line_length": 32.54929577464789,
"alnum_prop": 0.5612289052358287,
"repo_name": "eddiejessup/ahoy",
"id": "2c0a107e6a712ce8248332b15cbf81061a382a6a",
"size": "2311",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ahoy/noise_measurers.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "1684"
},
{
"name": "Python",
"bytes": "180250"
},
{
"name": "Shell",
"bytes": "277"
}
],
"symlink_target": ""
}
|
import forwarding_buffer
from units import node
class ForwardingProtocol(object):
def __init__(self):
self.scheduling_policy = None
self.test = None
self.network = None
def set_scheduling_policy(self, p):
self.scheduling_policy = p
def init(self, test):
self.test = test
self.network = self.test.network
def create_node(self, node_name, network):
return node.Node(node_name, network)
def create_buffer(self):
return self.scheduling_policy()
def run_communication_step(self):
pass
def run_forwarding_step(self):
pass
|
{
"content_hash": "fc6f04ce001d991ad76a78ee839e68b1",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 46,
"avg_line_length": 21.82758620689655,
"alnum_prop": 0.6287519747235387,
"repo_name": "efectivo/network_sim",
"id": "e0ba53d1009e4d2395680d7e6212f3ca6d035e42",
"size": "633",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "protocols/forwarding_protocol.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "7993"
},
{
"name": "Python",
"bytes": "61634"
}
],
"symlink_target": ""
}
|
class Grammar:
def __init__(self, ast):
ast = rewrite_sequences_with_labels(ast)
self.ast = simplify(ast)
self.rules = self.ast[1]
self.rule_names = set(r[1] for r in self.rules)
self.starting_rule = self.rules[0][1]
def check_for_left_recursion(ast):
"""Returns a list of all potentially left-recursive rules."""
lr_rules = set()
rules = {}
for _, name, body in ast[1]:
rules[name] = body
for _, name, body in ast[1]:
seen = set()
has_lr = _check_lr(name, body, rules, seen)
if has_lr:
lr_rules.add(name)
return lr_rules
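# Illustrative sketch (not part of the original module): with the AST shape
# ['rules', [['rule', name, body], ...]] used above, a directly left-recursive
# rule such as
#   ['rules', [['rule', 'expr',
#               ['choice', [['seq', [['apply', 'expr'], ['lit', '+']]],
#                           ['lit', 'n']]]]]]
# makes check_for_left_recursion return {'expr'}, because the first term of
# the first alternative applies 'expr' itself.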
def rewrite_left_recursion(ast):
lr_rules = check_for_left_recursion(ast)
new_rules = []
for rule in ast[1]:
if rule[1] in lr_rules:
new_rules.append([rule[0], rule[1], ['leftrec', rule[2], rule[1]]])
else:
new_rules.append(rule)
return ['rules', new_rules]
def _check_lr(name, node, rules, seen):
# pylint: disable=too-many-branches
ty = node[0]
if ty == 'action':
return False
if ty == 'apply':
if node[1] == name:
return True # Direct recursion.
if node[1] in ('anything', 'end'):
return False
if node[1] in seen:
            # We've hit left recursion on a different rule, so the answer is no.
return False
seen.add(node[1])
return _check_lr(name, rules[node[1]], rules, seen)
if ty == 'capture':
return _check_lr(name, node[1], rules, seen)
if ty == 'choice':
return any(_check_lr(name, n, rules, seen) for n in node[1])
if ty == 'empty':
return False
if ty == 'eq':
return False
if ty == 'label':
return _check_lr(name, node[1], rules, seen)
if ty == 'lit':
return False
if ty == 'not':
return _check_lr(name, node[1], rules, seen)
if ty == 'opt':
return _check_lr(name, node[1], rules, seen)
if ty == 'paren':
return _check_lr(name, node[1], rules, seen)
if ty == 'plus':
return _check_lr(name, node[1], rules, seen)
if ty == 'pos':
return False
if ty == 'pred':
return False
if ty == 'range':
return False
if ty == 'scope':
for subnode in node[1]:
r = _check_lr(name, subnode, rules, seen)
if r:
return r
return False
if ty == 'seq':
for subnode in node[1]:
r = _check_lr(name, subnode, rules, seen)
if r:
return r
return False
if ty == 'star':
return _check_lr(name, node[1], rules, seen)
assert False, 'unexpected AST node type %s' % ty # pragma: no cover
def memoize(ast, rules_to_memoize):
"""Returns a new AST with the given rules memoized."""
new_rules = []
for rule in ast[1]:
_, name, node = rule
if name in rules_to_memoize:
new_rules.append(['rule', name, ['memo', node, name]])
else:
new_rules.append(rule)
return ['rules', new_rules]
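# Illustrative: memoize(ast, {'expr'}) rewrites ['rule', 'expr', body] into
# ['rule', 'expr', ['memo', body, 'expr']] and leaves all other rules intact.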
def add_builtin_vars(node):
"""Returns a new AST rewritten to support the _* vars."""
if node[0] == 'rules':
return ['rules', [add_builtin_vars(rule) for rule in node[1]]]
assert node[0] == 'rule', 'unexpected AST node %s' % node[0]
name = node[1]
body = node[2]
if body[0] == 'leftrec':
if body[1][0] == 'choice':
choices = body[1][1]
else:
choices = [body[1]]
elif body[0] == 'choice':
choices = body[1]
else:
choices = [body]
new_choices = _rewrite_choices(name, choices)
if body[0] == 'leftrec':
if len(new_choices) > 1:
return ['rule', name, ['leftrec', ['choice', new_choices], body[2]]]
return ['rule', name, ['leftrec', new_choices[0], body[2]]]
if len(new_choices) > 1:
return ['rule', name, ['choice', new_choices]]
return ['rule', name, new_choices[0]]
def _rewrite_choices(rule_name, choices):
new_choices = []
for seq in choices:
tag = seq[0]
if tag not in ('seq', 'scope'):
new_choices.append(seq)
continue
terms = seq[1]
new_terms = []
for i, term in enumerate(terms[:-1]):
pos_var = '_%d' % (i + 1)
pos_is_needed = (any(_var_is_needed(pos_var, st)
for st in terms[i+1:]))
if pos_is_needed:
tag = 'scope'
new_terms.append(['label', term, pos_var])
else:
new_terms.append(term)
new_terms.append(terms[-1])
if tag == 'scope':
new_choices.append([tag, new_terms, rule_name])
else:
new_choices.append([tag, new_terms])
return new_choices
def _var_is_needed(name, node):
ty = node[0]
if ty == 'll_var' and node[1] == name:
return True
if ty in ('eq', 'pred', 'action',
'll_paren', 'll_getitem', 'll_getattr'):
return _var_is_needed(name, node[1])
if ty == 'll_plus':
return (_var_is_needed(name, node[1]) or
_var_is_needed(name, node[2]))
if ty == 'll_qual':
return (_var_is_needed(name, node[1]) or
any(_var_is_needed(name, sn) for sn in node[2]))
if ty in ('choice', 'seq', 'll_arr', 'll_call'):
return any(_var_is_needed(name, sn) for sn in node[1])
return False
def rename(node, prefix):
"""Returns a new AST with all of the rule names prefixed by |prefix|."""
if node[0] == 'rule':
return [node[0], prefix + node[1], rename(node[2], prefix)]
if node[0] == 'apply':
return [node[0], prefix + node[1]]
if node[0] in ('choice', 'rules', 'seq'):
return [node[0], [rename(n, prefix) for n in node[1]]]
if node[0] in ('capture', 'memo', 'not', 'opt', 'paren', 'plus', 're',
'star'):
return [node[0], rename(node[1], prefix)]
if node[0] == 'label':
return [node[0], rename(node[1], prefix), node[2]]
if node[0] == 'leftrec':
return [node[0], rename(node[1], prefix), prefix + node[2]]
if node[0] == 'scope':
return [node[0], [rename(n, prefix) for n in node[1]],
node[2]]
return node
def simplify(node):
"""Returns a new, simplified version of an AST:
* Any `choice`, `seq`, or `scope` node with only one child is replaced
the child.
* Any `paren` node is replaced by its child node.
"""
node_type = node[0]
if node_type == 'rules':
return [node_type, [simplify(n) for n in node[1]]]
if node_type == 'rule':
return [node_type, node[1], simplify(node[2])]
if node_type in ('choice', 'seq'):
if len(node[1]) == 1:
return simplify(node[1][0])
return [node_type, [simplify(n) for n in node[1]]]
if node_type in ('capture', 'not', 'opt', 'plus', 're', 'star'):
return [node_type, simplify(node[1])]
if node_type == 'paren':
# TODO: simplify when it is safe to do so.
return [node_type, simplify(node[1])]
if node_type in ('label', 'leftrec', 'memo'):
return [node_type, simplify(node[1]), node[2]]
if node_type == 'scope':
return [node_type, [simplify(n) for n in node[1]], node[2]]
return node
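# Illustrative: simplify(['choice', [['lit', 'a']]]) collapses to ['lit', 'a'],
# while a multi-child choice keeps its node and is only simplified recursively.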
def rewrite_sequences_with_labels(ast):
for rule in ast[1]:
rule_name = rule[1]
if rule[2][0] == 'choice':
new_choices = []
for choice in rule[2][1]:
if choice[0] == 'seq':
if _has_labels(choice[1]):
new_choice = ['scope', choice[1], rule_name]
else:
new_choice = choice
else:
new_choice = choice
new_choices.append(new_choice)
rule[2][1] = new_choices
return ast
def _has_labels(node):
if node and node[0] == 'label':
return True
for n in node:
if isinstance(n, list) and _has_labels(n):
return True
return False
def flatten(ast, should_flatten):
"""Return a new ast with nested sequences or choices moved to new rules."""
ast = rename(ast, '_r_')
new_rules = []
for _, old_name, old_node in ast[1]:
new_subnode, new_subrules = _flatten(old_name, old_node,
should_flatten)
new_rules += [['rule', old_name, new_subnode]] + new_subrules
return ['rules', new_rules]
def _flatten(old_name, old_node, should_flatten):
# pylint: disable=too-many-branches
old_type = old_node[0]
new_rules = []
if old_type in ('choice', 'scope', 'seq'):
new_subnodes = []
for i, subnode in enumerate(old_node[1]):
new_name = '_s_%s_%s%d' % (old_name[3:], old_type[0], i)
new_subnode, new_subrules = _flatten(new_name, subnode,
should_flatten)
if should_flatten(new_subnode):
new_subnodes.append(['apply', new_name])
new_rules += [['rule', new_name, new_subnode]]
else:
new_subnodes.append(new_subnode)
new_rules += new_subrules
if old_type == 'scope':
new_node = [old_type, new_subnodes, old_node[2]]
else:
new_node = [old_type, new_subnodes]
elif old_type in ('label', 'leftrec'):
new_name = '_s_%s_%s' % (old_name[3:], old_type[0])
new_subnode, new_subrules = _flatten(new_name, old_node[1],
should_flatten)
if should_flatten(new_subnode):
new_node = [old_type, ['apply', new_name], old_node[2]]
new_rules += [['rule', new_name, new_subnode]]
else:
new_node = [old_type, new_subnode, old_node[2]]
new_rules += new_subrules
elif old_type in ('capture', 'memo', 'not', 'opt', 'paren',
'plus', 're', 'star'):
new_name = '_s_%s_%s' % (old_name[3:], old_type[0])
new_subnode, new_subrules = _flatten(new_name, old_node[1],
should_flatten)
if should_flatten(new_subnode):
new_node = [old_type, ['apply', new_name]]
new_rules += [['rule', new_name, new_subnode]]
else:
new_node = [old_type, new_subnode]
new_rules += new_subrules
else:
new_node = old_node
return new_node, new_rules
|
{
"content_hash": "f933f849a1fe5aa7219b4bfeaed3dbf2",
"timestamp": "",
"source": "github",
"line_count": 311,
"max_line_length": 80,
"avg_line_length": 34.138263665594856,
"alnum_prop": 0.5143637562399924,
"repo_name": "dpranke/glop",
"id": "1c599fd27b4862d21156feae2028a2f48bc278b4",
"size": "11215",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "glop/ir.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "GAP",
"bytes": "19409"
},
{
"name": "Python",
"bytes": "137086"
}
],
"symlink_target": ""
}
|
import pyowm
import time
import random
import datetime
import telepot
import wikipedia
import json
import os
from pyshorteners import Shortener
import subprocess
import bs4
import requests
import youtube_dl
import validators
api = '' # API key for OpenWeatherMap
api_key = '' # API key for Google (URL shortening)
owm = pyowm.OWM(api)
forecast = owm.daily_forecast("Delhi,in") # Weather forecast details
tomorrow = pyowm.timeutils.tomorrow()
forecast.will_be_sunny_at(tomorrow)
observation = owm.weather_at_place('Delhi,in') # Weather location is hard-coded to Delhi; auto-detection could be added later
w = observation.get_weather()
def handle(msg):
chat_id = msg['chat']['id'] # Stores chat id for referencing message
try:
command = msg['text'] # Filters text from the message sent
except KeyError as k:
try:
            file_id=(msg['photo'][-1])['file_id'] # use the largest available photo size
file_path=bot.getFile(file_id)
f_path="https://api.telegram.org/file/bot221225786:AAElg0gODaJi7-xy0AM68eKH5moyuXZOzh0/"+file_path['file_path']
#file=bot.download_file(f_path)
bot.sendPhoto(chat_id,file_id)
return
        except Exception as e:
print(e)
command="Not Applicable"
bot.sendMessage(chat_id,"Please try again with a command or an image /help")
return
print ('Got command: %s' % command)
# Here Starts the command interpretation
if command == '/roll':
bot.sendMessage(chat_id, random.randint(1,6))
elif command == '/time':
bot.sendMessage(chat_id, str(datetime.datetime.now()))
elif command == '/hi':
bot.sendMessage(chat_id, "Hello")
elif command == '/weather':
temperature="Temperature > "+str(w.get_temperature(unit='celsius')['temp'])+" degree celcius\nMaximum Temperature > "+str(w.get_temperature(unit='celsius')['temp_max'])+" degree celcius\nMinimum Temperature > "+str(w.get_temperature(unit='celsius')['temp_min'])+" degree celcius"
wind="Speed > "+str(w.get_wind()['speed'])+"\nDegrees > "+str(w.get_wind()['deg'])+" degrees clockwise from North direction"
pressure="Sea Level > "+str(w.get_pressure()['sea_level'])+"\nPressure > "+str(w.get_pressure()['press'])
bot.sendMessage(chat_id,"New Delhi,India\n( " + w.get_detailed_status() + " )\n\nTemperature Details :\n" + temperature+ "\n\nWind Speed Details :\n" +wind +"\n\nCloud Coverage : \n" + str(w.get_clouds())+"%"+"\n\nHumidity : \n" + str(w.get_humidity())+"%"+"\n\nPressure Details :\n" + pressure+"\n\nData fetched by openweathermap API.All copyrights reserved")
elif '/wiki' in command :
try:
            ny = wikipedia.summary(command[5:].strip(), sentences=7)
bot.sendMessage(chat_id,ny)
except wikipedia.exceptions.DisambiguationError as e:
stri="This may refer to :\n\n"
for i,topic in enumerate(e.options):
stri=stri+str(i)+" "+topic+"\n"
stri=stri+"\nPlease choose anyone from above options"
bot.sendMessage(chat_id,stri)
except wikipedia.exceptions.PageError as e:
bot.sendMessage(chat_id,"No partial/full match found for this")
elif command == '/help' :
bot.sendMessage(chat_id,"""List of supported commands is\n
/hi - Greet Your Device\n
/roll - Rolls a dice\n
/weather - Tells detailed current weather report of Raspberry Pi's location\n
/time - Tells current date and time\n
/wiki <Topic Name> - Does a topic search on wikipedia and gives a summary of the topic.Try long tapping /wiki in autofill\n
/torrent <magnet link/torrent url/infohash> - Adds and downloads torrent to your raspberry pi remotely\n
/torrent_status - Give the detailed status of your torrent(s) you have added/downloaded\n
/url <URL> - Shorten the given URL using Google API(goo.gl).\n
/url_exp <Shortened URL> - Expands the given shortened url made using Google API\n
/speedtest - Does a detailed network speed test using ookla's speedtest API\n
/yt <Youtube video link> - Creates the shortened download link for given youtube video\n
    /news <Topic> - Displays the top 10 latest news headlines about the given topic, fetched from Google News using the BeautifulSoup library.
\n\nSee your autofill for quick selection of command or tap '/' icon on right side of your chat textbox.\n
For Commands with parameters,you can long tap the autosuggestion for quick typing and type your parameter followed by a space.""")
elif '/torrent ' in command :
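        # Caution: user input is passed straight to os.system here, which
        # allows shell injection; subprocess with an argument list would be safer.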
os.system("deluge-console add Desktop "+command[8:len(str(command))])
bot.sendMessage(chat_id,"Torrent Successfully added")
elif command == '/torrent_status':
p = os.popen("deluge-console info")
q = p.read()
try:
bot.sendMessage(chat_id,str(q))
except telepot.exception.TelegramError as e:
bot.sendMessage(chat_id,"No added torrents found for remote download")
p.close()
elif '/url ' in command :
url = str(command[5:len(command)])
if validators.url(url):
shortener = Shortener('Google', api_key=api_key)
bot.sendMessage(chat_id,"Shortened URL is\n" + str(shortener.short(url)))
else:
bot.sendMessage(chat_id,"Please enter a valid url")
elif '/url_exp ' in command:
url = str(command[9:len(command)])
shortener = Shortener('Google', api_key=api_key)
bot.sendMessage(chat_id,"Expanded URL is\n" + shortener.expand(url))
elif command == '/speedtest':
bot.sendMessage(chat_id,"""Wait for a while until we check and measure speed of system's network.
If result does'nt come in 30 seconds,Try again.Little patience is appreciated...""")
try:
p = str(subprocess.check_output(["speedtest-cli"]))
q = p[2:len(p)-1]
r = q.replace("\\r","")
s = r.split("\\n")
bot.sendMessage(chat_id,'\n'.join(s))
except:
bot.sendMessage(chat_id,"Something went wrong,Please try again\n Or\nTry some other commands /help")
elif '/news ' in command :
headers = {'user-agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36'}
tempurl = "https://www.google.com/search?q=%s&num=10&start=10&tbm=nws#q=%s&tbas=0&tbs=sbd:1&tbm=nws&gl=d"
news_topic = command[6:]
url = tempurl % (news_topic,news_topic)
print(url)
ahrefs = []
titles = []
req = requests.get(url, headers=headers)
soup = bs4.BeautifulSoup(req.text, "html.parser")
        # you don't even have to process the div container;
        # just go straight to <a> and use indexing to get "href"
        # headlines
ahref = [a["href"] for a in soup.find_all("a", class_="_HId")]
#"buckets"
ahref += [a["href"] for a in soup.find_all("a", class_="_sQb")]
ahrefs.append(ahref)
        # get_text() returns the text inside the hyperlink
        # (the headline title we want)
title = [a.get_text() for a in soup.find_all("a", class_="_HId")]
title += [a.get_text() for a in soup.find_all("a", class_="_sQb")]
titles.append(title)
#print(ahrefs)
titles = str(titles)
titles = titles.strip("[[]]")
titles = titles.replace('"','\'')
titles=" "+titles
tit = titles.split(',')
ans=""
k=0
for i in tit:
if str(i)[0] == " " and str(i)[1]== "'":
ans=ans+"\n"+str(k+1)+". "+str(i)
k=k+1
else:
ans=ans+"\n"+str(i)
bot.sendMessage(chat_id, "Top "+str(k) +" latest news headlines for the given topic are :\n\n"+ans)
elif '/yt ' in command :
bot.sendMessage(chat_id,"Wait until we create the download link,Sitback and relax..")
url = command[4:len(command)]
ydl = youtube_dl.YoutubeDL({'outtmpl': '%(id)s%(ext)s'})
with ydl:
result = ydl.extract_info(url,download=False) # We just need the info
if 'entries' in result:
# Can be a playlist or a list of videos
video = result['entries'][0]
else:
# Just a video
video = result
video_url = video['url']
        shortener = Shortener('Google', api_key=api_key) # was undefined in this branch
        p = shortener.short(video_url)
bot.sendMessage(chat_id,"Download link for given youtube video is:\n" + p)
elif '/cal ' in command:
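        # Caution: eval() executes arbitrary Python sent by the user; this is
        # only acceptable for a trusted, private bot.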
        ans = eval(str(command[5:len(command)]))
        bot.sendMessage(chat_id,"Answer is:\n" + str(ans))
else :
bot.sendMessage(chat_id,"Type /help for list of supported commands till now,There are many more to come!!")
# Here is the Telegram Bot API key
key = '' # API key for telegram bot
bot = telepot.Bot(key)
bot.message_loop(handle) # Calling bot and keeping it active infinitely
print ('I am listening ...')
while 1:
time.sleep(10)
|
{
"content_hash": "f868143124ea03c4aca897a359f355b8",
"timestamp": "",
"source": "github",
"line_count": 206,
"max_line_length": 368,
"avg_line_length": 43.65533980582524,
"alnum_prop": 0.6218169687534749,
"repo_name": "nsniteshsahni/telepy",
"id": "d2d5ef50928fa525540bbd2788a82757c35b7266",
"size": "8993",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bot.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8993"
}
],
"symlink_target": ""
}
|
from api.src.base import Base
from collections import defaultdict
class Mapper(Base):
    # Accumulator shared by all Mapper instances (class attribute)
output = defaultdict(int)
def handler(self, contents):
        '''
        Perform the mapping operation over one file's contents
        WRITE HERE YOUR OWN LOGIC
        :param contents: unparsed CSV file contents
        :return: None; counts accumulate in self.output
        '''
for line in contents.split('\n')[:-1]:
data = line.split(',')
k = data[0]
self.output[k] += 1
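# Illustrative usage (assuming Base needs no constructor arguments):
#   m = Mapper()
#   m.handler("a,1\nb,2\na,3\n")
#   m.output now holds defaultdict(int, {'a': 2, 'b': 1})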
|
{
"content_hash": "79775f3c447398c20dacf0b7156ba8b8",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 61,
"avg_line_length": 23.434782608695652,
"alnum_prop": 0.5677179962894249,
"repo_name": "iwitaly/big-lambda-serverless",
"id": "6daacda2ff39688da7113ae13638fbec48d628d9",
"size": "539",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "api/mapper.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "15349"
},
{
"name": "Shell",
"bytes": "69"
}
],
"symlink_target": ""
}
|
from django.conf.urls import patterns, url
from robot_control import views
urlpatterns = patterns('',
url(r'^$', views.index, name='index'),
url(r'^tutorial/$', views.tutorial, name='tutorial'),
url(r'^blockly/$', views.blockly, name='blockly'),
url(r'^quiz/$', views.quiz, name='quiz'),
url(r'^joystick-status/$', views.joystick_status, name='joystick_status'),
url(r'^RFID-status/$', views.RFID_status, name='RFID_status'),
url(r'^movements/forward$', views.forward, name='movement_forward'),
url(r'^movements/turn_left$', views.turn_left, name='movement_turn_left'),
url(r'^movements/turn_right$', views.turn_right, name='movement_turn_right'),
url(r'^movements/sensor_wall$', views.sensor_wall, name='movement_sensor_wall'),
)
|
{
"content_hash": "653c72a598d8c1d73038c84864aaf6d2",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 81,
"avg_line_length": 44.88235294117647,
"alnum_prop": 0.6815203145478375,
"repo_name": "gmartinvela/RoMIE",
"id": "566e72d8dc6dc7130cd69dea9d26052a259fd4a8",
"size": "763",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "robot_control/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "8066"
},
{
"name": "C++",
"bytes": "3187"
},
{
"name": "CSS",
"bytes": "161267"
},
{
"name": "JavaScript",
"bytes": "1208495"
},
{
"name": "Python",
"bytes": "61637"
},
{
"name": "Shell",
"bytes": "55"
}
],
"symlink_target": ""
}
|
def command():
return "edit-farm"
def init_argument(parser):
parser.add_argument("--farm-no", required=True)
parser.add_argument("--comment", required=False)
def execute(requester, args):
farm_no = args.farm_no
comment = args.comment
parameters = {}
parameters["FarmNo"] = farm_no
    if comment is not None:
parameters["Comment"] = comment
return requester.execute("/EditFarm", parameters)
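# Illustrative invocation (the CLI entry point name is hypothetical):
#   $ pcc edit-farm --farm-no 3 --comment "staging farm"
# builds parameters {"FarmNo": "3", "Comment": "staging farm"} and calls the
# /EditFarm API through the requester.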
|
{
"content_hash": "864cfe69850c0fab9a77780473407ca4",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 53,
"avg_line_length": 24.11111111111111,
"alnum_prop": 0.652073732718894,
"repo_name": "primecloud-controller-org/pcc-cli",
"id": "c60558c48de526aaf07f7af0d6dc76fa00b32623",
"size": "459",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/pcc/api/farm/edit_farm.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "81"
},
{
"name": "Python",
"bytes": "37461"
},
{
"name": "Shell",
"bytes": "251"
}
],
"symlink_target": ""
}
|
from django.db import models
from django.core.validators import RegexValidator
from django.contrib.auth.models import (
BaseUserManager, AbstractUser
)
from django.utils import timezone
class UserManager(BaseUserManager):
def _create_user(self, username, email, avatar,
is_staff, is_superuser, **extra_fields):
"""
Creates and saves a User with the given username, email and
avatar.
"""
now = timezone.now()
if not username:
raise ValueError('The given username must be set')
email = self.normalize_email(email)
if avatar is None:
from api.utils import make_default_avatar
avatar = make_default_avatar(email)
user = self.model(username=username, email=email, avatar=avatar,
is_staff=is_staff, is_active=True,
is_superuser=is_superuser, last_login=now,
date_joined=now, **extra_fields)
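        # No password is taken from the caller; a random one is set so there is
        # no guessable default (authentication presumably happens through the
        # social provider that also supplies the avatar).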
user.set_password(self.make_random_password())
user.save(using=self._db)
return user
def create_user(self, username, email=None, avatar=None, **extra_fields):
"""
"""
return self._create_user(username, email, avatar,
False, False, **extra_fields)
def create_superuser(self, username, email, avatar=None, **extra_fields):
"""
"""
return self._create_user(username, email, avatar,
True, True, **extra_fields)
class User(AbstractUser):
    # add an avatar, retrieved from the social account
avatar = models.URLField(default='')
objects = UserManager()
USERNAME_FIELD = 'username'
REQUIRED_FIELDS = ['email']
|
{
"content_hash": "d11ecf26b806be7836e831722f524d91",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 77,
"avg_line_length": 33.490566037735846,
"alnum_prop": 0.5915492957746479,
"repo_name": "ideal/bublfish",
"id": "564ffca9a34f107567b709bd29696e655eacba62",
"size": "1839",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "account/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "35855"
}
],
"symlink_target": ""
}
|
'''
cocos.director.director is the singleton that creates and handles the main ``Window``
and manages the logic behind the ``Scenes``.
Initializing
------------
The first thing to do, is to initialize the ``director``::
from cocos.director import director
director.init( parameters )
This will initialize the director, and will create a display area
(a 640x480 window by default).
The parameters that are supported by director.init() are the same
parameters that are supported by pyglet.window.Window(), plus a few
cocos exclusive ones. They are all named parameters (kwargs).
See ``Director.init()`` for details.
Example::
director.init( width=800, height=600, caption="Hello World", fullscreen=True )
Running a Scene
----------------
Once you have initialized the director, you can run your first ``Scene``::
director.run( Scene( MyLayer() ) )
This will run a scene that has only 1 layer: ``MyLayer()``. You can run a scene
that has multiple layers. For more information about ``Layers`` and ``Scenes``
refer to the ``Layers`` and ``Scene`` documentation.
Once a scene is running you can do the following actions:
* ``director.replace( new_scene ):``
Replaces the running scene with the new_scene
You could also use a transition. For example:
director.replace( SplitRowsTransition( new_scene, duration=2 ) )
* ``director.push( new_scene ):``
The running scene will be pushed to a queue of scenes to run,
and new_scene will be executed.
* ``director.pop():``
Will pop out a scene from the queue, and it will replace the running scene.
* ``director.scene.end( end_value ):``
Finishes the current scene with an end value of ``end_value``. The next scene
to be run will be popped from the queue.
Other functions you can use are:
* ``director.get_window_size():``
Returns an (x,y) pair with the _logical_ dimensions of the display.
The display might have been resized, but coordinates are always relative
to this size. If you need the _physical_ dimensions, check the dimensions
of ``director.window``
* ``get_virtual_coordinates(self, x, y):``
Transforms coordinates that belongs the real (physical) window size, to
the coordinates that belongs to the virtual (logical) window. Returns
an x,y pair in logical coordinates.
The director also has some useful attributes:
* ``director.return_value``: The value returned by the last scene that
called ``director.scene.end``. This is useful to use scenes somewhat like
function calls: you push a scene to call it, and check the return value
when the director returns control to you.
* ``director.window``: This is the pyglet window handled by this director,
if you happen to need low level access to it.
* ``self.show_FPS``: You can set this to a boolean value to enable or disable
  the framerate indicator.
* ``self.scene``: The scene currently active
'''
__docformat__ = 'restructuredtext'
import sys
from os import getenv
import pyglet
from pyglet import window, event
from pyglet import clock
#from pyglet import media
from pyglet.gl import *
import cocos, cocos.audio, cocos.custom_clocks
if hasattr(sys, 'is_epydoc') and sys.is_epydoc:
__all__ = ['director', 'Director', 'DefaultHandler']
else:
__all__ = ['director', 'DefaultHandler']
class DefaultHandler( object ):
def __init__(self):
super(DefaultHandler,self).__init__()
self.wired = False
def on_key_press( self, symbol, modifiers ):
if symbol == pyglet.window.key.F and (modifiers & pyglet.window.key.MOD_ACCEL):
director.window.set_fullscreen( not director.window.fullscreen )
return True
elif symbol == pyglet.window.key.P and (modifiers & pyglet.window.key.MOD_ACCEL):
import scenes.pause as pause
pause_sc = pause.get_pause_scene()
            if pause_sc:
director.push( pause_sc )
return True
elif symbol == pyglet.window.key.W and (modifiers & pyglet.window.key.MOD_ACCEL):
# import wired
if self.wired == False:
glDisable(GL_TEXTURE_2D);
glPolygonMode(GL_FRONT, GL_LINE);
glPolygonMode(GL_BACK, GL_LINE);
# wired.wired.install()
# wired.wired.uset4F('color', 1.0, 1.0, 1.0, 1.0 )
self.wired = True
else:
glEnable(GL_TEXTURE_2D);
glPolygonMode(GL_FRONT, GL_FILL);
glPolygonMode(GL_BACK, GL_FILL);
self.wired = False
# wired.wired.uninstall()
return True
elif symbol == pyglet.window.key.X and (modifiers & pyglet.window.key.MOD_ACCEL):
director.show_FPS = not director.show_FPS
return True
elif symbol == pyglet.window.key.I and (modifiers & pyglet.window.key.MOD_ACCEL):
from layer import PythonInterpreterLayer
if not director.show_interpreter:
if director.python_interpreter == None:
director.python_interpreter = cocos.scene.Scene( PythonInterpreterLayer() )
director.python_interpreter.enable_handlers( True )
director.python_interpreter.on_enter()
director.show_interpreter = True
else:
director.python_interpreter.on_exit()
director.show_interpreter= False
return True
elif symbol == pyglet.window.key.S and (modifiers & pyglet.window.key.MOD_ACCEL):
import time
pyglet.image.get_buffer_manager().get_color_buffer().save('screenshot-%d.png' % (int( time.time() ) ) )
return True
if symbol == pyglet.window.key.ESCAPE:
director.pop()
return True
class Director(event.EventDispatcher):
"""Class that creates and handle the main Window and manages how
and when to execute the Scenes
You should not directly instantiate the class, instead you do::
from cocos.director import director
to access the only one Director instance.
"""
#: a dict with locals for the interactive python interpreter (fill with what you need)
interpreter_locals = {}
def init(self, *args, **kwargs):
"""
Initializes the Director creating the main window.
There are a few cocos exclusive parameters, the rest are the
standard pyglet parameters for pyglet.window.Window.__init__
        This docstring only partially lists the pyglet parameters; a full
list is available at pyglet Window API Reference at
http://pyglet.org/doc/api/pyglet.window.Window-class.html
:Parameters:
`do_not_scale` : bool
                False: on window resizes, cocos will scale the view so that your
                app doesn't need to handle resizes.
                True: your app must include logic to deal with different window
                sizes along the session.
Defaults to False
`audio_backend` : string
one in ['pyglet','sdl']. Defaults to 'pyglet' for legacy support.
`audio` : dict or None
None or a dict providing parameters for the sdl audio backend.
None: in this case a "null" audio system will be used, where all the
sdl sound operations will be no-ops. This may be useful if you do not
want to depend on SDL_mixer
A dictionary with string keys; these are the arguments for setting up
the audio output (sample rate and bit-width, channels, buffer size).
The key names/values should match the positional arguments of
http://www.pygame.org/docs/ref/mixer.html#pygame.mixer.init
The default value is {}, which means sound enabled with default
settings
`fullscreen` : bool
Window is created in fullscreen. Default is False
`resizable` : bool
Window is resizable. Default is False
`vsync` : bool
Sync with the vertical retrace. Default is True
`width` : int
Window width size. Default is 640
`height` : int
Window height size. Default is 480
`caption` : string
Window title.
`visible` : bool
Window is visible or not. Default is True.
:rtype: pyglet.window.Window
:returns: The main window, an instance of pyglet.window.Window class.
"""
#: whether or not the FPS are displayed
self.show_FPS = False
#: stack of scenes
self.scene_stack = []
#: scene that is being run
self.scene = None
#: this is the next scene that will be shown
self.next_scene = None
# python interpreter
self.python_interpreter = None
#: whether or not to show the python interpreter
self.show_interpreter = False
#: flag requesting app termination
self.terminate_app = False
# pop out the Cocos-specific flags
self.do_not_scale_window = kwargs.pop('do_not_scale', False)
audio_backend = kwargs.pop('audio_backend', 'pyglet')
audio_settings = kwargs.pop('audio', {})
# handle pyglet 1.1.x vs 1.2dev differences in fullscreen
self._window_virtual_width = kwargs.get('width', None)
self._window_virtual_height = kwargs.get('height', None)
if pyglet.version.startswith('1.1') and kwargs.get('fullscreen', False):
            # pyglet 1.1.x doesn't allow fullscreen with explicit width or height
kwargs.pop('width', 0)
kwargs.pop('height', 0)
#: pyglet's window object
self.window = window.Window( *args, **kwargs )
# complete the viewport geometry info, both virtual and real,
        # also set the appropriate on_resize handler
if self._window_virtual_width is None:
self._window_virtual_width = self.window.width
if self._window_virtual_height is None:
self._window_virtual_height = self.window.height
self._window_virtual_aspect = (
self._window_virtual_width / float( self._window_virtual_height ))
self._offset_x = 0
self._offset_y = 0
if self.do_not_scale_window:
resize_handler = self.unscaled_resize_window
self.set_projection = self.set_projection2D
else:
resize_handler = self.scaled_resize_window
self.set_projection = self.set_projection3D
# the offsets and size for the viewport will be proper after this
self._resize_no_events = True
resize_handler(self.window.width, self.window.height)
self._resize_no_events = False
self.window.push_handlers(on_resize=resize_handler)
self.window.push_handlers(self.on_draw)
# opengl settings
self.set_alpha_blending()
# default handler
self.window.push_handlers( DefaultHandler() )
        # Environment variable COCOS2D_NOSOUND=1 overrides audio settings
if getenv('COCOS2D_NOSOUND', None) == '1' or audio_backend == 'pyglet':
audio_settings = None
# if audio is not working, better to not work at all. Except if
        # explicitly instructed to continue
if not cocos.audio._working and audio_settings is not None:
from cocos.audio.exceptions import NoAudioError
msg = "cocos.audio isn't able to work without needed dependencies. " \
"Try installing pygame for fixing it, or forcing no audio " \
"mode by calling director.init with audio=None, or setting the " \
"COCOS2D_NOSOUND=1 variable in your env."
raise NoAudioError(msg)
# Audio setup:
#TODO: reshape audio to not screw unittests
import os
if not os.environ.get('cocos_utest', False):
cocos.audio.initialize(audio_settings)
return self.window
fps_display = None
def set_show_FPS(self, value):
if value and self.fps_display is None:
self.fps_display = clock.ClockDisplay()
elif not value and self.fps_display is not None:
self.fps_display.unschedule()
self.fps_display = None
show_FPS = property(lambda self: self.fps_display is not None,
set_show_FPS)
def run(self, scene):
"""Runs a scene, entering in the Director's main loop.
:Parameters:
`scene` : `Scene`
The scene that will be run.
"""
self._set_scene( scene )
event_loop.run()
def set_recorder(self, framerate, template="frame-%d.png", duration=None):
        '''Replaces the app clock so that we can ensure a steady
        frame rate and save one image per frame
        :Parameters:
`framerate`: int
the number of frames per second
`template`: str
                the template that will be completed with an int for the name of the files
`duration`: float
                the number of seconds to record, or 0 for infinite
'''
clock = cocos.custom_clocks.get_recorder_clock(framerate, template, duration)
cocos.custom_clocks.set_app_clock(clock)
def on_draw( self ):
"""Handles the event 'on_draw' from the pyglet.window.Window
        Performs any pending scene switch and app termination if needed
        Clears the window area
        The window is painted as:
        - Render the current scene by calling its visit method
        - Eventually draw the FPS meter
        - Eventually draw the interpreter
        When the window is minimized any pending scene switch will be
        delayed until the window is restored.
"""
# typically True when window minimized
if ((self.window.width==0 or self.window.height==0) and
not self.terminate_app):
# if surface area is zero, we don't need to draw; also
            # we don't want to allow scene changes in this situation: usually
# on_enter does some scaling, which would lead to division by zero
return
# handle scene changes and app termination
if self.terminate_app:
self.next_scene = None
if self.next_scene is not None or self.terminate_app:
self._set_scene( self.next_scene )
if self.terminate_app:
pyglet.app.exit()
return
self.window.clear()
# draw all the objects
glPushMatrix()
self.scene.visit()
glPopMatrix()
# finally show the FPS
if self.show_FPS:
self.fps_display.draw()
if self.show_interpreter:
self.python_interpreter.visit()
def push(self, scene):
"""Suspends the execution of the running scene, pushing it
on the stack of suspended scenes. The new scene will be executed.
:Parameters:
`scene` : `Scene`
It is the scene that will be run.
"""
self.dispatch_event("on_push", scene )
def on_push( self, scene ):
self.next_scene = scene
self.scene_stack.append( self.scene )
def pop(self):
"""If the scene stack is empty the appication is terminated.
Else pops out a scene from the stack and sets as the running one.
"""
self.dispatch_event("on_pop")
def on_pop(self):
if len(self.scene_stack)==0:
self.terminate_app = True
else:
self.next_scene = self.scene_stack.pop()
def replace(self, scene):
"""Replaces the running scene with a new one. The running scene is terminated.
:Parameters:
`scene` : `Scene`
It is the scene that will be run.
"""
self.next_scene = scene
def _set_scene(self, scene ):
"""Makes scene the current scene
Operates on behalf of the public scene switching methods
User code must not call directly
"""
# Even library code should not call it directly: instead set
        # .next_scene and let 'on_draw' call here at the proper time
self.next_scene = None
# always true except for first scene in the app
if self.scene is not None:
self.scene.on_exit()
self.scene.enable_handlers( False )
old = self.scene
self.scene = scene
# always true except when terminating the app
if self.scene is not None:
self.scene.enable_handlers( True )
scene.on_enter()
return old
#
# Window Helper Functions
#
def get_window_size( self ):
"""Returns the size of the window when it was created, and not the
actual size of the window.
Usually you don't want to know the current window size, because the
Director() hides the complexity of rescaling your objects when
the Window is resized or if the window is made fullscreen.
        If you created a window of 640x480, then you should continue to place
your objects in a 640x480 world, no matter if your window is resized or not.
Director will do the magic for you.
:rtype: (x,y)
:returns: The size of the window when it was created
"""
return ( self._window_virtual_width, self._window_virtual_height)
def get_virtual_coordinates( self, x, y ):
"""Transforms coordinates that belongs the *real* window size, to the
coordinates that belongs to the *virtual* window.
For example, if you created a window of 640x480, and it was resized
to 640x1000, then if you move your mouse over that window,
it will return the coordinates that belongs to the newly resized window.
Probably you are not interested in those coordinates, but in the coordinates
        that belong to your *virtual* window.
:rtype: (x,y)
:returns: Transformed coordinates from the *real* window to the *virtual* window
"""
x_diff = self._window_virtual_width / float( self.window.width - self._offset_x * 2 )
y_diff = self._window_virtual_height / float( self.window.height - self._offset_y * 2 )
adjust_x = (self.window.width * x_diff - self._window_virtual_width ) / 2
adjust_y = (self.window.height * y_diff - self._window_virtual_height ) / 2
return ( int( x_diff * x) - adjust_x, int( y_diff * y ) - adjust_y )
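    # Worked example (illustrative): a 640x480 virtual window rendered
    # fullscreen at 1280x960 with no letterbox offsets gives
    # x_diff = y_diff = 0.5 and zero adjustments, so a physical click at
    # (1280, 960) maps to the virtual corner (640, 480).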
def scaled_resize_window( self, width, height):
"""One of two possible methods that are called when the main window is resized.
This implementation scales the display such that the initial resolution
requested by the programmer (the "logical" resolution) is always retained
and the content scaled to fit the physical display.
This implementation also sets up a 3D projection for compatibility with the
largest set of Cocos transforms.
The other implementation is `unscaled_resize_window`.
:Parameters:
`width` : Integer
New width
`height` : Integer
New height
"""
# physical view size
pw, ph = width, height
# virtual (desired) view size
vw, vh = self.get_window_size()
# desired aspect ratio
v_ar = vw/float(vh)
        # usable width, height
uw = int(min(pw, ph*v_ar))
uh = int(min(ph, pw/v_ar))
ox = (pw-uw)//2
oy = (ph-uh)//2
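        # ox/oy center the usable area, producing letterbox or pillarbox bars
        # when the physical aspect ratio differs from the virtual one.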
self._offset_x = ox
self._offset_y = oy
self._usable_width = uw
self._usable_height = uh
self.set_projection()
if self._resize_no_events:
# setting viewport geometry, not handling an event
return
# deprecated - see issue 154
self.dispatch_event("on_resize", width, height)
self.dispatch_event("on_cocos_resize", self._usable_width, self._usable_height)
# dismiss the pyglet BaseWindow default 'on_resize' handler
return pyglet.event.EVENT_HANDLED
def unscaled_resize_window(self, width, height):
"""One of two possible methods that are called when the main window is resized.
This implementation does not scale the display but rather forces the logical
resolution to match the physical one.
This implementation sets up a 2D projection, resulting in the best pixel
alignment possible. This is good for text and other detailed 2d graphics
rendering.
The other implementation is `scaled_resize_window`.
:Parameters:
`width` : Integer
New width
`height` : Integer
New height
"""
self._usable_width = width
self._usable_height = height
if self._resize_no_events:
# setting viewport geometry, not handling an event
return
# deprecated - see issue 154
self.dispatch_event("on_resize", width, height)
self.dispatch_event("on_cocos_resize", self._usable_width, self._usable_height)
def set_projection(self):
"""
placeholder, will be set to one of set_projection2D or set_projection3D
when director.init is called
"""
pass
def set_projection3D(self):
        '''Sets a 3D projection maintaining the aspect ratio of the original window size'''
# virtual (desired) view size
vw, vh = self.get_window_size()
glViewport(self._offset_x, self._offset_y, self._usable_width, self._usable_height)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
gluPerspective(60, self._usable_width/float(self._usable_height), 0.1, 3000.0)
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
gluLookAt( vw/2.0, vh/2.0, vh/1.1566, # eye
vw/2.0, vh/2.0, 0, # center
0.0, 1.0, 0.0 # up vector
)
def set_projection2D(self):
"""Sets a 2D projection (ortho) covering all the window"""
# called only for the side effect of setting matrices in pyglet
self.window.on_resize(self._usable_width, self._usable_height)
#
# Misc functions
#
def set_alpha_blending( self, on=True ):
"""
Enables/Disables alpha blending in OpenGL
using the GL_ONE_MINUS_SRC_ALPHA algorithm.
On by default.
"""
if on:
glEnable(GL_BLEND)
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
else:
glDisable(GL_BLEND)
    def set_depth_test( self, on=True ):
        '''Enables/disables depth (z) testing. On by default.
        '''
if on:
glClearDepth(1.0)
glEnable(GL_DEPTH_TEST)
glDepthFunc(GL_LEQUAL)
glHint(GL_PERSPECTIVE_CORRECTION_HINT, GL_NICEST)
else:
glDisable( GL_DEPTH_TEST )
event_loop = pyglet.app.event_loop
if not hasattr(event_loop, "event"):
event_loop = pyglet.app.EventLoop()
director = Director()
director.event = event_loop.event
"""The singleton; check `cocos.director.Director` for details on usage.
Don't instantiate Director(). Just use this singleton."""
director.interpreter_locals["director"] = director
director.interpreter_locals["cocos"] = cocos
Director.register_event_type('on_push')
Director.register_event_type('on_pop')
Director.register_event_type('on_resize')
Director.register_event_type('on_cocos_resize')
|
{
"content_hash": "e12fcbe1b998c860cd14a14678f24d1b",
"timestamp": "",
"source": "github",
"line_count": 662,
"max_line_length": 115,
"avg_line_length": 37.240181268882175,
"alnum_prop": 0.5946132316553766,
"repo_name": "shadowmint/nwidget",
"id": "f842e6eeadfadd4832c8e763c5d336765699381f",
"size": "26462",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/cocos2d-0.5.5/cocos/director.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "11298"
},
{
"name": "JavaScript",
"bytes": "17394"
},
{
"name": "PHP",
"bytes": "2190"
},
{
"name": "Python",
"bytes": "9815941"
},
{
"name": "Shell",
"bytes": "10521"
}
],
"symlink_target": ""
}
|
import argparse
import logging
import pathlib
import sys
from builder import Builder
def main():
logging.basicConfig(format="%(levelname)s - %(message)s", level=logging.INFO)
parser = argparse.ArgumentParser(description="Build HTML pages from RST sources")
parser.add_argument(
"--build-dir",
required=True,
type=pathlib.Path,
help="build directory where HTML files will be stored",
)
parser.add_argument("pages", nargs="*", type=pathlib.Path, help="list of RST pages to render")
args = parser.parse_args()
if not args.build_dir.is_dir():
sys.exit(f"Error: '{args.build_dir}' is not a valid build directory")
builder = Builder(args.pages, args.build_dir)
builder.run()
if __name__ == "__main__":
main()
|
{
"content_hash": "3e37505143577b85e3c311ed452c8315",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 98,
"avg_line_length": 27.24137931034483,
"alnum_prop": 0.6569620253164556,
"repo_name": "napnac/napnac.fr",
"id": "3a723d344bba59e4997c82bcaef577df2a79fef9",
"size": "814",
"binary": false,
"copies": "5",
"ref": "refs/heads/main",
"path": "builder/main.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "37414"
},
{
"name": "C++",
"bytes": "7569"
},
{
"name": "CSS",
"bytes": "6555"
},
{
"name": "HTML",
"bytes": "660773"
},
{
"name": "JavaScript",
"bytes": "414"
},
{
"name": "Makefile",
"bytes": "419"
},
{
"name": "Matlab",
"bytes": "493"
},
{
"name": "Python",
"bytes": "10528"
},
{
"name": "Shell",
"bytes": "868"
},
{
"name": "TeX",
"bytes": "149285"
}
],
"symlink_target": ""
}
|
from common.methods import set_progress
from azure.common.credentials import ServicePrincipalCredentials
from resourcehandlers.azure_arm.models import AzureARMHandler
from azure.mgmt.sql import SqlManagementClient
from msrestazure.azure_exceptions import CloudError
import azure.mgmt.resource.resources as resources
RESOURCE_IDENTIFIER = 'azure_server_name'
def get_tenant_id_for_azure(handler):
'''
    Handle Azure RH table differences between older and newer versions (> 9.4.5)
'''
if hasattr(handler,"azure_tenant_id"):
return handler.azure_tenant_id
return handler.tenant_id
def discover_resources(**kwargs):
discovered_azure_sql = []
for handler in AzureARMHandler.objects.all():
        set_progress('Connecting to Azure SQL DB for handler: {}'.format(handler))
credentials = ServicePrincipalCredentials(
client_id=handler.client_id,
secret=handler.secret,
tenant=get_tenant_id_for_azure(handler)
)
azure_client = SqlManagementClient(credentials, handler.serviceaccount)
azure_resources_client = resources.ResourceManagementClient(credentials, handler.serviceaccount)
for resource_group in azure_resources_client.resource_groups.list():
try:
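                # Note: this reaches into the pager's private _get_next() to
                # read the raw JSON page, so only the first page of servers per
                # resource group appears to be examined.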
for server in azure_client.servers.list_by_resource_group(resource_group.name)._get_next().json()['value']:
discovered_azure_sql.append(
{
'name': server['name'],
'azure_server_name': server['name'],
'resource_group_name': resource_group.name,
'azure_rh_id': handler.id
}
)
except CloudError:
continue
return discovered_azure_sql
|
{
"content_hash": "c9ae4fb199c7efd9dd0993aa8c56f740",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 123,
"avg_line_length": 37.84,
"alnum_prop": 0.6178646934460887,
"repo_name": "CloudBoltSoftware/cloudbolt-forge",
"id": "7b4ede8ba4b615b3f49b57dc96816dcce4965c01",
"size": "1892",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "blueprints/azure_sql_server/sync.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1665"
},
{
"name": "HTML",
"bytes": "165828"
},
{
"name": "JavaScript",
"bytes": "1871"
},
{
"name": "PowerShell",
"bytes": "5779"
},
{
"name": "Python",
"bytes": "1742154"
},
{
"name": "Shell",
"bytes": "16836"
}
],
"symlink_target": ""
}
|
'''
It's a very simple example of a "text-based game",
where you can go to one room or another.
It uses classes, inheritance and composition.
Of course, it can be improved or extended in the future.
'''
class Game(object):
def __init__(self):
self.kitchen = Kitchen()
self.living_room = LivingRoom()
def start(self):
print "starting game..."
action = raw_input("choose room: ")
if action == "kitchen":
self.kitchen.enter()
elif action == "living_room":
self.living_room.enter()
else:
print "I don't know that place"
class Room(object):
def enter(self):
pass
def leave(self):
print "leaving room"
class Kitchen(Room):
def enter(self):
print "entering Kitchen..."
class LivingRoom(Room):
def enter(self):
print "entering Living Room..."
game = Game()
game.start()
|
{
"content_hash": "9218f1c9432026de7b09e37f8a150fc2",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 56,
"avg_line_length": 21.615384615384617,
"alnum_prop": 0.6476868327402135,
"repo_name": "pwittchen/learn-python-the-hard-way",
"id": "1a276d0c4c81301f2dcbff6ddaeb7404767c75e5",
"size": "875",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "exercises/exercise45.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "996"
},
{
"name": "Python",
"bytes": "62483"
},
{
"name": "Shell",
"bytes": "144"
}
],
"symlink_target": ""
}
|
from tempest.api.identity import base
from tempest.common.utils import data_utils
from tempest.test import attr
class PoliciesTestJSON(base.BaseIdentityAdminTest):
_interface = 'json'
def _delete_policy(self, policy_id):
resp, _ = self.policy_client.delete_policy(policy_id)
self.assertEqual(204, resp.status)
@attr(type='smoke')
def test_list_policies(self):
# Test to list policies
policy_ids = list()
fetched_ids = list()
for _ in range(3):
blob = data_utils.rand_name('BlobName-')
policy_type = data_utils.rand_name('PolicyType-')
resp, policy = self.policy_client.create_policy(blob,
policy_type)
# Delete the Policy at the end of this method
self.addCleanup(self._delete_policy, policy['id'])
policy_ids.append(policy['id'])
# List and Verify Policies
resp, body = self.policy_client.list_policies()
self.assertEqual(resp['status'], '200')
for p in body:
fetched_ids.append(p['id'])
missing_pols = [p for p in policy_ids if p not in fetched_ids]
self.assertEqual(0, len(missing_pols))
@attr(type='smoke')
def test_create_update_delete_policy(self):
# Test to update policy
blob = data_utils.rand_name('BlobName-')
policy_type = data_utils.rand_name('PolicyType-')
resp, policy = self.policy_client.create_policy(blob, policy_type)
self.addCleanup(self._delete_policy, policy['id'])
self.assertIn('id', policy)
self.assertIn('type', policy)
self.assertIn('blob', policy)
self.assertIsNotNone(policy['id'])
self.assertEqual(blob, policy['blob'])
self.assertEqual(policy_type, policy['type'])
resp, fetched_policy = self.policy_client.get_policy(policy['id'])
self.assertEqual(resp['status'], '200')
# Update policy
update_type = data_utils.rand_name('UpdatedPolicyType-')
resp, data = self.policy_client.update_policy(
policy['id'], type=update_type)
self.assertIn('type', data)
# Assertion for updated value with fetched value
resp, fetched_policy = self.policy_client.get_policy(policy['id'])
self.assertIn('id', fetched_policy)
self.assertIn('blob', fetched_policy)
self.assertIn('type', fetched_policy)
self.assertEqual(fetched_policy['id'], policy['id'])
self.assertEqual(fetched_policy['blob'], policy['blob'])
self.assertEqual(update_type, fetched_policy['type'])
class PoliciesTestXML(PoliciesTestJSON):
_interface = 'xml'
|
{
"content_hash": "0df401955b038a9ae48090c089a4c281",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 74,
"avg_line_length": 41.87692307692308,
"alnum_prop": 0.6160911094783248,
"repo_name": "eltonkevani/tempest_el_env",
"id": "48f8fcd6af9730befcbf7e6dc1d6d5e9a117b2e6",
"size": "3403",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tempest/api/identity/admin/v3/test_policies.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1871339"
},
{
"name": "Shell",
"bytes": "5748"
}
],
"symlink_target": ""
}
|
""" ManagedInstanceGroup: describes a managed instance group
"""
from googleapiclient.http import HttpError
from vm_network_migration.modules.instance_group_modules.instance_group import InstanceGroup
class ManagedInstanceGroup(InstanceGroup):
def __init__(self, compute, project, instance_group_name, network_name,
subnetwork_name, preserve_instance_ip):
""" Initialization
Args:
compute: google compute engine
project: project ID
instance_group_name: name of the instance group
network_name: target network
subnetwork_name: target subnet
            preserve_instance_ip: (only valid for an unmanaged instance group)
            whether to preserve the instances' external IPs
"""
super(ManagedInstanceGroup, self).__init__(compute, project,
instance_group_name,
network_name,
subnetwork_name,
preserve_instance_ip)
self.instance_group_manager_api = None
self.autoscaler_api = None
self.operation = None
# self.zone_or_region is the region name for a RegionManagedInstanceGroup, and
# is the zone name for a SingleZoneManagedInstanceGroup
self.zone_or_region = None
self.original_instance_group_configs = None
self.new_instance_group_configs = None
self.is_multi_zone = False
self.autoscaler = None
self.autoscaler_configs = None
self.selfLink = None
def get_instance_group_configs(self) -> dict:
""" Get the configs of the instance group
Returns: configs
"""
args = {
'project': self.project,
'instanceGroupManager': self.instance_group_name
}
self.add_zone_or_region_into_args(args)
return self.instance_group_manager_api.get(**args).execute()
def create_instance_group(self, configs) -> dict:
""" Create an instance group
Args:
configs: instance group's configs
Returns: a deserialized object of the response
"""
args = {
'project': self.project,
'body': configs
}
self.add_zone_or_region_into_args(args)
create_instance_group_operation = self.instance_group_manager_api.insert(
**args).execute()
if self.is_multi_zone:
self.operation.wait_for_region_operation(
create_instance_group_operation['name'])
else:
self.operation.wait_for_zone_operation(
create_instance_group_operation['name'])
# If an autoscaler serves the original instance group,
# it should be recreated
        if self.autoscaler is not None and not self.autoscaler_exists():
self.insert_autoscaler()
return create_instance_group_operation
def delete_instance_group(self) -> dict:
""" Delete an instance group
Returns: a deserialized object of the response
"""
        if self.autoscaler is not None and self.autoscaler_exists():
self.delete_autoscaler()
args = {
'project': self.project,
'instanceGroupManager': self.instance_group_name
}
self.add_zone_or_region_into_args(args)
delete_instance_group_operation = self.instance_group_manager_api.delete(
**args).execute()
if self.is_multi_zone:
self.operation.wait_for_region_operation(
delete_instance_group_operation['name'])
else:
self.operation.wait_for_zone_operation(
delete_instance_group_operation['name'])
return delete_instance_group_operation
def retrieve_instance_template_name(self, instance_group_configs) -> str:
""" Get the name of the instance template which is used by
the instance group
Args:
instance_group_configs: configs of the instance group
Returns: name of the instance template
"""
instance_template_link = instance_group_configs['instanceTemplate']
return instance_template_link.split('/')[-1]
def modify_instance_group_configs_with_instance_template(self,
instance_group_configs,
instance_template_link) -> dict:
""" Modify the instance group with the new instance template link
Args:
instance_group_configs: configs of the instance group
instance_template_link: instance template link
Returns: modified configs of the instance group
"""
instance_group_configs['instanceTemplate'] = instance_template_link
instance_group_configs['versions'][0][
'instanceTemplate'] = instance_template_link
return instance_group_configs
def add_zone_or_region_into_args(self, args):
""" Add the zone/region key into args.
Args:
args: a dictionary object
"""
if self.is_multi_zone:
args['region'] = self.zone_or_region
else:
args['zone'] = self.zone_or_region
def get_autoscaler(self):
""" Get the autoscaler's name which is serving the instance group
Returns: autoscaler's name if there is an autoscaler
"""
        if self.original_instance_group_configs is None:
self.original_instance_group_configs = self.get_instance_group_configs()
if 'autoscaler' not in self.original_instance_group_configs['status']:
return None
else:
return \
self.original_instance_group_configs['status'][
'autoscaler'].split(
'/')[-1]
def get_autoscaler_configs(self):
""" Get the configs of the instance group's autoscaler
Returns: configs
"""
        if self.autoscaler is not None:
args = {
'project': self.project,
'autoscaler': self.autoscaler
}
self.add_zone_or_region_into_args(args)
autoscaler_configs = self.autoscaler_api.get(**args).execute()
return autoscaler_configs
return None
def autoscaler_exists(self) -> bool:
""" Check if the autoscaler exists
Returns: boolean
"""
try:
autoscaler_configs = self.get_autoscaler_configs()
except HttpError:
return False
else:
            return autoscaler_configs is not None
def delete_autoscaler(self) -> dict:
""" Delete the autoscaler
Returns: a deserialized object of the response
"""
args = {
'project': self.project,
'autoscaler': self.autoscaler
}
self.add_zone_or_region_into_args(args)
delete_autoscaler_operation = self.autoscaler_api.delete(
**args).execute()
if self.is_multi_zone:
self.operation.wait_for_region_operation(
delete_autoscaler_operation['name'])
else:
self.operation.wait_for_zone_operation(
delete_autoscaler_operation['name'])
return delete_autoscaler_operation
def insert_autoscaler(self) -> dict:
"""Create an autoscaler
Returns: a deserialized object of the response
"""
args = {
'project': self.project,
'body': self.autoscaler_configs
}
self.add_zone_or_region_into_args(args)
insert_autoscaler_operation = self.autoscaler_api.insert(
**args).execute()
if self.is_multi_zone:
self.operation.wait_for_region_operation(
insert_autoscaler_operation['name'])
else:
self.operation.wait_for_zone_operation(
insert_autoscaler_operation['name'])
return insert_autoscaler_operation
def set_target_pool(self, target_pool_selfLink):
""" Set the target pool of the managed instance group
Args:
target_pool_selfLink: selfLink of the target pool
Returns: a deserialized Python object of the response
"""
current_target_pools = self.get_target_pools()
current_target_pools.append(target_pool_selfLink)
args = {
'project': self.project,
'instanceGroupManager': self.instance_group_name,
'body': {
'targetPools': current_target_pools
}
}
self.add_zone_or_region_into_args(args)
set_target_pool_operation = self.instance_group_manager_api.setTargetPools(
**args).execute()
if self.is_multi_zone:
self.operation.wait_for_region_operation(
set_target_pool_operation['name'])
else:
self.operation.wait_for_zone_operation(
set_target_pool_operation['name'])
return set_target_pool_operation
def remove_target_pool(self, target_pool_selfLink):
""" Remove the target pool of the managed instance group
Args:
target_pool_selfLink: selfLink of the target pool
Returns: a deserialized Python object of the response
"""
current_target_pools = self.get_target_pools()
current_target_pools.remove(target_pool_selfLink)
args = {
'project': self.project,
'instanceGroupManager': self.instance_group_name,
'body': {
'targetPools': current_target_pools
}
}
self.add_zone_or_region_into_args(args)
remove_target_pool_operation = self.instance_group_manager_api.setTargetPools(
**args).execute()
if self.is_multi_zone:
self.operation.wait_for_region_operation(
remove_target_pool_operation['name'])
else:
self.operation.wait_for_zone_operation(
remove_target_pool_operation['name'])
return remove_target_pool_operation
def get_target_pools(self) -> list:
"""Get a list of target pools served by the instance group
Returns: a list of target pools' selfLink
"""
configs = self.get_instance_group_configs()
if 'targetPools' not in configs:
return []
return configs['targetPools']
def list_instances(self) -> list:
""" List managed instances' selfLinks
Returns: a list of instances' selfLinks
"""
instance_selfLinks = []
args = {
'project': self.project,
'instanceGroupManager': self.instance_group_name,
}
self.add_zone_or_region_into_args(args)
list_instances_operation = self.instance_group_manager_api.listManagedInstances(
**args).execute()
for item in list_instances_operation['managedInstances']:
instance_selfLinks.append(item['instance'])
return instance_selfLinks
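# --- Usage sketch (illustrative only, not part of the module) ---
# A minimal sketch of reading a single-zone managed instance group's configs
# through this class. The project, group, network and zone names below are
# hypothetical placeholders, and the API handles are wired up by hand because
# the constructor intentionally leaves them as None; credentials are resolved
# from the environment by googleapiclient.
if __name__ == '__main__':
    from googleapiclient import discovery

    compute = discovery.build('compute', 'v1')
    group = ManagedInstanceGroup(compute, 'my-project', 'my-mig',
                                 'target-network', 'target-subnet', False)
    group.instance_group_manager_api = compute.instanceGroupManagers()
    group.zone_or_region = 'us-central1-a'
    group.is_multi_zone = False  # zonal group, so the args get a 'zone' key
    configs = group.get_instance_group_configs()
    print(configs.get('instanceTemplate'))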
|
{
"content_hash": "d2ffb50aaf580a99b3e6a5a570784184",
"timestamp": "",
"source": "github",
"line_count": 322,
"max_line_length": 93,
"avg_line_length": 34.97826086956522,
"alnum_prop": 0.5816389949391814,
"repo_name": "googleinterns/vm-network-migration",
"id": "c6f092427a9f447936d71e90201aaae11a453172",
"size": "11838",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vm_network_migration/modules/instance_group_modules/managed_instance_group.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "502858"
}
],
"symlink_target": ""
}
|
import sys
import os
from django.conf import settings
from s3upload import __version__
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
settings.configure()
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'django-storages-s3upload'
copyright = u'2014, Matt Austin'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = __version__
# The full version, including alpha/beta/rc tags.
release = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
if on_rtd:
html_theme = 'default'
else:
html_theme = 'nature'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'django-storages-s3uploaddoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'django-storages-s3upload.tex', u'django-storages-s3upload Documentation',
u'Matt Austin', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'django-storages-s3upload', u'django-storages-s3upload Documentation',
[u'Matt Austin'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'django-storages-s3upload', u'django-storages-s3upload Documentation',
u'Matt Austin', 'django-storages-s3upload', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
{
"content_hash": "a0df0fc2d587045db97a4354ba2b6567",
"timestamp": "",
"source": "github",
"line_count": 257,
"max_line_length": 86,
"avg_line_length": 31.51750972762646,
"alnum_prop": 0.7075308641975309,
"repo_name": "mattaustin/django-storages-s3upload",
"id": "d1ddf85b075f1e0ffc3083ee590c8f9f64a1c9d6",
"size": "8537",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "11414"
},
{
"name": "JavaScript",
"bytes": "63618"
},
{
"name": "Python",
"bytes": "33853"
},
{
"name": "Shell",
"bytes": "6737"
}
],
"symlink_target": ""
}
|
import requests
from urllib.parse import urljoin
from .base import BaseBankIdClient
class OschadBankId(BaseBankIdClient):
"""
Oschadbank-specific BankID client.
"""
default_authorization_base_url = 'https://id.bank.gov.ua/v1/bank/oauth2/authorize'
default_api_base_url = 'https://id.bank.gov.ua/v1/'
token_endpoint = 'bank/oauth2/token'
def user_info(self, token, declaration):
headers = {
'Authorization': 'Bearer {}'.format(token.access_token),
'Accept': 'application/json'
}
response = requests.post(urljoin(self.api_base_url, 'bank/resource/client'),
json=declaration,
headers=headers)
if response.status_code == requests.codes.ok:
return response.json()
else:
self._handle_errors(response)
|
{
"content_hash": "960c4212f05b42fa7daf608d9128a919",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 86,
"avg_line_length": 31.535714285714285,
"alnum_prop": 0.6013590033975085,
"repo_name": "badparking/badparking.in.ua",
"id": "646df2fd193de58f1ec6ebfb0f72ee240b814728",
"size": "883",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "badparking/profiles/bankid/oschadbank.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "99868"
}
],
"symlink_target": ""
}
|
import numpy as np
def permutate(data,labels):
# permutate the data
indices = np.random.permutation(data.index)
data = data.reindex(indices)
labels = labels.reindex(indices)
return (data,labels)
def split_test_train(data,labels,percent_train):
splitpoint = int(data.index.size * percent_train)
    trainData = data[0:splitpoint]
    # slice from splitpoint (not splitpoint + 1) so that no row is silently dropped
    testData = data[splitpoint:]
    trainLabels = labels[0:splitpoint]
    testLabels = labels[splitpoint:]
return (trainData,trainLabels,testData,testLabels)
def labelMatrixToArray(labelMatrix, threshold):
labels = []
exclude = []
for row in labelMatrix.index:
r = labelMatrix.loc[row,:]
lblInfo = r[r > threshold]
lbl = 0
# TODO: for training, it would be better
# to remove the ones where 0 is more than 50 and label is less than 15
if lblInfo.size > 0:
lbl = lblInfo.index[0]
labels.append(lbl)
else:
exclude.append(row)
return (labels, exclude)
def normalizeZeroClass(labels, data):
counts = labels.groupby(0).size()
    max_count = counts[1:].max()  # avoid shadowing the built-in max()
    zeroIndex = labels[labels[0] == 0.0].index
    selectedIndex = np.random.choice(zeroIndex, size=max_count, replace=False)
removalIndex = zeroIndex.drop(selectedIndex)
labelDF = labels.drop(removalIndex)
trainData = data.drop(removalIndex)
return (labelDF, trainData, removalIndex)
def normalizeZeroClassArray(labels_arr, data):
lbls = np.array(labels_arr)
    zeroIndex = np.where(lbls == 0.0)[0]
    equalizer = zeroIndex.size - (len(labels_arr) - zeroIndex.size)
removalIndex = np.random.choice(zeroIndex, size=equalizer, replace=False)
for index in sorted(removalIndex, reverse=True):
del labels_arr[index]
data = data.drop(data.index[removalIndex])
return (labels_arr, data, removalIndex)
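# --- Usage sketch (illustrative only) ---
# A minimal end-to-end run of the helpers above on a toy pandas frame; the
# feature names and the 80/20 split are made up for the demonstration.
if __name__ == '__main__':
    import pandas as pd

    data = pd.DataFrame({'f1': range(10), 'f2': range(10, 20)})
    labels = pd.Series([0.0, 1.0] * 5)
    data, labels = permutate(data, labels)
    trainData, trainLabels, testData, testLabels = split_test_train(data, labels, 0.8)
    print(len(trainData), len(testData))  # 8 training rows, 2 test rows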
|
{
"content_hash": "3384199dc3f1edde18c838756792ee2c",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 78,
"avg_line_length": 36.26923076923077,
"alnum_prop": 0.6712619300106044,
"repo_name": "joergsimon/gesture-analysis",
"id": "75b7d151c574e7a79ae6abd066f9fe8a89f286f1",
"size": "1886",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "analysis/preparation.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "18735"
},
{
"name": "Python",
"bytes": "103012"
}
],
"symlink_target": ""
}
|
from flavorsync.openstack.openstack_manager import OpenStackManager
from flavorsync.database_manager import DatabaseManager
from flavorsync.model import FlavorCollection, Flavor, InfrastructureCollection
from flavorsync.exceptions import FlavorNotFoundExceptionError,\
PromotedNotPublicFlavorBadRequestError, UnpublishUnpromotedFlavorError
from uuid import uuid4
from flask import session
class FlavorSynchronizer():
def __init__(self):
self.database_manager = DatabaseManager()
##############
    #Management of the complete lifecycle of the flavors, including private, public and promoted flavors.
    #This is only feasible if all management goes through this API; if the different regions bypass it,
    #keeping the data coherent becomes complicated.
    #Hence, this part is neither finished nor tested, since we have not centralized this management.
#########
def register_infrastructure(self, infrastructure):
self.database_manager.register_infrastructure(infrastructure)
def unregister_infrastructure(self, name):
self.database_manager.unregister_infrastructure(name)
def get_flavors(self, public, promoted):
flavors = self._get_public_flavors(public, promoted)
flavor_collection = FlavorCollection(flavors)
if not public and not promoted:
private_flavors = self._get_private_flavors()
self._remove_duplicated_flavors(flavors, private_flavors)
flavor_collection.extend(private_flavors)
return flavor_collection
def get_flavors_region(self, region):
private_flavors = self._get_private_flavors()
        return private_flavors
def create_flavor(self, flavor):
f = self._create_private_flavor(flavor)
return f
def get_flavor(self, flavor_id):
try:
flavor = self._get_public_flavor(flavor_id)
except FlavorNotFoundExceptionError:
flavor = self._get_private_flavor(flavor_id)
return flavor
def publish_or_promote_flavor(self, flavor_id, modified_flavor):
self._check_well_formed_publishing_conditions(modified_flavor)
current_flavor = self.get_flavor(flavor_id)
self._check_publishing_conditions(current_flavor, modified_flavor)
self._publish_or_promote_flavor(current_flavor, modified_flavor)
return current_flavor
def add_node_to_flavor(self, flavor_id, modified_flavor):
current_flavor = self.get_flavor(flavor_id)
self._create_private_flavor(current_flavor)
self.database_manager.add_node_to_flavor(current_flavor,
modified_flavor.nodes)
return current_flavor
def delete_flavor(self, flavor_id):
flavor = self.get_flavor(flavor_id)
self._delete_private_flavor(flavor.id)
if flavor.public:
self._delete_node_in_flavor(flavor)
def is_node_included(self, region):
return self._exist_node(region)
def get_nodes(self):
return self._get_nodes()
#########
#########
##############
    #Management of the promoted flavors, which are not related to the private flavors.
#This part has been integrated with the DashFI.
#########
def get_public_flavor(self, flavor_id):
try:
flavor = self._get_public_flavor(flavor_id)
except FlavorNotFoundExceptionError:
flavor = None
return flavor
def create_promoted_flavor(self, flavor):
        #This method only manages promoted flavors; hence, it is responsible for forcing the promoted attribute.
flavor.promoted = True
flavor.public = True
if not flavor.id:
flavor.id = str(uuid4())
        print(flavor)
        print(flavor.id)
f = self._publish_flavor(flavor)
return f
def delete_promoted_flavor(self, flavor_id):
        #If the promoted flavor has nodes associated, an error is raised, since removing promoted flavors with nodes is not allowed.
        #If you only manage promoted flavors, this shouldn't happen;
        #nevertheless, when managing the complete lifecycle, a flavor cannot be deleted while it has nodes associated.
self.database_manager.delete_flavor(flavor_id)
#########
#########
#########
#Manage only the private flavors for the different regions.
#This part has been integrated with the DashFI.
#########
def get_region_flavors(self, region):
private_flavors = self._get_private_region_flavors(region)
return private_flavors
def get_region_flavor(self, region, flavor_id):
try:
flavor = self._get_private_region_flavor(region, flavor_id)
except FlavorNotFoundExceptionError:
flavor = None
return flavor
def create_region_flavor(self, region, flavor):
f = self._create_private_region_flavor(region, flavor)
return f
def delete_region_flavor(self, region, flavor_id):
self._delete_private_region_flavor(region, flavor_id)
########
########
########
##Common and private methods
########
def _get_public_flavors(self, public, promoted):
return self.database_manager.get_flavors(public, promoted)
def _exist_node(self, region):
infrastructure = self.database_manager.get_infrastructure(region)
        return infrastructure is not None
def _get_nodes(self):
infrastructures = self.database_manager.get_infrastructures()
infrastructure_collection = InfrastructureCollection(infrastructures)
return infrastructure_collection
    #With a Keystone token
def _get_private_region_flavors(self, region):
token = session['token_keystone']
infrastructure = self.database_manager.get_infrastructure(region)
openstack_manager = OpenStackManager(region, token)
openstack_flavors = openstack_manager.get_flavors()
        #TODO: review how to manage the infrastructure so it is aligned with the database definition.
return FlavorCollection.from_openstack_flavor_list(openstack_flavors, infrastructure)
def _get_private_region_flavor(self, region, flavor_id):
token = session['token_keystone']
infrastructure = self.database_manager.get_infrastructure(region)
openstack_manager = OpenStackManager(region, token)
openstack_flavor = openstack_manager.get_flavor(flavor_id)
return Flavor.from_openstack_flavor(openstack_flavor, infrastructure)
def _create_private_region_flavor(self, region, flavor):
token = session['token_keystone']
infrastructure = self.database_manager.get_infrastructure(region)
openstack_manager = OpenStackManager(region, token)
created_flavor = openstack_manager.create_flavor(flavor)
return Flavor.from_openstack_flavor(created_flavor, infrastructure)
def _delete_private_region_flavor(self, region, flavor_id):
token = session['token_keystone']
infrastructure = self.database_manager.get_infrastructure(region)
openstack_manager = OpenStackManager(region, token)
openstack_manager.delete_flavor(flavor_id)
    #Without a Keystone token
def _get_private_flavors(self):
infrastructure = self.database_manager.get_infrastructure('Mordor')
openstack_manager = OpenStackManager(infrastructure)
openstack_flavors = openstack_manager.get_flavors()
return FlavorCollection.from_openstack_flavor_list(openstack_flavors, infrastructure)
def _create_private_flavor(self, flavor):
infrastructure = self.database_manager.get_infrastructure('Mordor')
openstack_manager = OpenStackManager(infrastructure)
created_flavor = openstack_manager.create_flavor(flavor)
return Flavor.from_openstack_flavor(created_flavor, infrastructure)
def _publish_flavor(self, flavor):
f = self.database_manager.create_flavor(flavor)
return f
def _get_private_flavor(self, flavor_id):
infrastructure = self.database_manager.get_infrastructure('Mordor')
openstack_manager = OpenStackManager(infrastructure)
openstack_flavor = openstack_manager.get_flavor(flavor_id)
return Flavor.from_openstack_flavor(openstack_flavor, infrastructure)
def _get_public_flavor(self, flavor_id):
return self.database_manager.get_flavor(flavor_id)
def _delete_private_flavor(self, flavor_id):
infrastructure = self.database_manager.get_infrastructure('Mordor')
openstack_manager = OpenStackManager(infrastructure)
openstack_manager.delete_flavor(flavor_id)
def _delete_node_in_flavor(self, flavor):
infrastructure = self.database_manager.get_infrastructure('Mordor')
self.database_manager.delete_node_in_flavor(flavor, infrastructure)
def _check_well_formed_publishing_conditions(self, flavor):
if flavor.promoted and flavor.public is not None and not flavor.public:
raise PromotedNotPublicFlavorBadRequestError()
def _check_publishing_conditions(self, current_flavor, modified_flavor):
if (modified_flavor.public is not None
and current_flavor.public
and not modified_flavor.public
or current_flavor.promoted and not modified_flavor.promoted):
raise UnpublishUnpromotedFlavorError()
def _publish_or_promote_flavor(self, current_flavor, modified_flavor):
if modified_flavor.public:
current_flavor.public = modified_flavor.public
if modified_flavor.promoted is not None:
current_flavor.promoted = modified_flavor.promoted
current_flavor = self._publish_flavor(current_flavor)
elif modified_flavor.promoted:
current_flavor = self.database_manager.promote_flavor(current_flavor)
return current_flavor
def _is_openstack_flavor(self, flavor):
return flavor is None
def _remove_duplicated_flavors(self, public_flavors, private_flavors):
for public_flavor in public_flavors:
            # iterate over a copy, since the list is mutated while looping
            for private_flavor in list(private_flavors.flavors):
if private_flavor.id in public_flavor.id:
private_flavors.flavors.remove(private_flavor)
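# --- Usage sketch (illustrative only, not part of the module) ---
# A minimal sketch of the promoted-flavor path, assuming the database layer
# has been configured elsewhere (DatabaseManager takes no constructor
# arguments here); 'some-flavor-id' is a hypothetical identifier.
if __name__ == '__main__':
    synchronizer = FlavorSynchronizer()
    flavor = synchronizer.get_public_flavor('some-flavor-id')
    if flavor is None:
        print('flavor is not published or promoted')
    else:
        print(flavor.id, flavor.promoted)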
|
{
"content_hash": "2259cda8e68a807f78b45ffcd5435b64",
"timestamp": "",
"source": "github",
"line_count": 266,
"max_line_length": 131,
"avg_line_length": 39.86466165413534,
"alnum_prop": 0.6675782723500566,
"repo_name": "Atos-FiwareOps/flavor-sync",
"id": "2711e43be235295e13039ed97bf958e72774cc1b",
"size": "10604",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "flavorsync/flavor_synchronizer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "API Blueprint",
"bytes": "36933"
},
{
"name": "Python",
"bytes": "135827"
}
],
"symlink_target": ""
}
|
import os
import subprocess
import sys
import tempfile
import zipfile
# Locate this script in the file system.
startDir = os.path.dirname(os.path.abspath(__file__))
fileNum = 1
# Prepare a scratch zip file to hold the seed corpus assembled below.
with tempfile.NamedTemporaryFile(suffix='primary.zip', delete=False, mode='w') as pathToZip:
with zipfile.ZipFile(pathToZip.name, 'w', zipfile.ZIP_DEFLATED) as archive:
# Iterate over every file in this directory and use it to assemble our corpus.
for root, dirs, files in os.walk(startDir):
for file in files:
# Exclude files that won't be useful fuzzer inputs.
if (not file.startswith('.') # Hidden
and not file.endswith('.py') # Python
and not file.endswith('.test') # ES2 conformance script
and not file.endswith('.txt')): # Text
# Prepend a number to each output filename to guarantee uniqueness.
pathInZip = '%d_%s' % (fileNum, file)
fileNum += 1
with open('%s/%s' % (root, file), 'r') as skslFile:
# Read the SkSL text as input.
inputSkSL = skslFile.read()
# Copy the SkSL into our zip archive.
archive.writestr(pathInZip, inputSkSL)
try:
# Upload our zip file to cloud storage.
output = subprocess.check_output(
['gsutil', 'cp', pathToZip.name,
'gs://skia-fuzzer/oss-fuzz/sksl_seed_corpus.zip'],
stderr=subprocess.STDOUT)
# Make the uploaded file world-readable.
output = subprocess.check_output(
['gsutil', 'acl', 'ch', '-u', 'AllUsers:R',
'gs://skia-fuzzer/oss-fuzz/sksl_seed_corpus.zip'],
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as err:
# Report the error.
print("### Unable to upload fuzzer corpus to Google Cloud:")
print(" " + "\n ".join(err.output.splitlines()))
print("\nPlease read the notes at the top of update_fuzzer.py for next steps.\n")
sys.exit(err.returncode)
|
{
"content_hash": "703ffdbddc90d1500308e6e70f54ef22",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 96,
"avg_line_length": 47.333333333333336,
"alnum_prop": 0.5735035211267606,
"repo_name": "aosp-mirror/platform_external_skia",
"id": "5b641468b9cb92249cd7f042a110329fe1ec29a0",
"size": "2739",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "resources/sksl/update_fuzzer.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "12716940"
},
{
"name": "Batchfile",
"bytes": "904"
},
{
"name": "C",
"bytes": "620774"
},
{
"name": "C#",
"bytes": "4683"
},
{
"name": "C++",
"bytes": "27394853"
},
{
"name": "GLSL",
"bytes": "67013"
},
{
"name": "Go",
"bytes": "80137"
},
{
"name": "HTML",
"bytes": "1002516"
},
{
"name": "Java",
"bytes": "32794"
},
{
"name": "JavaScript",
"bytes": "51666"
},
{
"name": "Lex",
"bytes": "4372"
},
{
"name": "Lua",
"bytes": "70974"
},
{
"name": "Makefile",
"bytes": "2295"
},
{
"name": "Objective-C",
"bytes": "35223"
},
{
"name": "Objective-C++",
"bytes": "34410"
},
{
"name": "PHP",
"bytes": "120845"
},
{
"name": "Python",
"bytes": "1002226"
},
{
"name": "Shell",
"bytes": "49974"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import json
import logging
log = logging.getLogger()
log.setLevel(logging.DEBUG)
def handler(event, context):
log.debug("Received event {}".format(json.dumps(event)))
return { "hello" : event['name'] }
|
{
"content_hash": "ff8ccb115793d76d584ec7eb5af4acad",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 60,
"avg_line_length": 22.818181818181817,
"alnum_prop": 0.7051792828685259,
"repo_name": "moee/lambda-hands-on",
"id": "7ff6abd01d0ef73584c8dbea30a24a9b8c3aa00b",
"size": "251",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "voting-app/greeter/handler.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1772"
},
{
"name": "HTML",
"bytes": "427"
},
{
"name": "JavaScript",
"bytes": "4782"
},
{
"name": "Python",
"bytes": "5892"
},
{
"name": "Shell",
"bytes": "296"
}
],
"symlink_target": ""
}
|
import unittest
import twitter
from twitter.twitter_utils import (
calc_expected_status_length,
parse_media_file
)
class ApiTest(unittest.TestCase):
def setUp(self):
self.api = twitter.Api(
consumer_key='test',
consumer_secret='test',
access_token_key='test',
access_token_secret='test',
sleep_on_rate_limit=False)
self.base_url = 'https://api.twitter.com/1.1'
def test_parse_media_file_http(self):
data_file, filename, file_size, media_type = parse_media_file(
'https://raw.githubusercontent.com/bear/python-twitter/master/testdata/168NQ.jpg')
self.assertTrue(hasattr(data_file, 'read'))
self.assertEqual(filename, '168NQ.jpg')
self.assertEqual(file_size, 44772)
self.assertEqual(media_type, 'image/jpeg')
def test_parse_media_file_local_file(self):
data_file, filename, file_size, media_type = parse_media_file(
'testdata/168NQ.jpg')
self.assertTrue(hasattr(data_file, 'read'))
self.assertEqual(filename, '168NQ.jpg')
self.assertEqual(file_size, 44772)
self.assertEqual(media_type, 'image/jpeg')
def test_parse_media_file_fileobj(self):
with open('testdata/168NQ.jpg', 'rb') as f:
data_file, filename, file_size, media_type = parse_media_file(f)
self.assertTrue(hasattr(data_file, 'read'))
self.assertEqual(filename, '168NQ.jpg')
self.assertEqual(file_size, 44772)
self.assertEqual(media_type, 'image/jpeg')
def test_utils_error_checking(self):
with open('testdata/168NQ.jpg', 'r') as f:
self.assertRaises(
twitter.TwitterError,
lambda: parse_media_file(f))
with open('testdata/user_timeline.json', 'rb') as f:
self.assertRaises(
twitter.TwitterError,
lambda: parse_media_file(f))
self.assertRaises(
twitter.TwitterError,
lambda: twitter.twitter_utils.enf_type('test', int, 'hi'))
def test_calc_expected_status_length(self):
status = 'hi a tweet there'
len_status = calc_expected_status_length(status)
self.assertEqual(len_status, 16)
def test_calc_expected_status_length_with_url(self):
status = 'hi a tweet there example.com'
len_status = calc_expected_status_length(status)
self.assertEqual(len_status, 40)
def test_calc_expected_status_length_with_url_and_extra_spaces(self):
status = 'hi a tweet there example.com'
len_status = calc_expected_status_length(status)
self.assertEqual(len_status, 63)
|
{
"content_hash": "8ea58fe27c5681096ff9cd04bb3d7746",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 94,
"avg_line_length": 36.986486486486484,
"alnum_prop": 0.6160029229082937,
"repo_name": "sinharrajesh/dbtools",
"id": "b021e3476ede5b9209ca8560c8af06c118b55d99",
"size": "2756",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "twitter-analysis/tests/test_twitter_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "6475"
},
{
"name": "HTML",
"bytes": "334382"
},
{
"name": "Makefile",
"bytes": "8260"
},
{
"name": "Python",
"bytes": "534864"
},
{
"name": "Shell",
"bytes": "5668"
}
],
"symlink_target": ""
}
|
from swgpy.object import *
def create(kernel):
result = Ship()
result.template = "object/ship/shared_droid_fighter_tier2.iff"
result.attribute_template_id = -1
result.stfName("","")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
{
"content_hash": "e5180bc97d6e97d3a30ee16bc184e3d9",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 63,
"avg_line_length": 20.846153846153847,
"alnum_prop": 0.6678966789667896,
"repo_name": "obi-two/Rebelion",
"id": "0c086730bada8750cf7390ba00fbbc700c0dc055",
"size": "416",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data/scripts/templates/object/ship/shared_droid_fighter_tier2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11818"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2293610"
},
{
"name": "CMake",
"bytes": "39727"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7499185"
},
{
"name": "SQLPL",
"bytes": "41864"
}
],
"symlink_target": ""
}
|
import unittest2
class Test__new_value_pb(unittest2.TestCase):
def _callFUT(self, entity_pb, name):
from gcloud.datastore.helpers import _new_value_pb
return _new_value_pb(entity_pb, name)
def test_it(self):
from gcloud.datastore._generated import entity_pb2
entity_pb = entity_pb2.Entity()
name = 'foo'
result = self._callFUT(entity_pb, name)
self.assertTrue(isinstance(result, entity_pb2.Value))
self.assertEqual(len(entity_pb.properties), 1)
self.assertEqual(entity_pb.properties[name], result)
class Test__property_tuples(unittest2.TestCase):
def _callFUT(self, entity_pb):
from gcloud.datastore.helpers import _property_tuples
return _property_tuples(entity_pb)
def test_it(self):
import types
from gcloud.datastore._generated import entity_pb2
from gcloud.datastore.helpers import _new_value_pb
entity_pb = entity_pb2.Entity()
name1 = 'foo'
name2 = 'bar'
val_pb1 = _new_value_pb(entity_pb, name1)
val_pb2 = _new_value_pb(entity_pb, name2)
result = self._callFUT(entity_pb)
self.assertTrue(isinstance(result, types.GeneratorType))
self.assertEqual(sorted(result),
sorted([(name1, val_pb1), (name2, val_pb2)]))
class Test_entity_from_protobuf(unittest2.TestCase):
def _callFUT(self, val):
from gcloud.datastore.helpers import entity_from_protobuf
return entity_from_protobuf(val)
def test_it(self):
from gcloud.datastore._generated import entity_pb2
from gcloud.datastore.helpers import _new_value_pb
_PROJECT = 'PROJECT'
_KIND = 'KIND'
_ID = 1234
entity_pb = entity_pb2.Entity()
entity_pb.key.partition_id.project_id = _PROJECT
entity_pb.key.path.add(kind=_KIND, id=_ID)
value_pb = _new_value_pb(entity_pb, 'foo')
value_pb.string_value = 'Foo'
unindexed_val_pb = _new_value_pb(entity_pb, 'bar')
unindexed_val_pb.integer_value = 10
unindexed_val_pb.exclude_from_indexes = True
array_val_pb1 = _new_value_pb(entity_pb, 'baz')
array_pb1 = array_val_pb1.array_value.values
unindexed_array_val_pb = array_pb1.add()
unindexed_array_val_pb.integer_value = 11
unindexed_array_val_pb.exclude_from_indexes = True
array_val_pb2 = _new_value_pb(entity_pb, 'qux')
array_pb2 = array_val_pb2.array_value.values
indexed_array_val_pb = array_pb2.add()
indexed_array_val_pb.integer_value = 12
entity = self._callFUT(entity_pb)
self.assertEqual(entity.kind, _KIND)
self.assertEqual(entity.exclude_from_indexes,
frozenset(['bar', 'baz']))
entity_props = dict(entity)
self.assertEqual(entity_props,
{'foo': 'Foo', 'bar': 10, 'baz': [11], 'qux': [12]})
# Also check the key.
key = entity.key
self.assertEqual(key.project, _PROJECT)
self.assertEqual(key.namespace, None)
self.assertEqual(key.kind, _KIND)
self.assertEqual(key.id, _ID)
def test_mismatched_value_indexed(self):
from gcloud.datastore._generated import entity_pb2
from gcloud.datastore.helpers import _new_value_pb
_PROJECT = 'PROJECT'
_KIND = 'KIND'
_ID = 1234
entity_pb = entity_pb2.Entity()
entity_pb.key.partition_id.project_id = _PROJECT
entity_pb.key.path.add(kind=_KIND, id=_ID)
array_val_pb = _new_value_pb(entity_pb, 'baz')
array_pb = array_val_pb.array_value.values
unindexed_value_pb1 = array_pb.add()
unindexed_value_pb1.integer_value = 10
unindexed_value_pb1.exclude_from_indexes = True
unindexed_value_pb2 = array_pb.add()
unindexed_value_pb2.integer_value = 11
with self.assertRaises(ValueError):
self._callFUT(entity_pb)
def test_entity_no_key(self):
from gcloud.datastore._generated import entity_pb2
entity_pb = entity_pb2.Entity()
entity = self._callFUT(entity_pb)
self.assertEqual(entity.key, None)
self.assertEqual(dict(entity), {})
def test_entity_with_meaning(self):
from gcloud.datastore._generated import entity_pb2
from gcloud.datastore.helpers import _new_value_pb
entity_pb = entity_pb2.Entity()
name = 'hello'
value_pb = _new_value_pb(entity_pb, name)
value_pb.meaning = meaning = 9
value_pb.string_value = val = u'something'
entity = self._callFUT(entity_pb)
self.assertEqual(entity.key, None)
self.assertEqual(dict(entity), {name: val})
self.assertEqual(entity._meanings, {name: (meaning, val)})
def test_nested_entity_no_key(self):
from gcloud.datastore._generated import entity_pb2
from gcloud.datastore.helpers import _new_value_pb
PROJECT = 'FOO'
KIND = 'KIND'
INSIDE_NAME = 'IFOO'
OUTSIDE_NAME = 'OBAR'
INSIDE_VALUE = 1337
entity_inside = entity_pb2.Entity()
inside_val_pb = _new_value_pb(entity_inside, INSIDE_NAME)
inside_val_pb.integer_value = INSIDE_VALUE
entity_pb = entity_pb2.Entity()
entity_pb.key.partition_id.project_id = PROJECT
element = entity_pb.key.path.add()
element.kind = KIND
outside_val_pb = _new_value_pb(entity_pb, OUTSIDE_NAME)
outside_val_pb.entity_value.CopyFrom(entity_inside)
entity = self._callFUT(entity_pb)
self.assertEqual(entity.key.project, PROJECT)
self.assertEqual(entity.key.flat_path, (KIND,))
self.assertEqual(len(entity), 1)
inside_entity = entity[OUTSIDE_NAME]
self.assertEqual(inside_entity.key, None)
self.assertEqual(len(inside_entity), 1)
self.assertEqual(inside_entity[INSIDE_NAME], INSIDE_VALUE)
class Test_entity_to_protobuf(unittest2.TestCase):
def _callFUT(self, entity):
from gcloud.datastore.helpers import entity_to_protobuf
return entity_to_protobuf(entity)
def _compareEntityProto(self, entity_pb1, entity_pb2):
from gcloud.datastore.helpers import _property_tuples
self.assertEqual(entity_pb1.key, entity_pb2.key)
value_list1 = sorted(_property_tuples(entity_pb1))
value_list2 = sorted(_property_tuples(entity_pb2))
self.assertEqual(len(value_list1), len(value_list2))
for pair1, pair2 in zip(value_list1, value_list2):
name1, val1 = pair1
name2, val2 = pair2
self.assertEqual(name1, name2)
if val1.HasField('entity_value'): # Message field (Entity)
self.assertEqual(val1.meaning, val2.meaning)
self._compareEntityProto(val1.entity_value,
val2.entity_value)
else:
self.assertEqual(val1, val2)
def test_empty(self):
from gcloud.datastore._generated import entity_pb2
from gcloud.datastore.entity import Entity
entity = Entity()
entity_pb = self._callFUT(entity)
self._compareEntityProto(entity_pb, entity_pb2.Entity())
def test_key_only(self):
from gcloud.datastore._generated import entity_pb2
from gcloud.datastore.entity import Entity
from gcloud.datastore.key import Key
kind, name = 'PATH', 'NAME'
project = 'PROJECT'
key = Key(kind, name, project=project)
entity = Entity(key=key)
entity_pb = self._callFUT(entity)
expected_pb = entity_pb2.Entity()
expected_pb.key.partition_id.project_id = project
path_elt = expected_pb.key.path.add()
path_elt.kind = kind
path_elt.name = name
self._compareEntityProto(entity_pb, expected_pb)
def test_simple_fields(self):
from gcloud.datastore._generated import entity_pb2
from gcloud.datastore.entity import Entity
from gcloud.datastore.helpers import _new_value_pb
entity = Entity()
name1 = 'foo'
entity[name1] = value1 = 42
name2 = 'bar'
entity[name2] = value2 = u'some-string'
entity_pb = self._callFUT(entity)
expected_pb = entity_pb2.Entity()
val_pb1 = _new_value_pb(expected_pb, name1)
val_pb1.integer_value = value1
val_pb2 = _new_value_pb(expected_pb, name2)
val_pb2.string_value = value2
self._compareEntityProto(entity_pb, expected_pb)
def test_with_empty_list(self):
from gcloud.datastore._generated import entity_pb2
from gcloud.datastore.entity import Entity
entity = Entity()
entity['foo'] = []
entity_pb = self._callFUT(entity)
self._compareEntityProto(entity_pb, entity_pb2.Entity())
def test_inverts_to_protobuf(self):
from gcloud.datastore._generated import entity_pb2
from gcloud.datastore.helpers import _new_value_pb
from gcloud.datastore.helpers import entity_from_protobuf
original_pb = entity_pb2.Entity()
# Add a key.
original_pb.key.partition_id.project_id = project = 'PROJECT'
elem1 = original_pb.key.path.add()
elem1.kind = 'Family'
elem1.id = 1234
elem2 = original_pb.key.path.add()
elem2.kind = 'King'
elem2.name = 'Spades'
# Add an integer property.
val_pb1 = _new_value_pb(original_pb, 'foo')
val_pb1.integer_value = 1337
val_pb1.exclude_from_indexes = True
# Add a string property.
val_pb2 = _new_value_pb(original_pb, 'bar')
val_pb2.string_value = u'hello'
# Add a nested (entity) property.
val_pb3 = _new_value_pb(original_pb, 'entity-baz')
sub_pb = entity_pb2.Entity()
sub_val_pb1 = _new_value_pb(sub_pb, 'x')
sub_val_pb1.double_value = 3.14
sub_val_pb2 = _new_value_pb(sub_pb, 'y')
sub_val_pb2.double_value = 2.718281828
val_pb3.meaning = 9
val_pb3.entity_value.CopyFrom(sub_pb)
# Add a list property.
val_pb4 = _new_value_pb(original_pb, 'list-quux')
array_val1 = val_pb4.array_value.values.add()
array_val1.exclude_from_indexes = False
array_val1.meaning = meaning = 22
array_val1.blob_value = b'\xe2\x98\x83'
array_val2 = val_pb4.array_value.values.add()
array_val2.exclude_from_indexes = False
array_val2.meaning = meaning
array_val2.blob_value = b'\xe2\x98\x85'
# Convert to the user-space Entity.
entity = entity_from_protobuf(original_pb)
# Convert the user-space Entity back to a protobuf.
new_pb = self._callFUT(entity)
# NOTE: entity_to_protobuf() strips the project so we "cheat".
new_pb.key.partition_id.project_id = project
self._compareEntityProto(original_pb, new_pb)
def test_meaning_with_change(self):
from gcloud.datastore._generated import entity_pb2
from gcloud.datastore.entity import Entity
from gcloud.datastore.helpers import _new_value_pb
entity = Entity()
name = 'foo'
entity[name] = value = 42
entity._meanings[name] = (9, 1337)
entity_pb = self._callFUT(entity)
expected_pb = entity_pb2.Entity()
value_pb = _new_value_pb(expected_pb, name)
value_pb.integer_value = value
# NOTE: No meaning is used since the value differs from the
# value stored.
self._compareEntityProto(entity_pb, expected_pb)
def test_variable_meanings(self):
from gcloud.datastore._generated import entity_pb2
from gcloud.datastore.entity import Entity
from gcloud.datastore.helpers import _new_value_pb
entity = Entity()
name = 'quux'
entity[name] = values = [1, 20, 300]
meaning = 9
entity._meanings[name] = ([None, meaning, None], values)
entity_pb = self._callFUT(entity)
# Construct the expected protobuf.
expected_pb = entity_pb2.Entity()
value_pb = _new_value_pb(expected_pb, name)
value0 = value_pb.array_value.values.add()
value0.integer_value = values[0]
# The only array entry with a meaning is the middle one.
value1 = value_pb.array_value.values.add()
value1.integer_value = values[1]
value1.meaning = meaning
value2 = value_pb.array_value.values.add()
value2.integer_value = values[2]
self._compareEntityProto(entity_pb, expected_pb)
class Test_key_from_protobuf(unittest2.TestCase):
def _callFUT(self, val):
from gcloud.datastore.helpers import key_from_protobuf
return key_from_protobuf(val)
def _makePB(self, project=None, namespace=None, path=()):
from gcloud.datastore._generated import entity_pb2
pb = entity_pb2.Key()
if project is not None:
pb.partition_id.project_id = project
if namespace is not None:
pb.partition_id.namespace_id = namespace
for elem in path:
added = pb.path.add()
added.kind = elem['kind']
if 'id' in elem:
added.id = elem['id']
if 'name' in elem:
added.name = elem['name']
return pb
def test_wo_namespace_in_pb(self):
_PROJECT = 'PROJECT'
pb = self._makePB(path=[{'kind': 'KIND'}], project=_PROJECT)
key = self._callFUT(pb)
self.assertEqual(key.project, _PROJECT)
self.assertEqual(key.namespace, None)
def test_w_namespace_in_pb(self):
_PROJECT = 'PROJECT'
_NAMESPACE = 'NAMESPACE'
pb = self._makePB(path=[{'kind': 'KIND'}], namespace=_NAMESPACE,
project=_PROJECT)
key = self._callFUT(pb)
self.assertEqual(key.project, _PROJECT)
self.assertEqual(key.namespace, _NAMESPACE)
def test_w_nested_path_in_pb(self):
_PATH = [
{'kind': 'PARENT', 'name': 'NAME'},
{'kind': 'CHILD', 'id': 1234},
{'kind': 'GRANDCHILD', 'id': 5678},
]
pb = self._makePB(path=_PATH, project='PROJECT')
key = self._callFUT(pb)
self.assertEqual(key.path, _PATH)
def test_w_nothing_in_pb(self):
pb = self._makePB()
self.assertRaises(ValueError, self._callFUT, pb)
class Test__pb_attr_value(unittest2.TestCase):
def _callFUT(self, val):
from gcloud.datastore.helpers import _pb_attr_value
return _pb_attr_value(val)
def test_datetime_naive(self):
import calendar
import datetime
from gcloud._helpers import UTC
micros = 4375
naive = datetime.datetime(2014, 9, 16, 10, 19, 32, micros) # No zone.
utc = datetime.datetime(2014, 9, 16, 10, 19, 32, micros, UTC)
name, value = self._callFUT(naive)
self.assertEqual(name, 'timestamp_value')
self.assertEqual(value.seconds, calendar.timegm(utc.timetuple()))
self.assertEqual(value.nanos, 1000 * micros)
def test_datetime_w_zone(self):
import calendar
import datetime
from gcloud._helpers import UTC
micros = 4375
utc = datetime.datetime(2014, 9, 16, 10, 19, 32, micros, UTC)
name, value = self._callFUT(utc)
self.assertEqual(name, 'timestamp_value')
self.assertEqual(value.seconds, calendar.timegm(utc.timetuple()))
self.assertEqual(value.nanos, 1000 * micros)
def test_key(self):
from gcloud.datastore.key import Key
key = Key('PATH', 1234, project='PROJECT')
name, value = self._callFUT(key)
self.assertEqual(name, 'key_value')
self.assertEqual(value, key.to_protobuf())
def test_bool(self):
name, value = self._callFUT(False)
self.assertEqual(name, 'boolean_value')
self.assertEqual(value, False)
def test_float(self):
name, value = self._callFUT(3.1415926)
self.assertEqual(name, 'double_value')
self.assertEqual(value, 3.1415926)
def test_int(self):
name, value = self._callFUT(42)
self.assertEqual(name, 'integer_value')
self.assertEqual(value, 42)
def test_long(self):
must_be_long = (1 << 63) - 1
name, value = self._callFUT(must_be_long)
self.assertEqual(name, 'integer_value')
self.assertEqual(value, must_be_long)
def test_native_str(self):
import six
name, value = self._callFUT('str')
if six.PY2:
self.assertEqual(name, 'blob_value')
else: # pragma: NO COVER Python 3
self.assertEqual(name, 'string_value')
self.assertEqual(value, 'str')
def test_bytes(self):
name, value = self._callFUT(b'bytes')
self.assertEqual(name, 'blob_value')
self.assertEqual(value, b'bytes')
def test_unicode(self):
name, value = self._callFUT(u'str')
self.assertEqual(name, 'string_value')
self.assertEqual(value, u'str')
def test_entity(self):
from gcloud.datastore.entity import Entity
entity = Entity()
name, value = self._callFUT(entity)
self.assertEqual(name, 'entity_value')
self.assertTrue(value is entity)
def test_array(self):
values = ['a', 0, 3.14]
name, value = self._callFUT(values)
self.assertEqual(name, 'array_value')
self.assertTrue(value is values)
def test_geo_point(self):
from google.type import latlng_pb2
from gcloud.datastore.helpers import GeoPoint
lat = 42.42
lng = 99.0007
geo_pt = GeoPoint(latitude=lat, longitude=lng)
geo_pt_pb = latlng_pb2.LatLng(latitude=lat, longitude=lng)
name, value = self._callFUT(geo_pt)
self.assertEqual(name, 'geo_point_value')
self.assertEqual(value, geo_pt_pb)
def test_null(self):
from google.protobuf import struct_pb2
name, value = self._callFUT(None)
self.assertEqual(name, 'null_value')
self.assertEqual(value, struct_pb2.NULL_VALUE)
def test_object(self):
self.assertRaises(ValueError, self._callFUT, object())
class Test__get_value_from_value_pb(unittest2.TestCase):
def _callFUT(self, pb):
from gcloud.datastore.helpers import _get_value_from_value_pb
return _get_value_from_value_pb(pb)
def _makePB(self, attr_name, value):
from gcloud.datastore._generated import entity_pb2
pb = entity_pb2.Value()
setattr(pb, attr_name, value)
return pb
def test_datetime(self):
import calendar
import datetime
from gcloud._helpers import UTC
from gcloud.datastore._generated import entity_pb2
micros = 4375
utc = datetime.datetime(2014, 9, 16, 10, 19, 32, micros, UTC)
pb = entity_pb2.Value()
pb.timestamp_value.seconds = calendar.timegm(utc.timetuple())
pb.timestamp_value.nanos = 1000 * micros
self.assertEqual(self._callFUT(pb), utc)
def test_key(self):
from gcloud.datastore._generated import entity_pb2
from gcloud.datastore.key import Key
pb = entity_pb2.Value()
expected = Key('KIND', 1234, project='PROJECT').to_protobuf()
pb.key_value.CopyFrom(expected)
found = self._callFUT(pb)
self.assertEqual(found.to_protobuf(), expected)
def test_bool(self):
pb = self._makePB('boolean_value', False)
self.assertEqual(self._callFUT(pb), False)
def test_float(self):
pb = self._makePB('double_value', 3.1415926)
self.assertEqual(self._callFUT(pb), 3.1415926)
def test_int(self):
pb = self._makePB('integer_value', 42)
self.assertEqual(self._callFUT(pb), 42)
def test_bytes(self):
pb = self._makePB('blob_value', b'str')
self.assertEqual(self._callFUT(pb), b'str')
def test_unicode(self):
pb = self._makePB('string_value', u'str')
self.assertEqual(self._callFUT(pb), u'str')
def test_entity(self):
from gcloud.datastore._generated import entity_pb2
from gcloud.datastore.entity import Entity
from gcloud.datastore.helpers import _new_value_pb
pb = entity_pb2.Value()
entity_pb = pb.entity_value
entity_pb.key.path.add(kind='KIND')
entity_pb.key.partition_id.project_id = 'PROJECT'
value_pb = _new_value_pb(entity_pb, 'foo')
value_pb.string_value = 'Foo'
entity = self._callFUT(pb)
self.assertTrue(isinstance(entity, Entity))
self.assertEqual(entity['foo'], 'Foo')
def test_array(self):
from gcloud.datastore._generated import entity_pb2
pb = entity_pb2.Value()
array_pb = pb.array_value.values
item_pb = array_pb.add()
item_pb.string_value = 'Foo'
item_pb = array_pb.add()
item_pb.string_value = 'Bar'
items = self._callFUT(pb)
self.assertEqual(items, ['Foo', 'Bar'])
def test_geo_point(self):
from google.type import latlng_pb2
from gcloud.datastore._generated import entity_pb2
from gcloud.datastore.helpers import GeoPoint
lat = -3.14
lng = 13.37
geo_pt_pb = latlng_pb2.LatLng(latitude=lat, longitude=lng)
pb = entity_pb2.Value(geo_point_value=geo_pt_pb)
result = self._callFUT(pb)
self.assertIsInstance(result, GeoPoint)
self.assertEqual(result.latitude, lat)
self.assertEqual(result.longitude, lng)
def test_null(self):
from google.protobuf import struct_pb2
from gcloud.datastore._generated import entity_pb2
pb = entity_pb2.Value(null_value=struct_pb2.NULL_VALUE)
result = self._callFUT(pb)
self.assertIsNone(result)
def test_unknown(self):
from gcloud.datastore._generated import entity_pb2
pb = entity_pb2.Value()
with self.assertRaises(ValueError):
self._callFUT(pb)
class Test_set_protobuf_value(unittest2.TestCase):
def _callFUT(self, value_pb, val):
from gcloud.datastore.helpers import _set_protobuf_value
return _set_protobuf_value(value_pb, val)
def _makePB(self):
from gcloud.datastore._generated import entity_pb2
return entity_pb2.Value()
def test_datetime(self):
import calendar
import datetime
from gcloud._helpers import UTC
pb = self._makePB()
micros = 4375
utc = datetime.datetime(2014, 9, 16, 10, 19, 32, micros, UTC)
self._callFUT(pb, utc)
value = pb.timestamp_value
self.assertEqual(value.seconds, calendar.timegm(utc.timetuple()))
self.assertEqual(value.nanos, 1000 * micros)
def test_key(self):
from gcloud.datastore.key import Key
pb = self._makePB()
key = Key('KIND', 1234, project='PROJECT')
self._callFUT(pb, key)
value = pb.key_value
self.assertEqual(value, key.to_protobuf())
def test_none(self):
pb = self._makePB()
self._callFUT(pb, None)
self.assertEqual(pb.WhichOneof('value_type'), 'null_value')
def test_bool(self):
pb = self._makePB()
self._callFUT(pb, False)
value = pb.boolean_value
self.assertEqual(value, False)
def test_float(self):
pb = self._makePB()
self._callFUT(pb, 3.1415926)
value = pb.double_value
self.assertEqual(value, 3.1415926)
def test_int(self):
pb = self._makePB()
self._callFUT(pb, 42)
value = pb.integer_value
self.assertEqual(value, 42)
def test_long(self):
pb = self._makePB()
must_be_long = (1 << 63) - 1
self._callFUT(pb, must_be_long)
value = pb.integer_value
self.assertEqual(value, must_be_long)
def test_native_str(self):
import six
pb = self._makePB()
self._callFUT(pb, 'str')
if six.PY2:
value = pb.blob_value
else: # pragma: NO COVER Python 3
value = pb.string_value
self.assertEqual(value, 'str')
def test_bytes(self):
pb = self._makePB()
self._callFUT(pb, b'str')
value = pb.blob_value
self.assertEqual(value, b'str')
def test_unicode(self):
pb = self._makePB()
self._callFUT(pb, u'str')
value = pb.string_value
self.assertEqual(value, u'str')
def test_entity_empty_wo_key(self):
from gcloud.datastore.entity import Entity
from gcloud.datastore.helpers import _property_tuples
pb = self._makePB()
entity = Entity()
self._callFUT(pb, entity)
value = pb.entity_value
self.assertEqual(value.key.SerializeToString(), b'')
self.assertEqual(len(list(_property_tuples(value))), 0)
def test_entity_w_key(self):
from gcloud.datastore.entity import Entity
from gcloud.datastore.helpers import _property_tuples
from gcloud.datastore.key import Key
name = 'foo'
value = u'Foo'
pb = self._makePB()
key = Key('KIND', 123, project='PROJECT')
entity = Entity(key=key)
entity[name] = value
self._callFUT(pb, entity)
entity_pb = pb.entity_value
self.assertEqual(entity_pb.key, key.to_protobuf())
prop_dict = dict(_property_tuples(entity_pb))
self.assertEqual(len(prop_dict), 1)
self.assertEqual(list(prop_dict.keys()), [name])
self.assertEqual(prop_dict[name].string_value, value)
def test_array(self):
pb = self._makePB()
values = [u'a', 0, 3.14]
self._callFUT(pb, values)
marshalled = pb.array_value.values
self.assertEqual(len(marshalled), len(values))
self.assertEqual(marshalled[0].string_value, values[0])
self.assertEqual(marshalled[1].integer_value, values[1])
self.assertEqual(marshalled[2].double_value, values[2])
def test_geo_point(self):
from google.type import latlng_pb2
from gcloud.datastore.helpers import GeoPoint
pb = self._makePB()
lat = 9.11
lng = 3.337
geo_pt = GeoPoint(latitude=lat, longitude=lng)
geo_pt_pb = latlng_pb2.LatLng(latitude=lat, longitude=lng)
self._callFUT(pb, geo_pt)
self.assertEqual(pb.geo_point_value, geo_pt_pb)
class Test__get_meaning(unittest2.TestCase):
def _callFUT(self, *args, **kwargs):
from gcloud.datastore.helpers import _get_meaning
return _get_meaning(*args, **kwargs)
def test_no_meaning(self):
from gcloud.datastore._generated import entity_pb2
value_pb = entity_pb2.Value()
result = self._callFUT(value_pb)
        self.assertIsNone(result)
def test_single(self):
from gcloud.datastore._generated import entity_pb2
value_pb = entity_pb2.Value()
value_pb.meaning = meaning = 22
value_pb.string_value = u'hi'
result = self._callFUT(value_pb)
self.assertEqual(meaning, result)
def test_empty_array_value(self):
from gcloud.datastore._generated import entity_pb2
value_pb = entity_pb2.Value()
value_pb.array_value.values.add()
value_pb.array_value.values.pop()
result = self._callFUT(value_pb, is_list=True)
        self.assertIsNone(result)
def test_array_value(self):
from gcloud.datastore._generated import entity_pb2
value_pb = entity_pb2.Value()
meaning = 9
sub_value_pb1 = value_pb.array_value.values.add()
sub_value_pb2 = value_pb.array_value.values.add()
sub_value_pb1.meaning = sub_value_pb2.meaning = meaning
sub_value_pb1.string_value = u'hi'
sub_value_pb2.string_value = u'bye'
result = self._callFUT(value_pb, is_list=True)
self.assertEqual(meaning, result)
def test_array_value_multiple_meanings(self):
from gcloud.datastore._generated import entity_pb2
value_pb = entity_pb2.Value()
meaning1 = 9
meaning2 = 10
sub_value_pb1 = value_pb.array_value.values.add()
sub_value_pb2 = value_pb.array_value.values.add()
sub_value_pb1.meaning = meaning1
sub_value_pb2.meaning = meaning2
sub_value_pb1.string_value = u'hi'
sub_value_pb2.string_value = u'bye'
result = self._callFUT(value_pb, is_list=True)
self.assertEqual(result, [meaning1, meaning2])
def test_array_value_meaning_partially_unset(self):
from gcloud.datastore._generated import entity_pb2
value_pb = entity_pb2.Value()
meaning1 = 9
sub_value_pb1 = value_pb.array_value.values.add()
sub_value_pb2 = value_pb.array_value.values.add()
sub_value_pb1.meaning = meaning1
sub_value_pb1.string_value = u'hi'
sub_value_pb2.string_value = u'bye'
result = self._callFUT(value_pb, is_list=True)
self.assertEqual(result, [meaning1, None])
class TestGeoPoint(unittest2.TestCase):
def _getTargetClass(self):
from gcloud.datastore.helpers import GeoPoint
return GeoPoint
def _makeOne(self, *args, **kwargs):
return self._getTargetClass()(*args, **kwargs)
def test_constructor(self):
lat = 81.2
lng = 359.9999
geo_pt = self._makeOne(lat, lng)
self.assertEqual(geo_pt.latitude, lat)
self.assertEqual(geo_pt.longitude, lng)
def test_to_protobuf(self):
from google.type import latlng_pb2
lat = 0.0001
lng = 20.03
geo_pt = self._makeOne(lat, lng)
result = geo_pt.to_protobuf()
geo_pt_pb = latlng_pb2.LatLng(latitude=lat, longitude=lng)
self.assertEqual(result, geo_pt_pb)
def test___eq__(self):
lat = 0.0001
lng = 20.03
geo_pt1 = self._makeOne(lat, lng)
geo_pt2 = self._makeOne(lat, lng)
self.assertEqual(geo_pt1, geo_pt2)
def test___eq__type_differ(self):
lat = 0.0001
lng = 20.03
geo_pt1 = self._makeOne(lat, lng)
geo_pt2 = object()
self.assertNotEqual(geo_pt1, geo_pt2)
def test___ne__same_value(self):
lat = 0.0001
lng = 20.03
geo_pt1 = self._makeOne(lat, lng)
geo_pt2 = self._makeOne(lat, lng)
comparison_val = (geo_pt1 != geo_pt2)
self.assertFalse(comparison_val)
def test___ne__(self):
geo_pt1 = self._makeOne(0.0, 1.0)
geo_pt2 = self._makeOne(2.0, 3.0)
self.assertNotEqual(geo_pt1, geo_pt2)
|
{
"content_hash": "00edeceb1ef5f60ef3f5a3e14d774109",
"timestamp": "",
"source": "github",
"line_count": 912,
"max_line_length": 78,
"avg_line_length": 33.7828947368421,
"alnum_prop": 0.6095423563777994,
"repo_name": "waprin/google-cloud-python",
"id": "3cc7e6c6a26c0d1a6f08d39d5cd0ccd01c3498c9",
"size": "31407",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "gcloud/datastore/test_helpers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "PowerShell",
"bytes": "7195"
},
{
"name": "Protocol Buffer",
"bytes": "158375"
},
{
"name": "Python",
"bytes": "2785380"
},
{
"name": "Shell",
"bytes": "3120"
}
],
"symlink_target": ""
}
|
"""
vSphere Python SDK program for updating ESXi Advanced Settings
Usage:
    python update_esxi_advanced_settings.py -s 192.168.1.200 \
        -u 'administrator@vsphere.local' -p VMware1! \
        --cluster-name VSAN-Cluster --key VSAN.ClomRepairDelay --value 120
"""
import sys
from pyVmomi import vim, vmodl
from tools import cli, service_instance, pchelper
def main():
"""
Simple command-line program demonstrating how to update
ESXi Advanced Settings
"""
parser = cli.Parser()
parser.add_required_arguments(cli.Argument.CLUSTER_NAME)
parser.add_custom_argument('--key', required=True, action='store',
help='Name of ESXi Advanced Setting to update')
parser.add_custom_argument('--value', required=True, action='store',
help='Value of the ESXi Advanced Setting to update')
args = parser.get_args()
try:
si = service_instance.connect(args)
content = si.RetrieveContent()
cluster = pchelper.get_obj(content, [vim.ClusterComputeResource], args.cluster_name)
hosts = cluster.host
for host in hosts:
option_manager = host.configManager.advancedOption
            # Note: this sample assumes an integer-valued setting.
            option = vim.option.OptionValue(key=args.key,
                                            value=int(args.value))
print("Updating %s on ESXi host %s "
"with value of %s" % (args.key, host.name, args.value))
if option_manager.UpdateOptions(changedValue=[option]):
print("Settings updated!")
except vmodl.MethodFault as ex:
print("Caught vmodl fault : " + ex.msg)
return -1
except Exception as ex:
print("Caught exception : " + str(ex))
return -1
return 0
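# --- Illustrative sketch (not part of the sample) ---
# UpdateOptions() takes a list, so several settings could be batched per
# host in a single call (the second key here is hypothetical):
#
#     options = [vim.option.OptionValue(key='VSAN.ClomRepairDelay', value=120),
#                vim.option.OptionValue(key='Mem.ShareScanGHz', value=4)]
#     option_manager.UpdateOptions(changedValue=options)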
# Start program
if __name__ == "__main__":
    sys.exit(main())
|
{
"content_hash": "2346266e4b3dec4210743efbcfb1b397",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 92,
"avg_line_length": 32.07142857142857,
"alnum_prop": 0.6085746102449888,
"repo_name": "vmware/pyvmomi-community-samples",
"id": "3720b61b60aae733dbe73846e1f7754a8770b443",
"size": "1859",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "samples/update_esxi_advanced_settings.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1631"
}
],
"symlink_target": ""
}
|
"""
This tool is Modis' embed API. It allows Modis modules to easily create
fancy looking GUIs in the Discord client.
"""
import logging
import discord
from modis import main
logger = logging.getLogger(__name__)
URL = "https://musicbyango.com/modis/"
ICON = "http://musicbyango.com/modis/dp/modis64t.png"
# TODO Update to ModisWorks url
class UI:
"""Enables easy management of Discord embeds."""
def __init__(self, channel, title, description, modulename="Modis",
colour=0xAAFF00, thumbnail=None, image=None, datapacks=()):
"""Initialise variables and build the embed.
Args:
channel (discord.TextChannel): Channel to lock UI to
title (str): GUI title, in bold
description (str): GUI description
modulename (str): Name of your module, default "Modis"
colour (int): Colour of line on left, default 0xAAFF00
            thumbnail (str): URL to picture shown in top right corner, default None
            image (str): URL to large image shown in the embed, default None
            datapacks (list): Contains tuples of (title str, data str, inline bool)
"""
self.channel = channel
self.title = title
self.description = description
self.modulename = modulename
self.colour = colour
self.thumbnail = thumbnail
self.image = image
self.datapacks = datapacks
self.datapack_lines = {}
self.built_embed = self.build()
self.sent_embed = None
def build(self):
"""Build the embed.
Returns:
discord.Embed: The built embed.
"""
embed = discord.Embed(
title=self.title,
type='rich',
description=self.description,
colour=self.colour)
embed.set_author(
name="Modis",
url=URL,
icon_url=ICON)
if self.thumbnail:
embed.set_thumbnail(url=self.thumbnail)
if self.image:
embed.set_image(url=self.image)
        self.datapack_lines = {}
        for index, pack in enumerate(self.datapacks):
            embed.add_field(name=pack[0], value=pack[1], inline=pack[2])
            # Store the field index so update_field() can pass it straight
            # to update_data(), which indexes into the embed's field list.
            self.datapack_lines[pack[0]] = index
return embed
async def send(self):
"""Send the embed message."""
await self.channel.trigger_typing()
self.sent_embed = await self.channel.send(embed=self.built_embed)
async def usend(self):
"""Update the existing embed."""
try:
await self.sent_embed.edit(embed=self.built_embed)
except Exception as e:
# TODO Add exceptions
logger.exception(e)
async def delete(self):
"""Delete the existing embed."""
try:
await self.sent_embed.delete()
except Exception as e:
# TODO Add exceptions
logger.exception(e)
self.sent_embed = None
def update_field(self, title, data):
"""Update a particular field's data.
Args:
title (str): The title of the field to update.
data (str): The new value to set for this datapack.
"""
if title in self.datapack_lines:
self.update_data(self.datapack_lines[title], data)
else:
logger.warning("No field with title '{}'".format(title))
def update_colour(self, new_colour):
"""Update the embed's colour.
Args:
new_colour (discord.Colour): The new colour for the embed.
"""
self.built_embed.colour = new_colour
def update_data(self, index, data):
"""Update a particular datapack's data.
Args:
index (int): The index of the datapack.
data (str): The new value to set for this datapack.
"""
datapack = self.built_embed.to_dict()["fields"][index]
self.built_embed.set_field_at(index, name=datapack["name"], value=data, inline=datapack["inline"])
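# --- Illustrative usage sketch (not part of the module) ---
# A minimal example, assuming `channel` is a discord.TextChannel obtained
# elsewhere; the module name and datapack titles/values are hypothetical.
async def _example_usage(channel):
    ui = UI(channel,
            title="Now playing",
            description="Example GUI built with this embed API",
            modulename="music",
            datapacks=[("Song", "-", True), ("Volume", "100%", True)])
    await ui.send()  # post the embed to the channel
    ui.update_field("Song", "Example Song")  # edit the built embed in place
    await ui.usend()  # push the edited embed to Discord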
|
{
"content_hash": "658a6ff21cfc441e301dffa862107af0",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 106,
"avg_line_length": 28.78102189781022,
"alnum_prop": 0.5835658128328683,
"repo_name": "Infraxion/modis",
"id": "8f3e4a0ef172a38ccae54bed37a38a55c2511d18",
"size": "3943",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "modis/tools/embed.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "177093"
}
],
"symlink_target": ""
}
|
from setuptools import setup, find_packages
SRC_DIR = 'src'
def get_version():
import sys
sys.path[:0] = [SRC_DIR]
return __import__('easy_alert').__version__
setup(
name='easy-alert',
version=get_version(),
description='Super Simple Process Monitoring Tool',
author='mogproject',
author_email='mogproj@gmail.com',
url='https://github.com/mogproject/easy-alert',
install_requires=[
'pyyaml',
'paramiko',
],
tests_require=[
'unittest2',
],
package_dir={'': SRC_DIR},
packages=find_packages(SRC_DIR),
include_package_data=True,
test_suite='tests',
entry_points="""
[console_scripts]
easy-alert = easy_alert.easy_alert:main
""",
)
|
{
"content_hash": "85475447fcd29809f50e022ee9bbd6aa",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 55,
"avg_line_length": 21.2,
"alnum_prop": 0.6064690026954178,
"repo_name": "mogproject/easy-alert",
"id": "6c70f7ef781aaea6854a456df978ba29657e91c1",
"size": "742",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "570"
},
{
"name": "Python",
"bytes": "98454"
}
],
"symlink_target": ""
}
|
'''
This module provides utilities to get the absolute filenames so that we can be sure that:
- The case of a file will match the actual file in the filesystem (otherwise breakpoints won't be hit).
- Providing means for the user to make path conversions when doing a remote debugging session in
one machine and debugging in another.
To do that, the PATHS_FROM_ECLIPSE_TO_PYTHON constant must be filled with the appropriate paths.
@note:
in this context, the server is where your python process is running
and the client is where eclipse is running.
E.g.:
If the server (your python process) has the structure
/user/projects/my_project/src/package/module1.py
and the client has:
c:\my_project\src\package\module1.py
the PATHS_FROM_ECLIPSE_TO_PYTHON would have to be:
PATHS_FROM_ECLIPSE_TO_PYTHON = [(r'c:\my_project\src', r'/user/projects/my_project/src')]
@note: DEBUG_CLIENT_SERVER_TRANSLATION can be set to True to debug the result of those translations
@note: the case of the paths is important! Note that this can be tricky to get right when one machine
uses a case-independent filesystem and the other uses a case-dependent filesystem (if the system being
debugged is case-independent, 'normcase()' should be used on the paths defined in PATHS_FROM_ECLIPSE_TO_PYTHON).
@note: all the paths with breakpoints must be translated (otherwise they won't be found in the server)
@note: to enable remote debugging in the target machine (pydev extensions in the eclipse installation)
import pydevd;pydevd.settrace(host, stdoutToServer, stderrToServer, port, suspend)
see parameter docs on pydevd.py
@note: for doing a remote debugging session, all the pydevd_ files must be on the server accessible
through the PYTHONPATH (and the PATHS_FROM_ECLIPSE_TO_PYTHON only needs to be set on the target
machine for the paths that'll actually have breakpoints).
'''
from pydevd_constants import * #@UnusedWildImport
import os.path
import sys
import traceback
normcase = os.path.normcase
basename = os.path.basename
exists = os.path.exists
join = os.path.join
try:
rPath = os.path.realpath #@UndefinedVariable
except:
# jython does not support os.path.realpath
# realpath is a no-op on systems without islink support
rPath = os.path.abspath
#defined as a list of tuples where the 1st element of the tuple is the path in the client machine
#and the 2nd element is the path in the server machine.
#see module docstring for more details.
PATHS_FROM_ECLIPSE_TO_PYTHON = []
#example:
#PATHS_FROM_ECLIPSE_TO_PYTHON = [
#(normcase(r'd:\temp\temp_workspace_2\test_python\src\yyy\yyy'),
# normcase(r'd:\temp\temp_workspace_2\test_python\src\hhh\xxx'))]
DEBUG_CLIENT_SERVER_TRANSLATION = False
#caches filled as requested during the debug session
NORM_FILENAME_CONTAINER = {}
NORM_FILENAME_AND_BASE_CONTAINER = {}
NORM_FILENAME_TO_SERVER_CONTAINER = {}
NORM_FILENAME_TO_CLIENT_CONTAINER = {}
def _NormFile(filename):
try:
return NORM_FILENAME_CONTAINER[filename]
except KeyError:
r = normcase(rPath(filename))
#cache it for fast access later
NORM_FILENAME_CONTAINER[filename] = r
return r
#Now, let's do a quick test to see if we're working with a version of python that has no problems
#related to the names generated...
try:
try:
code = rPath.func_code
except AttributeError:
code = rPath.__code__
if not exists(_NormFile(code.co_filename)):
sys.stderr.write('-------------------------------------------------------------------------------\n')
sys.stderr.write('pydev debugger: CRITICAL WARNING: This version of python seems to be incorrectly compiled (internal generated filenames are not absolute)\n')
sys.stderr.write('pydev debugger: The debugger may still function, but it will work slower and may miss breakpoints.\n')
sys.stderr.write('pydev debugger: Related bug: http://bugs.python.org/issue1666807\n')
sys.stderr.write('-------------------------------------------------------------------------------\n')
NORM_SEARCH_CACHE = {}
initial_norm_file = _NormFile
def _NormFile(filename): #Let's redefine _NormFile to work with paths that may be incorrect
try:
return NORM_SEARCH_CACHE[filename]
except KeyError:
ret = initial_norm_file(filename)
if not exists(ret):
#We must actually go on and check if we can find it as if it was a relative path for some of the paths in the pythonpath
for path in sys.path:
ret = initial_norm_file(join(path, filename))
if exists(ret):
break
else:
sys.stderr.write('pydev debugger: Unable to find real location for: %s\n' % (filename,))
ret = filename
NORM_SEARCH_CACHE[filename] = ret
return ret
except:
#Don't fail if there's something not correct here -- but at least print it to the user so that we can correct that
traceback.print_exc()
if PATHS_FROM_ECLIPSE_TO_PYTHON:
#Work on the client and server slashes.
eclipse_sep = None
python_sep = None
for eclipse_prefix, server_prefix in PATHS_FROM_ECLIPSE_TO_PYTHON:
if eclipse_sep is not None and python_sep is not None:
break
if eclipse_sep is None:
for c in eclipse_prefix:
if c in ('/', '\\'):
eclipse_sep = c
break
if python_sep is None:
for c in server_prefix:
if c in ('/', '\\'):
python_sep = c
break
#If they're the same or one of them cannot be determined, just make it all None.
if eclipse_sep == python_sep or eclipse_sep is None or python_sep is None:
eclipse_sep = python_sep = None
#only setup translation functions if absolutely needed!
def NormFileToServer(filename):
#Eclipse will send the passed filename to be translated to the python process
#So, this would be 'NormFileFromEclipseToPython'
try:
return NORM_FILENAME_TO_SERVER_CONTAINER[filename]
except KeyError:
#used to translate a path from the client to the debug server
translated = normcase(filename)
for eclipse_prefix, server_prefix in PATHS_FROM_ECLIPSE_TO_PYTHON:
if translated.startswith(eclipse_prefix):
if DEBUG_CLIENT_SERVER_TRANSLATION:
sys.stderr.write('pydev debugger: replacing to server: %s\n' % (translated,))
translated = translated.replace(eclipse_prefix, server_prefix)
if DEBUG_CLIENT_SERVER_TRANSLATION:
sys.stderr.write('pydev debugger: sent to server: %s\n' % (translated,))
break
else:
if DEBUG_CLIENT_SERVER_TRANSLATION:
sys.stderr.write('pydev debugger: to server: unable to find matching prefix for: %s in %s\n' % \
(translated, [x[0] for x in PATHS_FROM_ECLIPSE_TO_PYTHON]))
#Note that when going to the server, we do the replace first and only later do the norm file.
if eclipse_sep is not None:
translated = translated.replace(eclipse_sep, python_sep)
translated = _NormFile(translated)
NORM_FILENAME_TO_SERVER_CONTAINER[filename] = translated
return translated
def NormFileToClient(filename):
#The result of this method will be passed to eclipse
#So, this would be 'NormFileFromPythonToEclipse'
try:
return NORM_FILENAME_TO_CLIENT_CONTAINER[filename]
except KeyError:
#used to translate a path from the debug server to the client
translated = _NormFile(filename)
            for eclipse_prefix, python_prefix in PATHS_FROM_ECLIPSE_TO_PYTHON:
                if translated.startswith(python_prefix):
                    if DEBUG_CLIENT_SERVER_TRANSLATION:
                        sys.stderr.write('pydev debugger: replacing to client: %s\n' % (translated,))
                    translated = translated.replace(python_prefix, eclipse_prefix)
if DEBUG_CLIENT_SERVER_TRANSLATION:
sys.stderr.write('pydev debugger: sent to client: %s\n' % (translated,))
break
else:
if DEBUG_CLIENT_SERVER_TRANSLATION:
sys.stderr.write('pydev debugger: to client: unable to find matching prefix for: %s in %s\n' % \
(translated, [x[1] for x in PATHS_FROM_ECLIPSE_TO_PYTHON]))
if eclipse_sep is not None:
translated = translated.replace(python_sep, eclipse_sep)
#The resulting path is not in the python process, so, we cannot do a _NormFile here,
#only at the beginning of this method.
NORM_FILENAME_TO_CLIENT_CONTAINER[filename] = translated
return translated
else:
#no translation step needed (just inline the calls)
NormFileToClient = _NormFile
NormFileToServer = _NormFile
def GetFilenameAndBase(frame):
#This one is just internal (so, does not need any kind of client-server translation)
f = frame.f_code.co_filename
try:
return NORM_FILENAME_AND_BASE_CONTAINER[f]
except KeyError:
filename = _NormFile(f)
base = basename(filename)
NORM_FILENAME_AND_BASE_CONTAINER[f] = filename, base
return filename, base
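# --- Illustrative sketch (not part of the debugger) ---
# With the default empty PATHS_FROM_ECLIPSE_TO_PYTHON, both translation
# functions are plain aliases of _NormFile (see the `else` branch above).
# To actually translate, this module would be edited as in the module
# docstring, e.g.:
#     PATHS_FROM_ECLIPSE_TO_PYTHON = [
#         (normcase(r'c:\my_project\src'),
#          normcase(r'/user/projects/my_project/src'))]
# after which NormFileToServer(r'c:\my_project\src\package\module1.py')
# would yield '/user/projects/my_project/src/package/module1.py' (modulo
# normcase/realpath), and NormFileToClient() performs the inverse mapping.
if __name__ == '__main__':
    # Sanity check of the default (no-translation) configuration only.
    _demo_path = __file__
    assert NormFileToServer(_demo_path) == _NormFile(_demo_path)
    assert NormFileToClient(_demo_path) == _NormFile(_demo_path)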
|
{
"content_hash": "1cd3aa5a2e9882aa43b9970b628a4759",
"timestamp": "",
"source": "github",
"line_count": 229,
"max_line_length": 167,
"avg_line_length": 44.33624454148472,
"alnum_prop": 0.6108539347975968,
"repo_name": "hinesmr/mica",
"id": "bee4c18b7b7f223cdd0124d4f7badfd10e688f02",
"size": "10153",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "util/pydevd/pysrc/pydevd_file_utils.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "19796"
},
{
"name": "C++",
"bytes": "1653"
},
{
"name": "CSS",
"bytes": "718628"
},
{
"name": "HTML",
"bytes": "1110507"
},
{
"name": "JavaScript",
"bytes": "3180714"
},
{
"name": "Makefile",
"bytes": "70"
},
{
"name": "Python",
"bytes": "1379726"
},
{
"name": "Shell",
"bytes": "6032"
}
],
"symlink_target": ""
}
|
""" Defines actions for the Godot plug-in.
"""
#------------------------------------------------------------------------------
# Imports:
#------------------------------------------------------------------------------
from os.path import exists, dirname, join
from enthought.io.api import File
from enthought.pyface.api import ImageResource, FileDialog, CANCEL, OK
from enthought.pyface.action.api import Action as PyFaceAction
from enthought.envisage.ui.action.api import Action, Group, Menu
from enthought.envisage.ui.workbench.api import WorkbenchActionSet
from wizard import NewDotGraphWizard
#------------------------------------------------------------------------------
# Constants:
#------------------------------------------------------------------------------
IMAGE_LOCATION = join(dirname(__file__), "..", "ui", "images")
#------------------------------------------------------------------------------
# "NewDotGraphAction" class:
#------------------------------------------------------------------------------
class NewDotGraphAction(PyFaceAction):
""" An action for creating a new Dot graph.
"""
#--------------------------------------------------------------------------
# "Action" interface:
#--------------------------------------------------------------------------
# A longer description of the action:
description = "Create a new Dot graph"
# The action"s name (displayed on menus/tool bar tools etc):
name = "Graph"
# A short description of the action used for tooltip text etc:
tooltip = "Create a Dot graph"
# The action's image (displayed on tool bar tools etc):
image = ImageResource("graph", search_path=[IMAGE_LOCATION])
#--------------------------------------------------------------------------
# "Action" interface:
#--------------------------------------------------------------------------
def perform(self, event):
""" Perform the action.
"""
wizard = NewDotGraphWizard(parent=self.window.control,
window=self.window, title="New Graph")
# Open the wizard
if wizard.open() == OK:
wizard.finished = True
#------------------------------------------------------------------------------
# "GodotWorkbenchActionSet" class:
#------------------------------------------------------------------------------
class GodotWorkbenchActionSet(WorkbenchActionSet):
""" A set of workbench related actions for the Godot plug-in.
"""
#--------------------------------------------------------------------------
# "ActionSet" interface:
#--------------------------------------------------------------------------
# The action set"s globally unique identifier.
id = "godot.plugin.workbench_action_set"
menus = [ Menu(name="&New", path="MenuBar/File", group="OpenGroup",
groups=["ContainerGroup", "ComponentGroup", "OtherGroup"]) ]
actions = [
Action(path="MenuBar/File/New", group="ComponentGroup",
class_name="godot.plugin.action:NewDotGraphAction"),
Action(path="Resource/New", group="ComponentGroup",
class_name="godot.plugin.action:NewDotGraphAction") ]
# EOF -------------------------------------------------------------------------
|
{
"content_hash": "85811120a86641ebfb8b16e69f3b58cd",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 79,
"avg_line_length": 37.44318181818182,
"alnum_prop": 0.43034901365705613,
"repo_name": "rwl/godot",
"id": "9595968d081cbdc25caa5ee7d2be36340b40fef3",
"size": "4151",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "godot/plugin/action.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "412228"
}
],
"symlink_target": ""
}
|
import os.path as op
import warnings
import numpy as np
from numpy.testing import assert_raises, assert_equal
from nose.tools import assert_true
from mne import read_events, Epochs, pick_types, read_cov
from mne.channels import read_layout
from mne.io import read_raw_fif
from mne.utils import slow_test, run_tests_if_main
from mne.viz.evoked import _line_plot_onselect, plot_compare_evokeds
from mne.viz.utils import _fake_click
# Set our plotters to test mode
import matplotlib
matplotlib.use('Agg') # for testing don't use X server
warnings.simplefilter('always') # enable b/c these tests throw warnings
base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
evoked_fname = op.join(base_dir, 'test-ave.fif')
raw_fname = op.join(base_dir, 'test_raw.fif')
cov_fname = op.join(base_dir, 'test-cov.fif')
event_name = op.join(base_dir, 'test-eve.fif')
event_id, tmin, tmax = 1, -0.1, 0.1
n_chan = 6
layout = read_layout('Vectorview-all')
def _get_picks(raw):
"""Get picks."""
return pick_types(raw.info, meg=True, eeg=False, stim=False,
ecg=False, eog=False, exclude='bads')
def _get_epochs():
"""Get epochs."""
raw = read_raw_fif(raw_fname)
raw.add_proj([], remove_existing=True)
events = read_events(event_name)
picks = _get_picks(raw)
# Use a subset of channels for plotting speed
picks = picks[np.round(np.linspace(0, len(picks) - 1, n_chan)).astype(int)]
# make sure we have a magnetometer and a pair of grad pairs for topomap.
picks = np.concatenate([[2, 3, 4, 6, 7], picks])
epochs = Epochs(raw, events[:5], event_id, tmin, tmax, picks=picks)
epochs.info['bads'] = [epochs.ch_names[-1]]
return epochs
def _get_epochs_delayed_ssp():
"""Get epochs with delayed SSP."""
raw = read_raw_fif(raw_fname)
events = read_events(event_name)
picks = _get_picks(raw)
reject = dict(mag=4e-12)
epochs_delayed_ssp = Epochs(raw, events[:10], event_id, tmin, tmax,
picks=picks, proj='delayed', reject=reject)
return epochs_delayed_ssp
@slow_test
def test_plot_evoked():
"""Test plotting of evoked."""
import matplotlib.pyplot as plt
evoked = _get_epochs().average()
with warnings.catch_warnings(record=True):
fig = evoked.plot(proj=True, hline=[1], exclude=[], window_title='foo')
# Test a click
ax = fig.get_axes()[0]
line = ax.lines[0]
_fake_click(fig, ax,
[line.get_xdata()[0], line.get_ydata()[0]], 'data')
_fake_click(fig, ax,
[ax.get_xlim()[0], ax.get_ylim()[1]], 'data')
# plot with bad channels excluded & spatial_colors & zorder
evoked.plot(exclude='bads')
# test selective updating of dict keys is working.
evoked.plot(hline=[1], units=dict(mag='femto foo'))
evoked_delayed_ssp = _get_epochs_delayed_ssp().average()
evoked_delayed_ssp.plot(proj='interactive')
evoked_delayed_ssp.apply_proj()
assert_raises(RuntimeError, evoked_delayed_ssp.plot,
proj='interactive')
evoked_delayed_ssp.info['projs'] = []
assert_raises(RuntimeError, evoked_delayed_ssp.plot,
proj='interactive')
assert_raises(RuntimeError, evoked_delayed_ssp.plot,
proj='interactive', axes='foo')
plt.close('all')
# test GFP only
evoked.plot(gfp='only')
assert_raises(ValueError, evoked.plot, gfp='foo')
evoked.plot_image(proj=True)
# plot with bad channels excluded
evoked.plot_image(exclude='bads', cmap='interactive')
evoked.plot_image(exclude=evoked.info['bads']) # does the same thing
plt.close('all')
evoked.plot_topo() # should auto-find layout
_line_plot_onselect(0, 200, ['mag', 'grad'], evoked.info, evoked.data,
evoked.times)
plt.close('all')
cov = read_cov(cov_fname)
cov['method'] = 'empirical'
evoked.plot_white(cov)
evoked.plot_white([cov, cov])
# plot_compare_evokeds: test condition contrast, CI, color assignment
plot_compare_evokeds(evoked.copy().pick_types(meg='mag'))
evoked.rename_channels({'MEG 2142': "MEG 1642"})
assert len(plot_compare_evokeds(evoked)) == 2
colors = dict(red='r', blue='b')
linestyles = dict(red='--', blue='-')
red, blue = evoked.copy(), evoked.copy()
red.data *= 1.1
blue.data *= 0.9
plot_compare_evokeds([red, blue], picks=3) # list of evokeds
plot_compare_evokeds([[red, evoked], [blue, evoked]],
picks=3) # list of lists
# test picking & plotting grads
contrast = dict()
contrast["red/stim"] = list((evoked.copy(), red))
contrast["blue/stim"] = list((evoked.copy(), blue))
# test a bunch of params at once
plot_compare_evokeds(contrast, colors=colors, linestyles=linestyles,
picks=[0, 2], vlines=[.01, -.04], invert_y=True,
truncate_yaxis=False, ylim=dict(mag=(-10, 10)),
styles={"red/stim": {"linewidth": 1}})
assert_raises(ValueError, plot_compare_evokeds,
contrast, picks='str') # bad picks: not int
assert_raises(ValueError, plot_compare_evokeds, evoked, picks=3,
colors=dict(fake=1)) # 'fake' not in conds
assert_raises(ValueError, plot_compare_evokeds, evoked, picks=3,
styles=dict(fake=1)) # 'fake' not in conds
assert_raises(ValueError, plot_compare_evokeds, [[1, 2], [3, 4]],
picks=3) # evoked must contain Evokeds
assert_raises(ValueError, plot_compare_evokeds, evoked, picks=3,
styles=dict(err=1)) # bad styles dict
assert_raises(ValueError, plot_compare_evokeds, evoked, picks=3,
gfp=True) # no single-channel GFP
assert_raises(TypeError, plot_compare_evokeds, evoked, picks=3,
ci='fake') # ci must be float or None
contrast["red/stim"] = red
contrast["blue/stim"] = blue
plot_compare_evokeds(contrast, picks=[0], colors=['r', 'b'],
ylim=dict(mag=(1, 10)))
# Hack to test plotting of maxfiltered data
evoked_sss = evoked.copy()
evoked_sss.info['proc_history'] = [dict(max_info=None)]
evoked_sss.plot_white(cov)
evoked_sss.plot_white(cov_fname)
# plot with bad channels excluded, spatial_colors, zorder & pos. layout
evoked.rename_channels({'MEG 0133': 'MEG 0000'})
evoked.plot(exclude=evoked.info['bads'], spatial_colors=True, gfp=True,
zorder='std')
evoked.plot(exclude=[], spatial_colors=True, zorder='unsorted')
assert_raises(TypeError, evoked.plot, zorder='asdf')
plt.close('all')
evoked.plot_sensors() # Test plot_sensors
plt.close('all')
evoked.pick_channels(evoked.ch_names[:4])
with warnings.catch_warnings(record=True) as ws:
evoked.plot()
assert_equal(len(ws), 2)
assert_true(all('Need more than one' in str(w.message) for w in ws))
run_tests_if_main()
|
{
"content_hash": "15213db0616930ce5db20c6d9fa84683",
"timestamp": "",
"source": "github",
"line_count": 177,
"max_line_length": 79,
"avg_line_length": 41.52542372881356,
"alnum_prop": 0.5982312925170068,
"repo_name": "jaeilepp/mne-python",
"id": "d5a77076cb4f42cbdd5ab983e1291f6f53798543",
"size": "7755",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mne/viz/tests/test_evoked.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Csound Document",
"bytes": "69806"
},
{
"name": "Makefile",
"bytes": "3928"
},
{
"name": "Python",
"bytes": "6113850"
},
{
"name": "Shell",
"bytes": "936"
}
],
"symlink_target": ""
}
|
import sys
from datetime import datetime as dt
try:
import pymongo as M
except ImportError:
print >> sys.stderr, "This backend requires pymongo to be installed"
from .storage_backend import StorageBackend
class MongoBackend(StorageBackend):
DEFAULT_HOST = 'localhost'
DEFAULT_PORT = 27017
DEFAULT_DB = 'pastycake'
def __init__(self, kwargs):
self._mongo_kwargs = kwargs
        # pop() so that 'db_name' is not forwarded to pymongo's Connection()
        self._db_name = kwargs.pop('db_name', self.DEFAULT_DB)
self._connected = False
def already_visited_url(self, url):
return bool(self._db.posts.find({'url': url}).count())
def save_url(self, url, match_text=None, rec=0):
def _do_save_url(self, url, match_text):
self._db.posts.insert(
{
'url': url,
'matches': match_text,
'visited': dt.utcnow(),
}
)
try:
_do_save_url(self, url, match_text)
return
except M.errors.PyMongoError as e:
            print >> sys.stderr, 'error saving url: %s' % e
# let's try again in case that the cursor timed out
if not rec:
self.connect()
if self.connected():
self.save_url(url, match_text, rec + 1)
def connect(self):
try:
self._con = M.Connection(**self._mongo_kwargs)
self._db = self._con[self._db_name]
self._connected = True
except M.errors.PyMongoError as e:
print >> sys.stderr, "failed to connect: %s" % e
self._connected = False
def connected(self):
return self._connected
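# --- Illustrative usage sketch (not part of the module) ---
# Hypothetical wiring: the dict mirrors pymongo Connection keyword
# arguments plus the optional 'db_name' consumed by __init__ above.
if __name__ == '__main__':
    backend = MongoBackend({'host': MongoBackend.DEFAULT_HOST,
                            'port': MongoBackend.DEFAULT_PORT,
                            'db_name': 'pastycake_demo'})
    backend.connect()
    if backend.connected() and not backend.already_visited_url('http://example.com'):
        backend.save_url('http://example.com', match_text='example match')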
|
{
"content_hash": "95f9fa7650b023934c356fae02b56894",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 72,
"avg_line_length": 29.280701754385966,
"alnum_prop": 0.5494307968843619,
"repo_name": "9b/pastycake",
"id": "7216c164a502a888b177cad8bf82d979a04a7e7a",
"size": "1669",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pastycake/mongodb_backend.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "36364"
},
{
"name": "Shell",
"bytes": "5111"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, print_function
from .events import *
try:
from .pyuv import PyUVEventLoop
except ImportError:
pass
try:
from .pyside import PySideEventLoop
except ImportError:
pass
|
{
"content_hash": "17a03da42ac68a4565dd53535c7e94f8",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 54,
"avg_line_length": 17.23076923076923,
"alnum_prop": 0.7366071428571429,
"repo_name": "geertj/looping",
"id": "41184b3515db90716839eff21ad0832d4638a258",
"size": "525",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/looping/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "53293"
}
],
"symlink_target": ""
}
|
from setuptools import setup
setup(
name="KegPi",
version="0.1",
author="Bradley Brockman",
author_email="bradbrok@gmail.com",
url="https://github.com/bradbrok/KegPi",
license="MIT_License.txt",
description="Keg control system built on a Raspberry Pi.",
install_requires=[
"flask",
"FlaskWTF",
],
)
|
{
"content_hash": "5c4cf1b2548aac5931d7084544e525ed",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 62,
"avg_line_length": 19.88888888888889,
"alnum_prop": 0.6229050279329609,
"repo_name": "bradbrok/KegPi",
"id": "cbfbf1820525545e9d03319976a869d2de585684",
"size": "358",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5779"
},
{
"name": "HTML",
"bytes": "10296"
},
{
"name": "Python",
"bytes": "40515"
}
],
"symlink_target": ""
}
|
"""General train/eval loop for a single agent on a single train/test split.
The script:
  * Finds all known agents - all subclasses of offline_agents.Agent in the
included files.
* Loads train/dev or train/test task split for the specified seed and tier.
    By default a dev split is used. Set --use-test-split=1 to get the
final, (train + dev)/test split.
* Initializes the agent from the commandline flags.
* Trains the agent on the train part.
* Evaluates the agents on eval part.
  * Saves the evaluation results to `output_dir`/results.json. The file will
contain a dictionary with all evaluation metrics. The most important one,
AUCCESS@100 is saved with key "target_metric".
See offline_agents for example agents.
"""
from typing import Tuple
import argparse
import json
import logging
import os
import sys
import phyre
import offline_agents
def get_train_test(eval_setup_name: str, fold_id: int, use_test_split: bool
) -> Tuple[Tuple[str, ...], Tuple[str, ...]]:
train, dev, test = phyre.get_fold(eval_setup_name, fold_id)
if use_test_split:
return train + dev, test
else:
return train, dev
def main_with_seed(eval_setup_name, fold_id, use_test_split,
max_test_attempts_per_task, output_dir, agent_type,
**agent_kwargs):
train_task_ids, eval_task_ids = get_train_test(eval_setup_name, fold_id,
use_test_split)
agent_kwargs['tier'] = phyre.eval_setup_to_action_tier(eval_setup_name)
agent = find_all_agents()[agent_type]
# It's fine to use eval_task_ids iff it's dev.
dev_tasks_ids = None if use_test_split else eval_task_ids
logging.info('Starting training')
state = agent.train(train_task_ids,
output_dir=output_dir,
dev_tasks_ids=dev_tasks_ids,
**agent_kwargs)
logging.info('Starting eval')
evaluation = agent.eval(state,
eval_task_ids,
max_test_attempts_per_task,
output_dir=output_dir,
**agent_kwargs)
num_tasks = len(eval_task_ids)
results = {}
results['num_eval_tasks'] = num_tasks
results['metrics'] = evaluation.compute_all_metrics()
results['args'] = sys.argv
results['parsed_args'] = dict(
agent_kwargs=agent_kwargs,
main_kwargs=dict(eval_setup_name=eval_setup_name,
fold_id=fold_id,
use_test_split=use_test_split,
agent_type=agent_type,
max_test_attempts_per_task=max_test_attempts_per_task,
output_dir=output_dir))
print(results['parsed_args'])
results['target_metric'] = (results['metrics']['independent_solved_by_aucs']
[max_test_attempts_per_task])
logging.info('FINAL: %s', results['target_metric'])
if not os.path.exists(output_dir):
os.makedirs(output_dir)
out_path = os.path.join(output_dir, 'results.json')
with open(out_path, 'w') as stream:
json.dump(results, stream)
def main(fold_id, fold_id_list, **kwargs):
assert (fold_id is None) != (fold_id_list is None)
if fold_id_list is not None:
base_output_dir = kwargs['output_dir']
for seed in fold_id_list.split(','):
kwargs['output_dir'] = os.path.join(base_output_dir, seed)
            logging.info('Running with seed=%s and output folder %s', seed,
kwargs['output_dir'])
main_with_seed(fold_id=int(seed), **kwargs)
else:
main_with_seed(fold_id=fold_id, **kwargs)
def find_all_agents():
    def yield_subclasses(base):
        for cls in base.__subclasses__():
            if not cls.__abstractmethods__:
                yield cls
            yield from yield_subclasses(cls)
    return {cls.name(): cls for cls in yield_subclasses(offline_agents.Agent)}
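# --- Illustrative sketch (hypothetical; see offline_agents for real agents) ---
# find_all_agents() above discovers every concrete subclass of
# offline_agents.Agent and keys it by its name(). An agent shaped like the
# calls made in main_with_seed() might look as follows; the exact abstract
# interface is defined in offline_agents and is assumed here, so this stays
# a commented sketch rather than a registered (and discoverable) agent:
#
#     class RandomAgent(offline_agents.Agent):
#
#         @classmethod
#         def name(cls):
#             return 'random'
#
#         @classmethod
#         def add_parser_arguments(cls, parser):
#             pass
#
#         @classmethod
#         def train(cls, train_task_ids, output_dir, dev_tasks_ids=None,
#                   **kwargs):
#             return {}  # no training state needed
#
#         @classmethod
#         def eval(cls, state, eval_task_ids, max_test_attempts_per_task,
#                  output_dir, **kwargs):
#             ...  # try random actions; return a phyre evaluation object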
def parse_and_log_args():
parser = argparse.ArgumentParser()
parser.add_argument(
'--use-test-split',
type=int,
default=0,
help='If false, will test on validation. Otherwise will train on'
' train+validation and evaluate on test.')
parser.add_argument('--eval-setup-name',
required=True,
choices=phyre.MAIN_EVAL_SETUPS)
fold_args = parser.add_mutually_exclusive_group(required=True)
fold_args.add_argument(
'--fold-id',
type=int,
help='Fold id to use. Mutually exclusive with `--fold-id-list`.')
fold_args.add_argument(
'--fold-id-list',
type=str,
        help='Comma-separated list of folds. If set, will call itself with each'
        ' seed. Results for each seed will be stored in a separate subfolder'
        ' of the output dir.')
parser.add_argument('--output-dir',
required=True,
                        help='Folder to save intermediate files and results.')
group = parser.add_argument_group('General agent options')
group.add_argument('--simulation-cache-size',
type=int,
help='Size of the simulation cache to use.')
group.add_argument(
'--max-train-actions',
type=int,
help='If set, will use only the specified number of actions from the'
' simulation cache.')
group.add_argument(
'--max-test-attempts-per-task',
type=int,
default=phyre.MAX_TEST_ATTEMPTS,
help='Do at most this many attempts per task during evaluation.')
agent_dict = find_all_agents()
parser.add_argument('--agent-type',
required=True,
choices=agent_dict.keys())
for cls in agent_dict.values():
cls.add_parser_arguments(parser)
parsed_args = parser.parse_args()
if parsed_args.max_test_attempts_per_task > phyre.MAX_TEST_ATTEMPTS:
parser.error('--max-test-attempts-per-task cannot be greater than %s' %
phyre.MAX_TEST_ATTEMPTS)
print('Args:', ' '.join(sys.argv))
logging.info('Args: %s', ' '.join(sys.argv))
print('Parsed args:', parsed_args)
logging.info('Parsed args: %s', vars(parsed_args))
return parsed_args
if __name__ == '__main__':
logging.basicConfig(format=('%(asctime)s %(levelname)-8s'
' {%(module)s:%(lineno)d} %(message)s'),
level=logging.DEBUG,
datefmt='%Y-%m-%d %H:%M:%S')
main(**vars(parse_and_log_args()))
|
{
"content_hash": "1f8061857261ba3d1b287402c035cd2a",
"timestamp": "",
"source": "github",
"line_count": 183,
"max_line_length": 80,
"avg_line_length": 36.55191256830601,
"alnum_prop": 0.5843922858424279,
"repo_name": "facebookresearch/phyre",
"id": "a445ef4e1d7f52636e7825ef8b5a3c116fc4e262",
"size": "7286",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "agents/train.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "870"
},
{
"name": "C++",
"bytes": "150849"
},
{
"name": "CMake",
"bytes": "8704"
},
{
"name": "CSS",
"bytes": "3314"
},
{
"name": "Dockerfile",
"bytes": "2111"
},
{
"name": "HTML",
"bytes": "2147"
},
{
"name": "JavaScript",
"bytes": "52489"
},
{
"name": "Makefile",
"bytes": "2775"
},
{
"name": "Python",
"bytes": "653884"
},
{
"name": "Shell",
"bytes": "5674"
},
{
"name": "Thrift",
"bytes": "7384"
}
],
"symlink_target": ""
}
|
<<<<<<< HEAD
<<<<<<< HEAD
"""Core implementation of import.
This module is NOT meant to be directly imported! It has been designed such
that it can be bootstrapped into Python as the implementation of import. As
such it requires the injection of specific modules and attributes in order to
work. One should use importlib as the public-facing version of this module.
"""
#
# IMPORTANT: Whenever making changes to this module, be sure to run
# a top-level make in order to get the frozen version of the module
# updated. Not doing so will cause the Makefile to fail for
# all others who don't have a ./python around to freeze the module
# in the early stages of compilation.
#
# See importlib._setup() for what is injected into the global namespace.
# When editing this code be aware that code executed at import time CANNOT
# reference any injected objects! This includes not only global code but also
# anything specified at the class level.
# Bootstrap-related code ######################################################
_CASE_INSENSITIVE_PLATFORMS = 'win', 'cygwin', 'darwin'
def _make_relax_case():
if sys.platform.startswith(_CASE_INSENSITIVE_PLATFORMS):
def _relax_case():
"""True if filenames must be checked case-insensitively."""
return b'PYTHONCASEOK' in _os.environ
else:
def _relax_case():
"""True if filenames must be checked case-insensitively."""
return False
return _relax_case
def _w_long(x):
"""Convert a 32-bit integer to little-endian."""
return (int(x) & 0xFFFFFFFF).to_bytes(4, 'little')
def _r_long(int_bytes):
"""Convert 4 bytes in little-endian to an integer."""
return int.from_bytes(int_bytes, 'little')
def _path_join(*path_parts):
"""Replacement for os.path.join()."""
return path_sep.join([part.rstrip(path_separators)
for part in path_parts if part])
def _path_split(path):
"""Replacement for os.path.split()."""
if len(path_separators) == 1:
front, _, tail = path.rpartition(path_sep)
return front, tail
for x in reversed(path):
if x in path_separators:
front, tail = path.rsplit(x, maxsplit=1)
return front, tail
return '', path
def _path_stat(path):
"""Stat the path.
Made a separate function to make it easier to override in experiments
(e.g. cache stat results).
"""
return _os.stat(path)
def _path_is_mode_type(path, mode):
"""Test whether the path is the specified mode type."""
try:
stat_info = _path_stat(path)
except OSError:
return False
return (stat_info.st_mode & 0o170000) == mode
def _path_isfile(path):
"""Replacement for os.path.isfile."""
return _path_is_mode_type(path, 0o100000)
def _path_isdir(path):
"""Replacement for os.path.isdir."""
if not path:
path = _os.getcwd()
return _path_is_mode_type(path, 0o040000)
def _write_atomic(path, data, mode=0o666):
"""Best-effort function to write data to a path atomically.
Be prepared to handle a FileExistsError if concurrent writing of the
temporary file is attempted."""
# id() is used to generate a pseudo-random filename.
path_tmp = '{}.{}'.format(path, id(path))
fd = _os.open(path_tmp,
_os.O_EXCL | _os.O_CREAT | _os.O_WRONLY, mode & 0o666)
try:
# We first write data to a temporary file, and then use os.replace() to
# perform an atomic rename.
with _io.FileIO(fd, 'wb') as file:
file.write(data)
_os.replace(path_tmp, path)
except OSError:
try:
_os.unlink(path_tmp)
except OSError:
pass
raise
def _wrap(new, old):
"""Simple substitute for functools.update_wrapper."""
for replace in ['__module__', '__name__', '__qualname__', '__doc__']:
if hasattr(old, replace):
setattr(new, replace, getattr(old, replace))
new.__dict__.update(old.__dict__)
def _new_module(name):
return type(sys)(name)
_code_type = type(_wrap.__code__)
class _ManageReload:
"""Manages the possible clean-up of sys.modules for load_module()."""
def __init__(self, name):
self._name = name
def __enter__(self):
self._is_reload = self._name in sys.modules
def __exit__(self, *args):
if any(arg is not None for arg in args) and not self._is_reload:
try:
del sys.modules[self._name]
except KeyError:
pass
# Module-level locking ########################################################
# A dict mapping module names to weakrefs of _ModuleLock instances
_module_locks = {}
# A dict mapping thread ids to _ModuleLock instances
_blocking_on = {}
class _DeadlockError(RuntimeError):
pass
class _ModuleLock:
"""A recursive lock implementation which is able to detect deadlocks
(e.g. thread 1 trying to take locks A then B, and thread 2 trying to
take locks B then A).
"""
def __init__(self, name):
self.lock = _thread.allocate_lock()
self.wakeup = _thread.allocate_lock()
self.name = name
self.owner = None
self.count = 0
self.waiters = 0
def has_deadlock(self):
# Deadlock avoidance for concurrent circular imports.
me = _thread.get_ident()
tid = self.owner
while True:
lock = _blocking_on.get(tid)
if lock is None:
return False
tid = lock.owner
if tid == me:
return True
def acquire(self):
"""
Acquire the module lock. If a potential deadlock is detected,
a _DeadlockError is raised.
Otherwise, the lock is always acquired and True is returned.
"""
tid = _thread.get_ident()
_blocking_on[tid] = self
try:
while True:
with self.lock:
if self.count == 0 or self.owner == tid:
self.owner = tid
self.count += 1
return True
if self.has_deadlock():
raise _DeadlockError('deadlock detected by %r' % self)
if self.wakeup.acquire(False):
self.waiters += 1
# Wait for a release() call
self.wakeup.acquire()
self.wakeup.release()
finally:
del _blocking_on[tid]
def release(self):
tid = _thread.get_ident()
with self.lock:
if self.owner != tid:
raise RuntimeError('cannot release un-acquired lock')
assert self.count > 0
self.count -= 1
if self.count == 0:
self.owner = None
if self.waiters:
self.waiters -= 1
self.wakeup.release()
def __repr__(self):
return '_ModuleLock({!r}) at {}'.format(self.name, id(self))
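# --- Illustrative sketch of the deadlock that has_deadlock() detects ---
# (hypothetical thread interleaving, shown as comments rather than
# runnable code)
#
#     lock_a, lock_b = _ModuleLock('a'), _ModuleLock('b')
#     Thread 1: lock_a.acquire()     # t1 owns 'a'
#     Thread 2: lock_b.acquire()     # t2 owns 'b'
#     Thread 1: lock_b.acquire()     # blocks; _blocking_on[t1] = lock_b
#     Thread 2: lock_a.acquire()     # has_deadlock() walks lock_a.owner
#                                    # -> t1 -> _blocking_on[t1] is lock_b
#                                    # -> lock_b.owner == t2 (itself), so
#                                    # acquire() raises _DeadlockError
#                                    # instead of hanging forever.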
class _DummyModuleLock:
"""A simple _ModuleLock equivalent for Python builds without
multi-threading support."""
def __init__(self, name):
self.name = name
self.count = 0
def acquire(self):
self.count += 1
return True
def release(self):
if self.count == 0:
raise RuntimeError('cannot release un-acquired lock')
self.count -= 1
def __repr__(self):
return '_DummyModuleLock({!r}) at {}'.format(self.name, id(self))
class _ModuleLockManager:
def __init__(self, name):
self._name = name
self._lock = None
def __enter__(self):
try:
self._lock = _get_module_lock(self._name)
finally:
_imp.release_lock()
self._lock.acquire()
def __exit__(self, *args, **kwargs):
self._lock.release()
# The following two functions are for consumption by Python/import.c.
def _get_module_lock(name):
"""Get or create the module lock for a given module name.
Should only be called with the import lock taken."""
lock = None
try:
lock = _module_locks[name]()
except KeyError:
pass
if lock is None:
if _thread is None:
lock = _DummyModuleLock(name)
else:
lock = _ModuleLock(name)
def cb(_):
del _module_locks[name]
_module_locks[name] = _weakref.ref(lock, cb)
return lock
def _lock_unlock_module(name):
"""Release the global import lock, and acquires then release the
module lock for a given module name.
This is used to ensure a module is completely initialized, in the
event it is being imported by another thread.
Should only be called with the import lock taken."""
lock = _get_module_lock(name)
_imp.release_lock()
try:
lock.acquire()
except _DeadlockError:
# Concurrent circular import, we'll accept a partially initialized
# module object.
pass
else:
lock.release()
# Frame stripping magic ###############################################
def _call_with_frames_removed(f, *args, **kwds):
"""remove_importlib_frames in import.c will always remove sequences
of importlib frames that end with a call to this function
Use it instead of a normal call in places where including the importlib
frames introduces unwanted noise into the traceback (e.g. when executing
module code)
"""
return f(*args, **kwds)
# Finder/loader utility code ###############################################
# Magic word to reject .pyc files generated by other Python versions.
# It should change for each incompatible change to the bytecode.
#
# The value of CR and LF is incorporated so if you ever read or write
# a .pyc file in text mode the magic number will be wrong; also, the
# Apple MPW compiler swaps their values, botching string constants.
#
# The magic numbers must be spaced apart at least 2 values, as the
# -U interpreter flag will cause MAGIC+1 to be used. They have been
# odd numbers for some time now.
#
# There were a variety of old schemes for setting the magic number.
# The current working scheme is to increment the previous value by
# 10.
#
# Starting with the adoption of PEP 3147 in Python 3.2, every bump in magic
# number also includes a new "magic tag", i.e. a human readable string used
# to represent the magic number in __pycache__ directories. When you change
# the magic number, you must also set a new unique magic tag. Generally this
# can be named after the Python major version of the magic number bump, but
# it can really be anything, as long as it's different than anything else
# that's come before. The tags are included in the following table, starting
# with Python 3.2a0.
#
# Known values:
# Python 1.5: 20121
# Python 1.5.1: 20121
# Python 1.5.2: 20121
# Python 1.6: 50428
# Python 2.0: 50823
# Python 2.0.1: 50823
# Python 2.1: 60202
# Python 2.1.1: 60202
# Python 2.1.2: 60202
# Python 2.2: 60717
# Python 2.3a0: 62011
# Python 2.3a0: 62021
# Python 2.3a0: 62011 (!)
# Python 2.4a0: 62041
# Python 2.4a3: 62051
# Python 2.4b1: 62061
# Python 2.5a0: 62071
# Python 2.5a0: 62081 (ast-branch)
# Python 2.5a0: 62091 (with)
# Python 2.5a0: 62092 (changed WITH_CLEANUP opcode)
# Python 2.5b3: 62101 (fix wrong code: for x, in ...)
# Python 2.5b3: 62111 (fix wrong code: x += yield)
# Python 2.5c1: 62121 (fix wrong lnotab with for loops and
# storing constants that should have been removed)
# Python 2.5c2: 62131 (fix wrong code: for x, in ... in listcomp/genexp)
# Python 2.6a0: 62151 (peephole optimizations and STORE_MAP opcode)
# Python 2.6a1: 62161 (WITH_CLEANUP optimization)
# Python 2.7a0: 62171 (optimize list comprehensions/change LIST_APPEND)
# Python 2.7a0: 62181 (optimize conditional branches:
# introduce POP_JUMP_IF_FALSE and POP_JUMP_IF_TRUE)
# Python 2.7a0 62191 (introduce SETUP_WITH)
# Python 2.7a0 62201 (introduce BUILD_SET)
# Python 2.7a0 62211 (introduce MAP_ADD and SET_ADD)
# Python 3000: 3000
# 3010 (removed UNARY_CONVERT)
# 3020 (added BUILD_SET)
# 3030 (added keyword-only parameters)
# 3040 (added signature annotations)
# 3050 (print becomes a function)
# 3060 (PEP 3115 metaclass syntax)
# 3061 (string literals become unicode)
# 3071 (PEP 3109 raise changes)
# 3081 (PEP 3137 make __file__ and __name__ unicode)
# 3091 (kill str8 interning)
# 3101 (merge from 2.6a0, see 62151)
# 3103 (__file__ points to source file)
# Python 3.0a4: 3111 (WITH_CLEANUP optimization).
# Python 3.0a5: 3131 (lexical exception stacking, including POP_EXCEPT)
# Python 3.1a0: 3141 (optimize list, set and dict comprehensions:
# change LIST_APPEND and SET_ADD, add MAP_ADD)
# Python 3.1a0: 3151 (optimize conditional branches:
# introduce POP_JUMP_IF_FALSE and POP_JUMP_IF_TRUE)
# Python 3.2a0: 3160 (add SETUP_WITH)
# tag: cpython-32
# Python 3.2a1: 3170 (add DUP_TOP_TWO, remove DUP_TOPX and ROT_FOUR)
# tag: cpython-32
# Python 3.2a2 3180 (add DELETE_DEREF)
# Python 3.3a0 3190 __class__ super closure changed
# Python 3.3a0 3200 (__qualname__ added)
# 3210 (added size modulo 2**32 to the pyc header)
# Python 3.3a1 3220 (changed PEP 380 implementation)
# Python 3.3a4 3230 (revert changes to implicit __class__ closure)
# Python 3.4a1 3250 (evaluate positional default arguments before
# keyword-only defaults)
# Python 3.4a1 3260 (add LOAD_CLASSDEREF; allow locals of class to override
# free vars)
# Python 3.4a1 3270 (various tweaks to the __class__ closure)
# Python 3.4a1 3280 (remove implicit class argument)
# Python 3.4a4 3290 (changes to __qualname__ computation)
# Python 3.4a4 3300 (more changes to __qualname__ computation)
# Python 3.4rc2 3310 (alter __qualname__ computation)
#
# MAGIC must change whenever the bytecode emitted by the compiler may no
# longer be understood by older implementations of the eval loop (usually
# due to the addition of new opcodes).
MAGIC_NUMBER = (3310).to_bytes(2, 'little') + b'\r\n'
_RAW_MAGIC_NUMBER = int.from_bytes(MAGIC_NUMBER, 'little') # For import.c
_PYCACHE = '__pycache__'
SOURCE_SUFFIXES = ['.py'] # _setup() adds .pyw as needed.
DEBUG_BYTECODE_SUFFIXES = ['.pyc']
OPTIMIZED_BYTECODE_SUFFIXES = ['.pyo']
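# --- Illustrative sketch ---
# A .pyc/.pyo file written by this bootstrap starts with a 12-byte header
# that _validate_bytecode_header() below checks field by field:
#
#     data[0:4]    MAGIC_NUMBER             (b'\xee\x0c\r\n' for 3310)
#     data[4:8]    source mtime, little-endian (see _w_long/_r_long)
#     data[8:12]   source size modulo 2**32, little-endian
#
# followed by the marshalled code object.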
def cache_from_source(path, debug_override=None):
"""Given the path to a .py file, return the path to its .pyc/.pyo file.
The .py file does not need to exist; this simply returns the path to the
.pyc/.pyo file calculated as if the .py file were imported. The extension
will be .pyc unless sys.flags.optimize is non-zero, then it will be .pyo.
If debug_override is not None, then it must be a boolean and is used in
place of sys.flags.optimize.
If sys.implementation.cache_tag is None then NotImplementedError is raised.
"""
debug = not sys.flags.optimize if debug_override is None else debug_override
if debug:
suffixes = DEBUG_BYTECODE_SUFFIXES
else:
suffixes = OPTIMIZED_BYTECODE_SUFFIXES
head, tail = _path_split(path)
base, sep, rest = tail.rpartition('.')
tag = sys.implementation.cache_tag
if tag is None:
raise NotImplementedError('sys.implementation.cache_tag is None')
filename = ''.join([(base if base else rest), sep, tag, suffixes[0]])
return _path_join(head, _PYCACHE, filename)
def source_from_cache(path):
"""Given the path to a .pyc./.pyo file, return the path to its .py file.
The .pyc/.pyo file does not need to exist; this simply returns the path to
the .py file calculated to correspond to the .pyc/.pyo file. If path does
not conform to PEP 3147 format, ValueError will be raised. If
sys.implementation.cache_tag is None then NotImplementedError is raised.
"""
if sys.implementation.cache_tag is None:
raise NotImplementedError('sys.implementation.cache_tag is None')
head, pycache_filename = _path_split(path)
head, pycache = _path_split(head)
if pycache != _PYCACHE:
raise ValueError('{} not bottom-level directory in '
'{!r}'.format(_PYCACHE, path))
if pycache_filename.count('.') != 2:
raise ValueError('expected only 2 dots in '
'{!r}'.format(pycache_filename))
base_filename = pycache_filename.partition('.')[0]
return _path_join(head, base_filename + SOURCE_SUFFIXES[0])
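# --- Illustrative sketch ---
# Example of the PEP 3147 mapping implemented by the two functions above,
# assuming sys.implementation.cache_tag == 'cpython-34' and
# sys.flags.optimize == 0 (so the .pyc suffix is chosen):
#
#     cache_from_source('/foo/bar/baz.py')
#         -> '/foo/bar/__pycache__/baz.cpython-34.pyc'
#     source_from_cache('/foo/bar/__pycache__/baz.cpython-34.pyc')
#         -> '/foo/bar/baz.py'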
def _get_sourcefile(bytecode_path):
"""Convert a bytecode file path to a source path (if possible).
This function exists purely for backwards-compatibility for
PyImport_ExecCodeModuleWithFilenames() in the C API.
"""
if len(bytecode_path) == 0:
return None
rest, _, extension = bytecode_path.rpartition('.')
if not rest or extension.lower()[-3:-1] != 'py':
return bytecode_path
try:
source_path = source_from_cache(bytecode_path)
except (NotImplementedError, ValueError):
source_path = bytecode_path[:-1]
return source_path if _path_isfile(source_path) else bytecode_path
def _calc_mode(path):
"""Calculate the mode permissions for a bytecode file."""
try:
mode = _path_stat(path).st_mode
except OSError:
mode = 0o666
# We always ensure write access so we can update cached files
# later even when the source files are read-only on Windows (#6074)
mode |= 0o200
return mode
def _verbose_message(message, *args, verbosity=1):
"""Print the message to stderr if -v/PYTHONVERBOSE is turned on."""
if sys.flags.verbose >= verbosity:
if not message.startswith(('#', 'import ')):
message = '# ' + message
print(message.format(*args), file=sys.stderr)
def _check_name(method):
"""Decorator to verify that the module being requested matches the one the
loader can handle.
    The first argument (self) must define a 'name' attribute which the second
    argument is compared against. If the comparison fails then ImportError is
    raised.
"""
def _check_name_wrapper(self, name=None, *args, **kwargs):
if name is None:
name = self.name
elif self.name != name:
raise ImportError('loader cannot handle %s' % name, name=name)
return method(self, name, *args, **kwargs)
_wrap(_check_name_wrapper, method)
return _check_name_wrapper
def _requires_builtin(fxn):
"""Decorator to verify the named module is built-in."""
def _requires_builtin_wrapper(self, fullname):
if fullname not in sys.builtin_module_names:
raise ImportError('{!r} is not a built-in module'.format(fullname),
name=fullname)
return fxn(self, fullname)
_wrap(_requires_builtin_wrapper, fxn)
return _requires_builtin_wrapper
def _requires_frozen(fxn):
"""Decorator to verify the named module is frozen."""
def _requires_frozen_wrapper(self, fullname):
if not _imp.is_frozen(fullname):
raise ImportError('{!r} is not a frozen module'.format(fullname),
name=fullname)
return fxn(self, fullname)
_wrap(_requires_frozen_wrapper, fxn)
return _requires_frozen_wrapper
def _find_module_shim(self, fullname):
"""Try to find a loader for the specified module by delegating to
self.find_loader().
This method is deprecated in favor of finder.find_spec().
"""
# Call find_loader(). If it returns a string (indicating this
# is a namespace package portion), generate a warning and
# return None.
loader, portions = self.find_loader(fullname)
if loader is None and len(portions):
msg = 'Not importing directory {}: missing __init__'
_warnings.warn(msg.format(portions[0]), ImportWarning)
return loader
def _load_module_shim(self, fullname):
"""Load the specified module into sys.modules and return it.
This method is deprecated. Use loader.exec_module instead.
"""
spec = spec_from_loader(fullname, self)
methods = _SpecMethods(spec)
if fullname in sys.modules:
module = sys.modules[fullname]
methods.exec(module)
return sys.modules[fullname]
else:
return methods.load()
def _validate_bytecode_header(data, source_stats=None, name=None, path=None):
"""Validate the header of the passed-in bytecode against source_stats (if
given) and returning the bytecode that can be compiled by compile().
All other arguments are used to enhance error reporting.
ImportError is raised when the magic number is incorrect or the bytecode is
found to be stale. EOFError is raised when the data is found to be
truncated.
"""
exc_details = {}
if name is not None:
exc_details['name'] = name
else:
# To prevent having to make all messages have a conditional name.
name = '<bytecode>'
if path is not None:
exc_details['path'] = path
magic = data[:4]
raw_timestamp = data[4:8]
raw_size = data[8:12]
if magic != MAGIC_NUMBER:
message = 'bad magic number in {!r}: {!r}'.format(name, magic)
_verbose_message(message)
raise ImportError(message, **exc_details)
elif len(raw_timestamp) != 4:
message = 'reached EOF while reading timestamp in {!r}'.format(name)
_verbose_message(message)
raise EOFError(message)
elif len(raw_size) != 4:
message = 'reached EOF while reading size of source in {!r}'.format(name)
_verbose_message(message)
raise EOFError(message)
if source_stats is not None:
try:
source_mtime = int(source_stats['mtime'])
except KeyError:
pass
else:
if _r_long(raw_timestamp) != source_mtime:
message = 'bytecode is stale for {!r}'.format(name)
_verbose_message(message)
raise ImportError(message, **exc_details)
try:
source_size = source_stats['size'] & 0xFFFFFFFF
except KeyError:
pass
else:
if _r_long(raw_size) != source_size:
raise ImportError('bytecode is stale for {!r}'.format(name),
**exc_details)
return data[12:]
def _compile_bytecode(data, name=None, bytecode_path=None, source_path=None):
"""Compile bytecode as returned by _validate_bytecode_header()."""
code = marshal.loads(data)
if isinstance(code, _code_type):
_verbose_message('code object from {!r}', bytecode_path)
if source_path is not None:
_imp._fix_co_filename(code, source_path)
return code
else:
raise ImportError('Non-code object in {!r}'.format(bytecode_path),
name=name, path=bytecode_path)
def _code_to_bytecode(code, mtime=0, source_size=0):
"""Compile a code object into bytecode for writing out to a byte-compiled
file."""
data = bytearray(MAGIC_NUMBER)
data.extend(_w_long(mtime))
data.extend(_w_long(source_size))
data.extend(marshal.dumps(code))
return data
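# Layout of the bytecode files produced and consumed by the two functions
# above (a sketch; byte offsets):
#
#   0..3    MAGIC_NUMBER (the 2-byte magic, little-endian, plus b'\r\n')
#   4..7    source mtime as written by _w_long()
#   8..11   source size & 0xFFFFFFFF as written by _w_long()
#   12..    marshal.dumps(code)
#
# _validate_bytecode_header() checks the first 12 bytes and returns the rest
# for _compile_bytecode() to unmarshal.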
def decode_source(source_bytes):
"""Decode bytes representing source code and return the string.
Universal newline support is used in the decoding.
"""
import tokenize # To avoid bootstrap issues.
source_bytes_readline = _io.BytesIO(source_bytes).readline
encoding = tokenize.detect_encoding(source_bytes_readline)
newline_decoder = _io.IncrementalNewlineDecoder(None, True)
return newline_decoder.decode(source_bytes.decode(encoding[0]))
# Module specifications #######################################################
def _module_repr(module):
    # The implementation of ModuleType.__repr__().
loader = getattr(module, '__loader__', None)
if hasattr(loader, 'module_repr'):
        # As soon as BuiltinImporter, FrozenImporter, and NamespaceLoader
        # drop their implementations for module_repr(), we can add a
        # deprecation warning here.
try:
return loader.module_repr(module)
except Exception:
pass
try:
spec = module.__spec__
except AttributeError:
pass
else:
if spec is not None:
return _SpecMethods(spec).module_repr()
# We could use module.__class__.__name__ instead of 'module' in the
# various repr permutations.
try:
name = module.__name__
except AttributeError:
name = '?'
try:
filename = module.__file__
except AttributeError:
if loader is None:
return '<module {!r}>'.format(name)
else:
return '<module {!r} ({!r})>'.format(name, loader)
else:
return '<module {!r} from {!r}>'.format(name, filename)
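# The three repr shapes produced above, for a module named 'spam'
# (hypothetical values):
#
#   <module 'spam'>                         no loader and no __file__
#   <module 'spam' (<loader repr>)>         loader but no __file__
#   <module 'spam' from '/proj/spam.py'>    __file__ available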
class _installed_safely:
def __init__(self, module):
self._module = module
self._spec = module.__spec__
def __enter__(self):
# This must be done before putting the module in sys.modules
# (otherwise an optimization shortcut in import.c becomes
# wrong)
self._spec._initializing = True
sys.modules[self._spec.name] = self._module
def __exit__(self, *args):
try:
spec = self._spec
if any(arg is not None for arg in args):
try:
del sys.modules[spec.name]
except KeyError:
pass
else:
_verbose_message('import {!r} # {!r}', spec.name, spec.loader)
finally:
self._spec._initializing = False
class ModuleSpec:
"""The specification for a module, used for loading.
A module's spec is the source for information about the module. For
data associated with the module, including source, use the spec's
loader.
`name` is the absolute name of the module. `loader` is the loader
to use when loading the module. `parent` is the name of the
package the module is in. The parent is derived from the name.
`is_package` determines if the module is considered a package or
not. On modules this is reflected by the `__path__` attribute.
`origin` is the specific location used by the loader from which to
load the module, if that information is available. When filename is
set, origin will match.
`has_location` indicates that a spec's "origin" reflects a location.
    When this is True, the `__file__` attribute of the module is set.
`cached` is the location of the cached bytecode file, if any. It
corresponds to the `__cached__` attribute.
`submodule_search_locations` is the sequence of path entries to
search when importing submodules. If set, is_package should be
True--and False otherwise.
Packages are simply modules that (may) have submodules. If a spec
has a non-None value in `submodule_search_locations`, the import
system will consider modules loaded from the spec as packages.
Only finders (see importlib.abc.MetaPathFinder and
importlib.abc.PathEntryFinder) should modify ModuleSpec instances.
"""
def __init__(self, name, loader, *, origin=None, loader_state=None,
is_package=None):
self.name = name
self.loader = loader
self.origin = origin
self.loader_state = loader_state
self.submodule_search_locations = [] if is_package else None
# file-location attributes
self._set_fileattr = False
self._cached = None
def __repr__(self):
args = ['name={!r}'.format(self.name),
'loader={!r}'.format(self.loader)]
if self.origin is not None:
args.append('origin={!r}'.format(self.origin))
if self.submodule_search_locations is not None:
args.append('submodule_search_locations={}'
.format(self.submodule_search_locations))
return '{}({})'.format(self.__class__.__name__, ', '.join(args))
def __eq__(self, other):
smsl = self.submodule_search_locations
try:
return (self.name == other.name and
self.loader == other.loader and
self.origin == other.origin and
smsl == other.submodule_search_locations and
self.cached == other.cached and
self.has_location == other.has_location)
except AttributeError:
return False
@property
def cached(self):
if self._cached is None:
if self.origin is not None and self._set_fileattr:
filename = self.origin
if filename.endswith(tuple(SOURCE_SUFFIXES)):
try:
self._cached = cache_from_source(filename)
except NotImplementedError:
pass
elif filename.endswith(tuple(BYTECODE_SUFFIXES)):
self._cached = filename
return self._cached
@cached.setter
def cached(self, cached):
self._cached = cached
@property
def parent(self):
"""The name of the module's parent."""
if self.submodule_search_locations is None:
return self.name.rpartition('.')[0]
else:
return self.name
@property
def has_location(self):
return self._set_fileattr
@has_location.setter
def has_location(self, value):
self._set_fileattr = bool(value)
def spec_from_loader(name, loader, *, origin=None, is_package=None):
"""Return a module spec based on various loader methods."""
if hasattr(loader, 'get_filename'):
if is_package is None:
return spec_from_file_location(name, loader=loader)
search = [] if is_package else None
return spec_from_file_location(name, loader=loader,
submodule_search_locations=search)
if is_package is None:
if hasattr(loader, 'is_package'):
try:
is_package = loader.is_package(name)
except ImportError:
is_package = None # aka, undefined
else:
# the default
is_package = False
return ModuleSpec(name, loader, origin=origin, is_package=is_package)
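# Usage sketch (BuiltinImporter is defined further down in this module;
# 'time' is only an illustrative built-in name):
#
#   spec = spec_from_loader('time', BuiltinImporter, origin='built-in')
#   (spec.name, spec.origin, spec.submodule_search_locations)
#   # -> ('time', 'built-in', None)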
_POPULATE = object()
def spec_from_file_location(name, location=None, *, loader=None,
submodule_search_locations=_POPULATE):
"""Return a module spec based on a file location.
To indicate that the module is a package, set
submodule_search_locations to a list of directory paths. An
    empty list is sufficient, though it's not otherwise useful to the
import system.
The loader must take a spec as its only __init__() arg.
"""
if location is None:
# The caller may simply want a partially populated location-
# oriented spec. So we set the location to a bogus value and
# fill in as much as we can.
location = '<unknown>'
if hasattr(loader, 'get_filename'):
# ExecutionLoader
try:
location = loader.get_filename(name)
except ImportError:
pass
# If the location is on the filesystem, but doesn't actually exist,
# we could return None here, indicating that the location is not
# valid. However, we don't have a good way of testing since an
# indirect location (e.g. a zip file or URL) will look like a
# non-existent file relative to the filesystem.
spec = ModuleSpec(name, loader, origin=location)
spec._set_fileattr = True
# Pick a loader if one wasn't provided.
if loader is None:
for loader_class, suffixes in _get_supported_file_loaders():
if location.endswith(tuple(suffixes)):
loader = loader_class(name, location)
spec.loader = loader
break
else:
return None
    # Set submodule_search_locations appropriately.
if submodule_search_locations is _POPULATE:
# Check the loader.
if hasattr(loader, 'is_package'):
try:
is_package = loader.is_package(name)
except ImportError:
pass
else:
if is_package:
spec.submodule_search_locations = []
else:
spec.submodule_search_locations = submodule_search_locations
if spec.submodule_search_locations == []:
if location:
dirname = _path_split(location)[0]
spec.submodule_search_locations.append(dirname)
return spec
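# Usage sketch (hypothetical file; with no loader argument one is picked
# from _get_supported_file_loaders() by suffix):
#
#   spec = spec_from_file_location('spam', '/proj/spam.py')
#   (spec.origin, spec.has_location, type(spec.loader).__name__)
#   # -> ('/proj/spam.py', True, 'SourceFileLoader')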
def _spec_from_module(module, loader=None, origin=None):
# This function is meant for use in _setup().
try:
spec = module.__spec__
except AttributeError:
pass
else:
if spec is not None:
return spec
name = module.__name__
if loader is None:
try:
loader = module.__loader__
except AttributeError:
# loader will stay None.
pass
try:
location = module.__file__
except AttributeError:
location = None
if origin is None:
if location is None:
try:
origin = loader._ORIGIN
except AttributeError:
origin = None
else:
origin = location
try:
cached = module.__cached__
except AttributeError:
cached = None
try:
submodule_search_locations = list(module.__path__)
except AttributeError:
submodule_search_locations = None
spec = ModuleSpec(name, loader, origin=origin)
spec._set_fileattr = False if location is None else True
spec.cached = cached
spec.submodule_search_locations = submodule_search_locations
return spec
class _SpecMethods:
"""Convenience wrapper around spec objects to provide spec-specific
methods."""
# The various spec_from_* functions could be made factory methods here.
def __init__(self, spec):
self.spec = spec
def module_repr(self):
"""Return the repr to use for the module."""
# We mostly replicate _module_repr() using the spec attributes.
spec = self.spec
name = '?' if spec.name is None else spec.name
if spec.origin is None:
if spec.loader is None:
return '<module {!r}>'.format(name)
else:
return '<module {!r} ({!r})>'.format(name, spec.loader)
else:
if spec.has_location:
return '<module {!r} from {!r}>'.format(name, spec.origin)
else:
                return '<module {!r} ({})>'.format(name, spec.origin)
def init_module_attrs(self, module, *, _override=False, _force_name=True):
"""Set the module's attributes.
All missing import-related module attributes will be set. Here
is how the spec attributes map onto the module:
spec.name -> module.__name__
spec.loader -> module.__loader__
spec.parent -> module.__package__
spec -> module.__spec__
Optional:
        spec.origin -> module.__file__ (if spec._set_fileattr is true)
spec.cached -> module.__cached__ (if __file__ also set)
spec.submodule_search_locations -> module.__path__ (if set)
"""
spec = self.spec
        # The passed-in module may not support attribute assignment,
        # in which case we simply don't set the attributes.
# __name__
if (_override or _force_name or
getattr(module, '__name__', None) is None):
try:
module.__name__ = spec.name
except AttributeError:
pass
# __loader__
if _override or getattr(module, '__loader__', None) is None:
loader = spec.loader
if loader is None:
# A backward compatibility hack.
if spec.submodule_search_locations is not None:
loader = _NamespaceLoader.__new__(_NamespaceLoader)
loader._path = spec.submodule_search_locations
try:
module.__loader__ = loader
except AttributeError:
pass
# __package__
if _override or getattr(module, '__package__', None) is None:
try:
module.__package__ = spec.parent
except AttributeError:
pass
# __spec__
try:
module.__spec__ = spec
except AttributeError:
pass
# __path__
if _override or getattr(module, '__path__', None) is None:
if spec.submodule_search_locations is not None:
try:
module.__path__ = spec.submodule_search_locations
except AttributeError:
pass
if spec.has_location:
# __file__
if _override or getattr(module, '__file__', None) is None:
try:
module.__file__ = spec.origin
except AttributeError:
pass
# __cached__
if _override or getattr(module, '__cached__', None) is None:
if spec.cached is not None:
try:
module.__cached__ = spec.cached
except AttributeError:
pass
def create(self):
"""Return a new module to be loaded.
The import-related module attributes are also set with the
appropriate values from the spec.
"""
spec = self.spec
# Typically loaders will not implement create_module().
if hasattr(spec.loader, 'create_module'):
# If create_module() returns `None` it means the default
# module creation should be used.
module = spec.loader.create_module(spec)
else:
module = None
if module is None:
# This must be done before open() is ever called as the 'io'
# module implicitly imports 'locale' and would otherwise
# trigger an infinite loop.
module = _new_module(spec.name)
self.init_module_attrs(module)
return module
def _exec(self, module):
"""Do everything necessary to execute the module.
The namespace of `module` is used as the target of execution.
This method uses the loader's `exec_module()` method.
"""
self.spec.loader.exec_module(module)
# Used by importlib.reload() and _load_module_shim().
def exec(self, module):
"""Execute the spec in an existing module's namespace."""
name = self.spec.name
_imp.acquire_lock()
with _ModuleLockManager(name):
if sys.modules.get(name) is not module:
msg = 'module {!r} not in sys.modules'.format(name)
raise ImportError(msg, name=name)
if self.spec.loader is None:
if self.spec.submodule_search_locations is None:
raise ImportError('missing loader', name=self.spec.name)
# namespace package
self.init_module_attrs(module, _override=True)
return module
self.init_module_attrs(module, _override=True)
if not hasattr(self.spec.loader, 'exec_module'):
# (issue19713) Once BuiltinImporter and ExtensionFileLoader
# have exec_module() implemented, we can add a deprecation
# warning here.
self.spec.loader.load_module(name)
else:
self._exec(module)
return sys.modules[name]
def _load_backward_compatible(self):
# (issue19713) Once BuiltinImporter and ExtensionFileLoader
# have exec_module() implemented, we can add a deprecation
# warning here.
spec = self.spec
spec.loader.load_module(spec.name)
# The module must be in sys.modules at this point!
module = sys.modules[spec.name]
if getattr(module, '__loader__', None) is None:
try:
module.__loader__ = spec.loader
except AttributeError:
pass
if getattr(module, '__package__', None) is None:
try:
                # Since module.__path__ may not line up with
                # spec.submodule_search_locations, we can't necessarily rely
                # on spec.parent here.
module.__package__ = module.__name__
if not hasattr(module, '__path__'):
module.__package__ = spec.name.rpartition('.')[0]
except AttributeError:
pass
if getattr(module, '__spec__', None) is None:
try:
module.__spec__ = spec
except AttributeError:
pass
return module
def _load_unlocked(self):
# A helper for direct use by the import system.
if self.spec.loader is not None:
# not a namespace package
if not hasattr(self.spec.loader, 'exec_module'):
return self._load_backward_compatible()
module = self.create()
with _installed_safely(module):
if self.spec.loader is None:
if self.spec.submodule_search_locations is None:
raise ImportError('missing loader', name=self.spec.name)
# A namespace package so do nothing.
else:
self._exec(module)
# We don't ensure that the import-related module attributes get
# set in the sys.modules replacement case. Such modules are on
# their own.
return sys.modules[self.spec.name]
# A method used during testing of _load_unlocked() and by
# _load_module_shim().
def load(self):
"""Return a new module object, loaded by the spec's loader.
The module is not added to its parent.
If a module is already in sys.modules, that existing module gets
clobbered.
"""
_imp.acquire_lock()
with _ModuleLockManager(self.spec.name):
return self._load_unlocked()
def _fix_up_module(ns, name, pathname, cpathname=None):
# This function is used by PyImport_ExecCodeModuleObject().
loader = ns.get('__loader__')
spec = ns.get('__spec__')
if not loader:
if spec:
loader = spec.loader
elif pathname == cpathname:
loader = SourcelessFileLoader(name, pathname)
else:
loader = SourceFileLoader(name, pathname)
if not spec:
spec = spec_from_file_location(name, pathname, loader=loader)
try:
ns['__spec__'] = spec
ns['__loader__'] = loader
ns['__file__'] = pathname
ns['__cached__'] = cpathname
except Exception:
# Not important enough to report.
pass
# Loaders #####################################################################
class BuiltinImporter:
"""Meta path import for built-in modules.
All methods are either class or static methods to avoid the need to
instantiate the class.
"""
@staticmethod
def module_repr(module):
"""Return repr for the module.
The method is deprecated. The import machinery does the job itself.
"""
return '<module {!r} (built-in)>'.format(module.__name__)
@classmethod
def find_spec(cls, fullname, path=None, target=None):
if path is not None:
return None
if _imp.is_builtin(fullname):
return spec_from_loader(fullname, cls, origin='built-in')
else:
return None
@classmethod
def find_module(cls, fullname, path=None):
"""Find the built-in module.
If 'path' is ever specified then the search is considered a failure.
This method is deprecated. Use find_spec() instead.
"""
spec = cls.find_spec(fullname, path)
return spec.loader if spec is not None else None
@classmethod
@_requires_builtin
def load_module(cls, fullname):
"""Load a built-in module."""
# Once an exec_module() implementation is added we can also
# add a deprecation warning here.
with _ManageReload(fullname):
module = _call_with_frames_removed(_imp.init_builtin, fullname)
module.__loader__ = cls
module.__package__ = ''
return module
@classmethod
@_requires_builtin
def get_code(cls, fullname):
"""Return None as built-in modules do not have code objects."""
return None
@classmethod
@_requires_builtin
def get_source(cls, fullname):
"""Return None as built-in modules do not have source code."""
return None
@classmethod
@_requires_builtin
def is_package(cls, fullname):
"""Return False as built-in modules are never packages."""
return False
class FrozenImporter:
"""Meta path import for frozen modules.
All methods are either class or static methods to avoid the need to
instantiate the class.
"""
@staticmethod
def module_repr(m):
"""Return repr for the module.
The method is deprecated. The import machinery does the job itself.
"""
return '<module {!r} (frozen)>'.format(m.__name__)
@classmethod
def find_spec(cls, fullname, path=None, target=None):
if _imp.is_frozen(fullname):
return spec_from_loader(fullname, cls, origin='frozen')
else:
return None
@classmethod
def find_module(cls, fullname, path=None):
"""Find a frozen module.
This method is deprecated. Use find_spec() instead.
"""
return cls if _imp.is_frozen(fullname) else None
@staticmethod
def exec_module(module):
name = module.__spec__.name
if not _imp.is_frozen(name):
raise ImportError('{!r} is not a frozen module'.format(name),
name=name)
code = _call_with_frames_removed(_imp.get_frozen_object, name)
exec(code, module.__dict__)
@classmethod
def load_module(cls, fullname):
"""Load a frozen module.
This method is deprecated. Use exec_module() instead.
"""
return _load_module_shim(cls, fullname)
@classmethod
@_requires_frozen
def get_code(cls, fullname):
"""Return the code object for the frozen module."""
return _imp.get_frozen_object(fullname)
@classmethod
@_requires_frozen
def get_source(cls, fullname):
"""Return None as frozen modules do not have source code."""
return None
@classmethod
@_requires_frozen
def is_package(cls, fullname):
"""Return True if the frozen module is a package."""
return _imp.is_frozen_package(fullname)
class WindowsRegistryFinder:
"""Meta path finder for modules declared in the Windows registry."""
REGISTRY_KEY = (
'Software\\Python\\PythonCore\\{sys_version}'
'\\Modules\\{fullname}')
REGISTRY_KEY_DEBUG = (
'Software\\Python\\PythonCore\\{sys_version}'
'\\Modules\\{fullname}\\Debug')
DEBUG_BUILD = False # Changed in _setup()
@classmethod
def _open_registry(cls, key):
try:
return _winreg.OpenKey(_winreg.HKEY_CURRENT_USER, key)
except OSError:
return _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, key)
@classmethod
def _search_registry(cls, fullname):
if cls.DEBUG_BUILD:
registry_key = cls.REGISTRY_KEY_DEBUG
else:
registry_key = cls.REGISTRY_KEY
key = registry_key.format(fullname=fullname,
sys_version=sys.version[:3])
try:
with cls._open_registry(key) as hkey:
filepath = _winreg.QueryValue(hkey, '')
except OSError:
return None
return filepath
@classmethod
def find_spec(cls, fullname, path=None, target=None):
filepath = cls._search_registry(fullname)
if filepath is None:
return None
try:
_path_stat(filepath)
except OSError:
return None
for loader, suffixes in _get_supported_file_loaders():
if filepath.endswith(tuple(suffixes)):
spec = spec_from_loader(fullname, loader(fullname, filepath),
origin=filepath)
return spec
@classmethod
def find_module(cls, fullname, path=None):
"""Find module named in the registry.
        This method is deprecated. Use find_spec() instead.
"""
spec = cls.find_spec(fullname, path)
if spec is not None:
return spec.loader
else:
return None
class _LoaderBasics:
"""Base class of common code needed by both SourceLoader and
SourcelessFileLoader."""
def is_package(self, fullname):
"""Concrete implementation of InspectLoader.is_package by checking if
the path returned by get_filename has a filename of '__init__.py'."""
filename = _path_split(self.get_filename(fullname))[1]
filename_base = filename.rsplit('.', 1)[0]
tail_name = fullname.rpartition('.')[2]
return filename_base == '__init__' and tail_name != '__init__'
def exec_module(self, module):
"""Execute the module."""
code = self.get_code(module.__name__)
if code is None:
raise ImportError('cannot load module {!r} when get_code() '
'returns None'.format(module.__name__))
_call_with_frames_removed(exec, code, module.__dict__)
load_module = _load_module_shim
class SourceLoader(_LoaderBasics):
def path_mtime(self, path):
"""Optional method that returns the modification time (an int) for the
specified path, where path is a str.
Raises IOError when the path cannot be handled.
"""
raise IOError
def path_stats(self, path):
"""Optional method returning a metadata dict for the specified path
to by the path (str).
Possible keys:
- 'mtime' (mandatory) is the numeric timestamp of last source
code modification;
- 'size' (optional) is the size in bytes of the source code.
Implementing this method allows the loader to read bytecode files.
Raises IOError when the path cannot be handled.
"""
return {'mtime': self.path_mtime(path)}
def _cache_bytecode(self, source_path, cache_path, data):
"""Optional method which writes data (bytes) to a file path (a str).
Implementing this method allows for the writing of bytecode files.
        The source path is needed in order to correctly transfer permissions.
"""
# For backwards compatibility, we delegate to set_data()
return self.set_data(cache_path, data)
def set_data(self, path, data):
"""Optional method which writes data (bytes) to a file path (a str).
Implementing this method allows for the writing of bytecode files.
"""
def get_source(self, fullname):
"""Concrete implementation of InspectLoader.get_source."""
path = self.get_filename(fullname)
try:
source_bytes = self.get_data(path)
except OSError as exc:
raise ImportError('source not available through get_data()',
name=fullname) from exc
return decode_source(source_bytes)
def source_to_code(self, data, path, *, _optimize=-1):
"""Return the code object compiled from source.
The 'data' argument can be any object type that compile() supports.
"""
return _call_with_frames_removed(compile, data, path, 'exec',
dont_inherit=True, optimize=_optimize)
def get_code(self, fullname):
"""Concrete implementation of InspectLoader.get_code.
Reading of bytecode requires path_stats to be implemented. To write
bytecode, set_data must also be implemented.
"""
source_path = self.get_filename(fullname)
source_mtime = None
try:
bytecode_path = cache_from_source(source_path)
except NotImplementedError:
bytecode_path = None
else:
try:
st = self.path_stats(source_path)
except IOError:
pass
else:
source_mtime = int(st['mtime'])
try:
data = self.get_data(bytecode_path)
except OSError:
pass
else:
try:
bytes_data = _validate_bytecode_header(data,
source_stats=st, name=fullname,
path=bytecode_path)
except (ImportError, EOFError):
pass
else:
_verbose_message('{} matches {}', bytecode_path,
source_path)
return _compile_bytecode(bytes_data, name=fullname,
bytecode_path=bytecode_path,
source_path=source_path)
source_bytes = self.get_data(source_path)
code_object = self.source_to_code(source_bytes, source_path)
_verbose_message('code object from {}', source_path)
if (not sys.dont_write_bytecode and bytecode_path is not None and
source_mtime is not None):
data = _code_to_bytecode(code_object, source_mtime,
len(source_bytes))
try:
self._cache_bytecode(source_path, bytecode_path, data)
_verbose_message('wrote {!r}', bytecode_path)
except NotImplementedError:
pass
return code_object
class FileLoader:
"""Base file loader class which implements the loader protocol methods that
require file system usage."""
def __init__(self, fullname, path):
"""Cache the module name and the path to the file found by the
finder."""
self.name = fullname
self.path = path
def __eq__(self, other):
return (self.__class__ == other.__class__ and
self.__dict__ == other.__dict__)
def __hash__(self):
return hash(self.name) ^ hash(self.path)
@_check_name
def load_module(self, fullname):
"""Load a module from a file.
This method is deprecated. Use exec_module() instead.
"""
# The only reason for this method is for the name check.
# Issue #14857: Avoid the zero-argument form of super so the implementation
# of that form can be updated without breaking the frozen module
return super(FileLoader, self).load_module(fullname)
@_check_name
def get_filename(self, fullname):
"""Return the path to the source file as found by the finder."""
return self.path
def get_data(self, path):
"""Return the data from path as raw bytes."""
with _io.FileIO(path, 'r') as file:
return file.read()
class SourceFileLoader(FileLoader, SourceLoader):
"""Concrete implementation of SourceLoader using the file system."""
def path_stats(self, path):
"""Return the metadata for the path."""
st = _path_stat(path)
return {'mtime': st.st_mtime, 'size': st.st_size}
def _cache_bytecode(self, source_path, bytecode_path, data):
# Adapt between the two APIs
mode = _calc_mode(source_path)
return self.set_data(bytecode_path, data, _mode=mode)
def set_data(self, path, data, *, _mode=0o666):
"""Write bytes data to a file."""
parent, filename = _path_split(path)
path_parts = []
# Figure out what directories are missing.
while parent and not _path_isdir(parent):
parent, part = _path_split(parent)
path_parts.append(part)
# Create needed directories.
for part in reversed(path_parts):
parent = _path_join(parent, part)
try:
_os.mkdir(parent)
except FileExistsError:
# Probably another Python process already created the dir.
continue
except OSError as exc:
# Could be a permission error, read-only filesystem: just forget
# about writing the data.
_verbose_message('could not create {!r}: {!r}', parent, exc)
return
try:
_write_atomic(path, data, _mode)
_verbose_message('created {!r}', path)
except OSError as exc:
# Same as above: just don't write the bytecode.
_verbose_message('could not create {!r}: {!r}', path, exc)
class SourcelessFileLoader(FileLoader, _LoaderBasics):
"""Loader which handles sourceless file imports."""
def get_code(self, fullname):
path = self.get_filename(fullname)
data = self.get_data(path)
bytes_data = _validate_bytecode_header(data, name=fullname, path=path)
return _compile_bytecode(bytes_data, name=fullname, bytecode_path=path)
def get_source(self, fullname):
"""Return None as there is no source code."""
return None
# Filled in by _setup().
EXTENSION_SUFFIXES = []
class ExtensionFileLoader:
"""Loader for extension modules.
The constructor is designed to work with FileFinder.
"""
def __init__(self, name, path):
self.name = name
self.path = path
def __eq__(self, other):
return (self.__class__ == other.__class__ and
self.__dict__ == other.__dict__)
def __hash__(self):
return hash(self.name) ^ hash(self.path)
@_check_name
def load_module(self, fullname):
"""Load an extension module."""
# Once an exec_module() implementation is added we can also
# add a deprecation warning here.
with _ManageReload(fullname):
module = _call_with_frames_removed(_imp.load_dynamic,
fullname, self.path)
_verbose_message('extension module loaded from {!r}', self.path)
is_package = self.is_package(fullname)
if is_package and not hasattr(module, '__path__'):
module.__path__ = [_path_split(self.path)[0]]
module.__loader__ = self
module.__package__ = module.__name__
if not is_package:
module.__package__ = module.__package__.rpartition('.')[0]
return module
def is_package(self, fullname):
"""Return True if the extension module is a package."""
file_name = _path_split(self.path)[1]
return any(file_name == '__init__' + suffix
for suffix in EXTENSION_SUFFIXES)
def get_code(self, fullname):
"""Return None as an extension module cannot create a code object."""
return None
def get_source(self, fullname):
"""Return None as extension modules have no source code."""
return None
@_check_name
def get_filename(self, fullname):
"""Return the path to the source file as found by the finder."""
return self.path
class _NamespacePath:
"""Represents a namespace package's path. It uses the module name
to find its parent module, and from there it looks up the parent's
__path__. When this changes, the module's own path is recomputed,
using path_finder. For top-level modules, the parent module's path
is sys.path."""
def __init__(self, name, path, path_finder):
self._name = name
self._path = path
self._last_parent_path = tuple(self._get_parent_path())
self._path_finder = path_finder
def _find_parent_path_names(self):
"""Returns a tuple of (parent-module-name, parent-path-attr-name)"""
parent, dot, me = self._name.rpartition('.')
if dot == '':
# This is a top-level module. sys.path contains the parent path.
return 'sys', 'path'
# Not a top-level module. parent-module.__path__ contains the
# parent path.
return parent, '__path__'
def _get_parent_path(self):
parent_module_name, path_attr_name = self._find_parent_path_names()
return getattr(sys.modules[parent_module_name], path_attr_name)
def _recalculate(self):
# If the parent's path has changed, recalculate _path
parent_path = tuple(self._get_parent_path()) # Make a copy
if parent_path != self._last_parent_path:
spec = self._path_finder(self._name, parent_path)
# Note that no changes are made if a loader is returned, but we
# do remember the new parent path
if spec is not None and spec.loader is None:
if spec.submodule_search_locations:
self._path = spec.submodule_search_locations
self._last_parent_path = parent_path # Save the copy
return self._path
def __iter__(self):
return iter(self._recalculate())
def __len__(self):
return len(self._recalculate())
def __repr__(self):
return '_NamespacePath({!r})'.format(self._path)
def __contains__(self, item):
return item in self._recalculate()
def append(self, item):
self._path.append(item)
# We use this exclusively in init_module_attrs() for backward-compatibility.
class _NamespaceLoader:
def __init__(self, name, path, path_finder):
self._path = _NamespacePath(name, path, path_finder)
@classmethod
def module_repr(cls, module):
"""Return repr for the module.
The method is deprecated. The import machinery does the job itself.
"""
return '<module {!r} (namespace)>'.format(module.__name__)
def is_package(self, fullname):
return True
def get_source(self, fullname):
return ''
def get_code(self, fullname):
return compile('', '<string>', 'exec', dont_inherit=True)
def exec_module(self, module):
pass
def load_module(self, fullname):
"""Load a namespace module.
This method is deprecated. Use exec_module() instead.
"""
# The import system never calls this method.
_verbose_message('namespace module loaded with path {!r}', self._path)
return _load_module_shim(self, fullname)
# Finders #####################################################################
class PathFinder:
"""Meta path finder for sys.path and package __path__ attributes."""
@classmethod
def invalidate_caches(cls):
"""Call the invalidate_caches() method on all path entry finders
        stored in sys.path_importer_cache (where implemented)."""
for finder in sys.path_importer_cache.values():
if hasattr(finder, 'invalidate_caches'):
finder.invalidate_caches()
@classmethod
def _path_hooks(cls, path):
"""Search sequence of hooks for a finder for 'path'.
If 'hooks' is false then use sys.path_hooks.
"""
if not sys.path_hooks:
_warnings.warn('sys.path_hooks is empty', ImportWarning)
for hook in sys.path_hooks:
try:
return hook(path)
except ImportError:
continue
else:
return None
@classmethod
def _path_importer_cache(cls, path):
"""Get the finder for the path entry from sys.path_importer_cache.
If the path entry is not in the cache, find the appropriate finder
and cache it. If no finder is available, store None.
"""
if path == '':
path = _os.getcwd()
try:
finder = sys.path_importer_cache[path]
except KeyError:
finder = cls._path_hooks(path)
sys.path_importer_cache[path] = finder
return finder
@classmethod
def _legacy_get_spec(cls, fullname, finder):
# This would be a good place for a DeprecationWarning if
# we ended up going that route.
if hasattr(finder, 'find_loader'):
loader, portions = finder.find_loader(fullname)
else:
loader = finder.find_module(fullname)
portions = []
if loader is not None:
return spec_from_loader(fullname, loader)
spec = ModuleSpec(fullname, None)
spec.submodule_search_locations = portions
return spec
@classmethod
def _get_spec(cls, fullname, path, target=None):
"""Find the loader or namespace_path for this module/package name."""
# If this ends up being a namespace package, namespace_path is
# the list of paths that will become its __path__
namespace_path = []
for entry in path:
if not isinstance(entry, (str, bytes)):
continue
finder = cls._path_importer_cache(entry)
if finder is not None:
if hasattr(finder, 'find_spec'):
spec = finder.find_spec(fullname, target)
else:
spec = cls._legacy_get_spec(fullname, finder)
if spec is None:
continue
if spec.loader is not None:
return spec
portions = spec.submodule_search_locations
if portions is None:
raise ImportError('spec missing loader')
# This is possibly part of a namespace package.
# Remember these path entries (if any) for when we
# create a namespace package, and continue iterating
# on path.
namespace_path.extend(portions)
else:
spec = ModuleSpec(fullname, None)
spec.submodule_search_locations = namespace_path
return spec
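    # Walk-through sketch: for fullname 'pkg' and path ['/a', '/b'], where
    # '/a/pkg' is a bare directory and '/b/pkg/__init__.py' exists, the '/a'
    # entry contributes the portion '/a/pkg' to namespace_path, then '/b'
    # yields a spec with a real loader, which is returned immediately. Only
    # if no entry produces a loader is the namespace spec returned.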
@classmethod
def find_spec(cls, fullname, path=None, target=None):
"""find the module on sys.path or 'path' based on sys.path_hooks and
sys.path_importer_cache."""
if path is None:
path = sys.path
spec = cls._get_spec(fullname, path, target)
if spec is None:
return None
elif spec.loader is None:
namespace_path = spec.submodule_search_locations
if namespace_path:
# We found at least one namespace path. Return a
# spec which can create the namespace package.
spec.origin = 'namespace'
spec.submodule_search_locations = _NamespacePath(fullname, namespace_path, cls._get_spec)
return spec
else:
return None
else:
return spec
@classmethod
def find_module(cls, fullname, path=None):
"""find the module on sys.path or 'path' based on sys.path_hooks and
sys.path_importer_cache.
This method is deprecated. Use find_spec() instead.
"""
spec = cls.find_spec(fullname, path)
if spec is None:
return None
return spec.loader
class FileFinder:
"""File-based finder.
Interactions with the file system are cached for performance, being
refreshed when the directory the finder is handling has been modified.
"""
def __init__(self, path, *loader_details):
"""Initialize with the path to search on and a variable number of
2-tuples containing the loader and the file suffixes the loader
recognizes."""
loaders = []
for loader, suffixes in loader_details:
loaders.extend((suffix, loader) for suffix in suffixes)
self._loaders = loaders
# Base (directory) path
self.path = path or '.'
self._path_mtime = -1
self._path_cache = set()
self._relaxed_path_cache = set()
def invalidate_caches(self):
"""Invalidate the directory mtime."""
self._path_mtime = -1
find_module = _find_module_shim
def find_loader(self, fullname):
"""Try to find a loader for the specified module, or the namespace
package portions. Returns (loader, list-of-portions).
This method is deprecated. Use find_spec() instead.
"""
spec = self.find_spec(fullname)
if spec is None:
return None, []
return spec.loader, spec.submodule_search_locations or []
def _get_spec(self, loader_class, fullname, path, smsl, target):
loader = loader_class(fullname, path)
return spec_from_file_location(fullname, path, loader=loader,
submodule_search_locations=smsl)
def find_spec(self, fullname, target=None):
"""Try to find a loader for the specified module, or the namespace
package portions. Returns (loader, list-of-portions)."""
is_namespace = False
tail_module = fullname.rpartition('.')[2]
try:
mtime = _path_stat(self.path or _os.getcwd()).st_mtime
except OSError:
mtime = -1
if mtime != self._path_mtime:
self._fill_cache()
self._path_mtime = mtime
# tail_module keeps the original casing, for __file__ and friends
if _relax_case():
cache = self._relaxed_path_cache
cache_module = tail_module.lower()
else:
cache = self._path_cache
cache_module = tail_module
# Check if the module is the name of a directory (and thus a package).
if cache_module in cache:
base_path = _path_join(self.path, tail_module)
for suffix, loader_class in self._loaders:
init_filename = '__init__' + suffix
full_path = _path_join(base_path, init_filename)
if _path_isfile(full_path):
return self._get_spec(loader_class, fullname, full_path, [base_path], target)
else:
# If a namespace package, return the path if we don't
# find a module in the next section.
is_namespace = _path_isdir(base_path)
        # Check whether a file with a proper suffix exists.
for suffix, loader_class in self._loaders:
full_path = _path_join(self.path, tail_module + suffix)
_verbose_message('trying {}'.format(full_path), verbosity=2)
if cache_module + suffix in cache:
if _path_isfile(full_path):
return self._get_spec(loader_class, fullname, full_path, None, target)
if is_namespace:
_verbose_message('possible namespace for {}'.format(base_path))
spec = ModuleSpec(fullname, None)
spec.submodule_search_locations = [base_path]
return spec
return None
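    # Search-order sketch for fullname 'pkg.mod' (tail_module == 'mod') in
    # the directory self.path:
    #
    #   1. '<path>/mod/__init__<suffix>' for each registered suffix -> package
    #   2. '<path>/mod<suffix>' for each registered suffix -> plain module
    #   3. a bare directory '<path>/mod' -> possible namespace portion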
def _fill_cache(self):
"""Fill the cache of potential modules and packages for this directory."""
path = self.path
try:
contents = _os.listdir(path or _os.getcwd())
except (FileNotFoundError, PermissionError, NotADirectoryError):
# Directory has either been removed, turned into a file, or made
# unreadable.
contents = []
# We store two cached versions, to handle runtime changes of the
# PYTHONCASEOK environment variable.
if not sys.platform.startswith('win'):
self._path_cache = set(contents)
else:
# Windows users can import modules with case-insensitive file
# suffixes (for legacy reasons). Make the suffix lowercase here
# so it's done once instead of for every import. This is safe as
# the specified suffixes to check against are always specified in a
# case-sensitive manner.
lower_suffix_contents = set()
for item in contents:
name, dot, suffix = item.partition('.')
if dot:
new_name = '{}.{}'.format(name, suffix.lower())
else:
new_name = name
lower_suffix_contents.add(new_name)
self._path_cache = lower_suffix_contents
if sys.platform.startswith(_CASE_INSENSITIVE_PLATFORMS):
self._relaxed_path_cache = {fn.lower() for fn in contents}
@classmethod
def path_hook(cls, *loader_details):
"""A class method which returns a closure to use on sys.path_hook
which will return an instance using the specified loaders and the path
called on the closure.
If the path called on the closure is not a directory, ImportError is
raised.
"""
def path_hook_for_FileFinder(path):
"""Path hook for importlib.machinery.FileFinder."""
if not _path_isdir(path):
raise ImportError('only directories are supported', path=path)
return cls(path, *loader_details)
return path_hook_for_FileFinder
def __repr__(self):
return 'FileFinder({!r})'.format(self.path)
# Import itself ###############################################################
class _ImportLockContext:
"""Context manager for the import lock."""
def __enter__(self):
"""Acquire the import lock."""
_imp.acquire_lock()
def __exit__(self, exc_type, exc_value, exc_traceback):
"""Release the import lock regardless of any raised exceptions."""
_imp.release_lock()
def _resolve_name(name, package, level):
"""Resolve a relative module name to an absolute one."""
bits = package.rsplit('.', level - 1)
if len(bits) < level:
raise ValueError('attempted relative import beyond top-level package')
base = bits[0]
return '{}.{}'.format(base, name) if name else base
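# Resolution sketch (values follow directly from the code above; 'pkg.sub'
# is a hypothetical package):
#
#   _resolve_name('mod', 'pkg.sub', 1)  # -> 'pkg.sub.mod'  (from .mod import x)
#   _resolve_name('mod', 'pkg.sub', 2)  # -> 'pkg.mod'      (from ..mod import x)
#   _resolve_name('', 'pkg.sub', 1)     # -> 'pkg.sub'      (from . import x)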
def _find_spec_legacy(finder, name, path):
# This would be a good place for a DeprecationWarning if
# we ended up going that route.
loader = finder.find_module(name, path)
if loader is None:
return None
return spec_from_loader(name, loader)
def _find_spec(name, path, target=None):
"""Find a module's loader."""
if not sys.meta_path:
_warnings.warn('sys.meta_path is empty', ImportWarning)
# We check sys.modules here for the reload case. While a passed-in
    # target will usually indicate a reload, there is no guarantee, whereas
# sys.modules provides one.
is_reload = name in sys.modules
for finder in sys.meta_path:
with _ImportLockContext():
try:
find_spec = finder.find_spec
except AttributeError:
spec = _find_spec_legacy(finder, name, path)
if spec is None:
continue
else:
spec = find_spec(name, path, target)
if spec is not None:
# The parent import may have already imported this module.
if not is_reload and name in sys.modules:
module = sys.modules[name]
try:
__spec__ = module.__spec__
except AttributeError:
# We use the found spec since that is the one that
# we would have used if the parent module hadn't
# beaten us to the punch.
return spec
else:
if __spec__ is None:
return spec
else:
return __spec__
else:
return spec
else:
return None
def _sanity_check(name, package, level):
"""Verify arguments are "sane"."""
if not isinstance(name, str):
raise TypeError('module name must be str, not {}'.format(type(name)))
if level < 0:
raise ValueError('level must be >= 0')
if package:
if not isinstance(package, str):
raise TypeError('__package__ not set to a string')
elif package not in sys.modules:
msg = ('Parent module {!r} not loaded, cannot perform relative '
'import')
raise SystemError(msg.format(package))
if not name and level == 0:
raise ValueError('Empty module name')
_ERR_MSG_PREFIX = 'No module named '
_ERR_MSG = _ERR_MSG_PREFIX + '{!r}'
def _find_and_load_unlocked(name, import_):
path = None
parent = name.rpartition('.')[0]
if parent:
if parent not in sys.modules:
_call_with_frames_removed(import_, parent)
# Crazy side-effects!
if name in sys.modules:
return sys.modules[name]
parent_module = sys.modules[parent]
try:
path = parent_module.__path__
except AttributeError:
msg = (_ERR_MSG + '; {!r} is not a package').format(name, parent)
raise ImportError(msg, name=name)
spec = _find_spec(name, path)
if spec is None:
raise ImportError(_ERR_MSG.format(name), name=name)
else:
module = _SpecMethods(spec)._load_unlocked()
if parent:
# Set the module as an attribute on its parent.
parent_module = sys.modules[parent]
setattr(parent_module, name.rpartition('.')[2], module)
return module
def _find_and_load(name, import_):
"""Find and load the module, and release the import lock."""
with _ModuleLockManager(name):
return _find_and_load_unlocked(name, import_)
def _gcd_import(name, package=None, level=0):
"""Import and return the module based on its name, the package the call is
being made from, and the level adjustment.
This function represents the greatest common denominator of functionality
between import_module and __import__. This includes setting __package__ if
the loader did not.
"""
_sanity_check(name, package, level)
if level > 0:
name = _resolve_name(name, package, level)
_imp.acquire_lock()
if name not in sys.modules:
return _find_and_load(name, _gcd_import)
module = sys.modules[name]
if module is None:
_imp.release_lock()
message = ('import of {} halted; '
'None in sys.modules'.format(name))
raise ImportError(message, name=name)
_lock_unlock_module(name)
return module
def _handle_fromlist(module, fromlist, import_):
"""Figure out what __import__ should return.
The import_ parameter is a callable which takes the name of module to
import. It is required to decouple the function from assuming importlib's
import implementation is desired.
"""
# The hell that is fromlist ...
# If a package was imported, try to import stuff from fromlist.
if hasattr(module, '__path__'):
if '*' in fromlist:
fromlist = list(fromlist)
fromlist.remove('*')
if hasattr(module, '__all__'):
fromlist.extend(module.__all__)
for x in fromlist:
if not hasattr(module, x):
from_name = '{}.{}'.format(module.__name__, x)
try:
_call_with_frames_removed(import_, from_name)
except ImportError as exc:
# Backwards-compatibility dictates we ignore failed
# imports triggered by fromlist for modules that don't
# exist.
if str(exc).startswith(_ERR_MSG_PREFIX):
if exc.name == from_name:
continue
raise
return module
def _calc___package__(globals):
"""Calculate what __package__ should be.
__package__ is not guaranteed to be defined or could be set to None
to represent that its proper value is unknown.
"""
package = globals.get('__package__')
if package is None:
package = globals['__name__']
if '__path__' not in globals:
package = package.rpartition('.')[0]
return package
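# Sketch of the cases handled above (hypothetical globals dicts):
#
#   _calc___package__({'__package__': 'pkg'})               # -> 'pkg'
#   _calc___package__({'__name__': 'pkg.mod'})              # -> 'pkg'
#   _calc___package__({'__name__': 'pkg', '__path__': []})  # -> 'pkg'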
def _get_supported_file_loaders():
"""Returns a list of file-based module loaders.
Each item is a tuple (loader, suffixes).
"""
extensions = ExtensionFileLoader, _imp.extension_suffixes()
source = SourceFileLoader, SOURCE_SUFFIXES
bytecode = SourcelessFileLoader, BYTECODE_SUFFIXES
return [extensions, source, bytecode]
def __import__(name, globals=None, locals=None, fromlist=(), level=0):
"""Import a module.
    The 'globals' argument is used to infer where the import is occurring from
to handle relative imports. The 'locals' argument is ignored. The
'fromlist' argument specifies what should exist as attributes on the module
being imported (e.g. ``from module import <fromlist>``). The 'level'
argument represents the package location to import from in a relative
import (e.g. ``from ..pkg import mod`` would have a 'level' of 2).
"""
if level == 0:
module = _gcd_import(name)
else:
globals_ = globals if globals is not None else {}
package = _calc___package__(globals_)
module = _gcd_import(name, package, level)
if not fromlist:
# Return up to the first dot in 'name'. This is complicated by the fact
# that 'name' may be relative.
if level == 0:
return _gcd_import(name.partition('.')[0])
elif not name:
return module
else:
# Figure out where to slice the module's name up to the first dot
# in 'name'.
cut_off = len(name) - len(name.partition('.')[0])
# Slice end needs to be positive to alleviate need to special-case
# when ``'.' not in name``.
return sys.modules[module.__name__[:len(module.__name__)-cut_off]]
else:
return _handle_fromlist(module, fromlist, _gcd_import)
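# How import statements map onto the call above (a sketch; 'pkg', 'mod' and
# 'm' are hypothetical names):
#
#   import pkg.mod       ->  __import__('pkg.mod')                 returns pkg
#   from pkg import mod  ->  __import__('pkg', fromlist=('mod',))  returns pkg
#   from ..m import x    ->  __import__('m', globals(), None, ('x',), 2)
#                            returns the resolved module '<parent>.m'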
def _builtin_from_name(name):
spec = BuiltinImporter.find_spec(name)
if spec is None:
raise ImportError('no built-in module named ' + name)
methods = _SpecMethods(spec)
return methods._load_unlocked()
def _setup(sys_module, _imp_module):
"""Setup importlib by importing needed built-in modules and injecting them
into the global namespace.
As sys is needed for sys.modules access and _imp is needed to load built-in
modules, those two modules must be explicitly passed in.
"""
global _imp, sys, BYTECODE_SUFFIXES
_imp = _imp_module
sys = sys_module
if sys.flags.optimize:
BYTECODE_SUFFIXES = OPTIMIZED_BYTECODE_SUFFIXES
else:
BYTECODE_SUFFIXES = DEBUG_BYTECODE_SUFFIXES
# Set up the spec for existing builtin/frozen modules.
module_type = type(sys)
for name, module in sys.modules.items():
if isinstance(module, module_type):
if name in sys.builtin_module_names:
loader = BuiltinImporter
elif _imp.is_frozen(name):
loader = FrozenImporter
else:
continue
spec = _spec_from_module(module, loader)
methods = _SpecMethods(spec)
methods.init_module_attrs(module)
# Directly load built-in modules needed during bootstrap.
self_module = sys.modules[__name__]
for builtin_name in ('_io', '_warnings', 'builtins', 'marshal'):
if builtin_name not in sys.modules:
builtin_module = _builtin_from_name(builtin_name)
else:
builtin_module = sys.modules[builtin_name]
setattr(self_module, builtin_name, builtin_module)
# Directly load the os module (needed during bootstrap).
os_details = ('posix', ['/']), ('nt', ['\\', '/'])
for builtin_os, path_separators in os_details:
# Assumption made in _path_join()
assert all(len(sep) == 1 for sep in path_separators)
path_sep = path_separators[0]
if builtin_os in sys.modules:
os_module = sys.modules[builtin_os]
break
else:
try:
os_module = _builtin_from_name(builtin_os)
break
except ImportError:
continue
else:
raise ImportError('importlib requires posix or nt')
setattr(self_module, '_os', os_module)
setattr(self_module, 'path_sep', path_sep)
setattr(self_module, 'path_separators', ''.join(path_separators))
# Directly load the _thread module (needed during bootstrap).
try:
thread_module = _builtin_from_name('_thread')
except ImportError:
# Python was built without threads
thread_module = None
setattr(self_module, '_thread', thread_module)
# Directly load the _weakref module (needed during bootstrap).
weakref_module = _builtin_from_name('_weakref')
setattr(self_module, '_weakref', weakref_module)
# Directly load the winreg module (needed during bootstrap).
if builtin_os == 'nt':
winreg_module = _builtin_from_name('winreg')
setattr(self_module, '_winreg', winreg_module)
# Constants
setattr(self_module, '_relax_case', _make_relax_case())
EXTENSION_SUFFIXES.extend(_imp.extension_suffixes())
if builtin_os == 'nt':
SOURCE_SUFFIXES.append('.pyw')
if '_d.pyd' in EXTENSION_SUFFIXES:
WindowsRegistryFinder.DEBUG_BUILD = True
def _install(sys_module, _imp_module):
"""Install importlib as the implementation of import."""
_setup(sys_module, _imp_module)
supported_loaders = _get_supported_file_loaders()
sys.path_hooks.extend([FileFinder.path_hook(*supported_loaders)])
sys.meta_path.append(BuiltinImporter)
sys.meta_path.append(FrozenImporter)
if _os.__name__ == 'nt':
sys.meta_path.append(WindowsRegistryFinder)
sys.meta_path.append(PathFinder)
"""Core implementation of import.
This module is NOT meant to be directly imported! It has been designed such
that it can be bootstrapped into Python as the implementation of import. As
such it requires the injection of specific modules and attributes in order to
work. One should use importlib as the public-facing version of this module.
"""
#
# IMPORTANT: Whenever making changes to this module, be sure to run
# a top-level make in order to get the frozen version of the module
# updated. Not doing so will cause the Makefile to fail for
# all others who don't have a ./python around to freeze the module
# in the early stages of compilation.
#
# See importlib._setup() for what is injected into the global namespace.
# When editing this code be aware that code executed at import time CANNOT
# reference any injected objects! This includes not only global code but also
# anything specified at the class level.
# Bootstrap-related code ######################################################
_CASE_INSENSITIVE_PLATFORMS = 'win', 'cygwin', 'darwin'
def _make_relax_case():
if sys.platform.startswith(_CASE_INSENSITIVE_PLATFORMS):
def _relax_case():
"""True if filenames must be checked case-insensitively."""
return b'PYTHONCASEOK' in _os.environ
else:
def _relax_case():
"""True if filenames must be checked case-insensitively."""
return False
return _relax_case
def _w_long(x):
"""Convert a 32-bit integer to little-endian."""
return (int(x) & 0xFFFFFFFF).to_bytes(4, 'little')
def _r_long(int_bytes):
"""Convert 4 bytes in little-endian to an integer."""
return int.from_bytes(int_bytes, 'little')
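# Round-trip sketch:
#
#   _w_long(0x12345678)   # -> b'xV4\x12' (little-endian)
#   _r_long(b'xV4\x12')   # -> 305419896 == 0x12345678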
def _path_join(*path_parts):
"""Replacement for os.path.join()."""
return path_sep.join([part.rstrip(path_separators)
for part in path_parts if part])
def _path_split(path):
"""Replacement for os.path.split()."""
if len(path_separators) == 1:
front, _, tail = path.rpartition(path_sep)
return front, tail
for x in reversed(path):
if x in path_separators:
front, tail = path.rsplit(x, maxsplit=1)
return front, tail
return '', path
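# Behaviour sketch (assuming a POSIX build, i.e. path_sep == '/'):
#
#   _path_join('/proj', 'pkg', 'mod.py')   # -> '/proj/pkg/mod.py'
#   _path_split('/proj/pkg/mod.py')        # -> ('/proj/pkg', 'mod.py')
#   _path_split('mod.py')                  # -> ('', 'mod.py')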
def _path_stat(path):
"""Stat the path.
Made a separate function to make it easier to override in experiments
(e.g. cache stat results).
"""
return _os.stat(path)
def _path_is_mode_type(path, mode):
"""Test whether the path is the specified mode type."""
try:
stat_info = _path_stat(path)
except OSError:
return False
return (stat_info.st_mode & 0o170000) == mode
def _path_isfile(path):
"""Replacement for os.path.isfile."""
return _path_is_mode_type(path, 0o100000)
def _path_isdir(path):
"""Replacement for os.path.isdir."""
if not path:
path = _os.getcwd()
return _path_is_mode_type(path, 0o040000)
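# The octal constants used above are the standard stat() file-type values,
# spelled out because the stat module is not importable during bootstrap:
# 0o170000 is the S_IFMT mask, 0o100000 is S_IFREG (regular file) and
# 0o040000 is S_IFDIR (directory).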
def _write_atomic(path, data, mode=0o666):
"""Best-effort function to write data to a path atomically.
Be prepared to handle a FileExistsError if concurrent writing of the
temporary file is attempted."""
# id() is used to generate a pseudo-random filename.
path_tmp = '{}.{}'.format(path, id(path))
fd = _os.open(path_tmp,
_os.O_EXCL | _os.O_CREAT | _os.O_WRONLY, mode & 0o666)
try:
# We first write data to a temporary file, and then use os.replace() to
# perform an atomic rename.
with _io.FileIO(fd, 'wb') as file:
file.write(data)
_os.replace(path_tmp, path)
except OSError:
try:
_os.unlink(path_tmp)
except OSError:
pass
raise
def _wrap(new, old):
"""Simple substitute for functools.update_wrapper."""
for replace in ['__module__', '__name__', '__qualname__', '__doc__']:
if hasattr(old, replace):
setattr(new, replace, getattr(old, replace))
new.__dict__.update(old.__dict__)
def _new_module(name):
return type(sys)(name)
_code_type = type(_wrap.__code__)
class _ManageReload:
"""Manages the possible clean-up of sys.modules for load_module()."""
def __init__(self, name):
self._name = name
def __enter__(self):
self._is_reload = self._name in sys.modules
def __exit__(self, *args):
if any(arg is not None for arg in args) and not self._is_reload:
try:
del sys.modules[self._name]
except KeyError:
pass
# Module-level locking ########################################################
# A dict mapping module names to weakrefs of _ModuleLock instances
_module_locks = {}
# A dict mapping thread ids to _ModuleLock instances
_blocking_on = {}
class _DeadlockError(RuntimeError):
pass
class _ModuleLock:
"""A recursive lock implementation which is able to detect deadlocks
(e.g. thread 1 trying to take locks A then B, and thread 2 trying to
take locks B then A).
"""
def __init__(self, name):
self.lock = _thread.allocate_lock()
self.wakeup = _thread.allocate_lock()
self.name = name
self.owner = None
self.count = 0
self.waiters = 0
def has_deadlock(self):
# Deadlock avoidance for concurrent circular imports.
me = _thread.get_ident()
tid = self.owner
while True:
lock = _blocking_on.get(tid)
if lock is None:
return False
tid = lock.owner
if tid == me:
return True
def acquire(self):
"""
Acquire the module lock. If a potential deadlock is detected,
a _DeadlockError is raised.
Otherwise, the lock is always acquired and True is returned.
"""
tid = _thread.get_ident()
_blocking_on[tid] = self
try:
while True:
with self.lock:
if self.count == 0 or self.owner == tid:
self.owner = tid
self.count += 1
return True
if self.has_deadlock():
raise _DeadlockError('deadlock detected by %r' % self)
if self.wakeup.acquire(False):
self.waiters += 1
# Wait for a release() call
self.wakeup.acquire()
self.wakeup.release()
finally:
del _blocking_on[tid]
def release(self):
tid = _thread.get_ident()
with self.lock:
if self.owner != tid:
raise RuntimeError('cannot release un-acquired lock')
assert self.count > 0
self.count -= 1
if self.count == 0:
self.owner = None
if self.waiters:
self.waiters -= 1
self.wakeup.release()
def __repr__(self):
return '_ModuleLock({!r}) at {}'.format(self.name, id(self))
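# A concrete scenario has_deadlock() is meant to catch, sketched with
# hypothetical module names: thread 1 owns the lock for 'a' and blocks
# importing 'b', while thread 2 owns the lock for 'b' and blocks
# importing 'a'.  Because acquire() records the lock each thread is
# blocked on in _blocking_on, following owner -> blocked-on lock -> owner
# eventually leads back to the calling thread, and acquire() raises
# _DeadlockError instead of hanging forever.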
class _DummyModuleLock:
"""A simple _ModuleLock equivalent for Python builds without
multi-threading support."""
def __init__(self, name):
self.name = name
self.count = 0
def acquire(self):
self.count += 1
return True
def release(self):
if self.count == 0:
raise RuntimeError('cannot release un-acquired lock')
self.count -= 1
def __repr__(self):
return '_DummyModuleLock({!r}) at {}'.format(self.name, id(self))
class _ModuleLockManager:
def __init__(self, name):
self._name = name
self._lock = None
def __enter__(self):
try:
self._lock = _get_module_lock(self._name)
finally:
_imp.release_lock()
self._lock.acquire()
def __exit__(self, *args, **kwargs):
self._lock.release()
# The following two functions are for consumption by Python/import.c.
def _get_module_lock(name):
"""Get or create the module lock for a given module name.
Should only be called with the import lock taken."""
lock = None
try:
lock = _module_locks[name]()
except KeyError:
pass
if lock is None:
if _thread is None:
lock = _DummyModuleLock(name)
else:
lock = _ModuleLock(name)
def cb(_):
del _module_locks[name]
_module_locks[name] = _weakref.ref(lock, cb)
return lock
def _lock_unlock_module(name):
"""Release the global import lock, and acquires then release the
module lock for a given module name.
This is used to ensure a module is completely initialized, in the
event it is being imported by another thread.
Should only be called with the import lock taken."""
lock = _get_module_lock(name)
_imp.release_lock()
try:
lock.acquire()
except _DeadlockError:
# Concurrent circular import, we'll accept a partially initialized
# module object.
pass
else:
lock.release()
# Frame stripping magic ###############################################
def _call_with_frames_removed(f, *args, **kwds):
"""remove_importlib_frames in import.c will always remove sequences
    of importlib frames that end with a call to this function.
Use it instead of a normal call in places where including the importlib
frames introduces unwanted noise into the traceback (e.g. when executing
    module code).
"""
return f(*args, **kwds)
# Finder/loader utility code ###############################################
# Magic word to reject .pyc files generated by other Python versions.
# It should change for each incompatible change to the bytecode.
#
# The value of CR and LF is incorporated so if you ever read or write
# a .pyc file in text mode the magic number will be wrong; also, the
# Apple MPW compiler swaps their values, botching string constants.
#
# The magic numbers must be spaced at least 2 apart, as the
# -U interpreter flag will cause MAGIC+1 to be used. They have been
# odd numbers for some time now.
#
# There were a variety of old schemes for setting the magic number.
# The current working scheme is to increment the previous value by
# 10.
#
# Starting with the adoption of PEP 3147 in Python 3.2, every bump in magic
# number also includes a new "magic tag", i.e. a human readable string used
# to represent the magic number in __pycache__ directories. When you change
# the magic number, you must also set a new unique magic tag. Generally this
# can be named after the Python major version of the magic number bump, but
# it can really be anything, as long as it's different than anything else
# that's come before. The tags are included in the following table, starting
# with Python 3.2a0.
#
# Known values:
# Python 1.5: 20121
# Python 1.5.1: 20121
# Python 1.5.2: 20121
# Python 1.6: 50428
# Python 2.0: 50823
# Python 2.0.1: 50823
# Python 2.1: 60202
# Python 2.1.1: 60202
# Python 2.1.2: 60202
# Python 2.2: 60717
# Python 2.3a0: 62011
# Python 2.3a0: 62021
# Python 2.3a0: 62011 (!)
# Python 2.4a0: 62041
# Python 2.4a3: 62051
# Python 2.4b1: 62061
# Python 2.5a0: 62071
# Python 2.5a0: 62081 (ast-branch)
# Python 2.5a0: 62091 (with)
# Python 2.5a0: 62092 (changed WITH_CLEANUP opcode)
# Python 2.5b3: 62101 (fix wrong code: for x, in ...)
# Python 2.5b3: 62111 (fix wrong code: x += yield)
# Python 2.5c1: 62121 (fix wrong lnotab with for loops and
# storing constants that should have been removed)
# Python 2.5c2: 62131 (fix wrong code: for x, in ... in listcomp/genexp)
# Python 2.6a0: 62151 (peephole optimizations and STORE_MAP opcode)
# Python 2.6a1: 62161 (WITH_CLEANUP optimization)
# Python 2.7a0: 62171 (optimize list comprehensions/change LIST_APPEND)
# Python 2.7a0: 62181 (optimize conditional branches:
# introduce POP_JUMP_IF_FALSE and POP_JUMP_IF_TRUE)
# Python 2.7a0 62191 (introduce SETUP_WITH)
# Python 2.7a0 62201 (introduce BUILD_SET)
# Python 2.7a0 62211 (introduce MAP_ADD and SET_ADD)
# Python 3000: 3000
# 3010 (removed UNARY_CONVERT)
# 3020 (added BUILD_SET)
# 3030 (added keyword-only parameters)
# 3040 (added signature annotations)
# 3050 (print becomes a function)
# 3060 (PEP 3115 metaclass syntax)
# 3061 (string literals become unicode)
# 3071 (PEP 3109 raise changes)
# 3081 (PEP 3137 make __file__ and __name__ unicode)
# 3091 (kill str8 interning)
# 3101 (merge from 2.6a0, see 62151)
# 3103 (__file__ points to source file)
# Python 3.0a4: 3111 (WITH_CLEANUP optimization).
# Python 3.0a5: 3131 (lexical exception stacking, including POP_EXCEPT)
# Python 3.1a0: 3141 (optimize list, set and dict comprehensions:
# change LIST_APPEND and SET_ADD, add MAP_ADD)
# Python 3.1a0: 3151 (optimize conditional branches:
# introduce POP_JUMP_IF_FALSE and POP_JUMP_IF_TRUE)
# Python 3.2a0: 3160 (add SETUP_WITH)
# tag: cpython-32
# Python 3.2a1: 3170 (add DUP_TOP_TWO, remove DUP_TOPX and ROT_FOUR)
# tag: cpython-32
# Python 3.2a2 3180 (add DELETE_DEREF)
# Python 3.3a0 3190 __class__ super closure changed
# Python 3.3a0 3200 (__qualname__ added)
# 3210 (added size modulo 2**32 to the pyc header)
# Python 3.3a1 3220 (changed PEP 380 implementation)
# Python 3.3a4 3230 (revert changes to implicit __class__ closure)
# Python 3.4a1 3250 (evaluate positional default arguments before
# keyword-only defaults)
# Python 3.4a1 3260 (add LOAD_CLASSDEREF; allow locals of class to override
# free vars)
# Python 3.4a1 3270 (various tweaks to the __class__ closure)
# Python 3.4a1 3280 (remove implicit class argument)
# Python 3.4a4 3290 (changes to __qualname__ computation)
# Python 3.4a4 3300 (more changes to __qualname__ computation)
# Python 3.4rc2 3310 (alter __qualname__ computation)
#
# MAGIC must change whenever the bytecode emitted by the compiler may no
# longer be understood by older implementations of the eval loop (usually
# due to the addition of new opcodes).
MAGIC_NUMBER = (3310).to_bytes(2, 'little') + b'\r\n'
_RAW_MAGIC_NUMBER = int.from_bytes(MAGIC_NUMBER, 'little') # For import.c
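# As a worked example of the encoding above: 3310 is 0x0cee, so
# (3310).to_bytes(2, 'little') is b'\xee\x0c' and MAGIC_NUMBER is
# b'\xee\x0c\r\n'; a .pyc accidentally read or written in text mode
# mangles the trailing b'\r\n' and is rejected by the magic check.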
_PYCACHE = '__pycache__'
SOURCE_SUFFIXES = ['.py'] # _setup() adds .pyw as needed.
DEBUG_BYTECODE_SUFFIXES = ['.pyc']
OPTIMIZED_BYTECODE_SUFFIXES = ['.pyo']
def cache_from_source(path, debug_override=None):
"""Given the path to a .py file, return the path to its .pyc/.pyo file.
The .py file does not need to exist; this simply returns the path to the
.pyc/.pyo file calculated as if the .py file were imported. The extension
will be .pyc unless sys.flags.optimize is non-zero, then it will be .pyo.
If debug_override is not None, then it must be a boolean and is used in
place of sys.flags.optimize.
If sys.implementation.cache_tag is None then NotImplementedError is raised.
"""
debug = not sys.flags.optimize if debug_override is None else debug_override
if debug:
suffixes = DEBUG_BYTECODE_SUFFIXES
else:
suffixes = OPTIMIZED_BYTECODE_SUFFIXES
head, tail = _path_split(path)
base, sep, rest = tail.rpartition('.')
tag = sys.implementation.cache_tag
if tag is None:
raise NotImplementedError('sys.implementation.cache_tag is None')
filename = ''.join([(base if base else rest), sep, tag, suffixes[0]])
return _path_join(head, _PYCACHE, filename)
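# A hedged example with a hypothetical path, assuming
# sys.implementation.cache_tag == 'cpython-34' and optimization off:
#
#   cache_from_source('/pkg/mod.py')
#       -> '/pkg/__pycache__/mod.cpython-34.pyc'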
def source_from_cache(path):
"""Given the path to a .pyc./.pyo file, return the path to its .py file.
The .pyc/.pyo file does not need to exist; this simply returns the path to
the .py file calculated to correspond to the .pyc/.pyo file. If path does
not conform to PEP 3147 format, ValueError will be raised. If
sys.implementation.cache_tag is None then NotImplementedError is raised.
"""
if sys.implementation.cache_tag is None:
raise NotImplementedError('sys.implementation.cache_tag is None')
head, pycache_filename = _path_split(path)
head, pycache = _path_split(head)
if pycache != _PYCACHE:
raise ValueError('{} not bottom-level directory in '
'{!r}'.format(_PYCACHE, path))
if pycache_filename.count('.') != 2:
raise ValueError('expected only 2 dots in '
'{!r}'.format(pycache_filename))
base_filename = pycache_filename.partition('.')[0]
return _path_join(head, base_filename + SOURCE_SUFFIXES[0])
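# The inverse of the cache_from_source() example above; only the PEP 3147
# layout is checked, the tag itself is not validated:
#
#   source_from_cache('/pkg/__pycache__/mod.cpython-34.pyc')
#       -> '/pkg/mod.py'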
def _get_sourcefile(bytecode_path):
"""Convert a bytecode file path to a source path (if possible).
This function exists purely for backwards-compatibility for
PyImport_ExecCodeModuleWithFilenames() in the C API.
"""
if len(bytecode_path) == 0:
return None
rest, _, extension = bytecode_path.rpartition('.')
if not rest or extension.lower()[-3:-1] != 'py':
return bytecode_path
try:
source_path = source_from_cache(bytecode_path)
except (NotImplementedError, ValueError):
source_path = bytecode_path[:-1]
return source_path if _path_isfile(source_path) else bytecode_path
def _calc_mode(path):
"""Calculate the mode permissions for a bytecode file."""
try:
mode = _path_stat(path).st_mode
except OSError:
mode = 0o666
# We always ensure write access so we can update cached files
# later even when the source files are read-only on Windows (#6074)
mode |= 0o200
return mode
def _verbose_message(message, *args, verbosity=1):
"""Print the message to stderr if -v/PYTHONVERBOSE is turned on."""
if sys.flags.verbose >= verbosity:
if not message.startswith(('#', 'import ')):
message = '# ' + message
print(message.format(*args), file=sys.stderr)
def _check_name(method):
"""Decorator to verify that the module being requested matches the one the
loader can handle.
The first argument (self) must define _name which the second argument is
compared against. If the comparison fails then ImportError is raised.
"""
def _check_name_wrapper(self, name=None, *args, **kwargs):
if name is None:
name = self.name
elif self.name != name:
raise ImportError('loader cannot handle %s' % name, name=name)
return method(self, name, *args, **kwargs)
_wrap(_check_name_wrapper, method)
return _check_name_wrapper
def _requires_builtin(fxn):
"""Decorator to verify the named module is built-in."""
def _requires_builtin_wrapper(self, fullname):
if fullname not in sys.builtin_module_names:
raise ImportError('{!r} is not a built-in module'.format(fullname),
name=fullname)
return fxn(self, fullname)
_wrap(_requires_builtin_wrapper, fxn)
return _requires_builtin_wrapper
def _requires_frozen(fxn):
"""Decorator to verify the named module is frozen."""
def _requires_frozen_wrapper(self, fullname):
if not _imp.is_frozen(fullname):
raise ImportError('{!r} is not a frozen module'.format(fullname),
name=fullname)
return fxn(self, fullname)
_wrap(_requires_frozen_wrapper, fxn)
return _requires_frozen_wrapper
def _find_module_shim(self, fullname):
"""Try to find a loader for the specified module by delegating to
self.find_loader().
This method is deprecated in favor of finder.find_spec().
"""
# Call find_loader(). If it returns a string (indicating this
# is a namespace package portion), generate a warning and
# return None.
loader, portions = self.find_loader(fullname)
if loader is None and len(portions):
msg = 'Not importing directory {}: missing __init__'
_warnings.warn(msg.format(portions[0]), ImportWarning)
return loader
def _load_module_shim(self, fullname):
"""Load the specified module into sys.modules and return it.
This method is deprecated. Use loader.exec_module instead.
"""
spec = spec_from_loader(fullname, self)
methods = _SpecMethods(spec)
if fullname in sys.modules:
module = sys.modules[fullname]
methods.exec(module)
return sys.modules[fullname]
else:
return methods.load()
def _validate_bytecode_header(data, source_stats=None, name=None, path=None):
"""Validate the header of the passed-in bytecode against source_stats (if
    given) and return the bytecode that can be compiled by compile().
All other arguments are used to enhance error reporting.
ImportError is raised when the magic number is incorrect or the bytecode is
found to be stale. EOFError is raised when the data is found to be
truncated.
"""
exc_details = {}
if name is not None:
exc_details['name'] = name
else:
# To prevent having to make all messages have a conditional name.
name = '<bytecode>'
if path is not None:
exc_details['path'] = path
magic = data[:4]
raw_timestamp = data[4:8]
raw_size = data[8:12]
if magic != MAGIC_NUMBER:
message = 'bad magic number in {!r}: {!r}'.format(name, magic)
_verbose_message(message)
raise ImportError(message, **exc_details)
elif len(raw_timestamp) != 4:
message = 'reached EOF while reading timestamp in {!r}'.format(name)
_verbose_message(message)
raise EOFError(message)
elif len(raw_size) != 4:
message = 'reached EOF while reading size of source in {!r}'.format(name)
_verbose_message(message)
raise EOFError(message)
if source_stats is not None:
try:
source_mtime = int(source_stats['mtime'])
except KeyError:
pass
else:
if _r_long(raw_timestamp) != source_mtime:
message = 'bytecode is stale for {!r}'.format(name)
_verbose_message(message)
raise ImportError(message, **exc_details)
try:
source_size = source_stats['size'] & 0xFFFFFFFF
except KeyError:
pass
else:
if _r_long(raw_size) != source_size:
raise ImportError('bytecode is stale for {!r}'.format(name),
**exc_details)
return data[12:]
def _compile_bytecode(data, name=None, bytecode_path=None, source_path=None):
"""Compile bytecode as returned by _validate_bytecode_header()."""
code = marshal.loads(data)
if isinstance(code, _code_type):
_verbose_message('code object from {!r}', bytecode_path)
if source_path is not None:
_imp._fix_co_filename(code, source_path)
return code
else:
raise ImportError('Non-code object in {!r}'.format(bytecode_path),
name=name, path=bytecode_path)
def _code_to_bytecode(code, mtime=0, source_size=0):
"""Compile a code object into bytecode for writing out to a byte-compiled
file."""
data = bytearray(MAGIC_NUMBER)
data.extend(_w_long(mtime))
data.extend(_w_long(source_size))
data.extend(marshal.dumps(code))
return data
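# Together with _validate_bytecode_header() this fixes the .pyc layout
# assumed throughout the module:
#
#   bytes 0-3    MAGIC_NUMBER
#   bytes 4-7    source mtime, 32-bit little-endian (_w_long)
#   bytes 8-11   source size modulo 2**32, 32-bit little-endian (_w_long)
#   bytes 12-    marshal.dumps(code)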
def decode_source(source_bytes):
"""Decode bytes representing source code and return the string.
Universal newline support is used in the decoding.
"""
import tokenize # To avoid bootstrap issues.
source_bytes_readline = _io.BytesIO(source_bytes).readline
encoding = tokenize.detect_encoding(source_bytes_readline)
newline_decoder = _io.IncrementalNewlineDecoder(None, True)
return newline_decoder.decode(source_bytes.decode(encoding[0]))
# Module specifications #######################################################
def _module_repr(module):
# The implementation of ModuleType__repr__().
loader = getattr(module, '__loader__', None)
if hasattr(loader, 'module_repr'):
# As soon as BuiltinImporter, FrozenImporter, and NamespaceLoader
        # drop their implementations for module_repr(), we can add a
# deprecation warning here.
try:
return loader.module_repr(module)
except Exception:
pass
try:
spec = module.__spec__
except AttributeError:
pass
else:
if spec is not None:
return _SpecMethods(spec).module_repr()
# We could use module.__class__.__name__ instead of 'module' in the
# various repr permutations.
try:
name = module.__name__
except AttributeError:
name = '?'
try:
filename = module.__file__
except AttributeError:
if loader is None:
return '<module {!r}>'.format(name)
else:
return '<module {!r} ({!r})>'.format(name, loader)
else:
return '<module {!r} from {!r}>'.format(name, filename)
class _installed_safely:
def __init__(self, module):
self._module = module
self._spec = module.__spec__
def __enter__(self):
# This must be done before putting the module in sys.modules
# (otherwise an optimization shortcut in import.c becomes
# wrong)
self._spec._initializing = True
sys.modules[self._spec.name] = self._module
def __exit__(self, *args):
try:
spec = self._spec
if any(arg is not None for arg in args):
try:
del sys.modules[spec.name]
except KeyError:
pass
else:
_verbose_message('import {!r} # {!r}', spec.name, spec.loader)
finally:
self._spec._initializing = False
class ModuleSpec:
"""The specification for a module, used for loading.
A module's spec is the source for information about the module. For
data associated with the module, including source, use the spec's
loader.
`name` is the absolute name of the module. `loader` is the loader
to use when loading the module. `parent` is the name of the
package the module is in. The parent is derived from the name.
`is_package` determines if the module is considered a package or
not. On modules this is reflected by the `__path__` attribute.
`origin` is the specific location used by the loader from which to
load the module, if that information is available. When filename is
set, origin will match.
`has_location` indicates that a spec's "origin" reflects a location.
When this is True, `__file__` attribute of the module is set.
`cached` is the location of the cached bytecode file, if any. It
corresponds to the `__cached__` attribute.
`submodule_search_locations` is the sequence of path entries to
search when importing submodules. If set, is_package should be
True--and False otherwise.
Packages are simply modules that (may) have submodules. If a spec
has a non-None value in `submodule_search_locations`, the import
system will consider modules loaded from the spec as packages.
Only finders (see importlib.abc.MetaPathFinder and
importlib.abc.PathEntryFinder) should modify ModuleSpec instances.
"""
def __init__(self, name, loader, *, origin=None, loader_state=None,
is_package=None):
self.name = name
self.loader = loader
self.origin = origin
self.loader_state = loader_state
self.submodule_search_locations = [] if is_package else None
# file-location attributes
self._set_fileattr = False
self._cached = None
def __repr__(self):
args = ['name={!r}'.format(self.name),
'loader={!r}'.format(self.loader)]
if self.origin is not None:
args.append('origin={!r}'.format(self.origin))
if self.submodule_search_locations is not None:
args.append('submodule_search_locations={}'
.format(self.submodule_search_locations))
return '{}({})'.format(self.__class__.__name__, ', '.join(args))
def __eq__(self, other):
smsl = self.submodule_search_locations
try:
return (self.name == other.name and
self.loader == other.loader and
self.origin == other.origin and
smsl == other.submodule_search_locations and
self.cached == other.cached and
self.has_location == other.has_location)
except AttributeError:
return False
@property
def cached(self):
if self._cached is None:
if self.origin is not None and self._set_fileattr:
filename = self.origin
if filename.endswith(tuple(SOURCE_SUFFIXES)):
try:
self._cached = cache_from_source(filename)
except NotImplementedError:
pass
elif filename.endswith(tuple(BYTECODE_SUFFIXES)):
self._cached = filename
return self._cached
@cached.setter
def cached(self, cached):
self._cached = cached
@property
def parent(self):
"""The name of the module's parent."""
if self.submodule_search_locations is None:
return self.name.rpartition('.')[0]
else:
return self.name
@property
def has_location(self):
return self._set_fileattr
@has_location.setter
def has_location(self, value):
self._set_fileattr = bool(value)
def spec_from_loader(name, loader, *, origin=None, is_package=None):
"""Return a module spec based on various loader methods."""
if hasattr(loader, 'get_filename'):
if is_package is None:
return spec_from_file_location(name, loader=loader)
search = [] if is_package else None
return spec_from_file_location(name, loader=loader,
submodule_search_locations=search)
if is_package is None:
if hasattr(loader, 'is_package'):
try:
is_package = loader.is_package(name)
except ImportError:
is_package = None # aka, undefined
else:
# the default
is_package = False
return ModuleSpec(name, loader, origin=origin, is_package=is_package)
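# A minimal sketch of spec_from_loader() with a hypothetical loader that
# only implements is_package() (illustration only, not a real loader):
#
#   class _FakeLoader:
#       def is_package(self, name):
#           return False
#
#   spec = spec_from_loader('mymod', _FakeLoader())
#   # spec.name == 'mymod', spec.loader is the _FakeLoader instance and
#   # spec.submodule_search_locations is None (not a package).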
_POPULATE = object()
def spec_from_file_location(name, location=None, *, loader=None,
submodule_search_locations=_POPULATE):
"""Return a module spec based on a file location.
To indicate that the module is a package, set
submodule_search_locations to a list of directory paths. An
    empty list is sufficient, though it's not otherwise useful to the
import system.
The loader must take a spec as its only __init__() arg.
"""
if location is None:
# The caller may simply want a partially populated location-
# oriented spec. So we set the location to a bogus value and
# fill in as much as we can.
location = '<unknown>'
if hasattr(loader, 'get_filename'):
# ExecutionLoader
try:
location = loader.get_filename(name)
except ImportError:
pass
# If the location is on the filesystem, but doesn't actually exist,
# we could return None here, indicating that the location is not
# valid. However, we don't have a good way of testing since an
# indirect location (e.g. a zip file or URL) will look like a
# non-existent file relative to the filesystem.
spec = ModuleSpec(name, loader, origin=location)
spec._set_fileattr = True
# Pick a loader if one wasn't provided.
if loader is None:
for loader_class, suffixes in _get_supported_file_loaders():
if location.endswith(tuple(suffixes)):
loader = loader_class(name, location)
spec.loader = loader
break
else:
return None
# Set submodule_search_paths appropriately.
if submodule_search_locations is _POPULATE:
# Check the loader.
if hasattr(loader, 'is_package'):
try:
is_package = loader.is_package(name)
except ImportError:
pass
else:
if is_package:
spec.submodule_search_locations = []
else:
spec.submodule_search_locations = submodule_search_locations
if spec.submodule_search_locations == []:
if location:
dirname = _path_split(location)[0]
spec.submodule_search_locations.append(dirname)
return spec
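# A hedged example with a hypothetical path: for a plain source file,
#
#   spec = spec_from_file_location('pkg.mod', '/pkg/mod.py')
#
# picks SourceFileLoader from _get_supported_file_loaders(), sets
# spec.origin to '/pkg/mod.py' and spec.has_location to True, and leaves
# submodule_search_locations as None; passing
# submodule_search_locations=['/pkg/mod'] instead marks the spec as a
# package.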
def _spec_from_module(module, loader=None, origin=None):
# This function is meant for use in _setup().
try:
spec = module.__spec__
except AttributeError:
pass
else:
if spec is not None:
return spec
name = module.__name__
if loader is None:
try:
loader = module.__loader__
except AttributeError:
# loader will stay None.
pass
try:
location = module.__file__
except AttributeError:
location = None
if origin is None:
if location is None:
try:
origin = loader._ORIGIN
except AttributeError:
origin = None
else:
origin = location
try:
cached = module.__cached__
except AttributeError:
cached = None
try:
submodule_search_locations = list(module.__path__)
except AttributeError:
submodule_search_locations = None
spec = ModuleSpec(name, loader, origin=origin)
spec._set_fileattr = False if location is None else True
spec.cached = cached
spec.submodule_search_locations = submodule_search_locations
return spec
class _SpecMethods:
"""Convenience wrapper around spec objects to provide spec-specific
methods."""
# The various spec_from_* functions could be made factory methods here.
def __init__(self, spec):
self.spec = spec
def module_repr(self):
"""Return the repr to use for the module."""
# We mostly replicate _module_repr() using the spec attributes.
spec = self.spec
name = '?' if spec.name is None else spec.name
if spec.origin is None:
if spec.loader is None:
return '<module {!r}>'.format(name)
else:
return '<module {!r} ({!r})>'.format(name, spec.loader)
else:
if spec.has_location:
return '<module {!r} from {!r}>'.format(name, spec.origin)
else:
return '<module {!r} ({})>'.format(spec.name, spec.origin)
def init_module_attrs(self, module, *, _override=False, _force_name=True):
"""Set the module's attributes.
All missing import-related module attributes will be set. Here
is how the spec attributes map onto the module:
spec.name -> module.__name__
spec.loader -> module.__loader__
spec.parent -> module.__package__
spec -> module.__spec__
Optional:
spec.origin -> module.__file__ (if spec.set_fileattr is true)
spec.cached -> module.__cached__ (if __file__ also set)
spec.submodule_search_locations -> module.__path__ (if set)
"""
spec = self.spec
        # The passed-in module may not support attribute assignment,
# in which case we simply don't set the attributes.
# __name__
if (_override or _force_name or
getattr(module, '__name__', None) is None):
try:
module.__name__ = spec.name
except AttributeError:
pass
# __loader__
if _override or getattr(module, '__loader__', None) is None:
loader = spec.loader
if loader is None:
# A backward compatibility hack.
if spec.submodule_search_locations is not None:
loader = _NamespaceLoader.__new__(_NamespaceLoader)
loader._path = spec.submodule_search_locations
try:
module.__loader__ = loader
except AttributeError:
pass
# __package__
if _override or getattr(module, '__package__', None) is None:
try:
module.__package__ = spec.parent
except AttributeError:
pass
# __spec__
try:
module.__spec__ = spec
except AttributeError:
pass
# __path__
if _override or getattr(module, '__path__', None) is None:
if spec.submodule_search_locations is not None:
try:
module.__path__ = spec.submodule_search_locations
except AttributeError:
pass
if spec.has_location:
# __file__
if _override or getattr(module, '__file__', None) is None:
try:
module.__file__ = spec.origin
except AttributeError:
pass
# __cached__
if _override or getattr(module, '__cached__', None) is None:
if spec.cached is not None:
try:
module.__cached__ = spec.cached
except AttributeError:
pass
def create(self):
"""Return a new module to be loaded.
The import-related module attributes are also set with the
appropriate values from the spec.
"""
spec = self.spec
# Typically loaders will not implement create_module().
if hasattr(spec.loader, 'create_module'):
# If create_module() returns `None` it means the default
# module creation should be used.
module = spec.loader.create_module(spec)
else:
module = None
if module is None:
# This must be done before open() is ever called as the 'io'
# module implicitly imports 'locale' and would otherwise
# trigger an infinite loop.
module = _new_module(spec.name)
self.init_module_attrs(module)
return module
def _exec(self, module):
"""Do everything necessary to execute the module.
The namespace of `module` is used as the target of execution.
This method uses the loader's `exec_module()` method.
"""
self.spec.loader.exec_module(module)
# Used by importlib.reload() and _load_module_shim().
def exec(self, module):
"""Execute the spec in an existing module's namespace."""
name = self.spec.name
_imp.acquire_lock()
with _ModuleLockManager(name):
if sys.modules.get(name) is not module:
msg = 'module {!r} not in sys.modules'.format(name)
raise ImportError(msg, name=name)
if self.spec.loader is None:
if self.spec.submodule_search_locations is None:
raise ImportError('missing loader', name=self.spec.name)
# namespace package
self.init_module_attrs(module, _override=True)
return module
self.init_module_attrs(module, _override=True)
if not hasattr(self.spec.loader, 'exec_module'):
# (issue19713) Once BuiltinImporter and ExtensionFileLoader
# have exec_module() implemented, we can add a deprecation
# warning here.
self.spec.loader.load_module(name)
else:
self._exec(module)
return sys.modules[name]
def _load_backward_compatible(self):
# (issue19713) Once BuiltinImporter and ExtensionFileLoader
# have exec_module() implemented, we can add a deprecation
# warning here.
spec = self.spec
spec.loader.load_module(spec.name)
# The module must be in sys.modules at this point!
module = sys.modules[spec.name]
if getattr(module, '__loader__', None) is None:
try:
module.__loader__ = spec.loader
except AttributeError:
pass
if getattr(module, '__package__', None) is None:
try:
# Since module.__path__ may not line up with
                # spec.submodule_search_locations, we can't necessarily rely
# on spec.parent here.
module.__package__ = module.__name__
if not hasattr(module, '__path__'):
module.__package__ = spec.name.rpartition('.')[0]
except AttributeError:
pass
if getattr(module, '__spec__', None) is None:
try:
module.__spec__ = spec
except AttributeError:
pass
return module
def _load_unlocked(self):
# A helper for direct use by the import system.
if self.spec.loader is not None:
# not a namespace package
if not hasattr(self.spec.loader, 'exec_module'):
return self._load_backward_compatible()
module = self.create()
with _installed_safely(module):
if self.spec.loader is None:
if self.spec.submodule_search_locations is None:
raise ImportError('missing loader', name=self.spec.name)
# A namespace package so do nothing.
else:
self._exec(module)
# We don't ensure that the import-related module attributes get
# set in the sys.modules replacement case. Such modules are on
# their own.
return sys.modules[self.spec.name]
# A method used during testing of _load_unlocked() and by
# _load_module_shim().
def load(self):
"""Return a new module object, loaded by the spec's loader.
The module is not added to its parent.
If a module is already in sys.modules, that existing module gets
clobbered.
"""
_imp.acquire_lock()
with _ModuleLockManager(self.spec.name):
return self._load_unlocked()
def _fix_up_module(ns, name, pathname, cpathname=None):
# This function is used by PyImport_ExecCodeModuleObject().
loader = ns.get('__loader__')
spec = ns.get('__spec__')
if not loader:
if spec:
loader = spec.loader
elif pathname == cpathname:
loader = SourcelessFileLoader(name, pathname)
else:
loader = SourceFileLoader(name, pathname)
if not spec:
spec = spec_from_file_location(name, pathname, loader=loader)
try:
ns['__spec__'] = spec
ns['__loader__'] = loader
ns['__file__'] = pathname
ns['__cached__'] = cpathname
except Exception:
# Not important enough to report.
pass
# Loaders #####################################################################
class BuiltinImporter:
"""Meta path import for built-in modules.
All methods are either class or static methods to avoid the need to
instantiate the class.
"""
@staticmethod
def module_repr(module):
"""Return repr for the module.
The method is deprecated. The import machinery does the job itself.
"""
return '<module {!r} (built-in)>'.format(module.__name__)
@classmethod
def find_spec(cls, fullname, path=None, target=None):
if path is not None:
return None
if _imp.is_builtin(fullname):
return spec_from_loader(fullname, cls, origin='built-in')
else:
return None
@classmethod
def find_module(cls, fullname, path=None):
"""Find the built-in module.
If 'path' is ever specified then the search is considered a failure.
This method is deprecated. Use find_spec() instead.
"""
spec = cls.find_spec(fullname, path)
return spec.loader if spec is not None else None
@classmethod
@_requires_builtin
def load_module(cls, fullname):
"""Load a built-in module."""
# Once an exec_module() implementation is added we can also
# add a deprecation warning here.
with _ManageReload(fullname):
module = _call_with_frames_removed(_imp.init_builtin, fullname)
module.__loader__ = cls
module.__package__ = ''
return module
@classmethod
@_requires_builtin
def get_code(cls, fullname):
"""Return None as built-in modules do not have code objects."""
return None
@classmethod
@_requires_builtin
def get_source(cls, fullname):
"""Return None as built-in modules do not have source code."""
return None
@classmethod
@_requires_builtin
def is_package(cls, fullname):
"""Return False as built-in modules are never packages."""
return False
class FrozenImporter:
"""Meta path import for frozen modules.
All methods are either class or static methods to avoid the need to
instantiate the class.
"""
@staticmethod
def module_repr(m):
"""Return repr for the module.
The method is deprecated. The import machinery does the job itself.
"""
return '<module {!r} (frozen)>'.format(m.__name__)
@classmethod
def find_spec(cls, fullname, path=None, target=None):
if _imp.is_frozen(fullname):
return spec_from_loader(fullname, cls, origin='frozen')
else:
return None
@classmethod
def find_module(cls, fullname, path=None):
"""Find a frozen module.
This method is deprecated. Use find_spec() instead.
"""
return cls if _imp.is_frozen(fullname) else None
@staticmethod
def exec_module(module):
name = module.__spec__.name
if not _imp.is_frozen(name):
raise ImportError('{!r} is not a frozen module'.format(name),
name=name)
code = _call_with_frames_removed(_imp.get_frozen_object, name)
exec(code, module.__dict__)
@classmethod
def load_module(cls, fullname):
"""Load a frozen module.
This method is deprecated. Use exec_module() instead.
"""
return _load_module_shim(cls, fullname)
@classmethod
@_requires_frozen
def get_code(cls, fullname):
"""Return the code object for the frozen module."""
return _imp.get_frozen_object(fullname)
@classmethod
@_requires_frozen
def get_source(cls, fullname):
"""Return None as frozen modules do not have source code."""
return None
@classmethod
@_requires_frozen
def is_package(cls, fullname):
"""Return True if the frozen module is a package."""
return _imp.is_frozen_package(fullname)
class WindowsRegistryFinder:
"""Meta path finder for modules declared in the Windows registry."""
REGISTRY_KEY = (
'Software\\Python\\PythonCore\\{sys_version}'
'\\Modules\\{fullname}')
REGISTRY_KEY_DEBUG = (
'Software\\Python\\PythonCore\\{sys_version}'
'\\Modules\\{fullname}\\Debug')
DEBUG_BUILD = False # Changed in _setup()
@classmethod
def _open_registry(cls, key):
try:
return _winreg.OpenKey(_winreg.HKEY_CURRENT_USER, key)
except OSError:
return _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, key)
@classmethod
def _search_registry(cls, fullname):
if cls.DEBUG_BUILD:
registry_key = cls.REGISTRY_KEY_DEBUG
else:
registry_key = cls.REGISTRY_KEY
key = registry_key.format(fullname=fullname,
sys_version=sys.version[:3])
try:
with cls._open_registry(key) as hkey:
filepath = _winreg.QueryValue(hkey, '')
except OSError:
return None
return filepath
@classmethod
def find_spec(cls, fullname, path=None, target=None):
filepath = cls._search_registry(fullname)
if filepath is None:
return None
try:
_path_stat(filepath)
except OSError:
return None
for loader, suffixes in _get_supported_file_loaders():
if filepath.endswith(tuple(suffixes)):
spec = spec_from_loader(fullname, loader(fullname, filepath),
origin=filepath)
return spec
@classmethod
def find_module(cls, fullname, path=None):
"""Find module named in the registry.
This method is deprecated. Use exec_module() instead.
"""
spec = cls.find_spec(fullname, path)
if spec is not None:
return spec.loader
else:
return None
class _LoaderBasics:
"""Base class of common code needed by both SourceLoader and
SourcelessFileLoader."""
def is_package(self, fullname):
"""Concrete implementation of InspectLoader.is_package by checking if
the path returned by get_filename has a filename of '__init__.py'."""
filename = _path_split(self.get_filename(fullname))[1]
filename_base = filename.rsplit('.', 1)[0]
tail_name = fullname.rpartition('.')[2]
return filename_base == '__init__' and tail_name != '__init__'
def exec_module(self, module):
"""Execute the module."""
code = self.get_code(module.__name__)
if code is None:
raise ImportError('cannot load module {!r} when get_code() '
'returns None'.format(module.__name__))
_call_with_frames_removed(exec, code, module.__dict__)
load_module = _load_module_shim
class SourceLoader(_LoaderBasics):
def path_mtime(self, path):
"""Optional method that returns the modification time (an int) for the
specified path, where path is a str.
Raises IOError when the path cannot be handled.
"""
raise IOError
def path_stats(self, path):
"""Optional method returning a metadata dict for the specified path
to by the path (str).
Possible keys:
- 'mtime' (mandatory) is the numeric timestamp of last source
code modification;
- 'size' (optional) is the size in bytes of the source code.
Implementing this method allows the loader to read bytecode files.
Raises IOError when the path cannot be handled.
"""
return {'mtime': self.path_mtime(path)}
def _cache_bytecode(self, source_path, cache_path, data):
"""Optional method which writes data (bytes) to a file path (a str).
Implementing this method allows for the writing of bytecode files.
        The source path is needed in order to correctly transfer permissions.
"""
# For backwards compatibility, we delegate to set_data()
return self.set_data(cache_path, data)
def set_data(self, path, data):
"""Optional method which writes data (bytes) to a file path (a str).
Implementing this method allows for the writing of bytecode files.
"""
def get_source(self, fullname):
"""Concrete implementation of InspectLoader.get_source."""
path = self.get_filename(fullname)
try:
source_bytes = self.get_data(path)
except OSError as exc:
raise ImportError('source not available through get_data()',
name=fullname) from exc
return decode_source(source_bytes)
def source_to_code(self, data, path, *, _optimize=-1):
"""Return the code object compiled from source.
The 'data' argument can be any object type that compile() supports.
"""
return _call_with_frames_removed(compile, data, path, 'exec',
dont_inherit=True, optimize=_optimize)
def get_code(self, fullname):
"""Concrete implementation of InspectLoader.get_code.
Reading of bytecode requires path_stats to be implemented. To write
bytecode, set_data must also be implemented.
"""
source_path = self.get_filename(fullname)
source_mtime = None
try:
bytecode_path = cache_from_source(source_path)
except NotImplementedError:
bytecode_path = None
else:
try:
st = self.path_stats(source_path)
except IOError:
pass
else:
source_mtime = int(st['mtime'])
try:
data = self.get_data(bytecode_path)
except OSError:
pass
else:
try:
bytes_data = _validate_bytecode_header(data,
source_stats=st, name=fullname,
path=bytecode_path)
except (ImportError, EOFError):
pass
else:
_verbose_message('{} matches {}', bytecode_path,
source_path)
return _compile_bytecode(bytes_data, name=fullname,
bytecode_path=bytecode_path,
source_path=source_path)
source_bytes = self.get_data(source_path)
code_object = self.source_to_code(source_bytes, source_path)
_verbose_message('code object from {}', source_path)
if (not sys.dont_write_bytecode and bytecode_path is not None and
source_mtime is not None):
data = _code_to_bytecode(code_object, source_mtime,
len(source_bytes))
try:
self._cache_bytecode(source_path, bytecode_path, data)
_verbose_message('wrote {!r}', bytecode_path)
except NotImplementedError:
pass
return code_object
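# In short, get_code() above tries three things in order: read and
# validate the cached bytecode (the cheap path), fall back to compiling
# the source, and, when writing is allowed, push the fresh bytecode back
# through _cache_bytecode() so that the next import takes the cheap path.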
class FileLoader:
"""Base file loader class which implements the loader protocol methods that
require file system usage."""
def __init__(self, fullname, path):
"""Cache the module name and the path to the file found by the
finder."""
self.name = fullname
self.path = path
def __eq__(self, other):
return (self.__class__ == other.__class__ and
self.__dict__ == other.__dict__)
def __hash__(self):
return hash(self.name) ^ hash(self.path)
@_check_name
def load_module(self, fullname):
"""Load a module from a file.
This method is deprecated. Use exec_module() instead.
"""
# The only reason for this method is for the name check.
# Issue #14857: Avoid the zero-argument form of super so the implementation
# of that form can be updated without breaking the frozen module
return super(FileLoader, self).load_module(fullname)
@_check_name
def get_filename(self, fullname):
"""Return the path to the source file as found by the finder."""
return self.path
def get_data(self, path):
"""Return the data from path as raw bytes."""
with _io.FileIO(path, 'r') as file:
return file.read()
class SourceFileLoader(FileLoader, SourceLoader):
"""Concrete implementation of SourceLoader using the file system."""
def path_stats(self, path):
"""Return the metadata for the path."""
st = _path_stat(path)
return {'mtime': st.st_mtime, 'size': st.st_size}
def _cache_bytecode(self, source_path, bytecode_path, data):
# Adapt between the two APIs
mode = _calc_mode(source_path)
return self.set_data(bytecode_path, data, _mode=mode)
def set_data(self, path, data, *, _mode=0o666):
"""Write bytes data to a file."""
parent, filename = _path_split(path)
path_parts = []
# Figure out what directories are missing.
while parent and not _path_isdir(parent):
parent, part = _path_split(parent)
path_parts.append(part)
# Create needed directories.
for part in reversed(path_parts):
parent = _path_join(parent, part)
try:
_os.mkdir(parent)
except FileExistsError:
# Probably another Python process already created the dir.
continue
except OSError as exc:
# Could be a permission error, read-only filesystem: just forget
# about writing the data.
_verbose_message('could not create {!r}: {!r}', parent, exc)
return
try:
_write_atomic(path, data, _mode)
_verbose_message('created {!r}', path)
except OSError as exc:
# Same as above: just don't write the bytecode.
_verbose_message('could not create {!r}: {!r}', path, exc)
class SourcelessFileLoader(FileLoader, _LoaderBasics):
"""Loader which handles sourceless file imports."""
def get_code(self, fullname):
path = self.get_filename(fullname)
data = self.get_data(path)
bytes_data = _validate_bytecode_header(data, name=fullname, path=path)
return _compile_bytecode(bytes_data, name=fullname, bytecode_path=path)
def get_source(self, fullname):
"""Return None as there is no source code."""
return None
# Filled in by _setup().
EXTENSION_SUFFIXES = []
class ExtensionFileLoader:
"""Loader for extension modules.
The constructor is designed to work with FileFinder.
"""
def __init__(self, name, path):
self.name = name
self.path = path
def __eq__(self, other):
return (self.__class__ == other.__class__ and
self.__dict__ == other.__dict__)
def __hash__(self):
return hash(self.name) ^ hash(self.path)
@_check_name
def load_module(self, fullname):
"""Load an extension module."""
# Once an exec_module() implementation is added we can also
# add a deprecation warning here.
with _ManageReload(fullname):
module = _call_with_frames_removed(_imp.load_dynamic,
fullname, self.path)
_verbose_message('extension module loaded from {!r}', self.path)
is_package = self.is_package(fullname)
if is_package and not hasattr(module, '__path__'):
module.__path__ = [_path_split(self.path)[0]]
module.__loader__ = self
module.__package__ = module.__name__
if not is_package:
module.__package__ = module.__package__.rpartition('.')[0]
return module
def is_package(self, fullname):
"""Return True if the extension module is a package."""
file_name = _path_split(self.path)[1]
return any(file_name == '__init__' + suffix
for suffix in EXTENSION_SUFFIXES)
def get_code(self, fullname):
"""Return None as an extension module cannot create a code object."""
return None
def get_source(self, fullname):
"""Return None as extension modules have no source code."""
return None
@_check_name
def get_filename(self, fullname):
"""Return the path to the source file as found by the finder."""
return self.path
class _NamespacePath:
"""Represents a namespace package's path. It uses the module name
to find its parent module, and from there it looks up the parent's
__path__. When this changes, the module's own path is recomputed,
using path_finder. For top-level modules, the parent module's path
is sys.path."""
def __init__(self, name, path, path_finder):
self._name = name
self._path = path
self._last_parent_path = tuple(self._get_parent_path())
self._path_finder = path_finder
def _find_parent_path_names(self):
"""Returns a tuple of (parent-module-name, parent-path-attr-name)"""
parent, dot, me = self._name.rpartition('.')
if dot == '':
# This is a top-level module. sys.path contains the parent path.
return 'sys', 'path'
# Not a top-level module. parent-module.__path__ contains the
# parent path.
return parent, '__path__'
def _get_parent_path(self):
parent_module_name, path_attr_name = self._find_parent_path_names()
return getattr(sys.modules[parent_module_name], path_attr_name)
def _recalculate(self):
# If the parent's path has changed, recalculate _path
parent_path = tuple(self._get_parent_path()) # Make a copy
if parent_path != self._last_parent_path:
spec = self._path_finder(self._name, parent_path)
# Note that no changes are made if a loader is returned, but we
# do remember the new parent path
if spec is not None and spec.loader is None:
if spec.submodule_search_locations:
self._path = spec.submodule_search_locations
self._last_parent_path = parent_path # Save the copy
return self._path
def __iter__(self):
return iter(self._recalculate())
def __len__(self):
return len(self._recalculate())
def __repr__(self):
return '_NamespacePath({!r})'.format(self._path)
def __contains__(self, item):
return item in self._recalculate()
def append(self, item):
self._path.append(item)
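# Sketched with hypothetical names: if 'pkg' is a top-level namespace
# package with portions in '/site-a/pkg' and '/site-b/pkg', its __path__
# is a _NamespacePath whose parent path is sys.path; appending a new
# entry to sys.path makes the next iteration of pkg.__path__ re-run
# _path_finder() and pick up any new portion automatically.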
# We use this exclusively in init_module_attrs() for backward-compatibility.
class _NamespaceLoader:
def __init__(self, name, path, path_finder):
self._path = _NamespacePath(name, path, path_finder)
@classmethod
def module_repr(cls, module):
"""Return repr for the module.
The method is deprecated. The import machinery does the job itself.
"""
return '<module {!r} (namespace)>'.format(module.__name__)
def is_package(self, fullname):
return True
def get_source(self, fullname):
return ''
def get_code(self, fullname):
return compile('', '<string>', 'exec', dont_inherit=True)
def exec_module(self, module):
pass
def load_module(self, fullname):
"""Load a namespace module.
This method is deprecated. Use exec_module() instead.
"""
# The import system never calls this method.
_verbose_message('namespace module loaded with path {!r}', self._path)
return _load_module_shim(self, fullname)
# Finders #####################################################################
class PathFinder:
"""Meta path finder for sys.path and package __path__ attributes."""
@classmethod
def invalidate_caches(cls):
"""Call the invalidate_caches() method on all path entry finders
stored in sys.path_importer_caches (where implemented)."""
for finder in sys.path_importer_cache.values():
if hasattr(finder, 'invalidate_caches'):
finder.invalidate_caches()
@classmethod
def _path_hooks(cls, path):
"""Search sequence of hooks for a finder for 'path'.
If 'hooks' is false then use sys.path_hooks.
"""
if not sys.path_hooks:
_warnings.warn('sys.path_hooks is empty', ImportWarning)
for hook in sys.path_hooks:
try:
return hook(path)
except ImportError:
continue
else:
return None
@classmethod
def _path_importer_cache(cls, path):
"""Get the finder for the path entry from sys.path_importer_cache.
If the path entry is not in the cache, find the appropriate finder
and cache it. If no finder is available, store None.
"""
if path == '':
path = _os.getcwd()
try:
finder = sys.path_importer_cache[path]
except KeyError:
finder = cls._path_hooks(path)
sys.path_importer_cache[path] = finder
return finder
@classmethod
def _legacy_get_spec(cls, fullname, finder):
# This would be a good place for a DeprecationWarning if
# we ended up going that route.
if hasattr(finder, 'find_loader'):
loader, portions = finder.find_loader(fullname)
else:
loader = finder.find_module(fullname)
portions = []
if loader is not None:
return spec_from_loader(fullname, loader)
spec = ModuleSpec(fullname, None)
spec.submodule_search_locations = portions
return spec
@classmethod
def _get_spec(cls, fullname, path, target=None):
"""Find the loader or namespace_path for this module/package name."""
# If this ends up being a namespace package, namespace_path is
# the list of paths that will become its __path__
namespace_path = []
for entry in path:
if not isinstance(entry, (str, bytes)):
continue
finder = cls._path_importer_cache(entry)
if finder is not None:
if hasattr(finder, 'find_spec'):
spec = finder.find_spec(fullname, target)
else:
spec = cls._legacy_get_spec(fullname, finder)
if spec is None:
continue
if spec.loader is not None:
return spec
portions = spec.submodule_search_locations
if portions is None:
raise ImportError('spec missing loader')
# This is possibly part of a namespace package.
# Remember these path entries (if any) for when we
# create a namespace package, and continue iterating
# on path.
namespace_path.extend(portions)
else:
spec = ModuleSpec(fullname, None)
spec.submodule_search_locations = namespace_path
return spec
@classmethod
def find_spec(cls, fullname, path=None, target=None):
"""find the module on sys.path or 'path' based on sys.path_hooks and
sys.path_importer_cache."""
if path is None:
path = sys.path
spec = cls._get_spec(fullname, path, target)
if spec is None:
return None
elif spec.loader is None:
namespace_path = spec.submodule_search_locations
if namespace_path:
# We found at least one namespace path. Return a
# spec which can create the namespace package.
spec.origin = 'namespace'
spec.submodule_search_locations = _NamespacePath(fullname, namespace_path, cls._get_spec)
return spec
else:
return None
else:
return spec
@classmethod
def find_module(cls, fullname, path=None):
"""find the module on sys.path or 'path' based on sys.path_hooks and
sys.path_importer_cache.
This method is deprecated. Use find_spec() instead.
"""
spec = cls.find_spec(fullname, path)
if spec is None:
return None
return spec.loader
class FileFinder:
"""File-based finder.
Interactions with the file system are cached for performance, being
refreshed when the directory the finder is handling has been modified.
"""
def __init__(self, path, *loader_details):
"""Initialize with the path to search on and a variable number of
2-tuples containing the loader and the file suffixes the loader
recognizes."""
loaders = []
for loader, suffixes in loader_details:
loaders.extend((suffix, loader) for suffix in suffixes)
self._loaders = loaders
# Base (directory) path
self.path = path or '.'
self._path_mtime = -1
self._path_cache = set()
self._relaxed_path_cache = set()
def invalidate_caches(self):
"""Invalidate the directory mtime."""
self._path_mtime = -1
find_module = _find_module_shim
def find_loader(self, fullname):
"""Try to find a loader for the specified module, or the namespace
package portions. Returns (loader, list-of-portions).
This method is deprecated. Use find_spec() instead.
"""
spec = self.find_spec(fullname)
if spec is None:
return None, []
return spec.loader, spec.submodule_search_locations or []
def _get_spec(self, loader_class, fullname, path, smsl, target):
loader = loader_class(fullname, path)
return spec_from_file_location(fullname, path, loader=loader,
submodule_search_locations=smsl)
def find_spec(self, fullname, target=None):
"""Try to find a loader for the specified module, or the namespace
package portions. Returns (loader, list-of-portions)."""
is_namespace = False
tail_module = fullname.rpartition('.')[2]
try:
mtime = _path_stat(self.path or _os.getcwd()).st_mtime
except OSError:
mtime = -1
if mtime != self._path_mtime:
self._fill_cache()
self._path_mtime = mtime
# tail_module keeps the original casing, for __file__ and friends
if _relax_case():
cache = self._relaxed_path_cache
cache_module = tail_module.lower()
else:
cache = self._path_cache
cache_module = tail_module
# Check if the module is the name of a directory (and thus a package).
if cache_module in cache:
base_path = _path_join(self.path, tail_module)
for suffix, loader_class in self._loaders:
init_filename = '__init__' + suffix
full_path = _path_join(base_path, init_filename)
if _path_isfile(full_path):
return self._get_spec(loader_class, fullname, full_path, [base_path], target)
else:
# If a namespace package, return the path if we don't
# find a module in the next section.
is_namespace = _path_isdir(base_path)
        # Check whether a file with a proper suffix exists.
for suffix, loader_class in self._loaders:
full_path = _path_join(self.path, tail_module + suffix)
_verbose_message('trying {}'.format(full_path), verbosity=2)
if cache_module + suffix in cache:
if _path_isfile(full_path):
return self._get_spec(loader_class, fullname, full_path, None, target)
if is_namespace:
_verbose_message('possible namespace for {}'.format(base_path))
spec = ModuleSpec(fullname, None)
spec.submodule_search_locations = [base_path]
return spec
return None
def _fill_cache(self):
"""Fill the cache of potential modules and packages for this directory."""
path = self.path
try:
contents = _os.listdir(path or _os.getcwd())
except (FileNotFoundError, PermissionError, NotADirectoryError):
# Directory has either been removed, turned into a file, or made
# unreadable.
contents = []
# We store two cached versions, to handle runtime changes of the
# PYTHONCASEOK environment variable.
if not sys.platform.startswith('win'):
self._path_cache = set(contents)
else:
# Windows users can import modules with case-insensitive file
# suffixes (for legacy reasons). Make the suffix lowercase here
# so it's done once instead of for every import. This is safe as
# the specified suffixes to check against are always specified in a
# case-sensitive manner.
lower_suffix_contents = set()
for item in contents:
name, dot, suffix = item.partition('.')
if dot:
new_name = '{}.{}'.format(name, suffix.lower())
else:
new_name = name
lower_suffix_contents.add(new_name)
self._path_cache = lower_suffix_contents
if sys.platform.startswith(_CASE_INSENSITIVE_PLATFORMS):
self._relaxed_path_cache = {fn.lower() for fn in contents}
@classmethod
def path_hook(cls, *loader_details):
"""A class method which returns a closure to use on sys.path_hook
which will return an instance using the specified loaders and the path
called on the closure.
If the path called on the closure is not a directory, ImportError is
raised.
"""
def path_hook_for_FileFinder(path):
"""Path hook for importlib.machinery.FileFinder."""
if not _path_isdir(path):
raise ImportError('only directories are supported', path=path)
return cls(path, *loader_details)
return path_hook_for_FileFinder
def __repr__(self):
return 'FileFinder({!r})'.format(self.path)
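# How this finder ends up on sys.path_hooks, mirroring _install() (loader
# list abbreviated):
#
#   supported = _get_supported_file_loaders()
#   sys.path_hooks.extend([FileFinder.path_hook(*supported)])
#
# After that, each directory entry on sys.path is served by a cached
# FileFinder instance created through the returned hook.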
# Import itself ###############################################################
class _ImportLockContext:
"""Context manager for the import lock."""
def __enter__(self):
"""Acquire the import lock."""
_imp.acquire_lock()
def __exit__(self, exc_type, exc_value, exc_traceback):
"""Release the import lock regardless of any raised exceptions."""
_imp.release_lock()
def _resolve_name(name, package, level):
"""Resolve a relative module name to an absolute one."""
bits = package.rsplit('.', level - 1)
if len(bits) < level:
raise ValueError('attempted relative import beyond top-level package')
base = bits[0]
return '{}.{}'.format(base, name) if name else base
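# Illustrative example of the resolution above (shown as a comment only,
# since nothing in this module may execute code at import time):
#   _resolve_name('mod', 'pkg.sub', 2) -> 'pkg.mod'
#   ('pkg.sub'.rsplit('.', 1) gives ['pkg', 'sub'], so the base is 'pkg'.)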
def _find_spec_legacy(finder, name, path):
# This would be a good place for a DeprecationWarning if
# we ended up going that route.
loader = finder.find_module(name, path)
if loader is None:
return None
return spec_from_loader(name, loader)
def _find_spec(name, path, target=None):
"""Find a module's loader."""
if not sys.meta_path:
_warnings.warn('sys.meta_path is empty', ImportWarning)
# We check sys.modules here for the reload case. While a passed-in
# target will usually indicate a reload there is no guarantee, whereas
# sys.modules provides one.
is_reload = name in sys.modules
for finder in sys.meta_path:
with _ImportLockContext():
try:
find_spec = finder.find_spec
except AttributeError:
spec = _find_spec_legacy(finder, name, path)
if spec is None:
continue
else:
spec = find_spec(name, path, target)
if spec is not None:
# The parent import may have already imported this module.
if not is_reload and name in sys.modules:
module = sys.modules[name]
try:
__spec__ = module.__spec__
except AttributeError:
# We use the found spec since that is the one that
# we would have used if the parent module hadn't
# beaten us to the punch.
return spec
else:
if __spec__ is None:
return spec
else:
return __spec__
else:
return spec
else:
return None
def _sanity_check(name, package, level):
"""Verify arguments are "sane"."""
if not isinstance(name, str):
raise TypeError('module name must be str, not {}'.format(type(name)))
if level < 0:
raise ValueError('level must be >= 0')
if package:
if not isinstance(package, str):
raise TypeError('__package__ not set to a string')
elif package not in sys.modules:
msg = ('Parent module {!r} not loaded, cannot perform relative '
'import')
raise SystemError(msg.format(package))
if not name and level == 0:
raise ValueError('Empty module name')
_ERR_MSG_PREFIX = 'No module named '
_ERR_MSG = _ERR_MSG_PREFIX + '{!r}'
def _find_and_load_unlocked(name, import_):
path = None
parent = name.rpartition('.')[0]
if parent:
if parent not in sys.modules:
_call_with_frames_removed(import_, parent)
# Crazy side-effects!
if name in sys.modules:
return sys.modules[name]
parent_module = sys.modules[parent]
try:
path = parent_module.__path__
except AttributeError:
msg = (_ERR_MSG + '; {!r} is not a package').format(name, parent)
raise ImportError(msg, name=name)
spec = _find_spec(name, path)
if spec is None:
raise ImportError(_ERR_MSG.format(name), name=name)
else:
module = _SpecMethods(spec)._load_unlocked()
if parent:
# Set the module as an attribute on its parent.
parent_module = sys.modules[parent]
setattr(parent_module, name.rpartition('.')[2], module)
return module
def _find_and_load(name, import_):
"""Find and load the module, and release the import lock."""
with _ModuleLockManager(name):
return _find_and_load_unlocked(name, import_)
def _gcd_import(name, package=None, level=0):
"""Import and return the module based on its name, the package the call is
being made from, and the level adjustment.
This function represents the greatest common denominator of functionality
between import_module and __import__. This includes setting __package__ if
the loader did not.
"""
_sanity_check(name, package, level)
if level > 0:
name = _resolve_name(name, package, level)
_imp.acquire_lock()
if name not in sys.modules:
return _find_and_load(name, _gcd_import)
module = sys.modules[name]
if module is None:
_imp.release_lock()
message = ('import of {} halted; '
'None in sys.modules'.format(name))
raise ImportError(message, name=name)
_lock_unlock_module(name)
return module
def _handle_fromlist(module, fromlist, import_):
"""Figure out what __import__ should return.
The import_ parameter is a callable which takes the name of module to
import. It is required to decouple the function from assuming importlib's
import implementation is desired.
"""
# The hell that is fromlist ...
# If a package was imported, try to import stuff from fromlist.
if hasattr(module, '__path__'):
if '*' in fromlist:
fromlist = list(fromlist)
fromlist.remove('*')
if hasattr(module, '__all__'):
fromlist.extend(module.__all__)
for x in fromlist:
if not hasattr(module, x):
from_name = '{}.{}'.format(module.__name__, x)
try:
_call_with_frames_removed(import_, from_name)
except ImportError as exc:
# Backwards-compatibility dictates we ignore failed
# imports triggered by fromlist for modules that don't
# exist.
if str(exc).startswith(_ERR_MSG_PREFIX):
if exc.name == from_name:
continue
raise
return module
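# For example, ``from pkg import mod`` first imports 'pkg' and then reaches
# this function, which imports 'pkg.mod' on demand because 'mod' is not yet
# an attribute of 'pkg'; ``from pkg import *`` expands fromlist from
# pkg.__all__ first. ('pkg' and 'mod' are hypothetical names.)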
def _calc___package__(globals):
"""Calculate what __package__ should be.
__package__ is not guaranteed to be defined or could be set to None
to represent that its proper value is unknown.
"""
package = globals.get('__package__')
if package is None:
package = globals['__name__']
if '__path__' not in globals:
package = package.rpartition('.')[0]
return package
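# For instance, for a plain module whose globals carry __name__ == 'pkg.mod'
# and no __path__, this computes __package__ == 'pkg'; for the package 'pkg'
# itself (__path__ present), __package__ stays 'pkg'.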
def _get_supported_file_loaders():
"""Returns a list of file-based module loaders.
Each item is a tuple (loader, suffixes).
"""
extensions = ExtensionFileLoader, _imp.extension_suffixes()
source = SourceFileLoader, SOURCE_SUFFIXES
bytecode = SourcelessFileLoader, BYTECODE_SUFFIXES
return [extensions, source, bytecode]
def __import__(name, globals=None, locals=None, fromlist=(), level=0):
"""Import a module.
    The 'globals' argument is used to infer where the import is occurring from
to handle relative imports. The 'locals' argument is ignored. The
'fromlist' argument specifies what should exist as attributes on the module
being imported (e.g. ``from module import <fromlist>``). The 'level'
argument represents the package location to import from in a relative
import (e.g. ``from ..pkg import mod`` would have a 'level' of 2).
"""
if level == 0:
module = _gcd_import(name)
else:
globals_ = globals if globals is not None else {}
package = _calc___package__(globals_)
module = _gcd_import(name, package, level)
if not fromlist:
# Return up to the first dot in 'name'. This is complicated by the fact
# that 'name' may be relative.
if level == 0:
return _gcd_import(name.partition('.')[0])
elif not name:
return module
else:
# Figure out where to slice the module's name up to the first dot
# in 'name'.
cut_off = len(name) - len(name.partition('.')[0])
# Slice end needs to be positive to alleviate need to special-case
# when ``'.' not in name``.
return sys.modules[module.__name__[:len(module.__name__)-cut_off]]
else:
return _handle_fromlist(module, fromlist, _gcd_import)
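# A worked example of the behaviour above, with hypothetical names: for
# ``import a.b`` the call is __import__('a.b') with an empty fromlist, so the
# top-level module sys.modules['a'] is returned; for ``from a.b import c``
# the fromlist is ('c',) and _handle_fromlist() returns the 'a.b' module.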
def _builtin_from_name(name):
spec = BuiltinImporter.find_spec(name)
if spec is None:
raise ImportError('no built-in module named ' + name)
methods = _SpecMethods(spec)
return methods._load_unlocked()
def _setup(sys_module, _imp_module):
"""Setup importlib by importing needed built-in modules and injecting them
into the global namespace.
As sys is needed for sys.modules access and _imp is needed to load built-in
modules, those two modules must be explicitly passed in.
"""
global _imp, sys, BYTECODE_SUFFIXES
_imp = _imp_module
sys = sys_module
if sys.flags.optimize:
BYTECODE_SUFFIXES = OPTIMIZED_BYTECODE_SUFFIXES
else:
BYTECODE_SUFFIXES = DEBUG_BYTECODE_SUFFIXES
# Set up the spec for existing builtin/frozen modules.
module_type = type(sys)
for name, module in sys.modules.items():
if isinstance(module, module_type):
if name in sys.builtin_module_names:
loader = BuiltinImporter
elif _imp.is_frozen(name):
loader = FrozenImporter
else:
continue
spec = _spec_from_module(module, loader)
methods = _SpecMethods(spec)
methods.init_module_attrs(module)
# Directly load built-in modules needed during bootstrap.
self_module = sys.modules[__name__]
for builtin_name in ('_io', '_warnings', 'builtins', 'marshal'):
if builtin_name not in sys.modules:
builtin_module = _builtin_from_name(builtin_name)
else:
builtin_module = sys.modules[builtin_name]
setattr(self_module, builtin_name, builtin_module)
# Directly load the os module (needed during bootstrap).
os_details = ('posix', ['/']), ('nt', ['\\', '/'])
for builtin_os, path_separators in os_details:
# Assumption made in _path_join()
assert all(len(sep) == 1 for sep in path_separators)
path_sep = path_separators[0]
if builtin_os in sys.modules:
os_module = sys.modules[builtin_os]
break
else:
try:
os_module = _builtin_from_name(builtin_os)
break
except ImportError:
continue
else:
raise ImportError('importlib requires posix or nt')
setattr(self_module, '_os', os_module)
setattr(self_module, 'path_sep', path_sep)
setattr(self_module, 'path_separators', ''.join(path_separators))
# Directly load the _thread module (needed during bootstrap).
try:
thread_module = _builtin_from_name('_thread')
except ImportError:
# Python was built without threads
thread_module = None
setattr(self_module, '_thread', thread_module)
# Directly load the _weakref module (needed during bootstrap).
weakref_module = _builtin_from_name('_weakref')
setattr(self_module, '_weakref', weakref_module)
# Directly load the winreg module (needed during bootstrap).
if builtin_os == 'nt':
winreg_module = _builtin_from_name('winreg')
setattr(self_module, '_winreg', winreg_module)
# Constants
setattr(self_module, '_relax_case', _make_relax_case())
EXTENSION_SUFFIXES.extend(_imp.extension_suffixes())
if builtin_os == 'nt':
SOURCE_SUFFIXES.append('.pyw')
if '_d.pyd' in EXTENSION_SUFFIXES:
WindowsRegistryFinder.DEBUG_BUILD = True
def _install(sys_module, _imp_module):
"""Install importlib as the implementation of import."""
_setup(sys_module, _imp_module)
supported_loaders = _get_supported_file_loaders()
sys.path_hooks.extend([FileFinder.path_hook(*supported_loaders)])
sys.meta_path.append(BuiltinImporter)
sys.meta_path.append(FrozenImporter)
if _os.__name__ == 'nt':
sys.meta_path.append(WindowsRegistryFinder)
sys.meta_path.append(PathFinder)
"""Core implementation of import.
This module is NOT meant to be directly imported! It has been designed such
that it can be bootstrapped into Python as the implementation of import. As
such it requires the injection of specific modules and attributes in order to
work. One should use importlib as the public-facing version of this module.
"""
#
# IMPORTANT: Whenever making changes to this module, be sure to run
# a top-level make in order to get the frozen version of the module
# updated. Not doing so will cause the Makefile to fail for
# all others who don't have a ./python around to freeze the module
# in the early stages of compilation.
#
# See importlib._setup() for what is injected into the global namespace.
# When editing this code be aware that code executed at import time CANNOT
# reference any injected objects! This includes not only global code but also
# anything specified at the class level.
# Bootstrap-related code ######################################################
_CASE_INSENSITIVE_PLATFORMS = 'win', 'cygwin', 'darwin'
def _make_relax_case():
if sys.platform.startswith(_CASE_INSENSITIVE_PLATFORMS):
def _relax_case():
"""True if filenames must be checked case-insensitively."""
return b'PYTHONCASEOK' in _os.environ
else:
def _relax_case():
"""True if filenames must be checked case-insensitively."""
return False
return _relax_case
def _w_long(x):
"""Convert a 32-bit integer to little-endian."""
return (int(x) & 0xFFFFFFFF).to_bytes(4, 'little')
def _r_long(int_bytes):
"""Convert 4 bytes in little-endian to an integer."""
return int.from_bytes(int_bytes, 'little')
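# Round-trip example for the two helpers above (comments only; nothing in
# this module may run at import time):
#   _w_long(1) -> b'\x01\x00\x00\x00'
#   _r_long(b'\x01\x00\x00\x00') -> 1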
def _path_join(*path_parts):
"""Replacement for os.path.join()."""
return path_sep.join([part.rstrip(path_separators)
for part in path_parts if part])
def _path_split(path):
"""Replacement for os.path.split()."""
if len(path_separators) == 1:
front, _, tail = path.rpartition(path_sep)
return front, tail
for x in reversed(path):
if x in path_separators:
front, tail = path.rsplit(x, maxsplit=1)
return front, tail
return '', path
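# Example, assuming a POSIX path separator of '/':
#   _path_join('/usr', 'lib/', 'python') -> '/usr/lib/python'
#   _path_split('/usr/lib/python') -> ('/usr/lib', 'python')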
def _path_stat(path):
"""Stat the path.
Made a separate function to make it easier to override in experiments
(e.g. cache stat results).
"""
return _os.stat(path)
def _path_is_mode_type(path, mode):
"""Test whether the path is the specified mode type."""
try:
stat_info = _path_stat(path)
except OSError:
return False
return (stat_info.st_mode & 0o170000) == mode
def _path_isfile(path):
"""Replacement for os.path.isfile."""
return _path_is_mode_type(path, 0o100000)
def _path_isdir(path):
"""Replacement for os.path.isdir."""
if not path:
path = _os.getcwd()
return _path_is_mode_type(path, 0o040000)
def _write_atomic(path, data, mode=0o666):
"""Best-effort function to write data to a path atomically.
Be prepared to handle a FileExistsError if concurrent writing of the
temporary file is attempted."""
# id() is used to generate a pseudo-random filename.
path_tmp = '{}.{}'.format(path, id(path))
fd = _os.open(path_tmp,
_os.O_EXCL | _os.O_CREAT | _os.O_WRONLY, mode & 0o666)
try:
# We first write data to a temporary file, and then use os.replace() to
# perform an atomic rename.
with _io.FileIO(fd, 'wb') as file:
file.write(data)
_os.replace(path_tmp, path)
except OSError:
try:
_os.unlink(path_tmp)
except OSError:
pass
raise
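# The write-then-replace sequence above relies on os.replace() being atomic
# when source and destination live on the same filesystem, so other processes
# never observe a partially written bytecode file.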
def _wrap(new, old):
"""Simple substitute for functools.update_wrapper."""
for replace in ['__module__', '__name__', '__qualname__', '__doc__']:
if hasattr(old, replace):
setattr(new, replace, getattr(old, replace))
new.__dict__.update(old.__dict__)
def _new_module(name):
return type(sys)(name)
_code_type = type(_wrap.__code__)
class _ManageReload:
"""Manages the possible clean-up of sys.modules for load_module()."""
def __init__(self, name):
self._name = name
def __enter__(self):
self._is_reload = self._name in sys.modules
def __exit__(self, *args):
if any(arg is not None for arg in args) and not self._is_reload:
try:
del sys.modules[self._name]
except KeyError:
pass
# Module-level locking ########################################################
# A dict mapping module names to weakrefs of _ModuleLock instances
_module_locks = {}
# A dict mapping thread ids to _ModuleLock instances
_blocking_on = {}
class _DeadlockError(RuntimeError):
pass
class _ModuleLock:
"""A recursive lock implementation which is able to detect deadlocks
(e.g. thread 1 trying to take locks A then B, and thread 2 trying to
take locks B then A).
"""
def __init__(self, name):
self.lock = _thread.allocate_lock()
self.wakeup = _thread.allocate_lock()
self.name = name
self.owner = None
self.count = 0
self.waiters = 0
def has_deadlock(self):
# Deadlock avoidance for concurrent circular imports.
me = _thread.get_ident()
tid = self.owner
while True:
lock = _blocking_on.get(tid)
if lock is None:
return False
tid = lock.owner
if tid == me:
return True
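    # A sketch of the cycle detected above, with hypothetical threads and
    # modules: thread T1 holds the lock for 'a' and blocks on 'b' while
    # thread T2 holds 'b' and blocks on 'a'; following owner -> blocked-on
    # links from self.owner eventually leads back to the current thread.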
def acquire(self):
"""
Acquire the module lock. If a potential deadlock is detected,
a _DeadlockError is raised.
Otherwise, the lock is always acquired and True is returned.
"""
tid = _thread.get_ident()
_blocking_on[tid] = self
try:
while True:
with self.lock:
if self.count == 0 or self.owner == tid:
self.owner = tid
self.count += 1
return True
if self.has_deadlock():
raise _DeadlockError('deadlock detected by %r' % self)
if self.wakeup.acquire(False):
self.waiters += 1
# Wait for a release() call
self.wakeup.acquire()
self.wakeup.release()
finally:
del _blocking_on[tid]
def release(self):
tid = _thread.get_ident()
with self.lock:
if self.owner != tid:
raise RuntimeError('cannot release un-acquired lock')
assert self.count > 0
self.count -= 1
if self.count == 0:
self.owner = None
if self.waiters:
self.waiters -= 1
self.wakeup.release()
def __repr__(self):
return '_ModuleLock({!r}) at {}'.format(self.name, id(self))
class _DummyModuleLock:
"""A simple _ModuleLock equivalent for Python builds without
multi-threading support."""
def __init__(self, name):
self.name = name
self.count = 0
def acquire(self):
self.count += 1
return True
def release(self):
if self.count == 0:
raise RuntimeError('cannot release un-acquired lock')
self.count -= 1
def __repr__(self):
return '_DummyModuleLock({!r}) at {}'.format(self.name, id(self))
class _ModuleLockManager:
def __init__(self, name):
self._name = name
self._lock = None
def __enter__(self):
try:
self._lock = _get_module_lock(self._name)
finally:
_imp.release_lock()
self._lock.acquire()
def __exit__(self, *args, **kwargs):
self._lock.release()
# The following two functions are for consumption by Python/import.c.
def _get_module_lock(name):
"""Get or create the module lock for a given module name.
Should only be called with the import lock taken."""
lock = None
try:
lock = _module_locks[name]()
except KeyError:
pass
if lock is None:
if _thread is None:
lock = _DummyModuleLock(name)
else:
lock = _ModuleLock(name)
def cb(_):
del _module_locks[name]
_module_locks[name] = _weakref.ref(lock, cb)
return lock
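# The weakref callback registered above drops the _module_locks entry as soon
# as the last strong reference to a lock disappears, so the cache does not
# grow without bound as modules are imported.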
def _lock_unlock_module(name):
"""Release the global import lock, and acquires then release the
module lock for a given module name.
This is used to ensure a module is completely initialized, in the
event it is being imported by another thread.
Should only be called with the import lock taken."""
lock = _get_module_lock(name)
_imp.release_lock()
try:
lock.acquire()
except _DeadlockError:
# Concurrent circular import, we'll accept a partially initialized
# module object.
pass
else:
lock.release()
# Frame stripping magic ###############################################
def _call_with_frames_removed(f, *args, **kwds):
"""remove_importlib_frames in import.c will always remove sequences
of importlib frames that end with a call to this function
Use it instead of a normal call in places where including the importlib
frames introduces unwanted noise into the traceback (e.g. when executing
module code)
"""
return f(*args, **kwds)
# Finder/loader utility code ###############################################
# Magic word to reject .pyc files generated by other Python versions.
# It should change for each incompatible change to the bytecode.
#
# The value of CR and LF is incorporated so if you ever read or write
# a .pyc file in text mode the magic number will be wrong; also, the
# Apple MPW compiler swaps their values, botching string constants.
#
# The magic numbers must be spaced at least 2 values apart, as the
# -U interpreter flag will cause MAGIC+1 to be used. They have been
# odd numbers for some time now.
#
# There were a variety of old schemes for setting the magic number.
# The current working scheme is to increment the previous value by
# 10.
#
# Starting with the adoption of PEP 3147 in Python 3.2, every bump in magic
# number also includes a new "magic tag", i.e. a human readable string used
# to represent the magic number in __pycache__ directories. When you change
# the magic number, you must also set a new unique magic tag. Generally this
# can be named after the Python major version of the magic number bump, but
# it can really be anything, as long as it's different than anything else
# that's come before. The tags are included in the following table, starting
# with Python 3.2a0.
#
# Known values:
# Python 1.5: 20121
# Python 1.5.1: 20121
# Python 1.5.2: 20121
# Python 1.6: 50428
# Python 2.0: 50823
# Python 2.0.1: 50823
# Python 2.1: 60202
# Python 2.1.1: 60202
# Python 2.1.2: 60202
# Python 2.2: 60717
# Python 2.3a0: 62011
# Python 2.3a0: 62021
# Python 2.3a0: 62011 (!)
# Python 2.4a0: 62041
# Python 2.4a3: 62051
# Python 2.4b1: 62061
# Python 2.5a0: 62071
# Python 2.5a0: 62081 (ast-branch)
# Python 2.5a0: 62091 (with)
# Python 2.5a0: 62092 (changed WITH_CLEANUP opcode)
# Python 2.5b3: 62101 (fix wrong code: for x, in ...)
# Python 2.5b3: 62111 (fix wrong code: x += yield)
# Python 2.5c1: 62121 (fix wrong lnotab with for loops and
# storing constants that should have been removed)
# Python 2.5c2: 62131 (fix wrong code: for x, in ... in listcomp/genexp)
# Python 2.6a0: 62151 (peephole optimizations and STORE_MAP opcode)
# Python 2.6a1: 62161 (WITH_CLEANUP optimization)
# Python 2.7a0: 62171 (optimize list comprehensions/change LIST_APPEND)
# Python 2.7a0: 62181 (optimize conditional branches:
# introduce POP_JUMP_IF_FALSE and POP_JUMP_IF_TRUE)
# Python 2.7a0 62191 (introduce SETUP_WITH)
# Python 2.7a0 62201 (introduce BUILD_SET)
# Python 2.7a0 62211 (introduce MAP_ADD and SET_ADD)
# Python 3000: 3000
# 3010 (removed UNARY_CONVERT)
# 3020 (added BUILD_SET)
# 3030 (added keyword-only parameters)
# 3040 (added signature annotations)
# 3050 (print becomes a function)
# 3060 (PEP 3115 metaclass syntax)
# 3061 (string literals become unicode)
# 3071 (PEP 3109 raise changes)
# 3081 (PEP 3137 make __file__ and __name__ unicode)
# 3091 (kill str8 interning)
# 3101 (merge from 2.6a0, see 62151)
# 3103 (__file__ points to source file)
# Python 3.0a4: 3111 (WITH_CLEANUP optimization).
# Python 3.0a5: 3131 (lexical exception stacking, including POP_EXCEPT)
# Python 3.1a0: 3141 (optimize list, set and dict comprehensions:
# change LIST_APPEND and SET_ADD, add MAP_ADD)
# Python 3.1a0: 3151 (optimize conditional branches:
# introduce POP_JUMP_IF_FALSE and POP_JUMP_IF_TRUE)
# Python 3.2a0: 3160 (add SETUP_WITH)
# tag: cpython-32
# Python 3.2a1: 3170 (add DUP_TOP_TWO, remove DUP_TOPX and ROT_FOUR)
# tag: cpython-32
# Python 3.2a2 3180 (add DELETE_DEREF)
# Python 3.3a0 3190 __class__ super closure changed
# Python 3.3a0 3200 (__qualname__ added)
# 3210 (added size modulo 2**32 to the pyc header)
# Python 3.3a1 3220 (changed PEP 380 implementation)
# Python 3.3a4 3230 (revert changes to implicit __class__ closure)
# Python 3.4a1 3250 (evaluate positional default arguments before
# keyword-only defaults)
# Python 3.4a1 3260 (add LOAD_CLASSDEREF; allow locals of class to override
# free vars)
# Python 3.4a1 3270 (various tweaks to the __class__ closure)
# Python 3.4a1 3280 (remove implicit class argument)
# Python 3.4a4 3290 (changes to __qualname__ computation)
# Python 3.4a4 3300 (more changes to __qualname__ computation)
# Python 3.4rc2 3310 (alter __qualname__ computation)
#
# MAGIC must change whenever the bytecode emitted by the compiler may no
# longer be understood by older implementations of the eval loop (usually
# due to the addition of new opcodes).
MAGIC_NUMBER = (3310).to_bytes(2, 'little') + b'\r\n'
_RAW_MAGIC_NUMBER = int.from_bytes(MAGIC_NUMBER, 'little') # For import.c
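# For this version, 3310 == 0x0CEE, so MAGIC_NUMBER is b'\xee\x0c\r\n'
# (two little-endian bytes followed by CR LF).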
_PYCACHE = '__pycache__'
SOURCE_SUFFIXES = ['.py'] # _setup() adds .pyw as needed.
DEBUG_BYTECODE_SUFFIXES = ['.pyc']
OPTIMIZED_BYTECODE_SUFFIXES = ['.pyo']
def cache_from_source(path, debug_override=None):
"""Given the path to a .py file, return the path to its .pyc/.pyo file.
The .py file does not need to exist; this simply returns the path to the
.pyc/.pyo file calculated as if the .py file were imported. The extension
will be .pyc unless sys.flags.optimize is non-zero, then it will be .pyo.
If debug_override is not None, then it must be a boolean and is used in
place of sys.flags.optimize.
If sys.implementation.cache_tag is None then NotImplementedError is raised.
"""
debug = not sys.flags.optimize if debug_override is None else debug_override
if debug:
suffixes = DEBUG_BYTECODE_SUFFIXES
else:
suffixes = OPTIMIZED_BYTECODE_SUFFIXES
head, tail = _path_split(path)
base, sep, rest = tail.rpartition('.')
tag = sys.implementation.cache_tag
if tag is None:
raise NotImplementedError('sys.implementation.cache_tag is None')
filename = ''.join([(base if base else rest), sep, tag, suffixes[0]])
return _path_join(head, _PYCACHE, filename)
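# Illustrative mapping (the tag comes from sys.implementation.cache_tag;
# 'cpython-34' is only an example value):
#   cache_from_source('/foo/bar.py') -> '/foo/__pycache__/bar.cpython-34.pyc'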
def source_from_cache(path):
"""Given the path to a .pyc./.pyo file, return the path to its .py file.
The .pyc/.pyo file does not need to exist; this simply returns the path to
the .py file calculated to correspond to the .pyc/.pyo file. If path does
not conform to PEP 3147 format, ValueError will be raised. If
sys.implementation.cache_tag is None then NotImplementedError is raised.
"""
if sys.implementation.cache_tag is None:
raise NotImplementedError('sys.implementation.cache_tag is None')
head, pycache_filename = _path_split(path)
head, pycache = _path_split(head)
if pycache != _PYCACHE:
raise ValueError('{} not bottom-level directory in '
'{!r}'.format(_PYCACHE, path))
if pycache_filename.count('.') != 2:
raise ValueError('expected only 2 dots in '
'{!r}'.format(pycache_filename))
base_filename = pycache_filename.partition('.')[0]
return _path_join(head, base_filename + SOURCE_SUFFIXES[0])
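# The inverse of the example above:
#   source_from_cache('/foo/__pycache__/bar.cpython-34.pyc') -> '/foo/bar.py'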
def _get_sourcefile(bytecode_path):
"""Convert a bytecode file path to a source path (if possible).
This function exists purely for backwards-compatibility for
PyImport_ExecCodeModuleWithFilenames() in the C API.
"""
if len(bytecode_path) == 0:
return None
rest, _, extension = bytecode_path.rpartition('.')
if not rest or extension.lower()[-3:-1] != 'py':
return bytecode_path
try:
source_path = source_from_cache(bytecode_path)
except (NotImplementedError, ValueError):
source_path = bytecode_path[:-1]
return source_path if _path_isfile(source_path) else bytecode_path
def _calc_mode(path):
"""Calculate the mode permissions for a bytecode file."""
try:
mode = _path_stat(path).st_mode
except OSError:
mode = 0o666
# We always ensure write access so we can update cached files
# later even when the source files are read-only on Windows (#6074)
mode |= 0o200
return mode
def _verbose_message(message, *args, verbosity=1):
"""Print the message to stderr if -v/PYTHONVERBOSE is turned on."""
if sys.flags.verbose >= verbosity:
if not message.startswith(('#', 'import ')):
message = '# ' + message
print(message.format(*args), file=sys.stderr)
def _check_name(method):
"""Decorator to verify that the module being requested matches the one the
loader can handle.
    The first argument (self) must define a 'name' attribute which the second
    argument is compared against. If the comparison fails then ImportError is
    raised.
"""
def _check_name_wrapper(self, name=None, *args, **kwargs):
if name is None:
name = self.name
elif self.name != name:
raise ImportError('loader cannot handle %s' % name, name=name)
return method(self, name, *args, **kwargs)
_wrap(_check_name_wrapper, method)
return _check_name_wrapper
def _requires_builtin(fxn):
"""Decorator to verify the named module is built-in."""
def _requires_builtin_wrapper(self, fullname):
if fullname not in sys.builtin_module_names:
raise ImportError('{!r} is not a built-in module'.format(fullname),
name=fullname)
return fxn(self, fullname)
_wrap(_requires_builtin_wrapper, fxn)
return _requires_builtin_wrapper
def _requires_frozen(fxn):
"""Decorator to verify the named module is frozen."""
def _requires_frozen_wrapper(self, fullname):
if not _imp.is_frozen(fullname):
raise ImportError('{!r} is not a frozen module'.format(fullname),
name=fullname)
return fxn(self, fullname)
_wrap(_requires_frozen_wrapper, fxn)
return _requires_frozen_wrapper
def _find_module_shim(self, fullname):
"""Try to find a loader for the specified module by delegating to
self.find_loader().
This method is deprecated in favor of finder.find_spec().
"""
# Call find_loader(). If it returns a string (indicating this
# is a namespace package portion), generate a warning and
# return None.
loader, portions = self.find_loader(fullname)
if loader is None and len(portions):
msg = 'Not importing directory {}: missing __init__'
_warnings.warn(msg.format(portions[0]), ImportWarning)
return loader
def _load_module_shim(self, fullname):
"""Load the specified module into sys.modules and return it.
This method is deprecated. Use loader.exec_module instead.
"""
spec = spec_from_loader(fullname, self)
methods = _SpecMethods(spec)
if fullname in sys.modules:
module = sys.modules[fullname]
methods.exec(module)
return sys.modules[fullname]
else:
return methods.load()
def _validate_bytecode_header(data, source_stats=None, name=None, path=None):
"""Validate the header of the passed-in bytecode against source_stats (if
    given) and return the bytecode that can be compiled by compile().
All other arguments are used to enhance error reporting.
ImportError is raised when the magic number is incorrect or the bytecode is
found to be stale. EOFError is raised when the data is found to be
truncated.
"""
exc_details = {}
if name is not None:
exc_details['name'] = name
else:
# To prevent having to make all messages have a conditional name.
name = '<bytecode>'
if path is not None:
exc_details['path'] = path
magic = data[:4]
raw_timestamp = data[4:8]
raw_size = data[8:12]
if magic != MAGIC_NUMBER:
message = 'bad magic number in {!r}: {!r}'.format(name, magic)
_verbose_message(message)
raise ImportError(message, **exc_details)
elif len(raw_timestamp) != 4:
message = 'reached EOF while reading timestamp in {!r}'.format(name)
_verbose_message(message)
raise EOFError(message)
elif len(raw_size) != 4:
message = 'reached EOF while reading size of source in {!r}'.format(name)
_verbose_message(message)
raise EOFError(message)
if source_stats is not None:
try:
source_mtime = int(source_stats['mtime'])
except KeyError:
pass
else:
if _r_long(raw_timestamp) != source_mtime:
message = 'bytecode is stale for {!r}'.format(name)
_verbose_message(message)
raise ImportError(message, **exc_details)
try:
source_size = source_stats['size'] & 0xFFFFFFFF
except KeyError:
pass
else:
if _r_long(raw_size) != source_size:
raise ImportError('bytecode is stale for {!r}'.format(name),
**exc_details)
return data[12:]
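# Header layout validated above (the bytecode file format used here):
#   bytes 0-3   magic number (must equal MAGIC_NUMBER)
#   bytes 4-7   source mtime, little-endian
#   bytes 8-11  source size modulo 2**32, little-endian
#   bytes 12+   marshalled code object (returned to the caller)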
def _compile_bytecode(data, name=None, bytecode_path=None, source_path=None):
"""Compile bytecode as returned by _validate_bytecode_header()."""
code = marshal.loads(data)
if isinstance(code, _code_type):
_verbose_message('code object from {!r}', bytecode_path)
if source_path is not None:
_imp._fix_co_filename(code, source_path)
return code
else:
raise ImportError('Non-code object in {!r}'.format(bytecode_path),
name=name, path=bytecode_path)
def _code_to_bytecode(code, mtime=0, source_size=0):
"""Compile a code object into bytecode for writing out to a byte-compiled
file."""
data = bytearray(MAGIC_NUMBER)
data.extend(_w_long(mtime))
data.extend(_w_long(source_size))
data.extend(marshal.dumps(code))
return data
def decode_source(source_bytes):
"""Decode bytes representing source code and return the string.
Universal newline support is used in the decoding.
"""
import tokenize # To avoid bootstrap issues.
source_bytes_readline = _io.BytesIO(source_bytes).readline
encoding = tokenize.detect_encoding(source_bytes_readline)
newline_decoder = _io.IncrementalNewlineDecoder(None, True)
return newline_decoder.decode(source_bytes.decode(encoding[0]))
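# tokenize.detect_encoding() honours a PEP 263 coding cookie or a UTF-8 BOM,
# and the incremental newline decoder normalizes '\r\n' and '\r' to '\n' in
# the decoded source before it is returned.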
# Module specifications #######################################################
def _module_repr(module):
    # The implementation of ModuleType.__repr__().
loader = getattr(module, '__loader__', None)
if hasattr(loader, 'module_repr'):
# As soon as BuiltinImporter, FrozenImporter, and NamespaceLoader
        # drop their implementations of module_repr(), we can add a
# deprecation warning here.
try:
return loader.module_repr(module)
except Exception:
pass
try:
spec = module.__spec__
except AttributeError:
pass
else:
if spec is not None:
return _SpecMethods(spec).module_repr()
# We could use module.__class__.__name__ instead of 'module' in the
# various repr permutations.
try:
name = module.__name__
except AttributeError:
name = '?'
try:
filename = module.__file__
except AttributeError:
if loader is None:
return '<module {!r}>'.format(name)
else:
return '<module {!r} ({!r})>'.format(name, loader)
else:
return '<module {!r} from {!r}>'.format(name, filename)
class _installed_safely:
def __init__(self, module):
self._module = module
self._spec = module.__spec__
def __enter__(self):
# This must be done before putting the module in sys.modules
# (otherwise an optimization shortcut in import.c becomes
# wrong)
self._spec._initializing = True
sys.modules[self._spec.name] = self._module
def __exit__(self, *args):
try:
spec = self._spec
if any(arg is not None for arg in args):
try:
del sys.modules[spec.name]
except KeyError:
pass
else:
_verbose_message('import {!r} # {!r}', spec.name, spec.loader)
finally:
self._spec._initializing = False
class ModuleSpec:
"""The specification for a module, used for loading.
A module's spec is the source for information about the module. For
data associated with the module, including source, use the spec's
loader.
`name` is the absolute name of the module. `loader` is the loader
to use when loading the module. `parent` is the name of the
package the module is in. The parent is derived from the name.
`is_package` determines if the module is considered a package or
not. On modules this is reflected by the `__path__` attribute.
`origin` is the specific location used by the loader from which to
    load the module, if that information is available. When a filename is
    set, `origin` will match it.
    `has_location` indicates that a spec's "origin" reflects a location.
    When this is True, the `__file__` attribute of the module is set.
`cached` is the location of the cached bytecode file, if any. It
corresponds to the `__cached__` attribute.
`submodule_search_locations` is the sequence of path entries to
search when importing submodules. If set, is_package should be
    True; otherwise it should be False.
Packages are simply modules that (may) have submodules. If a spec
has a non-None value in `submodule_search_locations`, the import
system will consider modules loaded from the spec as packages.
Only finders (see importlib.abc.MetaPathFinder and
importlib.abc.PathEntryFinder) should modify ModuleSpec instances.
"""
def __init__(self, name, loader, *, origin=None, loader_state=None,
is_package=None):
self.name = name
self.loader = loader
self.origin = origin
self.loader_state = loader_state
self.submodule_search_locations = [] if is_package else None
# file-location attributes
self._set_fileattr = False
self._cached = None
def __repr__(self):
args = ['name={!r}'.format(self.name),
'loader={!r}'.format(self.loader)]
if self.origin is not None:
args.append('origin={!r}'.format(self.origin))
if self.submodule_search_locations is not None:
args.append('submodule_search_locations={}'
.format(self.submodule_search_locations))
return '{}({})'.format(self.__class__.__name__, ', '.join(args))
def __eq__(self, other):
smsl = self.submodule_search_locations
try:
return (self.name == other.name and
self.loader == other.loader and
self.origin == other.origin and
smsl == other.submodule_search_locations and
self.cached == other.cached and
self.has_location == other.has_location)
except AttributeError:
return False
@property
def cached(self):
if self._cached is None:
if self.origin is not None and self._set_fileattr:
filename = self.origin
if filename.endswith(tuple(SOURCE_SUFFIXES)):
try:
self._cached = cache_from_source(filename)
except NotImplementedError:
pass
elif filename.endswith(tuple(BYTECODE_SUFFIXES)):
self._cached = filename
return self._cached
@cached.setter
def cached(self, cached):
self._cached = cached
@property
def parent(self):
"""The name of the module's parent."""
if self.submodule_search_locations is None:
return self.name.rpartition('.')[0]
else:
return self.name
@property
def has_location(self):
return self._set_fileattr
@has_location.setter
def has_location(self, value):
self._set_fileattr = bool(value)
def spec_from_loader(name, loader, *, origin=None, is_package=None):
"""Return a module spec based on various loader methods."""
if hasattr(loader, 'get_filename'):
if is_package is None:
return spec_from_file_location(name, loader=loader)
search = [] if is_package else None
return spec_from_file_location(name, loader=loader,
submodule_search_locations=search)
if is_package is None:
if hasattr(loader, 'is_package'):
try:
is_package = loader.is_package(name)
except ImportError:
is_package = None # aka, undefined
else:
# the default
is_package = False
return ModuleSpec(name, loader, origin=origin, is_package=is_package)
_POPULATE = object()
def spec_from_file_location(name, location=None, *, loader=None,
submodule_search_locations=_POPULATE):
"""Return a module spec based on a file location.
To indicate that the module is a package, set
submodule_search_locations to a list of directory paths. An
    empty list is sufficient, though it's not otherwise useful to the
import system.
The loader must take a spec as its only __init__() arg.
"""
if location is None:
# The caller may simply want a partially populated location-
# oriented spec. So we set the location to a bogus value and
# fill in as much as we can.
location = '<unknown>'
if hasattr(loader, 'get_filename'):
# ExecutionLoader
try:
location = loader.get_filename(name)
except ImportError:
pass
# If the location is on the filesystem, but doesn't actually exist,
# we could return None here, indicating that the location is not
# valid. However, we don't have a good way of testing since an
# indirect location (e.g. a zip file or URL) will look like a
# non-existent file relative to the filesystem.
spec = ModuleSpec(name, loader, origin=location)
spec._set_fileattr = True
# Pick a loader if one wasn't provided.
if loader is None:
for loader_class, suffixes in _get_supported_file_loaders():
if location.endswith(tuple(suffixes)):
loader = loader_class(name, location)
spec.loader = loader
break
else:
return None
    # Set submodule_search_locations appropriately.
if submodule_search_locations is _POPULATE:
# Check the loader.
if hasattr(loader, 'is_package'):
try:
is_package = loader.is_package(name)
except ImportError:
pass
else:
if is_package:
spec.submodule_search_locations = []
else:
spec.submodule_search_locations = submodule_search_locations
if spec.submodule_search_locations == []:
if location:
dirname = _path_split(location)[0]
spec.submodule_search_locations.append(dirname)
return spec
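# Illustrative use (hypothetical path; a source loader is chosen from the
# '.py' suffix via _get_supported_file_loaders()):
#   spec = spec_from_file_location('mymod', '/tmp/mymod.py')
#   spec.origin -> '/tmp/mymod.py'
#   spec.has_location -> True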
def _spec_from_module(module, loader=None, origin=None):
# This function is meant for use in _setup().
try:
spec = module.__spec__
except AttributeError:
pass
else:
if spec is not None:
return spec
name = module.__name__
if loader is None:
try:
loader = module.__loader__
except AttributeError:
# loader will stay None.
pass
try:
location = module.__file__
except AttributeError:
location = None
if origin is None:
if location is None:
try:
origin = loader._ORIGIN
except AttributeError:
origin = None
else:
origin = location
try:
cached = module.__cached__
except AttributeError:
cached = None
try:
submodule_search_locations = list(module.__path__)
except AttributeError:
submodule_search_locations = None
spec = ModuleSpec(name, loader, origin=origin)
spec._set_fileattr = False if location is None else True
spec.cached = cached
spec.submodule_search_locations = submodule_search_locations
return spec
class _SpecMethods:
"""Convenience wrapper around spec objects to provide spec-specific
methods."""
# The various spec_from_* functions could be made factory methods here.
def __init__(self, spec):
self.spec = spec
def module_repr(self):
"""Return the repr to use for the module."""
# We mostly replicate _module_repr() using the spec attributes.
spec = self.spec
name = '?' if spec.name is None else spec.name
if spec.origin is None:
if spec.loader is None:
return '<module {!r}>'.format(name)
else:
return '<module {!r} ({!r})>'.format(name, spec.loader)
else:
if spec.has_location:
return '<module {!r} from {!r}>'.format(name, spec.origin)
else:
return '<module {!r} ({})>'.format(spec.name, spec.origin)
def init_module_attrs(self, module, *, _override=False, _force_name=True):
"""Set the module's attributes.
All missing import-related module attributes will be set. Here
is how the spec attributes map onto the module:
spec.name -> module.__name__
spec.loader -> module.__loader__
spec.parent -> module.__package__
spec -> module.__spec__
Optional:
spec.origin -> module.__file__ (if spec.set_fileattr is true)
spec.cached -> module.__cached__ (if __file__ also set)
spec.submodule_search_locations -> module.__path__ (if set)
"""
spec = self.spec
        # The passed-in module may not support attribute assignment,
# in which case we simply don't set the attributes.
# __name__
if (_override or _force_name or
getattr(module, '__name__', None) is None):
try:
module.__name__ = spec.name
except AttributeError:
pass
# __loader__
if _override or getattr(module, '__loader__', None) is None:
loader = spec.loader
if loader is None:
# A backward compatibility hack.
if spec.submodule_search_locations is not None:
loader = _NamespaceLoader.__new__(_NamespaceLoader)
loader._path = spec.submodule_search_locations
try:
module.__loader__ = loader
except AttributeError:
pass
# __package__
if _override or getattr(module, '__package__', None) is None:
try:
module.__package__ = spec.parent
except AttributeError:
pass
# __spec__
try:
module.__spec__ = spec
except AttributeError:
pass
# __path__
if _override or getattr(module, '__path__', None) is None:
if spec.submodule_search_locations is not None:
try:
module.__path__ = spec.submodule_search_locations
except AttributeError:
pass
if spec.has_location:
# __file__
if _override or getattr(module, '__file__', None) is None:
try:
module.__file__ = spec.origin
except AttributeError:
pass
# __cached__
if _override or getattr(module, '__cached__', None) is None:
if spec.cached is not None:
try:
module.__cached__ = spec.cached
except AttributeError:
pass
def create(self):
"""Return a new module to be loaded.
The import-related module attributes are also set with the
appropriate values from the spec.
"""
spec = self.spec
# Typically loaders will not implement create_module().
if hasattr(spec.loader, 'create_module'):
# If create_module() returns `None` it means the default
# module creation should be used.
module = spec.loader.create_module(spec)
else:
module = None
if module is None:
# This must be done before open() is ever called as the 'io'
# module implicitly imports 'locale' and would otherwise
# trigger an infinite loop.
module = _new_module(spec.name)
self.init_module_attrs(module)
return module
def _exec(self, module):
"""Do everything necessary to execute the module.
The namespace of `module` is used as the target of execution.
This method uses the loader's `exec_module()` method.
"""
self.spec.loader.exec_module(module)
# Used by importlib.reload() and _load_module_shim().
def exec(self, module):
"""Execute the spec in an existing module's namespace."""
name = self.spec.name
_imp.acquire_lock()
with _ModuleLockManager(name):
if sys.modules.get(name) is not module:
msg = 'module {!r} not in sys.modules'.format(name)
raise ImportError(msg, name=name)
if self.spec.loader is None:
if self.spec.submodule_search_locations is None:
raise ImportError('missing loader', name=self.spec.name)
# namespace package
self.init_module_attrs(module, _override=True)
return module
self.init_module_attrs(module, _override=True)
if not hasattr(self.spec.loader, 'exec_module'):
# (issue19713) Once BuiltinImporter and ExtensionFileLoader
# have exec_module() implemented, we can add a deprecation
# warning here.
self.spec.loader.load_module(name)
else:
self._exec(module)
return sys.modules[name]
def _load_backward_compatible(self):
# (issue19713) Once BuiltinImporter and ExtensionFileLoader
# have exec_module() implemented, we can add a deprecation
# warning here.
spec = self.spec
spec.loader.load_module(spec.name)
# The module must be in sys.modules at this point!
module = sys.modules[spec.name]
if getattr(module, '__loader__', None) is None:
try:
module.__loader__ = spec.loader
except AttributeError:
pass
if getattr(module, '__package__', None) is None:
try:
# Since module.__path__ may not line up with
                # spec.submodule_search_locations, we can't necessarily rely
# on spec.parent here.
module.__package__ = module.__name__
if not hasattr(module, '__path__'):
module.__package__ = spec.name.rpartition('.')[0]
except AttributeError:
pass
if getattr(module, '__spec__', None) is None:
try:
module.__spec__ = spec
except AttributeError:
pass
return module
def _load_unlocked(self):
# A helper for direct use by the import system.
if self.spec.loader is not None:
# not a namespace package
if not hasattr(self.spec.loader, 'exec_module'):
return self._load_backward_compatible()
module = self.create()
with _installed_safely(module):
if self.spec.loader is None:
if self.spec.submodule_search_locations is None:
raise ImportError('missing loader', name=self.spec.name)
# A namespace package so do nothing.
else:
self._exec(module)
# We don't ensure that the import-related module attributes get
# set in the sys.modules replacement case. Such modules are on
# their own.
return sys.modules[self.spec.name]
# A method used during testing of _load_unlocked() and by
# _load_module_shim().
def load(self):
"""Return a new module object, loaded by the spec's loader.
The module is not added to its parent.
If a module is already in sys.modules, that existing module gets
clobbered.
"""
_imp.acquire_lock()
with _ModuleLockManager(self.spec.name):
return self._load_unlocked()
def _fix_up_module(ns, name, pathname, cpathname=None):
# This function is used by PyImport_ExecCodeModuleObject().
loader = ns.get('__loader__')
spec = ns.get('__spec__')
if not loader:
if spec:
loader = spec.loader
elif pathname == cpathname:
loader = SourcelessFileLoader(name, pathname)
else:
loader = SourceFileLoader(name, pathname)
if not spec:
spec = spec_from_file_location(name, pathname, loader=loader)
try:
ns['__spec__'] = spec
ns['__loader__'] = loader
ns['__file__'] = pathname
ns['__cached__'] = cpathname
except Exception:
# Not important enough to report.
pass
# Loaders #####################################################################
class BuiltinImporter:
"""Meta path import for built-in modules.
All methods are either class or static methods to avoid the need to
instantiate the class.
"""
@staticmethod
def module_repr(module):
"""Return repr for the module.
The method is deprecated. The import machinery does the job itself.
"""
return '<module {!r} (built-in)>'.format(module.__name__)
@classmethod
def find_spec(cls, fullname, path=None, target=None):
if path is not None:
return None
if _imp.is_builtin(fullname):
return spec_from_loader(fullname, cls, origin='built-in')
else:
return None
@classmethod
def find_module(cls, fullname, path=None):
"""Find the built-in module.
If 'path' is ever specified then the search is considered a failure.
This method is deprecated. Use find_spec() instead.
"""
spec = cls.find_spec(fullname, path)
return spec.loader if spec is not None else None
@classmethod
@_requires_builtin
def load_module(cls, fullname):
"""Load a built-in module."""
# Once an exec_module() implementation is added we can also
# add a deprecation warning here.
with _ManageReload(fullname):
module = _call_with_frames_removed(_imp.init_builtin, fullname)
module.__loader__ = cls
module.__package__ = ''
return module
@classmethod
@_requires_builtin
def get_code(cls, fullname):
"""Return None as built-in modules do not have code objects."""
return None
@classmethod
@_requires_builtin
def get_source(cls, fullname):
"""Return None as built-in modules do not have source code."""
return None
@classmethod
@_requires_builtin
def is_package(cls, fullname):
"""Return False as built-in modules are never packages."""
return False
class FrozenImporter:
"""Meta path import for frozen modules.
All methods are either class or static methods to avoid the need to
instantiate the class.
"""
@staticmethod
def module_repr(m):
"""Return repr for the module.
The method is deprecated. The import machinery does the job itself.
"""
return '<module {!r} (frozen)>'.format(m.__name__)
@classmethod
def find_spec(cls, fullname, path=None, target=None):
if _imp.is_frozen(fullname):
return spec_from_loader(fullname, cls, origin='frozen')
else:
return None
@classmethod
def find_module(cls, fullname, path=None):
"""Find a frozen module.
This method is deprecated. Use find_spec() instead.
"""
return cls if _imp.is_frozen(fullname) else None
@staticmethod
def exec_module(module):
name = module.__spec__.name
if not _imp.is_frozen(name):
raise ImportError('{!r} is not a frozen module'.format(name),
name=name)
code = _call_with_frames_removed(_imp.get_frozen_object, name)
exec(code, module.__dict__)
@classmethod
def load_module(cls, fullname):
"""Load a frozen module.
This method is deprecated. Use exec_module() instead.
"""
return _load_module_shim(cls, fullname)
@classmethod
@_requires_frozen
def get_code(cls, fullname):
"""Return the code object for the frozen module."""
return _imp.get_frozen_object(fullname)
@classmethod
@_requires_frozen
def get_source(cls, fullname):
"""Return None as frozen modules do not have source code."""
return None
@classmethod
@_requires_frozen
def is_package(cls, fullname):
"""Return True if the frozen module is a package."""
return _imp.is_frozen_package(fullname)
class WindowsRegistryFinder:
"""Meta path finder for modules declared in the Windows registry."""
REGISTRY_KEY = (
'Software\\Python\\PythonCore\\{sys_version}'
'\\Modules\\{fullname}')
REGISTRY_KEY_DEBUG = (
'Software\\Python\\PythonCore\\{sys_version}'
'\\Modules\\{fullname}\\Debug')
DEBUG_BUILD = False # Changed in _setup()
@classmethod
def _open_registry(cls, key):
try:
return _winreg.OpenKey(_winreg.HKEY_CURRENT_USER, key)
except OSError:
return _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, key)
@classmethod
def _search_registry(cls, fullname):
if cls.DEBUG_BUILD:
registry_key = cls.REGISTRY_KEY_DEBUG
else:
registry_key = cls.REGISTRY_KEY
key = registry_key.format(fullname=fullname,
sys_version=sys.version[:3])
try:
with cls._open_registry(key) as hkey:
filepath = _winreg.QueryValue(hkey, '')
except OSError:
return None
return filepath
@classmethod
def find_spec(cls, fullname, path=None, target=None):
filepath = cls._search_registry(fullname)
if filepath is None:
return None
try:
_path_stat(filepath)
except OSError:
return None
for loader, suffixes in _get_supported_file_loaders():
if filepath.endswith(tuple(suffixes)):
spec = spec_from_loader(fullname, loader(fullname, filepath),
origin=filepath)
return spec
@classmethod
def find_module(cls, fullname, path=None):
"""Find module named in the registry.
        This method is deprecated. Use find_spec() instead.
"""
spec = cls.find_spec(fullname, path)
if spec is not None:
return spec.loader
else:
return None
class _LoaderBasics:
"""Base class of common code needed by both SourceLoader and
SourcelessFileLoader."""
def is_package(self, fullname):
"""Concrete implementation of InspectLoader.is_package by checking if
the path returned by get_filename has a filename of '__init__.py'."""
filename = _path_split(self.get_filename(fullname))[1]
filename_base = filename.rsplit('.', 1)[0]
tail_name = fullname.rpartition('.')[2]
return filename_base == '__init__' and tail_name != '__init__'
def exec_module(self, module):
"""Execute the module."""
code = self.get_code(module.__name__)
if code is None:
raise ImportError('cannot load module {!r} when get_code() '
'returns None'.format(module.__name__))
_call_with_frames_removed(exec, code, module.__dict__)
load_module = _load_module_shim
class SourceLoader(_LoaderBasics):
def path_mtime(self, path):
"""Optional method that returns the modification time (an int) for the
specified path, where path is a str.
Raises IOError when the path cannot be handled.
"""
raise IOError
def path_stats(self, path):
"""Optional method returning a metadata dict for the specified path
to by the path (str).
Possible keys:
- 'mtime' (mandatory) is the numeric timestamp of last source
code modification;
- 'size' (optional) is the size in bytes of the source code.
Implementing this method allows the loader to read bytecode files.
Raises IOError when the path cannot be handled.
"""
return {'mtime': self.path_mtime(path)}
def _cache_bytecode(self, source_path, cache_path, data):
"""Optional method which writes data (bytes) to a file path (a str).
Implementing this method allows for the writing of bytecode files.
        The source path is needed in order to correctly transfer permissions.
"""
# For backwards compatibility, we delegate to set_data()
return self.set_data(cache_path, data)
def set_data(self, path, data):
"""Optional method which writes data (bytes) to a file path (a str).
Implementing this method allows for the writing of bytecode files.
"""
def get_source(self, fullname):
"""Concrete implementation of InspectLoader.get_source."""
path = self.get_filename(fullname)
try:
source_bytes = self.get_data(path)
except OSError as exc:
raise ImportError('source not available through get_data()',
name=fullname) from exc
return decode_source(source_bytes)
def source_to_code(self, data, path, *, _optimize=-1):
"""Return the code object compiled from source.
The 'data' argument can be any object type that compile() supports.
"""
return _call_with_frames_removed(compile, data, path, 'exec',
dont_inherit=True, optimize=_optimize)
def get_code(self, fullname):
"""Concrete implementation of InspectLoader.get_code.
Reading of bytecode requires path_stats to be implemented. To write
bytecode, set_data must also be implemented.
"""
source_path = self.get_filename(fullname)
source_mtime = None
try:
bytecode_path = cache_from_source(source_path)
except NotImplementedError:
bytecode_path = None
else:
try:
st = self.path_stats(source_path)
except IOError:
pass
else:
source_mtime = int(st['mtime'])
try:
data = self.get_data(bytecode_path)
except OSError:
pass
else:
try:
bytes_data = _validate_bytecode_header(data,
source_stats=st, name=fullname,
path=bytecode_path)
except (ImportError, EOFError):
pass
else:
_verbose_message('{} matches {}', bytecode_path,
source_path)
return _compile_bytecode(bytes_data, name=fullname,
bytecode_path=bytecode_path,
source_path=source_path)
source_bytes = self.get_data(source_path)
code_object = self.source_to_code(source_bytes, source_path)
_verbose_message('code object from {}', source_path)
if (not sys.dont_write_bytecode and bytecode_path is not None and
source_mtime is not None):
data = _code_to_bytecode(code_object, source_mtime,
len(source_bytes))
try:
self._cache_bytecode(source_path, bytecode_path, data)
_verbose_message('wrote {!r}', bytecode_path)
except NotImplementedError:
pass
return code_object
class FileLoader:
"""Base file loader class which implements the loader protocol methods that
require file system usage."""
def __init__(self, fullname, path):
"""Cache the module name and the path to the file found by the
finder."""
self.name = fullname
self.path = path
def __eq__(self, other):
return (self.__class__ == other.__class__ and
self.__dict__ == other.__dict__)
def __hash__(self):
return hash(self.name) ^ hash(self.path)
@_check_name
def load_module(self, fullname):
"""Load a module from a file.
This method is deprecated. Use exec_module() instead.
"""
# The only reason for this method is for the name check.
# Issue #14857: Avoid the zero-argument form of super so the implementation
        # of that form can be updated without breaking the frozen module.
return super(FileLoader, self).load_module(fullname)
@_check_name
def get_filename(self, fullname):
"""Return the path to the source file as found by the finder."""
return self.path
def get_data(self, path):
"""Return the data from path as raw bytes."""
with _io.FileIO(path, 'r') as file:
return file.read()
class SourceFileLoader(FileLoader, SourceLoader):
"""Concrete implementation of SourceLoader using the file system."""
def path_stats(self, path):
"""Return the metadata for the path."""
st = _path_stat(path)
return {'mtime': st.st_mtime, 'size': st.st_size}
def _cache_bytecode(self, source_path, bytecode_path, data):
# Adapt between the two APIs
mode = _calc_mode(source_path)
return self.set_data(bytecode_path, data, _mode=mode)
def set_data(self, path, data, *, _mode=0o666):
"""Write bytes data to a file."""
parent, filename = _path_split(path)
path_parts = []
# Figure out what directories are missing.
while parent and not _path_isdir(parent):
parent, part = _path_split(parent)
path_parts.append(part)
# Create needed directories.
for part in reversed(path_parts):
parent = _path_join(parent, part)
try:
_os.mkdir(parent)
except FileExistsError:
# Probably another Python process already created the dir.
continue
except OSError as exc:
# Could be a permission error, read-only filesystem: just forget
# about writing the data.
_verbose_message('could not create {!r}: {!r}', parent, exc)
return
try:
_write_atomic(path, data, _mode)
_verbose_message('created {!r}', path)
except OSError as exc:
# Same as above: just don't write the bytecode.
_verbose_message('could not create {!r}: {!r}', path, exc)
class SourcelessFileLoader(FileLoader, _LoaderBasics):
"""Loader which handles sourceless file imports."""
def get_code(self, fullname):
path = self.get_filename(fullname)
data = self.get_data(path)
bytes_data = _validate_bytecode_header(data, name=fullname, path=path)
return _compile_bytecode(bytes_data, name=fullname, bytecode_path=path)
def get_source(self, fullname):
"""Return None as there is no source code."""
return None
# Filled in by _setup().
EXTENSION_SUFFIXES = []
class ExtensionFileLoader:
"""Loader for extension modules.
The constructor is designed to work with FileFinder.
"""
def __init__(self, name, path):
self.name = name
self.path = path
def __eq__(self, other):
return (self.__class__ == other.__class__ and
self.__dict__ == other.__dict__)
def __hash__(self):
return hash(self.name) ^ hash(self.path)
@_check_name
def load_module(self, fullname):
"""Load an extension module."""
# Once an exec_module() implementation is added we can also
# add a deprecation warning here.
with _ManageReload(fullname):
module = _call_with_frames_removed(_imp.load_dynamic,
fullname, self.path)
_verbose_message('extension module loaded from {!r}', self.path)
is_package = self.is_package(fullname)
if is_package and not hasattr(module, '__path__'):
module.__path__ = [_path_split(self.path)[0]]
module.__loader__ = self
module.__package__ = module.__name__
if not is_package:
module.__package__ = module.__package__.rpartition('.')[0]
return module
def is_package(self, fullname):
"""Return True if the extension module is a package."""
file_name = _path_split(self.path)[1]
return any(file_name == '__init__' + suffix
for suffix in EXTENSION_SUFFIXES)
def get_code(self, fullname):
"""Return None as an extension module cannot create a code object."""
return None
def get_source(self, fullname):
"""Return None as extension modules have no source code."""
return None
@_check_name
def get_filename(self, fullname):
"""Return the path to the source file as found by the finder."""
return self.path
class _NamespacePath:
"""Represents a namespace package's path. It uses the module name
to find its parent module, and from there it looks up the parent's
__path__. When this changes, the module's own path is recomputed,
using path_finder. For top-level modules, the parent module's path
is sys.path."""
def __init__(self, name, path, path_finder):
self._name = name
self._path = path
self._last_parent_path = tuple(self._get_parent_path())
self._path_finder = path_finder
def _find_parent_path_names(self):
"""Returns a tuple of (parent-module-name, parent-path-attr-name)"""
parent, dot, me = self._name.rpartition('.')
if dot == '':
# This is a top-level module. sys.path contains the parent path.
return 'sys', 'path'
# Not a top-level module. parent-module.__path__ contains the
# parent path.
return parent, '__path__'
def _get_parent_path(self):
parent_module_name, path_attr_name = self._find_parent_path_names()
return getattr(sys.modules[parent_module_name], path_attr_name)
def _recalculate(self):
# If the parent's path has changed, recalculate _path
parent_path = tuple(self._get_parent_path()) # Make a copy
if parent_path != self._last_parent_path:
spec = self._path_finder(self._name, parent_path)
# Note that no changes are made if a loader is returned, but we
# do remember the new parent path
if spec is not None and spec.loader is None:
if spec.submodule_search_locations:
self._path = spec.submodule_search_locations
self._last_parent_path = parent_path # Save the copy
return self._path
def __iter__(self):
return iter(self._recalculate())
def __len__(self):
return len(self._recalculate())
def __repr__(self):
return '_NamespacePath({!r})'.format(self._path)
def __contains__(self, item):
return item in self._recalculate()
def append(self, item):
self._path.append(item)
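# Illustrative sketch (not part of the module): because _NamespacePath
# re-reads the parent's __path__ on every access, a top-level namespace
# package notices ``sys.path.append('extra_dir')`` and picks up any new
# portions the next time its __path__ is iterated.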
# We use this exclusively in init_module_attrs() for backward-compatibility.
class _NamespaceLoader:
def __init__(self, name, path, path_finder):
self._path = _NamespacePath(name, path, path_finder)
@classmethod
def module_repr(cls, module):
"""Return repr for the module.
The method is deprecated. The import machinery does the job itself.
"""
return '<module {!r} (namespace)>'.format(module.__name__)
def is_package(self, fullname):
return True
def get_source(self, fullname):
return ''
def get_code(self, fullname):
return compile('', '<string>', 'exec', dont_inherit=True)
def exec_module(self, module):
pass
def load_module(self, fullname):
"""Load a namespace module.
This method is deprecated. Use exec_module() instead.
"""
# The import system never calls this method.
_verbose_message('namespace module loaded with path {!r}', self._path)
return _load_module_shim(self, fullname)
# Finders #####################################################################
class PathFinder:
"""Meta path finder for sys.path and package __path__ attributes."""
@classmethod
def invalidate_caches(cls):
"""Call the invalidate_caches() method on all path entry finders
stored in sys.path_importer_caches (where implemented)."""
for finder in sys.path_importer_cache.values():
if hasattr(finder, 'invalidate_caches'):
finder.invalidate_caches()
@classmethod
def _path_hooks(cls, path):
"""Search sequence of hooks for a finder for 'path'.
If 'hooks' is false then use sys.path_hooks.
"""
if not sys.path_hooks:
_warnings.warn('sys.path_hooks is empty', ImportWarning)
for hook in sys.path_hooks:
try:
return hook(path)
except ImportError:
continue
else:
return None
@classmethod
def _path_importer_cache(cls, path):
"""Get the finder for the path entry from sys.path_importer_cache.
If the path entry is not in the cache, find the appropriate finder
and cache it. If no finder is available, store None.
"""
if path == '':
path = _os.getcwd()
try:
finder = sys.path_importer_cache[path]
except KeyError:
finder = cls._path_hooks(path)
sys.path_importer_cache[path] = finder
return finder
@classmethod
def _legacy_get_spec(cls, fullname, finder):
# This would be a good place for a DeprecationWarning if
# we ended up going that route.
if hasattr(finder, 'find_loader'):
loader, portions = finder.find_loader(fullname)
else:
loader = finder.find_module(fullname)
portions = []
if loader is not None:
return spec_from_loader(fullname, loader)
spec = ModuleSpec(fullname, None)
spec.submodule_search_locations = portions
return spec
@classmethod
def _get_spec(cls, fullname, path, target=None):
"""Find the loader or namespace_path for this module/package name."""
# If this ends up being a namespace package, namespace_path is
# the list of paths that will become its __path__
namespace_path = []
for entry in path:
if not isinstance(entry, (str, bytes)):
continue
finder = cls._path_importer_cache(entry)
if finder is not None:
if hasattr(finder, 'find_spec'):
spec = finder.find_spec(fullname, target)
else:
spec = cls._legacy_get_spec(fullname, finder)
if spec is None:
continue
if spec.loader is not None:
return spec
portions = spec.submodule_search_locations
if portions is None:
raise ImportError('spec missing loader')
# This is possibly part of a namespace package.
# Remember these path entries (if any) for when we
# create a namespace package, and continue iterating
# on path.
namespace_path.extend(portions)
else:
spec = ModuleSpec(fullname, None)
spec.submodule_search_locations = namespace_path
return spec
@classmethod
def find_spec(cls, fullname, path=None, target=None):
"""find the module on sys.path or 'path' based on sys.path_hooks and
sys.path_importer_cache."""
if path is None:
path = sys.path
spec = cls._get_spec(fullname, path, target)
if spec is None:
return None
elif spec.loader is None:
namespace_path = spec.submodule_search_locations
if namespace_path:
# We found at least one namespace path. Return a
# spec which can create the namespace package.
spec.origin = 'namespace'
spec.submodule_search_locations = _NamespacePath(fullname, namespace_path, cls._get_spec)
return spec
else:
return None
else:
return spec
@classmethod
def find_module(cls, fullname, path=None):
"""find the module on sys.path or 'path' based on sys.path_hooks and
sys.path_importer_cache.
This method is deprecated. Use find_spec() instead.
"""
spec = cls.find_spec(fullname, path)
if spec is None:
return None
return spec.loader
class FileFinder:
"""File-based finder.
Interactions with the file system are cached for performance, being
refreshed when the directory the finder is handling has been modified.
"""
def __init__(self, path, *loader_details):
"""Initialize with the path to search on and a variable number of
2-tuples containing the loader and the file suffixes the loader
recognizes."""
loaders = []
for loader, suffixes in loader_details:
loaders.extend((suffix, loader) for suffix in suffixes)
self._loaders = loaders
# Base (directory) path
self.path = path or '.'
self._path_mtime = -1
self._path_cache = set()
self._relaxed_path_cache = set()
def invalidate_caches(self):
"""Invalidate the directory mtime."""
self._path_mtime = -1
find_module = _find_module_shim
def find_loader(self, fullname):
"""Try to find a loader for the specified module, or the namespace
package portions. Returns (loader, list-of-portions).
This method is deprecated. Use find_spec() instead.
"""
spec = self.find_spec(fullname)
if spec is None:
return None, []
return spec.loader, spec.submodule_search_locations or []
def _get_spec(self, loader_class, fullname, path, smsl, target):
loader = loader_class(fullname, path)
return spec_from_file_location(fullname, path, loader=loader,
submodule_search_locations=smsl)
def find_spec(self, fullname, target=None):
"""Try to find a loader for the specified module, or the namespace
package portions. Returns (loader, list-of-portions)."""
is_namespace = False
tail_module = fullname.rpartition('.')[2]
try:
mtime = _path_stat(self.path or _os.getcwd()).st_mtime
except OSError:
mtime = -1
if mtime != self._path_mtime:
self._fill_cache()
self._path_mtime = mtime
# tail_module keeps the original casing, for __file__ and friends
if _relax_case():
cache = self._relaxed_path_cache
cache_module = tail_module.lower()
else:
cache = self._path_cache
cache_module = tail_module
# Check if the module is the name of a directory (and thus a package).
if cache_module in cache:
base_path = _path_join(self.path, tail_module)
for suffix, loader_class in self._loaders:
init_filename = '__init__' + suffix
full_path = _path_join(base_path, init_filename)
if _path_isfile(full_path):
return self._get_spec(loader_class, fullname, full_path, [base_path], target)
else:
# If a namespace package, return the path if we don't
# find a module in the next section.
is_namespace = _path_isdir(base_path)
        # Check if a file with a proper suffix exists.
for suffix, loader_class in self._loaders:
full_path = _path_join(self.path, tail_module + suffix)
_verbose_message('trying {}'.format(full_path), verbosity=2)
if cache_module + suffix in cache:
if _path_isfile(full_path):
return self._get_spec(loader_class, fullname, full_path, None, target)
if is_namespace:
_verbose_message('possible namespace for {}'.format(base_path))
spec = ModuleSpec(fullname, None)
spec.submodule_search_locations = [base_path]
return spec
return None
def _fill_cache(self):
"""Fill the cache of potential modules and packages for this directory."""
path = self.path
try:
contents = _os.listdir(path or _os.getcwd())
except (FileNotFoundError, PermissionError, NotADirectoryError):
# Directory has either been removed, turned into a file, or made
# unreadable.
contents = []
# We store two cached versions, to handle runtime changes of the
# PYTHONCASEOK environment variable.
if not sys.platform.startswith('win'):
self._path_cache = set(contents)
else:
# Windows users can import modules with case-insensitive file
# suffixes (for legacy reasons). Make the suffix lowercase here
# so it's done once instead of for every import. This is safe as
# the specified suffixes to check against are always specified in a
# case-sensitive manner.
lower_suffix_contents = set()
for item in contents:
name, dot, suffix = item.partition('.')
if dot:
new_name = '{}.{}'.format(name, suffix.lower())
else:
new_name = name
lower_suffix_contents.add(new_name)
self._path_cache = lower_suffix_contents
if sys.platform.startswith(_CASE_INSENSITIVE_PLATFORMS):
self._relaxed_path_cache = {fn.lower() for fn in contents}
@classmethod
def path_hook(cls, *loader_details):
"""A class method which returns a closure to use on sys.path_hook
which will return an instance using the specified loaders and the path
called on the closure.
If the path called on the closure is not a directory, ImportError is
raised.
"""
def path_hook_for_FileFinder(path):
"""Path hook for importlib.machinery.FileFinder."""
if not _path_isdir(path):
raise ImportError('only directories are supported', path=path)
return cls(path, *loader_details)
return path_hook_for_FileFinder
def __repr__(self):
return 'FileFinder({!r})'.format(self.path)
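# Illustrative sketch mirroring what _install() does below: a path hook
# for plain source files could be registered as
#   sys.path_hooks.append(FileFinder.path_hook((SourceFileLoader,
#                                               SOURCE_SUFFIXES)))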
# Import itself ###############################################################
class _ImportLockContext:
"""Context manager for the import lock."""
def __enter__(self):
"""Acquire the import lock."""
_imp.acquire_lock()
def __exit__(self, exc_type, exc_value, exc_traceback):
"""Release the import lock regardless of any raised exceptions."""
_imp.release_lock()
def _resolve_name(name, package, level):
"""Resolve a relative module name to an absolute one."""
bits = package.rsplit('.', level - 1)
if len(bits) < level:
raise ValueError('attempted relative import beyond top-level package')
base = bits[0]
return '{}.{}'.format(base, name) if name else base
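# Illustrative sketch: in a module whose __package__ is 'pkg.sub', the
# statement ``from ..sibling import x`` resolves the target through
#   _resolve_name('sibling', 'pkg.sub', 2)   # -> 'pkg.sibling'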
def _find_spec_legacy(finder, name, path):
# This would be a good place for a DeprecationWarning if
# we ended up going that route.
loader = finder.find_module(name, path)
if loader is None:
return None
return spec_from_loader(name, loader)
def _find_spec(name, path, target=None):
"""Find a module's loader."""
if not sys.meta_path:
_warnings.warn('sys.meta_path is empty', ImportWarning)
# We check sys.modules here for the reload case. While a passed-in
# target will usually indicate a reload there is no guarantee, whereas
# sys.modules provides one.
is_reload = name in sys.modules
for finder in sys.meta_path:
with _ImportLockContext():
try:
find_spec = finder.find_spec
except AttributeError:
spec = _find_spec_legacy(finder, name, path)
if spec is None:
continue
else:
spec = find_spec(name, path, target)
if spec is not None:
# The parent import may have already imported this module.
if not is_reload and name in sys.modules:
module = sys.modules[name]
try:
__spec__ = module.__spec__
except AttributeError:
# We use the found spec since that is the one that
# we would have used if the parent module hadn't
# beaten us to the punch.
return spec
else:
if __spec__ is None:
return spec
else:
return __spec__
else:
return spec
else:
return None
def _sanity_check(name, package, level):
"""Verify arguments are "sane"."""
if not isinstance(name, str):
raise TypeError('module name must be str, not {}'.format(type(name)))
if level < 0:
raise ValueError('level must be >= 0')
if package:
if not isinstance(package, str):
raise TypeError('__package__ not set to a string')
elif package not in sys.modules:
msg = ('Parent module {!r} not loaded, cannot perform relative '
'import')
raise SystemError(msg.format(package))
if not name and level == 0:
raise ValueError('Empty module name')
_ERR_MSG_PREFIX = 'No module named '
_ERR_MSG = _ERR_MSG_PREFIX + '{!r}'
def _find_and_load_unlocked(name, import_):
path = None
parent = name.rpartition('.')[0]
if parent:
if parent not in sys.modules:
_call_with_frames_removed(import_, parent)
# Crazy side-effects!
if name in sys.modules:
return sys.modules[name]
parent_module = sys.modules[parent]
try:
path = parent_module.__path__
except AttributeError:
msg = (_ERR_MSG + '; {!r} is not a package').format(name, parent)
raise ImportError(msg, name=name)
spec = _find_spec(name, path)
if spec is None:
raise ImportError(_ERR_MSG.format(name), name=name)
else:
module = _SpecMethods(spec)._load_unlocked()
if parent:
# Set the module as an attribute on its parent.
parent_module = sys.modules[parent]
setattr(parent_module, name.rpartition('.')[2], module)
return module
def _find_and_load(name, import_):
"""Find and load the module, and release the import lock."""
with _ModuleLockManager(name):
return _find_and_load_unlocked(name, import_)
def _gcd_import(name, package=None, level=0):
"""Import and return the module based on its name, the package the call is
being made from, and the level adjustment.
This function represents the greatest common denominator of functionality
between import_module and __import__. This includes setting __package__ if
the loader did not.
"""
_sanity_check(name, package, level)
if level > 0:
name = _resolve_name(name, package, level)
_imp.acquire_lock()
if name not in sys.modules:
return _find_and_load(name, _gcd_import)
module = sys.modules[name]
if module is None:
_imp.release_lock()
message = ('import of {} halted; '
'None in sys.modules'.format(name))
raise ImportError(message, name=name)
_lock_unlock_module(name)
return module
def _handle_fromlist(module, fromlist, import_):
"""Figure out what __import__ should return.
The import_ parameter is a callable which takes the name of module to
import. It is required to decouple the function from assuming importlib's
import implementation is desired.
"""
# The hell that is fromlist ...
# If a package was imported, try to import stuff from fromlist.
if hasattr(module, '__path__'):
if '*' in fromlist:
fromlist = list(fromlist)
fromlist.remove('*')
if hasattr(module, '__all__'):
fromlist.extend(module.__all__)
for x in fromlist:
if not hasattr(module, x):
from_name = '{}.{}'.format(module.__name__, x)
try:
_call_with_frames_removed(import_, from_name)
except ImportError as exc:
# Backwards-compatibility dictates we ignore failed
# imports triggered by fromlist for modules that don't
# exist.
if str(exc).startswith(_ERR_MSG_PREFIX):
if exc.name == from_name:
continue
raise
return module
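# Illustrative sketch: ``from pkg import mod`` reaches here with
# fromlist=('mod',); if 'mod' is not yet an attribute of pkg, the
# submodule 'pkg.mod' is imported via import_ before pkg is returned.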
def _calc___package__(globals):
"""Calculate what __package__ should be.
__package__ is not guaranteed to be defined or could be set to None
to represent that its proper value is unknown.
"""
package = globals.get('__package__')
if package is None:
package = globals['__name__']
if '__path__' not in globals:
package = package.rpartition('.')[0]
return package
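# Illustrative sketch: for a plain module with __name__ == 'pkg.mod' and
# no __path__ in its globals, _calc___package__ returns 'pkg'; for a
# package (__path__ present) it returns the name unchanged.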
def _get_supported_file_loaders():
"""Returns a list of file-based module loaders.
Each item is a tuple (loader, suffixes).
"""
extensions = ExtensionFileLoader, _imp.extension_suffixes()
source = SourceFileLoader, SOURCE_SUFFIXES
bytecode = SourcelessFileLoader, BYTECODE_SUFFIXES
return [extensions, source, bytecode]
def __import__(name, globals=None, locals=None, fromlist=(), level=0):
"""Import a module.
    The 'globals' argument is used to infer where the import is occurring from
to handle relative imports. The 'locals' argument is ignored. The
'fromlist' argument specifies what should exist as attributes on the module
being imported (e.g. ``from module import <fromlist>``). The 'level'
argument represents the package location to import from in a relative
import (e.g. ``from ..pkg import mod`` would have a 'level' of 2).
"""
if level == 0:
module = _gcd_import(name)
else:
globals_ = globals if globals is not None else {}
package = _calc___package__(globals_)
module = _gcd_import(name, package, level)
if not fromlist:
# Return up to the first dot in 'name'. This is complicated by the fact
# that 'name' may be relative.
if level == 0:
return _gcd_import(name.partition('.')[0])
elif not name:
return module
else:
# Figure out where to slice the module's name up to the first dot
# in 'name'.
cut_off = len(name) - len(name.partition('.')[0])
# Slice end needs to be positive to alleviate need to special-case
# when ``'.' not in name``.
return sys.modules[module.__name__[:len(module.__name__)-cut_off]]
else:
return _handle_fromlist(module, fromlist, _gcd_import)
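# Illustrative sketch: ``import a.b.c`` compiles to
# __import__('a.b.c', globals(), locals(), (), 0) and returns the
# top-level module 'a', whereas ``from a.b import c`` passes
# fromlist=('c',) and returns the 'a.b' module itself.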
def _builtin_from_name(name):
spec = BuiltinImporter.find_spec(name)
if spec is None:
raise ImportError('no built-in module named ' + name)
methods = _SpecMethods(spec)
return methods._load_unlocked()
def _setup(sys_module, _imp_module):
"""Setup importlib by importing needed built-in modules and injecting them
into the global namespace.
As sys is needed for sys.modules access and _imp is needed to load built-in
modules, those two modules must be explicitly passed in.
"""
global _imp, sys, BYTECODE_SUFFIXES
_imp = _imp_module
sys = sys_module
if sys.flags.optimize:
BYTECODE_SUFFIXES = OPTIMIZED_BYTECODE_SUFFIXES
else:
BYTECODE_SUFFIXES = DEBUG_BYTECODE_SUFFIXES
# Set up the spec for existing builtin/frozen modules.
module_type = type(sys)
for name, module in sys.modules.items():
if isinstance(module, module_type):
if name in sys.builtin_module_names:
loader = BuiltinImporter
elif _imp.is_frozen(name):
loader = FrozenImporter
else:
continue
spec = _spec_from_module(module, loader)
methods = _SpecMethods(spec)
methods.init_module_attrs(module)
# Directly load built-in modules needed during bootstrap.
self_module = sys.modules[__name__]
for builtin_name in ('_io', '_warnings', 'builtins', 'marshal'):
if builtin_name not in sys.modules:
builtin_module = _builtin_from_name(builtin_name)
else:
builtin_module = sys.modules[builtin_name]
setattr(self_module, builtin_name, builtin_module)
# Directly load the os module (needed during bootstrap).
os_details = ('posix', ['/']), ('nt', ['\\', '/'])
for builtin_os, path_separators in os_details:
# Assumption made in _path_join()
assert all(len(sep) == 1 for sep in path_separators)
path_sep = path_separators[0]
if builtin_os in sys.modules:
os_module = sys.modules[builtin_os]
break
else:
try:
os_module = _builtin_from_name(builtin_os)
break
except ImportError:
continue
else:
raise ImportError('importlib requires posix or nt')
setattr(self_module, '_os', os_module)
setattr(self_module, 'path_sep', path_sep)
setattr(self_module, 'path_separators', ''.join(path_separators))
# Directly load the _thread module (needed during bootstrap).
try:
thread_module = _builtin_from_name('_thread')
except ImportError:
# Python was built without threads
thread_module = None
setattr(self_module, '_thread', thread_module)
# Directly load the _weakref module (needed during bootstrap).
weakref_module = _builtin_from_name('_weakref')
setattr(self_module, '_weakref', weakref_module)
# Directly load the winreg module (needed during bootstrap).
if builtin_os == 'nt':
winreg_module = _builtin_from_name('winreg')
setattr(self_module, '_winreg', winreg_module)
# Constants
setattr(self_module, '_relax_case', _make_relax_case())
EXTENSION_SUFFIXES.extend(_imp.extension_suffixes())
if builtin_os == 'nt':
SOURCE_SUFFIXES.append('.pyw')
if '_d.pyd' in EXTENSION_SUFFIXES:
WindowsRegistryFinder.DEBUG_BUILD = True
def _install(sys_module, _imp_module):
"""Install importlib as the implementation of import."""
_setup(sys_module, _imp_module)
supported_loaders = _get_supported_file_loaders()
sys.path_hooks.extend([FileFinder.path_hook(*supported_loaders)])
sys.meta_path.append(BuiltinImporter)
sys.meta_path.append(FrozenImporter)
if _os.__name__ == 'nt':
sys.meta_path.append(WindowsRegistryFinder)
sys.meta_path.append(PathFinder)
|
{
"content_hash": "91721326a6d9f4e80f7fc1874a4805a0",
"timestamp": "",
"source": "github",
"line_count": 7389,
"max_line_length": 105,
"avg_line_length": 35.25768033563405,
"alnum_prop": 0.5904943593365551,
"repo_name": "ArcherSys/ArcherSys",
"id": "76326a04e6e1140907d21bee32ea83d18f533cf9",
"size": "260519",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Lib/importlib/_bootstrap.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
}
|
from django.conf.urls import patterns, url
from .views import PostListView, PostDetailView
urlpatterns = patterns('',
    url(r'^(?P<pk>\d+)/$', PostDetailView.as_view(), name='detail'),
    url(r'^$', PostListView.as_view(), name='list'),
)
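# Illustrative only (the 'posts' namespace is an assumption): if this
# urlconf is included under an app namespace, the views are addressable
# via reverse('posts:detail', kwargs={'pk': 1}) and reverse('posts:list').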
|
{
"content_hash": "9870ae756c278a22c1ed591df9ebee47",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 67,
"avg_line_length": 30.25,
"alnum_prop": 0.6735537190082644,
"repo_name": "mpachas/django-embed-video",
"id": "122e7934f3c9cd786142f23659aa8eb9a2ab2be9",
"size": "242",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "example_project/posts/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "2300"
},
{
"name": "Python",
"bytes": "57466"
}
],
"symlink_target": ""
}
|
from google.cloud import securitycenter_v1beta1
def sample_group_findings():
# Create a client
client = securitycenter_v1beta1.SecurityCenterClient()
# Initialize request argument(s)
request = securitycenter_v1beta1.GroupFindingsRequest(
parent="parent_value",
group_by="group_by_value",
)
# Make the request
page_result = client.group_findings(request=request)
# Handle the response
for response in page_result:
print(response)
# [END securitycenter_v1beta1_generated_SecurityCenter_GroupFindings_sync]
|
{
"content_hash": "d5048c8b88dd51c8d857f1d9128517d8",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 74,
"avg_line_length": 27.142857142857142,
"alnum_prop": 0.7157894736842105,
"repo_name": "googleapis/python-securitycenter",
"id": "ed1728b456c9091fa4ddc77e954d7140813a3663",
"size": "1975",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/generated_samples/securitycenter_v1beta1_generated_security_center_group_findings_sync.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "2740673"
},
{
"name": "Shell",
"bytes": "30684"
}
],
"symlink_target": ""
}
|
"""
Download and iterate over http://www.teoruiz.com/lemario/lemario-20101017.txt.gz
Check whether a word is in the dictionary.
Given a word, return a list with all the anagrams of that word that belong to Spanish.
"""
from Ejercicios.PrimTrim.Ejercicio47 import lista_anagramas
_DICCIONARIO = 'lemario-20101017.txt'
def word_in_dic(word):
""" Comprueba que word esté en el diccionario de palabras.
"""
try:
fichero = open(_DICCIONARIO, encoding="utf-8")
for palabra in fichero:
if word == palabra.strip():
fichero.close()
return True
fichero.close()
return False
    except FileNotFoundError as error:
        print("Could not open the file.", error)
        return False
def anagrams_in_dic(word):
""" Devuelve una lista de todos los anagramas de una palabra siempre que estos estén en el diccionario
de palabras.
"""
lanagrams = lista_anagramas(word)
ret = []
for anagrama in lanagrams:
if word_in_dic(anagrama):
ret.append(anagrama)
return sorted(ret)
print(anagrams_in_dic("zorra"))
|
{
"content_hash": "fcd36c3f18ff7dcbd6eecfcc42846785",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 113,
"avg_line_length": 32.27777777777778,
"alnum_prop": 0.653184165232358,
"repo_name": "IhToN/DAW1-PRG",
"id": "7c8609e19e01279382cc4e5745d7d1922e990a65",
"size": "1165",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Ejercicios/SeguTrim/Ejercicio16.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "276667"
}
],
"symlink_target": ""
}
|
from actstream.tests.test_gfk import GFKManagerTestCase
from actstream.tests.test_zombies import ZombieTest
from actstream.tests.test_activity import ActivityTestCase
from actstream.tests.test_feeds import FeedsTestCase
from actstream.tests.test_views import ViewsTest
|
{
"content_hash": "ed424f6bbbe2a55cbdcae2dd717baa0a",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 58,
"avg_line_length": 53.8,
"alnum_prop": 0.8698884758364313,
"repo_name": "jrsupplee/django-activity-stream",
"id": "52c0ac82b111332e6e1b591a6b3b62446df5a3a4",
"size": "269",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "actstream/tests/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "3119"
},
{
"name": "Makefile",
"bytes": "464"
},
{
"name": "Python",
"bytes": "107812"
}
],
"symlink_target": ""
}
|
from congestion_approx import CongestionApprox
from graph_util import EDGE_CAPACITY_ATTR
class TreeCongestionApprox(CongestionApprox):
def __init__(s, tree, tree_root, alpha):
# TODO: node to index
s.tree = tree.copy()
s.root = tree_root
s.cached_dfs_edges = list(s.recursive_dfs_edges(s.root, set(), False))
s.cached_dfs_edges_data = list(s.recursive_dfs_edges(s.root, set(), True))
s.alpha_upper = alpha
def route_flow(s, demands):
node_flow = dict(zip(s.tree.nodes(), demands))
edge_flow = {}
for parent, child in reversed(s.dfs_edges()):
child_flow = node_flow[child]
node_flow[parent] += child_flow
edge_flow[(parent, child)] = child_flow
return edge_flow
def compute_node_potentials(s, edge_potentials):
node_potentials = dict([(s.root, 0)])
for edge, potential in zip(s.dfs_edges(), edge_potentials):
parent, child = edge
node_potentials[child] = node_potentials[parent] + potential
return node_potentials
def recursive_dfs_edges(s, cur_node, visited, data):
if cur_node in visited:
return
visited.add(cur_node)
if data:
for neighbor, edict in s.tree[cur_node].items():
if neighbor in visited:
continue
yield (cur_node, neighbor, edict)
for e in s.recursive_dfs_edges(neighbor, visited, data):
yield e
else:
for neighbor in s.tree[cur_node].keys():
if neighbor in visited:
continue
yield (cur_node, neighbor)
for e in s.recursive_dfs_edges(neighbor, visited, data):
yield e
def dfs_edges(s, data=False):
if data:
return s.cached_dfs_edges_data
else:
return s.cached_dfs_edges
def compute_dot(s, b):
flow = s.route_flow(b)
return list(flow[(u, v)] / edict[EDGE_CAPACITY_ATTR] / s.alpha() for (u, v, edict) in (
s.dfs_edges(data=True)))
    def compute_transpose_dot(s, x):
        edge_potentials = (xi / edict[EDGE_CAPACITY_ATTR]
                           for (xi, (u, v, edict)) in zip(x, s.dfs_edges(data=True)))
node_potentials = s.compute_node_potentials(edge_potentials)
return list(node_potentials[n] / s.alpha() for n in s.tree.nodes())
def alpha(s):
return s.alpha_upper
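# Minimal usage sketch (illustrative; assumes graph_util wraps networkx
# graphs, so EDGE_CAPACITY_ATTR is an edge-attribute key):
#   import networkx as nx
#   t = nx.Graph()
#   t.add_edge(0, 1, **{EDGE_CAPACITY_ATTR: 2.0})
#   t.add_edge(1, 2, **{EDGE_CAPACITY_ATTR: 1.0})
#   approx = TreeCongestionApprox(t, 0, alpha=1.0)
#   approx.route_flow([2.0, -1.0, -1.0])  # per-node demands summing to 0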
|
{
"content_hash": "31e537f57e5009ad99bf5a2a180a4f9d",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 91,
"avg_line_length": 30.27027027027027,
"alnum_prop": 0.6348214285714285,
"repo_name": "weinstein/FastMaxFlow",
"id": "8f27db1bd711261515d9303e865fa7102f295776",
"size": "2240",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tree_congestion_approx.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "62477"
}
],
"symlink_target": ""
}
|
"""Set of methods aimed to merge all features of each feature cluster
into one:
> process_features():
Combine the lower intensity features into the most intense
feature (peak centre) within each feature clusters. Wide peaks
and leading and trailing tails that are indicative of
contaminants are also removed.
Examples:
>>> from Configuration import LFParameters
>>> from LFDataFrame import LFDataFrame
>>> from PeakFilter import PeakFinder
>>> parameters = LFParameters('peakfilter', 'parameters.json')
>>> data = LFDataFrame('dataset.csv', parameters)
>>> PeakFinder.process_features(data, parameters)
"""
import numpy
import pandas
from LipidFinder.PeakFilter import Clustering
def process_features(data, parameters):
# type: (LFDataFrame, LFParameters) -> None
"""Combine the intensities of lower intensity features into the most
intense feature within feature clusters.
For all features in all replicates the intensities of lower
intensity features (peak frames) within feature clusters are
combined into the most intense feature (peak centre) where they are
part of the same peak. Wide peaks and leading and trailing tails
that are indicative of contaminants are also removed.
Keyword Arguments:
data -- LFDataFrame instance
parameters -- LipidFinder's PeakFilter parameters instance
"""
firstSampleIndex = parameters['firstSampleIndex'] - 1
lastSampleIndex = firstSampleIndex \
+ (parameters['numSamples'] * parameters['numTechReps'])
# Perform m/z and feature clustering
Clustering.cluster_by_mz(data, parameters)
Clustering.cluster_by_features(data, parameters)
featureIDCol = data.columns.values[-1]
# Add dummy cluster to avoid unexpected behavior when using apply():
# "In the current implementation, apply calls func twice on the
# first column/row to decide whether it can take a fast or slow code
# path."
firstGroup = data[data[featureIDCol] == 1].copy()
firstGroupIndices = firstGroup.index.values
firstGroup.loc[:, featureIDCol] = 0
tmpData = pandas.concat([firstGroup, data], ignore_index=True)
# Get array of retention time column
rtArray = tmpData[parameters['rtCol']].values
    # Select just the replicates
replicates = tmpData.iloc[:, firstSampleIndex : lastSampleIndex]
# Add dummy column as first Series to avoid unexpected behavior when
# using apply()
tempCol = tmpData.iloc[:, firstSampleIndex]
replicates.insert(0, 'DummyColumn', tempCol)
# Create groupby object on "Feature Clusters" and process each
# feature
replicates = replicates.groupby(tmpData[featureIDCol]).apply(
__process_feature__, parameters=parameters, rtArray=rtArray)
# Drop dummy cluster and column
replicates.drop(firstGroupIndices, inplace=True)
replicates.drop('DummyColumn', axis=1, inplace=True)
replicates.reset_index(inplace=True, drop=True)
# Copy the new samples intensities to data
data.iloc[:, firstSampleIndex : lastSampleIndex] = replicates
# Drop empty frames (if any)
data.drop_empty_frames('Empty frames after Peak Finder', parameters)
def __process_feature__(featureCluster, parameters, rtArray):
# type: (pandas.DataFrame, LFParameters, numpy.array)
# -> pandas.DataFrame
"""Process the feature cluster.
Keyword Arguments:
featureCluster -- feature cluster dataframe
parameters -- LipidFinder's PeakFilter parameters instance
rtArray -- array of retention times from source data
"""
return featureCluster.apply(__single_rep_feature__, parameters=parameters,
rtArray=rtArray)
def __single_rep_feature__(repFeature, parameters, rtArray):
# type: (pandas.Series, LFParameters, numpy.array) -> pandas.Series
"""Process the each sample replicate of the feature.
Keyword Arguments:
repFeature -- sample replicate intensities
parameters -- LipidFinder's PeakFilter parameters instance
rtArray -- array of retention times from source data
"""
    # Count of non-zero intensities in 'repFeature': if fewer than 2 then
    # there is either a single frame peak or no peak, so no processing is
    # needed
if (numpy.count_nonzero(repFeature.values) > 1):
repRT = rtArray[repFeature.index.values]
__feat_peak_analysis__(parameters, repFeature.values, repRT)
return repFeature
def __feat_peak_analysis__(parameters, intensities, repRT):
# type: (LFParameters, numpy.ndarray, numpy.array) -> None
"""Analyse feature peak.
Keyword Arguments:
parameters -- LipidFinder's PeakFilter parameters instance
intensities -- array of feature peak intensities
repRT -- array of retention times of the sample replicate
"""
# Index of start of feature (left in to ease code refactoring)
lowestIndex = 0
# Index of end of feature
highestIndex = intensities.size - 1
    # Create an array to record the frame category as little-endian
    # Unicode strings of length 2 ('<U2') (left in to ease code refactoring)
peakCategory = numpy.empty_like(intensities, dtype='<U2')
peakCategory.fill('--')
    # Create an array to hold the intensities. Each time an intensity
    # is categorised it is removed, leaving only uncategorised
    # intensities so the highest can be selected.
    intensityPeakCat = numpy.copy(intensities)
    # Loop while there are uncategorised frames within the feature group.
    # numpy.count_nonzero(intensities == 0) counts the zero intensities.
    while (sum(peakCategory == '--') > numpy.count_nonzero(intensities == 0)):
intensityPeakCat[numpy.where(peakCategory != '--')[0]] = -1
# Get the index of the highest intensity uncategorised and set
# it as the peak centre
peakCentreIndex = intensityPeakCat.argmax()
# Initialize peak start and finish index to peak centre
peakLowestIndex = peakCentreIndex
peakHighestIndex = peakCentreIndex
        # Categorise the peak centre as "PC"
peakCategory[peakCentreIndex] = 'PC'
# Retention time at PC (this can change to fairly accommodate
# wide peaks, but the PC index remains the same)
rtPC = repRT[peakCentreIndex]
        # A wide peak is where the actual most intense region of a peak is
        # close to the border between two frames, so we can allow two
        # similar frames to be a peak centre together (default: None,
        # meaning not a wide peak)
        widePeak = None
# Indicates whether there are frames to left and right of
# current peak edges to consider
framesLeft = True
framesRight = True
# Check left side for peakiness
if ((lowestIndex < peakLowestIndex)
and (intensities[peakLowestIndex - 1] != 0)):
decIndex = peakLowestIndex - 1
if ((parameters['peakMinFoldDiff'] * intensities[decIndex])
>= intensities[peakLowestIndex]):
# Potential wide peak
# Check if there are more frames
if ((lowestIndex < decIndex)
and (intensities[peakLowestIndex - 2] != 0)):
dec2Index = peakLowestIndex - 2
# Is the intensity at 'peakLowestIndex' greater
# than parameters["peakMinFoldDiff"] * (intensity at
# 'peakLowestIndex' - 1) - ('peakCentreIndex' - 1)
# and ('peakCentreIndex' - 2)
if ((parameters['peakMinFoldDiff'] * intensities[dec2Index])
>= intensities[decIndex]):
# Categorise as solvent feature and check for
# further solvents left and right.
# 'peakCentreIndex' set to "SF" twice: this
# avoids running into another feature if PC is
                        # located at first or last position.
# Check for low range of solvent feature
peakCategory = __solvents_low_rt__(
parameters, peakCategory, intensities,
peakCentreIndex, lowestIndex)
# Check for high range of solvent feature
peakCategory = __solvents_high_rt__(
parameters, peakCategory, intensities,
peakCentreIndex, highestIndex)
# The feature is fully categorised: exit current
# iteration
continue
else:
widePeak = "Left"
peakLowestIndex -= 2
else:
# No further left frames or the left frame has been
# categorised, hence the peak is a wide peak
framesLeft = False
widePeak = "Left"
peakLowestIndex -= 1
else:
# The PC is currently a peak
peakLowestIndex -= 1
else:
framesLeft = False
# Check right side for peakiness
if ((highestIndex > peakHighestIndex)
and (intensities[peakHighestIndex + 1] != 0)):
incIndex = peakHighestIndex + 1
if ((parameters['peakMinFoldDiff'] * intensities[incIndex])
>= intensities[peakHighestIndex]):
if (widePeak):
# It is a solvent or the end of the peak. Categorise
# as solvent feature and check for further solvents
# left and right. 'peakCentreIndex' set to "SF"
# twice: this avoids running into another feature if
                    # PC is located at first or last position.
# Check for low range of solvent feature
peakCategory = __solvents_low_rt__(
parameters, peakCategory, intensities,
peakCentreIndex, lowestIndex)
# Check for high range of solvent feature
peakCategory = __solvents_high_rt__(
parameters, peakCategory, intensities,
peakCentreIndex, highestIndex)
# The feature is fully categorised: exit current
# iteration
continue
# Potential wide peak
elif ((highestIndex > incIndex)
and (intensities[peakHighestIndex + 2] != 0)):
inc2Index = peakHighestIndex + 2
# Is the intensity at 'peakHighestIndex' greater
# than parameters["peakMinFoldDiff"] * (intensity at
# 'peakHighestIndex' + 1) - ('peakCentreIndex' + 1)
# and ('peakCentreIndex' + 2)
if ((parameters['peakMinFoldDiff'] * intensities[inc2Index])
>= intensities[incIndex]):
# Categorise as solvent feature and check for
# further solvents left and right.
# 'peakCentreIndex' set to "SF" twice: this
# avoids running into another feature if PC is
                        # located at first or last position.
# Check for low range of solvent feature
peakCategory = __solvents_low_rt__(
parameters, peakCategory, intensities,
peakCentreIndex, lowestIndex)
# Check for high range of solvent feature
peakCategory = __solvents_high_rt__(
parameters, peakCategory, intensities,
peakCentreIndex, highestIndex)
# The feature is fully categorised: exit
# current iteration
continue
else:
# Right side is a wide peak and there are still
# frames on the right and left side of the peak
# to categorise
widePeak = "Right"
peakHighestIndex += 2
else:
# No further right frames or the right frame has
# been categorised, hence the peak is a wide peak
framesRight = False
peakHighestIndex += 1
else:
peakHighestIndex += 1
else:
framesRight = False
# Change 'rtPC' to be centre point of current peak
if (widePeak == "Left"):
rtPC = (repRT[peakCentreIndex] + repRT[peakCentreIndex - 1]) / 2.0
elif (widePeak == "Right"):
rtPC = (repRT[peakCentreIndex] + repRT[peakCentreIndex + 1]) / 2.0
# There are peak frames to categorise that we found during the
# process of checking if the PC is part of a peak or solvent
# feature. "PS" will be used for the rare situation where a
# frame is part of 2 peaks and needs to be shared amongst both.
nextIndex = peakHighestIndex + 1
peakCategory[peakLowestIndex : nextIndex][
peakCategory[peakLowestIndex : nextIndex] == 'PF'
] = 'PS'
peakCategory[peakLowestIndex : nextIndex][
(peakCategory[peakLowestIndex : nextIndex] != 'PC')
& (peakCategory[peakLowestIndex : nextIndex] != 'PS')
] = 'PF'
# Complete left part of the peak
if (framesLeft):
# Check there is frame at ('peakLowestIndex' - 1) originally
# set as 'peakCentreIndex' that may have changed after
# checking if "PC" is a peak
while (lowestIndex < peakLowestIndex):
prevIndex = peakLowestIndex - 1
if (intensities[prevIndex] == 0):
break
# The frame at ('peakLowestIndex' - 1) is not a
                # previously categorised "PF": the intensity is not 0
# and adding the frame will keep the peak within the
# allowed width. Test each side of peak for time width
# limits.
if (round(rtPC - repRT[prevIndex], 3)
<= round(parameters['peakMaxRTWidth'] / 2.0, 3)):
if ((parameters['peakMinFoldDiff'] * intensities[prevIndex])
>= intensities[peakLowestIndex]):
if ((parameters['peakMinFoldDiff']
* intensities[peakLowestIndex])
>= intensities[prevIndex]):
# It is a solvent: check for the low range
# of the feature
peakCategory = __solvents_low_rt__(
parameters, peakCategory, intensities,
prevIndex, lowestIndex)
# ('peakLowestIndex' - 1) is either too large to
# be a solvent or at least one solvent chain has
# been identified: end of left peak
break
else:
peakLowestIndex -= 1
if (peakCategory[peakLowestIndex] == 'PF'):
# Set as shared frame ("PS")
peakCategory[peakLowestIndex] = 'PS'
else:
# New peak frame
peakCategory[peakLowestIndex] = 'PF'
else:
# End of left peak, but there may be frames that
# would be in the peak if it was wider (tail
# frames). Is the next frame part of the tail of the
# last peak? If so, it is a solvent frame. This
# stops tails of peaks being categorised as "PC",
# avoiding false positives.
if ((peakCategory[prevIndex] == '--')
and ((parameters['peakMinFoldDiff']
* intensities[peakLowestIndex])
>= intensities[prevIndex])):
# It is a solvent: check for the low range of
# the feature
peakCategory = __solvents_low_rt__(
parameters, peakCategory, intensities,
prevIndex, lowestIndex)
break
# Complete right side of the peak
if (framesRight):
        # Check there is a frame at ('peakHighestIndex' + 1) originally
# set as 'peakCentreIndex' that may have changed after
# checking if "PC" is a peak
while (highestIndex > peakHighestIndex):
nextIndex = peakHighestIndex + 1
if (intensities[nextIndex] == 0):
break
# The frame at ('peakHighestIndex' + 1) is not a
                # previously categorised "PF": the intensity is not 0
# and adding the frame will keep the peak within the
# allowed width
if (round(repRT[nextIndex] - rtPC, 3)
<= round(parameters['peakMaxRTWidth'] / 2.0, 3)):
if ((parameters['peakMinFoldDiff'] * intensities[nextIndex])
>= intensities[peakHighestIndex]):
if ((parameters['peakMinFoldDiff']
* intensities[peakHighestIndex])
>= intensities[nextIndex]):
# It is a solvent: check for the high range
# of the feature
peakCategory = __solvents_high_rt__(
parameters, peakCategory, intensities,
nextIndex, highestIndex)
                        # ('peakHighestIndex' + 1) is either too large
# to be a solvent or at least one solvent chain
# has been identified: end of right peak
break
else:
peakHighestIndex += 1
if (peakCategory[peakHighestIndex] == 'PF'):
# Set as shared frame ("PS")
peakCategory[peakHighestIndex] = 'PS'
else:
# New peak frame
peakCategory[peakHighestIndex] = 'PF'
else:
# End of right peak, but there may be frames that
# would be in the peak if it was wider (tail
# frames). Is the next frame part of the tail of the
# last peak? If so, it is a solvent frame. This
# stops tails of peaks being categorised as "PC",
# avoiding false positives.
if ((peakCategory[nextIndex] == '--')
and ((parameters['peakMinFoldDiff']
* intensities[peakHighestIndex])
>= intensities[nextIndex])):
# It is a solvent: check for the high range of
# the feature
peakCategory = __solvents_high_rt__(
parameters, peakCategory, intensities,
nextIndex, highestIndex)
break
# Determine peak concatenation type
nextIndex = peakHighestIndex + 1
if (parameters['concatAllFrames']):
# "PC" intensity is set to be the sum of all frames in peak
intensities[peakCentreIndex] = \
intensities[peakLowestIndex : nextIndex].sum()
else:
# "PC" intensity is set to be the sum of "PC" and the most
# intense "PF". If there is only "PC", NaN will be returned
# and raise an exception. This should not happen since a
# peak at this point must have at least one "PF" and the
# "PC".
if (sum(peakCategory[peakLowestIndex : nextIndex] == 'PF') > 0):
pfsArray = numpy.where(
peakCategory[peakLowestIndex : nextIndex] == 'PF')[0]
intensities[peakCentreIndex] += \
intensities[peakLowestIndex : nextIndex][pfsArray].max()
# Set all non "PC" frames to 0
intensities[numpy.where(peakCategory != 'PC')[0]] = 0
def __solvents_low_rt__(parameters, # LFParameters
peakCategory, # numpy.array
intensities, # numpy.array
peakLowestIndex, # int
lowestIndex # int
):
# type: (...) -> numpy.array
"""Check for the low solvent range of the given feature.
Keyword Arguments:
parameters -- LipidFinder's PeakFilter parameters instance
peakCategory -- frame category array
intensities -- array of feature peak intensities
peakLowestIndex -- lowest index of the peak
lowestIndex -- lowest index of the feature
"""
peakCategory[peakLowestIndex] = 'SF'
while (lowestIndex < peakLowestIndex):
prevIndex = peakLowestIndex - 1
if ((peakCategory[prevIndex] == '--')
and (intensities[prevIndex] != 0)):
if ((parameters['peakMinFoldDiff'] * intensities[peakLowestIndex])
>= intensities[prevIndex]):
peakLowestIndex -= 1
peakCategory[peakLowestIndex] = 'SF'
continue
else:
# ('peakLowestIndex' - 1) intensity is too large to be a
# solvent: end of left peak
break
else:
# Either the new frame is already categorised or its
# intensity is 0: end of the left part of the feature
break
return peakCategory
def __solvents_high_rt__(parameters, # LFParameters
peakCategory, # numpy.array
intensities, # numpy.array
peakHighestIndex, # int
highestIndex # int
):
# type: (...) -> numpy.array
"""Check for the high solvent range of the given feature.
Keyword Arguments:
parameters -- LipidFinder's PeakFilter parameters instance
peakCategory -- frame category array
intensities -- array of feature peak intensities
peakHighestIndex -- highest index of the peak
highestIndex -- highest index of the feature
"""
peakCategory[peakHighestIndex] = 'SF'
while (highestIndex > peakHighestIndex):
nextIndex = peakHighestIndex + 1
if ((peakCategory[nextIndex] == '--')
and (intensities[nextIndex] != 0)):
if ((parameters['peakMinFoldDiff'] * intensities[peakHighestIndex])
>= intensities[nextIndex]):
peakHighestIndex += 1
peakCategory[peakHighestIndex] = 'SF'
continue
else:
                # ('peakHighestIndex' + 1) intensity is too large to be a
# solvent: end of right peak
break
else:
# Either the new frame is already categorised or its
# intensity is 0: end of the right part of the feature
break
return peakCategory
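# Illustrative sketch of the peakiness test used throughout (all names
# from the code above): with parameters['peakMinFoldDiff'] == 2, a
# neighbouring frame of intensity 600 next to a 1000-intensity centre
# satisfies 2 * 600 >= 1000, so the frames are deemed similar and the
# wide-peak/solvent checks run; a neighbour of 400 fails (800 < 1000)
# and is treated as an ordinary peak slope frame.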
|
{
"content_hash": "d74f3b4891583b29937a57a2064c74c9",
"timestamp": "",
"source": "github",
"line_count": 480,
"max_line_length": 80,
"avg_line_length": 49.93958333333333,
"alnum_prop": 0.5474114555087397,
"repo_name": "cjbrasher/LipidFinder",
"id": "2d9426d52477d0d588187a42f96da0ac16b49192",
"size": "24207",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "LipidFinder/PeakFilter/PeakFinder.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "211378"
}
],
"symlink_target": ""
}
|
from typing import Callable
from unittest.mock import DEFAULT, Mock, call, patch
import pytest
from azure.ai.ml import load_registry
from azure.ai.ml._scope_dependent_operations import OperationScope
from azure.ai.ml.entities._registry.registry import Registry
from azure.ai.ml.operations import RegistryOperations
from azure.core.exceptions import ResourceExistsError
from azure.core.polling import LROPoller
from pytest_mock import MockFixture
@pytest.fixture
def mock_registry_operation(
mock_registry_scope: OperationScope,
mock_aml_services_2022_01_01_preview: Mock,
mock_machinelearning_client: Mock,
mock_credential: Mock,
) -> RegistryOperations:
yield RegistryOperations(
operation_scope=mock_registry_scope,
service_client=mock_aml_services_2022_01_01_preview,
all_operations=mock_machinelearning_client._operation_container,
credentials=mock_credential,
)
@pytest.mark.unittest
@pytest.mark.production_experiences_test
class TestRegistryOperations:
def test_list(self, mock_registry_operation: RegistryOperations) -> None:
# Test different input options for the scope value
mock_registry_operation.list()
mock_registry_operation._operation.list.assert_called_once()
mock_registry_operation.list(scope="invalid")
assert mock_registry_operation._operation.list.call_count == 2
mock_registry_operation._operation.list_by_subscription.assert_not_called()
mock_registry_operation.list(scope="subscription")
assert mock_registry_operation._operation.list.call_count == 2
mock_registry_operation._operation.list_by_subscription.assert_called_once()
def test_get(self, mock_registry_operation: RegistryOperations, randstr: Callable[[], str]) -> None:
mock_registry_operation.get(f"unittest_{randstr('reg_name')}")
mock_registry_operation._operation.get.assert_called_once()
def test_check_registry_name(self, mock_registry_operation: RegistryOperations):
mock_registry_operation._default_registry_name = None
with pytest.raises(Exception):
mock_registry_operation._check_registry_name(None)
def test_create(self, mock_registry_operation: RegistryOperations, randstr: Callable[[], str]) -> None:
reg_name = f"unittest{randstr('reg_name')}"
params_override = [
{
"name": reg_name
}
]
reg = load_registry(
source="./tests/test_configs/registry/registry_valid_min.yaml", params_override=params_override
)
# valid creation of new registry
mock_registry_operation.begin_create(registry=reg)
mock_registry_operation._operation.begin_create_or_update.assert_called_once()
def test_delete(self, mock_registry_operation: RegistryOperations, randstr: Callable[[], str]) -> None:
mock_registry_operation.begin_delete(name="some registry")
mock_registry_operation._operation.begin_delete.assert_called_once()
|
{
"content_hash": "96d53e4e797dfc3851813dc61aed5f31",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 107,
"avg_line_length": 43.32857142857143,
"alnum_prop": 0.7171117705242335,
"repo_name": "Azure/azure-sdk-for-python",
"id": "40dd561470a79625c4b19eb965e74398e9748896",
"size": "3033",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/ml/azure-ai-ml/tests/registry/unittests/test_registry_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import win32api
import win32con
import win32file
import pywintypes
from winsys import constants, core, exc, utils
class x_handles(exc.x_winsys):
pass
WINERROR_MAP = {
}
wrapped = exc.wrapper(WINERROR_MAP, x_handles)
class Handle(core._WinSysObject):
def __init__(self, handle):
core._WinSysObject.__init__(self)
self._handle = handle
self.name = str(int(self._handle))
def __int__(self):
return int(self._handle)
def pyobject(self):
return self._handle
def duplicate(self, process=None, inheritable=True):
        # NOTE: `processes` is not imported in this module; the call below
        # assumes a winsys `processes` module is available in scope.
        target_process = processes.process(process).hProcess
this_process = wrapped(win32api.GetCurrentProcess)
return self.__class__(
wrapped(
win32api.DuplicateHandle,
this_process,
self._handle,
target_process,
0,
inheritable,
win32con.DUPLICATE_SAME_ACCESS
)
)
    def read(self, buffer_size=0):
        data = ""
        while True:
            hr, _data = wrapped(win32file.ReadFile, self._handle, buffer_size)
            data += _data
            if hr == 0:
                break
        return data
def write(self, data):
wrapped(win32file.WriteFile, self._handle, data)
def handle(handle):
if handle is None:
return None
elif isinstance(handle, int):
return Handle(pywintypes.HANDLE(handle))
else:
return Handle(handle)
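# Illustrative usage sketch (not part of the original module):
# GetStdHandle/STD_OUTPUT_HANDLE are standard pywin32 names; wrapping
# the raw Win32 handle gives access to the read/write helpers above.
#   h = handle(win32api.GetStdHandle(win32con.STD_OUTPUT_HANDLE))
#   h.write("hello\n")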
|
{
"content_hash": "9e07bb8b8e6451b2047e01c7b73488c4",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 78,
"avg_line_length": 24.79032258064516,
"alnum_prop": 0.5803513337670787,
"repo_name": "operepo/ope",
"id": "01d8e28b6299c9d398dfed91432e965efd08183e",
"size": "1561",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "laptop_credential/winsys/handles.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "AL",
"bytes": "40379"
},
{
"name": "Awk",
"bytes": "22377"
},
{
"name": "Batchfile",
"bytes": "81725"
},
{
"name": "C",
"bytes": "655"
},
{
"name": "C++",
"bytes": "200907"
},
{
"name": "CMake",
"bytes": "8149"
},
{
"name": "CSS",
"bytes": "103747"
},
{
"name": "Dockerfile",
"bytes": "47152"
},
{
"name": "Emacs Lisp",
"bytes": "90665"
},
{
"name": "HTML",
"bytes": "37373861"
},
{
"name": "Java",
"bytes": "916104"
},
{
"name": "JavaScript",
"bytes": "9115492"
},
{
"name": "Makefile",
"bytes": "7428"
},
{
"name": "NewLisp",
"bytes": "111955"
},
{
"name": "PHP",
"bytes": "5053"
},
{
"name": "Perl",
"bytes": "45839826"
},
{
"name": "PostScript",
"bytes": "192210"
},
{
"name": "PowerShell",
"bytes": "2870"
},
{
"name": "Procfile",
"bytes": "114"
},
{
"name": "Prolog",
"bytes": "248055"
},
{
"name": "Python",
"bytes": "9037346"
},
{
"name": "QML",
"bytes": "125647"
},
{
"name": "QMake",
"bytes": "7566"
},
{
"name": "Raku",
"bytes": "7174577"
},
{
"name": "Roff",
"bytes": "25148"
},
{
"name": "Ruby",
"bytes": "162111"
},
{
"name": "Shell",
"bytes": "2574077"
},
{
"name": "Smalltalk",
"bytes": "77031"
},
{
"name": "SystemVerilog",
"bytes": "83394"
},
{
"name": "Tcl",
"bytes": "7061959"
},
{
"name": "Vim script",
"bytes": "27705984"
},
{
"name": "kvlang",
"bytes": "60630"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('form_designer_form', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='cmsformdefinition',
name='cmsplugin_ptr',
field=models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, related_name='form_designer_form_cmsformdefinition', serialize=False, to='cms.CMSPlugin'),
),
]
|
{
"content_hash": "01739ead8ff27756120b04fc88a54ae5",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 229,
"avg_line_length": 32,
"alnum_prop": 0.6792763157894737,
"repo_name": "martijn-maklu/django-form-designer",
"id": "bff46bb68923c770a5151e43f595e77d6e74acc9",
"size": "681",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "form_designer/contrib/cms_plugins/form_designer_form/migrations/0002_auto_20170427_1713.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "7765"
},
{
"name": "Python",
"bytes": "139636"
}
],
"symlink_target": ""
}
|
import os
import re
import sys
import pytumblr
import json
from datetime import datetime, timedelta
from PIL import Image
class Scraper:
#Constructors
def cleanupImageFolder(self):
os.system('rm -r ./images')
os.system('mkdir images')
    def __init__(self, searchTerms, imgsNeeded=None):
        #Python has no constructor overloading, so one __init__ with an
        #optional argument handles both call styles.
        if searchTerms == []:
            print 'No search terms given!'
            sys.exit()
        if imgsNeeded is None:
            print "No image count given. Defaulting to 1000 images."
            imgsNeeded = [1000 for _ in searchTerms]
        if not len(searchTerms) == len(imgsNeeded):
            print 'Number of images needed was not correctly specified! Ex:( ["rabbit","bieber","kentucky"][1000,100,400] )'
            print 'Defaulting to 1000 for all search terms!'
            imgsNeeded = [1000 for _ in searchTerms]
        self.consumerKey = '' #Put in your consumerKey
        self.consumerSecret = '' #Put in your consumerSecret
        self.oauthToken = '' #Put in your auth Token
        self.oauthSecret = '' #Put in your auth Secret
        self.client = pytumblr.TumblrRestClient(self.consumerKey, self.consumerSecret, self.oauthToken, self.oauthSecret)
        self.timestampList = []
        self.imageList = []
        self.searchTerms = searchTerms
        self.imgsDownloadedMap = {} #This is a map that keeps track of the number of images downloaded for each search term.
        self.imgsNeededMap = {}
        self.unixTimestamp = ''
        self.imgsDownloaded = 0
        self.imgsNeeded = imgsNeeded
        self.buildMaps()
        self.cleanupImageFolder()
        self.delegateSearches(self.searchTerms)
def buildMaps(self):
i = 0
for searchTerm in self.searchTerms:
self.imgsNeededMap[searchTerm] = self.imgsNeeded[i]
self.imgsDownloadedMap[searchTerm] = 0
i = i + 1
def delegateSearches(self,searchTerms):
if searchTerms == []:
print 'No search terms given!'
sys.exit()
else:
for searchTerm in searchTerms:
self.imgsDownloadedMap[searchTerm] = 0
self.scrapeImages(searchTerm)
def downloadImage(self,link):
os.system('wget -P ./images %s'%link)
def scrapeImages(self,searchTerm):
#As of 5/12/2014, the tumblr API only returns 20 results for each call.
#(We have a workaround for that!) :)
#We take the timestamp of the 20th result and tell tumblr to only return results older than that in the next call.
#We do this until imgsDownloaded and imgsNeeded are equivalent.
responses = self.client.tagged(searchTerm)
jsonResponses = json.dumps(responses)
lastTimestamp = responses[(len(responses)-1)]['timestamp']
while self.imgsDownloadedMap[searchTerm] < self.imgsNeededMap[searchTerm]:
for resp in responses:
if 'photos' in resp:
photos = resp['photos']
for photo in photos:
if self.imgsDownloadedMap[searchTerm] < self.imgsNeededMap[searchTerm]:
if 'alt_sizes' in photo:
alt_sizes = photo['alt_sizes']
#The only guaranteed common image size on tumblr is 75x75 px (the smallest they offer).
smallest_size_img = alt_sizes[len(alt_sizes)-1]
link = smallest_size_img['url']
if '.jpg' in link:
self.downloadImage(link)
self.imgsDownloadedMap[searchTerm] = self.imgsDownloadedMap[searchTerm] + 1
self.imageList.append(link)
else:
break
responses = self.client.tagged(searchTerm, before=lastTimestamp)
jsonResponses = json.dumps(responses)
lastTimestamp = responses[(len(responses)-1)]['timestamp']
|
{
"content_hash": "11c900c1544a05270ef0980dc4033b59",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 128,
"avg_line_length": 35.08910891089109,
"alnum_prop": 0.6853837471783296,
"repo_name": "JunHuang01/GrammerArtist",
"id": "bcad8e7d3af6de14946d53289a0ff9203357e8da",
"size": "3661",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Scraper.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "35810"
}
],
"symlink_target": ""
}
|
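The scraper above works around the tumblr API's 20-results-per-call cap by feeding the timestamp of the oldest result back in as the "before" parameter of the next call. A minimal sketch of that cursor-pagination pattern on its own (fetch_page is a hypothetical callable standing in for client.tagged, not part of pytumblr):

def paginate(fetch_page):
    """Yield items across pages keyed by a 'before' timestamp cursor."""
    page = fetch_page(before=None)        # first page: newest results
    while page:
        for item in page:
            yield item
        oldest = page[-1]['timestamp']    # cursor = oldest item seen
        page = fetch_page(before=oldest)  # next page: only older items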
from django.conf.urls import url
from django.contrib.auth.views import logout_then_login
from .views import LoginView, LogoutView, ProfileView
urlpatterns = [
url(r'^login/$', LoginView.as_view(), name='login'),
url(r'^logout/$', LogoutView.as_view(), name='logout'),
url(r'^logout-then-login/$', logout_then_login, name='logout-then-login'),
url(r'^profile/$', ProfileView.as_view(), name='profile'),
]
|
{
"content_hash": "7e34738085646a5b5c9c5eb3634644c5",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 78,
"avg_line_length": 30.928571428571427,
"alnum_prop": 0.6859122401847575,
"repo_name": "nicorellius/pdxpixel",
"id": "503713dc017c198998b90092354d9b47479bc222",
"size": "433",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pdxpixel/apps/accounts/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3414"
},
{
"name": "HTML",
"bytes": "21829"
},
{
"name": "JavaScript",
"bytes": "484"
},
{
"name": "Nginx",
"bytes": "339"
},
{
"name": "Python",
"bytes": "41777"
}
],
"symlink_target": ""
}
|
from core.config_loader import cur_conf
import mwapi
import user_config
session = mwapi.Session(cur_conf["core"]["site"], user_agent="StabiliserBot/1.0", api_path=cur_conf["core"]["api_path"])
def login():
session.login(user_config.username, user_config.password)
return True
|
{
"content_hash": "7ae432d9ead1de0b43cd541228eac3f9",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 120,
"avg_line_length": 28.7,
"alnum_prop": 0.7282229965156795,
"repo_name": "4shadoww/stabilizerbot",
"id": "5953d0562760179501010738d0f820355d0b739f",
"size": "287",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "core/session.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "36392"
}
],
"symlink_target": ""
}
|
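Once login() returns, the module-level session is meant to be shared by the rest of the bot. A short usage sketch (the action/meta values are ordinary MediaWiki API parameters; passing them as keyword arguments is, as far as this sketch assumes, how mwapi's get() forwards them):

from core.session import session, login

login()
# keyword arguments become MediaWiki API parameters
doc = session.get(action='query', meta='userinfo')
print(doc['query']['userinfo'])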
import arrayfire
import numpy
from .multiarray import ndarray
from . import random
from .core import *
from .lib import *
from . import linalg
from .linalg import vdot, dot
import ctypes
def arrayfire_version(numeric = False):
major = ctypes.c_int(0)
minor = ctypes.c_int(0)
patch = ctypes.c_int(0)
arrayfire.backend.get().af_get_version(ctypes.pointer(major),
ctypes.pointer(minor),
ctypes.pointer(patch));
if(numeric):
return major.value * 1000000 + minor.value*1000 + patch.value
return '%d.%d.%d' % (major.value, minor.value, patch.value)
def inplace_setitem(self, key, val):
try:
n_dims = self.numdims()
if (arrayfire.util._is_number(val)):
tdims = arrayfire.array._get_assign_dims(key, self.dims())
other_arr = arrayfire.array.constant_array(val, tdims[0], tdims[1], tdims[2], tdims[3], self.type())
del_other = True
else:
other_arr = val.arr
del_other = False
inds = arrayfire.array._get_indices(key)
# In place assignment. Notice passing a pointer to self.arr as output
arrayfire.util.safe_call(arrayfire.backend.get().af_assign_gen(ctypes.pointer(self.arr),
self.arr, ctypes.c_longlong(n_dims),
inds.pointer,
other_arr))
if del_other:
arrayfire.safe_call(arrayfire.backend.get().af_release_array(other_arr))
except RuntimeError as e:
raise IndexError(str(e))
def raw_ptr(self):
"""
Return the device pointer held by the array.
Returns
-------
ptr : int
Contains location of the device pointer
Note
----
- This can be used to integrate with custom C code and / or PyCUDA or PyOpenCL.
- No mem copy is performed; this function returns the raw device pointer.
"""
ptr = ctypes.c_void_p(0)
arrayfire.backend.get().af_get_raw_ptr(ctypes.pointer(ptr), self.arr)
return ptr.value
arrayfire.Array.__setitem__ = inplace_setitem
if arrayfire_version(numeric=True) >= 3003000:
arrayfire.Array.device_ptr = raw_ptr
elif arrayfire_version(numeric=True) >= 3002000:
raise RuntimeError('afnumpy is incompatible with arrayfire 3.2. Please upgrade.')
# This defines whether we'll try to force JIT evals
# after every instruction.
# If we do not, we risk having certain operations execute out of order
force_eval = True
# Check arrays for out of bounds indexing
# Also properly handle negative indices
safe_indexing = True
# The version number of afnumpy
__version__ = "1.0"
|
{
"content_hash": "e9286bfae32599dc11ca3ae3e6918cf1",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 112,
"avg_line_length": 34.46341463414634,
"alnum_prop": 0.5955414012738853,
"repo_name": "daurer/afnumpy",
"id": "7481e34bab2ed3ca4fc2be14a94e7b0ea1acd73f",
"size": "2826",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "afnumpy/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "104"
},
{
"name": "Python",
"bytes": "145243"
}
],
"symlink_target": ""
}
|
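afnumpy's trick above, patching methods onto arrayfire.Array at import time and gating the patch on the runtime library version, is plain attribute assignment on a class. A self-contained illustration of the same gate (Lib and its version tuple are invented for the example):

class Lib(object):
    VERSION = (3, 3, 0)

def raw_ptr(self):
    return id(self)  # stand-in for a device pointer

# mirror the arrayfire_version(numeric=True) >= 3003000 gate above
if Lib.VERSION >= (3, 3, 0):
    Lib.device_ptr = raw_ptr
elif Lib.VERSION >= (3, 2, 0):
    raise RuntimeError('incompatible library version')

print(Lib().device_ptr())  # bound like any ordinary method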
"""
Useful functions.
"""
import cPickle as pickle
import errno
import os
import signal
import sys
def println(line):
sys.stdout.write(line)
sys.stdout.flush()
def print_settings(settings, indent=3, title="=> settings"):
"""
Pretty print.
"""
print(title)
maxlen = max([len(s) for s in settings])
for k, v in settings.items():
print(indent*' ' + '| {}:{}{}'.format(k, (maxlen - len(k) + 1)*' ', v))
sys.stdout.flush()
def get_here(file):
return os.path.abspath(os.path.dirname(file))
def get_parent(dir):
return os.path.abspath(os.path.join(dir, os.pardir))
def mkdir_p(path):
"""
Portable mkdir -p
"""
try:
os.makedirs(path)
except OSError as e:
if e.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def dump(filename, obj):
"""
Disable keyboard interrupt while pickling.
"""
s = signal.signal(signal.SIGINT, signal.SIG_IGN)
with open(filename, 'wb') as f:
pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
signal.signal(signal.SIGINT, s)
|
{
"content_hash": "72b77b0305b021fd3c8a1598f2f76260",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 79,
"avg_line_length": 20.9811320754717,
"alnum_prop": 0.5989208633093526,
"repo_name": "xjwanglab/pycog",
"id": "3a965f57ff5b2eda5a3c7ac897ff8e6d371a770a",
"size": "1112",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pycog/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "240028"
},
{
"name": "Shell",
"bytes": "101"
}
],
"symlink_target": ""
}
|
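dump() above briefly ignores SIGINT so that a Ctrl-C cannot interrupt the pickle write and leave a truncated file behind. The guard generalizes to any critical section; a sketch as a context manager (uninterruptible is a name introduced here, not part of pycog):

import signal
from contextlib import contextmanager

@contextmanager
def uninterruptible():
    # ignore Ctrl-C while the block runs (main thread only),
    # then restore the previous handler
    previous = signal.signal(signal.SIGINT, signal.SIG_IGN)
    try:
        yield
    finally:
        signal.signal(signal.SIGINT, previous)

Used as "with uninterruptible(): pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)", which matches the body of dump().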
from solve import *
|
{
"content_hash": "a7877bc5e89741c45fc55a415b4b3a62",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 19,
"avg_line_length": 19,
"alnum_prop": 0.7894736842105263,
"repo_name": "wathen/PhD",
"id": "bf313677c71b298edce8fcbd6eb2066c10a1b5da",
"size": "19",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "MHD/FEniCS/MyPackage/PackageName/Solvers/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "101292"
},
{
"name": "C++",
"bytes": "3504448"
},
{
"name": "CMake",
"bytes": "4827"
},
{
"name": "Fortran",
"bytes": "936"
},
{
"name": "GLSL",
"bytes": "3050"
},
{
"name": "Gnuplot",
"bytes": "350"
},
{
"name": "HTML",
"bytes": "4136"
},
{
"name": "Jupyter Notebook",
"bytes": "5728473"
},
{
"name": "Limbo",
"bytes": "314"
},
{
"name": "Matlab",
"bytes": "340199"
},
{
"name": "Perl",
"bytes": "6031568"
},
{
"name": "Perl 6",
"bytes": "81429"
},
{
"name": "PostScript",
"bytes": "14691521"
},
{
"name": "Python",
"bytes": "4585284"
},
{
"name": "Roff",
"bytes": "1010"
},
{
"name": "Shell",
"bytes": "2832"
},
{
"name": "TeX",
"bytes": "2256768"
},
{
"name": "Terra",
"bytes": "2923"
}
],
"symlink_target": ""
}
|
"""Script to sort downloaded data (html and tsv) into folder structure
automatically based on file names(form titles)."""
__authors__ = ["Ole Herman Schumacher Elgesem"]
__copyright__ = "Ole Herman Schumacher Elgesem"
__credits__ = ["Erik Vesteraas"]
__license__ = "MIT"
# This file is subject to the terms and conditions defined in
# file 'LICENSE.txt', which is part of this source code package.
import os
import sys
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
import re
from shutil import copyfile
from file_funcs import path_join
def get_args():
ap = ArgumentParser(description='Sort downloads into folder structure',
formatter_class=ArgumentDefaultsHelpFormatter)
ap.add_argument('--input', '-i', type=str, default='./downloads',
help='Input directory')
ap.add_argument('--output', '-o', type=str, default='./data',
help='Output directory')
ap.add_argument('--verbose', '-v', action="store_true",
help='Print moves')
ap.add_argument('--delete', '-d', action="store_true",
help='Delete moved files')
ap.add_argument('--exclude', '-e', type=str,
help=r'Exclude regex',
default=r'(testskjema)|(XXX)|(\*\*\*)')
args = ap.parse_args()
return args
def main():
args = get_args()
delete = args.delete
exclude_pattern = re.compile(args.exclude)
semester_pattern = re.compile(r'(V|H)[0-9]{4}')
course_code_pattern = re.compile(r'(([A-Z]{1,5}-){0,1}[A-Z]{1,5}[0-9]{3,4})([A-Z]{1,5}){0,1}')
for root, subdirs, files in os.walk(args.input):
for file_x in files:
path = path_join(root, file_x)
filename, extension = os.path.splitext(path)
m = exclude_pattern.search(path)
if m is not None or path[0] == ".":
print("Excluded: " + path)
continue
m = semester_pattern.search(path)
if m is None:
print("Skipped - No semester: " + path)
continue
semester = m.group(0)
m = course_code_pattern.search(path)
if m is None:
print("Skipped - No course code: " + path)
continue
course = m.group(0)
dir_name = extension[1:]
if dir_name == "json":
dir_name = "participation"
target_folder = path_join(args.output, semester, "downloads", dir_name)
os.makedirs( target_folder, exist_ok=True )
newpath = path_join(target_folder, course + extension )
if delete:
# I hate windows:
try:
os.remove(newpath)
except OSError:
pass
os.rename(path, newpath)
else:
copyfile(path, newpath)
if args.verbose:
print(path)
print(" -> "+newpath)
print(root)
while delete:
delete = False
for root, subdirs, files in os.walk(args.input):
if len(subdirs) == 0 and len(files) == 0:
os.rmdir(root)
if args.verbose:
print("rm: "+path)
delete = True
if __name__ == '__main__':
main()
|
{
"content_hash": "dc870f7af7df5d9be885ea63f886de04",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 98,
"avg_line_length": 36.70652173913044,
"alnum_prop": 0.5318329878590465,
"repo_name": "fui/fui-kk",
"id": "00d061b29213d64c15ba860d8d6e1d151aa1e0bf",
"size": "3400",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fui_kk/sort_downloads.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1670"
},
{
"name": "HTML",
"bytes": "38445"
},
{
"name": "JavaScript",
"bytes": "3147"
},
{
"name": "Makefile",
"bytes": "3473"
},
{
"name": "Python",
"bytes": "105286"
},
{
"name": "Shell",
"bytes": "2479"
},
{
"name": "TeX",
"bytes": "10185"
}
],
"symlink_target": ""
}
|
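Everything in the sorter hinges on its two regexes: the H/V-plus-year semester token and the course code. A quick check of how they behave on a typical filename (the name itself is made up):

import re

semester = re.compile(r'(V|H)[0-9]{4}')
course = re.compile(r'(([A-Z]{1,5}-){0,1}[A-Z]{1,5}[0-9]{3,4})([A-Z]{1,5}){0,1}')

name = 'rapporter-INF1000-H2016.html'  # hypothetical download name
print(semester.search(name).group(0))  # -> H2016
print(course.search(name).group(0))    # -> INF1000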
"""Support for Modbus."""
import logging
import threading
from pymodbus.client.sync import ModbusSerialClient, ModbusTcpClient, ModbusUdpClient
from pymodbus.transaction import ModbusRtuFramer
from homeassistant.const import (
ATTR_STATE,
CONF_COVERS,
CONF_DELAY,
CONF_HOST,
CONF_METHOD,
CONF_NAME,
CONF_PORT,
CONF_TIMEOUT,
CONF_TYPE,
EVENT_HOMEASSISTANT_STOP,
)
from homeassistant.helpers.discovery import load_platform
from .const import (
ATTR_ADDRESS,
ATTR_HUB,
ATTR_UNIT,
ATTR_VALUE,
CONF_BAUDRATE,
CONF_BYTESIZE,
CONF_CLIMATE,
CONF_CLIMATES,
CONF_COVER,
CONF_PARITY,
CONF_STOPBITS,
MODBUS_DOMAIN as DOMAIN,
SERVICE_WRITE_COIL,
SERVICE_WRITE_REGISTER,
)
_LOGGER = logging.getLogger(__name__)
def modbus_setup(
hass, config, service_write_register_schema, service_write_coil_schema
):
"""Set up Modbus component."""
hass.data[DOMAIN] = hub_collect = {}
for conf_hub in config[DOMAIN]:
hub_collect[conf_hub[CONF_NAME]] = ModbusHub(conf_hub)
# modbus needs to be activated before components are loaded
# to avoid a racing problem
hub_collect[conf_hub[CONF_NAME]].setup()
# load platforms
for component, conf_key in (
(CONF_CLIMATE, CONF_CLIMATES),
(CONF_COVER, CONF_COVERS),
):
if conf_key in conf_hub:
load_platform(hass, component, DOMAIN, conf_hub, config)
def stop_modbus(event):
"""Stop Modbus service."""
for client in hub_collect.values():
client.close()
def write_register(service):
"""Write Modbus registers."""
unit = int(float(service.data[ATTR_UNIT]))
address = int(float(service.data[ATTR_ADDRESS]))
value = service.data[ATTR_VALUE]
client_name = service.data[ATTR_HUB]
if isinstance(value, list):
hub_collect[client_name].write_registers(
unit, address, [int(float(i)) for i in value]
)
else:
hub_collect[client_name].write_register(unit, address, int(float(value)))
def write_coil(service):
"""Write Modbus coil."""
unit = service.data[ATTR_UNIT]
address = service.data[ATTR_ADDRESS]
state = service.data[ATTR_STATE]
client_name = service.data[ATTR_HUB]
hub_collect[client_name].write_coil(unit, address, state)
# register function to gracefully stop modbus
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, stop_modbus)
# Register services for modbus
hass.services.register(
DOMAIN,
SERVICE_WRITE_REGISTER,
write_register,
schema=service_write_register_schema,
)
hass.services.register(
DOMAIN, SERVICE_WRITE_COIL, write_coil, schema=service_write_coil_schema
)
return True
class ModbusHub:
"""Thread safe wrapper class for pymodbus."""
def __init__(self, client_config):
"""Initialize the Modbus hub."""
# generic configuration
self._client = None
self._lock = threading.Lock()
self._config_name = client_config[CONF_NAME]
self._config_type = client_config[CONF_TYPE]
self._config_port = client_config[CONF_PORT]
self._config_timeout = client_config[CONF_TIMEOUT]
self._config_delay = 0
if self._config_type == "serial":
# serial configuration
self._config_method = client_config[CONF_METHOD]
self._config_baudrate = client_config[CONF_BAUDRATE]
self._config_stopbits = client_config[CONF_STOPBITS]
self._config_bytesize = client_config[CONF_BYTESIZE]
self._config_parity = client_config[CONF_PARITY]
else:
# network configuration
self._config_host = client_config[CONF_HOST]
self._config_delay = client_config[CONF_DELAY]
if self._config_delay > 0:
_LOGGER.warning(
"Parameter delay is accepted but not used in this version"
)
@property
def name(self):
"""Return the name of this hub."""
return self._config_name
def setup(self):
"""Set up pymodbus client."""
if self._config_type == "serial":
self._client = ModbusSerialClient(
method=self._config_method,
port=self._config_port,
baudrate=self._config_baudrate,
stopbits=self._config_stopbits,
bytesize=self._config_bytesize,
parity=self._config_parity,
timeout=self._config_timeout,
retry_on_empty=True,
)
elif self._config_type == "rtuovertcp":
self._client = ModbusTcpClient(
host=self._config_host,
port=self._config_port,
framer=ModbusRtuFramer,
timeout=self._config_timeout,
)
elif self._config_type == "tcp":
self._client = ModbusTcpClient(
host=self._config_host,
port=self._config_port,
timeout=self._config_timeout,
)
elif self._config_type == "udp":
self._client = ModbusUdpClient(
host=self._config_host,
port=self._config_port,
timeout=self._config_timeout,
)
else:
assert False
# Connect device
self.connect()
def close(self):
"""Disconnect client."""
with self._lock:
self._client.close()
def connect(self):
"""Connect client."""
with self._lock:
self._client.connect()
def read_coils(self, unit, address, count):
"""Read coils."""
with self._lock:
kwargs = {"unit": unit} if unit else {}
return self._client.read_coils(address, count, **kwargs)
def read_discrete_inputs(self, unit, address, count):
"""Read discrete inputs."""
with self._lock:
kwargs = {"unit": unit} if unit else {}
return self._client.read_discrete_inputs(address, count, **kwargs)
def read_input_registers(self, unit, address, count):
"""Read input registers."""
with self._lock:
kwargs = {"unit": unit} if unit else {}
return self._client.read_input_registers(address, count, **kwargs)
def read_holding_registers(self, unit, address, count):
"""Read holding registers."""
with self._lock:
kwargs = {"unit": unit} if unit else {}
return self._client.read_holding_registers(address, count, **kwargs)
def write_coil(self, unit, address, value):
"""Write coil."""
with self._lock:
kwargs = {"unit": unit} if unit else {}
self._client.write_coil(address, value, **kwargs)
def write_register(self, unit, address, value):
"""Write register."""
with self._lock:
kwargs = {"unit": unit} if unit else {}
self._client.write_register(address, value, **kwargs)
def write_registers(self, unit, address, values):
"""Write registers."""
with self._lock:
kwargs = {"unit": unit} if unit else {}
self._client.write_registers(address, values, **kwargs)
|
{
"content_hash": "56100d9a9bfeec54a3abcf7eedede49e",
"timestamp": "",
"source": "github",
"line_count": 229,
"max_line_length": 85,
"avg_line_length": 32.49781659388646,
"alnum_prop": 0.5799516259070142,
"repo_name": "partofthething/home-assistant",
"id": "21c6caa6fcccd43f9c8d788ea7f9b22654c1a84a",
"size": "7442",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "homeassistant/components/modbus/modbus.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1720"
},
{
"name": "Python",
"bytes": "31051838"
},
{
"name": "Shell",
"bytes": "4832"
}
],
"symlink_target": ""
}
|
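ModbusHub's thread safety comes from one pattern applied uniformly: every call into the wrapped pymodbus client runs under a single threading.Lock. The pattern reduced to its core (FakeClient stands in for the real client):

import threading

class FakeClient:
    # stand-in for a client whose methods must not run concurrently
    def read(self, address):
        return address * 2

class SafeHub:
    def __init__(self, client):
        self._client = client
        self._lock = threading.Lock()

    def read(self, address):
        with self._lock:  # serialize all access to the wrapped client
            return self._client.read(address)

print(SafeHub(FakeClient()).read(21))  # -> 42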
from tibanna.lambdas import (
check_task_awsem,
run_task_awsem,
update_cost_awsem
)
|
{
"content_hash": "6e2b8fac4208416d22a682d8622fc426",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 29,
"avg_line_length": 19.2,
"alnum_prop": 0.6770833333333334,
"repo_name": "4dn-dcic/tibanna",
"id": "e2d74252a63a0a651d87bed14ffa81f5b5993c5a",
"size": "111",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tibanna/lambdas/__init__.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Common Workflow Language",
"bytes": "1542"
},
{
"name": "Dockerfile",
"bytes": "3222"
},
{
"name": "HTML",
"bytes": "320852"
},
{
"name": "Makefile",
"bytes": "953"
},
{
"name": "Python",
"bytes": "576806"
},
{
"name": "Shell",
"bytes": "50474"
}
],
"symlink_target": ""
}
|
import os
from scipy.interpolate import CubicSpline
import matplotlib.pyplot as plt
import numpy as np
import gdal
import seaborn
from jdcal import gcal2jd, jd2gcal
# ============= local library imports ===========================
def calculate_julian_date(year, julian_doy):
""""""
first_of_the_year = int(sum(gcal2jd(year, 1, 1)))
# print "first day jd", first_of_the_year
julian_date = first_of_the_year + int(julian_doy)
# print "full julian date", julian_date
return julian_date
def format_date(date, year):
""""""
year_jd = int(sum(gcal2jd(year, 1, 1)))
# print "year 2000", year_2000
date = date - year_jd
d = jd2gcal(year_jd, date)
print "datedate", d
date_string = "{}_{a:02d}_{b:02d}".format(d[0], a=d[1], b=d[2])
return date_string
def read_files(file_list):
""""""
# erdas imagine driver..
driver = gdal.GetDriverByName('HFA')
# must register the driver before you can use it.
driver.Register()
# now we can open files
img_file_list = []
for img_file in file_list:
img_obj = gdal.Open(img_file)
if img_obj is None:
print 'Could not open ' + img_file
gdal.sys.exit(1)
img_file_list.append(img_obj)
return img_file_list
def format_list(lst):
length = len(lst)
print "length of list", length
print "heres the list", lst
print "range {}".format(range(length))
tuple_list = []
for i in range(length - 1):
# pair each element with its successor; the final element is already
# covered as the second half of the last pair (the original elif
# compared an int to a list and could never run)
tupper = (lst[i],lst[i+1])
tuple_list.append(tupper)
print "tuple list {}".format(tuple_list)
return tuple_list
def findRasterIntersect(raster1, raster2):
# load data
band1 = raster1.GetRasterBand(1)
band2 = raster2.GetRasterBand(1)
gt1 = raster1.GetGeoTransform()
# print "here is geotransform 1 {}".format(gt1)
gt2 = raster2.GetGeoTransform()
# print "here is geotransform 1 {}".format(gt2)
# print "raster1.RasterXSize = {}".format(raster1.RasterXSize)
# print "raster1.RasterYSize = {}".format(raster1.RasterYSize)
# print "raster2.RasterXSize = {}".format(raster2.RasterXSize)
# print "raster2.RasterYSize = {}".format(raster2.RasterYSize)
# find each image's bounding box
# r1 has left, top, right, bottom of dataset's bounds in geospatial coordinates.
r1 = [gt1[0], gt1[3], gt1[0] + (gt1[1] * raster1.RasterXSize), gt1[3] + (gt1[5] * raster1.RasterYSize)]
r2 = [gt2[0], gt2[3], gt2[0] + (gt2[1] * raster2.RasterXSize), gt2[3] + (gt2[5] * raster2.RasterYSize)]
print '\t1 bounding box: %s' % str(r1)
print '\t2 bounding box: %s' % str(r2)
test_list = [r1[0], r2[0]]
# find intersection between bounding boxes
intersection = [max(test_list), min(r1[1], r2[1]), min(r1[2], r2[2]), max(r1[3], r2[3])]
if r1 != r2:
print '\t** different bounding boxes **'
# check for any overlap at all...
if (intersection[2] < intersection[0]) or (intersection[1] < intersection[3]):
intersection = None
print '\t*** no overlap ***'
return
else:
print '\tintersection:', intersection
left1 = int(round((intersection[0] - r1[0]) / gt1[1])) # difference divided by pixel dimension
top1 = int(round((intersection[1] - r1[1]) / gt1[5]))
col1 = int(round((intersection[2] - r1[0]) / gt1[1])) - left1 # difference minus offset left
row1 = int(round((intersection[3] - r1[1]) / gt1[5])) - top1
print "left 1: {}, top1: {}, col1: {}, row1: {}".format(left1, top1, col1, row1)
left2 = int(round((intersection[0] - r2[0]) / gt2[1])) # difference divided by pixel dimension
top2 = int(round((intersection[1] - r2[1]) / gt2[5]))
col2 = int(round((intersection[2] - r2[0]) / gt2[1])) - left2 # difference minus new left offset
row2 = int(round((intersection[3] - r2[1]) / gt2[5])) - top2
# print '\tcol1:',col1,'row1:',row1,'col2:',col2,'row2:',row2
if col1 != col2 or row1 != row2:
print "*** MEGA ERROR *** COLS and ROWS DO NOT MATCH ***"
# these arrays should now have the same spatial geometry though NaNs may differ
array1 = band1.ReadAsArray(left1, top1, col1, row1)
array2 = band2.ReadAsArray(left2, top2, col2, row2)
else: # same dimensions from the get go
print "same dimensions from the get go..."
col1 = raster1.RasterXSize # = col2
row1 = raster1.RasterYSize # = row2
array1 = band1.ReadAsArray()
array2 = band2.ReadAsArray()
return array1, array2, col1, row1, intersection
def pull_files(path_to_files):
""""""
# TODO - somehow go back and make all the lists returned here be filled w tuples of current and next interpolation
# TODO - so then, we can iterate through each list in run_interpolation() and feed the function two rasters at a time.
jd_list = []
path_list = []
year_list = []
for p, dir, files in os.walk(path_to_files):
for i in files:
if i.endswith(".img"):
print "i", i
date = i[9:16]
year = date[:-3]
julian_doy = date[4:]
# print "date", date
# print "year", year
# print "Julian DOY", julian_doy
year_list.append(year)
julian_date = calculate_julian_date(year, julian_doy)
jd_list.append(julian_date)
file_path = os.path.join(p, i)
path_list.append(file_path)
# print "jd list", jd_list
# print "file path list", path_list
# diff = jd_list[1] - jd_list[0] # just checking that the difference between days of the year is correct...
# print "difference", diff
# return a list of arrays from gdal, or a list of tuples containing arrays,
raster_obj_list = read_files(path_list)
# print "obj list", raster_obj_list
jd_list = format_list(jd_list)
path_list = format_list(path_list)
raster_obj_list = format_list(raster_obj_list)
return jd_list, path_list, raster_obj_list, year_list
def get_arr(gdal_obj):
""""""
# get the imagine driver and register it
driver = gdal.GetDriverByName('HFA')
driver.Register()
# print "here is the gdal_obj", gdal_obj
band = gdal_obj.GetRasterBand(1)
data = band.ReadAsArray(0, 0)
# print "data", data
return data
def write_file(current_obj, arr, col, row, filename):
""""""
driver = current_obj.GetDriver()
driver.Register()
# TODO - Change for new output folder...
ds = driver.Create("/Volumes/Seagate Backup Plus Drive/all_dates_test/{}.img".format(filename), col, row, 1, gdal.GDT_Float32) #"" /Users/Gabe/Desktop/hard_drive_overflow/rapid_testfile_output/{}.img
ds.SetGeoTransform(current_obj.GetGeoTransform())
ds.SetProjection(current_obj.GetProjection())
ds_band = ds.GetRasterBand(1)
ds_band.WriteArray(arr)
def output_rasters(current_arr, next_arr, slope, start_date, end_date, date_count, current_obj, next_obj, year):
""""""
# get the driver from one of the objects
driver = current_obj.GetDriver()
driver.Register()
# geotransform = current_obj.GetGeoTransform()
col = current_obj.RasterXSize
row = current_obj.RasterYSize
# output the current arr to a file
print "START -> ndvi{}".format(start_date)
# reformat the date string
date_string = format_date(start_date, year)
write_file(current_obj, current_arr, col, row, "NDVI{}".format(date_string))
# output all the in-between rasters to files
cnt = 1 # TODO - Check here again if problem
print "here's the range \n", range(start_date + 1, end_date)
for i in range(start_date + 1, end_date): # -1
interp_arr = np.add(current_arr, (slope * cnt))
print "Bout to write ndvi_{}".format(i)
# reformat the date String
date_string = format_date(i, year)
write_file(current_obj, interp_arr, col, row, "NDVI{}".format(date_string))
print "wrote a file. Count: {}".format(cnt)
cnt += 1
# # output the next arr to a file
# todo fix the filename thing...
print "END -> ndvi_{}".format(end_date)
date_string = format_date(end_date, year)
write_file(current_obj, next_arr, col, row, "NDVI{}".format(date_string))
def interpolator(jd_list, path_list, raster_obj_list, year):
"""
:param jd_list:
:param path_list:
:param raster_obj_list:
:return:
"""
start_date = jd_list[0]
end_date = jd_list[-1]
# we need a total count of the number of days between our images.
date_count = end_date - start_date # + 1
print "date count", date_count
# this creates a range of every date between start and end.
date_range = range(start_date, end_date + 1)
# print "date range", date_range
current_obj = raster_obj_list[0]
next_obj = raster_obj_list[1]
current_arr = get_arr(current_obj)
# print "shape current arr", current_arr.shape
next_arr = get_arr(next_obj)
# print "shape next arr", next_arr.shape
diff = np.subtract(next_arr, current_arr)
slope = np.divide(diff, float(date_count))
# print "we got a slope, people!", slope
# Get the paths as well
current_path = path_list[0]
next_path = path_list[1]
output_rasters(current_arr, next_arr, slope, start_date, end_date, date_count, current_obj, next_obj, year)
def run_interpolator():
"""
This master function orchestrates reading the raster files into arrays, then
passes each pair to an interpolation function that fills in NDVI for every
day between acquisitions with a linear interpolation
:return:
"""
path_to_files = "/Users/Gabe/Desktop/hard_drive_overflow/METRIC_ETRM_Jornada_NDVI_P33R37" #METRIC_ETRM_Jornada_NDVI_P33R37"
jd_list, path_list, raster_obj_list, year_list = pull_files(path_to_files)
# # todo - have the array use findRasterIntersect here for each pair of rasters in the raster_obj_list
# # not sure if it's necessary yet to use it so...
# for i in raster_obj_list:
# findRasterIntersect(i[0], i[1])
print 'jd list \n', jd_list
# use the lists to run the interpolation.
for i, k, j, year in zip(jd_list, path_list, raster_obj_list, year_list):
interpolator(i, k, j, year)
if __name__ == "__main__":
run_interpolator()
|
{
"content_hash": "60416fd99357ea6b14f5f20814a26922",
"timestamp": "",
"source": "github",
"line_count": 324,
"max_line_length": 203,
"avg_line_length": 33.17283950617284,
"alnum_prop": 0.6104391514700409,
"repo_name": "NMTHydro/Recharge",
"id": "3bf2666e6ac9cd8f80789ba24ec509c67c6c08f6",
"size": "11551",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "utils/ndvi_linear_interpolation.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1871063"
}
],
"symlink_target": ""
}
|
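The interpolation itself is one line of arithmetic per day: slope = (next - current) / gap, and the raster for day k is current + slope * k. On two toy 1x2 arrays (values invented):

import numpy as np

current = np.array([[0.2, 0.4]])
nxt = np.array([[0.5, 0.1]])
gap = 3  # days between the two acquisitions

slope = (nxt - current) / float(gap)
for k in range(1, gap):              # the in-between days only
    print(current + slope * k)
# day 1 -> approx [[0.3, 0.3]]
# day 2 -> approx [[0.4, 0.2]]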
import contextlib
import unittest
import sys
class TestMROEntry(unittest.TestCase):
def test_mro_entry_signature(self):
tested = []
class B: ...
class C:
def __mro_entries__(self, *args, **kwargs):
tested.extend([args, kwargs])
return (C,)
c = C()
self.assertEqual(tested, [])
class D(B, c): ...
self.assertEqual(tested[0], ((B, c),))
self.assertEqual(tested[1], {})
def test_mro_entry(self):
tested = []
class A: ...
class B: ...
class C:
def __mro_entries__(self, bases):
tested.append(bases)
return (self.__class__,)
c = C()
self.assertEqual(tested, [])
class D(A, c, B): ...
self.assertEqual(tested[-1], (A, c, B))
self.assertEqual(D.__bases__, (A, C, B))
self.assertEqual(D.__orig_bases__, (A, c, B))
self.assertEqual(D.__mro__, (D, A, C, B, object))
d = D()
class E(d): ...
self.assertEqual(tested[-1], (d,))
self.assertEqual(E.__bases__, (D,))
def test_mro_entry_none(self):
tested = []
class A: ...
class B: ...
class C:
def __mro_entries__(self, bases):
tested.append(bases)
return ()
c = C()
self.assertEqual(tested, [])
class D(A, c, B): ...
self.assertEqual(tested[-1], (A, c, B))
self.assertEqual(D.__bases__, (A, B))
self.assertEqual(D.__orig_bases__, (A, c, B))
self.assertEqual(D.__mro__, (D, A, B, object))
class E(c): ...
self.assertEqual(tested[-1], (c,))
if sys.version_info[0] > 2:
# not all of it works on Python 2
self.assertEqual(E.__bases__, (object,))
self.assertEqual(E.__orig_bases__, (c,))
if sys.version_info[0] > 2:
# not all of it works on Python 2
self.assertEqual(E.__mro__, (E, object))
def test_mro_entry_with_builtins(self):
tested = []
class A: ...
class C:
def __mro_entries__(self, bases):
tested.append(bases)
return (dict,)
c = C()
self.assertEqual(tested, [])
class D(A, c): ...
self.assertEqual(tested[-1], (A, c))
self.assertEqual(D.__bases__, (A, dict))
self.assertEqual(D.__orig_bases__, (A, c))
self.assertEqual(D.__mro__, (D, A, dict, object))
def test_mro_entry_with_builtins_2(self):
tested = []
class C:
def __mro_entries__(self, bases):
tested.append(bases)
return (C,)
c = C()
self.assertEqual(tested, [])
class D(c, dict): ...
self.assertEqual(tested[-1], (c, dict))
self.assertEqual(D.__bases__, (C, dict))
self.assertEqual(D.__orig_bases__, (c, dict))
self.assertEqual(D.__mro__, (D, C, dict, object))
def test_mro_entry_errors(self):
class C_too_many:
def __mro_entries__(self, bases, something, other):
return ()
c = C_too_many()
with self.assertRaises(TypeError):
class D(c): ...
class C_too_few:
def __mro_entries__(self):
return ()
d = C_too_few()
with self.assertRaises(TypeError):
class D(d): ...
def test_mro_entry_errors_2(self):
class C_not_callable:
__mro_entries__ = "Surprise!"
c = C_not_callable()
with self.assertRaises(TypeError):
class D(c): ...
class C_not_tuple:
def __mro_entries__(self):
return object
c = C_not_tuple()
with self.assertRaises(TypeError):
class D(c): ...
def test_mro_entry_metaclass(self):
meta_args = []
class Meta(type):
def __new__(mcls, name, bases, ns):
meta_args.extend([mcls, name, bases, ns])
return super().__new__(mcls, name, bases, ns)
class A: ...
class C:
def __mro_entries__(self, bases):
return (A,)
c = C()
class D(c, metaclass=Meta):
x = 1
self.assertEqual(meta_args[0], Meta)
self.assertEqual(meta_args[1], 'D')
self.assertEqual(meta_args[2], (A,))
self.assertEqual(meta_args[3]['x'], 1)
self.assertEqual(D.__bases__, (A,))
self.assertEqual(D.__orig_bases__, (c,))
self.assertEqual(D.__mro__, (D, A, object))
self.assertEqual(D.__class__, Meta)
@unittest.skipIf(sys.version_info < (3, 7), "'type' checks for __mro_entries__ not implemented")
def test_mro_entry_type_call(self):
# Substitution should _not_ happen in direct type call
class C:
def __mro_entries__(self, bases):
return ()
c = C()
with self.assertRaisesRegex(TypeError,
"MRO entry resolution; "
"use types.new_class()"):
type('Bad', (c,), {})
class TestClassGetitem(unittest.TestCase):
# BEGIN - Additional tests from cython
def test_no_class_getitem(self):
class C: ...
# PyPy<7.3.8 raises AttributeError on __class_getitem__
if hasattr(sys, "pypy_version_info") and sys.pypy_version_info < (7, 3, 8):
err = AttributeError
else:
err = TypeError
with self.assertRaises(err):
C[int]
# END - Additional tests from cython
def test_class_getitem(self):
getitem_args = []
class C:
def __class_getitem__(*args, **kwargs):
getitem_args.extend([args, kwargs])
return None
C[int, str]
self.assertEqual(getitem_args[0], (C, (int, str)))
self.assertEqual(getitem_args[1], {})
def test_class_getitem_format(self):
class C:
def __class_getitem__(cls, item):
return f'C[{item.__name__}]'
self.assertEqual(C[int], 'C[int]')
self.assertEqual(C[C], 'C[C]')
def test_class_getitem_inheritance(self):
class C:
def __class_getitem__(cls, item):
return f'{cls.__name__}[{item.__name__}]'
class D(C): ...
self.assertEqual(D[int], 'D[int]')
self.assertEqual(D[D], 'D[D]')
def test_class_getitem_inheritance_2(self):
class C:
def __class_getitem__(cls, item):
return 'Should not see this'
class D(C):
def __class_getitem__(cls, item):
return f'{cls.__name__}[{item.__name__}]'
self.assertEqual(D[int], 'D[int]')
self.assertEqual(D[D], 'D[D]')
def test_class_getitem_classmethod(self):
class C:
@classmethod
def __class_getitem__(cls, item):
return f'{cls.__name__}[{item.__name__}]'
class D(C): ...
self.assertEqual(D[int], 'D[int]')
self.assertEqual(D[D], 'D[D]')
@unittest.skipIf(sys.version_info < (3, 6), "__init_subclass__() requires Py3.6+ (PEP 487)")
def test_class_getitem_patched(self):
class C:
def __init_subclass__(cls):
def __class_getitem__(cls, item):
return f'{cls.__name__}[{item.__name__}]'
cls.__class_getitem__ = classmethod(__class_getitem__)
class D(C): ...
self.assertEqual(D[int], 'D[int]')
self.assertEqual(D[D], 'D[D]')
def test_class_getitem_with_builtins(self):
class A(dict):
called_with = None
def __class_getitem__(cls, item):
cls.called_with = item
class B(A):
pass
self.assertIs(B.called_with, None)
B[int]
self.assertIs(B.called_with, int)
def test_class_getitem_errors(self):
class C_too_few:
def __class_getitem__(cls):
return None
with self.assertRaises(TypeError):
C_too_few[int]
class C_too_many:
def __class_getitem__(cls, one, two):
return None
with self.assertRaises(TypeError):
C_too_many[int]
def test_class_getitem_errors_2(self):
class C:
def __class_getitem__(cls, item):
return None
with self.assertRaises(TypeError):
C()[int]
class E: ...
e = E()
e.__class_getitem__ = lambda cls, item: 'This will not work'
with self.assertRaises(TypeError):
e[int]
class C_not_callable:
__class_getitem__ = "Surprise!"
with self.assertRaises(TypeError):
C_not_callable[int]
def test_class_getitem_metaclass(self):
class Meta(type):
def __class_getitem__(cls, item):
return f'{cls.__name__}[{item.__name__}]'
self.assertEqual(Meta[int], 'Meta[int]')
def test_class_getitem_with_metaclass(self):
class Meta(type): pass
class C(metaclass=Meta):
def __class_getitem__(cls, item):
return f'{cls.__name__}[{item.__name__}]'
self.assertEqual(C[int], 'C[int]')
def test_class_getitem_metaclass_first(self):
class Meta(type):
def __getitem__(cls, item):
return 'from metaclass'
class C(metaclass=Meta):
def __class_getitem__(cls, item):
return 'from __class_getitem__'
self.assertEqual(C[int], 'from metaclass')
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "48075f13466e90a027e1626f49d1212f",
"timestamp": "",
"source": "github",
"line_count": 288,
"max_line_length": 100,
"avg_line_length": 33.77777777777778,
"alnum_prop": 0.4968133223684211,
"repo_name": "da-woods/cython",
"id": "1afc9d4617c6879890cd9ba08db373e420f8548c",
"size": "9810",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/run/test_genericclass.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1429"
},
{
"name": "C",
"bytes": "784502"
},
{
"name": "C++",
"bytes": "31117"
},
{
"name": "Cython",
"bytes": "3339772"
},
{
"name": "Emacs Lisp",
"bytes": "12379"
},
{
"name": "Makefile",
"bytes": "3184"
},
{
"name": "PowerShell",
"bytes": "4022"
},
{
"name": "Python",
"bytes": "3905495"
},
{
"name": "Shell",
"bytes": "6235"
},
{
"name": "Smalltalk",
"bytes": "618"
},
{
"name": "Starlark",
"bytes": "3341"
},
{
"name": "sed",
"bytes": "807"
}
],
"symlink_target": ""
}
|
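The suite above pins down PEP 560 behaviour. The heart of __mro_entries__ is that a non-class object listed as a base gets one chance to nominate real classes in its place, while the original bases are preserved on __orig_bases__. A minimal standalone demonstration (Python 3.7+):

class Proxy:
    def __init__(self, real):
        self.real = real
    def __mro_entries__(self, bases):
        # called at class-creation time; must return a tuple of classes
        return (self.real,)

class Base: ...

class Child(Proxy(Base)): ...

print(Child.__bases__)       # (<class 'Base'>,)
print(Child.__orig_bases__)  # (<Proxy object at ...>,)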
from Kamaelia.Internet.TCPClient import TCPClient
from Kamaelia.Chassis.Pipeline import Pipeline
from Kamaelia.File.Writing import SimpleFileWriter
outputfile = "/tmp/received.raw"
clientServerTestPort=1500
Pipeline(TCPClient("127.0.0.1",clientServerTestPort),
SimpleFileWriter(outputfile)
).run()
|
{
"content_hash": "b65fbdd6a97aa290b90b433e6f38397e",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 53,
"avg_line_length": 28.90909090909091,
"alnum_prop": 0.7861635220125787,
"repo_name": "bbc/kamaelia",
"id": "a664cde68ee5309b5ed64e351d80ebfaed8622ca",
"size": "1487",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "Sketches/MPS/BugReports/FixTests/Kamaelia/Examples/UsingChassis/WhatIsTheCarouselFor/ClientStreamToFile.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "62985"
},
{
"name": "C",
"bytes": "212854"
},
{
"name": "C++",
"bytes": "327546"
},
{
"name": "CSS",
"bytes": "114434"
},
{
"name": "ChucK",
"bytes": "422"
},
{
"name": "Diff",
"bytes": "483"
},
{
"name": "Gettext Catalog",
"bytes": "3919909"
},
{
"name": "HTML",
"bytes": "1288960"
},
{
"name": "Java",
"bytes": "31832"
},
{
"name": "JavaScript",
"bytes": "829491"
},
{
"name": "Makefile",
"bytes": "5768"
},
{
"name": "NSIS",
"bytes": "18867"
},
{
"name": "PHP",
"bytes": "49059"
},
{
"name": "Perl",
"bytes": "31234"
},
{
"name": "Processing",
"bytes": "2885"
},
{
"name": "Pure Data",
"bytes": "7485482"
},
{
"name": "Python",
"bytes": "18896320"
},
{
"name": "Ruby",
"bytes": "4165"
},
{
"name": "Shell",
"bytes": "711244"
}
],
"symlink_target": ""
}
|
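For the pipeline above to have anything to write into /tmp/received.raw, something must be listening on 127.0.0.1:1500. A bare stdlib stand-in that serves one connection and pushes a few bytes at it (deliberately not a Kamaelia component, just enough to exercise the client):

import socket

srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
srv.bind(('127.0.0.1', 1500))
srv.listen(1)
conn, _ = srv.accept()  # blocks until the TCPClient connects
conn.sendall(b'raw bytes for /tmp/received.raw\n')
conn.close()
srv.close()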
"""Create initial tables
Revision ID: 21b664b587ce
Revises:
Create Date: 2016-04-13 11:14:23.063513
"""
# revision identifiers, used by Alembic.
revision = '21b664b587ce'
down_revision = None
branch_labels = None
depends_on = None
from alembic import op
from sqlalchemy import Column, String, Integer, ForeignKey
from sqlalchemy.dialects.postgresql import TIMESTAMP, JSONB
def upgrade():
# First migration. Set up all tables.
# squads
op.create_table(
'squads',
Column('id', type_=Integer, primary_key=True, autoincrement=True),
Column('name', type_=String(50), unique=True)
)
# features
op.create_table(
'features',
Column('name', String(50), unique=True, primary_key=True),
Column('prefix', String(10)),
Column('squad_id', Integer, ForeignKey('squads.id')),
Column('created_on', TIMESTAMP),
Column('deleted_on', TIMESTAMP)
)
# environments
envs_table = op.create_table(
'environments',
Column('name', type_=String(40), primary_key=True, unique=True),
Column('squad_id', Integer, ForeignKey('squads.id'),
nullable=True),
)
# toggles
op.create_table(
'toggles',
Column('feature', String, ForeignKey('features.name')),
Column('env', String, ForeignKey('environments.name')),
Column('state', String(5)),
)
op.create_index('on_togs', 'toggles', ['feature', 'env'], unique=True)
# employees
op.create_table(
'employees',
Column('username', type_=String(25), unique=True, primary_key=True),
Column('name', String),
Column('squad_id', Integer, ForeignKey('squads.id')),
Column('email', type_=String),
Column('role_id', Integer)
)
# auditing
op.create_table(
'auditing',
Column('id', Integer, autoincrement=True, primary_key=True),
Column('event', String(length=50), nullable=False, index=True),
Column('user', String(), nullable=True),
Column('date', TIMESTAMP),
Column('event_data', JSONB, nullable=True)
)
# Seed
op.bulk_insert(
envs_table,
[
{'name': 'Production'}
]
)
def downgrade():
op.drop_table('employees')
op.drop_table('toggles')
op.drop_table('features')
op.drop_table('environments')
op.drop_table('squads')
op.drop_table('auditing')
|
{
"content_hash": "256da5dbe4cd9fbf3e4989b97f1dce1a",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 76,
"avg_line_length": 26.333333333333332,
"alnum_prop": 0.6022866476112699,
"repo_name": "CanopyTax/toggle-meister",
"id": "07c6bc4b55f3c10177b6975f593540f6d2620892",
"size": "2449",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "migrations/versions/21b664b587ce_create_initial_tables.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1981"
},
{
"name": "Dockerfile",
"bytes": "836"
},
{
"name": "HTML",
"bytes": "680"
},
{
"name": "JavaScript",
"bytes": "80026"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "85035"
},
{
"name": "Shell",
"bytes": "358"
}
],
"symlink_target": ""
}
|
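A migration like this is normally applied through Alembic's command layer, which is also scriptable from Python; a sketch (assuming an alembic.ini already wired to the target database):

from alembic import command
from alembic.config import Config

cfg = Config('alembic.ini')
command.upgrade(cfg, '21b664b587ce')  # create and seed all tables
command.downgrade(cfg, 'base')        # the drop_table calls run in reverse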
"""finalize_c2c_23_upgrade
Revision ID: 1cb8168b89d8
Revises: 84b558deac2
Create Date: 2018-10-18 09:48:12.655466
"""
from alembic import op
# revision identifiers, used by Alembic.
revision = '1cb8168b89d8'
down_revision = '84b558deac2'
branch_labels = None
depends_on = None
def upgrade():
op.execute("""
UPDATE geov3.treeitem SET name = 'background' WHERE name = 'bglayers';
UPDATE geov3.interface SET name='main' WHERE name = 'desktop';
ALTER TABLE geov3.lux_print_job ALTER COLUMN id TYPE VARCHAR(120);
""")
# Auth is now an enum.
op.execute(
"UPDATE geov3.ogc_server SET auth = 'No auth' WHERE auth = 'none';"
)
# Internal WMS layers need to have a no-url ogc server to go through the lux proxy.
op.execute("""
INSERT
INTO geov3.ogc_server (name, description, url, type, image_type, auth)
VALUES ('Internal WMS', 'Use Luxembourg proxy', '', 'mapserver', 'image/png', 'No auth');
UPDATE geov3.layer_wms
SET ogc_server_id = (SELECT id FROM geov3.ogc_server WHERE url = '' limit 1)
WHERE id IN (SELECT id FROM geov3.lux_layer_internal_wms);
""")
def downgrade():
# We are not planning to come back from 2.3 and 1.6.
# Please do some backups and restore these if needed.
pass
|
{
"content_hash": "e69d135af9507b90e9dc8b74f4d8a3b1",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 97,
"avg_line_length": 29.795454545454547,
"alnum_prop": 0.6536994660564455,
"repo_name": "Geoportail-Luxembourg/geoportailv3",
"id": "e1cf826143eb077a445525706f23190d313a07b1",
"size": "2899",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "geoportal/LUX_alembic/versions/1cb8168b89d8_finalize_c2c_23_upgrade.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "432229"
},
{
"name": "Dockerfile",
"bytes": "16989"
},
{
"name": "EJS",
"bytes": "158409"
},
{
"name": "HTML",
"bytes": "441209"
},
{
"name": "JavaScript",
"bytes": "3500634"
},
{
"name": "Less",
"bytes": "165289"
},
{
"name": "Makefile",
"bytes": "26467"
},
{
"name": "Mako",
"bytes": "696"
},
{
"name": "PLpgSQL",
"bytes": "1588593"
},
{
"name": "Python",
"bytes": "619684"
},
{
"name": "SCSS",
"bytes": "1878"
},
{
"name": "Shell",
"bytes": "11608"
},
{
"name": "TypeScript",
"bytes": "7440"
}
],
"symlink_target": ""
}
|
"""Test for certbot_apache._internal.configurator."""
import copy
import shutil
import socket
import tempfile
import unittest
try:
import mock
except ImportError: # pragma: no cover
from unittest import mock # type: ignore
from acme import challenges
from certbot import achallenges
from certbot import crypto_util
from certbot import errors
from certbot.compat import filesystem
from certbot.compat import os
from certbot.tests import acme_util
from certbot.tests import util as certbot_util
from certbot_apache._internal import apache_util
from certbot_apache._internal import constants
from certbot_apache._internal import obj
from certbot_apache._internal import parser
import util
class MultipleVhostsTest(util.ApacheTest):
"""Test two standard well-configured HTTP vhosts."""
def setUp(self): # pylint: disable=arguments-differ
super().setUp()
self.config = util.get_apache_configurator(
self.config_path, self.vhost_path, self.config_dir, self.work_dir)
self.config = self.mock_deploy_cert(self.config)
self.vh_truth = util.get_vh_truth(
self.temp_dir, "debian_apache_2_4/multiple_vhosts")
def mock_deploy_cert(self, config):
"""A test for a mock deploy cert"""
config.real_deploy_cert = self.config.deploy_cert
def mocked_deploy_cert(*args, **kwargs):
"""a helper to mock a deployed cert"""
g_mod = "certbot_apache._internal.configurator.ApacheConfigurator.enable_mod"
with mock.patch(g_mod):
config.real_deploy_cert(*args, **kwargs)
self.config.deploy_cert = mocked_deploy_cert
return self.config
@mock.patch("certbot_apache._internal.configurator.path_surgery")
def test_prepare_no_install(self, mock_surgery):
silly_path = {"PATH": "/tmp/nothingness2342"}
mock_surgery.return_value = False
with mock.patch.dict('os.environ', silly_path):
self.assertRaises(errors.NoInstallationError, self.config.prepare)
self.assertEqual(mock_surgery.call_count, 1)
@mock.patch("certbot_apache._internal.parser.ApacheParser")
@mock.patch("certbot_apache._internal.configurator.util.exe_exists")
def test_prepare_version(self, mock_exe_exists, _):
mock_exe_exists.return_value = True
self.config.version = None
self.config.config_test = mock.Mock()
self.config.get_version = mock.Mock(return_value=(1, 1))
self.assertRaises(
errors.NotSupportedError, self.config.prepare)
def test_prepare_locked(self):
server_root = self.config.conf("server-root")
self.config.config_test = mock.Mock()
os.remove(os.path.join(server_root, ".certbot.lock"))
certbot_util.lock_and_call(self._test_prepare_locked, server_root)
@mock.patch("certbot_apache._internal.parser.ApacheParser")
@mock.patch("certbot_apache._internal.configurator.util.exe_exists")
@mock.patch("certbot_apache._internal.configurator.ApacheConfigurator.get_parsernode_root")
def _test_prepare_locked(self, _node, _exists, _parser):
try:
self.config.prepare()
except errors.PluginError as err:
err_msg = str(err)
self.assertTrue("lock" in err_msg)
self.assertTrue(self.config.conf("server-root") in err_msg)
else: # pragma: no cover
self.fail("Exception wasn't raised!")
def test_add_parser_arguments(self): # pylint: disable=no-self-use
from certbot_apache._internal.configurator import ApacheConfigurator
# Weak test..
ApacheConfigurator.add_parser_arguments(mock.MagicMock())
def test_docs_parser_arguments(self):
os.environ["CERTBOT_DOCS"] = "1"
from certbot_apache._internal.configurator import ApacheConfigurator
mock_add = mock.MagicMock()
ApacheConfigurator.add_parser_arguments(mock_add)
parserargs = ["server_root", "enmod", "dismod", "le_vhost_ext",
"vhost_root", "logs_root", "challenge_location",
"handle_modules", "handle_sites", "ctl"]
exp = {}
for k in ApacheConfigurator.OS_DEFAULTS.__dict__.keys():
if k in parserargs:
exp[k.replace("_", "-")] = getattr(ApacheConfigurator.OS_DEFAULTS, k)
# Special cases
exp["vhost-root"] = None
found = set()
for call in mock_add.call_args_list:
found.add(call[0][0])
# Make sure that all (and only) the expected values exist
self.assertEqual(len(mock_add.call_args_list), len(found))
for e in exp:
self.assertTrue(e in found)
del os.environ["CERTBOT_DOCS"]
def test_add_parser_arguments_all_configurators(self): # pylint: disable=no-self-use
from certbot_apache._internal.entrypoint import OVERRIDE_CLASSES
for cls in OVERRIDE_CLASSES.values():
cls.add_parser_arguments(mock.MagicMock())
def test_all_configurators_defaults_defined(self):
from certbot_apache._internal.entrypoint import OVERRIDE_CLASSES
from certbot_apache._internal.configurator import ApacheConfigurator
parameters = set(ApacheConfigurator.OS_DEFAULTS.__dict__.keys())
for cls in OVERRIDE_CLASSES.values():
self.assertTrue(parameters.issubset(set(cls.OS_DEFAULTS.__dict__.keys())))
def test_constant(self):
self.assertTrue("debian_apache_2_4/multiple_vhosts/apache" in
self.config.options.server_root)
@certbot_util.patch_display_util()
def test_get_all_names(self, mock_getutility):
mock_utility = mock_getutility()
mock_utility.notification = mock.MagicMock(return_value=True)
names = self.config.get_all_names()
self.assertEqual(names, {"certbot.demo", "ocspvhost.com", "encryption-example.demo",
"nonsym.link", "vhost.in.rootconf", "www.certbot.demo",
"duplicate.example.com"})
@certbot_util.patch_display_util()
@mock.patch("certbot_apache._internal.configurator.socket.gethostbyaddr")
def test_get_all_names_addrs(self, mock_gethost, mock_getutility):
mock_gethost.side_effect = [("google.com", "", ""), socket.error]
mock_utility = mock_getutility()
mock_utility.notification.return_value = True
vhost = obj.VirtualHost(
"fp", "ap",
{obj.Addr(("8.8.8.8", "443")),
obj.Addr(("zombo.com",)),
obj.Addr(("192.168.1.2"))},
True, False)
self.config.vhosts.append(vhost)
names = self.config.get_all_names()
self.assertEqual(len(names), 9)
self.assertTrue("zombo.com" in names)
self.assertTrue("google.com" in names)
self.assertTrue("certbot.demo" in names)
def test_get_bad_path(self):
self.assertEqual(apache_util.get_file_path(None), None)
self.assertEqual(apache_util.get_file_path("nonexistent"), None)
self.assertEqual(self.config._create_vhost("nonexistent"), None) # pylint: disable=protected-access
def test_get_aug_internal_path(self):
from certbot_apache._internal.apache_util import get_internal_aug_path
internal_paths = [
"Virtualhost", "IfModule/VirtualHost", "VirtualHost", "VirtualHost",
"Macro/VirtualHost", "IfModule/VirtualHost", "VirtualHost",
"IfModule/VirtualHost"]
for i, internal_path in enumerate(internal_paths):
self.assertEqual(
get_internal_aug_path(self.vh_truth[i].path), internal_path)
def test_bad_servername_alias(self):
ssl_vh1 = obj.VirtualHost(
"fp1", "ap1", {obj.Addr(("*", "443"))},
True, False)
# pylint: disable=protected-access
self.config._add_servernames(ssl_vh1)
self.assertTrue(
self.config._add_servername_alias("oy_vey", ssl_vh1) is None)
def test_add_servernames_alias(self):
self.config.parser.add_dir(
self.vh_truth[2].path, "ServerAlias", ["*.le.co"])
# pylint: disable=protected-access
self.config._add_servernames(self.vh_truth[2])
self.assertEqual(
self.vh_truth[2].get_names(), {"*.le.co", "ip-172-30-0-17"})
def test_get_virtual_hosts(self):
"""Make sure all vhosts are being properly found."""
vhs = self.config.get_virtual_hosts()
self.assertEqual(len(vhs), 12)
found = 0
for vhost in vhs:
for truth in self.vh_truth:
if vhost == truth:
found += 1
break
else:
raise Exception("Missed: %s" % vhost) # pragma: no cover
self.assertEqual(found, 12)
# Handle case of non-debian layout get_virtual_hosts
with mock.patch(
"certbot_apache._internal.configurator.ApacheConfigurator.conf"
) as mock_conf:
mock_conf.return_value = False
vhs = self.config.get_virtual_hosts()
self.assertEqual(len(vhs), 12)
@mock.patch("certbot_apache._internal.display_ops.select_vhost")
def test_choose_vhost_none_avail(self, mock_select):
mock_select.return_value = None
self.assertRaises(
errors.PluginError, self.config.choose_vhost, "none.com")
@mock.patch("certbot_apache._internal.display_ops.select_vhost")
def test_choose_vhost_select_vhost_ssl(self, mock_select):
mock_select.return_value = self.vh_truth[1]
self.assertEqual(
self.vh_truth[1], self.config.choose_vhost("none.com"))
@mock.patch("certbot_apache._internal.display_ops.select_vhost")
@mock.patch("certbot_apache._internal.obj.VirtualHost.conflicts")
def test_choose_vhost_select_vhost_non_ssl(self, mock_conf, mock_select):
mock_select.return_value = self.vh_truth[0]
mock_conf.return_value = False
chosen_vhost = self.config.choose_vhost("none.com")
self.vh_truth[0].aliases.add("none.com")
self.assertEqual(
self.vh_truth[0].get_names(), chosen_vhost.get_names())
# Make sure we go from HTTP -> HTTPS
self.assertFalse(self.vh_truth[0].ssl)
self.assertTrue(chosen_vhost.ssl)
@mock.patch("certbot_apache._internal.configurator.ApacheConfigurator._find_best_vhost")
@mock.patch("certbot_apache._internal.parser.ApacheParser.add_dir")
def test_choose_vhost_and_servername_addition(self, mock_add, mock_find):
ret_vh = self.vh_truth[8]
ret_vh.enabled = False
mock_find.return_value = self.vh_truth[8]
self.config.choose_vhost("whatever.com")
self.assertTrue(mock_add.called)
@mock.patch("certbot_apache._internal.display_ops.select_vhost")
def test_choose_vhost_select_vhost_with_temp(self, mock_select):
mock_select.return_value = self.vh_truth[0]
chosen_vhost = self.config.choose_vhost("none.com", create_if_no_ssl=False)
self.assertEqual(self.vh_truth[0], chosen_vhost)
@mock.patch("certbot_apache._internal.display_ops.select_vhost")
def test_choose_vhost_select_vhost_conflicting_non_ssl(self, mock_select):
mock_select.return_value = self.vh_truth[3]
conflicting_vhost = obj.VirtualHost(
"path", "aug_path", {obj.Addr.fromstring("*:443")},
True, True)
self.config.vhosts.append(conflicting_vhost)
self.assertRaises(
errors.PluginError, self.config.choose_vhost, "none.com")
def test_find_best_http_vhost_default(self):
vh = obj.VirtualHost(
"fp", "ap", {obj.Addr.fromstring("_default_:80")}, False, True)
self.config.vhosts = [vh]
self.assertEqual(self.config.find_best_http_vhost("foo.bar", False), vh)
def test_find_best_http_vhost_port(self):
port = "8080"
vh = obj.VirtualHost(
"fp", "ap", {obj.Addr.fromstring("*:" + port)},
False, True, "encryption-example.demo")
self.config.vhosts.append(vh)
self.assertEqual(self.config.find_best_http_vhost("foo.bar", False, port), vh)
def test_findbest_continues_on_short_domain(self):
# pylint: disable=protected-access
chosen_vhost = self.config._find_best_vhost("purple.com")
self.assertEqual(None, chosen_vhost)
def test_findbest_continues_on_long_domain(self):
# pylint: disable=protected-access
chosen_vhost = self.config._find_best_vhost("green.red.purple.com")
self.assertEqual(None, chosen_vhost)
def test_find_best_vhost(self):
# pylint: disable=protected-access
self.assertEqual(
self.vh_truth[3], self.config._find_best_vhost("certbot.demo"))
self.assertEqual(
self.vh_truth[0],
self.config._find_best_vhost("encryption-example.demo"))
self.assertEqual(
self.config._find_best_vhost("does-not-exist.com"), None)
def test_find_best_vhost_variety(self):
# pylint: disable=protected-access
ssl_vh = obj.VirtualHost(
"fp", "ap", {obj.Addr(("*", "443")),
obj.Addr(("zombo.com",))},
True, False)
self.config.vhosts.append(ssl_vh)
self.assertEqual(self.config._find_best_vhost("zombo.com"), ssl_vh)
def test_find_best_vhost_default(self):
# pylint: disable=protected-access
# Assume only the two default vhosts.
self.config.vhosts = [
vh for vh in self.config.vhosts
if vh.name not in ["certbot.demo", "nonsym.link",
"encryption-example.demo", "duplicate.example.com",
"ocspvhost.com", "vhost.in.rootconf"]
and "*.blue.purple.com" not in vh.aliases
]
self.assertEqual(
self.config._find_best_vhost("encryption-example.demo"),
self.vh_truth[2])
def test_non_default_vhosts(self):
# pylint: disable=protected-access
vhosts = self.config._non_default_vhosts(self.config.vhosts)
self.assertEqual(len(vhosts), 10)
@mock.patch('certbot_apache._internal.configurator.display_util.notify')
def test_deploy_cert_enable_new_vhost(self, unused_mock_notify):
# Create
ssl_vhost = self.config.make_vhost_ssl(self.vh_truth[0])
self.config.parser.modules["ssl_module"] = None
self.config.parser.modules["mod_ssl.c"] = None
self.config.parser.modules["socache_shmcb_module"] = None
self.assertFalse(ssl_vhost.enabled)
self.config.deploy_cert(
"encryption-example.demo", "example/cert.pem", "example/key.pem",
"example/cert_chain.pem", "example/fullchain.pem")
self.assertTrue(ssl_vhost.enabled)
def test_no_duplicate_include(self):
def mock_find_dir(directive, argument, _):
"""Mock method for parser.find_dir"""
if directive == "Include" and argument.endswith("options-ssl-apache.conf"):
return ["/path/to/whatever"]
return None # pragma: no cover
mock_add = mock.MagicMock()
self.config.parser.add_dir = mock_add
self.config._add_dummy_ssl_directives(self.vh_truth[0]) # pylint: disable=protected-access
tried_to_add = False
for a in mock_add.call_args_list:
if a[0][1] == "Include" and a[0][2] == self.config.mod_ssl_conf:
tried_to_add = True
# Include should be added, find_dir is not patched, and returns falsy
self.assertTrue(tried_to_add)
self.config.parser.find_dir = mock_find_dir
mock_add.reset_mock()
self.config._add_dummy_ssl_directives(self.vh_truth[0]) # pylint: disable=protected-access
for a in mock_add.call_args_list:
if a[0][1] == "Include" and a[0][2] == self.config.mod_ssl_conf:
self.fail("Include shouldn't be added, as patched find_dir 'finds' existing one") \
# pragma: no cover
@mock.patch('certbot_apache._internal.configurator.display_util.notify')
def test_deploy_cert(self, unused_mock_notify):
self.config.parser.modules["ssl_module"] = None
self.config.parser.modules["mod_ssl.c"] = None
self.config.parser.modules["socache_shmcb_module"] = None
# Patch _add_dummy_ssl_directives to make sure we write them correctly
# pylint: disable=protected-access
orig_add_dummy = self.config._add_dummy_ssl_directives
def mock_add_dummy_ssl(vhostpath):
"""Mock method for _add_dummy_ssl_directives"""
def find_args(path, directive):
"""Return list of arguments in requested directive at path"""
f_args = []
dirs = self.config.parser.find_dir(directive, None,
path)
for d in dirs:
f_args.append(self.config.parser.get_arg(d))
return f_args
# Verify that the dummy directives do not exist
self.assertFalse(
"insert_cert_file_path" in find_args(vhostpath,
"SSLCertificateFile"))
self.assertFalse(
"insert_key_file_path" in find_args(vhostpath,
"SSLCertificateKeyFile"))
orig_add_dummy(vhostpath)
# Verify that the dummy directives exist
self.assertTrue(
"insert_cert_file_path" in find_args(vhostpath,
"SSLCertificateFile"))
self.assertTrue(
"insert_key_file_path" in find_args(vhostpath,
"SSLCertificateKeyFile"))
# pylint: disable=protected-access
self.config._add_dummy_ssl_directives = mock_add_dummy_ssl
# Get the default 443 vhost
self.config.assoc["random.demo"] = self.vh_truth[1]
self.config.deploy_cert(
"random.demo",
"example/cert.pem", "example/key.pem", "example/cert_chain.pem")
self.config.save()
# Verify ssl_module was enabled.
self.assertTrue(self.vh_truth[1].enabled)
self.assertTrue("ssl_module" in self.config.parser.modules)
loc_cert = self.config.parser.find_dir(
"sslcertificatefile", "example/cert.pem", self.vh_truth[1].path)
loc_key = self.config.parser.find_dir(
"sslcertificateKeyfile", "example/key.pem", self.vh_truth[1].path)
loc_chain = self.config.parser.find_dir(
"SSLCertificateChainFile", "example/cert_chain.pem",
self.vh_truth[1].path)
# Verify one directive was found in the correct file
self.assertEqual(len(loc_cert), 1)
self.assertEqual(
apache_util.get_file_path(loc_cert[0]),
self.vh_truth[1].filep)
self.assertEqual(len(loc_key), 1)
self.assertEqual(
apache_util.get_file_path(loc_key[0]),
self.vh_truth[1].filep)
self.assertEqual(len(loc_chain), 1)
self.assertEqual(
apache_util.get_file_path(loc_chain[0]),
self.vh_truth[1].filep)
# One more time for chain directive setting
self.config.deploy_cert(
"random.demo",
"two/cert.pem", "two/key.pem", "two/cert_chain.pem")
self.assertTrue(self.config.parser.find_dir(
"SSLCertificateChainFile", "two/cert_chain.pem",
self.vh_truth[1].path))
def test_is_name_vhost(self):
addr = obj.Addr.fromstring("*:80")
self.assertTrue(self.config.is_name_vhost(addr))
self.config.version = (2, 2)
self.assertFalse(self.config.is_name_vhost(addr))
def test_add_name_vhost(self):
self.config.add_name_vhost(obj.Addr.fromstring("*:443"))
self.config.add_name_vhost(obj.Addr.fromstring("*:80"))
self.assertTrue(self.config.parser.find_dir(
"NameVirtualHost", "*:443", exclude=False))
self.assertTrue(self.config.parser.find_dir(
"NameVirtualHost", "*:80"))
def test_add_listen_80(self):
mock_find = mock.Mock()
mock_add_dir = mock.Mock()
mock_find.return_value = []
self.config.parser.find_dir = mock_find
self.config.parser.add_dir = mock_add_dir
self.config.ensure_listen("80")
self.assertTrue(mock_add_dir.called)
self.assertTrue(mock_find.called)
self.assertEqual(mock_add_dir.call_args[0][1], "Listen")
self.assertEqual(mock_add_dir.call_args[0][2], "80")
def test_add_listen_80_named(self):
mock_find = mock.Mock()
mock_find.return_value = ["test1", "test2", "test3"]
mock_get = mock.Mock()
mock_get.side_effect = ["1.2.3.4:80", "[::1]:80", "1.1.1.1:443"]
mock_add_dir = mock.Mock()
self.config.parser.find_dir = mock_find
self.config.parser.get_arg = mock_get
self.config.parser.add_dir = mock_add_dir
self.config.ensure_listen("80")
self.assertEqual(mock_add_dir.call_count, 0)
# Reset return lists and inputs
mock_add_dir.reset_mock()
mock_get.side_effect = ["1.2.3.4:80", "[::1]:80", "1.1.1.1:443"]
# Test
self.config.ensure_listen("8080")
self.assertEqual(mock_add_dir.call_count, 3)
self.assertTrue(mock_add_dir.called)
self.assertEqual(mock_add_dir.call_args[0][1], "Listen")
call_found = False
for mock_call in mock_add_dir.mock_calls:
if mock_call[1][2] == ['1.2.3.4:8080']:
call_found = True
self.assertTrue(call_found)
@mock.patch("certbot_apache._internal.parser.ApacheParser.reset_modules")
def test_prepare_server_https(self, mock_reset):
mock_enable = mock.Mock()
self.config.enable_mod = mock_enable
mock_find = mock.Mock()
mock_add_dir = mock.Mock()
mock_find.return_value = []
# This will test the Add listen
self.config.parser.find_dir = mock_find
self.config.parser.add_dir_to_ifmodssl = mock_add_dir
self.config.prepare_server_https("443")
# Changing the order these modules are enabled breaks the reverter
self.assertEqual(mock_enable.call_args_list[0][0][0], "socache_shmcb")
self.assertEqual(mock_enable.call_args[0][0], "ssl")
self.assertEqual(mock_enable.call_args[1], {"temp": False})
self.config.prepare_server_https("8080", temp=True)
# Changing the order these modules are enabled breaks the reverter
self.assertEqual(mock_enable.call_args_list[2][0][0], "socache_shmcb")
self.assertEqual(mock_enable.call_args[0][0], "ssl")
# Enable mod is temporary
self.assertEqual(mock_enable.call_args[1], {"temp": True})
self.assertEqual(mock_add_dir.call_count, 2)
@mock.patch("certbot_apache._internal.parser.ApacheParser.reset_modules")
def test_prepare_server_https_named_listen(self, mock_reset):
mock_find = mock.Mock()
mock_find.return_value = ["test1", "test2", "test3"]
mock_get = mock.Mock()
mock_get.side_effect = ["1.2.3.4:80", "[::1]:80", "1.1.1.1:443"]
mock_add_dir = mock.Mock()
mock_enable = mock.Mock()
self.config.parser.find_dir = mock_find
self.config.parser.get_arg = mock_get
self.config.parser.add_dir_to_ifmodssl = mock_add_dir
self.config.enable_mod = mock_enable
        # Test Listen statements with a specific IP listed
self.config.prepare_server_https("443")
# Should be 0 as one interface already listens to 443
self.assertEqual(mock_add_dir.call_count, 0)
# Reset return lists and inputs
mock_add_dir.reset_mock()
mock_get.side_effect = ["1.2.3.4:80", "[::1]:80", "1.1.1.1:443"]
# Test
self.config.prepare_server_https("8080", temp=True)
self.assertEqual(mock_add_dir.call_count, 3)
call_args_list = [mock_add_dir.call_args_list[i][0][2] for i in range(3)]
self.assertEqual(
sorted(call_args_list),
sorted([["1.2.3.4:8080", "https"],
["[::1]:8080", "https"],
["1.1.1.1:8080", "https"]]))
# mock_get.side_effect = ["1.2.3.4:80", "[::1]:80"]
# mock_find.return_value = ["test1", "test2", "test3"]
# self.config.parser.get_arg = mock_get
# self.config.prepare_server_https("8080", temp=True)
# self.assertEqual(self.listens, 0)
@mock.patch("certbot_apache._internal.parser.ApacheParser.reset_modules")
def test_prepare_server_https_needed_listen(self, mock_reset):
mock_find = mock.Mock()
mock_find.return_value = ["test1", "test2"]
mock_get = mock.Mock()
mock_get.side_effect = ["1.2.3.4:8080", "80"]
mock_add_dir = mock.Mock()
mock_enable = mock.Mock()
self.config.parser.find_dir = mock_find
self.config.parser.get_arg = mock_get
self.config.parser.add_dir_to_ifmodssl = mock_add_dir
self.config.enable_mod = mock_enable
self.config.prepare_server_https("443")
self.assertEqual(mock_add_dir.call_count, 1)
@mock.patch("certbot_apache._internal.parser.ApacheParser.reset_modules")
def test_prepare_server_https_mixed_listen(self, mock_reset):
mock_find = mock.Mock()
mock_find.return_value = ["test1", "test2"]
mock_get = mock.Mock()
mock_get.side_effect = ["1.2.3.4:8080", "443"]
mock_add_dir = mock.Mock()
mock_enable = mock.Mock()
self.config.parser.find_dir = mock_find
self.config.parser.get_arg = mock_get
self.config.parser.add_dir_to_ifmodssl = mock_add_dir
self.config.enable_mod = mock_enable
        # Test Listen statements with a specific IP listed
        self.config.prepare_server_https("443")
        # Should be 0 here, as the bare "Listen 443" directive already
        # covers every interface on the target port
        self.assertEqual(mock_add_dir.call_count, 0)
def test_make_vhost_ssl_with_mock_span(self):
# span excludes the closing </VirtualHost> tag in older versions
# of Augeas
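        # aug.span() returns (filename, label_start, label_end, value_start,
        # value_end, span_start, span_end); only the final span_end differs
        # from the newer-Augeas variant mocked in the test below.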
return_value = [self.vh_truth[0].filep, 1, 12, 0, 0, 0, 1142]
with mock.patch.object(self.config.parser.aug, 'span') as mock_span:
mock_span.return_value = return_value
self.test_make_vhost_ssl()
def test_make_vhost_ssl_with_mock_span2(self):
# span includes the closing </VirtualHost> tag in newer versions
# of Augeas
return_value = [self.vh_truth[0].filep, 1, 12, 0, 0, 0, 1157]
with mock.patch.object(self.config.parser.aug, 'span') as mock_span:
mock_span.return_value = return_value
self.test_make_vhost_ssl()
def test_make_vhost_ssl_nonsymlink(self):
ssl_vhost_slink = self.config.make_vhost_ssl(self.vh_truth[8])
self.assertTrue(ssl_vhost_slink.ssl)
self.assertTrue(ssl_vhost_slink.enabled)
self.assertEqual(ssl_vhost_slink.name, "nonsym.link")
def test_make_vhost_ssl_nonexistent_vhost_path(self):
ssl_vhost = self.config.make_vhost_ssl(self.vh_truth[1])
self.assertEqual(os.path.dirname(ssl_vhost.filep),
os.path.dirname(filesystem.realpath(self.vh_truth[1].filep)))
def test_make_vhost_ssl(self):
ssl_vhost = self.config.make_vhost_ssl(self.vh_truth[0])
self.assertEqual(
ssl_vhost.filep,
os.path.join(self.config_path, "sites-available",
"encryption-example-le-ssl.conf"))
self.assertEqual(ssl_vhost.path,
"/files" + ssl_vhost.filep + "/IfModule/Virtualhost")
self.assertEqual(len(ssl_vhost.addrs), 1)
self.assertEqual({obj.Addr.fromstring("*:443")}, ssl_vhost.addrs)
self.assertEqual(ssl_vhost.name, "encryption-example.demo")
self.assertTrue(ssl_vhost.ssl)
self.assertFalse(ssl_vhost.enabled)
self.assertEqual(self.config.is_name_vhost(self.vh_truth[0]),
self.config.is_name_vhost(ssl_vhost))
self.assertEqual(len(self.config.vhosts), 13)
def test_clean_vhost_ssl(self):
# pylint: disable=protected-access
for directive in ["SSLCertificateFile", "SSLCertificateKeyFile",
"SSLCertificateChainFile", "SSLCACertificatePath"]:
for _ in range(10):
self.config.parser.add_dir(self.vh_truth[1].path,
directive, ["bogus"])
self.config.save()
self.config._clean_vhost(self.vh_truth[1])
self.config.save()
loc_cert = self.config.parser.find_dir(
'SSLCertificateFile', None, self.vh_truth[1].path, False)
loc_key = self.config.parser.find_dir(
'SSLCertificateKeyFile', None, self.vh_truth[1].path, False)
loc_chain = self.config.parser.find_dir(
'SSLCertificateChainFile', None, self.vh_truth[1].path, False)
loc_cacert = self.config.parser.find_dir(
'SSLCACertificatePath', None, self.vh_truth[1].path, False)
self.assertEqual(len(loc_cert), 1)
self.assertEqual(len(loc_key), 1)
self.assertEqual(len(loc_chain), 0)
self.assertEqual(len(loc_cacert), 10)
def test_deduplicate_directives(self):
# pylint: disable=protected-access
DIRECTIVE = "Foo"
for _ in range(10):
self.config.parser.add_dir(self.vh_truth[1].path,
DIRECTIVE, ["bar"])
self.config.save()
self.config._deduplicate_directives(self.vh_truth[1].path, [DIRECTIVE])
self.config.save()
self.assertEqual(
len(self.config.parser.find_dir(
DIRECTIVE, None, self.vh_truth[1].path, False)), 1)
def test_remove_directives(self):
# pylint: disable=protected-access
DIRECTIVES = ["Foo", "Bar"]
for directive in DIRECTIVES:
for _ in range(10):
self.config.parser.add_dir(self.vh_truth[2].path,
directive, ["baz"])
self.config.save()
self.config._remove_directives(self.vh_truth[2].path, DIRECTIVES)
self.config.save()
for directive in DIRECTIVES:
self.assertEqual(
len(self.config.parser.find_dir(
directive, None, self.vh_truth[2].path, False)), 0)
def test_make_vhost_ssl_bad_write(self):
mock_open = mock.mock_open()
# This calls open
self.config.reverter.register_file_creation = mock.Mock()
mock_open.side_effect = IOError
with mock.patch("builtins.open", mock_open):
self.assertRaises(
errors.PluginError,
self.config.make_vhost_ssl, self.vh_truth[0])
def test_get_ssl_vhost_path(self):
# pylint: disable=protected-access
self.assertTrue(
self.config._get_ssl_vhost_path("example_path").endswith(".conf"))
def test_add_name_vhost_if_necessary(self):
# pylint: disable=protected-access
self.config.add_name_vhost = mock.Mock()
self.config.version = (2, 2)
self.config._add_name_vhost_if_necessary(self.vh_truth[0])
self.assertTrue(self.config.add_name_vhost.called)
new_addrs = set()
for addr in self.vh_truth[0].addrs:
new_addrs.add(obj.Addr(("_default_", addr.get_port(),)))
self.vh_truth[0].addrs = new_addrs
self.config._add_name_vhost_if_necessary(self.vh_truth[0])
self.assertEqual(self.config.add_name_vhost.call_count, 2)
@mock.patch("certbot_apache._internal.configurator.http_01.ApacheHttp01.perform")
@mock.patch("certbot_apache._internal.configurator.ApacheConfigurator.restart")
def test_perform(self, mock_restart, mock_http_perform):
# Only tests functionality specific to configurator.perform
# Note: As more challenges are offered this will have to be expanded
account_key, achalls = self.get_key_and_achalls()
expected = [achall.response(account_key) for achall in achalls]
mock_http_perform.return_value = expected
responses = self.config.perform(achalls)
self.assertEqual(mock_http_perform.call_count, 1)
self.assertEqual(responses, expected)
self.assertEqual(mock_restart.call_count, 1)
@mock.patch("certbot_apache._internal.configurator.ApacheConfigurator.restart")
@mock.patch("certbot_apache._internal.apache_util._get_runtime_cfg")
def test_cleanup(self, mock_cfg, mock_restart):
mock_cfg.return_value = ""
_, achalls = self.get_key_and_achalls()
for achall in achalls:
self.config._chall_out.add(achall) # pylint: disable=protected-access
for i, achall in enumerate(achalls):
self.config.cleanup([achall])
if i == len(achalls) - 1:
self.assertTrue(mock_restart.called)
else:
self.assertFalse(mock_restart.called)
@mock.patch("certbot_apache._internal.configurator.ApacheConfigurator.restart")
@mock.patch("certbot_apache._internal.apache_util._get_runtime_cfg")
def test_cleanup_no_errors(self, mock_cfg, mock_restart):
mock_cfg.return_value = ""
_, achalls = self.get_key_and_achalls()
self.config.http_doer = mock.MagicMock()
for achall in achalls:
self.config._chall_out.add(achall) # pylint: disable=protected-access
self.config.cleanup([achalls[-1]])
self.assertFalse(mock_restart.called)
self.config.cleanup(achalls)
self.assertTrue(mock_restart.called)
@mock.patch("certbot.util.run_script")
def test_get_version(self, mock_script):
mock_script.return_value = (
"Server Version: Apache/2.4.2 (Debian)", "")
self.assertEqual(self.config.get_version(), (2, 4, 2))
mock_script.return_value = (
"Server Version: Apache/2 (Linux)", "")
self.assertEqual(self.config.get_version(), (2,))
mock_script.return_value = (
"Server Version: Apache (Debian)", "")
self.assertRaises(errors.PluginError, self.config.get_version)
mock_script.return_value = (
"Server Version: Apache/2.3{0} Apache/2.4.7".format(
os.linesep), "")
self.assertRaises(errors.PluginError, self.config.get_version)
mock_script.side_effect = errors.SubprocessError("Can't find program")
self.assertRaises(errors.PluginError, self.config.get_version)
@mock.patch("certbot_apache._internal.configurator.util.run_script")
def test_restart(self, _):
self.config.restart()
@mock.patch("certbot_apache._internal.configurator.util.run_script")
def test_restart_bad_process(self, mock_run_script):
mock_run_script.side_effect = [None, errors.SubprocessError]
self.assertRaises(errors.MisconfigurationError, self.config.restart)
@mock.patch("certbot.util.run_script")
def test_config_test(self, _):
self.config.config_test()
@mock.patch("certbot.util.run_script")
def test_config_test_bad_process(self, mock_run_script):
mock_run_script.side_effect = errors.SubprocessError
self.assertRaises(errors.MisconfigurationError,
self.config.config_test)
def test_more_info(self):
self.assertTrue(self.config.more_info())
def test_get_chall_pref(self):
self.assertTrue(isinstance(self.config.get_chall_pref(""), list))
def test_install_ssl_options_conf(self):
path = os.path.join(self.work_dir, "test_it")
other_path = os.path.join(self.work_dir, "other_test_it")
self.config.install_ssl_options_conf(path, other_path)
self.assertTrue(os.path.isfile(path))
self.assertTrue(os.path.isfile(other_path))
# TEST ENHANCEMENTS
def test_supported_enhancements(self):
self.assertTrue(isinstance(self.config.supported_enhancements(), list))
def test_find_http_vhost_without_ancestor(self):
# pylint: disable=protected-access
vhost = self.vh_truth[0]
vhost.ssl = True
vhost.ancestor = None
res = self.config._get_http_vhost(vhost)
self.assertEqual(self.vh_truth[0].name, res.name)
self.assertEqual(self.vh_truth[0].aliases, res.aliases)
@mock.patch("certbot_apache._internal.configurator.ApacheConfigurator._get_http_vhost")
@mock.patch("certbot_apache._internal.display_ops.select_vhost")
@mock.patch("certbot.util.exe_exists")
def test_enhance_unknown_vhost(self, mock_exe, mock_sel_vhost, mock_get):
self.config.parser.modules["rewrite_module"] = None
mock_exe.return_value = True
ssl_vh1 = obj.VirtualHost(
"fp1", "ap1", {obj.Addr(("*", "443"))},
True, False)
ssl_vh1.name = "satoshi.com"
self.config.vhosts.append(ssl_vh1)
mock_sel_vhost.return_value = None
mock_get.return_value = None
self.assertRaises(
errors.PluginError,
self.config.enhance, "satoshi.com", "redirect")
def test_enhance_unknown_enhancement(self):
self.assertRaises(
errors.PluginError,
self.config.enhance, "certbot.demo", "unknown_enhancement")
def test_enhance_no_ssl_vhost(self):
with mock.patch("certbot_apache._internal.configurator.logger.error") as mock_log:
self.assertRaises(errors.PluginError, self.config.enhance,
"certbot.demo", "redirect")
            # Check that the correct logger.error message was printed
self.assertTrue("not able to find" in mock_log.call_args[0][0])
self.assertTrue("\"redirect\"" in mock_log.call_args[0][0])
mock_log.reset_mock()
self.assertRaises(errors.PluginError, self.config.enhance,
"certbot.demo", "ensure-http-header", "Test")
            # Check that the correct logger.error message was printed
self.assertTrue("not able to find" in mock_log.call_args[0][0])
self.assertTrue("Test" in mock_log.call_args[0][0])
@mock.patch("certbot.util.exe_exists")
def test_ocsp_stapling(self, mock_exe):
self.config.parser.update_runtime_variables = mock.Mock()
self.config.parser.modules["mod_ssl.c"] = None
self.config.parser.modules["socache_shmcb_module"] = None
self.config.get_version = mock.Mock(return_value=(2, 4, 7))
mock_exe.return_value = True
# This will create an ssl vhost for certbot.demo
self.config.choose_vhost("certbot.demo")
self.config.enhance("certbot.demo", "staple-ocsp")
# Get the ssl vhost for certbot.demo
ssl_vhost = self.config.assoc["certbot.demo"]
ssl_use_stapling_aug_path = self.config.parser.find_dir(
"SSLUseStapling", "on", ssl_vhost.path)
self.assertEqual(len(ssl_use_stapling_aug_path), 1)
ssl_vhost_aug_path = parser.get_aug_path(ssl_vhost.filep)
stapling_cache_aug_path = self.config.parser.find_dir('SSLStaplingCache',
"shmcb:/var/run/apache2/stapling_cache(128000)",
ssl_vhost_aug_path)
self.assertEqual(len(stapling_cache_aug_path), 1)
@mock.patch("certbot.util.exe_exists")
def test_ocsp_stapling_twice(self, mock_exe):
self.config.parser.update_runtime_variables = mock.Mock()
self.config.parser.modules["mod_ssl.c"] = None
self.config.parser.modules["socache_shmcb_module"] = None
self.config.get_version = mock.Mock(return_value=(2, 4, 7))
mock_exe.return_value = True
# Checking the case with already enabled ocsp stapling configuration
self.config.choose_vhost("ocspvhost.com")
self.config.enhance("ocspvhost.com", "staple-ocsp")
# Get the ssl vhost for letsencrypt.demo
ssl_vhost = self.config.assoc["ocspvhost.com"]
ssl_use_stapling_aug_path = self.config.parser.find_dir(
"SSLUseStapling", "on", ssl_vhost.path)
self.assertEqual(len(ssl_use_stapling_aug_path), 1)
ssl_vhost_aug_path = parser.get_aug_path(ssl_vhost.filep)
stapling_cache_aug_path = self.config.parser.find_dir('SSLStaplingCache',
"shmcb:/var/run/apache2/stapling_cache(128000)",
ssl_vhost_aug_path)
self.assertEqual(len(stapling_cache_aug_path), 1)
@mock.patch("certbot.util.exe_exists")
def test_ocsp_unsupported_apache_version(self, mock_exe):
mock_exe.return_value = True
self.config.parser.update_runtime_variables = mock.Mock()
self.config.parser.modules["mod_ssl.c"] = None
self.config.parser.modules["socache_shmcb_module"] = None
self.config.get_version = mock.Mock(return_value=(2, 2, 0))
self.config.choose_vhost("certbot.demo")
self.assertRaises(errors.PluginError,
self.config.enhance, "certbot.demo", "staple-ocsp")
def test_get_http_vhost_third_filter(self):
ssl_vh = obj.VirtualHost(
"fp", "ap", {obj.Addr(("*", "443"))},
True, False)
ssl_vh.name = "satoshi.com"
self.config.vhosts.append(ssl_vh)
# pylint: disable=protected-access
http_vh = self.config._get_http_vhost(ssl_vh)
self.assertFalse(http_vh.ssl)
@mock.patch("certbot.util.run_script")
@mock.patch("certbot.util.exe_exists")
def test_http_header_hsts(self, mock_exe, _):
self.config.parser.update_runtime_variables = mock.Mock()
self.config.parser.modules["mod_ssl.c"] = None
self.config.parser.modules["headers_module"] = None
mock_exe.return_value = True
# This will create an ssl vhost for certbot.demo
self.config.choose_vhost("certbot.demo")
self.config.enhance("certbot.demo", "ensure-http-header",
"Strict-Transport-Security")
# Get the ssl vhost for certbot.demo
ssl_vhost = self.config.assoc["certbot.demo"]
# These are not immediately available in find_dir even with save() and
# load(). They must be found in sites-available
hsts_header = self.config.parser.find_dir(
"Header", None, ssl_vhost.path)
# four args to HSTS header
self.assertEqual(len(hsts_header), 4)
def test_http_header_hsts_twice(self):
self.config.parser.modules["mod_ssl.c"] = None
# skip the enable mod
self.config.parser.modules["headers_module"] = None
# This will create an ssl vhost for encryption-example.demo
self.config.choose_vhost("encryption-example.demo")
self.config.enhance("encryption-example.demo", "ensure-http-header",
"Strict-Transport-Security")
self.assertRaises(
errors.PluginEnhancementAlreadyPresent,
self.config.enhance, "encryption-example.demo",
"ensure-http-header", "Strict-Transport-Security")
@mock.patch("certbot.util.run_script")
@mock.patch("certbot.util.exe_exists")
def test_http_header_uir(self, mock_exe, _):
self.config.parser.update_runtime_variables = mock.Mock()
self.config.parser.modules["mod_ssl.c"] = None
self.config.parser.modules["headers_module"] = None
mock_exe.return_value = True
# This will create an ssl vhost for certbot.demo
self.config.choose_vhost("certbot.demo")
self.config.enhance("certbot.demo", "ensure-http-header",
"Upgrade-Insecure-Requests")
self.assertTrue("headers_module" in self.config.parser.modules)
# Get the ssl vhost for certbot.demo
ssl_vhost = self.config.assoc["certbot.demo"]
# These are not immediately available in find_dir even with save() and
# load(). They must be found in sites-available
uir_header = self.config.parser.find_dir(
"Header", None, ssl_vhost.path)
        # four args to the UIR Header directive
self.assertEqual(len(uir_header), 4)
def test_http_header_uir_twice(self):
self.config.parser.modules["mod_ssl.c"] = None
# skip the enable mod
self.config.parser.modules["headers_module"] = None
# This will create an ssl vhost for encryption-example.demo
self.config.choose_vhost("encryption-example.demo")
self.config.enhance("encryption-example.demo", "ensure-http-header",
"Upgrade-Insecure-Requests")
self.assertRaises(
errors.PluginEnhancementAlreadyPresent,
self.config.enhance, "encryption-example.demo",
"ensure-http-header", "Upgrade-Insecure-Requests")
@mock.patch("certbot.util.run_script")
@mock.patch("certbot.util.exe_exists")
def test_redirect_well_formed_http(self, mock_exe, _):
self.config.parser.modules["rewrite_module"] = None
self.config.parser.update_runtime_variables = mock.Mock()
mock_exe.return_value = True
self.config.get_version = mock.Mock(return_value=(2, 2))
# This will create an ssl vhost for certbot.demo
self.config.choose_vhost("certbot.demo")
self.config.enhance("certbot.demo", "redirect")
# These are not immediately available in find_dir even with save() and
# load(). They must be found in sites-available
rw_engine = self.config.parser.find_dir(
"RewriteEngine", "on", self.vh_truth[3].path)
rw_rule = self.config.parser.find_dir(
"RewriteRule", None, self.vh_truth[3].path)
self.assertEqual(len(rw_engine), 1)
# three args to rw_rule
self.assertEqual(len(rw_rule), 3)
# [:-3] to remove the vhost index number
self.assertTrue(rw_engine[0].startswith(self.vh_truth[3].path[:-3]))
self.assertTrue(rw_rule[0].startswith(self.vh_truth[3].path[:-3]))
def test_rewrite_rule_exists(self):
# Skip the enable mod
self.config.parser.modules["rewrite_module"] = None
self.config.get_version = mock.Mock(return_value=(2, 3, 9))
self.config.parser.add_dir(
self.vh_truth[3].path, "RewriteRule", ["Unknown"])
# pylint: disable=protected-access
self.assertTrue(self.config._is_rewrite_exists(self.vh_truth[3]))
def test_rewrite_engine_exists(self):
# Skip the enable mod
self.config.parser.modules["rewrite_module"] = None
self.config.get_version = mock.Mock(return_value=(2, 3, 9))
self.config.parser.add_dir(
self.vh_truth[3].path, "RewriteEngine", "on")
# pylint: disable=protected-access
self.assertTrue(self.config._is_rewrite_engine_on(self.vh_truth[3]))
@mock.patch("certbot.util.run_script")
@mock.patch("certbot.util.exe_exists")
def test_redirect_with_existing_rewrite(self, mock_exe, _):
self.config.parser.modules["rewrite_module"] = None
self.config.parser.update_runtime_variables = mock.Mock()
mock_exe.return_value = True
self.config.get_version = mock.Mock(return_value=(2, 2, 0))
# Create a preexisting rewrite rule
self.config.parser.add_dir(
self.vh_truth[3].path, "RewriteRule", ["UnknownPattern",
"UnknownTarget"])
self.config.save()
# This will create an ssl vhost for certbot.demo
self.config.choose_vhost("certbot.demo")
self.config.enhance("certbot.demo", "redirect")
# These are not immediately available in find_dir even with save() and
# load(). They must be found in sites-available
rw_engine = self.config.parser.find_dir(
"RewriteEngine", "on", self.vh_truth[3].path)
rw_rule = self.config.parser.find_dir(
"RewriteRule", None, self.vh_truth[3].path)
self.assertEqual(len(rw_engine), 1)
        # three args to rw_rule + 2 args for the preexisting RewriteRule
self.assertEqual(len(rw_rule), 5)
# [:-3] to remove the vhost index number
self.assertTrue(rw_engine[0].startswith(self.vh_truth[3].path[:-3]))
self.assertTrue(rw_rule[0].startswith(self.vh_truth[3].path[:-3]))
self.assertTrue("rewrite_module" in self.config.parser.modules)
@mock.patch("certbot.util.run_script")
@mock.patch("certbot.util.exe_exists")
def test_redirect_with_old_https_redirection(self, mock_exe, _):
self.config.parser.modules["rewrite_module"] = None
self.config.parser.update_runtime_variables = mock.Mock()
mock_exe.return_value = True
self.config.get_version = mock.Mock(return_value=(2, 2, 0))
ssl_vhost = self.config.choose_vhost("certbot.demo")
# pylint: disable=protected-access
http_vhost = self.config._get_http_vhost(ssl_vhost)
        # Create an old (previously supported) https redirection rewrite rule
self.config.parser.add_dir(
http_vhost.path, "RewriteRule",
["^",
"https://%{SERVER_NAME}%{REQUEST_URI}",
"[L,QSA,R=permanent]"])
self.config.save()
try:
self.config.enhance("certbot.demo", "redirect")
except errors.PluginEnhancementAlreadyPresent:
args_paths = self.config.parser.find_dir(
"RewriteRule", None, http_vhost.path, False)
arg_vals = [self.config.parser.aug.get(x) for x in args_paths]
self.assertEqual(arg_vals, constants.REWRITE_HTTPS_ARGS)
def test_redirect_with_conflict(self):
self.config.parser.modules["rewrite_module"] = None
ssl_vh = obj.VirtualHost(
"fp", "ap", {obj.Addr(("*", "443")),
obj.Addr(("zombo.com",))},
True, False)
        # No server names, so this vhost should conflict.
# pylint: disable=protected-access
self.assertRaises(
errors.PluginError, self.config._enable_redirect, ssl_vh, "")
def test_redirect_two_domains_one_vhost(self):
# Skip the enable mod
self.config.parser.modules["rewrite_module"] = None
self.config.get_version = mock.Mock(return_value=(2, 3, 9))
# Creates ssl vhost for the domain
self.config.choose_vhost("red.blue.purple.com")
self.config.enhance("red.blue.purple.com", "redirect")
verify_no_redirect = ("certbot_apache._internal.configurator."
"ApacheConfigurator._verify_no_certbot_redirect")
with mock.patch(verify_no_redirect) as mock_verify:
self.config.enhance("green.blue.purple.com", "redirect")
self.assertFalse(mock_verify.called)
def test_redirect_from_previous_run(self):
# Skip the enable mod
self.config.parser.modules["rewrite_module"] = None
self.config.get_version = mock.Mock(return_value=(2, 3, 9))
self.config.choose_vhost("red.blue.purple.com")
self.config.enhance("red.blue.purple.com", "redirect")
# Clear state about enabling redirect on this run
# pylint: disable=protected-access
self.config._enhanced_vhosts["redirect"].clear()
self.assertRaises(
errors.PluginEnhancementAlreadyPresent,
self.config.enhance, "green.blue.purple.com", "redirect")
def test_create_own_redirect(self):
self.config.parser.modules["rewrite_module"] = None
self.config.get_version = mock.Mock(return_value=(2, 3, 9))
# For full testing... give names...
self.vh_truth[1].name = "default.com"
self.vh_truth[1].aliases = {"yes.default.com"}
# pylint: disable=protected-access
self.config._enable_redirect(self.vh_truth[1], "")
self.assertEqual(len(self.config.vhosts), 13)
def test_create_own_redirect_for_old_apache_version(self):
self.config.parser.modules["rewrite_module"] = None
self.config.get_version = mock.Mock(return_value=(2, 2))
# For full testing... give names...
self.vh_truth[1].name = "default.com"
self.vh_truth[1].aliases = {"yes.default.com"}
# pylint: disable=protected-access
self.config._enable_redirect(self.vh_truth[1], "")
self.assertEqual(len(self.config.vhosts), 13)
def test_sift_rewrite_rule(self):
# pylint: disable=protected-access
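        # _sift_rewrite_rule appears to flag (return True for) only rules that
        # redirect to a bare https:// target, i.e. ones made redundant by the
        # new SSL vhost.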
small_quoted_target = "RewriteRule ^ \"http://\""
self.assertFalse(self.config._sift_rewrite_rule(small_quoted_target))
https_target = "RewriteRule ^ https://satoshi"
self.assertTrue(self.config._sift_rewrite_rule(https_target))
normal_target = "RewriteRule ^/(.*) http://www.a.com:1234/$1 [L,R]"
self.assertFalse(self.config._sift_rewrite_rule(normal_target))
not_rewriterule = "NotRewriteRule ^ ..."
self.assertFalse(self.config._sift_rewrite_rule(not_rewriterule))
def get_key_and_achalls(self):
"""Return testing achallenges."""
account_key = self.rsa512jwk
achall1 = achallenges.KeyAuthorizationAnnotatedChallenge(
challb=acme_util.chall_to_challb(
challenges.HTTP01(
token=b"jIq_Xy1mXGN37tb4L6Xj_es58fW571ZNyXekdZzhh7Q"),
"pending"),
domain="encryption-example.demo", account_key=account_key)
achall2 = achallenges.KeyAuthorizationAnnotatedChallenge(
challb=acme_util.chall_to_challb(
challenges.HTTP01(
token=b"uqnaPzxtrndteOqtrXb0Asl5gOJfWAnnx6QJyvcmlDU"),
"pending"),
domain="certbot.demo", account_key=account_key)
achall3 = achallenges.KeyAuthorizationAnnotatedChallenge(
challb=acme_util.chall_to_challb(
challenges.HTTP01(token=(b'x' * 16)), "pending"),
domain="example.org", account_key=account_key)
return account_key, (achall1, achall2, achall3)
def test_enable_site_nondebian(self):
inc_path = "/path/to/wherever"
vhost = self.vh_truth[0]
vhost.enabled = False
vhost.filep = inc_path
self.assertFalse(self.config.parser.find_dir("Include", inc_path))
self.assertFalse(
os.path.dirname(inc_path) in self.config.parser.existing_paths)
self.config.enable_site(vhost)
self.assertTrue(self.config.parser.find_dir("Include", inc_path))
self.assertTrue(
os.path.dirname(inc_path) in self.config.parser.existing_paths)
self.assertTrue(
os.path.basename(inc_path) in self.config.parser.existing_paths[
os.path.dirname(inc_path)])
@mock.patch('certbot_apache._internal.configurator.display_util.notify')
def test_deploy_cert_not_parsed_path(self, unused_mock_notify):
# Make sure that we add include to root config for vhosts when
# handle-sites is false
self.config.parser.modules["ssl_module"] = None
self.config.parser.modules["mod_ssl.c"] = None
self.config.parser.modules["socache_shmcb_module"] = None
tmp_path = filesystem.realpath(tempfile.mkdtemp("vhostroot"))
filesystem.chmod(tmp_path, 0o755)
mock_p = "certbot_apache._internal.configurator.ApacheConfigurator._get_ssl_vhost_path"
mock_a = "certbot_apache._internal.parser.ApacheParser.add_include"
with mock.patch(mock_p) as mock_path:
mock_path.return_value = os.path.join(tmp_path, "whatever.conf")
with mock.patch(mock_a) as mock_add:
self.config.deploy_cert(
"encryption-example.demo",
"example/cert.pem", "example/key.pem",
"example/cert_chain.pem")
# Test that we actually called add_include
self.assertTrue(mock_add.called)
shutil.rmtree(tmp_path)
def test_deploy_cert_no_mod_ssl(self):
# Create
ssl_vhost = self.config.make_vhost_ssl(self.vh_truth[0])
self.config.parser.modules["socache_shmcb_module"] = None
self.config.prepare_server_https = mock.Mock()
self.assertRaises(errors.MisconfigurationError, self.config.deploy_cert,
"encryption-example.demo", "example/cert.pem", "example/key.pem",
"example/cert_chain.pem", "example/fullchain.pem")
@mock.patch("certbot_apache._internal.parser.ApacheParser.parsed_in_original")
def test_choose_vhost_and_servername_addition_parsed(self, mock_parsed):
ret_vh = self.vh_truth[8]
ret_vh.enabled = True
self.config.enable_site(ret_vh)
# Make sure that we return early
self.assertFalse(mock_parsed.called)
def test_enable_mod_unsupported(self):
self.assertRaises(errors.MisconfigurationError,
self.config.enable_mod,
"whatever")
def test_choose_vhosts_wildcard(self):
# pylint: disable=protected-access
mock_path = "certbot_apache._internal.display_ops.select_vhost_multiple"
with mock.patch(mock_path) as mock_select_vhs:
mock_select_vhs.return_value = [self.vh_truth[3]]
vhs = self.config._choose_vhosts_wildcard("*.certbot.demo",
create_ssl=True)
# Check that the dialog was called with one vh: certbot.demo
self.assertEqual(mock_select_vhs.call_args[0][0][0], self.vh_truth[3])
self.assertEqual(len(mock_select_vhs.call_args_list), 1)
# And the actual returned values
self.assertEqual(len(vhs), 1)
self.assertEqual(vhs[0].name, "certbot.demo")
self.assertTrue(vhs[0].ssl)
self.assertNotEqual(vhs[0], self.vh_truth[3])
@mock.patch("certbot_apache._internal.configurator.ApacheConfigurator.make_vhost_ssl")
def test_choose_vhosts_wildcard_no_ssl(self, mock_makessl):
# pylint: disable=protected-access
mock_path = "certbot_apache._internal.display_ops.select_vhost_multiple"
with mock.patch(mock_path) as mock_select_vhs:
mock_select_vhs.return_value = [self.vh_truth[1]]
vhs = self.config._choose_vhosts_wildcard("*.certbot.demo",
create_ssl=False)
self.assertFalse(mock_makessl.called)
self.assertEqual(vhs[0], self.vh_truth[1])
@mock.patch("certbot_apache._internal.configurator.ApacheConfigurator._vhosts_for_wildcard")
@mock.patch("certbot_apache._internal.configurator.ApacheConfigurator.make_vhost_ssl")
def test_choose_vhosts_wildcard_already_ssl(self, mock_makessl, mock_vh_for_w):
# pylint: disable=protected-access
# Already SSL vhost
mock_vh_for_w.return_value = [self.vh_truth[7]]
mock_path = "certbot_apache._internal.display_ops.select_vhost_multiple"
with mock.patch(mock_path) as mock_select_vhs:
mock_select_vhs.return_value = [self.vh_truth[7]]
vhs = self.config._choose_vhosts_wildcard("whatever",
create_ssl=True)
self.assertEqual(mock_select_vhs.call_args[0][0][0], self.vh_truth[7])
self.assertEqual(len(mock_select_vhs.call_args_list), 1)
            # Ensure that make_vhost_ssl was not called, as vhost.ssl is already True
self.assertFalse(mock_makessl.called)
# And the actual returned values
self.assertEqual(len(vhs), 1)
self.assertTrue(vhs[0].ssl)
self.assertEqual(vhs[0], self.vh_truth[7])
@mock.patch('certbot_apache._internal.configurator.display_util.notify')
def test_deploy_cert_wildcard(self, unused_mock_notify):
# pylint: disable=protected-access
mock_choose_vhosts = mock.MagicMock()
mock_choose_vhosts.return_value = [self.vh_truth[7]]
self.config._choose_vhosts_wildcard = mock_choose_vhosts
mock_d = "certbot_apache._internal.configurator.ApacheConfigurator._deploy_cert"
with mock.patch(mock_d) as mock_dep:
self.config.deploy_cert("*.wildcard.example.org", "/tmp/path",
"/tmp/path", "/tmp/path", "/tmp/path")
self.assertTrue(mock_dep.called)
self.assertEqual(len(mock_dep.call_args_list), 1)
self.assertEqual(self.vh_truth[7], mock_dep.call_args_list[0][0][0])
@mock.patch("certbot_apache._internal.display_ops.select_vhost_multiple")
def test_deploy_cert_wildcard_no_vhosts(self, mock_dialog):
# pylint: disable=protected-access
mock_dialog.return_value = []
self.assertRaises(errors.PluginError,
self.config.deploy_cert,
"*.wild.cat", "/tmp/path", "/tmp/path",
"/tmp/path", "/tmp/path")
@mock.patch("certbot_apache._internal.configurator.ApacheConfigurator._choose_vhosts_wildcard")
def test_enhance_wildcard_after_install(self, mock_choose):
# pylint: disable=protected-access
self.config.parser.modules["mod_ssl.c"] = None
self.config.parser.modules["headers_module"] = None
self.vh_truth[3].ssl = True
self.config._wildcard_vhosts["*.certbot.demo"] = [self.vh_truth[3]]
self.config.enhance("*.certbot.demo", "ensure-http-header",
"Upgrade-Insecure-Requests")
self.assertFalse(mock_choose.called)
@mock.patch("certbot_apache._internal.configurator.ApacheConfigurator._choose_vhosts_wildcard")
def test_enhance_wildcard_no_install(self, mock_choose):
self.vh_truth[3].ssl = True
mock_choose.return_value = [self.vh_truth[3]]
self.config.parser.modules["mod_ssl.c"] = None
self.config.parser.modules["headers_module"] = None
self.config.enhance("*.certbot.demo", "ensure-http-header",
"Upgrade-Insecure-Requests")
self.assertTrue(mock_choose.called)
def test_add_vhost_id(self):
for vh in [self.vh_truth[0], self.vh_truth[1], self.vh_truth[2]]:
vh_id = self.config.add_vhost_id(vh)
self.assertEqual(vh, self.config.find_vhost_by_id(vh_id))
def test_find_vhost_by_id_404(self):
self.assertRaises(errors.PluginError,
self.config.find_vhost_by_id,
"nonexistent")
def test_add_vhost_id_already_exists(self):
first_id = self.config.add_vhost_id(self.vh_truth[0])
second_id = self.config.add_vhost_id(self.vh_truth[0])
self.assertEqual(first_id, second_id)
def test_realpath_replaces_symlink(self):
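        # vh_truth[0] normally lives under sites-enabled (a symlink); set up a
        # duplicate parsed from sites-available and check that
        # get_virtual_hosts() keeps the real-path copy over the symlinked one.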
orig_match = self.config.parser.aug.match
mock_vhost = copy.deepcopy(self.vh_truth[0])
mock_vhost.filep = mock_vhost.filep.replace('sites-enabled', u'sites-available')
mock_vhost.path = mock_vhost.path.replace('sites-enabled', 'sites-available')
mock_vhost.enabled = False
self.config.parser.parse_file(mock_vhost.filep)
def mock_match(aug_expr):
"""Return a mocked match list of VirtualHosts"""
if "/mocked/path" in aug_expr:
return [self.vh_truth[1].path, self.vh_truth[0].path, mock_vhost.path]
return orig_match(aug_expr)
self.config.parser.parser_paths = ["/mocked/path"]
self.config.parser.aug.match = mock_match
vhs = self.config.get_virtual_hosts()
self.assertEqual(len(vhs), 2)
self.assertEqual(vhs[0], self.vh_truth[1])
# mock_vhost should have replaced the vh_truth[0], because its filepath
# isn't a symlink
self.assertEqual(vhs[1], mock_vhost)
class AugeasVhostsTest(util.ApacheTest):
"""Test vhosts with illegal names dependent on augeas version."""
# pylint: disable=protected-access
def setUp(self): # pylint: disable=arguments-differ
td = "debian_apache_2_4/augeas_vhosts"
cr = "debian_apache_2_4/augeas_vhosts/apache2"
vr = "debian_apache_2_4/augeas_vhosts/apache2/sites-available"
super().setUp(test_dir=td,
config_root=cr,
vhost_root=vr)
self.config = util.get_apache_configurator(
self.config_path, self.vhost_path, self.config_dir,
self.work_dir)
def test_choosevhost_with_illegal_name(self):
self.config.parser.aug = mock.MagicMock()
self.config.parser.aug.match.side_effect = RuntimeError
path = "debian_apache_2_4/augeas_vhosts/apache2/sites-available/old-and-default.conf"
chosen_vhost = self.config._create_vhost(path)
self.assertEqual(None, chosen_vhost)
def test_choosevhost_works(self):
path = "debian_apache_2_4/augeas_vhosts/apache2/sites-available/old-and-default.conf"
chosen_vhost = self.config._create_vhost(path)
self.assertTrue(chosen_vhost is None or chosen_vhost.path == path)
@mock.patch("certbot_apache._internal.configurator.ApacheConfigurator._create_vhost")
def test_get_vhost_continue(self, mock_vhost):
mock_vhost.return_value = None
vhs = self.config.get_virtual_hosts()
self.assertEqual([], vhs)
def test_choose_vhost_with_matching_wildcard(self):
names = (
"an.example.net", "another.example.net", "an.other.example.net")
for name in names:
self.assertFalse(name in self.config.choose_vhost(name).aliases)
@mock.patch("certbot_apache._internal.obj.VirtualHost.conflicts")
def test_choose_vhost_without_matching_wildcard(self, mock_conflicts):
mock_conflicts.return_value = False
mock_path = "certbot_apache._internal.display_ops.select_vhost"
with mock.patch(mock_path, lambda _, vhosts: vhosts[0]):
for name in ("a.example.net", "other.example.net"):
self.assertTrue(name in self.config.choose_vhost(name).aliases)
@mock.patch("certbot_apache._internal.obj.VirtualHost.conflicts")
def test_choose_vhost_wildcard_not_found(self, mock_conflicts):
mock_conflicts.return_value = False
mock_path = "certbot_apache._internal.display_ops.select_vhost"
names = (
"abc.example.net", "not.there.tld", "aa.wildcard.tld"
)
with mock.patch(mock_path) as mock_select:
mock_select.return_value = self.config.vhosts[0]
for name in names:
orig_cc = mock_select.call_count
self.config.choose_vhost(name)
self.assertEqual(mock_select.call_count - orig_cc, 1)
def test_choose_vhost_wildcard_found(self):
mock_path = "certbot_apache._internal.display_ops.select_vhost"
names = (
"ab.example.net", "a.wildcard.tld", "yetanother.example.net"
)
with mock.patch(mock_path) as mock_select:
mock_select.return_value = self.config.vhosts[0]
for name in names:
self.config.choose_vhost(name)
self.assertEqual(mock_select.call_count, 0)
def test_augeas_span_error(self):
broken_vhost = self.config.vhosts[0]
broken_vhost.path = broken_vhost.path + "/nonexistent"
self.assertRaises(errors.PluginError, self.config.make_vhost_ssl,
broken_vhost)
class MultiVhostsTest(util.ApacheTest):
"""Test configuration with multiple virtualhosts in a single file."""
# pylint: disable=protected-access
def setUp(self): # pylint: disable=arguments-differ
td = "debian_apache_2_4/multi_vhosts"
cr = "debian_apache_2_4/multi_vhosts/apache2"
vr = "debian_apache_2_4/multi_vhosts/apache2/sites-available"
super().setUp(test_dir=td,
config_root=cr,
vhost_root=vr)
self.config = util.get_apache_configurator(
self.config_path, self.vhost_path,
self.config_dir, self.work_dir, conf_vhost_path=self.vhost_path)
self.vh_truth = util.get_vh_truth(
self.temp_dir, "debian_apache_2_4/multi_vhosts")
def test_make_vhost_ssl(self):
ssl_vhost = self.config.make_vhost_ssl(self.vh_truth[1])
self.assertEqual(
ssl_vhost.filep,
os.path.join(self.config_path, "sites-available",
"default-le-ssl.conf"))
self.assertEqual(ssl_vhost.path,
"/files" + ssl_vhost.filep + "/IfModule/VirtualHost")
self.assertEqual(len(ssl_vhost.addrs), 1)
self.assertEqual({obj.Addr.fromstring("*:443")}, ssl_vhost.addrs)
self.assertEqual(ssl_vhost.name, "banana.vomit.com")
self.assertTrue(ssl_vhost.ssl)
self.assertFalse(ssl_vhost.enabled)
self.assertEqual(self.config.is_name_vhost(self.vh_truth[1]),
self.config.is_name_vhost(ssl_vhost))
mock_path = "certbot_apache._internal.configurator.ApacheConfigurator._get_new_vh_path"
with mock.patch(mock_path) as mock_getpath:
mock_getpath.return_value = None
self.assertRaises(errors.PluginError, self.config.make_vhost_ssl,
self.vh_truth[1])
def test_get_new_path(self):
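        # _get_new_vh_path presumably returns, out of the newly matched Augeas
        # paths, the one whose bracketed index marks a freshly duplicated
        # section, or None when only pre-existing paths match.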
with_index_1 = ["/path[1]/section[1]"]
without_index = ["/path/section"]
with_index_2 = ["/path[2]/section[2]"]
self.assertEqual(self.config._get_new_vh_path(without_index,
with_index_1),
None)
self.assertEqual(self.config._get_new_vh_path(without_index,
with_index_2),
with_index_2[0])
both = with_index_1 + with_index_2
self.assertEqual(self.config._get_new_vh_path(without_index, both),
with_index_2[0])
@mock.patch("certbot_apache._internal.configurator.display_util.notify")
def test_make_vhost_ssl_with_existing_rewrite_rule(self, mock_notify):
self.config.parser.modules["rewrite_module"] = None
ssl_vhost = self.config.make_vhost_ssl(self.vh_truth[4])
self.assertTrue(self.config.parser.find_dir(
"RewriteEngine", "on", ssl_vhost.path, False))
with open(ssl_vhost.filep) as the_file:
conf_text = the_file.read()
commented_rewrite_rule = ("# RewriteRule \"^/secrets/(.+)\" "
"\"https://new.example.com/docs/$1\" [R,L]")
uncommented_rewrite_rule = ("RewriteRule \"^/docs/(.+)\" "
"\"http://new.example.com/docs/$1\" [R,L]")
self.assertTrue(commented_rewrite_rule in conf_text)
self.assertTrue(uncommented_rewrite_rule in conf_text)
self.assertEqual(mock_notify.call_count, 1)
self.assertIn("Some rewrite rules", mock_notify.call_args[0][0])
@mock.patch("certbot_apache._internal.configurator.display_util.notify")
def test_make_vhost_ssl_with_existing_rewrite_conds(self, mock_notify):
self.config.parser.modules["rewrite_module"] = None
ssl_vhost = self.config.make_vhost_ssl(self.vh_truth[3])
with open(ssl_vhost.filep) as the_file:
conf_lines = the_file.readlines()
conf_line_set = [l.strip() for l in conf_lines]
not_commented_cond1 = ("RewriteCond "
"%{DOCUMENT_ROOT}/%{REQUEST_FILENAME} !-f")
not_commented_rewrite_rule = ("RewriteRule "
"^(.*)$ b://u%{REQUEST_URI} [P,NE,L]")
commented_cond1 = "# RewriteCond %{HTTPS} !=on"
commented_cond2 = "# RewriteCond %{HTTPS} !^$"
commented_rewrite_rule = ("# RewriteRule ^ "
"https://%{SERVER_NAME}%{REQUEST_URI} "
"[L,NE,R=permanent]")
self.assertTrue(not_commented_cond1 in conf_line_set)
self.assertTrue(not_commented_rewrite_rule in conf_line_set)
self.assertTrue(commented_cond1 in conf_line_set)
self.assertTrue(commented_cond2 in conf_line_set)
self.assertTrue(commented_rewrite_rule in conf_line_set)
self.assertEqual(mock_notify.call_count, 1)
self.assertIn("Some rewrite rules", mock_notify.call_args[0][0])
class InstallSslOptionsConfTest(util.ApacheTest):
"""Test that the options-ssl-nginx.conf file is installed and updated properly."""
def setUp(self): # pylint: disable=arguments-differ
super().setUp()
self.config = util.get_apache_configurator(
self.config_path, self.vhost_path, self.config_dir, self.work_dir)
def _call(self):
self.config.install_ssl_options_conf(self.config.mod_ssl_conf,
self.config.updated_mod_ssl_conf_digest)
def _current_ssl_options_hash(self):
return crypto_util.sha256sum(self.config.pick_apache_config())
def _assert_current_file(self):
self.assertTrue(os.path.isfile(self.config.mod_ssl_conf))
self.assertEqual(crypto_util.sha256sum(self.config.mod_ssl_conf),
self._current_ssl_options_hash())
def test_no_file(self):
# prepare should have placed a file there
self._assert_current_file()
os.remove(self.config.mod_ssl_conf)
self.assertFalse(os.path.isfile(self.config.mod_ssl_conf))
self._call()
self._assert_current_file()
def test_current_file(self):
self._assert_current_file()
self._call()
self._assert_current_file()
def test_prev_file_updates_to_current(self):
from certbot_apache._internal.constants import ALL_SSL_OPTIONS_HASHES
ALL_SSL_OPTIONS_HASHES.insert(0, "test_hash_does_not_match")
with mock.patch('certbot.crypto_util.sha256sum') as mock_sha256:
mock_sha256.return_value = ALL_SSL_OPTIONS_HASHES[0]
self._call()
self._assert_current_file()
def test_manually_modified_current_file_does_not_update(self):
with open(self.config.mod_ssl_conf, "a") as mod_ssl_conf:
mod_ssl_conf.write("a new line for the wrong hash\n")
with mock.patch("certbot.plugins.common.logger") as mock_logger:
self._call()
self.assertFalse(mock_logger.warning.called)
self.assertTrue(os.path.isfile(self.config.mod_ssl_conf))
self.assertEqual(crypto_util.sha256sum(
self.config.pick_apache_config()),
self._current_ssl_options_hash())
self.assertNotEqual(crypto_util.sha256sum(self.config.mod_ssl_conf),
self._current_ssl_options_hash())
def test_manually_modified_past_file_warns(self):
with open(self.config.mod_ssl_conf, "a") as mod_ssl_conf:
mod_ssl_conf.write("a new line for the wrong hash\n")
with open(self.config.updated_mod_ssl_conf_digest, "w") as f:
f.write("hashofanoldversion")
with mock.patch("certbot.plugins.common.logger") as mock_logger:
self._call()
self.assertEqual(mock_logger.warning.call_args[0][0],
"%s has been manually modified; updated file "
"saved to %s. We recommend updating %s for security purposes.")
self.assertEqual(crypto_util.sha256sum(
self.config.pick_apache_config()),
self._current_ssl_options_hash())
# only print warning once
with mock.patch("certbot.plugins.common.logger") as mock_logger:
self._call()
self.assertFalse(mock_logger.warning.called)
def test_ssl_config_files_hash_in_all_hashes(self):
"""
It is really critical that all TLS Apache config files have their SHA256 hash registered in
constants.ALL_SSL_OPTIONS_HASHES. Otherwise Certbot will mistakenly assume that the config
file has been manually edited by the user, and will refuse to update it.
This test ensures that all necessary hashes are present.
"""
from certbot_apache._internal.constants import ALL_SSL_OPTIONS_HASHES
import pkg_resources
tls_configs_dir = pkg_resources.resource_filename(
"certbot_apache", os.path.join("_internal", "tls_configs"))
all_files = [os.path.join(tls_configs_dir, name) for name in os.listdir(tls_configs_dir)
if name.endswith('options-ssl-apache.conf')]
self.assertTrue(all_files)
for one_file in all_files:
file_hash = crypto_util.sha256sum(one_file)
self.assertTrue(file_hash in ALL_SSL_OPTIONS_HASHES,
"Constants.ALL_SSL_OPTIONS_HASHES must be appended with the sha256 "
"hash of {0} when it is updated.".format(one_file))
def test_openssl_version(self):
self.config._openssl_version = None
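        # Simulated strings-style dump of a mod_ssl/httpd binary, as returned
        # by the mocked _open_module_file below; openssl_version() should pick
        # the "OpenSSL x.y.z" banner out of it.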
some_string_contents = b"""
SSLOpenSSLConfCmd
OpenSSL configuration command
SSLv3 not supported by this version of OpenSSL
'%s': invalid OpenSSL configuration command
OpenSSL 1.0.2g 1 Mar 2016
OpenSSL
AH02407: "SSLOpenSSLConfCmd %s %s" failed for %s
AH02556: "SSLOpenSSLConfCmd %s %s" applied to %s
OpenSSL 1.0.2g 1 Mar 2016
"""
# ssl_module as a DSO
self.config.parser.modules['ssl_module'] = '/fake/path'
with mock.patch("certbot_apache._internal.configurator."
"ApacheConfigurator._open_module_file") as mock_omf:
mock_omf.return_value = some_string_contents
self.assertEqual(self.config.openssl_version(), "1.0.2g")
# ssl_module statically linked
self.config._openssl_version = None
self.config.parser.modules['ssl_module'] = None
self.config.options.bin = '/fake/path/to/httpd'
with mock.patch("certbot_apache._internal.configurator."
"ApacheConfigurator._open_module_file") as mock_omf:
mock_omf.return_value = some_string_contents
self.assertEqual(self.config.openssl_version(), "1.0.2g")
def test_current_version(self):
self.config.version = (2, 4, 10)
self.config._openssl_version = '1.0.2m'
self.assertTrue('old' in self.config.pick_apache_config())
self.config.version = (2, 4, 11)
self.config._openssl_version = '1.0.2m'
self.assertTrue('current' in self.config.pick_apache_config())
self.config._openssl_version = '1.0.2a'
self.assertTrue('old' in self.config.pick_apache_config())
def test_openssl_version_warns(self):
self.config._openssl_version = '1.0.2a'
self.assertEqual(self.config.openssl_version(), '1.0.2a')
self.config._openssl_version = None
with mock.patch("certbot_apache._internal.configurator.logger.warning") as mock_log:
self.assertEqual(self.config.openssl_version(), None)
self.assertTrue("Could not find ssl_module" in mock_log.call_args[0][0])
# When no ssl_module is present at all
self.config._openssl_version = None
self.assertTrue("ssl_module" not in self.config.parser.modules)
with mock.patch("certbot_apache._internal.configurator.logger.warning") as mock_log:
self.assertEqual(self.config.openssl_version(), None)
self.assertTrue("Could not find ssl_module" in mock_log.call_args[0][0])
# When ssl_module is statically linked but --apache-bin not provided
self.config._openssl_version = None
self.config.options.bin = None
self.config.parser.modules['ssl_module'] = None
with mock.patch("certbot_apache._internal.configurator.logger.warning") as mock_log:
self.assertEqual(self.config.openssl_version(), None)
self.assertTrue("ssl_module is statically linked but" in mock_log.call_args[0][0])
self.config.parser.modules['ssl_module'] = "/fake/path"
with mock.patch("certbot_apache._internal.configurator.logger.warning") as mock_log:
# Check that correct logger.warning was printed
self.assertEqual(self.config.openssl_version(), None)
self.assertTrue("Unable to read" in mock_log.call_args[0][0])
contents_missing_openssl = b"these contents won't match the regex"
with mock.patch("certbot_apache._internal.configurator."
"ApacheConfigurator._open_module_file") as mock_omf:
mock_omf.return_value = contents_missing_openssl
with mock.patch("certbot_apache._internal.configurator.logger.warning") as mock_log:
# Check that correct logger.warning was printed
self.assertEqual(self.config.openssl_version(), None)
self.assertTrue("Could not find OpenSSL" in mock_log.call_args[0][0])
def test_open_module_file(self):
mock_open = mock.mock_open(read_data="testing 12 3")
with mock.patch("builtins.open", mock_open):
self.assertEqual(self.config._open_module_file("/nonsense/"), "testing 12 3")
if __name__ == "__main__":
unittest.main() # pragma: no cover
|
{
"content_hash": "5e5a031e63d1e8d9471e40e393e48f52",
"timestamp": "",
"source": "github",
"line_count": 1842,
"max_line_length": 107,
"avg_line_length": 44.414223669924,
"alnum_prop": 0.6198188507657895,
"repo_name": "stweil/letsencrypt",
"id": "84f9e205369b729bb5d729ae78c8e4c29399c14f",
"size": "81844",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "certbot-apache/tests/configurator_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "14147"
},
{
"name": "Augeas",
"bytes": "4997"
},
{
"name": "Batchfile",
"bytes": "35037"
},
{
"name": "DIGITAL Command Language",
"bytes": "133"
},
{
"name": "Groff",
"bytes": "222"
},
{
"name": "Makefile",
"bytes": "37309"
},
{
"name": "Nginx",
"bytes": "4270"
},
{
"name": "Python",
"bytes": "1355274"
},
{
"name": "Shell",
"bytes": "120566"
},
{
"name": "Standard ML",
"bytes": "256"
}
],
"symlink_target": ""
}
|
import six
import unittest
from airflow import configuration, models
from airflow.utils import db
from mock import patch, call
from airflow.contrib.hooks.spark_submit_hook import SparkSubmitHook
class TestSparkSubmitHook(unittest.TestCase):
_spark_job_file = 'test_application.py'
_config = {
'conf': {
'parquet.compression': 'SNAPPY'
},
'conn_id': 'default_spark',
'files': 'hive-site.xml',
'py_files': 'sample_library.py',
'jars': 'parquet.jar',
'packages': 'com.databricks:spark-avro_2.11:3.2.0',
'exclude_packages': 'org.bad.dependency:1.0.0',
'repositories': 'http://myrepo.org',
'total_executor_cores': 4,
'executor_cores': 4,
'executor_memory': '22g',
'keytab': 'privileged_user.keytab',
'principal': 'user/spark@airflow.org',
'name': 'spark-job',
'num_executors': 10,
'verbose': True,
'driver_memory': '3g',
'java_class': 'com.foo.bar.AppMain',
'application_args': [
'-f', 'foo',
'--bar', 'bar',
            '--with-spaces', 'args should keep embedded spaces',
'baz'
]
}
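    # Helper below: flattens a spark-submit arg list into a {flag: value}
    # dict, e.g. ['--master', 'yarn', 'app.py'] -> {'--master': 'yarn'};
    # positional arguments without a leading '--' flag are dropped.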
@staticmethod
def cmd_args_to_dict(list_cmd):
return_dict = {}
for arg in list_cmd:
if arg.startswith("--"):
pos = list_cmd.index(arg)
return_dict[arg] = list_cmd[pos + 1]
return return_dict
def setUp(self):
configuration.load_test_config()
db.merge_conn(
models.Connection(
conn_id='spark_yarn_cluster', conn_type='spark',
host='yarn://yarn-master',
extra='{"queue": "root.etl", "deploy-mode": "cluster"}')
)
db.merge_conn(
models.Connection(
conn_id='spark_default_mesos', conn_type='spark',
host='mesos://host', port=5050)
)
db.merge_conn(
models.Connection(
conn_id='spark_home_set', conn_type='spark',
host='yarn://yarn-master',
extra='{"spark-home": "/opt/myspark"}')
)
db.merge_conn(
models.Connection(
conn_id='spark_home_not_set', conn_type='spark',
host='yarn://yarn-master')
)
db.merge_conn(
models.Connection(
conn_id='spark_binary_set', conn_type='spark',
host='yarn', extra='{"spark-binary": "custom-spark-submit"}')
)
db.merge_conn(
models.Connection(
conn_id='spark_binary_and_home_set', conn_type='spark',
host='yarn',
extra='{"spark-home": "/path/to/spark_home", ' +
'"spark-binary": "custom-spark-submit"}')
)
db.merge_conn(
models.Connection(
conn_id='spark_standalone_cluster', conn_type='spark',
host='spark://spark-standalone-master:6066',
extra='{"spark-home": "/path/to/spark_home", "deploy-mode": "cluster"}')
)
def test_build_spark_submit_command(self):
# Given
hook = SparkSubmitHook(**self._config)
# When
cmd = hook._build_spark_submit_command(self._spark_job_file)
# Then
expected_build_cmd = [
'spark-submit',
'--master', 'yarn',
'--conf', 'parquet.compression=SNAPPY',
'--files', 'hive-site.xml',
'--py-files', 'sample_library.py',
'--jars', 'parquet.jar',
'--packages', 'com.databricks:spark-avro_2.11:3.2.0',
'--exclude-packages', 'org.bad.dependency:1.0.0',
'--repositories', 'http://myrepo.org',
'--num-executors', '10',
'--total-executor-cores', '4',
'--executor-cores', '4',
'--executor-memory', '22g',
'--driver-memory', '3g',
'--keytab', 'privileged_user.keytab',
'--principal', 'user/spark@airflow.org',
'--name', 'spark-job',
'--class', 'com.foo.bar.AppMain',
'--verbose',
'test_application.py',
'-f', 'foo',
'--bar', 'bar',
            '--with-spaces', 'args should keep embedded spaces',
'baz'
]
        self.assertEqual(expected_build_cmd, cmd)
@patch('airflow.contrib.hooks.spark_submit_hook.subprocess.Popen')
def test_spark_process_runcmd(self, mock_popen):
# Given
mock_popen.return_value.stdout = six.StringIO('stdout')
mock_popen.return_value.stderr = six.StringIO('stderr')
mock_popen.return_value.wait.return_value = 0
# When
hook = SparkSubmitHook(conn_id='')
hook.submit()
# Then
self.assertEqual(mock_popen.mock_calls[0],
call(['spark-submit', '--master', 'yarn',
'--name', 'default-name', ''],
stderr=-2, stdout=-1, universal_newlines=True, bufsize=-1))
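    # Of the connections registered in setUp, only the standalone cluster one
    # (spark:// master with deploy-mode cluster) should need driver-status
    # tracking; every other connection resolves to False below.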
def test_resolve_should_track_driver_status(self):
# Given
hook_default = SparkSubmitHook(conn_id='')
hook_spark_yarn_cluster = SparkSubmitHook(conn_id='spark_yarn_cluster')
hook_spark_default_mesos = SparkSubmitHook(conn_id='spark_default_mesos')
hook_spark_home_set = SparkSubmitHook(conn_id='spark_home_set')
hook_spark_home_not_set = SparkSubmitHook(conn_id='spark_home_not_set')
hook_spark_binary_set = SparkSubmitHook(conn_id='spark_binary_set')
hook_spark_binary_and_home_set = SparkSubmitHook(
conn_id='spark_binary_and_home_set')
hook_spark_standalone_cluster = SparkSubmitHook(
conn_id='spark_standalone_cluster')
# When
should_track_driver_status_default = hook_default \
._resolve_should_track_driver_status()
should_track_driver_status_spark_yarn_cluster = hook_spark_yarn_cluster \
._resolve_should_track_driver_status()
should_track_driver_status_spark_default_mesos = hook_spark_default_mesos \
._resolve_should_track_driver_status()
should_track_driver_status_spark_home_set = hook_spark_home_set \
._resolve_should_track_driver_status()
should_track_driver_status_spark_home_not_set = hook_spark_home_not_set \
._resolve_should_track_driver_status()
should_track_driver_status_spark_binary_set = hook_spark_binary_set \
._resolve_should_track_driver_status()
should_track_driver_status_spark_binary_and_home_set = \
hook_spark_binary_and_home_set._resolve_should_track_driver_status()
should_track_driver_status_spark_standalone_cluster = \
hook_spark_standalone_cluster._resolve_should_track_driver_status()
# Then
self.assertEqual(should_track_driver_status_default, False)
self.assertEqual(should_track_driver_status_spark_yarn_cluster, False)
self.assertEqual(should_track_driver_status_spark_default_mesos, False)
self.assertEqual(should_track_driver_status_spark_home_set, False)
self.assertEqual(should_track_driver_status_spark_home_not_set, False)
self.assertEqual(should_track_driver_status_spark_binary_set, False)
self.assertEqual(should_track_driver_status_spark_binary_and_home_set, False)
self.assertEqual(should_track_driver_status_spark_standalone_cluster, True)
def test_resolve_connection_yarn_default(self):
# Given
hook = SparkSubmitHook(conn_id='')
# When
connection = hook._resolve_connection()
cmd = hook._build_spark_submit_command(self._spark_job_file)
# Then
dict_cmd = self.cmd_args_to_dict(cmd)
expected_spark_connection = {"master": "yarn",
"spark_binary": "spark-submit",
"deploy_mode": None,
"queue": None,
"spark_home": None}
self.assertEqual(connection, expected_spark_connection)
self.assertEqual(dict_cmd["--master"], "yarn")
def test_resolve_connection_yarn_default_connection(self):
# Given
hook = SparkSubmitHook(conn_id='spark_default')
# When
connection = hook._resolve_connection()
cmd = hook._build_spark_submit_command(self._spark_job_file)
# Then
dict_cmd = self.cmd_args_to_dict(cmd)
expected_spark_connection = {"master": "yarn",
"spark_binary": "spark-submit",
"deploy_mode": None,
"queue": "root.default",
"spark_home": None}
self.assertEqual(connection, expected_spark_connection)
self.assertEqual(dict_cmd["--master"], "yarn")
self.assertEqual(dict_cmd["--queue"], "root.default")
def test_resolve_connection_mesos_default_connection(self):
# Given
hook = SparkSubmitHook(conn_id='spark_default_mesos')
# When
connection = hook._resolve_connection()
cmd = hook._build_spark_submit_command(self._spark_job_file)
# Then
dict_cmd = self.cmd_args_to_dict(cmd)
expected_spark_connection = {"master": "mesos://host:5050",
"spark_binary": "spark-submit",
"deploy_mode": None,
"queue": None,
"spark_home": None}
self.assertEqual(connection, expected_spark_connection)
self.assertEqual(dict_cmd["--master"], "mesos://host:5050")
def test_resolve_connection_spark_yarn_cluster_connection(self):
# Given
hook = SparkSubmitHook(conn_id='spark_yarn_cluster')
# When
connection = hook._resolve_connection()
cmd = hook._build_spark_submit_command(self._spark_job_file)
# Then
dict_cmd = self.cmd_args_to_dict(cmd)
expected_spark_connection = {"master": "yarn://yarn-master",
"spark_binary": "spark-submit",
"deploy_mode": "cluster",
"queue": "root.etl",
"spark_home": None}
self.assertEqual(connection, expected_spark_connection)
self.assertEqual(dict_cmd["--master"], "yarn://yarn-master")
self.assertEqual(dict_cmd["--queue"], "root.etl")
self.assertEqual(dict_cmd["--deploy-mode"], "cluster")
def test_resolve_connection_spark_home_set_connection(self):
# Given
hook = SparkSubmitHook(conn_id='spark_home_set')
# When
connection = hook._resolve_connection()
cmd = hook._build_spark_submit_command(self._spark_job_file)
# Then
expected_spark_connection = {"master": "yarn://yarn-master",
"spark_binary": "spark-submit",
"deploy_mode": None,
"queue": None,
"spark_home": "/opt/myspark"}
self.assertEqual(connection, expected_spark_connection)
self.assertEqual(cmd[0], '/opt/myspark/bin/spark-submit')
def test_resolve_connection_spark_home_not_set_connection(self):
# Given
hook = SparkSubmitHook(conn_id='spark_home_not_set')
# When
connection = hook._resolve_connection()
cmd = hook._build_spark_submit_command(self._spark_job_file)
# Then
expected_spark_connection = {"master": "yarn://yarn-master",
"spark_binary": "spark-submit",
"deploy_mode": None,
"queue": None,
"spark_home": None}
self.assertEqual(connection, expected_spark_connection)
self.assertEqual(cmd[0], 'spark-submit')
def test_resolve_connection_spark_binary_set_connection(self):
# Given
hook = SparkSubmitHook(conn_id='spark_binary_set')
# When
connection = hook._resolve_connection()
cmd = hook._build_spark_submit_command(self._spark_job_file)
# Then
expected_spark_connection = {"master": "yarn",
"spark_binary": "custom-spark-submit",
"deploy_mode": None,
"queue": None,
"spark_home": None}
self.assertEqual(connection, expected_spark_connection)
self.assertEqual(cmd[0], 'custom-spark-submit')
def test_resolve_connection_spark_binary_and_home_set_connection(self):
# Given
hook = SparkSubmitHook(conn_id='spark_binary_and_home_set')
# When
connection = hook._resolve_connection()
cmd = hook._build_spark_submit_command(self._spark_job_file)
# Then
expected_spark_connection = {"master": "yarn",
"spark_binary": "custom-spark-submit",
"deploy_mode": None,
"queue": None,
"spark_home": "/path/to/spark_home"}
self.assertEqual(connection, expected_spark_connection)
self.assertEqual(cmd[0], '/path/to/spark_home/bin/custom-spark-submit')
def test_resolve_connection_spark_standalone_cluster_connection(self):
# Given
hook = SparkSubmitHook(conn_id='spark_standalone_cluster')
# When
connection = hook._resolve_connection()
cmd = hook._build_spark_submit_command(self._spark_job_file)
# Then
expected_spark_connection = {"master": "spark://spark-standalone-master:6066",
"spark_binary": "spark-submit",
"deploy_mode": "cluster",
"queue": None,
"spark_home": "/path/to/spark_home"}
self.assertEqual(connection, expected_spark_connection)
self.assertEqual(cmd[0], '/path/to/spark_home/bin/spark-submit')
def test_process_spark_submit_log_yarn(self):
# Given
hook = SparkSubmitHook(conn_id='spark_yarn_cluster')
log_lines = [
'SPARK_MAJOR_VERSION is set to 2, using Spark2',
'WARN NativeCodeLoader: Unable to load native-hadoop library for your ' +
'platform... using builtin-java classes where applicable',
'WARN DomainSocketFactory: The short-circuit local reads feature cannot '
'be used because libhadoop cannot be loaded.',
'INFO Client: Requesting a new application from cluster with 10 NodeManagers',
'INFO Client: Submitting application application_1486558679801_1820 ' +
'to ResourceManager'
]
# When
hook._process_spark_submit_log(log_lines)
# Then
self.assertEqual(hook._yarn_application_id, 'application_1486558679801_1820')
def test_process_spark_submit_log_standalone_cluster(self):
# Given
hook = SparkSubmitHook(conn_id='spark_standalone_cluster')
log_lines = [
'Running Spark using the REST application submission protocol.',
'17/11/28 11:14:15 INFO RestSubmissionClient: Submitting a request '
'to launch an application in spark://spark-standalone-master:6066',
'17/11/28 11:14:15 INFO RestSubmissionClient: Submission successfully ' +
'created as driver-20171128111415-0001. Polling submission state...'
]
# When
hook._process_spark_submit_log(log_lines)
# Then
self.assertEqual(hook._driver_id, 'driver-20171128111415-0001')
def test_process_spark_driver_status_log(self):
# Given
hook = SparkSubmitHook(conn_id='spark_standalone_cluster')
log_lines = [
'Submitting a request for the status of submission ' +
'driver-20171128111415-0001 in spark://spark-standalone-master:6066',
'17/11/28 11:15:37 INFO RestSubmissionClient: Server responded with ' +
'SubmissionStatusResponse:',
'{',
'"action" : "SubmissionStatusResponse",',
'"driverState" : "RUNNING",',
'"serverSparkVersion" : "1.6.0",',
'"submissionId" : "driver-20171128111415-0001",',
'"success" : true,',
'"workerHostPort" : "172.18.0.7:38561",',
'"workerId" : "worker-20171128110741-172.18.0.7-38561"',
'}'
]
# When
hook._process_spark_status_log(log_lines)
# Then
self.assertEqual(hook._driver_status, 'RUNNING')
@patch('airflow.contrib.hooks.spark_submit_hook.subprocess.Popen')
def test_yarn_process_on_kill(self, mock_popen):
# Given
mock_popen.return_value.stdout = six.StringIO('stdout')
mock_popen.return_value.stderr = six.StringIO('stderr')
mock_popen.return_value.poll.return_value = None
mock_popen.return_value.wait.return_value = 0
log_lines = [
'SPARK_MAJOR_VERSION is set to 2, using Spark2',
'WARN NativeCodeLoader: Unable to load native-hadoop library for your ' +
'platform... using builtin-java classes where applicable',
'WARN DomainSocketFactory: The short-circuit local reads feature cannot ' +
'be used because libhadoop cannot be loaded.',
'INFO Client: Requesting a new application from cluster with 10 ' +
'NodeManagerapplication_1486558679801_1820s',
'INFO Client: Submitting application application_1486558679801_1820 ' +
'to ResourceManager'
]
hook = SparkSubmitHook(conn_id='spark_yarn_cluster')
hook._process_spark_submit_log(log_lines)
hook.submit()
# When
hook.on_kill()
# Then
self.assertIn(call(['yarn', 'application', '-kill',
'application_1486558679801_1820'],
stderr=-1, stdout=-1),
mock_popen.mock_calls)
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "86063f4e55758d587d2831ab3a339b81",
"timestamp": "",
"source": "github",
"line_count": 447,
"max_line_length": 90,
"avg_line_length": 41.62639821029083,
"alnum_prop": 0.5543612618906864,
"repo_name": "yati-sagade/incubator-airflow",
"id": "6c55ce28e0cc90929aabad683d04a6d6c8689467",
"size": "19175",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tests/contrib/hooks/test_spark_submit_hook.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "57054"
},
{
"name": "HTML",
"bytes": "152530"
},
{
"name": "JavaScript",
"bytes": "1364571"
},
{
"name": "Mako",
"bytes": "1037"
},
{
"name": "Python",
"bytes": "2828163"
},
{
"name": "Shell",
"bytes": "34436"
}
],
"symlink_target": ""
}
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('tourism', '0003_auto_20190306_1417'),
]
operations = [
migrations.AlterModelOptions(
name='touristiccontenttype1',
options={'verbose_name': 'Type1', 'verbose_name_plural': 'First list types'},
),
migrations.AlterModelOptions(
name='touristiccontenttype2',
options={'verbose_name': 'Type2', 'verbose_name_plural': 'Second list types'},
),
migrations.AlterField(
model_name='touristiccontent',
name='type1',
field=models.ManyToManyField(blank=True, db_table='t_r_contenu_touristique_type1', related_name='contents1', to='tourism.TouristicContentType1', verbose_name='Type 1'),
),
migrations.AlterField(
model_name='touristiccontent',
name='type2',
field=models.ManyToManyField(blank=True, db_table='t_r_contenu_touristique_type2', related_name='contents2', to='tourism.TouristicContentType2', verbose_name='Type 2'),
),
]
|
{
"content_hash": "91823db2a1f8a6d75569b62ce2d0289d",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 180,
"avg_line_length": 39,
"alnum_prop": 0.6215738284703802,
"repo_name": "GeotrekCE/Geotrek-admin",
"id": "f03a350395c6ef760026fa83bddfb5dc1f73f878",
"size": "1182",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "geotrek/tourism/migrations/0004_auto_20190322_1908.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "46138"
},
{
"name": "Dockerfile",
"bytes": "1816"
},
{
"name": "HTML",
"bytes": "274524"
},
{
"name": "JavaScript",
"bytes": "231326"
},
{
"name": "Makefile",
"bytes": "1909"
},
{
"name": "PLpgSQL",
"bytes": "78024"
},
{
"name": "Python",
"bytes": "3456569"
},
{
"name": "SCSS",
"bytes": "7179"
},
{
"name": "Shell",
"bytes": "14369"
}
],
"symlink_target": ""
}
|
import sys
import os
import re
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('./../'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
]
autoclass_content = 'both'
autodoc_default_flags = ['members', 'private-members', 'special-members',
'show-inheritance']
# 'undoc-members','show-inheritance']
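# Skip the dunder members listed below, but never skip single-underscore
# ("private") members: any name matching ^_[a-zA-Z].* is kept in the docs.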
def autodoc_skip_member(app, what, name, obj, skip, options):
exclusions = ('__weakref__', '__doc__', '__module__', '__dict__', '__init__')
exclude = name in exclusions
    hidden_regex = re.compile('^_[a-zA-Z].*$')
return (skip or exclude) and not hidden_regex.match(name)
def setup(app):
app.connect('autodoc-skip-member', autodoc_skip_member)
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Deck of Cards'
copyright = u'2015, Suhas Gaddam'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.0.1'
# The full version, including alpha/beta/rc tags.
release = '0.0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'DeckofCardsdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'DeckofCards.tex', u'Deck of Cards Documentation',
u'Suhas Gaddam', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'deckofcards', u'Deck of Cards Documentation',
[u'Suhas Gaddam'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'DeckofCards', u'Deck of Cards Documentation',
u'Suhas Gaddam', 'DeckofCards', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
|
{
"content_hash": "d035820e32a07b1eaa143be494143f70",
"timestamp": "",
"source": "github",
"line_count": 270,
"max_line_length": 81,
"avg_line_length": 31.925925925925927,
"alnum_prop": 0.6975638051044084,
"repo_name": "suhasgaddam/deck-of-cards-python",
"id": "b5952cef66199a561206774e3e8d500272aaf057",
"size": "9047",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "6788"
},
{
"name": "Python",
"bytes": "33123"
}
],
"symlink_target": ""
}
|
import uuid
from lxml import etree
import mock
from neutronclient.common import exceptions as n_exc
from neutronclient.neutron import v2_0 as neutronv20
from oslo.config import cfg
from oslo.serialization import jsonutils
import webob
from nova.api.openstack.compute.contrib import security_groups
from nova.api.openstack import xmlutil
from nova import compute
from nova import context
import nova.db
from nova import exception
from nova.network import model
from nova.network import neutronv2
from nova.network.neutronv2 import api as neutron_api
from nova.network.security_group import neutron_driver
from nova.objects import instance as instance_obj
from nova import test
from nova.tests.api.openstack.compute.contrib import test_security_groups
from nova.tests.api.openstack import fakes
class TestNeutronSecurityGroupsTestCase(test.TestCase):
def setUp(self):
super(TestNeutronSecurityGroupsTestCase, self).setUp()
cfg.CONF.set_override('security_group_api', 'neutron')
self.original_client = neutronv2.get_client
neutronv2.get_client = get_client
def tearDown(self):
neutronv2.get_client = self.original_client
get_client()._reset()
super(TestNeutronSecurityGroupsTestCase, self).tearDown()
class TestNeutronSecurityGroupsV21(
test_security_groups.TestSecurityGroupsV21,
TestNeutronSecurityGroupsTestCase):
def _create_sg_template(self, **kwargs):
sg = test_security_groups.security_group_template(**kwargs)
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
return self.controller.create(req, {'security_group': sg})
def _create_network(self):
body = {'network': {'name': 'net1'}}
neutron = get_client()
net = neutron.create_network(body)
body = {'subnet': {'network_id': net['network']['id'],
'cidr': '10.0.0.0/24'}}
neutron.create_subnet(body)
return net
def _create_port(self, **kwargs):
body = {'port': {'binding:vnic_type': model.VNIC_TYPE_NORMAL}}
fields = ['security_groups', 'device_id', 'network_id',
'port_security_enabled']
for field in fields:
if field in kwargs:
body['port'][field] = kwargs[field]
neutron = get_client()
return neutron.create_port(body)
def _create_security_group(self, **kwargs):
body = {'security_group': {}}
fields = ['name', 'description']
for field in fields:
if field in kwargs:
body['security_group'][field] = kwargs[field]
neutron = get_client()
return neutron.create_security_group(body)
def test_create_security_group_with_no_description(self):
# Neutron's security group description field is optional.
pass
def test_create_security_group_with_empty_description(self):
# Neutron's security group description field is optional.
pass
def test_create_security_group_with_blank_name(self):
# Neutron's security group name field is optional.
pass
def test_create_security_group_with_whitespace_name(self):
# Neutron allows security group name to be whitespace.
pass
def test_create_security_group_with_blank_description(self):
# Neutron's security group description field is optional.
pass
def test_create_security_group_with_whitespace_description(self):
# Neutron allows description to be whitespace.
pass
def test_create_security_group_with_duplicate_name(self):
# Neutron allows duplicate names for security groups.
pass
def test_create_security_group_non_string_name(self):
# Neutron allows security group name to be non string.
pass
def test_create_security_group_non_string_description(self):
# Neutron allows non string description.
pass
def test_create_security_group_quota_limit(self):
# Enforced by Neutron server.
pass
def test_update_security_group(self):
# Enforced by Neutron server.
pass
def test_get_security_group_list(self):
self._create_sg_template().get('security_group')
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
list_dict = self.controller.index(req)
self.assertEqual(len(list_dict['security_groups']), 2)
def test_get_security_group_list_all_tenants(self):
pass
def test_get_security_group_by_instance(self):
sg = self._create_sg_template().get('security_group')
net = self._create_network()
self._create_port(
network_id=net['network']['id'], security_groups=[sg['id']],
device_id=test_security_groups.FAKE_UUID1)
expected = [{'rules': [], 'tenant_id': 'fake', 'id': sg['id'],
'name': 'test', 'description': 'test-description'}]
self.stubs.Set(nova.db, 'instance_get_by_uuid',
test_security_groups.return_server_by_uuid)
req = fakes.HTTPRequest.blank('/v2/fake/servers/%s/os-security-groups'
% test_security_groups.FAKE_UUID1)
res_dict = self.server_controller.index(
req, test_security_groups.FAKE_UUID1)['security_groups']
self.assertEqual(expected, res_dict)
def test_get_security_group_by_id(self):
sg = self._create_sg_template().get('security_group')
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/%s'
% sg['id'])
res_dict = self.controller.show(req, sg['id'])
expected = {'security_group': sg}
self.assertEqual(res_dict, expected)
def test_delete_security_group_by_id(self):
sg = self._create_sg_template().get('security_group')
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/%s' %
sg['id'])
self.controller.delete(req, sg['id'])
def test_delete_security_group_by_admin(self):
sg = self._create_sg_template().get('security_group')
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/%s' %
sg['id'], use_admin_context=True)
self.controller.delete(req, sg['id'])
def test_delete_security_group_in_use(self):
sg = self._create_sg_template().get('security_group')
self._create_network()
db_inst = fakes.stub_instance(id=1, nw_cache=[], security_groups=[])
_context = context.get_admin_context()
instance = instance_obj.Instance._from_db_object(
_context, instance_obj.Instance(), db_inst,
expected_attrs=instance_obj.INSTANCE_DEFAULT_FIELDS)
neutron = neutron_api.API()
with mock.patch.object(nova.db, 'instance_get_by_uuid',
return_value=db_inst):
neutron.allocate_for_instance(_context, instance,
security_groups=[sg['id']])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/%s'
% sg['id'])
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.delete,
req, sg['id'])
def test_associate_non_running_instance(self):
        # Neutron does not care if the instance is running or not. When the
        # instance is detected by neutron it will push down the security
        # group policy to it.
pass
def test_associate_already_associated_security_group_to_instance(self):
        # Neutron security groups do not raise an error if you update a
        # port, adding a security group that was already associated with
        # the port. This is because PUT semantics are used.
pass
def test_associate(self):
sg = self._create_sg_template().get('security_group')
net = self._create_network()
self._create_port(
network_id=net['network']['id'], security_groups=[sg['id']],
device_id=test_security_groups.FAKE_UUID1)
self.stubs.Set(nova.db, 'instance_get',
test_security_groups.return_server)
body = dict(addSecurityGroup=dict(name="test"))
req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
self.manager._addSecurityGroup(req, '1', body)
def test_associate_duplicate_names(self):
sg1 = self._create_security_group(name='sg1',
description='sg1')['security_group']
self._create_security_group(name='sg1',
description='sg1')['security_group']
net = self._create_network()
self._create_port(
network_id=net['network']['id'], security_groups=[sg1['id']],
device_id=test_security_groups.FAKE_UUID1)
self.stubs.Set(nova.db, 'instance_get',
test_security_groups.return_server)
body = dict(addSecurityGroup=dict(name="sg1"))
req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
self.assertRaises(webob.exc.HTTPConflict,
self.manager._addSecurityGroup, req, '1', body)
def test_associate_port_security_enabled_true(self):
sg = self._create_sg_template().get('security_group')
net = self._create_network()
self._create_port(
network_id=net['network']['id'], security_groups=[sg['id']],
port_security_enabled=True,
device_id=test_security_groups.FAKE_UUID1)
self.stubs.Set(nova.db, 'instance_get',
test_security_groups.return_server)
body = dict(addSecurityGroup=dict(name="test"))
req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
self.manager._addSecurityGroup(req, '1', body)
def test_associate_port_security_enabled_false(self):
self._create_sg_template().get('security_group')
net = self._create_network()
self._create_port(
network_id=net['network']['id'], port_security_enabled=False,
device_id=test_security_groups.FAKE_UUID1)
self.stubs.Set(nova.db, 'instance_get',
test_security_groups.return_server)
body = dict(addSecurityGroup=dict(name="test"))
req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
self.assertRaises(webob.exc.HTTPBadRequest,
self.manager._addSecurityGroup,
req, '1', body)
def test_disassociate_by_non_existing_security_group_name(self):
self.stubs.Set(nova.db, 'instance_get',
test_security_groups.return_server)
body = dict(removeSecurityGroup=dict(name='non-existing'))
req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
self.assertRaises(webob.exc.HTTPNotFound,
self.manager._removeSecurityGroup, req, '1', body)
def test_disassociate_non_running_instance(self):
        # Neutron does not care if the instance is running or not. When the
        # instance is detected by neutron it will push down the security
        # group policy to it.
pass
def test_disassociate_already_associated_security_group_to_instance(self):
        # Neutron security groups do not raise an error if you update a
        # port, adding a security group that was already associated with
        # the port. This is because PUT semantics are used.
pass
def test_disassociate(self):
sg = self._create_sg_template().get('security_group')
net = self._create_network()
self._create_port(
network_id=net['network']['id'], security_groups=[sg['id']],
device_id=test_security_groups.FAKE_UUID1)
self.stubs.Set(nova.db, 'instance_get',
test_security_groups.return_server)
body = dict(removeSecurityGroup=dict(name="test"))
req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
self.manager._removeSecurityGroup(req, '1', body)
def test_get_raises_no_unique_match_error(self):
def fake_find_resourceid_by_name_or_id(client, param, name,
project_id=None):
raise n_exc.NeutronClientNoUniqueMatch()
self.stubs.Set(neutronv20, 'find_resourceid_by_name_or_id',
fake_find_resourceid_by_name_or_id)
security_group_api = self.controller.security_group_api
self.assertRaises(exception.NoUniqueMatch, security_group_api.get,
context.get_admin_context(), 'foobar')
def test_get_instances_security_groups_bindings(self):
servers = [{'id': test_security_groups.FAKE_UUID1},
{'id': test_security_groups.FAKE_UUID2}]
sg1 = self._create_sg_template(name='test1').get('security_group')
sg2 = self._create_sg_template(name='test2').get('security_group')
# test name='' is replaced with id
sg3 = self._create_sg_template(name='').get('security_group')
net = self._create_network()
self._create_port(
network_id=net['network']['id'], security_groups=[sg1['id'],
sg2['id']],
device_id=test_security_groups.FAKE_UUID1)
self._create_port(
network_id=net['network']['id'], security_groups=[sg2['id'],
sg3['id']],
device_id=test_security_groups.FAKE_UUID2)
expected = {test_security_groups.FAKE_UUID1: [{'name': sg1['name']},
{'name': sg2['name']}],
test_security_groups.FAKE_UUID2: [{'name': sg2['name']},
{'name': sg3['id']}]}
security_group_api = self.controller.security_group_api
bindings = (
security_group_api.get_instances_security_groups_bindings(
context.get_admin_context(), servers))
self.assertEqual(bindings, expected)
def test_get_instance_security_groups(self):
sg1 = self._create_sg_template(name='test1').get('security_group')
sg2 = self._create_sg_template(name='test2').get('security_group')
# test name='' is replaced with id
sg3 = self._create_sg_template(name='').get('security_group')
net = self._create_network()
self._create_port(
network_id=net['network']['id'], security_groups=[sg1['id'],
sg2['id'],
sg3['id']],
device_id=test_security_groups.FAKE_UUID1)
expected = [{'name': sg1['name']}, {'name': sg2['name']},
{'name': sg3['id']}]
security_group_api = self.controller.security_group_api
sgs = security_group_api.get_instance_security_groups(
context.get_admin_context(), test_security_groups.FAKE_UUID1)
self.assertEqual(sgs, expected)
@mock.patch('nova.network.security_group.neutron_driver.SecurityGroupAPI.'
'get_instances_security_groups_bindings')
def test_get_security_group_empty_for_instance(self, neutron_sg_bind_mock):
servers = [{'id': test_security_groups.FAKE_UUID1}]
neutron_sg_bind_mock.return_value = {}
security_group_api = self.controller.security_group_api
ctx = context.get_admin_context()
sgs = security_group_api.get_instance_security_groups(ctx,
test_security_groups.FAKE_UUID1)
neutron_sg_bind_mock.assert_called_once_with(ctx, servers, False)
self.assertEqual([], sgs)
def test_create_port_with_sg_and_port_security_enabled_true(self):
sg1 = self._create_sg_template(name='test1').get('security_group')
net = self._create_network()
self._create_port(
network_id=net['network']['id'], security_groups=[sg1['id']],
port_security_enabled=True,
device_id=test_security_groups.FAKE_UUID1)
security_group_api = self.controller.security_group_api
sgs = security_group_api.get_instance_security_groups(
context.get_admin_context(), test_security_groups.FAKE_UUID1)
self.assertEqual(sgs, [{'name': 'test1'}])
def test_create_port_with_sg_and_port_security_enabled_false(self):
sg1 = self._create_sg_template(name='test1').get('security_group')
net = self._create_network()
self.assertRaises(exception.SecurityGroupCannotBeApplied,
self._create_port,
network_id=net['network']['id'],
security_groups=[sg1['id']],
port_security_enabled=False,
device_id=test_security_groups.FAKE_UUID1)
class TestNeutronSecurityGroupsV2(TestNeutronSecurityGroupsV21):
controller_cls = security_groups.SecurityGroupController
server_secgrp_ctl_cls = security_groups.ServerSecurityGroupController
secgrp_act_ctl_cls = security_groups.SecurityGroupActionController
class TestNeutronSecurityGroupRulesTestCase(TestNeutronSecurityGroupsTestCase):
def setUp(self):
super(TestNeutronSecurityGroupRulesTestCase, self).setUp()
id1 = '11111111-1111-1111-1111-111111111111'
sg_template1 = test_security_groups.security_group_template(
security_group_rules=[], id=id1)
id2 = '22222222-2222-2222-2222-222222222222'
sg_template2 = test_security_groups.security_group_template(
security_group_rules=[], id=id2)
self.controller_sg = security_groups.SecurityGroupController()
neutron = get_client()
neutron._fake_security_groups[id1] = sg_template1
neutron._fake_security_groups[id2] = sg_template2
def tearDown(self):
neutronv2.get_client = self.original_client
get_client()._reset()
super(TestNeutronSecurityGroupsTestCase, self).tearDown()
class TestNeutronSecurityGroupRules(
test_security_groups.TestSecurityGroupRules,
TestNeutronSecurityGroupRulesTestCase):
def test_create_add_existing_rules_by_cidr(self):
sg = test_security_groups.security_group_template()
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
self.controller_sg.create(req, {'security_group': sg})
rule = test_security_groups.security_group_rule_template(
cidr='15.0.0.0/8', parent_group_id=self.sg2['id'])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.controller.create(req, {'security_group_rule': rule})
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_rule': rule})
def test_create_add_existing_rules_by_group_id(self):
sg = test_security_groups.security_group_template()
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
self.controller_sg.create(req, {'security_group': sg})
rule = test_security_groups.security_group_rule_template(
group=self.sg1['id'], parent_group_id=self.sg2['id'])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.controller.create(req, {'security_group_rule': rule})
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_rule': rule})
def test_delete(self):
rule = test_security_groups.security_group_rule_template(
parent_group_id=self.sg2['id'])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
res_dict = self.controller.create(req, {'security_group_rule': rule})
security_group_rule = res_dict['security_group_rule']
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules/%s'
% security_group_rule['id'])
self.controller.delete(req, security_group_rule['id'])
def test_create_rule_quota_limit(self):
# Enforced by neutron
pass
class TestNeutronSecurityGroupsXMLDeserializer(
test_security_groups.TestSecurityGroupXMLDeserializer,
TestNeutronSecurityGroupsTestCase):
pass
class TestNeutronSecurityGroupsXMLSerializer(
test_security_groups.TestSecurityGroupXMLSerializer,
TestNeutronSecurityGroupsTestCase):
pass
class TestNeutronSecurityGroupsOutputTest(TestNeutronSecurityGroupsTestCase):
content_type = 'application/json'
def setUp(self):
super(TestNeutronSecurityGroupsOutputTest, self).setUp()
fakes.stub_out_nw_api(self.stubs)
self.controller = security_groups.SecurityGroupController()
self.stubs.Set(compute.api.API, 'get',
test_security_groups.fake_compute_get)
self.stubs.Set(compute.api.API, 'get_all',
test_security_groups.fake_compute_get_all)
self.stubs.Set(compute.api.API, 'create',
test_security_groups.fake_compute_create)
self.stubs.Set(neutron_driver.SecurityGroupAPI,
'get_instances_security_groups_bindings',
(test_security_groups.
fake_get_instances_security_groups_bindings))
self.flags(
osapi_compute_extension=[
'nova.api.openstack.compute.contrib.select_extensions'],
osapi_compute_ext_list=['Security_groups'])
def _make_request(self, url, body=None):
req = webob.Request.blank(url)
if body:
req.method = 'POST'
req.body = self._encode_body(body)
req.content_type = self.content_type
req.headers['Accept'] = self.content_type
res = req.get_response(fakes.wsgi_app(init_only=('servers',)))
return res
def _encode_body(self, body):
return jsonutils.dumps(body)
def _get_server(self, body):
return jsonutils.loads(body).get('server')
def _get_servers(self, body):
return jsonutils.loads(body).get('servers')
def _get_groups(self, server):
return server.get('security_groups')
def test_create(self):
url = '/v2/fake/servers'
image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
security_groups = [{'name': 'fake-2-0'}, {'name': 'fake-2-1'}]
for security_group in security_groups:
sg = test_security_groups.security_group_template(
name=security_group['name'])
self.controller.create(req, {'security_group': sg})
server = dict(name='server_test', imageRef=image_uuid, flavorRef=2,
security_groups=security_groups)
res = self._make_request(url, {'server': server})
self.assertEqual(res.status_int, 202)
server = self._get_server(res.body)
for i, group in enumerate(self._get_groups(server)):
name = 'fake-2-%s' % i
self.assertEqual(group.get('name'), name)
def test_create_server_get_default_security_group(self):
url = '/v2/fake/servers'
image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
server = dict(name='server_test', imageRef=image_uuid, flavorRef=2)
res = self._make_request(url, {'server': server})
self.assertEqual(res.status_int, 202)
server = self._get_server(res.body)
group = self._get_groups(server)[0]
self.assertEqual(group.get('name'), 'default')
def test_show(self):
def fake_get_instance_security_groups(inst, context, id):
return [{'name': 'fake-2-0'}, {'name': 'fake-2-1'}]
self.stubs.Set(neutron_driver.SecurityGroupAPI,
'get_instance_security_groups',
fake_get_instance_security_groups)
url = '/v2/fake/servers'
image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
security_groups = [{'name': 'fake-2-0'}, {'name': 'fake-2-1'}]
for security_group in security_groups:
sg = test_security_groups.security_group_template(
name=security_group['name'])
self.controller.create(req, {'security_group': sg})
server = dict(name='server_test', imageRef=image_uuid, flavorRef=2,
security_groups=security_groups)
res = self._make_request(url, {'server': server})
self.assertEqual(res.status_int, 202)
server = self._get_server(res.body)
for i, group in enumerate(self._get_groups(server)):
name = 'fake-2-%s' % i
self.assertEqual(group.get('name'), name)
# Test that show (GET) returns the same information as create (POST)
url = '/v2/fake/servers/' + test_security_groups.UUID3
res = self._make_request(url)
self.assertEqual(res.status_int, 200)
server = self._get_server(res.body)
for i, group in enumerate(self._get_groups(server)):
name = 'fake-2-%s' % i
self.assertEqual(group.get('name'), name)
def test_detail(self):
url = '/v2/fake/servers/detail'
res = self._make_request(url)
self.assertEqual(res.status_int, 200)
for i, server in enumerate(self._get_servers(res.body)):
for j, group in enumerate(self._get_groups(server)):
name = 'fake-%s-%s' % (i, j)
self.assertEqual(group.get('name'), name)
def test_no_instance_passthrough_404(self):
def fake_compute_get(*args, **kwargs):
raise exception.InstanceNotFound(instance_id='fake')
self.stubs.Set(compute.api.API, 'get', fake_compute_get)
url = '/v2/fake/servers/70f6db34-de8d-4fbd-aafb-4065bdfa6115'
res = self._make_request(url)
self.assertEqual(res.status_int, 404)
class TestNeutronSecurityGroupsOutputXMLTest(
TestNeutronSecurityGroupsOutputTest):
content_type = 'application/xml'
class MinimalCreateServerTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('server', selector='server')
root.set('name')
root.set('id')
root.set('imageRef')
root.set('flavorRef')
elem = xmlutil.SubTemplateElement(root, 'security_groups')
sg = xmlutil.SubTemplateElement(elem, 'security_group',
selector='security_groups')
sg.set('name')
return xmlutil.MasterTemplate(root, 1,
nsmap={None: xmlutil.XMLNS_V11})
def _encode_body(self, body):
serializer = self.MinimalCreateServerTemplate()
return serializer.serialize(body)
def _get_server(self, body):
return etree.XML(body)
def _get_servers(self, body):
return etree.XML(body).getchildren()
def _get_groups(self, server):
# NOTE(vish): we are adding security groups without an extension
# namespace so we don't break people using the existing
# functionality, but that means we need to use find with
# the existing server namespace.
namespace = server.nsmap[None]
return server.find('{%s}security_groups' % namespace).getchildren()
def get_client(context=None, admin=False):
return MockClient()
class MockClient(object):
# Needs to be global to survive multiple calls to get_client.
_fake_security_groups = {}
_fake_ports = {}
_fake_networks = {}
_fake_subnets = {}
_fake_security_group_rules = {}
def __init__(self):
# add default security group
if not len(self._fake_security_groups):
ret = {'name': 'default', 'description': 'default',
'tenant_id': 'fake_tenant', 'security_group_rules': [],
'id': str(uuid.uuid4())}
self._fake_security_groups[ret['id']] = ret
def _reset(self):
self._fake_security_groups.clear()
self._fake_ports.clear()
self._fake_networks.clear()
self._fake_subnets.clear()
self._fake_security_group_rules.clear()
def create_security_group(self, body=None):
s = body.get('security_group')
if len(s.get('name')) > 255 or len(s.get('description')) > 255:
            msg = 'Security Group name or description greater than 255'
raise n_exc.NeutronClientException(message=msg, status_code=401)
ret = {'name': s.get('name'), 'description': s.get('description'),
'tenant_id': 'fake', 'security_group_rules': [],
'id': str(uuid.uuid4())}
self._fake_security_groups[ret['id']] = ret
return {'security_group': ret}
def create_network(self, body):
n = body.get('network')
ret = {'status': 'ACTIVE', 'subnets': [], 'name': n.get('name'),
'admin_state_up': n.get('admin_state_up', True),
'tenant_id': 'fake_tenant',
'id': str(uuid.uuid4())}
if 'port_security_enabled' in n:
ret['port_security_enabled'] = n['port_security_enabled']
self._fake_networks[ret['id']] = ret
return {'network': ret}
def create_subnet(self, body):
s = body.get('subnet')
try:
net = self._fake_networks[s.get('network_id')]
except KeyError:
msg = 'Network %s not found' % s.get('network_id')
raise n_exc.NeutronClientException(message=msg, status_code=404)
ret = {'name': s.get('name'), 'network_id': s.get('network_id'),
'tenant_id': 'fake_tenant', 'cidr': s.get('cidr'),
'id': str(uuid.uuid4()), 'gateway_ip': '10.0.0.1'}
net['subnets'].append(ret['id'])
self._fake_networks[net['id']] = net
self._fake_subnets[ret['id']] = ret
return {'subnet': ret}
def create_port(self, body):
p = body.get('port')
ret = {'status': 'ACTIVE', 'id': str(uuid.uuid4()),
'mac_address': p.get('mac_address', 'fa:16:3e:b8:f5:fb'),
'device_id': p.get('device_id', str(uuid.uuid4())),
'admin_state_up': p.get('admin_state_up', True),
'security_groups': p.get('security_groups', []),
'network_id': p.get('network_id'),
'binding:vnic_type':
p.get('binding:vnic_type') or model.VNIC_TYPE_NORMAL}
network = self._fake_networks[p['network_id']]
if 'port_security_enabled' in p:
ret['port_security_enabled'] = p['port_security_enabled']
elif 'port_security_enabled' in network:
ret['port_security_enabled'] = network['port_security_enabled']
port_security = ret.get('port_security_enabled', True)
# port_security must be True if security groups are present
if not port_security and ret['security_groups']:
raise exception.SecurityGroupCannotBeApplied()
if network['subnets']:
ret['fixed_ips'] = [{'subnet_id': network['subnets'][0],
'ip_address': '10.0.0.1'}]
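        # Mimics real Neutron behaviour: a port created without explicit
        # security groups falls back to the tenant's 'default' group,
        # unless port security is disabled.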
if not ret['security_groups'] and (port_security is None or
port_security is True):
for security_group in self._fake_security_groups.values():
if security_group['name'] == 'default':
ret['security_groups'] = [security_group['id']]
break
self._fake_ports[ret['id']] = ret
return {'port': ret}
def create_security_group_rule(self, body):
# does not handle bulk case so just picks rule[0]
r = body.get('security_group_rules')[0]
fields = ['direction', 'protocol', 'port_range_min', 'port_range_max',
'ethertype', 'remote_ip_prefix', 'tenant_id',
'security_group_id', 'remote_group_id']
ret = {}
for field in fields:
ret[field] = r.get(field)
ret['id'] = str(uuid.uuid4())
self._fake_security_group_rules[ret['id']] = ret
return {'security_group_rules': [ret]}
def show_security_group(self, security_group, **_params):
try:
sg = self._fake_security_groups[security_group]
except KeyError:
msg = 'Security Group %s not found' % security_group
raise n_exc.NeutronClientException(message=msg, status_code=404)
for security_group_rule in self._fake_security_group_rules.values():
if security_group_rule['security_group_id'] == sg['id']:
sg['security_group_rules'].append(security_group_rule)
return {'security_group': sg}
def show_security_group_rule(self, security_group_rule, **_params):
try:
return {'security_group_rule':
self._fake_security_group_rules[security_group_rule]}
except KeyError:
msg = 'Security Group rule %s not found' % security_group_rule
raise n_exc.NeutronClientException(message=msg, status_code=404)
def show_network(self, network, **_params):
try:
return {'network':
self._fake_networks[network]}
except KeyError:
msg = 'Network %s not found' % network
raise n_exc.NeutronClientException(message=msg, status_code=404)
def show_port(self, port, **_params):
try:
return {'port':
self._fake_ports[port]}
except KeyError:
msg = 'Port %s not found' % port
raise n_exc.NeutronClientException(message=msg, status_code=404)
def show_subnet(self, subnet, **_params):
try:
return {'subnet':
self._fake_subnets[subnet]}
except KeyError:
            msg = 'Subnet %s not found' % subnet
raise n_exc.NeutronClientException(message=msg, status_code=404)
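    # The filters below accept scalar or list 'name'/'id' params; with no
    # filters at all, every stored security group is returned.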
def list_security_groups(self, **_params):
ret = []
for security_group in self._fake_security_groups.values():
names = _params.get('name')
if names:
if not isinstance(names, list):
names = [names]
for name in names:
if security_group.get('name') == name:
ret.append(security_group)
ids = _params.get('id')
if ids:
if not isinstance(ids, list):
ids = [ids]
for id in ids:
if security_group.get('id') == id:
ret.append(security_group)
elif not (names or ids):
ret.append(security_group)
return {'security_groups': ret}
def list_networks(self, **_params):
# neutronv2/api.py _get_available_networks calls this assuming
# search_opts filter "shared" is implemented and not ignored
shared = _params.get("shared", None)
if shared:
return {'networks': []}
else:
return {'networks':
[network for network in self._fake_networks.values()]}
def list_ports(self, **_params):
ret = []
device_id = _params.get('device_id')
for port in self._fake_ports.values():
if device_id:
if port['device_id'] in device_id:
ret.append(port)
else:
ret.append(port)
return {'ports': ret}
def list_subnets(self, **_params):
return {'subnets':
[subnet for subnet in self._fake_subnets.values()]}
def list_floatingips(self, **_params):
return {'floatingips': []}
def delete_security_group(self, security_group):
self.show_security_group(security_group)
ports = self.list_ports()
for port in ports.get('ports'):
for sg_port in port['security_groups']:
if sg_port == security_group:
msg = ('Unable to delete Security group %s in use'
% security_group)
raise n_exc.NeutronClientException(message=msg,
status_code=409)
del self._fake_security_groups[security_group]
def delete_security_group_rule(self, security_group_rule):
self.show_security_group_rule(security_group_rule)
del self._fake_security_group_rules[security_group_rule]
def delete_network(self, network):
self.show_network(network)
self._check_ports_on_network(network)
for subnet in self._fake_subnets.values():
if subnet['network_id'] == network:
del self._fake_subnets[subnet['id']]
del self._fake_networks[network]
def delete_subnet(self, subnet):
subnet = self.show_subnet(subnet).get('subnet')
self._check_ports_on_network(subnet['network_id'])
        del self._fake_subnets[subnet['id']]
def delete_port(self, port):
self.show_port(port)
del self._fake_ports[port]
def update_port(self, port, body=None):
self.show_port(port)
self._fake_ports[port].update(body['port'])
return {'port': self._fake_ports[port]}
def list_extensions(self, **_parms):
return {'extensions': []}
    def _check_ports_on_network(self, network):
        ports = self.list_ports()
        for port in ports.get('ports'):
            if port['network_id'] == network:
                msg = ('Unable to complete operation on network %s. There '
                       'are one or more ports still in use on the network'
                       % network)
                raise n_exc.NeutronClientException(message=msg,
                                                   status_code=409)
|
{
"content_hash": "b6b46b1a81b3bea4bf3ea3fd2fe502dd",
"timestamp": "",
"source": "github",
"line_count": 892,
"max_line_length": 79,
"avg_line_length": 42.80156950672646,
"alnum_prop": 0.5940962309122816,
"repo_name": "badock/nova",
"id": "95968f13a27df5af82800308608d1b6c293abcf7",
"size": "38805",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nova/tests/api/openstack/compute/contrib/test_neutron_security_groups.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Groff",
"bytes": "112"
},
{
"name": "PLpgSQL",
"bytes": "2958"
},
{
"name": "Python",
"bytes": "15441440"
},
{
"name": "Shell",
"bytes": "20796"
},
{
"name": "Smarty",
"bytes": "693857"
}
],
"symlink_target": ""
}
|
import sys
import os
import time
path = os.getcwd()
if path not in sys.path:
sys.path.append(path)
from package_tracking.component.package_tracking import PackageTrackingComponentImpl
if __name__ == '__main__':
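    # Poll indefinitely: rebuild the tracking component and refresh
    # subscriptions once a minute.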
while True:
package_tracking_impl = PackageTrackingComponentImpl(create_table=True, mojoqq_host='127.0.0.1')
package_tracking_impl.update_subscribed_package()
time.sleep(60)
|
{
"content_hash": "a3134e033287dbc3abf876cf228de785",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 104,
"avg_line_length": 24.764705882352942,
"alnum_prop": 0.7149643705463183,
"repo_name": "lyrl/package_tracking",
"id": "a31138c5343516ff2da37a59f79f837899e2fd47",
"size": "502",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "package_tracking/main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "46337"
}
],
"symlink_target": ""
}
|
from bespin.collector import Collector
from bespin import helpers as hp
from docutils.statemachine import ViewList
from sphinx.util.compat import Directive
from textwrap import dedent
from docutils import nodes
class ShowTasksDirective(Directive):
"""Directive for outputting all the default bespin tasks"""
has_content = True
def run(self):
"""For each file in noseOfYeti/specs, output nodes to represent each spec file"""
with hp.a_temp_file() as fle:
fle.write(dedent("""
---
environments: { dev: {account_id: "123"} }
stacks: { app: {} }
""").encode('utf-8'))
fle.seek(0)
collector = Collector()
collector.prepare(fle.name, {'bespin': {'extra': ""}, "command": None, "bash": None})
section = nodes.section()
section['ids'].append("available-tasks")
title = nodes.title()
title += nodes.Text("Default tasks")
section += title
for name, task in sorted(collector.configuration['task_finder'].tasks.items(), key=lambda x: len(x[0])):
lines = [name] + [" {0}".format(line.strip()) for line in task.description.split('\n')]
viewlist = ViewList()
for line in lines:
viewlist.append(line, name)
self.state.nested_parse(viewlist, self.content_offset, section)
return [section]
def setup(app):
"""Setup the show_specs directive"""
app.add_directive('show_tasks', ShowTasksDirective)
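# Usage sketch: with this module on sys.path and added to the `extensions`
# list in Sphinx's conf.py, the directive registered above can be invoked
# from any reStructuredText page:
#
#   .. show_tasks::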
|
{
"content_hash": "7132b8eeccb5bc31e74efcf955aa9ae3",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 112,
"avg_line_length": 34.48888888888889,
"alnum_prop": 0.601159793814433,
"repo_name": "delfick/bespin",
"id": "3fc219fc1b5dadd37ed1db90d8802f6630324943",
"size": "1552",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "docs/sphinx/ext/show_tasks.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "333556"
},
{
"name": "Shell",
"bytes": "119"
}
],
"symlink_target": ""
}
|
import inspect
from .base import DebugPanel
__all__ = ['RoutesDebugPanel']
class RoutesDebugPanel(DebugPanel):
"""
A panel to display the routes used by your aiohttp application.
"""
name = 'Routes'
has_content = True
template = 'routes.jinja2'
title = 'Routes'
nav_title = title
def __init__(self, request):
super().__init__(request)
self.populate(request)
def populate(self, request):
info = []
router = request.app.router
for route in router.routes():
info.append({
"name": route.name or '',
"method": route.method,
"info": sorted(route.get_info().items()),
"handler": repr(route.handler),
"source": inspect.getsource(route.handler)
})
self.data = {'routes': info}
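# Sketch of the template context produced above (the shape follows
# populate(); the example values are hypothetical):
#
#   self.data == {'routes': [
#       {'name': 'index', 'method': 'GET', 'info': [...],
#        'handler': '<function index ...>', 'source': '<handler source>'},
#       ...,
#   ]}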
|
{
"content_hash": "23d15c24f5e4e4b953709472f61d8b2a",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 67,
"avg_line_length": 24.13888888888889,
"alnum_prop": 0.5454545454545454,
"repo_name": "realer01/aiohttp-debugtoolbar",
"id": "096a8105bd98cfd77a2980ff8c8fa8a28551d3cb",
"size": "869",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "aiohttp_debugtoolbar/panels/routes.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "16618"
},
{
"name": "HTML",
"bytes": "692"
},
{
"name": "JavaScript",
"bytes": "409302"
},
{
"name": "Makefile",
"bytes": "1016"
},
{
"name": "Mako",
"bytes": "140"
},
{
"name": "Python",
"bytes": "104024"
}
],
"symlink_target": ""
}
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('jobs', '0019_job_submitted_by'),
]
operations = [
migrations.AlterField(
model_name='job',
name='url',
field=models.URLField(null=True, verbose_name='URL'),
),
]
|
{
"content_hash": "7c01673e577e4cbc65c852871a1907a1",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 65,
"avg_line_length": 21.3125,
"alnum_prop": 0.5601173020527859,
"repo_name": "proevo/pythondotorg",
"id": "d6c9c26e981ee84a1a5f2ef2e9a049a9ecdb6e7b",
"size": "391",
"binary": false,
"copies": "3",
"ref": "refs/heads/dependabot/pip/django-allauth-0.51.0",
"path": "jobs/migrations/0020_auto_20191101_1601.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "711916"
},
{
"name": "JavaScript",
"bytes": "314514"
},
{
"name": "Makefile",
"bytes": "6811"
},
{
"name": "Python",
"bytes": "1448691"
},
{
"name": "Ruby",
"bytes": "218314"
},
{
"name": "Shell",
"bytes": "6730"
}
],
"symlink_target": ""
}
|
from __future__ import division, unicode_literals
"""
This module implements classes to perform bond valence analyses.
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "Oct 26, 2012"
import collections
import numpy as np
import operator
import os
from math import exp, sqrt
from six.moves import filter
from six.moves import zip
from monty.serialization import loadfn
import six
from pymatgen.core.periodic_table import Element, Specie
from pymatgen.core.structure import Structure
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.core.periodic_table import get_el_sp
#Let's initialize some module level properties.
#List of electronegative elements specified in M. O'Keefe, & N. Brese,
#JACS, 1991, 113(9), 3226-3229. doi:10.1021/ja00009a002.
ELECTRONEG = [Element(sym) for sym in ["H", "B", "C", "Si",
"N", "P", "As", "Sb",
"O", "S", "Se", "Te",
"F", "Cl", "Br", "I"]]
module_dir = os.path.dirname(os.path.abspath(__file__))
#Read in BV parameters.
BV_PARAMS = {}
for k, v in loadfn(os.path.join(module_dir, "bvparam_1991.yaml")).items():
BV_PARAMS[Element(k)] = v
#Read in yaml containing data-mined ICSD BV data.
all_data = loadfn(os.path.join(module_dir, "icsd_bv.yaml"))
ICSD_BV_DATA = {Specie.from_string(sp): data
for sp, data in all_data["bvsum"].items()}
PRIOR_PROB = {Specie.from_string(sp): data
for sp, data in all_data["occurrence"].items()}
def calculate_bv_sum(site, nn_list, scale_factor=1.0):
"""
Calculates the BV sum of a site.
    Args:
        site:
            The site.
        nn_list:
            List of nearest neighbors in the format [(nn_site, dist), ...].
        scale_factor:
            A scale factor to be applied. This is useful for scaling
            distances, especially for calculation-relaxed structures, which
            may tend to under-bind (GGA) or over-bind (LDA).
"""
el1 = Element(site.specie.symbol)
bvsum = 0
for (nn, dist) in nn_list:
el2 = Element(nn.specie.symbol)
if (el1 in ELECTRONEG or el2 in ELECTRONEG) and el1 != el2:
r1 = BV_PARAMS[el1]["r"]
r2 = BV_PARAMS[el2]["r"]
c1 = BV_PARAMS[el1]["c"]
c2 = BV_PARAMS[el2]["c"]
R = r1 + r2 - r1 * r2 * (sqrt(c1) - sqrt(c2)) ** 2 / \
(c1 * r1 + c2 * r2)
vij = exp((R - dist * scale_factor) / 0.31)
bvsum += vij * (1 if el1.X < el2.X else -1)
return bvsum
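# A minimal numeric sketch of the relation used above (hypothetical values,
# not taken from the tabulated parameters): with an effective bond-valence
# parameter R = 1.62 Angstrom and an observed bond length dist = 2.10
# Angstrom, the contribution of that single bond is
#
#   >>> from math import exp
#   >>> round(exp((1.62 - 2.10) / 0.31), 3)
#   0.213
#
# Summing such terms over all neighbors (with the +/- sign taken from the
# electronegativity comparison) gives the bond valence sum returned above.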
def calculate_bv_sum_unordered(site, nn_list, scale_factor=1):
"""
Calculates the BV sum of a site for unordered structures.
    Args:
        site:
            The site.
        nn_list:
            List of nearest neighbors in the format [(nn_site, dist), ...].
        scale_factor:
            A scale factor to be applied. This is useful for scaling
            distances, especially for calculation-relaxed structures, which
            may tend to under-bind (GGA) or over-bind (LDA).
"""
# If the site "site" has N partial occupations as : f_{site}_0,
# f_{site}_1, ... f_{site}_N of elements
# X_{site}_0, X_{site}_1, ... X_{site}_N, and each neighbors nn_i in nn
# has N_{nn_i} partial occupations as :
# f_{nn_i}_0, f_{nn_i}_1, ..., f_{nn_i}_{N_{nn_i}}, then the bv sum of
# site "site" is obtained as :
# \sum_{nn} \sum_j^N \sum_k^{N_{nn}} f_{site}_j f_{nn_i}_k vij_full
# where vij_full is the valence bond of the fully occupied bond
bvsum = 0
for specie1, occu1 in six.iteritems(site.species_and_occu):
el1 = Element(specie1.symbol)
for (nn, dist) in nn_list:
for specie2, occu2 in six.iteritems(nn.species_and_occu):
el2 = Element(specie2.symbol)
if (el1 in ELECTRONEG or el2 in ELECTRONEG) and el1 != el2:
r1 = BV_PARAMS[el1]["r"]
r2 = BV_PARAMS[el2]["r"]
c1 = BV_PARAMS[el1]["c"]
c2 = BV_PARAMS[el2]["c"]
R = r1 + r2 - r1 * r2 * (sqrt(c1) - sqrt(c2)) ** 2 / \
(c1 * r1 + c2 * r2)
vij = exp((R - dist * scale_factor) / 0.31)
bvsum += occu1 * occu2 * vij * (1 if el1.X < el2.X else -1)
return bvsum
class BVAnalyzer(object):
"""
This class implements a maximum a posteriori (MAP) estimation method to
determine oxidation states in a structure. The algorithm is as follows:
1) The bond valence sum of all symmetrically distinct sites in a structure
is calculated using the element-based parameters in M. O'Keefe, & N. Brese,
JACS, 1991, 113(9), 3226-3229. doi:10.1021/ja00009a002.
    2) The posterior probabilities of all oxidation states are then calculated
    using: P(oxi_state|BV) = K * P(BV|oxi_state) * P(oxi_state), where K is
    a constant factor for each element. P(BV|oxi_state) is calculated as a
    Gaussian with mean and std deviation determined from an analysis of
    the ICSD. The prior P(oxi_state) is determined from a frequency
    analysis of the ICSD.
    3) The oxidation states are then ranked in order of decreasing probability
    and the oxidation state combination that results in a charge-neutral cell
    is selected.
"""
CHARGE_NEUTRALITY_TOLERANCE = 0.00001
def __init__(self, symm_tol=0.1, max_radius=4, max_permutations=100000,
distance_scale_factor=1.015,
charge_neutrality_tolerance=CHARGE_NEUTRALITY_TOLERANCE,
forbidden_species=None):
"""
Initializes the BV analyzer, with useful defaults.
Args:
symm_tol:
Symmetry tolerance used to determine which sites are
symmetrically equivalent. Set to 0 to turn off symmetry.
max_radius:
Maximum radius in Angstrom used to find nearest neighbors.
max_permutations:
The maximum number of permutations of oxidation states to test.
distance_scale_factor:
A scale factor to be applied. This is useful for scaling
                distances, especially for calculation-relaxed structures,
                which may tend to under-bind (GGA) or over-bind (LDA). The
                default of 1.015 works for GGA. For experimental structures,
                set this to 1.
charge_neutrality_tolerance:
                Tolerance on the charge neutrality when unordered structures
                are involved.
forbidden_species:
                List of species that are forbidden (for example, ["O-"]
                cannot be used). Use this when it is known that some
                oxidation state cannot occur for some atom in a structure
                or list of structures.
"""
self.symm_tol = symm_tol
self.max_radius = max_radius
self.max_permutations = max_permutations
self.dist_scale_factor = distance_scale_factor
self.charge_neutrality_tolerance = charge_neutrality_tolerance
forbidden_species = [get_el_sp(sp) for sp in forbidden_species] if \
forbidden_species else []
self.icsd_bv_data = {get_el_sp(specie): data
for specie, data in ICSD_BV_DATA.items()
                             if specie not in forbidden_species} \
if len(forbidden_species) > 0 else ICSD_BV_DATA
def _calc_site_probabilities(self, site, nn):
el = site.specie.symbol
bv_sum = calculate_bv_sum(site, nn,
scale_factor=self.dist_scale_factor)
prob = {}
for sp, data in self.icsd_bv_data.items():
if sp.symbol == el and sp.oxi_state != 0 and data["std"] > 0:
u = data["mean"]
sigma = data["std"]
# Calculate posterior probability. Note that constant
# factors are ignored. They have no effect on the results.
prob[sp.oxi_state] = exp(-(bv_sum - u) ** 2 / 2 /
(sigma ** 2)) \
/ sigma * PRIOR_PROB[sp]
# Normalize the probabilities
try:
prob = {k: v / sum(prob.values()) for k, v in prob.items()}
except ZeroDivisionError:
prob = {k: 0.0 for k in prob}
return prob
def _calc_site_probabilities_unordered(self, site, nn):
bv_sum = calculate_bv_sum_unordered(
site, nn, scale_factor=self.dist_scale_factor)
prob = {}
for specie, occu in six.iteritems(site.species_and_occu):
el = specie.symbol
prob[el] = {}
for sp, data in self.icsd_bv_data.items():
if sp.symbol == el and sp.oxi_state != 0 and data["std"] > 0:
u = data["mean"]
sigma = data["std"]
#Calculate posterior probability. Note that constant
#factors are ignored. They have no effect on the results.
prob[el][sp.oxi_state] = exp(-(bv_sum - u) ** 2 / 2 /
(sigma ** 2)) \
/ sigma * PRIOR_PROB[sp]
#Normalize the probabilities
try:
prob[el] = {k: v / sum(prob[el].values())
for k, v in prob[el].items()}
except ZeroDivisionError:
prob[el] = {k: 0.0 for k in prob[el]}
return prob
def get_valences(self, structure):
"""
        Returns a list of valences for the structure. This works for both
        ordered structures and disordered structures with partial
        occupancies.
Args:
structure: Structure to analyze
Returns:
A list of valences for each site in the structure (for an ordered
structure), e.g., [1, 1, -2] or a list of lists with the
valences for each fractional element of each site in the
structure (for an unordered structure),
e.g., [[2, 4], [3], [-2], [-2], [-2]]
Raises:
A ValueError if the valences cannot be determined.
"""
els = [Element(el.symbol) for el in structure.composition.elements]
if not set(els).issubset(set(BV_PARAMS.keys())):
raise ValueError(
"Structure contains elements not in set of BV parameters!"
)
#Perform symmetry determination and get sites grouped by symmetry.
if self.symm_tol:
finder = SpacegroupAnalyzer(structure, self.symm_tol)
symm_structure = finder.get_symmetrized_structure()
equi_sites = symm_structure.equivalent_sites
else:
equi_sites = [[site] for site in structure]
#Sort the equivalent sites by decreasing electronegativity.
equi_sites = sorted(equi_sites,
key=lambda sites: -sites[0].species_and_occu
.average_electroneg)
#Get a list of valences and probabilities for each symmetrically
#distinct site.
valences = []
all_prob = []
if structure.is_ordered:
for sites in equi_sites:
test_site = sites[0]
nn = structure.get_neighbors(test_site, self.max_radius)
prob = self._calc_site_probabilities(test_site, nn)
all_prob.append(prob)
val = list(prob.keys())
#Sort valences in order of decreasing probability.
val = sorted(val, key=lambda v: -prob[v])
#Retain probabilities that are at least 1/100 of highest prob.
valences.append(
list(filter(lambda v: prob[v] > 0.01 * prob[val[0]],
val)))
else:
full_all_prob = []
for sites in equi_sites:
test_site = sites[0]
nn = structure.get_neighbors(test_site, self.max_radius)
prob = self._calc_site_probabilities_unordered(test_site, nn)
all_prob.append(prob)
full_all_prob.extend(prob.values())
vals = []
for (elsp, occ) in get_z_ordered_elmap(
test_site.species_and_occu):
val = list(prob[elsp.symbol].keys())
#Sort valences in order of decreasing probability.
val = sorted(val, key=lambda v: -prob[elsp.symbol][v])
                    # Retain probabilities that are at least 1/1000 of the
                    # highest prob.
vals.append(
list(filter(
lambda v: prob[elsp.symbol][v] > 0.001 * prob[
elsp.symbol][val[0]], val)))
valences.append(vals)
#make variables needed for recursion
if structure.is_ordered:
nsites = np.array([len(i) for i in equi_sites])
vmin = np.array([min(i) for i in valences])
vmax = np.array([max(i) for i in valences])
self._n = 0
self._best_score = 0
self._best_vset = None
def evaluate_assignment(v_set):
el_oxi = collections.defaultdict(list)
for i, sites in enumerate(equi_sites):
el_oxi[sites[0].specie.symbol].append(v_set[i])
max_diff = max([max(v) - min(v) for v in el_oxi.values()])
if max_diff > 1:
return
score = six.moves.reduce(
operator.mul, [all_prob[i][v] for i, v in enumerate(v_set)])
if score > self._best_score:
self._best_vset = v_set
self._best_score = score
def _recurse(assigned=[]):
#recurses to find permutations of valences based on whether a
#charge balanced assignment can still be found
if self._n > self.max_permutations:
return
i = len(assigned)
highest = vmax.copy()
highest[:i] = assigned
highest *= nsites
highest = np.sum(highest)
lowest = vmin.copy()
lowest[:i] = assigned
lowest *= nsites
lowest = np.sum(lowest)
if highest < 0 or lowest > 0:
self._n += 1
return
if i == len(valences):
evaluate_assignment(assigned)
self._n += 1
return
else:
for v in valences[i]:
new_assigned = list(assigned)
_recurse(new_assigned + [v])
else:
nsites = np.array([len(i) for i in equi_sites])
tmp = []
attrib = []
for insite, nsite in enumerate(nsites):
for val in valences[insite]:
tmp.append(nsite)
attrib.append(insite)
new_nsites = np.array(tmp)
fractions = []
elements = []
for sites in equi_sites:
for sp, occu in get_z_ordered_elmap(sites[0].species_and_occu):
elements.append(sp.symbol)
fractions.append(occu)
fractions = np.array(fractions, np.float)
new_valences = []
for vals in valences:
for val in vals:
new_valences.append(val)
vmin = np.array([min(i) for i in new_valences], np.float)
vmax = np.array([max(i) for i in new_valences], np.float)
self._n = 0
self._best_score = 0
self._best_vset = None
def evaluate_assignment(v_set):
el_oxi = collections.defaultdict(list)
jj = 0
for i, sites in enumerate(equi_sites):
for specie, occu in get_z_ordered_elmap(
sites[0].species_and_occu):
el_oxi[specie.symbol].append(v_set[jj])
jj += 1
max_diff = max([max(v) - min(v) for v in el_oxi.values()])
if max_diff > 2:
return
score = six.moves.reduce(
operator.mul,
[all_prob[attrib[iv]][elements[iv]][vv]
for iv, vv in enumerate(v_set)])
if score > self._best_score:
self._best_vset = v_set
self._best_score = score
def _recurse(assigned=[]):
#recurses to find permutations of valences based on whether a
#charge balanced assignment can still be found
if self._n > self.max_permutations:
return
i = len(assigned)
highest = vmax.copy()
highest[:i] = assigned
highest *= new_nsites
highest *= fractions
highest = np.sum(highest)
lowest = vmin.copy()
lowest[:i] = assigned
lowest *= new_nsites
lowest *= fractions
lowest = np.sum(lowest)
if (highest < -self.charge_neutrality_tolerance or
lowest > self.charge_neutrality_tolerance):
self._n += 1
return
if i == len(new_valences):
evaluate_assignment(assigned)
self._n += 1
return
else:
for v in new_valences[i]:
new_assigned = list(assigned)
_recurse(new_assigned + [v])
_recurse()
if self._best_vset:
if structure.is_ordered:
assigned = {}
for val, sites in zip(self._best_vset, equi_sites):
for site in sites:
assigned[site] = val
return [int(assigned[site]) for site in structure]
else:
assigned = {}
new_best_vset = []
for ii in range(len(equi_sites)):
new_best_vset.append(list())
for ival, val in enumerate(self._best_vset):
new_best_vset[attrib[ival]].append(val)
for val, sites in zip(new_best_vset, equi_sites):
for site in sites:
assigned[site] = val
return [[int(frac_site) for frac_site in assigned[site]]
for site in structure]
else:
raise ValueError("Valences cannot be assigned!")
def get_oxi_state_decorated_structure(self, structure):
"""
        Get an oxidation state decorated structure. This works for both
        ordered and disordered structures.
Args:
structure: Structure to analyze
Returns:
A modified structure that is oxidation state decorated.
Raises:
ValueError if the valences cannot be determined.
"""
s = structure.copy()
if s.is_ordered:
valences = self.get_valences(s)
s.add_oxidation_state_by_site(valences)
else:
valences = self.get_valences(s)
s = add_oxidation_state_by_site_fraction(s, valences)
return s
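# A minimal sketch of the MAP scoring used by BVAnalyzer above (hypothetical
# numbers): for each candidate oxidation state the unnormalized posterior is
# a Gaussian likelihood of the computed BV sum times the ICSD prior,
#
#   >>> from math import exp
#   >>> bv_sum, u, sigma, prior = 2.1, 2.0, 0.2, 0.5
#   >>> round(exp(-(bv_sum - u) ** 2 / 2 / sigma ** 2) / sigma * prior, 3)
#   2.206
#
# Candidate states are then ranked by this score per site, and _recurse()
# searches for the most probable charge-neutral combination.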
def get_z_ordered_elmap(comp):
"""
    Arbitrarily ordered elmap on the elements/species of the composition of a
    given site in an unordered structure. Returns a list of
    (element_or_specie, occupation) tuples in that arbitrary order.
    The order is based on the Z of the element, with the smallest
    fractional occupations first.
Example : {"Ni3+": 0.2, "Ni4+": 0.2, "Cr3+": 0.15, "Zn2+": 0.34,
"Cr4+": 0.11} will yield the species in the following order :
Cr4+, Cr3+, Ni3+, Ni4+, Zn2+ ... or
Cr4+, Cr3+, Ni4+, Ni3+, Zn2+
"""
return sorted([(elsp, comp[elsp]) for elsp in comp.keys()])
def add_oxidation_state_by_site_fraction(structure, oxidation_states):
"""
Add oxidation states to a structure by fractional site.
    Args:
        structure (Structure): Structure to decorate.
        oxidation_states (list): List of lists of oxidation states, one inner
            list per site with one state per site fraction.
            E.g., [[2, 4], [3], [-2], [-2], [-2]]
"""
try:
for i, site in enumerate(structure):
new_sp = collections.defaultdict(float)
for j, (el, occu) in enumerate(get_z_ordered_elmap(site
.species_and_occu)):
specie = Specie(el.symbol, oxidation_states[i][j])
new_sp[specie] += occu
structure[i] = new_sp
return structure
except IndexError:
raise ValueError("Oxidation state of all sites must be "
"specified in the list.")
|
{
"content_hash": "6fc53cbcb91aedca8a94e835286cd84c",
"timestamp": "",
"source": "github",
"line_count": 535,
"max_line_length": 80,
"avg_line_length": 40.3607476635514,
"alnum_prop": 0.5256796183948502,
"repo_name": "tallakahath/pymatgen",
"id": "4cfa17a036e31719b7b2f5d5cedac8c48d218c4e",
"size": "21703",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "pymatgen/analysis/bond_valence.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "838"
},
{
"name": "Common Lisp",
"bytes": "3029065"
},
{
"name": "Perl",
"bytes": "229104"
},
{
"name": "Propeller Spin",
"bytes": "4026362"
},
{
"name": "Python",
"bytes": "5529281"
},
{
"name": "Roff",
"bytes": "868"
}
],
"symlink_target": ""
}
|
"""
Flip API
Flip # noqa: E501
The version of the OpenAPI document: 3.1
Contact: cloudsupport@telestream.net
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import telestream_cloud_flip
from telestream_cloud_flip.models.extra_file import ExtraFile # noqa: E501
from telestream_cloud_flip.rest import ApiException
class TestExtraFile(unittest.TestCase):
"""ExtraFile unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def make_instance(self, include_optional):
"""Test ExtraFile
            include_optional is a boolean; when False only required
params are included, when True both required and
optional params are included """
# model = telestream_cloud_flip.models.extra_file.ExtraFile() # noqa: E501
        if include_optional:
return ExtraFile(
tag = '0',
file_size = 56,
file_name = '0'
)
        else:
return ExtraFile(
tag = '0',
file_size = 56,
file_name = '0',
)
def testExtraFile(self):
"""Test ExtraFile"""
inst_req_only = self.make_instance(include_optional=False)
inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "c8948d1a372e9ef35bd546da68cffdf3",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 83,
"avg_line_length": 26,
"alnum_prop": 0.5927197802197802,
"repo_name": "Telestream/telestream-cloud-python-sdk",
"id": "e3eff68ffd86ffcb5f3dd134c25c0bac82e7e00e",
"size": "1473",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "telestream_cloud_flip_sdk/test/test_extra_file.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1339719"
},
{
"name": "Shell",
"bytes": "6712"
}
],
"symlink_target": ""
}
|
import cahal_tests
import unittest
import string
import types
class TestsCAHALAudioFormatDescription( unittest.TestCase ):
def test_convert_cahal_audio_format_id_to_cstring( self ):
cahal_tests.cahal_convert_audio_format_id_to_cstring( 0 )
cahal_tests.cahal_convert_audio_format_id_to_cstring ( \
int( cahal_tests.CAHAL_AUDIO_FORMAT_MACE6 ) \
                                                          )
cahal_tests.cahal_convert_audio_format_id_to_cstring ( \
int( cahal_tests.CAHAL_AUDIO_FORMAT_LINEARPCM ) \
                                                          )
cahal_tests.cahal_convert_audio_format_id_to_cstring ( \
int( cahal_tests.CAHAL_AUDIO_FORMAT_PARAMETERVALUESTREAM ) \
)
def test_print_cahal_audio_format_id( self ):
cahal_tests.cahal_print_audio_format_id( None, 0 )
cahal_tests.cahal_print_audio_format_id ( \
None, \
int( cahal_tests.CAHAL_AUDIO_FORMAT_MACE6 ) \
)
cahal_tests.cahal_print_audio_format_id ( \
"Format:", \
int( cahal_tests.CAHAL_AUDIO_FORMAT_LINEARPCM ) \
                                            )
cahal_tests.cahal_print_audio_format_id ( \
"", \
int( cahal_tests.CAHAL_AUDIO_FORMAT_PARAMETERVALUESTREAM ) \
)
def test_print_cahal_audio_format_description( self ):
device_list = cahal_tests.cahal_get_device_list()
    index = 0
device = cahal_tests.cahal_device_list_get( device_list, index )
while( device ):
stream_index = 0
stream = \
cahal_tests.cahal_device_stream_list_get ( \
device.device_streams, \
stream_index \
)
while( stream ):
format_description_index = 0
format_description = \
cahal_tests.cahal_audio_format_description_list_get ( \
stream.supported_formats, \
format_description_index \
)
while( format_description ):
cahal_tests.cahal_print_audio_format_description ( \
format_description \
)
format_description_index += 1
format_description = \
cahal_tests.cahal_audio_format_description_list_get ( \
stream.supported_formats, \
format_description_index \
)
stream_index += 1
stream = \
cahal_tests.cahal_device_stream_list_get ( \
device.device_streams, \
stream_index \
)
index += 1
device = cahal_tests.cahal_device_list_get( device_list, index )
if __name__ == '__main__':
try:
import threading as _threading
except ImportError:
import dummy_threading as _threading
cahal_tests.cpc_log_set_log_level( cahal_tests.CPC_LOG_LEVEL_ERROR )
cahal_tests.python_cahal_initialize()
  # exit=False keeps main() from calling sys.exit(), so terminate() runs
  unittest.main( exit=False )
  cahal_tests.cahal_terminate()
|
{
"content_hash": "8c6c8b3afc8feaa15ab77795a8a3de6c",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 88,
"avg_line_length": 44.9375,
"alnum_prop": 0.3875753361149745,
"repo_name": "bcarr092/CAHAL",
"id": "061c4f3d898749ed34ab313e95ae238061c90fc8",
"size": "4314",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_cahal_audio_format_description.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "348787"
},
{
"name": "C++",
"bytes": "69900"
},
{
"name": "CMake",
"bytes": "13992"
},
{
"name": "Objective-C",
"bytes": "4597"
},
{
"name": "Python",
"bytes": "34985"
}
],
"symlink_target": ""
}
|
import os, sys
from setuptools import setup, find_packages
def read(*path):
return open(os.path.join(os.path.abspath(os.path.dirname(__file__)), *path)).read()
setup(
name = 'asgard-utils',
version = '0.1',
url = 'http://asgardproject.org/utils/',
author = 'Myles Braithwaite',
author_email = 'me@mylesbraithwaite.com',
description = 'Asgard CMS system utilities.',
# long_description = read('docs', 'intro.rst'),
license = 'BSD License',
packages = find_packages('src'),
package_dir = {'': 'src'},
include_package_data = True,
install_requires = [
'distribute',
],
classifiers = [
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Framework :: Django',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP',
],
)
|
{
"content_hash": "4a1d3bc171a8d11022e791a25e88c7c4",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 84,
"avg_line_length": 23.28205128205128,
"alnum_prop": 0.6541850220264317,
"repo_name": "asgardproject/asgard-utils",
"id": "99abc29b0ae66694590df78987c03efe8111aa63",
"size": "908",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "61315"
}
],
"symlink_target": ""
}
|
from iptest.assert_util import *
skiptest("silverlight")
import sys
import nt
# This module tests operations on the builtin file object. It is not yet
# complete; the tests cover read(), read(size), readline() and write() for
# binary, text and universal newline modes.
def test_sanity():
for i in range(5):
### general file robustness tests
f = file("onlyread.tmp", "w")
f.write("will only be read")
f.flush()
f.close()
sin = file("onlyread.tmp", "r")
sout = file("onlywrite.tmp", "w")
# writer is null for sin
AssertError(IOError, sin.write, "abc")
AssertError(IOError, sin.writelines, ["abc","def"])
# reader is null for sout
if is_cli:
AssertError(IOError, sout.read)
AssertError(IOError, sout.read, 10)
AssertError(IOError, sout.readline)
AssertError(IOError, sout.readline, 10)
AssertError(IOError, sout.readlines)
AssertError(IOError, sout.readlines, 10)
sin.close()
sout.close()
# now close a file and try to perform other I/O operations on it...
# should throw ValueError according to docs
f = file("onlywrite.tmp", "w")
f.close()
f.close()
AssertError(ValueError, f.__iter__)
AssertError(ValueError, f.flush)
AssertError(ValueError, f.fileno)
AssertError(ValueError, f.next)
AssertError(ValueError, f.read)
AssertError(ValueError, f.read, 10)
AssertError(ValueError, f.readline)
AssertError(ValueError, f.readline, 10)
AssertError(ValueError, f.readlines)
AssertError(ValueError, f.readlines, 10)
AssertError(ValueError, f.seek, 10)
AssertError(ValueError, f.seek, 10, 10)
AssertError(ValueError, f.write, "abc")
AssertError(ValueError, f.writelines, ["abc","def"])
###
# The name of a temporary test data file that will be used for the following
# file tests.
temp_file = path_combine(testpath.temporary_dir, "temp.dat")
# Test binary reading and writing fidelity using a round trip method. First
# construct some pseudo random binary data in a string (making it long enough
# that it's likely we'd show up any problems with the data being passed through
# a character encoding/decoding scheme). Then write this data to disk (in binary
# mode), read it back again (in binary) and check that no changes have occurred.
# Construct the binary data. We want the test to be repeatable so seed the
# random number generator with a fixed value. Use a simple linear congruential
# method to generate the random byte values.
rng_seed = 0
def test_read_write_fidelity():
def randbyte():
global rng_seed
rng_seed = (1664525 * rng_seed) + 1013904223
return (rng_seed >> 8) & 0xff
data = ""
for i in range(10 * 1024):
data += chr(randbyte())
# Keep a copy of the data safe.
    orig_data = data
# Write the data to disk in binary mode.
f = file(temp_file, "wb")
f.write(data)
f.close()
# And read it back in again.
f = file(temp_file, "rb")
data = f.read()
f.close()
# Check nothing changed.
Assert(data == orig_data)
def test_cp10983():
# writing non-unicode characters > 127 should be preserved
x = open(temp_file, 'w')
x.write('\xa33')
x.close()
x = open(temp_file)
data = x.read()
x.close()
AreEqual(ord(data[0]), 163)
AreEqual(ord(data[1]), 51)
x = open(temp_file, 'w')
x.write("a2\xa33\u0163\x0F\x0FF\t\\\x0FF\x0FE\x00\x01\x7F\x7E\x80")
x.close()
x = open(temp_file)
data = x.read()
x.close()
AreEqual(data, 'a2\xa33\\u0163\x0f\x0fF\t\\\x0fF\x0fE\x00\x01\x7F\x7E\x80')
@skip('win32')
def test_cp27179():
# file.write() accepting Array[Byte]
from System import Array, Byte
data_string = 'abcdef\nghijkl\n\n'
data = Array[Byte](map(Byte, map(ord, data_string)))
f = open(temp_file, 'w+')
f.write(data)
f.close()
f = open(temp_file, 'r')
data_read = f.read()
f.close()
AreEqual(data_string, data_read)
# Helper used to format newline characters into a visible format.
def format_newlines(string):
out = ""
for char in string:
if char == '\r':
out += "\\r"
elif char == '\n':
out += "\\n"
else:
out += char
return out
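# For example (illustrative): format_newlines("a\r\nb") returns "a\\r\\nb";
# format_tuple() below uses this to display expected newline data readably.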
# The set of read modes we wish to test. Each tuple consists of a human readable
# name for the mode followed by the corresponding mode string that will be
# passed to the file constructor.
read_modes = (("binary", "rb"), ("text", "r"), ("universal", "rU"))
# Same deal as above but for write modes. Note that writing doesn't support a
# universal newline mode.
write_modes = (("binary", "wb"), ("text", "w"))
# The following is the setup for a set of pattern mode tests that will check
# some tricky edge cases for newline translation for both reading and writing.
# The entry point is the test_patterns() function.
def test_newlines():
# Read mode test cases. Each tuple has three values; the raw on-disk value we
# start with (which also doubles as the value we should get back when we read in
# binary mode) then the value we expect to get when reading in text mode and
# finally the value we expect to get in universal newline mode.
read_patterns = (("\r", "\r", "\n"),
("\n", "\n", "\n"),
("\r\n", "\n", "\n"),
("\n\r", "\n\r", "\n\n"),
("\r\r", "\r\r", "\n\n"),
("\n\n", "\n\n", "\n\n"),
("\r\n\r\n", "\n\n", "\n\n"),
("\n\r\n\r", "\n\n\r", "\n\n\n"),
("The quick brown fox", "The quick brown fox", "The quick brown fox"),
("The \rquick\n brown fox\r\n", "The \rquick\n brown fox\n", "The \nquick\n brown fox\n"),
("The \r\rquick\r\n\r\n brown fox", "The \r\rquick\n\n brown fox", "The \n\nquick\n\n brown fox"))
# Write mode test cases. Same deal as above but with one less member in each
# tuple due to the lack of a universal newline write mode. The first value
# represents the in-memory value we start with (and expect to write in binary
# write mode) and the next value indicates the value we expect to end up on disk
# in text mode.
write_patterns = (("\r", "\r"),
("\n", "\r\n"),
("\r\n", "\r\r\n"),
("\n\r", "\r\n\r"),
("\r\r", "\r\r"),
("\n\n", "\r\n\r\n"),
("\r\n\r\n", "\r\r\n\r\r\n"),
("\n\r\n\r", "\r\n\r\r\n\r"),
("The quick brown fox", "The quick brown fox"),
("The \rquick\n brown fox\r\n", "The \rquick\r\n brown fox\r\r\n"),
("The \r\rquick\r\n\r\n brown fox", "The \r\rquick\r\r\n\r\r\n brown fox"))
# Test a specific read mode pattern.
def test_read_pattern(pattern):
# Write the initial data to disk using binary mode (we test this
# functionality earlier so we're satisfied it gets there unaltered).
f = file(temp_file, "wb")
f.write(pattern[0])
f.close()
# Read the data back in each read mode, checking that we get the correct
# transform each time.
for mode in range(3):
            test_read_mode(pattern, mode)
# Test a specific read mode pattern for a given reading mode.
def test_read_mode(pattern, mode):
# Read the data back from disk using the given read mode.
f = file(temp_file, read_modes[mode][1])
contents = f.read()
f.close()
# Check it equals what we expected for this mode.
Assert(contents == pattern[mode])
# Test a specific write mode pattern.
def test_write_pattern(pattern):
for mode in range(2):
            test_write_mode(pattern, mode)
# Test a specific write mode pattern for a given write mode.
def test_write_mode(pattern, mode):
# Write the raw data using the given mode.
f = file(temp_file, write_modes[mode][1])
f.write(pattern[0])
f.close()
# Read the data back in using binary mode (we tested this gets us back
# unaltered data earlier).
f = file(temp_file, "rb")
contents = f.read()
f.close()
# Check it equals what we expected for this mode.
Assert(contents == pattern[mode])
# Run through the read and write mode tests for all patterns.
def test_patterns():
for pattern in read_patterns:
test_read_pattern(pattern)
for pattern in write_patterns:
test_write_pattern(pattern)
# Actually run the pattern mode tests.
test_patterns()
# Now some tests of read(size).
# Test data is in the following format:
#   ("raw data", read_size,
#    (binary mode result strings),    (binary mode tell() results),
#    (text mode result strings),      (text mode tell() results),
#    (universal mode result strings), (universal mode tell() results))
def test_read_size():
read_size_tests = (("Hello", 1, ("H", "e", "l", "l", "o"), (1,2,3,4,5),
("H", "e", "l", "l", "o"), (1,2,3,4,5),
("H", "e", "l", "l", "o"), (1,2,3,4,5)),
("Hello", 2, ("He", "ll", "o"), (2,4,5),
("He", "ll", "o"), (2,4,5),
("He", "ll", "o"), (2,4,5)),
("H\re\n\r\nllo", 1, ("H", "\r", "e", "\n", "\r", "\n", "l", "l", "o"), (1,2,3,4,5,6,7, 8, 9),
("H", "\r", "e", "\n", "\n", "l", "l", "o"), (1,2,3,4,6,7,8,9),
("H", "\n", "e", "\n", "\n", "l", "l", "o"), (1,2,3,4,6,7,8,9)),
("H\re\n\r\nllo", 2, ("H\r", "e\n", "\r\n", "ll", "o"), (2, 4, 6, 8, 9),
("H\r", "e\n", "\nl", "lo"), (2,4,7, 9),
("H\n", "e\n", "\nl", "lo"), (2,4,7, 9)))
if not is_cli: return
for test in read_size_tests:
# Write the test pattern to disk in binary mode.
f = file(temp_file, "wb")
f.write(test[0])
f.close()
# Read the data back in each of the read modes we test.
for mode in range(3):
f = file(temp_file, read_modes[mode][1])
AreEqual(f.closed, False)
# We read the data in the size specified by the test and expect to get
# the set of strings given for this specific mode.
size = test[1]
strings = test[2 + mode*2]
lengths = test[3 + mode*2]
count = 0
while True:
data = f.read(size)
if data == "":
Assert(count == len(strings))
break
count = count + 1
Assert(count <= len(strings))
Assert(data == strings[count - 1])
AreEqual(f.tell(), lengths[count-1])
f.close()
AreEqual(f.closed, True)
# And some readline tests.
# Test data is in the following format: ("raw data", (binary mode result strings)
# (text mode result strings)
# (universal mode result strings))
def test_readline():
readline_tests = (("Mary had a little lamb", ("Mary had a little lamb", ),
("Mary had a little lamb", ),
("Mary had a little lamb", )),
("Mary had a little lamb\r", ("Mary had a little lamb\r", ),
("Mary had a little lamb\r", ),
("Mary had a little lamb\n", )),
("Mary had a \rlittle lamb\r", ("Mary had a \rlittle lamb\r", ),
("Mary had a \rlittle lamb\r", ),
("Mary had a \n", "little lamb\n")),
("Mary \r\nhad \na little lamb", ("Mary \r\n", "had \n", "a little lamb"),
("Mary \n", "had \n", "a little lamb"),
("Mary \n", "had \n", "a little lamb")))
for test in readline_tests:
# Write the test pattern to disk in binary mode.
f = file(temp_file, "wb")
f.write(test[0])
f.close()
# Read the data back in each of the read modes we test.
for mode in range(3):
f = file(temp_file, read_modes[mode][1])
# We read the data by line and expect to get a specific sets of lines back.
strings = test[1 + mode]
count = 0
while True:
data = f.readline()
if data == "":
AreEqual(count, len(strings))
break
count = count + 1
Assert(count <= len(strings))
AreEqual(data, strings[count - 1])
f.close()
def format_tuple(tup):
if tup == None:
return "None"
if (isinstance(tup, str)):
return format_newlines(tup)
out = "("
for entry in tup:
out += format_newlines(entry) + ", "
out += ")"
return out
# Test the 'newlines' attribute.
# Format of the test data is the raw data written to the test file followed by a tuple representing the values
# of newlines expected after each line is read from the file in universal newline mode.
def test_newlines_attribute():
newlines_tests = (("123", (None, )),
("1\r\n2\r3\n", ("\r\n", ("\r\n", "\r"), ("\r\n", "\r", "\n"))),
("1\r2\n3\r\n", ("\r", ("\r", "\n"), ("\r\n", "\r", "\n"))),
("1\n2\r\n3\r", ("\n", ("\r\n", "\n"), ("\r\n", "\r", "\n"))),
("1\r\n2\r\n3\r\n", ("\r\n", "\r\n", "\r\n")),
("1\r2\r3\r", ("\r", "\r", "\r")),
("1\n2\n3\n", ("\n", "\n", "\n")))
if not is_cli: return False
for test in newlines_tests:
# Write the test pattern to disk in binary mode.
f = file(temp_file, "wb")
f.write(test[0])
# Verify newlines isn't set while writing.
Assert(f.newlines == None)
f.close()
# Verify that reading the file in binary or text mode won't set newlines.
f = file(temp_file, "rb")
data = f.read()
Assert(f.newlines == None)
f.close()
f = file(temp_file, "r")
data = f.read()
Assert(f.newlines == None)
f.close()
# Read file in universal mode line by line and verify we see the expected output at each stage.
expected = test[1]
f = file(temp_file, "rU")
Assert(f.newlines == None)
count = 0
while True:
data = f.readline()
if data == "":
break
Assert(count < len(expected))
Assert(f.newlines == expected[count])
count = count + 1
f.close()
## coverage: a sequence of file operation
def test_coverage():
f = file(temp_file, 'w')
Assert(str(f).startswith("<open file '%s', mode 'w'" % temp_file))
Assert(f.fileno() <> -1)
Assert(f.fileno() <> 0)
# write
AssertError(TypeError, f.writelines, [3])
f.writelines(["firstline\n"])
f.close()
Assert(str(f).startswith("<closed file '%s', mode 'w'" % temp_file))
# append
f = file(temp_file, 'a+')
f.writelines(['\n', 'secondline\n'])
pos = len('secondline\n') + 1
f.seek(-1 * pos, 1)
f.writelines(['thirdline\n'])
f.close()
# read
f = file(temp_file, 'r+', 512)
f.seek(-1 * pos - 2, 2)
AreEqual(f.readline(), 'e\n')
AreEqual(f.readline(5), 'third')
AreEqual(f.read(-1), 'line\n')
AreEqual(f.read(-1), '')
f.close()
# read
f = file(temp_file, 'rb', 512)
f.seek(-1 * pos - 2, 2)
AreEqual(f.readline(), 'e\r\n')
AreEqual(f.readline(5), 'third')
AreEqual(f.read(-1), 'line\r\n')
AreEqual(f.read(-1), '')
f.close()
## file op in nt
nt.unlink(temp_file)
fd = nt.open(temp_file, nt.O_CREAT | nt.O_WRONLY)
nt.write(fd, "hello ")
nt.close(fd)
fd = nt.open(temp_file, nt.O_APPEND | nt.O_WRONLY)
nt.write(fd, "world")
nt.close(fd)
fd = nt.open(temp_file, 0)
AreEqual(nt.read(fd, 1024), "hello world")
nt.close(fd)
nt.unlink(temp_file)
def test_encoding():
#verify we start w/ ASCII
import sys
f = file(temp_file, 'w')
# we throw on flush, CPython throws on write, so both write & close need to catch
try:
f.write(u'\u6211')
f.close()
AssertUnreachable()
except UnicodeEncodeError:
pass
if hasattr(sys, "setdefaultencoding"):
#and verify UTF8 round trips correctly
setenc = sys.setdefaultencoding
saved = sys.getdefaultencoding()
try:
setenc('utf8')
f = file(temp_file, 'w')
f.write(u'\u6211')
f.close()
f = file(temp_file, 'r')
txt = f.read()
f.close()
AreEqual(txt, u'\u6211')
finally:
setenc(saved)
if is_cli:
def test_net_stream():
import System
fs = System.IO.FileStream(temp_file, System.IO.FileMode.Create, System.IO.FileAccess.Write)
f = file(fs, "wb")
f.write('hello\rworld\ngoodbye\r\n')
f.close()
f = file(temp_file, 'rb')
AreEqual(f.read(), 'hello\rworld\ngoodbye\r\n')
f.close()
f = file(temp_file, 'rU')
AreEqual(f.read(), 'hello\nworld\ngoodbye\n')
f.close()
def test_file_manager():
def return_fd1():
f = file(temp_file, 'w')
return f.fileno()
def return_fd2():
return nt.open(temp_file, 0)
import System
fd = return_fd1()
System.GC.Collect()
System.GC.WaitForPendingFinalizers()
AssertError(OSError, nt.fdopen, fd)
fd = return_fd2()
System.GC.Collect()
System.GC.WaitForPendingFinalizers()
f = nt.fdopen(fd)
f.close()
AssertError(OSError, nt.fdopen, fd)
def test_sharing():
modes = ['w', 'w+', 'a+', 'r', 'w']
for xx in modes:
for yy in modes:
x = file('tempfile.txt', xx)
y = file('tempfile.txt', yy)
x.close()
y.close()
nt.unlink('tempfile.txt')
def test_overwrite_readonly():
filename = "tmp.txt"
f = file(filename, "w+")
f.write("I am read-only")
f.close()
nt.chmod(filename, 256)
try:
try:
f = file(filename, "w+") # FAIL
finally:
nt.chmod(filename, 128)
nt.unlink(filename)
except IOError, e:
pass
else:
AssertUnreachable() # should throw
#any other exceptions fail
def test_inheritance_kwarg_override():
class TEST(file):
def __init__(self,fname,VERBOSITY=0):
file.__init__(self,fname,"w",1)
self.VERBOSITY = VERBOSITY
f=TEST(r'sometext.txt',VERBOSITY=1)
AreEqual(f.VERBOSITY, 1)
f.close()
nt.unlink('sometext.txt')
# file newline handling test
def test_newline():
def test_newline(norm, mode):
f = file("testfile.tmp", mode)
Assert(f.read() == norm)
for x in xrange(len(norm)):
f.seek(0)
a = f.read(x)
b = f.read(1)
c = f.read()
Assert(a+b+c == norm)
f.close()
AssertError(TypeError, file, None) # arg must be string
AssertError(TypeError, file, [])
AssertError(TypeError, file, 1)
norm = "Hi\nHello\nHey\nBye\nAhoy\n"
unnorm = "Hi\r\nHello\r\nHey\r\nBye\r\nAhoy\r\n"
f = file("testfile.tmp", "wb")
f.write(unnorm)
f.close()
test_newline(norm, "r")
test_newline(unnorm, "rb")
def test_creation():
f = file.__new__(file, None)
Assert(repr(f).startswith("<closed file '<uninitialized file>', mode '<uninitialized file>' at"))
AssertError(TypeError, file, None)
def test_repr():
class x(file):
def __repr__(self): return 'abc'
f = x('repr_does_not_exist', 'w')
AreEqual(repr(f), 'abc')
f.close()
nt.unlink('repr_does_not_exist')
def test_truncate():
# truncate()
a = file('abc.txt', 'w')
a.write('hello world\n')
a.truncate()
a.close()
a = file('abc.txt', 'r')
AreEqual(a.readlines(), ['hello world\n'])
a.close()
nt.unlink('abc.txt')
# truncate(#)
a = file('abc.txt', 'w')
a.write('hello\nworld\n')
a.truncate(6)
a.close()
a = file('abc.txt', 'r')
AreEqual(a.readlines(), ['hello\r'])
a.close()
nt.unlink('abc.txt')
# truncate(#) invalid args
a = file('abc.txt', 'w')
AssertError(IOError, a.truncate, -1)
AssertError(TypeError, a.truncate, None)
a.close()
# read-only file
a = file('abc.txt', 'r')
AssertError(IOError, a.truncate)
AssertError(IOError, a.truncate, 0)
a.close()
nt.unlink('abc.txt')
# std-out
AssertError(IOError, sys.stdout.truncate)
def test_modes():
"""test various strange mode combinations and error reporting"""
try:
x = file('test_file', 'w')
AreEqual(x.mode, 'w')
x.close()
# don't allow empty modes
AssertErrorWithMessage(ValueError, 'empty mode string', file, 'abc', '')
# mode must start with valid value
AssertErrorWithMessage(ValueError, "mode string must begin with one of 'r', 'w', 'a' or 'U', not 'p'", file, 'abc', 'p')
# allow anything w/ U but r and w
AssertErrorWithMessage(ValueError, "universal newline mode can only be used with modes starting with 'r'", file, 'abc', 'Uw')
AssertErrorWithMessage(ValueError, "universal newline mode can only be used with modes starting with 'r'", file, 'abc', 'Ua')
AssertErrorWithMessage(ValueError, "universal newline mode can only be used with modes starting with 'r'", file, 'abc', 'Uw+')
AssertErrorWithMessage(ValueError, "universal newline mode can only be used with modes starting with 'r'", file, 'abc', 'Ua+')
if is_cli:
#http://ironpython.codeplex.com/WorkItem/View.aspx?WorkItemId=21910
x = file('test_file', 'pU')
AreEqual(x.mode, 'pU')
x.close()
#http://ironpython.codeplex.com/WorkItem/View.aspx?WorkItemId=21910
x = file('test_file', 'pU+')
AreEqual(x.mode, 'pU+')
x.close()
#http://ironpython.codeplex.com/WorkItem/View.aspx?WorkItemId=21911
# extra info can be passed and is retained
x = file('test_file', 'rFOOBAR')
AreEqual(x.mode, 'rFOOBAR')
x.close()
else:
AssertError(ValueError, file, 'test_file', 'pU')
AssertError(ValueError, file, 'test_file', 'pU+')
AssertError(ValueError, file, 'test_file', 'rFOOBAR')
finally:
nt.unlink('test_file')
import thread
CP16623_LOCK = thread.allocate_lock()
@skip("win32") #This test is unstable under RunAgainstCpy.py
def test_cp16623():
'''
If this test ever fails randomly, there is a problem around file thread
safety. Do not wrap this test case with retry_on_failure!
'''
global FINISHED_COUNTER
FINISHED_COUNTER = 0
import time
expected_lines = ["a", "bbb" * 100, "cc"]
total_threads = 50
file_name = path_combine(testpath.temporary_dir, "cp16623.txt")
f = open(file_name, "w")
def write_stuff():
global FINISHED_COUNTER
global CP16623_LOCK
for j in xrange(100):
for i in xrange(50):
print >> f, "a"
print >> f, "bbb" * 1000
for i in xrange(10):
print >> f, "cc"
with CP16623_LOCK:
FINISHED_COUNTER += 1
for i in xrange(total_threads):
thread.start_new_thread(write_stuff, ())
#Give all threads some time to finish
for i in xrange(total_threads):
if FINISHED_COUNTER!=total_threads:
print "*",
time.sleep(1)
else:
break
AreEqual(FINISHED_COUNTER, total_threads)
f.close()
#Verifications - since print isn't threadsafe the following
#is pointless... Just make sure IP doesn't throw.
#f = open(file_name, "r")
#lines = f.readlines()
#for line in lines:
# Assert(line in expected_lines, line)
def test_write_buffer():
from iptest.file_util import delete_files
try:
for mode in ('b', ''):
foo = open('foo', 'w+' + mode)
b = buffer(b'hello world', 6)
foo.write(b)
foo.close()
foo = open('foo', 'r')
AreEqual(foo.readlines(), ['world'])
foo.close()
foo = open('foo', 'w+')
b = buffer(u'hello world', 6)
foo.write(b)
foo.close()
foo = open('foo', 'r')
AreEqual(foo.readlines(), ['world'])
foo.close()
foo = open('foo', 'w+b')
b = buffer(u'hello world', 6)
foo.write(b)
foo.close()
foo = open('foo', 'r')
if is_cpython:
AreEqual(foo.readlines(), ['l\x00o\x00 \x00w\x00o\x00r\x00l\x00d\x00'])
else:
AreEqual(foo.readlines(), ['world'])
foo.close()
finally:
delete_files("foo")
def test_errors():
try:
file('some_file_that_really_does_not_exist')
except Exception, e:
AreEqual(e.errno, 2)
else:
AssertUnreachable()
try:
file('path_too_long' * 100)
except Exception, e:
AreEqual(e.errno, 2)
else:
AssertUnreachable()
def test_write_bytes():
f = open("temp_ip", "w+")
try:
f.write(b"Hello\n")
f.close()
f = file('temp_ip')
AreEqual(f.readlines(), ['Hello\n'])
f.close()
finally:
nt.unlink('temp_ip')
def test_kw_args():
file(name = 'some_test_file.txt', mode = 'w').close()
nt.unlink('some_test_file.txt')
def test_buffering_kwparam():
#--Positive
for x in [-2147483648, -1, 0, 1, 2, 1024, 2147483646, 2147483647]:
f = file(name = 'some_test_file.txt', mode = 'w', buffering=x)
f.close()
nt.unlink('some_test_file.txt')
if is_cpython: #http://ironpython.codeplex.com/workitem/28214
AssertErrorWithMessage(TypeError, "integer argument expected, got float",
file, 'some_test_file.txt', 'w', 3.14)
else:
f = file(name = 'some_test_file.txt', mode = 'w', buffering=3.14)
f.close()
nt.unlink('some_test_file.txt')
#--Negative
for x in [None, "abc", u"", [], tuple()]:
AssertError(TypeError, #"an integer is required",
lambda: file(name = 'some_test_file.txt', mode = 'w', buffering=x))
for x in [2147483648, -2147483649]:
AssertError(OverflowError, #"long int too large to convert to int",
lambda: file(name = 'some_test_file.txt', mode = 'w', buffering=x))
#------------------------------------------------------------------------------
run_test(__name__)
|
{
"content_hash": "534daa1a377039c3b66cfb507c3b629c",
"timestamp": "",
"source": "github",
"line_count": 842,
"max_line_length": 135,
"avg_line_length": 33.53800475059382,
"alnum_prop": 0.5219023336520415,
"repo_name": "tempbottle/ironpython3",
"id": "771d88f35f14d99407380624bb73bd31429de711",
"size": "28965",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Tests/test_file.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "11099"
},
{
"name": "C#",
"bytes": "12216919"
},
{
"name": "CSS",
"bytes": "96"
},
{
"name": "Groff",
"bytes": "21080"
},
{
"name": "HTML",
"bytes": "13117230"
},
{
"name": "Makefile",
"bytes": "662"
},
{
"name": "PowerShell",
"bytes": "62360"
},
{
"name": "Python",
"bytes": "27266208"
},
{
"name": "R",
"bytes": "4949"
},
{
"name": "Ruby",
"bytes": "19"
},
{
"name": "Shell",
"bytes": "5147"
},
{
"name": "Visual Basic",
"bytes": "481"
}
],
"symlink_target": ""
}
|
from django.core.exceptions import ImproperlyConfigured
from django.core.management.base import BaseCommand, CommandError
from django.core import serializers
from django.db import router, DEFAULT_DB_ALIAS
from django.utils.datastructures import SortedDict
from optparse import make_option
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('--format', default='json', dest='format',
help='Specifies the output serialization format for fixtures.'),
make_option('--indent', default=None, dest='indent', type='int',
help='Specifies the indent level to use when pretty-printing output'),
make_option('--database', action='store', dest='database',
default=DEFAULT_DB_ALIAS, help='Nominates a specific database to dump '
'fixtures from. Defaults to the "default" database.'),
        make_option('-e', '--exclude', dest='exclude', action='append', default=[],
help='An appname or appname.ModelName to exclude (use multiple --exclude to exclude multiple apps/models).'),
make_option('-n', '--natural', action='store_true', dest='use_natural_keys', default=False,
help='Use natural keys if they are available.'),
make_option('-a', '--all', action='store_true', dest='use_base_manager', default=False,
help="Use Django's base manager to dump all models stored in the database, including those that would otherwise be filtered or modified by a custom manager."),
)
help = ("Output the contents of the database as a fixture of the given "
"format (using each model's default manager unless --all is "
"specified).")
args = '[appname appname.ModelName ...]'
def handle(self, *app_labels, **options):
from django.db.models import get_app, get_apps, get_model
format = options.get('format')
indent = options.get('indent')
using = options.get('database')
excludes = options.get('exclude')
show_traceback = options.get('traceback')
use_natural_keys = options.get('use_natural_keys')
use_base_manager = options.get('use_base_manager')
excluded_apps = set()
excluded_models = set()
for exclude in excludes:
if '.' in exclude:
app_label, model_name = exclude.split('.', 1)
model_obj = get_model(app_label, model_name)
if not model_obj:
raise CommandError('Unknown model in excludes: %s' % exclude)
excluded_models.add(model_obj)
else:
try:
app_obj = get_app(exclude)
excluded_apps.add(app_obj)
except ImproperlyConfigured:
raise CommandError('Unknown app in excludes: %s' % exclude)
if len(app_labels) == 0:
app_list = SortedDict((app, None) for app in get_apps() if app not in excluded_apps)
else:
app_list = SortedDict()
for label in app_labels:
try:
app_label, model_label = label.split('.')
try:
app = get_app(app_label)
except ImproperlyConfigured:
raise CommandError("Unknown application: %s" % app_label)
if app in excluded_apps:
continue
model = get_model(app_label, model_label)
if model is None:
raise CommandError("Unknown model: %s.%s" % (app_label, model_label))
if app in app_list.keys():
if app_list[app] and model not in app_list[app]:
app_list[app].append(model)
else:
app_list[app] = [model]
except ValueError:
# This is just an app - no model qualifier
app_label = label
try:
app = get_app(app_label)
except ImproperlyConfigured:
raise CommandError("Unknown application: %s" % app_label)
if app in excluded_apps:
continue
app_list[app] = None
# Check that the serialization format exists; this is a shortcut to
# avoid collating all the objects and _then_ failing.
if format not in serializers.get_public_serializer_formats():
raise CommandError("Unknown serialization format: %s" % format)
try:
serializers.get_serializer(format)
except KeyError:
raise CommandError("Unknown serialization format: %s" % format)
def get_objects():
# Collate the objects to be serialized.
for model in sort_dependencies(app_list.items()):
if model in excluded_models:
continue
if not model._meta.proxy and router.allow_syncdb(using, model):
if use_base_manager:
objects = model._base_manager
else:
objects = model._default_manager
for obj in objects.using(using).\
order_by(model._meta.pk.name).iterator():
yield obj
try:
self.stdout.ending = None
serializers.serialize(format, get_objects(), indent=indent,
use_natural_keys=use_natural_keys, stream=self.stdout)
except Exception as e:
if show_traceback:
raise
raise CommandError("Unable to serialize database: %s" % e)
def sort_dependencies(app_list):
"""Sort a list of app,modellist pairs into a single list of models.
The single list of models is sorted so that any model with a natural key
is serialized before a normal model, and any model with a natural key
    dependency has its dependencies serialized first.
"""
from django.db.models import get_model, get_models
# Process the list of models, and get the list of dependencies
model_dependencies = []
models = set()
for app, model_list in app_list:
if model_list is None:
model_list = get_models(app)
for model in model_list:
models.add(model)
# Add any explicitly defined dependencies
if hasattr(model, 'natural_key'):
deps = getattr(model.natural_key, 'dependencies', [])
if deps:
deps = [get_model(*d.split('.')) for d in deps]
else:
deps = []
# Now add a dependency for any FK or M2M relation with
# a model that defines a natural key
for field in model._meta.fields:
if hasattr(field.rel, 'to'):
rel_model = field.rel.to
if hasattr(rel_model, 'natural_key'):
deps.append(rel_model)
for field in model._meta.many_to_many:
rel_model = field.rel.to
if hasattr(rel_model, 'natural_key'):
deps.append(rel_model)
model_dependencies.append((model, deps))
model_dependencies.reverse()
# Now sort the models to ensure that dependencies are met. This
# is done by repeatedly iterating over the input list of models.
# If all the dependencies of a given model are in the final list,
# that model is promoted to the end of the final list. This process
# continues until the input list is empty, or we do a full iteration
# over the input models without promoting a model to the final list.
# If we do a full iteration without a promotion, that means there are
# circular dependencies in the list.
model_list = []
while model_dependencies:
skipped = []
changed = False
while model_dependencies:
model, deps = model_dependencies.pop()
# If all of the models in the dependency list are either already
# on the final model list, or not on the original serialization list,
            # then we've found another model with all its dependencies satisfied.
            found = all(d not in models or d in model_list for d in deps)
if found:
model_list.append(model)
changed = True
else:
skipped.append((model, deps))
if not changed:
raise CommandError("Can't resolve dependencies for %s in serialized app list." %
', '.join('%s.%s' % (model._meta.app_label, model._meta.object_name)
for model, deps in sorted(skipped, key=lambda obj: obj[0].__name__))
)
model_dependencies = skipped
return model_list
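# A minimal sketch of the promotion loop above, with plain strings standing
# in for models (hypothetical dependency data):
#
#   input:  [('A', ['B']), ('B', [])]      # A depends on B via natural keys
#   pass 1: 'B' has no unmet dependencies  -> promoted; 'A' is skipped
#   pass 2: 'B' is now in the final list   -> 'A' is promoted
#   result: ['B', 'A']                     # B is serialized before A
#
# If a full pass promotes nothing, the remaining entries form a dependency
# cycle and a CommandError is raised.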
|
{
"content_hash": "e5adddd3a488e8ed1e36a16dfc708dd5",
"timestamp": "",
"source": "github",
"line_count": 196,
"max_line_length": 171,
"avg_line_length": 46.13265306122449,
"alnum_prop": 0.5667993806679938,
"repo_name": "cobalys/django",
"id": "9059625dec2ece045e075f9e53bd004d3687321b",
"size": "9042",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "django/core/management/commands/dumpdata.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "89077"
},
{
"name": "Python",
"bytes": "8107614"
},
{
"name": "Shell",
"bytes": "4241"
}
],
"symlink_target": ""
}
|
import sys
import traceback
import time
import copy
import math
from diamond_game import *
from diamond_game.model.models import Board
import random
class AI(MVCObject):
def __init__(self, ev_manager):
MVCObject.__init__(self, ev_manager, '[ai]')
self.id = Conf.AI
self.board = Board()
self.pieces = {}
self.player = 0
self.available_locations = []
self.piece_selected_loc = (-1, -1)
self.counter = 0
@property
def get_next_event(self):
return self.event_manager.get_next_ai_event()
# noinspection PyBroadException
def run(self):
running = 1
try:
while running:
# Check if ai is required
event = self.get_next_event
# Handle events
# If quit event then terminate
if isinstance(event, QuitEvent):
print self.thread_name + ' is shutting down'
running = 0
elif isinstance(event, AIMakeMoveEvent):
# time.sleep(1)
self.counter += 1
self.make_move(event.data)
except:
e = sys.exc_info()[0]
print '>>>>>>>>>>> Fatal Error in: ' + self.thread_name
print e
traceback.print_exc()
self.post(QuitEvent(), Conf.ALL)
def make_move(self, data):
self.board = data['board']
self.player = data['player']
self.pieces = {}
self.find_pieces_and_moves()
if Conf.OPT_OPTIONS.get(Conf.AI_DIF) == Conf.OPT_EASY:
self.random_move()
elif Conf.OPT_OPTIONS.get(Conf.AI_DIF) == Conf.OPT_MEDIUM:
self.better_move()
def find_pieces_and_moves(self):
self.pieces = {}
for x in range(self.board.SIZE_BOARD_X_GRID):
for y in range(self.board.SIZE_BOARD_Y_GRID):
if self.board.get_field((x, y)).value == self.player:
self.get_available_moves(x, y)
if len(self.available_locations):
self.pieces[(x, y)] = self.available_locations
def get_available_moves(self, x, y):
self.available_locations = []
self.piece_selected_loc = (x, y)
self.add_normal_moves(self.piece_selected_loc)
self.add_jump_moves(self.piece_selected_loc)
def add_normal_moves(self, loc):
"""Add normal moves to the list of available moves.
:param loc: location(model based) points from where move is started.
Method checks piece surroundings and if it is possible to move there
adds it to available locations
"""
for direction in Conf.NORMAL_DIRECTIONS:
new_loc = (loc[0] + direction[0], loc[1] + direction[1])
            if 0 <= new_loc[0] < self.board.SIZE_BOARD_X_GRID and \
                    0 <= new_loc[1] < self.board.SIZE_BOARD_Y_GRID:
if self.board.get_field(new_loc).value == Conf.EMPTY:
self.available_locations.append(new_loc)
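    # Illustrative sketch (assumed values, not taken from Conf): on a
    # Chinese-checkers-style grid the six neighbour offsets would look
    # something like
    #     NORMAL_DIRECTIONS = [(-1, -1), (1, -1), (-2, 0),
    #                          (2, 0), (-1, 1), (1, 1)]
    # so a piece at (4, 4) would probe (3, 3), (5, 3), (2, 4), (6, 4),
    # (3, 5) and (5, 5), keeping whichever of those fields are empty.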
def add_jump_moves(self, loc):
"""Add jump moves to the list of available moves.
:param loc: location(model based) points from where move is started.
Method checks where a piece could jump and if it can then move is added
to available locations and jump moves is called recursively.
Have to be careful not to add and recurse on duplicate locations.
"""
# go through adjacent field
for i in range(len(Conf.NORMAL_DIRECTIONS)):
jump_over_loc = (loc[0] + Conf.NORMAL_DIRECTIONS[i][0], loc[1] + Conf.NORMAL_DIRECTIONS[i][1])
# if field is in bounds of data structure
            if 0 <= jump_over_loc[0] < self.board.SIZE_BOARD_X_GRID and \
                    0 <= jump_over_loc[1] < self.board.SIZE_BOARD_Y_GRID:
# if field is a piece
if not self.board.get_field(jump_over_loc).value == Conf.EMPTY and \
not self.board.get_field(jump_over_loc).value == Conf.NON_PLAYABLE:
# get jump to location
new_loc = (loc[0] + Conf.JUMP_DIRECTIONS[i][0], loc[1] + Conf.JUMP_DIRECTIONS[i][1])
# if jump to location is in bounds
                    if 0 <= new_loc[0] < self.board.SIZE_BOARD_X_GRID and \
                            0 <= new_loc[1] < self.board.SIZE_BOARD_Y_GRID:
# if jump to location is free
if self.board.get_field(new_loc).value == Conf.EMPTY:
                            # if this location doesn't already exist and is not the starting location
                            if new_loc not in self.available_locations and \
                                    new_loc != self.piece_selected_loc:
self.available_locations.append(new_loc)
self.add_jump_moves(new_loc)
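    # Assumed geometry: Conf.JUMP_DIRECTIONS[i] pairs with
    # Conf.NORMAL_DIRECTIONS[i] and is twice its length, so jumping over the
    # neighbour at loc + NORMAL_DIRECTIONS[i] lands on
    # loc + JUMP_DIRECTIONS[i] (e.g. a normal offset of (1, 1) pairs with a
    # jump offset of (2, 2)). The membership test on available_locations is
    # what terminates the recursion when jump chains form a cycle.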
def random_move(self):
random.seed()
if len(self.pieces) > 0:
start = random.choice(self.pieces.keys())
end = random.choice(self.pieces.get(start))
data = {'skip': 0, 'start': start, 'end': end}
else:
# In case there are no moves
data = {'skip': 1}
self.post(AIMovedEvent(data), Conf.MODEL)
def better_move(self):
datas = []
cpus = []
try:
if len(self.pieces) > 0:
for key, value in self.pieces.iteritems():
if len(value) > 0:
for loc in value:
cpu = CrazyCPU(key, loc, copy.deepcopy(self.board), datas, self.player)
cpus.append(cpu)
cpu.start()
                # Poll until every evaluation worker has finished; sleeping
                # between checks avoids burning a core in a tight spin.
                while any(cpu.is_alive() for cpu in cpus):
                    time.sleep(0.01)
                smallest = 1001.0
                start, end = None, None
                for a_val in datas:
                    if a_val.val < smallest:
                        smallest = a_val.val
                        start, end = a_val.loc_s, a_val.loc_e
                if start is None:
                    # No evaluation produced a usable score.
                    data = {'skip': 1}
                else:
                    data = {'skip': 0, 'start': start, 'end': end}
                if Conf.DEBUG:
                    print datas
            else:
                # In case there are no moves
                data = {'skip': 1}
self.post(AIMovedEvent(data), Conf.MODEL)
except:
e = sys.exc_info()[0]
            print '>>>>>>>>>>> Fatal Error in: CrazyCPU workers'
print e
traceback.print_exc()
# fight back
data = {'skip': 1}
self.post(AIMovedEvent(data), Conf.MODEL)
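# Design note on AI.better_move: each candidate move is scored in its own
# CrazyCPU thread against a deep copy of the board, so evaluation can never
# mutate the live game model; the shared `datas` list is append-only, which
# is safe for this use under CPython's GIL.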
class Data(object):
def __init__(self, val, loc_s, loc_e):
self.val = val
self.loc_s = loc_s
self.loc_e = loc_e
def __str__(self):
return str(self.val) + " " + str(self.loc_s) + " " + str(self.loc_e)
def __repr__(self):
return self.__str__()
class CrazyCPU(AI):
cnt = 0
def __init__(self, loc_s, loc_e, board_copy, data_list, player):
AI.__init__(self, EventManager())
self.board = board_copy
self.data_list = data_list
self.loc_s = loc_s
self.loc_e = loc_e
CrazyCPU.cnt += 1
self.cnt = CrazyCPU.cnt
if Conf.DEBUG:
print '[CRAZY CPU #' + str(self.cnt) + ' STARTED]'
self.free_home = []
self.free_home2 = []
self.player = player
self.shortest = 100
def run(self):
self.make_free_home()
self.make_move(self.loc_s, self.loc_e)
self.make_free_home2()
# if len(self.free_home2) < len(self.free_home):
# self.data_list.append(Data(0, self.loc_s, self.loc_e))
# else:
self.find_pieces_and_moves()
self.data_list.append(Data(self.find_shortest(), self.loc_s, self.loc_e))
if Conf.DEBUG:
print '[CRAZY CPU #' + str(self.cnt) + ' FINISHED]'
def make_free_home(self):
for loc in self.board.win_sectors.get(self.player):
self.free_home.append(loc)
def make_free_home2(self):
for loc in self.board.win_sectors.get(self.player):
self.free_home2.append(loc)
def make_move(self, start_loc, end_loc):
"""Change actual model values for pieces stored.
:param start_loc: location(model based).
:param end_loc: location(model based).
Method swaps pieces at given locations.
"""
temp = self.board.get_field(start_loc)
self.board.set_field(start_loc, self.board.get_field(end_loc))
self.board.set_field(end_loc, temp)
def find_shortest(self):
valr = 200
        if Conf.DEBUG:
            print self.free_home2
            print self.pieces
if len(self.pieces) > 0:
for key, value in self.pieces.iteritems():
if key not in self.board.win_sectors[self.player]:
for home_loc in self.free_home2:
dx = abs(key[0]-home_loc[0])
dy = abs(key[1]-home_loc[1])
val = math.sqrt(dx*dx+dy*dy)
                        if Conf.DEBUG:
                            print val, self.loc_s, self.loc_e
                        # A piece sitting one diagonal step (distance
                        # ~sqrt(2)) from a free home field is penalised so
                        # the AI does not reward shuffling on the home border.
                        if 1.41 < val < 1.42:
                            val = 12
                        # Keep the shortest distance to any free home field.
                        valr = min(valr, val)
return valr
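# Worked example of the heuristic (illustrative numbers): a piece at (6, 10)
# with a free home field at (9, 13) yields sqrt(3*3 + 3*3) ~= 4.24, while a
# piece one diagonal step from home (distance ~1.414) is bumped to 12, so a
# board state that merely shuffles pieces on the home border scores worse.
# better_move() then plays the candidate whose score is lowest.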
|
{
"content_hash": "fda3ee68a00b22b4ffc0da49dee5884f",
"timestamp": "",
"source": "github",
"line_count": 249,
"max_line_length": 106,
"avg_line_length": 38.48192771084337,
"alnum_prop": 0.5087664370695053,
"repo_name": "batousik/Python2-Diamond",
"id": "199f63ea52c3cb6e16363fab815b5ef991a71624",
"size": "9582",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "diamond_game/controller/ai.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "113209"
},
{
"name": "TeX",
"bytes": "2526"
}
],
"symlink_target": ""
}
|