| text (string, 4 – 1.02M chars) | meta (dict) |
|---|---|
# [START dialogflow_v2_generated_EntityTypes_GetEntityType_sync]
from google.cloud import dialogflow_v2


def sample_get_entity_type():
    # Create a client
    client = dialogflow_v2.EntityTypesClient()

    # Initialize request argument(s)
    request = dialogflow_v2.GetEntityTypeRequest(
        name="name_value",
    )

    # Make the request
    response = client.get_entity_type(request=request)

    # Handle the response
    print(response)
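
# Note (added commentary; the resource-name format is an assumption about the
# Dialogflow ES v2 API, not part of the generated sample): "name_value" above is
# a placeholder for a fully-qualified entity type name, e.g.
#   projects/<PROJECT_ID>/agent/entityTypes/<ENTITY_TYPE_ID>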
# [END dialogflow_v2_generated_EntityTypes_GetEntityType_sync]
|
{
"content_hash": "6ffa3002a8f29ffe08bb664928736514",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 62,
"avg_line_length": 23.736842105263158,
"alnum_prop": 0.70509977827051,
"repo_name": "googleapis/python-dialogflow",
"id": "ba8abfec23f02c37d808347a0f755f09b1bf17f8",
"size": "1840",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/generated_samples/dialogflow_v2_generated_entity_types_get_entity_type_sync.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "11184005"
},
{
"name": "Shell",
"bytes": "30672"
}
],
"symlink_target": ""
}
|
'''
## License
The MIT License (MIT)
GrovePi for the Raspberry Pi: an open source platform for connecting Grove Sensors to the Raspberry Pi.
Copyright (C) 2015 Dexter Industries
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
# Connect the CO2 sensor to the RPISER port on the GrovePi
import grove_co2_lib
import time

co2 = grove_co2_lib.CO2()

while True:
    [ppm, temp] = co2.read()
    print("CO2 Conc: %d ppm\t Temp: %d C" % (ppm, temp))
    time.sleep(1)
|
{
"content_hash": "539c479f5fce2c14dcc1b5f8a9aa2bc2",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 103,
"avg_line_length": 37.71052631578947,
"alnum_prop": 0.7794836008374041,
"repo_name": "karan259/GrovePi",
"id": "3caa1e8346bf0e678f2ef28e20c4308dec226f87",
"size": "1825",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Software/Python/grove_co2_sensor/grove_co2_example.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "144838"
},
{
"name": "C",
"bytes": "212753"
},
{
"name": "C#",
"bytes": "113353"
},
{
"name": "C++",
"bytes": "431714"
},
{
"name": "Go",
"bytes": "3646"
},
{
"name": "Java",
"bytes": "24732"
},
{
"name": "JavaScript",
"bytes": "37808"
},
{
"name": "Makefile",
"bytes": "7544"
},
{
"name": "Perl",
"bytes": "96047"
},
{
"name": "Processing",
"bytes": "1304"
},
{
"name": "Python",
"bytes": "629432"
},
{
"name": "Roff",
"bytes": "14045"
},
{
"name": "Shell",
"bytes": "30999"
}
],
"symlink_target": ""
}
|
from django.conf import settings

from ionyweb.website.rendering.utils import render_view


def index_view(request, plugin):
    separator = plugin.get_separator()
    links = plugin.links_enabled

    pages = []
    # Ancestors
    if plugin.ancestors_displayed:
        pages = list(request.page.get_ancestors())

    breadcrumb_items = []
    for page in pages:
        if links:
            item_str = u'<a href="%s">%s</a>' % (page.get_absolute_url(), page.title)
        else:
            item_str = page.title
        breadcrumb_items.append(item_str)

    # Add the current page, rendered as a link if the option is enabled
    if links:
        item_str = u'<a href="%s">%s</a>' % (request.page.get_absolute_url(), request.page.title)
        breadcrumb_items.append(item_str)
    else:
        breadcrumb_items.append(request.page.title)

    if hasattr(request, settings.BREADCRUMB_OBJECT_TITLE):
        breadcrumb_items.append(getattr(request, settings.BREADCRUMB_OBJECT_TITLE))

    return render_view(plugin.get_templates('plugin_breadcrumb/index.html'),
                       {'object': plugin,
                        'items': breadcrumb_items,
                        'separator': separator})
|
{
"content_hash": "e55cfeec084923af9e02c16a09968876",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 97,
"avg_line_length": 34.857142857142854,
"alnum_prop": 0.6180327868852459,
"repo_name": "makinacorpus/ionyweb",
"id": "f9bbdd925a7bd8f93e2bc6557813e63bec63d4f5",
"size": "1244",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ionyweb/plugin_app/plugin_breadcrumb/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "124754"
},
{
"name": "JavaScript",
"bytes": "260880"
},
{
"name": "Python",
"bytes": "1024305"
}
],
"symlink_target": ""
}
|
"""Common exceptions for ADB and Fastboot."""
class CommonUsbError(Exception):
"""Base class for usb communication errors."""
class FormatMessageWithArgumentsException(CommonUsbError):
"""Exception that both looks good and is functional.
Okay, not that kind of functional, it's still a class.
This interpolates the message with the given arguments to make it
human-readable, but keeps the arguments in case other code try-excepts it.
"""
def __init__(self, message, *args):
message %= args
super(FormatMessageWithArgumentsException, self).__init__(message, *args)
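
# Usage sketch (added commentary, not part of the original module): the message
# is interpolated eagerly, while the raw arguments stay on .args for code that
# try-excepts the error and wants to inspect them.
#
#   try:
#       raise DeviceNotFoundError('No device with serial %s', 'emulator-5554')
#   except DeviceNotFoundError as e:
#       e.args  # ('No device with serial emulator-5554', 'emulator-5554')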

class DeviceNotFoundError(FormatMessageWithArgumentsException):
    """Device isn't on USB."""


class DeviceAuthError(FormatMessageWithArgumentsException):
    """Device authentication failed."""


class LibusbWrappingError(CommonUsbError):
    """Wraps libusb1 errors while keeping its original usefulness.

    Attributes:
      usb_error: Instance of libusb1.USBError
    """

    def __init__(self, msg, usb_error):
        super(LibusbWrappingError, self).__init__(msg)
        self.usb_error = usb_error

    def __str__(self):
        return '%s: %s' % (
            super(LibusbWrappingError, self).__str__(), str(self.usb_error))


class WriteFailedError(LibusbWrappingError):
    """Raised when the device doesn't accept our command."""


class ReadFailedError(LibusbWrappingError):
    """Raised when the device doesn't respond to our commands."""


class AdbCommandFailureException(Exception):
    """ADB command returned a FAIL."""


class AdbOperationException(Exception):
    """Failed to communicate over ADB with device after multiple retries."""


class TcpTimeoutException(FormatMessageWithArgumentsException):
    """TCP connection timed out in the timeout given."""
|
{
"content_hash": "ad2fe95a033130433bddf33d822d4f10",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 81,
"avg_line_length": 28.666666666666668,
"alnum_prop": 0.7098560354374308,
"repo_name": "google/python-adb",
"id": "54f7e0b4b7afd41f09215e0de3ca6a5280718ed6",
"size": "2402",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "adb/usb_exceptions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "123917"
}
],
"symlink_target": ""
}
|
import os
import shutil
from collections import defaultdict

import cv2
import numpy as np
import yaml

from cv_helpers import show, get_classifier_directories, ls
from sides.serial_number_cv import SERIAL_IS_ZERO_CLASSIFIER_DIR
from modules.password_cv import PASSWORD_LETTER_CLASSIFIER_DIR

NUM_MODULE_POSITIONS = 6
MAX_INDEX = 374
# We'll keep one photo in every group of TEST_DATA_HOLDOUT_FREQUENCY to be test data.
TEST_DATA_HOLDOUT_FREQUENCY = 10
MAX_TRAINING_INDEX = MAX_INDEX * 9 / 10
# MAX_TRAINING_INDEX_STRING = "{0:04d}".format(MAX_TRAINING_INDEX)
MODULE_NAME_FOR_OFFSET = ["top-left", "top-middle", "top-right", "bottom-left", "bottom-middle", "bottom-right"]


def generate_vocab(representative_files, vocab_path):
    features_unclustered = None
    detector = cv2.SIFT()
    for file_path in representative_files:
        screenshot = cv2.imread(file_path)
        keypoints, descriptor = detector.detectAndCompute(screenshot, None)
        if features_unclustered is None:
            features_unclustered = descriptor
        else:
            features_unclustered = np.concatenate((features_unclustered, descriptor))
    bow_trainer = cv2.BOWKMeansTrainer(200)
    vocab = bow_trainer.cluster(features_unclustered)
    with open(vocab_path, "w") as f:
        np.save(f, vocab)
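
# Pipeline note (added commentary, not from the original repo): this is the
# classic OpenCV 2.4 bag-of-visual-words recipe. SIFT descriptors pooled from
# representative images are k-means clustered into a 200-word vocabulary
# (BOWKMeansTrainer above); extract_features below then encodes each image as a
# histogram over those visual words, which is what the SVM is trained on.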

def extract_features(vocab_path, image_and_features_paths):
    with open(vocab_path, "rb") as f:
        vocab = np.load(f)

    # FLANN parameters
    FLANN_INDEX_KDTREE = 0
    # FLANN_INDEX_LSH = 6
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    # index_params = dict(algorithm=FLANN_INDEX_LSH,
    #                     table_number=20,
    #                     key_size=10,
    #                     multi_probe_level=2)
    search_params = dict(checks=50)  # or pass empty dictionary
    matcher = cv2.FlannBasedMatcher(index_params, search_params)
    detector = cv2.SIFT()
    extractor = cv2.DescriptorExtractor_create("SIFT")
    bow_de = cv2.BOWImgDescriptorExtractor(extractor, matcher)
    bow_de.setVocabulary(vocab)
    for image_path, feature_path in image_and_features_paths:
        screenshot = cv2.imread(image_path)
        keypoints = detector.detect(screenshot)
        descriptor = bow_de.compute(screenshot, keypoints)
        with open(feature_path, "w") as f:
            np.save(f, descriptor)


def cluster_features(num_clusters, feature_and_copy_paths):
    names = []
    features = None
    for feature_path, src_path, dst_path_template in feature_and_copy_paths:
        with open(feature_path) as f:
            names.append((src_path, dst_path_template))
            feature = np.load(f)
        if features is None:
            features = feature
        else:
            features = np.concatenate((features, feature))
    tc = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 1000, 0.0001)
    retval, best_labels, centers = cv2.kmeans(features, num_clusters, tc, 10, cv2.KMEANS_PP_CENTERS)
    for i, label in enumerate(best_labels):
        src_path, dst_path_template = names[i]
        dst_path = dst_path_template % label
        # print "Copying from %s to %s" % (src_path, dst_path)
        if not os.path.exists(os.path.dirname(dst_path)):
            os.makedirs(os.path.dirname(dst_path))
        shutil.copyfile(src_path, dst_path)


def translate_data(labelled_photos_dir, features_dir, svm_data_dir):
    int_to_label = {}
    training_data = None
    training_labels = np.empty([0, 1], dtype=int)
    testing_data = None
    testing_labels = np.empty([0, 1], dtype=int)
    num_loaded = 0
    for label_int, label_str in enumerate(os.listdir(labelled_photos_dir)):
        int_to_label[label_int] = label_str
        label_dir = os.path.join(labelled_photos_dir, label_str)
        if not os.path.isdir(label_dir):
            continue
        for file_name in os.listdir(label_dir):
            without_extension = '.'.join(file_name.split('.')[:-1])
            if not without_extension:
                continue
            features_path = os.path.join(features_dir, without_extension + ".npy")
            with open(features_path, "r") as f:
                features = np.load(f)
            # Determine if testing or training
            if num_loaded % TEST_DATA_HOLDOUT_FREQUENCY != 0:
                if training_data is None:
                    training_data = features
                else:
                    training_data = np.concatenate((training_data, features))
                training_labels = np.append(training_labels, label_int)
            else:
                if testing_data is None:
                    testing_data = features
                else:
                    testing_data = np.concatenate((testing_data, features))
                testing_labels = np.append(testing_labels, label_int)
            num_loaded += 1
    print "Loaded %s" % num_loaded
    # Bug fix: the original read `if not os.path.isdir:`, which tests the
    # function object itself (always truthy), so the directory was never created.
    if not os.path.isdir(svm_data_dir):
        os.makedirs(svm_data_dir)
    with open(os.path.join(svm_data_dir, "training_data"), "w") as f:
        np.save(f, training_data)
    with open(os.path.join(svm_data_dir, "training_labels"), "w") as f:
        np.save(f, training_labels)
    with open(os.path.join(svm_data_dir, "testing_data"), "w") as f:
        np.save(f, testing_data)
    with open(os.path.join(svm_data_dir, "testing_labels"), "w") as f:
        np.save(f, testing_labels)


def train_classifier(svm_data_dir):
    with open(os.path.join(svm_data_dir, "training_data")) as f:
        training_data = np.load(f)
    with open(os.path.join(svm_data_dir, "training_labels")) as f:
        training_labels = np.load(f)
    with open(os.path.join(svm_data_dir, "testing_data")) as f:
        testing_data = np.load(f)
    with open(os.path.join(svm_data_dir, "testing_labels")) as f:
        testing_labels = np.load(f)
    svm = cv2.SVM()
    svm_params = dict(kernel_type=cv2.SVM_LINEAR,
                      svm_type=cv2.SVM_C_SVC)
    svm.train_auto(training_data, training_labels, None, None, params=svm_params, k_fold=50)
    svm.save(os.path.join(svm_data_dir, 'svm_data.dat'))
    results = svm.predict_all(testing_data)
    mask = results == testing_labels.reshape((-1, 1))
    correct = np.count_nonzero(mask)
    print correct * 100.0 / results.size


def run_test(svm_data_dir):
    with open(os.path.join(svm_data_dir, "testing_data")) as f:
        testing_data = np.load(f)
    with open(os.path.join(svm_data_dir, "testing_labels")) as f:
        testing_labels = np.load(f)
    labels = defaultdict(lambda: 0)
    for label in testing_labels:
        labels[label] += 1
    print labels
    svm = cv2.SVM()
    svm.load(os.path.join(svm_data_dir, 'svm_data.dat'))
    results = svm.predict_all(testing_data)
    mask = results == testing_labels.reshape((-1, 1))
    correct = np.count_nonzero(mask)
    print "Accuracy on test set"
    print correct * 100.0 / results.size


def save_label_mappings(labelled_photos_dir, svm_data_dir):
    int_to_label = {}
    for label_int, label_str in enumerate(os.listdir(labelled_photos_dir)):
        label_dir = os.path.join(labelled_photos_dir, label_str)
        if not os.path.isdir(label_dir):
            continue
        int_to_label[label_int] = label_str
    with open(os.path.join(svm_data_dir, "label_mappings.yml"), "w") as f:
        yaml.dump(int_to_label, f)


def load_mat(mat_path):
    skip_lines = 2
    with open(mat_path, 'r') as f:
        for i in range(skip_lines):
            _ = f.readline()
        data = yaml.load(f)
    mat = np.array(data["data"], dtype=data['dt'])
    mat.resize(data["rows"], data["cols"])
    return mat
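
# Note (added commentary; an assumption about the file format): load_mat appears
# to read a matrix dumped by OpenCV's FileStorage in YAML mode. The two skipped
# lines would be the "%YAML:1.0" directive (which PyYAML cannot parse) and the
# document marker; the remainder is plain YAML with "rows", "cols", "dt" and
# "data" keys.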

def train_module_classifier():
    vocab_path, unlabelled_dir, labelled_dir, features_dir, svm_data_dir = \
        get_classifier_directories(PASSWORD_LETTER_CLASSIFIER_DIR)

    def module_vocab_paths():
        current_module_offset = 0
        for i in range(MAX_INDEX + 1):
            if i % 3 == 0:
                continue
            file_name = "{:04d}-full-{}.png".format(i, MODULE_NAME_FOR_OFFSET[current_module_offset])
            file_path = os.path.join(unlabelled_dir, file_name)
            current_module_offset = (current_module_offset + 1) % NUM_MODULE_POSITIONS
            yield file_path

    def image_and_features_path():
        for screenshot_set in range(MAX_INDEX + 1):
            if screenshot_set % 3 == 0:
                continue
            for module_offset in range(NUM_MODULE_POSITIONS):
                file_name = "{:04d}-full-{}".format(screenshot_set, MODULE_NAME_FOR_OFFSET[module_offset])
                screenshot_path = os.path.join(unlabelled_dir, file_name + ".png")
                feature_path = os.path.join(features_dir, file_name + ".npy")
                yield screenshot_path, feature_path


def cluster_images_pipeline(classifier_dir, num_clusters):
    vocab_path, unlabelled_dir, labelled_dir, features_dir, svm_data_dir = \
        get_classifier_directories(classifier_dir)

    def representative_image_paths():
        for i, file_name in enumerate(os.listdir(unlabelled_dir)):
            if file_name == ".DS_Store":
                continue
            # Only want some of them
            # if i % 10 != 0:
            #     continue
            yield os.path.join(unlabelled_dir, file_name)

    def image_and_feature_paths():
        for file_name in os.listdir(unlabelled_dir):
            if file_name == ".DS_Store":
                continue
            without_ext, _ = os.path.splitext(file_name)
            letter_path = os.path.join(unlabelled_dir, file_name)
            feature_path = os.path.join(features_dir, without_ext + ".npy")
            yield letter_path, feature_path

    def feature_and_copy_paths():
        for file_name in os.listdir(unlabelled_dir):
            if file_name == ".DS_Store":
                continue
            without_ext, _ = os.path.splitext(file_name)
            feature_path = os.path.join(features_dir, without_ext + ".npy")
            src_path = os.path.join(unlabelled_dir, file_name)
            dst_path_template = os.path.join(labelled_dir, "%s", file_name)
            yield feature_path, src_path, dst_path_template

    print "Generating vocab"
    generate_vocab(representative_image_paths(), vocab_path)
    print "Extracting features"
    extract_features(vocab_path, image_and_feature_paths())
    print "Clustering images"
    cluster_features(num_clusters, feature_and_copy_paths())


def train_classifier_pipeline(classifier_dir):
    vocab_path, unlabelled_dir, labelled_dir, features_dir, svm_data_dir = get_classifier_directories(classifier_dir)
    print "Translating data"
    translate_data(labelled_dir, features_dir, svm_data_dir)
    print "Training classifier"
    train_classifier(svm_data_dir)
    print "Testing classifier"
    run_test(svm_data_dir)
    print "Saving label mappings"
    save_label_mappings(labelled_dir, svm_data_dir)


def manually_group_images(classifier_dir):
    vocab_path, unlabelled_dir, labelled_dir, features_dir, svm_data_dir = get_classifier_directories(classifier_dir)
    for file_path in ls(unlabelled_dir):
        folder_name = chr(show(cv2.imread(file_path)))
        folder = os.path.join(labelled_dir, folder_name)
        if not os.path.exists(folder):
            os.makedirs(folder)
        dst = os.path.join(labelled_dir, folder, os.path.basename(file_path))
        shutil.copyfile(file_path, dst)


def main():
    classifier_dir = SERIAL_IS_ZERO_CLASSIFIER_DIR
    # classifier_dir = os.path.join(MODULE_SPECIFIC_DIR, "memory", "tmp")
    # cluster_images_pipeline(classifier_dir, 36)
    train_classifier_pipeline(classifier_dir)
    # manually_group_images(classifier_dir)


if __name__ == '__main__':
    main()
|
{
"content_hash": "b9bf761557d96e7b8f5a4ffb3da4735f",
"timestamp": "",
"source": "github",
"line_count": 312,
"max_line_length": 117,
"avg_line_length": 37.61538461538461,
"alnum_prop": 0.6255112474437627,
"repo_name": "FuegoFro/KeepTalkingBot",
"id": "d7f249d38f980344db6cb96e89fc5bb02d7f1f7c",
"size": "11736",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/train_classifier.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "187741"
}
],
"symlink_target": ""
}
|
from owslib.waterml.wml import SitesResponse, TimeSeriesResponse, VariablesResponse, namespaces
from owslib.etree import etree, ElementType


def ns(namespace):
    return namespaces.get(namespace)


class WaterML_1_1(object):
    def __init__(self, element):
        if isinstance(element, ElementType):
            self._root = element
        else:
            self._root = etree.fromstring(element)

        if hasattr(self._root, 'getroot'):
            self._root = self._root.getroot()

        self._ns = 'wml1.1'

    @property
    def response(self):
        try:
            if self._root.tag == str(ns(self._ns) + 'variablesResponse'):
                return VariablesResponse(self._root, self._ns)
            elif self._root.tag == str(ns(self._ns) + 'timeSeriesResponse'):
                return TimeSeriesResponse(self._root, self._ns)
            elif self._root.tag == str(ns(self._ns) + 'sitesResponse'):
                return SitesResponse(self._root, self._ns)
        except Exception:
            raise
        raise ValueError('Unable to determine response type from xml')
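
# Usage sketch (added commentary; "sites.xml" is a hypothetical WaterML 1.1
# document): the response property dispatches on the root tag.
#
#   with open('sites.xml', 'rb') as f:
#       parsed = WaterML_1_1(f.read()).response
#   # -> SitesResponse, TimeSeriesResponse or VariablesResponse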
|
{
"content_hash": "e3b880d2626ea4b27f966cf137e3700d",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 95,
"avg_line_length": 32.35294117647059,
"alnum_prop": 0.6063636363636363,
"repo_name": "ocefpaf/OWSLib",
"id": "a963ac8cc08fe01e87928b6522ef6e627733b469",
"size": "1100",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "owslib/waterml/wml11.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "67"
},
{
"name": "Python",
"bytes": "1023769"
}
],
"symlink_target": ""
}
|
"""
Different kinds of sliding windows
"""
from __future__ import absolute_import, division, print_function
from .base_sliding_window import BaseSlidingWindow
from .delayed_sliding_window import DelayedSlidingWindow
from .repeated_sliding_window import RepeatedSlidingWindow
from .sliding_window import SlidingWindow
|
{
"content_hash": "18a6a007e01d4efca79dfee98310991d",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 64,
"avg_line_length": 32.3,
"alnum_prop": 0.8142414860681114,
"repo_name": "w495/python-video-shot-detector",
"id": "dbbbfbf2e08e76dc001cf31e3d17b64c790ba048",
"size": "347",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "shot_detector/utils/collections/sliding_windows/__init__.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "51"
},
{
"name": "Makefile",
"bytes": "1751"
},
{
"name": "Python",
"bytes": "599048"
},
{
"name": "Shell",
"bytes": "89"
}
],
"symlink_target": ""
}
|
from PyQt5 import Qt


class ColorDelegate(Qt.QStyledItemDelegate):
    def createEditor(self, parent, option, midx):
        if midx.isValid():
            e = Qt.QColorDialog(parent)
            e.setOption(Qt.QColorDialog.ShowAlphaChannel)
            e.setOption(Qt.QColorDialog.DontUseNativeDialog)
            # Due to a modal event loop peculiarity, the .result() value for a modal dialog created
            # by a delegate is not updated upon dialog dismissal as it should be. The following
            # attempts to install a workaround for this oddity; if it can not, OK/Cancel buttons
            # are omitted from the color picker dialog. In that case, the dialog may be dismissed
            # by clicking anywhere in the application outside the dialog, and the selected color
            # is always used.
            bb = e.findChild(Qt.QDialogButtonBox)
            if bb is None:
                e.setOption(Qt.QColorDialog.NoButtons)
            else:
                bb.accepted.connect(lambda: e.setResult(1))
            return e

    def setEditorData(self, e, midx):
        d = midx.data(Qt.Qt.DecorationRole)
        if isinstance(d, Qt.QVariant):
            d = d.value()
        e.setCurrentColor(d)

    def setModelData(self, e, model, midx):
        has_bb = not e.testOption(Qt.QColorDialog.NoButtons)
        if (has_bb and e.result()) or not has_bb:
            color = e.currentColor()
            model.setData(midx, (color.redF(), color.greenF(), color.blueF(), color.alphaF()))

    def paint(self, painter, option, midx):
        style = None
        if option.widget is not None:
            style = option.widget.style()
        if style is None:
            style = Qt.QApplication.style()
        # Fill the cell background in the *exact same manner* as the default delegate. This is the
        # simplest way to get the correct cell background in all circumstances, including while
        # dragging a row.
        style.drawPrimitive(Qt.QStyle.PE_PanelItemViewItem, option, painter, option.widget)
        d = midx.data(Qt.Qt.DecorationRole)
        if isinstance(d, Qt.QVariant):
            d = d.value()
        swatch_rect = Qt.QStyle.alignedRect(
            option.direction,
            Qt.Qt.AlignCenter,
            option.decorationSize,
            option.rect)
        painter.save()
        try:
            painter.setPen(Qt.QPen(option.palette.color(Qt.QPalette.Normal, Qt.QPalette.Mid)))
            painter.setBrush(Qt.QBrush(d))
            painter.drawRect(swatch_rect)
        finally:
            painter.restore()
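
# Usage sketch (added commentary; `view` and `COLOR_COLUMN` are hypothetical
# names): attach the delegate to one column of an item view so that editing a
# cell opens the color picker and the cell paints as a color swatch.
#
#   view.setItemDelegateForColumn(COLOR_COLUMN, ColorDelegate(view))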
|
{
"content_hash": "77a721895df3a0142669592623d1b714",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 138,
"avg_line_length": 46.67272727272727,
"alnum_prop": 0.6244643552785353,
"repo_name": "erikhvatum/RisWidget",
"id": "9ccec5dff81b4590ff36ccf1eeaa6c62718a2463",
"size": "3730",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ris_widget/qdelegates/color_delegate.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "276275"
},
{
"name": "GLSL",
"bytes": "7012"
},
{
"name": "Python",
"bytes": "600996"
},
{
"name": "QML",
"bytes": "2311"
}
],
"symlink_target": ""
}
|
"""UK Environment Agency Flood Monitoring Integration."""
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import Platform
from homeassistant.core import HomeAssistant
from .const import DOMAIN
PLATFORMS = [Platform.SENSOR]
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up flood monitoring sensors for this config entry."""
hass.data.setdefault(DOMAIN, {})
hass.config_entries.async_setup_platforms(entry, PLATFORMS)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Unload flood monitoring sensors."""
return await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
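
# Compatibility note (added commentary, not from this file): in later Home
# Assistant releases async_setup_platforms was deprecated in favour of
# `await hass.config_entries.async_forward_entry_setups(entry, PLATFORMS)`.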
|
{
"content_hash": "dcf9433b94250ee5f05271b99b5f8b71",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 78,
"avg_line_length": 35.95,
"alnum_prop": 0.7677329624478443,
"repo_name": "rohitranjan1991/home-assistant",
"id": "e7b6cd88092478122aec32d4e1a96a9503fd470e",
"size": "719",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "homeassistant/components/eafm/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1017265"
},
{
"name": "Python",
"bytes": "1051086"
},
{
"name": "Shell",
"bytes": "3946"
}
],
"symlink_target": ""
}
|
import os
from pprint import pformat

from django import http
from django.core import signals
from django.core.handlers.base import BaseHandler
from django.core.urlresolvers import set_script_prefix
from django.utils import datastructures
from django.utils.encoding import force_unicode, smart_str

# NOTE: do *not* import settings (or any module which eventually imports
# settings) until after ModPythonHandler has been called; otherwise os.environ
# won't be set up correctly (with respect to settings).


class ModPythonRequest(http.HttpRequest):
    def __init__(self, req):
        self._req = req
        # FIXME: This isn't ideal. The request URI may be encoded (it's
        # non-normalized) slightly differently to the "real" SCRIPT_NAME
        # and PATH_INFO values. This causes problems when we compute path_info,
        # below. For now, don't use script names that will be subject to
        # encoding/decoding.
        self.path = force_unicode(req.uri)
        root = req.get_options().get('django.root', '')
        self.django_root = root
        # req.path_info isn't necessarily computed correctly in all
        # circumstances (it's out of mod_python's control a bit), so we use
        # req.uri and some string manipulations to get the right value.
        if root and req.uri.startswith(root):
            self.path_info = force_unicode(req.uri[len(root):])
        else:
            self.path_info = self.path
        if not self.path_info:
            # Django prefers empty paths to be '/', rather than '', to give us
            # a common start character for URL patterns. So this is a little
            # naughty, but also pretty harmless.
            self.path_info = u'/'

    def __repr__(self):
        # Since this is called as part of error handling, we need to be very
        # robust against potentially malformed input.
        try:
            get = pformat(self.GET)
        except:
            get = '<could not parse>'
        try:
            post = pformat(self.POST)
        except:
            post = '<could not parse>'
        try:
            cookies = pformat(self.COOKIES)
        except:
            cookies = '<could not parse>'
        try:
            meta = pformat(self.META)
        except:
            meta = '<could not parse>'
        return smart_str(u'<ModPythonRequest\npath:%s,\nGET:%s,\nPOST:%s,\nCOOKIES:%s,\nMETA:%s>' %
                         (self.path, unicode(get), unicode(post),
                          unicode(cookies), unicode(meta)))

    def get_full_path(self):
        return '%s%s' % (self.path, self._req.args and ('?' + self._req.args) or '')

    def is_secure(self):
        try:
            return self._req.is_https()
        except AttributeError:
            # mod_python < 3.2.10 doesn't have req.is_https().
            return self._req.subprocess_env.get('HTTPS', '').lower() in ('on', '1')

    def _load_post_and_files(self):
        "Populates self._post and self._files"
        if 'content-type' in self._req.headers_in and self._req.headers_in['content-type'].startswith('multipart'):
            self._raw_post_data = ''
            self._post, self._files = self.parse_file_upload(self.META, self._req)
        else:
            self._post, self._files = http.QueryDict(self.raw_post_data, encoding=self._encoding), datastructures.MultiValueDict()

    def _get_request(self):
        if not hasattr(self, '_request'):
            self._request = datastructures.MergeDict(self.POST, self.GET)
        return self._request

    def _get_get(self):
        if not hasattr(self, '_get'):
            self._get = http.QueryDict(self._req.args, encoding=self._encoding)
        return self._get

    def _set_get(self, get):
        self._get = get

    def _get_post(self):
        if not hasattr(self, '_post'):
            self._load_post_and_files()
        return self._post

    def _set_post(self, post):
        self._post = post

    def _get_cookies(self):
        if not hasattr(self, '_cookies'):
            self._cookies = http.parse_cookie(self._req.headers_in.get('cookie', ''))
        return self._cookies

    def _set_cookies(self, cookies):
        self._cookies = cookies

    def _get_files(self):
        if not hasattr(self, '_files'):
            self._load_post_and_files()
        return self._files

    def _get_meta(self):
        "Lazy loader that returns self.META dictionary"
        if not hasattr(self, '_meta'):
            self._meta = {
                'AUTH_TYPE': self._req.ap_auth_type,
                'CONTENT_LENGTH': self._req.clength,  # This may be wrong
                'CONTENT_TYPE': self._req.content_type,  # This may be wrong
                'GATEWAY_INTERFACE': 'CGI/1.1',
                'PATH_INFO': self.path_info,
                'PATH_TRANSLATED': None,  # Not supported
                'QUERY_STRING': self._req.args,
                'REMOTE_ADDR': self._req.connection.remote_ip,
                'REMOTE_HOST': None,  # DNS lookups not supported
                'REMOTE_IDENT': self._req.connection.remote_logname,
                'REMOTE_USER': self._req.user,
                'REQUEST_METHOD': self._req.method,
                'SCRIPT_NAME': self.django_root,
                'SERVER_NAME': self._req.server.server_hostname,
                'SERVER_PORT': self._req.server.port,
                'SERVER_PROTOCOL': self._req.protocol,
                'SERVER_SOFTWARE': 'mod_python'
            }
            for key, value in self._req.headers_in.items():
                key = 'HTTP_' + key.upper().replace('-', '_')
                self._meta[key] = value
        return self._meta

    def _get_raw_post_data(self):
        try:
            return self._raw_post_data
        except AttributeError:
            self._raw_post_data = self._req.read()
            return self._raw_post_data

    def _get_method(self):
        return self.META['REQUEST_METHOD'].upper()

    GET = property(_get_get, _set_get)
    POST = property(_get_post, _set_post)
    COOKIES = property(_get_cookies, _set_cookies)
    FILES = property(_get_files)
    META = property(_get_meta)
    REQUEST = property(_get_request)
    raw_post_data = property(_get_raw_post_data)
    method = property(_get_method)


class ModPythonHandler(BaseHandler):
    request_class = ModPythonRequest

    def __call__(self, req):
        # mod_python fakes the environ, and thus doesn't process SetEnv. This fixes that.
        os.environ.update(req.subprocess_env)

        # now that the environ works we can see the correct settings, so imports
        # that use settings now can work
        from django.conf import settings

        # if we need to set up middleware, now that settings works we can do it
        if self._request_middleware is None:
            self.load_middleware()

        set_script_prefix(req.get_options().get('django.root', ''))
        signals.request_started.send(sender=self.__class__)
        try:
            try:
                request = self.request_class(req)
            except UnicodeDecodeError:
                response = http.HttpResponseBadRequest()
            else:
                response = self.get_response(request)

                # Apply response middleware
                for middleware_method in self._response_middleware:
                    response = middleware_method(request, response)
                response = self.apply_response_fixes(request, response)
        finally:
            signals.request_finished.send(sender=self.__class__)

        # Convert our custom HttpResponse object back into the mod_python req.
        req.content_type = response['Content-Type']
        for key, value in response.items():
            if key != 'content-type':
                req.headers_out[str(key)] = str(value)
        for c in response.cookies.values():
            req.headers_out.add('Set-Cookie', c.output(header=''))
        req.status = response.status_code
        try:
            for chunk in response:
                req.write(chunk)
        finally:
            response.close()

        return 0  # mod_python.apache.OK


def handler(req):
    # mod_python hooks into this function.
    return ModPythonHandler()(req)
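
# Deployment sketch (added commentary, following the Django-era mod_python
# docs; "mysite" and the paths are placeholders): Apache hands requests to
# handler() via a configuration block along these lines.
#
#   <Location "/mysite/">
#       SetHandler python-program
#       PythonHandler django.core.handlers.modpython
#       SetEnv DJANGO_SETTINGS_MODULE mysite.settings
#       PythonOption django.root /mysite
#   </Location>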
|
{
"content_hash": "2c4f53907b29f922568ed0c1d1a05258",
"timestamp": "",
"source": "github",
"line_count": 210,
"max_line_length": 130,
"avg_line_length": 39.54761904761905,
"alnum_prop": 0.5794099939795304,
"repo_name": "Shrews/PyGerrit",
"id": "aa3fb23e39c32156258066470299d6bb00d8c2fc",
"size": "8305",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "webapp/django/core/handlers/modpython.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "157968"
},
{
"name": "JavaScript",
"bytes": "181665"
},
{
"name": "Python",
"bytes": "3224616"
},
{
"name": "Shell",
"bytes": "6903"
}
],
"symlink_target": ""
}
|
import os
import inspect
import datetime
import sys
import shutil

from knitter.configure import General
from knitter import logger


def get_sub_folder_names(full_path):
    return [name for name in os.listdir(full_path) if os.path.isdir(os.path.join(full_path, name))]


def get_value_from_conf(conf_file, key):
    if not os.path.exists(conf_file):
        return ""
    if not os.path.isfile(conf_file):
        return ""
    try:
        with open(conf_file, 'r') as f:
            while True:
                data = f.readline()
                if not data:
                    break
                if len(data.split('=')) < 2:
                    continue
                if data.strip()[0] == "#":
                    continue
                if data.split('=')[0].strip() == key:
                    return str(data.split('=', 1)[1].strip())
    except IOError:
        return ""

def version_info():
    from knitter import __version__ as knitter_version
    from selenium import __version__ as selenium_version
    from sys import version as python_version

    browser_version = ""
    for k, v in General.VersionInfo.items():
        browser_version += "%s %s, " % (k, v)

    return "Python %s, %sKnitter %s, Selenium %s" % (python_version.split(" ")[0],
                                                     browser_version, knitter_version, selenium_version)


def timestamp_date():
    return datetime.datetime.now().strftime("%Y-%m-%d")


def timestamp_date_and_time():
    return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")


def timestamp_for_file_name():
    return datetime.datetime.now().strftime("%Y-%m-%d_%H_%M_%S")


def exception_error():
    error_message = ""
    for i in range(len(inspect.trace())):
        error_line = """
File: %s - [%s]
Function: %s
Statement: %s
-------------------------------------------------------------------------------------------""" % \
            (inspect.trace()[i][1], inspect.trace()[i][2], inspect.trace()[i][3], inspect.trace()[i][4])
        error_message = "%s%s" % (error_message, error_line)

    error_message = """Error!
%s
%s
======================================== Error Message ====================================%s
======================================== Error Message ======================================================""" % \
        (sys.exc_info()[0], sys.exc_info()[1], error_message)

    return error_message


def delete_folder(folder_path):
    if os.path.exists(folder_path):
        shutil.rmtree(folder_path)


def delete_file_or_folder(file_full_path):
    if os.path.exists(file_full_path):
        if os.path.isdir(file_full_path):
            delete_folder(file_full_path)
        else:
            os.remove(file_full_path)


def copy(src, destination):
    try:
        if os.path.isdir(src):
            shutil.copytree(src, destination)
        else:
            shutil.copy(src, destination)
    except Exception as e:
        logger.handle_exception(e)


def create_folder(path):
    if not os.path.exists(path):
        os.makedirs(path)


if __name__ == "__main__":
    version_info()
|
{
"content_hash": "2d8a77ba7cb53ca20d8a627043a2766b",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 118,
"avg_line_length": 26.694915254237287,
"alnum_prop": 0.5146031746031746,
"repo_name": "hww712/knitter",
"id": "c8cbe34361ba83f8fb741c4081c968e07c971355",
"size": "3150",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "knitter/library.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "96001"
}
],
"symlink_target": ""
}
|
from binascii import hexlify, unhexlify
import traceback
import sys

from electrum.util import bfh, bh2u, versiontuple, UserCancelled, UserFacingException
from electrum.bitcoin import TYPE_ADDRESS, TYPE_SCRIPT
from electrum.bip32 import BIP32Node
from electrum import constants
from electrum.i18n import _
from electrum.plugin import Device
from electrum.transaction import deserialize, Transaction
from electrum.keystore import Hardware_KeyStore, is_xpubkey, parse_xpubkey
from electrum.base_wizard import ScriptTypeNotSupported

from ..hw_wallet import HW_PluginBase
from ..hw_wallet.plugin import is_any_tx_output_on_change_branch, trezor_validate_op_return_output_and_get_data


# Safe-T mini initialization methods
TIM_NEW, TIM_RECOVER, TIM_MNEMONIC, TIM_PRIVKEY = range(0, 4)


class SafeTKeyStore(Hardware_KeyStore):
    hw_type = 'safe_t'
    device = 'Safe-T mini'

    def get_derivation(self):
        return self.derivation

    def get_client(self, force_pair=True):
        return self.plugin.get_client(self, force_pair)

    def decrypt_message(self, sequence, message, password):
        raise UserFacingException(_('Encryption and decryption are not implemented by {}').format(self.device))

    def sign_message(self, sequence, message, password):
        client = self.get_client()
        address_path = self.get_derivation() + "/%d/%d" % sequence
        address_n = client.expand_path(address_path)
        msg_sig = client.sign_message(self.plugin.get_coin_name(), address_n, message)
        return msg_sig.signature

    def sign_transaction(self, tx, password):
        if tx.is_complete():
            return
        # previous transactions used as inputs
        prev_tx = {}
        # path of the xpubs that are involved
        xpub_path = {}
        for txin in tx.inputs():
            pubkeys, x_pubkeys = tx.get_sorted_pubkeys(txin)
            tx_hash = txin['prevout_hash']
            if txin.get('prev_tx') is None and not Transaction.is_segwit_input(txin):
                raise UserFacingException(_('Offline signing with {} is not supported for legacy inputs.').format(self.device))
            prev_tx[tx_hash] = txin['prev_tx']
            for x_pubkey in x_pubkeys:
                if not is_xpubkey(x_pubkey):
                    continue
                xpub, s = parse_xpubkey(x_pubkey)
                if xpub == self.get_master_public_key():
                    xpub_path[xpub] = self.get_derivation()

        self.plugin.sign_transaction(self, tx, prev_tx, xpub_path)


class SafeTPlugin(HW_PluginBase):
    # Derived classes provide:
    #
    #  class-static variables: client_class, firmware_URL, handler_class,
    #     libraries_available, libraries_URL, minimum_firmware,
    #     wallet_class, types

    firmware_URL = 'https://safe-t.io'
    libraries_URL = 'https://github.com/archos-safe-t/python-safet'
    minimum_firmware = (1, 0, 5)
    keystore_class = SafeTKeyStore
    minimum_library = (0, 1, 0)
    SUPPORTED_XTYPES = ('standard', 'p2wpkh-p2sh', 'p2wpkh', 'p2wsh-p2sh', 'p2wsh')

    MAX_LABEL_LEN = 32

    def __init__(self, parent, config, name):
        HW_PluginBase.__init__(self, parent, config, name)

        self.libraries_available = self.check_libraries_available()
        if not self.libraries_available:
            return

        from . import client
        from . import transport
        import safetlib.messages
        self.client_class = client.SafeTClient
        self.types = safetlib.messages
        self.DEVICE_IDS = ('Safe-T mini',)

        self.transport_handler = transport.SafeTTransport()
        self.device_manager().register_enumerate_func(self.enumerate)

    def get_library_version(self):
        import safetlib
        try:
            return safetlib.__version__
        except AttributeError:
            return 'unknown'

    def enumerate(self):
        devices = self.transport_handler.enumerate_devices()
        return [Device(path=d.get_path(),
                       interface_number=-1,
                       id_=d.get_path(),
                       product_key='Safe-T mini',
                       usage_page=0,
                       transport_ui_string=d.get_path())
                for d in devices]

    def create_client(self, device, handler):
        try:
            self.logger.info(f"connecting to device at {device.path}")
            transport = self.transport_handler.get_transport(device.path)
        except BaseException as e:
            self.logger.info(f"cannot connect at {device.path} {e}")
            return None

        if not transport:
            self.logger.info(f"cannot connect at {device.path}")
            return

        self.logger.info(f"connected to device at {device.path}")
        client = self.client_class(transport, handler, self)

        # Try a ping for device sanity
        try:
            client.ping('t')
        except BaseException as e:
            self.logger.info(f"ping failed {e}")
            return None

        if not client.atleast_version(*self.minimum_firmware):
            msg = (_('Outdated {} firmware for device labelled {}. Please '
                     'download the updated firmware from {}')
                   .format(self.device, client.label(), self.firmware_URL))
            self.logger.info(msg)
            if handler:
                handler.show_error(msg)
            else:
                raise UserFacingException(msg)
            return None

        return client

    def get_client(self, keystore, force_pair=True):
        devmgr = self.device_manager()
        handler = keystore.handler
        with devmgr.hid_lock:
            client = devmgr.client_for_keystore(self, handler, keystore, force_pair)
        # returns the client for a given keystore. can use xpub
        if client:
            client.used()
        return client

    def get_coin_name(self):
        return "Testnet" if constants.net.TESTNET else "Fujicoin"

    def initialize_device(self, device_id, wizard, handler):
        # Initialization method
        msg = _("Choose how you want to initialize your {}.\n\n"
                "The first two methods are secure as no secret information "
                "is entered into your computer.\n\n"
                "For the last two methods you input secrets on your keyboard "
                "and upload them to your {}, and so you should "
                "only do those on a computer you know to be trustworthy "
                "and free of malware."
                ).format(self.device, self.device)
        choices = [
            # Must be short as QT doesn't word-wrap radio button text
            (TIM_NEW, _("Let the device generate a completely new seed randomly")),
            (TIM_RECOVER, _("Recover from a seed you have previously written down")),
            (TIM_MNEMONIC, _("Upload a BIP39 mnemonic to generate the seed")),
            (TIM_PRIVKEY, _("Upload a master private key"))
        ]

        def f(method):
            import threading
            settings = self.request_safe_t_init_settings(wizard, method, self.device)
            t = threading.Thread(target=self._initialize_device_safe,
                                 args=(settings, method, device_id, wizard, handler))
            t.setDaemon(True)
            t.start()
            exit_code = wizard.loop.exec_()
            if exit_code != 0:
                # this method (initialize_device) was called with the expectation
                # of leaving the device in an initialized state when finishing.
                # signal that this is not the case:
                raise UserCancelled()

        wizard.choice_dialog(title=_('Initialize Device'), message=msg, choices=choices, run_next=f)

    def _initialize_device_safe(self, settings, method, device_id, wizard, handler):
        exit_code = 0
        try:
            self._initialize_device(settings, method, device_id, wizard, handler)
        except UserCancelled:
            exit_code = 1
        except BaseException as e:
            self.logger.exception('')
            handler.show_error(str(e))
            exit_code = 1
        finally:
            wizard.loop.exit(exit_code)

    def _initialize_device(self, settings, method, device_id, wizard, handler):
        item, label, pin_protection, passphrase_protection = settings

        if method == TIM_RECOVER:
            handler.show_error(_(
                "You will be asked to enter 24 words regardless of your "
                "seed's actual length. If you enter a word incorrectly or "
                "misspell it, you cannot change it or go back - you will need "
                "to start again from the beginning.\n\nSo please enter "
                "the words carefully!"),
                blocking=True)

        language = 'english'
        devmgr = self.device_manager()
        client = devmgr.client_by_id(device_id)
        if not client:
            raise Exception(_("The device was disconnected."))

        if method == TIM_NEW:
            strength = 64 * (item + 2)  # 128, 192 or 256
            u2f_counter = 0
            skip_backup = False
            client.reset_device(True, strength, passphrase_protection,
                                pin_protection, label, language,
                                u2f_counter, skip_backup)
        elif method == TIM_RECOVER:
            word_count = 6 * (item + 2)  # 12, 18 or 24
            client.step = 0
            client.recovery_device(word_count, passphrase_protection,
                                   pin_protection, label, language)
        elif method == TIM_MNEMONIC:
            pin = pin_protection  # It's the pin, not a boolean
            client.load_device_by_mnemonic(str(item), pin,
                                           passphrase_protection,
                                           label, language)
        else:
            pin = pin_protection  # It's the pin, not a boolean
            client.load_device_by_xprv(item, pin, passphrase_protection,
                                       label, language)

    def _make_node_path(self, xpub, address_n):
        bip32node = BIP32Node.from_xkey(xpub)
        node = self.types.HDNodeType(
            depth=bip32node.depth,
            fingerprint=int.from_bytes(bip32node.fingerprint, 'big'),
            child_num=int.from_bytes(bip32node.child_number, 'big'),
            chain_code=bip32node.chaincode,
            public_key=bip32node.eckey.get_public_key_bytes(compressed=True),
        )
        return self.types.HDNodePathType(node=node, address_n=address_n)
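
    # Note (added commentary): HDNodeType is the protobuf mirror of a BIP32
    # extended public key (depth, fingerprint, child number, chain code,
    # public key), and address_n carries the remaining derivation path below
    # that node, so the device can re-derive each key itself.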

    def setup_device(self, device_info, wizard, purpose):
        devmgr = self.device_manager()
        device_id = device_info.device.id_
        client = devmgr.client_by_id(device_id)
        if client is None:
            raise UserFacingException(_('Failed to create a client for this device.') + '\n' +
                                      _('Make sure it is in the correct state.'))
        # fixme: we should use: client.handler = wizard
        client.handler = self.create_handler(wizard)
        if not device_info.initialized:
            self.initialize_device(device_id, wizard, client.handler)
        client.get_xpub('m', 'standard')
        client.used()

    def get_xpub(self, device_id, derivation, xtype, wizard):
        if xtype not in self.SUPPORTED_XTYPES:
            raise ScriptTypeNotSupported(_('This type of script is not supported with {}.').format(self.device))
        devmgr = self.device_manager()
        client = devmgr.client_by_id(device_id)
        client.handler = wizard
        xpub = client.get_xpub(derivation, xtype)
        client.used()
        return xpub

    def get_safet_input_script_type(self, electrum_txin_type: str):
        if electrum_txin_type in ('p2wpkh', 'p2wsh'):
            return self.types.InputScriptType.SPENDWITNESS
        if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
            return self.types.InputScriptType.SPENDP2SHWITNESS
        if electrum_txin_type in ('p2pkh', ):
            return self.types.InputScriptType.SPENDADDRESS
        if electrum_txin_type in ('p2sh', ):
            return self.types.InputScriptType.SPENDMULTISIG
        raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))

    def get_safet_output_script_type(self, electrum_txin_type: str):
        if electrum_txin_type in ('p2wpkh', 'p2wsh'):
            return self.types.OutputScriptType.PAYTOWITNESS
        if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
            return self.types.OutputScriptType.PAYTOP2SHWITNESS
        if electrum_txin_type in ('p2pkh', ):
            return self.types.OutputScriptType.PAYTOADDRESS
        if electrum_txin_type in ('p2sh', ):
            return self.types.OutputScriptType.PAYTOMULTISIG
        raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))

    def sign_transaction(self, keystore, tx, prev_tx, xpub_path):
        self.prev_tx = prev_tx
        self.xpub_path = xpub_path
        client = self.get_client(keystore)
        inputs = self.tx_inputs(tx, True)
        outputs = self.tx_outputs(keystore.get_derivation(), tx)
        signatures = client.sign_tx(self.get_coin_name(), inputs, outputs,
                                    lock_time=tx.locktime, version=tx.version)[0]
        signatures = [(bh2u(x) + '01') for x in signatures]
        tx.update_signatures(signatures)

    def show_address(self, wallet, address, keystore=None):
        if keystore is None:
            keystore = wallet.get_keystore()
        if not self.show_address_helper(wallet, address, keystore):
            return
        client = self.get_client(keystore)
        if not client.atleast_version(1, 0):
            keystore.handler.show_error(_("Your device firmware is too old"))
            return
        change, index = wallet.get_address_index(address)
        derivation = keystore.derivation
        address_path = "%s/%d/%d" % (derivation, change, index)
        address_n = client.expand_path(address_path)
        xpubs = wallet.get_master_public_keys()
        if len(xpubs) == 1:
            script_type = self.get_safet_input_script_type(wallet.txin_type)
            client.get_address(self.get_coin_name(), address_n, True, script_type=script_type)
        else:
            def f(xpub):
                return self._make_node_path(xpub, [change, index])
            pubkeys = wallet.get_public_keys(address)
            # sort xpubs using the order of pubkeys
            sorted_pubkeys, sorted_xpubs = zip(*sorted(zip(pubkeys, xpubs)))
            pubkeys = list(map(f, sorted_xpubs))
            multisig = self.types.MultisigRedeemScriptType(
                pubkeys=pubkeys,
                signatures=[b''] * wallet.n,
                m=wallet.m,
            )
            script_type = self.get_safet_input_script_type(wallet.txin_type)
            client.get_address(self.get_coin_name(), address_n, True, multisig=multisig, script_type=script_type)

    def tx_inputs(self, tx, for_sig=False):
        inputs = []
        for txin in tx.inputs():
            txinputtype = self.types.TxInputType()
            if txin['type'] == 'coinbase':
                prev_hash = b"\x00" * 32
                prev_index = 0xffffffff  # signed int -1
            else:
                if for_sig:
                    x_pubkeys = txin['x_pubkeys']
                    if len(x_pubkeys) == 1:
                        x_pubkey = x_pubkeys[0]
                        xpub, s = parse_xpubkey(x_pubkey)
                        xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
                        txinputtype._extend_address_n(xpub_n + s)
                        txinputtype.script_type = self.get_safet_input_script_type(txin['type'])
                    else:
                        def f(x_pubkey):
                            xpub, s = parse_xpubkey(x_pubkey)
                            return self._make_node_path(xpub, s)
                        pubkeys = list(map(f, x_pubkeys))
                        multisig = self.types.MultisigRedeemScriptType(
                            pubkeys=pubkeys,
                            signatures=list(map(lambda x: bfh(x)[:-1] if x else b'', txin.get('signatures'))),
                            m=txin.get('num_sig'),
                        )
                        script_type = self.get_safet_input_script_type(txin['type'])
                        txinputtype = self.types.TxInputType(
                            script_type=script_type,
                            multisig=multisig
                        )
                        # find which key is mine
                        for x_pubkey in x_pubkeys:
                            if is_xpubkey(x_pubkey):
                                xpub, s = parse_xpubkey(x_pubkey)
                                if xpub in self.xpub_path:
                                    xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
                                    txinputtype._extend_address_n(xpub_n + s)
                                    break

                prev_hash = unhexlify(txin['prevout_hash'])
                prev_index = txin['prevout_n']

            if 'value' in txin:
                txinputtype.amount = txin['value']
            txinputtype.prev_hash = prev_hash
            txinputtype.prev_index = prev_index

            if txin.get('scriptSig') is not None:
                script_sig = bfh(txin['scriptSig'])
                txinputtype.script_sig = script_sig

            txinputtype.sequence = txin.get('sequence', 0xffffffff - 1)

            inputs.append(txinputtype)

        return inputs

    def tx_outputs(self, derivation, tx):

        def create_output_by_derivation():
            script_type = self.get_safet_output_script_type(info.script_type)
            if len(xpubs) == 1:
                address_n = self.client_class.expand_path(derivation + "/%d/%d" % index)
                txoutputtype = self.types.TxOutputType(
                    amount=amount,
                    script_type=script_type,
                    address_n=address_n,
                )
            else:
                address_n = self.client_class.expand_path("/%d/%d" % index)
                pubkeys = [self._make_node_path(xpub, address_n) for xpub in xpubs]
                multisig = self.types.MultisigRedeemScriptType(
                    pubkeys=pubkeys,
                    signatures=[b''] * len(pubkeys),
                    m=m)
                txoutputtype = self.types.TxOutputType(
                    multisig=multisig,
                    amount=amount,
                    address_n=self.client_class.expand_path(derivation + "/%d/%d" % index),
                    script_type=script_type)
            return txoutputtype

        def create_output_by_address():
            txoutputtype = self.types.TxOutputType()
            txoutputtype.amount = amount
            if _type == TYPE_SCRIPT:
                txoutputtype.script_type = self.types.OutputScriptType.PAYTOOPRETURN
                txoutputtype.op_return_data = trezor_validate_op_return_output_and_get_data(o)
            elif _type == TYPE_ADDRESS:
                txoutputtype.script_type = self.types.OutputScriptType.PAYTOADDRESS
                txoutputtype.address = address
            return txoutputtype

        outputs = []
        has_change = False
        any_output_on_change_branch = is_any_tx_output_on_change_branch(tx)

        for o in tx.outputs():
            _type, address, amount = o.type, o.address, o.value
            use_create_by_derivation = False

            info = tx.output_info.get(address)
            if info is not None and not has_change:
                index, xpubs, m = info.address_index, info.sorted_xpubs, info.num_sig
                on_change_branch = index[0] == 1
                # prioritise hiding outputs on the 'change' branch from user
                # because no more than one change address is allowed
                # note: ^ restriction can be removed once we require fw
                # that has https://github.com/trezor/trezor-mcu/pull/306
                if on_change_branch == any_output_on_change_branch:
                    use_create_by_derivation = True
                    has_change = True

            if use_create_by_derivation:
                txoutputtype = create_output_by_derivation()
            else:
                txoutputtype = create_output_by_address()
            outputs.append(txoutputtype)

        return outputs

    def electrum_tx_to_txtype(self, tx):
        t = self.types.TransactionType()
        if tx is None:
            # probably for segwit input and we don't need this prev txn
            return t
        d = deserialize(tx.raw)
        t.version = d['version']
        t.lock_time = d['lockTime']
        inputs = self.tx_inputs(tx)
        t._extend_inputs(inputs)
        for vout in d['outputs']:
            o = t._add_bin_outputs()
            o.amount = vout['value']
            o.script_pubkey = bfh(vout['scriptPubKey'])
        return t

    # This function is called from the TREZOR libraries (via tx_api)
    def get_tx(self, tx_hash):
        tx = self.prev_tx[tx_hash]
        return self.electrum_tx_to_txtype(tx)
|
{
"content_hash": "9d23e0e5357eb8826c9c16adf1d48d89",
"timestamp": "",
"source": "github",
"line_count": 489,
"max_line_length": 127,
"avg_line_length": 43.513292433537835,
"alnum_prop": 0.5719052542532193,
"repo_name": "fujicoin/electrum-fjc",
"id": "c71523eb0bbc2a75b39eb669f91e77f361888fc5",
"size": "21278",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "electrum/plugins/safe_t/safe_t.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "7756"
},
{
"name": "GLSL",
"bytes": "289"
},
{
"name": "Java",
"bytes": "2929"
},
{
"name": "Makefile",
"bytes": "877"
},
{
"name": "NSIS",
"bytes": "7450"
},
{
"name": "Python",
"bytes": "2346736"
},
{
"name": "Shell",
"bytes": "30493"
}
],
"symlink_target": ""
}
|
USE_PYTHON3 = True


def _RunBindingsTests(input_api, output_api):
    pardir = input_api.os_path.pardir
    cmd_name = 'run_bindings_tests.py'
    run_bindings_tests_path = input_api.os_path.join(
        input_api.PresubmitLocalPath(), *([pardir] * 4 + ['tools', cmd_name]))
    cmd = [input_api.python3_executable, run_bindings_tests_path]
    if input_api.verbose:
        print('Running ' + cmd_name)
    test_cmd = input_api.Command(
        name=cmd_name, cmd=cmd, kwargs={}, message=output_api.PresubmitError)
    return input_api.RunTests([test_cmd])


def CheckChangeOnUpload(input_api, output_api):
    return _RunBindingsTests(input_api, output_api)


def CheckChangeOnCommit(input_api, output_api):
    return _RunBindingsTests(input_api, output_api)
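
# Context note (added commentary, based on Chromium's depot_tools conventions):
# PRESUBMIT.py files are discovered automatically, so these hooks run via
# `git cl presubmit` or on `git cl upload` for changes touching this directory.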
|
{
"content_hash": "0e082bf8afc6397dd0d4c24a4cbee3dd",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 78,
"avg_line_length": 34.72727272727273,
"alnum_prop": 0.6910994764397905,
"repo_name": "nwjs/chromium.src",
"id": "7b2498bc0add74d3a9d95408444b587283741dc6",
"size": "927",
"binary": false,
"copies": "1",
"ref": "refs/heads/nw70",
"path": "third_party/blink/renderer/build/scripts/blinkbuild/PRESUBMIT.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
__all__ = [
    'map_readers', 'buffered', 'compose', 'chain', 'shuffle',
    'ComposeNotAligned', 'firstn', 'xmap_readers', 'PipeReader'
]

from threading import Thread
import subprocess

from Queue import Queue
import itertools
import random
import zlib


def map_readers(func, *readers):
    """
    Creates a data reader that outputs the return value of a function, using
    the output of each data reader as its arguments.

    :param func: function to use. The type of func should be (Sample) => Sample
    :type: callable
    :param readers: readers whose outputs will be used as arguments of func.
    :return: the created data reader.
    :rtype: callable
    """

    def reader():
        rs = []
        for r in readers:
            rs.append(r())
        for e in itertools.imap(func, *rs):
            yield e

    return reader
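
# Usage sketch (added commentary; reader_a and reader_b are hypothetical
# readers yielding numbers):
#
#   summed = map_readers(lambda a, b: a + b, reader_a, reader_b)
#   for v in summed():
#       print v  # each v is the sum of one aligned pair of samples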
def shuffle(reader, buf_size):
"""
Creates a data reader whose data output is shuffled.
Output from the iterator that created by original reader will be
buffered into shuffle buffer, and then shuffled. The size of shuffle buffer
is determined by argument buf_size.
:param reader: the original reader whose output will be shuffled.
:type reader: callable
:param buf_size: shuffle buffer size.
:type buf_size: int
:return: the new reader whose output is shuffled.
:rtype: callable
"""
def data_reader():
buf = []
for e in reader():
buf.append(e)
if len(buf) >= buf_size:
random.shuffle(buf)
for b in buf:
yield b
buf = []
if len(buf) > 0:
random.shuffle(buf)
for b in buf:
yield b
return data_reader
def chain(*readers):
"""
Creates a data reader whose output is the outputs of input data
readers chained together.
If input readers output following data entries:
[0, 0, 0]
[1, 1, 1]
[2, 2, 2]
The chained reader will output:
[0, 0, 0, 1, 1, 1, 2, 2, 2]
:param readers: input readers.
:return: the new data reader.
:rtype: callable
"""
def reader():
rs = []
for r in readers:
rs.append(r())
for e in itertools.chain(*rs):
yield e
return reader
class ComposeNotAligned(ValueError):
pass
def compose(*readers, **kwargs):
"""
Creates a data reader whose output is the combination of input readers.
If input readers output following data entries:
(1, 2) 3 (4, 5)
The composed reader will output:
(1, 2, 3, 4, 5)
:param readers: readers that will be composed together.
:param check_alignment: if True, will check if input readers are aligned
correctly. If False, will not check alignment and trailing outputs
will be discarded. Defaults to True.
:type check_alignment: bool
:return: the new data reader.
:raises ComposeNotAligned: outputs of readers are not aligned.
Will not raise when check_alignment is set to False.
"""
check_alignment = kwargs.pop('check_alignment', True)
def make_tuple(x):
if isinstance(x, tuple):
return x
else:
return (x, )
def reader():
rs = []
for r in readers:
rs.append(r())
if not check_alignment:
for outputs in itertools.izip(*rs):
yield sum(map(make_tuple, outputs), ())
else:
for outputs in itertools.izip_longest(*rs):
for o in outputs:
if o is None:
# None will be not be present if compose is aligned
raise ComposeNotAligned(
"outputs of readers are not aligned.")
yield sum(map(make_tuple, outputs), ())
return reader
def buffered(reader, size):
"""
Creates a buffered data reader.
The buffered data reader will read and save data entries into a
buffer. Reading from the buffered data reader will proceed as long
as the buffer is not empty.
:param reader: the data reader to read from.
:type reader: callable
:param size: max buffer size.
:type size: int
:returns: the buffered data reader.
"""
class EndSignal():
pass
end = EndSignal()
def read_worker(r, q):
for d in r:
q.put(d)
q.put(end)
def data_reader():
r = reader()
q = Queue(maxsize=size)
t = Thread(
target=read_worker, args=(
r,
q, ))
t.daemon = True
t.start()
e = q.get()
while e != end:
yield e
e = q.get()
return data_reader
def firstn(reader, n):
"""
Limit the max number of samples that reader could return.
:param reader: the data reader to read from.
:type reader: callable
:param n: the max number of samples that return.
:type n: int
:return: the decorated reader.
:rtype: callable
"""
# TODO(yuyang18): Check if just drop the reader, could clean the opened
# resource or not?
def firstn_reader():
for i, item in enumerate(reader()):
if i == n:
break
yield item
return firstn_reader
class XmapEndSignal():
pass
def xmap_readers(mapper, reader, process_num, buffer_size, order=False):
"""
    Use multiple threads to map samples from reader by a mapper defined by
    the user. This function also buffers the mapped output.
:param mapper: a function to map sample.
:type mapper: callable
:param reader: the data reader to read from
:type reader: callable
:param process_num: process number to handle original sample
:type process_num: int
:param buffer_size: max buffer size
:type buffer_size: int
:param order: keep the order of reader
:type order: bool
    :return: the decorated reader
:rtype: callable
"""
end = XmapEndSignal()
# define a worker to read samples from reader to in_queue
def read_worker(reader, in_queue):
for i in reader():
in_queue.put(i)
in_queue.put(end)
# define a worker to read samples from reader to in_queue with order flag
def order_read_worker(reader, in_queue):
in_order = 0
for i in reader():
in_queue.put((in_order, i))
in_order += 1
in_queue.put(end)
# define a worker to handle samples from in_queue by mapper
# and put mapped samples into out_queue
def handle_worker(in_queue, out_queue, mapper):
sample = in_queue.get()
while not isinstance(sample, XmapEndSignal):
r = mapper(sample)
out_queue.put(r)
sample = in_queue.get()
in_queue.put(end)
out_queue.put(end)
# define a worker to handle samples from in_queue by mapper
# and put mapped samples into out_queue by order
def order_handle_worker(in_queue, out_queue, mapper, out_order):
ins = in_queue.get()
while not isinstance(ins, XmapEndSignal):
order, sample = ins
r = mapper(sample)
while order != out_order[0]:
pass
out_queue.put(r)
out_order[0] += 1
ins = in_queue.get()
in_queue.put(end)
out_queue.put(end)
def xreader():
in_queue = Queue(buffer_size)
out_queue = Queue(buffer_size)
out_order = [0]
# start a read worker in a thread
target = order_read_worker if order else read_worker
t = Thread(target=target, args=(reader, in_queue))
t.daemon = True
t.start()
# start several handle_workers
target = order_handle_worker if order else handle_worker
args = (in_queue, out_queue, mapper, out_order) if order else (
in_queue, out_queue, mapper)
workers = []
for i in xrange(process_num):
worker = Thread(target=target, args=args)
worker.daemon = True
workers.append(worker)
for w in workers:
w.start()
sample = out_queue.get()
while not isinstance(sample, XmapEndSignal):
yield sample
sample = out_queue.get()
finish = 1
while finish < process_num:
sample = out_queue.get()
if isinstance(sample, XmapEndSignal):
finish += 1
else:
yield sample
return xreader
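# Example (an illustrative sketch with a made-up mapper): four worker
# threads apply `square` to each sample; order=True preserves the input
# order at the cost of busy-waiting in order_handle_worker.
#
#   square = lambda x: x * x
#   r = xmap_readers(square, lambda: iter(range(10)), 4, 8, order=True)
#   list(r())  # -> [0, 1, 4, 9, 16, 25, 36, 49, 64, 81]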
def _buf2lines(buf, line_break="\n"):
# FIXME: line_break should be automatically configured.
lines = buf.split(line_break)
return lines[:-1], lines[-1]
class PipeReader:
"""
    PipeReader reads data streamed from a command, takes its
    stdout into a pipe buffer, and redirects it to the parser,
    which then yields data in your desired format.
    You can use a standard Linux command or call another program
    to read data, from HDFS, Ceph, a URL, AWS S3, etc.:
.. code-block:: python
cmd = "hadoop fs -cat /path/to/some/file"
cmd = "cat sample_file.tar.gz"
cmd = "curl http://someurl"
cmd = "python print_s3_bucket.py"
An example:
.. code-block:: python
def example_reader():
for f in myfiles:
pr = PipeReader("cat %s"%f)
for l in pr.get_line():
sample = l.split(" ")
yield sample
"""
def __init__(self, command, bufsize=8192, file_type="plain"):
if not isinstance(command, str):
raise TypeError("left_cmd must be a string")
if file_type == "gzip":
self.dec = zlib.decompressobj(
32 + zlib.MAX_WBITS) # offset 32 to skip the header
self.file_type = file_type
self.bufsize = bufsize
self.process = subprocess.Popen(
command.split(" "), bufsize=bufsize, stdout=subprocess.PIPE)
def get_line(self, cut_lines=True, line_break="\n"):
"""
:param cut_lines: cut buffer to lines
:type cut_lines: bool
:param line_break: line break of the file, like \n or \r
:type line_break: string
:return: one line or a buffer of bytes
:rtype: string
"""
remained = ""
while True:
buff = self.process.stdout.read(self.bufsize)
if buff:
if self.file_type == "gzip":
decomp_buff = self.dec.decompress(buff)
elif self.file_type == "plain":
decomp_buff = buff
else:
raise TypeError("file_type %s is not allowed" %
self.file_type)
if cut_lines:
lines, remained = _buf2lines(''.join(
[remained, decomp_buff]), line_break)
for line in lines:
yield line
else:
yield decomp_buff
else:
break
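# Example (an illustrative sketch; the file name is made up): reading a
# gzip-compressed file through PipeReader, with on-the-fly decompression
# selected via file_type="gzip".
#
#   pr = PipeReader("cat samples.txt.gz", file_type="gzip")
#   for line in pr.get_line():
#       fields = line.split("\t")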
|
{
"content_hash": "7ff605c5a8671bd49fdae3e53b0d2c0b",
"timestamp": "",
"source": "github",
"line_count": 391,
"max_line_length": 79,
"avg_line_length": 28.67774936061381,
"alnum_prop": 0.5624721305627397,
"repo_name": "pkuyym/Paddle",
"id": "44a6e344630bb35d28ee29078bf8727053a24bef",
"size": "11823",
"binary": false,
"copies": "11",
"ref": "refs/heads/develop",
"path": "python/paddle/reader/decorator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "272910"
},
{
"name": "C++",
"bytes": "7511205"
},
{
"name": "CMake",
"bytes": "270494"
},
{
"name": "Cuda",
"bytes": "1074033"
},
{
"name": "Go",
"bytes": "109501"
},
{
"name": "Perl",
"bytes": "11456"
},
{
"name": "Python",
"bytes": "3565922"
},
{
"name": "Shell",
"bytes": "172893"
}
],
"symlink_target": ""
}
|
"""
chart.py
A thin wrapper for building Highcharts charts from Python and rendering
them in a browser.
"""
__all__ = ["ChartTypes", "Chart"]
__version__ = 0.1
__author__ = "Fin"
# Stdlib Imports
import string
import os
import tempfile
import webbrowser
import copy
# Third Party Imports
# pyhighcharts Imports
from .util import safe_update, format_script_tag, \
format_options, JS_TYPE
SHOW_TEMPLATE = """<html>
<head>
<meta charset="UTF-8">
<script src="https://ajax.googleapis.com/ajax/libs/jquery/2.1.4/jquery.min.js"></script>
<script src="http://code.highcharts.com/highcharts.js"></script>
<script src="http://code.highcharts.com/highcharts-more.js"></script>
<script src="http://code.highcharts.com/highcharts-3d.js"></script>
<script src="http://code.highcharts.com/modules/heatmap.js"></script>
<script src="http://code.highcharts.com/modules/treemap.js"></script>
<script src="http://code.highcharts.com/modules/funnel.js"></script>
<script src="https://code.highcharts.com/modules/drilldown.js"></script>
</head>
<body>
<div id="$container" style="height: 100%; width: 100%;"></div>
<script type='text/javascript'>
$chart
</script>
    </body>
</html>
"""
SHOW_TEMPLATE = string.Template(SHOW_TEMPLATE)
class ChartTypes:
area = "area"
arearange = "arearange"
areaspline = "areaspline"
areasplinerange = "areasplinerange"
bar = "bar"
boxplot = "boxplot"
bubble = "bubble"
column = "column"
columnrange = "columnrange"
errorbar = "errorbar"
funnel = "funnel"
gauge = "gauge"
heatmap = "heatmap"
line = "line"
pie = "pie"
polygon = "polygon"
pyramid = "pyramid"
scatter = "scatter"
series = "series"
solidgauge = "solidgauge"
spline = "spline"
treemap = "treemap"
waterfall = "waterfall"
class Chart(object):
"""
    A Chart object is a container for one or more data series; the chart
    itself holds the settings and configuration for its children.
"""
GLOBAL_OPTIONS = {}
options = {}
series = []
def __init__(self, *series, **options):
self.options = options
self.series = list(series)
self.container = "container"
# Update this charts global options into its settings
safe_update(self.options, self.GLOBAL_OPTIONS)
def new(self):
options = copy.deepcopy(self.options)
series = copy.deepcopy(self.series)
return Chart(*series, **options)
def set_container(self, container):
self.container = container
def add_data_series(self, chart_type, data_points, **options):
"""
add_data_series will add the provided data series to the chart object.
Arguments:
:param ChartTypes chart_type: The type of series we're adding
:param list data_points: A list of data points, can be a list of
values or a list of [x, y] values or [x, category] values.
:param str name: The name of the data series
:returns: None
"""
# Make sure we have the series option in the options dictionary
if "series" not in self.options:
self.options["series"] = self.series
# Grab the series name
new_series = {
"data": data_points,
"type": chart_type
}
# Logic to check if we're working as a timeseries
if ('pointStart' in options) or ('pointInterval' in options):
self.set_timeseries()
new_series.update(options)
self.series.append(new_series)
def set_options(self, **options):
"""
set_options will take the provided keyword arguments and update them
into the internal settings dictionary of the chart.
"""
safe_update(self.options, options)
def show(self):
"""
        Show will open a browser window and display the chart there.
Use Chart.cleanup_temporary_files to remove these files, as they will
not automatically clean up after themselves
"""
temp_file = tempfile.NamedTemporaryFile(delete=False)
chart_content = self.script()
file_content = SHOW_TEMPLATE.safe_substitute(container=self.container,
chart=chart_content)
temp_file.write(file_content)
temp_file.flush()
handler = webbrowser.get()
handler.open("file://" + temp_file.name)
@staticmethod
    def cleanup_temporary_files():
"""
cleanup_temporary_files will remove all the files created by the show
method.
"""
temp_dir = tempfile.gettempdir()
file_list = os.listdir(temp_dir)
file_names = filter(lambda s: s.startswith(tempfile.gettempprefix()),
file_list)
map(lambda fn: os.remove(os.path.join(temp_dir, fn)), file_names)
def script(self):
return format_script_tag(container=self.container,
options=format_options(self.options))
# Helper Functions Just To Make Life Easier
def set_title(self, text):
"""
Sets the chart title to: text
"""
safe_update(self.options, {"title": {"text": text}})
def set_subtitle(self, text):
"""
Sets the chart subtitle to: text
"""
safe_update(self.options, {"subtitle": {"text": text}})
def set_zoomable(self, axis="x"):
"""
Sets the chart to be zoom-able and allows selection as to which axis it
        will zoom on. Available options: "x", "y", "xy".
"""
safe_update(self.options, {"chart": {"zoomType": axis}})
def set_timeseries(self):
safe_update(self.options, {"xAxis": {"type": "datetime"}})
def invert_axis(self):
options = {
"chart": {
"inverted": True
},
"xAxis": {
"reversed": False
}
}
safe_update(self.options, options)
def set_colours(self, colours):
"""
Sets the colour pallet of the data series to the provided list:
Example: ['#000', '#888', '#FFF']
Arguments:
:param list colours: A list of colours
"""
safe_update(self.options, {"colors": colours})
set_colors = set_colours # For Americans
def set_credits(self, enabled=True):
"""
Sets the credits, to url or change if it is enabled or not:
Arguments:
:param bool enabled: If the credits are to be enabled or not
"""
safe_update(self.options, {"credits": {"enabled": enabled}})
def set_exporting(self, enabled=True):
"""
Sets the exporting to enabled.
Arguments:
:param bool enabled: If exporting is enabled or not
"""
safe_update(self.options, {"exporting": {"enabled": enabled}})
def set_legend(self, enabled=True):
"""
Sets the legend's visibility to enabled.
Arguments:
:param bool enabled: If the legend is to be visible or not.
"""
safe_update(self.options, {"legend": {"enabled": enabled}})
def set_yaxis_limits(self, ymin=None, ymax=None):
to_update = {"yAxis": {}}
if ymin is not None:
to_update["yAxis"]["min"] = ymin
if ymax is not None:
to_update["yAxis"]["max"] = ymax
safe_update(self.options, to_update)
def set_yaxis_title(self, title):
safe_update(self.options, {"yAxis": {"title": {"text": title}}})
def set_xaxis_title(self, title):
safe_update(self.options, {"xAxis": {"title": {"text": title}}})
def set_categories(self, categories, axis="x"):
to_update = {"%sAxis" % axis: {"categories": categories}}
safe_update(self.options, to_update)
def set_tooltip(self, **options):
safe_update(self.options, {"tooltip": options})
def make_3d(self, **options):
"""
Makes a chart 3d
"""
new_options = {
"enabled": True,
"alpha": 15,
"beta": 15,
"depth": 50,
"viewDistance": 25
}
new_options.update(options)
safe_update(self.options, {"chart": {"options3d": new_options}})
def set_colour_axis(self, **options):
defaults = {
"min": 0,
"minColor": "#FFFFFF",
"maxColor": JS_TYPE("Highcharts.getOptions().colors[0]")
}
defaults.update(options)
safe_update(self.options, {"colorAxis": defaults})
# Alias
set_color_axis = set_colour_axis
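# Example (an illustrative sketch, not part of the original module): build a
# simple line chart and open it in a browser; the series data is made up.
#
#   chart = Chart()
#   chart.set_title("Monthly visits")
#   chart.add_data_series(ChartTypes.line, [1, 3, 2, 5], name="visits")
#   chart.show()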
|
{
"content_hash": "bb36a8804720c4763c6030730c5ddf38",
"timestamp": "",
"source": "github",
"line_count": 286,
"max_line_length": 88,
"avg_line_length": 30.863636363636363,
"alnum_prop": 0.5739209244363883,
"repo_name": "BennettRand/Solar-Circuit",
"id": "b52b3cff5e531d9abe9cd3cae727c83eb6f51658",
"size": "8873",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "solar_circuit/libs/pyhighcharts/chart.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "178665"
}
],
"symlink_target": ""
}
|
'''
Quality Metrics:
- Top 3 colors across active scenes
- These probably need to be binned
- Avg_sat across active scenes
- Avg_val across active scenes
Longest scene:
- All metadata
Most active scene:
- All metadata
Scenes:
- Number of total scenes
- Number of static scenes
- Number of action scenes
- Average duration of scenes
Action:
- Action/(static + action) percentage
Shake:
- Worst shake coeff for static scene
- Duration of worst shake coeff
- Avg shake coeff (weighted by time)
- Avg shake coeff (unweighted by time)
- biggest range of middle 50% (flow_percentile_75 - flow_percentile_25)
- duration of biggest middle 50%
- Average flow angle standard dev
- Worst flow angle standard dev
- Duration of flow angle standard dev
Blur:
- Avg blur
- Max blur
- Duration of max blur
- Blur percentage
Metadata:
All metadata
'''
import pandas as pd
import numpy as np
import re
from collections import defaultdict, Counter
class ModelFeatures(object):
'''
'''
def __init__(self):
pass
@staticmethod
def try_split(x):
'''
Helper for get_top_colors
'''
try:
return float(x.split('_')[-1])
except:
return ''
@staticmethod
def get_top_colors(raw_df):
'''
        Compute the top 10 colors across scenes, weighted by rank and
        scene duration.
'''
## Convert colors strings to floats
df = raw_df.copy()
df.reset_index(inplace=True)
col_names = df.columns
for col in col_names:
matches = re.search('top_color_', col)
if matches:
df[col] = df[col].apply(ModelFeatures.try_split)
## Find column names with colors
color_columns = []
col_names = df.columns
for col in col_names:
matches = re.search('top_color_', col)
if matches:
color_columns.append(col)
        color_columns.sort()
top_color_cntr = Counter()
points = 10
for col in color_columns:
row = 0
for color_str in df[col]:
#import ipdb; ipdb.set_trace()
top_color_cntr[color_str] += int(points) * int(df['duration'][row])
row += 1
points = points - 1
top_colors = []
for k,v in top_color_cntr.most_common(10):
top_colors.append(k)
return top_colors
@staticmethod
def _weighted_avg(values, weights):
        '''
        Weighted average of `values` using `weights`.
        '''
return 1. * np.sum((values * weights)) / weights.sum()
@staticmethod
def find_condition():
pass
@staticmethod
def create_features_for_video(metadata_df, scene_df):
'''
'''
feature_df = metadata_df.copy()
feature_df.reset_index(inplace=True)
action_df = scene_df[scene_df['is_static_scene'] == 0].copy()
static_df = scene_df[scene_df['is_static_scene'] == 1].copy()
## These probably need to be binned better
top_colors = ModelFeatures.get_top_colors(action_df)
for i, color in enumerate(top_colors):
feature_df['color_' + str(i)] = color
'''
Editing analysis
'''
# Leads with static scene
feature_df['static_begin'] = scene_df['is_static_scene'].values[0]
# Duration
if feature_df['static_begin'].any() == 1:
feature_df['static_begin_duration'] = scene_df['duration'].values[0]
else:
feature_df['static_begin_duration'] = 0
# Ends with static scene
feature_df['static_end'] = scene_df['is_static_scene'].values[-1]
# Duration
        if feature_df['static_end'].any() == 1:
            feature_df['static_end_duration'] = scene_df['duration'].values[-1]
        else:
            feature_df['static_end_duration'] = 0
'''
Image quality metrics
Only for action scenes
'''
# Avg_sat across active scenes
feature_df['avg_sat'] = ModelFeatures._weighted_avg(action_df['avg_sat'], action_df['duration'])
# Average saturation level of most saturated scene
feature_df['most_saturated_scene_sat'] = action_df['avg_sat'].max()
# Average saturation level of least saturated scene
feature_df['least_saturated_scene_sat'] = action_df['avg_sat'].min()
# Avg_val across active scenes
        feature_df['avg_val'] = ModelFeatures._weighted_avg(action_df['avg_val'], action_df['duration'])
        # Average value level of highest V (HSV) scene
        feature_df['highest_value_scene_val'] = action_df['avg_val'].max()
        ## Duration??
        # Average value level of lowest V (HSV) scene
        feature_df['lowest_value_scene_val'] = action_df['avg_val'].min()
## Duration??
'''
Scenes
'''
# Number of total scenes
feature_df['num_scenes'] = scene_df.shape[0]
# Number of static scenes
feature_df['num_static_scenes'] = static_df.shape[0]
# Number of action scenes
feature_df['num_action_scenes'] = action_df.shape[0]
# Average duration of scenes
feature_df['avg_scene_duration'] = feature_df['duration'] / feature_df['num_scenes']
# Action scene percentage
feature_df['action_scene_pct'] = feature_df['num_action_scenes'] / feature_df['num_scenes']
# Action duration percentage
feature_df['action_time_pct'] = action_df['duration'].sum() / feature_df['duration']
'''
Shake
'''
# Worst shake coeff for static scene
feature_df['max_shake'] = action_df['shake_coeff'].max()
# Duration of worst shake coeff
max_shake = feature_df['max_shake'].values[0]
#print max_shake
#print action_df
#print action_df[action_df['shake_coeff'] == max_shake]
feature_df['max_shake_duration'] = action_df[action_df['shake_coeff'] == max_shake]['duration'].values[0]
# Avg shake coeff (weighted by time)
feature_df['avg_shake_weighted'] = ModelFeatures._weighted_avg(action_df['shake_coeff'], action_df['duration'])
# Avg shake coeff (unweighted by time)
feature_df['avg_shake_unweighted'] = action_df['shake_coeff'].mean()
# biggest range of middle 50% (flow_percentile_75 - flow_percentile_25)
feature_df['max_middle_50%_spread'] = np.max(action_df['flow_percentile_75'] - action_df['flow_percentile_25'])
# duration of biggest middle 50%
# feature_df['max_middle_50%_spread_duration'] = None
# Average flow angle standard dev weighted
feature_df['avg_flow_angle_std_weighted'] = ModelFeatures._weighted_avg(action_df['flow_angle_std_dev'], action_df['duration'])
# Average flow angle standard dev unweighted
feature_df['avg_flow_angle_std_unweighted'] = action_df['flow_angle_std_dev'].mean()
# largest flow angle standard dev
feature_df['largest_flow_angle_std_dev'] = action_df['flow_angle_std_dev'].max()
# Duration of flow angle standard dev
max_std_dev = feature_df['largest_flow_angle_std_dev'].values[0]
#print action_df[action_df['flow_angle_std_dev'] == max_std_dev]
feature_df['largest_flow_angle_std_dev_duration'] = action_df[action_df['flow_angle_std_dev'] == max_std_dev]['duration'].values[0]
# smallest flow angle standard dev
feature_df['smallest_flow_angle_std_dev'] = action_df['flow_angle_std_dev'].min()
'''
Movement
'''
# Number of flow points per action scene
feature_df['avg_flow_pts_unweighted'] = action_df['avg_flow_pts_per_frame'].mean()
feature_df['avg_flow_pts_weighted'] = ModelFeatures._weighted_avg(action_df['avg_flow_pts_per_frame'], action_df['duration'])
'''
Blur/Sharpness
'''
# Avg blur
feature_df['avg_blur'] = ModelFeatures._weighted_avg(action_df['blur'], action_df['duration'])
# Percentage of blurry frames
feature_df['percentage_blurry_frames'] = ModelFeatures._weighted_avg(action_df['blur_pct'], action_df['duration'])
'''
Longest scene:
'''
        # Concatenate all metadata
'''
Most active scene:
'''
        # Concatenate all metadata
return feature_df
@staticmethod
def get_feature_df(metadata_df):
path = "/Users/fiannacci/data_science_class/project_exploration/machine_learning/"
feature_df = pd.DataFrame()
for v_id in metadata_df['id']:
try:
## Note: get rid of relative paths!
print v_id
video_scene_df = pd.read_pickle(path + 'model/data/' + v_id + '.analysis.pkl')
video_meta_df = metadata_df[metadata_df['id'] == v_id]
video_feature_df = ModelFeatures.create_features_for_video(video_meta_df, video_scene_df)
feature_df = feature_df.append(video_feature_df, ignore_index=True)
except:
print "-I- " + v_id + " had a problem"
return feature_df
|
{
"content_hash": "7082fea86e2d1b73b3203722f60d07b3",
"timestamp": "",
"source": "github",
"line_count": 255,
"max_line_length": 139,
"avg_line_length": 36.015686274509804,
"alnum_prop": 0.5918989547038328,
"repo_name": "pdxcycling/carv.io",
"id": "0769df189a05586a2bcbc5a998da0057f0f098f6",
"size": "9184",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "machine_learning/code/video_features.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "79613"
}
],
"symlink_target": ""
}
|
from django.template import Library, Node, TemplateSyntaxError
register = Library()
class SetVariable(Node):
def __init__(self, varname, nodelist):
self.varname = varname
self.nodelist = nodelist
def render(self,context):
context[self.varname] = self.nodelist.render(context)
return ''
@register.tag(name = 'setvar')
def setvar(parser, token):
"""
Set value to content of a rendered block.
{% setvar var_name %}
....
    {% endsetvar %}
"""
try:
# split_contents() knows not to split quoted strings.
tag_name, varname = token.split_contents()
except ValueError:
raise TemplateSyntaxError, "%r tag requires a single argument for variable name" % token.contents.split()[0]
nodelist = parser.parse(('endsetvar',))
parser.delete_first_token()
return SetVariable(varname, nodelist)
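# Example (an illustrative sketch; the variable name is made up): after
# {% load setvar %}, the rendered block is captured into `greeting`.
#
#   {% setvar greeting %}Hello, {{ user.username }}!{% endsetvar %}
#   {{ greeting }}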
|
{
"content_hash": "ec38ed2c801829ef13b72d6ce208ed18",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 116,
"avg_line_length": 30.466666666666665,
"alnum_prop": 0.6312910284463895,
"repo_name": "Kami/munin_exchange",
"id": "8097d7d6213e6b397169192f5c832961b64bfbab",
"size": "914",
"binary": false,
"copies": "1",
"ref": "refs/heads/public",
"path": "munin_exchange/apps/core/templatetags/setvar.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "121082"
}
],
"symlink_target": ""
}
|
import serial
import time
import openanything
import xml.etree.ElementTree as ETree
useragent = 'pyFrenchToastArduino/1.0'
xml_source = 'http://www.universalhub.com/toast.xml'
server_response = { 'etag' : None, 'lastmodified' : None, 'data': None }
def processXML(xml):
tree = ETree.fromstring(xml)
status = tree.find('status')
if status != None:
#print status.text
# return first character, uppercase
return status.text[:1].upper()
# default case: return error code
return 'X'
def retrieveXML():
global xml_source
global server_response
    # Check for updates to XML. If HTTP 200 is received, process the response
latest_server_response = openanything.fetch(xml_source, server_response['etag'],
server_response['lastmodified'], useragent)
#print(latest_server_response)
# Handle (most common) case, where the XML is unchanged
if latest_server_response['status'] == 304:
        # We continue to return the last valid data, since nothing has changed
#print('XML Unchanged')
return server_response['data']
# Handle normal response
if latest_server_response['status'] in {200, 302}:
# Store this as a the current, valid, updated response
server_response = latest_server_response
#print(server_response['data'])
return server_response['data']
    # if this is a permanent redirect, also update the URI for future calls
    if latest_server_response['status'] == 301:
        xml_source = latest_server_response['url']
server_response = latest_server_response
return server_response['data']
# If we got some sort of other response
return None
# Init Serial Port
ser = serial.Serial('COM7', 9600, timeout=15)
# Loop forever
while 1:
try:
current_xml = retrieveXML()
response_code = processXML(current_xml)
print "Sending to Arduino: " + response_code
# Write to arduino on serial port
ser.write(response_code)
# Wait for response (max of serial timeout value), and print any reply we get from the arduino
serial_reply = ser.readline()
print "Recieved from Arduino: " + serial_reply.decode('ascii')
        # Sleep for 5 minutes (then start over)
time.sleep(60 * 5)
except Exception as e:
print "Exception: " + e
time.sleep(60)
|
{
"content_hash": "9a933ff50162582b43db31ac71195452",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 98,
"avg_line_length": 28.73076923076923,
"alnum_prop": 0.6983489513609995,
"repo_name": "IvanGirderboot/frenchtoastino",
"id": "ee264769851218674673d1f9eb9b06477c30e986",
"size": "2241",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyFrenchToastino.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "1741"
},
{
"name": "Python",
"bytes": "6240"
}
],
"symlink_target": ""
}
|
"""Build a project using PEP 517 hooks.
"""
import argparse
import logging
import os
import shutil
import tempfile
from ._compat import tomllib
from .envbuild import BuildEnvironment
from .wrappers import Pep517HookCaller
log = logging.getLogger(__name__)
def validate_system(system):
"""
Ensure build system has the requisite fields.
"""
required = {'requires', 'build-backend'}
if not (required <= set(system)):
message = "Missing required fields: {missing}".format(
missing=required-set(system),
)
raise ValueError(message)
def load_system(source_dir):
"""
Load the build system from a source dir (pyproject.toml).
"""
pyproject = os.path.join(source_dir, 'pyproject.toml')
with open(pyproject, 'rb') as f:
pyproject_data = tomllib.load(f)
return pyproject_data['build-system']
def compat_system(source_dir):
"""
Given a source dir, attempt to get a build system backend
and requirements from pyproject.toml. Fallback to
setuptools but only if the file was not found or a build
system was not indicated.
"""
try:
system = load_system(source_dir)
except (FileNotFoundError, KeyError):
system = {}
system.setdefault(
'build-backend',
'setuptools.build_meta:__legacy__',
)
system.setdefault('requires', ['setuptools', 'wheel'])
return system
def _do_build(hooks, env, dist, dest):
get_requires_name = 'get_requires_for_build_{dist}'.format(**locals())
get_requires = getattr(hooks, get_requires_name)
reqs = get_requires({})
log.info('Got build requires: %s', reqs)
env.pip_install(reqs)
log.info('Installed dynamic build dependencies')
with tempfile.TemporaryDirectory() as td:
log.info('Trying to build %s in %s', dist, td)
build_name = 'build_{dist}'.format(**locals())
build = getattr(hooks, build_name)
filename = build(td, {})
source = os.path.join(td, filename)
shutil.move(source, os.path.join(dest, os.path.basename(filename)))
def build(source_dir, dist, dest=None, system=None):
system = system or load_system(source_dir)
dest = os.path.join(source_dir, dest or 'dist')
os.makedirs(dest, exist_ok=True)
validate_system(system)
hooks = Pep517HookCaller(
source_dir, system['build-backend'], system.get('backend-path')
)
with BuildEnvironment() as env:
env.pip_install(system['requires'])
_do_build(hooks, env, dist, dest)
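# Example (an illustrative sketch; the project path is made up): building
# artifacts programmatically; by default they land in <source_dir>/dist.
#
#   build('path/to/project', 'wheel')
#   build('path/to/project', 'sdist', dest='out')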
parser = argparse.ArgumentParser()
parser.add_argument(
'source_dir',
help="A directory containing pyproject.toml",
)
parser.add_argument(
'--binary', '-b',
action='store_true',
default=False,
)
parser.add_argument(
'--source', '-s',
action='store_true',
default=False,
)
parser.add_argument(
'--out-dir', '-o',
help="Destination in which to save the builds relative to source dir",
)
def main(args):
log.warning('pep517.build is deprecated. '
'Consider switching to https://pypi.org/project/build/')
# determine which dists to build
dists = list(filter(None, (
'sdist' if args.source or not args.binary else None,
'wheel' if args.binary or not args.source else None,
)))
for dist in dists:
build(args.source_dir, dist, args.out_dir)
if __name__ == '__main__':
main(parser.parse_args())
|
{
"content_hash": "3a0ff357f154eb8dfac6e941643a58f6",
"timestamp": "",
"source": "github",
"line_count": 126,
"max_line_length": 75,
"avg_line_length": 27.325396825396826,
"alnum_prop": 0.6430438571013651,
"repo_name": "pradyunsg/pip",
"id": "b30909c8704a5954ef5250ef890ed4cb1d50cf07",
"size": "3443",
"binary": false,
"copies": "4",
"ref": "refs/heads/main",
"path": "src/pip/_vendor/pep517/build.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "3137"
},
{
"name": "PowerShell",
"bytes": "2137"
},
{
"name": "Python",
"bytes": "7137519"
}
],
"symlink_target": ""
}
|
import time
import warnings
try:
import requests
REQUESTS_AVAILABLE = True
except ImportError:
REQUESTS_AVAILABLE = False
from .base import Connection
from ..exceptions import ConnectionError, ImproperlyConfigured, ConnectionTimeout, SSLError
from ..compat import urlencode, string_types
class RequestsHttpConnection(Connection):
"""
Connection using the `requests` library.
:arg http_auth: optional http auth information as either ':' separated
string or a tuple. Any value will be passed into requests as `auth`.
:arg use_ssl: use ssl for the connection if `True`
:arg verify_certs: whether to verify SSL certificates
:arg ca_certs: optional path to CA bundle. By default standard requests'
bundle will be used.
:arg client_cert: path to the file containing the private key and the
certificate
"""
def __init__(self, host='localhost', port=9200, http_auth=None,
use_ssl=False, verify_certs=False, ca_certs=None, client_cert=None,
**kwargs):
if not REQUESTS_AVAILABLE:
raise ImproperlyConfigured("Please install requests to use RequestsHttpConnection.")
        super(RequestsHttpConnection, self).__init__(host=host, port=port, **kwargs)
self.session = requests.session()
if http_auth is not None:
if isinstance(http_auth, (tuple, list)):
http_auth = tuple(http_auth)
elif isinstance(http_auth, string_types):
http_auth = tuple(http_auth.split(':', 1))
self.session.auth = http_auth
self.base_url = 'http%s://%s:%d%s' % (
's' if use_ssl else '',
host, port, self.url_prefix
)
self.session.verify = verify_certs
self.session.cert = client_cert
if ca_certs:
if not verify_certs:
raise ImproperlyConfigured("You cannot pass CA certificates when verify SSL is off.")
self.session.verify = ca_certs
if use_ssl and not verify_certs:
warnings.warn(
'Connecting to %s using SSL with verify_certs=False is insecure.' % self.base_url)
def perform_request(self, method, url, params=None, body=None, timeout=None, ignore=()):
url = self.base_url + url
if params:
url = '%s?%s' % (url, urlencode(params or {}))
start = time.time()
try:
response = self.session.request(method, url, data=body, timeout=timeout or self.timeout)
duration = time.time() - start
raw_data = response.text
except requests.exceptions.SSLError as e:
self.log_request_fail(method, url, body, time.time() - start, exception=e)
raise SSLError('N/A', str(e), e)
except requests.Timeout as e:
self.log_request_fail(method, url, body, time.time() - start, exception=e)
raise ConnectionTimeout('TIMEOUT', str(e), e)
except requests.ConnectionError as e:
self.log_request_fail(method, url, body, time.time() - start, exception=e)
raise ConnectionError('N/A', str(e), e)
# raise errors based on http status codes, let the client handle those if needed
if not (200 <= response.status_code < 300) and response.status_code not in ignore:
self.log_request_fail(method, url, body, duration, response.status_code)
self._raise_error(response.status_code, raw_data)
self.log_request_success(method, url, response.request.path_url, body, response.status_code, raw_data, duration)
return response.status_code, response.headers, raw_data
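# Example (an illustrative sketch, not part of the original module): a
# direct low-level request against a local node; normally this class is
# used through the client's transport rather than instantiated by hand.
#
#   conn = RequestsHttpConnection(host='localhost', port=9200)
#   status, headers, body = conn.perform_request('GET', '/')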
|
{
"content_hash": "020fdf9b5db7d2c89763eff33af0017c",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 120,
"avg_line_length": 44.6219512195122,
"alnum_prop": 0.6354195135282864,
"repo_name": "prinsherbert/elasticsearch-py",
"id": "6aee2dba026ee7888da38c74463109a6d7584ae2",
"size": "3659",
"binary": false,
"copies": "12",
"ref": "refs/heads/master",
"path": "elasticsearch/connection/http_requests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "297898"
}
],
"symlink_target": ""
}
|
import unittest
from HelloWorld.challenge import HelloWorldChallenge
class HelloWorldTest(unittest.TestCase):
def setUp(self):
self.challenge = HelloWorldChallenge()
def test__init__(self):
self.assertIsInstance(self.challenge, HelloWorldChallenge)
self.assertIn('WorldHello', self.challenge.sample)
self.assertIn('Hello World', self.challenge.expect)
def test_build(self):
self.challenge.read()
self.challenge.build()
self.assertEqual(5, self.challenge.model.split_at)
self.assertEqual('WorldHello', self.challenge.model.word)
def test_calc(self):
self.challenge.model.split_at = 5
self.challenge.model.word = 'WorldHello'
self.challenge.calc()
self.assertEqual('Hello World', self.challenge.result.word)
self.assertEqual(11, self.challenge.result.length)
def test_format(self):
self.challenge.result.word = 'Hello World'
self.challenge.result.length = 11
self.challenge.format()
self.assertEqual(self.challenge.expectation(), self.challenge.output)
def test_full_integration(self):
self.challenge.main()
self.assertEqual(self.challenge.expectation(), self.challenge.output)
|
{
"content_hash": "5a7b6b11e506a5252ac50c9cd631e77c",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 77,
"avg_line_length": 34.08108108108108,
"alnum_prop": 0.6851704996034893,
"repo_name": "elmar-hinz/Python.Challenges",
"id": "8aa73722ec1520469d1ef91967861c49d3df2a4e",
"size": "1261",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "HelloWorld/test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "64"
},
{
"name": "Python",
"bytes": "57476"
}
],
"symlink_target": ""
}
|
"""Long tests for Multinomial."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import test_util
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import test
class MultinomialTest(test.TestCase):
# check that events with tiny probabilities are not over-sampled
def testLargeDynamicRange(self):
random_seed.set_random_seed(10)
counts_by_indices = {}
with self.test_session():
samples = random_ops.multinomial(
constant_op.constant([[-30, 0]], dtype=dtypes.float32),
num_samples=1000000,
seed=15)
for _ in range(100):
x = self.evaluate(samples)
indices, counts = np.unique(x, return_counts=True)
for index, count in zip(indices, counts):
if index in counts_by_indices.keys():
counts_by_indices[index] += count
else:
counts_by_indices[index] = count
self.assertEqual(counts_by_indices[1], 100000000)
def testLargeDynamicRange2(self):
random_seed.set_random_seed(10)
counts_by_indices = {}
with self.test_session():
samples = random_ops.multinomial(
constant_op.constant([[0, -30]], dtype=dtypes.float32),
num_samples=1000000,
seed=15)
for _ in range(100):
x = self.evaluate(samples)
indices, counts = np.unique(x, return_counts=True)
for index, count in zip(indices, counts):
if index in counts_by_indices.keys():
counts_by_indices[index] += count
else:
counts_by_indices[index] = count
self.assertEqual(counts_by_indices[0], 100000000)
@test_util.run_deprecated_v1
def testLargeDynamicRange3(self):
random_seed.set_random_seed(10)
counts_by_indices = {}
# here the cpu undersamples and won't pass this test either
with self.test_session():
samples = random_ops.multinomial(
constant_op.constant([[0, -17]], dtype=dtypes.float32),
num_samples=1000000,
seed=22)
# we'll run out of memory if we try to draw 1e9 samples directly
# really should fit in 12GB of memory...
for _ in range(100):
x = self.evaluate(samples)
indices, counts = np.unique(x, return_counts=True)
for index, count in zip(indices, counts):
if index in counts_by_indices.keys():
counts_by_indices[index] += count
else:
counts_by_indices[index] = count
self.assertGreater(counts_by_indices[1], 0)
if __name__ == "__main__":
test.main()
|
{
"content_hash": "eaa2ed89af74c2301d347e6d06db1fdb",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 70,
"avg_line_length": 35.1125,
"alnum_prop": 0.6472054111783553,
"repo_name": "annarev/tensorflow",
"id": "2bf15db188544f373c3a41d17ecae50d86aa54db",
"size": "3498",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tensorflow/python/kernel_tests/random/multinomial_op_big_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "1286"
},
{
"name": "Batchfile",
"bytes": "9258"
},
{
"name": "C",
"bytes": "341894"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "49343974"
},
{
"name": "CMake",
"bytes": "195286"
},
{
"name": "Dockerfile",
"bytes": "36386"
},
{
"name": "Go",
"bytes": "1253646"
},
{
"name": "HTML",
"bytes": "4681865"
},
{
"name": "Java",
"bytes": "863222"
},
{
"name": "Jupyter Notebook",
"bytes": "2604741"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "52734"
},
{
"name": "Objective-C",
"bytes": "15650"
},
{
"name": "Objective-C++",
"bytes": "99243"
},
{
"name": "PHP",
"bytes": "1357"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "41289329"
},
{
"name": "Ruby",
"bytes": "553"
},
{
"name": "Shell",
"bytes": "469612"
},
{
"name": "Smarty",
"bytes": "6976"
}
],
"symlink_target": ""
}
|
from optparse import OptionParser
from PIL import Image
import os
import random
from decimal import Decimal
import glob
import ast
import itertools
from collections import namedtuple
INPUTDIR = "data"
OUTPUTDIR = "current"
ScreenProperties = namedtuple("ScreenProperties", "width height voffset id")
# compute total virtual screen size
def compute_screens_resolutions(screens):
totalwidth = sum([s.width for s in screens])
totalheight = max([s.height + abs(s.voffset) for s in screens])
return (totalwidth, totalheight)
# increase size of image so that screen is filled, while keeping original ratio
def compute_resize_resolution(imagewidth, imageheight, screenwidth, screenheight):
resizewidth = imagewidth
resizeheight = imageheight
if (screenwidth > resizewidth):
resizewidth = screenwidth
resizeheight *= screenwidth
resizeheight /= imagewidth
if (screenheight > resizeheight):
resizewidth *= screenheight
resizewidth /= resizeheight
resizeheight = screenheight
return (resizewidth, resizeheight)
# crop center of image so that it matches screen dimensions
def compute_crop_coordinates(imagewidth, imageheight, screenwidth, screenheight):
x1 = 0
y1 = 0
x2 = imagewidth
y2 = imageheight
    # todo: improve rounding, there is often a 1px error
if (screenwidth < imagewidth):
x1 = int(round(Decimal(imagewidth - screenwidth) / 2))
x2 -= x1
if (screenheight < imageheight):
y1 = int(round(Decimal(imageheight - screenheight) / 2))
y2 -= y1
return (x1, y1, x2, y2)
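# Worked example (illustrative numbers): a 1280x720 image on a 1280x1024
# screen. compute_resize_resolution keeps the aspect ratio and scales the
# image up to 1820x1024; compute_crop_coordinates then trims the horizontal
# excess symmetrically: x1 = (1820 - 1280) / 2 = 270, giving the crop box
# (270, 0, 1550, 1024).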
def save_wallpaper(resolutionwallpapers, id, outputdir):
if len(resolutionwallpapers) == 0:
return
elif len(resolutionwallpapers) == 1:
w = resolutionwallpapers[0][1]
else:
wallwidth = sum([rw[0].width for rw in resolutionwallpapers])
wallheight = max([rw[0].height + abs(rw[0].voffset) for rw in resolutionwallpapers])
#print "wallwidth=%d, wallheight=%d" % (wallwidth, wallheight)
w = Image.new(resolutionwallpapers[0][1].mode, (wallwidth, wallheight))
x = 0
y = 0
baseoffset = resolutionwallpapers[0][0].voffset
# TBD : check usage of rw[0].voffset
for rw in resolutionwallpapers:
w.paste(rw[1], (x, y - baseoffset + rw[0].voffset))
if baseoffset != 0:
w.paste(rw[1], (x, wallheight - baseoffset + rw[0].voffset))
x += rw[1].size[0]
w.save(os.path.join(outputdir, "wall%d.bmp" % id))
def generate_wallpaper(inputimagepath, outputdir, screenresolutions):
print "Source image: %s" % inputimagepath
(screenwidth, screenheight) = compute_screens_resolutions(screenresolutions)
print "Output screen resolution is: (%d,%d)" % (screenwidth, screenheight)
im = Image.open(inputimagepath)
(imagewidth, imageheight) = im.size
print "Source image resolution is: (%d,%d)" % (imagewidth, imageheight)
(resizewidth, resizeheight) = compute_resize_resolution(imagewidth, imageheight, screenwidth, screenheight)
if (imagewidth != resizewidth or imageheight != resizeheight):
print "Upscaled image resolution is: (%d,%d)" % (resizewidth, resizeheight)
im = im.resize((resizewidth, resizeheight), Image.BICUBIC)
if (screenwidth != resizewidth or screenheight != resizeheight):
(x1, y1, x2, y2) = compute_crop_coordinates(resizewidth, resizeheight, screenwidth, screenheight)
print "Cropped image coordinates are: (%d,%d),(%d,%d))" % (x1, y1, x2, y2)
im = im.crop((x1, y1, x2, y2))
im.save("im.bmp")
x = 0
screenid = 0
wallpapers = []
#i = 0
for s in screenresolutions:
wall = im.crop((x, s.voffset, x + s.width, s.height + s.voffset))
wall.load()
wallpapers.append(wall)
#wall.save(os.path.join(outputdir, "wall%d.bmp" % i))
x += s.width
# i+=1
wsl = zip(screenresolutions, wallpapers)
for key, group in itertools.groupby(wsl, lambda x: x[0].id):
l = list(group)
save_wallpaper(l, key, outputdir)
def make_wallpapers(uselatest, outputscreens):
if (False == os.path.isdir(OUTPUTDIR)):
os.makedirs(OUTPUTDIR)
inputimagepath = ""
if (uselatest == True):
inputimagepath = max(glob.iglob(os.path.join(INPUTDIR, "*.*")), key=os.path.getmtime)
else:
inputimagepath = os.path.join(INPUTDIR, random.choice(os.listdir(INPUTDIR)))
generate_wallpaper(inputimagepath, OUTPUTDIR, [ScreenProperties(screen[0], screen[1], screen[2], screen[3]) for screen in outputscreens])
if __name__=='__main__':
parser = OptionParser()
parser.add_option("-L", "--use-latest-image", dest="latest", action="store_true", default=False, help="Use the most recent image found in images directory if true, otherwise pick one at random")
parser.add_option("-O", "--output-screens", dest="outputscreens", default="[(1366, 768, 550, 0), (1280, 1024, 0, 1)]", help="Left-to-right ouput screens resolutions as a list of tuples: [(width1, height1, vertical offset1), ...]")
(options, args) = parser.parse_args()
make_wallpapers(options.latest, ast.literal_eval(options.outputscreens))
|
{
"content_hash": "77e98d0cb12b70400398327164262526",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 231,
"avg_line_length": 35.54014598540146,
"alnum_prop": 0.7169850071883344,
"repo_name": "esabouraud/mmwall",
"id": "7c688ace51d372ecd1e3a98abca16098caf91347",
"size": "5005",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/synergy_wallpaper.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1286"
},
{
"name": "Python",
"bytes": "32259"
}
],
"symlink_target": ""
}
|
"""
WSGI config for my1 project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "my1.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "my1.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
|
{
"content_hash": "d2fe6bbfcbcd77588186a55b249a4da3",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 79,
"avg_line_length": 44.0625,
"alnum_prop": 0.7914893617021277,
"repo_name": "praveenskumar/djangoapp",
"id": "786841a988adf3bc227547a004bc976fb0a3cca0",
"size": "1410",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "my1/my1/wsgi.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "157"
},
{
"name": "Python",
"bytes": "8090"
}
],
"symlink_target": ""
}
|
from django.views.decorators.csrf import csrf_exempt
from orchestra.project_api.auth import IsSignedUser
from orchestra.project_api.auth import OrchestraProjectAPIAuthentication
from jsonview.decorators import json_view
from rest_framework.decorators import api_view
from rest_framework.decorators import authentication_classes
from rest_framework.decorators import permission_classes
import logging
logger = logging.getLogger(__name__)
def api_exception_logger(func):
def func_wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as e:
logger.exception('An API exception occurred')
raise e
return func_wrapper
def api_endpoint(methods):
def api_endpoint_decorator(func):
@csrf_exempt
@api_view(methods)
@authentication_classes((OrchestraProjectAPIAuthentication,))
@permission_classes((IsSignedUser,))
@json_view
@api_exception_logger
def func_wrapper(*args, **kwargs):
return func(*args, **kwargs)
return func_wrapper
return api_endpoint_decorator
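# Example (an illustrative sketch; the view is made up): declaring a signed,
# JSON-returning endpoint with the decorator.
#
#   @api_endpoint(['GET'])
#   def ping(request):
#       return {'status': 'ok'}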
|
{
"content_hash": "3a318aa962392a2c05cf07356c98b880",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 72,
"avg_line_length": 33.029411764705884,
"alnum_prop": 0.7061442564559216,
"repo_name": "Sonblind/orchestra",
"id": "7a3ef47e0410b4f391a32ae100e1ed4a3d220e45",
"size": "1123",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "orchestra/project_api/decorators.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "76414"
},
{
"name": "HTML",
"bytes": "57292"
},
{
"name": "JavaScript",
"bytes": "234470"
},
{
"name": "Makefile",
"bytes": "826"
},
{
"name": "Python",
"bytes": "310246"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models
from core.models import State, Platform, Module
from image.models import Image, Imagerevision, Imagetype
from provisioning.models import Provisioning
from authentication.models import User, Usergroup
from managementnode.models import Managementnode
from scheduling.models import Schedule
# Create your models here.
class Computer(models.Model):
id = models.SmallIntegerField(primary_key=True)
stateid = models.ForeignKey(State, db_column='stateid')
ownerid = models.ForeignKey(User, db_column='ownerid', blank=True, null=True)
platformid = models.ForeignKey(Platform, db_column='platformid')
scheduleid = models.ForeignKey(Schedule, db_column='scheduleid', blank=True, null=True)
# ok down is instanced
vmhostid = models.ForeignKey('Vmhost', db_column='vmhostid', blank=True, null=True)
currentimageid = models.ForeignKey(Image, db_column='currentimageid', related_name="rel_currentimageid")
nextimageid = models.ForeignKey(Image, db_column='nextimageid', related_name="rel_nextimageid")
imagerevisionid = models.ForeignKey(Imagerevision, db_column='imagerevisionid')
ram = models.IntegerField(db_column='RAM') # Field name made lowercase.
procnumber = models.IntegerField()
procspeed = models.SmallIntegerField()
network = models.SmallIntegerField()
hostname = models.CharField(max_length=36)
ipaddress = models.CharField(db_column='IPaddress', max_length=15) # Field name made lowercase.
privateipaddress = models.CharField(db_column='privateIPaddress', max_length=15, blank=True, null=True) # Field name made lowercase.
eth0macaddress = models.CharField(max_length=17, blank=True, null=True)
eth1macaddress = models.CharField(max_length=17, blank=True, null=True)
type = models.CharField(max_length=14)
provisioningid = models.ForeignKey(Provisioning, db_column='provisioningid')
drivetype = models.CharField(max_length=4)
deleted = models.IntegerField()
datedeleted = models.DateTimeField()
notes = models.TextField(blank=True, null=True)
lastcheck = models.DateTimeField(blank=True, null=True)
location = models.CharField(max_length=255, blank=True, null=True)
dsa = models.TextField(blank=True, null=True)
dsapub = models.TextField(blank=True, null=True)
rsa = models.TextField(blank=True, null=True)
rsapub = models.TextField(blank=True, null=True)
host = models.TextField(blank=True, null=True)
hostpub = models.TextField(blank=True, null=True)
vmtypeid = models.IntegerField(blank=True, null=True)
predictivemoduleid = models.ForeignKey(Module, db_column='predictivemoduleid')
class Meta:
db_table = 'computer'
unique_together = (('eth1macaddress', 'datedeleted'), ('hostname', 'datedeleted'), ('eth0macaddress', 'datedeleted'),)
class Vmprofile(models.Model):
id = models.SmallIntegerField(primary_key=True)
profilename = models.CharField(unique=True, max_length=56)
imageid = models.ForeignKey(Image, db_column='imageid')
resourcepath = models.CharField(max_length=256, blank=True, null=True)
folderpath = models.CharField(max_length=256, blank=True, null=True)
repositorypath = models.CharField(max_length=128, blank=True, null=True)
repositoryimagetypeid = models.ForeignKey(Imagetype, db_column='repositoryimagetypeid', related_name="IR1")
datastorepath = models.CharField(max_length=128)
datastoreimagetypeid = models.ForeignKey(Imagetype, db_column='datastoreimagetypeid', related_name="IR3")
vmpath = models.CharField(max_length=128, blank=True, null=True)
virtualswitch0 = models.CharField(max_length=80)
virtualswitch1 = models.CharField(max_length=80)
virtualswitch2 = models.CharField(max_length=80, blank=True, null=True)
virtualswitch3 = models.CharField(max_length=80, blank=True, null=True)
vmdisk = models.CharField(max_length=9)
username = models.CharField(max_length=80, blank=True, null=True)
password = models.CharField(max_length=256, blank=True, null=True)
eth0generated = models.IntegerField()
eth1generated = models.IntegerField()
rsapub = models.TextField(blank=True, null=True)
rsakey = models.CharField(max_length=256, blank=True, null=True)
encryptedpasswd = models.TextField(blank=True, null=True)
class Meta:
db_table = 'vmprofile'
class Vmhost(models.Model):
id = models.SmallIntegerField(primary_key=True)
computerid = models.OneToOneField(Computer, db_column='computerid')
vmlimit = models.IntegerField()
vmprofileid = models.ForeignKey(Vmprofile, db_column='vmprofileid')
class Meta:
db_table = 'vmhost'
class Computerloadstate(models.Model):
id = models.SmallIntegerField(primary_key=True)
loadstatename = models.CharField(unique=True, max_length=24)
prettyname = models.CharField(max_length=50, blank=True, null=True)
est = models.IntegerField(blank=True, null=True)
class Meta:
db_table = 'computerloadstate'
class Computerloadflow(models.Model):
computerloadstateid = models.ForeignKey(Computerloadstate, db_column='computerloadstateid', related_name="clsi")
nextstateid = models.ForeignKey(Computerloadstate, db_column='nextstateid', blank=True, null=True, related_name="nsi")
type = models.CharField(max_length=14, blank=True, null=True)
class Meta:
db_table = 'computerloadflow'
class Semaphore(models.Model):
computerid = models.ForeignKey(Computer, db_column='computerid')
imageid = models.ForeignKey(Image, db_column='imageid')
imagerevisionid = models.ForeignKey(Imagerevision, db_column='imagerevisionid')
managementnodeid = models.ForeignKey(Managementnode, db_column='managementnodeid')
expires = models.DateTimeField()
procid = models.CharField(max_length=255)
class Meta:
db_table = 'semaphore'
class Serverprofile(models.Model):
id = models.SmallIntegerField(primary_key=True)
name = models.CharField(unique=True, max_length=255)
description = models.TextField()
imageid = models.ForeignKey(Image, db_column='imageid')
ownerid = models.ForeignKey(User, db_column='ownerid')
ending = models.CharField(max_length=10)
fixedip = models.CharField(db_column='fixedIP', max_length=15, blank=True, null=True) # Field name made lowercase.
fixedmac = models.CharField(db_column='fixedMAC', max_length=17, blank=True, null=True) # Field name made lowercase.
admingroupid = models.ForeignKey(Usergroup, db_column='admingroupid', blank=True, null=True, related_name="agi" )
logingroupid = models.ForeignKey(Usergroup, db_column='logingroupid', blank=True, null=True, related_name="lalog")
monitored = models.IntegerField()
class Meta:
db_table = 'serverprofile'
class Clickthroughs(models.Model):
userid = models.ForeignKey(User, db_column='userid')
imageid = models.ForeignKey(Image, db_column='imageid')
imagerevisionid = models.ForeignKey(Imagerevision, db_column='imagerevisionid', blank=True, null=True)
accepted = models.DateTimeField()
agreement = models.TextField()
class Meta:
db_table = 'clickThroughs'
|
{
"content_hash": "5d7fca256cccebe2174dfaab8753bfb7",
"timestamp": "",
"source": "github",
"line_count": 157,
"max_line_length": 137,
"avg_line_length": 46.732484076433124,
"alnum_prop": 0.7252282949434373,
"repo_name": "luisza/vcl_django",
"id": "60bb4ed6968bf95575fbb32a70e42654cde33a34",
"size": "7337",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "compute/models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "134141"
},
{
"name": "Shell",
"bytes": "358"
}
],
"symlink_target": ""
}
|
"""
Enumeration of valid variable types for binary quadratic models.
Examples:
This example shows easy access to different Vartypes, which are in the main
namespace.
>>> vartype = dimod.SPIN
>>> print(vartype)
Vartype.SPIN
>>> vartype = dimod.BINARY
>>> print(vartype)
Vartype.BINARY
>>> vartype = dimod.Vartype.SPIN
>>> print(vartype)
Vartype.SPIN
>>> isinstance(vartype, dimod.Vartype)
True
This example shows access by value or name.
>>> print(dimod.Vartype({0, 1}))
Vartype.BINARY
>>> print(dimod.Vartype['SPIN'])
Vartype.SPIN
This example uses the `.value` parameter to validate.
>>> sample = {'u': -1, 'v': 1}
>>> vartype = dimod.Vartype.SPIN
>>> all(val in vartype.value for val in sample.values())
True
"""
import enum
__all__ = ['Vartype', 'SPIN', 'BINARY']
class Vartype(enum.Enum):
"""An :py:class:`~enum.Enum` over the types of variables for the binary quadratic model.
Attributes:
SPIN (:class:`.Vartype`): Vartype for spin-valued models; variables of
the model are either -1 or 1.
BINARY (:class:`.Vartype`): Vartype for binary models; variables of the
model are either 0 or 1.
"""
SPIN = frozenset({-1, 1})
BINARY = frozenset({0, 1})
SPIN = Vartype.SPIN
BINARY = Vartype.BINARY
|
{
"content_hash": "f89ce185ae0af17b651e60b8d93a6255",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 92,
"avg_line_length": 24.672727272727272,
"alnum_prop": 0.6271186440677966,
"repo_name": "oneklc/dimod",
"id": "af482d97770059ba412b6f7e741ffe0786666f05",
"size": "2068",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dimod/vartypes.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "C++",
"bytes": "59430"
},
{
"name": "PowerShell",
"bytes": "7195"
},
{
"name": "Python",
"bytes": "676178"
}
],
"symlink_target": ""
}
|
"""octopus URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.contrib import admin
from engines import urls as engines_urls
from containers import urls as containers_urls
from images import urls as images_urls
urlpatterns = [
url(r'^admin/', include(admin.site.urls)),
url(r'^engines/', include(engines_urls)),
url(r'^containers/', include(containers_urls)),
url(r'^images/', include(images_urls)),
]
|
{
"content_hash": "f32b10e4767ce5c658e9c4a6109c8699",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 77,
"avg_line_length": 36.67857142857143,
"alnum_prop": 0.7069133398247323,
"repo_name": "gregorianzhang/octopus",
"id": "35a9993c6a48d139de7a2d72af6da9e6a79e041c",
"size": "1027",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "octopus/urls.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "10595"
},
{
"name": "Python",
"bytes": "13662"
}
],
"symlink_target": ""
}
|
from zope.interface import implements
from twisted.internet import defer
from twisted.python import log
from nevow import inevow, rend, loaders, static, url, tags, util
from nevow.flat import flatten
from nevow.stan import Proto, Tag
from itertools import count
cn = count().next
cookie = lambda: str(cn())
_hookup = {}
## If we need to use Canvas through a CGI which forwards to the appserver,
## then we will need to listen with the canvas protocol on another socket
## so the canvas movie can push data to us. Here is where we will keep it.
_canvasCGIService = None
# m = method
# a = argument
# <m n="moveTo" t="canvas">
# <a v="16" />
# <a v="16" />
# </m>
m = Proto('m') # method call; contains arguments
a = Proto('a') # argument; has v="" attribute for simple value, or <l> or <d> child for list or dict value
l = Proto('l') # list; has <a> children; <a> children must be simple values currently
d = Proto('d') # dict; has <i> children
i = Proto('i') # dict item; has k="" for key and v="" for simple value (no nested dicts yet)
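# Illustrative sketch (an assumption, not in the original module): with the
# prototypes above, GroupBase.call('moveTo', 16, 16) on the root canvas
# flattens to the XML shown in the comment block, i.e.
#   <m n="moveTo" t="canvas"><a v="16" /><a v="16" /></m>
# followed by a NUL terminator when written to the socket.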
def squish(it):
if isinstance(it, Tag):
return a[it]
return a(v=it)
class _Remoted(object):
def __init__(self, cookie, canvas):
self.cookie = cookie
self.canvas = canvas
class Text(_Remoted):
x = 0
y = 0
def change(self, text):
self.text = text
self.canvas.call('changeText', self.cookie, text)
def move(self, x, y):
self.x = x
self.y = y
self.canvas.call('moveText', self.cookie, x, y)
def listFonts(self):
if hasattr(self.canvas, '_fontList'):
return defer.succeed(self.canvas._fontList)
cook = cookie()
self.canvas.deferreds[cook] = d = defer.Deferred()
self.canvas.call('listFonts', cook)
def _cb(l):
L = l.split(',')
self.canvas._fontList = L
return L
return d.addCallback(_cb)
def font(self, font):
self.canvas.call('font', self.cookie, font)
def size(self, size):
self.canvas.call('size', self.cookie, size)
class Image(_Remoted):
def move(self, x, y):
self.canvas.call('moveImage', self.cookie, x, y)
def scale(self, x, y):
self.canvas.call('scaleImage', self.cookie, x, y)
def alpha(self, alpha):
self.canvas.call('alphaImage', self.cookie, alpha)
def rotate(self, angle):
self.canvas.call('rotateImage', self.cookie, angle)
class Sound(_Remoted):
def play(self, offset=0, timesLoop=0):
"""Play the sound, starting at "offset", in seconds. Loop the sound "timesLoop"
times.
"""
self.canvas.call('playSound', self.cookie, offset, timesLoop)
class GroupBase(object):
def call(self, method, *args):
"""Call a client-side method with the given arguments. Arguments
will be converted to strings. You should probably use the other higher-level
        APIs instead.
"""
flatcall = flatten(
m(n=method, t=self.groupName)[[
squish(x) for x in args if x is not None]])
self.socket.write(flatcall + '\0')
groupx = 0
groupy = 0
def reposition(self, x, y):
"""Reposition all the elements in this group
"""
self.groupx = x
self.groupy = y
self.call('reposition', x, y)
def rotate(self, angle):
"""Rotate all the elements of this group
"""
self.call('rotate', angle)
_alpha = 100
def alpha(self, percent):
"""Set the alpha value of this group
"""
self._alpha = percent
self.call('alpha', percent)
def line(self, x, y):
"""Draw a line from the current point to the given point.
(0,0) is in the center of the canvas.
"""
self.call('line', x, y)
x = 0
y = 0
def move(self, x, y):
"""Move the pen to the given point.
(0, 0) is in the center of the canvas.
"""
self.x = x
self.y = y
self.call('move', x, y)
def pen(self, width=None, rgb=None, alpha=None):
"""Change the current pen attributes.
width: an integer between 0 and 255; the pen thickness, in pixels.
rgb: an integer between 0x000000 and 0xffffff
alpha: an integer between 0 and 100; the opacity of the pen
"""
self.call('pen', width, rgb, alpha)
def clear(self):
"""Clear the current pen attributes.
"""
self.call('clear')
def fill(self, rgb, alpha=100):
"""Set the current fill. Fill will not be drawn until close is called.
rgb: color of fill, integer between 0x000000 and 0xffffff
alpha: an integer between 0 and 100; the opacity of the fill
"""
self.call('fill', rgb, alpha)
def close(self):
"""Close the current shape. A line will be drawn from the end point
to the start point, and the shape will be filled with the current fill.
"""
self.call('close')
def curve(self, controlX, controlY, anchorX, anchorY):
"""Draw a curve
"""
self.call('curve', controlX, controlY, anchorX, anchorY)
def gradient(self, type, colors, alphas, ratios, matrix):
"""Draw a gradient. Currently the API for this sucks, see the flash documentation
for info. Higher level objects for creating gradients will hopefully be developed
eventually.
"""
self.call('gradient', type,
l[[a(v=x) for x in colors]],
l[[a(v=x) for x in alphas]],
l[[a(v=x) for x in ratios]],
d[[i(k=k, v=v) for (k, v) in matrix.items()]])
def text(self, text, x, y, height, width):
"""Place the given text on the canvas using the given x, y, height and width.
The result is a Text object which can be further manipulated to affect the text.
"""
cook = cookie()
t = Text(cook, self)
t.text = text
self.call('text', cook, text, x, y, height, width)
return t
def image(self, where):
"""Load an image from the URL "where". The result is an Image object which
can be further manipulated to move it or change rotation.
"""
cook = cookie()
I = Image(cook, self)
self.call('image', cook, where)
print "IMAGE", where
return I
def sound(self, where, stream=True):
"""Load an mp3 from the URL "where". The result is a Sound object which
can be further manipulated.
        If stream is True, the sound will play as soon as possible. If False,
        the sound must be fully loaded before playback begins.
        """
cook = cookie()
S = Sound(cook, self)
self.call('sound', cook, where, stream and 1 or 0)
return S
def group(self):
"""Create a new group of shapes. The returned object will
have all of the same APIs for drawing, except the grouped
items can all be moved simultaneously, deleted, etc.
"""
cook = cookie()
G = Group('%s.G_%s' % (self.groupName, cook), self.socket, self)
self.call('group', cook)
return G
class Group(GroupBase):
def __init__(self, groupName, socket, canvas):
self.groupName = groupName
self.socket = socket
self.canvas = canvas
self.deferreds = canvas.deferreds
closed = property(lambda self: self.canvas.closed)
def setMask(self, other=None):
"""Set the mask of self to the group "other". "other" must be a Group
instance, if provided. If not provided, any previous mask will be removed
from self.
"""
if other is None:
self.call('setMask', '')
else:
self.call('setMask', other.groupName)
def setVisible(self, visible):
self.call('setVisible', str(bool(visible)))
xscale = 100
yscale = 100
def scale(self, x, y):
self.call('scale', x, y)
def swapDepth(self, intOrGroup):
"""Swap the z-order depth of this group with another.
If an int is provided, the group will be placed at that depth,
regardless of whether there is an existing clip there.
If a group is provided, the z depth of self and the other group
are swapped.
"""
if isinstance(intOrGroup, Group):
self.call('swapGroup', intOrGroup.groupName)
else:
self.call('swapInt', intOrGroup)
def depth(self):
"""Return a deferred which will fire the depth of this group.
XXX TODO
"""
return 0
class CanvasSocket(GroupBase):
"""An object which represents the client-side canvas. Defines APIs for drawing
on the canvas. An instance of this class will be passed to your onload callback.
"""
implements(inevow.IResource)
groupName = 'canvas'
closed = False
def __init__(self):
self.canvas = self
self.d = defer.Deferred().addErrback(log.err)
def locateChild(self, ctx, segs):
self.cookie = segs[0]
return (self, ())
def renderHTTP(self, ctx):
try:
self.deferreds = {}
self.buffer = ''
## Don't try this at home kids! You'll blow your arm off!
self.socket = inevow.IRequest(ctx).transport
## We be hijackin'
self.socket.protocol = self
## This request never finishes until the user leaves the page
self.delegate = _hookup[self.cookie]
self.delegate.onload(self)
del _hookup[self.cookie]
except:
log.err()
return self.d
def dataReceived(self, data):
self.buffer += data
while '\0' in self.buffer:
I = self.buffer.index('\0')
message = self.buffer[:I]
self.buffer = self.buffer[I+1:]
self.gotMessage(message)
def gotMessage(self, message):
I = message.index(' ')
handler = getattr(self, 'handle_%s' % (message[:I], ), None)
if handler is not None:
handler(message[I+1:])
else:
self.deferreds[message[:I]].callback(message[I+1:])
del self.deferreds[message[:I]]
def connectionLost(self, reason):
self.closed = True
del self.socket
def done(self):
"""Done drawing; close the connection with the movie
"""
## All done with the request object
self.closed = True
self.d.callback('')
def handle_onKeyDown(self, info):
if self.delegate.onKeyDown:
self.delegate.onKeyDown(self, chr(int(info)))
def handle_onKeyUp(self, info):
if self.delegate.onKeyUp:
self.delegate.onKeyUp(self, chr(int(info)))
def handle_onMouseUp(self, info):
if self.delegate.onMouseUp:
self.delegate.onMouseUp(self, *map(int, map(float, info.split())))
def handle_onMouseDown(self, info):
if self.delegate.onMouseDown:
self.delegate.onMouseDown(self, *map(int, map(float, info.split())))
def handle_onMouseMove(self, info):
if self.delegate.onMouseMove:
self.delegate.onMouseMove(self, *map(int, map(float, info.split())))
def handle_diagnostic(self, info):
print "Trace", info
canvasServerMessage = loaders.stan(tags.html["This server dispatches for nevow canvas events."])
def canvas(width, height, delegate, useCGI=False):
C = cookie()
if useCGI:
global _canvasCGIService
if _canvasCGIService is None:
from nevow import appserver
# Import reactor here to avoid installing default at startup
from twisted.internet import reactor
_canvasCGIService = reactor.listenTCP(0, appserver.NevowSite(Canvas(docFactory=canvasServerMessage)))
_canvasCGIService.dispatchMap = {}
port = _canvasCGIService.getHost().port
prefix = '/'
movie_url = url.here.click('/').secure(False, port)
else:
movie_url = url.here
port = lambda c, d: inevow.IRequest(c).transport.server.port
def prefix(c, d):
pre = inevow.IRequest(c).path
if pre.endswith('/'):
return pre
return pre + '/'
_hookup[C] = delegate
handlerInfo = []
for handlerName in ['onMouseMove', 'onMouseDown', 'onMouseUp', 'onKeyDown', 'onKeyUp']:
if getattr(delegate, handlerName, None) is not None:
handlerInfo.append((handlerName, 1))
movie_url = movie_url.child('nevow_canvas_movie.swf').add('cookie', C).add('port', port).add('prefix', prefix)
for (k, v) in handlerInfo:
movie_url = movie_url.add(k, v)
return tags._object(classid="clsid:d27cdb6e-ae6d-11cf-96b8-444553540000",
codebase="http://download.macromedia.com/pub/shockwave/cabs/flash/swflash.cab#version=7,0,0,0",
width=width, height=height, id=("Canvas-", C), align="middle")[
tags.param(name="allowScriptAccess", value="sameDomain"),
tags.param(name="movie", value=movie_url),
tags.param(name="quality", value="high"),
tags.param(name="scale", value="noscale"),
tags.param(name="bgcolor", value="#ffffff"),
Tag('embed')(
src=movie_url,
quality="high",
scale="noscale",
bgcolor="#ffffff",
width=width,
height=height,
name=("Canvas-", C),
align="middle",
allowScriptAccess="sameDomain",
type="application/x-shockwave-flash",
pluginspage="http://www.macromedia.com/go/getflashplayer")]
class Canvas(rend.Page):
"""A page which can embed canvases. Simplest usage is to subclass and
override width, height and onload. Then, putting render_canvas in the
template will output that canvas there.
    You can also embed more than one canvas in a page using the canvas
    helper function, canvas(width, height, delegate). The resulting stan
    will cause a canvas of the given height and width to be embedded in
    the page at that location, and the delegate's onload callable to be
    called with a CanvasSocket when the connection is established.
"""
addSlash = True
def __init__(self, original=None, width=None, height=None, onload=None,
onMouseMove=None, onMouseDown=None, onMouseUp=None,
onKeyDown=None, onKeyUp=None, **kw):
rend.Page.__init__(self, original, **kw)
if width: self.width = width
if height: self.height = height
if onload: self.onload = onload
if onMouseMove: self.onMouseMove = onMouseMove
if onMouseDown: self.onMouseDown = onMouseDown
if onMouseUp: self.onMouseUp = onMouseUp
if onKeyDown: self.onKeyDown = onKeyDown
if onKeyUp: self.onKeyUp = onKeyUp
def child_canvas_socket(self, ctx):
return CanvasSocket()
width = 1000
height = 500
onload = None
onMouseDown = None
onMouseUp = None
onMouseMove = None
onKeyUp = None
onKeyDown = None
def render_canvas(self, ctx, data):
return canvas(
self.width, self.height, self)
docFactory = loaders.stan(tags.html[render_canvas])
setattr(Canvas, 'child_nevow_canvas_movie.swf', static.File(
util.resource_filename('nevow', 'Canvas.swf'),
'application/x-shockwave-flash'))
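# Sketch (hypothetical) of embedding a canvas with the helper function above;
# the delegate only needs an onload callable, plus optional event handlers:
#
#   class Delegate(object):
#       onMouseMove = onMouseDown = onMouseUp = onKeyDown = onKeyUp = None
#       def onload(self, canvas):
#           canvas.text('hello', 0, 0, 20, 200)
#           canvas.done()
#
#   class Page(rend.Page):
#       docFactory = loaders.stan(tags.html[canvas(200, 100, Delegate())])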
|
{
"content_hash": "0910e43890dcbb6871e0cc73b49fb1dc",
"timestamp": "",
"source": "github",
"line_count": 471,
"max_line_length": 114,
"avg_line_length": 32.74946921443737,
"alnum_prop": 0.59935170178282,
"repo_name": "perkinslr/pypyjs",
"id": "2d32e0edc4ffb913fa8e78c615b8966077a7745a",
"size": "15482",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "addedLibraries/nevow/canvas.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "106"
},
{
"name": "C",
"bytes": "63586"
},
{
"name": "CSS",
"bytes": "7648"
},
{
"name": "D",
"bytes": "2081"
},
{
"name": "HTML",
"bytes": "7097"
},
{
"name": "JavaScript",
"bytes": "488078"
},
{
"name": "Makefile",
"bytes": "5877"
},
{
"name": "Objective-C",
"bytes": "1291"
},
{
"name": "Python",
"bytes": "26517313"
},
{
"name": "Shell",
"bytes": "1406"
}
],
"symlink_target": ""
}
|
import pytest
from notifiers.exceptions import BadArguments
provider = "simplepush"
class TestSimplePush:
"""SimplePush notifier tests
    Note: These tests assume the NOTIFIERS_SIMPLEPUSH_KEY environment variable is set correctly
"""
def test_simplepush_metadata(self, provider):
assert provider.metadata == {
"base_url": "https://api.simplepush.io/send",
"site_url": "https://simplepush.io/",
"name": "simplepush",
}
@pytest.mark.parametrize(
"data, message", [({}, "key"), ({"key": "foo"}, "message")]
)
def test_simplepush_missing_required(self, data, message, provider):
data["env_prefix"] = "test"
with pytest.raises(BadArguments) as e:
provider.notify(**data)
assert f"'{message}' is a required property" in e.value.message
@pytest.mark.online
def test_simplepush_sanity(self, provider, test_message):
"""Successful simplepush notification"""
data = {"message": test_message}
rsp = provider.notify(**data)
rsp.raise_on_errors()
|
{
"content_hash": "9114a0f1ebb4a024bf586e403ed90891",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 78,
"avg_line_length": 31.02857142857143,
"alnum_prop": 0.6206261510128913,
"repo_name": "liiight/notifiers",
"id": "733772a17d210e89f23fd73cc0a410399e500af7",
"size": "1086",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/providers/test_simplepush.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "811"
},
{
"name": "Dockerfile",
"bytes": "106"
},
{
"name": "Makefile",
"bytes": "612"
},
{
"name": "Python",
"bytes": "189594"
}
],
"symlink_target": ""
}
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0056_auto_20220116_0236'),
]
operations = [
migrations.AddField(
model_name='registration',
name='payment_type',
field=models.PositiveSmallIntegerField(choices=[(1, 'Card'), (2, 'Check')], default=1),
),
]
|
{
"content_hash": "39f5c6a7cf6c6c86e673eba5a15dc64d",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 99,
"avg_line_length": 24.5625,
"alnum_prop": 0.5826972010178118,
"repo_name": "bmun/huxley",
"id": "3e1227a7a663a6fa501320a4657c724d81fc49b3",
"size": "442",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "huxley/core/migrations/0057_registration_payment_type.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "13301"
},
{
"name": "JavaScript",
"bytes": "400597"
},
{
"name": "Less",
"bytes": "19215"
},
{
"name": "Python",
"bytes": "635783"
},
{
"name": "Shell",
"bytes": "2475"
}
],
"symlink_target": ""
}
|
import _plotly_utils.basevalidators
class TextValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(
self, plotly_name="text", parent_name="table.legendgrouptitle", **kwargs
):
super(TextValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "style"),
**kwargs,
)
|
{
"content_hash": "5df1234897f017a960ee9a88546510c5",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 80,
"avg_line_length": 32.15384615384615,
"alnum_prop": 0.6052631578947368,
"repo_name": "plotly/plotly.py",
"id": "438b6def34e6b0c7b78132236a0be5668f7ddf2e",
"size": "418",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/table/legendgrouptitle/_text.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
}
|
"""
XMPP publish-subscribe protocol.
This protocol is specified in
U{XEP-0060<http://www.xmpp.org/extensions/xep-0060.html>}.
"""
from zope.interface import implements
from twisted.internet import defer
from twisted.python import log
from twisted.words.protocols.jabber import jid, error
from twisted.words.xish import domish
from wokkel import disco, data_form, generic, shim
from wokkel.compat import IQ
from wokkel.subprotocols import IQHandlerMixin, XMPPHandler
from wokkel.iwokkel import IPubSubClient, IPubSubService, IPubSubResource
# Iq get and set XPath queries
IQ_GET = '/iq[@type="get"]'
IQ_SET = '/iq[@type="set"]'
# Publish-subscribe namespaces
NS_PUBSUB = 'http://jabber.org/protocol/pubsub'
NS_PUBSUB_EVENT = NS_PUBSUB + '#event'
NS_PUBSUB_ERRORS = NS_PUBSUB + '#errors'
NS_PUBSUB_OWNER = NS_PUBSUB + "#owner"
NS_PUBSUB_NODE_CONFIG = NS_PUBSUB + "#node_config"
NS_PUBSUB_META_DATA = NS_PUBSUB + "#meta-data"
NS_PUBSUB_SUBSCRIBE_OPTIONS = NS_PUBSUB + "#subscribe_options"
# XPath to match pubsub requests
PUBSUB_REQUEST = '/iq[@type="get" or @type="set"]/' + \
'pubsub[@xmlns="' + NS_PUBSUB + '" or ' + \
'@xmlns="' + NS_PUBSUB_OWNER + '"]'
class SubscriptionPending(Exception):
"""
Raised when the requested subscription is pending acceptance.
"""
class SubscriptionUnconfigured(Exception):
"""
Raised when the requested subscription needs to be configured before
becoming active.
"""
class PubSubError(error.StanzaError):
"""
Exception with publish-subscribe specific condition.
"""
def __init__(self, condition, pubsubCondition, feature=None, text=None):
appCondition = domish.Element((NS_PUBSUB_ERRORS, pubsubCondition))
if feature:
appCondition['feature'] = feature
error.StanzaError.__init__(self, condition,
text=text,
appCondition=appCondition)
class BadRequest(error.StanzaError):
"""
Bad request stanza error.
"""
def __init__(self, pubsubCondition=None, text=None):
if pubsubCondition:
appCondition = domish.Element((NS_PUBSUB_ERRORS, pubsubCondition))
else:
appCondition = None
error.StanzaError.__init__(self, 'bad-request',
text=text,
appCondition=appCondition)
class Unsupported(PubSubError):
def __init__(self, feature, text=None):
self.feature = feature
PubSubError.__init__(self, 'feature-not-implemented',
'unsupported',
feature,
text)
def __str__(self):
message = PubSubError.__str__(self)
message += ', feature %r' % self.feature
return message
class Subscription(object):
"""
A subscription to a node.
@ivar nodeIdentifier: The identifier of the node subscribed to.
The root node is denoted by C{None}.
@ivar subscriber: The subscribing entity.
@ivar state: The subscription state. One of C{'subscribed'}, C{'pending'},
C{'unconfigured'}.
@ivar options: Optional list of subscription options.
@type options: C{dict}.
"""
def __init__(self, nodeIdentifier, subscriber, state, options=None):
self.nodeIdentifier = nodeIdentifier
self.subscriber = subscriber
self.state = state
self.options = options or {}
class Item(domish.Element):
"""
Publish subscribe item.
This behaves like an object providing L{domish.IElement}.
Item payload can be added using C{addChild} or C{addRawXml}, or using the
C{payload} keyword argument to C{__init__}.
"""
def __init__(self, id=None, payload=None):
"""
@param id: optional item identifier
@type id: L{unicode}
@param payload: optional item payload. Either as a domish element, or
as serialized XML.
@type payload: object providing L{domish.IElement} or L{unicode}.
"""
domish.Element.__init__(self, (NS_PUBSUB, 'item'))
if id is not None:
self['id'] = id
if payload is not None:
if isinstance(payload, basestring):
self.addRawXml(payload)
else:
self.addChild(payload)
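# Illustrative sketch (not in the original module): the two payload forms
# accepted by Item above, as a raw-XML string or a domish element. The
# 'urn:example' namespace is made up for illustration.
#
#   item1 = Item(id='current', payload='<foo xmlns="urn:example"/>')
#   item2 = Item(payload=domish.Element(('urn:example', 'foo')))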
class PubSubRequest(generic.Stanza):
"""
A publish-subscribe request.
The set of instance variables used depends on the type of request. If
a variable is not applicable or not passed in the request, its value is
C{None}.
@ivar verb: The type of publish-subscribe request. See L{_requestVerbMap}.
@type verb: C{str}.
@ivar affiliations: Affiliations to be modified.
@type affiliations: C{set}
@ivar items: The items to be published, as L{domish.Element}s.
@type items: C{list}
@ivar itemIdentifiers: Identifiers of the items to be retrieved or
retracted.
@type itemIdentifiers: C{set}
@ivar maxItems: Maximum number of items to retrieve.
@type maxItems: C{int}.
@ivar nodeIdentifier: Identifier of the node the request is about.
@type nodeIdentifier: C{unicode}
@ivar nodeType: The type of node that should be created, or for which the
configuration is retrieved. C{'leaf'} or C{'collection'}.
@type nodeType: C{str}
@ivar options: Configurations options for nodes, subscriptions and publish
requests.
@type options: L{data_form.Form}
@ivar subscriber: The subscribing entity.
@type subscriber: L{JID}
@ivar subscriptionIdentifier: Identifier for a specific subscription.
@type subscriptionIdentifier: C{unicode}
@ivar subscriptions: Subscriptions to be modified, as a set of
L{Subscription}.
@type subscriptions: C{set}
"""
verb = None
affiliations = None
items = None
itemIdentifiers = None
maxItems = None
nodeIdentifier = None
nodeType = None
options = None
subscriber = None
subscriptionIdentifier = None
subscriptions = None
# Map request iq type and subelement name to request verb
_requestVerbMap = {
('set', NS_PUBSUB, 'publish'): 'publish',
('set', NS_PUBSUB, 'subscribe'): 'subscribe',
('set', NS_PUBSUB, 'unsubscribe'): 'unsubscribe',
('get', NS_PUBSUB, 'options'): 'optionsGet',
('set', NS_PUBSUB, 'options'): 'optionsSet',
('get', NS_PUBSUB, 'subscriptions'): 'subscriptions',
('get', NS_PUBSUB, 'affiliations'): 'affiliations',
('set', NS_PUBSUB, 'create'): 'create',
('get', NS_PUBSUB_OWNER, 'default'): 'default',
('get', NS_PUBSUB_OWNER, 'configure'): 'configureGet',
('set', NS_PUBSUB_OWNER, 'configure'): 'configureSet',
('get', NS_PUBSUB, 'items'): 'items',
('set', NS_PUBSUB, 'retract'): 'retract',
('set', NS_PUBSUB_OWNER, 'purge'): 'purge',
('set', NS_PUBSUB_OWNER, 'delete'): 'delete',
('get', NS_PUBSUB_OWNER, 'affiliations'): 'affiliationsGet',
('set', NS_PUBSUB_OWNER, 'affiliations'): 'affiliationsSet',
('get', NS_PUBSUB_OWNER, 'subscriptions'): 'subscriptionsGet',
('set', NS_PUBSUB_OWNER, 'subscriptions'): 'subscriptionsSet',
}
# Map request verb to request iq type and subelement name
_verbRequestMap = dict(((v, k) for k, v in _requestVerbMap.iteritems()))
# Map request verb to parameter handler names
_parameters = {
'publish': ['node', 'items'],
'subscribe': ['nodeOrEmpty', 'jid'],
'unsubscribe': ['nodeOrEmpty', 'jid'],
'optionsGet': ['nodeOrEmpty', 'jid'],
'optionsSet': ['nodeOrEmpty', 'jid', 'options'],
'subscriptions': [],
'affiliations': [],
'create': ['nodeOrNone', 'configure'],
'default': ['default'],
'configureGet': ['nodeOrEmpty'],
'configureSet': ['nodeOrEmpty', 'configure'],
'items': ['node', 'maxItems', 'itemIdentifiers'],
'retract': ['node', 'itemIdentifiers'],
'purge': ['node'],
'delete': ['node'],
'affiliationsGet': ['nodeOrEmpty'],
'affiliationsSet': [],
'subscriptionsGet': ['nodeOrEmpty'],
'subscriptionsSet': [],
}
def __init__(self, verb=None):
self.verb = verb
@staticmethod
def _findForm(element, formNamespace):
"""
Find a Data Form.
Look for an element that represents a Data Form with the specified
form namespace as a child element of the given element.
"""
if not element:
return None
form = None
for child in element.elements():
try:
form = data_form.Form.fromElement(child)
except data_form.Error:
continue
            if form.formNamespace != formNamespace:
                continue
return form
def _parse_node(self, verbElement):
"""
Parse the required node identifier out of the verbElement.
"""
try:
self.nodeIdentifier = verbElement["node"]
except KeyError:
raise BadRequest('nodeid-required')
def _render_node(self, verbElement):
"""
Render the required node identifier on the verbElement.
"""
if not self.nodeIdentifier:
raise Exception("Node identifier is required")
verbElement['node'] = self.nodeIdentifier
def _parse_nodeOrEmpty(self, verbElement):
"""
Parse the node identifier out of the verbElement. May be empty.
"""
self.nodeIdentifier = verbElement.getAttribute("node", '')
def _render_nodeOrEmpty(self, verbElement):
"""
Render the node identifier on the verbElement. May be empty.
"""
if self.nodeIdentifier:
verbElement['node'] = self.nodeIdentifier
def _parse_nodeOrNone(self, verbElement):
"""
Parse the optional node identifier out of the verbElement.
"""
self.nodeIdentifier = verbElement.getAttribute("node")
def _render_nodeOrNone(self, verbElement):
"""
Render the optional node identifier on the verbElement.
"""
if self.nodeIdentifier:
verbElement['node'] = self.nodeIdentifier
def _parse_items(self, verbElement):
"""
Parse items out of the verbElement for publish requests.
"""
self.items = []
for element in verbElement.elements():
if element.uri == NS_PUBSUB and element.name == 'item':
self.items.append(element)
def _render_items(self, verbElement):
"""
Render items into the verbElement for publish requests.
"""
if self.items:
for item in self.items:
verbElement.addChild(item)
def _parse_jid(self, verbElement):
"""
Parse subscriber out of the verbElement for un-/subscribe requests.
"""
try:
self.subscriber = jid.internJID(verbElement["jid"])
except KeyError:
raise BadRequest('jid-required')
def _render_jid(self, verbElement):
"""
Render subscriber into the verbElement for un-/subscribe requests.
"""
verbElement['jid'] = self.subscriber.full()
def _parse_default(self, verbElement):
"""
Parse node type out of a request for the default node configuration.
"""
form = PubSubRequest._findForm(verbElement, NS_PUBSUB_NODE_CONFIG)
if form and form.formType == 'submit':
values = form.getValues()
self.nodeType = values.get('pubsub#node_type', 'leaf')
else:
self.nodeType = 'leaf'
def _parse_configure(self, verbElement):
"""
Parse options out of a request for setting the node configuration.
"""
form = PubSubRequest._findForm(verbElement, NS_PUBSUB_NODE_CONFIG)
if form:
if form.formType == 'submit':
self.options = form.getValues()
elif form.formType == 'cancel':
self.options = {}
else:
raise BadRequest(text="Unexpected form type %r" % form.formType)
else:
raise BadRequest(text="Missing configuration form")
def _parse_itemIdentifiers(self, verbElement):
"""
Parse item identifiers out of items and retract requests.
"""
self.itemIdentifiers = []
for element in verbElement.elements():
if element.uri == NS_PUBSUB and element.name == 'item':
try:
self.itemIdentifiers.append(element["id"])
except KeyError:
raise BadRequest()
def _render_itemIdentifiers(self, verbElement):
"""
Render item identifiers into items and retract requests.
"""
if self.itemIdentifiers:
for itemIdentifier in self.itemIdentifiers:
item = verbElement.addElement('item')
item['id'] = itemIdentifier
def _parse_maxItems(self, verbElement):
"""
Parse maximum items out of an items request.
"""
value = verbElement.getAttribute('max_items')
if value:
try:
self.maxItems = int(value)
except ValueError:
raise BadRequest(text="Field max_items requires a positive " +
"integer value")
def _render_maxItems(self, verbElement):
"""
        Render maximum items into an items request.
"""
if self.maxItems:
verbElement['max_items'] = unicode(self.maxItems)
def _render_configure(self, verbElement):
if self.options:
verbElement.addChild(self.options.toElement())
def _parse_options(self, verbElement):
form = PubSubRequest._findForm(verbElement, NS_PUBSUB_SUBSCRIBE_OPTIONS)
if form:
if form.formType == 'submit':
self.options = form.getValues()
elif form.formType == 'cancel':
self.options = {}
else:
raise BadRequest(text="Unexpected form type %r" % form.formType)
else:
raise BadRequest(text="Missing options form")
def parseElement(self, element):
"""
Parse the publish-subscribe verb and parameters out of a request.
"""
generic.Stanza.parseElement(self, element)
for child in element.pubsub.elements():
key = (self.stanzaType, child.uri, child.name)
try:
verb = self._requestVerbMap[key]
except KeyError:
continue
else:
self.verb = verb
break
if not self.verb:
raise NotImplementedError()
for parameter in self._parameters[verb]:
getattr(self, '_parse_%s' % parameter)(child)
def send(self, xs):
"""
Send this request to its recipient.
        This renders all of the relevant parameters for this specific
        request into an L{IQ} and invokes its C{send} method.
This returns a deferred that fires upon reception of a response. See
L{IQ} for details.
@param xs: The XML stream to send the request on.
@type xs: L{xmlstream.XmlStream}
@rtype: L{defer.Deferred}.
"""
try:
(self.stanzaType,
childURI,
childName) = self._verbRequestMap[self.verb]
except KeyError:
raise NotImplementedError("Unhandled verb: " + str(self.verb))
iq = IQ(xs, self.stanzaType)
iq.addElement((childURI, 'pubsub'))
verbElement = iq.pubsub.addElement(childName)
if self.sender:
iq['from'] = self.sender.full()
if self.recipient:
iq['to'] = self.recipient.full()
for parameter in self._parameters[self.verb]:
getattr(self, '_render_%s' % parameter)(verbElement)
return iq.send()
class PubSubEvent(object):
"""
A publish subscribe event.
@param sender: The entity from which the notification was received.
@type sender: L{jid.JID}
@param recipient: The entity to which the notification was sent.
    @type recipient: L{jid.JID}
@param nodeIdentifier: Identifier of the node the event pertains to.
@type nodeIdentifier: C{unicode}
@param headers: SHIM headers, see L{wokkel.shim.extractHeaders}.
@type headers: L{dict}
"""
def __init__(self, sender, recipient, nodeIdentifier, headers):
self.sender = sender
self.recipient = recipient
self.nodeIdentifier = nodeIdentifier
self.headers = headers
class ItemsEvent(PubSubEvent):
"""
A publish-subscribe event that signifies new, updated and retracted items.
@param items: List of received items as domish elements.
@type items: C{list} of L{domish.Element}
"""
def __init__(self, sender, recipient, nodeIdentifier, items, headers):
PubSubEvent.__init__(self, sender, recipient, nodeIdentifier, headers)
self.items = items
class DeleteEvent(PubSubEvent):
"""
A publish-subscribe event that signifies the deletion of a node.
"""
redirectURI = None
class PurgeEvent(PubSubEvent):
"""
A publish-subscribe event that signifies the purging of a node.
"""
class PubSubClient(XMPPHandler):
"""
Publish subscribe client protocol.
"""
implements(IPubSubClient)
def connectionInitialized(self):
self.xmlstream.addObserver('/message/event[@xmlns="%s"]' %
NS_PUBSUB_EVENT, self._onEvent)
def _onEvent(self, message):
try:
sender = jid.JID(message["from"])
recipient = jid.JID(message["to"])
except KeyError:
return
actionElement = None
for element in message.event.elements():
if element.uri == NS_PUBSUB_EVENT:
actionElement = element
if not actionElement:
return
eventHandler = getattr(self, "_onEvent_%s" % actionElement.name, None)
if eventHandler:
headers = shim.extractHeaders(message)
eventHandler(sender, recipient, actionElement, headers)
message.handled = True
def _onEvent_items(self, sender, recipient, action, headers):
nodeIdentifier = action["node"]
items = [element for element in action.elements()
if element.name in ('item', 'retract')]
event = ItemsEvent(sender, recipient, nodeIdentifier, items, headers)
self.itemsReceived(event)
def _onEvent_delete(self, sender, recipient, action, headers):
nodeIdentifier = action["node"]
event = DeleteEvent(sender, recipient, nodeIdentifier, headers)
if action.redirect:
event.redirectURI = action.redirect.getAttribute('uri')
self.deleteReceived(event)
def _onEvent_purge(self, sender, recipient, action, headers):
nodeIdentifier = action["node"]
event = PurgeEvent(sender, recipient, nodeIdentifier, headers)
self.purgeReceived(event)
def itemsReceived(self, event):
pass
def deleteReceived(self, event):
pass
def purgeReceived(self, event):
pass
def _addOptionsFromDict(self, request, conf):
form = data_form.Form(formType="submit",
formNamespace=NS_PUBSUB_NODE_CONFIG)
for k,v in conf.iteritems():
if getattr(v, '__iter__', False) and not isinstance(v, basestring):
form.addField(data_form.Field(fieldType='text-multi',
var=k, values=[str(x) for x in v]))
else:
form.addField(data_form.Field(var=k, value=str(v)))
request.options = form
def createNode(self, service, nodeIdentifier=None, sender=None, conf={}):
"""
Create a publish subscribe node.
@param service: The publish subscribe service to create the node at.
@type service: L{JID}
@param nodeIdentifier: Optional suggestion for the id of the node.
@type nodeIdentifier: C{unicode}
"""
request = PubSubRequest('create')
request.recipient = service
request.nodeIdentifier = nodeIdentifier
request.sender = sender
if conf:
self._addOptionsFromDict(request, conf)
def cb(iq):
try:
new_node = iq.pubsub.create["node"]
except AttributeError:
# the suggested node identifier was accepted
new_node = nodeIdentifier
return new_node
d = request.send(self.xmlstream)
d.addCallback(cb)
return d
def configureNode(self, service, nodeIdentifier, conf={}, sender=None):
"""
Apply a configuration to a node.
@param service: The pubsub service where the node exists
@type service: L{JID}
@param conf: form values to configure
@type conf: dict
@param nodeIdentifier: Identifier of the node to configure
@type nodeIdentifier: C{unicode}
        @param sender: The entity from which the request should be sent
@type sender: L{JID}
"""
request = PubSubRequest('configureSet')
request.recipient = service
request.nodeIdentifier = nodeIdentifier
request.sender = sender
self._addOptionsFromDict(request, conf)
return request.send(self.xmlstream)
def getNodeConfiguration(self, service, nodeIdentifier, sender=None):
"""
        Retrieve the current configuration of a node.
@param service: The pubsub service where the node exists
@type service: L{JID}
@param nodeIdentifier: Identifier of the node to configure
@type nodeIdentifier: C{unicode}
        @param sender: The entity from which the request should be sent
@type sender: L{JID}
"""
request = PubSubRequest('configureGet')
request.recipient = service
request.nodeIdentifier = nodeIdentifier
request.sender = sender
return request.send(self.xmlstream)
def deleteNode(self, service, nodeIdentifier, sender=None):
"""
Delete a publish subscribe node.
@param service: The publish subscribe service to delete the node from.
@type service: L{JID}
@param nodeIdentifier: The identifier of the node.
@type nodeIdentifier: C{unicode}
"""
request = PubSubRequest('delete')
request.recipient = service
request.nodeIdentifier = nodeIdentifier
request.sender = sender
return request.send(self.xmlstream)
def subscribe(self, service, nodeIdentifier, subscriber, sender=None):
"""
Subscribe to a publish subscribe node.
@param service: The publish subscribe service that keeps the node.
@type service: L{JID}
@param nodeIdentifier: The identifier of the node.
@type nodeIdentifier: C{unicode}
@param subscriber: The entity to subscribe to the node. This entity
will get notifications of new published items.
@type subscriber: L{JID}
"""
request = PubSubRequest('subscribe')
request.recipient = service
request.nodeIdentifier = nodeIdentifier
request.subscriber = subscriber
request.sender = sender
def cb(iq):
subscription = iq.pubsub.subscription["subscription"]
if subscription == 'pending':
raise SubscriptionPending
elif subscription == 'unconfigured':
raise SubscriptionUnconfigured
else:
# we assume subscription == 'subscribed'
# any other value would be invalid, but that should have
# yielded a stanza error.
return None
d = request.send(self.xmlstream)
d.addCallback(cb)
return d
def unsubscribe(self, service, nodeIdentifier, subscriber, sender=None):
"""
Unsubscribe from a publish subscribe node.
@param service: The publish subscribe service that keeps the node.
@type service: L{JID}
@param nodeIdentifier: The identifier of the node.
@type nodeIdentifier: C{unicode}
@param subscriber: The entity to unsubscribe from the node.
@type subscriber: L{JID}
"""
request = PubSubRequest('unsubscribe')
request.recipient = service
request.nodeIdentifier = nodeIdentifier
request.subscriber = subscriber
request.sender = sender
return request.send(self.xmlstream)
def publish(self, service, nodeIdentifier, items=None, sender=None):
"""
Publish to a publish subscribe node.
@param service: The publish subscribe service that keeps the node.
@type service: L{JID}
@param nodeIdentifier: The identifier of the node.
@type nodeIdentifier: C{unicode}
@param items: Optional list of L{Item}s to publish.
@type items: C{list}
"""
request = PubSubRequest('publish')
request.recipient = service
request.nodeIdentifier = nodeIdentifier
request.items = items
request.sender = sender
return request.send(self.xmlstream)
def items(self, service, nodeIdentifier, maxItems=None, sender=None):
"""
Retrieve previously published items from a publish subscribe node.
@param service: The publish subscribe service that keeps the node.
@type service: L{JID}
@param nodeIdentifier: The identifier of the node.
@type nodeIdentifier: C{unicode}
@param maxItems: Optional limit on the number of retrieved items.
@type maxItems: C{int}
"""
request = PubSubRequest('items')
request.recipient = service
request.nodeIdentifier = nodeIdentifier
if maxItems:
request.maxItems = str(int(maxItems))
request.sender = sender
def cb(iq):
items = []
for element in iq.pubsub.items.elements():
if element.uri == NS_PUBSUB and element.name == 'item':
items.append(element)
return items
d = request.send(self.xmlstream)
d.addCallback(cb)
return d
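# Usage sketch (hypothetical; the stream manager, JID and node name are made
# up for illustration): publishing an item from a connected PubSubClient.
#
#   client = PubSubClient()
#   client.setHandlerParent(streamManager)
#   d = client.publish(jid.JID('pubsub.example.org'), 'princely_musings',
#                      items=[Item(payload='<note>hi</note>')])
#   d.addErrback(log.err)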
class PubSubService(XMPPHandler, IQHandlerMixin):
"""
    Protocol implementation for an XMPP Publish Subscribe Service.
The word Service here is used as taken from the Publish Subscribe
specification. It is the party responsible for keeping nodes and their
subscriptions, and sending out notifications.
Methods from the L{IPubSubService} interface that are called as
    a result of an XMPP request may raise exceptions. Alternatively, the
    deferreds returned by these methods may have their errbacks called. These are
handled as follows:
- If the exception is an instance of L{error.StanzaError}, an error
response iq is returned.
- Any other exception is reported using L{log.msg}. An error response
with the condition C{internal-server-error} is returned.
    The default implementations of said methods raise an L{Unsupported}
    exception and are meant to be overridden.
@ivar discoIdentity: Service discovery identity as a dictionary with
keys C{'category'}, C{'type'} and C{'name'}.
@ivar pubSubFeatures: List of supported publish-subscribe features for
service discovery, as C{str}.
@type pubSubFeatures: C{list} or C{None}
"""
implements(IPubSubService)
iqHandlers = {
'/*': '_onPubSubRequest',
}
_legacyHandlers = {
'publish': ('publish', ['sender', 'recipient',
'nodeIdentifier', 'items']),
'subscribe': ('subscribe', ['sender', 'recipient',
'nodeIdentifier', 'subscriber']),
'unsubscribe': ('unsubscribe', ['sender', 'recipient',
'nodeIdentifier', 'subscriber']),
'subscriptions': ('subscriptions', ['sender', 'recipient']),
'affiliations': ('affiliations', ['sender', 'recipient']),
'create': ('create', ['sender', 'recipient', 'nodeIdentifier']),
'getConfigurationOptions': ('getConfigurationOptions', []),
'default': ('getDefaultConfiguration',
['sender', 'recipient', 'nodeType']),
'configureGet': ('getConfiguration', ['sender', 'recipient',
'nodeIdentifier']),
'configureSet': ('setConfiguration', ['sender', 'recipient',
'nodeIdentifier', 'options']),
'items': ('items', ['sender', 'recipient', 'nodeIdentifier',
'maxItems', 'itemIdentifiers']),
'retract': ('retract', ['sender', 'recipient', 'nodeIdentifier',
'itemIdentifiers']),
'purge': ('purge', ['sender', 'recipient', 'nodeIdentifier']),
'delete': ('delete', ['sender', 'recipient', 'nodeIdentifier']),
}
hideNodes = False
def __init__(self, resource=None):
self.resource = resource
self.discoIdentity = {'category': 'pubsub',
'type': 'generic',
'name': 'Generic Publish-Subscribe Service'}
self.pubSubFeatures = []
def connectionMade(self):
self.xmlstream.addObserver(PUBSUB_REQUEST, self.handleRequest)
def getDiscoInfo(self, requestor, target, nodeIdentifier):
def toInfo(nodeInfo, info):
if not nodeInfo:
return info
(nodeType, metaData) = nodeInfo['type'], nodeInfo['meta-data']
info.append(disco.DiscoIdentity('pubsub', nodeType))
if metaData:
form = data_form.Form(formType="result",
formNamespace=NS_PUBSUB_META_DATA)
form.addField(
data_form.Field(
var='pubsub#node_type',
value=nodeType,
label='The type of node (collection or leaf)'
)
)
for metaDatum in metaData:
form.addField(data_form.Field.fromDict(metaDatum))
info.append(form)
return info
info = []
request = PubSubRequest('discoInfo')
if self.resource is not None:
resource = self.resource.locateResource(request)
identity = resource.discoIdentity
features = resource.features
getInfo = resource.getInfo
else:
            identity = disco.DiscoIdentity(self.discoIdentity['category'],
                                           self.discoIdentity['type'],
                                           self.discoIdentity['name'])
features = self.pubSubFeatures
getInfo = self.getNodeInfo
if not nodeIdentifier:
info.append(identity)
info.append(disco.DiscoFeature(disco.NS_DISCO_ITEMS))
info.extend([disco.DiscoFeature("%s#%s" % (NS_PUBSUB, feature))
for feature in features])
d = getInfo(requestor, target, nodeIdentifier or '')
d.addCallback(toInfo, info)
d.addErrback(log.err)
return d
def getDiscoItems(self, requestor, target, nodeIdentifier):
if self.hideNodes:
d = defer.succeed([])
elif self.resource is not None:
request = PubSubRequest('discoInfo')
resource = self.resource.locateResource(request)
d = resource.getNodes(requestor, target, nodeIdentifier)
        elif nodeIdentifier:
            # The legacy getNodes API below enumerates only the service
            # root, so children of a specific node cannot be listed here.
            d = defer.succeed([])
        else:
            d = self.getNodes(requestor, target)
d.addCallback(lambda nodes: [disco.DiscoItem(target, node)
for node in nodes])
return d
def _onPubSubRequest(self, iq):
request = PubSubRequest.fromElement(iq)
if self.resource is not None:
resource = self.resource.locateResource(request)
else:
resource = self
# Preprocess the request, knowing the handling resource
try:
preProcessor = getattr(self, '_preProcess_%s' % request.verb)
except AttributeError:
pass
else:
request = preProcessor(resource, request)
if request is None:
return defer.succeed(None)
# Process the request itself,
if resource is not self:
try:
handler = getattr(resource, request.verb)
except AttributeError:
# fix lookup feature
text = "Request verb: %s" % request.verb
return defer.fail(Unsupported('', text))
d = handler(request)
else:
handlerName, argNames = self._legacyHandlers[request.verb]
handler = getattr(self, handlerName)
args = [getattr(request, arg) for arg in argNames]
d = handler(*args)
# If needed, translate the result into a response
try:
cb = getattr(self, '_toResponse_%s' % request.verb)
except AttributeError:
pass
else:
d.addCallback(cb, resource, request)
return d
def _toResponse_subscribe(self, result, resource, request):
response = domish.Element((NS_PUBSUB, "pubsub"))
subscription = response.addElement("subscription")
if result.nodeIdentifier:
subscription["node"] = result.nodeIdentifier
subscription["jid"] = result.subscriber.full()
subscription["subscription"] = result.state
return response
def _toResponse_subscriptions(self, result, resource, request):
response = domish.Element((NS_PUBSUB, 'pubsub'))
subscriptions = response.addElement('subscriptions')
for subscription in result:
item = subscriptions.addElement('subscription')
item['node'] = subscription.nodeIdentifier
item['jid'] = subscription.subscriber.full()
item['subscription'] = subscription.state
return response
def _toResponse_affiliations(self, result, resource, request):
response = domish.Element((NS_PUBSUB, 'pubsub'))
affiliations = response.addElement('affiliations')
for nodeIdentifier, affiliation in result:
item = affiliations.addElement('affiliation')
item['node'] = nodeIdentifier
item['affiliation'] = affiliation
return response
def _toResponse_create(self, result, resource, request):
if not request.nodeIdentifier or request.nodeIdentifier != result:
response = domish.Element((NS_PUBSUB, 'pubsub'))
create = response.addElement('create')
create['node'] = result
return response
else:
return None
def _makeFields(self, options, values):
fields = []
for name, value in values.iteritems():
if name not in options:
continue
option = {'var': name}
option.update(options[name])
if isinstance(value, list):
option['values'] = value
else:
option['value'] = value
fields.append(data_form.Field.fromDict(option))
return fields
def _formFromConfiguration(self, resource, values):
options = resource.getConfigurationOptions()
fields = self._makeFields(options, values)
form = data_form.Form(formType="form",
formNamespace=NS_PUBSUB_NODE_CONFIG,
fields=fields)
return form
def _checkConfiguration(self, resource, values):
options = resource.getConfigurationOptions()
processedValues = {}
for key, value in values.iteritems():
if key not in options:
continue
option = {'var': key}
option.update(options[key])
field = data_form.Field.fromDict(option)
if isinstance(value, list):
field.values = value
else:
field.value = value
field.typeCheck()
if isinstance(value, list):
processedValues[key] = field.values
else:
processedValues[key] = field.value
return processedValues
def _preProcess_default(self, resource, request):
if request.nodeType not in ('leaf', 'collection'):
raise error.StanzaError('not-acceptable')
else:
return request
def _toResponse_default(self, options, resource, request):
response = domish.Element((NS_PUBSUB_OWNER, "pubsub"))
default = response.addElement("default")
form = self._formFromConfiguration(resource, options)
default.addChild(form.toElement())
return response
def _toResponse_configureGet(self, options, resource, request):
response = domish.Element((NS_PUBSUB_OWNER, "pubsub"))
configure = response.addElement("configure")
form = self._formFromConfiguration(resource, options)
configure.addChild(form.toElement())
if request.nodeIdentifier:
configure["node"] = request.nodeIdentifier
return response
def _preProcess_configureSet(self, resource, request):
if request.options:
request.options = self._checkConfiguration(resource,
request.options)
return request
else:
return None
def _toResponse_items(self, result, resource, request):
response = domish.Element((NS_PUBSUB, 'pubsub'))
items = response.addElement('items')
items["node"] = request.nodeIdentifier
for item in result:
items.addChild(item)
return response
def _createNotification(self, eventType, service, nodeIdentifier,
subscriber, subscriptions=None):
headers = []
if subscriptions:
for subscription in subscriptions:
if nodeIdentifier != subscription.nodeIdentifier:
headers.append(('Collection', subscription.nodeIdentifier))
message = domish.Element((None, "message"))
message["from"] = service.full()
message["to"] = subscriber.full()
event = message.addElement((NS_PUBSUB_EVENT, "event"))
element = event.addElement(eventType)
element["node"] = nodeIdentifier
if headers:
message.addChild(shim.Headers(headers))
return message
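    # Rough shape of the notification built above (illustrative; JIDs and
    # node name are made up):
    #   <message from='pubsub.example.org' to='user@example.org'>
    #     <event xmlns='http://jabber.org/protocol/pubsub#event'>
    #       <items node='princely_musings'/>
    #     </event>
    #   </message>
    # with optional SHIM headers appended for collection subscriptions.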
# public methods
def notifyPublish(self, service, nodeIdentifier, notifications):
for subscriber, subscriptions, items in notifications:
message = self._createNotification('items', service,
nodeIdentifier, subscriber,
subscriptions)
message.event.items.children = items
self.send(message)
def notifyDelete(self, service, nodeIdentifier, subscribers,
redirectURI=None):
for subscriber in subscribers:
message = self._createNotification('delete', service,
nodeIdentifier,
subscriber)
if redirectURI:
redirect = message.event.delete.addElement('redirect')
redirect['uri'] = redirectURI
self.send(message)
def getNodeInfo(self, requestor, service, nodeIdentifier):
return None
def getNodes(self, requestor, service):
return []
def publish(self, requestor, service, nodeIdentifier, items):
raise Unsupported('publish')
def subscribe(self, requestor, service, nodeIdentifier, subscriber):
raise Unsupported('subscribe')
def unsubscribe(self, requestor, service, nodeIdentifier, subscriber):
raise Unsupported('subscribe')
def subscriptions(self, requestor, service):
raise Unsupported('retrieve-subscriptions')
def affiliations(self, requestor, service):
raise Unsupported('retrieve-affiliations')
def create(self, requestor, service, nodeIdentifier):
raise Unsupported('create-nodes')
def getConfigurationOptions(self):
return {}
def getDefaultConfiguration(self, requestor, service, nodeType):
raise Unsupported('retrieve-default')
def getConfiguration(self, requestor, service, nodeIdentifier):
raise Unsupported('config-node')
def setConfiguration(self, requestor, service, nodeIdentifier, options):
raise Unsupported('config-node')
def items(self, requestor, service, nodeIdentifier, maxItems,
itemIdentifiers):
raise Unsupported('retrieve-items')
def retract(self, requestor, service, nodeIdentifier, itemIdentifiers):
raise Unsupported('retract-items')
def purge(self, requestor, service, nodeIdentifier):
raise Unsupported('purge-nodes')
def delete(self, requestor, service, nodeIdentifier):
raise Unsupported('delete-nodes')
class PubSubResource(object):
implements(IPubSubResource)
features = []
discoIdentity = disco.DiscoIdentity('pubsub',
'service',
'Publish-Subscribe Service')
def locateResource(self, request):
return self
def getInfo(self, requestor, service, nodeIdentifier):
return defer.succeed(None)
def getNodes(self, requestor, service, nodeIdentifier):
return defer.succeed([])
def getConfigurationOptions(self):
return {}
def publish(self, request):
return defer.fail(Unsupported('publish'))
def subscribe(self, request):
return defer.fail(Unsupported('subscribe'))
def unsubscribe(self, request):
return defer.fail(Unsupported('subscribe'))
def subscriptions(self, request):
return defer.fail(Unsupported('retrieve-subscriptions'))
def affiliations(self, request):
return defer.fail(Unsupported('retrieve-affiliations'))
def create(self, request):
return defer.fail(Unsupported('create-nodes'))
def default(self, request):
return defer.fail(Unsupported('retrieve-default'))
def configureGet(self, request):
return defer.fail(Unsupported('config-node'))
def configureSet(self, request):
return defer.fail(Unsupported('config-node'))
def items(self, request):
return defer.fail(Unsupported('retrieve-items'))
def retract(self, request):
return defer.fail(Unsupported('retract-items'))
def purge(self, request):
return defer.fail(Unsupported('purge-nodes'))
def delete(self, request):
return defer.fail(Unsupported('delete-nodes'))
def affiliationsGet(self, request):
return defer.fail(Unsupported('modify-affiliations'))
def affiliationsSet(self, request):
return defer.fail(Unsupported('modify-affiliations'))
def subscriptionsGet(self, request):
return defer.fail(Unsupported('manage-subscriptions'))
def subscriptionsSet(self, request):
return defer.fail(Unsupported('manage-subscriptions'))
|
{
"content_hash": "d5954b749c5ab5edf22f80ae205ababe",
"timestamp": "",
"source": "github",
"line_count": 1376,
"max_line_length": 81,
"avg_line_length": 32.22892441860465,
"alnum_prop": 0.5956208988206643,
"repo_name": "dustin/wokkel",
"id": "ed60238bbccb625ae752facbd81254ce585d1604",
"size": "44466",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wokkel/pubsub.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "428955"
}
],
"symlink_target": ""
}
|
import logging
import json
from django.conf import settings
import traceback
from datetime import datetime
from django.template.loader import render_to_string
from django.db.models.aggregates import Sum
from django.contrib import messages
from django.contrib.auth.decorators import login_required, permission_required
from django.core.mail import send_mail
from django.db import transaction
from django.forms import ChoiceField, ModelForm, Form, EmailField, BooleanField
from django.forms import ModelChoiceField, CharField, Textarea, HiddenInput, FileField
from django.forms.models import model_to_dict
from django.http import HttpResponse, HttpResponseForbidden, HttpResponseServerError
from django.shortcuts import get_object_or_404, redirect, render
from django.utils.translation import ugettext_lazy as _
from django.views.generic.list import ListView
from services.models import Alias, Service, ServiceType
from membership.templatetags.sorturl import lookup_sort
from membership.decorators import trusted_host_required
from membership.forms import PersonApplicationForm, OrganizationApplicationForm, PersonContactForm, ServiceForm, \
ContactForm
from membership.utils import log_change, serializable_membership_info, admtool_membership_details, \
get_client_ip, bake_log_entries
from membership.public_memberlist import public_memberlist_data
from membership.unpaid_members import unpaid_members_data, members_to_lock
from membership.billing.payments import process_op_csv, process_procountor_csv
from membership.models import Contact, Membership, MEMBER_TYPES_DICT, Bill, BillingCycle, Payment, ApplicationPoll, \
MembershipAlreadyStatus
from services.views import check_alias_availability, validate_alias
logger = logging.getLogger("membership.views")
ENTRIES_PER_PAGE = settings.ENTRIES_PER_PAGE
# Class based views
class SortListView(ListView):
"""ListView with search query parameter"""
search_query = ''
sort = None
header = ''
disable_duplicates_header = ''
def get_context_data(self, **kwargs):
context = super(SortListView, self).get_context_data(**kwargs)
context['querystring'] = self.request.GET
context['header'] = self.header
context['disable_duplicates_header'] = self.disable_duplicates_header
return context
def get_queryset(self):
qs = super(SortListView, self).get_queryset()
ordering = lookup_sort(self.request.GET.get('sort'))
if ordering is not None:
return qs.order_by(ordering)
else:
return qs
# Public access
def new_application(request, template_name='membership/choose_membership_type.html'):
return render(request, template_name, {})
# Public access
def person_application(request, template_name='membership/new_person_application.html'):
if settings.MAINTENANCE_MESSAGE is not None:
return redirect('frontpage')
chosen_email_forward = None
if request.method != 'POST':
application_form = PersonApplicationForm()
    else:
application_form = PersonApplicationForm(request.POST)
if not application_form.is_valid():
try:
chosen_email_forward = application_form.fields['email_forward'].clean(application_form.data['email_forward'])
            except Exception:
pass
else:
f = application_form.cleaned_data
with transaction.atomic():
# Separate a contact dict from the other fields
contact_dict = {}
for k, v in list(f.items()):
if k not in ['nationality', 'municipality',
'public_memberlist', 'email_forward',
'unix_login', 'extra_info',
'mysql_database', 'postgresql_database',
'login_vhost', 'poll', 'poll_other',
'birth_year']:
contact_dict[k] = v
person = Contact(**contact_dict)
person.save()
if (datetime.now().year - int(f['birth_year'])) < 21:
membership_type = 'J'
else:
membership_type = 'P'
membership = Membership(type=membership_type, status='N',
person=person,
nationality=f['nationality'],
municipality=f['municipality'],
public_memberlist=f['public_memberlist'],
birth_year=f['birth_year'],
extra_info=f['extra_info'])
membership.save()
# Service handling
services = []
login_alias = Alias(owner=membership, name=f['unix_login'], account=True)
login_alias.save()
unix_account_service = Service(servicetype=ServiceType.objects.get(servicetype='UNIX account'),
alias=login_alias, owner=membership, data=f['unix_login'])
unix_account_service.save()
services.append(unix_account_service)
if f['email_forward'] != 'no' and f['email_forward'] != f['unix_login']:
forward_alias = Alias(owner=membership, name=f['email_forward'])
forward_alias.save()
forward_alias_service = Service(servicetype=ServiceType.objects.get(servicetype='Email alias'),
alias=forward_alias, owner=membership, data=f['unix_login'])
forward_alias_service.save()
services.append(forward_alias_service)
                if f['mysql_database']:
mysql_service = Service(servicetype=ServiceType.objects.get(servicetype='MySQL database'),
alias=login_alias, owner=membership, data=f['unix_login'].replace('-', '_'))
mysql_service.save()
services.append(mysql_service)
                if f['postgresql_database']:
postgresql_service = Service(servicetype=ServiceType.objects.get(servicetype='PostgreSQL database'),
alias=login_alias, owner=membership, data=f['unix_login'])
postgresql_service.save()
services.append(postgresql_service)
                if f['login_vhost']:
login_vhost_service = Service(servicetype=ServiceType.objects.get(servicetype='WWW vhost'),
alias=login_alias, owner=membership, data=f['unix_login'])
login_vhost_service.save()
services.append(login_vhost_service)
logger.debug("Attempting to save with the following services: %s." % ", ".join((str(service) for service in services)))
# End of services
if f['poll'] is not None:
answer = f['poll']
if answer == 'other':
answer = '%s: %s' % (answer, f['poll_other'])
pollanswer = ApplicationPoll(membership=membership,
answer=answer)
pollanswer.save()
logger.info("New application {person} from {ip}:.".format(person=person, ip=get_client_ip(request)))
send_mail(_('Membership application received'),
render_to_string('membership/application_confirmation.txt',
{ 'membership': membership,
'membership_type': MEMBER_TYPES_DICT[membership.type],
'person': membership.person,
'billing_contact': membership.billing_contact,
'tech_contact': membership.tech_contact,
'ip': get_client_ip(request),
'services': services}),
settings.FROM_EMAIL,
[membership.email_to()], fail_silently=False)
return redirect('new_person_application_success')
return render(request, template_name, {
"form": application_form,
"chosen_email_forward": chosen_email_forward,
"title": _("Person member application")})
# Public access
def organization_application(request, template_name='membership/new_organization_application.html'):
if settings.MAINTENANCE_MESSAGE is not None:
return redirect('frontpage')
if request.method == 'POST':
form = OrganizationApplicationForm(request.POST)
if form.is_valid():
f = form.cleaned_data
d = {}
for k, v in list(f.items()):
if k not in ['nationality', 'municipality', 'extra_info',
'public_memberlist', 'organization_registration_number']:
d[k] = v
organization = Contact(**d)
membership = Membership(type='O', status='N',
nationality=f['nationality'],
municipality=f['municipality'],
extra_info=f['extra_info'],
organization_registration_number=f['organization_registration_number'],
public_memberlist=f['public_memberlist'])
request.session.set_expiry(0) # make this expire when the browser exits
request.session['membership'] = model_to_dict(membership)
organization_dict = model_to_dict(organization)
request.session['organization'] = organization_dict
return redirect('organization_application_add_contact', 'billing_contact')
else:
form = OrganizationApplicationForm()
return render(request, template_name,
{"form": form, "title": _('Organization application')})
# Public access
def organization_application_add_contact(request, contact_type,
template_name='membership/new_organization_application_add_contact.html'):
forms = ['billing_contact', 'tech_contact']
if contact_type not in forms:
return HttpResponseForbidden("Access denied")
if contact_type == 'billing_contact':
type_text = _('Billing contact')
elif contact_type == 'tech_contact':
type_text = _('Technical contact')
if request.method == 'POST':
form = PersonContactForm(request.POST)
if (form.is_valid() or # contact is actually filled
len(form.changed_data) == 0 or # form is empty
form.changed_data == ['country']): # only the country field is filled (this comes from form defaults)
if form.is_valid():
f = form.cleaned_data
contact = Contact(**f)
contact_dict = model_to_dict(contact)
request.session[contact_type] = contact_dict
else:
request.session[contact_type] = None
next_idx = forms.index(contact_type) + 1
if next_idx == len(forms):
return redirect('organization_application_services')
return redirect('organization_application_add_contact', forms[next_idx])
else:
if contact_type in request.session:
form = PersonContactForm(request.session[contact_type])
else:
form = PersonContactForm()
return render(request, template_name, {"form": form, "contact_type": type_text,
"step_number": forms.index(contact_type) + 2,
"title": '{title} - {part}'.format(title=_('Organization application'),
part=type_text)})
# Public access
def organization_application_services(request, template_name='membership/new_organization_application_services.html'):
    if 'services' in request.session:
        saved_services = request.session['services']
        form = ServiceForm({'mysql_database': saved_services.get('mysql_database', ''),
                            'postgresql_database': saved_services.get('postgresql_database', ''),
                            'login_vhost': saved_services.get('login_vhost', ''),
                            'unix_login': saved_services.get('unix_login', '')})
else:
form = ServiceForm()
if request.method == 'POST':
form = ServiceForm(request.POST)
if form.is_valid():
f = form.cleaned_data
services = {'unix_login': f['unix_login']}
            # 'services' is rebuilt from scratch on every submit, so optional
            # entries are simply added when the corresponding box is checked.
            if f['mysql_database']:
                services['mysql_database'] = f['unix_login']
            if f['postgresql_database']:
                services['postgresql_database'] = f['unix_login']
            if f['login_vhost']:
                services['login_vhost'] = f['unix_login']
request.session['services'] = services
return redirect('organization_application_review')
else:
if 'services' in request.session:
del request.session['services']
return render(request, template_name,
{"form": form, "title": '{title} - {part}'.format(title=_('Organization application'),
part=_('Choose services'))})
# Public access
def organization_application_review(request, template_name='membership/new_organization_application_review.html'):
# Maybe submitting form again after already submitting?
if request.session.get('membership') is None:
messages.error(request, _("Required data missing. Maybe attempted to submit application twice?"))
return redirect('organization_application')
forms = []
combo_dict = request.session['membership']
for k, v in list(request.session['organization'].items()):
combo_dict[k] = v
forms.append(OrganizationApplicationForm(combo_dict))
if request.session.get('billing_contact') is not None:
forms.append(PersonContactForm(request.session['billing_contact']))
forms[-1].name = _("Billing contact")
if request.session.get('tech_contact') is not None:
forms.append(PersonContactForm(request.session['tech_contact']))
forms[-1].name = _("Technical contact")
return render(request, template_name,
{"forms": forms, "services": request.session['services'],
"title": '{title} - {part}'.format(title=_('Organization application'), part=_('Review'))})
# Public access
def organization_application_save(request):
# Maybe submitting form again after already submitting?
if request.session.get('membership') is None:
messages.error(request, _("Required data missing. Maybe attempted to submit application twice?"))
return redirect('organization_application')
with transaction.atomic():
membership = Membership(type='O', status='N',
nationality=request.session['membership']['nationality'],
municipality=request.session['membership']['municipality'],
extra_info=request.session['membership']['extra_info'],
organization_registration_number=request.session['membership']['organization_registration_number'])
organization = Contact(**request.session['organization'])
organization.save()
membership.organization = organization
if request.session.get('billing_contact') is not None:
billing_contact = Contact(**request.session['billing_contact'])
billing_contact.save()
membership.billing_contact = billing_contact
if request.session.get('tech_contact') is not None:
tech_contact = Contact(**request.session['tech_contact'])
tech_contact.save()
membership.tech_contact = tech_contact
membership.save()
services = []
services_request = request.session['services']
login_alias = Alias(owner=membership, name=services_request['unix_login'], account=True)
login_alias.save()
unix_account_service = Service(servicetype=ServiceType.objects.get(servicetype='UNIX account'),
alias=login_alias, owner=membership, data=services_request['unix_login'])
unix_account_service.save()
services.append(unix_account_service)
if 'mysql_database' in services_request:
mysql_service = Service(servicetype=ServiceType.objects.get(servicetype='MySQL database'),
alias=login_alias, owner=membership,
data=services_request['mysql_database'].replace('-', '_'))
mysql_service.save()
services.append(mysql_service)
if 'postgresql_database' in services_request:
postgresql_service = Service(servicetype=ServiceType.objects.get(servicetype='PostgreSQL database'),
alias=login_alias, owner=membership,
data=services_request['postgresql_database'])
postgresql_service.save()
services.append(postgresql_service)
if 'login_vhost' in services_request:
login_vhost_service = Service(servicetype=ServiceType.objects.get(servicetype='WWW vhost'),
alias=login_alias, owner=membership,
data=services_request['login_vhost'])
login_vhost_service.save()
services.append(login_vhost_service)
send_mail(_('Membership application received'),
render_to_string('membership/application_confirmation.txt',
{ 'membership': membership,
'membership_type': MEMBER_TYPES_DICT[membership.type],
'organization': membership.organization,
'billing_contact': membership.billing_contact,
'tech_contact': membership.tech_contact,
'ip': get_client_ip(request),
'services': services}),
settings.FROM_EMAIL,
[membership.email_to()], fail_silently=False)
logger.info("New application {organization} from {ip}:.".format(organization=organization, ip=get_client_ip(request)))
request.session.set_expiry(0) # make this expire when the browser exits
for i in ['membership', 'billing_contact', 'tech_contact', 'services']:
if i in request.session:
del request.session[i]
return redirect('new_organization_application_success')
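# Session layout consumed by the wizard above (a sketch with illustrative
# values; the keys match those read and written by the organization
# application views in this file):
#
#   request.session = {
#       'membership':      model_to_dict(membership),
#       'organization':    model_to_dict(organization),
#       'billing_contact': model_to_dict(billing_contact) or None,
#       'tech_contact':    model_to_dict(tech_contact) or None,
#       'services':        {'unix_login': 'exampleuser',
#                           'mysql_database': 'exampleuser'},
#   }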
@permission_required('membership.manage_members')
def contact_add(request, contact_type, memberid, template_name='membership/entity_edit.html'):
membership = get_object_or_404(Membership, id=memberid)
forms = ['billing_contact', 'tech_contact']
if contact_type not in forms:
return HttpResponseForbidden("Access denied")
if contact_type == 'billing_contact' and membership.billing_contact:
return redirect('contact_edit',membership.billing_contact.id)
elif contact_type == 'tech_contact' and membership.tech_contact:
return redirect('contact_edit',membership.tech_contact.id)
if request.method == 'POST':
form = ContactForm(request.POST)
if form.is_valid():
contact = Contact(**form.cleaned_data)
contact.save()
if contact_type == 'billing_contact':
membership.billing_contact = contact
elif contact_type == 'tech_contact':
membership.tech_contact = contact
membership.save()
messages.success(request,
_("Added contact %s.") % contact)
return redirect('contact_edit', contact.id)
else:
messages.error(request,
_("New contact not saved."))
else:
form = ContactForm()
return render(request, template_name, {"form": form, 'memberid': memberid})
@permission_required('membership.read_members')
def contact_edit(request, id, template_name='membership/entity_edit.html'):
contact = get_object_or_404(Contact, id=id)
before = model_to_dict(contact) # Otherwise save() (or valid?) will change the dict, needs to be here
if request.method == 'POST':
if not request.user.has_perm('membership.manage_members'):
messages.error(request, _("You are not authorized to modify memberships."))
return redirect('contact_edit', id)
form = ContactForm(request.POST, instance=contact)
if form.is_valid():
form.save()
after = model_to_dict(contact)
log_change(contact, request.user, before, after)
messages.success(request, _("Changes to contact %s saved.") % contact)
return redirect('contact_edit', id) # form stays as POST otherwise if someone refreshes
else:
messages.error(request, _("Changes to contact %s not saved.") % contact)
else:
form = ContactForm(instance=contact)
logentries = bake_log_entries(contact.logs.all())
return render(request, template_name, {'form': form, 'contact': contact,
'logentries': logentries, 'memberid': contact.find_memberid()})
@permission_required('membership.manage_bills')
def bill_edit(request, id, template_name='membership/entity_edit.html'):
bill = get_object_or_404(Bill, id=id)
class Form(ModelForm):
class Meta:
model = Bill
exclude = ('billingcycle', 'reminder_count', 'pdf_file')
def __init__(self, *args, **kwargs):
super(Form, self).__init__(*args, **kwargs)
instance = getattr(self, 'instance', None)
if instance and instance.pk:
self.fields['type'].widget.attrs['readonly'] = True
def clean_type(self):
instance = getattr(self, 'instance', None)
if instance and instance.pk:
return instance.type
else:
return self.cleaned_data['type']
before = model_to_dict(bill) # Otherwise save() (or valid?) will change the dict, needs to be here
if request.method == 'POST':
form = Form(request.POST, instance=bill)
if form.is_valid():
form.save()
after = model_to_dict(bill)
log_change(bill, request.user, before, after)
messages.success(request, _("Changes to bill %s saved.") % bill)
return redirect('bill_edit', id) # form stays as POST otherwise if someone refreshes
else:
messages.error(request, _("Changes to bill %s not saved.") % bill)
else:
form = Form(instance=bill)
logentries = bake_log_entries(bill.logs.all())
return render(request, template_name, {'form': form, 'bill': bill,
'logentries': logentries,'memberid': bill.billingcycle.membership.id})
@permission_required('membership.read_bills')
def bill_pdf(request, bill_id):
    bill = get_object_or_404(Bill, id=bill_id)
    try:
        pdf = bill.generate_pdf()
        if pdf:
            response = HttpResponse(pdf, content_type='application/pdf')
            response['Content-Disposition'] = 'inline; filename=bill_%s.pdf' % bill.id
            return response
    except Exception:
        logger.exception("Failed to generate pdf for bill %s" % bill.id)
    # Reached when generation raised or produced no output.
    return HttpResponseServerError("Failed to generate pdf", content_type='text/plain')
@permission_required('membership.manage_bills')
def billingcycle_connect_payment(request, id, template_name='membership/billingcycle_connect_payment.html'):
billingcycle = get_object_or_404(BillingCycle, id=id)
class SpeciallyLabeledModelChoiceField(ModelChoiceField):
def label_from_instance(self, obj):
return "%s, %s, %s, %s" % (obj.payer_name, obj.reference_number, obj.amount, obj.payment_day)
class PaymentForm(Form):
qs = Payment.objects.filter(billingcycle__exact=None, ignore=False).order_by("payer_name")
payment = SpeciallyLabeledModelChoiceField(queryset=qs,
empty_label=_("None chosen"), required=True)
if request.method == 'POST':
form = PaymentForm(request.POST)
if form.is_valid():
f = form.cleaned_data
payment = f['payment']
before = model_to_dict(payment)
oldcycle = payment.billingcycle
if oldcycle:
oldcycle_before = model_to_dict(oldcycle)
payment.detach_from_cycle(user=request.user)
oldcycle_after = model_to_dict(oldcycle)
log_change(oldcycle, request.user, oldcycle_before, oldcycle_after)
newcycle = billingcycle
newcycle_before = model_to_dict(newcycle)
payment.attach_to_cycle(newcycle)
newcycle_after = model_to_dict(newcycle)
after = model_to_dict(payment)
log_change(payment, request.user, before, after)
log_change(newcycle, request.user, newcycle_before, newcycle_after)
messages.success(request, _("Changes to payment %s saved.") % payment)
return redirect('billingcycle_edit', id)
else:
messages.error(request, _("Changes to BillingCycle %s not saved.") % billingcycle)
else:
form = PaymentForm()
logentries = bake_log_entries(billingcycle.logs.all())
return render(request, template_name,
{'form': form, 'cycle': billingcycle, 'logentries': logentries})
@permission_required('membership.can_import_payments')
def import_payments(request, template_name='membership/import_payments.html'):
import_messages = []
class PaymentCSVForm(Form):
csv = FileField(label=_('CSV File'),
help_text=_('Choose CSV file to upload'))
format = ChoiceField(choices=(('op', 'Osuuspankki'), ('procountor', 'Procountor')),
help_text=_("File type"))
if request.method == 'POST':
form = PaymentCSVForm(request.POST, request.FILES)
if form.is_valid():
try:
in_memory_file = request.FILES['csv']
logger.info("Beginning payment import.")
if form.cleaned_data['format'] == 'op':
import_messages = process_op_csv(in_memory_file, user=request.user)
elif form.cleaned_data['format'] == 'procountor':
import_messages = process_procountor_csv(in_memory_file, user=request.user)
messages.success(request, _("Payment import succeeded!"))
            except Exception:
logger.error("%s" % traceback.format_exc())
logger.error("Payment CSV import failed.")
messages.error(request, _("Payment import failed."))
else:
messages.error(request, _("Payment import failed."))
else:
form = PaymentCSVForm()
return render(request, template_name,
{'title': _("Import payments"),
'form': form,
'import_messages': import_messages})
@permission_required('membership.read_bills')
def print_reminders(request, **kwargs):
output_messages = []
if request.method == 'POST':
try:
if 'marksent' in request.POST:
for billing_cycle in BillingCycle.get_reminder_billingcycles().all():
bill = Bill(billingcycle=billing_cycle, type='P')
bill.reminder_count = billing_cycle.bill_set.count()
bill.save()
bill.generate_pdf()
output_messages.append(_('Reminders marked as sent'))
else:
pdf = BillingCycle.get_pdf_reminders()
if pdf:
response = HttpResponse(pdf, content_type='application/pdf')
response['Content-Disposition'] = 'attachment; filename=reminders.pdf'
return response
else:
output_messages.append(_('Error processing PDF'))
except RuntimeError:
output_messages.append(_('Error processing PDF'))
except IOError:
output_messages.append(_('Cannot open PDF file'))
return render(request, 'membership/print_reminders.html',
{'title': _("Print paper reminders"),
'output_messages': output_messages,
'count': BillingCycle.get_reminder_billingcycles().count()})
@permission_required('membership.manage_bills')
def billingcycle_edit(request, id, template_name='membership/entity_edit.html'):
cycle = get_object_or_404(BillingCycle, id=id)
class Form(ModelForm):
is_paid_forced = False
class Meta:
model = BillingCycle
exclude = ('membership', 'start', 'end', 'sum', 'reference_number')
def disable_fields(self):
self.fields['is_paid'].required = False
if cycle.amount_paid() >= cycle.sum and cycle.is_paid:
self.fields['is_paid'].widget.attrs['readonly'] = 'readonly'
self.is_paid_forced = True
def clean_is_paid(self):
if self.is_paid_forced:
return cycle.is_paid
else:
return self.cleaned_data['is_paid']
before = model_to_dict(cycle) # Otherwise save() (or valid?) will change the dict, needs to be here
if request.method == 'POST':
form = Form(request.POST, instance=cycle)
form.disable_fields()
if form.is_valid():
form.save()
after = model_to_dict(cycle)
log_change(cycle, request.user, before, after)
messages.success(request, _("Changes to billing cycle %s saved.") % cycle)
return redirect('billingcycle_edit', id) # form stays as POST otherwise if someone refreshes
else:
messages.error(request, _("Changes to bill %s not saved.") % cycle)
else:
form = Form(instance=cycle)
form.disable_fields()
logentries = bake_log_entries(cycle.logs.all())
return render(request, template_name,
{'form': form, 'cycle': cycle,
'logentries': logentries,
'memberid': cycle.membership.id})
@permission_required('membership.manage_bills')
def payment_edit(request, id, template_name='membership/entity_edit.html'):
payment = get_object_or_404(Payment, id=id)
class SpeciallyLabeledModelChoiceField(ModelChoiceField):
def label_from_instance(self, obj):
return "%s, %s" % (obj.membership, str(obj))
class Form(ModelForm):
class Meta:
model = Payment
fields = '__all__'
billingcycle = CharField(widget=HiddenInput(), required=False)
message = CharField(widget=Textarea(attrs={'rows': 5, 'cols': 60}))
def disable_fields(self):
if payment.billingcycle:
self.fields['ignore'].required = False
self.fields['ignore'].widget.attrs['readonly'] = 'readonly'
self.fields['billingcycle'].required = False
self.fields['billingcycle'].widget.attrs['readonly'] = 'readonly'
self.fields['reference_number'].required = False
self.fields['reference_number'].widget.attrs['readonly'] = 'readonly'
self.fields['message'].required = False
self.fields['message'].widget.attrs['readonly'] = 'readonly'
self.fields['transaction_id'].required = False
self.fields['transaction_id'].widget.attrs['readonly'] = 'readonly'
self.fields['payment_day'].required = False
self.fields['payment_day'].widget.attrs['readonly'] = 'readonly'
self.fields['amount'].required = False
self.fields['amount'].widget.attrs['readonly'] = 'readonly'
self.fields['type'].required = False
self.fields['type'].widget.attrs['readonly'] = 'readonly'
self.fields['payer_name'].required = False
self.fields['payer_name'].widget.attrs['readonly'] = 'readonly'
self.fields['comment'].required = False
def clean_ignore(self):
if payment.billingcycle:
return False
else:
return self.cleaned_data['ignore']
def clean_billingcycle(self):
return payment.billingcycle
def clean_reference_number(self):
return payment.reference_number
def clean_message(self):
return payment.message
def clean_transaction_id(self):
return payment.transaction_id
def clean_payment_day(self):
return payment.payment_day
def clean_amount(self):
return payment.amount
def clean_type(self):
return payment.type
def clean_payer_name(self):
return payment.payer_name
before = model_to_dict(payment) # Otherwise save() (or valid?) will change the dict, needs to be here
oldcycle = payment.billingcycle
if request.method == 'POST':
form = Form(request.POST, instance=payment)
form.disable_fields()
if form.is_valid():
form.save()
newcycle = payment.billingcycle
if oldcycle != newcycle:
if oldcycle:
oldcycle.update_is_paid()
if newcycle:
newcycle.update_is_paid()
after = model_to_dict(payment)
log_change(payment, request.user, before, after)
messages.success(request, _("Changes to payment %s saved.") % payment)
return redirect('payment_edit', id) # form stays as POST otherwise if someone refreshes
else:
messages.error(request, _("Changes to payment %s not saved.") % payment)
return redirect('payment_edit', id) # form clears otherwise, this is a borderline acceptable hack
else:
form = Form(instance=payment)
form.disable_fields()
logentries = bake_log_entries(payment.logs.all())
if payment.billingcycle:
memberid = payment.billingcycle.membership.id
else:
memberid = None
return render(request, template_name, {'form': form, 'payment': payment,
'logentries': logentries, 'memberid': memberid})
@permission_required('membership.manage_bills')
def send_duplicate_notification(request, payment, **kwargs):
payment = get_object_or_404(Payment, id=payment)
payment.send_duplicate_payment_notice(request.user)
return redirect('payment_edit', payment.id)
@permission_required('membership.read_members')
def membership_edit(request, id, template_name='membership/membership_edit.html'):
membership = get_object_or_404(Membership, id=id)
class Form(ModelForm):
class Meta:
model = Membership
exclude = ('person', 'billing_contact', 'tech_contact', 'organization')
def clean_status(self):
return membership.status
def clean_approved(self):
return membership.approved
def disable_fields(self):
self.fields['status'].required = False
self.fields['status'].widget.attrs['disabled'] = 'disabled'
self.fields['status'].widget.attrs['readonly'] = 'readonly'
self.fields['approved'].required = False
self.fields['approved'].widget.attrs['readonly'] = 'readonly'
instance = getattr(self, 'instance', None)
if instance and instance.type == 'O':
self.fields["birth_year"].widget = HiddenInput()
self.fields['birth_year'].required = False
if request.method == 'POST':
if not request.user.has_perm('membership.manage_members'):
return HttpResponseForbidden(_("Permission manage required"))
form = Form(request.POST, instance=membership)
before = model_to_dict(membership)
form.disable_fields()
if form.is_valid():
form.save()
after = model_to_dict(membership)
log_change(membership, request.user, before, after)
return redirect('membership_edit', id) # form stays as POST otherwise if someone refreshes
else:
form = Form(instance=membership)
form.disable_fields()
# Pretty print log entries for template
logentries = bake_log_entries(membership.logs.all())
return render(request, template_name,
{'form': form, 'membership': membership, 'logentries': logentries})
@permission_required('membership.read_members')
def membership_duplicates(request, id):
membership = get_object_or_404(Membership, id=id)
view_params = {'queryset': membership.duplicates(),
'template_name': 'membership/membership_list.html',
'context_object_name': 'member_list',
'header': _("List duplicates for member #%(mid)i %(membership)s" % {"mid":membership.id,
"membership":str(membership)}),
'disable_duplicates_header': True,
'paginate_by': ENTRIES_PER_PAGE}
return member_object_list(request, **view_params)
@permission_required('membership.read_members')
def unpaid_paper_reminded(request):
view_params = {'queryset': Membership.paper_reminder_sent_unpaid_after(),
'template_name': 'membership/membership_list.html',
'context_object_name': 'member_list',
'paginate_by': ENTRIES_PER_PAGE
}
return member_object_list(request, **view_params)
@permission_required('membership.read_members')
def unpaid_paper_reminded_plain(request):
view_params = {'queryset': Membership.paper_reminder_sent_unpaid_after().order_by('id'),
'template_name': 'membership/membership_list_plaintext.html',
'context_object_name': 'member_list'
}
return member_object_list(request, **view_params)
@permission_required('membership.delete_members')
def membership_delete(request, id, template_name='membership/membership_delete.html'):
membership = get_object_or_404(Membership, id=id)
class ConfirmForm(Form):
confirm = BooleanField(label=_('To confirm deletion, you must check this box:'),
required=True)
if request.method == 'POST':
form = ConfirmForm(request.POST)
if form.is_valid():
membership_str = str(membership)
membership.delete_membership(request.user)
messages.success(request, _('Member %s successfully deleted.') % membership_str)
logger.info("User %s deleted member %s." % (request.user.username, membership))
return redirect('membership_edit', membership.id)
else:
form = ConfirmForm()
return render(request, template_name, {'form': form, 'membership': membership})
@permission_required('membership.dissociate_members')
def membership_dissociate(request, id, template_name='membership/membership_dissociate.html'):
membership = get_object_or_404(Membership, id=id)
class ConfirmForm(Form):
confirm = BooleanField(label=_('To confirm dissociation, you must check this box:'),
required=True)
if request.method == 'POST':
form = ConfirmForm(request.POST)
if form.is_valid():
membership_str = str(membership)
membership.dissociate(request.user)
messages.success(request, _('Member %s successfully dissociated.') % membership_str)
logger.info("User %s dissociated member %s." % (request.user.username, membership))
return redirect('membership_edit', membership.id)
else:
form = ConfirmForm()
return render(request, template_name, {'form': form, 'membership': membership})
@permission_required('membership.request_dissociation_for_member')
def membership_request_dissociation(request, id, template_name='membership/membership_request_dissociation.html'):
membership = get_object_or_404(Membership, id=id)
class ConfirmForm(Form):
confirm = BooleanField(label=_('To confirm state change, you must check this box:'),
required=True)
if request.method == 'POST':
form = ConfirmForm(request.POST)
if form.is_valid():
membership_str = str(membership)
membership.request_dissociation(request.user)
messages.success(request, _('Member %s successfully transferred to requested dissociation state.') % membership_str)
logger.info("User %s requested dissociation for member %s." % (request.user.username, membership))
return redirect('membership_edit', membership.id)
else:
form = ConfirmForm()
return render(request, template_name, {'form': form, 'membership': membership })
@permission_required('membership.request_dissociation_for_member')
def membership_cancel_dissociation_request(request, id, template_name='membership/membership_cancel_dissociation_request.html'):
membership = get_object_or_404(Membership, id=id)
class ConfirmForm(Form):
confirm = BooleanField(label=_('To confirm state change, you must check this box:'),
required=True)
if request.method == 'POST':
form = ConfirmForm(request.POST)
if form.is_valid():
membership_str = str(membership)
membership.cancel_dissociation_request(request.user)
messages.success(request, _('Member %s successfully transferred back to approved state.') % membership_str)
logger.info("User %s requested dissociation for member %s." % (request.user.username, membership))
return redirect('membership_edit', membership.id)
else:
form = ConfirmForm()
return render(request, template_name, {'form': form, 'membership': membership})
@permission_required('membership.manage_members')
def membership_convert_to_organization(request, id, template_name='membership/membership_convert_to_organization.html'):
membership = get_object_or_404(Membership, id=id)
class ConfirmForm(Form):
confirm = BooleanField(label=_('To confirm conversion, you must check this box:'),
required=True)
if request.method == 'POST':
form = ConfirmForm(request.POST)
if form.is_valid():
membership.type = 'O'
contact = membership.person
membership.person = None
membership.organization = contact
membership.save()
log_change(membership, request.user, change_message="Converted to an organization")
messages.success(request, _('Member %s successfully converted to an organization.') % membership)
logger.info("User %s converted member %s to an organization." % (request.user.username, membership))
return redirect('membership_edit', membership.id)
else:
form = ConfirmForm()
return render(request, template_name, {'form': form, 'membership': membership})
@permission_required('membership.manage_members')
def membership_preapprove_json(request, id):
m = get_object_or_404(Membership, id=id)
try:
m.preapprove(request.user)
except MembershipAlreadyStatus:
pass # Success if we didn't do anything
return HttpResponse(id, content_type='text/plain')
@permission_required('membership.manage_members')
def membership_approve_json(request, id):
m = get_object_or_404(Membership, id=id)
try:
m.approve(request.user)
except MembershipAlreadyStatus:
pass # Success if we didn't do anything
return HttpResponse(id, content_type='text/plain')
@permission_required('membership.read_members')
def membership_detail_json(request, id):
membership = get_object_or_404(Membership, id=id)
json_obj = serializable_membership_info(membership)
return HttpResponse(json.dumps(json_obj, sort_keys=True, indent=4),
content_type='application/json')
@permission_required('membership.manage_members')
def membership_disassociate_json(request, id):
m = get_object_or_404(Membership, id=id)
try:
m.dissociate(request.user)
except MembershipAlreadyStatus:
pass
return HttpResponse(id, content_type='text/plain')
# Public access
def handle_json(request):
logger.debug("RAW POST DATA: %s" % request.body)
msg = json.loads(request.body.decode('utf-8'))
funcs = {'PREAPPROVE': membership_preapprove_json,
'APPROVE': membership_approve_json,
'DISASSOCIATE': membership_disassociate_json,
'MEMBERSHIP_DETAIL': membership_detail_json,
'ALIAS_AVAILABLE': check_alias_availability,
'VALIDATE_ALIAS': validate_alias}
if msg['requestType'] not in funcs:
raise NotImplementedError()
logger.debug("AJAX call %s, payload: %s" % (msg['requestType'],
str(msg['payload'])))
try:
return funcs[msg['requestType']](request, msg['payload'])
except Exception as e:
logger.critical("%s" % traceback.format_exc())
raise e
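# Example payload dispatched by handle_json (illustrative values only):
#
#   {"requestType": "PREAPPROVE", "payload": 42}
#
# invokes membership_preapprove_json(request, 42); unknown request types
# raise NotImplementedError as above.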
@login_required
def test_email(request, template_name='membership/test_email.html'):
class RecipientForm(Form):
recipient = EmailField(label=_('Recipient e-mail address'))
if request.method == 'POST':
form = RecipientForm(request.POST)
if form.is_valid():
f = form.cleaned_data
else:
return render(request, template_name, {'form': form})
body = render_to_string('membership/test_email.txt', {"user": request.user})
send_mail("Testisähköposti", body,
settings.FROM_EMAIL,
# request.user.email,
[f["recipient"]], fail_silently=False)
logger.info("Sent a test e-mail to %s" % f["recipient"])
return render(request, template_name, {'form': RecipientForm()})
@trusted_host_required
def membership_metrics(request):
unpaid_cycles = BillingCycle.objects.filter(membership__status='A', is_paid=False)
unpaid_sum = unpaid_cycles.aggregate(Sum("sum"))['sum__sum']
if unpaid_sum is None:
unpaid_sum = "0.0"
d = {'memberships':
{'new': Membership.objects.filter(status='N').count(),
'preapproved': Membership.objects.filter(status='P').count(),
'approved': Membership.objects.filter(status='A').count(),
'deleted': Membership.objects.filter(status='D').count(),
},
'bills':
{'unpaid_count': unpaid_cycles.count(),
'unpaid_sum': float(unpaid_sum),
},
}
return HttpResponse(json.dumps(d, sort_keys=True, indent=4),
content_type='application/json')
@trusted_host_required
def public_memberlist(request):
template_name = 'membership/public_memberlist.xml'
data = public_memberlist_data()
return render(request, template_name, data, content_type='text/xml')
@trusted_host_required
def unpaid_members(request):
json_obj = unpaid_members_data()
return HttpResponse(json.dumps(json_obj, sort_keys=True, indent=4),
content_type='application/json')
@trusted_host_required
def users_to_lock(request):
json_obj = members_to_lock()
return HttpResponse(json.dumps(json_obj, sort_keys=True, indent=4),
content_type='application/json')
@trusted_host_required
def admtool_membership_detail_json(request, id):
membership = get_object_or_404(Membership, id=id)
json_obj = admtool_membership_details(membership)
return HttpResponse(json.dumps(json_obj, sort_keys=True, indent=4),
content_type='application/json')
@trusted_host_required
def admtool_lookup_alias_json(request, alias):
aliases = Alias.objects.filter(name__iexact=alias)
if len(aliases) == 1:
return HttpResponse(aliases[0].owner.id, content_type='text/plain')
elif not aliases:
return HttpResponse("No match", content_type='text/plain')
return HttpResponse("Too many matches", content_type='text/plain')
@permission_required('membership.read_members')
def member_object_list(request, **kwargs):
return SortListView.as_view(**kwargs)(request)
@permission_required('membership.read_bills')
def billing_object_list(request, **kwargs):
return SortListView.as_view(**kwargs)(request)
# This should list any bills/cycles that were forcefully set as paid even
# though insufficient payments were paid.
# @permission_required('membership.read_bills')
# def forced_paid_cycles_list(request, **kwargs):
#     # Sketch: requires F from django.db.models.
#     qs = BillingCycle.objects.annotate(
#         payments_total=Sum('payment__amount')).filter(
#             is_paid=True, payments_total__lt=F('sum'))
#     return SortListView.as_view(queryset=qs, **kwargs)(request)
@permission_required('membership.read_members')
def search(request, **kwargs):
query = request.GET.get('query', '')
# Shorthand for viewing a membership by giving # and the id
if query.startswith("#"):
try:
return redirect('membership_edit', int(query.lstrip("#")))
        except ValueError:
pass
qs = Membership.search(query)
if qs.count() == 1:
return redirect('membership_edit', qs[0].id)
kwargs['queryset'] = qs.order_by("organization__organization_name",
"person__last_name",
"person__first_name")
kwargs['search_query'] = query
return SortListView.as_view(**kwargs)(request)
|
{
"content_hash": "a79762b1bad2564be3332ac03f88c52c",
"timestamp": "",
"source": "github",
"line_count": 1154,
"max_line_length": 135,
"avg_line_length": 44.418544194107454,
"alnum_prop": 0.6015138804892799,
"repo_name": "annttu/sikteeri",
"id": "f4cc2b68cd9809bc0bbcb70b496fc3d651e5c2ec",
"size": "51286",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "membership/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "21557"
},
{
"name": "Dockerfile",
"bytes": "1091"
},
{
"name": "HTML",
"bytes": "58276"
},
{
"name": "JavaScript",
"bytes": "14021"
},
{
"name": "Python",
"bytes": "409773"
},
{
"name": "Shell",
"bytes": "2537"
}
],
"symlink_target": ""
}
|
"""Receives documents from the oplog worker threads and indexes them
into the backend.
This file is a document manager for the Solr search engine, but the intent
is that this file can be used as an example to add on different backends.
To extend this to other systems, simply implement the exact same class and
replace the method definitions with API calls for the desired backend.
"""
import re
import json
import bson.json_util as bsjson
from pysolr import Solr, SolrError
from mongo_connector import errors
from mongo_connector.constants import DEFAULT_COMMIT_INTERVAL
from mongo_connector.util import retry_until_ok
ADMIN_URL = 'admin/luke?show=schema&wt=json'
decoder = json.JSONDecoder()
class DocManager(object):
"""The DocManager class creates a connection to the backend engine and
adds/removes documents, and in the case of rollback, searches for them.
The reason for storing id/doc pairs as opposed to doc's is so that multiple
updates to the same doc reflect the most up to date version as opposed to
multiple, slightly different versions of a doc.
"""
def __init__(self, url, auto_commit_interval=DEFAULT_COMMIT_INTERVAL,
unique_key='_id', **kwargs):
"""Verify Solr URL and establish a connection.
"""
self.solr = Solr(url)
self.unique_key = unique_key
# pysolr does things in milliseconds
if auto_commit_interval is not None:
self.auto_commit_interval = auto_commit_interval * 1000
else:
self.auto_commit_interval = None
self.field_list = []
self._build_fields()
def _parse_fields(self, result, field_name):
""" If Schema access, parse fields and build respective lists
"""
field_list = []
for key, value in result.get('schema', {}).get(field_name, {}).items():
if key not in field_list:
field_list.append(key)
return field_list
def _build_fields(self):
""" Builds a list of valid fields
"""
declared_fields = self.solr._send_request('get', ADMIN_URL)
result = decoder.decode(declared_fields)
self.field_list = self._parse_fields(result, 'fields')
# Build regular expressions to match dynamic fields.
# dynamic field names may have exactly one wildcard, either at
# the beginning or the end of the name
self._dynamic_field_regexes = []
for wc_pattern in self._parse_fields(result, 'dynamicFields'):
if wc_pattern[0] == "*":
self._dynamic_field_regexes.append(
re.compile("\w%s\Z" % wc_pattern))
elif wc_pattern[-1] == "*":
self._dynamic_field_regexes.append(
re.compile("\A%s\w*" % wc_pattern[:-1]))
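        # For example (illustrative schema patterns, not taken from a real
        # core): a dynamic field "*_txt" compiles to \w*_txt\Z, and "attr_*"
        # compiles to \Aattr_\w*.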
def _clean_doc(self, doc):
"""Reformats the given document before insertion into Solr.
This method reformats the document in the following ways:
- removes extraneous fields that aren't defined in schema.xml
- unwinds arrays in order to find and later flatten sub-documents
- flattens the document so that there are no sub-documents, and every
value is associated with its dot-separated path of keys
An example:
{"a": 2,
"b": {
"c": {
"d": 5
}
},
"e": [6, 7, 8]
}
becomes:
{"a": 2, "b.c.d": 5, "e.0": 6, "e.1": 7, "e.2": 8}
"""
# SOLR cannot index fields within sub-documents, so flatten documents
# with the dot-separated path to each value as the respective key
def flattened(doc):
def flattened_kernel(doc, path):
for k, v in doc.items():
path.append(k)
if isinstance(v, dict):
for inner_k, inner_v in flattened_kernel(v, path):
yield inner_k, inner_v
elif isinstance(v, list):
for li, lv in enumerate(v):
path.append(str(li))
if isinstance(lv, dict):
for dk, dv in flattened_kernel(lv, path):
yield dk, dv
else:
yield ".".join(path), lv
path.pop()
else:
yield ".".join(path), v
path.pop()
return dict(flattened_kernel(doc, []))
# Translate the _id field to whatever unique key we're using
doc[self.unique_key] = doc["_id"]
flat_doc = flattened(doc)
# Only include fields that are explicitly provided in the
# schema or match one of the dynamic field patterns, if
# we were able to retrieve the schema
if len(self.field_list) + len(self._dynamic_field_regexes) > 0:
def include_field(field):
return field in self.field_list or any(
regex.match(field) for regex in self._dynamic_field_regexes
)
return dict((k, v) for k, v in flat_doc.items() if include_field(k))
return flat_doc
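    # Illustrative end-to-end result of the method above (not executed;
    # assumes unique_key == '_id' and a schema that accepts every field):
    #   _clean_doc({'_id': 1, 'a': {'b': [2, 3]}})
    #   => {'_id': 1, 'a.b.0': 2, 'a.b.1': 3}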
def stop(self):
""" Stops the instance
"""
pass
def upsert(self, doc):
"""Update or insert a document into Solr
This method should call whatever add/insert/update method exists for
the backend engine and add the document in there. The input will
always be one mongo document, represented as a Python dictionary.
"""
try:
if self.auto_commit_interval is not None:
self.solr.add([self._clean_doc(doc)],
commit=(self.auto_commit_interval == 0),
commitWithin=str(self.auto_commit_interval))
else:
self.solr.add([self._clean_doc(doc)], commit=False)
except SolrError:
raise errors.OperationFailed(
"Could not insert %r into Solr" % bsjson.dumps(doc))
def bulk_upsert(self, docs):
"""Update or insert multiple documents into Solr
docs may be any iterable
"""
try:
cleaned = (self._clean_doc(d) for d in docs)
if self.auto_commit_interval is not None:
self.solr.add(cleaned, commit=(self.auto_commit_interval == 0),
commitWithin=str(self.auto_commit_interval))
else:
self.solr.add(cleaned, commit=False)
except SolrError:
raise errors.OperationFailed(
"Could not bulk-insert documents into Solr")
def remove(self, doc):
"""Removes documents from Solr
The input is a python dictionary that represents a mongo document.
"""
self.solr.delete(id=str(doc[self.unique_key]),
commit=(self.auto_commit_interval == 0))
def _remove(self):
"""Removes everything
"""
self.solr.delete(q='*:*', commit=(self.auto_commit_interval == 0))
def search(self, start_ts, end_ts):
"""Called to query Solr for documents in a time range.
"""
query = '_ts: [%s TO %s]' % (start_ts, end_ts)
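        # e.g. '_ts: [1370474400000 TO 1370475000000]' (illustrative timestamps)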
return self.solr.search(query, rows=100000000)
def _search(self, query):
"""For test purposes only. Performs search on Solr with given query
Does not have to be implemented.
"""
return self.solr.search(query, rows=200)
def commit(self):
"""This function is used to force a commit.
"""
retry_until_ok(self.solr.commit)
def get_last_doc(self):
"""Returns the last document stored in the Solr engine.
"""
#search everything, sort by descending timestamp, return 1 row
try:
result = self.solr.search('*:*', sort='_ts desc', rows=1)
except ValueError:
return None
if len(result) == 0:
return None
return result.docs[0]
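# Minimal usage sketch (not part of the original module; the Solr URL and
# commit interval are made-up values):
#
#   dm = DocManager('http://localhost:8983/solr', auto_commit_interval=10)
#   dm.upsert({'_id': '1', 'title': 'example document'})
#   dm.commit()
#   last = dm.get_last_doc()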
|
{
"content_hash": "e8316f1d3cfaff83c5b05f5ce6dd1c05",
"timestamp": "",
"source": "github",
"line_count": 215,
"max_line_length": 80,
"avg_line_length": 38.051162790697674,
"alnum_prop": 0.5649676078718983,
"repo_name": "gazimahmud/mongo-connector",
"id": "f03912fe3c0cac48ece8cef83dfb085d0c75ed58",
"size": "8760",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "mongo_connector/doc_managers/solr_doc_manager.py",
"mode": "33261",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
from __future__ import unicode_literals
# Ensure 'assert_raises' context manager support for Python 2.6
import tests.backport_assert_raises
from nose.tools import assert_raises
import boto
from boto.exception import EC2ResponseError
import sure # noqa
from moto import mock_ec2
from tests.helpers import requires_boto_gte
@mock_ec2
def test_ami_create_and_delete():
conn = boto.connect_ec2('the_key', 'the_secret')
reservation = conn.run_instances('ami-1234abcd')
instance = reservation.instances[0]
image_id = conn.create_image(instance.id, "test-ami", "this is a test ami")
all_images = conn.get_all_images()
image = all_images[0]
image.id.should.equal(image_id)
image.virtualization_type.should.equal(instance.virtualization_type)
image.architecture.should.equal(instance.architecture)
image.kernel_id.should.equal(instance.kernel)
image.platform.should.equal(instance.platform)
# Validate auto-created volume and snapshot
volumes = conn.get_all_volumes()
volumes.should.have.length_of(1)
volume = volumes[0]
snapshots = conn.get_all_snapshots()
snapshots.should.have.length_of(1)
snapshot = snapshots[0]
image.block_device_mapping.current_value.snapshot_id.should.equal(snapshot.id)
snapshot.description.should.equal("Auto-created snapshot for AMI {0}".format(image.id))
snapshot.volume_id.should.equal(volume.id)
# Deregister
success = conn.deregister_image(image_id)
success.should.be.true
with assert_raises(EC2ResponseError) as cm:
conn.deregister_image(image_id)
cm.exception.code.should.equal('InvalidAMIID.NotFound')
cm.exception.status.should.equal(400)
cm.exception.request_id.should_not.be.none
@requires_boto_gte("2.14.0")
@mock_ec2
def test_ami_copy():
conn = boto.connect_ec2('the_key', 'the_secret')
reservation = conn.run_instances('ami-1234abcd')
instance = reservation.instances[0]
source_image_id = conn.create_image(instance.id, "test-ami", "this is a test ami")
source_image = conn.get_all_images(image_ids=[source_image_id])[0]
# Boto returns a 'CopyImage' object with an image_id attribute here. Use the image_id to fetch the full info.
copy_image_ref = conn.copy_image(source_image.region.name, source_image.id, "test-copy-ami", "this is a test copy ami")
copy_image_id = copy_image_ref.image_id
copy_image = conn.get_all_images(image_ids=[copy_image_id])[0]
copy_image.id.should.equal(copy_image_id)
copy_image.virtualization_type.should.equal(source_image.virtualization_type)
copy_image.architecture.should.equal(source_image.architecture)
copy_image.kernel_id.should.equal(source_image.kernel_id)
copy_image.platform.should.equal(source_image.platform)
# Validate auto-created volume and snapshot
conn.get_all_volumes().should.have.length_of(2)
conn.get_all_snapshots().should.have.length_of(2)
copy_image.block_device_mapping.current_value.snapshot_id.should_not.equal(
source_image.block_device_mapping.current_value.snapshot_id)
# Copy from non-existent source ID.
with assert_raises(EC2ResponseError) as cm:
conn.copy_image(source_image.region.name, 'ami-abcd1234', "test-copy-ami", "this is a test copy ami")
cm.exception.code.should.equal('InvalidAMIID.NotFound')
cm.exception.status.should.equal(400)
cm.exception.request_id.should_not.be.none
# Copy from non-existent source region.
with assert_raises(EC2ResponseError) as cm:
invalid_region = 'us-east-1' if (source_image.region.name != 'us-east-1') else 'us-west-1'
conn.copy_image(invalid_region, source_image.id, "test-copy-ami", "this is a test copy ami")
cm.exception.code.should.equal('InvalidAMIID.NotFound')
cm.exception.status.should.equal(400)
cm.exception.request_id.should_not.be.none
@mock_ec2
def test_ami_tagging():
conn = boto.connect_vpc('the_key', 'the_secret')
reservation = conn.run_instances('ami-1234abcd')
instance = reservation.instances[0]
conn.create_image(instance.id, "test-ami", "this is a test ami")
image = conn.get_all_images()[0]
image.add_tag("a key", "some value")
tag = conn.get_all_tags()[0]
tag.name.should.equal("a key")
tag.value.should.equal("some value")
    # Refresh the image to pick up the new tag
image = conn.get_all_images()[0]
image.tags.should.have.length_of(1)
image.tags["a key"].should.equal("some value")
@mock_ec2
def test_ami_create_from_missing_instance():
conn = boto.connect_ec2('the_key', 'the_secret')
args = ["i-abcdefg", "test-ami", "this is a test ami"]
with assert_raises(EC2ResponseError) as cm:
conn.create_image(*args)
cm.exception.code.should.equal('InvalidInstanceID.NotFound')
cm.exception.status.should.equal(400)
cm.exception.request_id.should_not.be.none
@mock_ec2
def test_ami_pulls_attributes_from_instance():
conn = boto.connect_ec2('the_key', 'the_secret')
reservation = conn.run_instances('ami-1234abcd')
instance = reservation.instances[0]
instance.modify_attribute("kernel", "test-kernel")
image_id = conn.create_image(instance.id, "test-ami", "this is a test ami")
image = conn.get_image(image_id)
image.kernel_id.should.equal('test-kernel')
@mock_ec2
def test_ami_filters():
conn = boto.connect_ec2('the_key', 'the_secret')
reservationA = conn.run_instances('ami-1234abcd')
instanceA = reservationA.instances[0]
instanceA.modify_attribute("architecture", "i386")
instanceA.modify_attribute("kernel", "k-1234abcd")
instanceA.modify_attribute("platform", "windows")
instanceA.modify_attribute("virtualization_type", "hvm")
imageA_id = conn.create_image(instanceA.id, "test-ami-A", "this is a test ami")
imageA = conn.get_image(imageA_id)
reservationB = conn.run_instances('ami-abcd1234')
instanceB = reservationB.instances[0]
instanceB.modify_attribute("architecture", "x86_64")
instanceB.modify_attribute("kernel", "k-abcd1234")
instanceB.modify_attribute("platform", "linux")
instanceB.modify_attribute("virtualization_type", "paravirtual")
imageB_id = conn.create_image(instanceB.id, "test-ami-B", "this is a test ami")
imageB = conn.get_image(imageB_id)
amis_by_architecture = conn.get_all_images(filters={'architecture': 'x86_64'})
set([ami.id for ami in amis_by_architecture]).should.equal(set([imageB.id]))
amis_by_kernel = conn.get_all_images(filters={'kernel-id': 'k-abcd1234'})
set([ami.id for ami in amis_by_kernel]).should.equal(set([imageB.id]))
amis_by_virtualization = conn.get_all_images(filters={'virtualization-type': 'paravirtual'})
set([ami.id for ami in amis_by_virtualization]).should.equal(set([imageB.id]))
amis_by_platform = conn.get_all_images(filters={'platform': 'windows'})
set([ami.id for ami in amis_by_platform]).should.equal(set([imageA.id]))
amis_by_id = conn.get_all_images(filters={'image-id': imageA.id})
set([ami.id for ami in amis_by_id]).should.equal(set([imageA.id]))
amis_by_state = conn.get_all_images(filters={'state': 'available'})
set([ami.id for ami in amis_by_state]).should.equal(set([imageA.id, imageB.id]))
amis_by_name = conn.get_all_images(filters={'name': imageA.name})
set([ami.id for ami in amis_by_name]).should.equal(set([imageA.id]))
@mock_ec2
def test_ami_filtering_via_tag():
conn = boto.connect_vpc('the_key', 'the_secret')
reservationA = conn.run_instances('ami-1234abcd')
instanceA = reservationA.instances[0]
imageA_id = conn.create_image(instanceA.id, "test-ami-A", "this is a test ami")
imageA = conn.get_image(imageA_id)
imageA.add_tag("a key", "some value")
reservationB = conn.run_instances('ami-abcd1234')
instanceB = reservationB.instances[0]
imageB_id = conn.create_image(instanceB.id, "test-ami-B", "this is a test ami")
imageB = conn.get_image(imageB_id)
imageB.add_tag("another key", "some other value")
amis_by_tagA = conn.get_all_images(filters={'tag:a key': 'some value'})
set([ami.id for ami in amis_by_tagA]).should.equal(set([imageA.id]))
amis_by_tagB = conn.get_all_images(filters={'tag:another key': 'some other value'})
set([ami.id for ami in amis_by_tagB]).should.equal(set([imageB.id]))
@mock_ec2
def test_getting_missing_ami():
conn = boto.connect_ec2('the_key', 'the_secret')
with assert_raises(EC2ResponseError) as cm:
conn.get_image('ami-missing')
cm.exception.code.should.equal('InvalidAMIID.NotFound')
cm.exception.status.should.equal(400)
cm.exception.request_id.should_not.be.none
@mock_ec2
def test_getting_malformed_ami():
conn = boto.connect_ec2('the_key', 'the_secret')
with assert_raises(EC2ResponseError) as cm:
conn.get_image('foo-missing')
cm.exception.code.should.equal('InvalidAMIID.Malformed')
cm.exception.status.should.equal(400)
cm.exception.request_id.should_not.be.none
@mock_ec2
def test_ami_attribute():
conn = boto.connect_ec2('the_key', 'the_secret')
reservation = conn.run_instances('ami-1234abcd')
instance = reservation.instances[0]
image_id = conn.create_image(instance.id, "test-ami", "this is a test ami")
image = conn.get_image(image_id)
# Baseline
attributes = conn.get_image_attribute(image.id, attribute='launchPermission')
attributes.name.should.equal('launch_permission')
attributes.attrs.should.have.length_of(0)
ADD_GROUP_ARGS = {'image_id': image.id,
'attribute': 'launchPermission',
'operation': 'add',
'groups': 'all'}
REMOVE_GROUP_ARGS = {'image_id': image.id,
'attribute': 'launchPermission',
'operation': 'remove',
'groups': 'all'}
# Add 'all' group and confirm
conn.modify_image_attribute(**ADD_GROUP_ARGS)
attributes = conn.get_image_attribute(image.id, attribute='launchPermission')
attributes.attrs['groups'].should.have.length_of(1)
attributes.attrs['groups'].should.equal(['all'])
# Add is idempotent
conn.modify_image_attribute.when.called_with(**ADD_GROUP_ARGS).should_not.throw(EC2ResponseError)
# Remove 'all' group and confirm
conn.modify_image_attribute(**REMOVE_GROUP_ARGS)
attributes = conn.get_image_attribute(image.id, attribute='launchPermission')
attributes.attrs.should.have.length_of(0)
# Remove is idempotent
conn.modify_image_attribute.when.called_with(**REMOVE_GROUP_ARGS).should_not.throw(EC2ResponseError)
# Error: Add with group != 'all'
with assert_raises(EC2ResponseError) as cm:
conn.modify_image_attribute(image.id,
attribute='launchPermission',
operation='add',
groups='everyone')
cm.exception.code.should.equal('InvalidAMIAttributeItemValue')
cm.exception.status.should.equal(400)
cm.exception.request_id.should_not.be.none
# Error: Add with invalid image ID
with assert_raises(EC2ResponseError) as cm:
conn.modify_image_attribute("ami-abcd1234",
attribute='launchPermission',
operation='add',
groups='all')
cm.exception.code.should.equal('InvalidAMIID.NotFound')
cm.exception.status.should.equal(400)
cm.exception.request_id.should_not.be.none
# Error: Remove with invalid image ID
with assert_raises(EC2ResponseError) as cm:
conn.modify_image_attribute("ami-abcd1234",
attribute='launchPermission',
operation='remove',
groups='all')
cm.exception.code.should.equal('InvalidAMIID.NotFound')
cm.exception.status.should.equal(400)
cm.exception.request_id.should_not.be.none
# Error: Add or remove with user ID instead of group
conn.modify_image_attribute.when.called_with(image.id,
attribute='launchPermission',
operation='add',
user_ids=['user']).should.throw(NotImplementedError)
conn.modify_image_attribute.when.called_with(image.id,
attribute='launchPermission',
operation='remove',
user_ids=['user']).should.throw(NotImplementedError)
|
{
"content_hash": "cabb1d84149c681e7d2da4ec6b5f1646",
"timestamp": "",
"source": "github",
"line_count": 312,
"max_line_length": 123,
"avg_line_length": 40.68910256410256,
"alnum_prop": 0.663410791650256,
"repo_name": "DataDog/moto",
"id": "3bdf6dac7a9553a7d983147496fba15e5491a7cc",
"size": "12695",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/test_ec2/test_amis.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "874590"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import datetime
import pytest
import pypuppetdb
class TestUTC(object):
"""Test the UTC class."""
def test_utc_offset(self, utc):
assert datetime.timedelta(0) == utc.utcoffset(300)
def test_tzname(self, utc):
assert str('UTC') == utc.tzname(300)
def test_dst(self, utc):
assert datetime.timedelta(0) == utc.dst(300)
def test_magic_str(self, utc):
assert str('UTC') == str(utc)
def test_magic_unicode(self, utc):
assert 'UTC' == str(utc)
def test_magic_repr(self, utc):
assert str('<UTC>') == repr(utc)
class TestJSONToDateTime(object):
"""Test the json_to_datetime function."""
def test_json_to_datetime(self):
json_datetime = '2013-08-01T09:57:00.000Z'
python_datetime = pypuppetdb.utils.json_to_datetime(json_datetime)
assert python_datetime.dst() == datetime.timedelta(0)
assert python_datetime.date() == datetime.date(2013, 8, 1)
assert python_datetime.tzname() == 'UTC'
assert python_datetime.utcoffset() == datetime.timedelta(0)
assert python_datetime.dst() == datetime.timedelta(0)
def test_json_to_datetime_invalid(self):
with pytest.raises(ValueError):
pypuppetdb.utils.json_to_datetime('2013-08-0109:57:00.000Z')
class TestVersionCmp(object):
"""Test the versioncmp function using different criteria."""
def test_versioncmp(self):
assert pypuppetdb.utils.versioncmp('1', '1') == 0
assert pypuppetdb.utils.versioncmp('2.1', '2.2') < 0
assert pypuppetdb.utils.versioncmp('3.0.4.10', '3.0.4.2') > 0
assert pypuppetdb.utils.versioncmp('4.08', '4.08.1') < 0
assert pypuppetdb.utils.versioncmp('3.2.1.9.8144', '3.2') > 0
assert pypuppetdb.utils.versioncmp('3.2', '3.2.1.9.8144') < 0
assert pypuppetdb.utils.versioncmp('1.2', '2.1') < 0
assert pypuppetdb.utils.versioncmp('2.1', '1.2') > 0
assert pypuppetdb.utils.versioncmp('5.6.7', '5.6.7') == 0
assert pypuppetdb.utils.versioncmp('1.01.1', '1.1.1') == 0
assert pypuppetdb.utils.versioncmp('1.1.1', '1.01.1') == 0
assert pypuppetdb.utils.versioncmp('1', '1.0') == 0
assert pypuppetdb.utils.versioncmp('1.0', '1') == 0
assert pypuppetdb.utils.versioncmp('1.0', '1.0.1') < 0
assert pypuppetdb.utils.versioncmp('1.0.1', '1.0') > 0
assert pypuppetdb.utils.versioncmp('1.0.2.0', '1.0.2') == 0
|
{
"content_hash": "852489a1ea70ba1213436196d66e487d",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 74,
"avg_line_length": 36.76470588235294,
"alnum_prop": 0.6228,
"repo_name": "puppet-community/pypuppetdb",
"id": "f455a14417d4415e575ad490ece9f4cd8aa2a7de",
"size": "2500",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "200266"
}
],
"symlink_target": ""
}
|
from tempest.api.baremetal.admin import base
from tempest import test
class TestApiDiscovery(base.BaseBaremetalTest):
"""Tests for API discovery features."""
@test.attr(type='smoke')
def test_api_versions(self):
resp, descr = self.client.get_api_description()
self.assertEqual('200', resp['status'])
expected_versions = ('v1',)
versions = [version['id'] for version in descr['versions']]
for v in expected_versions:
self.assertIn(v, versions)
@test.attr(type='smoke')
def test_default_version(self):
resp, descr = self.client.get_api_description()
self.assertEqual('200', resp['status'])
default_version = descr['default_version']
self.assertEqual(default_version['id'], 'v1')
@test.attr(type='smoke')
def test_version_1_resources(self):
resp, descr = self.client.get_version_description(version='v1')
self.assertEqual('200', resp['status'])
expected_resources = ('nodes', 'chassis',
'ports', 'links', 'media_types')
for res in expected_resources:
self.assertIn(res, descr)
|
{
"content_hash": "73f010ad7bc3c226a4a896638888a8af",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 71,
"avg_line_length": 33.4,
"alnum_prop": 0.6184773310521814,
"repo_name": "Mirantis/tempest",
"id": "7368b3e8103fba764b327f80a463657ecbcc97a9",
"size": "1742",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tempest/api/baremetal/admin/test_api_discovery.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "3297127"
},
{
"name": "Shell",
"bytes": "8663"
}
],
"symlink_target": ""
}
|
"""Utility class for producing a scansion pattern for a Latin hexameter.
Given a line of hexameter, the scan method performs a series of transformations and
checks; for each one performed successfully, a note is added to the scansion_notes
list so that end users may view the provenance of a scansion.
Because hexameters have strict rules on the position and quantity of stressed and
unstressed syllables, we can often infer many of the stress qualities of the syllables,
given a valid hexameter.
If the Latin hexameter provided is not accented with macrons, then a best guess is made.
For the scansion produced, the stress of a diphthong is indicated in the second of the
two vowel positions; for the accented line produced, the diphthong stress is not
indicated with any macronized vowels.
"""
import re
from Levenshtein import distance
import cltk.prosody.lat.string_utils as string_utils
from cltk.prosody.lat.metrical_validator import MetricalValidator
from cltk.prosody.lat.scansion_constants import ScansionConstants
from cltk.prosody.lat.scansion_formatter import ScansionFormatter
from cltk.prosody.lat.syllabifier import Syllabifier
from cltk.prosody.lat.verse import Verse
from cltk.prosody.lat.verse_scanner import VerseScanner
__author__ = ["Todd Cook <todd.g.cook@gmail.com>"]
__license__ = "MIT License"
class HexameterScanner(VerseScanner):
"""The scansion symbols used can be configured by passing a suitable constants class to
the constructor."""
def __init__(
self,
constants=ScansionConstants(),
syllabifier=Syllabifier(),
optional_transform=False,
*args,
**kwargs
):
super().__init__(*args, **kwargs)
self.constants = constants
self.remove_punct_map = string_utils.remove_punctuation_dict()
self.punctuation_substitutions = string_utils.punctuation_for_spaces_dict()
self.metrical_validator = MetricalValidator(constants)
self.formatter = ScansionFormatter(constants)
self.syllabifier = syllabifier
self.inverted_amphibrach_re = re.compile(
r"{}\s*{}\s*{}".format(
self.constants.STRESSED,
self.constants.UNSTRESSED,
self.constants.STRESSED,
)
)
self.syllable_matcher = re.compile(
r"[{}]".format(
self.constants.VOWELS
+ self.constants.ACCENTED_VOWELS
+ self.constants.LIQUIDS
+ self.constants.MUTES
)
)
self.optional_transform = optional_transform
def scan(
self,
original_line: str,
optional_transform: bool = False,
dactyl_smoothing: bool = False,
) -> Verse:
"""
Scan a line of Latin hexameter and produce a scansion pattern, and other data.
:param original_line: the original line of Latin verse
:param optional_transform: whether or not to perform i to j transform for syllabification
:param dactyl_smoothing: whether or not to perform dactyl smoothing
:return: a Verse object
>>> scanner = HexameterScanner()
>>> print(HexameterScanner().scan(
... "ēxiguām sedēm pariturae tērra negavit").scansion) # doctest: +NORMALIZE_WHITESPACE
- - - - - U U - - - U U - U
>>> print(scanner.scan("impulerit. Tantaene animis caelestibus irae?"))
Verse(original='impulerit. Tantaene animis caelestibus irae?', scansion='- U U - - - U U - - - U U - - ', meter='hexameter', valid=True, syllable_count=15, accented='īmpulerīt. Tāntaene animīs caelēstibus īrae?', scansion_notes=['Valid by positional stresses.'], syllables = ['īm', 'pu', 'le', 'rīt', 'Tān', 'taen', 'a', 'ni', 'mīs', 'cae', 'lēs', 'ti', 'bus', 'i', 'rae'])
>>> print(scanner.scan(
... "Arma virumque cano, Troiae qui prīmus ab ōrīs").scansion) # doctest: +NORMALIZE_WHITESPACE
- U U - U U - - - - - U U - -
>>> # some hexameters need the optional transformations:
>>> optional_transform_scanner = HexameterScanner(optional_transform=True)
>>> print(optional_transform_scanner.scan(
... "Ītaliam, fāto profugus, Lāvīniaque vēnit").scansion) # doctest: +NORMALIZE_WHITESPACE
- - - - - U U - - - U U - U
>>> print(HexameterScanner().scan(
... "lītora, multum ille et terrīs iactātus et alto").scansion) # doctest: +NORMALIZE_WHITESPACE
- U U - - - - - - - U U - U
>>> print(HexameterScanner().scan(
... "vī superum saevae memorem Iūnōnis ob īram;").scansion) # doctest: +NORMALIZE_WHITESPACE
- U U - - - U U - - - U U - U
>>> # handle multiple elisions
>>> print(scanner.scan("monstrum horrendum, informe, ingens, cui lumen ademptum").scansion) # doctest: +NORMALIZE_WHITESPACE
- - - - - - - - - U U - U
>>> # if we have 17 syllables, create a chain of all dactyls
>>> print(scanner.scan("quadrupedante putrem sonitu quatit ungula campum"
... ).scansion) # doctest: +NORMALIZE_WHITESPACE
- U U - U U - U U - U U - U U - U
>>> # if we have 13 syllables exactly, we'll create a spondaic hexameter
>>> print(HexameterScanner().scan(
... "illi inter sese multa vi bracchia tollunt").scansion) # doctest: +NORMALIZE_WHITESPACE
- - - - - - - - - UU - -
>>> print(HexameterScanner().scan(
... "dat latus; insequitur cumulo praeruptus aquae mons").scansion) # doctest: +NORMALIZE_WHITESPACE
- U U - U U - U U - - - U U - -
>>> print(optional_transform_scanner.scan(
... "Non quivis videt inmodulata poëmata iudex").scansion) # doctest: +NORMALIZE_WHITESPACE
- - - U U - U U - U U- U U - -
>>> print(HexameterScanner().scan(
... "certabant urbem Romam Remoramne vocarent").scansion) # doctest: +NORMALIZE_WHITESPACE
- - - - - - - U U - U U - -
>>> # advanced smoothing is available via keyword flags: dactyl_smoothing
>>> # print(HexameterScanner().scan(
#... "his verbis: 'o gnata, tibi sunt ante ferendae",
#... dactyl_smoothing=True).scansion) # doctest: +NORMALIZE_WHITESPACE
#- - - - - U U - - - U U - -
"""
verse = Verse(original_line, meter="hexameter")
# replace punctuation with spaces
line = original_line.translate(self.punctuation_substitutions)
# conservative i to j
line = self.transform_i_to_j(line)
working_line = self.elide_all(line)
working_line = self.accent_by_position(working_line)
syllables = self.syllabifier.syllabify(working_line)
if optional_transform:
working_line = self.transform_i_to_j_optional(line)
working_line = self.elide_all(working_line)
working_line = self.accent_by_position(working_line)
syllables = self.syllabifier.syllabify(working_line)
verse.scansion_notes += [self.constants.NOTE_MAP["optional i to j"]]
verse.working_line = working_line
verse.syllable_count = self.syllabifier.get_syllable_count(syllables)
verse.syllables = syllables
if verse.syllable_count < 12:
verse.valid = False
verse.scansion_notes += [self.constants.NOTE_MAP["< 12"]]
return verse
stresses = self.flag_dipthongs(syllables)
syllables_wspaces = string_utils.to_syllables_with_trailing_spaces(
working_line, syllables
)
offset_map = self.calc_offset(syllables_wspaces)
for idx, syl in enumerate(syllables):
for accented in self.constants.ACCENTED_VOWELS:
if accented in syl:
stresses.append(idx)
# first syllable is always long in hexameter
stresses.append(0)
# second to last syllable is always long
stresses.append(verse.syllable_count - 2)
verse.scansion = self.produce_scansion(stresses, syllables_wspaces, offset_map)
if len(
string_utils.stress_positions(self.constants.STRESSED, verse.scansion)
) != len(set(stresses)):
verse.valid = False
verse.scansion_notes += [self.constants.NOTE_MAP["invalid syllables"]]
return verse
if self.metrical_validator.is_valid_hexameter(verse.scansion):
verse.scansion_notes += [self.constants.NOTE_MAP["positionally"]]
return self.assign_candidate(verse, verse.scansion)
        # identify some obvious and probable choices based on the number of syllables
if verse.syllable_count == 17: # produce all dactyls
candidate = self.produce_scansion(
self.metrical_validator.hexameter_known_stresses(),
syllables_wspaces,
offset_map,
)
verse.scansion_notes += [self.constants.NOTE_MAP["17"]]
if self.metrical_validator.is_valid_hexameter(candidate):
return self.assign_candidate(verse, candidate)
if verse.syllable_count == 12: # create all spondee hexameter
candidate = self.produce_scansion(
list(range(12)), syllables_wspaces, offset_map
)
            if self.metrical_validator.is_valid_hexameter(candidate):
verse.scansion_notes += [self.constants.NOTE_MAP["12"]]
return self.assign_candidate(verse, candidate)
if (
verse.syllable_count == 13
): # create spondee hexameter with a dactyl at 5th foot
known_unaccents = [9, 10]
last_syllable_accented = False
for vowel in self.constants.ACCENTED_VOWELS:
if vowel in verse.syllables[12]:
last_syllable_accented = True
if not last_syllable_accented:
known_unaccents.append(12)
            if len(set(known_unaccents) - set(stresses)) == len(known_unaccents):
verse.scansion = self.produce_scansion(
[x for x in range(13) if x not in known_unaccents],
syllables_wspaces,
offset_map,
)
verse.scansion_notes += [self.constants.NOTE_MAP["5th dactyl"]]
if self.metrical_validator.is_valid_hexameter(verse.scansion):
return self.assign_candidate(verse, verse.scansion)
if verse.syllable_count > 17:
verse.valid = False
verse.scansion_notes += [self.constants.NOTE_MAP["> 17"]]
return verse
smoothed = self.correct_inverted_amphibrachs(verse.scansion)
if distance(verse.scansion, smoothed) > 0:
verse.scansion_notes += [self.constants.NOTE_MAP["inverted"]]
verse.scansion = smoothed
stresses += string_utils.differences(verse.scansion, smoothed)
if self.metrical_validator.is_valid_hexameter(verse.scansion):
return self.assign_candidate(verse, verse.scansion)
smoothed = self.correct_first_two_dactyls(verse.scansion)
if distance(verse.scansion, smoothed) > 0:
verse.scansion_notes += [self.constants.NOTE_MAP["invalid start"]]
verse.scansion = smoothed
stresses += string_utils.differences(verse.scansion, smoothed)
if self.metrical_validator.is_valid_hexameter(verse.scansion):
return self.assign_candidate(verse, verse.scansion)
smoothed = self.correct_invalid_fifth_foot(verse.scansion)
if distance(verse.scansion, smoothed) > 0:
verse.scansion_notes += [self.constants.NOTE_MAP["invalid 5th"]]
verse.scansion = smoothed
stresses += string_utils.differences(verse.scansion, smoothed)
if self.metrical_validator.is_valid_hexameter(verse.scansion):
return self.assign_candidate(verse, verse.scansion)
feet = self.metrical_validator.hexameter_feet(verse.scansion.replace(" ", ""))
if feet:
# Normal good citizens are unwelcome in the house of hexameter
invalid_feet_in_hexameter = [self.constants.IAMB, self.constants.TROCHEE]
current_foot = 0
ending = (
feet.pop()
) # don't process the ending, a possible trochee, add it back after
scanned_line = ""
for foot in feet:
if foot.replace(" ", "") in invalid_feet_in_hexameter:
scanned_line = self.invalid_foot_to_spondee(
feet, foot, current_foot
)
scanned_line = scanned_line + ending
current_foot += 1
smoothed = self.produce_scansion(
stresses
+ string_utils.stress_positions(self.constants.STRESSED, scanned_line),
syllables_wspaces,
offset_map,
)
if self.metrical_validator.is_valid_hexameter(smoothed):
verse.scansion_notes += [self.constants.NOTE_MAP["invalid foot"]]
return self.assign_candidate(verse, smoothed)
# need to do this again, since the scansion has changed
smoothed = self.correct_inverted_amphibrachs(verse.scansion)
if distance(verse.scansion, smoothed) > 0:
verse.scansion_notes += [self.constants.NOTE_MAP["inverted"]]
verse.scansion = smoothed
stresses += string_utils.differences(verse.scansion, smoothed)
if self.metrical_validator.is_valid_hexameter(verse.scansion):
return self.assign_candidate(verse, verse.scansion)
candidates = self.metrical_validator.closest_hexameter_patterns(verse.scansion)
if candidates is not None:
if (
len(candidates) == 1
and len(verse.scansion.replace(" ", "")) == len(candidates[0])
and len(string_utils.differences(verse.scansion, candidates[0])) == 1
):
tmp_scansion = self.produce_scansion(
string_utils.differences(verse.scansion, candidates[0]),
syllables_wspaces,
offset_map,
)
if self.metrical_validator.is_valid_hexameter(tmp_scansion):
verse.scansion_notes += [self.constants.NOTE_MAP["closest match"]]
return self.assign_candidate(verse, tmp_scansion)
# need to do this again, since the scansion has changed
smoothed = self.correct_inverted_amphibrachs(smoothed)
if self.metrical_validator.is_valid_hexameter(smoothed):
verse.scansion_notes += [self.constants.NOTE_MAP["inverted"]]
return self.assign_candidate(verse, smoothed)
if dactyl_smoothing:
smoothed = self.correct_dactyl_chain(smoothed)
if distance(verse.scansion, smoothed) > 0:
verse.scansion_notes += [self.constants.NOTE_MAP["dactyl smoothing"]]
verse.scansion = smoothed
if self.metrical_validator.is_valid_hexameter(verse.scansion):
return self.assign_candidate(verse, verse.scansion)
        # if the line doesn't scan "as is", it may scan if the optional i to j
        # transformations are made, so here we set them and try again.
if self.optional_transform and not verse.valid:
return self.scan(
original_line, optional_transform=True, dactyl_smoothing=True
)
return verse
def correct_invalid_fifth_foot(self, scansion: str) -> str:
"""
        A line ending in a dactyl followed by an iamb has an invalid fifth foot,
        so here we coerce the first unstressed syllable of the offending pattern
        to stressed, restoring a dactylic fifth foot at the end of the line
:param scansion: the scansion pattern
:return corrected scansion: the corrected scansion pattern
>>> print(HexameterScanner().correct_invalid_fifth_foot(
... " - - - U U - U U U - - U U U - x")) # doctest: +NORMALIZE_WHITESPACE
- - - U U - U U U - - - U U - x
"""
scansion_wo_spaces = (
scansion.replace(" ", "")[:-1] + self.constants.OPTIONAL_ENDING
)
if scansion_wo_spaces.endswith(
self.constants.DACTYL + self.constants.IAMB + self.constants.OPTIONAL_ENDING
):
matches = list(
re.compile(
r"{}\s*{}\s*{}\s*{}\s*{}".format(
self.constants.STRESSED,
self.constants.UNSTRESSED,
self.constants.UNSTRESSED,
self.constants.UNSTRESSED,
self.constants.STRESSED,
)
).finditer(scansion)
)
(start, end) = matches[len(matches) - 1].span()
unstressed_idx = scansion.index(self.constants.UNSTRESSED, start)
new_line = (
scansion[:unstressed_idx]
+ self.constants.STRESSED
+ scansion[unstressed_idx + 1 :]
)
return new_line
return scansion
def invalid_foot_to_spondee(self, feet: list, foot: str, idx: int) -> str:
"""
        In hexameters, a single foot with an unstressed_stressed (or
        stressed_unstressed) syllable pattern is often just a spondee,
        so here we coerce its unstressed syllable to stressed.
        :param feet: list of string representations of metrical feet
:param foot: the bad foot to correct
:param idx: the index of the foot to correct
:return: corrected scansion
>>> print(HexameterScanner().invalid_foot_to_spondee(
... ['-UU', '--', '-U', 'U-', '--', '-UU'],'-U', 2)) # doctest: +NORMALIZE_WHITESPACE
-UU----U----UU
"""
new_foot = foot.replace(self.constants.UNSTRESSED, self.constants.STRESSED)
feet[idx] = new_foot
return "".join(feet)
def correct_dactyl_chain(self, scansion: str) -> str:
"""
        Three or more unstressed syllables in a row form a broken dactyl chain, best detected and
processed backwards.
Since this method takes a Procrustean approach to modifying the scansion pattern,
it is not used by default in the scan method; however, it is available as an optional
keyword parameter, and users looking to further automate the generation of scansion
candidates should consider using this as a fall back.
:param scansion: scansion with broken dactyl chain; inverted amphibrachs not allowed
:return: corrected line of scansion
>>> print(HexameterScanner().correct_dactyl_chain(
... "- U U - - U U - - - U U - x"))
- - - - - U U - - - U U - x
>>> print(HexameterScanner().correct_dactyl_chain(
... "- U U U U - - - - - U U - U")) # doctest: +NORMALIZE_WHITESPACE
- - - U U - - - - - U U - U
"""
mark_list = string_utils.mark_list(scansion)
vals = list(scansion.replace(" ", ""))
# ignore last two positions, save them
feet = [vals.pop(), vals.pop()]
length = len(vals)
idx = length - 1
while idx > 0:
one = vals[idx]
two = vals[idx - 1]
if idx > 1:
three = vals[idx - 2]
else:
three = ""
# Dactyl foot is okay, no corrections
if (
one == self.constants.UNSTRESSED
and two == self.constants.UNSTRESSED
and three == self.constants.STRESSED
):
feet += [one]
feet += [two]
feet += [three]
idx -= 3
continue
# Spondee foot is okay, no corrections
if one == self.constants.STRESSED and two == self.constants.STRESSED:
feet += [one]
feet += [two]
idx -= 2
continue
# handle "U U U" foot as "- U U"
if (
one == self.constants.UNSTRESSED
and two == self.constants.UNSTRESSED
and three == self.constants.UNSTRESSED
):
feet += [one]
feet += [two]
feet += [self.constants.STRESSED]
idx -= 3
continue
# handle "U U -" foot as "- -"
if (
one == self.constants.STRESSED
and two == self.constants.UNSTRESSED
and three == self.constants.UNSTRESSED
):
feet += [self.constants.STRESSED]
feet += [self.constants.STRESSED]
idx -= 2
continue
# handle "- U" foot as "- -"
if one == self.constants.UNSTRESSED and two == self.constants.STRESSED:
feet += [self.constants.STRESSED]
feet += [two]
idx -= 2
continue
corrected = "".join(feet[::-1])
new_line = list(" " * len(scansion))
for idx, car in enumerate(corrected):
new_line[mark_list[idx]] = car
return "".join(new_line)
def correct_inverted_amphibrachs(self, scansion: str) -> str:
"""
The 'inverted amphibrach': stressed_unstressed_stressed syllable pattern is invalid
in hexameters, so here we coerce it to stressed: - U - -> - - -
:param scansion: the scansion stress pattern
:return: a string with the corrected scansion pattern
>>> print(HexameterScanner().correct_inverted_amphibrachs(
... " - U - - U - U U U U - U - x")) # doctest: +NORMALIZE_WHITESPACE
- - - - - - U U U U - - - x
>>> print(HexameterScanner().correct_inverted_amphibrachs(
... " - - - U - - U U U U U- - U - x")) # doctest: +NORMALIZE_WHITESPACE
- - - - - - U U U U U- - - - x
>>> print(HexameterScanner().correct_inverted_amphibrachs(
... "- - - - - U - U U - U U - -")) # doctest: +NORMALIZE_WHITESPACE
- - - - - - - U U - U U - -
>>> print(HexameterScanner().correct_inverted_amphibrachs(
... "- UU- U - U - - U U U U- U")) # doctest: +NORMALIZE_WHITESPACE
- UU- - - - - - U U U U- U
"""
new_line = scansion
while list(self.inverted_amphibrach_re.finditer(new_line)):
matches = list(self.inverted_amphibrach_re.finditer(new_line))
for match in matches:
(start, end) = match.span() # pylint: disable=unused-variable
unstressed_idx = new_line.index(self.constants.UNSTRESSED, start)
new_line = (
new_line[:unstressed_idx]
+ self.constants.STRESSED
+ new_line[unstressed_idx + 1 :]
)
return new_line
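# Illustrative usage sketch (the line and scansion below are taken from the
# doctests above; the attribute names are those of the Verse object that scan()
# returns):
#
#     scanner = HexameterScanner()
#     verse = scanner.scan("Arma virumque cano, Troiae qui prīmus ab ōrīs")
#     verse.valid           # True when the line scans as a hexameter
#     verse.scansion        # the stress pattern, e.g. "- U U - U U - - - - - U U - -"
#     verse.scansion_notes  # provenance notes, e.g. 'Valid by positional stresses.'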
|
{
"content_hash": "fa5f5417779a6205be0a30db0eb90a7c",
"timestamp": "",
"source": "github",
"line_count": 490,
"max_line_length": 395,
"avg_line_length": 47.553061224489795,
"alnum_prop": 0.5745676151238144,
"repo_name": "D-K-E/cltk",
"id": "2e0a26cd26b24b8e58809fbd2dd515d756e691b8",
"size": "23332",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/cltk/prosody/lat/hexameter_scanner.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2113412"
}
],
"symlink_target": ""
}
|
import sys
import os
#import sphinx_bootstrap_theme
if sys.version_info.major == 2:
import mock
from mock import Mock as MagicMock
else:
from unittest import mock
from unittest.mock import Mock as MagicMock
class Mock(MagicMock):
@classmethod
def __getattr__(cls, name):
return Mock()
#MOCK_MODULES = ['pyLikelihood','pyIrfLoader',
# 'BinnedAnalysis','UnbinnedAnalysis','SrcModel','AnalysisBase',
# 'SummedLikelihood','FluxDensity','LikelihoodState',
# 'GtApp']
MOCK_MODULES = ['pyLikelihood','pyIrfLoader']
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
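# Illustrative note on the mocks above (the attribute chain below is hypothetical):
# once MOCK_MODULES are registered in sys.modules, importing them succeeds and any
# attribute access returns another Mock, so autodoc can import fermipy modules that
# depend on the Fermi ScienceTools without those packages being installed, e.g.
#
#     import pyLikelihood            # resolves to the Mock() registered above
#     pyLikelihood.some.attr.chain   # every lookup just yields a fresh Mock()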
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../../fermipy'))
sys.path.insert(0, os.path.abspath('../../fermipy/jobs'))
sys.path.insert(0, os.path.abspath('../../fermipy/diffuse'))
sys.path.insert(0, os.path.abspath('../..'))
sys.path.insert(0, os.path.abspath('..'))
import fermipy
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.mathjax',
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
'sphinx.ext.napoleon',
'sphinx.ext.intersphinx']
# 'numpydoc'
autosummary_generate = True
intersphinx_mapping = {
'python': ('http://docs.python.org/', None),
'numpy': ('http://docs.scipy.org/doc/numpy/', None),
'scipy': ('http://docs.scipy.org/doc/scipy/reference/', None),
'matplotlib': ('http://matplotlib.org/', None),
'astropy': ('http://docs.astropy.org/en/stable/', None),
'h5py': ('http://docs.h5py.org/en/latest/', None)
}
intersphinx_cache_limit = 10
#intersphinx_mapping = {}
#intersphinx_mapping['astropy'] = ('http://docs.astropy.org/en/latest/', None)
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Fermipy'
author = u'Fermipy Developers'
copyright = u'2016-2022, ' + author
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = fermipy.__version__
# The full version, including alpha/beta/rc tags.
release = fermipy.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
default_role = 'obj'
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'sphinx_rtd_theme'
#html_theme = 'bootstrap'
#html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
def setup(app):
app.add_stylesheet("theme_overrides.css")
#html_context = {
# 'css_files': [
# '_static/theme_overrides.css', # overrides for wide tables in RTD theme
# ],
# }
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Fermipydoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Fermipy.tex', u'Fermipy Documentation',
u'Matthew Wood', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'fermipy', u'Fermipy Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Fermipy', u'Fermipy Documentation',
author, 'Fermipy', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
{
"content_hash": "bdbe85fbb155b76b3e702c2982b3bc9c",
"timestamp": "",
"source": "github",
"line_count": 336,
"max_line_length": 81,
"avg_line_length": 32.416666666666664,
"alnum_prop": 0.6939955930958501,
"repo_name": "fermiPy/fermipy",
"id": "71a1f479fa1c00fc128e1b0634d0e1f7bd6eac72",
"size": "11312",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "1974073"
}
],
"symlink_target": ""
}
|
"""Test reproduce state for select entities."""
import pytest
from homeassistant.components.select.const import (
ATTR_OPTION,
ATTR_OPTIONS,
DOMAIN,
SERVICE_SELECT_OPTION,
)
from homeassistant.const import ATTR_ENTITY_ID
from homeassistant.core import HomeAssistant, State
from homeassistant.helpers.state import async_reproduce_state
from tests.common import async_mock_service
async def test_reproducing_states(
hass: HomeAssistant, caplog: pytest.LogCaptureFixture
) -> None:
"""Test reproducing select states."""
calls = async_mock_service(hass, DOMAIN, SERVICE_SELECT_OPTION)
hass.states.async_set(
"select.test",
"option_one",
{ATTR_OPTIONS: ["option_one", "option_two", "option_three"]},
)
await async_reproduce_state(
hass,
[
State("select.test", "option_two"),
],
)
assert len(calls) == 1
assert calls[0].domain == DOMAIN
assert calls[0].data == {ATTR_ENTITY_ID: "select.test", ATTR_OPTION: "option_two"}
# Calling it again should not do anything
await async_reproduce_state(
hass,
[
State("select.test", "option_one"),
],
)
assert len(calls) == 1
# Restoring an invalid state should not work either
await async_reproduce_state(hass, [State("select.test", "option_four")])
assert len(calls) == 1
assert "Invalid state specified" in caplog.text
    # Restoring a state for an invalid entity ID logs a warning
await async_reproduce_state(hass, [State("select.non_existing", "option_three")])
assert len(calls) == 1
assert "Unable to find entity" in caplog.text
|
{
"content_hash": "738280a69f7166ce6bd4cf921eaf42d3",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 86,
"avg_line_length": 29.892857142857142,
"alnum_prop": 0.6589008363201911,
"repo_name": "mezz64/home-assistant",
"id": "bbd1ae17a7b381674eb41367be6250059c18c6da",
"size": "1674",
"binary": false,
"copies": "4",
"ref": "refs/heads/dev",
"path": "tests/components/select/test_reproduce_state.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2963"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "52481895"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
}
|
"""Module for number conversions
.. module:: lib.number.conversion
:platform: Unix
:synopsis: Module for number conversions
.. moduleauthor:: Petr Czaderna <pc@hydratk.org>
"""
def int2bool(intvar):
"""Method converts number to bool
Args:
intvar (int): number
Returns:
bool: result
"""
result = False
intvar = int(intvar)
if intvar > 0:
result = True
return result
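# Minimal usage sketch (illustrative; assumes the module is importable as
# hydratk.lib.number.conversion):
#
#     >>> int2bool(0)
#     False
#     >>> int2bool('3')   # anything accepted by int() works; positive means True
#     True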
|
{
"content_hash": "23665a81aee340b469dad40e40b30443",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 48,
"avg_line_length": 16.807692307692307,
"alnum_prop": 0.6155606407322655,
"repo_name": "hydratk/hydratk",
"id": "83da73062a2e4346c9132c13ae48977b4ec52854",
"size": "461",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/hydratk/lib/number/conversion.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "444574"
}
],
"symlink_target": ""
}
|
"""Covertype model taxi model features."""
NUMERIC_FEATURE_KEYS = [
'Elevation', 'Aspect', 'Slope', 'Horizontal_Distance_To_Hydrology',
'Vertical_Distance_To_Hydrology', 'Horizontal_Distance_To_Roadways',
'Hillshade_9am', 'Hillshade_Noon', 'Hillshade_3pm',
'Horizontal_Distance_To_Fire_Points'
]
CATEGORICAL_FEATURE_KEYS = ['Wilderness_Area', 'Soil_Type']
LABEL_KEY = 'Cover_Type'
NUM_CLASSES = 7
def transformed_name(key):
"""Add post-fix to feature keys after transformations applied."""
return key + '_xf'
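# Illustrative example of the convention above: each raw feature key maps to its
# post-transform counterpart, e.g. transformed_name('Elevation') == 'Elevation_xf'.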
|
{
"content_hash": "be4442c474df473e618cdb50632086e9",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 72,
"avg_line_length": 29.944444444444443,
"alnum_prop": 0.6938775510204082,
"repo_name": "GoogleCloudPlatform/mlops-on-gcp",
"id": "434d6d6ac2549230f499f86981c31f44c63a1ea6",
"size": "1135",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "on_demand/tfx-caip/lab-03-tfx-cicd/labs/pipeline/features.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "15195"
},
{
"name": "HCL",
"bytes": "8348"
},
{
"name": "JavaScript",
"bytes": "1143"
},
{
"name": "Jupyter Notebook",
"bytes": "6737030"
},
{
"name": "Mustache",
"bytes": "1946"
},
{
"name": "Python",
"bytes": "1235643"
},
{
"name": "Shell",
"bytes": "30775"
}
],
"symlink_target": ""
}
|
"""This example retrieves available creative fields for a given string and
displays the name, ID, advertiser ID, and number of values. Results are
limited to the first 10.
Tags: creativefield.getCreativeFields
"""
__author__ = 'api.jdilallo@gmail.com (Joseph DiLallo)'
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import DfaClient
ADVERTISER_ID = 'INSERT_ADVERTISER_ID_HERE'
def main(client, advertiser_id):
# Initialize appropriate service.
creative_field_service = client.GetCreativeFieldService(
'https://advertisersapitest.doubleclick.net', 'v1.19')
# Set up creative field search criteria structure.
creative_field_search_criteria = {
'advertiserIds': [advertiser_id],
'pageSize': '10'
}
# Get creative fields for the selected criteria.
results = creative_field_service.GetCreativeFields(
creative_field_search_criteria)[0]
# Display creative field names, IDs, advertiser IDs, and number of values.
if results['records']:
for creative_field in results['records']:
print ('Creative field with name \'%s\', ID \'%s\', advertiser ID \'%s\','
' and containing \'%s\' values was found.'
% (creative_field['name'], creative_field['id'],
creative_field['advertiserId'],
creative_field['totalNumberOfValues']))
else:
print 'No creative fields found for your criteria.'
if __name__ == '__main__':
# Initialize client object.
client = DfaClient(path=os.path.join('..', '..', '..', '..'))
main(client, ADVERTISER_ID)
|
{
"content_hash": "6a2f182ce325ac43a688b95a59f63c89",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 80,
"avg_line_length": 32.35294117647059,
"alnum_prop": 0.6715151515151515,
"repo_name": "caioserra/apiAdwords",
"id": "359875e4619dc14304b29b1f0f34fa42b7087850",
"size": "2268",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "examples/adspygoogle/dfa/v1_19/get_creative_field.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Perl",
"bytes": "47375"
},
{
"name": "Python",
"bytes": "3481410"
},
{
"name": "Shell",
"bytes": "14782"
}
],
"symlink_target": ""
}
|
"""
This application populates the Personal Archive web application, to facilitate
evaluation of the application for Udacity reviewers.
"""
## IMPORTS
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
## IMPORT DATABASE CLASSES FOR THIS PROJECT
from database_setup import Base, Collections, Items, Authors, Subject, User, People
engine = create_engine('postgresql:///personalarchive')
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
## ADD A USER
newUser = User(name = 'Abigail Mathews', email = 'abbymathews@gmail.com')
session.add(newUser)
session.commit()
currentUser = session.query(User).filter_by(email = 'abbymathews@gmail.com').one()
## ADD SOME SUBJECTS
subjList = ['schoolwork', 'birthdays', 'travel', 'artwork']
for subj in subjList:
newSubject = Subject(subject = subj)
session.add(newSubject)
session.commit()
## ADD AUTHORS
authorList = ['William Mathews', 'Abigail Mathews', 'Linnaea Mathews']
for auth in authorList:
newAuthor = Authors(name = auth)
session.add(newAuthor)
session.commit()
## ADD PEOPLE
peopleList = ['Linnaea Mathews', 'Abigail Mathews']
for person in peopleList:
newPerson = People(name = person)
session.add(newPerson)
session.commit()
## ADD SAMPLE COLLECTIONS
collections = [['Photos', 'Photographs of Linnaea and her Travels'],
['Drawings', 'Artwork and other School Projects from Fall 2015'],
['Paintings', "Linnaea's Wet-media work from Fall 2015"]]
for collection in collections:
newCollection = Collections(name = collection[0],
description = collection[1],
user_id=currentUser.id)
session.add(newCollection)
session.commit()
## ADD SAMPLE ITEMS
items = [
['Second Birthday - Candles', 'Linnaea gets ready to blow out her candles',
'', 'IMG_30582015-12-14-19-54-02-493053.JPG', [2], [1], [1], 1],
['Second Birthday - Eating Cake', 'Linnaea makes a big mess out of her ice-cream cake',
'', 'IMG_30772015-12-14-19-52-49-425807.JPG', [2], [1], [1], 1],
['Second Birthday - Presents', 'Opening presents on her second birthday',
'', 'IMG_30622015-12-14-19-55-57-069323.JPG', [2], [1], [1], 1],
    ['Huntington Library', 'California trip, Linnaea exploring the Huntington Library grounds',
'', 'IMG_24472015-12-25-23-29-42-737744.JPG', [3], [1], [1, 2], 1],
['Getty Museum', 'Outside at the Getty Villa', '',
'IMG_24342015-12-25-23-29-42-244983.JPG', [3], [1], [1, 2], 1],
['Flower', 'Drawing of a Magenta Flower with Blue and Black Background',
'Crayon on Paper', 'flower2016-01-11-17-52-50-630350.JPG', [1, 4], [3], [], 2],
['Brown Square', 'Abstract Design with Browns and other Colors',
'Oil Pastel on Paper', 'brownsquare2016-01-11-17-55-17-496917.JPG', [1, 4], [3], [], 2],
['Orange Sun', 'Colorful landscape with an orange sun and Green and Brown Grass',
'Marker on Paper', 'orangesun2016-01-11-17-55-17-540144.JPG', [1, 4], [3], [], 2],
['Red Abstract', 'Small Red-dominated Abstract', 'Oil Pastel on Card',
'redpastel2016-01-11-17-55-17-582808.JPG', [1, 4], [3], [], 2],
['Black Angle', 'Abstract Watercolor with Dominant Black Triangle',
'Watercolor on Paper', 'black_uneven2016-01-11-18-03-16-958836.JPG', [1, 4], [3], [], 3],
['Orangy Writing', 'Linear Orange-Brown Scribbles', 'Watercolor on Paper',
'orangy_writing2016-01-11-18-03-17-010031.JPG', [1, 4], [3], [], 3],
['Purple Abstract', 'Subdued, Purplish Landscape', 'Watercolor on Gray Card',
'purplish_graypaper2016-01-11-18-03-17-038908.JPG', [1, 4], [3], [], 3],
['Colorful Swirl', 'Small, Color-filled Watercolor', 'Watercolor on Gray Card',
'colorful_graypaper2016-01-11-18-03-16-987496.JPG', [1, 4], [3], [], 3]
]
for item in items:
newItem = Items(title = item[0],
description = item[1],
item_type = 'image',
note = item[2],
user_id=currentUser.id,
archive_url = item[3])
# Create many-to-many associations
for auth_id in item[5]:
thisAuth = session.query(Authors).filter_by(id = auth_id).one()
newItem.authors.append(thisAuth)
for subj_id in item[4]:
subj = session.query(Subject).filter_by(id = subj_id).one()
newItem.subject.append(subj)
for person_id in item[6]:
thisPerson = session.query(People).filter_by(id = person_id).one()
newItem.people.append(thisPerson)
coll = session.query(Collections).filter_by(id = item[7]).one()
newItem.collections.append(coll)
session.add(newItem)
session.commit()
|
{
"content_hash": "be8746edd6c3ca85f1b8e15f9b7e725f",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 96,
"avg_line_length": 42.07017543859649,
"alnum_prop": 0.6388657214345288,
"repo_name": "AbigailMathews/catalog_archive",
"id": "47b93d3ce5242a369e81e565cac40bee9613ae08",
"size": "4796",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "db_populate.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2516"
},
{
"name": "HTML",
"bytes": "37528"
},
{
"name": "Python",
"bytes": "47931"
}
],
"symlink_target": ""
}
|
import uuid
import svc_monitor.services.loadbalancer.drivers.abstract_driver as abstract_driver
from vnc_api.vnc_api import InstanceIp
from vnc_api.vnc_api import FloatingIp
from vnc_api.vnc_api import PortMap, PortMappings
from vnc_api.vnc_api import ServiceHealthCheck, ServiceHealthCheckType
from vnc_api.vnc_api import NoIdError, RefsExistError
from svc_monitor.config_db import *
class OpencontrailLoadbalancerDriver(
abstract_driver.ContrailLoadBalancerAbstractDriver):
def __init__(self, name, manager, api, db, args=None):
self._name = name
self._api = api
self._svc_manager = manager
self.db = db
def _get_floating_ip(self, iip_id=None, fip_id=None):
fip = None
try:
if iip_id:
iip = self._api.instance_ip_read(id=iip_id)
fip_list = iip.get_floating_ips()
                if fip_list:
fip_id = fip_list[0]['uuid']
else:
return None
fip = self._api.floating_ip_read(id=fip_id)
except NoIdError:
fip = None
return fip
def _add_vmi_ref(self, vmi, iip_id=None, fip_id=None, fip=None):
if not fip:
fip = self._get_floating_ip(iip_id, fip_id)
if fip:
fip.add_virtual_machine_interface(vmi)
self._api.floating_ip_update(fip)
return fip
def _delete_vmi_ref(self, vmi, iip_id=None, fip_id=None):
fip = self._get_floating_ip(iip_id, fip_id)
if fip:
fip.del_virtual_machine_interface(vmi)
self._api.floating_ip_update(fip)
return fip
def _add_port_map(self, fip, protocol, src_port, dst_port):
fip_uuid = fip.uuid
fip = self._api.floating_ip_read(id=fip_uuid)
portmap_entry = False
portmappings = fip.get_floating_ip_port_mappings()
portmap_list = []
if portmappings:
portmap_list = portmappings.get_port_mappings()
if portmappings is None:
portmappings = PortMappings()
portmap_list = portmappings.get_port_mappings()
for portmap in portmap_list or []:
if portmap.src_port == src_port and portmap.protocol == protocol:
portmap_entry = True
break
        if not portmap_entry:
portmap = PortMap()
portmap.set_protocol(protocol)
portmap.set_src_port(src_port)
portmap.set_dst_port(dst_port)
portmappings.add_port_mappings(portmap)
fip.set_floating_ip_port_mappings(portmappings)
fip.floating_ip_port_mappings_enable = True
self._api.floating_ip_update(fip)
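    # Sketch of the resulting mapping (illustrative port values): after
    #     self._add_port_map(fip, 'TCP', 80, 8080)
    # traffic arriving on the floating IP at TCP port 80 is forwarded to port 8080
    # on the member interfaces referenced by that floating IP.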
def _delete_port_map(self, fip, src_port):
fip_uuid = fip.uuid
fip = self._api.floating_ip_read(id=fip_uuid)
portmappings = fip.get_floating_ip_port_mappings()
if not portmappings:
return None
portmap_list = portmappings.get_port_mappings()
for portmap in portmap_list or []:
if portmap.src_port == src_port:
portmappings.delete_port_mappings(portmap)
fip.set_floating_ip_port_mappings(portmappings)
if len(portmap_list) == 0:
fip.floating_ip_port_mappings_enable = False
self._api.floating_ip_update(fip)
return portmap
def _add_service_health_check_ref(self, service_health_check, vmi_id):
vmi = self._api.virtual_machine_interface_read(id=vmi_id)
if vmi:
vmi.add_service_health_check(service_health_check)
self._api.virtual_machine_interface_update(vmi)
return vmi
def _delete_service_health_check_ref(self, service_health_check, vmi_id):
vmi = self._api.virtual_machine_interface_read(id=vmi_id)
if vmi:
vmi.del_service_health_check(service_health_check)
self._api.virtual_machine_interface_update(vmi)
return vmi
def set_config_v2(self, lb_id):
lb = LoadbalancerSM.get(lb_id)
if not lb:
return
conf = {}
vmi_conf = {}
instance_ips = []
floating_ips = []
vmi = VirtualMachineInterfaceSM.get(lb.virtual_machine_interface)
if vmi is None:
return conf
for iip_id in vmi.instance_ips or []:
instance_ips.append(iip_id)
for fip_id in vmi.floating_ips or []:
floating_ips.append(fip_id)
vmi_conf['instance_ips'] = instance_ips
vmi_conf['floating_ips'] = floating_ips
conf['vmi'] = vmi_conf
return conf
def _update_pool_ip_list(self, pool, action, iip_id=None, fip_id=None, fip=None):
if action == "add":
if iip_id:
ip_id = iip_id
pool.lb_instance_ips.append(iip_id)
elif fip_id:
ip_id = fip_id
pool.lb_floating_ips.append(fip_id)
if fip:
pool.lb_fips[ip_id] = fip
        elif action == 'del':
            ip_id = None
            if iip_id and iip_id in pool.lb_instance_ips:
                ip_id = iip_id
                idx = pool.lb_instance_ips.index(ip_id)
                del pool.lb_instance_ips[idx]
            elif fip_id and fip_id in pool.lb_floating_ips:
                ip_id = fip_id
                idx = pool.lb_floating_ips.index(ip_id)
                del pool.lb_floating_ips[idx]
            if ip_id is not None and ip_id in pool.lb_fips:
                del pool.lb_fips[ip_id]
def _update_pool_member_props(self, lb, lb_props):
if lb is None:
return
for ll_id in lb.loadbalancer_listeners:
listener = LoadbalancerListenerSM.get(ll_id)
if not listener:
continue
if listener.params['protocol'] == 'UDP':
protocol = 'UDP'
else:
protocol = 'TCP'
src_port = listener.params['protocol_port']
pool = LoadbalancerPoolSM.get(listener.loadbalancer_pool)
if pool:
for iip_id in lb_props['old_instance_ips'] or []:
self._update_pool_ip_list(pool, "del", iip_id=iip_id)
for fip_id in lb_props['old_floating_ips'] or []:
self._update_pool_ip_list(pool, "del", fip_id=fip_id)
for iip_id in lb_props['new_instance_ips'] or []:
fip = self._get_floating_ip(iip_id=iip_id)
for member_id in pool.member_vmis.keys():
vmi = pool.member_vmis[member_id]
member = LoadbalancerMemberSM.get(member_id)
if not member:
continue
dst_port = member.params['protocol_port']
fip = self._add_vmi_ref(vmi, iip_id=iip_id, fip=fip)
self._add_port_map(fip, protocol, src_port, dst_port)
self._update_pool_ip_list(pool, "add", iip_id=iip_id, fip=fip)
for fip_id in lb_props['new_floating_ips'] or []:
fip = self._get_floating_ip(fip_id=fip_id)
for member_id in pool.member_vmis.keys():
vmi = pool.member_vmis[member_id]
member = LoadbalancerMemberSM.get(member_id)
if not member:
continue
dst_port = member.params['protocol_port']
fip = self._add_vmi_ref(vmi, fip_id=fip_id, fip=fip)
self._add_port_map(fip, protocol, src_port, dst_port)
self._update_pool_ip_list(pool, "add", fip_id=fip_id, fip=fip)
def _update_loadbalancer_props(self, lb_id):
lb = LoadbalancerSM.get(lb_id)
if lb is None:
msg = ('Unable to retrieve loadbalancer %s' % lb_id)
self._svc_manager.logger.error(msg)
return
driver_data = self.db.loadbalancer_driver_info_get(lb_id)
if driver_data:
if 'lb_instance_ips' in driver_data:
lb.instance_ips = driver_data['lb_instance_ips']
if 'lb_floating_ips' in driver_data:
lb.floating_ips = driver_data['lb_floating_ips']
vmi = VirtualMachineInterfaceSM.get(lb.virtual_machine_interface)
if vmi is None:
return
if set(lb.instance_ips) == vmi.instance_ips and \
set(lb.floating_ips) == vmi.floating_ips:
return
old_instance_ips = []
new_instance_ips = []
if set(lb.instance_ips) != vmi.instance_ips:
for iip_id in lb.instance_ips or []:
if iip_id not in vmi.instance_ips:
old_instance_ips.append(iip_id)
for iip_id in vmi.instance_ips or []:
if iip_id not in lb.instance_ips:
new_instance_ips.append(iip_id)
old_floating_ips = []
new_floating_ips = []
if set(lb.floating_ips) != vmi.floating_ips:
for fip_id in lb.floating_ips or []:
if fip_id not in vmi.floating_ips:
old_floating_ips.append(fip_id)
for fip_id in vmi.floating_ips or []:
if fip_id not in lb.floating_ips:
new_floating_ips.append(fip_id)
for iip_id in old_instance_ips or []:
            fip = self._get_floating_ip(iip_id=iip_id)
if fip:
fip.set_virtual_machine_interface_list([])
self._api.floating_ip_update(fip)
self._api.floating_ip_delete(id=fip.uuid)
for fip_id in old_floating_ips or []:
fip = self._get_floating_ip(fip_id=fip_id)
if fip:
fip.set_virtual_machine_interface_list([])
fip.set_floating_ip_port_mappings([])
fip.floating_ip_port_mappings_enable = False
fip.floating_ip_traffic_direction = "both"
self._api.floating_ip_update(fip)
if len(new_instance_ips):
proj_obj = None
if vmi.parent_type == 'project':
proj_obj = self._api.project_read(id=vmi.parent_key)
for iip_id in new_instance_ips:
iip = self._api.instance_ip_read(id=iip_id)
fq_name = str(uuid.uuid4())
fip = FloatingIp(name=fq_name, parent_obj=iip,
floating_ip_address=iip.instance_ip_address)
fip.uuid = fq_name
fip.floating_ip_traffic_direction = "ingress"
if proj_obj:
fip.add_project(proj_obj)
self._api.floating_ip_create(fip)
if len(new_floating_ips):
for fip_id in new_floating_ips:
fip = self._get_floating_ip(fip_id=fip_id)
fip.floating_ip_traffic_direction = "ingress"
fip.floating_ip_fixed_ip_address = None
self._api.floating_ip_update(fip)
lb_props = {}
lb_props['old_instance_ips'] = old_instance_ips
lb_props['old_floating_ips'] = old_floating_ips
lb_props['new_instance_ips'] = new_instance_ips
lb_props['new_floating_ips'] = new_floating_ips
self._update_pool_member_props(lb, lb_props)
lb.instance_ips = vmi.instance_ips
lb.floating_ips = vmi.floating_ips
driver_data = {}
driver_data['vmi'] = vmi.uuid
driver_data['lb_instance_ips'] = list(lb.instance_ips)
driver_data['lb_floating_ips'] = list(lb.floating_ips)
self.db.loadbalancer_driver_info_insert(lb_id, driver_data)
def _clear_loadbalancer_props(self, lb_id):
driver_data = self.db.loadbalancer_driver_info_get(lb_id)
if driver_data is None:
return
lb = LoadbalancerSM.get(lb_id)
if lb is None:
return
lb.instance_ips = driver_data['lb_instance_ips']
for iip_id in lb.instance_ips or []:
fip = self._get_floating_ip(iip_id=iip_id)
if fip:
try:
fip.set_virtual_machine_interface_list([])
self._api.floating_ip_update(fip)
self._api.floating_ip_delete(id=fip.uuid)
except NoIdError:
# probably deleted by the lb creator
pass
try:
lb.floating_ips = driver_data['lb_floating_ips']
vmi = self._api.virtual_machine_interface_read(id=driver_data['vmi'])
for fip_id in lb.floating_ips or []:
fip = self._get_floating_ip(fip_id=fip_id)
if fip:
fip.set_virtual_machine_interface_list([])
fip.set_floating_ip_port_mappings([])
fip.floating_ip_port_mappings_enable = False
fip.floating_ip_traffic_direction = "both"
self._api.floating_ip_update(fip)
fip = self._add_vmi_ref(vmi, fip_id=fip_id)
except NoIdError:
# probably deleted by the lb creator
pass
del lb.instance_ips[:]
del lb.floating_ips[:]
self.db.loadbalancer_remove(lb_id, ['vmi'])
self.db.loadbalancer_remove(lb_id, ['lb_instance_ips'])
self.db.loadbalancer_remove(lb_id, ['lb_floating_ips'])
def _update_listener_props(self, old_listener, listener):
        lb_id = listener.loadbalancer
driver_data = self.db.loadbalancer_driver_info_get(lb_id)
if driver_data is None:
return
lb_instance_ips = []
lb_floating_ips = []
if 'lb_instance_ips' in driver_data:
lb_instance_ips = driver_data['lb_instance_ips']
if 'lb_floating_ips' in driver_data:
lb_floating_ips = driver_data['lb_floating_ips']
if not old_listener:
return
        if old_listener.params['protocol_port'] == listener.params['protocol_port'] and \
                old_listener.params['protocol'] == listener.params['protocol']:
            return
        if listener.params['protocol'] == 'UDP':
            protocol = 'UDP'
        else:
            protocol = 'TCP'
        for iip_id in lb_instance_ips or []:
            fip = self._get_floating_ip(iip_id=iip_id)
            if fip:
                src_port = old_listener.params['protocol_port']
                portmap = self._delete_port_map(fip, src_port)
                if portmap is None:
                    continue
                src_port = listener.params['protocol_port']
                dst_port = portmap.dst_port
                self._add_port_map(fip, protocol, src_port, dst_port)
        for fip_id in lb_floating_ips or []:
            fip = self._get_floating_ip(fip_id=fip_id)
            if fip:
                src_port = old_listener.params['protocol_port']
                portmap = self._delete_port_map(fip, src_port)
                if portmap is None:
                    continue
                src_port = listener.params['protocol_port']
                dst_port = portmap.dst_port
                self._add_port_map(fip, protocol, src_port, dst_port)
def _clear_listener_props(self, listener_id):
listener = LoadbalancerListenerSM.get(listener_id)
if listener is None:
return
        lb_id = listener.loadbalancer
driver_data = self.db.loadbalancer_driver_info_get(lb_id)
if driver_data is None:
return
lb_instance_ips = []
lb_floating_ips = []
if 'lb_instance_ips' in driver_data:
lb_instance_ips = driver_data['lb_instance_ips']
if 'lb_floating_ips' in driver_data:
lb_floating_ips = driver_data['lb_floating_ips']
        for iip_id in lb_instance_ips or []:
            fip = self._get_floating_ip(iip_id=iip_id)
            if fip:
                self._delete_port_map(fip, listener.params['protocol_port'])
        for fip_id in lb_floating_ips or []:
            fip = self._get_floating_ip(fip_id=fip_id)
            if fip:
                self._delete_port_map(fip, listener.params['protocol_port'])
def _update_pool_props(self, pool_id):
pool = LoadbalancerPoolSM.get(pool_id)
if pool is None:
return
lb_id = pool.loadbalancer_id
driver_data = self.db.loadbalancer_driver_info_get(lb_id)
if driver_data is None:
return
lb_instance_ips = []
lb_floating_ips = []
if 'lb_instance_ips' in driver_data:
lb_instance_ips = driver_data['lb_instance_ips']
if 'lb_floating_ips' in driver_data:
lb_floating_ips = driver_data['lb_floating_ips']
if lb_instance_ips == pool.lb_instance_ips and \
lb_floating_ips == pool.lb_floating_ips:
return
pool.lb_fips = {}
pool.lb_instance_ips = []
pool.lb_floating_ips = []
for iip_id in lb_instance_ips or []:
fip = self._get_floating_ip(iip_id=iip_id)
self._update_pool_ip_list(pool, "add", iip_id=iip_id, fip=fip)
for fip_id in lb_floating_ips or []:
fip = self._get_floating_ip(fip_id=fip_id)
self._update_pool_ip_list(pool, "add", fip_id=fip_id, fip=fip)
def _clear_pool_props(self, pool_id):
pool = LoadbalancerPoolSM.get(pool_id)
if pool is None:
return
pool.lb_fips = {}
pool.lb_instance_ips = []
pool.lb_floating_ips = []
pool.member_vmis = {}
def _update_member_props(self, member_id):
member = LoadbalancerMemberSM.get(member_id)
if member is None or member.vmi is None:
return
pool = LoadbalancerPoolSM.get(member.loadbalancer_pool)
if pool is None:
return
if member_id in pool.member_vmis.keys():
vmi = pool.member_vmis[member_id]
else:
try:
vmi = self._api.virtual_machine_interface_read(id=member.vmi)
pool.member_vmis[member_id] = vmi
except NoIdError:
return
src_port = pool.listener_port
dst_port = member.params['protocol_port']
if pool.listener_protocol == 'UDP':
protocol = 'UDP'
else:
protocol = 'TCP'
        for fip in pool.lb_fips.values():
fip.add_virtual_machine_interface(vmi)
self._api.floating_ip_update(fip)
self._add_port_map(fip, protocol, src_port, dst_port)
if pool.service_health_check:
self._add_service_health_check_ref(pool.service_health_check, member.vmi)
            member.service_health_check = pool.service_health_check
def _clear_member_props(self, member_id):
member = LoadbalancerMemberSM.get(member_id)
if member is None:
return
pool = LoadbalancerPoolSM.get(member.loadbalancer_pool)
if pool is None:
return
        if member_id in pool.member_vmis:
vmi = pool.member_vmis[member_id]
del pool.member_vmis[member_id]
else:
try:
vmi = self._api.virtual_machine_interface_read(id=member.vmi)
pool.member_vmis[member_id] = vmi
except NoIdError:
return
port_map_delete = False
if len(pool.members) == 1 and list(pool.members)[0] == member_id:
port_map_delete = True
        for fip in pool.lb_fips.values():
fip.del_virtual_machine_interface(vmi)
self._api.floating_ip_update(fip)
            if not port_map_delete:
                continue
self._delete_port_map(fip, pool.listener_port)
if member.service_health_check:
self._delete_service_health_check_ref(member.service_health_check, member.vmi)
def _update_health_monitor_props(self, hm_id, pool_id):
_service_health_check_type_mapping = {
'delay': 'delay',
'timeout': 'timeout',
'max_retries': 'max_retries',
'http_method': 'http_method',
'url_path': 'url_path',
'expected_codes': 'expected_codes',
'monitor_type': 'monitor_type',
}
pool = LoadbalancerPoolSM.get(pool_id)
if pool is None:
return
hm = HealthMonitorSM.get(hm_id)
if hm is None:
return
props = ServiceHealthCheckType()
setattr(props, 'health_check_type', 'link-local')
for key, mapping in _service_health_check_type_mapping.iteritems():
if mapping in hm.params:
setattr(props, key, hm.params[mapping])
if hm.params['monitor_type'] == 'PING' or \
hm.params['monitor_type'] == 'TCP':
setattr(props, 'monitor_type', 'PING')
setattr(props, 'url_path', 'local-ip')
elif hm.params['monitor_type'] == 'HTTP' or \
hm.params['monitor_type'] == 'HTTPS':
setattr(props, 'monitor_type', 'HTTP')
props.enabled = True
try:
if hm.service_health_check_id:
id = hm.service_health_check_id
service_health_check = self._api.service_health_check_read(id=id)
service_health_check.set_service_health_check_properties(props)
self._api.service_health_check_update(service_health_check)
else:
shc_uuid = str(uuid.uuid4())
fq_name = pool.name + '-' + shc_uuid
tenant_id = str(uuid.UUID(hm.parent_uuid.replace('-', '')))
project = self._api.project_read(id=tenant_id)
service_health_check = ServiceHealthCheck(
name=fq_name,
parent_obj=project,
service_health_check_properties=props)
service_health_check.uuid = shc_uuid
self._api.service_health_check_create(service_health_check)
for member_id in pool.members:
member = LoadbalancerMemberSM.get(member_id)
if member is None:
continue
self._add_service_health_check_ref(service_health_check, member.vmi)
pool.service_health_check = service_health_check
for member_id in pool.members:
member = LoadbalancerMemberSM.get(member_id)
if member:
member.service_health_check = service_health_check
hm.service_health_check_id = service_health_check.uuid
hm.provider = pool.provider
driver_data = {}
driver_data['provider'] = hm.provider
driver_data['service_health_check_id'] = hm.service_health_check_id
driver_data['pool_id'] = pool_id
self.db.health_monitor_driver_info_insert(hm_id, driver_data)
except NoIdError:
return
def _clear_health_monitor_props(self, hm_id, pool_id):
driver_data = self.db.health_monitor_driver_info_get(hm_id)
if driver_data is None:
return
try:
id = driver_data['service_health_check_id']
service_health_check = self._api.service_health_check_read(id=id)
vmi_back_refs = service_health_check.get_virtual_machine_interface_back_refs()
for vmi in vmi_back_refs or []:
self._api.ref_update('virtual-machine-interface', vmi['uuid'], \
'service-health-check', service_health_check.uuid, None, 'DELETE')
self._api.service_health_check_update(service_health_check)
self._api.service_health_check_delete(id=service_health_check.uuid)
if driver_data['pool_id']:
pool = LoadbalancerPoolSM.get(driver_data['pool_id'])
if pool is None:
return
pool.service_health_check = None
for member_id in pool.members:
member = LoadbalancerMemberSM.get(member_id)
if member is None:
continue
member.service_health_check = None
except NoIdError:
return
def create_loadbalancer(self, loadbalancer):
self._update_loadbalancer_props(loadbalancer['id'])
def update_loadbalancer(self, old_loadbalancer, loadbalancer):
self._update_loadbalancer_props(loadbalancer['id'])
def suspend_loadbalancer(self, loadbalancer):
self._clear_loadbalancer_props(loadbalancer['id'])
def delete_loadbalancer(self, loadbalancer):
self._clear_loadbalancer_props(loadbalancer['id'])
def create_listener(self, listener):
self._update_listener_props(None, listener)
def update_listener(self, old_listener, listener):
self._update_listener_props(old_listener, listener)
def delete_listener(self, listener):
self._clear_listener_props(listener['id'])
def create_pool(self, pool):
self._update_pool_props(pool['id'])
def update_pool(self, old_pool, pool):
self._update_pool_props(pool['id'])
def delete_pool(self, pool):
self._clear_pool_props(pool['id'])
def create_member(self, member):
self._update_member_props(member['id'])
def update_member(self, old_member, member):
self._update_member_props(member['id'])
def delete_member(self, member):
self._clear_member_props(member['id'])
def create_health_monitor(self,
health_monitor,
pool_id):
self._update_health_monitor_props(health_monitor['id'], pool_id)
def update_health_monitor(self,
old_health_monitor,
health_monitor,
pool_id):
self._update_health_monitor_props(health_monitor['id'], pool_id)
def delete_health_monitor(self, health_monitor, pool_id):
self._clear_health_monitor_props(health_monitor['id'], pool_id)
def stats(self, pool_id):
pass
def create_vip(self, vip):
pass
def update_vip(self, old_vip, vip):
pass
def delete_vip(self, vip):
pass
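# --- Illustration only (not part of the driver interface) -------------------
# _update_listener_props() above moves a floating IP's port mapping when a
# listener's front-end port changes: it deletes the map keyed on the old
# source port, then re-adds one with the new source port and the original
# destination port. A minimal self-contained sketch of that remap step,
# using a hypothetical stand-in for the VNC PortMap object:
class _ExamplePortMap(object):
    def __init__(self, protocol, src_port, dst_port):
        self.protocol = protocol
        self.src_port = src_port
        self.dst_port = dst_port


def _example_remap_port(port_maps, old_src_port, new_src_port, protocol):
    """Return port_maps with old_src_port remapped to new_src_port."""
    old = [pm for pm in port_maps if pm.src_port == old_src_port]
    kept = [pm for pm in port_maps if pm.src_port != old_src_port]
    if old:
        # Mirrors _delete_port_map() followed by _add_port_map().
        kept.append(_ExamplePortMap(protocol, new_src_port, old[0].dst_port))
    return kept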
|
{
"content_hash": "0315046797de6536e60770c787842aea",
"timestamp": "",
"source": "github",
"line_count": 676,
"max_line_length": 90,
"avg_line_length": 39.28550295857988,
"alnum_prop": 0.548593591143578,
"repo_name": "nischalsheth/contrail-controller",
"id": "890c709be77430a444cbe9480309e030f66d33f3",
"size": "26627",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/config/svc-monitor/svc_monitor/services/loadbalancer/drivers/native/driver.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "88437"
},
{
"name": "C++",
"bytes": "23392370"
},
{
"name": "CSS",
"bytes": "531"
},
{
"name": "GDB",
"bytes": "44610"
},
{
"name": "Go",
"bytes": "45352"
},
{
"name": "HTML",
"bytes": "519766"
},
{
"name": "Java",
"bytes": "171966"
},
{
"name": "LLVM",
"bytes": "2937"
},
{
"name": "Lua",
"bytes": "20359"
},
{
"name": "Makefile",
"bytes": "12449"
},
{
"name": "Python",
"bytes": "7781013"
},
{
"name": "Roff",
"bytes": "41295"
},
{
"name": "Ruby",
"bytes": "13596"
},
{
"name": "Shell",
"bytes": "63970"
},
{
"name": "Thrift",
"bytes": "5666"
},
{
"name": "Yacc",
"bytes": "7737"
}
],
"symlink_target": ""
}
|
"""ScienceCruiseDataManagement URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from main.views import MainMenuView, MainMapView, PositionsJson, LatestShipPosition, CountryListView, FileStorageView,\
InteractiveMapView, EventListView, ImportPortsFromGpx, DocumentsView, AccessingDataView, PositionFromDateTime,\
CoordinatesConversion, TrackJson, MailState, ShipTimeToUtc, ImageReloaderView, LatestImage, StatsView,\
ContactDetailsListView
from metadata.views import ProjectListView, MetadataEntryListView, MetadataEntryView, MetadataEntryAsWord,\
MetadataEntryAsDif
from ship_data.views import FerryboxView
from data_storage_management.views import HardDiskJson, DirectoryUpdateJson
from django.conf import settings
from django.conf.urls.static import static
# This file is part of https://github.com/cpina/science-cruise-data-management
#
# This project was programmed in a hurry without any prior Django experience,
# while circumnavigating the Antarctic on the ACE expedition, without proper
# Internet access, with 150 scientists using the system and doing at the same
# cruise other data management and system administration tasks.
#
# Sadly there aren't unit tests and we didn't have time to refactor the code
# during the cruise, which is really needed.
#
# Carles Pina (carles@pina.cat) and Jen Thomas (jenny_t152@yahoo.co.uk), 2016-2017.
urlpatterns = [
url(r'^$', MainMenuView.as_view()),
url(r'^map/$', MainMapView.as_view()),
url(r'^api/positions.geojson', PositionsJson.as_view()),
url(r'^api/track.geojson', TrackJson.as_view()),
    url(r'^api/latest_ship_position.json', LatestShipPosition.as_view()),
    url(r'^api/data_storage/hard_disk.json', HardDiskJson.as_view()),
    url(r'^api/data_storage/add_directory_update.json', DirectoryUpdateJson.as_view()),
# url(r'^api/positions$', PositionsJson.as_view()),
url(r'^admin/', include(admin.site.urls)),
url(r'^chaining/', include('smart_selects.urls')),
url(r'^country/list$', CountryListView.as_view(), name='article-list'),
url(r'^storage/', FileStorageView.as_view()),
url(r'^map/interactive/$', InteractiveMapView.as_view()),
url(r'^reports/events/$', EventListView.as_view()),
url(r'^selectable/', include('selectable.urls')),
url(r'^import_ports_from_gpx/', ImportPortsFromGpx.as_view()),
url(r'^documents/', DocumentsView.as_view()),
url(r'^accessing_data/', AccessingDataView.as_view()),
url(r'^position_from_date_time/', PositionFromDateTime.as_view()),
url(r'^ship_time_to_utc/', ShipTimeToUtc.as_view()),
url(r'^coordinates_conversion/', CoordinatesConversion.as_view()),
url(r'^mail_state/', MailState.as_view()),
url(r'^ferrybox/', FerryboxView.as_view()),
url(r'^metadata/$', MetadataEntryListView.as_view()),
url(r'^metadata/([0-9]+)/$', MetadataEntryView.as_view()),
url(r'^metadata/export/word/([0-9]+)$', MetadataEntryAsWord.as_view()),
url(r'^metadata/export/dif/([0-9]+)$', MetadataEntryAsDif.as_view()),
url(r'^window/$', ImageReloaderView.as_view()),
url(r'^latest_image.jpg$', LatestImage.as_view()),
url(r'^stats/$', StatsView.as_view()),
url(r'^contacts/$', ContactDetailsListView.as_view()),
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT) \
+ static("/documents_storage/", document_root=settings.DOCUMENTS_DIRECTORY) \
+ static("/ethz_forecast_data/", document_root=settings.FORECAST_DIRECTORY)
if settings.DEBUG:
import debug_toolbar
urlpatterns += [
url(r'^__debug__/', include(debug_toolbar.urls)),
]
|
{
"content_hash": "4b7a337d564d749193b10b4fff82ff9c",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 119,
"avg_line_length": 52.207317073170735,
"alnum_prop": 0.7210932025227751,
"repo_name": "cpina/science-cruise-data-management",
"id": "a46d1d1be0cc115f5a59655c7af44ecfc42d37b5",
"size": "4281",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ScienceCruiseDataManagement/ScienceCruiseDataManagement/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "59966"
},
{
"name": "HTML",
"bytes": "50774"
},
{
"name": "JavaScript",
"bytes": "106205"
},
{
"name": "Python",
"bytes": "548151"
},
{
"name": "Shell",
"bytes": "106"
}
],
"symlink_target": ""
}
|
"""diffren API."""
# A new PyPI release will be pushed everytime `__version__` is increased
# When changing this, also update the CHANGELOG.md
__version__ = '0.1.0'
|
{
"content_hash": "cdf3d664899dfbfa6cf93037516debb1",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 72,
"avg_line_length": 33.2,
"alnum_prop": 0.6867469879518072,
"repo_name": "google-research/diffren",
"id": "b490bb7486b9b9f666e285adda317e23f109a93c",
"size": "751",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "diffren/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "93677"
},
{
"name": "Python",
"bytes": "145170"
},
{
"name": "Starlark",
"bytes": "13859"
}
],
"symlink_target": ""
}
|
import httplib, urllib2, time, calendar
from datetime import datetime
from decimal import Decimal
from xml.etree.ElementTree import fromstring
from xml.etree import ElementTree as ET
from base64 import b64encode
API_VERSION = 'v4'
def utc_to_local(dt):
''' Converts utc datetime to local'''
secs = calendar.timegm(dt.timetuple())
return datetime(*time.localtime(secs)[:6])
def str_to_datetime(s):
    ''' Converts ISO 8601 string (2009-11-10T21:11:00Z) to LOCAL datetime'''
return utc_to_local(datetime.strptime(s, '%Y-%m-%dT%H:%M:%SZ'))
class Client:
def __init__(self, token, site_name):
self.auth = b64encode('%s:x' % token)
self.base_host = 'spreedly.com'
self.base_path = '/api/%s/%s' % (API_VERSION, site_name)
self.base_url = 'https://%s%s' % (self.base_host, self.base_path)
self.url = None
def get_response(self):
return self.response
def get_url(self):
return self.url
def set_url(self, url):
self.url = '%s/%s' % (self.base_url, url)
def query(self, data=None, put=False):
opener = urllib2.build_opener(urllib2.HTTPHandler)
req = urllib2.Request(url=self.get_url())
req.add_header('User-agent', 'python-spreedly 1.0')
req.add_header('Authorization', 'Basic %s' % self.auth)
# Convert to POST if we got some data
if data:
req.add_header('Content-Type', 'application/xml')
req.add_data(data)
if put:
req.get_method = lambda: 'PUT'
f = opener.open(req)
self.response = f.read()
def get_plans(self):
self.set_url('subscription_plans.xml')
self.query()
# Parse
result = []
tree = fromstring(self.get_response())
for plan in tree.getiterator('subscription-plan'):
data = {
'name': plan.findtext('name'),
'description': plan.findtext('description'),
'terms': plan.findtext('terms'),
'plan_type': plan.findtext('plan-type'),
'price': Decimal(plan.findtext('price')),
'enabled': True if plan.findtext('enabled') == 'true' else False,
'force_recurring': \
True if plan.findtext('force-recurring') == 'true' else False,
'force_renew': \
True if plan.findtext('needs-to-be-renewed') == 'true' else False,
'duration': int(plan.findtext('duration-quantity')),
'duration_units': plan.findtext('duration-units'),
'feature_level': plan.findtext('feature-level'),
'return_url': plan.findtext('return-url'),
'version': int(plan.findtext('version')) \
if plan.findtext('version') else 0,
'speedly_id': int(plan.findtext('id')),
'speedly_site_id': int(plan.findtext('site-id')) \
if plan.findtext('site-id') else 0,
'created_at': str_to_datetime(plan.findtext('created-at')),
'date_changed': str_to_datetime(plan.findtext('updated-at')),
}
result.append(data)
return result
def create_subscriber(self, customer_id, screen_name):
'''
Creates a subscription
'''
data = '''
<subscriber>
<customer-id>%d</customer-id>
<screen-name>%s</screen-name>
</subscriber>
''' % (customer_id, screen_name)
self.set_url('subscribers.xml')
self.query(data)
# Parse
result = []
tree = fromstring(self.get_response())
for plan in tree.getiterator('subscriber'):
data = {
'customer_id': int(plan.findtext('customer-id')),
'first_name': plan.findtext('billing-first-name'),
'last_name': plan.findtext('billing-last-name'),
'active': True if plan.findtext('active') == 'true' else False,
'gift': True if plan.findtext('on-gift') == 'true' else False,
'trial_active': \
True if plan.findtext('on-trial') == 'true' else False,
'trial_elegible': \
True if plan.findtext('eligible-for-free-trial') == 'true' \
else False,
'lifetime': \
True if plan.findtext('lifetime-subscription') == 'true' \
else False,
'recurring': \
True if plan.findtext('recurring') == 'true' \
else False,
'card_expires_before_next_auto_renew': \
True if plan.findtext('card-expires-before-next-auto-renew') == 'true' \
else False,
'token': plan.findtext('token'),
'name': plan.findtext('subscription-plan-name'),
'feature_level': plan.findtext('feature-level'),
'created_at': str_to_datetime(plan.findtext('created-at')),
'date_changed': str_to_datetime(plan.findtext('updated-at')),
'active_until': str_to_datetime(plan.findtext('active-until')) if plan.findtext('active-until') else None,
}
result.append(data)
return result[0]
def delete_subscriber(self, id):
if 'test' in self.base_path:
headers = {'Authorization': 'Basic %s' % self.auth}
conn = httplib.HTTPSConnection(self.base_host)
conn.request(
'DELETE', '%s/subscribers/%d.xml' % (self.base_path, id),
'',
headers
)
response = conn.getresponse()
return response.status
return
def subscribe(self, subscriber_id, plan_id, trial=False):
'''
Subscribe a user to some plan
'''
data = '<subscription_plan><id>%d</id></subscription_plan>' % plan_id
        if trial:
            self.set_url('subscribers/%d/subscribe_to_free_trial.xml' % subscriber_id)
        # NOTE: only the free-trial endpoint is wired up here; with
        # trial=False the request is sent to whatever URL was set last.
        self.query(data)
# Parse
result = []
tree = fromstring(self.get_response())
for plan in tree.getiterator('subscriber'):
data = {
'customer_id': int(plan.findtext('customer-id')),
'first_name': plan.findtext('billing-first-name'),
'last_name': plan.findtext('billing-last-name'),
'active': True if plan.findtext('active') == 'true' else False,
'gift': True if plan.findtext('on-gift') == 'true' else False,
'trial_active': \
True if plan.findtext('on-trial') == 'true' else False,
'trial_elegible': \
True if plan.findtext('eligible-for-free-trial') == 'true' \
else False,
'lifetime': \
True if plan.findtext('lifetime-subscription') == 'true' \
else False,
'recurring': \
True if plan.findtext('recurring') == 'true' \
else False,
'card_expires_before_next_auto_renew': \
True if plan.findtext('card-expires-before-next-auto-renew') == 'true' \
else False,
'token': plan.findtext('token'),
'name': plan.findtext('subscription-plan-name'),
'feature_level': plan.findtext('feature-level'),
'created_at': str_to_datetime(plan.findtext('created-at')),
'date_changed': str_to_datetime(plan.findtext('updated-at')),
'active_until': str_to_datetime(plan.findtext('active-until')) if plan.findtext('active-until') else None,
}
result.append(data)
return result[0]
def cleanup(self):
'''
Removes ALL subscribers. NEVER USE IN PRODUCTION!
'''
if 'test' in self.base_path:
headers = {'Authorization': 'Basic %s' % self.auth}
conn = httplib.HTTPSConnection(self.base_host)
conn.request(
'DELETE', '%s/subscribers.xml' % self.base_path,
'',
headers
)
response = conn.getresponse()
return response.status
return
def get_info(self, subscriber_id):
self.set_url('subscribers/%d.xml' % subscriber_id)
self.query('')
# Parse
result = []
tree = fromstring(self.get_response())
for plan in tree.getiterator('subscriber'):
data = {
'customer_id': int(plan.findtext('customer-id')),
'email': plan.findtext('email'),
'screen_name': plan.findtext('screen-name'),
'first_name': plan.findtext('billing-first-name'),
'last_name': plan.findtext('billing-last-name'),
'active': True if plan.findtext('active') == 'true' else False,
'gift': True if plan.findtext('on-gift') == 'true' else False,
'trial_active': \
True if plan.findtext('on-trial') == 'true' else False,
'trial_elegible': \
True if plan.findtext('eligible-for-free-trial') == 'true' \
else False,
'lifetime': \
True if plan.findtext('lifetime-subscription') == 'true' \
else False,
'recurring': \
True if plan.findtext('recurring') == 'true' \
else False,
'card_expires_before_next_auto_renew': \
True if plan.findtext('card-expires-before-next-auto-renew') == 'true' \
else False,
'token': plan.findtext('token'),
'name': plan.findtext('subscription-plan-name'),
'feature_level': plan.findtext('feature-level'),
'created_at': str_to_datetime(plan.findtext('created-at')),
'date_changed': str_to_datetime(plan.findtext('updated-at')),
'active_until': str_to_datetime(plan.findtext('active-until')) if plan.findtext('active-until') else None,
}
result.append(data)
return result[0]
def set_info(self, subscriber_id, **kw):
root = ET.Element('subscriber')
for key, value in kw.items():
e = ET.SubElement(root, key)
e.text = value
self.set_url('subscribers/%d.xml' % subscriber_id)
self.query(data=ET.tostring(root), put=True)
def create_complimentary_subscription(self, subscriber_id, duration, duration_units, feature_level):
data = """<complimentary_subscription>
<duration_quantity>%s</duration_quantity>
<duration_units>%s</duration_units>
<feature_level>%s</feature_level>
</complimentary_subscription>""" % (duration, duration_units, feature_level)
self.set_url('subscribers/%s/complimentary_subscriptions.xml' % subscriber_id)
self.query(data)
def complimentary_time_extensions(self, subscriber_id, duration, duration_units):
data = """<complimentary_time_extension>
<duration_quantity>%s</duration_quantity>
<duration_units>%s</duration_units>
</complimentary_time_extension>""" % (duration, duration_units)
self.set_url('subscribers/%s/complimentary_time_extensions.xml' % subscriber_id)
self.query(data)
def get_or_create_subscriber(self, subscriber_id, screen_name):
try:
return self.get_info(subscriber_id)
        except urllib2.HTTPError, e:
            if e.code == 404:
                return self.create_subscriber(subscriber_id, screen_name)
            # Re-raise anything other than "subscriber not found".
            raise
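# Illustrative usage only (hypothetical token and site name; every call
# below performs a network round trip against a Spreedly *test* site):
if __name__ == '__main__':
    client = Client('your-api-token', 'your-test-site-name')
    subscriber = client.get_or_create_subscriber(42, 'alice')
    print subscriber['token'], subscriber['active']
    plans = client.get_plans()
    if plans:
        trial = client.subscribe(42, plans[0]['speedly_id'], trial=True)
        print trial['trial_active']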
|
{
"content_hash": "dea7f16a60ce101ac089670ab8f407e1",
"timestamp": "",
"source": "github",
"line_count": 291,
"max_line_length": 122,
"avg_line_length": 41.580756013745706,
"alnum_prop": 0.525206611570248,
"repo_name": "shelfworthy/python-spreedly",
"id": "cadcf7887b87ff26a78f72d768636417d07e65be",
"size": "12100",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "api.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "15789"
}
],
"symlink_target": ""
}
|
"""Utilities for making requests using a given client and handling errors.
"""
import io
import json
from apitools.base.py import exceptions as apitools_exceptions
from googlecloudsdk.api_lib.app import exceptions as api_lib_exceptions
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.core import log
from googlecloudsdk.core.resource import resource_printer
import httplib2
ERROR_FORMAT = ('Error Response: [{status_code}] {status_message}{url.line}'
'{details.content.line.json}')
def ExtractErrorMessage(error_details):
"""Extracts error details from an apitools_exceptions.HttpError.
Args:
error_details: a python dictionary returned from decoding an error that
was serialized to json.
Returns:
Multiline string containing a detailed error message suitable to show to a
user.
"""
error_message = io.BytesIO()
error_message.write('Error Response: [{code}] {message}'.format(
code=error_details.get('code', 'UNKNOWN'), # error_details.code is an int
message=error_details.get('message', u'').encode('utf-8')))
if 'url' in error_details:
error_message.write('\n' + error_details['url'].encode('utf-8'))
if 'details' in error_details:
error_message.write('\n\nDetails: ')
resource_printer.Print(
resources=[error_details['details']],
print_format='json',
out=error_message)
return error_message.getvalue()
def MakeRequest(service_method, request_message):
"""Makes a request using the given client method and handles HTTP errors."""
try:
return service_method(request_message)
except apitools_exceptions.HttpError as error:
log.debug(error)
exc = exceptions.HttpException(error)
# Make it easier to switch on certain common error codes.
err = api_lib_exceptions.STATUS_CODE_TO_ERROR.get(exc.payload.status_code)
if err:
raise err
try:
error_content = json.loads(error.content)
# If the error content isn't json, that's OK, just raise HttpException.
except ValueError:
error_content = {}
error_details = error_content.get('error', {})
# TODO(b/34516298): use generic HttpException when compatible with v2.
# HttpExceptions do not surface details from the response, so use a
# gcloud app-specific error message if details are available.
if 'details' in error_details and error_details['details']:
error_message = ExtractErrorMessage(error_details)
else:
error_message = None
raise api_lib_exceptions.HttpException(error, error_message=error_message)
except httplib2.HttpLib2Error as error:
raise exceptions.HttpException('Response error: %s' % error.message)
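def _example_error_details():
  """Illustration only: the decoded 'error' shape ExtractErrorMessage expects.

  The field values below are hypothetical; a real payload comes from
  json.loads(error.content)['error'] inside MakeRequest.
  """
  return ExtractErrorMessage({
      'code': 404,
      'message': u'App not found.',
      'url': u'https://example.googleapis.com/v1/apps/missing',
      'details': [{'reason': 'notFound'}],
  })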
|
{
"content_hash": "d9abc2f690b4d104a097ec799575c5c4",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 80,
"avg_line_length": 36.93150684931507,
"alnum_prop": 0.7166172106824926,
"repo_name": "KaranToor/MA450",
"id": "f7aa69b5dd8b8dffbfcf8bdb079fde2e0f956a0e",
"size": "3292",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "google-cloud-sdk/lib/googlecloudsdk/api_lib/app/api/requests.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3162"
},
{
"name": "CSS",
"bytes": "1930"
},
{
"name": "HTML",
"bytes": "13381"
},
{
"name": "Java",
"bytes": "151442"
},
{
"name": "JavaScript",
"bytes": "4906"
},
{
"name": "Makefile",
"bytes": "1636"
},
{
"name": "Objective-C",
"bytes": "13335"
},
{
"name": "PHP",
"bytes": "9086"
},
{
"name": "Pascal",
"bytes": "62"
},
{
"name": "Python",
"bytes": "19710731"
},
{
"name": "Roff",
"bytes": "2069494"
},
{
"name": "Ruby",
"bytes": "690"
},
{
"name": "Shell",
"bytes": "32272"
},
{
"name": "Smarty",
"bytes": "4968"
},
{
"name": "SourcePawn",
"bytes": "616"
},
{
"name": "Swift",
"bytes": "14225"
}
],
"symlink_target": ""
}
|
from __future__ import with_statement
from fabric.api import cd
from fabric.api import env
from fabric.api import local
from fabric.api import run
from fabric.api import sudo
def server():
env.use_ssh_config = True
env.forward_agent = True
env.port = '22222'
env.user = 'root'
env.hosts = ['zope8']
env.webserver = '/opt/webserver/buildout.webserver'
env.code_root = '/opt/sites/jms/buildout.jms'
env.sitename = 'jms'
env.code_user = 'root'
env.prod_user = 'www'
def ls():
""" Low level configuration test """
with cd(env.code_root):
run('ls')
def uptime():
""" Server uptime """
run('uptime')
def load():
""" Server average system load """
run('cat /proc/loadavg')
def memory():
""" Server memory usage """
run('free')
def disk():
""" Server disk and filesystem usage """
run('df -ha')
def supervisor():
""" Webserver process status """
with cd(env.webserver):
run('bin/supervisorctl status')
def status():
""" General system status information """
# General health of the server.
uptime()
load()
memory()
disk()
supervisor()
def update():
""" Update buildout from git/master """
with cd(env.code_root):
run('nice git pull')
def build():
""" Run buildout deployment profile """
with cd(env.code_root):
run('bin/buildout -Nc deployment.cfg')
def build_full():
""" Run buildout deployment profile and enforce updates """
with cd(env.code_root):
run('bin/buildout -c deployment.cfg')
def restart():
""" Restart instance """
with cd(env.webserver):
run('nice bin/supervisorctl restart instance-%(sitename)s' % env)
def supervisorctl(*cmd):
"""Runs an arbitrary supervisorctl command."""
with cd(env.webserver):
run('bin/supervisorctl ' + ' '.join(cmd))
def prepare_deploy():
""" Push committed local changes to git """
local('git push')
def deploy():
""" Deploy current master to production server """
update()
restart()
def deploy_full():
""" Deploy current master to production and run buildout """
prepare_deploy()
update()
build()
restart()
def rebuild():
""" Deploy current master and run full buildout """
update()
build_full()
restart()
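def deploy_and_check():
    """ Illustrative composite task (assumption: not part of the original
    workflow). Tasks here are plain functions, so they compose directly;
    run as `fab server deploy_and_check` """
    deploy()
    status()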
|
{
"content_hash": "e09fe19e98960e889c7476ebb9e51ce9",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 73,
"avg_line_length": 20.025641025641026,
"alnum_prop": 0.6116090482287665,
"repo_name": "potzenheimer/buildout.jms",
"id": "cb384b4c93c763294872e38604716b647ea7f22c",
"size": "2343",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fabfile.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "325539"
},
{
"name": "JavaScript",
"bytes": "49161"
},
{
"name": "Python",
"bytes": "31830"
}
],
"symlink_target": ""
}
|
def extractUltimaguilBase(item):
"""
Parser for 'Ultimaguil Base'
"""
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or 'preview' in item['title'].lower():
return None
if 'WATTT' in item['tags']:
return buildReleaseMessageWithType(item, 'WATTT', vol, chp, frag=frag, postfix=postfix)
return False
|
{
"content_hash": "265ad0f9242fcfe5455d06973e1cd04c",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 89,
"avg_line_length": 35.5,
"alnum_prop": 0.7183098591549296,
"repo_name": "fake-name/ReadableWebProxy",
"id": "23fbfd1709a15651027523e08eba791165a6f738",
"size": "355",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "WebMirror/management/rss_parser_funcs/feed_parse_extractUltimaguilBase.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "105811"
},
{
"name": "Dockerfile",
"bytes": "1178"
},
{
"name": "HTML",
"bytes": "119737"
},
{
"name": "JavaScript",
"bytes": "3006524"
},
{
"name": "Jupyter Notebook",
"bytes": "148075"
},
{
"name": "Mako",
"bytes": "1454"
},
{
"name": "Python",
"bytes": "5264346"
},
{
"name": "Shell",
"bytes": "1059"
}
],
"symlink_target": ""
}
|
"""A module for representing universal morphosyntactic feature bundles."""
from typing import Dict, List, Optional, Tuple, Type, Union
from cltk.core.exceptions import CLTKException
from cltk.morphology.universal_dependencies_features import *
__author__ = ["John Stewart <free-variation>"]
class MorphosyntacticFeatureBundle:
"""A representation of a set of features, usually associated with a word form."""
def __init__(self, *features: List[MorphosyntacticFeature]) -> None:
"""
>>> f1 = MorphosyntacticFeatureBundle(F.neg, N.pos, V.neg, Case.accusative)
>>> f1.features
{F: [neg], N: [pos], V: [neg], Case: [accusative]}
"""
self.features = {}
for feature in features:
if isinstance(feature, type) and issubclass(
feature, MorphosyntacticFeature
):
self.features[feature] = Underspecified
else:
if type(feature) in self.features:
self.features[type(feature)].append(feature)
else:
self.features[type(feature)] = [feature]
def __getitem__(
self, feature_name: Union[str, Type[MorphosyntacticFeature]]
) -> List[MorphosyntacticFeature]:
"""
Use dict-type syntax for accessing the values of features.
>>> f1 = f(F.pos, N.pos)
>>> f1[F]
[pos]
>>> f1[V]
Traceback (most recent call last):
cltk.core.exceptions.CLTKException: {F: [pos], N: [pos]} unspecified for V
>>> f1['F']
[pos]
"""
if type(feature_name) == str:
if feature_name not in globals():
raise TypeError(feature_name + " is not a morphosytactic feature")
feature_name = globals()[feature_name]
if not issubclass(feature_name, MorphosyntacticFeature):
raise TypeError(str(feature_name) + " is not a morphosytactic feature")
if feature_name in self.features:
return self.features[feature_name]
else:
raise CLTKException(f"{self} unspecified for {feature_name}")
def __setitem__(
self,
feature_name: Union[str, Type[MorphosyntacticFeature]],
feature_values: Union[MorphosyntacticFeature, List[MorphosyntacticFeature]],
) -> "MorphosyntacticFeatureBundle":
"""
Use dict-type syntax to set the value of features.
>>> f1 = f(F.pos)
>>> f1[N] = N.neg
>>> f1
{F: [pos], N: [neg]}
>>> f1['V'] = V.pos
>>> f1
{F: [pos], N: [neg], V: [pos]}
"""
if type(feature_name) == str:
if feature_name not in globals():
raise TypeError(feature_name + " is not a morphosytactic feature")
feature_name = globals()[feature_name]
if not issubclass(feature_name, MorphosyntacticFeature):
raise TypeError(str(feature_name) + " is not a morphosyntactic feature")
if type(feature_values) is not list:
feature_values = [feature_values]
for value in feature_values:
if value is not None and type(value) != feature_name:
raise TypeError(str(value) + " is not a " + str(feature_name))
self.features[feature_name] = feature_values
return self
    def all(
        self,
    ) -> List[Tuple[Type[MorphosyntacticFeature], List[MorphosyntacticFeature]]]:
        return list(self.features.items())
def underspecify(self, feature_name: Type[MorphosyntacticFeature]) -> None:
"""
Underspecify the given feature in the bundle.
>>> f1 = f(F.pos, N.pos, V.neg)
>>> f1.underspecify(F)
>>> f1[F] is Underspecified
True
"""
if not issubclass(feature_name, MorphosyntacticFeature):
raise TypeError(str(feature_name) + " is not a morphosytactic feature")
self.features[feature_name] = Underspecified
def matches(self, other: "MorphosyntacticFeatureBundle") -> bool:
"""
This feature bundle matches other if other contains all the features of this bundle,
i.e. if this bundle is an improper subset of other.
Underspecified features will match.
>>> f1 = f(F, N.pos, V.neg)
>>> f2 = f(F.neg, N.pos, V.neg)
>>> f3 = f(F.pos, N.neg, V.pos)
>>> f1.matches(f2)
True
>>> f1.matches(f3)
False
"""
if other is None:
return False
for f in self.features.keys():
if f not in other.features:
return False
if (
self[f] is not Underspecified
and other[f] is not Underspecified
and not (self[f] == other[f])
):
return False
return True
def __str__(self) -> str:
return str(self.features)
def __iter__(self):
return iter(self.features)
__repr__ = __str__
def keys(self):
return self.features.keys()
def values(self):
return self.features.values()
def items(self):
return self.features.items()
def __len__(self):
return len(self.features)
def __contains__(self, item: MorphosyntacticFeature):
if not isinstance(item, MorphosyntacticFeature):
# raise TypeError(str(item) + " is not a MorphosyntacticFeature")
return False
else:
for i in self.features:
if item in self.features[i]:
return True
return False
f = MorphosyntacticFeatureBundle
def to_categorial(pos: int) -> "MorphosyntacticFeatureBundle":
"""Maps UD parts of speech to binary categorial feature bundles.
In some cases these are underspecified, including empty bundles for interjections.
>>> to_categorial(POS.adjective)
{F: [neg], N: [pos], V: [pos]}
>>> to_categorial(POS.particle)
{F: [pos]}
>>> to_categorial(POS.interjection)
{}
"""
if pos == POS.adjective or pos == POS.adverb:
return f(F.neg, N.pos, V.pos)
elif pos == POS.adposition:
return f(F.pos, N.neg, V.neg)
elif pos == POS.auxiliary:
return f(F.pos, N.neg, V.pos)
elif (
pos == POS.coordinating_conjunction
or pos == POS.subordinating_conjunction
or pos == POS.particle
):
return f(F.pos)
elif pos == POS.determiner or pos == POS.pronoun or pos == POS.numeral:
return f(F.pos, N.pos, V.neg)
elif pos == POS.noun or pos == POS.proper_noun:
return f(F.neg, N.pos, V.neg)
elif pos == POS.verb:
return f(F.neg, N.neg, V.pos)
else:
return f()
FORM_UD_MAP: Dict[str, Dict[str, MorphosyntacticFeature]] = {
# parts of speech
"POS": {
"ADJ": POS.adjective,
"ADP": POS.adposition,
"ADV": POS.adverb,
"AUX": POS.auxiliary,
"CCONJ": POS.coordinating_conjunction,
"DET": POS.determiner,
"INTJ": POS.interjection,
"NOUN": POS.noun,
"NUM": POS.numeral,
"PART": POS.particle,
"PRON": POS.pronoun,
"PROPN": POS.proper_noun,
"PUNCT": POS.punctuation,
"SCONJ": POS.subordinating_conjunction,
"SYM": POS.symbol,
"VERB": POS.verb,
"X": POS.other,
},
# verbal features
"VerbForm": {
"Conv": VerbForm.converb,
"Fin": VerbForm.finite,
"Gdv": VerbForm.gerundive,
"Ger": VerbForm.gerund,
"Inf": VerbForm.infinitive,
"Part": VerbForm.participle,
"Sup": VerbForm.supine,
"Vnoun": VerbForm.masdar,
},
"Mood": {
"Adm": Mood.admirative,
"Cnd": Mood.conditional,
"Des": Mood.desiderative,
"Imp": Mood.imperative,
"Ind": Mood.indicative,
"Jus": Mood.jussive,
"Nec": Mood.necessitative,
"Opt": Mood.optative,
"Pot": Mood.potential,
"Prp": Mood.purposive,
"Qot": Mood.quotative,
"Sub": Mood.subjunctive,
},
"Tense": {
"Fut": Tense.future,
"Imp": Tense.imperfect,
"Past": Tense.past,
"Pqp": Tense.pluperfect,
"Pres": Tense.present,
},
"Aspect": {
"Hab": Aspect.habitual,
"Imp": Aspect.imperfective,
"Iter": Aspect.iterative,
"Perf": Aspect.perfective,
"Prog": Aspect.progressive,
"Prosp": Aspect.prospective,
},
"Voice": {
"Act": Voice.active,
"Antip": Voice.antipassive,
"Bfoc": Voice.beneficiary_focus,
"Lfoc": Voice.location_focus,
"Caus": Voice.causative,
"Dir": Voice.direct,
"Inv": Voice.inverse,
"Mid": Voice.middle,
"Pass": Voice.passive,
"Rcp": Voice.reciprocal,
},
"Evident": {"Fh": Evidentiality.first_hand, "Nfh": Evidentiality.non_first_hand},
"Polarity": {"Pos": Polarity.pos, "Neg": Polarity.neg},
"Person": {
"0": Person.zeroth,
"1": Person.first,
"2": Person.second,
"3": Person.third,
"4": Person.fourth,
"Psor": Person.psor,
"Subj": Person.subj,
},
"Polite": {
"Elev": Politeness.elevated,
"Form": Politeness.formal,
"Humb": Politeness.humble,
"Infm": Politeness.informal,
},
"Clusivity": {"Ex": Clusivity.exclusive, "In": Clusivity.inclusive},
# nominal
"Gender": {
"Com": Gender.common,
"Fem": Gender.feminine,
"Masc": Gender.masculine,
"Neut": Gender.neuter,
"Psor": Gender.psor,
},
"Animacy": {
"Anim": Animacy.animate,
"Hum": Animacy.human,
"Inan": Animacy.inanimate,
"Nhum": Animacy.non_human,
},
"Number": {
"Coll": Number.collective,
"Count": Number.count_plural,
"Dual": Number.dual,
"Grpa": Number.greater_paucal,
"Grpl": Number.greater_plural,
"Inv": Number.inverse_number,
"Pauc": Number.paucal,
"Plur": Number.plural,
"Ptan": Number.plurale_tantum,
"Sing": Number.singular,
"Tri": Number.trial,
"Psor": Number.psor,
},
"NumForm": {
"Word": NumForm.word,
"Digit": NumForm.digit,
"Roman": NumForm.roman,
"Reference": NumForm.reference,
},
"Case": {
# structural cases
"Nom": Case.nominative,
"Acc": Case.accusative,
"Erg": Case.ergative,
"Abs": Case.absolutive,
# oblique cases
"Abe": Case.abessive,
"Ben": Case.befefactive,
"Caus": Case.causative,
"Cmp": Case.comparative,
"Cns": Case.considerative,
"Com": Case.comitative,
"Dat": Case.dative,
"Dis": Case.distributive,
"Equ": Case.equative,
"Gen": Case.genitive,
"Ins": Case.instrumental,
"Par": Case.partitive,
"Voc": Case.vocative,
# spatiotemporal cases
"Abl": Case.ablative,
"Add": Case.additive,
"Ade": Case.adessive,
"All": Case.allative,
"Del": Case.delative,
"Ela": Case.elative,
"Ess": Case.essive,
"Ill": Case.illative,
"Ine": Case.inessive,
"Lat": Case.lative,
"Loc": Case.locative,
"Per": Case.perlative,
"Sub": Case.sublative,
"Sup": Case.superessive,
"Ter": Case.terminative,
"Tem": Case.temporal,
"Tra": Case.translative,
},
"Definite": {
"Com": Definiteness.complex,
"Cons": Definiteness.construct_state,
"Def": Definiteness.definite,
"Ind": Definiteness.indefinite,
"Spec": Definiteness.specific_indefinite,
},
"Degree": {
"Abs": Degree.absolute_superlative,
"Cmp": Degree.comparative,
"Equ": Degree.equative,
"Pos": Degree.positive,
"Sup": Degree.superlative,
},
# other lexical
"PronType": {
"Art": PrononimalType.article,
"Dem": PrononimalType.demonstrative,
"Emp": PrononimalType.emphatic,
"Exc": PrononimalType.exclamative,
"Ind": PrononimalType.indefinite,
"Int": PrononimalType.interrogative,
"Neg": PrononimalType.negative,
"Prs": PrononimalType.personal,
"Rcp": PrononimalType.reciprocal,
"Rel": PrononimalType.relative,
"Tot": PrononimalType.total,
},
"AdpType": {
"Prep": AdpositionalType.preposition,
"Post": AdpositionalType.postposition,
"Circ": AdpositionalType.circumposition,
"Voc": AdpositionalType.vocalized_adposition,
},
"AdvType": {
"Man": AdverbialType.manner,
"Loc": AdverbialType.location,
"Tim": AdverbialType.time,
"Deg": AdverbialType.degree,
"Cau": AdverbialType.cause,
"Mod": AdverbialType.modality,
},
"VerbType": {
"Aux": VerbType.auxiliary,
"Cop": VerbType.copula,
"Mod": VerbType.modal,
"Light": VerbType.light,
},
"NumType": {
"Card": Numeral.cardinal,
"Dist": Numeral.distributive,
"Frac": Numeral.fractional,
"Mult": Numeral.multiplicative,
"Ord": Numeral.ordinal,
"Range": Numeral.range,
"Sets": Numeral.sets,
},
"NameType": {
"Geo": NameType.place,
"Prs": NameType.person,
"Giv": NameType.person_given_name,
"Sur": NameType.person_surname,
"Nat": NameType.nationality,
"Com": NameType.company,
"Pro": NameType.product,
"Oth": NameType.other,
},
"Strength": {"Strong": Strength.strong, "Weak": Strength.weak},
"Poss": {"Yes": Possessive.pos},
"Reflex": {"Yes": Reflexive.pos},
"Foreign": {"Yes": Foreign.pos},
"Abbr": {"Yes": Abbreviation.pos},
"Typo": {"Yes": Typo.pos},
}
def from_ud(feature_name: str, feature_value: str) -> Optional[MorphosyntacticFeature]:
"""For a given Universal Dependencies feature name and value,
return the appropriate feature class/value.
>>> from_ud('Case', 'Abl')
ablative
>>> from_ud('Abbr', 'Yes')
pos
>>> from_ud('PronType', 'Ind')
indefinite
"""
# Do cleanup on certain inputs that look like ``"Number[psor]``
# Thus this is rewritten to ``feature_name = Number``
# and ``feature_value = psor``.
if "[" in feature_name and "]" in feature_name:
feature_name_split: List[str] = feature_name.split("[", maxsplit=1)
feature_name = feature_name_split[0]
feature_value = feature_name_split[1][:-1]
feature_value = feature_value.title()
if feature_name in FORM_UD_MAP:
feature_map = FORM_UD_MAP[feature_name]
else:
msg1: str = f"Unrecognized UD `feature_name` ('{feature_name}') with `feature_value` ('{feature_value}')."
msg2: str = f"Please raise an issue at <https://github.com/cltk/cltk/issues> and include a small sample to reproduce the error."
print(msg1)
print(msg2)
# raise CLTKException(msg)
return None
values = feature_value.split(",")
for value in values:
if value in feature_map:
return feature_map[value]
else:
raise CLTKException(
f"{value}: Unrecognized value for UD feature {feature_name}"
)
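# Illustration only: a CoNLL-U FEATS string decodes by feeding each
# name/value pair through from_ud (helper name below is hypothetical,
# not part of the public API):
def _example_bundle_from_feats(feats: str) -> MorphosyntacticFeatureBundle:
    """Build a bundle from a FEATS string such as "Case=Acc|Number=Sing".

    >>> _example_bundle_from_feats("Case=Acc|Number=Sing")
    {Case: [accusative], Number: [singular]}
    """
    bundle = MorphosyntacticFeatureBundle()
    for pair in feats.split("|"):
        name, value = pair.split("=")
        feature = from_ud(name, value)
        if feature is not None:
            bundle[type(feature)] = feature
    return bundle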
|
{
"content_hash": "38038ce608f01f213732f13e8cde6620",
"timestamp": "",
"source": "github",
"line_count": 480,
"max_line_length": 136,
"avg_line_length": 33.15,
"alnum_prop": 0.5437405731523378,
"repo_name": "cltk/cltk",
"id": "16fed53cdacf1130c956cf0dfbf1a02b7a6af909",
"size": "15912",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/cltk/morphology/morphosyntax.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "120521"
},
{
"name": "Makefile",
"bytes": "2633"
},
{
"name": "Python",
"bytes": "3336083"
}
],
"symlink_target": ""
}
|
from datetime import datetime
from itertools import imap
from bson import ObjectId
from django.core.urlresolvers import reverse
from django.utils.http import urlencode
from django.views.generic import TemplateView, FormView, ListView, RedirectView, View
from django.conf import settings
from django import http
from tastypie.http import HttpNoContent
from blog.models import Post
from profiles.mixins import LoginRequiredMixin
from newsfeed.models import Entry
from newsfeed.constants import *
from documents.constants import *
from documents.forms import DocumentForm, ForkDocumentForm, SearchForm
from documents.mixins import DocumentMixin
from documents.models import Document
from documents.resources import DocumentResource
from documents.utils import extract_keywords
from documents.signals import (document_done, fork_done, star_done,
document_delete, fork_delete)
from documents.exporters.sql import (MysqlExporter, PostgresExporter,
SQLiteExporter, OracleExporter)
DOCUMENT_EXPORTERS = {
EXPORTER_MYSQL: MysqlExporter,
EXPORTER_POSTGRES: PostgresExporter,
EXPORTER_SQLITE: SQLiteExporter,
EXPORTER_ORACLE: OracleExporter,
}
class HomeView(TemplateView):
template_name = "index.html"
def get_context_data(self, **kwargs):
if self.request.user.is_anonymous():
is_public = True
else:
is_public = self.request.GET.get("public") == "true"
try:
page_number = int(self.request.GET.get("page"))
except (ValueError, TypeError):
page_number = 1
newsfeed = self.get_newsfeed(
public=is_public,
offset=NEWSFEED_LIMIT * (page_number - 1))
if NEWSFEED_LIMIT * page_number < newsfeed.count():
next_page_url = self.get_next_page_url(self.request, page_number)
else:
next_page_url = None
return {
"is_public": is_public,
"newsfeed": imap(Entry, newsfeed),
"next_page_url": next_page_url,
"featured_documents": self.get_featured_documents(),
"starred_documents": self.get_starred_documents(),
"latest_posts": self.get_latest_posts(),
"search_form": SearchForm()
}
def get_featured_documents(self):
return Document.objects.featured()
def get_starred_documents(self):
if self.request.user.is_anonymous():
return []
return Document.objects.starred(user_id=self.request.user.id)
def get_newsfeed(self, public=True, offset=0, limit=NEWSFEED_LIMIT):
"""
Fetches news items from the newsfeed database
"""
parameters = {
"news_type": {
"$in": [NEWS_TYPE_REGISTRATION,
NEWS_TYPE_COMMENT,
NEWS_TYPE_DOCUMENT,
NEWS_TYPE_FORK,
NEWS_TYPE_STAR,
NEWS_TYPE_FOLLOWING]
}}
if not public:
parameters["recipients"] = {
"$in": [self.request.user.pk]
}
newsfeed = Entry.objects.collection.find(
parameters).sort([("date_created", -1)])
return newsfeed[offset:offset + limit]
def get_next_page_url(self, request, page_number):
"""
Builds the next page link from GET parameters.
"""
return "%(newsfeed_url)s?%(parameters)s" % {
"newsfeed_url": reverse("home"),
"parameters": urlencode({
"public": request.GET.get("public") or "false",
"page": page_number + 1
})}
def get_latest_posts(self):
return Post.objects.all()[:10]
class DocumentDetailView(DocumentMixin, TemplateView):
template_name = "documents/show.html"
def get_context_data(self, **kwargs):
return {"document": self.get_document(),
"exporters": EXPORTERS}
class ExportDocumentView(DocumentMixin, View):
def get(self, *args, **kwargs):
klass = DOCUMENT_EXPORTERS.get(kwargs.get("exporter"))
if klass is None:
return http.HttpResponseBadRequest()
document = self.get_document()
exporter = klass(document)
return http.HttpResponse(exporter.as_text(), content_type="text/plain")
class DocumentForksView(DocumentDetailView):
template_name = "documents/forks.html"
def get_context_data(self, **kwargs):
context = super(DocumentForksView, self).get_context_data(**kwargs)
context["forks"] = context.get("document").forks()
return context
class DocumentStarsView(DocumentDetailView):
template_name = "documents/stars.html"
class StarDocumentView(LoginRequiredMixin, RedirectView, DocumentMixin):
def post(self, request, *args, **kwargs):
document = self.get_document()
stars = document.get_stars()
if request.user.pk in stars:
stars.remove(request.user.pk)
else:
stars.append(request.user.pk)
star_done.send(sender=self, instance=document,
user=request.user)
Document.objects.collection.update(
{"_id": document.pk},
{"$set": {"stars": stars, "star_count": len(stars)}})
return super(StarDocumentView, self).post(request, *args, **kwargs)
def get_redirect_url(self, **kwargs):
return reverse("show_document", args=[self.kwargs.get("slug")])
class DocumentEditView(LoginRequiredMixin, DocumentDetailView):
template_name = "documents/edit.html"
def get(self, request, *args, **kwargs):
if not self.is_authorized():
return self.redirect()
return super(DocumentEditView, self).get(request, *args, **kwargs)
def delete(self, *args, **kwargs):
if not self.is_authorized():
return self.redirect()
document = self.get_document()
if document.fork_of is not None:
signal = fork_delete
else:
signal = document_delete
signal.send(sender=self, instance=self.get_document())
resource = DocumentResource()
resource.obj_delete(pk=self.kwargs.get("slug"))
return HttpNoContent()
def is_authorized(self):
return self.get_document().is_editable(user_id=self.request.user.id)
def redirect(self):
return http.HttpResponseRedirect(
reverse("show_document", kwargs=self.kwargs))
def get_context_data(self, **kwargs):
context = super(DocumentEditView, self).get_context_data(**kwargs)
context["edit"] = True
context["FIELD_TYPES"] = FIELD_TYPES
context["SOCKETIO_HOST"] = settings.SOCKETIO_HOST
return context
class NewDocumentView(LoginRequiredMixin, FormView):
form_class = DocumentForm
template_name = "documents/new.html"
def form_valid(self, form, **kwargs):
self.object_id = Document.objects.collection.insert({
"title": form.cleaned_data.get("title"),
"user_id": self.request.user.pk,
"date_created": datetime.now(),
"entities": form.cleaned_data.get("entities"),
"is_public": form.cleaned_data.get("is_public"),
"_keywords": extract_keywords(form.cleaned_data.get("title"))
})
document = Document.objects.get(_id=ObjectId(self.object_id))
document_done.send(sender=self, instance=document)
return super(NewDocumentView, self).form_valid(form)
def get_success_url(self):
return reverse("edit_document", args=[self.object_id])
class MyDocumentsView(LoginRequiredMixin, TemplateView):
template_name = "documents/list.html"
def get_context_data(self, **kwargs):
return {
"documents": self.get_documents(),
"shared": self.get_shared_documents()
}
def get_documents(self):
collection = Document.objects.for_user(self.request.user.id)
return map(Document, collection)
def get_shared_documents(self):
collection = Document.objects.assigned(self.request.user.id)
return map(Document, collection)
class SearchDocumentView(ListView):
template_name = "documents/search.html"
context_object_name = "documents"
def get_queryset(self):
form = self.get_form()
if not form.is_valid():
return []
keyword = form.cleaned_data.get("keyword")
collection = Document.objects.collection.find({
"_keywords": {"$all": keyword.split()}})
return map(Document, collection)
def get_context_data(self, **kwargs):
return super(SearchDocumentView, self).get_context_data(
search_form=self.form,
keyword=self.request.GET.get("keyword"),
**kwargs)
def get_form(self):
self.form = SearchForm(self.request.GET)
return self.form
class ForkDocumentView(DocumentMixin, NewDocumentView):
form_class = ForkDocumentForm
template_name = "documents/fork.html"
def get_initial(self):
return {
"title": self.get_document().title
}
def form_valid(self, form, **kwargs):
document = self.get_document()
self.object_id = Document.objects.collection.insert({
"title": form.cleaned_data.get("title"),
"user_id": self.request.user.pk,
"entities": document.entities,
"fork_of": document.pk,
"date_created": datetime.now(),
"is_public": document.is_public,
"_keywords": extract_keywords(form.cleaned_data.get("title"))
})
Document.objects.collection.update(
{'_id': ObjectId(document.pk)},
{"$inc": {'fork_count': 1}})
document = Document.objects.get(_id=ObjectId(self.object_id))
fork_done.send(sender=self, instance=document)
        # Intentionally skip NewDocumentView.form_valid: the fork record was
        # already inserted above, so defer straight to FormView.form_valid.
        return super(NewDocumentView, self).form_valid(form)
def get_context_data(self, **kwargs):
data = super(ForkDocumentView, self).get_context_data(**kwargs)
data["document_id"] = self.get_document()._id
return data
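# Illustration only: the 1-based pagination arithmetic used by HomeView.
# Page N covers entries [NEWSFEED_LIMIT * (N - 1), NEWSFEED_LIMIT * N),
# and a next-page link is emitted only while NEWSFEED_LIMIT * N is still
# below the total entry count (helper name below is hypothetical).
def _example_page_window(page_number, total_entries, limit=NEWSFEED_LIMIT):
    offset = limit * (page_number - 1)
    has_next = limit * page_number < total_entries
    return offset, has_next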
|
{
"content_hash": "2a4cc3ff6cd4f7fb194402786658c0c8",
"timestamp": "",
"source": "github",
"line_count": 312,
"max_line_length": 85,
"avg_line_length": 32.81730769230769,
"alnum_prop": 0.6183221017677507,
"repo_name": "fatiherikli/dbpatterns",
"id": "6ec0ed5bf7c4cbaa319952c9896a82878a3c16a4",
"size": "10239",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "web/dbpatterns/documents/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "50678"
},
{
"name": "Elixir",
"bytes": "489"
},
{
"name": "Gherkin",
"bytes": "6360"
},
{
"name": "HTML",
"bytes": "49402"
},
{
"name": "JavaScript",
"bytes": "735986"
},
{
"name": "Python",
"bytes": "106434"
},
{
"name": "Shell",
"bytes": "60"
}
],
"symlink_target": ""
}
|
"""Tests for DNNEstimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import tempfile
import numpy as np
import tensorflow as tf
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import estimator_test_utils
from tensorflow.contrib.learn.python.learn.metric_spec import MetricSpec
from tensorflow.python.ops import math_ops
def _prepare_iris_data_for_logistic_regression():
# Converts iris data to a logistic regression problem.
iris = tf.contrib.learn.datasets.load_iris()
ids = np.where((iris.target == 0) | (iris.target == 1))
iris = tf.contrib.learn.datasets.base.Dataset(data=iris.data[ids],
target=iris.target[ids])
return iris
def _iris_input_logistic_fn():
iris = _prepare_iris_data_for_logistic_regression()
return {
'feature': tf.constant(iris.data, dtype=tf.float32)
}, tf.constant(iris.target, shape=[100, 1], dtype=tf.int32)
def _iris_input_multiclass_fn():
iris = tf.contrib.learn.datasets.load_iris()
return {
'feature': tf.constant(iris.data, dtype=tf.float32)
}, tf.constant(iris.target, shape=[150, 1], dtype=tf.int32)
class DNNClassifierTest(tf.test.TestCase):
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(
self, tf.contrib.learn.DNNClassifier)
def testLogisticRegression_MatrixData(self):
"""Tests binary classification using matrix data as input."""
cont_features = [
tf.contrib.layers.real_valued_column('feature', dimension=4)]
classifier = tf.contrib.learn.DNNClassifier(
feature_columns=cont_features,
hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_iris_input_logistic_fn, steps=100)
scores = classifier.evaluate(input_fn=_iris_input_logistic_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
self.assertLess(scores['loss'], 0.3)
def testLogisticRegression_MatrixData_Labels1D(self):
"""Same as the last test, but label shape is [100] instead of [100, 1]."""
def _input_fn():
iris = _prepare_iris_data_for_logistic_regression()
return {
'feature': tf.constant(iris.data, dtype=tf.float32)
}, tf.constant(iris.target, shape=[100], dtype=tf.int32)
cont_features = [
tf.contrib.layers.real_valued_column('feature', dimension=4)]
classifier = tf.contrib.learn.DNNClassifier(
feature_columns=cont_features,
hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testLogisticRegression_NpMatrixData(self):
"""Tests binary classification using numpy matrix data as input."""
iris = _prepare_iris_data_for_logistic_regression()
train_x = iris.data
train_y = iris.target
feature_columns = [tf.contrib.layers.real_valued_column('', dimension=4)]
classifier = tf.contrib.learn.DNNClassifier(
feature_columns=feature_columns,
hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
classifier.fit(x=train_x, y=train_y, steps=100)
scores = classifier.evaluate(x=train_x, y=train_y, steps=1)
self.assertGreater(scores['accuracy'], 0.8)
def testLogisticRegression_TensorData(self):
"""Tests binary classification using tensor data as input."""
def _input_fn(num_epochs=None):
features = {
'age': tf.train.limit_epochs(
tf.constant([[.8], [0.2], [.1]]), num_epochs=num_epochs),
'language': tf.SparseTensor(
values=tf.train.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
shape=[3, 2])
}
return features, tf.constant([[1], [0], [0]], dtype=tf.int32)
language_column = tf.contrib.layers.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
tf.contrib.layers.embedding_column(language_column, dimension=1),
tf.contrib.layers.real_valued_column('age')
]
classifier = tf.contrib.learn.DNNClassifier(
n_classes=2,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
self.assertLess(scores['loss'], 0.3)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = list(
classifier.predict(input_fn=predict_input_fn, as_iterable=True))
self.assertListEqual(predictions, [1, 0, 0])
def testLogisticRegression_FloatLabel(self):
"""Tests binary classification with float labels."""
def _input_fn_float_label(num_epochs=None):
features = {
'age': tf.train.limit_epochs(
tf.constant([[50], [20], [10]]), num_epochs=num_epochs),
'language': tf.SparseTensor(
values=tf.train.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
shape=[3, 2])
}
labels = tf.constant([[0.8], [0.], [0.2]], dtype=tf.float32)
return features, labels
language_column = tf.contrib.layers.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
tf.contrib.layers.embedding_column(language_column, dimension=1),
tf.contrib.layers.real_valued_column('age')
]
classifier = tf.contrib.learn.DNNClassifier(
n_classes=2,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_float_label, steps=1000)
predict_input_fn = functools.partial(_input_fn_float_label, num_epochs=1)
predictions_proba = list(
classifier.predict_proba(input_fn=predict_input_fn, as_iterable=True))
# Prediction probabilities mirror the labels column, which proves that the
# classifier learns from float input.
self.assertAllClose(
predictions_proba, [[0.2, 0.8], [1., 0.], [0.8, 0.2]], atol=0.05)
predictions = list(
classifier.predict(input_fn=predict_input_fn, as_iterable=True))
self.assertListEqual(predictions, [1, 0, 0])
def testMultiClass_MatrixData(self):
"""Tests multi-class classification using matrix data as input."""
cont_features = [
tf.contrib.layers.real_valued_column('feature', dimension=4)]
classifier = tf.contrib.learn.DNNClassifier(
n_classes=3,
feature_columns=cont_features,
hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_iris_input_multiclass_fn, steps=200)
self.assertTrue('centered_bias_weight' in classifier.get_variable_names())
scores = classifier.evaluate(input_fn=_iris_input_multiclass_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.8)
self.assertLess(scores['loss'], 0.3)
def testMultiClass_MatrixData_Labels1D(self):
"""Same as the last test, but label shape is [150] instead of [150, 1]."""
def _input_fn():
iris = tf.contrib.learn.datasets.load_iris()
return {
'feature': tf.constant(iris.data, dtype=tf.float32)
}, tf.constant(iris.target, shape=[150], dtype=tf.int32)
cont_features = [
tf.contrib.layers.real_valued_column('feature', dimension=4)]
classifier = tf.contrib.learn.DNNClassifier(
n_classes=3,
feature_columns=cont_features,
hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=200)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.8)
def testMultiClass_NpMatrixData(self):
"""Tests multi-class classification using numpy matrix data as input."""
iris = tf.contrib.learn.datasets.load_iris()
train_x = iris.data
train_y = iris.target
feature_columns = [tf.contrib.layers.real_valued_column('', dimension=4)]
classifier = tf.contrib.learn.DNNClassifier(
n_classes=3,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=3))
classifier.fit(x=train_x, y=train_y, steps=200)
scores = classifier.evaluate(x=train_x, y=train_y, steps=1)
self.assertGreater(scores['accuracy'], 0.8)
def testLoss(self):
"""Tests loss calculation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# The logistic prediction should be (y = 0.25).
labels = tf.constant([[1], [0], [0], [0]])
features = {
'x': tf.ones(shape=[4, 1], dtype=tf.float32),
}
return features, labels
classifier = tf.contrib.learn.DNNClassifier(
n_classes=2,
feature_columns=[tf.contrib.layers.real_valued_column('x')],
hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=100)
scores = classifier.evaluate(input_fn=_input_fn_train, steps=1)
# Cross entropy = -0.25*log(0.25)-0.75*log(0.75) = 0.562
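    # A quick sanity check of that constant (illustrative only): with p = 0.25
    # predicted for every row and labels [1, 0, 0, 0],
    #   >>> import math
    #   >>> -(math.log(0.25) + 3 * math.log(0.75)) / 4
    #   0.5623...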
self.assertAlmostEqual(scores['loss'], 0.562, delta=0.1)
def testLossWithWeights(self):
"""Tests loss calculation with weights."""
def _input_fn_train():
# 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))
# The logistic prediction should be (y = 0.25).
labels = tf.constant([[1.], [0.], [0.], [0.]])
features = {
'x': tf.ones(shape=[4, 1], dtype=tf.float32),
'w': tf.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
def _input_fn_eval():
# 4 rows, with different weights.
labels = tf.constant([[1.], [0.], [0.], [0.]])
features = {
'x': tf.ones(shape=[4, 1], dtype=tf.float32),
'w': tf.constant([[7.], [1.], [1.], [1.]])
}
return features, labels
classifier = tf.contrib.learn.DNNClassifier(
weight_column_name='w',
n_classes=2,
feature_columns=[tf.contrib.layers.real_valued_column('x')],
hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=100)
scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
# Weighted cross entropy = (-7*log(0.25)-3*log(0.75))/10 = 1.06
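    # Step by step: the model again predicts p = 0.25 for every row, so the
    # per-row losses are -log(0.25) ~= 1.386 for the positive row and
    # -log(0.75) ~= 0.288 for each negative row; with eval weights
    # [7, 1, 1, 1] that gives (7*1.386 + 3*0.288) / 10 ~= 1.06.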
self.assertAlmostEqual(scores['loss'], 1.06, delta=0.1)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
      # First row has more weight than others. Model should fit (y=x) better
      # than (y=Not(x)) due to the relatively higher weight of the first row.
labels = tf.constant([[1], [0], [0], [0]])
features = {
'x': tf.ones(shape=[4, 1], dtype=tf.float32),
'w': tf.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = tf.constant([[1], [1], [1], [1]])
features = {
'x': tf.ones(shape=[4, 1], dtype=tf.float32),
'w': tf.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
classifier = tf.contrib.learn.DNNClassifier(
weight_column_name='w',
feature_columns=[tf.contrib.layers.real_valued_column('x')],
hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=3))
classifier.fit(input_fn=_input_fn_train, steps=100)
scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
# The model should learn (y = x) because of the weights, so the accuracy
# should be close to 1.
self.assertGreater(scores['accuracy'], 0.9)
def testPredict_AsIterableFalse(self):
"""Tests predict and predict_prob methods with as_iterable=False."""
def _input_fn(num_epochs=None):
features = {
'age': tf.train.limit_epochs(
tf.constant([[.8], [.2], [.1]]), num_epochs=num_epochs),
'language': tf.SparseTensor(
values=tf.train.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
shape=[3, 2])
}
return features, tf.constant([[1], [0], [0]], dtype=tf.int32)
sparse_column = tf.contrib.layers.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
tf.contrib.layers.embedding_column(sparse_column, dimension=1)
]
classifier = tf.contrib.learn.DNNClassifier(
n_classes=3,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=3))
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
self.assertLess(scores['loss'], 0.3)
predictions = classifier.predict(input_fn=_input_fn, as_iterable=False)
self.assertListEqual(list(predictions), [1, 0, 0])
predictions = classifier.predict_proba(input_fn=_input_fn,
as_iterable=False)
self.assertAllClose(
predictions, [[0., 1., 0.], [1., 0., 0.], [1., 0., 0.]], atol=0.1)
def testPredict_AsIterable(self):
"""Tests predict and predict_prob methods with as_iterable=True."""
def _input_fn(num_epochs=None):
features = {
'age': tf.train.limit_epochs(
tf.constant([[.8], [.2], [.1]]), num_epochs=num_epochs),
'language': tf.SparseTensor(
values=tf.train.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
shape=[3, 2])
}
return features, tf.constant([[1], [0], [0]], dtype=tf.int32)
language_column = tf.contrib.layers.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
tf.contrib.layers.embedding_column(language_column, dimension=1),
tf.contrib.layers.real_valued_column('age')
]
classifier = tf.contrib.learn.DNNClassifier(
n_classes=3,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=200)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
self.assertLess(scores['loss'], 0.3)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = list(
classifier.predict(input_fn=predict_input_fn, as_iterable=True))
self.assertListEqual(predictions, [1, 0, 0])
predictions = list(
classifier.predict_proba(input_fn=predict_input_fn, as_iterable=True))
self.assertAllClose(
predictions, [[0., 1., 0.], [1., 0., 0.], [1., 0., 0.]], atol=0.3)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = tf.constant([[1], [0], [0], [0]])
features = {
'x': tf.train.limit_epochs(
tf.ones(shape=[4, 1], dtype=tf.float32), num_epochs=num_epochs),
}
return features, labels
def _my_metric_op(predictions, labels):
# For the case of binary classification, the 2nd column of "predictions"
# denotes the model predictions.
labels = tf.to_float(labels)
predictions = tf.slice(predictions, [0, 1], [-1, 1])
labels = math_ops.cast(labels, predictions.dtype)
return tf.reduce_sum(tf.mul(predictions, labels))
classifier = tf.contrib.learn.DNNClassifier(
feature_columns=[tf.contrib.layers.real_valued_column('x')],
hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={
'my_accuracy': MetricSpec(
metric_fn=tf.contrib.metrics.streaming_accuracy,
prediction_key='classes'),
'my_precision': MetricSpec(
metric_fn=tf.contrib.metrics.streaming_precision,
prediction_key='classes'),
'my_metric': MetricSpec(
metric_fn=_my_metric_op,
prediction_key='probabilities')
})
self.assertTrue(
set(['loss', 'my_accuracy', 'my_precision', 'my_metric'
]).issubset(set(scores.keys())))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(classifier.predict(input_fn=predict_input_fn)))
self.assertEqual(_sklearn.accuracy_score([1, 0, 0, 0], predictions),
scores['my_accuracy'])
# Test the case where the 2nd element of the key is neither "classes" nor
# "probabilities".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={
'bad_name': MetricSpec(
metric_fn=tf.contrib.metrics.streaming_auc,
prediction_key='bad_type')})
def testTrainSaveLoad(self):
"""Tests that insures you can save and reload a trained model."""
def _input_fn(num_epochs=None):
features = {
'age': tf.train.limit_epochs(
tf.constant([[.8], [.2], [.1]]), num_epochs=num_epochs),
'language': tf.SparseTensor(
values=tf.train.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
shape=[3, 2])
}
return features, tf.constant([[1], [0], [0]], dtype=tf.int32)
sparse_column = tf.contrib.layers.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
tf.contrib.layers.embedding_column(sparse_column, dimension=1)
]
model_dir = tempfile.mkdtemp()
classifier = tf.contrib.learn.DNNClassifier(
model_dir=model_dir,
n_classes=3,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=100)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions1 = classifier.predict(input_fn=predict_input_fn)
del classifier
classifier2 = tf.contrib.learn.DNNClassifier(
model_dir=model_dir,
n_classes=3,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
predictions2 = classifier2.predict(input_fn=predict_input_fn)
self.assertEqual(list(predictions1), list(predictions2))
def testTrainWithPartitionedVariables(self):
"""Tests training with partitioned variables."""
def _input_fn(num_epochs=None):
features = {
'age': tf.train.limit_epochs(
tf.constant([[.8], [.2], [.1]]), num_epochs=num_epochs),
'language': tf.SparseTensor(
values=tf.train.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
shape=[3, 2])
}
return features, tf.constant([[1], [0], [0]], dtype=tf.int32)
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
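    # (Rough sizing: 2e7 buckets of one float32 each is about 80 MB of
    # embedding weights, which is assumed to exceed the estimator's default
    # min_slice_size of 64 MB.)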
sparse_column = tf.contrib.layers.sparse_column_with_hash_bucket(
'language', hash_bucket_size=2e7)
feature_columns = [
tf.contrib.layers.embedding_column(sparse_column, dimension=1)
]
classifier = tf.contrib.learn.DNNClassifier(
n_classes=3,
feature_columns=feature_columns,
hidden_units=[3, 3],
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config=tf.contrib.learn.RunConfig(
num_ps_replicas=2, cluster_spec=tf.train.ClusterSpec({}),
tf_random_seed=5))
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
self.assertLess(scores['loss'], 0.3)
def testExport(self):
"""Tests export model for servo."""
def input_fn():
return {
'age': tf.constant([1]),
'language': tf.SparseTensor(values=['english'],
indices=[[0, 0]],
shape=[1, 1])
}, tf.constant([[1]])
language = tf.contrib.layers.sparse_column_with_hash_bucket('language', 100)
feature_columns = [
tf.contrib.layers.real_valued_column('age'),
tf.contrib.layers.embedding_column(language, dimension=1)
]
classifier = tf.contrib.learn.DNNClassifier(
feature_columns=feature_columns,
hidden_units=[3, 3])
classifier.fit(input_fn=input_fn, steps=100)
export_dir = tempfile.mkdtemp()
classifier.export(export_dir)
def testDisableCenteredBias(self):
"""Tests that we can disable centered bias."""
cont_features = [
tf.contrib.layers.real_valued_column('feature', dimension=4)]
classifier = tf.contrib.learn.DNNClassifier(
n_classes=3,
feature_columns=cont_features,
hidden_units=[3, 3],
enable_centered_bias=False,
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_iris_input_multiclass_fn, steps=200)
self.assertFalse('centered_bias_weight' in classifier.get_variable_names())
scores = classifier.evaluate(input_fn=_iris_input_multiclass_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.8)
self.assertLess(scores['loss'], 0.3)
class DNNRegressorTest(tf.test.TestCase):
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(
self, tf.contrib.learn.DNNRegressor)
def testRegression_MatrixData(self):
"""Tests regression using matrix data as input."""
cont_features = [
tf.contrib.layers.real_valued_column('feature', dimension=4)]
regressor = tf.contrib.learn.DNNRegressor(
feature_columns=cont_features,
hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_iris_input_logistic_fn, steps=200)
scores = regressor.evaluate(input_fn=_iris_input_logistic_fn, steps=1)
self.assertLess(scores['loss'], 0.3)
def testRegression_MatrixData_Labels1D(self):
"""Same as the last test, but label shape is [100] instead of [100, 1]."""
def _input_fn():
iris = _prepare_iris_data_for_logistic_regression()
return {
'feature': tf.constant(iris.data, dtype=tf.float32)
}, tf.constant(iris.target, shape=[100], dtype=tf.int32)
cont_features = [
tf.contrib.layers.real_valued_column('feature', dimension=4)]
regressor = tf.contrib.learn.DNNRegressor(
feature_columns=cont_features,
hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=200)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertLess(scores['loss'], 0.3)
def testRegression_NpMatrixData(self):
"""Tests binary classification using numpy matrix data as input."""
iris = _prepare_iris_data_for_logistic_regression()
train_x = iris.data
train_y = iris.target
feature_columns = [tf.contrib.layers.real_valued_column('', dimension=4)]
regressor = tf.contrib.learn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
regressor.fit(x=train_x, y=train_y, steps=200)
scores = regressor.evaluate(x=train_x, y=train_y, steps=1)
self.assertLess(scores['loss'], 0.3)
def testRegression_TensorData(self):
"""Tests regression using tensor data as input."""
def _input_fn(num_epochs=None):
features = {
'age': tf.train.limit_epochs(
tf.constant([[.8], [.15], [0.]]), num_epochs=num_epochs),
'language': tf.SparseTensor(
values=tf.train.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
shape=[3, 2])
}
return features, tf.constant([1., 0., 0.2], dtype=tf.float32)
language_column = tf.contrib.layers.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
tf.contrib.layers.embedding_column(language_column, dimension=1),
tf.contrib.layers.real_valued_column('age')
]
regressor = tf.contrib.learn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=200)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertLess(scores['loss'], 0.3)
def testLoss(self):
"""Tests loss calculation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = tf.constant([[1.], [0.], [0.], [0.]])
features = {
'x': tf.ones(shape=[4, 1], dtype=tf.float32),
}
return features, labels
regressor = tf.contrib.learn.DNNRegressor(
feature_columns=[tf.contrib.layers.real_valued_column('x')],
hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=3))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_train, steps=1)
# Average square loss = (0.75^2 + 3*0.25^2) / 4 = 0.1875
self.assertAlmostEqual(scores['loss'], 0.1875, delta=0.1)
def testLossWithWeights(self):
"""Tests loss calculation with weights."""
def _input_fn_train():
# 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = tf.constant([[1.], [0.], [0.], [0.]])
features = {
'x': tf.ones(shape=[4, 1], dtype=tf.float32),
'w': tf.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
def _input_fn_eval():
# 4 rows, with different weights.
labels = tf.constant([[1.], [0.], [0.], [0.]])
features = {
'x': tf.ones(shape=[4, 1], dtype=tf.float32),
'w': tf.constant([[7.], [1.], [1.], [1.]])
}
return features, labels
regressor = tf.contrib.learn.DNNRegressor(
weight_column_name='w',
feature_columns=[tf.contrib.layers.real_valued_column('x')],
hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
# Weighted average square loss = (7*0.75^2 + 3*0.25^2) / 10 = 0.4125
self.assertAlmostEqual(scores['loss'], 0.4125, delta=0.1)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
      # First row has more weight than others. Model should fit (y=x) better
      # than (y=Not(x)) due to the relatively higher weight of the first row.
labels = tf.constant([[1.], [0.], [0.], [0.]])
features = {
'x': tf.ones(shape=[4, 1], dtype=tf.float32),
'w': tf.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = tf.constant([[1.], [1.], [1.], [1.]])
features = {
'x': tf.ones(shape=[4, 1], dtype=tf.float32),
'w': tf.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
regressor = tf.contrib.learn.DNNRegressor(
weight_column_name='w',
feature_columns=[tf.contrib.layers.real_valued_column('x')],
hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=3))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
# The model should learn (y = x) because of the weights, so the loss should
# be close to zero.
self.assertLess(scores['loss'], 0.2)
def testPredict_AsIterableFalse(self):
"""Tests predict method with as_iterable=False."""
labels = [1., 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age': tf.train.limit_epochs(
tf.constant([[0.8], [0.15], [0.]]), num_epochs=num_epochs),
'language': tf.SparseTensor(
values=tf.train.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
shape=[3, 2])
}
return features, tf.constant(labels, dtype=tf.float32)
sparse_column = tf.contrib.layers.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
tf.contrib.layers.embedding_column(sparse_column, dimension=1),
tf.contrib.layers.real_valued_column('age')
]
regressor = tf.contrib.learn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=3))
regressor.fit(input_fn=_input_fn, steps=200)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertLess(scores['loss'], 0.2)
predictions = regressor.predict(input_fn=_input_fn, as_iterable=False)
self.assertAllClose(labels, predictions, atol=0.2)
def testPredict_AsIterable(self):
"""Tests predict method with as_iterable=True."""
labels = [1., 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age': tf.train.limit_epochs(
tf.constant([[0.8], [0.15], [0.]]), num_epochs=num_epochs),
'language': tf.SparseTensor(
values=tf.train.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
shape=[3, 2])
}
return features, tf.constant(labels, dtype=tf.float32)
sparse_column = tf.contrib.layers.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
tf.contrib.layers.embedding_column(sparse_column, dimension=1),
tf.contrib.layers.real_valued_column('age')
]
regressor = tf.contrib.learn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=3))
regressor.fit(input_fn=_input_fn, steps=200)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertLess(scores['loss'], 0.2)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = list(
regressor.predict(input_fn=predict_input_fn, as_iterable=True))
self.assertAllClose(labels, predictions, atol=0.2)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = tf.constant([[1.], [0.], [0.], [0.]])
features = {
'x': tf.train.limit_epochs(
tf.ones(shape=[4, 1], dtype=tf.float32), num_epochs=num_epochs),
}
return features, labels
def _my_metric_op(predictions, labels):
return tf.reduce_sum(tf.mul(predictions, labels))
regressor = tf.contrib.learn.DNNRegressor(
feature_columns=[tf.contrib.layers.real_valued_column('x')],
hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'my_error': tf.contrib.metrics.streaming_mean_squared_error,
'my_metric': _my_metric_op
})
self.assertIn('loss', set(scores.keys()))
self.assertIn('my_error', set(scores.keys()))
self.assertIn('my_metric', set(scores.keys()))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(regressor.predict(input_fn=predict_input_fn)))
self.assertAlmostEqual(
_sklearn.mean_squared_error(np.array([1, 0, 0, 0]), predictions),
scores['my_error'])
# Tests that when the key is a tuple, an error is raised.
with self.assertRaises(KeyError):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={('my_error', 'predictions'):
tf.contrib.metrics.streaming_mean_squared_error})
def testTrainSaveLoad(self):
"""Tests that insures you can save and reload a trained model."""
def _input_fn(num_epochs=None):
features = {
'age': tf.train.limit_epochs(
tf.constant([[0.8], [0.15], [0.]]), num_epochs=num_epochs),
'language': tf.SparseTensor(
values=tf.train.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
shape=[3, 2])
}
return features, tf.constant([1., 0., 0.2], dtype=tf.float32)
sparse_column = tf.contrib.layers.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
tf.contrib.layers.embedding_column(sparse_column, dimension=1),
tf.contrib.layers.real_valued_column('age')
]
model_dir = tempfile.mkdtemp()
regressor = tf.contrib.learn.DNNRegressor(
model_dir=model_dir,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=3))
regressor.fit(input_fn=_input_fn, steps=100)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = list(regressor.predict(input_fn=predict_input_fn))
del regressor
regressor2 = tf.contrib.learn.DNNRegressor(
model_dir=model_dir,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=3))
predictions2 = list(regressor2.predict(input_fn=predict_input_fn))
self.assertAllClose(predictions, predictions2)
def testTrainWithPartitionedVariables(self):
"""Tests training with partitioned variables."""
def _input_fn(num_epochs=None):
features = {
'age': tf.train.limit_epochs(
tf.constant([[0.8], [0.15], [0.]]), num_epochs=num_epochs),
'language': tf.SparseTensor(
values=tf.train.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
shape=[3, 2])
}
return features, tf.constant([1., 0., 0.2], dtype=tf.float32)
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
sparse_column = tf.contrib.layers.sparse_column_with_hash_bucket(
'language', hash_bucket_size=2e7)
feature_columns = [
tf.contrib.layers.embedding_column(sparse_column, dimension=1),
tf.contrib.layers.real_valued_column('age')
]
regressor = tf.contrib.learn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config=tf.contrib.learn.RunConfig(
num_ps_replicas=2, cluster_spec=tf.train.ClusterSpec({}),
tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertLess(scores['loss'], 0.3)
def testDisableCenteredBias(self):
"""Tests that we can disable centered bias."""
def _input_fn(num_epochs=None):
features = {
'age': tf.train.limit_epochs(
tf.constant([[0.8], [0.15], [0.]]), num_epochs=num_epochs),
'language': tf.SparseTensor(
values=tf.train.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
shape=[3, 2])
}
return features, tf.constant([1., 0., 0.2], dtype=tf.float32)
sparse_column = tf.contrib.layers.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
tf.contrib.layers.embedding_column(sparse_column, dimension=1),
tf.contrib.layers.real_valued_column('age')
]
regressor = tf.contrib.learn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
enable_centered_bias=False,
config=tf.contrib.learn.RunConfig(tf_random_seed=3))
regressor.fit(input_fn=_input_fn, steps=200)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertLess(scores['loss'], 0.3)
def boston_input_fn():
boston = tf.contrib.learn.datasets.load_boston()
features = tf.cast(tf.reshape(tf.constant(boston.data), [-1, 13]), tf.float32)
labels = tf.cast(tf.reshape(tf.constant(boston.target), [-1, 1]), tf.float32)
return features, labels
class FeatureColumnTest(tf.test.TestCase):
def testTrain(self):
feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input_fn(
boston_input_fn)
est = tf.contrib.learn.DNNRegressor(
feature_columns=feature_columns, hidden_units=[3, 3])
est.fit(input_fn=boston_input_fn, steps=1)
_ = est.evaluate(input_fn=boston_input_fn, steps=1)
if __name__ == '__main__':
tf.test.main()
|
{
"content_hash": "eda4d80451d2c7d8727e268d13366c2e",
"timestamp": "",
"source": "github",
"line_count": 1004,
"max_line_length": 81,
"avg_line_length": 38.67131474103586,
"alnum_prop": 0.6177561427909133,
"repo_name": "nanditav/15712-TensorFlow",
"id": "1781d85b6415e1bda27400fe7168efb23261e34d",
"size": "39516",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/learn/python/learn/estimators/dnn_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "2967"
},
{
"name": "C",
"bytes": "94853"
},
{
"name": "C++",
"bytes": "13822769"
},
{
"name": "CMake",
"bytes": "93933"
},
{
"name": "CSS",
"bytes": "774"
},
{
"name": "Go",
"bytes": "85586"
},
{
"name": "HTML",
"bytes": "525001"
},
{
"name": "Java",
"bytes": "56007"
},
{
"name": "JavaScript",
"bytes": "12235"
},
{
"name": "Jupyter Notebook",
"bytes": "1833475"
},
{
"name": "Makefile",
"bytes": "23468"
},
{
"name": "Objective-C",
"bytes": "7056"
},
{
"name": "Objective-C++",
"bytes": "64592"
},
{
"name": "Protocol Buffer",
"bytes": "142429"
},
{
"name": "Python",
"bytes": "13133178"
},
{
"name": "Shell",
"bytes": "270336"
},
{
"name": "TypeScript",
"bytes": "724952"
}
],
"symlink_target": ""
}
|
from __future__ import (absolute_import, division, print_function)
from __future__ import unicode_literals
# this example reads today's numerical weather forecasts
# from the NOAA OpenDAP servers and makes a multi-panel plot.
# This version demonstrates the use of the AxesGrid toolkit.
import numpy as np
import matplotlib.pyplot as plt
import sys
import numpy.ma as ma
import datetime
from mpl_toolkits.basemap import Basemap, addcyclic
from mpl_toolkits.axes_grid1 import AxesGrid
from netCDF4 import Dataset as NetCDFFile, num2date
# today's date is the default.
if len(sys.argv) > 1:
YYYYMMDD = sys.argv[1]
else:
YYYYMMDD = datetime.datetime.today().strftime('%Y%m%d')
# set OpenDAP server URL.
try:
URLbase="http://nomads.ncep.noaa.gov:9090/dods/gfs/gfs"
URL=URLbase+YYYYMMDD+'/gfs_00z'
print(URL)
data = NetCDFFile(URL)
except Exception:
msg = """
opendap server not providing the requested data.
Try another date by providing YYYYMMDD on command line."""
raise IOError(msg)
# read lats,lons,times.
print(data.variables.keys())
latitudes = data.variables['lat']
longitudes = data.variables['lon']
fcsttimes = data.variables['time']
times = fcsttimes[0:6] # first 6 forecast times.
ntimes = len(times)
# convert times to datetime instances.
fdates = num2date(times,units=fcsttimes.units,calendar='standard')
# make a list of YYYYMMDDHH strings.
verifdates = [fdate.strftime('%Y%m%d%H') for fdate in fdates]
# convert times to forecast hours.
fcsthrs = []
for fdate in fdates:
fdiff = fdate-fdates[0]
fcsthrs.append(fdiff.days*24. + fdiff.seconds/3600.)
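    # (equivalent, and arguably clearer: fcsthrs.append(fdiff.total_seconds()/3600.))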
print(fcsthrs)
print(verifdates)
lats = latitudes[:]
nlats = len(lats)
lons1 = longitudes[:]
nlons = len(lons1)
# unpack 2-meter temp forecast data.
t2mvar = data.variables['tmp2m']
# create figure, set up AxesGrid.
fig=plt.figure(figsize=(6,8))
grid = AxesGrid(fig, [0.05,0.01,0.9,0.9],
nrows_ncols=(3, 2),
axes_pad=0.25,
cbar_mode='single',
cbar_pad=0.3,
cbar_size=0.1,
cbar_location='top',
share_all=True,
)
# create Basemap instance for Orthographic projection.
m = Basemap(lon_0=-90,lat_0=60,projection='ortho')
# add wrap-around point in longitude.
t2m = np.zeros((ntimes,nlats,nlons+1),np.float32)
for nt in range(ntimes):
t2m[nt,:,:], lons = addcyclic(t2mvar[nt,:,:], lons1)
# convert to celsius.
t2m = t2m-273.15
# contour levels
clevs = np.arange(-30,30.1,2.)
lons, lats = np.meshgrid(lons, lats)
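# calling the Basemap instance converts lon/lat in degrees to x/y map
# projection coordinates, which is what contourf expects below.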
x, y = m(lons, lats)
# make subplots.
for nt,fcsthr in enumerate(fcsthrs):
ax = grid[nt]
m.ax = ax
cs = m.contourf(x,y,t2m[nt,:,:],clevs,cmap=plt.cm.jet,extend='both')
m.drawcoastlines(linewidth=0.5)
m.drawcountries()
m.drawparallels(np.arange(-80,81,20))
m.drawmeridians(np.arange(0,360,20))
# panel title
ax.set_title('%d-h forecast valid '%fcsthr+verifdates[nt],fontsize=9)
# figure title
plt.figtext(0.5,0.95,
"2-m temp (\N{DEGREE SIGN}C) forecasts from %s"%verifdates[0],
horizontalalignment='center',fontsize=14)
# a single colorbar.
cbar = fig.colorbar(cs, cax=grid.cbar_axes[0], orientation='horizontal')
plt.show()
|
{
"content_hash": "db2f519348e97f080360bc8383372816",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 74,
"avg_line_length": 30,
"alnum_prop": 0.6958730158730159,
"repo_name": "guziy/basemap",
"id": "d4d3d44dfc55d31d8fa9c193b30e8bcb6cc61f46",
"size": "3150",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/fcstmaps_axesgrid.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Cython",
"bytes": "14661"
},
{
"name": "Python",
"bytes": "440405"
},
{
"name": "Shell",
"bytes": "1161"
}
],
"symlink_target": ""
}
|
import _plotly_utils.basevalidators
class XanchorValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(
self, plotly_name="xanchor", parent_name="scattergl.marker.colorbar", **kwargs
):
super(XanchorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
values=kwargs.pop("values", ["left", "center", "right"]),
**kwargs,
)
|
{
"content_hash": "4ee0d162877eb844bab21444eb607518",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 86,
"avg_line_length": 35.92857142857143,
"alnum_prop": 0.6023856858846919,
"repo_name": "plotly/plotly.py",
"id": "bbad4b4804fc4f8d7e7f88c44df9eda8840a6fec",
"size": "503",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/scattergl/marker/colorbar/_xanchor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
}
|
"""\
=======================
Simple Button component
=======================
A simple cuboid-shaped button without a caption. Implements responsive
button behaviour.
Can be used as a base class for differently shaped buttons. The colours
of the front/back and the side faces can be specified.
Example Usage
-------------
Two simple buttons which send messages to the console::
Graphline(
button1 = SimpleButton(size=(1,1,0.3), position=(-2,0,-10), msg="PINKY"),
button2 = SimpleButton(size=(2,2,1), position=(5,0,-15), msg="BRAIN"),
echo = ConsoleEchoer(),
linkages = {
("button1", "outbox") : ("echo", "inbox"),
("button2", "outbox") : ("echo", "inbox")
}
).run()
How does it work?
-----------------
This component is a subclass of OpenGLComponent (for OpenGLComponent
functionality see its documentation). It overrides __init__(), setup(),
draw() and handleEvents().
It draws a simple cuboid. It is activated on mouse button release over
the object and on key down if a key is assigned. On mouse button down it
is shrunk by a small amount until the button is released.
"""
import Axon
import pygame
from pygame.locals import *
from OpenGL.GL import *
from Vector import Vector
from OpenGLComponent import OpenGLComponent
class SimpleButton(OpenGLComponent):
"""\
SimpleButton(...) -> A new SimpleButton component.
    A simple cuboid-shaped button without a caption. Implements responsive
    button behaviour.
Keyword arguments:
- bgcolour -- Background colour (default=(244,244,244))
- sidecolour -- Colour of side planes (default=(200,200,244))
- key -- Activation key, pygame identifier (optional)
- msg -- Message that gets sent to the outbox when the button is activated (default="CLICK")
"""
def __init__(self, **argd):
"""x.__init__(...) initializes x; see x.__class__.__doc__ for signature"""
super(SimpleButton, self).__init__(**argd)
self.backgroundColour = argd.get("bgcolour", (244,244,244))
self.sideColour = argd.get("sidecolour", (200,200,244))
self.key = argd.get("key", None)
self.eventMsg = argd.get("msg", "CLICK")
self.size = Vector(*argd.get("size", (1,1,1)))
self.grabbed = 0
def setup(self):
self.addListenEvents( [pygame.MOUSEBUTTONDOWN, pygame.MOUSEBUTTONUP, pygame.KEYDOWN ])
def draw(self):
hs = self.size/2.0
# draw faces
glBegin(GL_QUADS)
glColor4f(self.sideColour[0]/256.0, self.sideColour[1]/256.0, self.sideColour[2]/256.0, 0.5)
# right face
glVertex3f(hs.x,hs.y,hs.z)
glVertex3f(hs.x,-hs.y,hs.z)
glVertex3f(hs.x,-hs.y,-hs.z)
glVertex3f(hs.x,hs.y,-hs.z)
# left face
glVertex3f(-hs.x,hs.y,hs.z)
glVertex3f(-hs.x,-hs.y,hs.z)
glVertex3f(-hs.x,-hs.y,-hs.z)
glVertex3f(-hs.x,hs.y,-hs.z)
# top face
glVertex3f(hs.x,hs.y,hs.z)
glVertex3f(-hs.x,hs.y,hs.z)
glVertex3f(-hs.x,hs.y,-hs.z)
glVertex3f(hs.x,hs.y,-hs.z)
# bottom face
glVertex3f(hs.x,-hs.y,hs.z)
glVertex3f(-hs.x,-hs.y,hs.z)
glVertex3f(-hs.x,-hs.y,-hs.z)
glVertex3f(hs.x,-hs.y,-hs.z)
glColor4f(self.backgroundColour[0]/256.0, self.backgroundColour[1]/256.0, self.backgroundColour[2]/256.0, 0.5)
# back face
glVertex3f(hs.x,hs.y,-hs.z)
glVertex3f(-hs.x,hs.y,-hs.z)
glVertex3f(-hs.x,-hs.y,-hs.z)
glVertex3f(hs.x,-hs.y,-hs.z)
# front face
glVertex3f(-hs.x,-hs.y,hs.z)
glVertex3f(hs.x,-hs.y,hs.z)
glVertex3f(hs.x,hs.y,hs.z)
glVertex3f(-hs.x,hs.y,hs.z)
glEnd()
def handleEvents(self):
while self.dataReady("events"):
activate = False
event = self.recv("events")
if event.type == pygame.KEYDOWN:
if event.key == self.key:
activate = True
if event.type == pygame.MOUSEBUTTONDOWN:
if event.button == 1 and self.identifier in event.hitobjects:
self.grabbed = event.button
self.scaling = Vector(0.9,0.9,0.9)
if event.type == pygame.MOUSEBUTTONUP:
if event.button == 1:
self.grabbed = 0
self.scaling = Vector(1,1,1)
                    # activate
if self.identifier in event.hitobjects:
activate = True
if activate:
self.send( self.eventMsg, "outbox" )
__kamaelia_components__ = (SimpleButton,)
if __name__=='__main__':
from Kamaelia.Util.Console import ConsoleEchoer
from Kamaelia.Chassis.Graphline import Graphline
Graphline(
button1 = SimpleButton(size=(1,1,0.3), position=(-2,0,-10), msg="PINKY"),
button2 = SimpleButton(size=(2,2,1), position=(5,0,-15), msg="BRAIN"),
echo = ConsoleEchoer(),
linkages = {
("button1", "outbox") : ("echo", "inbox"),
("button2", "outbox") : ("echo", "inbox")
}
).run()
# Licensed to the BBC under a Contributor Agreement: THF
|
{
"content_hash": "1679c2565b145eae86ea9de46fb477a9",
"timestamp": "",
"source": "github",
"line_count": 161,
"max_line_length": 118,
"avg_line_length": 32.93167701863354,
"alnum_prop": 0.569407770652584,
"repo_name": "sparkslabs/kamaelia_",
"id": "3e1db4f7ccca6394253e32dca9c296ba913700fb",
"size": "6208",
"binary": false,
"copies": "12",
"ref": "refs/heads/master",
"path": "Sketches/MPS/BugReports/FixTests/Kamaelia/Kamaelia/UI/OpenGL/SimpleButton.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3814"
},
{
"name": "C",
"bytes": "212854"
},
{
"name": "C++",
"bytes": "327546"
},
{
"name": "CSS",
"bytes": "114434"
},
{
"name": "ChucK",
"bytes": "422"
},
{
"name": "HTML",
"bytes": "1288960"
},
{
"name": "Java",
"bytes": "31832"
},
{
"name": "JavaScript",
"bytes": "829491"
},
{
"name": "Makefile",
"bytes": "5768"
},
{
"name": "NSIS",
"bytes": "18867"
},
{
"name": "PHP",
"bytes": "49059"
},
{
"name": "Perl",
"bytes": "504"
},
{
"name": "Processing",
"bytes": "2885"
},
{
"name": "Pure Data",
"bytes": "7485482"
},
{
"name": "Python",
"bytes": "18896248"
},
{
"name": "Ruby",
"bytes": "4165"
},
{
"name": "Shell",
"bytes": "707430"
}
],
"symlink_target": ""
}
|
"""Test controller."""
from deluca.lung.core import BreathWaveform
from deluca.lung.utils.data.analyzer import Analyzer
from deluca.lung.utils.scripts.run_controller import run_controller_scan
import jax
import jax.numpy as jnp
@jax.jit
def test_controller(controller, sim, pips, peep):
"""Test controller."""
# new_controller = controller.replace(use_leaky_clamp=False)
score = 0.0
horizon = 29
for pip in pips:
waveform = BreathWaveform.create(peep=peep, pip=pip)
result = run_controller_scan(
controller,
T=horizon,
abort=horizon,
env=sim,
waveform=waveform,
init_controller=True,
)
analyzer = Analyzer(result)
preds = analyzer.pressure # shape = (29,)
truth = analyzer.target # shape = (29,)
# print('preds.shape: %s', str(preds.shape))
# print('truth.shape: %s', str(truth.shape))
score += jnp.abs(preds - truth).mean()
score = score / len(pips)
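  # score is now the mean absolute tracking error, averaged over the requested
  # PIP targets; lower is better.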
return score
|
{
"content_hash": "e7b35dffb4737a2093d000030a4f1222",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 72,
"avg_line_length": 30.1875,
"alnum_prop": 0.6656314699792961,
"repo_name": "google/deluca",
"id": "972fcd9036ee8467f640692da993cfb5728af8bf",
"size": "1550",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "deluca/lung/utils/scripts/test_controller.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "297090"
}
],
"symlink_target": ""
}
|
import hashlib
import logging
import uuid
from tower import ugettext as _
import paypal
log = logging.getLogger('z.paypal')
class Check(object):
"""
Run a series of tests on PayPal for either an addon or a paypal_id.
The add-on is not required, but we'll do another check or two if the
add-on is there.
"""
def __init__(self, addon=None, paypal_id=None):
# If this state flips to False, it means they need to
# go to Paypal and re-set up permissions. We'll assume the best.
self.state = {'permissions': True}
self.tests = ['id', 'refund']
for test in self.tests:
# Three states for pass:
# None: haven't tried
# False: tried but failed
# True: tried and passed
self.state[test] = {'pass': None, 'errors': []}
self.addon = addon
self.paypal_id = paypal_id
if not self.paypal_id and self.addon:
self.paypal_id = self.addon.paypal_id
def all(self):
self.check_id()
self.check_refund()
def failure(self, test, msg):
self.state[test]['errors'].append(msg)
self.state[test]['pass'] = False
def pass_(self, test):
self.state[test]['pass'] = True
def check_id(self):
"""Check that the paypal id is good."""
test_id = 'id'
if not self.paypal_id:
self.failure(test_id, _('No PayPal ID provided.'))
return
valid, msg = paypal.check_paypal_id(self.paypal_id)
if not valid:
self.failure(test_id, _('Please enter a valid email.'))
else:
self.pass_(test_id)
def check_refund(self):
"""Check that we have the refund permission."""
test_id = 'refund'
        msg = _('You have not set up permissions for us to check this '
                'PayPal account.')
if not self.addon:
# If there's no addon there's not even any point checking.
return
premium = self.addon.premium
if not premium:
self.state['permissions'] = False
self.failure(test_id, msg)
return
def test_paykey(self, data):
"""
        Wraps get_paykey, filling in non-optional data with test data. This
        should never be used for real purchases.
The only things that you can set on this are:
email: who the money is going to (required)
amount: the amount of money (required)
currency: valid paypal currency, defaults to USD (optional)
"""
data.update({
'pattern': '',
'ip': '127.0.0.1',
'slug': 'foo',
'uuid': hashlib.md5(str(uuid.uuid4())).hexdigest()
})
return paypal.get_paykey(data)
@property
def passed(self):
"""Returns a boolean to check that all the attempted tests passed."""
values = [self.state[k] for k in self.tests]
passes = [s['pass'] for s in values if s['pass'] is not None]
if passes:
return all(passes)
return False
@property
def errors(self):
errs = []
for k in self.tests:
if self.state[k]['pass'] is False:
for err in self.state[k]['errors']:
errs.append(err)
return errs
|
{
"content_hash": "f727ebd45fda0aaec0ab6c38156d3a8c",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 77,
"avg_line_length": 30.463636363636365,
"alnum_prop": 0.5556550283497463,
"repo_name": "robhudson/zamboni",
"id": "6963cbb93a95cf6564f1c95ff63175e55a3dd44b",
"size": "3351",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/paypal/check.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4145"
},
{
"name": "CSS",
"bytes": "900136"
},
{
"name": "JavaScript",
"bytes": "1700376"
},
{
"name": "Puppet",
"bytes": "13808"
},
{
"name": "Python",
"bytes": "6317591"
},
{
"name": "Shell",
"bytes": "20633"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('service', '0007_auto_20161126_2013'),
]
operations = [
migrations.AlterField(
model_name='post',
name='contentType',
field=models.CharField(choices=[(b'text/plain', b'text/plain'), (b'text/markdown', b'text/markdown')], default=b'text/plain', max_length=100),
),
]
|
{
"content_hash": "6ca48d67008ca5994ef8298ea4b8e553",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 154,
"avg_line_length": 26.944444444444443,
"alnum_prop": 0.6123711340206186,
"repo_name": "CMPUT404Team/CMPUT404-project-socialdistribution",
"id": "a813763e187fa05e691d186bb7be23292c0f1730",
"size": "558",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cmput404project/service/migrations/0008_auto_20161128_0426.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "155326"
},
{
"name": "HTML",
"bytes": "24834"
},
{
"name": "JavaScript",
"bytes": "211325"
},
{
"name": "Python",
"bytes": "141889"
}
],
"symlink_target": ""
}
|
"""
This file registers pre-defined datasets at hard-coded paths, and their metadata.
We hard-code metadata for common datasets. This will enable:
1. Consistency check when loading the datasets
2. Use models on these standard datasets directly and run demos,
without having to download the dataset annotations
We hard-code some paths to the dataset that's assumed to
exist in "./datasets/".
Users SHOULD NOT use this file to create new datasets / metadata for new datasets.
To add a new dataset, refer to the tutorial "docs/DATASETS.md".
"""
import os
from detectron2.data import DatasetCatalog, MetadataCatalog
from .builtin_meta import ADE20K_SEM_SEG_CATEGORIES, _get_builtin_metadata
from .cityscapes import load_cityscapes_instances, load_cityscapes_semantic
from .cityscapes_panoptic import register_all_cityscapes_panoptic
from .coco import load_sem_seg, register_coco_instances
from .coco_panoptic import register_coco_panoptic, register_coco_panoptic_separated
from .lvis import get_lvis_instances_meta, register_lvis_instances
from .pascal_voc import register_pascal_voc
# ==== Predefined datasets and splits for COCO ==========
_PREDEFINED_SPLITS_COCO = {}
_PREDEFINED_SPLITS_COCO["coco"] = {
"coco_2014_train": ("coco/train2014", "coco/annotations/instances_train2014.json"),
"coco_2014_val": ("coco/val2014", "coco/annotations/instances_val2014.json"),
"coco_2014_minival": ("coco/val2014", "coco/annotations/instances_minival2014.json"),
"coco_2014_valminusminival": (
"coco/val2014",
"coco/annotations/instances_valminusminival2014.json",
),
"coco_2017_train": ("coco/train2017", "coco/annotations/instances_train2017.json"),
"coco_2017_val": ("coco/val2017", "coco/annotations/instances_val2017.json"),
"coco_2017_test": ("coco/test2017", "coco/annotations/image_info_test2017.json"),
"coco_2017_test-dev": ("coco/test2017", "coco/annotations/image_info_test-dev2017.json"),
"coco_2017_val_100": ("coco/val2017", "coco/annotations/instances_val2017_100.json"),
}
_PREDEFINED_SPLITS_COCO["coco_person"] = {
"keypoints_coco_2014_train": (
"coco/train2014",
"coco/annotations/person_keypoints_train2014.json",
),
"keypoints_coco_2014_val": ("coco/val2014", "coco/annotations/person_keypoints_val2014.json"),
"keypoints_coco_2014_minival": (
"coco/val2014",
"coco/annotations/person_keypoints_minival2014.json",
),
"keypoints_coco_2014_valminusminival": (
"coco/val2014",
"coco/annotations/person_keypoints_valminusminival2014.json",
),
"keypoints_coco_2017_train": (
"coco/train2017",
"coco/annotations/person_keypoints_train2017.json",
),
"keypoints_coco_2017_val": ("coco/val2017", "coco/annotations/person_keypoints_val2017.json"),
"keypoints_coco_2017_val_100": (
"coco/val2017",
"coco/annotations/person_keypoints_val2017_100.json",
),
}
_PREDEFINED_SPLITS_COCO_PANOPTIC = {
"coco_2017_train_panoptic": (
# This is the original panoptic annotation directory
"coco/panoptic_train2017",
"coco/annotations/panoptic_train2017.json",
# This directory contains semantic annotations that are
# converted from panoptic annotations.
# It is used by PanopticFPN.
# You can use the script at detectron2/datasets/prepare_panoptic_fpn.py
# to create these directories.
"coco/panoptic_stuff_train2017",
),
"coco_2017_val_panoptic": (
"coco/panoptic_val2017",
"coco/annotations/panoptic_val2017.json",
"coco/panoptic_stuff_val2017",
),
"coco_2017_val_100_panoptic": (
"coco/panoptic_val2017_100",
"coco/annotations/panoptic_val2017_100.json",
"coco/panoptic_stuff_val2017_100",
),
}
def register_all_coco(root):
for dataset_name, splits_per_dataset in _PREDEFINED_SPLITS_COCO.items():
for key, (image_root, json_file) in splits_per_dataset.items():
# Assume pre-defined datasets live in `./datasets`.
register_coco_instances(
key,
_get_builtin_metadata(dataset_name),
os.path.join(root, json_file) if "://" not in json_file else json_file,
os.path.join(root, image_root),
)
for (
prefix,
(panoptic_root, panoptic_json, semantic_root),
) in _PREDEFINED_SPLITS_COCO_PANOPTIC.items():
prefix_instances = prefix[: -len("_panoptic")]
instances_meta = MetadataCatalog.get(prefix_instances)
image_root, instances_json = instances_meta.image_root, instances_meta.json_file
# The "separated" version of COCO panoptic segmentation dataset,
# e.g. used by Panoptic FPN
register_coco_panoptic_separated(
prefix,
_get_builtin_metadata("coco_panoptic_separated"),
image_root,
os.path.join(root, panoptic_root),
os.path.join(root, panoptic_json),
os.path.join(root, semantic_root),
instances_json,
)
# The "standard" version of COCO panoptic segmentation dataset,
# e.g. used by Panoptic-DeepLab
register_coco_panoptic(
prefix,
_get_builtin_metadata("coco_panoptic_standard"),
image_root,
os.path.join(root, panoptic_root),
os.path.join(root, panoptic_json),
instances_json,
)
# ==== Predefined datasets and splits for LVIS ==========
_PREDEFINED_SPLITS_LVIS = {
"lvis_v1": {
"lvis_v1_train": ("coco/", "lvis/lvis_v1_train.json"),
"lvis_v1_val": ("coco/", "lvis/lvis_v1_val.json"),
"lvis_v1_test_dev": ("coco/", "lvis/lvis_v1_image_info_test_dev.json"),
"lvis_v1_test_challenge": ("coco/", "lvis/lvis_v1_image_info_test_challenge.json"),
},
"lvis_v0.5": {
"lvis_v0.5_train": ("coco/", "lvis/lvis_v0.5_train.json"),
"lvis_v0.5_val": ("coco/", "lvis/lvis_v0.5_val.json"),
"lvis_v0.5_val_rand_100": ("coco/", "lvis/lvis_v0.5_val_rand_100.json"),
"lvis_v0.5_test": ("coco/", "lvis/lvis_v0.5_image_info_test.json"),
},
"lvis_v0.5_cocofied": {
"lvis_v0.5_train_cocofied": ("coco/", "lvis/lvis_v0.5_train_cocofied.json"),
"lvis_v0.5_val_cocofied": ("coco/", "lvis/lvis_v0.5_val_cocofied.json"),
},
}
def register_all_lvis(root):
for dataset_name, splits_per_dataset in _PREDEFINED_SPLITS_LVIS.items():
for key, (image_root, json_file) in splits_per_dataset.items():
register_lvis_instances(
key,
get_lvis_instances_meta(dataset_name),
os.path.join(root, json_file) if "://" not in json_file else json_file,
os.path.join(root, image_root),
)
# ==== Predefined splits for raw cityscapes images ===========
_RAW_CITYSCAPES_SPLITS = {
"cityscapes_fine_{task}_train": ("cityscapes/leftImg8bit/train/", "cityscapes/gtFine/train/"),
"cityscapes_fine_{task}_val": ("cityscapes/leftImg8bit/val/", "cityscapes/gtFine/val/"),
"cityscapes_fine_{task}_test": ("cityscapes/leftImg8bit/test/", "cityscapes/gtFine/test/"),
}
def register_all_cityscapes(root):
for key, (image_dir, gt_dir) in _RAW_CITYSCAPES_SPLITS.items():
meta = _get_builtin_metadata("cityscapes")
image_dir = os.path.join(root, image_dir)
gt_dir = os.path.join(root, gt_dir)
inst_key = key.format(task="instance_seg")
DatasetCatalog.register(
inst_key,
lambda x=image_dir, y=gt_dir: load_cityscapes_instances(
x, y, from_json=True, to_polygons=True
),
)
MetadataCatalog.get(inst_key).set(
image_dir=image_dir, gt_dir=gt_dir, evaluator_type="cityscapes_instance", **meta
)
sem_key = key.format(task="sem_seg")
DatasetCatalog.register(
sem_key, lambda x=image_dir, y=gt_dir: load_cityscapes_semantic(x, y)
)
MetadataCatalog.get(sem_key).set(
image_dir=image_dir,
gt_dir=gt_dir,
evaluator_type="cityscapes_sem_seg",
ignore_label=255,
**meta,
)
# ==== Predefined splits for PASCAL VOC ===========
def register_all_pascal_voc(root):
SPLITS = [
("voc_2007_trainval", "VOC2007", "trainval"),
("voc_2007_train", "VOC2007", "train"),
("voc_2007_val", "VOC2007", "val"),
("voc_2007_test", "VOC2007", "test"),
("voc_2012_trainval", "VOC2012", "trainval"),
("voc_2012_train", "VOC2012", "train"),
("voc_2012_val", "VOC2012", "val"),
]
for name, dirname, split in SPLITS:
year = 2007 if "2007" in name else 2012
register_pascal_voc(name, os.path.join(root, dirname), split, year)
MetadataCatalog.get(name).evaluator_type = "pascal_voc"
def register_all_ade20k(root):
root = os.path.join(root, "ADEChallengeData2016")
for name, dirname in [("train", "training"), ("val", "validation")]:
image_dir = os.path.join(root, "images", dirname)
gt_dir = os.path.join(root, "annotations_detectron2", dirname)
name = f"ade20k_sem_seg_{name}"
DatasetCatalog.register(
name, lambda x=image_dir, y=gt_dir: load_sem_seg(y, x, gt_ext="png", image_ext="jpg")
)
MetadataCatalog.get(name).set(
stuff_classes=ADE20K_SEM_SEG_CATEGORIES[:],
image_root=image_dir,
sem_seg_root=gt_dir,
evaluator_type="sem_seg",
ignore_label=255,
)
# True for open source;
# Internally at fb, we register them elsewhere
if __name__.endswith(".builtin"):
# Assume pre-defined datasets live in `./datasets`.
_root = os.path.expanduser(os.getenv("DETECTRON2_DATASETS", "datasets"))
register_all_coco(_root)
register_all_lvis(_root)
register_all_cityscapes(_root)
register_all_cityscapes_panoptic(_root)
register_all_pascal_voc(_root)
register_all_ade20k(_root)
|
{
"content_hash": "f71c3d1e4347d2c81055233c288b4087",
"timestamp": "",
"source": "github",
"line_count": 255,
"max_line_length": 98,
"avg_line_length": 39.59607843137255,
"alnum_prop": 0.6274140833911063,
"repo_name": "facebookresearch/detectron2",
"id": "c3a68aa833f12f0fa324a269c36190f21b8a75bd",
"size": "10174",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "detectron2/data/datasets/builtin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "79417"
},
{
"name": "CMake",
"bytes": "616"
},
{
"name": "Cuda",
"bytes": "112955"
},
{
"name": "Dockerfile",
"bytes": "3209"
},
{
"name": "Python",
"bytes": "3261609"
},
{
"name": "Shell",
"bytes": "14448"
}
],
"symlink_target": ""
}
|
from i3pystatus.file import File
from i3pystatus import Module
from i3pystatus.core.command import run_through_shell
import shutil
class Backlight(File):
"""
Screen backlight info
    - (Optional) requires `xbacklight` to change the backlight brightness with the scrollwheel.
.. rubric:: Available formatters
* `{brightness}` — current brightness relative to max_brightness
* `{max_brightness}` — maximum brightness value
* `{percentage}` — current brightness in percent
"""
settings = (
("format", "format string, formatters: brightness, max_brightness, percentage"),
("backlight", "backlight, see `/sys/class/backlight/`"),
"color",
)
required = ()
backlight = "acpi_video0"
format = "{brightness}/{max_brightness}"
base_path = "/sys/class/backlight/{backlight}/"
components = {
"brightness": (int, "brightness"),
"max_brightness": (int, "max_brightness"),
}
transforms = {
"percentage": lambda cdict: round((cdict["brightness"] / cdict["max_brightness"]) * 100),
}
on_upscroll = "lighter"
on_downscroll = "darker"
def init(self):
self.base_path = self.base_path.format(backlight=self.backlight)
self.has_xbacklight = shutil.which("xbacklight") is not None
super().init()
def lighter(self):
if self.has_xbacklight:
run_through_shell(["xbacklight", "+5"])
def darker(self):
if self.has_xbacklight:
run_through_shell(["xbacklight", "-5"])
|
{
"content_hash": "9d5666c46ccb192f065b9f631bef46ac",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 97,
"avg_line_length": 29.78846153846154,
"alnum_prop": 0.6294383473208521,
"repo_name": "plumps/i3pystatus",
"id": "dd50b483a28d62a8bbd2adac2b8958f886abb0a8",
"size": "1555",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "i3pystatus/backlight.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "223202"
},
{
"name": "Shell",
"bytes": "794"
}
],
"symlink_target": ""
}
|
try:
from Tkinter import *
except ImportError:
# for Python3
from tkinter import *
import math
import threading
import time
try:
from exceptions import *
except ImportError:
pass
class WindowGone(Exception):
    def __init__(self, args=()):
self.args = args
# These are all the inner Glasgow Canvas functions
class RawCanvas:
def __init__(self):
self.mainThread = threading.currentThread()
self._events = []
self.mainLoopRunning = False
self.no_current_keyhandler_call = True # Concurrency control - stops multiple simultaneous calls of the handler
self.no_current_mousehandler_call = True
# These are the main drawing functions - calling straight through to the
# underlying Tkinter Canvas functions
def create_rectangle( self, x1, y1, x2, y2, *kw ):
r = self._canvas.create_rectangle( x1, y1, x2, y2, kw )
self._canvas._root().update()
return r
def create_arc( self, x1, y1, x2, y2, *kw ):
r = self._canvas.create_arc( x1, y1, x2, y2, kw )
self._canvas._root().update()
return r
def create_line( self, x1, y1, x2, y2, *kw ):
r = self._canvas.create_line( x1, y1, x2, y2, kw)
self._canvas._root().update()
return r
def create_oval( self, x1, y1, x2, y2, *kw ):
r = self._canvas.create_oval( x1, y1, x2, y2, kw )
self._canvas._root().update()
return r
def create_text( self, x1, y1, *kw ):
r = self._canvas.create_text( x1, y1, kw )
self._canvas._root().update()
return r
def create_image( self, x1, y1, *kw ):
r = self._canvas.create_image( x1, y1, kw )
self._canvas._root().update()
return r
def move( self, tagOrId, xInc, yInc ):
self._canvas.move( tagOrId, xInc, yInc )
self._canvas._root().update()
def delete( self, tagOrId ):
self._canvas.delete( tagOrId )
self._canvas._root().update()
def add_to_group( self, groupName, tagOrId, moreTagsOrIds ):
for tOrId in (tagOrId,) + moreTagsOrIds:
self._canvas.addtag_withtag( groupName, tOrId )
def import_image( self, filename ):
return PhotoImage( file = filename )
def set_title( self, t ):
self._canvas._root().title( t )
self._canvas._root().update()
    # These two functions both set the mainloop running.
    # This one first installs a button handler that kills the window when pressed.
def _normal_complete( self, m = "Click mouse to end" ):
global _can
self._canvas.unbind("<Button-1>")
self._canvas.bind("<Button-1>", _can.destroy)
wait( 0.5 )
self._canvas._root().title( m )
self._canvas._root().update()
self.run()
    # and this one just sets the mainloop running, trusting that the user has
    # already set some callbacks.
def run( self ):
if not self.mainLoopRunning:
self.mainLoopRunning = True
try:
self._canvas._root().mainloop()
except WindowGone:
pass
    # These three functions all set flags in the _events list, which are then
    # handled by the originating Tkinter thread later. This is required because
    # calling these operations directly from a non-Tkinter thread makes Tkinter
    # hang.
def set_size( self, x, y ):
self._events = self._events + [ ["ss",x,y] ]
    def complete( self, a=None ):
        if threading.currentThread() != self.mainThread:
            if "c" not in self._events:
                if a is None:
                    self._events = self._events + ["c"]
                else:
                    self._events = self._events + ["c"+a]
        else:
            if a is None:
                self._normal_complete()
            else:
                self._normal_complete( a )
def quitCanvas( self ):
if "q" not in self._events:
self._events = self._events + [ "q" ]
    # Enables a separate thread to be run alongside the Tkinter thread.
    # This is the least safe part of the module, since separate threads
    # shouldn't be allowed to call the Tkinter functions - but it seems to
    # work for the Canvas functions.
def runGraphicsFn( self,g ):
def gWrap():
try:
g()
except WindowGone: # Enables threads to die quietly if Tkinter closed by user
pass
newThread = threading.Thread( target = gWrap )
newThread.start()
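    # Illustrative use (via the module-level wrapper defined further below):
    #   runGraphicsFn( animate )   # animate() calls move()/wait() in a loop
    #                              # while the mainloop keeps the window alive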
# A range of event handler setting functions next
def set_keydown_handler( self, handler ):
def inner_handler( e ):
if self.no_current_keyhandler_call:
self.no_current_keyhandler_call = False
handler( e.keysym )
self.no_current_keyhandler_call = True
self._canvas._root().bind( "<Any-KeyPress>", inner_handler )
self._canvas._root().update()
def unset_keydown_handler( self ):
self._canvas._root().unbind( "<Any-KeyPress>" )
def set_mousedown_handler( self, handler ):
def inner_handler( e ):
if self.no_current_mousehandler_call:
self.no_current_mousehandler_call = False
handler( e.x, e.y, e.num )
self.no_current_mousehandler_call = True
self._canvas.bind( "<Any-Button>", inner_handler )
self._canvas._root().update()
def unset_mousedown_handler( self ):
self._canvas.unbind( "<Any-Button>" )
def set_mouseup_handler( self, handler ):
def inner_handler( e ):
if self.no_current_mousehandler_call:
self.no_current_mousehandler_call = False
handler( e.x, e.y, e.num )
self.no_current_mousehandler_call = True
self._canvas.bind( "<Any-ButtonRelease>", inner_handler )
self._canvas._root().update()
def unset_mouseup_handler( self ):
self._canvas.unbind( "<Any-ButtonRelease>" )
def set_mousemotion_handler( self, handler ):
def inner_handler( e ):
if self.no_current_mousehandler_call:
self.no_current_mousehandler_call = False
handler( e.x, e.y )
self.no_current_mousehandler_call = True
self._canvas.bind( "<Motion>", inner_handler )
self._canvas._root().update()
def unset_mousemotion_handler( self ):
self._canvas.unbind( "<Motion>" )
_can = None # This is the Glasgow canvas
_hadCan = False # Did we ever open a Canvas, even though it might now be dead?
_blockCalls = False # When True, don't try to execute Canvas ops, because Window has been closed
class Can( RawCanvas ):
def __init__( self ):
global _root, _canvas
self._root = Tk()
self._canvas = Canvas( self._root, background = "white" )
self._canvas.pack(expand=1, fill="both" )
RawCanvas.__init__( self )
self._root.iconify()
self._root.update()
self._root.deiconify()
self._root.update()
def destroy( event=None, extra=None ):
global _blockCalls, _root
_blockCalls = True
time.sleep( 0.5 )
self._root.destroy()
self.destroy = destroy
self._root.protocol("WM_DELETE_WINDOW",self.destroy )
# Finally, get the event checker running, to pick up events
# coming in from other threads that want to act on the tkinter thread
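        # Event encoding in self._events (inferred from the handler below):
        #   "c"         -> complete() with the default message
        #   "c" + msg   -> complete( msg )
        #   "q"         -> quitCanvas()
        #   ["ss",x,y]  -> set_size( x, y )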
def update_tkinter():
if self._events != []:
for e in self._events:
if type( e ) == type( "" ):
if e[0] == "c":
if len( e ) == 1:
self._normal_complete()
else:
self._normal_complete( e[1:] )
elif e == "q":
self.destroy()
else: # must be ["ss", x, y] for a set screen
self._canvas.config( width = e[1], height = e[2] )
self._events = []
self._root.after( 10, update_tkinter )
update_tkinter()
def _getCanvas():
global _can, _hadCan, _blockCalls
if (_hadCan and not _can) or _blockCalls:
raise WindowGone
if not _can:
_can = Can()
_hadCan = True
return _can
##########################################################
# These are the only visible functions out of the module
#
# i.e. These are the functions that you can use
##########################################################
def create_rectangle( x1, y1, x2, y2, **kw ):
return _getCanvas().create_rectangle( x1, y1, x2, y2, kw )
def create_arc( x1, y1, x2, y2, **kw ):
return _getCanvas().create_arc( x1, y1, x2, y2, kw )
def create_line( x1, y1, x2, y2, **kw ):
return _getCanvas().create_line( x1, y1, x2, y2, kw )
def create_oval( x1, y1, x2, y2, **kw ):
return _getCanvas().create_oval( x1, y1, x2, y2, kw )
def create_text( x1, y1, **kw ):
return _getCanvas().create_text( x1, y1, kw )
def create_image( x1, y1, **kw ):
return _getCanvas().create_image( x1, y1, kw )
def move( tagOrId, xInc, yInc ):
_getCanvas().move( tagOrId, xInc, yInc )
def delete( tagOrId ):
_getCanvas().delete( tagOrId )
def addToGroup( groupName, tagOrId, *moreTagOrIds ):
    _getCanvas().add_to_group( groupName, tagOrId, moreTagOrIds )
def importImage( filename ):
    return _getCanvas().import_image( filename )
def wait( t1 ):
time.sleep( t1 )
def set_title( txt ):
_getCanvas().set_title( txt )
def set_size( x, y ):
_getCanvas().set_size( x, y )
def complete( a = None ):
_getCanvas().complete( a )
def run():
_getCanvas().run()
def quitCanvas():
_getCanvas().quitCanvas()
def runGraphicsFn( g ):
_getCanvas().runGraphicsFn( g )
def set_keydown_handler( handler ):
_getCanvas().set_keydown_handler( handler )
def unset_keydown_handler():
_getCanvas().unset_keydown_handler()
def set_mousedown_handler( handler ):
_getCanvas().set_mousedown_handler( handler )
def unset_mousedown_handler():
_getCanvas().unset_mousedown_handler()
def set_mouseup_handler( handler ):
_getCanvas().set_mouseup_handler( handler )
def unset_mouseup_handler():
_getCanvas().unset_mouseup_handler()
def set_mousemotion_handler( handler ):
_getCanvas().set_mousemotion_handler( handler )
def unset_mousemotion_handler():
_getCanvas().unset_mousemotion_handler()
def end_x( start_x, length, angle ):
return start_x + length * math.sin( math.radians( angle ) )
def end_y( start_y, length, angle ):
return start_y + length * math.cos( math.radians( angle ) )
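# A minimal demo sketch (runs only when this file is executed directly; the
# title, sizes, coordinates and colour below are purely illustrative):
if __name__ == "__main__":
    set_title( "Canvas demo" )
    set_size( 400, 300 )
    create_rectangle( 20, 20, 120, 80, fill = "red" )
    create_line( 20, 100, end_x( 20, 100, 45 ), end_y( 100, 100, 45 ) )
    complete()   # waits for a mouse click, then closes the window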
|
{
"content_hash": "db05c1f0a13cf7ac600330db38b3f919",
"timestamp": "",
"source": "github",
"line_count": 279,
"max_line_length": 122,
"avg_line_length": 38.727598566308245,
"alnum_prop": 0.5708468301712171,
"repo_name": "RossMeikleham/Connect4",
"id": "ef4a455a83f5b776bccbe1d91539965267fba429",
"size": "11244",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/Canvas.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "20315"
}
],
"symlink_target": ""
}
|
import os
import shutil
import tempfile
import time
from xml.dom.minidom import Document
import mock
import six
from cinder import exception
from cinder.openstack.common import log as logging
from cinder.openstack.common import loopingcall
from cinder import test
from cinder.volume.drivers.emc.emc_vmax_common import EMCVMAXCommon
from cinder.volume.drivers.emc.emc_vmax_fast import EMCVMAXFast
from cinder.volume.drivers.emc.emc_vmax_fc import EMCVMAXFCDriver
from cinder.volume.drivers.emc.emc_vmax_iscsi import EMCVMAXISCSIDriver
from cinder.volume.drivers.emc.emc_vmax_masking import EMCVMAXMasking
from cinder.volume.drivers.emc.emc_vmax_utils import EMCVMAXUtils
from cinder.volume import volume_types
LOG = logging.getLogger(__name__)
CINDER_EMC_CONFIG_DIR = '/etc/cinder/'
class EMC_StorageVolume(dict):
pass
class CIM_StorageExtent(dict):
pass
class SE_InitiatorMaskingGroup(dict):
pass
class SE_ConcreteJob(dict):
pass
class SE_StorageHardwareID(dict):
pass
class SYMM_LunMasking(dict):
pass
class CIM_DeviceMaskingGroup(dict):
pass
class EMC_LunMaskingSCSIProtocolController(dict):
pass
class CIM_TargetMaskingGroup(dict):
pass
class EMC_StorageHardwareID(dict):
pass
class Fake_CIMProperty():
def fake_getCIMProperty(self):
cimproperty = Fake_CIMProperty()
cimproperty.value = True
return cimproperty
def fake_getBlockSizeCIMProperty(self):
cimproperty = Fake_CIMProperty()
cimproperty.value = '512'
return cimproperty
def fake_getConsumableBlocksCIMProperty(self):
cimproperty = Fake_CIMProperty()
cimproperty.value = '12345'
return cimproperty
def fake_getIsConcatenatedCIMProperty(self):
cimproperty = Fake_CIMProperty()
cimproperty.value = True
return cimproperty
def fake_getIsCompositeCIMProperty(self):
cimproperty = Fake_CIMProperty()
cimproperty.value = False
return cimproperty
def fake_getElementNameCIMProperty(self):
cimproperty = Fake_CIMProperty()
cimproperty.value = 'OS-myhost-MV'
return cimproperty
class Fake_CIM_TierPolicyServiceCapabilities():
def fake_getpolicyinstance(self):
classinstance = Fake_CIM_TierPolicyServiceCapabilities()
classcimproperty = Fake_CIMProperty()
cimproperty = classcimproperty.fake_getCIMProperty()
cimproperties = {u'SupportsTieringPolicies': cimproperty}
classinstance.properties = cimproperties
return classinstance
class FakeCIMInstanceName(dict):
def fake_getinstancename(self, classname, bindings):
instancename = FakeCIMInstanceName()
for key in bindings:
instancename[key] = bindings[key]
instancename.classname = classname
instancename.namespace = 'root/emc'
return instancename
class FakeDB():
def volume_update(self, context, volume_id, model_update):
pass
def volume_get(self, context, volume_id):
conn = FakeEcomConnection()
objectpath = {}
objectpath['CreationClassName'] = 'Symm_StorageVolume'
if volume_id == 'vol1':
device_id = '1'
objectpath['DeviceID'] = device_id
else:
objectpath['DeviceID'] = volume_id
return conn.GetInstance(objectpath)
class EMCVMAXCommonData():
wwpn1 = "123456789012345"
wwpn2 = "123456789054321"
connector = {'ip': '10.0.0.2',
'initiator': 'iqn.1993-08.org.debian: 01: 222',
'wwpns': [wwpn1, wwpn2],
'wwnns': ["223456789012345", "223456789054321"],
'host': 'fakehost'}
target_wwns = [wwn[::-1] for wwn in connector['wwpns']]
fabric_name_prefix = "fakeFabric"
end_point_map = {connector['wwpns'][0]: [target_wwns[0]],
connector['wwpns'][1]: [target_wwns[1]]}
device_map = {}
for wwn in connector['wwpns']:
fabric_name = ''.join([fabric_name_prefix,
wwn[-2:]])
target_wwn = wwn[::-1]
fabric_map = {'initiator_port_wwn_list': [wwn],
'target_port_wwn_list': [target_wwn]
}
device_map[fabric_name] = fabric_map
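    # e.g. wwpn "123456789012345" lands in fabric "fakeFabric45" with target
    # port "543210987654321" (the wwpn reversed).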
default_storage_group = (
u'//10.10.10.10/root/emc: SE_DeviceMaskingGroup.InstanceID='
'"SYMMETRIX+000198700440+OS_default_GOLD1_SG"')
storage_system = 'SYMMETRIX+000195900551'
port_group = 'OS-portgroup-PG'
lunmaskctrl_id =\
'SYMMETRIX+000195900551+OS-fakehost-gold-MV'
lunmaskctrl_name =\
'OS-fakehost-gold-MV'
initiatorgroup_id =\
'SYMMETRIX+000195900551+OS-fakehost-IG'
initiatorgroup_name =\
'OS-fakehost-IG'
initiatorgroup_creationclass = 'SE_InitiatorMaskingGroup'
storageextent_creationclass = 'CIM_StorageExtent'
initiator1 = 'iqn.1993-08.org.debian: 01: 1a2b3c4d5f6g'
stconf_service_creationclass = 'Symm_StorageConfigurationService'
ctrlconf_service_creationclass = 'Symm_ControllerConfigurationService'
elementcomp_service_creationclass = 'Symm_ElementCompositionService'
storreloc_service_creationclass = 'Symm_StorageRelocationService'
replication_service_creationclass = 'EMC_ReplicationService'
vol_creationclass = 'Symm_StorageVolume'
pool_creationclass = 'Symm_VirtualProvisioningPool'
lunmask_creationclass = 'Symm_LunMaskingSCSIProtocolController'
lunmask_creationclass2 = 'Symm_LunMaskingView'
hostedservice_creationclass = 'CIM_HostedService'
policycapability_creationclass = 'CIM_TierPolicyServiceCapabilities'
policyrule_creationclass = 'Symm_TierPolicyRule'
assoctierpolicy_creationclass = 'CIM_StorageTier'
storagepool_creationclass = 'Symm_VirtualProvisioningPool'
storagegroup_creationclass = 'CIM_DeviceMaskingGroup'
hardwareid_creationclass = 'EMC_StorageHardwareID'
storagepoolid = 'SYMMETRIX+000195900551+U+gold'
storagegroupname = 'OS_default_GOLD1_SG'
storagevolume_creationclass = 'EMC_StorageVolume'
policyrule = 'gold'
poolname = 'gold'
totalmanagedspace_bits = '1000000000000'
subscribedcapacity_bits = '500000000000'
totalmanagedspace_gbs = 931
subscribedcapacity_gbs = 466
unit_creationclass = 'CIM_ProtocolControllerForUnit'
storage_type = 'gold'
keybindings = {'CreationClassName': u'Symm_StorageVolume',
'SystemName': u'SYMMETRIX+000195900551',
'DeviceID': u'1',
'SystemCreationClassName': u'Symm_StorageSystem'}
keybindings2 = {'CreationClassName': u'Symm_StorageVolume',
'SystemName': u'SYMMETRIX+000195900551',
'DeviceID': u'99999',
'SystemCreationClassName': u'Symm_StorageSystem'}
provider_location = {'classname': 'Symm_StorageVolume',
'keybindings': keybindings}
provider_location2 = {'classname': 'Symm_StorageVolume',
'keybindings': keybindings2}
properties = {'ConsumableBlocks': '12345',
'BlockSize': '512'}
test_volume = {'name': 'vol1',
'size': 1,
'volume_name': 'vol1',
'id': '1',
'device_id': '1',
'provider_auth': None,
'project_id': 'project',
'display_name': 'vol1',
'display_description': 'test volume',
'volume_type_id': 'abc',
'provider_location': six.text_type(provider_location),
'status': 'available',
'host': 'fake-host'
}
test_volume_v2 = {'name': 'vol1',
'size': 1,
'volume_name': 'vol1',
'id': 'vol1',
'device_id': '1',
'provider_auth': None,
'project_id': 'project',
'display_name': 'vol1',
'display_description': 'test volume',
'volume_type_id': 'abc',
'provider_location': six.text_type(provider_location),
'status': 'available',
'host': 'fake-host'
}
test_failed_volume = {'name': 'failed_vol',
'size': 1,
'volume_name': 'failed_vol',
'id': '4',
'device_id': '4',
'provider_auth': None,
'project_id': 'project',
'display_name': 'failed_vol',
'display_description': 'test failed volume',
'volume_type_id': 'abc'}
failed_delete_vol = {'name': 'failed_delete_vol',
'size': '-1',
'volume_name': 'failed_delete_vol',
'id': '99999',
'device_id': '99999',
'provider_auth': None,
'project_id': 'project',
'display_name': 'failed delete vol',
'display_description': 'failed delete volume',
'volume_type_id': 'abc',
'provider_location': six.text_type(provider_location2)
}
test_source_volume = {'size': 1,
'volume_type_id': 'sourceid',
'display_name': 'sourceVolume',
'name': 'sourceVolume',
'volume_name': 'vmax-154326',
                          'provider_auth': None,
                          'project_id': 'project',
                          'id': '2',
'provider_location':
six.text_type(provider_location),
'display_description': 'snapshot source volume'}
location_info = {'location_info': '000195900551#silver#None',
'storage_protocol': 'ISCSI'}
test_host = {'capabilities': location_info,
'host': 'fake_host'}
initiatorNames = ["123456789012345", "123456789054321"]
test_ctxt = {}
new_type = {}
diff = {}
class FakeLookupService():
def get_device_mapping_from_network(self, initiator_wwns, target_wwns):
return EMCVMAXCommonData.device_map
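# Stands in for the WBEM/ECOM connection that the real VMAX driver would open;
# every method below returns canned instances built from EMCVMAXCommonData.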
class FakeEcomConnection():
def __init__(self, *args, **kwargs):
self.data = EMCVMAXCommonData()
def InvokeMethod(self, MethodName, Service, ElementName=None, InPool=None,
ElementType=None, Size=None,
SyncType=None, SourceElement=None, TargetElement=None,
Operation=None, Synchronization=None,
TheElements=None, TheElement=None,
LUNames=None, InitiatorPortIDs=None, DeviceAccesses=None,
ProtocolControllers=None,
MaskingGroup=None, Members=None,
HardwareId=None, ElementSource=None, EMCInPools=None,
CompositeType=None, EMCNumberOfMembers=None,
EMCBindElements=None,
InElements=None, TargetPool=None, RequestedState=None):
rc = 0L
myjob = SE_ConcreteJob()
myjob.classname = 'SE_ConcreteJob'
myjob['InstanceID'] = '9999'
myjob['status'] = 'success'
myjob['type'] = ElementName
if Size == -1073741824 and \
MethodName == 'CreateOrModifyCompositeElement':
rc = 0L
myjob = SE_ConcreteJob()
myjob.classname = 'SE_ConcreteJob'
myjob['InstanceID'] = '99999'
myjob['status'] = 'success'
myjob['type'] = 'failed_delete_vol'
elif ElementName is None and \
MethodName == 'CreateOrModifyCompositeElement':
rc = 0L
myjob = SE_ConcreteJob()
myjob.classname = 'SE_ConcreteJob'
myjob['InstanceID'] = '9999'
myjob['status'] = 'success'
myjob['type'] = 'vol1'
if ElementName == 'failed_vol' and \
MethodName == 'CreateOrModifyElementFromStoragePool':
rc = 10L
myjob['status'] = 'failure'
elif TheElements and \
TheElements[0]['DeviceID'] == '99999' and \
MethodName == 'EMCReturnToStoragePool':
rc = 10L
myjob['status'] = 'failure'
elif HardwareId:
rc = 0L
targetendpoints = {}
endpoints = []
endpoint = {}
endpoint['Name'] = (EMCVMAXCommonData.end_point_map[
EMCVMAXCommonData.connector['wwpns'][0]])
endpoints.append(endpoint)
endpoint2 = {}
endpoint2['Name'] = (EMCVMAXCommonData.end_point_map[
EMCVMAXCommonData.connector['wwpns'][1]])
endpoints.append(endpoint2)
targetendpoints['TargetEndpoints'] = endpoints
return rc, targetendpoints
job = {'Job': myjob}
return rc, job
def EnumerateInstanceNames(self, name):
result = None
if name == 'EMC_StorageConfigurationService':
result = self._enum_stconfsvcs()
elif name == 'EMC_ControllerConfigurationService':
result = self._enum_ctrlconfsvcs()
elif name == 'Symm_ElementCompositionService':
result = self._enum_elemcompsvcs()
elif name == 'Symm_StorageRelocationService':
result = self._enum_storrelocsvcs()
elif name == 'EMC_ReplicationService':
result = self._enum_replicsvcs()
elif name == 'EMC_VirtualProvisioningPool':
result = self._enum_pools()
elif name == 'EMC_StorageVolume':
result = self._enum_storagevolumes()
elif name == 'Symm_StorageVolume':
result = self._enum_storagevolumes()
elif name == 'CIM_ProtocolControllerForUnit':
result = self._enum_unitnames()
elif name == 'EMC_LunMaskingSCSIProtocolController':
result = self._enum_lunmaskctrls()
elif name == 'EMC_StorageProcessorSystem':
result = self._enum_processors()
elif name == 'EMC_StorageHardwareIDManagementService':
result = self._enum_hdwidmgmts()
elif name == 'SE_StorageHardwareID':
result = self._enum_storhdwids()
elif name == 'EMC_StorageSystem':
result = self._enum_storage_system()
elif name == 'Symm_TierPolicyRule':
result = self._enum_policyrules()
else:
result = self._default_enum()
return result
def EnumerateInstances(self, name):
result = None
if name == 'EMC_VirtualProvisioningPool':
result = self._enum_pool_details()
elif name == 'SE_StorageHardwareID':
result = self._enum_storhdwids()
else:
result = self._default_enum()
return result
def GetInstance(self, objectpath, LocalOnly=False):
try:
name = objectpath['CreationClassName']
except KeyError:
name = objectpath.classname
result = None
if name == 'Symm_StorageVolume':
result = self._getinstance_storagevolume(objectpath)
elif name == 'CIM_ProtocolControllerForUnit':
result = self._getinstance_unit(objectpath)
elif name == 'SE_ConcreteJob':
result = self._getinstance_job(objectpath)
elif name == 'SE_StorageSynchronized_SV_SV':
result = self._getinstance_syncsvsv(objectpath)
elif name == 'Symm_TierPolicyServiceCapabilities':
result = self._getinstance_policycapabilities(objectpath)
elif name == 'CIM_TierPolicyServiceCapabilities':
result = self._getinstance_policycapabilities(objectpath)
elif name == 'SE_InitiatorMaskingGroup':
result = self._getinstance_initiatormaskinggroup(objectpath)
elif name == 'SE_StorageHardwareID':
result = self._getinstance_storagehardwareid(objectpath)
elif name == 'EMC_StorageHardwareID':
result = self._getinstance_storagehardwareid(objectpath)
elif name == 'Symm_VirtualProvisioningPool':
result = self._getinstance_pool(objectpath)
else:
result = self._default_getinstance(objectpath)
return result
def DeleteInstance(self, objectpath):
pass
def Associators(self, objectpath, ResultClass='EMC_StorageHardwareID'):
result = None
if ResultClass == 'EMC_StorageHardwareID':
result = self._assoc_hdwid()
elif ResultClass == 'EMC_iSCSIProtocolEndpoint':
result = self._assoc_endpoint()
elif ResultClass == 'EMC_StorageVolume':
result = self._assoc_storagevolume(objectpath)
elif ResultClass == 'CIM_DeviceMaskingGroup':
result = self._assoc_storagegroup()
elif ResultClass == 'CIM_StorageExtent':
result = self._enum_storage_extent()
elif ResultClass == 'EMC_LunMaskingSCSIProtocolController':
result = self._assoc_lunmaskctrls()
elif ResultClass == 'CIM_TargetMaskingGroup':
result = self._assoc_portgroup()
else:
result = self._default_assoc(objectpath)
return result
def AssociatorNames(self, objectpath,
ResultClass='default', AssocClass='default'):
result = None
if ResultClass == 'EMC_LunMaskingSCSIProtocolController':
result = self._assocnames_lunmaskctrl()
elif AssocClass == 'CIM_HostedService':
result = self._assocnames_hostedservice()
elif ResultClass == 'CIM_TierPolicyServiceCapabilities':
result = self._assocnames_policyCapabilities()
elif ResultClass == 'Symm_TierPolicyRule':
result = self._assocnames_policyrule()
elif AssocClass == 'CIM_AssociatedTierPolicy':
result = self._assocnames_assoctierpolicy()
elif ResultClass == 'CIM_StoragePool':
result = self._assocnames_storagepool()
elif ResultClass == 'EMC_VirtualProvisioningPool':
result = self._assocnames_storagepool()
elif ResultClass == 'CIM_DeviceMaskingGroup':
result = self._assocnames_storagegroup()
elif ResultClass == 'EMC_StorageVolume':
result = self._enum_storagevolumes()
elif ResultClass == 'Symm_StorageVolume':
result = self._enum_storagevolumes()
elif ResultClass == 'SE_InitiatorMaskingGroup':
result = self._enum_initiatorMaskingGroup()
elif ResultClass == 'CIM_InitiatorMaskingGroup':
result = self._enum_initiatorMaskingGroup()
elif ResultClass == 'CIM_StorageExtent':
result = self._enum_storage_extent()
elif ResultClass == 'SE_StorageHardwareID':
result = self._enum_storhdwids()
elif ResultClass == 'Symm_FCSCSIProtocolEndpoint':
result = self._enum_fcscsiendpoint()
elif ResultClass == 'CIM_TargetMaskingGroup':
result = self._assocnames_portgroup()
else:
result = self._default_assocnames(objectpath)
return result
def ReferenceNames(self, objectpath,
ResultClass='CIM_ProtocolControllerForUnit'):
result = None
if ResultClass == 'CIM_ProtocolControllerForUnit':
result = self._ref_unitnames2()
else:
result = self._default_ref(objectpath)
return result
def _ref_unitnames(self):
unitnames = []
unitname = {}
dependent = {}
dependent['CreationClassName'] = self.data.vol_creationclass
dependent['DeviceID'] = self.data.test_volume['id']
dependent['ElementName'] = self.data.test_volume['name']
dependent['SystemName'] = self.data.storage_system
antecedent = {}
antecedent['CreationClassName'] = self.data.lunmask_creationclass
antecedent['DeviceID'] = self.data.lunmaskctrl_id
antecedent['SystemName'] = self.data.storage_system
unitname['Dependent'] = dependent
unitname['Antecedent'] = antecedent
unitname['CreationClassName'] = self.data.unit_creationclass
unitnames.append(unitname)
return unitnames
def _ref_unitnames2(self):
unitnames = []
unitname = {}
dependent = {}
dependent['CreationClassName'] = self.data.vol_creationclass
dependent['DeviceID'] = self.data.test_volume['id']
dependent['ElementName'] = self.data.test_volume['name']
dependent['SystemName'] = self.data.storage_system
antecedent = SYMM_LunMasking()
antecedent['CreationClassName'] = self.data.lunmask_creationclass2
antecedent['SystemName'] = self.data.storage_system
classcimproperty = Fake_CIMProperty()
elementName = (
classcimproperty.fake_getElementNameCIMProperty())
properties = {u'ElementName': elementName}
antecedent.properties = properties
unitname['Dependent'] = dependent
unitname['Antecedent'] = antecedent
unitname['CreationClassName'] = self.data.unit_creationclass
unitnames.append(unitname)
return unitnames
def _default_ref(self, objectpath):
return objectpath
def _assoc_hdwid(self):
assocs = []
assoc = EMC_StorageHardwareID()
assoc['StorageID'] = self.data.connector['initiator']
assoc['SystemName'] = self.data.storage_system
assoc['CreationClassName'] = 'EMC_StorageHardwareID'
assoc.path = assoc
assocs.append(assoc)
for wwpn in self.data.connector['wwpns']:
assoc2 = EMC_StorageHardwareID()
assoc2['StorageID'] = wwpn
assoc2['SystemName'] = self.data.storage_system
assoc2['CreationClassName'] = 'EMC_StorageHardwareID'
assoc2.path = assoc2
assocs.append(assoc2)
assocs.append(assoc)
return assocs
def _assoc_endpoint(self):
assocs = []
assoc = {}
assoc['Name'] = 'iqn.1992-04.com.emc: 50000973f006dd80'
assoc['SystemName'] = self.data.storage_system
assocs.append(assoc)
return assocs
def _assoc_storagegroup(self):
assocs = []
assoc = CIM_DeviceMaskingGroup()
assoc['ElementName'] = 'OS_default_GOLD1_SG'
assoc['SystemName'] = self.data.storage_system
assoc['CreationClassName'] = 'CIM_DeviceMaskingGroup'
assoc.path = assoc
assocs.append(assoc)
return assocs
def _assoc_portgroup(self):
assocs = []
assoc = CIM_TargetMaskingGroup()
assoc['ElementName'] = self.data.port_group
assoc['SystemName'] = self.data.storage_system
assoc['CreationClassName'] = 'CIM_TargetMaskingGroup'
assoc.path = assoc
assocs.append(assoc)
return assocs
def _assoc_lunmaskctrls(self):
ctrls = []
ctrl = EMC_LunMaskingSCSIProtocolController()
ctrl['CreationClassName'] = self.data.lunmask_creationclass
ctrl['DeviceID'] = self.data.lunmaskctrl_id
ctrl['SystemName'] = self.data.storage_system
ctrl['ElementName'] = self.data.lunmaskctrl_name
ctrl.path = ctrl
ctrls.append(ctrl)
return ctrls
# Added test for EMC_StorageVolume associators
def _assoc_storagevolume(self, objectpath):
assocs = []
if 'type' not in objectpath:
vol = self.data.test_volume
elif objectpath['type'] == 'failed_delete_vol':
vol = self.data.failed_delete_vol
elif objectpath['type'] == 'vol1':
vol = self.data.test_volume
elif objectpath['type'] == 'appendVolume':
vol = self.data.test_volume
elif objectpath['type'] == 'failed_vol':
vol = self.data.test_failed_volume
elif objectpath['type'] == 'TargetBaseVol':
vol = self.data.test_failed_volume
else:
return None
vol['DeviceID'] = vol['device_id']
assoc = self._getinstance_storagevolume(vol)
assocs.append(assoc)
return assocs
def _default_assoc(self, objectpath):
return objectpath
def _assocnames_lunmaskctrl(self):
return self._enum_lunmaskctrls()
def _assocnames_hostedservice(self):
return self._enum_hostedservice()
def _assocnames_policyCapabilities(self):
return self._enum_policycapabilities()
def _assocnames_policyrule(self):
return self._enum_policyrules()
def _assocnames_assoctierpolicy(self):
return self._enum_assoctierpolicy()
def _assocnames_storagepool(self):
return self._enum_storagepool()
def _assocnames_storagegroup(self):
return self._enum_storagegroup()
def _assocnames_storagevolume(self):
return self._enum_storagevolume()
def _assocnames_portgroup(self):
return self._enum_portgroup()
def _default_assocnames(self, objectpath):
return objectpath
def _getinstance_storagevolume(self, objectpath):
foundinstance = None
instance = EMC_StorageVolume()
vols = self._enum_storagevolumes()
for vol in vols:
if vol['DeviceID'] == objectpath['DeviceID']:
instance = vol
break
if not instance:
foundinstance = None
else:
foundinstance = instance
return foundinstance
def _getinstance_lunmask(self):
lunmask = {}
lunmask['CreationClassName'] = self.data.lunmask_creationclass
lunmask['DeviceID'] = self.data.lunmaskctrl_id
lunmask['SystemName'] = self.data.storage_system
return lunmask
def _getinstance_initiatormaskinggroup(self, objectpath):
initiatorgroup = SE_InitiatorMaskingGroup()
initiatorgroup['CreationClassName'] = (
self.data.initiatorgroup_creationclass)
initiatorgroup['DeviceID'] = self.data.initiatorgroup_id
initiatorgroup['SystemName'] = self.data.storage_system
initiatorgroup['ElementName'] = self.data.initiatorgroup_name
initiatorgroup.path = initiatorgroup
return initiatorgroup
def _getinstance_storagehardwareid(self, objectpath):
hardwareid = SE_StorageHardwareID()
hardwareid['CreationClassName'] = self.data.hardwareid_creationclass
hardwareid['SystemName'] = self.data.storage_system
hardwareid['StorageID'] = self.data.connector['wwpns'][0]
hardwareid.path = hardwareid
return hardwareid
def _getinstance_pool(self, objectpath):
pool = {}
pool['CreationClassName'] = 'Symm_VirtualProvisioningPool'
pool['ElementName'] = 'gold'
pool['SystemName'] = self.data.storage_system
pool['TotalManagedSpace'] = self.data.totalmanagedspace_bits
pool['EMCSubscribedCapacity'] = self.data.subscribedcapacity_bits
return pool
def _getinstance_unit(self, objectpath):
unit = {}
dependent = {}
dependent['CreationClassName'] = self.data.vol_creationclass
dependent['DeviceID'] = self.data.test_volume['id']
dependent['ElementName'] = self.data.test_volume['name']
dependent['SystemName'] = self.data.storage_system
antecedent = {}
antecedent['CreationClassName'] = self.data.lunmask_creationclass
antecedent['DeviceID'] = self.data.lunmaskctrl_id
antecedent['SystemName'] = self.data.storage_system
unit['Dependent'] = dependent
unit['Antecedent'] = antecedent
unit['CreationClassName'] = self.data.unit_creationclass
unit['DeviceNumber'] = '1'
return unit
def _getinstance_job(self, jobpath):
jobinstance = {}
jobinstance['InstanceID'] = '9999'
if jobpath['status'] == 'failure':
jobinstance['JobState'] = 10
jobinstance['ErrorCode'] = 99
jobinstance['ErrorDescription'] = 'Failure'
else:
jobinstance['JobState'] = 7
jobinstance['ErrorCode'] = 0
jobinstance['ErrorDescription'] = ''
return jobinstance
def _getinstance_policycapabilities(self, policycapabilitypath):
instance = Fake_CIM_TierPolicyServiceCapabilities()
fakeinstance = instance.fake_getpolicyinstance()
return fakeinstance
def _getinstance_syncsvsv(self, objectpath):
svInstance = {}
svInstance['SyncedElement'] = 'SyncedElement'
svInstance['SystemElement'] = 'SystemElement'
svInstance['PercentSynced'] = 100
return svInstance
def _default_getinstance(self, objectpath):
return objectpath
def _enum_stconfsvcs(self):
conf_services = []
conf_service = {}
conf_service['SystemName'] = self.data.storage_system
conf_service['CreationClassName'] =\
self.data.stconf_service_creationclass
conf_services.append(conf_service)
return conf_services
def _enum_ctrlconfsvcs(self):
conf_services = []
conf_service = {}
conf_service['SystemName'] = self.data.storage_system
conf_service['CreationClassName'] =\
self.data.ctrlconf_service_creationclass
conf_services.append(conf_service)
return conf_services
def _enum_elemcompsvcs(self):
comp_services = []
comp_service = {}
comp_service['SystemName'] = self.data.storage_system
comp_service['CreationClassName'] =\
self.data.elementcomp_service_creationclass
comp_services.append(comp_service)
return comp_services
def _enum_storrelocsvcs(self):
reloc_services = []
reloc_service = {}
reloc_service['SystemName'] = self.data.storage_system
reloc_service['CreationClassName'] =\
self.data.storreloc_service_creationclass
reloc_services.append(reloc_service)
return reloc_services
def _enum_replicsvcs(self):
replic_services = []
replic_service = {}
replic_service['SystemName'] = self.data.storage_system
replic_service['CreationClassName'] =\
self.data.replication_service_creationclass
replic_services.append(replic_service)
return replic_services
def _enum_pools(self):
pools = []
pool = {}
pool['InstanceID'] = self.data.storage_system + '+U+' +\
self.data.storage_type
pool['CreationClassName'] = 'Symm_VirtualProvisioningPool'
pool['ElementName'] = 'gold'
pools.append(pool)
return pools
def _enum_pool_details(self):
pools = []
pool = {}
pool['InstanceID'] = self.data.storage_system + '+U+' +\
self.data.storage_type
pool['CreationClassName'] = 'Symm_VirtualProvisioningPool'
pool['TotalManagedSpace'] = 12345678
pool['RemainingManagedSpace'] = 123456
pools.append(pool)
return pools
def _enum_storagevolumes(self):
vols = []
vol = EMC_StorageVolume()
vol['name'] = self.data.test_volume['name']
vol['CreationClassName'] = 'Symm_StorageVolume'
vol['ElementName'] = self.data.test_volume['name']
vol['DeviceID'] = self.data.test_volume['id']
vol['SystemName'] = self.data.storage_system
# Added vol to vol.path
vol['SystemCreationClassName'] = 'Symm_StorageSystem'
vol.path = vol
vol.path.classname = vol['CreationClassName']
classcimproperty = Fake_CIMProperty()
blocksizecimproperty = classcimproperty.fake_getBlockSizeCIMProperty()
consumableBlockscimproperty = (
classcimproperty.fake_getConsumableBlocksCIMProperty())
isCompositecimproperty = (
classcimproperty.fake_getIsCompositeCIMProperty())
properties = {u'ConsumableBlocks': blocksizecimproperty,
u'BlockSize': consumableBlockscimproperty,
u'IsComposite': isCompositecimproperty}
vol.properties = properties
name = {}
name['classname'] = 'Symm_StorageVolume'
keys = {}
keys['CreationClassName'] = 'Symm_StorageVolume'
keys['SystemName'] = self.data.storage_system
keys['DeviceID'] = vol['DeviceID']
keys['SystemCreationClassName'] = 'Symm_StorageSystem'
name['keybindings'] = keys
vol['provider_location'] = str(name)
vols.append(vol)
failed_delete_vol = EMC_StorageVolume()
failed_delete_vol['name'] = 'failed_delete_vol'
failed_delete_vol['CreationClassName'] = 'Symm_StorageVolume'
failed_delete_vol['ElementName'] = 'failed_delete_vol'
failed_delete_vol['DeviceID'] = '99999'
failed_delete_vol['SystemName'] = self.data.storage_system
# Added vol to vol.path
failed_delete_vol['SystemCreationClassName'] = 'Symm_StorageSystem'
failed_delete_vol.path = failed_delete_vol
failed_delete_vol.path.classname =\
failed_delete_vol['CreationClassName']
vols.append(failed_delete_vol)
failed_vol = EMC_StorageVolume()
        failed_vol['name'] = 'failed_vol'
failed_vol['CreationClassName'] = 'Symm_StorageVolume'
failed_vol['ElementName'] = 'failed_vol'
failed_vol['DeviceID'] = '4'
failed_vol['SystemName'] = self.data.storage_system
# Added vol to vol.path
failed_vol['SystemCreationClassName'] = 'Symm_StorageSystem'
failed_vol.path = failed_vol
failed_vol.path.classname =\
failed_vol['CreationClassName']
name_failed = {}
name_failed['classname'] = 'Symm_StorageVolume'
keys_failed = {}
keys_failed['CreationClassName'] = 'Symm_StorageVolume'
keys_failed['SystemName'] = self.data.storage_system
keys_failed['DeviceID'] = failed_vol['DeviceID']
keys_failed['SystemCreationClassName'] = 'Symm_StorageSystem'
name_failed['keybindings'] = keys_failed
failed_vol['provider_location'] = str(name_failed)
vols.append(failed_vol)
return vols
def _enum_initiatorMaskingGroup(self):
initatorgroups = []
initatorgroup = {}
initatorgroup['CreationClassName'] = (
self.data.initiatorgroup_creationclass)
initatorgroup['DeviceID'] = self.data.initiatorgroup_id
initatorgroup['SystemName'] = self.data.storage_system
initatorgroup['ElementName'] = self.data.initiatorgroup_name
initatorgroups.append(initatorgroup)
return initatorgroups
def _enum_storage_system(self):
storagesystems = []
storagesystem = {}
storagesystem['SystemName'] = self.data.storage_system
storagesystem['Name'] = self.data.storage_system
storagesystems.append(storagesystem)
return storagesystems
def _enum_storage_extent(self):
storageExtents = []
storageExtent = CIM_StorageExtent()
storageExtent['CreationClassName'] = (
self.data.storageextent_creationclass)
classcimproperty = Fake_CIMProperty()
isConcatenatedcimproperty = (
classcimproperty.fake_getIsConcatenatedCIMProperty())
properties = {u'IsConcatenated': isConcatenatedcimproperty}
storageExtent.properties = properties
storageExtents.append(storageExtent)
return storageExtents
def _enum_lunmaskctrls(self):
ctrls = []
ctrl = {}
ctrl['CreationClassName'] = self.data.lunmask_creationclass
ctrl['DeviceID'] = self.data.lunmaskctrl_id
ctrl['SystemName'] = self.data.storage_system
ctrl['ElementName'] = self.data.lunmaskctrl_name
ctrls.append(ctrl)
return ctrls
def _enum_hostedservice(self):
hostedservices = []
hostedservice = {}
hostedservice['CreationClassName'] = (
self.data.hostedservice_creationclass)
hostedservice['SystemName'] = self.data.storage_system
hostedservices.append(hostedservice)
return hostedservices
def _enum_policycapabilities(self):
policycapabilities = []
policycapability = {}
policycapability['CreationClassName'] = (
self.data.policycapability_creationclass)
policycapability['SystemName'] = self.data.storage_system
propertiesList = []
CIMProperty = {'is_array': True}
properties = {u'SupportedTierFeatures': CIMProperty}
propertiesList.append(properties)
policycapability['Properties'] = propertiesList
policycapabilities.append(policycapability)
return policycapabilities
def _enum_policyrules(self):
policyrules = []
policyrule = {}
policyrule['CreationClassName'] = self.data.policyrule_creationclass
policyrule['SystemName'] = self.data.storage_system
policyrule['PolicyRuleName'] = self.data.policyrule
policyrules.append(policyrule)
return policyrules
def _enum_assoctierpolicy(self):
assoctierpolicies = []
assoctierpolicy = {}
assoctierpolicy['CreationClassName'] = (
self.data.assoctierpolicy_creationclass)
assoctierpolicies.append(assoctierpolicy)
return assoctierpolicies
def _enum_storagepool(self):
storagepools = []
storagepool = {}
storagepool['CreationClassName'] = self.data.storagepool_creationclass
storagepool['InstanceID'] = self.data.storagepoolid
storagepool['ElementName'] = 'gold'
storagepools.append(storagepool)
return storagepools
def _enum_storagegroup(self):
storagegroups = []
storagegroup = {}
storagegroup['CreationClassName'] = (
self.data.storagegroup_creationclass)
storagegroup['ElementName'] = self.data.storagegroupname
storagegroups.append(storagegroup)
return storagegroups
def _enum_storagevolume(self):
storagevolumes = []
storagevolume = {}
storagevolume['CreationClassName'] = (
self.data.storagevolume_creationclass)
storagevolumes.append(storagevolume)
return storagevolumes
def _enum_hdwidmgmts(self):
services = []
srv = {}
srv['SystemName'] = self.data.storage_system
services.append(srv)
return services
def _enum_storhdwids(self):
storhdwids = []
hdwid = SE_StorageHardwareID()
hdwid['CreationClassName'] = self.data.hardwareid_creationclass
hdwid['StorageID'] = self.data.connector['wwpns'][0]
hdwid.path = hdwid
storhdwids.append(hdwid)
return storhdwids
def _enum_fcscsiendpoint(self):
wwns = []
wwn = {}
wwn['Name'] = "5000090000000000"
wwns.append(wwn)
return wwns
def _enum_portgroup(self):
portgroups = []
portgroup = {}
portgroup['CreationClassName'] = (
'CIM_TargetMaskingGroup')
portgroup['ElementName'] = self.data.port_group
portgroups.append(portgroup)
return portgroups
def _default_enum(self):
names = []
name = {}
name['Name'] = 'default'
names.append(name)
return names
class EMCVMAXISCSIDriverNoFastTestCase(test.TestCase):
def setUp(self):
self.data = EMCVMAXCommonData()
self.tempdir = tempfile.mkdtemp()
super(EMCVMAXISCSIDriverNoFastTestCase, self).setUp()
self.config_file_path = None
self.config_file_1364232 = None
self.create_fake_config_file_no_fast()
self.addCleanup(self._cleanup)
configuration = mock.Mock()
configuration.safe_get.return_value = 'ISCSINoFAST'
configuration.cinder_emc_config_file = self.config_file_path
configuration.config_group = 'ISCSINoFAST'
self.stubs.Set(EMCVMAXISCSIDriver, 'smis_do_iscsi_discovery',
self.fake_do_iscsi_discovery)
self.stubs.Set(EMCVMAXCommon, '_get_ecom_connection',
self.fake_ecom_connection)
instancename = FakeCIMInstanceName()
self.stubs.Set(EMCVMAXUtils, 'get_instance_name',
instancename.fake_getinstancename)
self.stubs.Set(time, 'sleep',
self.fake_sleep)
driver = EMCVMAXISCSIDriver(configuration=configuration)
driver.db = FakeDB()
self.driver = driver
self.driver.utils = EMCVMAXUtils(object)
def create_fake_config_file_no_fast(self):
doc = Document()
emc = doc.createElement("EMC")
doc.appendChild(emc)
array = doc.createElement("Array")
arraytext = doc.createTextNode("1234567891011")
emc.appendChild(array)
array.appendChild(arraytext)
ecomserverip = doc.createElement("EcomServerIp")
ecomserveriptext = doc.createTextNode("1.1.1.1")
emc.appendChild(ecomserverip)
ecomserverip.appendChild(ecomserveriptext)
ecomserverport = doc.createElement("EcomServerPort")
ecomserverporttext = doc.createTextNode("10")
emc.appendChild(ecomserverport)
ecomserverport.appendChild(ecomserverporttext)
ecomusername = doc.createElement("EcomUserName")
ecomusernametext = doc.createTextNode("user")
emc.appendChild(ecomusername)
ecomusername.appendChild(ecomusernametext)
ecompassword = doc.createElement("EcomPassword")
ecompasswordtext = doc.createTextNode("pass")
emc.appendChild(ecompassword)
ecompassword.appendChild(ecompasswordtext)
portgroup = doc.createElement("PortGroup")
portgrouptext = doc.createTextNode("myPortGroup")
portgroup.appendChild(portgrouptext)
portgroups = doc.createElement("PortGroups")
portgroups.appendChild(portgroup)
emc.appendChild(portgroups)
pool = doc.createElement("Pool")
pooltext = doc.createTextNode("gold")
emc.appendChild(pool)
pool.appendChild(pooltext)
array = doc.createElement("Array")
arraytext = doc.createTextNode("0123456789")
emc.appendChild(array)
array.appendChild(arraytext)
timeout = doc.createElement("Timeout")
timeouttext = doc.createTextNode("0")
emc.appendChild(timeout)
timeout.appendChild(timeouttext)
filename = 'cinder_emc_config_ISCSINoFAST.xml'
self.config_file_path = self.tempdir + '/' + filename
f = open(self.config_file_path, 'w')
doc.writexml(f)
f.close()
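        # The file written above is roughly (XML declaration and whitespace
        # aside):
        #   <EMC><Array>1234567891011</Array><EcomServerIp>1.1.1.1</EcomServerIp>
        #   <EcomServerPort>10</EcomServerPort><EcomUserName>user</EcomUserName>
        #   <EcomPassword>pass</EcomPassword>
        #   <PortGroups><PortGroup>myPortGroup</PortGroup></PortGroups>
        #   <Pool>gold</Pool><Array>0123456789</Array><Timeout>0</Timeout></EMC>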
# Create XML config file with newlines and whitespaces
# Bug #1364232
def create_fake_config_file_1364232(self):
filename = 'cinder_emc_config_1364232.xml'
self.config_file_1364232 = self.tempdir + '/' + filename
text_file = open(self.config_file_1364232, "w")
text_file.write("<?xml version='1.0' encoding='UTF-8'?>\n<EMC>\n"
"<EcomServerIp>10.10.10.10</EcomServerIp>\n"
"<EcomServerPort>5988</EcomServerPort>\n"
"<EcomUserName>user\t</EcomUserName>\n"
"<EcomPassword>password</EcomPassword>\n"
"<PortGroups><PortGroup>OS-PORTGROUP1-PG"
"</PortGroup><PortGroup>OS-PORTGROUP2-PG"
" </PortGroup>\n"
"<PortGroup>OS-PORTGROUP3-PG</PortGroup>"
"<PortGroup>OS-PORTGROUP4-PG</PortGroup>"
"</PortGroups>\n<Array>000198700439"
" \n</Array>\n<Pool>FC_SLVR1\n"
"</Pool>\n<FastPolicy>SILVER1</FastPolicy>\n"
"</EMC>")
text_file.close()
def fake_ecom_connection(self):
conn = FakeEcomConnection()
return conn
def fake_do_iscsi_discovery(self, volume):
output = []
item = '10.10.0.50: 3260,1 iqn.1992-04.com.emc: 50000973f006dd80'
output.append(item)
return output
def fake_sleep(self, seconds):
return
def test_wait_for_job_complete(self):
myjob = SE_ConcreteJob()
myjob.classname = 'SE_ConcreteJob'
myjob['InstanceID'] = '9999'
myjob['status'] = 'success'
myjob['type'] = 'type'
myjob['CreationClassName'] = 'SE_ConcreteJob'
myjob['Job'] = myjob
conn = self.fake_ecom_connection()
self.driver.utils._is_job_finished = mock.Mock(
return_value = True)
rc = self.driver.utils._wait_for_job_complete(conn, myjob)
self.assertIsNone(rc)
self.driver.utils._is_job_finished.assert_called_once_with(
conn, myjob)
self.assertEqual(
True,
self.driver.utils._is_job_finished.return_value)
self.driver.utils._is_job_finished.reset_mock()
# Save the original state and restore it after this test
loopingcall_orig = loopingcall.FixedIntervalLoopingCall
loopingcall.FixedIntervalLoopingCall = mock.Mock()
rc = self.driver.utils._wait_for_job_complete(conn, myjob)
self.assertIsNone(rc)
loopingcall.FixedIntervalLoopingCall.assert_called_once_with(
mock.ANY)
loopingcall.FixedIntervalLoopingCall.reset_mock()
loopingcall.FixedIntervalLoopingCall = loopingcall_orig
def test_wait_for_sync(self):
mysync = 'fakesync'
conn = self.fake_ecom_connection()
self.driver.utils._is_sync_complete = mock.Mock(
return_value = True)
rc = self.driver.utils.wait_for_sync(conn, mysync)
self.assertIsNone(rc)
self.driver.utils._is_sync_complete.assert_called_once_with(
conn, mysync)
self.assertEqual(
True,
self.driver.utils._is_sync_complete.return_value)
self.driver.utils._is_sync_complete.reset_mock()
# Save the original state and restore it after this test
loopingcall_orig = loopingcall.FixedIntervalLoopingCall
loopingcall.FixedIntervalLoopingCall = mock.Mock()
rc = self.driver.utils.wait_for_sync(conn, mysync)
self.assertIsNone(rc)
loopingcall.FixedIntervalLoopingCall.assert_called_once_with(
mock.ANY)
loopingcall.FixedIntervalLoopingCall.reset_mock()
loopingcall.FixedIntervalLoopingCall = loopingcall_orig
# Bug 1395830: _find_lun throws exception when lun is not found.
def test_find_lun(self):
keybindings = {'CreationClassName': u'Symm_StorageVolume',
'SystemName': u'SYMMETRIX+000195900551',
'DeviceID': u'1',
'SystemCreationClassName': u'Symm_StorageSystem'}
provider_location = {'classname': 'Symm_StorageVolume',
'keybindings': keybindings}
volume = EMC_StorageVolume()
volume['name'] = 'vol1'
volume['provider_location'] = six.text_type(provider_location)
self.driver.common.conn = self.driver.common._get_ecom_connection()
findlun = self.driver.common._find_lun(volume)
getinstance = self.driver.common.conn._getinstance_storagevolume(
keybindings)
# Found lun.
self.assertEqual(getinstance, findlun)
keybindings2 = {'CreationClassName': u'Symm_StorageVolume',
'SystemName': u'SYMMETRIX+000195900551',
'DeviceID': u'9',
'SystemCreationClassName': u'Symm_StorageSystem'}
provider_location2 = {'classname': 'Symm_StorageVolume',
'keybindings': keybindings2}
volume2 = EMC_StorageVolume()
volume2['name'] = 'myVol'
volume2['provider_location'] = six.text_type(provider_location2)
verify_orig = self.driver.common.utils.get_existing_instance
self.driver.common.utils.get_existing_instance = mock.Mock(
return_value=None)
findlun2 = self.driver.common._find_lun(volume2)
# Not found.
self.assertIsNone(findlun2)
instancename2 = self.driver.utils.get_instance_name(
provider_location2['classname'],
keybindings2)
self.driver.common.utils.get_existing_instance.assert_called_once_with(
self.driver.common.conn, instancename2)
self.driver.common.utils.get_existing_instance.reset_mock()
self.driver.common.utils.get_existing_instance = verify_orig
keybindings3 = {'CreationClassName': u'Symm_StorageVolume',
'SystemName': u'SYMMETRIX+000195900551',
'DeviceID': u'9999',
'SystemCreationClassName': u'Symm_StorageSystem'}
provider_location3 = {'classname': 'Symm_StorageVolume',
'keybindings': keybindings3}
instancename3 = self.driver.utils.get_instance_name(
provider_location3['classname'],
keybindings3)
# Error other than not found.
arg = 9999, "test_error"
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.common.utils.process_exception_args,
arg, instancename3)
# Bug 1393555 - masking view has been deleted by another process.
def test_find_maskingview(self):
conn = self.fake_ecom_connection()
foundMaskingViewInstanceName = (
self.driver.common.masking._find_masking_view(
conn, self.data.lunmaskctrl_name, self.data.storage_system))
# The masking view has been found.
self.assertEqual(
self.data.lunmaskctrl_name,
conn.GetInstance(foundMaskingViewInstanceName)['ElementName'])
self.driver.common.masking.utils.get_existing_instance = mock.Mock(
return_value=None)
foundMaskingViewInstanceName2 = (
self.driver.common.masking._find_masking_view(
conn, self.data.lunmaskctrl_name, self.data.storage_system))
# The masking view has not been found.
self.assertIsNone(foundMaskingViewInstanceName2)
# Bug 1393555 - port group has been deleted by another process.
def test_find_portgroup(self):
conn = self.fake_ecom_connection()
controllerConfigService = (
self.driver.utils.find_controller_configuration_service(
conn, self.data.storage_system))
foundPortGroupInstanceName = (
self.driver.common.masking._find_port_group(
conn, controllerConfigService, self.data.port_group))
# The port group has been found.
self.assertEqual(
self.data.port_group,
conn.GetInstance(foundPortGroupInstanceName)['ElementName'])
self.driver.common.masking.utils.get_existing_instance = mock.Mock(
return_value=None)
foundPortGroupInstanceName2 = (
self.driver.common.masking._find_port_group(
conn, controllerConfigService, self.data.port_group))
# The port group has not been found as it has been deleted
# externally or by another thread.
self.assertIsNone(foundPortGroupInstanceName2)
# Bug 1393555 - storage group has been deleted by another process.
def test_get_storage_group_from_masking_view(self):
conn = self.fake_ecom_connection()
foundStorageGroupInstanceName = (
self.driver.common.masking._get_storage_group_from_masking_view(
conn, self.data.lunmaskctrl_name, self.data.storage_system))
# The storage group has been found.
self.assertEqual(
self.data.storagegroupname,
conn.GetInstance(foundStorageGroupInstanceName)['ElementName'])
self.driver.common.masking.utils.get_existing_instance = mock.Mock(
return_value=None)
foundStorageGroupInstanceName2 = (
self.driver.common.masking._get_storage_group_from_masking_view(
conn, self.data.lunmaskctrl_name, self.data.storage_system))
# The storage group has not been found as it has been deleted
# externally or by another thread.
self.assertIsNone(foundStorageGroupInstanceName2)
# Bug 1393555 - initiator group has been deleted by another process.
def test_get_initiator_group_from_masking_view(self):
conn = self.fake_ecom_connection()
foundInitiatorGroupInstanceName = (
self.driver.common.masking._get_initiator_group_from_masking_view(
conn, self.data.lunmaskctrl_name, self.data.storage_system))
# The initiator group has been found.
self.assertEqual(
self.data.initiatorgroup_name,
conn.GetInstance(foundInitiatorGroupInstanceName)['ElementName'])
self.driver.common.masking.utils.get_existing_instance = mock.Mock(
return_value=None)
        foundInitiatorGroupInstanceName2 = (
            self.driver.common.masking._get_initiator_group_from_masking_view(
                conn, self.data.lunmaskctrl_name, self.data.storage_system))
# The initiator group has not been found as it has been deleted
# externally or by another thread.
self.assertIsNone(foundInitiatorGroupInstanceName2)
# Bug 1393555 - port group has been deleted by another process.
def test_get_port_group_from_masking_view(self):
conn = self.fake_ecom_connection()
foundPortGroupInstanceName = (
self.driver.common.masking._get_port_group_from_masking_view(
conn, self.data.lunmaskctrl_name, self.data.storage_system))
# The port group has been found.
self.assertEqual(
self.data.port_group,
conn.GetInstance(foundPortGroupInstanceName)['ElementName'])
self.driver.common.masking.utils.get_existing_instance = mock.Mock(
return_value=None)
foundPortGroupInstanceName2 = (
self.driver.common.masking._get_port_group_from_masking_view(
conn, self.data.lunmaskctrl_name, self.data.storage_system))
# The port group has not been found as it has been deleted
# externally or by another thread.
self.assertIsNone(foundPortGroupInstanceName2)
# Bug 1393555 - initiator group has been deleted by another process.
def test_find_initiator_group(self):
conn = self.fake_ecom_connection()
controllerConfigService = (
self.driver.utils.find_controller_configuration_service(
conn, self.data.storage_system))
foundInitiatorGroupInstanceName = (
self.driver.common.masking._find_initiator_masking_group(
conn, controllerConfigService, self.data.initiatorNames))
# The initiator group has been found.
self.assertEqual(
self.data.initiatorgroup_name,
conn.GetInstance(foundInitiatorGroupInstanceName)['ElementName'])
self.driver.common.masking.utils.get_existing_instance = mock.Mock(
return_value=None)
foundInitiatorGroupInstanceName2 = (
self.driver.common.masking._find_initiator_masking_group(
conn, controllerConfigService, self.data.initiatorNames))
# The initiator group has not been found as it has been deleted
# externally or by another thread.
self.assertIsNone(foundInitiatorGroupInstanceName2)
# Bug 1393555 - hardware id has been deleted by another process.
def test_get_storage_hardware_id_instance_names(self):
conn = self.fake_ecom_connection()
foundHardwareIdInstanceNames = (
self.driver.common.masking._get_storage_hardware_id_instance_names(
conn, self.data.initiatorNames, self.data.storage_system))
# The hardware id list has been found.
self.assertEqual(
'123456789012345',
conn.GetInstance(
foundHardwareIdInstanceNames[0])['StorageID'])
self.driver.common.masking.utils.get_existing_instance = mock.Mock(
return_value=None)
foundHardwareIdInstanceNames2 = (
self.driver.common.masking._get_storage_hardware_id_instance_names(
conn, self.data.initiatorNames, self.data.storage_system))
# The hardware id list has not been found as it has been removed
# externally.
        self.assertEqual(0, len(foundHardwareIdInstanceNames2))
# Bug 1393555 - controller has been deleted by another process.
def test_find_lunmasking_scsi_protocol_controller(self):
self.driver.common.conn = self.fake_ecom_connection()
foundControllerInstanceName = (
self.driver.common._find_lunmasking_scsi_protocol_controller(
self.data.storage_system, self.data.connector))
# The controller has been found.
self.assertEqual(
'OS-fakehost-gold-MV',
self.driver.common.conn.GetInstance(
foundControllerInstanceName)['ElementName'])
self.driver.common.utils.get_existing_instance = mock.Mock(
return_value=None)
foundControllerInstanceName2 = (
self.driver.common._find_lunmasking_scsi_protocol_controller(
self.data.storage_system, self.data.connector))
# The controller has not been found as it has been removed
# externally.
self.assertIsNone(foundControllerInstanceName2)
# Bug 1393555 - storage group has been deleted by another process.
def test_get_policy_default_storage_group(self):
conn = self.fake_ecom_connection()
controllerConfigService = (
self.driver.utils.find_controller_configuration_service(
conn, self.data.storage_system))
foundStorageMaskingGroupInstanceName = (
self.driver.common.fast.get_policy_default_storage_group(
conn, controllerConfigService, 'OS_default'))
# The storage group has been found.
self.assertEqual(
'OS_default_GOLD1_SG',
conn.GetInstance(
foundStorageMaskingGroupInstanceName)['ElementName'])
self.driver.common.fast.utils.get_existing_instance = mock.Mock(
return_value=None)
foundStorageMaskingGroupInstanceName2 = (
self.driver.common.fast.get_policy_default_storage_group(
conn, controllerConfigService, 'OS_default'))
# The storage group has not been found as it has been removed
# externally.
self.assertIsNone(foundStorageMaskingGroupInstanceName2)
# Bug 1393555 - policy has been deleted by another process.
def test_get_capacities_associated_to_policy(self):
conn = self.fake_ecom_connection()
total_capacity_gb, free_capacity_gb = (
self.driver.common.fast.get_capacities_associated_to_policy(
conn, self.data.storage_system, self.data.policyrule))
# The capacities associated to the policy have been found.
self.assertEqual(self.data.totalmanagedspace_gbs, total_capacity_gb)
self.assertEqual(self.data.subscribedcapacity_gbs, free_capacity_gb)
self.driver.common.fast.utils.get_existing_instance = mock.Mock(
return_value=None)
total_capacity_gb_2, free_capacity_gb_2 = (
self.driver.common.fast.get_capacities_associated_to_policy(
conn, self.data.storage_system, self.data.policyrule))
# The capacities have not been found as the policy has been
# removed externally.
self.assertEqual(0, total_capacity_gb_2)
self.assertEqual(0, free_capacity_gb_2)
# Bug 1393555 - storage group has been deleted by another process.
def test_find_storage_masking_group(self):
conn = self.fake_ecom_connection()
controllerConfigService = (
self.driver.utils.find_controller_configuration_service(
conn, self.data.storage_system))
foundStorageMaskingGroupInstanceName = (
self.driver.common.utils.find_storage_masking_group(
conn, controllerConfigService, self.data.storagegroupname))
# The storage group has been found.
self.assertEqual(
self.data.storagegroupname,
conn.GetInstance(
foundStorageMaskingGroupInstanceName)['ElementName'])
self.driver.common.utils.get_existing_instance = mock.Mock(
return_value=None)
foundStorageMaskingGroupInstanceName2 = (
self.driver.common.utils.find_storage_masking_group(
conn, controllerConfigService, self.data.storagegroupname))
# The storage group has not been found as it has been removed
# externally.
self.assertIsNone(foundStorageMaskingGroupInstanceName2)
# Bug 1393555 - pool has been deleted by another process.
def test_get_pool_by_name(self):
conn = self.fake_ecom_connection()
foundPoolInstanceName = self.driver.common.utils.get_pool_by_name(
conn, self.data.poolname, self.data.storage_system)
# The pool has been found.
self.assertEqual(
self.data.poolname,
conn.GetInstance(foundPoolInstanceName)['ElementName'])
self.driver.common.utils.get_existing_instance = mock.Mock(
return_value=None)
foundPoolInstanceName2 = self.driver.common.utils.get_pool_by_name(
conn, self.data.poolname, self.data.storage_system)
# The pool has not been found as it has been removed externally.
self.assertIsNone(foundPoolInstanceName2)
def test_get_volume_stats_1364232(self):
self.create_fake_config_file_1364232()
self.assertEqual('000198700439',
self.driver.utils.parse_array_name_from_file(
self.config_file_1364232))
self.assertEqual('FC_SLVR1',
self.driver.utils.parse_pool_name_from_file(
self.config_file_1364232))
self.assertEqual('SILVER1',
self.driver.utils.parse_fast_policy_name_from_file(
self.config_file_1364232))
self.assertIn('OS-PORTGROUP',
self.driver.utils.parse_file_to_get_port_group_name(
self.config_file_1364232))
bExists = os.path.exists(self.config_file_1364232)
if bExists:
os.remove(self.config_file_1364232)
@mock.patch.object(
EMCVMAXUtils,
'find_storageSystem',
return_value=None)
@mock.patch.object(
EMCVMAXFast,
'is_tiering_policy_enabled',
return_value=False)
@mock.patch.object(
EMCVMAXUtils,
'get_pool_capacities',
return_value=(1234, 1200))
@mock.patch.object(
EMCVMAXUtils,
'parse_array_name_from_file',
return_value="123456789")
def test_get_volume_stats_no_fast(self, mock_storage_system,
mock_is_fast_enabled,
mock_capacity, mock_array):
self.driver.get_volume_stats(True)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'ISCSINoFAST'})
@mock.patch.object(
EMCVMAXCommon,
'_get_pool_and_storage_system',
return_value=(None, EMCVMAXCommonData.storage_system))
def test_create_volume_no_fast_success(
self, _mock_volume_type, mock_storage_system):
self.driver.create_volume(self.data.test_volume_v2)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'storagetype: stripedmetacount': '4',
'volume_backend_name': 'ISCSINoFAST'})
@mock.patch.object(
EMCVMAXCommon,
'_get_pool_and_storage_system',
return_value=(None, EMCVMAXCommonData.storage_system))
def test_create_volume_no_fast_striped_success(
self, _mock_volume_type, mock_storage_system):
self.driver.create_volume(self.data.test_volume_v2)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'ISCSINoFAST'})
@mock.patch.object(
EMCVMAXCommon,
'_get_pool_and_storage_system',
return_value=(None, EMCVMAXCommonData.storage_system))
def test_delete_volume_no_fast_success(
self, _mock_volume_type, mock_storage_system):
self.driver.delete_volume(self.data.test_volume)
def test_create_volume_no_fast_failed(self):
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_volume,
self.data.test_failed_volume)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'ISCSINoFAST'})
def test_delete_volume_no_fast_notfound(self, _mock_volume_type):
notfound_delete_vol = {}
notfound_delete_vol['name'] = 'notfound_delete_vol'
notfound_delete_vol['id'] = '10'
notfound_delete_vol['CreationClassName'] = 'Symmm_StorageVolume'
notfound_delete_vol['SystemName'] = self.data.storage_system
notfound_delete_vol['DeviceID'] = notfound_delete_vol['id']
notfound_delete_vol['SystemCreationClassName'] = 'Symm_StorageSystem'
notfound_delete_vol['volume_type_id'] = 'abc'
notfound_delete_vol['provider_location'] = None
name = {}
name['classname'] = 'Symm_StorageVolume'
keys = {}
keys['CreationClassName'] = notfound_delete_vol['CreationClassName']
keys['SystemName'] = notfound_delete_vol['SystemName']
keys['DeviceID'] = notfound_delete_vol['DeviceID']
keys['SystemCreationClassName'] =\
notfound_delete_vol['SystemCreationClassName']
name['keybindings'] = keys
self.driver.delete_volume(notfound_delete_vol)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'ISCSINoFAST'})
@mock.patch.object(
EMCVMAXCommon,
'_get_pool_and_storage_system',
return_value=(None, EMCVMAXCommonData.storage_system))
def test_delete_volume_failed(
self, _mock_volume_type, mock_storage_system):
self.driver.create_volume(self.data.failed_delete_vol)
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.delete_volume,
self.data.failed_delete_vol)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'ISCSINoFAST'})
@mock.patch.object(
EMCVMAXMasking,
'_wrap_get_storage_group_from_volume',
return_value=None)
@mock.patch.object(
EMCVMAXCommon,
'_wrap_find_device_number',
return_value={'storagesystem': EMCVMAXCommonData.storage_system})
@mock.patch.object(
EMCVMAXUtils,
'find_storage_masking_group',
return_value='value')
def test_map_new_masking_view_no_fast_success(self, _mock_volume_type,
mock_wrap_group,
mock_wrap_device,
mock_storage_group):
self.driver.initialize_connection(self.data.test_volume,
self.data.connector)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'ISCSINoFAST'})
@mock.patch.object(
EMCVMAXMasking,
'_wrap_get_storage_group_from_volume',
return_value=None)
@mock.patch.object(
EMCVMAXCommon,
'_wrap_find_device_number',
return_value={'hostlunid': 1,
'storagesystem': EMCVMAXCommonData.storage_system})
@mock.patch.object(
EMCVMAXUtils,
'find_storage_masking_group',
return_value='value')
@mock.patch.object(
EMCVMAXCommon,
'_is_same_host',
return_value=False)
def test_map_live_migration_no_fast_success(self, _mock_volume_type,
mock_wrap_group,
mock_wrap_device,
mock_storage_group,
mock_same_host):
self.driver.initialize_connection(self.data.test_volume,
self.data.connector)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'ISCSINoFAST'})
@mock.patch.object(
EMCVMAXMasking,
'_wrap_get_storage_group_from_volume',
return_value=None)
@mock.patch.object(
EMCVMAXCommon,
'_wrap_find_device_number',
return_value={'hostlunid': 1,
'storagesystem': EMCVMAXCommonData.storage_system})
def test_already_mapped_no_fast_success(self, _mock_volume_type,
mock_wrap_group,
mock_wrap_device):
self.driver.initialize_connection(self.data.test_volume,
self.data.connector)
@mock.patch.object(
EMCVMAXMasking,
'_wrap_get_storage_group_from_volume',
return_value=None)
@mock.patch.object(
EMCVMAXCommon,
'_wrap_find_device_number',
return_value={'storagesystem': EMCVMAXCommonData.storage_system})
def test_map_no_fast_failed(self, mock_wrap_group, mock_wrap_device):
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.initialize_connection,
self.data.test_volume,
self.data.connector)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'ISCSINoFAST'})
@mock.patch.object(
EMCVMAXUtils,
'find_storage_masking_group',
return_value=EMCVMAXCommonData.storagegroupname)
def test_detach_no_fast_success(self, mock_volume_type,
mock_storage_group):
self.driver.terminate_connection(
self.data.test_volume, self.data.connector)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'ISCSINoFAST'})
@mock.patch.object(
EMCVMAXUtils, 'find_storage_system',
return_value={'Name': EMCVMAXCommonData.storage_system})
@mock.patch.object(
EMCVMAXUtils,
'find_storage_masking_group',
return_value=EMCVMAXCommonData.storagegroupname)
def test_detach_no_fast_last_volume_success(
self, mock_volume_type,
mock_storage_system, mock_storage_group):
self.driver.terminate_connection(
self.data.test_volume, self.data.connector)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'ISCSINoFAST'})
@mock.patch.object(
EMCVMAXUtils,
'get_volume_size',
return_value='2147483648')
def test_extend_volume_no_fast_success(
self, _mock_volume_type, mock_volume_size):
newSize = '2'
self.driver.extend_volume(self.data.test_volume, newSize)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'storagetype: stripedmetacount': '4',
'volume_backend_name': 'ISCSINoFAST'})
@mock.patch.object(
EMCVMAXUtils,
'check_if_volume_is_extendable',
return_value='False')
def test_extend_volume_striped_no_fast_failed(
self, _mock_volume_type, _mock_is_extendable):
newSize = '2'
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.extend_volume,
self.data.test_volume,
newSize)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'ISCSINoFAST'})
@mock.patch.object(
FakeDB,
'volume_get',
return_value=EMCVMAXCommonData.test_source_volume)
@mock.patch.object(
EMCVMAXUtils,
'get_volume_meta_head',
return_value=[EMCVMAXCommonData.test_volume])
@mock.patch.object(
EMCVMAXUtils,
'get_meta_members_capacity_in_bit',
return_value=[1234567, 7654321])
@mock.patch.object(
EMCVMAXCommon,
'_get_pool_and_storage_system',
return_value=(None, EMCVMAXCommonData.storage_system))
def test_create_snapshot_different_sizes_meta_no_fast_success(
self, mock_volume_type, mock_volume,
mock_meta, mock_size, mock_pool):
self.data.test_volume['volume_name'] = "vmax-1234567"
common = self.driver.common
volumeDict = {'classname': u'Symm_StorageVolume',
'keybindings': EMCVMAXCommonData.keybindings}
common.provision.create_volume_from_pool = (
mock.Mock(return_value=(volumeDict, 0L)))
common.provision.get_volume_dict_from_job = (
mock.Mock(return_value=volumeDict))
self.driver.create_snapshot(self.data.test_volume)
def test_create_snapshot_no_fast_failed(self):
self.data.test_volume['volume_name'] = "vmax-1234567"
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_snapshot,
self.data.test_volume)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'ISCSINoFAST'})
@mock.patch.object(
EMCVMAXCommon,
'_find_storage_sync_sv_sv',
return_value=(None, None))
@mock.patch.object(
EMCVMAXUtils,
'get_volume_meta_head',
return_value=[EMCVMAXCommonData.test_volume])
@mock.patch.object(
EMCVMAXUtils,
'get_meta_members_capacity_in_bit',
return_value=[1234567])
def test_create_volume_from_same_size_meta_snapshot(
self, mock_volume_type, mock_sync_sv, mock_meta, mock_size):
self.data.test_volume['volume_name'] = "vmax-1234567"
self.driver.create_volume_from_snapshot(
self.data.test_volume, self.data.test_volume)
def test_create_volume_from_snapshot_no_fast_failed(self):
self.data.test_volume['volume_name'] = "vmax-1234567"
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_volume_from_snapshot,
self.data.test_volume,
EMCVMAXCommonData.test_source_volume)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'ISCSINoFAST'})
@mock.patch.object(
FakeDB,
'volume_get',
return_value=EMCVMAXCommonData.test_source_volume)
@mock.patch.object(
EMCVMAXCommon,
'_find_storage_sync_sv_sv',
return_value=(None, None))
@mock.patch.object(
EMCVMAXUtils,
'get_volume_meta_head',
return_value=None)
def test_create_clone_simple_volume_no_fast_success(
self, mock_volume_type, mock_volume, mock_sync_sv,
mock_simple_volume):
self.data.test_volume['volume_name'] = "vmax-1234567"
self.driver.create_cloned_volume(self.data.test_volume,
EMCVMAXCommonData.test_source_volume)
def test_create_clone_no_fast_failed(self):
self.data.test_volume['volume_name'] = "vmax-1234567"
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_cloned_volume,
self.data.test_volume,
EMCVMAXCommonData.test_source_volume)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'ISCSINoFAST'})
def test_migrate_volume_no_fast_success(self, _mock_volume_type):
self.driver.migrate_volume(self.data.test_ctxt, self.data.test_volume,
self.data.test_host)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'ISCSINoFAST'})
@mock.patch.object(
EMCVMAXUtils,
'parse_pool_instance_id',
return_value=('silver', 'SYMMETRIX+000195900551'))
def test_retype_volume_no_fast_success(
self, _mock_volume_type, mock_values):
self.driver.retype(
self.data.test_ctxt, self.data.test_volume, self.data.new_type,
self.data.diff, self.data.test_host)
def test_check_for_setup_error(self):
self.driver.configuration.iscsi_ip_address = '1.1.1.1'
self.driver.check_for_setup_error()
self.driver.configuration.iscsi_ip_address = None
self.assertRaises(exception.InvalidInput,
self.driver.check_for_setup_error)
def _cleanup(self):
bExists = os.path.exists(self.config_file_path)
if bExists:
os.remove(self.config_file_path)
shutil.rmtree(self.tempdir)
class EMCVMAXISCSIDriverFastTestCase(test.TestCase):
def setUp(self):
self.data = EMCVMAXCommonData()
self.tempdir = tempfile.mkdtemp()
super(EMCVMAXISCSIDriverFastTestCase, self).setUp()
self.config_file_path = None
self.create_fake_config_file_fast()
self.addCleanup(self._cleanup)
configuration = mock.Mock()
configuration.cinder_emc_config_file = self.config_file_path
configuration.safe_get.return_value = 'ISCSIFAST'
configuration.config_group = 'ISCSIFAST'
self.stubs.Set(EMCVMAXISCSIDriver, 'smis_do_iscsi_discovery',
self.fake_do_iscsi_discovery)
self.stubs.Set(EMCVMAXCommon, '_get_ecom_connection',
self.fake_ecom_connection)
instancename = FakeCIMInstanceName()
self.stubs.Set(EMCVMAXUtils, 'get_instance_name',
instancename.fake_getinstancename)
self.stubs.Set(time, 'sleep',
self.fake_sleep)
driver = EMCVMAXISCSIDriver(configuration=configuration)
driver.db = FakeDB()
self.driver = driver
def create_fake_config_file_fast(self):
doc = Document()
emc = doc.createElement("EMC")
doc.appendChild(emc)
array = doc.createElement("Array")
arraytext = doc.createTextNode("1234567891011")
emc.appendChild(array)
array.appendChild(arraytext)
fastPolicy = doc.createElement("FastPolicy")
fastPolicyText = doc.createTextNode("GOLD1")
emc.appendChild(fastPolicy)
fastPolicy.appendChild(fastPolicyText)
ecomserverip = doc.createElement("EcomServerIp")
ecomserveriptext = doc.createTextNode("1.1.1.1")
emc.appendChild(ecomserverip)
ecomserverip.appendChild(ecomserveriptext)
ecomserverport = doc.createElement("EcomServerPort")
ecomserverporttext = doc.createTextNode("10")
emc.appendChild(ecomserverport)
ecomserverport.appendChild(ecomserverporttext)
ecomusername = doc.createElement("EcomUserName")
ecomusernametext = doc.createTextNode("user")
emc.appendChild(ecomusername)
ecomusername.appendChild(ecomusernametext)
ecompassword = doc.createElement("EcomPassword")
ecompasswordtext = doc.createTextNode("pass")
emc.appendChild(ecompassword)
ecompassword.appendChild(ecompasswordtext)
timeout = doc.createElement("Timeout")
timeouttext = doc.createTextNode("0")
emc.appendChild(timeout)
timeout.appendChild(timeouttext)
portgroup = doc.createElement("PortGroup")
portgrouptext = doc.createTextNode("myPortGroup")
portgroup.appendChild(portgrouptext)
pool = doc.createElement("Pool")
pooltext = doc.createTextNode("gold")
emc.appendChild(pool)
pool.appendChild(pooltext)
array = doc.createElement("Array")
arraytext = doc.createTextNode("0123456789")
emc.appendChild(array)
array.appendChild(arraytext)
portgroups = doc.createElement("PortGroups")
portgroups.appendChild(portgroup)
emc.appendChild(portgroups)
filename = 'cinder_emc_config_ISCSIFAST.xml'
self.config_file_path = self.tempdir + '/' + filename
f = open(self.config_file_path, 'w')
doc.writexml(f)
f.close()
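    # For reference, the DOM-building code above serializes via writexml
    # (no pretty-printing, so a single line on disk) to roughly the XML
    # below; it is reformatted here for readability, and the duplicate
    # <Array> element mirrors the two appends above:
    #
    #   <EMC>
    #     <Array>1234567891011</Array>
    #     <FastPolicy>GOLD1</FastPolicy>
    #     <EcomServerIp>1.1.1.1</EcomServerIp>
    #     <EcomServerPort>10</EcomServerPort>
    #     <EcomUserName>user</EcomUserName>
    #     <EcomPassword>pass</EcomPassword>
    #     <Timeout>0</Timeout>
    #     <Pool>gold</Pool>
    #     <Array>0123456789</Array>
    #     <PortGroups><PortGroup>myPortGroup</PortGroup></PortGroups>
    #   </EMC>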
def fake_ecom_connection(self):
conn = FakeEcomConnection()
return conn
def fake_do_iscsi_discovery(self, volume):
output = []
item = '10.10.0.50: 3260,1 iqn.1992-04.com.emc: 50000973f006dd80'
output.append(item)
return output
def fake_sleep(self, seconds):
return
@mock.patch.object(
EMCVMAXUtils,
'find_storageSystem',
return_value=None)
@mock.patch.object(
EMCVMAXFast,
'is_tiering_policy_enabled',
return_value=True)
@mock.patch.object(
EMCVMAXFast,
'get_tier_policy_by_name',
return_value=None)
@mock.patch.object(
EMCVMAXFast,
'get_capacities_associated_to_policy',
return_value=(1234, 1200))
@mock.patch.object(
EMCVMAXUtils,
'parse_array_name_from_file',
return_value="123456789")
def test_get_volume_stats_fast(self, mock_storage_system,
mock_is_fast_enabled,
mock_get_policy, mock_capacity, mock_array):
self.driver.get_volume_stats(True)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'ISCSIFAST'})
@mock.patch.object(
EMCVMAXCommon,
'_get_pool_and_storage_system',
return_value=(None, EMCVMAXCommonData.storage_system))
@mock.patch.object(
EMCVMAXFast,
'get_pool_associated_to_policy',
return_value=1)
def test_create_volume_fast_success(
self, _mock_volume_type, mock_storage_system, mock_pool_policy):
self.driver.create_volume(self.data.test_volume_v2)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'storagetype: stripedmetacount': '4',
'volume_backend_name': 'ISCSIFAST'})
@mock.patch.object(
EMCVMAXCommon,
'_get_pool_and_storage_system',
return_value=(None, EMCVMAXCommonData.storage_system))
@mock.patch.object(
EMCVMAXFast,
'get_pool_associated_to_policy',
return_value=1)
def test_create_volume_fast_striped_success(
self, _mock_volume_type, mock_storage_system, mock_pool_policy):
self.driver.create_volume(self.data.test_volume_v2)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'ISCSIFAST'})
@mock.patch.object(
EMCVMAXMasking,
'_wrap_get_storage_group_from_volume',
return_value=None)
def test_delete_volume_fast_success(
self, _mock_volume_type, mock_storage_group):
self.driver.delete_volume(self.data.test_volume)
def test_create_volume_fast_failed(self):
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_volume,
self.data.test_failed_volume)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'ISCSIFAST'})
@mock.patch.object(
EMCVMAXMasking,
'_wrap_get_storage_group_from_volume',
return_value=None)
def test_delete_volume_fast_notfound(
self, _mock_volume_type, mock_wrapper):
notfound_delete_vol = {}
notfound_delete_vol['name'] = 'notfound_delete_vol'
notfound_delete_vol['id'] = '10'
notfound_delete_vol['CreationClassName'] = 'Symmm_StorageVolume'
notfound_delete_vol['SystemName'] = self.data.storage_system
notfound_delete_vol['DeviceID'] = notfound_delete_vol['id']
notfound_delete_vol['SystemCreationClassName'] = 'Symm_StorageSystem'
name = {}
name['classname'] = 'Symm_StorageVolume'
keys = {}
keys['CreationClassName'] = notfound_delete_vol['CreationClassName']
keys['SystemName'] = notfound_delete_vol['SystemName']
keys['DeviceID'] = notfound_delete_vol['DeviceID']
keys['SystemCreationClassName'] =\
notfound_delete_vol['SystemCreationClassName']
name['keybindings'] = keys
notfound_delete_vol['volume_type_id'] = 'abc'
notfound_delete_vol['provider_location'] = None
self.driver.delete_volume(notfound_delete_vol)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'ISCSIFAST'})
@mock.patch.object(
EMCVMAXMasking,
'_wrap_get_storage_group_from_volume',
return_value=None)
@mock.patch.object(
EMCVMAXCommon,
'_get_pool_and_storage_system',
return_value=(None, EMCVMAXCommonData.storage_system))
@mock.patch.object(
EMCVMAXFast,
'get_pool_associated_to_policy',
return_value=1)
def test_delete_volume_fast_failed(
self, _mock_volume_type, _mock_storage_group,
mock_storage_system, mock_policy_pool):
self.driver.create_volume(self.data.failed_delete_vol)
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.delete_volume,
self.data.failed_delete_vol)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'ISCSIFAST'})
@mock.patch.object(
EMCVMAXMasking,
'_wrap_get_storage_group_from_volume',
return_value=None)
@mock.patch.object(
EMCVMAXCommon,
'_wrap_find_device_number',
return_value={'hostlunid': 1,
'storagesystem': EMCVMAXCommonData.storage_system})
def test_map_fast_success(self, _mock_volume_type, mock_wrap_group,
mock_wrap_device):
self.driver.initialize_connection(self.data.test_volume,
self.data.connector)
@mock.patch.object(
EMCVMAXMasking,
'_wrap_get_storage_group_from_volume',
return_value=None)
@mock.patch.object(
EMCVMAXCommon,
'_wrap_find_device_number',
return_value={'storagesystem': EMCVMAXCommonData.storage_system})
def test_map_fast_failed(self, mock_wrap_group, mock_wrap_device):
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.initialize_connection,
self.data.test_volume,
self.data.connector)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'ISCSIFAST'})
@mock.patch.object(
EMCVMAXUtils,
'find_storage_masking_group',
return_value=EMCVMAXCommonData.storagegroupname)
def test_detach_fast_success(self, mock_volume_type,
mock_storage_group):
self.driver.terminate_connection(
self.data.test_volume, self.data.connector)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'ISCSIFAST'})
@mock.patch.object(
EMCVMAXUtils, 'find_storage_system',
return_value={'Name': EMCVMAXCommonData.storage_system})
@mock.patch.object(
EMCVMAXUtils,
'find_storage_masking_group',
return_value=EMCVMAXCommonData.storagegroupname)
def test_detach_fast_last_volume_success(
self, mock_volume_type,
mock_storage_system, mock_storage_group):
self.driver.terminate_connection(
self.data.test_volume, self.data.connector)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'ISCSIFAST'})
@mock.patch.object(
EMCVMAXUtils,
'get_volume_size',
return_value='2147483648')
def test_extend_volume_fast_success(
self, _mock_volume_type, mock_volume_size):
newSize = '2'
self.driver.extend_volume(self.data.test_volume, newSize)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'ISCSIFAST'})
@mock.patch.object(
EMCVMAXUtils,
'check_if_volume_is_extendable',
return_value='False')
def test_extend_volume_striped_fast_failed(
self, _mock_volume_type, _mock_is_extendable):
newSize = '2'
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.extend_volume,
self.data.test_volume,
newSize)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'ISCSIFAST'})
@mock.patch.object(
FakeDB,
'volume_get',
return_value=EMCVMAXCommonData.test_source_volume)
@mock.patch.object(
EMCVMAXFast,
'get_pool_associated_to_policy',
return_value=1)
@mock.patch.object(
EMCVMAXUtils,
'get_volume_meta_head',
return_value=[EMCVMAXCommonData.test_volume])
@mock.patch.object(
EMCVMAXUtils,
'get_meta_members_capacity_in_bit',
return_value=[1234567, 7654321])
@mock.patch.object(
EMCVMAXCommon,
'_get_pool_and_storage_system',
return_value=(None, EMCVMAXCommonData.storage_system))
def test_create_snapshot_different_sizes_meta_fast_success(
self, mock_volume_type, mock_volume, mock_meta,
mock_size, mock_pool, mock_policy):
self.data.test_volume['volume_name'] = "vmax-1234567"
common = self.driver.common
volumeDict = {'classname': u'Symm_StorageVolume',
'keybindings': EMCVMAXCommonData.keybindings}
common.provision.create_volume_from_pool = (
mock.Mock(return_value=(volumeDict, 0L)))
common.provision.get_volume_dict_from_job = (
mock.Mock(return_value=volumeDict))
common.fast.is_volume_in_default_SG = (
mock.Mock(return_value=True))
self.driver.create_snapshot(self.data.test_volume)
def test_create_snapshot_fast_failed(self):
self.data.test_volume['volume_name'] = "vmax-1234567"
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_snapshot,
self.data.test_volume)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'ISCSIFAST'})
@mock.patch.object(
EMCVMAXCommon,
'_find_storage_sync_sv_sv',
return_value=(None, None))
@mock.patch.object(
EMCVMAXUtils,
'get_volume_meta_head',
return_value=[EMCVMAXCommonData.test_volume])
@mock.patch.object(
EMCVMAXUtils,
'get_meta_members_capacity_in_bit',
return_value=[1234567])
def test_create_volume_from_same_size_meta_snapshot(
self, mock_volume_type, mock_sync_sv, mock_meta, mock_size):
self.data.test_volume['volume_name'] = "vmax-1234567"
self.driver.common.utils.find_storage_configuration_service = (
mock.Mock(return_value=EMCVMAXCommonData.storage_system))
self.driver.common._get_or_create_default_storage_group = (
mock.Mock(return_value=EMCVMAXCommonData.default_storage_group))
self.driver.common.fast.is_volume_in_default_SG = (
mock.Mock(return_value=True))
self.driver.create_volume_from_snapshot(
self.data.test_volume, self.data.test_volume)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'ISCSIFAST',
'FASTPOLICY': 'FC_GOLD1'})
@mock.patch.object(
EMCVMAXUtils,
'find_replication_service',
return_value=None)
@mock.patch.object(
EMCVMAXCommon,
'_find_storage_sync_sv_sv',
return_value=(None, None))
@mock.patch.object(
EMCVMAXUtils,
'get_volume_meta_head',
return_value=None)
def test_create_volume_from_snapshot_fast_failed(
self, mock_type, mock_rep_service, mock_sync_sv, mock_meta):
self.data.test_volume['volume_name'] = "vmax-1234567"
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_volume_from_snapshot,
self.data.test_volume,
EMCVMAXCommonData.test_source_volume)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'ISCSIFAST'})
@mock.patch.object(
FakeDB,
'volume_get',
return_value=EMCVMAXCommonData.test_source_volume)
@mock.patch.object(
EMCVMAXCommon,
'_find_storage_sync_sv_sv',
return_value=(None, None))
@mock.patch.object(
EMCVMAXUtils,
'get_volume_meta_head',
return_value=None)
def test_create_clone_simple_volume_fast_success(
self, mock_volume_type, mock_volume, mock_sync_sv,
mock_simple_volume):
self.data.test_volume['volume_name'] = "vmax-1234567"
self.driver.common.utils.find_storage_configuration_service = (
mock.Mock(return_value=EMCVMAXCommonData.storage_system))
self.driver.common._get_or_create_default_storage_group = (
mock.Mock(return_value=EMCVMAXCommonData.default_storage_group))
self.driver.common.fast.is_volume_in_default_SG = (
mock.Mock(return_value=True))
self.driver.create_cloned_volume(self.data.test_volume,
EMCVMAXCommonData.test_source_volume)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'ISCSIFAST'})
@mock.patch.object(
FakeDB,
'volume_get',
return_value=EMCVMAXCommonData.test_source_volume)
@mock.patch.object(
EMCVMAXFast,
'get_pool_associated_to_policy',
return_value=1)
@mock.patch.object(
EMCVMAXUtils,
'get_volume_meta_head',
return_value=[EMCVMAXCommonData.test_volume])
@mock.patch.object(
EMCVMAXUtils,
'get_meta_members_capacity_in_bit',
return_value=[1234567, 7654321])
@mock.patch.object(
EMCVMAXCommon,
'_get_pool_and_storage_system',
return_value=(None, EMCVMAXCommonData.storage_system))
def test_create_clone_fast_failed(
self, mock_volume_type, mock_vol, mock_policy, mock_meta,
mock_size, mock_pool):
self.data.test_volume['volume_name'] = "vmax-1234567"
self.driver.common._modify_and_get_composite_volume_instance = (
mock.Mock(return_value=(1L, None)))
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_cloned_volume,
self.data.test_volume,
EMCVMAXCommonData.test_source_volume)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'ISCSIFAST'})
def test_migrate_volume_fast_success(self, _mock_volume_type):
self.driver.migrate_volume(self.data.test_ctxt, self.data.test_volume,
self.data.test_host)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'ISCSIFAST'})
@mock.patch.object(
EMCVMAXUtils,
'parse_pool_instance_id',
return_value=('silver', 'SYMMETRIX+000195900551'))
@mock.patch.object(
EMCVMAXMasking,
'_wrap_get_storage_group_from_volume',
return_value=None)
def test_retype_volume_fast_success(
self, _mock_volume_type, mock_values, mock_wrap):
self.driver.retype(
self.data.test_ctxt, self.data.test_volume, self.data.new_type,
self.data.diff, self.data.test_host)
def _cleanup(self):
bExists = os.path.exists(self.config_file_path)
if bExists:
os.remove(self.config_file_path)
shutil.rmtree(self.tempdir)
class EMCVMAXFCDriverNoFastTestCase(test.TestCase):
def setUp(self):
self.data = EMCVMAXCommonData()
self.tempdir = tempfile.mkdtemp()
super(EMCVMAXFCDriverNoFastTestCase, self).setUp()
self.config_file_path = None
self.create_fake_config_file_no_fast()
self.addCleanup(self._cleanup)
configuration = mock.Mock()
configuration.cinder_emc_config_file = self.config_file_path
configuration.safe_get.return_value = 'FCNoFAST'
configuration.config_group = 'FCNoFAST'
self.stubs.Set(EMCVMAXCommon, '_get_ecom_connection',
self.fake_ecom_connection)
instancename = FakeCIMInstanceName()
self.stubs.Set(EMCVMAXUtils, 'get_instance_name',
instancename.fake_getinstancename)
self.stubs.Set(time, 'sleep',
self.fake_sleep)
driver = EMCVMAXFCDriver(configuration=configuration)
driver.db = FakeDB()
driver.common.conn = FakeEcomConnection()
driver.zonemanager_lookup_service = FakeLookupService()
self.driver = driver
def create_fake_config_file_no_fast(self):
doc = Document()
emc = doc.createElement("EMC")
doc.appendChild(emc)
array = doc.createElement("Array")
arraytext = doc.createTextNode("1234567891011")
emc.appendChild(array)
array.appendChild(arraytext)
ecomserverip = doc.createElement("EcomServerIp")
ecomserveriptext = doc.createTextNode("1.1.1.1")
emc.appendChild(ecomserverip)
ecomserverip.appendChild(ecomserveriptext)
ecomserverport = doc.createElement("EcomServerPort")
ecomserverporttext = doc.createTextNode("10")
emc.appendChild(ecomserverport)
ecomserverport.appendChild(ecomserverporttext)
ecomusername = doc.createElement("EcomUserName")
ecomusernametext = doc.createTextNode("user")
emc.appendChild(ecomusername)
ecomusername.appendChild(ecomusernametext)
ecompassword = doc.createElement("EcomPassword")
ecompasswordtext = doc.createTextNode("pass")
emc.appendChild(ecompassword)
ecompassword.appendChild(ecompasswordtext)
portgroup = doc.createElement("PortGroup")
portgrouptext = doc.createTextNode("myPortGroup")
portgroup.appendChild(portgrouptext)
portgroups = doc.createElement("PortGroups")
portgroups.appendChild(portgroup)
emc.appendChild(portgroups)
pool = doc.createElement("Pool")
pooltext = doc.createTextNode("gold")
emc.appendChild(pool)
pool.appendChild(pooltext)
array = doc.createElement("Array")
arraytext = doc.createTextNode("0123456789")
emc.appendChild(array)
array.appendChild(arraytext)
timeout = doc.createElement("Timeout")
timeouttext = doc.createTextNode("0")
emc.appendChild(timeout)
timeout.appendChild(timeouttext)
filename = 'cinder_emc_config_FCNoFAST.xml'
self.config_file_path = self.tempdir + '/' + filename
f = open(self.config_file_path, 'w')
doc.writexml(f)
f.close()
def fake_ecom_connection(self):
conn = FakeEcomConnection()
return conn
def fake_sleep(self, seconds):
return
@mock.patch.object(
EMCVMAXUtils,
'find_storageSystem',
return_value=None)
@mock.patch.object(
EMCVMAXFast,
'is_tiering_policy_enabled',
return_value=False)
@mock.patch.object(
EMCVMAXUtils,
'get_pool_capacities',
return_value=(1234, 1200))
@mock.patch.object(
EMCVMAXUtils,
'parse_array_name_from_file',
return_value="123456789")
def test_get_volume_stats_no_fast(self,
mock_storage_system,
mock_is_fast_enabled,
mock_capacity,
mock_array):
self.driver.get_volume_stats(True)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'FCNoFAST'})
@mock.patch.object(
EMCVMAXCommon,
'_get_pool_and_storage_system',
return_value=(None, EMCVMAXCommonData.storage_system))
def test_create_volume_no_fast_success(
self, _mock_volume_type, mock_storage_system):
self.driver.create_volume(self.data.test_volume_v2)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'storagetype: stripedmetacount': '4',
'volume_backend_name': 'FCNoFAST'})
@mock.patch.object(
EMCVMAXCommon,
'_get_pool_and_storage_system',
return_value=(None, EMCVMAXCommonData.storage_system))
def test_create_volume_no_fast_striped_success(
self, _mock_volume_type, mock_storage_system):
self.driver.create_volume(self.data.test_volume_v2)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'FCNoFAST'})
@mock.patch.object(
EMCVMAXCommon,
'_get_pool_and_storage_system',
return_value=(None, EMCVMAXCommonData.storage_system))
def test_delete_volume_no_fast_success(
self, _mock_volume_type, mock_storage_system):
self.driver.delete_volume(self.data.test_volume)
def test_create_volume_no_fast_failed(self):
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_volume,
self.data.test_failed_volume)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'FCNoFAST'})
def test_delete_volume_no_fast_notfound(self, _mock_volume_type):
notfound_delete_vol = {}
notfound_delete_vol['name'] = 'notfound_delete_vol'
notfound_delete_vol['id'] = '10'
notfound_delete_vol['CreationClassName'] = 'Symmm_StorageVolume'
notfound_delete_vol['SystemName'] = self.data.storage_system
notfound_delete_vol['DeviceID'] = notfound_delete_vol['id']
notfound_delete_vol['SystemCreationClassName'] = 'Symm_StorageSystem'
name = {}
name['classname'] = 'Symm_StorageVolume'
keys = {}
keys['CreationClassName'] = notfound_delete_vol['CreationClassName']
keys['SystemName'] = notfound_delete_vol['SystemName']
keys['DeviceID'] = notfound_delete_vol['DeviceID']
keys['SystemCreationClassName'] =\
notfound_delete_vol['SystemCreationClassName']
name['keybindings'] = keys
notfound_delete_vol['volume_type_id'] = 'abc'
notfound_delete_vol['provider_location'] = None
self.driver.delete_volume(notfound_delete_vol)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'FCNoFAST'})
@mock.patch.object(
EMCVMAXCommon,
'_get_pool_and_storage_system',
return_value=(None, EMCVMAXCommonData.storage_system))
def test_delete_volume_failed(
self, _mock_volume_type, mock_storage_system):
self.driver.create_volume(self.data.failed_delete_vol)
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.delete_volume,
self.data.failed_delete_vol)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'FCNoFAST',
'FASTPOLICY': 'FC_GOLD1'})
@mock.patch.object(
EMCVMAXMasking,
'get_masking_view_from_storage_group',
return_value=EMCVMAXCommonData.lunmaskctrl_name)
def test_map_lookup_service_no_fast_success(
self, _mock_volume_type, mock_maskingview):
self.data.test_volume['volume_name'] = "vmax-1234567"
common = self.driver.common
common.get_target_wwns_from_masking_view = mock.Mock(
return_value=EMCVMAXCommonData.target_wwns)
lookup_service = self.driver.zonemanager_lookup_service
lookup_service.get_device_mapping_from_network = mock.Mock(
return_value=EMCVMAXCommonData.device_map)
data = self.driver.initialize_connection(self.data.test_volume,
self.data.connector)
common.get_target_wwns_from_masking_view.assert_called_once_with(
EMCVMAXCommonData.storage_system, self.data.test_volume,
EMCVMAXCommonData.connector)
lookup_service.get_device_mapping_from_network.assert_called_once_with(
EMCVMAXCommonData.connector['wwpns'],
EMCVMAXCommonData.target_wwns)
# Test the lookup service code path.
for init, target in data['data']['initiator_target_map'].items():
self.assertEqual(init, target[0][::-1])
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'FCNoFAST',
'FASTPOLICY': 'FC_GOLD1'})
@mock.patch.object(
EMCVMAXCommon,
'find_device_number',
return_value={'Name': "0001"})
def test_map_no_fast_failed(self, mock_wrap_group, mock_maskingview):
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.initialize_connection,
self.data.test_volume,
self.data.connector)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'FCNoFAST',
'FASTPOLICY': 'FC_GOLD1'})
@mock.patch.object(
EMCVMAXMasking,
'get_masking_view_by_volume',
return_value=EMCVMAXCommonData.lunmaskctrl_name)
def test_detach_no_fast_success(self, mock_volume_type, mock_maskingview):
self.driver.terminate_connection(self.data.test_volume,
self.data.connector)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'FCNoFAST'})
@mock.patch.object(
EMCVMAXMasking,
'get_masking_view_by_volume',
return_value=EMCVMAXCommonData.lunmaskctrl_name)
def test_detach_no_fast_last_volume_success(
self, mock_volume_type, mock_mv):
self.driver.terminate_connection(self.data.test_source_volume,
self.data.connector)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'FCNoFAST'})
@mock.patch.object(
EMCVMAXUtils,
'get_volume_size',
return_value='2147483648')
def test_extend_volume_no_fast_success(self, _mock_volume_type,
_mock_volume_size):
newSize = '2'
self.driver.extend_volume(self.data.test_volume, newSize)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'FCNoFAST'})
@mock.patch.object(
EMCVMAXUtils,
'check_if_volume_is_extendable',
return_value='False')
def test_extend_volume_striped_no_fast_failed(
self, _mock_volume_type, _mock_is_extendable):
newSize = '2'
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.extend_volume,
self.data.test_volume,
newSize)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'FCNoFAST'})
def test_migrate_volume_no_fast_success(self, _mock_volume_type):
self.driver.migrate_volume(self.data.test_ctxt, self.data.test_volume,
self.data.test_host)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'FCNoFAST'})
@mock.patch.object(
EMCVMAXUtils,
'parse_pool_instance_id',
return_value=('silver', 'SYMMETRIX+000195900551'))
def test_retype_volume_no_fast_success(
self, _mock_volume_type, mock_values):
self.driver.retype(
self.data.test_ctxt, self.data.test_volume, self.data.new_type,
self.data.diff, self.data.test_host)
def _cleanup(self):
bExists = os.path.exists(self.config_file_path)
if bExists:
os.remove(self.config_file_path)
shutil.rmtree(self.tempdir)
class EMCVMAXFCDriverFastTestCase(test.TestCase):
def setUp(self):
self.data = EMCVMAXCommonData()
self.tempdir = tempfile.mkdtemp()
super(EMCVMAXFCDriverFastTestCase, self).setUp()
self.config_file_path = None
self.create_fake_config_file_fast()
self.addCleanup(self._cleanup)
configuration = mock.Mock()
configuration.cinder_emc_config_file = self.config_file_path
configuration.safe_get.return_value = 'FCFAST'
configuration.config_group = 'FCFAST'
self.stubs.Set(EMCVMAXCommon, '_get_ecom_connection',
self.fake_ecom_connection)
instancename = FakeCIMInstanceName()
self.stubs.Set(EMCVMAXUtils, 'get_instance_name',
instancename.fake_getinstancename)
self.stubs.Set(time, 'sleep',
self.fake_sleep)
driver = EMCVMAXFCDriver(configuration=configuration)
driver.db = FakeDB()
driver.common.conn = FakeEcomConnection()
driver.zonemanager_lookup_service = None
self.driver = driver
def create_fake_config_file_fast(self):
doc = Document()
emc = doc.createElement("EMC")
doc.appendChild(emc)
fastPolicy = doc.createElement("FastPolicy")
fastPolicyText = doc.createTextNode("GOLD1")
emc.appendChild(fastPolicy)
fastPolicy.appendChild(fastPolicyText)
ecomserverip = doc.createElement("EcomServerIp")
ecomserveriptext = doc.createTextNode("1.1.1.1")
emc.appendChild(ecomserverip)
ecomserverip.appendChild(ecomserveriptext)
ecomserverport = doc.createElement("EcomServerPort")
ecomserverporttext = doc.createTextNode("10")
emc.appendChild(ecomserverport)
ecomserverport.appendChild(ecomserverporttext)
ecomusername = doc.createElement("EcomUserName")
ecomusernametext = doc.createTextNode("user")
emc.appendChild(ecomusername)
ecomusername.appendChild(ecomusernametext)
ecompassword = doc.createElement("EcomPassword")
ecompasswordtext = doc.createTextNode("pass")
emc.appendChild(ecompassword)
ecompassword.appendChild(ecompasswordtext)
portgroup = doc.createElement("PortGroup")
portgrouptext = doc.createTextNode("myPortGroup")
portgroup.appendChild(portgrouptext)
pool = doc.createElement("Pool")
pooltext = doc.createTextNode("gold")
emc.appendChild(pool)
pool.appendChild(pooltext)
array = doc.createElement("Array")
arraytext = doc.createTextNode("0123456789")
emc.appendChild(array)
array.appendChild(arraytext)
portgroups = doc.createElement("PortGroups")
portgroups.appendChild(portgroup)
emc.appendChild(portgroups)
timeout = doc.createElement("Timeout")
timeouttext = doc.createTextNode("0")
emc.appendChild(timeout)
timeout.appendChild(timeouttext)
filename = 'cinder_emc_config_FCFAST.xml'
self.config_file_path = self.tempdir + '/' + filename
f = open(self.config_file_path, 'w')
doc.writexml(f)
f.close()
def fake_ecom_connection(self):
conn = FakeEcomConnection()
return conn
def fake_sleep(self, seconds):
return
@mock.patch.object(
EMCVMAXUtils,
'find_storageSystem',
return_value=None)
@mock.patch.object(
EMCVMAXFast,
'is_tiering_policy_enabled',
return_value=True)
@mock.patch.object(
EMCVMAXFast,
'get_tier_policy_by_name',
return_value=None)
@mock.patch.object(
EMCVMAXFast,
'get_capacities_associated_to_policy',
return_value=(1234, 1200))
@mock.patch.object(
EMCVMAXUtils,
'parse_array_name_from_file',
return_value="123456789")
def test_get_volume_stats_fast(self,
mock_storage_system,
mock_is_fast_enabled,
mock_get_policy,
mock_capacity,
mock_array):
self.driver.get_volume_stats(True)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'FCFAST'})
@mock.patch.object(
EMCVMAXCommon,
'_get_pool_and_storage_system',
return_value=(None, EMCVMAXCommonData.storage_system))
@mock.patch.object(
EMCVMAXFast,
'get_pool_associated_to_policy',
return_value=1)
def test_create_volume_fast_success(
self, _mock_volume_type, mock_storage_system, mock_pool_policy):
self.driver.create_volume(self.data.test_volume_v2)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'storagetype: stripedmetacount': '4',
'volume_backend_name': 'FCFAST'})
@mock.patch.object(
EMCVMAXCommon,
'_get_pool_and_storage_system',
return_value=(None, EMCVMAXCommonData.storage_system))
@mock.patch.object(
EMCVMAXFast,
'get_pool_associated_to_policy',
return_value=1)
def test_create_volume_fast_striped_success(
self, _mock_volume_type, mock_storage_system, mock_pool_policy):
self.driver.create_volume(self.data.test_volume_v2)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'FCFAST'})
@mock.patch.object(
EMCVMAXMasking,
'_wrap_get_storage_group_from_volume',
return_value=None)
def test_delete_volume_fast_success(self, _mock_volume_type,
mock_storage_group):
self.driver.delete_volume(self.data.test_volume)
def test_create_volume_fast_failed(self):
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_volume,
self.data.test_failed_volume)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'FCFAST'})
def test_delete_volume_fast_notfound(self, _mock_volume_type):
"""We do not set the provider location.
"""
notfound_delete_vol = {}
notfound_delete_vol['name'] = 'notfound_delete_vol'
notfound_delete_vol['id'] = '10'
notfound_delete_vol['CreationClassName'] = 'Symmm_StorageVolume'
notfound_delete_vol['SystemName'] = self.data.storage_system
notfound_delete_vol['DeviceID'] = notfound_delete_vol['id']
notfound_delete_vol['SystemCreationClassName'] = 'Symm_StorageSystem'
name = {}
name['classname'] = 'Symm_StorageVolume'
keys = {}
keys['CreationClassName'] = notfound_delete_vol['CreationClassName']
keys['SystemName'] = notfound_delete_vol['SystemName']
keys['DeviceID'] = notfound_delete_vol['DeviceID']
keys['SystemCreationClassName'] =\
notfound_delete_vol['SystemCreationClassName']
name['keybindings'] = keys
notfound_delete_vol['volume_type_id'] = 'abc'
notfound_delete_vol['provider_location'] = None
self.driver.delete_volume(notfound_delete_vol)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'FCFAST'})
@mock.patch.object(
EMCVMAXMasking,
'_wrap_get_storage_group_from_volume',
return_value=None)
@mock.patch.object(
EMCVMAXCommon,
'_get_pool_and_storage_system',
return_value=(None, EMCVMAXCommonData.storage_system))
@mock.patch.object(
EMCVMAXFast,
'get_pool_associated_to_policy',
return_value=1)
def test_delete_volume_fast_failed(
self, _mock_volume_type, mock_wrapper,
mock_storage_system, mock_pool_policy):
self.driver.create_volume(self.data.failed_delete_vol)
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.delete_volume,
self.data.failed_delete_vol)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'FCFAST',
'FASTPOLICY': 'FC_GOLD1'})
@mock.patch.object(
EMCVMAXMasking,
'get_masking_view_from_storage_group',
return_value=EMCVMAXCommonData.lunmaskctrl_name)
def test_map_fast_success(self, _mock_volume_type, mock_maskingview):
self.data.test_volume['volume_name'] = "vmax-1234567"
common = self.driver.common
common.get_target_wwns = mock.Mock(
return_value=EMCVMAXCommonData.target_wwns)
data = self.driver.initialize_connection(
self.data.test_volume, self.data.connector)
# Test the no lookup service, pre-zoned case.
common.get_target_wwns.assert_called_once_with(
EMCVMAXCommonData.storage_system, EMCVMAXCommonData.connector)
for init, target in data['data']['initiator_target_map'].items():
self.assertIn(init[::-1], target)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'FCFAST',
'FASTPOLICY': 'FC_GOLD1'})
@mock.patch.object(
EMCVMAXCommon,
'find_device_number',
return_value={'Name': "0001"})
def test_map_fast_failed(self, mock_wrap_group, mock_maskingview):
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.initialize_connection,
self.data.test_volume,
self.data.connector)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'FCFAST',
'FASTPOLICY': 'FC_GOLD1'})
@mock.patch.object(
EMCVMAXMasking,
'get_masking_view_by_volume',
return_value=EMCVMAXCommonData.lunmaskctrl_name)
def test_detach_fast_success(self, mock_volume_type, mock_maskingview):
common = self.driver.common
common.get_target_wwns = mock.Mock(
return_value=EMCVMAXCommonData.target_wwns)
data = self.driver.terminate_connection(self.data.test_volume,
self.data.connector)
common.get_target_wwns.assert_called_once_with(
EMCVMAXCommonData.storage_system, EMCVMAXCommonData.connector)
self.assertEqual(0, len(data['data']))
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'FCFAST'})
@mock.patch.object(
EMCVMAXUtils,
'get_volume_size',
return_value='2147483648')
def test_extend_volume_fast_success(self, _mock_volume_type,
_mock_volume_size):
newSize = '2'
self.driver.extend_volume(self.data.test_volume, newSize)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'FCFAST'})
@mock.patch.object(
EMCVMAXUtils,
'check_if_volume_is_extendable',
return_value='False')
def test_extend_volume_striped_fast_failed(self, _mock_volume_type,
_mock_is_extendable):
newSize = '2'
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.extend_volume,
self.data.test_volume,
newSize)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'FCFAST'})
@mock.patch.object(
FakeDB,
'volume_get',
return_value=EMCVMAXCommonData.test_source_volume)
@mock.patch.object(
EMCVMAXFast,
'get_pool_associated_to_policy',
return_value=1)
@mock.patch.object(
EMCVMAXUtils,
'get_volume_meta_head',
return_value=[EMCVMAXCommonData.test_volume])
@mock.patch.object(
EMCVMAXUtils,
'get_meta_members_capacity_in_bit',
return_value=[1234567, 7654321])
@mock.patch.object(
EMCVMAXCommon,
'_get_pool_and_storage_system',
return_value=(None, EMCVMAXCommonData.storage_system))
def test_create_snapshot_different_sizes_meta_fast_success(
self, mock_volume_type, mock_volume, mock_meta,
mock_size, mock_pool, mock_policy):
self.data.test_volume['volume_name'] = "vmax-1234567"
common = self.driver.common
volumeDict = {'classname': u'Symm_StorageVolume',
'keybindings': EMCVMAXCommonData.keybindings}
common.provision.create_volume_from_pool = (
mock.Mock(return_value=(volumeDict, 0L)))
common.provision.get_volume_dict_from_job = (
mock.Mock(return_value=volumeDict))
common.fast.is_volume_in_default_SG = (
mock.Mock(return_value=True))
self.driver.create_snapshot(self.data.test_volume)
def test_create_snapshot_fast_failed(self):
self.data.test_volume['volume_name'] = "vmax-1234567"
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_snapshot,
self.data.test_volume)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'FCFAST'})
@mock.patch.object(
FakeDB,
'volume_get',
return_value=EMCVMAXCommonData.test_source_volume)
@mock.patch.object(
EMCVMAXCommon,
'_find_storage_sync_sv_sv',
return_value=(None, None))
@mock.patch.object(
EMCVMAXUtils,
'get_volume_meta_head',
return_value=None)
def test_create_clone_simple_volume_fast_success(
self, mock_volume_type,
mock_volume, mock_sync_sv, mock_meta):
self.data.test_volume['volume_name'] = "vmax-1234567"
self.driver.common.utils.find_storage_configuration_service = (
mock.Mock(return_value=EMCVMAXCommonData.storage_system))
self.driver.common._get_or_create_default_storage_group = (
mock.Mock(return_value=EMCVMAXCommonData.default_storage_group))
self.driver.common.fast.is_volume_in_default_SG = (
mock.Mock(return_value=True))
self.driver.create_cloned_volume(
self.data.test_volume,
EMCVMAXCommonData.test_source_volume)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'FCFAST'})
@mock.patch.object(
FakeDB,
'volume_get',
return_value=EMCVMAXCommonData.test_source_volume)
@mock.patch.object(
EMCVMAXFast,
'get_pool_associated_to_policy',
return_value=1)
@mock.patch.object(
EMCVMAXUtils,
'get_volume_meta_head',
return_value=[EMCVMAXCommonData.test_volume])
@mock.patch.object(
EMCVMAXUtils,
'get_meta_members_capacity_in_bit',
return_value=[1234567, 7654321])
@mock.patch.object(
EMCVMAXCommon,
'_get_pool_and_storage_system',
return_value=(None, EMCVMAXCommonData.storage_system))
def test_create_clone_fast_failed(
self, mock_volume_type, mock_vol,
mock_policy, mock_meta, mock_size, mock_pool):
self.data.test_volume['volume_name'] = "vmax-1234567"
self.driver.common._modify_and_get_composite_volume_instance = (
mock.Mock(return_value=(1L, None)))
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_cloned_volume,
self.data.test_volume,
EMCVMAXCommonData.test_source_volume)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'FCFAST'})
def test_migrate_volume_fast_success(self, _mock_volume_type):
self.driver.migrate_volume(self.data.test_ctxt, self.data.test_volume,
self.data.test_host)
@mock.patch.object(
volume_types,
'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'FCFAST'})
@mock.patch.object(
EMCVMAXUtils,
'parse_pool_instance_id',
return_value=('silver', 'SYMMETRIX+000195900551'))
@mock.patch.object(
EMCVMAXMasking,
'_wrap_get_storage_group_from_volume',
return_value=None)
def test_retype_volume_fast_success(
self, _mock_volume_type, mock_values, mock_wrap):
self.driver.retype(
self.data.test_ctxt, self.data.test_volume, self.data.new_type,
self.data.diff, self.data.test_host)
def _cleanup(self):
bExists = os.path.exists(self.config_file_path)
if bExists:
os.remove(self.config_file_path)
shutil.rmtree(self.tempdir)
|
{
"content_hash": "31fca3ea78643a99fc7161caf23164e8",
"timestamp": "",
"source": "github",
"line_count": 3312,
"max_line_length": 79,
"avg_line_length": 38.98913043478261,
"alnum_prop": 0.6120481367902612,
"repo_name": "hguemar/cinder",
"id": "b4c3cb08fb9872b3580a93a5b2845428a89b01aa",
"size": "129780",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "cinder/tests/test_emc_vmax.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "3322"
},
{
"name": "Python",
"bytes": "10010542"
},
{
"name": "Shell",
"bytes": "9917"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import os
from .. import helper
from .. import template_helper
class NetsecHandler(helper.RequestHandlerWithAuth):
def render(self, template, data):
TEMPLATE_PATH = os.path.join(self.application.config.module_path, "templates")
data['template_helper'] = template_helper
return super(NetsecHandler, self).render(
os.path.join(TEMPLATE_PATH, "%s.html" % template),
**data)
def render2string(self, template, data):
TEMPLATE_PATH = os.path.join(self.application.config.module_path, "templates")
data['template_helper'] = template_helper
return self.render_string(
os.path.join(TEMPLATE_PATH, "%s.html" % template),
**data)
@property
def db(self):
return self.application.db
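# A minimal usage sketch (hypothetical subclass, not part of this module):
# concrete handlers pick a template name and a data dict; render() resolves
# "<template>.html" under the module's templates/ directory and injects
# template_helper automatically.
#
#     class OverviewHandler(NetsecHandler):
#         def get(self):
#             self.render('overview', {'submissions': []})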
|
{
"content_hash": "2adc0dee223e584c19c0d2798a5fc394",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 86,
"avg_line_length": 29.928571428571427,
"alnum_prop": 0.649164677804296,
"repo_name": "hhucn/netsec-uebungssystem",
"id": "2c9949d6fd5dba4fb2e0f57f87c184a2f01671a7",
"size": "838",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "netsecus/webhandler/NetsecHandler.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "638"
},
{
"name": "HTML",
"bytes": "18299"
},
{
"name": "Makefile",
"bytes": "770"
},
{
"name": "Python",
"bytes": "92592"
},
{
"name": "Shell",
"bytes": "1180"
}
],
"symlink_target": ""
}
|
"""Implements `!p ` interpolation."""
import os
from collections import namedtuple
from UltiSnips import _vim
from UltiSnips.compatibility import as_unicode
from UltiSnips.indent_util import IndentUtil
from UltiSnips.text_objects._base import NoneditableTextObject
from UltiSnips.vim_state import _Placeholder
import UltiSnips.snippet_manager
class _Tabs(object):
"""Allows access to tabstop content via t[] inside of python code."""
def __init__(self, to):
self._to = to
def __getitem__(self, no):
ts = self._to._get_tabstop(
self._to,
int(no)) # pylint:disable=protected-access
if ts is None:
return ''
return ts.current_text
def __setitem__(self, no, value):
ts = self._to._get_tabstop(
self._to,
int(no)) # pylint:disable=protected-access
if ts is None:
return
ts.overwrite(value)
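# Illustrative sketch (not part of the module): inside a snippet's `!p`
# block, `t` is a _Tabs instance, so python code can read or mirror other
# tabstops, e.g. a snippet body like
#
#     $1 -> `!p snip.rv = t[1].upper()`
#
# where a missing tabstop simply yields '' (see __getitem__ above).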
_VisualContent = namedtuple('_VisualContent', ['mode', 'text'])
class SnippetUtilForAction(dict):
def __init__(self, *args, **kwargs):
super(SnippetUtilForAction, self).__init__(*args, **kwargs)
self.__dict__ = self
def expand_anon(self, *args, **kwargs):
UltiSnips.snippet_manager.UltiSnips_Manager.expand_anon(
*args, **kwargs
)
self.cursor.preserve()
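# Illustrative sketch, assuming an UltiSnips snippet action in which `snip`
# is a SnippetUtilForAction (the exact action syntax is an assumption here):
#
#     post_expand "snip.expand_anon('nested body')"
#
# expand_anon() delegates to the global manager and then preserves the
# cursor so the surrounding action machinery does not move it afterwards.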
class SnippetUtilCursor(object):
def __init__(self, cursor):
self._cursor = [cursor[0] - 1, cursor[1]]
self._set = False
def preserve(self):
self._set = True
self._cursor = [
_vim.buf.cursor[0],
_vim.buf.cursor[1],
]
def is_set(self):
return self._set
def set(self, line, column):
self.__setitem__(0, line)
self.__setitem__(1, column)
def to_vim_cursor(self):
return (self._cursor[0] + 1, self._cursor[1])
def __getitem__(self, index):
return self._cursor[index]
def __setitem__(self, index, value):
self._set = True
self._cursor[index] = value
def __len__(self):
return 2
def __str__(self):
return str((self._cursor[0], self._cursor[1]))
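# Indexing note, derived from the code above: Vim cursor lines are 1-based,
# while SnippetUtilCursor stores them 0-based. Illustrative sketch:
#
#     c = SnippetUtilCursor((5, 3))  # Vim line 5, column 3
#     c[0]               # -> 4 (0-based line)
#     c.to_vim_cursor()  # -> (5, 3)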
class SnippetUtil(object):
"""Provides easy access to indentation, etc.
This is the 'snip' object in python code.
"""
def __init__(self, initial_indent, vmode, vtext, context, parent):
self._ind = IndentUtil()
self._visual = _VisualContent(vmode, vtext)
self._initial_indent = self._ind.indent_to_spaces(initial_indent)
self._reset('')
self._context = context
self._start = parent.start
self._end = parent.end
self._parent = parent
def _reset(self, cur):
"""Gets the snippet ready for another update.
:cur: the new value for c.
"""
self._ind.reset()
self._cur = cur
self._rv = ''
self._changed = False
self.reset_indent()
def shift(self, amount=1):
"""Shifts the indentation level. Note that this uses the shiftwidth
        because that's what code formatters use.
:amount: the amount by which to shift.
"""
self.indent += ' ' * self._ind.shiftwidth * amount
def unshift(self, amount=1):
"""Unshift the indentation level. Note that this uses the shiftwidth
        because that's what code formatters use.
:amount: the amount by which to unshift.
"""
by = -self._ind.shiftwidth * amount
try:
self.indent = self.indent[:by]
except IndexError:
self.indent = ''
def mkline(self, line='', indent=None):
"""Creates a properly set up line.
:line: the text to add
:indent: the indentation to have at the beginning
if None, it uses the default amount
"""
if indent is None:
indent = self.indent
# this deals with the fact that the first line is
# already properly indented
if '\n' not in self._rv:
try:
indent = indent[len(self._initial_indent):]
except IndexError:
indent = ''
indent = self._ind.spaces_to_indent(indent)
return indent + line
def reset_indent(self):
"""Clears the indentation."""
self.indent = self._initial_indent
# Utility methods
@property
def fn(self): # pylint:disable=no-self-use,invalid-name
"""The filename."""
return _vim.eval('expand("%:t")') or ''
@property
def basename(self): # pylint:disable=no-self-use
"""The filename without extension."""
return _vim.eval('expand("%:t:r")') or ''
@property
def ft(self): # pylint:disable=invalid-name
"""The filetype."""
return self.opt('&filetype', '')
@property
def rv(self): # pylint:disable=invalid-name
"""The return value.
The text to insert at the location of the placeholder.
"""
return self._rv
@rv.setter
def rv(self, value): # pylint:disable=invalid-name
"""See getter."""
self._changed = True
self._rv = value
@property
def _rv_changed(self):
"""True if rv has changed."""
return self._changed
@property
def c(self): # pylint:disable=invalid-name
"""The current text of the placeholder."""
return self._cur
@property
def v(self): # pylint:disable=invalid-name
"""Content of visual expansions."""
return self._visual
@property
def p(self):
if self._parent.current_placeholder:
return self._parent.current_placeholder
else:
return _Placeholder('', 0, 0)
@property
def context(self):
return self._context
def opt(self, option, default=None): # pylint:disable=no-self-use
"""Gets a Vim variable."""
if _vim.eval("exists('%s')" % option) == '1':
try:
return _vim.eval(option)
except _vim.error:
pass
return default
def __add__(self, value):
"""Appends the given line to rv using mkline."""
self.rv += '\n' # pylint:disable=invalid-name
self.rv += self.mkline(value)
return self
def __lshift__(self, other):
"""Same as unshift."""
self.unshift(other)
def __rshift__(self, other):
"""Same as shift."""
self.shift(other)
@property
def snippet_start(self):
"""
Returns start of the snippet in format (line, column).
"""
return self._start
@property
def snippet_end(self):
"""
Returns end of the snippet in format (line, column).
"""
return self._end
@property
def buffer(self):
return _vim.buf
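# An illustrative `!p` interpolation (a hedged sketch, not part of this
# module): inside the interpolated python code the object defined above
# is exposed as `snip`, and output is produced by assigning to `snip.rv`,
# e.g.
#
#   snippet header
#   File: `!p snip.rv = snip.basename` (filetype: `!p snip.rv = snip.ft`)
#   endsnippet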
class PythonCode(NoneditableTextObject):
"""See module docstring."""
def __init__(self, parent, token):
# Find our containing snippet for snippet local data
snippet = parent
while snippet:
try:
self._locals = snippet.locals
text = snippet.visual_content.text
mode = snippet.visual_content.mode
context = snippet.context
break
            except AttributeError:
snippet = snippet._parent # pylint:disable=protected-access
self._snip = SnippetUtil(token.indent, mode, text, context, snippet)
self._codes = ((
'import re, os, vim, string, random',
'\n'.join(snippet.globals.get('!p', [])).replace('\r\n', '\n'),
token.code.replace('\\`', '`')
))
NoneditableTextObject.__init__(self, parent, token)
def _update(self, done):
path = _vim.eval('expand("%")') or ''
ct = self.current_text
self._locals.update({
't': _Tabs(self._parent),
'fn': os.path.basename(path),
'path': path,
'cur': ct,
'res': ct,
'snip': self._snip,
})
self._snip._reset(ct) # pylint:disable=protected-access
for code in self._codes:
try:
exec(code, self._locals) # pylint:disable=exec-used
except Exception as e:
e.snippet_code = code
raise
rv = as_unicode(
self._snip.rv if self._snip._rv_changed # pylint:disable=protected-access
else as_unicode(self._locals['res'])
)
if ct != rv:
self.overwrite(rv)
return False
return True
|
{
"content_hash": "e08b88f107d02284ff5faf2926217a4d",
"timestamp": "",
"source": "github",
"line_count": 318,
"max_line_length": 86,
"avg_line_length": 27.358490566037737,
"alnum_prop": 0.5486206896551724,
"repo_name": "NcLang/vimrc",
"id": "5404dca56a744ed147c0edf124552de711fb9958",
"size": "8741",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "sources_non_forked/ultisnips/pythonx/UltiSnips/text_objects/_python_code.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "568"
},
{
"name": "CSS",
"bytes": "6320"
},
{
"name": "CoffeeScript",
"bytes": "1402"
},
{
"name": "Erlang",
"bytes": "3232"
},
{
"name": "GCC Machine Description",
"bytes": "525"
},
{
"name": "Go",
"bytes": "2239"
},
{
"name": "HTML",
"bytes": "134"
},
{
"name": "JavaScript",
"bytes": "1064"
},
{
"name": "Makefile",
"bytes": "8657"
},
{
"name": "Perl",
"bytes": "2705"
},
{
"name": "Python",
"bytes": "704814"
},
{
"name": "Ruby",
"bytes": "33390"
},
{
"name": "Shell",
"bytes": "9370"
},
{
"name": "TeX",
"bytes": "6193"
},
{
"name": "VimL",
"bytes": "3170590"
},
{
"name": "XSLT",
"bytes": "4217"
}
],
"symlink_target": ""
}
|
from setuptools import setup, find_packages
install_requires = [
'pyyaml',
'python-keystoneclient',
'kazoo',
]
setup(
name='contrail-db-loader',
version='0.1b1',
description="Script to load data in Contrail database for scaling tests",
long_description=open('README.md').read(),
author="Édouard Thuleau",
author_email="ethuleau@juniper.net",
packages=find_packages(),
install_requires=install_requires,
scripts=[],
license="Apache Software License",
entry_points={
'console_scripts': [
'contrail-db-loader = contrail_db_loader.main:main'
],
},
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: Software Development :: User Interfaces',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2.7',
],
keywords='contrail db loader',
)
|
{
"content_hash": "ac5f4c3cd8cbe3aa92875d27a02a2fae",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 77,
"avg_line_length": 28.058823529411764,
"alnum_prop": 0.6257861635220126,
"repo_name": "eonpatapon/contrail-controller",
"id": "8bfafd436db67ec30d8d017c9a9855de12cca3c1",
"size": "1048",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "src/config/utils/db-loader/setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "722794"
},
{
"name": "C++",
"bytes": "22097574"
},
{
"name": "GDB",
"bytes": "39260"
},
{
"name": "Go",
"bytes": "47213"
},
{
"name": "Java",
"bytes": "91653"
},
{
"name": "Lua",
"bytes": "13345"
},
{
"name": "PowerShell",
"bytes": "1810"
},
{
"name": "Python",
"bytes": "7240671"
},
{
"name": "Roff",
"bytes": "41295"
},
{
"name": "Ruby",
"bytes": "13596"
},
{
"name": "Shell",
"bytes": "53994"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('firecares_core', '0003_contactrequest_created_at'),
]
operations = [
migrations.AlterField(
model_name='address',
name='address_line2',
field=models.CharField(max_length=100, null=True, verbose_name=b'Address line 2', blank=True),
),
]
|
{
"content_hash": "15dfcf9b3a5b3ffa829f64529bff9020",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 106,
"avg_line_length": 25.333333333333332,
"alnum_prop": 0.625,
"repo_name": "HunterConnelly/firecares",
"id": "c93b959fd5092307d344192d4aeddeb58635f48b",
"size": "480",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "firecares/firecares_core/migrations/0004_auto_20160301_1434.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "832800"
},
{
"name": "HTML",
"bytes": "256608"
},
{
"name": "JavaScript",
"bytes": "1655928"
},
{
"name": "PHP",
"bytes": "17935"
},
{
"name": "Python",
"bytes": "504630"
}
],
"symlink_target": ""
}
|
from google.analytics import admin_v1beta
def sample_delete_account():
# Create a client
client = admin_v1beta.AnalyticsAdminServiceClient()
# Initialize request argument(s)
request = admin_v1beta.DeleteAccountRequest(
name="name_value",
)
# Make the request
client.delete_account(request=request)
# [END analyticsadmin_v1beta_generated_AnalyticsAdminService_DeleteAccount_sync]
|
{
"content_hash": "9212fc1a88a119ab8e8b76fd2181a8c9",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 80,
"avg_line_length": 24.764705882352942,
"alnum_prop": 0.7339667458432304,
"repo_name": "googleapis/python-analytics-admin",
"id": "622afcf59249fcb1fd5322dc58aa5058184a3f72",
"size": "1827",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/generated_samples/analyticsadmin_v1beta_generated_analytics_admin_service_delete_account_sync.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "5576405"
},
{
"name": "Shell",
"bytes": "30687"
}
],
"symlink_target": ""
}
|
import fnmatch
import os
from urllib.parse import urlsplit, urlparse
def save_python_script(script_folder, script_url):
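    # Download a remote python script and persist it under script_folder,
    # returning the saved path. requests is imported lazily so the module
    # stays importable without it.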
import requests
if not os.path.exists(script_folder):
os.makedirs(script_folder)
script_name = get_filename(script_url)
script_data = requests.get(script_url).text
script_loc = os.path.join(script_folder, script_name)
write_file(script_loc, script_data)
if not os.path.exists(script_loc):
raise FileNotFoundError("Unable to locate file %s after attempting to save it" % script_loc)
return script_loc
def is_url(url):
return urlparse(url).scheme != ""
def get_filename(url_or_path):
if not is_url(url_or_path):
if not os.path.exists(url_or_path):
return None
return "%s%s" % os.path.splitext(url_or_path)
else:
return "%s%s" % os.path.splitext(os.path.basename(urlsplit(url_or_path).path))
def get_file_extension(path):
if is_url(path):
return "%s" % os.path.splitext(os.path.basename(urlsplit(path).path))[1]
else:
if not os.path.exists(path):
return None
return "%s" % os.path.splitext(path)[1]
def get_files_recursive(path, match='*.py'):
matches = []
for root, dirnames, filenames in os.walk(path):
for filename in fnmatch.filter(filenames, match):
matches.append(os.path.join(root, filename))
return matches
def get_config_from_file(file, trim_newlines=True):
with open(file, 'r') as config_file:
data = config_file.read()
if trim_newlines:
data = data.replace('\n', '')
return data
def write_file(file, data):
with open(file, 'w') as data_file:
data_file.write(data)
class ChangeDir:
def __init__(self, newPath):
self.newPath = os.path.expanduser(newPath)
# Change directory with the new path
def __enter__(self):
self.savedPath = os.getcwd()
os.chdir(self.newPath)
# Return back to previous directory
def __exit__(self, etype, value, traceback):
os.chdir(self.savedPath)
class Map(dict):
"""
Example:
m = Map({'first_name': 'Eduardo'}, last_name='Pool', age=24, sports=['Soccer'])
"""
def __init__(self, *args, **kwargs):
super(Map, self).__init__(*args, **kwargs)
for arg in args:
if isinstance(arg, dict):
for k, v in arg.items():
self[k] = v
if kwargs:
for k, v in kwargs.items():
self[k] = v
def __getattr__(self, attr):
return self.get(attr)
def __setattr__(self, key, value):
self.__setitem__(key, value)
def __setitem__(self, key, value):
super(Map, self).__setitem__(key, value)
self.__dict__.update({key: value})
def __delattr__(self, item):
self.__delitem__(item)
def __delitem__(self, key):
super(Map, self).__delitem__(key)
del self.__dict__[key]
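# --- Illustrative usage (a minimal sketch; the sample values are made up) ---
if __name__ == '__main__':
    # Map supports both dict-style and attribute-style access.
    m = Map({'first_name': 'Eduardo'}, last_name='Pool', age=24)
    assert m.first_name == m['first_name']
    # ChangeDir temporarily switches the working directory and restores
    # the previous one on exit.
    with ChangeDir('~'):
        print(os.getcwd())
    print(os.getcwd())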
|
{
"content_hash": "93c8348587346ddcb04f6180f45c1fc5",
"timestamp": "",
"source": "github",
"line_count": 115,
"max_line_length": 100,
"avg_line_length": 26.008695652173913,
"alnum_prop": 0.5894349715814109,
"repo_name": "TechnicalBro/CraftBuildTools",
"id": "a98af0bcc778f6b1e5ef106abd527b95770146af",
"size": "2991",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "craftbuildtools/utils/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "48592"
}
],
"symlink_target": ""
}
|
import socket
import sys
import time
from pathlib import Path
import pytest
from xprocess import ProcessStarter
server_path = Path(__file__).parent.joinpath("server.py").absolute()
def cleanup_server_instance(tcp_port):
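    # Ask the still-running demo server to exit over its TCP port; the
    # connection errors caught below just mean it already shut down.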
sock = socket.socket()
sock.connect(("localhost", tcp_port))
try:
for _ in range(10):
sock.sendall(b"exit\n")
sock.recv(1)
time.sleep(0.1)
except (
BrokenPipeError,
ConnectionAbortedError,
ConnectionResetError,
): # Server is terminated
pass
sock.close()
@pytest.mark.parametrize("proc_name", ["s1", "s2", "s3"])
def test_timeout_raise_exception(tcp_port, proc_name, xprocess, request):
class Starter(ProcessStarter):
timeout = 2
max_read_lines = 500
pattern = "will not match"
args = [sys.executable, server_path, tcp_port, "--no-children"]
with pytest.raises(TimeoutError):
xprocess.ensure(proc_name, Starter)
cleanup_server_instance(tcp_port)
|
{
"content_hash": "6c001403126e3a7ac780b8e88000d44e",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 73,
"avg_line_length": 25.675,
"alnum_prop": 0.6368062317429406,
"repo_name": "pytest-dev/pytest-xprocess",
"id": "9eb2a343649883f7007c98b3160aadf68974078b",
"size": "1027",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_startup_timeout.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34653"
}
],
"symlink_target": ""
}
|
# -*- encoding: utf-8 -*-
from __future__ import unicode_literals
import io
import os
import re
import shutil
import sys
import time
from unittest import SkipTest, skipUnless
import warnings
from django.conf import settings
from django.core import management
from django.core.management import execute_from_command_line
from django.core.management.utils import find_command
from django.test import SimpleTestCase
from django.test import override_settings
from django.utils.encoding import force_text
from django.utils._os import upath
from django.utils import six
from django.utils.six import StringIO
from django.utils.translation import TranslatorCommentWarning
LOCALE = 'de'
has_xgettext = find_command('xgettext')
this_directory = os.path.dirname(upath(__file__))
@skipUnless(has_xgettext, 'xgettext is mandatory for extraction tests')
class ExtractorTests(SimpleTestCase):
test_dir = os.path.abspath(os.path.join(this_directory, 'commands'))
PO_FILE = 'locale/%s/LC_MESSAGES/django.po' % LOCALE
def setUp(self):
self._cwd = os.getcwd()
def _rmrf(self, dname):
if os.path.commonprefix([self.test_dir, os.path.abspath(dname)]) != self.test_dir:
return
shutil.rmtree(dname)
def rmfile(self, filepath):
if os.path.exists(filepath):
os.remove(filepath)
def tearDown(self):
os.chdir(self.test_dir)
try:
self._rmrf('locale/%s' % LOCALE)
except OSError:
pass
os.chdir(self._cwd)
def _run_makemessages(self, **options):
os.chdir(self.test_dir)
out = StringIO()
management.call_command('makemessages', locale=[LOCALE], verbosity=2,
stdout=out, **options)
output = out.getvalue()
self.assertTrue(os.path.exists(self.PO_FILE))
with open(self.PO_FILE, 'r') as fp:
po_contents = fp.read()
return output, po_contents
def assertMsgId(self, msgid, s, use_quotes=True):
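        # Wrap the msgid in quotes (unless the caller passes a pre-quoted
        # string) and look for a line-anchored 'msgid "..."' entry in the
        # PO contents.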
q = '"'
if use_quotes:
msgid = '"%s"' % msgid
q = "'"
needle = 'msgid %s' % msgid
msgid = re.escape(msgid)
return self.assertTrue(re.search('^msgid %s' % msgid, s, re.MULTILINE), 'Could not find %(q)s%(n)s%(q)s in generated PO file' % {'n': needle, 'q': q})
def assertNotMsgId(self, msgid, s, use_quotes=True):
if use_quotes:
msgid = '"%s"' % msgid
msgid = re.escape(msgid)
return self.assertTrue(not re.search('^msgid %s' % msgid, s, re.MULTILINE))
def _assertPoLocComment(self, assert_presence, po_filename, line_number, *comment_parts):
with open(po_filename, 'r') as fp:
po_contents = force_text(fp.read())
if os.name == 'nt':
# #: .\path\to\file.html:123
cwd_prefix = '%s%s' % (os.curdir, os.sep)
else:
# #: path/to/file.html:123
cwd_prefix = ''
parts = ['#: ']
parts.append(os.path.join(cwd_prefix, *comment_parts))
if line_number is not None:
parts.append(':%d' % line_number)
needle = ''.join(parts)
if assert_presence:
return self.assertTrue(needle in po_contents, '"%s" not found in final .po file.' % needle)
else:
return self.assertFalse(needle in po_contents, '"%s" shouldn\'t be in final .po file.' % needle)
def assertLocationCommentPresent(self, po_filename, line_number, *comment_parts):
"""
self.assertLocationCommentPresent('django.po', 42, 'dirA', 'dirB', 'foo.py')
verifies that the django.po file has a gettext-style location comment of the form
`#: dirA/dirB/foo.py:42`
(or `#: .\dirA\dirB\foo.py:42` on Windows)
None can be passed for the line_number argument to skip checking of the :42 suffix part.
"""
return self._assertPoLocComment(True, po_filename, line_number, *comment_parts)
def assertLocationCommentNotPresent(self, po_filename, line_number, *comment_parts):
"""Check the opposite of assertLocationComment()"""
return self._assertPoLocComment(False, po_filename, line_number, *comment_parts)
def assertRecentlyModified(self, path):
"""
Assert that file was recently modified (modification time was less than 10 seconds ago).
"""
delta = time.time() - os.stat(path).st_mtime
        self.assertLess(delta, 10, "%s wasn't recently modified" % path)
def assertNotRecentlyModified(self, path):
"""
Assert that file was not recently modified (modification time was more than 10 seconds ago).
"""
delta = time.time() - os.stat(path).st_mtime
        self.assertGreater(delta, 10, "%s was recently modified" % path)
class BasicExtractorTests(ExtractorTests):
def test_comments_extractor(self):
os.chdir(self.test_dir)
management.call_command('makemessages', locale=[LOCALE], verbosity=0)
self.assertTrue(os.path.exists(self.PO_FILE))
with io.open(self.PO_FILE, 'r', encoding='utf-8') as fp:
po_contents = fp.read()
self.assertTrue('#. Translators: This comment should be extracted' in po_contents)
self.assertTrue('This comment should not be extracted' not in po_contents)
# Comments in templates
self.assertTrue('#. Translators: Django template comment for translators' in po_contents)
self.assertTrue("#. Translators: Django comment block for translators\n#. string's meaning unveiled" in po_contents)
self.assertTrue('#. Translators: One-line translator comment #1' in po_contents)
self.assertTrue('#. Translators: Two-line translator comment #1\n#. continued here.' in po_contents)
self.assertTrue('#. Translators: One-line translator comment #2' in po_contents)
self.assertTrue('#. Translators: Two-line translator comment #2\n#. continued here.' in po_contents)
self.assertTrue('#. Translators: One-line translator comment #3' in po_contents)
self.assertTrue('#. Translators: Two-line translator comment #3\n#. continued here.' in po_contents)
self.assertTrue('#. Translators: One-line translator comment #4' in po_contents)
self.assertTrue('#. Translators: Two-line translator comment #4\n#. continued here.' in po_contents)
self.assertTrue('#. Translators: One-line translator comment #5 -- with non ASCII characters: áéíóúö' in po_contents)
self.assertTrue('#. Translators: Two-line translator comment #5 -- with non ASCII characters: áéíóúö\n#. continued here.' in po_contents)
def test_templatize_trans_tag(self):
# ticket #11240
os.chdir(self.test_dir)
management.call_command('makemessages', locale=[LOCALE], verbosity=0)
self.assertTrue(os.path.exists(self.PO_FILE))
with open(self.PO_FILE, 'r') as fp:
po_contents = force_text(fp.read())
self.assertMsgId('Literal with a percent symbol at the end %%', po_contents)
self.assertMsgId('Literal with a percent %% symbol in the middle', po_contents)
self.assertMsgId('Completed 50%% of all the tasks', po_contents)
self.assertMsgId('Completed 99%% of all the tasks', po_contents)
self.assertMsgId("Shouldn't double escape this sequence: %% (two percent signs)", po_contents)
self.assertMsgId("Shouldn't double escape this sequence %% either", po_contents)
self.assertMsgId("Looks like a str fmt spec %%s but shouldn't be interpreted as such", po_contents)
self.assertMsgId("Looks like a str fmt spec %% o but shouldn't be interpreted as such", po_contents)
def test_templatize_blocktrans_tag(self):
# ticket #11966
os.chdir(self.test_dir)
management.call_command('makemessages', locale=[LOCALE], verbosity=0)
self.assertTrue(os.path.exists(self.PO_FILE))
with open(self.PO_FILE, 'r') as fp:
po_contents = force_text(fp.read())
self.assertMsgId('I think that 100%% is more that 50%% of anything.', po_contents)
self.assertMsgId('I think that 100%% is more that 50%% of %(obj)s.', po_contents)
self.assertMsgId("Blocktrans extraction shouldn't double escape this: %%, a=%(a)s", po_contents)
def test_blocktrans_trimmed(self):
os.chdir(self.test_dir)
management.call_command('makemessages', locale=[LOCALE], verbosity=0)
self.assertTrue(os.path.exists(self.PO_FILE))
with open(self.PO_FILE, 'r') as fp:
po_contents = force_text(fp.read())
# should not be trimmed
self.assertNotMsgId('Text with a few line breaks.', po_contents)
# should be trimmed
self.assertMsgId("Again some text with a few line breaks, this time should be trimmed.", po_contents)
# #21406 -- Should adjust for eaten line numbers
self.assertMsgId("I'm on line 97", po_contents)
self.assertLocationCommentPresent(self.PO_FILE, 97, 'templates', 'test.html')
def test_force_en_us_locale(self):
"""Value of locale-munging option used by the command is the right one"""
from django.core.management.commands.makemessages import Command
self.assertTrue(Command.leave_locale_alone)
def test_extraction_error(self):
os.chdir(self.test_dir)
self.assertRaises(SyntaxError, management.call_command, 'makemessages', locale=[LOCALE], extensions=['tpl'], verbosity=0)
with self.assertRaises(SyntaxError) as context_manager:
management.call_command('makemessages', locale=[LOCALE], extensions=['tpl'], verbosity=0)
six.assertRegex(
self, str(context_manager.exception),
r'Translation blocks must not include other block tags: blocktrans \(file templates[/\\]template_with_error\.tpl, line 3\)'
)
# Check that the temporary file was cleaned up
self.assertFalse(os.path.exists('./templates/template_with_error.tpl.py'))
def test_unicode_decode_error(self):
os.chdir(self.test_dir)
shutil.copyfile('./not_utf8.sample', './not_utf8.txt')
self.addCleanup(self.rmfile, os.path.join(self.test_dir, 'not_utf8.txt'))
out = StringIO()
management.call_command('makemessages', locale=[LOCALE], stdout=out)
self.assertIn("UnicodeDecodeError: skipped file not_utf8.txt in .",
force_text(out.getvalue()))
def test_extraction_warning(self):
"""test xgettext warning about multiple bare interpolation placeholders"""
os.chdir(self.test_dir)
shutil.copyfile('./code.sample', './code_sample.py')
self.addCleanup(self.rmfile, os.path.join(self.test_dir, 'code_sample.py'))
out = StringIO()
management.call_command('makemessages', locale=[LOCALE], stdout=out)
self.assertIn("code_sample.py:4", force_text(out.getvalue()))
def test_template_message_context_extractor(self):
"""
Ensure that message contexts are correctly extracted for the
{% trans %} and {% blocktrans %} template tags.
Refs #14806.
"""
os.chdir(self.test_dir)
management.call_command('makemessages', locale=[LOCALE], verbosity=0)
self.assertTrue(os.path.exists(self.PO_FILE))
with open(self.PO_FILE, 'r') as fp:
po_contents = force_text(fp.read())
# {% trans %}
self.assertTrue('msgctxt "Special trans context #1"' in po_contents)
self.assertMsgId("Translatable literal #7a", po_contents)
self.assertTrue('msgctxt "Special trans context #2"' in po_contents)
self.assertMsgId("Translatable literal #7b", po_contents)
self.assertTrue('msgctxt "Special trans context #3"' in po_contents)
self.assertMsgId("Translatable literal #7c", po_contents)
# {% blocktrans %}
self.assertTrue('msgctxt "Special blocktrans context #1"' in po_contents)
self.assertMsgId("Translatable literal #8a", po_contents)
self.assertTrue('msgctxt "Special blocktrans context #2"' in po_contents)
self.assertMsgId("Translatable literal #8b-singular", po_contents)
self.assertTrue("Translatable literal #8b-plural" in po_contents)
self.assertTrue('msgctxt "Special blocktrans context #3"' in po_contents)
self.assertMsgId("Translatable literal #8c-singular", po_contents)
self.assertTrue("Translatable literal #8c-plural" in po_contents)
self.assertTrue('msgctxt "Special blocktrans context #4"' in po_contents)
self.assertMsgId("Translatable literal #8d %(a)s", po_contents)
def test_context_in_single_quotes(self):
os.chdir(self.test_dir)
management.call_command('makemessages', locale=[LOCALE], verbosity=0)
self.assertTrue(os.path.exists(self.PO_FILE))
with open(self.PO_FILE, 'r') as fp:
po_contents = force_text(fp.read())
# {% trans %}
self.assertTrue('msgctxt "Context wrapped in double quotes"' in po_contents)
self.assertTrue('msgctxt "Context wrapped in single quotes"' in po_contents)
# {% blocktrans %}
self.assertTrue('msgctxt "Special blocktrans context wrapped in double quotes"' in po_contents)
self.assertTrue('msgctxt "Special blocktrans context wrapped in single quotes"' in po_contents)
def test_template_comments(self):
"""Template comment tags on the same line of other constructs (#19552)"""
os.chdir(self.test_dir)
# Test detection/end user reporting of old, incorrect templates
# translator comments syntax
with warnings.catch_warnings(record=True) as ws:
warnings.simplefilter('always')
management.call_command('makemessages', locale=[LOCALE], extensions=['thtml'], verbosity=0)
self.assertEqual(len(ws), 3)
for w in ws:
self.assertTrue(issubclass(w.category, TranslatorCommentWarning))
six.assertRegex(
self, str(ws[0].message),
r"The translator-targeted comment 'Translators: ignored i18n comment #1' \(file templates[/\\]comments.thtml, line 4\) was ignored, because it wasn't the last item on the line\."
)
six.assertRegex(
self, str(ws[1].message),
r"The translator-targeted comment 'Translators: ignored i18n comment #3' \(file templates[/\\]comments.thtml, line 6\) was ignored, because it wasn't the last item on the line\."
)
six.assertRegex(
self, str(ws[2].message),
r"The translator-targeted comment 'Translators: ignored i18n comment #4' \(file templates[/\\]comments.thtml, line 8\) was ignored, because it wasn't the last item on the line\."
)
# Now test .po file contents
self.assertTrue(os.path.exists(self.PO_FILE))
with open(self.PO_FILE, 'r') as fp:
po_contents = force_text(fp.read())
self.assertMsgId('Translatable literal #9a', po_contents)
self.assertFalse('ignored comment #1' in po_contents)
self.assertFalse('Translators: ignored i18n comment #1' in po_contents)
self.assertMsgId("Translatable literal #9b", po_contents)
self.assertFalse('ignored i18n comment #2' in po_contents)
self.assertFalse('ignored comment #2' in po_contents)
self.assertMsgId('Translatable literal #9c', po_contents)
self.assertFalse('ignored comment #3' in po_contents)
self.assertFalse('ignored i18n comment #3' in po_contents)
self.assertMsgId('Translatable literal #9d', po_contents)
self.assertFalse('ignored comment #4' in po_contents)
self.assertMsgId('Translatable literal #9e', po_contents)
self.assertFalse('ignored comment #5' in po_contents)
self.assertFalse('ignored i18n comment #4' in po_contents)
self.assertMsgId('Translatable literal #9f', po_contents)
self.assertTrue('#. Translators: valid i18n comment #5' in po_contents)
self.assertMsgId('Translatable literal #9g', po_contents)
self.assertTrue('#. Translators: valid i18n comment #6' in po_contents)
self.assertMsgId('Translatable literal #9h', po_contents)
self.assertTrue('#. Translators: valid i18n comment #7' in po_contents)
self.assertMsgId('Translatable literal #9i', po_contents)
six.assertRegex(self, po_contents, r'#\..+Translators: valid i18n comment #8')
six.assertRegex(self, po_contents, r'#\..+Translators: valid i18n comment #9')
self.assertMsgId("Translatable literal #9j", po_contents)
class JavascriptExtractorTests(ExtractorTests):
PO_FILE = 'locale/%s/LC_MESSAGES/djangojs.po' % LOCALE
def test_javascript_literals(self):
os.chdir(self.test_dir)
_, po_contents = self._run_makemessages(domain='djangojs')
self.assertMsgId('This literal should be included.', po_contents)
self.assertMsgId('This one as well.', po_contents)
self.assertMsgId(r'He said, \"hello\".', po_contents)
self.assertMsgId("okkkk", po_contents)
self.assertMsgId("TEXT", po_contents)
self.assertMsgId("It's at http://example.com", po_contents)
self.assertMsgId("String", po_contents)
self.assertMsgId("/* but this one will be too */ 'cause there is no way of telling...", po_contents)
self.assertMsgId("foo", po_contents)
self.assertMsgId("bar", po_contents)
self.assertMsgId("baz", po_contents)
self.assertMsgId("quz", po_contents)
self.assertMsgId("foobar", po_contents)
@override_settings(
STATIC_ROOT=os.path.join(this_directory, 'commands', 'static/'),
MEDIA_ROOT=os.path.join(this_directory, 'commands', 'media_root/'))
def test_media_static_dirs_ignored(self):
"""
Regression test for #23583.
"""
_, po_contents = self._run_makemessages(domain='djangojs')
self.assertMsgId("Static content inside app should be included.", po_contents)
self.assertNotMsgId("Content from STATIC_ROOT should not be included", po_contents)
class IgnoredExtractorTests(ExtractorTests):
def test_ignore_directory(self):
out, po_contents = self._run_makemessages(ignore_patterns=[
os.path.join('ignore_dir', '*'),
])
self.assertTrue("ignoring directory ignore_dir" in out)
self.assertMsgId('This literal should be included.', po_contents)
self.assertNotMsgId('This should be ignored.', po_contents)
def test_ignore_subdirectory(self):
out, po_contents = self._run_makemessages(ignore_patterns=[
'templates/*/ignore.html',
'templates/subdir/*',
])
self.assertTrue("ignoring directory subdir" in out)
self.assertNotMsgId('This subdir should be ignored too.', po_contents)
def test_ignore_file_patterns(self):
out, po_contents = self._run_makemessages(ignore_patterns=[
'xxx_*',
])
self.assertTrue("ignoring file xxx_ignored.html" in out)
self.assertNotMsgId('This should be ignored too.', po_contents)
@override_settings(
STATIC_ROOT=os.path.join(this_directory, 'commands', 'static/'),
MEDIA_ROOT=os.path.join(this_directory, 'commands', 'media_root/'))
def test_media_static_dirs_ignored(self):
out, _ = self._run_makemessages()
self.assertIn("ignoring directory static", out)
self.assertIn("ignoring directory media_root", out)
class SymlinkExtractorTests(ExtractorTests):
def setUp(self):
super(SymlinkExtractorTests, self).setUp()
self.symlinked_dir = os.path.join(self.test_dir, 'templates_symlinked')
def tearDown(self):
super(SymlinkExtractorTests, self).tearDown()
os.chdir(self.test_dir)
try:
os.remove(self.symlinked_dir)
except OSError:
pass
os.chdir(self._cwd)
def test_symlink(self):
# On Python < 3.2 os.symlink() exists only on Unix
if hasattr(os, 'symlink'):
if os.path.exists(self.symlinked_dir):
self.assertTrue(os.path.islink(self.symlinked_dir))
else:
                # On Python >= 3.2, os.symlink() always exists but can
                # fail at runtime when the user doesn't have the needed
                # permissions on Windows versions that support symbolic
                # links (>= 6/Vista). See Python issue 9333
                # (http://bugs.python.org/issue9333). Skip the test in
                # that case.
try:
os.symlink(os.path.join(self.test_dir, 'templates'), self.symlinked_dir)
except (OSError, NotImplementedError):
raise SkipTest("os.symlink() is available on this OS but can't be used by this user.")
os.chdir(self.test_dir)
management.call_command('makemessages', locale=[LOCALE], verbosity=0, symlinks=True)
self.assertTrue(os.path.exists(self.PO_FILE))
with open(self.PO_FILE, 'r') as fp:
po_contents = force_text(fp.read())
self.assertMsgId('This literal should be included.', po_contents)
self.assertTrue('templates_symlinked/test.html' in po_contents)
class CopyPluralFormsExtractorTests(ExtractorTests):
PO_FILE_ES = 'locale/es/LC_MESSAGES/django.po'
def tearDown(self):
super(CopyPluralFormsExtractorTests, self).tearDown()
os.chdir(self.test_dir)
try:
self._rmrf('locale/es')
except OSError:
pass
os.chdir(self._cwd)
def test_copy_plural_forms(self):
os.chdir(self.test_dir)
management.call_command('makemessages', locale=[LOCALE], verbosity=0)
self.assertTrue(os.path.exists(self.PO_FILE))
with open(self.PO_FILE, 'r') as fp:
po_contents = force_text(fp.read())
self.assertTrue('Plural-Forms: nplurals=2; plural=(n != 1)' in po_contents)
def test_override_plural_forms(self):
"""Ticket #20311."""
os.chdir(self.test_dir)
management.call_command('makemessages', locale=['es'], extensions=['djtpl'], verbosity=0)
self.assertTrue(os.path.exists(self.PO_FILE_ES))
with io.open(self.PO_FILE_ES, 'r', encoding='utf-8') as fp:
po_contents = fp.read()
found = re.findall(r'^(?P<value>"Plural-Forms.+?\\n")\s*$', po_contents, re.MULTILINE | re.DOTALL)
self.assertEqual(1, len(found))
class NoWrapExtractorTests(ExtractorTests):
def test_no_wrap_enabled(self):
os.chdir(self.test_dir)
management.call_command('makemessages', locale=[LOCALE], verbosity=0, no_wrap=True)
self.assertTrue(os.path.exists(self.PO_FILE))
with open(self.PO_FILE, 'r') as fp:
po_contents = force_text(fp.read())
self.assertMsgId('This literal should also be included wrapped or not wrapped depending on the use of the --no-wrap option.', po_contents)
def test_no_wrap_disabled(self):
os.chdir(self.test_dir)
management.call_command('makemessages', locale=[LOCALE], verbosity=0, no_wrap=False)
self.assertTrue(os.path.exists(self.PO_FILE))
with open(self.PO_FILE, 'r') as fp:
po_contents = force_text(fp.read())
self.assertMsgId('""\n"This literal should also be included wrapped or not wrapped depending on the "\n"use of the --no-wrap option."', po_contents, use_quotes=False)
class LocationCommentsTests(ExtractorTests):
def test_no_location_enabled(self):
"""Behavior is correct if --no-location switch is specified. See #16903."""
os.chdir(self.test_dir)
management.call_command('makemessages', locale=[LOCALE], verbosity=0, no_location=True)
self.assertTrue(os.path.exists(self.PO_FILE))
self.assertLocationCommentNotPresent(self.PO_FILE, 55, 'templates', 'test.html.py')
def test_no_location_disabled(self):
"""Behavior is correct if --no-location switch isn't specified."""
os.chdir(self.test_dir)
management.call_command('makemessages', locale=[LOCALE], verbosity=0, no_location=False)
self.assertTrue(os.path.exists(self.PO_FILE))
# #16903 -- Standard comment with source file relative path should be present
self.assertLocationCommentPresent(self.PO_FILE, 55, 'templates', 'test.html')
# #21208 -- Leaky paths in comments on Windows e.g. #: path\to\file.html.py:123
self.assertLocationCommentNotPresent(self.PO_FILE, None, 'templates', 'test.html.py')
class KeepPotFileExtractorTests(ExtractorTests):
POT_FILE = 'locale/django.pot'
def setUp(self):
super(KeepPotFileExtractorTests, self).setUp()
def tearDown(self):
super(KeepPotFileExtractorTests, self).tearDown()
os.chdir(self.test_dir)
try:
os.unlink(self.POT_FILE)
except OSError:
pass
os.chdir(self._cwd)
def test_keep_pot_disabled_by_default(self):
os.chdir(self.test_dir)
management.call_command('makemessages', locale=[LOCALE], verbosity=0)
self.assertFalse(os.path.exists(self.POT_FILE))
def test_keep_pot_explicitly_disabled(self):
os.chdir(self.test_dir)
management.call_command('makemessages', locale=[LOCALE], verbosity=0,
keep_pot=False)
self.assertFalse(os.path.exists(self.POT_FILE))
def test_keep_pot_enabled(self):
os.chdir(self.test_dir)
management.call_command('makemessages', locale=[LOCALE], verbosity=0,
keep_pot=True)
self.assertTrue(os.path.exists(self.POT_FILE))
class MultipleLocaleExtractionTests(ExtractorTests):
PO_FILE_PT = 'locale/pt/LC_MESSAGES/django.po'
PO_FILE_DE = 'locale/de/LC_MESSAGES/django.po'
LOCALES = ['pt', 'de', 'ch']
def tearDown(self):
os.chdir(self.test_dir)
for locale in self.LOCALES:
try:
self._rmrf('locale/%s' % locale)
except OSError:
pass
os.chdir(self._cwd)
def test_multiple_locales(self):
os.chdir(self.test_dir)
management.call_command('makemessages', locale=['pt', 'de'], verbosity=0)
self.assertTrue(os.path.exists(self.PO_FILE_PT))
self.assertTrue(os.path.exists(self.PO_FILE_DE))
class ExcludedLocaleExtractionTests(ExtractorTests):
LOCALES = ['en', 'fr', 'it']
PO_FILE = 'locale/%s/LC_MESSAGES/django.po'
test_dir = os.path.abspath(os.path.join(this_directory, 'exclude'))
def _set_times_for_all_po_files(self):
"""
Set access and modification times to the Unix epoch time for all the .po files.
"""
for locale in self.LOCALES:
os.utime(self.PO_FILE % locale, (0, 0))
def setUp(self):
super(ExcludedLocaleExtractionTests, self).setUp()
os.chdir(self.test_dir) # ExtractorTests.tearDown() takes care of restoring.
shutil.copytree('canned_locale', 'locale')
self._set_times_for_all_po_files()
self.addCleanup(self._rmrf, os.path.join(self.test_dir, 'locale'))
def test_command_help(self):
old_stdout, old_stderr = sys.stdout, sys.stderr
sys.stdout, sys.stderr = StringIO(), StringIO()
try:
# `call_command` bypasses the parser; by calling
# `execute_from_command_line` with the help subcommand we
# ensure that there are no issues with the parser itself.
execute_from_command_line(['django-admin', 'help', 'makemessages'])
finally:
sys.stdout, sys.stderr = old_stdout, old_stderr
def test_one_locale_excluded(self):
management.call_command('makemessages', exclude=['it'], stdout=StringIO())
self.assertRecentlyModified(self.PO_FILE % 'en')
self.assertRecentlyModified(self.PO_FILE % 'fr')
self.assertNotRecentlyModified(self.PO_FILE % 'it')
def test_multiple_locales_excluded(self):
management.call_command('makemessages', exclude=['it', 'fr'], stdout=StringIO())
self.assertRecentlyModified(self.PO_FILE % 'en')
self.assertNotRecentlyModified(self.PO_FILE % 'fr')
self.assertNotRecentlyModified(self.PO_FILE % 'it')
def test_one_locale_excluded_with_locale(self):
management.call_command('makemessages', locale=['en', 'fr'], exclude=['fr'], stdout=StringIO())
self.assertRecentlyModified(self.PO_FILE % 'en')
self.assertNotRecentlyModified(self.PO_FILE % 'fr')
self.assertNotRecentlyModified(self.PO_FILE % 'it')
def test_multiple_locales_excluded_with_locale(self):
management.call_command('makemessages', locale=['en', 'fr', 'it'], exclude=['fr', 'it'],
stdout=StringIO())
self.assertRecentlyModified(self.PO_FILE % 'en')
self.assertNotRecentlyModified(self.PO_FILE % 'fr')
self.assertNotRecentlyModified(self.PO_FILE % 'it')
class CustomLayoutExtractionTests(ExtractorTests):
def setUp(self):
self._cwd = os.getcwd()
self.test_dir = os.path.join(this_directory, 'project_dir')
def test_no_locale_raises(self):
os.chdir(self.test_dir)
with six.assertRaisesRegex(self, management.CommandError,
"Unable to find a locale path to store translations for file"):
management.call_command('makemessages', locale=LOCALE, verbosity=0)
@override_settings(
LOCALE_PATHS=(os.path.join(
this_directory, 'project_dir', 'project_locale'),)
)
def test_project_locale_paths(self):
"""
Test that:
* translations for an app containing a locale folder are stored in that folder
* translations outside of that app are in LOCALE_PATHS[0]
"""
os.chdir(self.test_dir)
self.addCleanup(shutil.rmtree,
os.path.join(settings.LOCALE_PATHS[0], LOCALE), True)
self.addCleanup(shutil.rmtree,
os.path.join(self.test_dir, 'app_with_locale', 'locale', LOCALE), True)
management.call_command('makemessages', locale=[LOCALE], verbosity=0)
project_de_locale = os.path.join(
self.test_dir, 'project_locale', 'de', 'LC_MESSAGES', 'django.po')
app_de_locale = os.path.join(
self.test_dir, 'app_with_locale', 'locale', 'de', 'LC_MESSAGES', 'django.po')
self.assertTrue(os.path.exists(project_de_locale))
self.assertTrue(os.path.exists(app_de_locale))
with open(project_de_locale, 'r') as fp:
po_contents = force_text(fp.read())
self.assertMsgId('This app has no locale directory', po_contents)
self.assertMsgId('This is a project-level string', po_contents)
with open(app_de_locale, 'r') as fp:
po_contents = force_text(fp.read())
self.assertMsgId('This app has a locale directory', po_contents)
|
{
"content_hash": "6e9c6305563c360bdbc5da239e6402b5",
"timestamp": "",
"source": "github",
"line_count": 681,
"max_line_length": 194,
"avg_line_length": 46.198237885462554,
"alnum_prop": 0.6354216331330854,
"repo_name": "andersonresende/django",
"id": "c0c03ee50698be2d1a525d53b5ccd408bac0bd56",
"size": "31473",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/i18n/test_extraction.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
from __future__ import absolute_import
from django.db import IntegrityError
from django.db.models.signals import post_save
from django.utils import timezone
from sentry.models import User, UserEmail
from sentry.signals import email_verified
def create_user_email(instance, created, **kwargs):
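    # post_save receiver: mirror each newly created User's address into a
    # UserEmail row; the IntegrityError guard swallows the race where the
    # row already exists.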
if created:
try:
UserEmail.objects.create(email=instance.email, user=instance)
except IntegrityError:
pass
post_save.connect(create_user_email, sender=User, dispatch_uid="create_user_email", weak=False)
@email_verified.connect(weak=False)
def verify_newsletter_subscription(sender, **kwargs):
from sentry.app import newsletter
if not newsletter.enabled:
return
if not sender.is_primary():
return
newsletter.update_subscription(
sender.user,
verified=True,
verified_date=timezone.now(),
)
|
{
"content_hash": "0281dbd2abd2f625fc64c3dcceb60c21",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 95,
"avg_line_length": 24.88888888888889,
"alnum_prop": 0.7042410714285714,
"repo_name": "jean/sentry",
"id": "88228f2d8506b144a2b20cd4a4f315c1bbf50a1f",
"size": "896",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/sentry/receivers/useremail.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "296112"
},
{
"name": "HTML",
"bytes": "314273"
},
{
"name": "JavaScript",
"bytes": "1293918"
},
{
"name": "Lua",
"bytes": "57158"
},
{
"name": "Makefile",
"bytes": "6632"
},
{
"name": "Python",
"bytes": "24515298"
},
{
"name": "Ruby",
"bytes": "4410"
},
{
"name": "Shell",
"bytes": "2942"
}
],
"symlink_target": ""
}
|
from lino.projects.std.settings import *
SITE = Site(globals(), 'lino_book.projects.combo')
SITE.demo_fixtures = ['demo']
DEBUG = True
|
{
"content_hash": "f382b660733408054e61822f023abfdc",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 50,
"avg_line_length": 27.2,
"alnum_prop": 0.7205882352941176,
"repo_name": "lino-framework/book",
"id": "4418e4c780dbca4ce22643e75c75845f23e71365",
"size": "136",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "lino_book/projects/combo/settings.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "3668"
},
{
"name": "JavaScript",
"bytes": "7140"
},
{
"name": "Python",
"bytes": "991438"
},
{
"name": "Shell",
"bytes": "989"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
# Techniques for outlier detection of speeds. Each of these returns a speed threshold that
# can be used with outlier detection techniques.
# Standard imports
from future import standard_library
standard_library.install_aliases()
from builtins import *
from builtins import object
import logging
class BoxplotOutlier(object):
MINOR = 1.5
MAJOR = 3
def __init__(self, multiplier = MAJOR, ignore_zeros = False):
self.multiplier = multiplier
self.ignore_zeros = ignore_zeros
def get_threshold(self, with_speeds_df):
if self.ignore_zeros:
df_to_use = with_speeds_df[with_speeds_df.speed > 0]
else:
df_to_use = with_speeds_df
quartile_vals = df_to_use.quantile([0.25, 0.75]).speed
logging.debug("quartile values are %s" % quartile_vals)
iqr = quartile_vals.iloc[1] - quartile_vals.iloc[0]
logging.debug("iqr %s" % iqr)
return quartile_vals.iloc[1] + self.multiplier * iqr
class SimpleQuartileOutlier(object):
def __init__(self, quantile = 0.99, ignore_zeros = False):
self.quantile = quantile
self.ignore_zeros = ignore_zeros
def get_threshold(self, with_speeds_df):
if self.ignore_zeros:
df_to_use = with_speeds_df[with_speeds_df.speed > 0]
else:
df_to_use = with_speeds_df
return df_to_use.speed.quantile(self.quantile)
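# --- Illustrative usage (a minimal sketch; assumes pandas is installed and
# the sample speeds are made up) ---
if __name__ == '__main__':
    import pandas as pd
    with_speeds_df = pd.DataFrame({'speed': [0.0, 1.2, 1.5, 2.0, 2.2, 45.0]})
    print(BoxplotOutlier().get_threshold(with_speeds_df))
    print(SimpleQuartileOutlier(quantile=0.9).get_threshold(with_speeds_df))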
|
{
"content_hash": "0b9241d5b0ccbc3b91fb7f8cb8cfd893",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 91,
"avg_line_length": 35.38636363636363,
"alnum_prop": 0.6634553628773282,
"repo_name": "sunil07t/e-mission-server",
"id": "3085c3c14e0c451556824c127caec8d989981fd0",
"size": "1557",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "emission/analysis/intake/cleaning/cleaning_methods/speed_outlier_detection.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "445"
},
{
"name": "CSS",
"bytes": "711874"
},
{
"name": "HTML",
"bytes": "122542"
},
{
"name": "JavaScript",
"bytes": "6962852"
},
{
"name": "Jupyter Notebook",
"bytes": "99521529"
},
{
"name": "Python",
"bytes": "1800632"
},
{
"name": "Shell",
"bytes": "2299"
},
{
"name": "Smarty",
"bytes": "3456"
}
],
"symlink_target": ""
}
|
from typing import Type
from dependency_injector import providers
class Animal:
...
class Cat(Animal):
def __init__(self, *_, **__): ...
# Test 1: to check the return type
provider1 = providers.Dependency(instance_of=Animal)
provider1.override(providers.Factory(Cat))
var1: Animal = provider1()
# Test 2: to check the return type
provider2 = providers.Dependency(instance_of=Animal)
var2: Type[Animal] = provider2.instance_of
# Test 3: to check the return type with await
provider3 = providers.Dependency(instance_of=Animal)
async def _async3() -> None:
var1: Animal = await provider3() # type: ignore
var2: Animal = await provider3.async_()
|
{
"content_hash": "ab1c383cc2598e44b641896105d48fa5",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 52,
"avg_line_length": 23.892857142857142,
"alnum_prop": 0.7144992526158446,
"repo_name": "ets-labs/python-dependency-injector",
"id": "a699e4c2026a990e23ec34ea97c5bf4af507fba5",
"size": "669",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tests/typing/dependency.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Cython",
"bytes": "201812"
},
{
"name": "Makefile",
"bytes": "1942"
},
{
"name": "Python",
"bytes": "492977"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import gdbremote_testcase
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class TestGdbRemote_QPassSignals(gdbremote_testcase.GdbRemoteTestCaseBase):
mydir = TestBase.compute_mydir(__file__)
def expect_signal(self, expected_signo):
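        # Continue the inferior and capture the two hex digits of the
        # signal number from the resulting $Txx... stop-reply packet.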
self.test_sequence.add_log_lines(["read packet: $vCont;c#a8",
{"direction": "send",
"regex": r"^\$T([0-9a-fA-F]{2}).*#[0-9a-fA-F]{2}$",
"capture": {1: "hex_exit_code"}},
],
True)
context = self.expect_gdbremote_sequence()
self.assertIsNotNone(context)
hex_exit_code = context.get("hex_exit_code")
self.assertIsNotNone(hex_exit_code)
self.assertEqual(int(hex_exit_code, 16), expected_signo)
def expect_exit_code(self, exit_code):
self.test_sequence.add_log_lines(
["read packet: $vCont;c#a8",
"send packet: $W{0:02x}#00".format(exit_code)],
True)
self.expect_gdbremote_sequence()
def ignore_signals(self, signals):
def signal_name_to_hex(signame):
return format(lldbutil.get_signal_number(signame), 'x')
signals_str = ";".join(map(signal_name_to_hex, signals))
self.test_sequence.add_log_lines(["read packet: $QPassSignals:"
+ signals_str + " #00",
"send packet: $OK#00"],
True)
context = self.expect_gdbremote_sequence()
self.assertIsNotNone(context)
@llgs_test
@skipUnlessPlatform(["linux", "android"])
def test_q_pass_signals(self):
self.init_llgs_test()
self.build()
self.set_inferior_startup_launch()
procs = self.prep_debug_monitor_and_inferior()
expected_signals = ["SIGSEGV",
"SIGALRM", "SIGFPE", "SIGBUS", "SIGINT", "SIGHUP"]
signals_to_ignore = ["SIGUSR1", "SIGUSR2"]
self.ignore_signals(signals_to_ignore)
for signal_name in expected_signals:
signo = lldbutil.get_signal_number(signal_name)
self.expect_signal(signo)
self.expect_exit_code(len(signals_to_ignore))
@llgs_test
@skipUnlessPlatform(["linux", "android"])
def test_change_signals_at_runtime(self):
self.init_llgs_test()
self.build()
self.set_inferior_startup_launch()
procs = self.prep_debug_monitor_and_inferior()
expected_signals = ["SIGSEGV", "SIGUSR1", "SIGUSR2",
"SIGALRM", "SIGHUP"]
signals_to_ignore = ["SIGFPE", "SIGBUS", "SIGINT"]
for signal_name in expected_signals:
signo = lldbutil.get_signal_number(signal_name)
self.expect_signal(signo)
if signal_name == "SIGALRM":
self.ignore_signals(signals_to_ignore)
self.expect_exit_code(len(signals_to_ignore))
@llgs_test
def test_default_signals_behavior(self):
self.init_llgs_test()
self.build()
self.set_inferior_startup_launch()
procs = self.prep_debug_monitor_and_inferior()
expected_signals = ["SIGSEGV", "SIGUSR1", "SIGUSR2",
"SIGALRM", "SIGFPE", "SIGBUS", "SIGINT", "SIGHUP"]
for signal_name in expected_signals:
signo = lldbutil.get_signal_number(signal_name)
self.expect_signal(signo)
self.expect_exit_code(0)
@llgs_test
@skipUnlessPlatform(["linux", "android"])
def test_support_q_pass_signals(self):
self.init_llgs_test()
self.build()
# Start up the stub and start/prep the inferior.
self.set_inferior_startup_launch()
procs = self.prep_debug_monitor_and_inferior()
self.add_qSupported_packets()
# Run the packet stream.
context = self.expect_gdbremote_sequence()
self.assertIsNotNone(context)
# Retrieve the qSupported features and check QPassSignals+
supported_dict = self.parse_qSupported_response(context)
self.assertEqual(supported_dict["QPassSignals"], "+")
|
{
"content_hash": "052d8479cfa67e0e516bf01c9e296ba2",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 94,
"avg_line_length": 38.309734513274336,
"alnum_prop": 0.5768075768075768,
"repo_name": "youtube/cobalt_sandbox",
"id": "7105bcb078bc6b61112dcefd6e6d0e2187a8421d",
"size": "4436",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "third_party/llvm-project/lldb/packages/Python/lldbsuite/test/tools/lldb-server/signal-filtering/TestGdbRemote_QPassSignals.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
"""
Automated processing of associated exposures
"""
def fetch_from_AWS_bucket(root='j022644-044142', id=1161, product='.beams.fits', bucket_name='aws-grivam', verbose=True, dryrun=False, output_path='./', get_fit_args=False, skip_existing=True):
"""
Fetch products from the Grizli AWS bucket.
Boto3 will require that you have set up your AWS credentials in, e.g.,
~/.aws/credentials
"""
import os
import boto3
s3 = boto3.resource('s3')
s3_client = boto3.client('s3')
bkt = s3.Bucket(bucket_name)
files = [obj.key for obj in bkt.objects.filter(Prefix='Pipeline/{0}/Extractions/{0}_{1:05d}{2}'.format(root, id, product))]
if get_fit_args:
files += ['Pipeline/{0}/Extractions/fit_args.npy'.format(root)]
for file in files:
local = os.path.join(output_path, os.path.basename(file))
if verbose:
print('{0} -> {1}'.format(file, output_path))
if not dryrun:
            if os.path.exists(local) and skip_existing:
continue
bkt.download_file(file, local,
ExtraArgs={"RequestPayer": "requester"})
|
{
"content_hash": "97c5dd90bebb8ee910376d2f944c2f08",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 193,
"avg_line_length": 31.86111111111111,
"alnum_prop": 0.6129032258064516,
"repo_name": "gbrammer/grizli",
"id": "efef84a732db75bd973915c8e412547e4b8c269c",
"size": "1147",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "grizli/pipeline/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Cython",
"bytes": "20306"
},
{
"name": "Python",
"bytes": "2117532"
}
],
"symlink_target": ""
}
|
import json
import math
from dojo.models import Finding
class PhpSecurityAuditV2Parser(object):
def get_scan_types(self):
return ["PHP Security Audit v2"]
def get_label_for_scan_types(self, scan_type):
return scan_type
def get_description_for_scan_types(self, scan_type):
return "Import PHP Security Audit v2 Scan in JSON format."
def get_findings(self, filename, test):
tree = filename.read()
        try:
            # read() may return bytes or str depending on how the file
            # was opened; decode bytes first and fall back to raw str.
            data = json.loads(str(tree, 'utf-8'))
        except (TypeError, UnicodeDecodeError):
            data = json.loads(tree)
dupes = dict()
for filepath, report in list(data["files"].items()):
errors = report.get("errors") or 0
warns = report.get("warnings") or 0
if errors + warns > 0:
for issue in report["messages"]:
title = issue["source"]
findingdetail = "Filename: " + filepath + "\n"
findingdetail += "Line: " + str(issue["line"]) + "\n"
findingdetail += "Column: " + str(issue["column"]) + "\n"
findingdetail += "Rule Source: " + issue["source"] + "\n"
findingdetail += "Details: " + issue["message"] + "\n"
sev = PhpSecurityAuditV2Parser.get_severity_word(issue["severity"])
dupe_key = title + filepath + str(issue["line"]) + str(issue["column"])
if dupe_key in dupes:
find = dupes[dupe_key]
else:
dupes[dupe_key] = True
find = Finding(
title=title,
test=test,
description=findingdetail,
severity=sev.title(),
file_path=filepath,
line=issue["line"],
static_finding=True,
dynamic_finding=False,
)
dupes[dupe_key] = find
findingdetail = ''
return list(dupes.values())
@staticmethod
def get_severity_word(severity):
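        # The report severity appears to use a 0-10 style scale (an
        # assumption); halving and rounding up collapses it into five
        # buckets mapped onto Critical/High/Medium/Low.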
sev = math.ceil(severity / 2)
if sev == 5:
return 'Critical'
elif sev == 4:
return 'High'
elif sev == 3:
return 'Medium'
else:
return 'Low'
|
{
"content_hash": "6e0df9829f7a4f2f506edaaf0f58a619",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 91,
"avg_line_length": 32.68,
"alnum_prop": 0.46266829865361075,
"repo_name": "rackerlabs/django-DefectDojo",
"id": "4df82d3de560cdb9fce4071e80618f73001ef424",
"size": "2451",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dojo/tools/php_security_audit_v2/parser.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "18132"
},
{
"name": "Groff",
"bytes": "91"
},
{
"name": "HTML",
"bytes": "666571"
},
{
"name": "JavaScript",
"bytes": "6393"
},
{
"name": "Python",
"bytes": "524728"
},
{
"name": "Shell",
"bytes": "20558"
},
{
"name": "XSLT",
"bytes": "6624"
}
],
"symlink_target": ""
}
|
import os
import platform
import sys
import pytest
from _pytest.doctest import DoctestItem
from sklearn.utils import _IS_32BIT
from sklearn.externals import _pilutil
from sklearn._min_dependencies import PYTEST_MIN_VERSION
from sklearn.utils.fixes import np_version, parse_version
if parse_version(pytest.__version__) < parse_version(PYTEST_MIN_VERSION):
raise ImportError('Your version of pytest is too old, you should have '
'at least pytest >= {} installed.'
.format(PYTEST_MIN_VERSION))
def pytest_addoption(parser):
parser.addoption("--skip-network", action="store_true", default=False,
help="skip network tests")
def pytest_collection_modifyitems(config, items):
for item in items:
# FeatureHasher is not compatible with PyPy
if (item.name.endswith(('_hash.FeatureHasher',
'text.HashingVectorizer'))
and platform.python_implementation() == 'PyPy'):
marker = pytest.mark.skip(
reason='FeatureHasher is not compatible with PyPy')
item.add_marker(marker)
# Known failure on with GradientBoostingClassifier on ARM64
elif (item.name.endswith('GradientBoostingClassifier')
and platform.machine() == 'aarch64'):
marker = pytest.mark.xfail(
reason=(
                    'known failure. See '
'https://github.com/scikit-learn/scikit-learn/issues/17797' # noqa
)
)
item.add_marker(marker)
# Skip tests which require internet if the flag is provided
if (config.getoption("--skip-network")
or int(os.environ.get("SKLEARN_SKIP_NETWORK_TESTS", "0"))):
skip_network = pytest.mark.skip(
reason="test requires internet connectivity")
for item in items:
if "network" in item.keywords:
item.add_marker(skip_network)
# numpy changed the str/repr formatting of numpy arrays in 1.14. We want to
# run doctests only for numpy >= 1.14.
skip_doctests = False
try:
if np_version < parse_version('1.14'):
reason = 'doctests are only run for numpy >= 1.14'
skip_doctests = True
elif _IS_32BIT:
        reason = ('doctests are only run when the default numpy int is '
'64 bits.')
skip_doctests = True
elif sys.platform.startswith("win32"):
reason = ("doctests are not run for Windows because numpy arrays "
"repr is inconsistent across platforms.")
skip_doctests = True
except ImportError:
pass
if skip_doctests:
skip_marker = pytest.mark.skip(reason=reason)
for item in items:
if isinstance(item, DoctestItem):
item.add_marker(skip_marker)
elif not _pilutil.pillow_installed:
skip_marker = pytest.mark.skip(reason="pillow (or PIL) not installed!")
for item in items:
if item.name in [
"sklearn.feature_extraction.image.PatchExtractor",
"sklearn.feature_extraction.image.extract_patches_2d"]:
item.add_marker(skip_marker)
def pytest_configure(config):
import sys
sys._is_pytest_session = True
# declare our custom markers to avoid PytestUnknownMarkWarning
config.addinivalue_line(
"markers",
"network: mark a test for execution if network available."
)
def pytest_unconfigure(config):
import sys
del sys._is_pytest_session
|
{
"content_hash": "a55a1785be1f0cb2e9c3bb52b6615177",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 87,
"avg_line_length": 36.33,
"alnum_prop": 0.6077621800165153,
"repo_name": "ndingwall/scikit-learn",
"id": "5c48de4ac36a35d75104f82b7e07a6d4f84dcfa9",
"size": "4011",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "conftest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "C",
"bytes": "416843"
},
{
"name": "C++",
"bytes": "140261"
},
{
"name": "Makefile",
"bytes": "1630"
},
{
"name": "PowerShell",
"bytes": "17042"
},
{
"name": "Python",
"bytes": "6794973"
},
{
"name": "Shell",
"bytes": "13442"
}
],
"symlink_target": ""
}
|
from telegram.ext import CommandHandler, MessageHandler, Filters
from . import (start, auth, debug,
get_top, homeworks, mojno,
style, group_summary, cites)
METHODS = [
CommandHandler('start', start.on_start),
CommandHandler('raise', debug.on_raise),
MessageHandler(Filters.text & Filters.group, mojno.send_msg),
CommandHandler('top', get_top.get_top),
CommandHandler('top-activate', get_top.top_activate),
CommandHandler('top-deactivate', get_top.top_deactivate),
CommandHandler('summary', group_summary.get_summary),
CommandHandler('answers', cites.answers),
auth.conv_handler,
homeworks.conv_handler,
style.conv_handler
]
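# A handler registration sketch (hypothetical wiring; `dispatcher` comes from the bot setup):
#   for handler in METHODS:
#       dispatcher.add_handler(handler)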
|
{
"content_hash": "4208449e2949afb45de40ea2c63bdccc",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 65,
"avg_line_length": 36.8421052631579,
"alnum_prop": 0.6914285714285714,
"repo_name": "mesenev/top_bot_lyceum",
"id": "8057d55d4f43db5c32386a447cd1b14d8de1470b",
"size": "700",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "methods/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "71141"
}
],
"symlink_target": ""
}
|
import pylzma
class SerializerLZMA(object):
    def dumps(self, obj):
        return pylzma.compress(obj)
    def loads(self, s):
        return pylzma.decompress(s)
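# Minimal usage sketch (assuming pylzma is installed):
#   serializer = SerializerLZMA()
#   blob = serializer.dumps('some payload')
#   assert serializer.loads(blob) == 'some payload'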
|
{
"content_hash": "928f01eddfb63ee80a0570eb5820f67f",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 35,
"avg_line_length": 23.428571428571427,
"alnum_prop": 0.676829268292683,
"repo_name": "Jumpscale/jumpscale6_core",
"id": "a0861aaddcca8feb7ea10989329a5f9c617fb636",
"size": "165",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/JumpScale/baselib/serializers/SerializerLZMA.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "3681"
},
{
"name": "HTML",
"bytes": "11738"
},
{
"name": "JavaScript",
"bytes": "70132"
},
{
"name": "Lua",
"bytes": "2162"
},
{
"name": "Python",
"bytes": "5848017"
},
{
"name": "Shell",
"bytes": "7692"
}
],
"symlink_target": ""
}
|
import random
game_num = raw_input('How many games do you want to play? ')
sign = 1
win = 0
lose = 0
l = ["scissors", "rock", "paper"]
while sign:
    # an odd number of counted rounds guarantees a decisive result
    while int(game_num) % 2 != 1:
        game_num = raw_input('Please input an odd number : ')
    played = 0
    while played < int(game_num):
        computer_num = random.randint(0, 2)
        player_num = int(raw_input("What do you want to throw :\n0. scissors\n1. rock\n2. paper\n"))
        while (player_num not in [0, 1, 2]):
            player_num = int(raw_input("Please choose a number from :\n0. scissors\n1. rock\n2. paper\n"))
        print "The computer throws {0}".format(l[computer_num])
        if (player_num == 0 and computer_num == 1) or (player_num == 1 and computer_num == 2) or (player_num == 2 and computer_num == 0):
            lose += 1
            played += 1
            print "You lose one game"
        elif (player_num == 0 and computer_num == 2) or (player_num == 1 and computer_num == 0) or (player_num == 2 and computer_num == 1):
            win += 1
            played += 1
            print "You win one game"
        else:
            # a drawn round is replayed and does not count toward the total
            print "This is a draw"
    if win > lose:
        print "You win"
        sign = 0
    elif win < lose:
        print "You lose!"
        sign = 0
    else:
        # unreachable once draws are replayed; kept as a safety branch
        print "This is a draw game"
        sign = 0
|
{
"content_hash": "8595d15b288ad743c0f98de9d205e594",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 133,
"avg_line_length": 30.783783783783782,
"alnum_prop": 0.6224758560140474,
"repo_name": "51reboot/actual_13_homework",
"id": "d4aaf81daf546dc3c40c189c9368aacd7f8b1536",
"size": "1189",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "02/leon/work-3.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "280720"
},
{
"name": "HTML",
"bytes": "348571"
},
{
"name": "JavaScript",
"bytes": "131407"
},
{
"name": "Python",
"bytes": "239436"
}
],
"symlink_target": ""
}
|
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Tagline'
db.create_table(u'pleiapp_tagline', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('text', self.gf('django.db.models.fields.TextField')()),
))
db.send_create_signal(u'pleiapp', ['Tagline'])
# Adding M2M table for field related_resources on 'Dictionary'
m2m_table_name = db.shorten_name(u'pleiapp_dictionary_related_resources')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('dictionary', models.ForeignKey(orm[u'pleiapp.dictionary'], null=False)),
('resource', models.ForeignKey(orm[u'pleiapp.resource'], null=False))
))
db.create_unique(m2m_table_name, ['dictionary_id', 'resource_id'])
# Adding M2M table for field related_faqs on 'Dictionary'
m2m_table_name = db.shorten_name(u'pleiapp_dictionary_related_faqs')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('dictionary', models.ForeignKey(orm[u'pleiapp.dictionary'], null=False)),
('faq', models.ForeignKey(orm[u'pleiapp.faq'], null=False))
))
db.create_unique(m2m_table_name, ['dictionary_id', 'faq_id'])
# Adding M2M table for field related_resources on 'Faq'
m2m_table_name = db.shorten_name(u'pleiapp_faq_related_resources')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('faq', models.ForeignKey(orm[u'pleiapp.faq'], null=False)),
('resource', models.ForeignKey(orm[u'pleiapp.resource'], null=False))
))
db.create_unique(m2m_table_name, ['faq_id', 'resource_id'])
def backwards(self, orm):
# Deleting model 'Tagline'
db.delete_table(u'pleiapp_tagline')
# Removing M2M table for field related_resources on 'Dictionary'
db.delete_table(db.shorten_name(u'pleiapp_dictionary_related_resources'))
# Removing M2M table for field related_faqs on 'Dictionary'
db.delete_table(db.shorten_name(u'pleiapp_dictionary_related_faqs'))
# Removing M2M table for field related_resources on 'Faq'
db.delete_table(db.shorten_name(u'pleiapp_faq_related_resources'))
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'generic.assignedkeyword': {
'Meta': {'ordering': "('_order',)", 'object_name': 'AssignedKeyword'},
'_order': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'keyword': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'assignments'", 'to': u"orm['generic.Keyword']"}),
'object_pk': ('django.db.models.fields.IntegerField', [], {})
},
u'generic.keyword': {
'Meta': {'object_name': 'Keyword'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'})
},
u'pleiapp.category': {
'Meta': {'ordering': "('title',)", 'object_name': 'Category'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'visible': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'})
},
u'pleiapp.dictionary': {
'Meta': {'ordering': "('title',)", 'object_name': 'Dictionary'},
'_meta_title': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'author': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'dicts'", 'blank': 'True', 'to': u"orm['pleiapp.Category']"}),
'content': ('mezzanine.core.fields.RichTextField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'expiry_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'featured_image': ('mezzanine.core.fields.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'gen_description': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_sitemap': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'keywords': ('mezzanine.generic.fields.KeywordsField', [], {'object_id_field': "'object_pk'", 'to': u"orm['generic.AssignedKeyword']", 'frozen_by_south': 'True'}),
'keywords_string': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'publish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'related_dictionary': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'related_dictionary_rel_+'", 'blank': 'True', 'to': u"orm['pleiapp.Dictionary']"}),
'related_faqs': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['pleiapp.Faq']", 'symmetrical': 'False', 'blank': 'True'}),
'related_resources': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['pleiapp.Resource']", 'symmetrical': 'False', 'blank': 'True'}),
'short_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'topics': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'dicts'", 'blank': 'True', 'to': u"orm['pleiapp.Topic']"}),
'types': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'dicts'", 'blank': 'True', 'to': u"orm['pleiapp.Type']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'dictionarys'", 'to': u"orm['auth.User']"})
},
u'pleiapp.faq': {
'Meta': {'ordering': "('title',)", 'object_name': 'Faq'},
'_meta_title': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'author': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'faqs'", 'blank': 'True', 'to': u"orm['pleiapp.Category']"}),
'content': ('mezzanine.core.fields.RichTextField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'expiry_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'featured_image': ('mezzanine.core.fields.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'gen_description': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_sitemap': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'keywords': ('mezzanine.generic.fields.KeywordsField', [], {'object_id_field': "'object_pk'", 'to': u"orm['generic.AssignedKeyword']", 'frozen_by_south': 'True'}),
'keywords_string': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'publish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'related_dictionary': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['pleiapp.Dictionary']", 'symmetrical': 'False', 'blank': 'True'}),
'related_faqs': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'related_faqs_rel_+'", 'blank': 'True', 'to': u"orm['pleiapp.Faq']"}),
'related_resources': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['pleiapp.Resource']", 'symmetrical': 'False', 'blank': 'True'}),
'short_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'topics': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'faqs'", 'blank': 'True', 'to': u"orm['pleiapp.Topic']"}),
'types': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'faqs'", 'blank': 'True', 'to': u"orm['pleiapp.Type']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'faqs'", 'to': u"orm['auth.User']"})
},
u'pleiapp.frontpageitem': {
'Meta': {'object_name': 'FrontPageItem'},
'featured_image': ('mezzanine.core.fields.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'item_type': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'pub_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'text': ('django.db.models.fields.TextField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
u'pleiapp.resource': {
'Meta': {'ordering': "('title',)", 'object_name': 'Resource'},
'_meta_title': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'author': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'resources'", 'blank': 'True', 'to': u"orm['pleiapp.Category']"}),
'content': ('mezzanine.core.fields.RichTextField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'expiry_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'featured_image': ('mezzanine.core.fields.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'gen_description': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_sitemap': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'keywords': ('mezzanine.generic.fields.KeywordsField', [], {'object_id_field': "'object_pk'", 'to': u"orm['generic.AssignedKeyword']", 'frozen_by_south': 'True'}),
'keywords_string': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'publish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'related_dictionary': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['pleiapp.Dictionary']", 'symmetrical': 'False', 'blank': 'True'}),
'related_faqs': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['pleiapp.Faq']", 'symmetrical': 'False', 'blank': 'True'}),
'related_resources': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'related_resources_rel_+'", 'blank': 'True', 'to': u"orm['pleiapp.Resource']"}),
'short_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'topics': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'resources'", 'blank': 'True', 'to': u"orm['pleiapp.Topic']"}),
'types': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'resources'", 'blank': 'True', 'to': u"orm['pleiapp.Type']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'resources'", 'to': u"orm['auth.User']"})
},
u'pleiapp.tagline': {
'Meta': {'object_name': 'Tagline'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'text': ('django.db.models.fields.TextField', [], {})
},
u'pleiapp.topic': {
'Meta': {'ordering': "('title',)", 'object_name': 'Topic'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'visible': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'})
},
u'pleiapp.type': {
'Meta': {'ordering': "('title',)", 'object_name': 'Type'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'visible': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'})
},
u'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['pleiapp']
|
{
"content_hash": "b19e3f22d749c94e4f4cb40d546df85c",
"timestamp": "",
"source": "github",
"line_count": 240,
"max_line_length": 191,
"avg_line_length": 82.16666666666667,
"alnum_prop": 0.5629817444219067,
"repo_name": "orlenko/plei",
"id": "6026287de30ae6a121386a5feda4accd7ea2d382",
"size": "19744",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pleiapp/migrations/0002_auto__add_tagline.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "676231"
},
{
"name": "JavaScript",
"bytes": "764265"
},
{
"name": "PHP",
"bytes": "25856"
},
{
"name": "Python",
"bytes": "1259282"
}
],
"symlink_target": ""
}
|
import os
from urllib.parse import quote_plus
import pytest
from parameterized import parameterized
from airflow import DAG
from airflow.api_connexion.exceptions import EXCEPTIONS_LINK_MAP
from airflow.models.baseoperator import BaseOperatorLink
from airflow.models.dagbag import DagBag
from airflow.models.dagrun import DagRun
from airflow.models.xcom import XCom
from airflow.plugins_manager import AirflowPlugin
from airflow.providers.google.cloud.operators.bigquery import BigQueryExecuteQueryOperator
from airflow.security import permissions
from airflow.utils.dates import days_ago
from airflow.utils.timezone import datetime
from airflow.utils.types import DagRunType
from tests.test_utils.api_connexion_utils import create_user, delete_user
from tests.test_utils.db import clear_db_runs, clear_db_xcom
from tests.test_utils.mock_plugins import mock_plugin_manager
@pytest.fixture(scope="module")
def configured_app(minimal_app_for_api):
app = minimal_app_for_api
create_user(
app, # type: ignore
username="test",
role_name="Test",
permissions=[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_RUN),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
],
)
create_user(app, username="test_no_permissions", role_name="TestNoPermissions") # type: ignore
yield app
delete_user(app, username="test") # type: ignore
delete_user(app, username="test_no_permissions") # type: ignore
class TestGetExtraLinks:
@pytest.fixture(autouse=True)
def setup_attrs(self, configured_app, session) -> None:
self.default_time = datetime(2020, 1, 1)
clear_db_runs()
clear_db_xcom()
self.app = configured_app
self.dag = self._create_dag()
self.app.dag_bag = DagBag(os.devnull, include_examples=False)
self.app.dag_bag.dags = {self.dag.dag_id: self.dag} # type: ignore
self.app.dag_bag.sync_to_db() # type: ignore
dr = DagRun(
dag_id=self.dag.dag_id,
run_id="TEST_DAG_RUN_ID",
execution_date=self.default_time,
run_type=DagRunType.MANUAL,
)
session.add(dr)
session.commit()
self.client = self.app.test_client() # type:ignore
def teardown_method(self) -> None:
clear_db_runs()
clear_db_xcom()
@staticmethod
def _create_dag():
with DAG(
dag_id="TEST_DAG_ID",
default_args=dict(
start_date=days_ago(2),
),
) as dag:
BigQueryExecuteQueryOperator(task_id="TEST_SINGLE_QUERY", sql="SELECT 1")
BigQueryExecuteQueryOperator(task_id="TEST_MULTIPLE_QUERY", sql=["SELECT 1", "SELECT 2"])
return dag
@parameterized.expand(
[
(
"missing_dag",
"/api/v1/dags/INVALID/dagRuns/TEST_DAG_RUN_ID/taskInstances/TEST_SINGLE_QUERY/links",
"DAG not found",
'DAG with ID = "INVALID" not found',
),
(
"missing_dag_run",
"/api/v1/dags/TEST_DAG_ID/dagRuns/INVALID/taskInstances/TEST_SINGLE_QUERY/links",
"DAG Run not found",
'DAG Run with ID = "INVALID" not found',
),
(
"missing_task",
"/api/v1/dags/TEST_DAG_ID/dagRuns/TEST_DAG_RUN_ID/taskInstances/INVALID/links",
"Task not found",
'Task with ID = "INVALID" not found',
),
]
)
def test_should_respond_404(self, name, url, expected_title, expected_detail):
del name
response = self.client.get(url, environ_overrides={'REMOTE_USER': "test"})
assert 404 == response.status_code
assert {
"detail": expected_detail,
"status": 404,
"title": expected_title,
"type": EXCEPTIONS_LINK_MAP[404],
} == response.json
def test_should_raise_403_forbidden(self):
response = self.client.get(
"/api/v1/dags/TEST_DAG_ID/dagRuns/TEST_DAG_RUN_ID/taskInstances/TEST_SINGLE_QUERY/links",
environ_overrides={'REMOTE_USER': "test_no_permissions"},
)
assert response.status_code == 403
@mock_plugin_manager(plugins=[])
def test_should_respond_200(self):
XCom.set(
key="job_id",
value="TEST_JOB_ID",
execution_date=self.default_time,
task_id="TEST_SINGLE_QUERY",
dag_id=self.dag.dag_id,
)
response = self.client.get(
"/api/v1/dags/TEST_DAG_ID/dagRuns/TEST_DAG_RUN_ID/taskInstances/TEST_SINGLE_QUERY/links",
environ_overrides={'REMOTE_USER': "test"},
)
assert 200 == response.status_code, response.data
assert {
"BigQuery Console": "https://console.cloud.google.com/bigquery?j=TEST_JOB_ID"
} == response.json
@mock_plugin_manager(plugins=[])
def test_should_respond_200_missing_xcom(self):
response = self.client.get(
"/api/v1/dags/TEST_DAG_ID/dagRuns/TEST_DAG_RUN_ID/taskInstances/TEST_SINGLE_QUERY/links",
environ_overrides={'REMOTE_USER': "test"},
)
assert 200 == response.status_code, response.data
assert {"BigQuery Console": None} == response.json
@mock_plugin_manager(plugins=[])
def test_should_respond_200_multiple_links(self):
XCom.set(
key="job_id",
value=["TEST_JOB_ID_1", "TEST_JOB_ID_2"],
execution_date=self.default_time,
task_id="TEST_MULTIPLE_QUERY",
dag_id=self.dag.dag_id,
)
response = self.client.get(
"/api/v1/dags/TEST_DAG_ID/dagRuns/TEST_DAG_RUN_ID/taskInstances/TEST_MULTIPLE_QUERY/links",
environ_overrides={'REMOTE_USER': "test"},
)
assert 200 == response.status_code, response.data
assert {
"BigQuery Console #1": "https://console.cloud.google.com/bigquery?j=TEST_JOB_ID_1",
"BigQuery Console #2": "https://console.cloud.google.com/bigquery?j=TEST_JOB_ID_2",
} == response.json
@mock_plugin_manager(plugins=[])
def test_should_respond_200_multiple_links_missing_xcom(self):
response = self.client.get(
"/api/v1/dags/TEST_DAG_ID/dagRuns/TEST_DAG_RUN_ID/taskInstances/TEST_MULTIPLE_QUERY/links",
environ_overrides={'REMOTE_USER': "test"},
)
assert 200 == response.status_code, response.data
assert {"BigQuery Console #1": None, "BigQuery Console #2": None} == response.json
def test_should_respond_200_support_plugins(self):
class GoogleLink(BaseOperatorLink):
name = "Google"
def get_link(self, operator, dttm):
return "https://www.google.com"
class S3LogLink(BaseOperatorLink):
name = "S3"
operators = [BigQueryExecuteQueryOperator]
def get_link(self, operator, dttm):
return "https://s3.amazonaws.com/airflow-logs/{dag_id}/{task_id}/{execution_date}".format(
dag_id=operator.dag_id,
task_id=operator.task_id,
execution_date=quote_plus(dttm.isoformat()),
)
class AirflowTestPlugin(AirflowPlugin):
name = "test_plugin"
global_operator_extra_links = [
GoogleLink(),
]
operator_extra_links = [
S3LogLink(),
]
with mock_plugin_manager(plugins=[AirflowTestPlugin]):
response = self.client.get(
"/api/v1/dags/TEST_DAG_ID/dagRuns/TEST_DAG_RUN_ID/taskInstances/TEST_SINGLE_QUERY/links",
environ_overrides={'REMOTE_USER': "test"},
)
assert 200 == response.status_code, response.data
assert {
"BigQuery Console": None,
"Google": "https://www.google.com",
"S3": (
"https://s3.amazonaws.com/airflow-logs/"
"TEST_DAG_ID/TEST_SINGLE_QUERY/2020-01-01T00%3A00%3A00%2B00%3A00"
),
} == response.json
|
{
"content_hash": "6b5a072c3a1dd851c227957977cd3c5b",
"timestamp": "",
"source": "github",
"line_count": 230,
"max_line_length": 106,
"avg_line_length": 36.45652173913044,
"alnum_prop": 0.5901013714967204,
"repo_name": "dhuang/incubator-airflow",
"id": "bec2ed8ebc409715288af32feae14fb34e3d2865",
"size": "9170",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/api_connexion/endpoints/test_extra_link_endpoint.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "109698"
},
{
"name": "HTML",
"bytes": "264851"
},
{
"name": "JavaScript",
"bytes": "1988427"
},
{
"name": "Mako",
"bytes": "1037"
},
{
"name": "Python",
"bytes": "3357958"
},
{
"name": "Shell",
"bytes": "34442"
}
],
"symlink_target": ""
}
|
import sys
from pathlib import Path  # make the project root importable
root = str(Path(__file__).resolve().parents[1])
sys.path.append(root)
from classificator.predictor import Predictor as ClassificatorPredictor
import numpy as np
classificator = ClassificatorPredictor('second_model')
def getNameByPred(pred):
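    # argmax over the class-probability vector picks the most likely label;
    # labels outside 0-3 fall through and the function returns None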
label = np.argmax(pred)
print(pred)
    if label == 0:
        return "Conversation"
    if label == 1:
        return "Contact information"
    if label == 2:
        return "Request form"
    if label == 3:
        return "Delivery information"
def chat_loop():
while True:
user_input = str(input(">>>>>").lower().strip())
output = getNameByPred(classificator.predict(user_input))
print(output)
if __name__ == "__main__":
chat_loop()
|
{
"content_hash": "64b3e177e40f74a1ff296b404dd5fd52",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 71,
"avg_line_length": 26.566666666666666,
"alnum_prop": 0.6411543287327478,
"repo_name": "Goodluckhf/chatbot",
"id": "67d94a9f930bd166eb7e67ddeedcfc8cb98bc077",
"size": "855",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "classificator/console.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "57131"
},
{
"name": "HTML",
"bytes": "2320"
},
{
"name": "JavaScript",
"bytes": "17280"
},
{
"name": "Python",
"bytes": "118209"
},
{
"name": "Shell",
"bytes": "785"
}
],
"symlink_target": ""
}
|
'''
Run the first exploratory data analysis in order to better understand the available data and plan the next steps.
'''
# Load libraries
import os
import time
import pandas
import numpy
import matplotlib.pyplot as plt
from pandas.tools.plotting import scatter_matrix
from pandas import DataFrame
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import MinMaxScaler
from sklearn import cross_validation
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.pipeline import Pipeline
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.datasets import load_digits
from sklearn.model_selection import GridSearchCV
from sklearn.decomposition import PCA, NMF
from sklearn.feature_selection import SelectKBest, chi2
#constants
N_DIGITS = 3
NUM_FOLDS = 10
RAND_SEED = 7
SCORING = 'accuracy'
VALIDATION_SIZE = 0.20
#global variables
start = time.clock()
imageidx = 1
createImages = True
def pause():
os.system('read -p "Press Enter to continue..."')
def duration():
global start
end = time.clock()
print '\nDuration: %.2f ' % (end - start)
start = time.clock()
#load Dataframe from file/url
def loadDataframe(filename):
print 'loading ' + filename + ' ...'
return pandas.read_csv(filename, header=0, sep=';')
#drop not interesting columns and fill NaN values
def dataCleansing(dataframe, dropColumns):
#axis: 0 for rows and 1 for columns
dataframe.drop('hash_cod_matricula', axis=1, inplace=True)
dataframe.drop('cep', axis=1, inplace=True)
for column in dropColumns:
dataframe.drop(column, axis=1, inplace=True)
#dataframe.drop('cod_instituicao', axis=1, inplace=True)
#dataframe.drop('cod_curso', axis=1, inplace=True)
#replace NaN with 0
dataframe.fillna(value=0, inplace=True)
return dataframe
# Descriptive statistics
def descriptiveStatistics(dataframe, outputPath):
# Summarize Data
print("\n=== Summarize Data ===")
# shape
print(dataframe.shape)
# types
pandas.set_option('display.max_rows', 500)
print(dataframe.dtypes)
# head
pandas.set_option('display.width', 100)
print(dataframe.head(20))
# descriptions, change precision to 3 places
pandas.set_option('precision', 3)
print(dataframe.describe())
# class distribution
print(dataframe.groupby('evadiu').size())
# Data visualizations
def dataVisualizations(dataframe, outputPath):
global imageidx
ncolumns = dataframe.shape[1]
print("\n=== Data visualizations ===")
# histograms
if (createImages):
print("histograms")
dataframe.hist()
plt.savefig(outputPath + str(imageidx).zfill(N_DIGITS) + '-histograms.png')
imageidx += 1
# density
if (createImages):
print("density")
dataframe.plot(kind='density', subplots=True, sharex=False, legend=False)
plt.savefig(outputPath + str(imageidx).zfill(N_DIGITS) + '-density.png')
imageidx += 1
# box and whisker plots
if (createImages):
print("box and whisker plots")
dataframe.plot(kind='box', subplots=True, layout=(5,5), sharex=False, sharey=False)
#dataframe.plot(kind='box', subplots=True, sharex=False, sharey=False)
plt.savefig(outputPath + str(imageidx).zfill(N_DIGITS) + '-box.png')
imageidx += 1
# scatter plot matrix
if (createImages):
print("scatter plot matrix")
scatter_matrix(dataframe)
#plt.show()
plt.savefig(outputPath + str(imageidx).zfill(N_DIGITS) + '-scatter-plot.png')
imageidx += 1
# correlation matrix
if (createImages):
print("correlation matrix")
fig = plt.figure()
ax = fig.add_subplot(111, autoscale_on=True)
cax = ax.matshow(dataframe.corr(), vmin=-1, vmax=1, interpolation='none')
fig.colorbar(cax)
#plt.show()
plt.savefig(outputPath + str(imageidx).zfill(N_DIGITS) + '-correlation-matrix.png')
imageidx += 1
# histograms of standardized data
if (createImages):
print("histograms of standardized data")
array = dataframe.values
Ax = array[:,0:ncolumns-1].astype(float)
Ay = array[:,ncolumns-1]
scaler = StandardScaler().fit(Ax)
rescaledX = scaler.transform(Ax)
stdDataframe = DataFrame(data=rescaledX)
stdDataframe.hist()
#plt.show()
plt.savefig(outputPath + str(imageidx).zfill(N_DIGITS) + '-standardized-histograms.png')
imageidx += 1
# density of standardized data
if (createImages):
print("density of standardized data")
stdDataframe.plot(kind='density', subplots=True, sharex=False, legend=False)
plt.savefig(outputPath + str(imageidx).zfill(N_DIGITS) + '-standardized-density.png')
imageidx += 1
# box and whisker plots of standardized data
if (createImages):
print("box and whisker plots of standardized data")
stdDataframe.plot(kind='box', subplots=True, layout=(5,5), sharex=False, sharey=False)
#stdDataframe.plot(kind='box', subplots=True, sharex=False, sharey=False)
plt.savefig(outputPath + str(imageidx).zfill(N_DIGITS) + '-standardized-box.png')
imageidx += 1
plt.close('all')
# Split-out validation dataset
def splitoutValidationDataset(dataframe):
print '\n=== Split-out train/validation datasets ==='
ncolumns = dataframe.shape[1]
array = dataframe.values
X = array[:,0:ncolumns-1].astype(float)
Y = array[:,ncolumns-1]
X_train, X_validation, Y_train, Y_validation = cross_validation.train_test_split(X, Y, test_size=VALIDATION_SIZE, random_state=RAND_SEED)
return (X_train, X_validation, Y_train, Y_validation)
#Feature Selection
def featureSelection(features, X_train, Y_train):
print("\n=== Feature Selection ===")
printFeaturesByRelevance(features, X_train, Y_train, ExtraTreesClassifier())
printFeaturesByRelevance(features, X_train, Y_train, RandomForestClassifier())
#Print Features by Relevance
def printFeaturesByRelevance(features, X_train, Y_train, model):
print "\nFeatures by Relevance (using '%s'):" % type(model).__name__
model.fit(X_train, Y_train)
idx = 0
features_by_relevance = []
for relevance in model.feature_importances_:
features_by_relevance.append((relevance, features[idx]))
idx += 1
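    # sort features from most to least important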
features_by_relevance = sorted(features_by_relevance, key=lambda x: x[0], reverse=True)
for relevance, feature in features_by_relevance:
print "%f : %s" % (relevance, feature)
# Evaluate Algorithms
def evaluateAlgorithms(X_train, Y_train, outputPath):
global imageidx
print '\n=== Evaluate algorithms ==='
# Spot Check Algorithms
models = []
models.append(('LR', LogisticRegression()))
models.append(('LDA', LinearDiscriminantAnalysis()))
models.append(('KNN', KNeighborsClassifier()))
models.append(('CART', DecisionTreeClassifier()))
models.append(('NB', GaussianNB()))
models.append(('SVM', SVC()))
results = []
names = []
for name, model in models:
kfold = cross_validation.KFold(n=len(X_train), n_folds=NUM_FOLDS, random_state=RAND_SEED)
cv_results = cross_validation.cross_val_score(model, X_train, Y_train, cv=kfold, scoring=SCORING)
results.append(cv_results)
names.append(name)
msg = "%s:\tmean=%f (std=%f)" % (name, cv_results.mean(), cv_results.std())
print(msg)
# Compare Algorithms
if (createImages):
fig = plt.figure()
fig.suptitle('Algorithm Comparison')
ax = fig.add_subplot(111)
plt.boxplot(results)
ax.set_xticklabels(names)
#plt.show()
plt.savefig(outputPath + str(imageidx).zfill(N_DIGITS) + '-Compare-algorithms.png')
imageidx += 1
plt.close('all')
# Standardize the dataset and reevaluate algorithms
def standardizeDataAndReevaluateAlgorithms(X_train, Y_train, outputPath):
global imageidx
print '\n === Standardize the dataset and reevaluate algorithms ==='
#('MinMaxScaler', MinMaxScaler(feature_range=(0, 1))),
pipelines = []
pipelines.append(('ScaledLR', Pipeline([('MinMaxScaler', MinMaxScaler(feature_range=(0, 1))),('Scaler', StandardScaler()),('LR', LogisticRegression())])))
pipelines.append(('ScaledLDA', Pipeline([('MinMaxScaler', MinMaxScaler(feature_range=(0, 1))),('Scaler', StandardScaler()),('LDA', LinearDiscriminantAnalysis())])))
pipelines.append(('ScaledKNN', Pipeline([('MinMaxScaler', MinMaxScaler(feature_range=(0, 1))),('Scaler', StandardScaler()),('KNN', KNeighborsClassifier())])))
pipelines.append(('ScaledCART', Pipeline([('MinMaxScaler', MinMaxScaler(feature_range=(0, 1))),('Scaler', StandardScaler()),('CART', DecisionTreeClassifier())])))
pipelines.append(('ScaledNB', Pipeline([('MinMaxScaler', MinMaxScaler(feature_range=(0, 1))),('Scaler', StandardScaler()),('NB', GaussianNB())])))
pipelines.append(('ScaledSVM', Pipeline([('MinMaxScaler', MinMaxScaler(feature_range=(0, 1))),('Scaler', StandardScaler()),('SVM', SVC())])))
results = []
names = []
for name, model in pipelines:
kfold = cross_validation.KFold(n=len(X_train), n_folds=NUM_FOLDS, random_state=RAND_SEED)
cv_results = cross_validation.cross_val_score(model, X_train, Y_train, cv=kfold, scoring=SCORING)
results.append(cv_results)
names.append(name)
msg = "%s:\tmean=%f (std=%f)" % (name, cv_results.mean(), cv_results.std())
print(msg)
# Compare Algorithms
if (createImages):
fig = plt.figure()
fig.suptitle('Algorithm Comparison - Standardized Dataset')
ax = fig.add_subplot(111)
plt.boxplot(results)
ax.set_xticklabels(names)
#plt.show()
plt.savefig(outputPath + str(imageidx).zfill(N_DIGITS) + '-Compare-algorithms-standardized-dataset.png')
imageidx += 1
plt.close('all')
# Evaluate Ensemble Algorithms
def evaluateEnsembleAlgorithm(X_train, Y_train, outputPath):
global imageidx
print '\n === Evaluate Ensemble Algorithms ==='
ensembles = []
ensembles.append(('AB', AdaBoostClassifier()))
ensembles.append(('GBM', GradientBoostingClassifier()))
ensembles.append(('RF', RandomForestClassifier()))
ensembles.append(('ET', ExtraTreesClassifier()))
results = []
names = []
for name, model in ensembles:
kfold = cross_validation.KFold(n=len(X_train), n_folds=NUM_FOLDS, random_state=RAND_SEED)
cv_results = cross_validation.cross_val_score(model, X_train, Y_train, cv=kfold, scoring=SCORING)
results.append(cv_results)
names.append(name)
msg = "%s:\tmean=%f (std=%f)" % (name, cv_results.mean(), cv_results.std())
print(msg)
# Compare Algorithms
if (createImages):
fig = plt.figure()
fig.suptitle('Ensemble Algorithm Comparison')
ax = fig.add_subplot(111)
plt.boxplot(results)
ax.set_xticklabels(names)
#plt.show()
plt.savefig(outputPath + str(imageidx).zfill(N_DIGITS) + '-Ensemble-Algorithm-Comparison.png')
imageidx += 1
plt.close('all')
def reset_imageidx(value=1):
global imageidx
imageidx = value
def set_createImages(value):
global createImages
createImages = value
def get_imageidx():
return imageidx
def compareFeatureReductionTechniques(X_train, Y_train, outputPath):
global imageidx
print '\n === Compare Feature Reduction Techniques ==='
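    # grid-search three reducers (PCA, NMF, SelectKBest(chi2)) feeding an SVC; the best C is kept per cell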
pipe = Pipeline([
('reduce_dim', PCA()),
('classify', SVC())
])
N_FEATURES_OPTIONS = [2, 4, 8]
C_OPTIONS = [1, 10, 100, 1000]
param_grid = [
{
'reduce_dim': [PCA(iterated_power=7), NMF()],
'reduce_dim__n_components': N_FEATURES_OPTIONS,
'classify__C': C_OPTIONS
},
{
'reduce_dim': [SelectKBest(chi2)],
'reduce_dim__k': N_FEATURES_OPTIONS,
'classify__C': C_OPTIONS
},
]
reducer_labels = ['PCA', 'NMF', 'KBest(chi2)']
grid = GridSearchCV(pipe, cv=3, n_jobs=2, param_grid=param_grid)
grid.fit(X_train, Y_train)
mean_scores = numpy.array(grid.cv_results_['mean_test_score'])
# scores are in the order of param_grid iteration, which is alphabetical
mean_scores = mean_scores.reshape(len(C_OPTIONS), -1, len(N_FEATURES_OPTIONS))
# select score for best C
mean_scores = mean_scores.max(axis=0)
bar_offsets = (numpy.arange(len(N_FEATURES_OPTIONS)) *
(len(reducer_labels) + 1) + .5)
if (createImages):
plt.figure()
COLORS = 'bgrcmyk'
for i, (label, reducer_scores) in enumerate(zip(reducer_labels, mean_scores)):
plt.bar(bar_offsets + i, reducer_scores, label=label, color=COLORS[i])
plt.title("Comparing feature reduction techniques")
plt.xlabel('Reduced number of features')
plt.xticks(bar_offsets + len(reducer_labels) / 2, N_FEATURES_OPTIONS)
plt.ylabel('Digit classification accuracy')
plt.ylim((0, 1))
plt.legend(loc='upper left')
#plt.show()
plt.savefig(outputPath + str(imageidx).zfill(N_DIGITS) + '-Comparing-feature-reduction-techniques.png')
imageidx += 1
# ===================================================
# ================== main function ==================
# ===================================================
def run(inputFilePath, outputPath, createImagesFlag, dropColumns):
global imageidx
global start
print '####################################################################'
print '############### Running Exploratory Data Analysis #1 ###############'
print '####################################################################'
print ''
imageidx = 1
start = time.clock()
set_createImages(createImagesFlag)
if not os.path.exists(outputPath):
os.makedirs(outputPath)
# Load dataset
dataframe = loadDataframe(inputFilePath)
dataframe = dataCleansing(dataframe, dropColumns)
# Understand the data
descriptiveStatistics(dataframe, outputPath)
dataVisualizations(dataframe, outputPath)
#Split-out train/validation dataset
X_train, X_validation, Y_train, Y_validation = splitoutValidationDataset(dataframe)
if (len(X_train) < 1000):
#Compare different feature reduction techniques
compareFeatureReductionTechniques(X_train, Y_train, outputPath)
# Select the most effective features
featureSelection(dataframe.columns, X_train, Y_train)
# Evaluate Algorithms
    evaluateAlgorithms(X_train, Y_train, outputPath)
# Standardize the dataset and reevaluate the same algorithms
standardizeDataAndReevaluateAlgorithms(X_train, Y_train, outputPath)
# Evaluate Ensemble Algorithms
    evaluateEnsembleAlgorithm(X_train, Y_train, outputPath)
print '\n<<< THEN END - Running Exploratory Data Analysis #1 >>>'
|
{
"content_hash": "d2fdb798269e6d1b709c29abe146139e",
"timestamp": "",
"source": "github",
"line_count": 462,
"max_line_length": 168,
"avg_line_length": 34.34848484848485,
"alnum_prop": 0.6447161131766337,
"repo_name": "FabricioMatos/ifes-dropout-machine-learning",
"id": "21afc8d88e59cc80df885c008e96769c9c127455",
"size": "16040",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/eda1.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "453389"
},
{
"name": "Python",
"bytes": "61963"
}
],
"symlink_target": ""
}
|
import pyramid_handlers
from nflpool.controllers.base_controller import BaseController
from nflpool.services.playerpicks_service import PlayerPicksService
from nflpool.viewmodels.playerpicks_viewmodel import PlayerPicksViewModel
from nflpool.data.dbsession import DbSessionFactory
from nflpool.data.player_picks import PlayerPicks
from nflpool.data.seasoninfo import SeasonInfo
from nflpool.data.account import Account
from nflpool.services.slack_service import SlackService
from nflpool.services.time_service import TimeService
from nflpool.services.gameday_service import GameDayService
from nflpool.services.view_picks_service import ViewPicksService
class PicksController(BaseController):
@pyramid_handlers.action(renderer="templates/picks/index.pt")
def index(self):
if not self.logged_in_user_id:
print("Cannot view account page, you must be logged in")
self.redirect("/account/signin")
return {}
@pyramid_handlers.action(renderer="templates/picks/completed.pt")
def completed(self):
if not self.logged_in_user_id:
print("Cannot view account page, you must be logged in")
self.redirect("/account/signin")
# display_player_picks = DisplayPlayerPicks.display_picks(self.logged_in_user_id)
session = DbSessionFactory.create_session()
season_row = (
session.query(SeasonInfo.current_season)
.filter(SeasonInfo.id == "1")
.first()
)
season = season_row.current_season
get_first_name = (
session.query(Account.first_name)
.filter(Account.id == self.logged_in_user_id)
.first()
)
first_name = get_first_name[0]
return {"season": season, "first_name": first_name}
# Get player picks for the current season
@pyramid_handlers.action(
renderer="templates/picks/submit-picks.pt",
request_method="GET",
name="submit-picks",
)
def submit_player_picks(self):
if not self.logged_in_user_id:
print("Cannot view picks page, you must be logged in")
self.redirect("/account/signin")
now_time = TimeService.get_time()
session = DbSessionFactory.create_session()
season_row = (
session.query(SeasonInfo.current_season)
.filter(SeasonInfo.id == "1")
.first()
)
season = season_row.current_season
season_info = session.query(SeasonInfo).all()
first_game = GameDayService.season_opener_date()
picks_due = GameDayService.picks_due()
time_due = GameDayService.time_due()
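        # picks lock at kickoff of the season opener; the values below drive the countdown shown to users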
# Methods used prior to Pendulum
# string_date = first_game[0] + ' 21:59'
# first_game_time = datetime.datetime.strptime(string_date, "%Y-%m-%d %H:%M")
if now_time > first_game:
print("Season has already started")
self.redirect("/picks/too-late")
else:
# Check if user has already submitted picks
days = GameDayService.delta_days()
hours = GameDayService.delta_hours()
minutes = GameDayService.delta_minutes()
current_datetime = now_time.to_day_datetime_string()
user_query = (
session.query(PlayerPicks.user_id)
.filter(PlayerPicks.user_id == self.logged_in_user_id)
.filter(PlayerPicks.season == season)
.first()
)
if user_query is None:
# Data / Service access
afc_east_list = PlayerPicksService.get_team_list(0, 1)
afc_north_list = PlayerPicksService.get_team_list(0, 2)
afc_south_list = PlayerPicksService.get_team_list(0, 3)
afc_west_list = PlayerPicksService.get_team_list(0, 4)
nfc_east_list = PlayerPicksService.get_team_list(1, 1)
nfc_north_list = PlayerPicksService.get_team_list(1, 2)
nfc_south_list = PlayerPicksService.get_team_list(1, 3)
nfc_west_list = PlayerPicksService.get_team_list(1, 4)
afc_qb_list = PlayerPicksService.get_player_list(0, "QB")
nfc_qb_list = PlayerPicksService.get_player_list(1, "QB")
afc_rb_list = PlayerPicksService.get_player_list(0, "RB")
nfc_rb_list = PlayerPicksService.get_player_list(1, "RB")
afc_rec_list = PlayerPicksService.get_rec_list(0, "WR", "TE")
nfc_rec_list = PlayerPicksService.get_rec_list(1, "WR", "TE")
afc_sacks_list = PlayerPicksService.get_sacks(
0, "DE", "DT", "ILB", "LB", "MLB", "NT", "OLB"
)
nfc_sacks_list = PlayerPicksService.get_sacks(
1, "DE", "DT", "ILB", "LB", "MLB", "NT", "OLB"
)
afc_int_list = PlayerPicksService.get_int(
0, "CB", "DB", "FS", "SS", "MLB", "LB", "OLB", "ILB"
)
nfc_int_list = PlayerPicksService.get_int(
1, "CB", "DB", "FS", "SS", "MLB", "LB", "OLB", "ILB"
)
afc_wildcard_list = PlayerPicksService.get_afc_wildcard()
nfc_wildcard_list = PlayerPicksService.get_nfc_wildcard()
all_team_list = PlayerPicksService.get_all_teams()
# Get the user ID
user_id = self.logged_in_user_id
get_first_name = (
session.query(Account.first_name)
.filter(Account.id == self.logged_in_user_id)
.first()
)
first_name = get_first_name[0]
# Return the models
return {
"season": season,
"user_id": user_id,
"first_name": first_name,
"afc_east": afc_east_list,
"afc_north": afc_north_list,
"afc_south": afc_south_list,
"afc_west": afc_west_list,
"nfc_east": nfc_east_list,
"nfc_north": nfc_north_list,
"nfc_south": nfc_south_list,
"nfc_west": nfc_west_list,
"afc_qb_list": afc_qb_list,
"nfc_qb_list": nfc_qb_list,
"afc_rb_list": afc_rb_list,
"nfc_rb_list": nfc_rb_list,
"afc_rec_list": afc_rec_list,
"nfc_rec_list": nfc_rec_list,
"afc_sacks_list": afc_sacks_list,
"nfc_sacks_list": nfc_sacks_list,
"afc_int_list": afc_int_list,
"nfc_int_list": nfc_int_list,
"afc_wildcard_list": afc_wildcard_list,
"nfc_wildcard_list": nfc_wildcard_list,
"all_team_list": all_team_list,
"picks_due": picks_due,
"time_due": time_due,
"days": days,
"hours": hours,
"minutes": minutes,
"current_datetime": current_datetime,
"season_info": season_info,
}
else:
print("You have already submitted picks for this season")
self.redirect("/picks/change-picks")
    # POST /picks/submit-picks
@pyramid_handlers.action(
renderer="templates/picks/submit-picks.pt",
request_method="POST",
name="submit-picks",
)
def submit_player_picks_post(self):
vm = PlayerPicksViewModel()
vm.from_dict(self.request.POST)
# Pass a player's picks to the service to be inserted in the db
vm.user_id = self.logged_in_user_id
session = DbSessionFactory.create_session()
get_first_name = (
session.query(Account.first_name)
.filter(Account.id == self.logged_in_user_id)
.first()
)
first_name = get_first_name[0]
get_last_name = (
session.query(Account.last_name)
.filter(Account.id == self.logged_in_user_id)
.first()
)
last_name = get_last_name[0]
player_picks = PlayerPicksService.get_player_picks(
vm.afc_east_winner_pick,
vm.afc_east_second,
vm.afc_east_last,
vm.afc_north_winner_pick,
vm.afc_north_second,
vm.afc_north_last,
vm.afc_south_winner_pick,
vm.afc_south_second,
vm.afc_south_last,
vm.afc_west_winner_pick,
vm.afc_west_second,
vm.afc_west_last,
vm.nfc_east_winner_pick,
vm.nfc_east_second,
vm.nfc_east_last,
vm.nfc_north_winner_pick,
vm.nfc_north_second,
vm.nfc_north_last,
vm.nfc_south_winner_pick,
vm.nfc_south_second,
vm.nfc_south_last,
vm.nfc_west_winner_pick,
vm.nfc_west_second,
vm.nfc_west_last,
vm.afc_qb_pick,
vm.nfc_qb_pick,
vm.afc_rb_pick,
vm.nfc_rb_pick,
vm.afc_rec_pick,
vm.nfc_rec_pick,
vm.afc_sacks_pick,
vm.nfc_sacks_pick,
vm.afc_int_pick,
vm.nfc_int_pick,
vm.afc_wildcard1_pick,
vm.afc_wildcard2_pick,
vm.nfc_wildcard1_pick,
vm.nfc_wildcard2_pick,
vm.afc_pf_pick,
vm.nfc_pf_pick,
vm.specialteams_td_pick,
vm.user_id,
)
# Log that a user submitted picks
self.log.notice("Picks submitted by {}.".format(self.logged_in_user.email))
message = f"Picks submitted by NFLPool user: {first_name} {last_name}"
print(message)
SlackService.send_message(message)
# redirect
self.redirect("/picks/completed")
@pyramid_handlers.action(
renderer="templates/picks/too-late.pt", request_method="GET", name="too-late"
)
def too_late(self):
if not self.logged_in_user_id:
print("Cannot view account page, you must be logged in")
self.redirect("/account/signin")
session = DbSessionFactory.create_session()
season_row = (
session.query(SeasonInfo.current_season)
.filter(SeasonInfo.id == "1")
.first()
)
season = season_row.current_season
return {"season": season}
# Change player picks for the current season
@pyramid_handlers.action(
renderer="templates/picks/change-picks.pt",
request_method="GET",
name="change-picks",
)
def change_player_picks(self):
if not self.logged_in_user_id:
print("Cannot view picks page, you must be logged in")
self.redirect("/account/signin")
# Check if user has already submitted picks
session = DbSessionFactory.create_session()
season_row = (
session.query(SeasonInfo.current_season)
.filter(SeasonInfo.id == "1")
.first()
)
season = season_row.current_season
user_query = (
session.query(PlayerPicks.user_id)
.filter(PlayerPicks.user_id == self.logged_in_user_id)
.filter(PlayerPicks.season == season)
.first()
)
if user_query is None:
print("You have not submitted picks for this season")
self.redirect("/picks/submit-picks")
else:
now_time = TimeService.get_time()
if now_time > GameDayService.season_opener_date():
self.redirect("/picks/too-late")
else:
picks_due = GameDayService.picks_due()
time_due = GameDayService.time_due()
days = GameDayService.delta_days()
hours = GameDayService.delta_hours()
minutes = GameDayService.delta_minutes()
current_datetime = now_time.to_day_datetime_string()
season_info = session.query(SeasonInfo).all()
# Data / Service access
afc_east_list = PlayerPicksService.get_team_list(0, 1)
afc_north_list = PlayerPicksService.get_team_list(0, 2)
afc_south_list = PlayerPicksService.get_team_list(0, 3)
afc_west_list = PlayerPicksService.get_team_list(0, 4)
nfc_east_list = PlayerPicksService.get_team_list(1, 1)
nfc_north_list = PlayerPicksService.get_team_list(1, 2)
nfc_south_list = PlayerPicksService.get_team_list(1, 3)
nfc_west_list = PlayerPicksService.get_team_list(1, 4)
afc_qb_list = PlayerPicksService.get_player_list(0, "QB")
nfc_qb_list = PlayerPicksService.get_player_list(1, "QB")
afc_rb_list = PlayerPicksService.get_player_list(0, "RB")
nfc_rb_list = PlayerPicksService.get_player_list(1, "RB")
afc_rec_list = PlayerPicksService.get_rec_list(0, "WR", "TE")
nfc_rec_list = PlayerPicksService.get_rec_list(1, "WR", "TE")
afc_sacks_list = PlayerPicksService.get_sacks(
0, "DE", "DT", "ILB", "LB", "MLB", "NT", "OLB"
)
nfc_sacks_list = PlayerPicksService.get_sacks(
1, "DE", "DT", "ILB", "LB", "MLB", "NT", "OLB"
)
afc_int_list = PlayerPicksService.get_int(
0, "CB", "DB", "FS", "SS", "MLB", "LB", "OLB", "ILB"
)
nfc_int_list = PlayerPicksService.get_int(
1, "CB", "DB", "FS", "SS", "MLB", "LB", "OLB", "ILB"
)
afc_wildcard_list = PlayerPicksService.get_afc_wildcard()
nfc_wildcard_list = PlayerPicksService.get_nfc_wildcard()
all_team_list = PlayerPicksService.get_all_teams()
# Get the user ID
user_id = self.logged_in_user_id
get_first_name = (
session.query(Account.first_name)
.filter(Account.id == self.logged_in_user_id)
.first()
)
first_name = get_first_name[0]
# Get the user's original picks
all_picks = ViewPicksService.change_picks(
self.logged_in_user_id, season
)
# Return the models
return {
"season": season,
"user_id": user_id,
"first_name": first_name,
"afc_east": afc_east_list,
"afc_north": afc_north_list,
"afc_south": afc_south_list,
"afc_west": afc_west_list,
"nfc_east": nfc_east_list,
"nfc_north": nfc_north_list,
"nfc_south": nfc_south_list,
"nfc_west": nfc_west_list,
"afc_qb_list": afc_qb_list,
"nfc_qb_list": nfc_qb_list,
"afc_rb_list": afc_rb_list,
"nfc_rb_list": nfc_rb_list,
"afc_rec_list": afc_rec_list,
"nfc_rec_list": nfc_rec_list,
"afc_sacks_list": afc_sacks_list,
"nfc_sacks_list": nfc_sacks_list,
"afc_int_list": afc_int_list,
"nfc_int_list": nfc_int_list,
"afc_wildcard_list": afc_wildcard_list,
"nfc_wildcard_list": nfc_wildcard_list,
"all_team_list": all_team_list,
"all_picks": all_picks,
"picks_due": picks_due,
"time_due": time_due,
"days": days,
"hours": hours,
"minutes": minutes,
"current_datetime": current_datetime,
"season_info": season_info,
}
    # POST /picks/change-picks
@pyramid_handlers.action(
renderer="templates/picks/change-picks.pt",
request_method="POST",
name="change-picks",
)
def change_player_picks_post(self):
# Pass a player's picks to the service to be inserted in the db
vm = PlayerPicksViewModel()
vm.from_dict(self.request.POST)
session = DbSessionFactory.create_session()
season_row = (
session.query(SeasonInfo.current_season)
.filter(SeasonInfo.id == "1")
.first()
)
season = season_row.current_season
vm.user_id = self.logged_in_user_id
vm.season = season
now_time = TimeService.get_time()
player_picks = PlayerPicksService.change_player_picks(
vm.afc_east_winner_pick,
vm.afc_east_second,
vm.afc_east_last,
vm.afc_north_winner_pick,
vm.afc_north_second,
vm.afc_north_last,
vm.afc_south_winner_pick,
vm.afc_south_second,
vm.afc_south_last,
vm.afc_west_winner_pick,
vm.afc_west_second,
vm.afc_west_last,
vm.nfc_east_winner_pick,
vm.nfc_east_second,
vm.nfc_east_last,
vm.nfc_north_winner_pick,
vm.nfc_north_second,
vm.nfc_north_last,
vm.nfc_south_winner_pick,
vm.nfc_south_second,
vm.nfc_south_last,
vm.nfc_west_winner_pick,
vm.nfc_west_second,
vm.nfc_west_last,
vm.afc_qb_pick,
vm.nfc_qb_pick,
vm.afc_rb_pick,
vm.nfc_rb_pick,
vm.afc_rec_pick,
vm.nfc_rec_pick,
vm.afc_sacks_pick,
vm.nfc_sacks_pick,
vm.afc_int_pick,
vm.nfc_int_pick,
vm.afc_wildcard1_pick,
vm.afc_wildcard2_pick,
vm.nfc_wildcard1_pick,
vm.nfc_wildcard2_pick,
vm.afc_pf_pick,
vm.nfc_pf_pick,
vm.specialteams_td_pick,
vm.user_id,
)
# Log that a user changed picks
self.log.notice("Picks changed by {}.".format(self.logged_in_user.email))
get_first_name = (
session.query(Account.first_name)
.filter(Account.id == self.logged_in_user_id)
.first()
)
first_name = get_first_name[0]
get_last_name = (
session.query(Account.last_name)
.filter(Account.id == self.logged_in_user_id)
.first()
)
last_name = get_last_name[0]
message = f"Picks updated by NFLPool user: {first_name} {last_name}"
print(message)
SlackService.send_message(message)
# redirect
self.redirect("/account")
|
{
"content_hash": "12228790061f5fd684c399719aa3c12f",
"timestamp": "",
"source": "github",
"line_count": 512,
"max_line_length": 96,
"avg_line_length": 37.509765625,
"alnum_prop": 0.5200208279093986,
"repo_name": "prcutler/nflpool",
"id": "15965f4e06b1e1e57a6ca0d41f0acf634adf73aa",
"size": "19205",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nflpool/controllers/picks_controller.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "9316"
},
{
"name": "HTML",
"bytes": "3704"
},
{
"name": "Python",
"bytes": "223267"
},
{
"name": "Rich Text Format",
"bytes": "910"
}
],
"symlink_target": ""
}
|
"""Data and Channel Location Equivalence Tests"""
from __future__ import print_function
# Author: Teon Brooks <teon.brooks@gmail.com>
#
# License: BSD (3-clause)
import os.path as op
import inspect
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal
from nose.tools import assert_raises, assert_true
import scipy.io
from mne import pick_types, Epochs, find_events, read_events
from mne.tests.common import assert_dig_allclose
from mne.utils import run_tests_if_main
from mne.io import Raw, read_raw_kit, read_epochs_kit
from mne.io.kit.coreg import read_sns
from mne.io.tests.test_raw import _test_raw_reader
FILE = inspect.getfile(inspect.currentframe())
parent_dir = op.dirname(op.abspath(FILE))
data_dir = op.join(parent_dir, 'data')
sqd_path = op.join(data_dir, 'test.sqd')
epochs_path = op.join(data_dir, 'test-epoch.raw')
events_path = op.join(data_dir, 'test-eve.txt')
mrk_path = op.join(data_dir, 'test_mrk.sqd')
mrk2_path = op.join(data_dir, 'test_mrk_pre.sqd')
mrk3_path = op.join(data_dir, 'test_mrk_post.sqd')
elp_path = op.join(data_dir, 'test_elp.txt')
hsp_path = op.join(data_dir, 'test_hsp.txt')
def test_data():
"""Test reading raw kit files
"""
assert_raises(TypeError, read_raw_kit, epochs_path)
assert_raises(TypeError, read_epochs_kit, sqd_path)
assert_raises(ValueError, read_raw_kit, sqd_path, mrk_path, elp_path)
assert_raises(ValueError, read_raw_kit, sqd_path, None, None, None,
list(range(200, 190, -1)))
assert_raises(ValueError, read_raw_kit, sqd_path, None, None, None,
list(range(167, 159, -1)), '*', 1, True)
# check functionality
raw_mrk = read_raw_kit(sqd_path, [mrk2_path, mrk3_path], elp_path,
hsp_path)
raw_py = _test_raw_reader(read_raw_kit,
input_fname=sqd_path, mrk=mrk_path, elp=elp_path,
hsp=hsp_path, stim=list(range(167, 159, -1)),
slope='+', stimthresh=1)
assert_true('RawKIT' in repr(raw_py))
# Test stim channel
raw_stim = read_raw_kit(sqd_path, mrk_path, elp_path, hsp_path, stim='<',
preload=False)
for raw in [raw_py, raw_stim, raw_mrk]:
stim_pick = pick_types(raw.info, meg=False, ref_meg=False,
stim=True, exclude='bads')
stim1, _ = raw[stim_pick]
stim2 = np.array(raw.read_stim_ch(), ndmin=2)
assert_array_equal(stim1, stim2)
# Binary file only stores the sensor channels
py_picks = pick_types(raw_py.info, exclude='bads')
raw_bin = op.join(data_dir, 'test_bin_raw.fif')
raw_bin = Raw(raw_bin, preload=True)
bin_picks = pick_types(raw_bin.info, stim=True, exclude='bads')
data_bin, _ = raw_bin[bin_picks]
data_py, _ = raw_py[py_picks]
# this .mat was generated using the Yokogawa MEG Reader
data_Ykgw = op.join(data_dir, 'test_Ykgw.mat')
data_Ykgw = scipy.io.loadmat(data_Ykgw)['data']
data_Ykgw = data_Ykgw[py_picks]
assert_array_almost_equal(data_py, data_Ykgw)
py_picks = pick_types(raw_py.info, stim=True, ref_meg=False,
exclude='bads')
data_py, _ = raw_py[py_picks]
assert_array_almost_equal(data_py, data_bin)
def test_epochs():
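    """Test that epochs read from a KIT file match epoching the raw data."""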
raw = read_raw_kit(sqd_path, stim=None)
events = read_events(events_path)
raw_epochs = Epochs(raw, events, None, tmin=0, tmax=.099, baseline=None)
data1 = raw_epochs.get_data()
epochs = read_epochs_kit(epochs_path, events_path)
data11 = epochs.get_data()
assert_array_equal(data1, data11)
def test_raw_events():
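    """Test stim channel event extraction under the different stim options."""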
def evts(a, b, c, d, e, f=None):
out = [[269, a, b], [281, b, c], [1552, c, d], [1564, d, e]]
if f is not None:
out.append([2000, e, f])
return out
raw = read_raw_kit(sqd_path)
assert_array_equal(find_events(raw, output='step', consecutive=True),
evts(255, 254, 255, 254, 255, 0))
raw = read_raw_kit(sqd_path, slope='+')
assert_array_equal(find_events(raw, output='step', consecutive=True),
evts(0, 1, 0, 1, 0))
raw = read_raw_kit(sqd_path, stim='<', slope='+')
assert_array_equal(find_events(raw, output='step', consecutive=True),
evts(0, 128, 0, 128, 0))
raw = read_raw_kit(sqd_path, stim='<', slope='+', stim_code='channel')
assert_array_equal(find_events(raw, output='step', consecutive=True),
evts(0, 160, 0, 160, 0))
def test_ch_loc():
"""Test raw kit loc
"""
raw_py = read_raw_kit(sqd_path, mrk_path, elp_path, hsp_path, stim='<')
raw_bin = Raw(op.join(data_dir, 'test_bin_raw.fif'))
ch_py = raw_py._raw_extras[0]['sensor_locs'][:, :5]
# ch locs stored as m, not mm
ch_py[:, :3] *= 1e3
ch_sns = read_sns(op.join(data_dir, 'sns.txt'))
assert_array_almost_equal(ch_py, ch_sns, 2)
assert_array_almost_equal(raw_py.info['dev_head_t']['trans'],
raw_bin.info['dev_head_t']['trans'], 4)
for py_ch, bin_ch in zip(raw_py.info['chs'], raw_bin.info['chs']):
if bin_ch['ch_name'].startswith('MEG'):
# the stored ch locs have more precision than the sns.txt
assert_array_almost_equal(py_ch['loc'], bin_ch['loc'], decimal=2)
# test when more than one marker file provided
mrks = [mrk_path, mrk2_path, mrk3_path]
read_raw_kit(sqd_path, mrks, elp_path, hsp_path, preload=False)
# this dataset does not have the equivalent set of points :(
raw_bin.info['dig'] = raw_bin.info['dig'][:8]
raw_py.info['dig'] = raw_py.info['dig'][:8]
assert_dig_allclose(raw_py.info, raw_bin.info)
run_tests_if_main()
|
{
"content_hash": "52362d2cc9c6b7e28a9f1a3a2ded8d2d",
"timestamp": "",
"source": "github",
"line_count": 146,
"max_line_length": 79,
"avg_line_length": 39.47260273972603,
"alnum_prop": 0.6113135519694604,
"repo_name": "ARudiuk/mne-python",
"id": "2e171a59e8254dde2ace5311311e7503aee80e26",
"size": "5763",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "mne/io/kit/tests/test_kit.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "3769"
},
{
"name": "PowerShell",
"bytes": "2988"
},
{
"name": "Python",
"bytes": "5086775"
},
{
"name": "Shell",
"bytes": "936"
}
],
"symlink_target": ""
}
|
"""Base shape-related objects such as BaseShape."""
from __future__ import absolute_import, division, print_function, unicode_literals
from pptx.action import ActionSetting
from pptx.dml.effect import ShadowFormat
from pptx.shared import ElementProxy
from pptx.util import lazyproperty
class BaseShape(object):
"""Base class for shape objects.
Subclasses include |Shape|, |Picture|, and |GraphicFrame|.
"""
def __init__(self, shape_elm, parent):
super(BaseShape, self).__init__()
self._element = shape_elm
self._parent = parent
def __eq__(self, other):
"""|True| if this shape object proxies the same element as *other*.
Equality for proxy objects is defined as referring to the same XML
element, whether or not they are the same proxy object instance.
"""
if not isinstance(other, BaseShape):
return False
return self._element is other._element
def __ne__(self, other):
if not isinstance(other, BaseShape):
return True
return self._element is not other._element
@lazyproperty
def click_action(self):
"""|ActionSetting| instance providing access to click behaviors.
Click behaviors are hyperlink-like behaviors including jumping to
a hyperlink (web page) or to another slide in the presentation. The
click action is that defined on the overall shape, not a run of text
within the shape. An |ActionSetting| object is always returned, even
when no click behavior is defined on the shape.
"""
cNvPr = self._element._nvXxPr.cNvPr
return ActionSetting(cNvPr, self)
@property
def element(self):
"""`lxml` element for this shape, e.g. a CT_Shape instance.
Note that manipulating this element improperly can produce an invalid
presentation file. Make sure you know what you're doing if you use
this to change the underlying XML.
"""
return self._element
@property
def has_chart(self):
"""
|True| if this shape is a graphic frame containing a chart object.
|False| otherwise. When |True|, the chart object can be accessed
using the ``.chart`` property.
"""
# This implementation is unconditionally False, the True version is
# on GraphicFrame subclass.
return False
@property
def has_table(self):
"""
|True| if this shape is a graphic frame containing a table object.
|False| otherwise. When |True|, the table object can be accessed
using the ``.table`` property.
"""
# This implementation is unconditionally False, the True version is
# on GraphicFrame subclass.
return False
@property
def has_text_frame(self):
"""
|True| if this shape can contain text.
"""
# overridden on Shape to return True. Only <p:sp> has text frame
return False
@property
def height(self):
"""
Read/write. Integer distance between top and bottom extents of shape
in EMUs
"""
return self._element.cy
@height.setter
def height(self, value):
self._element.cy = value
@property
def is_placeholder(self):
"""
True if this shape is a placeholder. A shape is a placeholder if it
has a <p:ph> element.
"""
return self._element.has_ph_elm
@property
def left(self):
"""
Read/write. Integer distance of the left edge of this shape from the
left edge of the slide, in English Metric Units (EMU)
"""
return self._element.x
@left.setter
def left(self, value):
self._element.x = value
@property
def name(self):
"""
Name of this shape, e.g. 'Picture 7'
"""
return self._element.shape_name
@name.setter
def name(self, value):
self._element._nvXxPr.cNvPr.name = value
@property
def part(self):
"""The package part containing this shape.
A |BaseSlidePart| subclass in this case. Access to a slide part
should only be required if you are extending the behavior of |pp| API
objects.
"""
return self._parent.part
@property
def placeholder_format(self):
"""
A |_PlaceholderFormat| object providing access to
placeholder-specific properties such as placeholder type. Raises
|ValueError| on access if the shape is not a placeholder.
"""
if not self.is_placeholder:
raise ValueError("shape is not a placeholder")
return _PlaceholderFormat(self._element.ph)
@property
def rotation(self):
"""
Read/write float. Degrees of clockwise rotation. Negative values can
be assigned to indicate counter-clockwise rotation, e.g. assigning
-45.0 will change setting to 315.0.
"""
return self._element.rot
@rotation.setter
def rotation(self, value):
self._element.rot = value
@lazyproperty
def shadow(self):
"""|ShadowFormat| object providing access to shadow for this shape.
A |ShadowFormat| object is always returned, even when no shadow is
explicitly defined on this shape (i.e. it inherits its shadow
behavior).
"""
return ShadowFormat(self._element.spPr)
@property
def shape_id(self):
"""Read-only positive integer identifying this shape.
The id of a shape is unique among all shapes on a slide.
"""
return self._element.shape_id
@property
def shape_type(self):
"""
Unique integer identifying the type of this shape, like
``MSO_SHAPE_TYPE.CHART``. Must be implemented by subclasses.
"""
# # This one returns |None| unconditionally to account for shapes
# # that haven't been implemented yet, like group shape and chart.
# # Once those are done this should raise |NotImplementedError|.
# msg = 'shape_type property must be implemented by subclasses'
# raise NotImplementedError(msg)
return None
@property
def top(self):
"""
Read/write. Integer distance of the top edge of this shape from the
top edge of the slide, in English Metric Units (EMU)
"""
return self._element.y
@top.setter
def top(self, value):
self._element.y = value
@property
def width(self):
"""
Read/write. Integer distance between left and right extents of shape
in EMUs
"""
return self._element.cx
@width.setter
def width(self, value):
self._element.cx = value
class _PlaceholderFormat(ElementProxy):
"""
Accessed via the :attr:`~.BaseShape.placeholder_format` property of
a placeholder shape, provides properties specific to placeholders, such
as the placeholder type.
"""
@property
def element(self):
"""
The `p:ph` element proxied by this object.
"""
return super(_PlaceholderFormat, self).element
@property
def idx(self):
"""
Integer placeholder 'idx' attribute.
"""
return self._element.idx
@property
def type(self):
"""
Placeholder type, a member of the :ref:`PpPlaceholderType`
enumeration, e.g. PP_PLACEHOLDER.CHART
"""
return self._element.type
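# Usage sketch (illustrative, not part of this module; assumes a file
# "deck.pptx" exists with at least one shape on its first slide):
#
#     from pptx import Presentation
#     from pptx.util import Inches
#     prs = Presentation("deck.pptx")
#     shape = prs.slides[0].shapes[0]
#     shape.left, shape.top = Inches(1), Inches(1)  # reposition the shape
#     shape.rotation = 45.0                         # clockwise degrees
#     if shape.is_placeholder:
#         print(shape.placeholder_format.type)      # e.g. PP_PLACEHOLDER.TITLE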
|
{
"content_hash": "f875d28d0dafba1f699d11e10808faba",
"timestamp": "",
"source": "github",
"line_count": 250,
"max_line_length": 82,
"avg_line_length": 30.22,
"alnum_prop": 0.6173395102581072,
"repo_name": "scanny/python-pptx",
"id": "c9472434dba57b2917a76e855baef48fd9e7155a",
"size": "7574",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pptx/shapes/base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Gherkin",
"bytes": "124592"
},
{
"name": "Makefile",
"bytes": "2055"
},
{
"name": "PLpgSQL",
"bytes": "48599"
},
{
"name": "Python",
"bytes": "2152173"
}
],
"symlink_target": ""
}
|
from django.db import models
from activatable_model.models import BaseActivatableModel
class ActivatableModel(BaseActivatableModel):
is_active = models.BooleanField(default=False)
char_field = models.CharField(max_length=64)
class Rel(models.Model):
is_active = models.BooleanField(default=False)
char_field = models.CharField(max_length=64)
class ActivatableModelWRel(BaseActivatableModel):
is_active = models.BooleanField(default=False)
rel_field = models.ForeignKey(Rel, on_delete=models.PROTECT)
class ActivatableModelWRelAndCascade(BaseActivatableModel):
ALLOW_CASCADE_DELETE = True
is_active = models.BooleanField(default=False)
rel_field = models.ForeignKey(Rel, on_delete=models.CASCADE)
class ActivatableModelWNonDefaultField(BaseActivatableModel):
ACTIVATABLE_FIELD_NAME = 'active'
active = models.BooleanField(default=False)
char_field = models.CharField(max_length=64)
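# Usage sketch (illustrative, not part of the fixtures; relies on the
# library's documented behavior that delete() deactivates rather than
# removes rows unless cascade semantics apply):
#
#     m = ActivatableModel.objects.create(char_field='x', is_active=True)
#     m.delete()  # expected to set is_active=False instead of issuing a DELETE
#     assert not ActivatableModel.objects.get(id=m.id).is_active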
|
{
"content_hash": "a6220da70ed5c1ff12d40a3ce2d25ed4",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 64,
"avg_line_length": 31.333333333333332,
"alnum_prop": 0.776595744680851,
"repo_name": "ambitioninc/django-activatable-model",
"id": "2918dc53abb9b6a82e28909b92f70444d2a01348",
"size": "940",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "activatable_model/tests/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "39876"
}
],
"symlink_target": ""
}
|
import glob
import dicom
from django.db import models
from django.utils._os import safe_join
from rest_framework import serializers
class ImageSeries(models.Model):
"""
Model representing a certain image series
"""
patient_id = models.CharField(max_length=64)
series_instance_uid = models.CharField(max_length=256)
uri = models.CharField(max_length=512)
    @staticmethod
    def get_or_create(uri):
"""
Return the ImageSeries instance with the same PatientID and SeriesInstanceUID as the DICOM images in the
given directory. If none exists so far, create one.
Return a tuple of (ImageSeries, created), where created is a boolean specifying whether the object was created.
Args:
uri (str): absolute URI to a directory with DICOM images of a patient
Returns:
(ImageSeries, bool): the looked up ImageSeries instance and whether it had to be created
"""
file_ = glob.glob1(uri, '*.dcm')[0]
plan = dicom.read_file(safe_join(uri, file_))
patient_id = plan.PatientID
series_instance_uid = plan.SeriesInstanceUID
return ImageSeries.objects.get_or_create(
patient_id=patient_id,
series_instance_uid=series_instance_uid,
uri=uri)
class ImageSeriesSerializer(serializers.ModelSerializer):
class Meta:
model = ImageSeries
fields = ('id', 'patient_id', 'series_instance_uid', 'uri')
class ImageLocation(models.Model):
"""
Model representing a certain voxel location on certain image
"""
series = models.ForeignKey(ImageSeries, on_delete=models.CASCADE)
x = models.PositiveSmallIntegerField(help_text='Voxel index for X axis, zero-index, from top left')
y = models.PositiveSmallIntegerField(help_text='Voxel index for Y axis, zero-index, from top left')
z = models.PositiveSmallIntegerField(help_text='Slice index for Z axis, zero-index')
class ImageLocationSerializer(serializers.ModelSerializer):
series = ImageSeriesSerializer()
class Meta:
model = ImageLocation
fields = ('id', 'series', 'x', 'y', 'z')
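# Usage sketch (illustrative; the directory path below is an assumption):
#
#     series, created = ImageSeries.get_or_create('/images/LIDC-IDRI-0001')
#     if created:
#         print('registered new series', series.series_instance_uid)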
|
{
"content_hash": "4445d214cea1137706852b01957c8b52",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 119,
"avg_line_length": 33.01538461538462,
"alnum_prop": 0.6784715750232991,
"repo_name": "antonow/concept-to-clinic",
"id": "395cbe2f0849f5763491c418860ef1f46b8dd54c",
"size": "2146",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "interface/backend/images/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "135"
},
{
"name": "HTML",
"bytes": "446"
},
{
"name": "JavaScript",
"bytes": "25989"
},
{
"name": "Python",
"bytes": "227606"
},
{
"name": "Shell",
"bytes": "2580"
},
{
"name": "Vue",
"bytes": "19612"
}
],
"symlink_target": ""
}
|
#!/usr/bin/env python
"""
"""
from cortical.models import retina
class RetinasApi(object):
def __init__(self, apiClient):
self.apiClient = apiClient
def getRetinas(self, retina_name=None):
"""Information about retinas
Args:
retina_name, str: The retina name (optional) (optional)
Returns: Array[Retina]
"""
resourcePath = '/retinas'
method = 'GET'
queryParams = {}
        headerParams = {'Accept': 'application/json', 'Content-Type': 'application/json'}
postData = None
queryParams['retina_name'] = retina_name
response = self.apiClient._callAPI(resourcePath, method, queryParams, postData, headerParams)
return [retina.Retina(**r) for r in response.json()]
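# Usage sketch (illustrative; `client` stands for the SDK's configured API
# client, and the printed attribute follows the REST API's field naming --
# both are assumptions here):
#
#     api = RetinasApi(client)
#     for r in api.getRetinas():
#         print(r.retinaName)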
|
{
"content_hash": "989d4f09dd615212ed1aa449934ce5df",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 101,
"avg_line_length": 23.41176470588235,
"alnum_prop": 0.6042713567839196,
"repo_name": "cortical-io/python-client-sdk",
"id": "d60b132b68801fc118527ff048f69637d415ff70",
"size": "1202",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "cortical/retinasApi.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "58412"
}
],
"symlink_target": ""
}
|
"""Implements experimental logic."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from dl_bounds.src.data import LocalDatasetProvider
from dl_bounds.src.experiments.exp_base import Experiment
from dl_bounds.src.hessian_top_sv import HessianTopSV
import numpy as np
import tensorflow as tf
class HessianExperiment(Experiment):
"""Hessian experiment.
Trains the model, evaluates, and stores top singular
value of the Hessian matrix.
"""
def create_hessian_estimator(self, model):
"""Creates estimator for top singular value of the Hessian."""
self.hessian_top_sv = HessianTopSV(model.mean_loss, model.weights,
tf.logging.info)
def run(self):
"""Runs experiment."""
if self.exists():
tf.logging.info("Result file already exists.")
return
(x_train, y_train, x_val, y_val, _) = self.get_data()
m = x_train.shape[0]
if self.conf.stat_sample_n == -1:
x_hessian = x_train
y_hessian = y_train
else:
tf.logging.info(
"Subsampling training set for ||Hessian||_2 evaluation till %d.",
self.conf.stat_sample_n)
subset_ix = np.random.choice(
np.arange(m), self.conf.stat_sample_n, replace=False)
x_hessian = x_train[subset_ix, :]
y_hessian = y_train[subset_ix]
tf.logging.info("Training set size = %d", m)
tf.logging.info("Val set size = %d", x_val.shape[0])
tf.logging.info("Hessian set size = %d", x_hessian.shape[0])
train_dataset = LocalDatasetProvider(
x_train, y_train, shuffle_seed=self.conf.data_shuffle_seed)
val_dataset = LocalDatasetProvider(
x_val, y_val, shuffle_seed=self.conf.data_shuffle_seed)
n_records = self.get_n_records()
hessian_top_sv_means = np.zeros((n_records,))
hessian_top_sv_stds = np.zeros((n_records,))
poweriter_means = np.zeros((n_records,))
poweriter_stds = np.zeros((n_records,))
tf.logging.info("""Computing Rval(A_S), Remp(A_S), and estimating
top Hessian singular values on the subsampled training sample...""")
for (pass_index, (p, model)) in enumerate(
self.train(train_dataset, self.create_hessian_estimator)):
self.measure_on_train_val(train_dataset, val_dataset, pass_index, p,
model)
self.report_train_val(pass_index)
hessian_rs = self.hessian_top_sv.compute(
tf.get_default_session(),
placeholder_x=model.net_in,
placeholder_y=model.label_placeholder,
data_x=x_hessian,
data_y=y_hessian)
hessian_top_sv_means[pass_index] = hessian_rs["avg_top_sv"]
hessian_top_sv_stds[pass_index] = hessian_rs["std_top_sv"]
poweriter_means[pass_index] = hessian_rs["avg_converge_steps"]
poweriter_stds[pass_index] = hessian_rs["std_converge_steps"]
tf.logging.info("E^[||H||_2]: %s", ", ".join(
["%.2f" % x for x in hessian_top_sv_means[:pass_index + 1]]
))
results = self.get_train_val_measurements()
results.update(
dict(
hessian_top_sv_means=hessian_top_sv_means,
hessian_top_sv_stds=hessian_top_sv_stds,
poweriter_means=poweriter_means,
poweriter_stds=poweriter_stds))
self.save(results)
return results
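# The following is a minimal sketch of the power-iteration idea behind the
# ||Hessian||_2 estimate above; it is an illustration, not HessianTopSV.
def _top_hessian_sv_sketch(loss, weights, num_steps=20):
  """Illustrative sketch only -- an assumption, not the HessianTopSV internals.
  Estimates the top singular value of the Hessian of `loss` w.r.t. `weights`
  by power iteration on Hessian-vector products; H*v is formed with double
  backprop (Pearlmutter's trick), so the Hessian is never materialized.
  """
  v = [tf.random_normal(tf.shape(w)) for w in weights]
  grads = tf.gradients(loss, weights)
  top_sv = tf.constant(0.)
  for _ in range(num_steps):
    inner = tf.add_n([tf.reduce_sum(g * tf.stop_gradient(vi))
                      for g, vi in zip(grads, v)])
    hv = tf.gradients(inner, weights)  # hv == H @ v, one piece per weight
    top_sv = tf.sqrt(tf.add_n([tf.reduce_sum(h * h) for h in hv]))
    v = [h / (top_sv + 1e-12) for h in hv]
  return top_sv  # ||H v|| at the last step approximates sigma_max(H)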
|
{
"content_hash": "8ca669f069b5b9f2c94f574db10493fb",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 75,
"avg_line_length": 33.91919191919192,
"alnum_prop": 0.6360929124478857,
"repo_name": "google/dl_bounds",
"id": "27f2e77a65e8de98b419f50d51acc5a968bbdd46",
"size": "3948",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dl_bounds/src/experiments/exp_hessian.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "132002"
},
{
"name": "Shell",
"bytes": "1743"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import json
from oslo_log import log as logging
from congress.api import api_utils
from congress.api import base
from congress.api import error_codes
from congress.api import webservice
from congress import exception
LOG = logging.getLogger(__name__)
class DatasourceModel(base.APIModel):
"""Model for handling API requests about Datasources."""
# Note(thread-safety): blocking function
def get_items(self, params, context=None):
"""Get items in model.
Args:
params: A dict-like object containing parameters
from the request query string and body.
context: Key-values providing frame of reference of request
Returns: A dict containing at least a 'results' key whose value is
a list of items in the model. Additional keys set in the
dict will also be rendered for the user.
"""
# Note(thread-safety): blocking call
results = self.bus.get_datasources(filter_secret=True)
# Check that running datasources match the datasources in the
# database since this is going to tell the client about those
# datasources, and the running datasources should match the
# datasources we show the client.
return {"results": results}
def get_item(self, id_, params, context=None):
"""Get datasource corresponding to id_ in model."""
try:
datasource = self.bus.get_datasource(id_)
return datasource
except exception.DatasourceNotFound as e:
LOG.exception("Datasource '%s' not found", id_)
raise webservice.DataModelException(e.code, str(e),
http_status_code=e.code)
# Note(thread-safety): blocking function
def add_item(self, item, params, id_=None, context=None):
"""Add item to model.
Args:
item: The item to add to the model
id_: The ID of the item, or None if an ID should be generated
context: Key-values providing frame of reference of request
Returns:
Tuple of (ID, newly_created_item)
Raises:
KeyError: ID already exists.
"""
obj = None
try:
# Note(thread-safety): blocking call
obj = self.invoke_rpc(base.DS_MANAGER_SERVICE_ID,
'add_datasource',
{'items': item},
timeout=self.dse_long_timeout)
# Let PE synchronizer take care of creating the policy.
except (exception.BadConfig,
exception.DatasourceNameInUse,
exception.DriverNotFound,
exception.DatasourceCreationError) as e:
            LOG.exception("Datasource creation failed.")
raise webservice.DataModelException(e.code, str(e),
http_status_code=e.code)
return (obj['id'], obj)
# Note(thread-safety): blocking function
def delete_item(self, id_, params, context=None):
ds_id = context.get('ds_id')
try:
# Note(thread-safety): blocking call
datasource = self.bus.get_datasource(ds_id)
# FIXME(thread-safety):
# by the time greenthread resumes, the
# returned datasource name could refer to a totally different
# datasource, causing the rest of this code to unintentionally
# delete a different datasource
# Fix: check UUID of datasource before operating.
# Abort if mismatch
self.invoke_rpc(base.DS_MANAGER_SERVICE_ID,
'delete_datasource',
{'datasource': datasource},
timeout=self.dse_long_timeout)
# Let PE synchronizer takes care of deleting policy
except (exception.DatasourceNotFound,
exception.DanglingReference) as e:
raise webservice.DataModelException(e.code, str(e))
# Note(thread-safety): blocking function
def request_refresh_action(self, params, context=None, request=None):
caller, source_id = api_utils.get_id_from_context(context)
try:
args = {'source_id': source_id}
# Note(thread-safety): blocking call
self.invoke_rpc(caller, 'request_refresh', args)
except exception.CongressException as e:
LOG.exception(e)
raise webservice.DataModelException.create(e)
# Note(thread-safety): blocking function
def execute_action(self, params, context=None, request=None):
"Execute the action."
service = context.get('ds_id')
body = json.loads(request.body)
action = body.get('name')
action_args = body.get('args', {})
if (not isinstance(action_args, dict)):
(num, desc) = error_codes.get('execute_action_args_syntax')
raise webservice.DataModelException(num, desc)
try:
args = {'service_name': service, 'action': action,
'action_args': action_args}
# TODO(ekcs): perhaps keep execution synchronous when explicitly
# called via API
# Note(thread-safety): blocking call
self.invoke_rpc(base.ENGINE_SERVICE_ID, 'execute_action', args)
except exception.PolicyException as e:
(num, desc) = error_codes.get('execute_error')
raise webservice.DataModelException(num, desc + "::" + str(e))
return {}
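# Illustrative body for execute_action (the action name and its argument
# schema below are hypothetical -- what a driver accepts varies per
# datasource):
#
#     {"name": "disconnect_network", "args": {"port_id": "abc123"}}
#
# A non-dict "args" value is rejected above with
# error_codes.get('execute_action_args_syntax').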
|
{
"content_hash": "1df9993246d5c54aa7d2a77c28182d60",
"timestamp": "",
"source": "github",
"line_count": 143,
"max_line_length": 76,
"avg_line_length": 40.26573426573427,
"alnum_prop": 0.5923931920805835,
"repo_name": "ramineni/my_congress",
"id": "5cd79bebdd3031f42e07e93de255734015bf886b",
"size": "6390",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "congress/api/datasource_model.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2744"
},
{
"name": "GAP",
"bytes": "7778"
},
{
"name": "HTML",
"bytes": "19549"
},
{
"name": "JavaScript",
"bytes": "9896"
},
{
"name": "Makefile",
"bytes": "228"
},
{
"name": "Mako",
"bytes": "1043"
},
{
"name": "Python",
"bytes": "2416507"
},
{
"name": "Shell",
"bytes": "26177"
}
],
"symlink_target": ""
}
|
import sys # Wee trick when running in same directory as module
sys.path.append('..')
import pylcs
import matplotlib.pyplot as plt
from random import randint
from functools import reduce  # builtin on Python 2; the import keeps Python 3 working
# Define multiplexer 'fitness' function
def multiplex(bits,addrlen=2):
return bits[int(reduce(lambda x,y: str(y)+str(x),bits[0:addrlen]),base=2)]
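# Worked example: for bits = [1, 0, 1, 1, 0, 0] with addrlen = 2, the address
# bits [1, 0] are reversed into the string '01' (binary 1), so multiplex()
# returns bits[1], i.e. 0.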
# Create classifier by defining the action set [0 or 1]
lcs = pylcs.xcs([0,1])
# Set up to record time, performance and population
t = []; p = []; s = []
# Change the learning rate and exploration probability...
lcs.BETA = 0.015
lcs.EPSILON = 0.1
while True:
# Record current values as we go
t.append(lcs.time())
p.append(lcs.perf())
s.append(lcs.size())
# Generate random perception of 6 bits
percept = [randint(0,1) for b in range(1,6+1)]
# Determine classifier action based on this
act = lcs.act(percept)
# Give reward if action is correct
if (act==multiplex(percept)):
lcs.reward(1000)
# Terminate if run too long or performance good
if t[-1]>1000 and p[-1]>0.6:
break
# Plot results
ax1 = plt.subplot(2,1,1)
plt.plot(t,p,'r-'); plt.ylabel('Performance'); ax1.set_ylim([0,1.0])
ax2 = plt.subplot(2,1,2)
plt.plot(t,s,'b-'); plt.ylabel('Population')
plt.xlabel('Time')
plt.show()
|
{
"content_hash": "25f8d6b79f75ba849c524b6d9c20527f",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 76,
"avg_line_length": 27.886363636363637,
"alnum_prop": 0.687041564792176,
"repo_name": "timlukins/pylcs",
"id": "62bf6185beeea539bfd17e5e87bc41ae7f26f957",
"size": "1227",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "28806"
},
{
"name": "Python",
"bytes": "5427"
}
],
"symlink_target": ""
}
|
from django.shortcuts import render
from django.views.decorators.csrf import csrf_exempt
from django.http import HttpResponse, Http404
from django.contrib.auth.models import User
from django.db.models import Q
from django.db import models
from register.models import AgencyList
from register.models import Petitions
import json
import datetime
import os.path
import base64
@csrf_exempt
def sendSignal(request):
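    """Handle a POSTed citizen signal: decode it, store any image, save the petition."""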
if request.method != 'POST':
raise Http404('Only POSTs are allowed')
requestJson = json.loads(request.body)
image = "-"
if requestJson["petitionData"] != "":
image = handleFile(requestJson["petitionData"])
response = '{"status" : "no_such_category"}'
if AgencyList.objects.filter(AgencyName = requestJson["agencyName"]):
response = saveSignal(requestJson, image)
print response
return HttpResponse(response, content_type='text/json')
def handleFile(uploaded_file):
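    """Decode the base64 image, save it under static/signalImages/, and return the relative file name."""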
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
BASE_DIR += '/static/'
fileName = "signalImages/imageFile"
suffix = datetime.datetime.now().strftime("%y%m%d_%H%M%S%f")
fileName = "_".join([fileName, suffix])
fileName += ".jpg"
BASE_DIR += fileName
    if os.path.isfile(BASE_DIR):
        # Name already taken: retry with a fresh timestamped name instead of
        # falling through and overwriting the existing file
        return handleFile(uploaded_file)
imgdata = base64.b64decode(uploaded_file)
with open(BASE_DIR, 'wb') as f:
f.write(imgdata)
return fileName
def saveSignal(requestJson, imagePath):
status = '{"status" : "send"}'
insertSignal = Petitions(AgencyName = requestJson["agencyName"], petitionType = requestJson["petitionType"],
petitionDescription = requestJson["petitionDescription"],latitude = 0, longitude = 0,
citizenName = requestJson["citizenName"], citizenMiddleName = requestJson["citizenMiddleName"],
citizenLastName = requestJson["citizenLastName"], citizenAddres = requestJson["citizenAddress"],
petition = requestJson["petition"], petitionData = imagePath, date = datetime.datetime.now())
insertSignal.save()
#model = Model.objects.get(app__name='webServer', name=requestJson["AgencyName"])
#insertSignal = model(petitionType = requestJson["petitionType"], petitionDescription = requestJson["petitionDescription"],
# citizenName = requestJson["citizenName"], citizenMiddleName = requestJson["citizenMiddleName"],
# citizenLastName = requestJson["citizenLastName"], citizenAddres = requestJson["citizenAddres"],
# petition = requestJson["petition"], petitionData = requestJson["petitionData"], date = datetime.datetime.now())
#insertSignal.save()
return status
|
{
"content_hash": "234c56e78554af51b387debf6000b94c",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 124,
"avg_line_length": 36.35294117647059,
"alnum_prop": 0.7536407766990292,
"repo_name": "Nikolas1814/Code4Bulgaria",
"id": "e15340ef369e783d02fe49be682414612c0c4a6f",
"size": "2472",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "webServer/sendSignal/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1627"
},
{
"name": "HTML",
"bytes": "4797"
},
{
"name": "Java",
"bytes": "96502"
},
{
"name": "Python",
"bytes": "24801"
}
],
"symlink_target": ""
}
|
import numpy
import pytest
import chainerx
import chainerx.testing
from chainerx_tests import array_utils
@chainerx.testing.numpy_chainerx_array_equal()
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
def test_relu(xp, device, shape, dtype):
if dtype == 'bool_':
return chainerx.testing.ignore()
x = array_utils.create_dummy_ndarray(xp, shape, dtype)
if xp is numpy:
return numpy.maximum(0, x)
else:
return chainerx.relu(x)
@chainerx.testing.numpy_chainerx_allclose(atol=1e-6)
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
def test_sigmoid(xp, device, shape, float_dtype):
# TODO(imanishi): Dtype promotion is not supported yet.
x = array_utils.create_dummy_ndarray(xp, shape, float_dtype)
if xp is numpy:
return numpy.reciprocal(1 + numpy.exp(-x))
else:
return chainerx.sigmoid(x)
|
{
"content_hash": "5077c66f9d1dfb0e6ab7bd978af9cb83",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 64,
"avg_line_length": 29.266666666666666,
"alnum_prop": 0.6902050113895216,
"repo_name": "jnishi/chainer",
"id": "c9dffdf7247ccea138e26014e4c1ad1daacecd6d",
"size": "878",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/chainerx_tests/unit_tests/routines_tests/test_activation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "3368"
},
{
"name": "C",
"bytes": "70"
},
{
"name": "C++",
"bytes": "1460543"
},
{
"name": "CMake",
"bytes": "42279"
},
{
"name": "Cuda",
"bytes": "53858"
},
{
"name": "Dockerfile",
"bytes": "1457"
},
{
"name": "PowerShell",
"bytes": "7197"
},
{
"name": "Python",
"bytes": "5121452"
},
{
"name": "Shell",
"bytes": "22130"
}
],
"symlink_target": ""
}
|
import os
import sys
from distutils.core import setup
def publish():
"""Publish to PyPi"""
os.system("python setup.py sdist upload")
if sys.argv[-1] == "publish":
publish()
sys.exit()
required = ['requests']
if sys.version_info[:2] < (2,6):
required.append('simplejson')
setup(
name='gistapi',
version='0.2.4',
description='Python wrapper for Gist API',
long_description=open('README.rst').read() + '\n\n' + open('HISTORY.rst').read(),
author='Kenneth Reitz',
author_email='me@kennethreitz.com',
url='http://github.com/kennethreitz/gistapi.py',
packages=['gistapi'],
install_requires=required,
license='MIT',
classifiers=(
"Development Status :: 4 - Beta",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python",
"Programming Language :: Python :: 2.5",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
)
)
|
{
"content_hash": "0dc414cc5810b5d1a138f85813a3bdf3",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 85,
"avg_line_length": 23.046511627906977,
"alnum_prop": 0.6054490413723511,
"repo_name": "kennethreitz/gistapi.py",
"id": "9a8ca8ac6ed3fcefde4728e17c822fdd66c257de",
"size": "1038",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8064"
}
],
"symlink_target": ""
}
|