| text | meta |
|---|---|
import dlib
import cv2
import numpy as np
import models
import NonLinearLeastSquares
import ImageProcessing
from drawing import *
import FaceRendering
import utils
print "Press T to draw the keypoints and the 3D model"
print "Press R to start recording to a video file"
#you need to download shape_predictor_68_face_landmarks.dat from the link below and unpack it next to this script
#http://sourceforge.net/projects/dclib/files/dlib/v18.10/shape_predictor_68_face_landmarks.dat.bz2
#loading the keypoint detection model, the image and the 3D model
predictor_path = "../shape_predictor_68_face_landmarks.dat"
image_name = "../data/sface1.jpg"
#the smaller this value gets the faster the detection will work
#if it is too small, the user's face might not be detected
maxImageSizeForDetection = 320
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(predictor_path)
mean3DShape, blendshapes, mesh, idxs3D, idxs2D = utils.load3DFaceModel("../candide.npz")
projectionModel = models.OrthographicProjectionBlendshapes(blendshapes.shape[0])
modelParams = None
lockedTranslation = True
drawOverlay = False
cap = cv2.VideoCapture(0)
writer = None
cameraImg = cap.read()[1]
textureImg = cv2.imread(image_name)
textureCoords = utils.getFaceTextureCoords(textureImg, mean3DShape, blendshapes, idxs2D, idxs3D, detector, predictor)
renderer = FaceRendering.FaceRenderer(cameraImg, textureImg, textureCoords, mesh)
while True:
cameraImg = cap.read()[1]
shapes2D = utils.getFaceKeypoints(cameraImg, detector, predictor, maxImageSizeForDetection)
if shapes2D is not None:
for shape2D in shapes2D:
#3D model parameter initialization
modelParams = projectionModel.getInitialParameters(mean3DShape[:, idxs3D], shape2D[:, idxs2D])
#3D model parameter optimization
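            #(Gauss-Newton iteratively adjusts the pose and blendshape parameters so the projected 3D landmarks line up with the detected 2D keypoints)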
modelParams = NonLinearLeastSquares.GaussNewton(modelParams, projectionModel.residual, projectionModel.jacobian, ([mean3DShape[:, idxs3D], blendshapes[:, :, idxs3D]], shape2D[:, idxs2D]), verbose=0)
#rendering the model to an image
shape3D = utils.getShape3D(mean3DShape, blendshapes, modelParams)
renderedImg = renderer.render(shape3D)
#blending of the rendered face with the image
mask = np.copy(renderedImg[:, :, 0])
renderedImg = ImageProcessing.colorTransfer(cameraImg, renderedImg, mask)
cameraImg = ImageProcessing.blendImages(renderedImg, cameraImg, mask)
#drawing of the mesh and keypoints
if drawOverlay:
drawPoints(cameraImg, shape2D.T)
drawProjectedShape(cameraImg, [mean3DShape, blendshapes], projectionModel, mesh, modelParams, lockedTranslation)
if writer is not None:
writer.write(cameraImg)
cv2.imshow('image', cameraImg)
key = cv2.waitKey(1)
if key==27:
break
if key == ord('t'):
drawOverlay = not drawOverlay
if key == ord('r'):
if writer is None:
print "Starting video writer"
writer = cv2.VideoWriter("../out.avi", cv2.cv.CV_FOURCC('X', 'V', 'I', 'D'), 25, (cameraImg.shape[1], cameraImg.shape[0]))
if writer.isOpened():
print "Writer succesfully opened"
else:
writer = None
print "Writer opening failed"
else:
print "Stopping video writer"
writer.release()
writer = None
|
{
"content_hash": "1fb9bce2aeb54a566c80456606b46b53",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 210,
"avg_line_length": 37.234042553191486,
"alnum_prop": 0.6902857142857143,
"repo_name": "TH4NOS/FaceSwap-Halloween",
"id": "f2ab263f057952c807bb92bfd6c8d15b6fc5a888",
"size": "3500",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "FaceSwap/zad2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "22485"
}
],
"symlink_target": ""
}
|
DEFAULT_REFRESH_OFFSET = 300
DEFAULT_TOKEN_REFRESH_RETRY_DELAY = 30
|
{
"content_hash": "cf808a56cda85846180859ee07f1dcd7",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 38,
"avg_line_length": 34,
"alnum_prop": 0.7941176470588235,
"repo_name": "Azure/azure-sdk-for-python",
"id": "8ca26c01752aa04e73401641974ada1d45db1f58",
"size": "250",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/ml/azure-ai-ml/azure/ai/ml/identity/_constants.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
}
|
from devilry.simplified import (simplified_modelapi, SimplifiedModelApi,
PermissionDenied, FieldSpec,
FilterSpecs, FilterSpec)
from devilry.apps.core import models
from devilry.coreutils.simplified.metabases import SimplifiedAbstractApplicationKeyValueMixin
@simplified_modelapi
class SimplifiedRelatedStudentKeyValue(SimplifiedModelApi):
""" Simplified wrapper for :class:`devilry.apps.core.models.RelatedStudentKeyValue`. """
class Meta:
model = models.RelatedStudentKeyValue
methods = ['read', 'search']
resultfields = FieldSpec('relatedstudent', 'relatedstudent__period') + SimplifiedAbstractApplicationKeyValueMixin.resultfields
searchfields = SimplifiedAbstractApplicationKeyValueMixin.searchfields
filters = FilterSpecs(FilterSpec('relatedstudent__period', supported_comp=('exact',))) + SimplifiedAbstractApplicationKeyValueMixin.filters
@classmethod
def create_searchqryset(cls, user):
return cls._meta.model.objects.filter(relatedstudent__user=user, student_can_read=True)
@classmethod
def read_authorize(cls, user, obj):
if not cls.create_searchqryset(user).filter(id=obj.id):
raise PermissionDenied()
|
{
"content_hash": "92e80de8b0e199c284b1fe89646dcee1",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 147,
"avg_line_length": 50.88,
"alnum_prop": 0.7295597484276729,
"repo_name": "vegarang/devilry-django",
"id": "43a6154b6797da389f324371e9661fb564846d57",
"size": "1272",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "devilry/apps/student/simplified/simplifiedrelatedstudentkeyvalue.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "697906"
},
{
"name": "Python",
"bytes": "931589"
}
],
"symlink_target": ""
}
|
r"""
This model calculates a simple power law with a flat background.
Definition
----------
.. math::
I(q) = \text{scale} \cdot q^{-\text{power}} + \text{background}
Note the minus sign in front of the exponent. The exponent *power*
should therefore be entered as a **positive** number for fitting.
Also note that unlike many other models, *scale* in this model
is NOT explicitly related to a volume fraction. Be careful if
combining this model with other models.
References
----------
None.
Authorship and Verification
----------------------------
* **Author:**
* **Last Modified by:**
* **Last Reviewed by:**
"""
import numpy as np
from numpy import inf, errstate
name = "power_law"
title = "Simple power law with a flat background"
description = """
Evaluates the function
I(q) = scale * q^(-power) + background
NB: enter power as a positive number!
"""
category = "shape-independent"
# ["name", "units", default, [lower, upper], "type", "description"],
parameters = [["power", "", 4.0, [-inf, inf], "", "Power law exponent"]]
# NB: Scale and Background are implicit parameters on every model
def Iq(q, power):
# pylint: disable=missing-docstring
with errstate(divide='ignore'):
result = q**-power
return result
Iq.vectorized = True # Iq accepts an array of q values
def random():
"""Return a random parameter set for the model."""
power = np.random.uniform(1, 6)
pars = dict(
scale=0.1**power*10**np.random.uniform(-4, 2),
power=power,
)
return pars
tests = [
[{'scale': 1.0, 'power': 4.0, 'background' : 0.0},
[0.0106939, 0.469418], [7.64644e+07, 20.5949]],
]
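# Standalone sanity check: a sketch, not part of the sasmodels model API.
# With scale=1 and background=0 the framework reduces to the bare power law,
# so calling Iq directly should reproduce the test values above.
if __name__ == "__main__":
    print(Iq(np.array([0.0106939, 0.469418]), 4.0))  # ~ [7.64644e+07, 20.5949]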
|
{
"content_hash": "3fdbbeb67864e001a4499778de6dcfaa",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 80,
"avg_line_length": 24.764705882352942,
"alnum_prop": 0.6306413301662708,
"repo_name": "SasView/sasmodels",
"id": "b2588139ae35ef1fd844f499e017f7f414f8a51b",
"size": "1772",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sasmodels/models/power_law.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "770"
},
{
"name": "C",
"bytes": "331392"
},
{
"name": "M",
"bytes": "127"
},
{
"name": "MATLAB",
"bytes": "2232"
},
{
"name": "Python",
"bytes": "1442038"
},
{
"name": "Shell",
"bytes": "1799"
}
],
"symlink_target": ""
}
|
import os
from . import gen
class Error(Exception):
def __init__(self, msg, e=None):
self.msg = msg
self.e = e
def __str__(self):
if self.e is not None:
return os.linesep.join([self.msg, gen.exnToString(self.e)])
else:
return self.msg
# vim:set shiftwidth=4 softtabstop=8 expandtab textwidth=78:
|
{
"content_hash": "07d12b1590fa40a51fd74365774e581c",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 71,
"avg_line_length": 22.75,
"alnum_prop": 0.5824175824175825,
"repo_name": "Ban3/Limnoria",
"id": "2373bb5c5e0995dff88152806263025ef67cdaec",
"size": "1948",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "src/utils/error.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "864"
},
{
"name": "Python",
"bytes": "2513657"
},
{
"name": "Shell",
"bytes": "217"
}
],
"symlink_target": ""
}
|
from unittest.mock import patch, Mock
from usb.tests.base import APITestCase
from usb.models import db, DesktopRedirect, TabletRedirect, MobileRedirect
class RedirectFromShortURLTestCase(APITestCase):
def setUp(self):
super(RedirectFromShortURLTestCase, self).setUp()
redirects = (
DesktopRedirect('aaaaaaaa', 'http://domain1.com/path?q=a'),
TabletRedirect('aaaaaaaa', 'http://tablet.domain1.com/path?q=a'),
MobileRedirect('aaaaaaaa', 'http://mobile.domain1.com/path?q=a'),
DesktopRedirect('bbbbbbbb', 'http://domain2.com/path?q=b'),
TabletRedirect('bbbbbbbb', 'http://tablet.domain2.com/path?q=b'),
MobileRedirect('bbbbbbbb', 'http://mobile.domain2.com/path?q=b')
)
for redirect in redirects:
db.session.add(redirect)
db.session.commit()
self.PATCH_TARGET = 'usb.blueprints.api.get_device_model_from_request'
def _test_redirect_desktop(self, url):
with patch(self.PATCH_TARGET, return_value=DesktopRedirect):
response = self.client.get(url + 'aaaaaaaa')
self.assertEqual(response.status_code, self.app.config['REDIRECT_CODE'])
self.assertEqual(response.headers['Location'], 'http://domain1.com/path?q=a')
def _test_redirect_tablet(self, url):
with patch(self.PATCH_TARGET, return_value=TabletRedirect):
response = self.client.get(url + 'aaaaaaaa')
self.assertEqual(response.status_code, self.app.config['REDIRECT_CODE'])
self.assertEqual(response.headers['Location'], 'http://tablet.domain1.com/path?q=a')
def _test_redirect_mobile(self, url):
with patch(self.PATCH_TARGET, return_value=MobileRedirect):
response = self.client.get(url + 'aaaaaaaa')
self.assertEqual(response.status_code, self.app.config['REDIRECT_CODE'])
self.assertEqual(response.headers['Location'], 'http://mobile.domain1.com/path?q=a')
def _test_redirect(self, url):
self._test_redirect_desktop(url)
self._test_redirect_tablet(url)
self._test_redirect_mobile(url)
def test_redirect_from_index_namespace(self):
self._test_redirect('/')
def test_redirect_from_links_namespace(self):
self._test_redirect('/urls/')
def test_redirect_increase_count(self):
# 3 desktop
self._test_redirect_desktop('/')
self._test_redirect_desktop('/')
self._test_redirect_desktop('/')
# 0 tablet
# 1 mobile
self._test_redirect_mobile('/')
desktop = DesktopRedirect.query.filter_by(short='aaaaaaaa').first()
self.assertEqual(desktop.count, 3)
tablet = TabletRedirect.query.filter_by(short='aaaaaaaa').first()
self.assertEqual(tablet.count, 0)
mobile = MobileRedirect.query.filter_by(short='aaaaaaaa').first()
self.assertEqual(mobile.count, 1)
|
{
"content_hash": "877558f33af8bb22f070c2f6b808b19f",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 92,
"avg_line_length": 40.06849315068493,
"alnum_prop": 0.6512820512820513,
"repo_name": "dizpers/usb",
"id": "bfb9e8cc9ea39f995a74149e9089da782a48bcb9",
"size": "2925",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "usb/tests/api/test_redirect_from_short_url.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "21548"
}
],
"symlink_target": ""
}
|
import sys
import os
from PIL import Image
project_root = '../ISeeNN'
sys.path.append('../')
sys.path.append(os.path.join(project_root, 'search_web'))
sys.path.append(os.path.join(project_root, 'ISeeNN'))
import personal_settings as ps
from feature_extractor import ResizeExtractor, RootNormalizer
from ISeeNN.image_server.models import DBImage, DBImageThumbnail, ImageServer
from models import Feature, AestheticInfo
import mongoengine
_MONGODB_USER = ps._MONGODB_USER
_MONGODB_PASSWD = ps._MONGODB_PASSWD
_MONGODB_HOST = ps._MONGODB_HOST
_MONGODB_NAME = ps._MONGODB_NAME
_MONGODB_DATABASE_HOST = \
'mongodb://%s:%s@%s/%s' \
% (_MONGODB_USER, _MONGODB_PASSWD, _MONGODB_HOST, _MONGODB_NAME)
mongoengine.connect(_MONGODB_NAME, host=_MONGODB_DATABASE_HOST)
extensions = {".jpg"}
dir_name = '' # the directory you want to index
AVA_file = 'AVA.txt'
tags_file = 'tags.txt'
ext = ResizeExtractor('VGG16P5', (224,224))
server_id = ImageServer.objects.get(server_name='Amax').pk
norm = RootNormalizer()
def parse_tag_line(line):
return (line[:line.find(' ')], line[line.find(' ')+1:-1])
tags = {}
with open(tags_file, 'r') as f:
for line in f:
(tag_id, tag_name) = parse_tag_line(line)
tags[tag_id] = tag_name
def parse_ava_line(line): # '1 953619 0 1 5 17 38 36 15 6 5 1 1 22 1396'
elements = line.split()
file_id = elements[1]
scores = 0.0
nums = 0
for x in range(1,11):
scores += float(elements[x+1])*x
nums += int(elements[x+1])
score = scores / nums
tag = []
for x in range(12,14):
if elements[x] != '0':
tag.append(tags[elements[x]])
return (file_id, score, tag)
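# Worked example (a sketch using the sample line quoted in the comment above):
# the vote counts for scores 1..10 are elements[2:12] = 0,1,5,17,38,36,15,6,5,1,
# giving score = 699 / 124 ~ 5.64; elements[12:14] hold the two tag ids
# ('1' and '22' here; an entry of '0' means no tag was assigned).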
with open(AVA_file, 'r') as f:
for line in f:
print(parse_ava_line(line))
(file_id, score, tag) = parse_ava_line(line)
img_filename = os.path.join(dir_name, file_id+'.jpg')
if not os.path.isfile(img_filename):
continue
file_name, file_extension = os.path.splitext(img_filename)
print(img_filename)
try:
db_image = DBImage.objects.get(path=img_filename)
image_id = db_image.pk
except DBImage.DoesNotExist:
im = Image.open(img_filename)
# if im.format != 'JPEG':
# continue
db_image = DBImage(
server=server_id,
path=img_filename,
width=im.width,
height=im.height,
mime_type='image/' + im.format.lower(),
source='AVA'
)
db_image.save()
image_id = db_image.pk
finally:
try:
feature = Feature.objects.get(image=image_id, identity='VGG16P5_resize')
except Feature.DoesNotExist:
try:
feat = ext.extract(img_filename)
feat = norm.normalize(feat)
Feature.objects(image=image_id, identity='VGG16P5_resize').update_one(
set__image=image_id,
set__dimension=feat.size,
set__model='VGG16P5',
set__data=feat.tobytes(),
upsert=True
)
except:
pass
try:
aestheticInfo = AestheticInfo.objects.get(image=image_id)
except AestheticInfo.DoesNotExist:
try:
AestheticInfo.objects(image=image_id).update_one(
set__image=image_id,
set__score=score,
set__tags=tag,
upsert=True
)
except:
pass
|
{
"content_hash": "8d44811c13659ca4d64f06b311e8c1ce",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 90,
"avg_line_length": 32.43103448275862,
"alnum_prop": 0.5443912812333865,
"repo_name": "sunshaoyan/ISeeNN",
"id": "503b913be7a5021b736b2bb1a1788fb97c0c7ae7",
"size": "3762",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Indexer/ava_indexer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "10022"
},
{
"name": "CMake",
"bytes": "673"
},
{
"name": "CSS",
"bytes": "1214"
},
{
"name": "HTML",
"bytes": "11345"
},
{
"name": "JavaScript",
"bytes": "5825"
},
{
"name": "Python",
"bytes": "62315"
},
{
"name": "Shell",
"bytes": "136"
}
],
"symlink_target": ""
}
|
"""
Unit tests for the Neutron HNV L2 Agent.
"""
import sys
from unittest import mock
from networking_hyperv.neutron.agent import hnv_neutron_agent as hnv_agent
from networking_hyperv.neutron import constants
from networking_hyperv.tests import base as test_base
class TestHNVAgent(test_base.HyperVBaseTestCase):
_autospec_classes = [
hnv_agent.neutron_client.NeutronAPIClient,
]
@mock.patch.object(hnv_agent.HNVAgent, "_setup")
@mock.patch.object(hnv_agent.HNVAgent, "_setup_rpc")
@mock.patch.object(hnv_agent.HNVAgent, "_set_agent_state")
def _get_agent(self, mock_set_agent_state, mock_setup_rpc, mock_setup):
return hnv_agent.HNVAgent()
def setUp(self):
super(TestHNVAgent, self).setUp()
self.agent = self._get_agent()
def test_get_agent_configurations(self):
self.config(logical_network=mock.sentinel.logical_network,
group="HNV")
self.agent._physical_network_mappings = mock.sentinel.mappings
agent_configurations = self.agent._get_agent_configurations()
expected_keys = ["logical_network", "vswitch_mappings",
"devices", "l2_population", "tunnel_types",
"bridge_mappings", "enable_distributed_routing"]
self.assertEqual(sorted(expected_keys),
sorted(agent_configurations.keys()))
self.assertEqual(mock.sentinel.mappings,
agent_configurations["vswitch_mappings"])
self.assertEqual(str(mock.sentinel.logical_network),
agent_configurations["logical_network"])
@mock.patch.object(hnv_agent.HNVAgent, "_get_vswitch_name")
def test_provision_network(self, mock_get_vswitch_name):
self.agent._provision_network(mock.sentinel.port_id,
mock.sentinel.net_uuid,
mock.sentinel.network_type,
mock.sentinel.physical_network,
mock.sentinel.segmentation_id)
mock_get_vswitch_name.assert_called_once_with(
mock.sentinel.network_type,
mock.sentinel.physical_network)
vswitch_map = self.agent._network_vswitch_map[mock.sentinel.net_uuid]
self.assertEqual(mock.sentinel.network_type,
vswitch_map['network_type'])
self.assertEqual(mock_get_vswitch_name.return_value,
vswitch_map['vswitch_name'])
self.assertEqual(mock.sentinel.segmentation_id,
vswitch_map['vlan_id'])
@mock.patch.object(hnv_agent.hyperv_base.Layer2Agent, '_port_bound')
def test_port_bound(self, mock_super_port_bound):
self.agent._port_bound(
mock.sentinel.port_id, mock.sentinel.network_id,
mock.sentinel.network_type, mock.sentinel.physical_network,
mock.sentinel.segmentation_id, mock.sentinel.port_security_enabled,
mock.sentinel.set_port_sriov)
mock_super_port_bound.assert_called_once_with(
mock.sentinel.port_id, mock.sentinel.network_id,
mock.sentinel.network_type, mock.sentinel.physical_network,
mock.sentinel.segmentation_id, mock.sentinel.port_security_enabled,
mock.sentinel.set_port_sriov)
mock_neutron_client = self.agent._neutron_client
mock_neutron_client.get_port_profile_id.assert_called_once_with(
mock.sentinel.port_id)
self.agent._utils.set_vswitch_port_profile_id.assert_called_once_with(
switch_port_name=mock.sentinel.port_id,
profile_id=mock_neutron_client.get_port_profile_id.return_value,
profile_data=constants.PROFILE_DATA,
profile_name=constants.PROFILE_NAME,
net_cfg_instance_id=constants.NET_CFG_INSTANCE_ID,
cdn_label_id=constants.CDN_LABEL_ID,
cdn_label_string=constants.CDN_LABEL_STRING,
vendor_id=constants.VENDOR_ID,
vendor_name=constants.VENDOR_NAME)
class TestMain(test_base.BaseTestCase):
@mock.patch.object(hnv_agent, 'HNVAgent')
@mock.patch.object(hnv_agent, 'common_config')
@mock.patch.object(hnv_agent, 'neutron_config')
def test_main(self, mock_config, mock_common_config, mock_hnv_agent):
hnv_agent.main()
mock_config.register_agent_state_opts_helper.assert_called_once_with(
hnv_agent.CONF)
mock_common_config.init.assert_called_once_with(sys.argv[1:])
mock_config.setup_logging.assert_called_once_with()
mock_hnv_agent.assert_called_once_with()
mock_hnv_agent.return_value.daemon_loop.assert_called_once_with()
|
{
"content_hash": "5fc9378eb08432396aa89a8080aa735b",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 79,
"avg_line_length": 43.559633027522935,
"alnum_prop": 0.6381634372367313,
"repo_name": "openstack/networking-hyperv",
"id": "dfbf8e368e965988927190de182b8c630560fc68",
"size": "5387",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "networking_hyperv/tests/unit/neutron/agent/test_hnv_neutron_agent.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "257721"
},
{
"name": "Shell",
"bytes": "281"
}
],
"symlink_target": ""
}
|
"""Test the hunt_view interface."""
from unittest import mock
from absl import app
from grr_response_core.lib import utils
from grr_response_core.lib.rdfvalues import client as rdf_client
from grr_response_core.lib.rdfvalues import client_fs as rdf_client_fs
from grr_response_core.lib.rdfvalues import file_finder as rdf_file_finder
from grr_response_core.lib.rdfvalues import paths as rdf_paths
from grr_response_server import file_store
from grr_response_server.flows.general import collectors
from grr_response_server.flows.general import export as flow_export
from grr_response_server.gui import archive_generator
from grr_response_server.gui import gui_test_lib
from grr.test_lib import test_lib
class TestHuntArchiving(gui_test_lib.GRRSeleniumHuntTest):
"""Test the hunt archive download functionality."""
def testDoesNotShowGenerateArchiveButtonForNonExportableRDFValues(self):
values = [rdf_client.Process(pid=1), rdf_client.Process(pid=42423)]
hunt_id, _ = self.CreateGenericHuntWithCollection(values=values)
self.Open("/")
self.Click("css=a[grrtarget=hunts]")
self.Click("css=td:contains('%s')" % hunt_id)
self.Click("css=li[heading=Results]")
self.WaitUntil(self.IsTextPresent, "42423")
self.WaitUntilNot(self.IsTextPresent,
"Files referenced in this collection can be downloaded")
def testDoesNotShowGenerateArchiveButtonWhenResultCollectionIsEmpty(self):
hunt_id, _ = self.CreateGenericHuntWithCollection([])
self.Open("/")
self.Click("css=a[grrtarget=hunts]")
self.Click("css=td:contains('%s')" % hunt_id)
self.Click("css=li[heading=Results]")
self.WaitUntil(self.IsTextPresent, "Value")
self.WaitUntilNot(self.IsTextPresent,
"Files referenced in this collection can be downloaded")
def testShowsGenerateArchiveButtonForFileFinderHunt(self):
stat_entry = rdf_client_fs.StatEntry(
pathspec=rdf_paths.PathSpec(
path="/foo/bar", pathtype=rdf_paths.PathSpec.PathType.OS))
values = [rdf_file_finder.FileFinderResult(stat_entry=stat_entry)]
hunt_id, _ = self.CreateGenericHuntWithCollection(values=values)
self.Open("/")
self.Click("css=a[grrtarget=hunts]")
self.Click("css=td:contains('%s')" % hunt_id)
self.Click("css=li[heading=Results]")
self.WaitUntil(self.IsTextPresent,
"Files referenced in this collection can be downloaded")
def testShowsGenerateArchiveButtonForArtifactDownloaderHunt(self):
stat_entry = rdf_client_fs.StatEntry(
pathspec=rdf_paths.PathSpec(
path="/foo/bar", pathtype=rdf_paths.PathSpec.PathType.OS))
values = [
collectors.ArtifactFilesDownloaderResult(downloaded_file=stat_entry)
]
hunt_id, _ = self.CreateGenericHuntWithCollection(values=values)
self.Open("/")
self.Click("css=a[grrtarget=hunts]")
self.Click("css=td:contains('%s')" % hunt_id)
self.Click("css=li[heading=Results]")
self.WaitUntil(self.IsTextPresent,
"Files referenced in this collection can be downloaded")
def testExportCommandIsShownForStatEntryResults(self):
stat_entry = rdf_client_fs.StatEntry(
pathspec=rdf_paths.PathSpec(
path="/foo/bar", pathtype=rdf_paths.PathSpec.PathType.OS))
values = [rdf_file_finder.FileFinderResult(stat_entry=stat_entry)]
hunt_id, _ = self.CreateGenericHuntWithCollection(values=values)
self.Open("/#/hunts/%s/results" % hunt_id)
self.Click("link=Show export command")
self.WaitUntil(
self.IsTextPresent, "/usr/bin/grr_api_shell 'http://localhost:8000/' "
"--exec_code 'grrapi.Hunt(\"%s\").GetFilesArchive()."
"WriteToFile(\"./hunt_results_%s.zip\")'" %
(hunt_id, hunt_id.replace(":", "_")))
def testExportCommandIsNotShownWhenNoResults(self):
hunt_id, _ = self.CreateGenericHuntWithCollection([])
self.Open("/#/hunts/%s/results" % hunt_id)
self.WaitUntil(self.IsElementPresent,
"css=grr-hunt-results:contains('Value')")
self.WaitUntilNot(self.IsTextPresent, "Show export command")
def testExportCommandIsNotShownForNonFileResults(self):
values = [rdf_client.Process(pid=1), rdf_client.Process(pid=42423)]
hunt_id, _ = self.CreateGenericHuntWithCollection(values=values)
self.Open("/#/hunts/%s/results" % hunt_id)
self.WaitUntil(self.IsElementPresent,
"css=grr-hunt-results:contains('Value')")
self.WaitUntilNot(self.IsTextPresent, "Show export command")
def testHuntAuthorizationIsRequiredToGenerateResultsArchive(self):
stat_entry = rdf_client_fs.StatEntry(
pathspec=rdf_paths.PathSpec(
path="/foo/bar", pathtype=rdf_paths.PathSpec.PathType.OS))
values = [rdf_file_finder.FileFinderResult(stat_entry=stat_entry)]
hunt_id, _ = self.CreateGenericHuntWithCollection(values=values)
self.Open("/")
self.Click("css=a[grrtarget=hunts]")
self.Click("css=td:contains('%s')" % hunt_id)
self.Click("css=li[heading=Results]")
self.Click("css=button.DownloadButton")
self.WaitUntil(self.IsTextPresent, "Create a new approval request")
def testGenerateZipButtonGetsDisabledAfterClick(self):
hunt_id = self._CreateHuntWithDownloadedFile()
self.RequestAndGrantHuntApproval(hunt_id)
self.Open("/")
self.Click("css=a[grrtarget=hunts]")
self.Click("css=td:contains('%s')" % hunt_id)
self.Click("css=li[heading=Results]")
self.Click("css=button.DownloadButton")
self.WaitUntil(self.IsElementPresent, "css=button.DownloadButton[disabled]")
self.WaitUntil(self.IsTextPresent, "Generation has started")
def testShowsNotificationWhenArchiveGenerationIsDone(self):
hunt_id = self._CreateHuntWithDownloadedFile()
self.RequestAndGrantHuntApproval(hunt_id)
self.Open("/")
self.Click("css=a[grrtarget=hunts]")
self.Click("css=td:contains('%s')" % hunt_id)
self.Click("css=li[heading=Results]")
self.Click("css=button.DownloadButton")
self.WaitUntil(self.IsTextPresent, "Generation has started")
self.WaitUntil(self.IsUserNotificationPresent,
"Downloaded archive of hunt %s" % hunt_id)
# Check that the archive generating flow does not end with an error.
self.WaitUntilNot(self.IsUserNotificationPresent, "terminated due to error")
def testShowsErrorMessageIfArchiveStreamingFailsBeforeFirstChunkIsSent(self):
hunt_id = self._CreateHuntWithDownloadedFile()
self.RequestAndGrantHuntApproval(hunt_id)
def RaisingStub(*unused_args, **unused_kwargs):
raise RuntimeError("something went wrong")
with utils.Stubber(archive_generator.CollectionArchiveGenerator, "Generate",
RaisingStub):
self.Open("/")
self.Click("css=a[grrtarget=hunts]")
self.Click("css=td:contains('%s')" % hunt_id)
self.Click("css=li[heading=Results]")
self.Click("css=button.DownloadButton")
self.WaitUntil(self.IsTextPresent,
"Can't generate archive: Unknown error")
self.WaitUntil(self.IsUserNotificationPresent,
"Archive generation failed for hunt")
def testShowsNotificationIfArchiveStreamingFailsInProgress(self):
hunt_id = self._CreateHuntWithDownloadedFile()
self.RequestAndGrantHuntApproval(hunt_id)
def RaisingStub(*unused_args, **unused_kwargs):
yield b"foo"
yield b"bar"
raise RuntimeError("something went wrong")
with utils.Stubber(archive_generator.CollectionArchiveGenerator, "Generate",
RaisingStub):
self.Open("/")
self.Click("css=a[grrtarget=hunts]")
self.Click("css=td:contains('%s')" % hunt_id)
self.Click("css=li[heading=Results]")
self.Click("css=button.DownloadButton")
self.WaitUntil(self.IsUserNotificationPresent,
"Archive generation failed for hunt")
# There will be no failure message, as we can't get a status from an
# iframe that triggers the download.
self.WaitUntilNot(self.IsTextPresent,
"Can't generate archive: Unknown error")
def testDoesNotShowPerFileDownloadButtonForNonExportableRDFValues(self):
values = [rdf_client.Process(pid=1), rdf_client.Process(pid=42423)]
hunt_id, _ = self.CreateGenericHuntWithCollection(values=values)
self.Open("/")
self.Click("css=a[grrtarget=hunts]")
self.Click("css=td:contains('%s')" % hunt_id)
self.Click("css=li[heading=Results]")
self.WaitUntil(self.IsTextPresent, "42423")
self.WaitUntilNot(
self.IsElementPresent,
"css=grr-results-collection button:has(span.glyphicon-download)")
def testShowsPerFileDownloadButtonForFileFinderHunt(self):
stat_entry = rdf_client_fs.StatEntry(
pathspec=rdf_paths.PathSpec(
path="/foo/bar", pathtype=rdf_paths.PathSpec.PathType.OS))
values = [rdf_file_finder.FileFinderResult(stat_entry=stat_entry)]
hunt_id, _ = self.CreateGenericHuntWithCollection(values=values)
self.Open("/")
self.Click("css=a[grrtarget=hunts]")
self.Click("css=td:contains('%s')" % hunt_id)
self.Click("css=li[heading=Results]")
self.WaitUntil(
self.IsElementPresent,
"css=grr-results-collection button:has(span.glyphicon-download)")
def testShowsPerFileDownloadButtonForArtifactDownloaderHunt(self):
stat_entry = rdf_client_fs.StatEntry(
pathspec=rdf_paths.PathSpec(
path="/foo/bar", pathtype=rdf_paths.PathSpec.PathType.OS))
values = [
collectors.ArtifactFilesDownloaderResult(downloaded_file=stat_entry)
]
hunt_id, _ = self.CreateGenericHuntWithCollection(values=values)
self.Open("/")
self.Click("css=a[grrtarget=hunts]")
self.Click("css=td:contains('%s')" % hunt_id)
self.Click("css=li[heading=Results]")
self.WaitUntil(
self.IsElementPresent,
"css=grr-results-collection button:has(span.glyphicon-download)")
def testHuntAuthorizationIsRequiredToDownloadSingleHuntFile(self):
hunt_id = self._CreateHuntWithDownloadedFile()
self.Open("/")
self.Click("css=a[grrtarget=hunts]")
self.Click("css=td:contains('%s')" % hunt_id)
self.Click("css=li[heading=Results]")
self.Click("css=grr-results-collection button:has(span.glyphicon-download)")
self.WaitUntil(self.IsTextPresent, "Create a new approval request")
def testDownloadsSingleHuntFileIfAuthorizationIsPresent(self):
hunt_id = self._CreateHuntWithDownloadedFile()
results = self.GetHuntResults(hunt_id)
self.RequestAndGrantHuntApproval(hunt_id)
self.Open("/")
self.Click("css=a[grrtarget=hunts]")
self.Click("css=td:contains('%s')" % hunt_id)
self.Click("css=li[heading=Results]")
fd = file_store.OpenFile(flow_export.CollectionItemToClientPath(results[0]))
with mock.patch.object(fd.__class__, "Read") as mock_obj:
self.Click(
"css=grr-results-collection button:has(span.glyphicon-download)")
self.WaitUntil(lambda: mock_obj.called)
def testDisplaysErrorMessageIfSingleHuntFileCanNotBeRead(self):
hunt_id = self._CreateHuntWithDownloadedFile()
results = self.GetHuntResults(hunt_id)
original_result = results[0]
payload = original_result.payload.Copy()
payload.pathspec.path += "blah"
client_id = self.SetupClients(1)[0]
self.AddResultsToHunt(hunt_id, client_id, [payload])
self.RequestAndGrantHuntApproval(hunt_id)
self.Open("/")
self.Click("css=a[grrtarget=hunts]")
self.Click("css=td:contains('%s')" % hunt_id)
self.Click("css=li[heading=Results]")
self.Click(
"css=grr-results-collection button:has(span.glyphicon-download):last")
self.WaitUntil(self.IsTextPresent, "Couldn't download the file.")
if __name__ == "__main__":
app.run(test_lib.main)
|
{
"content_hash": "7eca988e6154a68969b38f4fafc31bd7",
"timestamp": "",
"source": "github",
"line_count": 308,
"max_line_length": 80,
"avg_line_length": 38.532467532467535,
"alnum_prop": 0.6987698011459387,
"repo_name": "google/grr",
"id": "9028d11bde91b12d5e8f8f72d0676f6aeffd3171",
"size": "11916",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "grr/server/grr_response_server/gui/selenium_tests/hunt_archive_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "12697"
},
{
"name": "C++",
"bytes": "54814"
},
{
"name": "Dockerfile",
"bytes": "1822"
},
{
"name": "HCL",
"bytes": "8451"
},
{
"name": "HTML",
"bytes": "366783"
},
{
"name": "JavaScript",
"bytes": "13088"
},
{
"name": "Jupyter Notebook",
"bytes": "199216"
},
{
"name": "Makefile",
"bytes": "3244"
},
{
"name": "PowerShell",
"bytes": "531"
},
{
"name": "Python",
"bytes": "8844725"
},
{
"name": "Roff",
"bytes": "444"
},
{
"name": "SCSS",
"bytes": "105120"
},
{
"name": "Shell",
"bytes": "48663"
},
{
"name": "Standard ML",
"bytes": "8172"
},
{
"name": "TypeScript",
"bytes": "2139377"
}
],
"symlink_target": ""
}
|
import re
import os
# Where module is on filesystem.
FORGE_HOME = os.path.abspath(os.path.realpath(os.path.dirname(__file__)))
MODULE_REGEX = re.compile(r'^(?P<author>\w+)[/\-](?P<module>\w+)$')
TARBALL_REGEX = re.compile(r'^(?P<author>\w+)-(?P<module>\w+)-(?P<version>[\w\.]+)\.tar\.gz$')
PUPPETLABS_FORGE_URL = 'https://forge.puppetlabs.com'
PUPPETLABS_FORGE_API_URL = 'https://forgeapi.puppetlabs.com'
MODULES_JSON = '/modules.json'
RELEASES_JSON = '/api/v1/releases.json'
RELEASES_URL = '/system/releases/'
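# Examples of what the patterns accept (a sketch; the module name is illustrative):
#   MODULE_REGEX.match('puppetlabs-stdlib')                 # author/module, split on '-' or '/'
#   TARBALL_REGEX.match('puppetlabs-stdlib-4.6.0.tar.gz')   # same, plus a version group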
|
{
"content_hash": "f61594994a72d7d76dee79ea7360a6c1",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 94,
"avg_line_length": 36.714285714285715,
"alnum_prop": 0.6750972762645915,
"repo_name": "jbronn/django-forge",
"id": "a1d134655aad4f89a3e5c21df09ac583f9b21b61",
"size": "514",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "forge/constants.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "268"
},
{
"name": "Python",
"bytes": "45183"
}
],
"symlink_target": ""
}
|
import numpy as np
from BDSpace.Figure import Figure
from BDSpace.Figure.Sphere import SphericalShape, SphericalWedge, SphericalSegmentWedge
from BDSpace.Figure.Cylinder import CylindricalWedge
from BDSpace.Figure.Cone import ConicalWedge
from BDSpace.Figure.Torus import ToricWedge
from BDSpace.Figure.Cube import Parallelepiped
from BDSpaceVis import generators
from BDSpaceVis.space import SpaceView
class FigureView(SpaceView):
def __init__(self, fig, figure, scale=1, color=None, opacity=None, edge_visible=False,
cs_visible=True, surface_visible=True, wireframe=False, resolution=20):
assert isinstance(figure, Figure)
self.resolution = resolution
self.edge_visible = edge_visible
points, dims = generate_points(figure, self.resolution)
super(FigureView, self).__init__(fig, figure, scale=scale, color=color, opacity=opacity,
points=points, dims=dims,
cs_visible=cs_visible, surface_visible=surface_visible, wireframe=wireframe)
def set_resolution(self, resolution):
self.resolution = resolution
points, dims = generate_points(self.space, resolution)
self.set_points(points, dims)
self.draw()
def set_edge_visible(self, edge_visible=True):
self.edge_visible = edge_visible
self.draw()
def generate_points(figure, resolution=20):
assert isinstance(figure, Figure)
points = None
dims = None
if isinstance(figure, SphericalShape):
phi = np.linspace(0.0, figure.phi, angular_resolution(figure.phi, resolution), endpoint=True)
r = np.array([figure.r_inner, figure.r_outer], dtype=np.float)
if isinstance(figure, SphericalWedge):
theta = np.linspace(figure.theta[0], figure.theta[1],
angular_resolution(figure.theta[1] - figure.theta[0], resolution), endpoint=True)
points, dims = generators.generate_sphere(phi, theta, r)
elif isinstance(figure, SphericalSegmentWedge):
z = np.array([figure.h1, figure.h2], dtype=np.float)
points, dims = generators.generate_spherical_section(phi, z, r)
elif isinstance(figure, CylindricalWedge):
phi = np.linspace(0.0, figure.phi, angular_resolution(figure.phi, resolution), endpoint=True)
r = np.array([figure.r_inner, figure.r_outer], dtype=np.float)
z = np.array(figure.z, dtype=np.float)
points, dims = generators.generate_cylinder(phi, z, r)
elif isinstance(figure, ConicalWedge):
phi = np.linspace(0.0, figure.phi, angular_resolution(figure.phi, resolution), endpoint=True)
z = np.array(figure.z, dtype=np.float)
points, dims = generators.generate_cone(phi, z, theta=figure.theta,
z_offset=figure.z_offset, r_min=figure.r_min)
elif isinstance(figure, ToricWedge):
phi = np.linspace(0.0, figure.phi, angular_resolution(figure.phi, resolution), endpoint=True)
theta = np.linspace(figure.theta[0], figure.theta[1],
angular_resolution(figure.theta[1] - figure.theta[0], resolution), endpoint=True)
points, dims = generators.generate_torus(r_torus=figure.r_torus, r_tube=figure.r_tube, phi=phi, theta=theta)
elif isinstance(figure, Parallelepiped):
points, dims = generators.generate_parallelepiped(a=figure.vectors[0], b=figure.vectors[1], c=figure.vectors[2],
origin=np.array([0, 0, 0]))
return points, dims
def angular_resolution(angle, resolution):
points_num = int(angle / np.pi * resolution)
if points_num < 2:
points_num = 2
return points_num
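# e.g. angular_resolution(np.pi / 2, 20) == 10; very small angles are clamped
# to the 2-point minimum so a degenerate wedge can still be drawn.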
|
{
"content_hash": "47196a7c0b92440f1c4178194e55d7b6",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 120,
"avg_line_length": 49.81578947368421,
"alnum_prop": 0.6539883782356049,
"repo_name": "bond-anton/Space_visualization",
"id": "208f0117bde234c0d79c0a737375675f16349b61",
"size": "3786",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "BDSpaceVis/figures.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "34107"
}
],
"symlink_target": ""
}
|
import time
from coinbase.wallet.client import Client
class Coinbase(object):
def __init__(self, api_key, secret_key):
self.client = Client(api_key, secret_key)
self._portfolio_cache = {}
self._last_retrieved = 0.0
self._update_limit = 10 #seconds
def portfolio(self):
if time.time() - self._last_retrieved > self._update_limit:
self._last_retrieved = time.time()
self._portfolio_cache = self._portfolio()
return self._portfolio_cache
def _portfolio(self):
accounts = self.client.get_accounts()
coins = []
for account in accounts.data:
            # ignore coins that hold no value
if float(account['balance']['amount']) == 0.0:
continue
coin = {
"symbol": account['currency'],
"amount": float(account['balance']['amount']),
"trading": 0.0,
"value": float(account['native_balance']['amount'])
}
            # estimate the per-unit trading price, since the Coinbase API only
            # seems to return the BTC trading price at the moment
coin['trading'] = coin['value'] / coin['amount']
coins.append(coin)
return coins
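# Usage sketch (hypothetical credentials; not part of the original file):
#   cb = Coinbase('API_KEY', 'API_SECRET')
#   cb.portfolio()  # talks to the Coinbase API at most once per 10 seconds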
|
{
"content_hash": "1457b1078fd546db06044a88414b5fa9",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 74,
"avg_line_length": 30.11904761904762,
"alnum_prop": 0.5470355731225296,
"repo_name": "mattmolo/cryptocurreny-viewer",
"id": "65dffcb9e32b699ddc608b5a008e94ac6569a4f6",
"size": "1265",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "CoinbaseClient.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "759"
},
{
"name": "Dockerfile",
"bytes": "156"
},
{
"name": "HTML",
"bytes": "2431"
},
{
"name": "JavaScript",
"bytes": "1739"
},
{
"name": "Python",
"bytes": "4018"
}
],
"symlink_target": ""
}
|
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "zzz.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
{
"content_hash": "b29d87efbf24f6da9fc23e9518c9742f",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 67,
"avg_line_length": 24.88888888888889,
"alnum_prop": 0.7053571428571429,
"repo_name": "yetizzz/zzz",
"id": "81aff7216e5547c6be081c2fb31f028b7d1589d2",
"size": "246",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/manage.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "JavaScript",
"bytes": "84640"
},
{
"name": "Perl",
"bytes": "6148"
},
{
"name": "Python",
"bytes": "41113"
}
],
"symlink_target": ""
}
|
from collections import namedtuple
import uuid
import datetime
from django.test import SimpleTestCase
from corehq.util.es.elasticsearch import ConnectionError
from corehq.apps.es import FormES
from corehq.apps.es.tests.utils import es_test
from corehq.elastic import get_es_new, send_to_elasticsearch, doc_exists_in_es
from corehq.form_processor.utils import TestFormMetadata
from corehq.pillows.mappings.xform_mapping import XFORM_INDEX_INFO
from corehq.util.es.interface import ElasticsearchInterface
from corehq.util.test_utils import make_es_ready_form, trap_extra_setup
from pillowtop.es_utils import initialize_index_and_mapping
WrappedJsonFormPair = namedtuple('WrappedJsonFormPair', ['wrapped_form', 'json_form'])
@es_test
class XFormESTestCase(SimpleTestCase):
@classmethod
def setUpClass(cls):
super(XFormESTestCase, cls).setUpClass()
cls.now = datetime.datetime.utcnow()
cls.forms = []
with trap_extra_setup(ConnectionError):
cls.es = get_es_new()
initialize_index_and_mapping(cls.es, XFORM_INDEX_INFO)
def setUp(self):
super(XFormESTestCase, self).setUp()
self.test_id = uuid.uuid4().hex
@classmethod
def _ship_forms_to_es(cls, metadatas):
for form_metadata in metadatas:
form_metadata = form_metadata or TestFormMetadata()
form_pair = make_es_ready_form(form_metadata)
cls.forms.append(form_pair)
send_to_elasticsearch('forms', form_pair.json_form)
# have to refresh the index to make sure changes show up
cls.es.indices.refresh(XFORM_INDEX_INFO.index)
@classmethod
def tearDownClass(cls):
interface = ElasticsearchInterface(cls.es)
for form in cls.forms:
interface.delete_doc(XFORM_INDEX_INFO.alias, XFORM_INDEX_INFO.type, form.wrapped_form.form_id)
cls.es.indices.refresh(XFORM_INDEX_INFO.index)
cls.forms = []
super(XFormESTestCase, cls).tearDownClass()
def test_forms_are_in_index(self):
for form in self.forms:
self.assertFalse(doc_exists_in_es(XFORM_INDEX_INFO, form.wrapped_form.form_id))
self._ship_forms_to_es([None, None])
self.assertEqual(2, len(self.forms))
for form in self.forms:
self.assertTrue(doc_exists_in_es(XFORM_INDEX_INFO, form.wrapped_form.form_id))
def test_query_by_domain(self):
domain1 = 'test1-{}'.format(self.test_id)
domain2 = 'test2-{}'.format(self.test_id)
self._ship_forms_to_es(
2 * [TestFormMetadata(domain=domain1)] +
1 * [TestFormMetadata(domain=domain2)]
)
self.assertEqual(2, FormES().domain(domain1).run().total)
self.assertEqual(1, FormES().domain(domain2).run().total)
def test_query_by_user(self):
domain = 'test-by-user-{}'.format(self.test_id)
user1 = 'user1-{}'.format(self.test_id)
user2 = 'user2-{}'.format(self.test_id)
self._ship_forms_to_es(
2 * [TestFormMetadata(domain=domain, user_id=user1)] +
1 * [TestFormMetadata(domain=domain, user_id=user2)]
)
self.assertEqual(2, FormES().user_id([user1]).run().total)
self.assertEqual(1, FormES().user_id([user2]).run().total)
self.assertEqual(3, FormES().user_id([user1, user2]).run().total)
# also test with domain filter
self.assertEqual(3, FormES().domain(domain).run().total)
self.assertEqual(2, FormES().domain(domain).user_id([user1]).run().total)
self.assertEqual(1, FormES().domain(domain).user_id([user2]).run().total)
self.assertEqual(3, FormES().domain(domain).user_id([user1, user2]).run().total)
def test_query_completed_date(self):
domain = 'test-completed-{}'.format(self.test_id)
early = datetime.datetime(2015, 12, 5)
later = datetime.datetime(2015, 12, 8)
self._ship_forms_to_es(
2 * [TestFormMetadata(domain=domain, time_end=early)] +
1 * [TestFormMetadata(domain=domain, time_end=later)]
)
base_qs = FormES().domain(domain)
self.assertEqual(3, base_qs.run().total)
# test gt/gte
self.assertEqual(3, base_qs.completed(gt=early - datetime.timedelta(days=1)).run().total)
self.assertEqual(3, base_qs.completed(gte=early).run().total)
self.assertEqual(1, base_qs.completed(gt=early).run().total)
self.assertEqual(1, base_qs.completed(gte=later).run().total)
self.assertEqual(0, base_qs.completed(gt=later).run().total)
# test lt/lte
self.assertEqual(3, base_qs.completed(lt=later + datetime.timedelta(days=1)).run().total)
self.assertEqual(3, base_qs.completed(lte=later).run().total)
self.assertEqual(2, base_qs.completed(lt=later).run().total)
self.assertEqual(2, base_qs.completed(lte=early).run().total)
self.assertEqual(0, base_qs.completed(lt=early).run().total)
# test both
self.assertEqual(0, base_qs.completed(gt=early, lt=later).run().total)
|
{
"content_hash": "4bb30267a5d0296bdde670dffc66c23c",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 106,
"avg_line_length": 45.32142857142857,
"alnum_prop": 0.6548463356973995,
"repo_name": "dimagi/commcare-hq",
"id": "8dae284dc5f07bc0ce725ff759bee125a9c2e604",
"size": "5076",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "testapps/test_elasticsearch/tests/test_xform_es.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "82928"
},
{
"name": "Dockerfile",
"bytes": "2341"
},
{
"name": "HTML",
"bytes": "2589268"
},
{
"name": "JavaScript",
"bytes": "5889543"
},
{
"name": "Jinja",
"bytes": "3693"
},
{
"name": "Less",
"bytes": "176180"
},
{
"name": "Makefile",
"bytes": "1622"
},
{
"name": "PHP",
"bytes": "2232"
},
{
"name": "PLpgSQL",
"bytes": "66704"
},
{
"name": "Python",
"bytes": "21779773"
},
{
"name": "Roff",
"bytes": "150"
},
{
"name": "Shell",
"bytes": "67473"
}
],
"symlink_target": ""
}
|
import os.path
from reportlab.lib.units import mm
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.ttfonts import TTFont
from reportlab.pdfgen import canvas
from laboratory.settings import FONTS_FOLDER
class PageNumCanvas(canvas.Canvas):
"""
    Add "Page X of Y" numbering to each page.
"""
# ----------------------------------------------------------------------
def __init__(self, *args, **kwargs):
"""Constructor"""
canvas.Canvas.__init__(self, *args, **kwargs)
self.pages = []
# ----------------------------------------------------------------------
def showPage(self):
"""
On a page break, add information to the list
"""
self.pages.append(dict(self.__dict__))
self._startPage()
# ----------------------------------------------------------------------
def save(self):
"""
Add the page number to each page (page x of y)
"""
page_count = len(self.pages)
for page in self.pages:
self.__dict__.update(page)
self.draw_page_number(page_count)
canvas.Canvas.showPage(self)
canvas.Canvas.save(self)
# ----------------------------------------------------------------------
def draw_page_number(self, page_count):
"""
Add the page number
"""
pdfmetrics.registerFont(TTFont('PTAstraSerifReg', os.path.join(FONTS_FOLDER, 'PTAstraSerif-Regular.ttf')))
page = "Лист {} из {}".format(self._pageNumber, page_count)
self.setFont("PTAstraSerifReg", 9)
self.drawRightString(200 * mm, 8 * mm, page)
class PageNumCanvasPartitionAll(canvas.Canvas):
"""
    Add only the current page number to each page (no total).
"""
# ----------------------------------------------------------------------
def __init__(self, *args, **kwargs):
"""Constructor"""
canvas.Canvas.__init__(self, *args, **kwargs)
self.pages = []
# ----------------------------------------------------------------------
def showPage(self):
"""
On a page break, add information to the list
"""
self.pages.append(dict(self.__dict__))
self._startPage()
# ----------------------------------------------------------------------
def save(self):
"""
Add the page number to each page (page x of y)
"""
page_count = len(self.pages)
for page in self.pages:
self.__dict__.update(page)
self.draw_page_number(page_count)
canvas.Canvas.showPage(self)
canvas.Canvas.save(self)
# ----------------------------------------------------------------------
def draw_page_number(self, page_count):
"""
Add the page number
"""
pdfmetrics.registerFont(TTFont('PTAstraSerifReg', os.path.join(FONTS_FOLDER, 'PTAstraSerif-Regular.ttf')))
page = "Лист {}".format(self._pageNumber)
self.setFont("PTAstraSerifReg", 9)
self.drawRightString(200 * mm, 8 * mm, page)
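# Usage sketch: Platypus accepts a canvas class through the `canvasmaker`
# argument, which is the usual way these classes get wired in (assumed usage;
# `story` stands for a list of flowables):
#   from reportlab.platypus import SimpleDocTemplate
#   SimpleDocTemplate("out.pdf").build(story, canvasmaker=PageNumCanvas)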
|
{
"content_hash": "bc130f555f331cbe3264523eac5220df",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 114,
"avg_line_length": 31.864583333333332,
"alnum_prop": 0.4645308924485126,
"repo_name": "moodpulse/l2",
"id": "7e5fad7bb1a98ad6c99514ec5d2869aceafa4ab5",
"size": "3069",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "utils/pagenum.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "38747"
},
{
"name": "Dockerfile",
"bytes": "146"
},
{
"name": "HTML",
"bytes": "238498"
},
{
"name": "JavaScript",
"bytes": "425946"
},
{
"name": "Makefile",
"bytes": "1515"
},
{
"name": "Python",
"bytes": "3710422"
},
{
"name": "SCSS",
"bytes": "48493"
},
{
"name": "Shell",
"bytes": "1815"
},
{
"name": "TypeScript",
"bytes": "98237"
},
{
"name": "Vue",
"bytes": "1980612"
}
],
"symlink_target": ""
}
|
from rope.base import ast
from rope.base import pynames
from rope.base import utils
from rope.refactor.importutils import actions
from rope.refactor.importutils import importinfo
class ModuleImports(object):
def __init__(self, project, pymodule, import_filter=None):
self.project = project
self.pymodule = pymodule
self.separating_lines = 0
self.filter = import_filter
@property
@utils.saveit
def imports(self):
finder = _GlobalImportFinder(self.pymodule)
result = finder.find_import_statements()
self.separating_lines = finder.get_separating_line_count()
if self.filter is not None:
for import_stmt in result:
if not self.filter(import_stmt):
import_stmt.readonly = True
return result
def _get_unbound_names(self, defined_pyobject):
visitor = _GlobalUnboundNameFinder(self.pymodule, defined_pyobject)
ast.walk(self.pymodule.get_ast(), visitor)
return visitor.unbound
def remove_unused_imports(self):
can_select = _OneTimeSelector(self._get_unbound_names(self.pymodule))
visitor = actions.RemovingVisitor(
self.project, self._current_folder(), can_select)
for import_statement in self.imports:
import_statement.accept(visitor)
def get_used_imports(self, defined_pyobject):
result = []
can_select = _OneTimeSelector(
self._get_unbound_names(defined_pyobject))
visitor = actions.FilteringVisitor(
self.project, self._current_folder(), can_select)
for import_statement in self.imports:
new_import = import_statement.accept(visitor)
if new_import is not None and not new_import.is_empty():
result.append(new_import)
return result
def get_changed_source(self):
# Make sure we forward a removed import's preceding blank
# lines count to the following import statement.
prev_stmt = None
for stmt in self.imports:
if prev_stmt is not None and prev_stmt.import_info.is_empty():
stmt.blank_lines = max(prev_stmt.blank_lines, stmt.blank_lines)
prev_stmt = stmt
# The new list of imports.
imports = [stmt for stmt in self.imports
if not stmt.import_info.is_empty()]
after_removing = self._remove_imports(self.imports)
first_non_blank = self._first_non_blank_line(after_removing, 0)
first_import = self._first_import_line() - 1
result = []
# Writing module docs
result.extend(after_removing[first_non_blank:first_import])
# Writing imports
sorted_imports = sorted(imports, self._compare_import_locations)
for stmt in sorted_imports:
if stmt != sorted_imports[0]:
result.append('\n' * stmt.blank_lines)
result.append(stmt.get_import_statement() + '\n')
if sorted_imports and first_non_blank < len(after_removing):
result.append('\n' * self.separating_lines)
# Writing the body
first_after_imports = self._first_non_blank_line(after_removing,
first_import)
result.extend(after_removing[first_after_imports:])
return ''.join(result)
def _get_import_location(self, stmt):
start = stmt.get_new_start()
if start is None:
start = stmt.get_old_location()[0]
return start
def _compare_import_locations(self, stmt1, stmt2):
def get_location(stmt):
if stmt.get_new_start() is not None:
return stmt.get_new_start()
else:
return stmt.get_old_location()[0]
return cmp(get_location(stmt1), get_location(stmt2))
def _remove_imports(self, imports):
lines = self.pymodule.source_code.splitlines(True)
after_removing = []
last_index = 0
for stmt in imports:
start, end = stmt.get_old_location()
after_removing.extend(lines[last_index:start - 1])
last_index = end - 1
for i in range(start, end):
after_removing.append('')
after_removing.extend(lines[last_index:])
return after_removing
def _first_non_blank_line(self, lines, lineno):
result = lineno
for line in lines[lineno:]:
if line.strip() == '':
result += 1
else:
break
return result
def add_import(self, import_info):
visitor = actions.AddingVisitor(self.project, [import_info])
for import_statement in self.imports:
if import_statement.accept(visitor):
break
else:
lineno = self._get_new_import_lineno()
blanks = self._get_new_import_blanks()
self.imports.append(importinfo.ImportStatement(
import_info, lineno, lineno,
blank_lines=blanks))
def _get_new_import_blanks(self):
return 0
def _get_new_import_lineno(self):
if self.imports:
return self.imports[-1].end_line
return 1
def filter_names(self, can_select):
visitor = actions.RemovingVisitor(
self.project, self._current_folder(), can_select)
for import_statement in self.imports:
import_statement.accept(visitor)
def expand_stars(self):
can_select = _OneTimeSelector(self._get_unbound_names(self.pymodule))
visitor = actions.ExpandStarsVisitor(
self.project, self._current_folder(), can_select)
for import_statement in self.imports:
import_statement.accept(visitor)
def remove_duplicates(self):
added_imports = []
for import_stmt in self.imports:
visitor = actions.AddingVisitor(self.project,
[import_stmt.import_info])
for added_import in added_imports:
if added_import.accept(visitor):
import_stmt.empty_import()
else:
added_imports.append(import_stmt)
def force_single_imports(self):
"""force a single import per statement"""
for import_stmt in self.imports[:]:
import_info = import_stmt.import_info
if import_info.is_empty():
continue
if len(import_info.names_and_aliases) > 1:
for name_and_alias in import_info.names_and_aliases:
if hasattr(import_info, "module_name"):
new_import = importinfo.FromImport(
import_info.module_name, import_info.level,
[name_and_alias])
else:
new_import = importinfo.NormalImport([name_and_alias])
self.add_import(new_import)
import_stmt.empty_import()
def get_relative_to_absolute_list(self):
visitor = actions.RelativeToAbsoluteVisitor(
self.project, self._current_folder())
for import_stmt in self.imports:
if not import_stmt.readonly:
import_stmt.accept(visitor)
return visitor.to_be_absolute
def get_self_import_fix_and_rename_list(self):
visitor = actions.SelfImportVisitor(
self.project, self._current_folder(), self.pymodule.get_resource())
for import_stmt in self.imports:
if not import_stmt.readonly:
import_stmt.accept(visitor)
return visitor.to_be_fixed, visitor.to_be_renamed
def _current_folder(self):
return self.pymodule.get_resource().parent
def sort_imports(self):
if self.project.prefs.get("sort_imports_alphabetically"):
sort_kwargs = dict(key=self._get_import_name)
else:
sort_kwargs = dict(cmp=self._compare_imports)
# IDEA: Sort from import list
visitor = actions.SortingVisitor(self.project, self._current_folder())
for import_statement in self.imports:
import_statement.accept(visitor)
in_projects = sorted(visitor.in_project, **sort_kwargs)
third_party = sorted(visitor.third_party, **sort_kwargs)
standards = sorted(visitor.standard, **sort_kwargs)
future = sorted(visitor.future, **sort_kwargs)
last_index = self._first_import_line()
last_index = self._move_imports(future, last_index, 0)
last_index = self._move_imports(standards, last_index, 1)
last_index = self._move_imports(third_party, last_index, 1)
last_index = self._move_imports(in_projects, last_index, 1)
self.separating_lines = 2
def _first_import_line(self):
nodes = self.pymodule.get_ast().body
lineno = 0
if self.pymodule.get_doc() is not None:
lineno = 1
if len(nodes) > lineno:
lineno = self.pymodule.logical_lines.logical_line_in(
nodes[lineno].lineno)[0]
else:
lineno = self.pymodule.lines.length()
while lineno > 1:
line = self.pymodule.lines.get_line(lineno - 1)
if line.strip() == '':
lineno -= 1
else:
break
return lineno
def _get_import_name(self, import_stmt):
import_info = import_stmt.import_info
if hasattr(import_info, "module_name"):
return "%s.%s" % (import_info.module_name,
import_info.names_and_aliases[0][0])
else:
return import_info.names_and_aliases[0][0]
def _compare_imports(self, stmt1, stmt2):
str1 = stmt1.get_import_statement()
str2 = stmt2.get_import_statement()
if str1.startswith('from ') and not str2.startswith('from '):
return 1
if not str1.startswith('from ') and str2.startswith('from '):
return -1
return cmp(str1, str2)
def _move_imports(self, imports, index, blank_lines):
if imports:
imports[0].move(index, blank_lines)
index += 1
if len(imports) > 1:
for stmt in imports[1:]:
stmt.move(index)
index += 1
return index
def handle_long_imports(self, maxdots, maxlength):
visitor = actions.LongImportVisitor(
self._current_folder(), self.project, maxdots, maxlength)
for import_statement in self.imports:
if not import_statement.readonly:
import_statement.accept(visitor)
for import_info in visitor.new_imports:
self.add_import(import_info)
return visitor.to_be_renamed
def remove_pyname(self, pyname):
"""Removes pyname when imported in ``from mod import x``"""
visitor = actions.RemovePyNameVisitor(self.project, self.pymodule,
pyname, self._current_folder())
for import_stmt in self.imports:
import_stmt.accept(visitor)
class _OneTimeSelector(object):
def __init__(self, names):
self.names = names
self.selected_names = set()
def __call__(self, imported_primary):
if self._can_name_be_added(imported_primary):
for name in self._get_dotted_tokens(imported_primary):
self.selected_names.add(name)
return True
return False
def _get_dotted_tokens(self, imported_primary):
tokens = imported_primary.split('.')
for i in range(len(tokens)):
yield '.'.join(tokens[:i + 1])
def _can_name_be_added(self, imported_primary):
for name in self._get_dotted_tokens(imported_primary):
if name in self.names and name not in self.selected_names:
return True
return False
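# A minimal sketch of _OneTimeSelector behavior (values are illustrative):
# each dotted name can be selected only once.
#
#     selector = _OneTimeSelector(set(['os.path']))
#     selector('os.path')   # True; marks 'os' and 'os.path' as selected
#     selector('os.path')   # False; already selected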
class _UnboundNameFinder(object):
def __init__(self, pyobject):
self.pyobject = pyobject
def _visit_child_scope(self, node):
pyobject = self.pyobject.get_module().get_scope().\
get_inner_scope_for_line(node.lineno).pyobject
visitor = _LocalUnboundNameFinder(pyobject, self)
for child in ast.get_child_nodes(node):
ast.walk(child, visitor)
def _FunctionDef(self, node):
self._visit_child_scope(node)
def _ClassDef(self, node):
self._visit_child_scope(node)
def _Name(self, node):
if self._get_root()._is_node_interesting(node) and \
not self.is_bound(node.id):
self.add_unbound(node.id)
def _Attribute(self, node):
result = []
while isinstance(node, ast.Attribute):
result.append(node.attr)
node = node.value
if isinstance(node, ast.Name):
result.append(node.id)
primary = '.'.join(reversed(result))
if self._get_root()._is_node_interesting(node) and \
not self.is_bound(primary):
self.add_unbound(primary)
else:
ast.walk(node, self)
def _get_root(self):
pass
def is_bound(self, name, propagated=False):
pass
def add_unbound(self, name):
pass
class _GlobalUnboundNameFinder(_UnboundNameFinder):
def __init__(self, pymodule, wanted_pyobject):
super(_GlobalUnboundNameFinder, self).__init__(pymodule)
self.unbound = set()
self.names = set()
for name, pyname in pymodule._get_structural_attributes().items():
if not isinstance(pyname, (pynames.ImportedName,
pynames.ImportedModule)):
self.names.add(name)
wanted_scope = wanted_pyobject.get_scope()
self.start = wanted_scope.get_start()
self.end = wanted_scope.get_end() + 1
def _get_root(self):
return self
def is_bound(self, primary, propagated=False):
name = primary.split('.')[0]
if name in self.names:
return True
return False
def add_unbound(self, name):
names = name.split('.')
for i in range(len(names)):
self.unbound.add('.'.join(names[:i + 1]))
def _is_node_interesting(self, node):
return self.start <= node.lineno < self.end
class _LocalUnboundNameFinder(_UnboundNameFinder):
def __init__(self, pyobject, parent):
super(_LocalUnboundNameFinder, self).__init__(pyobject)
self.parent = parent
def _get_root(self):
return self.parent._get_root()
def is_bound(self, primary, propagated=False):
name = primary.split('.')[0]
if propagated:
names = self.pyobject.get_scope().get_propagated_names()
else:
names = self.pyobject.get_scope().get_names()
if name in names or self.parent.is_bound(name, propagated=True):
return True
return False
def add_unbound(self, name):
self.parent.add_unbound(name)
class _GlobalImportFinder(object):
def __init__(self, pymodule):
self.current_folder = None
if pymodule.get_resource():
self.current_folder = pymodule.get_resource().parent
self.pymodule = pymodule
self.imports = []
self.pymodule = pymodule
self.lines = self.pymodule.lines
def visit_import(self, node, end_line):
start_line = node.lineno
import_statement = importinfo.ImportStatement(
importinfo.NormalImport(self._get_names(node.names)),
start_line, end_line, self._get_text(start_line, end_line),
blank_lines=self._count_empty_lines_before(start_line))
self.imports.append(import_statement)
def _count_empty_lines_before(self, lineno):
result = 0
for current in range(lineno - 1, 0, -1):
line = self.lines.get_line(current)
if line.strip() == '':
result += 1
else:
break
return result
def _count_empty_lines_after(self, lineno):
result = 0
for current in range(lineno + 1, self.lines.length()):
line = self.lines.get_line(current)
if line.strip() == '':
result += 1
else:
break
return result
def get_separating_line_count(self):
if not self.imports:
return 0
return self._count_empty_lines_after(self.imports[-1].end_line - 1)
def _get_text(self, start_line, end_line):
result = []
for index in range(start_line, end_line):
result.append(self.lines.get_line(index))
return '\n'.join(result)
def visit_from(self, node, end_line):
level = 0
if node.level:
level = node.level
import_info = importinfo.FromImport(
node.module or '', # see comment at rope.base.ast.walk
level, self._get_names(node.names))
start_line = node.lineno
self.imports.append(importinfo.ImportStatement(
import_info, node.lineno, end_line,
self._get_text(start_line, end_line),
blank_lines=
self._count_empty_lines_before(start_line)))
def _get_names(self, alias_names):
result = []
for alias in alias_names:
result.append((alias.name, alias.asname))
return result
def find_import_statements(self):
nodes = self.pymodule.get_ast().body
for index, node in enumerate(nodes):
if isinstance(node, (ast.Import, ast.ImportFrom)):
lines = self.pymodule.logical_lines
end_line = lines.logical_line_in(node.lineno)[1] + 1
if isinstance(node, ast.Import):
self.visit_import(node, end_line)
if isinstance(node, ast.ImportFrom):
self.visit_from(node, end_line)
return self.imports
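# A minimal usage sketch (assuming a rope pymodule is already available;
# the variable names are illustrative):
#
#     finder = _GlobalImportFinder(pymodule)
#     for stmt in finder.find_import_statements():
#         print stmt.get_import_statement()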
|
{
"content_hash": "565a1c5d66fd04ccd3dcf128ac84119b",
"timestamp": "",
"source": "github",
"line_count": 492,
"max_line_length": 79,
"avg_line_length": 36.86991869918699,
"alnum_prop": 0.5790518191841235,
"repo_name": "JetChars/vim",
"id": "b96eebc4647f02da3f37bb7f0a888c01d9c441b0",
"size": "18140",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "vim/bundle/python-mode/pymode/libs2/rope/refactor/importutils/module_imports.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CoffeeScript",
"bytes": "1402"
},
{
"name": "Erlang",
"bytes": "6887"
},
{
"name": "GCC Machine Description",
"bytes": "525"
},
{
"name": "Go",
"bytes": "2239"
},
{
"name": "HTML",
"bytes": "134"
},
{
"name": "JavaScript",
"bytes": "2128"
},
{
"name": "Makefile",
"bytes": "2763"
},
{
"name": "Python",
"bytes": "3294722"
},
{
"name": "Ruby",
"bytes": "40061"
},
{
"name": "Shell",
"bytes": "4058"
},
{
"name": "VimL",
"bytes": "5034489"
}
],
"symlink_target": ""
}
|
from django import template
register = template.Library()
class StreamItemNode(template.Node):
def __init__(self, entry_var):
self.entry_var = template.Variable(entry_var)
def render(self, context):
entry = self.entry_var.resolve(context)
template_name = 'stream/%s.html' % (entry.item_type.model)
context['entry'] = entry
return template.loader.get_template(template_name).render(context)
@register.tag
def streamentry(parser, token):
"""
streamentry <entry_var>
"""
bits = token.split_contents()
bits.reverse()
tag_name = bits.pop()
try:
entry_var = bits.pop()
except IndexError:
raise template.TemplateSyntaxError, "%r is missing entry argument" % tag_name
if bits:
raise template.TemplateSyntaxError, "%r has unexpected arguments" % tag_name
return StreamItemNode(entry_var)
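# A usage sketch in a template (illustrative; assumes ``entries`` is in the
# template context and each entry has the ``item_type`` relation used by
# StreamItemNode above):
#
#     {% load stream_tags %}
#     {% for entry in entries %}{% streamentry entry %}{% endfor %}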
|
{
"content_hash": "a5a476c6facf2f9dde12b6a9d11e7983",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 85,
"avg_line_length": 27.058823529411764,
"alnum_prop": 0.6434782608695652,
"repo_name": "bhrutledge/debugged-django",
"id": "f7962eb9b619f81957050e1f0f2b2e0140df22a3",
"size": "920",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "debugged/stream/templatetags/stream_tags.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1176"
},
{
"name": "HTML",
"bytes": "4107"
},
{
"name": "Python",
"bytes": "76633"
}
],
"symlink_target": ""
}
|
import sys
#Only really old versions of Python don't have sys.hexversion. We don't support
#them. The logging module was introduced in Python 2.3
if not hasattr(sys, 'hexversion') or sys.hexversion < 0x20300F0:
    sys.stdout.write('Python version: ' + sys.version + '\n')
    sys.stdout.write('PyBitmessage requires Python 2.7.3 or greater (but not Python 3)\n')
sys.exit()
#We can now use logging so set up a simple configuration
import logging
formatter = logging.Formatter(
'%(levelname)s: %(message)s'
)
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(formatter)
logger = logging.getLogger(__name__)
logger.addHandler(handler)
logger.setLevel(logging.ERROR)
#We need to check hashlib for RIPEMD-160, as it won't be available if OpenSSL is
#not linked against or the linked OpenSSL has RIPEMD disabled.
def check_hashlib():
if sys.hexversion < 0x020500F0:
logger.error('The hashlib module is not included in this version of Python.')
return False
import hashlib
if '_hashlib' not in hashlib.__dict__:
logger.error('The RIPEMD-160 hash algorithm is not available. The hashlib module is not linked against OpenSSL.')
return False
try:
hashlib.new('ripemd160')
except ValueError:
logger.error('The RIPEMD-160 hash algorithm is not available. The hashlib module utilizes an OpenSSL library with RIPEMD disabled.')
return False
return True
def check_sqlite():
if sys.hexversion < 0x020500F0:
logger.error('The sqlite3 module is not included in this version of Python.')
return False
try:
import sqlite3
except ImportError:
logger.error('The sqlite3 module is not available')
return False
logger.info('sqlite3 Module Version: ' + sqlite3.version)
logger.info('SQLite Library Version: ' + sqlite3.sqlite_version)
#sqlite_version_number formula: https://sqlite.org/c3ref/c_source_id.html
sqlite_version_number = sqlite3.sqlite_version_info[0] * 1000000 + sqlite3.sqlite_version_info[1] * 1000 + sqlite3.sqlite_version_info[2]
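    #e.g. sqlite_version_info (3, 6, 18) -> 3*1000000 + 6*1000 + 18 = 3006018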
conn = None
try:
try:
conn = sqlite3.connect(':memory:')
if sqlite_version_number >= 3006018:
sqlite_source_id = conn.execute('SELECT sqlite_source_id();').fetchone()[0]
logger.info('SQLite Library Source ID: ' + sqlite_source_id)
if sqlite_version_number >= 3006023:
compile_options = ', '.join(map(lambda row: row[0], conn.execute('PRAGMA compile_options;')))
logger.info('SQLite Library Compile Options: ' + compile_options)
#There is no specific version requirement as yet, so we just use the
#first version that was included with Python.
if sqlite_version_number < 3000008:
logger.error('This version of SQLite is too old. PyBitmessage requires SQLite 3.0.8 or later')
return False
return True
except sqlite3.Error:
            logger.exception('An exception occurred while checking sqlite.')
return False
finally:
if conn:
conn.close()
def check_openssl():
try:
import ctypes
except ImportError:
logger.error('Unable to check OpenSSL. The ctypes module is not available.')
return False
#We need to emulate the way PyElliptic searches for OpenSSL.
if sys.platform == 'win32':
paths = ['libeay32.dll']
if getattr(sys, 'frozen', False):
import os.path
paths.insert(0, os.path.join(sys._MEIPASS, 'libeay32.dll'))
else:
paths = ['libcrypto.so']
if sys.platform == 'darwin':
paths.extend([
'libcrypto.dylib',
'/usr/local/opt/openssl/lib/libcrypto.dylib',
'./../Frameworks/libcrypto.dylib'
])
import re
if re.match(r'linux|darwin|freebsd', sys.platform):
try:
import ctypes.util
path = ctypes.util.find_library('ssl')
if path not in paths:
paths.append(path)
except:
pass
SSLEAY_VERSION = 0
SSLEAY_CFLAGS = 2
cflags_regex = re.compile(r'(?:OPENSSL_NO_)(AES|EC|ECDH|ECDSA)(?!\w)')
for path in paths:
logger.info('Checking OpenSSL at ' + path)
try:
library = ctypes.CDLL(path)
except OSError:
continue
logger.info('OpenSSL Name: ' + library._name)
try:
library.SSLeay.restype = ctypes.c_long
library.SSLeay_version.restype = ctypes.c_char_p
library.SSLeay_version.argtypes = [ctypes.c_int]
except AttributeError:
logger.error('Cannot determine version of this OpenSSL library.')
return False
logger.info('OpenSSL Version: ' + library.SSLeay_version(SSLEAY_VERSION))
compile_options = library.SSLeay_version(SSLEAY_CFLAGS)
logger.info('OpenSSL Compile Options: ' + compile_options)
openssl_hexversion = library.SSLeay()
#PyElliptic uses EVP_CIPHER_CTX_new and EVP_CIPHER_CTX_free which were
#introduced in 0.9.8b.
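        #OpenSSL packs versions as 0xMNNFFPPS (major, minor, fix, patch,
        #status); 0x90802F below corresponds to the 0.9.8b release.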
if openssl_hexversion < 0x90802F:
logger.error('This OpenSSL library is too old. PyBitmessage requires OpenSSL 0.9.8b or later with AES, Elliptic Curves (EC), ECDH, and ECDSA enabled.')
return False
matches = cflags_regex.findall(compile_options)
if len(matches) > 0:
logger.error('This OpenSSL library is missing the following required features: ' + ', '.join(matches) + '. PyBitmessage requires OpenSSL 0.9.8b or later with AES, Elliptic Curves (EC), ECDH, and ECDSA enabled.')
return False
return True
return False
#TODO: The minimum versions of pythondialog and dialog need to be determined
def check_curses():
if sys.hexversion < 0x20600F0:
logger.error('The curses interface requires the pythondialog package and the dialog utility.')
return False
try:
import curses
except ImportError:
logger.error('The curses interface can not be used. The curses module is not available.')
return False
logger.info('curses Module Version: ' + curses.version)
try:
import dialog
except ImportError:
logger.error('The curses interface can not be used. The pythondialog package is not available.')
return False
logger.info('pythondialog Package Version: ' + dialog.__version__)
dialog_util_version = dialog.Dialog().cached_backend_version
#The pythondialog author does not like Python2 str, so we have to use
#unicode for just the version otherwise we get the repr form which includes
#the module and class names along with the actual version.
    logger.info('dialog Utility Version: ' + unicode(dialog_util_version))
return True
def check_pyqt():
try:
import PyQt4.QtCore
except ImportError:
logger.error('The PyQt4 package is not available. PyBitmessage requires PyQt 4.8 or later and Qt 4.7 or later.')
return False
logger.info('PyQt Version: ' + PyQt4.QtCore.PYQT_VERSION_STR)
logger.info('Qt Version: ' + PyQt4.QtCore.QT_VERSION_STR)
passed = True
if PyQt4.QtCore.PYQT_VERSION < 0x40800:
        logger.error('This version of PyQt is too old. PyBitmessage requires PyQt 4.8 or later.')
passed = False
if PyQt4.QtCore.QT_VERSION < 0x40700:
        logger.error('This version of Qt is too old. PyBitmessage requires Qt 4.7 or later.')
passed = False
return passed
def check_dependencies(verbose = False, optional = False):
if verbose:
logger.setLevel(logging.INFO)
has_all_dependencies = True
#Python 2.7.3 is the required minimum. Python 3+ is not supported, but it is
#still useful to provide information about our other requirements.
logger.info('Python version: %s', sys.version)
if sys.hexversion < 0x20703F0:
logger.error('PyBitmessage requires Python 2.7.3 or greater (but not Python 3+)')
has_all_dependencies = False
if sys.hexversion >= 0x3000000:
logger.error('PyBitmessage does not support Python 3+. Python 2.7.3 or greater is required.')
has_all_dependencies = False
check_functions = [check_hashlib, check_sqlite, check_openssl]
if optional:
check_functions.extend([check_pyqt, check_curses])
#Unexpected exceptions are handled here
for check in check_functions:
try:
has_all_dependencies &= check()
except:
logger.exception(check.__name__ + ' failed unexpectedly.')
has_all_dependencies = False
if not has_all_dependencies:
logger.critical('PyBitmessage cannot start. One or more dependencies are unavailable.')
sys.exit()
if __name__ == '__main__':
check_dependencies(True, True)
|
{
"content_hash": "d919cd1c896c73c502365004f3cad633",
"timestamp": "",
"source": "github",
"line_count": 217,
"max_line_length": 223,
"avg_line_length": 41.11059907834101,
"alnum_prop": 0.6532899899114449,
"repo_name": "debguy0x/PyBitmessage",
"id": "294c6a2231cc032ea1eec9a3145ac42463960420",
"size": "8932",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "src/depends.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "7456"
},
{
"name": "C++",
"bytes": "4103"
},
{
"name": "Makefile",
"bytes": "3294"
},
{
"name": "Python",
"bytes": "1253072"
},
{
"name": "QMake",
"bytes": "1827"
},
{
"name": "Shell",
"bytes": "12353"
}
],
"symlink_target": ""
}
|
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'AWS Overlord'
copyright = u'2014, Zalando SE'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = open('../VERSION', 'r').read()
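# Note: read() keeps any trailing newline from the VERSION file; a common
# variant is open('../VERSION', 'r').read().strip().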
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'AWSOverlorddoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'AWSOverlord.tex', u'AWS Overlord Documentation',
u'Willi Schönborn', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'awsoverlord', u'AWS Overlord Documentation',
[u'Willi Schönborn'], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'AWSOverlord', u'AWS Overlord Documentation',
u'Willi Schönborn', 'AWSOverlord', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
|
{
"content_hash": "9a47d4bf00732aabb34ba2ffdb0520f6",
"timestamp": "",
"source": "github",
"line_count": 244,
"max_line_length": 79,
"avg_line_length": 32.26229508196721,
"alnum_prop": 0.6991869918699187,
"repo_name": "apfeiffer85/aws-overlord",
"id": "3561e8d0d82ecdc20a1687638e206c9994c64c2b",
"size": "8300",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Clojure",
"bytes": "71590"
},
{
"name": "Makefile",
"bytes": "6774"
},
{
"name": "Python",
"bytes": "8300"
},
{
"name": "Shell",
"bytes": "478"
}
],
"symlink_target": ""
}
|
"""This is the Bokeh charts interface. It gives you a high level API to build
complex plots in a simple way.
This is the Builder class, a minimal prototype class to build more chart
types on top of it.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
import numpy as np
import warnings
from six import string_types
from .attributes import AttrSpec, ColorAttr, CatAttr
from .chart import Chart
from .data_source import ChartDataSource
from .models import CompositeGlyph
from .properties import Dimension, ColumnLabel
from .utils import collect_attribute_columns, label_from_index_dict, build_hover_tooltips
from .data_source import OrderedAssigner
from ..models.ranges import Range, Range1d, FactorRange
from ..models.sources import ColumnDataSource
from ..core.properties import (HasProps, Instance, List, String, Dict,
Color, Bool, Tuple, Either, Enum)
from ..core.enums import SortDirection
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
def create_and_build(builder_class, *data, **kws):
"""A factory function for handling Chart and Builder generation.
Returns:
:class:`Chart`
"""
if getattr(builder_class, 'dimensions') is None:
raise NotImplementedError('Each builder must specify its dimensions, %s does not.' % builder_class.__name__)
if getattr(builder_class, 'default_attributes') is None:
raise NotImplementedError('Each builder must specify its default_attributes, %s does not.' % builder_class.__name__)
builder_props = set(builder_class.properties()) | \
set(getattr(builder_class, "__deprecated_attributes__", []))
# append dimensions to the builder props
for dim in builder_class.dimensions:
builder_props.add(dim)
# append attributes to the builder props
for attr_name in builder_class.default_attributes.keys():
builder_props.add(attr_name)
# create the new builder
builder_kws = {k: v for k, v in kws.items() if k in builder_props}
builder = builder_class(*data, **builder_kws)
# create a chart to return, since there isn't one already
chart_kws = {k: v for k, v in kws.items() if k not in builder_props}
chart = Chart(**chart_kws)
chart.add_builder(builder)
chart.start_plot()
return chart
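# A minimal sketch of how a chart entry point delegates to create_and_build
# (BarBuilder here is illustrative; concrete builders live in their own
# modules):
#
#     def Bar(data, **kws):
#         return create_and_build(BarBuilder, data, **kws)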
class Builder(HasProps):
""" A prototype class to inherit each new chart Builder type.
It provides useful methods to be used by the inherited builder classes,
in order to automate most of the charts creation tasks and leave the
core customization to specialized builder classes. In that pattern
inherited builders just need to provide the following methods:
Required:
* :meth:`~bokeh.charts.builder.Builder.yield_renderers`: yields the glyphs to be
rendered into the plot. Here you should call the
:meth:`~bokeh.charts.builder.Builder.add_glyph` method so that the builder can
setup the legend for you.
* :meth:`~bokeh.charts.builder.Builder.set_ranges`: setup the ranges for the
glyphs. This is called after glyph creation, so you are able to inspect the
comp_glyphs for their minimum and maximum values. See the
:meth:`~bokeh.charts.builder.Builder.create` method for more information on
when this is called and how the builder provides the ranges to the containing
:class:`Chart` using the :meth:`Chart.add_ranges` method.
Optional:
* :meth:`~bokeh.charts.builder.Builder.setup`: provides an area
where subclasses of builder can introspect properties, setup attributes, or change
property values. This is called before
:meth:`~bokeh.charts.builder.Builder.process_data`.
* :meth:`~bokeh.charts.builder.Builder.process_data`: provides an area
where subclasses of builder can manipulate the source data before renderers are
created.
"""
# Optional Inputs
x_range = Instance(Range)
y_range = Instance(Range)
xlabel = String()
ylabel = String()
xscale = String()
yscale = String()
palette = List(Color, help="""Optional input to override the default palette used
by any color attribute.
""")
# Dimension Configuration
"""
The dimension labels that drive the position of the
glyphs. Subclasses should implement this so that the Builder
base class knows which dimensions it needs to operate on.
An example for a builder working with cartesian x and y
coordinates would be dimensions = ['x', 'y']. You should
then instantiate the x and y dimensions as attributes of the
subclass of builder using the :class:`Dimension
<bokeh.charts.properties.Dimension>` class. One for x, as x
= Dimension(...), and one as y = Dimension(...).
"""
dimensions = None # None because it MUST be overridden
"""
The dimension labels that must exist to produce the
glyphs. This specifies what are the valid configurations for
the chart, with the option of specifying the type of the
columns. The
:class:`~bokeh.charts.data_source.ChartDataSource` will
inspect this property of your subclass of Builder and use
this to fill in any required dimensions if no keyword
arguments are used.
"""
req_dimensions = []
# Attribute Configuration
attributes = Dict(String, Instance(AttrSpec), help="""
The attribute specs used to group data. This is a mapping between the role of
the attribute spec (e.g. 'color') and the
:class:`~bokeh.charts.attributes.AttrSpec` class (e.g.,
:class:`~bokeh.charts.attributes.ColorAttr`). The Builder will use this
attributes property during runtime, which will consist of any attribute specs
that are passed into the chart creation function (e.g.,
:class:`~bokeh.charts.Bar`), ones that are created for the user from simple
input types (e.g. `Bar(..., color='red')` or `Bar(..., color=<column_name>)`),
or lastly, the attribute spec found in the default_attributes configured for
the subclass of :class:`~bokeh.charts.builder.Builder`.
""")
"""
The default attribute specs used to group data. This is
where the subclass of Builder should specify what the
default attributes are that will yield attribute values to
each group of data, and any specific configuration. For
example, the :class:`ColorAttr` utilizes a default palette
for assigning color based on groups of data. If the user
doesn't assign a column of the data to the associated
attribute spec, then the default attrspec is used, which
will yield a constant color value for each group of
data. This is by default the first color in the default
palette, but can be customized by setting the default color
in the ColorAttr.
"""
default_attributes = None # None because it MUST be overridden
# Derived properties (created by Builder at runtime)
attribute_columns = List(ColumnLabel, help="""
All columns used for specifying attributes for the Chart. The Builder will set
this value on creation so that the subclasses can know the distinct set of columns
that are being used to assign attributes.
""")
comp_glyphs = List(Instance(CompositeGlyph), help="""
A list of composite glyphs, where each represents a unique subset of data. The
composite glyph is a helper class that encapsulates all low level
:class:`~bokeh.models.glyphs.Glyph`, that represent a higher level group of
data. For example, the :class:`BoxGlyph` is a single class that yields
each :class:`GlyphRenderer` needed to produce a Box on a :class:`BoxPlot`. The
single Box represents a full array of values that are aggregated, and is made
up of multiple :class:`~bokeh.models.glyphs.Rect` and
:class:`~bokeh.models.glyphs.Segment` glyphs.
""")
labels = List(String, help="""Represents the unique labels to be used for legends.""")
"""List of attributes to use for legends."""
label_attributes = []
"""
Used to assign columns to dimensions when no selections have been provided. The
default behavior is provided by the :class:`OrderedAssigner`, which assigns
a single column to each dimension available in the `Builder`'s `dims` property.
"""
column_selector = OrderedAssigner
comp_glyph_types = List(Instance(CompositeGlyph))
sort_dim = Dict(String, Bool, default={})
sort_legend = List(Tuple(String, Bool), help="""
List of tuples to use for sorting the legend, in order that they should be
used for sorting. This sorting can be different than the sorting used for the
rest of the chart. For example, you might want to sort only on the column
assigned to the color attribute, or sort it descending. The order of each tuple
is (Column, Ascending).
""")
legend_sort_field = String(help="""
Attribute that should be used to sort the legend, for example: color,
    dash, marker, etc. Valid values for this property depend on the type
of chart.
""")
legend_sort_direction = Enum(SortDirection, help="""
Sort direction to apply to :attr:`~bokeh.charts.builder.Builder.sort_legend`.
Valid values are: `ascending` or `descending`.
""")
source = Instance(ColumnDataSource)
tooltips = Either(List(Tuple(String, String)), List(String), Bool, default=None,
help="""
Tells the builder to add tooltips to the chart by either using the columns
specified to the chart attributes (True), or by generating tooltips for each
column specified (list(str)), or by explicit specification of the tooltips
using the valid input for the `HoverTool` tooltips kwarg.
""")
__deprecated_attributes__ = ('sort_legend',)
def __init__(self, *args, **kws):
"""Common arguments to be used by all the inherited classes.
Args:
data (:ref:`userguide_charts_data_types`): source data for the chart
legend (str, bool): the legend of your plot. The legend content is
                inferred from incoming input. It can be ``top_left``,
                ``top_right``, ``bottom_left``, ``bottom_right``.
                It is ``top_right`` if you set it to True.
Attributes:
source (obj): datasource object for your plot,
initialized as a dummy None.
            x_range (obj): x-associated datarange object for your plot,
                initialized as a dummy None.
            y_range (obj): y-associated datarange object for your plot,
                initialized as a dummy None.
groups (list): to be filled with the incoming groups of data.
Useful for legend construction.
data (dict): to be filled with the incoming data and be passed
to the ChartDataSource for each Builder class.
attr (list(AttrSpec)): to be filled with the new attributes created after
loading the data dict.
"""
data = None
if len(args) != 0 or len(kws) != 0:
# chart dimensions can be literal dimensions or attributes
attrs = list(self.default_attributes.keys())
dims = self.dimensions + attrs
# pop the dimension inputs from kwargs
data_args = {}
for dim in dims:
if dim in kws.keys():
data_args[dim] = kws[dim]
# build chart data source from inputs, given the dimension configuration
data_args['dims'] = tuple(dims)
data_args['required_dims'] = tuple(self.req_dimensions)
data_args['attrs'] = attrs
data_args['column_assigner'] = self.column_selector
data = ChartDataSource.from_data(*args, **data_args)
# make sure that the builder dimensions have access to the chart data source
for dim in self.dimensions:
getattr(getattr(self, dim), 'set_data')(data)
# handle input attrs and ensure attrs have access to data
attributes = self._setup_attrs(data, kws)
# remove inputs handled by dimensions and chart attributes
for dim in dims:
kws.pop(dim, None)
else:
attributes = dict()
kws['attributes'] = attributes
super(Builder, self).__init__(**kws)
# collect unique columns used for attributes
self.attribute_columns = collect_attribute_columns(**self.attributes)
for k in self.__deprecated_attributes__:
if k in kws:
setattr(self, k, kws[k])
self._data = data
self._legends = []
def _setup_attrs(self, data, kws):
"""Handle overridden attributes and initialize them with data.
Makes sure that all attributes have access to the data
source, which is used for mapping attributes to groups
of data.
Returns:
None
"""
source = ColumnDataSource(data.df)
attr_names = self.default_attributes.keys()
custom_palette = kws.get('palette')
attributes = dict()
for attr_name in attr_names:
attr = kws.pop(attr_name, None)
# if given an attribute use it
if isinstance(attr, AttrSpec):
attributes[attr_name] = attr
# if we are given columns, use those
elif isinstance(attr, (str, list)):
attributes[attr_name] = self.default_attributes[attr_name]._clone()
# override palette if available
if isinstance(attributes[attr_name], ColorAttr):
if custom_palette is not None:
attributes[attr_name].iterable = custom_palette
attributes[attr_name].setup(data=source, columns=attr)
else:
# override palette if available
if (isinstance(self.default_attributes[attr_name], ColorAttr) and
custom_palette is not None):
attributes[attr_name] = self.default_attributes[attr_name]._clone()
attributes[attr_name].iterable = custom_palette
else:
attributes[attr_name] = self.default_attributes[attr_name]._clone()
# make sure all have access to data source
for attr_name in attr_names:
attributes[attr_name].update_data(data=source)
return attributes
def setup(self):
"""Perform any initial pre-processing, attribute config.
Returns:
None
"""
pass
def process_data(self):
"""Make any global data manipulations before grouping.
It has to be implemented by any of the inherited class
representing each different chart type. It is the place
where we make specific calculations for each chart.
Returns:
None
"""
pass
def yield_renderers(self):
""" Generator that yields the glyphs to be draw on the plot
It has to be implemented by any of the inherited class
representing each different chart type.
Yields:
:class:`GlyphRenderer`
"""
        raise NotImplementedError('Subclasses of %s must implement yield_renderers.' %
                                  self.__class__.__name__)
def set_ranges(self):
"""Calculate and set the x and y ranges.
It has to be implemented by any of the subclasses of builder
representing each different chart type, and is called after
:meth:`yield_renderers`.
Returns:
None
"""
        raise NotImplementedError('Subclasses of %s must implement set_ranges.' %
                                  self.__class__.__name__)
def get_dim_extents(self):
"""Helper method to retrieve maximum extents of all the renderers.
Returns:
a dict mapping between dimension and value for x_max, y_max, x_min, y_min
"""
return {'x_max': max([renderer.x_max for renderer in self.comp_glyphs]),
'y_max': max([renderer.y_max for renderer in self.comp_glyphs]),
'x_min': min([renderer.x_min for renderer in self.comp_glyphs]),
'y_min': min([renderer.y_min for renderer in self.comp_glyphs])
}
def add_glyph(self, group, glyph):
"""Add a composite glyph.
Manages the legend, since the builder might not want all attribute types
used for the legend.
Args:
group (:class:`DataGroup`): the data the `glyph` is associated with
glyph (:class:`CompositeGlyph`): the glyph associated with the `group`
Returns:
None
"""
if isinstance(glyph, list):
for sub_glyph in glyph:
self.comp_glyphs.append(sub_glyph)
else:
self.comp_glyphs.append(glyph)
# handle cases where builders have specified which attributes to use for labels
label = None
if len(self.label_attributes) > 0:
for attr in self.label_attributes:
# this will get the last attribute group label for now
if self.attributes[attr].columns is not None:
label = self._get_group_label(group, attr=attr)
# if no special case for labeling, just use the group label
if label is None:
label = self._get_group_label(group, attr='label')
# add to legend if new and unique label
if str(label) not in self.labels and label is not None:
self._legends.append((label, glyph.renderers))
self.labels.append(label)
def _get_group_label(self, group, attr='label'):
"""Get the label of the group by the attribute name.
Args:
group (:attr:`DataGroup`: the group of data
attr (str, optional): the attribute name containing the label, defaults to
'label'.
Returns:
str: the label for the group
"""
        if attr == 'label':
label = group.label
else:
label = group[attr]
if isinstance(label, dict):
label = tuple(label.values())
return self._get_label(label)
@staticmethod
def _get_label(raw_label):
"""Converts a label by string or tuple to a string representation.
Args:
raw_label (str or tuple(any, any)): a unique identifier for the data group
Returns:
str: a label that is usable in charts
"""
# don't convert None type to string so we can test for it later
if raw_label is None:
return None
if (isinstance(raw_label, tuple) or isinstance(raw_label, list)) and \
len(raw_label) == 1:
raw_label = raw_label[0]
elif isinstance(raw_label, dict):
raw_label = label_from_index_dict(raw_label)
return str(raw_label)
def collect_attr_kwargs(self):
if hasattr(super(self.__class__, self), 'default_attributes'):
attrs = set(self.default_attributes.keys()) - set(
(super(self.__class__, self).default_attributes or {}).keys())
else:
attrs = set()
return attrs
def get_group_kwargs(self, group, attrs):
return {attr: group[attr] for attr in attrs}
def create(self, chart=None):
"""Builds the renderers, adding them and other components to the chart.
Args:
chart (:class:`Chart`, optional): the chart that will contain the glyph
renderers that the `Builder` produces.
Returns:
:class:`Chart`
"""
# call methods that allow customized setup by subclasses
self.setup()
self.process_data()
# create and add renderers to chart
renderers = self.yield_renderers()
if chart is None:
chart = Chart()
chart.add_renderers(self, renderers)
# handle ranges after renders, since ranges depend on aggregations
# ToDo: should reconsider where this occurs
self.set_ranges()
chart.add_ranges('x', self.x_range)
chart.add_ranges('y', self.y_range)
# sort the legend if we are told to
self._legends = self._sort_legend(
self.legend_sort_field, self.legend_sort_direction,
self._legends, self.attributes)
# always contribute legends, let Chart sort it out
chart.add_legend(self._legends)
chart.add_labels('x', self.xlabel)
chart.add_labels('y', self.ylabel)
chart.add_scales('x', self.xscale)
chart.add_scales('y', self.yscale)
if self.tooltips is not None:
tooltips = build_hover_tooltips(hover_spec=self.tooltips,
chart_cols=self.attribute_columns)
chart.add_tooltips(tooltips)
return chart
@classmethod
def generate_help(cls):
help_str = ''
for comp_glyph in cls.comp_glyph_types:
help_str += str(comp_glyph.glyph_properties())
return help_str
@staticmethod
def _sort_legend(legend_sort_field, legend_sort_direction, legends, attributes):
"""Sort legends sorted by looping though sort_legend items (
see :attr:`Builder.sort_legend` for more details)
"""
if legend_sort_field:
if len(attributes[legend_sort_field].columns) > 0:
# TODO(fpliger): attributes should be consistent and not
# need any type checking but for
                #               the moment it is not, especially when going
                #               through a process like binning or when data
# is built for HeatMap, Scatter, etc...
item_order = [x[0] if isinstance(x, tuple) else x
for x in attributes[legend_sort_field].items]
item_order = [str(x) if not isinstance(x, string_types)
else x for x in item_order]
                def sort_key(leg):
                    return item_order.index(leg[0])
                reverse = legend_sort_direction == 'descending'
                return list(sorted(legends, key=sort_key, reverse=reverse))
return legends
@property
def sort_legend(self):
warnings.warn("Chart property `sort_legend` was deprecated in 0.12 \
and will be removed in the future. Use `legend_sort_field` and \
`legend_sort_direction` instead.")
return [(self.legend_sort_field, self.legend_sort_direction)]
@sort_legend.setter
def sort_legend(self, value):
warnings.warn("Chart property 'sort_legend' was deprecated in 0.12 \
and will be removed in the future. Use `legend_sort_field` and \
`legend_sort_direction` instead.")
self.legend_sort_field, direction = value[0]
if direction:
self.legend_sort_direction = "ascending"
else:
self.legend_sort_direction = "descending"
class XYBuilder(Builder):
"""Implements common functionality for XY Builders."""
x = Dimension('x')
y = Dimension('y')
dimensions = ['x', 'y']
req_dimensions = [['x'],
['y'],
['x', 'y']]
default_attributes = {'color': ColorAttr()}
def set_ranges(self):
"""Calculate and set the x and y ranges."""
# ToDo: handle when only single dimension is provided
extents = self.get_dim_extents()
endx = extents['x_max']
startx = extents['x_min']
self.x_range = self._get_range('x', startx, endx)
endy = extents['y_max']
starty = extents['y_min']
self.y_range = self._get_range('y', starty, endy)
if self.xlabel is None:
if self.x.selection is not None:
select = self.x.selection
if not isinstance(select, list):
select = [select]
else:
select = ['']
self.xlabel = ', '.join(select)
if self.ylabel is None:
if self.y.selection is not None:
select = self.y.selection
if not isinstance(select, list):
select = [select]
else:
select = ['']
self.ylabel = ', '.join(select)
def _get_range(self, dim, start, end):
"""Create a :class:`Range` for the :class:`Chart`.
Args:
dim (str): the name of the dimension, which is an attribute of the builder
start: the starting value of the range
end: the ending value of the range
Returns:
:class:`Range`
"""
dim_ref = getattr(self, dim)
values = dim_ref.data
dtype = dim_ref.dtype
sort = self.sort_dim.get(dim)
# object data or single value
if dtype.name == 'object':
factors = values.drop_duplicates()
if sort:
# TODO (fpliger): this handles pandas API change so users do not experience
# the related annoying deprecation warning. This is probably worth
# removing when pandas deprecated version (0.16) is "old" enough
try:
factors.sort_values(inplace=True)
except AttributeError:
factors.sort(inplace=True)
setattr(self, dim + 'scale', 'categorical')
return FactorRange(factors=factors.tolist())
elif np.issubdtype(dtype, np.datetime64):
setattr(self, dim + 'scale', 'datetime')
return Range1d(start=start, end=end)
else:
if end == 'None' or (end - start) == 0:
setattr(self, dim + 'scale', 'categorical')
return FactorRange(factors=['None'])
else:
diff = end - start
setattr(self, dim + 'scale', 'linear')
return Range1d(start=start - 0.1 * diff, end=end + 0.1 * diff)
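# A minimal sketch of the subclassing contract described in Builder's
# docstring. The names below (MyBuilder, make_comp_glyph) are illustrative
# placeholders, not part of this module:
#
#     class MyBuilder(XYBuilder):
#         def yield_renderers(self):
#             for group in self._data.groupby(**self.attributes):
#                 glyph = make_comp_glyph(group)  # build a CompositeGlyph
#                 self.add_glyph(group, glyph)
#                 for renderer in glyph.renderers:
#                     yield renderer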
class AggregateBuilder(Builder):
"""A base class for deriving specific builders performing aggregation with stats.
The typical AggregateBuilder takes a single dimension of values.
"""
values = Dimension('values')
default_attributes = {'label': CatAttr(),
'color': ColorAttr()}
|
{
"content_hash": "1de8f036b10d60b15d4daed7ae7fe150",
"timestamp": "",
"source": "github",
"line_count": 716,
"max_line_length": 124,
"avg_line_length": 38.279329608938546,
"alnum_prop": 0.6007005253940455,
"repo_name": "ptitjano/bokeh",
"id": "4bb1a7ee5a4a37d12b5eba1a25d51d6dc11e26aa",
"size": "27408",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bokeh/charts/builder.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1710"
},
{
"name": "CSS",
"bytes": "406989"
},
{
"name": "CoffeeScript",
"bytes": "1073573"
},
{
"name": "HTML",
"bytes": "45510"
},
{
"name": "JavaScript",
"bytes": "12173"
},
{
"name": "Jupyter Notebook",
"bytes": "3981"
},
{
"name": "Makefile",
"bytes": "1161"
},
{
"name": "Python",
"bytes": "2083050"
},
{
"name": "Shell",
"bytes": "15584"
},
{
"name": "TypeScript",
"bytes": "25843"
}
],
"symlink_target": ""
}
|
from telemetry.page import shared_page_state
from page_sets.rendering import rendering_story
from page_sets.rendering import story_tags
from page_sets.system_health import platforms
from page_sets.login_helpers import linkedin_login
class ToughPinchZoomMobilePage(rendering_story.RenderingStory):
ABSTRACT_STORY = True
SUPPORTED_PLATFORMS = platforms.MOBILE_ONLY
TAGS = [story_tags.TOUGH_PINCH_ZOOM_MOBILE]
def __init__(self,
page_set,
name_suffix='',
extra_browser_args=None,
shared_page_state_class=shared_page_state.SharedMobilePageState):
super(ToughPinchZoomMobilePage, self).__init__(
page_set=page_set,
name_suffix=name_suffix,
extra_browser_args=extra_browser_args,
shared_page_state_class=shared_page_state_class)
def RunPinchGesture(self, action_runner, left_anchor_ratio=0.5,
top_anchor_ratio=0.5, scale_factor=None,
speed_in_pixels_per_second=800):
with action_runner.CreateGestureInteraction('PinchAction', repeatable=True):
action_runner.PinchPage(
left_anchor_ratio=left_anchor_ratio,
top_anchor_ratio=top_anchor_ratio,
scale_factor=scale_factor,
speed_in_pixels_per_second=speed_in_pixels_per_second)
def RunPageInteractions(self, action_runner):
action_runner.tab.WaitForDocumentReadyStateToBeInteractiveOrBetter()
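    # Each pass pinches in by 7x, then pinches back out in successive
    # halving gestures until the cumulative scale returns to roughly 1x.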
for _ in range(3):
current_scale_factor = 7.0
self.RunPinchGesture(action_runner, scale_factor=current_scale_factor)
while current_scale_factor > 1.0:
current_scale_factor *= 1/2.0
self.RunPinchGesture(action_runner, scale_factor=1/2.0)
class GoogleSearchPinchZoomMobile2018Page(ToughPinchZoomMobilePage):
""" Why: top google property; a google tab is often open. """
BASE_NAME = 'google_search_mobile_pinch'
YEAR = '2018'
URL = 'https://www.google.com/#hl=en&q=barack+obama'
class LinkedinPinchZoomMobile2018Page(ToughPinchZoomMobilePage):
""" Why: #12 (Alexa global), Public profile."""
BASE_NAME = 'linkedin_mobile_pinch'
YEAR = '2018'
URL = 'http://www.linkedin.com/in/linustorvalds'
# Linkedin has expensive shader compilation so it can benefit from shader
# cache from reload.
def RunNavigateSteps(self, action_runner):
linkedin_login.LoginMobileAccount(action_runner, 'linkedin')
super(LinkedinPinchZoomMobile2018Page,
self).RunNavigateSteps(action_runner)
action_runner.WaitForJavaScriptCondition(
'document.getElementById("profile-wrapper") !== null')
class AccuWeatherPinchZoomMobile2018Page(ToughPinchZoomMobilePage):
""" Why: #2 weather according to Alexa."""
BASE_NAME = 'accu_weather_mobile_pinch'
YEAR = '2018'
URL = 'https://www.accuweather.com/en/us/new-york-ny/10017/weather-forecast/349727'
class TwitchPinchZoomMobile2018Page(ToughPinchZoomMobilePage):
""" Why: #1 games according to Alexa."""
BASE_NAME = 'twitch_mobile_pinch'
YEAR = '2018'
URL = 'https://www.twitch.tv/?no-mobile-redirect=true'
class CnnPinchZoomMobile2018Page(ToughPinchZoomMobilePage):
""" Why: #2 news worldwide."""
BASE_NAME = 'cnn_mobile_pinch'
YEAR = '2018'
URL = 'http://www.cnn.com/travel/article/airbus-a330-900-neo-tours-us-airports/index.html'
class EBayPinchZoomMobile2018Page(ToughPinchZoomMobilePage):
""" Why: #1 commerce website by time spent by users in US."""
BASE_NAME = 'ebay_mobile_pinch'
YEAR = '2018'
URL = 'http://www.ebay.com'
|
{
"content_hash": "b7cddf362fad5dcc0e845dce952def2b",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 92,
"avg_line_length": 37.83870967741935,
"alnum_prop": 0.7101449275362319,
"repo_name": "scheib/chromium",
"id": "0c6b25aeff3359d3bb74c6b1e2cf872abedb7ee9",
"size": "3681",
"binary": false,
"copies": "5",
"ref": "refs/heads/main",
"path": "tools/perf/page_sets/rendering/tough_pinch_zoom_mobile_cases.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
import itertools, operator
from .data import *
from .manager import *
import PIL.Image, PIL.ImageMath, PIL.ImageStat, PIL.ImageChops
##def position_raster(raster, width, height, coordspace_bbox):
## # GET COORDS OF ALL 4 VIEW SCREEN CORNERS
## xleft,ytop,xright,ybottom = coordspace_bbox
## viewcorners = [(xleft,ytop), (xleft,ybottom), (xright,ybottom), (xright,ytop)]
##
## # FIND PIXEL LOCS OF ALL THESE COORDS ON THE RASTER
## viewcorners_pixels = [raster.geo_to_cell(*point, fraction=True) for point in viewcorners]
## print viewcorners_pixels
## print "---"
##
## # ON RASTER, PERFORM QUAD TRANSFORM
## #(FROM VIEW SCREEN COORD CORNERS IN PIXELS TO RASTER COORD CORNERS IN PIXELS)
## flattened = [xory for point in viewcorners_pixels for xory in point]
## newraster = raster.copy()
## for grid in newraster.grids:
## grid.img = grid.img.transform((width,height), PIL.Image.QUAD,
## flattened, resample=PIL.Image.NEAREST)
## grid.cells = grid.img.load()
##
## return newraster
##
##def align_rasters(*rasters):
## for rast in rasters: print rast.bbox
## # resample to same dimensions of first raster (arbitrary)
## #rasters = [resample(rast, width=rasters[0].width, height=rasters[0].height)
## # for rast in rasters]
##
## # get coord bbox containing all rasters
## print rasters
## for rast in rasters: print rast.bbox
## xlefts,ytops,xrights,ybottoms = zip(*[rast.bbox for rast in rasters])
## if xlefts[0] < xrights[0]:
## xleft,xright = min(xlefts),max(xrights)
## else: xleft,xright = max(xlefts),min(xrights)
## if ytops[0] > ybottoms[0]:
## ytop,ybottom = max(ytops),min(ybottoms)
## else: ytop,ybottom = min(ytops),max(ybottoms)
##
## # get the required pixel dimensions (based on first raster, arbitrary)
## xs,ys = (xleft,xright),(ytop,ybottom)
## coordwidth,coordheight = max(xs)-min(xs), max(ys)-min(ys)
## rast = rasters[0]
## orig_xs,orig_ys = (rast.bbox[0],rast.bbox[2]),(rast.bbox[1],rast.bbox[3])
## orig_coordwidth,orig_coordheight = max(orig_xs)-min(orig_xs), max(orig_ys)-min(orig_ys)
## widthratio,heightratio = coordwidth/orig_coordwidth, coordheight/orig_coordheight
## reqwidth = int(round(rast.width*widthratio))
## reqheight = int(round(rast.height*heightratio))
##
## # position into same coordbbox
## aligned = []
## for rast in rasters:
## #rast.grids[0].img.save("C:/Users/kimo/Desktop/realpos.png")
## coordbbox = [xleft,ytop,xright,ybottom]
## print coordbbox
## positioned = position_raster(rast, reqwidth, reqheight, coordbbox)
## aligned.append(positioned)
## return aligned
# Zonal aggregation
def zonal_statistics(zonaldata, valuedata, zonalband=0, valueband=0, outstat="mean"):
#if isinstance(zonaldata, GeoTable):
#rasterize
#if isinstance(valuedata, GeoTable):
#rasterize
# get nullvalues
nullzone = zonaldata.info.get("nodata_value")
# position value grid into zonal grid
#(zonaldata,zonalmask),(valuedata,valuemask) = align_rasters(zonaldata, valuedata)
(valuedata,valuemask) = valuedata.positioned(zonaldata.width, zonaldata.height,
zonaldata.bbox)
# pick one image band for each
zonalimg = zonaldata.grids[zonalband].img
#zonalimg.save(r"C:\Users\kimo\Desktop\zones.png")
valueimg = valuedata.grids[valueband].img
#valueimg.save(r"C:\Users\kimo\Desktop\values879.png")
# create output image, using nullzone as nullvalue
outimg = PIL.Image.new("F", zonalimg.size, nullzone)
    #print zonalimg, outimg  # debug leftover
# get stats for each unique value in zonal data
zonevalues = [val for count,val in zonalimg.getcolors()]
zonesdict = {}
for zoneval in zonevalues:
# exclude nullzone
if zoneval == nullzone: continue
# simple
## zonemask = PIL.Image.eval(zonalimg, lambda px: 255 if px == zoneval else 0)
## stats = PIL.ImageStat.Stat(valueimg, zonemask)
## zonemask.save("C:/Users/kimo/Desktop/zonemasks/zoneval%i.png"%zoneval)
# mask only the current zone
zonemask = zonalimg.point(lambda px: 1 if px == zoneval else 0, "1")
fullmask = PIL.Image.new("1", zonemask.size, 0)
# also exclude null values from calculations
fullmask.paste(zonemask, valuemask)
#fullmask.save("C:/Users/kimo/Desktop/zonemasks/zoneval%i.png"%zoneval)
# retrieve stats
stats = PIL.ImageStat.Stat(valueimg, fullmask)
statsdict = {}
statsdict["min"],statsdict["max"] = stats.extrema[0]
for stattype in ("count","sum","mean","median","var","stddev"):
try: statsdict[stattype] = stats.__getattr__(stattype)[0]
except ZeroDivisionError: statsdict[stattype] = None
zonesdict[zoneval] = statsdict
# write chosen stat to outimg
print outstat,statsdict[outstat]
outimg.paste(statsdict[outstat], (0,0), zonemask)
# make outimg to raster
    #print outimg  # debug leftover
outraster = Raster(image=outimg, **zonaldata.info)
return zonesdict, outraster
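##A minimal usage sketch (illustrative; assumes both inputs are Raster
##objects from .data, however they were loaded):
##
## statsdict, meanraster = zonal_statistics(zones_raster, values_raster,
##                                          outstat="mean")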
# Raster math
def math(mathexpr, rasters):
print rasters
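    # NOTE: align_rasters is commented out at the top of this module; this
    # call assumes an implementation is supplied by the star imports above.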
rasters = align_rasters(*rasters)
# convert all nullvalues to zero before doing any math
for rast,mask in rasters:
nodata = rast.info.get("nodata_value")
for grid in rast:
if nodata != None:
grid.img = PIL.Image.eval(grid.img, lambda px: 0 if px == nodata else px)
# calculate math
# basic math + - * / ** %
# note: logical ops ~ & | ^ makes binary mask and return the pixel value where mask is valid
# note: relational ops < > == != return only binary mask
# note: other useful is min() and max(), equiv to (r1 < r2) | r2
rastersdict = dict([("raster%i"%(i+1),rast.grids[0].img)#.convert("F"))
for i,(rast,mask) in enumerate(rasters)])
print [img.mode for img in rastersdict.values()]
#img = PIL.ImageChops.logical_xor(*rastersdict.values()[:2])
img = PIL.ImageMath.eval(mathexpr, **rastersdict)
# should maybe create a combined mask of nullvalues for all rasters
# and filter away those nullcells from math result
# ...
# return result
print img.mode
firstrast,firstmask = rasters[0]
outraster = Raster(image=img, **firstrast.info)
return outraster
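# Hypothetical usage sketch (an assumption, not part of the original module):
# the names raster1, raster2, ... in the expression map to the input rasters
# in order, as built in rastersdict above.
## averaged = math("(raster1 + raster2) / 2", [rast_a, rast_b])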
##def logical(logicexpr, *rasters):
## # use PIL.ImageChops: invert(ie ~),difference(ie positive a-b),logical_and(ie &),logical_or(ie |),logical_xor(ie ^)
## # instead of lighter(ie >),darker(ie <), just use "min()" and "max()" inside ImageMath
## # custom approach required for == != >= <= (or maybe just use "equal()" or "notequal()" inside ImageMath)
## pass
def focal_statistics():
# set value based on focal neighbourhood cell values
    # e.g. 3x3 or 5x5 cells
# PIL.ImageFilter: min,max,mode(ie majority),median,rank
# manual calculation required for sum,minority,range,std,variety
pass
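# A minimal sketch of one focal statistic using PIL's built-in filters (an
# illustrative assumption, not the project's implementation, which is still
# a stub above; it also assumes PIL.ImageFilter is importable):
## def focal_median(raster, size=3, band=0):
##     img = raster.grids[band].img
##     filtered = img.filter(PIL.ImageFilter.MedianFilter(size))
##     return Raster(image=filtered, **raster.info)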
|
{
"content_hash": "ddb1bff9c3584ba19cfb20f0004cb280",
"timestamp": "",
"source": "github",
"line_count": 183,
"max_line_length": 121,
"avg_line_length": 39.10382513661202,
"alnum_prop": 0.6446338736724427,
"repo_name": "karimbahgat/PythonGis",
"id": "4388713d59f1dab795cc19ec7c0d1bbcf793fc57",
"size": "7157",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "(sandbox,tobemerged)/pythongis/raster/analyzer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "125"
},
{
"name": "HTML",
"bytes": "1979518"
},
{
"name": "Python",
"bytes": "1762972"
},
{
"name": "Tcl",
"bytes": "345753"
}
],
"symlink_target": ""
}
|
"""Widgets for Zinnia admin"""
import json
from itertools import chain
from django.contrib.admin import widgets
from django.contrib.staticfiles.storage import staticfiles_storage
from django.forms import Media
from django.utils.encoding import force_str
from django.utils.safestring import mark_safe
from tagging.models import Tag
from zinnia.models import Entry
class MPTTFilteredSelectMultiple(widgets.FilteredSelectMultiple):
"""
MPTT version of FilteredSelectMultiple.
"""
option_inherits_attrs = True
def __init__(self, verbose_name, is_stacked=False, attrs=None, choices=()):
"""
        Initializes the widget, not stacked by default.
"""
super(MPTTFilteredSelectMultiple, self).__init__(
verbose_name, is_stacked, attrs, choices)
def optgroups(self, name, value, attrs=None):
"""Return a list of optgroups for this widget."""
groups = []
has_selected = False
if attrs is None:
attrs = {}
for index, (option_value, option_label, sort_fields) in enumerate(
chain(self.choices)):
# Set tree attributes
attrs['data-tree-id'] = sort_fields[0]
attrs['data-left-value'] = sort_fields[1]
subgroup = []
subindex = None
choices = [(option_value, option_label)]
groups.append((None, subgroup, index))
for subvalue, sublabel in choices:
selected = (
force_str(subvalue) in value and
(has_selected is False or self.allow_multiple_selected)
)
if selected is True and has_selected is False:
has_selected = True
subgroup.append(self.create_option(
name, subvalue, sublabel, selected, index,
subindex=subindex, attrs=attrs,
))
return groups
@property
def media(self):
"""
MPTTFilteredSelectMultiple's Media.
"""
js = ['admin/js/core.js',
'zinnia/admin/mptt/js/mptt_m2m_selectbox.js',
'admin/js/SelectFilter2.js']
return Media(js=[staticfiles_storage.url(path) for path in js])
class TagAutoComplete(widgets.AdminTextInputWidget):
"""
Tag widget with autocompletion based on select2.
"""
def get_tags(self):
"""
Returns the list of tags to auto-complete.
"""
return [tag.name for tag in
Tag.objects.usage_for_model(Entry)]
def render(self, name, value, attrs=None, renderer=None):
"""
Render the default widget and initialize select2.
"""
output = [super(TagAutoComplete, self).render(name, value, attrs)]
output.append('<script type="text/javascript">')
output.append('(function($) {')
output.append(' $(document).ready(function() {')
output.append(' $("#id_%s").select2({' % name)
output.append(' width: "element",')
output.append(' maximumInputLength: 50,')
output.append(' tokenSeparators: [",", " "],')
output.append(' tags: %s' % json.dumps(self.get_tags()))
output.append(' });')
output.append(' });')
output.append('}(django.jQuery));')
output.append('</script>')
return mark_safe('\n'.join(output))
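    # Illustrative output (derived from the code above, not verbatim from the
    # original file): for name='tags' the rendered input is followed by a
    # script that calls $("#id_tags").select2({..., tags: [...]}) with the
    # existing tag names as the autocomplete source.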
@property
def media(self):
"""
TagAutoComplete's Media.
"""
def static(path):
return staticfiles_storage.url(
'zinnia/admin/select2/%s' % path)
return Media(
css={'all': (static('css/select2.css'),)},
js=(static('js/select2.js'),)
)
class MiniTextarea(widgets.AdminTextareaWidget):
"""
Vertically shorter version of the admin textarea widget.
"""
rows = 2
def __init__(self, attrs=None):
super(MiniTextarea, self).__init__(
{'rows': self.rows})
|
{
"content_hash": "1e6caa3aef29aed4f4562f0f4b34b704",
"timestamp": "",
"source": "github",
"line_count": 126,
"max_line_length": 79,
"avg_line_length": 32.03174603174603,
"alnum_prop": 0.5698711595639246,
"repo_name": "Fantomas42/django-blog-zinnia",
"id": "25a0a0aaef90b480d43fd4e7a89e5900834a0204",
"size": "4036",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "zinnia/admin/widgets.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "24255"
},
{
"name": "HTML",
"bytes": "78415"
},
{
"name": "JavaScript",
"bytes": "87448"
},
{
"name": "Makefile",
"bytes": "1774"
},
{
"name": "Python",
"bytes": "543985"
}
],
"symlink_target": ""
}
|
from django.contrib.sitemaps import Sitemap
from django.contrib.sites.models import Site
from django.db.models import Q
from django.utils import translation
from cms.models import Title
from cms.utils.i18n import get_public_languages
def from_iterable(iterables):
"""
Backport of itertools.chain.from_iterable
"""
for it in iterables:
for element in it:
yield element
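# Illustrative behaviour of the backport (not part of the original file):
# list(from_iterable([[1, 2], [3]])) == [1, 2, 3]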
class CMSSitemap(Sitemap):
changefreq = "monthly"
priority = 0.5
def items(self):
#
# It is counter-productive to provide entries for:
# > Pages which redirect:
# - If the page redirects to another page on this site, the
# destination page will already be in the sitemap, and
# - If the page redirects externally, then it shouldn't be
# part of our sitemap anyway.
# > Pages which cannot be accessed by anonymous users (like
# search engines are).
#
# It is noted here: http://www.sitemaps.org/protocol.html that
# "locations" that differ from the place where the sitemap is found,
# are considered invalid. E.g., if your sitemap is located here:
#
# http://example.com/sub/sitemap.xml
#
# valid locations *must* be rooted at http://example.com/sub/...
#
# This rules any redirected locations out.
#
# If, for some reason, you require redirecting pages (Titles) to be
# included, simply create a new class inheriting from this one, and
# supply a new items() method which doesn't filter out the redirects.
site = Site.objects.get_current()
languages = get_public_languages(site_id=site.pk)
all_titles = Title.objects.public().filter(
Q(redirect='') | Q(redirect__isnull=True),
language__in=languages,
page__login_required=False,
page__site=Site.objects.get_current(),
).order_by('page__path')
return all_titles
def lastmod(self, title):
modification_dates = [title.page.changed_date, title.page.publication_date]
plugins_for_placeholder = lambda placeholder: placeholder.get_plugins()
plugins = from_iterable(map(plugins_for_placeholder, title.page.placeholders.all()))
plugin_modification_dates = map(lambda plugin: plugin.changed_date, plugins)
modification_dates.extend(plugin_modification_dates)
return max(modification_dates)
def location(self, title):
translation.activate(title.language)
url = title.page.get_absolute_url(title.language)
translation.deactivate()
return url
|
{
"content_hash": "59b929228d8b8c2c0d899f5e4a4f08af",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 92,
"avg_line_length": 39.20289855072464,
"alnum_prop": 0.6395563770794824,
"repo_name": "FinalAngel/django-cms",
"id": "c0bf891f7b4cfdf141b8b400de46649fbdb76752",
"size": "2730",
"binary": false,
"copies": "1",
"ref": "refs/heads/release/3.4.x",
"path": "cms/sitemaps/cms_sitemap.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "143612"
},
{
"name": "HTML",
"bytes": "188625"
},
{
"name": "JavaScript",
"bytes": "1292263"
},
{
"name": "Python",
"bytes": "2302372"
},
{
"name": "Shell",
"bytes": "447"
}
],
"symlink_target": ""
}
|
"""
WSGI config for InfragramOnline project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "InfragramOnline.settings")
from django.core.wsgi import get_wsgi_application
from whitenoise.django import DjangoWhiteNoise
application = get_wsgi_application()
application = DjangoWhiteNoise(application)
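# Illustrative deployment note (an assumption, not part of the original file):
# any WSGI server can serve this module, e.g.
# gunicorn InfragramOnline.wsgi:application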
|
{
"content_hash": "a39b9dcd7ddab0783e94eed90d5ffac2",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 78,
"avg_line_length": 27.61111111111111,
"alnum_prop": 0.7987927565392354,
"repo_name": "smarani/InfraShareMobile",
"id": "749d09526c13514339ba15ce83875abb27b9cfe7",
"size": "497",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "InfraShare Online/InfragramOnline/wsgi.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "1234818"
},
{
"name": "Batchfile",
"bytes": "8390"
},
{
"name": "C",
"bytes": "4123807"
},
{
"name": "C++",
"bytes": "722333"
},
{
"name": "CMake",
"bytes": "44975"
},
{
"name": "CSS",
"bytes": "137033"
},
{
"name": "Groff",
"bytes": "35831"
},
{
"name": "HTML",
"bytes": "701103"
},
{
"name": "Java",
"bytes": "1876195"
},
{
"name": "JavaScript",
"bytes": "299770"
},
{
"name": "M4",
"bytes": "33806"
},
{
"name": "Makefile",
"bytes": "2462161"
},
{
"name": "Python",
"bytes": "1059005"
},
{
"name": "Shell",
"bytes": "279429"
}
],
"symlink_target": ""
}
|
import os
import sys
from termcolor import colored
GREP_COMMAND_FORMAT = 'xargs -n {0} {1} < {2}'
DEFAULT_GREP_COMMAND = 'grep --color=always \'{0}\''
def grep_files(args, all_logs):
if args.grep:
greplist_filename = '{0}/.greplist'.format(args.dest)
create_greplist(args, all_logs, greplist_filename)
command = grep_command(args, all_logs, greplist_filename)
        sys.stderr.write(colored('Running "{0}", this might take a minute'.format(command), 'blue') + '\n')
sys.stdout.write(os.popen(command).read() + '\n')
remove_greplist(greplist_filename)
sys.stderr.write(colored('Finished grep, exiting', 'green') + '\n')
def create_greplist(args, all_logs, greplist_filename):
greplist_file = open(greplist_filename, 'wb')
for log in all_logs:
greplist_file.write('{0}\n'.format(log))
greplist_file.close()
def remove_greplist(greplist_filename):
if os.path.isfile(greplist_filename):
os.remove(greplist_filename)
def grep_command(args, all_logs, greplist_filename):
if 'grep' in args.grep:
return GREP_COMMAND_FORMAT.format(len(all_logs), args.grep, greplist_filename)
else:
return GREP_COMMAND_FORMAT.format(len(all_logs), DEFAULT_GREP_COMMAND.format(args.grep), greplist_filename)
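# Illustrative expansion (not part of the original file): with three logs,
# args.grep == 'ERROR' and args.dest == '/tmp/logs', grep_command() returns
# xargs -n 3 grep --color=always 'ERROR' < /tmp/logs/.greplist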
|
{
"content_hash": "6d675bafb7167e4725a06fd762a33c84",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 111,
"avg_line_length": 38.6875,
"alnum_prop": 0.7051696284329564,
"repo_name": "mjball/Singularity",
"id": "7f0356cc85e28d2169552e77d76acfda5f316aef",
"size": "1238",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/logfetch/grep.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "18295"
},
{
"name": "CoffeeScript",
"bytes": "129494"
},
{
"name": "Java",
"bytes": "1215902"
},
{
"name": "JavaScript",
"bytes": "44273"
},
{
"name": "Python",
"bytes": "20125"
},
{
"name": "Ruby",
"bytes": "3005"
},
{
"name": "Shell",
"bytes": "10172"
}
],
"symlink_target": ""
}
|
"""Word completion for GNU readline.
The completer completes keywords, built-ins and globals in a selectable
namespace (which defaults to __main__); when completing NAME.NAME..., it
evaluates (!) the expression up to the last dot and completes its attributes.
It's very cool to do "import sys" type "sys.", hit the completion key (twice),
and see the list of names defined by the sys module!
Tip: to use the tab key as the completion key, call
readline.parse_and_bind("tab: complete")
Notes:
- Exceptions raised by the completer function are *ignored* (and generally cause
the completion to fail). This is a feature -- since readline sets the tty
device in raw (or cbreak) mode, printing a traceback wouldn't work well
without some complicated hoopla to save, reset and restore the tty state.
- The evaluation of the NAME.NAME... form may cause arbitrary application
defined code to be executed if an object with a __getattr__ hook is found.
Since it is the responsibility of the application (or the user) to enable this
feature, I consider this an acceptable risk. More complicated expressions
(e.g. function calls or indexing operations) are *not* evaluated.
- When the original stdin is not a tty device, GNU readline is never
used, and this module (and the readline module) are silently inactive.
"""
import atexit
import builtins
import __main__
__all__ = ["Completer"]
class Completer:
def __init__(self, namespace = None):
"""Create a new completer for the command line.
Completer([namespace]) -> completer instance.
If unspecified, the default namespace where completions are performed
is __main__ (technically, __main__.__dict__). Namespaces should be
given as dictionaries.
Completer instances should be used as the completion mechanism of
readline via the set_completer() call:
readline.set_completer(Completer(my_namespace).complete)
"""
if namespace and not isinstance(namespace, dict):
raise TypeError('namespace must be a dictionary')
# Don't bind to namespace quite yet, but flag whether the user wants a
# specific namespace or to use __main__.__dict__. This will allow us
# to bind to __main__.__dict__ at completion time, not now.
if namespace is None:
self.use_main_ns = 1
else:
self.use_main_ns = 0
self.namespace = namespace
def complete(self, text, state):
"""Return the next possible completion for 'text'.
This is called successively with state == 0, 1, 2, ... until it
returns None. The completion should begin with 'text'.
"""
if self.use_main_ns:
self.namespace = __main__.__dict__
if not text.strip():
if state == 0:
return '\t'
else:
return None
if state == 0:
if "." in text:
self.matches = self.attr_matches(text)
else:
self.matches = self.global_matches(text)
try:
return self.matches[state]
except IndexError:
return None
def _callable_postfix(self, val, word):
if callable(val):
word = word + "("
return word
def global_matches(self, text):
"""Compute matches when text is a simple name.
Return a list of all keywords, built-in functions and names currently
defined in self.namespace that match.
"""
import keyword
matches = []
n = len(text)
for word in keyword.kwlist:
if word[:n] == text:
matches.append(word)
for nspace in [builtins.__dict__, self.namespace]:
for word, val in nspace.items():
if word[:n] == text and word != "__builtins__":
matches.append(self._callable_postfix(val, word))
return matches
def attr_matches(self, text):
"""Compute matches when text contains a dot.
Assuming the text is of the form NAME.NAME....[NAME], and is
evaluable in self.namespace, it will be evaluated and its attributes
(as revealed by dir()) are used as possible completions. (For class
instances, class members are also considered.)
WARNING: this can still invoke arbitrary C code, if an object
with a __getattr__ hook is evaluated.
"""
import re
m = re.match(r"(\w+(\.\w+)*)\.(\w*)", text)
if not m:
return []
expr, attr = m.group(1, 3)
try:
thisobject = eval(expr, self.namespace)
except Exception:
return []
# get the content of the object, except __builtins__
words = set(dir(thisobject))
words.discard("__builtins__")
if hasattr(thisobject, '__class__'):
words.add('__class__')
words.update(get_class_members(thisobject.__class__))
matches = []
n = len(attr)
for word in words:
if word[:n] == attr:
try:
val = getattr(thisobject, word)
except Exception:
continue # Exclude properties that are not set
word = self._callable_postfix(val, "%s.%s" % (expr, word))
matches.append(word)
matches.sort()
return matches
def get_class_members(klass):
ret = dir(klass)
if hasattr(klass,'__bases__'):
for base in klass.__bases__:
ret = ret + get_class_members(base)
return ret
try:
import readline
except ImportError:
pass
else:
readline.set_completer(Completer().complete)
# Release references early at shutdown (the readline module's
# contents are quasi-immortal, and the completer function holds a
# reference to globals).
atexit.register(lambda: readline.set_completer(None))
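# Illustrative interactive use (an assumption, based on the module docstring):
# after `import rlcompleter` and readline.parse_and_bind("tab: complete"),
# typing sys.<TAB><TAB> at the prompt lists the sys module's attributes.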
|
{
"content_hash": "82ad61d07edea1e20fc282dc3e20ccd3",
"timestamp": "",
"source": "github",
"line_count": 174,
"max_line_length": 80,
"avg_line_length": 34.35057471264368,
"alnum_prop": 0.6063242429312364,
"repo_name": "sharhar/USB-Thing",
"id": "be8aee0f7e904f336e01bb294cbeed9cef07071a",
"size": "5977",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "UpdaterFiles/Lib/python-3.5.1.amd64/Lib/rlcompleter.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "5015"
},
{
"name": "C",
"bytes": "436714"
},
{
"name": "C#",
"bytes": "8440"
},
{
"name": "C++",
"bytes": "100530"
},
{
"name": "CSS",
"bytes": "96"
},
{
"name": "F#",
"bytes": "2310"
},
{
"name": "Forth",
"bytes": "506"
},
{
"name": "GLSL",
"bytes": "1040"
},
{
"name": "HTML",
"bytes": "41126"
},
{
"name": "Jupyter Notebook",
"bytes": "752587"
},
{
"name": "Makefile",
"bytes": "895"
},
{
"name": "Mask",
"bytes": "969"
},
{
"name": "PowerShell",
"bytes": "1372"
},
{
"name": "Python",
"bytes": "14041449"
},
{
"name": "Shell",
"bytes": "13559"
},
{
"name": "Tcl",
"bytes": "2173292"
}
],
"symlink_target": ""
}
|
import sys
import numpy as np
import pytest
import scipy as sp
from pygam import *
def test_LinearGAM_prediction(mcycle_X_y, mcycle_gam):
"""
    check that the predictions we get have the correct shape
"""
X, y = mcycle_X_y
preds = mcycle_gam.predict(X)
assert(preds.shape == y.shape)
def test_LogisticGAM_accuracy(default_X_y):
"""
check that we can compute accuracy correctly
"""
X, y = default_X_y
gam = LogisticGAM().fit(X, y)
preds = gam.predict(X)
acc0 = (preds == y).mean()
acc1 = gam.accuracy(X, y)
assert(acc0 == acc1)
def test_PoissonGAM_exposure(coal_X_y):
"""
check that we can fit a Poisson GAM with exposure, and it scales predictions
"""
X, y = coal_X_y
gam = PoissonGAM().fit(X, y, exposure=np.ones_like(y))
assert((gam.predict(X, exposure=np.ones_like(y)*2) == 2 *gam.predict(X)).all())
def test_PoissonGAM_loglike(coal_X_y):
"""
check that our loglikelihood is scaled by exposure
predictions that are twice as large with twice the exposure
should have lower loglikelihood
"""
X, y = coal_X_y
exposure = np.ones_like(y)
gam_high_var = PoissonGAM().fit(X, y * 2, exposure=exposure * 2)
gam_low_var = PoissonGAM().fit(X, y, exposure=exposure)
assert gam_high_var.loglikelihood(X, y * 2, exposure * 2) < gam_low_var.loglikelihood(X, y, exposure)
def test_large_GAM(coal_X_y):
"""
check that we can fit a GAM in py3 when we have more than 50,000 samples
"""
X = np.linspace(0, 100, 100000)
y = X**2
gam = LinearGAM().fit(X, y)
assert(gam._is_fitted)
def test_summary(mcycle_X_y, mcycle_gam):
"""
check that we can get a summary if we've fitted the model, else not
"""
X, y = mcycle_X_y
gam = LinearGAM()
try:
gam.summary()
except AttributeError:
assert(True)
mcycle_gam.summary()
assert(True)
def test_more_splines_than_samples(mcycle_X_y):
"""
    check that we can fit a model with more splines than samples
"""
X, y = mcycle_X_y
n = len(X)
gam = LinearGAM(s(0, n_splines=n+1)).fit(X, y)
assert(gam._is_fitted)
# TODO here is our bug:
# we cannot display the term-by-term effective DoF because we have fewer
# values than coefficients
assert len(gam.statistics_['edof_per_coef']) < len(gam.coef_)
gam.summary()
def test_deviance_residuals(mcycle_X_y, mcycle_gam):
"""
    for linear GAMs, the deviance residuals should be equal to y - y_pred
"""
X, y = mcycle_X_y
res = mcycle_gam.deviance_residuals(X, y)
err = y - mcycle_gam.predict(X)
assert((res == err).all())
def test_conf_intervals_return_array(mcycle_X_y, mcycle_gam):
"""
make sure that the confidence_intervals method returns an array
"""
X, y = mcycle_X_y
conf_ints = mcycle_gam.confidence_intervals(X)
assert(conf_ints.ndim == 2)
def test_conf_intervals_quantiles_width_interchangable(mcycle_X_y, mcycle_gam):
"""
getting confidence_intervals via width or specifying quantiles
should return the same result
"""
X, y = mcycle_X_y
conf_ints_a = mcycle_gam.confidence_intervals(X, width=.9)
conf_ints_b = mcycle_gam.confidence_intervals(X, quantiles=[.05, .95])
assert(np.allclose(conf_ints_a, conf_ints_b))
def test_conf_intervals_ordered(mcycle_X_y, mcycle_gam):
"""
    confidence intervals returned via width should be ordered
"""
X, y = mcycle_X_y
conf_ints = mcycle_gam.confidence_intervals(X)
assert((conf_ints[:,0] <= conf_ints[:,1]).all())
def test_summary_returns_24_lines(mcycle_gam):
"""
check that the summary method works and returns 24 lines like:
LinearGAM
=============================================== ==========================================================
Distribution: NormalDist Effective DoF: 11.2495
Link Function: IdentityLink Log Likelihood: -952.605
Number of Samples: 133 AIC: 1929.7091
AICc: 1932.4197
GCV: 605.6546
Scale: 514.2013
Pseudo R-Squared: 0.7969
==========================================================================================================
Feature Function Data Type Num Splines Spline Order Linear Fit Lambda P > x Sig. Code
================== ============== ============= ============= =========== ========== ========== ==========
feature 1 numerical 25 3 False 1.0 3.43e-03 **
intercept 6.85e-02 .
==========================================================================================================
Significance codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
WARNING: Fitting splines and a linear function to a feature introduces a model identifiability problem
which can cause p-values to appear significant when they are not.
WARNING: p-values calculated in this manner behave correctly for un-penalized models or models with
known smoothing parameters, but when smoothing parameters have been estimated, the p-values
are typically lower than they should be, meaning that the tests reject the null too readily.
"""
if sys.version_info.major == 2:
from StringIO import StringIO
if sys.version_info.major == 3:
from io import StringIO
stdout = sys.stdout #keep a handle on the real standard output
sys.stdout = StringIO() #Choose a file-like object to write to
mcycle_gam.summary()
assert(len(sys.stdout.getvalue().split('\n')) == 24)
def test_is_fitted_predict(mcycle_X_y):
"""
test predict requires fitted model
"""
X, y = mcycle_X_y
gam = LinearGAM()
with pytest.raises(AttributeError):
gam.predict(X)
def test_is_fitted_predict_mu(mcycle_X_y):
"""
test predict_mu requires fitted model
"""
X, y = mcycle_X_y
gam = LinearGAM()
with pytest.raises(AttributeError):
gam.predict_mu(X)
def test_is_fitted_dev_resid(mcycle_X_y):
"""
test deviance_residuals requires fitted model
"""
X, y = mcycle_X_y
gam = LinearGAM()
with pytest.raises(AttributeError):
gam.deviance_residuals(X, y)
def test_is_fitted_conf_intervals(mcycle_X_y):
"""
test confidence_intervals requires fitted model
"""
X, y = mcycle_X_y
gam = LinearGAM()
with pytest.raises(AttributeError):
gam.confidence_intervals(X)
def test_is_fitted_pdep(mcycle_X_y):
"""
test partial_dependence requires fitted model
"""
gam = LinearGAM()
with pytest.raises(AttributeError):
gam.partial_dependence(term=0)
def test_is_fitted_summary(mcycle_X_y):
"""
test summary requires fitted model
"""
X, y = mcycle_X_y
gam = LinearGAM()
with pytest.raises(AttributeError):
gam.summary()
def test_set_params_with_external_param():
"""
test set_params sets a real parameter
"""
gam = GAM(lam=1)
gam.set_params(lam=420)
assert(gam.lam == 420)
def test_set_params_with_phony_param():
"""
test set_params should not set any phony param
"""
gam = GAM()
gam.set_params(cat=420)
assert(not hasattr(gam, 'cat'))
def test_set_params_with_phony_param_force():
"""
test set_params can set phony params if we use the force=True
"""
gam = GAM()
assert(not hasattr(gam, 'cat'))
gam.set_params(cat=420, force=True)
assert(gam.cat == 420)
def test_get_params():
"""
test gam gets our params
"""
gam = GAM(lam=420)
params = gam.get_params()
assert(params['lam'] == 420)
class TestSamplingFromPosterior(object):
def test_drawing_samples_from_unfitted_model(self, mcycle_X_y, mcycle_gam):
X, y = mcycle_X_y
gam = LinearGAM()
with pytest.raises(AttributeError):
gam.sample(X, y)
with pytest.raises(AttributeError):
gam._sample_coef(X, y)
with pytest.raises(AttributeError):
gam._bootstrap_samples_of_smoothing(X, y)
assert mcycle_gam._is_fitted
mcycle_gam.sample(X, y, n_draws=2)
mcycle_gam._sample_coef(X, y, n_draws=2)
mcycle_gam._bootstrap_samples_of_smoothing(X, y, n_bootstraps=1)
assert True
def test_sample_quantity(self, mcycle_X_y, mcycle_gam):
X, y = mcycle_X_y
for quantity in ['coefficients', 'response']:
with pytest.raises(ValueError):
mcycle_gam.sample(X, y, quantity=quantity, n_draws=2)
for quantity in ['coef', 'mu', 'y']:
mcycle_gam.sample(X, y, quantity=quantity, n_draws=2)
assert True
def test_shape_of_random_samples(self, mcycle_X_y, mcycle_gam):
X, y = mcycle_X_y
n_samples = len(X)
n_draws = 5
sample_coef = mcycle_gam.sample(X, y, quantity='coef', n_draws=n_draws)
sample_mu = mcycle_gam.sample(X, y, quantity='mu', n_draws=n_draws)
sample_y = mcycle_gam.sample(X, y, quantity='y', n_draws=n_draws)
assert sample_coef.shape == (n_draws, len(mcycle_gam.coef_))
assert sample_mu.shape == (n_draws, n_samples)
assert sample_y.shape == (n_draws, n_samples)
n_samples_in_grid = 500
idxs = np.random.choice(np.arange(len(X)), n_samples_in_grid)
XX = X[idxs]
sample_coef = mcycle_gam.sample(X, y, quantity='coef', n_draws=n_draws,
sample_at_X=XX)
sample_mu = mcycle_gam.sample(X, y, quantity='mu', n_draws=n_draws,
sample_at_X=XX)
sample_y = mcycle_gam.sample(X, y, quantity='y', n_draws=n_draws,
sample_at_X=XX)
assert sample_coef.shape == (n_draws, len(mcycle_gam.coef_))
assert sample_mu.shape == (n_draws, n_samples_in_grid)
assert sample_y.shape == (n_draws, n_samples_in_grid)
def test_shape_bootstrap_samples_of_smoothing(self, mcycle_X_y, mcycle_gam):
X, y = mcycle_X_y
for n_bootstraps in [1, 2]:
coef_bootstraps, cov_bootstraps = (
mcycle_gam._bootstrap_samples_of_smoothing(
X, y, n_bootstraps=n_bootstraps))
assert len(coef_bootstraps) == len(cov_bootstraps) == n_bootstraps
for coef, cov in zip(coef_bootstraps, cov_bootstraps):
assert coef.shape == mcycle_gam.coef_.shape
assert cov.shape == mcycle_gam.statistics_['cov'].shape
for n_draws in [1, 2]:
coef_draws = mcycle_gam._simulate_coef_from_bootstraps(
n_draws, coef_bootstraps, cov_bootstraps)
assert coef_draws.shape == (n_draws, len(mcycle_gam.coef_))
def test_bad_sample_params(self, mcycle_X_y, mcycle_gam):
X, y = mcycle_X_y
with pytest.raises(ValueError):
mcycle_gam.sample(X, y, n_draws=0)
with pytest.raises(ValueError):
mcycle_gam.sample(X, y, n_bootstraps=0)
def test_prediction_interval_unknown_scale():
"""
the prediction intervals should be correct to a few decimal places
we test at a large sample limit, where the t distribution becomes normal
"""
n = 1000000
X = np.linspace(0,1,n)
y = np.random.randn(n)
gam_a = LinearGAM(terms=l(0)).fit(X, y)
gam_b = LinearGAM(s(0, n_splines=4)).fit(X, y)
XX = gam_a.generate_X_grid(term=0)
intervals_a = gam_a.prediction_intervals(XX, quantiles=[0.1, .9]).mean(axis=0)
intervals_b = gam_b.prediction_intervals(XX, quantiles=[0.1, .9]).mean(axis=0)
assert np.allclose(intervals_a[0], sp.stats.norm.ppf(0.1), atol=0.01)
assert np.allclose(intervals_a[1], sp.stats.norm.ppf(0.9), atol=0.01)
assert np.allclose(intervals_b[0], sp.stats.norm.ppf(0.1), atol=0.01)
assert np.allclose(intervals_b[1], sp.stats.norm.ppf(0.9), atol=0.01)
def test_prediction_interval_known_scale():
"""
the prediction intervals should be correct to a few decimal places
we test at a large sample limit.
"""
n = 1000000
X = np.linspace(0,1,n)
y = np.random.randn(n)
gam_a = LinearGAM(terms=l(0), scale=1.).fit(X, y)
gam_b = LinearGAM(s(0, n_splines=4), scale=1.).fit(X, y)
XX = gam_a.generate_X_grid(term=0)
intervals_a = gam_a.prediction_intervals(XX, quantiles=[0.1, .9]).mean(axis=0)
intervals_b = gam_b.prediction_intervals(XX, quantiles=[0.1, .9]).mean(axis=0)
assert np.allclose(intervals_a[0], sp.stats.norm.ppf(0.1), atol=0.01)
assert np.allclose(intervals_a[1], sp.stats.norm.ppf(0.9), atol=0.01)
assert np.allclose(intervals_b[0], sp.stats.norm.ppf(0.1), atol=0.01)
assert np.allclose(intervals_b[1], sp.stats.norm.ppf(0.9), atol=0.01)
def test_pvalue_rejects_useless_feature(wage_X_y):
"""
check that a p-value can reject a useless feature
"""
X, y = wage_X_y
    # add a useless feature
X = np.c_[X, np.arange(X.shape[0])]
gam = LinearGAM(s(0) + s(1) + f(2) + s(3)).fit(X, y)
# now do the test, with some safety
p_values = gam._estimate_p_values()
print(p_values)
assert(p_values[-2] > .5) # because -1 is intercept
def test_fit_quantile_is_close_enough(head_circumference_X_y):
"""see that we get close to the desired quantile
    and check that repeating the fit on an already-fitted model returns the same model
"""
X, y = head_circumference_X_y
quantile = 0.99
tol = 1e-4
gam = ExpectileGAM().fit_quantile(X, y, quantile=quantile, max_iter=20, tol=tol)
ratio = gam._get_quantile_ratio(X, y)
assert np.abs(ratio - quantile) <= tol
# now check if we had to refit
gam2 = gam.fit_quantile(X, y, quantile=quantile, max_iter=20, tol=tol)
assert gam == gam2
def test_fit_quantile_NOT_close_enough(head_circumference_X_y):
"""see that we DO NOT get close to the desired quantile
"""
X, y = head_circumference_X_y
quantile = 0.99
tol = 1e-5
gam = ExpectileGAM().fit_quantile(X, y, quantile=quantile, max_iter=1, tol=tol)
ratio = gam._get_quantile_ratio(X, y)
assert np.abs(ratio - quantile) > tol
def test_fit_quantile_raises_ValueError(head_circumference_X_y):
    """see that we DO NOT fit on bad argument requests
"""
X, y = head_circumference_X_y
with pytest.raises(ValueError):
ExpectileGAM().fit_quantile(X, y, quantile=0)
with pytest.raises(ValueError):
ExpectileGAM().fit_quantile(X, y, quantile=1)
with pytest.raises(ValueError):
ExpectileGAM().fit_quantile(X, y, quantile=-0.1)
with pytest.raises(ValueError):
ExpectileGAM().fit_quantile(X, y, quantile=1.1)
with pytest.raises(ValueError):
ExpectileGAM().fit_quantile(X, y, tol=0, quantile=0.5)
with pytest.raises(ValueError):
ExpectileGAM().fit_quantile(X, y, tol=-0.1, quantile=0.5)
with pytest.raises(ValueError):
ExpectileGAM().fit_quantile(X, y, max_iter=0, quantile=0.5)
with pytest.raises(ValueError):
ExpectileGAM().fit_quantile(X, y, max_iter=-1, quantile=0.5)
class TestRegressions(object):
def test_pvalue_invariant_to_scale(self, wage_X_y):
"""
regression test.
a bug made the F-statistic sensitive to scale changes, when it should be invariant.
        check that the p-values do not change when we change the scale of the response
"""
X, y = wage_X_y
gamA = LinearGAM(s(0) + s(1) + f(2)).fit(X, y * 1000000)
gamB = LinearGAM(s(0) + s(1) + f(2)).fit(X, y)
assert np.allclose(gamA.statistics_['p_values'], gamB.statistics_['p_values'])
def test_2d_y_still_allow_fitting_in_PoissonGAM(self, coal_X_y):
"""
regression test.
there was a bug where we forgot to check the y_array before converting
exposure to weights.
"""
X, y = coal_X_y
two_d_data = np.ones_like(y).ravel()[:, None]
# 2d y should cause no problems now
gam = PoissonGAM().fit(X, y[:, None])
assert gam._is_fitted
        # 2d weights should cause no problems now
gam = PoissonGAM().fit(X, y, weights=two_d_data)
assert gam._is_fitted
# 2d exposure should cause no problems now
gam = PoissonGAM().fit(X, y, exposure=two_d_data)
assert gam._is_fitted
def test_non_int_exposure_produced_no_inf_in_PoissonGAM_ll(self, coal_X_y):
"""
regression test.
there was a bug where we forgot to round the rescaled counts before
computing the loglikelihood. since Poisson requires integer observations,
small numerical errors caused the pmf to return -inf, which shows up
        in the loglikelihood computations, AIC, AICc, etc.
"""
X, y = coal_X_y
rate = 1.2 + np.cos(np.linspace(0, 2. * np.pi, len(y)))
gam = PoissonGAM().fit(X, y, exposure=rate)
assert np.isfinite(gam.statistics_['loglikelihood'])
    def test_initial_estimate_runs_for_int_observations(self, toy_classification_X_y):
"""
regression test
._initial_estimate would fail when trying to add small numbers to
integer observations
casting the observations to float in that method fixes that
"""
X, y = toy_classification_X_y
gam = LogisticGAM().fit(X, y)
assert gam._is_fitted
def test_r_squared_for_new_dataset(self, mcycle_gam, mcycle_X_y):
"""
regression test
        estimate r squared used to refer to a non-existent method when `mu=None`
"""
X, y = mcycle_X_y
mcycle_gam._estimate_r2(X, y)
def test_score_method(self, mcycle_gam, mcycle_X_y):
"""
regression test
        score returns the calculated r^2 for X data using the trained gam
"""
X, y = mcycle_X_y
assert mcycle_gam.score(X, y) <= 1
|
{
"content_hash": "bb3641e1fa417d11778fdcaeda36adfe",
"timestamp": "",
"source": "github",
"line_count": 542,
"max_line_length": 110,
"avg_line_length": 34.190036900369,
"alnum_prop": 0.5841023150396633,
"repo_name": "dswah/pyGAM",
"id": "f74999d52b2382a3834e09180ab26767c330e0dd",
"size": "18556",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pygam/tests/test_GAM_methods.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "331596"
}
],
"symlink_target": ""
}
|
from django.test import TestCase
from unittest import mock
from .. import factories
from .. import models
from .. import utils
@mock.patch.object(utils, 'operate')
class UndoOperateTests(TestCase):
def setUp(self):
self.game = factories.GameFactory(cash=1000)
self.start_entry = models.LogEntry.objects.create(game=self.game,
text='New game')
self.game.log_cursor = self.start_entry
self.game.save()
self.alice, self.bob, self.dave = factories.PlayerFactory.create_batch(
game=self.game, size=3)
self.company, self.company2 = factories.CompanyFactory.create_batch(
game=self.game, size=2)
factories.PlayerShareFactory(owner=self.alice, company=self.company,
shares=3)
factories.PlayerShareFactory(owner=self.bob, company=self.company,
shares=2)
factories.CompanyShareFactory(owner=self.company2,
company=self.company, shares=1)
def create_entry(self, **kwargs):
entry = models.LogEntry.objects.create(game=self.game,
action=models.LogEntry.OPERATE, acting_company=self.company,
**kwargs)
self.game.log_cursor = entry
self.game.save()
def test_can_undo_company_operating_full(self, mock_operate):
self.create_entry(mode=models.LogEntry.FULL, amount=10)
utils.undo(self.game)
mock_operate.assert_called_once_with(company=self.company, amount=-10,
method=utils.OperateMethod.FULL)
def test_can_undo_company_operating_half(self, mock_operate):
self.create_entry(mode=models.LogEntry.HALF, amount=20)
utils.undo(self.game)
mock_operate.assert_called_once_with(company=self.company, amount=-20,
method=utils.OperateMethod.HALF)
def test_can_undo_company_withholding(self, mock_operate):
self.create_entry(mode=models.LogEntry.WITHHOLD, amount=30)
utils.undo(self.game)
mock_operate.assert_called_once_with(company=self.company, amount=-30,
method=utils.OperateMethod.WITHHOLD)
def test_undoing_company_operating_full_returns_affected(self, mock):
self.create_entry(mode=models.LogEntry.FULL, amount=40)
affected = utils.undo(self.game)
self.assertEqual(affected['game'], self.game)
self.assertCountEqual(affected['players'], [self.alice, self.bob])
self.assertEqual(list(affected['companies']), [self.company2])
def test_undoing_company_operating_half_returns_affected(self, mock):
self.create_entry(mode=models.LogEntry.HALF, amount=50)
affected = utils.undo(self.game)
self.assertEqual(affected['game'], self.game)
self.assertCountEqual(affected['players'], [self.alice, self.bob])
self.assertCountEqual(affected['companies'],
[self.company, self.company2])
def test_undoing_company_withholding_returns_affected(self, mock):
self.create_entry(mode=models.LogEntry.WITHHOLD, amount=60)
affected = utils.undo(self.game)
self.assertEqual(affected['game'], self.game)
self.assertNotIn('players', affected.keys())
self.assertEqual(list(affected['companies']), [self.company])
@mock.patch.object(utils, 'operate')
class RedoOperateTests(TestCase):
def setUp(self):
self.game = factories.GameFactory(cash=1000)
self.start_entry = models.LogEntry.objects.create(game=self.game,
text='New game')
self.game.log_cursor = self.start_entry
self.game.save()
self.alice, self.bob, self.dave = factories.PlayerFactory.create_batch(
game=self.game, size=3)
self.company, self.company2 = factories.CompanyFactory.create_batch(
game=self.game, size=2)
factories.PlayerShareFactory(owner=self.alice, company=self.company,
shares=3)
factories.PlayerShareFactory(owner=self.bob, company=self.company,
shares=2)
factories.CompanyShareFactory(owner=self.company2,
company=self.company, shares=1)
def create_entry(self, **kwargs):
models.LogEntry.objects.create(game=self.game,
action=models.LogEntry.OPERATE, acting_company=self.company,
**kwargs)
def test_can_redo_company_operating_full(self, mock_operate):
self.create_entry(mode=models.LogEntry.FULL, amount=10)
utils.redo(self.game)
mock_operate.assert_called_once_with(company=self.company, amount=10,
method=utils.OperateMethod.FULL)
def test_can_redo_company_operating_half(self, mock_operate):
self.create_entry(mode=models.LogEntry.HALF, amount=20)
utils.redo(self.game)
mock_operate.assert_called_once_with(company=self.company, amount=20,
method=utils.OperateMethod.HALF)
def test_can_redo_company_withholding(self, mock_operate):
self.create_entry(mode=models.LogEntry.WITHHOLD, amount=30)
utils.redo(self.game)
mock_operate.assert_called_once_with(company=self.company, amount=30,
method=utils.OperateMethod.WITHHOLD)
def test_redoing_company_operating_full_returns_affected(self, mock):
self.create_entry(mode=models.LogEntry.FULL, amount=40)
affected = utils.redo(self.game)
self.assertEqual(affected['game'], self.game)
self.assertCountEqual(affected['players'], [self.alice, self.bob])
self.assertEqual(list(affected['companies']), [self.company2])
def test_redoing_company_operating_half_returns_affected(self, mock):
self.create_entry(mode=models.LogEntry.HALF, amount=50)
affected = utils.redo(self.game)
self.assertEqual(affected['game'], self.game)
self.assertCountEqual(affected['players'], [self.alice, self.bob])
self.assertCountEqual(affected['companies'],
[self.company, self.company2])
def test_redoing_company_withholding_returns_affected(self, mock):
self.create_entry(mode=models.LogEntry.WITHHOLD, amount=60)
affected = utils.redo(self.game)
self.assertEqual(affected['game'], self.game)
self.assertNotIn('players', affected.keys())
self.assertEqual(list(affected['companies']), [self.company])
|
{
"content_hash": "98604dfc1c69b9d22280e09caf950b9f",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 79,
"avg_line_length": 45.839416058394164,
"alnum_prop": 0.6756369426751593,
"repo_name": "XeryusTC/18xx-accountant",
"id": "ec05e33be24f5fb392e36d06a6ee635808ec505d",
"size": "6304",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "accountant/core/tests/test_undo_operate.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "56853"
},
{
"name": "HTML",
"bytes": "38371"
},
{
"name": "JavaScript",
"bytes": "1864"
},
{
"name": "Python",
"bytes": "595122"
},
{
"name": "Shell",
"bytes": "2580"
},
{
"name": "TypeScript",
"bytes": "163804"
}
],
"symlink_target": ""
}
|
"""empty message
Revision ID: c31f38f66b40
Revises:
Create Date: 2018-03-08 11:24:16.886091
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'c31f38f66b40'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('users',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('username', sa.String(length=80), nullable=False),
sa.Column('email', sa.String(length=80), nullable=False),
sa.Column('password', sa.Binary(), nullable=True),
sa.Column('created_at', sa.DateTime(), nullable=False),
sa.Column('first_name', sa.String(length=30), nullable=True),
sa.Column('last_name', sa.String(length=30), nullable=True),
sa.Column('has_image', sa.Boolean(), nullable=True),
sa.Column('active', sa.Boolean(), nullable=True),
sa.Column('is_admin', sa.Boolean(), nullable=True),
sa.Column('points', sa.Integer(), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('email'),
sa.UniqueConstraint('username')
)
op.create_table('actions',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('title', sa.String(length=200), nullable=False),
sa.Column('description', sa.Text(), nullable=False),
sa.Column('image_url', sa.Text(), nullable=True),
sa.Column('initial_nb_days', sa.Integer(), nullable=True),
sa.Column('public', sa.Boolean(), nullable=True),
sa.Column('is_event', sa.Boolean(), nullable=True),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('default_tag', sa.Text(), nullable=True),
sa.Column('creator_user_id', sa.Integer(), nullable=False),
sa.Column('nb_like', sa.BigInteger(), nullable=True),
sa.ForeignKeyConstraint(['creator_user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('roles',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=80), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name')
)
op.create_table('tags',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.Text(), nullable=False),
sa.Column('parent_id', sa.Integer(), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('rank', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['parent_id'], ['tags.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('action_like',
sa.Column('user_id', sa.Integer(), nullable=False),
sa.Column('action_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['action_id'], ['actions.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('user_id', 'action_id')
)
op.create_table('commentaries',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('content', sa.Text(), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=False),
sa.Column('action_id', sa.Integer(), nullable=False),
sa.Column('is_journal', sa.Boolean(), nullable=False),
sa.ForeignKeyConstraint(['action_id'], ['actions.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('resources',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=False),
sa.Column('action_id', sa.Integer(), nullable=False),
sa.Column('nblike', sa.BigInteger(), nullable=True),
sa.Column('url', sa.Text(), nullable=True),
sa.Column('content', sa.Text(), nullable=True),
sa.ForeignKeyConstraint(['action_id'], ['actions.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('user_actions',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=False),
sa.Column('action_id', sa.Integer(), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=False),
sa.Column('start_date', sa.DateTime(), nullable=False),
sa.Column('end_date', sa.DateTime(), nullable=False),
sa.Column('last_succeed', sa.DateTime(), nullable=True),
sa.Column('nb_succeed', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['action_id'], ['actions.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('resources_like',
sa.Column('user_id', sa.Integer(), nullable=False),
sa.Column('resource_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['resource_id'], ['resources.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('user_id', 'resource_id')
)
op.create_table('user_action_tag_mapping',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('user_action_id', sa.Integer(), nullable=False),
sa.Column('tag_slug', sa.Text(), nullable=True),
sa.ForeignKeyConstraint(['user_action_id'], ['user_actions.id'], ),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('user_action_tag_mapping')
op.drop_table('resources_like')
op.drop_table('user_actions')
op.drop_table('resources')
op.drop_table('commentaries')
op.drop_table('action_like')
op.drop_table('tags')
op.drop_table('roles')
op.drop_table('actions')
op.drop_table('users')
# ### end Alembic commands ###
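# Illustrative usage (an assumption, not part of the generated file): with a
# configured alembic.ini, apply this revision via `alembic upgrade head` and
# revert it via `alembic downgrade -1`.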
|
{
"content_hash": "507d758b4977785fff79620c4a11b64e",
"timestamp": "",
"source": "github",
"line_count": 141,
"max_line_length": 71,
"avg_line_length": 41.60283687943262,
"alnum_prop": 0.6462666212069553,
"repo_name": "jerkos/hozons",
"id": "6a44f71a4daa46d1d9e30313c8861195a4a1fe4b",
"size": "5866",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "migrations/versions/c31f38f66b40_.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "51456"
},
{
"name": "HTML",
"bytes": "22916"
},
{
"name": "JavaScript",
"bytes": "123823"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "69026"
}
],
"symlink_target": ""
}
|
import datetime
import iso8601
from oslo_utils import timeutils
import six
import six.moves.urllib.parse as urlparse
from webob import exc
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import exception
from nova.i18n import _
from nova import objects
ALIAS = "os-simple-tenant-usage"
authorize = extensions.os_compute_authorizer(ALIAS)
def parse_strtime(dstr, fmt):
try:
return timeutils.parse_strtime(dstr, fmt)
except (TypeError, ValueError) as e:
raise exception.InvalidStrTime(reason=six.text_type(e))
class SimpleTenantUsageController(wsgi.Controller):
def _hours_for(self, instance, period_start, period_stop):
launched_at = instance.launched_at
terminated_at = instance.terminated_at
if terminated_at is not None:
if not isinstance(terminated_at, datetime.datetime):
# NOTE(mriedem): Instance object DateTime fields are
# timezone-aware so convert using isotime.
terminated_at = timeutils.parse_isotime(terminated_at)
if launched_at is not None:
if not isinstance(launched_at, datetime.datetime):
launched_at = timeutils.parse_isotime(launched_at)
if terminated_at and terminated_at < period_start:
return 0
# nothing if it started after the usage report ended
if launched_at and launched_at > period_stop:
return 0
if launched_at:
            # if instance launched after period_start, don't charge for the time before launch
start = max(launched_at, period_start)
if terminated_at:
# if instance stopped before period_stop, don't charge after
stop = min(period_stop, terminated_at)
else:
# instance is still running, so charge them up to current time
stop = period_stop
dt = stop - start
            seconds = (dt.days * 3600 * 24 + dt.seconds +
                       dt.microseconds / 1000000.0)  # 1e6 microseconds per second
return seconds / 3600.0
else:
# instance hasn't launched, so no charge
return 0
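    # Worked example (illustrative): over a two-hour reporting window, an
    # instance launched 30 minutes after period_start and still running at
    # period_stop is billed for stop - start = 5400 seconds = 1.5 hours.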
def _get_flavor(self, context, instance, flavors_cache):
"""Get flavor information from the instance object,
allowing a fallback to lookup by-id for deleted instances only.
"""
try:
return instance.get_flavor()
except exception.NotFound:
if not instance.deleted:
# Only support the fallback mechanism for deleted instances
# that would have been skipped by migration #153
raise
flavor_type = instance.instance_type_id
if flavor_type in flavors_cache:
return flavors_cache[flavor_type]
try:
flavor_ref = objects.Flavor.get_by_id(context, flavor_type)
flavors_cache[flavor_type] = flavor_ref
except exception.FlavorNotFound:
# can't bill if there is no flavor
flavor_ref = None
return flavor_ref
def _tenant_usages_for_period(self, context, period_start,
period_stop, tenant_id=None, detailed=True):
instances = objects.InstanceList.get_active_by_window_joined(
context, period_start, period_stop, tenant_id,
expected_attrs=['flavor'])
rval = {}
flavors = {}
for instance in instances:
info = {}
info['hours'] = self._hours_for(instance,
period_start,
period_stop)
flavor = self._get_flavor(context, instance, flavors)
if not flavor:
info['flavor'] = ''
else:
info['flavor'] = flavor.name
info['instance_id'] = instance.uuid
info['name'] = instance.display_name
info['memory_mb'] = instance.memory_mb
info['local_gb'] = instance.root_gb + instance.ephemeral_gb
info['vcpus'] = instance.vcpus
info['tenant_id'] = instance.project_id
# NOTE(mriedem): We need to normalize the start/end times back
# to timezone-naive so the response doesn't change after the
# conversion to objects.
info['started_at'] = timeutils.normalize_time(instance.launched_at)
info['ended_at'] = (
timeutils.normalize_time(instance.terminated_at) if
instance.terminated_at else None)
if info['ended_at']:
info['state'] = 'terminated'
else:
info['state'] = instance.vm_state
now = timeutils.utcnow()
if info['state'] == 'terminated':
delta = info['ended_at'] - info['started_at']
else:
delta = now - info['started_at']
info['uptime'] = delta.days * 24 * 3600 + delta.seconds
if info['tenant_id'] not in rval:
summary = {}
summary['tenant_id'] = info['tenant_id']
if detailed:
summary['server_usages'] = []
summary['total_local_gb_usage'] = 0
summary['total_vcpus_usage'] = 0
summary['total_memory_mb_usage'] = 0
summary['total_hours'] = 0
summary['start'] = timeutils.normalize_time(period_start)
summary['stop'] = timeutils.normalize_time(period_stop)
rval[info['tenant_id']] = summary
summary = rval[info['tenant_id']]
summary['total_local_gb_usage'] += info['local_gb'] * info['hours']
summary['total_vcpus_usage'] += info['vcpus'] * info['hours']
summary['total_memory_mb_usage'] += (info['memory_mb'] *
info['hours'])
summary['total_hours'] += info['hours']
if detailed:
summary['server_usages'].append(info)
return rval.values()
def _parse_datetime(self, dtstr):
if not dtstr:
value = timeutils.utcnow()
elif isinstance(dtstr, datetime.datetime):
value = dtstr
else:
for fmt in ["%Y-%m-%dT%H:%M:%S",
"%Y-%m-%dT%H:%M:%S.%f",
"%Y-%m-%d %H:%M:%S.%f"]:
try:
value = parse_strtime(dtstr, fmt)
break
except exception.InvalidStrTime:
pass
else:
msg = _("Datetime is in invalid format")
raise exception.InvalidStrTime(reason=msg)
# NOTE(mriedem): Instance object DateTime fields are timezone-aware
# so we have to force UTC timezone for comparing this datetime against
# instance object fields and still maintain backwards compatibility
# in the API.
if value.utcoffset() is None:
value = value.replace(tzinfo=iso8601.iso8601.Utc())
return value
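    # Accepted inputs (illustrative, derived from the format list above):
    # '2015-01-02T03:04:05', '2015-01-02T03:04:05.000001' and
    # '2015-01-02 03:04:05.000001' all parse; a missing value defaults to the
    # current UTC time, and naive values are forced to UTC.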
def _get_datetime_range(self, req):
qs = req.environ.get('QUERY_STRING', '')
env = urlparse.parse_qs(qs)
# NOTE(lzyeval): env.get() always returns a list
period_start = self._parse_datetime(env.get('start', [None])[0])
period_stop = self._parse_datetime(env.get('end', [None])[0])
if not period_start < period_stop:
msg = _("Invalid start time. The start time cannot occur after "
"the end time.")
raise exc.HTTPBadRequest(explanation=msg)
detailed = env.get('detailed', ['0'])[0] == '1'
return (period_start, period_stop, detailed)
@extensions.expected_errors(400)
def index(self, req):
"""Retrieve tenant_usage for all tenants."""
context = req.environ['nova.context']
authorize(context, action='list')
try:
(period_start, period_stop, detailed) = self._get_datetime_range(
req)
except exception.InvalidStrTime as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
now = timeutils.parse_isotime(timeutils.utcnow().isoformat())
if period_stop > now:
period_stop = now
usages = self._tenant_usages_for_period(context,
period_start,
period_stop,
detailed=detailed)
return {'tenant_usages': usages}
@extensions.expected_errors(400)
def show(self, req, id):
"""Retrieve tenant_usage for a specified tenant."""
tenant_id = id
context = req.environ['nova.context']
authorize(context, action='show', target={'project_id': tenant_id})
try:
(period_start, period_stop, ignore) = self._get_datetime_range(
req)
except exception.InvalidStrTime as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
now = timeutils.parse_isotime(timeutils.utcnow().isoformat())
if period_stop > now:
period_stop = now
usage = self._tenant_usages_for_period(context,
period_start,
period_stop,
tenant_id=tenant_id,
detailed=True)
if len(usage):
usage = usage[0]
else:
usage = {}
return {'tenant_usage': usage}
class SimpleTenantUsage(extensions.V21APIExtensionBase):
"""Simple tenant usage extension."""
name = "SimpleTenantUsage"
alias = ALIAS
version = 1
def get_resources(self):
resources = []
res = extensions.ResourceExtension(ALIAS,
SimpleTenantUsageController())
resources.append(res)
return resources
def get_controller_extensions(self):
return []
|
{
"content_hash": "39b93284b77f20e5121b3443955adeda",
"timestamp": "",
"source": "github",
"line_count": 274,
"max_line_length": 79,
"avg_line_length": 37.25182481751825,
"alnum_prop": 0.543842461056138,
"repo_name": "apporc/nova",
"id": "e9a35ee28dfd1d4140061545cf05357c0ab6934c",
"size": "10843",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nova/api/openstack/compute/simple_tenant_usage.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "16560867"
},
{
"name": "Shell",
"bytes": "24210"
},
{
"name": "Smarty",
"bytes": "335237"
}
],
"symlink_target": ""
}
|
import numpy as np
import onnx
from onnx import AttributeProto, GraphProto, OperatorSetIdProto, TensorProto, helper, numpy_helper
hidden_size = 4
weight_dim_to_split = 16
X = helper.make_tensor_value_info("input", TensorProto.FLOAT, ["batch", "seqlen", hidden_size])
Y = helper.make_tensor_value_info("output", TensorProto.FLOAT, ["batch", "seqlen", hidden_size])
a_weight_np_vals = (0.01 * np.arange(hidden_size * weight_dim_to_split, dtype=np.float32)).reshape(
(weight_dim_to_split, hidden_size)
)
a_weight_initializer = numpy_helper.from_array(
a_weight_np_vals, "encoder.t5_stack.block.1.layer.1.DenseReluDense.wi.weight"
)
a_bias_np_vals = 0.01 * np.arange(weight_dim_to_split, dtype=np.float32) # weight_dim_to_split numbers in total
a_bias_initializer = numpy_helper.from_array(a_bias_np_vals, "encoder.t5_stack.block.1.layer.1.DenseReluDense.wi.bias")
dropout_np_vals = np.asarray([0.1], dtype=np.float32).reshape(())
dropout_initializer = numpy_helper.from_array(dropout_np_vals, "ratio")
dropout_mode_np_vals = np.array([False], dtype=bool).reshape(())  # np.bool is removed in modern NumPy
dropout_mode_initializer = numpy_helper.from_array(dropout_mode_np_vals, "mode")
b_weight_np_vals = (0.01 * np.arange(hidden_size * weight_dim_to_split, dtype=np.float32)).reshape(
(hidden_size, weight_dim_to_split)
)
b_weight_initializer = numpy_helper.from_array(
b_weight_np_vals, "encoder.t5_stack.block.1.layer.1.DenseReluDense.wo.weight"
)
b_bias_np_vals = 0.01 * np.arange(hidden_size, dtype=np.float32) # hidden_size numbers in total
b_bias_initializer = numpy_helper.from_array(b_bias_np_vals, "encoder.t5_stack.block.1.layer.1.DenseReluDense.wo.bias")
transpose1 = helper.make_node(
"Transpose",
[a_weight_initializer.name],
["transpose1"],
name="transpose1",
perm=[1, 0],
)
transpose2 = helper.make_node(
"Transpose",
[b_weight_initializer.name],
["transpose2"],
name="transpose2",
perm=[1, 0],
)
matmul = helper.make_node(
"MatMul", # node name
["input", "transpose1"], # inputs
["matmul"], # outputs
name="matmul",
)
biasgelu = helper.make_node(
"BiasGelu", # node name
["matmul", a_bias_initializer.name], # inputs
["biasgelu"], # outputs
name="biasgelu",
domain="com.microsoft",
)
dropout1 = helper.make_node(
"Dropout",
["biasgelu", dropout_initializer.name, dropout_mode_initializer.name],
["dropout1", "dropout1_mask"],
name="dropout1",
)
matmul2 = helper.make_node(
"MatMul", # node name
["dropout1", "transpose2"], # inputs
["matmul2"], # outputs
name="matmul2",
)
add = helper.make_node(
"Add", # node name
["matmul2", b_bias_initializer.name], # inputs
["add"], # outputs
name="add",
)
dropout2 = helper.make_node(
"Dropout",
["add", dropout_initializer.name, dropout_mode_initializer.name],
["dropout2", "dropout2_mask"],
name="dropout2",
)
identity = helper.make_node(
"Identity", # node name
["dropout2"], # inputs
["output"], # outputs
name="identity",
)
# Create the graph (GraphProto)
graph_def = helper.make_graph(
[
transpose1,
transpose2,
matmul,
biasgelu,
dropout1,
matmul2,
add,
dropout2,
identity,
],
"test-model",
[X],
[Y],
[
a_weight_initializer,
a_bias_initializer,
b_weight_initializer,
b_bias_initializer,
dropout_initializer,
dropout_mode_initializer,
],
)
opsets = []
onnxdomain = OperatorSetIdProto()
onnxdomain.version = 12
onnxdomain.domain = "" # The empty string ("") or absence of this field implies the operator set that is defined as part of the ONNX specification.
opsets.append(onnxdomain)
msdomain = OperatorSetIdProto()
msdomain.version = 1
msdomain.domain = "com.microsoft"
opsets.append(msdomain)
kwargs = {}
kwargs["opset_imports"] = opsets
# Create the model (ModelProto)
model_def = helper.make_model(graph_def, producer_name="onnx-example", **kwargs)
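# Optional sanity check before saving -- a minimal sketch, assuming the
# standard onnx checker API is available:
onnx.checker.check_model(model_def)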
onnx.save(model_def, "bart_mlp_megatron_basic_test.onnx")
|
{
"content_hash": "f7b5b20f5bbfab6b1811ab789e9dbf07",
"timestamp": "",
"source": "github",
"line_count": 144,
"max_line_length": 148,
"avg_line_length": 28.430555555555557,
"alnum_prop": 0.6629213483146067,
"repo_name": "microsoft/onnxruntime",
"id": "8f8798750f56fe1861e406f9eb1a6e9d1311a26e",
"size": "4094",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "onnxruntime/test/testdata/transform/model_parallel/bart_mlp_megatron_basic_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "1763425"
},
{
"name": "Batchfile",
"bytes": "17040"
},
{
"name": "C",
"bytes": "955390"
},
{
"name": "C#",
"bytes": "2304597"
},
{
"name": "C++",
"bytes": "39435305"
},
{
"name": "CMake",
"bytes": "514764"
},
{
"name": "CSS",
"bytes": "138431"
},
{
"name": "Cuda",
"bytes": "1104338"
},
{
"name": "Dockerfile",
"bytes": "8089"
},
{
"name": "HLSL",
"bytes": "11234"
},
{
"name": "HTML",
"bytes": "5933"
},
{
"name": "Java",
"bytes": "418665"
},
{
"name": "JavaScript",
"bytes": "212575"
},
{
"name": "Jupyter Notebook",
"bytes": "218327"
},
{
"name": "Kotlin",
"bytes": "4653"
},
{
"name": "Liquid",
"bytes": "5457"
},
{
"name": "NASL",
"bytes": "2628"
},
{
"name": "Objective-C",
"bytes": "151027"
},
{
"name": "Objective-C++",
"bytes": "107084"
},
{
"name": "Pascal",
"bytes": "9597"
},
{
"name": "PowerShell",
"bytes": "16419"
},
{
"name": "Python",
"bytes": "5041661"
},
{
"name": "Roff",
"bytes": "27539"
},
{
"name": "Ruby",
"bytes": "3545"
},
{
"name": "Shell",
"bytes": "116513"
},
{
"name": "Swift",
"bytes": "115"
},
{
"name": "TypeScript",
"bytes": "973087"
}
],
"symlink_target": ""
}
|
"""Upgrader for Python scripts from 1.x TensorFlow to 2.0 TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import six
from tensorflow.tools.compatibility import ast_edits
from tensorflow.tools.compatibility import ipynb
from tensorflow.tools.compatibility import tf_upgrade_v2
from tensorflow.tools.compatibility import tf_upgrade_v2_safety
# Make straightforward changes to convert to 2.0. In harder cases,
# use compat.v1.
_DEFAULT_MODE = "DEFAULT"
# Convert to use compat.v1.
_SAFETY_MODE = "SAFETY"
# Whether to rename to compat.v2
_IMPORT_RENAME_DEFAULT = False
def process_file(in_filename, out_filename, upgrader):
"""Process a file of type `.py` or `.ipynb`."""
if six.ensure_str(in_filename).endswith(".py"):
files_processed, report_text, errors = \
upgrader.process_file(in_filename, out_filename)
elif six.ensure_str(in_filename).endswith(".ipynb"):
files_processed, report_text, errors = \
ipynb.process_file(in_filename, out_filename, upgrader)
else:
    raise NotImplementedError(
        "Currently the converter only supports .py or .ipynb files")
return files_processed, report_text, errors
def main():
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description="""Convert a TensorFlow Python file from 1.x to 2.0
Simple usage:
tf_upgrade_v2.py --infile foo.py --outfile bar.py
tf_upgrade_v2.py --infile foo.ipynb --outfile bar.ipynb
tf_upgrade_v2.py --intree ~/code/old --outtree ~/code/new
""")
parser.add_argument(
"--infile",
dest="input_file",
help="If converting a single file, the name of the file "
"to convert")
parser.add_argument(
"--outfile",
dest="output_file",
help="If converting a single file, the output filename.")
parser.add_argument(
"--intree",
dest="input_tree",
help="If converting a whole tree of files, the directory "
"to read from (relative or absolute).")
parser.add_argument(
"--outtree",
dest="output_tree",
help="If converting a whole tree of files, the output "
"directory (relative or absolute).")
parser.add_argument(
"--copyotherfiles",
dest="copy_other_files",
help=("If converting a whole tree of files, whether to "
"copy the other files."),
type=bool,
default=True)
parser.add_argument(
"--inplace",
dest="in_place",
help=("If converting a set of files, whether to "
"allow the conversion to be performed on the "
"input files."),
action="store_true")
parser.add_argument(
"--no_import_rename",
dest="no_import_rename",
help=("Not to rename import to compat.v2 explicitly."),
action="store_true")
parser.add_argument(
"--no_upgrade_compat_v1_import",
dest="no_upgrade_compat_v1_import",
help=("If specified, don't upgrade explicit imports of "
"`tensorflow.compat.v1 as tf` to the v2 apis. Otherwise, "
"explicit imports of the form `tensorflow.compat.v1 as tf` will "
"be upgraded."),
action="store_true")
parser.add_argument(
"--reportfile",
dest="report_filename",
help=("The name of the file where the report log is "
"stored."
"(default: %(default)s)"),
default="report.txt")
parser.add_argument(
"--mode",
dest="mode",
choices=[_DEFAULT_MODE, _SAFETY_MODE],
help=("Upgrade script mode. Supported modes:\n"
"%s: Perform only straightforward conversions to upgrade to "
"2.0. In more difficult cases, switch to use compat.v1.\n"
"%s: Keep 1.* code intact and import compat.v1 "
"module." %
(_DEFAULT_MODE, _SAFETY_MODE)),
default=_DEFAULT_MODE)
parser.add_argument(
"--print_all",
dest="print_all",
help="Print full log to stdout instead of just printing errors",
action="store_true")
args = parser.parse_args()
if args.mode == _SAFETY_MODE:
change_spec = tf_upgrade_v2_safety.TFAPIChangeSpec()
else:
if args.no_import_rename:
change_spec = tf_upgrade_v2.TFAPIChangeSpec(
import_rename=False,
upgrade_compat_v1_import=not args.no_upgrade_compat_v1_import)
else:
change_spec = tf_upgrade_v2.TFAPIChangeSpec(
import_rename=_IMPORT_RENAME_DEFAULT,
upgrade_compat_v1_import=not args.no_upgrade_compat_v1_import)
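  # ASTCodeUpgrader walks each input file's AST and applies the selected
  # change spec.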
upgrade = ast_edits.ASTCodeUpgrader(change_spec)
report_text = None
report_filename = args.report_filename
files_processed = 0
if args.input_file:
if not args.in_place and not args.output_file:
raise ValueError(
"--outfile=<output file> argument is required when converting a "
"single file.")
if args.in_place and args.output_file:
raise ValueError(
"--outfile argument is invalid when when converting in place")
output_file = args.input_file if args.in_place else args.output_file
files_processed, report_text, errors = process_file(
args.input_file, output_file, upgrade)
errors = {args.input_file: errors}
files_processed = 1
elif args.input_tree:
if not args.in_place and not args.output_tree:
raise ValueError(
"--outtree=<output directory> argument is required when converting a "
"file tree.")
if args.in_place and args.output_tree:
raise ValueError(
"--outtree argument is invalid when when converting in place")
output_tree = args.input_tree if args.in_place else args.output_tree
files_processed, report_text, errors = upgrade.process_tree(
args.input_tree, output_tree, args.copy_other_files)
else:
parser.print_help()
if report_text:
num_errors = 0
report = []
for f in errors:
if errors[f]:
num_errors += len(errors[f])
report.append(six.ensure_str("-" * 80) + "\n")
report.append("File: %s\n" % f)
report.append(six.ensure_str("-" * 80) + "\n")
report.append("\n".join(errors[f]) + "\n")
report = ("TensorFlow 2.0 Upgrade Script\n"
"-----------------------------\n"
"Converted %d files\n" % files_processed +
"Detected %d issues that require attention" % num_errors + "\n" +
six.ensure_str("-" * 80) + "\n") + "".join(report)
detailed_report_header = six.ensure_str("=" * 80) + "\n"
detailed_report_header += "Detailed log follows:\n\n"
detailed_report_header += six.ensure_str("=" * 80) + "\n"
with open(report_filename, "w") as report_file:
report_file.write(report)
report_file.write(detailed_report_header)
report_file.write(six.ensure_str(report_text))
if args.print_all:
print(report)
print(detailed_report_header)
print(report_text)
else:
print(report)
print("\nMake sure to read the detailed log %r\n" % report_filename)
if __name__ == "__main__":
main()
|
{
"content_hash": "ba14af12635656e33a79203b4638f0b2",
"timestamp": "",
"source": "github",
"line_count": 200,
"max_line_length": 80,
"avg_line_length": 35.545,
"alnum_prop": 0.6363764242509495,
"repo_name": "aldian/tensorflow",
"id": "7c7461c19da5b8067181df0218fb4e5075895694",
"size": "7826",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tensorflow/tools/compatibility/tf_upgrade_v2_main.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "8458"
},
{
"name": "C",
"bytes": "201402"
},
{
"name": "C++",
"bytes": "29667924"
},
{
"name": "CMake",
"bytes": "647100"
},
{
"name": "Go",
"bytes": "976514"
},
{
"name": "Java",
"bytes": "412117"
},
{
"name": "Jupyter Notebook",
"bytes": "1833675"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "38128"
},
{
"name": "Objective-C",
"bytes": "7056"
},
{
"name": "Objective-C++",
"bytes": "63210"
},
{
"name": "Perl",
"bytes": "6715"
},
{
"name": "Protocol Buffer",
"bytes": "275733"
},
{
"name": "PureBasic",
"bytes": "24932"
},
{
"name": "Python",
"bytes": "26424665"
},
{
"name": "Ruby",
"bytes": "327"
},
{
"name": "Shell",
"bytes": "373109"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import re
import sys
from setuptools import find_packages, setup
def get_version(package):
"""
    Return package version as listed in `__version__` in `__init__.py`.
"""
init_py = open(os.path.join(package, '__init__.py')).read()
return re.search("__version__ = ['\"]([^'\"]+)['\"]", init_py).group(1)
version = get_version('django_perf_rec')
if sys.argv[-1] == 'publish':
if os.system("pip freeze | grep twine"):
print("twine not installed.\nUse `pip install twine`.\nExiting.")
sys.exit()
os.system("python setup.py sdist bdist_wheel")
os.system("twine upload dist/*")
print("You probably want to also tag the version now:")
print(" git tag -a %s -m 'version %s'" % (version, version))
print(" git push --tags")
sys.exit()
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read().replace('.. :changelog:', '')
setup(
name='django-perf-rec',
version=version,
description="Keep detailed records of the performance of your Django "
"code.",
long_description=readme + '\n\n' + history,
author='YPlan',
author_email='adam@yplanapp.com',
url='https://github.com/YPlan/django-perf-rec',
packages=find_packages(exclude=['tests', 'tests.*']),
include_package_data=True,
install_requires=[
'Django',
'patchy',
'PyYAML',
'six',
'sqlparse>=0.2.0',
],
license='MIT',
zip_safe=False,
keywords='Django',
entry_points={
'pytest11': ['django_perf_rec = django_perf_rec.pytest_plugin'],
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Framework :: Django :: 1.8',
'Framework :: Django :: 1.9',
'Framework :: Django :: 1.10',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
)
|
{
"content_hash": "b6d485de680302bb9e456ae428c24e8e",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 82,
"avg_line_length": 30.658227848101266,
"alnum_prop": 0.5912469033856317,
"repo_name": "moumoutte/django-perf-rec",
"id": "48a3967c3930cf0d8b204731b2ad2d0fc6b620d4",
"size": "2467",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "53075"
}
],
"symlink_target": ""
}
|
import json
import mock
import six
from heat.common import exception
from heat.common import grouputils
from heat.common import template_format
from heat.engine import function
from heat.engine import rsrc_defn
from heat.tests.autoscaling import inline_templates
from heat.tests import common
from heat.tests import utils
class TestAutoScalingGroupValidation(common.HeatTestCase):
def setUp(self):
super(TestAutoScalingGroupValidation, self).setUp()
self.parsed = template_format.parse(inline_templates.as_heat_template)
def test_invalid_min_size(self):
self.parsed['resources']['my-group']['properties']['min_size'] = -1
stack = utils.parse_stack(self.parsed)
self.assertRaises(exception.StackValidationFailed,
stack['my-group'].validate)
def test_invalid_max_size(self):
self.parsed['resources']['my-group']['properties']['max_size'] = -1
stack = utils.parse_stack(self.parsed)
self.assertRaises(exception.StackValidationFailed,
stack['my-group'].validate)
class TestScalingGroupTags(common.HeatTestCase):
def setUp(self):
super(TestScalingGroupTags, self).setUp()
t = template_format.parse(inline_templates.as_heat_template)
self.stack = utils.parse_stack(t, params=inline_templates.as_params)
self.group = self.stack['my-group']
def test_tags_default(self):
expected = [{'Key': 'metering.groupname',
'Value': u'my-group'},
{'Key': 'metering.AutoScalingGroupName',
'Value': u'my-group'}]
self.assertEqual(expected, self.group._tags())
def test_tags_with_extra(self):
self.group.properties.data['Tags'] = [
{'Key': 'fee', 'Value': 'foo'}]
expected = [{'Key': 'metering.groupname',
'Value': u'my-group'},
{'Key': 'metering.AutoScalingGroupName',
'Value': u'my-group'}]
self.assertEqual(expected, self.group._tags())
def test_tags_with_metering(self):
self.group.properties.data['Tags'] = [
{'Key': 'metering.fee', 'Value': 'foo'}]
expected = [{'Key': 'metering.groupname', 'Value': 'my-group'},
{'Key': 'metering.AutoScalingGroupName',
'Value': u'my-group'}]
self.assertEqual(expected, self.group._tags())
class TestInitialGroupSize(common.HeatTestCase):
scenarios = [
('000', dict(mins=0, maxs=0, desired=0, expected=0)),
('040', dict(mins=0, maxs=4, desired=0, expected=0)),
('253', dict(mins=2, maxs=5, desired=3, expected=3)),
('14n', dict(mins=1, maxs=4, desired=None, expected=1)),
]
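    # Scenario names encode min/max/desired; 'n' means desired_capacity is
    # unset, in which case the group starts at min_size.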
def setUp(self):
super(TestInitialGroupSize, self).setUp()
def test_initial_size(self):
t = template_format.parse(inline_templates.as_heat_template)
properties = t['resources']['my-group']['properties']
properties['min_size'] = self.mins
properties['max_size'] = self.maxs
properties['desired_capacity'] = self.desired
stack = utils.parse_stack(t, params=inline_templates.as_params)
group = stack['my-group']
with mock.patch.object(group, '_create_template') as mock_cre_temp:
group.child_template()
mock_cre_temp.assert_called_once_with(self.expected)
class TestGroupAdjust(common.HeatTestCase):
def setUp(self):
super(TestGroupAdjust, self).setUp()
t = template_format.parse(inline_templates.as_heat_template)
self.stack = utils.parse_stack(t, params=inline_templates.as_params)
self.group = self.stack['my-group']
self.stub_ImageConstraint_validate()
self.stub_FlavorConstraint_validate()
self.stub_SnapshotConstraint_validate()
self.assertIsNone(self.group.validate())
def test_scaling_policy_cooldown_toosoon(self):
"""If _is_scaling_allowed() returns False don't progress."""
dont_call = self.patchobject(grouputils, 'get_size')
self.patchobject(self.group, '_is_scaling_allowed',
return_value=False)
self.assertRaises(exception.NoActionRequired,
self.group.adjust, 1)
self.assertEqual([], dont_call.call_args_list)
def test_scaling_same_capacity(self):
"""Alway resize even if the capacity is the same."""
self.patchobject(grouputils, 'get_size', return_value=3)
resize = self.patchobject(self.group, 'resize')
finished_scaling = self.patchobject(self.group, '_finished_scaling')
notify = self.patch('heat.engine.notification.autoscaling.send')
self.patchobject(self.group, '_is_scaling_allowed',
return_value=True)
self.group.adjust(3, adjustment_type='ExactCapacity')
expected_notifies = [
mock.call(
capacity=3, suffix='start',
adjustment_type='ExactCapacity',
groupname=u'my-group',
message=u'Start resizing the group my-group',
adjustment=3,
stack=self.group.stack),
mock.call(
capacity=3, suffix='end',
adjustment_type='ExactCapacity',
groupname=u'my-group',
message=u'End resizing the group my-group',
adjustment=3,
stack=self.group.stack)]
self.assertEqual(expected_notifies, notify.call_args_list)
resize.assert_called_once_with(3)
finished_scaling.assert_called_once_with('ExactCapacity : 3',
changed_size=False)
def test_scale_up_min_adjustment(self):
self.patchobject(grouputils, 'get_size', return_value=1)
resize = self.patchobject(self.group, 'resize')
finished_scaling = self.patchobject(self.group, '_finished_scaling')
notify = self.patch('heat.engine.notification.autoscaling.send')
self.patchobject(self.group, '_is_scaling_allowed',
return_value=True)
self.group.adjust(33, adjustment_type='PercentChangeInCapacity',
min_adjustment_step=2)
expected_notifies = [
mock.call(
capacity=1, suffix='start',
adjustment_type='PercentChangeInCapacity',
groupname=u'my-group',
message=u'Start resizing the group my-group',
adjustment=33,
stack=self.group.stack),
mock.call(
capacity=3, suffix='end',
adjustment_type='PercentChangeInCapacity',
groupname=u'my-group',
message=u'End resizing the group my-group',
adjustment=33,
stack=self.group.stack)]
self.assertEqual(expected_notifies, notify.call_args_list)
resize.assert_called_once_with(3)
finished_scaling.assert_called_once_with(
'PercentChangeInCapacity : 33', changed_size=True)
def test_scale_down_min_adjustment(self):
self.patchobject(grouputils, 'get_size', return_value=3)
resize = self.patchobject(self.group, 'resize')
finished_scaling = self.patchobject(self.group, '_finished_scaling')
notify = self.patch('heat.engine.notification.autoscaling.send')
self.patchobject(self.group, '_is_scaling_allowed',
return_value=True)
self.group.adjust(-33, adjustment_type='PercentChangeInCapacity',
min_adjustment_step=2)
expected_notifies = [
mock.call(
capacity=3, suffix='start',
adjustment_type='PercentChangeInCapacity',
groupname=u'my-group',
message=u'Start resizing the group my-group',
adjustment=-33,
stack=self.group.stack),
mock.call(
capacity=1, suffix='end',
adjustment_type='PercentChangeInCapacity',
groupname=u'my-group',
message=u'End resizing the group my-group',
adjustment=-33,
stack=self.group.stack)]
self.assertEqual(expected_notifies, notify.call_args_list)
resize.assert_called_once_with(1)
finished_scaling.assert_called_once_with(
'PercentChangeInCapacity : -33', changed_size=True)
def test_scaling_policy_cooldown_ok(self):
self.patchobject(grouputils, 'get_size', return_value=0)
resize = self.patchobject(self.group, 'resize')
finished_scaling = self.patchobject(self.group, '_finished_scaling')
notify = self.patch('heat.engine.notification.autoscaling.send')
self.patchobject(self.group, '_is_scaling_allowed',
return_value=True)
self.group.adjust(1)
expected_notifies = [
mock.call(
capacity=0, suffix='start', adjustment_type='ChangeInCapacity',
groupname=u'my-group',
message=u'Start resizing the group my-group',
adjustment=1,
stack=self.group.stack),
mock.call(
capacity=1, suffix='end',
adjustment_type='ChangeInCapacity',
groupname=u'my-group',
message=u'End resizing the group my-group',
adjustment=1,
stack=self.group.stack)]
self.assertEqual(expected_notifies, notify.call_args_list)
resize.assert_called_once_with(1)
finished_scaling.assert_called_once_with('ChangeInCapacity : 1',
changed_size=True)
grouputils.get_size.assert_called_once_with(self.group)
def test_scaling_policy_resize_fail(self):
self.patchobject(grouputils, 'get_size', return_value=0)
self.patchobject(self.group, 'resize',
side_effect=ValueError('test error'))
notify = self.patch('heat.engine.notification.autoscaling.send')
self.patchobject(self.group, '_is_scaling_allowed',
return_value=True)
self.patchobject(self.group, '_finished_scaling')
self.assertRaises(ValueError, self.group.adjust, 1)
expected_notifies = [
mock.call(
capacity=0, suffix='start',
adjustment_type='ChangeInCapacity',
groupname=u'my-group',
message=u'Start resizing the group my-group',
adjustment=1,
stack=self.group.stack),
mock.call(
capacity=0, suffix='error',
adjustment_type='ChangeInCapacity',
groupname=u'my-group',
message=u'test error',
adjustment=1,
stack=self.group.stack)]
self.assertEqual(expected_notifies, notify.call_args_list)
grouputils.get_size.assert_called_with(self.group)
def test_notification_send_if_resize_failed(self):
"""If resize failed, the capacity of group might have been changed"""
self.patchobject(grouputils, 'get_size', side_effect=[3, 4])
self.patchobject(self.group, 'resize',
side_effect=ValueError('test error'))
notify = self.patch('heat.engine.notification.autoscaling.send')
self.patchobject(self.group, '_is_scaling_allowed',
return_value=True)
self.patchobject(self.group, '_finished_scaling')
self.assertRaises(ValueError, self.group.adjust,
5, adjustment_type='ExactCapacity')
expected_notifies = [
mock.call(
capacity=3, suffix='start',
adjustment_type='ExactCapacity',
groupname='my-group',
message='Start resizing the group my-group',
adjustment=5,
stack=self.group.stack),
mock.call(
capacity=4, suffix='error',
adjustment_type='ExactCapacity',
groupname='my-group',
message=u'test error',
adjustment=5,
stack=self.group.stack)]
self.assertEqual(expected_notifies, notify.call_args_list)
self.group.resize.assert_called_once_with(5)
grouputils.get_size.assert_has_calls([mock.call(self.group),
mock.call(self.group)])
class TestGroupCrud(common.HeatTestCase):
def setUp(self):
super(TestGroupCrud, self).setUp()
self.stub_ImageConstraint_validate()
self.stub_FlavorConstraint_validate()
self.stub_SnapshotConstraint_validate()
t = template_format.parse(inline_templates.as_heat_template)
self.stack = utils.parse_stack(t, params=inline_templates.as_params)
self.group = self.stack['my-group']
self.assertIsNone(self.group.validate())
def test_handle_create(self):
self.group.create_with_template = mock.Mock(return_value=None)
self.group.child_template = mock.Mock(return_value='{}')
self.group.handle_create()
self.group.child_template.assert_called_once_with()
self.group.create_with_template.assert_called_once_with('{}')
def test_handle_update_desired_cap(self):
self.group._try_rolling_update = mock.Mock(return_value=None)
self.group.resize = mock.Mock(return_value=None)
props = {'desired_capacity': 4,
'min_size': 0,
'max_size': 6}
defn = rsrc_defn.ResourceDefinition(
'nopayload',
'OS::Heat::AutoScalingGroup',
props)
self.group.handle_update(defn, None, props)
self.group.resize.assert_called_once_with(4)
self.group._try_rolling_update.assert_called_once_with(props)
def test_handle_update_desired_nocap(self):
self.group._try_rolling_update = mock.Mock(return_value=None)
self.group.resize = mock.Mock(return_value=None)
get_size = self.patchobject(grouputils, 'get_size')
get_size.return_value = 6
props = {'min_size': 0,
'max_size': 6}
defn = rsrc_defn.ResourceDefinition(
'nopayload',
'OS::Heat::AutoScalingGroup',
props)
self.group.handle_update(defn, None, props)
self.group.resize.assert_called_once_with(6)
self.group._try_rolling_update.assert_called_once_with(props)
def test_update_in_failed(self):
self.group.state_set('CREATE', 'FAILED')
# to update the failed asg
self.group.resize = mock.Mock(return_value=None)
new_defn = rsrc_defn.ResourceDefinition(
'asg', 'OS::Heat::AutoScalingGroup',
{'AvailabilityZones': ['nova'],
'LaunchConfigurationName': 'config',
'max_size': 5,
'min_size': 1,
'desired_capacity': 2,
'resource':
{'type': 'ResourceWithPropsAndAttrs',
'properties': {
'Foo': 'hello'}}})
self.group.handle_update(new_defn, None, None)
self.group.resize.assert_called_once_with(2)
class HeatScalingGroupAttrTest(common.HeatTestCase):
def setUp(self):
super(HeatScalingGroupAttrTest, self).setUp()
t = template_format.parse(inline_templates.as_heat_template)
self.stack = utils.parse_stack(t, params=inline_templates.as_params)
self.group = self.stack['my-group']
self.assertIsNone(self.group.validate())
def test_no_instance_list(self):
"""Tests inheritance of InstanceList attribute.
The InstanceList attribute is not inherited from
AutoScalingResourceGroup's superclasses.
"""
self.assertRaises(exception.InvalidTemplateAttribute,
self.group.FnGetAtt, 'InstanceList')
def test_output_attribute_list(self):
mock_members = self.patchobject(grouputils, 'get_members')
members = []
output = []
for ip_ex in six.moves.range(1, 4):
inst = mock.Mock()
inst.FnGetAtt.return_value = '2.1.3.%d' % ip_ex
output.append('2.1.3.%d' % ip_ex)
members.append(inst)
mock_members.return_value = members
self.assertEqual(output, self.group.FnGetAtt('outputs_list', 'Bar'))
def test_output_attribute_dict(self):
mock_members = self.patchobject(grouputils, 'get_members')
members = []
output = {}
for ip_ex in six.moves.range(1, 4):
inst = mock.Mock()
inst.name = str(ip_ex)
inst.FnGetAtt.return_value = '2.1.3.%d' % ip_ex
output[str(ip_ex)] = '2.1.3.%d' % ip_ex
members.append(inst)
mock_members.return_value = members
self.assertEqual(output,
self.group.FnGetAtt('outputs', 'Bar'))
def test_attribute_current_size(self):
mock_instances = self.patchobject(grouputils, 'get_size')
mock_instances.return_value = 3
self.assertEqual(3, self.group.FnGetAtt('current_size'))
def test_attribute_current_size_with_path(self):
mock_instances = self.patchobject(grouputils, 'get_size')
mock_instances.return_value = 4
self.assertEqual(4, self.group.FnGetAtt('current_size', 'name'))
def test_index_dotted_attribute(self):
mock_members = self.patchobject(grouputils, 'get_members')
self.group.nested = mock.Mock()
members = []
output = []
for ip_ex in six.moves.range(0, 2):
inst = mock.Mock()
inst.name = str(ip_ex)
inst.FnGetAtt.return_value = '2.1.3.%d' % ip_ex
output.append('2.1.3.%d' % ip_ex)
members.append(inst)
mock_members.return_value = members
self.assertEqual(output[0], self.group.FnGetAtt('resource.0', 'Bar'))
self.assertEqual(output[1], self.group.FnGetAtt('resource.1.Bar'))
self.assertRaises(exception.InvalidTemplateAttribute,
self.group.FnGetAtt, 'resource.2')
def asg_tmpl_with_bad_updt_policy():
t = template_format.parse(inline_templates.as_heat_template)
agp = t['resources']['my-group']['properties']
agp['rolling_updates'] = {"foo": {}}
return json.dumps(t)
def asg_tmpl_with_default_updt_policy():
t = template_format.parse(inline_templates.as_heat_template)
return json.dumps(t)
def asg_tmpl_with_updt_policy(props=None):
t = template_format.parse(inline_templates.as_heat_template)
agp = t['resources']['my-group']['properties']
agp['rolling_updates'] = {
"min_in_service": 1,
"max_batch_size": 2,
"pause_time": 1
}
if props is not None:
agp.update(props)
return json.dumps(t)
class RollingUpdatePolicyTest(common.HeatTestCase):
def setUp(self):
super(RollingUpdatePolicyTest, self).setUp()
self.stub_keystoneclient(username='test_stack.CfnLBUser')
def test_parse_without_update_policy(self):
tmpl = template_format.parse(inline_templates.as_heat_template)
stack = utils.parse_stack(tmpl)
stack.validate()
grp = stack['my-group']
default_policy = {
'min_in_service': 0,
'pause_time': 0,
'max_batch_size': 1
}
self.assertEqual(default_policy, grp.properties['rolling_updates'])
def test_parse_with_update_policy(self):
tmpl = template_format.parse(asg_tmpl_with_updt_policy())
stack = utils.parse_stack(tmpl)
stack.validate()
tmpl_grp = tmpl['resources']['my-group']
tmpl_policy = tmpl_grp['properties']['rolling_updates']
tmpl_batch_sz = int(tmpl_policy['max_batch_size'])
policy = stack['my-group'].properties['rolling_updates']
self.assertTrue(policy)
        self.assertEqual(3, len(policy))
self.assertEqual(1, int(policy['min_in_service']))
self.assertEqual(tmpl_batch_sz, int(policy['max_batch_size']))
self.assertEqual(1, policy['pause_time'])
def test_parse_with_default_update_policy(self):
tmpl = template_format.parse(asg_tmpl_with_default_updt_policy())
stack = utils.parse_stack(tmpl)
stack.validate()
policy = stack['my-group'].properties['rolling_updates']
self.assertTrue(policy)
self.assertEqual(3, len(policy))
self.assertEqual(0, int(policy['min_in_service']))
self.assertEqual(1, int(policy['max_batch_size']))
self.assertEqual(0, policy['pause_time'])
def test_parse_with_bad_update_policy(self):
tmpl = template_format.parse(asg_tmpl_with_bad_updt_policy())
stack = utils.parse_stack(tmpl)
error = self.assertRaises(
exception.StackValidationFailed, stack.validate)
self.assertIn("foo", six.text_type(error))
def test_parse_with_bad_pausetime_in_update_policy(self):
tmpl = template_format.parse(asg_tmpl_with_default_updt_policy())
group = tmpl['resources']['my-group']
group['properties']['rolling_updates'] = {'pause_time': 'a-string'}
stack = utils.parse_stack(tmpl)
error = self.assertRaises(
exception.StackValidationFailed, stack.validate)
self.assertIn("could not convert string to float",
six.text_type(error))
class RollingUpdatePolicyDiffTest(common.HeatTestCase):
def setUp(self):
super(RollingUpdatePolicyDiffTest, self).setUp()
self.stub_keystoneclient(username='test_stack.CfnLBUser')
def validate_update_policy_diff(self, current, updated):
# load current stack
current_tmpl = template_format.parse(current)
current_stack = utils.parse_stack(current_tmpl)
# get the json snippet for the current InstanceGroup resource
current_grp = current_stack['my-group']
current_snippets = dict((n, r.parsed_template())
for n, r in current_stack.items())
current_grp_json = current_snippets[current_grp.name]
# load the updated stack
updated_tmpl = template_format.parse(updated)
updated_stack = utils.parse_stack(updated_tmpl)
# get the updated json snippet for the InstanceGroup resource in the
# context of the current stack
updated_grp = updated_stack['my-group']
updated_grp_json = function.resolve(updated_grp.t)
# identify the template difference
tmpl_diff = updated_grp.update_template_diff(
updated_grp_json, current_grp_json)
updated_policy = (updated_grp.properties['rolling_updates']
if 'rolling_updates' in updated_grp.properties.data
else None)
self.assertEqual(updated_policy,
tmpl_diff['Properties'].get('rolling_updates'))
# test application of the new update policy in handle_update
update_snippet = rsrc_defn.ResourceDefinition(
current_grp.name,
current_grp.type(),
properties=updated_grp.t['Properties'])
current_grp._try_rolling_update = mock.MagicMock()
current_grp.resize = mock.MagicMock()
current_grp.handle_update(update_snippet, tmpl_diff, None)
if updated_policy is None:
self.assertIsNone(
current_grp.properties.data.get('rolling_updates'))
else:
self.assertEqual(updated_policy,
current_grp.properties.data['rolling_updates'])
def test_update_policy_added(self):
self.validate_update_policy_diff(inline_templates.as_heat_template,
asg_tmpl_with_updt_policy())
def test_update_policy_updated(self):
extra_props = {'rolling_updates': {
'min_in_service': 2,
'max_batch_size': 4,
'pause_time': 30}}
self.validate_update_policy_diff(
asg_tmpl_with_updt_policy(),
asg_tmpl_with_updt_policy(props=extra_props))
def test_update_policy_removed(self):
self.validate_update_policy_diff(asg_tmpl_with_updt_policy(),
inline_templates.as_heat_template)
class IncorrectUpdatePolicyTest(common.HeatTestCase):
def setUp(self):
super(IncorrectUpdatePolicyTest, self).setUp()
self.stub_keystoneclient(username='test_stack.CfnLBUser')
def test_with_update_policy_aws(self):
t = template_format.parse(inline_templates.as_heat_template)
ag = t['resources']['my-group']
ag["update_policy"] = {"AutoScalingRollingUpdate": {
"MinInstancesInService": "1",
"MaxBatchSize": "2",
"PauseTime": "PT1S"
}}
tmpl = template_format.parse(json.dumps(t))
stack = utils.parse_stack(tmpl)
exc = self.assertRaises(exception.StackValidationFailed,
stack.validate)
self.assertIn('Unknown Property AutoScalingRollingUpdate',
six.text_type(exc))
def test_with_update_policy_inst_group(self):
t = template_format.parse(inline_templates.as_heat_template)
ag = t['resources']['my-group']
ag["update_policy"] = {"RollingUpdate": {
"MinInstancesInService": "1",
"MaxBatchSize": "2",
"PauseTime": "PT1S"
}}
tmpl = template_format.parse(json.dumps(t))
stack = utils.parse_stack(tmpl)
exc = self.assertRaises(exception.StackValidationFailed,
stack.validate)
self.assertIn('Unknown Property RollingUpdate', six.text_type(exc))
|
{
"content_hash": "45ce32c72a97cd796f7a57b65699b83a",
"timestamp": "",
"source": "github",
"line_count": 634,
"max_line_length": 79,
"avg_line_length": 41.0583596214511,
"alnum_prop": 0.5984403211555454,
"repo_name": "jasondunsmore/heat",
"id": "0e8900212edb966b6d6adb3b3bcd1a95a58dca98",
"size": "26604",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "heat/tests/autoscaling/test_heat_scaling_group.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "7819626"
},
{
"name": "Shell",
"bytes": "33158"
}
],
"symlink_target": ""
}
|
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0,
os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..')))
import papersizes
# Monkey patch sphinx's autodata to fix bug 857
import sphinx.ext.autodoc
def __get_real_modname(self):
return self.get_attr(self.parent or self.object, '__module__', None) \
or self.modname
sphinx.ext.autodoc.DataDocumenter.get_real_modname = __get_real_modname
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Papersizes'
copyright = papersizes.__copyright__
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = papersizes.__version__
# The full version, including alpha/beta/rc tags.
release = papersizes.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Papersizesdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'Papersizes.tex', u'Papersizes Documentation',
u'Ian Millington', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'papersizes', u'Papersizes Documentation',
[u'Ian Millington'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Papersizes', u'Papersizes Documentation',
u'Ian Millington', 'Papersizes', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
{
"content_hash": "2b75c167fcca31a637a4e60302d7343a",
"timestamp": "",
"source": "github",
"line_count": 255,
"max_line_length": 79,
"avg_line_length": 31.96078431372549,
"alnum_prop": 0.7076073619631902,
"repo_name": "idmillington/papersizes",
"id": "4866fdd22eafad99d1d2ba60d743ed95eed57739",
"size": "8573",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/source/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "38794"
}
],
"symlink_target": ""
}
|
"""
This is a script written to add the template "orphan" to pages.
These command line parameters can be used to specify which pages to work on:
&params;
-xml Retrieve information from a local XML dump (pages-articles
or pages-meta-current, see https://download.wikimedia.org).
Argument can also be given as "-xml:filename".
-page Only edit a specific page.
Argument can also be given as "-page:pagetitle". You can
give this parameter multiple times to edit multiple pages.
Furthermore, the following command line parameters are supported:
-enable: Enable or disable the bot via a Wiki Page.
-disambig: Set a page where the bot saves the name of the disambig
pages found (default: skip the pages)
-limit: Set how many pages to check.
-always Always say yes, won't ask
--- Examples ---
python pwb.py lonelypages -enable:User:Bot/CheckBot -always
"""
#
# (C) Pietrodn, it.wiki 2006-2007
# (C) Filnik, it.wiki 2007
# (C) Pywikibot team, 2008-2014
#
# Distributed under the terms of the MIT license.
#
from __future__ import unicode_literals
__version__ = '$Id$'
#
import re
import sys
import pywikibot
from pywikibot import i18n, pagegenerators, Bot
# This is required for the text that is shown when you run this script
# with the parameter -help.
docuReplacements = {
    '&params;': pagegenerators.parameterHelp,
}
template = {
'ar': u'{{يتيمة|تاريخ={{نسخ:اسم_شهر}} {{نسخ:عام}}}}',
'ca': u'{{Orfe|date={{subst:CURRENTMONTHNAME}} {{subst:CURRENTYEAR}}}}',
'en': u'{{Orphan|date={{subst:CURRENTMONTHNAME}} {{subst:CURRENTYEAR}}}}',
'it': u'{{O||mese={{subst:CURRENTMONTHNAME}} {{subst:CURRENTYEAR}}}}',
'ja': u'{{孤立|{{subst:DATE}}}}',
'zh': u'{{subst:Orphan/auto}}',
}
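# The templates subst: the current date so it is expanded once at save time.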
# Use regex to prevent to put the same template twice!
exception_regex = {
'ar': [u'\\{\\{(?:قالب:|)(يتيمة)[\\|\\}]'],
'ca': [r'\{\{(?:template:|)(orfe)[\|\}]'],
'en': [r'\{\{(?:template:|)(orphan)[\|\}]',
r'\{\{(?:template:|)(wi)[\|\}]'],
'it': [r'\{\{(?:template:|)(o|a)[\|\}]'],
'ja': [u'\\{\\{(?:template:|)(孤立)[\\|\\}]'],
'zh': [r'\{\{(?:template:|)(orphan)[\|\}]'],
}
class LonelyPagesBot(Bot):
"""Orphan page tagging bot."""
def __init__(self, generator, **kwargs):
self.availableOptions.update({
'enablePage': None, # Check if someone set an enablePage or not
'disambigPage': None, # If no disambigPage given, not use it.
})
super(LonelyPagesBot, self).__init__(**kwargs)
self.generator = generator
# Take the configurations according to our project
self.site = pywikibot.Site()
if self.getOption('enablePage'):
self.options['enablePage'] = pywikibot.Page(
self.site, self.getOption('enablePage'))
self.comment = i18n.twtranslate(
self.site, 'lonelypages-comment-add-template')
self.commentdisambig = i18n.twtranslate(
self.site, 'lonelypages-comment-add-disambig-template')
self.template = i18n.translate(self.site, template)
self.exception = i18n.translate(self.site, exception_regex)
if self.template is None or self.exception is None:
pywikibot.showHelp()
sys.exit(u'Missing configuration for site %s' % self.site)
# DisambigPage part
if self.getOption('disambigPage') is not None:
self.disambigpage = pywikibot.Page(self.site, self.getOption('disambigPage'))
try:
self.disambigtext = self.disambigpage.get()
except pywikibot.NoPage:
pywikibot.output(u"%s doesn't esist, skip!" % self.disambigpage.title())
self.disambigtext = ''
except pywikibot.IsRedirectPage:
pywikibot.output(u"%s is a redirect, don't use it!"
% self.disambigpage.title())
self.options['disambigPage'] = None
def enable_page(self):
enable = self.getOption('enablePage')
if enable is not None:
try:
getenable = enable.get()
except pywikibot.NoPage:
pywikibot.output(
u"%s doesn't esist, I use the page as if it was blank!"
% enable.title())
getenable = ''
except pywikibot.IsRedirectPage:
pywikibot.output(u"%s is a redirect, skip!" % enable.title())
getenable = ''
return getenable == 'enable'
return True
def run(self):
# If the enable page is set to disable, turn off the bot
# (useful when the bot is run on a server)
if not self.enable_page():
pywikibot.output('The bot is disabled')
return
# Main Loop
for page in self.generator:
self.treat(page)
def treat(self, page):
pywikibot.output(u"Checking %s..." % page.title())
if page.isRedirectPage(): # If redirect, skip!
pywikibot.output(u'%s is a redirect! Skip...' % page.title())
return
refs = list(page.getReferences(total=1))
if len(refs) > 0:
pywikibot.output(u"%s isn't orphan! Skip..." % page.title())
return
else:
# no refs, no redirect; check if there's already the template
try:
oldtxt = page.get()
except pywikibot.NoPage:
pywikibot.output(u"%s doesn't exist! Skip..." % page.title())
return
except pywikibot.IsRedirectPage:
pywikibot.output(u"%s is a redirect! Skip..." % page.title())
return
        # If any of the exception regexes matches, the page already carries
        # an orphan template (or an equivalent), so skip it.
        for regexp in self.exception:
            res = re.findall(regexp, oldtxt.lower())
            if res:
                # Found a template! Let's skip the page!
                pywikibot.output(
                    u'Your regex has found something in %s, skipping...'
                    % page.title())
                return
if page.isDisambig() and self.getOption('disambigPage') is not None:
pywikibot.output(u'%s is a disambig page, report..'
% page.title())
            if page.title().lower() not in self.disambigtext.lower():
self.disambigtext = u"%s\n*[[%s]]" % (self.disambigtext, page.title())
self.disambigpage.text = self.disambigtext
self.disambigpage.save(self.commentdisambig)
return
# Is the page a disambig but there's not disambigPage? Skip!
elif page.isDisambig():
pywikibot.output(u'%s is a disambig page, skip...'
% page.title())
return
else:
# Ok, the page need the template. Let's put it there!
# Adding the template in the text
newtxt = u"%s\n%s" % (self.template, oldtxt)
self.userPut(page, oldtxt, newtxt, summary=self.comment)
def main(*args):
"""
Process command line arguments and invoke bot.
If args is an empty list, sys.argv is used.
@param args: command line arguments
@type args: list of unicode
"""
options = {}
local_args = pywikibot.handle_args(args)
genFactory = pagegenerators.GeneratorFactory()
site = pywikibot.Site()
for arg in local_args:
if arg.startswith('-enable'):
if len(arg) == 7:
options['enablePage'] = pywikibot.input(
u'Would you like to check if the bot should run or not?')
else:
options['enablePage'] = arg[8:]
elif arg.startswith('-disambig'):
if len(arg) == 9:
options['disambigPage'] = pywikibot.input(
u'In which page should the bot save the disambig pages?')
else:
options['disambigPage'] = arg[10:]
elif arg == '-always':
options['always'] = True
else:
genFactory.handleArg(arg)
generator = genFactory.getCombinedGenerator()
# If the generator is not given, use the default one
if not generator:
generator = site.lonelypages(total=genFactory.limit)
bot = LonelyPagesBot(generator, **options)
bot.run()
if __name__ == '__main__':
main()
|
{
"content_hash": "8bbd9514fd1660221a3d7149217c01b5",
"timestamp": "",
"source": "github",
"line_count": 237,
"max_line_length": 90,
"avg_line_length": 37.164556962025316,
"alnum_prop": 0.5557447774750227,
"repo_name": "trishnaguha/pywikibot-core",
"id": "de4e7cabc7cd3dd2c66f34af58f94bc6b52357da",
"size": "8892",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "scripts/lonelypages.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "97"
},
{
"name": "Python",
"bytes": "3821251"
}
],
"symlink_target": ""
}
|
'''
Utility module that converts a reStructuredText file into XHTML.
Based on the rst2html script provided by docutils with some hard-coded
options that are suitable for restblog.
:copyright: Copyright 2010 Luis Artola.
:license: BSD, see LICENSE.txt for details.
'''
import os
import sys
from docutils.core import default_description
from docutils.core import publish_cmdline
from restblog.directives import fullstory
from restblog.directives import restblogheader
from restblog.directives import rstpygments
from restblog.directives import video
def main( arguments ):
'''main( arguments )
Converts a reStructuredText file into an XHTML document.
Parameters:
- arguments: A list of strings representing the command-line arguments
to the ``rst2html`` executable, e.g. ``sys.argv[1:]``
'''
try:
import locale
locale.setlocale(locale.LC_ALL, '')
except:
pass
description = \
'Generates (X)HTML documents from standalone reStructuredText ' \
'sources ready to be posted in a restblog site. ' \
'Highlights source code using Pygments. ' \
+ default_description
docutils_arguments = arguments + [
'--link-stylesheet',
'--stylesheet=tango.css',
'--cloak-email-addresses',
]
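    # --link-stylesheet links tango.css externally rather than embedding it;
    # the stylesheet is assumed to ship alongside the generated pages.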
if not arguments:
print 'Type reStructuredText and press Control-D when done:'
else:
# TODO: There's gotta be a better way of communicating the source
# file to the ``restblog.directives`` classes. For now, let's just
# use a plain-old environment variable.
os.environ[ 'RESTBLOG_SOURCE_FILE_NAME' ] = arguments[0]
# Let docutils work its magic.
publish_cmdline( writer_name='html', description=description, argv=docutils_arguments )
if __name__ == '__main__':
main( sys.argv[1:] )
|
{
"content_hash": "480fbb1fa8b4e3daf247f3fcc8ddc856",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 91,
"avg_line_length": 27.70149253731343,
"alnum_prop": 0.6831896551724138,
"repo_name": "MadanThangavelu/restblog",
"id": "82130ee4b6db040f2ea999826f5cf0392db4af2c",
"size": "2152",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "python/restblog/restblog2html.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "47240"
},
{
"name": "Python",
"bytes": "2643933"
},
{
"name": "Shell",
"bytes": "74"
}
],
"symlink_target": ""
}
|
from bamboo_boy.utils import with_canopy
import json
from django.test import TestCase
from builds.constants import LATEST
from projects.models import Project
from rtd_tests.factories.projects_factories import OneProjectWithTranslationsOneWithout,\
ProjectFactory
from rest_framework.reverse import reverse
from restapi.serializers import ProjectSerializer
from rtd_tests.mocks.paths import fake_paths_by_regex
@with_canopy(OneProjectWithTranslationsOneWithout)
class TestProject(TestCase):
fixtures = ["eric", "test_data"]
def setUp(self):
self.client.login(username='eric', password='test')
self.pip = Project.objects.get(slug='pip')
def test_valid_versions(self):
r = self.client.get('/api/v2/project/6/valid_versions/', {})
resp = json.loads(r.content)
self.assertEqual(r.status_code, 200)
self.assertEqual(resp['flat'][0], '0.8')
self.assertEqual(resp['flat'][1], '0.8.1')
def test_subprojects(self):
r = self.client.get('/api/v2/project/6/subprojects/', {})
resp = json.loads(r.content)
self.assertEqual(r.status_code, 200)
self.assertEqual(resp['subprojects'][0]['id'], 23)
def test_translations(self):
p = self.canopy.project_with_translations
url = reverse('project-translations', [p.id])
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
translation_ids_from_api = [t['id']
for t in response.data['translations']]
translation_ids_from_orm = [t[0]
for t in p.translations.values_list('id')]
self.assertEqual(
set(translation_ids_from_api),
set(translation_ids_from_orm)
)
def test_token(self):
r = self.client.get('/api/v2/project/6/token/', {})
resp = json.loads(r.content)
self.assertEqual(r.status_code, 200)
self.assertEqual(resp['token'], None)
def test_has_pdf(self):
# The project has a pdf if the PDF file exists on disk.
        with fake_paths_by_regex(r'\.pdf$'):
self.assertTrue(self.pip.has_pdf(LATEST))
# The project has no pdf if there is no file on disk.
        with fake_paths_by_regex(r'\.pdf$', exists=False):
self.assertFalse(self.pip.has_pdf(LATEST))
def test_has_pdf_with_pdf_build_disabled(self):
# The project has NO pdf if pdf builds are disabled
self.pip.enable_pdf_build = False
        with fake_paths_by_regex(r'\.pdf$'):
self.assertFalse(self.pip.has_pdf(LATEST))
def test_has_epub(self):
        # The project has an epub if the epub file exists on disk.
        with fake_paths_by_regex(r'\.epub$'):
self.assertTrue(self.pip.has_epub(LATEST))
# The project has no epub if there is no file on disk.
        with fake_paths_by_regex(r'\.epub$', exists=False):
self.assertFalse(self.pip.has_epub(LATEST))
def test_has_epub_with_epub_build_disabled(self):
# The project has NO epub if epub builds are disabled
self.pip.enable_epub_build = False
        with fake_paths_by_regex(r'\.epub$'):
self.assertFalse(self.pip.has_epub(LATEST))
|
{
"content_hash": "299ddc2cf6ce463c85c5208b2ce25d56",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 89,
"avg_line_length": 38.773809523809526,
"alnum_prop": 0.632483880871968,
"repo_name": "raven47git/readthedocs.org",
"id": "30b9f527295d605a4e87f5666e0e24837af6f04a",
"size": "3257",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "readthedocs/rtd_tests/tests/test_project.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "4515"
},
{
"name": "CSS",
"bytes": "65384"
},
{
"name": "HTML",
"bytes": "208325"
},
{
"name": "JavaScript",
"bytes": "429334"
},
{
"name": "Makefile",
"bytes": "4594"
},
{
"name": "Python",
"bytes": "735549"
},
{
"name": "Shell",
"bytes": "492"
}
],
"symlink_target": ""
}
|
from PyQt5.QtWidgets import QApplication, QMainWindow, QMessageBox, QLineEdit, QPushButton
class MyMainWindow(QMainWindow):
def __init__(self):
        super(MyMainWindow, self).__init__()
self.title = 'PyQt5 textbox example'
self.left = 100
self.top = 100
self.width = 640
self.height = 480
self.init_ui()
def init_ui(self):
self.setWindowTitle(self.title)
self.setGeometry(self.left, self.top, self.width, self.height)
self.textbox = QLineEdit(self)
self.textbox.move(20, 20)
self.textbox.resize(280, 30)
button = QPushButton('Show Text', self)
button.move(20, 80)
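        # Qt signal/slot wiring: clicking the button invokes on_button_click.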
button.clicked.connect(self.on_button_click)
def on_button_click(self):
text = self.textbox.text()
if text:
QMessageBox.information(self, 'Message - textbox example', 'You typed: ' + text, QMessageBox.Ok, QMessageBox.Ok)
else:
QMessageBox.warning(self, 'Message - textbox example', 'You have not typed anything!', QMessageBox.Ok, QMessageBox.Ok)
self.textbox.setText('')
if __name__ == '__main__':
import sys
app = QApplication(sys.argv)
w = MyMainWindow()
w.show()
sys.exit(app.exec_())
|
{
"content_hash": "1d5741bef0ce8efb14c05e2f6ccec56c",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 130,
"avg_line_length": 28.75,
"alnum_prop": 0.6134387351778656,
"repo_name": "nnaabbcc/exercise",
"id": "d1c2bd64b9d58dc9ae697f47499d417cdc9f3e50",
"size": "1266",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/PyQt5/06_textbox_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "6872"
},
{
"name": "C",
"bytes": "1764627"
},
{
"name": "C#",
"bytes": "1976292"
},
{
"name": "C++",
"bytes": "2643235"
},
{
"name": "CMake",
"bytes": "6371"
},
{
"name": "CSS",
"bytes": "5461"
},
{
"name": "Cuda",
"bytes": "400"
},
{
"name": "HTML",
"bytes": "683692"
},
{
"name": "Java",
"bytes": "863101"
},
{
"name": "JavaScript",
"bytes": "65541"
},
{
"name": "Makefile",
"bytes": "650457"
},
{
"name": "PHP",
"bytes": "1501"
},
{
"name": "Python",
"bytes": "61596"
},
{
"name": "Shell",
"bytes": "2340"
},
{
"name": "Vim script",
"bytes": "220"
},
{
"name": "Visual Basic .NET",
"bytes": "257344"
},
{
"name": "XSLT",
"bytes": "2038"
}
],
"symlink_target": ""
}
|
number = 10
|
{
"content_hash": "c5f3657a6f859b58f15ddd97571f53b7",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 11,
"avg_line_length": 12,
"alnum_prop": 0.6666666666666666,
"repo_name": "FelicityN/hello_world",
"id": "88d97b6bb1dcdc69493d49f808a3bb089ab5101d",
"size": "12",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "demo/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1800"
}
],
"symlink_target": ""
}
|
from rest_framework.pagination import PageNumberPagination
class StandardResultsSetPagination(PageNumberPagination):
page_size = 20
page_size_query_param = 'page_size'
max_page_size = 1000
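# Illustrative usage (view, model, and serializer names below are hypothetical):
# either attach the class per-view via `pagination_class`, or enable it
# project-wide in settings.py:
#
#   REST_FRAMEWORK = {
#       'DEFAULT_PAGINATION_CLASS':
#           'railguns.rest_framework.pagination.StandardResultsSetPagination',
#   }
#
#   from rest_framework import generics
#
#   class ArticleListView(generics.ListAPIView):
#       queryset = Article.objects.all()
#       serializer_class = ArticleSerializer
#       pagination_class = StandardResultsSetPagination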
|
{
"content_hash": "ca5a48edb9193b6cb2bf8682942d359f",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 58,
"avg_line_length": 29,
"alnum_prop": 0.7733990147783252,
"repo_name": "larryhq/railguns",
"id": "46ad06ea1167e546a1709660877d66a719a4a0ca",
"size": "203",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "railguns/rest_framework/pagination.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "6168"
},
{
"name": "JavaScript",
"bytes": "7485"
},
{
"name": "Python",
"bytes": "29321"
}
],
"symlink_target": ""
}
|
from django.conf.urls import patterns, url
from helloworld import views
urlpatterns = patterns('',
url(r'^$', views.index, name='index'),
)
|
{
"content_hash": "1d84ee1d7eaa4aca900c4fb9684f32da",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 42,
"avg_line_length": 24.166666666666668,
"alnum_prop": 0.7034482758620689,
"repo_name": "samuraitaiga/django-sample",
"id": "63e8065847fe042aa305cc04f98529a2d0f181c0",
"size": "145",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "djangosample/helloworld/urls.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "3502"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import sandboxie
import subprocess
import time
import unittest
class SandboxieIntegrationTests(unittest.TestCase):
def setUp(self):
self.sbie = sandboxie.Sandboxie()
self.sbie.create_sandbox(box='foo', options={'Enabled': 'yes'})
def tearDown(self):
self.sbie.delete_contents(box='foo')
self.sbie.destroy_sandbox(box='foo')
def test_start_command_fails_due_to_non_existent_sandbox(self):
try:
self.sbie.start('notepad.exe', box='DOES_NOT_EXIST', wait=False)
except subprocess.CalledProcessError:
pass
else:
self.fail()
def test_start_command_fails_due_to_invalid_command(self):
try:
self.sbie.start('asdaklwjWLAL.asjd', box='foo', wait=False)
except subprocess.CalledProcessError:
pass
else:
self.fail()
def test_launch_notepad(self):
self.sbie.start('notepad.exe', box='foo', wait=False)
assert(len(list(self.sbie.running_processes(box='foo'))) > 0)
self.sbie.terminate_processes(box='foo')
if __name__ == '__main__':
unittest.main()
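# Illustrative direct usage of the API exercised by the tests above (the box
# name is hypothetical); each call mirrors one used in setUp/tearDown/tests:
#
#   sbie = sandboxie.Sandboxie()
#   sbie.create_sandbox(box='scratch', options={'Enabled': 'yes'})
#   sbie.start('notepad.exe', box='scratch', wait=False)
#   sbie.terminate_processes(box='scratch')
#   sbie.delete_contents(box='scratch')
#   sbie.destroy_sandbox(box='scratch')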
|
{
"content_hash": "ca3fb1f962988a4f2d0ff08b7448a5bd",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 76,
"avg_line_length": 28.878048780487806,
"alnum_prop": 0.625,
"repo_name": "gg/sandboxie-py",
"id": "f62ede2b19522e074f92ed10b0df15f4e6c6c1f4",
"size": "1201",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "integration_tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "29308"
}
],
"symlink_target": ""
}
|
import rospy
import os
import pyinotify
import sys
import traceback
import collections
class _QueryDeleteProcessor(pyinotify.ProcessEvent):
def process_IN_DELETE(self, event):
self.__class__.handle_delete(event)
def process_default(self, event):
pass
class QueryNotifier():
def __init__(self, query_path):
self.query_path = query_path
_QueryDeleteProcessor.handle_delete = self.handle_delete
self.delete_handlers = []
self.wm = pyinotify.WatchManager()
self.notifier = pyinotify.ThreadedNotifier(self.wm)
self.wm.watch_transient_file(
filename=self.query_path,
mask=pyinotify.ALL_EVENTS,
proc_class=_QueryDeleteProcessor
)
def add_delete_handler(self, handler):
assert isinstance(handler, collections.Callable)
self.delete_handlers.append(handler)
def handle_delete(self, *args, **kwargs):
def run_handler(h):
try:
h()
            except Exception as e:
                rospy.logerr('Caught an Exception in delete handler: %s' % e)
                rospy.logerr(traceback.format_exc())
list(map(run_handler, self.delete_handlers))
def start(self):
self.notifier.start()
def stop(self):
self.notifier.stop()
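# Illustrative usage (the path and handler below are hypothetical):
#
#   notifier = QueryNotifier('/tmp/query.txt')
#   notifier.add_delete_handler(lambda: rospy.loginfo('query file was deleted'))
#   notifier.start()
#   ...  # run until shutdown
#   notifier.stop()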
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
|
{
"content_hash": "65878466d08bc28a6cf2886890865951",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 70,
"avg_line_length": 28.1875,
"alnum_prop": 0.6267553584626755,
"repo_name": "EndPointCorp/lg_ros_nodes",
"id": "75d63f6e41affb4b46c32246d1361949aa911737",
"size": "1353",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lg_earth/src/lg_earth/query_notifier.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "28157"
},
{
"name": "C++",
"bytes": "291289"
},
{
"name": "CMake",
"bytes": "26675"
},
{
"name": "Dockerfile",
"bytes": "15931"
},
{
"name": "HTML",
"bytes": "29662"
},
{
"name": "JavaScript",
"bytes": "430737"
},
{
"name": "Makefile",
"bytes": "4197"
},
{
"name": "Python",
"bytes": "1144931"
},
{
"name": "Shell",
"bytes": "17851"
}
],
"symlink_target": ""
}
|
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName(_fromUtf8("Form"))
Form.resize(529, 353)
self.verticalLayout_3 = QtGui.QVBoxLayout(Form)
self.verticalLayout_3.setSpacing(0)
self.verticalLayout_3.setMargin(0)
self.verticalLayout_3.setObjectName(_fromUtf8("verticalLayout_3"))
self.groupBox = GroupBox(Form)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.groupBox.setFont(font)
self.groupBox.setCheckable(False)
self.groupBox.setObjectName(_fromUtf8("groupBox"))
self.verticalLayout_2 = QtGui.QVBoxLayout(self.groupBox)
self.verticalLayout_2.setSpacing(0)
self.verticalLayout_2.setContentsMargins(5, 0, 0, 0)
self.verticalLayout_2.setObjectName(_fromUtf8("verticalLayout_2"))
self.gridLayout = QtGui.QGridLayout()
self.gridLayout.setSpacing(0)
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.preSetCheck = QtGui.QCheckBox(self.groupBox)
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.preSetCheck.setFont(font)
self.preSetCheck.setObjectName(_fromUtf8("preSetCheck"))
self.gridLayout.addWidget(self.preSetCheck, 0, 0, 1, 1)
self.preSetSpin = SpinBox(self.groupBox)
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.preSetSpin.setFont(font)
self.preSetSpin.setMinimum(-999999999.0)
self.preSetSpin.setMaximum(999999999.0)
self.preSetSpin.setObjectName(_fromUtf8("preSetSpin"))
self.gridLayout.addWidget(self.preSetSpin, 0, 1, 1, 1)
self.holdingCheck = QtGui.QCheckBox(self.groupBox)
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.holdingCheck.setFont(font)
self.holdingCheck.setObjectName(_fromUtf8("holdingCheck"))
self.gridLayout.addWidget(self.holdingCheck, 1, 0, 1, 1)
self.holdingSpin = SpinBox(self.groupBox)
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.holdingSpin.setFont(font)
self.holdingSpin.setMinimum(-999999999.0)
self.holdingSpin.setMaximum(999999999.0)
self.holdingSpin.setObjectName(_fromUtf8("holdingSpin"))
self.gridLayout.addWidget(self.holdingSpin, 1, 1, 1, 1)
self.verticalLayout_2.addLayout(self.gridLayout)
self.frame = QtGui.QFrame(self.groupBox)
self.frame.setFrameShape(QtGui.QFrame.Box)
self.frame.setFrameShadow(QtGui.QFrame.Raised)
self.frame.setObjectName(_fromUtf8("frame"))
self.verticalLayout = QtGui.QVBoxLayout(self.frame)
self.verticalLayout.setSpacing(0)
self.verticalLayout.setMargin(0)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.functionCheck = QtGui.QCheckBox(self.frame)
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.functionCheck.setFont(font)
self.functionCheck.setObjectName(_fromUtf8("functionCheck"))
self.horizontalLayout.addWidget(self.functionCheck)
self.displayCheck = QtGui.QCheckBox(self.frame)
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.displayCheck.setFont(font)
self.displayCheck.setChecked(True)
self.displayCheck.setObjectName(_fromUtf8("displayCheck"))
self.horizontalLayout.addWidget(self.displayCheck)
self.verticalLayout.addLayout(self.horizontalLayout)
self.waveGeneratorWidget = StimGenerator(self.frame)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.waveGeneratorWidget.sizePolicy().hasHeightForWidth())
self.waveGeneratorWidget.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.waveGeneratorWidget.setFont(font)
self.waveGeneratorWidget.setObjectName(_fromUtf8("waveGeneratorWidget"))
self.verticalLayout.addWidget(self.waveGeneratorWidget)
self.verticalLayout_2.addWidget(self.frame)
self.verticalLayout_3.addWidget(self.groupBox)
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
Form.setWindowTitle(_translate("Form", "Form", None))
self.groupBox.setTitle(_translate("Form", "GroupBox", None))
self.preSetCheck.setText(_translate("Form", "Pre-set", None))
self.holdingCheck.setText(_translate("Form", "Holding", None))
self.functionCheck.setText(_translate("Form", "Enable Function", None))
self.displayCheck.setText(_translate("Form", "Display", None))
from acq4.pyqtgraph import SpinBox, GroupBox
from acq4.util.generator.StimGenerator import StimGenerator
|
{
"content_hash": "02d47ce3e802310bc1324091adc24272",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 96,
"avg_line_length": 45.048,
"alnum_prop": 0.6876220919907654,
"repo_name": "tropp/acq4",
"id": "69e9063dbdc647bd091e99ba8cf210b52fbf7160",
"size": "5870",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "acq4/devices/DAQGeneric/AOChannelTemplate.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "AMPL",
"bytes": "3037"
},
{
"name": "Arduino",
"bytes": "18651"
},
{
"name": "Batchfile",
"bytes": "64"
},
{
"name": "C",
"bytes": "732694"
},
{
"name": "C++",
"bytes": "955052"
},
{
"name": "CSS",
"bytes": "716"
},
{
"name": "Matlab",
"bytes": "1752"
},
{
"name": "Processing",
"bytes": "13403"
},
{
"name": "Python",
"bytes": "5691736"
}
],
"symlink_target": ""
}
|
import sys
from abc import ABCMeta
from py4j.java_gateway import get_java_class
from pyflink.java_gateway import get_gateway
from pyflink.util.utils import load_java_class
__all__ = [
'StateBackend',
'MemoryStateBackend',
'FsStateBackend',
'RocksDBStateBackend',
'CustomStateBackend',
'PredefinedOptions']
if sys.version > '3':
xrange = range
def _from_j_state_backend(j_state_backend):
if j_state_backend is None:
return None
gateway = get_gateway()
JStateBackend = gateway.jvm.org.apache.flink.runtime.state.StateBackend
JMemoryStateBackend = gateway.jvm.org.apache.flink.runtime.state.memory.MemoryStateBackend
JFsStateBackend = gateway.jvm.org.apache.flink.runtime.state.filesystem.FsStateBackend
JRocksDBStateBackend = gateway.jvm.org.apache.flink.contrib.streaming.state.RocksDBStateBackend
j_clz = j_state_backend.getClass()
if not get_java_class(JStateBackend).isAssignableFrom(j_clz):
raise TypeError("The input %s is not an instance of StateBackend." % j_state_backend)
if get_java_class(JMemoryStateBackend).isAssignableFrom(j_state_backend.getClass()):
return MemoryStateBackend(j_memory_state_backend=j_state_backend)
elif get_java_class(JFsStateBackend).isAssignableFrom(j_state_backend.getClass()):
return FsStateBackend(j_fs_state_backend=j_state_backend)
elif get_java_class(JRocksDBStateBackend).isAssignableFrom(j_state_backend.getClass()):
return RocksDBStateBackend(j_rocks_db_state_backend=j_state_backend)
else:
return CustomStateBackend(j_state_backend) # users' customized state backend
class StateBackend(object):
"""
A **State Backend** defines how the state of a streaming application is stored and
checkpointed. Different State Backends store their state in different fashions, and use
different data structures to hold the state of a running application.
For example, the :class:`MemoryStateBackend` keeps working state in the memory of the
TaskManager and stores checkpoints in the memory of the JobManager. The backend is
lightweight and without additional dependencies, but not highly available and supports only
small state.
The :class:`FsStateBackend` keeps working state in the memory of the TaskManager and stores
    state checkpoints in a filesystem (typically a replicated highly-available filesystem,
like `HDFS <https://hadoop.apache.org/>`_, `Ceph <https://ceph.com/>`_,
`S3 <https://aws.amazon.com/documentation/s3/>`_, `GCS <https://cloud.google.com/storage/>`_,
etc).
The :class:`RocksDBStateBackend` stores working state in `RocksDB <http://rocksdb.org/>`_,
and checkpoints the state by default to a filesystem (similar to the :class:`FsStateBackend`).
**Raw Bytes Storage and Backends**
The :class:`StateBackend` creates services for *raw bytes storage* and for *keyed state*
and *operator state*.
The *raw bytes storage* (through the `org.apache.flink.runtime.state.CheckpointStreamFactory`)
is the fundamental service that simply stores bytes in a fault tolerant fashion. This service
is used by the JobManager to store checkpoint and recovery metadata and is typically also used
by the keyed- and operator state backends to store checkpointed state.
    The `org.apache.flink.runtime.state.AbstractKeyedStateBackend` and
`org.apache.flink.runtime.state.OperatorStateBackend` created by this state backend define how
to hold the working state for keys and operators. They also define how to checkpoint that
state, frequently using the raw bytes storage (via the
`org.apache.flink.runtime.state.CheckpointStreamFactory`). However, it is also possible that
for example a keyed state backend simply implements the bridge to a key/value store, and that
it does not need to store anything in the raw byte storage upon a checkpoint.
**Serializability**
    State Backends need to be serializable (`java.io.Serializable`), because they are
    distributed across parallel processes (for distributed execution) together with the
    streaming application code.
Because of that, :class:`StateBackend` implementations are meant to be like *factories* that
create the proper states stores that provide access to the persistent storage and hold the
keyed- and operator state data structures. That way, the State Backend can be very lightweight
(contain only configurations) which makes it easier to be serializable.
**Thread Safety**
State backend implementations have to be thread-safe. Multiple threads may be creating
streams and keyed-/operator state backends concurrently.
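    **Choosing a Backend**
    A state backend is typically set on the execution environment before a job is
    submitted. A minimal sketch (assuming a ``StreamExecutionEnvironment`` instance
    ``env`` that exposes ``set_state_backend``):
    Example:
    ::
        >>> env.set_state_backend(RocksDBStateBackend("file://var/checkpoints/"))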
"""
__metaclass__ = ABCMeta
def __init__(self, j_state_backend):
self._j_state_backend = j_state_backend
class MemoryStateBackend(StateBackend):
"""
This state backend holds the working state in the memory (JVM heap) of the TaskManagers.
The state backend checkpoints state directly to the JobManager's memory (hence the backend's
name), but the checkpoints will be persisted to a file system for high-availability setups and
savepoints. The MemoryStateBackend is consequently a FileSystem-based backend that can work
without a file system dependency in simple setups.
This state backend should be used only for experimentation, quick local setups,
or for streaming applications that have very small state: Because it requires checkpoints to
go through the JobManager's memory, larger state will occupy larger portions of the
JobManager's main memory, reducing operational stability.
For any other setup, the :class:`FsStateBackend` should be used. The :class:`FsStateBackend`
holds the working state on the TaskManagers in the same way, but checkpoints state directly to
files rather then to the JobManager's memory, thus supporting large state sizes.
**State Size Considerations**
State checkpointing with this state backend is subject to the following conditions:
- Each individual state must not exceed the configured maximum state size
      (see :func:`get_max_state_size`).
- All state from one task (i.e., the sum of all operator states and keyed states from all
chained operators of the task) must not exceed what the RPC system supports, which is
      by default < 10 MB. That limit can be configured up, but that is typically not advised.
- The sum of all states in the application times all retained checkpoints must comfortably
fit into the JobManager's JVM heap space.
**Persistence Guarantees**
For the use cases where the state sizes can be handled by this backend, the backend does
    guarantee persistence for savepoints, externalized checkpoints (if configured), and checkpoints
(when high-availability is configured).
**Configuration**
As for all state backends, this backend can either be configured within the application (by
creating the backend with the respective constructor parameters and setting it on the execution
environment) or by specifying it in the Flink configuration.
If the state backend was specified in the application, it may pick up additional configuration
    parameters from the Flink configuration. For example, if the backend is configured in the
application without a default savepoint directory, it will pick up a default savepoint
directory specified in the Flink configuration of the running job/cluster. That behavior is
implemented via the :func:`configure` method.
"""
# The default maximal size that the snapshotted memory state may have (5 MiBytes).
DEFAULT_MAX_STATE_SIZE = 5 * 1024 * 1024
def __init__(self,
checkpoint_path=None,
savepoint_path=None,
max_state_size=None,
using_asynchronous_snapshots=None,
j_memory_state_backend=None):
"""
Creates a new MemoryStateBackend, setting optionally the paths to persist checkpoint
metadata and savepoints to, as well as configuring state thresholds and asynchronous
operations.
WARNING: Increasing the size of this value beyond the default value
(:data:`DEFAULT_MAX_STATE_SIZE`) should be done with care.
        The checkpointed state needs to be sent to the JobManager via limited size RPC messages,
        and the JobManager needs to be able to hold all aggregated state in its memory.
Example:
::
>>> state_backend = MemoryStateBackend()
:param checkpoint_path: The path to write checkpoint metadata to. If none, the value from
the runtime configuration will be used.
:param savepoint_path: The path to write savepoints to. If none, the value from
the runtime configuration will be used.
:param max_state_size: The maximal size of the serialized state. If none, the
:data:`DEFAULT_MAX_STATE_SIZE` will be used.
:param using_asynchronous_snapshots: Flag to switch between synchronous and asynchronous
                                             snapshot mode. If none, the value configured in the
runtime configuration will be used.
:param j_memory_state_backend: For internal use, please keep none.
"""
if j_memory_state_backend is None:
gateway = get_gateway()
JTernaryBoolean = gateway.jvm.org.apache.flink.util.TernaryBoolean
JMemoryStateBackend = gateway.jvm.org.apache.flink.runtime.state.memory\
.MemoryStateBackend
if using_asynchronous_snapshots is None:
j_asynchronous_snapshots = JTernaryBoolean.UNDEFINED
elif using_asynchronous_snapshots is True:
j_asynchronous_snapshots = JTernaryBoolean.TRUE
elif using_asynchronous_snapshots is False:
j_asynchronous_snapshots = JTernaryBoolean.FALSE
else:
raise TypeError("Unsupported input for 'using_asynchronous_snapshots': %s, "
"the value of the parameter should be None or"
"True or False.")
if max_state_size is None:
max_state_size = JMemoryStateBackend.DEFAULT_MAX_STATE_SIZE
j_memory_state_backend = JMemoryStateBackend(checkpoint_path,
savepoint_path,
max_state_size,
j_asynchronous_snapshots)
self._j_memory_state_backend = j_memory_state_backend
super(MemoryStateBackend, self).__init__(j_memory_state_backend)
def get_max_state_size(self):
"""
Gets the maximum size that an individual state can have, as configured in the
constructor (by default :data:`DEFAULT_MAX_STATE_SIZE`).
:return: The maximum size that an individual state can have.
"""
return self._j_memory_state_backend.getMaxStateSize()
def is_using_asynchronous_snapshots(self):
"""
Gets whether the key/value data structures are asynchronously snapshotted.
If not explicitly configured, this is the default value of
``org.apache.flink.configuration.CheckpointingOptions.ASYNC_SNAPSHOTS``.
:return: True if the key/value data structures are asynchronously snapshotted,
false otherwise.
"""
return self._j_memory_state_backend.isUsingAsynchronousSnapshots()
def __str__(self):
return self._j_memory_state_backend.toString()
class FsStateBackend(StateBackend):
"""
This state backend holds the working state in the memory (JVM heap) of the TaskManagers.
The state backend checkpoints state as files to a file system (hence the backend's name).
Each checkpoint individually will store all its files in a subdirectory that includes the
checkpoint number, such as ``hdfs://namenode:port/flink-checkpoints/chk-17/``.
**State Size Considerations**
Working state is kept on the TaskManager heap. If a TaskManager executes multiple
tasks concurrently (if the TaskManager has multiple slots, or if slot-sharing is used)
then the aggregate state of all tasks needs to fit into that TaskManager's memory.
This state backend stores small state chunks directly with the metadata, to avoid creating
many small files. The threshold for that is configurable. When increasing this threshold, the
size of the checkpoint metadata increases. The checkpoint metadata of all retained completed
checkpoints needs to fit into the JobManager's heap memory. This is typically not a problem,
unless the threshold :func:`get_min_file_size_threshold` is increased significantly.
**Persistence Guarantees**
    Checkpoints from this state backend are as persistent and available as the filesystem that is
written to. If the file system is a persistent distributed file system, this state backend
supports highly available setups. The backend additionally supports savepoints and externalized
checkpoints.
**Configuration**
As for all state backends, this backend can either be configured within the application (by
creating the backend with the respective constructor parameters and setting it on the execution
environment) or by specifying it in the Flink configuration.
If the state backend was specified in the application, it may pick up additional configuration
    parameters from the Flink configuration. For example, if the backend is configured in the
application without a default savepoint directory, it will pick up a default savepoint
directory specified in the Flink configuration of the running job/cluster. That behavior is
implemented via the :func:`configure` method.
"""
def __init__(self,
checkpoint_directory_uri=None,
default_savepoint_directory_uri=None,
file_state_size_threshold=None,
write_buffer_size=None,
using_asynchronous_snapshots=None,
j_fs_state_backend=None):
"""
Creates a new state backend that stores its checkpoint data in the file system and location
defined by the given URI.
A file system for the file system scheme in the URI (e.g., 'file://', 'hdfs://', or
'S3://') must be accessible via ``org.apache.flink.core.fs.FileSystem.get(URI)``.
For a state backend targeting HDFS, this means that the URI must either specify the
authority (host and port), or that the Hadoop configuration that describes that information
must be in the classpath.
Example:
::
>>> state_backend = FsStateBackend("file://var/checkpoints/")
:param checkpoint_directory_uri: The path to write checkpoint metadata to, required.
:param default_savepoint_directory_uri: The path to write savepoints to. If none, the value
from the runtime configuration will be used, or
savepoint target locations need to be passed when
triggering a savepoint.
:param file_state_size_threshold: State below this size will be stored as part of the
metadata, rather than in files. If none, the value
configured in the runtime configuration will be used, or
the default value (1KB) if nothing is configured.
:param write_buffer_size: Write buffer size used to serialize state. If -1, the value
configured in the runtime configuration will be used, or the
default value (4KB) if nothing is configured.
:param using_asynchronous_snapshots: Flag to switch between synchronous and asynchronous
snapshot mode. If none, the value configured in
the runtime configuration will be used.
:param j_fs_state_backend: For internal use, please keep none.
"""
if j_fs_state_backend is None:
gateway = get_gateway()
JTernaryBoolean = gateway.jvm.org.apache.flink.util.TernaryBoolean
JFsStateBackend = gateway.jvm.org.apache.flink.runtime.state.filesystem\
.FsStateBackend
JPath = gateway.jvm.org.apache.flink.core.fs.Path
if checkpoint_directory_uri is None:
raise ValueError("The parameter 'checkpoint_directory_uri' is required!")
j_checkpoint_directory_uri = JPath(checkpoint_directory_uri).toUri()
if default_savepoint_directory_uri is None:
j_default_savepoint_directory_uri = None
else:
j_default_savepoint_directory_uri = JPath(default_savepoint_directory_uri).toUri()
if file_state_size_threshold is None:
file_state_size_threshold = -1
if write_buffer_size is None:
write_buffer_size = -1
if using_asynchronous_snapshots is None:
j_asynchronous_snapshots = JTernaryBoolean.UNDEFINED
elif using_asynchronous_snapshots is True:
j_asynchronous_snapshots = JTernaryBoolean.TRUE
elif using_asynchronous_snapshots is False:
j_asynchronous_snapshots = JTernaryBoolean.FALSE
else:
raise TypeError("Unsupported input for 'using_asynchronous_snapshots': %s, "
"the value of the parameter should be None or"
"True or False.")
j_fs_state_backend = JFsStateBackend(j_checkpoint_directory_uri,
j_default_savepoint_directory_uri,
file_state_size_threshold,
write_buffer_size,
j_asynchronous_snapshots)
self._j_fs_state_backend = j_fs_state_backend
super(FsStateBackend, self).__init__(j_fs_state_backend)
def get_checkpoint_path(self):
"""
Gets the base directory where all the checkpoints are stored.
The job-specific checkpoint directory is created inside this directory.
:return: The base directory for checkpoints.
"""
return self._j_fs_state_backend.getCheckpointPath().toString()
def get_min_file_size_threshold(self):
"""
Gets the threshold below which state is stored as part of the metadata, rather than in
files. This threshold ensures that the backend does not create a large amount of very
small files, where potentially the file pointers are larger than the state itself.
If not explicitly configured, this is the default value of
``org.apache.flink.configuration.CheckpointingOptions.FS_SMALL_FILE_THRESHOLD``.
:return: The file size threshold, in bytes.
"""
return self._j_fs_state_backend.getMinFileSizeThreshold()
def is_using_asynchronous_snapshots(self):
"""
Gets whether the key/value data structures are asynchronously snapshotted.
If not explicitly configured, this is the default value of
``org.apache.flink.configuration.CheckpointingOptions.ASYNC_SNAPSHOTS``.
:return: True if the key/value data structures are asynchronously snapshotted,
false otherwise.
"""
return self._j_fs_state_backend.isUsingAsynchronousSnapshots()
def get_write_buffer_size(self):
"""
Gets the write buffer size for created checkpoint stream.
If not explicitly configured, this is the default value of
``org.apache.flink.configuration.CheckpointingOptions.FS_WRITE_BUFFER_SIZE``.
:return: The write buffer size, in bytes.
"""
return self._j_fs_state_backend.getWriteBufferSize()
class RocksDBStateBackend(StateBackend):
"""
A State Backend that stores its state in ``RocksDB``. This state backend can
store very large state that exceeds memory and spills to disk.
All key/value state (including windows) is stored in the key/value index of RocksDB.
For persistence against loss of machines, checkpoints take a snapshot of the
RocksDB database, and persist that snapshot in a file system (by default) or
another configurable state backend.
The behavior of the RocksDB instances can be parametrized by setting RocksDB Options
using the methods :func:`set_predefined_options` and :func:`set_options`.
"""
def __init__(self,
checkpoint_data_uri=None,
enable_incremental_checkpointing=None,
checkpoint_stream_backend=None,
j_rocks_db_state_backend=None):
"""
Creates a new :class:`RocksDBStateBackend` that stores its checkpoint data in the given
state backend or the location of given URI.
If using state backend, typically, one would supply a filesystem or database state backend
here where the snapshots from RocksDB would be stored.
If using URI, a state backend that stores checkpoints in HDFS or S3 must specify the file
system host and port in the URI, or have the Hadoop configuration that describes the file
system (host / high-availability group / possibly credentials) either referenced from the
Flink config, or included in the classpath.
Example:
::
>>> state_backend = RocksDBStateBackend("file://var/checkpoints/")
:param checkpoint_data_uri: The URI describing the filesystem and path to the checkpoint
data directory.
:param enable_incremental_checkpointing: True if incremental checkpointing is enabled.
:param checkpoint_stream_backend: The backend write the checkpoint streams to.
:param j_rocks_db_state_backend: For internal use, please keep none.
"""
if j_rocks_db_state_backend is None:
gateway = get_gateway()
JTernaryBoolean = gateway.jvm.org.apache.flink.util.TernaryBoolean
JRocksDBStateBackend = gateway.jvm.org.apache.flink.contrib.streaming.state \
.RocksDBStateBackend
if enable_incremental_checkpointing not in (None, True, False):
raise TypeError("Unsupported input for 'enable_incremental_checkpointing': %s, "
"the value of the parameter should be None or"
"True or False.")
if checkpoint_data_uri is not None:
if enable_incremental_checkpointing is None:
j_rocks_db_state_backend = JRocksDBStateBackend(checkpoint_data_uri)
else:
j_rocks_db_state_backend = \
JRocksDBStateBackend(checkpoint_data_uri, enable_incremental_checkpointing)
elif isinstance(checkpoint_stream_backend, StateBackend):
if enable_incremental_checkpointing is None:
j_enable_incremental_checkpointing = JTernaryBoolean.UNDEFINED
elif enable_incremental_checkpointing is True:
j_enable_incremental_checkpointing = JTernaryBoolean.TRUE
else:
j_enable_incremental_checkpointing = JTernaryBoolean.FALSE
j_rocks_db_state_backend = \
JRocksDBStateBackend(checkpoint_stream_backend._j_state_backend,
j_enable_incremental_checkpointing)
self._j_rocks_db_state_backend = j_rocks_db_state_backend
super(RocksDBStateBackend, self).__init__(j_rocks_db_state_backend)
def get_checkpoint_backend(self):
"""
Gets the state backend that this RocksDB state backend uses to persist
its bytes to.
This RocksDB state backend only implements the RocksDB specific parts, it
relies on the 'CheckpointBackend' to persist the checkpoint and savepoint bytes
streams.
:return: The state backend to persist the checkpoint and savepoint bytes streams.
"""
j_state_backend = self._j_rocks_db_state_backend.getCheckpointBackend()
return _from_j_state_backend(j_state_backend)
def set_db_storage_paths(self, *paths):
"""
Sets the directories in which the local RocksDB database puts its files (like SST and
metadata files). These directories do not need to be persistent, they can be ephemeral,
meaning that they are lost on a machine failure, because state in RocksDB is persisted
in checkpoints.
If nothing is configured, these directories default to the TaskManager's local
temporary file directories.
Each distinct state will be stored in one path, but when the state backend creates
multiple states, they will store their files on different paths.
Passing ``None`` to this function restores the default behavior, where the configured
temp directories will be used.
        :param paths: The paths across which the local RocksDB database files will be spread. This
parameter is optional.
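        Example (the paths below are illustrative):
        ::
            >>> state_backend.set_db_storage_paths("/data1/rocksdb", "/data2/rocksdb")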
"""
if len(paths) < 1:
self._j_rocks_db_state_backend.setDbStoragePath(None)
else:
gateway = get_gateway()
j_path_array = gateway.new_array(gateway.jvm.String, len(paths))
for i in xrange(0, len(paths)):
j_path_array[i] = paths[i]
self._j_rocks_db_state_backend.setDbStoragePaths(j_path_array)
def get_db_storage_paths(self):
"""
        Gets the configured local DB storage paths, or None, if none were configured.
Under these directories on the TaskManager, RocksDB stores its SST files and
        metadata files. These directories do not need to be persistent, they can be ephemeral,
meaning that they are lost on a machine failure, because state in RocksDB is persisted
in checkpoints.
If nothing is configured, these directories default to the TaskManager's local
temporary file directories.
:return: The list of configured local DB storage paths.
"""
return list(self._j_rocks_db_state_backend.getDbStoragePaths())
def is_incremental_checkpoints_enabled(self):
"""
Gets whether incremental checkpoints are enabled for this state backend.
:return: True if incremental checkpoints are enabled, false otherwise.
"""
return self._j_rocks_db_state_backend.isIncrementalCheckpointsEnabled()
def is_ttl_compaction_filter_enabled(self):
"""
Gets whether compaction filter to cleanup state with TTL is enabled.
:return: True if enabled, false otherwise.
"""
return self._j_rocks_db_state_backend.isTtlCompactionFilterEnabled()
def enable_ttl_compaction_filter(self):
"""
Enable compaction filter to cleanup state with TTL.
.. note::
User can still decide in state TTL configuration in state descriptor
whether the filter is active for particular state or not.
"""
self._j_rocks_db_state_backend.enableTtlCompactionFilter()
def set_predefined_options(self, options):
"""
Sets the predefined options for RocksDB.
        If user-configured options within ``RocksDBConfigurableOptions`` are set (through
        flink-conf.yaml) or a user-defined options factory is set (via :func:`set_options`),
then the options from the factory are applied on top of the here specified
predefined options and customized options.
Example:
::
>>> state_backend.set_predefined_options(PredefinedOptions.SPINNING_DISK_OPTIMIZED)
        :param options: The options to set (must not be None), see :class:`PredefinedOptions`.
"""
gateway = get_gateway()
JPredefinedOptions = gateway.jvm.org.apache.flink.contrib.streaming.state.PredefinedOptions
if options == PredefinedOptions.DEFAULT:
self._j_rocks_db_state_backend.setPredefinedOptions(JPredefinedOptions.DEFAULT)
elif options == PredefinedOptions.SPINNING_DISK_OPTIMIZED:
self._j_rocks_db_state_backend.setPredefinedOptions(
JPredefinedOptions.SPINNING_DISK_OPTIMIZED)
elif options == PredefinedOptions.SPINNING_DISK_OPTIMIZED_HIGH_MEM:
self._j_rocks_db_state_backend.setPredefinedOptions(
JPredefinedOptions.SPINNING_DISK_OPTIMIZED_HIGH_MEM)
elif options == PredefinedOptions.FLASH_SSD_OPTIMIZED:
self._j_rocks_db_state_backend.setPredefinedOptions(
JPredefinedOptions.FLASH_SSD_OPTIMIZED)
else:
raise TypeError("Unsupported options: %s, the supported options are: "
"PredefinedOptions.DEFAULT, PredefinedOptions.SPINNING_DISK_OPTIMIZED,"
" PredefinedOptions.SPINNING_DISK_OPTIMIZED_HIGH_MEM and "
"PredefinedOptions.FLASH_SSD_OPTIMIZED")
def get_predefined_options(self):
"""
Gets the current predefined options for RocksDB.
        The default options (if nothing was set via :func:`set_predefined_options`)
are :data:`PredefinedOptions.DEFAULT`.
        If user-configured options within ``RocksDBConfigurableOptions`` are set (through
        flink-conf.yaml) or a user-defined options factory is set (via :func:`set_options`),
then the options from the factory are applied on top of the predefined and customized
options.
.. seealso:: :func:`set_predefined_options`
:return: Current predefined options.
"""
j_predefined_options = self._j_rocks_db_state_backend.getPredefinedOptions()
gateway = get_gateway()
JPredefinedOptions = gateway.jvm.org.apache.flink.contrib.streaming.state.PredefinedOptions
if j_predefined_options == JPredefinedOptions.DEFAULT:
return PredefinedOptions.DEFAULT
elif j_predefined_options == JPredefinedOptions.FLASH_SSD_OPTIMIZED:
return PredefinedOptions.FLASH_SSD_OPTIMIZED
elif j_predefined_options == JPredefinedOptions.SPINNING_DISK_OPTIMIZED:
return PredefinedOptions.SPINNING_DISK_OPTIMIZED
elif j_predefined_options == JPredefinedOptions.SPINNING_DISK_OPTIMIZED_HIGH_MEM:
return PredefinedOptions.SPINNING_DISK_OPTIMIZED_HIGH_MEM
else:
raise Exception("Unsupported java options: %s" % j_predefined_options)
def set_options(self, options_factory_class_name):
"""
Sets ``org.rocksdb.Options`` for the RocksDB instances.
Because the options are not serializable and hold native code references,
they must be specified through a factory.
The options created by the factory here are applied on top of the pre-defined
options profile selected via :func:`set_predefined_options`.
If the pre-defined options profile is the default (:data:`PredefinedOptions.DEFAULT`),
then the factory fully controls the RocksDB options.
:param options_factory_class_name: The fully-qualified class name of the options
factory in Java that lazily creates the RocksDB options.
The options factory must have a default constructor.
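        Example (the factory class name below is hypothetical):
        ::
            >>> state_backend.set_options("org.example.MyOptionsFactory")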
"""
gateway = get_gateway()
JOptionsFactory = gateway.jvm.org.apache.flink.contrib.streaming.state.OptionsFactory
j_options_factory_clz = load_java_class(options_factory_class_name)
if not get_java_class(JOptionsFactory).isAssignableFrom(j_options_factory_clz):
raise ValueError("The input class not implements OptionsFactory.")
self._j_rocks_db_state_backend.setOptions(j_options_factory_clz.newInstance())
def get_options(self):
"""
Gets the fully-qualified class name of the options factory in Java that lazily creates
the RocksDB options.
:return: The fully-qualified class name of the options factory in Java.
"""
j_options_factory = self._j_rocks_db_state_backend.getOptions()
if j_options_factory is not None:
return j_options_factory.getClass().getName()
else:
return None
def get_number_of_transfering_threads(self):
"""
Gets the number of threads used to transfer files while snapshotting/restoring.
:return: The number of threads used to transfer files while snapshotting/restoring.
"""
return self._j_rocks_db_state_backend.getNumberOfTransferingThreads()
def set_number_of_transfering_threads(self, number_of_transfering_threads):
"""
Sets the number of threads used to transfer files while snapshotting/restoring.
:param number_of_transfering_threads: The number of threads used to transfer files while
snapshotting/restoring.
"""
self._j_rocks_db_state_backend.setNumberOfTransferingThreads(number_of_transfering_threads)
def __str__(self):
return self._j_rocks_db_state_backend.toString()
class PredefinedOptions(object):
"""
The :class:`PredefinedOptions` are configuration settings for the :class:`RocksDBStateBackend`.
The various pre-defined choices are configurations that have been empirically
determined to be beneficial for performance under different settings.
Some of these settings are based on experiments by the Flink community, some follow
guides from the RocksDB project.
:data:`DEFAULT`:
Default options for all settings, except that writes are not forced to the
disk.
.. note::
Because Flink does not rely on RocksDB data on disk for recovery,
there is no need to sync data to stable storage.
:data:`SPINNING_DISK_OPTIMIZED`:
Pre-defined options for regular spinning hard disks.
This constant configures RocksDB with some options that lead empirically
to better performance when the machines executing the system use
regular spinning hard disks.
The following options are set:
- setCompactionStyle(CompactionStyle.LEVEL)
- setLevelCompactionDynamicLevelBytes(true)
- setIncreaseParallelism(4)
- setUseFsync(false)
- setDisableDataSync(true)
- setMaxOpenFiles(-1)
.. note::
Because Flink does not rely on RocksDB data on disk for recovery,
there is no need to sync data to stable storage.
:data:`SPINNING_DISK_OPTIMIZED_HIGH_MEM`:
Pre-defined options for better performance on regular spinning hard disks,
at the cost of a higher memory consumption.
.. note::
These settings will cause RocksDB to consume a lot of memory for
        block caching and compactions. If you experience out-of-memory problems related to
RocksDB, consider switching back to :data:`SPINNING_DISK_OPTIMIZED`.
The following options are set:
- setLevelCompactionDynamicLevelBytes(true)
- setTargetFileSizeBase(256 MBytes)
- setMaxBytesForLevelBase(1 GByte)
- setWriteBufferSize(64 MBytes)
- setIncreaseParallelism(4)
- setMinWriteBufferNumberToMerge(3)
- setMaxWriteBufferNumber(4)
- setUseFsync(false)
- setMaxOpenFiles(-1)
- BlockBasedTableConfig.setBlockCacheSize(256 MBytes)
    - BlockBasedTableConfig.setBlockSize(128 KBytes)
.. note::
Because Flink does not rely on RocksDB data on disk for recovery,
there is no need to sync data to stable storage.
:data:`FLASH_SSD_OPTIMIZED`:
Pre-defined options for Flash SSDs.
This constant configures RocksDB with some options that lead empirically
to better performance when the machines executing the system use SSDs.
The following options are set:
- setIncreaseParallelism(4)
- setUseFsync(false)
- setDisableDataSync(true)
- setMaxOpenFiles(-1)
.. note::
Because Flink does not rely on RocksDB data on disk for recovery,
there is no need to sync data to stable storage.
"""
DEFAULT = 0
SPINNING_DISK_OPTIMIZED = 1
SPINNING_DISK_OPTIMIZED_HIGH_MEM = 2
FLASH_SSD_OPTIMIZED = 3
class CustomStateBackend(StateBackend):
"""
A wrapper of customized java state backend created from the provided `StateBackendFactory`.
"""
def __init__(self, j_custom_state_backend):
super(CustomStateBackend, self).__init__(j_custom_state_backend)
|
{
"content_hash": "325e8cb5e951aa873035a5a21abee626",
"timestamp": "",
"source": "github",
"line_count": 781,
"max_line_length": 99,
"avg_line_length": 47.64276568501921,
"alnum_prop": 0.6731167190733425,
"repo_name": "fhueske/flink",
"id": "6d23850c0bce11bfc1c44f78bd55e85b6d7cdc12",
"size": "38167",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "flink-python/pyflink/datastream/state_backend.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "4588"
},
{
"name": "CSS",
"bytes": "57936"
},
{
"name": "Clojure",
"bytes": "93205"
},
{
"name": "Dockerfile",
"bytes": "10793"
},
{
"name": "FreeMarker",
"bytes": "17422"
},
{
"name": "HTML",
"bytes": "224476"
},
{
"name": "Java",
"bytes": "48798371"
},
{
"name": "JavaScript",
"bytes": "1829"
},
{
"name": "Makefile",
"bytes": "5134"
},
{
"name": "Python",
"bytes": "809835"
},
{
"name": "Scala",
"bytes": "13339497"
},
{
"name": "Shell",
"bytes": "485338"
},
{
"name": "TypeScript",
"bytes": "243702"
}
],
"symlink_target": ""
}
|
import argparse, os, ConfigParser, sys, re
from pysandesh.sandesh_base import *
from pysandesh.gen_py.sandesh.ttypes import SandeshLevel
from device_config import DeviceConfig
import discoveryclient.client as client
from sandesh_common.vns.ttypes import Module
from sandesh_common.vns.constants import ModuleNames, \
API_SERVER_DISCOVERY_SERVICE_NAME
class CfgParser(object):
CONF_DEFAULT_PATH = '/etc/contrail/contrail-snmp-collector.conf'
def __init__(self, argv):
self._devices = []
self._args = None
self.__pat = None
self._argv = argv or ' '.join(sys.argv[1:])
def parse(self):
'''
command line example
contrail-snmp-scanner --log_level SYS_DEBUG
--logging_level DEBUG
--log_category test
--log_file <stdout>
--use_syslog
--syslog_facility LOG_USER
--disc_server_ip 127.0.0.1
--disc_server_port 5998
--conf_file /etc/contrail/contrail-snmp-scanner.conf
conf file example:
[DEFAULTS]
log_local = 0
log_level = SYS_DEBUG
log_category =
log_file = /var/log/contrail/contrail-analytics-api.log
file = /etc/contrail/snmp-dev.ini
/etc/contrail/snmp-dev.ini example:
#snmp version 1 or 2
[1.1.1.190]
Community = public
Version = 2
#snmp version 3
[1.1.1.191]
Version = 3
SecLevel = authPriv
AuthProto = SHA
AuthPass = foo
PrivProto = AES
PrivPass = foo
SecName = snmpuser
# Mibs default to all, to get a subset
Mibs = LldpTable, ArpTable
'''
# Source any specified config/ini file
# Turn off help, so we print all options in response to -h
conf_parser = argparse.ArgumentParser(add_help=False)
kwargs = {'help': "Specify config file", 'metavar':"FILE"}
if os.path.exists(self.CONF_DEFAULT_PATH):
kwargs['default'] = self.CONF_DEFAULT_PATH
conf_parser.add_argument("-c", "--conf_file", **kwargs)
args, remaining_argv = conf_parser.parse_known_args(self._argv.split())
defaults = {
'collectors' : ['127.0.0.1:8086'],
'log_local' : False,
'log_level' : SandeshLevel.SYS_DEBUG,
'log_category' : '',
'log_file' : Sandesh._DEFAULT_LOG_FILE,
'use_syslog' : False,
'syslog_facility' : Sandesh._DEFAULT_SYSLOG_FACILITY,
'scan_frequency' : 600,
'http_server_port': 5920,
}
ksopts = {
'admin_user': 'user1',
'admin_password': 'password1',
'admin_tenant_name': 'default-domain'
}
disc_opts = {
'disc_server_ip' : None,
'disc_server_port' : 5998,
}
config = None
if args.conf_file:
config = ConfigParser.SafeConfigParser()
config.optionxform = str
config.read([args.conf_file])
defaults.update(dict(config.items("DEFAULTS")))
if 'KEYSTONE' in config.sections():
ksopts.update(dict(config.items("KEYSTONE")))
if 'DISCOVERY' in config.sections():
disc_opts.update(dict(config.items('DISCOVERY')))
# Override with CLI options
        # Don't suppress add_help here so it will handle -h
parser = argparse.ArgumentParser(
# Inherit options from config_parser
parents=[conf_parser],
# print script description with -h/--help
description=__doc__,
# Don't mess with format of description
formatter_class=argparse.RawDescriptionHelpFormatter,
)
defaults.update(ksopts)
defaults.update(disc_opts)
parser.set_defaults(**defaults)
parser.add_argument("--collectors",
help="List of Collector IP addresses in ip:port format",
nargs="+")
parser.add_argument(
"--log_file",
help="Filename for the logs to be written to")
parser.add_argument("--log_local", action="store_true",
help="Enable local logging of sandesh messages")
parser.add_argument(
"--log_category",
help="Category filter for local logging of sandesh messages")
parser.add_argument(
"--log_level",
help="Severity level for local logging of sandesh messages")
parser.add_argument("--use_syslog",
action="store_true",
help="Use syslog for logging")
parser.add_argument("--syslog_facility",
help="Syslog facility to receive log lines")
parser.add_argument("--scan_frequency", type=int,
help="Time between snmp poll")
parser.add_argument("--http_server_port", type=int,
help="introspect server port")
parser.add_argument("--admin_user",
help="Name of keystone admin user")
parser.add_argument("--admin_password",
help="Password of keystone admin user")
parser.add_argument("--admin_tenant_name",
help="Tenant name for keystone admin user")
#parser.add_argument("--discovery_server",
# help="ip:port of dicovery server")
parser.add_argument("--disc_server_ip",
help="Discovery Server IP address")
parser.add_argument("--disc_server_port", type=int,
help="Discovery Server port")
group = parser.add_mutually_exclusive_group(required=False)
group.add_argument("--device-config-file",
help="where to look for snmp credentials")
group.add_argument("--api_server",
help="ip:port of api-server for snmp credentials")
self._args = parser.parse_args(remaining_argv)
if type(self._args.collectors) is str:
self._args.collectors = self._args.collectors.split()
self._args.config_sections = config
self._disc = None
def devices(self):
if self._args.device_config_file:
self._devices = DeviceConfig.fom_file(
self._args.device_config_file)
elif self._args.api_server:
self._devices = DeviceConfig.fom_api_server(
self._args.api_server,
self._args.admin_user, self._args.admin_password,
self._args.admin_tenant_name)
elif self._args.disc_server_port:
try:
self._devices = DeviceConfig.fom_api_server(
self.get_api_svr(), self._args.admin_user,
self._args.admin_password, self._args.admin_tenant_name)
except Exception as e:
self._devices = []
for d in self._devices:
yield d
def get_api_svr(self):
if self._disc is None:
self._disc = client.DiscoveryClient(*self.discovery_params())
a = self._disc.subscribe(API_SERVER_DISCOVERY_SERVICE_NAME, 0)
d = a.read()
return d[-1]['ip-address'] + ':' + d[-1]['port']
def discovery_params(self):
if self._args.disc_server_ip:
ip, port = self._args.disc_server_ip, \
self._args.disc_server_port
else:
ip, port = '127.0.0.1', self._args.disc_server_port
return ip, port, ModuleNames[Module.CONTRAIL_SNMP_COLLECTOR]
def collectors(self):
return self._args.collectors
def log_local(self):
return self._args.log_local
def log_category(self):
return self._args.log_category
def log_level(self):
return self._args.log_level
def log_file(self):
return self._args.log_file
def use_syslog(self):
return self._args.use_syslog
def syslog_facility(self):
return self._args.syslog_facility
def frequency(self):
return self._args.scan_frequency
def http_port(self):
return self._args.http_server_port
|
{
"content_hash": "7fd7f74fb4f82ac27f7d3b6ebead9ac4",
"timestamp": "",
"source": "github",
"line_count": 221,
"max_line_length": 79,
"avg_line_length": 36.51131221719457,
"alnum_prop": 0.5751642086999629,
"repo_name": "srajag/contrail-controller",
"id": "fceb15241b00614abf4a001821d007b377fd4ee9",
"size": "8138",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/analytics/contrail-snmp-collector/contrail_snmp_collector/snmpcfg.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "80551"
},
{
"name": "C",
"bytes": "44989"
},
{
"name": "C++",
"bytes": "14871796"
},
{
"name": "CSS",
"bytes": "531"
},
{
"name": "Java",
"bytes": "171966"
},
{
"name": "Lua",
"bytes": "7673"
},
{
"name": "Makefile",
"bytes": "12439"
},
{
"name": "Objective-C",
"bytes": "720"
},
{
"name": "Protocol Buffer",
"bytes": "1120"
},
{
"name": "Python",
"bytes": "3008184"
},
{
"name": "Shell",
"bytes": "54611"
},
{
"name": "Thrift",
"bytes": "40763"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
from flexget import options
from flexget.event import event
from flexget.manager import Session
from flexget.terminal import TerminalTable, TerminalTableError, table_parser, console
from . import db
def do_cli(manager, options):
if options.action == 'clear':
num = db.clear_entries(options.task, all=True)
console('%s entries cleared from backlog.' % num)
else:
header = ['Title', 'Task', 'Expires']
table_data = [header]
with Session() as session:
entries = db.get_entries(options.task, session=session)
for entry in entries:
table_data.append(
[entry.title, entry.task, entry.expire.strftime('%Y-%m-%d %H:%M')]
)
try:
table = TerminalTable(options.table_type, table_data, wrap_columns=[0])
console(table.output)
except TerminalTableError as e:
console('ERROR: %s' % str(e))
@event('options.register')
def register_options():
parser = options.register_command(
'backlog', do_cli, help='View or clear entries from backlog plugin', parents=[table_parser]
)
parser.add_argument(
'action',
choices=['list', 'clear'],
help='Choose to show items in backlog, or clear all of them',
)
parser.add_argument('task', nargs='?', help='Limit to specific task (if supplied)')
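# Illustrative CLI invocations of the command registered above (the task name
# is hypothetical):
#   flexget backlog list
#   flexget backlog clear my-task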
|
{
"content_hash": "476730e9c5b61c58fb60630c55e1ae1a",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 99,
"avg_line_length": 35.906976744186046,
"alnum_prop": 0.6321243523316062,
"repo_name": "tobinjt/Flexget",
"id": "7d2fa9b8a83e49aff89491891d7edcce7d1352ba",
"size": "1544",
"binary": false,
"copies": "4",
"ref": "refs/heads/develop",
"path": "flexget/components/backlog/cli.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "11875"
},
{
"name": "Dockerfile",
"bytes": "2338"
},
{
"name": "HTML",
"bytes": "79800"
},
{
"name": "JavaScript",
"bytes": "263723"
},
{
"name": "Python",
"bytes": "3492888"
},
{
"name": "SRecode Template",
"bytes": "3"
},
{
"name": "Shell",
"bytes": "1576"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
# import frappe
from frappe.model.document import Document
class TwilioNumberGroup(Document):
pass
|
{
"content_hash": "03f16d77db26df08e01198aa7d543bfe",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 42,
"avg_line_length": 23.5,
"alnum_prop": 0.8085106382978723,
"repo_name": "adityahase/frappe",
"id": "04cb9ae146052b65bdd13330077823882f9553fc",
"size": "275",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "frappe/integrations/doctype/twilio_number_group/twilio_number_group.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "288806"
},
{
"name": "HTML",
"bytes": "209164"
},
{
"name": "JavaScript",
"bytes": "2350450"
},
{
"name": "Less",
"bytes": "160693"
},
{
"name": "Makefile",
"bytes": "99"
},
{
"name": "Python",
"bytes": "3035663"
},
{
"name": "SCSS",
"bytes": "45340"
},
{
"name": "Shell",
"bytes": "517"
},
{
"name": "Vue",
"bytes": "73943"
}
],
"symlink_target": ""
}
|
import logging
import os
import sys
import yaml
from pushmanager.core.util import dict_copy_keys
configuration_file = os.environ.get('SERVICE_ENV_CONFIG_PATH')
Settings = {}
try:
with open(configuration_file) as settings_yaml:
Settings = yaml.safe_load(settings_yaml)
except Exception:
    logging.error("Cannot load configuration from '%s'." % configuration_file)
    sys.exit(1)
# JS files in static/js need to know some of the configuration options
# too, but we do not have to export everything, just what's
# needed. This is what's needed. We're only setting up/defining keys
# here and will copy values from Settings.
JSSettings = {
'main_app': {
'servername': None,
'port': None,
},
'buildbot': {
'servername': None,
},
'reviewboard': {
'servername': None,
},
'trac': {
'servername': None,
},
'git': {
'main_repository': None,
},
'check_sites_bookmarklet': None,
}
dict_copy_keys(to_dict=JSSettings, from_dict=Settings)
__all__ = ['Settings', 'JSSettings']
|
{
"content_hash": "3386300df3150c37d71179042688537b",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 79,
"avg_line_length": 23.711111111111112,
"alnum_prop": 0.6401124648547329,
"repo_name": "asottile/pushmanager",
"id": "c35c56e9e17f9d18dce48f868ef87de04f2e70ac",
"size": "1091",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pushmanager/core/settings.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "15229"
},
{
"name": "JavaScript",
"bytes": "33747"
},
{
"name": "Python",
"bytes": "273240"
},
{
"name": "Shell",
"bytes": "835"
}
],
"symlink_target": ""
}
|
import json
from .util import create_url, normalize_connector_config
class ConnectorAPI:
"""Access Data Connector API which handles Data Connector.
This class is inherited by :class:`tdclient.api.API`.
"""
def connector_guess(self, job):
"""Guess the Data Connector configuration
Args:
job (dict): :class:`dict` representation of `seed.yml`
See Also: https://www.embulk.org/docs/built-in.html#guess-executor
Returns:
:class:`dict`: The configuration of the Data Connector.
Examples:
>>> config = {
... "in": {
... "type": "s3",
... "bucket": "your-bucket",
... "path_prefix": "logs/csv-",
... "access_key_id": "YOUR-AWS-ACCESS-KEY",
... "secret_access_key": "YOUR-AWS-SECRET-KEY"
... },
... "out": {"mode": "append"},
... "exec": {"guess_plugins": ["json", "query_string"]},
... }
>>> td.api.connector_guess(config)
{'config': {'in': {'type': 's3',
'bucket': 'your-bucket',
'path_prefix': 'logs/csv-',
'access_key_id': 'YOUR-AWS-ACCESS-KEY',
'secret_access_key': 'YOU-AWS-SECRET-KEY',
'parser': {'charset': 'UTF-8',
'newline': 'LF',
'type': 'csv',
'delimiter': ',',
'quote': '"',
'escape': '"',
'trim_if_not_quoted': False,
'skip_header_lines': 1,
'allow_extra_columns': False,
'allow_optional_columns': False,
'columns': [{'name': 'sepal.length', 'type': 'double'},
{'name': 'sepal.width', 'type': 'double'},
{'name': 'petal.length', 'type': 'double'},
{'name': 'petal.width', 'type': 'string'},
{'name': 'variety', 'type': 'string'}]}},
'out': {'mode': 'append'},
            'exec': {'guess_plugins': ['json', 'query_string']},
'filters': [{'rules': [{'rule': 'upper_to_lower'},
{'pass_types': ['a-z', '0-9'],
'pass_characters': '_',
'replace': '_',
'rule': 'character_types'},
{'pass_types': ['a-z'],
'pass_characters': '_',
'prefix': '_',
'rule': 'first_character_types'},
{'rule': 'unique_number_suffix', 'max_length': 128}],
'type': 'rename'},
{'from_value': {'mode': 'upload_time'},
'to_column': {'name': 'time'},
'type': 'add_time'}]}}
"""
headers = {"content-type": "application/json; charset=utf-8"}
if isinstance(job, dict):
job = {"config": normalize_connector_config(job)}
payload = json.dumps(job).encode("utf-8")
else:
            # Not checking the format; assume the caller passed well-formed data
payload = job
with self.post("/v3/bulk_loads/guess", payload, headers=headers) as res:
code, body = res.status, res.read()
if code != 200:
self.raise_error("DataConnector configuration guess failed", res, body)
return self.checked_json(body, [])
def connector_preview(self, job):
"""Show the preview of the Data Connector job.
Args:
job (dict): :class:`dict` representation of `load.yml`
Returns:
:class:`dict`
"""
headers = {"content-type": "application/json; charset=utf-8"}
payload = json.dumps(job).encode("utf-8") if isinstance(job, dict) else job
with self.post("/v3/bulk_loads/preview", payload, headers=headers) as res:
code, body = res.status, res.read()
if code != 200:
self.raise_error("DataConnector job preview failed", res, body)
return self.checked_json(body, [])
def connector_issue(self, db, table, job):
"""Create a Data Connector job.
Args:
db (str): name of the database to perform connector job
table (str): name of the table to perform connector job
job (dict): :class:`dict` representation of `load.yml`
Returns:
str: job Id
"""
headers = {"content-type": "application/json; charset=utf-8"}
params = dict(job)
params["database"] = db
params["table"] = table
payload = json.dumps(params).encode("utf-8")
with self.post(
create_url("/v3/job/issue/bulkload/{db}", db=db), payload, headers=headers
) as res:
code, body = res.status, res.read()
if code != 200:
self.raise_error("DataConnector job issuing failed", res, body)
js = self.checked_json(body, ["job_id"])
return str(js["job_id"])
def connector_list(self):
"""Show the list of available Data Connector sessions.
Returns:
:class:`list`
"""
with self.get("/v3/bulk_loads") as res:
code, body = res.status, res.read()
if code != 200:
self.raise_error("DataConnectorSession list retrieve failed", res, body)
# cannot use `checked_json` since `GET /v3/bulk_loads` returns an array
return json.loads(body.decode("utf-8"))
def connector_create(self, name, database, table, job, params=None):
"""Create a Data Connector session.
Args:
name (str): name of the connector job
database (str): name of the database to perform connector job
table (str): name of the table to perform connector job
job (dict): :class:`dict` representation of `load.yml`
params (dict, optional): Extra parameters
- config (str):
Embulk configuration as JSON format.
See also https://www.embulk.org/docs/built-in.html#embulk-configuration-file-format
- cron (str, optional):
Schedule of the query.
{``"@daily"``, ``"@hourly"``, ``"10 * * * *"`` (custom cron)}
See also: https://tddocs.atlassian.net/wiki/spaces/PD/pages/1084633/Scheduling+Jobs+Using+TD+Console
- delay (int, optional):
A delay ensures all buffered events are imported
before running the query. Default: 0
- database (str):
Target database for the Data Connector session
- name (str):
Name of the Data Connector session
- table (str):
Target table for the Data Connector session
- time_column (str, optional):
Column in the table for registering config.out.time
- timezone (str):
Timezone for scheduled Data Connector session.
See here for list of supported timezones https://gist.github.com/frsyuki/4533752
Returns:
:class:`dict`
"""
headers = {"content-type": "application/json; charset=utf-8"}
params = {} if params is None else dict(params)
params.update(job)
params["name"] = name
params["database"] = database
params["table"] = table
payload = json.dumps(params).encode("utf-8")
with self.post("/v3/bulk_loads", payload, headers=headers) as res:
code, body = res.status, res.read()
if code != 200:
self.raise_error(
"DataConnectorSession: %s created failed" % (name,), res, body
)
return self.checked_json(body, [])
def connector_show(self, name):
"""Show a specific Data Connector session information.
Args:
name (str): name of the connector job
Returns:
:class:`dict`
"""
with self.get(create_url("/v3/bulk_loads/{name}", name=name)) as res:
code, body = res.status, res.read()
if code != 200:
self.raise_error(
"DataConnectorSession: %s retrieve failed" % (name,), res, body
)
return self.checked_json(body, [])
def connector_update(self, name, job):
"""Update a specific Data Connector session.
Args:
name (str): name of the connector job
job (dict): :class:`dict` representation of `load.yml`.
For detailed format, see also: https://www.embulk.org/docs/built-in.html#embulk-configuration-file-format
Returns:
:class:`dict`
"""
headers = {"content-type": "application/json; charset=utf-8"}
payload = json.dumps(job).encode("utf-8")
with self.put(
create_url("/v3/bulk_loads/{name}", name=name),
payload,
len(payload),
headers=headers,
) as res:
code, body = res.status, res.read()
if code != 200:
self.raise_error(
"DataConnectorSession: %s update failed" % (name,), res, body
)
return self.checked_json(body, [])
def connector_delete(self, name):
"""Delete a Data Connector session.
Args:
name (str): name of the connector job
Returns:
:class:`dict`
"""
with self.delete(create_url("/v3/bulk_loads/{name}", name=name)) as res:
code, body = res.status, res.read()
if code != 200:
self.raise_error(
"DataConnectorSession: %s delete failed" % (name,), res, body
)
return self.checked_json(body, [])
def connector_history(self, name):
"""Show the list of the executed jobs information for the Data Connector job.
Args:
name (str): name of the connector job
Returns:
:class:`list`
"""
with self.get(create_url("/v3/bulk_loads/{name}/jobs", name=name)) as res:
code, body = res.status, res.read()
if code != 200:
self.raise_error(
"history of DataConnectorSession: %s retrieve failed" % (name,),
res,
body,
)
return json.loads(body.decode("utf-8"))
def connector_run(self, name, **kwargs):
"""Create a job to execute Data Connector session.
Args:
name (str): name of the connector job
**kwargs (optional): Extra parameters.
- scheduled_time (int):
Time in Unix epoch format that would be set as
`TD_SCHEDULED_TIME`.
- domain_key (str):
Job domain key which is assigned to a single job.
Returns:
:class:`dict`
"""
headers = {"content-type": "application/json; charset=utf-8"}
payload = json.dumps(kwargs).encode("utf-8")
with self.post(
create_url("/v3/bulk_loads/{name}/jobs", name=name),
payload,
headers=headers,
) as res:
code, body = res.status, res.read()
if code != 200:
self.raise_error(
"DataConnectorSession: %s job create failed" % (name,), res, body
)
return self.checked_json(body, [])
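# Typical end-to-end flow (illustrative sketch, not part of the original file).
# `td` is assumed to be a tdclient.api.API instance, which mixes in this class,
# and `seed` a dict form of seed.yml as accepted by connector_guess() above:
#     guessed = td.connector_guess(seed)
#     td.connector_preview(guessed)    # sanity-check the parsed rows first
#     job_id = td.connector_issue("my_db", "my_table", guessed)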
|
{
"content_hash": "2d450b6b26dbb1dd2d132a09b6494a80",
"timestamp": "",
"source": "github",
"line_count": 298,
"max_line_length": 121,
"avg_line_length": 39.439597315436245,
"alnum_prop": 0.5020845741512805,
"repo_name": "treasure-data/td-client-python",
"id": "bf5b5d4440fd564aa0671fa5c1951e6f48836797",
"size": "11776",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tdclient/connector_api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "326768"
}
],
"symlink_target": ""
}
|
TEMPLATE_PATH = 'app/'
|
{
"content_hash": "3a8eeef0807ddf00efb6c3f87c597f62",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 22,
"avg_line_length": 22,
"alnum_prop": 0.6818181818181818,
"repo_name": "tao12345666333/app-turbo",
"id": "99b11a593592033707c4369e86eb180b14ba7f6f",
"size": "104",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "demos/helloworld/jinja2-support/apps/app/setting.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1615"
},
{
"name": "JavaScript",
"bytes": "4046"
},
{
"name": "Python",
"bytes": "124548"
}
],
"symlink_target": ""
}
|
"""
Homepage Admin
"""
from django.contrib import admin
from django.db import models
from .models import HomePageHeader, HomePageButton, TextBlurb, SemesterModule
class ModuleAdmin(admin.ModelAdmin):
"""
Homepage Admin
"""
list_display = ('module_title',)
admin.site.register(SemesterModule, ModuleAdmin)
admin.site.register(HomePageHeader)
admin.site.register(HomePageButton)
admin.site.register(TextBlurb)
|
{
"content_hash": "42095b5edd646e329fe83135efdd1e85",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 77,
"avg_line_length": 22.473684210526315,
"alnum_prop": 0.7681498829039812,
"repo_name": "gatortechuf/gatortechuf.com",
"id": "4ff6f0b48fdf3986ba00ee8f06975c42921da0a8",
"size": "427",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/home/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "11040"
},
{
"name": "HTML",
"bytes": "27103"
},
{
"name": "JavaScript",
"bytes": "1195"
},
{
"name": "Python",
"bytes": "33826"
}
],
"symlink_target": ""
}
|
import os
import sys
import json
import fnmatch
import glob2
from flask import Flask
from flask import request
import flask_restful as restful
from flask_restful import Resource
from flask import jsonify
from monitor.resource import ResourceMonitor
from monitor import file_cache
import log as logging
from config import DiscoveryConst
from monitor import ResourceConst
from client.app_info import AppInfo
class TmpLogging(object):
def __init__(self):
self.out = sys.stdout
self.err = sys.stderr
def info(self, msg):
self.out.write("INFO\t%s\n" % msg)
self.out.flush()
def debug(self, msg):
self.out.write("DEBUG\t%s\n" % msg)
self.out.flush()
def error(self, msg):
self.err.write("ERROR\t%s\n" % msg)
self.out.flush()
# Flask intercepts the logging module, so we temporarily
# use our own class to print log messages
LOG = TmpLogging()
class ResourceInfo(Resource):
resource_monitor = None
file_cache_monitor = None
def __init__(self, *args, **kwargs):
super(ResourceInfo, self).__init__(*args, **kwargs)
if self.resource_monitor is None:
self.resource_monitor = ResourceMonitor()
try:
if self.file_cache_monitor is None:
self.file_cache_monitor = file_cache.get_instance()
except file_cache.CacheMonitorError as e:
self.file_cache_monitor = None
self.dfs_root = DiscoveryConst.DFS_ROOT
def get(self):
ret_data = self.resource_monitor.get_static_resource()
ret_data.update(self.resource_monitor.get_dynamic_resource())
request_opts = request.json or None
if request_opts is not None and \
AppInfo.APPLICATION in request_opts:
app_info = request_opts.get(AppInfo.APPLICATION)
app_id = app_info.get(AppInfo.APP_ID, None)
# file cache
cache_files = list()
cache_score = float(0)
if self.file_cache_monitor is not None:
file_cachelist = app_info.get(AppInfo.REQUIRED_CACHE_FILES, None)
cache_files, total_filesize, total_cachesize = \
self.check_file_cache(file_cachelist)
                if total_filesize != 0:
                    cache_score = float(100.0 * total_cachesize / total_filesize)
                else:
                    cache_score = float(0)
            ret_data.update({
                ResourceConst.APP_CACHE_FILES: cache_files,
                ResourceConst.APP_CACHE_TOTAL_SCORE: cache_score,
            })
return jsonify(ret_data)
else:
# return default resource info
return jsonify(ret_data)
def check_file_cache(self, filepattern_list):
if filepattern_list is None or len(filepattern_list) == 0:
return list(), 0, 0
ret_filelist = list()
total_filesize = 0
total_cachesize = 0
filelist = list()
for each_pattern in filepattern_list:
pattern = os.path.join(self.dfs_root, each_pattern)
filelist += glob2.glob(pattern)
for abspath in filelist:
LOG.debug("checking cache file of %s" % abspath)
            if not os.path.isfile(abspath):
                continue
filesize = os.path.getsize(abspath)
total_filesize += filesize
            if self.file_cache_monitor.check_file(abspath, is_abspath=True):
relpath = os.path.relpath(abspath, self.dfs_root)
ret_filelist.append(relpath)
total_cachesize += filesize
ret_filelist.sort()
return ret_filelist, total_filesize, total_cachesize
class CacheInfo(Resource):
file_cache_monitor = None
def __init__(self, *args, **kwargs):
super(CacheInfo, self).__init__(*args, **kwargs)
try:
if self.file_cache_monitor is None:
self.file_cache_monitor = file_cache.get_instance()
except file_cache.CacheMonitorError as e:
self.file_cache_monitor = None
def get(self):
ret_data = {}
if self.file_cache_monitor is not None:
filecache_ret = self.file_cache_monitor.cached_files()
ret_data = {ResourceConst.APP_CACHE_FILES: filecache_ret}
return jsonify(ret_data)
if __name__ == "__main__":
try:
# run REST server
app = Flask(__name__)
api = restful.Api(app)
api.add_resource(ResourceInfo, '/api/v1/resource/')
        # do not turn on debug mode; it interferes with graceful termination
        host = "0.0.0.0"
        port = 8022
        app.run(host=host, port=port, threaded=True, debug=False)
except KeyboardInterrupt as e:
ret_code = 1
finally:
pass
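# Client-side sketch (illustrative, not part of the original module): query the
# resource endpoint with an application description so file-cache scoring runs.
# The JSON key names come from the AppInfo constants used above; the id and
# glob pattern below are hypothetical.
#     import requests
#     payload = {AppInfo.APPLICATION: {
#         AppInfo.APP_ID: "demo-app",
#         AppInfo.REQUIRED_CACHE_FILES: ["logs/**/*.log"],
#     }}
#     print requests.get("http://127.0.0.1:8022/api/v1/resource/",
#                        json=payload).json()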
|
{
"content_hash": "b506131751b8fc476b9c487b364e0d56",
"timestamp": "",
"source": "github",
"line_count": 146,
"max_line_length": 84,
"avg_line_length": 33.14383561643836,
"alnum_prop": 0.597024178549287,
"repo_name": "cmusatyalab/elijah-discovery-basic",
"id": "5af28bc1bf6992c168c8be2298323af33b80ef40",
"size": "4862",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "elijah/discovery/discovery_rest.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "18223"
},
{
"name": "Makefile",
"bytes": "296"
},
{
"name": "Python",
"bytes": "129226"
},
{
"name": "Shell",
"bytes": "332"
}
],
"symlink_target": ""
}
|
"""
This module tests twisted.conch.ssh.connection.
"""
import struct
from twisted.conch import error
from twisted.conch.ssh import channel, common, connection
from twisted.trial import unittest
from twisted.conch.test import test_userauth
class TestChannel(channel.SSHChannel):
"""
A mocked-up version of twisted.conch.ssh.channel.SSHChannel.
@ivar gotOpen: True if channelOpen has been called.
@type gotOpen: C{bool}
@ivar specificData: the specific channel open data passed to channelOpen.
@type specificData: C{str}
@ivar openFailureReason: the reason passed to openFailed.
    @type openFailureReason: C{error.ConchError}
@ivar inBuffer: a C{list} of strings received by the channel.
@type inBuffer: C{list}
    @ivar extBuffer: a C{list} of 2-tuples (type, extended data) received by
        the channel.
@type extBuffer: C{list}
@ivar numberRequests: the number of requests that have been made to this
channel.
@type numberRequests: C{int}
@ivar gotEOF: True if the other side sent EOF.
@type gotEOF: C{bool}
@ivar gotOneClose: True if the other side closed the connection.
@type gotOneClose: C{bool}
@ivar gotClosed: True if the channel is closed.
@type gotClosed: C{bool}
"""
name = "TestChannel"
gotOpen = False
def logPrefix(self):
return "TestChannel %i" % self.id
def channelOpen(self, specificData):
"""
The channel is open. Set up the instance variables.
"""
self.gotOpen = True
self.specificData = specificData
self.inBuffer = []
self.extBuffer = []
self.numberRequests = 0
self.gotEOF = False
self.gotOneClose = False
self.gotClosed = False
def openFailed(self, reason):
"""
Opening the channel failed. Store the reason why.
"""
self.openFailureReason = reason
def request_test(self, data):
"""
A test request. Return True if data is 'data'.
@type data: C{str}
"""
self.numberRequests += 1
return data == 'data'
def dataReceived(self, data):
"""
Data was received. Store it in the buffer.
"""
self.inBuffer.append(data)
def extReceived(self, code, data):
"""
Extended data was received. Store it in the buffer.
"""
self.extBuffer.append((code, data))
def eofReceived(self):
"""
EOF was received. Remember it.
"""
self.gotEOF = True
def closeReceived(self):
"""
Close was received. Remember it.
"""
self.gotOneClose = True
def closed(self):
"""
        The channel is closed. Remember it.
"""
self.gotClosed = True
class TestAvatar:
"""
A mocked-up version of twisted.conch.avatar.ConchUser
"""
def lookupChannel(self, channelType, windowSize, maxPacket, data):
"""
The server wants us to return a channel. If the requested channel is
our TestChannel, return it, otherwise return None.
"""
if channelType == TestChannel.name:
return TestChannel(remoteWindow=windowSize,
remoteMaxPacket=maxPacket,
data=data, avatar=self)
def gotGlobalRequest(self, requestType, data):
"""
The client has made a global request. If the global request is
'TestGlobal', return True. If the global request is 'TestData',
return True and the request-specific data we received. Otherwise,
return False.
"""
if requestType == 'TestGlobal':
return True
elif requestType == 'TestData':
return True, data
else:
return False
class TestConnection(connection.SSHConnection):
"""
A subclass of SSHConnection for testing.
@ivar channel: the current channel.
@type channel. C{TestChannel}
"""
def logPrefix(self):
return "TestConnection"
def global_TestGlobal(self, data):
"""
The other side made the 'TestGlobal' global request. Return True.
"""
return True
def global_Test_Data(self, data):
"""
The other side made the 'Test-Data' global request. Return True and
the data we received.
"""
return True, data
def channel_TestChannel(self, windowSize, maxPacket, data):
"""
The other side is requesting the TestChannel. Create a C{TestChannel}
instance, store it, and return it.
"""
self.channel = TestChannel(remoteWindow=windowSize,
remoteMaxPacket=maxPacket, data=data)
return self.channel
def channel_ErrorChannel(self, windowSize, maxPacket, data):
"""
The other side is requesting the ErrorChannel. Raise an exception.
"""
raise AssertionError('no such thing')
class ConnectionTestCase(unittest.TestCase):
if test_userauth.transport is None:
skip = "Cannot run without PyCrypto"
def setUp(self):
self.transport = test_userauth.FakeTransport(None)
self.transport.avatar = TestAvatar()
self.conn = TestConnection()
self.conn.transport = self.transport
self.conn.serviceStarted()
def _openChannel(self, channel):
"""
Open the channel with the default connection.
"""
self.conn.openChannel(channel)
self.transport.packets = self.transport.packets[:-1]
self.conn.ssh_CHANNEL_OPEN_CONFIRMATION(struct.pack('>2L',
channel.id, 255) + '\x00\x02\x00\x00\x00\x00\x80\x00')
def tearDown(self):
self.conn.serviceStopped()
def test_linkAvatar(self):
"""
Test that the connection links itself to the avatar in the
transport.
"""
self.assertIdentical(self.transport.avatar.conn, self.conn)
def test_serviceStopped(self):
"""
Test that serviceStopped() closes any open channels.
"""
channel1 = TestChannel()
channel2 = TestChannel()
self.conn.openChannel(channel1)
self.conn.openChannel(channel2)
self.conn.ssh_CHANNEL_OPEN_CONFIRMATION('\x00\x00\x00\x00' * 4)
self.assertTrue(channel1.gotOpen)
self.assertFalse(channel2.gotOpen)
self.conn.serviceStopped()
self.assertTrue(channel1.gotClosed)
def test_GLOBAL_REQUEST(self):
"""
Test that global request packets are dispatched to the global_*
methods and the return values are translated into success or failure
messages.
"""
self.conn.ssh_GLOBAL_REQUEST(common.NS('TestGlobal') + '\xff')
self.assertEquals(self.transport.packets,
[(connection.MSG_REQUEST_SUCCESS, '')])
self.transport.packets = []
self.conn.ssh_GLOBAL_REQUEST(common.NS('TestData') + '\xff' +
'test data')
self.assertEquals(self.transport.packets,
[(connection.MSG_REQUEST_SUCCESS, 'test data')])
self.transport.packets = []
self.conn.ssh_GLOBAL_REQUEST(common.NS('TestBad') + '\xff')
self.assertEquals(self.transport.packets,
[(connection.MSG_REQUEST_FAILURE, '')])
self.transport.packets = []
self.conn.ssh_GLOBAL_REQUEST(common.NS('TestGlobal') + '\x00')
self.assertEquals(self.transport.packets, [])
def test_REQUEST_SUCCESS(self):
"""
Test that global request success packets cause the Deferred to be
called back.
"""
d = self.conn.sendGlobalRequest('request', 'data', True)
self.conn.ssh_REQUEST_SUCCESS('data')
def check(data):
self.assertEquals(data, 'data')
d.addCallback(check)
d.addErrback(self.fail)
return d
def test_REQUEST_FAILURE(self):
"""
Test that global request failure packets cause the Deferred to be
erred back.
"""
d = self.conn.sendGlobalRequest('request', 'data', True)
self.conn.ssh_REQUEST_FAILURE('data')
def check(f):
self.assertEquals(f.value.data, 'data')
d.addCallback(self.fail)
d.addErrback(check)
return d
def test_CHANNEL_OPEN(self):
"""
Test that open channel packets cause a channel to be created and
opened or a failure message to be returned.
"""
del self.transport.avatar
self.conn.ssh_CHANNEL_OPEN(common.NS('TestChannel') +
'\x00\x00\x00\x01' * 4)
self.assertTrue(self.conn.channel.gotOpen)
self.assertEquals(self.conn.channel.conn, self.conn)
self.assertEquals(self.conn.channel.data, '\x00\x00\x00\x01')
self.assertEquals(self.conn.channel.specificData, '\x00\x00\x00\x01')
self.assertEquals(self.conn.channel.remoteWindowLeft, 1)
self.assertEquals(self.conn.channel.remoteMaxPacket, 1)
self.assertEquals(self.transport.packets,
[(connection.MSG_CHANNEL_OPEN_CONFIRMATION,
'\x00\x00\x00\x01\x00\x00\x00\x00\x00\x02\x00\x00'
'\x00\x00\x80\x00')])
self.transport.packets = []
self.conn.ssh_CHANNEL_OPEN(common.NS('BadChannel') +
'\x00\x00\x00\x02' * 4)
self.flushLoggedErrors()
self.assertEquals(self.transport.packets,
[(connection.MSG_CHANNEL_OPEN_FAILURE,
'\x00\x00\x00\x02\x00\x00\x00\x03' + common.NS(
'unknown channel') + common.NS(''))])
self.transport.packets = []
self.conn.ssh_CHANNEL_OPEN(common.NS('ErrorChannel') +
'\x00\x00\x00\x02' * 4)
self.flushLoggedErrors()
self.assertEquals(self.transport.packets,
[(connection.MSG_CHANNEL_OPEN_FAILURE,
'\x00\x00\x00\x02\x00\x00\x00\x02' + common.NS(
'unknown failure') + common.NS(''))])
def test_CHANNEL_OPEN_CONFIRMATION(self):
"""
Test that channel open confirmation packets cause the channel to be
notified that it's open.
"""
channel = TestChannel()
self.conn.openChannel(channel)
self.conn.ssh_CHANNEL_OPEN_CONFIRMATION('\x00\x00\x00\x00'*5)
self.assertEquals(channel.remoteWindowLeft, 0)
self.assertEquals(channel.remoteMaxPacket, 0)
self.assertEquals(channel.specificData, '\x00\x00\x00\x00')
self.assertEquals(self.conn.channelsToRemoteChannel[channel],
0)
self.assertEquals(self.conn.localToRemoteChannel[0], 0)
def test_CHANNEL_OPEN_FAILURE(self):
"""
Test that channel open failure packets cause the channel to be
notified that its opening failed.
"""
channel = TestChannel()
self.conn.openChannel(channel)
self.conn.ssh_CHANNEL_OPEN_FAILURE('\x00\x00\x00\x00\x00\x00\x00'
'\x01' + common.NS('failure!'))
self.assertEquals(channel.openFailureReason.args, ('failure!', 1))
self.assertEquals(self.conn.channels.get(channel), None)
def test_CHANNEL_WINDOW_ADJUST(self):
"""
Test that channel window adjust messages add bytes to the channel
window.
"""
channel = TestChannel()
self._openChannel(channel)
oldWindowSize = channel.remoteWindowLeft
self.conn.ssh_CHANNEL_WINDOW_ADJUST('\x00\x00\x00\x00\x00\x00\x00'
'\x01')
self.assertEquals(channel.remoteWindowLeft, oldWindowSize + 1)
def test_CHANNEL_DATA(self):
"""
Test that channel data messages are passed up to the channel, or
cause the channel to be closed if the data is too large.
"""
channel = TestChannel(localWindow=6, localMaxPacket=5)
self._openChannel(channel)
self.conn.ssh_CHANNEL_DATA('\x00\x00\x00\x00' + common.NS('data'))
self.assertEquals(channel.inBuffer, ['data'])
self.assertEquals(self.transport.packets,
[(connection.MSG_CHANNEL_WINDOW_ADJUST, '\x00\x00\x00\xff'
'\x00\x00\x00\x04')])
self.transport.packets = []
longData = 'a' * (channel.localWindowLeft + 1)
self.conn.ssh_CHANNEL_DATA('\x00\x00\x00\x00' + common.NS(longData))
self.assertEquals(channel.inBuffer, ['data'])
self.assertEquals(self.transport.packets,
[(connection.MSG_CHANNEL_CLOSE, '\x00\x00\x00\xff')])
channel = TestChannel()
self._openChannel(channel)
bigData = 'a' * (channel.localMaxPacket + 1)
self.transport.packets = []
self.conn.ssh_CHANNEL_DATA('\x00\x00\x00\x01' + common.NS(bigData))
self.assertEquals(channel.inBuffer, [])
self.assertEquals(self.transport.packets,
[(connection.MSG_CHANNEL_CLOSE, '\x00\x00\x00\xff')])
def test_CHANNEL_EXTENDED_DATA(self):
"""
Test that channel extended data messages are passed up to the channel,
or cause the channel to be closed if they're too big.
"""
channel = TestChannel(localWindow=6, localMaxPacket=5)
self._openChannel(channel)
self.conn.ssh_CHANNEL_EXTENDED_DATA('\x00\x00\x00\x00\x00\x00\x00'
'\x00' + common.NS('data'))
self.assertEquals(channel.extBuffer, [(0, 'data')])
self.assertEquals(self.transport.packets,
[(connection.MSG_CHANNEL_WINDOW_ADJUST, '\x00\x00\x00\xff'
'\x00\x00\x00\x04')])
self.transport.packets = []
longData = 'a' * (channel.localWindowLeft + 1)
self.conn.ssh_CHANNEL_EXTENDED_DATA('\x00\x00\x00\x00\x00\x00\x00'
'\x00' + common.NS(longData))
self.assertEquals(channel.extBuffer, [(0, 'data')])
self.assertEquals(self.transport.packets,
[(connection.MSG_CHANNEL_CLOSE, '\x00\x00\x00\xff')])
channel = TestChannel()
self._openChannel(channel)
bigData = 'a' * (channel.localMaxPacket + 1)
self.transport.packets = []
self.conn.ssh_CHANNEL_EXTENDED_DATA('\x00\x00\x00\x01\x00\x00\x00'
'\x00' + common.NS(bigData))
self.assertEquals(channel.extBuffer, [])
self.assertEquals(self.transport.packets,
[(connection.MSG_CHANNEL_CLOSE, '\x00\x00\x00\xff')])
def test_CHANNEL_EOF(self):
"""
Test that channel eof messages are passed up to the channel.
"""
channel = TestChannel()
self._openChannel(channel)
self.conn.ssh_CHANNEL_EOF('\x00\x00\x00\x00')
self.assertTrue(channel.gotEOF)
def test_CHANNEL_CLOSE(self):
"""
Test that channel close messages are passed up to the channel. Also,
test that channel.close() is called if both sides are closed when this
message is received.
"""
channel = TestChannel()
self._openChannel(channel)
self.conn.sendClose(channel)
self.conn.ssh_CHANNEL_CLOSE('\x00\x00\x00\x00')
self.assertTrue(channel.gotOneClose)
self.assertTrue(channel.gotClosed)
def test_CHANNEL_REQUEST_success(self):
"""
Test that channel requests that succeed send MSG_CHANNEL_SUCCESS.
"""
channel = TestChannel()
self._openChannel(channel)
self.conn.ssh_CHANNEL_REQUEST('\x00\x00\x00\x00' + common.NS('test')
+ '\x00')
self.assertEquals(channel.numberRequests, 1)
d = self.conn.ssh_CHANNEL_REQUEST('\x00\x00\x00\x00' + common.NS(
'test') + '\xff' + 'data')
def check(result):
self.assertEquals(self.transport.packets,
[(connection.MSG_CHANNEL_SUCCESS, '\x00\x00\x00\xff')])
d.addCallback(check)
return d
def test_CHANNEL_REQUEST_failure(self):
"""
Test that channel requests that fail send MSG_CHANNEL_FAILURE.
"""
channel = TestChannel()
self._openChannel(channel)
d = self.conn.ssh_CHANNEL_REQUEST('\x00\x00\x00\x00' + common.NS(
'test') + '\xff')
def check(result):
self.assertEquals(self.transport.packets,
[(connection.MSG_CHANNEL_FAILURE, '\x00\x00\x00\xff'
)])
d.addCallback(self.fail)
d.addErrback(check)
return d
def test_CHANNEL_REQUEST_SUCCESS(self):
"""
Test that channel request success messages cause the Deferred to be
called back.
"""
channel = TestChannel()
self._openChannel(channel)
d = self.conn.sendRequest(channel, 'test', 'data', True)
self.conn.ssh_CHANNEL_SUCCESS('\x00\x00\x00\x00')
        def check(result):
            self.assertTrue(result)
        d.addCallback(check)
        return d
def test_CHANNEL_REQUEST_FAILURE(self):
"""
Test that channel request failure messages cause the Deferred to be
erred back.
"""
channel = TestChannel()
self._openChannel(channel)
d = self.conn.sendRequest(channel, 'test', '', True)
self.conn.ssh_CHANNEL_FAILURE('\x00\x00\x00\x00')
def check(result):
self.assertEquals(result.value.value, 'channel request failed')
d.addCallback(self.fail)
d.addErrback(check)
return d
def test_sendGlobalRequest(self):
"""
Test that global request messages are sent in the right format.
"""
d = self.conn.sendGlobalRequest('wantReply', 'data', True)
self.conn.sendGlobalRequest('noReply', '', False)
self.assertEquals(self.transport.packets,
[(connection.MSG_GLOBAL_REQUEST, common.NS('wantReply') +
'\xffdata'),
(connection.MSG_GLOBAL_REQUEST, common.NS('noReply') +
'\x00')])
self.assertEquals(self.conn.deferreds, {'global':[d]})
def test_openChannel(self):
"""
Test that open channel messages are sent in the right format.
"""
channel = TestChannel()
self.conn.openChannel(channel, 'aaaa')
self.assertEquals(self.transport.packets,
[(connection.MSG_CHANNEL_OPEN, common.NS('TestChannel') +
'\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x80\x00aaaa')])
self.assertEquals(channel.id, 0)
self.assertEquals(self.conn.localChannelID, 1)
def test_sendRequest(self):
"""
Test that channel request messages are sent in the right format.
"""
channel = TestChannel()
self._openChannel(channel)
d = self.conn.sendRequest(channel, 'test', 'test', True)
self.conn.sendRequest(channel, 'test2', '', False)
channel.localClosed = True # emulate sending a close message
self.conn.sendRequest(channel, 'test3', '', True)
self.assertEquals(self.transport.packets,
[(connection.MSG_CHANNEL_REQUEST, '\x00\x00\x00\xff' +
common.NS('test') + '\x01test'),
(connection.MSG_CHANNEL_REQUEST, '\x00\x00\x00\xff' +
common.NS('test2') + '\x00')])
self.assertEquals(self.conn.deferreds, {0:[d]})
def test_adjustWindow(self):
"""
Test that channel window adjust messages cause bytes to be added
to the window.
"""
channel = TestChannel(localWindow=5)
self._openChannel(channel)
channel.localWindowLeft = 0
self.conn.adjustWindow(channel, 1)
self.assertEquals(channel.localWindowLeft, 1)
channel.localClosed = True
self.conn.adjustWindow(channel, 2)
self.assertEquals(channel.localWindowLeft, 1)
self.assertEquals(self.transport.packets,
[(connection.MSG_CHANNEL_WINDOW_ADJUST, '\x00\x00\x00\xff'
'\x00\x00\x00\x01')])
def test_sendData(self):
"""
Test that channel data messages are sent in the right format.
"""
channel = TestChannel()
self._openChannel(channel)
self.conn.sendData(channel, 'a')
channel.localClosed = True
self.conn.sendData(channel, 'b')
self.assertEquals(self.transport.packets,
[(connection.MSG_CHANNEL_DATA, '\x00\x00\x00\xff' +
common.NS('a'))])
def test_sendExtendedData(self):
"""
Test that channel extended data messages are sent in the right format.
"""
channel = TestChannel()
self._openChannel(channel)
self.conn.sendExtendedData(channel, 1, 'test')
channel.localClosed = True
self.conn.sendExtendedData(channel, 2, 'test2')
self.assertEquals(self.transport.packets,
[(connection.MSG_CHANNEL_EXTENDED_DATA, '\x00\x00\x00\xff' +
'\x00\x00\x00\x01' + common.NS('test'))])
def test_sendEOF(self):
"""
Test that channel EOF messages are sent in the right format.
"""
channel = TestChannel()
self._openChannel(channel)
self.conn.sendEOF(channel)
self.assertEquals(self.transport.packets,
[(connection.MSG_CHANNEL_EOF, '\x00\x00\x00\xff')])
channel.localClosed = True
self.conn.sendEOF(channel)
self.assertEquals(self.transport.packets,
[(connection.MSG_CHANNEL_EOF, '\x00\x00\x00\xff')])
def test_sendClose(self):
"""
Test that channel close messages are sent in the right format.
"""
channel = TestChannel()
self._openChannel(channel)
self.conn.sendClose(channel)
self.assertTrue(channel.localClosed)
self.assertEquals(self.transport.packets,
[(connection.MSG_CHANNEL_CLOSE, '\x00\x00\x00\xff')])
self.conn.sendClose(channel)
self.assertEquals(self.transport.packets,
[(connection.MSG_CHANNEL_CLOSE, '\x00\x00\x00\xff')])
channel2 = TestChannel()
self._openChannel(channel2)
channel2.remoteClosed = True
self.conn.sendClose(channel2)
self.assertTrue(channel2.gotClosed)
def test_getChannelWithAvatar(self):
"""
Test that getChannel dispatches to the avatar when an avatar is
present. Correct functioning without the avatar is verified in
test_CHANNEL_OPEN.
"""
channel = self.conn.getChannel('TestChannel', 50, 30, 'data')
self.assertEquals(channel.data, 'data')
self.assertEquals(channel.remoteWindowLeft, 50)
self.assertEquals(channel.remoteMaxPacket, 30)
self.assertRaises(error.ConchError, self.conn.getChannel,
'BadChannel', 50, 30, 'data')
def test_gotGlobalRequestWithoutAvatar(self):
"""
Test that gotGlobalRequests dispatches to global_* without an avatar.
"""
del self.transport.avatar
self.assertTrue(self.conn.gotGlobalRequest('TestGlobal', 'data'))
self.assertEquals(self.conn.gotGlobalRequest('Test-Data', 'data'),
(True, 'data'))
self.assertFalse(self.conn.gotGlobalRequest('BadGlobal', 'data'))
|
{
"content_hash": "cc08247b166cc2561bfa7ccd3f7e0d71",
"timestamp": "",
"source": "github",
"line_count": 620,
"max_line_length": 79,
"avg_line_length": 37.79677419354839,
"alnum_prop": 0.6019032175471537,
"repo_name": "sorenh/cc",
"id": "22f38c261643417df3e4f67502543e43f56ba23c",
"size": "23511",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "vendor/Twisted-10.0.0/twisted/conch/test/test_connection.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "PHP",
"bytes": "707"
},
{
"name": "Python",
"bytes": "398663"
},
{
"name": "Shell",
"bytes": "12374"
}
],
"symlink_target": ""
}
|
import inspect
import sys
try:
from SimpleXMLRPCServer import SimpleXMLRPCServer
except ImportError:
from xmlrpc.server import SimpleXMLRPCServer
class RemoteServer(SimpleXMLRPCServer):
def __init__(self, library, port=8270, port_file=None):
SimpleXMLRPCServer.__init__(self, ('127.0.0.1', int(port)))
self.library = library
self._shutdown = False
self.register_function(self.get_keyword_names)
self.register_function(self.get_keyword_arguments)
self.register_function(self.run_keyword)
announce_port(self.socket, port_file)
self.serve_forever()
def serve_forever(self):
while not self._shutdown:
self.handle_request()
def get_keyword_names(self):
return [attr for attr in dir(self.library) if attr[0] != '_']
def get_keyword_arguments(self, name):
kw = getattr(self.library, name)
args, varargs, kwargs, defaults = inspect.getargspec(kw)
args = args[1:] # drop 'self'
if defaults:
args, names = args[:-len(defaults)], args[-len(defaults):]
args += ['%s=%s' % (n, d) for n, d in zip(names, defaults)]
if varargs:
args.append('*%s' % varargs)
if kwargs:
args.append('**%s' % kwargs)
return args
def run_keyword(self, name, args, kwargs=None):
try:
result = getattr(self.library, name)(*args, **(kwargs or {}))
except AssertionError as err:
return {'status': 'FAIL', 'error': str(err)}
else:
return {'status': 'PASS',
'return': result if result is not None else ''}
class DirectResultRemoteServer(RemoteServer):
def run_keyword(self, name, args, kwargs=None):
try:
return getattr(self.library, name)(*args, **(kwargs or {}))
except SystemExit:
self._shutdown = True
return {'status': 'PASS'}
def announce_port(socket, port_file=None):
port = socket.getsockname()[1]
sys.stdout.write('Remote server starting on port %s.\n' % port)
sys.stdout.flush()
if port_file:
with open(port_file, 'w') as f:
f.write(str(port))
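# Minimal usage sketch (illustrative; not part of the original test data file):
# expose a library object's public methods as remote keywords.
#     class ExampleLibrary(object):
#         def should_be_equal(self, first, second):
#             assert first == second, '%r != %r' % (first, second)
#     if __name__ == '__main__':
#         RemoteServer(ExampleLibrary(), port=8270)  # blocks in serve_forever()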
|
{
"content_hash": "55af17e85a6bb291724acc3dc1c98095",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 73,
"avg_line_length": 33.149253731343286,
"alnum_prop": 0.5943268797838811,
"repo_name": "synsun/robotframework",
"id": "42096756652c70613e6a7ef2802c03b5c43460ff",
"size": "2221",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "atest/testdata/standard_libraries/remote/remoteserver.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "245"
},
{
"name": "CSS",
"bytes": "23490"
},
{
"name": "HTML",
"bytes": "140926"
},
{
"name": "Java",
"bytes": "57462"
},
{
"name": "JavaScript",
"bytes": "160787"
},
{
"name": "Python",
"bytes": "2184737"
},
{
"name": "RobotFramework",
"bytes": "2009226"
},
{
"name": "Shell",
"bytes": "281"
}
],
"symlink_target": ""
}
|
from datetime import datetime
from loguru import logger
from flexget import plugin
from flexget.event import event
logger = logger.bind(name='est_movies')
class EstimatesReleasedMovies:
@plugin.priority(0)
def estimate(self, entry):
if 'tmdb_released' in entry:
logger.verbose('Querying release estimation for {}', entry['title'])
return entry['tmdb_released']
elif 'movie_year' in entry and entry['movie_year'] is not None:
try:
return datetime(year=entry['movie_year'], month=1, day=1)
except ValueError:
pass
logger.debug(
            'Unable to check release for {}, tmdb_released and movie_year fields are not defined',
entry['title'],
)
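# Illustrative behaviour sketch (assumption, not part of the original plugin):
# given an entry carrying only a movie_year, the estimator falls back to
# January 1 of that year.
#     entry = {'title': 'Example Movie', 'movie_year': 2016}
#     EstimatesReleasedMovies().estimate(entry)   # -> datetime(2016, 1, 1)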
@event('plugin.register')
def register_plugin():
plugin.register(
EstimatesReleasedMovies, 'est_released_movies', interfaces=['estimate_release'], api_ver=2
)
|
{
"content_hash": "ef87a15aa478264e3358d0975ed433cb",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 98,
"avg_line_length": 29.875,
"alnum_prop": 0.6317991631799164,
"repo_name": "ianstalk/Flexget",
"id": "e3329856883ad36cbd28b84696c545a76eab1adf",
"size": "956",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "flexget/components/estimate_release/estimators/est_released_movies.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "56725"
},
{
"name": "HTML",
"bytes": "35670"
},
{
"name": "JavaScript",
"bytes": "455222"
},
{
"name": "Python",
"bytes": "2063551"
}
],
"symlink_target": ""
}
|
import os
import shutil
import xml.etree.ElementTree as ET
from string import Template
from PIL import Image
from StringIO import StringIO
import zipfile
import urllib
from osgeo import ogr
import auxiliary_functions
def tab_template(sensor, file_name, map_coords1, map_coords2, map_coords3, map_coords4, img_hight, img_width):
"""Функция, формирующая tab-файл привязки по шаблону, используя следующие параметры
:param file_type:
:param file_name:
:param map_coords1:
:param map_coords2:
:param map_coords3:
:param map_coords4:
:param img_hight:
:type img_width: int
"""
# порядок следования точек у некоторых спутников отличается, поэтому строки будут разные
if sensor == 'bka':
point2 = ' ($map_coords2) (0.0,$img_hight.0) Label "Point 2",\n'
point4 = ' ($map_coords4) ($img_width.0,0.0) Label "Point 4"\n'
elif sensor == 'deimos':
point2 = ' ($map_coords2) ($img_width.0,0.0) Label "Point 2",\n'
point4 = ' ($map_coords4) (0.0,$img_hight.0) Label "Point 4"\n'
else:
point2 = ' ($map_coords2) (0.0,$img_hight.0) Label "Point 2",\n'
point4 = ' ($map_coords4) ($img_width.0,0.0) Label "Point 4"\n'
    text_content = Template('!table\n'
                            '!version 300\n'
                            '!charset WindowsCyrillic\n\n'
                            'Definition Table\n'
                            '  File "$file_name"\n'
                            '  Type "RASTER"\n'
                            '  ($map_coords1) (0.0,0.0) Label "Point 1",\n' + point2 +
                            '  ($map_coords3) ($img_width.0,$img_hight.0) Label "Point 3",\n' + point4 +
                            '  CoordSys Earth Projection 1, 0\n')
return text_content.substitute(
file_name=os.path.splitext(file_name)[0] + '.jpg', map_coords1=map_coords1, map_coords2=map_coords2,
map_coords3=map_coords3, map_coords4=map_coords4, img_hight=str(img_hight), img_width=str(img_width))
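# Example call (illustrative 'lon,lat' corner values, as produced by the
# exporters below):
#     tab_text = tab_template('bka', 'scene_001.jpg',
#                             '27.50,53.90', '27.50,53.60',
#                             '28.10,53.60', '28.10,53.90',
#                             1024, 1024)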
def get_valid_column_name(col_name_list, layer):
for col_name in col_name_list:
img_contour = layer.GetFeature(0)
try:
img_name = img_contour.GetField(col_name)
if img_name is not None:
return col_name
except ValueError:
continue
# если дошли до сюда, то тот SHP-файл не содержит нужных полей
return None
def bka_ql_exporter(source_file, dst_dirpath):
total_ql_list, percent_done, process_done_flag = 0, 0, False
    # parse the kml
if source_file.endswith(('.kml', '.KML')):
with open(source_file, 'r') as kml_file:
kml_xml = kml_file.read()
else:
# src_file.endswith(('.kmz', '.KMZ'))
with zipfile.ZipFile(source_file) as kmz:
for filename in kmz.namelist():
if filename.endswith(('.kml', '.KML')):
                    # parse the kml
with kmz.open(filename, 'r') as kml_file:
kml_xml = kml_file.read()
break
tree = auxiliary_functions.remove_xml_namespace(kml_xml)
root = tree.root
    # find every GroundOverlay record in the kml that describes a quicklook
ql_kml_list = root.findall(".//GroundOverlay")
counter = 0
for q in range(len(ql_kml_list)):
ql_rel_path = ql_kml_list[q].find(".//href").text
ql_name = os.path.splitext(os.path.basename(ql_rel_path))[0]
        # standardize the name, copy the quicklook to the target directory, and measure its pixel width and height
ql_dst_path = os.path.join(dst_dirpath, ql_name + '.jpg')
if source_file.endswith(('.kml', '.KML')):
shutil.copy(os.path.join(os.path.dirname(source_file), ql_rel_path), ql_dst_path)
else:
with zipfile.ZipFile(source_file) as kmz:
                # replace backslashes so the path resolves inside the zip archive
with kmz.open(ql_rel_path.replace('\\', '/')) as zipped_ql, open(ql_dst_path, 'wb') as f:
shutil.copyfileobj(zipped_ql, f)
ql_image_obj = Image.open(ql_dst_path)
ql_width, ql_height = ql_image_obj.size[0], ql_image_obj.size[1]
del ql_image_obj
coords_str = ql_kml_list[q].find(".//coordinates").text
        # handle an unusual coordinate storage variant (X,Y,Z on a single line)
        coords_str = coords_str.strip().replace(',0 ', '\n')
        # turn the corner-coordinate string into a list split into the 4 points
coords_lst = coords_str.split('\n')
c1, c2, c3, c4 = coords_lst[3], coords_lst[0], coords_lst[1], coords_lst[2]
text_content = tab_template('bka', ql_name, c1, c2, c3, c4, ql_height, ql_width)
with open(os.path.join(dst_dirpath, ql_name + '.tab'), 'w') as f:
f.write(text_content.strip())
counter += 1
percent_done = 100 * counter / len(ql_kml_list)
        # this yield lets helper_main track this function's progress
yield percent_done, len(ql_kml_list), process_done_flag
process_done_flag = True
yield percent_done, len(ql_kml_list), process_done_flag
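# Consumption sketch (illustrative): each exporter is a generator that yields
# (percent_done, total, done_flag) tuples so a caller such as helper_main can
# drive a progress bar; update_progress is a hypothetical UI hook.
#     for percent, total, done in bka_ql_exporter(src_kmz, dst_dir):
#         update_progress(percent)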
def deimos_ql_exporter(source_file, dst_dirpath):
total_ql_list, percent_done, process_done_flag = 0, 0, False
with auxiliary_functions.make_temp_directory() as tmpdir:
with zipfile.ZipFile(source_file, 'r') as zfile:
zfile.extractall(tmpdir)
if source_file.lower().endswith('.zip'):
ql_list = [f for dp, dn, filenames in os.walk(tmpdir) for f in filenames if f.endswith(('.kmz', '.KMZ'))
and f[-7:-4] != 'ALL']
else:
ql_list = [os.path.join(dp, filename) for dp, dn, filenames in os.walk(tmpdir)
for filename in filenames if filename.lower().endswith('.png')]
if source_file.lower().endswith('.kmz'):
for dirpath, dirnames, filenames in os.walk(tmpdir):
counter = 0
for filename in [filename for filename in filenames if filename.lower().endswith('.kml')]:
with open(os.path.join(dirpath, filename)) as kml_file:
kml_xml = kml_file.read()
tree = auxiliary_functions.remove_xml_namespace(kml_xml)
root = tree.root
                # TODO: from here down to the break, the code fully duplicates the else branch. Factor it out (a function?)
ql_kml_list = root.findall(".//GroundOverlay")
for q in range(len(ql_kml_list)):
ql_filename = ql_kml_list[q].find(".//name").text
ql_dst_path = os.path.join(dst_dirpath, ql_filename + '.tif')
ql_url = ql_kml_list[q].find(".//href").text
i = Image.open(os.path.join(tmpdir, ql_url))
i.save(ql_dst_path, format='TIFF')
del i
ql_image_obj = Image.open(ql_dst_path)
ql_width, ql_height = ql_image_obj.size[0], ql_image_obj.size[1]
del ql_image_obj
north = ql_kml_list[q].find(".//north").text
south = ql_kml_list[q].find(".//south").text
east = ql_kml_list[q].find(".//east").text
west = ql_kml_list[q].find(".//west").text
c1, c2, c3, c4 = ','.join((str(west), str(north))), ','.join((str(east), str(north))), \
','.join((str(east), str(south))), ','.join((str(west), str(south)))
text_content = tab_template('deimos', ql_filename, c1, c2, c3, c4, ql_height, ql_width)
with open(os.path.join(dst_dirpath, ql_filename + '.tab'), 'w') as f:
f.write(text_content.strip())
counter += 1
percent_done = 100 * counter / len(ql_list)
                    # this yield lets helper_main track this function's progress
yield percent_done, len(ql_list), process_done_flag
total_ql_list += len(ql_kml_list)
break
else:
for dirpath, dirnames, filenames in os.walk(tmpdir):
counter = 0
for filename in filenames:
if filename.endswith(('.kmz', '.KMZ')) and filename[-7:-4] != 'ALL':
in_file = os.path.join(dirpath, filename)
with zipfile.ZipFile(in_file, 'r') as kmz:
with kmz.open('doc.kml', 'r') as kml_file:
kml_xml = kml_file.read()
tree = auxiliary_functions.remove_xml_namespace(kml_xml)
root = tree.root
ql_kml_list = root.findall(".//GroundOverlay")
for q in range(len(ql_kml_list)):
ql_filename = ql_kml_list[q].find(".//name").text
ql_dst_path = os.path.join(dst_dirpath, ql_filename + '.tif')
ql_url = ql_kml_list[q].find(".//href").text
content = StringIO(urllib.urlopen(ql_url).read())
i = Image.open(content)
i.save(ql_dst_path, format='TIFF')
del i
ql_image_obj = Image.open(ql_dst_path)
ql_width, ql_height = ql_image_obj.size[0], ql_image_obj.size[1]
del ql_image_obj
north = ql_kml_list[q].find(".//north").text
south = ql_kml_list[q].find(".//south").text
east = ql_kml_list[q].find(".//east").text
west = ql_kml_list[q].find(".//west").text
c1, c2, c3, c4 = ','.join((str(west), str(north))), ','.join((str(east), str(north))), \
','.join((str(east), str(south))), ','.join((str(west), str(south)))
text_content = tab_template('deimos', ql_filename, c1, c2, c3, c4, ql_height, ql_width)
with open(os.path.join(dst_dirpath, ql_filename + '.tab'), 'w') as f:
f.write(text_content.strip())
counter += 1
percent_done = 100 * counter / len(ql_list)
                        # this yield lets helper_main track this function's progress
yield percent_done, len(ql_list), process_done_flag
total_ql_list += len(ql_kml_list)
process_done_flag = True
yield percent_done, total_ql_list, process_done_flag
def chinease_ql_exporter(source_file, dst_dirpath, sensor):
total_ql_list, percent_done, process_done_flag = 0, 0, False
if source_file.endswith(('.zip', '.ZIP')):
with auxiliary_functions.make_temp_directory() as tmpdir:
with zipfile.ZipFile(source_file, 'r') as zfile:
zfile.extractall(tmpdir)
        # TODO: if os.path.isdir(source_file), run the same processing for
        # every zip file in the directory (factor the body below into a function)
for dirpath, dirnames, filenames in os.walk(tmpdir):
for filename in filenames:
if filename.endswith(('.shp', '.SHP')):
shape_filepath = os.path.join(dirpath, filename)
driver = ogr.GetDriverByName('ESRI Shapefile')
dataSource = driver.Open(shape_filepath, 0)
layer = dataSource.GetLayer(0)
ql_list = layer.GetFeatureCount()
total_ql_list += ql_list
if sensor == 'TH':
col_name = get_valid_column_name(('ImgIdDgp', 'ImgIdGfb'), layer)
else:
                        # GF/ZY/TRIPLESAT variant
col_name = get_valid_column_name(['browsefile', 'browserimg'], layer)
counter = 0
                    # if the shp file lacks the required fields, skip it
if col_name is None:
total_ql_list -= ql_list
yield 100, 0, process_done_flag
del layer, dataSource
continue
for img_contour in layer:
if sensor == 'TH':
ql_name_w_type = str(img_contour.GetField(col_name)) + '_Bro' + '.jpg'
elif sensor == 'TRIPLESAT' or sensor == 'GF1-2, ZY3':
ql_name_w_type = os.path.basename(img_contour.GetField(col_name))
ql_name = os.path.splitext(ql_name_w_type)[0]
geometry = img_contour.GetGeometryRef()
ring = geometry.GetGeometryRef(0)
coord_list = ['', '', '', '']
list_counter = 0
for point_id in range(ring.GetPointCount() - 1):
lon, lat, z = ring.GetPoint(point_id)
coord_list[list_counter] = str(','.join((str(lon), str(lat))))
list_counter += 1
if sensor == 'TRIPLESAT':
ql_path = os.path.join(os.path.dirname(shape_filepath), 'images', ql_name_w_type)
else:
ql_path = os.path.join(os.path.dirname(shape_filepath), ql_name_w_type)
ql_dst_path = os.path.join(dst_dirpath, ql_name_w_type)
shutil.copy(ql_path, ql_dst_path)
ql_image_obj = Image.open(ql_path)
ql_width, ql_height = ql_image_obj.size[0], ql_image_obj.size[1]
del ql_image_obj
text_content = tab_template(
'China', ql_name, coord_list[0], coord_list[3], coord_list[2], coord_list[1],
ql_height, ql_width)
with open(os.path.join(dst_dirpath, ql_name + '.tab'), 'w') as f:
f.write(text_content.strip())
counter += 1
percent_done = 100 * counter / ql_list
yield percent_done, ql_list, process_done_flag
del layer, dataSource
process_done_flag = True
yield percent_done, total_ql_list, process_done_flag
# chinease_ql_exporter(r"C:\Users\lobanov\.qgis2\python\plugins\Helper\testData\TRIPLESAT\2016-10-26_1808644472_exportshp.zip",
# r"C:\Users\lobanov\.qgis2\python\plugins\Helper\testData\TRIPLESAT\QuickLooks", r"TRIPLESAT")
deimos_ql_exporter(r"U:\PRJ\2016\HELPER\Geolocation\EXAMPLE\DEIMOS2\version1\2016-09-27_6izMHfLhb5.zip",
r"U:\PRJ\2016\HELPER\Geolocation\EXAMPLE\DEIMOS2\version1\QuickLooks")
|
{
"content_hash": "8ad9be40b40df2a7992106124fa018a6",
"timestamp": "",
"source": "github",
"line_count": 271,
"max_line_length": 127,
"avg_line_length": 58.05166051660517,
"alnum_prop": 0.5018433765573354,
"repo_name": "bazzile/helper",
"id": "d72ea347d56fa886e33b29dd47aeee56a0e8580f",
"size": "16469",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ql_exporter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "17956"
},
{
"name": "HTML",
"bytes": "10374"
},
{
"name": "JavaScript",
"bytes": "92350"
},
{
"name": "Python",
"bytes": "64668"
}
],
"symlink_target": ""
}
|
"""
Audio Interchange File Format (AIFF) parser.
Author: Victor Stinner
Creation: 27 december 2006
"""
from lib.hachoir_parser import Parser
from lib.hachoir_core.field import (FieldSet,
UInt16, UInt32, Float80, TimestampMac32,
RawBytes, NullBytes,
String, Enum, PascalString32)
from lib.hachoir_core.endian import BIG_ENDIAN
from lib.hachoir_core.text_handler import filesizeHandler
from lib.hachoir_core.tools import alignValue
from lib.hachoir_parser.audio.id3 import ID3v2
CODEC_NAME = {
'ACE2': u"ACE 2-to-1",
'ACE8': u"ACE 8-to-3",
'MAC3': u"MAC 3-to-1",
'MAC6': u"MAC 6-to-1",
'NONE': u"None",
'sowt': u"Little-endian, no compression",
}
class Comment(FieldSet):
def createFields(self):
yield TimestampMac32(self, "timestamp")
yield PascalString32(self, "text")
def parseText(self):
yield String(self, "text", self["size"].value)
def parseID3(self):
yield ID3v2(self, "id3v2", size=self["size"].value*8)
def parseComment(self):
yield UInt16(self, "nb_comment")
for index in xrange(self["nb_comment"].value):
yield Comment(self, "comment[]")
def parseCommon(self):
yield UInt16(self, "nb_channel")
yield UInt32(self, "nb_sample")
yield UInt16(self, "sample_size")
yield Float80(self, "sample_rate")
yield Enum(String(self, "codec", 4, strip="\0", charset="ASCII"), CODEC_NAME)
def parseVersion(self):
yield TimestampMac32(self, "timestamp")
def parseSound(self):
yield UInt32(self, "offset")
yield UInt32(self, "block_size")
size = (self.size - self.current_size) // 8
if size:
yield RawBytes(self, "data", size)
class Chunk(FieldSet):
TAG_INFO = {
'COMM': ('common', "Common chunk", parseCommon),
'COMT': ('comment', "Comment", parseComment),
'NAME': ('name', "Name", parseText),
'AUTH': ('author', "Author", parseText),
'FVER': ('version', "Version", parseVersion),
'SSND': ('sound', "Sound data", parseSound),
'ID3 ': ('id3', "ID3", parseID3),
}
def __init__(self, *args):
FieldSet.__init__(self, *args)
self._size = (8 + alignValue(self["size"].value, 2)) * 8
tag = self["type"].value
if tag in self.TAG_INFO:
self._name, self._description, self._parser = self.TAG_INFO[tag]
else:
self._parser = None
def createFields(self):
yield String(self, "type", 4, "Signature (FORM)", charset="ASCII")
yield filesizeHandler(UInt32(self, "size"))
size = self["size"].value
if size:
if self._parser:
for field in self._parser(self):
yield field
if size % 2:
yield NullBytes(self, "padding", 1)
else:
yield RawBytes(self, "data", size)
class HeightSVX(Parser):
PARSER_TAGS = {
"id": "8svx",
"category": "audio",
"file_ext": ("8svx",),
"mime": (u"audio/x-aiff",),
"min_size": 12*8,
"description": "8SVX (audio) format"
}
endian = BIG_ENDIAN
def validate(self):
if self.stream.readBytes(0, 4) != "FORM":
return "Invalid signature"
if self.stream.readBytes(8*8, 4) != "8SVX":
return "Invalid type"
return True
def createFields(self):
yield String(self, "signature", 4, "Signature (FORM)", charset="ASCII")
yield filesizeHandler(UInt32(self, "filesize"))
yield String(self, "type", 4, "Form type (AIFF or AIFC)", charset="ASCII")
while not self.eof:
yield Chunk(self, "chunk[]")
    def createDescription(self):
        return "8SVX audio (IFF form type %s)" % self["type"].value
def createContentSize(self):
return self["filesize"].value * 8
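# Usage sketch (illustrative; the hachoir stream API is assumed to be available
# as vendored in this tree):
#     from lib.hachoir_core.stream import FileInputStream
#     parser = HeightSVX(FileInputStream(u"sample.8svx"))
#     print parser["signature"].value, parser["type"].value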
|
{
"content_hash": "16cf2d71388d53f18b16e164469f5b81",
"timestamp": "",
"source": "github",
"line_count": 126,
"max_line_length": 82,
"avg_line_length": 31.341269841269842,
"alnum_prop": 0.592301848569258,
"repo_name": "Branlala/docker-sickbeardfr",
"id": "16d0f7030178e9e13234900ee16db3e31c67fef6",
"size": "3949",
"binary": false,
"copies": "90",
"ref": "refs/heads/master",
"path": "sickbeard/lib/hachoir_parser/audio/8svx.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "83278"
},
{
"name": "CSS",
"bytes": "155616"
},
{
"name": "JavaScript",
"bytes": "248414"
},
{
"name": "Python",
"bytes": "8146521"
},
{
"name": "Ruby",
"bytes": "2461"
},
{
"name": "Shell",
"bytes": "8791"
}
],
"symlink_target": ""
}
|
def configuration(parent_package='io',top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('arff', parent_package, top_path)
config.add_data_dir('tests')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
|
{
"content_hash": "35603c2c94e36e2865a3c6c05e2958b1",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 60,
"avg_line_length": 34.3,
"alnum_prop": 0.6909620991253644,
"repo_name": "zerothi/scipy",
"id": "0b2417a2fa982d89ce60f27ce9d6c7785fe7d9af",
"size": "344",
"binary": false,
"copies": "27",
"ref": "refs/heads/main",
"path": "scipy/io/arff/setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4306215"
},
{
"name": "C++",
"bytes": "3692292"
},
{
"name": "Fortran",
"bytes": "5573034"
},
{
"name": "HTML",
"bytes": "124330"
},
{
"name": "MATLAB",
"bytes": "4346"
},
{
"name": "Makefile",
"bytes": "76425"
},
{
"name": "Python",
"bytes": "10541152"
},
{
"name": "Shell",
"bytes": "2218"
},
{
"name": "TeX",
"bytes": "52106"
}
],
"symlink_target": ""
}
|
import logging
from os import makedirs, listdir
from os.path import abspath, dirname, join
from shutil import copy, copytree
class Installer(object):
"""Initialize default rant source files in given directory"""
def __init__(self, dest_dir):
self._dest_dir = dest_dir
self._rant_path = abspath(join(dirname(__file__), ".."))
def _create_tree(self):
makedirs('%s/posts' % self._dest_dir)
makedirs('%s/pages' % self._dest_dir)
makedirs('%s/static' % self._dest_dir)
makedirs('%s/deploy' % self._dest_dir)
makedirs('%s/deploy/blog' % self._dest_dir)
def _copy_defaults(self):
copy('%s/defaults/config.yml' % self._rant_path, self._dest_dir)
copytree(
"%s/defaults/layouts" % self._rant_path,
"%s/layouts" % self._dest_dir
)
copytree(
"%s/defaults/css" % self._rant_path,
"%s/static/css" % self._dest_dir
)
    def install(self):
        if listdir(self._dest_dir):
            logging.info('\nUnable to initialize rant: directory not empty')
            return False
        self._create_tree()
        self._copy_defaults()
        logging.info('\nInitialized rant in "%s"' % self._dest_dir)
        logging.info('\nYou may now edit "%s/config.yml" as needed.' % self._dest_dir)
        return True
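# Usage sketch (illustrative): initialize a new rant site in an existing empty
# directory.
#     installer = Installer('/path/to/new/site')
#     installer.install()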
|
{
"content_hash": "a008acdc5a55a336056d0144a9311bc4",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 86,
"avg_line_length": 34.58974358974359,
"alnum_prop": 0.5796886582653817,
"repo_name": "lrvick/rant",
"id": "51f67eeef9ac6ff8c0e31933996f809ef387b3a4",
"size": "1349",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rant/install.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "3198"
},
{
"name": "HTML",
"bytes": "15605"
},
{
"name": "Makefile",
"bytes": "1876"
},
{
"name": "Python",
"bytes": "30488"
}
],
"symlink_target": ""
}
|
"""Pyauric Documentation
Pyauric is an interface for the AURIC airglow modelling software. The AURICManager class keeps track of environment variables and the working directory to facilitate AURIC use in python scripts, jupyter notebooks, interactive ipython sessions, etc.
If AURIC is set up so that you can use the standard command line interface, pyauric should work as well.
Example
-------
import pyauric
auric = pyauric.AURICManager()
...
"""
from .manager import AURICManager
_param_format = r"""Mandatory parameters:
NALT = 100 : number of altitude points
ZUB = 1000.00 : upper bound of atmosphere (km)
YYDDD = 92080 : year & day (YYDDD format)
UTSEC = 45000.00 : universal time (sec)
GLAT = 42.00 : latitude (deg)
GLON = 000.00 : longitude (deg)
SCALE(N2) = 1.00 : N2 density scale factor
SCALE(O2) = 1.00 : O2 density scale factor
SCALE(O) = 1.00 : O density scale factor
SCALE(O3) = 1.00 : O3 density scale factor
SCALE(NO) = 1.00 : NO density scale factor
SCALE(N) = 1.00 : N density scale factor
SCALE(He) = 1.00 : He density scale factor
SCALE(H) = 1.00 : H density scale factor
SCALE(Ar) = 1.00 : Ar density scale factor
Derived parameters:
GMLAT = 51.84 : geomagnetic latitude (deg)
GMLON = 1.71 : geomagnetic longitude (deg)
DPANG = 70.16 : magnetic dip angle (deg)
SZA = 30.00 : solar zenith angle (deg)
SLT = 1.00 : solar local time (hours)
F10DAY = 79.30 : F10.7 (current day)
F10PRE = 76.80 : F10.7 (previous day)
F10AVE = 79.40 : F10.7 (81-day average)
AP(1) = 9.00 : daily Ap
AP(2) = -1.00 : 3-hour Ap
AP(3) = -1.00 : 3-hour Ap
AP(4) = -1.00 : 3-hour Ap
AP(5) = -1.00 : 3-hour Ap
AP(6) = -1.00 : average 3-hour Ap
AP(7) = -1.00 : average 3-hour Ap"""
|
{
"content_hash": "5097180a507964216b22d8c00772a77b",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 248,
"avg_line_length": 41.326530612244895,
"alnum_prop": 0.5807407407407408,
"repo_name": "georgegeddes/pyauric",
"id": "108b25add3e7a27906d30514c2ef760cd8ff2f3d",
"size": "2025",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyauric/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "189907"
},
{
"name": "Python",
"bytes": "26335"
}
],
"symlink_target": ""
}
|
from Axon.AxonExceptions import AxonException as _AxonException
class socketSendFailure(_AxonException): pass
class connectionClosedown(_AxonException): pass
class connectionDied(connectionClosedown): pass
class connectionDiedSending(connectionDied): pass
class connectionDiedReceiving(connectionDied): pass
class connectionServerShutdown(connectionClosedown): pass
class BadRequest(_AxonException):
"Thrown when parsing a request fails"
def __init__(self, request, innerexception):
self.request = request
self.exception = innerexception
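# A minimal sketch of catching these exceptions (the operations shown are
# hypothetical):
#
#     try:
#         some_socket_operation()
#     except connectionDied:        # covers ...Sending and ...Receiving
#         handle_lost_connection()
#     except connectionClosedown:   # covers connectionServerShutdown too
#         handle_shutdown()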
|
{
"content_hash": "1ccfa3617a246553c01a80b9f0c317e4",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 63,
"avg_line_length": 39.857142857142854,
"alnum_prop": 0.8154121863799283,
"repo_name": "sparkslabs/kamaelia",
"id": "5908b40d1fa02491e2aa887f096c2b6d64d9afa5",
"size": "1706",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "Sketches/MPS/BugReports/FixTests/Kamaelia/Kamaelia/KamaeliaExceptions.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3814"
},
{
"name": "C",
"bytes": "212854"
},
{
"name": "C++",
"bytes": "327546"
},
{
"name": "CSS",
"bytes": "114434"
},
{
"name": "ChucK",
"bytes": "422"
},
{
"name": "HTML",
"bytes": "1288960"
},
{
"name": "Java",
"bytes": "31832"
},
{
"name": "JavaScript",
"bytes": "829491"
},
{
"name": "M4",
"bytes": "12224"
},
{
"name": "Makefile",
"bytes": "150947"
},
{
"name": "NSIS",
"bytes": "18867"
},
{
"name": "OCaml",
"bytes": "643"
},
{
"name": "PHP",
"bytes": "49059"
},
{
"name": "Perl",
"bytes": "504"
},
{
"name": "Processing",
"bytes": "2885"
},
{
"name": "Python",
"bytes": "18900785"
},
{
"name": "Ruby",
"bytes": "4165"
},
{
"name": "Shell",
"bytes": "707588"
}
],
"symlink_target": ""
}
|
"""Module for wrapper cyber record."""
import collections
import importlib
import os
import sys
from google.protobuf.descriptor_pb2 import FileDescriptorProto
# Refer to the _cyber_record_wrapper.so with relative path so that it can be
# always addressed as a part of the runfiles.
wrapper_lib_path = os.path.abspath(os.path.join(os.path.dirname(__file__),
'../internal'))
sys.path.append(wrapper_lib_path)
_CYBER_RECORD = importlib.import_module('_cyber_record_wrapper')
PyBagMessage = collections.namedtuple('PyBagMessage',
'topic message data_type timestamp')
class RecordReader(object):
"""
Class for cyber RecordReader wrapper.
"""
##
# @brief the constructor function.
#
# @param file_name the record file name.
def __init__(self, file_name):
self.record_reader = _CYBER_RECORD.new_PyRecordReader(file_name)
def __del__(self):
_CYBER_RECORD.delete_PyRecordReader(self.record_reader)
##
# @brief Read message from bag file.
#
# @param start_time the start time to read.
# @param end_time the end time to read.
#
    # @return return (channel, data, data_type, timestamp)
def read_messages(self, start_time=0, end_time=18446744073709551615):
while True:
message = _CYBER_RECORD.PyRecordReader_ReadMessage(
self.record_reader, start_time, end_time)
if not message["end"]:
yield PyBagMessage(message["channel_name"], message["data"],
message["data_type"], message["timestamp"])
else:
# print "No message more."
break
##
# @brief Return message count of the channel in current record file.
#
# @param channel_name the channel name.
#
# @return return the message count.
def get_messagenumber(self, channel_name):
return _CYBER_RECORD.PyRecordReader_GetMessageNumber(
self.record_reader, channel_name)
##
# @brief Get the corresponding message type of channel.
#
# @param channel_name channel name.
#
    # @return return the message type name as a string.
def get_messagetype(self, channel_name):
return _CYBER_RECORD.PyRecordReader_GetMessageType(
self.record_reader, channel_name).decode('utf-8')
def get_protodesc(self, channel_name):
"""
Return message protodesc.
"""
return _CYBER_RECORD.PyRecordReader_GetProtoDesc(
self.record_reader, channel_name)
def get_headerstring(self):
"""
Return message header string.
"""
return _CYBER_RECORD.PyRecordReader_GetHeaderString(self.record_reader)
def reset(self):
"""
Return reset.
"""
return _CYBER_RECORD.PyRecordReader_Reset(self.record_reader)
def get_channellist(self):
"""
Return current channel names list.
"""
return _CYBER_RECORD.PyRecordReader_GetChannelList(self.record_reader)
class RecordWriter(object):
"""
Class for cyber RecordWriter wrapper.
"""
##
# @brief the constructor function.
#
# @param file_segmentation_size_kb size to segment the file, 0 is no segmentation.
# @param file_segmentation_interval_sec size to segment the file, 0 is no segmentation.
def __init__(self, file_segmentation_size_kb=0,
file_segmentation_interval_sec=0):
self.record_writer = _CYBER_RECORD.new_PyRecordWriter()
_CYBER_RECORD.PyRecordWriter_SetSizeOfFileSegmentation(
self.record_writer, file_segmentation_size_kb)
_CYBER_RECORD.PyRecordWriter_SetIntervalOfFileSegmentation(
self.record_writer, file_segmentation_interval_sec)
def __del__(self):
_CYBER_RECORD.delete_PyRecordWriter(self.record_writer)
##
# @brief Open record file for write.
#
# @param path the file path.
#
    # @return True on success, otherwise False.
def open(self, path):
return _CYBER_RECORD.PyRecordWriter_Open(self.record_writer, path)
##
# @brief Close record file.
def close(self):
"""
Close record file.
"""
_CYBER_RECORD.PyRecordWriter_Close(self.record_writer)
##
    # @brief Write a channel, given its channel name, type name, and proto descriptor.
#
# @param channel_name the channel name to write
# @param type_name a string of message type name.
# @param proto_desc the message descriptor.
#
    # @return True on success, otherwise False.
def write_channel(self, channel_name, type_name, proto_desc):
"""
        Write a channel, given its channel name, type name, and proto descriptor.
"""
return _CYBER_RECORD.PyRecordWriter_WriteChannel(
self.record_writer, channel_name, type_name, proto_desc)
##
    # @brief Write a message, given its channel name, data, and write time.
#
# @param channel_name channel name to write.
# @param data when raw is True, data processed as a rawdata, other it needs to SerializeToString
# @param time message time.
# @param raw the flag implies data whether or not a rawdata.
#
    # @return True on success, otherwise False.
def write_message(self, channel_name, data, time, raw=True):
"""
        Write a message, given its channel name, raw data, and write time.
"""
if raw:
return _CYBER_RECORD.PyRecordWriter_WriteMessage(
self.record_writer, channel_name, data, time, "")
file_desc = data.DESCRIPTOR.file
proto = FileDescriptorProto()
file_desc.CopyToProto(proto)
proto.name = file_desc.name
desc_str = proto.SerializeToString()
return _CYBER_RECORD.PyRecordWriter_WriteMessage(
self.record_writer,
channel_name, data.SerializeToString(), time, desc_str)
def set_size_fileseg(self, size_kilobytes):
"""
        Set the file segmentation size (KB).
"""
return _CYBER_RECORD.PyRecordWriter_SetSizeOfFileSegmentation(
self.record_writer, size_kilobytes)
def set_intervaltime_fileseg(self, time_sec):
"""
        Set the file segmentation interval (seconds).
"""
return _CYBER_RECORD.PyRecordWriter_SetIntervalOfFileSegmentation(
self.record_writer, time_sec)
def get_messagenumber(self, channel_name):
"""
Return message count.
"""
return _CYBER_RECORD.PyRecordWriter_GetMessageNumber(
self.record_writer, channel_name)
def get_messagetype(self, channel_name):
"""
Return message type.
"""
return _CYBER_RECORD.PyRecordWriter_GetMessageType(
self.record_writer, channel_name).decode('utf-8')
def get_protodesc(self, channel_name):
"""
Return message protodesc.
"""
return _CYBER_RECORD.PyRecordWriter_GetProtoDesc(
self.record_writer, channel_name)
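# A minimal usage sketch (file name, channel, type name, and proto_desc are
# hypothetical; assumes the _cyber_record_wrapper extension is importable):
#
#     writer = RecordWriter()
#     if writer.open("demo.record"):
#         writer.write_channel("/chatter", "std_msgs.String", proto_desc)
#         writer.write_message("/chatter", b"raw bytes", 0)
#         writer.close()
#     reader = RecordReader("demo.record")
#     for msg in reader.read_messages():
#         print(msg.topic, msg.data_type, msg.timestamp)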
|
{
"content_hash": "a1e93f5124b00adf5bb12cdd33edbfca",
"timestamp": "",
"source": "github",
"line_count": 215,
"max_line_length": 100,
"avg_line_length": 32.586046511627906,
"alnum_prop": 0.6251784184984299,
"repo_name": "ycool/apollo",
"id": "664ab405121d9ca0ab25e39c48e0b81b85e1b94a",
"size": "7813",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "cyber/python/cyber_py3/record.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "1922"
},
{
"name": "Batchfile",
"bytes": "791"
},
{
"name": "C",
"bytes": "66747"
},
{
"name": "C++",
"bytes": "19613034"
},
{
"name": "CMake",
"bytes": "3600"
},
{
"name": "Cuda",
"bytes": "221003"
},
{
"name": "Dockerfile",
"bytes": "8522"
},
{
"name": "GLSL",
"bytes": "7000"
},
{
"name": "HTML",
"bytes": "9768"
},
{
"name": "Handlebars",
"bytes": "991"
},
{
"name": "JavaScript",
"bytes": "461346"
},
{
"name": "Makefile",
"bytes": "6626"
},
{
"name": "Python",
"bytes": "1178333"
},
{
"name": "SCSS",
"bytes": "52149"
},
{
"name": "Shell",
"bytes": "783043"
},
{
"name": "Smarty",
"bytes": "33183"
},
{
"name": "Starlark",
"bytes": "1023973"
},
{
"name": "Vim script",
"bytes": "161"
}
],
"symlink_target": ""
}
|
"""Ragged operations for working with string Tensors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_string_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops.ragged import ragged_array_ops
from tensorflow.python.ops.ragged import ragged_math_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.util import compat as util_compat
from tensorflow.python.util import deprecation
from tensorflow.python.util import dispatch
from tensorflow.python.util.lazy_loader import LazyLoader
from tensorflow.python.util.tf_export import tf_export
map_fn_lib = LazyLoader("map_fn_lib", globals(),
"tensorflow.python.ops.map_fn")
@tf_export("strings.bytes_split")
@dispatch.add_dispatch_support
def string_bytes_split(input, name=None): # pylint: disable=redefined-builtin
"""Split string elements of `input` into bytes.
Examples:
>>> tf.strings.bytes_split('hello').numpy()
array([b'h', b'e', b'l', b'l', b'o'], dtype=object)
>>> tf.strings.bytes_split(['hello', '123'])
<tf.RaggedTensor [[b'h', b'e', b'l', b'l', b'o'], [b'1', b'2', b'3']]>
Note that this op splits strings into bytes, not unicode characters. To
split strings into unicode characters, use `tf.strings.unicode_split`.
See also: `tf.io.decode_raw`, `tf.strings.split`, `tf.strings.unicode_split`.
Args:
input: A string `Tensor` or `RaggedTensor`: the strings to split. Must
have a statically known rank (`N`).
name: A name for the operation (optional).
Returns:
A `RaggedTensor` of rank `N+1`: the bytes that make up the source strings.
"""
with ops.name_scope(name, "StringsByteSplit", [input]):
input = ragged_tensor.convert_to_tensor_or_ragged_tensor(input,
name="input")
if isinstance(input, ragged_tensor.RaggedTensor):
return input.with_flat_values(string_bytes_split(input.flat_values))
rank = input.shape.ndims
if rank is None:
raise ValueError("input must have a statically-known rank.")
if rank == 0:
return string_bytes_split(array_ops.stack([input]))[0]
elif rank == 1:
indices, values, shape = gen_string_ops.string_split(
input, delimiter="", skip_empty=False)
return ragged_tensor.RaggedTensor.from_value_rowids(
values=values, value_rowids=indices[:, 0], nrows=shape[0],
validate=False)
else:
return string_bytes_split(ragged_tensor.RaggedTensor.from_tensor(input))
# pylint: disable=redefined-builtin
@tf_export("strings.unicode_encode")
@dispatch.add_dispatch_support
def unicode_encode(input,
output_encoding,
errors="replace",
replacement_char=65533,
name=None):
r"""Encodes each sequence of Unicode code points in `input` into a string.
`result[i1...iN]` is the string formed by concatenating the Unicode
  codepoints `input[i1...iN, :]`, encoded using `output_encoding`.
Args:
input: An `N+1` dimensional potentially ragged integer tensor with shape
`[D1...DN, num_chars]`.
output_encoding: Unicode encoding that should be used to encode each
codepoint sequence. Can be `"UTF-8"`, `"UTF-16-BE"`, or `"UTF-32-BE"`.
errors: Specifies the response when an invalid codepoint is encountered
(optional). One of:
* `'replace'`: Replace invalid codepoint with the
`replacement_char`. (default)
* `'ignore'`: Skip invalid codepoints.
* `'strict'`: Raise an exception for any invalid codepoint.
replacement_char: The replacement character codepoint to be used in place of
any invalid input when `errors='replace'`. Any valid unicode codepoint may
      be used. The default value is the default Unicode replacement character,
      which is 65533 (U+FFFD).
name: A name for the operation (optional).
Returns:
A `N` dimensional `string` tensor with shape `[D1...DN]`.
#### Example:
>>> input = tf.ragged.constant(
... [[71, 246, 246, 100, 110, 105, 103, 104, 116], [128522]])
>>> print(unicode_encode(input, 'UTF-8'))
tf.Tensor([b'G\xc3\xb6\xc3\xb6dnight' b'\xf0\x9f\x98\x8a'],
shape=(2,), dtype=string)
"""
with ops.name_scope(name, "UnicodeEncode", [input]):
input_tensor = ragged_tensor.convert_to_tensor_or_ragged_tensor(input)
if input_tensor.shape.ndims is None:
raise ValueError("Rank of input_tensor must be statically known.")
if ragged_tensor.is_ragged(input_tensor):
if input_tensor.flat_values.shape.ndims > 1:
# If the flat_values of our ragged tensor is multi-dimensional, we can
# process it separately and our output will have the same nested splits
# as our input.
return input_tensor.with_flat_values(
unicode_encode(input_tensor.flat_values, output_encoding, errors,
replacement_char))
elif input_tensor.ragged_rank > 1:
# Recursively process the values of the ragged tensor.
return input_tensor.with_values(
unicode_encode(input_tensor.values, output_encoding, errors,
replacement_char))
else:
# Our ragged tensor is of the correct shape (rank 1 flat_values tensor
# with ragged_rank of 1) so we can process it as normal.
return gen_string_ops.unicode_encode(
input_values=input_tensor.values,
input_splits=input_tensor.row_splits,
output_encoding=output_encoding,
errors=errors,
replacement_char=replacement_char)
else:
if input_tensor.shape.ndims == 2:
# The input tensor is of the correct 2-D shape, it's just not ragged.
return unicode_encode(
ragged_tensor.RaggedTensor.from_tensor(input_tensor),
output_encoding, errors, replacement_char)
elif input_tensor.shape.ndims > 2:
# We need to initially flatten the input tensor to 2-D, and then can
# reshape the output of our processed flattened tensor.
flat_input_tensor = array_ops.reshape(
input_tensor,
array_ops.stack([-1, array_ops.shape(input_tensor)[-1]]))
flat_output_tensor = unicode_encode(flat_input_tensor, output_encoding,
errors, replacement_char)
return array_ops.reshape(flat_output_tensor, input_tensor.shape[:-1])
elif input_tensor.shape.ndims == 0:
raise ValueError("input_tensor's rank must be at least 1.")
else:
# Our input tensor is rank 1, so we create a ragged tensor with an added
# dimension to create the correct input shape & type, and then remove
# the additional dimension from the output and return the string scalar.
ragged_input_tensor = ragged_tensor.RaggedTensor.from_row_splits(
input_tensor,
array_ops.stack(
[0, array_ops.shape(input_tensor, out_type=dtypes.int32)[0]]),
validate=False)
output_tensor = unicode_encode(ragged_input_tensor, output_encoding,
errors, replacement_char)
return array_ops.reshape(output_tensor, [])
# pylint: disable=redefined-builtin
@tf_export("strings.unicode_decode")
@dispatch.add_dispatch_support
def unicode_decode(input,
input_encoding,
errors="replace",
replacement_char=0xFFFD,
replace_control_characters=False,
name=None):
r"""Decodes each string in `input` into a sequence of Unicode code points.
`result[i1...iN, j]` is the Unicode codepoint for the `j`th character in
`input[i1...iN]`, when decoded using `input_encoding`.
Args:
input: An `N` dimensional potentially ragged `string` tensor with shape
`[D1...DN]`. `N` must be statically known.
input_encoding: String name for the unicode encoding that should be used to
decode each string.
errors: Specifies the response when an input string can't be converted
using the indicated encoding. One of:
* `'strict'`: Raise an exception for any illegal substrings.
* `'replace'`: Replace illegal substrings with `replacement_char`.
* `'ignore'`: Skip illegal substrings.
replacement_char: The replacement codepoint to be used in place of invalid
substrings in `input` when `errors='replace'`; and in place of C0 control
characters in `input` when `replace_control_characters=True`.
replace_control_characters: Whether to replace the C0 control characters
`(U+0000 - U+001F)` with the `replacement_char`.
name: A name for the operation (optional).
Returns:
A `N+1` dimensional `int32` tensor with shape `[D1...DN, (num_chars)]`.
The returned tensor is a `tf.Tensor` if `input` is a scalar, or a
`tf.RaggedTensor` otherwise.
#### Example:
>>> input = [s.encode('utf8') for s in (u'G\xf6\xf6dnight', u'\U0001f60a')]
>>> tf.strings.unicode_decode(input, 'UTF-8').to_list()
[[71, 246, 246, 100, 110, 105, 103, 104, 116], [128522]]
"""
with ops.name_scope(name, "UnicodeDecode", [input]):
return _unicode_decode(input, input_encoding, errors, replacement_char,
replace_control_characters, with_offsets=False)
@tf_export("strings.unicode_decode_with_offsets")
@dispatch.add_dispatch_support
def unicode_decode_with_offsets(input,
input_encoding,
errors="replace",
replacement_char=0xFFFD,
replace_control_characters=False,
name=None):
r"""Decodes each string into a sequence of code points with start offsets.
This op is similar to `tf.strings.decode(...)`, but it also returns the
start offset for each character in its respective string. This information
can be used to align the characters with the original byte sequence.
Returns a tuple `(codepoints, start_offsets)` where:
* `codepoints[i1...iN, j]` is the Unicode codepoint for the `j`th character
in `input[i1...iN]`, when decoded using `input_encoding`.
* `start_offsets[i1...iN, j]` is the start byte offset for the `j`th
character in `input[i1...iN]`, when decoded using `input_encoding`.
Args:
input: An `N` dimensional potentially ragged `string` tensor with shape
`[D1...DN]`. `N` must be statically known.
input_encoding: String name for the unicode encoding that should be used to
decode each string.
errors: Specifies the response when an input string can't be converted
using the indicated encoding. One of:
* `'strict'`: Raise an exception for any illegal substrings.
* `'replace'`: Replace illegal substrings with `replacement_char`.
* `'ignore'`: Skip illegal substrings.
replacement_char: The replacement codepoint to be used in place of invalid
substrings in `input` when `errors='replace'`; and in place of C0 control
characters in `input` when `replace_control_characters=True`.
replace_control_characters: Whether to replace the C0 control characters
`(U+0000 - U+001F)` with the `replacement_char`.
name: A name for the operation (optional).
Returns:
A tuple of `N+1` dimensional tensors `(codepoints, start_offsets)`.
* `codepoints` is an `int32` tensor with shape `[D1...DN, (num_chars)]`.
* `offsets` is an `int64` tensor with shape `[D1...DN, (num_chars)]`.
The returned tensors are `tf.Tensor`s if `input` is a scalar, or
`tf.RaggedTensor`s otherwise.
#### Example:
>>> input = [s.encode('utf8') for s in (u'G\xf6\xf6dnight', u'\U0001f60a')]
>>> result = tf.strings.unicode_decode_with_offsets(input, 'UTF-8')
>>> result[0].to_list() # codepoints
[[71, 246, 246, 100, 110, 105, 103, 104, 116], [128522]]
>>> result[1].to_list() # offsets
[[0, 1, 3, 5, 6, 7, 8, 9, 10], [0]]
"""
with ops.name_scope(name, "UnicodeDecodeWithOffsets", [input]):
return _unicode_decode(input, input_encoding, errors, replacement_char,
replace_control_characters, with_offsets=True)
@tf_export("strings.unicode_split")
@dispatch.add_dispatch_support
def unicode_split(input,
input_encoding,
errors="replace",
replacement_char=0xFFFD,
name=None):
r"""Splits each string in `input` into a sequence of Unicode code points.
`result[i1...iN, j]` is the substring of `input[i1...iN]` that encodes its
`j`th character, when decoded using `input_encoding`.
Args:
input: An `N` dimensional potentially ragged `string` tensor with shape
`[D1...DN]`. `N` must be statically known.
input_encoding: String name for the unicode encoding that should be used to
decode each string.
errors: Specifies the response when an input string can't be converted
using the indicated encoding. One of:
* `'strict'`: Raise an exception for any illegal substrings.
* `'replace'`: Replace illegal substrings with `replacement_char`.
* `'ignore'`: Skip illegal substrings.
replacement_char: The replacement codepoint to be used in place of invalid
substrings in `input` when `errors='replace'`.
name: A name for the operation (optional).
Returns:
    A `N+1` dimensional `string` tensor with shape `[D1...DN, (num_chars)]`.
The returned tensor is a `tf.Tensor` if `input` is a scalar, or a
`tf.RaggedTensor` otherwise.
#### Example:
>>> input = [s.encode('utf8') for s in (u'G\xf6\xf6dnight', u'\U0001f60a')]
>>> tf.strings.unicode_split(input, 'UTF-8').to_list()
[[b'G', b'\xc3\xb6', b'\xc3\xb6', b'd', b'n', b'i', b'g', b'h', b't'],
[b'\xf0\x9f\x98\x8a']]
"""
with ops.name_scope(name, "UnicodeSplit", [input]):
codepoints = _unicode_decode(input, input_encoding, errors,
replacement_char, False, with_offsets=False)
return unicode_encode(
ragged_array_ops.expand_dims(codepoints, -1),
output_encoding=input_encoding,
errors=errors,
replacement_char=replacement_char)
@tf_export("strings.unicode_split_with_offsets")
@dispatch.add_dispatch_support
def unicode_split_with_offsets(input,
input_encoding,
errors="replace",
replacement_char=0xFFFD,
name=None):
r"""Splits each string into a sequence of code points with start offsets.
This op is similar to `tf.strings.decode(...)`, but it also returns the
start offset for each character in its respective string. This information
can be used to align the characters with the original byte sequence.
Returns a tuple `(chars, start_offsets)` where:
* `chars[i1...iN, j]` is the substring of `input[i1...iN]` that encodes its
`j`th character, when decoded using `input_encoding`.
* `start_offsets[i1...iN, j]` is the start byte offset for the `j`th
character in `input[i1...iN]`, when decoded using `input_encoding`.
Args:
input: An `N` dimensional potentially ragged `string` tensor with shape
`[D1...DN]`. `N` must be statically known.
input_encoding: String name for the unicode encoding that should be used to
decode each string.
errors: Specifies the response when an input string can't be converted
using the indicated encoding. One of:
* `'strict'`: Raise an exception for any illegal substrings.
* `'replace'`: Replace illegal substrings with `replacement_char`.
* `'ignore'`: Skip illegal substrings.
replacement_char: The replacement codepoint to be used in place of invalid
substrings in `input` when `errors='replace'`.
name: A name for the operation (optional).
Returns:
A tuple of `N+1` dimensional tensors `(codepoints, start_offsets)`.
* `codepoints` is an `int32` tensor with shape `[D1...DN, (num_chars)]`.
* `offsets` is an `int64` tensor with shape `[D1...DN, (num_chars)]`.
The returned tensors are `tf.Tensor`s if `input` is a scalar, or
`tf.RaggedTensor`s otherwise.
#### Example:
>>> input = [s.encode('utf8') for s in (u'G\xf6\xf6dnight', u'\U0001f60a')]
>>> result = tf.strings.unicode_split_with_offsets(input, 'UTF-8')
>>> result[0].to_list() # character substrings
[[b'G', b'\xc3\xb6', b'\xc3\xb6', b'd', b'n', b'i', b'g', b'h', b't'],
[b'\xf0\x9f\x98\x8a']]
>>> result[1].to_list() # offsets
[[0, 1, 3, 5, 6, 7, 8, 9, 10], [0]]
"""
with ops.name_scope(name, "UnicodeSplitWithOffsets", [input]):
codepoints, offsets = _unicode_decode(input, input_encoding, errors,
replacement_char, False,
with_offsets=True)
chars = unicode_encode(
ragged_array_ops.expand_dims(codepoints, -1),
output_encoding=input_encoding,
errors=errors,
replacement_char=replacement_char)
return chars, offsets
def _unicode_decode(input, input_encoding, errors, replacement_char,
replace_control_characters, with_offsets):
"""Decodes each string into a sequence of codepoints."""
input = ragged_tensor.convert_to_tensor_or_ragged_tensor(input, name="input")
input_ndims = input.shape.ndims
if input_ndims is None:
raise ValueError("Rank of `input` must be statically known.")
if input_ndims > 1:
# Convert to a ragged tensor with ragged_rank = input_ndims - 1.
if not ragged_tensor.is_ragged(input):
input = ragged_tensor.RaggedTensor.from_tensor(
input, ragged_rank=input_ndims - 1)
elif input.ragged_rank < input_ndims - 1:
input = input.with_flat_values(
ragged_tensor.RaggedTensor.from_tensor(
input.flat_values,
ragged_rank=input_ndims - input.ragged_rank - 1))
# Reshape the input to a flat vector, and apply the gen_string_ops op.
if ragged_tensor.is_ragged(input):
flat_input = array_ops.reshape(input.flat_values, [-1])
else:
flat_input = array_ops.reshape(input, [-1])
if with_offsets:
decode_op = gen_string_ops.unicode_decode_with_offsets
else:
decode_op = gen_string_ops.unicode_decode
flat_result = decode_op(
input=flat_input,
input_encoding=input_encoding,
errors=errors,
replacement_char=replacement_char,
replace_control_characters=replace_control_characters)
if input_ndims == 0:
codepoints = flat_result.char_values
if with_offsets:
offsets = flat_result.char_to_byte_starts
else:
codepoints = ragged_tensor.RaggedTensor.from_row_splits(
flat_result.char_values, flat_result.row_splits, validate=False)
if input_ndims > 1:
codepoints = input.with_flat_values(codepoints)
if with_offsets:
offsets = ragged_tensor.RaggedTensor.from_row_splits(
flat_result.char_to_byte_starts, flat_result.row_splits,
validate=False)
if input_ndims > 1:
offsets = input.with_flat_values(offsets)
if with_offsets:
return codepoints, offsets
else:
return codepoints
@tf_export("strings.split", v1=[])
@dispatch.add_dispatch_support
def string_split_v2(input, sep=None, maxsplit=-1, name=None): # pylint: disable=redefined-builtin
"""Split elements of `input` based on `sep` into a `RaggedTensor`.
Let N be the size of `input` (typically N will be the batch size). Split each
element of `input` based on `sep` and return a `RaggedTensor` containing the
split tokens. Empty tokens are ignored.
Example:
>>> tf.strings.split('hello world').numpy()
array([b'hello', b'world'], dtype=object)
>>> tf.strings.split(['hello world', 'a b c'])
<tf.RaggedTensor [[b'hello', b'world'], [b'a', b'b', b'c']]>
If `sep` is given, consecutive delimiters are not grouped together and are
deemed to delimit empty strings. For example, `input` of `"1<>2<><>3"` and
`sep` of `"<>"` returns `["1", "2", "", "3"]`. If `sep` is None or an empty
  string, consecutive whitespace characters are regarded as a single
  separator, and the result will contain no empty strings at the start or end
  if the string has leading or trailing whitespace.
  Note that the above-mentioned behavior matches Python's str.split.
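  For example, splitting on an explicit separator keeps the empty strings
  described above:
  >>> tf.strings.split("1<>2<><>3", "<>").numpy()
  array([b'1', b'2', b'', b'3'], dtype=object)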
Args:
input: A string `Tensor` of rank `N`, the strings to split. If
`rank(input)` is not known statically, then it is assumed to be `1`.
sep: `0-D` string `Tensor`, the delimiter string.
maxsplit: An `int`. If `maxsplit > 0`, limit of the split of the result.
name: A name for the operation (optional).
Raises:
ValueError: If sep is not a string.
Returns:
A `RaggedTensor` of rank `N+1`, the strings split according to the
delimiter.
"""
with ops.name_scope(name, "StringSplit", [input]):
input = ragged_tensor.convert_to_tensor_or_ragged_tensor(
input, dtype=dtypes.string, name="input")
if isinstance(input, ragged_tensor.RaggedTensor):
return input.with_flat_values(
string_split_v2(input.flat_values, sep, maxsplit))
rank = input.shape.ndims
if rank == 0:
return string_split_v2(array_ops.stack([input]), sep, maxsplit)[0]
elif rank == 1 or rank is None:
sparse_result = string_ops.string_split_v2(
input, sep=sep, maxsplit=maxsplit)
return ragged_tensor.RaggedTensor.from_value_rowids(
values=sparse_result.values,
value_rowids=sparse_result.indices[:, 0],
nrows=sparse_result.dense_shape[0],
validate=False)
else:
return string_split_v2(
ragged_tensor.RaggedTensor.from_tensor(input), sep, maxsplit)
@tf_export(v1=["string_split"])
@dispatch.add_dispatch_support
@deprecation.deprecated_args(None,
"delimiter is deprecated, please use sep instead.",
"delimiter")
def string_split(source, sep=None, skip_empty=True, delimiter=None,
result_type="SparseTensor", name=None): # pylint: disable=invalid-name
"""Split elements of `source` based on `delimiter`.
Let N be the size of `source` (typically N will be the batch size). Split each
element of `source` based on `delimiter` and return a `SparseTensor`
or `RaggedTensor` containing the split tokens. Empty tokens are ignored.
If `sep` is an empty string, each element of the `source` is split
into individual strings, each containing one byte. (This includes splitting
multibyte sequences of UTF-8.) If delimiter contains multiple bytes, it is
treated as a set of delimiters with each considered a potential split point.
Examples:
>>> print(tf.compat.v1.string_split(['hello world', 'a b c']))
SparseTensor(indices=tf.Tensor( [[0 0] [0 1] [1 0] [1 1] [1 2]], ...),
values=tf.Tensor([b'hello' b'world' b'a' b'b' b'c'], ...),
dense_shape=tf.Tensor([2 3], shape=(2,), dtype=int64))
>>> print(tf.compat.v1.string_split(['hello world', 'a b c'],
... result_type="RaggedTensor"))
<tf.RaggedTensor [[b'hello', b'world'], [b'a', b'b', b'c']]>
Args:
source: `1-D` string `Tensor`, the strings to split.
sep: `0-D` string `Tensor`, the delimiter character, the string should
be length 0 or 1. Default is ' '.
skip_empty: A `bool`. If `True`, skip the empty strings from the result.
delimiter: deprecated alias for `sep`.
result_type: The tensor type for the result: one of `"RaggedTensor"` or
`"SparseTensor"`.
name: A name for the operation (optional).
Raises:
ValueError: If delimiter is not a string.
Returns:
A `SparseTensor` or `RaggedTensor` of rank `2`, the strings split according
to the delimiter. The first column of the indices corresponds to the row
in `source` and the second column corresponds to the index of the split
component in this row.
"""
with ops.name_scope(name, "StringSplit", [source]):
sparse_result = string_ops.string_split(
source, sep=sep, skip_empty=skip_empty, delimiter=delimiter)
if result_type == "SparseTensor":
return sparse_result
elif result_type == "RaggedTensor":
return ragged_tensor.RaggedTensor.from_value_rowids(
values=sparse_result.values,
value_rowids=sparse_result.indices[:, 0],
nrows=sparse_result.dense_shape[0],
validate=False)
else:
raise ValueError("result_type must be 'RaggedTensor' or 'SparseTensor'.")
# In TensorFlow 1.x, "tf.strings.split" uses the new signature (with maxsplit),
# but we need to add the result_type argument.
@tf_export(v1=["strings.split"])
@dispatch.add_dispatch_support
def strings_split_v1(input=None, sep=None, maxsplit=-1, # pylint: disable=redefined-builtin
result_type="SparseTensor", source=None, name=None):
"""Split elements of `input` based on `sep`.
Let N be the size of `input` (typically N will be the batch size). Split each
element of `input` based on `sep` and return a `SparseTensor` or
`RaggedTensor` containing the split tokens. Empty tokens are ignored.
Examples:
>>> print(tf.compat.v1.strings.split(['hello world', 'a b c']))
SparseTensor(indices=tf.Tensor( [[0 0] [0 1] [1 0] [1 1] [1 2]], ...),
values=tf.Tensor([b'hello' b'world' b'a' b'b' b'c'], ...),
dense_shape=tf.Tensor([2 3], shape=(2,), dtype=int64))
>>> print(tf.compat.v1.strings.split(['hello world', 'a b c'],
... result_type="RaggedTensor"))
<tf.RaggedTensor [[b'hello', b'world'], [b'a', b'b', b'c']]>
If `sep` is given, consecutive delimiters are not grouped together and are
deemed to delimit empty strings. For example, `input` of `"1<>2<><>3"` and
`sep` of `"<>"` returns `["1", "2", "", "3"]`. If `sep` is None or an empty
  string, consecutive whitespace characters are regarded as a single
  separator, and the result will contain no empty strings at the start or end
  if the string has leading or trailing whitespace.
  Note that the above-mentioned behavior matches Python's str.split.
Args:
input: A string `Tensor` of rank `N`, the strings to split. If
`rank(input)` is not known statically, then it is assumed to be `1`.
sep: `0-D` string `Tensor`, the delimiter character.
maxsplit: An `int`. If `maxsplit > 0`, limit of the split of the result.
result_type: The tensor type for the result: one of `"RaggedTensor"` or
`"SparseTensor"`.
source: alias for "input" argument.
name: A name for the operation (optional).
Raises:
ValueError: If sep is not a string.
Returns:
A `SparseTensor` or `RaggedTensor` of rank `N+1`, the strings split
according to the delimiter.
"""
input = deprecation.deprecated_argument_lookup(
"input", input, "source", source)
with ops.name_scope(name, "StringSplit", [input]):
input = ragged_tensor.convert_to_tensor_or_ragged_tensor(
input, dtype=dtypes.string, name="input")
if input.shape.rank == 0:
input = array_ops.expand_dims(input, 0)
if result_type == "SparseTensor":
if input.shape.rank == 1:
return string_ops.string_split_v2(input, sep=sep, maxsplit=maxsplit)
else:
return string_split_v2(input, sep=sep, maxsplit=maxsplit).to_sparse()
elif result_type == "RaggedTensor":
return string_split_v2(input, sep=sep, maxsplit=maxsplit)
else:
raise ValueError("result_type must be 'RaggedTensor' or 'SparseTensor'.")
def reduce_join(inputs, axis=None, keepdims=None, separator="", name=None):
"""For docs, see: _RAGGED_REDUCE_DOCSTRING."""
return ragged_math_ops.ragged_reduce_aggregate(
string_ops.reduce_join, string_ops.unsorted_segment_join, inputs, axis,
keepdims, separator, name or "RaggedSegmentJoin")
@tf_export("strings.ngrams")
@dispatch.add_dispatch_support
def ngrams(data,
ngram_width,
separator=" ",
pad_values=None,
padding_width=None,
preserve_short_sequences=False,
name=None):
"""Create a tensor of n-grams based on `data`.
Creates a tensor of n-grams based on `data`. The n-grams are created by
joining windows of `width` adjacent strings from the inner axis of `data`
using `separator`.
The input data can be padded on both the start and end of the sequence, if
desired, using the `pad_values` argument. If set, `pad_values` should contain
either a tuple of strings or a single string; the 0th element of the tuple
will be used to pad the left side of the sequence and the 1st element of the
tuple will be used to pad the right side of the sequence. The `padding_width`
arg controls how many padding values are added to each side; it defaults to
`ngram_width-1`.
If this op is configured to not have padding, or if it is configured to add
padding with `padding_width` set to less than ngram_width-1, it is possible
that a sequence, or a sequence plus padding, is smaller than the ngram
width. In that case, no ngrams will be generated for that sequence. This can
be prevented by setting `preserve_short_sequences`, which will cause the op
to always generate at least one ngram per non-empty sequence.
Examples:
>>> tf.strings.ngrams(["A", "B", "C", "D"], 2).numpy()
array([b'A B', b'B C', b'C D'], dtype=object)
>>> tf.strings.ngrams(["TF", "and", "keras"], 1).numpy()
array([b'TF', b'and', b'keras'], dtype=object)
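  >>> # Padding both sides of each sequence (a sketch; output assumes the
  >>> # default padding_width of ngram_width - 1):
  >>> tf.strings.ngrams(["A", "B"], 2, pad_values=("<", ">")).numpy()
  array([b'< A', b'A B', b'B >'], dtype=object)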
Args:
data: A Tensor or RaggedTensor containing the source data for the ngrams.
ngram_width: The width(s) of the ngrams to create. If this is a list or
tuple, the op will return ngrams of all specified arities in list order.
Values must be non-Tensor integers greater than 0.
separator: The separator string used between ngram elements. Must be a
string constant, not a Tensor.
pad_values: A tuple of (left_pad_value, right_pad_value), a single string,
or None. If None, no padding will be added; if a single string, then that
string will be used for both left and right padding. Values must be Python
strings.
padding_width: If set, `padding_width` pad values will be added to both
sides of each sequence. Defaults to `ngram_width`-1. Must be greater than
0. (Note that 1-grams are never padded, regardless of this value.)
preserve_short_sequences: If true, then ensure that at least one ngram is
generated for each input sequence. In particular, if an input sequence is
      shorter than `min(ngram_width) + 2*padding_width`, then generate a single
ngram containing the entire sequence. If false, then no ngrams are
generated for these short input sequences.
name: The op name.
Returns:
A RaggedTensor of ngrams. If `data.shape=[D1...DN, S]`, then
`output.shape=[D1...DN, NUM_NGRAMS]`, where
`NUM_NGRAMS=S-ngram_width+1+2*padding_width`.
Raises:
TypeError: if `pad_values` is set to an invalid type.
ValueError: if `pad_values`, `padding_width`, or `ngram_width` is set to an
invalid value.
"""
with ops.name_scope(name, "StringNGrams", [data]):
if pad_values is None:
left_pad = ""
right_pad = ""
elif isinstance(pad_values, (list, tuple)):
if (not isinstance(pad_values[0], util_compat.bytes_or_text_types) or
not isinstance(pad_values[1], util_compat.bytes_or_text_types)):
raise TypeError(
"pad_values must be a string, tuple of strings, or None.")
left_pad = pad_values[0]
right_pad = pad_values[1]
else:
if not isinstance(pad_values, util_compat.bytes_or_text_types):
raise TypeError(
"pad_values must be a string, tuple of strings, or None.")
left_pad = pad_values
right_pad = pad_values
if padding_width is not None and padding_width < 1:
raise ValueError("padding_width must be greater than 0.")
if padding_width is not None and pad_values is None:
raise ValueError("pad_values must be provided if padding_width is set.")
data = ragged_tensor.convert_to_tensor_or_ragged_tensor(
data, name="data", dtype=dtypes.string)
# preserve the shape of the data if it is a tensor
to_tensor = False
if isinstance(data, ops.Tensor):
dense_shape = array_ops.concat([array_ops.shape(data)[:-1], [-1]], axis=0)
to_tensor = True
if not isinstance(data, ragged_tensor.RaggedTensor):
if data.shape.ndims is None:
raise ValueError("Rank of data must be known.")
elif data.shape.ndims == 0:
raise ValueError("Data must have rank>0")
elif data.shape.ndims == 1:
rt = ragged_tensor.RaggedTensor.from_row_starts(
data, [0], validate=False)
return ngrams(rt, ngram_width, separator, pad_values, padding_width,
preserve_short_sequences, name)[0]
else:
data = ragged_tensor.RaggedTensor.from_tensor(
data, ragged_rank=data.shape.ndims - 1)
if data.ragged_rank > 1:
output = data.with_values(
ngrams(data.values, ngram_width, separator, pad_values, padding_width,
preserve_short_sequences, name))
return array_ops.reshape(output.flat_values,
dense_shape) if to_tensor else output
if pad_values is None:
padding_width = 0
if pad_values is not None and padding_width is None:
padding_width = -1
if not isinstance(ngram_width, (list, tuple)):
ngram_widths = [ngram_width]
else:
ngram_widths = ngram_width
for width in ngram_widths:
if width < 1:
raise ValueError("All ngram_widths must be greater than 0. Got %s" %
ngram_width)
output, output_splits = gen_string_ops.string_n_grams(
data=data.flat_values,
data_splits=data.row_splits,
separator=separator,
ngram_widths=ngram_widths,
left_pad=left_pad,
right_pad=right_pad,
pad_width=padding_width,
preserve_short_sequences=preserve_short_sequences)
# if the input is Dense tensor, the output should also be a dense tensor
output = ragged_tensor.RaggedTensor.from_row_splits(
values=output, row_splits=output_splits, validate=False)
return array_ops.reshape(output.flat_values,
dense_shape) if to_tensor else output
def string_format(template, inputs, placeholder="{}", summarize=3, name=None):
"""Version of tf.strings.format that handles RaggedTensors."""
if tensor_util.is_tf_type(inputs) or ragged_tensor.is_ragged(inputs):
inputs = [inputs]
split_template = template.split(placeholder)
if len(inputs) != len(split_template) - 1:
raise ValueError("num placeholders in template and num inputs must match"
": {} vs {}".format(len(split_template) - 1, len(inputs)))
with ops.name_scope(name, "StringFormat", [inputs]):
output_pieces = [constant_op.constant(split_template[0])]
for i, input in enumerate(inputs):
if ragged_tensor.is_ragged(input):
output_pieces.append(ragged_tensor_to_string(input, summarize))
else:
output_pieces.append(string_ops.string_format(
"{}", [input], summarize=summarize))
output_pieces.append(constant_op.constant(split_template[i + 1]))
if len(output_pieces) == 1:
return output_pieces[0]
else:
return string_ops.reduce_join(output_pieces)
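# A minimal sketch of string_format with a ragged input (hypothetical values;
# assumes eager execution and that `tf` is importable in the calling scope):
#
#     rt = tf.ragged.constant([[1, 2], [3]])
#     string_format("rt: {}", rt)   # -> b'rt: [[1, 2], [3]]'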
def ragged_tensor_to_string(rt, summarize=None):
"""Returns a scalar string tensor with the contents of a RaggedTensor.
Requires that `rt.shape.rank` is not `None`.
Note: this converts the entire `RaggedTensor` into a single string scalar.
If you want to convert individual elements, use `tf.strings.as_string(rt)`.
>>> rt1 = tf.ragged.constant([[1, 2, 3], [4, 5]])
>>> ragged_tensor_to_string(rt1).numpy()
b'[[1, 2, 3], [4, 5]]'
>>> rt2 = tf.ragged.constant([[['a'], ['b', 'c']], [['d', 'e', 'f'], []]])
>>> ragged_tensor_to_string(rt2).numpy()
b"[[['a'], ['b', 'c']], [['d', 'e', 'f'], []]]"
>>> rt3 = tf.ragged.constant([[1], [2, 3, 4, 5, 6], [], [], [7], [8, 9]])
>>> ragged_tensor_to_string(rt3, summarize=2).numpy()
b'[[1], [2, 3, ..., 5, 6], ..., [7], [8, 9]]'
Args:
rt: The RaggedTensor that should be converted to a string.
summarize: If specified, then only the first and last `summarize` elements
within each dimension are included in the string. If `-1` or `None`, then
all elements are included.
"""
if (summarize is not None and summarize != -1 and
not (isinstance(summarize, int) and summarize > 0)):
raise ValueError("Expected summarize to be -1 or a positive int, got %r" %
summarize)
with ops.name_scope(None, "AsString", [rt]):
rt = ragged_tensor.convert_to_tensor_or_ragged_tensor(rt)
if rt.shape.rank is None:
raise ValueError("RaggedTensor to_string requires that rt.shape.rank "
"is not None.")
# Convert all elements of `rt` to strings.
if rt.dtype == dtypes.string:
escaped = string_ops.regex_replace(rt.flat_values, r"(['\\])", r"\\\1")
str_t = rt.with_flat_values("'" + escaped + "'")
else:
str_t = rt.with_flat_values(string_ops.as_string(rt.flat_values))
return _ragged_tensor_to_string(str_t, summarize)
def _ragged_tensor_to_string(string_tensor, summarize):
"""Returns a scalar string tensor with the contents of `string_tensor`.
Args:
string_tensor: A potentially ragged tensor with dtype=string.
summarize: Include only the first and last `summarize` elements of each
dimension. If `-1` or `None`, then include all elements.
Returns:
A scalar string Tensor.
"""
if string_tensor.shape.rank == 1:
pieces = string_tensor
else:
pieces = map_fn_lib.map_fn(
lambda s: _ragged_tensor_to_string(s, summarize),
string_tensor,
fn_output_signature=tensor_spec.TensorSpec(None, dtypes.string))
if summarize not in (-1, None):
pieces = control_flow_ops.cond(
_nrows(string_tensor) <= 2 * summarize,
lambda: pieces,
lambda: array_ops.concat( # pylint: disable=g-long-lambda
[pieces[:summarize], ["..."], pieces[-summarize:]],
axis=0))
return "[" + string_ops.reduce_join(pieces, separator=", ") + "]"
def _nrows(tensor, out_type=dtypes.int32):
if isinstance(tensor, ragged_tensor.RaggedTensor):
return tensor.nrows(out_type=out_type)
else:
return array_ops.shape(tensor, out_type=out_type)[0]
|
{
"content_hash": "59dab23fd7cb17c72f1af8ad7bcb849a",
"timestamp": "",
"source": "github",
"line_count": 914,
"max_line_length": 98,
"avg_line_length": 42.82494529540482,
"alnum_prop": 0.6533391242143989,
"repo_name": "sarvex/tensorflow",
"id": "85735c39a868ece873ccc0bae2ee4622d0502eaf",
"size": "39831",
"binary": false,
"copies": "4",
"ref": "refs/heads/main",
"path": "tensorflow/python/ops/ragged/ragged_string_ops.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "148184"
},
{
"name": "C++",
"bytes": "6224499"
},
{
"name": "CSS",
"bytes": "107"
},
{
"name": "HTML",
"bytes": "650478"
},
{
"name": "Java",
"bytes": "53519"
},
{
"name": "JavaScript",
"bytes": "6659"
},
{
"name": "Jupyter Notebook",
"bytes": "777935"
},
{
"name": "Objective-C",
"bytes": "1288"
},
{
"name": "Protocol Buffer",
"bytes": "61743"
},
{
"name": "Python",
"bytes": "3474762"
},
{
"name": "Shell",
"bytes": "45640"
},
{
"name": "TypeScript",
"bytes": "283668"
}
],
"symlink_target": ""
}
|
import argparse
import os
import sys
#--------------------------------------------------------------------------------------------------
# Utility functions.
#--------------------------------------------------------------------------------------------------
def walk(directory, recursive):
if recursive:
for dirpath, dirnames, filenames in os.walk(directory):
for filename in filenames:
yield os.path.join(dirpath, filename)
else:
        dirpath, dirnames, filenames = next(os.walk(directory))
for filename in filenames:
yield os.path.join(dirpath, filename)
#--------------------------------------------------------------------------------------------------
# Processing code.
#--------------------------------------------------------------------------------------------------
def process_file(filepath):
print("processing {0}...".format(filepath))
with open(filepath) as f:
lines = f.readlines()
section_begin = -1
for index in range(len(lines)):
line = lines[index]
if section_begin == -1 and line.startswith("#include"):
section_begin = index
if section_begin != -1 and line in ["\n", "\r\n"]:
if all(clause.startswith("#include") for clause in lines[section_begin:index]):
lines[section_begin:index] = sorted(lines[section_begin:index], key=lambda s: s.lower())
section_begin = -1
with open(filepath + ".processed", "wt") as f:
for line in lines:
f.write(line)
os.remove(filepath)
os.rename(filepath + ".processed", filepath)
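# For example, a blank-line-delimited section like
#
#     #include "zlib.h"
#     #include "Alpha.h"
#
# is rewritten, case-insensitively sorted, as
#
#     #include "Alpha.h"
#     #include "zlib.h"
#
# Sorting is applied independently to each such #include section.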
#--------------------------------------------------------------------------------------------------
# Entry point.
#--------------------------------------------------------------------------------------------------
def main():
parser = argparse.ArgumentParser(description="sort #include clauses in c++ source code.")
parser.add_argument("-r", "--recursive", action='store_true', dest='recursive',
help="process all files in the specified directory and all its subdirectories")
parser.add_argument("path", help="file or directory to process")
args = parser.parse_args()
if os.path.isfile(args.path):
process_file(args.path)
else:
for filepath in walk(args.path, args.recursive):
ext = os.path.splitext(filepath)[1]
if ext == ".h" or ext == ".cpp":
process_file(filepath)
if __name__ == '__main__':
main()
|
{
"content_hash": "4caa299dc1393e6ce57d0de834d58a4b",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 104,
"avg_line_length": 35.541666666666664,
"alnum_prop": 0.4630715123094959,
"repo_name": "aytekaman/appleseed",
"id": "0bc8086013a1af01a9e00deed24ac56f2132bb7f",
"size": "3882",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "scripts/sortincludes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "87"
},
{
"name": "C",
"bytes": "620682"
},
{
"name": "C++",
"bytes": "16347901"
},
{
"name": "CMake",
"bytes": "258484"
},
{
"name": "HTML",
"bytes": "39165"
},
{
"name": "Objective-C",
"bytes": "1353161"
},
{
"name": "Python",
"bytes": "444885"
},
{
"name": "Shell",
"bytes": "2750"
}
],
"symlink_target": ""
}
|
"""
A classifier based on the Naive Bayes algorithm. In order to find the
probability for a label, this algorithm first uses the Bayes rule to
express P(label|features) in terms of P(label) and P(features|label):
| P(label) * P(features|label)
| P(label|features) = ------------------------------
| P(features)
The algorithm then makes the 'naive' assumption that all features are
independent, given the label:
| P(label) * P(f1|label) * ... * P(fn|label)
| P(label|features) = --------------------------------------------
| P(features)
Rather than computing P(features) explicitly, the algorithm just
calculates the numerator for each label and normalizes the results so
they sum to one:
| P(label) * P(f1|label) * ... * P(fn|label)
| P(label|features) = --------------------------------------------
| SUM[l]( P(l) * P(f1|l) * ... * P(fn|l) )
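For example, if the unnormalized scores for labels A and B are 0.006 and
0.002, the normalized results are P(A|features) = 0.75 and
P(B|features) = 0.25.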
"""
from __future__ import print_function, unicode_literals
from collections import defaultdict
from nltk.probability import FreqDist, DictionaryProbDist, ELEProbDist, sum_logs
from .api import ClassifierI
##//////////////////////////////////////////////////////
## Naive Bayes Classifier
##//////////////////////////////////////////////////////
class NaiveBayesClassifier(ClassifierI):
"""
A Naive Bayes classifier. Naive Bayes classifiers are
    parameterized by two probability distributions:
- P(label) gives the probability that an input will receive each
label, given no information about the input's features.
      - P(fname=fval|label) gives the probability that a given feature
        (fname) will receive a given value (fval), given the label
        (label).
If the classifier encounters an input with a feature that has
never been seen with any label, then rather than assigning a
probability of 0 to all labels, it will ignore that feature.
The feature value 'None' is reserved for unseen feature values;
you generally should not use 'None' as a feature value for one of
your own features.
"""
def __init__(self, label_probdist, feature_probdist):
"""
:param label_probdist: P(label), the probability distribution
over labels. It is expressed as a ``ProbDistI`` whose
samples are labels. I.e., P(label) =
``label_probdist.prob(label)``.
:param feature_probdist: P(fname=fval|label), the probability
distribution for feature values, given labels. It is
expressed as a dictionary whose keys are ``(label, fname)``
pairs and whose values are ``ProbDistI`` objects over feature
values. I.e., P(fname=fval|label) =
``feature_probdist[label,fname].prob(fval)``. If a given
``(label,fname)`` is not a key in ``feature_probdist``, then
it is assumed that the corresponding P(fname=fval|label)
is 0 for all values of ``fval``.
"""
self._label_probdist = label_probdist
self._feature_probdist = feature_probdist
self._labels = list(label_probdist.samples())
def labels(self):
return self._labels
def classify(self, featureset):
return self.prob_classify(featureset).max()
def prob_classify(self, featureset):
# Discard any feature names that we've never seen before.
# Otherwise, we'll just assign a probability of 0 to
# everything.
featureset = featureset.copy()
for fname in list(featureset.keys()):
for label in self._labels:
if (label, fname) in self._feature_probdist:
break
else:
#print 'Ignoring unseen feature %s' % fname
del featureset[fname]
        # Find the log probability of each label, given the features.
# Start with the log probability of the label itself.
logprob = {}
for label in self._labels:
logprob[label] = self._label_probdist.logprob(label)
# Then add in the log probability of features given labels.
for label in self._labels:
for (fname, fval) in featureset.items():
if (label, fname) in self._feature_probdist:
feature_probs = self._feature_probdist[label,fname]
logprob[label] += feature_probs.logprob(fval)
else:
# nb: This case will never come up if the
# classifier was created by
# NaiveBayesClassifier.train().
logprob[label] += sum_logs([]) # = -INF.
return DictionaryProbDist(logprob, normalize=True, log=True)
def show_most_informative_features(self, n=10):
# Determine the most relevant features, and display them.
cpdist = self._feature_probdist
print('Most Informative Features')
for (fname, fval) in self.most_informative_features(n):
def labelprob(l):
return cpdist[l,fname].prob(fval)
labels = sorted([l for l in self._labels
if fval in cpdist[l,fname].samples()],
key=labelprob)
if len(labels) == 1: continue
l0 = labels[0]
l1 = labels[-1]
if cpdist[l0,fname].prob(fval) == 0:
ratio = 'INF'
else:
ratio = '%8.1f' % (cpdist[l1,fname].prob(fval) /
cpdist[l0,fname].prob(fval))
print(('%24s = %-14r %6s : %-6s = %s : 1.0' %
(fname, fval, ("%s" % l1)[:6], ("%s" % l0)[:6], ratio)))
def most_informative_features(self, n=100):
"""
Return a list of the 'most informative' features used by this
classifier. For the purpose of this function, the
informativeness of a feature ``(fname,fval)`` is equal to the
highest value of P(fname=fval|label), for any label, divided by
the lowest value of P(fname=fval|label), for any label:
| max[ P(fname=fval|label1) / P(fname=fval|label2) ]
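        For example, if P(fname=fval|label1) = 0.8 is the highest such
        value and P(fname=fval|label2) = 0.1 is the lowest, the
        informativeness of ``(fname, fval)`` is 0.8 / 0.1 = 8.0.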
"""
# The set of (fname, fval) pairs used by this classifier.
features = set()
# The max & min probability associated w/ each (fname, fval)
# pair. Maps (fname,fval) -> float.
maxprob = defaultdict(lambda: 0.0)
minprob = defaultdict(lambda: 1.0)
for (label, fname), probdist in self._feature_probdist.items():
for fval in probdist.samples():
feature = (fname, fval)
features.add( feature )
p = probdist.prob(fval)
maxprob[feature] = max(p, maxprob[feature])
minprob[feature] = min(p, minprob[feature])
if minprob[feature] == 0:
features.discard(feature)
# Convert features to a list, & sort it by how informative
# features are.
features = sorted(features,
key=lambda feature: minprob[feature]/maxprob[feature])
return features[:n]
@staticmethod
def train(labeled_featuresets, estimator=ELEProbDist):
"""
:param labeled_featuresets: A list of classified featuresets,
i.e., a list of tuples ``(featureset, label)``.
"""
label_freqdist = FreqDist()
feature_freqdist = defaultdict(FreqDist)
feature_values = defaultdict(set)
fnames = set()
# Count up how many times each feature value occurred, given
# the label and featurename.
for featureset, label in labeled_featuresets:
label_freqdist.inc(label)
for fname, fval in featureset.items():
# Increment freq(fval|label, fname)
feature_freqdist[label, fname].inc(fval)
# Record that fname can take the value fval.
feature_values[fname].add(fval)
# Keep a list of all feature names.
fnames.add(fname)
# If a feature didn't have a value given for an instance, then
        # we assume that it gets the implicit value 'None'. This loop
# counts up the number of 'missing' feature values for each
# (label,fname) pair, and increments the count of the fval
# 'None' by that amount.
for label in label_freqdist:
num_samples = label_freqdist[label]
for fname in fnames:
count = feature_freqdist[label, fname].N()
feature_freqdist[label, fname].inc(None, num_samples-count)
feature_values[fname].add(None)
# Create the P(label) distribution
label_probdist = estimator(label_freqdist)
# Create the P(fval|label, fname) distribution
feature_probdist = {}
for ((label, fname), freqdist) in feature_freqdist.items():
probdist = estimator(freqdist, bins=len(feature_values[fname]))
feature_probdist[label,fname] = probdist
return NaiveBayesClassifier(label_probdist, feature_probdist)
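# A minimal usage sketch (toy featuresets; demo() below runs a fuller example
# on the names corpus):
#
#     train_set = [({'last_letter': 'a'}, 'female'),
#                  ({'last_letter': 'k'}, 'male'),
#                  ({'last_letter': 'a'}, 'female')]
#     classifier = NaiveBayesClassifier.train(train_set)
#     classifier.classify({'last_letter': 'a'})   # -> 'female'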
##//////////////////////////////////////////////////////
## Demo
##//////////////////////////////////////////////////////
def demo():
from nltk.classify.util import names_demo
classifier = names_demo(NaiveBayesClassifier.train)
classifier.show_most_informative_features()
if __name__ == '__main__':
demo()
|
{
"content_hash": "a47ba8550bb27f0ef88f01e9a944e15a",
"timestamp": "",
"source": "github",
"line_count": 229,
"max_line_length": 80,
"avg_line_length": 41.646288209606986,
"alnum_prop": 0.5691517248610675,
"repo_name": "haya14busa/alc-etm-searcher",
"id": "2f6c5c02d57b488778eba8fdca246b46a4c98a47",
"size": "9743",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "nltk-3.0a3/nltk/classify/naivebayes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "11448"
},
{
"name": "Java",
"bytes": "30518"
},
{
"name": "Python",
"bytes": "6856183"
}
],
"symlink_target": ""
}
|
'''
Test cases for the isoduration module.
'''
import unittest
import operator
from datetime import timedelta, date, datetime
from isodate import Duration, parse_duration, ISO8601Error
from isodate import D_DEFAULT, D_WEEK, D_ALT_EXT, duration_isoformat
# The following dict maps ISO duration strings to tuples of the expected
# result from the parse_duration method, the format used to re-serialise it,
# and an alternative expected string (or None). A result of None means an
# ISO8601Error is expected.
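# For example, the entry 'PT36H' below parses to timedelta(hours=36); when
# formatted back with D_DEFAULT it normalises to the alternative string
# 'P1DT12H', which is why that altstr is given instead of None.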
PARSE_TEST_CASES = {'P18Y9M4DT11H9M8S': (Duration(4, 8, 0, 0, 9, 11, 0, 9, 18),
D_DEFAULT, None),
'P2W': (timedelta(weeks=2), D_WEEK, None),
'P3Y6M4DT12H30M5S': (Duration(4, 5, 0, 0, 30, 12, 0, 6, 3),
D_DEFAULT, None),
'P23DT23H': (timedelta(hours=23, days=23),
D_DEFAULT, None),
'P4Y': (Duration(years=4), D_DEFAULT, None),
'P1M': (Duration(months=1), D_DEFAULT, None),
'PT1M': (timedelta(minutes=1), D_DEFAULT, None),
'P0.5Y': (Duration(years=0.5), D_DEFAULT, None),
'PT36H': (timedelta(hours=36), D_DEFAULT, 'P1DT12H'),
'P1DT12H': (timedelta(days=1, hours=12), D_DEFAULT, None),
'+P11D': (timedelta(days=11), D_DEFAULT, 'P11D'),
'-P2W': (timedelta(weeks=-2), D_WEEK, None),
'-P2.2W': (timedelta(weeks=-2.2), D_DEFAULT,
'-P15DT9H36M'),
'P1DT2H3M4S': (timedelta(days=1, hours=2, minutes=3,
seconds=4), D_DEFAULT, None),
'P1DT2H3M': (timedelta(days=1, hours=2, minutes=3),
D_DEFAULT, None),
'P1DT2H': (timedelta(days=1, hours=2), D_DEFAULT, None),
'PT2H': (timedelta(hours=2), D_DEFAULT, None),
'PT2.3H': (timedelta(hours=2.3), D_DEFAULT, 'PT2H18M'),
'PT2H3M4S': (timedelta(hours=2, minutes=3, seconds=4),
D_DEFAULT, None),
'PT3M4S': (timedelta(minutes=3, seconds=4), D_DEFAULT,
None),
'PT22S': (timedelta(seconds=22), D_DEFAULT, None),
'PT22.22S': (timedelta(seconds=22.22), 'PT%S.%fS',
'PT22.220000S'),
'-P2Y': (Duration(years=-2), D_DEFAULT, None),
'-P3Y6M4DT12H30M5S': (Duration(-4, -5, 0, 0, -30, -12, 0,
-6, -3), D_DEFAULT, None),
'-P1DT2H3M4S': (timedelta(days=-1, hours=-2, minutes=-3,
seconds=-4), D_DEFAULT, None),
# alternative format
'P0018-09-04T11:09:08': (Duration(4, 8, 0, 0, 9, 11, 0, 9,
18), D_ALT_EXT, None),
#'PT000022.22': timedelta(seconds=22.22),
}
# d1 d2 '+', '-', '>'
# A list of test cases to test addition and subtraction between parsed
# Duration/timedelta objects.
# Each tuple contains two duration strings, a result string for addition and
# one for subtraction. The last value states whether the first duration is
# greater than the second (None if the comparison is expected to raise a
# TypeError).
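# Worked example from the second entry below: PT28M12.73S + PT56M29.92S
# gives PT1H24M42.65S, the subtraction gives -PT28M17.19S, and the final
# False records that the first duration is not greater than the second.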
MATH_TEST_CASES = (('P5Y7M1DT9H45M16.72S', 'PT27M24.68S',
'P5Y7M1DT10H12M41.4S', 'P5Y7M1DT9H17M52.04S', None),
('PT28M12.73S', 'PT56M29.92S',
'PT1H24M42.65S', '-PT28M17.19S', False),
('P3Y7M23DT5H25M0.33S', 'PT1H1.95S',
'P3Y7M23DT6H25M2.28S', 'P3Y7M23DT4H24M58.38S', None),
('PT1H1.95S', 'P3Y7M23DT5H25M0.33S',
'P3Y7M23DT6H25M2.28S', '-P3Y7M23DT4H24M58.38S', None),
('P1332DT55M0.33S', 'PT1H1.95S',
'P1332DT1H55M2.28S', 'P1331DT23H54M58.38S', True),
('PT1H1.95S', 'P1332DT55M0.33S',
'P1332DT1H55M2.28S', '-P1331DT23H54M58.38S', False))
# A list of test cases to test addition and subtraction of date/datetime
# and Duration objects. They are tested against the results of an
# equally long timedelta duration.
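# Each row asserts that start + tdelta == start + duration (and likewise
# start - tdelta == start - duration), e.g. date(2008, 2, 29) plus ten days
# must match whether the offset is a timedelta or an equivalent Duration.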
DATE_TEST_CASES = ( (date(2008, 2, 29),
timedelta(days=10, hours=12, minutes=20),
Duration(days=10, hours=12, minutes=20)),
(date(2008, 1, 31),
timedelta(days=10, hours=12, minutes=20),
Duration(days=10, hours=12, minutes=20)),
(datetime(2008, 2, 29),
timedelta(days=10, hours=12, minutes=20),
Duration(days=10, hours=12, minutes=20)),
(datetime(2008, 1, 31),
timedelta(days=10, hours=12, minutes=20),
Duration(days=10, hours=12, minutes=20)),
(datetime(2008, 4, 21),
timedelta(days=10, hours=12, minutes=20),
Duration(days=10, hours=12, minutes=20)),
(datetime(2008, 5, 5),
timedelta(days=10, hours=12, minutes=20),
Duration(days=10, hours=12, minutes=20)),
(datetime(2000, 1, 1),
timedelta(hours=-33),
Duration(hours=-33)),
(datetime(2008, 5, 5),
Duration(years=1, months=1, days=10, hours=12,
minutes=20),
Duration(months=13, days=10, hours=12, minutes=20)),
(datetime(2000, 3, 30),
Duration(years=1, months=1, days=10, hours=12,
minutes=20),
Duration(months=13, days=10, hours=12, minutes=20)),
)
# A list of test cases for the addition of date/datetime and Duration. The
# results are compared against a given expected result.
DATE_CALC_TEST_CASES = (
(date(2000, 2, 1),
Duration(years=1, months=1),
date(2001, 3, 1)),
(date(2000, 2, 29),
Duration(years=1, months=1),
date(2001, 3, 29)),
(date(2000, 2, 29),
Duration(years=1),
date(2001, 2, 28)),
(date(1996, 2, 29),
Duration(years=4),
date(2000, 2, 29)),
(date(2096, 2, 29),
Duration(years=4),
date(2100, 2, 28)),
(date(2000, 2, 1),
Duration(years=-1, months=-1),
date(1999, 1, 1)),
(date(2000, 2, 29),
Duration(years=-1, months=-1),
date(1999, 1, 29)),
(date(2000, 2, 1),
Duration(years=1, months=1, days=1),
date(2001, 3, 2)),
(date(2000, 2, 29),
Duration(years=1, months=1, days=1),
date(2001, 3, 30)),
(date(2000, 2, 29),
Duration(years=1, days=1),
date(2001, 3, 1)),
(date(1996, 2, 29),
Duration(years=4, days=1),
date(2000, 3, 1)),
(date(2096, 2, 29),
Duration(years=4, days=1),
date(2100, 3, 1)),
(date(2000, 2, 1),
Duration(years=-1, months=-1, days=-1),
date(1998, 12, 31)),
(date(2000, 2, 29),
Duration(years=-1, months=-1, days=-1),
date(1999, 1, 28)),
(date(2001, 4, 1),
Duration(years=-1, months=-1, days=-1),
date(2000, 2, 29)),
(date(2000, 4, 1),
Duration(years=-1, months=-1, days=-1),
date(1999, 2, 28)),
(Duration(years=1, months=2),
Duration(years=0, months=0, days=1),
Duration(years=1, months=2, days=1)),
(Duration(years=-1, months=-1, days=-1),
date(2000, 4, 1),
date(1999, 2, 28)),
(Duration(years=1, months=1, weeks=5),
date(2000, 1, 30),
date(2001, 4, 4)),
(Duration(years=1, months=1, weeks=5),
'raise exception',
None),
('raise exception',
Duration(years=1, months=1, weeks=5),
None),
(Duration(years=1, months=2),
timedelta(days=1),
Duration(years=1, months=2, days=1)),
(timedelta(days=1),
Duration(years=1, months=2),
Duration(years=1, months=2, days=1)),
#(date(2000, 1, 1),
# Duration(years=1.5),
# date(2001, 6, 1)),
#(date(2000, 1, 1),
# Duration(years=1, months=1.5),
# date(2001, 2, 14)),
)
class DurationTest(unittest.TestCase):
'''
This class tests various other aspects of the isoduration module,
which are not covered with the test cases listed above.
'''
def test_associative(self):
'''
Adding two durations to a date is not associative: the result depends on the order of addition.
'''
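# Illustration (assuming isodate clamps month additions to the month's last
# day): 2000-03-30 + 1 day = 2000-03-31, + 1 month clamps to 2000-04-30;
# but 2000-03-30 + 1 month = 2000-04-30, + 1 day rolls over to 2000-05-01.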
days1 = Duration(days=1)
months1 = Duration(months=1)
start = date(2000, 3, 30)
res1 = start + days1 + months1
res2 = start + months1 + days1
self.assertNotEqual(res1, res2)
def test_typeerror(self):
'''
Test if TypeError is raised with certain parameters.
'''
self.assertRaises(TypeError, parse_duration, date(2000, 1, 1))
self.assertRaises(TypeError, operator.sub, Duration(years=1),
date(2000, 1, 1))
self.assertRaises(TypeError, operator.sub, 'raise exc',
Duration(years=1))
def test_parseerror(self):
'''
Test for unparseable duration string.
'''
self.assertRaises(ISO8601Error, parse_duration, 'T10:10:10')
def test_repr(self):
'''
Test __repr__ and __str__ for Duration objects.
'''
dur = Duration(10, 10, years=10, months=10)
self.assertEqual('10 years, 10 months, 10 days, 0:00:10', str(dur))
self.assertEqual('isodate.duration.Duration(10, 10, 0,'
' years=10, months=10)', repr(dur))
def test_neg(self):
'''
Test __neg__ for Duration objects.
'''
self.assertEqual(-Duration(0), Duration(0))
self.assertEqual(-Duration(years=1, months=1),
Duration(years=-1, months=-1))
self.assertEqual(-Duration(years=1, months=1), Duration(months=-13))
self.assertNotEqual(-Duration(years=1), timedelta(days=-365))
self.assertNotEqual(-timedelta(days=365), Duration(years=-1))
# FIXME: this test fails in python 3... it seems like python3
# treats a == b the same as b == a
#self.assertNotEqual(-timedelta(days=10), -Duration(days=10))
def test_format(self):
'''
Test various other strftime combinations.
'''
self.assertEqual(duration_isoformat(Duration(0)), 'P0D')
self.assertEqual(duration_isoformat(-Duration(0)), 'P0D')
self.assertEqual(duration_isoformat(Duration(seconds=10)), 'PT10S')
self.assertEqual(duration_isoformat(Duration(years=-1, months=-1)),
'-P1Y1M')
self.assertEqual(duration_isoformat(-Duration(years=1, months=1)),
'-P1Y1M')
self.assertEqual(duration_isoformat(-Duration(years=-1, months=-1)),
'P1Y1M')
self.assertEqual(duration_isoformat(-Duration(years=-1, months=-1)),
'P1Y1M')
dur = Duration(years=3, months=7, days=23, hours=5, minutes=25,
milliseconds=330)
self.assertEqual(duration_isoformat(dur), 'P3Y7M23DT5H25M0.33S')
self.assertEqual(duration_isoformat(-dur), '-P3Y7M23DT5H25M0.33S')
def test_equal(self):
'''
Test __eq__ and __ne__ methods.
'''
self.assertEqual(Duration(years=1, months=1),
Duration(years=1, months=1))
self.assertEqual(Duration(years=1, months=1), Duration(months=13))
self.assertNotEqual(Duration(years=1, months=2),
Duration(years=1, months=1))
self.assertNotEqual(Duration(years=1, months=1), Duration(months=14))
self.assertNotEqual(Duration(years=1), timedelta(days=365))
self.assertFalse(Duration(years=1, months=1) !=
Duration(years=1, months=1))
self.assertFalse(Duration(years=1, months=1) != Duration(months=13))
self.assertTrue(Duration(years=1, months=2) !=
Duration(years=1, months=1))
self.assertTrue(Duration(years=1, months=1) != Duration(months=14))
self.assertTrue(Duration(years=1) != timedelta(days=365))
self.assertEqual(Duration(days=1), timedelta(days=1))
# FIXME: this test fails in python 3... it seems like python3
# treats a != b the same as b != a
#self.assertNotEqual(timedelta(days=1), Duration(days=1))
def create_parsetestcase(durationstring, expectation, format, altstr):
"""
Create a TestCase class for a specific test.
This allows having a separate TestCase for each test tuple from the
PARSE_TEST_CASES list, so that a failed test won't stop other tests.
"""
class TestParseDuration(unittest.TestCase):
'''
A test case template to parse an ISO duration string into a
timedelta or Duration object.
'''
def test_parse(self):
'''
Parse an ISO duration string and compare it to the expected value.
'''
result = parse_duration(durationstring)
self.assertEqual(result, expectation)
def test_format(self):
'''
Take duration/timedelta object and create ISO string from it.
This is the reverse test to test_parse.
'''
if altstr:
self.assertEqual(duration_isoformat(expectation, format),
altstr)
else:
# if durationstring == '-P2W':
# import pdb; pdb.set_trace()
self.assertEqual(duration_isoformat(expectation, format),
durationstring)
return unittest.TestLoader().loadTestsFromTestCase(TestParseDuration)
def create_mathtestcase(dur1, dur2, resadd, ressub, resge):
"""
Create a TestCase class for a specific test.
This allows having a separate TestCase for each test tuple from the
MATH_TEST_CASES list, so that a failed test won't stop other tests.
"""
dur1 = parse_duration(dur1)
dur2 = parse_duration(dur2)
resadd = parse_duration(resadd)
ressub = parse_duration(ressub)
class TestMathDuration(unittest.TestCase):
'''
A test case template to test addition, subtraction and >
operators for Duration objects.
'''
def test_add(self):
'''
Test operator + (__add__, __radd__)
'''
self.assertEqual(dur1 + dur2, resadd)
def test_sub(self):
'''
Test operator - (__sub__, __rsub__)
'''
self.assertEqual(dur1 - dur2, ressub)
def test_ge(self):
'''
Test operator > and <
'''
def dogetest():
''' Test greater than.'''
return dur1 > dur2
def doletest():
''' Test less than.'''
return dur1 < dur2
if resge is None:
self.assertRaises(TypeError, dogetest)
self.assertRaises(TypeError, doletest)
else:
self.assertEqual(dogetest(), resge)
self.assertEqual(doletest(), not resge)
return unittest.TestLoader().loadTestsFromTestCase(TestMathDuration)
def create_datetestcase(start, tdelta, duration):
"""
Create a TestCase class for a specific test.
This allows having a separate TestCase for each test tuple from the
DATE_TEST_CASES list, so that a failed test won't stop other tests.
"""
class TestDateCalc(unittest.TestCase):
'''
A test case template to test addition and subtraction
operators for Duration objects.
'''
def test_add(self):
'''
Test operator +.
'''
self.assertEqual(start + tdelta, start + duration)
def test_sub(self):
'''
Test operator -.
'''
self.assertEqual(start - tdelta, start - duration)
return unittest.TestLoader().loadTestsFromTestCase(TestDateCalc)
def create_datecalctestcase(start, duration, expectation):
"""
Create a TestCase class for a specific test.
This allows having a separate TestCase for each test tuple from the
DATE_CALC_TEST_CASES list, so that a failed test won't stop other tests.
"""
class TestDateCalc(unittest.TestCase):
'''
A test case template to test addition operators for Duration objects.
'''
def test_calc(self):
'''
Test operator +.
'''
if expectation is None:
self.assertRaises(TypeError, operator.add, start, duration)
else:
self.assertEqual(start + duration, expectation)
return unittest.TestLoader().loadTestsFromTestCase(TestDateCalc)
def test_suite():
'''
Return a test suite containing all tests defined above.
'''
suite = unittest.TestSuite()
for durationstring, (expectation, format, altstr) in PARSE_TEST_CASES.items():
suite.addTest(create_parsetestcase(durationstring, expectation,
format, altstr))
for testdata in MATH_TEST_CASES:
suite.addTest(create_mathtestcase(*testdata))
for testdata in DATE_TEST_CASES:
suite.addTest(create_datetestcase(*testdata))
for testdata in DATE_CALC_TEST_CASES:
suite.addTest(create_datecalctestcase(*testdata))
suite.addTest(unittest.TestLoader().loadTestsFromTestCase(DurationTest))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
|
{
"content_hash": "f5b370dfea7044e16e7a69e56c1d0b5f",
"timestamp": "",
"source": "github",
"line_count": 453,
"max_line_length": 82,
"avg_line_length": 42.33112582781457,
"alnum_prop": 0.5066750104297038,
"repo_name": "unor/schemaorg",
"id": "d8b4cf28c6165487a9c8e50f8fa45abe83bc982c",
"size": "20722",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "lib/isodate/tests/test_duration.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "5585717"
},
{
"name": "Python",
"bytes": "2417294"
},
{
"name": "Shell",
"bytes": "81"
},
{
"name": "Smarty",
"bytes": "26365"
},
{
"name": "Tcl",
"bytes": "98801"
}
],
"symlink_target": ""
}
|
import os
import sys
import time
import threading
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) # not required after 'pip install uiautomation'
import uiautomation as auto
def threadFunc(uselessRoot):
"""
If you want to use UI Controls in a new thread, create a UIAutomationInitializerInThread object first.
But you can't use a Control or a Pattern created in a different thread.
So you can't create a Control or a Pattern in the main thread, pass it to a new thread and use it there.
"""
# print(uselessRoot)  # you cannot use uselessRoot because it is a control created in a different thread
th = threading.current_thread()
auto.Logger.WriteLine('\nThis is running in a new thread. {} {}'.format(th.ident, th.name), auto.ConsoleColor.Cyan)
time.sleep(2)
with auto.UIAutomationInitializerInThread(debug=True):
auto.GetConsoleWindow().CaptureToImage('console_newthread.png')
newRoot = auto.GetRootControl() # ok, root control created in current thread
auto.EnumAndLogControl(newRoot, 1)
auto.Logger.WriteLine('\nThread exits. {} {}'.format(th.ident, th.name), auto.ConsoleColor.Cyan)
def main():
mth = threading.current_thread()
auto.Logger.WriteLine('This is running in main thread. {} {}'.format(mth.ident, mth.name), auto.ConsoleColor.Cyan)
time.sleep(2)
auto.GetConsoleWindow().CaptureToImage('console_mainthread.png')
root = auto.GetRootControl()
auto.EnumAndLogControl(root, 1)
th = threading.Thread(target=threadFunc, args=(root, ))
th.start()
th.join()
auto.Logger.WriteLine('\nMain thread exits. {} {}'.format(mth.ident, mth.name), auto.ConsoleColor.Cyan)
if __name__ == '__main__':
main()
input('press Enter to exit')
|
{
"content_hash": "2d6eb91507f2a9795fbeb62d23973039",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 125,
"avg_line_length": 42.42857142857143,
"alnum_prop": 0.7081930415263749,
"repo_name": "yinkaisheng/Python-UIAutomation-for-Windows",
"id": "4bd02a5ac737e014e30dd5bc522cf40e8902c5d6",
"size": "1816",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "demos/uiautomation_in_thread.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "159"
},
{
"name": "Python",
"bytes": "2911858"
}
],
"symlink_target": ""
}
|
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 128 , FREQ = 'D', seed = 0, trendtype = "MovingAverage", cycle_length = 12, transform = "None", sigma = 0.0, exog_count = 0, ar_order = 0);
|
{
"content_hash": "22e1ca26cf73915570cbe97b7436f78e",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 163,
"avg_line_length": 37.57142857142857,
"alnum_prop": 0.7034220532319392,
"repo_name": "antoinecarme/pyaf",
"id": "078bc8ef8600ecd68d560e81eef9d54a0860ea96",
"size": "263",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/artificial/transf_None/trend_MovingAverage/cycle_12/ar_/test_artificial_128_None_MovingAverage_12__0.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6773299"
},
{
"name": "Procfile",
"bytes": "24"
},
{
"name": "Python",
"bytes": "54209093"
},
{
"name": "R",
"bytes": "807"
},
{
"name": "Shell",
"bytes": "3619"
}
],
"symlink_target": ""
}
|
from unittest import mock
from nose.plugins.attrib import attr
from tests.base_testcase import BoilerTestCase
from faker import Factory
from boiler.collections import PaginatedCollection
from tests.boiler_test_app.models import User
from boiler.feature.orm import db
from pprint import pprint as pp
@attr('kernel', 'collections', 'paginated_collection')
class PaginatedCollectionTests(BoilerTestCase):
"""
Paginated collection test
This is a test for a collection that allows pagination through
an SQLAlchemy query. This is integrated functionality, so to test
it we need an actual set of data in a database.
"""
def setUp(self):
super().setUp()
self.create_db()
def create_fake_data(self, how_many=1):
""" Create a fake data set to test our collection """
fake = Factory.create()
items = []
for i in range(how_many):
user = User(
email=fake.email(),
password=fake.password()
)
db.session.add(user)
db.session.commit()
items.append(user)
return items
# ------------------------------------------------------------------------
# General
# ------------------------------------------------------------------------
def test_can_create_instance(self):
""" Can create an instance of collection """
collection = PaginatedCollection(User.query)
self.assertIsInstance(collection, PaginatedCollection)
def test_can_get_collection(self):
""" Getting collection as dictionary """
collection = PaginatedCollection(User.query)
collection = collection.dict()
self.assertIsInstance(collection, dict)
self.assertIsInstance(collection['items'], list)
def test_can_get_printable_representation(self):
""" Getting printable representation of a collection """
collection = PaginatedCollection(User.query)
printable = collection.__repr__()
self.assertTrue(printable.startswith('<PaginatedCollection'))
def test_can_iterate_through_page_items(self):
""" Iterating through collection items """
items1 = self.create_fake_data(2)
items2 = self.create_fake_data(2)
collection = PaginatedCollection(User.query, per_page=2)
for item in collection:
self.assertIn(item, items1)
self.assertNotIn(item, items2)
collection.next_page()
for item in collection:
self.assertNotIn(item, items1)
self.assertIn(item, items2)
def test_can_access_totals(self):
""" Has access to total counters """
self.create_fake_data(2)
collection = PaginatedCollection(User.query, per_page=1)
self.assertEquals(2, collection.total_items)
self.assertEquals(2, collection.total_pages)
def test_can_fetch_first_page(self):
""" Can fetch first page of items """
items = self.create_fake_data(2)
collection = PaginatedCollection(User.query, per_page=1)
self.assertEquals(items[0].id, collection.items[0].id)
def test_can_fetch_arbitrary_page(self):
""" Can to fetch any page of items"""
items = self.create_fake_data(2)
collection = PaginatedCollection(User.query, per_page=1, page=2)
self.assertEquals(items[1].id, collection.items[0].id)
def test_can_check_if_on_first_page(self):
""" Checking if collection is on the first page """
self.create_fake_data(2)
collection = PaginatedCollection(User.query, per_page=1, page=1)
self.assertTrue(collection.is_first_page())
def test_can_check_if_on_last_page(self):
""" Checking if collection is on the last page """
self.create_fake_data(2)
collection = PaginatedCollection(User.query, per_page=1, page=2)
self.assertTrue(collection.is_last_page())
def test_can_fetch_next_page(self):
""" Fetching next page for the collection (unless on last page)"""
page1 = self.create_fake_data(4)
page2 = self.create_fake_data(3)
collection = PaginatedCollection(User.query, per_page=4)
self.assertEquals(1, collection.page)
for item in page1: self.assertIn(item, collection.items)
for item in page2: self.assertNotIn(item, collection.items)
got_next = collection.next_page()
self.assertTrue(got_next)
self.assertEquals(2, collection.page)
for item in page1: self.assertNotIn(item, collection.items)
for item in page2: self.assertIn(item, collection.items)
got_next = collection.next_page()
self.assertFalse(got_next)
def test_can_fetch_previous_page(self):
""" Fetching previous page for the collection (unlest on first page) """
page1 = self.create_fake_data(4)
page2 = self.create_fake_data(3)
collection = PaginatedCollection(User.query, per_page=4, page=2)
self.assertEquals(2, collection.page)
for item in page1: self.assertNotIn(item, collection.items)
for item in page2: self.assertIn(item, collection.items)
got_previous = collection.previous_page()
self.assertTrue(got_previous)
self.assertEquals(1, collection.page)
for item in page1: self.assertIn(item, collection.items)
for item in page2: self.assertNotIn(item, collection.items)
got_previous = collection.previous_page()
self.assertFalse(got_previous)
def test_generate_pagination_on_instantiation(self):
""" Generate pagination controls for collection """
self.create_fake_data(15)
collection = PaginatedCollection(User.query, per_page=5, page=3)
pagination = collection.pagination
self.assertIsNone(pagination['last'])
self.assertIsNone(pagination['next'])
self.assertIsNone(pagination['previous_slice'])
self.assertIsNone(pagination['next_slice'])
self.assertEquals(2, pagination['previous'])
self.assertEquals(3, len(pagination['pages']))
# as dict
self.assertIn('pagination', collection.dict())
|
{
"content_hash": "9869c42002f66af2155588619b997158",
"timestamp": "",
"source": "github",
"line_count": 161,
"max_line_length": 80,
"avg_line_length": 38.52795031055901,
"alnum_prop": 0.6377559245526359,
"repo_name": "projectshift/shift-boiler",
"id": "ca50160aee1e2b29b05bc78bbec5d38404063e6a",
"size": "6203",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/collections_tests/paginated_collection_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "6144"
},
{
"name": "JavaScript",
"bytes": "10563"
},
{
"name": "Jinja",
"bytes": "13981"
},
{
"name": "Mako",
"bytes": "1382"
},
{
"name": "Python",
"bytes": "92204"
},
{
"name": "Shell",
"bytes": "513"
}
],
"symlink_target": ""
}
|
import os
import pdb
import sys
import tempfile
sys.path.append("/opt/tosca")
from translator.toscalib.tosca_template import ToscaTemplate
from core.models import User,Site,Deployment,Controller,SiteDeployment
from xosresource import XOSResource
class XOSSite(XOSResource):
provides = "tosca.nodes.Site"
xos_model = Site
def get_xos_args(self):
display_name = self.get_property("display_name")
if not display_name:
display_name = self.nodetemplate.name
args = {"login_base": self.nodetemplate.name,
"name": display_name}
# copy simple string properties from the template into the arguments
for prop in ["site_url", ]:
v = self.get_property(prop)
if v:
args[prop] = v
return args
def get_existing_objs(self):
return self.xos_model.objects.filter(login_base = self.nodetemplate.name)
def postprocess(self, obj):
results = []
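# nodetemplate.requirements is a list of {name: spec} dicts; a spec whose
# relationship is tosca.relationships.SiteDeployment names the Deployment
# node and nests a UsesController requirement naming the Controller.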
for reqs in self.nodetemplate.requirements:
for (k,v) in reqs.items():
if (v["relationship"] == "tosca.relationships.SiteDeployment"):
deployment_name = v["node"]
deployment = self.get_xos_object(Deployment, name=deployment_name)
controller_name = None
for sd_req in v["requirements"]:
for (sd_req_k, sd_req_v) in sd_req.items():
if sd_req_v["relationship"] == "tosca.relationships.UsesController":
controller_name = sd_req_v["node"]
if not controller_name:
raise Exception("Controller must be specified in SiteDeployment relationship")
controller = self.get_xos_object(Controller, name=controller_name, throw_exception=True)
existing_sitedeps = SiteDeployment.objects.filter(deployment=deployment, site=obj)
if existing_sitedeps:
sd = existing_sitedeps[0]
if sd.controller != controller:
sd.controller = controller
sd.save()
self.info("SiteDeployment from %s to %s updated controller" % (str(obj), str(deployment)))
else:
self.info("SiteDeployment from %s to %s already exists" % (str(obj), str(deployment)))
else:
sitedep = SiteDeployment(deployment=deployment, site=obj, controller=controller)
sitedep.save()
self.info("Created SiteDeployment from %s to %s" % (str(obj), str(deployment)))
def create(self):
nodetemplate = self.nodetemplate
siteName = nodetemplate.name
xos_args = self.get_xos_args()
site = Site(**xos_args)
site.caller = self.user
site.save()
self.postprocess(site)
self.info("Created Site '%s'" % (str(site), ))
def delete(self, obj):
if obj.slices.exists():
self.info("Site %s has active slices; skipping delete" % obj.name)
return
if obj.users.exists():
self.info("Site %s has active users; skipping delete" % obj.name)
return
if obj.nodes.exists():
self.info("Site %s has active nodes; skipping delete" % obj.name)
return
super(XOSSite, self).delete(obj)
|
{
"content_hash": "3143092ed9da8a9a9e9edf1497b640ef",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 118,
"avg_line_length": 37.56382978723404,
"alnum_prop": 0.5581988105352591,
"repo_name": "xmaruto/mcord",
"id": "0db2705dfa9d133be56d2cb49ab9d116798705cb",
"size": "3641",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "xos/tosca/resources/xossite.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "847306"
},
{
"name": "HTML",
"bytes": "732024"
},
{
"name": "JavaScript",
"bytes": "5293940"
},
{
"name": "Makefile",
"bytes": "13901"
},
{
"name": "Python",
"bytes": "1937152"
},
{
"name": "Shell",
"bytes": "49250"
}
],
"symlink_target": ""
}
|
"""
Server API
Reference for Server API (REST/Json)
OpenAPI spec version: 2.0.21
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kinow_client
from kinow_client.rest import ApiException
from kinow_client.models.gift_list_response import GiftListResponse
class TestGiftListResponse(unittest.TestCase):
""" GiftListResponse unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testGiftListResponse(self):
"""
Test GiftListResponse
"""
model = kinow_client.models.gift_list_response.GiftListResponse()
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "43b9d3f2651818ff08d1506b55a5cff1",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 73,
"avg_line_length": 19.125,
"alnum_prop": 0.6718954248366014,
"repo_name": "kinow-io/kinow-python-sdk",
"id": "18c070b7e1a62fa304a8c52bdf9bc2c8c95c39ec",
"size": "782",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_gift_list_response.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "4659182"
},
{
"name": "Shell",
"bytes": "1666"
}
],
"symlink_target": ""
}
|
import logging
import os
import sys
from mimetypes import guess_type
from ipypublish.frontend.shared import parse_options
from ipypublish.convert.main import IpyPubMain
from ipypublish.postprocessors.reveal_serve import RevealServer
logger = logging.getLogger("nbpresent")
def nbpresent(
inpath,
outformat="slides_standard",
outpath=None,
dump_files=True,
ignore_prefix="_",
clear_files=False,
log_level="INFO",
dry_run=False,
print_traceback=False,
export_paths=(),
):
""" load reveal.js slides as a web server,
converting from ipynb first if path extension is .ipynb
Parameters
----------
inpath: str
path to html or ipynb file
outformat: str
conversion format to use
outpath : str or pathlib.Path
path to output converted files
dump_files: bool
whether to write files from nbconvert (images, etc) to outpath
clear_files : bool
whether to clear existing external files in outpath folder
ignore_prefix: str
ignore ipynb files with this prefix
log_level: str
the logging level (debug, info, critical, ...)
"""
inpath_name, inpath_ext = os.path.splitext(os.path.basename(inpath))
output_mimetype = guess_type(inpath, strict=False)[0]
output_mimetype = "unknown" if output_mimetype is None else output_mimetype
if output_mimetype != "text/html":
config = {
"IpyPubMain": {
"conversion": outformat,
"plugin_folder_paths": export_paths,
"outpath": outpath,
"ignore_prefix": ignore_prefix,
"log_to_stdout": True,
"log_level_stdout": log_level,
"log_to_file": True,
"log_level_file": log_level,
"default_pporder_kwargs": dict(
dry_run=dry_run,
clear_existing=clear_files,
dump_files=dump_files,
serve_html=True,
slides=True,
),
}
}
publish = IpyPubMain(config=config)
try:
outdata = publish(inpath)
outpath = outdata["outpath"]
output_mimetype = outdata["exporter"].output_mimetype
except Exception as err:
logger.error("Run Failed: {}".format(err))
if print_traceback:
raise
return 1
else:
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
server = RevealServer()
if not dry_run:
server.postprocess("", output_mimetype, os.path.abspath(inpath))
return 0
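# Example invocation (file names are hypothetical):
# nbpresent('talk.ipynb', outformat='slides_standard', outpath='converted/')
# An .html input skips conversion and is served directly via RevealServer.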
def run(sys_args=None):
if sys_args is None:
sys_args = sys.argv[1:]
filepath, options = parse_options(sys_args, "nbpresent")
outcode = nbpresent(filepath, **options)
return outcode
|
{
"content_hash": "10ab362153b3d4ee0cdfb448d5aa30e2",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 79,
"avg_line_length": 28.41176470588235,
"alnum_prop": 0.5886818495514148,
"repo_name": "chrisjsewell/ipypublish",
"id": "d5019a906561397d4679547285aad0014dcee84d",
"size": "2920",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "ipypublish/frontend/nbpresent.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "4549"
},
{
"name": "CSS",
"bytes": "7275"
},
{
"name": "HTML",
"bytes": "7194717"
},
{
"name": "JavaScript",
"bytes": "2124646"
},
{
"name": "Jupyter Notebook",
"bytes": "1319557"
},
{
"name": "Makefile",
"bytes": "4666"
},
{
"name": "Python",
"bytes": "494303"
},
{
"name": "Shell",
"bytes": "552"
},
{
"name": "TeX",
"bytes": "267595"
}
],
"symlink_target": ""
}
|
import time
import random
def countingSort2(data, smallest, largest):
counters = [0 for x in range(smallest, largest + 1)]
for i in data:
counters[i - smallest] += 1
del data[:]
for i, c in enumerate(counters):
data.extend([smallest + i] * c)
def countingSort(data, smallest, largest):
counters = [0 for x in range(smallest, largest + 1)]
for i in data:
counters[i - smallest] += 1
dataPointer = len(data) - 1
for i in range(len(counters) - 1, -1, -1):
while counters[i] > 0:
data[dataPointer] = i + smallest
counters[i] -= 1
dataPointer -= 1
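# Both variants run in O(n + k) time for n items spanning k distinct values.
# countingSort2 rebuilds the list with extend(), while countingSort writes the
# sorted keys back into the existing list from the end; since only the integer
# keys (not full records) are stored, stability does not come into play here.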
if __name__ == '__main__':
data = [random.randint(1, 10) for x in range(10000000)]
data1 = list(data)
millis1 = int(round(time.time() * 1000))
countingSort(data, 1, 10)
millis2 = int(round(time.time() * 1000))
print("Time in milliseconds: %d " % (millis2 - millis1))
countingSort2(data1, 1, 10)
millis3 = int(round(time.time() * 1000))
print("Time in milliseconds: %d " % (millis3 - millis2))
|
{
"content_hash": "176613014ef8d440fb680e1c3bb2ed6a",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 60,
"avg_line_length": 29.97222222222222,
"alnum_prop": 0.58758109360519,
"repo_name": "yubinbai/python_practice",
"id": "578934dbc99304211e4c6411095e82f33f96a404",
"size": "1079",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "countingSort.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "7209"
},
{
"name": "Python",
"bytes": "94379"
}
],
"symlink_target": ""
}
|
"""Read in model results and plot results
"""
import os
import logging
import numpy as np
import time
from energy_demand.read_write import data_loader, read_data
from energy_demand.technologies import tech_related
def read_in_weather_results(
path_result,
seasons,
model_yeardays_daytype,
pop_data,
fueltype_str
):
"""Read and post calculate results from txt files
and store into container
Arguments
---------
path_result : str
Paths
seasons : dict
seasons
model_yeardays_daytype : dict
Daytype of modelled yeardays
"""
logging.info("... Reading in results")
fueltype_int = tech_related.get_fueltype_int(fueltype_str)
results_container = {}
# Read in total regional demands per fueltype
results_container['ed_reg_tot_y'] = read_data.read_results_yh(
path_result, 'only_total')
#print(results_container['ed_reg_tot_y'][2015].shape)
results_container['ed_reg_peakday'] = read_data.read_results_yh(
os.path.join('simulation_results', path_result), 'only_peak')
#print(results_container['ed_reg_peakday'][2015].shape)
results_container['ed_reg_peakday_peak_hour'] = {}
results_container['ed_reg_peakday_peak_hour_per_pop'] = {}
results_container['national_peak'] = {}
results_container['regional_share_national_peak'] = {}
results_container['regional_share_national_peak_pp'] = {}
results_container['pp_peak_abs'] = {}
results_container['regional_peak'] = {}
results_container['national_all_fueltypes'] = {}
results_container['mean_peak_day_demand'] = {}
for year in results_container['ed_reg_peakday']:
reg_pop_yr = pop_data[year]
# Get peak demand of each region
results_container['ed_reg_peakday_peak_hour'][year] = results_container['ed_reg_peakday'][year].max(axis=2)
# Divide peak by number of population
results_container['ed_reg_peakday_peak_hour_per_pop'][year] = results_container['ed_reg_peakday_peak_hour'][year] / reg_pop_yr
# Get national peak
national_demand_per_hour = results_container['ed_reg_peakday'][year].sum(axis=1) #Aggregate hourly across all regions
# Get maximum hour for electricity demand
max_hour = national_demand_per_hour[fueltype_int].argmax()
results_container['national_peak'][year] = national_demand_per_hour[:, max_hour]
# Calculate regional share of peak hour to national peak
national_peak = results_container['national_peak'][year][fueltype_int]
regional_peak = results_container['ed_reg_peakday'][year][fueltype_int][:, max_hour]
results_container['regional_peak'][year] = regional_peak
results_container['regional_share_national_peak'][year] = (100 / national_peak) * regional_peak #1 = 1 %
results_container['regional_share_national_peak_pp'][year] = ((100 / national_peak) * regional_peak) / reg_pop_yr #1 = 1 %
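# Worked example (illustrative units): a region drawing 5 GW of a 50 GW
# national peak gets a share of (100 / 50) * 5 = 10, i.e. 10 %, and that
# share divided by the region's population gives the per-person figure.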
# Calculate the mean demand of the national peak day
results_container['national_all_fueltypes'][year] = np.sum(results_container['ed_reg_tot_y'][year], axis=1)
results_container['mean_peak_day_demand'][year] = np.mean(national_demand_per_hour, axis=1)
# Calculate contribution per person towards national peak (reg_peak / people) [abs]
#print(results_container['ed_reg_peakday'][year].shape)
#print(reg_pop_yr.shape)
# results_container['pp_peak_abs'][year] = (
# results_container['ed_reg_peakday'][year][:,:, max_hour] / reg_pop_yr)
#(cpp = (regional peak / national peak) / people [%]
logging.info("... Reading in results finished")
return results_container
|
{
"content_hash": "91aebabfe6663a11f66dab160fa83b1f",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 134,
"avg_line_length": 39.797872340425535,
"alnum_prop": 0.6562416466185512,
"repo_name": "nismod/energy_demand",
"id": "e883bd30fb4967bfa584794420926f6a142c17e0",
"size": "3741",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "energy_demand/read_write/read_weather_results.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1432899"
},
{
"name": "Shell",
"bytes": "2063"
}
],
"symlink_target": ""
}
|
"""
Picarto.TV API Documentation
The Picarto.TV API documentation Note, for fixed access tokens, the header that needs to be sent is of the format: `Authorization: Bearer yourTokenHere` This can be generated at https://oauth.picarto.tv/ For chat API, see https://docs.picarto.tv/chat/chat.proto - contact via the email below for implementation details
OpenAPI spec version: 1.2.5
Contact: api@picarto.tv
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class ChannelDetails(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'user_id': 'int',
'name': 'str',
'avatar': 'str',
'online': 'bool',
'viewers': 'int',
'viewers_total': 'int',
'thumbnails': 'Thumbnail',
'followers': 'int',
'subscribers': 'int',
'adult': 'bool',
'category': 'str',
'account_type': 'str',
'commissions': 'bool',
'recordings': 'bool',
'title': 'str',
'description_panels': 'list[DescriptionPanel]',
'private': 'bool',
'gaming': 'bool',
'guest_chat': 'bool',
'last_live': 'datetime',
'tags': 'list[str]',
'multistream': 'list[MultiParticipant]',
'languages': 'Languages'
}
attribute_map = {
'user_id': 'user_id',
'name': 'name',
'avatar': 'avatar',
'online': 'online',
'viewers': 'viewers',
'viewers_total': 'viewers_total',
'thumbnails': 'thumbnails',
'followers': 'followers',
'subscribers': 'subscribers',
'adult': 'adult',
'category': 'category',
'account_type': 'account_type',
'commissions': 'commissions',
'recordings': 'recordings',
'title': 'title',
'description_panels': 'description_panels',
'private': 'private',
'gaming': 'gaming',
'guest_chat': 'guest_chat',
'last_live': 'last_live',
'tags': 'tags',
'multistream': 'multistream',
'languages': 'languages'
}
def __init__(self, user_id=None, name=None, avatar=None, online=None, viewers=None, viewers_total=None, thumbnails=None, followers=None, subscribers=None, adult=None, category=None, account_type=None, commissions=None, recordings=None, title=None, description_panels=None, private=None, gaming=None, guest_chat=None, last_live=None, tags=None, multistream=None, languages=None):
"""
ChannelDetails - a model defined in Swagger
"""
self._user_id = None
self._name = None
self._avatar = None
self._online = None
self._viewers = None
self._viewers_total = None
self._thumbnails = None
self._followers = None
self._subscribers = None
self._adult = None
self._category = None
self._account_type = None
self._commissions = None
self._recordings = None
self._title = None
self._description_panels = None
self._private = None
self._gaming = None
self._guest_chat = None
self._last_live = None
self._tags = None
self._multistream = None
self._languages = None
if user_id is not None:
self.user_id = user_id
if name is not None:
self.name = name
if avatar is not None:
self.avatar = avatar
if online is not None:
self.online = online
if viewers is not None:
self.viewers = viewers
if viewers_total is not None:
self.viewers_total = viewers_total
if thumbnails is not None:
self.thumbnails = thumbnails
if followers is not None:
self.followers = followers
if subscribers is not None:
self.subscribers = subscribers
if adult is not None:
self.adult = adult
if category is not None:
self.category = category
if account_type is not None:
self.account_type = account_type
if commissions is not None:
self.commissions = commissions
if recordings is not None:
self.recordings = recordings
if title is not None:
self.title = title
if description_panels is not None:
self.description_panels = description_panels
if private is not None:
self.private = private
if gaming is not None:
self.gaming = gaming
if guest_chat is not None:
self.guest_chat = guest_chat
if last_live is not None:
self.last_live = last_live
if tags is not None:
self.tags = tags
if multistream is not None:
self.multistream = multistream
if languages is not None:
self.languages = languages
@property
def user_id(self):
"""
Gets the user_id of this ChannelDetails.
The channel's user ID
:return: The user_id of this ChannelDetails.
:rtype: int
"""
return self._user_id
@user_id.setter
def user_id(self, user_id):
"""
Sets the user_id of this ChannelDetails.
The channel's user ID
:param user_id: The user_id of this ChannelDetails.
:type: int
"""
self._user_id = user_id
@property
def name(self):
"""
Gets the name of this ChannelDetails.
The name of the channel
:return: The name of this ChannelDetails.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this ChannelDetails.
The name of the channel
:param name: The name of this ChannelDetails.
:type: str
"""
self._name = name
@property
def avatar(self):
"""
Gets the avatar of this ChannelDetails.
The URI of the user's avatar
:return: The avatar of this ChannelDetails.
:rtype: str
"""
return self._avatar
@avatar.setter
def avatar(self, avatar):
"""
Sets the avatar of this ChannelDetails.
The URI of the user's avatar
:param avatar: The avatar of this ChannelDetails.
:type: str
"""
self._avatar = avatar
@property
def online(self):
"""
Gets the online of this ChannelDetails.
If the channel is online
:return: The online of this ChannelDetails.
:rtype: bool
"""
return self._online
@online.setter
def online(self, online):
"""
Sets the online of this ChannelDetails.
If the channel is online
:param online: The online of this ChannelDetails.
:type: bool
"""
self._online = online
@property
def viewers(self):
"""
Gets the viewers of this ChannelDetails.
The number of current viewers watching this stream (0 if offline)
:return: The viewers of this ChannelDetails.
:rtype: int
"""
return self._viewers
@viewers.setter
def viewers(self, viewers):
"""
Sets the viewers of this ChannelDetails.
The number of current viewers watching this stream (0 if offline)
:param viewers: The viewers of this ChannelDetails.
:type: int
"""
self._viewers = viewers
@property
def viewers_total(self):
"""
Gets the viewers_total of this ChannelDetails.
The total number of viewers this channel has had since the beginning of time
:return: The viewers_total of this ChannelDetails.
:rtype: int
"""
return self._viewers_total
@viewers_total.setter
def viewers_total(self, viewers_total):
"""
Sets the viewers_total of this ChannelDetails.
The total number of viewers this channel has had since the beginning of time
:param viewers_total: The viewers_total of this ChannelDetails.
:type: int
"""
self._viewers_total = viewers_total
@property
def thumbnails(self):
"""
Gets the thumbnails of this ChannelDetails.
:return: The thumbnails of this ChannelDetails.
:rtype: Thumbnail
"""
return self._thumbnails
@thumbnails.setter
def thumbnails(self, thumbnails):
"""
Sets the thumbnails of this ChannelDetails.
:param thumbnails: The thumbnails of this ChannelDetails.
:type: Thumbnail
"""
self._thumbnails = thumbnails
@property
def followers(self):
"""
Gets the followers of this ChannelDetails.
The total number of people following this streamer
:return: The followers of this ChannelDetails.
:rtype: int
"""
return self._followers
@followers.setter
def followers(self, followers):
"""
Sets the followers of this ChannelDetails.
The total number of people following this streamer
:param followers: The followers of this ChannelDetails.
:type: int
"""
self._followers = followers
@property
def subscribers(self):
"""
Gets the subscribers of this ChannelDetails.
The total number of people subscribed to this streamer
:return: The subscribers of this ChannelDetails.
:rtype: int
"""
return self._subscribers
@subscribers.setter
def subscribers(self, subscribers):
"""
Sets the subscribers of this ChannelDetails.
The total number of people subscribed to this streamer
:param subscribers: The subscribers of this ChannelDetails.
:type: int
"""
self._subscribers = subscribers
@property
def adult(self):
"""
Gets the adult of this ChannelDetails.
If this channel is an adult channel
:return: The adult of this ChannelDetails.
:rtype: bool
"""
return self._adult
@adult.setter
def adult(self, adult):
"""
Sets the adult of this ChannelDetails.
If this channel is an adult channel
:param adult: The adult of this ChannelDetails.
:type: bool
"""
self._adult = adult
@property
def category(self):
"""
Gets the category of this ChannelDetails.
The name of the category this stream is in
:return: The category of this ChannelDetails.
:rtype: str
"""
return self._category
@category.setter
def category(self, category):
"""
Sets the category of this ChannelDetails.
The name of the category this stream is in
:param category: The category of this ChannelDetails.
:type: str
"""
self._category = category
@property
def account_type(self):
"""
Gets the account_type of this ChannelDetails.
The account type of the channel
:return: The account_type of this ChannelDetails.
:rtype: str
"""
return self._account_type
@account_type.setter
def account_type(self, account_type):
"""
Sets the account_type of this ChannelDetails.
The account type of the channel
:param account_type: The account_type of this ChannelDetails.
:type: str
"""
allowed_values = ["free", "basic", "premium"]
if account_type not in allowed_values:
raise ValueError(
"Invalid value for `account_type` ({0}), must be one of {1}"
.format(account_type, allowed_values)
)
self._account_type = account_type
@property
def commissions(self):
"""
Gets the commissions of this ChannelDetails.
If this channel is accepting commissions
:return: The commissions of this ChannelDetails.
:rtype: bool
"""
return self._commissions
@commissions.setter
def commissions(self, commissions):
"""
Sets the commissions of this ChannelDetails.
If this channel is accepting commissions
:param commissions: The commissions of this ChannelDetails.
:type: bool
"""
self._commissions = commissions
@property
def recordings(self):
"""
Gets the recordings of this ChannelDetails.
If recordings are enabled and videos are accessible
:return: The recordings of this ChannelDetails.
:rtype: bool
"""
return self._recordings
@recordings.setter
def recordings(self, recordings):
"""
Sets the recordings of this ChannelDetails.
If recordings are enabled and videos are accessible
:param recordings: The recordings of this ChannelDetails.
:type: bool
"""
self._recordings = recordings
@property
def title(self):
"""
Gets the title of this ChannelDetails.
This channel's title
:return: The title of this ChannelDetails.
:rtype: str
"""
return self._title
@title.setter
def title(self, title):
"""
Sets the title of this ChannelDetails.
This channel's title
:param title: The title of this ChannelDetails.
:type: str
"""
self._title = title
@property
def description_panels(self):
"""
Gets the description_panels of this ChannelDetails.
This channel's description panels
:return: The description_panels of this ChannelDetails.
:rtype: list[DescriptionPanel]
"""
return self._description_panels
@description_panels.setter
def description_panels(self, description_panels):
"""
Sets the description_panels of this ChannelDetails.
This channel's description panels
:param description_panels: The description_panels of this ChannelDetails.
:type: list[DescriptionPanel]
"""
self._description_panels = description_panels
@property
def private(self):
"""
Gets the private of this ChannelDetails.
If this channel is in private mode
:return: The private of this ChannelDetails.
:rtype: bool
"""
return self._private
@private.setter
def private(self, private):
"""
Sets the private of this ChannelDetails.
If this channel is in private mode
:param private: The private of this ChannelDetails.
:type: bool
"""
self._private = private
@property
def gaming(self):
"""
Gets the gaming of this ChannelDetails.
If this channel is in game mode
:return: The gaming of this ChannelDetails.
:rtype: bool
"""
return self._gaming
@gaming.setter
def gaming(self, gaming):
"""
Sets the gaming of this ChannelDetails.
If this channel is in game mode
:param gaming: The gaming of this ChannelDetails.
:type: bool
"""
self._gaming = gaming
@property
def guest_chat(self):
"""
Gets the guest_chat of this ChannelDetails.
If guest (unregistered) users can talk in chat
:return: The guest_chat of this ChannelDetails.
:rtype: bool
"""
return self._guest_chat
@guest_chat.setter
def guest_chat(self, guest_chat):
"""
Sets the guest_chat of this ChannelDetails.
If guest (unregistered) users can talk in chat
:param guest_chat: The guest_chat of this ChannelDetails.
:type: bool
"""
self._guest_chat = guest_chat
@property
def last_live(self):
"""
Gets the last_live of this ChannelDetails.
The date/time this user was last live
:return: The last_live of this ChannelDetails.
:rtype: datetime
"""
return self._last_live
@last_live.setter
def last_live(self, last_live):
"""
Sets the last_live of this ChannelDetails.
The date/time this user was last live
:param last_live: The last_live of this ChannelDetails.
:type: datetime
"""
self._last_live = last_live
@property
def tags(self):
"""
Gets the tags of this ChannelDetails.
A list of tags
:return: The tags of this ChannelDetails.
:rtype: list[str]
"""
return self._tags
@tags.setter
def tags(self, tags):
"""
Sets the tags of this ChannelDetails.
A list of tags
:param tags: The tags of this ChannelDetails.
:type: list[str]
"""
self._tags = tags
@property
def multistream(self):
"""
Gets the multistream of this ChannelDetails.
A list of channels we are multistreaming with
:return: The multistream of this ChannelDetails.
:rtype: list[MultiParticipant]
"""
return self._multistream
@multistream.setter
def multistream(self, multistream):
"""
Sets the multistream of this ChannelDetails.
A list of channels we are multistreaming with
:param multistream: The multistream of this ChannelDetails.
:type: list[MultiParticipant]
"""
self._multistream = multistream
@property
def languages(self):
"""
Gets the languages of this ChannelDetails.
:return: The languages of this ChannelDetails.
:rtype: Languages
"""
return self._languages
@languages.setter
def languages(self, languages):
"""
Sets the languages of this ChannelDetails.
:param languages: The languages of this ChannelDetails.
:type: Languages
"""
self._languages = languages
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, ChannelDetails):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
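# Minimal usage sketch (field values are made up for illustration):
# details = ChannelDetails(name='somechannel', online=True, viewers=42)
# details.to_dict()  # -> {'user_id': None, 'name': 'somechannel', ...}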
|
{
"content_hash": "10e183f88a64b0f7e7ba53306a728f63",
"timestamp": "",
"source": "github",
"line_count": 741,
"max_line_length": 382,
"avg_line_length": 26.989203778677464,
"alnum_prop": 0.5731286564328216,
"repo_name": "Sythelux/Picarto.bundle",
"id": "eec58cfb75a03e78b3849ca18b1b81c8fc618179",
"size": "20016",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Contents/Libraries/Shared/PicartoClientAPI/models/channel_details.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "505"
},
{
"name": "Python",
"bytes": "927803"
}
],
"symlink_target": ""
}
|
from random import randint
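# The scheme below is a Vigenere-style cipher over the 7-bit ASCII range:
# each character is shifted by the ASCII value of the matching key character,
# modulo 127; the key is repeated (or randomly generated) to match the text
# length, and decryption applies the inverse shift.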
def encrypt(text, key):
if key == "":
for char in range(16):
key += chr(randint(0, 127))
print("Your key is " + key)
if len(text) != len(key):
oldKey = key
key = ""
for char in range(len(text)):
key += oldKey[char % len(oldKey)]
encrypted = ""
for char in range(len(text)):
newASCII = ord(text[char]) + ord(key[char])
multiple = newASCII // 127
newASCII -= multiple * 127
encrypted += chr(newASCII)
print("The encrypted text is:\n" + encrypted)
def decrypt(text, key):
if key == "":
for char in range(16):
key += chr(randint(0, 127))
print("Your key is " + key)
if len(text) != len(key):
oldKey = key
key = ""
for char in range(len(text)):
key += oldKey[char % len(oldKey)]
decrypted = ""
for char in range(len(text)):
newASCII = ord(text[char]) - ord(key[char])
if newASCII < 0:
newASCII = 127 + newASCII
decrypted += chr(newASCII)
print("The decrypted text is:\n" + decrypted)
def caesarDecrypt(text, key):
decrypted = ""
alphabet = "abcdefghijklmnopqrstuvwxyz"
if key == "":
for tried in range(len(alphabet) - 1):
print("Trying with the key " + str(tried) + ".")
key = tried + 1
for char in text:
if char in alphabet:
newIndex = alphabet.index(char) - key
if newIndex < 0:
newIndex = len(alphabet) + newIndex
decrypted += alphabet[newIndex]
elif char in alphabet.upper():
newIndex = alphabet.upper().index(char) - key
if newIndex < 0:
newIndex = len(alphabet) + newIndex
decrypted += alphabet.upper()[newIndex]
else:
decrypted += char
print("The decrypted text is:\n" + decrypted)
decrypted = ""
else:
key = int(key)
for char in text:
if char in alphabet:
newIndex = alphabet.index(char) - key
if newIndex < 0:
newIndex = len(alphabet) + newIndex
decrypted += alphabet[newIndex]
elif char in alphabet.upper():
newIndex = alphabet.upper().index(char) - key
if newIndex < 0:
newIndex = len(alphabet) + newIndex
decrypted += alphabet.upper()[newIndex]
else:
decrypted += char
print("The decrypted text is:\n" + decrypted)
def cipherMore():
print("Some samples.")
print("Encrypting with Text = ILoveReddit, Key = dailyprogrammer")
encrypt("ILoveReddit", "dailyprogrammer")
print("Decrypting with Text = ..Yc_CXTL\V, Key = dailyprogrammer")
decrypt("..Yc_CXTL\V", "dailyprogrammer")
print("Protip: It may not work on multi-line inputs, so if it doesn't work you should remove the line break and replace it with a random character as a substitution.")
while True:
action = input("What do you want to do?\n1) Encrypt\n2) Decrypt\n3) Decrypt Caesar")
text = input("Enter your text. ")
key = input("Enter your key. Leave blank for a random key. Or Brute Force for Caesar decrypt.")
if action == "1":
encrypt(text, key)
elif action == "2":
decrypt(text, key)
elif action == "3":
caesarDecrypt(text, key)
if __name__ == "__main__":
cipherMore()
|
{
"content_hash": "51d5c2808a7981e6d883dda1368619a2",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 171,
"avg_line_length": 38.65979381443299,
"alnum_prop": 0.5141333333333333,
"repo_name": "ngmhprogramming/dailyprogrammer",
"id": "674bd1cb2f2ec249b1113e412aa767c12a43c828",
"size": "3750",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Everything/python_difficult_3.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "PHP",
"bytes": "425"
},
{
"name": "Python",
"bytes": "96971"
},
{
"name": "Ruby",
"bytes": "405"
}
],
"symlink_target": ""
}
|
from ghost import GhostBoard # noqa
|
{
"content_hash": "a3c7aba22572e719aa3795947eb3bd07",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 36,
"avg_line_length": 37,
"alnum_prop": 0.7837837837837838,
"repo_name": "tylerdave/pingo-py",
"id": "220e2583ca7e928a867e3152b957352beb4af0f9",
"size": "37",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "pingo/ghost/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "416"
},
{
"name": "Python",
"bytes": "105524"
},
{
"name": "Shell",
"bytes": "221"
},
{
"name": "VimL",
"bytes": "1062"
}
],
"symlink_target": ""
}
|
import httpretty
from paystackapi.tests.base_test_case import BaseTestCase
from paystackapi.refund import Refund
class TestRefund(BaseTestCase):
@httpretty.activate
def test_valid_create(self):
"""Test Refund Create."""
httpretty.register_uri(
httpretty.POST,
self.endpoint_url("refund"),
content_type='text/json',
body='{"status": true, "message": "Refund has been queued for processing", "data": {"transaction": {}, "currency": "NGN", "amount": 180000, "status": "pending"}}',
status=200,
)
response = Refund.create(transaction=1234)
self.assertTrue(response['status'])
self.assertIn('message', response)
self.assertIn('data', response)
self.assertIn('transaction', response['data'])
self.assertIn('currency', response['data'])
@httpretty.activate
def test_list(self):
"""Test Refund List Method"""
httpretty.register_uri(
httpretty.GET,
self.endpoint_url("refund"),
content_type='text/json',
body='{"status": true, "message": "Refunds retrieved", "data": [], "meta": {}}',
status=200,
)
response = Refund.list()
self.assertTrue(response['status'])
self.assertIn('message', response)
self.assertIn('data', response)
self.assertIn('meta', response)
self.assertListEqual([], response['data'])
self.assertDictEqual({}, response['meta'])
@httpretty.activate
def test_fetch(self):
"""Test Refund Fetching"""
httpretty.register_uri(
httpretty.GET,
self.endpoint_url("refund/1234"),
content_type='text/json',
body='{"status": true, "message": "Refund retrieved", "data": {"id": 1234}}',
status=200,
)
response = Refund.fetch(refund_id=1234)
self.assertTrue(response['status'])
self.assertIn('message', response)
self.assertIn('data', response)
self.assertEqual(1234, response['data']['id'])
|
{
"content_hash": "5c67d0263e08e8a90b8fe8a1dff6806a",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 166,
"avg_line_length": 29.080645161290324,
"alnum_prop": 0.6838602329450915,
"repo_name": "andela-sjames/paystack-python",
"id": "4b48172b1e010e75bbb0f3e36c9b76394e8f61d4",
"size": "1803",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "paystackapi/tests/test_refund.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "95753"
}
],
"symlink_target": ""
}
|
import sys
import os
import sphinx_rtd_theme
from datetime import datetime
import chatterbot
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its version is used.
current_directory = os.path.dirname(os.path.abspath(__file__))
parent_directory = os.path.abspath(os.path.join(current_directory, os.pardir))
sys.path.insert(0, parent_directory)
# -- General configuration ------------------------------------------------
# Sphinx extension modules
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosectionlabel',
'sphinx.ext.coverage',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.mathjax',
'sphinx.ext.todo',
'sphinx.ext.viewcode'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
source_suffix = ['.rst', '.md']
# The encoding of source files
# source_encoding = 'utf-8-sig'
# The master toctree document
master_doc = 'index'
# General information about the project
project = 'ChatterBot'
copyright = '{}, {}'.format(datetime.now().year, chatterbot.__author__)
author = chatterbot.__author__
# The short X.Y version
version = chatterbot.__version__
# The full version, including alpha/beta/rc tags
release = chatterbot.__version__
language = 'en'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path
exclude_patterns = []
# If true, '()' will be appended to :func: etc. cross-reference text
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::)
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use
pygments_style = 'sphinx'
# -- Options for HTML output ----------------------------------------------
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'logo_only': True
}
html_show_sourcelink = False
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = '../graphics/banner.png'
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = '_static/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
# html_last_updated_fmt = None
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# Split the index into individual pages for each letter.
html_split_index = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
html_search_language = 'en'
# Output file base name for HTML help builder
htmlhelp_basename = 'ChatterBotdoc'
# Read the docs theme modifications
html_context = {
'extra_css_files': [
'_static/style.css'
]
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class])
latex_documents = [
(master_doc, 'ChatterBot.tex', u'ChatterBot Documentation',
u'Gunther Cox', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section)
man_pages = [
(master_doc, 'chatterbot', u'ChatterBot Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'ChatterBot', u'ChatterBot Documentation',
author, 'ChatterBot', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# A list of files that should not be packed into the epub file
epub_exclude_files = ['search.html']
# Example configuration for intersphinx: refer to the Python standard library
intersphinx_mapping = {'https://docs.python.org/': None}
|
{
"content_hash": "ac05be0daa95505a6852726cca5b0e59",
"timestamp": "",
"source": "github",
"line_count": 191,
"max_line_length": 88,
"avg_line_length": 31.670157068062828,
"alnum_prop": 0.6939990081005125,
"repo_name": "Reinaesaya/OUIRL-ChatBot",
"id": "71bca1ae274d9aa16fbadb06ec95e2561f32438f",
"size": "6189",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "534146"
},
{
"name": "Shell",
"bytes": "2872"
}
],
"symlink_target": ""
}
|
"""filter"""
import re
from sqlparse import lexer, SQLParseError
from sqlparse.engine import grouping
from sqlparse.engine.filter import StatementFilter
# XXX remove this when cleanup is complete
Filter = object
class FilterStack(object):
def __init__(self):
self.preprocess = []
self.stmtprocess = []
self.postprocess = []
self.split_statements = False
self._grouping = False
def _flatten(self, stream):
for token in stream:
if token.is_group():
for t in self._flatten(token.tokens):
yield t
else:
yield token
def enable_grouping(self):
self._grouping = True
def full_analyze(self):
self.enable_grouping()
def run(self, sql):
stream = lexer.tokenize(sql)
# Process token stream
if self.preprocess:
for filter_ in self.preprocess:
stream = filter_.process(self, stream)
if (self.stmtprocess or self.postprocess or self.split_statements
or self._grouping):
splitter = StatementFilter()
stream = splitter.process(self, stream)
if self._grouping:
def _group(stream):
for stmt in stream:
grouping.group(stmt)
yield stmt
stream = _group(stream)
if self.stmtprocess:
def _run(stream):
ret = []
for stmt in stream:
for filter_ in self.stmtprocess:
filter_.process(self, stmt)
ret.append(stmt)
return ret
stream = _run(stream)
if self.postprocess:
def _run(stream):
for stmt in stream:
stmt.tokens = list(self._flatten(stmt.tokens))
for filter_ in self.postprocess:
stmt = filter_.process(self, stmt)
yield stmt
stream = _run(stream)
return stream
|
{
"content_hash": "7a60623d52b00bbb9a80f605329acf12",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 73,
"avg_line_length": 27.76,
"alnum_prop": 0.5240153698366955,
"repo_name": "dbbhattacharya/kitsune",
"id": "6852f7d1b7cd112a08af06df16e22531255e8424",
"size": "2279",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "vendor/packages/sqlparse/sqlparse/engine/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "2694"
},
{
"name": "CSS",
"bytes": "276585"
},
{
"name": "HTML",
"bytes": "600145"
},
{
"name": "JavaScript",
"bytes": "800276"
},
{
"name": "Python",
"bytes": "2762831"
},
{
"name": "Shell",
"bytes": "6720"
},
{
"name": "Smarty",
"bytes": "1752"
}
],
"symlink_target": ""
}
|
import logging
from django.db import models
from person.models import Person
from parliament.models import ParliamentMember
from parliament.models import PoliticalParty, PartyMember
logger = logging.getLogger(__name__)
class PersonPosition(models.Model):
person = models.ForeignKey(Person, on_delete=models.CASCADE)
party = models.ForeignKey(PoliticalParty, null=True, blank=True, on_delete=models.CASCADE)
parliament_member = models.ForeignKey(ParliamentMember, null=True, blank=True, on_delete=models.CASCADE)
date = models.DateField()
class Meta:
unique_together = ['person', 'date']
def save(self, *args, **kwargs):
party_members = PartyMember.get_at_date(self.person, self.date)
self.party = party_members[0].party if party_members else None
parliament_members = ParliamentMember.find_at_date(self.person, self.date)
self.parliament_member = parliament_members[0] if parliament_members else None
super().save(*args, **kwargs)
class Gift(models.Model):
BOEK = 'BOEK'
TOEGANGSKAART = 'KAAR'
WIJN = 'WIJN'
DINER = 'DINE'
BLOEMEN = 'BLOE'
PAKKET = 'PAKK'
KLEDING = 'KLED'
ONBEKEND = 'ONB'
TYPE_CHOICES = (
(BOEK, 'Boek'), (TOEGANGSKAART, 'Toegangskaart'), (WIJN, 'Wijn'),
(BLOEMEN, 'Bloemen'), (PAKKET, 'Pakket'), (KLEDING, 'Kleding'), (DINER, 'Diner'),
(ONBEKEND, 'Onbekend')
)
person = models.ForeignKey(Person, on_delete=models.CASCADE)
person_position = models.ForeignKey(PersonPosition, on_delete=models.CASCADE)
description = models.CharField(max_length=1000, default='', blank=True)
date = models.DateField(null=True, blank=True)
value_euro = models.FloatField(null=True, blank=True)
type = models.CharField(max_length=4, choices=TYPE_CHOICES, default=ONBEKEND, db_index=True)
def save(self, *args, **kwargs):
self.person_position, created = PersonPosition.objects.get_or_create(
person=self.person,
date=self.date
)
super().save(*args, **kwargs)
@staticmethod
def calc_sum_average(gifts):
gifts = gifts.filter(value_euro__isnull=False)
gifts_value = 0
for gift in gifts:
gifts_value += gift.value_euro
average = 0
if gifts.count() != 0:
average = gifts_value / gifts.count()
return gifts_value, average
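# Usage sketch (assumes an existing Gift queryset; not part of this module):
#   total, average = Gift.calc_sum_average(Gift.objects.all())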
|
{
"content_hash": "536c47dc132c33554cc260d3e8e7b933",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 108,
"avg_line_length": 36.59090909090909,
"alnum_prop": 0.6612836438923395,
"repo_name": "openkamer/openkamer",
"id": "1c8fa59b2751a0f2cb66df5a040884086d5cf4d4",
"size": "2415",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gift/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "442"
},
{
"name": "CSS",
"bytes": "11171"
},
{
"name": "HTML",
"bytes": "154052"
},
{
"name": "JavaScript",
"bytes": "1051"
},
{
"name": "Python",
"bytes": "513282"
},
{
"name": "Shell",
"bytes": "157"
}
],
"symlink_target": ""
}
|
import rodeo
import os
def staticpath():
return os.path.join(os.path.dirname(rodeo.__file__), 'static')
if __name__ == '__main__':
print(staticpath())
|
{
"content_hash": "e5fe6b5748ec021fc3b0b6d3c3ec01bd",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 66,
"avg_line_length": 20.125,
"alnum_prop": 0.6273291925465838,
"repo_name": "rectangletangle/dataquest-test-project",
"id": "f1e99403682ef957f68116514287e32d9bf25297",
"size": "162",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "static/getstatic.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "1016"
}
],
"symlink_target": ""
}
|
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'ThirdPartyProvider'
db.create_table('monocle_thirdpartyprovider', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=50, blank=True)),
('api_endpoint', self.gf('django.db.models.fields.URLField')(max_length=200)),
('resource_type', self.gf('django.db.models.fields.CharField')(max_length=10)),
('is_active', self.gf('django.db.models.fields.BooleanField')(default=True, db_index=True)),
('expose', self.gf('django.db.models.fields.BooleanField')(default=False, db_index=True)),
))
db.send_create_signal('monocle', ['ThirdPartyProvider'])
# Adding model 'URLScheme'
db.create_table('monocle_urlscheme', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('scheme', self.gf('django.db.models.fields.CharField')(unique=True, max_length=255)),
('provider', self.gf('django.db.models.fields.related.ForeignKey')(related_name='_schemes', to=orm['monocle.ThirdPartyProvider'])),
))
db.send_create_signal('monocle', ['URLScheme'])
def backwards(self, orm):
# Deleting model 'ThirdPartyProvider'
db.delete_table('monocle_thirdpartyprovider')
# Deleting model 'URLScheme'
db.delete_table('monocle_urlscheme')
models = {
'monocle.thirdpartyprovider': {
'Meta': {'ordering': "('api_endpoint', 'resource_type')", 'object_name': 'ThirdPartyProvider'},
'api_endpoint': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'expose': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'resource_type': ('django.db.models.fields.CharField', [], {'max_length': '10'})
},
'monocle.urlscheme': {
'Meta': {'object_name': 'URLScheme'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'provider': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'_schemes'", 'to': "orm['monocle.ThirdPartyProvider']"}),
'scheme': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
}
}
complete_apps = ['monocle']
|
{
"content_hash": "4a2282eebf5ae1c0d51876610dd2ff37",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 150,
"avg_line_length": 50.964285714285715,
"alnum_prop": 0.5998598458304134,
"repo_name": "shaunduncan/django-monocle",
"id": "d71d90208c8c7c580aa01175bac4ce619c04e348",
"size": "2878",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "monocle/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "106241"
}
],
"symlink_target": ""
}
|
import wordtools
from forms.form import Form
class TankaForm(Form):
syllables = "syllableCount"
text = "text"
def __init__(self):
self.data={}
self.data[5]=[]
self.data[7]=[]
def validate(self,tweet):
clean = wordtools.clean(tweet)
syllableCount = wordtools.syllableCount(clean)
if syllableCount==7 or syllableCount==5:
return {self.syllables:syllableCount, self.text:tweet}
else:
return None
def save(self,cleaned):
self.data[cleaned[self.syllables]].append(cleaned[self.text])
def build(self):
if len(self.data[5])>=2 and len(self.data[7])>=3:
poem = self.data[5].pop(0)+"\n"
poem +=self.data[7].pop(0)+"\n"
poem +=self.data[5].pop(0)+"\n"
poem +=self.data[7].pop(0)+"\n"
poem +=self.data[7].pop(0)+"\n"
return poem
else:
return None
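# Usage sketch (the input lines are illustrative, not real tweets):
#   form = TankaForm()
#   for line in candidate_lines:  # hypothetical iterable of 5- or 7-syllable tweets
#       cleaned = form.validate(line)
#       if cleaned:
#           form.save(cleaned)
#   poem = form.build()  # a 5-7-5-7-7 tanka, or None until enough lines are saved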
|
{
"content_hash": "2dff5d9143961450119b7ff3f5998a59",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 63,
"avg_line_length": 24.09090909090909,
"alnum_prop": 0.6566037735849056,
"repo_name": "mlesicko/automaticpoetry",
"id": "f1507b0e019aac31127869971643d62370fa0cc0",
"size": "795",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "forms/tanka.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "21889"
}
],
"symlink_target": ""
}
|
__author__ = 'RemiZOffAlex'
__copyright__ = '(c) RemiZOffAlex'
__license__ = 'MIT'
__email__ = 'remizoffalex@mail.ru'
__url__ = 'http://remizoffalex.ru'
from pycertauth import app as application
|
{
"content_hash": "59fdbf9fa42ddead734d1a7118df8565",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 41,
"avg_line_length": 28,
"alnum_prop": 0.6632653061224489,
"repo_name": "RemiZOffAlex/pycertauth",
"id": "e9e2740b0a45a8bfa49844dc2e847f0e8d7696f4",
"size": "244",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wsgi.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "89085"
},
{
"name": "HTML",
"bytes": "230861"
},
{
"name": "JavaScript",
"bytes": "12147"
},
{
"name": "PHP",
"bytes": "2232"
},
{
"name": "Python",
"bytes": "38335"
},
{
"name": "Shell",
"bytes": "69"
}
],
"symlink_target": ""
}
|
import argparse
import contextlib
import time
import cupy
import numpy
# This sample computes call and put prices for European options with the
# Black-Scholes equation. It is based on a sample from the financial package
# in the CUDA Toolkit. For details, please see the corresponding whitepaper.
#
# The following code shows that CuPy enables us to write algorithms for GPUs
# without significantly modifying existing NumPy code.
# It also briefly describes how to create your own kernel with CuPy.
# If you want to speed up the existing code, please define the kernel
# with cupy.ElementwiseKernel.
# Naive implementation of the pricing of options with NumPy and CuPy.
def black_scholes(xp, s, x, t, r, v):
sqrt_t = xp.sqrt(t)
d1 = (xp.log(s / x) + (r + v * v / 2) * t) / (v * sqrt_t)
d2 = d1 - v * sqrt_t
def get_cumulative_normal_distribution(x):
A1 = 0.31938153
A2 = -0.356563782
A3 = 1.781477937
A4 = -1.821255978
A5 = 1.330274429
RSQRT2PI = 0.39894228040143267793994605993438
W = 0.2316419
k = 1 / (1 + W * xp.abs(x))
cnd = RSQRT2PI * xp.exp(-x * x / 2) * \
(k * (A1 + k * (A2 + k * (A3 + k * (A4 + k * A5)))))
cnd = xp.where(x > 0, 1 - cnd, cnd)
return cnd
cnd_d1 = get_cumulative_normal_distribution(d1)
cnd_d2 = get_cumulative_normal_distribution(d2)
exp_rt = xp.exp(- r * t)
call = s * cnd_d1 - x * exp_rt * cnd_d2
put = x * exp_rt * (1 - cnd_d2) - s * (1 - cnd_d1)
return call, put
# An example of calling the kernel via cupy.ElementwiseKernel.
# When executing __call__ method of the instance, it automatically compiles
# the code depending on the types of the given arrays, and calls the kernel.
# Other functions used inside the kernel can be defined by 'preamble' option.
black_scholes_kernel = cupy.ElementwiseKernel(
'T s, T x, T t, T r, T v', # Inputs
'T call, T put', # Outputs
'''
const T sqrt_t = sqrt(t);
const T d1 = (log(s / x) + (r + v * v / 2) * t) / (v * sqrt_t);
const T d2 = d1 - v * sqrt_t;
const T cnd_d1 = get_cumulative_normal_distribution(d1);
const T cnd_d2 = get_cumulative_normal_distribution(d2);
const T exp_rt = exp(- r * t);
call = s * cnd_d1 - x * exp_rt * cnd_d2;
put = x * exp_rt * (1 - cnd_d2) - s * (1 - cnd_d1);
''',
'black_scholes_kernel',
preamble='''
__device__
inline T get_cumulative_normal_distribution(T x) {
const T A1 = 0.31938153;
const T A2 = -0.356563782;
const T A3 = 1.781477937;
const T A4 = -1.821255978;
const T A5 = 1.330274429;
const T RSQRT2PI = 0.39894228040143267793994605993438;
const T W = 0.2316419;
const T k = 1 / (1 + W * abs(x));
T cnd = RSQRT2PI * exp(- x * x / 2) *
(k * (A1 + k * (A2 + k * (A3 + k * (A4 + k * A5)))));
if (x > 0) {
cnd = 1 - cnd;
}
return cnd;
}
''',
)
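# A minimal usage sketch (the array names are placeholders): the kernel
# broadcasts over same-shaped CuPy arrays, accepts Python scalars, and
# returns both declared outputs as a tuple.
#   call, put = black_scholes_kernel(s_gpu, x_gpu, t_gpu, 0.02, 0.3)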
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--gpu-id', '-g', default=0, type=int, help='GPU ID')
parser.add_argument('--n-options', '-n', default=10000000, type=int)
args = parser.parse_args()
cupy.cuda.Device(args.gpu_id).use()
def rand_range(m, M):
samples = cupy.random.rand(args.n_options)
return (m + (M - m) * samples).astype(numpy.float64)
print('initializing...')
stock_price_gpu = rand_range(5, 30)
option_strike_gpu = rand_range(1, 100)
option_years_gpu = rand_range(0.25, 10)
stock_price_cpu = stock_price_gpu.get()
option_strike_cpu = option_strike_gpu.get()
option_years_cpu = option_years_gpu.get()
@contextlib.contextmanager
def timer(message):
cupy.cuda.Stream.null.synchronize()
start = time.time()
yield
cupy.cuda.Stream.null.synchronize()
end = time.time()
print('%s:\t%f sec' % (message, end - start))
print('start computation')
risk_free = 0.02
volatility = 0.3
with timer(' CPU (NumPy, Naive implementation)'):
call_cpu, put_cpu = black_scholes(
numpy, stock_price_cpu, option_strike_cpu, option_years_cpu,
risk_free, volatility)
with timer(' GPU (CuPy, Naive implementation)'):
call_gpu1, put_gpu1 = black_scholes(
cupy, stock_price_gpu, option_strike_gpu, option_years_gpu,
risk_free, volatility)
with timer(' GPU (CuPy, Elementwise kernel)'):
call_gpu2, put_gpu2 = black_scholes_kernel(
stock_price_gpu, option_strike_gpu, option_years_gpu,
risk_free, volatility)
    # Check that all elements of the GPU arrays match the CPU results
cupy.testing.assert_allclose(call_cpu, call_gpu1)
cupy.testing.assert_allclose(call_cpu, call_gpu2)
cupy.testing.assert_allclose(put_cpu, put_gpu1)
cupy.testing.assert_allclose(put_cpu, put_gpu2)
|
{
"content_hash": "05f2b79e1b5f32db9578794c6eac2fbc",
"timestamp": "",
"source": "github",
"line_count": 144,
"max_line_length": 77,
"avg_line_length": 34.47222222222222,
"alnum_prop": 0.6021353746978243,
"repo_name": "cupy/cupy",
"id": "d9863ffdc0aab23e8cdee5e38f0555aadea6d98d",
"size": "4964",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/finance/black_scholes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "38"
},
{
"name": "C",
"bytes": "712019"
},
{
"name": "C++",
"bytes": "895316"
},
{
"name": "Cuda",
"bytes": "151799"
},
{
"name": "Cython",
"bytes": "1996454"
},
{
"name": "Dockerfile",
"bytes": "40251"
},
{
"name": "PowerShell",
"bytes": "7361"
},
{
"name": "Python",
"bytes": "4841354"
},
{
"name": "Shell",
"bytes": "24521"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
import logging
__all__ = [
'reopen_log_files',
'set_log_level_for_all_loggers'
]
LOG = logging.getLogger(__name__)
def reopen_log_files(handlers):
"""
    Iterate through the provided handlers looking for FileHandler instances.
    For each one, a lock is acquired, the underlying stream is closed and
    reopened, and the lock is released.
    Call this method when logs are to be rotated by an external process. The
    simplest way to do this is via a signal handler.
"""
for handler in handlers:
if not isinstance(handler, logging.FileHandler):
continue
        LOG.info('Re-opening log file "%s" with mode "%s"',
                 handler.baseFilename, handler.mode)
try:
handler.acquire()
handler.stream.close()
handler.stream = open(handler.baseFilename, handler.mode)
finally:
handler.release()
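# Example wiring (a sketch, not part of this module): re-open log files on
# SIGUSR1 so an external logrotate job can signal the process after rotating.
#   import signal
#   signal.signal(signal.SIGUSR1, lambda signum, frame:
#                 reopen_log_files(logging.getLogger().handlers))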
def set_log_level_for_all_loggers(level):
"""
Set a log level for all the loggers and handlers to the provided level.
"""
root_logger = logging.getLogger()
    loggers = list(logging.Logger.manager.loggerDict.values())
    loggers += [root_logger]
for logger in loggers:
if not isinstance(logger, logging.Logger):
continue
        logger.setLevel(level)
handlers = logger.handlers
for handler in handlers:
            handler.setLevel(level)
|
{
"content_hash": "0b4c8777b54e9335bbd37991fad2359e",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 97,
"avg_line_length": 27.9811320754717,
"alnum_prop": 0.641267700606878,
"repo_name": "grengojbo/st2",
"id": "b04834a58230c7cbc0f964838b6549c9c32ae20d",
"size": "2263",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "st2common/st2common/logging/misc.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "198"
},
{
"name": "Makefile",
"bytes": "21186"
},
{
"name": "PowerShell",
"bytes": "299"
},
{
"name": "Python",
"bytes": "2091976"
},
{
"name": "Shell",
"bytes": "7518"
},
{
"name": "Slash",
"bytes": "677"
}
],
"symlink_target": ""
}
|
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/lair/gubbur/shared_lair_gubbur_underwater.iff"
result.attribute_template_id = -1
result.stfName("lair_n","gubbur_underwater")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
{
"content_hash": "22f7e56c6743b51a6c8dfd2f385f345b",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 82,
"avg_line_length": 24.384615384615383,
"alnum_prop": 0.7034700315457413,
"repo_name": "anhstudios/swganh",
"id": "900e1074fab440f4272adfcd81adb26d373eea0b",
"size": "462",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/templates/object/tangible/lair/gubbur/shared_lair_gubbur_underwater.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
}
|
from pathlib import Path
import pytest
import testing.postgresql
from sqlalchemy import create_engine, text
from sqlalchemy.engine.url import make_url
from testcontainers.postgres import PostgresContainer as _PostgresContainer
tests_dir = Path(__file__).parents[0].resolve()
test_schema_file = Path(tests_dir, 'data', 'test-schema.sql')
SUPERUSER_NAME = 'alice'
DB_NAME = 'db1'
Postgresql = testing.postgresql.PostgresqlFactory(
initdb_args='-U postgres -A trust',
database=DB_NAME,
)
class PostgresContainer(_PostgresContainer):
POSTGRES_USER = 'postgres'
POSTGRES_DB = DB_NAME
def pytest_addoption(parser):
parser.addoption(
'--no-container', action='store_true',
help='Use temporary PostgreSQL cluster without a container.')
def pytest_runtest_setup(item):
if 'nocontainer' in item.keywords and not item.config.getoption('--no-container'):
pytest.skip('Use --no-container to execute this test.')
@pytest.fixture(scope='session')
def postgres_url(request):
no_container = request.config.getoption("--no-container")
if no_container:
postgresql = Postgresql()
# Use superuser to create new superuser, then yield new connection URL
url = make_url(postgresql.url())
engine = create_engine(url)
engine.execute('CREATE ROLE {} WITH SUPERUSER LOGIN'.format(SUPERUSER_NAME))
engine.dispose()
url.username = SUPERUSER_NAME
yield str(url)
else:
postgres_container = PostgresContainer("postgres:latest")
with postgres_container as postgres:
# Use superuser to create new superuser, then yield new connection URL
url = make_url(postgres.get_connection_url())
engine = create_engine(url)
engine.execute(
text(
'CREATE ROLE {} WITH SUPERUSER LOGIN PASSWORD '
':password'.format(SUPERUSER_NAME)
),
password=postgres_container.POSTGRES_PASSWORD,
)
engine.dispose()
url.username = SUPERUSER_NAME
yield str(url)
@pytest.fixture(scope='session')
def engine(postgres_url):
return create_engine(postgres_url)
@pytest.fixture(scope='session')
def pg_schema(engine):
with test_schema_file.open() as fp:
engine.execute(fp.read())
@pytest.fixture
def connection(engine, pg_schema):
with engine.connect() as conn:
yield conn
|
{
"content_hash": "d1d219cbaacc015e69b4ca91a7f6e968",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 86,
"avg_line_length": 29.11764705882353,
"alnum_prop": 0.6577777777777778,
"repo_name": "RazerM/pg_grant",
"id": "12048142fde84a84667119199a1ea22673655766",
"size": "2475",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/conftest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "PLpgSQL",
"bytes": "1257"
},
{
"name": "Python",
"bytes": "98393"
},
{
"name": "Shell",
"bytes": "299"
}
],
"symlink_target": ""
}
|
from security_monkey.cloudaux_batched_watcher import CloudAuxBatchedWatcher
from cloudaux.aws.iam import list_roles
from cloudaux.orchestration.aws.iam.role import get_role
from security_monkey import AWS_DEFAULT_REGION
class IAMRole(CloudAuxBatchedWatcher):
index = 'iamrole'
i_am_singular = 'IAM Role'
i_am_plural = 'IAM Roles'
override_region = 'universal'
def __init__(self, **kwargs):
super(IAMRole, self).__init__(**kwargs)
self.honor_ephemerals = True
self.ephemeral_paths = ['_version', "Region"]
def _get_regions(self):
return [AWS_DEFAULT_REGION]
def get_name_from_list_output(self, item):
return item['RoleName']
def list_method(self, **kwargs):
all_roles = list_roles(**kwargs)
items = []
for role in all_roles:
role["Region"] = "us-east-1" # IAM is global
items.append(role)
return items
def get_method(self, item, **kwargs):
# This is not needed for IAM Role:
item.pop("Region")
return get_role(dict(item), **kwargs)
|
{
"content_hash": "246c3097f546cbdd12477a2fef1b4485",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 75,
"avg_line_length": 29,
"alnum_prop": 0.6279491833030852,
"repo_name": "markofu/security_monkey",
"id": "0f9cfbd951e8f4508cae0edf77f99244964df8c3",
"size": "1102",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "security_monkey/watchers/iam/iam_role.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "22116"
},
{
"name": "Dart",
"bytes": "86565"
},
{
"name": "HTML",
"bytes": "80747"
},
{
"name": "JavaScript",
"bytes": "8629"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "483322"
},
{
"name": "Shell",
"bytes": "19151"
}
],
"symlink_target": ""
}
|
def is_group_member(user, group):
return user.groups.filter(name=group).exists()
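# Usage sketch (the group name is illustrative):
#   if is_group_member(request.user, 'teachers'):
#       ...  # show group-specific content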
|
{
"content_hash": "5c054b4b5b2f2a605ff57fb522b82ab5",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 50,
"avg_line_length": 42,
"alnum_prop": 0.7380952380952381,
"repo_name": "CSchool/SchoolSite",
"id": "51adf7684798bdd383d1f16bd5133bb7a21e4dc7",
"size": "84",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "CSchoolSite/userprofile/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "98937"
},
{
"name": "CSS",
"bytes": "1820"
},
{
"name": "HTML",
"bytes": "60303"
},
{
"name": "JavaScript",
"bytes": "14501"
},
{
"name": "Makefile",
"bytes": "3042"
},
{
"name": "Python",
"bytes": "208264"
},
{
"name": "TeX",
"bytes": "19748"
}
],
"symlink_target": ""
}
|
"""
Copyright 2016-present Nike, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
You may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
from . import CerberusClientException, CLIENT_VERSION
from .network_util import throw_if_bad_response
from .network_util import get_with_retry
from .network_util import post_with_retry
logger = logging.getLogger(__name__)
class UserAuth:
"""Class to authenticate with username and
password and returns a cerberus token"""
HEADERS = {"Content-Type": "application/json",
"X-Cerberus-Client": "CerberusPythonClient/" + CLIENT_VERSION}
def __init__(self, cerberus_url, username, password):
self.cerberus_url = cerberus_url
self.username = username
self.password = password
def get_auth(self):
"""Returns auth response which has client
token unless MFA is required"""
auth_resp = get_with_retry(self.cerberus_url + '/v2/auth/user',
auth=(self.username, self.password),
headers=self.HEADERS)
self.check_response(auth_resp)
return auth_resp.json()
def get_token(self):
"""sets client token from Cerberus"""
auth_resp = self.get_auth()
if auth_resp['status'] == 'mfa_req':
token_resp = self.get_mfa(auth_resp)
else:
token_resp = auth_resp
token = token_resp['data']['client_token']['client_token']
return token
@classmethod
def check_response(cls, response):
"""Ensure a reponse has a 200 status code"""
if response.status_code != 200:
throw_if_bad_response(response)
def mfa_check(self, json_param):
"""Posts json_param to mfa_check endpoint and returns the result
after checking the status with check_response"""
mfa_resp = post_with_retry(
self.cerberus_url + '/v2/auth/mfa_check',
json=json_param,
headers=self.HEADERS
)
self.check_response(mfa_resp)
return mfa_resp
def trigger_challenge(self, device_id, state_token):
"""Trigger a challenge for devices that need them"""
self.mfa_check({'device_id': device_id,
'state_token': state_token})
def check_mfa_code(self, sec_code, device_id, state_token):
"""Check the otp token for a device"""
mfa_resp = self.mfa_check({'otp_token': sec_code,
'device_id': device_id,
'state_token': state_token})
return mfa_resp.json()
@classmethod
def get_valid_device_selection(cls, devices):
"""Display a list of the user's devices and get their selection"""
if len(devices) == 1:
# If there's only one option, don't show selection prompt
return 0
print("Found the following MFA devices")
for index, device in enumerate(devices):
print("%s: %s" % (index, device['name']))
selection = input("Enter a selection: ")
if selection.isdigit():
selection_num = int(str(selection))
else:
msg = "Selection: '%s' is not a number" % selection
raise CerberusClientException(msg)
if selection_num not in range(len(devices)):
msg = "Selection: '%s' is out of range" % selection_num
raise CerberusClientException(msg)
return selection_num
def get_mfa(self, auth_resp):
"""Gets MFA code from user and returns response which
includes the client token"""
devices = auth_resp['data']['devices']
selection_num = self.get_valid_device_selection(devices)
selected_device = auth_resp['data']['devices'][selection_num]
device_id = selected_device['id']
state_token = auth_resp['data']['state_token']
if selected_device.get('requires_trigger'):
self.trigger_challenge(device_id, state_token)
sec_code = input('Enter %s security code: ' % selected_device['name'])
return self.check_mfa_code(sec_code, device_id, state_token)
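# Usage sketch (URL and credentials are placeholders):
#   auth = UserAuth('https://cerberus.example.com', 'username', 'password')
#   token = auth.get_token()  # prompts for an MFA code if one is required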
|
{
"content_hash": "e2c06f1f4779557204a7b0f6956dec1d",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 78,
"avg_line_length": 33.89051094890511,
"alnum_prop": 0.6144734008184364,
"repo_name": "Nike-Inc/cerberus-python-client",
"id": "7522d6e93c64de505f5f1efdc5e4fa5928ff4d73",
"size": "4643",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cerberus/user_auth.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "161"
},
{
"name": "Python",
"bytes": "121334"
}
],
"symlink_target": ""
}
|
"""
eve.io.mongo.geo
~~~~~~~~~~~~~~~~~~~
Geospatial functions and classes for mongo IO layer
:copyright: (c) 2017 by Nicola Iarocci.
:license: BSD, see LICENSE for more details.
"""
class GeoJSON(dict):
def __init__(self, json):
try:
self['type'] = json['type']
except KeyError:
raise TypeError("Not compilant to GeoJSON")
self.update(json)
if len(self.keys()) != 2:
raise TypeError("Not compilant to GeoJSON")
def _correct_position(self, position):
return isinstance(position, list) and \
all(isinstance(pos, int) or isinstance(pos, float)
for pos in position)
class Geometry(GeoJSON):
def __init__(self, json):
super(Geometry, self).__init__(json)
try:
if not isinstance(self['coordinates'], list) or \
self['type'] != self.__class__.__name__:
raise TypeError
except (KeyError, TypeError):
raise TypeError("Geometry not compilant to GeoJSON")
class GeometryCollection(GeoJSON):
def __init__(self, json):
super(GeometryCollection, self).__init__(json)
try:
if not isinstance(self['geometries'], list):
raise TypeError
for geometry in self['geometries']:
factory = factories[geometry["type"]]
factory(geometry)
except (KeyError, TypeError, AttributeError):
raise TypeError("Geometry not compilant to GeoJSON")
class Point(Geometry):
def __init__(self, json):
super(Point, self).__init__(json)
if not self._correct_position(self['coordinates']):
raise TypeError
class MultiPoint(GeoJSON):
def __init__(self, json):
super(MultiPoint, self).__init__(json)
for position in self["coordinates"]:
if not self._correct_position(position):
raise TypeError
class LineString(GeoJSON):
def __init__(self, json):
super(LineString, self).__init__(json)
for position in self["coordinates"]:
if not self._correct_position(position):
raise TypeError
class MultiLineString(GeoJSON):
def __init__(self, json):
super(MultiLineString, self).__init__(json)
for linestring in self["coordinates"]:
for position in linestring:
if not self._correct_position(position):
raise TypeError
class Polygon(GeoJSON):
def __init__(self, json):
super(Polygon, self).__init__(json)
for linestring in self["coordinates"]:
for position in linestring:
if not self._correct_position(position):
raise TypeError
class MultiPolygon(GeoJSON):
def __init__(self, json):
super(MultiPolygon, self).__init__(json)
for polygon in self["coordinates"]:
for linestring in polygon:
for position in linestring:
if not self._correct_position(position):
raise TypeError
factories = dict([(_type.__name__, _type)
for _type in
[GeometryCollection, Point, MultiPoint, LineString,
MultiLineString, Polygon, MultiPolygon]])
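# Usage sketch (coordinates are illustrative): validate incoming GeoJSON by
# dispatching on its 'type' through the factories table.
#   geometry = {'type': 'Point', 'coordinates': [12.49, 41.89]}
#   factories[geometry['type']](geometry)  # raises TypeError if non-compliant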
|
{
"content_hash": "2851a1fdddabb3ff99cd688a33edfeb8",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 69,
"avg_line_length": 31.38679245283019,
"alnum_prop": 0.5677787796813947,
"repo_name": "bcrochet/eve",
"id": "665025abba2844f870a19511d854774b74b82e2b",
"size": "3352",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "eve/io/mongo/geo.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "790594"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import os
from django.core.exceptions import ObjectDoesNotExist
from django.db import models
from django.utils import timezone
from django.utils.encoding import python_2_unicode_compatible
from django.utils.html import format_html
from django.utils.translation import ugettext_lazy as _
from djblets.util.templatetags.djblets_images import thumbnail
from reviewboard.site.urlresolvers import local_site_reverse
@python_2_unicode_compatible
class Screenshot(models.Model):
"""A screenshot associated with a review request.
Like diffs, a screenshot can have comments associated with it.
These comments are of type
:py:class:`reviewboard.reviews.models.ScreenshotComment`.
"""
caption = models.CharField(_("caption"), max_length=256, blank=True)
draft_caption = models.CharField(_("draft caption"),
max_length=256, blank=True)
image = models.ImageField(_("image"),
upload_to=os.path.join('uploaded', 'images',
'%Y', '%m', '%d'))
@property
def filename(self):
"""Returns the filename for display purposes."""
return os.path.basename(self.image.name)
def get_comments(self):
"""Returns all the comments made on this screenshot."""
if not hasattr(self, '_comments'):
self._comments = list(self.comments.all())
return self._comments
def get_thumbnail_url(self):
"""Returns the URL for the thumbnail, creating it if necessary."""
return thumbnail(self.image)
def thumb(self):
"""Creates and returns HTML for this screenshot's thumbnail."""
url = self.get_thumbnail_url()
return format_html(
'<img src="{src_1x}" srcset="{src_1x} 1x, {src_2x} 2x"'
' alt="{caption}" width="400" />',
src_1x=url,
src_2x=thumbnail(self.image, '800x200'),
caption=self.caption)
thumb.allow_tags = True
def __str__(self):
return "%s (%s)" % (self.caption, self.image)
def get_review_request(self):
if hasattr(self, '_review_request'):
return self._review_request
try:
return self.review_request.all()[0]
except IndexError:
try:
return self.inactive_review_request.all()[0]
except IndexError:
# Maybe it's on a draft.
try:
draft = self.drafts.get()
except ObjectDoesNotExist:
draft = self.inactive_drafts.get()
return draft.review_request
def get_absolute_url(self):
review_request = self.get_review_request()
if review_request.local_site:
local_site_name = review_request.local_site.name
else:
local_site_name = None
return local_site_reverse(
'screenshot',
local_site_name=local_site_name,
kwargs={
'review_request_id': review_request.display_id,
'screenshot_id': self.pk,
})
def save(self, **kwargs):
super(Screenshot, self).save()
try:
draft = self.drafts.get()
draft.timestamp = timezone.now()
draft.save()
except ObjectDoesNotExist:
pass
class Meta:
app_label = 'reviews'
db_table = 'reviews_screenshot'
verbose_name = _('Screenshot')
verbose_name_plural = _('Screenshots')
|
{
"content_hash": "660f64613b0076e2865242f097f856c5",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 74,
"avg_line_length": 33.00917431192661,
"alnum_prop": 0.5839355197331851,
"repo_name": "chipx86/reviewboard",
"id": "0d06ebdec1c24c7de1f5c646faca79660666b77a",
"size": "3598",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "reviewboard/reviews/models/screenshot.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "434719"
},
{
"name": "HTML",
"bytes": "224310"
},
{
"name": "JavaScript",
"bytes": "3830753"
},
{
"name": "Python",
"bytes": "7333453"
},
{
"name": "Shell",
"bytes": "777"
}
],
"symlink_target": ""
}
|
from cms.cache.permissions import clear_permission_cache
from cms.signals.apphook import apphook_post_delete_page_checker
from cms.signals.title import update_title_paths
from menus.menu_pool import menu_pool
def pre_save_page(instance, **kwargs):
menu_pool.clear(instance.site_id)
clear_permission_cache()
def pre_delete_page(instance, **kwargs):
menu_pool.clear(instance.site_id)
for placeholder in instance.get_placeholders():
for plugin in placeholder.get_plugins().order_by('-depth'):
plugin._no_reorder = True
plugin.delete(no_mp=True)
placeholder.delete()
clear_permission_cache()
def post_delete_page(instance, **kwargs):
apphook_post_delete_page_checker(instance)
instance.clear_cache()
def post_moved_page(instance, **kwargs):
update_title_paths(instance, **kwargs)
|
{
"content_hash": "b0590e061fc499ab8803da1be45fbc2b",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 67,
"avg_line_length": 29.551724137931036,
"alnum_prop": 0.7141190198366394,
"repo_name": "timgraham/django-cms",
"id": "8f4bc9be0e01da3eb317645073b387c1750e030e",
"size": "881",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "cms/signals/page.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "105760"
},
{
"name": "JavaScript",
"bytes": "672964"
},
{
"name": "PHP",
"bytes": "2156"
},
{
"name": "Python",
"bytes": "2258825"
},
{
"name": "XSLT",
"bytes": "5122"
}
],
"symlink_target": ""
}
|
"""Benchmark comparing dynamic_rnn implementations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from pyctr.api import conversion
from pyctr.examples.models import dynamic_rnn_minimal
from pyctr.examples.numpy import numpy_to_tf
from pyctr.examples.numpy import numpy_to_torch
from pyctr.examples.pytorch import pytorch_to_numpy
from pyctr.examples.pytorch import pytorch_to_tf
from pyctr.examples.sysml2019 import benchmark_base
from pyctr.examples.tf import tf as tf_
from pyctr.examples.tf import tf_to_numpy
from pyctr.examples.tf import tf_to_pytorch
from pyctr.transformers.virtualization import control_flow
from pyctr.transformers.virtualization import functions
from pyctr.transformers.virtualization import variables
import tensorflow as tf
tf.enable_eager_execution()
FEATURE_SIZE = 50
HIDDEN_SIZE = 256
class RNNBenchmark(benchmark_base.ReportingBenchmark):
"""Runs benchmarks for variants of dynamic_rnn."""
def _numpy_baseline(self, batch_size, max_seq_len, input_size, hidden_size):
inputs, seq_len, w, b, init_state = dynamic_rnn_minimal.random_inputs_numpy(
batch_size, max_seq_len, input_size, hidden_size)
def target():
dynamic_rnn_minimal.numpy(inputs, seq_len, w, b, init_state)
self.time_execution(
('NumPy', batch_size, max_seq_len, input_size, hidden_size),
target,
extras={
'max_seq_len': max_seq_len,
'batch_size': batch_size,
'input_size': input_size,
'hidden_size': hidden_size,
})
def _numpy_to_eager(self, batch_size, max_seq_len, input_size, hidden_size):
inputs, seq_len, w, b, init_state = dynamic_rnn_minimal.random_inputs_tf(
batch_size, max_seq_len, input_size, hidden_size)
eager_from_np = conversion.convert(dynamic_rnn_minimal.numpy, numpy_to_tf,
[variables, functions, control_flow])
def target():
eager_from_np(inputs, seq_len, w, b, init_state).numpy()
self.time_execution(
('NumPy_Eager', batch_size, max_seq_len, input_size, hidden_size),
target,
extras={
'max_seq_len': max_seq_len,
'batch_size': batch_size,
'input_size': input_size,
'hidden_size': hidden_size,
})
def _numpy_to_tf(self, batch_size, max_seq_len, input_size, hidden_size):
with tf.Graph().as_default():
with tf.Session() as sess:
tensors = dynamic_rnn_minimal.random_inputs_tf(batch_size, max_seq_len,
input_size, hidden_size)
inputs, seq_len, w, b, init_state = tensors
tf_from_np = conversion.convert(dynamic_rnn_minimal.numpy, numpy_to_tf,
[variables, functions, control_flow])
ops = tf_from_np(inputs, seq_len, w, b, init_state)
def target():
sess.run(ops)
self.time_execution(
('NumPy_TF', batch_size, max_seq_len, input_size, hidden_size),
target,
extras={
'max_seq_len': max_seq_len,
'batch_size': batch_size,
'input_size': input_size,
'hidden_size': hidden_size,
})
def _numpy_to_pytorch(self, batch_size, max_seq_len, input_size, hidden_size):
tensors = dynamic_rnn_minimal.random_inputs_torch(batch_size, max_seq_len,
input_size, hidden_size)
inputs, seq_len, w, b, init_state = tensors
torch_from_np = conversion.convert(dynamic_rnn_minimal.numpy,
numpy_to_torch,
[variables, functions, control_flow])
def target():
torch_from_np(inputs, seq_len, w, b, init_state).numpy()
self.time_execution(
('NumPy_Torch', batch_size, max_seq_len, input_size, hidden_size),
target,
extras={
'max_seq_len': max_seq_len,
'batch_size': batch_size,
'input_size': input_size,
'hidden_size': hidden_size,
})
def _pytorch_baseline(self, batch_size, max_seq_len, input_size, hidden_size):
inputs, seq_len, w, b, init_state = dynamic_rnn_minimal.random_inputs_torch(
batch_size, max_seq_len, input_size, hidden_size)
def target():
dynamic_rnn_minimal.pytorch(inputs, seq_len, w, b, init_state)
self.time_execution(
('PyTorch', batch_size, max_seq_len, input_size, hidden_size),
target,
extras={
'max_seq_len': max_seq_len,
'batch_size': batch_size,
'input_size': input_size,
'hidden_size': hidden_size,
})
def _pytorch_to_eager(self, batch_size, max_seq_len, input_size, hidden_size):
inputs, seq_len, w, b, init_state = dynamic_rnn_minimal.random_inputs_tf(
batch_size, max_seq_len, input_size, hidden_size)
eager_from_torch = conversion.convert(dynamic_rnn_minimal.pytorch,
pytorch_to_tf,
[variables, functions, control_flow])
def target():
eager_from_torch(inputs, seq_len, w, b, init_state).numpy()
self.time_execution(
('PyTorch_Eager', batch_size, max_seq_len, input_size, hidden_size),
target,
extras={
'max_seq_len': max_seq_len,
'batch_size': batch_size,
'input_size': input_size,
'hidden_size': hidden_size,
})
def _pytorch_to_numpy(self, batch_size, max_seq_len, input_size, hidden_size):
tensors = dynamic_rnn_minimal.random_inputs_numpy(batch_size, max_seq_len,
input_size, hidden_size)
inputs, seq_len, w, b, init_state = tensors
numpy_from_torch = conversion.convert(dynamic_rnn_minimal.pytorch,
pytorch_to_numpy,
[variables, functions, control_flow])
def target():
np.copy(numpy_from_torch(inputs, seq_len, w, b, init_state))
self.time_execution(
('PyTorch_NumPy', batch_size, max_seq_len, input_size, hidden_size),
target,
extras={
'max_seq_len': max_seq_len,
'batch_size': batch_size,
'input_size': input_size,
'hidden_size': hidden_size,
})
def _pytorch_to_tf(self, batch_size, max_seq_len, input_size, hidden_size):
with tf.Graph().as_default():
with tf.Session() as sess:
tensors = dynamic_rnn_minimal.random_inputs_tf(batch_size, max_seq_len,
input_size, hidden_size)
inputs, seq_len, w, b, init_state = tensors
tf_from_torch = conversion.convert(dynamic_rnn_minimal.pytorch,
pytorch_to_tf,
[variables, functions, control_flow])
ops = tf_from_torch(inputs, seq_len, w, b, init_state)
def target():
sess.run(ops)
self.time_execution(
            ('PyTorch_TF', batch_size, max_seq_len, input_size, hidden_size),
target,
extras={
'max_seq_len': max_seq_len,
'batch_size': batch_size,
'input_size': input_size,
'hidden_size': hidden_size,
})
def _eager_baseline(self, batch_size, max_seq_len, input_size, hidden_size):
inputs, seq_len, w, b, init_state = dynamic_rnn_minimal.random_inputs_tf(
batch_size, max_seq_len, input_size, hidden_size)
def target():
dynamic_rnn_minimal.eager(inputs, seq_len, w, b, init_state).numpy()
self.time_execution(
('Eager', batch_size, max_seq_len, input_size, hidden_size),
target,
extras={
'max_seq_len': max_seq_len,
'batch_size': batch_size,
'input_size': input_size,
'hidden_size': hidden_size,
})
def _eager_to_numpy(self, batch_size, max_seq_len, input_size, hidden_size):
tensors = dynamic_rnn_minimal.random_inputs_numpy(batch_size, max_seq_len,
input_size, hidden_size)
inputs, seq_len, w, b, init_state = tensors
numpy_from_eager = conversion.convert(dynamic_rnn_minimal.eager,
tf_to_numpy,
[variables, functions, control_flow])
def target():
np.copy(numpy_from_eager(inputs, seq_len, w, b, init_state))
self.time_execution(
('Eager_NumPy', batch_size, max_seq_len, input_size, hidden_size),
target,
extras={
'max_seq_len': max_seq_len,
'batch_size': batch_size,
'input_size': input_size,
'hidden_size': hidden_size,
})
def _eager_to_pytorch(self, batch_size, max_seq_len, input_size, hidden_size):
tensors = dynamic_rnn_minimal.random_inputs_torch(batch_size, max_seq_len,
input_size, hidden_size)
inputs, seq_len, w, b, init_state = tensors
torch_from_eager = conversion.convert(dynamic_rnn_minimal.eager,
tf_to_pytorch,
[variables, functions, control_flow])
def target():
torch_from_eager(inputs, seq_len, w, b, init_state).numpy()
self.time_execution(
('Eager_PyTorch', batch_size, max_seq_len, input_size, hidden_size),
target,
extras={
'max_seq_len': max_seq_len,
'batch_size': batch_size,
'input_size': input_size,
'hidden_size': hidden_size,
})
def _eager_to_tf(self, batch_size, max_seq_len, input_size, hidden_size):
with tf.Graph().as_default():
with tf.Session() as sess:
tensors = dynamic_rnn_minimal.random_inputs_tf(batch_size, max_seq_len,
input_size, hidden_size)
inputs, seq_len, w, b, init_state = tensors
tf_from_eager = conversion.convert(dynamic_rnn_minimal.eager, tf_,
[variables, functions, control_flow])
ops = tf_from_eager(inputs, seq_len, w, b, init_state)
def target():
sess.run(ops)
self.time_execution(
('Eager_TF', batch_size, max_seq_len, input_size, hidden_size),
target,
extras={
'max_seq_len': max_seq_len,
'batch_size': batch_size,
'input_size': input_size,
'hidden_size': hidden_size,
})
def _tf_baseline(self, batch_size, max_seq_len, input_size, hidden_size):
with tf.Graph().as_default():
with tf.Session() as sess:
tensors = dynamic_rnn_minimal.random_inputs_tf(batch_size, max_seq_len,
input_size, hidden_size)
inputs, seq_len, w, b, init_state = tensors
ops = dynamic_rnn_minimal.tf_(inputs, seq_len, w, b, init_state)
def target():
sess.run(ops)
self.time_execution(
('TF', batch_size, max_seq_len, input_size, hidden_size),
target,
extras={
'max_seq_len': max_seq_len,
'batch_size': batch_size,
'input_size': input_size,
'hidden_size': hidden_size,
})
def _autograph_baseline(self, batch_size, max_seq_len, input_size,
hidden_size):
with tf.Graph().as_default():
with tf.Session() as sess:
inputs, seq_len, w, b, init_state = dynamic_rnn_minimal.random_inputs_tf(
batch_size, max_seq_len, input_size, hidden_size)
converted_fn = tf.autograph.to_graph(
dynamic_rnn_minimal.eager, experimental_optional_features=None)
ops = converted_fn(inputs, seq_len, w, b, init_state)
def target():
sess.run(ops)
self.time_execution(
('AutoGraph', batch_size, max_seq_len, input_size, hidden_size),
target,
extras={
'max_seq_len': max_seq_len,
'batch_size': batch_size,
'input_size': input_size,
'hidden_size': hidden_size,
})
def benchmark_all(self):
for batch_size in (32,):
for max_seq_len in (64,):
self._eager_baseline(batch_size, max_seq_len, FEATURE_SIZE, HIDDEN_SIZE)
self._eager_to_numpy(batch_size, max_seq_len, FEATURE_SIZE, HIDDEN_SIZE)
self._eager_to_pytorch(
batch_size, max_seq_len, FEATURE_SIZE, HIDDEN_SIZE)
self._eager_to_tf(batch_size, max_seq_len, FEATURE_SIZE, HIDDEN_SIZE)
# TODO(mdanatg): Check correctness. It's suspiciously slow.
self._numpy_baseline(batch_size, max_seq_len, FEATURE_SIZE, HIDDEN_SIZE)
self._numpy_to_eager(batch_size, max_seq_len, FEATURE_SIZE, HIDDEN_SIZE)
self._numpy_to_pytorch(
batch_size, max_seq_len, FEATURE_SIZE, HIDDEN_SIZE)
self._numpy_to_tf(batch_size, max_seq_len, FEATURE_SIZE, HIDDEN_SIZE)
self._pytorch_baseline(
batch_size, max_seq_len, FEATURE_SIZE, HIDDEN_SIZE)
self._pytorch_to_eager(
batch_size, max_seq_len, FEATURE_SIZE, HIDDEN_SIZE)
self._pytorch_to_numpy(
batch_size, max_seq_len, FEATURE_SIZE, HIDDEN_SIZE)
self._pytorch_to_tf(batch_size, max_seq_len, FEATURE_SIZE, HIDDEN_SIZE)
self._tf_baseline(batch_size, max_seq_len, FEATURE_SIZE, HIDDEN_SIZE)
self._autograph_baseline(batch_size, max_seq_len, FEATURE_SIZE,
HIDDEN_SIZE)
if __name__ == '__main__':
tf.test.main()
|
{
"content_hash": "7a8750b403948ca9b524d55f267b5f78",
"timestamp": "",
"source": "github",
"line_count": 363,
"max_line_length": 81,
"avg_line_length": 38.523415977961434,
"alnum_prop": 0.5610697940503433,
"repo_name": "google/pyctr",
"id": "9396f1fcbffcf998f3a3bd5e4532445051bb7773",
"size": "14639",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/benchmarks/rnn_benchmark.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "270509"
}
],
"symlink_target": ""
}
|
from PyOpenWorm import *
class PatchClampExperiment(Experiment):
def __init__(self, reference=False, **kwargs):
Experiment.__init__(self, reference)
# enumerate conditions patch-clamp experiments should have
patch_clamp_conditions = [
'type',
'duration',
'delta_t',
'start_time',
'end_time',
'protocol_start',
'protocol_end',
'protocol_step'
]
for c in patch_clamp_conditions:
self.conditions.set(c, None)
        for c, v in kwargs.iteritems():
            self.conditions.set(c, v)
class References(Property):
multiple=True
def __init__(self, **kwargs):
Property.__init__(self, 'references', **kwargs)
self._refs = []
def set(self, e=False, **kwargs):
"""
Add a reference to the list.
This method will also take care of mapping the Evidence's assertion to
this ChannelModel
Parameters
----------
e : Evidence or Experiment
The Experiment or Evidence that supports this ChannelModel
Returns
-------
None
"""
if isinstance(e, Evidence):
e.asserts(self.owner)
self._refs.append(e)
elif isinstance(e, Experiment):
e = e.reference()
e.asserts(self.owner)
self._refs.append(e)
def get(self, **kwargs):
"""
Retrieve the reference list for this ChannelModel
Parameters
----------
None
Returns
-------
Set of Evidence and Experiment objects
"""
if len(self._refs) == 0:
#Make dummy Evidence to load from db
ev = Evidence()
ev.asserts(self.owner)
#Make dummy Experiment with this Evidence
ex = Experiment(reference=ev)
#load from db
for e in ev.load():
self._refs.append(e)
for e in ex.load():
self._refs.append(e)
#now return the iterable set
for r in self._refs:
yield r
class ChannelModelType:
patchClamp = "Patch clamp experiment"
homologyEstimate = "Estimation based on homology"
class ChannelModel(DataObject):
"""
A model for an ion channel.
There may be multiple models for a single channel.
Parameters
----------
modelType : DatatypeProperty
What this model is based on (either "homology" or "patch-clamp")
Attributes
----------
modelType : DatatypeProperty
Passed in on construction
ion : DatatypeProperty
The type of ion this channel selects for
gating : DatatypeProperty
The gating mechanism for this channel ("voltage" or name of ligand(s) )
references : Property
Evidence for this model. May be either Experiment or Evidence object(s).
conductance : DatatypeProperty
The conductance of this ion channel. This is the initial value, and
should be entered as a Quantity object.
"""
def __init__(self, modelType=False, *args, **kwargs):
DataObject.__init__(self, **kwargs)
ChannelModel.DatatypeProperty('modelType', self)
ChannelModel.DatatypeProperty('ion', self, multiple=True)
ChannelModel.DatatypeProperty('gating', self, multiple=True)
ChannelModel.DatatypeProperty('conductance', self)
References(owner=self)
#Change modelType value to something from ChannelModelType class on init
if (isinstance(modelType, basestring)):
modelType = modelType.lower()
if modelType in ('homology', ChannelModelType.homologyEstimate):
self.modelType(ChannelModelType.homologyEstimate)
elif modelType in ('patch-clamp', ChannelModelType.patchClamp):
self.modelType(ChannelModelType.patchClamp)
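# Usage sketch (values are illustrative, not from a real dataset):
#   cm = ChannelModel(modelType='patch-clamp')
#   cm.ion('K+')
#   cm.gating('voltage')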
|
{
"content_hash": "9ab1a67361e577cf5dd6bde975ff29b3",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 80,
"avg_line_length": 30.930232558139537,
"alnum_prop": 0.5824561403508772,
"repo_name": "travs/PyOpenWorm",
"id": "a1ae56c84bf65d03af466688a2ef123c0814a948",
"size": "3990",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "PyOpenWorm/channelworm.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "215278"
}
],
"symlink_target": ""
}
|