| text (stringlengths 4–1.02M) | meta (dict) |
|---|---|
import json
import os
import praw
import urllib.request
class RedditScraper():
def __init__(self, output_folder="output"):
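        # Substrings that mark a submission URL as a direct image link
        # (only URLs containing 'jpg' are downloaded by default).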
self.check_words = ['jpg']
self.output_folder = output_folder
        with open('credentials.json', 'r') as credFile:
            credentials = json.load(credFile)
CLIENT_ID = credentials["client_id"]
CLIENT_SECRET = credentials["client_secret"]
self.reddit = praw.Reddit(client_id=CLIENT_ID,
client_secret=CLIENT_SECRET,
user_agent='test')
def scrape_subreddit(self, subreddit, max_posts):
filenames = []
print("Scraping memes from r/" + subreddit)
if not os.path.exists(self.output_folder + "/" + subreddit):
os.makedirs(self.output_folder + "/" + subreddit)
for submission in self.reddit.subreddit(subreddit).top('month', limit=max_posts):
is_image = any(string in submission.url for string in self.check_words)
# print ('[LOG] Checking url: ' + submission.url)
# print(submission.shortlink)
if is_image:
image_url = submission.url
# print(image_url)
                split_string = image_url.split("/")
                file_out = self.output_folder + "/" + subreddit + "/" + split_string[-1]
                with open(file_out, 'wb') as f:
                    f.write(urllib.request.urlopen(image_url).read())
filenames.append(file_out)
return filenames
def scrape_all(self, subreddits, max_posts):
all_filenames = []
for subreddit in subreddits:
try:
all_filenames.extend(self.scrape_subreddit(subreddit, max_posts))
except Exception as e:
print("Error scraping r/" + subreddit)
print(e)
print("Done")
return all_filenames
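# Usage sketch (assumes a credentials.json containing "client_id" and
# "client_secret"; the subreddit names below are illustrative):
#
#     scraper = RedditScraper(output_folder="output")
#     files = scraper.scrape_all(["memes", "wholesomememes"], max_posts=25)
#     print("Downloaded " + str(len(files)) + " images")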
|
{
"content_hash": "20031917f984fe880246a480d55ede72",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 107,
"avg_line_length": 37.471698113207545,
"alnum_prop": 0.5478348439073515,
"repo_name": "debkbanerji/super-mem",
"id": "50a54da96a96b3c4ed4681245adedc846b319192",
"size": "2034",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "reddit_scraper/scraper.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3018"
},
{
"name": "HTML",
"bytes": "12001"
},
{
"name": "JavaScript",
"bytes": "16752"
},
{
"name": "OpenEdge ABL",
"bytes": "198305"
},
{
"name": "Python",
"bytes": "1548515"
},
{
"name": "Shell",
"bytes": "3290"
}
],
"symlink_target": ""
}
|
"""
Salt Edge Account Information API
API Reference for services # noqa: E501
OpenAPI spec version: 5.0.0
Contact: support@saltedge.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class DuplicateTransactionsRequestBody(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'data': 'DuplicateTransactionsRequestBodyData'
}
attribute_map = {
'data': 'data'
}
def __init__(self, data=None): # noqa: E501
"""DuplicateTransactionsRequestBody - a model defined in Swagger""" # noqa: E501
self._data = None
self.discriminator = None
self.data = data
@property
def data(self):
"""Gets the data of this DuplicateTransactionsRequestBody. # noqa: E501
:return: The data of this DuplicateTransactionsRequestBody. # noqa: E501
:rtype: DuplicateTransactionsRequestBodyData
"""
return self._data
@data.setter
def data(self, data):
"""Sets the data of this DuplicateTransactionsRequestBody.
:param data: The data of this DuplicateTransactionsRequestBody. # noqa: E501
:type: DuplicateTransactionsRequestBodyData
"""
if data is None:
raise ValueError("Invalid value for `data`, must not be `None`") # noqa: E501
self._data = data
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(DuplicateTransactionsRequestBody, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, DuplicateTransactionsRequestBody):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
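# Usage sketch (DuplicateTransactionsRequestBodyData is the companion
# generated model; constructing it is not shown here):
#
#     body = DuplicateTransactionsRequestBody(data=data_instance)
#     print(body.to_dict())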
|
{
"content_hash": "fe1f3bce90ccb22b780265bf1d37b325",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 90,
"avg_line_length": 30.431192660550458,
"alnum_prop": 0.5688875489900512,
"repo_name": "ltowarek/budget-supervisor",
"id": "efad15c3dc869d8455de41244ff6e6e0a2483014",
"size": "3334",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "third_party/saltedge/swagger_client/models/duplicate_transactions_request_body.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "7960"
},
{
"name": "JavaScript",
"bytes": "79489"
}
],
"symlink_target": ""
}
|
__author__ = 'Taio'
import json, collections
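# An autovivifying tree: looking up a missing key creates another
# defaultdict of the same kind, so arbitrarily deep paths can be
# assigned in a single statement.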
tree = lambda: collections.defaultdict(tree)
root = tree()
root['menu']['id'] = 'file'
root['menu']['value'] = 'File'
root['menu']['menuitems']['new']['value'] = 'New'
root['menu']['menuitems']['new']['onclick'] = 'new();'
root['menu']['menuitems']['open']['value'] = 'Open'
root['menu']['menuitems']['open']['onclick'] = 'open();'
root['menu']['menuitems']['close']['value'] = 'Close'
root['menu']['menuitems']['close']['onclick'] = 'close();'
print(json.dumps(root, sort_keys=True, indent=4, separators=(',', ': ')))
|
{
"content_hash": "1db3ed4b6a04ed8a854798c584ffb5bb",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 72,
"avg_line_length": 35.375,
"alnum_prop": 0.598939929328622,
"repo_name": "jiasir/python-examples",
"id": "49d11b66d6f1b175f94090ed798db2255fbd6afd",
"size": "566",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "using_default_dictionaries_to_represent_simple_trees.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7113"
}
],
"symlink_target": ""
}
|
import requests
from spyjam import settings
import os
class ImageDownloadPipeline(object):
def process_item(self, item, spider):
if 'image_urls' in item:
images = []
dir_path = '%s/%s' % (settings.IMAGES_STORE, spider.name)
if not os.path.exists(dir_path):
os.makedirs(dir_path)
for image_url in item['image_urls']:
us = image_url.split('/')[3:]
image_file_name = '_'.join(us)
file_path = '%s/%s' % (dir_path, image_file_name)
images.append(file_path)
if os.path.exists(file_path):
continue
with open(file_path, 'wb') as handle:
response = requests.get(image_url, stream=True)
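                    # stream=True avoids loading the whole image into memory;
                    # the loop below writes it to disk in 1 KiB chunks.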
for block in response.iter_content(1024):
if not block:
break
handle.write(block)
item['images'] = images
return item
|
{
"content_hash": "a9667bb048bc23d84c20bddfd21376b0",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 69,
"avg_line_length": 33.16129032258065,
"alnum_prop": 0.4883268482490272,
"repo_name": "ttslj/spyjam",
"id": "a32c8c5ce9a9ddb32a68df06f60d7586612524f2",
"size": "1222",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "spyjam/pipelines.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "6275"
}
],
"symlink_target": ""
}
|
"""Generate CloudPrint Proxy Config File.
Given a Google Account email and password, generates a config file for the
CloudPrint proxy to use. For instructions on how to use this file, see:
"Install Google Cloud Print on a Linux server"
http://support.google.com/chromeos/a/bin/answer.py?&answer=2616503.
"""
import getpass
import sys
import urllib
import os
import random
__author__ = 'gene@google.com (Gene Gutnik)'
CONFIG_FILE = """{
"cloud_print": {
"email": "%s",
"auth_token": "%s",
"xmpp_auth_token": "%s",
"proxy_id": "%s",
"enabled": true,
"print_system_settings": {
"print_server_urls": [
"%s"
]
}
}
}
"""
CONFIG_FILE_WO_SERVER = """{
"cloud_print": {
"email": "%s",
"auth_token": "%s",
"xmpp_auth_token": "%s",
"proxy_id": "%s",
"enabled": true
}
}
"""
def GetAuth(query_params):
stream = urllib.urlopen('https://www.google.com/accounts/ClientLogin',
urllib.urlencode(query_params))
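  # A successful ClientLogin response contains "SID=", "LSID=" and "Auth="
  # lines; only the Auth token is needed here.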
for line in stream:
if line.strip().startswith('Auth='):
return line.strip().replace('Auth=', '')
return None
if __name__ == '__main__':
env = os.environ
  email = env.get('CLOUD_PRINT_EMAIL')
  password = env.get('CLOUD_PRINT_PASS')
proxy_id = "CloudPrint_unRAID_%s" % random.randrange(100, 1000, 3)
printserver = ""
if not email or not password:
sys.exit(1)
params = {'accountType': 'GOOGLE',
'Email': email,
'Passwd': password,
'service': 'cloudprint',
'source': 'CP-GenConfig'}
auth_token = GetAuth(params)
params['service'] = 'chromiumsync'
xmpp_auth_token = GetAuth(params)
if not auth_token or not xmpp_auth_token:
print "Google authentication failed."
sys.exit(1)
filename = 'Service State'
config_file = open(filename, 'w')
if printserver:
config_file.write(CONFIG_FILE % (email, auth_token, xmpp_auth_token,
proxy_id, printserver))
else:
config_file.write(CONFIG_FILE_WO_SERVER % (email, auth_token,
xmpp_auth_token, proxy_id))
config_file.close()
print 'Config file %s generated for proxy %s' % (filename, proxy_id)
|
{
"content_hash": "fef3b04755e102fab86782287ae2b440",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 84,
"avg_line_length": 26.123595505617978,
"alnum_prop": 0.5995698924731183,
"repo_name": "amacie/cups-cloud-print",
"id": "3d04646a3e7c0de1ad8c8cdd8a4417f0522e3fa1",
"size": "2396",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "generate_cloudprint_config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2188"
},
{
"name": "Python",
"bytes": "2396"
},
{
"name": "Shell",
"bytes": "2580"
}
],
"symlink_target": ""
}
|
"""
install_examples.py
Dialogs that installs scripts to a desired directory. Simple front end to
sfc_models.examples.install_example_scripts
Migrated to sfc_models.examples
License/Disclaimer
------------------
Copyright 2017 Brian Romanchuk
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import sys
if sys.version_info[0] < 3:
import Tkinter as tk
from Tkinter import *
    import tkMessageBox as mbox
    import tkFileDialog as fdog
else:
import tkinter as tk
from tkinter import *
import tkinter.messagebox as mbox
import tkinter.filedialog as fdog
from sfc_models.examples import install_example_scripts
validate_str = """
This command will install the sfc_models example scripts to a directory that you specify. It will also create a sub-directory named "output" (which is where log files are directed).
It will not overwrite existing files; it is recommended that you clear out your local copy of the examples directory before installing an updated examples set.
"""
def install_examples():
"""
Pops up windows to allow the user to choose a directory for installation
of sfc_models examples.
Uses tkinter, which is installed in base Python (modern versions).
:return:
"""
if not mbox.askokcancel(title='sfc_models Example Installation',
message=validate_str):
return
    target = fdog.askdirectory(title='Choose directory for sfc_models examples installation')
if target == () or target == '':
return
install_example_scripts.install(target)
if __name__ == '__main__':
install_examples()
|
{
"content_hash": "44a6c96091f8abd6b7e703a2018ab515",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 161,
"avg_line_length": 31.33823529411765,
"alnum_prop": 0.7301736274049742,
"repo_name": "brianr747/sfc_gui",
"id": "dbd7f75334f9c3cc0d1c327fac2a0721b4c93e8a",
"size": "2146",
"binary": false,
"copies": "1",
"ref": "refs/heads/development",
"path": "sfc_gui/install_examples.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "58298"
}
],
"symlink_target": ""
}
|
from mpi4py import MPI
import mpiunittest as unittest
_basic = [
None,
True, False,
-7, 0, 7, 2**31,
-2**63+1, 2**63-1,
-2.17, 0.0, 3.14,
1+2j, 2-3j,
'mpi4py',
]
messages = list(_basic)
messages += [
list(_basic),
tuple(_basic),
dict((f'k{k}', v) for k, v in enumerate(_basic)),
]
messages = messages + [messages]
def create_topo_comms(comm):
size = comm.Get_size()
rank = comm.Get_rank()
# Cartesian
    n = int(size**(1.0/2.0))
    m = int(size**(1.0/3.0))
if m*m*m == size:
dims = [m, m, m]
elif n*n == size:
dims = [n, n]
else:
dims = [size]
periods = [True] * len(dims)
yield comm.Create_cart(dims, periods=periods)
# Graph
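    # Ring topology: rank i is linked to (i-1) % size and (i+1) % size;
    # index[] holds the cumulative neighbour counts per node, as expected
    # by Create_graph.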
index, edges = [0], []
for i in range(size):
pos = index[-1]
index.append(pos+2)
edges.append((i-1)%size)
edges.append((i+1)%size)
yield comm.Create_graph(index, edges)
# Dist Graph
sources = [(rank-2)%size, (rank-1)%size]
destinations = [(rank+1)%size, (rank+2)%size]
yield comm.Create_dist_graph_adjacent(sources, destinations)
def get_neighbors_count(comm):
topo = comm.Get_topology()
if topo == MPI.CART:
ndim = comm.Get_dim()
return 2*ndim, 2*ndim
if topo == MPI.GRAPH:
rank = comm.Get_rank()
nneighbors = comm.Get_neighbors_count(rank)
return nneighbors, nneighbors
if topo == MPI.DIST_GRAPH:
indeg, outdeg, w = comm.Get_dist_neighbors_count()
return indeg, outdeg
return 0, 0
def have_feature():
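    # Probe for MPI neighborhood collectives by attempting a call on a
    # trivial Cartesian communicator; NotImplementedError means the
    # feature is unavailable in this MPI build.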
cartcomm = MPI.COMM_SELF.Create_cart([1], periods=[0])
try:
cartcomm.neighbor_allgather(None)
return True
except NotImplementedError:
return False
finally:
cartcomm.Free()
@unittest.skipIf(not have_feature(), 'mpi-neighbor')
class BaseTestCCONghObj(object):
COMM = MPI.COMM_NULL
@unittest.skipMPI('openmpi(<2.2.0)')
def testNeighborAllgather(self):
for comm in create_topo_comms(self.COMM):
rsize, ssize = get_neighbors_count(comm)
for smess in messages:
rmess = comm.neighbor_allgather(smess)
self.assertEqual(rmess, [smess] * rsize)
comm.Free()
def testNeighborAlltoall(self):
for comm in create_topo_comms(self.COMM):
rsize, ssize = get_neighbors_count(comm)
for smess in messages:
rmess = comm.neighbor_alltoall([smess] * ssize)
self.assertEqual(rmess, [smess] * rsize)
comm.Free()
class TestCCONghObjSelf(BaseTestCCONghObj, unittest.TestCase):
COMM = MPI.COMM_SELF
class TestCCONghObjWorld(BaseTestCCONghObj, unittest.TestCase):
COMM = MPI.COMM_WORLD
class TestCCONghObjSelfDup(TestCCONghObjSelf):
def setUp(self):
self.COMM = MPI.COMM_SELF.Dup()
def tearDown(self):
self.COMM.Free()
class TestCCONghObjWorldDup(TestCCONghObjWorld):
def setUp(self):
self.COMM = MPI.COMM_WORLD.Dup()
def tearDown(self):
self.COMM.Free()
name, version = MPI.get_vendor()
if name == 'Open MPI':
if version < (1,8,4):
_create_topo_comms = create_topo_comms
def create_topo_comms(comm):
for c in _create_topo_comms(comm):
if c.size * 2 < sum(c.degrees):
c.Free(); continue
yield c
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "f0728f4f377e18647e7db2dac6293ced",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 64,
"avg_line_length": 27.133858267716537,
"alnum_prop": 0.5856065002901916,
"repo_name": "mpi4py/mpi4py",
"id": "b57b5173ed60da89cd1b5cf9782380a3dda9c49c",
"size": "3446",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_cco_ngh_obj.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3328"
},
{
"name": "C",
"bytes": "439363"
},
{
"name": "CMake",
"bytes": "6169"
},
{
"name": "Cython",
"bytes": "559570"
},
{
"name": "Makefile",
"bytes": "1220"
},
{
"name": "PowerShell",
"bytes": "3707"
},
{
"name": "Python",
"bytes": "948515"
},
{
"name": "SWIG",
"bytes": "2282"
},
{
"name": "Shell",
"bytes": "14524"
}
],
"symlink_target": ""
}
|
from twilio.rest import TwilioRestClient
# Your Account Sid and Auth Token from twilio.com/user/account
account_sid = "ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
auth_token = "your_auth_token"
client = TwilioRestClient(account_sid, auth_token)
call = client.calls.create(
url="http://demo.twilio.com/docs/voice.xml",
to="+14155551212",
from_="+18668675309",
method="GET",
status_callback="https://www.myapp.com/events",
status_callback_method="POST",
status_events=["initiated", "ringing", "answered", "completed"],
)
print(call.sid)
|
{
"content_hash": "e721e7cbfb14bb29b7403d254b7d4c0a",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 68,
"avg_line_length": 32.76470588235294,
"alnum_prop": 0.718132854578097,
"repo_name": "teoreteetik/api-snippets",
"id": "d514f6cca2b4f73f164fa1672266370910d838cd",
"size": "630",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "rest/voice/outbound-calls/example-4/example-4.5.x.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "643369"
},
{
"name": "HTML",
"bytes": "335"
},
{
"name": "Java",
"bytes": "943336"
},
{
"name": "JavaScript",
"bytes": "539577"
},
{
"name": "M",
"bytes": "117"
},
{
"name": "Mathematica",
"bytes": "93"
},
{
"name": "Objective-C",
"bytes": "46198"
},
{
"name": "PHP",
"bytes": "538312"
},
{
"name": "Python",
"bytes": "467248"
},
{
"name": "Ruby",
"bytes": "470316"
},
{
"name": "Shell",
"bytes": "1564"
},
{
"name": "Swift",
"bytes": "36563"
}
],
"symlink_target": ""
}
|
import copy
import mock
from oslo.serialization import jsonutils
import webob
from nova.api.openstack.compute import image_metadata
from nova.api.openstack.compute.plugins.v3 import image_metadata \
as image_metadata_v21
from nova import exception
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import image_fixtures
IMAGE_FIXTURES = image_fixtures.get_image_fixtures()
CHK_QUOTA_STR = 'nova.api.openstack.common.check_img_metadata_properties_quota'
def get_image_123():
return copy.deepcopy(IMAGE_FIXTURES)[0]
class ImageMetaDataTestV21(test.NoDBTestCase):
controller_class = image_metadata_v21.ImageMetadataController
invalid_request = exception.ValidationError
def setUp(self):
super(ImageMetaDataTestV21, self).setUp()
self.controller = self.controller_class()
@mock.patch('nova.image.api.API.get', return_value=get_image_123())
def test_index(self, get_all_mocked):
req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata')
res_dict = self.controller.index(req, '123')
expected = {'metadata': {'key1': 'value1'}}
self.assertEqual(res_dict, expected)
get_all_mocked.assert_called_once_with(mock.ANY, '123')
@mock.patch('nova.image.api.API.get', return_value=get_image_123())
def test_show(self, get_mocked):
req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/key1')
res_dict = self.controller.show(req, '123', 'key1')
self.assertIn('meta', res_dict)
self.assertEqual(len(res_dict['meta']), 1)
self.assertEqual('value1', res_dict['meta']['key1'])
get_mocked.assert_called_once_with(mock.ANY, '123')
@mock.patch('nova.image.api.API.get', return_value=get_image_123())
def test_show_not_found(self, _get_mocked):
req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/key9')
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.show, req, '123', 'key9')
@mock.patch('nova.image.api.API.get',
side_effect=exception.ImageNotFound(image_id='100'))
def test_show_image_not_found(self, _get_mocked):
req = fakes.HTTPRequest.blank('/v2/fake/images/100/metadata/key1')
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.show, req, '100', 'key9')
@mock.patch(CHK_QUOTA_STR)
@mock.patch('nova.image.api.API.update')
@mock.patch('nova.image.api.API.get', return_value=get_image_123())
def test_create(self, get_mocked, update_mocked, quota_mocked):
mock_result = copy.deepcopy(get_image_123())
mock_result['properties']['key7'] = 'value7'
update_mocked.return_value = mock_result
req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata')
req.method = 'POST'
body = {"metadata": {"key7": "value7"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
res = self.controller.create(req, '123', body=body)
get_mocked.assert_called_once_with(mock.ANY, '123')
expected = copy.deepcopy(get_image_123())
expected['properties'] = {
'key1': 'value1', # existing meta
'key7': 'value7' # new meta
}
quota_mocked.assert_called_once_with(mock.ANY, expected["properties"])
update_mocked.assert_called_once_with(mock.ANY, '123', expected,
data=None, purge_props=True)
expected_output = {'metadata': {'key1': 'value1', 'key7': 'value7'}}
self.assertEqual(expected_output, res)
@mock.patch(CHK_QUOTA_STR)
@mock.patch('nova.image.api.API.update')
@mock.patch('nova.image.api.API.get',
side_effect=exception.ImageNotFound(image_id='100'))
def test_create_image_not_found(self, _get_mocked, update_mocked,
quota_mocked):
req = fakes.HTTPRequest.blank('/v2/fake/images/100/metadata')
req.method = 'POST'
body = {"metadata": {"key7": "value7"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.create, req, '100', body=body)
self.assertFalse(quota_mocked.called)
self.assertFalse(update_mocked.called)
@mock.patch(CHK_QUOTA_STR)
@mock.patch('nova.image.api.API.update')
@mock.patch('nova.image.api.API.get', return_value=get_image_123())
def test_update_all(self, get_mocked, update_mocked, quota_mocked):
req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata')
req.method = 'PUT'
body = {"metadata": {"key9": "value9"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
res = self.controller.update_all(req, '123', body=body)
get_mocked.assert_called_once_with(mock.ANY, '123')
expected = copy.deepcopy(get_image_123())
expected['properties'] = {
'key9': 'value9' # replace meta
}
quota_mocked.assert_called_once_with(mock.ANY, expected["properties"])
update_mocked.assert_called_once_with(mock.ANY, '123', expected,
data=None, purge_props=True)
expected_output = {'metadata': {'key9': 'value9'}}
self.assertEqual(expected_output, res)
@mock.patch(CHK_QUOTA_STR)
@mock.patch('nova.image.api.API.get',
side_effect=exception.ImageNotFound(image_id='100'))
def test_update_all_image_not_found(self, _get_mocked, quota_mocked):
req = fakes.HTTPRequest.blank('/v2/fake/images/100/metadata')
req.method = 'PUT'
body = {"metadata": {"key9": "value9"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.update_all, req, '100', body=body)
self.assertFalse(quota_mocked.called)
@mock.patch(CHK_QUOTA_STR)
@mock.patch('nova.image.api.API.update')
@mock.patch('nova.image.api.API.get', return_value=get_image_123())
def test_update_item(self, _get_mocked, update_mocked, quota_mocked):
req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/key1')
req.method = 'PUT'
body = {"meta": {"key1": "zz"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
res = self.controller.update(req, '123', 'key1', body=body)
expected = copy.deepcopy(get_image_123())
expected['properties'] = {
'key1': 'zz' # changed meta
}
quota_mocked.assert_called_once_with(mock.ANY, expected["properties"])
update_mocked.assert_called_once_with(mock.ANY, '123', expected,
data=None, purge_props=True)
expected_output = {'meta': {'key1': 'zz'}}
self.assertEqual(res, expected_output)
@mock.patch(CHK_QUOTA_STR)
@mock.patch('nova.image.api.API.get',
side_effect=exception.ImageNotFound(image_id='100'))
def test_update_item_image_not_found(self, _get_mocked, quota_mocked):
req = fakes.HTTPRequest.blank('/v2/fake/images/100/metadata/key1')
req.method = 'PUT'
body = {"meta": {"key1": "zz"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.update, req, '100', 'key1',
body=body)
self.assertFalse(quota_mocked.called)
@mock.patch(CHK_QUOTA_STR)
@mock.patch('nova.image.api.API.update')
@mock.patch('nova.image.api.API.get')
def test_update_item_bad_body(self, get_mocked, update_mocked,
quota_mocked):
req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/key1')
req.method = 'PUT'
body = {"key1": "zz"}
req.body = ''
req.headers["content-type"] = "application/json"
self.assertRaises(self.invalid_request,
self.controller.update, req, '123', 'key1',
body=body)
self.assertFalse(get_mocked.called)
self.assertFalse(quota_mocked.called)
self.assertFalse(update_mocked.called)
@mock.patch(CHK_QUOTA_STR,
side_effect=webob.exc.HTTPRequestEntityTooLarge(
explanation='', headers={'Retry-After': 0}))
@mock.patch('nova.image.api.API.update')
@mock.patch('nova.image.api.API.get')
def test_update_item_too_many_keys(self, get_mocked, update_mocked,
_quota_mocked):
req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/key1')
req.method = 'PUT'
body = {"meta": {"foo": "bar"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update, req, '123', 'key1',
body=body)
self.assertFalse(get_mocked.called)
self.assertFalse(update_mocked.called)
@mock.patch(CHK_QUOTA_STR)
@mock.patch('nova.image.api.API.update')
@mock.patch('nova.image.api.API.get', return_value=get_image_123())
def test_update_item_body_uri_mismatch(self, _get_mocked, update_mocked,
quota_mocked):
req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/bad')
req.method = 'PUT'
body = {"meta": {"key1": "value1"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update, req, '123', 'bad',
body=body)
self.assertFalse(quota_mocked.called)
self.assertFalse(update_mocked.called)
@mock.patch('nova.image.api.API.update')
@mock.patch('nova.image.api.API.get', return_value=get_image_123())
def test_delete(self, _get_mocked, update_mocked):
req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/key1')
req.method = 'DELETE'
res = self.controller.delete(req, '123', 'key1')
expected = copy.deepcopy(get_image_123())
expected['properties'] = {}
update_mocked.assert_called_once_with(mock.ANY, '123', expected,
data=None, purge_props=True)
self.assertIsNone(res)
@mock.patch('nova.image.api.API.get', return_value=get_image_123())
def test_delete_not_found(self, _get_mocked):
req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/blah')
req.method = 'DELETE'
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.delete, req, '123', 'blah')
@mock.patch('nova.image.api.API.get',
side_effect=exception.ImageNotFound(image_id='100'))
def test_delete_image_not_found(self, _get_mocked):
req = fakes.HTTPRequest.blank('/v2/fake/images/100/metadata/key1')
req.method = 'DELETE'
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.delete, req, '100', 'key1')
@mock.patch(CHK_QUOTA_STR,
side_effect=webob.exc.HTTPForbidden(
explanation='', headers={'Retry-After': 0}))
@mock.patch('nova.image.api.API.update')
@mock.patch('nova.image.api.API.get', return_value=get_image_123())
def test_too_many_metadata_items_on_create(self, _get_mocked,
update_mocked, _quota_mocked):
body = {"metadata": {"foo": "bar"}}
req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata')
req.method = 'POST'
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPForbidden,
self.controller.create, req, '123', body=body)
self.assertFalse(update_mocked.called)
@mock.patch(CHK_QUOTA_STR,
side_effect=webob.exc.HTTPForbidden(
explanation='', headers={'Retry-After': 0}))
@mock.patch('nova.image.api.API.update')
@mock.patch('nova.image.api.API.get', return_value=get_image_123())
def test_too_many_metadata_items_on_put(self, _get_mocked,
update_mocked, _quota_mocked):
body = {"metadata": {"foo": "bar"}}
req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/blah')
req.method = 'PUT'
body = {"meta": {"blah": "blah", "blah1": "blah1"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(self.invalid_request,
self.controller.update, req, '123', 'blah',
body=body)
self.assertFalse(update_mocked.called)
@mock.patch('nova.image.api.API.get',
side_effect=exception.ImageNotAuthorized(image_id='123'))
def test_image_not_authorized_update(self, _get_mocked):
req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/key1')
req.method = 'PUT'
body = {"meta": {"key1": "value1"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPForbidden,
self.controller.update, req, '123', 'key1',
body=body)
@mock.patch('nova.image.api.API.get',
side_effect=exception.ImageNotAuthorized(image_id='123'))
def test_image_not_authorized_update_all(self, _get_mocked):
image_id = 131
# see nova.tests.unit.api.openstack.fakes:_make_image_fixtures
req = fakes.HTTPRequest.blank('/v2/fake/images/%s/metadata/key1'
% image_id)
req.method = 'PUT'
body = {"metadata": {"key1": "value1"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPForbidden,
self.controller.update_all, req, image_id,
body=body)
@mock.patch('nova.image.api.API.get',
side_effect=exception.ImageNotAuthorized(image_id='123'))
def test_image_not_authorized_create(self, _get_mocked):
image_id = 131
# see nova.tests.unit.api.openstack.fakes:_make_image_fixtures
req = fakes.HTTPRequest.blank('/v2/fake/images/%s/metadata/key1'
% image_id)
req.method = 'POST'
body = {"metadata": {"key1": "value1"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPForbidden,
self.controller.create, req, image_id,
body=body)
class ImageMetaDataTestV2(ImageMetaDataTestV21):
controller_class = image_metadata.Controller
invalid_request = webob.exc.HTTPBadRequest
# NOTE(cyeoh): This duplicate unittest is necessary for a race condition
    # with the V21 unittests. It's a mock issue.
@mock.patch('nova.image.api.API.update')
@mock.patch('nova.image.api.API.get', return_value=get_image_123())
def test_delete(self, _get_mocked, update_mocked):
req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/key1')
req.method = 'DELETE'
res = self.controller.delete(req, '123', 'key1')
expected = copy.deepcopy(get_image_123())
expected['properties'] = {}
update_mocked.assert_called_once_with(mock.ANY, '123', expected,
data=None, purge_props=True)
self.assertIsNone(res)
|
{
"content_hash": "d66eacd095fd269f1720725db383643b",
"timestamp": "",
"source": "github",
"line_count": 361,
"max_line_length": 79,
"avg_line_length": 44.75623268698061,
"alnum_prop": 0.5993068020053228,
"repo_name": "shakamunyi/nova",
"id": "8eac0d65f3cc4779afe3a84c946ebd92b2914ec8",
"size": "16793",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "nova/tests/unit/api/openstack/compute/test_image_metadata.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "15322211"
},
{
"name": "Shell",
"bytes": "17730"
},
{
"name": "Smarty",
"bytes": "489682"
}
],
"symlink_target": ""
}
|
"""A component that allows one to place colored and scaled glyphs at
input point data.
"""
# Author: Prabhu Ramachandran <prabhu_r@users.sf.net>
# KK Rai (kk.rai [at] iitb.ac.in)
# R. Ambareesha (ambareesha [at] iitb.ac.in)
# Copyright (c) 2005-2007, Enthought, Inc.
# License: BSD Style.
# Enthought library imports.
from traits.api import Instance, Trait, Bool
from traits.api import Enum
from traitsui.api import View, Group, Item
from tvtk.api import tvtk
from tvtk.tvtk_base import TraitRevPrefixMap
import tvtk.common as tvtk_common
# Local imports.
from mayavi.core.component import Component
from mayavi.core.module import Module
from mayavi.components import glyph_source
######################################################################
# `Glyph` class.
######################################################################
class Glyph(Component):
# The version of this class. Used for persistence.
__version__ = 0
# Type of Glyph: 'tensor' or 'vector'
glyph_type = Enum('vector', 'tensor',
desc = 'if the glyph is vector or tensor')
# The scaling mode to use when scaling the glyphs. We could have
# used the glyph's own scale mode but it allows users to set the
# mode to use vector components for the scaling which I'd like to
# disallow.
scale_mode = Trait('scale_by_scalar',
TraitRevPrefixMap({'scale_by_vector': 1,
'scale_by_vector_components': 2,
'data_scaling_off': 3,
'scale_by_scalar': 0}),
desc="if scaling is done using scalar or vector/normal magnitude"
)
# The color mode to use when coloring the glyphs. We could have
# used the glyph's own color_mode trait but it allows users to set
# the mode to use vector components for the scaling which I'd
# like to disallow.
color_mode = Trait('color_by_scalar',
TraitRevPrefixMap({'color_by_vector': 2,
'color_by_scalar': 1,
'no_coloring': 0}),
desc="if coloring is done by scalar or vector/normal magnitude"
)
color_mode_tensor = Trait('scalar',
TraitRevPrefixMap({'scalars': 1,
'eigenvalues':2,
'no_coloring': 0}),
desc="if coloring is done by scalar or eigenvalues"
)
# Specify if the input points must be masked. By mask we mean
# that only a subset of the input points must be displayed.
mask_input_points = Bool(False, desc="if input points are masked")
# The MaskPoints filter.
mask_points = Instance(tvtk.MaskPoints, args=(),
kw={'random_mode': True}, record=True)
# The Glyph3D instance.
glyph = Instance(tvtk.Object, allow_none=False, record=True)
# The Source to use for the glyph. This is chosen from
# `self._glyph_list` or `self.glyph_dict`.
glyph_source = Instance(glyph_source.GlyphSource,
allow_none=False, record=True)
# The module associated with this component. This is used to get
# the data range of the glyph when the scale mode changes. This
# *must* be set if this module is to work correctly.
module = Instance(Module)
# Should we show the GUI option for changing the scalar mode or
# not? This is useful for vector glyphing modules where there it
# does not make sense to scale the data based on scalars.
show_scale_mode = Bool(True)
########################################
# Private traits.
# Used for optimization.
_updating = Bool(False)
########################################
# View related traits.
view = View(Group(Item(name='mask_input_points'),
Group(Item(name='mask_points',
enabled_when='object.mask_input_points',
style='custom', resizable=True),
show_labels=False,
),
label='Masking',
),
Group(Group(Item(name='scale_mode',
enabled_when='show_scale_mode',
visible_when='show_scale_mode'),
Item(name='color_mode',
enabled_when= 'glyph_type == "vector"',
visible_when= 'glyph_type == "vector"'),
Item(name='color_mode_tensor',
enabled_when= 'glyph_type == "tensor"',
visible_when= 'glyph_type == "tensor"'),
),
Group(Item(name='glyph', style='custom',
resizable=True),
show_labels=False),
label='Glyph',
selected=True,
),
Group(Item(name='glyph_source',
style='custom', resizable=True),
show_labels=False,
label='Glyph Source',
),
resizable=True
)
######################################################################
# `object` interface
######################################################################
def __get_pure_state__(self):
d = super(Glyph, self).__get_pure_state__()
for attr in ('module', '_updating'):
d.pop(attr, None)
return d
######################################################################
# `Module` interface
######################################################################
def setup_pipeline(self):
"""Override this method so that it *creates* the tvtk
pipeline.
This method is invoked when the object is initialized via
`__init__`. Note that at the time this method is called, the
tvtk data pipeline will *not* yet be setup. So upstream data
will not be available. The idea is that you simply create the
basic objects and setup those parts of the pipeline not
dependent on upstream sources and filters. You should also
set the `actors` attribute up at this point.
"""
self._glyph_type_changed(self.glyph_type)
self.glyph_source = glyph_source.GlyphSource()
# Handlers to setup our source when the sources pipeline changes.
self.glyph_source.on_trait_change(self._update_source, 'pipeline_changed')
self.mask_points.on_trait_change(self.render)
def update_pipeline(self):
"""Override this method so that it *updates* the tvtk pipeline
when data upstream is known to have changed.
This method is invoked (automatically) when any of the inputs
sends a `pipeline_changed` event.
"""
if ((len(self.inputs) == 0) or (len(self.inputs[0].outputs) == 0)):
return
self._mask_input_points_changed(self.mask_input_points)
if self.glyph_type == 'vector':
self._color_mode_changed(self.color_mode)
else:
self._color_mode_tensor_changed(self.color_mode_tensor)
self._scale_mode_changed(self.scale_mode)
# Set our output.
tvtk_common.configure_outputs(self, self.glyph)
self.pipeline_changed = True
def update_data(self):
"""Override this method so that it flushes the vtk pipeline if
that is necessary.
This method is invoked (automatically) when any of the inputs
sends a `data_changed` event.
"""
self._scale_mode_changed(self.scale_mode)
self.data_changed = True
def render(self):
if not self._updating:
super(Glyph, self).render()
def start(self):
"""Overridden method.
"""
if self.running:
return
self.glyph_source.start()
super(Glyph, self).start()
def stop(self):
if not self.running:
return
self.glyph_source.stop()
super(Glyph, self).stop()
def has_output_port(self):
""" The filter has an output port."""
return True
def get_output_object(self):
""" Returns the output port."""
return self.glyph.output_port
######################################################################
# Non-public methods.
######################################################################
def _update_source(self):
self.configure_source_data(self.glyph, self.glyph_source.outputs[0])
def _glyph_source_changed(self, value):
self.configure_source_data(self.glyph, value.outputs[0])
def _color_mode_changed(self, value):
if len(self.inputs) == 0:
return
if value != 'no_coloring':
self.glyph.color_mode = value
def _color_mode_tensor_changed(self, value):
if len(self.inputs) == 0:
return
self._updating = True
if value != 'no_coloring':
self.glyph.color_mode = value
self.glyph.color_glyphs = True
else:
self.glyph.color_glyphs = False
self._updating = False
self.render()
def _scale_mode_changed(self, value):
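        # Tensor glyphs have no scale mode; for vector glyphs, keep the
        # glyph's range in sync with the module's LUT manager so scaling
        # reflects the current scalar/vector data range.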
if (self.module is None) or (len(self.inputs) == 0)\
or self.glyph_type == 'tensor':
return
self._updating = True
try:
glyph = self.glyph
glyph.scale_mode = value
mm = self.module.module_manager
if glyph.scale_mode == 'scale_by_scalar':
glyph.range = tuple(mm.scalar_lut_manager.data_range)
else:
glyph.range = tuple(mm.vector_lut_manager.data_range)
finally:
self._updating = False
self.render()
def _mask_input_points_changed(self, value):
inputs = self.inputs
if len(inputs) == 0:
return
if value:
mask = self.mask_points
tvtk_common.configure_input(mask, inputs[0].outputs[0])
self.configure_connection(self.glyph, mask)
else:
self.configure_connection(self.glyph, inputs[0])
self.glyph.update()
def _glyph_type_changed(self, value):
if self.glyph_type == 'vector':
self.glyph = tvtk.Glyph3D(clamping=True)
else:
self.glyph = tvtk.TensorGlyph(scale_factor=0.1)
self.show_scale_mode = False
self.glyph.on_trait_change(self.render)
def _scene_changed(self, old, new):
super(Glyph, self)._scene_changed(old, new)
self.glyph_source.scene = new
|
{
"content_hash": "309b199e2a20e74d80fb120cffa733cc",
"timestamp": "",
"source": "github",
"line_count": 289,
"max_line_length": 88,
"avg_line_length": 38.50865051903114,
"alnum_prop": 0.5163087429238925,
"repo_name": "alexandreleroux/mayavi",
"id": "904301031539336632715e16f8d6533afd1b45ab",
"size": "11129",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "mayavi/components/glyph.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1054"
},
{
"name": "GAP",
"bytes": "34817"
},
{
"name": "Python",
"bytes": "2511883"
},
{
"name": "Shell",
"bytes": "147"
}
],
"symlink_target": ""
}
|
from pycast.common.timeseries import TimeSeries
from pycast.common.pycastobject import PyCastObject
class BaseMethod(PyCastObject):
"""Baseclass for all smoothing and forecasting methods."""
_interval_definitions = { True: ["[", "]"], False: ["(", ")"]}
def __init__(self, requiredParameters=None, hasToBeSorted=True, hasToBeNormalized=True):
"""Initializes the BaseMethod.
:param list requiredParameters: List of parameternames that have to be defined.
:param boolean hasToBeSorted: Defines if the TimeSeries has to be sorted or not.
:param boolean hasToBeNormalized: Defines if the TimeSeries has to be normalized or not.
"""
if requiredParameters is None:
requiredParameters = []
super(BaseMethod, self).__init__()
self._parameters = {}
self._parameterIntervals = self._get_parameter_intervals()
self._requiredParameters = {}
for entry in requiredParameters:
self._requiredParameters[entry] = None
self._hasToBeSorted = hasToBeSorted
self._hasToBeNormalized = hasToBeNormalized
def _get_parameter_intervals(self):
"""Returns the intervals for the methods parameter.
Only parameters with defined intervals can be used for optimization!
:return: Returns a dictionary containing the parameter intervals, using the parameter
            name as key, while the value has the following format:
[minValue, maxValue, minIntervalClosed, maxIntervalClosed]
- minValue
Minimal value for the parameter
- maxValue
Maximal value for the parameter
- minIntervalClosed
:py:const:`True`, if minValue represents a valid value for the parameter.
:py:const:`False` otherwise.
- maxIntervalClosed:
:py:const:`True`, if maxValue represents a valid value for the parameter.
:py:const:`False` otherwise.
:rtype: dictionary
"""
parameterIntervals = {}
# YOUR METHOD SPECIFIC CODE HERE!
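        # e.g. parameterIntervals["smoothingFactor"] = [0.0, 1.0, False, True]
        # would declare a parameter that is valid within (0.0, 1.0].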
if self.__class__.__name__ not in ["BaseMethod", "BaseForecastingMethod"]:
raise NotImplementedError
return parameterIntervals
def get_interval(self, parameter):
"""Returns the interval for a given parameter.
:param string parameter: Name of the parameter.
:return: Returns a list containing with [minValue, maxValue, minIntervalClosed, maxIntervalClosed].
If no interval definitions for the given parameter exist, :py:const:`None` is returned.
- minValue
Minimal value for the parameter
- maxValue
Maximal value for the parameter
- minIntervalClosed
:py:const:`True`, if minValue represents a valid value for the parameter.
:py:const:`False` otherwise.
- maxIntervalClosed:
:py:const:`True`, if maxValue represents a valid value for the parameter.
:py:const:`False` otherwise.
:rtype: list
"""
if parameter not in self._parameterIntervals:
return None
return self._parameterIntervals[parameter]
def get_required_parameters(self):
"""Returns a list with the names of all required parameters.
:return: Returns a list with the names of all required parameters.
:rtype: list
"""
return self._requiredParameters.keys()
def _in_valid_interval(self, parameter, value):
"""Returns if the parameter is within its valid interval.
:param string parameter: Name of the parameter that has to be checked.
:param numeric value: Value of the parameter.
:return: Returns :py:const:`True` it the value for the given parameter is valid,
:py:const:`False` otherwise.
:rtype: boolean
"""
        # return True if no interval is defined for the parameter
if parameter not in self._parameterIntervals:
return True
interval = self._parameterIntervals[parameter]
if interval[2] and interval[3]:
return interval[0] <= value <= interval[1]
if not interval[2] and interval[3]:
return interval[0] < value <= interval[1]
if interval[2] and not interval[3]:
return interval[0] <= value < interval[1]
#if False == interval[2] and False == interval[3]:
return interval[0] < value < interval[1]
    def _get_value_error_message_for_invalid_parameter(self, parameter, value):
"""Returns the ValueError message for the given parameter.
:param string parameter: Name of the parameter the message has to be created for.
:param numeric value: Value outside the parameters interval.
        :return: Returns a string containing the message.
:rtype: string
"""
        # return None if no interval is defined for the parameter
if parameter not in self._parameterIntervals:
return
interval = self._parameterIntervals[parameter]
return "%s has to be in %s%s, %s%s. Current value is %s." % (
parameter,
BaseMethod._interval_definitions[interval[2]][0],
interval[0], interval[1],
BaseMethod._interval_definitions[interval[3]][1],
value
)
def set_parameter(self, name, value):
"""Sets a parameter for the BaseMethod.
:param string name: Name of the parameter that has to be checked.
:param numeric value: Value of the parameter.
"""
if not self._in_valid_interval(name, value):
            raise ValueError(self._get_value_error_message_for_invalid_parameter(name, value))
#if name in self._parameters:
# print "Parameter %s already existed. It's old value will be replaced with %s" % (name, value)
self._parameters[name] = value
def get_parameter(self, name):
"""Returns a forecasting parameter.
:param string name: Name of the parameter.
:return: Returns the value stored in parameter.
:rtype: numeric
:raise: Raises a :py:exc:`KeyError` if the parameter is not defined.
"""
return self._parameters[name]
def has_to_be_normalized(self):
"""Returns if the TimeSeries has to be normalized or not.
:return: Returns :py:const:`True` if the TimeSeries has to be normalized, :py:const:`False` otherwise.
:rtype: boolean
"""
return self._hasToBeNormalized
def has_to_be_sorted(self):
"""Returns if the TimeSeries has to be sorted or not.
:return: Returns :py:const:`True` if the TimeSeries has to be sorted, :py:const:`False` otherwise.
:rtype: boolean
"""
return self._hasToBeSorted
def can_be_executed(self):
"""Returns if the method can already be executed.
:return: Returns :py:const:`True` if all required parameters where already set, False otherwise.
:rtype: boolean
"""
        missingParams = [rp for rp in self._requiredParameters if rp not in self._parameters]
return len(missingParams) == 0
def execute(self, timeSeries):
"""Executes the BaseMethod on a given TimeSeries object.
        :param TimeSeries timeSeries: TimeSeries object that fulfills all requirements (normalization, sortOrder).
:return: Returns a TimeSeries object containing the smoothed/forecasted values.
:rtype: TimeSeries
:raise: Raises a :py:exc:`NotImplementedError` if the child class does not overwrite this function.
"""
raise NotImplementedError
class BaseForecastingMethod(BaseMethod):
"""Basemethod for all forecasting methods."""
def __init__(self, requiredParameters=None, valuesToForecast=1, hasToBeSorted=True, hasToBeNormalized=True):
"""Initializes the BaseForecastingMethod.
:param list requiredParameters: List of parameternames that have to be defined.
:param integer valuesToForecast: Number of entries that will be forecasted.
This can be changed by using forecast_until().
:param boolean hasToBeSorted: Defines if the TimeSeries has to be sorted or not.
:param boolean hasToBeNormalized: Defines if the TimeSeries has to be normalized or not.
:raise: Raises a :py:exc:`ValueError` when valuesToForecast is smaller than zero.
"""
if requiredParameters is None:
requiredParameters = []
if "valuesToForecast" not in requiredParameters:
requiredParameters.append("valuesToForecast")
if valuesToForecast < 0:
raise ValueError("valuesToForecast has to be larger than zero.")
super(BaseForecastingMethod, self).__init__(requiredParameters, hasToBeSorted=hasToBeSorted, hasToBeNormalized=hasToBeNormalized)
self.set_parameter("valuesToForecast", valuesToForecast)
self._forecastUntil = None
def get_optimizable_parameters(self):
"""Returns a list with optimizable parameters.
All required parameters of a forecasting method with defined intervals can be used for optimization.
:return: Returns a list with optimizable parameter names.
:rtype: list
:todo: Should we return all parameter names from the self._parameterIntervals instead?
"""
        return [parameter for parameter in self._requiredParameters if parameter in self._parameterIntervals]
def set_parameter(self, name, value):
"""Sets a parameter for the BaseForecastingMethod.
:param string name: Name of the parameter.
:param numeric value: Value of the parameter.
"""
        # set the forecastUntil variable to None if necessary
if name == "valuesToForecast":
self._forecastUntil = None
# continue with the parents implementation
return super(BaseForecastingMethod, self).set_parameter(name, value)
def forecast_until(self, timestamp, tsformat=None):
"""Sets the forecasting goal (timestamp wise).
This function enables the automatic determination of valuesToForecast.
:param timestamp: timestamp containing the end date of the forecast.
:param string tsformat: Format of the timestamp. This is used to convert the
timestamp from UNIX epochs, if necessary. For valid examples
take a look into the :py:func:`time.strptime` documentation.
"""
if tsformat is not None:
timestamp = TimeSeries.convert_timestamp_to_epoch(timestamp, tsformat)
self._forecastUntil = timestamp
def _calculate_values_to_forecast(self, timeSeries):
"""Calculates the number of values, that need to be forecasted to match the goal set in forecast_until.
This sets the parameter "valuesToForecast" and should be called at the beginning of the :py:meth:`BaseMethod.execute` implementation.
:param TimeSeries timeSeries: Should be a sorted and normalized TimeSeries instance.
:raise: Raises a :py:exc:`ValueError` if the TimeSeries is either not normalized or sorted.
"""
# do not set anything, if it is not required
if self._forecastUntil is None:
return
# check the TimeSeries for correctness
if not timeSeries.is_sorted():
raise ValueError("timeSeries has to be sorted.")
if not timeSeries.is_normalized():
raise ValueError("timeSeries has to be normalized.")
timediff = timeSeries[-1][0] - timeSeries[-2][0]
forecastSpan = self._forecastUntil - timeSeries[-1][0]
self.set_parameter("valuesToForecast", int(forecastSpan / timediff) + 1)
|
{
"content_hash": "040ef4f19a7b8663ec57e9e8c754fd70",
"timestamp": "",
"source": "github",
"line_count": 298,
"max_line_length": 141,
"avg_line_length": 40.513422818791945,
"alnum_prop": 0.6399403627930091,
"repo_name": "T-002/pycast",
"id": "03e67368e4f17956d62778dfda2458caea41dbcd",
"size": "13226",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pycast/methods/basemethod.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "4332"
},
{
"name": "C++",
"bytes": "33576"
},
{
"name": "Gnuplot",
"bytes": "426"
},
{
"name": "HTML",
"bytes": "2789"
},
{
"name": "JavaScript",
"bytes": "3531"
},
{
"name": "Makefile",
"bytes": "6854"
},
{
"name": "Python",
"bytes": "400662"
}
],
"symlink_target": ""
}
|
"""Commands for the peristaltic pump"""
import math
from typing import Optional, Any
from PyQt5 import QtCore
from PyQt5.QtCore import pyqtSignal as Signal, pyqtSlot as Slot
import time
import logging
from .command import Command, InstantCommand
from .commandargument import FloatArgument, StringArgument, IntArgument
from ..devices.peristalticpump.leadfluid.frontend import BT100S, ControlMode
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class PeristalticPumpDispense(Command):
name = 'pp_dispense_wait'
description = 'Drive the peristaltic pump for the desired time, wait until completed'
arguments = [FloatArgument('dispensetime', 'Dispense time in seconds', defaultvalue=None),
StringArgument('direction', 'Rotation direction (clockwise or counterclockwise)', defaultvalue=None),
FloatArgument('speed', 'Rotation speed (rpm)', defaultvalue=None),
]
starttime: Optional[float] = None
timerinterval = 0.1
clockwise: Optional[bool]
dispensetime: Optional[float]
speed: Optional[float]
wait_until_complete: bool = True
def initialize(self, dispensetime: Optional[float], direction: Optional[str], speed: Optional[float]):
logger.debug(f'Initializing pp_dispense command (variant: {self.name}): {dispensetime=}, {direction=}, {speed=}')
try:
self.device()
except IndexError:
raise self.CommandException('No peristaltic pump found!')
self.dispensetime = dispensetime
if direction is None:
self.clockwise = None
elif direction.lower() == 'clockwise':
self.clockwise = True
elif direction.lower() == 'counterclockwise':
self.clockwise = False
else:
raise self.CommandException('Invalid value for argument `direction`: '
'must be either "clockwise" or "counterclockwise".')
self.speed = speed
self.device().commandResult.connect(self.onCommandResult)
if dispensetime is None:
logger.debug('Skipping setting dispense time: default requested')
self.onCommandResult(True, 'set_dispense_time', None)
elif math.isfinite(dispensetime):
logger.debug(f'Setting dispense time to {dispensetime:.1f} sec')
self.device().setDispenseTime(dispensetime)
else:
            # Continuous operation requested: do not set a dispense time.
            logger.debug('Skipping setting dispense time: infinite')
            self.onCommandResult(True, 'set_dispense_time', None)
self.starttime = None
def finalize(self):
logger.debug(f'Finalizing command {self.name}')
self.device().commandResult.disconnect(self.onCommandResult)
def device(self) -> BT100S:
return self.instrument.devicemanager.peristalticpump()
def timerEvent(self, event: QtCore.QTimerEvent) -> None:
if self.starttime is None:
self.progress.emit('Initializing peristaltic pump...', 0, 0)
else:
disptime = self.device()['dispense_time']
elapsed = time.monotonic() - self.starttime
if (elapsed > disptime) and (not self.device()['running']):
# dispense time elapsed and the pump is not running anymore => we are finished
self.finish(True)
else:
self.progress.emit(
f'Dispensing for {disptime:.1f} seconds, '
f'{disptime - elapsed:.1f} seconds remaining',
int(1000 * elapsed / disptime), 1000
)
def stop(self):
self.device().stopRotation()
self.fail('User stop')
@Slot(bool, str, object)
def onCommandResult(self, success: bool, command: str, result: Any):
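        # The pump setup runs as a command chain: each acknowledged device
        # command triggers the next step (dispense time -> direction ->
        # speed -> control mode -> start), driven entirely by this slot.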
logger.debug(f'Command result from peristaltic pump: {success=}, {command=}, {result=}')
if not success:
self.fail(f'Peristaltic pump command {command} failed.')
elif command == 'set_dispense_time':
if self.clockwise is None:
self.onCommandResult(True, 'clockwise', None)
else:
self.device().setClockwise(self.clockwise)
elif (command == 'clockwise') or (command == 'counterclockwise'):
if self.speed is None:
self.onCommandResult(True, 'setspeed', None)
else:
self.device().setRotationSpeed(self.speed)
elif (command == 'setspeed'):
if (self.dispensetime is None) or math.isfinite(self.dispensetime):
# finite dispense mode
self.device().setControlMode(ControlMode.Foot_Switch)
else:
assert not math.isfinite(self.dispensetime)
# infinite dispense mode
self.device().setControlMode(ControlMode.Internal)
elif (command == 'footswitch_control') or (command == 'internal_control'):
self.device().startRotation()
elif (command == 'start'):
self.message.emit(
f'Peristaltic pump running {self.device()["direction"]} at {self.device()["rotating_speed"]:.1f} rpm ' + (
f'for {self.device()["dispense_time"]:.1f} seconds.' if (
self.device()["control_mode"] == ControlMode.Foot_Switch.value) else "until stopped.")
)
if (not self.wait_until_complete) or (
(self.dispensetime is not None) and (not math.isfinite(self.dispensetime))):
self.finish(True)
else:
self.starttime = time.monotonic()
class PeristalticPumpDispenseNowait(PeristalticPumpDispense):
name = 'pp_dispense_start'
description = 'Drive the peristaltic pump for the desired time, do not wait until completed'
wait_until_complete: bool = False
class PeristalticPumpStart(PeristalticPumpDispense):
name = 'pp_start'
description = 'Start the peristaltic pump'
arguments = [StringArgument('direction', 'Rotation direction (clockwise or counterclockwise)', defaultvalue=None),
FloatArgument('speed', 'Rotation speed (rpm)', defaultvalue=None),
]
wait_until_complete = False
def initialize(self, direction: Optional[str], speed: Optional[float]):
return super().initialize(math.inf, direction, speed)
class PeristalticPumpStop(Command):
name = 'pp_stop'
description = 'Stop the peristaltic pump'
arguments=[]
def initialize(self, *args: Any):
try:
self.device()
except IndexError:
raise self.CommandException('No peristaltic pump found!')
self.device().commandResult.connect(self.onCommandResult)
self.device().stopRotation()
def finalize(self):
self.device().commandResult.disconnect(self.onCommandResult)
def device(self) -> BT100S:
return self.instrument.devicemanager.peristalticpump()
@Slot(bool, str, object)
def onCommandResult(self, success: bool, command: str, result: Any):
if not success:
self.fail(f'Peristaltic pump command {command} failed.')
elif command == 'stop':
self.message.emit('Peristaltic pump stopped.')
self.finish(True)
|
{
"content_hash": "d1603d591b7098880e5306cc3cc9b2be",
"timestamp": "",
"source": "github",
"line_count": 170,
"max_line_length": 122,
"avg_line_length": 43.26470588235294,
"alnum_prop": 0.6274643099932019,
"repo_name": "awacha/cct",
"id": "f4b3da230cd7fca68f928a180ac3d23225d00d10",
"size": "7372",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cct/core2/commands/peristalticpump.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "607"
},
{
"name": "CSS",
"bytes": "908"
},
{
"name": "Cython",
"bytes": "70859"
},
{
"name": "HTML",
"bytes": "1665"
},
{
"name": "Jupyter Notebook",
"bytes": "195924"
},
{
"name": "Python",
"bytes": "1944682"
},
{
"name": "Shell",
"bytes": "481"
}
],
"symlink_target": ""
}
|
def myblock(tw, x): # ignore second argument
''' Load journal stats to heap (Sugar only) '''
import os
import glob
_DIROFINTEREST = 'datastore'
class ParseJournal():
''' Simple parser of datastore for turtle art entries '''
def __init__(self):
self._score = []
homepath = os.environ['HOME']
for path in glob.glob(os.path.join(homepath, '.sugar', '*')):
if isdsdir(path):
dsobjdirs = glob.glob(
os.path.join(path, _DIROFINTEREST, '??'))
for dsobjdir in dsobjdirs:
dsobjs = glob.glob(os.path.join(dsobjdir, '*'))
for dsobj in dsobjs:
                        if isactivity(dsobj) != 'TurtleArtActivity':
continue
if hascomponent(dsobj, 'mime_type') != \
'application/x-turtle-art':
continue
score = hasturtleblocks(dsobj)
if score:
self._score.append(score)
def hascomponent(path, component):
''' Return metadata attribute, if any '''
if not os.path.exists(os.path.join(path, 'metadata')):
return False
if not os.path.exists(os.path.join(path, 'metadata', component)):
return False
fd = open(os.path.join(path, 'metadata', component))
data = fd.readline()
fd.close()
if len(data) == 0:
return False
return data
def isactivity(path):
''' Return activity name '''
activity = hascomponent(path, 'activity')
if not activity:
return False
else:
return activity.split('.')[-1]
def isdsdir(path):
''' Only interested if it is a datastore directory '''
if not os.path.isdir(path):
return False
if not os.path.exists(os.path.join(path, _DIROFINTEREST)):
return False
return True
TACAT = {'clean': 'forward', 'forward': 'forward', 'back': 'forward',
'left': 'forward', 'right': 'forward', 'arc': 'arc',
'xcor': 'coord', 'ycor': 'coord', 'heading': 'coord',
'setxy2': 'setxy', 'seth': 'setxy', 'penup': 'pen',
'setpensize': 'pen', 'setcolor': 'pen', 'pensize': 'pen',
'color': 'pen', 'setshade': 'pen', 'setgray': 'pen',
'gray': 'pen', 'fillscreen': 'pen', 'startfill': 'fill',
'stopfill': 'fill', 'plus2': 'number', 'minus2': 'number',
'product2': 'number', 'division2': 'number',
'pendown': 'pen', 'shade': 'pen', 'remainder2': 'number',
'sqrt': 'number', 'identity2': 'number', 'and2': 'boolean',
'or2': 'boolean', 'not': 'boolean', 'greater2': 'boolean',
'less2': 'boolean', 'equal2': 'boolean', 'random': 'random',
'repeat': 'repeat', 'forever': 'repeat', 'if': 'ifthen',
'ifelse': 'ifthen', 'while': 'ifthen', 'until': 'ifthen',
'hat': 'action', 'stack': 'action', 'storein': 'box',
'luminance': 'sensor', 'mousex': 'sensor', 'mousey': 'sensor',
'mousebutton2': 'sensor', 'keyboard': 'sensor',
'readpixel': 'sensor', 'see': 'sensor', 'time': 'sensor',
'sound': 'sensor', 'volume': 'sensor', 'pitch': 'sensor',
'resistance': 'sensor', 'voltage': 'sensor', 'video': 'media',
'wait': 'media', 'camera': 'media', 'journal': 'media',
'audio': 'media', 'show': 'media', 'setscale': 'media',
'savepix': 'media', 'savesvg': 'media', 'mediawait': 'media',
'mediapause': 'media', 'mediastop': 'media', 'mediaplay': 'media',
'speak': 'media', 'sinewave': 'media', 'description': 'media',
'push': 'extras', 'pop': 'extras', 'printheap': 'extras',
'clearheap': 'extras', 'isheapempty2': 'extras', 'chr': 'extras',
'int': 'extras', 'myfunction': 'python', 'userdefined': 'python',
'box': 'box', 'kbinput': 'sensor',
'loadblock': 'python', 'loadpalette': 'python'}
TAPAL = {'forward': 'turtlep', 'arc': 'turtlep', 'coord': 'turtlep',
'setxy': 'turtlep', 'pen': 'penp', 'fill': 'penp',
'random': 'numberp', 'boolean': 'numberp', 'repeat': 'flowp',
'ifthen': 'flowp', 'action': 'boxp', 'box': 'boxp',
'sensor': 'sensorp', 'media': 'mediap', 'extras': 'extrasp',
'number': 'numberp', 'python': 'extrasp'}
TASCORE = {'forward': 3, 'arc': 3, 'setxy': 2.5, 'coord': 4, 'turtlep': 5,
'pen': 2.5, 'fill': 2.5, 'penp': 5,
'number': 2.5, 'boolean': 2.5, 'random': 2.5, 'numberp': 0,
'repeat': 2.5, 'ifthen': 7.5, 'flowp': 10,
'box': 7.5, 'action': 7.5, 'boxp': 0,
'media': 5, 'mediap': 0,
'python': 5, 'extras': 5, 'extrasp': 0,
'sensor': 5, 'sensorp': 0}
PALS = ['turtlep', 'penp', 'numberp', 'flowp', 'boxp', 'sensorp', 'mediap',
'extrasp']
def hasturtleblocks(path):
''' Parse turtle block data and generate score based on rubric '''
if not os.path.exists(os.path.join(path, 'data')):
return None
fd = open(os.path.join(path, 'data'))
blocks = []
# block name is second token in each line
for line in fd:
tokens = line.split(',')
if len(tokens) > 1:
token = tokens[1].strip('" [')
blocks.append(token)
score = []
for i in range(len(PALS)):
score.append(0)
cats = []
pals = []
for b in blocks:
if b in TACAT:
if not TACAT[b] in cats:
cats.append(TACAT[b])
for c in cats:
if c in TAPAL:
if not TAPAL[c] in pals:
pals.append(TAPAL[c])
for c in cats:
if c in TASCORE:
score[PALS.index(TAPAL[c])] += TASCORE[c]
for p in pals:
if p in TASCORE:
score[PALS.index(p)] += TASCORE[p]
return score
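    # Worked example (added for clarity; hypothetical project): a program
    # using only 'forward' and 'repeat' blocks yields cats = ['forward',
    # 'repeat'] and pals = ['turtlep', 'flowp'], so
    #     turtlep: TASCORE['forward'] + TASCORE['turtlep'] = 3 + 5 = 8
    #     flowp:   TASCORE['repeat']  + TASCORE['flowp']   = 2.5 + 10 = 12.5
    # and hasturtleblocks() would return [8, 0, 0, 12.5, 0, 0, 0, 0].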
data = ParseJournal()
n = min(40, len(data._score) / len(PALS))
for i in range(n):
for j in range(len(PALS)):
tw.lc.heap.append(data._score[(n - i - 1)][len(PALS) - j - 1])
tw.lc.heap.append(n)
return
|
{
"content_hash": "5e273394c41dbb3ecc5fb108259ca335",
"timestamp": "",
"source": "github",
"line_count": 155,
"max_line_length": 79,
"avg_line_length": 42.63225806451613,
"alnum_prop": 0.47866222760290555,
"repo_name": "walterbender/turtleartmini",
"id": "9a80c046968ef86927b17e69bc6acdeb76e6dd86",
"size": "7348",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pysamples/ta-stats.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1148643"
}
],
"symlink_target": ""
}
|
"""
Module to set up run time parameters for Clawpack.
The values set in the function setrun are then written out to data files
that will be read in by the Fortran code.
"""
import os
import numpy as np
#------------------------------
def setrun(claw_pkg='geoclaw'):
#------------------------------
"""
Define the parameters used for running Clawpack.
INPUT:
claw_pkg expected to be "geoclaw" for this setrun.
OUTPUT:
rundata - object of class ClawRunData
"""
from clawpack.clawutil import data
assert claw_pkg.lower() == 'geoclaw', "Expected claw_pkg = 'geoclaw'"
num_dim = 2
rundata = data.ClawRunData(claw_pkg, num_dim)
#------------------------------------------------------------------
# GeoClaw specific parameters:
#------------------------------------------------------------------
rundata = setgeo(rundata)
#------------------------------------------------------------------
# Standard Clawpack parameters to be written to claw.data:
# (or to amr2ez.data for AMR)
#------------------------------------------------------------------
clawdata = rundata.clawdata # initialized when rundata instantiated
# Set single grid parameters first.
# See below for AMR parameters.
# ---------------
# Spatial domain:
# ---------------
# Number of space dimensions:
clawdata.num_dim = num_dim
# Lower and upper edge of computational domain:
clawdata.lower[0] = 0.0
clawdata.upper[0] = 19.76 #9.84
clawdata.lower[1] = 0.
clawdata.upper[1] = 1.52
# Number of grid cells: Coarsest grid
clawdata.num_cells[0] = 260
clawdata.num_cells[1] = 20
# ---------------
# Size of system:
# ---------------
# Number of equations in the system:
clawdata.num_eqn = 3
# Number of auxiliary variables in the aux array (initialized in setaux)
clawdata.num_aux = 1
# Index of aux array corresponding to capacity function, if there is one:
clawdata.capa_index = 0
# -------------
# Initial time:
# -------------
clawdata.t0 = 0.0
# Restart from checkpoint file of a previous run?
# Note: If restarting, you must also change the Makefile to set:
# RESTART = True
# If restarting, t0 above should be from original run, and the
# restart_file 'fort.chkNNNNN' specified below should be in
# the OUTDIR indicated in Makefile.
clawdata.restart = False # True to restart from prior results
clawdata.restart_file = 'fort.chk00006' # File to use for restart data
# -------------
# Output times:
#--------------
# Specify at what times the results should be written to fort.q files.
# Note that the time integration stops after the final output time.
# The solution at initial time t0 is always written in addition.
clawdata.output_style = 1
if clawdata.output_style==1:
# Output nout frames at equally spaced times up to tfinal:
clawdata.num_output_times = 50
clawdata.tfinal = 200.
clawdata.output_t0 = True # output at initial (or restart) time?
elif clawdata.output_style == 2:
# Specify a list of output times.
clawdata.output_times = np.linspace(24,40,81)
elif clawdata.output_style == 3:
# Output every iout timesteps with a total of ntot time steps:
clawdata.output_step_interval = 20
clawdata.total_steps = 40
clawdata.output_t0 = True
clawdata.output_format = 'binary' # 'ascii' or 'binary'
clawdata.output_q_components = 'all' # could be list such as [True,True]
clawdata.output_aux_components = 'none' # could be list
clawdata.output_aux_onlyonce = True # output aux arrays only at t0
# ---------------------------------------------------
# Verbosity of messages to screen during integration:
# ---------------------------------------------------
# The current t, dt, and cfl will be printed every time step
# at AMR levels <= verbosity. Set verbosity = 0 for no printing.
# (E.g. verbosity == 2 means print only on levels 1 and 2.)
clawdata.verbosity = 0
# --------------
# Time stepping:
# --------------
# if dt_variable==1: variable time steps used based on cfl_desired,
# if dt_variable==0: fixed time steps dt = dt_initial will always be used.
clawdata.dt_variable = True
# Initial time step for variable dt.
# If dt_variable==0 then dt=dt_initial for all steps:
clawdata.dt_initial = 0.016
# Max time step to be allowed if variable dt used:
clawdata.dt_max = 1e+99
# Desired Courant number if variable dt used, and max to allow without
# retaking step with a smaller dt:
clawdata.cfl_desired = 0.9
clawdata.cfl_max = 1.0
# Maximum number of time steps to allow between output times:
clawdata.steps_max = 100000
# ------------------
# Method to be used:
# ------------------
# Order of accuracy: 1 => Godunov, 2 => Lax-Wendroff plus limiters
clawdata.order = 2
# Use dimensional splitting? (not yet available for AMR)
clawdata.dimensional_split = 'unsplit'
# For unsplit method, transverse_waves can be
# 0 or 'none' ==> donor cell (only normal solver used)
# 1 or 'increment' ==> corner transport of waves
# 2 or 'all' ==> corner transport of 2nd order corrections too
clawdata.transverse_waves = 2
# Number of waves in the Riemann solution:
clawdata.num_waves = 3
# List of limiters to use for each wave family:
# Required: len(limiter) == num_waves
# Some options:
# 0 or 'none' ==> no limiter (Lax-Wendroff)
# 1 or 'minmod' ==> minmod
# 2 or 'superbee' ==> superbee
# 3 or 'mc' ==> MC limiter
# 4 or 'vanleer' ==> van Leer
clawdata.limiter = ['mc', 'mc', 'mc']
clawdata.use_fwaves = True # True ==> use f-wave version of algorithms
# Source terms splitting:
# src_split == 0 or 'none' ==> no source term (src routine never called)
# src_split == 1 or 'godunov' ==> Godunov (1st order) splitting used,
# src_split == 2 or 'strang' ==> Strang (2nd order) splitting used, not recommended.
clawdata.source_split = 'godunov'
# --------------------
# Boundary conditions:
# --------------------
# Number of ghost cells (usually 2)
clawdata.num_ghost = 2
# Choice of BCs at xlower and xupper:
# 0 => user specified (must modify bcN.f to use this option)
# 1 => extrapolation (non-reflecting outflow)
# 2 => periodic (must specify this at both boundaries)
# 3 => solid wall for systems where q(2) is normal velocity
clawdata.bc_lower[0] = 'user'
clawdata.bc_upper[0] = 'extrap'
clawdata.bc_lower[1] = 'wall'
clawdata.bc_upper[1] = 'wall'
# Specify when checkpoint files should be created that can be
# used to restart a computation.
clawdata.checkpt_style = 1
if clawdata.checkpt_style == 0:
# Do not checkpoint at all
pass
elif clawdata.checkpt_style == 1:
# Checkpoint only at tfinal.
pass
elif clawdata.checkpt_style == 2:
# Specify a list of checkpoint times.
clawdata.checkpt_times = [0.1,0.15]
elif clawdata.checkpt_style == 3:
# Checkpoint every checkpt_interval timesteps (on Level 1)
# and at the final time.
clawdata.checkpt_interval = 5
# ---------------
# AMR parameters:
# ---------------
amrdata = rundata.amrdata
# max number of refinement levels:
amrdata.amr_levels_max = 2
# List of refinement ratios at each level (length at least mxnest-1)
amrdata.refinement_ratios_x = [8]
amrdata.refinement_ratios_y = [8]
amrdata.refinement_ratios_t = [8]
# Specify type of each aux variable in amrdata.auxtype.
# This must be a list of length maux, each element of which is one of:
# 'center', 'capacity', 'xleft', or 'yleft' (see documentation).
amrdata.aux_type = ['center']
# Flag using refinement routine flag2refine rather than richardson error
amrdata.flag_richardson = False # use Richardson?
amrdata.flag2refine = True
# steps to take on each level L between regriddings of level L+1:
amrdata.regrid_interval = 100000 # fixed grids
# width of buffer zone around flagged points:
# (typically the same as regrid_interval so waves don't escape):
amrdata.regrid_buffer_width = 2
# clustering alg. cutoff for (# flagged pts) / (total # of cells refined)
# (closer to 1.0 => more small grids may be needed to cover flagged cells)
amrdata.clustering_cutoff = 0.700000
# print info about each regridding up to this level:
amrdata.verbosity_regrid = 0
# ----- For developers -----
# Toggle debugging print statements:
amrdata.dprint = False # print domain flags
amrdata.eprint = False # print err est flags
amrdata.edebug = False # even more err est flags
amrdata.gprint = False # grid bisection/clustering
amrdata.nprint = False # proper nesting output
amrdata.pprint = False # proj. of tagged points
amrdata.rprint = False # print regridding summary
amrdata.sprint = False # space/memory output
amrdata.tprint = False # time step reporting each level
amrdata.uprint = False # update/upbnd reporting
# More AMR parameters can be set -- see the defaults in pyclaw/data.py
# == setregions.data values ==
regions = rundata.regiondata.regions
# to specify regions of refinement append lines of the form
# [minlevel,maxlevel,t1,t2,x1,x2,y1,y2]
regions.append([1, 1, 0., 1e9, 0., 100., 0., 2.])
regions.append([2, 2, 0., 1e9, 3., 10., 0., 2.])
# == setgauges.data values ==
# for gauges append lines of the form [gaugeno, x, y, t1, t2]
# rundata.gaugedata.add_gauge()
gauges = rundata.gaugedata.gauges
x0 = 5.
y0 = 1.52/2.
gauges.append([0, 1., y0, 0., 1e10])
gauges.append([1, x0+1.02, y0, 0., 1e10])
gauges.append([2, x0+1.02, y0+0.27, 0., 1e10])
return rundata
# end of function setrun
# ----------------------
#-------------------
def setgeo(rundata):
#-------------------
"""
Set GeoClaw specific runtime parameters.
For documentation see ....
"""
try:
geo_data = rundata.geo_data
    except AttributeError:
print "*** Error, this rundata has no geo_data attribute"
raise AttributeError("Missing geo_data attribute")
# == Physics ==
geo_data.gravity = 9.81
geo_data.coordinate_system = 1
geo_data.earth_radius = 6367.5e3
# == Forcing Options
geo_data.coriolis_forcing = False
# == Algorithm and Initial Conditions ==
geo_data.sea_level = 0.0
geo_data.dry_tolerance = 1.e-3
geo_data.friction_forcing = True
geo_data.manning_coefficient = 0.015
geo_data.friction_depth = 0.05
# Refinement data
refinement_data = rundata.refinement_data
refinement_data.wave_tolerance = 1.e-2
refinement_data.deep_depth = 1e2
refinement_data.max_level_deep = 3
refinement_data.variable_dt_refinement_ratios = True
# == settopo.data values ==
topo_data = rundata.topo_data
# for topography, append lines of the form
# [topotype, minlevel, maxlevel, t1, t2, fname]
topo_data.topofiles.append([1, 1, 1, 0., 1.e10, 'domain.tt1'])
topo_data.topofiles.append([1, 1, 1, 0., 1.e10, 'hump.tt1'])
# == setdtopo.data values ==
dtopo_data = rundata.dtopo_data
# for moving topography, append lines of the form : (<= 1 allowed for now!)
# [topotype, minlevel,maxlevel,fname]
# == setqinit.data values ==
#rundata.qinit_data.qinit_type = 2
qinitfiles = rundata.qinit_data.qinitfiles
# for qinit perturbations, append lines of the form: (<= 1 allowed for now!)
# [minlev, maxlev, fname]
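    # e.g. (added illustration; 'qinit.xyz' is a hypothetical filename):
    #     qinitfiles.append([1, 2, 'qinit.xyz'])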
# == fgmax.data values ==
fgmax_files = rundata.fgmax_data.fgmax_files
# for fixed grids append to this list names of any fgmax input files
return rundata
# end of function setgeo
# ----------------------
if __name__ == '__main__':
# Set up run-time parameters and write all data files.
import sys
rundata = setrun(*sys.argv[1:])
rundata.write()
|
{
"content_hash": "8af9ff9f5dcd099a5c4e7a90b43416a0",
"timestamp": "",
"source": "github",
"line_count": 404,
"max_line_length": 92,
"avg_line_length": 30.923267326732674,
"alnum_prop": 0.5960938125350196,
"repo_name": "xinshengqin/tsunami_benchmarks",
"id": "d648faa3865b7453cd1a8916b5732a4b2cf57513",
"size": "12493",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nthmp_currents_2015/problem1/setrun.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Fortran",
"bytes": "62167"
},
{
"name": "MATLAB",
"bytes": "1772"
},
{
"name": "Makefile",
"bytes": "20125"
},
{
"name": "Python",
"bytes": "164435"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Job',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('posted', models.DateTimeField(auto_now_add=True)),
('approved', models.BooleanField(default=False)),
('filled', models.BooleanField(default=False)),
('spam', models.BooleanField(default=False)),
('posted_by_user_agent', models.CharField(default=b'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.22 (KHTML, like Gecko) Chrome/25.0.1364.172 Safari/537.22', max_length=1024)),
('posted_from_ip', models.IPAddressField(null=True, blank=True)),
('title', models.CharField(max_length=1024)),
('job_type', models.CharField(max_length=2, choices=[(b'PT', b'Part-time'), (b'FT', b'Full-time'), (b'FL', b'Freelance'), (b'VL', b'Volunteer'), (b'OT', b'Other')])),
('salary', models.CharField(max_length=128, blank=True)),
('url', models.CharField(max_length=1024, blank=True)),
('location', models.CharField(max_length=1024, blank=True)),
('company', models.CharField(max_length=512, blank=True)),
('description', models.TextField()),
('contact_email', models.EmailField(max_length=254, blank=True)),
('experience', models.CharField(max_length=128, blank=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Retweeter',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('username', models.CharField(unique=True, max_length=32)),
('access_key', models.CharField(max_length=70)),
('access_secret', models.CharField(max_length=70)),
],
options={
},
bases=(models.Model,),
),
]
|
{
"content_hash": "1aa2ca77a8600739d7669509ab91b6af",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 198,
"avg_line_length": 47.291666666666664,
"alnum_prop": 0.5533039647577093,
"repo_name": "gpjt/jobsboard",
"id": "664a30610514e43b5b75e726be74006ab85ea287",
"size": "2294",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "main/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "201"
},
{
"name": "HTML",
"bytes": "13368"
},
{
"name": "Python",
"bytes": "20498"
}
],
"symlink_target": ""
}
|
import mock
from oslo.serialization import jsonutils
import webob
from cinder import context
from cinder import exception
from cinder import test
from cinder.tests.api import fakes
# This list of fake volumes is used by our tests. Each is configured in a
# slightly different way, and includes only the properties that are required
# for these particular tests to function correctly.
snapshot_vol_id = 'ffffffff-0000-ffff-0000-fffffffffffd'
detached_vol_id = 'ffffffff-0000-ffff-0000-fffffffffffe'
attached_vol_id = 'ffffffff-0000-ffff-0000-ffffffffffff'
bad_vol_id = 'ffffffff-0000-ffff-0000-fffffffffff0'
vols = {snapshot_vol_id: {'id': snapshot_vol_id,
'status': 'available',
'attach_status': 'detached',
'host': 'fake_host',
'project_id': 'fake_project',
'migration_status': None,
'encryption_key_id': None},
detached_vol_id: {'id': detached_vol_id,
'status': 'available',
'attach_status': 'detached',
'host': 'fake_host',
'project_id': 'fake_project',
'migration_status': None,
'encryption_key_id': None},
attached_vol_id: {'id': attached_vol_id,
'status': 'available',
'attach_status': 'attached',
'host': 'fake_host',
'project_id': 'fake_project',
'migration_status': None,
'encryption_key_id': None}
}
def app():
# no auth, just let environ['cinder.context'] pass through
api = fakes.router.APIRouter()
mapper = fakes.urlmap.URLMap()
mapper['/v2'] = api
return mapper
def api_get(self, context, volume_id):
"""Replacement for cinder.volume.api.API.get.
We stub the cinder.volume.api.API.get method to check for the existence
of volume_id in our list of fake volumes and raise an exception if the
specified volume ID is not in our list.
"""
vol = vols.get(volume_id, None)
if not vol:
raise exception.NotFound
return vol
def db_snapshot_get_all_for_volume(context, volume_id):
"""Replacement for cinder.db.snapshot_get_all_for_volume.
We stub the cinder.db.snapshot_get_all_for_volume method because when we
go to unmanage a volume, the code checks for snapshots and won't unmanage
volumes with snapshots. For these tests, only the snapshot_vol_id reports
any snapshots. The delete code just checks for array length, doesn't
inspect the contents.
"""
if volume_id == snapshot_vol_id:
return ['fake_snapshot']
return []
@mock.patch('cinder.volume.api.API.get', api_get)
@mock.patch('cinder.db.snapshot_get_all_for_volume',
db_snapshot_get_all_for_volume)
class VolumeUnmanageTest(test.TestCase):
"""Test cases for cinder/api/contrib/volume_unmanage.py
The API extension adds an action to volumes, "os-unmanage", which will
effectively issue a delete operation on the volume, but with a flag set
that means that a different method will be invoked on the driver, so that
the volume is not actually deleted in the storage backend.
In this set of test cases, we are ensuring that the code correctly parses
the request structure and raises the correct exceptions when things are not
right, and calls down into cinder.volume.api.API.delete with the correct
arguments.
"""
def setUp(self):
super(VolumeUnmanageTest, self).setUp()
def _get_resp(self, volume_id):
"""Helper to build an os-unmanage req for the specified volume_id."""
req = webob.Request.blank('/v2/fake/volumes/%s/action' % volume_id)
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
req.environ['cinder.context'] = context.RequestContext('admin',
'fake',
True)
body = {'os-unmanage': ''}
req.body = jsonutils.dumps(body)
res = req.get_response(app())
return res
@mock.patch('cinder.db.volume_update')
@mock.patch('cinder.volume.rpcapi.VolumeAPI.delete_volume')
def test_unmanage_volume_ok(self, mock_rpcapi, mock_db):
"""Return success for valid and unattached volume."""
res = self._get_resp(detached_vol_id)
# volume_update is (context, id, new_data)
self.assertEqual(mock_db.call_count, 1)
self.assertEqual(len(mock_db.call_args[0]), 3, mock_db.call_args)
self.assertEqual(mock_db.call_args[0][1], detached_vol_id)
# delete_volume is (context, status, unmanageOnly)
self.assertEqual(mock_rpcapi.call_count, 1)
self.assertEqual(len(mock_rpcapi.call_args[0]), 3)
self.assertEqual(mock_rpcapi.call_args[0][2], True)
self.assertEqual(res.status_int, 202, res)
def test_unmanage_volume_bad_volume_id(self):
"""Return 404 if the volume does not exist."""
res = self._get_resp(bad_vol_id)
self.assertEqual(res.status_int, 404, res)
    def test_unmanage_volume_attached(self):
"""Return 400 if the volume exists but is attached."""
res = self._get_resp(attached_vol_id)
self.assertEqual(res.status_int, 400, res)
def test_unmanage_volume_with_snapshots(self):
"""Return 400 if the volume exists but has snapshots."""
res = self._get_resp(snapshot_vol_id)
self.assertEqual(res.status_int, 400, res)
|
{
"content_hash": "9b321c9d31b98f05380ec553988177ba",
"timestamp": "",
"source": "github",
"line_count": 144,
"max_line_length": 79,
"avg_line_length": 39.94444444444444,
"alnum_prop": 0.6110917941585535,
"repo_name": "hguemar/cinder",
"id": "c80c00d48af0320af4412abe02018f795f6608ee",
"size": "6346",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cinder/tests/api/contrib/test_volume_unmanage.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "3322"
},
{
"name": "Python",
"bytes": "10010542"
},
{
"name": "Shell",
"bytes": "9917"
}
],
"symlink_target": ""
}
|
from django.conf import settings
from experiments.models import Experiment
from modeldict import ModelDict
class LazyAutoCreate(object):
"""
A lazy version of the setting is used so that tests can change the setting and still work
"""
def __nonzero__(self):
return self.__bool__()
def __bool__(self):
return getattr(settings, 'EXPERIMENTS_AUTO_CREATE', True)
class ExperimentManager(ModelDict):
def get_experiment(self, experiment_name):
# Helper that uses self[...] so that the experiment is auto created where desired
try:
return self[experiment_name]
except KeyError:
return None
experiment_manager = ExperimentManager(Experiment, key='name', value='value', instances=True, auto_create=LazyAutoCreate())
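# Illustrative sketch (added; not part of the original module): because
# auto_create is resolved through LazyAutoCreate at lookup time rather than
# at import time, tests can toggle the setting and observe the change.
# 'hypothetical-experiment' below is a made-up name:
#
#     from django.test import override_settings
#
#     with override_settings(EXPERIMENTS_AUTO_CREATE=False):
#         # with auto-create disabled, a missing experiment is reported
#         # as absent instead of being created on the fly
#         assert experiment_manager.get_experiment(
#             'hypothetical-experiment') is None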
|
{
"content_hash": "6af43983d4e56a21ed5d825ef16a2d83",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 123,
"avg_line_length": 30.846153846153847,
"alnum_prop": 0.6870324189526185,
"repo_name": "mixcloud/django-experiments",
"id": "f0807d696000c4c8bc23b1d442081f8d6c747eb4",
"size": "802",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "experiments/manager.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1597"
},
{
"name": "HTML",
"bytes": "11032"
},
{
"name": "JavaScript",
"bytes": "9586"
},
{
"name": "Python",
"bytes": "132147"
}
],
"symlink_target": ""
}
|
"""
Support for ``typing`` py3.9+ features while min version is py3.8.
"""
from typing import * # noqa: F401
try: # py 3.9+
from typing import Annotated
except (ImportError, ModuleNotFoundError): # optional dependency
try:
from typing_extensions import Annotated
except (ImportError, ModuleNotFoundError):
Annotated = NotImplemented
    else:  # typing_extensions is importable: let it override typing
        from typing_extensions import *  # noqa: F401
HAS_ANNOTATED = Annotated is not NotImplemented
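# Illustrative usage sketch (added): downstream code can guard its use of
# Annotated on HAS_ANNOTATED; the alias name and 'm' metadata below are
# hypothetical, not astropy API:
#
#     if HAS_ANNOTATED:
#         Meters = Annotated[float, 'm']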
|
{
"content_hash": "c4ac014c03541971d1ea41ffa3b77c57",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 66,
"avg_line_length": 26.31578947368421,
"alnum_prop": 0.696,
"repo_name": "larrybradley/astropy",
"id": "9ae93becf632de0b25949d8aa0e974f2080c44f7",
"size": "564",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "astropy/units/_typing.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "11040101"
},
{
"name": "C++",
"bytes": "47001"
},
{
"name": "Cython",
"bytes": "78755"
},
{
"name": "HTML",
"bytes": "1172"
},
{
"name": "Lex",
"bytes": "183333"
},
{
"name": "M4",
"bytes": "18757"
},
{
"name": "Makefile",
"bytes": "52508"
},
{
"name": "Python",
"bytes": "12335716"
},
{
"name": "Shell",
"bytes": "17024"
},
{
"name": "TeX",
"bytes": "853"
}
],
"symlink_target": ""
}
|
import threading
import mock
import netaddr
from neutron_lib.api.definitions import floating_ip_port_forwarding as apidef
from neutron_lib.callbacks import exceptions as c_exc
from neutron_lib import exceptions as lib_exc
from neutron_lib.exceptions import l3 as lib_l3_exc
from oslo_utils import uuidutils
from six.moves import queue
from neutron.services.portforwarding.common import exceptions as pf_exc
from neutron.services.portforwarding import pf_plugin
from neutron.tests.unit.plugins.ml2 import base as ml2_test_base
class PortForwardingTestCaseBase(ml2_test_base.ML2TestFramework):
def setUp(self):
super(PortForwardingTestCaseBase, self).setUp()
self.pf_plugin = pf_plugin.PortForwardingPlugin()
def _create_floatingip(self, network_id, port_id=None,
fixed_ip_address=None):
body = {"floating_network_id": network_id,
"port_id": port_id,
"fixed_ip_address": fixed_ip_address,
"tenant_id": self._tenant_id,
"project_id": self._tenant_id}
return self.l3_plugin.create_floatingip(
self.context,
{"floatingip": body})
def _get_floatingip(self, floatingip_id):
return self.l3_plugin.get_floatingip(self.context, floatingip_id)
def _update_floatingip(self, fip_id, update_info):
return self.l3_plugin.update_floatingip(
self.context, fip_id, {"floatingip": update_info})
def _delete_floatingip(self, fip_id):
return self.l3_plugin.delete_floatingip(self.context, fip_id)
def _update_port(self, port_id, update_info):
return self.core_plugin.update_port(
self.context, port_id, {'port': update_info})
def _delete_port(self, port_id):
return self.core_plugin.delete_port(self.context, port_id)
def _add_router_interface(self, router_id, subnet_id):
interface_info = {"subnet_id": subnet_id}
self.l3_plugin.add_router_interface(
self.context, router_id, interface_info=interface_info)
def _set_router_gw(self, router_id, ext_net_id):
body = {
'router':
{'external_gateway_info': {'network_id': ext_net_id}}}
self.l3_plugin.update_router(self.context, router_id, body)
class PortForwardingTestCase(PortForwardingTestCaseBase):
def setUp(self):
super(PortForwardingTestCase, self).setUp()
self._prepare_env()
def _prepare_env(self):
self.router = self._create_router()
self.ext_net = self._create_network(
self.fmt, 'ext-net', True, arg_list=("router:external",),
**{"router:external": True}).json['network']
self.ext_subnet = self._create_subnet(
self.fmt, self.ext_net['id'], '172.24.2.0/24').json['subnet']
self.net = self._create_network(self.fmt, 'private', True).json[
'network']
self.subnet = self._create_subnet(self.fmt, self.net['id'],
'10.0.0.0/24').json['subnet']
self._set_router_gw(self.router['id'], self.ext_net['id'])
self._add_router_interface(self.router['id'], self.subnet['id'])
self.fip = self._create_floatingip(self.ext_net['id'])
self.port = self._create_port(self.fmt, self.net['id']).json['port']
self.port_forwarding = {
apidef.RESOURCE_NAME:
{apidef.EXTERNAL_PORT: 2225,
apidef.INTERNAL_PORT: 25,
apidef.INTERNAL_PORT_ID: self.port['id'],
apidef.PROTOCOL: "tcp",
apidef.INTERNAL_IP_ADDRESS:
self.port['fixed_ips'][0]['ip_address']}}
def test_create_floatingip_port_forwarding(self):
res = self.pf_plugin.create_floatingip_port_forwarding(
self.context, self.fip['id'], self.port_forwarding)
expect = {
"external_port": 2225,
"internal_port": 25,
"internal_port_id": self.port['id'],
"protocol": "tcp",
"internal_ip_address": self.port['fixed_ips'][0]['ip_address'],
'id': mock.ANY,
'router_id': self.router['id'],
'floating_ip_address': self.fip['floating_ip_address'],
'floatingip_id': self.fip['id']}
self.assertEqual(expect, res)
def test_negative_create_floatingip_port_forwarding(self):
self.pf_plugin.create_floatingip_port_forwarding(
self.context, self.fip['id'], self.port_forwarding)
        # This will fail, as the same params are used again
self.assertRaises(lib_exc.BadRequest,
self.pf_plugin.create_floatingip_port_forwarding,
self.context, self.fip['id'], self.port_forwarding)
def test_update_floatingip_port_forwarding(self):
# create a test port forwarding
res = self.pf_plugin.create_floatingip_port_forwarding(
self.context, self.fip['id'], self.port_forwarding)
# update the socket port only
update_body = {
apidef.RESOURCE_NAME: {
"external_port": 2226,
"internal_port": 26,
"protocol": "udp"
}
}
update_res = self.pf_plugin.update_floatingip_port_forwarding(
self.context, res['id'], self.fip['id'], update_body)
expect = {
"external_port": 2226,
"internal_port": 26,
"internal_port_id": self.port['id'],
"protocol": "udp",
"internal_ip_address": self.port['fixed_ips'][0]['ip_address'],
'id': res['id'],
'router_id': self.router['id'],
'floating_ip_address': self.fip['floating_ip_address'],
'floatingip_id': self.fip['id']}
self.assertEqual(expect, update_res)
        # update to a new neutron port; this should succeed
new_port = self._create_port(self.fmt, self.net['id']).json['port']
update_body = {
apidef.RESOURCE_NAME: {
"external_port": 2227,
"internal_port": 27,
"protocol": "tcp",
"internal_port_id": new_port['id'],
"internal_ip_address": new_port['fixed_ips'][0]['ip_address']
}
}
update_res = self.pf_plugin.update_floatingip_port_forwarding(
self.context, res['id'], self.fip['id'], update_body)
expect = {
"external_port": 2227,
"internal_port": 27,
"internal_port_id": new_port['id'],
"protocol": "tcp",
"internal_ip_address": new_port['fixed_ips'][0]['ip_address'],
'id': res['id'],
'router_id': self.router['id'],
'floating_ip_address': self.fip['floating_ip_address'],
'floatingip_id': self.fip['id']}
self.assertEqual(expect, update_res)
def test_negative_update_floatingip_port_forwarding(self):
# prepare a port forwarding
res = self.pf_plugin.create_floatingip_port_forwarding(
self.context, self.fip['id'], self.port_forwarding)
        # prepare another router and subnet, so a port created there
        # belongs to a different router
new_router = self._create_router()
new_subnet = self._create_subnet(self.fmt, self.net['id'],
'11.0.0.0/24').json['subnet']
self._set_router_gw(new_router['id'], self.ext_net['id'])
self._add_router_interface(new_router['id'], new_subnet['id'])
# create a port based on the new subnet
new_port = self._create_port(
self.fmt, self.net['id'],
fixed_ips=[{'subnet_id': new_subnet['id']}]).json['port']
update_body = {
apidef.RESOURCE_NAME: {
"external_port": 2227,
"internal_port": 27,
"protocol": "tcp",
"internal_port_id": new_port['id'],
"internal_ip_address": new_port['fixed_ips'][0]['ip_address']
}
}
        # This will fail, as the router_id found for the new port does
        # not match.
self.assertRaises(lib_exc.BadRequest,
self.pf_plugin.update_floatingip_port_forwarding,
self.context, res['id'], self.fip['id'], update_body)
        # There is already a port forwarding. We create another port
        # forwarding with the new_port, then update the new one with the
        # same params as the existing one.
new_port = self._create_port(self.fmt, self.net['id']).json['port']
self.port_forwarding[apidef.RESOURCE_NAME].update({
'internal_port_id': new_port['id'],
'internal_ip_address': new_port['fixed_ips'][0]['ip_address'],
'external_port': self.port_forwarding[
apidef.RESOURCE_NAME]['external_port'] + 1
})
new_res = self.pf_plugin.create_floatingip_port_forwarding(
self.context, self.fip['id'], self.port_forwarding)
self.port_forwarding[apidef.RESOURCE_NAME].update({
'internal_port_id': self.port['id'],
'internal_ip_address': self.port['fixed_ips'][0]['ip_address'],
'external_port': self.port_forwarding[
apidef.RESOURCE_NAME]['external_port'] - 1
})
        # This will fail, as it would create a duplicate record.
self.assertRaises(lib_exc.BadRequest,
self.pf_plugin.update_floatingip_port_forwarding,
self.context, new_res['id'], self.fip['id'],
update_body)
def test_delete_floatingip_port_forwarding(self):
# create two port forwardings for a floatingip
pf_1 = self.pf_plugin.create_floatingip_port_forwarding(
self.context, self.fip['id'], self.port_forwarding)
new_port = self._create_port(self.fmt, self.net['id']).json['port']
self.port_forwarding[apidef.RESOURCE_NAME].update({
'external_port': 2226,
'internal_port_id': new_port['id'],
'internal_ip_address': new_port['fixed_ips'][0]['ip_address']
})
pf_2 = self.pf_plugin.create_floatingip_port_forwarding(
self.context, self.fip['id'], self.port_forwarding)
floatingip = self._get_floatingip(self.fip['id'])
self.assertEqual(self.router['id'], floatingip['router_id'])
        # delete pf_1 and check that the router_id of the floatingip does
        # not change.
self.pf_plugin.delete_floatingip_port_forwarding(
self.context, pf_1['id'], self.fip['id'])
exist_pfs = self.pf_plugin.get_floatingip_port_forwardings(
self.context, floatingip_id=self.fip['id'])
self.assertEqual(1, len(exist_pfs))
self.assertEqual(pf_2['id'], exist_pfs[0]['id'])
# delete pf_2, it's the last port forwarding of floatingip.
self.pf_plugin.delete_floatingip_port_forwarding(
self.context, pf_2['id'], self.fip['id'])
exist_pfs = self.pf_plugin.get_floatingip_port_forwardings(
self.context, floatingip_id=self.fip['id'])
self.assertEqual(0, len(exist_pfs))
floatingip = self._get_floatingip(self.fip['id'])
self.assertIsNone(floatingip['router_id'])
def test_negative_delete_floatingip_port_forwarding(self):
# prepare a good port forwarding
res = self.pf_plugin.create_floatingip_port_forwarding(
self.context, self.fip['id'], self.port_forwarding)
# pass non-existing port forwarding id
self.assertRaises(pf_exc.PortForwardingNotFound,
self.pf_plugin.delete_floatingip_port_forwarding,
self.context, uuidutils.generate_uuid(),
self.fip['id'])
# pass existing port forwarding but non-existing floatingip_id
self.assertRaises(pf_exc.PortForwardingNotFound,
self.pf_plugin.delete_floatingip_port_forwarding,
self.context, res['id'], uuidutils.generate_uuid())
def _simulate_concurrent_requests_process_and_raise(
self, funcs, args_list):
class SimpleThread(threading.Thread):
def __init__(self, q):
super(SimpleThread, self).__init__()
self.q = q
self.exception = None
def run(self):
try:
while not self.q.empty():
item = None
try:
item = self.q.get(False)
func, func_args = item[0], item[1]
func(*func_args)
except queue.Empty:
pass
finally:
if item:
self.q.task_done()
except Exception as e:
self.exception = e
def get_exception(self):
return self.exception
q = queue.Queue()
for func, func_args in zip(funcs, args_list):
q.put_nowait((func, func_args))
threads = []
for _ in range(len(funcs)):
t = SimpleThread(q)
threads.append(t)
t.start()
q.join()
for t in threads:
e = t.get_exception()
if e:
raise e
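    # Added note: the helper above runs each (func, args) pair on its own
    # thread via a shared queue and re-raises the first captured exception,
    # e.g. (func_a/func_b are illustrative placeholders):
    #
    #     self._simulate_concurrent_requests_process_and_raise(
    #         [func_a, func_b], [(arg_a,), (arg_b,)])
    #
    # The test methods below use it to provoke races between port-forwarding
    # calls and floating IP / port operations.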
def test_concurrent_create_port_forwarding_delete_fip(self):
func1 = self.pf_plugin.create_floatingip_port_forwarding
func2 = self._delete_floatingip
funcs = [func1, func2]
args_list = [(self.context, self.fip['id'], self.port_forwarding),
(self.fip['id'],)]
self.assertRaises(c_exc.CallbackFailure,
self._simulate_concurrent_requests_process_and_raise,
funcs, args_list)
port_forwardings = self.pf_plugin.get_floatingip_port_forwardings(
self.context, floatingip_id=self.fip['id'], fields=['id'])
self.pf_plugin.delete_floatingip_port_forwarding(
self.context, port_forwardings[0][apidef.ID],
floatingip_id=self.fip['id'])
funcs.reverse()
args_list.reverse()
self.assertRaises(lib_l3_exc.FloatingIPNotFound,
self._simulate_concurrent_requests_process_and_raise,
funcs, args_list)
def test_concurrent_create_port_forwarding_update_fip(self):
newport = self._create_port(self.fmt, self.net['id']).json['port']
func1 = self.pf_plugin.create_floatingip_port_forwarding
func2 = self._update_floatingip
funcs = [func1, func2]
args_list = [(self.context, self.fip['id'], self.port_forwarding),
(self.fip['id'], {'port_id': newport['id']})]
self.assertRaises(c_exc.CallbackFailure,
self._simulate_concurrent_requests_process_and_raise,
funcs, args_list)
funcs.reverse()
args_list.reverse()
self.assertRaises(c_exc.CallbackFailure,
self._simulate_concurrent_requests_process_and_raise,
funcs, args_list)
def test_concurrent_create_port_forwarding_update_port(self):
new_ip = str(
netaddr.IPAddress(self.port['fixed_ips'][0]['ip_address']) + 2)
funcs = [self.pf_plugin.create_floatingip_port_forwarding,
self._update_port]
args_list = [(self.context, self.fip['id'], self.port_forwarding),
(self.port['id'], {
'fixed_ips': [{'subnet_id': self.subnet['id'],
'ip_address': new_ip}]})]
self._simulate_concurrent_requests_process_and_raise(funcs, args_list)
self.assertEqual([], self.pf_plugin.get_floatingip_port_forwardings(
self.context, floatingip_id=self.fip['id']))
def test_concurrent_create_port_forwarding_delete_port(self):
funcs = [self.pf_plugin.create_floatingip_port_forwarding,
self._delete_port]
args_list = [(self.context, self.fip['id'], self.port_forwarding),
(self.port['id'],)]
self._simulate_concurrent_requests_process_and_raise(funcs, args_list)
self.assertEqual([], self.pf_plugin.get_floatingip_port_forwardings(
self.context, floatingip_id=self.fip['id']))
|
{
"content_hash": "7433c1b5745de5a506006f2a6819bc49",
"timestamp": "",
"source": "github",
"line_count": 375,
"max_line_length": 79,
"avg_line_length": 44.157333333333334,
"alnum_prop": 0.5648891841294764,
"repo_name": "huntxu/neutron",
"id": "7ff316916706003e024ca2836b778e1da2b29fc7",
"size": "17132",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "neutron/tests/functional/services/portforwarding/test_port_forwarding.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1047"
},
{
"name": "Python",
"bytes": "11111676"
},
{
"name": "Shell",
"bytes": "37514"
}
],
"symlink_target": ""
}
|
import pygame
import screens.credits as credits
import screens.highscores as highscores
import screens.settings as settings
import screens.experience as exp
import screens.instructions as instructions
import screens.sound as sound
import screens.termination as termination
class MainScreen:
def __init__(self, canvas):
self.canvas = canvas
self.image = pygame.image.load('resources/screens/' + canvas.language + '/main_menu.jpg')
if sound.current_song != 'intro':
sound.Plopperdeplop.music(self, 'intro')
# Draws the components of this main menu screen.
def draw(self, surface):
surface.blit(self.image, (0, 0))
# Handles an event.
def on_event(self, event): # TODO use widget.button instead of hardcoding
if event.type == pygame.MOUSEBUTTONDOWN:
            x, y = pygame.mouse.get_pos()
if x >= 350 and y >= 140 and x <= 670 and y <= 214:
sound.Plopperdeplop.tune(self, 'click')
self.canvas.set_screen(exp.ExperienceScreen(self.canvas))
elif x >= 354 and y >= 250 and x <= 668 and y <= 320:
sound.Plopperdeplop.tune(self, 'click')
self.canvas.set_screen(instructions.InstructionsScreen(self.canvas))
elif x >= 359 and y >= 355 and x <= 670 and y <= 428:
sound.Plopperdeplop.tune(self, 'click')
self.canvas.set_screen(settings.SettingsScreen(self.canvas, self))
elif x >= 356 and y >= 458 and x <= 667 and y <= 538:
sound.Plopperdeplop.tune(self, 'click')
self.canvas.set_screen(highscores.HighscoresScreen(self.canvas, self))
elif x >= 914 and y >= 603 and x <= 1001 and y <= 679:
sound.Plopperdeplop.tune(self, 'click')
self.canvas.set_screen(credits.CreditsScreen(self.canvas))
elif x >= 20 and y >= 603 and x <= 108 and y <= 675:
sound.Plopperdeplop.tune(self, 'click')
self.canvas.set_screen(termination.ExitScreen(self.canvas, self))
# Updates this main menu screen.
def update(self):
pass
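# Illustrative sketch (added; prompted by the TODO above): a minimal hit-box
# button that could replace the hardcoded coordinate checks. The Button name
# and its wiring are hypothetical, not part of this project's widget API.
#
#     class Button:
#         def __init__(self, rect, on_click):
#             self.rect = pygame.Rect(rect)  # (x, y, width, height)
#             self.on_click = on_click
#
#         def handle(self, pos):
#             # fire the callback when the click lands inside the rect
#             if self.rect.collidepoint(pos):
#                 self.on_click()
#                 return True
#             return False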
|
{
"content_hash": "953a9c47848cd388da1a8b3a0901ffa7",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 97,
"avg_line_length": 42.53703703703704,
"alnum_prop": 0.6060078363082281,
"repo_name": "sinoz/boat-wars",
"id": "146935af1b88fa7bfcd13563734cfba1d7ae1aa6",
"size": "2297",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/screens/main_menu.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "88691"
},
{
"name": "SQLPL",
"bytes": "648"
}
],
"symlink_target": ""
}
|
import unittest
from ctree.jit import *
from ctree import CONFIG
from fixtures.sample_asts import *
import ctypes
import os  # used by test_getFile below
from ctree.transformations import PyBasicConversions
class TestTranslator(LazySpecializedFunction):
def args_to_subconfig(self, args):
return {'arg_typesig': tuple(type(get_ctype(a)) for a in args)}
def transform(self, tree, program_config):
arg_types = program_config[0]['arg_typesig']
tree = PyBasicConversions().visit(tree.body[0])
tree.return_type = arg_types[0]()
for param, ty in zip(tree.params, arg_types):
param.type = ty()
return [CFile(tree.name, [tree])]
def finalize(self, transform_result, program_config):
proj = Project(transform_result)
cfile = transform_result[0]
arg_types = program_config[0]['arg_typesig']
func_type = ctypes.CFUNCTYPE(arg_types[0], *arg_types)
return BasicFunction(cfile.name, proj, func_type)
class BasicFunction(ConcreteSpecializedFunction):
def __init__(self, entry, tree, typesig):
self._c_function = self._compile(entry, tree, typesig)
def __call__(self, *args, **kwargs):
return self._c_function(*args, **kwargs)
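# Illustrative flow (added): for a call like
# TestTranslator.from_function(f, 'name')(3), the specialization pipeline is
# roughly
#     args_to_subconfig((3,))  ->  {'arg_typesig': (c_long,)}  # exact ctype
#                                                              # may vary
#     transform(...)           ->  [CFile('f', [typed C function])]
#     finalize(...)            ->  BasicFunction wrapping the compiled entry
# (names are this test's own classes; the ctype shown is an assumption).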
class TestJit(unittest.TestCase):
def test_identity(self):
mod = JitModule()
submod = CFile("test_identity", [identity_ast], path=CONFIG.get('jit','COMPILE_PATH')). \
_compile(identity_ast.codegen())
mod._link_in(submod)
c_identity_fn = mod.get_callable(identity_ast.name,
identity_ast.get_type())
self.assertEqual(identity(1), c_identity_fn(1))
self.assertEqual(identity(12), c_identity_fn(12))
self.assertEqual(identity(123), c_identity_fn(123))
def test_fib(self):
mod = JitModule()
submod = CFile("test_fib", [fib_ast], path=CONFIG.get('jit','COMPILE_PATH'))._compile(fib_ast.codegen())
mod._link_in(submod)
c_fib_fn = mod.get_callable(fib_ast.name,
fib_ast.get_type())
self.assertEqual(fib(1), c_fib_fn(1))
self.assertEqual(fib(6), c_fib_fn(6))
def test_gcd(self):
mod = JitModule()
submod = CFile("test_gcd", [gcd_ast], path=CONFIG.get('jit','COMPILE_PATH'))._compile(gcd_ast.codegen())
mod._link_in(submod)
c_gcd_fn = mod.get_callable(gcd_ast.name,
gcd_ast.get_type())
self.assertEqual(gcd(44, 122), c_gcd_fn(44, 122))
self.assertEqual(gcd(27, 39), c_gcd_fn(27, 39))
def test_choose(self):
mod = JitModule()
submod = CFile("test_choose", [choose_ast], path=CONFIG.get('jit','COMPILE_PATH')). \
_compile(choose_ast.codegen())
mod._link_in(submod)
c_choose_fn = mod.get_callable(choose_ast.name,
choose_ast.get_type())
self.assertEqual(choose(0.2, 44, 122), c_choose_fn(0.2, 44, 122))
self.assertEqual(choose(0.8, 44, 122), c_choose_fn(0.8, 44, 122))
self.assertEqual(choose(0.3, 27, 39), c_choose_fn(0.3, 27, 39))
self.assertEqual(choose(0.7, 27, 39), c_choose_fn(0.7, 27, 39))
def test_l2norm(self):
mod = JitModule()
submod = CFile("test_l2norm",
[l2norm_ast], path=CONFIG.get('jit','COMPILE_PATH'))._compile(l2norm_ast.codegen())
mod._link_in(submod)
entry = l2norm_ast.find(FunctionDecl, name="l2norm")
c_l2norm_fn = mod.get_callable(entry.name, entry.get_type())
self.assertEqual(l2norm(np.ones(12, dtype=np.float64)),
c_l2norm_fn(np.ones(12, dtype=np.float64), 12))
def test_getFile(self):
getFile(os.path.join(CONFIG.get('jit','COMPILE_PATH'),'test_l2norm.c'))
class TestAuxiliary(unittest.TestCase):
def test_NameExtractor(self):
def f(x):
return x + 3
py_ast = get_ast(f)
result = LazySpecializedFunction.NameExtractor().visit(py_ast)
self.assertEqual(result, 'f')
def test_from_function(self):
def f(x):
return x + 3
c_f = TestTranslator.from_function(f, 'test_from_function')
self.assertEqual(c_f(3), 6)
|
{
"content_hash": "54aada9940b03a7708d388e6f24ee4a1",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 112,
"avg_line_length": 40.132075471698116,
"alnum_prop": 0.5937940761636107,
"repo_name": "ucb-sejits/ctree",
"id": "f4d452ac5c25d5022a7634794ec49181218db2ac",
"size": "4254",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/test_jit.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Mako",
"bytes": "820"
},
{
"name": "Python",
"bytes": "249654"
},
{
"name": "Shell",
"bytes": "1396"
}
],
"symlink_target": ""
}
|
"""
TCP support for IOCP reactor
"""
import socket, operator, errno, struct
from zope.interface import implements, classImplements
from twisted.internet import interfaces, error, address, main, defer
from twisted.internet.abstract import _LogOwner, isIPAddress, isIPv6Address
from twisted.internet.tcp import _SocketCloser, Connector as TCPConnector
from twisted.internet.tcp import _AbortingMixin, _BaseBaseClient, _BaseTCPClient
from twisted.python import log, failure, reflect, util
from twisted.internet.iocpreactor import iocpsupport as _iocp, abstract
from twisted.internet.iocpreactor.interfaces import IReadWriteHandle
from twisted.internet.iocpreactor.const import ERROR_IO_PENDING
from twisted.internet.iocpreactor.const import SO_UPDATE_CONNECT_CONTEXT
from twisted.internet.iocpreactor.const import SO_UPDATE_ACCEPT_CONTEXT
from twisted.internet.iocpreactor.const import ERROR_CONNECTION_REFUSED
from twisted.internet.iocpreactor.const import ERROR_NETWORK_UNREACHABLE
try:
from twisted.internet._newtls import startTLS as _startTLS
except ImportError:
_startTLS = None
# ConnectEx returns these. XXX: find out what it does for timeout
connectExErrors = {
ERROR_CONNECTION_REFUSED: errno.WSAECONNREFUSED,
ERROR_NETWORK_UNREACHABLE: errno.WSAENETUNREACH,
}
class Connection(abstract.FileHandle, _SocketCloser, _AbortingMixin):
"""
@ivar TLS: C{False} to indicate the connection is in normal TCP mode,
C{True} to indicate that TLS has been started and that operations must
be routed through the L{TLSMemoryBIOProtocol} instance.
"""
implements(IReadWriteHandle, interfaces.ITCPTransport,
interfaces.ISystemHandle)
TLS = False
def __init__(self, sock, proto, reactor=None):
abstract.FileHandle.__init__(self, reactor)
self.socket = sock
self.getFileHandle = sock.fileno
self.protocol = proto
def getHandle(self):
return self.socket
def dataReceived(self, rbuffer):
# XXX: some day, we'll have protocols that can handle raw buffers
self.protocol.dataReceived(str(rbuffer))
def readFromHandle(self, bufflist, evt):
return _iocp.recv(self.getFileHandle(), bufflist, evt)
def writeToHandle(self, buff, evt):
"""
Send C{buff} to current file handle using C{_iocp.send}. The buffer
sent is limited to a size of C{self.SEND_LIMIT}.
"""
return _iocp.send(self.getFileHandle(),
buffer(buff, 0, self.SEND_LIMIT), evt)
def _closeWriteConnection(self):
try:
getattr(self.socket, self._socketShutdownMethod)(1)
except socket.error:
pass
p = interfaces.IHalfCloseableProtocol(self.protocol, None)
if p:
try:
p.writeConnectionLost()
except:
f = failure.Failure()
log.err()
self.connectionLost(f)
def readConnectionLost(self, reason):
p = interfaces.IHalfCloseableProtocol(self.protocol, None)
if p:
try:
p.readConnectionLost()
except:
log.err()
self.connectionLost(failure.Failure())
else:
self.connectionLost(reason)
def connectionLost(self, reason):
if self.disconnected:
return
abstract.FileHandle.connectionLost(self, reason)
isClean = (reason is None or
not reason.check(error.ConnectionAborted))
self._closeSocket(isClean)
protocol = self.protocol
del self.protocol
del self.socket
del self.getFileHandle
protocol.connectionLost(reason)
def logPrefix(self):
"""
Return the prefix to log with when I own the logging thread.
"""
return self.logstr
def getTcpNoDelay(self):
return operator.truth(self.socket.getsockopt(socket.IPPROTO_TCP,
socket.TCP_NODELAY))
def setTcpNoDelay(self, enabled):
self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, enabled)
def getTcpKeepAlive(self):
return operator.truth(self.socket.getsockopt(socket.SOL_SOCKET,
socket.SO_KEEPALIVE))
def setTcpKeepAlive(self, enabled):
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, enabled)
if _startTLS is not None:
def startTLS(self, contextFactory, normal=True):
"""
@see: L{ITLSTransport.startTLS}
"""
_startTLS(self, contextFactory, normal, abstract.FileHandle)
def write(self, data):
"""
Write some data, either directly to the underlying handle or, if TLS
has been started, to the L{TLSMemoryBIOProtocol} for it to encrypt and
send.
@see: L{ITCPTransport.write}
"""
if self.disconnected:
return
if self.TLS:
self.protocol.write(data)
else:
abstract.FileHandle.write(self, data)
def writeSequence(self, iovec):
"""
Write some data, either directly to the underlying handle or, if TLS
has been started, to the L{TLSMemoryBIOProtocol} for it to encrypt and
send.
@see: L{ITCPTransport.writeSequence}
"""
if self.disconnected:
return
if self.TLS:
self.protocol.writeSequence(iovec)
else:
abstract.FileHandle.writeSequence(self, iovec)
def loseConnection(self, reason=None):
"""
Close the underlying handle or, if TLS has been started, first shut it
down.
@see: L{ITCPTransport.loseConnection}
"""
if self.TLS:
if self.connected and not self.disconnecting:
self.protocol.loseConnection()
else:
abstract.FileHandle.loseConnection(self, reason)
def registerProducer(self, producer, streaming):
"""
Register a producer.
If TLS is enabled, the TLS connection handles this.
"""
if self.TLS:
# Registering a producer before we're connected shouldn't be a
# problem. If we end up with a write(), that's already handled in
# the write() code above, and there are no other potential
# side-effects.
self.protocol.registerProducer(producer, streaming)
else:
abstract.FileHandle.registerProducer(self, producer, streaming)
def unregisterProducer(self):
"""
Unregister a producer.
If TLS is enabled, the TLS connection handles this.
"""
if self.TLS:
self.protocol.unregisterProducer()
else:
abstract.FileHandle.unregisterProducer(self)
if _startTLS is not None:
classImplements(Connection, interfaces.ITLSTransport)
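# Added note (illustrative): startTLS/ITLSTransport are only attached when
# twisted.internet._newtls is importable, so callers can feature-test a
# transport before use, e.g. (contextFactory is hypothetical here):
#
#     if interfaces.ITLSTransport.providedBy(transport):
#         transport.startTLS(contextFactory)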
class Client(_BaseBaseClient, _BaseTCPClient, Connection):
"""
@ivar _tlsClientDefault: Always C{True}, indicating that this is a client
connection, and by default when TLS is negotiated this class will act as
a TLS client.
"""
addressFamily = socket.AF_INET
socketType = socket.SOCK_STREAM
_tlsClientDefault = True
_commonConnection = Connection
def __init__(self, host, port, bindAddress, connector, reactor):
# ConnectEx documentation says socket _has_ to be bound
if bindAddress is None:
bindAddress = ('', 0)
self.reactor = reactor # createInternetSocket needs this
_BaseTCPClient.__init__(self, host, port, bindAddress, connector,
reactor)
def createInternetSocket(self):
"""
Create a socket registered with the IOCP reactor.
@see: L{_BaseTCPClient}
"""
return self.reactor.createSocket(self.addressFamily, self.socketType)
def _collectSocketDetails(self):
"""
Clean up potentially circular references to the socket and to its
C{getFileHandle} method.
@see: L{_BaseBaseClient}
"""
del self.socket, self.getFileHandle
def _stopReadingAndWriting(self):
"""
Remove the active handle from the reactor.
@see: L{_BaseBaseClient}
"""
self.reactor.removeActiveHandle(self)
def cbConnect(self, rc, bytes, evt):
if rc:
rc = connectExErrors.get(rc, rc)
self.failIfNotConnected(error.getConnectError((rc,
errno.errorcode.get(rc, 'Unknown error'))))
else:
self.socket.setsockopt(
socket.SOL_SOCKET, SO_UPDATE_CONNECT_CONTEXT,
struct.pack('P', self.socket.fileno()))
self.protocol = self.connector.buildProtocol(self.getPeer())
self.connected = True
logPrefix = self._getLogPrefix(self.protocol)
self.logstr = logPrefix + ",client"
self.protocol.makeConnection(self)
self.startReading()
def doConnect(self):
if not hasattr(self, "connector"):
            # this happens if connector.stopConnecting() is called from
            # factory.startedConnecting
return
assert _iocp.have_connectex
self.reactor.addActiveHandle(self)
evt = _iocp.Event(self.cbConnect, self)
rc = _iocp.connect(self.socket.fileno(), self.realAddress, evt)
if rc and rc != ERROR_IO_PENDING:
self.cbConnect(rc, 0, evt)
class Server(Connection):
"""
Serverside socket-stream connection class.
I am a serverside network connection transport; a socket which came from an
accept() on a server.
@ivar _tlsClientDefault: Always C{False}, indicating that this is a server
connection, and by default when TLS is negotiated this class will act as
a TLS server.
"""
_tlsClientDefault = False
def __init__(self, sock, protocol, clientAddr, serverAddr, sessionno, reactor):
"""
Server(sock, protocol, client, server, sessionno)
Initialize me with a socket, a protocol, a descriptor for my peer (a
tuple of host, port describing the other end of the connection), an
instance of Port, and a session number.
"""
Connection.__init__(self, sock, protocol, reactor)
self.serverAddr = serverAddr
self.clientAddr = clientAddr
self.sessionno = sessionno
logPrefix = self._getLogPrefix(self.protocol)
self.logstr = "%s,%s,%s" % (logPrefix, sessionno, self.clientAddr.host)
self.repstr = "<%s #%s on %s>" % (self.protocol.__class__.__name__,
self.sessionno, self.serverAddr.port)
self.connected = True
self.startReading()
def __repr__(self):
"""
A string representation of this connection.
"""
return self.repstr
def getHost(self):
"""
        Returns an IPv4Address or IPv6Address.
This indicates the server's address.
"""
return self.serverAddr
def getPeer(self):
"""
        Returns an IPv4Address or IPv6Address.
This indicates the client's address.
"""
return self.clientAddr
class Connector(TCPConnector):
def _makeTransport(self):
return Client(self.host, self.port, self.bindAddress, self,
self.reactor)
class Port(_SocketCloser, _LogOwner):
implements(interfaces.IListeningPort)
connected = False
disconnected = False
disconnecting = False
addressFamily = socket.AF_INET
socketType = socket.SOCK_STREAM
_addressType = address.IPv4Address
sessionno = 0
# Actual port number being listened on, only set to a non-None
# value when we are actually listening.
_realPortNumber = None
# A string describing the connections which will be created by this port.
# Normally this is C{"TCP"}, since this is a TCP port, but when the TLS
# implementation re-uses this class it overrides the value with C{"TLS"}.
# Only used for logging.
_type = 'TCP'
def __init__(self, port, factory, backlog=50, interface='', reactor=None):
self.port = port
self.factory = factory
self.backlog = backlog
self.interface = interface
self.reactor = reactor
if isIPv6Address(interface):
self.addressFamily = socket.AF_INET6
self._addressType = address.IPv6Address
def __repr__(self):
if self._realPortNumber is not None:
return "<%s of %s on %s>" % (self.__class__,
self.factory.__class__,
self._realPortNumber)
else:
return "<%s of %s (not listening)>" % (self.__class__,
self.factory.__class__)
def startListening(self):
try:
skt = self.reactor.createSocket(self.addressFamily,
self.socketType)
# TODO: resolve self.interface if necessary
if self.addressFamily == socket.AF_INET6:
addr = socket.getaddrinfo(self.interface, self.port)[0][4]
else:
addr = (self.interface, self.port)
skt.bind(addr)
except socket.error, le:
raise error.CannotListenError, (self.interface, self.port, le)
self.addrLen = _iocp.maxAddrLen(skt.fileno())
# Make sure that if we listened on port 0, we update that to
# reflect what the OS actually assigned us.
self._realPortNumber = skt.getsockname()[1]
log.msg("%s starting on %s" % (self._getLogPrefix(self.factory),
self._realPortNumber))
self.factory.doStart()
skt.listen(self.backlog)
self.connected = True
self.disconnected = False
self.reactor.addActiveHandle(self)
self.socket = skt
self.getFileHandle = self.socket.fileno
self.doAccept()
def loseConnection(self, connDone=failure.Failure(main.CONNECTION_DONE)):
"""
Stop accepting connections on this port.
This will shut down my socket and call self.connectionLost().
It returns a deferred which will fire successfully when the
port is actually closed.
"""
self.disconnecting = True
if self.connected:
self.deferred = defer.Deferred()
self.reactor.callLater(0, self.connectionLost, connDone)
return self.deferred
stopListening = loseConnection
def _logConnectionLostMsg(self):
"""
Log message for closing port
"""
log.msg('(%s Port %s Closed)' % (self._type, self._realPortNumber))
def connectionLost(self, reason):
"""
Cleans up the socket.
"""
self._logConnectionLostMsg()
self._realPortNumber = None
d = None
if hasattr(self, "deferred"):
d = self.deferred
del self.deferred
self.disconnected = True
self.reactor.removeActiveHandle(self)
self.connected = False
self._closeSocket(True)
del self.socket
del self.getFileHandle
try:
self.factory.doStop()
except:
self.disconnecting = False
if d is not None:
d.errback(failure.Failure())
else:
raise
else:
self.disconnecting = False
if d is not None:
d.callback(None)
def logPrefix(self):
"""
Returns the name of my class, to prefix log entries with.
"""
return reflect.qual(self.factory.__class__)
def getHost(self):
"""
        Returns an IPv4Address or IPv6Address.
This indicates the server's address.
"""
host, port = self.socket.getsockname()[:2]
return self._addressType('TCP', host, port)
def cbAccept(self, rc, bytes, evt):
self.handleAccept(rc, evt)
if not (self.disconnecting or self.disconnected):
self.doAccept()
def handleAccept(self, rc, evt):
if self.disconnecting or self.disconnected:
return False
# possible errors:
# (WSAEMFILE, WSAENOBUFS, WSAENFILE, WSAENOMEM, WSAECONNABORTED)
if rc:
log.msg("Could not accept new connection -- %s (%s)" %
(errno.errorcode.get(rc, 'unknown error'), rc))
return False
else:
evt.newskt.setsockopt(
socket.SOL_SOCKET, SO_UPDATE_ACCEPT_CONTEXT,
struct.pack('P', self.socket.fileno()))
family, lAddr, rAddr = _iocp.get_accept_addrs(evt.newskt.fileno(),
evt.buff)
assert family == self.addressFamily
protocol = self.factory.buildProtocol(
self._addressType('TCP', rAddr[0], rAddr[1]))
if protocol is None:
evt.newskt.close()
else:
s = self.sessionno
self.sessionno = s+1
transport = Server(evt.newskt, protocol,
self._addressType('TCP', rAddr[0], rAddr[1]),
self._addressType('TCP', lAddr[0], lAddr[1]),
s, self.reactor)
protocol.makeConnection(transport)
return True
def doAccept(self):
evt = _iocp.Event(self.cbAccept, self)
# see AcceptEx documentation
evt.buff = buff = _iocp.AllocateReadBuffer(2 * (self.addrLen + 16))
evt.newskt = newskt = self.reactor.createSocket(self.addressFamily,
self.socketType)
rc = _iocp.accept(self.socket.fileno(), newskt.fileno(), buff, evt)
if rc and rc != ERROR_IO_PENDING:
self.handleAccept(rc, evt)
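# Usage sketch (editorial addition, not part of the original module): the
# Port/Connection classes above are normally reached by installing the
# IOCP reactor on Windows before the default reactor is imported. The echo
# protocol below is hypothetical and only illustrates the wiring.
#
#     from twisted.internet import iocpreactor
#     iocpreactor.install()
#     from twisted.internet import protocol, reactor
#
#     class Echo(protocol.Protocol):
#         def dataReceived(self, data):
#             self.transport.write(data)
#
#     factory = protocol.ServerFactory()
#     factory.protocol = Echo
#     reactor.listenTCP(8000, factory)  # constructs the Port defined above
#     reactor.run()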
|
{
"content_hash": "1e2c44486092ddee45ce3cc347cd46b8",
"timestamp": "",
"source": "github",
"line_count": 575,
"max_line_length": 83,
"avg_line_length": 31.63478260869565,
"alnum_prop": 0.5960417811984607,
"repo_name": "normanmaurer/autobahntestsuite-maven-plugin",
"id": "d34f698d9a5d07038a96f6ddd10d26f16148361c",
"size": "18263",
"binary": false,
"copies": "18",
"ref": "refs/heads/master",
"path": "src/main/resources/twisted/internet/iocpreactor/tcp.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "106"
},
{
"name": "C",
"bytes": "70690"
},
{
"name": "C++",
"bytes": "1291"
},
{
"name": "CSS",
"bytes": "6075"
},
{
"name": "GAP",
"bytes": "14120"
},
{
"name": "Gherkin",
"bytes": "2218"
},
{
"name": "HTML",
"bytes": "56655"
},
{
"name": "Java",
"bytes": "24931"
},
{
"name": "JavaScript",
"bytes": "9151"
},
{
"name": "Python",
"bytes": "13888733"
},
{
"name": "Shell",
"bytes": "1406"
},
{
"name": "Smarty",
"bytes": "393"
}
],
"symlink_target": ""
}
|
""" Ontology attributes
:Author: Jonathan Karr <karr@mssm.edu>
:Date: 2019-01-14
:Copyright: 2019, Karr Lab
:License: MIT
"""
from . import core
import pronto
import wc_utils.workbook.io
import wc_utils.util.ontology
class OntologyAttribute(core.LiteralAttribute):
""" Ontology attribute
Attributes:
ontology (:obj:`pronto.Ontology`): ontology
namespace (:obj:`str`): prefix in term ids
terms (:obj:`list` of :obj:`pronto.term.Term`): list of allowed terms. If :obj:`None`, all terms are allowed.
none (:obj:`bool`): if :obj:`False`, the attribute is invalid if its value is :obj:`None`
"""
def __init__(self, ontology, namespace=None, terms=None, none=True, default=None, default_cleaned_value=None, none_value=None,
verbose_name='', description='',
primary=False, unique=False, unique_case_insensitive=False):
"""
Args:
ontology (:obj:`pronto.Ontology`): ontology
namespace (:obj:`str`, optional): prefix in term ids
terms (:obj:`list` of :obj:`pronto.term.Term`, optional): list of allowed terms. If :obj:`None`, all terms are allowed.
none (:obj:`bool`, optional): if :obj:`False`, the attribute is invalid if its value is :obj:`None`
default (:obj:`pronto.term.Term`, optional): default value
default_cleaned_value (:obj:`pronto.term.Term`, optional): value to replace
:obj:`None` values with during cleaning
none_value (:obj:`object`, optional): none value
verbose_name (:obj:`str`, optional): verbose name
description (:obj:`str`, optional): description
primary (:obj:`bool`, optional): indicate if attribute is primary attribute
unique (:obj:`bool`, optional): indicate if attribute value must be unique
unique_case_insensitive (:obj:`bool`, optional): if true, conduct case-insensitive test of uniqueness
Raises:
:obj:`ValueError`: if :obj:`ontology` is not an instance of :obj:`pronto.Ontology`,
:obj:`ValueError`: if :obj:`default` not in :obj:`ontology`
:obj:`ValueError`: if :obj:`default_cleaned_value` not in :obj:`ontology`
"""
if not isinstance(ontology, pronto.Ontology):
raise ValueError('`ontology` must be an instance of `pronto.Ontology`')
if isinstance(terms, list):
for term in terms:
if not isinstance(term, pronto.term.Term) or term not in ontology:
raise ValueError('element {} of `terms` must be in `ontology`'.format(term))
if default is not None and \
(not isinstance(default, pronto.term.Term) or
default not in ontology or
(isinstance(terms, list) and default not in terms)):
raise ValueError(
'`default` must be `None` or in `terms`')
if default_cleaned_value is not None and \
(not isinstance(default_cleaned_value, pronto.term.Term) or
default_cleaned_value not in ontology or
(isinstance(terms, list) and default_cleaned_value not in terms)):
raise ValueError(
'`default_cleaned_value` must be `None` or in `terms`')
super(OntologyAttribute, self).__init__(default=default,
default_cleaned_value=default_cleaned_value, none_value=none_value,
verbose_name=verbose_name, description=description,
primary=primary, unique=unique, unique_case_insensitive=unique_case_insensitive)
self.ontology = ontology
self.namespace = namespace
self.terms = terms
self.none = none
def get_default(self):
""" Get default value for attribute
Returns:
:obj:`object`: initial value
"""
return self.default
def get_default_cleaned_value(self):
""" Get value to replace :obj:`None` values with during cleaning
Returns:
:obj:`object`: initial value
"""
return self.default_cleaned_value
def value_equal(self, val1, val2, tol=0.):
""" Determine if attribute values are equal
Args:
val1 (:obj:`pronto.Term`): first value
val2 (:obj:`pronto.Term`): second value
tol (:obj:`float`, optional): equality tolerance
Returns:
:obj:`bool`: True if attribute values are equal
"""
return wc_utils.util.ontology.are_terms_equivalent(val1, val2)
def clean(self, value):
""" Convert attribute value into the appropriate type
Args:
value (:obj:`object`): value of attribute to clean
Returns:
:obj:`pronto.term.Term` or :obj:`None`: cleaned value
:obj:`core.InvalidAttribute` or :obj:`None`: cleaning error
"""
error = None
if value is None or value == '':
value = self.get_default_cleaned_value()
elif isinstance(value, str):
value = value.partition('!')[0].strip()
if value and self.namespace:
value = self.namespace + ':' + value
str_value = value
value = self.ontology.get(value, str_value)
if isinstance(value, str):
error = 'Value "{}" is not in `ontology`'.format(value)
elif isinstance(value, pronto.term.Term):
if value not in self.ontology:
error = "Value '{}' must be in `ontology`".format(value)
if value and isinstance(self.terms, list) and value not in self.terms:
error = "Value '{}' must be in `terms`".format(value)
if error:
return (value, core.InvalidAttribute(self, [error]))
else:
return (value, None)
def validate(self, obj, value):
""" Determine if `value` is a valid value of the attribute
Args:
obj (:obj:`Model`): object being validated
value (:obj:`pronto.term.Term`): value of attribute to validate
Returns:
            :obj:`core.InvalidAttribute` or :obj:`None`: :obj:`None` if attribute is valid, otherwise return list of
errors as an instance of :obj:`core.InvalidAttribute`
"""
if value is None:
if not self.none:
return core.InvalidAttribute(self, ['Value cannot be `None`'])
else:
return None
if not isinstance(value, pronto.term.Term) or value not in self.ontology:
return core.InvalidAttribute(self, ["Value '{}' must be in `ontology`".format(value)])
if isinstance(self.terms, list) and value not in self.terms:
return core.InvalidAttribute(self, ["Value '{}' must be in `terms`".format(value)])
return None
def copy_value(self, value, objects_and_copies):
""" Copy value
Args:
value (:obj:`object`): value
objects_and_copies (:obj:`dict`): dictionary that maps objects to their copies
Returns:
:obj:`object`: copy of value
"""
return value
def serialize(self, value):
""" Serialize ontology instance
Args:
value (:obj:`pronto.term.Term`): Python representation
Returns:
:obj:`str`: simple Python representation
"""
if value:
if self.namespace:
if value.id.startswith(self.namespace + ':'):
return value.id[len(self.namespace) + 1:]
else:
raise ValueError('Id {} must begin with namespace'.format(value.id))
else:
return value.id
return ''
def to_builtin(self, value):
""" Encode a value of the attribute using a simple Python representation (dict, list, str, float, bool, None)
that is compatible with JSON and YAML
Args:
value (:obj:`pronto.term.Term`): value of the attribute
Returns:
:obj:`str`: simple Python representation of a value of the attribute
"""
if value:
return value.id
return None
def from_builtin(self, json):
""" Decode a simple Python representation (dict, list, str, float, bool, None) of a value of the attribute
that is compatible with JSON and YAML
Args:
json (:obj:`str`): simple Python representation of a value of the attribute
Returns:
:obj:`pronto.term.Term`: decoded value of the attribute
"""
if json:
return self.ontology[json]
else:
return None
def get_excel_validation(self, sheet_models=None):
""" Get Excel validation
Returns:
:obj:`wc_utils.workbook.io.FieldValidation`: validation
"""
validation = super(OntologyAttribute, self).get_excel_validation()
if self.terms is not None:
allowed_values = [self.serialize(term) for term in self.terms]
if len(','.join(allowed_values)) <= 255:
validation.type = wc_utils.workbook.io.FieldValidationType.list
validation.allowed_list_values = allowed_values
validation.ignore_blank = self.none
if self.none:
input_message = ['Enter a comma-separated list of {} ontology terms "{}" or blank.'.format(
self.namespace, '", "'.join(allowed_values))]
error_message = ['Value must be a comma-separated list of {} ontology terms "{}" or blank.'.format(
self.namespace, '", "'.join(allowed_values))]
else:
input_message = ['Enter a comma-separated list of {} ontology terms "{}".'.format(
self.namespace, '", "'.join(allowed_values))]
error_message = ['Value must be a comma-separated list of {} ontology terms "{}".'.format(
self.namespace, '", "'.join(allowed_values))]
else:
validation.ignore_blank = self.none
if self.none:
input_message = ['Enter a comma-separated list of {} ontology terms or blank.'.format(
self.namespace)]
error_message = ['Value must be a comma-separated list of {} ontology terms or blank.'.format(
self.namespace)]
else:
input_message = ['Enter a comma-separated list of {} ontology terms.'.format(
self.namespace)]
error_message = ['Value must be a comma-separated list of {} ontology terms.'.format(
self.namespace)]
if self.unique:
input_message.append('Value must be unique.')
error_message.append('Value must be unique.')
default = self.get_default_cleaned_value()
if default:
input_message.append('Default: "{}".'.format(self.serialize(default)))
if validation.input_message:
validation.input_message += '\n\n'
validation.input_message += '\n\n'.join(input_message)
if validation.error_message:
validation.error_message += '\n\n'
validation.error_message += '\n\n'.join(error_message)
return validation
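# Usage sketch (editorial addition, not part of the original module). The
# OBO file, namespace, and term id below are hypothetical; the snippet
# assumes pronto < 1.0, matching the `pronto.term.Term` references above.
#
#     import pronto
#     onto = pronto.Ontology('example.obo')
#     attr = OntologyAttribute(onto, namespace='EX', none=False)
#     term, error = attr.clean('0000001')   # resolved as 'EX:0000001'
#     if error is None:
#         assert attr.validate(None, term) is None
#         print(attr.serialize(term))       # -> '0000001'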
|
{
"content_hash": "6d7ce84ec931480c7569013465fb27f4",
"timestamp": "",
"source": "github",
"line_count": 285,
"max_line_length": 131,
"avg_line_length": 40.31578947368421,
"alnum_prop": 0.5732811140121845,
"repo_name": "KarrLab/obj_model",
"id": "407293c7ebbbc20398a31fed243ccccd2c5c93f8",
"size": "11490",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "obj_model/ontology.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2360"
},
{
"name": "HTML",
"bytes": "44150"
},
{
"name": "JavaScript",
"bytes": "7739"
},
{
"name": "Python",
"bytes": "1487837"
}
],
"symlink_target": ""
}
|
'''
Routines for outputting latex.
David Duvenaud
Nov 2012
'''
from datetime import datetime
def clean(string):
'''Prepare a string for latex'''
return string.replace('_', '').strip('1234567890')
def table(filename, rownames, colnames, entries, label=None):
    if label is None:
        # default the label to the output file's basename
        label = filename.split('/')[-1]
rownames = [clean(s) for s in rownames]
colnames = [clean(s) for s in colnames]
with open(filename, 'w') as file:
# Print table header.
file.write( '%% --- Automatically generated by latex.py ---\n')
file.write( '%% Exported at %s\n' % str(datetime.now()))
file.write( '\\begin{table}[h!]\n')
file.write( '\\begin{center}\n')
file.write( '\\begin{tabular}{l |%s}\n' % (' l' * len(colnames)))
# Write first row.
file.write( ' %s ' % colnames[0] )
for c in range(1, len(colnames)):
file.write( ' & \\rotatebox{0}{ %s } ' % colnames[c] )
file.write( ' \\\\ \\hline\n' )
# Write first column and table entries.
for r in range(len(rownames)):
file.write( rownames[r])
for c in range(0, len(colnames) - 1):
file.write( ' & %s ' % entries[r][c] )
file.write( ' \\\\\n' )
        file.write( '\\end{tabular}\n')
        file.write( '\\end{center}\n')
        file.write( '\\label{tbl:%s}\n' % label)
        file.write( '\\end{table}\n')
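if __name__ == '__main__':
    # Minimal demonstration (editorial addition, not part of the original
    # module); the file name, row/column names, and entries are hypothetical.
    table('example_table.tex',
          rownames=['model_a', 'model_b'],
          colnames=['Method', 'RMSE', 'MAE'],
          entries=[['0.12', '0.09'], ['0.15', '0.11']],
          label='example')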
|
{
"content_hash": "1e39993e0373383041ae1e9ca6325890",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 73,
"avg_line_length": 30.291666666666668,
"alnum_prop": 0.5316368638239339,
"repo_name": "jamesrobertlloyd/gp-structure-search",
"id": "e2b61ca29e914eda618d24ff9a03354c2e45c3d4",
"size": "1454",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "source/utils/latex.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1284"
},
{
"name": "C++",
"bytes": "42466"
},
{
"name": "CSS",
"bytes": "77"
},
{
"name": "Fortran",
"bytes": "141064"
},
{
"name": "HTML",
"bytes": "33004"
},
{
"name": "Makefile",
"bytes": "2229"
},
{
"name": "Mathematica",
"bytes": "16140"
},
{
"name": "Matlab",
"bytes": "416230"
},
{
"name": "OpenEdge ABL",
"bytes": "4146"
},
{
"name": "Python",
"bytes": "329264"
},
{
"name": "TeX",
"bytes": "731550"
}
],
"symlink_target": ""
}
|
"""Build a language detector model
The goal of this exercise is to train a linear classifier on text features
that represent sequences of up to 3 consecutive characters so as to be
recognize natural languages by using the frequencies of short character
sequences as 'fingerprints'.
"""
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
from sklearn.datasets import load_files
from sklearn.model_selection import train_test_split
from sklearn import metrics
# The training data folder must be passed as first argument
languages_data_folder = sys.argv[1]
dataset = load_files(languages_data_folder)
# Split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.5)
# The TASKs below are filled in with one possible completion of the skeleton.
# TASK: Build a vectorizer that splits strings into sequences of 1 to 3
# characters instead of word tokens
vectorizer = TfidfVectorizer(ngram_range=(1, 3), analyzer='char', use_idf=False)
# TASK: Build a vectorizer / classifier pipeline using the previous analyzer;
# the pipeline instance should be stored in a variable named clf
clf = Pipeline([('vec', vectorizer), ('clf', Perceptron())])
# TASK: Fit the pipeline on the training set
clf.fit(docs_train, y_train)
# TASK: Predict the outcome on the testing set in a variable named y_predicted
y_predicted = clf.predict(docs_test)
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
#import pylab as pl
#pl.matshow(cm, cmap=pl.cm.jet)
#pl.show()
# Predict the result on some short new sentences:
sentences = [
u'This is a language detection test.',
u'Ceci est un test de d\xe9tection de la langue.',
u'Dies ist ein Test, um die Sprache zu erkennen.',
]
predicted = clf.predict(sentences)
for s, p in zip(sentences, predicted):
print(u'The language of "%s" is "%s"' % (s, dataset.target_names[p]))
|
{
"content_hash": "02aab3248da3ecaab7608778c23e7d29",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 78,
"avg_line_length": 32.274193548387096,
"alnum_prop": 0.7521239380309845,
"repo_name": "xyguo/scikit-learn",
"id": "51649840d9049040e0af4cf2fe5ba779e37dcd0b",
"size": "2001",
"binary": false,
"copies": "14",
"ref": "refs/heads/master",
"path": "doc/tutorial/text_analytics/skeletons/exercise_01_language_train_model.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "C",
"bytes": "394787"
},
{
"name": "C++",
"bytes": "140225"
},
{
"name": "Makefile",
"bytes": "1579"
},
{
"name": "PowerShell",
"bytes": "17042"
},
{
"name": "Python",
"bytes": "6545381"
},
{
"name": "Shell",
"bytes": "9256"
}
],
"symlink_target": ""
}
|
from marshmallow import (Schema, fields, post_dump)
from webargs import fields as fs
page_args = {
'page': fs.Int(missing=1, validate=lambda val: val > 0),
'per_page': fs.Int(missing=10, validate=lambda val: val > 0)
}
class BaseSchema(Schema):
SKIP_VALUES = {None, }
@post_dump
def remove_skip_values(self, data):
return {
key: value for key, value in data.items()
if value not in self.SKIP_VALUES
}
class AccountSchema(BaseSchema):
id = fields.Int(dump_only=True)
name = fields.Str(required=True)
email = fields.Email(required=True)
password = fields.Str(load_only=True, required=True)
created_at = fields.DateTime()
class Meta:
strict = True
class LoginSchema(BaseSchema):
email = fields.Email(required=True)
password = fields.Str(load_only=True, required=True)
class Meta:
strict = True
class LogoutSchema(BaseSchema):
refresh_token = fields.Str(required=True)
class Meta:
strict = True
account_schema = AccountSchema()
accounts_schema = AccountSchema(many=True)
login_schema = LoginSchema()
logout_schema = LogoutSchema()
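# Usage sketch (editorial addition, not part of the original module),
# assuming marshmallow 2.x, where load()/dump() return (data, errors)
# result objects and Meta.strict makes load() raise on invalid input.
# The payload below is hypothetical.
#
#     payload = {'name': 'Ada', 'email': 'ada@example.com',
#                'password': 'secret'}
#     result = account_schema.load(payload)   # raises ValidationError if bad
#     account_data = result.data              # password is load_only
#     print(account_schema.dump(account_data).data)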
|
{
"content_hash": "96caadff80ca512eb6c62990819bca92",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 64,
"avg_line_length": 22.96078431372549,
"alnum_prop": 0.6618274978650726,
"repo_name": "alexcc4/flask_restful_backend",
"id": "de2c914688fcccb393f73cfd2cc581d1e6238eb1",
"size": "1219",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/libs/schemas.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "25826"
},
{
"name": "Shell",
"bytes": "1228"
}
],
"symlink_target": ""
}
|
import types
import fixtures
from oslo.config import cfg
from neutron import manager
from neutron.openstack.common import log as logging
from neutron.plugins.common import constants
from neutron.tests import base
from neutron.tests.unit import dummy_plugin
LOG = logging.getLogger(__name__)
DB_PLUGIN_KLASS = 'neutron.db.db_base_plugin_v2.NeutronDbPluginV2'
class MultiServiceCorePlugin(object):
supported_extension_aliases = ['lbaas', 'dummy']
class CorePluginWithAgentNotifiers(object):
agent_notifiers = {'l3': 'l3_agent_notifier',
'dhcp': 'dhcp_agent_notifier'}
class NeutronManagerTestCase(base.BaseTestCase):
def setUp(self):
super(NeutronManagerTestCase, self).setUp()
self.config_parse()
self.setup_coreplugin()
self.useFixture(
fixtures.MonkeyPatch('neutron.manager.NeutronManager._instance'))
def test_service_plugin_is_loaded(self):
cfg.CONF.set_override("core_plugin", DB_PLUGIN_KLASS)
cfg.CONF.set_override("service_plugins",
["neutron.tests.unit.dummy_plugin."
"DummyServicePlugin"])
mgr = manager.NeutronManager.get_instance()
plugin = mgr.get_service_plugins()[constants.DUMMY]
self.assertTrue(
isinstance(plugin,
(dummy_plugin.DummyServicePlugin, types.ClassType)),
"loaded plugin should be of type neutronDummyPlugin")
def test_service_plugin_by_name_is_loaded(self):
cfg.CONF.set_override("core_plugin", DB_PLUGIN_KLASS)
cfg.CONF.set_override("service_plugins", ["dummy"])
mgr = manager.NeutronManager.get_instance()
plugin = mgr.get_service_plugins()[constants.DUMMY]
self.assertTrue(
isinstance(plugin,
(dummy_plugin.DummyServicePlugin, types.ClassType)),
"loaded plugin should be of type neutronDummyPlugin")
def test_multiple_plugins_specified_for_service_type(self):
cfg.CONF.set_override("service_plugins",
["neutron.tests.unit.dummy_plugin."
"DummyServicePlugin",
"neutron.tests.unit.dummy_plugin."
"DummyServicePlugin"])
cfg.CONF.set_override("core_plugin", DB_PLUGIN_KLASS)
self.assertRaises(ValueError, manager.NeutronManager.get_instance)
def test_multiple_plugins_by_name_specified_for_service_type(self):
cfg.CONF.set_override("service_plugins", ["dummy", "dummy"])
cfg.CONF.set_override("core_plugin", DB_PLUGIN_KLASS)
self.assertRaises(ValueError, manager.NeutronManager.get_instance)
def test_multiple_plugins_mixed_specified_for_service_type(self):
cfg.CONF.set_override("service_plugins",
["neutron.tests.unit.dummy_plugin."
"DummyServicePlugin", "dummy"])
cfg.CONF.set_override("core_plugin", DB_PLUGIN_KLASS)
self.assertRaises(ValueError, manager.NeutronManager.get_instance)
def test_service_plugin_conflicts_with_core_plugin(self):
cfg.CONF.set_override("service_plugins",
["neutron.tests.unit.dummy_plugin."
"DummyServicePlugin"])
cfg.CONF.set_override("core_plugin",
"neutron.tests.unit.test_neutron_manager."
"MultiServiceCorePlugin")
self.assertRaises(ValueError, manager.NeutronManager.get_instance)
def test_core_plugin_supports_services(self):
cfg.CONF.set_override("core_plugin",
"neutron.tests.unit.test_neutron_manager."
"MultiServiceCorePlugin")
mgr = manager.NeutronManager.get_instance()
svc_plugins = mgr.get_service_plugins()
self.assertEqual(3, len(svc_plugins))
self.assertIn(constants.CORE, svc_plugins.keys())
self.assertIn(constants.LOADBALANCER, svc_plugins.keys())
self.assertIn(constants.DUMMY, svc_plugins.keys())
def test_post_plugin_validation(self):
cfg.CONF.import_opt('dhcp_agents_per_network',
'neutron.db.agentschedulers_db')
self.assertIsNone(manager.validate_post_plugin_load())
cfg.CONF.set_override('dhcp_agents_per_network', 2)
self.assertIsNone(manager.validate_post_plugin_load())
cfg.CONF.set_override('dhcp_agents_per_network', 0)
self.assertIsNotNone(manager.validate_post_plugin_load())
cfg.CONF.set_override('dhcp_agents_per_network', -1)
self.assertIsNotNone(manager.validate_post_plugin_load())
def test_pre_plugin_validation(self):
self.assertIsNotNone(manager.validate_pre_plugin_load())
cfg.CONF.set_override('core_plugin', 'dummy.plugin')
self.assertIsNone(manager.validate_pre_plugin_load())
def test_manager_gathers_agent_notifiers_from_service_plugins(self):
cfg.CONF.set_override("service_plugins",
["neutron.tests.unit.dummy_plugin."
"DummyServicePlugin"])
cfg.CONF.set_override("core_plugin",
"neutron.tests.unit.test_neutron_manager."
"CorePluginWithAgentNotifiers")
expected = {'l3': 'l3_agent_notifier',
'dhcp': 'dhcp_agent_notifier',
'dummy': 'dummy_agent_notifier'}
core_plugin = manager.NeutronManager.get_plugin()
self.assertEqual(expected, core_plugin.agent_notifiers)
|
{
"content_hash": "5e0d83d1cbcd54bd6bb6434afbd15394",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 77,
"avg_line_length": 44.07751937984496,
"alnum_prop": 0.6227576503693282,
"repo_name": "vbannai/neutron",
"id": "4a8eb0e6dcc342da7d810a41d8501db9858d3962",
"size": "6372",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "neutron/tests/unit/test_neutron_manager.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "21914"
},
{
"name": "JavaScript",
"bytes": "60527"
},
{
"name": "Python",
"bytes": "9344274"
},
{
"name": "Shell",
"bytes": "9202"
},
{
"name": "XSLT",
"bytes": "50907"
}
],
"symlink_target": ""
}
|
"""
TEST FILE 3
"""
from zope.interface import implements, Interface, Attribute
from twisted.python.reflect import namedAny
from twisted.python import components
from twisted.internet import defer
from twisted.persisted import sob
from twisted.plugin import IPlugin
class IServiceMaker(Interface):
"""
An object which can be used to construct services in a flexible
way.
This interface should most often be implemented along with
L{twisted.plugin.IPlugin}, and will most often be used by the
'twistd' command.
"""
tapname = Attribute(
"A short string naming this Twisted plugin, for example 'web' or "
"'pencil'. This name will be used as the subcommand of 'twistd'.")
description = Attribute(
"A brief summary of the features provided by this "
"Twisted application plugin.")
options = Attribute(
"A C{twisted.python.usage.Options} subclass defining the"
"configuration options for this application.")
def makeService(options):
"""
Create and return an object providing
L{twisted.application.service.IService}.
@param options: A mapping (typically a C{dict} or
C{twisted.python.usage.Options} instance) of configuration
options to desired configuration values.
"""
class ServiceMaker(object):
"""
Utility class to simplify the definition of L{IServiceMaker} plugins.
"""
implements(IPlugin, IServiceMaker)
def __init__(self, name, module, description, tapname):
self.name = name
self.module = module
self.description = description
self.tapname = tapname
def options():
def get(self):
return namedAny(self.module).Options
return get,
options = property(*options())
def makeService():
def get(self):
return namedAny(self.module).makeService
return get,
makeService = property(*makeService())
class IService(Interface):
"""
A service.
Run start-up and shut-down code at the appropriate times.
@type name: C{string}
@ivar name: The name of the service (or None)
@type running: C{boolean}
@ivar running: Whether the service is running.
"""
def setName(name):
"""
Set the name of the service.
@type name: C{str}
@raise RuntimeError: Raised if the service already has a parent.
"""
def setServiceParent(parent):
"""
Set the parent of the service.
@type parent: L{IServiceCollection}
@raise RuntimeError: Raised if the service already has a parent
or if the service has a name and the parent already has a child
by that name.
"""
def disownServiceParent():
"""
Use this API to remove an L{IService} from an L{IServiceCollection}.
This method is used symmetrically with L{setServiceParent} in that it
sets the C{parent} attribute on the child.
@rtype: L{Deferred}
@return: a L{Deferred} which is triggered when the service has
finished shutting down. If shutting down is immediate,
a value can be returned (usually, C{None}).
"""
def startService():
"""
Start the service.
"""
def stopService():
"""
Stop the service.
@rtype: L{Deferred}
@return: a L{Deferred} which is triggered when the service has
finished shutting down. If shutting down is immediate, a
value can be returned (usually, C{None}).
"""
def privilegedStartService():
"""
Do preparation work for starting the service.
Here things which should be done before changing directory,
root or shedding privileges are done.
"""
class Service:
"""
Base class for services.
Most services should inherit from this class. It handles the
    book-keeping responsibilities of starting and stopping, as well
as not serializing this book-keeping information.
"""
implements(IService)
running = 0
name = None
parent = None
def __getstate__(self):
dict = self.__dict__.copy()
if dict.has_key("running"):
del dict['running']
return dict
def setName(self, name):
if self.parent is not None:
raise RuntimeError("cannot change name when parent exists")
self.name = name
def setServiceParent(self, parent):
if self.parent is not None:
self.disownServiceParent()
parent = IServiceCollection(parent, parent)
self.parent = parent
self.parent.addService(self)
def disownServiceParent(self):
d = self.parent.removeService(self)
self.parent = None
return d
def privilegedStartService(self):
pass
def startService(self):
self.running = 1
def stopService(self):
self.running = 0
class IServiceCollection(Interface):
"""
Collection of services.
Contain several services, and manage their start-up/shut-down.
Services can be accessed by name if they have a name, and it
is always possible to iterate over them.
"""
def getServiceNamed(name):
"""
Get the child service with a given name.
@type name: C{str}
@rtype: L{IService}
@raise KeyError: Raised if the service has no child with the
given name.
"""
def __iter__():
"""
Get an iterator over all child services.
"""
def addService(service):
"""
Add a child service.
@type service: L{IService}
@raise RuntimeError: Raised if the service has a child with
the given name.
"""
def removeService(service):
"""
Remove a child service.
Only implementations of L{IService.disownServiceParent} should
use this method.
@type service: L{IService}
@raise ValueError: Raised if the given service is not a child.
@rtype: L{Deferred}
@return: a L{Deferred} which is triggered when the service has
finished shutting down. If shutting down is immediate, a
value can be returned (usually, C{None}).
"""
class MultiService(Service):
"""
Straightforward Service Container.
Hold a collection of services, and manage them in a simplistic
way. No service will wait for another, but this object itself
will not finish shutting down until all of its child services
    have finished.
"""
implements(IServiceCollection)
def __init__(self):
self.services = []
self.namedServices = {}
self.parent = None
def privilegedStartService(self):
Service.privilegedStartService(self)
for service in self:
service.privilegedStartService()
def startService(self):
Service.startService(self)
for service in self:
service.startService()
def stopService(self):
Service.stopService(self)
l = []
services = list(self)
services.reverse()
for service in services:
l.append(defer.maybeDeferred(service.stopService))
return defer.DeferredList(l)
def getServiceNamed(self, name):
return self.namedServices[name]
def __iter__(self):
return iter(self.services)
def addService(self, service):
if service.name is not None:
if self.namedServices.has_key(service.name):
raise RuntimeError("cannot have two services with same name"
" '%s'" % service.name)
self.namedServices[service.name] = service
self.services.append(service)
if self.running:
# It may be too late for that, but we will do our best
service.privilegedStartService()
service.startService()
def removeService(self, service):
if service.name:
del self.namedServices[service.name]
self.services.remove(service)
if self.running:
# Returning this so as not to lose information from the
# MultiService.stopService deferred.
return service.stopService()
else:
return None
class IProcess(Interface):
"""
Process running parameters.
Represents parameters for how processes should be run.
"""
processName = Attribute(
"""
A C{str} giving the name the process should have in ps (or C{None}
to leave the name alone).
""")
uid = Attribute(
"""
An C{int} giving the user id as which the process should run (or
C{None} to leave the UID alone).
""")
gid = Attribute(
"""
An C{int} giving the group id as which the process should run (or
C{None} to leave the GID alone).
""")
class Process:
"""
Process running parameters.
Sets up uid/gid in the constructor, and has a default
of C{None} as C{processName}.
"""
implements(IProcess)
processName = None
def __init__(self, uid=None, gid=None):
"""
Set uid and gid.
@param uid: The user ID as whom to execute the process. If
this is C{None}, no attempt will be made to change the UID.
@param gid: The group ID as whom to execute the process. If
this is C{None}, no attempt will be made to change the GID.
"""
self.uid = uid
self.gid = gid
def Application(name, uid=None, gid=None):
"""
Return a compound class.
Return an object supporting the L{IService}, L{IServiceCollection},
L{IProcess} and L{sob.IPersistable} interfaces, with the given
parameters. Always access the return value by explicit casting to
one of the interfaces.
"""
ret = components.Componentized()
for comp in (MultiService(), sob.Persistent(ret, name), Process(uid, gid)):
ret.addComponent(comp, ignoreClass=1)
IService(ret).setName(name)
return ret
def loadApplication(filename, kind, passphrase=None):
"""
Load Application from a given file.
The serialization format it was saved in should be given as
C{kind}, and is one of C{pickle}, C{source}, C{xml} or C{python}. If
C{passphrase} is given, the application was encrypted with the
given passphrase.
@type filename: C{str}
@type kind: C{str}
@type passphrase: C{str}
"""
if kind == 'python':
application = sob.loadValueFromFile(filename, 'application', passphrase)
else:
application = sob.load(filename, kind, passphrase)
return application
__all__ = ['IServiceMaker', 'IService', 'Service',
'IServiceCollection', 'MultiService',
'IProcess', 'Process', 'Application', 'loadApplication']
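# Usage sketch (editorial addition, not part of the test file): compose the
# pieces defined above into an application. All names are hypothetical.
#
#     application = Application("myapp")
#     workers = MultiService()
#     workers.setName("workers")
#     workers.setServiceParent(application)
#     child = Service()
#     child.setName("worker-1")
#     child.setServiceParent(workers)
#     IService(application).startService()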
|
{
"content_hash": "50dd880f673b16113c58decc48e82138",
"timestamp": "",
"source": "github",
"line_count": 397,
"max_line_length": 80,
"avg_line_length": 27.740554156171285,
"alnum_prop": 0.6171796967220557,
"repo_name": "wkwan/Markus",
"id": "3e0da56541987d6df3116e8f4185fe446d5f4ae5",
"size": "11096",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "db/data/test-files-in-inner-dirs/3.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "270"
},
{
"name": "C",
"bytes": "15240"
},
{
"name": "C++",
"bytes": "3715"
},
{
"name": "CSS",
"bytes": "107571"
},
{
"name": "HTML",
"bytes": "501824"
},
{
"name": "Java",
"bytes": "1656552"
},
{
"name": "JavaScript",
"bytes": "179001"
},
{
"name": "Makefile",
"bytes": "1233"
},
{
"name": "PHP",
"bytes": "1518"
},
{
"name": "Pascal",
"bytes": "1679"
},
{
"name": "Python",
"bytes": "152386"
},
{
"name": "Ruby",
"bytes": "1897398"
},
{
"name": "Shell",
"bytes": "8978"
}
],
"symlink_target": ""
}
|
import sys
import unittest
import appleseed as asr
import IECore
import Gaffer
import GafferScene
import GafferTest
import GafferAppleseed
from AppleseedTest import appleseedProjectSchemaPath
@unittest.skipIf( sys.platform == 'darwin', "Unknown segfault on Mac see #3234" )
class AppleseedCapsuleTest( GafferTest.TestCase ) :
def setUp( self ) :
GafferTest.TestCase.setUp( self )
self.__scriptFileName = self.temporaryDirectory() + "/test.gfr"
def testCapsules( self ) :
s = Gaffer.ScriptNode()
s["sphere"] = GafferScene.Sphere( "sphere" )
s["sphere1"] = GafferScene.Sphere( "sphere1" )
s["group"] = GafferScene.Group( "group" )
s["group"]["in"][0].setInput( s["sphere"]["out"] )
s["group"]["in"][1].setInput( s["sphere1"]["out"] )
s["path_filter"] = GafferScene.PathFilter( "path_filter" )
s["path_filter"]["paths"].setValue( IECore.StringVectorData( [ '*' ] ) )
s["encapsulate"] = GafferScene.Encapsulate( "encapsulate" )
s["encapsulate"]["in"].setInput( s["group"]["out"] )
s["encapsulate"]["filter"].setInput( s["path_filter"]["out"] )
s["duplicate"] = GafferScene.Duplicate( "duplicate" )
s["duplicate"]["in"].setInput( s["encapsulate"]["out"] )
s["duplicate"]["target"].setValue( 'group' )
s["duplicate"]["copies"].setValue( 2 )
s["render"] = GafferAppleseed.AppleseedRender()
s["render"]["in"].setInput( s["duplicate"]["out"] )
s["render"]["mode"].setValue( s["render"].Mode.SceneDescriptionMode )
projectFilename = self.temporaryDirectory() + "/test.appleseed"
s["render"]["fileName"].setValue( projectFilename )
s["render"]["task"].execute()
reader = asr.ProjectFileReader()
options = asr.ProjectFileReaderOptions.OmitReadingMeshFiles
project = reader.read( projectFilename, appleseedProjectSchemaPath(), options )
scene = project.get_scene()
mainAssembly = scene.assemblies().get_by_name( "assembly" )
# Check that we have 3 instances of 1 capsule.
self.assertEqual( len( mainAssembly.assemblies() ), 1)
self.assertEqual( len( mainAssembly.assembly_instances() ), 3 )
capsuleAssemblyName = mainAssembly.assemblies().keys()[0]
capsuleAssembly = mainAssembly.assemblies()[capsuleAssemblyName]
# Check that we have 2 instances of 1 sphere inside the capsule.
self.assertEqual( len( capsuleAssembly.assemblies() ), 1)
self.assertEqual( len( capsuleAssembly.assembly_instances() ), 2 )
if __name__ == "__main__":
unittest.main()
|
{
"content_hash": "0fc006cd05cd31bb77b06214a5c8a9d1",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 81,
"avg_line_length": 32.87837837837838,
"alnum_prop": 0.6962597616111796,
"repo_name": "appleseedhq/gaffer",
"id": "9e79f5fc416331b3a41fc2231edf63401626297b",
"size": "4231",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/GafferAppleseedTest/AppleseedCapsuleTest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "39910"
},
{
"name": "C++",
"bytes": "7337901"
},
{
"name": "CMake",
"bytes": "85201"
},
{
"name": "GLSL",
"bytes": "6236"
},
{
"name": "Python",
"bytes": "7531988"
},
{
"name": "Shell",
"bytes": "15031"
}
],
"symlink_target": ""
}
|
"""
Create service files
"""
def main():
"""
main
"""
from pymod.csf import CreateServiceFiles
CreateServiceFiles().run()
if __name__ == '__main__':
import sys
sys.exit(main())
|
{
"content_hash": "f4f65d8b29fc9799fcfcf21503d8e654",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 44,
"avg_line_length": 12.352941176470589,
"alnum_prop": 0.5523809523809524,
"repo_name": "MyRequiem/comfortablePlayingInGW",
"id": "72e832309624ee3fe0afbf9978703577cf555d6c",
"size": "972",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "create_service_files.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "9523"
},
{
"name": "HTML",
"bytes": "1151814"
},
{
"name": "JavaScript",
"bytes": "1344903"
},
{
"name": "Python",
"bytes": "12132"
},
{
"name": "Shell",
"bytes": "2670"
}
],
"symlink_target": ""
}
|
from dimod.binary.binary_quadratic_model import BinaryQuadraticModel
from dimod.vartypes import Vartype
__all__ = ['anti_crossing_clique', 'anti_crossing_loops']
def anti_crossing_clique(num_variables: int) -> BinaryQuadraticModel:
"""Generate an anti-crossing problem with a single clique.
Let ``N = num_variables // 2``. This function returns a binary quadratic
model where half the variables, `[0, N)`, form a ferromagnetic clique, with
each variable, `v`, also ferromagnetically interacting with one variable,
`v+N`, of the remaining half of the variables, `[N, 2*N)`.
All of the variables in the clique except variable `1` have a linear bias
of `+1`, and all of the variables attached to the clique have a linear bias
of `-1`.
The ground state of this problem is therefore `+1` for all variables.
Args:
num_variables:
Number of variables used to generate the problem. Must be an even
number greater than or equal to 6.
Returns:
A binary quadratic model.
"""
if num_variables % 2 or num_variables < 6:
raise ValueError('num_variables must be an even number >= 6')
bqm = BinaryQuadraticModel(Vartype.SPIN)
hf = int(num_variables / 2)
for n in range(hf):
for m in range(n + 1, hf):
bqm.add_quadratic(n, m, -1)
bqm.add_quadratic(n, n + hf, -1)
bqm.add_linear(n, 1)
bqm.add_linear(n + hf, -1)
bqm.set_linear(1, 0)
return bqm
def anti_crossing_loops(num_variables: int) -> BinaryQuadraticModel:
"""Generate an anti-crossing problem with two loops.
The low-energy space of this model consists of a unique ground state of all
:math:`+1`\ s and a degenerate first excited state, centered at all
:math:`-1`\ s, with these two lowest states well separated in Hamming distance
and by an energy barrier. These features are sufficient to yield a small
anti-crossing when employed in a transverse-field annealing process. A closely
related approach is employed in [DJA]_.
Note that for small values of ``num_variables``, the loops can be as small
as a single edge.
Args:
num_variables:
Number of variables used to generate the problem. Must be an even
number greater than or equal to 8.
Returns:
A binary quadratic model.
.. [DJA] Dickson, N., Johnson, M., Amin, M. et al. Thermally assisted
quantum annealing of a 16-qubit problem. Nat Commun 4, 1903 (2013).
https://doi.org/10.1038/ncomms2920
"""
if num_variables % 2 or num_variables < 8:
raise ValueError('num_variables must be an even number >= 8')
bqm = BinaryQuadraticModel(Vartype.SPIN)
hf = int(num_variables / 4)
for n in range(hf):
if n % 2 == 1:
bqm.set_quadratic(n, n + hf, -1)
bqm.set_quadratic(n, (n + 1) % hf, -1)
bqm.set_quadratic(n + hf, (n + 1) % hf + hf, -1)
bqm.set_quadratic(n, n + 2 * hf, -1)
bqm.set_quadratic(n + hf, n + 3 * hf, -1)
bqm.add_linear(n, 1)
bqm.add_linear(n + hf, 1)
bqm.add_linear(n + 2 * hf, -1)
bqm.add_linear(n + 3 * hf, -1)
bqm.set_linear(0, 0)
bqm.set_linear(hf, 0)
return bqm
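if __name__ == "__main__":
    # Illustrative sketch (editorial addition, not part of the library):
    # build both generators and evaluate the intended all-(+1) ground state
    # by hand; `sample` is a hand-built dict, not solver output.
    bqm = anti_crossing_clique(6)
    sample = {v: 1 for v in bqm.variables}
    print(bqm.num_variables, bqm.energy(sample))
    loops = anti_crossing_loops(8)
    print(loops.num_variables, loops.energy({v: 1 for v in loops.variables}))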
|
{
"content_hash": "7afcd3f9289ce89f4b0ddff642375059",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 82,
"avg_line_length": 32.205882352941174,
"alnum_prop": 0.6316590563165906,
"repo_name": "dwavesystems/dimod",
"id": "445be507ef08da47e82319393d9a3b0a73b626d2",
"size": "3896",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "dimod/generators/anti_crossing.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "239337"
},
{
"name": "Cython",
"bytes": "184652"
},
{
"name": "Makefile",
"bytes": "927"
},
{
"name": "Python",
"bytes": "1411314"
}
],
"symlink_target": ""
}
|
from google.cloud import datalabeling_v1beta1
def sample_search_evaluations():
# Create a client
client = datalabeling_v1beta1.DataLabelingServiceClient()
# Initialize request argument(s)
request = datalabeling_v1beta1.SearchEvaluationsRequest(
parent="parent_value",
)
# Make the request
page_result = client.search_evaluations(request=request)
# Handle the response
for response in page_result:
print(response)
# [END datalabeling_v1beta1_generated_DataLabelingService_SearchEvaluations_sync]
|
{
"content_hash": "e945dcc6782d754d3903db41617e575f",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 81,
"avg_line_length": 27.65,
"alnum_prop": 0.7359855334538878,
"repo_name": "googleapis/python-datalabeling",
"id": "35aaba2d84d6932b48b0f0d3c0258b94bf13712c",
"size": "1967",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/generated_samples/datalabeling_v1beta1_generated_data_labeling_service_search_evaluations_sync.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "1228072"
},
{
"name": "Shell",
"bytes": "30678"
}
],
"symlink_target": ""
}
|
"""
Some reimplementation of Henkelman's Transition State Analysis utilities,
which are originally in Perl. Additional features beyond those offered by
Henkelman's utilities will be added.
This allows usage and customization in Python.
"""
import glob
import os
import numpy as np
from monty.json import MSONable, jsanitize
from scipy.interpolate import CubicSpline
from pymatgen.analysis.structure_matcher import StructureMatcher
from pymatgen.io.vasp import Outcar, Poscar
from pymatgen.util.plotting import pretty_plot
class NEBAnalysis(MSONable):
"""
An NEBAnalysis class.
"""
def __init__(self, r, energies, forces, structures, spline_options=None):
"""
Initializes an NEBAnalysis from the cumulative root mean squared distances
between structures, the energies, the forces, the structures and the
interpolation_order for the analysis.
Args:
r: Root mean square distances between structures
energies: Energies of each structure along reaction coordinate
forces: Tangent forces along the reaction coordinate.
structures ([Structure]): List of Structures along reaction
coordinate.
spline_options (dict): Options for cubic spline. For example,
{"saddle_point": "zero_slope"} forces the slope at the saddle to
be zero.
"""
self.r = np.array(r)
self.energies = np.array(energies)
self.forces = np.array(forces)
self.structures = structures
self.spline_options = spline_options if spline_options is not None else {}
# We do a piecewise interpolation between the points. Each spline (
# cubic by default) is constrained by the boundary conditions of the
# energies and the tangent force, i.e., the derivative of
# the energy at each pair of points.
self.setup_spline(spline_options=self.spline_options)
def setup_spline(self, spline_options=None):
"""
Setup of the options for the spline interpolation
Args:
spline_options (dict): Options for cubic spline. For example,
{"saddle_point": "zero_slope"} forces the slope at the saddle to
be zero.
"""
self.spline_options = spline_options
relative_energies = self.energies - self.energies[0]
if self.spline_options.get("saddle_point", "") == "zero_slope":
imax = np.argmax(relative_energies)
self.spline = CubicSpline(
x=self.r[: imax + 1],
y=relative_energies[: imax + 1],
bc_type=((1, 0.0), (1, 0.0)),
)
cspline2 = CubicSpline(
x=self.r[imax:],
y=relative_energies[imax:],
bc_type=((1, 0.0), (1, 0.0)),
)
self.spline.extend(c=cspline2.c, x=cspline2.x[1:])
else:
self.spline = CubicSpline(x=self.r, y=relative_energies, bc_type=((1, 0.0), (1, 0.0)))
@classmethod
def from_outcars(cls, outcars, structures, **kwargs):
"""
Initializes an NEBAnalysis from Outcar and Structure objects. Use
the static constructors, e.g., :class:`from_dir` instead if you
prefer to have these automatically generated from a directory of NEB
calculations.
Args:
outcars ([Outcar]): List of Outcar objects. Note that these have
to be ordered from start to end along reaction coordinates.
structures ([Structure]): List of Structures along reaction
coordinate. Must be same length as outcar.
interpolation_order (int): Order of polynomial to use to
interpolate between images. Same format as order parameter in
                scipy.interpolate.PiecewisePolynomial.
"""
if len(outcars) != len(structures):
raise ValueError("# of Outcars must be same as # of Structures")
# Calculate cumulative root mean square distance between structures,
# which serves as the reaction coordinate. Note that these are
# calculated from the final relaxed structures as the coordinates may
# have changed from the initial interpolation.
r = [0]
prev = structures[0]
for st in structures[1:]:
dists = np.array([s2.distance(s1) for s1, s2 in zip(prev, st)])
r.append(np.sqrt(np.sum(dists ** 2)))
prev = st
r = np.cumsum(r)
energies = []
forces = []
for i, o in enumerate(outcars):
o.read_neb()
energies.append(o.data["energy"])
if i in [0, len(outcars) - 1]:
forces.append(0)
else:
forces.append(o.data["tangent_force"])
forces = np.array(forces)
r = np.array(r)
return cls(r=r, energies=energies, forces=forces, structures=structures, **kwargs)
def get_extrema(self, normalize_rxn_coordinate=True):
"""
Returns the positions of the extrema along the MEP. Both local
minimums and maximums are returned.
Args:
normalize_rxn_coordinate (bool): Whether to normalize the
reaction coordinate to between 0 and 1. Defaults to True.
Returns:
(min_extrema, max_extrema), where the extrema are given as
[(x1, y1), (x2, y2), ...].
"""
x = np.arange(0, np.max(self.r), 0.01)
y = self.spline(x) * 1000
scale = 1 if not normalize_rxn_coordinate else 1 / self.r[-1]
min_extrema = []
max_extrema = []
for i in range(1, len(x) - 1):
if y[i] < y[i - 1] and y[i] < y[i + 1]:
min_extrema.append((x[i] * scale, y[i]))
elif y[i] > y[i - 1] and y[i] > y[i + 1]:
max_extrema.append((x[i] * scale, y[i]))
return min_extrema, max_extrema
def get_plot(self, normalize_rxn_coordinate=True, label_barrier=True):
"""
Returns the NEB plot. Uses Henkelman's approach of spline fitting
each section of the reaction path based on tangent force and energies.
Args:
normalize_rxn_coordinate (bool): Whether to normalize the
reaction coordinate to between 0 and 1. Defaults to True.
label_barrier (bool): Whether to label the maximum barrier.
Returns:
matplotlib.pyplot object.
"""
plt = pretty_plot(12, 8)
scale = 1 if not normalize_rxn_coordinate else 1 / self.r[-1]
x = np.arange(0, np.max(self.r), 0.01)
y = self.spline(x) * 1000
relative_energies = self.energies - self.energies[0]
plt.plot(
self.r * scale,
relative_energies * 1000,
"ro",
x * scale,
y,
"k-",
linewidth=2,
markersize=10,
)
plt.xlabel("Reaction coordinate")
plt.ylabel("Energy (meV)")
plt.ylim((np.min(y) - 10, np.max(y) * 1.02 + 20))
if label_barrier:
data = zip(x * scale, y)
barrier = max(data, key=lambda d: d[1])
plt.plot([0, barrier[0]], [barrier[1], barrier[1]], "k--")
plt.annotate(
"%.0f meV" % (np.max(y) - np.min(y)),
xy=(barrier[0] / 2, barrier[1] * 1.02),
xytext=(barrier[0] / 2, barrier[1] * 1.02),
horizontalalignment="center",
)
plt.tight_layout()
return plt
@classmethod
def from_dir(cls, root_dir, relaxation_dirs=None, **kwargs):
"""
Initializes a NEBAnalysis object from a directory of a NEB run.
Note that OUTCARs must be present in all image directories. For the
terminal OUTCARs from relaxation calculations, you can specify the
locations using relaxation_dir. If these are not specified, the code
will attempt to look for the OUTCARs in 00 and 0n directories,
followed by subdirs "start", "end" or "initial", "final" in the
root_dir. These are just some typical conventions used
preferentially in Shyue Ping's MAVRL research group. For the
non-terminal points, the CONTCAR is read to obtain structures. For
terminal points, the POSCAR is used. The image directories are
assumed to be the only directories that can be resolved to integers.
E.g., "00", "01", "02", "03", "04", "05", "06". The minimum
sub-directory structure that can be parsed is of the following form (
a 5-image example is shown):
00:
- POSCAR
- OUTCAR
01, 02, 03, 04, 05:
- CONTCAR
- OUTCAR
06:
- POSCAR
- OUTCAR
Args:
root_dir (str): Path to the root directory of the NEB calculation.
relaxation_dirs (tuple): This specifies the starting and ending
relaxation directories from which the OUTCARs are read for the
terminal points for the energies.
Returns:
NEBAnalysis object.
"""
neb_dirs = []
for d in os.listdir(root_dir):
pth = os.path.join(root_dir, d)
if os.path.isdir(pth) and d.isdigit():
i = int(d)
neb_dirs.append((i, pth))
neb_dirs = sorted(neb_dirs, key=lambda d: d[0])
outcars = []
structures = []
# Setup the search sequence for the OUTCARs for the terminal
# directories.
terminal_dirs = []
if relaxation_dirs is not None:
terminal_dirs.append(relaxation_dirs)
terminal_dirs.append((neb_dirs[0][1], neb_dirs[-1][1]))
terminal_dirs.append([os.path.join(root_dir, d) for d in ["start", "end"]])
terminal_dirs.append([os.path.join(root_dir, d) for d in ["initial", "final"]])
for i, d in neb_dirs:
outcar = glob.glob(os.path.join(d, "OUTCAR*"))
contcar = glob.glob(os.path.join(d, "CONTCAR*"))
poscar = glob.glob(os.path.join(d, "POSCAR*"))
terminal = i in [0, neb_dirs[-1][0]]
if terminal:
for ds in terminal_dirs:
od = ds[0] if i == 0 else ds[1]
outcar = glob.glob(os.path.join(od, "OUTCAR*"))
if outcar:
outcar = sorted(outcar)
outcars.append(Outcar(outcar[-1]))
break
else:
raise ValueError("OUTCAR cannot be found for terminal point %s" % d)
structures.append(Poscar.from_file(poscar[0]).structure)
else:
outcars.append(Outcar(outcar[0]))
structures.append(Poscar.from_file(contcar[0]).structure)
return NEBAnalysis.from_outcars(outcars, structures, **kwargs)
def as_dict(self):
"""
Dict representation of NEBAnalysis.
Returns:
JSON serializable dict representation.
"""
return {
"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"r": jsanitize(self.r),
"energies": jsanitize(self.energies),
"forces": jsanitize(self.forces),
"structures": [s.as_dict() for s in self.structures],
}
def combine_neb_plots(neb_analyses, arranged_neb_analyses=False, reverse_plot=False):
"""
neb_analyses: a list of NEBAnalysis objects
arranged_neb_analyses: The code connects two end points with the
smallest-energy difference. If all end points have very close energies, it's
likely to result in an inaccurate connection. Manually arrange neb_analyses
if the combined plot is not as expected compared with all individual plots.
E.g., if there are two NEBAnalysis objects to combine, arrange in such a
way that the end-point energy of the first NEBAnalysis object is the
start-point energy of the second NEBAnalysis object.
Note that the barrier labeled in y-axis in the combined plot might be
different from that in the individual plot due to the reference energy used.
reverse_plot: reverse the plot or percolation direction.
return: a NEBAnalysis object
"""
x = StructureMatcher()
for neb_index, neb in enumerate(neb_analyses):
if neb_index == 0:
neb1 = neb
neb1_energies = list(neb1.energies)
neb1_structures = neb1.structures
neb1_forces = neb1.forces
neb1_r = neb1.r
continue
neb2 = neb
neb2_energies = list(neb2.energies)
matching = 0
for neb1_s in [neb1_structures[0], neb1_structures[-1]]:
if x.fit(neb1_s, neb2.structures[0]) or x.fit(neb1_s, neb2.structures[-1]):
matching += 1
break
if matching == 0:
raise ValueError("no matched structures for connection!")
neb1_start_e, neb1_end_e = neb1_energies[0], neb1_energies[-1]
neb2_start_e, neb2_end_e = neb2_energies[0], neb2_energies[-1]
min_e_diff = min(
[
abs(neb1_start_e - neb2_start_e),
abs(neb1_start_e - neb2_end_e),
abs(neb1_end_e - neb2_start_e),
abs(neb1_end_e - neb2_end_e),
]
)
if arranged_neb_analyses:
            neb1_energies = (
                neb1_energies[:-1]
                + [(neb1_energies[-1] + neb2_energies[0]) / 2]
                + neb2_energies[1:]
            )
neb1_structures = neb1_structures + neb2.structures[1:]
neb1_forces = list(neb1_forces) + list(neb2.forces)[1:]
neb1_r = list(neb1_r) + [i + neb1_r[-1] for i in list(neb2.r)[1:]]
elif abs(neb1_start_e - neb2_start_e) == min_e_diff:
neb1_energies = list(reversed(neb1_energies[1:])) + neb2_energies
neb1_structures = list(reversed(neb1_structures[1:])) + neb2.structures
neb1_forces = list(reversed(list(neb1_forces)[1:])) + list(neb2.forces)
neb1_r = list(reversed([i * -1 - neb1_r[-1] * -1 for i in list(neb1_r)[1:]])) + [
i + neb1_r[-1] for i in list(neb2.r)
]
elif abs(neb1_start_e - neb2_end_e) == min_e_diff:
neb1_energies = neb2_energies + neb1_energies[1:]
neb1_structures = neb2.structures + neb1_structures[1:]
neb1_forces = list(neb2.forces) + list(neb1_forces)[1:]
neb1_r = list(neb2.r) + [i + list(neb2.r)[-1] for i in list(neb1_r)[1:]]
elif abs(neb1_end_e - neb2_start_e) == min_e_diff:
neb1_energies = neb1_energies + neb2_energies[1:]
neb1_structures = neb1_structures + neb2.structures[1:]
neb1_forces = list(neb1_forces) + list(neb2.forces)[1:]
neb1_r = list(neb1_r) + [i + neb1_r[-1] for i in list(neb2.r)[1:]]
else:
neb1_energies = neb1_energies + list(reversed(neb2_energies))[1:]
neb1_structures = neb1_structures + list(reversed(neb2.structures))[1:]
neb1_forces = list(neb1_forces) + list(reversed(list(neb2.forces)))[1:]
neb1_r = list(neb1_r) + list(
reversed([i * -1 - list(neb2.r)[-1] * -1 + list(neb1_r)[-1] for i in list(neb2.r)[:-1]])
)
if reverse_plot:
na = NEBAnalysis(
list(reversed([i * -1 - neb1_r[-1] * -1 for i in list(neb1_r)])),
list(reversed(neb1_energies)),
list(reversed(neb1_forces)),
list(reversed(neb1_structures)),
)
else:
na = NEBAnalysis(neb1_r, neb1_energies, neb1_forces, neb1_structures)
return na
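# A minimal usage sketch for the functions above (illustration only; the
# directory names "neb_run_1"/"neb_run_2" are hypothetical, and get_plot()
# is assumed to be available on NEBAnalysis elsewhere in this module):
#
#     neb1 = NEBAnalysis.from_dir("neb_run_1")
#     neb2 = NEBAnalysis.from_dir("neb_run_2")
#     combined = combine_neb_plots([neb1, neb2], arranged_neb_analyses=True)
#     combined.get_plot().savefig("combined_neb.png")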
|
{
"content_hash": "ba4d581cf334692f81dd50ac763c1440",
"timestamp": "",
"source": "github",
"line_count": 387,
"max_line_length": 104,
"avg_line_length": 41.049095607235145,
"alnum_prop": 0.5711947626841244,
"repo_name": "vorwerkc/pymatgen",
"id": "0e1dd91e16219b8452396d85534dd716e1adf8b7",
"size": "15980",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pymatgen/analysis/transition_state.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "87"
},
{
"name": "CSS",
"bytes": "7572"
},
{
"name": "Cython",
"bytes": "38792"
},
{
"name": "HTML",
"bytes": "12642493"
},
{
"name": "Python",
"bytes": "8941675"
},
{
"name": "Roff",
"bytes": "1407429"
},
{
"name": "Shell",
"bytes": "12010"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
# Import Salt Testing libs
from salttesting import TestCase, expectedFailure
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../')
class SimpleTest(TestCase):
def test_success(self):
assert True
@expectedFailure
def test_fail(self):
assert False
if __name__ == '__main__':
from integration import run_tests
run_tests(SimpleTest, needs_daemon=False)
|
{
"content_hash": "ef12c74a610172f7a97b9728550c8f77",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 49,
"avg_line_length": 22.55,
"alnum_prop": 0.70509977827051,
"repo_name": "stephane-martin/salt-debian-packaging",
"id": "fd7c43051a3d60e309a8412e1fcd19e4ea0fd13a",
"size": "497",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "salt-2016.3.2/tests/unit/simple_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "13798"
},
{
"name": "C",
"bytes": "986"
},
{
"name": "Groff",
"bytes": "13634346"
},
{
"name": "HTML",
"bytes": "39558"
},
{
"name": "Makefile",
"bytes": "20902"
},
{
"name": "NSIS",
"bytes": "22316"
},
{
"name": "PowerShell",
"bytes": "38719"
},
{
"name": "Python",
"bytes": "40857506"
},
{
"name": "SaltStack",
"bytes": "58278"
},
{
"name": "Scheme",
"bytes": "1790"
},
{
"name": "Shell",
"bytes": "829927"
},
{
"name": "Tcl",
"bytes": "6532"
},
{
"name": "TeX",
"bytes": "11632"
}
],
"symlink_target": ""
}
|
import rpm
import os
import sys
import utils
from AbstractRPM.Package import PackageFactory
from AbstractRPM.SourceManager import SourceManager
from AbstractRPM.Configurations import ConfigurationManager
rpmtsCallback_fd = None
def install(packageNames, sourcePaths, readConfsPaths, refConfPath, writeConfPath, forced = False):
SourceManager.loadPaths(sourcePaths)
packagesToInstall = []
install = True
for packageName in packageNames:
possiblePackages = SourceManager.findPackages(packageName)
if len(possiblePackages) < 1:
try:
fd = os.open(packageName, os.O_RDONLY)
os.close(fd)
packagesToInstall.extend(PackageFactory.createPackageFromPaths([packageName]))
            except Exception:
install = False
print('Unable to find matching package for name %s' % packageName)
sys.exit(6)
elif len(possiblePackages) == 1:
package = possiblePackages[0]
packagesToInstall.append(package)
print('Found unique package matching %s : %s' % (packageName, package.path))
else:
print('Several packages match the name %s :' % packageName)
i = 0
for possiblePackage in possiblePackages:
print('[%s]%s' % (i, possiblePackage.path))
                i += 1
while True:
choice = 0
var = raw_input('which one do you want to install ? ')
try:
choice = int(var)
                except ValueError:
print "Invalid input"
continue
if choice > -1 and choice < len(possiblePackages):
package = possiblePackages[choice]
packagesToInstall.append(package)
print('%s selected' % package.path)
break
else:
print("Input out of range")
# if error on given package(s) has occurred, do nothing
    if not install:
return
headers = [package.header for package in packagesToInstall]
ConfigurationManager.loadConfigurations(readConfsPaths, refConfPath, writeConfPath)
dependencies = ConfigurationManager.verifyForInstall(headers)
if len(dependencies) > 0:
print "Dependencies cannot be resolved :"
for r in dependencies:
print("%s" % (utils.printHeaders(r), ))
if forced:
print "Forcing installation..."
else:
return
ConfigurationManager.install([package.path for package in packagesToInstall])
print "Installed packages : ", [package.path for package in packagesToInstall]
|
{
"content_hash": "fa1d623bb89cb9bdb0f9265027d74eb9",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 99,
"avg_line_length": 37.391891891891895,
"alnum_prop": 0.5908926635345139,
"repo_name": "adriengentil/ConfMgr",
"id": "d4ddb73581c3879c517a42f15303e7a4bf2d518f",
"size": "2908",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bin/install.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "18658"
}
],
"symlink_target": ""
}
|
'''
'''
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
Test.Summary = '''
Test tls
'''
def Build(Test, filename, host):
tr = Test.AddTestRun("Build", "Build test file: {0}".format(filename))
tr.Command = 'gcc -o ssl-post -O2 -g {0} -lssl -lpthread -lcrypto'.format(filename)
tr.ReturnCode = 0
    tr = Test.AddTestRun("Run-Test")
tr.Command = './ssl-post {0} 40 378'.format(host)
# ExtendTest(Build)
# need Curl
Test.SkipUnless(
Condition.HasProgram("curl", "Curl need to be installed on system for this test to work")
)
Test.ContinueOnFail = True
# Define default ATS
ts = Test.MakeATSProcess("ts", select_ports=False)
server = Test.MakeOriginServer("server")
tr = Test.AddTestRun("Build-Test", "build test file: ssl-post.c")
tr.Command = 'gcc -o ssl-post -O2 -g {0}/ssl-post.c -lssl -lpthread -lcrypto'.format(Test.RunDirectory)
tr.ReturnCode = 0
tr.Setup.CopyAs('ssl-post.c', Test.RunDirectory)
requestLocation = "test2"
reHost = "www.example.com"
testName = ""
header_count = 378
header_string = "POST /post HTTP/1.1\r\nHost: www.example.com\r\nContent-Length:1000\r\n"
for i in range(0, header_count):
header_string = "{1}header{0}:{0}\r\n".format(i, header_string)
header_string = "{0}\r\n".format(header_string)
post_body = ""
for i in range(0, 1000):
post_body = "{0}0".format(post_body)
# Add info the origin server responses
server.addResponse("sessionlog.json",
{"headers": header_string, "timestamp": "1469733493.993", "body": post_body},
{"headers": "HTTP/1.1 200 OK\r\nServer: microserver\r\nConnection: close\r\nCache-Control: max-age=3600\r\nContent-Length: 2\r\n\r\n", "timestamp": "1469733493.993", "body": "ok"})
# add ssl materials like key, certificates for the server
ts.addSSLfile("ssl/server.pem")
ts.addSSLfile("ssl/server.key")
ts.Variables.ssl_port = 4443
ts.Disk.remap_config.AddLine(
'map / http://127.0.0.1:{0}'.format(server.Variables.Port)
)
ts.Disk.ssl_multicert_config.AddLine(
'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key'
)
ts.Disk.records_config.update({
'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir),
'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir),
# enable ssl port
'proxy.config.http.server_ports': '{0} {1}:proto=http2;http:ssl'.format(ts.Variables.port, ts.Variables.ssl_port),
'proxy.config.ssl.client.verify.server': 0,
'proxy.config.ssl.server.cipher_suite': 'ECDHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-SHA256:ECDHE-RSA-AES256-SHA384:AES128-GCM-SHA256:AES256-GCM-SHA384:ECDHE-RSA-RC4-SHA:ECDHE-RSA-AES128-SHA:ECDHE-RSA-AES256-SHA:RC4-SHA:RC4-MD5:AES128-SHA:AES256-SHA:DES-CBC3-SHA!SRP:!DSS:!PSK:!aNULL:!eNULL:!SSLv2',
})
tr = Test.AddTestRun("Run-Test")
tr.Command = './ssl-post 127.0.0.1 40 378 4443'
tr.ReturnCode = 0
# time delay as proxy.config.http.wait_for_cache could be broken
tr.Processes.Default.StartBefore(server)
tr.Processes.Default.StartBefore(Test.Processes.ts, ready=When.PortOpen(ts.Variables.ssl_port))
tr.Processes.Default.Streams.stdout = "gold/ssl-post.gold"
tr.StillRunningAfter = server
tr.Processes.Default.TimeOut = 5
tr.TimeOut = 5
|
{
"content_hash": "c4748312d831589373278d6ff8dbd800",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 332,
"avg_line_length": 38.96078431372549,
"alnum_prop": 0.7141419224962254,
"repo_name": "rahmalik/trafficserver",
"id": "679209ca840ae09ac22f1a1557cfc51471d33cd4",
"size": "3974",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tests/gold_tests/tls/tls.test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "13053"
},
{
"name": "C",
"bytes": "3370423"
},
{
"name": "C++",
"bytes": "11286986"
},
{
"name": "CSS",
"bytes": "8089"
},
{
"name": "HTML",
"bytes": "238770"
},
{
"name": "Java",
"bytes": "9881"
},
{
"name": "JavaScript",
"bytes": "1609"
},
{
"name": "Lex",
"bytes": "4029"
},
{
"name": "Lua",
"bytes": "380105"
},
{
"name": "M4",
"bytes": "270404"
},
{
"name": "Makefile",
"bytes": "195829"
},
{
"name": "Objective-C",
"bytes": "13254"
},
{
"name": "Perl",
"bytes": "67408"
},
{
"name": "Perl 6",
"bytes": "1163"
},
{
"name": "Protocol Buffer",
"bytes": "4013"
},
{
"name": "Python",
"bytes": "326130"
},
{
"name": "Roff",
"bytes": "2339"
},
{
"name": "Shell",
"bytes": "86963"
},
{
"name": "Vim script",
"bytes": "192"
},
{
"name": "Yacc",
"bytes": "3251"
}
],
"symlink_target": ""
}
|
"""Sphinx ReadTheDocs theme.
From https://github.com/ryan-roemer/sphinx-bootstrap-theme.
"""
import os
VERSION = (0, 1, 5)
__version__ = ".".join(str(v) for v in VERSION)
__version_full__ = __version__
def get_html_theme_path():
"""Return list of HTML theme paths."""
cur_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__ ) ))
return cur_dir
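# A minimal conf.py sketch for a hypothetical Sphinx project using this
# theme (illustration only; not part of this module):
#
#     import sphinx_rtd_theme
#     html_theme = "sphinx_rtd_theme"
#     html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]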
|
{
"content_hash": "1b17803c8b74b9b60f00b21eea21ca7d",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 75,
"avg_line_length": 22.058823529411764,
"alnum_prop": 0.64,
"repo_name": "ajose1024/Code_Igniter_Extended",
"id": "47c5e26cc540f0274a6138a9dbf8958af4470dcb",
"size": "375",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "user_guide_src/source/_themes/sphinx_rtd_theme/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "841"
},
{
"name": "CSS",
"bytes": "2486"
},
{
"name": "HTML",
"bytes": "27410"
},
{
"name": "JavaScript",
"bytes": "6476"
},
{
"name": "Makefile",
"bytes": "4616"
},
{
"name": "PHP",
"bytes": "2176291"
},
{
"name": "Python",
"bytes": "11603"
}
],
"symlink_target": ""
}
|
from slack_sdk.errors import BotUserAccessError # noqa
from slack_sdk.errors import SlackApiError # noqa
from slack_sdk.errors import SlackClientError # noqa
from slack_sdk.errors import SlackClientNotConnectedError # noqa
from slack_sdk.errors import SlackObjectFormationError # noqa
from slack_sdk.errors import SlackRequestError # noqa
from slack import deprecation
deprecation.show_message(__name__, "slack_sdk.errors")
|
{
"content_hash": "9cc2d21da83bba7b414bcd31341c7963",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 65,
"avg_line_length": 43.2,
"alnum_prop": 0.8125,
"repo_name": "slackapi/python-slackclient",
"id": "7d5f130903ad6b18b088b552c09e86e76f75dc3f",
"size": "432",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "slack/errors.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "7756"
},
{
"name": "HTML",
"bytes": "5961"
},
{
"name": "Makefile",
"bytes": "7656"
},
{
"name": "Python",
"bytes": "360940"
},
{
"name": "Shell",
"bytes": "110"
}
],
"symlink_target": ""
}
|
"""optparse - a powerful, extensible, and easy-to-use option parser.
By Greg Ward <gward@python.net>
Originally distributed as Optik; see http://optik.sourceforge.net/ .
If you have problems with this module, please do not file bugs,
patches, or feature requests with Python; instead, use Optik's
SourceForge project page:
http://sourceforge.net/projects/optik
For support, use the optik-users@lists.sourceforge.net mailing list
(http://lists.sourceforge.net/lists/listinfo/optik-users).
"""
# Python developers: please do not make changes to this file, since
# it is automatically generated from the Optik source code.
__version__ = "1.4.1+"
__all__ = ['Option',
'SUPPRESS_HELP',
'SUPPRESS_USAGE',
'Values',
'OptionContainer',
'OptionGroup',
'OptionParser',
'HelpFormatter',
'IndentedHelpFormatter',
'TitledHelpFormatter',
'OptParseError',
'OptionError',
'OptionConflictError',
'OptionValueError',
'BadOptionError']
__copyright__ = """
Copyright (c) 2001-2003 Gregory P. Ward. All rights reserved.
Copyright (c) 2002-2003 Python Software Foundation. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the author nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import sys, os
import types
import textwrap
# This file was generated from:
# Id: option_parser.py,v 1.57 2003/08/27 02:35:41 goodger Exp
# Id: option.py,v 1.26 2003/05/08 01:20:36 gward Exp
# Id: help.py,v 1.6 2003/08/27 02:35:41 goodger Exp
# Id: errors.py,v 1.7 2003/04/21 01:53:28 gward Exp
class OptParseError (Exception):
def __init__ (self, msg):
self.msg = msg
def __str__ (self):
return self.msg
class OptionError (OptParseError):
"""
Raised if an Option instance is created with invalid or
inconsistent arguments.
"""
def __init__ (self, msg, option):
self.msg = msg
self.option_id = str(option)
def __str__ (self):
if self.option_id:
return "option %s: %s" % (self.option_id, self.msg)
else:
return self.msg
class OptionConflictError (OptionError):
"""
Raised if conflicting options are added to an OptionParser.
"""
class OptionValueError (OptParseError):
"""
Raised if an invalid option value is encountered on the command
line.
"""
class BadOptionError (OptParseError):
"""
Raised if an invalid or ambiguous option is seen on the command-line.
"""
class HelpFormatter:
"""
Abstract base class for formatting option help. OptionParser
instances should use one of the HelpFormatter subclasses for
formatting help; by default IndentedHelpFormatter is used.
Instance attributes:
indent_increment : int
the number of columns to indent per nesting level
max_help_position : int
the maximum starting column for option help text
help_position : int
the calculated starting column for option help text;
initially the same as the maximum
width : int
total number of columns for output
level : int
current indentation level
current_indent : int
current indentation level (in columns)
help_width : int
number of columns available for option help text (calculated)
"""
def __init__ (self,
indent_increment,
max_help_position,
width,
short_first):
self.indent_increment = indent_increment
self.help_position = self.max_help_position = max_help_position
self.width = width
self.current_indent = 0
self.level = 0
self.help_width = width - max_help_position
self.short_first = short_first
def indent (self):
self.current_indent += self.indent_increment
self.level += 1
def dedent (self):
self.current_indent -= self.indent_increment
assert self.current_indent >= 0, "Indent decreased below 0."
self.level -= 1
def format_usage (self, usage):
raise NotImplementedError, "subclasses must implement"
def format_heading (self, heading):
raise NotImplementedError, "subclasses must implement"
def format_description (self, description):
desc_width = self.width - self.current_indent
indent = " "*self.current_indent
return textwrap.fill(description, desc_width,
initial_indent=indent,
subsequent_indent=indent) + "\n"
def format_option (self, option):
# The help for each option consists of two parts:
# * the opt strings and metavars
# eg. ("-x", or "-fFILENAME, --file=FILENAME")
# * the user-supplied help string
# eg. ("turn on expert mode", "read data from FILENAME")
#
# If possible, we write both of these on the same line:
# -x turn on expert mode
#
# But if the opt string list is too long, we put the help
# string on a second line, indented to the same column it would
# start in if it fit on the first line.
# -fFILENAME, --file=FILENAME
# read data from FILENAME
result = []
opts = option.option_strings
opt_width = self.help_position - self.current_indent - 2
if len(opts) > opt_width:
opts = "%*s%s\n" % (self.current_indent, "", opts)
indent_first = self.help_position
else: # start help on same line as opts
opts = "%*s%-*s " % (self.current_indent, "", opt_width, opts)
indent_first = 0
result.append(opts)
if option.help:
help_lines = textwrap.wrap(option.help, self.help_width)
result.append("%*s%s\n" % (indent_first, "", help_lines[0]))
result.extend(["%*s%s\n" % (self.help_position, "", line)
for line in help_lines[1:]])
elif opts[-1] != "\n":
result.append("\n")
return "".join(result)
def store_option_strings (self, parser):
self.indent()
max_len = 0
for opt in parser.option_list:
strings = self.format_option_strings(opt)
opt.option_strings = strings
max_len = max(max_len, len(strings) + self.current_indent)
self.indent()
for group in parser.option_groups:
for opt in group.option_list:
strings = self.format_option_strings(opt)
opt.option_strings = strings
max_len = max(max_len, len(strings) + self.current_indent)
self.dedent()
self.dedent()
self.help_position = min(max_len + 2, self.max_help_position)
def format_option_strings (self, option):
"""Return a comma-separated list of option strings & metavariables."""
if option.takes_value():
metavar = option.metavar or option.dest.upper()
short_opts = [sopt + metavar for sopt in option._short_opts]
long_opts = [lopt + "=" + metavar for lopt in option._long_opts]
else:
short_opts = option._short_opts
long_opts = option._long_opts
if self.short_first:
opts = short_opts + long_opts
else:
opts = long_opts + short_opts
return ", ".join(opts)
class IndentedHelpFormatter (HelpFormatter):
"""Format help with indented section bodies.
"""
def __init__ (self,
indent_increment=2,
max_help_position=24,
width=78,
short_first=1):
HelpFormatter.__init__(
self, indent_increment, max_help_position, width, short_first)
def format_usage (self, usage):
return "usage: %s\n" % usage
def format_heading (self, heading):
return "%*s%s:\n" % (self.current_indent, "", heading)
class TitledHelpFormatter (HelpFormatter):
"""Format help with underlined section headers.
"""
def __init__ (self,
indent_increment=0,
max_help_position=24,
width=78,
short_first=0):
HelpFormatter.__init__ (
self, indent_increment, max_help_position, width, short_first)
def format_usage (self, usage):
return "%s %s\n" % (self.format_heading("Usage"), usage)
def format_heading (self, heading):
return "%s\n%s\n" % (heading, "=-"[self.level] * len(heading))
# Do the right thing with boolean values for all known Python versions.
try:
True, False
except NameError:
(True, False) = (1, 0)
_builtin_cvt = { "int" : (int, "integer"),
"long" : (long, "long integer"),
"float" : (float, "floating-point"),
"complex" : (complex, "complex") }
def check_builtin (option, opt, value):
(cvt, what) = _builtin_cvt[option.type]
try:
return cvt(value)
except ValueError:
raise OptionValueError(
#"%s: invalid %s argument %r" % (opt, what, value))
"option %s: invalid %s value: %r" % (opt, what, value))
def check_choice(option, opt, value):
if value in option.choices:
return value
else:
choices = ", ".join(map(repr, option.choices))
raise OptionValueError(
"option %s: invalid choice: %r (choose from %s)"
% (opt, value, choices))
# Not supplying a default is different from a default of None,
# so we need an explicit "not supplied" value.
NO_DEFAULT = "NO"+"DEFAULT"
class Option:
"""
Instance attributes:
_short_opts : [string]
_long_opts : [string]
action : string
type : string
dest : string
default : any
nargs : int
const : any
choices : [string]
callback : function
callback_args : (any*)
callback_kwargs : { string : any }
help : string
metavar : string
"""
# The list of instance attributes that may be set through
# keyword args to the constructor.
ATTRS = ['action',
'type',
'dest',
'default',
'nargs',
'const',
'choices',
'callback',
'callback_args',
'callback_kwargs',
'help',
'metavar']
# The set of actions allowed by option parsers. Explicitly listed
# here so the constructor can validate its arguments.
ACTIONS = ("store",
"store_const",
"store_true",
"store_false",
"append",
"count",
"callback",
"help",
"version")
# The set of actions that involve storing a value somewhere;
# also listed just for constructor argument validation. (If
# the action is one of these, there must be a destination.)
STORE_ACTIONS = ("store",
"store_const",
"store_true",
"store_false",
"append",
"count")
# The set of actions for which it makes sense to supply a value
# type, ie. where we expect an argument to this option.
TYPED_ACTIONS = ("store",
"append",
"callback")
# The set of known types for option parsers. Again, listed here for
# constructor argument validation.
TYPES = ("string", "int", "long", "float", "complex", "choice")
# Dictionary of argument checking functions, which convert and
# validate option arguments according to the option type.
#
# Signature of checking functions is:
# check(option : Option, opt : string, value : string) -> any
# where
# option is the Option instance calling the checker
# opt is the actual option seen on the command-line
# (eg. "-a", "--file")
# value is the option argument seen on the command-line
#
# The return value should be in the appropriate Python type
# for option.type -- eg. an integer if option.type == "int".
#
# If no checker is defined for a type, arguments will be
# unchecked and remain strings.
TYPE_CHECKER = { "int" : check_builtin,
"long" : check_builtin,
"float" : check_builtin,
"complex" : check_builtin,
"choice" : check_choice,
}
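    # A hypothetical custom checker following the signature documented above
    # (illustration only; "even" is not a built-in Optik type):
    #
    #     def check_even (option, opt, value):
    #         try:
    #             number = int(value)
    #         except ValueError:
    #             raise OptionValueError(
    #                 "option %s: invalid even value: %r" % (opt, value))
    #         if number % 2:
    #             raise OptionValueError(
    #                 "option %s: value must be even: %r" % (opt, value))
    #         return number
    #
    #     class MyOption (Option):
    #         TYPES = Option.TYPES + ("even",)
    #         TYPE_CHECKER = Option.TYPE_CHECKER.copy()
    #         TYPE_CHECKER["even"] = check_even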
# CHECK_METHODS is a list of unbound method objects; they are called
# by the constructor, in order, after all attributes are
# initialized. The list is created and filled in later, after all
# the methods are actually defined. (I just put it here because I
# like to define and document all class attributes in the same
# place.) Subclasses that add another _check_*() method should
# define their own CHECK_METHODS list that adds their check method
# to those from this class.
CHECK_METHODS = None
# -- Constructor/initialization methods ----------------------------
def __init__ (self, *opts, **attrs):
# Set _short_opts, _long_opts attrs from 'opts' tuple.
# Have to be set now, in case no option strings are supplied.
self._short_opts = []
self._long_opts = []
opts = self._check_opt_strings(opts)
self._set_opt_strings(opts)
# Set all other attrs (action, type, etc.) from 'attrs' dict
self._set_attrs(attrs)
# Check all the attributes we just set. There are lots of
# complicated interdependencies, but luckily they can be farmed
# out to the _check_*() methods listed in CHECK_METHODS -- which
# could be handy for subclasses! The one thing these all share
# is that they raise OptionError if they discover a problem.
for checker in self.CHECK_METHODS:
checker(self)
def _check_opt_strings (self, opts):
# Filter out None because early versions of Optik had exactly
# one short option and one long option, either of which
# could be None.
opts = filter(None, opts)
if not opts:
raise TypeError("at least one option string must be supplied")
return opts
def _set_opt_strings (self, opts):
for opt in opts:
if len(opt) < 2:
raise OptionError(
"invalid option string %r: "
"must be at least two characters long" % opt, self)
elif len(opt) == 2:
if not (opt[0] == "-" and opt[1] != "-"):
raise OptionError(
"invalid short option string %r: "
"must be of the form -x, (x any non-dash char)" % opt,
self)
self._short_opts.append(opt)
else:
if not (opt[0:2] == "--" and opt[2] != "-"):
raise OptionError(
"invalid long option string %r: "
"must start with --, followed by non-dash" % opt,
self)
self._long_opts.append(opt)
def _set_attrs (self, attrs):
for attr in self.ATTRS:
if attrs.has_key(attr):
setattr(self, attr, attrs[attr])
del attrs[attr]
else:
if attr == 'default':
setattr(self, attr, NO_DEFAULT)
else:
setattr(self, attr, None)
if attrs:
raise OptionError(
"invalid keyword arguments: %s" % ", ".join(attrs.keys()),
self)
# -- Constructor validation methods --------------------------------
def _check_action (self):
if self.action is None:
self.action = "store"
elif self.action not in self.ACTIONS:
raise OptionError("invalid action: %r" % self.action, self)
def _check_type (self):
if self.type is None:
# XXX should factor out another class attr here: list of
# actions that *require* a type
if self.action in ("store", "append"):
if self.choices is not None:
# The "choices" attribute implies "choice" type.
self.type = "choice"
else:
# No type given? "string" is the most sensible default.
self.type = "string"
else:
if self.type not in self.TYPES:
raise OptionError("invalid option type: %r" % self.type, self)
if self.action not in self.TYPED_ACTIONS:
raise OptionError(
"must not supply a type for action %r" % self.action, self)
def _check_choice(self):
if self.type == "choice":
if self.choices is None:
raise OptionError(
"must supply a list of choices for type 'choice'", self)
elif type(self.choices) not in (types.TupleType, types.ListType):
raise OptionError(
"choices must be a list of strings ('%s' supplied)"
% str(type(self.choices)).split("'")[1], self)
elif self.choices is not None:
raise OptionError(
"must not supply choices for type %r" % self.type, self)
def _check_dest (self):
if self.action in self.STORE_ACTIONS and self.dest is None:
# No destination given, and we need one for this action.
# Glean a destination from the first long option string,
# or from the first short option string if no long options.
if self._long_opts:
# eg. "--foo-bar" -> "foo_bar"
self.dest = self._long_opts[0][2:].replace('-', '_')
else:
self.dest = self._short_opts[0][1]
def _check_const (self):
if self.action != "store_const" and self.const is not None:
raise OptionError(
"'const' must not be supplied for action %r" % self.action,
self)
def _check_nargs (self):
if self.action in self.TYPED_ACTIONS:
if self.nargs is None:
self.nargs = 1
elif self.nargs is not None:
raise OptionError(
"'nargs' must not be supplied for action %r" % self.action,
self)
def _check_callback (self):
if self.action == "callback":
if not callable(self.callback):
raise OptionError(
"callback not callable: %r" % self.callback, self)
if (self.callback_args is not None and
type(self.callback_args) is not types.TupleType):
raise OptionError(
"callback_args, if supplied, must be a tuple: not %r"
% self.callback_args, self)
if (self.callback_kwargs is not None and
type(self.callback_kwargs) is not types.DictType):
raise OptionError(
"callback_kwargs, if supplied, must be a dict: not %r"
% self.callback_kwargs, self)
else:
if self.callback is not None:
raise OptionError(
"callback supplied (%r) for non-callback option"
% self.callback, self)
if self.callback_args is not None:
raise OptionError(
"callback_args supplied for non-callback option", self)
if self.callback_kwargs is not None:
raise OptionError(
"callback_kwargs supplied for non-callback option", self)
CHECK_METHODS = [_check_action,
_check_type,
_check_choice,
_check_dest,
_check_const,
_check_nargs,
_check_callback]
# -- Miscellaneous methods -----------------------------------------
def __str__ (self):
return "/".join(self._short_opts + self._long_opts)
def takes_value (self):
return self.type is not None
# -- Processing methods --------------------------------------------
def check_value (self, opt, value):
checker = self.TYPE_CHECKER.get(self.type)
if checker is None:
return value
else:
return checker(self, opt, value)
def process (self, opt, value, values, parser):
# First, convert the value(s) to the right type. Howl if any
# value(s) are bogus.
if value is not None:
if self.nargs == 1:
value = self.check_value(opt, value)
else:
value = tuple([self.check_value(opt, v) for v in value])
# And then take whatever action is expected of us.
# This is a separate method to make life easier for
# subclasses to add new actions.
return self.take_action(
self.action, self.dest, opt, value, values, parser)
def take_action (self, action, dest, opt, value, values, parser):
if action == "store":
setattr(values, dest, value)
elif action == "store_const":
setattr(values, dest, self.const)
elif action == "store_true":
setattr(values, dest, True)
elif action == "store_false":
setattr(values, dest, False)
elif action == "append":
values.ensure_value(dest, []).append(value)
elif action == "count":
setattr(values, dest, values.ensure_value(dest, 0) + 1)
elif action == "callback":
args = self.callback_args or ()
kwargs = self.callback_kwargs or {}
self.callback(self, opt, value, parser, *args, **kwargs)
elif action == "help":
parser.print_help()
sys.exit(0)
elif action == "version":
parser.print_version()
sys.exit(0)
else:
raise RuntimeError, "unknown action %r" % self.action
return 1
# class Option
SUPPRESS_HELP = "SUPPRESS"+"HELP"
SUPPRESS_USAGE = "SUPPRESS"+"USAGE"
class Values:
def __init__ (self, defaults=None):
if defaults:
for (attr, val) in defaults.items():
setattr(self, attr, val)
def __repr__ (self):
return ("<%s at 0x%x: %r>"
% (self.__class__.__name__, id(self), self.__dict__))
def _update_careful (self, dict):
"""
Update the option values from an arbitrary dictionary, but only
use keys from dict that already have a corresponding attribute
in self. Any keys in dict without a corresponding attribute
are silently ignored.
"""
for attr in dir(self):
if dict.has_key(attr):
dval = dict[attr]
if dval is not None:
setattr(self, attr, dval)
def _update_loose (self, dict):
"""
Update the option values from an arbitrary dictionary,
using all keys from the dictionary regardless of whether
they have a corresponding attribute in self or not.
"""
self.__dict__.update(dict)
def _update (self, dict, mode):
if mode == "careful":
self._update_careful(dict)
elif mode == "loose":
self._update_loose(dict)
else:
raise ValueError, "invalid update mode: %r" % mode
def read_module (self, modname, mode="careful"):
__import__(modname)
mod = sys.modules[modname]
self._update(vars(mod), mode)
def read_file (self, filename, mode="careful"):
vars = {}
execfile(filename, vars)
self._update(vars, mode)
def ensure_value (self, attr, value):
if not hasattr(self, attr) or getattr(self, attr) is None:
setattr(self, attr, value)
return getattr(self, attr)
class OptionContainer:
"""
Abstract base class.
Class attributes:
standard_option_list : [Option]
list of standard options that will be accepted by all instances
of this parser class (intended to be overridden by subclasses).
Instance attributes:
option_list : [Option]
the list of Option objects contained by this OptionContainer
_short_opt : { string : Option }
dictionary mapping short option strings, eg. "-f" or "-X",
to the Option instances that implement them. If an Option
        has multiple short option strings, it will appear in this
dictionary multiple times. [1]
_long_opt : { string : Option }
dictionary mapping long option strings, eg. "--file" or
"--exclude", to the Option instances that implement them.
Again, a given Option can occur multiple times in this
dictionary. [1]
defaults : { string : any }
dictionary mapping option destination names to default
values for each destination [1]
[1] These mappings are common to (shared by) all components of the
controlling OptionParser, where they are initially created.
"""
def __init__ (self, option_class, conflict_handler, description):
# Initialize the option list and related data structures.
# This method must be provided by subclasses, and it must
# initialize at least the following instance attributes:
# option_list, _short_opt, _long_opt, defaults.
self._create_option_list()
self.option_class = option_class
self.set_conflict_handler(conflict_handler)
self.set_description(description)
def _create_option_mappings (self):
# For use by OptionParser constructor -- create the master
# option mappings used by this OptionParser and all
# OptionGroups that it owns.
self._short_opt = {} # single letter -> Option instance
self._long_opt = {} # long option -> Option instance
self.defaults = {} # maps option dest -> default value
def _share_option_mappings (self, parser):
# For use by OptionGroup constructor -- use shared option
# mappings from the OptionParser that owns this OptionGroup.
self._short_opt = parser._short_opt
self._long_opt = parser._long_opt
self.defaults = parser.defaults
def set_conflict_handler (self, handler):
if handler not in ("ignore", "error", "resolve"):
raise ValueError, "invalid conflict_resolution value %r" % handler
self.conflict_handler = handler
def set_description (self, description):
self.description = description
# -- Option-adding methods -----------------------------------------
def _check_conflict (self, option):
conflict_opts = []
for opt in option._short_opts:
if self._short_opt.has_key(opt):
conflict_opts.append((opt, self._short_opt[opt]))
for opt in option._long_opts:
if self._long_opt.has_key(opt):
conflict_opts.append((opt, self._long_opt[opt]))
if conflict_opts:
handler = self.conflict_handler
if handler == "ignore": # behaviour for Optik 1.0, 1.1
pass
elif handler == "error": # new in 1.2
raise OptionConflictError(
"conflicting option string(s): %s"
% ", ".join([co[0] for co in conflict_opts]),
option)
elif handler == "resolve": # new in 1.2
for (opt, c_option) in conflict_opts:
if opt.startswith("--"):
c_option._long_opts.remove(opt)
del self._long_opt[opt]
else:
c_option._short_opts.remove(opt)
del self._short_opt[opt]
if not (c_option._short_opts or c_option._long_opts):
c_option.container.option_list.remove(c_option)
def add_option (self, *args, **kwargs):
"""add_option(Option)
add_option(opt_str, ..., kwarg=val, ...)
"""
if type(args[0]) is types.StringType:
option = self.option_class(*args, **kwargs)
elif len(args) == 1 and not kwargs:
option = args[0]
if not isinstance(option, Option):
raise TypeError, "not an Option instance: %r" % option
else:
raise TypeError, "invalid arguments"
self._check_conflict(option)
self.option_list.append(option)
option.container = self
for opt in option._short_opts:
self._short_opt[opt] = option
for opt in option._long_opts:
self._long_opt[opt] = option
if option.dest is not None: # option has a dest, we need a default
if option.default is not NO_DEFAULT:
self.defaults[option.dest] = option.default
elif not self.defaults.has_key(option.dest):
self.defaults[option.dest] = None
return option
def add_options (self, option_list):
for option in option_list:
self.add_option(option)
# -- Option query/removal methods ----------------------------------
def get_option (self, opt_str):
return (self._short_opt.get(opt_str) or
self._long_opt.get(opt_str))
def has_option (self, opt_str):
return (self._short_opt.has_key(opt_str) or
self._long_opt.has_key(opt_str))
def remove_option (self, opt_str):
option = self._short_opt.get(opt_str)
if option is None:
option = self._long_opt.get(opt_str)
if option is None:
raise ValueError("no such option %r" % opt_str)
for opt in option._short_opts:
del self._short_opt[opt]
for opt in option._long_opts:
del self._long_opt[opt]
option.container.option_list.remove(option)
# -- Help-formatting methods ---------------------------------------
def format_option_help (self, formatter):
if not self.option_list:
return ""
result = []
for option in self.option_list:
if not option.help is SUPPRESS_HELP:
result.append(formatter.format_option(option))
return "".join(result)
def format_description (self, formatter):
if self.description:
return formatter.format_description(self.description)
else:
return ""
def format_help (self, formatter):
result = []
if self.description:
result.append(self.format_description(formatter))
if self.option_list:
result.append(self.format_option_help(formatter))
return "\n".join(result)
class OptionGroup (OptionContainer):
def __init__ (self, parser, title, description=None):
self.parser = parser
OptionContainer.__init__(
self, parser.option_class, parser.conflict_handler, description)
self.title = title
def _create_option_list (self):
self.option_list = []
self._share_option_mappings(self.parser)
def set_title (self, title):
self.title = title
# -- Help-formatting methods ---------------------------------------
def format_help (self, formatter):
result = formatter.format_heading(self.title)
formatter.indent()
result += OptionContainer.format_help(self, formatter)
formatter.dedent()
return result
class OptionParser (OptionContainer):
"""
Class attributes:
standard_option_list : [Option]
list of standard options that will be accepted by all instances
of this parser class (intended to be overridden by subclasses).
Instance attributes:
usage : string
a usage string for your program. Before it is displayed
to the user, "%prog" will be expanded to the name of
your program (self.prog or os.path.basename(sys.argv[0])).
prog : string
the name of the current program (to override
os.path.basename(sys.argv[0])).
allow_interspersed_args : boolean = true
if true, positional arguments may be interspersed with options.
Assuming -a and -b each take a single argument, the command-line
-ablah foo bar -bboo baz
will be interpreted the same as
-ablah -bboo -- foo bar baz
If this flag were false, that command line would be interpreted as
-ablah -- foo bar -bboo baz
-- ie. we stop processing options as soon as we see the first
non-option argument. (This is the tradition followed by
Python's getopt module, Perl's Getopt::Std, and other argument-
parsing libraries, but it is generally annoying to users.)
rargs : [string]
the argument list currently being parsed. Only set when
parse_args() is active, and continually trimmed down as
we consume arguments. Mainly there for the benefit of
callback options.
largs : [string]
the list of leftover arguments that we have skipped while
parsing options. If allow_interspersed_args is false, this
list is always empty.
values : Values
the set of option values currently being accumulated. Only
set when parse_args() is active. Also mainly for callbacks.
Because of the 'rargs', 'largs', and 'values' attributes,
OptionParser is not thread-safe. If, for some perverse reason, you
need to parse command-line arguments simultaneously in different
threads, use different OptionParser instances.
"""
standard_option_list = []
def __init__ (self,
usage=None,
option_list=None,
option_class=Option,
version=None,
conflict_handler="error",
description=None,
formatter=None,
add_help_option=1,
prog=None):
OptionContainer.__init__(
self, option_class, conflict_handler, description)
self.set_usage(usage)
self.prog = prog
self.version = version
self.allow_interspersed_args = 1
if formatter is None:
formatter = IndentedHelpFormatter()
self.formatter = formatter
# Populate the option list; initial sources are the
# standard_option_list class attribute, the 'option_list'
# argument, and (if applicable) the _add_version_option() and
# _add_help_option() methods.
self._populate_option_list(option_list,
add_help=add_help_option)
self._init_parsing_state()
# -- Private methods -----------------------------------------------
# (used by our or OptionContainer's constructor)
def _create_option_list (self):
self.option_list = []
self.option_groups = []
self._create_option_mappings()
def _add_help_option (self):
self.add_option("-h", "--help",
action="help",
help="show this help message and exit")
def _add_version_option (self):
self.add_option("--version",
action="version",
help="show program's version number and exit")
def _populate_option_list (self, option_list, add_help=1):
if self.standard_option_list:
self.add_options(self.standard_option_list)
if option_list:
self.add_options(option_list)
if self.version:
self._add_version_option()
if add_help:
self._add_help_option()
def _init_parsing_state (self):
# These are set in parse_args() for the convenience of callbacks.
self.rargs = None
self.largs = None
self.values = None
# -- Simple modifier methods ---------------------------------------
def set_usage (self, usage):
if usage is None:
self.usage = "%prog [options]"
elif usage is SUPPRESS_USAGE:
self.usage = None
elif usage.startswith("usage: "):
# for backwards compatibility with Optik 1.3 and earlier
self.usage = usage[7:]
else:
self.usage = usage
def enable_interspersed_args (self):
self.allow_interspersed_args = 1
def disable_interspersed_args (self):
self.allow_interspersed_args = 0
def set_default (self, dest, value):
self.defaults[dest] = value
def set_defaults (self, **kwargs):
self.defaults.update(kwargs)
def get_default_values (self):
return Values(self.defaults)
# -- OptionGroup methods -------------------------------------------
def add_option_group (self, *args, **kwargs):
# XXX lots of overlap with OptionContainer.add_option()
if type(args[0]) is types.StringType:
group = OptionGroup(self, *args, **kwargs)
elif len(args) == 1 and not kwargs:
group = args[0]
if not isinstance(group, OptionGroup):
raise TypeError, "not an OptionGroup instance: %r" % group
if group.parser is not self:
raise ValueError, "invalid OptionGroup (wrong parser)"
else:
raise TypeError, "invalid arguments"
self.option_groups.append(group)
return group
def get_option_group (self, opt_str):
option = (self._short_opt.get(opt_str) or
self._long_opt.get(opt_str))
if option and option.container is not self:
return option.container
return None
# -- Option-parsing methods ----------------------------------------
def _get_args (self, args):
if args is None:
return sys.argv[1:]
else:
return args[:] # don't modify caller's list
def parse_args (self, args=None, values=None):
"""
parse_args(args : [string] = sys.argv[1:],
values : Values = None)
-> (values : Values, args : [string])
Parse the command-line options found in 'args' (default:
sys.argv[1:]). Any errors result in a call to 'error()', which
by default prints the usage message to stderr and calls
sys.exit() with an error message. On success returns a pair
        (values, args) where 'values' is a Values instance (with all
your option values) and 'args' is the list of arguments left
over after parsing options.
"""
rargs = self._get_args(args)
if values is None:
values = self.get_default_values()
# Store the halves of the argument list as attributes for the
# convenience of callbacks:
# rargs
# the rest of the command-line (the "r" stands for
# "remaining" or "right-hand")
# largs
# the leftover arguments -- ie. what's left after removing
# options and their arguments (the "l" stands for "leftover"
# or "left-hand")
self.rargs = rargs
self.largs = largs = []
self.values = values
try:
stop = self._process_args(largs, rargs, values)
except (BadOptionError, OptionValueError), err:
self.error(err.msg)
args = largs + rargs
return self.check_values(values, args)
def check_values (self, values, args):
"""
check_values(values : Values, args : [string])
-> (values : Values, args : [string])
Check that the supplied option values and leftover arguments are
valid. Returns the option values and leftover arguments
(possibly adjusted, possibly completely new -- whatever you
like). Default implementation just returns the passed-in
values; subclasses may override as desired.
"""
return (values, args)
def _process_args (self, largs, rargs, values):
"""_process_args(largs : [string],
rargs : [string],
values : Values)
Process command-line arguments and populate 'values', consuming
options and arguments from 'rargs'. If 'allow_interspersed_args' is
false, stop at the first non-option argument. If true, accumulate any
interspersed non-option arguments in 'largs'.
"""
while rargs:
arg = rargs[0]
# We handle bare "--" explicitly, and bare "-" is handled by the
# standard arg handler since the short arg case ensures that the
# len of the opt string is greater than 1.
if arg == "--":
del rargs[0]
return
elif arg[0:2] == "--":
# process a single long option (possibly with value(s))
self._process_long_opt(rargs, values)
elif arg[:1] == "-" and len(arg) > 1:
# process a cluster of short options (possibly with
# value(s) for the last one only)
self._process_short_opts(rargs, values)
elif self.allow_interspersed_args:
largs.append(arg)
del rargs[0]
else:
return # stop now, leave this arg in rargs
# Say this is the original argument list:
# [arg0, arg1, ..., arg(i-1), arg(i), arg(i+1), ..., arg(N-1)]
# ^
# (we are about to process arg(i)).
#
# Then rargs is [arg(i), ..., arg(N-1)] and largs is a *subset* of
# [arg0, ..., arg(i-1)] (any options and their arguments will have
# been removed from largs).
#
# The while loop will usually consume 1 or more arguments per pass.
# If it consumes 1 (eg. arg is an option that takes no arguments),
# then after _process_arg() is done the situation is:
#
# largs = subset of [arg0, ..., arg(i)]
# rargs = [arg(i+1), ..., arg(N-1)]
#
# If allow_interspersed_args is false, largs will always be
# *empty* -- still a subset of [arg0, ..., arg(i-1)], but
# not a very interesting subset!
def _match_long_opt (self, opt):
"""_match_long_opt(opt : string) -> string
Determine which long option string 'opt' matches, ie. which one
        it is an unambiguous abbreviation for.  Raises BadOptionError if
'opt' doesn't unambiguously match any long option string.
"""
return _match_abbrev(opt, self._long_opt)
def _process_long_opt (self, rargs, values):
arg = rargs.pop(0)
# Value explicitly attached to arg? Pretend it's the next
# argument.
if "=" in arg:
(opt, next_arg) = arg.split("=", 1)
rargs.insert(0, next_arg)
had_explicit_value = 1
else:
opt = arg
had_explicit_value = 0
opt = self._match_long_opt(opt)
option = self._long_opt[opt]
if option.takes_value():
nargs = option.nargs
if len(rargs) < nargs:
if nargs == 1:
self.error("%s option requires a value" % opt)
else:
self.error("%s option requires %d values"
% (opt, nargs))
elif nargs == 1:
value = rargs.pop(0)
else:
value = tuple(rargs[0:nargs])
del rargs[0:nargs]
elif had_explicit_value:
self.error("%s option does not take a value" % opt)
else:
value = None
option.process(opt, value, values, self)
def _process_short_opts (self, rargs, values):
arg = rargs.pop(0)
stop = 0
i = 1
for ch in arg[1:]:
opt = "-" + ch
option = self._short_opt.get(opt)
i += 1 # we have consumed a character
if not option:
self.error("no such option: %s" % opt)
if option.takes_value():
# Any characters left in arg? Pretend they're the
# next arg, and stop consuming characters of arg.
if i < len(arg):
rargs.insert(0, arg[i:])
stop = 1
nargs = option.nargs
if len(rargs) < nargs:
if nargs == 1:
self.error("%s option requires a value" % opt)
else:
self.error("%s option requires %s values"
% (opt, nargs))
elif nargs == 1:
value = rargs.pop(0)
else:
value = tuple(rargs[0:nargs])
del rargs[0:nargs]
else: # option doesn't take a value
value = None
option.process(opt, value, values, self)
if stop:
break
# -- Feedback methods ----------------------------------------------
def get_prog_name (self):
if self.prog is None:
return os.path.basename(sys.argv[0])
else:
return self.prog
def error (self, msg):
"""error(msg : string)
Print a usage message incorporating 'msg' to stderr and exit.
If you override this in a subclass, it should not return -- it
should either exit or raise an exception.
"""
self.print_usage(sys.stderr)
sys.stderr.write("%s: error: %s\n" % (self.get_prog_name(), msg))
sys.exit(2) # command-line usage error
def get_usage (self):
if self.usage:
return self.formatter.format_usage(
self.usage.replace("%prog", self.get_prog_name()))
else:
return ""
def print_usage (self, file=None):
"""print_usage(file : file = stdout)
Print the usage message for the current program (self.usage) to
        'file' (default stdout).  Any occurrence of the string "%prog" in
self.usage is replaced with the name of the current program
(basename of sys.argv[0]). Does nothing if self.usage is empty
or not defined.
"""
if self.usage:
print >>file, self.get_usage()
def get_version (self):
if self.version:
return self.version.replace("%prog", self.get_prog_name())
else:
return ""
def print_version (self, file=None):
"""print_version(file : file = stdout)
Print the version message for this program (self.version) to
        'file' (default stdout).  As with print_usage(), any occurrence
of "%prog" in self.version is replaced by the current program's
name. Does nothing if self.version is empty or undefined.
"""
if self.version:
print >>file, self.get_version()
def format_option_help (self, formatter=None):
if formatter is None:
formatter = self.formatter
formatter.store_option_strings(self)
result = []
result.append(formatter.format_heading("Options"))
formatter.indent()
if self.option_list:
result.append(OptionContainer.format_option_help(self, formatter))
result.append("\n")
for group in self.option_groups:
result.append(group.format_help(formatter))
result.append("\n")
formatter.dedent()
# Drop the last "\n", or the header if no options or option groups:
return "".join(result[:-1])
def format_help (self, formatter=None):
if formatter is None:
formatter = self.formatter
result = []
if self.usage:
result.append(self.get_usage() + "\n")
if self.description:
result.append(self.format_description(formatter) + "\n")
result.append(self.format_option_help(formatter))
return "".join(result)
def print_help (self, file=None):
"""print_help(file : file = stdout)
Print an extended help message, listing all options and any
help text provided with them, to 'file' (default stdout).
"""
if file is None:
file = sys.stdout
file.write(self.format_help())
# class OptionParser
def _match_abbrev (s, wordmap):
"""_match_abbrev(s : string, wordmap : {string : Option}) -> string
Return the string key in 'wordmap' for which 's' is an unambiguous
    abbreviation.  If 's' is found to be ambiguous or doesn't match any key
    in 'wordmap', raise BadOptionError.
"""
# Is there an exact match?
if wordmap.has_key(s):
return s
else:
# Isolate all words with s as a prefix.
possibilities = [word for word in wordmap.keys()
if word.startswith(s)]
# No exact match, so there had better be just one possibility.
if len(possibilities) == 1:
return possibilities[0]
elif not possibilities:
raise BadOptionError("no such option: %s" % s)
else:
# More than one possible completion: ambiguous prefix.
raise BadOptionError("ambiguous option: %s (%s?)"
% (s, ", ".join(possibilities)))
# Some day, there might be many Option classes. As of Optik 1.3, the
# preferred way to instantiate Options is indirectly, via make_option(),
# which will become a factory function when there are many Option
# classes.
make_option = Option
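# A minimal usage sketch (hypothetical script; the options shown are
# illustrative, not defined by this module):
#
#     parser = OptionParser(usage="%prog [options] arg1 arg2",
#                           version="%prog 1.0")
#     parser.add_option("-f", "--file", dest="filename", metavar="FILE",
#                       help="write report to FILE")
#     parser.add_option("-q", "--quiet", action="store_false", dest="verbose",
#                       default=True, help="don't print status messages")
#     (options, args) = parser.parse_args()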
|
{
"content_hash": "a51515a4dc2950a79c127c2a52ab1063",
"timestamp": "",
"source": "github",
"line_count": 1417,
"max_line_length": 79,
"avg_line_length": 36.403669724770644,
"alnum_prop": 0.5623642990074442,
"repo_name": "google-code-export/django-hotclub",
"id": "c21663c555b27e62b5d1f8f8b126a9bd979f2315",
"size": "51584",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "libs/external_libs/docutils-0.4/extras/optparse.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ActionScript",
"bytes": "4084"
},
{
"name": "ApacheConf",
"bytes": "20791"
},
{
"name": "Assembly",
"bytes": "3294"
},
{
"name": "Boo",
"bytes": "1111"
},
{
"name": "C",
"bytes": "146718"
},
{
"name": "C#",
"bytes": "16949"
},
{
"name": "C++",
"bytes": "79372"
},
{
"name": "CSS",
"bytes": "147815"
},
{
"name": "Clean",
"bytes": "2878"
},
{
"name": "Clojure",
"bytes": "21964"
},
{
"name": "Common Lisp",
"bytes": "48874"
},
{
"name": "D",
"bytes": "5475"
},
{
"name": "Diff",
"bytes": "10634"
},
{
"name": "Dylan",
"bytes": "683"
},
{
"name": "Emacs Lisp",
"bytes": "29569"
},
{
"name": "Erlang",
"bytes": "5746"
},
{
"name": "FORTRAN",
"bytes": "27700"
},
{
"name": "Genshi",
"bytes": "2298"
},
{
"name": "Gettext Catalog",
"bytes": "764716"
},
{
"name": "Gnuplot",
"bytes": "10376"
},
{
"name": "Groff",
"bytes": "47103"
},
{
"name": "HTML",
"bytes": "8286203"
},
{
"name": "Haskell",
"bytes": "40419"
},
{
"name": "Java",
"bytes": "81989"
},
{
"name": "JavaScript",
"bytes": "74222"
},
{
"name": "Logtalk",
"bytes": "7260"
},
{
"name": "Lua",
"bytes": "8677"
},
{
"name": "Makefile",
"bytes": "60193"
},
{
"name": "Matlab",
"bytes": "469"
},
{
"name": "Moocode",
"bytes": "3343"
},
{
"name": "Myghty",
"bytes": "4713"
},
{
"name": "Objective-C",
"bytes": "778"
},
{
"name": "PHP",
"bytes": "17078"
},
{
"name": "Pascal",
"bytes": "84519"
},
{
"name": "Perl",
"bytes": "32503"
},
{
"name": "Python",
"bytes": "7043260"
},
{
"name": "R",
"bytes": "3468"
},
{
"name": "Redcode",
"bytes": "830"
},
{
"name": "Ruby",
"bytes": "91160"
},
{
"name": "Scala",
"bytes": "138"
},
{
"name": "Scheme",
"bytes": "45856"
},
{
"name": "Shell",
"bytes": "119136"
},
{
"name": "Smalltalk",
"bytes": "16163"
},
{
"name": "Standard ML",
"bytes": "42416"
},
{
"name": "TeX",
"bytes": "77612"
},
{
"name": "VimL",
"bytes": "16660"
},
{
"name": "Visual Basic",
"bytes": "846"
},
{
"name": "XSLT",
"bytes": "755"
},
{
"name": "mupad",
"bytes": "2434"
}
],
"symlink_target": ""
}
|
import unittest
from telemetry.internal.platform import system_info
from telemetry.page import page as page_module
from telemetry.story import story_set
from gpu_tests import gpu_test_expectations
VENDOR_NVIDIA = 0x10DE
VENDOR_AMD = 0x1002
VENDOR_INTEL = 0x8086
VENDOR_STRING_IMAGINATION = 'Imagination Technologies'
DEVICE_STRING_SGX = 'PowerVR SGX 554'
class StubPlatform(object):
def __init__(self, os_name, os_version_name=None):
self.os_name = os_name
self.os_version_name = os_version_name
def GetOSName(self):
return self.os_name
def GetOSVersionName(self):
return self.os_version_name
class StubBrowser(object):
def __init__(self, platform, gpu, device, vendor_string, device_string,
browser_type=None, gl_renderer=None):
self.platform = platform
self.browser_type = browser_type
sys_info = {
'model_name': '',
'gpu': {
'devices': [
{'vendor_id': gpu, 'device_id': device,
'vendor_string': vendor_string, 'device_string': device_string},
]
}
}
if gl_renderer:
sys_info['gpu']['aux_attributes'] = {
'gl_renderer': gl_renderer
}
self.system_info = system_info.SystemInfo.FromDict(sys_info)
@property
def supports_system_info(self):
    return bool(self.system_info)
def GetSystemInfo(self):
return self.system_info
class SampleTestExpectations(gpu_test_expectations.GpuTestExpectations):
def SetExpectations(self):
# Test GPU conditions.
self.Fail('test1.html', ['nvidia', 'intel'], bug=123)
self.Fail('test2.html', [('nvidia', 0x1001), ('nvidia', 0x1002)], bug=123)
self.Fail('test3.html', ['win', 'intel', ('amd', 0x1001)], bug=123)
self.Fail('test4.html', ['imagination'])
self.Fail('test5.html', [('imagination', 'PowerVR SGX 554')])
# Test ANGLE conditions.
self.Fail('test6.html', ['win', 'd3d9'], bug=345)
# Test flaky expectations.
self.Flaky('test7.html', bug=123, max_num_retries=5)
self.Flaky('test8.html', ['win'], bug=123, max_num_retries=6)
self.Flaky('wildcardtest*.html', ['win'], bug=123, max_num_retries=7)
class GpuTestExpectationsTest(unittest.TestCase):
def setUp(self):
self.expectations = SampleTestExpectations()
def assertExpectationEquals(self, expected, page, platform=StubPlatform(''),
gpu=0, device=0, vendor_string='',
device_string='', browser_type=None,
gl_renderer=None):
self.expectations.ClearExpectationsCacheForTesting()
result = self.expectations.GetExpectationForPage(StubBrowser(
platform, gpu, device, vendor_string, device_string,
browser_type=browser_type, gl_renderer=gl_renderer), page)
self.assertEquals(expected, result)
def getRetriesForPage(self, page, platform=StubPlatform(''), gpu=0,
device=0, vendor_string='', device_string=''):
self.expectations.ClearExpectationsCacheForTesting()
return self.expectations.GetFlakyRetriesForPage(StubBrowser(
platform, gpu, device, vendor_string, device_string), page)
# Pages with expectations for a GPU should only return them when running with
# that GPU
def testGpuExpectations(self):
ps = story_set.StorySet()
page = page_module.Page('http://test.com/test1.html', ps)
self.assertExpectationEquals('fail', page, gpu=VENDOR_NVIDIA)
self.assertExpectationEquals('fail', page, gpu=VENDOR_INTEL)
self.assertExpectationEquals('pass', page, gpu=VENDOR_AMD)
# Pages with expectations for a GPU should only return them when running with
# that GPU
def testGpuDeviceIdExpectations(self):
ps = story_set.StorySet()
page = page_module.Page('http://test.com/test2.html', ps)
self.assertExpectationEquals('fail', page, gpu=VENDOR_NVIDIA, device=0x1001)
self.assertExpectationEquals('fail', page, gpu=VENDOR_NVIDIA, device=0x1002)
self.assertExpectationEquals('pass', page, gpu=VENDOR_NVIDIA, device=0x1003)
self.assertExpectationEquals('pass', page, gpu=VENDOR_AMD, device=0x1001)
# Pages with multiple expectations should only return them when all criteria
# are met
def testMultipleExpectations(self):
ps = story_set.StorySet()
page = page_module.Page('http://test.com/test3.html', ps)
self.assertExpectationEquals('fail', page,
StubPlatform('win'), VENDOR_AMD, 0x1001)
self.assertExpectationEquals('fail', page,
StubPlatform('win'), VENDOR_INTEL, 0x1002)
self.assertExpectationEquals('pass', page,
StubPlatform('win'), VENDOR_NVIDIA, 0x1001)
self.assertExpectationEquals('pass', page,
StubPlatform('mac'), VENDOR_AMD, 0x1001)
self.assertExpectationEquals('pass', page,
StubPlatform('win'), VENDOR_AMD, 0x1002)
# Pages with expectations based on GPU vendor string.
def testGpuVendorStringExpectations(self):
ps = story_set.StorySet()
page = page_module.Page('http://test.com/test4.html', ps)
self.assertExpectationEquals('fail', page,
vendor_string=VENDOR_STRING_IMAGINATION,
device_string=DEVICE_STRING_SGX)
self.assertExpectationEquals('fail', page,
vendor_string=VENDOR_STRING_IMAGINATION,
device_string='Triangle Monster 3000')
self.assertExpectationEquals('pass', page,
vendor_string='Acme',
device_string=DEVICE_STRING_SGX)
# Pages with expectations based on GPU vendor and renderer string pairs.
def testGpuDeviceStringExpectations(self):
ps = story_set.StorySet()
page = page_module.Page('http://test.com/test5.html', ps)
self.assertExpectationEquals('fail', page,
vendor_string=VENDOR_STRING_IMAGINATION,
device_string=DEVICE_STRING_SGX)
self.assertExpectationEquals('pass', page,
vendor_string=VENDOR_STRING_IMAGINATION,
device_string='Triangle Monster 3000')
self.assertExpectationEquals('pass', page,
vendor_string='Acme',
device_string=DEVICE_STRING_SGX)
# Test ANGLE conditions.
def testANGLEConditions(self):
ps = story_set.StorySet()
page = page_module.Page('http://test.com/test6.html', ps)
self.assertExpectationEquals('pass', page, StubPlatform('win'),
gl_renderer='Direct3D11')
self.assertExpectationEquals('fail', page, StubPlatform('win'),
gl_renderer='Direct3D9')
# Ensure retry mechanism is working.
def testFlakyExpectation(self):
ps = story_set.StorySet()
page = page_module.Page('http://test.com/test7.html', ps)
self.assertExpectationEquals('flaky', page)
self.assertEquals(5, self.getRetriesForPage(page))
# Ensure the filtering from the TestExpectations superclass still works.
def testFlakyPerPlatformExpectation(self):
ps = story_set.StorySet()
page1 = page_module.Page('http://test.com/test8.html', ps)
self.assertExpectationEquals('flaky', page1, StubPlatform('win'))
self.assertEquals(6, self.getRetriesForPage(page1, StubPlatform('win')))
self.assertExpectationEquals('pass', page1, StubPlatform('mac'))
self.assertEquals(0, self.getRetriesForPage(page1, StubPlatform('mac')))
page2 = page_module.Page('http://test.com/wildcardtest1.html', ps)
self.assertExpectationEquals('flaky', page2, StubPlatform('win'))
self.assertEquals(7, self.getRetriesForPage(page2, StubPlatform('win')))
self.assertExpectationEquals('pass', page2, StubPlatform('mac'))
self.assertEquals(0, self.getRetriesForPage(page2, StubPlatform('mac')))
|
{
"content_hash": "d95e1a3febdcc8c1558a66f7197f13ae",
"timestamp": "",
"source": "github",
"line_count": 180,
"max_line_length": 80,
"avg_line_length": 43.62777777777778,
"alnum_prop": 0.6643321023812556,
"repo_name": "js0701/chromium-crosswalk",
"id": "23bad2c94896a376666da251114d512acf339604",
"size": "8015",
"binary": false,
"copies": "12",
"ref": "refs/heads/master",
"path": "content/test/gpu/gpu_tests/gpu_test_expectations_unittest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
import math
import common.config
import common.util as util
import common.messaging as messaging
from common.geo import Line, LineSet
from common.objects import Object
from tree.common.server import Server
import tree.common.protocol.client_configuration_pb2 as client_config_protocol
import tree.common.coordinator_message_type as coordinator_message_type
import tree.recursive.config as config
import tree.recursive.message_type as message_type
import tree.recursive.protocol.modifications_pb2 as modifications_protocol
import tree.recursive.protocol.queries_pb2 as query_protocol
class Transaction(object):
def __init__(self, client, identifier, read_locks, write_locks):
self.client = client
self.identifier = identifier
self.read_locks = read_locks
self.write_locks = write_locks
def commit(self):
return self.client.end_transaction(self.identifier, self.read_locks, self.write_locks, False)
def insert(self, *args):
return self.client.insert(*args)
def find(self, start = 0, end = common.config.MAX_EXTEND):
return self.client.find(start, end)
class PendingModificationResult(object):
def __init__(self, conn, identifier):
self.conn = conn
self.identifier = identifier
self.result = None
def wait(self):
if self.result:
return
result_type, result_msg = self.conn.receive()
if result_type is None:
raise RuntimeError("Received no answer from server")
self.result = modifications_protocol.ModificationResult()
self.result.ParseFromString(result_msg)
assert self.result.replyTo == self.identifier
def get_ok(self):
self.wait()
return self.result.okay
class PendingQueryResponse(object):
def __init__(self, conn, identifier):
self.conn = conn
self.identifier = identifier
self.response = None
def wait(self):
if self.response:
return
result_type, result_msg = self.conn.receive()
if result_type is None:
raise RuntimeError("Received no answer from server")
self.response = query_protocol.QueryResponse()
self.response.ParseFromString(result_msg)
assert self.response.replyTo == self.identifier
def get_objects(self):
self.wait()
return self.response.objects
class Client(object):
def __init__(self, address, port = config.COORDINATOR_PORT):
self.socket = messaging.connect(address,
port)
self.socket.set_timeout(common.config.CLIENT_TIMEOUT)
self.servers_root = None
self.servers_leaves = None
self.identifier = -1
while not self.servers_root or self.identifier < 0:
self.update()
self.next_transaction_id = 1
self.next_query_id = 1
self.next_modification_id = 1
def close(self):
self.socket.close()
self.servers_root.close()
def find_parent_layers(self, servers):
layers = []
previous = servers
while len(previous) > 1:
parents = []
for server in previous:
assert server
if server.parent not in parents:
parents.append(server.parent)
layers.append(previous)
previous = parents
layers.append(previous)
layers.reverse()
return layers
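    # Worked example (illustrative): for leaf servers [a, b, c, d] where a/b
    # share parent p1, c/d share parent p2, and p1/p2 share root r, this
    # returns [[r], [p1, p2], [a, b, c, d]] -- root layer first, leaves last,
    # which is the order init_transaction() acquires locks in.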
def read_servers(self, parent, message):
server = Server(parent, self.num_partitions())
if message.port > 0:
peer = messaging.connect(message.ip, message.port)
peer.set_timeout(common.config.CLIENT_TIMEOUT)
server.set_peer(peer)
assert peer
assert self.identifier > 0
msg = client_config_protocol.NotifyClientId()
msg.identifier = self.identifier
# Ensure server has received client id
peer.send(message_type.client.notify_client_id, msg.SerializeToString())
msgtype, data = peer.receive()
if not msgtype:
assert not peer.is_connected()
raise RuntimeError("Lost connection to content server")
assert msgtype == message_type.client.notify_cid_ack
assert data == ""
if message.HasField('left_child'):
server.left_child = self.read_servers(server, message.left_child)
if message.HasField('right_child'):
server.right_child = self.read_servers(server, message.right_child)
if message.content_position >= 0:
self.servers_leaves[message.content_position] = server
return server
def partition_size(self):
return common.config.MAX_EXTEND / self.num_partitions()
def num_partitions(self):
return len(self.servers_leaves)
def update(self):
msgtype, msg = self.socket.receive()
if msgtype is None:
if not self.socket.is_connected():
raise RuntimeError("Server closed connection before sending config")
elif msgtype is coordinator_message_type.client.update_config:
config_message = client_config_protocol.UpdateServerConfiguration()
config_message.ParseFromString(msg)
self.servers_leaves = [None] * config_message.num_partitions
self.servers_root = self.read_servers(None, config_message.root)
elif msgtype is coordinator_message_type.client.assign_client_id:
assign_cid = client_config_protocol.AssignClientId()
assign_cid.ParseFromString(msg)
self.identifier = assign_cid.identifier
else:
raise RuntimeError("Unknown Message Type: " + str(msgtype))
def show_config(self):
i = 0
for server in self.servers_leaves:
if server:
print("# " + str(i) + " " + str(server.ip) + ":" + str(server.port))
else:
print("<gap>")
i += 1
def to_server_position(self, position):
if position < 0 or position > common.config.MAX_EXTEND:
raise ValueError("Position out of range")
pos = int(math.floor(position / self.partition_size()))
return min(pos, self.num_partitions()-1)
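    # Worked example (illustrative, assuming MAX_EXTEND == 100 and four
    # partitions): partition_size() == 25.0, so to_server_position(60) ==
    # floor(60 / 25) == 2, and to_server_position(100) is clamped by min()
    # to the last partition, index 3.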
def create_insert_request(self, entries):
msg = modifications_protocol.InsertRequest()
for entry in entries:
obj = msg.objects.add()
obj.position, obj.value = entry
return msg
def insert(self, *args):
entries = util.parse_entries_args(args)
msg = modifications_protocol.ModificationRequest()
msg.identifier = self.next_modification_id
self.next_modification_id += 1
msg.insert.MergeFrom(self.create_insert_request(entries))
        start, end = util.get_range(entries)
return self.send_modification_request(message_type.client.mod_request, msg, start, end)
def create_range_search_request(self, start, end):
if not start <= end:
raise ValueError("Start must be <= to End")
query = query_protocol.QueryRequest()
query.identifier = self.next_query_id
self.next_query_id += 1
query.range_search.start = start
query.range_search.end = end
return query
def send_query(self, msg_type, message, start, end):
spos = self.to_server_position(start)
epos = self.to_server_position(end)
pending = []
for i in range(spos, epos+1):
p = PendingQueryResponse(self.send_to_server(i, msg_type, message.SerializeToString()), message.identifier)
pending.append(p)
return pending
def collect_objects(self, pending):
objects = []
while len(pending):
response = pending.pop()
for obj in response.get_objects():
objects.append(Object(obj.position, obj.value))
objects.sort()
return objects
def find(self, start = 0, end = common.config.MAX_EXTEND):
search = self.create_range_search_request(start, end)
pending = self.send_query(message_type.client.query_request, search, start, end)
return self.collect_objects(pending)
def init_transaction(self, read_lines, write_lines):
identifier = self.next_transaction_id
self.next_transaction_id += 1
read_locks = util.to_line_set(read_lines)
write_locks = util.to_line_set(write_lines)
msg = modifications_protocol.AcquireLockRequest()
msg.transaction_id = identifier
servers = []
for line in read_locks.lines:
lock = msg.read_locks.add()
lock.start = line.start
lock.end = line.end
for line in write_locks.lines:
lock = msg.write_locks.add()
lock.start = line.start
lock.end = line.end
for line in (read_locks.lines + write_locks.lines):
spos = self.to_server_position(line.start)
epos = self.to_server_position(line.end)
for i in range(spos, epos+1):
s = self.servers_leaves[i]
if not s:
raise RuntimeError("Server is down")
if s not in servers:
servers.append(s)
parent_layers = self.find_parent_layers(servers)
previous = []
for layer in parent_layers:
success = False
done_peers = []
assert len(layer) >= len(previous)
            # Acquire lock(s) on the next layer
# (this might busy wait)
while not success:
success = True
pending = []
assert len(layer) > 0
for server in layer:
p = server.peer
if p not in done_peers:
p.send(message_type.client.start_transaction, msg.SerializeToString())
pending.append(PendingModificationResult(p, msg.transaction_id))
while len(pending):
p = pending.pop()
if not p.get_ok():
success = False
else:
done_peers.append(p.conn)
assert(len(done_peers) == len(layer))
# Release locks on the previous layer (if any)
epending = []
emsg = modifications_protocol.ReleaseLockRequest()
emsg.transaction_id = identifier
for server in previous:
p = server.peer
p.send(message_type.client.end_transaction, emsg.SerializeToString())
epending.append(PendingModificationResult(p, emsg.transaction_id))
while len(epending):
if not epending.pop().get_ok():
raise RuntimeError("Failed to release lock")
previous = layer
assert previous == servers
return Transaction(self, identifier, read_locks, write_locks)
def end_transaction(self, transaction_id, read_locks, write_locks, abort):
msg = modifications_protocol.ReleaseLockRequest()
msg.transaction_id = transaction_id
servers = []
for line in (read_locks.lines + write_locks.lines):
spos = self.to_server_position(line.start)
epos = self.to_server_position(line.end)
for i in range(spos, epos+1):
s = self.servers_leaves[i]
if s not in servers:
servers.append(s)
success = True
pending = []
for server in servers:
p = server.peer
p.send(message_type.client.end_transaction, msg.SerializeToString())
pending.append(PendingModificationResult(p, msg.transaction_id))
while len(pending):
if not pending.pop().get_ok():
success = False
if not success:
raise RuntimeError("Failed to release lock")
return success
def send_to_server(self, pos, msg_type, message):
server = self.servers_leaves[pos]
if not server:
raise RuntimeError("Server #" + str(pos) + " is down")
if not server.peer.is_connected():
raise RuntimeError("Not connected to server")
server.peer.send(msg_type, message)
return server.peer
def clear(self):
return self.range_remove(0, common.config.MAX_EXTEND)
def create_range_remove_request(self, start, end):
msg = modifications_protocol.RangeRemoveRequest()
msg.start = start
msg.end = end
return msg
def send_modification_request(self, msg_type, msg, start, end):
spos = self.to_server_position(start)
epos = self.to_server_position(end)
pending = []
for i in range(spos, epos+1):
p = PendingModificationResult(self.send_to_server(i, msg_type,
msg.SerializeToString()), msg.identifier)
pending.append(p)
success = True
while len(pending):
if not pending.pop().get_ok():
success = False
return success
def range_remove(self, start, end):
msg = modifications_protocol.ModificationRequest()
msg.identifier = self.next_modification_id
self.next_modification_id += 1
msg.range_remove.MergeFrom(self.create_range_remove_request(start, end))
return self.send_modification_request(message_type.client.mod_request, msg, start, end)
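# ---------------------------------------------------------------------------
# Usage sketch (not part of the original module). The coordinator address and
# the (position, value) entry format accepted by util.parse_entries_args are
# assumptions; Line(start, end) is assumed to be the constructor imported from
# common.geo above.
if __name__ == "__main__":
    client = Client("localhost")
    # Acquire read locks on [0, 10) and write locks on [10, 20), insert under
    # the transaction, then commit (commit releases the locks).
    txn = client.init_transaction([Line(0, 10)], [Line(10, 20)])
    txn.insert((12, "value-a"), (15, "value-b"))
    print(txn.find(0, 20))
    txn.commit()
    client.close()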
|
{
"content_hash": "dd43f93e44843205f66ab422ce959a88",
"timestamp": "",
"source": "github",
"line_count": 431,
"max_line_length": 119,
"avg_line_length": 33.51276102088167,
"alnum_prop": 0.56597895319856,
"repo_name": "kaimast/inanutshell",
"id": "2789632d4c169edfcb89124a5babc35ec91f2f72",
"size": "14444",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tree/recursive/client.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "Protocol Buffer",
"bytes": "11886"
},
{
"name": "Python",
"bytes": "209569"
},
{
"name": "Shell",
"bytes": "2539"
}
],
"symlink_target": ""
}
|
from WorkingWithWorksheets import RemovingWorksheetsusingSheetName
import jpype
import os.path
asposeapispath = os.path.join(os.path.abspath("../../../"), "lib/")
dataDir = os.path.join(os.path.abspath("./"), "data/")
print "You need to put your Aspose.Cells for Java APIs .jars in this folder:\n"+asposeapispath
#print dataDir
jpype.startJVM(jpype.getDefaultJVMPath(), "-Djava.ext.dirs=%s" % asposeapispath)
hw = RemovingWorksheetsusingSheetName(dataDir)
hw.main()
|
{
"content_hash": "71f8d1f7ca992d0d2c4bcbfb088aff80",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 94,
"avg_line_length": 33.5,
"alnum_prop": 0.7569296375266524,
"repo_name": "asposecells/Aspose_Cells_Java",
"id": "79ffd38acd67d3fc8e93713f362478b189840f0c",
"size": "699",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "Plugins/Aspose-Cells-Java-for-Python/tests/WorkingWithWorksheets/RemovingWorksheetsusingSheetName/RemovingWorksheetsusingSheetName.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5888"
},
{
"name": "HTML",
"bytes": "79320"
},
{
"name": "Java",
"bytes": "826384"
},
{
"name": "JavaScript",
"bytes": "92031"
},
{
"name": "PHP",
"bytes": "56094"
},
{
"name": "Python",
"bytes": "111280"
},
{
"name": "Ruby",
"bytes": "47960"
}
],
"symlink_target": ""
}
|
import sys
from rpg import app
if __name__ == "__main__":
game = app.Game()
sys.exit(game.run())
|
{
"content_hash": "c90ef4fc69b961a9963e56b3925dfc49",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 26,
"avg_line_length": 17.666666666666668,
"alnum_prop": 0.5660377358490566,
"repo_name": "tvarney/txtrpg",
"id": "6a5658d2e54078573791a3b094a0f7c0e2da8853",
"size": "130",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "__main__.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "211457"
}
],
"symlink_target": ""
}
|
import sys
from typing import Any, Callable, Dict, IO, Iterable, Optional, TypeVar, Union, overload
import urllib.parse
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from .._serialization import Serializer
from .._vendor import _convert_request, _format_url_section
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_request(
resource_group_name: str,
integration_account_name: str,
subscription_id: str,
*,
top: Optional[int] = None,
filter: Optional[str] = None,
**kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2019-05-01")) # type: Literal["2019-05-01"]
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/integrationAccounts/{integrationAccountName}/sessions",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
"integrationAccountName": _SERIALIZER.url("integration_account_name", integration_account_name, "str"),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
if top is not None:
_params["$top"] = _SERIALIZER.query("top", top, "int")
if filter is not None:
_params["$filter"] = _SERIALIZER.query("filter", filter, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_get_request(
resource_group_name: str, integration_account_name: str, session_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2019-05-01")) # type: Literal["2019-05-01"]
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/integrationAccounts/{integrationAccountName}/sessions/{sessionName}",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
"integrationAccountName": _SERIALIZER.url("integration_account_name", integration_account_name, "str"),
"sessionName": _SERIALIZER.url("session_name", session_name, "str"),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_create_or_update_request(
resource_group_name: str, integration_account_name: str, session_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2019-05-01")) # type: Literal["2019-05-01"]
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/integrationAccounts/{integrationAccountName}/sessions/{sessionName}",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
"integrationAccountName": _SERIALIZER.url("integration_account_name", integration_account_name, "str"),
"sessionName": _SERIALIZER.url("session_name", session_name, "str"),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
if content_type is not None:
_headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
def build_delete_request(
resource_group_name: str, integration_account_name: str, session_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2019-05-01")) # type: Literal["2019-05-01"]
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/integrationAccounts/{integrationAccountName}/sessions/{sessionName}",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
"integrationAccountName": _SERIALIZER.url("integration_account_name", integration_account_name, "str"),
"sessionName": _SERIALIZER.url("session_name", session_name, "str"),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs)
class IntegrationAccountSessionsOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.logic.LogicManagementClient`'s
:attr:`integration_account_sessions` attribute.
"""
models = _models
def __init__(self, *args, **kwargs):
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def list(
self,
resource_group_name: str,
integration_account_name: str,
top: Optional[int] = None,
filter: Optional[str] = None,
**kwargs: Any
) -> Iterable["_models.IntegrationAccountSession"]:
"""Gets a list of integration account sessions.
:param resource_group_name: The resource group name. Required.
:type resource_group_name: str
:param integration_account_name: The integration account name. Required.
:type integration_account_name: str
:param top: The number of items to be included in the result. Default value is None.
:type top: int
:param filter: The filter to apply on the operation. Options for filters include: ChangedTime.
Default value is None.
:type filter: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either IntegrationAccountSession or the result of
cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.logic.models.IntegrationAccountSession]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2019-05-01"]
cls = kwargs.pop("cls", None) # type: ClsType[_models.IntegrationAccountSessionListResult]
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
resource_group_name=resource_group_name,
integration_account_name=integration_account_name,
subscription_id=self._config.subscription_id,
top=top,
filter=filter,
api_version=api_version,
template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("IntegrationAccountSessionListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(get_next, extract_data)
list.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/integrationAccounts/{integrationAccountName}/sessions"} # type: ignore
@distributed_trace
def get(
self, resource_group_name: str, integration_account_name: str, session_name: str, **kwargs: Any
) -> _models.IntegrationAccountSession:
"""Gets an integration account session.
:param resource_group_name: The resource group name. Required.
:type resource_group_name: str
:param integration_account_name: The integration account name. Required.
:type integration_account_name: str
:param session_name: The integration account session name. Required.
:type session_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: IntegrationAccountSession or the result of cls(response)
:rtype: ~azure.mgmt.logic.models.IntegrationAccountSession
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2019-05-01"]
cls = kwargs.pop("cls", None) # type: ClsType[_models.IntegrationAccountSession]
request = build_get_request(
resource_group_name=resource_group_name,
integration_account_name=integration_account_name,
session_name=session_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize("IntegrationAccountSession", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/integrationAccounts/{integrationAccountName}/sessions/{sessionName}"} # type: ignore
@overload
def create_or_update(
self,
resource_group_name: str,
integration_account_name: str,
session_name: str,
session: _models.IntegrationAccountSession,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.IntegrationAccountSession:
"""Creates or updates an integration account session.
:param resource_group_name: The resource group name. Required.
:type resource_group_name: str
:param integration_account_name: The integration account name. Required.
:type integration_account_name: str
:param session_name: The integration account session name. Required.
:type session_name: str
:param session: The integration account session. Required.
:type session: ~azure.mgmt.logic.models.IntegrationAccountSession
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: IntegrationAccountSession or the result of cls(response)
:rtype: ~azure.mgmt.logic.models.IntegrationAccountSession
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
def create_or_update(
self,
resource_group_name: str,
integration_account_name: str,
session_name: str,
session: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.IntegrationAccountSession:
"""Creates or updates an integration account session.
:param resource_group_name: The resource group name. Required.
:type resource_group_name: str
:param integration_account_name: The integration account name. Required.
:type integration_account_name: str
:param session_name: The integration account session name. Required.
:type session_name: str
:param session: The integration account session. Required.
:type session: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: IntegrationAccountSession or the result of cls(response)
:rtype: ~azure.mgmt.logic.models.IntegrationAccountSession
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace
def create_or_update(
self,
resource_group_name: str,
integration_account_name: str,
session_name: str,
session: Union[_models.IntegrationAccountSession, IO],
**kwargs: Any
) -> _models.IntegrationAccountSession:
"""Creates or updates an integration account session.
:param resource_group_name: The resource group name. Required.
:type resource_group_name: str
:param integration_account_name: The integration account name. Required.
:type integration_account_name: str
:param session_name: The integration account session name. Required.
:type session_name: str
:param session: The integration account session. Is either a model type or a IO type. Required.
:type session: ~azure.mgmt.logic.models.IntegrationAccountSession or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: IntegrationAccountSession or the result of cls(response)
:rtype: ~azure.mgmt.logic.models.IntegrationAccountSession
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2019-05-01"]
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[_models.IntegrationAccountSession]
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(session, (IO, bytes)):
_content = session
else:
_json = self._serialize.body(session, "IntegrationAccountSession")
request = build_create_or_update_request(
resource_group_name=resource_group_name,
integration_account_name=integration_account_name,
session_name=session_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self.create_or_update.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize("IntegrationAccountSession", pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize("IntegrationAccountSession", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/integrationAccounts/{integrationAccountName}/sessions/{sessionName}"} # type: ignore
@distributed_trace
def delete( # pylint: disable=inconsistent-return-statements
self, resource_group_name: str, integration_account_name: str, session_name: str, **kwargs: Any
) -> None:
"""Deletes an integration account session.
:param resource_group_name: The resource group name. Required.
:type resource_group_name: str
:param integration_account_name: The integration account name. Required.
:type integration_account_name: str
:param session_name: The integration account session name. Required.
:type session_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None or the result of cls(response)
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2019-05-01"]
cls = kwargs.pop("cls", None) # type: ClsType[None]
request = build_delete_request(
resource_group_name=resource_group_name,
integration_account_name=integration_account_name,
session_name=session_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.delete.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/integrationAccounts/{integrationAccountName}/sessions/{sessionName}"} # type: ignore
|
{
"content_hash": "6e286b5cb9ee0923581c82e46bb97017",
"timestamp": "",
"source": "github",
"line_count": 576,
"max_line_length": 219,
"avg_line_length": 44.90625,
"alnum_prop": 0.6510477074151396,
"repo_name": "Azure/azure-sdk-for-python",
"id": "d396851be5a4b1e1fde492d626c3696fcbab6b65",
"size": "26366",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/logic/azure-mgmt-logic/azure/mgmt/logic/operations/_integration_account_sessions_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
}
|
"""Check if a number of different objects exist.
This whole module is a big workaround to avoid circular imports.
This should probably be cleaned up in some way.
"""
from typing import Optional, Iterable
from irisett.sql import DBConnection
async def _object_exists(dbcon: DBConnection, query: str, query_args: Optional[Iterable]) -> bool:
res = await dbcon.fetch_single(query, query_args)
    return res != 0
async def monitor_group_exists(dbcon: DBConnection, monitor_group_id: int) -> bool:
"""Check if a monitor group id exists."""
q = """select count(id) from monitor_groups where id=%s"""
return await _object_exists(dbcon, q, (monitor_group_id,))
async def contact_exists(dbcon: DBConnection, contact_id: int) -> bool:
"""Check if a contact id exists."""
q = """select count(id) from contacts where id=%s"""
return await _object_exists(dbcon, q, (contact_id,))
async def active_monitor_exists(dbcon: DBConnection, active_monitor_id: int) -> bool:
"""Check if a contact id exists."""
q = """select count(id) from active_monitors where id=%s"""
return await _object_exists(dbcon, q, (active_monitor_id,))
async def contact_group_exists(dbcon: DBConnection, contact_group_id: int) -> bool:
"""Check if a contact group id exists."""
q = """select count(id) from contact_groups where id=%s"""
return await _object_exists(dbcon, q, (contact_group_id,))
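# Usage sketch (not part of the original module): the helpers are coroutines,
# so they must be awaited with a live DBConnection. How that connection is
# constructed is not shown here and is deliberately left out.
async def _example(dbcon: DBConnection) -> bool:
    # Hypothetical id; True when the contact row exists.
    return await contact_exists(dbcon, 42)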
|
{
"content_hash": "5f1790d3e1efb94c96e41cc4ae0b9259",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 98,
"avg_line_length": 37.205128205128204,
"alnum_prop": 0.6891798759476223,
"repo_name": "beebyte/irisett",
"id": "1c98b911577ad6bb7b291c1b08914c3ac883c63d",
"size": "1451",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "irisett/object_exists.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1865"
},
{
"name": "HTML",
"bytes": "20933"
},
{
"name": "Makefile",
"bytes": "604"
},
{
"name": "Python",
"bytes": "241040"
},
{
"name": "Shell",
"bytes": "225"
}
],
"symlink_target": ""
}
|
from mainTools import main
def create_results(doc):
yield {
'correctedText': doc['correctedText'].encode('utf-8'),
'isCorrected': str(doc['corrected']),
'isDiacritized': str(doc['diacritized'])
}
def csv_header():
return ['correctedText', 'isCorrected', 'isDiacritized']
if __name__ == '__main__':
main('correction', csv_header(), create_results)
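# Worked example (illustrative): for doc = {'correctedText': u'opraveno',
# 'corrected': True, 'diacritized': False}, next(create_results(doc)) yields
# {'correctedText': 'opraveno', 'isCorrected': 'True', 'isDiacritized': 'False'},
# matching the csv_header() column order above.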
|
{
"content_hash": "a3a0ccb9a73204c0c511467acb7e4aa6",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 62,
"avg_line_length": 27.857142857142858,
"alnum_prop": 0.6230769230769231,
"repo_name": "Geneea/keboola-connection",
"id": "e9bd2e2f48d86ed45040d71299e3993e3cc1eaf6",
"size": "406",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/correction.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1622"
},
{
"name": "Python",
"bytes": "10809"
}
],
"symlink_target": ""
}
|
from copy import copy, deepcopy
from textwrap import dedent
try:
import cPickle as pickle
except ImportError:
import pickle
try:
import dask.array as da
except ImportError:
pass
import numpy as np
import pandas as pd
from xray import (align, concat, conventions, backends, Dataset, DataArray,
Variable, Coordinate, auto_combine, open_dataset,
set_options)
from xray.core import indexing, utils
from xray.core.pycompat import iteritems, OrderedDict
from . import (TestCase, unittest, InaccessibleArray, UnexpectedDataAccess,
requires_dask)
def create_test_data(seed=None):
rs = np.random.RandomState(seed)
_vars = {'var1': ['dim1', 'dim2'],
'var2': ['dim1', 'dim2'],
'var3': ['dim3', 'dim1']}
_dims = {'dim1': 8, 'dim2': 9, 'dim3': 10}
obj = Dataset()
obj['time'] = ('time', pd.date_range('2000-01-01', periods=20))
obj['dim1'] = ('dim1', np.arange(_dims['dim1'], dtype='int64'))
obj['dim2'] = ('dim2', 0.5 * np.arange(_dims['dim2']))
obj['dim3'] = ('dim3', list('abcdefghij'))
for v, dims in sorted(_vars.items()):
data = rs.normal(size=tuple(_dims[d] for d in dims))
obj[v] = (dims, data, {'foo': 'variable'})
obj.coords['numbers'] = ('dim3', np.array([0, 1, 2, 0, 0, 1, 1, 2, 2, 3],
dtype='int64'))
return obj
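# Illustrative note (not in the original file): create_test_data() returns a
# Dataset with dims {'dim1': 8, 'dim2': 9, 'dim3': 10, 'time': 20}, three float
# data variables ('var1'..'var3') and a 'numbers' coordinate on dim3; the tests
# below assert against exactly this shape.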
class InaccessibleVariableDataStore(backends.InMemoryDataStore):
def get_variables(self):
def lazy_inaccessible(x):
data = indexing.LazilyIndexedArray(InaccessibleArray(x.values))
return Variable(x.dims, data, x.attrs)
return dict((k, lazy_inaccessible(v)) for
k, v in iteritems(self._variables))
class TestDataset(TestCase):
def test_repr(self):
data = create_test_data(seed=123)
data.attrs['foo'] = 'bar'
# need to insert str dtype at runtime to handle both Python 2 & 3
expected = dedent("""\
<xray.Dataset>
Dimensions: (dim1: 8, dim2: 9, dim3: 10, time: 20)
Coordinates:
* time (time) datetime64[ns] 2000-01-01 2000-01-02 2000-01-03 ...
* dim1 (dim1) int64 0 1 2 3 4 5 6 7
* dim2 (dim2) float64 0.0 0.5 1.0 1.5 2.0 2.5 3.0 3.5 4.0
* dim3 (dim3) %s 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j'
numbers (dim3) int64 0 1 2 0 0 1 1 2 2 3
Data variables:
var1 (dim1, dim2) float64 -1.086 0.9973 0.283 -1.506 -0.5786 1.651 ...
var2 (dim1, dim2) float64 1.162 -1.097 -2.123 1.04 -0.4034 -0.126 ...
var3 (dim3, dim1) float64 0.5565 -0.2121 0.4563 1.545 -0.2397 0.1433 ...
Attributes:
foo: bar""") % data['dim3'].dtype
actual = '\n'.join(x.rstrip() for x in repr(data).split('\n'))
print(actual)
self.assertEqual(expected, actual)
with set_options(display_width=100):
max_len = max(map(len, repr(data).split('\n')))
assert 90 < max_len < 100
expected = dedent("""\
<xray.Dataset>
Dimensions: ()
Coordinates:
*empty*
Data variables:
*empty*""")
actual = '\n'.join(x.rstrip() for x in repr(Dataset()).split('\n'))
print(actual)
self.assertEqual(expected, actual)
# verify that ... doesn't appear for scalar coordinates
data = Dataset({'foo': ('x', np.ones(10))}).mean()
expected = dedent("""\
<xray.Dataset>
Dimensions: ()
Coordinates:
*empty*
Data variables:
foo float64 1.0""")
actual = '\n'.join(x.rstrip() for x in repr(data).split('\n'))
print(actual)
self.assertEqual(expected, actual)
# verify long attributes are truncated
data = Dataset(attrs={'foo': 'bar' * 1000})
self.assertTrue(len(repr(data)) < 1000)
def test_constructor(self):
x1 = ('x', 2 * np.arange(100))
x2 = ('x', np.arange(1000))
z = (['x', 'y'], np.arange(1000).reshape(100, 10))
with self.assertRaisesRegexp(ValueError, 'conflicting sizes'):
Dataset({'a': x1, 'b': x2})
with self.assertRaisesRegexp(ValueError, 'must be defined with 1-d'):
Dataset({'a': x1, 'x': z})
with self.assertRaisesRegexp(TypeError, 'must be an array or'):
Dataset({'x': (1, 2, 3, 4, 5, 6, 7)})
with self.assertRaisesRegexp(ValueError, 'already exists as a scalar'):
Dataset({'x': 0, 'y': ('x', [1, 2, 3])})
# verify handling of DataArrays
expected = Dataset({'x': x1, 'z': z})
actual = Dataset({'z': expected['z']})
self.assertDatasetIdentical(expected, actual)
def test_constructor_1d(self):
expected = Dataset({'x': (['x'], 5.0 + np.arange(5))})
actual = Dataset({'x': 5.0 + np.arange(5)})
self.assertDatasetIdentical(expected, actual)
actual = Dataset({'x': [5, 6, 7, 8, 9]})
self.assertDatasetIdentical(expected, actual)
def test_constructor_0d(self):
expected = Dataset({'x': ([], 1)})
for arg in [1, np.array(1), expected['x']]:
actual = Dataset({'x': arg})
self.assertDatasetIdentical(expected, actual)
d = pd.Timestamp('2000-01-01T12')
args = [True, None, 3.4, np.nan, 'hello', u'uni', b'raw',
np.datetime64('2000-01-01T00'), d, d.to_datetime()]
for arg in args:
print(arg)
expected = Dataset({'x': ([], arg)})
actual = Dataset({'x': arg})
self.assertDatasetIdentical(expected, actual)
def test_constructor_auto_align(self):
a = DataArray([1, 2], [('x', [0, 1])])
b = DataArray([3, 4], [('x', [1, 2])])
# verify align uses outer join
expected = Dataset({'a': ('x', [1, 2, np.nan]),
'b': ('x', [np.nan, 3, 4])})
actual = Dataset({'a': a, 'b': b})
self.assertDatasetIdentical(expected, actual)
# regression test for GH346
self.assertIsInstance(actual.variables['x'], Coordinate)
# variable with different dimensions
c = ('y', [3, 4])
expected2 = expected.merge({'c': c})
actual = Dataset({'a': a, 'b': b, 'c': c})
self.assertDatasetIdentical(expected2, actual)
# variable that is only aligned against the aligned variables
d = ('x', [3, 2, 1])
expected3 = expected.merge({'d': d})
actual = Dataset({'a': a, 'b': b, 'd': d})
self.assertDatasetIdentical(expected3, actual)
e = ('x', [0, 0])
with self.assertRaisesRegexp(ValueError, 'conflicting sizes'):
Dataset({'a': a, 'b': b, 'e': e})
def test_constructor_compat(self):
data = OrderedDict([('x', DataArray(0, coords={'y': 1})),
('y', ('z', [1, 1, 1]))])
with self.assertRaisesRegexp(ValueError, 'conflicting value'):
Dataset(data, compat='equals')
expected = Dataset({'x': 0}, {'y': ('z', [1, 1, 1])})
actual = Dataset(data)
self.assertDatasetIdentical(expected, actual)
actual = Dataset(data, compat='broadcast_equals')
self.assertDatasetIdentical(expected, actual)
data = OrderedDict([('y', ('z', [1, 1, 1])),
('x', DataArray(0, coords={'y': 1}))])
actual = Dataset(data)
self.assertDatasetIdentical(expected, actual)
original = Dataset({'a': (('x', 'y'), np.ones((2, 3)))},
{'c': (('x', 'y'), np.zeros((2, 3)))})
expected = Dataset({'a': ('x', np.ones(2)),
'b': ('y', np.ones(3))},
{'c': (('x', 'y'), np.zeros((2, 3)))})
# use an OrderedDict to ensure test results are reproducible; otherwise
# the order of appearance of x and y matters for the order of
# dimensions in 'c'
actual = Dataset(OrderedDict([('a', original['a'][:, 0].drop('y')),
('b', original['a'][0].drop('x'))]))
self.assertDatasetIdentical(expected, actual)
data = {'x': DataArray(0, coords={'y': 3}), 'y': ('z', [1, 1, 1])}
with self.assertRaisesRegexp(ValueError, 'conflicting value'):
Dataset(data)
data = {'x': DataArray(0, coords={'y': 1}), 'y': [1, 1]}
actual = Dataset(data)
expected = Dataset({'x': 0}, {'y': [1, 1]})
self.assertDatasetIdentical(expected, actual)
def test_constructor_with_coords(self):
with self.assertRaisesRegexp(ValueError, 'redundant variables and co'):
Dataset({'a': ('x', [1])}, {'a': ('x', [1])})
ds = Dataset({}, {'a': ('x', [1])})
self.assertFalse(ds.data_vars)
self.assertItemsEqual(ds.coords.keys(), ['x', 'a'])
def test_properties(self):
ds = create_test_data()
self.assertEqual(ds.dims,
{'dim1': 8, 'dim2': 9, 'dim3': 10, 'time': 20})
self.assertItemsEqual(ds, list(ds.variables))
self.assertItemsEqual(ds.keys(), list(ds.variables))
self.assertNotIn('aasldfjalskdfj', ds.variables)
self.assertIn('dim1', repr(ds.variables))
self.assertEqual(len(ds), 8)
self.assertItemsEqual(ds.data_vars, ['var1', 'var2', 'var3'])
self.assertItemsEqual(ds.data_vars.keys(), ['var1', 'var2', 'var3'])
self.assertIn('var1', ds.data_vars)
self.assertNotIn('dim1', ds.data_vars)
self.assertNotIn('numbers', ds.data_vars)
self.assertEqual(len(ds.data_vars), 3)
self.assertItemsEqual(ds.indexes, ['dim1', 'dim2', 'dim3', 'time'])
self.assertEqual(len(ds.indexes), 4)
self.assertIn('dim1', repr(ds.indexes))
self.assertItemsEqual(ds.coords,
['time', 'dim1', 'dim2', 'dim3', 'numbers'])
self.assertIn('dim1', ds.coords)
self.assertIn('numbers', ds.coords)
self.assertNotIn('var1', ds.coords)
self.assertEqual(len(ds.coords), 5)
self.assertEqual(Dataset({'x': np.int64(1),
'y': np.float32([1, 2])}).nbytes, 16)
def test_attr_access(self):
ds = Dataset({'tmin': ('x', [42], {'units': 'Celcius'})},
attrs={'title': 'My test data'})
self.assertDataArrayIdentical(ds.tmin, ds['tmin'])
self.assertDataArrayIdentical(ds.tmin.x, ds.x)
self.assertEqual(ds.title, ds.attrs['title'])
self.assertEqual(ds.tmin.units, ds['tmin'].attrs['units'])
self.assertLessEqual(set(['tmin', 'title']), set(dir(ds)))
self.assertIn('units', set(dir(ds.tmin)))
# should defer to variable of same name
ds.attrs['tmin'] = -999
self.assertEqual(ds.attrs['tmin'], -999)
self.assertDataArrayIdentical(ds.tmin, ds['tmin'])
def test_variable(self):
a = Dataset()
d = np.random.random((10, 3))
a['foo'] = (('time', 'x',), d)
self.assertTrue('foo' in a.variables)
self.assertTrue('foo' in a)
a['bar'] = (('time', 'x',), d)
# order of creation is preserved
self.assertEqual(list(a), ['foo', 'time', 'x', 'bar'])
self.assertTrue(all([a['foo'][i].values == d[i]
for i in np.ndindex(*d.shape)]))
# try to add variable with dim (10,3) with data that's (3,10)
with self.assertRaises(ValueError):
a['qux'] = (('time', 'x'), d.T)
def test_modify_inplace(self):
a = Dataset()
vec = np.random.random((10,))
attributes = {'foo': 'bar'}
a['x'] = ('x', vec, attributes)
self.assertTrue('x' in a.coords)
self.assertIsInstance(a.coords['x'].to_index(),
pd.Index)
self.assertVariableIdentical(a.coords['x'], a.variables['x'])
b = Dataset()
b['x'] = ('x', vec, attributes)
self.assertVariableIdentical(a['x'], b['x'])
self.assertEqual(a.dims, b.dims)
# this should work
a['x'] = ('x', vec[:5])
a['z'] = ('x', np.arange(5))
with self.assertRaises(ValueError):
# now it shouldn't, since there is a conflicting length
a['x'] = ('x', vec[:4])
arr = np.random.random((10, 1,))
scal = np.array(0)
with self.assertRaises(ValueError):
a['y'] = ('y', arr)
with self.assertRaises(ValueError):
a['y'] = ('y', scal)
self.assertTrue('y' not in a.dims)
def test_coords_properties(self):
# use an OrderedDict for coordinates to ensure order across python
# versions
# use int64 for repr consistency on windows
data = Dataset(OrderedDict([('x', ('x', np.array([-1, -2], 'int64'))),
('y', ('y', np.array([0, 1, 2], 'int64'))),
('foo', (['x', 'y'],
np.random.randn(2, 3)))]),
OrderedDict([('a', ('x', np.array([4, 5], 'int64'))),
('b', np.int64(-10))]))
self.assertEqual(4, len(data.coords))
self.assertItemsEqual(['x', 'y', 'a', 'b'], list(data.coords))
self.assertVariableIdentical(data.coords['x'], data['x'].variable)
self.assertVariableIdentical(data.coords['y'], data['y'].variable)
self.assertIn('x', data.coords)
self.assertIn('a', data.coords)
self.assertNotIn(0, data.coords)
self.assertNotIn('foo', data.coords)
with self.assertRaises(KeyError):
data.coords['foo']
with self.assertRaises(KeyError):
data.coords[0]
expected = dedent("""\
Coordinates:
* x (x) int64 -1 -2
* y (y) int64 0 1 2
a (x) int64 4 5
b int64 -10""")
actual = repr(data.coords)
self.assertEqual(expected, actual)
self.assertEqual({'x': 2, 'y': 3}, data.coords.dims)
def test_coords_modify(self):
data = Dataset({'x': ('x', [-1, -2]),
'y': ('y', [0, 1, 2]),
'foo': (['x', 'y'], np.random.randn(2, 3))},
{'a': ('x', [4, 5]), 'b': -10})
actual = data.copy(deep=True)
actual.coords['x'] = ('x', ['a', 'b'])
self.assertArrayEqual(actual['x'], ['a', 'b'])
actual = data.copy(deep=True)
actual.coords['z'] = ('z', ['a', 'b'])
self.assertArrayEqual(actual['z'], ['a', 'b'])
with self.assertRaisesRegexp(ValueError, 'conflicting sizes'):
data.coords['x'] = ('x', [-1])
actual = data.copy()
del actual.coords['b']
expected = data.reset_coords('b', drop=True)
self.assertDatasetIdentical(expected, actual)
with self.assertRaises(KeyError):
del data.coords['not_found']
with self.assertRaises(KeyError):
del data.coords['foo']
actual = data.copy(deep=True)
actual.coords.update({'c': 11})
expected = data.merge({'c': 11}).set_coords('c')
self.assertDatasetIdentical(expected, actual)
def test_coords_set(self):
one_coord = Dataset({'x': ('x', [0]),
'yy': ('x', [1]),
'zzz': ('x', [2])})
two_coords = Dataset({'zzz': ('x', [2])},
{'x': ('x', [0]),
'yy': ('x', [1])})
all_coords = Dataset(coords={'x': ('x', [0]),
'yy': ('x', [1]),
'zzz': ('x', [2])})
actual = one_coord.set_coords('x')
self.assertDatasetIdentical(one_coord, actual)
actual = one_coord.set_coords(['x'])
self.assertDatasetIdentical(one_coord, actual)
actual = one_coord.set_coords('yy')
self.assertDatasetIdentical(two_coords, actual)
actual = one_coord.set_coords(['yy', 'zzz'])
self.assertDatasetIdentical(all_coords, actual)
actual = one_coord.reset_coords()
self.assertDatasetIdentical(one_coord, actual)
actual = two_coords.reset_coords()
self.assertDatasetIdentical(one_coord, actual)
actual = all_coords.reset_coords()
self.assertDatasetIdentical(one_coord, actual)
actual = all_coords.reset_coords(['yy', 'zzz'])
self.assertDatasetIdentical(one_coord, actual)
actual = all_coords.reset_coords('zzz')
self.assertDatasetIdentical(two_coords, actual)
with self.assertRaisesRegexp(ValueError, 'cannot remove index'):
one_coord.reset_coords('x')
actual = all_coords.reset_coords('zzz', drop=True)
expected = all_coords.drop('zzz')
self.assertDatasetIdentical(expected, actual)
expected = two_coords.drop('zzz')
self.assertDatasetIdentical(expected, actual)
def test_coords_to_dataset(self):
orig = Dataset({'foo': ('y', [-1, 0, 1])}, {'x': 10, 'y': [2, 3, 4]})
expected = Dataset(coords={'x': 10, 'y': [2, 3, 4]})
actual = orig.coords.to_dataset()
self.assertDatasetIdentical(expected, actual)
def test_coords_merge(self):
orig_coords = Dataset(coords={'a': ('x', [1, 2])}).coords
other_coords = Dataset(coords={'b': ('x', ['a', 'b'])}).coords
expected = Dataset(coords={'a': ('x', [1, 2]),
'b': ('x', ['a', 'b'])})
actual = orig_coords.merge(other_coords)
self.assertDatasetIdentical(expected, actual)
actual = other_coords.merge(orig_coords)
self.assertDatasetIdentical(expected, actual)
other_coords = Dataset(coords={'x': ('x', ['a'])}).coords
with self.assertRaisesRegexp(ValueError, 'not aligned'):
orig_coords.merge(other_coords)
other_coords = Dataset(coords={'x': ('x', ['a', 'b'])}).coords
with self.assertRaisesRegexp(ValueError, 'not aligned'):
orig_coords.merge(other_coords)
other_coords = Dataset(coords={'x': ('x', ['a', 'b', 'c'])}).coords
with self.assertRaisesRegexp(ValueError, 'not aligned'):
orig_coords.merge(other_coords)
other_coords = Dataset(coords={'a': ('x', [8, 9])}).coords
expected = Dataset(coords={'x': range(2)})
actual = orig_coords.merge(other_coords)
self.assertDatasetIdentical(expected, actual)
actual = other_coords.merge(orig_coords)
self.assertDatasetIdentical(expected, actual)
other_coords = Dataset(coords={'x': np.nan}).coords
actual = orig_coords.merge(other_coords)
self.assertDatasetIdentical(orig_coords.to_dataset(), actual)
actual = other_coords.merge(orig_coords)
self.assertDatasetIdentical(orig_coords.to_dataset(), actual)
def test_coords_merge_mismatched_shape(self):
orig_coords = Dataset(coords={'a': ('x', [1, 1])}).coords
other_coords = Dataset(coords={'a': 1}).coords
expected = orig_coords.to_dataset()
actual = orig_coords.merge(other_coords)
self.assertDatasetIdentical(expected, actual)
other_coords = Dataset(coords={'a': ('y', [1])}).coords
expected = Dataset(coords={'a': (['x', 'y'], [[1], [1]])})
actual = orig_coords.merge(other_coords)
self.assertDatasetIdentical(expected, actual)
actual = other_coords.merge(orig_coords)
self.assertDatasetIdentical(expected.T, actual)
orig_coords = Dataset(coords={'a': ('x', [np.nan])}).coords
other_coords = Dataset(coords={'a': np.nan}).coords
expected = orig_coords.to_dataset()
actual = orig_coords.merge(other_coords)
self.assertDatasetIdentical(expected, actual)
def test_equals_and_identical(self):
data = create_test_data(seed=42)
self.assertTrue(data.equals(data))
self.assertTrue(data.identical(data))
data2 = create_test_data(seed=42)
data2.attrs['foobar'] = 'baz'
self.assertTrue(data.equals(data2))
self.assertFalse(data.identical(data2))
del data2['time']
self.assertFalse(data.equals(data2))
data = create_test_data(seed=42).rename({'var1': None})
self.assertTrue(data.equals(data))
self.assertTrue(data.identical(data))
data2 = data.reset_coords()
self.assertFalse(data2.equals(data))
self.assertFalse(data2.identical(data))
def test_equals_failures(self):
data = create_test_data()
self.assertFalse(data.equals('foo'))
self.assertFalse(data.identical(123))
self.assertFalse(data.broadcast_equals({1: 2}))
def test_broadcast_equals(self):
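        # broadcast_equals should treat a scalar coordinate and a length-1
        # coordinate as equal once broadcast against each other, a weaker
        # check than equals or identical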
data1 = Dataset(coords={'x': 0})
data2 = Dataset(coords={'x': [0]})
self.assertTrue(data1.broadcast_equals(data2))
self.assertFalse(data1.equals(data2))
self.assertFalse(data1.identical(data2))
def test_attrs(self):
data = create_test_data(seed=42)
data.attrs = {'foobar': 'baz'}
        self.assertEqual(data.attrs['foobar'], 'baz')
self.assertIsInstance(data.attrs, OrderedDict)
@requires_dask
def test_chunk(self):
data = create_test_data()
for v in data.variables.values():
self.assertIsInstance(v.data, np.ndarray)
self.assertEqual(data.chunks, {})
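        # chunk() with no arguments should produce a single dask chunk
        # spanning each dimension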
reblocked = data.chunk()
for v in reblocked.variables.values():
self.assertIsInstance(v.data, da.Array)
expected_chunks = dict((d, (s,)) for d, s in data.dims.items())
self.assertEqual(reblocked.chunks, expected_chunks)
reblocked = data.chunk({'time': 5, 'dim1': 5, 'dim2': 5, 'dim3': 5})
expected_chunks = {'time': (5,) * 4, 'dim1': (5, 3),
'dim2': (5, 4), 'dim3': (5, 5)}
self.assertEqual(reblocked.chunks, expected_chunks)
reblocked = data.chunk(expected_chunks)
self.assertEqual(reblocked.chunks, expected_chunks)
# reblock on already blocked data
reblocked = reblocked.chunk(expected_chunks)
self.assertEqual(reblocked.chunks, expected_chunks)
self.assertDatasetIdentical(reblocked, data)
with self.assertRaisesRegexp(ValueError, 'some chunks'):
data.chunk({'foo': 10})
@requires_dask
def test_dask_is_lazy(self):
store = InaccessibleVariableDataStore()
create_test_data().dump_to_store(store)
ds = open_dataset(store).chunk()
with self.assertRaises(UnexpectedDataAccess):
ds.load()
with self.assertRaises(UnexpectedDataAccess):
ds['var1'].values
# these should not raise UnexpectedDataAccess:
ds.var1.data
ds.isel(time=10)
ds.isel(time=slice(10), dim1=[0]).isel(dim1=0, dim2=-1)
ds.transpose()
ds.mean()
ds.fillna(0)
ds.rename({'dim1': 'foobar'})
ds.set_coords('var1')
ds.drop('var1')
def test_isel(self):
data = create_test_data()
slicers = {'dim1': slice(None, None, 2), 'dim2': slice(0, 2)}
ret = data.isel(**slicers)
# Verify that only the specified dimension was altered
self.assertItemsEqual(data.dims, ret.dims)
for d in data.dims:
if d in slicers:
self.assertEqual(ret.dims[d],
np.arange(data.dims[d])[slicers[d]].size)
else:
self.assertEqual(data.dims[d], ret.dims[d])
# Verify that the data is what we expect
for v in data:
self.assertEqual(data[v].dims, ret[v].dims)
self.assertEqual(data[v].attrs, ret[v].attrs)
slice_list = [slice(None)] * data[v].values.ndim
for d, s in iteritems(slicers):
if d in data[v].dims:
inds = np.nonzero(np.array(data[v].dims) == d)[0]
for ind in inds:
slice_list[ind] = s
                expected = data[v].values[tuple(slice_list)]
actual = ret[v].values
np.testing.assert_array_equal(expected, actual)
with self.assertRaises(ValueError):
data.isel(not_a_dim=slice(0, 2))
ret = data.isel(dim1=0)
self.assertEqual({'time': 20, 'dim2': 9, 'dim3': 10}, ret.dims)
self.assertItemsEqual(data.data_vars, ret.data_vars)
self.assertItemsEqual(data.coords, ret.coords)
self.assertItemsEqual(data.indexes, list(ret.indexes) + ['dim1'])
ret = data.isel(time=slice(2), dim1=0, dim2=slice(5))
self.assertEqual({'time': 2, 'dim2': 5, 'dim3': 10}, ret.dims)
self.assertItemsEqual(data.data_vars, ret.data_vars)
self.assertItemsEqual(data.coords, ret.coords)
self.assertItemsEqual(data.indexes, list(ret.indexes) + ['dim1'])
ret = data.isel(time=0, dim1=0, dim2=slice(5))
        self.assertEqual({'dim2': 5, 'dim3': 10}, ret.dims)
self.assertItemsEqual(data.data_vars, ret.data_vars)
self.assertItemsEqual(data.coords, ret.coords)
self.assertItemsEqual(data.indexes,
list(ret.indexes) + ['dim1', 'time'])
def test_sel(self):
data = create_test_data()
int_slicers = {'dim1': slice(None, None, 2),
'dim2': slice(2),
'dim3': slice(3)}
loc_slicers = {'dim1': slice(None, None, 2),
'dim2': slice(0, 0.5),
'dim3': slice('a', 'c')}
self.assertDatasetEqual(data.isel(**int_slicers),
data.sel(**loc_slicers))
data['time'] = ('time', pd.date_range('2000-01-01', periods=20))
self.assertDatasetEqual(data.isel(time=0),
data.sel(time='2000-01-01'))
self.assertDatasetEqual(data.isel(time=slice(10)),
data.sel(time=slice('2000-01-01',
'2000-01-10')))
self.assertDatasetEqual(data, data.sel(time=slice('1999', '2005')))
times = pd.date_range('2000-01-01', periods=3)
self.assertDatasetEqual(data.isel(time=slice(3)),
data.sel(time=times))
self.assertDatasetEqual(data.isel(time=slice(3)),
data.sel(time=(data['time.dayofyear'] <= 3)))
td = pd.to_timedelta(np.arange(3), unit='days')
data = Dataset({'x': ('td', np.arange(3)), 'td': td})
self.assertDatasetEqual(data, data.sel(td=td))
self.assertDatasetEqual(data, data.sel(td=slice('3 days')))
self.assertDatasetEqual(data.isel(td=0), data.sel(td='0 days'))
self.assertDatasetEqual(data.isel(td=0), data.sel(td='0h'))
self.assertDatasetEqual(data.isel(td=slice(1, 3)),
data.sel(td=slice('1 days', '2 days')))
def test_isel_points(self):
data = create_test_data()
pdim1 = [1, 2, 3]
pdim2 = [4, 5, 1]
pdim3 = [1, 2, 3]
actual = data.isel_points(dim1=pdim1, dim2=pdim2, dim3=pdim3,
dim='test_coord')
assert 'test_coord' in actual.coords
assert actual.coords['test_coord'].shape == (len(pdim1), )
actual = data.isel_points(dim1=pdim1, dim2=pdim2)
assert 'points' in actual.coords
np.testing.assert_array_equal(pdim1, actual['dim1'])
# test that the order of the indexers doesn't matter
self.assertDatasetIdentical(data.isel_points(dim1=pdim1, dim2=pdim2),
data.isel_points(dim2=pdim2, dim1=pdim1))
# make sure we're raising errors in the right places
with self.assertRaisesRegexp(ValueError,
'All indexers must be the same length'):
data.isel_points(dim1=[1, 2], dim2=[1, 2, 3])
with self.assertRaisesRegexp(ValueError,
'dimension bad_key does not exist'):
data.isel_points(bad_key=[1, 2])
with self.assertRaisesRegexp(TypeError, 'Indexers must be integers'):
data.isel_points(dim1=[1.5, 2.2])
with self.assertRaisesRegexp(TypeError, 'Indexers must be integers'):
data.isel_points(dim1=[1, 2, 3], dim2=slice(3))
with self.assertRaisesRegexp(ValueError,
'Indexers must be 1 dimensional'):
data.isel_points(dim1=1, dim2=2)
with self.assertRaisesRegexp(ValueError,
'Existing dimension names are not valid'):
data.isel_points(dim1=[1, 2], dim2=[1, 2], dim='dim2')
# test to be sure we keep around variables that were not indexed
ds = Dataset({'x': [1, 2, 3, 4], 'y': 0})
actual = ds.isel_points(x=[0, 1, 2])
self.assertDataArrayIdentical(ds['y'], actual['y'])
# tests using index or DataArray as a dim
stations = Dataset()
stations['station'] = ('station', ['A', 'B', 'C'])
stations['dim1s'] = ('station', [1, 2, 3])
stations['dim2s'] = ('station', [4, 5, 1])
actual = data.isel_points(dim1=stations['dim1s'],
dim2=stations['dim2s'],
dim=stations['station'])
assert 'station' in actual.coords
assert 'station' in actual.dims
self.assertDataArrayIdentical(actual['station'].drop(['dim1', 'dim2']),
stations['station'])
# make sure we get the default points coordinate when a list is passed
actual = data.isel_points(dim1=stations['dim1s'],
dim2=stations['dim2s'],
dim=['A', 'B', 'C'])
assert 'points' in actual.coords
# can pass a numpy array
data.isel_points(dim1=stations['dim1s'],
dim2=stations['dim2s'],
dim=np.array([4, 5, 6]))
def test_sel_points(self):
data = create_test_data()
pdim1 = [1, 2, 3]
pdim2 = [4, 5, 1]
pdim3 = [1, 2, 3]
expected = data.isel_points(dim1=pdim1, dim2=pdim2, dim3=pdim3,
dim='test_coord')
actual = data.sel_points(dim1=data.dim1[pdim1], dim2=data.dim2[pdim2],
dim3=data.dim3[pdim3], dim='test_coord')
self.assertDatasetIdentical(expected, actual)
data = Dataset({'foo': (('x', 'y'), np.arange(9).reshape(3, 3))})
expected = Dataset({'foo': ('points', [0, 4, 8])},
{'x': ('points', range(3)),
'y': ('points', range(3))})
actual = data.sel_points(x=[0.1, 1.1, 2.5], y=[0, 1.2, 2.0],
method='pad')
self.assertDatasetIdentical(expected, actual)
def test_sel_method(self):
data = create_test_data()
if pd.__version__ >= '0.16':
expected = data.sel(dim1=1)
actual = data.sel(dim1=0.95, method='nearest')
self.assertDatasetIdentical(expected, actual)
expected = data.sel(dim2=[1.5])
actual = data.sel(dim2=[1.45], method='backfill')
self.assertDatasetIdentical(expected, actual)
with self.assertRaisesRegexp(NotImplementedError, 'slice objects'):
data.sel(dim2=slice(1, 3), method='ffill')
def test_loc(self):
data = create_test_data()
expected = data.sel(dim3='a')
actual = data.loc[dict(dim3='a')]
self.assertDatasetIdentical(expected, actual)
with self.assertRaisesRegexp(TypeError, 'can only lookup dict'):
data.loc['a']
with self.assertRaises(TypeError):
data.loc[dict(dim3='a')] = 0
def test_reindex_like(self):
data = create_test_data()
data['letters'] = ('dim3', 10 * ['a'])
expected = data.isel(dim1=slice(10), time=slice(13))
actual = data.reindex_like(expected)
self.assertDatasetIdentical(actual, expected)
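        # reindexing to dim3 labels shifted by two ('cdefghijkl') leaves the
        # last two labels unmatched, so they are filled with NaN below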
expected = data.copy(deep=True)
expected['dim3'] = ('dim3', list('cdefghijkl'))
expected['var3'][:-2] = expected['var3'][2:]
expected['var3'][-2:] = np.nan
expected['letters'] = expected['letters'].astype(object)
expected['letters'][-2:] = np.nan
expected['numbers'] = expected['numbers'].astype(float)
expected['numbers'][:-2] = expected['numbers'][2:].values
expected['numbers'][-2:] = np.nan
actual = data.reindex_like(expected)
self.assertDatasetIdentical(actual, expected)
def test_reindex(self):
data = create_test_data()
self.assertDatasetIdentical(data, data.reindex())
expected = data.isel(dim1=slice(10))
actual = data.reindex(dim1=data['dim1'][:10])
self.assertDatasetIdentical(actual, expected)
actual = data.reindex(dim1=data['dim1'][:10].values)
self.assertDatasetIdentical(actual, expected)
actual = data.reindex(dim1=data['dim1'][:10].to_index())
self.assertDatasetIdentical(actual, expected)
# test dict-like argument
actual = data.reindex({'dim1': data['dim1'][:10]})
self.assertDatasetIdentical(actual, expected)
with self.assertRaisesRegexp(ValueError, 'cannot specify both'):
data.reindex({'x': 0}, x=0)
with self.assertRaisesRegexp(ValueError, 'dictionary'):
data.reindex('foo')
# out of order
expected = data.sel(dim1=data['dim1'][:10:-1])
actual = data.reindex(dim1=data['dim1'][:10:-1])
self.assertDatasetIdentical(actual, expected)
# regression test for #279
expected = Dataset({'x': ('time', np.random.randn(5))})
        time2 = DataArray(np.arange(5), dims='time2')
actual = expected.reindex(time=time2)
self.assertDatasetIdentical(actual, expected)
# another regression test
ds = Dataset({'foo': (['x', 'y'], np.zeros((3, 4)))})
expected = Dataset({'foo': (['x', 'y'], np.zeros((3, 2))),
'x': [0, 1, 3]})
expected['foo'][-1] = np.nan
actual = ds.reindex(x=[0, 1, 3], y=[0, 1])
self.assertDatasetIdentical(expected, actual)
def test_reindex_method(self):
ds = Dataset({'x': ('y', [10, 20])})
y = [-0.5, 0.5, 1.5]
actual = ds.reindex(y=y, method='backfill')
expected = Dataset({'x': ('y', [10, 20, np.nan]), 'y': y})
self.assertDatasetIdentical(expected, actual)
actual = ds.reindex(y=y, method='pad')
expected = Dataset({'x': ('y', [np.nan, 10, 20]), 'y': y})
self.assertDatasetIdentical(expected, actual)
alt = Dataset({'y': y})
actual = ds.reindex_like(alt, method='pad')
self.assertDatasetIdentical(expected, actual)
def test_align(self):
left = create_test_data()
right = left.copy(deep=True)
right['dim3'] = ('dim3', list('cdefghijkl'))
right['var3'][:-2] = right['var3'][2:]
right['var3'][-2:] = np.random.randn(*right['var3'][-2:].shape)
right['numbers'][:-2] = right['numbers'][2:]
right['numbers'][-2:] = -10
intersection = list('cdefghij')
union = list('abcdefghijkl')
left2, right2 = align(left, right, join='inner')
self.assertArrayEqual(left2['dim3'], intersection)
self.assertDatasetIdentical(left2, right2)
left2, right2 = align(left, right, join='outer')
self.assertVariableEqual(left2['dim3'], right2['dim3'])
self.assertArrayEqual(left2['dim3'], union)
self.assertDatasetIdentical(left2.sel(dim3=intersection),
right2.sel(dim3=intersection))
self.assertTrue(np.isnan(left2['var3'][-2:]).all())
self.assertTrue(np.isnan(right2['var3'][:2]).all())
left2, right2 = align(left, right, join='left')
self.assertVariableEqual(left2['dim3'], right2['dim3'])
self.assertVariableEqual(left2['dim3'], left['dim3'])
self.assertDatasetIdentical(left2.sel(dim3=intersection),
right2.sel(dim3=intersection))
self.assertTrue(np.isnan(right2['var3'][:2]).all())
left2, right2 = align(left, right, join='right')
self.assertVariableEqual(left2['dim3'], right2['dim3'])
self.assertVariableEqual(left2['dim3'], right['dim3'])
self.assertDatasetIdentical(left2.sel(dim3=intersection),
right2.sel(dim3=intersection))
self.assertTrue(np.isnan(left2['var3'][-2:]).all())
with self.assertRaisesRegexp(ValueError, 'invalid value for join'):
align(left, right, join='foobar')
with self.assertRaises(TypeError):
align(left, right, foo='bar')
def test_variable_indexing(self):
data = create_test_data()
v = data['var1']
d1 = data['dim1']
d2 = data['dim2']
self.assertVariableEqual(v, v[d1.values])
self.assertVariableEqual(v, v[d1])
self.assertVariableEqual(v[:3], v[d1 < 3])
self.assertVariableEqual(v[:, 3:], v[:, d2 >= 1.5])
self.assertVariableEqual(v[:3, 3:], v[d1 < 3, d2 >= 1.5])
self.assertVariableEqual(v[:3, :2], v[range(3), range(2)])
self.assertVariableEqual(v[:3, :2], v.loc[d1[:3], d2[:2]])
def test_drop_variables(self):
data = create_test_data()
self.assertDatasetIdentical(data, data.drop([]))
expected = Dataset(dict((k, data[k]) for k in data if k != 'time'))
actual = data.drop('time')
self.assertDatasetIdentical(expected, actual)
actual = data.drop(['time'])
self.assertDatasetIdentical(expected, actual)
expected = Dataset(dict((k, data[k]) for
k in ['dim2', 'dim3', 'time', 'numbers']))
actual = data.drop('dim1')
self.assertDatasetIdentical(expected, actual)
with self.assertRaisesRegexp(ValueError, 'cannot be found'):
data.drop('not_found_here')
def test_drop_index_labels(self):
data = Dataset({'A': (['x', 'y'], np.random.randn(2, 3)),
'x': ['a', 'b']})
actual = data.drop(1, 'y')
expected = data.isel(y=[0, 2])
self.assertDatasetIdentical(expected, actual)
actual = data.drop(['a'], 'x')
expected = data.isel(x=[1])
self.assertDatasetIdentical(expected, actual)
actual = data.drop(['a', 'b'], 'x')
expected = data.isel(x=slice(0, 0))
self.assertDatasetIdentical(expected, actual)
with self.assertRaises(ValueError):
# not contained in axis
data.drop(['c'], dim='x')
def test_copy(self):
data = create_test_data()
for copied in [data.copy(deep=False), copy(data)]:
self.assertDatasetIdentical(data, copied)
for k in data:
v0 = data.variables[k]
v1 = copied.variables[k]
self.assertIs(v0, v1)
copied['foo'] = ('z', np.arange(5))
self.assertNotIn('foo', data)
for copied in [data.copy(deep=True), deepcopy(data)]:
self.assertDatasetIdentical(data, copied)
for k in data:
v0 = data.variables[k]
v1 = copied.variables[k]
self.assertIsNot(v0, v1)
def test_rename(self):
data = create_test_data()
newnames = {'var1': 'renamed_var1', 'dim2': 'renamed_dim2'}
renamed = data.rename(newnames)
variables = OrderedDict(data.variables)
for k, v in iteritems(newnames):
variables[v] = variables.pop(k)
for k, v in iteritems(variables):
dims = list(v.dims)
for name, newname in iteritems(newnames):
if name in dims:
dims[dims.index(name)] = newname
self.assertVariableEqual(Variable(dims, v.values, v.attrs),
renamed[k])
self.assertEqual(v.encoding, renamed[k].encoding)
self.assertEqual(type(v), type(renamed.variables[k]))
self.assertTrue('var1' not in renamed)
self.assertTrue('dim2' not in renamed)
with self.assertRaisesRegexp(ValueError, "cannot rename 'not_a_var'"):
data.rename({'not_a_var': 'nada'})
# verify that we can rename a variable without accessing the data
var1 = data['var1']
data['var1'] = (var1.dims, InaccessibleArray(var1.values))
renamed = data.rename(newnames)
with self.assertRaises(UnexpectedDataAccess):
renamed['renamed_var1'].values
def test_rename_inplace(self):
times = pd.date_range('2000-01-01', periods=3)
data = Dataset({'z': ('x', [2, 3, 4]), 't': ('t', times)})
copied = data.copy()
renamed = data.rename({'x': 'y'})
data.rename({'x': 'y'}, inplace=True)
self.assertDatasetIdentical(data, renamed)
self.assertFalse(data.equals(copied))
        self.assertEqual(data.dims, {'y': 3, 't': 3})
# check virtual variables
self.assertArrayEqual(data['t.dayofyear'], [1, 2, 3])
def test_swap_dims(self):
original = Dataset({'x': [1, 2, 3], 'y': ('x', list('abc')), 'z': 42})
expected = Dataset({'z': 42}, {'x': ('y', [1, 2, 3]), 'y': list('abc')})
actual = original.swap_dims({'x': 'y'})
self.assertDatasetIdentical(expected, actual)
self.assertIsInstance(actual.variables['y'], Coordinate)
self.assertIsInstance(actual.variables['x'], Variable)
roundtripped = actual.swap_dims({'y': 'x'})
self.assertDatasetIdentical(original.set_coords('y'), roundtripped)
actual = original.copy()
actual.swap_dims({'x': 'y'}, inplace=True)
self.assertDatasetIdentical(expected, actual)
with self.assertRaisesRegexp(ValueError, 'cannot swap'):
original.swap_dims({'y': 'x'})
with self.assertRaisesRegexp(ValueError, 'replacement dimension'):
original.swap_dims({'x': 'z'})
def test_update(self):
data = create_test_data(seed=0)
expected = data.copy()
var2 = Variable('dim1', np.arange(8))
actual = data.update({'var2': var2})
expected['var2'] = var2
self.assertDatasetIdentical(expected, actual)
actual = data.copy()
actual_result = actual.update(data, inplace=True)
self.assertIs(actual_result, actual)
self.assertDatasetIdentical(expected, actual)
actual = data.update(data, inplace=False)
expected = data
self.assertIsNot(actual, expected)
self.assertDatasetIdentical(expected, actual)
other = Dataset(attrs={'new': 'attr'})
actual = data.copy()
actual.update(other)
self.assertDatasetIdentical(expected, actual)
def test_update_auto_align(self):
ds = Dataset({'x': ('t', [3, 4])})
expected = Dataset({'x': ('t', [3, 4]), 'y': ('t', [np.nan, 5])})
actual = ds.copy()
other = {'y': ('t', [5]), 't': [1]}
with self.assertRaisesRegexp(ValueError, 'conflicting sizes'):
actual.update(other)
actual.update(Dataset(other))
self.assertDatasetIdentical(expected, actual)
actual = ds.copy()
other = Dataset({'y': ('t', [5]), 't': [100]})
actual.update(other)
expected = Dataset({'x': ('t', [3, 4]), 'y': ('t', [np.nan] * 2)})
self.assertDatasetIdentical(expected, actual)
def test_merge(self):
data = create_test_data()
ds1 = data[['var1']]
ds2 = data[['var3']]
expected = data[['var1', 'var3']]
actual = ds1.merge(ds2)
self.assertDatasetIdentical(expected, actual)
actual = ds2.merge(ds1)
self.assertDatasetIdentical(expected, actual)
actual = data.merge(data)
self.assertDatasetIdentical(data, actual)
actual = data.reset_coords(drop=True).merge(data)
self.assertDatasetIdentical(data, actual)
actual = data.merge(data.reset_coords(drop=True))
self.assertDatasetIdentical(data, actual)
with self.assertRaises(ValueError):
ds1.merge(ds2.rename({'var3': 'var1'}))
with self.assertRaisesRegexp(ValueError, 'cannot merge'):
data.reset_coords().merge(data)
with self.assertRaisesRegexp(ValueError, 'cannot merge'):
data.merge(data.reset_coords())
def test_merge_broadcast_equals(self):
ds1 = Dataset({'x': 0})
ds2 = Dataset({'x': ('y', [0, 0])})
actual = ds1.merge(ds2)
self.assertDatasetIdentical(ds2, actual)
actual = ds2.merge(ds1)
self.assertDatasetIdentical(ds2, actual)
actual = ds1.copy()
actual.update(ds2)
self.assertDatasetIdentical(ds2, actual)
ds1 = Dataset({'x': np.nan})
ds2 = Dataset({'x': ('y', [np.nan, np.nan])})
actual = ds1.merge(ds2)
self.assertDatasetIdentical(ds2, actual)
def test_merge_compat(self):
ds1 = Dataset({'x': 0})
ds2 = Dataset({'x': 1})
for compat in ['broadcast_equals', 'equals', 'identical']:
with self.assertRaisesRegexp(ValueError, 'conflicting value'):
ds1.merge(ds2, compat=compat)
ds2 = Dataset({'x': [0, 0]})
for compat in ['equals', 'identical']:
with self.assertRaisesRegexp(ValueError, 'conflicting value'):
ds1.merge(ds2, compat=compat)
ds2 = Dataset({'x': ((), 0, {'foo': 'bar'})})
with self.assertRaisesRegexp(ValueError, 'conflicting value'):
ds1.merge(ds2, compat='identical')
        with self.assertRaisesRegexp(ValueError, r'compat=\S+ invalid'):
ds1.merge(ds2, compat='foobar')
def test_merge_auto_align(self):
ds1 = Dataset({'a': ('x', [1, 2])})
ds2 = Dataset({'b': ('x', [3, 4]), 'x': [1, 2]})
expected = Dataset({'a': ('x', [1, 2, np.nan]),
'b': ('x', [np.nan, 3, 4])})
self.assertDatasetIdentical(expected, ds1.merge(ds2))
self.assertDatasetIdentical(expected, ds2.merge(ds1))
expected = expected.isel(x=slice(2))
self.assertDatasetIdentical(expected, ds1.merge(ds2, join='left'))
self.assertDatasetIdentical(expected, ds2.merge(ds1, join='right'))
expected = expected.isel(x=slice(1, 2))
self.assertDatasetIdentical(expected, ds1.merge(ds2, join='inner'))
self.assertDatasetIdentical(expected, ds2.merge(ds1, join='inner'))
def test_getitem(self):
data = create_test_data()
self.assertIsInstance(data['var1'], DataArray)
self.assertVariableEqual(data['var1'], data.variables['var1'])
with self.assertRaises(KeyError):
data['notfound']
with self.assertRaises(KeyError):
data[['var1', 'notfound']]
actual = data[['var1', 'var2']]
expected = Dataset({'var1': data['var1'], 'var2': data['var2']})
self.assertDatasetEqual(expected, actual)
actual = data['numbers']
expected = DataArray(data['numbers'].variable, [data['dim3']],
name='numbers')
self.assertDataArrayIdentical(expected, actual)
actual = data[dict(dim1=0)]
expected = data.isel(dim1=0)
self.assertDatasetIdentical(expected, actual)
def test_getitem_hashable(self):
data = create_test_data()
data[(3, 4)] = data['var1'] + 1
expected = data['var1'] + 1
expected.name = (3, 4)
self.assertDataArrayIdentical(expected, data[(3, 4)])
with self.assertRaisesRegexp(KeyError, "('var1', 'var2')"):
data[('var1', 'var2')]
def test_virtual_variables(self):
# access virtual variables
data = create_test_data()
expected = DataArray(1 + np.arange(20), coords=[data['time']],
dims='time', name='dayofyear')
self.assertDataArrayIdentical(expected, data['time.dayofyear'])
self.assertArrayEqual(data['time.month'].values,
data.variables['time'].to_index().month)
self.assertArrayEqual(data['time.season'].values, 'DJF')
# test virtual variable math
self.assertArrayEqual(data['time.dayofyear'] + 1, 2 + np.arange(20))
self.assertArrayEqual(np.sin(data['time.dayofyear']),
np.sin(1 + np.arange(20)))
# ensure they become coordinates
expected = Dataset({}, {'dayofyear': data['time.dayofyear']})
actual = data[['time.dayofyear']]
self.assertDatasetEqual(expected, actual)
# non-coordinate variables
ds = Dataset({'t': ('x', pd.date_range('2000-01-01', periods=3))})
self.assertTrue((ds['t.year'] == 2000).all())
def test_time_season(self):
ds = Dataset({'t': pd.date_range('2000-01-01', periods=12, freq='M')})
expected = ['DJF'] * 2 + ['MAM'] * 3 + ['JJA'] * 3 + ['SON'] * 3 + ['DJF']
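        # 12 month-end dates: Jan and Feb fall in DJF, then three months per
        # season, with Dec wrapping back around to DJF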
self.assertArrayEqual(expected, ds['t.season'])
def test_slice_virtual_variable(self):
data = create_test_data()
self.assertVariableEqual(data['time.dayofyear'][:10],
Variable(['time'], 1 + np.arange(10)))
self.assertVariableEqual(data['time.dayofyear'][0], Variable([], 1))
def test_setitem(self):
# assign a variable
var = Variable(['dim1'], np.random.randn(8))
data1 = create_test_data()
data1['A'] = var
data2 = data1.copy()
data2['A'] = var
self.assertDatasetIdentical(data1, data2)
# assign a dataset array
dv = 2 * data2['A']
data1['B'] = dv.variable
data2['B'] = dv
self.assertDatasetIdentical(data1, data2)
# can't assign an ND array without dimensions
with self.assertRaisesRegexp(ValueError,
'dimensions .* must have the same len'):
data2['C'] = var.values.reshape(2, 4)
# but can assign a 1D array
data1['C'] = var.values
data2['C'] = ('C', var.values)
self.assertDatasetIdentical(data1, data2)
# can assign a scalar
data1['scalar'] = 0
data2['scalar'] = ([], 0)
self.assertDatasetIdentical(data1, data2)
# can't use the same dimension name as a scalar var
with self.assertRaisesRegexp(ValueError, 'cannot merge'):
data1['newvar'] = ('scalar', [3, 4, 5])
# can't resize a used dimension
with self.assertRaisesRegexp(ValueError, 'conflicting sizes'):
data1['dim1'] = data1['dim1'][:5]
# override an existing value
data1['A'] = 3 * data2['A']
self.assertVariableEqual(data1['A'], 3 * data2['A'])
with self.assertRaises(NotImplementedError):
data1[{'x': 0}] = 0
def test_setitem_auto_align(self):
ds = Dataset()
ds['x'] = ('y', range(3))
ds['y'] = 1 + np.arange(3)
expected = Dataset({'x': ('y', range(3)), 'y': 1 + np.arange(3)})
self.assertDatasetIdentical(ds, expected)
ds['y'] = DataArray(range(3), dims='y')
expected = Dataset({'x': ('y', range(3))})
self.assertDatasetIdentical(ds, expected)
ds['x'] = DataArray([1, 2], dims='y')
expected = Dataset({'x': ('y', [1, 2, np.nan])})
self.assertDatasetIdentical(ds, expected)
ds['x'] = 42
expected = Dataset({'x': 42, 'y': range(3)})
self.assertDatasetIdentical(ds, expected)
ds['x'] = DataArray([4, 5, 6, 7], dims='y')
expected = Dataset({'x': ('y', [4, 5, 6])})
self.assertDatasetIdentical(ds, expected)
def test_assign(self):
ds = Dataset()
        actual = ds.assign(x=[0, 1, 2], y=2)
expected = Dataset({'x': [0, 1, 2], 'y': 2})
self.assertDatasetIdentical(actual, expected)
self.assertEqual(list(actual), ['x', 'y'])
self.assertDatasetIdentical(ds, Dataset())
        actual = actual.assign(y=lambda ds: ds.x ** 2)
expected = Dataset({'y': ('x', [0, 1, 4])})
self.assertDatasetIdentical(actual, expected)
        actual = actual.assign_coords(z=2)
expected = Dataset({'y': ('x', [0, 1, 4])}, {'z': 2})
self.assertDatasetIdentical(actual, expected)
ds = Dataset({'a': ('x', range(3))}, {'b': ('x', ['A'] * 2 + ['B'])})
        actual = ds.groupby('b').assign(c=lambda ds: 2 * ds.a)
expected = ds.merge({'c': ('x', [0, 2, 4])})
self.assertDatasetIdentical(actual, expected)
        actual = ds.groupby('b').assign(c=lambda ds: ds.a.sum())
expected = ds.merge({'c': ('x', [1, 1, 2])})
self.assertDatasetIdentical(actual, expected)
        actual = ds.groupby('b').assign_coords(c=lambda ds: ds.a.sum())
expected = expected.set_coords('c')
self.assertDatasetIdentical(actual, expected)
def test_delitem(self):
data = create_test_data()
all_items = set(data)
self.assertItemsEqual(data, all_items)
del data['var1']
self.assertItemsEqual(data, all_items - set(['var1']))
del data['dim1']
self.assertItemsEqual(data, set(['time', 'dim2', 'dim3', 'numbers']))
self.assertNotIn('dim1', data.dims)
self.assertNotIn('dim1', data.coords)
def test_squeeze(self):
data = Dataset({'foo': (['x', 'y', 'z'], [[[1], [2]]])})
for args in [[], [['x']], [['x', 'z']]]:
def get_args(v):
return [set(args[0]) & set(v.dims)] if args else []
expected = Dataset(dict((k, v.squeeze(*get_args(v)))
for k, v in iteritems(data.variables)))
expected.set_coords(data.coords, inplace=True)
self.assertDatasetIdentical(expected, data.squeeze(*args))
# invalid squeeze
with self.assertRaisesRegexp(ValueError, 'cannot select a dimension'):
data.squeeze('y')
def test_groupby(self):
data = Dataset({'z': (['x', 'y'], np.random.randn(3, 5))},
{'x': ('x', list('abc')),
'c': ('x', [0, 1, 0])})
groupby = data.groupby('x')
self.assertEqual(len(groupby), 3)
expected_groups = {'a': 0, 'b': 1, 'c': 2}
self.assertEqual(groupby.groups, expected_groups)
expected_items = [('a', data.isel(x=0)),
('b', data.isel(x=1)),
('c', data.isel(x=2))]
for actual, expected in zip(groupby, expected_items):
self.assertEqual(actual[0], expected[0])
self.assertDatasetEqual(actual[1], expected[1])
identity = lambda x: x
for k in ['x', 'c', 'y']:
actual = data.groupby(k, squeeze=False).apply(identity)
self.assertDatasetEqual(data, actual)
def test_groupby_returns_new_type(self):
data = Dataset({'z': (['x', 'y'], np.random.randn(3, 5))})
actual = data.groupby('x').apply(lambda ds: ds['z'])
expected = data['z']
self.assertDataArrayIdentical(expected, actual)
actual = data['z'].groupby('x').apply(lambda x: x.to_dataset())
expected = data
self.assertDatasetIdentical(expected, actual)
def test_groupby_iter(self):
data = create_test_data()
for n, (t, sub) in enumerate(list(data.groupby('dim1'))[:3]):
self.assertEqual(data['dim1'][n], t)
self.assertVariableEqual(data['var1'][n], sub['var1'])
self.assertVariableEqual(data['var2'][n], sub['var2'])
self.assertVariableEqual(data['var3'][:, n], sub['var3'])
def test_groupby_errors(self):
data = create_test_data()
with self.assertRaisesRegexp(ValueError, 'must be 1 dimensional'):
data.groupby('var1')
with self.assertRaisesRegexp(ValueError, 'must have a name'):
data.groupby(np.arange(10))
with self.assertRaisesRegexp(ValueError, 'length does not match'):
data.groupby(data['dim1'][:3])
with self.assertRaisesRegexp(ValueError, "must have a 'dims'"):
data.groupby(data.coords['dim1'].to_index())
def test_groupby_reduce(self):
data = Dataset({'xy': (['x', 'y'], np.random.randn(3, 4)),
'xonly': ('x', np.random.randn(3)),
'yonly': ('y', np.random.randn(4)),
'letters': ('y', ['a', 'a', 'b', 'b'])})
expected = data.mean('y')
expected['yonly'] = expected['yonly'].variable.expand_dims({'x': 3})
actual = data.groupby('x').mean()
self.assertDatasetAllClose(expected, actual)
actual = data.groupby('x').mean('y')
self.assertDatasetAllClose(expected, actual)
letters = data['letters']
expected = Dataset({'xy': data['xy'].groupby(letters).mean(),
'xonly': (data['xonly'].mean().variable
.expand_dims({'letters': 2})),
'yonly': data['yonly'].groupby(letters).mean()})
actual = data.groupby('letters').mean()
self.assertDatasetAllClose(expected, actual)
def test_groupby_math(self):
reorder_dims = lambda x: x.transpose('dim1', 'dim2', 'dim3', 'time')
ds = create_test_data()
for squeeze in [True, False]:
grouped = ds.groupby('dim1', squeeze=squeeze)
expected = reorder_dims(ds + ds.coords['dim1'])
actual = grouped + ds.coords['dim1']
self.assertDatasetIdentical(expected, reorder_dims(actual))
actual = ds.coords['dim1'] + grouped
self.assertDatasetIdentical(expected, reorder_dims(actual))
ds2 = 2 * ds
expected = reorder_dims(ds + ds2)
actual = grouped + ds2
self.assertDatasetIdentical(expected, reorder_dims(actual))
actual = ds2 + grouped
self.assertDatasetIdentical(expected, reorder_dims(actual))
grouped = ds.groupby('numbers')
zeros = DataArray([0, 0, 0, 0], [('numbers', range(4))])
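        # adding a DataArray indexed by the group name broadcasts each
        # group's value over the grouped dimension; with all zeros the data
        # should be unchanged apart from dimension order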
expected = ((ds + Variable('dim3', np.zeros(10)))
.transpose('dim3', 'dim1', 'dim2', 'time'))
actual = grouped + zeros
self.assertDatasetEqual(expected, actual)
actual = zeros + grouped
self.assertDatasetEqual(expected, actual)
with self.assertRaisesRegexp(ValueError, 'dimensions .* do not exist'):
grouped + ds
with self.assertRaisesRegexp(ValueError, 'dimensions .* do not exist'):
ds + grouped
with self.assertRaisesRegexp(TypeError, 'only support binary ops'):
grouped + 1
with self.assertRaisesRegexp(TypeError, 'only support binary ops'):
grouped + grouped
with self.assertRaisesRegexp(TypeError, 'in-place operations'):
ds += grouped
ds = Dataset({'x': ('time', np.arange(100)),
'time': pd.date_range('2000-01-01', periods=100)})
with self.assertRaisesRegexp(ValueError, 'no overlapping labels'):
ds + ds.groupby('time.month')
def test_groupby_math_virtual(self):
ds = Dataset({'x': ('t', [1, 2, 3])},
{'t': pd.date_range('20100101', periods=3)})
grouped = ds.groupby('t.day')
actual = grouped - grouped.mean()
expected = Dataset({'x': ('t', [0, 0, 0])},
ds[['t', 't.day']])
self.assertDatasetIdentical(actual, expected)
def test_groupby_nan(self):
# nan should be excluded from groupby
ds = Dataset({'foo': ('x', [1, 2, 3, 4])},
{'bar': ('x', [1, 1, 2, np.nan])})
actual = ds.groupby('bar').mean()
expected = Dataset({'foo': ('bar', [1.5, 3]), 'bar': [1, 2]})
self.assertDatasetIdentical(actual, expected)
def test_resample_and_first(self):
times = pd.date_range('2000-01-01', freq='6H', periods=10)
ds = Dataset({'foo': (['time', 'x', 'y'], np.random.randn(10, 5, 3)),
'bar': ('time', np.random.randn(10), {'meta': 'data'}),
'time': times})
actual = ds.resample('1D', dim='time', how='first')
expected = ds.isel(time=[0, 4, 8])
self.assertDatasetIdentical(expected, actual)
# upsampling
expected_time = pd.date_range('2000-01-01', freq='3H', periods=19)
expected = ds.reindex(time=expected_time)
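        # upsampling from 6H to 3H leaves nothing to aggregate, so every
        # reduction should collapse to a reindex that inserts NaN at the
        # new times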
for how in ['mean', 'sum', 'first', 'last', np.mean]:
actual = ds.resample('3H', 'time', how=how)
self.assertDatasetEqual(expected, actual)
def test_to_array(self):
ds = Dataset(OrderedDict([('a', 1), ('b', ('x', [1, 2, 3]))]),
coords={'c': 42}, attrs={'Conventions': 'None'})
data = [[1, 1, 1], [1, 2, 3]]
coords = {'x': range(3), 'c': 42, 'variable': ['a', 'b']}
dims = ('variable', 'x')
expected = DataArray(data, coords, dims, attrs=ds.attrs)
actual = ds.to_array()
self.assertDataArrayIdentical(expected, actual)
actual = ds.to_array('abc', name='foo')
expected = expected.rename({'variable': 'abc'}).rename('foo')
self.assertDataArrayIdentical(expected, actual)
def test_to_and_from_dataframe(self):
x = np.random.randn(10)
y = np.random.randn(10)
t = list('abcdefghij')
ds = Dataset(OrderedDict([('a', ('t', x)),
('b', ('t', y)),
('t', ('t', t))]))
expected = pd.DataFrame(np.array([x, y]).T, columns=['a', 'b'],
index=pd.Index(t, name='t'))
actual = ds.to_dataframe()
# use the .equals method to check all DataFrame metadata
assert expected.equals(actual), (expected, actual)
# verify coords are included
actual = ds.set_coords('b').to_dataframe()
assert expected.equals(actual), (expected, actual)
# check roundtrip
self.assertDatasetIdentical(ds, Dataset.from_dataframe(actual))
# test a case with a MultiIndex
w = np.random.randn(2, 3)
ds = Dataset({'w': (('x', 'y'), w)})
ds['y'] = ('y', list('abc'))
exp_index = pd.MultiIndex.from_arrays(
[[0, 0, 0, 1, 1, 1], ['a', 'b', 'c', 'a', 'b', 'c']],
names=['x', 'y'])
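        # to_dataframe flattens multiple dimensions into a MultiIndex,
        # iterating over x and y in row-major (C) order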
expected = pd.DataFrame(w.reshape(-1), columns=['w'], index=exp_index)
actual = ds.to_dataframe()
self.assertTrue(expected.equals(actual))
# check roundtrip
self.assertDatasetIdentical(ds, Dataset.from_dataframe(actual))
# check pathological cases
df = pd.DataFrame([1])
actual = Dataset.from_dataframe(df)
expected = Dataset({0: ('index', [1])})
self.assertDatasetIdentical(expected, actual)
df = pd.DataFrame()
actual = Dataset.from_dataframe(df)
expected = Dataset()
self.assertDatasetIdentical(expected, actual)
# regression test for GH278
# use int64 to ensure consistent results for the pandas .equals method
# on windows (which requires the same dtype)
ds = Dataset({'x': pd.Index(['bar']),
'a': ('y', np.array([1], 'int64'))}).isel(x=0)
# use .loc to ensure consistent results on Python 3
actual = ds.to_dataframe().loc[:, ['a', 'x']]
expected = pd.DataFrame([[1, 'bar']], index=pd.Index([0], name='y'),
columns=['a', 'x'])
assert expected.equals(actual), (expected, actual)
ds = Dataset({'x': np.array([0], 'int64'),
'y': np.array([1], 'int64')})
actual = ds.to_dataframe()
idx = pd.MultiIndex.from_arrays([[0], [1]], names=['x', 'y'])
expected = pd.DataFrame([[]], index=idx)
assert expected.equals(actual), (expected, actual)
# regression test for GH449
df = pd.DataFrame(np.zeros((2, 2)))
df.columns = ['foo', 'foo']
with self.assertRaisesRegexp(ValueError, 'non-unique columns'):
Dataset.from_dataframe(df)
def test_pickle(self):
data = create_test_data()
roundtripped = pickle.loads(pickle.dumps(data))
self.assertDatasetIdentical(data, roundtripped)
# regression test for #167:
self.assertEqual(data.dims, roundtripped.dims)
def test_lazy_load(self):
store = InaccessibleVariableDataStore()
create_test_data().dump_to_store(store)
for decode_cf in [True, False]:
ds = open_dataset(store, decode_cf=decode_cf)
with self.assertRaises(UnexpectedDataAccess):
ds.load()
with self.assertRaises(UnexpectedDataAccess):
ds['var1'].values
# these should not raise UnexpectedDataAccess:
ds.isel(time=10)
ds.isel(time=slice(10), dim1=[0]).isel(dim1=0, dim2=-1)
def test_dropna(self):
x = np.random.randn(4, 4)
x[::2, 0] = np.nan
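        # place a NaN in column 0 of rows 0 and 2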
y = np.random.randn(4)
y[-1] = np.nan
        ds = Dataset({'foo': (('a', 'b'), x), 'bar': ('b', y)})
expected = ds.isel(a=slice(1, None, 2))
actual = ds.dropna('a')
self.assertDatasetIdentical(actual, expected)
expected = ds.isel(b=slice(1, 3))
actual = ds.dropna('b')
self.assertDatasetIdentical(actual, expected)
actual = ds.dropna('b', subset=['foo', 'bar'])
self.assertDatasetIdentical(actual, expected)
expected = ds.isel(b=slice(1, None))
actual = ds.dropna('b', subset=['foo'])
self.assertDatasetIdentical(actual, expected)
expected = ds.isel(b=slice(3))
actual = ds.dropna('b', subset=['bar'])
self.assertDatasetIdentical(actual, expected)
actual = ds.dropna('a', subset=[])
self.assertDatasetIdentical(actual, ds)
actual = ds.dropna('a', subset=['bar'])
self.assertDatasetIdentical(actual, ds)
actual = ds.dropna('a', how='all')
self.assertDatasetIdentical(actual, ds)
actual = ds.dropna('b', how='all', subset=['bar'])
expected = ds.isel(b=[0, 1, 2])
self.assertDatasetIdentical(actual, expected)
actual = ds.dropna('b', thresh=1, subset=['bar'])
self.assertDatasetIdentical(actual, expected)
actual = ds.dropna('b', thresh=2)
self.assertDatasetIdentical(actual, ds)
actual = ds.dropna('b', thresh=4)
expected = ds.isel(b=[1, 2, 3])
self.assertDatasetIdentical(actual, expected)
        # with thresh=3, rows 0 and 2 still have three non-NaN values each,
        # so nothing along 'a' is dropped
        actual = ds.dropna('a', thresh=3)
        self.assertDatasetIdentical(actual, ds)
with self.assertRaisesRegexp(ValueError, 'a single dataset dimension'):
ds.dropna('foo')
with self.assertRaisesRegexp(ValueError, 'invalid how'):
ds.dropna('a', how='somehow')
with self.assertRaisesRegexp(TypeError, 'must specify how or thresh'):
ds.dropna('a', how=None)
def test_fillna(self):
ds = Dataset({'a': ('x', [np.nan, 1, np.nan, 3])})
# fill with -1
actual = ds.fillna(-1)
expected = Dataset({'a': ('x', [-1, 1, -1, 3])})
self.assertDatasetIdentical(expected, actual)
actual = ds.fillna({'a': -1})
self.assertDatasetIdentical(expected, actual)
other = Dataset({'a': -1})
actual = ds.fillna(other)
self.assertDatasetIdentical(expected, actual)
actual = ds.fillna({'a': other.a})
self.assertDatasetIdentical(expected, actual)
# fill with range(4)
b = DataArray(range(4), dims='x')
actual = ds.fillna(b)
expected = b.rename('a').to_dataset()
self.assertDatasetIdentical(expected, actual)
actual = ds.fillna(expected)
self.assertDatasetIdentical(expected, actual)
actual = ds.fillna(range(4))
self.assertDatasetIdentical(expected, actual)
actual = ds.fillna(b[:3])
self.assertDatasetIdentical(expected, actual)
# left align variables
ds['b'] = np.nan
actual = ds.fillna({'a': -1, 'c': 'foobar'})
expected = Dataset({'a': ('x', [-1, 1, -1, 3]), 'b': np.nan})
self.assertDatasetIdentical(expected, actual)
with self.assertRaisesRegexp(ValueError, 'no overlapping'):
ds.fillna({'x': 0})
with self.assertRaisesRegexp(ValueError, 'no overlapping'):
ds.fillna(Dataset(coords={'a': 0}))
# groupby
expected = Dataset({'a': ('x', range(4))})
for target in [ds, expected]:
target.coords['b'] = ('x', [0, 0, 1, 1])
actual = ds.groupby('b').fillna(DataArray([0, 2], dims='b'))
self.assertDatasetIdentical(expected, actual)
actual = ds.groupby('b').fillna(Dataset({'a': ('b', [0, 2])}))
self.assertDatasetIdentical(expected, actual)
def test_where(self):
ds = Dataset({'a': ('x', range(5))})
expected = Dataset({'a': ('x', [np.nan, np.nan, 2, 3, 4])})
actual = ds.where(ds > 1)
self.assertDatasetIdentical(expected, actual)
actual = ds.where(ds.a > 1)
self.assertDatasetIdentical(expected, actual)
actual = ds.where(ds.a.values > 1)
self.assertDatasetIdentical(expected, actual)
actual = ds.where(True)
self.assertDatasetIdentical(ds, actual)
expected = ds.copy(deep=True)
expected['a'].values = [np.nan] * 5
actual = ds.where(False)
self.assertDatasetIdentical(expected, actual)
# 2d
ds = Dataset({'a': (('x', 'y'), [[0, 1], [2, 3]])})
expected = Dataset({'a': (('x', 'y'), [[np.nan, 1], [2, 3]])})
actual = ds.where(ds > 0)
self.assertDatasetIdentical(expected, actual)
# groupby
ds = Dataset({'a': ('x', range(5))}, {'c': ('x', [0, 0, 1, 1, 1])})
cond = Dataset({'a': ('c', [True, False])})
expected = ds.copy(deep=True)
expected['a'].values = [0, 1] + [np.nan] * 3
actual = ds.groupby('c').where(cond)
self.assertDatasetIdentical(expected, actual)
def test_reduce(self):
data = create_test_data()
self.assertEqual(len(data.mean().coords), 0)
actual = data.max()
expected = Dataset(dict((k, v.max())
for k, v in iteritems(data.data_vars)))
self.assertDatasetEqual(expected, actual)
self.assertDatasetEqual(data.min(dim=['dim1']),
data.min(dim='dim1'))
for reduct, expected in [('dim2', ['dim1', 'dim3', 'time']),
(['dim2', 'time'], ['dim1', 'dim3']),
(('dim2', 'time'), ['dim1', 'dim3']),
((), ['dim1', 'dim2', 'dim3', 'time'])]:
actual = data.min(dim=reduct).dims
print(reduct, actual, expected)
self.assertItemsEqual(actual, expected)
self.assertDatasetEqual(data.mean(dim=[]), data)
def test_reduce_bad_dim(self):
data = create_test_data()
with self.assertRaisesRegexp(ValueError, 'Dataset does not contain'):
ds = data.mean(dim='bad_dim')
def test_reduce_non_numeric(self):
data1 = create_test_data(seed=44)
data2 = create_test_data(seed=44)
add_vars = {'var4': ['dim1', 'dim2']}
for v, dims in sorted(add_vars.items()):
size = tuple(data1.dims[d] for d in dims)
            # randint's high bound is exclusive, matching the removed
            # inclusive random_integers(0, 100)
            data = np.random.randint(0, 101, size=size).astype(np.str_)
data1[v] = (dims, data, {'foo': 'variable'})
self.assertTrue('var4' not in data1.mean())
self.assertDatasetEqual(data1.mean(), data2.mean())
self.assertDatasetEqual(data1.mean(dim='dim1'),
data2.mean(dim='dim1'))
def test_reduce_strings(self):
expected = Dataset({'x': 'a'})
ds = Dataset({'x': ('y', ['a', 'b'])})
actual = ds.min()
self.assertDatasetIdentical(expected, actual)
expected = Dataset({'x': 'b'})
actual = ds.max()
self.assertDatasetIdentical(expected, actual)
expected = Dataset({'x': 0})
actual = ds.argmin()
self.assertDatasetIdentical(expected, actual)
expected = Dataset({'x': 1})
actual = ds.argmax()
self.assertDatasetIdentical(expected, actual)
expected = Dataset({'x': b'a'})
ds = Dataset({'x': ('y', np.array(['a', 'b'], 'S1'))})
actual = ds.min()
self.assertDatasetIdentical(expected, actual)
expected = Dataset({'x': u'a'})
ds = Dataset({'x': ('y', np.array(['a', 'b'], 'U1'))})
actual = ds.min()
self.assertDatasetIdentical(expected, actual)
def test_reduce_dtypes(self):
# regression test for GH342
expected = Dataset({'x': 1})
actual = Dataset({'x': True}).sum()
self.assertDatasetIdentical(expected, actual)
# regression test for GH505
expected = Dataset({'x': 3})
actual = Dataset({'x': ('y', np.array([1, 2], 'uint16'))}).sum()
self.assertDatasetIdentical(expected, actual)
expected = Dataset({'x': 1 + 1j})
actual = Dataset({'x': ('y', [1, 1j])}).sum()
self.assertDatasetIdentical(expected, actual)
def test_reduce_keep_attrs(self):
data = create_test_data()
_attrs = {'attr1': 'value1', 'attr2': 2929}
attrs = OrderedDict(_attrs)
data.attrs = attrs
# Test dropped attrs
ds = data.mean()
self.assertEqual(ds.attrs, {})
for v in ds.data_vars.values():
self.assertEqual(v.attrs, {})
# Test kept attrs
ds = data.mean(keep_attrs=True)
self.assertEqual(ds.attrs, attrs)
for k, v in ds.data_vars.items():
self.assertEqual(v.attrs, data[k].attrs)
def test_reduce_argmin(self):
# regression test for #205
ds = Dataset({'a': ('x', [0, 1])})
expected = Dataset({'a': ([], 0)})
actual = ds.argmin()
self.assertDatasetIdentical(expected, actual)
actual = ds.argmin('x')
self.assertDatasetIdentical(expected, actual)
def test_reduce_scalars(self):
ds = Dataset({'x': ('a', [2, 2]), 'y': 2, 'z': ('b', [2])})
expected = Dataset({'x': 0, 'y': 0, 'z': 0})
actual = ds.var()
self.assertDatasetIdentical(expected, actual)
def test_reduce_only_one_axis(self):
def mean_only_one_axis(x, axis):
if not isinstance(axis, (int, np.integer)):
raise TypeError('non-integer axis')
return x.mean(axis)
ds = Dataset({'a': (['x', 'y'], [[0, 1, 2, 3, 4]])})
expected = Dataset({'a': ('x', [2])})
actual = ds.reduce(mean_only_one_axis, 'y')
self.assertDatasetIdentical(expected, actual)
with self.assertRaisesRegexp(TypeError, 'non-integer axis'):
ds.reduce(mean_only_one_axis)
with self.assertRaisesRegexp(TypeError, 'non-integer axis'):
ds.reduce(mean_only_one_axis, ['x', 'y'])
def test_count(self):
ds = Dataset({'x': ('a', [np.nan, 1]), 'y': 0, 'z': np.nan})
expected = Dataset({'x': 1, 'y': 1, 'z': 0})
actual = ds.count()
self.assertDatasetIdentical(expected, actual)
def test_apply(self):
data = create_test_data()
data.attrs['foo'] = 'bar'
self.assertDatasetIdentical(data.apply(np.mean), data.mean())
expected = data.mean(keep_attrs=True)
actual = data.apply(lambda x: x.mean(keep_attrs=True), keep_attrs=True)
self.assertDatasetIdentical(expected, actual)
self.assertDatasetIdentical(data.apply(lambda x: x, keep_attrs=True),
data.drop('time'))
def scale(x, multiple=1):
return multiple * x
actual = data.apply(scale, multiple=2)
self.assertDataArrayEqual(actual['var1'], 2 * data['var1'])
self.assertDataArrayIdentical(actual['numbers'], data['numbers'])
actual = data.apply(np.asarray)
expected = data.drop('time') # time is not used on a data var
self.assertDatasetEqual(expected, actual)
def make_example_math_dataset(self):
variables = OrderedDict(
[('bar', ('x', np.arange(100, 400, 100))),
('foo', (('x', 'y'), 1.0 * np.arange(12).reshape(3, 4)))])
coords = {'abc': ('x', ['a', 'b', 'c']),
'y': 10 * np.arange(4)}
ds = Dataset(variables, coords)
ds['foo'][0, 0] = np.nan
return ds
def test_dataset_number_math(self):
ds = self.make_example_math_dataset()
self.assertDatasetIdentical(ds, +ds)
self.assertDatasetIdentical(ds, ds + 0)
self.assertDatasetIdentical(ds, 0 + ds)
self.assertDatasetIdentical(ds, ds + np.array(0))
self.assertDatasetIdentical(ds, np.array(0) + ds)
actual = ds.copy(deep=True)
actual += 0
self.assertDatasetIdentical(ds, actual)
def test_unary_ops(self):
ds = self.make_example_math_dataset()
self.assertDatasetIdentical(ds.apply(abs), abs(ds))
self.assertDatasetIdentical(ds.apply(lambda x: x + 4), ds + 4)
for func in [lambda x: x.isnull(),
lambda x: x.round(),
lambda x: x.astype(int)]:
self.assertDatasetIdentical(ds.apply(func), func(ds))
self.assertDatasetIdentical(ds.isnull(), ~ds.notnull())
# don't actually patch these methods in
with self.assertRaises(AttributeError):
ds.item
with self.assertRaises(AttributeError):
ds.searchsorted
def test_dataset_array_math(self):
ds = self.make_example_math_dataset()
expected = ds.apply(lambda x: x - ds['foo'])
self.assertDatasetIdentical(expected, ds - ds['foo'])
self.assertDatasetIdentical(expected, -ds['foo'] + ds)
self.assertDatasetIdentical(expected, ds - ds['foo'].variable)
self.assertDatasetIdentical(expected, -ds['foo'].variable + ds)
actual = ds.copy(deep=True)
actual -= ds['foo']
self.assertDatasetIdentical(expected, actual)
expected = ds.apply(lambda x: x + ds['bar'])
self.assertDatasetIdentical(expected, ds + ds['bar'])
actual = ds.copy(deep=True)
actual += ds['bar']
self.assertDatasetIdentical(expected, actual)
expected = Dataset({'bar': ds['bar'] + np.arange(3)})
self.assertDatasetIdentical(expected, ds[['bar']] + np.arange(3))
self.assertDatasetIdentical(expected, np.arange(3) + ds[['bar']])
def test_dataset_dataset_math(self):
ds = self.make_example_math_dataset()
self.assertDatasetIdentical(ds, ds + 0 * ds)
self.assertDatasetIdentical(ds, ds + {'foo': 0, 'bar': 0})
expected = ds.apply(lambda x: 2 * x)
self.assertDatasetIdentical(expected, 2 * ds)
self.assertDatasetIdentical(expected, ds + ds)
self.assertDatasetIdentical(expected, ds + ds.data_vars)
self.assertDatasetIdentical(expected, ds + dict(ds.data_vars))
actual = ds.copy(deep=True)
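        # in-place addition should mutate the dataset rather than rebinding
        # it, so the object id must be preserved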
expected_id = id(actual)
actual += ds
self.assertDatasetIdentical(expected, actual)
self.assertEqual(expected_id, id(actual))
self.assertDatasetIdentical(ds == ds, ds.notnull())
subsampled = ds.isel(y=slice(2))
expected = 2 * subsampled
self.assertDatasetIdentical(expected, subsampled + ds)
self.assertDatasetIdentical(expected, ds + subsampled)
def test_dataset_math_auto_align(self):
ds = self.make_example_math_dataset()
subset = ds.isel(x=slice(2), y=[1, 3])
expected = 2 * subset
actual = ds + subset
self.assertDatasetIdentical(expected, actual)
with self.assertRaisesRegexp(ValueError, 'no overlapping labels'):
ds.isel(x=slice(1)) + ds.isel(x=slice(1, None))
actual = ds + ds[['bar']]
expected = (2 * ds[['bar']]).merge(ds.coords)
self.assertDatasetIdentical(expected, actual)
with self.assertRaisesRegexp(ValueError, 'no overlapping data'):
ds + Dataset()
with self.assertRaisesRegexp(ValueError, 'no overlapping data'):
Dataset() + Dataset()
ds2 = Dataset(coords={'bar': 42})
with self.assertRaisesRegexp(ValueError, 'no overlapping data'):
ds + ds2
# maybe unary arithmetic with empty datasets should raise instead?
self.assertDatasetIdentical(Dataset() + 1, Dataset())
        actual = ds.copy(deep=True)
        other = ds.isel(x=slice(2))
        actual += other
        expected = ds + other.reindex_like(ds)
        self.assertDatasetIdentical(expected, actual)
def test_dataset_math_errors(self):
ds = self.make_example_math_dataset()
with self.assertRaises(TypeError):
ds['foo'] += ds
with self.assertRaises(TypeError):
ds['foo'].variable += ds
with self.assertRaisesRegexp(ValueError, 'must have the same'):
ds += ds[['bar']]
# verify we can rollback in-place operations if something goes wrong
# nb. inplace datetime64 math actually will work with an integer array
# but not floats thanks to numpy's inconsistent handling
other = DataArray(np.datetime64('2000-01-01T12'), coords={'c': 2})
actual = ds.copy(deep=True)
with self.assertRaises(TypeError):
actual += other
self.assertDatasetIdentical(actual, ds)
def test_dataset_transpose(self):
ds = Dataset({'a': (('x', 'y'), np.random.randn(3, 4)),
'b': (('y', 'x'), np.random.randn(4, 3))})
actual = ds.transpose()
expected = ds.apply(lambda x: x.transpose())
self.assertDatasetIdentical(expected, actual)
actual = ds.T
self.assertDatasetIdentical(expected, actual)
actual = ds.transpose('x', 'y')
expected = ds.apply(lambda x: x.transpose('x', 'y'))
self.assertDatasetIdentical(expected, actual)
ds = create_test_data()
actual = ds.transpose()
for k in ds:
self.assertEqual(actual[k].dims[::-1], ds[k].dims)
new_order = ('dim2', 'dim3', 'dim1', 'time')
actual = ds.transpose(*new_order)
for k in ds:
expected_dims = tuple(d for d in new_order if d in ds[k].dims)
self.assertEqual(actual[k].dims, expected_dims)
with self.assertRaisesRegexp(ValueError, 'arguments to transpose'):
ds.transpose('dim1', 'dim2', 'dim3')
with self.assertRaisesRegexp(ValueError, 'arguments to transpose'):
ds.transpose('dim1', 'dim2', 'dim3', 'time', 'extra_dim')
def test_dataset_diff_n1_simple(self):
ds = Dataset({'foo': ('x', [5, 5, 6, 6])})
actual = ds.diff('x')
expected = Dataset({'foo': ('x', [0, 1, 0])})
expected.coords['x'].values = [1, 2, 3]
self.assertDatasetEqual(expected, actual)
def test_dataset_diff_n1_lower(self):
ds = Dataset({'foo': ('x', [5, 5, 6, 6])})
actual = ds.diff('x', label='lower')
expected = Dataset({'foo': ('x', [0, 1, 0])})
expected.coords['x'].values = [0, 1, 2]
self.assertDatasetEqual(expected, actual)
def test_dataset_diff_n1(self):
ds = create_test_data(seed=1)
actual = ds.diff('dim2')
expected = dict()
expected['var1'] = DataArray(np.diff(ds['var1'].values, axis=1),
[ds['dim1'].values,
ds['dim2'].values[1:]],
['dim1', 'dim2'])
expected['var2'] = DataArray(np.diff(ds['var2'].values, axis=1),
[ds['dim1'].values,
ds['dim2'].values[1:]],
['dim1', 'dim2'])
expected['var3'] = ds['var3']
expected = Dataset(expected, coords={'time': ds['time'].values})
expected.coords['numbers'] = ('dim3', ds['numbers'].values)
self.assertDatasetEqual(expected, actual)
def test_dataset_diff_n2(self):
ds = create_test_data(seed=1)
actual = ds.diff('dim2', n=2)
expected = dict()
expected['var1'] = DataArray(np.diff(ds['var1'].values, axis=1, n=2),
[ds['dim1'].values,
ds['dim2'].values[2:]],
['dim1', 'dim2'])
expected['var2'] = DataArray(np.diff(ds['var2'].values, axis=1, n=2),
[ds['dim1'].values,
ds['dim2'].values[2:]],
['dim1', 'dim2'])
expected['var3'] = ds['var3']
expected = Dataset(expected, coords={'time': ds['time'].values})
expected.coords['numbers'] = ('dim3', ds['numbers'].values)
self.assertDatasetEqual(expected, actual)
def test_dataset_diff_exception_n_neg(self):
ds = create_test_data(seed=1)
with self.assertRaisesRegexp(ValueError, 'must be non-negative'):
ds.diff('dim2', n=-1)
def test_dataset_diff_exception_label_str(self):
ds = create_test_data(seed=1)
with self.assertRaisesRegexp(ValueError, '\'label\' argument has to'):
ds.diff('dim2', label='raise_me')
def test_real_and_imag(self):
attrs = {'foo': 'bar'}
ds = Dataset({'x': ((), 1 + 2j, attrs)}, attrs=attrs)
expected_re = Dataset({'x': ((), 1, attrs)}, attrs=attrs)
self.assertDatasetIdentical(ds.real, expected_re)
expected_im = Dataset({'x': ((), 2, attrs)}, attrs=attrs)
self.assertDatasetIdentical(ds.imag, expected_im)
|
{
"content_hash": "1c0a2a2f71620aead4446308e9a8d2d4",
"timestamp": "",
"source": "github",
"line_count": 2142,
"max_line_length": 88,
"avg_line_length": 40.58309990662932,
"alnum_prop": 0.5515305594220571,
"repo_name": "cpaulik/xray",
"id": "68a8e6baa15dea8c5887b51ee6281a20eb8096ce",
"size": "86929",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "xray/test/test_dataset.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "PowerShell",
"bytes": "2986"
},
{
"name": "Python",
"bytes": "755103"
},
{
"name": "Shell",
"bytes": "161"
}
],
"symlink_target": ""
}
|
"""
This module directly accesses functions,
to verify cases that are not easily testable through CLI commands
"""
import os
import re
import tempfile
from datetime import datetime
from pathlib import Path
from typing import Dict, Union
import pytest
from faker import Faker
from packaging.version import Version
from controller import __version__
from controller.app import Application, Configuration
from controller.commands.backup import get_date_pattern
from controller.commands.password import get_projectrc_variables_indentation
from controller.deploy.builds import get_image_creation
from controller.deploy.docker import Docker
from controller.packages import ExecutionException, Packages
from controller.templating import Templating
from controller.utilities import git, services, system
from controller.utilities.configuration import load_yaml_file, mix_configuration
from tests import Capture, create_project, init_project, random_project_name
def test_autocomplete(capfd: Capture, faker: Faker) -> None:
create_project(
capfd=capfd,
name=random_project_name(faker),
)
init_project(capfd)
app = Application()
values = app.autocomplete_service(None, None, "") # type: ignore
assert len(values) > 0
assert "backend" in values
values = app.autocomplete_service(None, None, "invalid") # type: ignore
assert len(values) == 0
values = app.autocomplete_service(None, None, "b") # type: ignore
assert len(values) >= 1
assert "backend" in values
values = app.autocomplete_allservice(None, None, "") # type: ignore
assert len(values) > 0
assert "backend" in values
values = app.autocomplete_allservice(None, None, "invalid") # type: ignore
assert len(values) == 0
values = app.autocomplete_allservice(None, None, "b") # type: ignore
assert len(values) >= 1
assert "backend" in values
values = app.autocomplete_allservice(None, None, "c") # type: ignore
assert len(values) >= 1
assert "backend" not in values
values = app.autocomplete_submodule(None, None, "") # type: ignore
assert len(values) > 0
assert "main" in values
values = app.autocomplete_submodule(None, None, "invalid") # type: ignore
assert len(values) == 0
values = app.autocomplete_submodule(None, None, "m") # type: ignore
assert len(values) >= 1
assert "main" in values
values = app.autocomplete_submodule(None, None, "d") # type: ignore
assert len(values) >= 1
assert "main" not in values
os.unlink(".rapydo")
values = app.autocomplete_service(None, None, "") # type: ignore
assert len(values) == 0
values = app.autocomplete_allservice(None, None, "") # type: ignore
assert len(values) == 0
values = app.autocomplete_submodule(None, None, "") # type: ignore
assert len(values) == 0
def test_git(capfd: Capture, faker: Faker) -> None:
create_project(
capfd=capfd,
name=random_project_name(faker),
)
init_project(capfd)
assert git.get_repo("does/not/exist") is None
do_repo = git.get_repo("submodules/do")
assert do_repo is not None
assert git.get_active_branch(None) is None
assert git.get_active_branch(do_repo) == __version__
assert not git.switch_branch(None, branch_name="0.7.3")
# Same branch => no change => return True
assert git.switch_branch(do_repo, branch_name=__version__)
assert not git.switch_branch(do_repo, branch_name="XYZ")
assert git.switch_branch(do_repo, branch_name="0.7.3")
assert git.get_active_branch(do_repo) == "0.7.3"
assert git.switch_branch(do_repo, branch_name=__version__)
assert git.get_active_branch(do_repo) == __version__
assert git.get_origin(None) is None
r = git.get_repo(".")
assert git.get_origin(r) == "https://your_remote_git/your_project.git"
# Create an invalid repo (i.e. without any remote)
r = git.init("../justatest")
assert git.get_origin(r) is None
def test_execute_command() -> None:
out = Packages.execute_command("echo", ["-n", "Hello World"])
assert out == "Hello World"
out = Packages.execute_command("echo", ["Hello World"])
assert out == "Hello World\n"
with pytest.raises(ExecutionException):
assert Packages.execute_command("ls", ["doesnotexistforsure"])
def test_bytes_to_str() -> None:
assert system.bytes_to_str(0) == "0"
assert system.bytes_to_str(1) == "1"
assert system.bytes_to_str(1023) == "1023"
assert system.bytes_to_str(1024) == "1KB"
assert system.bytes_to_str(1424) == "1KB"
assert system.bytes_to_str(1824) == "2KB"
assert system.bytes_to_str(18248) == "18KB"
assert system.bytes_to_str(1024 * 1024 - 1) == "1024KB"
assert system.bytes_to_str(1024 * 1024) == "1MB"
assert system.bytes_to_str(18248377) == "17MB"
assert system.bytes_to_str(418248377) == "399MB"
assert system.bytes_to_str(1024 * 1024 * 1024 - 1) == "1024MB"
assert system.bytes_to_str(1024 * 1024 * 1024) == "1GB"
assert system.bytes_to_str(1024 * 1024 * 1024 * 1024 - 1) == "1024GB"
assert system.bytes_to_str(1024 * 1024 * 1024 * 1024) == "1024GB"
assert system.bytes_to_str(1024 * 1024 * 1024 * 1024 * 1024) == "1048576GB"
def test_str_to_bytes() -> None:
assert system.str_to_bytes("0") == 0
assert system.str_to_bytes("1") == 1
assert system.str_to_bytes("42") == 42
assert system.str_to_bytes("1K") == 1024
assert system.str_to_bytes("1k") == 1024
assert system.str_to_bytes("1KB") == 1024
assert system.str_to_bytes("1kb") == 1024
assert system.str_to_bytes("1M") == 1024 * 1024
assert system.str_to_bytes("1m") == 1024 * 1024
assert system.str_to_bytes("1MB") == 1024 * 1024
assert system.str_to_bytes("1mb") == 1024 * 1024
assert system.str_to_bytes("1G") == 1024 * 1024 * 1024
assert system.str_to_bytes("1g") == 1024 * 1024 * 1024
assert system.str_to_bytes("1GB") == 1024 * 1024 * 1024
assert system.str_to_bytes("1gb") == 1024 * 1024 * 1024
with pytest.raises(AttributeError):
system.str_to_bytes("x")
with pytest.raises(AttributeError):
system.str_to_bytes("1T")
with pytest.raises(AttributeError):
system.str_to_bytes("1TB")
def test_to_int() -> None:
assert system.to_int(0) == 0
assert system.to_int(42) == 42
assert system.to_int(-24) == -24
assert system.to_int("1") == 1
assert system.to_int("43") == 43
assert system.to_int("-25") == -25
assert system.to_int(None) is None # type: ignore
assert system.to_int({}) is None # type: ignore
assert system.to_int([]) is None # type: ignore
assert system.to_int("not a number") is None
def test_load_yaml_file() -> None:
# Invalid file / path
with pytest.raises(SystemExit):
load_yaml_file(file=Path("path", "invalid"))
y = load_yaml_file(file=Path("path", "invalid"), is_optional=True)
assert y is not None
assert isinstance(y, dict)
assert len(y) == 0
with pytest.raises(SystemExit):
load_yaml_file(file=Path("projects", "invalid"))
# Valid path, but not in yaml format
with pytest.raises(SystemExit):
load_yaml_file(file=Path("pyproject.toml"))
# File is empty
f = tempfile.NamedTemporaryFile()
with pytest.raises(SystemExit):
load_yaml_file(file=Path(f.name))
f.close()
def test_mix_configuration() -> None:
y = mix_configuration(None, None)
assert y is not None
assert isinstance(y, dict)
assert len(y) == 0
def test_normalize_placeholder_variable() -> None:
short1 = services.normalize_placeholder_variable
assert short1("NEO4J_AUTH") == "NEO4J_PASSWORD"
assert short1("POSTGRES_USER") == "ALCHEMY_USER"
assert short1("POSTGRES_PASSWORD") == "ALCHEMY_PASSWORD"
assert short1("MYSQL_USER") == "ALCHEMY_USER"
assert short1("MYSQL_PASSWORD") == "ALCHEMY_PASSWORD"
assert short1("DEFAULT_USER") == "RABBITMQ_USER"
assert short1("DEFAULT_PASS") == "RABBITMQ_PASSWORD"
assert short1("CYPRESS_AUTH_DEFAULT_USERNAME") == "AUTH_DEFAULT_USERNAME"
assert short1("CYPRESS_AUTH_DEFAULT_PASSWORD") == "AUTH_DEFAULT_PASSWORD"
assert short1("NEO4J_dbms_memory_heap_max__size") == "NEO4J_HEAP_SIZE"
assert short1("NEO4J_dbms_memory_heap_initial__size") == "NEO4J_HEAP_SIZE"
assert short1("NEO4J_dbms_memory_pagecache_size") == "NEO4J_PAGECACHE_SIZE"
key = "anyother"
assert short1(key) == key
def test_get_celerybeat_scheduler() -> None:
short2 = services.get_celerybeat_scheduler
env: Dict[str, Union[None, str, int, float]] = {}
assert short2(env) == "Unknown"
# Both ACTIVATE_CELERYBEAT and CELERY_BACKEND are required
env["ACTIVATE_CELERYBEAT"] = "0"
assert short2(env) == "Unknown"
env["ACTIVATE_CELERYBEAT"] = "1"
assert short2(env) == "Unknown"
env["CELERY_BACKEND"] = "??"
assert short2(env) == "Unknown"
    # A valid backend alone is not enough: ACTIVATE_CELERYBEAT must also be enabled
env["CELERY_BACKEND"] = "REDIS"
env["ACTIVATE_CELERYBEAT"] = "0"
assert short2(env) == "Unknown"
env["ACTIVATE_CELERYBEAT"] = "1"
assert short2(env) == "redbeat.RedBeatScheduler"
env["CELERY_BACKEND"] = "INVALID"
assert short2(env) == "Unknown"
def test_get_default_user() -> None:
assert services.get_default_user("invalid") is None
assert services.get_default_user("backend") == "developer"
assert services.get_default_user("celery") == "developer"
assert services.get_default_user("flower") == "developer"
assert services.get_default_user("celerybeat") == "developer"
Configuration.frontend = "invalid"
assert services.get_default_user("frontend") is None
Configuration.frontend = "no"
assert services.get_default_user("frontend") is None
Configuration.frontend = "angular"
assert services.get_default_user("frontend") == "node"
Configuration.frontend = "angularjs"
assert services.get_default_user("frontend") is None
assert services.get_default_user("postgres") == "postgres"
assert services.get_default_user("neo4j") == "neo4j"
assert services.get_default_user("redis") == "redis"
def test_get_default_command() -> None:
assert services.get_default_command("invalid") == "bash"
assert services.get_default_command("backend") == "restapi launch"
assert services.get_default_command("bot") == "restapi bot"
assert services.get_default_command("neo4j") == "bin/cypher-shell"
assert services.get_default_command("registry") == "ash"
assert "psql -U " in services.get_default_command("postgres")
assert "mysql -D" in services.get_default_command("mariadb")
assert "redis-cli --pass" in services.get_default_command("redis")
def test_get_templating() -> None:
templating = Templating()
with pytest.raises(SystemExit):
templating.get_template("invalid", {})
def test_split_command() -> None:
cmd = Docker.split_command(None)
assert isinstance(cmd, list)
assert len(cmd) == 0
cmd = Docker.split_command("")
assert isinstance(cmd, list)
assert len(cmd) == 0
cmd = Docker.split_command("a")
assert isinstance(cmd, list)
assert len(cmd) == 1
assert cmd[0] == "a"
cmd = Docker.split_command("a b")
assert isinstance(cmd, list)
assert len(cmd) == 2
assert cmd[0] == "a"
assert cmd[1] == "b"
cmd = Docker.split_command("a b c")
assert isinstance(cmd, list)
assert len(cmd) == 3
assert cmd[0] == "a"
assert cmd[1] == "b"
assert cmd[2] == "c"
cmd = Docker.split_command("a 'b c'")
assert isinstance(cmd, list)
assert len(cmd) == 2
assert cmd[0] == "a"
assert cmd[1] == "b c"
def test_packages(faker: Faker) -> None:
assert Packages.get_bin_version("invalid") is None
v = Packages.get_bin_version("git")
assert v is not None
# Something like 2.25.1
assert len(str(Version(v)).split(".")) == 3
# Check docker client version
v = Packages.get_bin_version("docker")
assert v is not None
# Something like 19.03.8 or 18.06.0-ce
assert len(str(Version(v)).split(".")) >= 3
# Check docker engine version
v = Packages.get_bin_version(
"docker", option=["version", "--format", "'{{.Server.Version}}'"]
)
assert v is not None
assert len(str(Version(v)).split(".")) >= 3
with pytest.raises(SystemExit):
Packages.check_program("invalid")
v = Packages.check_program("docker")
assert v is not None
with pytest.raises(SystemExit):
Packages.check_program("docker", min_version="99999.99")
with pytest.raises(SystemExit):
Packages.check_program("docker", max_version="0.0")
v = Packages.check_program("docker", min_version="0.0")
assert v is not None
v = Packages.check_program("docker", max_version="99999.99")
assert v is not None
v = Packages.check_program("docker", min_version="0.0", max_version="99999.99")
assert v is not None
v = Packages.check_program(
"docker",
min_version="0.0",
max_version="99999.99",
min_recommended_version="99999.99",
)
assert v is not None
assert Packages.get_installation_path("invalid") is None
assert Packages.get_installation_path("rapydo") is not None
assert Packages.get_installation_path("pip") is None
assert Packages.convert_bin_to_win32("test") == "test"
assert Packages.convert_bin_to_win32("compose") == "compose"
assert Packages.convert_bin_to_win32("buildx") == "buildx"
assert Packages.convert_bin_to_win32("git") == "git"
rand_str = faker.pystr()
assert Packages.convert_bin_to_win32(rand_str) == rand_str
assert Packages.convert_bin_to_win32("docker") == "docker.exe"
def test_download() -> None:
with pytest.raises(SystemExit):
Packages.download("https://www.google.com/test", "")
with pytest.raises(SystemExit):
Packages.download(
"https://github.com/rapydo/do/archive/refs/tags/v1.2.zip",
"thisisawrongchecksum",
)
downloaded = Packages.download(
"https://github.com/rapydo/do/archive/refs/tags/v1.2.zip",
"dc07bef0d12a7a9cfd0f383452cbcb6d",
)
assert downloaded is not None
def test_get_date_pattern(faker: Faker) -> None:
date_pattern = get_date_pattern()
    # A small trick to turn the glob-like expression into a valid regular
    # expression; str.replace returns a new string, so keep the result
    date_pattern = date_pattern.replace(".*", "\\.+")
    # Same pattern used in backup.py to create backup filenames
    for _ in range(20):
        d = faker.date("%Y_%m_%d-%H_%M_%S")
        assert re.match(date_pattern, f"{d}.bak")
def test_get_image_creation() -> None:
_1970 = datetime.fromtimestamp(0)
assert get_image_creation("invalid") == _1970
def test_get_projectrc_variables_indentation() -> None:
assert get_projectrc_variables_indentation([]) == 0
projectrc = """
project: xyz
""".split(
"\n"
)
assert get_projectrc_variables_indentation(projectrc) == 0
projectrc = """
project: xyz
project_configuration:
""".split(
"\n"
)
assert get_projectrc_variables_indentation(projectrc) == 0
projectrc = """
project: xyz
project_configuration:
variables:
""".split(
"\n"
)
assert get_projectrc_variables_indentation(projectrc) == 0
projectrc = """
project: xyz
project_configuration:
variables:
env:
X: 10
""".split(
"\n"
)
assert get_projectrc_variables_indentation(projectrc) == 6
projectrc = """
project: xyz
project_configuration:
variables:
env:
X: 10
""".split(
"\n"
)
assert get_projectrc_variables_indentation(projectrc) == 6
projectrc = """
project: xyz
project_configuration:
variables:
env:
X: 10
""".split(
"\n"
)
assert get_projectrc_variables_indentation(projectrc) == 3
projectrc = """
project: xyz
project_configuration:
variables:
env:
X: 10
""".split(
"\n"
)
assert get_projectrc_variables_indentation(projectrc) == 4
projectrc = """
project: xyz
project_configuration:
variables:
env:
""".split(
"\n"
)
assert get_projectrc_variables_indentation(projectrc) == 3
projectrc = """
project: xyz
project_configuration:
variables:
env:
""".split(
"\n"
)
assert get_projectrc_variables_indentation(projectrc) == 4
projectrc = """
project: xyz
project_configuration:
variables:
env:
""".split(
"\n"
)
assert get_projectrc_variables_indentation(projectrc) == 6
projectrc = """
project: xyz
project_configuration:
variables:
env:
X: 10
""".split(
"\n"
)
assert get_projectrc_variables_indentation(projectrc) == 12
|
{
"content_hash": "13a65cbe586db2f1d573f512bebb49fe",
"timestamp": "",
"source": "github",
"line_count": 547,
"max_line_length": 86,
"avg_line_length": 30.718464351005483,
"alnum_prop": 0.6503600547521275,
"repo_name": "rapydo/do",
"id": "d42c9ec0925725896961f4c56c37877f8d1e8234",
"size": "16803",
"binary": false,
"copies": "1",
"ref": "refs/heads/2.4",
"path": "tests/test_libs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jinja",
"bytes": "59123"
},
{
"name": "Python",
"bytes": "551612"
}
],
"symlink_target": ""
}
|
__all__ = [
'Imshow_Slider',
'Imshow_Slider_Array',
'NonUniformImage',
'NonUniformImage_axes',
'RectangleSelector',
'addlabel',
'axesfontsize',
'colorbar',
'contour',
'figure',
'hist',
'hist2d',
'imshow',
'imshow_batch',
'latexfig',
'less_labels',
'pcolor_axes',
'plot',
'plot_featured',
'quiver',
'rgb2gray',
'savefig',
'scaled_figsize',
'setup_axes',
'setup_figure',
'showfig',
]
__all__.sort()
import os as _os
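# Read the Docs builds typically lack compiled plotting dependencies, so the
# colormap import below is guarded by the READTHEDOCS environment variable.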
on_rtd = _os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd:
from .cmaps import * # noqa
__all__.sort()
from .Imshow_Slider_mod import Imshow_Slider
from .Imshow_Slider_Array_mod import Imshow_Slider_Array
from .NonUniformImage import NonUniformImage
from .NonUniformImage_axes import NonUniformImage_axes
from .RectangleSelector_mod import RectangleSelector
from .addlabel import addlabel
from .axesfontsize import axesfontsize
from .colorbar import colorbar
from .figure import figure
from .hist import hist
from .hist2d import hist2d
from .imshow import contour
from .imshow import imshow
from .imshow import quiver
from .imshow import scaled_figsize
from .imshow_batch import imshow_batch
from .latexfig import latexfig
from .less_labels import less_labels
from .pcolor_axes import pcolor_axes
from .plot import plot
from .plot_featured import plot_featured
from .rgb2gray import rgb2gray
from .savefig import savefig
from .setup_axes import setup_axes
from .setup_figure import setup_figure
from .showfig import showfig
|
{
"content_hash": "83336d937ac7b9761bff29237fa7f668",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 56,
"avg_line_length": 25.29032258064516,
"alnum_prop": 0.7104591836734694,
"repo_name": "joelfrederico/SciSalt",
"id": "263eb70c7c6c395d56a00cb8adb491384ac304c2",
"size": "1568",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scisalt/matplotlib/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "198931"
}
],
"symlink_target": ""
}
|
"""
This file demonstrates writing tests using the unittest module. These will pass
when you run "manage.py test".
Replace this with more appropriate tests for your application.
"""
import json
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.test.client import Client
from django.test.utils import override_settings
from geoevents.core.tests import R3TestCaseMixin
from geoevents.operations.models import Event
from geoevents.timeline.forms import TimelineItemForm
from geoevents.timeline.models import TimelineItem
@override_settings(AUTHENTICATION_BACKENDS=('django.contrib.auth.backends.ModelBackend',))
class SimpleTest(R3TestCaseMixin, TestCase):
fixtures = ['maps.json']
def setUp(self):
super(SimpleTest, self).setUp()
self.event = Event.objects.create(name='Hurricane Sandy',
event_location='United States',
description='This is a test incident',
posture='Deployed',
poc='Red Team',
event_type='Hurricane_Cyclone',
longitude=-74.4,
latitude=38.8,
tags='hurricane, sandy, test'
)
self.timelineitem_data_api = {'start': '2013-01-18 18:20',
'end': '2013-01-29 18:20',
'content': 'Testing',
'content_object': reverse('api_dispatch_list', args=['v1', 'event']).format(
self.event.id)}
self.timelineitem_data = {'start': '2013-01-18 18:20',
'end': '2013-01-29 18:20',
'content': 'Testing',
#'object_id':self.event.id,
#'content_type': ContentType.objects.get_for_model(Event).id,
'content_object': self.event}
def test_timelineitem_data_is_valid(self):
f = TimelineItemForm(self.timelineitem_data)
self.assertTrue(f.is_valid())
def test_post_timeline_returns_401(self):
        '''Test that a POST to the timeline API returns a 401 if the user is not logged in'''
c = Client()
response = c.post(
'{0}?format=json'.format(reverse('api_dispatch_list', args=[self.current_api_version, 'timeline-item'])),
json.dumps(self.timelineitem_data_api), content_type='application/json')
self.assertEqual(response.status_code, 401)
def test_post_timeline_data(self):
        '''Test that a POST to the timeline API creates an object when the user has appropriate perms'''
c = Client()
c.login(username='admin', password='test')
response = c.post(
'{0}?format=json'.format(reverse('api_dispatch_list', args=[self.current_api_version, 'timeline-item'])),
json.dumps(self.timelineitem_data_api), content_type='application/json')
self.assertEqual(response.status_code, 201)
def tearDown(self):
self.event.delete()
self.admin_user.delete()
self.non_admin_user.delete()
|
{
"content_hash": "25311e526479f792cd9e1c2a04aaa374",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 117,
"avg_line_length": 46.2,
"alnum_prop": 0.5665223665223665,
"repo_name": "jaycrossler/geo-events",
"id": "a59048a2d099ea7e79b3bcff6cd1bb342391b4ab",
"size": "3672",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "geoevents/timeline/tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "7006"
},
{
"name": "CSS",
"bytes": "169563"
},
{
"name": "JavaScript",
"bytes": "10629192"
},
{
"name": "Python",
"bytes": "1589939"
},
{
"name": "Shell",
"bytes": "4212"
}
],
"symlink_target": ""
}
|
from errors import ServerError
import urllib
import json
class Request():
ALLOWED_METHODS = ['HEAD', 'GET', 'POST', 'PUT', 'DELETE']
def __init__(self, request_text):
request_text = request_text.split('\r\n\r\n', 1)
if len(request_text) > 1:
self.body = request_text[1].strip()
else:
self.body = ""
header_lines = request_text[0].split('\r\n')
command = header_lines[0].split(' ')
if len(command) != 3:
raise ServerError('Invalid Request, malformed command', 400)
self.method = command[0]
if self.method not in Request.ALLOWED_METHODS:
raise ServerError('Not Supported Request', 405)
command[1] = command[1].split('?', 1)
self.path = command[1][0]
self.params = []
if len(command[1]) > 1:
self._qs = command[1][1]
self._parse_parameters(self.params, self._qs)
        if not self.path.startswith('/'):
            # Reduce absolute-form request targets (e.g. "http://host/path")
            # to their path component
            self.path = '/' + self.path.split('/', 3)[-1]
self.version = command[2]
self.headers = {}
for header_line in header_lines[1:]:
if header_line == "":
continue
header = header_line.split(': ', 1)
name = header[0]
value = header[1]
self.headers[name] = value
if self.version.endswith('1.1') and not self.headers.get('Host'):
raise ServerError('Invalid Request, no host in header', 400)
if self.method == 'POST' or self.method == 'PUT':
if 'application/json' in self.headers.get('Content-Type', ""):
self.params += json.loads(self.body).items()
elif 'application/x-www-form-urlencoded' in self.headers.get('Content-Type', ""):
self._parse_parameters(self.params, self.body)
    def _parse_parameters(self, param_list, params):
        # named param_list to avoid shadowing the builtin `list`
        for param in params.split('&'):
            param = urllib.unquote_plus(param).split('=', 1)
            param_list.append((param[0], param[1]))

    def qs_lookup(self, key, default=None):
        for candidate, value in self.params:
            if key == candidate:
                return value
        return default
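# Illustrative usage of Request (the raw request text is hypothetical, not
# taken from the project's tests):
#     raw = 'GET /quotes?author=Marty+Stepp HTTP/1.1\r\nHost: x\r\n\r\n'
#     req = Request(raw)
#     req.method                 # 'GET'
#     req.path                   # '/quotes'
#     req.qs_lookup('author')    # 'Marty Stepp'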
if __name__ == '__main__':
pass
|
{
"content_hash": "b635ab1ac5499e393f9e5b9bad1bf2e9",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 93,
"avg_line_length": 37.45,
"alnum_prop": 0.5456163773920784,
"repo_name": "rockwotj/PieServer",
"id": "c7805db6237f4a1cc8956d0e66ec0884d225d448",
"size": "2247",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/movie_quotes/framework/server/request.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "638"
},
{
"name": "HTML",
"bytes": "8328"
},
{
"name": "JavaScript",
"bytes": "4531"
},
{
"name": "Python",
"bytes": "29487"
},
{
"name": "Shell",
"bytes": "337"
}
],
"symlink_target": ""
}
|
from pycoin.coins.bitcoin.Solver import BitcoinSolver
from .SolutionChecker import GroestlcoinSolutionChecker
class GroestlcoinSolver(BitcoinSolver):
SolutionChecker = GroestlcoinSolutionChecker
|
{
"content_hash": "16eb7173d8f55e21f1a243464eab4870",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 55,
"avg_line_length": 33.5,
"alnum_prop": 0.8656716417910447,
"repo_name": "richardkiss/pycoin",
"id": "849faacf271637d2d882e1a92224eb27693ac110",
"size": "201",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "pycoin/coins/groestlcoin/Solver.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "115"
},
{
"name": "Python",
"bytes": "752865"
},
{
"name": "Shell",
"bytes": "198"
}
],
"symlink_target": ""
}
|
"""
Thinkful Python Unit 1 Lesson 6 Assignment 4
Implement Bicycle in Python using Classes
Qasim Ijaz
"""
import random
#import bicycle classes
from bicycle import *
#create 3 wheel types
road_wheel = Wheels(21, 40, "Road")
bmx_wheel = Wheels(18, 50, "BMX")
mountain_wheel = Wheels(24, 60, "Mountain")
#print(bmx_wheel.cost)
#create 3 frame types
aluminum_frame = Frames("Aluminum", 150, 70)
carbon_frame = Frames("Carbon", 130, 60)
steel_frame = Frames("Steel", 50, 50)
# print(steel_frame.weight)
#create 2 bicycle manufacturers
jedi_bikes = Manufacturers("Jedi Bikes", 8)
yoda_bikes = Manufacturers("Yoda Bikes", 10)
#create 3 bicycle models for each of 2 manufacturers:
jedi_roadster = Bicycles("Jedi Roadster", jedi_bikes, aluminum_frame, road_wheel)
jedi_bmx = Bicycles("Jedi BMX", jedi_bikes, carbon_frame, bmx_wheel)
jedi_climber = Bicycles("Jedi Climber", jedi_bikes, steel_frame, mountain_wheel)
yoda_cr = Bicycles("Yoda CR", yoda_bikes, carbon_frame, road_wheel)
yoda_am = Bicycles("Yoda AM", yoda_bikes, aluminum_frame, mountain_wheel)
yoda_sb = Bicycles("Yoda SB", yoda_bikes, steel_frame, bmx_wheel)
#create a shop "Marty's" that takes 20% profit over wholesale cost
martys = Shops("Marty\'s Bicycle Shop", 20)
martys.add_inventory(jedi_roadster)
martys.add_inventory(jedi_bmx)
martys.add_inventory(jedi_climber)
martys.add_inventory(yoda_am)
martys.add_inventory(yoda_cr)
martys.add_inventory(yoda_sb)
print("-----------------------------------------------------------------------")
#Try to add retail cost:
for each_bike in martys.inventory:
if "Yoda" in each_bike.name:
print(each_bike.name + " >> $" + str(each_bike.cost))
each_bike.cost = int(each_bike.cost) + int(yoda_bikes.percent) + int(martys.margin)
print("Added Yoda Bike's margin of $" + str(yoda_bikes.percent) + " to " + each_bike.name)
print("Added Marty's margin of $" + str(martys.margin) + " to " + each_bike.name)
print(each_bike.name + " >> $" + str(each_bike.cost) + "\n")
elif "Jedi" in each_bike.name:
print(each_bike.name + " >> $" + str(each_bike.cost))
each_bike.cost = int(each_bike.cost) + int(jedi_bikes.percent) + int(martys.margin)
print("Added Jedi Bike's margin of $" + str(jedi_bikes.percent) + " to " + each_bike.name)
print("Added Marty's margin of $" + str(martys.margin) + " to " + each_bike.name)
print(each_bike.name + " >> $" + str(each_bike.cost) + "\n")
#create 3 customers with budgets of 200, 500, and 1000
clients = {
'Angel': Customers("Angel", 200),
'Barb': Customers("Barb", 500),
'Casey': Customers("Casey", 1000)
}
#Print name and total weight of each bike
print("-----------------------------------")
print(martys.name + " currently carries: ")
print("-----------------------------------")
for each_bike in martys.inventory:
print(each_bike.name + " weighs " + str(each_bike.weight) + " and costs $" + str(each_bike.cost))
print("-----------------------------------")
#Print client name and budget amounts
for each_client in clients:
budget_reach = []
budget = clients[each_client].fund
print("\n" + each_client + " has a budge of $" + str(budget) + " and they can afford following: ")
for each_bike in martys.inventory:
if each_bike.cost < budget:
print(each_bike.name)
budget_reach.append(each_bike.name)
print("-----------------------------------")
print("\n" + martys.name + " currently carries following bicycles: ")
for each_bike in martys.inventory:
print(each_bike.name + " costs $" + str(each_bike.cost))
print("-----------------------------------")
#have each client buy one random bike
for each_client in clients:
    budget = clients[each_client].fund
    item = random.choice(martys.inventory)  # pick a random bike
    print(each_client + " would like to purchase " + item.name)
    if item in martys.inventory:
        if item.cost < budget:
            martys.profit = martys.profit + item.cost
            martys.inventory.remove(item)  # bike has been purchased. Still need to add it to customer's inventory.
            print(each_client + " purchased " + item.name)
            # deduct the cost only when the purchase actually went through
            clients[each_client].fund = clients[each_client].fund - item.cost
        else:
            print("Item not in your price range.")
    else:
        print("Item unavailable.")
    print(each_client + " has $" + str(clients[each_client].fund) + " left in their budget \n")
#print post-purchase inventory
print("-----------------------------------")
print("\n" + martys.name + " currently carries following bicycles: ")
for each_bike in martys.inventory:
print(each_bike.name + " costs $" + str(each_bike.cost))
print("Total profit for today >> $" + str(martys.profit))
print("-----------------------------------")
|
{
"content_hash": "4cc249c46460e05527aaa243e111b605",
"timestamp": "",
"source": "github",
"line_count": 135,
"max_line_length": 114,
"avg_line_length": 35.67407407407408,
"alnum_prop": 0.6216777408637874,
"repo_name": "qasimchadhar/thinkful",
"id": "9e64fec8fc9b16f5b193bd70dbd2015e4b1b2a75",
"size": "4816",
"binary": false,
"copies": "1",
"ref": "refs/heads/gh-pages",
"path": "bicycleScript.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "421"
},
{
"name": "Python",
"bytes": "6892"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from future.builtins import str
import re
import unicodedata
from django.core.exceptions import ObjectDoesNotExist
from django.urls import resolve, reverse, NoReverseMatch, get_script_prefix
from django.shortcuts import redirect
from django.utils.encoding import smart_text
from django.utils.http import is_safe_url
from django.utils import translation
from mezzanine.conf import settings
from mezzanine.utils.importing import import_dotted_path
def admin_url(model, url, object_id=None):
"""
Returns the URL for the given model and admin url name.
"""
opts = model._meta
url = "admin:%s_%s_%s" % (opts.app_label, opts.object_name.lower(), url)
args = ()
if object_id is not None:
args = (object_id,)
return reverse(url, args=args)
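# Illustrative call (model name and resulting URL are hypothetical):
#     admin_url(BlogPost, "change", 3)
#     # -> something like "/admin/blog/blogpost/3/change/", depending on
#     # the Django version's admin URL layout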
def home_slug():
"""
Returns the slug arg defined for the ``home`` urlpattern, which
is the definitive source of the ``url`` field defined for an
editable homepage object.
"""
prefix = get_script_prefix()
slug = reverse("home")
if slug.startswith(prefix):
slug = '/' + slug[len(prefix):]
try:
return resolve(slug).kwargs["slug"]
except KeyError:
return slug
def slugify(s):
"""
Loads the callable defined by the ``SLUGIFY`` setting, which defaults
to the ``slugify_unicode`` function.
"""
return import_dotted_path(settings.SLUGIFY)(s)
def slugify_unicode(s):
"""
Replacement for Django's slugify which allows unicode chars in
slugs, for URLs in Chinese, Russian, etc.
Adopted from https://github.com/mozilla/unicode-slugify/
"""
chars = []
for char in str(smart_text(s)):
cat = unicodedata.category(char)[0]
if cat in "LN" or char in "-_~":
chars.append(char)
elif cat == "Z":
chars.append(" ")
return re.sub(r"[-\s]+", "-", "".join(chars).strip()).lower()
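# e.g. slugify_unicode("Héllo, Wörld!") -> "héllo-wörld": accented letters
# are kept, punctuation is dropped, and whitespace collapses to hyphens.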
def unique_slug(queryset, slug_field, slug):
"""
Ensures a slug is unique for the given queryset, appending
an integer to its end until the slug is unique.
"""
i = 0
while True:
if i > 0:
if i > 1:
slug = slug.rsplit("-", 1)[0]
slug = "%s-%s" % (slug, i)
try:
queryset.get(**{slug_field: slug})
except ObjectDoesNotExist:
break
i += 1
return slug
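# Illustrative usage (BlogPost is a hypothetical model):
#     unique_slug(BlogPost.objects.all(), "slug", "my-title")
#     # -> "my-title" if free, otherwise "my-title-1", "my-title-2", ...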
def next_url(request):
"""
Returns URL to redirect to from the ``next`` param in the request.
"""
next = request.GET.get("next", request.POST.get("next", ""))
host = request.get_host()
return next if next and is_safe_url(next, host=host) else None
def login_redirect(request):
"""
Returns the redirect response for login/signup. Favors:
- next param
- LOGIN_REDIRECT_URL setting
- homepage
"""
ignorable_nexts = ("",)
if "mezzanine.accounts" in settings.INSTALLED_APPS:
from mezzanine.accounts import urls
ignorable_nexts += (urls.SIGNUP_URL, urls.LOGIN_URL, urls.LOGOUT_URL)
next = next_url(request) or ""
if next in ignorable_nexts:
next = settings.LOGIN_REDIRECT_URL
if next == "/accounts/profile/":
            # Use the homepage if LOGIN_REDIRECT_URL is Django's default.
next = get_script_prefix()
else:
try:
next = reverse(next)
except NoReverseMatch:
pass
return redirect(next)
def path_to_slug(path):
"""
Removes everything from the given URL path, including
language code and ``PAGES_SLUG`` if any is set, returning
a slug that would match a ``Page`` instance's slug.
"""
from mezzanine.urls import PAGES_SLUG
lang_code = translation.get_language_from_path(path)
for prefix in (lang_code, settings.SITE_PREFIX, PAGES_SLUG):
if prefix:
path = path.replace(prefix, "", 1)
return clean_slashes(path) or "/"
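# e.g. with an "en" language prefix and the default empty PAGES_SLUG,
# path_to_slug("/en/about-us/") gives "about-us" (illustrative values).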
def clean_slashes(path):
"""
Canonicalize path by removing leading slashes and conditionally
removing trailing slashes.
"""
return path.strip("/") if settings.APPEND_SLASH else path.lstrip("/")
|
{
"content_hash": "b573d249c6b92555f9823d223454f29e",
"timestamp": "",
"source": "github",
"line_count": 143,
"max_line_length": 77,
"avg_line_length": 29.272727272727273,
"alnum_prop": 0.6244624940277114,
"repo_name": "frankier/mezzanine",
"id": "e71a063845fec9b36d272e6ccc69d48e511a378f",
"size": "4186",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "mezzanine/utils/urls.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "59995"
},
{
"name": "HTML",
"bytes": "79222"
},
{
"name": "JavaScript",
"bytes": "453239"
},
{
"name": "Python",
"bytes": "707940"
}
],
"symlink_target": ""
}
|
"""Metaprogramming functions for introspection and manipulation of objects."""
from importlib import import_module
from inspect import getmembers, getmro, isclass, Parameter, signature
from types import FunctionType
def get_param_names(func):
"""Get the argument names of a function."""
return list(func.__code__.co_varnames[: func.__code__.co_argcount])
def replace_param_sig(func, params):
"""Replace the parameter signature of a function.
Parameters
----------
func: Callable
params: Iterable[str]
New list of argument names for the output function.
Returns
-------
Callable
A new function which is a copy of `func` with its parameter signature
replaced.
"""
    # copy.copy() returns the same object when given a function, so build a
    # genuine copy with FunctionType to avoid mutating the original's signature
    func_copy = FunctionType(
        func.__code__,
        func.__globals__,
        name=func.__name__,
        argdefs=func.__defaults__,
        closure=func.__closure__,
    )
    func_copy.__dict__.update(func.__dict__)
    new_parameters = [
        Parameter(param, Parameter.POSITIONAL_OR_KEYWORD) for param in params
    ]
    new_signature = signature(func_copy).replace(parameters=new_parameters)
    func_copy.__signature__ = new_signature
    return func_copy
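# A quick sketch (function names are hypothetical):
#     def add(a, b):
#         return a + b
#     renamed = replace_param_sig(add, ["x", "y"])
#     signature(renamed)   # <Signature (x, y)>; signature(add) is unchanged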
def classname(cls):
"""Return the name of a class"""
return cls.__name__
def list_classes(module, package=None, exclude=None, of_type=None):
"""List all classes in a python module
Parameters
----------
module: str
Module name to inspect
package: str, Optional
If doing a relative import, specify the package name to import from
exclude: List[str], optional
List of classes to exclude from the return
of_type: List[str], optional
Only classes of given type should be returned
Returns
-------
List[class]
A list of classes in `module`
"""
if package is None:
imported = import_module(module)
else:
import_module(package)
imported = import_module("".join([".", module]), package)
classes = []
# List all classes
for _, obj in getmembers(imported):
# Check if class
if isclass(obj):
# Check if class is defined in the target file.
if obj.__module__ == imported.__name__:
if of_type:
# List object's class and class inheritance
class_inheritance = getmro(obj)
# Get class inheritance names
inheritance_names = [
classname(cls) for cls in class_inheritance
]
# Check if any of the objects inheritance is of the target
# type.
if any([typ in inheritance_names for typ in of_type]):
classes.append(obj)
else:
classes.append(obj)
# Exclude some
if exclude:
classes = [cls for cls in classes if classname(cls) not in exclude]
return sorted(classes, key=classname)
def parent_names(cls):
"""Get the names of the parent classes of a class `cls`"""
parent_classes = cls.__bases__
names = [classname(parent) for parent in parent_classes]
return names
|
{
"content_hash": "7466e8d1515968ef6ecf61ca12c36bd9",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 78,
"avg_line_length": 29.666666666666668,
"alnum_prop": 0.5991407799074686,
"repo_name": "fennerm/fmbiopy",
"id": "3e6a9cfabf08244c22074c5f78f715e5a32410d7",
"size": "3026",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fmbiopy/obj.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Awk",
"bytes": "196"
},
{
"name": "Perl",
"bytes": "466"
},
{
"name": "Python",
"bytes": "116621"
},
{
"name": "Shell",
"bytes": "3182"
}
],
"symlink_target": ""
}
|
"""
TestCases for python DB Btree key comparison function.
"""
import sys, os, re
import test_all
from cStringIO import StringIO
import unittest
try:
# For Pythons w/distutils pybsddb
from bsddb3 import db, dbshelve
except ImportError:
# For Python 2.3
from bsddb import db, dbshelve
lexical_cmp = cmp
def lowercase_cmp(left, right):
return cmp (left.lower(), right.lower())
def make_reverse_comparator (cmp):
def reverse (left, right, delegate=cmp):
return - delegate (left, right)
return reverse
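# e.g. make_reverse_comparator(lexical_cmp)('a', 'b') == 1, flipping the
# usual cmp('a', 'b') == -1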
_expected_lexical_test_data = ['', 'CCCP', 'a', 'aaa', 'b', 'c', 'cccce', 'ccccf']
_expected_lowercase_test_data = ['', 'a', 'aaa', 'b', 'c', 'CC', 'cccce', 'ccccf', 'CCCP']
class ComparatorTests (unittest.TestCase):
def comparator_test_helper (self, comparator, expected_data):
data = expected_data[:]
data.sort (comparator)
self.failUnless (data == expected_data,
"comparator `%s' is not right: %s vs. %s"
% (comparator, expected_data, data))
def test_lexical_comparator (self):
self.comparator_test_helper (lexical_cmp, _expected_lexical_test_data)
def test_reverse_lexical_comparator (self):
rev = _expected_lexical_test_data[:]
rev.reverse ()
self.comparator_test_helper (make_reverse_comparator (lexical_cmp),
rev)
def test_lowercase_comparator (self):
self.comparator_test_helper (lowercase_cmp,
_expected_lowercase_test_data)
class AbstractBtreeKeyCompareTestCase (unittest.TestCase):
env = None
db = None
def setUp (self):
self.filename = self.__class__.__name__ + '.db'
homeDir = os.path.join (os.path.dirname (sys.argv[0]), 'db_home')
self.homeDir = homeDir
try:
os.mkdir (homeDir)
except os.error:
pass
env = db.DBEnv ()
env.open (homeDir,
db.DB_CREATE | db.DB_INIT_MPOOL
| db.DB_INIT_LOCK | db.DB_THREAD)
self.env = env
def tearDown (self):
self.closeDB ()
if self.env is not None:
self.env.close ()
self.env = None
import glob
map (os.remove, glob.glob (os.path.join (self.homeDir, '*')))
def addDataToDB (self, data):
i = 0
for item in data:
self.db.put (item, str (i))
i = i + 1
def createDB (self, key_comparator):
self.db = db.DB (self.env)
self.setupDB (key_comparator)
self.db.open (self.filename, "test", db.DB_BTREE, db.DB_CREATE)
def setupDB (self, key_comparator):
self.db.set_bt_compare (key_comparator)
def closeDB (self):
if self.db is not None:
self.db.close ()
self.db = None
def startTest (self):
pass
def finishTest (self, expected = None):
if expected is not None:
self.check_results (expected)
self.closeDB ()
def check_results (self, expected):
curs = self.db.cursor ()
try:
index = 0
rec = curs.first ()
while rec:
key, ignore = rec
self.failUnless (index < len (expected),
"to many values returned from cursor")
self.failUnless (expected[index] == key,
"expected value `%s' at %d but got `%s'"
% (expected[index], index, key))
index = index + 1
rec = curs.next ()
self.failUnless (index == len (expected),
"not enough values returned from cursor")
finally:
curs.close ()
class BtreeKeyCompareTestCase (AbstractBtreeKeyCompareTestCase):
def runCompareTest (self, comparator, data):
self.startTest ()
self.createDB (comparator)
self.addDataToDB (data)
self.finishTest (data)
def test_lexical_ordering (self):
self.runCompareTest (lexical_cmp, _expected_lexical_test_data)
def test_reverse_lexical_ordering (self):
expected_rev_data = _expected_lexical_test_data[:]
expected_rev_data.reverse ()
self.runCompareTest (make_reverse_comparator (lexical_cmp),
expected_rev_data)
def test_compare_function_useless (self):
self.startTest ()
def socialist_comparator (l, r):
return 0
self.createDB (socialist_comparator)
self.addDataToDB (['b', 'a', 'd'])
# all things being equal the first key will be the only key
# in the database... (with the last key's value fwiw)
self.finishTest (['b'])
class BtreeExceptionsTestCase (AbstractBtreeKeyCompareTestCase):
def test_raises_non_callable (self):
self.startTest ()
self.assertRaises (TypeError, self.createDB, 'abc')
self.assertRaises (TypeError, self.createDB, None)
self.finishTest ()
def test_set_bt_compare_with_function (self):
self.startTest ()
self.createDB (lexical_cmp)
self.finishTest ()
def check_results (self, results):
pass
def test_compare_function_incorrect (self):
self.startTest ()
def bad_comparator (l, r):
return 1
# verify that set_bt_compare checks that comparator('', '') == 0
self.assertRaises (TypeError, self.createDB, bad_comparator)
self.finishTest ()
def verifyStderr(self, method, successRe):
"""
Call method() while capturing sys.stderr output internally and
call self.fail() if successRe.search() does not match the stderr
output. This is used to test for uncatchable exceptions.
"""
stdErr = sys.stderr
sys.stderr = StringIO()
try:
method()
finally:
temp = sys.stderr
sys.stderr = stdErr
errorOut = temp.getvalue()
if not successRe.search(errorOut):
self.fail("unexpected stderr output:\n"+errorOut)
def _test_compare_function_exception (self):
self.startTest ()
def bad_comparator (l, r):
if l == r:
# pass the set_bt_compare test
return 0
raise RuntimeError, "i'm a naughty comparison function"
self.createDB (bad_comparator)
#print "\n*** test should print 2 uncatchable tracebacks ***"
self.addDataToDB (['a', 'b', 'c']) # this should raise, but...
self.finishTest ()
def test_compare_function_exception(self):
self.verifyStderr(
self._test_compare_function_exception,
re.compile('(^RuntimeError:.* naughty.*){2}', re.M|re.S)
)
def _test_compare_function_bad_return (self):
self.startTest ()
def bad_comparator (l, r):
if l == r:
# pass the set_bt_compare test
return 0
return l
self.createDB (bad_comparator)
#print "\n*** test should print 2 errors about returning an int ***"
self.addDataToDB (['a', 'b', 'c']) # this should raise, but...
self.finishTest ()
def test_compare_function_bad_return(self):
self.verifyStderr(
self._test_compare_function_bad_return,
re.compile('(^TypeError:.* return an int.*){2}', re.M|re.S)
)
def test_cannot_assign_twice (self):
def my_compare (a, b):
return 0
self.startTest ()
self.createDB (my_compare)
try:
self.db.set_bt_compare (my_compare)
assert False, "this set should fail"
except RuntimeError, msg:
pass
def test_suite ():
res = unittest.TestSuite ()
res.addTest (unittest.makeSuite (ComparatorTests))
if db.version () >= (3, 3, 11):
res.addTest (unittest.makeSuite (BtreeExceptionsTestCase))
res.addTest (unittest.makeSuite (BtreeKeyCompareTestCase))
return res
if __name__ == '__main__':
    unittest.main (defaultTest = 'test_suite')
|
{
"content_hash": "311532746c8175fa2cd0b11a9925766a",
"timestamp": "",
"source": "github",
"line_count": 249,
"max_line_length": 90,
"avg_line_length": 33.06024096385542,
"alnum_prop": 0.5665694849368319,
"repo_name": "kmod/icbd",
"id": "59a45ec502a672a07a3727e8d81de3a1e37fccbd",
"size": "8232",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "stdlib/python2.5/bsddb/test/test_compare.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "33042"
},
{
"name": "C++",
"bytes": "35981"
},
{
"name": "CSS",
"bytes": "8888"
},
{
"name": "JavaScript",
"bytes": "3602"
},
{
"name": "Makefile",
"bytes": "48655"
},
{
"name": "Objective-C",
"bytes": "88"
},
{
"name": "Python",
"bytes": "10340340"
},
{
"name": "Shell",
"bytes": "18865"
}
],
"symlink_target": ""
}
|
import networkx as nx
import settings
import sys
sys.path.append('./graphparser/')
from graphparser import GraphParser
from meter_graph import MeterGraph
import logging,sys
import re
import phonemes
# create digraph of meters (DG) using networkx
# each node has a type (=,-, and 0 [for the first one])
# the node at the end of each meter has a meter_type and meter_full_description attribute
from collections import namedtuple
ScanResult = namedtuple('ScanResult',["scan","matches", "meter_type"]) # used for completed scans
NodeMatch = namedtuple('NodeMatch', ["node_type", # = or -
"matched_tokens", # tokens matched at node -- could be an array
"node_id", # id of node in graph
"orig_tokens", # original tokens that were matched
"ipa",
"found", # production of parser
"token_i"]) # used for matches at nodes in graph
MeterSegment = namedtuple('MeterSegment',['syllables','ending','number_of_repeats','optional','start_node','end_node'])
Branch = namedtuple('Branch',['syllables','ending','number_of_repeats','optional','weight','skip_if_matched'])
Fork = namedtuple('Fork',['segments','optional'])
OPTIONAL_BUT_PREFERRED = 2
OPTIONAL = 1
NOT_OPTIONAL = 0
def get_matra_count(x):
y = 0
for c in x:
if c =='=': y+=2
if c =='-': y+=1
return y
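# e.g. get_matra_count('=-=') == 5: each '=' is a long syllable worth two
# matras, each '-' a short syllable worth one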
class CustomMeterGraph(MeterGraph):
    def get_matra_count(self, x):
        y = 0
        for c in x:
            if c == '=': y += 2
            if c == '-': y += 1
        return y
def __init__(self, phrase='', count = None):
''' Count refers to matra count; can be an int or list of ints'''
self.DG = self.create_graph()
self.pp = GraphParser('settings/urdu-meter.yaml',blank=' ') # token parser
self.lp = GraphParser('settings/long.yaml',blank='b') # long parser
        self.sp = GraphParser('settings/short.yaml',blank='b') # short parser
self.components = []
if count:
assert type(count) in [list,int]
            if type(count) == int:
count = [count]
for x in count: assert x>0
self.count = count
if phrase!='':
self.init_from_phrase(phrase)
def transcription_of(self,inp, join_ch=''):
''' Provides transcription (into metrical units) of input
'''
p = self.pp.parse(inp)
if p.matches:
return join_ch.join([x.production for x in p.matches])
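        # e.g. transcription_of('kitaab') might yield '-=' if the grammar in
        # settings/urdu-meter.yaml scans "ki" short and "taab" long (the
        # exact output depends on that grammar)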
def branch(self,syllables,ending=False, number_of_repeats=0, optional=False, weight=0, skip_if_matched=False):
return Branch(syllables,ending,number_of_repeats,optional,weight,skip_if_matched)
def create_graph(self):
DG=nx.DiGraph()
DG.add_node(0,type='0') # this is the start
return DG
# this is the graph scan.
def add_graph_edge(self, curr_node_ids, new_node_id,optional=NOT_OPTIONAL,weight=0):
DG = self.DG
for curr_node_id in curr_node_ids:
DG.add_edge(curr_node_id,new_node_id)
curr_type = DG.node[curr_node_id]['type']
new_type = DG.node[new_node_id]['type']
edge = DG[curr_node_id][new_node_id]
if (curr_type,new_type) in settings.bad_types:
edge['bad_combos'] = settings.bad_types[(curr_type, new_type)]
if optional != NOT_OPTIONAL:
edge['optional'] = optional #allow preference for ignoring
edge['weight'] = weight
def end_nodes_of_component(self, component):
component_type = type(component).__name__
assert component_type in ['Fork','MeterSegment']
if component_type=='Fork':
end_nodes = [segment.end_node for segment in component.segments]
else:
end_nodes = [component.end_node]
return end_nodes
def add_fork(self,branches,optional=NOT_OPTIONAL, number_of_repeats=0):
fork_number_of_repeats = number_of_repeats
        logging.debug('inside add_fork, number of repeats %s %s', number_of_repeats, branches)
assert type(branches)==list
for b in branches: assert type(b).__name__=='Branch'
DG = self.DG
if len(self.components)==0:
prev_nodes=[0] # start from the beginning
prev_optional = NOT_OPTIONAL
else: # there are previous components
prev_component = self.components[-1]
prev_nodes = self.end_nodes_of_component(prev_component)
prev_optional = prev_component.optional
fork = Fork(segments=[],optional=optional)
branch_starts = []
branch_ends = []
for branch in branches:
curr_nodes = prev_nodes #return to original nodes
start_node = len(DG.nodes())
branch_starts.append(start_node)
last_node = start_node - 1
syllables = branch.syllables
ending = branch.ending
number_of_repeats = branch.number_of_repeats
for i,s in enumerate(syllables):
new_node = len(DG.nodes())
DG.add_node(new_node, type=s)
if i==len(syllables)-1 and ending:
DG.node[new_node]['ending'] = True
self.add_graph_edge(curr_nodes,new_node)
curr_nodes = [new_node]
if i==0 and prev_optional!=NOT_OPTIONAL: #TODO:allow for multiple optionals
last_optional = len(self.components)-2
optionals = []
l = last_optional
optionals=[l]
# while l>=0 and self.components[l].optional!=NOT_OPTIONAL:
# optionals.append(l)
# l=l-1
for o in optionals: #this might explode
#TODO: THIS MAY BE BUGGY.
assert o > -1
end_nodes = self.end_nodes_of_component(self.components[o])
self.add_graph_edge(end_nodes, start_node,optional=True)#self.components[o].optional)
if i+1==len(syllables):
branch_ends.append(new_node)
if number_of_repeats>0:
pass
#DO NOT ALLOW REPEATS ON BRANCHES
# if i+1 == (len(syllables)) and number_of_repeats > 0:
# print i, curr_node, start_node
# self.add_graph_edge(curr_nodes, start_node)
m = MeterSegment(syllables=syllables, ending=ending,number_of_repeats=number_of_repeats,optional=optional,start_node=start_node, end_node=start_node+len(syllables)-1)
fork.segments.append(m)
if fork_number_of_repeats>0:
logging.debug('repeats inside fork')
for j in branch_starts:
self.add_graph_edge(branch_ends,j)
self.components.append(fork)
def add_segment(self,syllables, ending=False, number_of_repeats=0,optional=NOT_OPTIONAL):
# get ends of previous nodes
DG = self.DG
start_node = len(DG.nodes()) # where this segment will start
last_node = start_node - 1 # this is the last node in the graph
if len(self.components)==0:
prev_nodes=[0] # start from the beginning
prev_optional = NOT_OPTIONAL
else: # there are previous components
prev_component = self.components[-1]
prev_nodes = self.end_nodes_of_component(prev_component)
prev_optional = prev_component.optional
curr_nodes = prev_nodes
for i,s in enumerate(syllables):
new_node = len(DG.nodes())
DG.add_node(new_node, type=s)
if i==len(syllables)-1 and ending:
DG.node[new_node]['ending'] = True
self.add_graph_edge(curr_nodes,new_node)
curr_nodes = [new_node]
if i==0 and prev_optional!=NOT_OPTIONAL: #TODO:allow for multiple optionals
last_optional = len(self.components)-2
l = last_optional
optionals = [l]
for o in optionals: #this might explode
assert o >-2
if o == -1:
end_nodes = [0]
else:
end_nodes = self.end_nodes_of_component(self.components[o])
self.add_graph_edge(end_nodes, start_node,optional=self.components[0].optional)
if i+1 == (len(syllables)) and number_of_repeats > 0:
self.add_graph_edge(curr_nodes, start_node)
m = MeterSegment(syllables=syllables, ending=ending,number_of_repeats=number_of_repeats,optional=optional,start_node=start_node, end_node=start_node+len(syllables)-1)
self.components.append(m)
def graph_scan(self, in_string, parse='', ignore_skipping = False):
#print 'in graph_scan'
completed_scans = [] # holds complete scans
if parse == '':
parse = self.pp.parse(in_string) # holds output, matches
scan_tokens = self.lp.tokenize(parse.output)
else:
scan_tokens = self.pp.tokenize(parse)
logging.debug('parsed as %s',parse)
# this generates scan_tokens from the scan of the input string, e.g. ['b','c','v'], using the long parser (lp)
logging.debug('scan tokens %s',scan_tokens)
# print 'scan_tokens',scan_tokens
        # this function descends into node (node_id), passing current token_i,
        # matches, and a string representing the scan matched so far
DG = self.DG
def descend_node(node_id, token_i, matches, matched_so_far):
logging.debug('descending node_id'+str(node_id))
            # visit successors in ascending edge-weight order
            successors = sorted(self.DG.successors(node_id),
                                key=lambda k: self.DG[node_id][k]['weight'])
for successor_id in successors:
#print ignore_skipping
if ignore_skipping==False and 'skip_if_matched' in self.DG[node_id][successor_id] and len(completed_scans)>0:
logging.debug('********skipping!')
continue
node_type = self.DG.node[successor_id]['type']
assert node_type in ('=','-')
if node_type=='=':
parser = self.lp
else:
# print 'using sp'
parser = self.sp
if node_type=='-' and ignore_skipping==False and len(completed_scans)>0:
#if len(self.lp.match_all_at(scan_tokens,token_i))>1: # Long matches possible, so moving along
logging.debug('skipping wild shorts at node %d',successor_id)
continue
                if 'optional' in self.DG[node_id][successor_id]:  # check whether the edge is optional
logging.debug('found an optional edge')
for m in parser.match_all_at(scan_tokens, token_i):
#print ' matched ', m.tokens, m.production
# next, check to make sure that this is not a bad combination
# do so by looking for constraints on the edge
# note: this could be added as a constraint to match_all_at() as not_starting_with ...
if len(matches)>0: # if already matched something
# print 'already matched'
a = matches[-1].found # details of previous match
                        b = m.production  # details of current match
if 'bad_combos' in self.DG[node_id][successor_id]: # if
if (a,b) in self.DG[node_id][successor_id]['bad_combos']:
logging.debug('found bad combos %s',(a,b))
continue # abort! bad combination
orig_tokens =[]
for i in range(token_i, token_i+len(m.tokens)):
orig_tokens +=parse.matches[i].tokens
# generate node_ipa
node_ipa = u''
for tkn in orig_tokens:
if tkn in phonemes.phonemes:
node_ipa +=phonemes.phonemes[tkn]
else:
print 'could not find token',tkn,'in ',phonemes.phonemes
if node_ipa.endswith(u'ː̃'):#, node_ipa): # if nasal after long symbol, switch
node_ipa = node_ipa[0:-2]+u'̃ː'
if m.production.startswith('s_') and node_ipa.endswith(u'ː'):
node_ipa = node_ipa[0:-1]+u'ˑ'
# advance token index based on length of match tokens
# generate match data
matched_tokens = m.tokens
match_data = NodeMatch(node_type=node_type,
matched_tokens = matched_tokens,
node_id=node_id,
orig_tokens=orig_tokens,
ipa = node_ipa,
found=m.production,
token_i=token_i)
new_token_i = token_i + len(matched_tokens)
so_far=matched_so_far + node_type
curr_matches = list(matches)
curr_matches.append(match_data)
if new_token_i == len(scan_tokens):
logging.debug('AT THE END')
logging.debug(curr_matches)
logging.debug('node is %d%s',successor_id,self.DG.node[successor_id])
if 'ending' in self.DG.node[successor_id]:
logging.debug('AT THE END REALLY')
count_okay = True
if self.count:
count=0
for x in so_far:
if x=='=': count+=2
if x=='-': count+=1
count_okay = count in self.count
if count_okay == True:
completed_scans.append(ScanResult(scan=so_far, matches=curr_matches, meter_type='CUSTOM'))
match_node = successor_id
else: # count not okay
pass
else:
                            pass # doesn't match and at end, so don't continue
else:
descend_node(successor_id, new_token_i,curr_matches,so_far)
descend_node(0, 0, [], '')
return completed_scans
def init_from_phrase(self,phrase):
self.initial_phrase = phrase
self.parse_meter(phrase)
def parse_meter(self,phrase):
x = '(?:'
x += '(?:'
x += '(?P<required_group>\[.+?\])'+'|'
x += '(?P<optional_group>\(.+?\))'
x += ')'
x += '(?P<repeated_group>\+)?'
x += ')|'
x += '(?P<regular>[=-]+)'
my_re = re.compile(x)
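        # e.g. '[-=-==]+' parses as one required_group with a repeated_group
        # marker, '(=)' as an optional_group, and a bare '=-' as 'regular'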
matches = [m for m in my_re.finditer(phrase)]
endings = [False] * len(matches)
        optionals = [m.group('optional_group') is not None for m in matches]
ending_start = len(matches)-1
while optionals[ending_start]==True:
ending_start-=1
for i in range(ending_start, len(endings)):
endings[i]=True
for i,m in enumerate(matches):
if m.group('required_group') is not None or m.group('optional_group') is not None:
optional_on = m.group('optional_group') is not None
repeat_on = m.group('repeated_group') is not None
if repeat_on:
phrase = m.group(0)[1:-2]
else:
phrase = m.group(0)[1:-1]
internal_groups = phrase.split('|')
logging.debug('processing group '+phrase+' optional:'+str(optional_on)+' repeat:'+str(repeat_on))
# print 'optional = ',optional_on,'repeat',repeat_on
if optional_on:
optional_setting = OPTIONAL
else:
optional_setting = NOT_OPTIONAL
if repeat_on:
number_of_repeats = 3
else:
number_of_repeats = 0
ending = endings[i]
#.set_trace()
if len(internal_groups)==1:
self.add_segment(internal_groups[0], number_of_repeats=number_of_repeats, optional=optional_setting,
ending=ending)
elif len(internal_groups)>0:
branches = [ self.branch(j,ending=ending, number_of_repeats=number_of_repeats, weight=w) for w,j in enumerate(internal_groups)]
self.add_fork(branches, optional = optional_setting, number_of_repeats=number_of_repeats)
else:
self.add_segment(m.group(0), optional=NOT_OPTIONAL,ending=endings[i])
# print 'non-group found', m.group(0)
def get_scan_as_string(self,x):
z = self.graph_scan(x)
if z:
return z[0].scan # add space for spreadsheet viewing
else:
return ''
def get_all_scans_as_string(self,x, ignore_skipping = True, separator = '\n'):
z = self.graph_scan(x, ignore_skipping = ignore_skipping)
if z:
return separator.join([x.scan for x in z])
else:
return '' #join(z[0].scan)
#return z[0].scan # add space for spreadsheet viewing
#else:
# return ''
def test_graph():
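    # Sample meter: a required fork ('=-==' preferred over '--=='), then a
    # required ending fork ('--=' repeatable, or '=='), then an optional
    # trailing short syllable.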
mg = CustomMeterGraph()
mg.add_fork([mg.branch('=-=='),
mg.branch('--==',weight=1,skip_if_matched=True)],
optional= NOT_OPTIONAL)
mg.add_fork([mg.branch('--=', ending=True, number_of_repeats=2),
mg.branch('==', ending=True)],
optional= NOT_OPTIONAL)
mg.add_segment('-',ending=True,optional=OPTIONAL)
return mg
if __name__ == '__main__':
logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)
import networkx as nx
def get_hindi_scanner(count=None):
hindi_dg = nx.read_yaml('data/hindi_meter.yaml')
hindi_scanner = CustomMeterGraph(count=count)
hindi_scanner.DG = hindi_dg
print hindi_scanner
return hindi_scanner
# scanner = get_hindi_scanner(count=16)
import pdb
pdb.set_trace()
scanner = CustomMeterGraph(phrase='[-=-==]+')
scanner.graph_scan('safed baazuu')
|
{
"content_hash": "38e5a28849a49c5548dd349ca2bdf490",
"timestamp": "",
"source": "github",
"line_count": 493,
"max_line_length": 178,
"avg_line_length": 39.099391480730226,
"alnum_prop": 0.5227744345299855,
"repo_name": "seanpue/chicago2015",
"id": "f0a9f3bce67f835196b386a133f61534a4f6b00d",
"size": "19332",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "custom_meter_graph.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "49453"
}
],
"symlink_target": ""
}
|
from axiom.test.historic import stubloader
from xquotient.quotientapp import QuotientPreferenceCollection
class PrefsUpgradeTest(stubloader.StubbedTest):
def testUpgrade(self):
pc = self.store.findUnique(QuotientPreferenceCollection)
# in version 3, all the prefs have either moved to a different
# preference collection or been removed entirely, so there isn't
# much to test.
|
{
"content_hash": "8846410b405e56c5e9c67975e2fe2247",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 72,
"avg_line_length": 41.6,
"alnum_prop": 0.7548076923076923,
"repo_name": "twisted/quotient",
"id": "8eb509cae081ba65f9f11e3da486729bc4dc98e4",
"size": "416",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "xquotient/test/historic/test_quotientPreferenceCollection1to2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "13968"
},
{
"name": "JavaScript",
"bytes": "354447"
},
{
"name": "Python",
"bytes": "890995"
}
],
"symlink_target": ""
}
|
class PID:
    def __init__(self, tuning):
        # tuning holds the (proportional, integral, derivative) gains
        self.tuning = tuning
        self.previous_error = 0.0
        self.integral = 0.0
    def get_control(self, error, time):
        delta_error = error - self.previous_error
        # calculate the proportional term
        proportional = error
        # accumulate the integral term
        self.integral += error * time
        # calculate the derivative term
        derivative = delta_error / time
        self.previous_error = error
        return (proportional * self.tuning[0]
                + self.integral * self.tuning[1]
                + derivative * self.tuning[2])
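# Minimal usage sketch (hypothetical gains and timestep):
#   controller = PID(tuning=(1.0, 0.1, 0.05))
#   signal = controller.get_control(error=2.5, time=0.01)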
|
{
"content_hash": "4932ec4a7a18f832e35e4d7e0c849c53",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 87,
"avg_line_length": 27.5625,
"alnum_prop": 0.6099773242630385,
"repo_name": "essoplerck/Arduino-Drone",
"id": "762caa21014f044c20b999e6f9e20a6ca588114b",
"size": "441",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "documentation/presentation/examples/classes/controller.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "687"
},
{
"name": "C",
"bytes": "104"
},
{
"name": "C++",
"bytes": "6522"
}
],
"symlink_target": ""
}
|
input_name = '../examples/linear_elasticity/linear_elastic_up.py'
output_name = 'test_linear_elastic_up.vtk'
from testsBasic import TestInput
class Test( TestInput ):
pass
|
{
"content_hash": "5e726ff25f5b73c07c0b9e9241f8243c",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 65,
"avg_line_length": 29.5,
"alnum_prop": 0.751412429378531,
"repo_name": "olivierverdier/sfepy",
"id": "84e66602a28f5908b03288834959c7ec9e4420fb",
"size": "177",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_input_linear_elastic_up.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "395470"
},
{
"name": "Python",
"bytes": "1754577"
},
{
"name": "Shell",
"bytes": "297"
}
],
"symlink_target": ""
}
|
from nose import SkipTest
from kombu.tests.utils import redirect_stdouts
from funtests import transport
class test_django(transport.TransportCase):
transport = "django"
prefix = "django"
event_loop_max = 10
def before_connect(self):
@redirect_stdouts
def setup_django(stdout, stderr):
try:
import djkombu # noqa
except ImportError:
raise SkipTest("django-kombu not installed")
from django.conf import settings
if not settings.configured:
settings.configure(DATABASE_ENGINE="sqlite3",
DATABASE_NAME=":memory:",
INSTALLED_APPS=("djkombu", ))
from django.core.management import call_command
call_command("syncdb")
setup_django()
|
{
"content_hash": "ede0756ca9a34a7a95ed259df0730799",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 64,
"avg_line_length": 29.79310344827586,
"alnum_prop": 0.5752314814814815,
"repo_name": "WoLpH/kombu",
"id": "0c0a410d9c490b71d83e2da78365c54ebc615a5e",
"size": "864",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "funtests/tests/test_django.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "365468"
},
{
"name": "Shell",
"bytes": "1487"
}
],
"symlink_target": ""
}
|
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='object_detection/protos/post_processing.proto',
package='object_detection.protos',
serialized_pb=_b('\n-object_detection/protos/post_processing.proto\x12\x17object_detection.protos\"\x9a\x01\n\x16\x42\x61tchNonMaxSuppression\x12\x1a\n\x0fscore_threshold\x18\x01 \x01(\x02:\x01\x30\x12\x1a\n\riou_threshold\x18\x02 \x01(\x02:\x03\x30.6\x12%\n\x18max_detections_per_class\x18\x03 \x01(\x05:\x03\x31\x30\x30\x12!\n\x14max_total_detections\x18\x05 \x01(\x05:\x03\x31\x30\x30\"\xf9\x01\n\x0ePostProcessing\x12R\n\x19\x62\x61tch_non_max_suppression\x18\x01 \x01(\x0b\x32/.object_detection.protos.BatchNonMaxSuppression\x12Y\n\x0fscore_converter\x18\x02 \x01(\x0e\x32\x36.object_detection.protos.PostProcessing.ScoreConverter:\x08IDENTITY\"8\n\x0eScoreConverter\x12\x0c\n\x08IDENTITY\x10\x00\x12\x0b\n\x07SIGMOID\x10\x01\x12\x0b\n\x07SOFTMAX\x10\x02')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_POSTPROCESSING_SCORECONVERTER = _descriptor.EnumDescriptor(
name='ScoreConverter',
full_name='object_detection.protos.PostProcessing.ScoreConverter',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='IDENTITY', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SIGMOID', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SOFTMAX', index=2, number=2,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=425,
serialized_end=481,
)
_sym_db.RegisterEnumDescriptor(_POSTPROCESSING_SCORECONVERTER)
_BATCHNONMAXSUPPRESSION = _descriptor.Descriptor(
name='BatchNonMaxSuppression',
full_name='object_detection.protos.BatchNonMaxSuppression',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='score_threshold', full_name='object_detection.protos.BatchNonMaxSuppression.score_threshold', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='iou_threshold', full_name='object_detection.protos.BatchNonMaxSuppression.iou_threshold', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=0.6,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='max_detections_per_class', full_name='object_detection.protos.BatchNonMaxSuppression.max_detections_per_class', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=100,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='max_total_detections', full_name='object_detection.protos.BatchNonMaxSuppression.max_total_detections', index=3,
number=5, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=100,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=75,
serialized_end=229,
)
_POSTPROCESSING = _descriptor.Descriptor(
name='PostProcessing',
full_name='object_detection.protos.PostProcessing',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='batch_non_max_suppression', full_name='object_detection.protos.PostProcessing.batch_non_max_suppression', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='score_converter', full_name='object_detection.protos.PostProcessing.score_converter', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_POSTPROCESSING_SCORECONVERTER,
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=232,
serialized_end=481,
)
_POSTPROCESSING.fields_by_name['batch_non_max_suppression'].message_type = _BATCHNONMAXSUPPRESSION
_POSTPROCESSING.fields_by_name['score_converter'].enum_type = _POSTPROCESSING_SCORECONVERTER
_POSTPROCESSING_SCORECONVERTER.containing_type = _POSTPROCESSING
DESCRIPTOR.message_types_by_name['BatchNonMaxSuppression'] = _BATCHNONMAXSUPPRESSION
DESCRIPTOR.message_types_by_name['PostProcessing'] = _POSTPROCESSING
BatchNonMaxSuppression = _reflection.GeneratedProtocolMessageType('BatchNonMaxSuppression', (_message.Message,), dict(
DESCRIPTOR = _BATCHNONMAXSUPPRESSION,
__module__ = 'object_detection.protos.post_processing_pb2'
# @@protoc_insertion_point(class_scope:object_detection.protos.BatchNonMaxSuppression)
))
_sym_db.RegisterMessage(BatchNonMaxSuppression)
PostProcessing = _reflection.GeneratedProtocolMessageType('PostProcessing', (_message.Message,), dict(
DESCRIPTOR = _POSTPROCESSING,
__module__ = 'object_detection.protos.post_processing_pb2'
# @@protoc_insertion_point(class_scope:object_detection.protos.PostProcessing)
))
_sym_db.RegisterMessage(PostProcessing)
# @@protoc_insertion_point(module_scope)
|
{
"content_hash": "2c98c19d87ccdfdda023527df588b9da",
"timestamp": "",
"source": "github",
"line_count": 160,
"max_line_length": 763,
"avg_line_length": 39.50625,
"alnum_prop": 0.7370669197911723,
"repo_name": "unnikrishnankgs/va",
"id": "8f20780fcb24600a42deef6e4e5cc67973745862",
"size": "6437",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "venv/lib/python3.5/site-packages/tensorflow/models/object_detection/protos/post_processing_pb2.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "1836035"
},
{
"name": "C++",
"bytes": "12002305"
},
{
"name": "CMake",
"bytes": "128"
},
{
"name": "CSS",
"bytes": "64776"
},
{
"name": "Cuda",
"bytes": "78890"
},
{
"name": "Fortran",
"bytes": "8281"
},
{
"name": "GLSL",
"bytes": "976"
},
{
"name": "HTML",
"bytes": "297329"
},
{
"name": "JavaScript",
"bytes": "4313047"
},
{
"name": "Jupyter Notebook",
"bytes": "603900"
},
{
"name": "Makefile",
"bytes": "7573"
},
{
"name": "Nginx",
"bytes": "544"
},
{
"name": "Objective-C",
"bytes": "567"
},
{
"name": "Protocol Buffer",
"bytes": "72897"
},
{
"name": "PureBasic",
"bytes": "134"
},
{
"name": "Python",
"bytes": "51104955"
},
{
"name": "Shell",
"bytes": "71646"
},
{
"name": "Smarty",
"bytes": "28890"
},
{
"name": "XSLT",
"bytes": "152770"
}
],
"symlink_target": ""
}
|
from datetime import date
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.append(os.path.abspath('_themes'))
sys.path.append(os.path.abspath('../flask_restful'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Flask-RESTful'
copyright = u'{}, Kevin Burke, Kyle Conroy, Ryan Horn, Frank Stratton, Guillaume Binet'.format(
date.today().year)
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.2'
# The full version, including alpha/beta/rc tags.
release = '0.2.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme_path = ['_themes']
html_theme = 'flask'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'index_logo': 'flask-restful.png'
}
# Add any paths that contain custom themes here, relative to this directory.
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'index': ['sidebarintro.html', 'sourcelink.html', 'searchbox.html'],
'**': ['sidebarlogo.html', 'localtoc.html', 'relations.html',
'sourcelink.html', 'searchbox.html']
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no module index is generated.
html_use_modindex = False
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Flask-RESTfuldoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Flask-RESTful.tex', u'Flask-RESTful Documentation',
u'Kyle Conroy, Ryan Horn, Frank Stratton', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'flask-restful', u'Flask-RESTful Documentation',
[u'Kyle Conroy, Ryan Horn, Frank Stratton'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Flask-RESTful', u'Flask-RESTful Documentation',
u'Kyle Conroy, Ryan Horn, Frank Stratton', 'Flask-RESTful', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
intersphinx_mapping = {'flask': ('http://flask.pocoo.org/docs/', None)}
|
{
"content_hash": "ddee76d08b4b6b88b3ac60928334fb11",
"timestamp": "",
"source": "github",
"line_count": 245,
"max_line_length": 98,
"avg_line_length": 32.98367346938775,
"alnum_prop": 0.7005321123623314,
"repo_name": "CanalTP/flask-restful",
"id": "8679debe99bfbf977b63ddac3a6c5c7bbf6f3f7b",
"size": "8505",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "692"
},
{
"name": "Python",
"bytes": "102502"
}
],
"symlink_target": ""
}
|
from django.conf import settings
from geocamUtil import loader
class ExporterInfo(object):
def __init__(self, formatCode, extension, exporterClass, customLabel=None):
self.formatCode = formatCode
self.extension = extension
self.exporterClass = exporterClass
if customLabel:
self.label = customLabel
else:
self.label = exporterClass.label
self.url = None
PLAN_EXPORTERS = []
PLAN_EXPORTERS_BY_FORMAT = {}
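# Each XGDS_PLANNER_PLAN_EXPORTERS entry is a tuple of
# (formatCode, extension, exporterClassName) plus an optional customLabel,
# e.g. a hypothetical ('kml', '.kml', 'xgds_planner2.kmlPlanExporter.KmlExporter').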
for exporterInfo in settings.XGDS_PLANNER_PLAN_EXPORTERS:
# _formatCode, _extension, _exporterClassName, _customLabel
_formatCode = exporterInfo[0]
_extension = exporterInfo[1]
_exporterClassName = exporterInfo[2]
_customLabel = None
if len(exporterInfo) > 3:
_customLabel = exporterInfo[3]
_exporterInfo = ExporterInfo(_formatCode,
_extension,
loader.getClassByName(_exporterClassName),
_customLabel)
PLAN_EXPORTERS.append(_exporterInfo)
PLAN_EXPORTERS_BY_FORMAT[_formatCode] = _exporterInfo
|
{
"content_hash": "c892ff65ec446f8714b35f5c691da553",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 79,
"avg_line_length": 35,
"alnum_prop": 0.6366071428571428,
"repo_name": "xgds/xgds_planner2",
"id": "e48f98d4f31ed5e749f9e3468de185ad5930b96e",
"size": "1880",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "xgds_planner2/choosePlanExporter.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "22579"
},
{
"name": "HTML",
"bytes": "65619"
},
{
"name": "JavaScript",
"bytes": "215959"
},
{
"name": "Makefile",
"bytes": "303"
},
{
"name": "Python",
"bytes": "209162"
},
{
"name": "Shell",
"bytes": "1312"
}
],
"symlink_target": ""
}
|
from django.conf.urls import patterns, include, url
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'achivement_tracker.views.home', name='home'),
# url(r'^achivement_tracker/', include('achivement_tracker.foo.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
# url(r'^admin/', include(admin.site.urls)),
)
|
{
"content_hash": "2dc11b27a4274a676db564dfb65b9477",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 75,
"avg_line_length": 34.8235294117647,
"alnum_prop": 0.6908783783783784,
"repo_name": "roberto-robles/achivement_tracker",
"id": "f0648bcb1d9d6a9dccf99e556c96031f66a1a957",
"size": "592",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "achivement_tracker/urls.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "8012"
}
],
"symlink_target": ""
}
|
"""Utilities for working with and creating SaveableObjects."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.python.eager import context
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.training.checkpointable import base as checkpointable
from tensorflow.python.training.saving import saveable_object
# Op names which identify variable reads which should be saved.
_VARIABLE_OPS = set(["Variable",
"VariableV2",
"AutoReloadVariable",
"VarHandleOp",
"ReadVariableOp"])
def set_cpu0(device_string):
"""Creates a new device string based on `device_string` but using /CPU:0.
If the device is already on /CPU:0, this is a no-op.
Args:
device_string: A device string.
Returns:
A device string.
"""
parsed_device = pydev.DeviceSpec.from_string(device_string)
parsed_device.device_type = "CPU"
parsed_device.device_index = 0
return parsed_device.to_string()
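# e.g. set_cpu0("/job:worker/replica:0/task:1/device:GPU:0") returns
# "/job:worker/replica:0/task:1/device:CPU:0" (illustrative device string).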
class ReferenceVariableSaveable(saveable_object.SaveableObject):
"""SaveableObject implementation that handles reference variables."""
def __init__(self, var, slice_spec, name):
spec = saveable_object.SaveSpec(var, slice_spec, name, dtype=var.dtype)
super(ReferenceVariableSaveable, self).__init__(var, [spec], name)
def restore(self, restored_tensors, restored_shapes):
restored_tensor = restored_tensors[0]
if restored_shapes is not None:
restored_tensor = array_ops.reshape(restored_tensor, restored_shapes[0])
return state_ops.assign(
self.op,
restored_tensor,
validate_shape=restored_shapes is None and
self.op.get_shape().is_fully_defined())
class ResourceVariableSaveable(saveable_object.SaveableObject):
"""SaveableObject implementation that handles ResourceVariables."""
def __init__(self, var, slice_spec, name):
self._var_device = var.device
self._var_shape = var.shape
if isinstance(var, ops.Tensor):
self.handle_op = var.op.inputs[0]
tensor = var
elif isinstance(var, resource_variable_ops.ResourceVariable):
def _read_variable_closure(v):
def f():
with ops.device(v.device):
x = v.read_value()
# To allow variables placed on non-CPU devices to be checkpointed,
# we copy them to CPU on the same machine first.
with ops.device("/device:CPU:0"):
return array_ops.identity(x)
return f
self.handle_op = var.handle
tensor = _read_variable_closure(var)
else:
raise ValueError(
"Saveable is neither a resource variable nor a read operation."
" Got: %s" % repr(var))
spec = saveable_object.SaveSpec(tensor, slice_spec, name,
dtype=var.dtype)
super(ResourceVariableSaveable, self).__init__(var, [spec], name)
def restore(self, restored_tensors, restored_shapes):
restored_tensor = restored_tensors[0]
if restored_shapes is not None:
restored_tensor = array_ops.reshape(restored_tensor, restored_shapes[0])
# Copy the restored tensor to the variable's device.
with ops.device(self._var_device):
restored_tensor = array_ops.identity(restored_tensor)
return resource_variable_ops.shape_safe_assign_variable_handle(
self.handle_op, self._var_shape, restored_tensor)
def _tensor_comes_from_variable(v):
return isinstance(v, ops.Tensor) and v.op.type in _VARIABLE_OPS
def saveable_objects_for_op(op, name):
"""Create `SaveableObject`s from an operation.
Args:
op: A variable, operation, or SaveableObject to coerce into a
SaveableObject.
name: A string name for the SaveableObject.
Yields:
`SaveableObject`s which together save/restore `op`.
Raises:
TypeError: If `name` is not a string.
ValueError: For operations with no known conversion to SaveableObject.
"""
if not isinstance(name, six.string_types):
raise TypeError(
"names_to_saveables must be a dict mapping string names to "
"checkpointable operations. Name is not a string: %s" % name)
if isinstance(op, saveable_object.SaveableObject):
yield op
elif isinstance(op, (list, tuple, variables.PartitionedVariable)):
if isinstance(op, variables.PartitionedVariable):
op = list(op)
# A set of slices.
slice_name = None
# pylint: disable=protected-access
for variable in op:
if not isinstance(variable, variables.Variable):
raise ValueError("Slices must all be Variables: %s" % variable)
if not variable._save_slice_info:
raise ValueError("Slices must all be slices: %s" % variable)
if slice_name is None:
slice_name = variable._save_slice_info.full_name
elif slice_name != variable._save_slice_info.full_name:
raise ValueError(
"Slices must all be from the same tensor: %s != %s" %
(slice_name, variable._save_slice_info.full_name))
if variable.op.type in ["Variable", "VariableV2",
"AutoReloadVariable"]:
yield ReferenceVariableSaveable(
variable, variable._save_slice_info.spec, name)
else:
yield ResourceVariableSaveable(
variable, variable._save_slice_info.spec, name)
# pylint: enable=protected-access
elif isinstance(op, checkpointable.CheckpointableBase) and not isinstance(
op, variables.Variable):
# pylint: disable=protected-access
for attr, factory in op._gather_saveables_for_checkpoint().items():
if attr == checkpointable.VARIABLE_VALUE_KEY:
# Keep original name for classes masquerading as variables.
full_name = name
else:
full_name = name + "_" + attr
op = (factory(full_name) if callable(factory) else factory)
for op in saveable_objects_for_op(op, op.name):
yield op
# pylint: enable=protected-access
else:
# A variable or tensor.
if isinstance(op, resource_variable_ops.ResourceVariable):
# pylint: disable=protected-access
if op._in_graph_mode:
variable = op._graph_element
else:
variable = op
# pylint: enable=protected-access
yield ResourceVariableSaveable(variable, "", name)
else:
with ops.init_scope():
if context.executing_eagerly():
raise ValueError("Can only save/restore ResourceVariables when "
"executing eagerly, got type: %s." % type(op))
variable = ops.internal_convert_to_tensor(op, as_ref=True)
if not _tensor_comes_from_variable(variable):
raise TypeError("names_to_saveables must be a dict mapping string "
"names to Tensors/Variables. Not a variable: %s" %
variable)
if variable.op.type in ["Variable", "VariableV2",
"AutoReloadVariable"]:
yield ReferenceVariableSaveable(variable, "", name)
else:
yield ResourceVariableSaveable(
variable, "", name)
def op_list_to_dict(op_list, convert_variable_to_tensor=True):
"""Create a dictionary of names to operation lists.
Args:
op_list: A list, tuple, or set of Variables or SaveableObjects.
convert_variable_to_tensor: Whether or not to convert single Variables
with no slice info into Tensors.
Returns:
A dictionary of names to the operations that must be saved under
that name. Variables with save_slice_info are grouped together under the
same key in no particular order.
Raises:
TypeError: If the type of op_list or its elements is not supported.
ValueError: If at least two saveables share the same name.
"""
if not isinstance(op_list, (list, tuple, set)):
raise TypeError("Variables to save should be passed in a dict or a "
"list: %s" % op_list)
# When ResourceVariables are converted to Tensors, read ops are added to the
# graph. Sorting the op_list ensures that the resulting graph is always
# constructed in a deterministic way:
op_list = sorted(op_list, key=lambda x: x.name)
names_to_saveables = {}
# pylint: disable=protected-access
for var in op_list:
if isinstance(var, saveable_object.SaveableObject):
names_to_saveables[var.name] = var
elif isinstance(var, variables.PartitionedVariable):
if var.name in names_to_saveables:
raise ValueError("At least two variables have the same name: %s" %
var.name)
names_to_saveables[var.name] = var
elif isinstance(var, variables.Variable) and var._save_slice_info:
name = var._save_slice_info.full_name
if name in names_to_saveables:
if not isinstance(names_to_saveables[name], list):
raise ValueError("Mixing slices and non-slices with the same name: "
"%s" % name)
names_to_saveables[name].append(var)
else:
names_to_saveables[name] = [var]
elif (isinstance(var, checkpointable.CheckpointableBase)
and not isinstance(var, variables.Variable)):
checkpointable_saveables = [
(factory() if callable(factory) else factory)
for factory in var._gather_saveables_for_checkpoint().values()]
names_to_saveables.update(
op_list_to_dict(checkpointable_saveables))
else:
if context.executing_eagerly():
if not isinstance(var, resource_variable_ops.ResourceVariable):
raise ValueError(
"Can only save/restore ResourceVariables when eager execution "
"is enabled, type: %s." % type(var))
set_var = names_to_saveables.setdefault(var._shared_name, var)
if set_var is not var:
raise ValueError(
("Two different ResourceVariable objects with the same "
"shared_name '%s' were passed to the Saver. This likely means "
"that they were created in different Graphs or isolation "
"contexts, and may not be checkpointed together.") %
(var._shared_name,))
else:
if convert_variable_to_tensor:
if isinstance(var, resource_variable_ops.ResourceVariable):
var = var._graph_element # pylint: disable=protected-access
else:
var = ops.internal_convert_to_tensor(var, as_ref=True)
if not _tensor_comes_from_variable(var):
raise TypeError("Variable to save is not a Variable: %s" % var)
if var.op.type == "ReadVariableOp":
name = var.op.inputs[0].op.name
else:
name = var.op.name
if name in names_to_saveables:
raise ValueError("At least two variables have the same name: %s" %
name)
names_to_saveables[name] = var
# pylint: enable=protected-access
return names_to_saveables
def _add_saveable(saveables, seen_ops, saveable):
"""Adds the saveable to the saveables list.
Args:
saveables: List to append the SaveableObject to.
seen_ops: Set of the ops of the saveables already processed. Used to
check that each saveable is only saved once.
saveable: The saveable.
Raises:
ValueError: If the saveable has already been processed.
"""
if saveable.op in seen_ops:
raise ValueError("The same saveable will be restored with two names: %s" %
saveable.name)
saveables.append(saveable)
seen_ops.add(saveable.op)
def validate_and_slice_inputs(names_to_saveables):
"""Returns the variables and names that will be used for a Saver.
Args:
names_to_saveables: A dict (k, v) where k is the name of an operation and
v is an operation to save or a BaseSaverBuilder.Saver.
Returns:
A list of SaveableObjects.
Raises:
TypeError: If any of the keys are not strings or any of the
values are not one of Tensor or Variable or a checkpointable operation.
ValueError: If the same operation is given in more than one value
(this also applies to slices of SlicedVariables).
"""
if not isinstance(names_to_saveables, dict):
names_to_saveables = op_list_to_dict(names_to_saveables)
saveables = []
seen_ops = set()
for name, op in sorted(names_to_saveables.items(),
# Avoid comparing ops, sort only by name.
key=lambda x: x[0]):
for converted_saveable_object in saveable_objects_for_op(op, name):
_add_saveable(saveables, seen_ops, converted_saveable_object)
return saveables
|
{
"content_hash": "075d367ef376d794968080774e073bfe",
"timestamp": "",
"source": "github",
"line_count": 326,
"max_line_length": 78,
"avg_line_length": 39.466257668711656,
"alnum_prop": 0.660267371366392,
"repo_name": "Bismarrck/tensorflow",
"id": "fa88d2c6ebd2f29c2d2de7583a918dcbc6b28b51",
"size": "13555",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tensorflow/python/training/saving/saveable_object_util.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "4882"
},
{
"name": "Batchfile",
"bytes": "10132"
},
{
"name": "C",
"bytes": "493885"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "53117668"
},
{
"name": "CMake",
"bytes": "207176"
},
{
"name": "Dockerfile",
"bytes": "39024"
},
{
"name": "Go",
"bytes": "1303624"
},
{
"name": "HTML",
"bytes": "4680032"
},
{
"name": "Java",
"bytes": "893928"
},
{
"name": "Jupyter Notebook",
"bytes": "2657814"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "68402"
},
{
"name": "Objective-C",
"bytes": "16140"
},
{
"name": "Objective-C++",
"bytes": "102511"
},
{
"name": "PHP",
"bytes": "5172"
},
{
"name": "Pascal",
"bytes": "221"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "43480067"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "838"
},
{
"name": "Shell",
"bytes": "497472"
},
{
"name": "Smarty",
"bytes": "6976"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
__title__ = 'pif.commands.get_public_ip'
__author__ = 'Artur Barseghyan'
__copyright__ = 'Copyright (c) 2013 Artur Barseghyan'
__license__ = 'GPL 2.0/LGPL 2.1'
__all__ = ('main',)
import argparse
from pif.utils import get_public_ip
def main():
"""
Get public IP.
:example:
    $ python src/pif/commands/get_public_ip.py -c user
"""
parser = argparse.ArgumentParser(description="""
Get public IP.
""")
parser.add_argument("-c", "--checker", dest="preferred_checker", type=str, help="`preferred_checker` value", \
metavar="PREFERRED_CHECKER")
parser.add_argument("-v", "--verbose", dest="verbose", type=str, help="`verbose` value", \
metavar="VERBOSE")
args = parser.parse_args()
kwargs = {}
if args.preferred_checker:
kwargs.update({'preferred_checker': args.preferred_checker})
verbose = None
try:
        verbose = bool(int(args.verbose))
        kwargs.update({'verbose': verbose})
    except (TypeError, ValueError):
pass
try:
public_ip = get_public_ip(**kwargs)
print(public_ip)
except Exception as e:
print(e)
if __name__ == "__main__":
main()
|
{
"content_hash": "476c15f0bb9ee3772a5caabee6cd52b8",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 114,
"avg_line_length": 23.75,
"alnum_prop": 0.582995951417004,
"repo_name": "djabber/Dashboard",
"id": "1d647eff4f48e0772c58db6e1424983ba621fda1",
"size": "1235",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "bottle/dash/local/lib/pif-0.7/src/pif/commands/get_public_ip.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "519748"
},
{
"name": "C++",
"bytes": "10342"
},
{
"name": "CSS",
"bytes": "87554"
},
{
"name": "HTML",
"bytes": "95736"
},
{
"name": "Java",
"bytes": "5160021"
},
{
"name": "JavaScript",
"bytes": "179735"
},
{
"name": "Python",
"bytes": "5482405"
},
{
"name": "Shell",
"bytes": "7630"
},
{
"name": "Smarty",
"bytes": "22382"
}
],
"symlink_target": ""
}
|
from test_module import (module_func_2 as oar,)
module_func()
|
{
"content_hash": "d55b771699876e9940e90973b2e72a06",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 47,
"avg_line_length": 30.5,
"alnum_prop": 0.7540983606557377,
"repo_name": "DinoV/PTVS",
"id": "f8a93e1f718004ccbb3ef472450fe6b5bed1a090",
"size": "61",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "Python/Tests/TestData/AddImport/ImportFunctionFromExistingFromImportParensAsNameTrailingComma.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ASP",
"bytes": "109"
},
{
"name": "Batchfile",
"bytes": "4035"
},
{
"name": "C",
"bytes": "4974"
},
{
"name": "C#",
"bytes": "13192050"
},
{
"name": "C++",
"bytes": "187194"
},
{
"name": "CSS",
"bytes": "7024"
},
{
"name": "HTML",
"bytes": "45289"
},
{
"name": "JavaScript",
"bytes": "85712"
},
{
"name": "Objective-C",
"bytes": "4201"
},
{
"name": "PowerShell",
"bytes": "135280"
},
{
"name": "Python",
"bytes": "943244"
},
{
"name": "Smarty",
"bytes": "8356"
},
{
"name": "Tcl",
"bytes": "24968"
}
],
"symlink_target": ""
}
|
import sys
import six
import testscenarios
from oslo import messaging
from oslo.messaging._drivers import common as exceptions
from oslo.messaging.openstack.common import jsonutils
from tests import utils as test_utils
load_tests = testscenarios.load_tests_apply_scenarios
EXCEPTIONS_MODULE = 'exceptions' if six.PY2 else 'builtins'
class NovaStyleException(Exception):
format = 'I am Nova'
def __init__(self, message=None, **kwargs):
self.kwargs = kwargs
if not message:
message = self.format % kwargs
super(NovaStyleException, self).__init__(message)
class KwargsStyleException(NovaStyleException):
format = 'I am %(who)s'
def add_remote_postfix(ex):
ex_type = type(ex)
message = str(ex)
str_override = lambda self: message
new_ex_type = type(ex_type.__name__ + "_Remote", (ex_type,),
{'__str__': str_override,
'__unicode__': str_override})
new_ex_type.__module__ = '%s_Remote' % ex.__class__.__module__
try:
ex.__class__ = new_ex_type
except TypeError:
ex.args = (message,) + ex.args[1:]
return ex
class SerializeRemoteExceptionTestCase(test_utils.BaseTestCase):
_log_failure = [
('log_failure', dict(log_failure=True)),
('do_not_log_failure', dict(log_failure=False)),
]
_add_remote = [
('add_remote', dict(add_remote=True)),
('do_not_add_remote', dict(add_remote=False)),
]
_exception_types = [
('bog_standard', dict(cls=Exception,
args=['test'],
kwargs={},
clsname='Exception',
modname=EXCEPTIONS_MODULE,
msg='test')),
('nova_style', dict(cls=NovaStyleException,
args=[],
kwargs={},
clsname='NovaStyleException',
modname=__name__,
msg='I am Nova')),
('nova_style_with_msg', dict(cls=NovaStyleException,
args=['testing'],
kwargs={},
clsname='NovaStyleException',
modname=__name__,
msg='testing')),
('kwargs_style', dict(cls=KwargsStyleException,
args=[],
kwargs={'who': 'Oslo'},
clsname='KwargsStyleException',
modname=__name__,
msg='I am Oslo')),
]
@classmethod
def generate_scenarios(cls):
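        # multiply_scenarios builds the cross-product of the three axes:
        # 2 log_failure x 2 add_remote x 4 exception types = 16 scenarios.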
cls.scenarios = testscenarios.multiply_scenarios(cls._log_failure,
cls._add_remote,
cls._exception_types)
def setUp(self):
super(SerializeRemoteExceptionTestCase, self).setUp()
def test_serialize_remote_exception(self):
errors = []
def stub_error(msg, *a, **kw):
if (a and len(a) == 1 and isinstance(a[0], dict) and a[0]):
a = a[0]
errors.append(str(msg) % a)
self.stubs.Set(exceptions.LOG, 'error', stub_error)
try:
try:
raise self.cls(*self.args, **self.kwargs)
except Exception as ex:
cls_error = ex
if self.add_remote:
ex = add_remote_postfix(ex)
raise ex
except Exception:
exc_info = sys.exc_info()
serialized = exceptions.serialize_remote_exception(
exc_info, log_failure=self.log_failure)
failure = jsonutils.loads(serialized)
self.assertEqual(self.clsname, failure['class'], failure)
self.assertEqual(self.modname, failure['module'])
self.assertEqual(self.msg, failure['message'])
self.assertEqual([self.msg], failure['args'])
self.assertEqual(self.kwargs, failure['kwargs'])
# Note: _Remote prefix not stripped from tracebacks
tb = cls_error.__class__.__name__ + ': ' + self.msg
self.assertIn(tb, ''.join(failure['tb']))
if self.log_failure:
self.assertTrue(len(errors) > 0, errors)
else:
self.assertEqual(0, len(errors), errors)
SerializeRemoteExceptionTestCase.generate_scenarios()
class DeserializeRemoteExceptionTestCase(test_utils.BaseTestCase):
_standard_allowed = [__name__]
scenarios = [
('bog_standard',
dict(allowed=_standard_allowed,
clsname='Exception',
modname=EXCEPTIONS_MODULE,
cls=Exception,
args=['test'],
kwargs={},
str='test\ntraceback\ntraceback\n',
remote_name='Exception',
remote_args=('test\ntraceback\ntraceback\n', ),
remote_kwargs={})),
('nova_style',
dict(allowed=_standard_allowed,
clsname='NovaStyleException',
modname=__name__,
cls=NovaStyleException,
args=[],
kwargs={},
str='test\ntraceback\ntraceback\n',
remote_name='NovaStyleException_Remote',
remote_args=('I am Nova', ),
remote_kwargs={})),
('nova_style_with_msg',
dict(allowed=_standard_allowed,
clsname='NovaStyleException',
modname=__name__,
cls=NovaStyleException,
args=['testing'],
kwargs={},
str='test\ntraceback\ntraceback\n',
remote_name='NovaStyleException_Remote',
remote_args=('testing', ),
remote_kwargs={})),
('kwargs_style',
dict(allowed=_standard_allowed,
clsname='KwargsStyleException',
modname=__name__,
cls=KwargsStyleException,
args=[],
kwargs={'who': 'Oslo'},
str='test\ntraceback\ntraceback\n',
remote_name='KwargsStyleException_Remote',
remote_args=('I am Oslo', ),
remote_kwargs={})),
('not_allowed',
dict(allowed=[],
clsname='NovaStyleException',
modname=__name__,
cls=messaging.RemoteError,
args=[],
kwargs={},
str=("Remote error: NovaStyleException test\n"
"[%r]." % u'traceback\ntraceback\n'),
msg=("Remote error: NovaStyleException test\n"
"[%r]." % u'traceback\ntraceback\n'),
remote_name='RemoteError',
remote_args=(),
remote_kwargs={'exc_type': 'NovaStyleException',
'value': 'test',
'traceback': 'traceback\ntraceback\n'})),
('unknown_module',
dict(allowed=['notexist'],
clsname='Exception',
modname='notexist',
cls=messaging.RemoteError,
args=[],
kwargs={},
str=("Remote error: Exception test\n"
"[%r]." % u'traceback\ntraceback\n'),
msg=("Remote error: Exception test\n"
"[%r]." % u'traceback\ntraceback\n'),
remote_name='RemoteError',
remote_args=(),
remote_kwargs={'exc_type': 'Exception',
'value': 'test',
'traceback': 'traceback\ntraceback\n'})),
('unknown_exception',
dict(allowed=[],
clsname='FarcicalError',
modname=EXCEPTIONS_MODULE,
cls=messaging.RemoteError,
args=[],
kwargs={},
str=("Remote error: FarcicalError test\n"
"[%r]." % u'traceback\ntraceback\n'),
msg=("Remote error: FarcicalError test\n"
"[%r]." % u'traceback\ntraceback\n'),
remote_name='RemoteError',
remote_args=(),
remote_kwargs={'exc_type': 'FarcicalError',
'value': 'test',
'traceback': 'traceback\ntraceback\n'})),
('unknown_kwarg',
dict(allowed=[],
clsname='Exception',
modname=EXCEPTIONS_MODULE,
cls=messaging.RemoteError,
args=[],
kwargs={'foobar': 'blaa'},
str=("Remote error: Exception test\n"
"[%r]." % u'traceback\ntraceback\n'),
msg=("Remote error: Exception test\n"
"[%r]." % u'traceback\ntraceback\n'),
remote_name='RemoteError',
remote_args=(),
remote_kwargs={'exc_type': 'Exception',
'value': 'test',
'traceback': 'traceback\ntraceback\n'})),
('system_exit',
dict(allowed=[],
clsname='SystemExit',
modname=EXCEPTIONS_MODULE,
cls=messaging.RemoteError,
args=[],
kwargs={},
str=("Remote error: SystemExit test\n"
"[%r]." % u'traceback\ntraceback\n'),
msg=("Remote error: SystemExit test\n"
"[%r]." % u'traceback\ntraceback\n'),
remote_name='RemoteError',
remote_args=(),
remote_kwargs={'exc_type': 'SystemExit',
'value': 'test',
'traceback': 'traceback\ntraceback\n'})),
]
def test_deserialize_remote_exception(self):
failure = {
'class': self.clsname,
'module': self.modname,
'message': 'test',
'tb': ['traceback\ntraceback\n'],
'args': self.args,
'kwargs': self.kwargs,
}
serialized = jsonutils.dumps(failure)
ex = exceptions.deserialize_remote_exception(serialized, self.allowed)
self.assertIsInstance(ex, self.cls)
self.assertEqual(self.remote_name, ex.__class__.__name__)
self.assertEqual(self.str, six.text_type(ex))
if hasattr(self, 'msg'):
self.assertEqual(self.msg, six.text_type(ex))
self.assertEqual((self.msg,) + self.remote_args, ex.args)
else:
self.assertEqual(self.remote_args, ex.args)
|
{
"content_hash": "e8473a92bcbde1c58f1e87a7d179c789",
"timestamp": "",
"source": "github",
"line_count": 292,
"max_line_length": 78,
"avg_line_length": 36.386986301369866,
"alnum_prop": 0.4883764705882353,
"repo_name": "redhat-openstack/oslo.messaging",
"id": "2665b5ef7b5235082fa51dd3e87f9ca3893fe2d6",
"size": "11232",
"binary": false,
"copies": "3",
"ref": "refs/heads/juno-patches",
"path": "tests/test_exception_serialization.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "954096"
},
{
"name": "Shell",
"bytes": "2805"
}
],
"symlink_target": ""
}
|
"""All functions return a Component so you can easily pipe or compose them.
There are two types of functions:
- decorators: return the original component
- containers: return a new component
"""
from functools import lru_cache, partial
import numpy as np
from omegaconf import OmegaConf
from pydantic import validate_arguments
from gdsfactory import ComponentReference
from gdsfactory.cell import cell
from gdsfactory.component import Component
from gdsfactory.components.straight import straight
from gdsfactory.components.text_rectangular import text_rectangular_multi_layer
from gdsfactory.port import auto_rename_ports
from gdsfactory.types import (
Anchor,
Axis,
ComponentSpec,
Float2,
LayerSpec,
List,
Optional,
Strs,
)
cache = lru_cache(maxsize=None)
def add_port(component: Component, **kwargs) -> Component:
"""Return Component with a new port."""
component.add_port(**kwargs)
return component
@cell
def add_text(
component: ComponentSpec,
text: str = "",
text_offset: Float2 = (0, 0),
text_anchor: Anchor = "cc",
text_factory: ComponentSpec = text_rectangular_multi_layer,
) -> Component:
"""Return component inside a new component with text geometry.
Args:
component: component spec.
text: text string.
text_offset: relative to component anchor. Defaults to center (cc).
text_anchor: relative to component (ce cw nc ne nw sc se sw center cc).
text_factory: function to add text labels.
"""
from gdsfactory.pdk import get_component
component = get_component(component)
component_new = Component()
component_new.component = component
ref = component_new.add_ref(component)
t = component_new << text_factory(text)
t.move(np.array(text_offset) + getattr(ref.size_info, text_anchor))
component_new.add_ports(ref.ports)
component_new.copy_child_info(component)
return component_new
def add_texts(
components: List[ComponentSpec],
prefix: str = "",
index0: int = 0,
**kwargs,
) -> List[Component]:
"""Return a list of Component with text labels.
Args:
components: list of component specs.
prefix: Optional prefix for the labels.
index0: defaults to 0 (0, for first component, 1 for second ...).
keyword Args:
text_offset: relative to component size info anchor. Defaults to center.
text_anchor: relative to component (ce cw nc ne nw sc se sw center cc).
text_factory: function to add text labels.
"""
return [
add_text(component, text=f"{prefix}{i + index0}", **kwargs)
for i, component in enumerate(components)
]
@cell
def rotate(
component: ComponentSpec, angle: float = 90, recenter: bool = False
) -> Component:
"""Return rotated component inside a new component.
Most times you just need to place a reference and rotate it.
This rotate function just encapsulates the rotated reference into a new component.
Args:
component: spec.
angle: to rotate in degrees.
recenter: recenter component after rotating.
"""
from gdsfactory.pdk import get_component
component = get_component(component)
component_new = Component()
component_new.component = component
ref = component_new.add_ref(component)
origin_offset = ref.origin - np.array((ref.xmin, ref.ymin))
ref.rotate(angle)
if recenter:
ref.move(
origin=ref.center,
destination=np.array((ref.xsize / 2, ref.ysize / 2)) - origin_offset,
)
component_new.add_ports(ref.ports)
component_new.copy_child_info(component)
return component_new
rotate90 = partial(rotate, angle=90)
rotate90n = partial(rotate, angle=-90)
rotate180 = partial(rotate, angle=180)
@cell
def mirror(
component: ComponentSpec, p1: Float2 = (0, 1), p2: Float2 = (0, 0)
) -> Component:
"""Return new Component with a mirrored reference.
Args:
component: component spec.
p1: first point to define mirror axis.
p2: second point to define mirror axis.
"""
from gdsfactory.pdk import get_component
component = get_component(component)
component_new = Component()
component_new.component = component
ref = component_new.add_ref(component)
ref.mirror(p1=p1, p2=p2)
component_new.add_ports(ref.ports)
component_new.copy_child_info(component)
return component_new
@cell
def move(
component: Component,
origin=(0, 0),
destination=None,
axis: Optional[Axis] = None,
) -> Component:
"""Return new Component with a moved reference to the original component.
Args:
component: to move.
origin: of component.
destination: Optional x, y.
axis: x or y axis.
"""
component_new = Component()
component_new.component = component
ref = component_new.add_ref(component)
ref.move(origin=origin, destination=destination, axis=axis)
component_new.add_ports(ref.ports)
component_new.copy_child_info(component)
return component_new
@cell
def transformed(ref: ComponentReference):
"""Returns flattened cell with reference transformations applied.
Args:
ref: the reference to flatten into a new cell.
"""
c = Component()
c.add(ref)
c = c.flatten()
c.copy_child_info(ref.ref_cell)
c.add_ports(ref.ports)
c.info["transformed_cell"] = ref.ref_cell.name
return c
def move_port_to_zero(component: Component, port_name: str = "o1"):
"""Return a container that contains a reference to the original component.
The new component has port_name in (0, 0).
"""
if port_name not in component.ports:
raise ValueError(
f"port_name = {port_name!r} not in {list(component.ports.keys())}"
)
return move(component, -component.ports[port_name].center)
def update_info(component: Component, **kwargs) -> Component:
"""Return Component with updated info."""
component.info.update(**kwargs)
return component
@validate_arguments
def add_settings_label(
component: ComponentSpec = straight,
layer_label: LayerSpec = (66, 0),
settings: Optional[Strs] = None,
ignore: Optional[Strs] = ("decorator",),
) -> Component:
"""Add a settings label to a component. Use it as a decorator.
Args:
component: spec.
layer_label: for label.
settings: list of settings to include. if None, adds all changed settings.
ignore: list of settings to ignore.
"""
from gdsfactory.pdk import get_component
component = get_component(component)
ignore = ignore or []
settings = settings or component.settings.changed.keys()
settings = set(settings) - set(ignore)
d = {setting: component.get_setting(setting) for setting in settings}
component.add_label(text=OmegaConf.to_yaml(d), layer=layer_label)
return component
__all__ = (
"add_port",
"add_text",
"add_settings_label",
"auto_rename_ports",
"cache",
"mirror",
"move",
"move_port_to_zero",
"rotate",
"update_info",
)
if __name__ == "__main__":
import gdsfactory as gf
c = gf.components.mmi1x2(
length_mmi=10,
decorator=partial(add_settings_label, settings=["name", "length_mmi"]),
)
# c.show(show_ports=True)
# cr = rotate(component=c)
# cr.show()
cr = transformed(c.ref())
cr.show()
# cr = c.rotate()
# cr.pprint()
# cr.show()
# cm = move(c, destination=(20, 20))
# cm.show()
# cm = mirror(c)
# cm.show()
# cm = c.mirror()
# cm.show()
# cm2 = move_port_to_zero(cm)
# cm2.show()
# cm3 = add_text(c, "hi")
# cm3.show()
# cr = rotate(component=c)
# cr.show()
# print(component_rotated)
# component_rotated.pprint
# component_netlist = component.get_netlist()
# component.pprint_netlist()
|
{
"content_hash": "35f8bbd0ddfaf4cd2eba6d65a97ee9fe",
"timestamp": "",
"source": "github",
"line_count": 307,
"max_line_length": 86,
"avg_line_length": 25.94788273615635,
"alnum_prop": 0.6550338940497112,
"repo_name": "gdsfactory/gdsfactory",
"id": "c26b2d6f0d312da256ad9c84acfdd56df74c5738",
"size": "7966",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gdsfactory/functions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "605"
},
{
"name": "Dockerfile",
"bytes": "31"
},
{
"name": "Makefile",
"bytes": "4572"
},
{
"name": "Python",
"bytes": "2471982"
},
{
"name": "Shell",
"bytes": "671"
},
{
"name": "XS",
"bytes": "10045"
}
],
"symlink_target": ""
}
|
import os
os.system('date')
os.system('hostname')
os.system("matlab -nodesktop -nosplash -r 'genLoads(PARAM_STRING)'")
os.system('qstat -j %s' % os.environ['JOB_ID'])
|
{
"content_hash": "2b6d9f84287afeee0e76557148ec3e53",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 68,
"avg_line_length": 21.25,
"alnum_prop": 0.6823529411764706,
"repo_name": "Guokr1991/cervix",
"id": "6dd49549f06af8d31149396f4d7f5a23eedc22cf",
"size": "170",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "field/9L4/genLoadsSGE.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Matlab",
"bytes": "2158"
},
{
"name": "Python",
"bytes": "14648"
}
],
"symlink_target": ""
}
|
"""
Module for inventory management tests
"""
from hamcrest import assert_that
from pyherc.test.cutesy import (carrying, Dagger, drop, Goblin, has_dropped,
Level, make, middle_of, place)
class TestDroppingItems():
"""
Tests for dropping items
"""
def __init__(self):
"""
Default constructor
"""
super().__init__()
def test_dropping_item(self):
"""
Items dropped by character should end on the floor
"""
dagger = Dagger()
Uglak = Goblin(carrying(dagger))
place(Uglak, middle_of(Level()))
make(Uglak, drop(dagger))
assert_that(Uglak, has_dropped(dagger))
|
{
"content_hash": "b3bfaf27f588b907dbfd68e2e3955610",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 76,
"avg_line_length": 24.344827586206897,
"alnum_prop": 0.5651558073654391,
"repo_name": "tuturto/pyherc",
"id": "db23865642265d5b54cdd8d76130480ea53170c5",
"size": "1832",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/pyherc/test/bdd/test_inventory.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Gherkin",
"bytes": "8825"
},
{
"name": "HTML",
"bytes": "529"
},
{
"name": "Hy",
"bytes": "603756"
},
{
"name": "Python",
"bytes": "975380"
}
],
"symlink_target": ""
}
|
"""
This features module provides access to features of the bytes of content in
revisions.
.. autodata:: revscoring.features.wikitext.revision
Represents the base revision of interest. Implements this basic structure:
* revision: :class:`~revscoring.features.wikitext.Revision`
* parent: :class:`~revscoring.features.wikitext.Revision`
* diff: :class:`~revscoring.features.wikitext.Diff`
Supporting classes
++++++++++++++++++
.. autoclass:: revscoring.features.wikitext.Revision
:members:
:inherited-members:
:member-order: bysource
:Character features:
**chars** : `int`
The number of characters
**whitespace_chars** : `int`
The number of whitespace characters
**markup_chars** : `int`
The number of wikitext markup characters
**cjk_chars** : `int`
The number of Chinese/Japanese/Korean characters
**entity_chars** : `int`
The number of HTML entity characters
**url_chars** : `int`
The number of URL characters
**word_chars** : `int`
The number of word characters
**uppercase_word_chars** : `int`
The number of UPPERCASE WORD characters
**punctuation_chars** : `int`
The number of punctuation characters
**break_chars** : `int`
The number of break characters
**longest_repeated_char** : `int`
        The length of the longest character repetition
:Tokenized features:
**tokens** : `int`
The number of tokens
**numbers** : `int`
The number of number tokens
**whitespaces** : `int`
The number of whitespace tokens
**markups** : `int`
The number of markup tokens
**cjks** : `int`
The number of Chinese/Japanese/Korean tokens
**entities** : `int`
The number of HTML entity tokens
**urls** : `int`
The number of URL tokens
**words** : `int`
The number of word tokens
**uppercase_words** : `int`
The number of UPPERCASE word tokens
**punctuations** : `int`
The number of punctuation tokens
**breaks** : `int`
The number of break tokens
**longest_token** : `int`
The length of the longest token
**longest_word** : `int`
The length of the longest word-token
:Parsed features:
**content_chars** : `int`
The number of characters of viewable content (no markup or
templates)
**headings** : `int`
The number of headings
**external_links** : `int`
The number of external links
**wikilinks** : `int`
The number of wikilinks (internal to other pages in the wiki)
**tags** : `int`
The number of HTML tags
**ref_tags** : `int`
The number of <ref> tags
**templates** : `int`
The number of templates
.. autoclass:: revscoring.features.wikitext.Diff
:members:
:inherited-members:
:member-order: bysource
:Character features:
**chars_added** : `int`
The number of characters added
**chars_removed** : `int`
The number of characters removed
**numeric_chars_added** : `int`
The number of numeric characters added
**numeric_chars_removed** : `int`
The number of numeric characters removed
**whitespace_chars_added** : `int`
The number of whitespace characters added
**whitespace_chars_removed** : `int`
The number of whitespace characters removed
**markup_chars_added** : `int`
The number of markup characters added
**markup_chars_removed** : `int`
The number of markup characters removed
**cjk_chars_added** : `int`
The number of cjk characters added
**cjk_chars_removed** : `int`
The number of cjk characters removed
**entity_chars_added** : `int`
The number of entity characters added
**entity_chars_removed** : `int`
The number of entity characters removed
**url_chars_added** : `int`
The number of url characters added
**url_chars_removed** : `int`
The number of url characters removed
**word_chars_added** : `int`
The number of word characters added
**word_chars_removed** : `int`
The number of word characters removed
**uppercase_word_chars_added** : `int`
The number of UPPERCASE word characters added
**uppercase_word_chars_removed** : `int`
The number of UPPERCASE word characters removed
**punctuation_chars_added** : `int`
The number of punctuation characters added
**punctuation_chars_removed** : `int`
The number of punctuation characters removed
**break_chars_added** : `int`
The number of break characters added
**break_chars_removed** : `int`
The number of break characters removed
**longest_repeated_char_added** : `int`
        The length of the longest character repetition added
:Token frequency features:
**token_delta_sum** : `int`
The sum of delta changes in the token frequency table
**token_delta_increase** : `int`
The sum of delta increases in the token frequency table
**token_delta_decrease** : `int`
The sum of delta decreases in the token frequency table
**token_prop_delta_sum** : `float`
The sum of proportional delta changes in the token
frequency table
**token_prop_delta_increase** : `float`
The sum of proportional delta increases in the token
frequency table
**token_prop_delta_decrease** : `float`
The sum of proportional delta decreases in the token
frequency table
**number_delta_sum** : `int`
The sum of delta changes in the number frequency table
**number_delta_increase** : `int`
The sum of delta increases in the number frequency table
**number_delta_decrease** : `int`
The sum of delta decreases in the number frequency table
**number_prop_delta_sum** : `float`
The sum of proportional delta changes in the number
frequency table
**number_prop_delta_increase** : `float`
The sum of proportional delta increases in the number
frequency table
**number_prop_delta_decrease** : `float`
The sum of proportional delta decreases in the number
frequency table
**whitespace_delta_sum** : `int`
The sum of delta changes in the whitespace frequency table
**whitespace_delta_increase** : `int`
The sum of delta increases in the whitespace frequency table
**whitespace_delta_decrease** : `int`
The sum of delta decreases in the whitespace frequency table
**whitespace_prop_delta_sum** : `float`
The sum of proportional delta changes in the whitespace
frequency table
**whitespace_prop_delta_increase** : `float`
The sum of proportional delta increases in the whitespace
frequency table
**whitespace_prop_delta_decrease** : `float`
The sum of proportional delta decreases in the whitespace
frequency table
**markup_delta_sum** : `int`
The sum of delta changes in the markup frequency table
**markup_delta_increase** : `int`
The sum of delta increases in the markup frequency table
**markup_delta_decrease** : `int`
The sum of delta decreases in the markup frequency table
**markup_prop_delta_sum** : `float`
The sum of proportional delta changes in the markup
frequency table
**markup_prop_delta_increase** : `float`
The sum of proportional delta increases in the markup
frequency table
**markup_prop_delta_decrease** : `float`
The sum of proportional delta decreases in the markup
frequency table
**cjk_delta_sum** : `int`
The sum of delta changes in the cjk frequency table
**cjk_delta_increase** : `int`
The sum of delta increases in the cjk frequency table
**cjk_delta_decrease** : `int`
The sum of delta decreases in the cjk frequency table
**cjk_prop_delta_sum** : `float`
The sum of proportional delta changes in the cjk
frequency table
**cjk_prop_delta_increase** : `float`
The sum of proportional delta increases in the cjk
frequency table
**cjk_prop_delta_decrease** : `float`
The sum of proportional delta decreases in the cjk
frequency table
**entity_delta_sum** : `int`
The sum of delta changes in the entity frequency table
**entity_delta_increase** : `int`
The sum of delta increases in the entity frequency table
**entity_delta_decrease** : `int`
The sum of delta decreases in the entity frequency table
**entity_prop_delta_sum** : `float`
The sum of proportional delta changes in the entity
frequency table
**entity_prop_delta_increase** : `float`
The sum of proportional delta increases in the entity
frequency table
**entity_prop_delta_decrease** : `float`
The sum of proportional delta decreases in the entity
frequency table
**url_delta_sum** : `int`
The sum of delta changes in the url frequency table
**url_delta_increase** : `int`
The sum of delta increases in the url frequency table
**url_delta_decrease** : `int`
The sum of delta decreases in the url frequency table
**url_prop_delta_sum** : `float`
The sum of proportional delta changes in the url
frequency table
**url_prop_delta_increase** : `float`
The sum of proportional delta increases in the url
frequency table
**url_prop_delta_decrease** : `float`
The sum of proportional delta decreases in the url
frequency table
**word_delta_sum** : `int`
The sum of delta changes in the word frequency table
**word_delta_increase** : `int`
The sum of delta increases in the word frequency table
**word_delta_decrease** : `int`
The sum of delta decreases in the word frequency table
**word_prop_delta_sum** : `float`
The sum of proportional delta changes in the word
frequency table
**word_prop_delta_increase** : `float`
The sum of proportional delta increases in the word
frequency table
**word_prop_delta_decrease** : `float`
The sum of proportional delta decreases in the word
frequency table
**uppercase_word_delta_sum** : `int`
The sum of delta changes in the UPPERCASE word frequency
table
**uppercase_word_delta_increase** : `int`
The sum of delta increases in the UPPERCASE word frequency
table
**uppercase_word_delta_decrease** : `int`
The sum of delta decreases in the UPPERCASE word frequency
table
**uppercase_word_prop_delta_sum** : `float`
The sum of proportional delta changes in the UPPERCASE word
frequency table
**uppercase_word_prop_delta_increase** : `float`
The sum of proportional delta increases in the UPPERCASE word
frequency table
**uppercase_word_prop_delta_decrease** : `float`
The sum of proportional delta decreases in the UPPERCASE word
frequency table
**punctuation_delta_sum** : `int`
The sum of delta changes in the punctuation frequency table
**punctuation_delta_increase** : `int`
The sum of delta increases in the punctuation frequency table
**punctuation_delta_decrease** : `int`
The sum of delta decreases in the punctuation frequency table
**punctuation_prop_delta_sum** : `float`
The sum of proportional delta changes in the punctuation
frequency table
**punctuation_prop_delta_increase** : `float`
The sum of proportional delta increases in the punctuation
frequency table
**punctuation_prop_delta_decrease** : `float`
The sum of proportional delta decreases in the punctuation
frequency table
**break_delta_sum** : `int`
The sum of delta changes in the break frequency table
**break_delta_increase** : `int`
The sum of delta increases in the break frequency table
**break_delta_decrease** : `int`
The sum of delta decreases in the break frequency table
**break_prop_delta_sum** : `float`
The sum of proportional delta changes in the break
frequency table
**break_prop_delta_increase** : `float`
The sum of proportional delta increases in the break
frequency table
**break_prop_delta_decrease** : `float`
The sum of proportional delta decreases in the break
frequency table
:Token edit features:
**segments_added** : `int`
The number of segments added
**segments_removed** : `int`
The number of segments removed
**tokens_added** : `int`
The number of tokens added
**tokens_removed** : `int`
The number of tokens removed
**numbers_added** : `int`
The number of number tokens added
**numbers_removed** : `int`
The number of number tokens removed
**markups_added** : `int`
The number of markup tokens added
**markups_removed** : `int`
The number of markup tokens removed
**whitespaces_added** : `int`
The number of whitespace tokens added
**whitespaces_removed** : `int`
The number of whitespace tokens removed
**cjks_added** : `int`
The number of cjk tokens added
**cjks_removed** : `int`
The number of cjk tokens removed
**entities_added** : `int`
The number of entity tokens added
**entities_removed** : `int`
The number of entity tokens removed
**urls_added** : `int`
The number of url tokens added
**urls_removed** : `int`
The number of url tokens removed
**words_added** : `int`
The number of word tokens added
**words_removed** : `int`
The number of word tokens removed
    **uppercase_words_added** : `int`
        The number of UPPERCASE word tokens added
    **uppercase_words_removed** : `int`
        The number of UPPERCASE word tokens removed
**punctuations_added** : `int`
The number of punctuation tokens added
**punctuations_removed** : `int`
The number of punctuation tokens removed
**breaks_added** : `int`
The number of break tokens added
**breaks_removed** : `int`
The number of break tokens removed
**longest_token_added** : `int`
        The length of the longest token added
**longest_uppercase_word_added** : `int`
        The length of the longest sequence of UPPERCASE characters
added
""" # noqa
from .features import Diff, Revision
from .revision_oriented import revision
__all__ = ["revision", "Revision", "Diff"]
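# Usage sketch: feature values are typically extracted with revscoring's
# dependency solver. The exact solver call and cache wiring below follow
# general revscoring conventions and are assumptions, not guarantees made by
# this module:
#   from revscoring.dependencies import solve
#   features = [revision.chars, revision.diff.words_added]
#   feature_values = list(solve(features, cache=extracted_datasources))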
|
{
"content_hash": "3fe5308ca50bc012f535c55e54dac546",
"timestamp": "",
"source": "github",
"line_count": 371,
"max_line_length": 81,
"avg_line_length": 43.23180592991914,
"alnum_prop": 0.5901240725731031,
"repo_name": "wiki-ai/revscoring",
"id": "46a28d2330b41f7cb49dde9d6067843e36b4513c",
"size": "16039",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "revscoring/features/wikitext/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "777"
},
{
"name": "Jupyter Notebook",
"bytes": "32675"
},
{
"name": "Python",
"bytes": "957061"
}
],
"symlink_target": ""
}
|
import theano
import theano.tensor as T
import treeano
import treeano.nodes as tn
import treeano.sandbox.utils
@treeano.register_node("clip_scaling")
class ClipScalingNode(treeano.NodeImpl):
hyperparameter_names = ("learnable",
"mins",
"maxs")
def compute_output(self, network, in_vw):
learnable = network.find_hyperparameter(["learnable"], False)
mins = network.find_hyperparameter(["mins"])
maxs = network.find_hyperparameter(["maxs"])
assert mins.ndim == maxs.ndim == 1
assert mins.shape == maxs.shape
mins = treeano.utils.as_fX(mins)
maxs = treeano.utils.as_fX(maxs)
num_scales = mins.shape[0]
if learnable:
mins_var = network.create_vw(
"mins",
shape=mins.shape,
is_shared=True,
tags={"parameter"},
default_inits=[treeano.inits.ConstantInit(mins)],
).variable
maxs_var = network.create_vw(
"maxs",
shape=maxs.shape,
is_shared=True,
tags={"parameter"},
default_inits=[treeano.inits.ConstantInit(maxs)],
).variable
else:
if treeano.utils.is_variable(mins):
mins_var = mins
else:
mins_var = T.constant(mins)
if treeano.utils.is_variable(maxs):
maxs_var = maxs
else:
maxs_var = T.constant(maxs)
in_pattern = list(range(in_vw.ndim))
# insert after channel dim
in_pattern.insert(2, "x")
param_pattern = ["x"] * in_vw.ndim
param_pattern.insert(2, 0)
in_b = in_vw.variable.dimshuffle(*in_pattern)
mins_b = mins_var.dimshuffle(*param_pattern)
maxs_b = maxs_var.dimshuffle(*param_pattern)
range_b = maxs_b - mins_b
# TODO constrain range to be > 0?
clipped = T.clip(in_b - mins_b, 0, range_b)
scaled = clipped / range_b
# reshape newly created dim into dim 1
out_ss = list(in_vw.symbolic_shape())
out_ss[1] *= num_scales
out_var = scaled.reshape(tuple(out_ss))
out_shape = list(in_vw.shape)
if out_shape[1] is not None:
out_shape[1] *= num_scales
out_shape = tuple(out_shape)
network.create_vw(
"default",
variable=out_var,
shape=out_shape,
            tags={"output"},
)
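# Shape walkthrough for ClipScalingNode above (TanhScalingNode and
# RBFScalingNode below use the same broadcasting layout); a 4d input is
# assumed for illustration:
#   in_vw:  (batch, channels, h, w) -> dimshuffle -> (batch, channels, 1, h, w)
#   params: (num_scales,)           -> dimshuffle -> (1, 1, num_scales, 1, 1)
# Broadcasting yields (batch, channels, num_scales, h, w), and the final
# reshape folds the scale axis into the channel axis:
#   output: (batch, channels * num_scales, h, w)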
@treeano.register_node("tanh_scaling")
class TanhScalingNode(treeano.NodeImpl):
hyperparameter_names = ("learnable",
"means",
"scales")
def compute_output(self, network, in_vw):
learnable = network.find_hyperparameter(["learnable"], False)
means = network.find_hyperparameter(["means"])
scales = network.find_hyperparameter(["scales"])
assert means.ndim == scales.ndim == 1
assert means.shape == scales.shape
means = treeano.utils.as_fX(means)
scales = treeano.utils.as_fX(scales)
num_scales = means.shape[0]
if learnable:
means_var = network.create_vw(
"means",
shape=means.shape,
is_shared=True,
tags={"parameter"},
default_inits=[treeano.inits.ConstantInit(means)],
).variable
scales_var = network.create_vw(
"scales",
shape=scales.shape,
is_shared=True,
tags={"parameter"},
default_inits=[treeano.inits.ConstantInit(scales)],
).variable
else:
if treeano.utils.is_variable(means):
means_var = means
else:
means_var = T.constant(means)
if treeano.utils.is_variable(scales):
scales_var = scales
else:
scales_var = T.constant(scales)
in_pattern = list(range(in_vw.ndim))
# insert after channel dim
in_pattern.insert(2, "x")
param_pattern = ["x"] * in_vw.ndim
param_pattern.insert(2, 0)
in_b = in_vw.variable.dimshuffle(*in_pattern)
means_b = means_var.dimshuffle(*param_pattern)
scales_b = scales_var.dimshuffle(*param_pattern)
# TODO constrain scales to be > 0?
scaled = T.tanh((in_b - means_b) / scales_b)
# reshape newly created dim into dim 1
out_ss = list(in_vw.symbolic_shape())
out_ss[1] *= num_scales
out_var = scaled.reshape(tuple(out_ss))
out_shape = list(in_vw.shape)
if out_shape[1] is not None:
out_shape[1] *= num_scales
out_shape = tuple(out_shape)
network.create_vw(
"default",
variable=out_var,
shape=out_shape,
tags={"output"},
)
@treeano.register_node("rbf_scaling")
class RBFScalingNode(treeano.NodeImpl):
hyperparameter_names = ("learnable",
"means",
"scales")
def compute_output(self, network, in_vw):
learnable = network.find_hyperparameter(["learnable"], False)
means = network.find_hyperparameter(["means"])
scales = network.find_hyperparameter(["scales"])
assert means.ndim == scales.ndim == 1
assert means.shape == scales.shape
means = treeano.utils.as_fX(means)
scales = treeano.utils.as_fX(scales)
num_scales = means.shape[0]
if learnable:
means_var = network.create_vw(
"means",
shape=means.shape,
is_shared=True,
tags={"parameter"},
default_inits=[treeano.inits.ConstantInit(means)],
).variable
scales_var = network.create_vw(
"scales",
shape=scales.shape,
is_shared=True,
tags={"parameter"},
default_inits=[treeano.inits.ConstantInit(scales)],
).variable
else:
if treeano.utils.is_variable(means):
means_var = means
else:
means_var = T.constant(means)
if treeano.utils.is_variable(scales):
scales_var = scales
else:
scales_var = T.constant(scales)
in_pattern = list(range(in_vw.ndim))
# insert after channel dim
in_pattern.insert(2, "x")
param_pattern = ["x"] * in_vw.ndim
param_pattern.insert(2, 0)
in_b = in_vw.variable.dimshuffle(*in_pattern)
means_b = means_var.dimshuffle(*param_pattern)
scales_b = scales_var.dimshuffle(*param_pattern)
# TODO constrain scales to be > 0?
scaled = T.exp(-T.sqr(in_b - means_b) / scales_b)
# reshape newly created dim into dim 1
out_ss = list(in_vw.symbolic_shape())
out_ss[1] *= num_scales
out_var = scaled.reshape(tuple(out_ss))
out_shape = list(in_vw.shape)
if out_shape[1] is not None:
out_shape[1] *= num_scales
out_shape = tuple(out_shape)
network.create_vw(
"default",
variable=out_var,
shape=out_shape,
tags={"output"},
)
|
{
"content_hash": "9aa64f24ba275e3c3cbc3bca43147d2e",
"timestamp": "",
"source": "github",
"line_count": 233,
"max_line_length": 69,
"avg_line_length": 31.88412017167382,
"alnum_prop": 0.5255081437609369,
"repo_name": "jagill/treeano",
"id": "6e078372ce6e00ce3b828a85404f835ac068cc02",
"size": "7429",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "treeano/sandbox/nodes/input_scaling.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1121"
},
{
"name": "JavaScript",
"bytes": "16041"
},
{
"name": "Python",
"bytes": "748632"
}
],
"symlink_target": ""
}
|
import sys
import jinja2
import glob
import os
import argparse
from collections import OrderedDict
import abc
from shutil import copyfile, rmtree
EXTENSIONS = {'python' : 'py', 'documentation' : 'html'}
TYPES = ["boolean", "float", "long", "string", "int", "double", "array"]
class Constant(object):
def __init__(self, variableName, propertyName, type, description, defaultValue=None):
self.variableName = variableName
self.propertyName = propertyName
if type in TYPES:
self.type = type
else:
raise Exception("Type '" + type + "' not supported in " + variableName)
self.defaultValue = defaultValue
self.description = description
def __str__(self):
return self.variableName + "|" + self.propertyName + "|" + self.type + \
("|" + self.defaultValue if self.defaultValue else '') + "|" + self.description
class HelperPlugin(object):
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def generate(self, group, constants):
return
def main(argv):
#Get templates from templates/ folder, and build our allowed gen languages from that
genTypes = []
for template in glob.glob("templates/*.template"):
gen = os.path.basename(template).split('.template')[0]
genTypes.append(gen)
genTypes += [file for file in os.listdir('templates') if os.path.isdir("templates/" + file)]
parser = argparse.ArgumentParser(description="Generate constants in different languages from a constants file")
parser.add_argument("constants_file", metavar="constants-file", help="Required: Location of constants template file")
parser.add_argument("gen", help="Required: The type of constants to generate", choices=genTypes)
parser.add_argument("-o", "--output-dir", help="The output location, defaults to gen-(gen).(gen)", default=None)
parser.add_argument("-H", "--helper", nargs="*", metavar="GROUP", help="Runs helper.main(group, constants[group])", default=None)
parameters = parser.parse_args()
gen = parameters.gen
constantsFile = parameters.constants_file
output = parameters.output_dir if parameters.output_dir else "gen-" + gen + "." + (EXTENSIONS.get(gen) if EXTENSIONS.get(gen) else gen)
# template = gen + ".template"
constants = processConstantsFile(constantsFile)
if parameters.helper:
try:
import helper
except (NameError, ImportError) as e:
raise NameError("Error importing helper file: " + str(e))
for group in parameters.helper:
groupCons = constants.get(group)
            if groupCons is None:
raise Exception("Constant group " + group + " does not exist or has no constants")
helper.Helper().generate(group, constants[group])
processTemplate(constants, output, gen)
def processConstantsFile(inputFile):
"""
    Process constants from a file and return a dictionary of the constants
"""
constants = OrderedDict() #To keep order in constants file
text = open(inputFile).readlines()
lastGroup = ''
for line in text:
line = line.split('#', 1)[0].strip()
if len(line) == 0:
continue
elif line.startswith('[') and line.endswith(']'):
lastGroup = line[1:-1]
constants[lastGroup] = []
else:
split = line.split('|')
if len(split) == 5:
cons = Constant(split[0], split[1], split[2], split[4], split[3])
constants[lastGroup].append(cons)
elif len(split) == 4:
cons = Constant(split[0], split[1], split[2], split[3])
constants[lastGroup].append(cons)
else:
raise Exception("Not enough arguments: " + line)
    return OrderedDict(constants.items())  # Shallow copy preserving file order
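# Constants-file format accepted by processConstantsFile (the example entries
# below are illustrative assumptions, not taken from a real constants file):
#   # comments start with '#'
#   [GroupName]
#   MY_FLAG|my.flag|boolean|true|Enables the flag     (five fields: with default)
#   MY_NAME|my.name|string|A name with no default     (four fields: no default)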
def processTemplate(constants, output, gen):
if os.path.isfile("templates/" + gen + ".template"):
if os.path.exists(gen):
raise Exception("Can't have " + gen +".template and " + gen + "directory in templates/")
jinja = jinja2.Environment(trim_blocks=True, loader=jinja2.PackageLoader('ezconfiguration_constants_generator', 'templates'))
if os.path.isdir(output):
rmtree(output)
out = open(output, 'w')
template = jinja.get_template(gen + ".template")
out.write(template.render(constants=constants))
out.close()
elif os.path.exists("templates/" + gen):
output = os.path.splitext(output)[0]
if os.path.exists(output):
rmtree(output)
        processTemplateDirectory(constants, output, "templates/" + gen)
else:
raise Exception("error finding template")
def processTemplateDirectory(constants, output, directory):
files = [f for f in os.listdir(directory) if f[0] != '.']
if not os.path.isdir(output):
os.makedirs(output)
for file in files:
src = os.path.join(directory, file)
dst = os.path.join(output, file)
if os.path.isdir(src):
processTemplateDirectory(constants, dst, src)
elif src.endswith(".template"):
jinja = jinja2.Environment(trim_blocks=True, loader=jinja2.FileSystemLoader(os.path.dirname(src)))
out = open(dst.split(".template")[0], 'w')
template = jinja.get_template(os.path.basename(src))
out.write(template.render(constants=constants))
            out.close()
else:
copyfile(src, dst)
if __name__ == "__main__":
main(sys.argv)
|
{
"content_hash": "4669482cfa0665cadd5b65a6742fa72e",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 139,
"avg_line_length": 36.63768115942029,
"alnum_prop": 0.689873417721519,
"repo_name": "ezbake/ezbake-configuration",
"id": "5646f06dd835c5d1bd46738de151875e28f6aade",
"size": "5679",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "constants/generator/ezconfiguration_constants_generator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "803"
},
{
"name": "C++",
"bytes": "1312906"
},
{
"name": "Java",
"bytes": "54139"
},
{
"name": "JavaScript",
"bytes": "93574"
},
{
"name": "Python",
"bytes": "117590"
},
{
"name": "Ruby",
"bytes": "34585"
},
{
"name": "Shell",
"bytes": "2994"
}
],
"symlink_target": ""
}
|
import socket
import Axon
from Kamaelia.Apps.JMB.WSGI import WSGIFactory
from Kamaelia.Chassis.ConnectedServer import ServerCore
from Kamaelia.Protocol.HTTP import ErrorPages
from Kamaelia.Support.Protocol.HTTP import HTTPProtocol
port=8080
#This is just a configuration dictionary for general WSGI stuff. This needs to be passed to the handler
#to run
WsgiConfig ={
'server_software' : "Example WSGI Web Server",
'server_admin' : "Jason Baker",
'wsgi_ver' : (1,0),
}
#Now we need to tell the server how to find the applications. We do this by creating a URL routing list.
#What this essentially does is tell the WsgiHandler where to find the modules containing the WSGI Applications.
url_list = [
{
'kp.regex' : 'simple',
'kp.import_path' : 'Kamaelia.Apps.JMB.WSGI.Apps.Simple',
'kp.app_object' : 'simple_app',
},
{
'kp.regex' : '.*', #This is the entry for the 404 error handler. This basically says "match everything else."
'kp.import_path' : 'Kamaelia.Apps.JMB.WSGI.Apps.ErrorHandler',
'kp.app_object' : 'application'
}
]
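#Each entry pairs a 'kp.regex' pattern with an application object located via
#'kp.import_path'; requests appear to be matched in list order, so the '.*'
#catch-all must stay last (this reading is inferred from the list above, not
#from WSGIFactory's documentation).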
def main():
#This line is so that the HTTPRequestHandler knows what component to route requests to.
routing = [ ['/', WSGIFactory(WsgiConfig, url_list)] ]
server = ServerCore(protocol=HTTPProtocol(routing),
port=port,
socketOptions=(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1))
print 'Serving on port %s' % (port)
server.run()
if __name__ == '__main__':
main()
|
{
"content_hash": "4db8d80bb2b60056989b863908d5c672",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 115,
"avg_line_length": 33.53333333333333,
"alnum_prop": 0.6832339297548045,
"repo_name": "bbc/kamaelia",
"id": "f7ebf08e6c233c910c6be3aeb6647c1436f63300",
"size": "2471",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "Code/Python/Apps/GSOC_JMB/App/wsgi_example.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "62985"
},
{
"name": "C",
"bytes": "212854"
},
{
"name": "C++",
"bytes": "327546"
},
{
"name": "CSS",
"bytes": "114434"
},
{
"name": "ChucK",
"bytes": "422"
},
{
"name": "Diff",
"bytes": "483"
},
{
"name": "Gettext Catalog",
"bytes": "3919909"
},
{
"name": "HTML",
"bytes": "1288960"
},
{
"name": "Java",
"bytes": "31832"
},
{
"name": "JavaScript",
"bytes": "829491"
},
{
"name": "Makefile",
"bytes": "5768"
},
{
"name": "NSIS",
"bytes": "18867"
},
{
"name": "PHP",
"bytes": "49059"
},
{
"name": "Perl",
"bytes": "31234"
},
{
"name": "Processing",
"bytes": "2885"
},
{
"name": "Pure Data",
"bytes": "7485482"
},
{
"name": "Python",
"bytes": "18896320"
},
{
"name": "Ruby",
"bytes": "4165"
},
{
"name": "Shell",
"bytes": "711244"
}
],
"symlink_target": ""
}
|
"""Pushbullet platform for notify component."""
import logging
import mimetypes
import voluptuous as vol
from homeassistant.const import CONF_API_KEY
import homeassistant.helpers.config_validation as cv
from homeassistant.components.notify import (
ATTR_DATA, ATTR_TARGET, ATTR_TITLE, ATTR_TITLE_DEFAULT, PLATFORM_SCHEMA,
BaseNotificationService)
_LOGGER = logging.getLogger(__name__)
ATTR_URL = 'url'
ATTR_FILE = 'file'
ATTR_FILE_URL = 'file_url'
ATTR_LIST = 'list'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_API_KEY): cv.string,
})
def get_service(hass, config, discovery_info=None):
"""Get the Pushbullet notification service."""
from pushbullet import PushBullet
from pushbullet import InvalidKeyError
try:
pushbullet = PushBullet(config[CONF_API_KEY])
except InvalidKeyError:
_LOGGER.error("Wrong API key supplied")
return None
return PushBulletNotificationService(pushbullet)
class PushBulletNotificationService(BaseNotificationService):
"""Implement the notification service for Pushbullet."""
def __init__(self, pb):
"""Initialize the service."""
self.pushbullet = pb
self.pbtargets = {}
self.refresh()
def refresh(self):
"""Refresh devices, contacts, etc.
pbtargets stores all targets available from this Pushbullet instance
        into a dict. These are Pushbullet objects! It sacrifices a bit of
        memory for faster processing at send_message.
        As of September 2015, contacts were replaced by chats. This is not
        implemented in the module yet.
"""
self.pushbullet.refresh()
self.pbtargets = {
'device': {
tgt.nickname.lower(): tgt for tgt in self.pushbullet.devices},
'channel': {
tgt.channel_tag.lower(): tgt for
tgt in self.pushbullet.channels},
}
def send_message(self, message=None, **kwargs):
"""Send a message to a specified target.
If no target specified, a 'normal' push will be sent to all devices
linked to the Pushbullet account.
Email is special, these are assumed to always exist. We use a special
call which doesn't require a push object.
"""
targets = kwargs.get(ATTR_TARGET)
title = kwargs.get(ATTR_TITLE, ATTR_TITLE_DEFAULT)
data = kwargs.get(ATTR_DATA)
refreshed = False
if not targets:
# Backward compatibility, notify all devices in own account.
self._push_data(message, title, data, self.pushbullet)
_LOGGER.info("Sent notification to self")
return
# Main loop, process all targets specified.
for target in targets:
try:
ttype, tname = target.split('/', 1)
except ValueError:
_LOGGER.error("Invalid target syntax: %s", target)
continue
# Target is email, send directly, don't use a target object.
# This also seems to work to send to all devices in own account.
if ttype == 'email':
self._push_data(message, title, data, self.pushbullet, tname)
_LOGGER.info("Sent notification to email %s", tname)
continue
# Refresh if name not found. While awaiting periodic refresh
            # solution in component, poor man's refresh.
if ttype not in self.pbtargets:
_LOGGER.error("Invalid target syntax: %s", target)
continue
tname = tname.lower()
if tname not in self.pbtargets[ttype] and not refreshed:
self.refresh()
refreshed = True
# Attempt push_note on a dict value. Keys are types & target
# name. Dict pbtargets has all *actual* targets.
try:
self._push_data(message, title, data,
self.pbtargets[ttype][tname])
_LOGGER.info("Sent notification to %s/%s", ttype, tname)
except KeyError:
_LOGGER.error("No such target: %s/%s", ttype, tname)
continue
def _push_data(self, message, title, data, pusher, email=None):
"""Create the message content."""
from pushbullet import PushError
if data is None:
data = {}
data_list = data.get(ATTR_LIST)
url = data.get(ATTR_URL)
filepath = data.get(ATTR_FILE)
file_url = data.get(ATTR_FILE_URL)
try:
email_kwargs = {}
if email:
email_kwargs['email'] = email
if url:
pusher.push_link(title, url, body=message, **email_kwargs)
elif filepath:
if not self.hass.config.is_allowed_path(filepath):
_LOGGER.error("Filepath is not valid or allowed")
return
with open(filepath, 'rb') as fileh:
filedata = self.pushbullet.upload_file(fileh, filepath)
if filedata.get('file_type') == 'application/x-empty':
_LOGGER.error("Can not send an empty file")
return
filedata.update(email_kwargs)
pusher.push_file(title=title, body=message,
**filedata)
elif file_url:
if not file_url.startswith('http'):
_LOGGER.error("URL should start with http or https")
return
pusher.push_file(title=title, body=message,
file_name=file_url, file_url=file_url,
file_type=(mimetypes
.guess_type(file_url)[0]),
**email_kwargs)
elif data_list:
pusher.push_list(title, data_list, **email_kwargs)
else:
pusher.push_note(title, message, **email_kwargs)
except PushError as err:
_LOGGER.error("Notify failed: %s", err)
|
{
"content_hash": "7305462573cf24977845a6aa8c6a1ad4",
"timestamp": "",
"source": "github",
"line_count": 165,
"max_line_length": 78,
"avg_line_length": 37.53939393939394,
"alnum_prop": 0.5666774297707459,
"repo_name": "jnewland/home-assistant",
"id": "d1d9a6449ef2a30bb9e6df65d02166a3bd00c415",
"size": "6194",
"binary": false,
"copies": "7",
"ref": "refs/heads/ci",
"path": "homeassistant/components/pushbullet/notify.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1081"
},
{
"name": "Python",
"bytes": "15240512"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17862"
}
],
"symlink_target": ""
}
|
"""
Django settings.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
import sys
from os.path import join, dirname
from configurations import Configuration, values
class Common(Configuration):
reload(sys)
sys.setdefaultencoding('utf-8')
# COMMON CONFIGURATION
BASE_DIR = dirname(dirname(__file__))
PROJECT_NAME = 'example'
ROOT_URLCONF = 'urls'
BASE_URL = 'http://api.example.com'
WEB_URL = 'http://example.com'
API_VERSION = '0.1'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = 'wsgi.application'
# END COMMON CONFIGURATION
# Custom user app defaults
# Select the correct user model
AUTH_USER_MODEL = "user.User"
# LOGIN_REDIRECT_URL = "test_users:redirect"
# LOGIN_URL = "account_login"
# END Custom user app defaults
# APP CONFIGURATION
DJANGO_APPS = (
# Default Django apps:
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Useful template tags:
# 'django.contrib.humanize',
# Admin
# 'grappelli',
# 'django.contrib.admin',
)
THIRD_PARTY_APPS = (
# 'crispy_forms', # Form layouts
'cacheops',
# 'django_extensions',
'rest_framework',
'rest_framework_swagger',
# 'django_rq',
# 'imagekit',
# 'django_gravatar',
# 'rest_framework_mongoengine',
# 'haystack',
)
# Apps specific for this project go here.
LOCAL_APPS = (
# Your stuff: custom apps go here
# 'apps.user',
)
# END LOCAL APPS
# APP CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
# END APP CONFIGURATION
# REST FRAMEWORK CONFIGURATION
AUTHTOKEN_EXPIRED_DAYS = 7
REST_FRAMEWORK = {
'PAGE_SIZE': 10,
'PAGINATE_BY': 10,
'DEFAULT_AUTHENTICATION_CLASSES': (
# 'rest_framework.authentication.TokenAuthentication',
'customs.authentications.XTokenAuthentication',
'rest_framework.authentication.SessionAuthentication',
),
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.IsAuthenticated',
),
'DEFAULT_RENDERER_CLASSES': (
# 'customs.renderers.XJSONRenderer',
'rest_framework.renderers.JSONRenderer',
'customs.renderers.XAdminRenderer',
'rest_framework.renderers.BrowsableAPIRenderer',
),
'DEFAULT_CONTENT_NEGOTIATION_CLASS': 'customs.negotiation.XDefaultContentNegotiation',
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination',
}
# END REST FRAMEWORK CONFIGURATION
# SWAGGER CONFIGURATION
# https://github.com/swagger-api/swagger-spec/blob/master/versions/1.2.md
SWAGGER_SETTINGS = {
"exclude_namespaces": [],
"api_version": API_VERSION,
"api_path": "/",
"enabled_methods": [
'get',
'post',
'put',
'patch',
'delete'
],
"api_key": '854ea3143b1cf554dd791ef5930ffccf5cab9b0c',
"is_authenticated": True,
"is_superuser": True,
# "permission_denied_handler": 'TODO',
"info": {
'contact': 'cyb@xinshu.me',
'description': 'API documents',
'title': 'API documents',
},
}
# END SWAGGER CONFIGURATION
# REDIS SESSION CONFIGURATION
SESSION_ENGINE = 'redis_sessions.session'
SESSION_REDIS_HOST = 'localhost'
SESSION_REDIS_PORT = 6379
SESSION_REDIS_DB = 0
# SESSION_REDIS_PASSWORD = '123456'
SESSION_REDIS_PREFIX = 'session'
# END REDIS SESSION CONFIGURATION
# RQ CONFIGURATION
RQ_QUEUES = {
'default': {
'HOST': 'localhost',
'PORT': 6379,
'DB': 1,
'DEFAULT_TIMEOUT': 900,
},
'high': {
'HOST': 'localhost',
'PORT': 6379,
'DB': 1,
'DEFAULT_TIMEOUT': 900,
},
'low': {
'HOST': 'localhost',
'PORT': 6379,
'DB': 1,
'DEFAULT_TIMEOUT': 900,
},
}
# END RQ CONFIGURATION
# CACHE OPS CONFIGURATION
CACHEOPS_REDIS = {
'host': 'localhost', # redis-server is on same machine
'port': 6379, # default redis port
'db': 2, # SELECT non-default redis database
# using separate redis db or redis instance
# is highly recommended
'socket_timeout': 3,
}
DEFAULT_CACHE_TIMEOUT = 60 * 30
CACHE_QUERY = True # CACHE DB QUERY
CACHEOPS = {
# Automatically cache any User.objects.get() calls for 15 minutes
# This includes request.user or post.author access,
# where Post.author is a foreign key to auth.User
'auth.user': {'ops': 'get', 'timeout': 60 * 15},
# 'apps.user.a': {'ops': 'get', 'timeout': 60 * 15},
# Automatically cache all gets and queryset fetches
# to other django.contrib.auth models for an hour
'auth.*': {'ops': ('fetch', 'get'), 'timeout': 60 * 60},
# Cache gets, fetches, counts and exists to Permission
# 'all' is just an alias for ('get', 'fetch', 'count', 'exists')
'auth.permission': {'ops': 'all', 'timeout': 60 * 60},
# Enable manual caching on all other models with default timeout of an hour
# Use Post.objects.cache().get(...)
# or Tags.objects.filter(...).order_by(...).cache()
# to cache particular ORM request.
# Invalidation is still automatic
        # '*.*': {'ops': (), 'timeout': 60 * 60},
        # Since ops is empty by default, the commented line above can be
        # written more simply as:
        '*.*': {'timeout': 60 * 60},
}
# END CACHE OPS CONFIGURATION
# HAYSTACK CONFIGURATION
HAYSTACK_CONNECTIONS = {
# 'default': {
# 'ENGINE': 'haystack.backends.solr_backend.SolrEngine',
# 'URL': 'http://127.0.0.1:8983/solr'
# ...or for multicore...
# 'URL': 'http://127.0.0.1:8983/solr/mysite',
# },
'default': {
'ENGINE': 'haystack.backends.elasticsearch_backend.ElasticsearchSearchEngine',
'URL': 'http://127.0.0.1:9200/',
'INDEX_NAME': 'haystack',
'INCLUDE_SPELLING': True,
}
}
# HAYSTACK_SIGNAL_PROCESSOR = 'haystack.signals.RealtimeSignalProcessor'
HAYSTACK_SIGNAL_PROCESSOR = 'apps.search.signals.DelaySignalProcessor'
HAYSTACK_SEARCH_RESULTS_PER_PAGE = 20
HAYSTACK_SIGNAL_TRIGGER_MODELS = (
# 'apps.article.models.Article',
)
# END HAYSTACK CONFIGURATION
# CORS HEADER
CORS_ALLOW_METHODS = (
'GET',
'POST',
'PUT',
'PATCH',
'DELETE',
'OPTIONS'
)
CORS_ALLOW_HEADERS = (
'x-requested-with',
'content-type',
'accept',
'origin',
'authorization',
'x-csrftoken',
'X-CSRFToken'
)
CORS_PREFLIGHT_MAX_AGE = 86400
CORS_ALLOW_CREDENTIALS = True
CORS_REPLACE_HTTPS_REFERER = False
# END CORS HEADER
# EMAIL CONFIGURATION
EMAIL_USE_TLS = False
EMAIL_HOST = 'smtp.exmail.qq.com'
EMAIL_PORT = 25
EMAIL_HOST_USER = 'noreply@example.com'
EMAIL_HOST_PASSWORD = '123456'
# END EMAIL CONFIGURATION
# LOGGING CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#logging
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
},
'simple': {
'format': '%(levelname)s %(message)s'
},
},
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'null': {
'level': 'DEBUG',
'class': 'django.utils.log.NullHandler',
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'simple'
},
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler',
'include_html': True,
},
'error': {
'level': 'ERROR',
'class': 'logging.handlers.RotatingFileHandler',
'filename': os.path.join(BASE_DIR, 'logs/error.log'),
                'maxBytes': 16777216,  # 16 megabytes
'formatter': 'verbose'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
'django': {
'handlers': ['error'],
'level': 'ERROR',
'propagate': True,
}
},
# you can also shortcut 'loggers' and just configure logging for EVERYTHING at once
'root': {
'handlers': ['console', 'mail_admins'],
'level': 'INFO'
},
}
# END LOGGING CONFIGURATION
# MIDDLEWARE CONFIGURATION
MIDDLEWARE_CLASSES = (
# 'django.middleware.cache.UpdateCacheMiddleware', # This must be first on the list
# Make sure djangosecure.middleware.SecurityMiddleware is listed first
'djangosecure.middleware.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
# 'django.middleware.cache.FetchFromCacheMiddleware', # This must be last
)
# END MIDDLEWARE CONFIGURATION
# MIGRATIONS CONFIGURATION
# MIGRATION_MODULES = {
# 'sites': 'contrib.sites.migrations'
# }
# END MIGRATIONS CONFIGURATION
# DEBUG
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = values.BooleanValue(False)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
TEMPLATE_DEBUG = DEBUG
# END DEBUG
# SECRET CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Note: This key only used for development and testing.
# In production, this is changed to a values.SecretValue() setting
SECRET_KEY = "r2=_vzt7(8dcl+yyo*4dmef&jp&iwxb=6*4f59r)h^0udgtwb1"
# END SECRET CONFIGURATION
# FIXTURE CONFIGURATION
# See:
# https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS
FIXTURE_DIRS = (
join(BASE_DIR, 'fixtures'),
)
# END FIXTURE CONFIGURATION
# DATABASE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
'default': {
# Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or
# 'oracle'.
'ENGINE': 'django.db.backends.mysql',
# Or path to database file if using sqlite3.
'NAME': 'example',
'USER': 'root', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
# Set to empty string for localhost. Not used with sqlite3.
'HOST': '127.0.0.1',
# Set to empty string for default. Not used with sqlite3.
'PORT': '3306',
# 'TEST_CHARSET': 'utf8',
# 'TEST_COLLATION': 'utf8_general_ci',
'OPTIONS': {'charset': 'utf8mb4'},
},
}
# END DATABASE CONFIGURATION
# CACHING
# Do this here because thanks to django-pylibmc-sasl and pylibmc
# memcacheify (used on heroku) is painful to install on windows.
# CACHES = {
# 'default': {
# 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
# 'LOCATION': ''
# }
# }
# END CACHING
# GENERAL CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#time-zone
TIME_ZONE = 'Asia/Shanghai'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'en-us'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = False
# END GENERAL CONFIGURATION
# TEMPLATE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.tz',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.request',
        # Your stuff: custom template context processors go here
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
TEMPLATE_DIRS = (
join(BASE_DIR, 'templates'),
)
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
# See: http://django-crispy-forms.readthedocs.org/en/latest/install.html#template-packs
CRISPY_TEMPLATE_PACK = 'bootstrap3'
# END TEMPLATE CONFIGURATION
# STATIC FILE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = join(BASE_DIR, 'static')
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# See:
# https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
# STATICFILES_DIRS = (
# join(BASE_DIR, 'static'),
# )
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# END STATIC FILE CONFIGURATION
# MEDIA CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = join(BASE_DIR, 'media')
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
# END MEDIA CONFIGURATION
# AUTHENTICATION CONFIGURATION
AUTHENTICATION_BACKENDS = (
"django.contrib.auth.backends.ModelBackend",
)
# Some really nice defaults
ACCOUNT_AUTHENTICATION_METHOD = "username"
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_VERIFICATION = "mandatory"
# END AUTHENTICATION CONFIGURATION
# SLUGLIFIER
# AUTOSLUG_SLUGIFY_FUNCTION = "slugify.slugify"
# END SLUGLIFIER
|
{
"content_hash": "c4046bc3a53439bf1baa0a60a336fcd0",
"timestamp": "",
"source": "github",
"line_count": 485,
"max_line_length": 99,
"avg_line_length": 33.863917525773196,
"alnum_prop": 0.5868241597661958,
"repo_name": "fortyMiles/my-family",
"id": "9b61c7cfbfe32dea770ee878c941bbfec5821715",
"size": "16448",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "myfamily/settings/common.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "137077"
},
{
"name": "JavaScript",
"bytes": "25222"
},
{
"name": "Python",
"bytes": "105553"
}
],
"symlink_target": ""
}
|
import os
from cuttsum.resources import MultiProcessWorker
import cuttsum.corpora
import cuttsum.events
import cuttsum.judgements
from sklearn.ensemble import GradientBoostingRegressor
import pandas as pd
import numpy as np
import regex as re
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn import cross_validation
from sklearn.metrics import precision_score
from sklearn.naive_bayes import MultinomialNB
from sklearn.externals import joblib
import corenlp
from cuttsum.misc import english_stopwords
from cuttsum.pipeline import InputStreamResource
class NuggetRegressor(MultiProcessWorker):
cols = [
"BASIC length", #"BASIC char length",
"BASIC doc position", "BASIC all caps ratio",
"BASIC upper ratio",
# "BASIC lower ratio",
# "BASIC punc ratio",
"BASIC person ratio",
"BASIC location ratio",
"BASIC organization ratio", "BASIC date ratio",
"BASIC time ratio", "BASIC duration ratio",
"BASIC number ratio", "BASIC ordinal ratio",
"BASIC percent ratio",
"BASIC money ratio",
# "BASIC set ratio", "BASIC misc ratio"
#]
#query_bw_cols = [
# "Q_sent_query_cov",
# "Q_sent_syn_cov",
# "Q_sent_hyper_cov",
# "Q_sent_hypo_cov",
#]#
#query_fw_cols = [
"Q_query_sent_cov",
"Q_syn_sent_cov",
"Q_hyper_sent_cov",
"Q_hypo_sent_cov",
#]
#lm_cols = [
"LM domain avg lp",
"LM gw avg lp",
#sum_cols = [
"SUM_sbasic_sum",
"SUM_sbasic_amean",
# "SUM_sbasic_max",
"SUM_novelty_gmean",
"SUM_novelty_amean",
# "SUM_novelty_max",
"SUM_centrality",
"SUM_pagerank",
"SUM_sem_novelty_gmean",
"SUM_sem_novelty_amean",
# "SUM_novelty_max",
"SUM_sem_centrality",
"SUM_sem_pagerank",
#]
#stream_cols = [
"STREAM_sbasic_sum",
"STREAM_sbasic_amean",
"STREAM_sbasic_max",
"STREAM_per_prob_sum",
"STREAM_per_prob_max",
"STREAM_per_prob_amean",
"STREAM_loc_prob_sum",
"STREAM_loc_prob_max",
"STREAM_loc_prob_amean",
"STREAM_org_prob_sum",
"STREAM_org_prob_max",
"STREAM_org_prob_amean",
"STREAM_nt_prob_sum",
"STREAM_nt_prob_max",
"STREAM_nt_prob_amean",
]
def __init__(self):
#Resource.__init__(self)
self.dir_ = os.path.join(
os.getenv(u'TREC_DATA', u'.'), u'nugget-regressors')
if not os.path.exists(self.dir_):
os.makedirs(self.dir_)
def predict(self, event, df):
gbc = joblib.load(self.get_model_path(event))
return gbc.predict(df[self.cols].values)
def get_model_dir(self, event):
return os.path.join(self.dir_, event.fs_name())
def get_model_path(self, event):
return os.path.join(self.dir_, event.fs_name(), "gbc.pkl")
def get_job_units(self, event, corpus, **kwargs):
print self.get_model_path(event)
if not os.path.exists(self.get_model_path(event)):
return [0]
else:
return []
def do_job_unit(self, event, corpus, unit, **kwargs):
assert unit == 0
extractor = kwargs.get('extractor', "goose")
thresh = kwargs.get('thresh', .8)
delay = kwargs.get('delay', None)
topk = kwargs.get('topk', 20)
train_events = [e for e in cuttsum.events.get_events()
if e.query_num not in set([event.query_num, 7])]
res = InputStreamResource()
y = []
X = []
for train_event in train_events:
y_e = []
X_e = []
istream = res.get_dataframes(
train_event,
cuttsum.corpora.get_raw_corpus(train_event),
extractor, thresh, delay, topk)
for df in istream:
selector = (df["n conf"] == 1) & (df["nugget probs"].apply(len) == 0)
df.loc[selector, "nugget probs"] = \
df.loc[selector, "nuggets"].apply(lambda x: {n:1 for n in x})
df["probs"] = df["nugget probs"].apply(lambda x: [val for key, val in x.items()] +[0])
df["probs"] = df["probs"].apply(lambda x: np.max(x))
df.loc[(df["n conf"] == 1) & (df["nuggets"].apply(len) == 0), "probs"] = 0
y_t = df["probs"].values
y_t = y_t[:, np.newaxis]
y_e.append(y_t)
X_t = df[self.cols].values
X_e.append(X_t)
y_e = np.vstack(y_e)
y.append(y_e)
X_e = np.vstack(X_e)
X.append(X_e)
# print "WARNING NOT USING 2014 EVENTS"
X = np.vstack(X)
y = np.vstack(y)
gbc = GradientBoostingRegressor(
n_estimators=100, learning_rate=1.,
max_depth=3, random_state=0)
print "fitting", event
gbc.fit(X, y.ravel())
print event, "SCORE", gbc.score(X, y.ravel())
model_dir = self.get_model_dir(event)
if not os.path.exists(model_dir):
os.makedirs(model_dir)
joblib.dump(gbc, self.get_model_path(event), compress=9)
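# Prediction-side usage sketch (the event object and sentence dataframe are
# assumed to come from the same input stream resource used during training):
#   regressor = NuggetRegressor()
#   df["nugget prob"] = regressor.predict(event, df)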
|
{
"content_hash": "42f0e9e48dc510de4391f17582b59f97",
"timestamp": "",
"source": "github",
"line_count": 177,
"max_line_length": 102,
"avg_line_length": 29.548022598870055,
"alnum_prop": 0.5531548757170172,
"repo_name": "kedz/cuttsum",
"id": "889cf22b624a8ce65acc165c5386862af6ee484b",
"size": "5230",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "trec2015/cuttsum/classifiers/_nugget_regressor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "2065"
},
{
"name": "Makefile",
"bytes": "474"
},
{
"name": "Perl",
"bytes": "5644"
},
{
"name": "Python",
"bytes": "1375913"
},
{
"name": "Shell",
"bytes": "188220"
}
],
"symlink_target": ""
}
|
"""This code example gets all companies that are advertisers.
To create companies, run create_companies.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
Tags: CompanyService.getCompaniesByStatement
"""
__author__ = ('Nicholas Chen',
'Joseph DiLallo')
# Import appropriate modules from the client library.
from googleads import dfp
def main(client):
# Initialize appropriate service.
company_service = client.GetService('CompanyService', version='v201403')
# Create statement object to only select companies that are advertisers.
values = [{
'key': 'type',
'value': {
'xsi_type': 'TextValue',
'value': 'ADVERTISER'
}
}]
query = 'WHERE type = :type'
statement = dfp.FilterStatement(query, values)
# Get companies by statement.
while True:
response = company_service.getCompaniesByStatement(
statement.ToStatement())
if 'results' in response:
# Display results.
for company in response['results']:
print ('Company with ID \'%s\', name \'%s\', and type \'%s\' was found.'
% (company['id'], company['name'], company['type']))
statement.offset += dfp.SUGGESTED_PAGE_LIMIT
else:
break
print '\nNumber of results found: %s' % response['totalResultSetSize']
if __name__ == '__main__':
# Initialize client object.
dfp_client = dfp.DfpClient.LoadFromStorage()
main(dfp_client)
|
{
"content_hash": "733b9437218e3dcc41c8e0ffbc96d25e",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 80,
"avg_line_length": 30.58490566037736,
"alnum_prop": 0.674892041949414,
"repo_name": "dietrichc/streamline-ppc-reports",
"id": "bba89452cdd3cace6de942875af26e28a30b30b0",
"size": "2239",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/dfp/v201403/company_service/get_advertisers.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "492"
},
{
"name": "JavaScript",
"bytes": "504"
},
{
"name": "Python",
"bytes": "2235969"
}
],
"symlink_target": ""
}
|
from datetime import datetime
from pretty_timedelta import pretty_timedelta
__author__ = 'gautam'
def pretty_time(datetime_value):
now = datetime.now()
delta = datetime_value - now
return pretty_timedelta(delta)
|
{
"content_hash": "36e6c4ba562d3e9dedc97dc8254f749f",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 45,
"avg_line_length": 25.22222222222222,
"alnum_prop": 0.7268722466960352,
"repo_name": "gautamk/private-journal",
"id": "0c9159f93640d1e3ac687e449984922f647cb423",
"size": "227",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "util/custom_filters/pretty_time.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "11137"
},
{
"name": "JavaScript",
"bytes": "86057"
},
{
"name": "Python",
"bytes": "190124"
}
],
"symlink_target": ""
}
|
from setuptools import setup
setup(
name="idealreport",
packages=["idealreport"],
version="0.15",
description="Ideal Prediction reporting framework",
author="Ideal Prediction",
author_email="info@idealprediction.com",
url="https://github.com/idealprediction/idealreport",
download_url="https://github.com/idealprediction/idealreport/tarball/0.15",
keywords=["idealprediction", "report"],
classifiers=[],
package_data={"idealreport": ["htmlLibs/*.*", "template.html"]},
include_package_data=True,
setup_requires=["sphinx"],
install_requires=["htmltag", "pandas>=0.23.4"],
)
|
{
"content_hash": "f6528cf3424f263d8dc32f73aca6b651",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 79,
"avg_line_length": 35,
"alnum_prop": 0.6841269841269841,
"repo_name": "idealprediction/idealreport",
"id": "3e3fb9d56554137a78f5a777366b6a18f058ab98",
"size": "630",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2278"
},
{
"name": "HTML",
"bytes": "1879"
},
{
"name": "JavaScript",
"bytes": "6576643"
},
{
"name": "Python",
"bytes": "53908"
}
],
"symlink_target": ""
}
|
from swgpy.object import *
def create(kernel):
result = Creature()
result.template = "object/mobile/shared_dressed_gavyn_sykes.iff"
result.attribute_template_id = 9
result.stfName("npc_name","human_base_male")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
{
"content_hash": "8491552ddfb5dca661eb23c80db86862",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 65,
"avg_line_length": 23,
"alnum_prop": 0.6923076923076923,
"repo_name": "obi-two/Rebelion",
"id": "c72292fd3e293784ff3902052a6ae1ad8e5582d9",
"size": "444",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data/scripts/templates/object/mobile/shared_dressed_gavyn_sykes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11818"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2293610"
},
{
"name": "CMake",
"bytes": "39727"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7499185"
},
{
"name": "SQLPL",
"bytes": "41864"
}
],
"symlink_target": ""
}
|
import os
import tempfile
from smartcrop import smart_crop
class Cropper:
def __init__(self):
self.crop_x = 1280
self.crop_y = 800
    def crop(self, image_path):
        # Build the output path once instead of recomputing the temp-file
        # location for both the call and the return value.
        output_path = os.path.join(tempfile.gettempdir(), "_working_image_.jpeg")
        smart_crop(image_path, self.crop_x, self.crop_y, output_path, False)
        return output_path
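# A minimal usage sketch (the input file name is an assumption): smart_crop
# writes a 1280x800 crop into the system temp directory and crop() returns
# the resulting path.
if __name__ == '__main__':
    cropper = Cropper()
    print(cropper.crop("photo.jpg"))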
|
{
"content_hash": "7c62d4c59a739c8f31485181805f442b",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 112,
"avg_line_length": 27.153846153846153,
"alnum_prop": 0.6373937677053825,
"repo_name": "MtnFranke/rpi-photo-frame",
"id": "54511e34cfa01f1d0b2ceb6650f598bc48a7bc90",
"size": "353",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/image/cropper.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1391"
},
{
"name": "HTML",
"bytes": "1099"
},
{
"name": "JavaScript",
"bytes": "10052"
},
{
"name": "Python",
"bytes": "18196"
},
{
"name": "Shell",
"bytes": "3734"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, unicode_literals
from django_localflavor_mx.forms import (MXZipCodeField, MXRFCField,
MXStateSelect, MXCURPField)
from django.test import SimpleTestCase
from .forms import MXPersonProfileForm
class MXLocalFlavorTests(SimpleTestCase):
def setUp(self):
self.form = MXPersonProfileForm({
'state': 'MIC',
'rfc': 'toma880125kv3',
'curp': 'toma880125hmnrrn02',
'zip_code': '58120',
})
def test_get_display_methods(self):
"""Test that the get_*_display() methods are added to the model instances."""
place = self.form.save()
self.assertEqual(place.get_state_display(), 'Michoacán')
def test_errors(self):
"""Test that required MXFields throw appropriate errors."""
form = MXPersonProfileForm({
'state': 'Invalid state',
'rfc': 'invalid rfc',
'curp': 'invalid curp',
'zip_code': 'xxx',
})
self.assertFalse(form.is_valid())
self.assertEqual(form.errors['state'], ['Select a valid choice. Invalid state is not one of the available choices.'])
self.assertEqual(form.errors['rfc'], ['Enter a valid RFC.'])
self.assertEqual(form.errors['curp'], ['Ensure this value has at least 18 characters (it has 12).', 'Enter a valid CURP.'])
self.assertEqual(form.errors['zip_code'], ['Enter a valid zip code in the format XXXXX.'])
def test_field_blank_option(self):
"""Test that the empty option is there."""
state_select_html = """\
<select name="state" id="id_state">
<option value="">---------</option>
<option value="AGU">Aguascalientes</option>
<option value="BCN">Baja California</option>
<option value="BCS">Baja California Sur</option>
<option value="CAM">Campeche</option>
<option value="CHH">Chihuahua</option>
<option value="CHP">Chiapas</option>
<option value="COA">Coahuila</option>
<option value="COL">Colima</option>
<option value="DIF">Distrito Federal</option>
<option value="DUR">Durango</option>
<option value="GRO">Guerrero</option>
<option value="GUA">Guanajuato</option>
<option value="HID">Hidalgo</option>
<option value="JAL">Jalisco</option>
<option value="MEX">Estado de México</option>
<option value="MIC" selected="selected">Michoacán</option>
<option value="MOR">Morelos</option>
<option value="NAY">Nayarit</option>
<option value="NLE">Nuevo León</option>
<option value="OAX">Oaxaca</option>
<option value="PUE">Puebla</option>
<option value="QUE">Querétaro</option>
<option value="ROO">Quintana Roo</option>
<option value="SIN">Sinaloa</option>
<option value="SLP">San Luis Potosí</option>
<option value="SON">Sonora</option>
<option value="TAB">Tabasco</option>
<option value="TAM">Tamaulipas</option>
<option value="TLA">Tlaxcala</option>
<option value="VER">Veracruz</option>
<option value="YUC">Yucatán</option>
<option value="ZAC">Zacatecas</option>
</select>"""
self.assertHTMLEqual(str(self.form['state']), state_select_html)
def test_MXStateSelect(self):
f = MXStateSelect()
out = '''<select name="state">
<option value="AGU">Aguascalientes</option>
<option value="BCN">Baja California</option>
<option value="BCS">Baja California Sur</option>
<option value="CAM">Campeche</option>
<option value="CHH">Chihuahua</option>
<option value="CHP">Chiapas</option>
<option value="COA">Coahuila</option>
<option value="COL">Colima</option>
<option value="DIF">Distrito Federal</option>
<option value="DUR">Durango</option>
<option value="GRO">Guerrero</option>
<option value="GUA">Guanajuato</option>
<option value="HID">Hidalgo</option>
<option value="JAL">Jalisco</option>
<option value="MEX">Estado de México</option>
<option value="MIC" selected="selected">Michoacán</option>
<option value="MOR">Morelos</option>
<option value="NAY">Nayarit</option>
<option value="NLE">Nuevo León</option>
<option value="OAX">Oaxaca</option>
<option value="PUE">Puebla</option>
<option value="QUE">Querétaro</option>
<option value="ROO">Quintana Roo</option>
<option value="SIN">Sinaloa</option>
<option value="SLP">San Luis Potosí</option>
<option value="SON">Sonora</option>
<option value="TAB">Tabasco</option>
<option value="TAM">Tamaulipas</option>
<option value="TLA">Tlaxcala</option>
<option value="VER">Veracruz</option>
<option value="YUC">Yucatán</option>
<option value="ZAC">Zacatecas</option>
</select>'''
self.assertHTMLEqual(f.render('state', 'MIC'), out)
def test_MXZipCodeField(self):
error_format = ['Enter a valid zip code in the format XXXXX.']
valid = {
'58120': '58120',
'58502': '58502',
'59310': '59310',
'99999': '99999',
}
invalid = {
'17000': error_format,
'18000': error_format,
'19000': error_format,
'00000': error_format,
}
self.assertFieldOutput(MXZipCodeField, valid, invalid)
def test_MXRFCField(self):
error_format = ['Enter a valid RFC.']
error_checksum = ['Invalid checksum for RFC.']
valid = {
'MoFN641205eX5': 'MOFN641205EX5',
'ICa060120873': 'ICA060120873',
'eUcG751104rT0': 'EUCG751104RT0',
'GME08100195A': 'GME08100195A',
'AA&060524KX5': 'AA&060524KX5',
'CAÑ0708045P7': 'CAÑ0708045P7',
'aaa000101aa9': 'AAA000101AA9',
}
invalid = {
'MED0000000XA': error_format,
'0000000000XA': error_format,
'AAA000000AA6': error_format,
# Dates
'XXX880002XXX': error_format,
'XXX880200XXX': error_format,
'XXX880132XXX': error_format,
'XXX880230XXX': error_format,
'XXX880431XXX': error_format,
# Incorrect checksum
'MOGR650524E73': error_checksum,
'HVA7810058F1': error_checksum,
'MoFN641205eX2': error_checksum,
'ICa060120871': error_checksum,
'eUcG751104rT7': error_checksum,
'GME081001955': error_checksum,
'AA&060524KX9': error_checksum,
'CAÑ0708045P2': error_checksum,
}
self.assertFieldOutput(MXRFCField, valid, invalid)
def test_MXCURPField(self):
error_format = ['Enter a valid CURP.']
error_checksum = ['Invalid checksum for CURP.']
valid = {
'AaMG890608HDFLJL00': 'AAMG890608HDFLJL00',
'BAAd890419HMNRRV07': 'BAAD890419HMNRRV07',
'VIAA900930MMNClL08': 'VIAA900930MMNCLL08',
'HEGR891009HMNRRD09': 'HEGR891009HMNRRD09',
'MARR890512HMNRMN09': 'MARR890512HMNRMN09',
'MESJ890928HMNZNS00': 'MESJ890928HMNZNS00',
'BAAA890317HDFRLL03': 'BAAA890317HDFRLL03',
'TOMA880125HMNRRNO2': 'TOMA880125HMNRRNO2',
'OOMG890727HMNRSR06': 'OOMG890727HMNRSR06',
'AAAA000101HDFCCC09': 'AAAA000101HDFCCC09',
}
invalid = {
'AAAA000000HDFCCC09': error_format,
'AAAA000000HDFAAA03': error_format,
'AAAA000000HXXCCC08': error_format,
'AAAA000000XMNCCC02': error_format,
'HEGR891009HMNRRD0A': error_format,
'MARR890512HMNRMN0A': error_format,
'AaMG890608HDFLJL01': error_checksum,
'BAAd890419HMNRRV08': error_checksum,
'VIAA900930MMNClL09': error_checksum,
'MESJ890928HMNZNS01': error_checksum,
'BAAA890317HDFRLL04': error_checksum,
'TOMA880125HMNRRNO3': error_checksum,
'OOMG890727HMNRSR07': error_checksum,
}
self.assertFieldOutput(MXCURPField, valid, invalid)
|
{
"content_hash": "9fe2757150c77af6d605066e25b06d6c",
"timestamp": "",
"source": "github",
"line_count": 197,
"max_line_length": 131,
"avg_line_length": 39.263959390862944,
"alnum_prop": 0.6360698125404007,
"repo_name": "andres-torres-marroquin/django-localflavor-mx",
"id": "4dcb8edca4506e6346c46b9f2cf8c974e92030d9",
"size": "7775",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "21702"
}
],
"symlink_target": ""
}
|
from django.contrib import admin
from treasure_hunt.models import Level, UserProfile
admin.site.register(Level)
admin.site.register(UserProfile)
|
{
"content_hash": "6e338aef919ae74cab9930dddb49ef5f",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 51,
"avg_line_length": 29,
"alnum_prop": 0.8413793103448276,
"repo_name": "code-haven/Django-treasurehunt-demo",
"id": "2320eacd63d4a41afc699215a854b7203684374e",
"size": "145",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "treasure_hunt/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "212439"
},
{
"name": "JavaScript",
"bytes": "74673"
},
{
"name": "Python",
"bytes": "49763"
},
{
"name": "Shell",
"bytes": "63"
}
],
"symlink_target": ""
}
|
"""
RenderPipeline
Copyright (c) 2014-2016 tobspr <tobias.springer1@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from rplibs.yaml import load_yaml_file
from rpcore.rpobject import RPObject
class TaskScheduler(RPObject):
""" This class manages the scheduled tasks and splits them over multiple
frames. Plugins can query whether their subtasks should be executed
    or queued for later frames. Also performs analysis on the task configuration
    to figure out whether the tasks are distributed uniformly. """
def __init__(self, pipeline):
RPObject.__init__(self)
self._pipeline = pipeline
self._tasks = []
self._frame_index = 0
self._load_config()
def _load_config(self):
""" Loads the tasks distribution configuration """
config = load_yaml_file("/$$rpconfig/task-scheduler.yaml")["frame_cycles"]
for frame_name, tasks in config: # pylint: disable=unused-variable
self._tasks.append(tasks)
def _check_missing_schedule(self, task_name):
""" Checks whether the given task is scheduled at some point. This can
be used to check whether any task is missing in the task scheduler config. """
for tasks in self._tasks:
if task_name in tasks:
break
else:
self.error("Task '" + task_name + "' is never scheduled and thus will never run!")
def is_scheduled(self, task_name):
""" Returns whether a given task is supposed to run this frame """
self._check_missing_schedule(task_name)
return task_name in self._tasks[self._frame_index]
def step(self):
""" Advances one frame """
self._frame_index = (self._frame_index + 1) % len(self._tasks)
@property
def num_tasks(self):
""" Returns the total amount of tasks """
return sum((len(i) for i in self._tasks))
@property
def num_scheduled_tasks(self):
""" Returns the amount of scheduled tasks this frame """
return len(self._tasks[self._frame_index])
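# A usage sketch (illustrative only; a real RenderPipeline instance and the
# task-scheduler.yaml config are required, and "update_shadows" is an assumed
# task name). A plugin asks once per frame whether its subtask is due, and the
# scheduler is advanced at the end of the frame:
#
#   scheduler = TaskScheduler(pipeline)
#   if scheduler.is_scheduled("update_shadows"):
#       update_shadows()
#   scheduler.step()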
|
{
"content_hash": "bfe20ebb149e569f7ae029677062b64c",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 94,
"avg_line_length": 39.55844155844156,
"alnum_prop": 0.6943532501641497,
"repo_name": "eswartz/RenderPipeline",
"id": "03d89869e53c588243c85282fa169916e58d7c85",
"size": "3046",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "rpcore/util/task_scheduler.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1241"
},
{
"name": "C",
"bytes": "21397"
},
{
"name": "C++",
"bytes": "160537"
},
{
"name": "GLSL",
"bytes": "712004"
},
{
"name": "Groff",
"bytes": "114"
},
{
"name": "Python",
"bytes": "1374140"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import time
import fixtures
from botocore.exceptions import ClientError as BotoClientError
from botocore import session as botocore_session
from glanceclient import client as glance_client
from keystoneauth1.identity import v3 as identity_v3
from keystoneauth1 import session
from neutronclient.neutron import client as neutron_client
from novaclient import client as nova_client
from novaclient import exceptions as nova_exc
from oslo_utils import uuidutils
from saharaclient.api import base as saharaclient_base
from saharaclient import client as sahara_client
import six
from swiftclient import client as swift_client
from swiftclient import exceptions as swift_exc
from tempest.lib import exceptions as exc
from sahara_tests.scenario import timeouts
from sahara_tests.scenario import utils
def get_session(auth_url=None, username=None, password=None,
project_name=None, verify=True, cert=None):
auth_url_fixed = auth_url.replace('/v2.0', '/v3')
if not auth_url_fixed.endswith('/v3'):
auth_url_fixed += '/v3'
auth = identity_v3.Password(auth_url=auth_url_fixed,
username=username,
password=password,
project_name=project_name,
user_domain_name='default',
project_domain_name='default')
return session.Session(auth=auth, verify=verify, cert=cert)
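# A minimal usage sketch (credentials are placeholders): a legacy /v2.0
# Keystone URL is rewritten to its /v3 endpoint before the password plugin
# and session are built.
#
#   sess = get_session(auth_url='http://keystone.example:5000/v2.0',
#                      username='demo', password='secret',
#                      project_name='demo-project')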
class Client(object):
def is_resource_deleted(self, method, *args, **kwargs):
raise NotImplementedError
def delete_resource(self, method, *args, **kwargs):
with fixtures.Timeout(
timeouts.Defaults.instance.timeout_delete_resource,
gentle=True):
while True:
if self.is_resource_deleted(method, *args, **kwargs):
break
time.sleep(5)
class SaharaClient(Client):
def __init__(self, *args, **kwargs):
self.api_version = '1.1'
if 'api_version' in kwargs:
self.api_version = kwargs['api_version']
del kwargs['api_version']
self.sahara_client = sahara_client.Client(self.api_version, *args,
**kwargs)
def create_node_group_template(self, *args, **kwargs):
data = self.sahara_client.node_group_templates.create(*args, **kwargs)
return data.id
def delete_node_group_template(self, node_group_template_id):
return self.delete_resource(
self.sahara_client.node_group_templates.delete,
node_group_template_id)
def create_cluster_template(self, *args, **kwargs):
data = self.sahara_client.cluster_templates.create(*args, **kwargs)
return data.id
def delete_cluster_template(self, cluster_template_id):
return self.delete_resource(
self.sahara_client.cluster_templates.delete,
cluster_template_id)
def create_cluster(self, *args, **kwargs):
data = self.sahara_client.clusters.create(*args, **kwargs)
return data.id
def delete_cluster(self, cluster_id):
return self.delete_resource(
self.sahara_client.clusters.delete,
cluster_id)
def scale_cluster(self, cluster_id, body):
return self.sahara_client.clusters.scale(cluster_id, body)
def start_cluster_verification(self, cluster_id):
return self.sahara_client.clusters.verification_update(cluster_id,
'START')
def create_datasource(self, *args, **kwargs):
data = self.sahara_client.data_sources.create(*args, **kwargs)
return data.id
def get_datasource(self, *args, **kwargs):
return self.sahara_client.data_sources.get(*args, **kwargs)
def delete_datasource(self, datasource_id):
return self.delete_resource(
self.sahara_client.data_sources.delete,
datasource_id)
def create_job_binary_internal(self, *args, **kwargs):
data = self.sahara_client.job_binary_internals.create(*args, **kwargs)
return data.id
def delete_job_binary_internal(self, job_binary_internal_id):
return self.delete_resource(
self.sahara_client.job_binary_internals.delete,
job_binary_internal_id)
def create_job_binary(self, *args, **kwargs):
data = self.sahara_client.job_binaries.create(*args, **kwargs)
return data.id
def delete_job_binary(self, job_binary_id):
return self.delete_resource(
self.sahara_client.job_binaries.delete,
job_binary_id)
def create_job_template(self, *args, **kwargs):
if self.api_version == '1.1':
data = self.sahara_client.jobs.create(*args, **kwargs)
else:
data = self.sahara_client.job_templates.create(*args, **kwargs)
return data.id
def delete_job_template(self, job_id):
if self.api_version == '1.1':
delete_function = self.sahara_client.jobs.delete
else:
delete_function = self.sahara_client.job_templates.delete
return self.delete_resource(delete_function, job_id)
def run_job(self, *args, **kwargs):
if self.api_version == '1.1':
data = self.sahara_client.job_executions.create(*args, **kwargs)
else:
data = self.sahara_client.jobs.create(*args, **kwargs)
return data.id
def delete_job_execution(self, job_execution_id):
if self.api_version == '1.1':
delete_function = self.sahara_client.job_executions.delete
else:
delete_function = self.sahara_client.jobs.delete
return self.delete_resource(delete_function, job_execution_id)
def get_cluster(self, cluster_id, show_progress=False):
return self.sahara_client.clusters.get(cluster_id, show_progress)
def get_cluster_status(self, cluster_id):
data = self.sahara_client.clusters.get(cluster_id)
return str(data.status)
def get_job_status(self, exec_id):
if self.api_version == '1.1':
data = self.sahara_client.job_executions.get(exec_id)
else:
data = self.sahara_client.jobs.get(exec_id)
return str(data.info['status'])
def get_job_info(self, exec_id):
if self.api_version == '1.1':
job_execution = self.sahara_client.job_executions.get(exec_id)
else:
job_execution = self.sahara_client.jobs.get(exec_id)
return self.sahara_client.jobs.get(job_execution.job_id)
def get_cluster_id(self, name):
if uuidutils.is_uuid_like(name):
return name
for cluster in self.sahara_client.clusters.list():
if cluster.name == name:
return cluster.id
def get_node_group_template_id(self, name):
for nodegroup in self.sahara_client.node_group_templates.list():
if nodegroup.name == name:
return nodegroup.id
def register_image(self, image_id, testcase):
try:
return self.sahara_client.images.get(image_id)
except saharaclient_base.APIException:
            print("Image not registered in sahara. Registering it and running tests")
if testcase.get('image_username') is not None:
self.sahara_client.images.update_image(
image_id, testcase.get('image_username'),
"Registered by scenario tests")
self.sahara_client.images.update_tags(
image_id, [testcase["plugin_name"],
testcase["plugin_version"]])
else:
raise exc.InvalidContentType(
"Registering of image failed. Please, specify "
"'image_username'. For details see README in scenario "
"tests.")
return self.sahara_client.images.get(image_id)
def is_resource_deleted(self, method, *args, **kwargs):
try:
method(*args, **kwargs)
except saharaclient_base.APIException as ex:
return ex.error_code == 404
return False
class NovaClient(Client):
def __init__(self, *args, **kwargs):
self.nova_client = nova_client.Client('2', *args, **kwargs)
def get_flavor_id(self, flavor_name):
if (uuidutils.is_uuid_like(flavor_name) or
(isinstance(flavor_name, six.string_types) and
flavor_name.isdigit())):
return flavor_name
for flavor in self.nova_client.flavors.list():
if flavor.name == flavor_name:
return flavor.id
raise exc.NotFound(flavor_name)
def create_flavor(self, flavor_object):
return self.nova_client.flavors.create(
flavor_object.get('name', utils.rand_name('scenario')),
flavor_object.get('ram', 1),
flavor_object.get('vcpus', 1),
flavor_object.get('root_disk', 0),
ephemeral=flavor_object.get('ephemeral_disk', 0),
swap=flavor_object.get('swap_disk', 0),
flavorid=flavor_object.get('id', 'auto'))
def delete_flavor(self, flavor_id):
return self.delete_resource(self.nova_client.flavors.delete, flavor_id)
def delete_keypair(self, key_name):
return self.delete_resource(
self.nova_client.keypairs.delete, key_name)
def is_resource_deleted(self, method, *args, **kwargs):
try:
method(*args, **kwargs)
except nova_exc.NotFound as ex:
return ex.code == 404
return False
class NeutronClient(Client):
def __init__(self, *args, **kwargs):
self.neutron_client = neutron_client.Client('2.0', *args, **kwargs)
def get_network_id(self, network_name):
if uuidutils.is_uuid_like(network_name):
return network_name
networks = self.neutron_client.list_networks(name=network_name)
networks = networks['networks']
if len(networks) < 1:
raise exc.NotFound(network_name)
return networks[0]['id']
def create_security_group_for_neutron(self, sg_name):
security_group = self.neutron_client.create_security_group({
"security_group":
{
"name": sg_name,
"description": "Just for test"
}
})
return security_group['security_group']['id']
def get_security_group_id(self, sg_name):
for sg in (self.neutron_client.list_security_groups()
["security_groups"]):
if sg['name'] == sg_name:
return sg['id']
raise exc.NotFound(sg_name)
def add_security_group_rule_for_neutron(self, sg_id):
return self.neutron_client.create_security_group_rule({
"security_group_rules": [
{
"direction": "ingress",
"ethertype": "IPv4",
"port_range_max": 65535,
"port_range_min": 1,
"protocol": "TCP",
"remote_group_id": None,
"remote_ip_prefix": None,
"security_group_id": sg_id
},
{
"direction": "egress",
"ethertype": "IPv4",
"port_range_max": 65535,
"port_range_min": 1,
"protocol": "TCP",
"remote_group_id": None,
"remote_ip_prefix": None,
"security_group_id": sg_id
}
]
})
def delete_security_group_for_neutron(self, sg_id):
return self.neutron_client.delete_security_group(sg_id)
class SwiftClient(Client):
def __init__(self, *args, **kwargs):
self.swift_client = swift_client.Connection(*args, **kwargs)
def create_container(self, container_name):
return self.swift_client.put_container(container_name)
def delete_container(self, container_name):
objects = self._get_objects(container_name)
for obj in objects:
self.delete_object(container_name, obj)
return self.delete_resource(
self.swift_client.delete_container, container_name)
def _get_objects(self, container_name):
metadata = self.swift_client.get_container(container_name)
objects = []
for obj in metadata[1]:
objects.append(obj['name'])
return objects[::-1]
def upload_data(self, container_name, object_name, data):
return self.swift_client.put_object(container_name, object_name, data)
def delete_object(self, container_name, object_name):
return self.delete_resource(
self.swift_client.delete_object,
container_name,
object_name)
def is_resource_deleted(self, method, *args, **kwargs):
try:
method(*args, **kwargs)
except swift_exc.ClientException as ex:
return ex.http_status == 404
return False
class BotoClient(Client):
def __init__(self, *args, **kwargs):
sess = botocore_session.get_session()
self.boto_client = sess.create_client(
's3',
endpoint_url=kwargs['endpoint'],
aws_access_key_id=kwargs['accesskey'],
aws_secret_access_key=kwargs['secretkey']
)
def create_bucket(self, bucket_name):
return self.boto_client.create_bucket(Bucket=bucket_name)
def _delete_and_check_bucket(self, bucket_name):
bucket_deleted = False
operation_parameters = {'Bucket': bucket_name}
try:
# While list_objects_v2 is the suggested function, pagination
            # does not seem to work properly with RadosGW when it's used.
paginator = self.boto_client.get_paginator('list_objects')
page_iterator = paginator.paginate(**operation_parameters)
for page in page_iterator:
if 'Contents' not in page:
continue
for item in page['Contents']:
self.boto_client.delete_object(Bucket=bucket_name,
Key=item['Key'])
self.boto_client.delete_bucket(Bucket=bucket_name)
except BotoClientError as ex:
error = ex.response.get('Error', {})
# without the conversion the value is a tuple
error_code = '%s' % (error.get('Code', ''))
if error_code == 'NoSuchBucket':
bucket_deleted = True
return bucket_deleted
def delete_bucket(self, bucket_name):
return self.delete_resource(
self._delete_and_check_bucket, bucket_name)
def upload_data(self, bucket_name, object_name, data):
return self.boto_client.put_object(
Bucket=bucket_name,
Key=object_name,
Body=data)
def _delete_and_check_object(self, bucket_name, object_name):
self.boto_client.delete_object(Bucket=bucket_name, Key=object_name)
object_deleted = False
try:
self.boto_client.head_object(Bucket=bucket_name, Key=object_name)
except BotoClientError as ex:
error = ex.response.get('Error', {})
# without the conversion the value is a tuple
error_code = '%s' % (error.get('Code', ''))
if error_code == '404':
object_deleted = True
return object_deleted
def delete_object(self, bucket_name, object_name):
return self.delete_resource(
self._delete_and_check_object,
bucket_name, object_name)
def is_resource_deleted(self, method, *args, **kwargs):
# Exceptions are handled directly inside the call to "method",
# because they are not the same for objects and buckets.
return method(*args, **kwargs)
class GlanceClient(Client):
def __init__(self, *args, **kwargs):
self.glance_client = glance_client.Client('2', *args, **kwargs)
def get_image_id(self, image_name):
if uuidutils.is_uuid_like(image_name):
return image_name
for image in self.glance_client.images.list():
if image.name == image_name:
return image.id
raise exc.NotFound(image_name)
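# A sketch of the deletion pattern shared by these wrappers (the variables
# below are placeholders): delete_resource() keeps invoking the client's
# delete call until is_resource_deleted() observes a 404, bounded by the
# timeout_delete_resource fixture timeout.
#
#   sahara = SaharaClient(session=sess)
#   sahara.delete_cluster(cluster_id)  # blocks until the cluster is gone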
|
{
"content_hash": "78caa067344f0df4a626196ca650cb98",
"timestamp": "",
"source": "github",
"line_count": 436,
"max_line_length": 79,
"avg_line_length": 37.89908256880734,
"alnum_prop": 0.5913217138707335,
"repo_name": "openstack/sahara-scenario",
"id": "28b718f3d3d7b793bd59437b82af313f58efd9f4",
"size": "17107",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "sahara_tests/scenario/clients.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "3609"
},
{
"name": "Mako",
"bytes": "52267"
},
{
"name": "PigLatin",
"bytes": "792"
},
{
"name": "Python",
"bytes": "128069"
},
{
"name": "Shell",
"bytes": "47"
}
],
"symlink_target": ""
}
|
from csv_unicode import UnicodeReader
#def open_if_filename(f):
def csv_gen(f=None, headers=None):
"""Attempts to act on the filelike object f."""
reader = UnicodeReader(f)
if not headers:
# Assume first row is headers
headers = reader.next()
for row in reader:
yield dict(zip(headers, row))
class CSVGen(object):
def __init__(self, f, headers=None):
self.reader = UnicodeReader(f)
if not headers:
headers = self.reader.next()
self.headers = headers
self.mapping = dict(zip(headers, range(len(headers))))
self.item = None
def __getitem__(self, key):
return self.item[self.mapping[key]]
    def __iter__(self):
        # An iterator returns itself; rows are consumed in next(). Advancing
        # in __iter__ (as before) silently skipped the first data row.
        return self
    def next(self):
        # Advance to the next row; the underlying reader raises
        # StopIteration at end of input, which ends any for-loop.
        self.item = self.reader.next()
        return self
def keys(self):
return self.headers
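# A minimal usage sketch (the file name is an assumption): the first row of
# "data.csv" is taken as headers and each subsequent row is exposed as a
# header -> value mapping.
if __name__ == '__main__':
    with open('data.csv', 'rb') as f:
        for row in csv_gen(f):
            print(row)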
|
{
"content_hash": "53a8b69bb14d964c695689a3d1526853",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 62,
"avg_line_length": 26.676470588235293,
"alnum_prop": 0.5788313120176406,
"repo_name": "marcharper/pyhelpers",
"id": "3e58f64fad5f1280c7c041b86f1ecda451b91ed2",
"size": "907",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "csv_utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "11919"
}
],
"symlink_target": ""
}
|
from Xlib.display import Display
from Xlib import X
class Workarea(object):
'''
This class contains some utils methods to simplify setting the Extended Window Manager Hints (EWMH).
Especially the property "_NET_WORKAREA" is important, as it fetches the size of the current desktop,
without status bars or dock bars. Thus it returns the usable space for 'normal' windows, called workarea.
    The downside of this approach is that it works only if one screen is connected. If two or more screens are
    connected, they will be handled as one big screen. The returned size is then not usable, as the individual
    screens may have different resolutions and different status bars and dock bars. Therefore another approach is
    needed to detect the workarea of each screen.
'''
__display = Display()
_root_window = __display.screen().root
atom = __display.intern_atom
_workarea = _root_window.get_full_property(atom("_NET_WORKAREA"), X.AnyPropertyType).value
#upper_corner = _workarea[:2]
#screen_width = _workarea[2]
#screen_height = _workarea[3]
@staticmethod
def get_all_XIDs():
"""
Returns all IDs of the windows of the X server. These IDs are called XIDs.
:return: all XIDs
:rtype: list
"""
return Workarea._root_window.get_full_property(Workarea.atom("_NET_CLIENT_LIST_STACKING"),
X.AnyPropertyType).value
@staticmethod
def get_current_desktop():
"""
The index of the current desktop. This is always an integer between 0 and _NET_NUMBER_OF_DESKTOPS - 1.
:return: index of the current desktop
:rtype: int
"""
return Workarea._root_window.get_full_property(Workarea.atom("_NET_CURRENT_DESKTOP"), X.AnyPropertyType).value[
0]
@staticmethod
def get_workarea_width():
"""
Returns the workarea width.
:return: the workarea width
:rtype: int
"""
return Workarea._root_window.get_full_property(Workarea.atom("_NET_WORKAREA"), X.AnyPropertyType).value[2]
@staticmethod
def get_workarea_height():
"""
Returns the workarea height.
:return: the workarea height
:rtype: int
"""
return Workarea._root_window.get_full_property(Workarea.atom("_NET_WORKAREA"), X.AnyPropertyType).value[3]
@staticmethod
def get_upper_corner():
"""
Returns the left upper corner of the workarea relative to the desktop.
:return: the left upper corner of the workarea
:rtype: list
"""
return Workarea._root_window.get_full_property(Workarea.atom("_NET_WORKAREA"), X.AnyPropertyType).value[:2]
@staticmethod
def get_upper_corner_X():
"""
Returns the X coordinate of the left upper corner of the workarea, relative to the desktop.
:return: the X coordinate of the upper corner of the workarea
:rtype: int
"""
return Workarea.get_upper_corner()[0]
@staticmethod
def get_upper_corner_Y():
"""
Returns the Y coordinate of the left upper corner of the workarea, relative to the desktop.
:return: the Y coordinate of the upper corner of the workarea
:rtype: int
"""
return Workarea.get_upper_corner()[1]
@staticmethod
def get_root():
"""
Returns the root window of the current X screen. Usually there is only one X screen as multiscreen support is
provided by an X server extension.
E.g. RandR
:return: the root window
:rtype: Xlib.display.Window
"""
return Workarea._root_window
@staticmethod
def get_display():
"""
Returns the X display
:return: the X display
:rtype: Xlib.display.Display
"""
return Workarea.__display
    def __init__(self):
        '''
        Constructor
        '''
        pass
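# A minimal usage sketch (requires a running X11 session): prints the usable
# desktop area as reported by the window manager via the _NET_WORKAREA hint.
if __name__ == '__main__':
    print('workarea: %dx%d at (%d, %d)' % (
        Workarea.get_workarea_width(), Workarea.get_workarea_height(),
        Workarea.get_upper_corner_X(), Workarea.get_upper_corner_Y()))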
|
{
"content_hash": "e2f743e0f3a1270dfaa3d32ec8fa0825",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 119,
"avg_line_length": 32.264,
"alnum_prop": 0.6226134391272006,
"repo_name": "gillesB/azulejo",
"id": "3e46a0d0722e8eb8949d272fdb751dac3bfd5fb9",
"size": "4033",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "azulejo/Workarea.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "32393"
},
{
"name": "Shell",
"bytes": "161"
}
],
"symlink_target": ""
}
|
"""Compute v2 Server action implementations"""
from osc_lib.command import command
from osc_lib import exceptions
from osc_lib import utils
from oslo_utils import importutils
from openstackclient.i18n import _
class CreateServerBackup(command.ShowOne):
_description = _("Create a server backup image")
IMAGE_API_VERSIONS = {
"1": "openstackclient.image.v1.image",
"2": "openstackclient.image.v2.image",
}
def get_parser(self, prog_name):
parser = super(CreateServerBackup, self).get_parser(prog_name)
parser.add_argument(
'server',
metavar='<server>',
help=_('Server to back up (name or ID)'),
)
parser.add_argument(
'--name',
metavar='<image-name>',
help=_('Name of the backup image (default: server name)'),
)
parser.add_argument(
'--type',
metavar='<backup-type>',
help=_(
'Used to populate the backup_type property of the backup '
'image (default: empty)'
),
)
parser.add_argument(
'--rotate',
metavar='<count>',
type=int,
help=_('Number of backups to keep (default: 1)'),
)
parser.add_argument(
'--wait',
action='store_true',
help=_('Wait for backup image create to complete'),
)
return parser
def take_action(self, parsed_args):
def _show_progress(progress):
if progress:
self.app.stderr.write('\rProgress: %s' % progress)
self.app.stderr.flush()
compute_client = self.app.client_manager.compute
server = utils.find_resource(
compute_client.servers,
parsed_args.server,
)
# Set sane defaults as this API wants all mouths to be fed
if parsed_args.name is None:
backup_name = server.name
else:
backup_name = parsed_args.name
if parsed_args.type is None:
backup_type = ""
else:
backup_type = parsed_args.type
if parsed_args.rotate is None:
backup_rotation = 1
else:
backup_rotation = parsed_args.rotate
compute_client.servers.backup(
server.id,
backup_name,
backup_type,
backup_rotation,
)
image_client = self.app.client_manager.image
image = utils.find_resource(
image_client.images,
backup_name,
)
if parsed_args.wait:
if utils.wait_for_status(
image_client.images.get,
image.id,
callback=_show_progress,
):
self.app.stdout.write('\n')
else:
msg = _('Error creating server backup: %s') % parsed_args.name
raise exceptions.CommandError(msg)
if self.app.client_manager._api_version['image'] == '1':
info = {}
info.update(image._info)
info['properties'] = utils.format_dict(info.get('properties', {}))
else:
# Get the right image module to format the output
image_module = importutils.import_module(
self.IMAGE_API_VERSIONS[
self.app.client_manager._api_version['image']
]
)
info = image_module._format_image(image)
return zip(*sorted(info.items()))
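# A command-line usage sketch (server and image names are placeholders); this
# class implements the "openstack server backup create" command:
#
#   openstack server backup create --name nightly --type daily \
#       --rotate 7 --wait my-server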
|
{
"content_hash": "168f1a85b4f0ea208925fbc52beee85d",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 78,
"avg_line_length": 30.81896551724138,
"alnum_prop": 0.5267132867132868,
"repo_name": "dtroyer/python-openstackclient",
"id": "1d560dc0c70abac456380e693ca95b081bad6009",
"size": "4188",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "openstackclient/compute/v2/server_backup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "4040230"
},
{
"name": "Shell",
"bytes": "299"
}
],
"symlink_target": ""
}
|
from django.apps import AppConfig
class WebsiteConfig(AppConfig):
name = 'WebSite'
|
{
"content_hash": "14f6773df92be13c39c86330ff174de1",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 33,
"avg_line_length": 17.8,
"alnum_prop": 0.7528089887640449,
"repo_name": "MassyMeniche/Project_Y",
"id": "d106dc5a51d45fac54b785036482a239f4408b24",
"size": "89",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "WebSite/apps.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "29847"
},
{
"name": "HTML",
"bytes": "48942"
},
{
"name": "JavaScript",
"bytes": "106333"
},
{
"name": "Python",
"bytes": "27012"
}
],
"symlink_target": ""
}
|
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'Question.qtemplate'
db.delete_column('website_question', 'qtemplate_id')
def backwards(self, orm):
# Adding field 'Question.qtemplate'
db.add_column('website_question', 'qtemplate',
self.gf('django.db.models.fields.related.ForeignKey')(default=0, to=orm['website.Template'], null=True, blank=True),
keep_default=False)
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'website.action': {
'Meta': {'object_name': 'Action'},
'action_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.ActionCategory']", 'null': 'True', 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'entity_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'entity_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '32', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'level': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingLevel']", 'null': 'True', 'blank': 'True'}),
'question_category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.QuestionCategory']", 'null': 'True', 'blank': 'True'}),
'scale': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.actioncategory': {
'Meta': {'object_name': 'ActionCategory'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'points': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'rating_category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingCategory']", 'null': 'True', 'blank': 'True'})
},
'website.actiontutorial': {
'Meta': {'object_name': 'ActionTutorial'},
'action_identifier': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'tutorial': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Tutorial']", 'null': 'True', 'blank': 'True'})
},
'website.address': {
'Meta': {'object_name': 'Address'},
'address1': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'address2': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'}),
'zip_code': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'})
},
'website.answerchoice': {
'Meta': {'object_name': 'AnswerChoice'},
'answer_choice_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.AnswerChoiceGroup']"}),
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
'website.answerchoicegroup': {
'Meta': {'object_name': 'AnswerChoiceGroup'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'})
},
'website.answerreference': {
'Meta': {'object_name': 'AnswerReference'},
'approval_status': ('django.db.models.fields.CharField', [], {'default': "'P'", 'max_length': '8', 'db_index': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'file_upload': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_callout': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_current': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'migrated_answer_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Question']"}),
'rating': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'rating_status': ('django.db.models.fields.CharField', [], {'default': "'U'", 'max_length': '8', 'db_index': 'True', 'blank': 'True'}),
'status_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'website.applicability': {
'Meta': {'object_name': 'Applicability'},
'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'reviewed': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'website.application': {
'Meta': {'object_name': 'Application'},
'address': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Address']", 'null': 'True', 'blank': 'True'}),
'applicant': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'current_status': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '8', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'status_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'template': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Template']", 'null': 'True', 'blank': 'True'})
},
'website.applicationanswer': {
'Meta': {'object_name': 'ApplicationAnswer'},
'application': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Application']"}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'file_upload': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Question']"}),
'template': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Template']"}),
'value': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'website.applicationhistory': {
'Meta': {'object_name': 'ApplicationHistory'},
'application': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Application']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '8', 'null': 'True', 'blank': 'True'}),
'status_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'status_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'})
},
'website.comment': {
'Meta': {'object_name': 'Comment'},
'approval_status': ('django.db.models.fields.CharField', [], {'default': "'P'", 'max_length': '8', 'db_index': 'True', 'blank': 'True'}),
'comment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'comment_type': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '8', 'db_index': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'entity_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'entity_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '32', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'parent_comment': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'parent_reference'", 'null': 'True', 'to': "orm['website.Comment']"}),
'rating': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'rating_status': ('django.db.models.fields.CharField', [], {'default': "'U'", 'max_length': '8', 'db_index': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.document': {
'Meta': {'object_name': 'Document'},
'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'file_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'file_path': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'region': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Region']", 'null': 'True', 'blank': 'True'}),
'reviewed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'})
},
'website.documentcategory': {
'Meta': {'object_name': 'DocumentCategory'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'website.entityview': {
'Meta': {'object_name': 'EntityView'},
'entity_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'entity_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '32', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latest_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'session_key': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '40', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.entityviewcount': {
'Meta': {'object_name': 'EntityViewCount'},
'count_30_days': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'entity_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'entity_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '32', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'total_count': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'})
},
'website.jurisdiction': {
'Meta': {'object_name': 'Jurisdiction'},
'city': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'county': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction_type': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'}),
'last_contributed': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'last_contributed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'last_contributed_by_org': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'_org_contributor'", 'null': 'True', 'to': "orm['website.Organization']"}),
'latitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'_org_jurisdiction'", 'null': 'True', 'to': "orm['website.Organization']"}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'parent_jurisdiction'", 'null': 'True', 'to': "orm['website.Jurisdiction']"}),
'region': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Region']", 'null': 'True', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '8', 'null': 'True', 'blank': 'True'})
},
'website.jurisdictioncontributor': {
'Meta': {'object_name': 'JurisdictionContributor'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True', 'blank': 'True'}),
'points': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'question_category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.QuestionCategory']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.organization': {
'Meta': {'object_name': 'Organization'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.OrganizationCategory']", 'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'fax': ('django.contrib.localflavor.us.models.PhoneNumberField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'logo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'logo_scaled': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'parent_org': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True', 'blank': 'True'}),
'phone': ('django.contrib.localflavor.us.models.PhoneNumberField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'A'", 'max_length': '8', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'status_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'website.organizationaddress': {
'Meta': {'object_name': 'OrganizationAddress'},
'address': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Address']", 'null': 'True', 'blank': 'True'}),
'address_type': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '8', 'null': 'True', 'blank': 'True'}),
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True', 'blank': 'True'})
},
'website.organizationcategory': {
'Meta': {'object_name': 'OrganizationCategory'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'website.organizationmember': {
'Meta': {'object_name': 'OrganizationMember'},
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invitation_key': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'invitor': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'_member_invitor'", 'null': 'True', 'to': "orm['auth.User']"}),
'join_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True', 'blank': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Person']", 'null': 'True', 'blank': 'True'}),
'requested_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'role': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RoleType']", 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'P'", 'max_length': '8', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'_member_user'", 'null': 'True', 'to': "orm['auth.User']"})
},
'website.organizationrating': {
'Meta': {'object_name': 'OrganizationRating'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingCategory']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingLevel']", 'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True', 'blank': 'True'}),
'scale': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'updated_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'website.person': {
'Meta': {'object_name': 'Person'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'phone_mobile': ('django.contrib.localflavor.us.models.PhoneNumberField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'phone_primary': ('django.contrib.localflavor.us.models.PhoneNumberField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'phone_secondary': ('django.contrib.localflavor.us.models.PhoneNumberField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.personaddress': {
'Meta': {'object_name': 'PersonAddress'},
'address': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Address']", 'null': 'True', 'blank': 'True'}),
'address_type': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '8', 'null': 'True', 'blank': 'True'}),
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Person']", 'null': 'True', 'blank': 'True'})
},
'website.question': {
'Meta': {'object_name': 'Question'},
'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'answer_choice_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.AnswerChoiceGroup']", 'null': 'True', 'blank': 'True'}),
'applicability': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Applicability']", 'null': 'True', 'blank': 'True'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.QuestionCategory']", 'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'default_value': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'field_attributes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'form_type': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'}),
'has_multivalues': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instruction': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'js': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'label': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'question': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'reviewed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'template': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'terminology': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'validation_class': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'website.questioncategory': {
'Meta': {'object_name': 'QuestionCategory'},
'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'reviewed': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'website.questiondependency': {
'Meta': {'object_name': 'QuestionDependency'},
'answer_text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'question1': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'_questionDependency_question1'", 'to': "orm['website.Question']"}),
'question2': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'_questionDependency_question2'", 'to': "orm['website.Question']"}),
'required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'strength': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'})
},
'website.ratingcategory': {
'Meta': {'object_name': 'RatingCategory'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'rating_type': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'})
},
'website.ratinglevel': {
'Meta': {'object_name': 'RatingLevel'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingCategory']", 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'rank': ('django.db.models.fields.PositiveSmallIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'})
},
'website.reaction': {
'Meta': {'object_name': 'Reaction'},
'action': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Action']", 'null': 'True', 'blank': 'True'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.ReactionCategory']", 'null': 'True', 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'level': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingLevel']", 'null': 'True', 'blank': 'True'}),
'question_category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.QuestionCategory']", 'null': 'True', 'blank': 'True'}),
'reaction_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'scale': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.reactioncategory': {
'Meta': {'object_name': 'ReactionCategory'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'points': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'rating_category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingCategory']", 'null': 'True', 'blank': 'True'})
},
'website.region': {
'Meta': {'object_name': 'Region'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '8', 'null': 'True', 'blank': 'True'})
},
'website.rewardcategory': {
'Meta': {'object_name': 'RewardCategory'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'website.roletype': {
'Meta': {'object_name': 'RoleType'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'website.template': {
'Meta': {'object_name': 'Template'},
'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'reviewed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'template_type': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '8', 'db_index': 'True', 'blank': 'True'})
},
'website.templatequestion': {
'Meta': {'object_name': 'TemplateQuestion'},
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Question']"}),
'template': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Template']"})
},
'website.tutorial': {
'Meta': {'object_name': 'Tutorial'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'end_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identifier': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'start_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'website.tutorialpage': {
'Meta': {'object_name': 'TutorialPage'},
'display_order': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'selector': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'tip': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'tutorial': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Tutorial']", 'null': 'True', 'blank': 'True'})
},
'website.usercommentview': {
'Meta': {'object_name': 'UserCommentView'},
'comments_count': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'entity_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'entity_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '32', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'last_comment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Comment']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'view_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'})
},
'website.userdetail': {
'Meta': {'object_name': 'UserDetail'},
'display_preference': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'notification_preference': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
'old_password': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'reset_password_key': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '124', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.userfavorite': {
'Meta': {'object_name': 'UserFavorite'},
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'entity_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'entity_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.userrating': {
'Meta': {'object_name': 'UserRating'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingCategory']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingLevel']", 'null': 'True', 'blank': 'True'}),
'scale': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'updated_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.userreward': {
'Meta': {'object_name': 'UserReward'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reward': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RewardCategory']", 'null': 'True', 'blank': 'True'}),
'reward_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.usersearch': {
'Meta': {'object_name': 'UserSearch'},
'entity_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'entity_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'search_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.usertutorialhistory': {
'Meta': {'object_name': 'UserTutorialHistory'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'tutorial': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Tutorial']", 'null': 'True', 'blank': 'True'}),
'user_email': ('django.db.models.fields.EmailField', [], {'db_index': 'True', 'max_length': '75', 'null': 'True', 'blank': 'True'}),
'view_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'website.usertutorialpagehistory': {
'Meta': {'object_name': 'UserTutorialPageHistory'},
'checked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.TutorialPage']", 'null': 'True', 'blank': 'True'}),
'tutorial': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Tutorial']", 'null': 'True', 'blank': 'True'}),
'user_email': ('django.db.models.fields.EmailField', [], {'db_index': 'True', 'max_length': '75', 'null': 'True', 'blank': 'True'})
},
'website.zipcode': {
'Meta': {'object_name': 'Zipcode'},
'city': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'county': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.DecimalField', [], {'db_index': 'True', 'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'db_index': 'True', 'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '2', 'null': 'True', 'blank': 'True'}),
'zip_code': ('django.db.models.fields.CharField', [], {'max_length': '10', 'db_index': 'True'})
}
}
complete_apps = ['website']
|
{
"content_hash": "635d523ce27785e0e8abaa06cbf01337",
"timestamp": "",
"source": "github",
"line_count": 545,
"max_line_length": 200,
"avg_line_length": 93.36330275229358,
"alnum_prop": 0.5474716506495293,
"repo_name": "solarpermit/solarpermit",
"id": "24ea86d96886070cfb17460d213b034686b01741",
"size": "50907",
"binary": false,
"copies": "1",
"ref": "refs/heads/devel",
"path": "website/migrations/0057_auto__del_field_question_qtemplate.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "126992"
},
{
"name": "JavaScript",
"bytes": "808802"
},
{
"name": "Python",
"bytes": "6625868"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import logging
from math import ceil
import sys
import numpy as np
import tensorflow as tf
VGG_MEAN = [103.939, 116.779, 123.68]
class FCN8VGG:
def __init__(self, vgg16_npy_path=None):
if vgg16_npy_path is None:
path = sys.modules[self.__class__.__module__].__file__
# print path
path = os.path.abspath(os.path.join(path, os.pardir))
# print path
path = os.path.join(path, "vgg16.npy")
vgg16_npy_path = path
logging.info("Load npy file from '%s'.", vgg16_npy_path)
if not os.path.isfile(vgg16_npy_path):
logging.error(("File '%s' not found. Download it from "
"ftp://mi.eng.cam.ac.uk/pub/mttt2/"
"models/vgg16.npy"), vgg16_npy_path)
sys.exit(1)
self.data_dict = np.load(vgg16_npy_path, encoding='latin1').item()
self.wd = 5e-4
print("npy file loaded")
def build(self, rgb, train=False, num_classes=20, random_init_fc8=False,
debug=False, use_dilated=False):
"""
Build the VGG model using loaded weights
Parameters
----------
rgb: image batch tensor
            Image in RGB shape, scaled to the interval [0, 255].
train: bool
Whether to build train or inference graph
num_classes: int
How many classes should be predicted (by fc8)
random_init_fc8 : bool
Whether to initialize fc8 layer randomly.
Finetuning is required in this case.
        debug: bool
            Whether to print additional debug information.
        use_dilated: bool
            Whether to replace pool4/pool5 with dilated processing via
            space_to_batch/batch_to_space (in that case build() returns
            after fc7 and the decoder is skipped).
        """
# Convert RGB to BGR
with tf.name_scope('Processing'):
red, green, blue = tf.split(rgb, 3, 3)
# assert red.get_shape().as_list()[1:] == [224, 224, 1]
# assert green.get_shape().as_list()[1:] == [224, 224, 1]
# assert blue.get_shape().as_list()[1:] == [224, 224, 1]
bgr = tf.concat([
blue - VGG_MEAN[0],
green - VGG_MEAN[1],
red - VGG_MEAN[2],
], 3)
if debug:
bgr = tf.Print(bgr, [tf.shape(bgr)],
message='Shape of input image: ',
summarize=4, first_n=1)
self.conv1_1 = self._conv_layer(bgr, "conv1_1")
self.conv1_2 = self._conv_layer(self.conv1_1, "conv1_2")
self.pool1 = self._max_pool(self.conv1_2, 'pool1', debug)
self.conv2_1 = self._conv_layer(self.pool1, "conv2_1")
self.conv2_2 = self._conv_layer(self.conv2_1, "conv2_2")
self.pool2 = self._max_pool(self.conv2_2, 'pool2', debug)
self.conv3_1 = self._conv_layer(self.pool2, "conv3_1")
self.conv3_2 = self._conv_layer(self.conv3_1, "conv3_2")
self.conv3_3 = self._conv_layer(self.conv3_2, "conv3_3")
self.pool3 = self._max_pool(self.conv3_3, 'pool3', debug)
self.conv4_1 = self._conv_layer(self.pool3, "conv4_1")
self.conv4_2 = self._conv_layer(self.conv4_1, "conv4_2")
self.conv4_3 = self._conv_layer(self.conv4_2, "conv4_3")
if use_dilated:
pad = [[0, 0], [0, 0]]
self.pool4 = tf.nn.max_pool(self.conv4_3, ksize=[1, 2, 2, 1],
strides=[1, 1, 1, 1],
padding='SAME', name='pool4')
self.pool4 = tf.space_to_batch(self.pool4,
paddings=pad, block_size=2)
else:
self.pool4 = self._max_pool(self.conv4_3, 'pool4', debug)
self.conv5_1 = self._conv_layer(self.pool4, "conv5_1")
self.conv5_2 = self._conv_layer(self.conv5_1, "conv5_2")
self.conv5_3 = self._conv_layer(self.conv5_2, "conv5_3")
if use_dilated:
pad = [[0, 0], [0, 0]]
self.pool5 = tf.nn.max_pool(self.conv5_3, ksize=[1, 2, 2, 1],
strides=[1, 1, 1, 1],
padding='SAME', name='pool5')
self.pool5 = tf.space_to_batch(self.pool5,
paddings=pad, block_size=2)
else:
self.pool5 = self._max_pool(self.conv5_3, 'pool5', debug)
self.fc6 = self._fc_layer(self.pool5, "fc6")
if train:
self.fc6 = tf.nn.dropout(self.fc6, 0.5)
self.fc7 = self._fc_layer(self.fc6, "fc7")
if train:
self.fc7 = tf.nn.dropout(self.fc7, 0.5)
if use_dilated:
self.pool5 = tf.batch_to_space(self.pool5, crops=pad, block_size=2)
self.pool5 = tf.batch_to_space(self.pool5, crops=pad, block_size=2)
self.fc7 = tf.batch_to_space(self.fc7, crops=pad, block_size=2)
self.fc7 = tf.batch_to_space(self.fc7, crops=pad, block_size=2)
return
if random_init_fc8:
self.score_fr = self._score_layer(self.fc7, "score_fr",
num_classes)
else:
self.score_fr = self._fc_layer(self.fc7, "score_fr",
num_classes=num_classes,
relu=False)
        self.pred = tf.argmax(self.score_fr, axis=3)
self.upscore2 = self._upscore_layer(self.score_fr,
shape=tf.shape(self.pool4),
num_classes=num_classes,
debug=debug, name='upscore2',
ksize=4, stride=2)
self.score_pool4 = self._score_layer(self.pool4, "score_pool4",
num_classes=num_classes)
self.fuse_pool4 = tf.add(self.upscore2, self.score_pool4)
self.upscore4 = self._upscore_layer(self.fuse_pool4,
shape=tf.shape(self.pool3),
num_classes=num_classes,
debug=debug, name='upscore4',
ksize=4, stride=2)
self.score_pool3 = self._score_layer(self.pool3, "score_pool3",
num_classes=num_classes)
self.fuse_pool3 = tf.add(self.upscore4, self.score_pool3)
self.upscore32 = self._upscore_layer(self.fuse_pool3,
shape=tf.shape(bgr),
num_classes=num_classes,
debug=debug, name='upscore32',
ksize=16, stride=8)
        self.pred_up = tf.argmax(self.upscore32, axis=3)
def _max_pool(self, bottom, name, debug):
pool = tf.nn.max_pool(bottom, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
padding='SAME', name=name)
if debug:
pool = tf.Print(pool, [tf.shape(pool)],
message='Shape of %s' % name,
summarize=4, first_n=1)
return pool
def _conv_layer(self, bottom, name):
with tf.variable_scope(name) as scope:
filt = self.get_conv_filter(name)
conv = tf.nn.conv2d(bottom, filt, [1, 1, 1, 1], padding='SAME')
conv_biases = self.get_bias(name)
bias = tf.nn.bias_add(conv, conv_biases)
relu = tf.nn.relu(bias)
# Add summary to Tensorboard
_activation_summary(relu)
return relu
def _fc_layer(self, bottom, name, num_classes=None,
relu=True, debug=False):
with tf.variable_scope(name) as scope:
shape = bottom.get_shape().as_list()
if name == 'fc6':
filt = self.get_fc_weight_reshape(name, [7, 7, 512, 4096])
elif name == 'score_fr':
name = 'fc8' # Name of score_fr layer in VGG Model
filt = self.get_fc_weight_reshape(name, [1, 1, 4096, 1000],
num_classes=num_classes)
else:
filt = self.get_fc_weight_reshape(name, [1, 1, 4096, 4096])
self._add_wd_and_summary(filt, self.wd, "fc_wlosses")
conv = tf.nn.conv2d(bottom, filt, [1, 1, 1, 1], padding='SAME')
conv_biases = self.get_bias(name, num_classes=num_classes)
bias = tf.nn.bias_add(conv, conv_biases)
if relu:
bias = tf.nn.relu(bias)
_activation_summary(bias)
if debug:
bias = tf.Print(bias, [tf.shape(bias)],
message='Shape of %s' % name,
summarize=4, first_n=1)
return bias
def _score_layer(self, bottom, name, num_classes):
with tf.variable_scope(name) as scope:
# get number of input channels
in_features = bottom.get_shape()[3].value
shape = [1, 1, in_features, num_classes]
            # He initialization scheme
if name == "score_fr":
num_input = in_features
stddev = (2 / num_input)**0.5
elif name == "score_pool4":
stddev = 0.001
elif name == "score_pool3":
stddev = 0.0001
# Apply convolution
w_decay = self.wd
weights = self._variable_with_weight_decay(shape, stddev, w_decay,
decoder=True)
conv = tf.nn.conv2d(bottom, weights, [1, 1, 1, 1], padding='SAME')
# Apply bias
conv_biases = self._bias_variable([num_classes], constant=0.0)
bias = tf.nn.bias_add(conv, conv_biases)
_activation_summary(bias)
return bias
def _upscore_layer(self, bottom, shape,
num_classes, name, debug,
ksize=4, stride=2):
strides = [1, stride, stride, 1]
with tf.variable_scope(name):
in_features = bottom.get_shape()[3].value
if shape is None:
                # compute output shape from the bottom tensor
in_shape = tf.shape(bottom)
h = ((in_shape[1] - 1) * stride) + 1
w = ((in_shape[2] - 1) * stride) + 1
new_shape = [in_shape[0], h, w, num_classes]
else:
new_shape = [shape[0], shape[1], shape[2], num_classes]
output_shape = tf.stack(new_shape)
logging.debug("Layer: %s, Fan-in: %d" % (name, in_features))
f_shape = [ksize, ksize, num_classes, in_features]
            # create the upsampling filter; num_input/stddev below are computed
            # but unused, since get_deconv_filter fixes bilinear weights
num_input = ksize * ksize * in_features / stride
stddev = (2 / num_input)**0.5
weights = self.get_deconv_filter(f_shape)
self._add_wd_and_summary(weights, self.wd, "fc_wlosses")
deconv = tf.nn.conv2d_transpose(bottom, weights, output_shape,
strides=strides, padding='SAME')
if debug:
deconv = tf.Print(deconv, [tf.shape(deconv)],
message='Shape of %s' % name,
summarize=4, first_n=1)
_activation_summary(deconv)
return deconv
def get_deconv_filter(self, f_shape):
width = f_shape[0]
height = f_shape[1]
f = ceil(width/2.0)
c = (2 * f - 1 - f % 2) / (2.0 * f)
bilinear = np.zeros([f_shape[0], f_shape[1]])
for x in range(width):
for y in range(height):
value = (1 - abs(x / f - c)) * (1 - abs(y / f - c))
bilinear[x, y] = value
weights = np.zeros(f_shape)
for i in range(f_shape[2]):
weights[:, :, i, i] = bilinear
init = tf.constant_initializer(value=weights,
dtype=tf.float32)
var = tf.get_variable(name="up_filter", initializer=init,
shape=weights.shape)
return var
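    # Worked example (editor's note, derived from the code above): for
    # ksize = 4 the 1-D profile has f = 2 and c = 0.75, giving weights
    # [0.25, 0.75, 0.75, 0.25]; the 2-D kernel is their outer product,
    # placed on the (i, i) diagonal so each class is upsampled independently
    # with bilinear interpolation.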
def get_conv_filter(self, name):
init = tf.constant_initializer(value=self.data_dict[name][0],
dtype=tf.float32)
shape = self.data_dict[name][0].shape
print('Layer name: %s' % name)
print('Layer shape: %s' % str(shape))
var = tf.get_variable(name="filter", initializer=init, shape=shape)
if not tf.get_variable_scope().reuse:
weight_decay = tf.multiply(tf.nn.l2_loss(var), self.wd,
name='weight_loss')
tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES,
weight_decay)
_variable_summaries(var)
return var
def get_bias(self, name, num_classes=None):
        bias_weights = self.data_dict[name][1]
        shape = self.data_dict[name][1].shape
        if name == 'fc8':
            bias_weights = self._bias_reshape(bias_weights, shape[0],
                                              num_classes)
            shape = [num_classes]
        init = tf.constant_initializer(value=bias_weights,
dtype=tf.float32)
var = tf.get_variable(name="biases", initializer=init, shape=shape)
_variable_summaries(var)
return var
def get_fc_weight(self, name):
init = tf.constant_initializer(value=self.data_dict[name][0],
dtype=tf.float32)
shape = self.data_dict[name][0].shape
var = tf.get_variable(name="weights", initializer=init, shape=shape)
if not tf.get_variable_scope().reuse:
weight_decay = tf.multiply(tf.nn.l2_loss(var), self.wd,
name='weight_loss')
tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES,
weight_decay)
_variable_summaries(var)
return var
def _bias_reshape(self, bweight, num_orig, num_new):
""" Build bias weights for filter produces with `_summary_reshape`
"""
n_averaged_elements = num_orig//num_new
avg_bweight = np.zeros(num_new)
for i in range(0, num_orig, n_averaged_elements):
start_idx = i
end_idx = start_idx + n_averaged_elements
avg_idx = start_idx//n_averaged_elements
if avg_idx == num_new:
break
avg_bweight[avg_idx] = np.mean(bweight[start_idx:end_idx])
return avg_bweight
def _summary_reshape(self, fweight, shape, num_new):
""" Produce weights for a reduced fully-connected layer.
        FC8 of VGG produces 1000 classes. Most semantic segmentation
        tasks require far fewer classes. This reshapes the original weights
        for use in a fully-convolutional layer which produces num_new
        classes. To achieve this, the average (mean) of n adjacent classes is
        taken.
        Consider reordering fweight to preserve the semantic meaning of the
        weights.
Args:
fweight: original weights
shape: shape of the desired fully-convolutional layer
num_new: number of new classes
Returns:
Filter weights for `num_new` classes.
"""
num_orig = shape[3]
shape[3] = num_new
assert(num_new < num_orig)
n_averaged_elements = num_orig//num_new
avg_fweight = np.zeros(shape)
for i in range(0, num_orig, n_averaged_elements):
start_idx = i
end_idx = start_idx + n_averaged_elements
avg_idx = start_idx//n_averaged_elements
if avg_idx == num_new:
break
avg_fweight[:, :, :, avg_idx] = np.mean(
fweight[:, :, :, start_idx:end_idx], axis=3)
return avg_fweight
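    # Worked example (editor's note): with num_orig = 1000 and num_new = 20,
    # n_averaged_elements = 50, so avg_fweight[:, :, :, k] is the mean of
    # fweight[:, :, :, 50*k : 50*k + 50]; _bias_reshape above applies the
    # same averaging to the 1000 fc8 biases.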
def _variable_with_weight_decay(self, shape, stddev, wd, decoder=False):
"""Helper to create an initialized Variable with weight decay.
Note that the Variable is initialized with a truncated normal
distribution.
A weight decay is added only if one is specified.
Args:
name: name of the variable
shape: list of ints
stddev: standard deviation of a truncated Gaussian
wd: add L2Loss weight decay multiplied by this float. If None, weight
decay is not added for this Variable.
Returns:
Variable Tensor
"""
initializer = tf.truncated_normal_initializer(stddev=stddev)
var = tf.get_variable('weights', shape=shape,
initializer=initializer)
collection_name = tf.GraphKeys.REGULARIZATION_LOSSES
if wd and (not tf.get_variable_scope().reuse):
weight_decay = tf.multiply(
tf.nn.l2_loss(var), wd, name='weight_loss')
tf.add_to_collection(collection_name, weight_decay)
_variable_summaries(var)
return var
def _add_wd_and_summary(self, var, wd, collection_name=None):
if collection_name is None:
collection_name = tf.GraphKeys.REGULARIZATION_LOSSES
if wd and (not tf.get_variable_scope().reuse):
weight_decay = tf.multiply(
tf.nn.l2_loss(var), wd, name='weight_loss')
tf.add_to_collection(collection_name, weight_decay)
_variable_summaries(var)
return var
def _bias_variable(self, shape, constant=0.0):
initializer = tf.constant_initializer(constant)
var = tf.get_variable(name='biases', shape=shape,
initializer=initializer)
_variable_summaries(var)
return var
def get_fc_weight_reshape(self, name, shape, num_classes=None):
print('Layer name: %s' % name)
print('Layer shape: %s' % shape)
weights = self.data_dict[name][0]
weights = weights.reshape(shape)
if num_classes is not None:
weights = self._summary_reshape(weights, shape,
num_new=num_classes)
init = tf.constant_initializer(value=weights,
dtype=tf.float32)
var = tf.get_variable(name="weights", initializer=init, shape=shape)
return var
def _activation_summary(x):
"""Helper to create summaries for activations.
Creates a summary that provides a histogram of activations.
    Creates a summary that measures the sparsity of activations.
Args:
x: Tensor
Returns:
nothing
"""
# Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
# session. This helps the clarity of presentation on tensorboard.
tensor_name = x.op.name
# tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)
tf.summary.histogram(tensor_name + '/activations', x)
tf.summary.scalar(tensor_name + '/sparsity', tf.nn.zero_fraction(x))
def _variable_summaries(var):
"""Attach a lot of summaries to a Tensor."""
if not tf.get_variable_scope().reuse:
name = var.op.name
logging.info("Creating Summary for: %s" % name)
with tf.name_scope('summaries'):
mean = tf.reduce_mean(var)
tf.summary.scalar(name + '/mean', mean)
with tf.name_scope('stddev'):
                stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
            tf.summary.scalar(name + '/stddev', stddev)
tf.summary.scalar(name + '/max', tf.reduce_max(var))
tf.summary.scalar(name + '/min', tf.reduce_min(var))
tf.summary.histogram(name, var)
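# ----------------------------------------------------------------------------
# Usage sketch (editor's addition, not part of the original module): a minimal
# way to wire the class into a TF 1.x graph, assuming "vgg16.npy" sits next to
# this file. Variable names here are illustrative only.
#
#   images = tf.placeholder(tf.float32, shape=[None, None, None, 3])
#   net = FCN8VGG()
#   net.build(images, train=False, num_classes=20)
#   # net.pred_up now holds dense per-pixel class predictions at input size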
|
{
"content_hash": "c1cee525671f1c8b0a65373b5a04c682",
"timestamp": "",
"source": "github",
"line_count": 489,
"max_line_length": 79,
"avg_line_length": 40.8241308793456,
"alnum_prop": 0.5187096127836498,
"repo_name": "MarvinTeichmann/tensorflow-fcn",
"id": "834b32473a15edee9b36811ce3b890879104b301",
"size": "19963",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fcn8_vgg.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "57960"
}
],
"symlink_target": ""
}
|
"""This module is deprecated. Please use `airflow.providers.amazon.aws.hooks.emr`."""
import warnings
# pylint: disable=unused-import
from airflow.providers.amazon.aws.hooks.emr import EmrHook # noqa
warnings.warn(
"This module is deprecated. Please use `airflow.providers.amazon.aws.hooks.emr`.",
DeprecationWarning, stacklevel=2
)
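# Migration sketch (editor's note): replace
#   from airflow.contrib.hooks.emr_hook import EmrHook
# with
#   from airflow.providers.amazon.aws.hooks.emr import EmrHook
# The shim above keeps the old import path working while emitting the warning.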
|
{
"content_hash": "d7bb382f300b009bc90e8517474b983d",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 86,
"avg_line_length": 31.363636363636363,
"alnum_prop": 0.7594202898550725,
"repo_name": "spektom/incubator-airflow",
"id": "a8cadcb6f727adb33e9abc4295f6e350324d28e9",
"size": "1132",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "airflow/contrib/hooks/emr_hook.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "13715"
},
{
"name": "Dockerfile",
"bytes": "17179"
},
{
"name": "HTML",
"bytes": "148492"
},
{
"name": "JavaScript",
"bytes": "25233"
},
{
"name": "Jupyter Notebook",
"bytes": "2933"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "9768581"
},
{
"name": "Shell",
"bytes": "221415"
},
{
"name": "TSQL",
"bytes": "879"
}
],
"symlink_target": ""
}
|
from django.core.urlresolvers import reverse_lazy
from django.http import Http404, HttpResponseRedirect
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from hack4lt.forms import (
Task1Form,
Task2Form,
)
from hack4lt.models import TaskResult
def index_view(request):
return HttpResponseRedirect(reverse_lazy('lectures'))
def about_view(request):
return render(request, 'hack4lt/home.html', {})
def lectures_view(request):
return render(request, 'hack4lt/lectures.html', {})
def events_view(request):
return render(request, 'hack4lt/events.html', {})
def tasks_view(request):
return render(request, 'hack4lt/tasks.html', {})
@login_required(login_url=reverse_lazy('login'))
def admin_view(request):
if not request.user.is_superuser:
return HttpResponseRedirect(reverse_lazy('login'))
return render(request, 'hack4lt/admin.html', {
'tasks_to_check': TaskResult.objects.filter(done=False, should_check=True)
})
@login_required(login_url=reverse_lazy('login'))
def task_view(request, task_id):
    if task_id == '1':
        form_class = Task1Form
    elif task_id == '2':
        form_class = Task2Form
    else:
        # unknown task ids would otherwise raise a NameError when form_class
        # is used below
        raise Http404
if request.method == 'POST':
form = form_class(data=request.POST, files=request.FILES, user=request.user)
if form.is_valid():
form.save()
return HttpResponseRedirect(reverse_lazy('tasks'))
else:
form = form_class(user=request.user)
return render(request, 'hack4lt/task.html', {
'form': form,
})
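# URLconf sketch (editor's addition; the pattern below is hypothetical, but the
# view names 'tasks', 'login' and 'lectures' are the ones reversed above):
#
#   url(r'^task/(?P<task_id>\d+)/$', task_view, name='task'),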
|
{
"content_hash": "e141bdeaf0cbcfe5959d6c53d49908b0",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 84,
"avg_line_length": 29.79245283018868,
"alnum_prop": 0.681443951868271,
"repo_name": "niekas/Hack4LT",
"id": "a2d58b7a0d89015928c97eca303e44ddd62b6e22",
"size": "1579",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/hack4lt/views/basic.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "520"
},
{
"name": "Python",
"bytes": "122310"
}
],
"symlink_target": ""
}
|
"""empty message
Revision ID: 7a4526a1eb30
Revises: 352cb746332a
Create Date: 2019-02-10 18:47:14.300006
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
# revision identifiers, used by Alembic.
revision = '7a4526a1eb30'
down_revision = '352cb746332a'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
# op.create_foreign_key(None, 'tagmap', 'papers', ['paper_id'], ['id'])
# op.alter_column('users', 'email',
# existing_type=mysql.VARCHAR(length=120),
# nullable=False)
# ### end Alembic commands ###
pass
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column('users', 'email',
existing_type=mysql.VARCHAR(length=120),
nullable=True)
op.drop_constraint(None, 'tagmap', type_='foreignkey')
# ### end Alembic commands ###
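# Editor's note: given the revision graph above, "alembic upgrade 7a4526a1eb30"
# applies this (currently no-op) upgrade and "alembic downgrade 352cb746332a"
# runs downgrade().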
|
{
"content_hash": "2137a0557b8ed103b85a487f935c2f88",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 75,
"avg_line_length": 28.529411764705884,
"alnum_prop": 0.6453608247422681,
"repo_name": "cosanlab/cosanlab_website",
"id": "92a7f48c147e405beb84529744b21ea3fa73ca17",
"size": "970",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "migrations/versions/7a4526a1eb30_.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "114390"
},
{
"name": "HTML",
"bytes": "309957"
},
{
"name": "JavaScript",
"bytes": "306271"
},
{
"name": "Jupyter Notebook",
"bytes": "33416"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "25293"
},
{
"name": "SCSS",
"bytes": "19188"
}
],
"symlink_target": ""
}
|
import os
import sys
import re
import datetime
import socket
import traceback
from .WatchDogBase import WatchDogBase
from pandajedi.jedicore.MsgWrapper import MsgWrapper
from pandajedi.jedicore.ThreadUtils import ListWithLock, ThreadPool, WorkerThread
# logger
from pandacommon.pandalogger.PandaLogger import PandaLogger
logger = PandaLogger().getLogger(__name__.split('.')[-1])
# data locality updater for ATLAS
class AtlasDataLocalityUpdaterWatchDog(WatchDogBase):
# constructor
def __init__(self, taskBufferIF, ddmIF):
WatchDogBase.__init__(self, taskBufferIF, ddmIF)
self.pid = '{0}-{1}-dog'.format(socket.getfqdn().split('.')[0], os.getpid())
self.vo = 'atlas'
self.ddmIF = ddmIF.getInterface(self.vo)
# get list-with-lock of datasets to update
def get_datasets_list(self):
datasets_list = self.taskBufferIF.get_tasks_inputdatasets_JEDI(self.vo)
datasets_list = ListWithLock(datasets_list)
# return
return datasets_list
# update data locality records to DB table
def doUpdateDataLocality(self):
tmpLog = MsgWrapper(logger, ' #ATM #KV doUpdateDataLocality')
tmpLog.debug('start')
try:
# lock
got_lock = self.taskBufferIF.lockProcess_JEDI( vo=self.vo, prodSourceLabel='default',
cloud=None, workqueue_id=None, resource_name=None,
component='AtlasDataLocalityUpdaterWatchDog.doUpdateDataLocality',
pid=self.pid, timeLimit=240)
if not got_lock:
tmpLog.debug('locked by another process. Skipped')
return
tmpLog.debug('got lock')
# get list of datasets
datasets_list = self.get_datasets_list()
tmpLog.debug('got {0} datasets to update'.format(len(datasets_list)))
# make thread pool
thread_pool = ThreadPool()
# make workers
n_workers = 4
for _ in range(n_workers):
thr = DataLocalityUpdaterThread(taskDsList=datasets_list,
threadPool=thread_pool,
taskbufferIF=self.taskBufferIF,
ddmIF=self.ddmIF,
pid=self.pid,
loggerObj=tmpLog)
thr.start()
tmpLog.debug('started {0} updater workers'.format(n_workers))
# join
thread_pool.join()
# done
tmpLog.debug('done')
except Exception:
errtype, errvalue = sys.exc_info()[:2]
tmpLog.error('failed with {0} {1} {2}'.format(errtype, errvalue, traceback.format_exc()))
# clean up old data locality records in DB table
def doCleanDataLocality(self):
tmpLog = MsgWrapper(logger, ' #ATM #KV doCleanDataLocality')
tmpLog.debug('start')
try:
# lock
got_lock = self.taskBufferIF.lockProcess_JEDI( vo=self.vo, prodSourceLabel='default',
cloud=None, workqueue_id=None, resource_name=None,
component='AtlasDataLocalityUpdaterWatchDog.doCleanDataLocality',
pid=self.pid, timeLimit=1440)
if not got_lock:
tmpLog.debug('locked by another process. Skipped')
return
tmpLog.debug('got lock')
# lifetime of records
record_lifetime_hours = 24
# run
now_timestamp = datetime.datetime.utcnow()
before_timestamp = now_timestamp - datetime.timedelta(hours=record_lifetime_hours)
n_rows = self.taskBufferIF.deleteOutdatedDatasetLocality_JEDI(before_timestamp)
tmpLog.info('cleaned up {0} records'.format(n_rows))
# done
tmpLog.debug('done')
except Exception:
errtype, errvalue = sys.exc_info()[:2]
tmpLog.error('failed with {0} {1} {2}'.format(errtype, errvalue, traceback.format_exc()))
# main
def doAction(self):
try:
# get logger
origTmpLog = MsgWrapper(logger)
origTmpLog.debug('start')
# clean up data locality
self.doCleanDataLocality()
# update data locality
self.doUpdateDataLocality()
except Exception:
errtype, errvalue = sys.exc_info()[:2]
origTmpLog.error('failed with {0} {1}'.format(errtype, errvalue))
# return
origTmpLog.debug('done')
return self.SC_SUCCEEDED
# thread for data locality update
class DataLocalityUpdaterThread(WorkerThread):
# constructor
def __init__(self, taskDsList, threadPool, taskbufferIF, ddmIF, pid, loggerObj):
        # initialize worker with no semaphore
        WorkerThread.__init__(self, None, threadPool, loggerObj)
        # attributes
self.taskDsList = taskDsList
self.taskBufferIF = taskbufferIF
self.ddmIF = ddmIF
self.msgType = 'datalocalityupdate'
self.pid = pid
self.logger = loggerObj
# main
def runImpl(self):
while True:
try:
# get part of datasets
nDatasets = 5
taskDsList = self.taskDsList.get(nDatasets)
if len(taskDsList) == 0:
# no more datasets, quit
self.logger.debug('{0} terminating since no more items'.format(self.name))
return
# loop over these datasets
for item in taskDsList:
if item is None:
continue
jediTaskID, datasetID, datasetName = item
dataset_replicas_map = self.ddmIF.listDatasetReplicas(datasetName)
for tmpRSE, tmpList in dataset_replicas_map.items():
tmpStatistics = tmpList[-1]
# exclude unknown
if tmpStatistics['found'] is None:
continue
# update dataset locality table
self.taskBufferIF.updateDatasetLocality_JEDI( jedi_taskid=jediTaskID,
datasetid=datasetID,
rse=tmpRSE)
except Exception as e:
self.logger.error('{0} failed in runImpl() with {1}: {2}'.format(self.__class__.__name__, str(e),
traceback.format_exc()))
return
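# Editor's note on the worker pattern above: get_datasets_list() wraps the full
# dataset list in a ListWithLock, and each DataLocalityUpdaterThread drains it
# in chunks of five via taskDsList.get(n), exiting once an empty chunk comes
# back. A minimal sketch of the same idea with a plain queue (hypothetical,
# for illustration only):
#
#   import queue
#   work = queue.Queue()
#   for item in items:
#       work.put(item)
#
#   def run_impl():
#       while True:
#           try:
#               item = work.get_nowait()
#           except queue.Empty:
#               return
#           process(item)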
|
{
"content_hash": "712bcfac000d4f1e42c0fccf96c64ee6",
"timestamp": "",
"source": "github",
"line_count": 162,
"max_line_length": 126,
"avg_line_length": 43.333333333333336,
"alnum_prop": 0.5293447293447293,
"repo_name": "PanDAWMS/panda-jedi",
"id": "4c151254bd6fccbe12a86ee8dcc6d59eeede3b09",
"size": "7020",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pandajedi/jedidog/AtlasDataLocalityUpdaterWatchDog.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2698"
},
{
"name": "Python",
"bytes": "2118485"
},
{
"name": "Shell",
"bytes": "3029"
}
],
"symlink_target": ""
}
|
"""This module contains a Google Cloud Vertex AI hook."""
from typing import Dict, List, Optional, Sequence, Tuple, Union
from google.api_core.operation import Operation
from google.api_core.retry import Retry
from google.cloud.aiplatform import (
CustomContainerTrainingJob,
CustomPythonPackageTrainingJob,
CustomTrainingJob,
datasets,
models,
)
from google.cloud.aiplatform_v1 import JobServiceClient, PipelineServiceClient
from google.cloud.aiplatform_v1.services.job_service.pagers import ListCustomJobsPager
from google.cloud.aiplatform_v1.services.pipeline_service.pagers import (
ListPipelineJobsPager,
ListTrainingPipelinesPager,
)
from google.cloud.aiplatform_v1.types import CustomJob, PipelineJob, TrainingPipeline
from airflow import AirflowException
from airflow.providers.google.common.consts import CLIENT_INFO
from airflow.providers.google.common.hooks.base_google import GoogleBaseHook
class CustomJobHook(GoogleBaseHook):
"""Hook for Google Cloud Vertex AI Custom Job APIs."""
def __init__(
self,
gcp_conn_id: str = "google_cloud_default",
delegate_to: Optional[str] = None,
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
) -> None:
super().__init__(
gcp_conn_id=gcp_conn_id,
delegate_to=delegate_to,
impersonation_chain=impersonation_chain,
)
self._job: Optional[
Union[
CustomContainerTrainingJob,
CustomPythonPackageTrainingJob,
CustomTrainingJob,
]
] = None
def get_pipeline_service_client(
self,
region: Optional[str] = None,
) -> PipelineServiceClient:
"""Returns PipelineServiceClient."""
client_options = None
if region and region != 'global':
client_options = {'api_endpoint': f'{region}-aiplatform.googleapis.com:443'}
return PipelineServiceClient(
credentials=self._get_credentials(), client_info=CLIENT_INFO, client_options=client_options
)
def get_job_service_client(
self,
region: Optional[str] = None,
) -> JobServiceClient:
"""Returns JobServiceClient"""
client_options = None
if region and region != 'global':
client_options = {'api_endpoint': f'{region}-aiplatform.googleapis.com:443'}
return JobServiceClient(
credentials=self._get_credentials(), client_info=CLIENT_INFO, client_options=client_options
)
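    # Editor's note: both client factories above derive a regional endpoint of
    # the form "<region>-aiplatform.googleapis.com:443" (for example
    # "us-central1-aiplatform.googleapis.com:443"); region=None or "global"
    # keeps the library's default endpoint.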
def get_custom_container_training_job(
self,
display_name: str,
container_uri: str,
command: Sequence[str] = [],
model_serving_container_image_uri: Optional[str] = None,
model_serving_container_predict_route: Optional[str] = None,
model_serving_container_health_route: Optional[str] = None,
model_serving_container_command: Optional[Sequence[str]] = None,
model_serving_container_args: Optional[Sequence[str]] = None,
model_serving_container_environment_variables: Optional[Dict[str, str]] = None,
model_serving_container_ports: Optional[Sequence[int]] = None,
model_description: Optional[str] = None,
model_instance_schema_uri: Optional[str] = None,
model_parameters_schema_uri: Optional[str] = None,
model_prediction_schema_uri: Optional[str] = None,
project: Optional[str] = None,
location: Optional[str] = None,
labels: Optional[Dict[str, str]] = None,
training_encryption_spec_key_name: Optional[str] = None,
model_encryption_spec_key_name: Optional[str] = None,
staging_bucket: Optional[str] = None,
) -> CustomContainerTrainingJob:
"""Returns CustomContainerTrainingJob object"""
return CustomContainerTrainingJob(
display_name=display_name,
container_uri=container_uri,
command=command,
model_serving_container_image_uri=model_serving_container_image_uri,
model_serving_container_predict_route=model_serving_container_predict_route,
model_serving_container_health_route=model_serving_container_health_route,
model_serving_container_command=model_serving_container_command,
model_serving_container_args=model_serving_container_args,
model_serving_container_environment_variables=model_serving_container_environment_variables,
model_serving_container_ports=model_serving_container_ports,
model_description=model_description,
model_instance_schema_uri=model_instance_schema_uri,
model_parameters_schema_uri=model_parameters_schema_uri,
model_prediction_schema_uri=model_prediction_schema_uri,
project=project,
location=location,
credentials=self._get_credentials(),
labels=labels,
training_encryption_spec_key_name=training_encryption_spec_key_name,
model_encryption_spec_key_name=model_encryption_spec_key_name,
staging_bucket=staging_bucket,
)
def get_custom_python_package_training_job(
self,
display_name: str,
python_package_gcs_uri: str,
python_module_name: str,
container_uri: str,
model_serving_container_image_uri: Optional[str] = None,
model_serving_container_predict_route: Optional[str] = None,
model_serving_container_health_route: Optional[str] = None,
model_serving_container_command: Optional[Sequence[str]] = None,
model_serving_container_args: Optional[Sequence[str]] = None,
model_serving_container_environment_variables: Optional[Dict[str, str]] = None,
model_serving_container_ports: Optional[Sequence[int]] = None,
model_description: Optional[str] = None,
model_instance_schema_uri: Optional[str] = None,
model_parameters_schema_uri: Optional[str] = None,
model_prediction_schema_uri: Optional[str] = None,
project: Optional[str] = None,
location: Optional[str] = None,
labels: Optional[Dict[str, str]] = None,
training_encryption_spec_key_name: Optional[str] = None,
model_encryption_spec_key_name: Optional[str] = None,
staging_bucket: Optional[str] = None,
):
"""Returns CustomPythonPackageTrainingJob object"""
return CustomPythonPackageTrainingJob(
display_name=display_name,
container_uri=container_uri,
python_package_gcs_uri=python_package_gcs_uri,
python_module_name=python_module_name,
model_serving_container_image_uri=model_serving_container_image_uri,
model_serving_container_predict_route=model_serving_container_predict_route,
model_serving_container_health_route=model_serving_container_health_route,
model_serving_container_command=model_serving_container_command,
model_serving_container_args=model_serving_container_args,
model_serving_container_environment_variables=model_serving_container_environment_variables,
model_serving_container_ports=model_serving_container_ports,
model_description=model_description,
model_instance_schema_uri=model_instance_schema_uri,
model_parameters_schema_uri=model_parameters_schema_uri,
model_prediction_schema_uri=model_prediction_schema_uri,
project=project,
location=location,
credentials=self._get_credentials(),
labels=labels,
training_encryption_spec_key_name=training_encryption_spec_key_name,
model_encryption_spec_key_name=model_encryption_spec_key_name,
staging_bucket=staging_bucket,
)
def get_custom_training_job(
self,
display_name: str,
script_path: str,
container_uri: str,
requirements: Optional[Sequence[str]] = None,
model_serving_container_image_uri: Optional[str] = None,
model_serving_container_predict_route: Optional[str] = None,
model_serving_container_health_route: Optional[str] = None,
model_serving_container_command: Optional[Sequence[str]] = None,
model_serving_container_args: Optional[Sequence[str]] = None,
model_serving_container_environment_variables: Optional[Dict[str, str]] = None,
model_serving_container_ports: Optional[Sequence[int]] = None,
model_description: Optional[str] = None,
model_instance_schema_uri: Optional[str] = None,
model_parameters_schema_uri: Optional[str] = None,
model_prediction_schema_uri: Optional[str] = None,
project: Optional[str] = None,
location: Optional[str] = None,
labels: Optional[Dict[str, str]] = None,
training_encryption_spec_key_name: Optional[str] = None,
model_encryption_spec_key_name: Optional[str] = None,
staging_bucket: Optional[str] = None,
):
"""Returns CustomTrainingJob object"""
return CustomTrainingJob(
display_name=display_name,
script_path=script_path,
container_uri=container_uri,
requirements=requirements,
model_serving_container_image_uri=model_serving_container_image_uri,
model_serving_container_predict_route=model_serving_container_predict_route,
model_serving_container_health_route=model_serving_container_health_route,
model_serving_container_command=model_serving_container_command,
model_serving_container_args=model_serving_container_args,
model_serving_container_environment_variables=model_serving_container_environment_variables,
model_serving_container_ports=model_serving_container_ports,
model_description=model_description,
model_instance_schema_uri=model_instance_schema_uri,
model_parameters_schema_uri=model_parameters_schema_uri,
model_prediction_schema_uri=model_prediction_schema_uri,
project=project,
location=location,
credentials=self._get_credentials(),
labels=labels,
training_encryption_spec_key_name=training_encryption_spec_key_name,
model_encryption_spec_key_name=model_encryption_spec_key_name,
staging_bucket=staging_bucket,
)
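
    # A minimal sketch of the script-based factory (hypothetical values):
    #
    #   job = hook.get_custom_training_job(
    #       display_name="my-script-training",
    #       script_path="train.py",
    #       container_uri="gcr.io/my-project/training-image:latest",
    #       requirements=["pandas>=1.3"],
    #   )
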
@staticmethod
def extract_model_id(obj: Dict) -> str:
"""Returns unique id of the Model."""
return obj["name"].rpartition("/")[-1]
def wait_for_operation(self, operation: Operation, timeout: Optional[float] = None):
"""Waits for long-lasting operation to complete."""
try:
return operation.result(timeout=timeout)
except Exception:
error = operation.exception(timeout=timeout)
raise AirflowException(error)
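
    # A usage sketch (``operation`` is any google.api_core.operation.Operation
    # returned by a long-running API call; the timeout value is illustrative):
    #
    #   result = hook.wait_for_operation(operation, timeout=300)
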
def cancel_job(self) -> None:
"""Cancel Job for training pipeline"""
if self._job:
            self._job.cancel()

def _run_job(
self,
job: Union[
CustomTrainingJob,
CustomContainerTrainingJob,
CustomPythonPackageTrainingJob,
],
dataset: Optional[
Union[
datasets.ImageDataset,
datasets.TabularDataset,
datasets.TextDataset,
datasets.VideoDataset,
]
] = None,
annotation_schema_uri: Optional[str] = None,
model_display_name: Optional[str] = None,
model_labels: Optional[Dict[str, str]] = None,
base_output_dir: Optional[str] = None,
service_account: Optional[str] = None,
network: Optional[str] = None,
bigquery_destination: Optional[str] = None,
args: Optional[List[Union[str, float, int]]] = None,
environment_variables: Optional[Dict[str, str]] = None,
replica_count: int = 1,
machine_type: str = "n1-standard-4",
accelerator_type: str = "ACCELERATOR_TYPE_UNSPECIFIED",
accelerator_count: int = 0,
boot_disk_type: str = "pd-ssd",
boot_disk_size_gb: int = 100,
training_fraction_split: Optional[float] = None,
validation_fraction_split: Optional[float] = None,
test_fraction_split: Optional[float] = None,
training_filter_split: Optional[str] = None,
validation_filter_split: Optional[str] = None,
test_filter_split: Optional[str] = None,
predefined_split_column_name: Optional[str] = None,
timestamp_split_column_name: Optional[str] = None,
tensorboard: Optional[str] = None,
sync=True,
) -> models.Model:
"""Run Job for training pipeline"""
model = job.run(
dataset=dataset,
annotation_schema_uri=annotation_schema_uri,
model_display_name=model_display_name,
model_labels=model_labels,
base_output_dir=base_output_dir,
service_account=service_account,
network=network,
bigquery_destination=bigquery_destination,
args=args,
environment_variables=environment_variables,
replica_count=replica_count,
machine_type=machine_type,
accelerator_type=accelerator_type,
accelerator_count=accelerator_count,
boot_disk_type=boot_disk_type,
boot_disk_size_gb=boot_disk_size_gb,
training_fraction_split=training_fraction_split,
validation_fraction_split=validation_fraction_split,
test_fraction_split=test_fraction_split,
training_filter_split=training_filter_split,
validation_filter_split=validation_filter_split,
test_filter_split=test_filter_split,
predefined_split_column_name=predefined_split_column_name,
timestamp_split_column_name=timestamp_split_column_name,
tensorboard=tensorboard,
sync=sync,
)
if model:
model.wait()
return model
else:
raise AirflowException("Training did not produce a Managed Model returning None.")
@GoogleBaseHook.fallback_to_default_project_id
def cancel_pipeline_job(
self,
project_id: str,
region: str,
pipeline_job: str,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
"""
Cancels a PipelineJob. Starts asynchronous cancellation on the PipelineJob. The server makes a best
effort to cancel the pipeline, but success is not guaranteed. Clients can use
[PipelineService.GetPipelineJob][google.cloud.aiplatform.v1.PipelineService.GetPipelineJob] or other
methods to check whether the cancellation succeeded or whether the pipeline completed despite
cancellation. On successful cancellation, the PipelineJob is not deleted; instead it becomes a
pipeline with a [PipelineJob.error][google.cloud.aiplatform.v1.PipelineJob.error] value with a
[google.rpc.Status.code][google.rpc.Status.code] of 1, corresponding to ``Code.CANCELLED``, and
        [PipelineJob.state][google.cloud.aiplatform.v1.PipelineJob.state] is set to ``CANCELLED``.

:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param pipeline_job: The name of the PipelineJob to cancel.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_pipeline_service_client(region)
name = client.pipeline_job_path(project_id, region, pipeline_job)
client.cancel_pipeline_job(
request={
'name': name,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
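
    # A usage sketch with placeholder identifiers; cancel_training_pipeline and
    # cancel_custom_job below follow the same calling pattern:
    #
    #   hook.cancel_pipeline_job(
    #       project_id="my-project",
    #       region="us-central1",
    #       pipeline_job="my-pipeline-job-id",
    #   )
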
@GoogleBaseHook.fallback_to_default_project_id
def cancel_training_pipeline(
self,
project_id: str,
region: str,
training_pipeline: str,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
"""
Cancels a TrainingPipeline. Starts asynchronous cancellation on the TrainingPipeline. The server makes
a best effort to cancel the pipeline, but success is not guaranteed. Clients can use
[PipelineService.GetTrainingPipeline][google.cloud.aiplatform.v1.PipelineService.GetTrainingPipeline]
or other methods to check whether the cancellation succeeded or whether the pipeline completed despite
cancellation. On successful cancellation, the TrainingPipeline is not deleted; instead it becomes a
pipeline with a [TrainingPipeline.error][google.cloud.aiplatform.v1.TrainingPipeline.error] value with
a [google.rpc.Status.code][google.rpc.Status.code] of 1, corresponding to ``Code.CANCELLED``, and
        [TrainingPipeline.state][google.cloud.aiplatform.v1.TrainingPipeline.state] is set to ``CANCELLED``.

:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param training_pipeline: Required. The name of the TrainingPipeline to cancel.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_pipeline_service_client(region)
name = client.training_pipeline_path(project_id, region, training_pipeline)
client.cancel_training_pipeline(
request={
'name': name,
},
retry=retry,
timeout=timeout,
metadata=metadata,
        )

@GoogleBaseHook.fallback_to_default_project_id
def cancel_custom_job(
self,
project_id: str,
region: str,
custom_job: str,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
"""
Cancels a CustomJob. Starts asynchronous cancellation on the CustomJob. The server makes a best effort
to cancel the job, but success is not guaranteed. Clients can use
[JobService.GetCustomJob][google.cloud.aiplatform.v1.JobService.GetCustomJob] or other methods to
check whether the cancellation succeeded or whether the job completed despite cancellation. On
successful cancellation, the CustomJob is not deleted; instead it becomes a job with a
[CustomJob.error][google.cloud.aiplatform.v1.CustomJob.error] value with a
[google.rpc.Status.code][google.rpc.Status.code] of 1, corresponding to ``Code.CANCELLED``, and
        [CustomJob.state][google.cloud.aiplatform.v1.CustomJob.state] is set to ``CANCELLED``.

:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param custom_job: Required. The name of the CustomJob to cancel.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_job_service_client(region)
name = JobServiceClient.custom_job_path(project_id, region, custom_job)
client.cancel_custom_job(
request={
'name': name,
},
retry=retry,
timeout=timeout,
metadata=metadata,
        )

@GoogleBaseHook.fallback_to_default_project_id
def create_pipeline_job(
self,
project_id: str,
region: str,
pipeline_job: PipelineJob,
pipeline_job_id: str,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> PipelineJob:
"""
        Creates a PipelineJob. A PipelineJob will run immediately when created.

:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param pipeline_job: Required. The PipelineJob to create.
:param pipeline_job_id: The ID to use for the PipelineJob, which will become the final component of
the PipelineJob name. If not provided, an ID will be automatically generated.
This value should be less than 128 characters, and valid characters are /[a-z][0-9]-/.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_pipeline_service_client(region)
parent = client.common_location_path(project_id, region)
result = client.create_pipeline_job(
request={
'parent': parent,
'pipeline_job': pipeline_job,
'pipeline_job_id': pipeline_job_id,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
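
    # A usage sketch (placeholder values; the PipelineJob message is abbreviated,
    # a real job also needs its pipeline spec):
    #
    #   result = hook.create_pipeline_job(
    #       project_id="my-project",
    #       region="us-central1",
    #       pipeline_job={"display_name": "my-pipeline"},
    #       pipeline_job_id="my-pipeline-job",
    #   )
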
@GoogleBaseHook.fallback_to_default_project_id
def create_training_pipeline(
self,
project_id: str,
region: str,
training_pipeline: TrainingPipeline,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> TrainingPipeline:
"""
        Creates a TrainingPipeline. The pipeline starts running as soon as it is created.

:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param training_pipeline: Required. The TrainingPipeline to create.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_pipeline_service_client(region)
parent = client.common_location_path(project_id, region)
result = client.create_training_pipeline(
request={
'parent': parent,
'training_pipeline': training_pipeline,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
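
    # A usage sketch (abbreviated; a real TrainingPipeline message also needs
    # training_task_definition and training_task_inputs):
    #
    #   result = hook.create_training_pipeline(
    #       project_id="my-project",
    #       region="us-central1",
    #       training_pipeline={"display_name": "my-training-pipeline"},
    #   )
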
@GoogleBaseHook.fallback_to_default_project_id
def create_custom_job(
self,
project_id: str,
region: str,
custom_job: CustomJob,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> CustomJob:
"""
        Creates a CustomJob. The job starts running as soon as it is created.

:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param custom_job: Required. The CustomJob to create. This corresponds to the ``custom_job`` field on
the ``request`` instance; if ``request`` is provided, this should not be set.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_job_service_client(region)
parent = JobServiceClient.common_location_path(project_id, region)
result = client.create_custom_job(
request={
'parent': parent,
'custom_job': custom_job,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
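
    # A usage sketch (abbreviated; a real CustomJob message also needs a job_spec
    # with worker_pool_specs):
    #
    #   result = hook.create_custom_job(
    #       project_id="my-project",
    #       region="us-central1",
    #       custom_job={"display_name": "my-custom-job"},
    #   )
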
@GoogleBaseHook.fallback_to_default_project_id
def create_custom_container_training_job(
self,
project_id: str,
region: str,
display_name: str,
container_uri: str,
        command: Sequence[str] = (),
model_serving_container_image_uri: Optional[str] = None,
model_serving_container_predict_route: Optional[str] = None,
model_serving_container_health_route: Optional[str] = None,
model_serving_container_command: Optional[Sequence[str]] = None,
model_serving_container_args: Optional[Sequence[str]] = None,
model_serving_container_environment_variables: Optional[Dict[str, str]] = None,
model_serving_container_ports: Optional[Sequence[int]] = None,
model_description: Optional[str] = None,
model_instance_schema_uri: Optional[str] = None,
model_parameters_schema_uri: Optional[str] = None,
model_prediction_schema_uri: Optional[str] = None,
labels: Optional[Dict[str, str]] = None,
training_encryption_spec_key_name: Optional[str] = None,
model_encryption_spec_key_name: Optional[str] = None,
staging_bucket: Optional[str] = None,
# RUN
dataset: Optional[
Union[
datasets.ImageDataset,
datasets.TabularDataset,
datasets.TextDataset,
datasets.VideoDataset,
]
] = None,
annotation_schema_uri: Optional[str] = None,
model_display_name: Optional[str] = None,
model_labels: Optional[Dict[str, str]] = None,
base_output_dir: Optional[str] = None,
service_account: Optional[str] = None,
network: Optional[str] = None,
bigquery_destination: Optional[str] = None,
args: Optional[List[Union[str, float, int]]] = None,
environment_variables: Optional[Dict[str, str]] = None,
replica_count: int = 1,
machine_type: str = "n1-standard-4",
accelerator_type: str = "ACCELERATOR_TYPE_UNSPECIFIED",
accelerator_count: int = 0,
boot_disk_type: str = "pd-ssd",
boot_disk_size_gb: int = 100,
training_fraction_split: Optional[float] = None,
validation_fraction_split: Optional[float] = None,
test_fraction_split: Optional[float] = None,
training_filter_split: Optional[str] = None,
validation_filter_split: Optional[str] = None,
test_filter_split: Optional[str] = None,
predefined_split_column_name: Optional[str] = None,
timestamp_split_column_name: Optional[str] = None,
tensorboard: Optional[str] = None,
sync=True,
) -> models.Model:
"""
        Creates a Custom Container Training Job.

:param display_name: Required. The user-defined name of this TrainingPipeline.
:param command: The command to be invoked when the container is started.
            It overrides the entrypoint instruction in the Dockerfile when provided.
        :param container_uri: Required. URI of the training container image in GCR.
:param model_serving_container_image_uri: If the training produces a managed Vertex AI Model, the URI
of the Model serving container suitable for serving the model produced by the
training script.
        :param model_serving_container_predict_route: If the training produces a managed Vertex AI Model, an
            HTTP path to send prediction requests to the container, and which must be supported
            by it. If not specified, a default HTTP path will be used by Vertex AI.
        :param model_serving_container_health_route: If the training produces a managed Vertex AI Model, an
            HTTP path to send health check requests to the container, and which must be supported
            by it. If not specified, a standard HTTP path will be used by AI Platform.
:param model_serving_container_command: The command with which the container is run. Not executed
within a shell. The Docker image's ENTRYPOINT is used if this is not provided.
Variable references $(VAR_NAME) are expanded using the container's
environment. If a variable cannot be resolved, the reference in the
input string will be unchanged. The $(VAR_NAME) syntax can be escaped
            with a double $$, i.e.: $$(VAR_NAME). Escaped references will never be
expanded, regardless of whether the variable exists or not.
:param model_serving_container_args: The arguments to the command. The Docker image's CMD is used if
this is not provided. Variable references $(VAR_NAME) are expanded using the
container's environment. If a variable cannot be resolved, the reference
in the input string will be unchanged. The $(VAR_NAME) syntax can be
            escaped with a double $$, i.e.: $$(VAR_NAME). Escaped references will
never be expanded, regardless of whether the variable exists or not.
:param model_serving_container_environment_variables: The environment variables that are to be
present in the container. Should be a dictionary where keys are environment variable names
and values are environment variable values for those names.
        :param model_serving_container_ports: Declaration of ports that are exposed by the container. This
            field is primarily informational; it gives Vertex AI information about the
            network connections the container uses. Whether or not a port is listed here has
            no impact on whether the port is actually exposed; any port listening on
            the default "0.0.0.0" address inside a container will be accessible from
            the network.
:param model_description: The description of the Model.
:param model_instance_schema_uri: Optional. Points to a YAML file stored on Google Cloud
Storage describing the format of a single instance, which
            is used in
``PredictRequest.instances``,
``ExplainRequest.instances``
and
``BatchPredictionJob.input_config``.
The schema is defined as an OpenAPI 3.0.2 `Schema
Object <https://tinyurl.com/y538mdwt#schema-object>`__.
AutoML Models always have this field populated by AI
Platform. Note: The URI given on output will be immutable
and probably different, including the URI scheme, than the
one given on input. The output URI will point to a location
where the user only has a read access.
:param model_parameters_schema_uri: Optional. Points to a YAML file stored on Google Cloud
Storage describing the parameters of prediction and
explanation via
``PredictRequest.parameters``,
``ExplainRequest.parameters``
and
``BatchPredictionJob.model_parameters``.
The schema is defined as an OpenAPI 3.0.2 `Schema
Object <https://tinyurl.com/y538mdwt#schema-object>`__.
AutoML Models always have this field populated by AI
Platform, if no parameters are supported it is set to an
empty string. Note: The URI given on output will be
immutable and probably different, including the URI scheme,
than the one given on input. The output URI will point to a
location where the user only has a read access.
:param model_prediction_schema_uri: Optional. Points to a YAML file stored on Google Cloud
Storage describing the format of a single prediction
            produced by this Model, which is returned via
``PredictResponse.predictions``,
``ExplainResponse.explanations``,
and
``BatchPredictionJob.output_config``.
The schema is defined as an OpenAPI 3.0.2 `Schema
Object <https://tinyurl.com/y538mdwt#schema-object>`__.
AutoML Models always have this field populated by AI
Platform. Note: The URI given on output will be immutable
and probably different, including the URI scheme, than the
one given on input. The output URI will point to a location
where the user only has a read access.
:param project_id: Project to run training in.
:param region: Location to run training in.
:param labels: Optional. The labels with user-defined metadata to
organize TrainingPipelines.
Label keys and values can be no longer than 64
characters, can only
contain lowercase letters, numeric characters,
underscores and dashes. International characters
are allowed.
See https://goo.gl/xmQnxf for more information
and examples of labels.
:param training_encryption_spec_key_name: Optional. The Cloud KMS resource identifier of the customer
managed encryption key used to protect the training pipeline. Has the
form:
``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``.
The key needs to be in the same region as where the compute
resource is created.
If set, this TrainingPipeline will be secured by this key.
Note: Model trained by this TrainingPipeline is also secured
by this key if ``model_to_upload`` is not set separately.
:param model_encryption_spec_key_name: Optional. The Cloud KMS resource identifier of the customer
managed encryption key used to protect the model. Has the
form:
``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``.
The key needs to be in the same region as where the compute
resource is created.
If set, the trained Model will be secured by this key.
:param staging_bucket: Bucket used to stage source and training artifacts.
        :param dataset: The Vertex AI Dataset to fit this training against.
        :param annotation_schema_uri: Google Cloud Storage URI that points to a YAML file describing
            the annotation schema. The schema is defined as an OpenAPI 3.0.2
[Schema Object]
(https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schema-object)
Only Annotations that both match this schema and belong to
DataItems not ignored by the split method are used in
respectively training, validation or test role, depending on
the role of the DataItem they are on.
When used in conjunction with
``annotations_filter``,
the Annotations used for training are filtered by both
``annotations_filter``
and
``annotation_schema_uri``.
        :param model_display_name: If the script produces a managed Vertex AI Model, the display name of
            the Model. The name can be up to 128 characters long and can consist
            of any UTF-8 characters.
If not provided upon creation, the job's display_name is used.
:param model_labels: Optional. The labels with user-defined metadata to
organize your Models.
Label keys and values can be no longer than 64
characters, can only
contain lowercase letters, numeric characters,
underscores and dashes. International characters
are allowed.
See https://goo.gl/xmQnxf for more information
and examples of labels.
:param base_output_dir: GCS output directory of job. If not provided a timestamped directory in the
staging directory will be used.
Vertex AI sets the following environment variables when it runs your training code:
- AIP_MODEL_DIR: a Cloud Storage URI of a directory intended for saving model artifacts,
i.e. <base_output_dir>/model/
- AIP_CHECKPOINT_DIR: a Cloud Storage URI of a directory intended for saving checkpoints,
i.e. <base_output_dir>/checkpoints/
- AIP_TENSORBOARD_LOG_DIR: a Cloud Storage URI of a directory intended for saving TensorBoard
logs, i.e. <base_output_dir>/logs/
:param service_account: Specifies the service account for workload run-as account.
Users submitting jobs must have act-as permission on this run-as account.
:param network: The full name of the Compute Engine network to which the job
should be peered.
Private services access must already be configured for the network.
If left unspecified, the job is not peered with any network.
        :param bigquery_destination: Provide this field if `dataset` is a BigQuery dataset.
The BigQuery project location where the training data is to
be written to. In the given project a new dataset is created
with name
``dataset_<dataset-id>_<annotation-type>_<timestamp-of-training-call>``
where timestamp is in YYYY_MM_DDThh_mm_ss_sssZ format. All
training input data will be written into that dataset. In
the dataset three tables will be created, ``training``,
``validation`` and ``test``.
- AIP_DATA_FORMAT = "bigquery".
            - AIP_TRAINING_DATA_URI = "bigquery_destination.dataset_*.training"
- AIP_VALIDATION_DATA_URI = "bigquery_destination.dataset_*.validation"
- AIP_TEST_DATA_URI = "bigquery_destination.dataset_*.test"
:param args: Command line arguments to be passed to the Python script.
:param environment_variables: Environment variables to be passed to the container.
Should be a dictionary where keys are environment variable names
and values are environment variable values for those names.
At most 10 environment variables can be specified.
            The name of each environment variable must be unique.
        :param replica_count: The number of worker replicas. If replica_count == 1, one chief
            replica will be provisioned. If replica_count > 1, the remainder will be
            provisioned as a worker replica pool.
:param machine_type: The type of machine to use for training.
:param accelerator_type: Hardware accelerator type. One of ACCELERATOR_TYPE_UNSPECIFIED,
NVIDIA_TESLA_K80, NVIDIA_TESLA_P100, NVIDIA_TESLA_V100, NVIDIA_TESLA_P4,
NVIDIA_TESLA_T4
:param accelerator_count: The number of accelerators to attach to a worker replica.
:param boot_disk_type: Type of the boot disk, default is `pd-ssd`.
Valid values: `pd-ssd` (Persistent Disk Solid State Drive) or
`pd-standard` (Persistent Disk Hard Disk Drive).
:param boot_disk_size_gb: Size in GB of the boot disk, default is 100GB.
            Boot disk size must be within the range of [100, 64000].
:param training_fraction_split: Optional. The fraction of the input data that is to be used to train
the Model. This is ignored if Dataset is not provided.
:param validation_fraction_split: Optional. The fraction of the input data that is to be used to
validate the Model. This is ignored if Dataset is not provided.
:param test_fraction_split: Optional. The fraction of the input data that is to be used to evaluate
the Model. This is ignored if Dataset is not provided.
:param training_filter_split: Optional. A filter on DataItems of the Dataset. DataItems that match
this filter are used to train the Model. A filter with same syntax
as the one used in DatasetService.ListDataItems may be used. If a
single DataItem is matched by more than one of the FilterSplit filters,
then it is assigned to the first set that applies to it in the training,
validation, test order. This is ignored if Dataset is not provided.
:param validation_filter_split: Optional. A filter on DataItems of the Dataset. DataItems that match
this filter are used to validate the Model. A filter with same syntax
as the one used in DatasetService.ListDataItems may be used. If a
single DataItem is matched by more than one of the FilterSplit filters,
then it is assigned to the first set that applies to it in the training,
validation, test order. This is ignored if Dataset is not provided.
:param test_filter_split: Optional. A filter on DataItems of the Dataset. DataItems that match
this filter are used to test the Model. A filter with same syntax
as the one used in DatasetService.ListDataItems may be used. If a
single DataItem is matched by more than one of the FilterSplit filters,
then it is assigned to the first set that applies to it in the training,
validation, test order. This is ignored if Dataset is not provided.
:param predefined_split_column_name: Optional. The key is a name of one of the Dataset's data
columns. The value of the key (either the label's value or
value in the column) must be one of {``training``,
``validation``, ``test``}, and it defines to which set the
given piece of data is assigned. If for a piece of data the
key is not present or has an invalid value, that piece is
ignored by the pipeline.
Supported only for tabular and time series Datasets.
:param timestamp_split_column_name: Optional. The key is a name of one of the Dataset's data
            columns. The values of the key (the values in
the column) must be in RFC 3339 `date-time` format, where
`time-offset` = `"Z"` (e.g. 1985-04-12T23:20:50.52Z). If for a
piece of data the key is not present or has an invalid value,
that piece is ignored by the pipeline.
Supported only for tabular and time series Datasets.
:param tensorboard: Optional. The name of a Vertex AI resource to which this CustomJob will upload
logs. Format:
``projects/{project}/locations/{location}/tensorboards/{tensorboard}``
For more information on configuring your service account please visit:
https://cloud.google.com/vertex-ai/docs/experiments/tensorboard-training
        :param sync: Whether to execute the AI Platform job synchronously. If False, this method
            will be executed in a concurrent Future and any downstream object will
            be returned immediately and synced when the Future has completed.
"""
self._job = self.get_custom_container_training_job(
project=project_id,
location=region,
display_name=display_name,
container_uri=container_uri,
command=command,
model_serving_container_image_uri=model_serving_container_image_uri,
model_serving_container_predict_route=model_serving_container_predict_route,
model_serving_container_health_route=model_serving_container_health_route,
model_serving_container_command=model_serving_container_command,
model_serving_container_args=model_serving_container_args,
model_serving_container_environment_variables=model_serving_container_environment_variables,
model_serving_container_ports=model_serving_container_ports,
model_description=model_description,
model_instance_schema_uri=model_instance_schema_uri,
model_parameters_schema_uri=model_parameters_schema_uri,
model_prediction_schema_uri=model_prediction_schema_uri,
labels=labels,
training_encryption_spec_key_name=training_encryption_spec_key_name,
model_encryption_spec_key_name=model_encryption_spec_key_name,
staging_bucket=staging_bucket,
)
if not self._job:
raise AirflowException("CustomJob was not created")
model = self._run_job(
job=self._job,
dataset=dataset,
annotation_schema_uri=annotation_schema_uri,
model_display_name=model_display_name,
model_labels=model_labels,
base_output_dir=base_output_dir,
service_account=service_account,
network=network,
bigquery_destination=bigquery_destination,
args=args,
environment_variables=environment_variables,
replica_count=replica_count,
machine_type=machine_type,
accelerator_type=accelerator_type,
accelerator_count=accelerator_count,
boot_disk_type=boot_disk_type,
boot_disk_size_gb=boot_disk_size_gb,
training_fraction_split=training_fraction_split,
validation_fraction_split=validation_fraction_split,
test_fraction_split=test_fraction_split,
training_filter_split=training_filter_split,
validation_filter_split=validation_filter_split,
test_filter_split=test_filter_split,
predefined_split_column_name=predefined_split_column_name,
timestamp_split_column_name=timestamp_split_column_name,
tensorboard=tensorboard,
sync=sync,
)
return model
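
    # A minimal sketch for the method above (all values are placeholders). A
    # serving image is included because _run_job raises unless training
    # produces a managed Model:
    #
    #   model = hook.create_custom_container_training_job(
    #       project_id="my-project",
    #       region="us-central1",
    #       display_name="my-container-training",
    #       container_uri="gcr.io/my-project/trainer:latest",
    #       command=["python", "train.py"],
    #       model_serving_container_image_uri="gcr.io/my-project/serving:latest",
    #       staging_bucket="gs://my-staging-bucket",
    #   )
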
@GoogleBaseHook.fallback_to_default_project_id
def create_custom_python_package_training_job(
self,
project_id: str,
region: str,
display_name: str,
python_package_gcs_uri: str,
python_module_name: str,
container_uri: str,
model_serving_container_image_uri: Optional[str] = None,
model_serving_container_predict_route: Optional[str] = None,
model_serving_container_health_route: Optional[str] = None,
model_serving_container_command: Optional[Sequence[str]] = None,
model_serving_container_args: Optional[Sequence[str]] = None,
model_serving_container_environment_variables: Optional[Dict[str, str]] = None,
model_serving_container_ports: Optional[Sequence[int]] = None,
model_description: Optional[str] = None,
model_instance_schema_uri: Optional[str] = None,
model_parameters_schema_uri: Optional[str] = None,
model_prediction_schema_uri: Optional[str] = None,
labels: Optional[Dict[str, str]] = None,
training_encryption_spec_key_name: Optional[str] = None,
model_encryption_spec_key_name: Optional[str] = None,
staging_bucket: Optional[str] = None,
# RUN
dataset: Optional[
Union[
datasets.ImageDataset,
datasets.TabularDataset,
datasets.TextDataset,
datasets.VideoDataset,
]
] = None,
annotation_schema_uri: Optional[str] = None,
model_display_name: Optional[str] = None,
model_labels: Optional[Dict[str, str]] = None,
base_output_dir: Optional[str] = None,
service_account: Optional[str] = None,
network: Optional[str] = None,
bigquery_destination: Optional[str] = None,
args: Optional[List[Union[str, float, int]]] = None,
environment_variables: Optional[Dict[str, str]] = None,
replica_count: int = 1,
machine_type: str = "n1-standard-4",
accelerator_type: str = "ACCELERATOR_TYPE_UNSPECIFIED",
accelerator_count: int = 0,
boot_disk_type: str = "pd-ssd",
boot_disk_size_gb: int = 100,
training_fraction_split: Optional[float] = None,
validation_fraction_split: Optional[float] = None,
test_fraction_split: Optional[float] = None,
training_filter_split: Optional[str] = None,
validation_filter_split: Optional[str] = None,
test_filter_split: Optional[str] = None,
predefined_split_column_name: Optional[str] = None,
timestamp_split_column_name: Optional[str] = None,
tensorboard: Optional[str] = None,
sync=True,
) -> models.Model:
"""
        Creates a Custom Python Package Training Job.

:param display_name: Required. The user-defined name of this TrainingPipeline.
        :param python_package_gcs_uri: Required. GCS location of the training Python package.
        :param python_module_name: Required. The module name of the training Python package.
        :param container_uri: Required. URI of the training container image in GCR.
:param model_serving_container_image_uri: If the training produces a managed Vertex AI Model, the URI
of the Model serving container suitable for serving the model produced by the
training script.
        :param model_serving_container_predict_route: If the training produces a managed Vertex AI Model, an
            HTTP path to send prediction requests to the container, and which must be supported
            by it. If not specified, a default HTTP path will be used by Vertex AI.
        :param model_serving_container_health_route: If the training produces a managed Vertex AI Model, an
            HTTP path to send health check requests to the container, and which must be supported
            by it. If not specified, a standard HTTP path will be used by AI Platform.
:param model_serving_container_command: The command with which the container is run. Not executed
within a shell. The Docker image's ENTRYPOINT is used if this is not provided.
Variable references $(VAR_NAME) are expanded using the container's
environment. If a variable cannot be resolved, the reference in the
input string will be unchanged. The $(VAR_NAME) syntax can be escaped
            with a double $$, i.e.: $$(VAR_NAME). Escaped references will never be
expanded, regardless of whether the variable exists or not.
:param model_serving_container_args: The arguments to the command. The Docker image's CMD is used if
this is not provided. Variable references $(VAR_NAME) are expanded using the
container's environment. If a variable cannot be resolved, the reference
in the input string will be unchanged. The $(VAR_NAME) syntax can be
            escaped with a double $$, i.e.: $$(VAR_NAME). Escaped references will
never be expanded, regardless of whether the variable exists or not.
:param model_serving_container_environment_variables: The environment variables that are to be
present in the container. Should be a dictionary where keys are environment variable names
and values are environment variable values for those names.
        :param model_serving_container_ports: Declaration of ports that are exposed by the container. This
            field is primarily informational; it gives Vertex AI information about the
            network connections the container uses. Whether or not a port is listed here has
            no impact on whether the port is actually exposed; any port listening on
            the default "0.0.0.0" address inside a container will be accessible from
            the network.
:param model_description: The description of the Model.
:param model_instance_schema_uri: Optional. Points to a YAML file stored on Google Cloud
Storage describing the format of a single instance, which
            is used in
``PredictRequest.instances``,
``ExplainRequest.instances``
and
``BatchPredictionJob.input_config``.
The schema is defined as an OpenAPI 3.0.2 `Schema
Object <https://tinyurl.com/y538mdwt#schema-object>`__.
AutoML Models always have this field populated by AI
Platform. Note: The URI given on output will be immutable
and probably different, including the URI scheme, than the
one given on input. The output URI will point to a location
where the user only has a read access.
:param model_parameters_schema_uri: Optional. Points to a YAML file stored on Google Cloud
Storage describing the parameters of prediction and
explanation via
``PredictRequest.parameters``,
``ExplainRequest.parameters``
and
``BatchPredictionJob.model_parameters``.
The schema is defined as an OpenAPI 3.0.2 `Schema
Object <https://tinyurl.com/y538mdwt#schema-object>`__.
AutoML Models always have this field populated by AI
Platform, if no parameters are supported it is set to an
empty string. Note: The URI given on output will be
immutable and probably different, including the URI scheme,
than the one given on input. The output URI will point to a
location where the user only has a read access.
:param model_prediction_schema_uri: Optional. Points to a YAML file stored on Google Cloud
Storage describing the format of a single prediction
            produced by this Model, which is returned via
``PredictResponse.predictions``,
``ExplainResponse.explanations``,
and
``BatchPredictionJob.output_config``.
The schema is defined as an OpenAPI 3.0.2 `Schema
Object <https://tinyurl.com/y538mdwt#schema-object>`__.
AutoML Models always have this field populated by AI
Platform. Note: The URI given on output will be immutable
and probably different, including the URI scheme, than the
one given on input. The output URI will point to a location
where the user only has a read access.
:param project_id: Project to run training in.
:param region: Location to run training in.
:param labels: Optional. The labels with user-defined metadata to
organize TrainingPipelines.
Label keys and values can be no longer than 64
characters, can only
contain lowercase letters, numeric characters,
underscores and dashes. International characters
are allowed.
See https://goo.gl/xmQnxf for more information
and examples of labels.
:param training_encryption_spec_key_name: Optional. The Cloud KMS resource identifier of the customer
managed encryption key used to protect the training pipeline. Has the
form:
``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``.
The key needs to be in the same region as where the compute
resource is created.
If set, this TrainingPipeline will be secured by this key.
Note: Model trained by this TrainingPipeline is also secured
by this key if ``model_to_upload`` is not set separately.
:param model_encryption_spec_key_name: Optional. The Cloud KMS resource identifier of the customer
managed encryption key used to protect the model. Has the
form:
``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``.
The key needs to be in the same region as where the compute
resource is created.
If set, the trained Model will be secured by this key.
:param staging_bucket: Bucket used to stage source and training artifacts.
        :param dataset: The Vertex AI Dataset to fit this training against.
        :param annotation_schema_uri: Google Cloud Storage URI that points to a YAML file describing
            the annotation schema. The schema is defined as an OpenAPI 3.0.2
[Schema Object]
(https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schema-object)
Only Annotations that both match this schema and belong to
DataItems not ignored by the split method are used in
respectively training, validation or test role, depending on
the role of the DataItem they are on.
When used in conjunction with
``annotations_filter``,
the Annotations used for training are filtered by both
``annotations_filter``
and
``annotation_schema_uri``.
        :param model_display_name: If the script produces a managed Vertex AI Model, the display name of
            the Model. The name can be up to 128 characters long and can consist
            of any UTF-8 characters.
If not provided upon creation, the job's display_name is used.
:param model_labels: Optional. The labels with user-defined metadata to
organize your Models.
Label keys and values can be no longer than 64
characters, can only
contain lowercase letters, numeric characters,
underscores and dashes. International characters
are allowed.
See https://goo.gl/xmQnxf for more information
and examples of labels.
:param base_output_dir: GCS output directory of job. If not provided a timestamped directory in the
staging directory will be used.
Vertex AI sets the following environment variables when it runs your training code:
- AIP_MODEL_DIR: a Cloud Storage URI of a directory intended for saving model artifacts,
i.e. <base_output_dir>/model/
- AIP_CHECKPOINT_DIR: a Cloud Storage URI of a directory intended for saving checkpoints,
i.e. <base_output_dir>/checkpoints/
- AIP_TENSORBOARD_LOG_DIR: a Cloud Storage URI of a directory intended for saving TensorBoard
logs, i.e. <base_output_dir>/logs/
:param service_account: Specifies the service account for workload run-as account.
Users submitting jobs must have act-as permission on this run-as account.
:param network: The full name of the Compute Engine network to which the job
should be peered.
Private services access must already be configured for the network.
If left unspecified, the job is not peered with any network.
        :param bigquery_destination: Provide this field if `dataset` is a BigQuery dataset.
The BigQuery project location where the training data is to
be written to. In the given project a new dataset is created
with name
``dataset_<dataset-id>_<annotation-type>_<timestamp-of-training-call>``
where timestamp is in YYYY_MM_DDThh_mm_ss_sssZ format. All
training input data will be written into that dataset. In
the dataset three tables will be created, ``training``,
``validation`` and ``test``.
- AIP_DATA_FORMAT = "bigquery".
            - AIP_TRAINING_DATA_URI = "bigquery_destination.dataset_*.training"
- AIP_VALIDATION_DATA_URI = "bigquery_destination.dataset_*.validation"
- AIP_TEST_DATA_URI = "bigquery_destination.dataset_*.test"
:param args: Command line arguments to be passed to the Python script.
:param environment_variables: Environment variables to be passed to the container.
Should be a dictionary where keys are environment variable names
and values are environment variable values for those names.
At most 10 environment variables can be specified.
            The name of each environment variable must be unique.
        :param replica_count: The number of worker replicas. If replica_count == 1, one chief
            replica will be provisioned. If replica_count > 1, the remainder will be
            provisioned as a worker replica pool.
:param machine_type: The type of machine to use for training.
:param accelerator_type: Hardware accelerator type. One of ACCELERATOR_TYPE_UNSPECIFIED,
NVIDIA_TESLA_K80, NVIDIA_TESLA_P100, NVIDIA_TESLA_V100, NVIDIA_TESLA_P4,
NVIDIA_TESLA_T4
:param accelerator_count: The number of accelerators to attach to a worker replica.
:param boot_disk_type: Type of the boot disk, default is `pd-ssd`.
Valid values: `pd-ssd` (Persistent Disk Solid State Drive) or
`pd-standard` (Persistent Disk Hard Disk Drive).
:param boot_disk_size_gb: Size in GB of the boot disk, default is 100GB.
            Boot disk size must be within the range of [100, 64000].
:param training_fraction_split: Optional. The fraction of the input data that is to be used to train
the Model. This is ignored if Dataset is not provided.
:param validation_fraction_split: Optional. The fraction of the input data that is to be used to
validate the Model. This is ignored if Dataset is not provided.
:param test_fraction_split: Optional. The fraction of the input data that is to be used to evaluate
the Model. This is ignored if Dataset is not provided.
:param training_filter_split: Optional. A filter on DataItems of the Dataset. DataItems that match
this filter are used to train the Model. A filter with same syntax
as the one used in DatasetService.ListDataItems may be used. If a
single DataItem is matched by more than one of the FilterSplit filters,
then it is assigned to the first set that applies to it in the training,
validation, test order. This is ignored if Dataset is not provided.
:param validation_filter_split: Optional. A filter on DataItems of the Dataset. DataItems that match
this filter are used to validate the Model. A filter with same syntax
as the one used in DatasetService.ListDataItems may be used. If a
single DataItem is matched by more than one of the FilterSplit filters,
then it is assigned to the first set that applies to it in the training,
validation, test order. This is ignored if Dataset is not provided.
:param test_filter_split: Optional. A filter on DataItems of the Dataset. DataItems that match
this filter are used to test the Model. A filter with same syntax
as the one used in DatasetService.ListDataItems may be used. If a
single DataItem is matched by more than one of the FilterSplit filters,
then it is assigned to the first set that applies to it in the training,
validation, test order. This is ignored if Dataset is not provided.
:param predefined_split_column_name: Optional. The key is a name of one of the Dataset's data
columns. The value of the key (either the label's value or
value in the column) must be one of {``training``,
``validation``, ``test``}, and it defines to which set the
given piece of data is assigned. If for a piece of data the
key is not present or has an invalid value, that piece is
ignored by the pipeline.
Supported only for tabular and time series Datasets.
:param timestamp_split_column_name: Optional. The key is a name of one of the Dataset's data
            columns. The values of the key (the values in
the column) must be in RFC 3339 `date-time` format, where
`time-offset` = `"Z"` (e.g. 1985-04-12T23:20:50.52Z). If for a
piece of data the key is not present or has an invalid value,
that piece is ignored by the pipeline.
Supported only for tabular and time series Datasets.
:param tensorboard: Optional. The name of a Vertex AI resource to which this CustomJob will upload
logs. Format:
``projects/{project}/locations/{location}/tensorboards/{tensorboard}``
For more information on configuring your service account please visit:
https://cloud.google.com/vertex-ai/docs/experiments/tensorboard-training
        :param sync: Whether to execute the AI Platform job synchronously. If False, this method
            will be executed in a concurrent Future and any downstream object will
            be returned immediately and synced when the Future has completed.
"""
self._job = self.get_custom_python_package_training_job(
project=project_id,
location=region,
display_name=display_name,
python_package_gcs_uri=python_package_gcs_uri,
python_module_name=python_module_name,
container_uri=container_uri,
model_serving_container_image_uri=model_serving_container_image_uri,
model_serving_container_predict_route=model_serving_container_predict_route,
model_serving_container_health_route=model_serving_container_health_route,
model_serving_container_command=model_serving_container_command,
model_serving_container_args=model_serving_container_args,
model_serving_container_environment_variables=model_serving_container_environment_variables,
model_serving_container_ports=model_serving_container_ports,
model_description=model_description,
model_instance_schema_uri=model_instance_schema_uri,
model_parameters_schema_uri=model_parameters_schema_uri,
model_prediction_schema_uri=model_prediction_schema_uri,
labels=labels,
training_encryption_spec_key_name=training_encryption_spec_key_name,
model_encryption_spec_key_name=model_encryption_spec_key_name,
staging_bucket=staging_bucket,
)
if not self._job:
raise AirflowException("CustomJob was not created")
model = self._run_job(
job=self._job,
dataset=dataset,
annotation_schema_uri=annotation_schema_uri,
model_display_name=model_display_name,
model_labels=model_labels,
base_output_dir=base_output_dir,
service_account=service_account,
network=network,
bigquery_destination=bigquery_destination,
args=args,
environment_variables=environment_variables,
replica_count=replica_count,
machine_type=machine_type,
accelerator_type=accelerator_type,
accelerator_count=accelerator_count,
boot_disk_type=boot_disk_type,
boot_disk_size_gb=boot_disk_size_gb,
training_fraction_split=training_fraction_split,
validation_fraction_split=validation_fraction_split,
test_fraction_split=test_fraction_split,
training_filter_split=training_filter_split,
validation_filter_split=validation_filter_split,
test_filter_split=test_filter_split,
predefined_split_column_name=predefined_split_column_name,
timestamp_split_column_name=timestamp_split_column_name,
tensorboard=tensorboard,
sync=sync,
)
return model
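
    # The python-package variant follows the same pattern (placeholders
    # throughout; a serving image again ensures training yields a Model):
    #
    #   model = hook.create_custom_python_package_training_job(
    #       project_id="my-project",
    #       region="us-central1",
    #       display_name="my-package-training",
    #       python_package_gcs_uri="gs://my-bucket/trainer-0.1.tar.gz",
    #       python_module_name="trainer.task",
    #       container_uri="gcr.io/my-project/training-image:latest",
    #       model_serving_container_image_uri="gcr.io/my-project/serving:latest",
    #       staging_bucket="gs://my-staging-bucket",
    #   )
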
@GoogleBaseHook.fallback_to_default_project_id
def create_custom_training_job(
self,
project_id: str,
region: str,
display_name: str,
script_path: str,
container_uri: str,
requirements: Optional[Sequence[str]] = None,
model_serving_container_image_uri: Optional[str] = None,
model_serving_container_predict_route: Optional[str] = None,
model_serving_container_health_route: Optional[str] = None,
model_serving_container_command: Optional[Sequence[str]] = None,
model_serving_container_args: Optional[Sequence[str]] = None,
model_serving_container_environment_variables: Optional[Dict[str, str]] = None,
model_serving_container_ports: Optional[Sequence[int]] = None,
model_description: Optional[str] = None,
model_instance_schema_uri: Optional[str] = None,
model_parameters_schema_uri: Optional[str] = None,
model_prediction_schema_uri: Optional[str] = None,
labels: Optional[Dict[str, str]] = None,
training_encryption_spec_key_name: Optional[str] = None,
model_encryption_spec_key_name: Optional[str] = None,
staging_bucket: Optional[str] = None,
# RUN
dataset: Optional[
Union[
datasets.ImageDataset,
datasets.TabularDataset,
datasets.TextDataset,
datasets.VideoDataset,
]
] = None,
annotation_schema_uri: Optional[str] = None,
model_display_name: Optional[str] = None,
model_labels: Optional[Dict[str, str]] = None,
base_output_dir: Optional[str] = None,
service_account: Optional[str] = None,
network: Optional[str] = None,
bigquery_destination: Optional[str] = None,
args: Optional[List[Union[str, float, int]]] = None,
environment_variables: Optional[Dict[str, str]] = None,
replica_count: int = 1,
machine_type: str = "n1-standard-4",
accelerator_type: str = "ACCELERATOR_TYPE_UNSPECIFIED",
accelerator_count: int = 0,
boot_disk_type: str = "pd-ssd",
boot_disk_size_gb: int = 100,
training_fraction_split: Optional[float] = None,
validation_fraction_split: Optional[float] = None,
test_fraction_split: Optional[float] = None,
training_filter_split: Optional[str] = None,
validation_filter_split: Optional[str] = None,
test_filter_split: Optional[str] = None,
predefined_split_column_name: Optional[str] = None,
timestamp_split_column_name: Optional[str] = None,
tensorboard: Optional[str] = None,
sync=True,
) -> models.Model:
"""
        Creates a Custom Training Job.

:param display_name: Required. The user-defined name of this TrainingPipeline.
:param script_path: Required. Local path to training script.
        :param container_uri: Required. URI of the training container image in GCR.
        :param requirements: List of Python package dependencies for the script.
:param model_serving_container_image_uri: If the training produces a managed Vertex AI Model, the URI
of the Model serving container suitable for serving the model produced by the
training script.
        :param model_serving_container_predict_route: If the training produces a managed Vertex AI Model, an
            HTTP path to send prediction requests to the container, and which must be supported
            by it. If not specified, a default HTTP path will be used by Vertex AI.
        :param model_serving_container_health_route: If the training produces a managed Vertex AI Model, an
            HTTP path to send health check requests to the container, and which must be supported
            by it. If not specified, a standard HTTP path will be used by AI Platform.
:param model_serving_container_command: The command with which the container is run. Not executed
within a shell. The Docker image's ENTRYPOINT is used if this is not provided.
Variable references $(VAR_NAME) are expanded using the container's
environment. If a variable cannot be resolved, the reference in the
input string will be unchanged. The $(VAR_NAME) syntax can be escaped
            with a double $$, i.e.: $$(VAR_NAME). Escaped references will never be
expanded, regardless of whether the variable exists or not.
:param model_serving_container_args: The arguments to the command. The Docker image's CMD is used if
this is not provided. Variable references $(VAR_NAME) are expanded using the
container's environment. If a variable cannot be resolved, the reference
in the input string will be unchanged. The $(VAR_NAME) syntax can be
            escaped with a double $$, i.e.: $$(VAR_NAME). Escaped references will
never be expanded, regardless of whether the variable exists or not.
:param model_serving_container_environment_variables: The environment variables that are to be
present in the container. Should be a dictionary where keys are environment variable names
and values are environment variable values for those names.
        :param model_serving_container_ports: Declaration of ports that are exposed by the container. This
            field is primarily informational; it gives Vertex AI information about the
            network connections the container uses. Whether or not a port is listed here has
            no impact on whether the port is actually exposed; any port listening on
            the default "0.0.0.0" address inside a container will be accessible from
            the network.
:param model_description: The description of the Model.
:param model_instance_schema_uri: Optional. Points to a YAML file stored on Google Cloud
Storage describing the format of a single instance, which
            is used in
``PredictRequest.instances``,
``ExplainRequest.instances``
and
``BatchPredictionJob.input_config``.
The schema is defined as an OpenAPI 3.0.2 `Schema
Object <https://tinyurl.com/y538mdwt#schema-object>`__.
AutoML Models always have this field populated by AI
Platform. Note: The URI given on output will be immutable
and probably different, including the URI scheme, than the
one given on input. The output URI will point to a location
where the user only has a read access.
:param model_parameters_schema_uri: Optional. Points to a YAML file stored on Google Cloud
Storage describing the parameters of prediction and
explanation via
``PredictRequest.parameters``,
``ExplainRequest.parameters``
and
``BatchPredictionJob.model_parameters``.
The schema is defined as an OpenAPI 3.0.2 `Schema
Object <https://tinyurl.com/y538mdwt#schema-object>`__.
AutoML Models always have this field populated by AI
Platform, if no parameters are supported it is set to an
empty string. Note: The URI given on output will be
immutable and probably different, including the URI scheme,
than the one given on input. The output URI will point to a
location where the user only has a read access.
:param model_prediction_schema_uri: Optional. Points to a YAML file stored on Google Cloud
Storage describing the format of a single prediction
            produced by this Model, which is returned via
``PredictResponse.predictions``,
``ExplainResponse.explanations``,
and
``BatchPredictionJob.output_config``.
The schema is defined as an OpenAPI 3.0.2 `Schema
Object <https://tinyurl.com/y538mdwt#schema-object>`__.
AutoML Models always have this field populated by AI
Platform. Note: The URI given on output will be immutable
and probably different, including the URI scheme, than the
one given on input. The output URI will point to a location
where the user only has a read access.
:param project_id: Project to run training in.
:param region: Location to run training in.
:param labels: Optional. The labels with user-defined metadata to
organize TrainingPipelines.
Label keys and values can be no longer than 64
characters, can only
contain lowercase letters, numeric characters,
underscores and dashes. International characters
are allowed.
See https://goo.gl/xmQnxf for more information
and examples of labels.
:param training_encryption_spec_key_name: Optional. The Cloud KMS resource identifier of the customer
managed encryption key used to protect the training pipeline. Has the
form:
``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``.
The key needs to be in the same region as where the compute
resource is created.
If set, this TrainingPipeline will be secured by this key.
Note: Model trained by this TrainingPipeline is also secured
by this key if ``model_to_upload`` is not set separately.
:param model_encryption_spec_key_name: Optional. The Cloud KMS resource identifier of the customer
managed encryption key used to protect the model. Has the
form:
``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``.
The key needs to be in the same region as where the compute
resource is created.
If set, the trained Model will be secured by this key.
:param staging_bucket: Bucket used to stage source and training artifacts.
        :param dataset: The Vertex AI Dataset object to fit this training against.
        :param annotation_schema_uri: Google Cloud Storage URI that points to a YAML file describing the
            annotation schema. The schema is defined as an OpenAPI 3.0.2
            [Schema Object]
            (https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schema-object)
            Only Annotations that both match this schema and belong to
            DataItems not ignored by the split method are used, in the
            training, validation, or test role respectively, depending on
            the role of the DataItem they are on.
When used in conjunction with
``annotations_filter``,
the Annotations used for training are filtered by both
``annotations_filter``
and
``annotation_schema_uri``.
        :param model_display_name: If the script produces a managed Vertex AI Model, the display name of
            the Model. The name can be up to 128 characters long and can consist
            of any UTF-8 characters.
If not provided upon creation, the job's display_name is used.
:param model_labels: Optional. The labels with user-defined metadata to
organize your Models.
Label keys and values can be no longer than 64
characters, can only
contain lowercase letters, numeric characters,
underscores and dashes. International characters
are allowed.
See https://goo.gl/xmQnxf for more information
and examples of labels.
        :param base_output_dir: GCS output directory of the job. If not provided, a timestamped directory
            in the staging directory will be used.
Vertex AI sets the following environment variables when it runs your training code:
- AIP_MODEL_DIR: a Cloud Storage URI of a directory intended for saving model artifacts,
i.e. <base_output_dir>/model/
- AIP_CHECKPOINT_DIR: a Cloud Storage URI of a directory intended for saving checkpoints,
i.e. <base_output_dir>/checkpoints/
- AIP_TENSORBOARD_LOG_DIR: a Cloud Storage URI of a directory intended for saving TensorBoard
logs, i.e. <base_output_dir>/logs/
:param service_account: Specifies the service account for workload run-as account.
Users submitting jobs must have act-as permission on this run-as account.
:param network: The full name of the Compute Engine network to which the job
should be peered.
Private services access must already be configured for the network.
If left unspecified, the job is not peered with any network.
        :param bigquery_destination: Provide this field if `dataset` is a BigQuery dataset.
The BigQuery project location where the training data is to
be written to. In the given project a new dataset is created
with name
``dataset_<dataset-id>_<annotation-type>_<timestamp-of-training-call>``
where timestamp is in YYYY_MM_DDThh_mm_ss_sssZ format. All
training input data will be written into that dataset. In
the dataset three tables will be created, ``training``,
``validation`` and ``test``.
- AIP_DATA_FORMAT = "bigquery".
            - AIP_TRAINING_DATA_URI = "bigquery_destination.dataset_*.training"
- AIP_VALIDATION_DATA_URI = "bigquery_destination.dataset_*.validation"
- AIP_TEST_DATA_URI = "bigquery_destination.dataset_*.test"
:param args: Command line arguments to be passed to the Python script.
:param environment_variables: Environment variables to be passed to the container.
Should be a dictionary where keys are environment variable names
and values are environment variable values for those names.
            At most 10 environment variables can be specified.
            The name of each environment variable must be unique.
        :param replica_count: The number of worker replicas. If replica_count = 1, one chief
            replica will be provisioned. If replica_count > 1, the remainder will be
            provisioned as a worker replica pool.
:param machine_type: The type of machine to use for training.
:param accelerator_type: Hardware accelerator type. One of ACCELERATOR_TYPE_UNSPECIFIED,
NVIDIA_TESLA_K80, NVIDIA_TESLA_P100, NVIDIA_TESLA_V100, NVIDIA_TESLA_P4,
NVIDIA_TESLA_T4
:param accelerator_count: The number of accelerators to attach to a worker replica.
:param boot_disk_type: Type of the boot disk, default is `pd-ssd`.
Valid values: `pd-ssd` (Persistent Disk Solid State Drive) or
`pd-standard` (Persistent Disk Hard Disk Drive).
:param boot_disk_size_gb: Size in GB of the boot disk, default is 100GB.
            The boot disk size must be within the range of [100, 64000].
:param training_fraction_split: Optional. The fraction of the input data that is to be used to train
the Model. This is ignored if Dataset is not provided.
:param validation_fraction_split: Optional. The fraction of the input data that is to be used to
validate the Model. This is ignored if Dataset is not provided.
:param test_fraction_split: Optional. The fraction of the input data that is to be used to evaluate
the Model. This is ignored if Dataset is not provided.
:param training_filter_split: Optional. A filter on DataItems of the Dataset. DataItems that match
            this filter are used to train the Model. A filter with the same syntax
as the one used in DatasetService.ListDataItems may be used. If a
single DataItem is matched by more than one of the FilterSplit filters,
then it is assigned to the first set that applies to it in the training,
validation, test order. This is ignored if Dataset is not provided.
:param validation_filter_split: Optional. A filter on DataItems of the Dataset. DataItems that match
            this filter are used to validate the Model. A filter with the same syntax
as the one used in DatasetService.ListDataItems may be used. If a
single DataItem is matched by more than one of the FilterSplit filters,
then it is assigned to the first set that applies to it in the training,
validation, test order. This is ignored if Dataset is not provided.
:param test_filter_split: Optional. A filter on DataItems of the Dataset. DataItems that match
            this filter are used to test the Model. A filter with the same syntax
as the one used in DatasetService.ListDataItems may be used. If a
single DataItem is matched by more than one of the FilterSplit filters,
then it is assigned to the first set that applies to it in the training,
validation, test order. This is ignored if Dataset is not provided.
:param predefined_split_column_name: Optional. The key is a name of one of the Dataset's data
columns. The value of the key (either the label's value or
value in the column) must be one of {``training``,
``validation``, ``test``}, and it defines to which set the
given piece of data is assigned. If for a piece of data the
key is not present or has an invalid value, that piece is
ignored by the pipeline.
Supported only for tabular and time series Datasets.
:param timestamp_split_column_name: Optional. The key is a name of one of the Dataset's data
            columns. The values of the key (the values in
            the column) must be in RFC 3339 `date-time` format, where
`time-offset` = `"Z"` (e.g. 1985-04-12T23:20:50.52Z). If for a
piece of data the key is not present or has an invalid value,
that piece is ignored by the pipeline.
Supported only for tabular and time series Datasets.
        :param tensorboard: Optional. The name of a Vertex AI Tensorboard resource to which this CustomJob
            will upload logs. Format:
``projects/{project}/locations/{location}/tensorboards/{tensorboard}``
For more information on configuring your service account please visit:
https://cloud.google.com/vertex-ai/docs/experiments/tensorboard-training
        :param sync: Whether to execute the AI Platform job synchronously. If False, this method
            runs in a concurrent Future: any downstream object is returned immediately
            and synced once the Future completes.
"""
self._job = self.get_custom_training_job(
project=project_id,
location=region,
display_name=display_name,
script_path=script_path,
container_uri=container_uri,
requirements=requirements,
model_serving_container_image_uri=model_serving_container_image_uri,
model_serving_container_predict_route=model_serving_container_predict_route,
model_serving_container_health_route=model_serving_container_health_route,
model_serving_container_command=model_serving_container_command,
model_serving_container_args=model_serving_container_args,
model_serving_container_environment_variables=model_serving_container_environment_variables,
model_serving_container_ports=model_serving_container_ports,
model_description=model_description,
model_instance_schema_uri=model_instance_schema_uri,
model_parameters_schema_uri=model_parameters_schema_uri,
model_prediction_schema_uri=model_prediction_schema_uri,
labels=labels,
training_encryption_spec_key_name=training_encryption_spec_key_name,
model_encryption_spec_key_name=model_encryption_spec_key_name,
staging_bucket=staging_bucket,
)
if not self._job:
raise AirflowException("CustomJob was not created")
model = self._run_job(
job=self._job,
dataset=dataset,
annotation_schema_uri=annotation_schema_uri,
model_display_name=model_display_name,
model_labels=model_labels,
base_output_dir=base_output_dir,
service_account=service_account,
network=network,
bigquery_destination=bigquery_destination,
args=args,
environment_variables=environment_variables,
replica_count=replica_count,
machine_type=machine_type,
accelerator_type=accelerator_type,
accelerator_count=accelerator_count,
boot_disk_type=boot_disk_type,
boot_disk_size_gb=boot_disk_size_gb,
training_fraction_split=training_fraction_split,
validation_fraction_split=validation_fraction_split,
test_fraction_split=test_fraction_split,
training_filter_split=training_filter_split,
validation_filter_split=validation_filter_split,
test_filter_split=test_filter_split,
predefined_split_column_name=predefined_split_column_name,
timestamp_split_column_name=timestamp_split_column_name,
tensorboard=tensorboard,
sync=sync,
)
return model
@GoogleBaseHook.fallback_to_default_project_id
def delete_pipeline_job(
self,
project_id: str,
region: str,
pipeline_job: str,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> Operation:
"""
Deletes a PipelineJob.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param pipeline_job: Required. The name of the PipelineJob resource to be deleted.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_pipeline_service_client(region)
name = client.pipeline_job_path(project_id, region, pipeline_job)
result = client.delete_pipeline_job(
request={
'name': name,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
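        # The delete is a long-running operation; callers that need to wait
        # for completion can block on the returned ``Operation``, for example
        # via ``result.result()``.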
return result
@GoogleBaseHook.fallback_to_default_project_id
def delete_training_pipeline(
self,
project_id: str,
region: str,
training_pipeline: str,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> Operation:
"""
Deletes a TrainingPipeline.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param training_pipeline: Required. The name of the TrainingPipeline resource to be deleted.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_pipeline_service_client(region)
name = client.training_pipeline_path(project_id, region, training_pipeline)
result = client.delete_training_pipeline(
request={
'name': name,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
def delete_custom_job(
self,
project_id: str,
region: str,
custom_job: str,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> Operation:
"""
Deletes a CustomJob.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param custom_job: Required. The name of the CustomJob to delete.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_job_service_client(region)
name = client.custom_job_path(project_id, region, custom_job)
result = client.delete_custom_job(
request={
'name': name,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
def get_pipeline_job(
self,
project_id: str,
region: str,
pipeline_job: str,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> PipelineJob:
"""
Gets a PipelineJob.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param pipeline_job: Required. The name of the PipelineJob resource.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_pipeline_service_client(region)
name = client.pipeline_job_path(project_id, region, pipeline_job)
result = client.get_pipeline_job(
request={
'name': name,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
def get_training_pipeline(
self,
project_id: str,
region: str,
training_pipeline: str,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> TrainingPipeline:
"""
Gets a TrainingPipeline.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param training_pipeline: Required. The name of the TrainingPipeline resource.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_pipeline_service_client(region)
name = client.training_pipeline_path(project_id, region, training_pipeline)
result = client.get_training_pipeline(
request={
'name': name,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
def get_custom_job(
self,
project_id: str,
region: str,
custom_job: str,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> CustomJob:
"""
Gets a CustomJob.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param custom_job: Required. The name of the CustomJob to get.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_job_service_client(region)
        name = client.custom_job_path(project_id, region, custom_job)
result = client.get_custom_job(
request={
'name': name,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
def list_pipeline_jobs(
self,
project_id: str,
region: str,
page_size: Optional[int] = None,
page_token: Optional[str] = None,
filter: Optional[str] = None,
order_by: Optional[str] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> ListPipelineJobsPager:
"""
Lists PipelineJobs in a Location.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param filter: Optional. Lists the PipelineJobs that match the filter expression. The
following fields are supported:
- ``pipeline_name``: Supports ``=`` and ``!=`` comparisons.
- ``display_name``: Supports ``=``, ``!=`` comparisons, and
``:`` wildcard.
            - ``pipeline_job_user_id``: Supports ``=``, ``!=``
              comparisons, and ``:`` wildcard. For example, to check
              whether a pipeline's display_name contains *step*, use
              ``display_name:"*step*"``.
- ``create_time``: Supports ``=``, ``!=``, ``<``, ``>``,
``<=``, and ``>=`` comparisons. Values must be in RFC
3339 format.
- ``update_time``: Supports ``=``, ``!=``, ``<``, ``>``,
``<=``, and ``>=`` comparisons. Values must be in RFC
3339 format.
- ``end_time``: Supports ``=``, ``!=``, ``<``, ``>``,
``<=``, and ``>=`` comparisons. Values must be in RFC
3339 format.
- ``labels``: Supports key-value equality and key presence.
Filter expressions can be combined together using logical
operators (``AND`` & ``OR``). For example:
``pipeline_name="test" AND create_time>"2020-05-18T13:30:00Z"``.
The syntax to define filter expression is based on
https://google.aip.dev/160.
:param page_size: Optional. The standard list page size.
:param page_token: Optional. The standard list page token. Typically obtained via
[ListPipelineJobsResponse.next_page_token][google.cloud.aiplatform.v1.ListPipelineJobsResponse.next_page_token]
of the previous
[PipelineService.ListPipelineJobs][google.cloud.aiplatform.v1.PipelineService.ListPipelineJobs]
call.
        :param order_by: Optional. A comma-separated list of fields to order by. The default
            sort order is ascending. Use "desc" after a field name for
            descending. Multiple order_by fields may be provided, e.g.
            "create_time desc, end_time" or "end_time, start_time,
            update_time". For example, "create_time desc, end_time" orders
            results by create time in descending order and, for jobs with
            the same create time, by end time in ascending order. If
            order_by is not specified, results are ordered by create time
            in descending order. Supported fields:
- ``create_time``
- ``update_time``
- ``end_time``
- ``start_time``
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_pipeline_service_client(region)
parent = client.common_location_path(project_id, region)
result = client.list_pipeline_jobs(
request={
'parent': parent,
'page_size': page_size,
'page_token': page_token,
'filter': filter,
'order_by': order_by,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
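        # The returned pager fetches subsequent pages lazily as it is
        # iterated, so callers usually do not need to manage ``page_token``
        # themselves.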
return result
@GoogleBaseHook.fallback_to_default_project_id
def list_training_pipelines(
self,
project_id: str,
region: str,
page_size: Optional[int] = None,
page_token: Optional[str] = None,
filter: Optional[str] = None,
read_mask: Optional[str] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> ListTrainingPipelinesPager:
"""
Lists TrainingPipelines in a Location.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param filter: Optional. The standard list filter. Supported fields:
- ``display_name`` supports = and !=.
- ``state`` supports = and !=.
Some examples of using the filter are:
- ``state="PIPELINE_STATE_SUCCEEDED" AND display_name="my_pipeline"``
- ``state="PIPELINE_STATE_RUNNING" OR display_name="my_pipeline"``
- ``NOT display_name="my_pipeline"``
- ``state="PIPELINE_STATE_FAILED"``
:param page_size: Optional. The standard list page size.
:param page_token: Optional. The standard list page token. Typically obtained via
[ListTrainingPipelinesResponse.next_page_token][google.cloud.aiplatform.v1.ListTrainingPipelinesResponse.next_page_token]
of the previous
[PipelineService.ListTrainingPipelines][google.cloud.aiplatform.v1.PipelineService.ListTrainingPipelines]
call.
:param read_mask: Optional. Mask specifying which fields to read.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_pipeline_service_client(region)
parent = client.common_location_path(project_id, region)
result = client.list_training_pipelines(
request={
'parent': parent,
'page_size': page_size,
'page_token': page_token,
'filter': filter,
'read_mask': read_mask,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
def list_custom_jobs(
self,
project_id: str,
region: str,
        page_size: Optional[int] = None,
        page_token: Optional[str] = None,
        filter: Optional[str] = None,
        read_mask: Optional[str] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> ListCustomJobsPager:
"""
Lists CustomJobs in a Location.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param filter: Optional. The standard list filter. Supported fields:
- ``display_name`` supports = and !=.
- ``state`` supports = and !=.
Some examples of using the filter are:
- ``state="PIPELINE_STATE_SUCCEEDED" AND display_name="my_pipeline"``
- ``state="PIPELINE_STATE_RUNNING" OR display_name="my_pipeline"``
- ``NOT display_name="my_pipeline"``
- ``state="PIPELINE_STATE_FAILED"``
:param page_size: Optional. The standard list page size.
        :param page_token: Optional. The standard list page token. Typically obtained via
            [ListCustomJobsResponse.next_page_token][google.cloud.aiplatform.v1.ListCustomJobsResponse.next_page_token]
            of the previous
            [JobService.ListCustomJobs][google.cloud.aiplatform.v1.JobService.ListCustomJobs]
            call.
:param read_mask: Optional. Mask specifying which fields to read.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_job_service_client(region)
        parent = client.common_location_path(project_id, region)
result = client.list_custom_jobs(
request={
'parent': parent,
'page_size': page_size,
'page_token': page_token,
'filter': filter,
'read_mask': read_mask,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
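
# --- Illustrative usage sketch (not part of the provider source) ---
# A minimal, hedged example of driving the methods above. It assumes the
# surrounding hook class is ``CustomJobHook`` (the class name is not shown in
# this excerpt); the connection id, project, and region are placeholders.
def _example_list_custom_jobs():
    hook = CustomJobHook(gcp_conn_id="google_cloud_default")
    # Iterate the pager; further pages are fetched lazily on demand.
    pager = hook.list_custom_jobs(
        project_id="my-project",  # placeholder
        region="us-central1",  # placeholder
        page_size=50,
        page_token=None,
        filter=None,
        read_mask=None,
    )
    for job in pager:
        print(job.display_name, job.state)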
|
{
"content_hash": "e75b7b481441a0c72f658f47998eb0e0",
"timestamp": "",
"source": "github",
"line_count": 2034,
"max_line_length": 133,
"avg_line_length": 53.140609636184855,
"alnum_prop": 0.633678114129228,
"repo_name": "bolkedebruin/airflow",
"id": "cd73da149aa0e647d47522869e1a69f3618fc6e3",
"size": "108877",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "airflow/providers/google/cloud/hooks/vertex_ai/custom_job.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25286"
},
{
"name": "Dockerfile",
"bytes": "40459"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "157840"
},
{
"name": "JavaScript",
"bytes": "167972"
},
{
"name": "Jinja",
"bytes": "33382"
},
{
"name": "Jupyter Notebook",
"bytes": "2933"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "19287942"
},
{
"name": "Shell",
"bytes": "645244"
},
{
"name": "TypeScript",
"bytes": "173854"
}
],
"symlink_target": ""
}
|
import tensorflow as tf
from tensorboard.backend.event_processing import reservoir
class ReservoirTest(tf.test.TestCase):
def testEmptyReservoir(self):
r = reservoir.Reservoir(1)
self.assertFalse(r.Keys())
def testRespectsSize(self):
r = reservoir.Reservoir(42)
self.assertEqual(r._buckets["meaning of life"]._max_size, 42)
def testItemsAndKeys(self):
r = reservoir.Reservoir(42)
r.AddItem("foo", 4)
r.AddItem("bar", 9)
r.AddItem("foo", 19)
self.assertItemsEqual(r.Keys(), ["foo", "bar"])
self.assertEqual(r.Items("foo"), [4, 19])
self.assertEqual(r.Items("bar"), [9])
def testExceptions(self):
with self.assertRaises(ValueError):
reservoir.Reservoir(-1)
with self.assertRaises(ValueError):
reservoir.Reservoir(13.3)
r = reservoir.Reservoir(12)
with self.assertRaises(KeyError):
r.Items("missing key")
def testDeterminism(self):
"""Tests that the reservoir is deterministic."""
key = "key"
r1 = reservoir.Reservoir(10)
r2 = reservoir.Reservoir(10)
for i in range(100):
r1.AddItem("key", i)
r2.AddItem("key", i)
self.assertEqual(r1.Items(key), r2.Items(key))
def testBucketDeterminism(self):
"""Tests that reservoirs are deterministic at a bucket level.
This means that only the order elements are added within a
bucket matters.
"""
separate_reservoir = reservoir.Reservoir(10)
interleaved_reservoir = reservoir.Reservoir(10)
for i in range(100):
separate_reservoir.AddItem("key1", i)
for i in range(100):
separate_reservoir.AddItem("key2", i)
for i in range(100):
interleaved_reservoir.AddItem("key1", i)
interleaved_reservoir.AddItem("key2", i)
for key in ["key1", "key2"]:
self.assertEqual(
separate_reservoir.Items(key), interleaved_reservoir.Items(key)
)
def testUsesSeed(self):
"""Tests that reservoirs with different seeds keep different
samples."""
key = "key"
r1 = reservoir.Reservoir(10, seed=0)
r2 = reservoir.Reservoir(10, seed=1)
for i in range(100):
r1.AddItem("key", i)
r2.AddItem("key", i)
self.assertNotEqual(r1.Items(key), r2.Items(key))
def testFilterItemsByKey(self):
r = reservoir.Reservoir(100, seed=0)
for i in range(10):
r.AddItem("key1", i)
r.AddItem("key2", i)
self.assertEqual(len(r.Items("key1")), 10)
self.assertEqual(len(r.Items("key2")), 10)
self.assertEqual(r.FilterItems(lambda x: x <= 7, "key2"), 2)
self.assertEqual(len(r.Items("key2")), 8)
self.assertEqual(len(r.Items("key1")), 10)
self.assertEqual(r.FilterItems(lambda x: x <= 3, "key1"), 6)
self.assertEqual(len(r.Items("key1")), 4)
self.assertEqual(len(r.Items("key2")), 8)
class ReservoirBucketTest(tf.test.TestCase):
def testEmptyBucket(self):
b = reservoir._ReservoirBucket(1)
self.assertFalse(b.Items())
def testFillToSize(self):
b = reservoir._ReservoirBucket(100)
for i in range(100):
b.AddItem(i)
self.assertEqual(b.Items(), list(range(100)))
self.assertEqual(b._num_items_seen, 100)
def testDoesntOverfill(self):
b = reservoir._ReservoirBucket(10)
for i in range(1000):
b.AddItem(i)
self.assertEqual(len(b.Items()), 10)
self.assertEqual(b._num_items_seen, 1000)
def testMaintainsOrder(self):
b = reservoir._ReservoirBucket(100)
for i in range(10000):
b.AddItem(i)
items = b.Items()
prev = -1
for item in items:
            self.assertGreater(item, prev)
prev = item
def testKeepsLatestItem(self):
b = reservoir._ReservoirBucket(5)
for i in range(100):
b.AddItem(i)
last = b.Items()[-1]
self.assertEqual(last, i)
def testSizeOneBucket(self):
b = reservoir._ReservoirBucket(1)
for i in range(20):
b.AddItem(i)
self.assertEqual(b.Items(), [i])
self.assertEqual(b._num_items_seen, 20)
def testSizeZeroBucket(self):
b = reservoir._ReservoirBucket(0)
for i in range(20):
b.AddItem(i)
self.assertEqual(b.Items(), list(range(i + 1)))
self.assertEqual(b._num_items_seen, 20)
def testSizeRequirement(self):
with self.assertRaises(ValueError):
reservoir._ReservoirBucket(-1)
with self.assertRaises(ValueError):
reservoir._ReservoirBucket(10.3)
def testRemovesItems(self):
b = reservoir._ReservoirBucket(100)
for i in range(10):
b.AddItem(i)
self.assertEqual(len(b.Items()), 10)
self.assertEqual(b._num_items_seen, 10)
self.assertEqual(b.FilterItems(lambda x: x <= 7), 2)
self.assertEqual(len(b.Items()), 8)
self.assertEqual(b._num_items_seen, 8)
def testRemovesItemsWhenItemsAreReplaced(self):
b = reservoir._ReservoirBucket(100)
for i in range(10000):
b.AddItem(i)
self.assertEqual(b._num_items_seen, 10000)
# Remove items
num_removed = b.FilterItems(lambda x: x <= 7)
self.assertGreater(num_removed, 92)
self.assertEqual([], [item for item in b.Items() if item > 7])
self.assertEqual(
b._num_items_seen,
int(round(10000 * (1 - float(num_removed) / 100))),
)
def testLazyFunctionEvaluationAndAlwaysKeepLast(self):
class FakeRandom(object):
def randint(self, a, b): # pylint:disable=unused-argument
return 999
class Incrementer(object):
def __init__(self):
self.n = 0
def increment_and_double(self, x):
self.n += 1
return x * 2
# We've mocked the randomness generator, so that once it is full, the last
# item will never get durable reservoir inclusion. Since always_keep_last is
# false, the function should only get invoked 100 times while filling up
# the reservoir. This laziness property is an essential performance
# optimization.
b = reservoir._ReservoirBucket(
100, FakeRandom(), always_keep_last=False
)
incrementer = Incrementer()
for i in range(1000):
b.AddItem(i, incrementer.increment_and_double)
self.assertEqual(incrementer.n, 100)
self.assertEqual(b.Items(), [x * 2 for x in range(100)])
# This time, we will always keep the last item, meaning that the function
# should get invoked once for every item we add.
b = reservoir._ReservoirBucket(100, FakeRandom(), always_keep_last=True)
incrementer = Incrementer()
for i in range(1000):
b.AddItem(i, incrementer.increment_and_double)
self.assertEqual(incrementer.n, 1000)
self.assertEqual(b.Items(), [x * 2 for x in range(99)] + [999 * 2])
class ReservoirBucketStatisticalDistributionTest(tf.test.TestCase):
def setUp(self):
self.total = 1000000
self.samples = 10000
self.n_buckets = 100
self.total_per_bucket = self.total // self.n_buckets
self.assertEqual(
self.total % self.n_buckets,
0,
"total must be evenly " "divisible by the number of buckets",
)
self.assertTrue(
            self.total > self.samples, "need to have more items than samples"
)
def AssertBinomialQuantity(self, measured):
p = 1.0 * self.n_buckets / self.samples
mean = p * self.samples
variance = p * (1 - p) * self.samples
error = measured - mean
# Given that the buckets were actually binomially distributed, this
# fails with probability ~2E-9
passed = error * error <= 36.0 * variance
self.assertTrue(
passed,
"found a bucket with measured %d "
"too far from expected %d" % (measured, mean),
)
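    # Note on the bound: accepting |measured - mean| up to six standard
    # deviations corresponds, under a normal approximation to the binomial,
    # to a per-bucket false-failure probability of roughly 2e-9, which is
    # the figure quoted in the comment above.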
    def testBucketReservoirSamplingViaStatisticalProperties(self):
        # n_buckets is not related to a '_ReservoirBucket'; it is the number
        # of bins we sort sampled items into when testing the shape of the
        # distribution.
        b = reservoir._ReservoirBucket(_max_size=self.samples)
# add one extra item because we always keep the most recent item, which
# would skew the distribution; we can just slice it off the end instead.
for i in range(self.total + 1):
b.AddItem(i)
divbins = [0] * self.n_buckets
modbins = [0] * self.n_buckets
# Slice off the last item when we iterate.
for item in b.Items()[0:-1]:
divbins[item // self.total_per_bucket] += 1
modbins[item % self.n_buckets] += 1
for bucket_index in range(self.n_buckets):
divbin = divbins[bucket_index]
modbin = modbins[bucket_index]
self.AssertBinomialQuantity(divbin)
self.AssertBinomialQuantity(modbin)
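
# --- Reference sketch (illustrative; not part of the module under test) ---
# Classic "Algorithm R" reservoir sampling, the technique whose statistical
# behaviour the distribution test above exercises. The real
# ``reservoir._ReservoirBucket`` differs (keyed buckets, optional
# always-keep-last semantics, injectable RNG); this standalone function only
# illustrates the core idea.
import random


def reservoir_sample(stream, k, seed=0):
    """Return k items drawn uniformly at random from ``stream``."""
    rng = random.Random(seed)
    sample = []
    for n, item in enumerate(stream):
        if n < k:
            # Fill the reservoir with the first k items.
            sample.append(item)
        else:
            # Replace a random slot with probability k / (n + 1); this keeps
            # every item seen so far equally likely to remain in the sample.
            j = rng.randint(0, n)
            if j < k:
                sample[j] = item
    return sample
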
if __name__ == "__main__":
tf.test.main()
|
{
"content_hash": "b8484e91fc65bd6e6d585363430122ae",
"timestamp": "",
"source": "github",
"line_count": 267,
"max_line_length": 84,
"avg_line_length": 35.27715355805243,
"alnum_prop": 0.590508546554836,
"repo_name": "tensorflow/tensorboard",
"id": "0db2a41625644a58c82652c9081ea07483175e85",
"size": "10110",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorboard/backend/event_processing/reservoir_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "16222"
},
{
"name": "Dockerfile",
"bytes": "1226"
},
{
"name": "HTML",
"bytes": "154824"
},
{
"name": "Java",
"bytes": "20643"
},
{
"name": "JavaScript",
"bytes": "11869"
},
{
"name": "Jupyter Notebook",
"bytes": "7697"
},
{
"name": "Python",
"bytes": "2922179"
},
{
"name": "Rust",
"bytes": "311041"
},
{
"name": "SCSS",
"bytes": "136834"
},
{
"name": "Shell",
"bytes": "36731"
},
{
"name": "Starlark",
"bytes": "541743"
},
{
"name": "TypeScript",
"bytes": "5930550"
}
],
"symlink_target": ""
}
|