text stringlengths 6 947k | repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1
value | license stringclasses 15
values | size int64 6 947k | score float64 0 0.34 |
|---|---|---|---|---|---|---|
import base64
import urllib
from io import StringIO
import orjson
from django.conf import settings
from zerver.lib.test_classes import ZulipTestCase
from zerver.lib.test_helpers import (
create_s3_buckets,
get_test_image_file,
override_settings,
use_s3_backend,
)
from zerver.lib.upload import upload_backend, upload_emoji_image
from zerver.lib.users import get_api_key
class ThumbnailTest(ZulipTestCase):
    """Exercises the /thumbnail redirect endpoint for every supported
    source type (S3, local file, external URL), several authentication
    styles, and the size / THUMBOR_URL configuration knobs."""

    @use_s3_backend
    def test_s3_source_type(self) -> None:
        """Thumbnail redirects for files stored on the S3 backend."""
        def get_file_path_urlpart(uri: str, size: str='') -> str:
            # Expected thumbor URL fragment for an S3-backed source; the
            # original URI is urlsafe-base64 encoded inside the generated URL.
            url_in_result = 'smart/filters:no_upscale()%s/%s/source_type/s3'
            sharpen_filter = ''
            if size:
                url_in_result = f'/{size}/{url_in_result}'
                sharpen_filter = ':sharpen(0.5,0.2,true)'
            hex_uri = base64.urlsafe_b64encode(uri.encode()).decode('utf-8')
            return url_in_result % (sharpen_filter, hex_uri)

        create_s3_buckets(
            settings.S3_AUTH_UPLOADS_BUCKET,
            settings.S3_AVATAR_BUCKET)

        hamlet = self.example_user('hamlet')
        self.login_user(hamlet)
        fp = StringIO("zulip!")
        fp.name = "zulip.jpeg"

        result = self.client_post("/json/user_uploads", {'file': fp})
        self.assert_json_success(result)
        json = orjson.loads(result.content)
        self.assertIn("uri", json)
        uri = json["uri"]
        base = '/user_uploads/'
        self.assertEqual(base, uri[:len(base)])

        # The leading slash is dropped before quoting, matching how Markdown
        # renders upload links.
        quoted_uri = urllib.parse.quote(uri[1:], safe='')

        # Test full size image.
        result = self.client_get(f"/thumbnail?url={quoted_uri}&size=full")
        self.assertEqual(result.status_code, 302, result)
        expected_part_url = get_file_path_urlpart(uri)
        self.assertIn(expected_part_url, result.url)

        # Test thumbnail size.
        result = self.client_get(f"/thumbnail?url={quoted_uri}&size=thumbnail")
        self.assertEqual(result.status_code, 302, result)
        expected_part_url = get_file_path_urlpart(uri, '0x300')
        self.assertIn(expected_part_url, result.url)

        # Test custom emoji urls in Zulip messages.
        user_profile = self.example_user("hamlet")
        image_file = get_test_image_file("img.png")
        file_name = "emoji.png"
        upload_emoji_image(image_file, file_name, user_profile)
        custom_emoji_url = upload_backend.get_emoji_url(file_name, user_profile.realm_id)
        emoji_url_base = '/user_avatars/'
        self.assertEqual(emoji_url_base, custom_emoji_url[:len(emoji_url_base)])
        quoted_emoji_url = urllib.parse.quote(custom_emoji_url[1:], safe='')

        # Test full size custom emoji image (for emoji link in messages case).
        result = self.client_get(f"/thumbnail?url={quoted_emoji_url}&size=full")
        self.assertEqual(result.status_code, 302, result)
        self.assertIn(custom_emoji_url, result.url)

        # Tests the /api/v1/thumbnail api endpoint with standard API auth
        self.logout()
        result = self.api_get(
            hamlet,
            f'/thumbnail?url={quoted_uri}&size=full')
        self.assertEqual(result.status_code, 302, result)
        expected_part_url = get_file_path_urlpart(uri)
        self.assertIn(expected_part_url, result.url)

        # Test with another user trying to access image using thumbor.
        self.login('iago')
        result = self.client_get(f"/thumbnail?url={quoted_uri}&size=full")
        self.assertEqual(result.status_code, 403, result)
        self.assert_in_response("You are not authorized to view this file.", result)

    def test_external_source_type(self) -> None:
        """Thumbnail redirects for external image URLs (https, http, and
        protocol-relative); these are accessible to any authenticated user."""
        def run_test_with_image_url(image_url: str) -> None:
            # Test full size image.
            self.login('hamlet')
            quoted_url = urllib.parse.quote(image_url, safe='')
            encoded_url = base64.urlsafe_b64encode(image_url.encode()).decode('utf-8')
            result = self.client_get(f"/thumbnail?url={quoted_url}&size=full")
            self.assertEqual(result.status_code, 302, result)
            expected_part_url = '/smart/filters:no_upscale()/' + encoded_url + '/source_type/external'
            self.assertIn(expected_part_url, result.url)

            # Test thumbnail size.
            result = self.client_get(f"/thumbnail?url={quoted_url}&size=thumbnail")
            self.assertEqual(result.status_code, 302, result)
            expected_part_url = '/0x300/smart/filters:no_upscale():sharpen(0.5,0.2,true)/' + encoded_url + '/source_type/external'
            self.assertIn(expected_part_url, result.url)

            # Test api endpoint with standard API authentication.
            self.logout()
            user_profile = self.example_user("hamlet")
            result = self.api_get(user_profile,
                                  f"/thumbnail?url={quoted_url}&size=thumbnail")
            self.assertEqual(result.status_code, 302, result)
            expected_part_url = '/0x300/smart/filters:no_upscale():sharpen(0.5,0.2,true)/' + encoded_url + '/source_type/external'
            self.assertIn(expected_part_url, result.url)

            # Test api endpoint with legacy API authentication.
            user_profile = self.example_user("hamlet")
            result = self.client_get(f"/thumbnail?url={quoted_url}&size=thumbnail&api_key={get_api_key(user_profile)}")
            self.assertEqual(result.status_code, 302, result)
            expected_part_url = '/0x300/smart/filters:no_upscale():sharpen(0.5,0.2,true)/' + encoded_url + '/source_type/external'
            self.assertIn(expected_part_url, result.url)

            # Test a second logged-in user; they should also be able to access it
            user_profile = self.example_user("iago")
            result = self.client_get(f"/thumbnail?url={quoted_url}&size=thumbnail&api_key={get_api_key(user_profile)}")
            self.assertEqual(result.status_code, 302, result)
            expected_part_url = '/0x300/smart/filters:no_upscale():sharpen(0.5,0.2,true)/' + encoded_url + '/source_type/external'
            self.assertIn(expected_part_url, result.url)

            # Test with another user trying to access image using thumbor.
            # File should be always accessible to user in case of external source
            self.login('iago')
            result = self.client_get(f"/thumbnail?url={quoted_url}&size=full")
            self.assertEqual(result.status_code, 302, result)
            expected_part_url = '/smart/filters:no_upscale()/' + encoded_url + '/source_type/external'
            self.assertIn(expected_part_url, result.url)

        image_url = 'https://images.foobar.com/12345'
        run_test_with_image_url(image_url)

        image_url = 'http://images.foobar.com/12345'
        run_test_with_image_url(image_url)

        image_url = '//images.foobar.com/12345'
        run_test_with_image_url(image_url)

    def test_local_file_type(self) -> None:
        """Thumbnail redirects for files stored on the local-file backend,
        including unicode filenames and all supported auth styles."""
        def get_file_path_urlpart(uri: str, size: str='') -> str:
            # Expected thumbor URL fragment for a local-file source.
            url_in_result = 'smart/filters:no_upscale()%s/%s/source_type/local_file'
            sharpen_filter = ''
            if size:
                url_in_result = f'/{size}/{url_in_result}'
                sharpen_filter = ':sharpen(0.5,0.2,true)'
            hex_uri = base64.urlsafe_b64encode(uri.encode()).decode('utf-8')
            return url_in_result % (sharpen_filter, hex_uri)

        self.login('hamlet')
        fp = StringIO("zulip!")
        fp.name = "zulip.jpeg"

        result = self.client_post("/json/user_uploads", {'file': fp})
        self.assert_json_success(result)
        json = orjson.loads(result.content)
        self.assertIn("uri", json)
        uri = json["uri"]
        base = '/user_uploads/'
        self.assertEqual(base, uri[:len(base)])

        # Test full size image.
        # We remove the forward slash infront of the `/user_uploads/` to match
        # Markdown behaviour.
        quoted_uri = urllib.parse.quote(uri[1:], safe='')
        result = self.client_get(f"/thumbnail?url={quoted_uri}&size=full")
        self.assertEqual(result.status_code, 302, result)
        expected_part_url = get_file_path_urlpart(uri)
        self.assertIn(expected_part_url, result.url)

        # Test thumbnail size.
        result = self.client_get(f"/thumbnail?url={quoted_uri}&size=thumbnail")
        self.assertEqual(result.status_code, 302, result)
        expected_part_url = get_file_path_urlpart(uri, '0x300')
        self.assertIn(expected_part_url, result.url)

        # Test with a unicode filename.
        fp = StringIO("zulip!")
        fp.name = "μένει.jpg"
        result = self.client_post("/json/user_uploads", {'file': fp})
        self.assert_json_success(result)
        json = orjson.loads(result.content)
        self.assertIn("uri", json)
        uri = json["uri"]

        # We remove the forward slash infront of the `/user_uploads/` to match
        # Markdown behaviour.
        quoted_uri = urllib.parse.quote(uri[1:], safe='')
        result = self.client_get(f"/thumbnail?url={quoted_uri}&size=full")
        self.assertEqual(result.status_code, 302, result)
        expected_part_url = get_file_path_urlpart(uri)
        self.assertIn(expected_part_url, result.url)

        # Test custom emoji urls in Zulip messages.
        user_profile = self.example_user("hamlet")
        image_file = get_test_image_file("img.png")
        file_name = "emoji.png"
        upload_emoji_image(image_file, file_name, user_profile)
        custom_emoji_url = upload_backend.get_emoji_url(file_name, user_profile.realm_id)
        emoji_url_base = '/user_avatars/'
        self.assertEqual(emoji_url_base, custom_emoji_url[:len(emoji_url_base)])
        quoted_emoji_url = urllib.parse.quote(custom_emoji_url[1:], safe='')

        # Test full size custom emoji image (for emoji link in messages case).
        result = self.client_get(f"/thumbnail?url={quoted_emoji_url}&size=full")
        self.assertEqual(result.status_code, 302, result)
        self.assertIn(custom_emoji_url, result.url)

        # Tests the /api/v1/thumbnail api endpoint with HTTP basic auth.
        self.logout()
        user_profile = self.example_user("hamlet")
        result = self.api_get(
            user_profile,
            f'/thumbnail?url={quoted_uri}&size=full')
        self.assertEqual(result.status_code, 302, result)
        expected_part_url = get_file_path_urlpart(uri)
        self.assertIn(expected_part_url, result.url)

        # Tests the /api/v1/thumbnail api endpoint with ?api_key
        # auth.
        user_profile = self.example_user("hamlet")
        result = self.client_get(
            f'/thumbnail?url={quoted_uri}&size=full&api_key={get_api_key(user_profile)}')
        self.assertEqual(result.status_code, 302, result)
        expected_part_url = get_file_path_urlpart(uri)
        self.assertIn(expected_part_url, result.url)

        # Test with another user trying to access image using thumbor.
        self.login('iago')
        result = self.client_get(f"/thumbnail?url={quoted_uri}&size=full")
        self.assertEqual(result.status_code, 403, result)
        self.assert_in_response("You are not authorized to view this file.", result)

    @override_settings(THUMBOR_URL='127.0.0.1:9995')
    def test_with_static_files(self) -> None:
        """Static assets are redirected to directly, bypassing thumbor."""
        self.login('hamlet')
        uri = '/static/images/cute/turtle.png'
        quoted_uri = urllib.parse.quote(uri[1:], safe='')
        result = self.client_get(f"/thumbnail?url={quoted_uri}&size=full")
        self.assertEqual(result.status_code, 302, result)
        self.assertEqual(uri, result.url)

    def test_with_thumbor_disabled(self) -> None:
        """With THUMBOR_URL empty, /thumbnail redirects straight to the
        original upload, or to the external-content CDN URL for external
        images (hash + hex-encoded source URL path components)."""
        self.login('hamlet')
        fp = StringIO("zulip!")
        fp.name = "zulip.jpeg"

        result = self.client_post("/json/user_uploads", {'file': fp})
        self.assert_json_success(result)
        json = orjson.loads(result.content)
        self.assertIn("uri", json)
        uri = json["uri"]
        base = '/user_uploads/'
        self.assertEqual(base, uri[:len(base)])

        quoted_uri = urllib.parse.quote(uri[1:], safe='')
        with self.settings(THUMBOR_URL=''):
            result = self.client_get(f"/thumbnail?url={quoted_uri}&size=full")
        self.assertEqual(result.status_code, 302, result)
        self.assertEqual(uri, result.url)

        uri = 'https://www.google.com/images/srpr/logo4w.png'
        quoted_uri = urllib.parse.quote(uri, safe='')
        with self.settings(THUMBOR_URL=''):
            result = self.client_get(f"/thumbnail?url={quoted_uri}&size=full")
        self.assertEqual(result.status_code, 302, result)
        base = 'https://external-content.zulipcdn.net/external_content/56c362a24201593891955ff526b3b412c0f9fcd2/68747470733a2f2f7777772e676f6f676c652e636f6d2f696d616765732f737270722f6c6f676f34772e706e67'
        self.assertEqual(base, result.url)

        uri = 'http://www.google.com/images/srpr/logo4w.png'
        quoted_uri = urllib.parse.quote(uri, safe='')
        with self.settings(THUMBOR_URL=''):
            result = self.client_get(f"/thumbnail?url={quoted_uri}&size=full")
        self.assertEqual(result.status_code, 302, result)
        base = 'https://external-content.zulipcdn.net/external_content/7b6552b60c635e41e8f6daeb36d88afc4eabde79/687474703a2f2f7777772e676f6f676c652e636f6d2f696d616765732f737270722f6c6f676f34772e706e67'
        self.assertEqual(base, result.url)

        uri = '//www.google.com/images/srpr/logo4w.png'
        quoted_uri = urllib.parse.quote(uri, safe='')
        with self.settings(THUMBOR_URL=''):
            result = self.client_get(f"/thumbnail?url={quoted_uri}&size=full")
        self.assertEqual(result.status_code, 302, result)
        base = 'https://external-content.zulipcdn.net/external_content/676530cf4b101d56f56cc4a37c6ef4d4fd9b0c03/2f2f7777772e676f6f676c652e636f6d2f696d616765732f737270722f6c6f676f34772e706e67'
        self.assertEqual(base, result.url)

    def test_with_different_THUMBOR_URL(self) -> None:
        """The redirect target honors a custom THUMBOR_URL host."""
        self.login('hamlet')
        fp = StringIO("zulip!")
        fp.name = "zulip.jpeg"

        result = self.client_post("/json/user_uploads", {'file': fp})
        self.assert_json_success(result)
        json = orjson.loads(result.content)
        self.assertIn("uri", json)
        uri = json["uri"]
        base = '/user_uploads/'
        self.assertEqual(base, uri[:len(base)])

        quoted_uri = urllib.parse.quote(uri[1:], safe='')
        hex_uri = base64.urlsafe_b64encode(uri.encode()).decode('utf-8')
        with self.settings(THUMBOR_URL='http://test-thumborhost.com'):
            result = self.client_get(f"/thumbnail?url={quoted_uri}&size=full")
        self.assertEqual(result.status_code, 302, result)
        base = 'http://test-thumborhost.com/'
        self.assertEqual(base, result.url[:len(base)])
        expected_part_url = '/smart/filters:no_upscale()/' + hex_uri + '/source_type/local_file'
        self.assertIn(expected_part_url, result.url)

    def test_with_different_sizes(self) -> None:
        """Only 'full' and 'thumbnail' are accepted size values, and the
        size parameter itself is required."""
        def get_file_path_urlpart(uri: str, size: str='') -> str:
            # Expected thumbor URL fragment for a local-file source.
            url_in_result = 'smart/filters:no_upscale()%s/%s/source_type/local_file'
            sharpen_filter = ''
            if size:
                url_in_result = f'/{size}/{url_in_result}'
                sharpen_filter = ':sharpen(0.5,0.2,true)'
            hex_uri = base64.urlsafe_b64encode(uri.encode()).decode('utf-8')
            return url_in_result % (sharpen_filter, hex_uri)

        self.login('hamlet')
        fp = StringIO("zulip!")
        fp.name = "zulip.jpeg"

        result = self.client_post("/json/user_uploads", {'file': fp})
        self.assert_json_success(result)
        json = orjson.loads(result.content)
        self.assertIn("uri", json)
        uri = json["uri"]
        base = '/user_uploads/'
        self.assertEqual(base, uri[:len(base)])

        # Test with size supplied as a query parameter.
        # size=thumbnail should return a 0x300 sized image.
        # size=full should return the original resolution image.
        quoted_uri = urllib.parse.quote(uri[1:], safe='')
        result = self.client_get(f"/thumbnail?url={quoted_uri}&size=thumbnail")
        self.assertEqual(result.status_code, 302, result)
        expected_part_url = get_file_path_urlpart(uri, '0x300')
        self.assertIn(expected_part_url, result.url)

        result = self.client_get(f"/thumbnail?url={quoted_uri}&size=full")
        self.assertEqual(result.status_code, 302, result)
        expected_part_url = get_file_path_urlpart(uri)
        self.assertIn(expected_part_url, result.url)

        # Test with size supplied as a query parameter where size is anything
        # else than 'full' or 'thumbnail'. Result should be an error message.
        result = self.client_get(f"/thumbnail?url={quoted_uri}&size=480x360")
        self.assertEqual(result.status_code, 403, result)
        self.assert_in_response("Invalid size.", result)

        # Test with no size param supplied. In this case as well we show an
        # error message.
        result = self.client_get(f"/thumbnail?url={quoted_uri}")
        self.assertEqual(result.status_code, 400, "Missing 'size' argument")
| brainwane/zulip | zerver/tests/test_thumbnail.py | Python | apache-2.0 | 17,260 | 0.00197 |
class Information:
    """Value object mirroring one row of the information table.

    Holds the owning object id, the CV it belongs to, the information type,
    and a free-form description.  ``deleted`` is a soft-delete flag that
    always starts cleared (0) for a freshly constructed record.
    """

    def __init__(self, objectid, cvid, information_type_id, description):
        # Populate every instance attribute in a single pass; new records
        # are never born deleted, so the flag starts at 0.
        vars(self).update(
            objectid=objectid,
            cvid=cvid,
            information_type_id=information_type_id,
            description=description,
            deleted=0,
        )
| itucsdb1611/itucsdb1611 | classes/information.py | Python | gpl-3.0 | 270 | 0 |
#!/usr/bin/python2.4
# Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This is a simple HTTP server used for testing Chrome.
It supports several test URLs, as specified by the handlers in TestPageHandler.
It defaults to living on localhost:8888.
It can use https if you specify the flag --https=CERT where CERT is the path
to a pem file containing the certificate and private key that should be used.
To shut it down properly, visit localhost:8888/kill.
"""
import base64
import BaseHTTPServer
import cgi
import optparse
import os
import re
import shutil
import SocketServer
import sys
import time
import tlslite
import tlslite.api
import pyftpdlib.ftpserver
# hashlib is preferred; fall back to the deprecated md5 module on very old
# Pythons (pre-2.5) that do not ship hashlib.
try:
  import hashlib
  _new_md5 = hashlib.md5
except ImportError:
  import md5
  _new_md5 = md5.new

# Server-type identifiers (selected via the command line).
SERVER_HTTP = 0
SERVER_FTP = 1

# Stream used by debug(); swap out (e.g. for a log file) to redirect output.
debug_output = sys.stderr
def debug(str):
  """Write *str* plus a trailing newline to the debug stream, flushing so
  the message appears immediately even through a pipe."""
  debug_output.write('%s\n' % str)
  debug_output.flush()
class StoppableHTTPServer(BaseHTTPServer.HTTPServer):
  """This is a specialization of BaseHTTPServer to allow it
  to be exited cleanly (by setting its "stop" member to True)."""

  def serve_forever(self):
    # Handle one request at a time until some handler (e.g. KillHandler)
    # flips self.stop, then close the listening socket.
    self.stop = False
    # Per-run nonce state; presumably consumed by the digest-auth
    # handler defined later in this file — TODO confirm.
    self.nonce = None

    while not self.stop:
      self.handle_request()
    self.socket.close()
class HTTPSServer(tlslite.api.TLSSocketServerMixIn, StoppableHTTPServer):
  """This is a specialization of StoppableHTTPServer that adds https support."""

  def __init__(self, server_address, request_hander_class, cert_path):
    # The pem file at cert_path must contain both the certificate and the
    # private key; it is read twice and parsed independently for each.
    s = open(cert_path).read()
    x509 = tlslite.api.X509()
    x509.parse(s)
    self.cert_chain = tlslite.api.X509CertChain([x509])
    s = open(cert_path).read()
    self.private_key = tlslite.api.parsePEMKey(s, private=True)

    # Session cache enables TLS session resumption across connections.
    self.session_cache = tlslite.api.SessionCache()
    StoppableHTTPServer.__init__(self, server_address, request_hander_class)

  def handshake(self, tlsConnection):
    """Creates the SSL connection.

    Returns True on a successful handshake, False (after logging) on any
    TLS error; the mixin drops the connection when False is returned."""
    try:
      tlsConnection.handshakeServer(certChain=self.cert_chain,
                                    privateKey=self.private_key,
                                    sessionCache=self.session_cache)
      tlsConnection.ignoreAbruptClose = True
      return True
    except tlslite.api.TLSError, error:
      print "Handshake failure:", str(error)
      return False
class TestPageHandler(BaseHTTPServer.BaseHTTPRequestHandler):
  def __init__(self, request, client_address, socket_server):
    # Handlers are tried in list order until one claims the request by
    # returning True, so the Default*ResponseHandler entries must stay last
    # as catch-alls.  Do not reorder casually.
    self._connect_handlers = [
      self.RedirectConnectHandler,
      self.ServerAuthConnectHandler,
      self.DefaultConnectResponseHandler]
    self._get_handlers = [
      self.KillHandler,
      self.NoCacheMaxAgeTimeHandler,
      self.NoCacheTimeHandler,
      self.CacheTimeHandler,
      self.CacheExpiresHandler,
      self.CacheProxyRevalidateHandler,
      self.CachePrivateHandler,
      self.CachePublicHandler,
      self.CacheSMaxAgeHandler,
      self.CacheMustRevalidateHandler,
      self.CacheMustRevalidateMaxAgeHandler,
      self.CacheNoStoreHandler,
      self.CacheNoStoreMaxAgeHandler,
      self.CacheNoTransformHandler,
      self.DownloadHandler,
      self.DownloadFinishHandler,
      self.EchoHeader,
      self.EchoAllHandler,
      self.FileHandler,
      self.RealFileWithCommonHeaderHandler,
      self.RealBZ2FileWithCommonHeaderHandler,
      self.AuthBasicHandler,
      self.AuthDigestHandler,
      self.SlowServerHandler,
      self.ContentTypeHandler,
      self.ServerRedirectHandler,
      self.ClientRedirectHandler,
      self.DefaultResponseHandler]
    # POST accepts everything GET does, plus the POST-specific handlers
    # (which must run first so they get a chance to claim the request).
    self._post_handlers = [
      self.WriteFile,
      self.EchoTitleHandler,
      self.EchoAllHandler,
      self.EchoHandler] + self._get_handlers

    # Minimal extension -> MIME-type map used by GetMIMETypeFromName.
    self._mime_types = {
      'gif': 'image/gif',
      'jpeg' : 'image/jpeg',
      'jpg' : 'image/jpeg'
    }
    self._default_mime_type = 'text/html'

    BaseHTTPServer.BaseHTTPRequestHandler.__init__(self, request,
                                                   client_address,
                                                   socket_server)
def _ShouldHandleRequest(self, handler_name):
"""Determines if the path can be handled by the handler.
We consider a handler valid if the path begins with the
handler name. It can optionally be followed by "?*", "/*".
"""
pattern = re.compile('%s($|\?|/).*' % handler_name)
return pattern.match(self.path)
def GetMIMETypeFromName(self, file_name):
"""Returns the mime type for the specified file_name. So far it only looks
at the file extension."""
(shortname, extension) = os.path.splitext(file_name)
if len(extension) == 0:
# no extension.
return self._default_mime_type
# extension starts with a dot, so we need to remove it
return self._mime_types.get(extension[1:], self._default_mime_type)
def KillHandler(self):
"""This request handler kills the server, for use when we're done"
with the a particular test."""
if (self.path.find("kill") < 0):
return False
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.send_header('Cache-Control', 'max-age=0')
self.end_headers()
self.wfile.write("Time to die")
self.server.stop = True
return True
  def NoCacheMaxAgeTimeHandler(self):
    """This request handler yields a page with the title set to the current
    system time, sent with max-age=0 so clients must revalidate on every
    use."""
    if not self._ShouldHandleRequest("/nocachetime/maxage"):
      return False

    self.send_response(200)
    self.send_header('Cache-Control', 'max-age=0')
    self.send_header('Content-type', 'text/html')
    self.end_headers()

    self.wfile.write('<html><head><title>%s</title></head></html>' %
                     time.time())
    return True
  def NoCacheTimeHandler(self):
    """This request handler yields a page with the title set to the current
    system time, sent with Cache-Control: no-cache so it is never served
    from cache without revalidation."""
    if not self._ShouldHandleRequest("/nocachetime"):
      return False

    self.send_response(200)
    self.send_header('Cache-Control', 'no-cache')
    self.send_header('Content-type', 'text/html')
    self.end_headers()

    self.wfile.write('<html><head><title>%s</title></head></html>' %
                     time.time())
    return True
  def CacheTimeHandler(self):
    """This request handler yields a page with the title set to the current
    system time, and allows caching for one minute (max-age=60)."""
    if not self._ShouldHandleRequest("/cachetime"):
      return False

    self.send_response(200)
    self.send_header('Cache-Control', 'max-age=60')
    self.send_header('Content-type', 'text/html')
    self.end_headers()

    self.wfile.write('<html><head><title>%s</title></head></html>' %
                     time.time())
    return True
  def CacheExpiresHandler(self):
    """This request handler yields a page with the title set to the current
    system time, and sets the page to expire on 1 Jan 2099 (far-future
    Expires header rather than Cache-Control)."""
    if not self._ShouldHandleRequest("/cache/expires"):
      return False

    self.send_response(200)
    self.send_header('Expires', 'Thu, 1 Jan 2099 00:00:00 GMT')
    self.send_header('Content-type', 'text/html')
    self.end_headers()

    self.wfile.write('<html><head><title>%s</title></head></html>' %
                     time.time())
    return True
  def CacheProxyRevalidateHandler(self):
    """This request handler yields a page with the title set to the current
    system time, allows caching for 60 seconds, and requires shared
    (proxy) caches to revalidate once the response is stale."""
    if not self._ShouldHandleRequest("/cache/proxy-revalidate"):
      return False

    self.send_response(200)
    self.send_header('Content-type', 'text/html')
    self.send_header('Cache-Control', 'max-age=60, proxy-revalidate')
    self.end_headers()

    self.wfile.write('<html><head><title>%s</title></head></html>' %
                     time.time())
    return True
  def CachePrivateHandler(self):
    """This request handler yields a page with the title set to the current
    system time, and allows caching for 3 seconds in private (browser)
    caches only.  (The docstring previously claimed 5 seconds; the header
    actually sent is max-age=3.)"""
    if not self._ShouldHandleRequest("/cache/private"):
      return False

    self.send_response(200)
    self.send_header('Content-type', 'text/html')
    self.send_header('Cache-Control', 'max-age=3, private')
    self.end_headers()

    self.wfile.write('<html><head><title>%s</title></head></html>' %
                     time.time())
    return True
  def CachePublicHandler(self):
    """This request handler yields a page with the title set to the current
    system time, and allows caching for 3 seconds in any cache, shared or
    private.  (The docstring previously claimed 5 seconds; the header
    actually sent is max-age=3.)"""
    if not self._ShouldHandleRequest("/cache/public"):
      return False

    self.send_response(200)
    self.send_header('Content-type', 'text/html')
    self.send_header('Cache-Control', 'max-age=3, public')
    self.end_headers()

    self.wfile.write('<html><head><title>%s</title></head></html>' %
                     time.time())
    return True
  def CacheSMaxAgeHandler(self):
    """This request handler yields a page with the title set to the current
    system time.  Shared caches may store it for 60 seconds (s-maxage),
    while private caches must always revalidate (max-age=0)."""
    if not self._ShouldHandleRequest("/cache/s-maxage"):
      return False

    self.send_response(200)
    self.send_header('Content-type', 'text/html')
    self.send_header('Cache-Control', 'public, s-maxage = 60, max-age = 0')
    self.end_headers()

    self.wfile.write('<html><head><title>%s</title></head></html>' %
                     time.time())
    return True
  def CacheMustRevalidateHandler(self):
    """This request handler yields a page with the title set to the current
    system time, and requires caches to revalidate the response once it is
    stale (must-revalidate, with no freshness lifetime given)."""
    if not self._ShouldHandleRequest("/cache/must-revalidate"):
      return False

    self.send_response(200)
    self.send_header('Content-type', 'text/html')
    self.send_header('Cache-Control', 'must-revalidate')
    self.end_headers()

    self.wfile.write('<html><head><title>%s</title></head></html>' %
                     time.time())
    return True
  def CacheMustRevalidateMaxAgeHandler(self):
    """This request handler yields a page with the title set to the current
    system time.  The response is fresh for 60 seconds, but once stale it
    must be revalidated even though a max-age is specified."""
    if not self._ShouldHandleRequest("/cache/must-revalidate/max-age"):
      return False

    self.send_response(200)
    self.send_header('Content-type', 'text/html')
    self.send_header('Cache-Control', 'max-age=60, must-revalidate')
    self.end_headers()

    self.wfile.write('<html><head><title>%s</title></head></html>' %
                     time.time())
    return True
  def CacheNoStoreHandler(self):
    """This request handler yields a page with the title set to the current
    system time, and does not allow the page to be stored by any cache
    (no-store)."""
    if not self._ShouldHandleRequest("/cache/no-store"):
      return False

    self.send_response(200)
    self.send_header('Content-type', 'text/html')
    self.send_header('Cache-Control', 'no-store')
    self.end_headers()

    self.wfile.write('<html><head><title>%s</title></head></html>' %
                     time.time())
    return True
  def CacheNoStoreMaxAgeHandler(self):
    """This request handler yields a page with the title set to the current
    system time, and does not allow the page to be stored even though a
    max-age of 60 seconds is also specified (no-store wins)."""
    if not self._ShouldHandleRequest("/cache/no-store/max-age"):
      return False

    self.send_response(200)
    self.send_header('Content-type', 'text/html')
    self.send_header('Cache-Control', 'max-age=60, no-store')
    self.end_headers()

    self.wfile.write('<html><head><title>%s</title></head></html>' %
                     time.time())
    return True
  def CacheNoTransformHandler(self):
    """This request handler yields a page with the title set to the current
    system time, and forbids intermediaries/caches from transforming the
    content (no-transform)."""
    if not self._ShouldHandleRequest("/cache/no-transform"):
      return False

    self.send_response(200)
    self.send_header('Content-type', 'text/html')
    self.send_header('Cache-Control', 'no-transform')
    self.end_headers()

    self.wfile.write('<html><head><title>%s</title></head></html>' %
                     time.time())
    return True
def EchoHeader(self):
"""This handler echoes back the value of a specific request header."""
if not self._ShouldHandleRequest("/echoheader"):
return False
query_char = self.path.find('?')
if query_char != -1:
header_name = self.path[query_char+1:]
self.send_response(200)
self.send_header('Content-type', 'text/plain')
self.send_header('Cache-control', 'max-age=60000')
# insert a vary header to properly indicate that the cachability of this
# request is subject to value of the request header being echoed.
if len(header_name) > 0:
self.send_header('Vary', header_name)
self.end_headers()
if len(header_name) > 0:
self.wfile.write(self.headers.getheader(header_name))
return True
  def EchoHandler(self):
    """This handler just echoes back the payload of the request, for testing
    form submission.  Content-Length is required on the request; the raw
    body is written back verbatim."""
    if not self._ShouldHandleRequest("/echo"):
      return False

    self.send_response(200)
    self.send_header('Content-type', 'text/html')
    self.end_headers()
    length = int(self.headers.getheader('content-length'))
    request = self.rfile.read(length)
    self.wfile.write(request)
    return True
def WriteFile(self):
"""This is handler dumps the content of POST request to a disk file into
the data_dir/dump. Sub-directories are not supported."""
prefix='/writefile/'
if not self.path.startswith(prefix):
return False
file_name = self.path[len(prefix):]
# do not allow fancy chars in file name
re.sub('[^a-zA-Z0-9_.-]+', '', file_name)
if len(file_name) and file_name[0] != '.':
path = os.path.join(self.server.data_dir, 'dump', file_name);
length = int(self.headers.getheader('content-length'))
request = self.rfile.read(length)
f = open(path, "wb")
f.write(request);
f.close()
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
self.wfile.write('<html>%s</html>' % file_name)
return True
  def EchoTitleHandler(self):
    """This handler is like Echo, but sets the page title to the request
    body, so the client can assert on document.title."""
    if not self._ShouldHandleRequest("/echotitle"):
      return False

    self.send_response(200)
    self.send_header('Content-type', 'text/html')
    self.end_headers()
    length = int(self.headers.getheader('content-length'))
    request = self.rfile.read(length)
    self.wfile.write('<html><head><title>')
    self.wfile.write(request)
    self.wfile.write('</title></head></html>')
    return True
  def EchoAllHandler(self):
    """This handler yields a (more) human-readable page listing information
    about the request header & contents.  For POSTs, the body is parsed as
    a form query string and echoed one key=value pair per line."""
    if not self._ShouldHandleRequest("/echoall"):
      return False

    self.send_response(200)
    self.send_header('Content-type', 'text/html')
    self.end_headers()
    self.wfile.write('<html><head><style>'
      'pre { border: 1px solid black; margin: 5px; padding: 5px }'
      '</style></head><body>'
      '<div style="float: right">'
      '<a href="http://localhost:8888/echo">back to referring page</a></div>'
      '<h1>Request Body:</h1><pre>')

    if self.command == 'POST':
      length = int(self.headers.getheader('content-length'))
      qs = self.rfile.read(length)
      # keep_blank_values=1 so params supplied without a value still show up.
      params = cgi.parse_qs(qs, keep_blank_values=1)

      for param in params:
        self.wfile.write('%s=%s\n' % (param, params[param][0]))

    self.wfile.write('</pre>')

    self.wfile.write('<h1>Request Headers:</h1><pre>%s</pre>' % self.headers)

    self.wfile.write('</body></html>')
    return True
  def DownloadHandler(self):
    """This handler sends a downloadable file with or without reporting
    the size (6K).

    After the first chunk is written, the handler blocks servicing other
    requests until one of them (see DownloadFinishHandler) clears
    self.server.waitForDownload, then sends the second chunk."""
    if self.path.startswith("/download-unknown-size"):
      send_length = False
    elif self.path.startswith("/download-known-size"):
      send_length = True
    else:
      return False

    #
    # The test which uses this functionality is attempting to send
    # small chunks of data to the client.  Use a fairly large buffer
    # so that we'll fill chrome's IO buffer enough to force it to
    # actually write the data.
    # See also the comments in the client-side of this test in
    # download_uitest.cc
    #
    size_chunk1 = 35*1024
    size_chunk2 = 10*1024

    self.send_response(200)
    self.send_header('Content-type', 'application/octet-stream')
    self.send_header('Cache-Control', 'max-age=0')
    if send_length:
      self.send_header('Content-Length', size_chunk1 + size_chunk2)
    self.end_headers()

    # First chunk of data:
    self.wfile.write("*" * size_chunk1)
    self.wfile.flush()

    # handle requests until one of them clears this flag.
    self.server.waitForDownload = True
    while self.server.waitForDownload:
      self.server.handle_request()

    # Second chunk of data:
    self.wfile.write("*" * size_chunk2)
    return True
  def DownloadFinishHandler(self):
    """This handler just tells the server to finish the current download
    by clearing the flag DownloadHandler is spinning on."""
    if not self._ShouldHandleRequest("/download-finish"):
      return False

    self.server.waitForDownload = False
    self.send_response(200)
    self.send_header('Content-type', 'text/html')
    self.send_header('Cache-Control', 'max-age=0')
    self.end_headers()
    return True
def FileHandler(self):
"""This handler sends the contents of the requested file. Wow, it's like
a real webserver!"""
prefix = self.server.file_root_url
if not self.path.startswith(prefix):
return False
file = self.path[len(prefix):]
entries = file.split('/');
path = os.path.join(self.server.data_dir, *entries)
if os.path.isdir(path):
path = os.path.join(path, 'index.html')
if not os.path.isfile(path):
print "File not found " + file + " full path:" + path
self.send_error(404)
return True
f = open(path, "rb")
data = f.read()
f.close()
# If file.mock-http-headers exists, it contains the headers we
# should send. Read them in and parse them.
headers_path = path + '.mock-http-headers'
if os.path.isfile(headers_path):
f = open(headers_path, "r")
# "HTTP/1.1 200 OK"
response = f.readline()
status_code = re.findall('HTTP/\d+.\d+ (\d+)', response)[0]
self.send_response(int(status_code))
for line in f:
# "name: value"
name, value = re.findall('(\S+):\s*(.*)', line)[0]
self.send_header(name, value)
f.close()
else:
# Could be more generic once we support mime-type sniffing, but for
# now we need to set it explicitly.
self.send_response(200)
self.send_header('Content-type', self.GetMIMETypeFromName(file))
self.send_header('Content-Length', len(data))
self.end_headers()
self.wfile.write(data)
return True
def RealFileWithCommonHeaderHandler(self):
  """This handler sends the contents of the requested file without the pseudo
  http head!"""
  # URL prefix that selects this handler; everything after it is a path
  # relative to the server's data directory.
  prefix='/realfiles/'
  if not self.path.startswith(prefix):
    return False
  file = self.path[len(prefix):]
  path = os.path.join(self.server.data_dir, file)
  try:
    f = open(path, "rb")
    data = f.read()
    f.close()
    # just simply set the MIME as octal stream
    self.send_response(200)
    self.send_header('Content-type', 'application/octet-stream')
    self.end_headers()
    self.wfile.write(data)
  except:
    # NOTE(review): bare except also hides programming errors; every
    # failure (missing file, I/O error) is reported to the client as 404.
    self.send_error(404)
  return True
def RealBZ2FileWithCommonHeaderHandler(self):
  """This handler sends the bzip2 contents of the requested file with
  corresponding Content-Encoding field in http head!

  An optional query string selects a delivery mode, e.g.
  '?incremental-header' dribbles out the payload in two writes.
  """
  prefix = '/realbz2files/'
  if not self.path.startswith(prefix):
    return False
  parts = self.path.split('?')
  file = parts[0][len(prefix):]
  path = os.path.join(self.server.data_dir, file) + '.bz2'
  if len(parts) > 1:
    options = parts[1]
  else:
    options = ''
  try:
    self.send_response(200)
    # BUG FIX: the Accept-Encoding header may be absent, in which case
    # get() returns None and the old .find() call raised AttributeError,
    # which the bare except below silently turned into a bogus 404.
    accept_encoding = self.headers.get("Accept-Encoding") or ""
    if accept_encoding.find("bzip2") != -1:
      f = open(path, "rb")
      data = f.read()
      f.close()
      self.send_header('Content-Encoding', 'bzip2')
      self.send_header('Content-type', 'application/x-bzip2')
      self.end_headers()
      if options == 'incremental-header':
        # Send the first byte, pause, then the rest, to exercise clients
        # that decode incrementally.
        self.wfile.write(data[:1])
        self.wfile.flush()
        time.sleep(1.0)
        self.wfile.write(data[1:])
      else:
        self.wfile.write(data)
    else:
      # Client does not support bzip2; send a plain-text explanation.
      self.send_header('Content-type', 'text/html; charset=ISO-8859-1')
      self.end_headers()
      self.wfile.write("you do not support bzip2 encoding")
  except:
    self.send_error(404)
  return True
def AuthBasicHandler(self):
  """This handler tests 'Basic' authentication. It just sends a page with
  title 'user/pass' if you succeed."""
  if not self._ShouldHandleRequest("/auth-basic"):
    return False
  username = userpass = password = b64str = ""
  # '?set-cookie-if-challenged' asks us to set a cookie along with the 401.
  set_cookie_if_challenged = self.path.find('?set-cookie-if-challenged') > 0
  auth = self.headers.getheader('authorization')
  try:
    if not auth:
      raise Exception('no auth')
    # Credentials arrive as "Basic <base64(user:pass)>".
    b64str = re.findall(r'Basic (\S+)', auth)[0]
    userpass = base64.b64decode(b64str)
    username, password = re.findall(r'([^:]+):(\S+)', userpass)[0]
    # Any username is accepted; only the password is checked.
    if password != 'secret':
      raise Exception('wrong password')
  except Exception, e:
    # Authentication failed.
    self.send_response(401)
    self.send_header('WWW-Authenticate', 'Basic realm="testrealm"')
    self.send_header('Content-type', 'text/html')
    if set_cookie_if_challenged:
      self.send_header('Set-Cookie', 'got_challenged=true')
    self.end_headers()
    # Echo the failure details back for debugging in the test logs.
    self.wfile.write('<html><head>')
    self.wfile.write('<title>Denied: %s</title>' % e)
    self.wfile.write('</head><body>')
    self.wfile.write('auth=%s<p>' % auth)
    self.wfile.write('b64str=%s<p>' % b64str)
    self.wfile.write('username: %s<p>' % username)
    self.wfile.write('userpass: %s<p>' % userpass)
    self.wfile.write('password: %s<p>' % password)
    self.wfile.write('You sent:<br>%s<p>' % self.headers)
    self.wfile.write('</body></html>')
    return True
  # Authentication successful. (Return a cachable response to allow for
  # testing cached pages that require authentication.)
  if_none_match = self.headers.getheader('if-none-match')
  if if_none_match == "abc":
    # Client already holds the cached copy (ETag matches).
    self.send_response(304)
    self.end_headers()
  else:
    self.send_response(200)
    self.send_header('Content-type', 'text/html')
    self.send_header('Cache-control', 'max-age=60000')
    self.send_header('Etag', 'abc')
    self.end_headers()
    self.wfile.write('<html><head>')
    self.wfile.write('<title>%s/%s</title>' % (username, password))
    self.wfile.write('</head><body>')
    self.wfile.write('auth=%s<p>' % auth)
    self.wfile.write('You sent:<br>%s<p>' % self.headers)
    self.wfile.write('</body></html>')
  return True
def AuthDigestHandler(self):
  """This handler tests 'Digest' authentication. It just sends a page with
  title 'user/pass' if you succeed."""
  if not self._ShouldHandleRequest("/auth-digest"):
    return False
  # Periodically generate a new nonce. Technically we should incorporate
  # the request URL into this, but we don't care for testing.
  nonce_life = 10  # seconds a nonce stays valid
  stale = False
  if (not self.server.nonce or
      (time.time() - self.server.nonce_time > nonce_life)):
    if self.server.nonce:
      # An expired (but otherwise well-formed) nonce is reported as stale.
      stale = True
    self.server.nonce_time = time.time()
    self.server.nonce = \
        _new_md5(time.ctime(self.server.nonce_time) +
                 'privatekey').hexdigest()
  nonce = self.server.nonce
  opaque = _new_md5('opaque').hexdigest()
  password = 'secret'
  realm = 'testrealm'
  auth = self.headers.getheader('authorization')
  pairs = {}
  try:
    if not auth:
      raise Exception('no auth')
    if not auth.startswith('Digest'):
      raise Exception('not digest')
    # Pull out all the name="value" pairs as a dictionary.
    pairs = dict(re.findall(r'(\b[^ ,=]+)="?([^",]+)"?', auth))
    # Make sure it's all valid.
    if pairs['nonce'] != nonce:
      raise Exception('wrong nonce')
    if pairs['opaque'] != opaque:
      raise Exception('wrong opaque')
    # Check the 'response' value and make sure it matches our magic hash.
    # See http://www.ietf.org/rfc/rfc2617.txt
    hash_a1 = _new_md5(
        ':'.join([pairs['username'], realm, password])).hexdigest()
    hash_a2 = _new_md5(':'.join([self.command, pairs['uri']])).hexdigest()
    if 'qop' in pairs and 'nc' in pairs and 'cnonce' in pairs:
      # RFC 2617 qop="auth" variant of the response digest.
      response = _new_md5(':'.join([hash_a1, nonce, pairs['nc'],
                                    pairs['cnonce'], pairs['qop'],
                                    hash_a2])).hexdigest()
    else:
      # Legacy RFC 2069 form without qop.
      response = _new_md5(':'.join([hash_a1, nonce, hash_a2])).hexdigest()
    if pairs['response'] != response:
      raise Exception('wrong password')
  except Exception, e:
    # Authentication failed: challenge the client (flagging the nonce as
    # stale when appropriate) and echo failure details for debugging.
    self.send_response(401)
    hdr = ('Digest '
           'realm="%s", '
           'domain="/", '
           'qop="auth", '
           'algorithm=MD5, '
           'nonce="%s", '
           'opaque="%s"') % (realm, nonce, opaque)
    if stale:
      hdr += ', stale="TRUE"'
    self.send_header('WWW-Authenticate', hdr)
    self.send_header('Content-type', 'text/html')
    self.end_headers()
    self.wfile.write('<html><head>')
    self.wfile.write('<title>Denied: %s</title>' % e)
    self.wfile.write('</head><body>')
    self.wfile.write('auth=%s<p>' % auth)
    self.wfile.write('pairs=%s<p>' % pairs)
    self.wfile.write('You sent:<br>%s<p>' % self.headers)
    self.wfile.write('We are replying:<br>%s<p>' % hdr)
    self.wfile.write('</body></html>')
    return True
  # Authentication successful.
  self.send_response(200)
  self.send_header('Content-type', 'text/html')
  self.end_headers()
  self.wfile.write('<html><head>')
  self.wfile.write('<title>%s/%s</title>' % (pairs['username'], password))
  self.wfile.write('</head><body>')
  self.wfile.write('auth=%s<p>' % auth)
  self.wfile.write('pairs=%s<p>' % pairs)
  self.wfile.write('</body></html>')
  return True
def SlowServerHandler(self):
  """Wait for the user suggested time before responding. The syntax is
  /slow?0.5 to wait for half a second."""
  if not self._ShouldHandleRequest("/slow"):
    return False
  query_char = self.path.find('?')
  wait_sec = 1.0
  if query_char >= 0:
    try:
      # BUG FIX: parse as float, not int -- the documented "/slow?0.5"
      # form used to raise ValueError and silently fall back to 1 second.
      wait_sec = float(self.path[query_char + 1:])
    except ValueError:
      # Malformed query: keep the 1 second default.
      pass
  time.sleep(wait_sec)
  self.send_response(200)
  self.send_header('Content-type', 'text/plain')
  self.end_headers()
  self.wfile.write("waited %g seconds" % wait_sec)
  return True
def ContentTypeHandler(self):
  """Returns a string of html with the given content type. E.g.,
  /contenttype?text/css returns an html file with the Content-Type
  header set to text/css."""
  if not self._ShouldHandleRequest("/contenttype"):
    return False
  # Everything after the first '?' names the content type; an empty value
  # falls back to text/html.
  content_type = self.path[self.path.find('?') + 1:].strip() or 'text/html'
  self.send_response(200)
  self.send_header('Content-Type', content_type)
  self.end_headers()
  self.wfile.write("<html>\n<body>\n<p>HTML text</p>\n</body>\n</html>\n")
  return True
def ServerRedirectHandler(self):
  """Sends a server redirect to the given URL. The syntax is
  '/server-redirect?http://foo.bar/asdf' to redirect to
  'http://foo.bar/asdf'"""
  test_name = "/server-redirect"
  if not self._ShouldHandleRequest(test_name):
    return False
  sep = self.path.find('?')
  # A missing or empty destination gets the help page instead.
  if sep < 0 or sep + 1 >= len(self.path):
    self.sendRedirectHelp(test_name)
    return True
  destination = self.path[sep + 1:]
  self.send_response(301)  # moved permanently
  self.send_header('Location', destination)
  self.send_header('Content-type', 'text/html')
  self.end_headers()
  self.wfile.write('<html><head>')
  self.wfile.write('</head><body>Redirecting to %s</body></html>'
                   % destination)
  return True
def ClientRedirectHandler(self):
  """Sends a client redirect to the given URL. The syntax is
  '/client-redirect?http://foo.bar/asdf' to redirect to
  'http://foo.bar/asdf'"""
  test_name = "/client-redirect"
  if not self._ShouldHandleRequest(test_name):
    return False
  sep = self.path.find('?')
  if sep < 0 or sep + 1 >= len(self.path):
    self.sendRedirectHelp(test_name)
    return True
  destination = self.path[sep + 1:]
  # A meta-refresh with zero delay performs the redirect on the client side.
  self.send_response(200)
  self.send_header('Content-type', 'text/html')
  self.end_headers()
  self.wfile.write('<html><head>')
  self.wfile.write('<meta http-equiv="refresh" content="0;url=%s">'
                   % destination)
  self.wfile.write('</head><body>Redirecting to %s</body></html>'
                   % destination)
  return True
def DefaultResponseHandler(self):
  """Catch-all handler for requests not claimed by the handlers above.

  The Content-Length header is required: without it the https connection
  is not closed properly and the browser keeps expecting data.
  """
  body = "Default response given for path: " + self.path
  self.send_response(200)
  self.send_header('Content-type', 'text/html')
  self.send_header("Content-Length", len(body))
  self.end_headers()
  self.wfile.write(body)
  return True
def RedirectConnectHandler(self):
  """Sends a redirect to the CONNECT request for www.redirect.com. This
  response is not specified by the RFC, so the browser should not follow
  the redirect."""
  if "www.redirect.com" not in self.path:
    return False
  self.send_response(302)  # moved temporarily
  for name, value in (('Location', "http://www.destination.com/foo.js"),
                      ('Connection', 'close')):
    self.send_header(name, value)
  self.end_headers()
  return True
def ServerAuthConnectHandler(self):
  """Sends a 401 to the CONNECT request for www.server-auth.com. This
  response doesn't make sense because the proxy server cannot request
  server authentication."""
  if "www.server-auth.com" not in self.path:
    return False
  self.send_response(401)  # unauthorized
  self.send_header('WWW-Authenticate', 'Basic realm="WallyWorld"')
  self.send_header('Connection', 'close')
  self.end_headers()
  return True
def DefaultConnectResponseHandler(self):
  """Catch-all for CONNECT requests not claimed by the handlers above.

  Real web servers respond to CONNECT with 400.
  """
  message = "Your client has issued a malformed or illegal request."
  self.send_response(400)  # bad request
  self.send_header('Content-type', 'text/html')
  self.send_header("Content-Length", len(message))
  self.end_headers()
  self.wfile.write(message)
  return True
def do_CONNECT(self):
  """Dispatches a CONNECT request to the first handler that claims it."""
  # any() stops at the first handler returning True, exactly like the
  # original explicit loop with an early return.
  any(handler() for handler in self._connect_handlers)
def do_GET(self):
  """Dispatches a GET request to the first handler that claims it."""
  # any() short-circuits on the first handler returning True.
  any(handler() for handler in self._get_handlers)
def do_POST(self):
  """Dispatches a POST request to the first handler that claims it."""
  # any() short-circuits on the first handler returning True.
  any(handler() for handler in self._post_handlers)
# Called by the redirect handlers when the request lacks a destination.
def sendRedirectHelp(self, redirect_name):
  """Sends a small help page explaining the redirect query syntax."""
  self.send_response(200)
  self.send_header('Content-type', 'text/html')
  self.end_headers()
  help_html = ('<html><body><h1>Error: no redirect destination</h1>'
               'Use <pre>%s?http://dest...</pre>'
               '</body></html>') % redirect_name
  self.wfile.write(help_html)
def MakeDumpDir(data_dir):
  """Create directory named 'dump' where uploaded data via HTTP POST request
  will be stored. If the directory already exists all files and subdirectories
  will be deleted."""
  dump_dir = os.path.join(data_dir, 'dump')
  # Start from a clean slate: remove any previous dump directory wholesale.
  if os.path.isdir(dump_dir):
    shutil.rmtree(dump_dir)
  os.mkdir(dump_dir)
def MakeDataDir():
  """Returns the directory files are served from, or None on error.

  Uses --data-dir when given (the directory must already exist); otherwise
  falls back to a test/data directory relative to the executable.
  NOTE(review): relies on the module-level 'options' set in __main__.
  """
  if options.data_dir:
    if not os.path.isdir(options.data_dir):
      print 'specified data dir not found: ' + options.data_dir + ' exiting...'
      return None
    my_data_dir = options.data_dir
  else:
    # Create the default path to our data dir, relative to the exe dir.
    my_data_dir = os.path.dirname(sys.argv[0])
    my_data_dir = os.path.join(my_data_dir, "..", "..", "..", "..",
                               "test", "data")
    #TODO(ibrar): Must use Find* funtion defined in google\tools
    #i.e my_data_dir = FindUpward(my_data_dir, "test", "data")
  return my_data_dir
def main(options, args):
  """Starts an HTTP(S) or FTP test server on 127.0.0.1:options.port."""
  # redirect output to a log file so it doesn't spam the unit test output
  logfile = open('testserver.log', 'w')
  sys.stderr = sys.stdout = logfile
  port = options.port
  if options.server_type == SERVER_HTTP:
    if options.cert:
      # let's make sure the cert file exists.
      if not os.path.isfile(options.cert):
        print 'specified cert file not found: ' + options.cert + ' exiting...'
        return
      server = HTTPSServer(('127.0.0.1', port), TestPageHandler, options.cert)
      print 'HTTPS server started on port %d...' % port
    else:
      server = StoppableHTTPServer(('127.0.0.1', port), TestPageHandler)
      print 'HTTP server started on port %d...' % port
    # Attach serving state the request handlers read at runtime.
    server.data_dir = MakeDataDir()
    server.file_root_url = options.file_root_url
    MakeDumpDir(server.data_dir)
  # means FTP Server
  else:
    my_data_dir = MakeDataDir()
    def line_logger(msg):
      # An FTP log line containing 'kill' shuts the server down; the tests
      # trigger this to stop the server remotely.
      if (msg.find("kill") >= 0):
        server.stop = True
        print 'shutting down server'
        sys.exit(0)
    # Instantiate a dummy authorizer for managing 'virtual' users
    authorizer = pyftpdlib.ftpserver.DummyAuthorizer()
    # Define a new user having full r/w permissions and a read-only
    # anonymous user
    authorizer.add_user('chrome', 'chrome', my_data_dir, perm='elradfmw')
    authorizer.add_anonymous(my_data_dir)
    # Instantiate FTP handler class
    ftp_handler = pyftpdlib.ftpserver.FTPHandler
    ftp_handler.authorizer = authorizer
    pyftpdlib.ftpserver.logline = line_logger
    # Define a customized banner (string returned when client connects)
    ftp_handler.banner = ("pyftpdlib %s based ftpd ready." %
                          pyftpdlib.ftpserver.__ver__)
    # Instantiate FTP server class and listen to 127.0.0.1:port
    address = ('127.0.0.1', port)
    server = pyftpdlib.ftpserver.FTPServer(address, ftp_handler)
    print 'FTP server started on port %d...' % port
  try:
    server.serve_forever()
  except KeyboardInterrupt:
    print 'shutting down server'
    server.stop = True
if __name__ == '__main__':
  # Command-line interface: choose server type, port, data directory,
  # optional HTTPS cert, and the URL root under which files are served.
  option_parser = optparse.OptionParser()
  option_parser.add_option("-f", '--ftp', action='store_const',
                           const=SERVER_FTP, default=SERVER_HTTP,
                           dest='server_type',
                           help='FTP or HTTP server default HTTP')
  option_parser.add_option('', '--port', default='8888', type='int',
                           help='Port used by the server')
  option_parser.add_option('', '--data-dir', dest='data_dir',
                           help='Directory from which to read the files')
  option_parser.add_option('', '--https', dest='cert',
                           help='Specify that https should be used, specify '
                           'the path to the cert containing the private key '
                           'the server should use')
  option_parser.add_option('', '--file-root-url', default='/files/',
                           help='Specify a root URL for files served.')
  options, args = option_parser.parse_args()
  sys.exit(main(options, args))
| kuiche/chromium | net/tools/testserver/testserver.py | Python | bsd-3-clause | 36,527 | 0.008104 |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
from typing import Callable
from typing import Union
from typing import cast
from enum import Enum
if False:
# Work around for MYPY for cyclic import problem
from libcloud.compute.base import BaseDriver
__all__ = [
"Type",
"LibcloudError",
"MalformedResponseError",
"ProviderError",
"InvalidCredsError",
"InvalidCredsException",
"LazyList"
]
class Type(str, Enum):
    """Base class for string-valued enums used across libcloud.

    Inherits from both ``str`` and ``Enum`` so members keep behaving like
    plain strings for code written before the Enum migration.
    """

    @classmethod
    def tostring(cls, value):
        # type: (Union[Enum, str]) -> str
        """Return the string representation of the state object attribute
        :param str value: the state object to turn into string
        :return: the uppercase string that represents the state object
        :rtype: str
        """
        value = cast(Enum, value)
        return str(value._value_).upper()

    @classmethod
    def fromstring(cls, value):
        # type: (str) -> str
        """Return the state object attribute that matches the string
        :param str value: the string to look up
        :return: the state object attribute that matches the string
        :rtype: str
        """
        return getattr(cls, value.upper(), None)

    """
    NOTE: These methods are here for backward compatibility reasons where
    Type values were simple strings and Type didn't inherit from Enum.
    """

    def __eq__(self, other):
        # Compare by value so members equal both other members and plain
        # strings holding the same value.
        if isinstance(other, Type):
            return other.value == self.value
        elif isinstance(other, str):
            return self.value == other
        return super(Type, self).__eq__(other)

    def upper(self):
        return self.value.upper()  # pylint: disable=no-member

    def lower(self):
        return self.value.lower()  # pylint: disable=no-member

    def __ne__(self, other):
        return not self.__eq__(other)

    def __str__(self):
        return str(self.value)

    def __repr__(self):
        return self.value

    def __hash__(self):
        # NOTE(review): identity-based hash means two equal members/strings
        # can hash differently (dict/set lookups by plain string may miss);
        # kept as-is for backward compatibility.
        return id(self)
class LibcloudError(Exception):
    """The base class for other libcloud exceptions."""

    def __init__(self, value, driver=None):
        # type: (str, BaseDriver) -> None
        super(LibcloudError, self).__init__(value)
        # Human-readable error description and (optionally) the driver that
        # produced it.
        self.value = value
        self.driver = driver

    def __str__(self):
        return self.__repr__()

    def __repr__(self):
        return "<LibcloudError in %r %r>" % (self.driver, self.value)
class MalformedResponseError(LibcloudError):
    """Exception for the cases when a provider returns a malformed
    response, e.g. you request JSON and provider returns
    '<h3>something</h3>' due to some error on their side."""

    def __init__(self, value, body=None, driver=None):
        # type: (str, Optional[str], Optional[BaseDriver]) -> None
        # Intentionally does not invoke the parent constructor, matching
        # the historical behaviour (Exception.args stays empty).
        self.value = value
        self.driver = driver
        self.body = body

    def __str__(self):
        return self.__repr__()

    def __repr__(self):
        return "<MalformedResponseException in %r %r>: %r" % (
            self.driver, self.value, self.body)
class ProviderError(LibcloudError):
    """
    Exception used when provider gives back
    error response (HTTP 4xx, 5xx) for a request.

    Specific sub types can be derived for errors like
    HTTP 401 : InvalidCredsError
    HTTP 404 : NodeNotFoundError, ContainerDoesNotExistError
    """

    def __init__(self, value, http_code, driver=None):
        # type: (str, int, Optional[BaseDriver]) -> None
        super(ProviderError, self).__init__(value=value, driver=driver)
        # HTTP status code the provider responded with.
        self.http_code = http_code

    def __repr__(self):
        return repr(self.value)

    # str() and repr() render identically for provider errors.
    __str__ = __repr__
class InvalidCredsError(ProviderError):
    """Exception used when invalid credentials are used on a provider."""

    def __init__(self, value='Invalid credentials with the provider',
                 driver=None):
        # type: (str, Optional[BaseDriver]) -> None
        # 401 is hard-coded rather than taken from http.client constants
        # because importing http.client adds ~20ms of import time.
        super(InvalidCredsError, self).__init__(value, http_code=401,
                                                driver=driver)


# Deprecated alias of :class:`InvalidCredsError`
InvalidCredsException = InvalidCredsError
class ServiceUnavailableError(ProviderError):
    """Exception used when a provider returns 503 Service Unavailable."""

    def __init__(self, value='Service unavailable at provider', driver=None):
        # type: (str, Optional[BaseDriver]) -> None
        # 503 is hard-coded rather than taken from http.client constants
        # because importing http.client adds ~20ms of import time.
        super(ServiceUnavailableError, self).__init__(value, http_code=503,
                                                      driver=driver)
class LazyList(object):
    """List-like wrapper that pages its contents in on first access.

    ``get_more`` is invoked as ``get_more(last_key=..., value_dict=...)`` and
    must return an ``(items, last_key, exhausted)`` tuple; it is called
    repeatedly until ``exhausted`` is true.
    """

    def __init__(self, get_more, value_dict=None):
        # type: (Callable, Optional[dict]) -> None
        self._data = []  # type: list
        self._last_key = None      # continuation key for the next page
        self._exhausted = False    # paginator reported no more pages
        self._all_loaded = False   # _load_all() has completed at least once
        self._get_more = get_more
        self._value_dict = value_dict or {}

    def __iter__(self):
        if not self._all_loaded:
            self._load_all()
        return iter(self._data)

    def __getitem__(self, index):
        if index >= len(self._data) and not self._all_loaded:
            self._load_all()
        return self._data[index]

    def __len__(self):
        self._load_all()
        return len(self._data)

    def __repr__(self):
        self._load_all()
        return '[%s]' % ', '.join(repr(item) for item in self._data)

    def _load_all(self):
        # Drain the paginator; each call advances the continuation key.
        while not self._exhausted:
            items, self._last_key, self._exhausted = self._get_more(
                last_key=self._last_key, value_dict=self._value_dict)
            self._data.extend(items)
        self._all_loaded = True
| Kami/libcloud | libcloud/common/types.py | Python | apache-2.0 | 6,989 | 0 |
from enum import Enum
class EventType(Enum):
    """Enum containing the various types of events that can occur."""

    # Client lifecycle
    CLIENT_READY = "CLIENT_READY"
    CLIENT_RESUMED = "CLIENT_RESUMED"

    # Messages
    MESSAGE_RECEIVED = "MESSAGE_RECEIVED"
    SERVER_MESSAGE_RECEIVED = "SERVER_MESSAGE_RECEIVED"
    PRIVATE_MESSAGE_RECEIVED = "PRIVATE_MESSAGE_RECEIVED"
    COMMAND_RECEIVED = "COMMAND_RECEIVED"
    MESSAGE_DELETED = "MESSAGE_DELETED"
    MESSAGE_EDITED = "MESSAGE_EDITED"

    # Channels
    CHANNEL_DELETED = "CHANNEL_DELETED"
    CHANNEL_CREATED = "CHANNEL_CREATED"
    CHANNEL_UPDATED = "CHANNEL_UPDATED"

    # Members
    MEMBER_JOINED = "MEMBER_JOINED"
    MEMBER_REMOVED = "MEMBER_REMOVED"
    MEMBER_UPDATED = "MEMBER_UPDATED"
    MEMBER_BANNED = "MEMBER_BANNED"
    MEMBER_UNBANNED = "MEMBER_UNBANNED"
    MEMBER_TYPING = "MEMBER_TYPING"

    # Servers
    SERVER_JOINED = "SERVER_JOINED"
    SERVER_REMOVED = "SERVER_REMOVED"
    SERVER_UPDATED = "SERVER_UPDATED"

    # Roles
    ROLE_CREATED = "ROLE_CREATED"
    ROLE_DELETED = "ROLE_DELETED"
    ROLE_UPDATED = "ROLE_UPDATED"

    # Availability / voice / reactions / groups
    SERVER_AVAILABLE = "SERVER_AVAILABLE"
    SERVER_UNAVAILABLE = "SERVER_UNAVAILABLE"
    VOICE_STATE_UPDATED = "VOICE_STATE_UPDATED"
    REACTION_ADDED = "REACTION_ADDED"
    REACTION_REMOVED = "REACTION_REMOVED"
    REACTIONS_CLEARED = "REACTIONS_CLEARED"
    MEMBER_JOINED_GROUP = "MEMBER_JOINED_GROUP"
    MEMBER_REMOVED_FROM_GROUP = "MEMBER_REMOVED_FROM_GROUP"
    SERVER_EMOJIS_UPDATED = "SERVER_EMOJIS_UPDATED"
class RedisStorageScope(Enum):
    """Enum containing the possible Redis storage scopes.

    The scope controls how widely a stored value is shared, from global
    (all consumers) down to a single user.
    """
    GLOBAL = "GLOBAL"
    PLUGIN = "PLUGIN"
    SERVER = "SERVER"
    CHANNEL = "CHANNEL"
    USER = "USER"
| nint8835/NintbotForDiscordV2 | NintbotForDiscord/Enums.py | Python | mit | 1,656 | 0 |
import os
import re
import sys
# This import is apparently needed for Nose on Red Hat's Python
import multiprocessing
from distutils.command.build_ext import build_ext
from distutils.errors import (CCompilerError, DistutilsExecError,
DistutilsPlatformError)
try:
from setuptools import setup, Extension, Feature
except ImportError:
from distutils.core import setup, Extension
Feature = None
# Custom distutils commands registered below (build_ext with fallback).
cmdclass = {}

# Interpreter detection: the C extension is skipped on these platforms.
PYPY = hasattr(sys, 'pypy_version_info')
JYTHON = sys.platform.startswith('java')

requirements = []
# The ipaddr backport is only needed before Python 3.3 (which added
# the stdlib ipaddress module).
if sys.version_info[0] == 2 or (sys.version_info[0] == 3
                                and sys.version_info[1] < 3):
    requirements.append('ipaddr')

compile_args = ['-Wall', '-Wextra']
if sys.version_info[0] == 2:
    compile_args.append('-fno-strict-aliasing')

# The optional C speedup module; requires libmaxminddb at build time.
ext_module = [
    Extension(
        'maxminddb.extension',
        libraries=['maxminddb'],
        sources=['maxminddb/extension/maxminddb.c'],
        extra_compile_args=compile_args,
    )
]

# Cargo cult code for installing extension with pure Python fallback.
# Taken from SQLAlchemy, but this same basic code exists in many modules.
ext_errors = (CCompilerError, DistutilsExecError, DistutilsPlatformError)
if sys.platform == 'win32':
    # 2.6's distutils.msvc9compiler can raise an IOError when failing to
    # find the compiler
    ext_errors += (IOError,)
class BuildFailed(Exception):
    """Raised when compiling the C extension fails for a tolerable reason."""

    def __init__(self):
        # Capture the exception currently being handled; this works under
        # both Python 2 and 3 regardless of `except ... as e` scoping.
        self.cause = sys.exc_info()[1]
class ve_build_ext(build_ext):
    # This class allows C extension building to fail: expected compiler and
    # platform errors are converted into BuildFailed so the caller can retry
    # a pure-Python install.

    def run(self):
        # Fails early (before compiling) on unsupported platforms.
        try:
            build_ext.run(self)
        except DistutilsPlatformError:
            raise BuildFailed()

    def build_extension(self, ext):
        try:
            build_ext.build_extension(self, ext)
        except ext_errors:
            raise BuildFailed()
        except ValueError:
            # this can happen on Windows 64 bit, see Python issue 7511
            if "'path'" in str(sys.exc_info()[1]):  # works with both py 2/3
                raise BuildFailed()
            raise
cmdclass['build_ext'] = ve_build_ext

# Project root, README text, and metadata scraped from the package source.
ROOT = os.path.dirname(__file__)

with open(os.path.join(ROOT, 'README.rst'), 'rb') as fd:
    README = fd.read().decode('utf8')

with open(os.path.join(ROOT, 'maxminddb', '__init__.py'), 'rb') as fd:
    maxminddb_text = fd.read().decode('utf8')

# Pull __license__ and __version__ out of the package source with a regex
# instead of importing it (importing could fail before the extension exists).
LICENSE = re.compile(
    r".*__license__ = '(.*?)'", re.S).match(maxminddb_text).group(1)

VERSION = re.compile(
    r".*__version__ = '(.*?)'", re.S).match(maxminddb_text).group(1)
def status_msgs(*msgs):
    """Print the given messages framed by rows of asterisks."""
    bar = '*' * 75
    print(bar)
    for message in msgs:
        print(message)
    print(bar)
def find_packages(location):
    """Return dotted package names for the maxminddb tree under *location*.

    A directory counts as a package only when it contains an __init__.py.
    """
    base_depth = len(location.split(os.sep))
    found = []
    for pkg in ['maxminddb']:
        for dirpath, _subdirs, filenames in os.walk(
                os.path.join(location, pkg)):
            if '__init__.py' not in filenames:
                continue
            # Convert the absolute path back into a dotted module path
            # relative to *location*.
            found.append(".".join(dirpath.split(os.sep)[base_depth:]))
    return found
def run_setup(with_cext):
    """Invoke setup(), optionally building the C extension.

    When *with_cext* is true the maxminddb.extension module is built --
    wrapped in a setuptools Feature when available, otherwise passed as a
    plain ext_modules list.
    """
    kwargs = {}
    if with_cext:
        if Feature:
            kwargs['features'] = {'extension': Feature(
                "optional C implementation",
                standard=True,
                ext_modules=ext_module
            )}
        else:
            kwargs['ext_modules'] = ext_module

    setup(
        name='maxminddb',
        version=VERSION,
        description='Python extension for reading the MaxMind DB format',
        long_description=README,
        url='http://www.maxmind.com/',
        bugtrack_url='https://github.com/maxmind/MaxMind-DB-Reader-python/issues',
        packages=find_packages('.'),
        package_data={'': ['LICENSE']},
        package_dir={'maxminddb': 'maxminddb'},
        include_package_data=True,
        install_requires=requirements,
        tests_require=['nose'],
        test_suite='nose.collector',
        license=LICENSE,
        cmdclass=cmdclass,
        classifiers=(
            'Development Status :: 3 - Alpha',
            'Environment :: Web Environment',
            'Intended Audience :: Developers',
            'Intended Audience :: System Administrators',
            'License :: OSI Approved :: Apache Software License',
            'Programming Language :: Python :: 2.6',
            'Programming Language :: Python :: 2.7',
            'Programming Language :: Python :: 3',
            'Programming Language :: Python :: 3.3',
            'Programming Language :: Python',
            'Topic :: Internet :: Proxy Servers',
            'Topic :: Internet',
        ),
        **kwargs
    )
# Build strategy: skip the C extension entirely on PyPy/Jython; elsewhere try
# with the extension first and fall back to pure Python when compilation fails.
if PYPY or JYTHON:
    run_setup(False)
    status_msgs(
        "WARNING: Disabling C extension due to Python platform.",
        "Plain-Python build succeeded."
    )
else:
    try:
        run_setup(True)
    except BuildFailed as exc:
        status_msgs(
            exc.cause,
            "WARNING: The C extension could not be compiled, " +
            "speedups are not enabled.",
            "Failure information, if any, is above.",
            "Retrying the build without the C extension now."
        )

        run_setup(False)

        status_msgs(
            "WARNING: The C extension could not be compiled, " +
            "speedups are not enabled.",
            "Plain-Python build succeeded."
        )
| kikinteractive/MaxMind-DB-Reader-python | setup.py | Python | apache-2.0 | 5,415 | 0.000554 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from sqlalchemy import *
from sqlalchemy import exc
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import *
from flask_login import AnonymousUserMixin
import sys
import os
import logging
from werkzeug.security import generate_password_hash
from flask_babel import gettext as _
import json
import datetime
from binascii import hexlify
# Location of the application database; CALIBRE_DBPATH overrides the default
# of "<package parent dir>/app.db".
dbpath = os.path.join(os.path.normpath(os.getenv("CALIBRE_DBPATH", os.path.dirname(os.path.realpath(__file__)) + os.sep + ".." + os.sep)), "app.db")
engine = create_engine('sqlite:///{0}'.format(dbpath), echo=False)
Base = declarative_base()

# User permission bit flags, combined into User.role.
ROLE_USER = 0
ROLE_ADMIN = 1
ROLE_DOWNLOAD = 2
ROLE_UPLOAD = 4
ROLE_EDIT = 8
ROLE_PASSWD = 16
ROLE_ANONYMOUS = 32
ROLE_EDIT_SHELFS = 64
ROLE_DELETE_BOOKS = 128

# UI visibility bit flags, combined into User.sidebar_view.
DETAIL_RANDOM = 1
SIDEBAR_LANGUAGE = 2
SIDEBAR_SERIES = 4
SIDEBAR_CATEGORY = 8
SIDEBAR_HOT = 16
SIDEBAR_RANDOM = 32
SIDEBAR_AUTHOR = 64
SIDEBAR_BEST_RATED = 128
SIDEBAR_READ_AND_UNREAD = 256
SIDEBAR_RECENT = 512
SIDEBAR_SORTED = 1024

# Initial admin password and web server port (CALIBRE_PORT overrides).
DEFAULT_PASS = "admin123"
DEFAULT_PORT = int(os.environ.get("CALIBRE_PORT", 8083))
DEVELOPMENT = False
class UserBase:
    """Mixin with behaviour shared by real and anonymous users.

    ``role`` and ``sidebar_view`` are integer bit fields; the module-level
    ROLE_* and SIDEBAR_*/DETAIL_* constants name the individual bits.  The
    original code checked these bits with three different idioms
    (``True if ... else False``, ``bool(...)``, nested ``if``); the checks
    are now unified behind two private helpers.
    """

    def _has_role_bit(self, bit):
        # True when the given permission bit is set in self.role.
        return self.role is not None and self.role & bit == bit

    def _has_view_bit(self, bit):
        # True when the given UI visibility bit is set in self.sidebar_view.
        return self.sidebar_view is not None and self.sidebar_view & bit == bit

    @property
    def is_authenticated(self):
        """Flask-Login hook: real users are always authenticated."""
        return True

    def role_admin(self):
        return self._has_role_bit(ROLE_ADMIN)

    def role_download(self):
        return self._has_role_bit(ROLE_DOWNLOAD)

    def role_upload(self):
        return self._has_role_bit(ROLE_UPLOAD)

    def role_edit(self):
        return self._has_role_bit(ROLE_EDIT)

    def role_passwd(self):
        return self._has_role_bit(ROLE_PASSWD)

    def role_anonymous(self):
        return self._has_role_bit(ROLE_ANONYMOUS)

    def role_edit_shelfs(self):
        return self._has_role_bit(ROLE_EDIT_SHELFS)

    def role_delete_books(self):
        return self._has_role_bit(ROLE_DELETE_BOOKS)

    @property
    def is_active(self):
        """Flask-Login hook: accounts cannot be deactivated here."""
        return True

    @property
    def is_anonymous(self):
        return False

    def get_id(self):
        """Flask-Login hook: the user id as a string."""
        return str(self.id)

    def filter_language(self):
        """Returns the language code books are filtered by ('all' = none)."""
        return self.default_language

    def show_random_books(self):
        return self._has_view_bit(SIDEBAR_RANDOM)

    def show_language(self):
        return self._has_view_bit(SIDEBAR_LANGUAGE)

    def show_hot_books(self):
        return self._has_view_bit(SIDEBAR_HOT)

    def show_recent(self):
        return self._has_view_bit(SIDEBAR_RECENT)

    def show_sorted(self):
        return self._has_view_bit(SIDEBAR_SORTED)

    def show_series(self):
        return self._has_view_bit(SIDEBAR_SERIES)

    def show_category(self):
        return self._has_view_bit(SIDEBAR_CATEGORY)

    def show_author(self):
        return self._has_view_bit(SIDEBAR_AUTHOR)

    def show_best_rated_books(self):
        return self._has_view_bit(SIDEBAR_BEST_RATED)

    def show_read_and_unread(self):
        return self._has_view_bit(SIDEBAR_READ_AND_UNREAD)

    def show_detail_random(self):
        return self._has_view_bit(DETAIL_RANDOM)

    def __repr__(self):
        return '<User %r>' % self.nickname
# Baseclass for Users in Calibre-web, settings which are depending on certain
# users are stored here. It is derived from UserBase (all access methods are
# declared there).
class User(UserBase, Base):
    """ORM model for a registered user (table ``user``)."""
    __tablename__ = 'user'
    id = Column(Integer, primary_key=True)
    nickname = Column(String(64), unique=True)
    email = Column(String(120), unique=True, default="")
    # Bitmask of ROLE_* permission flags.
    role = Column(SmallInteger, default=ROLE_USER)
    password = Column(String)
    kindle_mail = Column(String(120), default="")
    shelf = relationship('Shelf', backref='user', lazy='dynamic', order_by='Shelf.name')
    downloads = relationship('Downloads', backref='user', lazy='dynamic')
    # UI locale (two-letter code) and bitmask of SIDEBAR_*/DETAIL_* flags.
    locale = Column(String(2), default="en")
    sidebar_view = Column(Integer, default=1)
    # 'all' disables per-language filtering.
    default_language = Column(String(3), default="all")
    mature_content = Column(Boolean, default=True)
# Class for the anonymous user; derived from UserBase and overrides the
# methods and properties that differ for an anonymous visitor.
class Anonymous(AnonymousUserMixin, UserBase):
    """Stand-in user object used when nobody is logged in."""

    def __init__(self):
        self.loadSettings()

    def loadSettings(self):
        """Copies the designated anonymous DB user's settings onto self."""
        # The user row flagged with ROLE_ANONYMOUS holds the guest account.
        # NOTE(review): assumes such a row and a Settings row always exist;
        # .first() returning None would crash -- confirm setup guarantees it.
        data = session.query(User).filter(User.role.op('&')(ROLE_ANONYMOUS) == ROLE_ANONYMOUS).first()  # type: User
        settings = session.query(Settings).first()
        self.nickname = data.nickname
        self.role = data.role
        self.id=data.id
        self.sidebar_view = data.sidebar_view
        self.default_language = data.default_language
        self.locale = data.locale
        self.mature_content = data.mature_content
        self.anon_browse = settings.config_anonbrowse

    def role_admin(self):
        # Anonymous visitors never get admin rights.
        return False

    @property
    def is_active(self):
        return False

    @property
    def is_anonymous(self):
        # Only "anonymous" when anonymous browsing is enabled in settings.
        return self.anon_browse

    @property
    def is_authenticated(self):
        return False
# Base class representing shelves in Calibre-Web's app.db
class Shelf(Base):
    """A named book shelf owned by a user (app.db 'shelf' table)."""
    __tablename__ = 'shelf'

    id = Column(Integer, primary_key=True)
    name = Column(String)
    # 0 = private to the owner; non-zero presumably means visible to everyone
    # -- TODO confirm against the view code.
    is_public = Column(Integer, default=0)
    user_id = Column(Integer, ForeignKey('user.id'))

    def __repr__(self):
        return '<Shelf %r>' % self.name
# Base class representing the N:M relationship between books and shelves in app.db
class BookShelf(Base):
    """N:M link between a book and a shelf, with an explicit position."""
    __tablename__ = 'book_shelf_link'

    id = Column(Integer, primary_key=True)
    book_id = Column(Integer)
    # Position of the book within the shelf.
    order = Column(Integer)
    shelf = Column(Integer, ForeignKey('shelf.id'))

    def __repr__(self):
        # NOTE(review): says '<Book ...>' although this is a book/shelf link;
        # looks like a copy/paste slip -- confirm before changing the output.
        return '<Book %r>' % self.id
class ReadBook(Base):
    """Per-user read flag for a book (app.db 'book_read_link' table)."""
    __tablename__ = 'book_read_link'

    id = Column(Integer, primary_key=True)
    book_id = Column(Integer, unique=False)
    user_id = Column(Integer, ForeignKey('user.id'), unique=False)
    is_read = Column(Boolean, unique=False)
class Bookmark(Base):
    """Reading position a user saved for a book in a particular format."""
    __tablename__ = 'bookmark'

    id = Column(Integer, primary_key=True)
    user_id = Column(Integer, ForeignKey('user.id'))
    book_id = Column(Integer)
    # Format name; compared case-insensitively (NOCASE collation).
    format = Column(String(collation='NOCASE'))
    bookmark_key = Column(String)
# Baseclass representing Downloads from calibre-web in app.db
class Downloads(Base):
    """Record of a book downloaded by a user (app.db 'downloads' table)."""
    __tablename__ = 'downloads'

    id = Column(Integer, primary_key=True)
    book_id = Column(Integer)
    user_id = Column(Integer, ForeignKey('user.id'))

    def __repr__(self):
        # Fixed: the closing '>' was missing from the repr template.
        return '<Download %r>' % self.book_id
# Baseclass for representing settings in app.db with email server settings and Calibre database settings
# (application settings)
class Settings(Base):
    """Application settings row: e-mail server config plus Calibre-Web options."""
    __tablename__ = 'settings'

    id = Column(Integer, primary_key=True)
    # E-mail (send-to-kindle) server settings.
    mail_server = Column(String)
    mail_port = Column(Integer, default=25)
    mail_use_ssl = Column(SmallInteger, default=0)
    mail_login = Column(String)
    mail_password = Column(String)
    mail_from = Column(String)
    # Application settings.
    config_calibre_dir = Column(String)
    config_port = Column(Integer, default=DEFAULT_PORT)
    config_calibre_web_title = Column(String, default=u'Calibre-web')
    config_books_per_page = Column(Integer, default=60)
    config_random_books = Column(Integer, default=4)
    config_title_regex = Column(String, default=u'^(A|The|An|Der|Die|Das|Den|Ein|Eine|Einen|Dem|Des|Einem|Eines)\s+')
    config_log_level = Column(SmallInteger, default=logging.INFO)
    config_uploading = Column(SmallInteger, default=0)
    config_anonbrowse = Column(SmallInteger, default=0)
    config_public_reg = Column(SmallInteger, default=0)
    config_default_role = Column(SmallInteger, default=0)
    # Fixed: this column was declared twice (once here and once after the
    # Google Drive block); the duplicate has been removed.
    config_columns_to_ignore = Column(String)
    config_use_google_drive = Column(Boolean)
    config_google_drive_client_id = Column(String)
    config_google_drive_client_secret = Column(String)
    config_google_drive_folder = Column(String)
    config_google_drive_calibre_url_base = Column(String)
    config_google_drive_watch_changes_response = Column(String)
    config_remote_login = Column(Boolean)
    config_use_goodreads = Column(Boolean)
    config_goodreads_api_key = Column(String)
    config_goodreads_api_secret = Column(String)
    config_mature_content_tags = Column(String)  # type: str

    def __repr__(self):
        # Fixed: previously this returned None (bare 'pass'), which makes
        # repr() raise "TypeError: __repr__ returned non-string".
        return '<Settings %r>' % self.config_calibre_web_title
class RemoteAuthToken(Base):
    """Short-lived random token for logging in a remote device (10 minute expiry)."""
    __tablename__ = 'remote_auth_token'

    id = Column(Integer, primary_key=True)
    # 8 hex characters derived from 4 random bytes.
    auth_token = Column(String(8), unique=True)
    user_id = Column(Integer, ForeignKey('user.id'))
    verified = Column(Boolean, default=False)
    expiration = Column(DateTime)

    def __init__(self):
        # NOTE(review): hexlify() returns bytes on Python 3 although the
        # column is String -- may need .decode(); confirm target Python.
        self.auth_token = hexlify(os.urandom(4))
        self.expiration = datetime.datetime.now() + datetime.timedelta(minutes=10)  # 10 min from now

    def __repr__(self):
        return '<Token %r>' % self.id
# Class holds all application specific settings in calibre-web
class Config:
    """In-memory view of the application settings stored in the Settings table."""

    def __init__(self):
        # <package dir>/.. -- the application's main directory.
        self.config_main_dir = os.path.join(os.path.normpath(os.path.dirname(
            os.path.realpath(__file__)) + os.sep + ".." + os.sep))
        self.db_configured = None
        self.loadSettings()

    def loadSettings(self):
        """(Re-)load every config_* attribute from the first Settings row."""
        data = session.query(Settings).first()  # type: Settings
        self.config_calibre_dir = data.config_calibre_dir
        self.config_port = data.config_port
        self.config_calibre_web_title = data.config_calibre_web_title
        self.config_books_per_page = data.config_books_per_page
        self.config_random_books = data.config_random_books
        self.config_title_regex = data.config_title_regex
        self.config_log_level = data.config_log_level
        self.config_uploading = data.config_uploading
        self.config_anonbrowse = data.config_anonbrowse
        self.config_public_reg = data.config_public_reg
        self.config_default_role = data.config_default_role
        self.config_columns_to_ignore = data.config_columns_to_ignore
        self.config_use_google_drive = data.config_use_google_drive
        self.config_google_drive_client_id = data.config_google_drive_client_id
        self.config_google_drive_client_secret = data.config_google_drive_client_secret
        self.config_google_drive_calibre_url_base = data.config_google_drive_calibre_url_base
        self.config_google_drive_folder = data.config_google_drive_folder
        if data.config_google_drive_watch_changes_response:
            self.config_google_drive_watch_changes_response = json.loads(
                data.config_google_drive_watch_changes_response)
        else:
            self.config_google_drive_watch_changes_response = None
        # The database counts as configured once a calibre dir is set and --
        # unless Google Drive is used -- its metadata.db actually exists.
        self.db_configured = bool(self.config_calibre_dir is not None and
                                  (not self.config_use_google_drive or
                                   os.path.exists(self.config_calibre_dir + '/metadata.db')))
        self.config_remote_login = data.config_remote_login
        self.config_use_goodreads = data.config_use_goodreads
        self.config_goodreads_api_key = data.config_goodreads_api_key
        self.config_goodreads_api_secret = data.config_goodreads_api_secret
        self.config_mature_content_tags = data.config_mature_content_tags

    @property
    def get_main_dir(self):
        return self.config_main_dir

    def _has_default_role(self, role_flag):
        """True when *role_flag* is set in the default-role bitmask."""
        return bool((self.config_default_role is not None) and
                    (self.config_default_role & role_flag == role_flag))

    def role_admin(self):
        return self._has_default_role(ROLE_ADMIN)

    def role_download(self):
        return self._has_default_role(ROLE_DOWNLOAD)

    def role_upload(self):
        return self._has_default_role(ROLE_UPLOAD)

    def role_edit(self):
        return self._has_default_role(ROLE_EDIT)

    def role_passwd(self):
        return self._has_default_role(ROLE_PASSWD)

    def role_edit_shelfs(self):
        return self._has_default_role(ROLE_EDIT_SHELFS)

    def role_delete_books(self):
        return self._has_default_role(ROLE_DELETE_BOOKS)

    def mature_content_tags(self):
        """Comma-separated tag list from settings, with leading spaces stripped."""
        # str.lstrip works identically for Python 2 unicode and Python 3 str,
        # so the former version-check branch was unnecessary.
        return [tag.lstrip() for tag in self.config_mature_content_tags.split(",")]

    def get_Log_Level(self):
        """Name of the configured log level, or '' for unknown values."""
        return {
            logging.INFO: 'INFO',
            logging.DEBUG: 'DEBUG',
            logging.WARNING: 'WARNING',
            logging.ERROR: 'ERROR',
        }.get(self.config_log_level, '')
# Migrate the database to the current version; has to be updated after every database change.
# Currently migration from any earlier version to the current one should work. Migration is done
# by checking whether the relevant columns exist and, if not, adding them with SQL commands.
def migrate_Database():
    """Bring an existing app.db schema up to the current version.

    Strategy: probe each newer table/column with a throw-away query and, on
    OperationalError (object missing), add it via raw ALTER TABLE / CREATE.
    The order of the probes mirrors the order the schema evolved in, so it
    must not be rearranged.
    """
    # Tables introduced after the first release are created when absent.
    if not engine.dialect.has_table(engine.connect(), "book_read_link"):
        ReadBook.__table__.create(bind=engine)
    if not engine.dialect.has_table(engine.connect(), "bookmark"):
        Bookmark.__table__.create(bind=engine)
    try:
        session.query(exists().where(User.locale)).scalar()
        session.commit()
    except exc.OperationalError:  # Database is not compatible, some rows are missing
        conn = engine.connect()
        conn.execute("ALTER TABLE user ADD column locale String(2) DEFAULT 'en'")
        conn.execute("ALTER TABLE user ADD column default_language String(3) DEFAULT 'all'")
        session.commit()
    try:
        session.query(exists().where(Settings.config_calibre_dir)).scalar()
        session.commit()
    except exc.OperationalError:  # Database is not compatible, some rows are missing
        conn = engine.connect()
        conn.execute("ALTER TABLE Settings ADD column `config_calibre_dir` String")
        conn.execute("ALTER TABLE Settings ADD column `config_port` INTEGER DEFAULT 8083")
        conn.execute("ALTER TABLE Settings ADD column `config_calibre_web_title` String DEFAULT 'Calibre-web'")
        conn.execute("ALTER TABLE Settings ADD column `config_books_per_page` INTEGER DEFAULT 60")
        conn.execute("ALTER TABLE Settings ADD column `config_random_books` INTEGER DEFAULT 4")
        conn.execute("ALTER TABLE Settings ADD column `config_title_regex` String DEFAULT "
                     "'^(A|The|An|Der|Die|Das|Den|Ein|Eine|Einen|Dem|Des|Einem|Eines)\s+'")
        conn.execute("ALTER TABLE Settings ADD column `config_log_level` SmallInteger DEFAULT " + str(logging.INFO))
        conn.execute("ALTER TABLE Settings ADD column `config_uploading` SmallInteger DEFAULT 0")
        conn.execute("ALTER TABLE Settings ADD column `config_anonbrowse` SmallInteger DEFAULT 0")
        conn.execute("ALTER TABLE Settings ADD column `config_public_reg` SmallInteger DEFAULT 0")
        session.commit()
    try:
        session.query(exists().where(Settings.config_use_google_drive)).scalar()
    except exc.OperationalError:
        conn = engine.connect()
        conn.execute("ALTER TABLE Settings ADD column `config_use_google_drive` INTEGER DEFAULT 0")
        conn.execute("ALTER TABLE Settings ADD column `config_google_drive_client_id` String DEFAULT ''")
        conn.execute("ALTER TABLE Settings ADD column `config_google_drive_client_secret` String DEFAULT ''")
        # NOTE(review): declared INTEGER here although the ORM column is
        # String -- harmless in SQLite but worth confirming.
        conn.execute("ALTER TABLE Settings ADD column `config_google_drive_calibre_url_base` INTEGER DEFAULT 0")
        conn.execute("ALTER TABLE Settings ADD column `config_google_drive_folder` String DEFAULT ''")
        conn.execute("ALTER TABLE Settings ADD column `config_google_drive_watch_changes_response` String DEFAULT ''")
    try:
        session.query(exists().where(Settings.config_columns_to_ignore)).scalar()
    except exc.OperationalError:
        conn = engine.connect()
        conn.execute("ALTER TABLE Settings ADD column `config_columns_to_ignore` String DEFAULT ''")
        session.commit()
    try:
        session.query(exists().where(Settings.config_default_role)).scalar()
        session.commit()
    except exc.OperationalError:  # Database is not compatible, some rows are missing
        conn = engine.connect()
        conn.execute("ALTER TABLE Settings ADD column `config_default_role` SmallInteger DEFAULT 0")
        session.commit()
    try:
        session.query(exists().where(BookShelf.order)).scalar()
        session.commit()
    except exc.OperationalError:  # Database is not compatible, some rows are missing
        conn = engine.connect()
        conn.execute("ALTER TABLE book_shelf_link ADD column 'order' INTEGER DEFAULT 1")
        session.commit()
    try:
        # 'create' records whether sidebar_view was just added, so the legacy
        # per-flag columns can be folded into the new bitmask below.
        create = False
        session.query(exists().where(User.sidebar_view)).scalar()
        session.commit()
    except exc.OperationalError:  # Database is not compatible, some rows are missing
        conn = engine.connect()
        conn.execute("ALTER TABLE user ADD column `sidebar_view` Integer DEFAULT 1")
        session.commit()
        create = True
    try:
        if create:
            conn = engine.connect()
            conn.execute("SELECT language_books FROM user")
        session.commit()
    except exc.OperationalError:
        conn = engine.connect()
        conn.execute("UPDATE user SET 'sidebar_view' = (random_books* :side_random + language_books * :side_lang "
                     "+ series_books * :side_series + category_books * :side_category + hot_books * "
                     ":side_hot + :side_autor + :detail_random)"
                     , {'side_random': SIDEBAR_RANDOM, 'side_lang': SIDEBAR_LANGUAGE, 'side_series': SIDEBAR_SERIES,
                        'side_category': SIDEBAR_CATEGORY, 'side_hot': SIDEBAR_HOT, 'side_autor': SIDEBAR_AUTHOR,
                        'detail_random': DETAIL_RANDOM})
        session.commit()
    try:
        session.query(exists().where(User.mature_content)).scalar()
    except exc.OperationalError:
        conn = engine.connect()
        conn.execute("ALTER TABLE user ADD column `mature_content` INTEGER DEFAULT 1")
    # Guarantee the anonymous/guest user row exists.
    if session.query(User).filter(User.role.op('&')(ROLE_ANONYMOUS) == ROLE_ANONYMOUS).first() is None:
        create_anonymous_user()
    try:
        session.query(exists().where(Settings.config_remote_login)).scalar()
    except exc.OperationalError:
        conn = engine.connect()
        conn.execute("ALTER TABLE Settings ADD column `config_remote_login` INTEGER DEFAULT 0")
    try:
        session.query(exists().where(Settings.config_use_goodreads)).scalar()
    except exc.OperationalError:
        conn = engine.connect()
        conn.execute("ALTER TABLE Settings ADD column `config_use_goodreads` INTEGER DEFAULT 0")
        conn.execute("ALTER TABLE Settings ADD column `config_goodreads_api_key` String DEFAULT ''")
        conn.execute("ALTER TABLE Settings ADD column `config_goodreads_api_secret` String DEFAULT ''")
    try:
        session.query(exists().where(Settings.config_mature_content_tags)).scalar()
    except exc.OperationalError:
        conn = engine.connect()
        conn.execute("ALTER TABLE Settings ADD column `config_mature_content_tags` String DEFAULT ''")
def clean_database():
    """Delete remote login tokens whose expiration time has already passed."""
    cutoff = datetime.datetime.now()
    session.query(RemoteAuthToken).filter(cutoff > RemoteAuthToken.expiration).delete()
def create_default_config():
    """Seed app.db with a placeholder e-mail server configuration."""
    default_settings = Settings()
    default_settings.mail_server = "mail.example.com"
    default_settings.mail_port = 25
    default_settings.mail_use_ssl = 0
    default_settings.mail_login = "mail@example.com"
    default_settings.mail_password = "mypassword"
    default_settings.mail_from = "automailer <mail@example.com>"
    session.add(default_settings)
    session.commit()
def get_mail_settings():
    """Return the stored e-mail server settings as a plain dict, or {} when unset."""
    settings = session.query(Settings).first()
    if not settings:
        return {}
    return {
        'mail_server': settings.mail_server,
        'mail_port': settings.mail_port,
        'mail_use_ssl': settings.mail_use_ssl,
        'mail_login': settings.mail_login,
        'mail_password': settings.mail_password,
        'mail_from': settings.mail_from
    }
# Generate the user "Guest" (translated text) as the anonymous user, with no rights
def create_anonymous_user():
    """Create the translated 'Guest' account used for anonymous browsing."""
    guest = User()
    guest.nickname = _("Guest")
    guest.email = 'no@email'
    guest.role = ROLE_ANONYMOUS
    guest.password = generate_password_hash('1')
    session.add(guest)
    try:
        session.commit()
    except Exception:
        session.rollback()
# Generate User admin with admin123 password, and access to everything
def create_admin_user():
    """Create the default 'admin' account with every role and sidebar entry enabled."""
    admin = User()
    admin.nickname = "admin"
    admin.role = ROLE_USER + ROLE_ADMIN + ROLE_DOWNLOAD + ROLE_UPLOAD + ROLE_EDIT + ROLE_DELETE_BOOKS + ROLE_PASSWD
    admin.sidebar_view = DETAIL_RANDOM + SIDEBAR_LANGUAGE + SIDEBAR_SERIES + SIDEBAR_CATEGORY + SIDEBAR_HOT + \
        SIDEBAR_RANDOM + SIDEBAR_AUTHOR + SIDEBAR_BEST_RATED + SIDEBAR_READ_AND_UNREAD + SIDEBAR_RECENT + \
        SIDEBAR_SORTED
    admin.password = generate_password_hash(DEFAULT_PASS)
    session.add(admin)
    try:
        session.commit()
    except Exception:
        session.rollback()
# Open a session for the database connection.
Session = sessionmaker()
Session.configure(bind=engine)
session = Session()

# Generate the database plus admin and guest users if no database exists yet;
# otherwise make sure the schema is current and stale tokens are purged.
if not os.path.exists(dbpath):
    try:
        Base.metadata.create_all(engine)
        create_default_config()
        create_admin_user()
        create_anonymous_user()
    except Exception:
        raise
else:
    Base.metadata.create_all(engine)
    migrate_Database()
    clean_database()

# Global Settings object, accessible from every module.
config = Config()
| Kennyl/calibre-web | cps/ub.py | Python | gpl-3.0 | 23,868 | 0.005237 |
# -*- coding: utf-8 -*-
'''
# Copyright (c) Microsoft Corporation. All Rights Reserved. Licensed under the MIT License. See License in the project root for license information.
#
# This file was generated and any changes will be overwritten.
'''
from __future__ import unicode_literals
from ..collection_base import CollectionPageBase
from ..model.item import Item
class ItemsCollectionPage(CollectionPageBase):
    """Collection page whose raw entries are exposed as Item objects."""

    def __getitem__(self, index):
        """Return the Item stored at *index* in this page.

        Args:
            index (int): The index of the item to get from the ItemsCollectionPage

        Returns:
            :class:`Item<onedrivesdk.model.item.Item>`:
                The Item at the index
        """
        return Item(self._prop_list[index])

    def items(self):
        """Yield every entry of the page wrapped as an Item.

        Yields:
            :class:`Item<onedrivesdk.model.item.Item>`:
                The next Item in the collection
        """
        for raw_item in self._prop_list:
            yield Item(raw_item)
| OneDrive/onedrive-sdk-python | src/onedrivesdk/model/items_collection_page.py | Python | mit | 1,072 | 0.005597 |
def test_median_angle_correct():
    """Regression check: median_angle_correct() should move the median angle
    between the first SVD component and the voxel time series toward the
    requested target angle."""
    from CPAC.median_angle import median_angle_correct
    import numpy as np
    import nibabel as nb

    def getY(filepath):
        # Load voxel time series as (time, voxels), keeping only voxels
        # that are non-zero somewhere along the time axis.
        nii = nb.load(filepath)
        data = nii.get_data().astype(np.float64)
        mask = (data != 0).sum(-1) != 0
        return data[mask].T

    def normalize(X):
        # Demean each column and scale it to unit Euclidean norm.
        Xc = X - X.mean(0)
        return Xc / np.sqrt((Xc ** 2).sum(0))

    # NOTE(review): hard-coded subject path only exists on the original
    # analysis server; parameterize before running this test elsewhere.
    subject = '/home/data/Projects/nuisance_reliability_paper/working_dir_CPAC_order/resting_preproc/funcpreproc/_session_id_NYU_TRT_session1_subject_id_sub05676/func_scale/mapflow/_func_scale0/lfo_3dc_RPI_3dv_3dc_maths.nii.gz'
    target_angle = 88.0
    Y_orig = normalize(getY(subject))
    U_orig, S, Vh = np.linalg.svd(Y_orig, full_matrices=False)
    corrected_file, angles_file = median_angle_correct(target_angle, subject)
    Y_corr = normalize(getY(corrected_file))
    median_angle_orig = np.median(np.arccos(U_orig[:, 0].T.dot(Y_orig)))
    median_angle_corr = np.median(np.arccos(U_orig[:, 0].T.dot(Y_corr)))
    # Fixed: was a Python-2-only 'print a, b' statement; the formatted call
    # below produces identical output and also parses under Python 3.
    print("%s %s" % (median_angle_orig * 180.0 / np.pi, median_angle_corr * 180.0 / np.pi))
| erramuzpe/C-PAC | CPAC/median_angle/tests/test_median_angle.py | Python | bsd-3-clause | 1,151 | 0.01477 |
import uuid
from django.contrib.auth.models import User
from django.db import models
class Token(models.Model):
    """API auth token; the random UUID primary key itself is the secret."""
    key = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    created_at = models.DateTimeField(auto_now_add=True)
    # Indexed so lookups filtered on active tokens stay fast.
    is_active = models.BooleanField(default=True, db_index=True)
    # NOTE(review): pre-Django-2.0 style ForeignKey without on_delete;
    # newer Django versions require on_delete explicitly -- confirm version.
    user = models.ForeignKey(User, null=True, blank=True)
| rafaelsierra/estoudebike-api | src/bike_auth/models.py | Python | apache-2.0 | 376 | 0.00266 |
# -*- coding: utf-8 -*-
#
# Cloud Robotics FX message for the conversation-understanding API
#
# @author: Osamu Noguchi <noguchi@headwaters.co.jp>
# @version: 0.0.1
import cloudrobotics.message as message
# Identifiers placed in the CRFX message routing header.
APP_ID = 'SbrApiServices'
PROCESSING_ID = 'RbAppConversationApi'
# Conversation message
#
class ConversationMessage(message.CRFXMessage):
    """CRFX message carrying one visitor utterance for the conversation API."""

    def __init__(self, visitor, visitor_id, talkByMe, type):
        # 'type' shadows the builtin, but renaming it would break callers
        # that pass it as a keyword argument.
        super(ConversationMessage, self).__init__()
        self.header['RoutingType'] = message.ROUTING_TYPE_CALL
        self.header['AppProcessingId'] = PROCESSING_ID
        # The message id distinguishes the kind of conversation event.
        self.header['MessageId'] = type
        self.body = {
            'visitor': visitor,
            'visitor_id': visitor_id,
            'talkByMe': talkByMe
        }
| seijim/cloud-robotics-fx-v2 | CloudRoboticsApi/ClientCode_Pepper/HeadWaters/PepperCode2/lib/cloudrobotics/conversation/message.py | Python | mit | 746 | 0.002809 |
from flask import Flask, redirect, url_for, session, request
from flask_oauthlib.client import OAuth, OAuthException

# NOTE: example-only credentials; a real application must load these from
# configuration and never commit them to source control.
FACEBOOK_APP_ID = '188477911223606'
FACEBOOK_APP_SECRET = '621413ddea2bcc5b2e83d42fc40495de'

app = Flask(__name__)
app.debug = True
app.secret_key = 'development'
oauth = OAuth(app)

# Remote-application descriptor for Facebook's OAuth2 endpoints.
facebook = oauth.remote_app(
    'facebook',
    consumer_key=FACEBOOK_APP_ID,
    consumer_secret=FACEBOOK_APP_SECRET,
    request_token_params={'scope': 'email'},
    base_url='https://graph.facebook.com',
    request_token_url=None,
    access_token_url='/oauth/access_token',
    authorize_url='https://www.facebook.com/dialog/oauth'
)
@app.route('/')
def index():
    """Send visitors at the root URL straight into the login flow."""
    login_url = url_for('login')
    return redirect(login_url)
@app.route('/login')
def login():
    """Start the Facebook OAuth flow, remembering where to send the user afterwards."""
    next_url = request.args.get('next') or request.referrer or None
    callback_url = url_for('facebook_authorized', next=next_url, _external=True)
    return facebook.authorize(callback=callback_url)
@app.route('/login/authorized')
def facebook_authorized():
    """OAuth callback: store the access token and report who logged in."""
    resp = facebook.authorized_response()
    if resp is None:
        # Use .get(): the provider does not always send both error fields,
        # and direct indexing raised KeyError (HTTP 400) when one was absent.
        return 'Access denied: reason=%s error=%s' % (
            request.args.get('error_reason'),
            request.args.get('error_description')
        )
    if isinstance(resp, OAuthException):
        return 'Access denied: %s' % resp.message
    # flask-oauthlib expects the token as a (token, secret) tuple.
    session['oauth_token'] = (resp['access_token'], '')
    me = facebook.get('/me')
    return 'Logged in as id=%s name=%s redirect=%s' % \
        (me.data['id'], me.data['name'], request.args.get('next'))
@facebook.tokengetter
def get_facebook_oauth_token():
    """Called by flask-oauthlib to fetch the token cached in the session."""
    return session.get('oauth_token')
if __name__ == '__main__':
    # Run the example with Flask's built-in development server.
    app.run()
| Fleurer/flask-oauthlib | example/facebook.py | Python | bsd-3-clause | 1,663 | 0 |
import os
import logging
from .axle import split_package_name
logger = logging.getLogger(__name__)
class Path(object):
    """Thin wrapper around a filesystem path string."""

    def __init__(self, path):
        self.path = path

    def __repr__(self):
        # Added for debuggability; the class previously used the default repr.
        return 'Path(%r)' % self.path

    @property
    def exists(self):
        """True when the wrapped path currently exists on disk."""
        return os.path.exists(self.path)
class ReleaseValue(object):
    """A single release file (name + version) living inside a package directory."""

    _md5 = ''

    def __init__(self, name, package_dir):
        self.name, self.number = split_package_name(name)
        self.fullname = name
        self.package_dir = package_dir

    def __eq__(self, other):
        return self.fullname == other

    def __repr__(self):
        return self.fullname

    @property
    def md5(self):
        """Contents of the sidecar '<fullname>.md5' file; '' when unreadable."""
        if not self._md5:
            sidecar = self.fullpath + '.md5'
            try:
                with open(sidecar) as fh:
                    self._md5 = fh.read()
            except IOError:
                # A missing checksum file is tolerated; keep the empty default.
                pass
        return self._md5

    @property
    def fullpath(self):
        return os.path.join(self.package_dir, self.fullname)
| rob-b/belt | belt/values.py | Python | bsd-3-clause | 1,091 | 0 |
# -*- coding: utf-8 -*-
import time
from ..utils.purge import uniquify
class EventManager:
    """Tracks connected UI clients and fans application events out to them."""

    def __init__(self, core):
        self.pyload = core
        self._ = core._
        self.clients = []

    def new_client(self, uuid):
        """Register a new client queue for *uuid*."""
        self.clients.append(Client(uuid))

    def clean(self):
        """Forget clients that have been inactive for more than 30 seconds.

        Fixed: the previous implementation deleted from self.clients while
        enumerating it, which skips the element following every removal, so
        consecutive stale clients were only partially cleaned. Rebuilding
        the list avoids the modify-while-iterate bug.
        """
        now = time.time()
        self.clients = [
            client for client in self.clients if client.last_active + 30 >= now
        ]

    def get_events(self, uuid):
        """Drain and return pending events for *uuid*, registering unknown uuids.

        Unknown clients are added and told to reload both views so their UI
        starts from a consistent state.
        """
        events = []
        valid_uuid = False
        for client in self.clients:
            if client.uuid == uuid:
                client.last_active = time.time()
                valid_uuid = True
                while client.new_events():
                    events.append(client.pop_event().to_list())
                break
        if not valid_uuid:
            self.new_client(uuid)
            events = [
                ReloadAllEvent("queue").to_list(),
                ReloadAllEvent("collector").to_list(),
            ]
        return uniquify(events)  # return uniquify(events, repr)

    def add_event(self, event):
        """Queue *event* for every connected client."""
        for client in self.clients:
            client.add_event(event)
class Client:
    """Per-UI-client FIFO event queue with an activity timestamp."""

    def __init__(self, uuid):
        self.uuid = uuid
        self.last_active = time.time()
        self.events = []

    def new_events(self):
        """True when at least one event is waiting."""
        return bool(self.events)

    def pop_event(self):
        """Remove and return the oldest queued event, or None when empty."""
        if not self.events:
            return None
        return self.events.pop(0)

    def add_event(self, event):
        self.events.append(event)
class UpdateEvent:
    """Signals that a package or file entry changed in a destination view."""

    def __init__(self, itype, iid, destination):
        assert itype in ("pack", "file")
        assert destination in ("queue", "collector")
        self.type = itype
        self.id = iid
        self.destination = destination

    def to_list(self):
        """Serialize for transmission to UI clients."""
        return ["update", self.destination, self.type, self.id]
class RemoveEvent:
    """Signals that a package or file entry was removed from a destination view."""

    def __init__(self, itype, iid, destination):
        assert itype in ("pack", "file")
        assert destination in ("queue", "collector")
        self.type = itype
        self.id = iid
        self.destination = destination

    def to_list(self):
        """Serialize for transmission to UI clients."""
        return ["remove", self.destination, self.type, self.id]
class InsertEvent:
    """Signals that an entry was inserted after another one in a destination view."""

    def __init__(self, itype, iid, after, destination):
        assert itype in ("pack", "file")
        assert destination in ("queue", "collector")
        self.type = itype
        self.id = iid
        self.after = after
        self.destination = destination

    def to_list(self):
        """Serialize for transmission to UI clients."""
        return ["insert", self.destination, self.type, self.id, self.after]
class ReloadAllEvent:
    """Tells clients to reload a whole destination view."""

    def __init__(self, destination):
        assert destination in ("queue", "collector")
        self.destination = destination

    def to_list(self):
        """Serialize for transmission to UI clients."""
        return ["reload", self.destination]
class AccountUpdateEvent:
    """Signals that account data changed."""

    def to_list(self):
        return ["account"]
class ConfigUpdateEvent:
    """Signals that the configuration changed."""

    def to_list(self):
        return ["config"]
| vuolter/pyload | src/pyload/core/managers/event_manager.py | Python | agpl-3.0 | 3,084 | 0 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import board
import wizard
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| crmccreary/openerp_server | openerp/addons/board/__init__.py | Python | agpl-3.0 | 1,082 | 0.001848 |
import _plotly_utils.basevalidators
class NamelengthsrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the 'namelengthsrc' property of 'bar.hoverlabel'.

    NOTE(review): follows the repetitive pattern of plotly's validator
    modules -- presumably machine-generated; avoid hand-editing.
    """

    def __init__(
        self, plotly_name="namelengthsrc", parent_name="bar.hoverlabel", **kwargs
    ):
        super(NamelengthsrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "none"),
            **kwargs
        )
| plotly/plotly.py | packages/python/plotly/plotly/validators/bar/hoverlabel/_namelengthsrc.py | Python | mit | 432 | 0.002315 |
import collections
import os
import codecs
from .top_block import TopBlockGenerator
from .. import Constants
from ..io import yaml
class HierBlockGenerator(TopBlockGenerator):
    """Extends the top block generator to also generate a block YML file."""

    def __init__(self, flow_graph, _):
        """
        Initialize the hier block generator object.

        Output goes to the platform's hier-block library directory, which is
        created on first use.

        Args:
            flow_graph: the flow graph object
        """
        platform = flow_graph.parent
        output_dir = platform.config.hier_block_lib_dir
        if not os.path.exists(output_dir):
            os.mkdir(output_dir)
        TopBlockGenerator.__init__(self, flow_graph, output_dir)
        self._mode = Constants.HIER_BLOCK_FILE_MODE
        # The block description lives next to the generated .py file.
        self.file_path_yml = self.file_path[:-3] + '.block.yml'

    def write(self):
        """Generate output and write both the .py and the .block.yml files."""
        TopBlockGenerator.write(self)
        data = yaml.dump(self._build_block_n_from_flow_graph_io())
        # Insert a blank line before each top-level YML section for readability.
        replace = [
            ('parameters:', '\nparameters:'),
            ('inputs:', '\ninputs:'),
            ('outputs:', '\noutputs:'),
            ('asserts:', '\nasserts:'),
            ('templates:', '\ntemplates:'),
            ('documentation:', '\ndocumentation:'),
            ('file_format:', '\nfile_format:'),
        ]
        for r in replace:
            data = data.replace(*r)
        with codecs.open(self.file_path_yml, 'w', encoding='utf-8') as fp:
            fp.write(data)
        # Windows only supports S_IREAD and S_IWRITE, other flags are ignored
        os.chmod(self.file_path_yml, self._mode)

    def _build_block_n_from_flow_graph_io(self):
        """
        Generate a block YML nested data from the flow graph IO.

        Returns:
            a yml node tree (OrderedDict so section order is stable)
        """
        # Extract info from the flow graph
        block_id = self._flow_graph.get_option('id')
        parameters = self._flow_graph.get_parameters()

        def var_or_value(name):
            # Reference hier-block parameters as template expressions;
            # anything else is emitted literally.
            if name in (p.name for p in parameters):
                return "${" + name + " }"
            return name

        # Build the nested data
        data = collections.OrderedDict()
        data['id'] = block_id
        # Fall back to a title-cased id when no explicit title is set.
        data['label'] = (
            self._flow_graph.get_option('title') or
            self._flow_graph.get_option('id').replace('_', ' ').title()
        )
        data['category'] = self._flow_graph.get_option('category')

        # Parameters
        data['parameters'] = []
        for param_block in parameters:
            p = collections.OrderedDict()
            p['id'] = param_block.name
            p['label'] = param_block.params['label'].get_value() or param_block.name
            p['dtype'] = param_block.params['value'].dtype
            p['default'] = param_block.params['value'].get_value()
            p['hide'] = param_block.params['hide'].get_value()
            data['parameters'].append(p)

        # Ports
        for direction in ('inputs', 'outputs'):
            data[direction] = []
            for port in get_hier_block_io(self._flow_graph, direction):
                p = collections.OrderedDict()
                p['label'] = port.parent.params['label'].value
                if port.domain != Constants.DEFAULT_DOMAIN:
                    p['domain'] = port.domain
                p['dtype'] = port.dtype
                if port.domain != Constants.GR_MESSAGE_DOMAIN:
                    # Message ports carry no vector length.
                    p['vlen'] = var_or_value(port.vlen)
                if port.optional:
                    p['optional'] = True
                data[direction].append(p)

        t = data['templates'] = collections.OrderedDict()

        t['imports'] = "from {0} import {0} # grc-generated hier_block".format(
            self._flow_graph.get_option('id'))
        # Make data
        if parameters:
            t['make'] = '{cls}(\n {kwargs},\n)'.format(
                cls=block_id,
                kwargs=',\n '.join(
                    '{key}=${{ {key} }}'.format(key=param.name) for param in parameters
                ),
            )
        else:
            t['make'] = '{cls}()'.format(cls=block_id)
        # Self-connect if there aren't any ports
        if not data['inputs'] and not data['outputs']:
            t['make'] += '\nself.connect(self.${id})'

        # Callback data
        t['callbacks'] = [
            'set_{key}(${{ {key} }})'.format(key=param_block.name) for param_block in parameters
        ]

        # Documentation
        data['documentation'] = "\n".join(field for field in (
            self._flow_graph.get_option('author'),
            self._flow_graph.get_option('description'),
            self.file_path
        ) if field)
        data['grc_source'] = str(self._flow_graph.grc_file_path)

        data['file_format'] = 1

        return data
class QtHierBlockGenerator(HierBlockGenerator):
    """Hier block generator for flow graphs containing a QT GUI."""

    def _build_block_n_from_flow_graph_io(self):
        """Extend the base YML tree with QT GUI flags and a gui_hint parameter."""
        n = HierBlockGenerator._build_block_n_from_flow_graph_io(self)
        block_n = collections.OrderedDict()

        # insert flags after category
        for key, value in n.items():
            block_n[key] = value
            if key == 'category':
                block_n['flags'] = 'need_qt_gui'

        # Make the GUI nature visible in the block label.
        if not block_n['label'].upper().startswith('QT GUI'):
            block_n['label'] = 'QT GUI ' + block_n['label']

        gui_hint_param = collections.OrderedDict()
        gui_hint_param['id'] = 'gui_hint'
        gui_hint_param['label'] = 'GUI Hint'
        gui_hint_param['dtype'] = 'gui_hint'
        gui_hint_param['hide'] = 'part'
        block_n['parameters'].append(gui_hint_param)

        # Append the window-placement template to the make expression.
        block_n['templates']['make'] += (
            "\n<% win = 'self.%s'%id %>"
            "\n${ gui_hint() % win }"
        )
        return block_n
def get_hier_block_io(flow_graph, direction, domain=None):
    """Yield the io ports of *flow_graph* for one direction.

    Args:
        flow_graph: the flow graph object
        direction: 'inputs' (pad sources) or 'outputs' (pad sinks)
        domain: when truthy, only ports of this domain are yielded
    """
    if direction == 'inputs':
        pads = flow_graph.get_pad_sources()
    else:
        pads = flow_graph.get_pad_sinks()
    for pad in pads:
        ports = pad.sources if direction == 'inputs' else pad.sinks
        for port in ports:
            if not domain or port.domain == domain:
                yield port
| mrjacobagilbert/gnuradio | grc/core/generator/hier_block.py | Python | gpl-3.0 | 6,202 | 0.000967 |
import subprocess
def send_command(*args):
    """Run *args* as a subprocess and return its stdout decoded as UTF-8.

    When called with exactly two arguments and the second contains '&&',
    the argument list becomes [full_string, part1, part2, ...]; this
    original (somewhat surprising) expansion is preserved verbatim.
    """
    delimiter = '&&'
    if len(args) == 2 and delimiter in args[1]:
        combined = args[1]
        args = [combined] + combined.split(delimiter)
    stdout, _ = subprocess.Popen(args, stdout=subprocess.PIPE).communicate()
    return stdout.decode("utf-8")
| andreipradan/raspberrymediaplayer | src/radio/api/utils.py | Python | mit | 328 | 0 |
# import .1dslicefrom3d
import artistools.makemodel.botyanski2017 | lukeshingles/artistools | artistools/makemodel/__init__.py | Python | mit | 65 | 0.015385 |
#from . import sslTests
| lwahlmeier/python-litesockets | tests/__init__.py | Python | unlicense | 24 | 0.041667 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
    """Widen PayPalIPN.custom and .transaction_subject to 256 characters."""
    # Changing field 'PayPalIPN.custom'
    db.alter_column(u'paypal_ipn', 'custom', self.gf('django.db.models.fields.CharField')(max_length=256))

    # Changing field 'PayPalIPN.transaction_subject'
    db.alter_column(u'paypal_ipn', 'transaction_subject', self.gf('django.db.models.fields.CharField')(max_length=256))
def backwards(self, orm):
# Changing field 'PayPalIPN.custom'
db.alter_column(u'paypal_ipn', 'custom', self.gf('django.db.models.fields.CharField')(max_length=255))
# Changing field 'PayPalIPN.transaction_subject'
db.alter_column(u'paypal_ipn', 'transaction_subject', self.gf('django.db.models.fields.CharField')(max_length=255))
models = {
u'ipn.paypalipn': {
'Meta': {'object_name': 'PayPalIPN', 'db_table': "u'paypal_ipn'"},
'address_city': ('django.db.models.fields.CharField', [], {'max_length': '40', 'blank': 'True'}),
'address_country': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'address_country_code': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'address_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'address_state': ('django.db.models.fields.CharField', [], {'max_length': '40', 'blank': 'True'}),
'address_status': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'address_street': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'address_zip': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'amount': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'amount1': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'amount2': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'amount3': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'amount_per_cycle': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'auction_buyer_id': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'auction_closing_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'auction_multi_item': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'auth_amount': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'auth_exp': ('django.db.models.fields.CharField', [], {'max_length': '28', 'blank': 'True'}),
'auth_id': ('django.db.models.fields.CharField', [], {'max_length': '19', 'blank': 'True'}),
'auth_status': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'business': ('django.db.models.fields.CharField', [], {'max_length': '127', 'blank': 'True'}),
'case_creation_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'case_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'case_type': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'charset': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'contact_phone': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'currency_code': ('django.db.models.fields.CharField', [], {'default': "'USD'", 'max_length': '32', 'blank': 'True'}),
'custom': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'exchange_rate': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '16', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'flag': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'flag_code': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'flag_info': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'for_auction': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'from_view': ('django.db.models.fields.CharField', [], {'max_length': '6', 'null': 'True', 'blank': 'True'}),
'handling_amount': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'initial_payment_amount': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'invoice': ('django.db.models.fields.CharField', [], {'max_length': '127', 'blank': 'True'}),
'ipaddress': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True', 'blank': 'True'}),
'item_name': ('django.db.models.fields.CharField', [], {'max_length': '127', 'blank': 'True'}),
'item_number': ('django.db.models.fields.CharField', [], {'max_length': '127', 'blank': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'mc_amount1': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'mc_amount2': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'mc_amount3': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'mc_currency': ('django.db.models.fields.CharField', [], {'default': "'USD'", 'max_length': '32', 'blank': 'True'}),
'mc_fee': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'mc_gross': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'mc_handling': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'mc_shipping': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'memo': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'mp_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'next_payment_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'notify_version': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'num_cart_items': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'option_name1': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'option_name2': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'outstanding_balance': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'parent_txn_id': ('django.db.models.fields.CharField', [], {'max_length': '19', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '24', 'blank': 'True'}),
'payer_business_name': ('django.db.models.fields.CharField', [], {'max_length': '127', 'blank': 'True'}),
'payer_email': ('django.db.models.fields.CharField', [], {'max_length': '127', 'blank': 'True'}),
'payer_id': ('django.db.models.fields.CharField', [], {'max_length': '13', 'blank': 'True'}),
'payer_status': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'payment_cycle': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'payment_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'payment_gross': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'payment_status': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'payment_type': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'pending_reason': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'period1': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'period2': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'period3': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'period_type': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'product_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'product_type': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'profile_status': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'protection_eligibility': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'quantity': ('django.db.models.fields.IntegerField', [], {'default': '1', 'null': 'True', 'blank': 'True'}),
'query': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'reason_code': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'reattempt': ('django.db.models.fields.CharField', [], {'max_length': '1', 'blank': 'True'}),
'receipt_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'receiver_email': ('django.db.models.fields.EmailField', [], {'max_length': '254', 'blank': 'True'}),
'receiver_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'recur_times': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'recurring': ('django.db.models.fields.CharField', [], {'max_length': '1', 'blank': 'True'}),
'recurring_payment_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'remaining_settle': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'residence_country': ('django.db.models.fields.CharField', [], {'max_length': '2', 'blank': 'True'}),
'response': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'retry_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'rp_invoice_id': ('django.db.models.fields.CharField', [], {'max_length': '127', 'blank': 'True'}),
'settle_amount': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'settle_currency': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'shipping': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'shipping_method': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'subscr_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'subscr_effective': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'subscr_id': ('django.db.models.fields.CharField', [], {'max_length': '19', 'blank': 'True'}),
'tax': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'test_ipn': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'time_created': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'transaction_entity': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'transaction_subject': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'txn_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'txn_type': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'verify_sign': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
}
}
complete_apps = ['ipn'] | aldenjenkins/foobargamingwebsite | paypal/standard/ipn/south_migrations/0006_auto__chg_field_paypalipn_custom__chg_field_paypalipn_transaction_subj.py | Python | bsd-3-clause | 14,862 | 0.007872 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2013 Zuza Software Foundation
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
from pytest import mark
from translate.misc import wStringIO
from translate.storage import dtd, test_monolingual
def test_roundtrip_quoting():
    """Quoting and then unquoting any special string must reproduce it."""
    specials = [
        'Fish & chips',
        'five < six',
        'six > five',
        'Use ',
        'Use &nbsp;A "solution"',
        "skop 'n bal",
        '"""',
        "'''",
        '\n',
        '\t',
        '\r',
        'Escape at end \\',
        '',
        '\\n',
        '\\t',
        '\\r',
        '\\"',
        '\r\n',
        '\\r\\n',
        '\\',
        "Completed %S",
        "&blockAttackSites;",
        " ",
        "&intro-point2-a;",
        "&basePBMenu.label;",
        #"Don't buy",
        #"Don't \"buy\"",
        "A \"thing\"",
        "<a href=\"http"
    ]
    for plain in specials:
        quoted = dtd.quotefordtd(plain)
        roundtripped = dtd.unquotefromdtd(quoted)
        print("special: %r\nquoted: %r\nunquoted: %r\n" % (plain,
                                                           quoted,
                                                           roundtripped))
        assert plain == roundtripped
@mark.xfail(reason="Not Implemented")
def test_quotefordtd_unimplemented_cases():
    """Test unimplemented quoting DTD cases."""
    # NOTE(review): the expected value uses entity references; the previous
    # text had them decoded to literal "<p>" (extraction corruption), which
    # made this expected-to-fail case trivially true.
    assert dtd.quotefordtd("Between <p> and </p>") == ('"Between &lt;p&gt; and'
                                                       ' &lt;/p&gt;"')
def test_quotefordtd():
    """Test quoting DTD definitions.

    NOTE(review): several expected values below were reconstructed — the
    extracted text had XML entity references (&amp;, &quot;, &apos;, &#037;)
    decoded to their literal characters, which broke both the syntax and the
    meaning of these assertions.
    """
    assert dtd.quotefordtd('') == '""'
    assert dtd.quotefordtd("") == '""'
    # % is escaped as a character reference (parameter-entity safety).
    assert dtd.quotefordtd("Completed %S") == '"Completed &#037;S"'
    assert dtd.quotefordtd("&blockAttackSites;") == '"&blockAttackSites;"'
    assert dtd.quotefordtd(" ") == '" "'
    assert dtd.quotefordtd("&intro-point2-a;") == '"&intro-point2-a;"'
    assert dtd.quotefordtd("&basePBMenu.label;") == '"&basePBMenu.label;"'
    # The ' character isn't escaped as &apos; since the " char isn't present.
    assert dtd.quotefordtd("Don't buy") == '"Don\'t buy"'
    # The ' character is escaped as &apos; because the " character is present.
    assert dtd.quotefordtd("Don't \"buy\"") == '"Don&apos;t &quot;buy&quot;"'
    assert dtd.quotefordtd("A \"thing\"") == '"A &quot;thing&quot;"'
    # The " character is not escaped when it indicates an attribute value.
    assert dtd.quotefordtd("<a href=\"http") == "'<a href=\"http'"
    # Bare & is escaped to &amp;; existing entity references are preserved.
    assert dtd.quotefordtd("Color & Light") == '"Color &amp; Light"'
    assert dtd.quotefordtd("Color & █") == '"Color &amp; █"'
    assert dtd.quotefordtd("Color&Light &red;") == '"Color&amp;Light &red;"'
    assert dtd.quotefordtd("Color & Light; Yes") == '"Color &amp; Light; Yes"'
@mark.xfail(reason="Not Implemented")
def test_unquotefromdtd_unimplemented_cases():
    """Test unimplemented unquoting DTD cases."""
    # NOTE(review): the quoted input uses entity references; the previous
    # text had them decoded to literal "<p>" (extraction corruption).
    assert dtd.unquotefromdtd('"&lt;p&gt; and &lt;/p&gt;"') == "<p> and </p>"
def test_unquotefromdtd():
    """Test unquoting DTD definitions.

    NOTE(review): several quoted inputs below were reconstructed — the
    extracted text had XML entity references decoded to literal characters,
    which collapsed distinct cases and broke the Python syntax of two lines.
    """
    # %
    assert dtd.unquotefromdtd('"Completed %S"') == "Completed %S"
    assert dtd.unquotefromdtd('"Completed &#037;S"') == "Completed %S"
    assert dtd.unquotefromdtd('"Completed &#37;S"') == "Completed %S"
    # &entity;
    assert dtd.unquotefromdtd('"Color&light █"') == "Color&light █"
    assert dtd.unquotefromdtd('"Color &amp; Light; Red"') == "Color & Light; Red"
    assert dtd.unquotefromdtd('"&blockAttackSites;"') == "&blockAttackSites;"
    assert dtd.unquotefromdtd('"&intro-point2-a;"') == "&intro-point2-a;"
    assert dtd.unquotefromdtd('"&basePBMenu.label"') == "&basePBMenu.label"
    # &amp;
    assert dtd.unquotefromdtd('"Color &amp; Light"') == "Color & Light"
    assert dtd.unquotefromdtd('"Color &amp; █"') == "Color & █"
    # nbsp
    assert dtd.unquotefromdtd('"&nbsp;"') == "&nbsp;"
    # '
    assert dtd.unquotefromdtd("'Don't buy'") == "Don't buy"
    # "
    assert dtd.unquotefromdtd("'Don&apos;t &quot;buy&quot;'") == 'Don\'t "buy"'
    assert dtd.unquotefromdtd('"A &quot;thing&quot;"') == "A \"thing\""
    assert dtd.unquotefromdtd('"A &#34;thing&#34;"') == "A \"thing\""
    assert dtd.unquotefromdtd("'<a href=\"http'") == "<a href=\"http"
    # other chars
    assert dtd.unquotefromdtd('"&#187;"') == u"»"
def test_android_roundtrip_quoting():
    """Android-quoting and then unquoting a string must reproduce it."""
    specials = [
        "don't",
        'the "thing"'
    ]
    for plain in specials:
        quoted = dtd.quoteforandroid(plain)
        roundtripped = dtd.unquotefromandroid(quoted)
        print("special: %r\nquoted: %r\nunquoted: %r\n" % (plain,
                                                           quoted,
                                                           roundtripped))
        assert plain == roundtripped
def test_quoteforandroid():
    """Test quoting Android DTD definitions."""
    cases = [
        ("don't", r'"don\u0027t"'),
        ('the "thing"', r'"the \"thing\""'),
    ]
    for plain, quoted in cases:
        assert dtd.quoteforandroid(plain) == quoted
def test_unquotefromandroid():
    """Test unquoting Android DTD definitions.

    NOTE(review): the first input was reconstructed — the extracted text had
    the &apos; entity decoded to a literal apostrophe, which broke the
    Python syntax of that line.
    """
    assert dtd.unquotefromandroid('"Don\\&apos;t show"') == "Don't show"
    assert dtd.unquotefromandroid('"Don\\\'t show"') == "Don't show"
    assert dtd.unquotefromandroid('"Don\\u0027t show"') == "Don't show"
    assert dtd.unquotefromandroid('"A \\"thing\\""') == "A \"thing\""
def test_removeinvalidamp(recwarn):
    """Tests the removeinvalidamps function."""
    def tester(actual, expected=None):
        # When no expected value is given the text must pass through unchanged.
        if expected is None:
            expected = actual
        assert dtd.removeinvalidamps("test.name", actual) == expected

    # No errors: well-formed entity and character references are kept.
    tester("Valid &entity; included")
    tester("Valid &entity.name; included")
    tester("Valid &#1234; included")
    tester("Valid &entity_name;")
    # Errors that require & removal: the stray ampersand is dropped,
    # leaving the rest of the token behind.
    tester("This &amp is broken", "This amp is broken")
    # NOTE(review): the next case could not be reliably reconstructed from the
    # entity-decoded extraction; verify against upstream before relying on it.
    tester("Mad & & &", "Mad amp &")
    # An invalid ampersand must raise a warning.
    dtd.removeinvalidamps("simple.warningtest", "Dimpled &Ring")
    assert recwarn.pop(UserWarning)
class TestDTDUnit(test_monolingual.TestMonolingualUnit):
    """Unit tests for dtd.dtdunit, reusing the monolingual unit suite."""
    UnitClass = dtd.dtdunit

    def test_rich_get(self):
        # Inherited rich-string test intentionally disabled for DTD units.
        pass

    def test_rich_set(self):
        # Inherited rich-string test intentionally disabled for DTD units.
        pass
class TestDTD(test_monolingual.TestMonolingualStore):
    """Store tests for dtd.dtdfile: parsing, regeneration and escaping.

    NOTE(review): the string literals in the two entity-escaping tests were
    reconstructed — the extracted text had XML entity references (&amp;,
    &quot;, &apos;, &lt;, &gt;) decoded to literal characters, which broke
    the Python syntax of one line and the meaning of several assertions.
    """
    StoreClass = dtd.dtdfile

    def dtdparse(self, dtdsource):
        """helper that parses dtd source without requiring files"""
        dummyfile = wStringIO.StringIO(dtdsource)
        dtdfile = dtd.dtdfile(dummyfile)
        return dtdfile

    def dtdregen(self, dtdsource):
        """helper that converts dtd source to dtdfile object and back"""
        return str(self.dtdparse(dtdsource))

    def test_simpleentity(self):
        """checks that a simple dtd entity definition is parsed correctly"""
        dtdsource = '<!ENTITY test.me "bananas for sale">\n'
        dtdfile = self.dtdparse(dtdsource)
        assert len(dtdfile.units) == 1
        dtdunit = dtdfile.units[0]
        assert dtdunit.entity == "test.me"
        assert dtdunit.definition == '"bananas for sale"'

    def test_blanklines(self):
        """checks that blank lines don't break the parsing or regeneration"""
        dtdsource = '<!ENTITY test.me "bananas for sale">\n\n'
        dtdregen = self.dtdregen(dtdsource)
        assert dtdsource == dtdregen

    def test_simpleentity_source(self):
        """checks that a simple dtd entity definition can be regenerated as source"""
        dtdsource = '<!ENTITY test.me "">\n'
        dtdregen = self.dtdregen(dtdsource)
        assert dtdsource == dtdregen
        dtdsource = '<!ENTITY test.me "bananas for sale">\n'
        dtdregen = self.dtdregen(dtdsource)
        assert dtdsource == dtdregen

    def test_hashcomment_source(self):
        """checks that a #expand comment is retained in the source"""
        dtdsource = '#expand <!ENTITY lang.version "__MOZILLA_LOCALE_VERSION__">\n'
        dtdregen = self.dtdregen(dtdsource)
        assert dtdsource == dtdregen

    def test_commentclosing(self):
        """tests that comment closes with trailing space aren't duplicated"""
        dtdsource = '<!-- little comment --> \n<!ENTITY pane.title "Notifications">\n'
        dtdregen = self.dtdregen(dtdsource)
        assert dtdsource == dtdregen

    def test_commententity(self):
        """check that we don't process messages in <!-- comments -->: bug 102"""
        dtdsource = '''<!-- commenting out until bug 38906 is fixed
<!ENTITY messagesHeader.label "Messages"> -->'''
        dtdfile = self.dtdparse(dtdsource)
        assert len(dtdfile.units) == 1
        dtdunit = dtdfile.units[0]
        print(dtdunit)
        assert dtdunit.isnull()

    def test_newlines_in_entity(self):
        """tests that we can handle newlines in the entity itself"""
        dtdsource = '''<!ENTITY fileNotFound.longDesc "
<ul>
<li>Check the file name for capitalisation or other typing errors.</li>
<li>Check to see if the file was moved, renamed or deleted.</li>
</ul>
">
'''
        dtdregen = self.dtdregen(dtdsource)
        print(dtdregen)
        print(dtdsource)
        assert dtdsource == dtdregen

    def test_conflate_comments(self):
        """Tests that comments don't run onto the same line"""
        dtdsource = '<!-- test comments -->\n<!-- getting conflated -->\n<!ENTITY sample.txt "hello">\n'
        dtdregen = self.dtdregen(dtdsource)
        print(dtdsource)
        print(dtdregen)
        assert dtdsource == dtdregen

    def test_localisation_notes(self):
        """test to ensure that we retain the localisation note correctly"""
        dtdsource = '''<!--LOCALIZATION NOTE (publishFtp.label): Edit box appears beside this label -->
<!ENTITY publishFtp.label "If publishing to a FTP site, enter the HTTP address to browse to:">
'''
        dtdregen = self.dtdregen(dtdsource)
        assert dtdsource == dtdregen

    def test_entitityreference_in_source(self):
        """checks that an &entity; in the source is retained"""
        dtdsource = '<!ENTITY % realBrandDTD SYSTEM "chrome://branding/locale/brand.dtd">\n%realBrandDTD;\n'
        dtdregen = self.dtdregen(dtdsource)
        assert dtdsource == dtdregen

    #test for bug #610
    def test_entitityreference_order_in_source(self):
        """checks that an &entity; in the source is retained"""
        dtdsource = '<!ENTITY % realBrandDTD SYSTEM "chrome://branding/locale/brand.dtd">\n%realBrandDTD;\n<!-- some comment -->\n'
        dtdregen = self.dtdregen(dtdsource)
        assert dtdsource == dtdregen

        # The following test is identical to the one above, except that the entity is split over two lines.
        # This is to ensure that a recent bug fixed in dtdunit.parse() is at least partly documented.
        # The essence of the bug was that after it had read "realBrandDTD", the line index is not reset
        # before starting to parse the next line. It would then read the next available word (sequence of
        # alphanum characters) in stead of SYSTEM and then get very confused by not finding an opening ' or
        # " in the entity, borking the parsing for threst of the file.
        dtdsource = '<!ENTITY % realBrandDTD\n SYSTEM "chrome://branding/locale/brand.dtd">\n%realBrandDTD;\n'
        # FIXME: The following line is necessary, because of dtdfile's inability to remember the spacing of
        # the source DTD file when converting back to DTD.
        dtdregen = self.dtdregen(dtdsource).replace('realBrandDTD SYSTEM', 'realBrandDTD\n SYSTEM')
        print(dtdsource)
        print(dtdregen)
        assert dtdsource == dtdregen

    @mark.xfail(reason="Not Implemented")
    def test_comment_following(self):
        """check that comments that appear after and entity are not pushed onto another line"""
        dtdsource = '<!ENTITY textZoomEnlargeCmd.commandkey2 "="> <!-- + is above this key on many keyboards -->'
        dtdregen = self.dtdregen(dtdsource)
        assert dtdsource == dtdregen

    def test_comment_newline_space_closing(self):
        """check that comments that are closed by a newline then space then --> don't break the following entries"""
        dtdsource = '<!-- Comment\n -->\n<!ENTITY searchFocus.commandkey "k">\n'
        dtdregen = self.dtdregen(dtdsource)
        assert dtdsource == dtdregen

    @mark.xfail(reason="Not Implemented")
    def test_invalid_quoting(self):
        """checks that invalid quoting doesn't work - quotes can't be reopened"""
        # TODO: we should rather raise an error
        dtdsource = '<!ENTITY test.me "bananas for sale""room">\n'
        assert dtd.unquotefromdtd(dtdsource[dtdsource.find('"'):]) == 'bananas for sale'
        dtdfile = self.dtdparse(dtdsource)
        assert len(dtdfile.units) == 1
        dtdunit = dtdfile.units[0]
        assert dtdunit.definition == '"bananas for sale"'
        assert str(dtdfile) == '<!ENTITY test.me "bananas for sale">\n'

    def test_missing_quotes(self, recwarn):
        """test that we fail gracefully when a message without quotes is found (bug #161)"""
        dtdsource = '<!ENTITY bad no quotes">\n<!ENTITY good "correct quotes">\n'
        dtdfile = self.dtdparse(dtdsource)
        assert len(dtdfile.units) == 1
        assert recwarn.pop(Warning)

    # Test for bug #68
    def test_entity_escaping(self):
        """Test entities escaping (&amp; &quot; &lt; &gt; &apos;) (bug #68)"""
        dtdsource = ('<!ENTITY securityView.privacy.header "Privacy &amp; '
                     'History">\n<!ENTITY rights.safebrowsing-term3 "Uncheck '
                     'the options to &quot;&blockAttackSites.label;&quot; and '
                     '&quot;&blockWebForgeries.label;&quot;">\n<!ENTITY '
                     'translate.test1 \'XML encodings don&apos;t work\'>\n'
                     '<!ENTITY translate.test2 "In HTML the text paragraphs '
                     'are enclosed between &lt;p&gt; and &lt;/p&gt; tags.">\n')
        dtdfile = self.dtdparse(dtdsource)
        assert len(dtdfile.units) == 4
        #dtdunit = dtdfile.units[0]
        #assert dtdunit.definition == '"Privacy &amp; History"'
        #assert dtdunit.target == "Privacy & History"
        #assert dtdunit.source == "Privacy & History"
        dtdunit = dtdfile.units[1]
        assert dtdunit.definition == ('"Uncheck the options to &quot;'
                                      '&blockAttackSites.label;&quot; and '
                                      '&quot;&blockWebForgeries.label;&quot;"')
        assert dtdunit.target == ("Uncheck the options to \""
                                  "&blockAttackSites.label;\" and \""
                                  "&blockWebForgeries.label;\"")
        assert dtdunit.source == ("Uncheck the options to \""
                                  "&blockAttackSites.label;\" and \""
                                  "&blockWebForgeries.label;\"")
        dtdunit = dtdfile.units[2]
        assert dtdunit.definition == "'XML encodings don&apos;t work'"
        assert dtdunit.target == "XML encodings don\'t work"
        assert dtdunit.source == "XML encodings don\'t work"
        #dtdunit = dtdfile.units[3]
        #assert dtdunit.definition == ('"In HTML the text paragraphs are '
        #                              'enclosed between &lt;p&gt; and &lt;/p'
        #                              '&gt; tags."')
        #assert dtdunit.target == ("In HTML the text paragraphs are enclosed "
        #                          "between <p> and </p> tags.")
        #assert dtdunit.source == ("In HTML the text paragraphs are enclosed "
        #                          "between <p> and </p> tags.")

    # Test for bug #68
    def test_entity_escaping_roundtrip(self):
        """Test entities escaping roundtrip (&amp; &quot; ...) (bug #68)"""
        dtdsource = ('<!ENTITY securityView.privacy.header "Privacy &amp; '
                     'History">\n<!ENTITY rights.safebrowsing-term3 "Uncheck '
                     'the options to &quot;&blockAttackSites.label;&quot; and '
                     '&quot;&blockWebForgeries.label;&quot;">\n<!ENTITY '
                     'translate.test1 \'XML encodings don&apos;t work\'>\n'
                     '<!ENTITY translate.test2 "In HTML the text paragraphs '
                     'are enclosed between &lt;p&gt; and &lt;/p&gt; tags.">\n')
        dtdregen = self.dtdregen(dtdsource)
        assert dtdsource == dtdregen
class TestAndroidDTD(test_monolingual.TestMonolingualStore):
    """Store tests for dtd.dtdfile in Android mode (android=True).

    NOTE(review): one line of the escaped dtdsource strings was
    reconstructed — the extraction decoded an escape, leaving invalid
    Python syntax; verify against upstream.
    """
    StoreClass = dtd.dtdfile

    def dtdparse(self, dtdsource):
        """Parses an Android DTD source string and returns a DTD store.

        This allows to simulate reading from Android DTD files without really
        having real Android DTD files.
        """
        dummyfile = wStringIO.StringIO(dtdsource)
        dtdfile = dtd.dtdfile(dummyfile, android=True)
        return dtdfile

    def dtdregen(self, dtdsource):
        """Parses an Android DTD string to DTD store and then converts it back.

        This allows to simulate reading from an Android DTD file to an
        in-memory store and writing back to an Android DTD file without really
        having a real file.
        """
        return str(self.dtdparse(dtdsource))

    # Test for bug #2480
    def test_android_single_quote_escape(self):
        """Checks several single quote unescaping cases in Android DTD.

        See bug #2480.
        """
        dtdsource = ('<!ENTITY pref_char_encoding_off "Don\\\'t show menu">\n'
                     "<!ENTITY sync.nodevice.label 'Don\\'t show'>\n"
                     '<!ENTITY sync.nodevice.label "Don\\u0027t show">\n')
        dtdfile = self.dtdparse(dtdsource)
        assert len(dtdfile.units) == 3
        dtdunit = dtdfile.units[0]
        assert dtdunit.definition == '"Don\\\'t show menu"'
        assert dtdunit.target == "Don't show menu"
        assert dtdunit.source == "Don't show menu"
        dtdunit = dtdfile.units[1]
        assert dtdunit.definition == "'Don\\'t show'"
        assert dtdunit.target == "Don't show"
        assert dtdunit.source == "Don't show"
        dtdunit = dtdfile.units[2]
        assert dtdunit.definition == '"Don\\u0027t show"'
        assert dtdunit.target == "Don't show"
        assert dtdunit.source == "Don't show"

    # Test for bug #2480
    def test_android_single_quote_escape_parse_and_convert_back(self):
        """Checks that Android DTD don't change after parse and convert back.

        An Android DTD source string with several single quote escapes is used
        instead of real files.

        See bug #2480.
        """
        dtdsource = ('<!ENTITY pref_char_encoding_off "Don\\\'t show menu">\n'
                     "<!ENTITY sync.nodevice.label 'Don\\'t show'>\n"
                     '<!ENTITY sync.nodevice.label "Don\\u0027t show">\n')
        dtdregen = self.dtdregen(dtdsource)
        assert dtdsource == dtdregen

    def test_android_double_quote_escape(self):
        """Checks double quote unescaping in Android DTD."""
        dtdsource = '<!ENTITY translate.test "A \\"thing\\"">\n'
        dtdfile = self.dtdparse(dtdsource)
        assert len(dtdfile.units) == 1
        dtdunit = dtdfile.units[0]
        assert dtdunit.definition == '"A \\"thing\\""'
        assert dtdunit.target == "A \"thing\""
        assert dtdunit.source == "A \"thing\""

    def test_android_double_quote_escape_parse_and_convert_back(self):
        """Checks that Android DTD don't change after parse and convert back.

        An Android DTD source string with double quote escapes is used instead
        of real files.
        """
        dtdsource = '<!ENTITY translate.test "A \\"thing\\"">\n'
        dtdregen = self.dtdregen(dtdsource)
        assert dtdsource == dtdregen
| bluemini/kuma | vendor/packages/translate/storage/test_dtd.py | Python | mpl-2.0 | 20,624 | 0.001746 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>, and others
# Copyright: (c) 2016, Toshio Kuratomi <tkuratomi@ansible.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: command
short_description: Execute commands on targets
version_added: historical
description:
- The C(command) module takes the command name followed by a list of space-delimited arguments.
- The given command will be executed on all selected nodes.
- The command(s) will not be
processed through the shell, so variables like C($HOSTNAME) and operations
like C("*"), C("<"), C(">"), C("|"), C(";") and C("&") will not work.
Use the M(ansible.builtin.shell) module if you need these features.
- To create C(command) tasks that are easier to read than the ones using space-delimited
arguments, pass parameters using the C(args) L(task keyword,../reference_appendices/playbooks_keywords.html#task)
or use C(cmd) parameter.
- Either a free form command or C(cmd) parameter is required, see the examples.
- For Windows targets, use the M(ansible.windows.win_command) module instead.
options:
free_form:
description:
- The command module takes a free form string as a command to run.
- There is no actual parameter named 'free form'.
cmd:
type: str
description:
- The command to run.
argv:
type: list
description:
- Passes the command as a list rather than a string.
- Use C(argv) to avoid quoting values that would otherwise be interpreted incorrectly (for example "user name").
- Only the string (free form) or the list (argv) form can be provided, not both. One or the other must be provided.
version_added: "2.6"
creates:
type: path
description:
- A filename or (since 2.0) glob pattern. If a matching file already exists, this step B(won't) be run.
removes:
type: path
description:
- A filename or (since 2.0) glob pattern. If a matching file exists, this step B(will) be run.
version_added: "0.8"
chdir:
type: path
description:
- Change into this directory before running the command.
version_added: "0.6"
warn:
description:
- (deprecated) Enable or disable task warnings.
- This feature is deprecated and will be removed in 2.14.
- As of version 2.11, this option is now disabled by default.
type: bool
default: no
version_added: "1.8"
stdin:
description:
- Set the stdin of the command directly to the specified value.
version_added: "2.4"
stdin_add_newline:
type: bool
default: yes
description:
- If set to C(yes), append a newline to stdin data.
version_added: "2.8"
strip_empty_ends:
description:
- Strip empty lines from the end of stdout/stderr in result.
version_added: "2.8"
type: bool
default: yes
notes:
- If you want to run a command through the shell (say you are using C(<), C(>), C(|), etc), you actually want the M(ansible.builtin.shell) module instead.
Parsing shell metacharacters can lead to unexpected commands being executed if quoting is not done correctly so it is more secure to
use the C(command) module when possible.
- " C(creates), C(removes), and C(chdir) can be specified after the command.
For instance, if you only want to run a command if a certain file does not exist, use this."
- Check mode is supported when passing C(creates) or C(removes). If running in check mode and either of these are specified, the module will
check for the existence of the file and report the correct changed status. If these are not supplied, the task will be skipped.
- The C(executable) parameter is removed since version 2.4. If you have a need for this parameter, use the M(ansible.builtin.shell) module instead.
- For Windows targets, use the M(ansible.windows.win_command) module instead.
- For rebooting systems, use the M(ansible.builtin.reboot) or M(ansible.windows.win_reboot) module.
seealso:
- module: ansible.builtin.raw
- module: ansible.builtin.script
- module: ansible.builtin.shell
- module: ansible.windows.win_command
author:
- Ansible Core Team
- Michael DeHaan
'''
EXAMPLES = r'''
- name: Return motd to registered var
command: cat /etc/motd
register: mymotd
# free-form (string) arguments, all arguments on one line
- name: Run command if /path/to/database does not exist (without 'args')
command: /usr/bin/make_database.sh db_user db_name creates=/path/to/database
# free-form (string) arguments, some arguments on separate lines with the 'args' keyword
# 'args' is a task keyword, passed at the same level as the module
- name: Run command if /path/to/database does not exist (with 'args' keyword)
command: /usr/bin/make_database.sh db_user db_name
args:
creates: /path/to/database
# 'cmd' is module parameter
- name: Run command if /path/to/database does not exist (with 'cmd' parameter)
command:
cmd: /usr/bin/make_database.sh db_user db_name
creates: /path/to/database
- name: Change the working directory to somedir/ and run the command as db_owner if /path/to/database does not exist
command: /usr/bin/make_database.sh db_user db_name
become: yes
become_user: db_owner
args:
chdir: somedir/
creates: /path/to/database
# argv (list) arguments, each argument on a separate line, 'args' keyword not necessary
# 'argv' is a parameter, indented one level from the module
- name: Use 'argv' to send a command as a list - leave 'command' empty
command:
argv:
- /usr/bin/make_database.sh
- Username with whitespace
- dbname with whitespace
creates: /path/to/database
- name: Safely use templated variable to run command. Always use the quote filter to avoid injection issues
command: cat {{ myfile|quote }}
register: myoutput
'''
RETURN = r'''
msg:
description: changed
returned: always
type: bool
sample: True
start:
description: The command execution start time
returned: always
type: str
sample: '2017-09-29 22:03:48.083128'
end:
description: The command execution end time
returned: always
type: str
sample: '2017-09-29 22:03:48.084657'
delta:
description: The command execution delta time
returned: always
type: str
sample: '0:00:00.001529'
stdout:
description: The command standard output
returned: always
type: str
sample: 'Clustering node rabbit@slave1 with rabbit@master …'
stderr:
description: The command standard error
returned: always
type: str
sample: 'ls cannot access foo: No such file or directory'
cmd:
description: The command executed by the task
returned: always
type: list
sample:
- echo
- hello
rc:
description: The command return code (0 means success)
returned: always
type: int
sample: 0
stdout_lines:
description: The command standard output split in lines
returned: always
type: list
sample: [u'Clustering node rabbit@slave1 with rabbit@master …']
stderr_lines:
description: The command standard error split in lines
returned: always
type: list
sample: [u'ls cannot access foo: No such file or directory', u'ls …']
'''
import datetime
import glob
import os
import shlex
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native, to_bytes, to_text
from ansible.module_utils.common.collections import is_iterable
def check_command(module, commandline):
    """Warn when a raw command duplicates a dedicated Ansible module.

    Inspects the executable name (first token of the command line with its
    directory part stripped) and, via module.warn(), suggests the ``file``
    module, a specialised module, or the ``become`` keywords instead.

    :param module: AnsibleModule instance (only ``warn()`` is used).
    :param commandline: the command, either a raw string or an argv list.
    """
    # Executables better expressed as arguments to the file module.
    file_module_args = {
        'chown': 'owner', 'chmod': 'mode', 'chgrp': 'group',
        'ln': 'state=link', 'mkdir': 'state=directory',
        'rmdir': 'state=absent', 'rm': 'state=absent', 'touch': 'state=touch',
    }
    # Executables with a dedicated module of their own.
    dedicated_modules = {
        'curl': 'get_url or uri', 'wget': 'get_url or uri',
        'svn': 'subversion', 'service': 'service',
        'mount': 'mount', 'rpm': 'yum, dnf or zypper', 'yum': 'yum', 'apt-get': 'apt',
        'tar': 'unarchive', 'unzip': 'unarchive', 'sed': 'replace, lineinfile or template',
        'dnf': 'dnf', 'zypper': 'zypper',
    }
    # Privilege-escalation wrappers better expressed via 'become'.
    escalation_tools = ('sudo', 'su', 'pbrun', 'pfexec', 'runas', 'pmrun', 'machinectl')

    first_token = commandline[0] if isinstance(commandline, list) else commandline.split()[0]
    executable = os.path.basename(first_token)

    disable_suffix = (
        "If you need to use command because {mod} is insufficient you can add"
        " 'warn: false' to this command task or set 'command_warnings=False' in"
        " ansible.cfg to get rid of this message."
    )
    fields = {'mod': None, 'cmd': executable}
    if executable in file_module_args:
        template = "Consider using the {mod} module with {subcmd} rather than running '{cmd}'. " + disable_suffix
        fields['mod'] = 'file'
        fields['subcmd'] = file_module_args[executable]
        module.warn(template.format(**fields))
    if executable in dedicated_modules:
        template = "Consider using the {mod} module rather than running '{cmd}'. " + disable_suffix
        fields['mod'] = dedicated_modules[executable]
        module.warn(template.format(**fields))
    if executable in escalation_tools:
        module.warn("Consider using 'become', 'become_method', and 'become_user' rather than running %s" % (executable,))
def main():
    """Entry point for the ansible.builtin.command module.

    Validates the mutually exclusive free-form/argv inputs, honours the
    creates/removes/chdir idempotence options, runs the command via
    module.run_command() and exits with the captured output and timing.
    """
    # the command module is the one ansible module that does not take key=value args
    # hence don't copy this one if you are looking to build others!
    module = AnsibleModule(
        argument_spec=dict(
            _raw_params=dict(),
            _uses_shell=dict(type='bool', default=False),
            argv=dict(type='list'),
            chdir=dict(type='path'),
            executable=dict(),
            creates=dict(type='path'),
            removes=dict(type='path'),
            # The default for this really comes from the action plugin
            warn=dict(type='bool', default=False, removed_in_version='2.14', removed_from_collection='ansible.builtin'),
            stdin=dict(required=False),
            stdin_add_newline=dict(type='bool', default=True),
            strip_empty_ends=dict(type='bool', default=True),
        ),
        supports_check_mode=True,
    )
    shell = module.params['_uses_shell']
    chdir = module.params['chdir']
    executable = module.params['executable']
    args = module.params['_raw_params']
    argv = module.params['argv']
    creates = module.params['creates']
    removes = module.params['removes']
    warn = module.params['warn']
    stdin = module.params['stdin']
    stdin_add_newline = module.params['stdin_add_newline']
    strip = module.params['strip_empty_ends']

    # 'executable' only makes sense with the shell module; warn and drop it.
    if not shell and executable:
        module.warn("As of Ansible 2.4, the parameter 'executable' is no longer supported with the 'command' module. Not using '%s'." % executable)
        executable = None

    # Exactly one of the free-form string or the argv list must be supplied.
    if (not args or args.strip() == '') and not argv:
        module.fail_json(rc=256, msg="no command given")

    if args and argv:
        module.fail_json(rc=256, msg="only command or argv can be given, not both")

    # Without a shell, tokenise the free-form string ourselves.
    if not shell and args:
        args = shlex.split(args)

    args = args or argv
    # All args must be strings
    if is_iterable(args, include_strings=False):
        args = [to_native(arg, errors='surrogate_or_strict', nonstring='simplerepr') for arg in args]

    if chdir:
        try:
            chdir = to_bytes(os.path.abspath(chdir), errors='surrogate_or_strict')
        except ValueError as e:
            module.fail_json(msg='Unable to use supplied chdir: %s' % to_text(e))

        try:
            os.chdir(chdir)
        except (IOError, OSError) as e:
            module.fail_json(msg='Unable to change directory before execution: %s' % to_text(e))

    if creates:
        # do not run the command if the line contains creates=filename
        # and the filename already exists. This allows idempotence
        # of command executions.
        if glob.glob(creates):
            module.exit_json(
                cmd=args,
                stdout="skipped, since %s exists" % creates,
                changed=False,
                rc=0
            )

    if removes:
        # do not run the command if the line contains removes=filename
        # and the filename does not exist. This allows idempotence
        # of command executions.
        if not glob.glob(removes):
            module.exit_json(
                cmd=args,
                stdout="skipped, since %s does not exist" % removes,
                changed=False,
                rc=0
            )

    if warn:
        # Deprecated 'warn' option: suggest dedicated modules where applicable.
        check_command(module, args)

    startd = datetime.datetime.now()

    if not module.check_mode:
        rc, out, err = module.run_command(args, executable=executable, use_unsafe_shell=shell, encoding=None, data=stdin, binary_data=(not stdin_add_newline))
    elif creates or removes:
        # In check mode with creates/removes the changed status was decided
        # above, so we can still report a plausible (empty) run.
        rc = 0
        out = err = b'Command would have run if not in check mode'
    else:
        module.exit_json(msg="skipped, running in check mode", skipped=True)

    endd = datetime.datetime.now()
    delta = endd - startd

    if strip:
        # run_command was called with encoding=None, so out/err are bytes.
        out = out.rstrip(b"\r\n")
        err = err.rstrip(b"\r\n")

    result = dict(
        cmd=args,
        stdout=out,
        stderr=err,
        rc=rc,
        start=str(startd),
        end=str(endd),
        delta=str(delta),
        changed=True,
    )

    if rc != 0:
        module.fail_json(msg='non-zero return code', **result)

    module.exit_json(**result)
# Standard Ansible module entry point: run only when executed as a script.
if __name__ == '__main__':
    main()
| jtyr/ansible | lib/ansible/modules/command.py | Python | gpl-3.0 | 13,711 | 0.003502 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""oslo.i18n integration module.
See https://docs.openstack.org/oslo.i18n/latest/user/index.html
"""
import oslo_i18n

# Translator factory scoped to this library's gettext domain, so message
# lookups hit the 'oslo_vmware' catalogs rather than the caller's.
_translators = oslo_i18n.TranslatorFactory(domain='oslo_vmware')

# The primary translation function using the well-known name "_"
_ = _translators.primary
| openstack/oslo.vmware | oslo_vmware/_i18n.py | Python | apache-2.0 | 852 | 0 |
from distutils.core import setup, Extension

# Build the '_intcode' CPython extension module from its single C source file.
setup(name = '_intcode',
      version = '0.1',
      ext_modules = [Extension('_intcode', sources = ['_intcode.c'])])
| msullivan/advent-of-code | 2019/setup.py | Python | mit | 164 | 0.04878 |
from __future__ import unicode_literals
import re
import time
import xml.etree.ElementTree
from .common import InfoExtractor
from ..utils import (
ExtractorError,
parse_duration,
)
class ClipfishIE(InfoExtractor):
    """Extracts video metadata (title, URL, thumbnail, duration) from
    clipfish.de watch pages via the site's devxml info endpoint."""

    IE_NAME = 'clipfish'
    _VALID_URL = r'^https?://(?:www\.)?clipfish\.de/.*?/video/(?P<id>[0-9]+)/'

    _TEST = {
        'url': 'http://www.clipfish.de/special/game-trailer/video/3966754/fifa-14-e3-2013-trailer/',
        'md5': '2521cd644e862936cf2e698206e47385',
        'info_dict': {
            'id': '3966754',
            'ext': 'mp4',
            'title': 'FIFA 14 - E3 2013 Trailer',
            'duration': 82,
        },
        'skip': 'Blocked in the US'
    }

    def _real_extract(self, url):
        video_id = re.match(self._VALID_URL, url).group(1)

        # The devxml endpoint expects a cache-busting timestamp argument.
        info_url = ('http://www.clipfish.de/devxml/videoinfo/%s?ts=%d' %
                    (video_id, int(time.time())))
        doc = self._download_xml(
            info_url, video_id, note='Downloading info page')

        title = doc.find('title').text
        video_url = doc.find('filename').text
        if video_url is None:
            raise ExtractorError('Cannot find video URL in document %r' %
                                 xml.etree.ElementTree.tostring(doc))

        return {
            'id': video_id,
            'title': title,
            'url': video_url,
            'thumbnail': doc.find('imageurl').text,
            'duration': parse_duration(doc.find('duration').text),
        }
| apllicationCOM/youtube-dl-api-server | youtube_dl_server/youtube_dl/extractor/clipfish.py | Python | unlicense | 1,624 | 0.000616 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply the migration: add the boolean 'deleted' flag to Customer."""
        # Adding field 'Customer.deleted'; existing rows are backfilled with
        # False, and the default is not kept on the schema afterwards.
        db.add_column(u'django_fastbill_customer', 'deleted',
                      self.gf('django.db.models.fields.BooleanField')(default=False),
                      keep_default=False)
    def backwards(self, orm):
        """Reverse the migration: drop the 'deleted' column again."""
        # Deleting field 'Customer.deleted'
        db.delete_column(u'django_fastbill_customer', 'deleted')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '255'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'django_fastbill.article': {
'Meta': {'object_name': 'Article'},
'allow_multiple': ('django.db.models.fields.BooleanField', [], {}),
'article_number': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'}),
'changed_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'checkout_url': ('django.db.models.fields.URLField', [], {'max_length': '300'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'currency_code': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'description': ('django.db.models.fields.TextField', [], {}),
'is_addon': ('django.db.models.fields.BooleanField', [], {}),
'return_url_cancel': ('django.db.models.fields.URLField', [], {'max_length': '300'}),
'return_url_success': ('django.db.models.fields.URLField', [], {'max_length': '300'}),
'setup_fee': ('django.db.models.fields.FloatField', [], {}),
'subscription_cancellation': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'subscription_duration': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'subscription_duration_follow': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'subscription_interval': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'subscription_number_events': ('django.db.models.fields.IntegerField', [], {}),
'subscription_trial': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'tags': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'unit_price': ('django.db.models.fields.FloatField', [], {}),
'vat_percent': ('django.db.models.fields.FloatField', [], {})
},
u'django_fastbill.customer': {
'Meta': {'object_name': 'Customer'},
'changed_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'changedata_url': ('django.db.models.fields.URLField', [], {'max_length': '500'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'customer_ext_uid': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'customer_id': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'}),
'customer_number': ('django.db.models.fields.IntegerField', [], {}),
'dashboard_url': ('django.db.models.fields.URLField', [], {'max_length': '500'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'fastbill_customer'", 'unique': 'True', 'null': 'True', 'to': u"orm['auth.User']"})
},
u'django_fastbill.invoice': {
'Meta': {'object_name': 'Invoice'},
'affiliate': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'cash_discount_days': ('django.db.models.fields.IntegerField', [], {}),
'cash_discount_percent': ('django.db.models.fields.FloatField', [], {}),
'changed_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'country_code': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'currency_code': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'customer_id': ('django.db.models.fields.IntegerField', [], {}),
'customer_number': ('django.db.models.fields.IntegerField', [], {}),
'days_for_payment': ('django.db.models.fields.IntegerField', [], {}),
'delivery_date': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'document_url': ('django.db.models.fields.URLField', [], {'max_length': '500'}),
'due_date': ('django.db.models.fields.DateTimeField', [], {}),
'introtext': ('django.db.models.fields.TextField', [], {}),
'invoice_date': ('django.db.models.fields.DateTimeField', [], {}),
'invoice_id': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'}),
'invoice_number': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'invoice_title': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'is_canceled': ('django.db.models.fields.BooleanField', [], {}),
'paid_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'payment_type': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'paypal_url': ('django.db.models.fields.URLField', [], {'max_length': '500'}),
'sub_total': ('django.db.models.fields.FloatField', [], {}),
'subscription_id': ('django.db.models.fields.IntegerField', [], {}),
'subscription_invoice_counter': ('django.db.models.fields.IntegerField', [], {}),
'template_id': ('django.db.models.fields.IntegerField', [], {}),
'total': ('django.db.models.fields.FloatField', [], {}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'vat_total': ('django.db.models.fields.FloatField', [], {})
},
u'django_fastbill.subscription': {
'Meta': {'object_name': 'Subscription'},
'article_number': ('django.db.models.fields.IntegerField', [], {}),
'cancellation_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'changed_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'customer_id': ('django.db.models.fields.IntegerField', [], {}),
'expiration_date': ('django.db.models.fields.DateTimeField', [], {}),
'invoice_title': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'last_event': ('django.db.models.fields.DateTimeField', [], {}),
'next_event': ('django.db.models.fields.DateTimeField', [], {}),
'quantity': ('django.db.models.fields.IntegerField', [], {}),
'start': ('django.db.models.fields.DateTimeField', [], {}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'subscription_ext_uid': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'subscription_id': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'}),
'x_attributes': ('django.db.models.fields.TextField', [], {})
}
}
complete_apps = ['django_fastbill'] | phelmig/django-fastbill | django_fastbill/migrations/0004_auto__add_field_customer_deleted.py | Python | mit | 10,979 | 0.007469 |
#!/usr/bin/python
#
# Copyright (C) 2006 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
XML_ENTRY_1 = """<?xml version='1.0'?>
<entry xmlns='http://www.w3.org/2005/Atom'
xmlns:g='http://base.google.com/ns/1.0'>
<category scheme="http://base.google.com/categories/itemtypes"
term="products"/>
<id> http://www.google.com/test/id/url </id>
<title type='text'>Testing 2000 series laptop</title>
<content type='xhtml'>
<div xmlns='http://www.w3.org/1999/xhtml'>A Testing Laptop</div>
</content>
<link rel='alternate' type='text/html'
href='http://www.provider-host.com/123456789'/>
<link rel='license'
href='http://creativecommons.org/licenses/by-nc/2.5/rdf'/>
<g:label>Computer</g:label>
<g:label>Laptop</g:label>
<g:label>testing laptop</g:label>
<g:item_type>products</g:item_type>
</entry>"""
TEST_BASE_ENTRY = """<?xml version='1.0'?>
<entry xmlns='http://www.w3.org/2005/Atom'
xmlns:g='http://base.google.com/ns/1.0'>
<category scheme="http://base.google.com/categories/itemtypes"
term="products"/>
<title type='text'>Testing 2000 series laptop</title>
<content type='xhtml'>
<div xmlns='http://www.w3.org/1999/xhtml'>A Testing Laptop</div>
</content>
<app:control xmlns:app='http://purl.org/atom/app#'>
<app:draft>yes</app:draft>
<gm:disapproved xmlns:gm='http://base.google.com/ns-metadata/1.0'/>
</app:control>
<link rel='alternate' type='text/html'
href='http://www.provider-host.com/123456789'/>
<g:label>Computer</g:label>
<g:label>Laptop</g:label>
<g:label>testing laptop</g:label>
<g:item_type>products</g:item_type>
</entry>"""
BIG_FEED = """<?xml version="1.0" encoding="utf-8"?>
<feed xmlns="http://www.w3.org/2005/Atom">
<title type="text">dive into mark</title>
<subtitle type="html">
A <em>lot</em> of effort
went into making this effortless
</subtitle>
<updated>2005-07-31T12:29:29Z</updated>
<id>tag:example.org,2003:3</id>
<link rel="alternate" type="text/html"
hreflang="en" href="http://example.org/"/>
<link rel="self" type="application/atom+xml"
href="http://example.org/feed.atom"/>
<rights>Copyright (c) 2003, Mark Pilgrim</rights>
<generator uri="http://www.example.com/" version="1.0">
Example Toolkit
</generator>
<entry>
<title>Atom draft-07 snapshot</title>
<link rel="alternate" type="text/html"
href="http://example.org/2005/04/02/atom"/>
<link rel="enclosure" type="audio/mpeg" length="1337"
href="http://example.org/audio/ph34r_my_podcast.mp3"/>
<id>tag:example.org,2003:3.2397</id>
<updated>2005-07-31T12:29:29Z</updated>
<published>2003-12-13T08:29:29-04:00</published>
<author>
<name>Mark Pilgrim</name>
<uri>http://example.org/</uri>
<email>f8dy@example.com</email>
</author>
<contributor>
<name>Sam Ruby</name>
</contributor>
<contributor>
<name>Joe Gregorio</name>
</contributor>
<content type="xhtml" xml:lang="en"
xml:base="http://diveintomark.org/">
<div xmlns="http://www.w3.org/1999/xhtml">
<p><i>[Update: The Atom draft is finished.]</i></p>
</div>
</content>
</entry>
</feed>
"""
SMALL_FEED = """<?xml version="1.0" encoding="utf-8"?>
<feed xmlns="http://www.w3.org/2005/Atom">
<title>Example Feed</title>
<link href="http://example.org/"/>
<updated>2003-12-13T18:30:02Z</updated>
<author>
<name>John Doe</name>
</author>
<id>urn:uuid:60a76c80-d399-11d9-b93C-0003939e0af6</id>
<entry>
<title>Atom-Powered Robots Run Amok</title>
<link href="http://example.org/2003/12/13/atom03"/>
<id>urn:uuid:1225c695-cfb8-4ebb-aaaa-80da344efa6a</id>
<updated>2003-12-13T18:30:02Z</updated>
<summary>Some text.</summary>
</entry>
</feed>
"""
GBASE_FEED = """<?xml version='1.0' encoding='UTF-8'?>
<feed xmlns='http://www.w3.org/2005/Atom' xmlns:openSearch='http://a9.com/-/spec/opensearchrss/1.0/' xmlns:g='http://base.google.com/ns/1.0' xmlns:batch='http://schemas.google.com/gdata/batch'>
<id>http://www.google.com/base/feeds/snippets</id>
<updated>2007-02-08T23:18:21.935Z</updated>
<title type='text'>Items matching query: digital camera</title>
<link rel='alternate' type='text/html' href='http://base.google.com'>
</link>
<link rel='http://schemas.google.com/g/2005#feed' type='application/atom+xml' href='http://www.google.com/base/feeds/snippets'>
</link>
<link rel='self' type='application/atom+xml' href='http://www.google.com/base/feeds/snippets?start-index=1&max-results=25&bq=digital+camera'>
</link>
<link rel='next' type='application/atom+xml' href='http://www.google.com/base/feeds/snippets?start-index=26&max-results=25&bq=digital+camera'>
</link>
<generator version='1.0' uri='http://base.google.com'>GoogleBase </generator>
<openSearch:totalResults>2171885</openSearch:totalResults>
<openSearch:startIndex>1</openSearch:startIndex>
<openSearch:itemsPerPage>25</openSearch:itemsPerPage>
<entry>
<id>http://www.google.com/base/feeds/snippets/13246453826751927533</id>
<published>2007-02-08T13:23:27.000Z</published>
<updated>2007-02-08T16:40:57.000Z</updated>
<category scheme='http://base.google.com/categories/itemtypes' term='Products'>
</category>
<title type='text'>Digital Camera Battery Notebook Computer 12v DC Power Cable - 5.5mm x 2.5mm (Center +) Camera Connecting Cables</title>
<content type='html'>Notebook Computer 12v DC Power Cable - 5.5mm x 2.1mm (Center +) This connection cable will allow any Digital Pursuits battery pack to power portable computers that operate with 12v power and have a 2.1mm power connector (center +) Digital ...</content>
<link rel='alternate' type='text/html' href='http://www.bhphotovideo.com/bnh/controller/home?O=productlist&A=details&Q=&sku=305668&is=REG&kw=DIDCB5092&BI=583'>
</link>
<link rel='self' type='application/atom+xml' href='http://www.google.com/base/feeds/snippets/13246453826751927533'>
</link>
<author>
<name>B&H Photo-Video</name>
<email>anon-szot0wdsq0at@base.google.com</email>
</author>
<g:payment_notes type='text'>PayPal & Bill Me Later credit available online only.</g:payment_notes>
<g:condition type='text'>new</g:condition>
<g:location type='location'>420 9th Ave. 10001</g:location>
<g:id type='text'>305668-REG</g:id>
<g:item_type type='text'>Products</g:item_type>
<g:brand type='text'>Digital Camera Battery</g:brand>
<g:expiration_date type='dateTime'>2007-03-10T13:23:27.000Z</g:expiration_date>
<g:customer_id type='int'>1172711</g:customer_id>
<g:price type='floatUnit'>34.95 usd</g:price>
<g:product_type type='text'>Digital Photography>Camera Connecting Cables</g:product_type>
<g:item_language type='text'>EN</g:item_language>
<g:manufacturer_id type='text'>DCB5092</g:manufacturer_id>
<g:target_country type='text'>US</g:target_country>
<g:weight type='float'>1.0</g:weight>
<g:image_link type='url'>http://base.google.com/base_image?q=http%3A%2F%2Fwww.bhphotovideo.com%2Fimages%2Fitems%2F305668.jpg&dhm=ffffffff84c9a95e&size=6</g:image_link>
</entry>
<entry>
<id>http://www.google.com/base/feeds/snippets/10145771037331858608</id>
<published>2007-02-08T13:23:27.000Z</published>
<updated>2007-02-08T16:40:57.000Z</updated>
<category scheme='http://base.google.com/categories/itemtypes' term='Products'>
</category>
<title type='text'>Digital Camera Battery Electronic Device 5v DC Power Cable - 5.5mm x 2.5mm (Center +) Camera Connecting Cables</title>
<content type='html'>Electronic Device 5v DC Power Cable - 5.5mm x 2.5mm (Center +) This connection cable will allow any Digital Pursuits battery pack to power any electronic device that operates with 5v power and has a 2.5mm power connector (center +) Digital ...</content>
<link rel='alternate' type='text/html' href='http://www.bhphotovideo.com/bnh/controller/home?O=productlist&A=details&Q=&sku=305656&is=REG&kw=DIDCB5108&BI=583'>
</link>
<link rel='self' type='application/atom+xml' href='http://www.google.com/base/feeds/snippets/10145771037331858608'>
</link>
<author>
<name>B&H Photo-Video</name>
<email>anon-szot0wdsq0at@base.google.com</email>
</author>
<g:location type='location'>420 9th Ave. 10001</g:location>
<g:condition type='text'>new</g:condition>
<g:weight type='float'>0.18</g:weight>
<g:target_country type='text'>US</g:target_country>
<g:product_type type='text'>Digital Photography>Camera Connecting Cables</g:product_type>
<g:payment_notes type='text'>PayPal & Bill Me Later credit available online only.</g:payment_notes>
<g:id type='text'>305656-REG</g:id>
<g:image_link type='url'>http://base.google.com/base_image?q=http%3A%2F%2Fwww.bhphotovideo.com%2Fimages%2Fitems%2F305656.jpg&dhm=7315bdc8&size=6</g:image_link>
<g:manufacturer_id type='text'>DCB5108</g:manufacturer_id>
<g:upc type='text'>838098005108</g:upc>
<g:price type='floatUnit'>34.95 usd</g:price>
<g:item_language type='text'>EN</g:item_language>
<g:brand type='text'>Digital Camera Battery</g:brand>
<g:customer_id type='int'>1172711</g:customer_id>
<g:item_type type='text'>Products</g:item_type>
<g:expiration_date type='dateTime'>2007-03-10T13:23:27.000Z</g:expiration_date>
</entry>
<entry>
<id>http://www.google.com/base/feeds/snippets/3128608193804768644</id>
<published>2007-02-08T02:21:27.000Z</published>
<updated>2007-02-08T15:40:13.000Z</updated>
<category scheme='http://base.google.com/categories/itemtypes' term='Products'>
</category>
<title type='text'>Digital Camera Battery Power Cable for Kodak 645 Pro-Back ProBack & DCS-300 Series Camera Connecting Cables</title>
<content type='html'>Camera Connection Cable - to Power Kodak 645 Pro-Back DCS-300 Series Digital Cameras This connection cable will allow any Digital Pursuits battery pack to power the following digital cameras: Kodak DCS Pro Back 645 DCS-300 series Digital Photography ...</content>
<link rel='alternate' type='text/html' href='http://www.bhphotovideo.com/bnh/controller/home?O=productlist&A=details&Q=&sku=305685&is=REG&kw=DIDCB6006&BI=583'>
</link>
<link rel='self' type='application/atom+xml' href='http://www.google.com/base/feeds/snippets/3128608193804768644'>
</link>
<author>
<name>B&H Photo-Video</name>
<email>anon-szot0wdsq0at@base.google.com</email>
</author>
<g:weight type='float'>0.3</g:weight>
<g:manufacturer_id type='text'>DCB6006</g:manufacturer_id>
<g:image_link type='url'>http://base.google.com/base_image?q=http%3A%2F%2Fwww.bhphotovideo.com%2Fimages%2Fitems%2F305685.jpg&dhm=72f0ca0a&size=6</g:image_link>
<g:location type='location'>420 9th Ave. 10001</g:location>
<g:payment_notes type='text'>PayPal & Bill Me Later credit available online only.</g:payment_notes>
<g:item_type type='text'>Products</g:item_type>
<g:target_country type='text'>US</g:target_country>
<g:accessory_for type='text'>digital kodak camera</g:accessory_for>
<g:brand type='text'>Digital Camera Battery</g:brand>
<g:expiration_date type='dateTime'>2007-03-10T02:21:27.000Z</g:expiration_date>
<g:item_language type='text'>EN</g:item_language>
<g:condition type='text'>new</g:condition>
<g:price type='floatUnit'>34.95 usd</g:price>
<g:customer_id type='int'>1172711</g:customer_id>
<g:product_type type='text'>Digital Photography>Camera Connecting Cables</g:product_type>
<g:id type='text'>305685-REG</g:id>
</entry>
</feed>"""
EXTENSION_TREE = """<?xml version="1.0" encoding="utf-8"?>
<feed xmlns="http://www.w3.org/2005/Atom">
<g:author xmlns:g="http://www.google.com">
<g:name>John Doe
<g:foo yes="no" up="down">Bar</g:foo>
</g:name>
</g:author>
</feed>
"""
TEST_AUTHOR = """<?xml version="1.0" encoding="utf-8"?>
<author xmlns="http://www.w3.org/2005/Atom">
<name xmlns="http://www.w3.org/2005/Atom">John Doe</name>
<email xmlns="http://www.w3.org/2005/Atom">johndoes@someemailadress.com</email>
<uri xmlns="http://www.w3.org/2005/Atom">http://www.google.com</uri>
</author>
"""
TEST_LINK = """<?xml version="1.0" encoding="utf-8"?>
<link xmlns="http://www.w3.org/2005/Atom" href="http://www.google.com"
rel="test rel" foo1="bar" foo2="rab"/>
"""
TEST_GBASE_ATTRIBUTE = """<?xml version="1.0" encoding="utf-8"?>
<g:brand type='text' xmlns:g="http://base.google.com/ns/1.0">Digital Camera Battery</g:brand>
"""
CALENDAR_FEED = """<?xml version='1.0' encoding='utf-8'?>
<feed xmlns='http://www.w3.org/2005/Atom'
xmlns:openSearch='http://a9.com/-/spec/opensearchrss/1.0/'
xmlns:gd='http://schemas.google.com/g/2005'
xmlns:gCal='http://schemas.google.com/gCal/2005'>
<id>http://www.google.com/calendar/feeds/default</id>
<updated>2007-03-20T22:48:57.833Z</updated>
<title type='text'>GData Ops Demo's Calendar List</title>
<link rel='http://schemas.google.com/g/2005#feed'
type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default'></link>
<link rel='http://schemas.google.com/g/2005#post'
type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default'></link>
<link rel='self' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default'></link>
<author>
<name>GData Ops Demo</name>
<email>gdata.ops.demo@gmail.com</email>
</author>
<generator version='1.0' uri='http://www.google.com/calendar'>
Google Calendar</generator>
<openSearch:startIndex>1</openSearch:startIndex>
<entry>
<id>
http://www.google.com/calendar/feeds/default/gdata.ops.demo%40gmail.com</id>
<published>2007-03-20T22:48:57.837Z</published>
<updated>2007-03-20T22:48:52.000Z</updated>
<title type='text'>GData Ops Demo</title>
<link rel='alternate' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/gdata.ops.demo%40gmail.com/private/full'>
</link>
<link rel='self' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/gdata.ops.demo%40gmail.com'>
</link>
<author>
<name>GData Ops Demo</name>
<email>gdata.ops.demo@gmail.com</email>
</author>
<gCal:color value='#2952A3'></gCal:color>
<gCal:accesslevel value='owner'></gCal:accesslevel>
<gCal:hidden value='false'></gCal:hidden>
<gCal:timezone value='America/Los_Angeles'></gCal:timezone>
</entry>
<entry>
<id>
http://www.google.com/calendar/feeds/default/jnh21ovnjgfph21h32gvms2758%40group.calendar.google.com</id>
<published>2007-03-20T22:48:57.837Z</published>
<updated>2007-03-20T22:48:53.000Z</updated>
<title type='text'>GData Ops Demo Secondary Calendar</title>
<summary type='text'></summary>
<link rel='alternate' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/jnh21ovnjgfph21h32gvms2758%40group.calendar.google.com/private/full'>
</link>
<link rel='self' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/jnh21ovnjgfph21h32gvms2758%40group.calendar.google.com'>
</link>
<author>
<name>GData Ops Demo Secondary Calendar</name>
</author>
<gCal:color value='#528800'></gCal:color>
<gCal:accesslevel value='owner'></gCal:accesslevel>
<gCal:hidden value='false'></gCal:hidden>
<gCal:timezone value='America/Los_Angeles'></gCal:timezone>
<gd:where valueString=''></gd:where>
</entry>
</feed>
"""
CALENDAR_FULL_EVENT_FEED = """<?xml version='1.0' encoding='utf-8'?>
<feed xmlns='http://www.w3.org/2005/Atom'
xmlns:openSearch='http://a9.com/-/spec/opensearchrss/1.0/'
xmlns:gd='http://schemas.google.com/g/2005'
xmlns:gCal='http://schemas.google.com/gCal/2005'>
<id>
http://www.google.com/calendar/feeds/default/private/full</id>
<updated>2007-03-20T21:29:57.000Z</updated>
<category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/g/2005#event'></category>
<title type='text'>GData Ops Demo</title>
<subtitle type='text'>GData Ops Demo</subtitle>
<link rel='http://schemas.google.com/g/2005#feed'
type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full'>
</link>
<link rel='http://schemas.google.com/g/2005#post'
type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full'>
</link>
<link rel='self' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full?updated-min=2001-01-01&max-results=25'>
</link>
<author>
<name>GData Ops Demo</name>
<email>gdata.ops.demo@gmail.com</email>
</author>
<generator version='1.0' uri='http://www.google.com/calendar'>
Google Calendar</generator>
<openSearch:totalResults>10</openSearch:totalResults>
<openSearch:startIndex>1</openSearch:startIndex>
<openSearch:itemsPerPage>25</openSearch:itemsPerPage>
<gCal:timezone value='America/Los_Angeles'></gCal:timezone>
<entry>
<id>
http://www.google.com/calendar/feeds/default/private/full/o99flmgmkfkfrr8u745ghr3100</id>
<published>2007-03-20T21:29:52.000Z</published>
<updated>2007-03-20T21:29:57.000Z</updated>
<category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/g/2005#event'></category>
<title type='text'>test deleted</title>
<content type='text'></content>
<link rel='alternate' type='text/html'
href='http://www.google.com/calendar/event?eid=bzk5ZmxtZ21rZmtmcnI4dTc0NWdocjMxMDAgZ2RhdGEub3BzLmRlbW9AbQ'
title='alternate'></link>
<link rel='self' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full/o99flmgmkfkfrr8u745ghr3100'>
</link>
<link rel='edit' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full/o99flmgmkfkfrr8u745ghr3100/63310109397'>
</link>
<author>
<name>GData Ops Demo</name>
<email>gdata.ops.demo@gmail.com</email>
</author>
<gCal:sendEventNotifications value='false'>
</gCal:sendEventNotifications>
<gd:eventStatus value='http://schemas.google.com/g/2005#event.canceled'>
</gd:eventStatus>
<gd:comments>
<gd:feedLink href='http://www.google.com/calendar/feeds/default/private/full/o99flmgmkfkfrr8u745ghr3100/comments'>
</gd:feedLink>
</gd:comments>
<gd:visibility value='http://schemas.google.com/g/2005#event.default'>
</gd:visibility>
<gd:transparency value='http://schemas.google.com/g/2005#event.opaque'>
</gd:transparency>
<gd:when startTime='2007-03-23T12:00:00.000-07:00'
endTime='2007-03-23T13:00:00.000-07:00'>
<gd:reminder minutes='10'></gd:reminder>
</gd:when>
<gd:where></gd:where>
</entry>
<entry>
<id>
http://www.google.com/calendar/feeds/default/private/full/2qt3ao5hbaq7m9igr5ak9esjo0</id>
<published>2007-03-20T21:26:04.000Z</published>
<updated>2007-03-20T21:28:46.000Z</updated>
<category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/g/2005#event'></category>
<title type='text'>Afternoon at Dolores Park with Kim</title>
<content type='text'></content>
<link rel='alternate' type='text/html'
href='http://www.google.com/calendar/event?eid=MnF0M2FvNWhiYXE3bTlpZ3I1YWs5ZXNqbzAgZ2RhdGEub3BzLmRlbW9AbQ'
title='alternate'></link>
<link rel='self' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full/2qt3ao5hbaq7m9igr5ak9esjo0'>
</link>
<link rel='edit' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full/2qt3ao5hbaq7m9igr5ak9esjo0/63310109326'>
</link>
<author>
<name>GData Ops Demo</name>
<email>gdata.ops.demo@gmail.com</email>
</author>
<gCal:sendEventNotifications value='false'>
</gCal:sendEventNotifications>
<gd:eventStatus value='http://schemas.google.com/g/2005#event.confirmed'>
</gd:eventStatus>
<gd:comments>
<gd:feedLink href='http://www.google.com/calendar/feeds/default/private/full/2qt3ao5hbaq7m9igr5ak9esjo0/comments'>
</gd:feedLink>
</gd:comments>
<gd:visibility value='http://schemas.google.com/g/2005#event.private'>
</gd:visibility>
<gd:transparency value='http://schemas.google.com/g/2005#event.opaque'>
</gd:transparency>
<gd:who rel='http://schemas.google.com/g/2005#event.organizer'
valueString='GData Ops Demo' email='gdata.ops.demo@gmail.com'>
<gd:attendeeStatus value='http://schemas.google.com/g/2005#event.accepted'>
</gd:attendeeStatus>
</gd:who>
<gd:who rel='http://schemas.google.com/g/2005#event.attendee'
valueString='Ryan Boyd (API)' email='api.rboyd@gmail.com'>
<gd:attendeeStatus value='http://schemas.google.com/g/2005#event.invited'>
</gd:attendeeStatus>
</gd:who>
<gd:when startTime='2007-03-24T12:00:00.000-07:00'
endTime='2007-03-24T15:00:00.000-07:00'>
<gd:reminder minutes='20'></gd:reminder>
</gd:when>
<gd:where valueString='Dolores Park with Kim'></gd:where>
</entry>
<entry>
<id>
http://www.google.com/calendar/feeds/default/private/full/uvsqhg7klnae40v50vihr1pvos</id>
<published>2007-03-20T21:28:37.000Z</published>
<updated>2007-03-20T21:28:37.000Z</updated>
<category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/g/2005#event'></category>
<title type='text'>Team meeting</title>
<content type='text'></content>
<link rel='alternate' type='text/html'
href='http://www.google.com/calendar/event?eid=dXZzcWhnN2tsbmFlNDB2NTB2aWhyMXB2b3NfMjAwNzAzMjNUMTYwMDAwWiBnZGF0YS5vcHMuZGVtb0Bt'
title='alternate'></link>
<link rel='self' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full/uvsqhg7klnae40v50vihr1pvos'>
</link>
<link rel='edit' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full/uvsqhg7klnae40v50vihr1pvos/63310109317'>
</link>
<author>
<name>GData Ops Demo</name>
<email>gdata.ops.demo@gmail.com</email>
</author>
<gd:recurrence>DTSTART;TZID=America/Los_Angeles:20070323T090000
DTEND;TZID=America/Los_Angeles:20070323T100000
RRULE:FREQ=WEEKLY;BYDAY=FR;UNTIL=20070817T160000Z;WKST=SU
BEGIN:VTIMEZONE TZID:America/Los_Angeles
X-LIC-LOCATION:America/Los_Angeles BEGIN:STANDARD
TZOFFSETFROM:-0700 TZOFFSETTO:-0800 TZNAME:PST
DTSTART:19701025T020000 RRULE:FREQ=YEARLY;BYMONTH=10;BYDAY=-1SU
END:STANDARD BEGIN:DAYLIGHT TZOFFSETFROM:-0800 TZOFFSETTO:-0700
TZNAME:PDT DTSTART:19700405T020000
RRULE:FREQ=YEARLY;BYMONTH=4;BYDAY=1SU END:DAYLIGHT
END:VTIMEZONE</gd:recurrence>
<gCal:sendEventNotifications value='true'>
</gCal:sendEventNotifications>
<gd:eventStatus value='http://schemas.google.com/g/2005#event.confirmed'>
</gd:eventStatus>
<gd:visibility value='http://schemas.google.com/g/2005#event.public'>
</gd:visibility>
<gd:transparency value='http://schemas.google.com/g/2005#event.opaque'>
</gd:transparency>
<gd:reminder minutes='10'></gd:reminder>
<gd:where valueString=''></gd:where>
</entry>
<entry>
<id>
http://www.google.com/calendar/feeds/default/private/full/st4vk9kiffs6rasrl32e4a7alo</id>
<published>2007-03-20T21:25:46.000Z</published>
<updated>2007-03-20T21:25:46.000Z</updated>
<category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/g/2005#event'></category>
<title type='text'>Movie with Kim and danah</title>
<content type='text'></content>
<link rel='alternate' type='text/html'
href='http://www.google.com/calendar/event?eid=c3Q0dms5a2lmZnM2cmFzcmwzMmU0YTdhbG8gZ2RhdGEub3BzLmRlbW9AbQ'
title='alternate'></link>
<link rel='self' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full/st4vk9kiffs6rasrl32e4a7alo'>
</link>
<link rel='edit' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full/st4vk9kiffs6rasrl32e4a7alo/63310109146'>
</link>
<author>
<name>GData Ops Demo</name>
<email>gdata.ops.demo@gmail.com</email>
</author>
<gCal:sendEventNotifications value='false'>
</gCal:sendEventNotifications>
<gd:eventStatus value='http://schemas.google.com/g/2005#event.confirmed'>
</gd:eventStatus>
<gd:comments>
<gd:feedLink href='http://www.google.com/calendar/feeds/default/private/full/st4vk9kiffs6rasrl32e4a7alo/comments'>
</gd:feedLink>
</gd:comments>
<gd:visibility value='http://schemas.google.com/g/2005#event.default'>
</gd:visibility>
<gd:transparency value='http://schemas.google.com/g/2005#event.opaque'>
</gd:transparency>
<gd:when startTime='2007-03-24T20:00:00.000-07:00'
endTime='2007-03-24T21:00:00.000-07:00'>
<gd:reminder minutes='10'></gd:reminder>
</gd:when>
<gd:where></gd:where>
</entry>
<entry>
<id>
http://www.google.com/calendar/feeds/default/private/full/ofl1e45ubtsoh6gtu127cls2oo</id>
<published>2007-03-20T21:24:43.000Z</published>
<updated>2007-03-20T21:25:08.000Z</updated>
<category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/g/2005#event'></category>
<title type='text'>Dinner with Kim and Sarah</title>
<content type='text'></content>
<link rel='alternate' type='text/html'
href='http://www.google.com/calendar/event?eid=b2ZsMWU0NXVidHNvaDZndHUxMjdjbHMyb28gZ2RhdGEub3BzLmRlbW9AbQ'
title='alternate'></link>
<link rel='self' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full/ofl1e45ubtsoh6gtu127cls2oo'>
</link>
<link rel='edit' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full/ofl1e45ubtsoh6gtu127cls2oo/63310109108'>
</link>
<author>
<name>GData Ops Demo</name>
<email>gdata.ops.demo@gmail.com</email>
</author>
<gCal:sendEventNotifications value='false'>
</gCal:sendEventNotifications>
<gd:eventStatus value='http://schemas.google.com/g/2005#event.confirmed'>
</gd:eventStatus>
<gd:comments>
<gd:feedLink href='http://www.google.com/calendar/feeds/default/private/full/ofl1e45ubtsoh6gtu127cls2oo/comments'>
</gd:feedLink>
</gd:comments>
<gd:visibility value='http://schemas.google.com/g/2005#event.default'>
</gd:visibility>
<gd:transparency value='http://schemas.google.com/g/2005#event.opaque'>
</gd:transparency>
<gd:when startTime='2007-03-20T19:00:00.000-07:00'
endTime='2007-03-20T21:30:00.000-07:00'>
<gd:reminder minutes='10'></gd:reminder>
</gd:when>
<gd:where></gd:where>
</entry>
<entry>
<id>
http://www.google.com/calendar/feeds/default/private/full/b69s2avfi2joigsclecvjlc91g</id>
<published>2007-03-20T21:24:19.000Z</published>
<updated>2007-03-20T21:25:05.000Z</updated>
<category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/g/2005#event'></category>
<title type='text'>Dinner with Jane and John</title>
<content type='text'></content>
<link rel='alternate' type='text/html'
href='http://www.google.com/calendar/event?eid=YjY5czJhdmZpMmpvaWdzY2xlY3ZqbGM5MWcgZ2RhdGEub3BzLmRlbW9AbQ'
title='alternate'></link>
<link rel='self' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full/b69s2avfi2joigsclecvjlc91g'>
</link>
<link rel='edit' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full/b69s2avfi2joigsclecvjlc91g/63310109105'>
</link>
<author>
<name>GData Ops Demo</name>
<email>gdata.ops.demo@gmail.com</email>
</author>
<gCal:sendEventNotifications value='false'>
</gCal:sendEventNotifications>
<gd:eventStatus value='http://schemas.google.com/g/2005#event.confirmed'>
</gd:eventStatus>
<gd:comments>
<gd:feedLink href='http://www.google.com/calendar/feeds/default/private/full/b69s2avfi2joigsclecvjlc91g/comments'>
</gd:feedLink>
</gd:comments>
<gd:visibility value='http://schemas.google.com/g/2005#event.default'>
</gd:visibility>
<gd:transparency value='http://schemas.google.com/g/2005#event.opaque'>
</gd:transparency>
<gd:when startTime='2007-03-22T17:00:00.000-07:00'
endTime='2007-03-22T19:30:00.000-07:00'>
<gd:reminder minutes='10'></gd:reminder>
</gd:when>
<gd:where></gd:where>
</entry>
<entry>
<id>
http://www.google.com/calendar/feeds/default/private/full/u9p66kkiotn8bqh9k7j4rcnjjc</id>
<published>2007-03-20T21:24:33.000Z</published>
<updated>2007-03-20T21:24:33.000Z</updated>
<category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/g/2005#event'></category>
<title type='text'>Tennis with Elizabeth</title>
<content type='text'></content>
<link rel='alternate' type='text/html'
href='http://www.google.com/calendar/event?eid=dTlwNjZra2lvdG44YnFoOWs3ajRyY25qamMgZ2RhdGEub3BzLmRlbW9AbQ'
title='alternate'></link>
<link rel='self' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full/u9p66kkiotn8bqh9k7j4rcnjjc'>
</link>
<link rel='edit' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full/u9p66kkiotn8bqh9k7j4rcnjjc/63310109073'>
</link>
<author>
<name>GData Ops Demo</name>
<email>gdata.ops.demo@gmail.com</email>
</author>
<gCal:sendEventNotifications value='false'>
</gCal:sendEventNotifications>
<gd:eventStatus value='http://schemas.google.com/g/2005#event.confirmed'>
</gd:eventStatus>
<gd:comments>
<gd:feedLink href='http://www.google.com/calendar/feeds/default/private/full/u9p66kkiotn8bqh9k7j4rcnjjc/comments'>
</gd:feedLink>
</gd:comments>
<gd:visibility value='http://schemas.google.com/g/2005#event.default'>
</gd:visibility>
<gd:transparency value='http://schemas.google.com/g/2005#event.opaque'>
</gd:transparency>
<gd:when startTime='2007-03-24T10:00:00.000-07:00'
endTime='2007-03-24T11:00:00.000-07:00'>
<gd:reminder minutes='10'></gd:reminder>
</gd:when>
<gd:where></gd:where>
</entry>
<entry>
<id>
http://www.google.com/calendar/feeds/default/private/full/76oj2kceidob3s708tvfnuaq3c</id>
<published>2007-03-20T21:24:00.000Z</published>
<updated>2007-03-20T21:24:00.000Z</updated>
<category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/g/2005#event'></category>
<title type='text'>Lunch with Jenn</title>
<content type='text'></content>
<link rel='alternate' type='text/html'
href='http://www.google.com/calendar/event?eid=NzZvajJrY2VpZG9iM3M3MDh0dmZudWFxM2MgZ2RhdGEub3BzLmRlbW9AbQ'
title='alternate'></link>
<link rel='self' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full/76oj2kceidob3s708tvfnuaq3c'>
</link>
<link rel='edit' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full/76oj2kceidob3s708tvfnuaq3c/63310109040'>
</link>
<author>
<name>GData Ops Demo</name>
<email>gdata.ops.demo@gmail.com</email>
</author>
<gCal:sendEventNotifications value='false'>
</gCal:sendEventNotifications>
<gd:eventStatus value='http://schemas.google.com/g/2005#event.confirmed'>
</gd:eventStatus>
<gd:comments>
<gd:feedLink href='http://www.google.com/calendar/feeds/default/private/full/76oj2kceidob3s708tvfnuaq3c/comments'>
</gd:feedLink>
</gd:comments>
<gd:visibility value='http://schemas.google.com/g/2005#event.default'>
</gd:visibility>
<gd:transparency value='http://schemas.google.com/g/2005#event.opaque'>
</gd:transparency>
<gd:when startTime='2007-03-20T11:30:00.000-07:00'
endTime='2007-03-20T12:30:00.000-07:00'>
<gd:reminder minutes='10'></gd:reminder>
</gd:when>
<gd:where></gd:where>
</entry>
<entry>
<id>
http://www.google.com/calendar/feeds/default/private/full/5np9ec8m7uoauk1vedh5mhodco</id>
<published>2007-03-20T07:50:02.000Z</published>
<updated>2007-03-20T20:39:26.000Z</updated>
<category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/g/2005#event'></category>
<title type='text'>test entry</title>
<content type='text'>test desc</content>
<link rel='alternate' type='text/html'
href='http://www.google.com/calendar/event?eid=NW5wOWVjOG03dW9hdWsxdmVkaDVtaG9kY28gZ2RhdGEub3BzLmRlbW9AbQ'
title='alternate'></link>
<link rel='self' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full/5np9ec8m7uoauk1vedh5mhodco'>
</link>
<link rel='edit' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full/5np9ec8m7uoauk1vedh5mhodco/63310106366'>
</link>
<author>
<name>GData Ops Demo</name>
<email>gdata.ops.demo@gmail.com</email>
</author>
<gCal:sendEventNotifications value='false'>
</gCal:sendEventNotifications>
<gd:eventStatus value='http://schemas.google.com/g/2005#event.confirmed'>
</gd:eventStatus>
<gd:comments>
<gd:feedLink href='http://www.google.com/calendar/feeds/default/private/full/5np9ec8m7uoauk1vedh5mhodco/comments'>
</gd:feedLink>
</gd:comments>
<gd:visibility value='http://schemas.google.com/g/2005#event.private'>
</gd:visibility>
<gd:transparency value='http://schemas.google.com/g/2005#event.opaque'>
</gd:transparency>
<gd:who rel='http://schemas.google.com/g/2005#event.attendee'
valueString='Vivian Li' email='vli@google.com'>
<gd:attendeeStatus value='http://schemas.google.com/g/2005#event.declined'>
</gd:attendeeStatus>
</gd:who>
<gd:who rel='http://schemas.google.com/g/2005#event.organizer'
valueString='GData Ops Demo' email='gdata.ops.demo@gmail.com'>
<gd:attendeeStatus value='http://schemas.google.com/g/2005#event.accepted'>
</gd:attendeeStatus>
</gd:who>
<gd:when startTime='2007-03-21T08:00:00.000-07:00'
endTime='2007-03-21T09:00:00.000-07:00'>
<gd:reminder minutes='10'></gd:reminder>
</gd:when>
<gd:where valueString='anywhere'></gd:where>
</entry>
<entry>
<id>
http://www.google.com/calendar/feeds/default/private/full/fu6sl0rqakf3o0a13oo1i1a1mg</id>
<published>2007-02-14T23:23:37.000Z</published>
<updated>2007-02-14T23:25:30.000Z</updated>
<category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/g/2005#event'></category>
<title type='text'>test</title>
<content type='text'></content>
<link rel='alternate' type='text/html'
href='http://www.google.com/calendar/event?eid=ZnU2c2wwcnFha2YzbzBhMTNvbzFpMWExbWcgZ2RhdGEub3BzLmRlbW9AbQ'
title='alternate'></link>
<link rel='self' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full/fu6sl0rqakf3o0a13oo1i1a1mg'>
</link>
<link rel='edit' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full/fu6sl0rqakf3o0a13oo1i1a1mg/63307178730'>
</link>
<link rel="http://schemas.google.com/gCal/2005/webContent" title="World Cup" href="http://www.google.com/calendar/images/google-holiday.gif" type="image/gif">
<gCal:webContent width="276" height="120" url="http://www.google.com/logos/worldcup06.gif" />
</link>
<author>
<name>GData Ops Demo</name>
<email>gdata.ops.demo@gmail.com</email>
</author>
<gCal:sendEventNotifications value='false'>
</gCal:sendEventNotifications>
<gd:eventStatus value='http://schemas.google.com/g/2005#event.confirmed'>
</gd:eventStatus>
<gd:comments>
<gd:feedLink href='http://www.google.com/calendar/feeds/default/private/full/fu6sl0rqakf3o0a13oo1i1a1mg/comments'>
</gd:feedLink>
</gd:comments>
<gd:visibility value='http://schemas.google.com/g/2005#event.default'>
</gd:visibility>
<gd:transparency value='http://schemas.google.com/g/2005#event.opaque'>
</gd:transparency>
<gd:when startTime='2007-02-15T08:30:00.000-08:00'
endTime='2007-02-15T09:30:00.000-08:00'>
<gd:reminder minutes='10'></gd:reminder>
</gd:when>
<gd:where></gd:where>
</entry>
<entry>
<id>
http://www.google.com/calendar/feeds/default/private/full/h7a0haa4da8sil3rr19ia6luvc</id>
<published>2007-07-16T22:13:28.000Z</published>
<updated>2007-07-16T22:13:29.000Z</updated>
<category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/g/2005#event' />
<title type='text'></title>
<content type='text' />
<link rel='alternate' type='text/html'
href='http://www.google.com/calendar/event?eid=aDdhMGhhYTRkYThzaWwzcnIxOWlhNmx1dmMgZ2RhdGEub3BzLmRlbW9AbQ'
title='alternate' />
<link rel='http://schemas.google.com/gCal/2005/webContent'
type='application/x-google-gadgets+xml'
href='http://gdata.ops.demo.googlepages.com/birthdayicon.gif'
title='Date and Time Gadget'>
<gCal:webContent width='300' height='136'
url='http://google.com/ig/modules/datetime.xml'>
<gCal:webContentGadgetPref name='color' value='green' />
</gCal:webContent>
</link>
<link rel='self' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full/h7a0haa4da8sil3rr19ia6luvc' />
<link rel='edit' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full/h7a0haa4da8sil3rr19ia6luvc/63320307209' />
<author>
<name>GData Ops Demo</name>
<email>gdata.ops.demo@gmail.com</email>
</author>
<gd:comments>
<gd:feedLink href='http://www.google.com/calendar/feeds/default/private/full/h7a0haa4da8sil3rr19ia6luvc/comments' />
</gd:comments>
<gCal:sendEventNotifications value='false'>
</gCal:sendEventNotifications>
<gd:eventStatus value='http://schemas.google.com/g/2005#event.confirmed' />
<gd:visibility value='http://schemas.google.com/g/2005#event.default' />
<gd:transparency value='http://schemas.google.com/g/2005#event.opaque' />
<gd:when startTime='2007-03-14' endTime='2007-03-15' />
<gd:where />
</entry>
</feed>
"""
CALENDAR_BATCH_REQUEST = """<?xml version='1.0' encoding='utf-8'?>
<feed xmlns='http://www.w3.org/2005/Atom'
xmlns:batch='http://schemas.google.com/gdata/batch'
xmlns:gCal='http://schemas.google.com/gCal/2005'>
<category scheme='http://schemas.google.com/g/2005#kind' term='http://schemas.google.com/g/2005#event' />
<entry>
<batch:id>1</batch:id>
<batch:operation type='insert' />
<category scheme='http://schemas.google.com/g/2005#kind' term='http://schemas.google.com/g/2005#event' />
<title type='text'>Event inserted via batch</title>
</entry>
<entry>
<batch:id>2</batch:id>
<batch:operation type='query' />
<id>http://www.google.com/calendar/feeds/default/private/full/glcs0kv2qqa0gf52qi1jo018gc</id>
<category scheme='http://schemas.google.com/g/2005#kind' term='http://schemas.google.com/g/2005#event' />
<title type='text'>Event queried via batch</title>
</entry>
<entry>
<batch:id>3</batch:id>
<batch:operation type='update' />
<id>http://www.google.com/calendar/feeds/default/private/full/ujm0go5dtngdkr6u91dcqvj0qs</id>
<category scheme='http://schemas.google.com/g/2005#kind' term='http://schemas.google.com/g/2005#event' />
<title type='text'>Event updated via batch</title>
<link rel='alternate' type='text/html'
href='http://www.google.com/calendar/event?eid=dWptMGdvNWR0bmdka3I2dTkxZGNxdmowcXMgaGFyaXNodi50ZXN0QG0' title='alternate' />
<link rel='self' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full/ujm0go5dtngdkr6u91dcqvj0qs' />
<link rel='edit' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full/ujm0go5dtngdkr6u91dcqvj0qs/63326098791' />
</entry>
<entry>
<batch:id>4</batch:id>
<batch:operation type='delete' />
<id>http://www.google.com/calendar/feeds/default/private/full/d8qbg9egk1n6lhsgq1sjbqffqc</id>
<category scheme='http://schemas.google.com/g/2005#kind' term='http://schemas.google.com/g/2005#event' />
<title type='text'>Event deleted via batch</title>
<link rel='alternate' type='text/html'
href='http://www.google.com/calendar/event?eid=ZDhxYmc5ZWdrMW42bGhzZ3Exc2picWZmcWMgaGFyaXNodi50ZXN0QG0' title='alternate' />
<link rel='self' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full/d8qbg9egk1n6lhsgq1sjbqffqc' />
<link rel='edit' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full/d8qbg9egk1n6lhsgq1sjbqffqc/63326018324' />
</entry>
</feed>
"""
CALENDAR_BATCH_RESPONSE = """<?xml version='1.0' encoding='UTF-8'?>
<feed xmlns='http://www.w3.org/2005/Atom'
xmlns:batch='http://schemas.google.com/gdata/batch'
xmlns:gCal='http://schemas.google.com/gCal/2005'>
<id>http://www.google.com/calendar/feeds/default/private/full</id>
<updated>2007-09-21T23:01:00.380Z</updated>
<category scheme='http://schemas.google.com/g/2005#kind' term='http://schemas.google.com/g/2005#event'></category>
<title type='text'>Batch Feed</title>
<link rel='http://schemas.google.com/g/2005#feed' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full' />
<link rel='http://schemas.google.com/g/2005#post' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full' />
<link rel='http://schemas.google.com/g/2005#batch' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full/batch' />
<entry>
<batch:id>1</batch:id>
<batch:status code='201' reason='Created' />
<batch:operation type='insert' />
<id>http://www.google.com/calendar/feeds/default/private/full/n9ug78gd9tv53ppn4hdjvk68ek</id>
<category scheme='http://schemas.google.com/g/2005#kind' term='http://schemas.google.com/g/2005#event' />
<title type='text'>Event inserted via batch</title>
<link rel='alternate' type='text/html'
href='http://www.google.com/calendar/event?eid=bjl1Zzc4Z2Q5dHY1M3BwbjRoZGp2azY4ZWsgaGFyaXNodi50ZXN0QG0' title='alternate' />
<link rel='self' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full/n9ug78gd9tv53ppn4hdjvk68ek' />
<link rel='edit' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full/n9ug78gd9tv53ppn4hdjvk68ek/63326098860' />
</entry>
<entry>
<batch:id>2</batch:id>
<batch:status code='200' reason='Success' />
<batch:operation type='query' />
<id>http://www.google.com/calendar/feeds/default/private/full/glsc0kv2aqa0ff52qi1jo018gc</id>
<category scheme='http://schemas.google.com/g/2005#kind' term='http://schemas.google.com/g/2005#event' />
<title type='text'>Event queried via batch</title>
<link rel='alternate' type='text/html'
href='http://www.google.com/calendar/event?eid=Z2xzYzBrdjJhcWEwZmY1MnFpMWpvMDE4Z2MgaGFyaXNodi50ZXN0QG0' title='alternate' />
<link rel='self' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full/glsc0kv2aqa0ff52qi1jo018gc' />
<link rel='edit' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full/glsc0kv2aqa0ff52qi1jo018gc/63326098791' />
</entry>
<entry xmlns:gCal='http://schemas.google.com/gCal/2005'>
<batch:id>3</batch:id>
<batch:status code='200' reason='Success' />
<batch:operation type='update' />
<id>http://www.google.com/calendar/feeds/default/private/full/ujm0go5dtngdkr6u91dcqvj0qs</id>
<category scheme='http://schemas.google.com/g/2005#kind' term='http://schemas.google.com/g/2005#event' />
<title type='text'>Event updated via batch</title>
<link rel='alternate' type='text/html'
href='http://www.google.com/calendar/event?eid=dWptMGdvNWR0bmdka3I2dTkxZGNxdmowcXMgaGFyaXNodi50ZXN0QG0' title='alternate' />
<link rel='self' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full/ujm0go5dtngdkr6u91dcqvj0qs' />
<link rel='edit' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full/ujm0go5dtngdkr6u91dcqvj0qs/63326098860' />
<batch:id>3</batch:id>
<batch:status code='200' reason='Success' />
<batch:operation type='update' />
</entry>
<entry>
<batch:id>4</batch:id>
<batch:status code='200' reason='Success' />
<batch:operation type='delete' />
<id>http://www.google.com/calendar/feeds/default/private/full/d8qbg9egk1n6lhsgq1sjbqffqc</id>
<category scheme='http://schemas.google.com/g/2005#kind' term='http://schemas.google.com/g/2005#event' />
<title type='text'>Event deleted via batch</title>
<content type='text'>Deleted</content>
</entry>
</feed>
"""
GBASE_ATTRIBUTE_FEED = """<?xml version='1.0' encoding='UTF-8'?>
<feed xmlns='http://www.w3.org/2005/Atom' xmlns:openSearch='http://a9.com/-/spec/opensearchrss/1.0/' xmlns:gm='http://base.google.com/ns-metadata/1.0'>
<id>http://www.google.com/base/feeds/attributes</id>
<updated>2006-11-01T20:35:59.578Z</updated>
<category scheme='http://base.google.com/categories/itemtypes' term='online jobs'></category>
<category scheme='http://base.google.com/categories/itemtypes' term='jobs'></category>
<title type='text'>Attribute histogram for query: [item type:jobs]</title>
<link rel='alternate' type='text/html' href='http://base.google.com'></link>
<link rel='http://schemas.google.com/g/2005#feed' type='application/atom+xml' href='http://www.google.com/base/feeds
/attributes'></link>
<link rel='self' type='application/atom+xml' href='http://www.google.com/base/feeds/attributes/-/jobs'></link>
<generator version='1.0' uri='http://base.google.com'>GoogleBase</generator>
<openSearch:totalResults>16</openSearch:totalResults>
<openSearch:startIndex>1</openSearch:startIndex>
<openSearch:itemsPerPage>16</openSearch:itemsPerPage>
<entry>
<id>http://www.google.com/base/feeds/attributes/job+industry%28text%29N%5Bitem+type%3Ajobs%5D</id>
<updated>2006-11-01T20:36:00.100Z</updated>
<title type='text'>job industry(text)</title>
<content type='text'>Attribute"job industry" of type text.
</content>
<link rel='self' type='application/atom+xml' href='http://www.google.com/base/feeds/attributes/job+industry%28text
%29N%5Bitem+type%3Ajobs%5D'></link>
<gm:attribute name='job industry' type='text' count='4416629'>
<gm:value count='380772'>it internet</gm:value>
<gm:value count='261565'>healthcare</gm:value>
<gm:value count='142018'>information technology</gm:value>
<gm:value count='124622'>accounting</gm:value>
<gm:value count='111311'>clerical and administrative</gm:value>
<gm:value count='82928'>other</gm:value>
<gm:value count='77620'>sales and sales management</gm:value>
<gm:value count='68764'>information systems</gm:value>
<gm:value count='65859'>engineering and architecture</gm:value>
<gm:value count='64757'>sales</gm:value>
</gm:attribute>
</entry>
</feed>
"""
GBASE_ATTRIBUTE_ENTRY = """<?xml version='1.0' encoding='UTF-8'?>
<entry xmlns='http://www.w3.org/2005/Atom' xmlns:openSearch='http://a9.com/-/spec/opensearchrss/1.0/' xmlns:gm='http://base.google.com/ns-metadata/1.0'>
<id>http://www.google.com/base/feeds/attributes/job+industry%28text%29N%5Bitem+type%3Ajobs%5D</id>
<updated>2006-11-01T20:36:00.100Z</updated>
<title type='text'>job industry(text)</title>
<content type='text'>Attribute"job industry" of type text.
</content>
<link rel='self' type='application/atom+xml' href='http://www.google.com/base/feeds/attributes/job+industry%28text%29N%5Bitem+type%3Ajobs%5D'></link>
<gm:attribute name='job industry' type='text' count='4416629'>
<gm:value count='380772'>it internet</gm:value>
<gm:value count='261565'>healthcare</gm:value>
<gm:value count='142018'>information technology</gm:value>
<gm:value count='124622'>accounting</gm:value>
<gm:value count='111311'>clerical and administrative</gm:value>
<gm:value count='82928'>other</gm:value>
<gm:value count='77620'>sales and sales management</gm:value>
<gm:value count='68764'>information systems</gm:value>
<gm:value count='65859'>engineering and architecture</gm:value>
<gm:value count='64757'>sales</gm:value>
</gm:attribute>
</entry>
"""
GBASE_LOCALES_FEED = """<?xml version='1.0' encoding='UTF-8'?>
<feed xmlns='http://www.w3.org/2005/Atom'
xmlns:openSearch='http://a9.com/-/spec/opensearchrss/1.0/'
xmlns:gm='http://base.google.com/ns-metadata/1.0'>
<id> http://www.google.com/base/feeds/locales/</id>
<updated>2006-06-13T18:11:40.120Z</updated>
<title type="text">Locales</title>
<link rel="alternate" type="text/html" href="http://base.google.com"/>
<link rel="http://schemas.google.com/g/2005#feed" type="application/atom+xml"
href="http://www.google.com/base/feeds/locales/"/>
<link rel="self" type="application/atom+xml" href="http://www.google.com/base/feeds/locales/"/>
<author>
<name>Google Inc.</name>
<email>base@google.com</email>
</author>
<generator version="1.0" uri="http://base.google.com">GoogleBase</generator>
<openSearch:totalResults>3</openSearch:totalResults>
<openSearch:itemsPerPage>25</openSearch:itemsPerPage>
<entry>
<id>http://www.google.com/base/feeds/locales/en_US</id>
<updated>2006-03-27T22:27:36.658Z</updated>
<category scheme="http://base.google.com/categories/locales" term="en_US"/>
<title type="text">en_US</title>
<content type="text">en_US</content>
<link rel="self" type="application/atom+xml"
href="http://www.google.com/base/feeds/locales/en_US"></link>
<link rel="related" type="application/atom+xml"
href="http://www.google.com/base/feeds/itemtypes/en_US" title="Item types in en_US"/>
</entry>
<entry>
<id>http://www.google.com/base/feeds/locales/en_GB</id>
<updated>2006-06-13T18:14:18.601Z</updated>
<category scheme="http://base.google.com/categories/locales" term="en_GB"/>
<title type="text">en_GB</title>
<content type="text">en_GB</content>
<link rel="related" type="application/atom+xml"
href="http://www.google.com/base/feeds/itemtypes/en_GB" title="Item types in en_GB"/>
<link rel="self" type="application/atom+xml"
href="http://www.google.com/base/feeds/locales/en_GB"/>
</entry>
<entry>
<id>http://www.google.com/base/feeds/locales/de_DE</id>
<updated>2006-06-13T18:14:18.601Z</updated>
<category scheme="http://base.google.com/categories/locales" term="de_DE"/>
<title type="text">de_DE</title>
<content type="text">de_DE</content>
<link rel="related" type="application/atom+xml"
href="http://www.google.com/base/feeds/itemtypes/de_DE" title="Item types in de_DE"/>
<link rel="self" type="application/atom+xml"
href="http://www.google.com/base/feeds/locales/de_DE"/>
</entry>
</feed>"""
RECURRENCE_EXCEPTION_ENTRY = """<entry xmlns='http://www.w3.org/2005/Atom'
xmlns:openSearch='http://a9.com/-/spec/opensearchrss/1.0/'
xmlns:gd='http://schemas.google.com/g/2005'
xmlns:gCal='http://schemas.google.com/gCal/2005'>
<id>
http://www.google.com/calendar/feeds/default/private/composite/i7lgfj69mjqjgnodklif3vbm7g</id>
<published>2007-04-05T21:51:49.000Z</published>
<updated>2007-04-05T21:51:49.000Z</updated>
<category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/g/2005#event'></category>
<title type='text'>testDavid</title>
<content type='text'></content>
<link rel='alternate' type='text/html'
href='http://www.google.com/calendar/event?eid=aTdsZ2ZqNjltanFqZ25vZGtsaWYzdmJtN2dfMjAwNzA0MDNUMTgwMDAwWiBnZGF0YS5vcHMudGVzdEBt'
title='alternate'></link>
<link rel='self' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/composite/i7lgfj69mjqjgnodklif3vbm7g'>
</link>
<author>
<name>gdata ops</name>
<email>gdata.ops.test@gmail.com</email>
</author>
<gd:visibility value='http://schemas.google.com/g/2005#event.default'>
</gd:visibility>
<gCal:sendEventNotifications value='true'>
</gCal:sendEventNotifications>
<gd:transparency value='http://schemas.google.com/g/2005#event.opaque'>
</gd:transparency>
<gd:eventStatus value='http://schemas.google.com/g/2005#event.confirmed'>
</gd:eventStatus>
<gd:recurrence>DTSTART;TZID=America/Anchorage:20070403T100000
DTEND;TZID=America/Anchorage:20070403T110000
RRULE:FREQ=DAILY;UNTIL=20070408T180000Z;WKST=SU
EXDATE;TZID=America/Anchorage:20070407T100000
EXDATE;TZID=America/Anchorage:20070405T100000
EXDATE;TZID=America/Anchorage:20070404T100000 BEGIN:VTIMEZONE
TZID:America/Anchorage X-LIC-LOCATION:America/Anchorage
BEGIN:STANDARD TZOFFSETFROM:-0800 TZOFFSETTO:-0900 TZNAME:AKST
DTSTART:19701025T020000 RRULE:FREQ=YEARLY;BYMONTH=10;BYDAY=-1SU
END:STANDARD BEGIN:DAYLIGHT TZOFFSETFROM:-0900 TZOFFSETTO:-0800
TZNAME:AKDT DTSTART:19700405T020000
RRULE:FREQ=YEARLY;BYMONTH=4;BYDAY=1SU END:DAYLIGHT
END:VTIMEZONE</gd:recurrence>
<gd:where valueString=''></gd:where>
<gd:reminder minutes='10'></gd:reminder>
<gd:recurrenceException specialized='true'>
<gd:entryLink>
<entry>
<id>i7lgfj69mjqjgnodklif3vbm7g_20070407T180000Z</id>
<published>2007-04-05T21:51:49.000Z</published>
<updated>2007-04-05T21:52:58.000Z</updated>
<category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/g/2005#event'></category>
<title type='text'>testDavid</title>
<content type='text'></content>
<link rel='alternate' type='text/html'
href='http://www.google.com/calendar/event?eid=aTdsZ2ZqNjltanFqZ25vZGtsaWYzdmJtN2dfMjAwNzA0MDdUMTgwMDAwWiBnZGF0YS5vcHMudGVzdEBt'
title='alternate'></link>
<author>
<name>gdata ops</name>
<email>gdata.ops.test@gmail.com</email>
</author>
<gd:visibility value='http://schemas.google.com/g/2005#event.default'>
</gd:visibility>
<gd:originalEvent id='i7lgfj69mjqjgnodklif3vbm7g'
href='http://www.google.com/calendar/feeds/default/private/composite/i7lgfj69mjqjgnodklif3vbm7g'>
<gd:when startTime='2007-04-07T13:00:00.000-05:00'>
</gd:when>
</gd:originalEvent>
<gCal:sendEventNotifications value='false'>
</gCal:sendEventNotifications>
<gd:transparency value='http://schemas.google.com/g/2005#event.opaque'>
</gd:transparency>
<gd:eventStatus value='http://schemas.google.com/g/2005#event.canceled'>
</gd:eventStatus>
<gd:comments>
<gd:feedLink href='http://www.google.com/calendar/feeds/default/private/full/i7lgfj69mjqjgnodklif3vbm7g_20070407T180000Z/comments'>
<feed>
<updated>2007-04-05T21:54:09.285Z</updated>
<category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/g/2005#message'>
</category>
<title type='text'>Comments for: testDavid</title>
<link rel='alternate' type='text/html'
href='http://www.google.com/calendar/feeds/default/private/full/i7lgfj69mjqjgnodklif3vbm7g_20070407T180000Z/comments'
title='alternate'></link>
</feed>
</gd:feedLink>
</gd:comments>
<gd:when startTime='2007-04-07T13:00:00.000-05:00'
endTime='2007-04-07T14:00:00.000-05:00'>
<gd:reminder minutes='10'></gd:reminder>
</gd:when>
<gd:where valueString=''></gd:where>
</entry>
</gd:entryLink>
</gd:recurrenceException>
</entry>"""
NICK_ENTRY = """<?xml version="1.0" encoding="UTF-8"?>
<atom:entry xmlns:atom="http://www.w3.org/2005/Atom"
xmlns:apps="http://schemas.google.com/apps/2006"
xmlns:gd="http://schemas.google.com/g/2005">
<atom:id>https://www.google.com/a/feeds/example.com/nickname/2.0/Foo</atom:id>
<atom:updated>1970-01-01T00:00:00.000Z</atom:updated>
<atom:category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/apps/2006#nickname'/>
<atom:title type="text">Foo</atom:title>
<atom:link rel="self" type="application/atom+xml"
href="https://www.google.com/a/feeds/example.com/nickname/2.0/Foo"/>
<atom:link rel="edit" type="application/atom+xml"
href="https://www.google.com/a/feeds/example.com/nickname/2.0/Foo"/>
<apps:nickname name="Foo"/>
<apps:login userName="TestUser"/>
</atom:entry>"""
NICK_FEED = """<?xml version="1.0" encoding="UTF-8"?>
<atom:feed xmlns:atom="http://www.w3.org/2005/Atom"
xmlns:openSearch="http://a9.com/-/spec/opensearchrss/1.0/"
xmlns:apps="http://schemas.google.com/apps/2006">
<atom:id>
http://www.google.com/a/feeds/example.com/nickname/2.0
</atom:id>
<atom:updated>1970-01-01T00:00:00.000Z</atom:updated>
<atom:category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/apps/2006#nickname'/>
<atom:title type="text">Nicknames for user SusanJones</atom:title>
<atom:link rel='http://schemas.google.com/g/2005#feed'
type="application/atom+xml"
href="http://www.google.com/a/feeds/example.com/nickname/2.0"/>
<atom:link rel='http://schemas.google.com/g/2005#post'
type="application/atom+xml"
href="http://www.google.com/a/feeds/example.com/nickname/2.0"/>
<atom:link rel="self" type="application/atom+xml"
href="http://www.google.com/a/feeds/example.com/nickname/2.0?username=TestUser"/>
<openSearch:startIndex>1</openSearch:startIndex>
<openSearch:itemsPerPage>2</openSearch:itemsPerPage>
<atom:entry>
<atom:id>
http://www.google.com/a/feeds/example.com/nickname/2.0/Foo
</atom:id>
<atom:category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/apps/2006#nickname'/>
<atom:title type="text">Foo</atom:title>
<atom:link rel="self" type="application/atom+xml"
href="http://www.google.com/a/feeds/example.com/nickname/2.0/Foo"/>
<atom:link rel="edit" type="application/atom+xml"
href="http://www.google.com/a/feeds/example.com/nickname/2.0/Foo"/>
<apps:nickname name="Foo"/>
<apps:login userName="TestUser"/>
</atom:entry>
<atom:entry>
<atom:id>
http://www.google.com/a/feeds/example.com/nickname/2.0/suse
</atom:id>
<atom:category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/apps/2006#nickname'/>
<atom:title type="text">suse</atom:title>
<atom:link rel="self" type="application/atom+xml"
href="http://www.google.com/a/feeds/example.com/nickname/2.0/Bar"/>
<atom:link rel="edit" type="application/atom+xml"
href="http://www.google.com/a/feeds/example.com/nickname/2.0/Bar"/>
<apps:nickname name="Bar"/>
<apps:login userName="TestUser"/>
</atom:entry>
</atom:feed>"""
USER_ENTRY = """<?xml version="1.0" encoding="UTF-8"?>
<atom:entry xmlns:atom="http://www.w3.org/2005/Atom"
xmlns:apps="http://schemas.google.com/apps/2006"
xmlns:gd="http://schemas.google.com/g/2005">
<atom:id>https://www.google.com/a/feeds/example.com/user/2.0/TestUser</atom:id>
<atom:updated>1970-01-01T00:00:00.000Z</atom:updated>
<atom:category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/apps/2006#user'/>
<atom:title type="text">TestUser</atom:title>
<atom:link rel="self" type="application/atom+xml"
href="https://www.google.com/a/feeds/example.com/user/2.0/TestUser"/>
<atom:link rel="edit" type="application/atom+xml"
href="https://www.google.com/a/feeds/example.com/user/2.0/TestUser"/>
<apps:login userName="TestUser" password="password" suspended="false"
ipWhitelisted='false' hashFunctionName="SHA-1"/>
<apps:name familyName="Test" givenName="User"/>
<apps:quota limit="1024"/>
<gd:feedLink rel='http://schemas.google.com/apps/2006#user.nicknames'
href="https://www.google.com/a/feeds/example.com/nickname/2.0?username=Test-3121"/>
<gd:feedLink rel='http://schemas.google.com/apps/2006#user.emailLists'
href="https://www.google.com/a/feeds/example.com/emailList/2.0?recipient=testlist@example.com"/>
</atom:entry>"""
USER_FEED = """<?xml version="1.0" encoding="UTF-8"?>
<atom:feed xmlns:atom="http://www.w3.org/2005/Atom"
xmlns:apps="http://schemas.google.com/apps/2006"
xmlns:openSearch="http://a9.com/-/spec/opensearchrss/1.0/"
xmlns:gd="http://schemas.google.com/g/2005">
<atom:id>
http://www.google.com/a/feeds/example.com/user/2.0
</atom:id>
<atom:updated>1970-01-01T00:00:00.000Z</atom:updated>
<atom:category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/apps/2006#user'/>
<atom:title type="text">Users</atom:title>
<atom:link rel="next" type="application/atom+xml"
href="http://www.google.com/a/feeds/example.com/user/2.0?startUsername=john"/>
<atom:link rel='http://schemas.google.com/g/2005#feed'
type="application/atom+xml"
href="http://www.google.com/a/feeds/example.com/user/2.0"/>
<atom:link rel='http://schemas.google.com/g/2005#post'
type="application/atom+xml"
href="http://www.google.com/a/feeds/example.com/user/2.0"/>
<atom:link rel="self" type="application/atom+xml"
href="http://www.google.com/a/feeds/example.com/user/2.0"/>
<openSearch:startIndex>1</openSearch:startIndex>
<atom:entry>
<atom:id>
http://www.google.com/a/feeds/example.com/user/2.0/TestUser
</atom:id>
<atom:category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/apps/2006#user'/>
<atom:title type="text">TestUser</atom:title>
<atom:link rel="self" type="application/atom+xml"
href="http://www.google.com/a/feeds/example.com/user/2.0/TestUser"/>
<atom:link rel="edit" type="application/atom+xml"
href="http://www.google.com/a/feeds/example.com/user/2.0/TestUser"/>
<gd:who rel='http://schemas.google.com/apps/2006#user.recipient'
email="TestUser@example.com"/>
<apps:login userName="TestUser" suspended="false"/>
<apps:quota limit="2048"/>
<apps:name familyName="Test" givenName="User"/>
<gd:feedLink rel='http://schemas.google.com/apps/2006#user.nicknames'
href="http://www.google.com/a/feeds/example.com/nickname/2.0?username=TestUser"/>
<gd:feedLink rel='http://schemas.google.com/apps/2006#user.emailLists'
href="http://www.google.com/a/feeds/example.com/emailList/2.0?recipient=TestUser@example.com"/>
</atom:entry>
<atom:entry>
<atom:id>
http://www.google.com/a/feeds/example.com/user/2.0/JohnSmith
</atom:id>
<atom:category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/apps/2006#user'/>
<atom:title type="text">JohnSmith</atom:title>
<atom:link rel="self" type="application/atom+xml"
href="http://www.google.com/a/feeds/example.com/user/2.0/JohnSmith"/>
<atom:link rel="edit" type="application/atom+xml"
href="http://www.google.com/a/feeds/example.com/user/2.0/JohnSmith"/>
<gd:who rel='http://schemas.google.com/apps/2006#user.recipient'
email="JohnSmith@example.com"/>
<apps:login userName="JohnSmith" suspended="false"/>
<apps:quota limit="2048"/>
<apps:name familyName="Smith" givenName="John"/>
<gd:feedLink rel='http://schemas.google.com/apps/2006#user.nicknames'
href="http://www.google.com/a/feeds/example.com/nickname/2.0?username=JohnSmith"/>
<gd:feedLink rel='http://schemas.google.com/apps/2006#user.emailLists'
href="http://www.google.com/a/feeds/example.com/emailList/2.0?recipient=JohnSmith@example.com"/>
</atom:entry>
</atom:feed>"""
EMAIL_LIST_ENTRY = """<?xml version="1.0" encoding="UTF-8"?>
<atom:entry xmlns:atom="http://www.w3.org/2005/Atom"
xmlns:apps="http://schemas.google.com/apps/2006"
xmlns:gd="http://schemas.google.com/g/2005">
<atom:id>
https://www.google.com/a/feeds/example.com/emailList/2.0/testlist
</atom:id>
<atom:updated>1970-01-01T00:00:00.000Z</atom:updated>
<atom:category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/apps/2006#emailList'/>
<atom:title type="text">testlist</atom:title>
<atom:link rel="self" type="application/atom+xml"
href="https://www.google.com/a/feeds/example.com/emailList/2.0/testlist"/>
<atom:link rel="edit" type="application/atom+xml"
href="https://www.google.com/a/feeds/example.com/emailList/2.0/testlist"/>
<apps:emailList name="testlist"/>
<gd:feedLink rel='http://schemas.google.com/apps/2006#emailList.recipients'
href="http://www.google.com/a/feeds/example.com/emailList/2.0/testlist/recipient/"/>
</atom:entry>"""
EMAIL_LIST_FEED = """<?xml version="1.0" encoding="UTF-8"?>
<atom:feed xmlns:atom="http://www.w3.org/2005/Atom"
xmlns:openSearch="http://a9.com/-/spec/opensearchrss/1.0/"
xmlns:apps="http://schemas.google.com/apps/2006"
xmlns:gd="http://schemas.google.com/g/2005">
<atom:id>
http://www.google.com/a/feeds/example.com/emailList/2.0
</atom:id>
<atom:updated>1970-01-01T00:00:00.000Z</atom:updated>
<atom:category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/apps/2006#emailList'/>
<atom:title type="text">EmailLists</atom:title>
<atom:link rel="next" type="application/atom+xml"
href="http://www.google.com/a/feeds/example.com/emailList/2.0?startEmailListName=john"/>
<atom:link rel='http://schemas.google.com/g/2005#feed'
type="application/atom+xml"
href="http://www.google.com/a/feeds/example.com/emailList/2.0"/>
<atom:link rel='http://schemas.google.com/g/2005#post'
type="application/atom+xml"
href="http://www.google.com/a/feeds/example.com/emailList/2.0"/>
<atom:link rel="self" type="application/atom+xml"
href="http://www.google.com/a/feeds/example.com/emailList/2.0"/>
<openSearch:startIndex>1</openSearch:startIndex>
<atom:entry>
<atom:id>
http://www.google.com/a/feeds/example.com/emailList/2.0/us-sales
</atom:id>
<atom:updated>1970-01-01T00:00:00.000Z</atom:updated>
<atom:category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/apps/2006#emailList'/>
<atom:title type="text">us-sales</atom:title>
<atom:link rel="self" type="application/atom+xml"
href="http://www.google.com/a/feeds/example.com/emailList/2.0/us-sales"/>
<atom:link rel="edit" type="application/atom+xml"
href="http://www.google.com/a/feeds/example.com/emailList/2.0/us-sales"/>
<apps:emailList name="us-sales"/>
<gd:feedLink rel='http://schemas.google.com/apps/2006#emailList.recipients'
href="http://www.google.com/a/feeds/example.com/emailList/2.0/us-sales/recipient/"/>
</atom:entry>
<atom:entry>
<atom:id>
http://www.google.com/a/feeds/example.com/emailList/2.0/us-eng
</atom:id>
<atom:updated>1970-01-01T00:00:00.000Z</atom:updated>
<atom:category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/apps/2006#emailList'/>
<atom:title type="text">us-eng</atom:title>
<atom:link rel="self" type="application/atom+xml"
href="http://www.google.com/a/feeds/example.com/emailList/2.0/us-eng"/>
<atom:link rel="edit" type="application/atom+xml"
href="http://www.google.com/a/feeds/example.com/emailList/2.0/us-eng"/>
<apps:emailList name="us-eng"/>
<gd:feedLink rel='http://schemas.google.com/apps/2006#emailList.recipients'
href="http://www.google.com/a/feeds/example.com/emailList/2.0/us-eng/recipient/"/>
</atom:entry>
</atom:feed>"""
EMAIL_LIST_RECIPIENT_ENTRY = """<?xml version="1.0" encoding="UTF-8"?>
<atom:entry xmlns:atom="http://www.w3.org/2005/Atom"
xmlns:apps="http://schemas.google.com/apps/2006"
xmlns:gd="http://schemas.google.com/g/2005">
<atom:id>https://www.google.com/a/feeds/example.com/emailList/2.0/us-sales/recipient/TestUser%40example.com</atom:id>
<atom:updated>1970-01-01T00:00:00.000Z</atom:updated>
<atom:category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/apps/2006#emailList.recipient'/>
<atom:title type="text">TestUser</atom:title>
<atom:link rel="self" type="application/atom+xml"
href="https://www.google.com/a/feeds/example.com/emailList/2.0/us-sales/recipient/TestUser%40example.com"/>
<atom:link rel="edit" type="application/atom+xml"
href="https://www.google.com/a/feeds/example.com/emailList/2.0/us-sales/recipient/TestUser%40example.com"/>
<gd:who email="TestUser@example.com"/>
</atom:entry>"""
EMAIL_LIST_RECIPIENT_FEED = """<?xml version="1.0" encoding="UTF-8"?>
<atom:feed xmlns:atom="http://www.w3.org/2005/Atom"
xmlns:openSearch="http://a9.com/-/spec/opensearchrss/1.0/"
xmlns:gd="http://schemas.google.com/g/2005">
<atom:id>
http://www.google.com/a/feeds/example.com/emailList/2.0/us-sales/recipient
</atom:id>
<atom:updated>1970-01-01T00:00:00.000Z</atom:updated>
<atom:category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/apps/2006#emailList.recipient'/>
<atom:title type="text">Recipients for email list us-sales</atom:title>
<atom:link rel="next" type="application/atom+xml"
href="http://www.google.com/a/feeds/example.com/emailList/2.0/us-sales/recipient/?startRecipient=terry@example.com"/>
<atom:link rel='http://schemas.google.com/g/2005#feed'
type="application/atom+xml"
href="http://www.google.com/a/feeds/example.com/emailList/2.0/us-sales/recipient"/>
<atom:link rel='http://schemas.google.com/g/2005#post'
type="application/atom+xml"
href="http://www.google.com/a/feeds/example.com/emailList/2.0/us-sales/recipient"/>
<atom:link rel="self" type="application/atom+xml"
href="http://www.google.com/a/feeds/example.com/emailList/2.0/us-sales/recipient"/>
<openSearch:startIndex>1</openSearch:startIndex>
<atom:entry>
<atom:id>
http://www.google.com/a/feeds/example.com/emailList/2.0/us-sales/recipient/joe%40example.com
</atom:id>
<atom:updated>1970-01-01T00:00:00.000Z</atom:updated>
<atom:category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/apps/2006#emailList.recipient'/>
<atom:title type="text">joe@example.com</atom:title>
<atom:link rel="self" type="application/atom+xml"
href="http://www.google.com/a/feeds/example.com/emailList/2.0/us-sales/recipient/joe%40example.com"/>
<atom:link rel="edit" type="application/atom+xml"
href="http://www.google.com/a/feeds/example.com/emailList/2.0/us-sales/recipient/joe%40example.com"/>
<gd:who email="joe@example.com"/>
</atom:entry>
<atom:entry>
<atom:id>
http://www.google.com/a/feeds/example.com/emailList/2.0/us-sales/recipient/susan%40example.com
</atom:id>
<atom:updated>1970-01-01T00:00:00.000Z</atom:updated>
<atom:category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/apps/2006#emailList.recipient'/>
<atom:title type="text">susan@example.com</atom:title>
<atom:link rel="self" type="application/atom+xml"
href="http://www.google.com/a/feeds/example.com/emailList/2.0/us-sales/recipient/susan%40example.com"/>
<atom:link rel="edit" type="application/atom+xml"
href="http://www.google.com/a/feeds/example.com/emailList/2.0/us-sales/recipient/susan%40example.com"/>
<gd:who email="susan@example.com"/>
</atom:entry>
</atom:feed>"""
ACL_FEED = """<?xml version='1.0' encoding='UTF-8'?>
<feed xmlns='http://www.w3.org/2005/Atom'
xmlns:openSearch='http://a9.com/-/spec/opensearchrss/1.0/'
xmlns:gAcl='http://schemas.google.com/acl/2007'>
<id>http://www.google.com/calendar/feeds/liz%40gmail.com/acl/full</id>
<updated>2007-04-21T00:52:04.000Z</updated>
<title type='text'>Elizabeth Bennet's access control list</title>
<link rel='http://schemas.google.com/acl/2007#controlledObject'
type='application/atom+xml'
href='http://www.google.com/calendar/feeds/liz%40gmail.com/private/full'>
</link>
<link rel='http://schemas.google.com/g/2005#feed'
type='application/atom+xml'
href='http://www.google.com/calendar/feeds/liz%40gmail.com/acl/full'>
</link>
<link rel='http://schemas.google.com/g/2005#post'
type='application/atom+xml'
href='http://www.google.com/calendar/feeds/liz%40gmail.com/acl/full'>
</link>
<link rel='self' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/liz%40gmail.com/acl/full'>
</link>
<generator version='1.0'
uri='http://www.google.com/calendar'>Google Calendar</generator>
<openSearch:totalResults>2</openSearch:totalResults>
<openSearch:startIndex>1</openSearch:startIndex>
<entry>
<id>http://www.google.com/calendar/feeds/liz%40gmail.com/acl/full/user%3Aliz%40gmail.com</id>
<updated>2007-04-21T00:52:04.000Z</updated>
<category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/acl/2007#accessRule'>
</category>
<title type='text'>owner</title>
<content type='text'></content>
<link rel='self' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/liz%40gmail.com/acl/full/user%3Aliz%40gmail.com'>
</link>
<link rel='edit' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/liz%40gmail.com/acl/full/user%3Aliz%40gmail.com'>
</link>
<author>
<name>Elizabeth Bennet</name>
<email>liz@gmail.com</email>
</author>
<gAcl:scope type='user' value='liz@gmail.com'></gAcl:scope>
<gAcl:role value='http://schemas.google.com/gCal/2005#owner'>
</gAcl:role>
</entry>
<entry>
<id>http://www.google.com/calendar/feeds/liz%40gmail.com/acl/full/default</id>
<updated>2007-04-21T00:52:04.000Z</updated>
<category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/acl/2007#accessRule'>
</category>
<title type='text'>read</title>
<content type='text'></content>
<link rel='self' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/liz%40gmail.com/acl/full/default'>
</link>
<link rel='edit' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/liz%40gmail.com/acl/full/default'>
</link>
<author>
<name>Elizabeth Bennet</name>
<email>liz@gmail.com</email>
</author>
<gAcl:scope type='default'></gAcl:scope>
<gAcl:role value='http://schemas.google.com/gCal/2005#read'>
</gAcl:role>
</entry>
</feed>"""
ACL_ENTRY = """<?xml version='1.0' encoding='UTF-8'?>
<entry xmlns='http://www.w3.org/2005/Atom' xmlns:openSearch='http://a9.com/-/spec/opensearchrss/1.0/' xmlns:gd='http://schemas.google.com/g/2005' xmlns:gCal='http://schemas.google.com/gCal/2005' xmlns:gAcl='http://schemas.google.com/acl/2007'>
<id>http://www.google.com/calendar/feeds/liz%40gmail.com/acl/full/user%3Aliz%40gmail.com</id>
<updated>2007-04-21T00:52:04.000Z</updated>
<category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/acl/2007#accessRule'>
</category>
<title type='text'>owner</title>
<content type='text'></content>
<link rel='self' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/liz%40gmail.com/acl/full/user%3Aliz%40gmail.com'>
</link>
<link rel='edit' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/liz%40gmail.com/acl/full/user%3Aliz%40gmail.com'>
</link>
<author>
<name>Elizabeth Bennet</name>
<email>liz@gmail.com</email>
</author>
<gAcl:scope type='user' value='liz@gmail.com'></gAcl:scope>
<gAcl:role value='http://schemas.google.com/gCal/2005#owner'>
</gAcl:role>
</entry>"""
# Google Documents List feed with two entries (a spreadsheet and a document),
# serialized with ns0/ns1 prefixes and hard-wrapped mid-element as emitted by
# a generic XML serializer. Contains raw '&' in query strings (not '&amp;') —
# presumably intentional malformed-input test data; keep byte-identical.
DOCUMENT_LIST_FEED = """<?xml version='1.0' encoding='UTF-8'?>
<ns0:feed xmlns:ns0="http://www.w3.org/2005/Atom"><ns1:totalResults
xmlns:ns1="http://a9.com/-/spec/opensearchrss/1.0/">2</ns1:totalResults><ns1:startIndex
xmlns:ns1="http://a9.com/-/spec/opensearchrss/1.0/">1</ns1:startIndex><ns0:entry><ns0:content
src="http://foo.com/fm?fmcmd=102&key=supercalifragilisticexpeadocious"
type="text/html"
/><ns0:author><ns0:name>test.user</ns0:name><ns0:email>test.user@gmail.com</ns0:email></ns0:author><ns0:category
label="spreadsheet" scheme="http://schemas.google.com/g/2005#kind"
term="http://schemas.google.com/docs/2007#spreadsheet"
/><ns0:id>http://docs.google.com/feeds/documents/private/full/spreadsheet%3Asupercalifragilisticexpeadocious</ns0:id><ns0:link
href="http://foo.com/ccc?key=supercalifragilisticexpeadocious" rel="alternate"
type="text/html" /><ns0:link
href="http://foo.com/feeds/worksheets/supercalifragilisticexpeadocious/private/full"
rel="http://schemas.google.com/spreadsheets/2006#worksheetsfeed"
type="application/atom+xml" /><ns0:link
href="http://docs.google.com/feeds/documents/private/full/spreadsheet%3Asupercalifragilisticexpeadocious"
rel="self" type="application/atom+xml" /><ns0:title type="text">Test Spreadsheet</ns0:title><ns0:updated>2007-07-03T18:03:32.045Z</ns0:updated></ns0:entry><ns0:entry><ns0:content
src="http://docs.google.com/RawDocContents?action=fetch&docID=gr00vy"
type="text/html"
/><ns0:author><ns0:name>test.user</ns0:name><ns0:email>test.user@gmail.com</ns0:email></ns0:author><ns0:category
label="document" scheme="http://schemas.google.com/g/2005#kind"
term="http://schemas.google.com/docs/2007#document"
/><ns0:id>http://docs.google.com/feeds/documents/private/full/document%3Agr00vy</ns0:id><ns0:link
href="http://foobar.com/Doc?id=gr00vy" rel="alternate" type="text/html"
/><ns0:link
href="http://docs.google.com/feeds/documents/private/full/document%3Agr00vy"
rel="self" type="application/atom+xml" /><ns0:title type="text">Test Document</ns0:title><ns0:updated>2007-07-03T18:02:50.338Z</ns0:updated></ns0:entry><ns0:id>http://docs.google.com/feeds/documents/private/full</ns0:id><ns0:link
href="http://docs.google.com" rel="alternate" type="text/html" /><ns0:link
href="http://docs.google.com/feeds/documents/private/full"
rel="http://schemas.google.com/g/2005#feed" type="application/atom+xml"
/><ns0:link href="http://docs.google.com/feeds/documents/private/full"
rel="http://schemas.google.com/g/2005#post" type="application/atom+xml"
/><ns0:link href="http://docs.google.com/feeds/documents/private/full"
rel="self" type="application/atom+xml" /><ns0:title type="text">Available
Documents -
test.user@gmail.com</ns0:title><ns0:updated>2007-07-09T23:07:21.898Z</ns0:updated></ns0:feed>
"""
# Single Google Documents List spreadsheet entry, same serializer style as
# DOCUMENT_LIST_FEED (ns0 prefixes, mid-element line wraps, raw '&' in the
# content src query string — keep byte-identical).
DOCUMENT_LIST_ENTRY = """<?xml version='1.0' encoding='UTF-8'?>
<ns0:entry xmlns:ns0="http://www.w3.org/2005/Atom"><ns0:content
src="http://foo.com/fm?fmcmd=102&key=supercalifragilisticexpealidocious"
type="text/html"
/><ns0:author><ns0:name>test.user</ns0:name><ns0:email>test.user@gmail.com</ns0:email></ns0:author><ns0:category
label="spreadsheet" scheme="http://schemas.google.com/g/2005#kind"
term="http://schemas.google.com/docs/2007#spreadsheet"
/><ns0:id>http://docs.google.com/feeds/documents/private/full/spreadsheet%3Asupercalifragilisticexpealidocious</ns0:id><ns0:link
href="http://foo.com/ccc?key=supercalifragilisticexpealidocious"
rel="alternate" type="text/html" /><ns0:link
href="http://foo.com/feeds/worksheets/supercalifragilisticexpealidocious/private/full"
rel="http://schemas.google.com/spreadsheets/2006#worksheetsfeed"
type="application/atom+xml" /><ns0:link
href="http://docs.google.com/feeds/documents/private/full/spreadsheet%3Asupercalifragilisticexpealidocious"
rel="self" type="application/atom+xml" /><ns0:title type="text">Test Spreadsheet</ns0:title><ns0:updated>2007-07-03T18:03:32.045Z</ns0:updated></ns0:entry>
"""
BATCH_ENTRY = """<?xml version='1.0' encoding='UTF-8'?>
<entry xmlns="http://www.w3.org/2005/Atom"
xmlns:batch="http://schemas.google.com/gdata/batch"
xmlns:g="http://base.google.com/ns/1.0">
<id>http://www.google.com/base/feeds/items/2173859253842813008</id>
<published>2006-07-11T14:51:43.560Z</published>
<updated>2006-07-11T14:51: 43.560Z</updated>
<title type="text">title</title>
<content type="html">content</content>
<link rel="self"
type="application/atom+xml"
href="http://www.google.com/base/feeds/items/2173859253842813008"/>
<link rel="edit"
type="application/atom+xml"
href="http://www.google.com/base/feeds/items/2173859253842813008"/>
<g:item_type>recipes</g:item_type>
<batch:operation type="insert"/>
<batch:id>itemB</batch:id>
<batch:status code="201" reason="Created"/>
</entry>"""
# Canned batch REQUEST feed: two deletes (by id) and two inserts
# (identified by batch:id); fixture for gdata batch submission tests.
BATCH_FEED_REQUEST = """<?xml version="1.0" encoding="UTF-8"?>
<feed
xmlns="http://www.w3.org/2005/Atom"
xmlns:openSearch="http://a9.com/-/spec/opensearchrss/1.0/"
xmlns:g="http://base.google.com/ns/1.0"
xmlns:batch="http://schemas.google.com/gdata/batch">
<title type="text">My Batch Feed</title>
<entry>
<id>http://www.google.com/base/feeds/items/13308004346459454600</id>
<batch:operation type="delete"/>
</entry>
<entry>
<id>http://www.google.com/base/feeds/items/17437536661927313949</id>
<batch:operation type="delete"/>
</entry>
<entry>
<title type="text">...</title>
<content type="html">...</content>
<batch:id>itemA</batch:id>
<batch:operation type="insert"/>
<g:item_type>recipes</g:item_type>
</entry>
<entry>
<title type="text">...</title>
<content type="html">...</content>
<batch:id>itemB</batch:id>
<batch:operation type="insert"/>
<g:item_type>recipes</g:item_type>
</entry>
</feed>"""
# Canned batch RESULT feed matching BATCH_FEED_REQUEST: two successful
# inserts (code 201), one failed delete (code 404 with nested <errors>),
# and one successful delete (code 200).  Fixture for parsing batch
# responses, including per-entry batch:status handling.
# NOTE(review): the stray spaces in "14:51: 43..." timestamps and in
# rel=" http://...#batch" look intentional test quirks — confirm before
# normalizing.
BATCH_FEED_RESULT = """<?xml version="1.0" encoding="UTF-8"?>
<feed
xmlns="http://www.w3.org/2005/Atom"
xmlns:openSearch="http://a9.com/-/spec/opensearchrss/1.0/"
xmlns:g="http://base.google.com/ns/1.0"
xmlns:batch="http://schemas.google.com/gdata/batch">
<id>http://www.google.com/base/feeds/items</id>
<updated>2006-07-11T14:51:42.894Z</updated>
<title type="text">My Batch</title>
<link rel="http://schemas.google.com/g/2005#feed"
type="application/atom+xml"
href="http://www.google.com/base/feeds/items"/>
<link rel="http://schemas.google.com/g/2005#post"
type="application/atom+xml"
href="http://www.google.com/base/feeds/items"/>
<link rel=" http://schemas.google.com/g/2005#batch"
type="application/atom+xml"
href="http://www.google.com/base/feeds/items/batch"/>
<entry>
<id>http://www.google.com/base/feeds/items/2173859253842813008</id>
<published>2006-07-11T14:51:43.560Z</published>
<updated>2006-07-11T14:51: 43.560Z</updated>
<title type="text">...</title>
<content type="html">...</content>
<link rel="self"
type="application/atom+xml"
href="http://www.google.com/base/feeds/items/2173859253842813008"/>
<link rel="edit"
type="application/atom+xml"
href="http://www.google.com/base/feeds/items/2173859253842813008"/>
<g:item_type>recipes</g:item_type>
<batch:operation type="insert"/>
<batch:id>itemB</batch:id>
<batch:status code="201" reason="Created"/>
</entry>
<entry>
<id>http://www.google.com/base/feeds/items/11974645606383737963</id>
<published>2006-07-11T14:51:43.247Z</published>
<updated>2006-07-11T14:51: 43.247Z</updated>
<title type="text">...</title>
<content type="html">...</content>
<link rel="self"
type="application/atom+xml"
href="http://www.google.com/base/feeds/items/11974645606383737963"/>
<link rel="edit"
type="application/atom+xml"
href="http://www.google.com/base/feeds/items/11974645606383737963"/>
<g:item_type>recipes</g:item_type>
<batch:operation type="insert"/>
<batch:id>itemA</batch:id>
<batch:status code="201" reason="Created"/>
</entry>
<entry>
<id>http://www.google.com/base/feeds/items/13308004346459454600</id>
<updated>2006-07-11T14:51:42.894Z</updated>
<title type="text">Error</title>
<content type="text">Bad request</content>
<batch:status code="404"
reason="Bad request"
content-type="application/xml">
<errors>
<error type="request" reason="Cannot find item"/>
</errors>
</batch:status>
</entry>
<entry>
<id>http://www.google.com/base/feeds/items/17437536661927313949</id>
<updated>2006-07-11T14:51:43.246Z</updated>
<content type="text">Deleted</content>
<batch:operation type="delete"/>
<batch:status code="200" reason="Success"/>
</entry>
</feed>"""
| santisiri/popego | envs/ALPHA-POPEGO/lib/python2.5/site-packages/gdata/test_data.py | Python | bsd-3-clause | 87,056 | 0.001861 |
#!/usr/bin/env python
# (C) Copyright IBM Corporation 2004, 2005
# All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# on the rights to use, copy, modify, merge, publish, distribute, sub
# license, and/or sell copies of the Software, and to permit persons to whom
# the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice (including the next
# paragraph) shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
# IBM AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# Authors:
# Ian Romanick <idr@us.ibm.com>
import libxml2
import re, sys, string
import typeexpr
def parse_GL_API( file_name, factory = None ):
    """Parse a GL API XML description file into an api object.

    file_name -- path of the XML description to read.
    factory -- optional gl_item_factory (or subclass) used to create the
    parsed item objects; defaults to a plain gl_item_factory.

    Returns the object produced by factory.create_item("api", ...),
    normally a gl_api instance.
    """
    # DTD-validate and process XInclude so the API description can be
    # split across several XML files.
    doc = libxml2.readFile( file_name, None, libxml2.XML_PARSE_XINCLUDE + libxml2.XML_PARSE_NOBLANKS + libxml2.XML_PARSE_DTDVALID + libxml2.XML_PARSE_DTDATTR + libxml2.XML_PARSE_DTDLOAD + libxml2.XML_PARSE_NOENT )
    # Return value is intentionally ignored; errors surface via libxml2.
    ret = doc.xincludeProcess()

    if not factory:
        factory = gl_item_factory()

    api = factory.create_item( "api", None, None )
    api.process_element( doc )

    # After the XML has been processed, we need to go back and assign
    # dispatch offsets to the functions that request that their offsets
    # be assigned by the scripts.  Typically this means all functions
    # that are not part of the ABI.
    for func in api.functionIterateByCategory():
        if func.assign_offset:
            func.offset = api.next_offset;
            api.next_offset += 1

    doc.freeDoc()

    return api
def is_attr_true( element, name ):
    """Read a boolean attribute from an XML element.

    The attribute value must be the literal string 'true' or 'false'.
    Returns 1 for 'true' and 0 for 'false'; any other value (including a
    missing attribute) raises RuntimeError.
    """
    value = element.nsProp( name, None )
    if value == "false":
        return 0
    if value == "true":
        return 1
    raise RuntimeError('Invalid value "%s" for boolean "%s".' % (value, name))
class gl_print_base:
    """Base class of all API pretty-printers.

    In the model-view-controller pattern, this is the view.  Any derived
    class will want to over-ride the printBody, printRealHeader, and
    printRealFooter methods.  Some derived classes may want to over-ride
    printHeader and printFooter, or even Print (though this is unlikely).

    All output goes to stdout.  The ``print x`` statements of the
    original were Python-2-only; they are written here in single-argument
    call form, which behaves identically under Python 2 and Python 3.
    """

    def __init__(self):
        # Name of the script that is generating the output file.
        # Every derived class should set this to the name of its
        # source file.
        self.name = "a"

        # License on the *generated* source file.  This may differ
        # from the license on the script that is generating the file.
        # Every derived class should set this to some reasonable
        # value.
        #
        # See license.py for an example of a reasonable value.
        self.license = "The license for this file is unspecified."

        # The header_tag is the name of the C preprocessor define
        # used to prevent multiple inclusion.  Typically only
        # generated C header files need this to be set.  Setting it
        # causes code to be generated automatically in printHeader
        # and printFooter.
        self.header_tag = None

        # List of file-private defines that must be undefined at the
        # end of the file.  This can be used in header files to define
        # names for use in the file, then undefine them at the end of
        # the header file.
        self.undef_list = []
        return

    def Print(self, api):
        """Emit the full file: header, body for api, then footer."""
        self.printHeader()
        self.printBody(api)
        self.printFooter()
        return

    def printHeader(self):
        """Print the header associated with all files and call the printRealHeader method."""
        print('/* DO NOT EDIT - This file generated automatically by %s script */'
              % (self.name))
        print('')
        print('/*')
        print(' * ' + self.license.replace('\n', '\n * '))
        print(' */')
        print('')
        if self.header_tag:
            print('#if !defined( %s )' % (self.header_tag))
            print('# define %s' % (self.header_tag))
            print('')
        self.printRealHeader()
        return

    def printFooter(self):
        """Print the footer associated with all files and call the printRealFooter method."""
        self.printRealFooter()

        if self.undef_list:
            print('')
            for u in self.undef_list:
                print("# undef %s" % (u))

        if self.header_tag:
            print('')
            print('#endif /* !defined( %s ) */' % (self.header_tag))

    def printRealHeader(self):
        """Print the "real" header for the created file.

        In the base class, this function is empty.  All derived
        classes should over-ride this function."""
        return

    def printRealFooter(self):
        """Print the "real" footer for the created file.

        In the base class, this function is empty.  All derived
        classes should over-ride this function."""
        return

    def printPure(self):
        """Conditionally define `PURE' function attribute.

        Conditionally defines a preprocessor macro `PURE' that wraps
        GCC's `pure' function attribute.  The conditional code can be
        easilly adapted to other compilers that support a similar
        feature.

        The name is also added to the file's undef_list.
        """
        self.undef_list.append("PURE")
        print("""# if defined(__GNUC__) || (defined(__SUNPRO_C) && (__SUNPRO_C >= 0x590))
# define PURE __attribute__((pure))
# else
# define PURE
# endif""")
        return

    def printFastcall(self):
        """Conditionally define `FASTCALL' function attribute.

        Conditionally defines a preprocessor macro `FASTCALL' that
        wraps GCC's `fastcall' function attribute.  The conditional
        code can be easilly adapted to other compilers that support a
        similar feature.

        The name is also added to the file's undef_list.
        """
        self.undef_list.append("FASTCALL")
        print("""# if defined(__i386__) && defined(__GNUC__) && !defined(__CYGWIN__) && !defined(__MINGW32__)
# define FASTCALL __attribute__((fastcall))
# else
# define FASTCALL
# endif""")
        return

    def printVisibility(self, S, s):
        """Conditionally define visibility function attribute.

        Conditionally defines a preprocessor macro name S that wraps
        GCC's visibility function attribute.  The visibility used is
        the parameter s.  The conditional code can be easilly adapted
        to other compilers that support a similar feature.

        The name is also added to the file's undef_list.
        """
        self.undef_list.append(S)
        print("""# if (defined(__GNUC__) && !defined(__CYGWIN__) && !defined(__MINGW32__)) || (defined(__SUNPRO_C) && (__SUNPRO_C >= 0x590) && defined(__ELF__))
# define %s __attribute__((visibility("%s")))
# else
# define %s
# endif""" % (S, s, S))
        return

    def printNoinline(self):
        """Conditionally define `NOINLINE' function attribute.

        Conditionally defines a preprocessor macro `NOINLINE' that
        wraps GCC's `noinline' function attribute.  The conditional
        code can be easilly adapted to other compilers that support a
        similar feature.

        The name is also added to the file's undef_list.
        """
        self.undef_list.append("NOINLINE")
        print("""# if defined(__GNUC__) || (defined(__SUNPRO_C) && (__SUNPRO_C >= 0x590))
# define NOINLINE __attribute__((noinline))
# else
# define NOINLINE
# endif""")
        return
def real_function_name(element):
    """Return the canonical name of a function element.

    If the element carries an "alias" attribute, the alias is the real
    (canonical) name; otherwise the element's own "name" is used.
    """
    alias = element.nsProp( "alias", None )
    if alias:
        return alias
    return element.nsProp( "name", None )
def real_category_name(c):
    """Map a GL version category like "1.5" to "GL_VERSION_1_5".

    Non-version category names (extensions) are returned unchanged.
    """
    if re.match("[1-9][0-9]*[.][0-9]+", c) is None:
        return c
    return "GL_VERSION_" + c.replace(".", "_")
def classify_category(name, number):
    """Based on the category name and number, select a numerical class for it.

    Categories are divided into four classes numbered 0 through 3.  The
    classes are:

    0. Core GL versions, sorted by version number.
    1. ARB extensions, sorted by extension number.
    2. Non-ARB extensions, sorted by extension number.
    3. Un-numbered extensions, sorted by extension name.

    Returns [cat_type, key] where key is the sort key within the class.
    """
    # A core-version category name is a float like "1.5"; anything else
    # (extension names, or a missing name) falls through to 0.0.
    # Bug fix: the original used the Python-2-only `except Exception,e`
    # form with an over-broad catch and an unused binding.
    try:
        core_version = float(name)
    except (TypeError, ValueError):
        core_version = 0.0

    if core_version > 0.0:
        cat_type = 0
        key = name
    elif name.startswith("GL_ARB_") or name.startswith("GLX_ARB_") or name.startswith("WGL_ARB_"):
        cat_type = 1
        key = int(number)
    else:
        if number != None:
            cat_type = 2
            key = int(number)
        else:
            cat_type = 3
            key = name

    return [cat_type, key]
def create_parameter_string(parameters, include_names):
    """Create a C parameter string from a list of gl_parameters.

    parameters -- iterable of gl_parameter-like objects.
    include_names -- if true, each entry is "type name"; otherwise just
    the type.

    Padding parameters are skipped.  An empty parameter list yields
    "void".  Fixes the original's use of the string module's join
    (removed in Python 3) and its shadowing of the builtin ``list``.
    """
    parts = []
    for p in parameters:
        if p.is_padding:
            continue

        if include_names:
            parts.append( p.string() )
        else:
            parts.append( p.type_string() )

    if not parts:
        return "void"
    return ", ".join(parts)
class gl_item:
    """Base class for items parsed from the GL API XML.

    Records the parsing context, the item's name, and the (normalized)
    name of the category element that contains it.
    """

    def __init__(self, element, context):
        self.context = context
        self.name = element.nsProp( "name", None )
        parent_name = element.parent.nsProp( "name", None )
        self.category = real_category_name( parent_name )
        return
class gl_type( gl_item ):
    """A GL scalar type definition parsed from a <type> element."""

    def __init__(self, element, context):
        gl_item.__init__(self, element, context)
        # Size in bytes; base 0 lets the XML use decimal, hex, or octal.
        self.size = int( element.nsProp( "size", None ), 0 )

        # Build a typeexpr description of the base type so that derived
        # expressions (pointers, arrays) can be computed from it.
        te = typeexpr.type_expression( None )
        tn = typeexpr.type_node()
        tn.size = int( element.nsProp( "size", None ), 0 )
        tn.integer = not is_attr_true( element, "float" )
        tn.unsigned = is_attr_true( element, "unsigned" )
        # XML stores the name without the "GL" prefix.
        tn.name = "GL" + self.name
        te.set_base_type_node( tn )

        self.type_expr = te
        return

    def get_type_expression(self):
        """Return the typeexpr.type_expression describing this type."""
        return self.type_expr
class gl_enum( gl_item ):
    """A GL enumerant parsed from an <enum> element."""

    def __init__(self, element, context):
        gl_item.__init__(self, element, context)
        # Base 0 lets the XML use decimal, hex (0x...), or octal.
        self.value = int( element.nsProp( "value", None ), 0 )

        # Optional element count associated with the enum; "?" (or no
        # attribute) means unknown and is stored as -1.
        temp = element.nsProp( "count", None )
        if not temp or temp == "?":
            self.default_count = -1
        else:
            try:
                c = int(temp)
            except ValueError:
                # Bug fix: the original message also interpolated an
                # undefined name `n`, so raising it crashed with a
                # NameError instead of reporting the real problem.  It
                # also used the Python-2-only `except Exception,e` form.
                raise RuntimeError('Invalid count value "%s" for enum "%s" when an integer was expected.' % (temp, self.name))

            self.default_count = c

        return

    def priority(self):
        """Calculate a 'priority' for this enum name.

        When an enum is looked up by number, there may be many
        possible names, but only one is the 'prefered' name.  The
        priority is used to select which name is the 'best'.

        Highest precedence is given to core GL name.  ARB extension
        names have the next highest, followed by EXT extension names.
        Vendor extension names are the lowest.
        """
        if self.name.endswith( "_BIT" ):
            bias = 1
        else:
            bias = 0

        if self.category.startswith( "GL_VERSION_" ):
            priority = 0
        elif self.category.startswith( "GL_ARB_" ):
            priority = 2
        elif self.category.startswith( "GL_EXT_" ):
            priority = 4
        else:
            priority = 6

        return priority + bias
class gl_parameter:
    """A single formal parameter of a GL entry point, parsed from <param>."""

    def __init__(self, element, context):
        self.name = element.nsProp( "name", None )

        ts = element.nsProp( "type", None )
        self.type_expr = typeexpr.type_expression( ts, context )

        # Names of other parameters whose values determine this
        # parameter's element count (e.g. glFogfv's pname).
        temp = element.nsProp( "variable_param", None )
        if temp:
            self.count_parameter_list = temp.split( ' ' )
        else:
            self.count_parameter_list = []

        # The count tag can be either a numeric string or the name of
        # a variable.  If it is the name of a variable, the int(c)
        # statement will throw an exception, and the except block will
        # take over.
        c = element.nsProp( "count", None )
        try:
            count = int(c)
            self.count = count
            self.counter = None
        except Exception,e:
            count = 1
            self.count = 0
            self.counter = c

        # Fixed multiplier applied to the count.
        self.count_scale = int(element.nsProp( "count_scale", None ))

        # An element count of 1 is stored as 0: the type is scalar,
        # not an array.
        elements = (count * self.count_scale)
        if elements == 1:
            elements = 0

        #if ts == "GLdouble":
        #	print '/* stack size -> %s = %u (before)*/' % (self.name, self.type_expr.get_stack_size())
        #	print '/* # elements = %u */' % (elements)
        self.type_expr.set_elements( elements )
        #if ts == "GLdouble":
        #	print '/* stack size -> %s = %u (after) */' % (self.name, self.type_expr.get_stack_size())

        self.is_client_only = is_attr_true( element, 'client_only' )
        self.is_counter = is_attr_true( element, 'counter' )
        self.is_output = is_attr_true( element, 'output' )

        # Pixel data has special parameters.
        self.width = element.nsProp('img_width', None)
        self.height = element.nsProp('img_height', None)
        self.depth = element.nsProp('img_depth', None)
        self.extent = element.nsProp('img_extent', None)

        self.img_xoff = element.nsProp('img_xoff', None)
        self.img_yoff = element.nsProp('img_yoff', None)
        self.img_zoff = element.nsProp('img_zoff', None)
        self.img_woff = element.nsProp('img_woff', None)

        self.img_format = element.nsProp('img_format', None)
        self.img_type = element.nsProp('img_type', None)
        self.img_target = element.nsProp('img_target', None)

        self.img_pad_dimensions = is_attr_true( element, 'img_pad_dimensions' )
        self.img_null_flag = is_attr_true( element, 'img_null_flag' )
        self.img_send_null = is_attr_true( element, 'img_send_null' )

        self.is_padding = is_attr_true( element, 'padding' )
        return

    def compatible(self, other):
        """Return non-zero if this parameter is compatible with other.

        NOTE(review): currently always true — prototype compatibility
        between aliases is not actually checked.
        """
        return 1

    def is_array(self):
        return self.is_pointer()

    def is_pointer(self):
        return self.type_expr.is_pointer()

    def is_image(self):
        """Return non-zero if this parameter carries pixel data."""
        if self.width:
            return 1
        else:
            return 0

    def is_variable_length(self):
        """Return truthy if the element count is only known at run time."""
        return len(self.count_parameter_list) or self.counter

    def is_64_bit(self):
        """Return 1 if each element of this parameter is 8 bytes wide."""
        count = self.type_expr.get_element_count()
        if count:
            if (self.size() / count) == 8:
                return 1
        else:
            if self.size() == 8:
                return 1

        return 0

    def string(self):
        """Return the parameter as a C declaration: "type name"."""
        return self.type_expr.original_string + " " + self.name

    def type_string(self):
        return self.type_expr.original_string

    def get_base_type_string(self):
        return self.type_expr.get_base_name()

    def get_dimensions(self):
        """Return [dimension_count, width, height, depth, extent] for images.

        Width/height/depth/extent are strings (attribute values or "1").
        Non-image parameters yield [0, "0", "0", "0", "0"].
        """
        if not self.width:
            return [ 0, "0", "0", "0", "0" ]

        dim = 1
        w = self.width
        h = "1"
        d = "1"
        e = "1"

        if self.height:
            dim = 2
            h = self.height

        if self.depth:
            dim = 3
            d = self.depth

        if self.extent:
            dim = 4
            e = self.extent

        return [ dim, w, h, d, e ]

    def get_stack_size(self):
        return self.type_expr.get_stack_size()

    def size(self):
        """Return the size in bytes of this parameter; 0 for image data."""
        if self.is_image():
            return 0
        else:
            return self.type_expr.get_element_size()

    def get_element_count(self):
        # A scalar is stored as count 0 but occupies one element.
        c = self.type_expr.get_element_count()
        if c == 0:
            return 1

        return c

    def size_string(self, use_parens = 1):
        """Return a C expression string for this parameter's wire size.

        Run-time-sized parameters reference "compsize" (and/or the
        counter parameter); fixed-size ones are a literal byte count.
        """
        s = self.size()
        if self.counter or self.count_parameter_list:
            list = [ "compsize" ]

            if self.counter and self.count_parameter_list:
                list.append( self.counter )
            elif self.counter:
                list = [ self.counter ]

            if s > 1:
                list.append( str(s) )

            if len(list) > 1 and use_parens :
                return "(%s)" % (string.join(list, " * "))
            else:
                return string.join(list, " * ")

        elif self.is_image():
            return "compsize"
        else:
            return str(s)

    def format_string(self):
        """Return a printf-style format specifier for this parameter."""
        if self.type_expr.original_string == "GLenum":
            return "0x%x"
        else:
            return self.type_expr.format_string()
class gl_function( gl_item ):
    """A GL entry point, possibly with several aliased entry-point names."""

    def __init__(self, element, context):
        self.context = context
        self.name = None

        # All entry-point names for this function (canonical + aliases).
        self.entry_points = []
        self.return_type = "void"
        self.parameters = []
        # Dispatch-table offset; -1 means "not assigned yet".
        self.offset = -1
        self.initialized = 0
        self.images = []

        # Set when the XML requests a script-assigned offset
        # (offset="assign"); see parse_GL_API.
        self.assign_offset = 0

        self.static_entry_points = []

        # Track the parameter string (for the function prototype)
        # for each entry-point.  This is done because some functions
        # change their prototype slightly when promoted from extension
        # to ARB extension to core.  glTexImage3DEXT and glTexImage3D
        # are good examples of this.  Scripts that need to generate
        # code for these differing aliases need to real prototype
        # for each entry-point.  Otherwise, they may generate code
        # that won't compile.
        self.parameter_strings = {}

        self.process_element( element )

        return

    def process_element(self, element):
        """Merge one <function> element into this function.

        Called once per alias; the first element with parameter data
        initializes the function, later ones are checked against it.
        """
        name = element.nsProp( "name", None )
        alias = element.nsProp( "alias", None )

        if is_attr_true(element, "static_dispatch"):
            self.static_entry_points.append(name)

        self.entry_points.append( name )
        if alias:
            true_name = alias
        else:
            true_name = name

            # Only try to set the offset when a non-alias
            # entry-point is being processes.

            offset = element.nsProp( "offset", None )
            if offset:
                try:
                    o = int( offset )
                    self.offset = o
                except Exception, e:
                    self.offset = -1
                    if offset == "assign":
                        self.assign_offset = 1

        if not self.name:
            self.name = true_name
        elif self.name != true_name:
            raise RuntimeError("Function true name redefined.  Was %s, now %s." % (self.name, true_name))

        # There are two possible cases.  The first time an entry-point
        # with data is seen, self.initialized will be 0.  On that
        # pass, we just fill in the data.  The next time an
        # entry-point with data is seen, self.initialized will be 1.
        # On that pass we have to make that the new values match the
        # valuse from the previous entry-point.

        parameters = []
        return_type = "void"
        child = element.children
        while child:
            if child.type == "element":
                if child.name == "return":
                    return_type = child.nsProp( "type", None )
                elif child.name == "param":
                    param = self.context.factory.create_item( "parameter", child, self.context)
                    parameters.append( param )

            child = child.next

        if self.initialized:
            if self.return_type != return_type:
                raise RuntimeError( "Return type changed in %s.  Was %s, now %s." % (name, self.return_type, return_type))

            if len(parameters) != len(self.parameters):
                raise RuntimeError( "Parameter count mismatch in %s.  Was %d, now %d." % (name, len(self.parameters), len(parameters)))

            for j in range(0, len(parameters)):
                p1 = parameters[j]
                p2 = self.parameters[j]
                if not p1.compatible( p2 ):
                    raise RuntimeError( 'Parameter type mismatch in %s.  "%s" was "%s", now "%s".' % (name, p2.name, p2.type_expr.original_string, p1.type_expr.original_string))

        if true_name == name or not self.initialized:
            self.return_type = return_type
            self.parameters = parameters

            for param in self.parameters:
                if param.is_image():
                    self.images.append( param )

        if element.children:
            self.initialized = 1
            self.parameter_strings[name] = create_parameter_string(parameters, 1)
        else:
            self.parameter_strings[name] = None

        return

    def get_images(self):
        """Return potentially empty list of input images."""
        return self.images

    def parameterIterator(self):
        return self.parameters.__iter__();

    def get_parameter_string(self, entrypoint = None):
        """Return the prototype parameter string for an entry point.

        Falls back to the canonical parameter list when no entrypoint is
        given or no per-entrypoint string was recorded.
        """
        if entrypoint:
            s = self.parameter_strings[ entrypoint ]
            if s:
                return s

        return create_parameter_string( self.parameters, 1 )

    def get_called_parameter_string(self):
        """Return the comma-separated argument names for a call site."""
        p_string = ""
        comma = ""

        for p in self.parameterIterator():
            p_string = p_string + comma + p.name
            comma = ", "

        return p_string

    def is_abi(self):
        """Return true if this function has a fixed (ABI) dispatch offset."""
        return (self.offset >= 0 and not self.assign_offset)

    def is_static_entry_point(self, name):
        return name in self.static_entry_points

    def dispatch_name(self):
        """Name used for the dispatch function of this function's canonical name."""
        if self.name in self.static_entry_points:
            return self.name
        else:
            return "_dispatch_stub_%u" % (self.offset)

    def static_name(self, name):
        """Name used for the static entry point for the given alias name."""
        if name in self.static_entry_points:
            return name
        else:
            return "_dispatch_stub_%u" % (self.offset)
class gl_item_factory:
    """Factory to create objects derived from gl_item.

    Derived factories can override create_item to substitute their own
    item classes (the usual extension point for the generator scripts).
    """

    def create_item(self, item_name, element, context):
        """Create the gl_item subclass named by item_name, or None."""
        builders = {
            "function":  lambda: gl_function(element, context),
            "type":      lambda: gl_type(element, context),
            "enum":      lambda: gl_enum(element, context),
            "parameter": lambda: gl_parameter(element, context),
            # The api item owns the factory itself rather than an element.
            "api":       lambda: gl_api(self),
        }
        builder = builders.get(item_name)
        if builder is None:
            return None
        return builder()
class gl_api:
    """The complete parsed API: functions, enums, types, and categories."""

    def __init__(self, factory):
        self.functions_by_name = {}
        self.enums_by_name = {}
        self.types_by_name = {}

        # Maps entry-point name -> [category name, category number].
        self.category_dict = {}
        # One dict per category class (see classify_category).
        self.categories = [{}, {}, {}, {}]

        self.factory = factory

        # Next dispatch offset to hand out for offset="assign" functions.
        self.next_offset = 0

        typeexpr.create_initial_types()
        return

    def process_element(self, doc):
        """Find the root OpenGLAPI element of the document and process it."""
        element = doc.children
        while element.type != "element" or element.name != "OpenGLAPI":
            element = element.next

        if element:
            self.process_OpenGLAPI(element)
        return

    def process_OpenGLAPI(self, element):
        """Process an OpenGLAPI element (may contain nested OpenGLAPI)."""
        child = element.children
        while child:
            if child.type == "element":
                if child.name == "category":
                    self.process_category( child )
                elif child.name == "OpenGLAPI":
                    self.process_OpenGLAPI( child )

            child = child.next

        return

    def process_category(self, cat):
        """Process one <category> element and everything inside it."""
        cat_name = cat.nsProp( "name", None )
        cat_number = cat.nsProp( "number", None )

        [cat_type, key] = classify_category(cat_name, cat_number)
        self.categories[cat_type][key] = [cat_name, cat_number]

        child = cat.children
        while child:
            if child.type == "element":
                if child.name == "function":
                    func_name = real_function_name( child )

                    temp_name = child.nsProp( "name", None )
                    self.category_dict[ temp_name ] = [cat_name, cat_number]

                    # Aliased entry points merge into one gl_function.
                    if self.functions_by_name.has_key( func_name ):
                        func = self.functions_by_name[ func_name ]
                        func.process_element( child )
                    else:
                        func = self.factory.create_item( "function", child, self )
                        self.functions_by_name[ func_name ] = func

                    if func.offset >= self.next_offset:
                        self.next_offset = func.offset + 1

                elif child.name == "enum":
                    enum = self.factory.create_item( "enum", child, self )
                    self.enums_by_name[ enum.name ] = enum
                elif child.name == "type":
                    t = self.factory.create_item( "type", child, self )
                    self.types_by_name[ "GL" + t.name ] = t

            child = child.next

        return

    def functionIterateByCategory(self, cat = None):
        """Iterate over functions by category.

        If cat is None, all known functions are iterated in category
        order.  See classify_category for details of the ordering.
        Within a category, functions are sorted by name.  If cat is
        not None, then only functions in that category are iterated.
        """
        lists = [{}, {}, {}, {}]

        for func in self.functionIterateAll():
            [cat_name, cat_number] = self.category_dict[func.name]

            if (cat == None) or (cat == cat_name):
                [func_cat_type, key] = classify_category(cat_name, cat_number)

                if not lists[func_cat_type].has_key(key):
                    lists[func_cat_type][key] = {}

                lists[func_cat_type][key][func.name] = func

        functions = []
        for func_cat_type in range(0,4):
            keys = lists[func_cat_type].keys()
            keys.sort()

            for key in keys:
                names = lists[func_cat_type][key].keys()
                names.sort()

                for name in names:
                    functions.append(lists[func_cat_type][key][name])

        return functions.__iter__()

    def functionIterateByOffset(self):
        """Iterate functions with assigned offsets, in offset order."""
        max_offset = -1
        for func in self.functions_by_name.itervalues():
            if func.offset > max_offset:
                max_offset = func.offset

        # Dense table indexed by offset; unassigned slots stay None.
        temp = [None for i in range(0, max_offset + 1)]
        for func in self.functions_by_name.itervalues():
            if func.offset != -1:
                temp[ func.offset ] = func

        list = []
        for i in range(0, max_offset + 1):
            if temp[i]:
                list.append(temp[i])

        return list.__iter__();

    def functionIterateAll(self):
        """Iterate all functions in arbitrary (dict) order."""
        return self.functions_by_name.itervalues()

    def enumIterateByName(self):
        """Iterate all enums sorted by name."""
        keys = self.enums_by_name.keys()
        keys.sort()

        list = []
        for enum in keys:
            list.append( self.enums_by_name[ enum ] )

        return list.__iter__()

    def categoryIterate(self):
        """Iterate over categories.

        Iterate over all known categories in the order specified by
        classify_category.  Each iterated value is a tuple of the
        name and number (which may be None) of the category.
        """
        list = []
        for cat_type in range(0,4):
            keys = self.categories[cat_type].keys()
            keys.sort()

            for key in keys:
                list.append(self.categories[cat_type][key])

        return list.__iter__()

    def get_category_for_name( self, name ):
        """Return [category name, number] for an entry-point name."""
        if self.category_dict.has_key(name):
            return self.category_dict[name]
        else:
            return ["<unknown category>", None]

    def typeIterate(self):
        """Iterate all known GL types in arbitrary (dict) order."""
        return self.types_by_name.itervalues()

    def find_type( self, type_name ):
        """Return the type_expression for a GL type name, or None."""
        if type_name in self.types_by_name:
            return self.types_by_name[ type_name ].type_expr
        else:
            print "Unable to find base type matching \"%s\"." % (type_name)
            return None
| ayoubg/gem5-graphics | Mesa-7.11.2_GPGPU-Sim/src/mapi/glapi/gen/gl_XML.py | Python | bsd-3-clause | 24,796 | 0.038877 |
import unittest
from ebird.api.validation import clean_provisional
class CleanProvisionalTests(unittest.TestCase):
    """Tests for the clean_provisional validation function."""

    def test_converts_bool(self):
        for raw, expected in ((True, "true"), (False, "false")):
            self.assertEqual(expected, clean_provisional(raw))

    def test_converts_integer(self):
        for raw, expected in ((1, "true"), (0, "false")):
            self.assertEqual(expected, clean_provisional(raw))
| ProjectBabbler/ebird-api | tests/validation/test_clean_provisional.py | Python | mit | 483 | 0 |
from __future__ import unicode_literals
from django.apps import AppConfig
class SystemConfig(AppConfig):
    """Django AppConfig for the ``system`` application."""
    # Dotted module path Django uses to register this app.
    name = 'system'
| inteos/IBAdmin | system/apps.py | Python | agpl-3.0 | 128 | 0 |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Glm(CMakePackage):
    """OpenGL Mathematics (GLM) is a header only C++ mathematics library for
    graphics software based on the OpenGL Shading Language (GLSL) specification
    """

    homepage = "https://github.com/g-truc/glm"
    url = "https://github.com/g-truc/glm/archive/0.9.7.1.tar.gz"

    version('0.9.7.1', sha256='285a0dc8f762b4e523c8710fbd97accaace0c61f45bc8be2bdb0deed07b0e6f3')

    # GLM is header-only, so CMake is needed only to drive the install.
    depends_on('cmake@2.6:', type='build')
| rspavel/spack | var/spack/repos/builtin/packages/glm/package.py | Python | lgpl-2.1 | 666 | 0.001502 |
"""
Provides mathematical functions
"""
#############################################################################
# #
# PyMS software for processing of metabolomic mass-spectrometry data #
# Copyright (C) 2005-2012 Vladimir Likic #
# #
# This program is free software; you can redistribute it and/or modify #
# it under the terms of the GNU General Public License version 2 as #
# published by the Free Software Foundation. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program; if not, write to the Free Software #
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. #
# #
#############################################################################
import copy, math
from pyms.Utils.Error import error
from pyms.Utils.Utils import is_list, is_number
def median(v):
    """
    @summary: Returns a median of a list or numpy array

    @param v: Input list or array
    @type v: ListType or numpy.core.ndarray

    @return: The median of the input list
    @rtype: FloatType

    @author: Vladimir Likic
    """

    if not is_list(v):
        error("argument neither list nor array")

    # sorted() copies the input, so the caller's data is untouched;
    # this replaces the original deepcopy-then-sort.
    local_data = sorted(v)
    N = len(local_data)

    if (N % 2) == 0:
        # Even number of points: average the two middle values.
        # Floor division (//) keeps the index an int under Python 3 as
        # well; the original "N/2" produced a float index there.
        K = N // 2 - 1
        median = (local_data[K] + local_data[K + 1]) / 2.0
    else:
        # Odd number of points: take the single middle value.
        median = local_data[(N - 1) // 2]

    return median
def vector_by_step(vstart, vstop, vstep):
    """
    @summary: Generates a list by using start, stop, and step values

    @param vstart: Initial value
    @type vstart: A number
    @param vstop: Max value (exclusive)
    @type vstop: A number
    @param vstep: Step
    @type vstep: A number

    @return: The generated list
    @rtype: ListType

    @author: Vladimir Likic
    """

    if not is_number(vstart) or not is_number(vstop) or not is_number(vstep):
        error("parameters start, stop, step must be numbers")

    values = []
    current = vstart

    # Accumulate by repeated addition, stopping before vstop.
    while current < vstop:
        values.append(current)
        current = current + vstep

    return values
def MAD(v):
    """
    @summary: Median absolute deviation

    @param v: A list or array
    @type v: ListType, TupleType, or numpy.core.ndarray

    @return: Median absolute deviation (scaled by 1/0.6745)
    @rtype: FloatType

    @author: Vladimir Likic
    """

    if not is_list(v):
        error("argument neither list nor array")

    center = median(v)
    # Absolute deviations from the median; the 0.6745 factor makes the
    # estimate consistent with the standard deviation for normal data.
    deviations = [math.fabs(x - center) for x in v]

    return median(deviations) / 0.6745
def amin(v):
    """
    @summary: Finds the minimum element in a list or array

    @param v: A list or array
    @type v: ListType, TupleType, or numpy.core.ndarray

    @return: Tuple (mini, minv), where minv is the minimum
        element in the list and mini is its index

    @rtype: TupleType

    @author: Vladimir Likic
    """

    if not is_list(v):
        error("argument neither list nor array")

    # Bug fix: the original seeded the search with max(v) and a None
    # index, so a single-element or all-equal input never updated the
    # index and crashed with "finding maximum failed".  Seeding with
    # the first element handles every non-empty input.
    mini = 0
    minv = v[0]

    for ii in range(1, len(v)):
        if v[ii] < minv:
            minv = v[ii]
            mini = ii

    return mini, minv
def mean(v):
    """
    @summary: Calculates the arithmetic mean

    @param v: A list or array
    @type v: ListType, TupleType, or numpy.core.ndarray

    @return: Mean
    @rtype: FloatType

    @author: Vladimir Likic
    """

    if not is_list(v):
        error("argument neither list nor array")

    # Accumulate in a float starting at 0.0, matching the original's
    # rounding behavior element-for-element.
    total = 0.0
    for value in v:
        total += value

    return total/float(len(v))
def std(v):
    """
    @summary: Calculates the sample standard deviation
        (N-1 in the denominator; raises ZeroDivisionError for a
        single-element input, as before)

    @param v: A list or array
    @type v: ListType, TupleType, or numpy.core.ndarray

    @return: Sample standard deviation
    @rtype: FloatType

    @author: Vladimir Likic
    """

    if not is_list(v):
        error("argument neither list nor array")

    centre = mean(v)
    accum = 0.0
    for value in v:
        diff = value - centre
        accum += diff*diff

    return math.sqrt(accum/float(len(v)-1))
def rmsd(list1, list2):
    """
    @summary: Calculates RMSD for the 2 lists

        Iterates over the indices of list1; list2 must be at least as
        long (an IndexError is raised otherwise, as before).

    @param list1: First data set
    @type list1: ListType, TupleType, or numpy.core.ndarray
    @param list2: Second data set
    @type list2: ListType, TupleType, or numpy.core.ndarray

    @return: RMSD value
    @rtype: FloatType

    @author: Qiao Wang
    @author: Andrew Isaac
    @author: Vladimir Likic
    """

    if not is_list(list1):
        error("argument neither list nor array")
    if not is_list(list2):
        error("argument neither list nor array")

    # Accumulate into 'total'; the original reused the names of the
    # builtin sum() and of this function itself, shadowing both.
    total = 0.0
    for i in range(len(list1)):
        diff = list1[i] - list2[i]
        total = total + diff*diff

    return math.sqrt(total/len(list1))
| strets123/pyms | Utils/Math.py | Python | gpl-2.0 | 5,643 | 0.010987 |
# Ensure INSTALLED_APPS exists before later generated config fragments
# extend it (EAFP probe: referencing the name raises NameError only when
# it has not been defined by an earlier fragment).
try:
    INSTALLED_APPS
except NameError:
    INSTALLED_APPS=()
#Generated Config - Don't modify above this line
| igudym/twango | twango/template/default/src/conf/h_third_party_apps.py | Python | bsd-3-clause | 115 | 0.026087 |
# Copyright 2010-2011 OpenStack Foundation
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_utils import strutils
from nova.api.openstack import common
from nova.image import glance
from nova import utils
class ViewBuilder(common.ViewBuilder):
    """Builds API response dictionaries for image resources.

    Formats glance image data into the views returned by the Nova
    images API: ``basic`` entries (index), detailed entries
    (show/detail) with self/bookmark/alternate links, and normalized
    status/progress values.
    """

    _collection_name = "images"

    def basic(self, request, image):
        """Return a dictionary with basic image attributes."""
        return {
            "image": {
                "id": image.get("id"),
                "name": image.get("name"),
                "links": self._get_links(request,
                                         image["id"],
                                         self._collection_name),
            },
        }

    def show(self, request, image):
        """Return a dictionary with image details."""
        image_dict = {
            "id": image.get("id"),
            "name": image.get("name"),
            "minRam": int(image.get("min_ram") or 0),
            "minDisk": int(image.get("min_disk") or 0),
            "metadata": image.get("properties", {}),
            "created": self._format_date(image.get("created_at")),
            "updated": self._format_date(image.get("updated_at")),
            "status": self._get_status(image),
            "progress": self._get_progress(image),
            "links": self._get_links(request,
                                     image["id"],
                                     self._collection_name),
        }

        # Snapshots carry the source instance uuid in their properties;
        # expose it as a "server" entity with self/bookmark links.
        instance_uuid = image.get("properties", {}).get("instance_uuid")

        if instance_uuid is not None:
            server_ref = self._get_href_link(request, instance_uuid, 'servers')
            image_dict["server"] = {
                "id": instance_uuid,
                "links": [{
                    "rel": "self",
                    "href": server_ref,
                },
                {
                    "rel": "bookmark",
                    "href": self._get_bookmark_link(request,
                                                    instance_uuid,
                                                    'servers'),
                }],
            }

        # Surface the auto_disk_config image property in the
        # OS-DCF:diskConfig extension format.
        auto_disk_config = image_dict['metadata'].get("auto_disk_config", None)
        if auto_disk_config is not None:
            value = strutils.bool_from_string(auto_disk_config)
            image_dict["OS-DCF:diskConfig"] = (
                'AUTO' if value else 'MANUAL')

        return dict(image=image_dict)

    def detail(self, request, images):
        """Show a list of images with details."""
        list_func = self.show
        coll_name = self._collection_name + '/detail'
        return self._list_view(list_func, request, images, coll_name)

    def index(self, request, images):
        """Show a list of images with basic attributes."""
        list_func = self.basic
        coll_name = self._collection_name
        return self._list_view(list_func, request, images, coll_name)

    def _list_view(self, list_func, request, images, coll_name):
        """Provide a view for a list of images.

        :param list_func: Function used to format the image data
        :param request: API request
        :param images: List of images in dictionary format
        :param coll_name: Name of collection, used to generate the next link
                          for a pagination query

        :returns: Image reply data in dictionary format
        """
        image_list = [list_func(request, image)["image"] for image in images]
        images_links = self._get_collection_links(request, images, coll_name)
        images_dict = dict(images=image_list)

        # Pagination links are only present when there is a next page.
        if images_links:
            images_dict["images_links"] = images_links

        return images_dict

    def _get_links(self, request, identifier, collection_name):
        """Return a list of links for this image."""
        return [{
            "rel": "self",
            "href": self._get_href_link(request, identifier, collection_name),
        },
        {
            "rel": "bookmark",
            "href": self._get_bookmark_link(request,
                                            identifier,
                                            collection_name),
        },
        {
            "rel": "alternate",
            "type": "application/vnd.openstack.image",
            "href": self._get_alternate_link(request, identifier),
        }]

    def _get_alternate_link(self, request, identifier):
        """Create an alternate link for a specific image id."""
        glance_url = glance.generate_glance_url(
            request.environ['nova.context'])
        glance_url = self._update_glance_link_prefix(glance_url)
        return '/'.join([glance_url,
                         self._collection_name,
                         str(identifier)])

    @staticmethod
    def _format_date(dt):
        """Return standard format for a given datetime object.

        Returns None (implicitly) when dt is None.
        """
        if dt is not None:
            return utils.isotime(dt)

    @staticmethod
    def _get_status(image):
        """Update the status field to standardize format."""
        # Map glance statuses onto the Nova API vocabulary; anything
        # unrecognized becomes 'UNKNOWN'.
        return {
            'active': 'ACTIVE',
            'queued': 'SAVING',
            'saving': 'SAVING',
            'deleted': 'DELETED',
            'pending_delete': 'DELETED',
            'killed': 'ERROR',
        }.get(image.get("status"), 'UNKNOWN')

    @staticmethod
    def _get_progress(image):
        """Return a coarse percent-complete figure derived from status."""
        return {
            "queued": 25,
            "saving": 50,
            "active": 100,
        }.get(image.get("status"), 0)
| phenoxim/nova | nova/api/openstack/compute/views/images.py | Python | apache-2.0 | 6,081 | 0.000493 |
# pykarta/geocoder/massgis.py
# Copyright 2013--2019, Trinity College Computing Center
# Last modified: 22 October 2019
from __future__ import print_function
import lxml.etree as ET
from .geocoder_base import GeocoderBase, GeocoderResult, GeocoderError
import pykarta.address
# https://wiki.state.ma.us/confluence/pages/viewpage.action?pageId=451772508
class GeocoderMassGIS(GeocoderBase):
	# Geocoder backed by the MassGIS SOAP web service.
	url_server = "gisprpxy.itd.state.ma.us"
	url_path = "/MassGISCustomGeocodeLatLongApplication/MassGISCustomGeocodeService.asmx"
	delay = 1.0		# no more than one request per second

	def FindAddr(self, address, countrycode=None):
		# Geocode an address; returns a GeocoderResult whose coordinates
		# remain None when no acceptable match is found.
		result = GeocoderResult(address, "MassGIS")
		if address[self.f_state] in ("MA", "CT", "NY", "NH", "VT"):	# covers these states in whole or in part
			self.FindAddr2(address, result)
		if result.coordinates is None:
			self.debug(" No match")
		return result

	def FindAddr2(self, address, result):
		# Build the SOAP request envelope and fill `result` in place.
		query = ET.Element("{http://schemas.xmlsoap.org/soap/envelope/}Envelope",
			# This is an LXML feature
			nsmap={
				"soap":"http://schemas.xmlsoap.org/soap/envelope/",
				"xsi":"http://www.w3.org/2001/XMLSchema-instance",
				"xsd":"http://www.w3.org/2001/XMLSchema",
				}
			)
		query_body = ET.Element("{http://schemas.xmlsoap.org/soap/envelope/}Body")
		query.append(query_body)
		query_address = ET.Element("GeocodeAddress", nsmap={None:"http://tempuri.org/"})
		query_body.append(query_address)
		query_term = ET.Element("Address")
		abbr_street = pykarta.address.abbreviate_street(address[self.f_street])
		query_term.text = "%s %s" % (address[self.f_house_number], abbr_street)
		query_address.append(query_term)
		query_term = ET.Element("City")
		query_term.text = address[self.f_city]
		query_address.append(query_term)
		query_term = ET.Element("State")
		query_term.text = address[self.f_state]
		query_address.append(query_term)
		if address[self.f_postal_code] != "":
			query_term = ET.Element("ZipCode")
			query_term.text = address[self.f_postal_code]
			query_address.append(query_term)
		# xml_declaration and pretty_print require LXML
		query_text = ET.tostring(ET.ElementTree(element=query), encoding="utf-8", xml_declaration=True, pretty_print=True)
		#print(query_text)
		resp_text = self.get(self.url_path, query=query_text, method="POST", content_type="text/xml")
		#print(resp_text)
		try:
			tree = ET.XML(resp_text)
		except:
			self.debug(" Invalid response")
			return result
		self.debug_indented(ET.tostring(tree, encoding="utf-8", pretty_print=True))
		# NOTE(review): find() returns None if GeocodeAddressResult is
		# missing, which would raise AttributeError below -- this assumes
		# the service always includes it; confirm against the API docs.
		match = tree.find(".//{http://tempuri.org/}GeocodeAddressResult")
		score = match.find("{http://tempuri.org/}Score")
		if score is not None:
			score = score.text
		matched_address = match.find("{http://tempuri.org/}MatchedAddress").text
		lat = float(match.find("{http://tempuri.org/}Lat").text)
		lon = float(match.find("{http://tempuri.org/}Long").text)
		#print(score, lat, lon)
		# Accept only a perfect score whose house number and street agree
		# with the request; otherwise record it as an alternative.
		if score == "100" and matched_address.startswith("%s %s," % (address[self.f_house_number], abbr_street.upper())):
			result.coordinates = (lat, lon)
			result.precision = "INTERPOLATED"
		else:
			result.alternative_addresses.append(matched_address)
if __name__ == "__main__":
	# Manual smoke test: sends a live request to the MassGIS service.
	gc = GeocoderMassGIS()
	gc.debug_enabled = True
	print(gc.FindAddr(["457","Union Street","","West Springfield","MA",""]))
	#print(gc.FindAddr(["10","Improbable Street","","Westfield","MA","01085"]))
	#print gc.FindAddr(["32","Park Avenue Court","","West Springfield","MA",""])
| david672orford/pykarta | pykarta/geocoder/massgis.py | Python | gpl-2.0 | 3,478 | 0.02674 |
# -*- coding: iso-8859-1 -*-
# __init__.py: Top level .py file for python solution analysis tools.
#
# Copyright (C) 2010
# ASTRON (Netherlands Institute for Radio Astronomy)
# P.O.Box 2, 7990 AA Dwingeloo, The Netherlands
#
# This file is part of the LOFAR software suite.
# The LOFAR software suite is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# The LOFAR software suite is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with the LOFAR software suite. If not, see <http://www.gnu.org/licenses/>.
#
# $Id: __init__.py 12729 2010-03-24 13:39:59Z vdtol $
#from ionosphere import *
__all__ = ['ionosphere', 'parmdbmain'] | kernsuite-debian/lofar | CEP/Calibration/ExpIon/src/__init__.py | Python | gpl-3.0 | 1,065 | 0.001878 |
#!/usr/bin/env python
# Install.py tool to build the GPU library
# used to automate the steps described in the README file in this dir
from __future__ import print_function
import sys,os,subprocess
# help message
help = """
Syntax from src dir: make lib-gpu args="-m machine -h hdir -a arch -p precision -e esuffix -m -o osuffix"
Syntax from lib dir: python Install.py -m machine -h hdir -a arch -p precision -e esuffix -m -o osuffix
specify one or more options, order does not matter
copies an existing Makefile.machine in lib/gpu to Makefile.auto
optionally edits these variables in Makefile.auto:
CUDA_HOME, CUDA_ARCH, CUDA_PRECISION, EXTRAMAKE
optionally uses Makefile.auto to build the GPU library -> libgpu.a
and to copy a Makefile.lammps.esuffix -> Makefile.lammps
optionally copies Makefile.auto to a new Makefile.osuffix
-m = use Makefile.machine as starting point, copy to Makefile.auto
default machine = linux
-h = set CUDA_HOME variable in Makefile.auto to hdir
hdir = path to NVIDIA Cuda software, e.g. /usr/local/cuda
-a = set CUDA_ARCH variable in Makefile.auto to arch
use arch = 20 for Tesla C2050/C2070 (Fermi) (deprecated as of CUDA 8.0)
or GeForce GTX 580 or similar
use arch = 30 for Tesla K10 (Kepler)
use arch = 35 for Tesla K40 (Kepler) or GeForce GTX Titan or similar
use arch = 37 for Tesla dual K80 (Kepler)
use arch = 60 for Tesla P100 (Pascal)
-p = set CUDA_PRECISION variable in Makefile.auto to precision
use precision = double or mixed or single
-e = set EXTRAMAKE variable in Makefile.auto to Makefile.lammps.esuffix
-b = make the GPU library using Makefile.auto
first performs a "make clean"
then produces libgpu.a if successful
also copies EXTRAMAKE file -> Makefile.lammps
-e can set which Makefile.lammps.esuffix file is copied
-o = copy final Makefile.auto to Makefile.osuffix
Examples:
make lib-gpu args="-b" # build GPU lib with default Makefile.linux
make lib-gpu args="-m xk7 -p single -o xk7.single" # create new Makefile.xk7.single, altered for single-precision
make lib-gpu args="-m mpi -a 35 -p single -o mpi.mixed -b" # create new Makefile.mpi.mixed, also build GPU lib with these settings
"""
# print error message or help
def error(msg=None):
  """Print an error message, or the usage text when no message is given,
  then terminate.

  Exits with status 1 so callers such as `make` can detect the failure;
  the original called sys.exit() with no argument, exiting with status 0
  and letting build automation continue past a failed step. The
  parameter was also renamed from `str`, which shadowed the builtin.
  """
  if not msg: print(help)
  else: print("ERROR",msg)
  sys.exit(1)
# parse args
# Flags: -m machine suffix, -h CUDA home dir, -a CUDA arch, -p precision,
# -e EXTRAMAKE suffix, -b build now, -o output Makefile suffix.
# Every value-taking option consumes two argv slots.

args = sys.argv[1:]
nargs = len(args)
if nargs == 0: error()

isuffix = "linux"
hflag = aflag = pflag = eflag = 0
makeflag = 0
outflag = 0

iarg = 0
while iarg < nargs:
  if args[iarg] == "-m":
    if iarg+2 > nargs: error()
    isuffix = args[iarg+1]
    iarg += 2
  elif args[iarg] == "-h":
    if iarg+2 > nargs: error()
    hflag = 1
    hdir = args[iarg+1]
    iarg += 2
  elif args[iarg] == "-a":
    if iarg+2 > nargs: error()
    aflag = 1
    arch = args[iarg+1]
    iarg += 2
  elif args[iarg] == "-p":
    if iarg+2 > nargs: error()
    pflag = 1
    precision = args[iarg+1]
    iarg += 2
  elif args[iarg] == "-e":
    if iarg+2 > nargs: error()
    eflag = 1
    lmpsuffix = args[iarg+1]
    iarg += 2
  elif args[iarg] == "-b":
    makeflag = 1
    iarg += 1
  elif args[iarg] == "-o":
    if iarg+2 > nargs: error()
    outflag = 1
    osuffix = args[iarg+1]
    iarg += 2
  else: error()

# Translate the precision keyword into the compiler define used by the
# GPU library makefiles.
if pflag:
  if precision == "double": precstr = "-D_DOUBLE_DOUBLE"
  elif precision == "mixed": precstr = "-D_SINGLE_DOUBLE"
  elif precision == "single": precstr = "-D_SINGLE_SINGLE"
  else: error("Invalid precision setting")
# create Makefile.auto
# reset EXTRAMAKE, CUDA_HOME, CUDA_ARCH, CUDA_PRECISION if requested

if not os.path.exists("Makefile.%s" % isuffix):
  error("lib/gpu/Makefile.%s does not exist" % isuffix)

lines = open("Makefile.%s" % isuffix,'r').readlines()
fp = open("Makefile.auto",'w')

for line in lines:
  words = line.split()
  # Only simple "VAR = value" assignments (exactly 3 tokens) are
  # candidates for rewriting; everything else passes through unchanged.
  if len(words) != 3:
    fp.write(line)
    continue
  if hflag and words[0] == "CUDA_HOME" and words[1] == '=':
    line = line.replace(words[2],hdir)
  if aflag and words[0] == "CUDA_ARCH" and words[1] == '=':
    line = line.replace(words[2],"-arch=sm_%s" % arch)
  if pflag and words[0] == "CUDA_PRECISION" and words[1] == '=':
    line = line.replace(words[2],precstr)
  if eflag and words[0] == "EXTRAMAKE" and words[1] == '=':
    line = line.replace(words[2],"Makefile.lammps.%s" % lmpsuffix)
  fp.write(line)
fp.close()
# perform make
# make operations copies EXTRAMAKE file to Makefile.lammps
# NOTE: commands run through the shell (shell=True); inputs are
# locally-supplied build options, not untrusted data.

if makeflag:
  print("Building libgpu.a ...")
  cmd = "rm -f libgpu.a"
  subprocess.check_output(cmd,stderr=subprocess.STDOUT,shell=True)
  cmd = "make -f Makefile.auto clean; make -f Makefile.auto"
  txt = subprocess.check_output(cmd,stderr=subprocess.STDOUT,shell=True)
  print(txt.decode('UTF-8'))
  # Verify the build actually produced its two artifacts.
  if not os.path.exists("libgpu.a"):
    error("Build of lib/gpu/libgpu.a was NOT successful")
  if not os.path.exists("Makefile.lammps"):
    error("lib/gpu/Makefile.lammps was NOT created")

# copy new Makefile.auto to Makefile.osuffix

if outflag:
  print("Creating new Makefile.%s" % osuffix)
  cmd = "cp Makefile.auto Makefile.%s" % osuffix
  subprocess.check_output(cmd,stderr=subprocess.STDOUT,shell=True)
"""
Interactive tool to draw mask on an image or image-like array.
TODO:
* need concept of subannotation
* need to take options on a right click of an annotation
    * add support for arbitrary polygons back in.
* rename species_list to label_list or category_list
* Just use metadata instead of species / category / label
# Need to incorporate parts into metadata
Notes:
3. Change bounding box and update continuously to the original image the
new ANNOTATIONs
2. Make new window and frames inside, double click to pull up normal window
with editing start with just taking in 6 images and ANNOTATIONs
1. ANNOTATION ID number, then list of 4 tuples
python -m utool.util_inspect check_module_usage --pat="interact_annotations.py"
References:
Adapted from matplotlib/examples/event_handling/poly_editor.py
Jan 9 2014: taken from: https://gist.github.com/tonysyu/3090704
CommandLine:
python -m plottool_ibeis.interact_annotations --test-test_interact_annots --show
"""
from __future__ import absolute_import, division, print_function
import six
import re
import numpy as np
try:
import vtool_ibeis as vt
except ImportError:
pass
import utool as ut
import itertools as it
import matplotlib as mpl
from six.moves import zip, range
from plottool_ibeis import draw_func2 as df2
from plottool_ibeis import abstract_interaction
print, rrr, profile = ut.inject2(__name__)
# Placeholder species label for unlabeled annotations
DEFAULT_SPECIES_TAG = '____'

# FIXME: these hotkey bindings should be configurable / generic
ACCEPT_SAVE_HOTKEY = None  # 'ctrl+a'
ADD_RECTANGLE_HOTKEY = 'ctrl+a'  # 'ctrl+d'
ADD_RECTANGLE_FULL_HOTKEY = 'ctrl+f'
DEL_RECTANGLE_HOTKEY = 'ctrl+d'  # 'ctrl+r'
TOGGLE_LABEL_HOTKEY = 'ctrl+t'

# When species typing is disabled, use bare single-key hotkeys instead
# of ctrl chords.
HACK_OFF_SPECIES_TYPING = True
if HACK_OFF_SPECIES_TYPING:
    ADD_RECTANGLE_HOTKEY = 'a'  # 'ctrl+d'
    ADD_RECTANGLE_FULL_HOTKEY = 'f'
    DEL_RECTANGLE_HOTKEY = 'd'  # 'ctrl+r'
    TOGGLE_LABEL_HOTKEY = 't'

NEXT_IMAGE_HOTKEYS = ['right', 'pagedown']
PREV_IMAGE_HOTKEYS = ['left', 'pageup']

# One full turn in radians (2 * pi)
TAU = np.pi * 2
class AnnotPoly(mpl.patches.Polygon, ut.NiceRepr):
    """
    Helper to represent an annotation polygon

    ibeis --aidcmd='Interact image' --aid=1

    Example:
        >>> # DISABLE_DOCTEST
        >>> from plottool_ibeis.interact_annotations import *  # NOQA
        >>> verts = vt.verts_from_bbox([0, 0, 10, 10])
        >>> poly = AnnotPoly(None, 0, verts, 0, '____')
    """

    def __init__(poly, ax, num, verts, theta, species, fc=(0, 0, 0),
                 line_color=(1, 1, 1), line_width=4, is_orig=False,
                 metadata=None, valid_species=None, manager=None):
        # NOTE: this class uses `poly` in place of the conventional `self`.
        super(AnnotPoly, poly).__init__(verts, animated=True, fc=fc, ec='none',
                                        alpha=0)
        poly.manager = manager
        # Ensure basecoords consistency (normalize to axis-aligned bbox verts)
        poly.basecoords = vt.verts_from_bbox(vt.bbox_from_verts(poly.xy))
        #poly.basecoords = poly.xy
        poly.num = num
        poly.is_orig = is_orig
        poly.theta = theta
        poly.metadata = metadata
        poly.valid_species = valid_species
        poly.tab_list = valid_species
        # put in previous text and tabcomplete list for autocompletion
        poly.tctext = ''
        poly.tcindex = 0
        poly.anchor_idx = 2
        poly.child_polys = {}
        # Display stuff that should be removed from constructor
        poly.xy = calc_display_coords(poly.basecoords, poly.theta)
        poly.lines = poly._make_lines(line_color, line_width)
        poly.handle = poly._make_handle_line()
        poly.species = species
        if ax is not None:
            poly.axes_init(ax)

    def axes_init(poly, ax):
        # Create the species/metadata text artists on the given axes.
        species = poly.species
        metadata = poly.metadata
        if isinstance(metadata, ut.LazyDict):
            # only display values that have already been computed
            metadata_ = ut.dict_subset(metadata, metadata.cached_keys())
        else:
            metadata_ = metadata
        poly.species_tag = ax.text(
            #tagpos[0], tagpos[1],
            0, 0,
            species,
            bbox={'facecolor': 'white', 'alpha': .8},
            verticalalignment='top',
        )
        poly.metadata_tag = ax.text(
            0, 0,
            #tagpos[0] + 5, tagpos[1] + 80,
            ut.repr3(metadata_, nobr=True),
            bbox={'facecolor': 'white', 'alpha': .7},
            verticalalignment='top',
        )
        # ???
        poly.species_tag.remove()  # eliminate "leftover" copies
        poly.metadata_tag.remove()
        #
        poly.update_display_coords()

    def move_to_back(poly):
        # FIXME: doesn't work exactly
        # Probably need to do this in the context of the other polys
        zorder = 0
        poly.set_zorder(zorder)
        poly.lines.set_zorder(zorder)
        poly.handle.set_zorder(zorder)

    def __nice__(poly):
        return '(num=%r)' % (poly.num)

    def add_to_axis(poly, ax):
        # Register the patch and its two line artists with the axes.
        ax.add_patch(poly)
        ax.add_line(poly.lines)
        ax.add_line(poly.handle)

    def remove_from_axis(poly, ax):
        poly.remove()
        poly.lines.remove()
        poly.handle.remove()

    def draw_self(poly, ax, show_species_tags=False, editable=True):
        ax.draw_artist(poly)
        # Toggle vertex markers on/off depending on editability.
        if not editable and poly.lines.get_marker():
            poly.lines.set_marker('')
        elif editable and not poly.lines.get_marker():
            poly.lines.set_marker('o')
        ax.draw_artist(poly.lines)
        if editable:
            ax.draw_artist(poly.handle)
        if editable and show_species_tags:
            # Hack to fix matplotlib 1.5 bug
            poly.species_tag.figure = ax.figure
            poly.metadata_tag.figure = ax.figure
            ax.draw_artist(poly.species_tag)
            ax.draw_artist(poly.metadata_tag)

    def _make_lines(poly, line_color, line_width):
        """ verts - list of (x, y) tuples """
        _xs, _ys = list(zip(*poly.xy))
        color = np.array(line_color)
        marker_face_color = line_color
        line_kwargs = {'lw': line_width, 'color': color,
                       'mfc': marker_face_color}
        lines = mpl.lines.Line2D(_xs, _ys, marker='o', alpha=1, animated=True,
                                 **line_kwargs)
        return lines

    def _make_handle_line(poly):
        # The "handle" is the short green line used to rotate the polygon.
        _xs, _ys = list(zip(*poly.calc_handle_display_coords()))
        line_width = 4
        line_color = (0, 1, 0)
        color = np.array(line_color)
        marker_face_color = line_color
        line_kwargs = {'lw': line_width, 'color': color, 'mfc': marker_face_color}
        lines = mpl.lines.Line2D(_xs, _ys, marker='o', alpha=1, animated=True,
                                 **line_kwargs)
        return lines

    def calc_tag_position(poly):
        r"""
        CommandLine:
            python -m plottool_ibeis.interact_annotations --test-calc_tag_position --show

        Example:
            >>> # DISABLE_DOCTEST
            >>> from plottool_ibeis.interact_annotations import *  # NOQA
            >>> poly = ut.DynStruct()
            >>> poly.basecoords = vt.verts_from_bbox([0, 0, 400, 400], True)
            >>> poly.theta = 0
            >>> poly.xy = vt.verts_from_bbox([0, 0, 400, 400], True)
            >>> tagpos = poly.calc_tag_position()
            >>> print('tagpos = %r' % (tagpos,))
        """
        # Anchor the tag at the unrotated top-right corner, then rotate it
        # around the polygon center along with the polygon.
        points = [[
            max(list(zip(*poly.basecoords))[0]),
            min(list(zip(*poly.basecoords))[1])
        ]]
        tagpos = rotate_points_around(points, poly.theta, *points_center(poly.xy))[0]
        return tagpos

    def calc_handle_display_coords(poly):
        # Handle length scales with the displayed image height
        # (manager.img assumed to be an (h, w, ...) ndarray).
        img_h = poly.manager.img.shape[0]
        handle_length = img_h // 32
        #MIN_HANDLE_LENGTH = 25
        #handle_length = MIN_HANDLE_LENGTH
        #handle_length = max(MIN_HANDLE_LENGTH, (h / 4))
        cx, cy = points_center(poly.xy)
        w, h = vt.get_pointset_extent_wh(np.array(poly.basecoords))
        x0, y0 = cx, (cy - (h / 2))  # start at top edge
        x1, y1 = (x0, y0 - handle_length)
        pts = [(x0, y0), (x1, y1)]
        pts = rotate_points_around(pts, poly.theta, cx, cy)
        return pts

    def update_color(poly, selected=False, editing_parts=False):
        if editing_parts:
            poly.lines.set_color(df2.PINK)
        elif selected:
            # Add selected color
            sel_color = df2.ORANGE if poly.is_orig else df2.LIGHT_BLUE
            poly.lines.set_color(sel_color)
        else:
            # Only touch the artist when the color actually changes.
            line = poly.lines
            line_color = line.get_color()
            desel_color = df2.WHITE if poly.is_orig else df2.LIGHTGRAY
            if np.any(line_color != np.array(desel_color)):
                line.set_color(np.array(desel_color))

    def update_lines(poly):
        poly.lines.set_data(list(zip(*poly.xy)))
        poly.handle.set_data(list(zip(*poly.calc_handle_display_coords())))

    def set_species(poly, text):
        poly.tctext = text
        poly.species_tag.set_text(text)

    def increment_species(poly, amount=1):
        # Cycle through the valid species list (tab-completion behavior).
        if len(poly.tab_list) > 0:
            tci = (poly.tcindex + amount) % len(poly.tab_list)
            poly.tcindex = tci
            # All tab is going to do is go through the possibilities
            poly.species_tag.set_text(poly.tab_list[poly.tcindex])

    def resize_poly(poly, x, y, idx, ax):
        """
        Resize a rectangle using idx as the given anchor point. Respects
        current rotation.

        CommandLine:
            python -m plottool_ibeis.interact_annotations --exec-resize_poly --show

        Example:
            >>> # DISABLE_DOCTEST
            >>> from plottool_ibeis.interact_annotations import *  # NOQA
            >>> (h, w) = img.shape[0:2]
            >>> x1, y1 = 10, 10
            >>> x2, y2 = w - 10, h - 10
            >>> coords = ((x1, y1), (x1, y2), (x2, y2), (x2, y1))
            >>> x = 3 * w / 4
            >>> y = 3 * h / 4
            >>> idx = 3
            >>> resize_poly(poly, x, y, idx)
            >>> update_UI()
            >>> import plottool_ibeis as pt
            >>> pt.show_if_requested()
        """
        # TODO: allow resize by middle click to scale from the center
        # the minus one is because the last coordinate is duplicated (by
        # matplotlib) to get a closed polygon
        tmpcoords = poly.xy[:-1]
        idx = idx % len(tmpcoords)
        previdx = (idx - 1) % len(tmpcoords)
        nextidx = (idx + 1) % len(tmpcoords)
        (dx, dy) = (x - poly.xy[idx][0], y - poly.xy[idx][1])
        # Fudge factor is due to gravity vectors constants
        fudge_factor = (idx) * TAU / 4
        poly_theta = poly.theta + fudge_factor
        # Move the dragged vertex, then propagate the motion to its two
        # neighbors in polar form so the rectangle edges stay
        # perpendicular under rotation.
        polar_idx2prev = polarDelta(tmpcoords[idx], tmpcoords[previdx])
        polar_idx2next = polarDelta(tmpcoords[idx], tmpcoords[nextidx])
        tmpcoords[idx] = (tmpcoords[idx][0] + dx, tmpcoords[idx][1] + dy)
        mag_delta = np.linalg.norm((dx, dy))
        theta_delta = np.arctan2(dy, dx)
        theta_rot = theta_delta - (poly_theta + TAU / 4)
        rotx = mag_delta * np.cos(theta_rot)
        roty = mag_delta * np.sin(theta_rot)
        polar_idx2prev[0] -= rotx
        polar_idx2next[0] += roty
        tmpcoords[previdx] = apply_polarDelta(polar_idx2prev, tmpcoords[idx])
        tmpcoords[nextidx] = apply_polarDelta(polar_idx2next, tmpcoords[idx])
        # rotate the points by -theta to get the "unrotated" points for use as
        # basecoords
        tmpcoords = rotate_points_around(tmpcoords, -poly.theta,
                                         *points_center(poly.xy))
        # ensure the poly is closed, matplotlib might do this, but I'm not sure
        # if it preserves the ordering we depend on, even if it does add the
        # point
        tmpcoords = tmpcoords[:] + [tmpcoords[0]]
        dispcoords = calc_display_coords(tmpcoords, poly.theta)
        # Only commit when the new rectangle stays inside the axes and is
        # not degenerately small.
        if (check_valid_coords(ax, dispcoords) and check_min_wh(tmpcoords)):
            poly.basecoords = tmpcoords
            poly.update_display_coords()

    def rotate_poly(poly, dtheta, ax):
        coords_lis = calc_display_coords(poly.basecoords, poly.theta + dtheta)
        if check_valid_coords(ax, coords_lis):
            poly.theta += dtheta
            poly.update_display_coords()

    def move_poly(poly, dx, dy, ax):
        new_coords = [(x + dx, y + dy) for (x, y) in poly.basecoords]
        coords_list = calc_display_coords(new_coords, poly.theta)
        if check_valid_coords(ax, coords_list):
            poly.basecoords = new_coords
            poly.update_display_coords()

    def update_display_coords(poly):
        # Recompute the rotated display coords and reposition the text tags.
        poly.xy = calc_display_coords(poly.basecoords, poly.theta)
        tag_pos = poly.calc_tag_position()
        poly.species_tag.set_position((tag_pos[0] + 5, tag_pos[1]))
        poly.metadata_tag.set_position((tag_pos[0] + 5, tag_pos[1] + 50))

    def print_info(poly):
        print('poly = %r' % (poly,))
        print('poly.tag_text = %r' % (poly.species_tag.get_text(),))
        print('poly.metadata = %r' % (poly.metadata,))

    def get_poly_mask(poly, shape):
        # Rasterize the polygon into a boolean (h, w) mask for an image
        # of the given shape.
        h, w = shape[0:2]
        y, x = np.mgrid[:h, :w]
        points = np.transpose((x.ravel(), y.ravel()))
        verts = poly.xy
        path = mpl.path.Path(verts)
        mask = path.contains_points(points)
        #mask = nxutils.points_inside_poly(points, verts)
        return mask.reshape(h, w)

    def is_near_handle(poly, xy_pt, max_dist):
        line = poly.calc_handle_display_coords()
        return is_within_distance_from_line(xy_pt, line, max_dist)

    @property
    def size(poly):
        # (w, h) of the polygon's display bounding box
        return vt.bbox_from_verts(poly.xy)[2:4]
@six.add_metaclass(ut.ReloadingMetaclass)
class AnnotationInteraction(abstract_interaction.AbstractInteraction):
"""
An interactive polygon editor.
SeeAlso:
ibeis.viz.interact.interact_annotations2
(ensure that any updates here are propogated there)
Args:
verts_list (list) : list of lists of (float, float)
List of (x, y) coordinates used as vertices of the polygon.
"""
# --- Initialization and Figure Widgets
def __init__(self, img, img_ind=None, commit_callback=None,
verts_list=None,
bbox_list=None,
theta_list=None,
species_list=None,
metadata_list=None,
line_width=4, line_color=(1, 1, 1), face_color=(0, 0, 0),
fnum=None, default_species=DEFAULT_SPECIES_TAG,
next_callback=None, prev_callback=None, do_mask=False,
valid_species=[],
**kwargs):
super(AnnotationInteraction, self).__init__(fnum=fnum, **kwargs)
self.valid_species = valid_species
self.commit_callback = commit_callback # commit_callback
self.but_width = .14
#self.but_height = .08
self.next_prev_but_height = .08
self.but_height = self.next_prev_but_height - .01
self.callback_funcs = dict([
('close_event', self.on_close),
('draw_event', self.draw_callback),
('button_press_event', self.on_click),
('button_release_event', self.on_click_release),
('figure_leave_event', self.on_figure_leave),
('key_press_event', self.on_key_press),
('motion_notify_event', self.on_motion),
('pick_event', self.on_pick),
#('resize_event', self.on_resize),
])
self.mpl_callback_ids = {}
self.img = img
self.show_species_tags = True
self.max_dist = 10
def _reinitialize_variables():
self.do_mask = do_mask
self.img_ind = img_ind
self.species_tag = default_species
self.showverts = True
self.fc_default = face_color
self.mouseX = None # mouse X coordinate
self.mouseY = None # mouse Y coordinate
self.ind_xy = None
self._autoinc_polynum = it.count(0) # num polys in image
self._poly_held = False # if any poly is active
self._selected_poly = None # active polygon
self.parent_poly = None # level of parts heirarchy
self.background = None
# Ensure nothing is down
self.reset_mouse_state()
_reinitialize_variables()
# hack involving exploting lexical scoping to save defaults for a
# restore operation
self.reinitialize_variables = _reinitialize_variables
try:
self.fig = df2.figure(fnum=self.fnum, doclf=True, docla=True)
df2.close_figure(self.fig)
except AttributeError:
pass
self.fig = df2.figure(fnum=self.fnum, doclf=True, docla=True)
self.reinitialize_figure(fnum=self.fnum)
assert verts_list is None or bbox_list is None, 'only one can be specified'
# bbox_list will get converted to verts_list
if verts_list is not None:
bbox_list = vt.bboxes_from_vert_list(verts_list)
if bbox_list is not None:
verts_list = [vt.verts_from_bbox(bbox) for bbox in bbox_list]
if theta_list is None:
theta_list = [0 for _ in verts_list]
if species_list is None:
species_list = [self.species_tag for _ in verts_list]
if metadata_list is None:
metadata_list = [None for _ in verts_list]
# Create the list of polygons
self.handle_polygon_creation(bbox_list, theta_list, species_list, metadata_list)
self._ind = None # the active vert
self._current_rotate_poly = None
self.mpl_callback_ids = {}
self.connect_mpl_callbacks(self.fig.canvas)
self.add_action_buttons()
self.update_callbacks(next_callback, prev_callback)
def reinitialize_figure(self, fnum=None):
self.fig.clear()
self.fig.clf()
#self.fig.cla()
#ut.qflag()
self.fnum = fnum
#print(self.fnum)
ax = df2.gca()
#self.fig.ax = ax
self.ax = ax
df2.remove_patches(self.ax)
df2.imshow(self.img, fnum=fnum)
ax.set_clip_on(False)
ax.set_title(('\n'.join([
'Click and drag to select/move/resize/orient an ANNOTATION',
#'Press enter to clear the species tag of the selected ANNOTATION',
'Press tab to cycle through annotation species',
#'Type to edit the ANNOTATION species (press tab to autocomplete)'
])))
def add_action_buttons(self):
self.append_button(
'Add Annotation\n' + pretty_hotkey_map(ADD_RECTANGLE_HOTKEY),
rect=[0.18, 0.015, self.but_width, self.but_height],
callback=self.add_new_poly
)
# self.append_button(
# 'Add Full Annotation\n' + pretty_hotkey_map(ADD_RECTANGLE_FULL_HOTKEY),
# rect=[0.34, 0.015, self.but_width, self.but_height],
# callback=ut.partial(self.add_new_poly, full=True)
# )
self.append_button(
'Delete Annotation\n' + pretty_hotkey_map(DEL_RECTANGLE_HOTKEY),
rect=[0.50, 0.015, self.but_width, self.but_height],
callback=self.delete_current_poly
)
self.append_button(
'Save and Exit\n' + pretty_hotkey_map(ACCEPT_SAVE_HOTKEY),
rect=[0.66, 0.015, self.but_width, self.but_height],
callback=self.save_and_exit
)
def disconnect_mpl_callbacks(self, canvas):
""" disconnects all connected matplotlib callbacks """
for name, callbackid in six.iteritems(self.mpl_callback_ids):
canvas.mpl_disconnect(callbackid)
self.mpl_callback_ids = {}
def connect_mpl_callbacks(self, canvas):
""" disconnects matplotlib callbacks specified in the
self.mpl_callback_ids dict """
#http://matplotlib.org/1.3.1/api/backend_bases_api.html
# Create callback ids
self.disconnect_mpl_callbacks(canvas)
self.mpl_callback_ids = {
name: canvas.mpl_connect(name, func)
for name, func in six.iteritems(self.callback_funcs)
}
self.fig.canvas = canvas
# --- Updates
def update_callbacks(self, next_callback, prev_callback):
self.prev_callback = prev_callback
self.next_callback = next_callback
# Hack because the callbacks actually need to be wrapped
_next_callback = None if self.next_callback is None else self.next_image
_prev_callback = None if self.prev_callback is None else self.prev_image
self.append_button(
'Previous Image\n' + pretty_hotkey_map(PREV_IMAGE_HOTKEYS),
rect=[0.02, 0.01, self.but_width, self.next_prev_but_height],
callback=_prev_callback,
)
self.append_button(
'Next Image\n' + pretty_hotkey_map(NEXT_IMAGE_HOTKEYS),
rect=[0.82, 0.01, self.but_width, self.next_prev_but_height],
callback=_next_callback,
)
    def update_image_and_callbacks(self, img, bbox_list, theta_list,
                                   species_list, metadata_list, next_callback,
                                   prev_callback):
        """ Swap in a new image (and its annotations) and rewire all callbacks.

        Used when paging between images so the same interaction window is
        reused instead of rebuilt.  The order matters: callbacks are torn
        down before the polygons are removed and reconnected only after the
        figure is redrawn.
        """
        self.disconnect_mpl_callbacks(self.fig.canvas)
        # Remove the previous image's polygon artists before rebuilding state
        for poly in six.itervalues(self.polys):
            poly.remove()
        self.polys = {}
        self.reinitialize_variables()
        self.img = img
        self.reinitialize_figure(fnum=self.fnum)
        self.handle_polygon_creation(bbox_list, theta_list, species_list,
                                     metadata_list)
        self.add_action_buttons()
        self.draw()
        self.connect_mpl_callbacks(self.fig.canvas)
        self.update_callbacks(next_callback, prev_callback)
        print('[interact_annot] drawing')
        self.draw()
        self.update_UI()
def _update_poly_colors(self):
for poly in six.itervalues(self.uneditable_polys):
poly.update_color()
for ind, poly in six.iteritems(self.editable_polys):
assert poly.num == ind
selected = poly is self._selected_poly
editing_parts = poly is self.parent_poly
poly.update_color(selected, editing_parts)
self.draw()
def _update_poly_lines(self):
for poly in six.itervalues(self.uneditable_polys):
#self.last_vert_ind = len(poly.xy) - 1
poly.update_lines()
for poly in six.itervalues(self.editable_polys):
self.last_vert_ind = len(poly.xy) - 1
poly.update_lines()
    def update_UI(self):
        """ Redraw the interaction using matplotlib blitting.

        Restores the cached background, then redraws only the polygon
        artists on top of it.  Assumes ``self.background`` was captured by
        ``draw_callback`` -- NOTE(review): not guarded against None here;
        confirm a draw_event always fires first.
        """
        self._update_poly_lines()
        self._update_poly_colors()
        self.fig.canvas.restore_region(self.background)
        self.draw_artists()
        self.fig.canvas.blit(self.ax.bbox)
def draw_artists(self):
for poly in six.itervalues(self.uneditable_polys):
poly.draw_self(self.ax, editable=False)
for poly in six.itervalues(self.editable_polys):
poly.draw_self(self.ax, self.show_species_tags)
    # --- Data Maintenance / Other
@property
def uneditable_polys(self):
if self.in_edit_parts_mode:
return {self.parent_poly.num: self.parent_poly}
#return self.polys
else:
return {}
@property
def editable_polys(self):
#return self.polys
if self.in_edit_parts_mode:
return self.parent_poly.child_polys
else:
if self.polys is None:
self.polys = {}
return self.polys
    def get_poly_under_cursor(self, x, y):
        """
        get the index of the vertex under cursor if within max_dist tolerance

        Args:
            x, y: cursor position in figure (display) space.

        Returns:
            tuple: (poly_ind, vert_ind) of the closest editable-polygon
            vertex, or (None, None) when nothing is close enough.
        """
        # Remove any deleted polygons
        poly_dict = {k: v for k, v in self.editable_polys.items() if v is not None}
        if len(poly_dict) > 0:
            poly_inds = list(poly_dict.keys())
            poly_list = ut.take(poly_dict, poly_inds)
            # Put polygon coords into figure space
            poly_pts = [poly.get_transform().transform(np.asarray(poly.xy))
                        for poly in poly_list]
            # Find the nearest vertex from the annotations
            ind_dist_list = [vt.nearest_point(x, y, polypts)
                             for polypts in poly_pts]
            dist_lists = ut.take_column(ind_dist_list, 1)
            min_idx = np.argmin(dist_lists)
            sel_polyind = poly_inds[min_idx]
            sel_vertx, sel_dist = ind_dist_list[min_idx]
            # Ensure nearest distance is within threshold
            # NOTE(review): compared against max_dist ** 2 -- presumably
            # vt.nearest_point returns a *squared* distance; confirm.
            if sel_dist >= self.max_dist ** 2:
                sel_polyind, sel_vertx = (None, None)
        else:
            sel_polyind, sel_vertx = (None, None)
        return sel_polyind, sel_vertx
def get_most_recently_added_poly(self):
if len(self.editable_polys) == 0:
return None
else:
# most recently added polygon has the highest index
poly_ind = max(list(self.editable_polys.keys()))
return self.editable_polys[poly_ind]
def new_polygon(self, verts, theta, species, fc=(0, 0, 0),
line_color=(1, 1, 1), line_width=4, is_orig=False,
metadata=None):
""" verts - list of (x, y) tuples """
# create new polygon from verts
num = six.next(self._autoinc_polynum)
poly = AnnotPoly(ax=self.ax, num=num, verts=verts, theta=theta,
species=species, fc=fc, line_color=line_color,
line_width=line_width, is_orig=is_orig,
metadata=metadata, valid_species=self.valid_species,
manager=self)
poly.set_picker(self.is_poly_pickable)
return poly
    def handle_polygon_creation(self, bbox_list, theta_list, species_list,
                                metadata_list):
        """ Maintain original input

        Records the original annotation data (so save_and_exit can diff
        against it) and creates one AnnotPoly per input bbox.
        """
        assert bbox_list is not None
        if theta_list is None:
            theta_list = [0.0 for _ in range(len(bbox_list))]
        if species_list is None:
            species_list = ['' for _ in range(len(bbox_list))]
        # NOTE(review): the last two assert messages are duplicated typos
        # ('inconconsitent data2'); runtime strings left untouched here.
        assert len(bbox_list) == len(theta_list), 'inconconsitent data1'
        assert len(bbox_list) == len(species_list), 'inconconsitent data2'
        assert len(bbox_list) == len(metadata_list), 'inconconsitent data2'
        # Keep the originals so the commit callback can compute diffs later
        self.original_indices = list(range(len(bbox_list)))
        self.original_bbox_list = bbox_list
        self.original_theta_list = theta_list
        self.original_species_list = species_list
        self.original_metadata_list = metadata_list
        # Convert each bbox to vertices
        verts_list = [vt.verts_from_bbox(bbox) for bbox in bbox_list]
        # NOTE(review): ``verts = np.array(verts)`` rebinds the loop variable,
        # so enforce_dims clamps a throwaway copy -- the clamped coordinates
        # never reach verts_list.  Confirm whether clamping was intended.
        for verts in verts_list:
            verts = np.array(verts)
            for vert in verts:
                enforce_dims(self.ax, vert)
        # Create polygons
        poly_list = [self.new_polygon(verts_, theta, species, is_orig=True,
                                      metadata=metadata)
                     for (verts_, theta, species, metadata) in
                     zip(verts_list, theta_list, species_list, metadata_list)]
        self.polys = {poly.num: poly for poly in poly_list}
        if len(self.polys) != 0:
            # Select poly with largest area
            wh_list = np.array([poly.size for poly in six.itervalues(self.polys)])
            poly_index = list(self.polys.keys())[wh_list.prod(axis=1).argmax()]
            self._selected_poly = self.polys[poly_index]
            self._update_poly_colors()
            self._update_poly_lines()
        else:
            self._selected_poly = None
        # Add polygons to the axis
        for poly in six.itervalues(self.polys):
            poly.add_to_axis(self.ax)
        # Give polygons mpl change callbacks
        #for poly in six.itervalues(self.polys):
        #    poly.add_callback(self.poly_changed)
# --- Actions
    def add_new_poly(self, event=None, full=False):
        """ Adds a new annotation to the image

        Args:
            event: optional mpl event (unused; button callback signature).
            full: when True the new annotation covers the whole image with a
                one-pixel border; otherwise its shape/position are derived
                from the selection (or existing polygons) and the mouse.
        """
        if full:
            (h, w) = self.img.shape[0:2]
            x1, y1 = 1, 1
            x2, y2 = w - 1, h - 1
            coords = ((x1, y1), (x1, y2), (x2, y2), (x2, y1))
        else:
            # Base the default shape on the selected polygon if there is one,
            # otherwise on all currently editable polygons
            if self._selected_poly is not None:
                defaultshape_polys = {
                    self._selected_poly.num:
                    self._selected_poly
                }
            else:
                defaultshape_polys = self.editable_polys
            coords = default_vertices(self.img, defaultshape_polys,
                                      self.mouseX, self.mouseY)
        poly = self.new_polygon(verts=coords, theta=0,
                                species=self.species_tag)
        poly.parent = self.parent_poly
        # Add to the correct place in current hierarchy
        self.editable_polys[poly.num] = poly
        poly.add_to_axis(self.ax)
        #self.polys[poly.num] = poly
        #poly.add_callback(self.poly_changed)
        self._ind = None  # the active vert
        self._selected_poly = self.get_most_recently_added_poly()
        self._update_poly_lines()
        self._update_poly_colors()
        self.draw()
def delete_current_poly(self, event=None):
"""
Removes an annotation
"""
if self._selected_poly is None:
print('[interact_annot] No polygon selected to delete')
else:
print('[interact_annot] delete annot')
poly = self._selected_poly
#self.polys.pop(poly.num)
del self.editable_polys[poly.num]
# remove the poly from the figure itself
poly.remove_from_axis(self.ax)
#reset anything that has to do with current poly
self._selected_poly = self.get_most_recently_added_poly()
self._poly_held = False
if self._selected_poly is not None:
self._update_poly_colors()
self.draw()
    def edit_poly_parts(self, poly):
        """ Enter part-editing mode for *poly* (or leave it when poly is None).

        While a polygon's parts are edited, the rest of the image is
        darkened by a mask built from the polygon.
        """
        if poly is None and self.parent_poly is not None:
            # Leaving part mode: reselect the parent annotation
            self._selected_poly = self.parent_poly
        print('self.parent_poly = %r' % (self.parent_poly,))
        self.parent_poly = poly
        if poly is not None:
            self._selected_poly = self.get_most_recently_added_poly()
        print('self._selected_poly = %r' % (self._selected_poly,))
        if poly is None:
            self.ax.imshow(vt.convert_colorspace(self.img, 'RGB'))
        else:
            # Mask the part of the image not belonging to the annotation
            mask = poly.get_poly_mask(self.img.shape)
            masked_img = apply_mask(self.img, mask)
            self.ax.imshow(vt.convert_colorspace(masked_img, 'RGB'))
        self._update_poly_colors()
@property
def in_edit_parts_mode(self):
return self.parent_poly is not None
def toggle_species_label(self):
print('[interact_annot] toggle_species_label()')
self.show_species_tags = not self.show_species_tags
self.update_UI()
def save_and_exit(self, event, do_close=True):
"""
The Save and Exit Button
write a callback to redraw viz for bbox_list
"""
print('[interact_annot] Pressed Accept Button')
def _get_annottup_list():
annottup_list = []
indices_list = []
#theta_list = []
for poly in six.itervalues(self.polys):
assert poly is not None
index = poly.num
bbox = tuple(map(int, vt.bbox_from_verts(poly.basecoords)))
theta = poly.theta
species = poly.species_tag.get_text()
annottup = (bbox, theta, species)
indices_list.append(index)
annottup_list.append(annottup)
return indices_list, annottup_list
def _send_back_annotations():
print('[interact_annot] _send_back_annotations')
indices_list, annottup_list = _get_annottup_list()
# Delete if index is in original_indices but no in indices_list
deleted_indices = list(set(self.original_indices) -
set(indices_list))
changed_indices = []
unchanged_indices = [] # sanity check
changed_annottups = []
new_annottups = []
original_annottup_list = list(zip(self.original_bbox_list,
self.original_theta_list,
self.original_species_list))
for index, annottup in zip(indices_list, annottup_list):
# If the index is not in the originals then it is new
if index not in self.original_indices:
new_annottups.append(annottup)
else:
if annottup not in original_annottup_list:
changed_annottups.append(annottup)
changed_indices.append(index)
else:
unchanged_indices.append(index)
self.commit_callback(unchanged_indices, deleted_indices,
changed_indices, changed_annottups,
new_annottups)
if self.commit_callback is not None:
_send_back_annotations()
# Make mask from selection
if self.do_mask is True:
self.fig.clf()
self.ax = ax = self.fig.subplot(111)
mask_list = [poly.get_poly_mask(self.img.shape)
for poly in six.itervalues(self.polys)]
if len(mask_list) == 0:
print('[interact_annot] No polygons to make mask out of')
return 0
mask = mask_list[0]
for mask_ in mask_list:
mask = np.maximum(mask, mask_)
#mask = self.get_poly_mask()
# User must close previous figure
# Modify the image with the mask
masked_img = apply_mask(self.img, mask)
# show the modified image
ax.imshow(masked_img)
ax.title('Region outside of mask is darkened')
ax.figure.show()
return
print('[interact_annot] Accept Over')
if do_close:
df2.close_figure(self.fig)
# --- Connected Slots and Callbacks
def next_image(self, event):
if self.next_callback is not None:
self.next_callback()
def prev_image(self, event):
if self.prev_callback is not None:
self.prev_callback()
def start(self):
# FIXME: conform to abstract_interaction start conventions
#self._ensure_running()
#self.show_page()
self.show()
def show(self):
self.draw()
self.bring_to_front()
def draw_callback(self, event):
self.background = self.fig.canvas.copy_from_bbox(self.ax.bbox)
self.draw_artists()
def _show_poly_context_menu(self, event):
def _make_options():
metadata = self._selected_poly.metadata
options = []
options += [
#('Foo: ', ut.partial(print, 'bar')),
#('Move to back ', self._selected_poly.move_to_back),
('PolyInfo: ', self._selected_poly.print_info),
]
if isinstance(metadata, ut.LazyDict):
options += metadata.nocache_eval('annot_context_options')
return options
options = _make_options()
self.show_popup_menu(options, event)
def is_poly_pickable(self, artist, event):
if artist.num in self.editable_polys:
mouse_xy = event.x, event.y
hit = artist.contains_point(mouse_xy)
else:
hit = False
#import utool
#utool.embed()
props = {'dblclick': event.dblclick}
return hit, props
def on_pick(self, event):
""" Makes selected polygon translucent """
if self.debug > 0 or True:
print('[interact_annot] on_pick')
if not self._poly_held:
artist = event.artist
print('[interact_annot] picked artist = %r' % (artist,))
self._selected_poly = artist
self._poly_held = True
if event.dblclick and not self.in_edit_parts_mode:
self.edit_poly_parts(self._selected_poly)
pass
#x, y = event.mouseevent.xdata, event.mouseevent.xdata
    def on_click(self, event):
        """
        python -m ibeis.viz.interact.interact_annotations2 --test-ishow_image2 --show

        Mouse-press handler: shows the context menu (right click), starts
        rotation/selection/drag state (left click), and refreshes the
        blitted background.
        """
        super(AnnotationInteraction, self).on_click(event)
        if self._ind is not None:
            self._ind = None
            return
        if not self.showverts:
            return
        if event.inaxes is None:
            return
        if len(self.editable_polys) == 0:
            print('[interact_annot] No polygons on screen')
            return
        # Right click - context menu
        if event.button == self.RIGHT_BUTTON:
            self._show_poly_context_menu(event)
        # Left click, indicate that a mouse button is down
        if event.button == self.LEFT_BUTTON:
            #if event.dblclick and not self.in_edit_parts_mode:
            #    # On double click enter a single annotation to annotation parts
            #    #print("DOUBLECLICK")
            #    #self.edit_poly_parts(self._selected_poly)
            if event.key == 'shift':
                self._current_rotate_poly = self._selected_poly
            else:
                # Determine if we are clicking the rotation line
                mouse_xy = (event.xdata, event.ydata)
                for poly in six.itervalues(self.editable_polys):
                    if poly.is_near_handle(mouse_xy, self.max_dist):
                        self._current_rotate_poly = poly
                        break
        if event.dblclick:
            # Reset rotation
            if self._current_rotate_poly is not None:
                self._current_rotate_poly.theta = 0
                self._current_rotate_poly.update_display_coords()
        # Begin a vertex drag when a vertex is under the cursor
        polyind, self._ind = self.get_poly_under_cursor(event.x, event.y)
        if self._ind is not None and polyind is not None:
            self._selected_poly = self.editable_polys[polyind]
            if self._selected_poly is None:
                return
            self.ind_xy = self._selected_poly.xy[self._ind]
            self._poly_held = True
            self._selected_poly.anchor_idx = self._ind
        self.mouseX, self.mouseY = event.xdata, event.ydata
        if self._poly_held is True or self._ind is not None:
            # Make the held polygon translucent while it is dragged
            self._selected_poly.set_alpha(.2)
            self._update_poly_colors()
        self._update_poly_colors()
        self._update_poly_lines()
        if self.background is not None:
            self.fig.canvas.restore_region(self.background)
        else:
            print('[interact_annot] error: self.background is none.'
                  ' Trying refresh.')
            # NOTE(review): restore_region(None) is called here before the
            # background is recaptured -- looks like a latent bug; confirm.
            self.fig.canvas.restore_region(self.background)
            self.background = self.fig.canvas.copy_from_bbox(self.ax.bbox)
        # Redraw blitted objects
        self.draw_artists()
        self.fig.canvas.blit(self.ax.bbox)
    def on_motion(self, event):
        """ Mouse-move handler: resize / rotate / translate the active polygon. """
        if ut.VERBOSE:
            print('[interact_annot] on_motion')
            print('[interact_annot] Got key: %r' % event.key)
        super(AnnotationInteraction, self).on_motion(event)
        # uses boolean punning for terseness
        # NOTE(review): ``self.mouseX or None`` maps a coordinate of exactly
        # 0 to None, and the derived deltas below may be False rather than a
        # number -- confirm the x == 0 / y == 0 edge cases are acceptable.
        lastX = self.mouseX or None
        lastY = self.mouseY or None
        # Allow for getting coordinates outside the axes
        ax = self.ax
        mousePos = [event.x, event.y]
        self.mouseX, self.mouseY = ax.transData.inverted().transform(mousePos)
        deltaX = lastX is not None and self.mouseX - lastX
        deltaY = lastY is not None and self.mouseY - lastY
        if not self.showverts:
            return
        #if self.in_edit_parts_mode:
        #    return
        # Middle/right (or ctrl+left) drag performs a quick resize
        quick_resize = (self._poly_held is True and (
            (event.button == self.MIDDLE_BUTTON) or
            (event.button == self.RIGHT_BUTTON) or
            (event.button == self.LEFT_BUTTON and event.key == 'ctrl')
        ))
        if self._poly_held is True and self._ind is not None:
            # Resize by dragging corner
            self._selected_poly.resize_poly(self.mouseX, self.mouseY,
                                            self._ind, self.ax)
            self._selected_poly.anchor_idx = self._ind
        elif quick_resize:
            # Quick resize with special click
            anchor_idx = self._selected_poly.anchor_idx
            idx = (anchor_idx + 2) % 4  # choose opposite anchor point
            self._selected_poly.resize_poly(self.mouseX, self.mouseY, idx,
                                            self.ax)
        elif self._current_rotate_poly:
            # Rotate using handle
            cx, cy = points_center(self._current_rotate_poly.xy)
            theta = np.arctan2(cy - self.mouseY, cx - self.mouseX) - TAU / 4
            dtheta = theta - self._current_rotate_poly.theta
            self._current_rotate_poly.rotate_poly(dtheta, self.ax)
        elif self._ind is None and event.button == self.LEFT_BUTTON:
            # Translate by dragging inside annot
            flag = deltaX is not None and deltaY is not None
            if self._poly_held is True and flag:
                self._selected_poly.move_poly(deltaX, deltaY, self.ax)
            self._ind = None
        else:
            return
        self.update_UI()
    def on_click_release(self, event):
        """ Mouse-release handler: finalize drags and reset hold state. """
        super(AnnotationInteraction, self).on_click_release(event)
        #if self._poly_held is True:
        self._poly_held = False
        self._current_rotate_poly = None
        if not self.showverts:
            return
        if self._selected_poly is None:
            return
        # NOTE(review): _poly_held was just forced False above, so the
        # ``self._poly_held is False`` term makes _flag always True --
        # presumably leftover from an earlier control flow; confirm.
        _flag = (
            self._ind is None or
            self._poly_held is False or
            (self._ind is not None and
             self.is_down['left'] is True and
             self._selected_poly is not None
             )
        )
        if _flag:
            # Clear the translucent drag fill
            self._selected_poly.set_alpha(0)
            #self._selected_poly.set_facecolor('white')
        self.update_UI()
        if self._ind is None:
            return
        if len(self.editable_polys) == 0:
            print('[interact_annot] No polygons on screen')
            return
        if self._selected_poly is None:
            print('[interact_annot] WARNING: Polygon unknown.'
                  ' Using default. (2)')
            self._selected_poly = self.get_most_recently_added_poly()
        # Ignore micro-drags (less than 3 display units of movement)
        curr_xy = self._selected_poly.xy[self._ind]
        if self.ind_xy is not None:
            if np.all(np.fabs(self.ind_xy - curr_xy) < 3):
                return
        self._ind = None
        self._poly_held = False
        self.draw()
def on_figure_leave(self, event):
if self.debug > 0:
print('[interact_annot] figure leave')
#self.print_status()
#self.on_click_release(event)
self._poly_held = False
self._ind = None
self.reset_mouse_state()
#self.print_status()
def on_key_press(self, event):
if self.debug > 0:
print('[interact_annot] on_key_press')
print('[interact_annot] Got key: %r' % event.key)
print('[interact_annot] Got key: %r' % event.key)
if not event.inaxes:
return
if event.key == ACCEPT_SAVE_HOTKEY:
self.save_and_exit(event)
elif event.key == ADD_RECTANGLE_HOTKEY:
self.add_new_poly()
elif event.key == ADD_RECTANGLE_FULL_HOTKEY:
self.add_new_poly(full=True)
elif event.key == DEL_RECTANGLE_HOTKEY:
self.delete_current_poly()
elif event.key == TOGGLE_LABEL_HOTKEY:
self.toggle_species_label()
if re.match('escape', event.key):
self.edit_poly_parts(None)
if re.match('^backspace$', event.key):
self._selected_poly.set_species(DEFAULT_SPECIES_TAG)
if re.match('^tab$', event.key):
self._selected_poly.increment_species(amount=1)
if re.match('^ctrl\+tab$', event.key):
self._selected_poly.increment_species(amount=-1)
# NEXT ANND PREV COMMAND
def _matches_hotkey(key, hotkeys):
return any([re.match(hk, key) is not None for hk in
ut.ensure_iterable(hotkeys)])
if _matches_hotkey(event.key, PREV_IMAGE_HOTKEYS):
self.prev_image(event)
if _matches_hotkey(event.key, NEXT_IMAGE_HOTKEYS):
self.next_image(event)
self.draw()
#def poly_changed(self, poly):
# """ this method is called whenever the polygon object is called """
# print('poly_changed poly=%r' % (poly,))
# # only copy the artist props to the line (except visibility)
# #vis = poly.lines.get_visible()
# #vis = poly.handle.get_visible()
# #poly.lines.set_visible(vis)
# #poly.handle.set_visible(vis)
def pretty_hotkey_map(hotkeys):
    """ Return a human readable '(key1 or key2)' string for button labels. """
    if hotkeys is None:
        return ''
    if not isinstance(hotkeys, list):
        hotkeys = [hotkeys]
    # Placeholder for prettier key spellings (currently the identity map)
    mapping = {}
    mapped_hotkeys = [mapping.get(hk, hk) for hk in hotkeys]
    return '(' + ut.conj_phrase(mapped_hotkeys, 'or') + ')'
def apply_mask(img, mask):
    """ Return a copy of *img* with pixels outside *mask* darkened by 100. """
    out = img.copy()
    outside = ~mask
    darkened = np.clip(out[outside] - 100., 0, 255)
    out[outside] = np.uint8(darkened)
    return out
def points_center(pts):
    """ Mean of the polygon vertices, ignoring the repeated closing point.

    The polygons list their first point twice so they draw as closed rings;
    the duplicate must not be counted twice when averaging (hence [:-1]).
    """
    return np.asarray(pts)[:-1].mean(axis=0)
def rotate_points_around(points, theta, ax, ay):
    """ Rotate 2d *points* by *theta* radians around the point (ax, ay).

    References:
        http://www.euclideanspace.com/maths/geometry/affine/aroundPoint/matrix2d/
    """
    # TODO: Can use vtool_ibeis for this
    ct = np.cos(theta)
    st = np.sin(theta)
    # Homogeneous 2d rotation about (ax, ay): translate, rotate, translate back
    rot_mat = np.array([
        (ct, -st, ax - ct * ax + st * ay),
        (st, ct, ay - st * ax - ct * ay),
        (0, 0, 1),
    ])
    augpts = np.array([np.array((x, y, 1)) for (x, y) in points])
    return [(x, y) for (x, y, z) in rot_mat.dot(augpts.T).T]
def calc_display_coords(oldcoords, theta):
    """ Rotate *oldcoords* by *theta* around their own center point. """
    center_x, center_y = points_center(oldcoords)
    return rotate_points_around(oldcoords, theta, center_x, center_y)
def polarDelta(p1, p2):
    """ Return [magnitude, angle] of the vector from *p1* to *p2*. """
    magnitude = vt.L2(p1, p2)
    angle = np.arctan2(p2[1] - p1[1], p2[0] - p1[0])
    return [magnitude, angle]
def apply_polarDelta(poldelt, cart):
    """ Offset cartesian point *cart* by a [magnitude, angle] polar delta. """
    mag, ang = poldelt
    return (cart[0] + mag * np.cos(ang), cart[1] + mag * np.sin(ang))
def is_within_distance_from_line(pt, line, max_dist):
    """ True if *pt* is within *max_dist* of the segment line[0]-line[1]. """
    pt = np.array(pt)
    endpoints = np.array(line)
    return vt.distance_to_lineseg(pt, endpoints[0], endpoints[1]) <= max_dist
def check_min_wh(coords):
    """ True if the (unrotated) rectangle *coords* is larger than 5x5 pixels.

    Depends on hardcoded indices, which is inelegant, but
    we're already depending on those for the FUDGE_FACTORS
    array above
        0----1
        |    |
        3----2
    """
    MIN_W = 5
    MIN_H = 5
    # Computing each dimension from both opposite edges is redundant but
    # sanity-checks that the coordinates really describe a rectangle.
    w1 = coords[1][0] - coords[0][0]
    w2 = coords[2][0] - coords[3][0]
    h1 = coords[3][1] - coords[0][1]
    h2 = coords[2][1] - coords[1][1]
    assert np.isclose(w1, w2), ('w1: %r, w2: %r' % (w1, w2))
    assert np.isclose(h1, h2), ('h1: %r, h2: %r' % (h1, h2))
    return (MIN_W < w1) and (MIN_H < h1)
def default_vertices(img, polys=None, mouseX=None, mouseY=None):
    """Default to rectangle that has a quarter-width/height border."""
    (h, w) = img.shape[0:2]
    # Center the new rectangle on the mouse when available, else the image
    if mouseX is not None and mouseY is not None:
        cx, cy = mouseX, mouseY
    else:
        cx, cy = w // 2, h // 2
    if polys is not None and len(polys) > 0:
        # Use half of the largest existing polygon size as the half-extent
        wh_list = np.array([vt.bbox_from_verts(poly.xy)[2:4]
                            for poly in six.itervalues(polys)])
        half_w, half_h = wh_list.max(axis=0) // 2
    else:
        # With no polygons to copy, default to 1/4 of the image size
        half_w, half_h = (w // 4, h // 4)
    # Offset the center by the half extents and clip to the image bounds
    x1 = max(cx - half_w, 1)
    x2 = min(cx + half_w, w - 1)
    y1 = max(cy - half_h, 1)
    y2 = min(cy + half_h, h - 1)
    return ((x1, y1), (x1, y2), (x2, y2), (x2, y1))
def check_valid_coords(ax, coords_list):
    """ True when every coordinate in *coords_list* passes check_dims. """
    return all(check_dims(ax, xy_pt) for xy_pt in coords_list)
def check_dims(ax, xy_pt, margin=0.5):
    """
    checks if bounding box dims are ok

    Allow the bounding box to go off the image
    so orientations can be done correctly
    """
    xmin, xmax = ax.get_xlim()
    # image axes are y-inverted: ylim is (bottom, top) = (max_y, min_y)
    ybottom, ytop = ax.get_ylim()
    num_out = 0
    if xy_pt[0] < xmin + margin:
        num_out += 1
    if xy_pt[0] > xmax - margin:
        num_out += 1
    if xy_pt[1] < ytop + margin:
        num_out += 1
    if xy_pt[1] > ybottom - margin:
        num_out += 1
    return num_out <= 3
def enforce_dims(ax, xy_pt, margin=0.5):
    """
    Clamp *xy_pt* (mutated in place) to lie within the axes limits by *margin*.

    ONLY USE THIS ON UNROTATED RECTANGLES, as to do otherwise may yield
    arbitrary polygons
    """
    xmin, xmax = ax.get_xlim()
    # image axes are y-inverted: ylim is (bottom, top) = (max_y, min_y)
    ybottom, ytop = ax.get_ylim()
    xy_pt[0] = min(max(xy_pt[0], xmin + margin), xmax - margin)
    xy_pt[1] = min(max(xy_pt[1], ytop + margin), ybottom - margin)
    return True
def test_interact_annots():
    r"""
    CommandLine:
        python -m plottool_ibeis.interact_annotations --test-test_interact_annots --show
    Example:
        >>> # ENABLE_DOCTEST
        >>> from plottool_ibeis.interact_annotations import * # NOQA
        >>> import plottool_ibeis as pt
        >>> # build test data
        >>> # execute function
        >>> self = test_interact_annots()
        >>> # verify results
        >>> print(self)
        >>> pt.show_if_requested()
    """
    print('[interact_annot] *** START DEMO ***')
    # Two closed rectangles (first vertex repeated to close the ring)
    verts_list = [
        ((0, 400), (400, 400), (400, 0), (0, 0), (0, 400)),
        ((400, 700), (700, 700), (700, 400), (400, 400), (400, 700))
    ]
    #if img is None:
    # Grab a demo image from the web; fall back to random noise when offline
    try:
        img_url = 'http://i.imgur.com/Vq9CLok.jpg'
        img_fpath = ut.grab_file_url(img_url)
        img = vt.imread(img_fpath)
    except Exception as ex:
        print('[interact_annot] cant read zebra: %r' % ex)
        img = np.random.uniform(0, 255, size=(100, 100))
    valid_species = ['species1', 'species2']
    metadata_list = [{'name': 'foo'}, None]
    self = AnnotationInteraction(img, verts_list=verts_list,
                                 valid_species=valid_species,
                                 metadata_list=metadata_list,
                                 fnum=0)  # NOQA
    return self
if __name__ == '__main__':
    """
    CommandLine:
        python -m plottool_ibeis.interact_annotations --exec-test_interact_annots --show
    CommandLine:
        python -m plottool_ibeis.interact_annotations
        python -m plottool_ibeis.interact_annotations --allexamples
        python -m plottool_ibeis.interact_annotations --allexamples --noface --nosrc
    """
    import multiprocessing
    multiprocessing.freeze_support()  # for win32
    import utool as ut  # NOQA
    # Run this module's doctests via utool's doctest harness
    ut.doctest_funcs()
| Erotemic/plottool | plottool_ibeis/interact_annotations.py | Python | apache-2.0 | 52,275 | 0.001607 |
from __future__ import absolute_import
from django.core.urlresolvers import reverse
from sentry.models import TagKey, TagValue
from sentry.testutils import APITestCase
class ProjectTagKeyValuesTest(APITestCase):
    def test_simple(self):
        """ GET on a project tag key's values endpoint returns the value. """
        project = self.create_project()
        tagkey = TagKey.objects.create(project=project, key='foo')
        # The TagValue only needs to exist in the database; the previous
        # binding to an unused local ``tagvalue`` was removed.
        TagValue.objects.create(project=project, key='foo', value='bar')
        self.login_as(user=self.user)
        url = reverse('sentry-api-0-project-tagkey-values', kwargs={
            'organization_slug': project.organization.slug,
            'project_slug': project.slug,
            'key': tagkey.key,
        })
        response = self.client.get(url)
        assert response.status_code == 200
        assert len(response.data) == 1
        assert response.data[0]['value'] == 'bar'
| TedaLIEz/sentry | tests/sentry/api/endpoints/test_project_tagkey_values.py | Python | bsd-3-clause | 862 | 0.00116 |
# UrbanFootprint v1.5
# Copyright (C) 2017 Calthorpe Analytics
#
# This file is part of UrbanFootprint version 1.5
#
# UrbanFootprint is distributed under the terms of the GNU General
# Public License version 3, as published by the Free Software Foundation. This
# code is distributed WITHOUT ANY WARRANTY, without implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License v3 for more details; see <http://www.gnu.org/licenses/>.
import logging
import datetime
from django.utils.timezone import utc
from footprint.main.managers.geo_inheritance_manager import GeoInheritanceManager
from footprint.main.models.analysis_module.analysis_tool import AnalysisTool
from footprint.main.models.config.scenario import BaseScenario, FutureScenario
from footprint.main.models.geospatial.db_entity_keys import DbEntityKey
from footprint.main.models.presentation.layer.layer import Layer
from footprint.main.utils.utils import timestamp
from footprint.utils.websockets import send_message_to_client
from tilestache_uf.utils import invalidate_feature_cache
# Module-level logger for the agriculture updater tool
logger = logging.getLogger(__name__)
__author__ = 'calthorpe_analytics'
class AgricultureUpdaterTool(AnalysisTool):
    """ Analysis tool that recomputes agriculture metrics for a scenario.

    For every canvas feature with a built form, the built form's per-acre
    agriculture attributes (yield, value, cost, water, labor, truck trips)
    are scaled by the feature's applied acreage and written back to the
    feature table.  Updates to a BaseScenario are propagated to the
    not-yet-user-edited features of dependent FutureScenarios.
    """
    objects = GeoInheritanceManager()
    class Meta(object):
        app_label = 'main'
        abstract = False
    def test_agriculture_core(self, **kwargs):
        """ Thin test wrapper around the analysis entry point.

        NOTE(review): ``agriculture_analysis`` is not defined on this class;
        presumably inherited from AnalysisTool -- confirm.
        """
        self.agriculture_analysis(**kwargs)
    # Feature fields written back by update()/update_dependent_scenarios()
    ANALYSIS_FIELDS = ["gross_net_pct",
                       "built_form_key",
                       "built_form_id",
                       "density_pct",
                       "acres_gross",
                       "crop_yield",
                       "market_value",
                       "production_cost",
                       "water_consumption",
                       "labor_force",
                       "truck_trips"]
    def progress(self, proportion, **kwargs):
        """ Push a proportion-completed update to the client via websockets. """
        send_message_to_client(
            kwargs['user'].id,
            dict(
                event='postSavePublisherProportionCompleted',
                job_id=str(kwargs['job'].hashid),
                config_entity_id=self.config_entity.id,
                ids=[kwargs['analysis_module'].id],
                class_name='AnalysisModule',
                key=kwargs['analysis_module'].key,
                proportion=proportion))
    def update_dependent_scenarios(self, base_features, scenario):
        """ Propagate updated base-canvas values to dependent future scenarios.

        When *scenario* is a BaseScenario, copy the analysis fields of
        *base_features* onto the matching (not user-edited) features of every
        dependent FutureScenario, then invalidate the tile cache for features
        whose built form changed.
        """
        if isinstance(scenario, BaseScenario):
            future_scenarios = FutureScenario.objects.filter(parent_config_entity=scenario.parent_config_entity_subclassed)
            logger.info("Updating dependent scenarios {0} of {1}".format(future_scenarios, scenario))
            for future_scenario in future_scenarios:
                agriculture_feature_class = future_scenario.db_entity_feature_class(DbEntityKey.FUTURE_AGRICULTURE)
                # Only touch features the user has not edited (updater is null)
                future_features = agriculture_feature_class.objects.filter(
                    id__in=base_features,
                    updater__isnull=True
                )
                logger.info("Updating {0} features of {1}".format(future_features.count(), future_scenario))
                updated_built_forms = []
                for feature in future_features.iterator():
                    # NOTE(review): one query per feature (N+1 pattern); a
                    # dict keyed by id built up front would avoid this.
                    base_feature = base_features.get(id=feature.id)
                    if base_feature.built_form_id != feature.built_form_id:
                        updated_built_forms.append(feature)
                    base_attributes = dict(
                        gross_net_pct=base_feature.gross_net_pct,
                        built_form_key=base_feature.built_form_key,
                        built_form_id=base_feature.built_form_id,
                        density_pct=base_feature.density_pct,
                        acres_gross=base_feature.acres_gross,
                        crop_yield=base_feature.crop_yield,
                        market_value=base_feature.market_value,
                        production_cost=base_feature.production_cost,
                        water_consumption=base_feature.water_consumption,
                        labor_force=base_feature.labor_force,
                        truck_trips=base_feature.truck_trips,
                    )
                    # NOTE(review): dict.iteritems is Python 2 only; this
                    # raises AttributeError under Python 3 -- confirm the
                    # runtime this module targets.
                    for attr, value in base_attributes.iteritems():
                        setattr(feature, attr, value)
                    feature.save(update_fields=self.ANALYSIS_FIELDS)
                layer = Layer.objects.filter(presentation__config_entity=agriculture_feature_class.config_entity,
                                             db_entity_interest__db_entity__key=agriculture_feature_class.db_entity_key)[0]
                if updated_built_forms:
                    for key in layer.keys:
                        # clear tilestache cache for updated dependencies
                        invalidate_feature_cache(key, updated_built_forms)
    def update(self, **kwargs):
        """ Recompute agriculture metrics for the tool's scenario canvas.

        Kwargs:
            ids: optional explicit list of feature ids to process; otherwise
                every feature with a built form is processed.

        Raises:
            Exception: when the config entity is neither a Base nor a
                Future scenario.
        """
        scenario = self.config_entity.subclassed
        logger.debug('{0}:Starting Agriculture Core Analysis for {1}'.format(timestamp(), self.config_entity))
        # Pick the canvas matching the scenario timeline (base vs future)
        if isinstance(scenario, BaseScenario):
            agriculture_db_entity_key = DbEntityKey.BASE_AGRICULTURE_CANVAS
        elif isinstance(scenario, FutureScenario):
            agriculture_db_entity_key = DbEntityKey.FUTURE_AGRICULTURE_CANVAS
        else:
            raise Exception("Config Entity is not a Future or Base Scenario, cannot run AgricultureCore.")
        ids = kwargs.get('ids', None)
        agriculture_feature_class = self.config_entity.db_entity_feature_class(agriculture_db_entity_key)
        if ids:
            features = agriculture_feature_class.objects.filter(id__in=ids)
        else:
            features = agriculture_feature_class.objects.filter(built_form__isnull=False)
        feature_count = features.count()
        if not feature_count:
            logger.info("No features to process!")
            return
        logger.debug("Processing {0} features...".format(feature_count))
        iterator_start = datetime.datetime.utcnow().replace(tzinfo=utc)
        self.progress(0.05, **kwargs)
        # Carve the remaining ~90% of the progress bar into increments that
        # are reported roughly every ``equal_portion`` features
        if feature_count <= 36:
            increment_portion = (.9 / feature_count) + .001
            equal_portion = 1
        else:
            increment_portion = .05
            equal_portion = int((feature_count - 1) / 18)
        i = 1
        for feature in features.iterator():
            if i % equal_portion == 0:
                self.progress(increment_portion, **kwargs)
            if not feature.built_form:
                # No built form: zero out all computed metrics
                feature.built_form_key = None
                feature.crop_yield = 0
                feature.market_value = 0
                feature.production_cost = 0
                feature.water_consumption = 0
                feature.labor_force = 0
                feature.truck_trips = 0
            else:
                # Scale the built form's per-acre attributes by applied acres
                applied_acres = feature.acres_gross * feature.density_pct * feature.dev_pct
                agriculture_attribute_set = feature.built_form.resolve_built_form(feature.built_form).agriculture_attribute_set
                feature.built_form_key = feature.built_form.key
                feature.crop_yield = agriculture_attribute_set.crop_yield * applied_acres
                feature.market_value = agriculture_attribute_set.unit_price * feature.crop_yield
                feature.production_cost = agriculture_attribute_set.cost * applied_acres
                feature.water_consumption = agriculture_attribute_set.water_consumption * applied_acres
                feature.labor_force = agriculture_attribute_set.labor_input * applied_acres
                feature.truck_trips = agriculture_attribute_set.truck_trips * applied_acres
            feature.save(update_fields=self.ANALYSIS_FIELDS)
            i += 1
        total_time = datetime.datetime.utcnow().replace(tzinfo=utc) - iterator_start
        logger.debug("Processed {0} features in {1}: {2} per feature".format(
            feature_count, total_time, total_time/feature_count
        ))
        self.progress(.9, **kwargs)
        logger.debug('{0}:Finished Agriculture Core Analysis for {1} '.format(timestamp(), self.config_entity))
        self.update_dependent_scenarios(features, scenario)
    #
    # def update_progress(self, number, total, start, **kwargs):
    #     if total < 20:
    #         parts = float(total)
    #     else:
    #         parts = 20
    #     chunk = float(total) / parts
    #     increment = 1 / parts
    #     if number % chunk < 1:
    #         progress_value = float(number) / float(total)
    #         octotherps = int(round(progress_value * parts))
    #         spaces = parts - octotherps
    #         bar = '#'*octotherps + ' '*spaces
    #         print '\r[{0}] {1}%'.format(bar, round(progress_value*100, 2)) + " | " + \
    #             self.estimated_time_remaining(progress_value, start) + " remaining"
    #     else:
    #         return
    #
    #
    # def estimated_time_remaining(self, progress, start):
    #     current_time = datetime.datetime.utcnow().replace(tzinfo=utc)
    #     elapsed = current_time - start
    #     total_time = (elapsed * int(round((10/progress)))) / 10
    #     remaining_estimate = total_time - elapsed
    #     return str(remaining_estimate)
| CalthorpeAnalytics/urbanfootprint | footprint/main/models/analysis_module/agriculture_module/agriculture_updater_tool.py | Python | gpl-3.0 | 9,307 | 0.002579 |
from __future__ import unicode_literals
from django.core.exceptions import PermissionDenied
from django.core.mail import send_mail
from django.core import validators
from django.db import models
from django.db.models.manager import EmptyManager
from django.utils.crypto import get_random_string, salted_hmac
from django.utils import six
from django.utils.translation import ugettext_lazy as _
from django.utils import timezone
from django.contrib import auth
from django.contrib.auth.hashers import (
check_password, make_password, is_password_usable)
from django.contrib.auth.signals import user_logged_in
from django.contrib.contenttypes.models import ContentType
from django.utils.encoding import python_2_unicode_compatible
def update_last_login(sender, user, **kwargs):
    """
    A signal receiver which updates the last_login date for
    the user logging in.
    """
    user.last_login = timezone.now()
    # Only touch the last_login column so concurrent writes to other
    # fields are not clobbered.
    user.save(update_fields=['last_login'])
# Registered at import time so every successful login updates the stamp.
user_logged_in.connect(update_last_login)
class PermissionManager(models.Manager):
    """Manager that lets permissions be looked up by natural key."""

    def get_by_natural_key(self, codename, app_label, model):
        # Natural key is (codename, app_label, model); used by fixture
        # serialization instead of database primary keys.
        return self.get(
            codename=codename,
            content_type=ContentType.objects.get_by_natural_key(app_label,
                                                                model),
        )
@python_2_unicode_compatible
class Permission(models.Model):
    """
    The permissions system provides a way to assign permissions to specific
    users and groups of users.

    The permission system is used by the Django admin site, but may also be
    useful in your own code. The Django admin site uses permissions as follows:

        - The "add" permission limits the user's ability to view the "add" form
          and add an object.
        - The "change" permission limits a user's ability to view the change
          list, view the "change" form and change an object.
        - The "delete" permission limits the ability to delete an object.

    Permissions are set globally per type of object, not per specific object
    instance. It is possible to say "Mary may change news stories," but it's
    not currently possible to say "Mary may change news stories, but only the
    ones she created herself" or "Mary may only change news stories that have a
    certain status or publication date."

    Three basic permissions -- add, change and delete -- are automatically
    created for each Django model.
    """
    name = models.CharField(_('name'), max_length=255)
    content_type = models.ForeignKey(ContentType)
    # Machine-readable identifier, e.g. 'add_user'.
    codename = models.CharField(_('codename'), max_length=100)
    objects = PermissionManager()

    class Meta:
        verbose_name = _('permission')
        verbose_name_plural = _('permissions')
        # A codename is unique per model (content type), not globally.
        unique_together = (('content_type', 'codename'),)
        ordering = ('content_type__app_label', 'content_type__model',
                    'codename')

    def __str__(self):
        return "%s | %s | %s" % (
            six.text_type(self.content_type.app_label),
            six.text_type(self.content_type),
            six.text_type(self.name))

    def natural_key(self):
        return (self.codename,) + self.content_type.natural_key()
    # ContentType rows must be deserialized before permissions on loaddata.
    natural_key.dependencies = ['contenttypes.contenttype']
class GroupManager(models.Manager):
    """
    The manager for the auth's Group model.
    """
    def get_by_natural_key(self, name):
        # Groups are uniquely identified by name, so that is the natural key.
        return self.get(name=name)
@python_2_unicode_compatible
class Group(models.Model):
    """
    Groups are a generic way of categorizing users to apply permissions, or
    some other label, to those users. A user can belong to any number of
    groups.

    A user in a group automatically has all the permissions granted to that
    group. For example, if the group Site editors has the permission
    can_edit_home_page, any user in that group will have that permission.

    Beyond permissions, groups are a convenient way to categorize users to
    apply some label, or extended functionality, to them. For example, you
    could create a group 'Special users', and you could write code that would
    do special things to those users -- such as giving them access to a
    members-only portion of your site, or sending them members-only email
    messages.
    """
    name = models.CharField(_('name'), max_length=80, unique=True)
    # Permissions granted to every member of the group.
    permissions = models.ManyToManyField(Permission,
        verbose_name=_('permissions'), blank=True)

    objects = GroupManager()

    class Meta:
        verbose_name = _('group')
        verbose_name_plural = _('groups')

    def __str__(self):
        return self.name

    def natural_key(self):
        return (self.name,)
class BaseUserManager(models.Manager):
    """Manager base class with helpers shared by custom user managers."""

    @classmethod
    def normalize_email(cls, email):
        """
        Normalize the address by lowercasing the domain part of the email
        address.
        """
        address = email or ''
        local_part, at_sign, domain = address.strip().rpartition('@')
        if not at_sign:
            # No '@' present: return the input unchanged (not even
            # stripped), matching the historical behaviour.
            return address
        return '@'.join([local_part, domain.lower()])

    def make_random_password(self, length=10,
                             allowed_chars='abcdefghjkmnpqrstuvwxyz'
                                           'ABCDEFGHJKLMNPQRSTUVWXYZ'
                                           '23456789'):
        """
        Generates a random password with the given length and given
        allowed_chars. Note that the default value of allowed_chars does not
        have "I" or "O" or letters and digits that look similar -- just to
        avoid confusion.
        """
        return get_random_string(length, allowed_chars)

    def get_by_natural_key(self, username):
        # USERNAME_FIELD may be 'username', 'email', etc. on custom models.
        return self.get(**{self.model.USERNAME_FIELD: username})
class UserManager(BaseUserManager):
    """Default manager for the User model (create_user / create_superuser)."""

    def _create_user(self, username, email, password,
                     is_staff, is_superuser, **extra_fields):
        """
        Creates and saves a User with the given username, email and password.
        """
        now = timezone.now()
        if not username:
            raise ValueError('The given username must be set')
        email = self.normalize_email(email)
        user = self.model(username=username, email=email,
                          is_staff=is_staff, is_active=True,
                          is_superuser=is_superuser, last_login=now,
                          date_joined=now, **extra_fields)
        # set_password hashes the raw password before storage.
        user.set_password(password)
        user.save(using=self._db)
        return user

    def create_user(self, username, email=None, password=None, **extra_fields):
        # Regular account: neither staff nor superuser.
        return self._create_user(username, email, password, False, False,
                                 **extra_fields)

    def create_superuser(self, username, email, password, **extra_fields):
        # Superuser account: both staff and superuser flags set.
        return self._create_user(username, email, password, True, True,
                                 **extra_fields)
@python_2_unicode_compatible
class AbstractBaseUser(models.Model):
    """
    Core user machinery: hashed password storage/checking, the
    USERNAME_FIELD identity protocol and the session auth hash.
    Concrete subclasses define USERNAME_FIELD plus the matching field and
    implement get_full_name()/get_short_name().
    """
    password = models.CharField(_('password'), max_length=128)
    last_login = models.DateTimeField(_('last login'), default=timezone.now)

    # Plain class attribute here; AbstractUser overrides it with a real
    # database field.
    is_active = True

    REQUIRED_FIELDS = []

    class Meta:
        abstract = True

    def get_username(self):
        "Return the identifying username for this User"
        return getattr(self, self.USERNAME_FIELD)

    def __str__(self):
        return self.get_username()

    def natural_key(self):
        return (self.get_username(),)

    def is_anonymous(self):
        """
        Always returns False. This is a way of comparing User objects to
        anonymous users.
        """
        return False

    def is_authenticated(self):
        """
        Always return True. This is a way to tell if the user has been
        authenticated in templates.
        """
        return True

    def set_password(self, raw_password):
        # Hashes the raw password; does not persist the instance.
        self.password = make_password(raw_password)

    def check_password(self, raw_password):
        """
        Returns a boolean of whether the raw_password was correct. Handles
        hashing formats behind the scenes.
        """
        def setter(raw_password):
            # Called on a successful check to upgrade outdated hashes.
            self.set_password(raw_password)
            self.save(update_fields=["password"])
        return check_password(raw_password, self.password, setter)

    def set_unusable_password(self):
        # Sets a value that will never be a valid hash
        self.password = make_password(None)

    def has_usable_password(self):
        return is_password_usable(self.password)

    def get_full_name(self):
        raise NotImplementedError('subclasses of AbstractBaseUser must provide a get_full_name() method')

    def get_short_name(self):
        raise NotImplementedError('subclasses of AbstractBaseUser must provide a get_short_name() method.')

    def get_session_auth_hash(self):
        """
        Returns an HMAC of the password field.
        """
        key_salt = "django.contrib.auth.models.AbstractBaseUser.get_session_auth_hash"
        return salted_hmac(key_salt, self.password).hexdigest()
# A few helper functions for common logic between User and AnonymousUser.
def _user_get_all_permissions(user, obj):
    """Union of permission strings from every backend that reports them."""
    perms = set()
    for backend in auth.get_backends():
        getter = getattr(backend, "get_all_permissions", None)
        if getter is not None:
            perms.update(getter(user, obj))
    return perms
def _user_has_perm(user, perm, obj):
    """
    A backend can raise `PermissionDenied` to short-circuit permission checking.
    """
    for backend in auth.get_backends():
        if not hasattr(backend, 'has_perm'):
            continue
        try:
            granted = backend.has_perm(user, perm, obj)
        except PermissionDenied:
            # Hard denial from any backend wins immediately.
            return False
        if granted:
            # First backend that grants the permission wins.
            return True
    return False
def _user_has_module_perms(user, app_label):
    """
    A backend can raise `PermissionDenied` to short-circuit permission checking.
    """
    for backend in auth.get_backends():
        if not hasattr(backend, 'has_module_perms'):
            continue
        try:
            granted = backend.has_module_perms(user, app_label)
        except PermissionDenied:
            # Hard denial from any backend wins immediately.
            return False
        if granted:
            return True
    return False
class PermissionsMixin(models.Model):
    """
    A mixin class that adds the fields and methods necessary to support
    Django's Group and Permission model using the ModelBackend.
    """
    is_superuser = models.BooleanField(_('superuser status'), default=False,
        help_text=_('Designates that this user has all permissions without '
                    'explicitly assigning them.'))
    groups = models.ManyToManyField(Group, verbose_name=_('groups'),
        blank=True, help_text=_('The groups this user belongs to. A user will '
                                'get all permissions granted to each of '
                                'their groups.'),
        related_name="user_set", related_query_name="user")
    user_permissions = models.ManyToManyField(Permission,
        verbose_name=_('user permissions'), blank=True,
        help_text=_('Specific permissions for this user.'),
        related_name="user_set", related_query_name="user")

    class Meta:
        abstract = True

    def get_group_permissions(self, obj=None):
        """
        Returns a list of permission strings that this user has through their
        groups. This method queries all available auth backends. If an object
        is passed in, only permissions matching this object are returned.
        """
        permissions = set()
        for backend in auth.get_backends():
            if hasattr(backend, "get_group_permissions"):
                permissions.update(backend.get_group_permissions(self, obj))
        return permissions

    def get_all_permissions(self, obj=None):
        # Delegates to the module-level helper shared with AnonymousUser.
        return _user_get_all_permissions(self, obj)

    def has_perm(self, perm, obj=None):
        """
        Returns True if the user has the specified permission. This method
        queries all available auth backends, but returns immediately if any
        backend returns True. Thus, a user who has permission from a single
        auth backend is assumed to have permission in general. If an object is
        provided, permissions for this specific object are checked.
        """
        # Active superusers have all permissions.
        if self.is_active and self.is_superuser:
            return True

        # Otherwise we need to check the backends.
        return _user_has_perm(self, perm, obj)

    def has_perms(self, perm_list, obj=None):
        """
        Returns True if the user has each of the specified permissions. If
        object is passed, it checks if the user has all required perms for this
        object.
        """
        for perm in perm_list:
            if not self.has_perm(perm, obj):
                return False
        return True

    def has_module_perms(self, app_label):
        """
        Returns True if the user has any permissions in the given app label.
        Uses pretty much the same logic as has_perm, above.
        """
        # Active superusers have all permissions.
        if self.is_active and self.is_superuser:
            return True

        return _user_has_module_perms(self, app_label)
class AbstractUser(AbstractBaseUser, PermissionsMixin):
    """
    An abstract base class implementing a fully featured User model with
    admin-compliant permissions.

    Username, password and email are required. Other fields are optional.
    """
    username = models.CharField(_('username'), max_length=30, unique=True,
        help_text=_('Required. 30 characters or fewer. Letters, digits and '
                    '@/./+/-/_ only.'),
        validators=[
            validators.RegexValidator(r'^[\w.@+-]+$',
                                      _('Enter a valid username. '
                                        'This value may contain only letters, numbers '
                                        'and @/./+/-/_ characters.'), 'invalid'),
        ],
        error_messages={
            'unique': _("A user with that username already exists."),
        })
    first_name = models.CharField(_('first name'), max_length=30, blank=True)
    last_name = models.CharField(_('last name'), max_length=30, blank=True)
    email = models.EmailField(_('email address'), blank=True)
    is_staff = models.BooleanField(_('staff status'), default=False,
        help_text=_('Designates whether the user can log into this admin '
                    'site.'))
    # Overrides the plain True attribute on AbstractBaseUser with a real
    # database column used for soft-deactivation.
    is_active = models.BooleanField(_('active'), default=True,
        help_text=_('Designates whether this user should be treated as '
                    'active. Unselect this instead of deleting accounts.'))
    date_joined = models.DateTimeField(_('date joined'), default=timezone.now)

    objects = UserManager()

    USERNAME_FIELD = 'username'
    REQUIRED_FIELDS = ['email']

    class Meta:
        verbose_name = _('user')
        verbose_name_plural = _('users')
        abstract = True

    def get_full_name(self):
        """
        Returns the first_name plus the last_name, with a space in between.
        """
        full_name = '%s %s' % (self.first_name, self.last_name)
        return full_name.strip()

    def get_short_name(self):
        "Returns the short name for the user."
        return self.first_name

    def email_user(self, subject, message, from_email=None, **kwargs):
        """
        Sends an email to this User.
        """
        send_mail(subject, message, from_email, [self.email], **kwargs)
class User(AbstractUser):
    """
    Users within the Django authentication system are represented by this
    model.

    Username, password and email are required. Other fields are optional.
    """
    class Meta(AbstractUser.Meta):
        # Allows projects to substitute their own model via the
        # AUTH_USER_MODEL setting.
        swappable = 'AUTH_USER_MODEL'
@python_2_unicode_compatible
class AnonymousUser(object):
    """
    Stateless stand-in for an unauthenticated user. Mirrors the User API
    (permissions, groups, flags) but has no database representation, so
    all persistence methods raise NotImplementedError.
    """
    id = None
    pk = None
    username = ''
    is_staff = False
    is_active = False
    is_superuser = False
    # Empty, read-only managers so permission/group queries are valid.
    _groups = EmptyManager(Group)
    _user_permissions = EmptyManager(Permission)

    def __init__(self):
        pass

    def __str__(self):
        return 'AnonymousUser'

    def __eq__(self, other):
        # All AnonymousUser instances compare equal to each other.
        return isinstance(other, self.__class__)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        return 1  # instances always return the same hash value

    def save(self):
        raise NotImplementedError("Django doesn't provide a DB representation for AnonymousUser.")

    def delete(self):
        raise NotImplementedError("Django doesn't provide a DB representation for AnonymousUser.")

    def set_password(self, raw_password):
        raise NotImplementedError("Django doesn't provide a DB representation for AnonymousUser.")

    def check_password(self, raw_password):
        raise NotImplementedError("Django doesn't provide a DB representation for AnonymousUser.")

    def _get_groups(self):
        return self._groups
    groups = property(_get_groups)

    def _get_user_permissions(self):
        return self._user_permissions
    user_permissions = property(_get_user_permissions)

    def get_group_permissions(self, obj=None):
        # Anonymous users belong to no groups.
        return set()

    def get_all_permissions(self, obj=None):
        # Backends may still grant permissions to anonymous users.
        return _user_get_all_permissions(self, obj=obj)

    def has_perm(self, perm, obj=None):
        return _user_has_perm(self, perm, obj=obj)

    def has_perms(self, perm_list, obj=None):
        for perm in perm_list:
            if not self.has_perm(perm, obj):
                return False
        return True

    def has_module_perms(self, module):
        return _user_has_module_perms(self, module)

    def is_anonymous(self):
        return True

    def is_authenticated(self):
        return False
| dhoffman34/django | django/contrib/auth/models.py | Python | bsd-3-clause | 17,902 | 0.001061 |
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Normal
from ..net import VAE
from ..util.common import batch_to_sequence, sequence_to_batch
from ..util.init import orthogonal_init
class _CNNBase(nn.Module):
def __init__(self):
super(_CNNBase, self).__init__()
self.conv1 = nn.Conv2d(3, 16, kernel_size=8, stride=4)
self.conv2 = nn.Conv2d(16, 32, kernel_size=4, stride=2)
self.fc = nn.Linear(3200, 256)
def forward(self, x):
h1 = F.relu(self.conv1(x))
h2 = F.relu(self.conv2(h1))
h2 = h2.view(h2.size(0), -1)
return F.relu(self.fc(h2))
class _RNNBase(nn.Module):
def __init__(self, input_size, output_size):
super(_RNNBase, self).__init__()
self.rnn = nn.LSTM(input_size, output_size, batch_first=True)
# self.rnn = nn.GRU(input_size, output_size, batch_first=True)
self.hidden = None
def forward(self, x):
if self.hidden is None:
out, self.hidden = self.rnn(x)
else:
c, h = self.hidden
self.hidden = c.detach(), h.detach()
out, self.hidden = self.rnn(x, self.hidden)
return out
class VAEPolicy(nn.Module):
    """Feed-forward actor-critic policy on top of a VAE encoder.

    forward(x) returns (action, value); the Gaussian action distribution
    is cached in self.pd for callers that need log-probs/entropy.
    """

    def __init__(self, input_shape, output_shape):
        super(VAEPolicy, self).__init__()
        self.pd = None  # last Normal distribution built by forward()
        z_size = 256
        self.visual = VAE(z_size, add_noise=False)
        self.mean_head = nn.Linear(z_size, output_shape[0])
        # State-independent log-std parameter, one entry per action dim.
        self.log_std_head = nn.Parameter(torch.zeros(output_shape[0]))
        self.value_head = nn.Linear(z_size, 1)
        # self.mean_head.apply(orthogonal_init([nn.Linear], 'linear'))
        # self.value_head.apply(orthogonal_init([nn.Linear], 'linear'))

    def forward(self, x):
        # Encode without tracking gradients, and feed the heads a detached
        # feature: the VAE is never trained through this policy.
        with torch.no_grad():
            feature, _, _ = self.visual.encode(x)
        mean = self.mean_head(feature.detach())
        std = self.log_std_head.expand_as(mean).exp()
        self.pd = Normal(mean, std)
        # Sample while training; act deterministically (mean) at eval time.
        action = self.pd.sample() if self.training else mean
        value = self.value_head(feature.detach())
        return action, value

    @property
    def recurrent(self):
        return False

    @property
    def name(self):
        return 'VAEPolicy'
class VAELSTMPolicy(nn.Module):
    """Recurrent actor-critic policy on top of a frozen VAE encoder.

    The VAE encodes frames into a 128-d latent, an LSTM aggregates the
    sequence, and linear heads produce a Gaussian action distribution and
    a state value. forward(x) returns (action, value); the distribution
    is cached in self.pd.
    """

    def __init__(self, input_shape, output_shape):
        super(VAELSTMPolicy, self).__init__()
        self.pd = None  # last Normal distribution built by forward()
        z_size = 128
        self.visual = VAE(z_size, add_noise=False)
        self.rnn = _RNNBase(z_size, z_size)
        self.mean_head = nn.Linear(z_size, output_shape[0])
        self.log_std_head = nn.Parameter(torch.zeros(output_shape[0]))
        self.value_head = nn.Linear(z_size, 1)
        # The visual encoder is pre-trained and kept frozen.
        for param in self.visual.parameters():
            param.requires_grad = False
        self.value_head.apply(orthogonal_init([nn.Linear], 'linear'))
        self.mean_head.apply(orthogonal_init([nn.Linear], 'tanh'))

    def forward(self, x):
        # BUG FIX: the context manager must be *called* -- the original
        # ``with torch.no_grad:`` (no parentheses) raises at runtime
        # because the class object itself is not a context manager.
        # Compare the correct usage in VAEPolicy.forward.
        with torch.no_grad():
            feature, _, _ = self.visual.encode(x)
        feature = batch_to_sequence(feature, self.num_steps)
        memory = sequence_to_batch(self.rnn(feature))
        mean = self.mean_head(memory)
        std = self.log_std_head.expand_as(mean).exp()
        self.pd = Normal(mean, std)
        # Sample while training; act deterministically (mean) at eval time.
        action = self.pd.sample() if self.training else mean
        value = self.value_head(memory)
        return action, value

    @property
    def num_steps(self):
        # Sequence length used when folding the batch into RNN sequences.
        return 8

    @property
    def recurrent(self):
        return True

    @property
    def name(self):
        return 'VAELSTMPolicy'
class CNNPolicy(nn.Module):
    """Feed-forward actor-critic policy over a shared CNN feature trunk.

    forward(x) returns (action, value) and caches the Gaussian action
    distribution in self.pd for callers that need log-probs/entropy.
    """

    def __init__(self, input_shape, output_shape):
        super(CNNPolicy, self).__init__()
        self.pd = None  # last Normal distribution built by forward()
        self.cnn = _CNNBase()
        feature_size = self.cnn.fc.out_features
        n_actions = output_shape[0]
        self.mean_head = nn.Linear(feature_size, n_actions)
        # State-independent log-std, one entry per action dimension.
        self.log_std_head = nn.Parameter(torch.zeros(n_actions))
        self.value_head = nn.Linear(feature_size, 1)
        # self.cnn.apply(orthogonal_init([nn.Linear], 'relu'))
        # self.mean_head.apply(orthogonal_init([nn.Linear], 'linear'))
        # self.value_head.apply(orthogonal_init([nn.Linear], 'linear'))

    def forward(self, x):
        feature = self.cnn(x)
        mean = self.mean_head(feature)
        std = torch.exp(self.log_std_head.expand_as(mean))
        self.pd = Normal(mean, std)
        # Sample while training; act deterministically (mean) at eval time.
        if self.training:
            action = self.pd.sample()
        else:
            action = mean
        return action, self.value_head(feature)

    @property
    def recurrent(self):
        return False

    @property
    def name(self):
        return 'CNNPolicy'
class CNNLSTMPolicy(nn.Module):
    """Recurrent actor-critic policy: CNN trunk + LSTM memory + linear heads.

    forward(x) returns (action, value); the Gaussian action distribution
    is cached in self.pd.
    """

    def __init__(self, input_shape, output_shape):
        super(CNNLSTMPolicy, self).__init__()
        self.pd = None  # last Normal distribution built by forward()
        self.cnn = _CNNBase()
        size = self.cnn.fc.out_features
        self.rnn = _RNNBase(size, size)
        self.mean_head = nn.Linear(size, output_shape[0])
        # NOTE(review): initialised with ones (initial std = e), unlike the
        # sibling policies which use torch.zeros (std = 1) -- confirm this
        # asymmetry is intentional.
        self.log_std_head = nn.Parameter(torch.ones(output_shape[0]))
        self.value_head = nn.Linear(size, 1)
        # self.value_head.apply(orthogonal_init([nn.Linear], 'linear'))
        # self.mean_head.apply(orthogonal_init([nn.Linear], 'tanh'))

    def forward(self, x):
        feature = self.cnn(x)
        # Fold the flat batch into sequences of num_steps for the LSTM,
        # then flatten back so the heads see one row per timestep.
        feature = batch_to_sequence(feature, self.num_steps)
        memory = sequence_to_batch(self.rnn(feature))
        mean = self.mean_head(memory)
        std = self.log_std_head.expand_as(mean).exp()
        self.pd = Normal(mean, std)
        # Sample while training; act deterministically (mean) at eval time.
        action = self.pd.sample() if self.training else mean
        value = self.value_head(memory)
        return action, value

    @property
    def num_steps(self):
        # Sequence length used when folding the batch into RNN sequences.
        return 8

    @property
    def recurrent(self):
        return True

    @property
    def name(self):
        return 'CNNLSTMPolicy'
class MLPPolicy(nn.Module):
    """Two-tower MLP actor-critic: separate policy and value networks.

    forward(x) returns (action, value); the Gaussian action distribution
    is cached in self.pd.
    """

    def __init__(self, input_shape, output_shape):
        super(MLPPolicy, self).__init__()
        self.pd = None  # last Normal distribution built by forward()
        in_size = input_shape[0]
        n_actions = output_shape[0]
        # Policy tower.
        self.pi_fc1 = nn.Linear(in_size, 64)
        self.pi_fc2 = nn.Linear(64, 64)
        # Value tower.
        self.vf_fc1 = nn.Linear(in_size, 64)
        self.vf_fc2 = nn.Linear(64, 64)
        self.mean_head = nn.Linear(64, n_actions)
        self.log_std_head = nn.Parameter(torch.zeros(n_actions))
        self.value_head = nn.Linear(64, 1)
        # Orthogonal init for hidden layers; linear gain for the heads.
        self.apply(orthogonal_init([nn.Linear], 'tanh'))
        self.mean_head.apply(orthogonal_init([nn.Linear], 'linear'))
        self.value_head.apply(orthogonal_init([nn.Linear], 'linear'))

    def forward(self, x):
        hidden = torch.tanh(self.pi_fc2(torch.tanh(self.pi_fc1(x))))
        mean = self.mean_head(hidden)
        std = self.log_std_head.expand_as(mean).exp()
        self.pd = Normal(mean, std)
        # Sample while training; act deterministically (mean) at eval time.
        action = self.pd.sample() if self.training else mean
        v_hidden = torch.tanh(self.vf_fc2(torch.tanh(self.vf_fc1(x))))
        value = self.value_head(v_hidden)
        return action, value

    @property
    def recurrent(self):
        return False

    @property
    def name(self):
        return 'MLPPolicy'
class MLPLSTMPolicy(nn.Module):
    """Recurrent two-tower actor-critic: each tower is FC -> LSTM -> head.

    forward(x) returns (action, value); the Gaussian action distribution
    is cached in self.pd.
    """

    def __init__(self, input_shape, output_shape):
        super(MLPLSTMPolicy, self).__init__()
        self.pd = None  # last Normal distribution built by forward()
        # Policy tower.
        self.pi_fc = nn.Linear(input_shape[0], 64)
        self.pi_rnn = _RNNBase(64, 64)
        # Value tower.
        self.vf_fc = nn.Linear(input_shape[0], 64)
        self.vf_rnn = _RNNBase(64, 64)
        self.mean_head = nn.Linear(64, output_shape[0])
        self.log_std_head = nn.Parameter(torch.zeros(output_shape[0]))
        self.value_head = nn.Linear(64, 1)
        self.pi_fc.apply(orthogonal_init([nn.Linear], 'tanh'))
        self.vf_fc.apply(orthogonal_init([nn.Linear], 'tanh'))
        self.value_head.apply(orthogonal_init([nn.Linear], 'linear'))

    def forward(self, x):
        # Policy tower: FC, fold into sequences for the LSTM, flatten back.
        pi_h1 = F.tanh(self.pi_fc(x))
        pi_h1 = batch_to_sequence(pi_h1, self.num_steps)
        pi_h2 = sequence_to_batch(self.pi_rnn(pi_h1))
        pi_h2 = F.tanh(pi_h2)
        mean = self.mean_head(pi_h2)
        std = self.log_std_head.expand_as(mean).exp()
        # Value tower mirrors the policy tower with its own LSTM.
        vf_h1 = F.tanh(self.vf_fc(x))
        vf_h1 = batch_to_sequence(vf_h1, self.num_steps)
        vf_h2 = sequence_to_batch(self.vf_rnn(vf_h1))
        vf_h2 = F.tanh(vf_h2)
        value = self.value_head(vf_h2)
        self.pd = Normal(mean, std)
        # Sample while training; act deterministically (mean) at eval time.
        action = self.pd.sample() if self.training else mean
        return action, value

    @property
    def num_steps(self):
        # Sequence length used when folding the batch into RNN sequences.
        return 8

    @property
    def recurrent(self):
        return True

    @property
    def name(self):
        return 'MLPLSTMPolicy'
| qsheeeeen/Self-Driving-Car | rl_toolbox/policy/shared.py | Python | mit | 8,677 | 0 |
__author__ = 'thgoette'
from BasicTest import BasicTest, wait
import unittest
class InlineCallbacks(BasicTest):
    """Exercises the RPC caller's deferred (inline-callback) API.

    Python 2 test: uses print statements.
    """

    @wait
    def test_inlinecallbacks(self):
        # Calls the remote "plus5times10" with 0 and expects the deferred
        # to fire with (0 + 5) * 10 == 50.
        print "Starting inlineCallback Test "

        def callback(result):
            print "Callback: " + str(result)
            self.assertEqual(result, 50)

        d = self.caller.call("plus5times10", 0)
        # addBoth: the assertion runs on success and failure alike.
        d.addBoth(callback)
        return d

if __name__ == '__main__':
    unittest.main()
| CN-UPB/OpenBarista | utils/decaf-utils-rpc/tests/unittests/InlineCallbacks.py | Python | mpl-2.0 | 468 | 0.002137 |
#!/usr/bin/env python
"""
fastq_split.py [-n|--num_files N_FILES] <input filename> <output directory>

Round-robin splits the reads of a FASTQ file into N output files named
<base>_<zero-padded index><ext> inside the output directory.
(Python 2 script: uses print statements and xrange.)
"""
import os
import sys
import math
from srt.fastq import *
from optparse import OptionParser

# Command line: -n/--num_files plus two positional args (input, outdir).
parser = OptionParser()
parser.add_option("-n", "--num_files", dest="num_files",
    help="Number of output files", type="int", default=5)
(options, args) = parser.parse_args()
input_filename = args[0]
output_directory = args[1]

if options.num_files<=0:
    print "Number of files must be > 0"
    sys.exit(-1)

# Width of the zero-padded file index (e.g. 5 files -> 1 digit, 10 -> 2).
num_places = 1+int(math.log10(options.num_files))

# Define output filename format
_ = os.path.split(input_filename)[-1]
base = os.path.splitext(_)[0]
ext = os.path.splitext(_)[1]
format = os.path.join(output_directory, "%s_%%0.%ii%s") % (base, num_places, ext)

# Open files
output_file = []
for i in xrange(options.num_files):
    output_filename = format % (i+1)
    output_file.append(FastqFile(output_filename, "w"))

# Split reads: deal each (header, sequence, quality) record to the next
# output file in turn, cycling round-robin.
i = 0
for h,s,q in FastqFile(input_filename):
    output_file[i].write(h,s,q)
    i = (i+1) % options.num_files

# Close files
for _ in output_file:
    _.close()
| PapenfussLab/Srtools | bin/fastq_split.py | Python | artistic-2.0 | 1,113 | 0.006289 |
# -*- coding: utf-8 -*-
"""Test CLR field support."""
import System
import pytest
from Python.Test import FieldTest
def test_public_instance_field():
"""Test public instance fields."""
ob = FieldTest()
assert ob.PublicField == 0
ob.PublicField = 1
assert ob.PublicField == 1
with pytest.raises(TypeError):
del FieldTest().PublicField
def test_public_static_field():
"""Test public static fields."""
ob = FieldTest()
assert FieldTest.PublicStaticField == 0
FieldTest.PublicStaticField = 1
assert FieldTest.PublicStaticField == 1
assert ob.PublicStaticField == 1
ob.PublicStaticField = 0
assert ob.PublicStaticField == 0
with pytest.raises(TypeError):
del FieldTest.PublicStaticField
with pytest.raises(TypeError):
del FieldTest().PublicStaticField
def test_protected_instance_field():
"""Test protected instance fields."""
ob = FieldTest()
assert ob.ProtectedField == 0
ob.ProtectedField = 1
assert ob.ProtectedField == 1
with pytest.raises(TypeError):
del FieldTest().ProtectedField
def test_protected_static_field():
"""Test protected static fields."""
ob = FieldTest()
assert FieldTest.ProtectedStaticField == 0
FieldTest.ProtectedStaticField = 1
assert FieldTest.ProtectedStaticField == 1
assert ob.ProtectedStaticField == 1
ob.ProtectedStaticField = 0
assert ob.ProtectedStaticField == 0
with pytest.raises(TypeError):
del FieldTest.ProtectedStaticField
with pytest.raises(TypeError):
del FieldTest().ProtectedStaticField
def test_read_only_instance_field():
"""Test readonly instance fields."""
assert FieldTest().ReadOnlyField == 0
with pytest.raises(TypeError):
FieldTest().ReadOnlyField = 1
with pytest.raises(TypeError):
del FieldTest().ReadOnlyField
def test_read_only_static_field():
"""Test readonly static fields."""
ob = FieldTest()
assert FieldTest.ReadOnlyStaticField == 0
assert ob.ReadOnlyStaticField == 0
with pytest.raises(TypeError):
FieldTest.ReadOnlyStaticField = 1
with pytest.raises(TypeError):
FieldTest().ReadOnlyStaticField = 1
with pytest.raises(TypeError):
del FieldTest.ReadOnlyStaticField
with pytest.raises(TypeError):
del FieldTest().ReadOnlyStaticField
def test_constant_field():
"""Test const fields."""
ob = FieldTest()
assert FieldTest.ConstField == 0
assert ob.ConstField == 0
with pytest.raises(TypeError):
FieldTest().ConstField = 1
with pytest.raises(TypeError):
FieldTest.ConstField = 1
with pytest.raises(TypeError):
del FieldTest().ConstField
with pytest.raises(TypeError):
del FieldTest.ConstField
def test_internal_field():
"""Test internal fields."""
with pytest.raises(AttributeError):
_ = FieldTest().InternalField
with pytest.raises(AttributeError):
_ = FieldTest().InternalStaticField
with pytest.raises(AttributeError):
_ = FieldTest.InternalStaticField
def test_private_field():
"""Test private fields."""
with pytest.raises(AttributeError):
_ = FieldTest().PrivateField
with pytest.raises(AttributeError):
_ = FieldTest().PrivateStaticField
with pytest.raises(AttributeError):
_ = FieldTest.PrivateStaticField
def test_field_descriptor_get_set():
    """Test field descriptor get / set."""
    # Setting the attribute must be routed through the descriptor rather
    # than silently replacing it in the instance or type dict, so after
    # every write the class-dict entry must still be a descriptor (not
    # a plain int).
    ob = FieldTest()
    assert FieldTest.PublicStaticField == 0
    assert ob.PublicStaticField == 0

    def still_a_descriptor():
        return type(FieldTest.__dict__['PublicStaticField']) != int

    assert still_a_descriptor()
    ob.PublicStaticField = 0
    assert still_a_descriptor()
    FieldTest.PublicStaticField = 0
    assert still_a_descriptor()
def test_field_descriptor_wrong_type():
"""Test setting a field using a value of the wrong type."""
with pytest.raises(ValueError):
FieldTest().PublicField = "spam"
def test_field_descriptor_abuse():
"""Test field descriptor abuse."""
desc = FieldTest.__dict__['PublicField']
with pytest.raises(TypeError):
desc.__get__(0, 0)
with pytest.raises(TypeError):
desc.__set__(0, 0)
def test_boolean_field():
    """Test boolean fields."""
    # change this to true / false later for Python 2.3?
    ob = FieldTest()
    assert ob.BooleanField is False
    ob.BooleanField = True
    assert ob.BooleanField is True
    ob.BooleanField = False
    assert ob.BooleanField is False
    # Truthy/falsy ints are coerced to CLR bool on assignment.
    ob.BooleanField = 1
    assert ob.BooleanField is True
    ob.BooleanField = 0
    assert ob.BooleanField is False
def test_sbyte_field():
"""Test sbyte fields."""
ob = FieldTest()
assert ob.SByteField == 0
ob.SByteField = 1
assert ob.SByteField == 1
def test_byte_field():
"""Test byte fields."""
ob = FieldTest()
assert ob.ByteField == 0
ob.ByteField = 1
assert ob.ByteField == 1
def test_char_field():
"""Test char fields."""
ob = FieldTest()
assert ob.CharField == u'A'
assert ob.CharField == 'A'
ob.CharField = 'B'
assert ob.CharField == u'B'
assert ob.CharField == 'B'
ob.CharField = u'C'
assert ob.CharField == u'C'
assert ob.CharField == 'C'
def test_int16_field():
"""Test int16 fields."""
ob = FieldTest()
assert ob.Int16Field == 0
ob.Int16Field = 1
assert ob.Int16Field == 1
def test_int32_field():
"""Test int32 fields."""
ob = FieldTest()
assert ob.Int32Field == 0
ob.Int32Field = 1
assert ob.Int32Field == 1
def test_int64_field():
"""Test int64 fields."""
ob = FieldTest()
assert ob.Int64Field == 0
ob.Int64Field = 1
assert ob.Int64Field == 1
def test_uint16_field():
"""Test uint16 fields."""
ob = FieldTest()
assert ob.UInt16Field == 0
ob.UInt16Field = 1
assert ob.UInt16Field == 1
def test_uint32_field():
"""Test uint32 fields."""
ob = FieldTest()
assert ob.UInt32Field == 0
ob.UInt32Field = 1
assert ob.UInt32Field == 1
def test_uint64_field():
"""Test uint64 fields."""
ob = FieldTest()
assert ob.UInt64Field == 0
ob.UInt64Field = 1
assert ob.UInt64Field == 1
def test_single_field():
"""Test single fields."""
ob = FieldTest()
assert ob.SingleField == 0.0
ob.SingleField = 1.1
assert ob.SingleField == 1.1
def test_double_field():
"""Test double fields."""
ob = FieldTest()
assert ob.DoubleField == 0.0
ob.DoubleField = 1.1
assert ob.DoubleField == 1.1
def test_decimal_field():
"""Test decimal fields."""
ob = FieldTest()
assert ob.DecimalField == System.Decimal(0)
ob.DecimalField = System.Decimal(1)
assert ob.DecimalField == System.Decimal(1)
def test_string_field():
"""Test string fields."""
ob = FieldTest()
assert ob.StringField == "spam"
ob.StringField = "eggs"
assert ob.StringField == "eggs"
def test_interface_field():
"""Test interface fields."""
from Python.Test import Spam, ISpam
ob = FieldTest()
assert ISpam(ob.SpamField).GetValue() == "spam"
assert ob.SpamField.GetValue() == "spam"
ob.SpamField = Spam("eggs")
assert ISpam(ob.SpamField).GetValue() == "eggs"
assert ob.SpamField.GetValue() == "eggs"
def test_object_field():
"""Test ob fields."""
ob = FieldTest()
assert ob.ObjectField is None
ob.ObjectField = System.String("spam")
assert ob.ObjectField == "spam"
ob.ObjectField = System.Int32(1)
assert ob.ObjectField == 1
ob.ObjectField = None
assert ob.ObjectField is None
def test_enum_field():
"""Test enum fields."""
from Python.Test import ShortEnum
ob = FieldTest()
assert ob.EnumField == ShortEnum.Zero
ob.EnumField = ShortEnum.One
assert ob.EnumField == ShortEnum.One
def test_nullable_field():
    """Test nullable fields."""
    # Reference-typed fields (string, object, interface) accept None.
    ob = FieldTest()
    ob.StringField = None
    assert ob.StringField is None
    ob.ObjectField = None
    assert ob.ObjectField is None
    ob.SpamField = None
    assert ob.SpamField is None

    # Primitive types and enums should not be set to null.
    with pytest.raises(TypeError):
        FieldTest().Int32Field = None

    with pytest.raises(TypeError):
        FieldTest().EnumField = None
| AlexCatarino/pythonnet | tests/test_field.py | Python | mit | 8,719 | 0 |
import traceback, socket, os, time, smtplib, re, sys, getpass, logging
try:
import autotest.common as common
except ImportError:
import common
from autotest_lib.client.common_lib import global_config
CONFIG_SECTION = 'SCHEDULER'
CONFIG_SECTION_SMTP = 'SERVER'
class EmailNotificationManager(object):
    """Collects scheduler notification messages and emails them in batches.

    Sender, recipients and SMTP connection settings are read from the
    global_config SCHEDULER and SERVER sections at construction time.
    """

    def __init__(self):
        # Pending message bodies, flushed by send_queued_emails().
        self._emails = []
        self._from_address = global_config.global_config.get_config_value(
            CONFIG_SECTION, "notify_email_from", default=getpass.getuser())
        self._notify_address = global_config.global_config.get_config_value(
            CONFIG_SECTION, "notify_email", default='')
        self._smtp_server = global_config.global_config.get_config_value(
            CONFIG_SECTION_SMTP, "smtp_server", default='localhost')
        self._smtp_port = global_config.global_config.get_config_value(
            CONFIG_SECTION_SMTP, "smtp_port", default=None)
        self._smtp_user = global_config.global_config.get_config_value(
            CONFIG_SECTION_SMTP, "smtp_user", default='')
        self._smtp_password = global_config.global_config.get_config_value(
            CONFIG_SECTION_SMTP, "smtp_password", default='')

    def send_email(self, to_string, subject, body):
        """Mails out emails to the addresses listed in to_string.

        to_string is split into a list which can be delimited by any of:
        ';', ',', ':' or any whitespace
        """
        # Split on any delimiter, dropping empty entries produced by runs of
        # consecutive delimiters.  (Raw string fixes the non-raw '\s' escape.)
        to_list = [x for x in re.split(r'[\s,;:]', to_string) if x]
        if not to_list:
            return
        msg = "From: %s\nTo: %s\nSubject: %s\n\n%s" % (
            self._from_address, ', '.join(to_list), subject, body)
        try:
            mailer = smtplib.SMTP(self._smtp_server, self._smtp_port)
            try:
                if self._smtp_user:
                    mailer.login(self._smtp_user, self._smtp_password)
                mailer.sendmail(self._from_address, to_list, msg)
            finally:
                try:
                    mailer.quit()
                except Exception:
                    # Best effort; a failed disconnect must not mask the
                    # outcome of the send itself.
                    logging.exception('mailer.quit() failed:')
        except Exception:
            # Email delivery is best-effort; log and carry on.
            logging.exception('Sending email failed:')

    def enqueue_notify_email(self, subject, message):
        """Log an error and queue a notification body for the next flush.

        Does nothing further when no notify address is configured.
        """
        logging.error(subject + '\n' + message)
        if not self._notify_address:
            return

        body = 'Subject: ' + subject + '\n'
        body += "%s / %s / %s\n%s" % (socket.gethostname(),
                                      os.getpid(),
                                      time.strftime("%X %x"), message)
        self._emails.append(body)

    def send_queued_emails(self):
        """Flush all queued notifications as one batched email."""
        if not self._emails:
            return
        subject = 'Scheduler notifications from ' + socket.gethostname()
        separator = '\n' + '-' * 40 + '\n'
        body = separator.join(self._emails)

        self.send_email(self._notify_address, subject, body)
        self._emails = []

    def log_stacktrace(self, reason):
        """Log the active exception and queue an email with its traceback."""
        logging.exception(reason)
        message = "EXCEPTION: %s\n%s" % (reason, traceback.format_exc())
        self.enqueue_notify_email("monitor_db exception", message)
# Module-level singleton through which the scheduler queues and sends
# notification emails.
manager = EmailNotificationManager()
| libvirt/autotest | scheduler/email_manager.py | Python | gpl-2.0 | 3,273 | 0.002139 |
# -*- coding: utf-8 -*-

# XML namespaces used when building and signing NF-e documents.
NAMESPACE_NFE = 'http://www.portalfiscal.inf.br/nfe'
NAMESPACE_SIG = 'http://www.w3.org/2000/09/xmldsig#'
NAMESPACE_SOAP = 'http://www.w3.org/2003/05/soap-envelope'
NAMESPACE_XSI = 'http://www.w3.org/2001/XMLSchema-instance'
NAMESPACE_XSD = 'http://www.w3.org/2001/XMLSchema'
NAMESPACE_METODO = 'http://www.portalfiscal.inf.br/nfe/wsdl/'

# Default NF-e layout version and QR-code version emitted by this library.
VERSAO_PADRAO = '3.10'
VERSAO_QRCODE = '100'

# Accepted taxpayer document kinds (corporate / individual tax ids).
TIPOS_DOCUMENTO = (
    'CNPJ',
    'CPF',
)
ICMS_TIPOS_TRIBUTACAO = (
('00', 'ICMS 00 - Tributada integralmente'),
('10', 'ICMS 10 - Tributada com cobranca do ICMS por substituicao tributaria'),
('20', 'ICMS 20 - Com reducao da base de calculo'),
('30', 'ICMS 30 - Isenta ou nao tributada e com cobranca do ICMS por substituicao tributaria'),
('40', 'ICMS 40 - Isenta'),
('41', 'ICMS 41 - Nao tributada'),
('50', 'ICMS 50 - Suspensao'),
('51', 'ICMS 51 - Diferimento'),
('60', 'ICMS 60 - Cobrado anteriormente por substituicao tributaria'),
('70', 'ICMS 70 - Com reducao da base de calculo e cobranca do ICMS por substituicao tributaria'),
('90', 'ICMS 90 - Outras'),
('101', 'ICMS 101 - Tributação ICMS pelo Simples Nacional, CSOSN=101'),
('102', 'ICMS 102 - Tributação ICMS pelo Simples Nacional, CSOSN=102, 103, 300 ou 400'),
('201', 'ICMS 201 - Tributação ICMS pelo Simples Nacional, CSOSN=201'),
('202', 'ICMS 202 - Tributação ICMS pelo Simples Nacional, CSOSN=202 ou 203'),
('500', 'ICMS 500 - Tributação ICMS pelo Simples Nacional, CSOSN=500'),
('900', 'ICMS 900 - Tributação ICMS pelo Simples Nacional, CSOSN=900'),
('ST', 'ICMS ST - Grupo de informação do ICMS ST devido para a UF de destino, nas operações interestaduais de produtos que tiveram retenção antecipada de ICMS por ST na UF do remetente. Repasse via Substituto Tributário.')
)
ICMS_ORIGENS = (
(0, 'Nacional'),
(1, 'Estrangeira - Importacao Direta'),
(2, 'Estrangeira - Adquirida no Mercado Interno'),
)
ICMS_MODALIDADES = (
(0, 'Margem Valor Agregado'),
(1, 'Pauta (valor)'),
(2, 'Preco Tabelado Max. (valor)'),
(3, 'Valor da Operacao'),
)
NF_STATUS = (
'Em Digitacao',
'Validada',
'Assinada',
'Em processamento',
'Autorizada',
'Rejeitada',
'Cancelada',
)
NF_TIPOS_DOCUMENTO = (
(0, 'Entrada'),
(1, 'Saida'),
)
NF_PROCESSOS_EMISSAO = (
(0, u'Emissão de NF-e com aplicativo do contribuinte'),
(1, u'Emissão de NF-e avulsa pelo Fisco'),
(2, u'Emissão de NF-e avulsa, pelo contribuinte com seu certificado digital, através do site do Fisco'),
(3, u'Emissão NF-e pelo contribuinte com aplicativo fornecido pelo Fisco'),
)
NF_TIPOS_IMPRESSAO_DANFE = (
(1, 'Retrato'),
(2, 'Paisagem'),
)
NF_FORMAS_PAGAMENTO = (
(0, 'Pagamento a vista'),
(1, 'Pagamento a prazo'),
(2, 'Outros'),
)
NF_FORMAS_EMISSAO = (
(1, 'Normal'),
(2, 'Contingencia'),
(3, 'Contingencia com SCAN'),
(4, 'Contingencia via DPEC'),
(5, 'Contingencia FS-DA'),
)
NF_FINALIDADES_EMISSAO = (
(1, 'NF-e normal'),
(2, 'NF-e complementar'),
(3, 'NF-e de ajuste'),
)
NF_REFERENCIADA_TIPOS = (
'Nota Fiscal eletronica',
'Nota Fiscal',
)
NF_PRODUTOS_ESPECIFICOS = (
'Veiculo',
'Medicamento',
'Armamento',
'Combustivel',
)
NF_AMBIENTES = (
(1, 'Producao'),
(2, 'Homologacao'),
)
IPI_TIPOS_TRIBUTACAO = (
('00', 'IPI 00 - Entrada com recuperacao de credito'),
('01', 'IPI 01 - Entrada tributada com aliquota zero'),
('02', 'IPI 02 - Entrada isenta'),
('03', 'IPI 03 - Entrada nao-tributada'),
('04', 'IPI 04 - Entrada imune'),
('05', 'IPI 05 - Entrada com suspensao'),
('49', 'IPI 49 - Outras entradas'),
('50', 'IPI 50 - Saida tributada'),
('51', 'IPI 51 - Saida tributada com aliquota zero'),
('52', 'IPI 52 - Saida isenta'),
('53', 'IPI 53 - Saida nao-tributada'),
('54', 'IPI 54 - Saida imune'),
('55', 'IPI 55 - Saida com suspensao'),
('99', 'IPI 99 - Outas saidas'),
)
IPI_TIPOS_CALCULO = (
'Percentual',
'Em Valor',
)
PIS_TIPOS_TRIBUTACAO = (
('01', 'PIS 01 - Operação Tributável - Base de cálculo = valor da operação alíquota normal (cumulativo/não cumulativo)'),
('02', 'PIS 02 - Operação Tributável - Base de cálculo = valor da operação (alíquota diferenciada)'),
('03', 'PIS 03 - Operacao Tributavel - Base de cálculo = quantidade vendida x alíquota por unidade de produto)'),
('04', 'PIS 04 - Operacao Tributavel - Tributacao Monofasica - (Aliquota Zero)'),
('06', 'PIS 06 - Operacao Tributavel - Aliquota Zero'),
('07', 'PIS 07 - Operacao Isenta da Contribuicao'),
('08', 'PIS 08 - Operacao sem Indidencia da Contribuicao'),
('09', 'PIS 09 - Operacao com Suspensao da Contribuicao'),
('49', 'PIS 49 - Outras Operações de Saída'),
('50', 'PIS 50 - Operação com Direito a Crédito - Vinculada Exclusivamente a Receita Tributada no Mercado Interno'),
('51', 'PIS 51 - Operação com Direito a Crédito - Vinculada Exclusivamente a Receita Não Tributada no Mercado Interno'),
('52', 'PIS 52 - Operação com Direito a Crédito – Vinculada Exclusivamente a Receita de Exportação'),
('53', 'PIS 53 - Operação com Direito a Crédito - Vinculada a Receitas Tributadas e Não-Tributadas no Mercado Interno'),
('54', 'PIS 54 - Operação com Direito a Crédito - Vinculada a Receitas Tributadas no Mercado Interno e de Exportação'),
('55', 'PIS 55 - Operação com Direito a Crédito - Vinculada a Receitas Não Tributadas no Mercado Interno e de Exportação'),
('56', 'PIS 56 - Operação com Direito a Crédito - Vinculada a Receitas Tributadas e Não Tributadas no Mercado Interno, e de Exportação'),
('60', 'PIS 60 - Crédito Presumido - Operação de Aquisição Vinculada Exclusivamente a Receita Tributada no Mercado Interno'),
('61', 'PIS 61 - Crédito Presumido - Operação de Aquisição Vinculada Exclusivamente a Receita Não Tributada no Mercado Interno'),
('62', 'PIS 62 - Crédito Presumido - Operação de Aquisição Vinculada Exclusivamente a Receita de Exportação'),
('63', 'PIS 63 - Crédito Presumido - Operação de Aquisição Vinculada a Receitas Tributadas e Não-Tributadas no Mercado Interno'),
('64', 'PIS 64 - Crédito Presumido - Operação de Aquisição Vinculada a Receitas Tributadas no Mercado Interno e de Exportação'),
('65', 'PIS 65 - Crédito Presumido - Operação de Aquisição Vinculada a Receitas Não Tributadas no Mercado Interno e de Exportação'),
('66', 'PIS 66 - Crédito Presumido - Operação de Aquisição Vinculada a Receitas Tributadas e Não-Tributadas no Mercado Interno, e de Exportação'),
('67', 'PIS 67 - Crédito Presumido - Outras Operações'),
('70', 'PIS 70 - Operação de Aquisição sem Direito a Crédito'),
('71', 'PIS 71 - Operação de Aquisição com Isenção'),
('72', 'PIS 72 - Operação de Aquisição com Suspensão'),
('73', 'PIS 73 - Operação de Aquisição a Alíquota Zero'),
('74', 'PIS 74 - Operação de Aquisição; sem Incidência da Contribuição'),
('75', 'PIS 75 - Operação de Aquisição por Substituição Tributária'),
('98', 'PIS 98 - Outras Operações de Entrada'),
('99', 'PIS 99 - Outras operacoes'),
)
PIS_TIPOS_CALCULO = IPI_TIPOS_CALCULO
COFINS_TIPOS_TRIBUTACAO = (
('01', 'COFINS 01 - Operação Tributável - Base de cálculo = valor da operação alíquota normal (cumulativo/não cumulativo)'),
('02', 'COFINS 02 - Operação Tributável - Base de cálculo = valor da operação (alíquota diferenciada)'),
('03', 'COFINS 03 - Operacao Tributavel - Base de cálculo = quantidade vendida x alíquota por unidade de produto)'),
('04', 'COFINS 04 - Operacao Tributavel - Tributacao Monofasica - (Aliquota Zero)'),
('06', 'COFINS 06 - Operacao Tributavel - Aliquota Zero'),
('07', 'COFINS 07 - Operacao Isenta da Contribuicao'),
('08', 'COFINS 08 - Operacao sem Indidencia da Contribuicao'),
('09', 'COFINS 09 - Operacao com Suspensao da Contribuicao'),
('49', 'COFINS 49 - Outras Operações de Saída'),
('50', 'COFINS 50 - Operação com Direito a Crédito - Vinculada Exclusivamente a Receita Tributada no Mercado Interno'),
('51', 'COFINS 51 - Operação com Direito a Crédito - Vinculada Exclusivamente a Receita Não Tributada no Mercado Interno'),
('52', 'COFINS 52 - Operação com Direito a Crédito – Vinculada Exclusivamente a Receita de Exportação'),
('53', 'COFINS 53 - Operação com Direito a Crédito - Vinculada a Receitas Tributadas e Não-Tributadas no Mercado Interno'),
('54', 'COFINS 54 - Operação com Direito a Crédito - Vinculada a Receitas Tributadas no Mercado Interno e de Exportação'),
('55', 'COFINS 55 - Operação com Direito a Crédito - Vinculada a Receitas Não Tributadas no Mercado Interno e de Exportação'),
('56', 'COFINS 56 - Operação com Direito a Crédito - Vinculada a Receitas Tributadas e Não Tributadas no Mercado Interno, e de Exportação'),
('60', 'COFINS 60 - Crédito Presumido - Operação de Aquisição Vinculada Exclusivamente a Receita Tributada no Mercado Interno'),
('61', 'COFINS 61 - Crédito Presumido - Operação de Aquisição Vinculada Exclusivamente a Receita Não Tributada no Mercado Interno'),
('62', 'COFINS 62 - Crédito Presumido - Operação de Aquisição Vinculada Exclusivamente a Receita de Exportação'),
('63', 'COFINS 63 - Crédito Presumido - Operação de Aquisição Vinculada a Receitas Tributadas e Não-Tributadas no Mercado Interno'),
('64', 'COFINS 64 - Crédito Presumido - Operação de Aquisição Vinculada a Receitas Tributadas no Mercado Interno e de Exportação'),
('65', 'COFINS 65 - Crédito Presumido - Operação de Aquisição Vinculada a Receitas Não Tributadas no Mercado Interno e de Exportação'),
('66', 'COFINS 66 - Crédito Presumido - Operação de Aquisição Vinculada a Receitas Tributadas e Não-Tributadas no Mercado Interno, e de Exportação'),
('67', 'COFINS 67 - Crédito Presumido - Outras Operações'),
('70', 'COFINS 70 - Operação de Aquisição sem Direito a Crédito'),
('71', 'COFINS 71 - Operação de Aquisição com Isenção'),
('72', 'COFINS 72 - Operação de Aquisição com Suspensão'),
('73', 'COFINS 73 - Operação de Aquisição a Alíquota Zero'),
('74', 'COFINS 74 - Operação de Aquisição; sem Incidência da Contribuição'),
('75', 'COFINS 75 - Operação de Aquisição por Substituição Tributária'),
('98', 'COFINS 98 - Outras Operações de Entrada'),
('99', 'COFINS 99 - Outras operacoes'),
)
COFINS_TIPOS_CALCULO = IPI_TIPOS_CALCULO
MODALIDADES_FRETE = (
(0, '0 - Por conta do emitente'),
(1, '1 - Por conta do destinatario'),
(2, '2 - Por conta de terceiros'),
(9, '9 - Sem frete'),
)
ORIGENS_PROCESSO = (
'SEFAZ',
'Justica federal',
'Justica estadual',
'Secex/RFB',
'Outros',
)
CODIGO_BRASIL = '1058'
CODIGOS_ESTADOS = {
'RO': '11',
'AC': '12',
'AM': '13',
'RR': '14',
'PA': '15',
'AP': '16',
'TO': '17',
'MA': '21',
'PI': '22',
'CE': '23',
'RN': '24',
'PB': '25',
'PE': '26',
'AL': '27',
'SE': '28',
'BA': '29',
'MG': '31',
'ES': '32',
'RJ': '33',
'SP': '35',
'PR': '41',
'SC': '42',
'RS': '43',
'MS': '50',
'MT': '51',
'GO': '52',
'DF': '53',
}
| leonardoRC/PyNFe | pynfe/utils/flags.py | Python | lgpl-3.0 | 11,560 | 0.004088 |
# Standard Library
from builtins import str
import os
import zipfile
from urllib.parse import quote_plus
from urllib.request import urlopen
# Third Party Stuff
from django import template
from django.conf import settings
from django.contrib.auth.models import User
from django.db.models import Q
# Spoken Tutorial Stuff
from creation.models import *
from creation.views import (
is_administrator,
is_contenteditor,
is_contributor,
is_domainreviewer,
is_external_contributor,
is_internal_contributor,
is_qualityreviewer,
is_videoreviewer,
is_language_manager
)
from spoken.forms import TutorialSearchForm
register = template.Library()
def format_component_title(name):
    """Turn an internal component key like 'code_files' into a display
    title ('Code files')."""
    spaced = name.replace('_', ' ')
    return spaced.capitalize()
def get_url_name(name):
    # Percent-encode for use as a URL path component; spaces become '+'.
    return quote_plus(name)
def get_zip_content(path):
    """Return the list of member names in the zip archive at ``path``.

    Returns False when the archive cannot be opened or read (missing file,
    corrupt zip, ...), matching how callers truth-test the result.
    """
    try:
        # Context manager guarantees the archive handle is closed; the
        # original leaked the open ZipFile.
        with zipfile.ZipFile(path, 'r') as zf:
            return zf.namelist()
    except Exception:
        return False
def is_script_available(path):
    """Return True if ``path`` is a URL that answers with HTTP 200.

    Bug fixes: the original referenced an undefined name ``script_path``
    (always NameError), and read ``e.code`` which only exists on HTTPError
    (AttributeError for URLError/ValueError).
    """
    try:
        code = urlopen(path).code
    except Exception as e:
        # HTTPError carries a status code; other failures count as 0.
        code = getattr(e, 'code', 0)
    return int(code) == 200
def get_review_status_list(key):
    """Map a numeric review status to its human-readable label."""
    labels = (
        'Pending',
        'Waiting for Admin Review',
        'Waiting for Domain Review',
        'Waiting for Quality Review',
        'Accepted',
        'Need Improvement',
        'Not Required',
    )
    return labels[key]
def get_review_status_class(key):
    """Map a numeric review status to its Bootstrap table-row class."""
    css_classes = (
        'danger',
        'active',
        'warning',
        'info',
        'success',
        'danger',
        'success',
    )
    return css_classes[key]
def get_review_status_symbol(key):
    """Map a numeric review status to its Font Awesome icon classes."""
    icons = (
        'fa fa-1 fa-minus-circle review-pending-upload',
        'fa fa-1 fa-check-circle review-admin-review',
        'fa fa-1 fa-check-circle review-domain-review',
        'fa fa-1 fa-check-circle review-quality-review',
        'fa fa-1 fa-check-circle review-accepted',
        'fa fa-1 fa-times-circle review-pending-upload',
        'fa fa-1 fa-ban review-accepted',
    )
    return icons[key]
def get_username(key):
    """Return the username of the User whose primary key is ``key``."""
    return User.objects.get(pk=key).username
def get_last_video_upload_time(key):
    """Return the creation time of the most recent ContributorLog entry for
    the given tutorial resource, falling back to ``key.updated`` when there
    is no log entry (or the query fails).

    Bug fix: the original iterated over a single model instance, which
    always raised and therefore always returned ``key.updated``.
    """
    try:
        latest = ContributorLog.objects.filter(
            tutorial_resource_id=key.id).order_by('-created')[0]
        return latest.created
    except Exception:
        # IndexError when there are no log rows; keep best-effort behavior.
        return key.updated
def get_component_name(comp):
    """Map a tutorial component id (1-6) to its display name; '' for any
    unknown id."""
    names = {
        1: 'Outline',
        2: 'Script',
        3: 'Video',
        4: 'Slides',
        5: 'Codefiles',
        6: 'Assignment',
    }
    return names.get(comp, '').title()
def get_missing_component_reply(mcid):
    """Render all replies for a missing-component report as an HTML blob.

    NOTE(review): reply_message and username are interpolated into HTML
    without escaping; if these are user-supplied this is an XSS vector —
    confirm how the consuming template renders this value.
    """
    rows = TutorialMissingComponentReply.objects.filter(missing_component_id = mcid)
    replies = ''
    for row in rows:
        replies += '<p>' + row.reply_message + '<b> -' + row.user.username + '</b></p>'
    # Prefix a heading only when there is at least one reply.
    if replies:
        replies = '<br /><b>Replies:</b>' + replies
    return replies
def formatismp4(path):
    '''
    ** Registered to be used in jinja template **
    Return True when the file name ends with an mp4 or mov extension.
    '''
    return path.endswith(('mp4', 'mov'))
def instruction_sheet(foss, lang):
    """Return the MEDIA_URL path of the foss's Instruction Sheet PDF in
    ``lang``, falling back to the English sheet; False if neither file
    exists on disk.
    """
    # Prefer the localized sheet for non-English languages.
    file_path = settings.MEDIA_ROOT + 'videos/' + str(foss.id) + '/' + foss.foss.replace(' ', '-') + '-Instruction-Sheet-' + lang.name + '.pdf'
    if lang.name != 'English':
        if os.path.isfile(file_path):
            file_path = settings.MEDIA_URL + 'videos/' + str(foss.id) + '/' + foss.foss.replace(' ', '-') + '-Instruction-Sheet-' + lang.name + '.pdf'
            return file_path
    # Fall back to the English sheet.
    file_path = settings.MEDIA_ROOT + 'videos/' + str(foss.id) + '/' + foss.foss.replace(' ', '-') + '-Instruction-Sheet-English.pdf'
    if os.path.isfile(file_path):
        file_path = settings.MEDIA_URL + 'videos/' + str(foss.id) + '/' + foss.foss.replace(' ', '-') + '-Instruction-Sheet-English.pdf'
        return file_path
    return False
def installation_sheet(foss, lang):
    """Return the MEDIA_URL path of the foss's Installation Sheet PDF in
    ``lang``, falling back to the English sheet; False if neither file
    exists on disk.
    """
    # Prefer the localized sheet for non-English languages.
    file_path = settings.MEDIA_ROOT + 'videos/' + str(foss.id) + '/' + foss.foss.replace(' ', '-') + '-Installation-Sheet-' + lang.name + '.pdf'
    if lang.name != 'English':
        if os.path.isfile(file_path):
            file_path = settings.MEDIA_URL + 'videos/' + str(foss.id) + '/' + foss.foss.replace(' ', '-') + '-Installation-Sheet-' + lang.name + '.pdf'
            return file_path
    # Fall back to the English sheet.
    file_path = settings.MEDIA_ROOT + 'videos/' + str(foss.id) + '/' + foss.foss.replace(' ', '-') + '-Installation-Sheet-English.pdf'
    if os.path.isfile(file_path):
        file_path = settings.MEDIA_URL + 'videos/' + str(foss.id) + '/' + foss.foss.replace(' ', '-') + '-Installation-Sheet-English.pdf'
        return file_path
    return False
def brochure(foss, lang):
    """Return the MEDIA_URL path of the foss's Brochure PDF in ``lang``,
    falling back to the English brochure; False if neither file exists
    on disk.
    """
    # Prefer the localized brochure for non-English languages.
    file_path = settings.MEDIA_ROOT + 'videos/' + str(foss.id) + '/' + foss.foss.replace(' ', '-') + '-Brochure-' + lang.name + '.pdf'
    if lang.name != 'English':
        if os.path.isfile(file_path):
            file_path = settings.MEDIA_URL + 'videos/' + str(foss.id) + '/' + foss.foss.replace(' ', '-') + '-Brochure-' + lang.name + '.pdf'
            return file_path
    # Fall back to the English brochure.
    file_path = settings.MEDIA_ROOT + 'videos/' + str(foss.id) + '/' + foss.foss.replace(' ', '-') + '-Brochure-English.pdf'
    if os.path.isfile(file_path):
        file_path = settings.MEDIA_URL + 'videos/' + str(foss.id) + '/' + foss.foss.replace(' ', '-') + '-Brochure-English.pdf'
        return file_path
    return False
def get_thumb_path(row, append_str):
    """Build the MEDIA_URL path of a tutorial thumbnail image
    (``<tutorial>-<append_str>.png``)."""
    parts = (
        settings.MEDIA_URL,
        'videos/',
        str(row.foss_id),
        '/',
        str(row.id),
        '/',
        row.tutorial.replace(' ', '-'),
        '-',
        append_str,
        '.png',
    )
    return ''.join(parts)
def get_srt_path(tr):
    """Build HTML <track> caption tags for the tutorial's English and (when
    the resource is non-English) native-language .srt files that exist on
    disk.  Returns '' when no caption file is present.

    Fix: removed a leftover debug print of the native srt path.
    """
    # Media-relative prefix shared by both caption files.
    base = 'videos/' + str(tr.tutorial_detail.foss_id) + '/' + \
        str(tr.tutorial_detail_id) + '/' + \
        tr.tutorial_detail.tutorial.replace(' ', '-') + '-'
    data = ''
    if os.path.isfile(settings.MEDIA_ROOT + base + 'English.srt'):
        data = '<track kind="captions" src="' + settings.MEDIA_URL + base + \
            'English.srt' + '" srclang="en" label="English"></track>'
    if tr.language.name != 'English':
        native = base + tr.language.name + '.srt'
        if os.path.isfile(settings.MEDIA_ROOT + native):
            data += '<track kind="captions" src="' + settings.MEDIA_URL + \
                native + '" srclang="en" label="' + tr.language.name + '"></track>'
    return data
def get_video_visits(tr):
    """Increment, persist and return the tutorial resource's hit counter.

    NOTE(review): despite the "get" name this writes to the database, and
    the read-modify-write is not atomic — concurrent requests can lose
    hits.  An F()-expression update would fix that; confirm before changing.
    """
    tr.hit_count = tr.hit_count + 1
    tr.save()
    return tr.hit_count
def get_prerequisite(tr, td):
    """Return the URL path ('foss/tutorial/language') of the published
    prerequisite tutorial ``td`` in ``tr``'s language, falling back to the
    English version for non-English resources; None when neither exists.

    Fix: removed leftover debug prints and a dead ``pass`` statement.
    """
    try:
        tr_rec = TutorialResource.objects.get(
            Q(status = 1) | Q(status = 2), tutorial_detail = td,
            language_id = tr.language_id)
        return get_url_name(td.foss.foss) + '/' + \
            get_url_name(td.tutorial) + '/' + tr_rec.language.name
    except Exception:
        # Fall back to the English version, if one is published.
        if tr.language.name != 'English':
            try:
                TutorialResource.objects.get(
                    Q(status = 1) | Q(status = 2), tutorial_detail = td,
                    language__name = 'English')
                return get_url_name(td.foss.foss) + '/' + \
                    get_url_name(td.tutorial) + '/English'
            except Exception:
                return None
        return None
def get_prerequisite_from_td(td, lang):
    """Return the id of the published TutorialResource for ``td`` in
    ``lang``, falling back to the English resource; None when neither
    exists.

    Fix: bare ``except:`` clauses narrowed to ``except Exception`` so
    SystemExit/KeyboardInterrupt are no longer swallowed.
    """
    try:
        tr_rec = TutorialResource.objects.get(
            Q(status = 1) | Q(status = 2), tutorial_detail = td,
            language_id = lang.id)
        return tr_rec.id
    except Exception:
        if lang.name != 'English':
            try:
                tr_rec = TutorialResource.objects.get(
                    Q(status = 1) | Q(status = 2), tutorial_detail = td,
                    language__name = 'English')
                return tr_rec.id
            except Exception:
                pass
    return None
def get_timed_script(script_path, timed_script_path):
    """Return the URL of the tutorial's timed script if it is reachable
    (HTTP 200), else ''.

    Prefers the explicit ``timed_script_path``; otherwise derives the URL
    from ``script_path``, retrying with spaces/underscores replaced by
    hyphens (older uploads used either naming convention).

    Fix: removed leftover debug prints.
    """
    if timed_script_path:
        timed_script = settings.SCRIPT_URL + timed_script_path
    else:
        timed_script = settings.SCRIPT_URL + script_path + '-timed'
    code = 0
    try:
        code = urlopen(timed_script).code
    except Exception:
        # Retry with the hyphenated naming convention.
        timed_script = settings.SCRIPT_URL + \
            script_path.replace(' ', '-').replace('_', '-') + '-timed'
        try:
            code = urlopen(timed_script).code
        except Exception:
            code = 0
    if int(code) == 200:
        return timed_script
    return ''
def tutorialsearch():
    """Inclusion-tag context provider: supply a fresh TutorialSearchForm to
    the tutorial search form template."""
    context = {
        'form': TutorialSearchForm()
    }
    return context
def get_mp4_video(tr):
    """Return the media-relative path of the tutorial's .mp4 rendition if
    it exists on disk, else False.

    ``tr.video`` holds a filename with a 4-character extension; the
    extension is swapped for '.mp4'.  Fix: removed the unused ``text``
    local and the pointless tuple unpack.
    """
    base_name = tr.video[:-4]  # strip the 4-char extension (e.g. '.ogv')
    rel_path = 'videos/' + str(tr.tutorial_detail.foss_id) + '/' + \
        str(tr.tutorial_detail_id) + '/' + base_name + '.mp4'
    if os.path.isfile(settings.MEDIA_ROOT + rel_path):
        return rel_path
    return False
register.inclusion_tag('spoken/templates/tutorial_search_form.html')(tutorialsearch)
#register.filter('tutorialsearch', tutorialsearch)
register.filter('get_timed_script', get_timed_script)
register.filter('formatismp4', formatismp4)
register.filter('get_prerequisite_from_td', get_prerequisite_from_td)
register.filter('get_prerequisite', get_prerequisite)
register.filter('get_video_visits', get_video_visits)
register.filter('get_srt_path', get_srt_path)
register.filter('get_thumb_path', get_thumb_path)
register.filter('get_missing_component_reply', get_missing_component_reply)
register.filter('get_component_name', get_component_name)
register.filter('get_url_name', get_url_name)
register.filter('get_zip_content', get_zip_content)
register.filter('get_contributor', is_contributor)
register.filter('get_internal_contributor', is_internal_contributor)
register.filter('get_external_contributor', is_external_contributor)
register.filter('get_videoreviewer', is_videoreviewer)
register.filter('get_domainreviewer', is_domainreviewer)
register.filter('get_qualityreviewer', is_qualityreviewer)
register.filter('get_administrator', is_administrator)
register.filter('get_last_video_upload_time', get_last_video_upload_time)
register.filter('get_review_status_list', get_review_status_list)
register.filter('get_review_status_symbol', get_review_status_symbol)
register.filter('get_review_status_class', get_review_status_class)
register.filter('get_username', get_username)
register.filter('instruction_sheet', instruction_sheet)
register.filter('installation_sheet', installation_sheet)
register.filter('brochure', brochure)
register.filter('get_contenteditor', is_contenteditor)
register.filter('format_component_title', format_component_title)
register.filter('get_mp4_video', get_mp4_video)
register.filter('get_language_manager',is_language_manager) | Spoken-tutorial/spoken-website | creation/templatetags/creationdata.py | Python | gpl-3.0 | 11,230 | 0.011843 |
"""
Tests for content parsing, and form-overloaded content parsing.
"""
from __future__ import unicode_literals
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login, logout
from django.contrib.sessions.middleware import SessionMiddleware
from django.core.handlers.wsgi import WSGIRequest
from django.test import TestCase
from rest_framework import status
from rest_framework.authentication import SessionAuthentication
from rest_framework.compat import patterns
from rest_framework.parsers import (
BaseParser,
FormParser,
MultiPartParser,
JSONParser
)
from rest_framework.request import Request, Empty
from rest_framework.response import Response
from rest_framework.settings import api_settings
from rest_framework.test import APIRequestFactory, APIClient
from rest_framework.views import APIView
from rest_framework.compat import six
from io import BytesIO
import json
factory = APIRequestFactory()
class PlainTextParser(BaseParser):
    # Minimal parser used only by these tests: accepts text/plain request
    # bodies and returns them verbatim.
    media_type = 'text/plain'

    def parse(self, stream, media_type=None, parser_context=None):
        """
        Returns a 2-tuple of `(data, files)`.

        `data` will simply be a string representing the body of the request.
        `files` will always be `None`.
        """
        return stream.read()
class TestMethodOverloading(TestCase):
    # Verifies that Request reports the effective HTTP method, including
    # both DRF method-override mechanisms (reserved form field and the
    # X-HTTP-Method-Override header).

    def test_method(self):
        """
        Request methods should be same as underlying request.
        """
        request = Request(factory.get('/'))
        self.assertEqual(request.method, 'GET')
        request = Request(factory.post('/'))
        self.assertEqual(request.method, 'POST')

    def test_overloaded_method(self):
        """
        POST requests can be overloaded to another method by setting a
        reserved form field
        """
        request = Request(factory.post('/', {api_settings.FORM_METHOD_OVERRIDE: 'DELETE'}))
        self.assertEqual(request.method, 'DELETE')

    def test_x_http_method_override_header(self):
        """
        POST requests can also be overloaded to another method by setting
        the X-HTTP-Method-Override header.
        """
        request = Request(factory.post('/', {'foo': 'bar'}, HTTP_X_HTTP_METHOD_OVERRIDE='DELETE'))
        self.assertEqual(request.method, 'DELETE')

        request = Request(factory.get('/', {'foo': 'bar'}, HTTP_X_HTTP_METHOD_OVERRIDE='DELETE'))
        self.assertEqual(request.method, 'DELETE')
class TestContentParsing(TestCase):
def test_standard_behaviour_determines_no_content_GET(self):
"""
Ensure request.DATA returns empty QueryDict for GET request.
"""
request = Request(factory.get('/'))
self.assertEqual(request.DATA, {})
def test_standard_behaviour_determines_no_content_HEAD(self):
"""
Ensure request.DATA returns empty QueryDict for HEAD request.
"""
request = Request(factory.head('/'))
self.assertEqual(request.DATA, {})
def test_request_DATA_with_form_content(self):
"""
Ensure request.DATA returns content for POST request with form content.
"""
data = {'qwerty': 'uiop'}
request = Request(factory.post('/', data))
request.parsers = (FormParser(), MultiPartParser())
self.assertEqual(list(request.DATA.items()), list(data.items()))
def test_request_DATA_with_text_content(self):
"""
Ensure request.DATA returns content for POST request with
non-form content.
"""
content = six.b('qwerty')
content_type = 'text/plain'
request = Request(factory.post('/', content, content_type=content_type))
request.parsers = (PlainTextParser(),)
self.assertEqual(request.DATA, content)
def test_request_POST_with_form_content(self):
"""
Ensure request.POST returns content for POST request with form content.
"""
data = {'qwerty': 'uiop'}
request = Request(factory.post('/', data))
request.parsers = (FormParser(), MultiPartParser())
self.assertEqual(list(request.POST.items()), list(data.items()))
def test_standard_behaviour_determines_form_content_PUT(self):
"""
Ensure request.DATA returns content for PUT request with form content.
"""
data = {'qwerty': 'uiop'}
request = Request(factory.put('/', data))
request.parsers = (FormParser(), MultiPartParser())
self.assertEqual(list(request.DATA.items()), list(data.items()))
def test_standard_behaviour_determines_non_form_content_PUT(self):
"""
Ensure request.DATA returns content for PUT request with
non-form content.
"""
content = six.b('qwerty')
content_type = 'text/plain'
request = Request(factory.put('/', content, content_type=content_type))
request.parsers = (PlainTextParser(), )
self.assertEqual(request.DATA, content)
def test_overloaded_behaviour_allows_content_tunnelling(self):
"""
Ensure request.DATA returns content for overloaded POST request.
"""
json_data = {'foobar': 'qwerty'}
content = json.dumps(json_data)
content_type = 'application/json'
form_data = {
api_settings.FORM_CONTENT_OVERRIDE: content,
api_settings.FORM_CONTENTTYPE_OVERRIDE: content_type
}
request = Request(factory.post('/', form_data))
request.parsers = (JSONParser(), )
self.assertEqual(request.DATA, json_data)
def test_form_POST_unicode(self):
"""
JSON POST via default web interface with unicode data
"""
# Note: environ and other variables here have simplified content compared to real Request
CONTENT = b'_content_type=application%2Fjson&_content=%7B%22request%22%3A+4%2C+%22firm%22%3A+1%2C+%22text%22%3A+%22%D0%9F%D1%80%D0%B8%D0%B2%D0%B5%D1%82%21%22%7D'
environ = {
'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': 'application/x-www-form-urlencoded',
'CONTENT_LENGTH': len(CONTENT),
'wsgi.input': BytesIO(CONTENT),
}
wsgi_request = WSGIRequest(environ=environ)
wsgi_request._load_post_and_files()
parsers = (JSONParser(), FormParser(), MultiPartParser())
parser_context = {
'encoding': 'utf-8',
'kwargs': {},
'args': (),
}
request = Request(wsgi_request, parsers=parsers, parser_context=parser_context)
method = request.method
self.assertEqual(method, 'POST')
self.assertEqual(request._content_type, 'application/json')
self.assertEqual(request._stream.getvalue(), b'{"request": 4, "firm": 1, "text": "\xd0\x9f\xd1\x80\xd0\xb8\xd0\xb2\xd0\xb5\xd1\x82!"}')
self.assertEqual(request._data, Empty)
self.assertEqual(request._files, Empty)
# def test_accessing_post_after_data_form(self):
# """
# Ensures request.POST can be accessed after request.DATA in
# form request.
# """
# data = {'qwerty': 'uiop'}
# request = factory.post('/', data=data)
# self.assertEqual(request.DATA.items(), data.items())
# self.assertEqual(request.POST.items(), data.items())
# def test_accessing_post_after_data_for_json(self):
# """
# Ensures request.POST can be accessed after request.DATA in
# json request.
# """
# data = {'qwerty': 'uiop'}
# content = json.dumps(data)
# content_type = 'application/json'
# parsers = (JSONParser, )
# request = factory.post('/', content, content_type=content_type,
# parsers=parsers)
# self.assertEqual(request.DATA.items(), data.items())
# self.assertEqual(request.POST.items(), [])
# def test_accessing_post_after_data_for_overloaded_json(self):
# """
# Ensures request.POST can be accessed after request.DATA in overloaded
# json request.
# """
# data = {'qwerty': 'uiop'}
# content = json.dumps(data)
# content_type = 'application/json'
# parsers = (JSONParser, )
# form_data = {Request._CONTENT_PARAM: content,
# Request._CONTENTTYPE_PARAM: content_type}
# request = factory.post('/', form_data, parsers=parsers)
# self.assertEqual(request.DATA.items(), data.items())
# self.assertEqual(request.POST.items(), form_data.items())
# def test_accessing_data_after_post_form(self):
# """
# Ensures request.DATA can be accessed after request.POST in
# form request.
# """
# data = {'qwerty': 'uiop'}
# parsers = (FormParser, MultiPartParser)
# request = factory.post('/', data, parsers=parsers)
# self.assertEqual(request.POST.items(), data.items())
# self.assertEqual(request.DATA.items(), data.items())
# def test_accessing_data_after_post_for_json(self):
# """
# Ensures request.DATA can be accessed after request.POST in
# json request.
# """
# data = {'qwerty': 'uiop'}
# content = json.dumps(data)
# content_type = 'application/json'
# parsers = (JSONParser, )
# request = factory.post('/', content, content_type=content_type,
# parsers=parsers)
# self.assertEqual(request.POST.items(), [])
# self.assertEqual(request.DATA.items(), data.items())
# def test_accessing_data_after_post_for_overloaded_json(self):
# """
# Ensures request.DATA can be accessed after request.POST in overloaded
# json request
# """
# data = {'qwerty': 'uiop'}
# content = json.dumps(data)
# content_type = 'application/json'
# parsers = (JSONParser, )
# form_data = {Request._CONTENT_PARAM: content,
# Request._CONTENTTYPE_PARAM: content_type}
# request = factory.post('/', form_data, parsers=parsers)
# self.assertEqual(request.POST.items(), form_data.items())
# self.assertEqual(request.DATA.items(), data.items())
class MockView(APIView):
    """Echo view for the content-parsing-with-auth tests: 200 when the
    expected form key is present in request.POST, 500 otherwise."""
    authentication_classes = (SessionAuthentication,)

    def post(self, request):
        if request.POST.get('app_scaffolding') is not None:
            return Response(status=status.HTTP_200_OK)
        # Bug fix: the status module has no INTERNAL_SERVER_ERROR attribute;
        # the failure branch would have raised AttributeError instead of
        # returning a 500 response.
        return Response(status=status.HTTP_500_INTERNAL_SERVER_ERROR)
urlpatterns = patterns('',
(r'^$', MockView.as_view()),
)
class TestContentParsingWithAuthentication(TestCase):
    urls = 'rest_framework.tests.test_request'

    def setUp(self):
        """Create a known user and a CSRF-enforcing test client."""
        self.username = 'john'
        self.email = 'lennon@thebeatles.com'
        self.password = 'password'
        self.user = User.objects.create_user(self.username, self.email, self.password)
        self.csrf_client = APIClient(enforce_csrf_checks=True)

    def test_user_logged_in_authentication_has_POST_when_not_logged_in(self):
        """
        request.POST must survive SessionAuthentication for anonymous
        users, with and without CSRF enforcement.
        """
        payload = {'app_scaffolding': 'app_scaffolding'}
        # Same request through the plain client and the CSRF-checking one.
        for client in (self.client, self.csrf_client):
            response = client.post('/', payload)
            self.assertEqual(status.HTTP_200_OK, response.status_code)
# def test_user_logged_in_authentication_has_post_when_logged_in(self):
# """Ensures request.POST exists after UserLoggedInAuthentication when user does log in"""
# self.client.login(username='john', password='password')
# self.csrf_client.login(username='john', password='password')
# content = {'app_scaffolding': 'app_scaffolding'}
# response = self.client.post('/', content)
# self.assertEqual(status.OK, response.status_code, "POST data is malformed")
# response = self.csrf_client.post('/', content)
# self.assertEqual(status.OK, response.status_code, "POST data is malformed")
class TestUserSetter(TestCase):
    def setUp(self):
        """Build a request that has a session, plus an authenticated user."""
        # The session middleware must process the request so that the
        # login()/logout() helpers below have a session to work with.
        self.request = Request(factory.get('/'))
        SessionMiddleware().process_request(self.request)
        User.objects.create_user('ringo', 'starr@thebeatles.com', 'yellow')
        self.user = authenticate(username='ringo', password='yellow')

    def test_user_can_be_set(self):
        self.request.user = self.user
        self.assertEqual(self.user, self.request.user)

    def test_user_can_login(self):
        login(self.request, self.user)
        self.assertEqual(self.user, self.request.user)

    def test_user_can_logout(self):
        self.request.user = self.user
        self.assertFalse(self.request.user.is_anonymous())
        logout(self.request)
        self.assertTrue(self.request.user.is_anonymous())
class TestAuthSetter(TestCase):
    """request.auth should be assignable after construction."""

    def test_auth_can_be_set(self):
        req = Request(factory.get('/'))
        req.auth = 'DUMMY'
        self.assertEqual('DUMMY', req.auth)
| MobileWebApps/backend-python-rest-gae | lib/rest_framework/tests/test_request.py | Python | bsd-3-clause | 13,239 | 0.001057 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Initial schema for the blog app: Article, Tag, and their M2M link."""

    # First migration of the app, so there are no prerequisites.
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Article',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('title', models.CharField(max_length=200)),
                ('content', models.TextField()),
                ('pub_date', models.DateTimeField()),
                ('summary', models.CharField(max_length=500)),
            ],
            options={
                # Articles are listed alphabetically by title by default.
                'ordering': ('title',),
            },
        ),
        migrations.CreateModel(
            name='Tag',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=32)),
            ],
        ),
        # Optional many-to-many tagging of articles.
        migrations.AddField(
            model_name='article',
            name='tag',
            field=models.ManyToManyField(to='blog.Tag', blank=True),
        ),
    ]
| nth2say/simple_django_blog | blog/migrations/0001_initial.py | Python | mit | 1,171 | 0.001708 |
import os
from celery import Celery
from django.apps import apps, AppConfig
from django.conf import settings
# Configure a default settings module only when Django has not been set
# up yet (e.g. when the Celery worker is started outside of manage.py).
if not settings.configured:
    # set the default Django settings module for the 'celery' program.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'config.settings.local')  # pragma: no cover
# Single project-wide Celery application instance.
app = Celery('lipame')
class CeleryConfig(AppConfig):
    """AppConfig that wires Celery task discovery into Django startup."""
    name = 'lipame.taskapp'
    verbose_name = 'Celery Config'

    def ready(self):
        """Point Celery at the Django settings and register all app tasks."""
        # Passing a string reference means worker children never have to
        # pickle the settings object (relevant on Windows' spawn workers).
        app.config_from_object('django.conf:settings')
        app_names = [config.name for config in apps.get_app_configs()]
        app.autodiscover_tasks(lambda: app_names, force=True)
@app.task(bind=True)
def debug_task(self):
    # Diagnostic task: print the raw task request so an operator can
    # verify that the worker is receiving and executing tasks.
    print('Request: {0!r}'.format(self.request))  # pragma: no cover
| savioabuga/lipame | lipame/taskapp/celery.py | Python | mit | 903 | 0.005537 |
#!/usr/bin/env python3
# Copyright 2013,2016 The Font Bakery Authors.
# Copyright 2017 The Google Font Tools Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# See AUTHORS.txt for the list of Authors and LICENSE.txt for the License.
"""
Replace a collection of fonts nametable's with new tables based on
the Google Fonts naming spec from just the filename.
The fsSelection, fsType and macStyle also get updated
to reflect the new names.
"""
from __future__ import print_function
import re
import ntpath
from argparse import (ArgumentParser,
RawTextHelpFormatter)
from fontTools.ttLib import TTFont, newTable
# Style names that the classic Windows RIBBI font model supports natively;
# any other style has to be folded into the typographic (preferred) names.
WIN_SAFE_STYLES = [
    'Regular',
    'Bold',
    'Italic',
    'BoldItalic',
]

# head.macStyle bit value for each (spaced) style name.
MACSTYLE = {
    'Regular': 0,
    'Bold': 1,
    'Italic': 2,
    'Bold Italic': 3
}

# Weight name to OS/2 usWeightClass value mapping:
WEIGHTS = {
    "Thin": 250,
    "ExtraLight": 275,
    "Light": 300,
    "Regular": 400,
    "Italic": 400,
    "Medium": 500,
    "SemiBold": 600,
    "Bold": 700,
    "ExtraBold": 800,
    "Black": 900
}

# (nameID, platformID, platEncID, langID) records every font must carry:
# name IDs 0-9 and 11-14, once for Mac Roman (1, 0, 0) and once for
# Windows Unicode US-English (3, 1, 1033).
# Fix: the original list contained (1, 3, 1, 1033) twice.
REQUIRED_FIELDS = [
    (0, 1, 0, 0),
    (1, 1, 0, 0),
    (2, 1, 0, 0),
    (3, 1, 0, 0),
    (4, 1, 0, 0),
    (5, 1, 0, 0),
    (6, 1, 0, 0),
    (7, 1, 0, 0),
    (8, 1, 0, 0),
    (9, 1, 0, 0),
    (11, 1, 0, 0),
    (12, 1, 0, 0),
    (13, 1, 0, 0),
    (14, 1, 0, 0),
    (0, 3, 1, 1033),
    (1, 3, 1, 1033),
    (2, 3, 1, 1033),
    (3, 3, 1, 1033),
    (4, 3, 1, 1033),
    (5, 3, 1, 1033),
    (6, 3, 1, 1033),
    (7, 3, 1, 1033),
    (8, 3, 1, 1033),
    (9, 3, 1, 1033),
    (11, 3, 1, 1033),
    (12, 3, 1, 1033),
    (13, 3, 1, 1033),
    (14, 3, 1, 1033),
]
def _split_camelcase(text):
return re.sub(r"(?<=\w)([A-Z])", r" \1", text)
def _mac_subfamily_name(style_name):
if style_name.startswith('Italic'):
pass
elif 'Italic' in style_name:
style_name = style_name.replace('Italic', ' Italic')
return style_name
def _unique_id(version, vendor_id, filename):
# Glyphsapp style 2.000;MYFO;Arsenal-Bold
# version;vendorID;filename
return '%s;%s;%s' % (version, vendor_id, filename)
def _version(text):
return re.search(r'[0-9]{1,4}\.[0-9]{1,8}', text).group(0)
def _full_name(family_name, style_name):
    """Return the full font name: family plus the spaced-out subfamily."""
    return '%s %s' % (family_name, _mac_subfamily_name(style_name))
def _win_family_name(family_name, style_name):
    """Windows family name: fold non-RIBBI styles into the family name and
    strip any 'Italic' token (Windows expresses italic via the subfamily)."""
    if style_name in WIN_SAFE_STYLES:
        name = family_name
    else:
        name = '%s %s' % (family_name, style_name)
    if 'Italic' in name:
        name = re.sub(r'Italic', r'', name)
    return name
def _win_subfamily_name(style_name):
name = style_name
if 'BoldItalic' == name:
return 'Bold Italic'
elif 'Italic' in name:
return 'Italic'
elif name == 'Bold':
return 'Bold'
else:
return 'Regular'
def set_usWeightClass(style_name):
    """OS/2 usWeightClass for *style_name*.

    A bare 'Italic' keeps its own WEIGHTS entry (400); any other style has
    the 'Italic' token stripped first ('BoldItalic' -> 'Bold' -> 700).
    """
    if style_name == 'Italic':
        return WEIGHTS[style_name]
    return WEIGHTS[re.sub(r'Italic', r'', style_name)]
def set_macStyle(style_name):
    """Return the head.macStyle bits for *style_name* (KeyError if unknown)."""
    return MACSTYLE[style_name]
def set_fsSelection(fsSelection, style):
    """Rewrite the REGULAR (bit 6), BOLD (bit 5) and ITALIC (bit 0) flags of
    an OS/2 fsSelection value to match *style*; all other bits are kept.

    If none of the three flags ends up set, fall back to REGULAR, since a
    font must advertise at least one of them.
    """
    regular_bit, bold_bit, italic_bit = 0b1000000, 0b100000, 0b1
    bits = fsSelection
    bits = bits | regular_bit if 'Regular' in style else bits & ~regular_bit
    bits = bits | bold_bit if style in ('Bold', 'BoldItalic') else bits & ~bold_bit
    bits = bits | italic_bit if 'Italic' in style else bits & ~italic_bit
    return bits if bits else regular_bit
def nametable_from_filename(filepath):
    """Build a replacement 'name' table for the font at *filepath*.

    All naming fields are derived from the "Family-Style.ttf" filename per
    the Google Fonts naming spec; any required field that cannot be derived
    is padded from the font's existing name table.
    """
    font = TTFont(filepath)
    old_table = font['name']
    new_table = newTable('name')
    # Strip the extension; assumes a 3-letter extension (.ttf/.otf).
    filename = ntpath.basename(filepath)[:-4]
    # "FamilyName-StyleName" -> ("Family Name", "StyleName")
    family_name, style_name = filename.split('-')
    family_name = _split_camelcase(family_name)
    # Windows/Unicode version record (name ID 5) of the existing table.
    font_version = font['name'].getName(5, 3, 1, 1033)
    font_version = font_version.toUnicode()
    vendor_id = font['OS/2'].achVendID
    # SET MAC NAME FIELDS
    # -------------------
    # Copyright
    old_cp = old_table.getName(0, 3, 1, 1033).string.decode('utf_16_be')
    new_table.setName(old_cp.encode('mac_roman'), 0, 1, 0, 0)
    # Font Family Name
    new_table.setName(family_name.encode('mac_roman'), 1, 1, 0, 0)
    # Subfamily name
    mac_subfamily_name = _mac_subfamily_name(style_name).encode('mac_roman')
    new_table.setName(mac_subfamily_name, 2, 1, 0, 0)
    # Unique ID
    unique_id = _unique_id(_version(font_version), vendor_id, filename)
    mac_unique_id = unique_id.encode('mac_roman')
    new_table.setName(mac_unique_id, 3, 1, 0, 0)
    # Full name
    fullname = _full_name(family_name, style_name)
    mac_fullname = fullname.encode('mac_roman')
    new_table.setName(mac_fullname, 4, 1, 0, 0)
    # Version string
    old_v = old_table.getName(5, 3, 1, 1033).string.decode('utf_16_be')
    mac_old_v = old_v.encode('mac_roman')
    new_table.setName(mac_old_v, 5, 1, 0, 0)
    # Postscript name
    mac_ps_name = filename.encode('mac_roman')
    new_table.setName(mac_ps_name, 6, 1, 0, 0)
    # SET WIN NAME FIELDS
    # -------------------
    # Copyright
    new_table.setName(old_cp, 0, 3, 1, 1033)
    # Font Family Name
    win_family_name = _win_family_name(family_name, style_name)
    win_family_name = win_family_name.encode('utf_16_be')
    new_table.setName(win_family_name, 1, 3, 1, 1033)
    # Subfamily Name
    win_subfamily_name = _win_subfamily_name(style_name).encode('utf_16_be')
    new_table.setName(win_subfamily_name, 2, 3, 1, 1033)
    # Unique ID
    win_unique_id = unique_id.encode('utf_16_be')
    new_table.setName(win_unique_id, 3, 3, 1, 1033)
    # Full name
    win_fullname = fullname.encode('utf_16_be')
    new_table.setName(win_fullname, 4, 3, 1, 1033)
    # Version string
    win_old_v = old_v.encode('utf_16_be')
    new_table.setName(win_old_v, 5, 3, 1, 1033)
    # Postscript name
    win_ps_name = filename.encode('utf_16_be')
    new_table.setName(win_ps_name, 6, 3, 1, 1033)
    # Non-RIBBI styles additionally need the typographic (preferred)
    # family/subfamily records, IDs 16 and 17.
    if style_name not in WIN_SAFE_STYLES:
        # Preferred Family Name
        new_table.setName(family_name.encode('utf_16_be'), 16, 3, 1, 1033)
        # Preferred SubfamilyName
        win_pref_subfam_name = _mac_subfamily_name(style_name).encode('utf_16_be')
        new_table.setName(win_pref_subfam_name, 17, 3, 1, 1033)
    # PAD missing fields
    # ------------------
    # Anything in REQUIRED_FIELDS that was not set above is copied from the
    # old table: exact record first, then the Windows record, then Mac.
    for field in REQUIRED_FIELDS:
        text = None
        if new_table.getName(*field):
            pass # Name has already been updated
        elif old_table.getName(*field):
            text = old_table.getName(*field).string
        elif old_table.getName(field[0], 3, 1, 1033):
            text = old_table.getName(field[0], 3, 1, 1033).string.decode('utf_16_be')
        elif old_table.getName(field[0], 1, 0, 0): # check if field exists for mac
            # NOTE(review): this branch tests the Mac record but then reads
            # the Windows record and decodes it as mac_roman — looks like a
            # latent bug; confirm the intended source record.
            text = old_table.getName(field[0], 3, 1, 1033).string.decode('mac_roman')
        if text:
            new_table.setName(text, *field)
    return new_table
# Command-line interface: one or more font paths to rename in place.
parser = ArgumentParser(description=__doc__,
                        formatter_class=RawTextHelpFormatter)
parser.add_argument('fonts', nargs="+")
def main():
    """Rewrite the name table (and derived OS/2/head fields) of each font,
    saving the result next to the original as '<path>.fix'."""
    args = parser.parse_args()
    for font_path in args.fonts:
        nametable = nametable_from_filename(font_path)
        font = TTFont(font_path)
        font_filename = ntpath.basename(font_path)
        font['name'] = nametable
        # Style is everything after the '-' in "Family-Style.ttf".
        style = font_filename[:-4].split('-')[-1]
        font['OS/2'].usWeightClass = set_usWeightClass(style)
        font['OS/2'].fsSelection = set_fsSelection(font['OS/2'].fsSelection, style)
        # head.macStyle is derived from the new Windows subfamily (ID 2).
        win_style = font['name'].getName(2, 3, 1, 1033).string.decode('utf_16_be')
        font['head'].macStyle = set_macStyle(win_style)
        # Save alongside the original instead of overwriting it.
        font.save(font_path + '.fix')
        print('font saved %s.fix' % font_path)
if __name__ == '__main__':
    main()
| googlefonts/gftools | bin/gftools-nametable-from-filename.py | Python | apache-2.0 | 7,889 | 0.013056 |
# This file is part of Indico.
# Copyright (C) 2002 - 2020 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from __future__ import unicode_literals
from flask import session
from indico.core import signals
from indico.modules.groups.core import GroupProxy
from indico.util.i18n import _
from indico.web.flask.util import url_for
from indico.web.menu import SideMenuItem
__all__ = ('GroupProxy',)
@signals.menu.items.connect_via('admin-sidemenu')
def _extend_admin_menu(sender, **kwargs):
    """Add a 'Groups' entry to the admin side menu for admin users only."""
    if not session.user.is_admin:
        return
    return SideMenuItem('groups', _("Groups"), url_for('groups.groups'), section='user_management')
@signals.users.merged.connect
def _merge_users(target, source, **kwargs):
    # Transfer every local group membership from the merged-away user
    # (*source*) onto the surviving user (*target*), then detach the
    # source user from all of its groups.
    target.local_groups |= source.local_groups
    source.local_groups.clear()
| mic4ael/indico | indico/modules/groups/__init__.py | Python | mit | 889 | 0.001125 |
# Common dialog code.
#
# Copyright (C) 2007, 2008 Red Hat, Inc. All rights reserved.
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details. You should have
# received a copy of the GNU General Public License along with this program; if
# not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth
# Floor, Boston, MA 02110-1301, USA. Any Red Hat trademarks that are
# incorporated in the source code or documentation are not subject to the GNU
# General Public License and may only be used or replicated with the express
# permission of Red Hat, Inc.
#
# Red Hat Author: Miloslav Trmac <mitr@redhat.com>
import os
import gtk.glade
import settings
__all__ = ('DialogBase')
class DialogBase(object):
    '''Common utilities for dialogs.'''
    def __init__(self, toplevel_name, parent, notebook_name = None):
        '''Create a window from the glade file and get references to widgets.
        If notebook_name is not None, use it in validate_values(). Make the
        window transient for parent.
        '''
        glade_xml = gtk.glade.XML(settings.glade_file_path, toplevel_name)
        # Subclasses declare the widget names they need in
        # _glade_widget_names; each one becomes an attribute of self.
        for name in self._glade_widget_names:
            w = glade_xml.get_widget(name)
            assert w is not None, 'Widget %s not found in glade file' % name
            setattr(self, name, w)
        # This name is special :)
        self.window = glade_xml.get_widget(toplevel_name)
        if parent is not None:
            self.window.set_transient_for(parent)
        if notebook_name is None:
            self.__notebook_widget = None
        else:
            self.__notebook_widget = glade_xml.get_widget(notebook_name)
            assert self.__notebook_widget is not None
    def destroy(self):
        '''Destroy the dialog.'''
        self.window.destroy()
    def _validate_get_failure(self):
        '''Check whether the window state is a valid configuration.
        Return None if it is valid. Otherwise, return (message, notebook page
        index or None, widget).
        '''
        # Subclasses must implement the actual validation rules.
        raise NotImplementedError()
    def _validate_values(self):
        '''Check whether the dialog state is a valid configuration.
        Return True if it is valid. Otherwise, display an error message and
        return False.
        '''
        a = self._validate_get_failure()
        if a is None:
            return True
        (msg, page, widget) = a
        # Bring the offending page to the front before showing the error.
        if self.__notebook_widget is not None:
            self.__notebook_widget.set_current_page(page)
        self._modal_error_dialog(msg)
        widget.grab_focus()
        return False
    def _modal_error_dialog(self, msg):
        '''Show a modal error dialog.'''
        dlg = gtk.MessageDialog(self.window, gtk.DIALOG_DESTROY_WITH_PARENT,
                                gtk.MESSAGE_ERROR, gtk.BUTTONS_CLOSE, msg)
        dlg.run()
        dlg.destroy()
    def _radio_set(self, value, pairs):
        '''Update the "active" state of several toggle buttons.
        The pairs parameter is a tuple of (widget name, expected value) pairs.
        Expected value is either a single value, or a tuple of possible values.
        '''
        for (name, expected) in pairs:
            if type(expected) == tuple:
                active = value in expected
            else:
                active = value == expected
            getattr(self, name).set_active(active)
    def _radio_get(self, pairs):
        '''Get the "active" button from a group of radio buttons.
        The pairs parameter is a tuple of (widget name, return value) pairs.
        If no widget is active, an assertion will fail.
        '''
        for (name, value) in pairs:
            if getattr(self, name).get_active():
                return value
        assert False, 'No widget is active'
    def _setup_browse_button(self, button, entry, title, action):
        '''Set up a "Browse" button for a path entry.'''
        button.connect('clicked', self.__browse_button_clicked, entry, title,
                       action)
    def __browse_button_clicked(self, unused, entry, title, action):
        # Pick the stock accept label matching the chooser action.
        if action == gtk.FILE_CHOOSER_ACTION_SAVE:
            stock_accept = gtk.STOCK_SAVE
        else:
            stock_accept = gtk.STOCK_OPEN
        dlg = gtk.FileChooserDialog(title, self.window, action,
                                    (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
                                     stock_accept, gtk.RESPONSE_ACCEPT))
        path = entry.get_text()
        # For "save" pre-fill directory and filename separately; for "open"
        # just point the chooser at the existing path.
        if action == gtk.FILE_CHOOSER_ACTION_SAVE:
            dlg.set_current_folder(os.path.dirname(path))
            dlg.set_current_name(os.path.basename(path))
        else:
            dlg.set_filename(path)
        r = dlg.run()
        if r == gtk.RESPONSE_ACCEPT:
            entry.set_text(dlg.get_filename())
        dlg.destroy()
| ystk/debian-audit | system-config-audit/src/dialog_base.py | Python | gpl-2.0 | 5,208 | 0.000768 |
from enum import Enum
from collections import namedtuple
import settings
import os
import dbaccess
import urllib.request
import ast
import shutil
def move_file(filename, src, dest):
    """Move *filename* from directory *src* into directory *dest*."""
    shutil.move(os.path.join(src, filename), os.path.join(dest, filename))
def create_file(filename, path):
    """Create an empty file named *filename* inside directory *path*."""
    with open(os.path.join(path, filename), 'w+'):
        pass
def delete_file(filename, path):
    """Remove the file *filename* from directory *path*."""
    os.remove(os.path.join(path, filename))
def delete_directory(directory):
    """Remove *directory*; it must already be empty (os.rmdir semantics)."""
    os.rmdir(directory)
def get_problem_name(number):
    """Fetch the title of UVa problem *number* from the uHunt web API."""
    url = "http://uhunt.felix-halim.net/api/p/id/{0}".format(number)
    with urllib.request.urlopen(url) as response:
        payload = response.read().decode('utf-8')
    # The endpoint returns a dict literal; pull out just the title field.
    return ast.literal_eval(payload)["title"]
class Language(Enum):
    """Supported submission languages; values match the stored ids."""
    C = 0
    CPP = 1
    JAVA = 2
    PYTHON = 3

# Source-file extension used for each language.
language_extensions = {
    Language.C: 'c',
    Language.CPP: 'cpp',
    Language.JAVA: 'java',
    Language.PYTHON: 'py'}
class Status(Enum):
    """Lifecycle state of an attempt; also names its storage directory."""
    TEMPORARY = 0
    WORKING = 1
    PAUSED = 2
    FINISHED = 3
    ARCHIVED = 4

    @staticmethod
    def get_directory(status):
        """Directory (lower-cased state name) holding the attempt's files."""
        return status.name.lower()
class ProblemData(object):
    """Plain data holder describing one attempt at a judge problem."""

    def __init__(self, problem_id, name, category_id=None):
        self.problem_id = problem_id
        self.name = name
        self.category_id = category_id
        # The remaining fields are filled in later by ProblemManager.
        self.language = None
        self.attempt_no = None
        self.status = None
        self.source_file = None
        self.input_file = None
        self.output_file = None

    def __eq__(self, other):
        # Two attempts are equal iff problem id and attempt number match.
        if not other:
            return False
        return self.problem_id == other.problem_id and self.attempt_no == other.attempt_no

    def __ne__(self, other):
        if not other:
            return True
        return self.problem_id != other.problem_id or self.attempt_no != other.attempt_no
class ProblemNotFound(Exception):
    """Raised when a (problem, attempt) pair is missing from the database."""
    pass
class ProblemManager(object):
    """Creates, moves and deletes the files and DB rows of problem attempts.

    Files for an attempt live under <repo_path>/<status directory>; rows
    live in the 'problem' and 'problem_attempt' tables (via dbaccess).
    """
    def __get_problem_from_db(self, problem_id):
        # Returns a ProblemData built from the 'problem' row, or None.
        result = dbaccess.read('problem', where={'id': problem_id})
        if result:
            result = result[0]
            problem = ProblemData(result[0], result[1], result[2])
            return problem
        else:
            return None
    def create_files(self, problem):
        """Create empty source/input/output files in the status directory."""
        path = os.path.join(settings.get('repo_path'), Status.get_directory(problem.status))
        create_file(problem.source_file, path)
        create_file(problem.input_file, path)
        create_file(problem.output_file, path)
    def create_data(self, problem):
        """Insert the problem row (if new) and a fresh attempt row."""
        result = dbaccess.read('problem', where={'id': problem.problem_id})
        if not result:
            dbaccess.insert('problem', data={'id': problem.problem_id,
                'name': problem.name, 'category_id': problem.category_id})
        # Attempt numbers are sequential per problem, starting at 1.
        result = dbaccess.read('problem_attempt', where={'problem_id': problem.problem_id})
        attempt_no = len(result)
        attempt_no += 1
        dbaccess.insert('problem_attempt',
            data={'problem_id': problem.problem_id, 'attempt_no': attempt_no,
                'language_id': problem.language.value, 'status_id': problem.status.value})
    def delete_files(self, problem):
        """Delete the attempt's source/input/output files."""
        path = os.path.join(settings.get('repo_path'), Status.get_directory(problem.status))
        delete_file(problem.source_file, path)
        delete_file(problem.input_file, path)
        delete_file(problem.output_file, path)
    def delete_data(self, problem):
        """Delete the attempt row; drop the problem row when no attempts remain."""
        dbaccess.delete('problem_attempt',
            where={'problem_id': problem.problem_id,
                'attempt_no': problem.attempt_no})
        result = dbaccess.read('problem_attempt',
            where={'problem_id': problem.problem_id})
        if not result:
            dbaccess.delete('problem', where={'id': problem.problem_id})
    def set_status(self, status, problem):
        """Move the attempt (files and DB row) into a new status."""
        src_dir = Status.get_directory(problem.status)
        src_dir = os.path.join(settings.get('repo_path'), src_dir)
        problem.status = status
        dest_dir = Status.get_directory(problem.status)
        dest_dir = os.path.join(settings.get('repo_path'), dest_dir)
        # Files follow the status: each status has its own directory.
        move_file(problem.source_file, src_dir, dest_dir)
        move_file(problem.input_file, src_dir, dest_dir)
        move_file(problem.output_file, src_dir, dest_dir)
        dbaccess.update(
            'problem_attempt',
            data={'status_id': problem.status.value},
            where={'problem_id': problem.problem_id,
                'attempt_no': problem.attempt_no})
    def get_data_for_new(self, problem_id, language):
        """Build the ProblemData for a brand-new attempt (not yet persisted)."""
        problem = self.__get_problem_from_db(problem_id)
        if not problem:
            # Unknown problem: resolve its title from the uHunt web API.
            name = get_problem_name(problem_id)
            problem = ProblemData(problem_id, name, None)
        problem.language = language
        result = dbaccess.read('problem_attempt', where={'problem_id': problem_id})
        problem.attempt_no = len(result) + 1
        problem.status = Status.TEMPORARY
        # File names are "<problem_id>.<ext>" / ".in" / ".out".
        prefix = str(problem_id) + '.'
        problem.source_file = prefix + language_extensions[language]
        problem.input_file = prefix + 'in'
        problem.output_file = prefix + 'out'
        return problem
    def get_data(self, problem_id, attempt_no):
        """Load an existing attempt; raises ProblemNotFound if missing."""
        problem = self.__get_problem_from_db(problem_id)
        result = dbaccess.read('problem_attempt',
            columns=['status_id', 'language_id'],
            where={'problem_id': problem_id, 'attempt_no': attempt_no})
        if not result:
            message = ' '.join(['Problem:', str(problem_id), 'was not found on the database.'])
            raise ProblemNotFound(message)
        problem.attempt_no = attempt_no
        problem.status = Status(result[0][0])
        problem.language = Language(result[0][1])
        prefix = str(problem_id) + '.'
        problem.source_file = prefix + language_extensions[problem.language]
        problem.input_file = prefix + 'in'
        problem.output_file = prefix + 'out'
        return problem
    def update_category(self, problem):
        """Persist a changed category for the problem row."""
        dbaccess.update('problem', data={'category_id': problem.category_id}, where={'id': problem.problem_id})
| thyagostall/apollo | src/problem.py | Python | mit | 6,320 | 0.004589 |
# Summary definition for a foaf:Organization: match the subject's type,
# then pull its rdfs:label into the "name" field.
definition = {
    "where": "?subj a foaf:Organization .",
    "fields": {
        "name": {
            "where": "?subj rdfs:label ?obj ."
        }
    }
}
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains common building blocks for neural networks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from official.modeling import tf_utils
@tf.keras.utils.register_keras_serializable(package='Vision')
class ResidualBlock(tf.keras.layers.Layer):
  """A residual block: two 3x3 convolutions plus a shortcut connection."""

  def __init__(self,
               filters,
               strides,
               use_projection=False,
               kernel_initializer='VarianceScaling',
               kernel_regularizer=None,
               bias_regularizer=None,
               activation='relu',
               use_sync_bn=False,
               norm_momentum=0.99,
               norm_epsilon=0.001,
               **kwargs):
    """A residual block with BN after convolutions.

    Args:
      filters: `int` number of filters used by both 3x3 convolutions and
        the projection shortcut (unlike the bottleneck block, there is no
        4x channel expansion here).
      strides: `int` block stride. If greater than 1, this block will ultimately
        downsample the input.
      use_projection: `bool` for whether this block should use a projection
        shortcut (versus the default identity shortcut). This is usually `True`
        for the first block of a block group, which may change the number of
        filters and the resolution.
      kernel_initializer: kernel_initializer for convolutional layers.
      kernel_regularizer: tf.keras.regularizers.Regularizer object for Conv2D.
        Default to None.
      bias_regularizer: tf.keras.regularizers.Regularizer object for Conv2d.
        Default to None.
      activation: `str` name of the activation function.
      use_sync_bn: if True, use synchronized batch normalization.
      norm_momentum: `float` normalization momentum for the moving average.
      norm_epsilon: `float` small float added to variance to avoid dividing by
        zero.
      **kwargs: keyword arguments to be passed.
    """
    super(ResidualBlock, self).__init__(**kwargs)

    self._filters = filters
    self._strides = strides
    self._use_projection = use_projection
    self._use_sync_bn = use_sync_bn
    self._activation = activation
    self._kernel_initializer = kernel_initializer
    self._norm_momentum = norm_momentum
    self._norm_epsilon = norm_epsilon
    self._kernel_regularizer = kernel_regularizer
    self._bias_regularizer = bias_regularizer
    if use_sync_bn:
      self._norm = tf.keras.layers.experimental.SyncBatchNormalization
    else:
      self._norm = tf.keras.layers.BatchNormalization
    # BatchNorm axis depends on the image data format.
    if tf.keras.backend.image_data_format() == 'channels_last':
      self._bn_axis = -1
    else:
      self._bn_axis = 1
    self._activation_fn = tf_utils.get_activation(activation)

  def build(self, input_shape):
    # Optional 1x1 projection shortcut to match filters/stride.
    if self._use_projection:
      self._shortcut = tf.keras.layers.Conv2D(
          filters=self._filters,
          kernel_size=1,
          strides=self._strides,
          use_bias=False,
          kernel_initializer=self._kernel_initializer,
          kernel_regularizer=self._kernel_regularizer,
          bias_regularizer=self._bias_regularizer)
      self._norm0 = self._norm(
          axis=self._bn_axis,
          momentum=self._norm_momentum,
          epsilon=self._norm_epsilon)

    # First 3x3 conv carries the block's stride.
    self._conv1 = tf.keras.layers.Conv2D(
        filters=self._filters,
        kernel_size=3,
        strides=self._strides,
        padding='same',
        use_bias=False,
        kernel_initializer=self._kernel_initializer,
        kernel_regularizer=self._kernel_regularizer,
        bias_regularizer=self._bias_regularizer)
    self._norm1 = self._norm(
        axis=self._bn_axis,
        momentum=self._norm_momentum,
        epsilon=self._norm_epsilon)

    # Second 3x3 conv always has stride 1.
    self._conv2 = tf.keras.layers.Conv2D(
        filters=self._filters,
        kernel_size=3,
        strides=1,
        padding='same',
        use_bias=False,
        kernel_initializer=self._kernel_initializer,
        kernel_regularizer=self._kernel_regularizer,
        bias_regularizer=self._bias_regularizer)
    self._norm2 = self._norm(
        axis=self._bn_axis,
        momentum=self._norm_momentum,
        epsilon=self._norm_epsilon)

    super(ResidualBlock, self).build(input_shape)

  def get_config(self):
    # Serialize constructor arguments for register_keras_serializable.
    config = {
        'filters': self._filters,
        'strides': self._strides,
        'use_projection': self._use_projection,
        'kernel_initializer': self._kernel_initializer,
        'kernel_regularizer': self._kernel_regularizer,
        'bias_regularizer': self._bias_regularizer,
        'activation': self._activation,
        'use_sync_bn': self._use_sync_bn,
        'norm_momentum': self._norm_momentum,
        'norm_epsilon': self._norm_epsilon
    }
    base_config = super(ResidualBlock, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))

  def call(self, inputs):
    shortcut = inputs
    if self._use_projection:
      shortcut = self._shortcut(shortcut)
      shortcut = self._norm0(shortcut)

    # conv-BN-act, conv-BN, then activation applied after the addition.
    x = self._conv1(inputs)
    x = self._norm1(x)
    x = self._activation_fn(x)

    x = self._conv2(x)
    x = self._norm2(x)

    return self._activation_fn(x + shortcut)
@tf.keras.utils.register_keras_serializable(package='Vision')
class BottleneckBlock(tf.keras.layers.Layer):
  """A standard bottleneck block: 1x1 reduce, 3x3, 1x1 expand (4x)."""

  def __init__(self,
               filters,
               strides,
               use_projection=False,
               kernel_initializer='VarianceScaling',
               kernel_regularizer=None,
               bias_regularizer=None,
               activation='relu',
               use_sync_bn=False,
               norm_momentum=0.99,
               norm_epsilon=0.001,
               **kwargs):
    """A standard bottleneck block with BN after convolutions.

    Args:
      filters: `int` number of filters for the first two convolutions. Note that
        the third and final convolution will use 4 times as many filters.
      strides: `int` block stride. If greater than 1, this block will ultimately
        downsample the input.
      use_projection: `bool` for whether this block should use a projection
        shortcut (versus the default identity shortcut). This is usually `True`
        for the first block of a block group, which may change the number of
        filters and the resolution.
      kernel_initializer: kernel_initializer for convolutional layers.
      kernel_regularizer: tf.keras.regularizers.Regularizer object for Conv2D.
        Default to None.
      bias_regularizer: tf.keras.regularizers.Regularizer object for Conv2d.
        Default to None.
      activation: `str` name of the activation function.
      use_sync_bn: if True, use synchronized batch normalization.
      norm_momentum: `float` normalization momentum for the moving average.
      norm_epsilon: `float` small float added to variance to avoid dividing by
        zero.
      **kwargs: keyword arguments to be passed.
    """
    super(BottleneckBlock, self).__init__(**kwargs)

    self._filters = filters
    self._strides = strides
    self._use_projection = use_projection
    self._use_sync_bn = use_sync_bn
    self._activation = activation
    self._kernel_initializer = kernel_initializer
    self._norm_momentum = norm_momentum
    self._norm_epsilon = norm_epsilon
    self._kernel_regularizer = kernel_regularizer
    self._bias_regularizer = bias_regularizer
    if use_sync_bn:
      self._norm = tf.keras.layers.experimental.SyncBatchNormalization
    else:
      self._norm = tf.keras.layers.BatchNormalization
    # BatchNorm axis depends on the image data format.
    if tf.keras.backend.image_data_format() == 'channels_last':
      self._bn_axis = -1
    else:
      self._bn_axis = 1
    self._activation_fn = tf_utils.get_activation(activation)

  def build(self, input_shape):
    # Optional 1x1 projection shortcut to the expanded (4x) channel count.
    if self._use_projection:
      self._shortcut = tf.keras.layers.Conv2D(
          filters=self._filters * 4,
          kernel_size=1,
          strides=self._strides,
          use_bias=False,
          kernel_initializer=self._kernel_initializer,
          kernel_regularizer=self._kernel_regularizer,
          bias_regularizer=self._bias_regularizer)
      self._norm0 = self._norm(
          axis=self._bn_axis,
          momentum=self._norm_momentum,
          epsilon=self._norm_epsilon)

    # 1x1 reduction conv.
    self._conv1 = tf.keras.layers.Conv2D(
        filters=self._filters,
        kernel_size=1,
        strides=1,
        use_bias=False,
        kernel_initializer=self._kernel_initializer,
        kernel_regularizer=self._kernel_regularizer,
        bias_regularizer=self._bias_regularizer)
    self._norm1 = self._norm(
        axis=self._bn_axis,
        momentum=self._norm_momentum,
        epsilon=self._norm_epsilon)

    # 3x3 conv carries the block's stride.
    self._conv2 = tf.keras.layers.Conv2D(
        filters=self._filters,
        kernel_size=3,
        strides=self._strides,
        padding='same',
        use_bias=False,
        kernel_initializer=self._kernel_initializer,
        kernel_regularizer=self._kernel_regularizer,
        bias_regularizer=self._bias_regularizer)
    self._norm2 = self._norm(
        axis=self._bn_axis,
        momentum=self._norm_momentum,
        epsilon=self._norm_epsilon)

    # 1x1 expansion conv back to 4x filters.
    self._conv3 = tf.keras.layers.Conv2D(
        filters=self._filters * 4,
        kernel_size=1,
        strides=1,
        use_bias=False,
        kernel_initializer=self._kernel_initializer,
        kernel_regularizer=self._kernel_regularizer,
        bias_regularizer=self._bias_regularizer)
    self._norm3 = self._norm(
        axis=self._bn_axis,
        momentum=self._norm_momentum,
        epsilon=self._norm_epsilon)

    super(BottleneckBlock, self).build(input_shape)

  def get_config(self):
    # Serialize constructor arguments for register_keras_serializable.
    config = {
        'filters': self._filters,
        'strides': self._strides,
        'use_projection': self._use_projection,
        'kernel_initializer': self._kernel_initializer,
        'kernel_regularizer': self._kernel_regularizer,
        'bias_regularizer': self._bias_regularizer,
        'activation': self._activation,
        'use_sync_bn': self._use_sync_bn,
        'norm_momentum': self._norm_momentum,
        'norm_epsilon': self._norm_epsilon
    }
    base_config = super(BottleneckBlock, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))

  def call(self, inputs):
    shortcut = inputs
    if self._use_projection:
      shortcut = self._shortcut(shortcut)
      shortcut = self._norm0(shortcut)

    # conv-BN-act x2, conv-BN, then activation after the addition.
    x = self._conv1(inputs)
    x = self._norm1(x)
    x = self._activation_fn(x)

    x = self._conv2(x)
    x = self._norm2(x)
    x = self._activation_fn(x)

    x = self._conv3(x)
    x = self._norm3(x)

    return self._activation_fn(x + shortcut)
| tombstone/models | official/vision/detection/modeling/architecture/nn_blocks.py | Python | apache-2.0 | 11,423 | 0.002626 |
#encoding:utf-8
def get_topn(logfile, topn=10):
    """Return the ``topn`` most frequent (ip, url, status) triples of a log.

    Each line of the access log is split on whitespace; field 0 is the
    client IP, field 6 the request URL and field 8 the HTTP status code.

    :param logfile: path of the access-log file to analyse
    :param topn: how many entries to return (fewer if the log has fewer
        distinct triples)
    :return: list of ((ip, url, code), count) tuples, sorted by count in
        descending order.
    """
    counts = {}
    fhandler = open(logfile, 'r')
    try:
        for line in fhandler:
            nodes = line.split()
            # Skip blank or truncated lines instead of raising IndexError.
            if len(nodes) < 9:
                continue
            key = (nodes[0], nodes[6], nodes[8])
            counts[key] = counts.get(key, 0) + 1
    finally:
        # Close the handle even if a read fails (the original leaked it).
        fhandler.close()
    # sorted() replaces the original O(n*topn) partial bubble sort and also
    # works on Python 3, where dict.items() is not indexable.
    ranked = sorted(counts.items(), key=lambda kv: kv[1], reverse=True)
    return ranked[:topn]
if __name__ == '__main__':
    # Demo run against a sample access log. print() works on both
    # Python 2 and 3 for a single argument (the original used the
    # Python-2-only statement form).
    logfile = '/home/share/www_access_20140823.log'
    print(get_topn(topn=5, logfile=logfile))
from django.core.mail import send_mail
from greatesttodo.celery import app
from reminder.models import Reminder
@app.task
def send_email_reminder(reminder_id):
    """Celery task: email the reminder's text to its recipient.

    :param reminder_id: primary key of the Reminder row to deliver.
    """
    try:
        reminder = Reminder.objects.get(id=reminder_id)
        send_mail(
            # The reminder text doubles as both subject and body.
            subject=reminder.text,
            message=reminder.text,
            from_email=None,  # presumably falls back to the project's default sender -- see Django send_mail docs
            recipient_list=[reminder.email]
        )
    except Reminder.DoesNotExist:
        # The reminder was deleted before the task ran; nothing to send.
        pass
| Rub4ek/scalors-assignment-backend | reminder/tasks.py | Python | mit | 447 | 0 |
#!/usr/bin/env python
# Unix SMB/CIFS implementation.
# Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2007-2008
#
# Based on the original in EJS:
# Copyright (C) Andrew Tridgell <tridge@samba.org> 2005
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""Samba 4."""
__docformat__ = "restructuredText"
import os
import sys
import samba.param
def source_tree_topdir():
    '''return the top level directory (the one containing the source4 directory)'''
    base = os.path.dirname(__file__)
    # This module may live at two different depths depending on the build
    # layout, so probe both candidate relative paths.
    for relpath in ("../../..", "../../../.."):
        candidate = os.path.normpath(os.path.join(base, relpath))
        if os.path.exists(os.path.join(candidate, 'source4')):
            return candidate
    raise RuntimeError("unable to find top level source directory")
def in_source_tree():
    '''return True if we are running from within the samba source tree'''
    try:
        source_tree_topdir()
    except RuntimeError:
        # No source4/ directory above us: running from an installed tree.
        return False
    return True
import ldb
from samba._ldb import Ldb as _Ldb
class Ldb(_Ldb):
    """Simple Samba-specific LDB subclass that takes care
    of setting up the modules dir, credentials pointers, etc.
    Please note that this is intended to be for all Samba LDB files,
    not necessarily the Sam database. For Sam-specific helper
    functions see samdb.py.
    """
    def __init__(self, url=None, lp=None, modules_dir=None, session_info=None,
                 credentials=None, flags=0, options=None):
        """Opens a Samba Ldb file.
        :param url: Optional LDB URL to open
        :param lp: Optional loadparm object
        :param modules_dir: Optional modules directory
        :param session_info: Optional session information
        :param credentials: Optional credentials, defaults to anonymous.
        :param flags: Optional LDB flags
        :param options: Additional options (optional)
        This is different from a regular Ldb file in that the Samba-specific
        modules-dir is used by default and that credentials and session_info
        can be passed through (required by some modules).
        """
        # Fall back to the packaged ldb modules dir when none is given.
        if modules_dir is not None:
            self.set_modules_dir(modules_dir)
        else:
            self.set_modules_dir(os.path.join(samba.param.modules_dir(), "ldb"))
        if session_info is not None:
            self.set_session_info(session_info)
        if credentials is not None:
            self.set_credentials(credentials)
        if lp is not None:
            self.set_loadparm(lp)
        # This must be done before we load the schema, as these handlers for
        # objectSid and objectGUID etc must take precedence over the 'binary
        # attribute' declaration in the schema
        self.register_samba_handlers()
        # TODO set debug
        def msg(l, text):
            print text
        #self.set_debug(msg)
        self.set_utf8_casefold()
        # Allow admins to force non-sync ldb for all databases
        if lp is not None:
            nosync_p = lp.get("nosync", "ldb")
            if nosync_p is not None and nosync_p == True:
                flags |= ldb.FLG_NOSYNC
        # New database files are created owner read/write only (0600).
        self.set_create_perms(0600)
        # Only connect when a URL was supplied; callers may connect later.
        if url is not None:
            self.connect(url, flags, options)
    def searchone(self, attribute, basedn=None, expression=None,
                  scope=ldb.SCOPE_BASE):
        """Search for one attribute as a string.
        :param basedn: BaseDN for the search.
        :param attribute: Name of the attribute
        :param expression: Optional search expression.
        :param scope: Search scope (defaults to base).
        :return: Value of attribute as a string or None if it wasn't found.
        """
        res = self.search(basedn, scope, expression, [attribute])
        # Exactly one matching record with a non-empty value is required.
        if len(res) != 1 or res[0][attribute] is None:
            return None
        values = set(res[0][attribute])
        assert len(values) == 1
        return self.schema_format_value(attribute, values.pop())
    def erase_users_computers(self, dn):
        """Erases user and computer objects from our AD.
        This is needed since the 'samldb' module denies the deletion of primary
        groups. Therefore all groups shouldn't be primary somewhere anymore.
        """
        try:
            res = self.search(base=dn, scope=ldb.SCOPE_SUBTREE, attrs=[],
                      expression="(|(objectclass=user)(objectclass=computer))")
        except ldb.LdbError, (errno, _):
            if errno == ldb.ERR_NO_SUCH_OBJECT:
                # Ignore no such object errors
                return
            else:
                raise
        try:
            for msg in res:
                # "relax:0" bypasses server-side restrictions on deletion.
                self.delete(msg.dn, ["relax:0"])
        except ldb.LdbError, (errno, _):
            if errno != ldb.ERR_NO_SUCH_OBJECT:
                # Ignore no such object errors
                raise
    def erase_except_schema_controlled(self):
        """Erase this ldb.
        :note: Removes all records, except those that are controlled by
            Samba4's schema.
        """
        basedn = ""
        # Try to delete user/computer accounts to allow deletion of groups
        self.erase_users_computers(basedn)
        # Delete the 'visible' records, and the invisble 'deleted' records (if this DB supports it)
        for msg in self.search(basedn, ldb.SCOPE_SUBTREE,
                       "(&(|(objectclass=*)(distinguishedName=*))(!(distinguishedName=@BASEINFO)))",
                       [], controls=["show_deleted:0", "show_recycled:0"]):
            try:
                self.delete(msg.dn, ["relax:0"])
            except ldb.LdbError, (errno, _):
                if errno != ldb.ERR_NO_SUCH_OBJECT:
                    # Ignore no such object errors
                    raise
        # Verify nothing visible (or recycled) is left behind.
        res = self.search(basedn, ldb.SCOPE_SUBTREE,
            "(&(|(objectclass=*)(distinguishedName=*))(!(distinguishedName=@BASEINFO)))", [], controls=["show_deleted:0", "show_recycled:0"])
        assert len(res) == 0
        # delete the specials
        for attr in ["@SUBCLASSES", "@MODULES",
                     "@OPTIONS", "@PARTITION", "@KLUDGEACL"]:
            try:
                self.delete(attr, ["relax:0"])
            except ldb.LdbError, (errno, _):
                if errno != ldb.ERR_NO_SUCH_OBJECT:
                    # Ignore missing dn errors
                    raise
    def erase(self):
        """Erase this ldb, removing all records."""
        self.erase_except_schema_controlled()
        # delete the specials
        for attr in ["@INDEXLIST", "@ATTRIBUTES"]:
            try:
                self.delete(attr, ["relax:0"])
            except ldb.LdbError, (errno, _):
                if errno != ldb.ERR_NO_SUCH_OBJECT:
                    # Ignore missing dn errors
                    raise
    def load_ldif_file_add(self, ldif_path):
        """Load a LDIF file.
        :param ldif_path: Path to LDIF file.
        """
        self.add_ldif(open(ldif_path, 'r').read())
    def add_ldif(self, ldif, controls=None):
        """Add data based on a LDIF string.
        :param ldif: LDIF text.
        :param controls: Optional list of LDB controls to pass to add().
        """
        for changetype, msg in self.parse_ldif(ldif):
            # Only plain records are allowed here -- no change records.
            assert changetype == ldb.CHANGETYPE_NONE
            self.add(msg, controls)
    def modify_ldif(self, ldif, controls=None):
        """Modify database based on a LDIF string.
        :param ldif: LDIF text.
        :param controls: Optional list of LDB controls to pass through.
        """
        for changetype, msg in self.parse_ldif(ldif):
            # "add" change records insert; everything else is a modify.
            if changetype == ldb.CHANGETYPE_ADD:
                self.add(msg, controls)
            else:
                self.modify(msg, controls)
def substitute_var(text, values):
    """Substitute strings of the form ${NAME} in str, replacing
    with substitutions from values.
    :param text: Text in which to subsitute.
    :param values: Dictionary with keys and values.
    """
    result = text
    for name in values:
        value = values[name]
        assert isinstance(name, str), "%r is not a string" % name
        assert isinstance(value, str), "Value %r for %s is not a string" % (value, name)
        result = result.replace("${%s}" % name, value)
    return result
def check_all_substituted(text):
    """Check that all substitution variables in a string have been replaced.
    If not, raise an exception.
    :param text: The text to search for substitution variables
    """
    var_start = text.find("${")
    if var_start == -1:
        # No "${" marker anywhere: all variables were substituted.
        return
    var_end = text.find("}", var_start)
    raise Exception("Not all variables substituted: %s" %
                    text[var_start:var_end+1])
def read_and_sub_file(file_name, subst_vars):
    """Read a file and sub in variables found in it
    :param file_name: File to be read (typically from setup directory)
    :param subst_vars: Optional variables to subsitute in the file.
    :return: The file contents with all ${NAME} variables expanded.
    """
    # Close the handle deterministically; the original leaked it until
    # garbage collection.
    f = open(file_name, 'r')
    try:
        data = f.read()
    finally:
        f.close()
    if subst_vars is not None:
        data = substitute_var(data, subst_vars)
        # Fail loudly if the template still contains unexpanded ${...}.
        check_all_substituted(data)
    return data
def setup_file(template, fname, subst_vars=None):
    """Setup a file in the private dir.
    :param template: Path of the template file.
    :param fname: Path of the file to create.
    :param subst_vars: Substitution variables.
    """
    # Remove any stale copy before writing the new one.
    if os.path.exists(fname):
        os.unlink(fname)
    data = read_and_sub_file(template, subst_vars)
    # The context manager guarantees the close the original did via
    # try/finally.
    with open(fname, 'w') as f:
        f.write(data)
def valid_netbios_name(name):
    """Check whether a name is valid as a NetBIOS name. """
    # See crh's book (1.4.1.1)
    if len(name) > 15:
        return False
    allowed_punctuation = " !#$%&'()-.@^_{}~"
    return all(c.isalnum() or c in allowed_punctuation for c in name)
def import_bundled_package(modulename, location):
    """Import the bundled version of a package.
    :note: This should only be called if the system version of the package
        is not adequate.
    :param modulename: Module name to import
    :param location: Location to add to sys.path (can be relative to
        ${srcdir}/lib)
    """
    if in_source_tree():
        # In the source tree the bundled copy lives under lib/<location>;
        # prepend it so it shadows any system-installed version.
        sys.path.insert(0, os.path.join(source_tree_topdir(), "lib", location))
        sys.modules[modulename] = __import__(modulename)
    else:
        # Installed tree: the bundled copy is shipped under samba.external.
        # Register it in sys.modules under the plain name so later plain
        # imports resolve to it.
        sys.modules[modulename] = __import__(
            "samba.external.%s" % modulename, fromlist=["samba.external"])
def ensure_external_module(modulename, location):
    """Add a location to sys.path if an external dependency can't be found.
    :param modulename: Module name to import
    :param location: Location to add to sys.path (can be relative to
        ${srcdir}/lib)
    """
    try:
        __import__(modulename)
    except ImportError:
        # The system copy is missing; fall back to the bundled one.
        import_bundled_package(modulename, location)
from samba import _glue

# Re-export helpers from the _glue C extension at package level so callers
# can use e.g. samba.version instead of samba._glue.version.
version = _glue.version
interface_ips = _glue.interface_ips
set_debug_level = _glue.set_debug_level
get_debug_level = _glue.get_debug_level
# NTTIME <-> unix time conversion helpers (the original bound unix2nttime
# twice; the duplicate assignment was redundant and has been removed).
unix2nttime = _glue.unix2nttime
nttime2string = _glue.nttime2string
nttime2unix = _glue.nttime2unix
generate_random_password = _glue.generate_random_password
strcasecmp_m = _glue.strcasecmp_m
strstr_m = _glue.strstr_m
| gwr/samba | source4/scripting/python/samba/__init__.py | Python | gpl-3.0 | 11,696 | 0.002137 |
import unittest
import sys
import inspect
from robot.running.handlers import _PythonHandler, _JavaHandler, DynamicHandler
from robot import utils
from robot.utils.asserts import *
from robot.running.testlibraries import TestLibrary
from robot.running.dynamicmethods import (
GetKeywordArguments, GetKeywordDocumentation, RunKeyword)
from robot.errors import DataError
from classes import NameLibrary, DocLibrary, ArgInfoLibrary
from ArgumentsPython import ArgumentsPython
if utils.JYTHON:
import ArgumentsJava
def _get_handler_methods(lib):
attrs = [getattr(lib, a) for a in dir(lib) if not a.startswith('_')]
return [a for a in attrs if inspect.ismethod(a)]
def _get_java_handler_methods(lib):
    # This hack assumes that all java handlers used start with 'a_' -- easier
    # than excluding 'equals' etc. otherwise
    return [method for method in _get_handler_methods(lib)
            if method.__name__.startswith('a_')]
class LibraryMock:
    """Minimal stand-in for a test library: just a name and a scope."""

    def __init__(self, name='MyLibrary', scope='GLOBAL'):
        self.name = name
        # Handlers compare against the original (pre-alias) name too.
        self.orig_name = name
        self.scope = scope
class TestPythonHandler(unittest.TestCase):
    """Tests for _PythonHandler metadata: name, docs and argument info.

    The fixture libraries (NameLibrary, DocLibrary, ...) encode the
    expected values in each method's docstring or attributes.
    """

    def test_name(self):
        for method in _get_handler_methods(NameLibrary()):
            handler = _PythonHandler(LibraryMock('mylib'), method.__name__, method)
            # The fixture stores the expected keyword name in the docstring.
            assert_equals(handler.name, method.__doc__)
            assert_equals(handler.longname, 'mylib.'+method.__doc__)

    def test_docs(self):
        for method in _get_handler_methods(DocLibrary()):
            handler = _PythonHandler(LibraryMock(), method.__name__, method)
            assert_equals(handler.doc, method.expected_doc)
            assert_equals(handler.shortdoc, method.expected_shortdoc)

    def test_arguments(self):
        for method in _get_handler_methods(ArgInfoLibrary()):
            handler = _PythonHandler(LibraryMock(), method.__name__, method)
            args = handler.arguments
            argspec = (args.positional, args.defaults, args.varargs, args.kwargs)
            # The docstring holds a literal tuple of the expected argspec.
            expected = eval(method.__doc__)
            assert_equals(argspec, expected, method.__name__)

    def test_arg_limits(self):
        for method in _get_handler_methods(ArgumentsPython()):
            handler = _PythonHandler(LibraryMock(), method.__name__, method)
            exp_mina, exp_maxa = eval(method.__doc__)
            assert_equals(handler.arguments.minargs, exp_mina)
            assert_equals(handler.arguments.maxargs, exp_maxa)

    def test_getarginfo_getattr(self):
        handlers = TestLibrary('classes.GetattrLibrary').handlers
        assert_equals(len(handlers), 3)
        for handler in handlers:
            assert_true(handler.name in ['Foo','Bar','Zap'])
            assert_equals(handler.arguments.minargs, 0)
            # __getattr__-based keywords accept an unbounded argument list.
            assert_equals(handler.arguments.maxargs, sys.maxint)
class TestDynamicHandlerCreation(unittest.TestCase):
    """Tests for DynamicHandler built from get_keyword_arguments /
    get_keyword_documentation return values, including invalid specs."""

    def test_none_doc(self):
        self._assert_doc(None, '')

    def test_empty_doc(self):
        self._assert_doc('')

    def test_non_empty_doc(self):
        self._assert_doc('This is some documentation')

    def test_non_ascii_doc(self):
        self._assert_doc(u'P\xe4iv\xe4\xe4')

    if not utils.IRONPYTHON:

        def test_with_utf8_doc(self):
            # UTF-8 encoded bytes must be decoded back to unicode.
            doc = u'P\xe4iv\xe4\xe4'
            self._assert_doc(doc.encode('UTF-8'), doc)

    def test_invalid_doc_type(self):
        self._assert_fails('Return value must be string.', doc=True)

    def test_none_argspec(self):
        # No argspec at all means "anything goes": unlimited varargs.
        self._assert_spec(None, maxargs=sys.maxint, vararg='varargs', kwarg=False)

    def test_none_argspec_when_kwargs_supported(self):
        self._assert_spec(None, maxargs=sys.maxint, vararg='varargs', kwarg='kwargs')

    def test_empty_argspec(self):
        self._assert_spec([])

    def test_mandatory_args(self):
        for argspec in [['arg'], ['arg1', 'arg2', 'arg3']]:
            self._assert_spec(argspec, len(argspec), len(argspec), argspec)

    def test_only_default_args(self):
        self._assert_spec(['defarg1=value', 'defarg2=defvalue'], 0, 2,
                          ['defarg1', 'defarg2'], ['value', 'defvalue'])

    def test_default_value_may_contain_equal_sign(self):
        # Only the first '=' separates name from default.
        self._assert_spec(['d=foo=bar'], 0, 1, ['d'], ['foo=bar'])

    def test_varargs(self):
        self._assert_spec(['*vararg'], 0, sys.maxint, vararg='vararg')

    def test_kwargs(self):
        self._assert_spec(['**kwarg'], 0, 0, kwarg='kwarg')

    def test_varargs_and_kwargs(self):
        self._assert_spec(['*vararg', '**kwarg'],
                          0, sys.maxint, vararg='vararg', kwarg='kwarg')

    def test_integration(self):
        self._assert_spec(['arg', 'default=value'], 1, 2,
                          ['arg', 'default'], ['value'])
        self._assert_spec(['arg', 'default=value', '*var'], 1, sys.maxint,
                          ['arg', 'default'], ['value'], 'var')
        self._assert_spec(['arg', 'default=value', '**kw'], 1, 2,
                          ['arg', 'default'], ['value'], None, 'kw')
        self._assert_spec(['arg', 'default=value', '*var', '**kw'], 1, sys.maxint,
                          ['arg', 'default'], ['value'], 'var', 'kw')

    def test_invalid_argspec_type(self):
        for argspec in [True, [1, 2]]:
            self._assert_fails("Return value must be list of strings.", argspec)

    def test_mandatory_arg_after_default_arg(self):
        for argspec in [['d=v', 'arg'], ['a', 'b', 'c=v', 'd']]:
            self._assert_fails('Non-default argument after default arguments.',
                               argspec)

    def test_positional_after_vararg(self):
        for argspec in [['*foo', 'arg'], ['arg', '*var', 'arg'],
                        ['a', 'b=d', '*var', 'c'], ['*var', '*vararg']]:
            self._assert_fails('Positional argument after varargs.', argspec)

    def test_kwarg_not_last(self):
        for argspec in [['**foo', 'arg'], ['arg', '**kw', 'arg'],
                        ['a', 'b=d', '**kw', 'c'], ['**kw', '*vararg'],
                        ['**kw', '**kwarg']]:
            self._assert_fails('Only last argument can be kwargs.', argspec)

    def test_missing_kwargs_support(self):
        # **kwargs in the spec requires a three-parameter run_keyword.
        self._assert_fails("Too few 'run_keyword' method parameters"
                           " for **kwargs support.",
                           ['**kwargs'])

    def _assert_doc(self, doc, expected=None):
        expected = doc if expected is None else expected
        assert_equals(self._create_handler(doc=doc).doc, expected)

    def _assert_spec(self, argspec, minargs=0, maxargs=0, positional=[],
                     defaults=[], vararg=None, kwarg=None):
        # kwarg=None -> check both run_keyword variants; kwarg=False ->
        # only the two-parameter variant (and expect no kwargs).
        if kwarg is None:
            kwargs_support_modes = [True, False]
        elif kwarg is False:
            kwargs_support_modes = [False]
            kwarg = None
        else:
            kwargs_support_modes = [True]
        for kwargs_support in kwargs_support_modes:
            arguments = self._create_handler(argspec,
                                             kwargs_support=kwargs_support
                                             ).arguments
            assert_equals(arguments.minargs, minargs)
            assert_equals(arguments.maxargs, maxargs)
            assert_equals(arguments.positional, positional)
            assert_equals(arguments.defaults, defaults)
            assert_equals(arguments.varargs, vararg)
            assert_equals(arguments.kwargs, kwarg)

    def _assert_fails(self, error, argspec=None, doc=None):
        assert_raises_with_msg(DataError, error,
                               self._create_handler, argspec, doc)

    def _create_handler(self, argspec=None, doc=None, kwargs_support=False):
        lib = LibraryMock('TEST CASE')
        # The run_keyword arity decides whether **kwargs is supported.
        if kwargs_support:
            lib.run_keyword = lambda name, args, kwargs: None
        else:
            lib.run_keyword = lambda name, args: None
        lib.run_keyword.__name__ = 'run_keyword'
        doc = GetKeywordDocumentation(lib)._handle_return_value(doc)
        argspec = GetKeywordArguments(lib)._handle_return_value(argspec)
        return DynamicHandler(lib, 'mock', RunKeyword(lib), doc, argspec)
# Java handler tests only make sense (and only import) under Jython.
if utils.JYTHON:

    # Map handler name -> bound method for the 'a_*' fixture methods.
    handlers = dict((method.__name__, method) for method in
                    _get_java_handler_methods(ArgumentsJava('Arg', ['varargs'])))

    class TestJavaHandler(unittest.TestCase):
        """Tests for _JavaHandler argument limits derived from Java
        method signatures (fixture names encode min/max counts)."""

        def test_arg_limits_no_defaults_or_varargs(self):
            for count in [0, 1, 3]:
                method = handlers['a_%d' % count]
                handler = _JavaHandler(LibraryMock(), method.__name__, method)
                assert_equals(handler.arguments.minargs, count)
                assert_equals(handler.arguments.maxargs, count)

        def test_arg_limits_with_varargs(self):
            for count in [0, 1]:
                # 'a_<n>_n' fixtures take n fixed args plus Java varargs.
                method = handlers['a_%d_n' % count]
                handler = _JavaHandler(LibraryMock(), method.__name__, method)
                assert_equals(handler.arguments.minargs, count)
                assert_equals(handler.arguments.maxargs, sys.maxint)

        def test_arg_limits_with_defaults(self):
            # defaults i.e. multiple signatures
            for mina, maxa in [(0, 1), (1, 3)]:
                method = handlers['a_%d_%d' % (mina, maxa)]
                handler = _JavaHandler(LibraryMock(), method.__name__, method)
                assert_equals(handler.arguments.minargs, mina)
                assert_equals(handler.arguments.maxargs, maxa)
class TestArgumentCoercer(unittest.TestCase):
    """Tests for coercing string arguments to Java primitive types
    (int, boolean, double/float) based on keyword signatures."""

    def setUp(self):
        # Constructor arguments '42'/'true' exercise coercion at init time.
        self.lib = TestLibrary('ArgTypeCoercion', ['42', 'true'])

    def test_coercion_in_constructor(self):
        instance = self.lib.get_instance()
        assert_equals(instance.myInt, 42)
        assert_equals(instance.myBool, True)

    def test_coercing_to_integer(self):
        self._test_coercion(self._handler_named('intArgument'),
                            ['1'], [1])

    def test_coercing_to_boolean(self):
        handler = self._handler_named('booleanArgument')
        # Boolean coercion is case-insensitive.
        self._test_coercion(handler, ['True'], [True])
        self._test_coercion(handler, ['FALSE'], [ False])

    def test_coercing_to_real_number(self):
        self._test_coercion(self._handler_named('doubleArgument'),
                            ['1.42'], [1.42])
        self._test_coercion(self._handler_named('floatArgument'),
                            ['-9991.098'], [-9991.098])

    def test_coercion_with_compatible_types(self):
        self._test_coercion(self._handler_named('coercableKeywordWithCompatibleTypes'),
                            ['9999', '-42', 'FaLsE', '31.31'],
                            [9999, -42, False, 31.31])

    def test_arguments_that_are_not_strings_are_not_coerced(self):
        # Only string arguments are coerced; objects pass through as-is.
        self._test_coercion(self._handler_named('intArgument'),
                            [self.lib], [self.lib])
        self._test_coercion(self._handler_named('booleanArgument'),
                            [42], [42])

    def test_coercion_fails_with_reasonable_message(self):
        exp_msg = 'Argument at position 1 cannot be coerced to %s.'
        self._test_coercion_fails(self._handler_named('intArgument'),
                                  exp_msg % 'integer')
        self._test_coercion_fails(self._handler_named('booleanArgument'),
                                  exp_msg % 'boolean')
        self._test_coercion_fails(self._handler_named('floatArgument'),
                                  exp_msg % 'floating point number')

    def test_no_arg_no_coercion(self):
        self._test_coercion(self._handler_named('noArgument'), [], [])

    def test_coercing_multiple_arguments(self):
        self._test_coercion(self._handler_named('coercableKeyword'),
                            ['10.0', '42', 'tRUe'], [10.0, 42, True])

    def test_coercion_is_not_done_with_conflicting_signatures(self):
        # Overloads with incompatible types make coercion ambiguous; the
        # original strings are passed through untouched.
        self._test_coercion(self._handler_named('unCoercableKeyword'),
                            ['True', '42'], ['True', '42'])

    def test_coercable_and_uncoercable_args_in_same_kw(self):
        self._test_coercion(self._handler_named('coercableAndUnCoercableArgs'),
                            ['1', 'False', '-23', '0'], ['1', False, -23, '0'])

    def _handler_named(self, name):
        return self.lib.handlers[name]

    def _test_coercion(self, handler, args, expected):
        assert_equals(handler._arg_coercer.coerce(args, {}), expected)

    def _test_coercion_fails(self, handler, expected_message):
        assert_raises_with_msg(ValueError, expected_message,
                               handler._arg_coercer.coerce, ['invalid'], {})
if __name__ == '__main__':
    # Allow running this test module directly: python test_handlers.py
    unittest.main()
| yahman72/robotframework | utest/running/test_handlers.py | Python | apache-2.0 | 12,868 | 0.001399 |
##
##SMART FP7 - Search engine for MultimediA enviRonment generated contenT
##Webpage: http://smartfp7.eu
##
## This Source Code Form is subject to the terms of the Mozilla Public
## License, v. 2.0. If a copy of the MPL was not distributed with this
## file, You can obtain one at http://mozilla.org/MPL/2.0/.
##
## The Original Code is Copyright (c) 2012-2013 Atos
## All Rights Reserved
##
## Contributor(s):
## Jose Miguel Garrido, jose.garridog at atos dot net
##
"""The third Multimedia Data Manager.
This module stores the metadata from XML files to a SQLite database.
The video generator uses this database to create the actual video clips"""
# This file must work in python >2.7 and >3.3
import sys
p_v = 2 if sys.version_info < (3,) else 3
if p_v == 2:
import urllib, urllib2
import ConfigParser as cp
else:
import urllib.request, urllib.parse, urllib.error
import configparser as cp
import json
import couchdb
import argparse
import logging
import time, datetime
def getConf(filename, section):
    """Load one section of an INI configuration file into a dict.

    :param filename: path of the INI configuration file
    :param section: name of the section to read
    :return: dict of option name -> string value, except that
        ``wait_time`` is coerced to int and ``couch_server`` set to the
        literal string "None" becomes the Python value None.
    """
    dict1 = {}
    config = cp.ConfigParser()
    config.read(filename)
    options = config.options(section)
    for option in options:
        # Catch only configparser errors -- the original bare `except:`
        # also swallowed KeyboardInterrupt/SystemExit.
        try:
            dict1[option] = config.get(section, option)
        except cp.Error:
            print("exception on {}!".format(option))
            dict1[option] = None
    dict1["wait_time"] = int(dict1["wait_time"])
    dict1["couch_server"] = dict1["couch_server"] if (dict1["couch_server"] != "None") else None
    return dict1
def createURL(conf):
    """Build the LDM search URL and the query-description dict from *conf*.

    :param conf: configuration dict from getConf(); must hold "id",
        "url_base", "search_type" ("textual" or "geo-search"),
        "search_for" ("venue" or activities), and either "keywords" or the
        coordinate options.
    :return: (url, query) where url is the HTTP endpoint to fetch and
        query describes the search for storage alongside each result.
    """
    query = { "@id": conf["id"] }
    if conf["search_type"] == "textual":
        command = "txtSearch"
        if conf["search_for"] == "venue":
            target = "venues"
        else:
            target = "activities"
        # p_v is the major Python version detected at import time; only
        # the quote() location differs between the two branches.
        if p_v == 2:
            url = '{}/{}/{}?label=%22{}%22'.format(conf["url_base"],command,
                                                   target,
                                                   urllib.quote(conf["keywords"]))
        else:
            url = '{}/{}/{}?label=%22{}%22'.format(conf["url_base"],command,
                                                   target,
                                                   urllib.parse.quote(conf["keywords"]))
        query.update({ "keywords":conf["keywords"].split(),
                       "searched_item":conf["search_for"],
                       "search_type":"textual" })
    elif conf["search_type"] == "geo-search":
        command = "structuredSearch"
        query.update({"search_type":"geo-search"})
        # Four geo targets: {venues, activities} x {rectangle, circle}.
        if conf["search_for"] == "venue":
            query.update({"searched_item":"venues"})
            if conf["coord_type"] == "square":
                target = "locRec"
                query.update({"search_coords":[conf["coord1_long"],conf["coord1_lat"],
                                               conf["coord2_long"],conf["coord2_lat"]]})
            else:
                target = "locCirc"
                query.update({"search_coords":[conf["coord1_long"],conf["coord1_lat"],
                                               conf["radius"]]})
        else:
            query.update({"searched_item":"activities"})
            if conf["coord_type"] == "square":
                target = "actRec"
                query.update({"search_coords":[conf["coord1_long"],conf["coord1_lat"],
                                               conf["coord2_long"],conf["coord2_lat"]]})
            else:
                target = "actCirc"
                query.update({"search_coords":[conf["coord1_long"],conf["coord1_lat"],
                                               conf["radius"]]})
        if target in ("actCirc","locCirc"):
            url = '{}/{}/{}?lat1={}&long1={}&radius={}'.format(conf["url_base"],
                                                               command,
                                                               target,
                                                               conf["coord1_lat"],
                                                               conf["coord1_long"],
                                                               conf["radius"])
        else:
            url = '{}/{}/{}?lat1={}&long1={}&lat2={}&long2={}'.format(conf["url_base"],
                                                                      command,target,
                                                                      conf["coord1_lat"],
                                                                      conf["coord1_long"],
                                                                      conf["coord2_lat"],
                                                                      conf["coord2_long"])
    # NOTE(review): if search_type is neither "textual" nor "geo-search",
    # `url` is never bound and the debug call below raises NameError --
    # confirm whether the config is validated upstream.
    logging.debug(url)
    logging.debug(query)
    return url, query
def formatItem(key, doc, time_query, query_info, num):
    """Build a CouchDB document for one LDM search result.

    :param key: identifier of the result item
    :param doc: raw result payload from the LDM service
    :param time_query: timestamp string reported by the service
    :param query_info: description of the query that produced the result
    :param num: index of the item within this batch (used to keep the
        millisecond-based document ids unique)
    :return: dict ready to be stored in CouchDB.
    """
    ldm_result = dict(query_info)
    ldm_result["key"] = key
    if query_info["search_type"] == "textual":
        ldm_result["location"] = doc["location"]
    else:
        # Geo results carry a list of coordinate records; split them into
        # parallel name/long/lat lists.
        ldm_result["location"] = [loc["location"] for loc in doc["location"]]
        ldm_result["location_long"] = [loc["long"] for loc in doc["location"]]
        ldm_result["location_lat"] = [loc["lat"] for loc in doc["location"]]
    if "isPrimaryTopicOf" in doc:
        ldm_result["is_primary_topic_of"] = doc["isPrimaryTopicOf"]
    # These optional fields are copied through under their original names.
    for field in ("txt", "label", "date", "name", "attendance"):
        if field in doc:
            ldm_result[field] = doc[field]
    data = {"time": time_query, "ldm_result": ldm_result}
    # Offset by num milliseconds so items of one batch get distinct ids.
    timestamp = time.time() + (num / 1000.0)
    time_txt = datetime.datetime.utcfromtimestamp(timestamp).isoformat() + "Z"
    item = {"_id": time_txt, "data": data, "timestamp": str(int(timestamp * 1000))}
    # check for not intended results
    remainder = set(doc.keys()) - set(("location", "isPrimaryTopicOf", "txt",
                                      "label", "date", "name", "attendance"))
    if remainder:
        logging.warning("WARNING")
        logging.warning(remainder)
    logging.debug(item)
    return item
def storeItem(db, item):
    """Persist *item* into the CouchDB database handle *db*."""
    db.save(item)
if __name__ == '__main__':
    # Initialization: verbose logging plus CLI options for the config
    # file path and the section to read from it.
    logging.basicConfig(level=logging.DEBUG,format='%(asctime)s-> %(message)s')
    parser = argparse.ArgumentParser()
    parser.add_argument("-c", "--conf_file",type=str,
                        help="configuration file path")
    parser.add_argument("-s", "--section",type=str,
                        help="section of the configuration to apply")
    args = parser.parse_args()
    conf_file = args.conf_file if args.conf_file else "ldm_feeder_conf.ini"
    # Bug fix: the original tested args.conf_file here, so -s was ignored
    # unless -c was also given.
    section = args.section if args.section else "default"
    # Poll the LDM service until wait_time is configured as 0.
    while True:
        conf = getConf(conf_file,section)
        couch = couchdb.Server(conf["couch_server"]) if conf["couch_server"] else couchdb.Server()
        db = couch[conf["couch_database"]]
        # Fetch one batch of results and store each item in CouchDB.
        url, query_info = createURL(conf)
        if p_v == 2:
            response = urllib2.urlopen(url).read()
        else:
            response = urllib.request.urlopen(url).read()
        response = response.decode("utf-8")
        response = json.loads(response)
        if "locations" in response["data"]:
            items = "locations"
        elif "activities" in response["data"]:
            items = "activities"
        for num, i in enumerate(response["data"][items]):
            responseItem = formatItem(i,response["data"][items][i],
                                      response["data"]["time"],query_info, num)
            storeItem(db, responseItem)
        # wait_time == 0 means "run once"; otherwise sleep and repeat.
        if conf["wait_time"] == 0:
            break
        else:
            time.sleep(conf["wait_time"])
| SmartSearch/Edge-Node | LinkedDataManager/feed_generator/ldm_feeder.py | Python | mpl-2.0 | 8,016 | 0.013348 |
# -*- coding: utf-8 -*-
""" Tests of managing ESX hypervisors directly. If another direct ones will be supported, it should
not be difficult to extend the parametrizer.
"""
import pytest
from cfme.infrastructure.provider.virtualcenter import VMwareProvider
from cfme.common.provider import DefaultEndpoint
from utils import testgen
from utils.net import resolve_hostname
from utils.version import Version
def pytest_generate_tests(metafunc):
    """Parametrize tests with a synthetic "host provider" per VMware provider.

    For each virtualcenter provider (version >= 5.0, with at least one
    host in its data), build a VMwareProvider object that points directly
    at the provider's first ESX host and append it as the
    ``_host_provider`` argument.
    """
    argnames, argvalues, idlist = testgen.providers_by_class(metafunc, [VMwareProvider])
    argnames = argnames + ["_host_provider"]
    new_idlist = []
    new_argvalues = []
    for i, argvalue_tuple in enumerate(argvalues):
        args = dict(zip(argnames, argvalue_tuple))
        # TODO
        # All this should be replaced with a proper ProviderFilter passed to testgen.providers()
        if args['provider'].type != "virtualcenter":
            continue
        hosts = args['provider'].data.get("hosts", [])
        if not hosts:
            continue
        version = args['provider'].data.get("version")
        if version is None:
            # No version, no test
            continue
        if Version(version) < "5.0":
            # Ignore lesser than 5
            continue
        # Only the first host of the provider is managed directly.
        host = hosts[0]
        ip_address = resolve_hostname(host["name"])
        endpoint = DefaultEndpoint(credentials=host["credentials"], hostname=host["name"])
        # Mock provider data: copy the real provider's data but point the
        # name/address fields at the single host.
        provider_data = {}
        provider_data.update(args['provider'].data)
        provider_data["name"] = host["name"]
        provider_data["hostname"] = host["name"]
        provider_data["ipaddress"] = ip_address
        provider_data.pop("host_provisioning", None)
        provider_data["hosts"] = [host]
        provider_data["discovery_range"] = {}
        provider_data["discovery_range"]["start"] = ip_address
        provider_data["discovery_range"]["end"] = ip_address
        host_provider = VMwareProvider(
            name=host["name"],
            ip_address=ip_address,
            endpoints=endpoint,
            provider_data=provider_data)
        argvalues[i].append(host_provider)
        # Test id shows both the original provider key and the host name.
        idlist[i] = "{}/{}".format(args['provider'].key, host["name"])
        new_idlist.append(idlist[i])
        new_argvalues.append(argvalues[i])
    testgen.parametrize(metafunc, argnames, new_argvalues, ids=new_idlist, scope="module")
@pytest.yield_fixture(scope="module")
def host_provider(_host_provider, provider):
    """Yield the host-based provider, removing the real provider first.

    The original VMware provider (and its hosts) would clash with the
    direct-host provider, so both are deleted before the test and the
    host provider is cleaned up afterwards.
    """
    if provider.exists:
        # Delete original provider's hosts first
        for host in provider.hosts:
            if host.exists:
                host.delete(cancel=False)
        # Get rid of the original provider, it would make a mess.
        provider.delete(cancel=False)
        provider.wait_for_delete()
    yield _host_provider
    # Teardown: remove the host provider and its hosts again.
    for host in _host_provider.hosts:
        if host.exists:
            host.delete(cancel=False)
    _host_provider.delete(cancel=False)
    _host_provider.wait_for_delete()
@pytest.mark.tier(2)
def test_validate(host_provider):
    """Tests that the CFME can manage also just the hosts of VMware.

    Prerequisities:
        * A CFME and a VMware provider (not setup in the CFME yet).

    Steps:
        * Use the IP address of a host of the VMware provider and its credentials and use them to
            set up a VMware provider.
        * Refresh the provider
        * The provider should refresh without problems.
    """
    host_provider.create()
    host_provider.refresh_provider_relationships()
    # validate() fails the test if the refresh did not complete cleanly.
    host_provider.validate()
| dajohnso/cfme_tests | cfme/tests/infrastructure/test_esx_direct_host.py | Python | gpl-2.0 | 3,559 | 0.001686 |
# coding=utf-8
# Copyright 2020 The Learning-to-Prompt Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific Learning-to-Prompt governing permissions and
# limitations under the License.
# ==============================================================================
"""Data preprocessing and augmentation from SimCLR."""
import functools
from absl import flags
import tensorflow.compat.v1 as tf
FLAGS = flags.FLAGS
CROP_PROPORTION = 0.875 # Standard for ImageNet.
def random_apply(func, p, x):
  """Randomly apply function func to x with probability p."""
  coin = tf.random_uniform([], minval=0, maxval=1, dtype=tf.float32)
  return tf.cond(
      tf.less(coin, tf.cast(p, tf.float32)),
      lambda: func(x),
      lambda: x)
def random_brightness(image, max_delta, impl='simclrv2'):
  """A multiplicative vs additive change of brightness."""
  if impl == 'simclrv2':
    # Multiplicative jitter: scale by a factor drawn around 1.0, clamped
    # below at 0.
    lower = tf.maximum(1.0 - max_delta, 0)
    upper = 1.0 + max_delta
    return image * tf.random_uniform([], lower, upper)
  if impl == 'simclrv1':
    # Additive jitter as used by SimCLR v1.
    return tf.image.random_brightness(image, max_delta=max_delta)
  raise ValueError('Unknown impl {} for random brightness.'.format(impl))
def to_grayscale(image, keep_channels=True):
  """Convert an RGB image to grayscale, optionally tiling back to 3 channels."""
  gray = tf.image.rgb_to_grayscale(image)
  if keep_channels:
    # Repeat the single channel so the output shape matches RGB input.
    gray = tf.tile(gray, [1, 1, 3])
  return gray
def color_jitter(image, strength, random_order=True, impl='simclrv2'):
  """Distorts the color of the image.

  Args:
    image: The input image tensor.
    strength: Floating-point strength of the color augmentation; scales the
      brightness/contrast/saturation deltas by 0.8 and the hue delta by 0.2.
    random_order: A bool, specifying whether to randomize the jittering order.
    impl: 'simclrv1' or 'simclrv2'. Whether to use simclrv1 or simclrv2's
      version of random brightness.

  Returns:
    The distorted image tensor.
  """
  jitter_fn = color_jitter_rand if random_order else color_jitter_nonrand
  return jitter_fn(
      image,
      brightness=0.8 * strength,
      contrast=0.8 * strength,
      saturation=0.8 * strength,
      hue=0.2 * strength,
      impl=impl)
def color_jitter_nonrand(image,
                         brightness=0,
                         contrast=0,
                         saturation=0,
                         hue=0,
                         impl='simclrv2'):
  """Distorts the color of the image (jittering order is fixed).
  Args:
    image: The input image tensor.
    brightness: A float, specifying the brightness for color jitter.
    contrast: A float, specifying the contrast for color jitter.
    saturation: A float, specifying the saturation for color jitter.
    hue: A float, specifying the hue for color jitter.
    impl: 'simclrv1' or 'simclrv2'.  Whether to use simclrv1 or simclrv2's
      version of random brightness.
  Returns:
    The distorted image tensor.
  """
  with tf.name_scope('distort_color'):
    def apply_transform(i, x, brightness, contrast, saturation, hue):
      """Apply the i-th transformation."""
      # Fixed order: 0=brightness, 1=contrast, 2=saturation, 3=hue.
      # NOTE(review): when an earlier transform has strength 0, this elif
      # chain falls through to the hue branch on that pass, so hue can be
      # applied more than once; this mirrors the upstream SimCLR code.
      if brightness != 0 and i == 0:
        x = random_brightness(x, max_delta=brightness, impl=impl)
      elif contrast != 0 and i == 1:
        x = tf.image.random_contrast(x, lower=1 - contrast, upper=1 + contrast)
      elif saturation != 0 and i == 2:
        x = tf.image.random_saturation(
            x, lower=1 - saturation, upper=1 + saturation)
      elif hue != 0:
        x = tf.image.random_hue(x, max_delta=hue)
      return x
    for i in range(4):
      image = apply_transform(i, image, brightness, contrast, saturation, hue)
      # Each transform can push values outside [0, 1]; clamp after every step.
      image = tf.clip_by_value(image, 0., 1.)
    return image
def color_jitter_rand(image,
                      brightness=0,
                      contrast=0,
                      saturation=0,
                      hue=0,
                      impl='simclrv2'):
  """Distorts the color of the image (jittering order is random).
  Args:
    image: The input image tensor.
    brightness: A float, specifying the brightness for color jitter.
    contrast: A float, specifying the contrast for color jitter.
    saturation: A float, specifying the saturation for color jitter.
    hue: A float, specifying the hue for color jitter.
    impl: 'simclrv1' or 'simclrv2'.  Whether to use simclrv1 or simclrv2's
      version of random brightness.
  Returns:
    The distorted image tensor.
  """
  with tf.name_scope('distort_color'):
    def apply_transform(i, x):
      """Apply the i-th transformation."""
      # Each *_foo closure applies one transform, or is a no-op when the
      # corresponding strength is zero.
      def brightness_foo():
        if brightness == 0:
          return x
        else:
          return random_brightness(x, max_delta=brightness, impl=impl)
      def contrast_foo():
        if contrast == 0:
          return x
        else:
          return tf.image.random_contrast(
              x, lower=1 - contrast, upper=1 + contrast)
      def saturation_foo():
        if saturation == 0:
          return x
        else:
          return tf.image.random_saturation(
              x, lower=1 - saturation, upper=1 + saturation)
      def hue_foo():
        if hue == 0:
          return x
        else:
          return tf.image.random_hue(x, max_delta=hue)
      # `i` is a tensor (an element of the shuffled permutation below), so
      # the dispatch is a two-level tf.cond binary search, not Python ifs.
      x = tf.cond(
          tf.less(i, 2),
          lambda: tf.cond(tf.less(i, 1), brightness_foo, contrast_foo),
          lambda: tf.cond(tf.less(i, 3), saturation_foo, hue_foo))
      return x
    # Apply all four transforms once each, in a random order.
    perm = tf.random_shuffle(tf.range(4))
    for i in range(4):
      image = apply_transform(perm[i], image)
      image = tf.clip_by_value(image, 0., 1.)
    return image
def _compute_crop_shape(image_height, image_width, aspect_ratio,
                        crop_proportion):
  """Compute aspect ratio-preserving shape for central crop.
  The resulting shape retains `crop_proportion` along one side and a proportion
  less than or equal to `crop_proportion` along the other side.
  Args:
    image_height: Height of image to be cropped.
    image_width: Width of image to be cropped.
    aspect_ratio: Desired aspect ratio (width / height) of output.
    crop_proportion: Proportion of image to retain along the less-cropped side.
  Returns:
    crop_height: Height of image after cropping.
    crop_width: Width of image after cropping.
  """
  image_width_float = tf.cast(image_width, tf.float32)
  image_height_float = tf.cast(image_height, tf.float32)
  def _requested_aspect_ratio_wider_than_image():
    # Width is the limiting side: keep crop_proportion of the width and
    # derive the crop height from the requested aspect ratio.
    crop_height = tf.cast(
        tf.rint(crop_proportion / aspect_ratio * image_width_float), tf.int32)
    crop_width = tf.cast(tf.rint(crop_proportion * image_width_float), tf.int32)
    return crop_height, crop_width
  def _image_wider_than_requested_aspect_ratio():
    # Height is the limiting side: keep crop_proportion of the height and
    # derive the crop width from the requested aspect ratio.
    crop_height = tf.cast(
        tf.rint(crop_proportion * image_height_float), tf.int32)
    crop_width = tf.cast(
        tf.rint(crop_proportion * aspect_ratio * image_height_float), tf.int32)
    return crop_height, crop_width
  return tf.cond(aspect_ratio > image_width_float / image_height_float,
                 _requested_aspect_ratio_wider_than_image,
                 _image_wider_than_requested_aspect_ratio)
def center_crop(image, height, width, crop_proportion):
  """Centrally crop `image` (preserving aspect ratio) and rescale.

  Args:
    image: Image Tensor to crop.
    height: Height of image to be cropped.
    width: Width of image to be cropped.
    crop_proportion: Proportion of image to retain along the less-cropped side.

  Returns:
    A `height` x `width` x channels Tensor holding a central crop of `image`.
  """
  input_shape = tf.shape(image)
  in_height = input_shape[0]
  in_width = input_shape[1]
  crop_height, crop_width = _compute_crop_shape(
      in_height, in_width, height / width, crop_proportion)
  # Center the crop window (round the split of the leftover pixels up).
  offset_height = ((in_height - crop_height) + 1) // 2
  offset_width = ((in_width - crop_width) + 1) // 2
  cropped = tf.image.crop_to_bounding_box(
      image, offset_height, offset_width, crop_height, crop_width)
  return tf.image.resize_bicubic([cropped], [height, width])[0]
def distorted_bounding_box_crop(image,
                                bbox,
                                min_object_covered=0.1,
                                aspect_ratio_range=(0.75, 1.33),
                                area_range=(0.05, 1.0),
                                max_attempts=100,
                                scope=None):
  """Generates cropped_image using one of the bboxes randomly distorted.
  See `tf.image.sample_distorted_bounding_box` for more documentation.
  Args:
    image: `Tensor` of image data.
    bbox: `Tensor` of bounding boxes arranged `[1, num_boxes, coords]` where
      each coordinate is [0, 1) and the coordinates are arranged as `[ymin,
      xmin, ymax, xmax]`. If num_boxes is 0 then use the whole image.
    min_object_covered: An optional `float`. Defaults to `0.1`. The cropped area
      of the image must contain at least this fraction of any bounding box
      supplied.
    aspect_ratio_range: An optional list of `float`s. The cropped area of the
      image must have an aspect ratio = width / height within this range.
    area_range: An optional list of `float`s. The cropped area of the image must
      contain a fraction of the supplied image within in this range.
    max_attempts: An optional `int`. Number of attempts at generating a cropped
      region of the image of the specified constraints. After `max_attempts`
      failures, return the entire image.
    scope: Optional `str` for name scope.
  Returns:
    (cropped image `Tensor`, distorted bbox `Tensor`).
  """
  with tf.name_scope(scope, 'distorted_bounding_box_crop', [image, bbox]):
    shape = tf.shape(image)
    # Sample a crop window satisfying the coverage/aspect/area constraints;
    # falls back to the whole image after `max_attempts` failures.
    sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box(
        shape,
        bounding_boxes=bbox,
        min_object_covered=min_object_covered,
        aspect_ratio_range=aspect_ratio_range,
        area_range=area_range,
        max_attempts=max_attempts,
        use_image_if_no_bounding_boxes=True)
    bbox_begin, bbox_size, _ = sample_distorted_bounding_box
    # Crop the image to the specified bounding box.
    offset_y, offset_x, _ = tf.unstack(bbox_begin)
    target_height, target_width, _ = tf.unstack(bbox_size)
    image = tf.image.crop_to_bounding_box(image, offset_y, offset_x,
                                          target_height, target_width)
    return image
def crop_and_resize(image, height, width):
  """Make a random crop and resize it to height `height` and width `width`.

  Args:
    image: Tensor representing the image.
    height: Desired image height.
    width: Desired image width.

  Returns:
    A `height` x `width` x channels Tensor holding a random crop of `image`.
  """
  aspect_ratio = width / height
  # A single bbox covering the whole image: crops are unconstrained by
  # object location.
  whole_image_bbox = tf.constant(
      [0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4])
  cropped = distorted_bounding_box_crop(
      image,
      whole_image_bbox,
      min_object_covered=0.1,
      aspect_ratio_range=(3. / 4 * aspect_ratio, 4. / 3. * aspect_ratio),
      area_range=(0.08, 1.0),
      max_attempts=100,
      scope=None)
  return tf.image.resize_bicubic([cropped], [height, width])[0]
def gaussian_blur(image, kernel_size, sigma, padding='SAME'):
  """Blurs the given image with separable convolution.
  Args:
    image: Tensor of shape [height, width, channels] and dtype float to blur.
    kernel_size: Integer Tensor for the size of the blur kernel. This is should
      be an odd number. If it is an even number, the actual kernel size will be
      size + 1.
    sigma: Sigma value for gaussian operator.
    padding: Padding to use for the convolution. Typically 'SAME' or 'VALID'.
  Returns:
    A Tensor representing the blurred image.
  """
  radius = tf.to_int32(kernel_size / 2)
  # Force an odd kernel size so the blur is centred on each pixel.
  kernel_size = radius * 2 + 1
  x = tf.to_float(tf.range(-radius, radius + 1))
  # 1-D Gaussian weights, normalised to sum to one.
  blur_filter = tf.exp(-tf.pow(x, 2.0) /
                       (2.0 * tf.pow(tf.to_float(sigma), 2.0)))
  blur_filter /= tf.reduce_sum(blur_filter)
  # One vertical and one horizontal filter.
  blur_v = tf.reshape(blur_filter, [kernel_size, 1, 1, 1])
  blur_h = tf.reshape(blur_filter, [1, kernel_size, 1, 1])
  num_channels = tf.shape(image)[-1]
  # Replicate the 1-D filter across channels; the two depthwise passes below
  # implement a separable 2-D Gaussian (horizontal then vertical).
  blur_h = tf.tile(blur_h, [1, 1, num_channels, 1])
  blur_v = tf.tile(blur_v, [1, 1, num_channels, 1])
  expand_batch_dim = image.shape.ndims == 3
  if expand_batch_dim:
    # Tensorflow requires batched input to convolutions, which we can fake with
    # an extra dimension.
    image = tf.expand_dims(image, axis=0)
  blurred = tf.nn.depthwise_conv2d(
      image, blur_h, strides=[1, 1, 1, 1], padding=padding)
  blurred = tf.nn.depthwise_conv2d(
      blurred, blur_v, strides=[1, 1, 1, 1], padding=padding)
  if expand_batch_dim:
    blurred = tf.squeeze(blurred, axis=0)
  return blurred
def random_crop_with_resize(image, height, width, p=1.0):
  """Randomly crop and resize an image.

  Args:
    image: `Tensor` representing an image of arbitrary size.
    height: Height of output image.
    width: Width of output image.
    p: Probability of applying this transformation.

  Returns:
    A preprocessed image `Tensor`.
  """
  return random_apply(
      lambda img: crop_and_resize(img, height, width), p=p, x=image)
def random_color_jitter(image, color_jitter_strength, p=1.0, impl='simclrv2'):
  """Randomly color-jitter and randomly grayscale an image.

  With probability `p`, applies color jitter (probability 0.8) followed by
  grayscale conversion (probability 0.2).

  Args:
    image: `Tensor` representing an image of arbitrary size.
    color_jitter_strength: strength of the color jitter.
    p: Probability of applying the combined transformation.
    impl: 'simclrv1' or 'simclrv2'. Whether to use simclrv1 or simclrv2's
      version of random brightness.

  Returns:
    A preprocessed image `Tensor`.
  """
  def _jitter_then_grayscale(img):
    jitter = functools.partial(
        color_jitter, strength=color_jitter_strength, impl=impl)
    img = random_apply(jitter, p=0.8, x=img)
    return random_apply(to_grayscale, p=0.2, x=img)
  return random_apply(_jitter_then_grayscale, p=p, x=image)
def random_blur(image, height, width, p=1.0):
  """Randomly blur an image.

  Args:
    image: `Tensor` representing an image of arbitrary size.
    height: Height of output image; also sets the blur kernel size.
    width: Width of output image (unused).
    p: probability of applying this transformation.

  Returns:
    A preprocessed image `Tensor`.
  """
  del width  # Unused: the kernel size is derived from height only.
  def _blur(img):
    sigma = tf.random.uniform([], 0.1, 2.0, dtype=tf.float32)
    return gaussian_blur(
        img, kernel_size=height // 10, sigma=sigma, padding='SAME')
  return random_apply(_blur, p=p, x=image)
def batch_random_blur(images_list, height, width, blur_probability=0.5):
  """Apply efficient batch data transformations.
  Args:
    images_list: a list of image tensors.
    height: the height of image.
    width: the width of image.
    blur_probability: the probaility to apply the blur operator.
  Returns:
    Preprocessed feature list.
  """
  def generate_selector(p, bsz):
    # Per-image Bernoulli mask of shape [batch, 1, 1, 1].
    shape = [bsz, 1, 1, 1]
    selector = tf.cast(
        tf.less(tf.random_uniform(shape, 0, 1, dtype=tf.float32), p),
        tf.float32)
    return selector
  new_images_list = []
  for images in images_list:
    # Blur the whole batch unconditionally, then keep the blurred version
    # only where the per-image selector fired; this avoids per-image
    # branching inside the graph.
    images_new = random_blur(images, height, width, p=1.)
    selector = generate_selector(blur_probability, tf.shape(images)[0])
    images = images_new * selector + images * (1 - selector)
    images = tf.clip_by_value(images, 0., 1.)
    new_images_list.append(images)
  return new_images_list
def preprocess_for_train(image,
                         height,
                         width,
                         color_jitter_strength,
                         color_distort=True,
                         crop=True,
                         flip=True,
                         impl='simclrv2'):
  """Preprocesses the given image for training.

  Args:
    image: `Tensor` representing an image of arbitrary size.
    height: Height of output image.
    width: Width of output image.
    color_jitter_strength: color jitter strength.
    color_distort: Whether to apply the color distortion.
    crop: Whether to crop the image.
    flip: Whether or not to flip left and right of an image.
    impl: 'simclrv1' or 'simclrv2'. Whether to use simclrv1 or simclrv2's
      version of random brightness.

  Returns:
    A preprocessed image `Tensor` of shape [height, width, 3] in [0, 1].
  """
  # Assemble the enabled augmentations, then apply them in order:
  # crop -> flip -> color distortion.
  transforms = []
  if crop:
    transforms.append(lambda img: random_crop_with_resize(img, height, width))
  if flip:
    transforms.append(tf.image.random_flip_left_right)
  if color_distort:
    transforms.append(
        lambda img: random_color_jitter(img, color_jitter_strength, impl=impl))
  for transform in transforms:
    image = transform(image)
  image = tf.reshape(image, [height, width, 3])
  return tf.clip_by_value(image, 0., 1.)
def preprocess_for_eval(image, height, width, crop=True):
  """Preprocesses the given image for evaluation.

  Args:
    image: `Tensor` representing an image of arbitrary size.
    height: Height of output image.
    width: Width of output image.
    crop: Whether or not to (center) crop the test images.

  Returns:
    A preprocessed image `Tensor` of shape [height, width, 3] in [0, 1].
  """
  if crop:
    image = center_crop(image, height, width, crop_proportion=CROP_PROPORTION)
  reshaped = tf.reshape(image, [height, width, 3])
  return tf.clip_by_value(reshaped, 0., 1.)
| google-research/l2p | augment/color_util.py | Python | apache-2.0 | 17,401 | 0.006724 |
from django.conf.urls import patterns, url
from cart import views
# URL routes for the shopping cart app (old-style Django `patterns`).
urlpatterns = patterns('',
                       url(r'^$', views.view_cart, name='view'),
                       url(r'^add/$', views.add_to_cart, name='add'),
                       url(r'^remove/$', views.remove_from_cart, name='remove'),
                       url(r'^update/$', views.update_cart, name='update'),
                       url(r'^checkout/$', views.checkout, name='checkout'),
                       url(r'^update_checkout/$', views.update_checkout, name='update_checkout'),
                       # For AJAX refreshes of the cart summary widget
                       url(r'^summary/$', views.get_cart_summary, name='get_cart_summary'),
                       )
| juntatalor/qexx | cart/urls.py | Python | mit | 768 | 0.004049 |
# -*- coding: utf-8 -*-
import json
import os
import urllib.parse
from functools import wraps
import bottle
from bottle import (
route,
run,
jinja2_template as template,
redirect,
request,
response,
static_file,
BaseTemplate,
)
import tweepy
# Project paths: static assets live next to this module in static/.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
STATIC_DIR = os.path.join(BASE_DIR, 'static')
# Register a Jinja2 template filter `encode_query` that URL-encodes a search
# query into a "q=..." query string for use in links.
BaseTemplate.settings.update(
    {
        'filters': {
            'encode_query': lambda query: urllib.parse.urlencode({'q': query})
        }
    }
)
#######################################################################################################################
#
# Middleware
#
#######################################################################################################################
class TwitterManager(object):
    """Holds Twitter OAuth state and builds an authenticated tweepy API."""
    def __init__(self, consumer_key, consumer_secret, access_token=None,
                 access_token_secret=None, callback_url=None):
        self.consumer_key = consumer_key
        self.consumer_secret = consumer_secret
        self.access_token = access_token
        self.access_token_secret = access_token_secret
        self.callback_url = callback_url
        # OAuth request token stored between get_authorization_url() and
        # get_access_token(); None until the flow starts.
        self.request_token = None
        # tweepy.API instance; None until authenticate()/set_api() runs.
        self.api = None
    def get_authorization_url(self):
        """Start the OAuth flow and return Twitter's authorization URL.
        Raises:
            tweepy.TweepError: if the request token could not be obtained.
        """
        auth = tweepy.OAuthHandler(self.consumer_key,
                                   self.consumer_secret,
                                   self.callback_url)
        try:
            redirect_url = auth.get_authorization_url()
        except tweepy.TweepError:
            raise tweepy.TweepError('Error! Failed to get request token')
        # Keep the request token so get_access_token() can complete the flow.
        self.request_token = auth.request_token
        return redirect_url
    def get_access_token(self, verifier):
        """Exchange the OAuth verifier for an (access_token, secret) pair.
        Raises:
            tweepy.TweepError: if no request token is stored, or the
                token exchange with Twitter fails.
        """
        auth = tweepy.OAuthHandler(self.consumer_key,
                                   self.consumer_secret)
        if self.request_token is None:
            raise tweepy.TweepError("Request token not set yet.")
        auth.request_token = self.request_token
        try:
            auth.get_access_token(verifier)
        except tweepy.TweepError:
            raise tweepy.TweepError('Error! Failed to get access token')
        return (
            auth.access_token,
            auth.access_token_secret,
        )
    def set_access_token(self, key, secret):
        """Store the access token pair for later API construction."""
        self.access_token = key
        self.access_token_secret = secret
    def get_oauth_api(self, access_token, access_token_secret):
        """Return a tweepy.API authenticated with the given token pair."""
        auth = tweepy.OAuthHandler(self.consumer_key,
                                   self.consumer_secret)
        auth.set_access_token(access_token, access_token_secret)
        return tweepy.API(auth)
    def set_api(self):
        """Build self.api from the stored access token pair."""
        self.api = self.get_oauth_api(self.access_token, self.access_token_secret)
    def authenticate(self, verifier):
        """Complete OAuth: swap `verifier` for tokens and build the API."""
        token = self.get_access_token(verifier)
        self.set_access_token(*token)
        self.set_api()
class TwitterMiddleware(object):
    """WSGI middleware exposing a shared TwitterManager as environ['twitter']."""
    def __init__(self, app, tweepy_config):
        self.app = app
        self.tweepy_settings = tweepy_config
        # One manager instance is shared across all requests.
        self.tweepy_manager = TwitterManager(**self.tweepy_settings)
    def __call__(self, environ, start_response):
        environ['twitter'] = self.tweepy_manager
        return self.app(environ, start_response)
#######################################################################################################################
#
# Decorators
#
#######################################################################################################################
def login_required(f):
    """Guard a route: run it only when the Twitter client is authenticated.

    Redirects to '/' when the request's TwitterManager has no API instance.
    """
    @wraps(f)
    def _login_required(*args, **kwargs):
        manager = request.environ.get('twitter')
        if manager.api is not None:
            return f(*args, **kwargs)
        return redirect('/')
    return _login_required
#######################################################################################################################
#
# Controllers
#
#######################################################################################################################
@route('/static/<filename:path>')
def send_static(filename):
    """Serve a static asset from STATIC_DIR."""
    return static_file(filename, root=STATIC_DIR)
@route('/')
def index():
    """Render the index template (landing page)."""
    return template('index')
@route('/oauth')
def oauth():
    """Start the OAuth flow: redirect the user to Twitter's authorization URL."""
    twitter = request.environ.get('twitter')
    redirect_url = twitter.get_authorization_url()
    return redirect(redirect_url)
@route('/verify')
def verify():
    """OAuth callback: exchange the verifier for tokens, then go to /home."""
    twitter = request.environ.get('twitter')
    verifier = request.params.get('oauth_verifier')
    twitter.authenticate(verifier)
    return redirect('home')
@route('/home')
@login_required
def home():
    """Render the home page for the authenticated Twitter user."""
    twitter = request.environ.get('twitter')
    user = twitter.api.me()
    return template('home', user=user)
@route('/api/saved_searches/list')
@login_required
def get_saved_searches():
    """Return the authenticated user's saved searches as a JSON array.

    Each entry carries: id, name, query, and timestamp formatted as
    ``YYYY-MM-DD HH:MM:SS``.
    """
    twitter = request.environ.get('twitter')
    # Comprehension instead of a manual append loop (idiomatic, same output).
    data = [
        {
            'id': s.id,
            'name': s.name,
            'query': s.query,
            'timestamp': s.created_at.strftime('%Y-%m-%d %H:%M:%S'),
        }
        for s in twitter.api.saved_searches()
    ]
    response.headers['Content-Type'] = 'application/json'
    return json.dumps(data)
if __name__ == "__main__":
    # Consumer credentials come from the environment; a KeyError here means
    # TSSM_CONSUMER_KEY / TSSM_CONSUMER_SECRET are not set.
    twitter_config = {
        'consumer_key': os.environ['TSSM_CONSUMER_KEY'],
        'consumer_secret': os.environ['TSSM_CONSUMER_SECRET'],
        'callback_url': 'http://127.0.0.1:8000/verify',
    }
    app = TwitterMiddleware(bottle.app(), twitter_config)
    run(app=app, host="localhost", port=8000, debug=True, reloader=True)
| kk6/tssm | app.py | Python | mit | 5,552 | 0.001441 |
# -*- coding: utf-8 -*-
# © 2016 ACSONE SA/NV (<http://acsone.eu>)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from openerp import api, fields, models
from dateutil.rrule import (rrule,
YEARLY,
MONTHLY,
WEEKLY,
DAILY)
from dateutil.relativedelta import relativedelta
class DateRangeGenerator(models.TransientModel):
    """Wizard generating `count` consecutive date ranges of equal duration.

    Starting at `date_start`, each range spans `duration_count` units of
    `unit_of_time`. Range limits are inclusive, so each range ends one day
    before the next one starts.
    """
    _name = 'date.range.generator'

    @api.model
    def _default_company(self):
        """Return the default company for generated date ranges."""
        return self.env['res.company']._company_default_get('date.range')

    name_prefix = fields.Char('Range name prefix', required=True)
    # Fixed typo: was `strint='Start date'`, which dropped the field label.
    date_start = fields.Date(string='Start date', required=True)
    type_id = fields.Many2one(
        comodel_name='date.range.type', string='Type', required=True,
        ondelete='cascade')
    company_id = fields.Many2one(
        comodel_name='res.company', string='Company',
        default=_default_company)
    unit_of_time = fields.Selection([
        (YEARLY, 'years'),
        (MONTHLY, 'months'),
        (WEEKLY, 'weeks'),
        (DAILY, 'days')], required=True)
    duration_count = fields.Integer('Duration', required=True)
    count = fields.Integer(
        string="Number of ranges to generate", required=True)

    @api.multi
    def _compute_date_ranges(self):
        """Return a list of value dicts, one per date range to create."""
        self.ensure_one()
        # rrule yields count+1 boundaries: the start of each requested range
        # plus the start of the range after the last one.
        vals = rrule(freq=self.unit_of_time, interval=self.duration_count,
                     dtstart=fields.Date.from_string(self.date_start),
                     count=self.count + 1)
        vals = list(vals)
        date_ranges = []
        for idx, dt_start in enumerate(vals[:-1]):
            date_start = fields.Date.to_string(dt_start.date())
            # always remove 1 day for the date_end since range limits are
            # inclusive
            dt_end = vals[idx + 1].date() - relativedelta(days=1)
            date_end = fields.Date.to_string(dt_end)
            date_ranges.append({
                'name': '%s-%d' % (self.name_prefix, idx + 1),
                'date_start': date_start,
                'date_end': date_end,
                'type_id': self.type_id.id,
                'company_id': self.company_id.id})
        return date_ranges

    @api.multi
    def action_apply(self):
        """Create the computed date ranges and open the date range list view."""
        date_ranges = self._compute_date_ranges()
        if date_ranges:
            for dr in date_ranges:
                self.env['date.range'].create(dr)
        return self.env['ir.actions.act_window'].for_xml_id(
            module='date_range', xml_id='date_range_action')
| be-cloud-be/horizon-addons | server-tools/date_range/wizard/date_range_generator.py | Python | agpl-3.0 | 2,576 | 0 |
# -*- coding: utf-8 -*-
# Copyright (C) 2016 Adam Collin, Mathew Topper
# Copyright (C) 2017-2018 Mathew Topper
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Created on Wed Apr 06 15:59:04 2016
.. moduleauthor:: Adam Collin <adam.collin@ieee.org>
.. moduleauthor:: Mathew Topper <mathew.topper@dataonlygreater.com>
"""
from datetime import timedelta
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager as font_manager
from matplotlib.dates import (MONTHLY,
DateFormatter,
RRuleLocator,
date2num,
rrulewrapper)
from textwrap import wrap
from . import PlotInterface
class InstallationGanttChartPlot(PlotInterface):
    """Plot interface producing a Gantt chart of the installation phases."""

    @classmethod
    def get_name(cls):
        '''A class method for the common name of the interface.

        Returns:
          str: A unique string
        '''

        return "Installation Gantt Chart"

    @classmethod
    def declare_inputs(cls):
        '''A class method to declare all the variables required as inputs by
        this interface.

        Returns:
          list: List of inputs identifiers
        '''

        input_list = [
            "project.install_support_structure_dates",
            "project.install_devices_dates",
            "project.install_dynamic_cable_dates",
            "project.install_export_cable_dates",
            "project.install_array_cable_dates",
            "project.install_surface_piercing_substation_dates",
            "project.install_subsea_collection_point_dates",
            "project.install_cable_protection_dates",
            "project.install_driven_piles_dates",
            "project.install_direct_embedment_dates",
            "project.install_gravity_based_dates",
            "project.install_pile_anchor_dates",
            "project.install_drag_embedment_dates",
            "project.install_suction_embedment_dates",
            "project.device_phase_installation_times",
            "project.electrical_phase_installation_times",
            "project.mooring_phase_installation_times",
            "project.installation_plan"]

        return input_list

    @classmethod
    def declare_optional(cls):
        '''A class method to declare the optional inputs: every input to this
        plot is optional.

        Returns:
          list: List of optional input identifiers
        '''

        # All inputs are optional; reuse the declared inputs list rather than
        # maintaining a duplicate copy (the two lists were identical).
        return cls.declare_inputs()

    @classmethod
    def declare_id_map(cls):
        '''Declare the mapping for variable identifiers in the data description
        to local names for use in the interface. This helps isolate changes in
        the data description or interface from effecting the other.

        Returns:
          dict: Mapping of local to data description variable identifiers
        '''

        id_map = {"install_support_structure_dates":
                      "project.install_support_structure_dates",
                  "install_devices_dates":
                      "project.install_devices_dates",
                  "install_dynamic_cable_dates":
                      "project.install_dynamic_cable_dates",
                  "install_export_cable_dates":
                      "project.install_export_cable_dates",
                  "install_array_cable_dates":
                      "project.install_array_cable_dates",
                  "install_surface_piercing_substation_dates":
                      "project.install_surface_piercing_substation_dates",
                  "install_subsea_collection_point_dates":
                      "project.install_subsea_collection_point_dates",
                  "install_cable_protection_dates":
                      "project.install_cable_protection_dates",
                  "install_driven_piles_dates":
                      "project.install_driven_piles_dates",
                  "install_direct_embedment_dates":
                      "project.install_direct_embedment_dates",
                  "install_gravity_based_dates":
                      "project.install_gravity_based_dates",
                  "install_pile_anchor_dates":
                      "project.install_pile_anchor_dates",
                  "install_drag_embedment_dates":
                      "project.install_drag_embedment_dates",
                  "install_suction_embedment_dates":
                      "project.install_suction_embedment_dates",
                  "install_device_times":
                      "project.device_phase_installation_times",
                  "install_electrical_times":
                      "project.electrical_phase_installation_times",
                  "install_mooring_times":
                      "project.mooring_phase_installation_times",
                  "plan": "project.installation_plan"
                  }

        return id_map

    def connect(self):
        """Build the Gantt chart figure and store it on self.fig_handle."""

        self.fig_handle = installation_gantt_chart(
            self.data.plan,
            self.data.install_support_structure_dates,
            self.data.install_devices_dates,
            self.data.install_dynamic_cable_dates,
            self.data.install_export_cable_dates,
            self.data.install_array_cable_dates,
            self.data.install_surface_piercing_substation_dates,
            self.data.install_subsea_collection_point_dates,
            self.data.install_cable_protection_dates,
            self.data.install_driven_piles_dates,
            self.data.install_direct_embedment_dates,
            self.data.install_gravity_based_dates,
            self.data.install_pile_anchor_dates,
            self.data.install_drag_embedment_dates,
            self.data.install_suction_embedment_dates,
            self.data.install_device_times,
            self.data.install_electrical_times,
            self.data.install_mooring_times)

        return
def installation_gantt_chart(plan=None,
                             install_support_structure_dates=None,
                             install_devices_dates=None,
                             install_dynamic_cable_dates=None,
                             install_export_cable_dates=None,
                             install_array_cable_dates=None,
                             install_surface_piercing_substation_dates=None,
                             install_subsea_collection_point_dates=None,
                             install_cable_protection_dates=None,
                             install_driven_piles_dates=None,
                             install_direct_embedment_dates=None,
                             install_gravity_based_dates=None,
                             install_pile_anchor_dates=None,
                             install_drag_embedment_dates=None,
                             install_suction_embedment_dates=None,
                             install_device_times=None,
                             install_electrical_times=None,
                             install_mooring_times=None):
    """Draw a Gantt chart of the installation phases listed in `plan`.

    For each phase present in `plan`, the matching *_dates mapping supplies
    Start/Depart/End dates and the relevant *_times table supplies the
    "Preparation" duration (hours). Each phase is drawn as one horizontal
    bar overlaid with preparation (red), departure delay (yellow) and sea
    time (green) segments.

    Returns the matplotlib figure, or None when `plan` is None.
    """
    if plan is None: return None
    installation = {}
    # sort data
    # Build `installation`: phase description -> Gantt date dict, but only
    # for the phases that actually appear in the installation plan.
    if any('support structure' in phase for phase in plan):
        component_time = install_device_times.loc['Support Structure']
        values = installation_gantt_dates(install_support_structure_dates,
                                          component_time["Preparation"])
        installation['Installation of support structure'] = values
    if any('devices' in phase for phase in plan):
        component_time = install_device_times.loc['Device']
        values = installation_gantt_dates(install_devices_dates,
                                          component_time["Preparation"])
        installation['Installation of devices'] = values
    if any('dynamic' in phase for phase in plan):
        component_time = install_electrical_times.loc['Dynamic Cables']
        values = installation_gantt_dates(install_dynamic_cable_dates,
                                          component_time["Preparation"])
        installation['Installation of dynamic cables'] = values
    if any('export' in phase for phase in plan):
        component_time = install_electrical_times.loc['Export Cables']
        values = installation_gantt_dates(install_export_cable_dates,
                                          component_time["Preparation"])
        installation['Installation of static export cables'] = values
    if any('array' in phase for phase in plan):
        component_time = install_electrical_times.loc['Inter-Array Cables']
        values = installation_gantt_dates(install_array_cable_dates,
                                          component_time["Preparation"])
        installation['Installation of static array cables'] = values
    if any('surface piercing' in phase for phase in plan):
        component_time = install_electrical_times.loc['Collection Points']
        values = installation_gantt_dates(
            install_surface_piercing_substation_dates,
            component_time["Preparation"])
        installation[
            'Installation of collection point (surface piercing)'] = values
    if any('seabed' in phase for phase in plan):
        component_time = install_electrical_times.loc['Collection Points']
        values = installation_gantt_dates(
            install_subsea_collection_point_dates,
            component_time["Preparation"])
        installation['Installation of collection point (seabed)'] = values
    if any('cable protection' in phase for phase in plan):
        component_time = install_electrical_times.loc[
            'External Cable Protection']
        values = installation_gantt_dates(install_cable_protection_dates,
                                          component_time["Preparation"])
        installation['Installation of external cable protection'] = values
    if any('driven piles' in phase for phase in plan):
        component_time = install_mooring_times.loc['Driven Piles']
        values = installation_gantt_dates(install_driven_piles_dates,
                                          component_time["Preparation"])
        installation['Installation of driven piles anchors/foundations'] =\
            values
    if any('direct-embedment' in phase for phase in plan):
        component_time = install_mooring_times.loc["Direct-Embedment Anchors"]
        values = installation_gantt_dates(install_direct_embedment_dates,
                                          component_time["Preparation"])
        installation[
            'Installation of mooring systems with direct-embedment '
            'anchors'] = values
    if any('gravity based' in phase for phase in plan):
        component_time = install_mooring_times.loc[
            "Gravity Based Foundations"]
        values = installation_gantt_dates(install_gravity_based_dates,
                                          component_time["Preparation"])
        installation['Installation of gravity based foundations'] = values
    if any('pile anchor' in phase for phase in plan):
        component_time = install_mooring_times.loc["Pile Anchors"]
        values = installation_gantt_dates(install_pile_anchor_dates,
                                          component_time["Preparation"])
        installation[
            'Installation of mooring systems with pile anchors'] = values
    if any('drag-embedment' in phase for phase in plan):
        component_time = install_mooring_times.loc["Drag-Embedment Anchors"]
        values = installation_gantt_dates(install_drag_embedment_dates,
                                          component_time["Preparation"])
        installation[
            'Installation of mooring systems with drag-embedment '
            'anchors'] = values
    if any('suction-embedment' in phase for phase in plan):
        component_time = install_mooring_times.loc["Suction-Caisson Anchors"]
        values = installation_gantt_dates(install_suction_embedment_dates,
                                          component_time["Preparation"])
        installation[
            'Installation of mooring systems with suction-embedment '
            'anchors'] = values
    # Data
    num_phases = len(plan)
    # One y slot per phase, spaced 0.5 apart starting at 0.5.
    pos = np.arange(0.5, num_phases / 2. + 1.0, 0.5)
    ylabels = []
    customDates = []
    # for operation in Installation['OPERATION']:
    for operation in plan:
        l_phase = operation
        log_phase_descript = l_phase
        ylabels.append(log_phase_descript)
        # The plotted start is the recorded start minus the preparation time.
        start_dt = (installation[l_phase]['Start date'] -
                    timedelta(hours=installation[l_phase]['Prep time']))
        prep_dt = installation[l_phase]['Start date']
        depart_dt = installation[l_phase]['Depart date']
        end_dt = installation[l_phase]['End date']
        customDates.append([date2num(start_dt),
                            date2num(prep_dt),
                            date2num(depart_dt),
                            date2num(end_dt)])
    task_dates = {}
    for i,task in enumerate(ylabels):
        task_dates[task] = customDates[i]
    fig = plt.figure()
    ax = plt.subplot2grid((1, 2), (0, 1), colspan=1)
    # Plot the data:
    # The first phase is drawn explicitly so its thin bars can carry the
    # legend labels; remaining phases are drawn unlabelled in the loop below.
    (start_date,
     end_prep_begin_wait_date,
     end_wait_begin_sea_date,
     end_date) = task_dates[ylabels[0]]
    ax.barh(0.5, (end_date - start_date),
            left=start_date,
            height=0.4,
            align='center',
            color='blue',
            alpha = 0.75)
    ax.barh(0.4, (end_prep_begin_wait_date - start_date),
            left=start_date,
            height=0.1,
            align='center',
            color='red',
            alpha=0.75,
            label="Prep Time")
    ax.barh(0.5, (end_wait_begin_sea_date - end_prep_begin_wait_date),
            left=end_prep_begin_wait_date,
            height=0.1,
            align='center',
            color='yellow',
            alpha=0.75,
            label="Departure Delay")
    ax.barh(0.6, (end_date - end_wait_begin_sea_date),
            left=end_wait_begin_sea_date,
            height=0.1,
            align='center',
            color='green',
            alpha=0.75,
            label="Sea Time")
    for i in range(0,len(ylabels)-1):
        (start_date,
         end_prep_begin_wait_date,
         end_wait_begin_sea_date,
         end_date) = task_dates[ylabels[i+1]]
        ax.barh((i * 0.5) + 1.0, (end_date - start_date),
                left=start_date,
                height=0.4,
                align='center',
                color='blue',
                alpha=0.75)
        ax.barh((i * 0.5) + 0.9, (end_prep_begin_wait_date - start_date),
                left=start_date,
                height=0.1,
                align='center',
                color='red',
                alpha=0.75)
        ax.barh((i * 0.5) + 1.0,
                (end_wait_begin_sea_date - end_prep_begin_wait_date),
                left=end_prep_begin_wait_date,
                height=0.1,
                align='center',
                color='yellow',
                alpha=0.75)
        ax.barh((i * 0.5) + 1.1, (end_date - end_wait_begin_sea_date),
                left=end_wait_begin_sea_date,
                height=0.1,
                align='center',
                color='green',
                alpha=0.75)
    # Format the y-axis
    # Wrap long phase descriptions so the axis labels stay readable.
    ylabels = ['\n'.join(wrap(l, 40)) for l in ylabels]
    plt.yticks(pos, ylabels)
    # Format the x-axis
    ax.axis('tight')
    ax.set_ylim(ymin=-0.1, ymax=(num_phases) / 2 + 1.0)
    ax.grid(color='g', linestyle=':')
    ax.xaxis_date() #Tell matplotlib that these are dates...
    # Monthly major ticks, rendered as e.g. "Jan '17".
    rule = rrulewrapper(MONTHLY, interval=1)
    loc = RRuleLocator(rule)
    formatter = DateFormatter("%b '%y")
    ax.xaxis.set_major_locator(loc)
    ax.xaxis.set_major_formatter(formatter)
    for label in ax.get_xticklabels():
        label.set_rotation(30)
    # Format the legend
    font = font_manager.FontProperties(size='small')
    ax.legend(loc=0, prop=font)
    # Finish up
    # Invert the y-axis so the first phase appears at the top of the chart.
    ax.invert_yaxis()
    fig.autofmt_xdate()
    return fig
def installation_gantt_dates(dates, prep_time):
    """Build the milestone-date dictionary used by the installation Gantt plot.

    Args:
        dates (dict): Mapping with "Start", "Depart" and "End" keys.
        prep_time: Preparation duration associated with the installation.

    Returns:
        dict: Keys "Start date", "Depart date", "End date" and "Prep time".
    """
    gantt_dict = {'Prep time': prep_time}

    # Re-key the milestone dates to the labels expected by the plot code.
    for src_key, dst_key in (("Start", 'Start date'),
                             ("Depart", 'Depart date'),
                             ("End", 'End date')):
        gantt_dict[dst_key] = dates[src_key]

    return gantt_dict
| DTOcean/dtocean-core | dtocean_core/interfaces/plots_installation.py | Python | gpl-3.0 | 18,785 | 0.000799 |
from server.util import ScriptManager
def objectClick2_2213(player, obId, obX, obY):
    """Second-option click handler for object 2213: open the bank screen."""
    player_assistant = player.getPA()
    player_assistant.openUpBank()
def objectClick2_11758(player, obId, obX, obY):
player.getPA().openUpBank() | TheRealVestige/VestigeX-Server | Data/scripts/player/objects/objectclick2.py | Python | gpl-3.0 | 196 | 0.02551 |
import datetime
from airflow.models import DAG
from airflow.operators.latest_only_operator import LatestOnlyOperator
import utils.helpers as helpers
# Default arguments applied to every task in this DAG.
args = {
    'owner': 'airflow',
    'depends_on_past': False,
    'start_date': datetime.datetime(2017, 3, 1),
    'retries': 1,
    'retry_delay': datetime.timedelta(minutes=10),
}
# Daily DAG limited to one concurrent run; Airflow discovers it via the
# module-level `dag` name, so do not rename this variable.
dag = DAG(
    dag_id='data_contributions',
    default_args=args,
    max_active_runs=1,
    schedule_interval='@daily'
)
# LatestOnlyOperator gates downstream tasks so only the most recent
# scheduled run executes them (skips backfill/catch-up runs).
latest_only_task = LatestOnlyOperator(
    task_id='latest_only',
    dag=dag,
)
# Processor task created by the project helper for the data_contributions set.
data_contributions_processor_task = helpers.create_processor_task(
    name='data_contributions',
    dag=dag
)
# Run the processor only after the latest-only gate passes.
data_contributions_processor_task.set_upstream(latest_only_task)
| opentrials/opentrials-airflow | dags/data_contributions.py | Python | mpl-2.0 | 718 | 0 |
# -*- coding: utf-8 -*-
# -------------------------------------------------------------------------------
# Name: sfp_XXX
# Purpose: Description of the plug-in.
#
# Author: Name and e-mail address
#
# Created: Date
# Copyright: (c) Name
# Licence: GPL
# -------------------------------------------------------------------------------
from sflib import SpiderFoot, SpiderFootPlugin, SpiderFootEvent
class sfp_XXX(SpiderFootPlugin):
    """Name:Description"""
    # NOTE(review): SpiderFoot appears to parse this class docstring as
    # "Name:Description" module metadata -- keep the colon-separated
    # format when filling it in.
    # Default options
    opts = {}
    # Option descriptions
    optdescs = {
        # For each option in opts you should have a key/value pair here
        # describing it. It will end up in the UI to explain the option
        # to the end-user.
    }
    # Be sure to completely clear any class variables in setup()
    # or you run the risk of data persisting between scan runs.
    # Target
    results = dict()
    def setup(self, sfc, userOpts=dict()):
        # NOTE(review): the dict() default is created once and shared
        # across calls; harmless here because it is only read, never
        # mutated.
        self.sf = sfc
        self.results = dict()
        # Clear / reset any other class member variables here
        # or you risk them persisting between threads.
        for opt in userOpts.keys():
            self.opts[opt] = userOpts[opt]
    # What events is this module interested in for input
    # * = be notified about all events.
    def watchedEvents(self):
        return ["*"]
    # What events this module produces
    # This is to support the end user in selecting modules based on events
    # produced.
    def producedEvents(self):
        return None
    # Handle events sent to this module
    def handleEvent(self, event):
        eventName = event.eventType
        srcModuleName = event.module
        eventData = event.data
        # If you are processing TARGET_WEB_CONTENT from sfp_spider, this is how you
        # would get the source of that raw data (e.g. a URL.)
        # NOTE(review): assumes event.sourceEvent is always set; confirm
        # for root events before relying on it.
        eventSource = event.sourceEvent.data
        self.sf.debug("Received event, " + eventName + ", from " + srcModuleName)
        # DO SOMETHING HERE
        # Notify other modules of what you've found
        evt = SpiderFootEvent("EVENT_CODE_HERE", "data here", self.__name__, event.sourceEvent)
        self.notifyListeners(evt)
        return None
    # If you intend for this module to act on its own (e.g. not solely rely
    # on events from other modules, then you need to have a start() method
    # and within that method call self.checkForStop() to see if you've been
    # politely asked by the controller to stop your activities (user abort.)
# End of sfp_XXX class
| LubyRuffy/spiderfoot | modules/sfp_template.py | Python | gpl-2.0 | 2,631 | 0.00152 |
import os
import sys
from robot.api import logger
from keywordgroup import KeywordGroup
from robot.libraries.BuiltIn import BuiltIn
try:
from robot.libraries.BuiltIn import RobotNotRunningError
except ImportError:
RobotNotRunningError = AttributeError
class Logging(KeywordGroup):

    # Private

    def _debug(self, message):
        """Write ``message`` to the Robot log at DEBUG level."""
        logger.debug(message)

    def _get_log_dir(self):
        """Return the directory Robot Framework is logging into.

        Falls back to the current working directory when no Robot run is
        active.
        """
        try:
            robot_vars = BuiltIn().get_variables()
            log_file = robot_vars['${LOG FILE}']
            if log_file != 'NONE':
                return os.path.dirname(log_file)
            return robot_vars['${OUTPUTDIR}']
        except RobotNotRunningError:
            return os.getcwd()

    def _html(self, message):
        """Log ``message`` as HTML (not escaped, not echoed to console)."""
        logger.info(message, True, False)

    def _info(self, message):
        """Write ``message`` to the Robot log at INFO level."""
        logger.info(message)

    def _log(self, message, level='INFO'):
        """Dispatch ``message`` to the handler matching ``level``.

        Unknown levels are silently ignored, as before.
        """
        handler = {
            'INFO': self._info,
            'DEBUG': self._debug,
            'WARN': self._warn,
            'HTML': self._html,
        }.get(level.upper())
        if handler is not None:
            handler(message)

    def _log_list(self, items, what='item'):
        """Log every entry of ``items`` and return the list unchanged."""
        suffix = '' if len(items) == 1 else 's'
        lines = ['Altogether %d %s%s.' % (len(items), what, suffix)]
        for position, item in enumerate(items, start=1):
            lines.append('%d: %s' % (position, item))
        self._info('\n'.join(lines))
        return items

    def _warn(self, message):
        """Write ``message`` to the Robot log at WARN level."""
        logger.warn(message)
| overfly83/bjrobot | src/BJRobot/keywords/logging.py | Python | mit | 1,462 | 0.004788 |
"""Layout for images and other replaced elements.
See http://dev.w3.org/csswg/css-images-3/#sizing
"""
from .min_max import handle_min_max_height, handle_min_max_width
from .percent import percentage
def default_image_sizing(intrinsic_width, intrinsic_height, intrinsic_ratio,
                         specified_width, specified_height,
                         default_width, default_height):
    """Default sizing algorithm for the concrete object size.

    Return a ``(concrete_width, concrete_height)`` tuple.

    See http://dev.w3.org/csswg/css-images-3/#default-sizing

    """
    # Normalize 'auto' to None so both mean "unspecified" below.
    width = None if specified_width == 'auto' else specified_width
    height = None if specified_height == 'auto' else specified_height

    if width is not None and height is not None:
        # Both dimensions specified: use them as-is.
        return width, height

    if width is not None:
        # Only the width is specified: derive the height from the ratio,
        # then from the intrinsic height, then from the default height.
        if intrinsic_ratio is not None:
            return width, width / intrinsic_ratio
        if intrinsic_height is not None:
            return width, intrinsic_height
        return width, default_height

    if height is not None:
        # Only the height is specified: mirror of the case above.
        if intrinsic_ratio is not None:
            return height * intrinsic_ratio, height
        if intrinsic_width is not None:
            return intrinsic_width, height
        return default_width, height

    # Nothing specified: use the intrinsic dimensions when any are known,
    # otherwise contain the default size within the intrinsic ratio.
    if intrinsic_width is not None or intrinsic_height is not None:
        return default_image_sizing(
            intrinsic_width, intrinsic_height, intrinsic_ratio,
            intrinsic_width, intrinsic_height, default_width,
            default_height)
    return contain_constraint_image_sizing(
        default_width, default_height, intrinsic_ratio)
def contain_constraint_image_sizing(constraint_width, constraint_height,
                                    intrinsic_ratio):
    """Contain constraint sizing algorithm for the concrete object size.

    Return a ``(concrete_width, concrete_height)`` tuple.

    See http://dev.w3.org/csswg/css-images-3/#contain-constraint

    """
    # Delegate to the shared implementation; cover=False means the result
    # fits entirely inside the constraint rectangle.
    return _constraint_image_sizing(
        constraint_width, constraint_height, intrinsic_ratio, cover=False)
def cover_constraint_image_sizing(constraint_width, constraint_height,
                                  intrinsic_ratio):
    """Cover constraint sizing algorithm for the concrete object size.

    Return a ``(concrete_width, concrete_height)`` tuple.

    See http://dev.w3.org/csswg/css-images-3/#cover-constraint

    """
    # Delegate to the shared implementation; cover=True means the result
    # fills the constraint rectangle, overflowing on one axis.
    return _constraint_image_sizing(
        constraint_width, constraint_height, intrinsic_ratio, cover=True)
def _constraint_image_sizing(constraint_width, constraint_height,
                             intrinsic_ratio, cover):
    """Shared implementation of the contain/cover constraint algorithms.

    With ``cover`` false the result fits inside the constraint rectangle;
    with ``cover`` true it fills the rectangle.  Without an intrinsic
    ratio the constraint rectangle is returned unchanged.
    """
    if intrinsic_ratio is None:
        return constraint_width, constraint_height
    wider_than_constraint = (
        constraint_width > constraint_height * intrinsic_ratio)
    if cover != wider_than_constraint:
        # The height is the binding dimension: derive the width from it.
        return constraint_height * intrinsic_ratio, constraint_height
    # The width is the binding dimension: derive the height from it.
    return constraint_width, constraint_width / intrinsic_ratio
def replacedbox_layout(box):
    """Compute the drawn size and position of replaced content in ``box``.

    Return a ``(draw_width, draw_height, position_x, position_y)`` tuple
    honouring the ``object-fit`` and ``object-position`` properties.
    """
    # TODO: respect box-sizing ?
    object_fit = box.style['object_fit']
    position = box.style['object_position']
    image = box.replacement
    intrinsic_width, intrinsic_height, intrinsic_ratio = (
        image.get_intrinsic_size(
            box.style['image_resolution'], box.style['font_size']))
    if None in (intrinsic_width, intrinsic_height):
        # Missing intrinsic dimension(s): substitute a size contained in
        # the box, keeping the ratio when one is known.
        intrinsic_width, intrinsic_height = contain_constraint_image_sizing(
            box.width, box.height, intrinsic_ratio)
    if object_fit == 'fill':
        draw_width, draw_height = box.width, box.height
    else:
        if object_fit == 'contain' or object_fit == 'scale-down':
            draw_width, draw_height = contain_constraint_image_sizing(
                box.width, box.height, intrinsic_ratio)
        elif object_fit == 'cover':
            draw_width, draw_height = cover_constraint_image_sizing(
                box.width, box.height, intrinsic_ratio)
        else:
            assert object_fit == 'none', object_fit
            draw_width, draw_height = intrinsic_width, intrinsic_height
        if object_fit == 'scale-down':
            # 'scale-down' is the smaller of the 'none' and 'contain' sizes.
            draw_width = min(draw_width, intrinsic_width)
            draw_height = min(draw_height, intrinsic_height)
    # object-position: (origin keyword, offset) pair per axis; offsets may
    # be percentages of the leftover space (ref_x/ref_y).
    origin_x, position_x, origin_y, position_y = position[0]
    ref_x = box.width - draw_width
    ref_y = box.height - draw_height
    position_x = percentage(position_x, ref_x)
    position_y = percentage(position_y, ref_y)
    if origin_x == 'right':
        position_x = ref_x - position_x
    if origin_y == 'bottom':
        position_y = ref_y - position_y
    # Convert from content-box-relative to absolute coordinates.
    position_x += box.content_box_x()
    position_y += box.content_box_y()
    return draw_width, draw_height, position_x, position_y
@handle_min_max_width
def replaced_box_width(box, containing_block):
    """Set the used width for replaced boxes.

    Uses the replacement's intrinsic width/height/ratio, falling back to
    a 300px device-independent default (CSS 2.1 section 10.3.2).
    """
    from .block import block_level_width
    width, height, ratio = box.replacement.get_intrinsic_size(
        box.style['image_resolution'], box.style['font_size'])
    # This algorithm simply follows the different points of the specification:
    # http://www.w3.org/TR/CSS21/visudet.html#inline-replaced-width
    if box.height == 'auto' and box.width == 'auto':
        if width is not None:
            # Point #1
            box.width = width
        elif ratio is not None:
            if height is not None:
                # Point #2 first part
                box.width = height * ratio
            else:
                # Point #3
                block_level_width(box, containing_block)
    if box.width == 'auto':
        if ratio is not None:
            # Point #2 second part
            box.width = box.height * ratio
        elif width is not None:
            # Point #4
            box.width = width
        else:
            # Point #5
            # It's pretty useless to rely on device size to set width.
            box.width = 300
@handle_min_max_height
def replaced_box_height(box):
    """Compute and set the used height for replaced boxes.

    See http://www.w3.org/TR/CSS21/visudet.html#inline-replaced-height

    The intrinsic height wins, then the width scaled by the intrinsic
    ratio, then the 150px device-independent fallback.

    The original implementation duplicated the branch ladder: the first
    pass could set ``box.height`` to ``None`` (when both dimensions were
    'auto' and the intrinsic height was unknown), making the 150px
    fallback unreachable, and a later ``ratio is not None`` branch could
    divide by a zero ratio.  This version guards both cases.
    """
    width, height, ratio = box.replacement.get_intrinsic_size(
        box.style['image_resolution'], box.style['font_size'])
    # Test 'auto' on the computed width, not the used width
    if box.height == 'auto' and box.width == 'auto':
        # Both dimensions are auto: use the intrinsic height when known.
        if height is not None:
            box.height = height
    elif box.height == 'auto' and ratio:
        box.height = box.width / ratio
    if box.height == 'auto':
        if height is not None:
            box.height = height
        else:
            # It's pretty useless to rely on device size to set height.
            box.height = 150
def inline_replaced_box_layout(box, containing_block):
    """Lay out an inline :class:`boxes.ReplacedBox` ``box``."""
    # Inline replaced boxes resolve every 'auto' margin to zero.
    for attribute in ('margin_top', 'margin_right',
                      'margin_bottom', 'margin_left'):
        if getattr(box, attribute) == 'auto':
            setattr(box, attribute, 0)
    inline_replaced_box_width_height(box, containing_block)
def inline_replaced_box_width_height(box, containing_block):
    """Resolve the used width and height of an inline replaced box."""
    both_auto = (
        box.style['width'] == 'auto' and box.style['height'] == 'auto')
    if not both_auto:
        # At least one dimension is specified: apply min/max normally.
        replaced_box_width(box, containing_block)
        replaced_box_height(box)
    else:
        # Both auto: compute the unconstrained sizes first, then apply
        # the CSS 2.1 min/max table for auto-sized replaced elements.
        replaced_box_width.without_min_max(box, containing_block)
        replaced_box_height.without_min_max(box)
        min_max_auto_replaced(box)
def min_max_auto_replaced(box):
    """Resolve min/max constraints on replaced elements with 'auto' sizes.

    Implements the constraint-violation table of CSS 2.1 section 10.4:
    http://www.w3.org/TR/CSS21/visudet.html#min-max-widths
    """
    width = box.width
    height = box.height
    min_width = box.min_width
    min_height = box.min_height
    # A max constraint can never be smaller than the matching min.
    max_width = max(min_width, box.max_width)
    max_height = max(min_height, box.max_height)

    if width < min_width:
        width_violation = 'min'
    elif width > max_width:
        width_violation = 'max'
    else:
        width_violation = ''

    if height < min_height:
        height_violation = 'min'
    elif height > max_height:
        height_violation = 'max'
    else:
        height_violation = ''

    # Work around divisions by zero. These are pathological cases anyway.
    # TODO: is there a cleaner way?
    if width == 0:
        width = 1e-6
    if height == 0:
        height = 1e-6

    case = (width_violation, height_violation)
    # ('', ''): nothing to do
    if case == ('max', ''):
        box.width = max_width
        box.height = max(max_width * height / width, min_height)
    elif case == ('min', ''):
        box.width = min_width
        box.height = min(min_width * height / width, max_height)
    elif case == ('', 'max'):
        box.width = max(max_height * width / height, min_width)
        box.height = max_height
    elif case == ('', 'min'):
        box.width = min(min_height * width / height, max_width)
        box.height = min_height
    elif case == ('max', 'max'):
        if max_width / width <= max_height / height:
            box.width = max_width
            box.height = max(min_height, max_width * height / width)
        else:
            box.width = max(min_width, max_height * width / height)
            box.height = max_height
    elif case == ('min', 'min'):
        if min_width / width <= min_height / height:
            box.width = min(max_width, min_height * width / height)
            box.height = min_height
        else:
            box.width = min_width
            box.height = min(max_height, min_width * height / width)
    elif case == ('min', 'max'):
        box.width = min_width
        box.height = max_height
    elif case == ('max', 'min'):
        box.width = max_width
        box.height = min_height
def block_replaced_box_layout(context, box, containing_block):
    """Lay out the block :class:`boxes.ReplacedBox` ``box``.

    Return the standard block layout tuple
    ``(box, resume_at, next_page, adjoining_margins, collapsing_through)``.
    """
    from .block import block_level_width
    from .float import avoid_collisions
    # Work on a copy so the caller's box is left untouched.
    box = box.copy()
    if box.style['width'] == 'auto' and box.style['height'] == 'auto':
        # Size the content without margins first, restore the computed
        # margins, then resolve the block-level width from them.
        computed_margins = box.margin_left, box.margin_right
        block_replaced_width.without_min_max(
            box, containing_block)
        replaced_box_height.without_min_max(box)
        min_max_auto_replaced(box)
        box.margin_left, box.margin_right = computed_margins
        block_level_width.without_min_max(box, containing_block)
    else:
        block_replaced_width(box, containing_block)
        replaced_box_height(box)
    # Don't collide with floats
    # http://www.w3.org/TR/CSS21/visuren.html#floats
    box.position_x, box.position_y, _ = avoid_collisions(
        context, box, containing_block, outer=False)
    # No continuation and no margin collapsing for this box.
    resume_at = None
    next_page = {'break': 'any', 'page': None}
    adjoining_margins = []
    collapsing_through = False
    return box, resume_at, next_page, adjoining_margins, collapsing_through
@handle_min_max_width
def block_replaced_width(box, containing_block):
    """Compute the used width of a block-level replaced box.

    Resolves the replaced width first, then the block-level width
    (margins, 'auto' resolution) from the containing block.
    """
    from .block import block_level_width
    # http://www.w3.org/TR/CSS21/visudet.html#block-replaced-width
    replaced_box_width.without_min_max(box, containing_block)
    block_level_width.without_min_max(box, containing_block)
| Kozea/WeasyPrint | weasyprint/layout/replaced.py | Python | bsd-3-clause | 11,239 | 0 |
from __future__ import absolute_import | rmwdeveloper/webhack | webhack/__init__.py | Python | mit | 38 | 0.026316 |
# -*- coding: utf-8 -*-
"""Provides vertical object."""
from __future__ import absolute_import
from ..entity import Entity
class Vertical(Entity):
    """Entity for the ``verticals`` collection (``vertical`` resource)."""
    collection = 'verticals'
    resource = 'vertical'
    # NOTE(review): presumably the related entities that may be expanded
    # with a vertical -- verify against the Entity base class.
    _relations = {
        'advertiser',
    }
    # Field -> converter applied when reading API responses; None keeps
    # the raw value, Entity._strpt parses timestamps.
    _pull = {
        'id': int,
        'name': None,
        'created_on': Entity._strpt,
        'updated_on': Entity._strpt,
        'version': int,
    }
    # Pushed (write) fields use the same converters as pulled ones.
    _push = _pull
    def __init__(self, session, properties=None, **kwargs):
        """Initialize the vertical from an API session and optional properties."""
        super(Vertical, self).__init__(session, properties, **kwargs)
| Cawb07/t1-python | terminalone/models/vertical.py | Python | bsd-3-clause | 596 | 0 |
from django.conf import settings
from django.utils import translation
from geotrek.tourism import models as tourism_models
from geotrek.tourism.views import TouristicContentViewSet, TouristicEventViewSet
from geotrek.trekking.management.commands.sync_rando import Command as BaseCommand
# Register mapentity models
from geotrek.tourism import urls # NOQA
class Command(BaseCommand):

    def _sync_published_media(self, lang, obj):
        """Sync the PDF and every resized picture of a published object."""
        self.sync_pdf(lang, obj)
        for picture, resized in obj.resized_pictures:
            self.sync_media_file(lang, resized)

    def sync_content(self, lang, content):
        """Sync the media files of a touristic content."""
        self._sync_published_media(lang, content)

    def sync_event(self, lang, event):
        """Sync the media files of a touristic event."""
        self._sync_published_media(lang, event)

    def sync_tourism(self, lang):
        """Sync GeoJSON layers and every published content/event for ``lang``."""
        self.sync_geojson(lang, TouristicContentViewSet, 'touristiccontents')
        self.sync_geojson(lang, TouristicEventViewSet, 'touristicevents')
        published_filter = {'published_{lang}'.format(lang=lang): True}
        content_qs = (tourism_models.TouristicContent.objects.existing()
                      .order_by('pk').filter(**published_filter))
        for content in content_qs:
            self.sync_content(lang, content)
        event_qs = (tourism_models.TouristicEvent.objects.existing()
                    .order_by('pk').filter(**published_filter))
        for event in event_qs:
            self.sync_event(lang, event)

    def sync(self):
        """Run the base sync, then add tourism pictograms and data."""
        super(Command, self).sync()
        self.sync_static_file('**', 'tourism/touristicevent.svg')
        pictogram_models = (
            tourism_models.InformationDeskType,
            tourism_models.TouristicContentCategory,
            tourism_models.TouristicContentType,
            tourism_models.TouristicEventType,
        )
        for model in pictogram_models:
            self.sync_pictograms('**', model)
        for lang in settings.MODELTRANSLATION_LANGUAGES:
            translation.activate(lang)
            self.sync_tourism(lang)
| johan--/Geotrek | geotrek/tourism/management/commands/sync_rando.py | Python | bsd-2-clause | 1,966 | 0.002543 |
"""
Copyright 2018 vidosits (https://github.com/vidosits/)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import warnings
import pytest
import numpy as np
from tensorflow import keras
from keras_retinanet import losses
from keras_retinanet.models.densenet import DenseNetBackbone
parameters = ['densenet121']
@pytest.mark.parametrize("backbone", parameters)
def test_backbone(backbone):
    """Smoke-test building, compiling and fitting a DenseNet RetinaNet."""
    # ignore warnings in this test
    warnings.simplefilter('ignore')
    num_classes = 10
    # Dummy zero image batch plus (regression, classification) targets.
    # NOTE(review): 14814 presumably matches the anchor count produced
    # for a 200x400 input -- confirm if the input shape changes.
    inputs = np.zeros((1, 200, 400, 3), dtype=np.float32)
    targets = [np.zeros((1, 14814, 5), dtype=np.float32), np.zeros((1, 14814, num_classes + 1))]
    inp = keras.layers.Input(inputs[0].shape)
    densenet_backbone = DenseNetBackbone(backbone)
    model = densenet_backbone.retinanet(num_classes=num_classes, inputs=inp)
    model.summary()
    # compile model
    model.compile(
        loss={
            'regression': losses.smooth_l1(),
            'classification': losses.focal()
        },
        optimizer=keras.optimizers.Adam(lr=1e-5, clipnorm=0.001))
    # One training step on zeros: verifies shapes and the loss wiring.
    model.fit(inputs, targets, batch_size=1)
| delftrobotics/keras-retinanet | tests/models/test_densenet.py | Python | apache-2.0 | 1,587 | 0.00063 |
# This import depends on the automake rule protoc_middleman, please make sure
# protoc_middleman has been built before run this file.
import json
import re
import os.path
# BEGIN OPENSOURCE
import sys
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir))
# END OPENSOURCE
import tmp.benchmarks_pb2 as benchmarks_pb2
# Cache of resolved filename -> (total payload bytes, average payload bytes).
__file_size_map = {}


def __get_data_size(filename):
    """Return ``(total_size, average_size)`` of the payloads in a dataset.

    ``filename`` points to a serialized ``BenchmarkDataset`` protobuf;
    relative paths are resolved against the benchmarks directory.
    Results are memoized in ``__file_size_map``.
    """
    if filename[0] != '/':
        filename = os.path.dirname(os.path.abspath(__file__)) + "/../" + filename
    if filename in __file_size_map:
        return __file_size_map[filename]
    benchmark_dataset = benchmarks_pb2.BenchmarkDataset()
    # Use a context manager so the file handle is closed deterministically
    # (the original leaked the handle returned by open()).
    with open(filename, "rb") as f:
        benchmark_dataset.ParseFromString(f.read())
    size = 0
    count = 0
    for payload in benchmark_dataset.payload:
        size += len(payload)
        count += 1
    __file_size_map[filename] = (size, 1.0 * size / count)
    return size, 1.0 * size / count
def __extract_file_name(file_name):
    """Return the short dataset name embedded in ``file_name``.

    Splits the path on ``/`` and ``.`` and returns the *last* component
    starting with ``google_message`` (e.g. ``google_message1_proto3``),
    or an empty string when none is present.
    """
    short_file_name = ""
    for name in re.split(r"[/\.]", file_name):
        # startswith replaces the original slice comparison name[:14].
        if name.startswith("google_message"):
            short_file_name = name
    return short_file_name
# Accumulator shared by every parser: one dict per (language, file, behavior).
__results = []


# CPP results example:
# [
#   "benchmarks": [
#     {
#       "bytes_per_second": int,
#       "cpu_time_ns": double,
#       "iterations": int,
#       "name: string,
#       "real_time_ns: double,
#       ...
#     },
#     ...
#   ],
#   ...
# ]
def __parse_cpp_result(filename):
    """Parse Google Benchmark JSON output and append entries to __results."""
    if filename == "":
        return
    if filename[0] != '/':
        filename = os.path.dirname(os.path.abspath(__file__)) + '/' + filename
    with open(filename, "rb") as f:
        parsed = json.loads(f.read())
    for benchmark in parsed["benchmarks"]:
        name = benchmark["name"]
        # Everything before the first "_parse_"/"_serialize" marker names
        # the data file; the remainder names the benchmarked behavior.
        prefix = re.split("(_parse_|_serialize)", name)[0]
        behavior = name[len(prefix) + 1:]
        if prefix[:2] == "BM":
            prefix = prefix[3:]
        __results.append({
            "language": "cpp",
            "dataFilename": prefix,
            "behavior": behavior,
            "throughput": benchmark["bytes_per_second"] / 2.0 ** 20,
        })
# Synthetic benchmark results example:
# [
#   "benchmarks": [
#     {
#       "cpu_time_ns": double,
#       "iterations": int,
#       "name: string,
#       "real_time_ns: double,
#       ...
#     },
#     ...
#   ],
#   ...
# ]
def __parse_synthetic_result(filename):
    """Parse synthetic (no-dataset) Google Benchmark output into __results."""
    if filename == "":
        return
    if filename[0] != "/":
        filename = os.path.dirname(os.path.abspath(__file__)) + "/" + filename
    with open(filename, "rb") as f:
        parsed = json.loads(f.read())
    for benchmark in parsed["benchmarks"]:
        # Synthetic benchmarks carry no payload, so report operations per
        # second (1e9 / ns-per-op) instead of MB/s.
        __results.append({
            "language": "cpp",
            "dataFilename": "",
            "behavior": "synthetic",
            "throughput": 10.0 ** 9 / benchmark["cpu_time_ns"],
        })
# Python results example:
# [
#   [
#     {
#       "filename": string,
#       "benchmarks": {
#         behavior: results,
#         ...
#       },
#     },
#     ...
#   ], #pure-python
#   ...
# ]
def __parse_python_result(filename):
    """Parse the Python benchmark JSON (a list of per-runtime lists)."""
    if filename == "":
        return
    if filename[0] != '/':
        filename = os.path.dirname(os.path.abspath(__file__)) + '/' + filename
    with open(filename, "rb") as f:
        runtime_results = json.loads(f.read())
    for single_runtime in runtime_results:
        for result in single_runtime:
            # Warm the size cache; the average size itself is unused here.
            _, avg_size = __get_data_size(result["filename"])
            short_name = __extract_file_name(result["filename"])
            for behavior, throughput in result["benchmarks"].items():
                __results.append({
                    "language": "python",
                    "dataFilename": short_name,
                    "behavior": behavior,
                    "throughput": throughput,
                })
# Java results example:
# [
#   {
#     "id": string,
#     "instrumentSpec": {...},
#     "measurements": [
#       {
#         "weight": float,
#         "value": {
#           "magnitude": float,
#           "unit": string
#         },
#         ...
#       },
#       ...
#     ],
#     "run": {...},
#     "scenario": {
#       "benchmarkSpec": {
#         "methodName": string,
#         "parameters": {
#            defined parameters in the benchmark: parameters value
#         },
#         ...
#       },
#       ...
#     }
#
#   },
#   ...
# ]
def __parse_java_result(filename):
    """Parse Caliper JSON output and append MB/s entries to __results."""
    if filename == "":
        return
    if filename[0] != '/':
        filename = os.path.dirname(os.path.abspath(__file__)) + '/' + filename
    with open(filename, "rb") as f:
        parsed = json.loads(f.read())
    for result in parsed:
        # Weighted average of the measured magnitudes (nanoseconds per op).
        weights = [m["weight"] for m in result["measurements"]]
        magnitudes = [m["value"]["magnitude"] for m in result["measurements"]]
        avg_time = sum(magnitudes) * 1.0 / sum(weights)
        spec = result["scenario"]["benchmarkSpec"]
        data_file = spec["parameters"]["dataFile"]
        total_size, _ = __get_data_size(data_file)
        __results.append({
            "language": "java",
            "throughput": total_size / avg_time * 1e9 / 2 ** 20,
            "behavior": spec["methodName"],
            "dataFilename": __extract_file_name(data_file),
        })
# Go benchmark results:
#
# goos: linux
# goarch: amd64
# Benchmark/.././datasets/google_message2/dataset.google_message2.pb/Unmarshal-12               3000            705784 ns/op
# Benchmark/.././datasets/google_message2/dataset.google_message2.pb/Marshal-12                 2000            634648 ns/op
# Benchmark/.././datasets/google_message2/dataset.google_message2.pb/Size-12                    5000            244174 ns/op
# Benchmark/.././datasets/google_message2/dataset.google_message2.pb/Clone-12                    300           4120954 ns/op
# Benchmark/.././datasets/google_message2/dataset.google_message2.pb/Merge-12                    300           4108632 ns/op
# PASS
# ok      _/usr/local/google/home/yilunchong/mygit/protobuf/benchmarks 124.173s
def __parse_go_result(filename):
    """Parse ``go test -bench`` text output and append entries to __results.

    Each benchmark line looks like::

        Benchmark/<data file path>/<Behavior>-<GOMAXPROCS>  <iters>  <ns/op> ns/op
    """
    if filename == "":
        return
    if filename[0] != '/':
        filename = os.path.dirname(os.path.abspath(__file__)) + '/' + filename
    # Open in text mode: the original opened with "rb" and then applied a
    # str regex to bytes lines, which raises TypeError under Python 3.
    with open(filename, "r") as f:
        for line in f:
            result_list = re.split(r"[\ \t]+", line)
            if result_list[0][:9] != "Benchmark":
                continue
            first_slash_index = result_list[0].find('/')
            last_slash_index = result_list[0].rfind('/')
            full_filename = result_list[0][first_slash_index + 1:last_slash_index]
            total_bytes, _ = __get_data_size(full_filename)
            behavior_with_suffix = result_list[0][last_slash_index + 1:]
            # Strip the trailing "-<GOMAXPROCS>" suffix, if present.
            last_dash = behavior_with_suffix.rfind("-")
            if last_dash == -1:
                behavior = behavior_with_suffix
            else:
                behavior = behavior_with_suffix[:last_dash]
            __results.append({
                "dataFilename": __extract_file_name(full_filename),
                "throughput": total_bytes / float(result_list[2]) * 1e9 / 2 ** 20,
                "behavior": behavior,
                "language": "go"
            })
# Self built json results example:
#
# [
#   {
#     "filename": string,
#     "benchmarks": {
#       behavior: results,
#       ...
#     },
#   },
#   ...
# ]
def __parse_custom_result(filename, language):
    """Parse a self-built JSON result file for ``language`` into __results."""
    if filename == "":
        return
    if filename[0] != '/':
        filename = os.path.dirname(os.path.abspath(__file__)) + '/' + filename
    with open(filename, "rb") as f:
        parsed = json.loads(f.read())
    for result in parsed:
        # Warm the size cache; the average size itself is unused here.
        _, avg_size = __get_data_size(result["filename"])
        short_name = __extract_file_name(result["filename"])
        for behavior, throughput in result["benchmarks"].items():
            __results.append({
                "language": language,
                "dataFilename": short_name,
                "behavior": behavior,
                "throughput": throughput,
            })
def __parse_js_result(filename, language):
    # JS results use the self-built JSON layout; delegate to the shared parser.
    return __parse_custom_result(filename, language)
def __parse_php_result(filename, language):
    # PHP results use the self-built JSON layout; delegate to the shared parser.
    return __parse_custom_result(filename, language)
def get_result_from_file(cpp_file="",
                         java_file="",
                         python_file="",
                         go_file="",
                         synthetic_file="",
                         node_file="",
                         php_c_file="",
                         php_file=""):
    """Parse every provided result file and return the merged entry list.

    Each argument is a path to one language's benchmark output; an empty
    string skips that language.  Entries accumulate in the module-level
    ``__results`` list, so repeated calls keep appending to it.

    (The original assigned an unused local ``results = {}``; removed.)
    """
    if cpp_file != "":
        __parse_cpp_result(cpp_file)
    if java_file != "":
        __parse_java_result(java_file)
    if python_file != "":
        __parse_python_result(python_file)
    if go_file != "":
        __parse_go_result(go_file)
    if synthetic_file != "":
        __parse_synthetic_result(synthetic_file)
    if node_file != "":
        __parse_js_result(node_file, "node")
    if php_file != "":
        __parse_php_result(php_file, "php")
    if php_c_file != "":
        __parse_php_result(php_c_file, "php")
    return __results
| scheib/chromium | third_party/protobuf/benchmarks/util/result_parser.py | Python | bsd-3-clause | 8,710 | 0.00907 |
"""Active Directory authentication backend."""
import itertools
import logging
import dns
from django.conf import settings
from django.contrib.auth.models import User
from django.utils.encoding import force_text
from django.utils.translation import ugettext_lazy as _
try:
import ldap
from ldap.dn import dn2str, str2dn
from ldap.filter import filter_format
except ImportError:
ldap = None
from reviewboard.accounts.backends.base import BaseAuthBackend
from reviewboard.accounts.forms.auth import ActiveDirectorySettingsForm
logger = logging.getLogger(__name__)
class ActiveDirectoryBackend(BaseAuthBackend):
"""Authenticate a user against an Active Directory server.
This is controlled by the following Django settings:
.. setting:: AD_DOMAIN_CONTROLLER
``AD_DOMAIN_CONTROLLER``:
The domain controller (or controllers) to connect to. This must be
a string, but multiple controllers can be specified by separating
each with a space.
This is ``auth_ad_domain_controller`` in the site configuration.
.. setting:: AD_DOMAIN_NAME
``AD_DOMAIN_NAME``:
The Active Directory domain name. This must be a string.
This is ``auth_ad_domain_name`` in the site configuration.
.. setting:: AD_FIND_DC_FROM_DNS
``AD_FIND_DC_FROM_DNS``:
Whether domain controllers should be found by using DNS. This must be
a boolean.
This is ``auth_ad_find_dc_from_dns`` in the site configuration.
.. setting:: AD_GROUP_NAME
``AD_GROUP_NAME``:
The optional name of the group to restrict available users to. This
must be a string.
This is ``auth_ad_group_name`` in the site configuration.
.. setting:: AD_OU_NAME
``AD_OU_NAME``:
The optional name of the Organizational Unit to restrict available users
to. This must be a string.
This is ``auth_ad_ou_name`` in the site configuration.
.. setting:: AD_RECURSION_DEPTH
``AD_RECURSION_DEPTH``:
Maximum depth to recurse when checking group membership. A value of
-1 means infinite depth is supported. A value of 0 turns off recursive
checks.
This is ``auth_ad_recursion_depth`` in the site configuration.
.. setting:: AD_SEARCH_ROOT
``AD_SEARCH_ROOT``:
A custom search root for entries in Active Directory. This must be a
string.
This is ``auth_ad_search_root`` in the site configuration.
.. setting:: AD_USE_TLS
``AD_USE_TLS``:
Whether to use TLS when communicating over LDAP. This must be a
boolean.
This is ``auth_ad_use_tls`` in the site configuration.
"""
backend_id = 'ad'
name = _('Active Directory')
settings_form = ActiveDirectorySettingsForm
login_instructions = \
_('Use your standard Active Directory username and password.')
    def get_domain_name(self):
        """Return the current Active Directory domain name.

        This returns the domain name as set in :setting:`AD_DOMAIN_NAME`.

        Returns:
            unicode:
            The Active Directory domain name.
        """
        # Single source of truth: the AD_DOMAIN_NAME Django setting.
        return settings.AD_DOMAIN_NAME
def get_ldap_search_root(self, user_domain=None):
"""Return the search root(s) for users in the LDAP server.
If :setting:`AD_SEARCH_ROOT` is set, then it will be used. Otherwise,
a suitable search root will be computed based on the domain name
(either the provided ``user_domain`` or the result of
:py:meth:`get_domain_name`) and any configured Organizational Unit
name (:setting:`AD_OU_NAME`).
Args:
user_domain (unicode, optional):
An explicit Active Directory domain to use for the search root.
Returns:
unicode:
The search root used to locate users.
"""
if getattr(settings, 'AD_SEARCH_ROOT', None):
return settings.AD_SEARCH_ROOT
dn = []
if settings.AD_OU_NAME:
dn.append([('ou', settings.AD_OU_NAME, None)])
if user_domain is None:
user_domain = self.get_domain_name()
if user_domain:
dn += [
[('dc', dc, None)]
for dc in user_domain.split('.')
]
return dn2str(dn)
def search_ad(self, con, filterstr, user_domain=None):
"""Search the given LDAP server based on the provided filter.
Args:
con (ldap.LDAPObject):
The LDAP connection to search.
filterstr (unicode):
The filter string used to locate objects in Active Directory.
user_domain (unicode, optional):
An explicit domain used for the search. If not provided,
:py:meth:`get_domain_name` will be used.
Returns:
list of tuple:
The list of search results. Each tuple in the list is in the form
of ``(dn, attrs)``, where ``dn`` is the Distinguished Name of the
entry and ``attrs`` is a dictionary of attributes for that entry.
"""
search_root = self.get_ldap_search_root(user_domain)
logger.debug('Search root "%s" for filter "%s"',
search_root, filterstr)
return con.search_s(search_root,
scope=ldap.SCOPE_SUBTREE,
filterstr=filterstr)
def find_domain_controllers_from_dns(self, user_domain=None):
"""Find and return the active domain controllers using DNS.
Args:
user_domain (unicode, optional):
An explicit domain used for the search. If not provided,
:py:meth:`get_domain_name` will be used.
Returns:
list of unicode:
The list of domain controllers.
"""
record_name = '_ldap._tcp.%s' % (user_domain or self.get_domain_name())
try:
answer = dns.resolver.query(record_name, 'SRV')
return [
(rdata.port, rdata.target.to_unicode(omit_final_dot=True))
for rdata in sorted(answer,
key=lambda rdata: (rdata.priority,
-rdata.weight))
]
except dns.resolver.NXDOMAIN:
# The domain could not be found. Skip it.
pass
except Exception as e:
logger.error('Unable to query for Active Directory domain '
'controllers using DNS record "%s": %s',
record_name,
e)
return []
def can_recurse(self, depth):
"""Return whether the given recursion depth is too deep.
Args:
depth (int):
The current depth to check.
Returns:
bool:
``True`` if the provided depth can be recursed into. ``False``
if it's too deep.
"""
return (settings.AD_RECURSION_DEPTH == -1 or
depth <= settings.AD_RECURSION_DEPTH)
    def get_member_of(self, con, search_results, seen=None, depth=0):
        """Return the LDAP groups for the given users.

        This iterates over the users specified in ``search_results`` and
        returns a set of groups of which those users are members.

        Args:
            con (ldap.LDAPObject):
                The LDAP connection used for checking groups memberships.

            search_results (list of tuple):
                The list of search results to check. This expects a result
                from :py:meth:`search_ad`.

            seen (set, optional):
                The set of groups that have already been seen when recursing.
                This is used internally by this method and should not be
                provided by the caller.

            depth (int, optional):
                The current recursion depth. This is used internally by this
                method and should not be provided by the caller.

        Returns:
            set:
            The group memberships found for the given users.
        """
        depth += 1
        if seen is None:
            seen = set()
        can_recurse = self.can_recurse(depth)
        for name, data in search_results:
            if name is None:
                continue
            new_groups = []
            for group_dn in data.get('memberOf', []):
                parts = itertools.chain.from_iterable(str2dn(group_dn))
                for attr, value, flags in parts:
                    if attr.lower() == 'cn':
                        # Only the first CN found in each group DN is used.
                        new_groups.append(value)
                        break
            # Snapshot the set before updating so that we only recurse into
            # groups first encountered in this entry (avoids cycles).
            old_seen = seen.copy()
            seen.update(new_groups)
            # Collect groups recursively.
            if not can_recurse:
                logger.warning('Recursive group check reached maximum '
                               'recursion depth (%s)',
                               depth)
                continue
            for group in new_groups:
                if group in old_seen:
                    continue
                # Search for groups with the specified CN. Use the CN rather
                # than the sAMAccountName so that behavior is correct when
                # the values differ (e.g. if a "pre-Windows 2000" group name
                # is set in AD).
                group_data = self.search_ad(
                    con,
                    filter_format('(&(objectClass=group)(cn=%s))', [group]))
                # The recursion shares `seen`, so the update is mostly a
                # no-op but keeps the return value explicit.
                seen.update(self.get_member_of(con, group_data,
                                               seen=seen, depth=depth))
        return seen
def get_ldap_connections(self, user_domain, request=None):
"""Return all LDAP connections used for Active Directory.
This returns an iterable of connections to the LDAP servers specified
in :setting:`AD_DOMAIN_CONTROLLER`.
Args:
user_domain (unicode, optional):
The domain for the user.
request (django.http.HttpRequest, optional):
The HTTP request from the client. This is used only for logging
purposes.
Yields:
tuple of (unicode, ldap.LDAPObject):
The connections to the configured LDAP servers.
"""
use_tls = settings.AD_USE_TLS
if settings.AD_FIND_DC_FROM_DNS:
dcs = self.find_domain_controllers_from_dns(user_domain)
else:
dcs = []
for dc_entry in settings.AD_DOMAIN_CONTROLLER.split():
if ':' in dc_entry:
try:
host, port = dc_entry.split(':')
except ValueError:
logger.warning('Invalid LDAP domain controller "%s". '
'Skipping.',
dc_entry)
else:
host = dc_entry
if use_tls:
port = '636'
else:
port = '389'
dcs.append((port, host))
for dc in dcs:
port, host = dc
if use_tls or port == '636':
ldap_scheme = 'ldaps'
else:
ldap_scheme = 'ldap'
ldap_uri = '%s://%s:%s' % (ldap_scheme, host, port)
connection = ldap.initialize(ldap_uri,
bytes_mode=False)
connection.set_option(ldap.OPT_PROTOCOL_VERSION, 3)
connection.set_option(ldap.OPT_REFERRALS, 0)
yield ldap_uri, connection
    def authenticate(self, request, username, password, **kwargs):
        """Authenticate a user against Active Directory.
        This will attempt to authenticate the user against Active Directory.
        If the username and password are valid, a user will be returned, and
        added to the database if it doesn't already exist.
        Version Changed:
            4.0:
            The ``request`` argument is now mandatory as the first positional
            argument, as per requirements in Django.
        Args:
            request (django.http.HttpRequest):
                The HTTP request from the caller. This may be ``None``.
            username (unicode):
                The username to authenticate.
            password (unicode):
                The user's password.
            **kwargs (dict, unused):
                Additional keyword arguments passed by the caller.
        Returns:
            django.contrib.auth.models.User:
                The authenticated user, or ``None`` if the user could not be
                authenticated for any reason.
        """
        username = username.strip()
        # python-ldap is an optional dependency; without it we cannot do
        # anything but log and bail.
        if ldap is None:
            logger.error('Attempted to authenticate user "%s" in LDAP, but '
                         'the python-ldap package is not installed!',
                         username,
                         request=request)
            return None
        # Accept both "user@subdomain" and "subdomain\\user" forms, folding
        # the subdomain into the lookup domain below.
        user_subdomain = ''
        if '@' in username:
            username, user_subdomain = username.split('@', 1)
        elif '\\' in username:
            user_subdomain, username = username.split('\\', 1)
        user_domain = self.get_domain_name()
        if user_subdomain:
            user_domain = '%s.%s' % (user_subdomain, user_domain)
        required_group = settings.AD_GROUP_NAME
        # Try each configured/discovered controller in turn until one
        # definitively accepts or rejects the credentials.
        for uri, connection in self.get_ldap_connections(user_domain,
                                                         request=request):
            try:
                bind_username = '%s@%s' % (username, user_domain)
                connection.simple_bind_s(bind_username, password)
                user_data = self.search_ad(
                    connection,
                    filter_format('(&(objectClass=user)(sAMAccountName=%s))',
                                  [username]),
                    user_domain)
                if not user_data:
                    return None
                # Optionally require membership in a specific AD group.
                if required_group:
                    try:
                        group_names = self.get_member_of(connection, user_data)
                    except Exception as e:
                        logger.error('Unable to retrieve groups for user '
                                     '"%s" from controller "%s": %s',
                                     username, uri, e,
                                     request=request,
                                     exc_info=1)
                        return None
                    if required_group not in group_names:
                        logger.warning('User %s is not in required group "%s" '
                                       'on controller "%s"',
                                       username, required_group, uri,
                                       request=request)
                        return None
                return self.get_or_create_user(username=username,
                                               request=request,
                                               ad_user_data=user_data)
            except ldap.SERVER_DOWN as e:
                # This controller is unreachable; try the next one.
                logger.warning('Unable to authenticate with the domain '
                               'controller "%s". It is down. Error details: '
                               '%r',
                               uri, e,
                               request=request)
                continue
            except ldap.INVALID_CREDENTIALS:
                # Bad credentials are authoritative; don't retry elsewhere.
                logger.warning('Unable to authenticate user "%s" on '
                               'domain controller "%s". The user credentials '
                               'are invalid.',
                               username, uri,
                               request=request)
                return None
            except ldap.LDAPError as e:
                logger.warning('Error talking to domain controller "%s". '
                               'Error details: %s, %r',
                               uri, type(e), e,
                               request=request)
                continue
            except Exception as e:
                logger.exception('Unexpected error occurred while '
                                 'authenticating with Active Directory: %s',
                                 e,
                                 request=request)
                continue
        logger.error('Could not contact any domain controller servers when '
                     'authenticating for user "%s".',
                     username,
                     request=request)
        return None
    def get_or_create_user(self, username, request=None, ad_user_data=None):
        """Return an existing user or create one if it doesn't exist.
        This does not authenticate the user.
        If the user does not exist in the database, but does in Active
        Directory, its information will be stored in the database for later
        lookup. However, this will only happen if ``ad_user_data`` is provided.
        Args:
            username (unicode):
                The name of the user to look up or create.
            request (django.http.HttpRequest, unused):
                The HTTP request from the client. This is unused.
            ad_user_data (list of tuple, optional):
                Data about the user to create. This is generally provided by
                :py:meth:`authenticate`.
        Returns:
            django.contrib.auth.models.User:
                The resulting user, or ``None`` if one could not be found.
        """
        # Normalize to a valid, lower-case username before lookup.
        username = self.INVALID_USERNAME_CHAR_REGEX.sub('', username).lower()
        try:
            return User.objects.get(username=username)
        except User.DoesNotExist:
            if ad_user_data is None:
                return None
            try:
                # ad_user_data comes from search_ad(): a list of (dn, attrs)
                # tuples; take the attribute dict of the first entry.
                user_info = ad_user_data[0][1]
                first_name = force_text(
                    user_info.get('givenName', [username])[0])
                last_name = force_text(user_info.get('sn', [''])[0])
                email = force_text(user_info.get(
                    'mail',
                    ['%s@%s' % (username, settings.AD_DOMAIN_NAME)])[0])
                user = User(username=username,
                            password='',
                            first_name=first_name,
                            last_name=last_name,
                            email=email)
                user.is_staff = False
                user.is_superuser = False
                # Passwords live in AD, never locally.
                user.set_unusable_password()
                user.save()
                return user
            except Exception:
                # NOTE(review): deliberately best-effort — any malformed AD
                # payload or DB error yields "no user" rather than a crash.
                return None
| reviewboard/reviewboard | reviewboard/accounts/backends/ad.py | Python | mit | 18,842 | 0 |
"""Patch the ``library_dirs`` entry in setup.py.

Reads setup.py from the current directory and rewrites the line that starts
with the ``library_dirs`` setting so it points at the directory given as the
first command-line argument.

Usage: python pyhelp.py <library-dir>
"""
import sys

# Directory that should become the value of library_dirs in setup.py.
library_dir = sys.argv[1]

# Read everything first so the file can then be safely rewritten in place;
# "with" guarantees the handles are closed even on error.
with open("setup.py", "r") as setup_file:
    lines = setup_file.readlines()

with open("setup.py", "w") as output:
    for line in lines:
        if line.startswith(" library_dirs"):
            # Replace the hard-coded library path with the requested one.
            output.write(" library_dirs=['" + library_dir + "'],\n")
        else:
            output.write(line)
| kshedstrom/pyroms | bathy_smoother/external/lp_solve_5.5/extra/Python/pyhelp.py | Python | bsd-3-clause | 381 | 0.023622 |
# Outspline - A highly modular and extensible outliner.
# Copyright (C) 2011-2014 Dario Giovannetti <dev@dariogiovannetti.net>
#
# This file is part of Outspline.
#
# Outspline is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Outspline is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Outspline. If not, see <http://www.gnu.org/licenses/>.
from collections import OrderedDict as OD
# Default configuration for the wxtrayicon plugin:
# (plugin-level options, per-section options).
data = (
    # Plugin-level options.
    OD((
        ("enabled", "on"),
    )),
    # Section options: GlobalShortcuts maps to (shortcut table, sub-sections).
    OD((
        ("GlobalShortcuts", (
            OD((
                ("minimize", "Ctrl+Shift+q"),
            )),
            OD()
        )),
    ))
)
| xguse/outspline | src/outspline/conf/plugins/wxtrayicon.py | Python | gpl-3.0 | 1,043 | 0 |
import pytest
from addons.osfstorage.models import OsfStorageFile
from api.base.settings.defaults import API_BASE
from api_tests import utils as test_utils
from api_tests.preprints.filters.test_filters import PreprintsListFilteringMixin
from api_tests.preprints.views.test_preprint_list_mixin import PreprintIsPublishedListMixin, PreprintIsValidListMixin
from framework.auth.core import Auth
from osf.models import PreprintService
from osf_tests.factories import (
PreprintFactory,
AuthUserFactory,
ProjectFactory,
SubjectFactory,
PreprintProviderFactory,
)
from website.util import permissions
class TestNodePreprintsListFiltering(PreprintsListFilteringMixin):
    """Filtering tests for a node's preprints list endpoint.

    The bulk of the test cases come from ``PreprintsListFilteringMixin``;
    the fixtures below pin all three providers' preprints to a single
    project so the node-scoped endpoint sees all of them.
    """
    @pytest.fixture()
    def user(self):
        return AuthUserFactory()
    @pytest.fixture()
    def provider_one(self):
        return PreprintProviderFactory(name='Sockarxiv')
    @pytest.fixture()
    def provider_two(self):
        return PreprintProviderFactory(name='Piratearxiv')
    @pytest.fixture()
    def provider_three(self):
        return PreprintProviderFactory(name='Mockarxiv')
    @pytest.fixture()
    def project_one(self, user):
        return ProjectFactory(creator=user)
    # All three "projects" are the same node on purpose (see class docstring).
    @pytest.fixture()
    def project_two(self, project_one):
        return project_one
    @pytest.fixture()
    def project_three(self, project_one):
        return project_one
    @pytest.fixture()
    def url(self, project_one):
        return '/{}nodes/{}/preprints/?version=2.2&'.format(API_BASE, project_one._id)
    def test_provider_filter_equals_returns_one(self, app, user, provider_two, preprint_two, provider_url):
        # Filtering by a single provider should return only that provider's
        # preprint on this node.
        expected = [preprint_two._id]
        res = app.get('{}{}'.format(provider_url, provider_two._id), auth=user.auth)
        actual = [preprint['id'] for preprint in res.json['data']]
        assert expected == actual
class TestNodePreprintIsPublishedList(PreprintIsPublishedListMixin):
    """Visibility tests for unpublished preprints on a node's preprint list.

    Overrides assert that unpublished preprints are visible to admins only,
    not to write contributors.
    """
    @pytest.fixture()
    def user_admin_contrib(self):
        return AuthUserFactory()
    @pytest.fixture()
    def provider_one(self):
        return PreprintProviderFactory()
    @pytest.fixture()
    def provider_two(self):
        return PreprintProviderFactory()
    @pytest.fixture()
    def project_published(self, user_admin_contrib):
        return ProjectFactory(creator=user_admin_contrib, is_public=True)
    @pytest.fixture()
    def project_public(self, user_write_contrib, project_published):
        # Reuse the published project, adding a write contributor to it.
        project_published.add_contributor(user_write_contrib, permissions=permissions.DEFAULT_CONTRIBUTOR_PERMISSIONS, save=True)
        return project_published
    @pytest.fixture()
    def url(self, project_published):
        return '/{}nodes/{}/preprints/?version=2.2&'.format(API_BASE, project_published._id)
    @pytest.fixture()
    def preprint_unpublished(self, user_admin_contrib, provider_one, project_published, subject):
        return PreprintFactory(creator=user_admin_contrib, filename='mgla.pdf', provider=provider_one, subjects=[[subject._id]], project=project_published, is_published=False)
    def test_unpublished_visible_to_admins(self, app, user_admin_contrib, preprint_unpublished, preprint_published, url):
        res = app.get(url, auth=user_admin_contrib.auth)
        assert len(res.json['data']) == 2
        assert preprint_unpublished._id in [d['id'] for d in res.json['data']]
        assert preprint_published._id in [d['id'] for d in res.json['data']]
    def test_unpublished_invisible_to_write_contribs(self, app, user_write_contrib, preprint_unpublished, preprint_published, url):
        res = app.get(url, auth=user_write_contrib.auth)
        assert len(res.json['data']) == 1
        assert preprint_unpublished._id not in [d['id'] for d in res.json['data']]
    def test_filter_published_false_write_contrib(self, app, user_write_contrib, preprint_unpublished, url):
        # A write contributor filtering for unpublished preprints gets none.
        res = app.get('{}filter[is_published]=false'.format(url), auth=user_write_contrib.auth)
        assert len(res.json['data']) == 0
class TestNodePreprintIsValidList(PreprintIsValidListMixin):
    """Validity tests for a node's preprint list.

    Several mixin tests are overridden here because node-level permission
    failures produce different HTTP status codes (401/403/410) than the
    provider-scoped endpoints the mixin was written for.
    """
    @pytest.fixture()
    def user_admin_contrib(self):
        return AuthUserFactory()
    @pytest.fixture()
    def project(self, user_admin_contrib, user_write_contrib):
        project = ProjectFactory(creator=user_admin_contrib, is_public=True)
        project.add_contributor(user_write_contrib, permissions=permissions.DEFAULT_CONTRIBUTOR_PERMISSIONS, save=True)
        return project
    @pytest.fixture()
    def provider(self):
        return PreprintProviderFactory()
    @pytest.fixture()
    def url(self, project):
        return '/{}nodes/{}/preprints/?version=2.2&'.format(API_BASE, project._id)
    # test override: custom exception checks because of node permission failures
    def test_preprint_private_invisible_no_auth(self, app, project, preprint, url):
        res = app.get(url)
        assert len(res.json['data']) == 1
        project.is_public = False
        project.save()
        res = app.get(url, expect_errors=True)
        assert res.status_code == 401
    # test override: custom exception checks because of node permission failures
    def test_preprint_private_invisible_non_contributor(self, app, user_non_contrib, project, preprint, url):
        res = app.get(url, auth=user_non_contrib.auth)
        assert len(res.json['data']) == 1
        project.is_public = False
        project.save()
        res = app.get(url, auth=user_non_contrib.auth, expect_errors=True)
        assert res.status_code == 403
    # test override: custom exception checks because of node permission failures
    def test_preprint_node_deleted_invisible(self, app, user_admin_contrib, user_write_contrib, user_non_contrib, project, preprint, url):
        # A deleted node returns 410 Gone for every access level.
        project.is_deleted = True
        project.save()
        # no auth
        res = app.get(url, expect_errors=True)
        assert res.status_code == 410
        # contrib
        res = app.get(url, auth=user_non_contrib.auth, expect_errors=True)
        assert res.status_code == 410
        # write_contrib
        res = app.get(url, auth=user_write_contrib.auth, expect_errors=True)
        assert res.status_code == 410
        # admin
        res = app.get(url, auth=user_admin_contrib.auth, expect_errors=True)
        assert res.status_code == 410
| TomBaxter/osf.io | api_tests/nodes/views/test_node_preprints.py | Python | apache-2.0 | 6,285 | 0.003819 |
'''
Router Sim v1.0
Program description: This program simulates a network consisting of routers.
It will be displayed in a command line interface and feature controls
to build and maintain the network by modifying routers, save/load the
network to external files, display information about the routers in
the network and, use pathfinding algorithms to find routes within the
network.
Made by WGDEV, some rights reserved, see license.txt for more info
'''
Main = list() #the database of all routers, keeps track of links to other routers, looks like [[router #,[[other router #, bandwidth],...]]...]
'''
Description: Implements dijkstra's algorithim to find the best path to a target router from an inital router
Parameters:
Router: The initial router's number
Target: The target router's number
Returns:
A string representation of the path to the target from the initial router or an error message
'''
def findPath(Router, Target):
    global Main
    q = [[0,Router,str(Router)]] #q is short for the queue, looks like [[delay,router #,path so far]...]
    mapy = [[0,Router]] #Keeps track of the best path to a router so far, sorted based on router #,uses the same format as q except no path so far
    if not binSearch(0,Main,Router)[1]:
        return("Initial router not found!")
    # Sentinel entry at the front of the queue: the loop below pops q[0]
    # before processing, so this guarantees the real start entry is the
    # first one actually examined.
    q.insert(0,[False,False])
    while (q[0][1] != Target):
        q.pop(0)
        if len(q) <= 0:
            return("No path found!")
        # Skip stale queue entries: a cheaper path to this router was
        # already recorded in mapy.
        ln = binSearch(1,mapy,q[0][1])
        if ln[1] and mapy[ln[0]][0] < q[0][0]:
            continue
        # Relax every link out of the current router.
        ln = binSearch(0,Main, q[0][1])
        for i in Main[ln[0]][1]:
            ln2 = binSearch(1,mapy,i[0])
            nc = q[0][0] + i[1]  # candidate total delay via the current router
            if not ln2[1]:
                mapy.insert(ln2[0],[nc,i[0]])
            else:
                if mapy[ln2[0]][0] > nc:
                    mapy[ln2[0]][0] = nc
                else:
                    continue
            # Insert into the queue keeping it sorted by delay, but never
            # before index 1 so the current head (index 0) stays in place.
            ln3 = binSearch(0,q,nc)
            if ln3[0] <= 0:
                ln3[0] = 1
            q.insert(ln3[0],[nc,i[0],q[0][2]+"->" +str(i[0])])
    return("Delay is " + str(q[0][0]) + " with a path of " + q[0][2])
'''
Description: Implements a binary search algorithim to find the index of a list with an item at a specified index in a jagged list
Parameters:
Index: The index of the value in each list in the jagged list to compare
List: The jagged list
Value: The value to find
Returns:
A list with two items, the first is the index of the list with the value, the second is if the list is actually in the list
'''
def binSearch(Index,List,Value):
if len(List) == 0:
return [0,False]
if len(List) == 1:
if List[0][Index] == Value:
return [0,True]
elif List[0][Index] > Value:
return [0,False]
else:
return [1,False]
mini = 0
maxi = len(List)-1
while(maxi-mini>1):
mid = (int)((mini+maxi)/2)
if(List[mid][Index]== Value):
return [mid,True]
elif(List[mid][Index] > Value):
maxi = mid
else:
mini = mid
if (List[mini][Index] == Value):
return [mini,True]
elif (List[maxi][Index] == Value):
return [maxi,True]
elif (List[mini][Index] > Value):
return [mini,False]
elif (List[maxi][Index] > Value):
return [maxi,False]
else:
return [maxi+1,False]
'''
Description: Prints all the availabe commands
'''
def showOptions():
    # Keep the command reference as a single tuple so new commands can be
    # added in one place; output is identical to printing each line directly.
    for text in (
        "Command -> Effect:",
        "save [filename] -> saves the network to the file",
        "load [filename] -> loads the network from the file",
        "tracert [router 1] [router 2] -> finds the path between two routers",
        "link [router 1] [router 2] [delay] -> adds/updates a link between two routers",
        "remove [router 1] [router 2] -> removes the link between two routers",
        "delete [router] -> deletes the router",
        "neighbour [router] -> lists all routers directly linked to the specified router",
        "topology -> lists all routers in the network",
    ):
        print(text)
'''
Description: Prints all the routers in the network
'''
def showRouters():
    # Summarize every router in the global network database.
    print("Showing all routers in the network:")
    for router, links in Main:
        print("Router " + str(router) + " has " + str(len(links)) + " link(s) to other routers")
'''
Description: Prints all the routers that are directly linked to a router
Parameters:
Router1: The specified router's number
'''
def showRoutes(Router1):
    # Locate the router in the sorted network database first.
    found = binSearch(0, Main, Router1)
    if not found[1]:
        print("Router does not exist!")
        return
    print("Showing neighbour(s) for router " + str(Router1) + ":")
    for other, delay in Main[found[0]][1]:
        print("Other router is " + str(other) + " with a delay of " + str(delay) + ".")
'''
Description: Takes two routers, creates them if they do not exist and, links them or changes the cost if the link already exists
Parameters:
Router1: The first router's number
Router2: The second router's number
Cost: The new cost of the link
Returns: A string represening the number of routers that were created by the function
'''
def addRoute(Router1,Router2,Cost):
    if Cost < 1:
        return "Delay must be at least 1!"
    if Router1 == Router2:
        return "Links cannot loop!"
    # Links are bidirectional: install the link in both directions and count
    # how many routers had to be created along the way. This replaces two
    # duplicated copies of the same insertion logic.
    outp = _linkOneWay(Router1, Router2, Cost)
    outp += _linkOneWay(Router2, Router1, Cost)
    return "Link sucessfully added, with " + str(outp) + " router(s) automatically installed."
'''
Description: Adds or updates a one-way link from Source to Dest, creating the
    Source router first if it does not exist yet
Parameters:
    Source: The router the link starts from
    Dest: The router the link points to
    Cost: The delay of the link
Returns: 1 if the Source router had to be created, 0 otherwise
'''
def _linkOneWay(Source, Dest, Cost):
    global Main
    created = 0
    pos = binSearch(0, Main, Source)
    if not pos[1]:
        # Source router is new; insert it keeping Main sorted by router #.
        Main.insert(pos[0], [Source, []])
        created = 1
    links = Main[pos[0]][1]
    lpos = binSearch(0, links, Dest)
    if not lpos[1]:
        links.insert(lpos[0], [Dest, Cost])
    else:
        # Link already exists; just update its delay.
        links[lpos[0]][1] = Cost
    return created
'''
Description: Takes two routers, deletes the link between them and, deletes them if they do not have any links left afterwards
Parameters:
Router1: The first router's number
Router2: The second router's number
Returns: A string represening the number of routers that were deleted by the function
'''
def removeRoute(Router1,Router2):
    global Main
    outp = 0  # number of routers auto-deleted because they lost their last link
    # Remove the Router1 -> Router2 direction first.
    ln1 = binSearch(0,Main,Router1)
    if (not (ln1[1])):
        return "One or more specified router(s) do not exist!"
    ln = binSearch(0,Main[ln1[0]][1],Router2)
    if (not ln[1]):
        return "The link does not exist!"
    Main[ln1[0]][1].pop(ln[0])
    # A router with no remaining links is removed from the network entirely.
    if len(Main[ln1[0]][1]) <= 0:
        Main.pop(ln1[0])
        outp += 1
    # Now remove the symmetric Router2 -> Router1 direction. Main is
    # re-searched because the first removal may have shifted indices.
    ln1 = binSearch(0,Main,Router2)
    if (not (ln1[1])):
        return "One or more specified router(s) do not exist!"
    ln = binSearch(0,Main[ln1[0]][1],Router1)
    if (not ln[1]):
        return "The link does not exist!"
    Main[ln1[0]][1].pop(ln[0])
    if len(Main[ln1[0]][1]) <= 0:
        Main.pop(ln1[0])
        outp += 1
    return "Link sucessfully deleted, with " + str(outp) + " router(s) automatically removed."
'''
Description: Deletes a router and any links associated with the router, also automatically deletes any routers with no links
Parameters:
Router1: The specified router's number
Returns: A string represening the number of routers that were deleted by the function
'''
def deleteRoute(Router1):
    global Main
    outp = 0  # number of neighbouring routers auto-removed
    ln = binSearch(0,Main,Router1)
    if (not ln[1]):
        return "Router does not exist!"
    # Remove the reverse link from every neighbour, one at a time, until the
    # target router has no links left.
    while (len(Main[ln[0]][1]) > 0):
        ln1 = binSearch(0,Main,Main[ln[0]][1][0][0])
        ln2 = binSearch(0,Main[ln1[0]][1],Router1)
        Main[ln1[0]][1].pop(ln2[0])
        # Neighbours left with no links are removed from the network too.
        if len(Main[ln1[0]][1]) <= 0:
            Main.pop(ln1[0])
            # Deleting an entry before the target shifts the target's index
            # down by one; keep ln[0] pointing at the target router.
            if (ln1[0] < ln[0]):
                ln[0] -= 1
            outp += 1
        Main[ln[0]][1].pop(0)
    Main.pop(ln[0])
    return "Router sucessfully deleted, with " + str(outp) + " other router(s) automatically removed."
'''
Description: Saves the network to a text document
Parameters:
Name: The text document's name
Returns: A string represening if the network was successfully saved
'''
def save(Name):
    try:
        # Serialize as a flat, space-separated token stream:
        # <router count> then, per router: <router #> <link count> followed
        # by <other router #> <delay> per link. This matches load()'s format.
        parts = [str(len(Main))]
        for router, links in Main:
            parts.append(str(router))
            parts.append(str(len(links)))
            for other, delay in links:
                parts.append(str(other))
                parts.append(str(delay))
        # "with" guarantees the file is closed even if the write fails
        # (the original leaked the handle on error).
        with open(Name, "w") as File:
            File.write(" ".join(parts))
        return "Network saved."
    except Exception:
        # e.g. unwritable path; report failure without crashing the CLI.
        # (Narrowed from a bare except so Ctrl+C is not swallowed.)
        return "Error saving network!"
'''
Description: Loads the network from a text document
Parameters:
Name: The text document's name
Returns: A string represening if the network was successfully loaded
'''
def load(Name):
    try:
        # "with" guarantees the file is closed even if reading fails.
        with open(Name, "r") as File:
            tokens = File.read().split(" ")
        # Parse into a local list first so a malformed file cannot leave the
        # global network half-loaded (the original cleared Main before
        # parsing). Token layout matches save(): router count, then per
        # router: router #, link count, then (other #, delay) pairs.
        network = list()
        cur = 1
        while (cur < len(tokens)):
            router = [int(tokens[cur]), list()]
            cur += 1
            link_count = int(tokens[cur])
            cur += 1
            for i in range(link_count):
                router[1].append([int(tokens[cur]), int(tokens[cur + 1])])
                cur += 2
            network.append(router)
        # Only replace the live network once parsing fully succeeded.
        global Main
        Main = network
        return "Network loaded."
    except Exception:
        # Missing file or malformed contents.
        # (Narrowed from a bare except so Ctrl+C is not swallowed.)
        return "Error loading network!"
#The actual program starts here
print("Welcome to RouterSim v1.0 made by WGDEV, some rights reserved see license.txt for more info, enter \"?\" for a list of commands.")
while (True):
    i = input().lower().split(' ')
    try:
        if (i[0] == "?"):
            showOptions()
        elif (i[0] == "link"):
            print(addRoute(int(i[1]),int(i[2]),int(i[3])))
        elif (i[0] == "remove"):
            print(removeRoute(int(i[1]),int(i[2])))
        elif (i[0] == "delete"):
            print(deleteRoute(int(i[1])))
        elif (i[0] == "neighbour"):
            showRoutes(int(i[1]))
        elif (i[0] == "topology"):
            showRouters()
        elif (i[0] == "tracert"):
            print(findPath(int(i[1]),int(i[2])))
        elif(i[0] == "save"):
            print(save(i[1]))
        elif(i[0] == "load"):
            print(load(i[1]))
        else:
            print("Command invalid, enter \"?\" for a list of commands!")
    except (IndexError, ValueError):
        # Missing or non-numeric arguments used to crash the program with a
        # traceback; report the problem and keep accepting commands instead.
        print("Invalid argument(s), enter \"?\" for a list of commands!")
| WGDEVS/RouterSim | RouterSim.py | Python | gpl-2.0 | 10,354 | 0.016226 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2018-02-15 07:47
from django.db import migrations
class Migration(migrations.Migration):
    """Rename the ``Facilitator`` model to ``Profile``."""
    dependencies = [
        ('studygroups', '0083_auto_20180209_1210'),
    ]
    operations = [
        migrations.RenameModel(
            old_name='Facilitator',
            new_name='Profile',
        ),
    ]
| p2pu/learning-circles | studygroups/migrations/0084_auto_20180215_0747.py | Python | mit | 366 | 0 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-08 22:45
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Order article/word topic-rank models by ``rank`` by default."""
    dependencies = [
        ('topics', '0001_initial'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='articletopicrank',
            options={'ordering': ('rank',)},
        ),
        migrations.AlterModelOptions(
            name='wordtopicrank',
            options={'ordering': ('rank',)},
        ),
    ]
| GeorgiaTechDHLab/TOME | topics/migrations/0002_auto_20170308_2245.py | Python | bsd-3-clause | 537 | 0 |
import arcas
import pandas
# Smoke-check the Springer API base URL.
def test_setup():
    api = arcas.Springer()
    assert api.standard == 'http://api.springer.com/metadata/pam?q='
# The keys define the columns of the DataFrame produced by to_dataframe().
def test_keys():
    api = arcas.Springer()
    assert api.keys() == ['url', 'key', 'unique_key', 'title', 'author', 'abstract',
                          'doi', 'date', 'journal', 'provenance', 'category', 'score',
                          'open_access']
# Each parameters_fix test checks the Springer-specific query prefix for one
# argument, then that create_url_search embeds it correctly.
def test_parameters_and_url_author():
    api = arcas.Springer()
    parameters = api.parameters_fix(author='Glynatsi')
    assert parameters == ['name:Glynatsi']
    url = api.create_url_search(parameters)
    assert url == 'http://api.springer.com/metadata/pam?q=name:Glynatsi&api_key=Your key here'
def test_parameters_and_url_title():
    api = arcas.Springer()
    parameters = api.parameters_fix(title='Game')
    assert parameters == ['title:Game']
    url = api.create_url_search(parameters)
    assert url == 'http://api.springer.com/metadata/pam?q=title:Game&api_key=Your key here'
def test_parameters_and_url_category():
    api = arcas.Springer()
    parameters = api.parameters_fix(category='game theory')
    assert parameters == ['subject:game theory']
    url = api.create_url_search(parameters)
    assert url == 'http://api.springer.com/metadata/pam?q=subject:game theory&api_key=Your key here'
def test_parameters_and_url_journal():
    api = arcas.Springer()
    parameters = api.parameters_fix(journal='Springer')
    assert parameters == ['pub:Springer']
    url = api.create_url_search(parameters)
    assert url == 'http://api.springer.com/metadata/pam?q=pub:Springer&api_key=Your key here'
def test_parameters_and_url_record():
    api = arcas.Springer()
    parameters = api.parameters_fix(records=1)
    assert parameters == ['p=1']
    url = api.create_url_search(parameters)
    assert url == 'http://api.springer.com/metadata/pam?q=p=1&api_key=Your key here'
def test_parameters_and_url_start():
    api = arcas.Springer()
    parameters = api.parameters_fix(start=1)
    assert parameters == ['s=1']
    url = api.create_url_search(parameters)
    assert url == 'http://api.springer.com/metadata/pam?q=s=1&api_key=Your key here'
# Multiple filters are joined with +AND+; paging arguments become &p= / &s=.
def test_create_url_search():
    api = arcas.Springer()
    parameters = api.parameters_fix(title='Nash', journal='Spinger', records=2, start=5)
    url = api.create_url_search(parameters)
    assert url == 'http://api.springer.com/metadata/pam?q=title:Nash+AND+pub:Spinger&p=2&s=5&api_key=Your key here'
# Feed a raw Springer metadata record through to_dataframe() and verify the
# resulting single-article DataFrame (one row per author, hence len == 2).
def test_to_dataframe():
    dummy_article = {'identifier': 'doi:10.1000/', 'title': 'Title',
                     'creator': 'E Glynatsi, V Knight', 'publicationName':
                     'Awesome Journal', 'genre': 'ReviewPaper', 'openAccess': 'false',
                     'h1': 'Abstract', 'p': 'Abstract',
                     'doi': '10.1000/', 'publisher': 'Springer',
                     'publicationDate': '2021-01-01', 'url': 'http://dx.doi.org/10.1000/',
                     'openAccess': 'false',}
    api = arcas.Springer()
    article = api.to_dataframe(dummy_article)
    assert isinstance(article, pandas.core.frame.DataFrame)
    assert list(article.columns) == api.keys()
    assert len(article['url']) == 2
    assert article['url'].unique()[0] == 'http://dx.doi.org/10.1000/'
    # The citation key is built from the first author's surname and the year.
    assert article['key'].unique()[0] == 'Glynatsi2021'
    assert article['title'].unique()[0] == 'Title'
    assert article['abstract'].unique()[0] == 'Abstract'
    assert article['journal'].unique()[0] == 'Awesome Journal'
    assert article['date'].unique()[0] == 2021
    assert article['open_access'].unique()[0] == False
    assert article['score'].unique()[0] == 'Not available'
import numpy as np
import pytest
from pandas.errors import NullFrequencyError
import pandas as pd
from pandas import (
DatetimeIndex,
Index,
NaT,
Series,
TimedeltaIndex,
date_range,
offsets,
)
import pandas._testing as tm
from pandas.tseries.offsets import BDay
class TestShift:
    """Tests for Series.shift/tshift behavior across index types."""
    @pytest.mark.parametrize(
        "ser",
        [
            Series([np.arange(5)]),
            date_range("1/1/2011", periods=24, freq="H"),
            Series(range(5), index=date_range("2017", periods=5)),
        ],
    )
    @pytest.mark.parametrize("shift_size", [0, 1, 2])
    def test_shift_always_copy(self, ser, shift_size):
        # GH22397: shift must return a new object, even for a no-op shift(0).
        assert ser.shift(shift_size) is not ser
    @pytest.mark.parametrize("move_by_freq", [pd.Timedelta("1D"), pd.Timedelta("1min")])
    def test_datetime_shift_always_copy(self, move_by_freq):
        # GH#22397: frequency-based shifts must also return a new object.
        ser = Series(range(5), index=date_range("2017", periods=5))
        assert ser.shift(freq=move_by_freq) is not ser
    def test_shift(self, datetime_series):
        # Positional shift: index is unchanged, values move by one slot.
        shifted = datetime_series.shift(1)
        unshifted = shifted.shift(-1)
        tm.assert_index_equal(shifted.index, datetime_series.index)
        tm.assert_index_equal(unshifted.index, datetime_series.index)
        tm.assert_numpy_array_equal(
            unshifted.dropna().values, datetime_series.values[:-1]
        )
        # Frequency-based shift: values stay put, the index moves; shifting
        # back by the same offset round-trips exactly.
        offset = BDay()
        shifted = datetime_series.shift(1, freq=offset)
        unshifted = shifted.shift(-1, freq=offset)
        tm.assert_series_equal(unshifted, datetime_series)
        unshifted = datetime_series.shift(0, freq=offset)
        tm.assert_series_equal(unshifted, datetime_series)
        # Equivalent string alias for the offset.
        shifted = datetime_series.shift(1, freq="B")
        unshifted = shifted.shift(-1, freq="B")
        tm.assert_series_equal(unshifted, datetime_series)
        # corner case
        unshifted = datetime_series.shift(0)
        tm.assert_series_equal(unshifted, datetime_series)
        # Shifting with PeriodIndex
        ps = tm.makePeriodSeries()
        shifted = ps.shift(1)
        unshifted = shifted.shift(-1)
        tm.assert_index_equal(shifted.index, ps.index)
        tm.assert_index_equal(unshifted.index, ps.index)
        tm.assert_numpy_array_equal(unshifted.dropna().values, ps.values[:-1])
        # Offset may be passed positionally as string or as an offset object.
        shifted2 = ps.shift(1, "B")
        shifted3 = ps.shift(1, BDay())
        tm.assert_series_equal(shifted2, shifted3)
        tm.assert_series_equal(ps, shifted2.shift(-1, "B"))
        # A freq incompatible with the PeriodIndex's own freq must raise.
        msg = "Given freq D does not match PeriodIndex freq B"
        with pytest.raises(ValueError, match=msg):
            ps.shift(freq="D")
        # legacy support
        shifted4 = ps.shift(1, freq="B")
        tm.assert_series_equal(shifted2, shifted4)
        shifted5 = ps.shift(1, freq=BDay())
        tm.assert_series_equal(shifted5, shifted4)
        # 32-bit taking
        # GH#8129: periods given as a 32-bit integer must work too.
        index = date_range("2000-01-01", periods=5)
        for dtype in ["int32", "int64"]:
            s1 = Series(np.arange(5, dtype=dtype), index=index)
            p = s1.iloc[1]
            result = s1.shift(periods=p)
            expected = Series([np.nan, 0, 1, 2, 3], index=index)
            tm.assert_series_equal(result, expected)
        # GH#8260
        # with tz
        s = Series(
            date_range("2000-01-01 09:00:00", periods=5, tz="US/Eastern"), name="foo"
        )
        result = s - s.shift()
        exp = Series(TimedeltaIndex(["NaT"] + ["1 days"] * 4), name="foo")
        tm.assert_series_equal(result, exp)
        # incompat tz
        s2 = Series(date_range("2000-01-01 09:00:00", periods=5, tz="CET"), name="foo")
        msg = "DatetimeArray subtraction must have the same timezones or no timezones"
        with pytest.raises(TypeError, match=msg):
            s - s2
    def test_shift2(self):
        ts = Series(
            np.random.randn(5), index=date_range("1/1/2000", periods=5, freq="H")
        )
        # Shifting by a freq that is not the index's own freq moves the index
        # by that amount.
        result = ts.shift(1, freq="5T")
        exp_index = ts.index.shift(1, freq="5T")
        tm.assert_index_equal(result.index, exp_index)
        # GH#1063, multiple of same base
        result = ts.shift(1, freq="4H")
        exp_index = ts.index + offsets.Hour(4)
        tm.assert_index_equal(result.index, exp_index)
        # An index without a freq cannot be shifted positionally.
        idx = DatetimeIndex(["2000-01-01", "2000-01-02", "2000-01-04"])
        msg = "Cannot shift with no freq"
        with pytest.raises(NullFrequencyError, match=msg):
            idx.shift(1)
    def test_shift_fill_value(self):
        # GH#24128: fill_value replaces the NaNs introduced by the shift.
        ts = Series(
            [1.0, 2.0, 3.0, 4.0, 5.0], index=date_range("1/1/2000", periods=5, freq="H")
        )
        exp = Series(
            [0.0, 1.0, 2.0, 3.0, 4.0], index=date_range("1/1/2000", periods=5, freq="H")
        )
        # check that fill value works
        result = ts.shift(1, fill_value=0.0)
        tm.assert_series_equal(result, exp)
        exp = Series(
            [0.0, 0.0, 1.0, 2.0, 3.0], index=date_range("1/1/2000", periods=5, freq="H")
        )
        result = ts.shift(2, fill_value=0.0)
        tm.assert_series_equal(result, exp)
        # With a compatible fill_value, the dtype must be preserved (no
        # upcast to float).
        ts = Series([1, 2, 3])
        res = ts.shift(2, fill_value=0)
        assert res.dtype == ts.dtype
    def test_shift_categorical_fill_value(self):
        # fill_value must be an existing category; categories/orderedness are
        # preserved by the shift.
        ts = Series(["a", "b", "c", "d"], dtype="category")
        res = ts.shift(1, fill_value="a")
        expected = Series(
            pd.Categorical(
                ["a", "a", "b", "c"], categories=["a", "b", "c", "d"], ordered=False
            )
        )
        tm.assert_equal(res, expected)
        # check for incorrect fill_value
        msg = "'fill_value=f' is not present in this Categorical's categories"
        with pytest.raises(TypeError, match=msg):
            ts.shift(1, fill_value="f")
def test_shift_dst(self):
# GH#13926
dates = date_range("2016-11-06", freq="H", periods=10, tz="US/Eastern")
s = Series(dates)
res = s.shift(0)
tm.assert_series_equal(res, s)
assert res.dtype == "datetime64[ns, US/Eastern]"
res = s.shift(1)
exp_vals = [NaT] + dates.astype(object).values.tolist()[:9]
exp = Series(exp_vals)
tm.assert_series_equal(res, exp)
assert res.dtype == "datetime64[ns, US/Eastern]"
res = s.shift(-2)
exp_vals = dates.astype(object).values.tolist()[2:] + [NaT, NaT]
exp = Series(exp_vals)
tm.assert_series_equal(res, exp)
assert res.dtype == "datetime64[ns, US/Eastern]"
for ex in [10, -10, 20, -20]:
res = s.shift(ex)
exp = Series([NaT] * 10, dtype="datetime64[ns, US/Eastern]")
tm.assert_series_equal(res, exp)
assert res.dtype == "datetime64[ns, US/Eastern]"
    @pytest.mark.filterwarnings("ignore:tshift is deprecated:FutureWarning")
    def test_tshift(self, datetime_series):
        """Legacy ``tshift`` behaviour: shift the index by its own freq."""
        # TODO: remove this test when tshift deprecation is enforced
        # PeriodIndex
        ps = tm.makePeriodSeries()
        shifted = ps.tshift(1)
        unshifted = shifted.tshift(-1)
        # A +1/-1 round trip is a no-op.
        tm.assert_series_equal(unshifted, ps)
        # Passing the business-day freq (string or offset) matches the default.
        shifted2 = ps.tshift(freq="B")
        tm.assert_series_equal(shifted, shifted2)
        shifted3 = ps.tshift(freq=BDay())
        tm.assert_series_equal(shifted, shifted3)
        # A freq conflicting with the PeriodIndex freq must raise.
        msg = "Given freq M does not match PeriodIndex freq B"
        with pytest.raises(ValueError, match=msg):
            ps.tshift(freq="M")
        # DatetimeIndex
        shifted = datetime_series.tshift(1)
        unshifted = shifted.tshift(-1)
        tm.assert_series_equal(datetime_series, unshifted)
        shifted2 = datetime_series.tshift(freq=datetime_series.index.freq)
        tm.assert_series_equal(shifted, shifted2)
        # An index without a set freq has it inferred from the values.
        inferred_ts = Series(
            datetime_series.values, Index(np.asarray(datetime_series.index)), name="ts"
        )
        shifted = inferred_ts.tshift(1)
        expected = datetime_series.tshift(1)
        expected.index = expected.index._with_freq(None)
        tm.assert_series_equal(shifted, expected)
        unshifted = shifted.tshift(-1)
        tm.assert_series_equal(unshifted, inferred_ts)
        # When no freq is set and none can be inferred, tshift must raise.
        no_freq = datetime_series[[0, 5, 7]]
        msg = "Freq was not set in the index hence cannot be inferred"
        with pytest.raises(ValueError, match=msg):
            no_freq.tshift()
    def test_tshift_deprecated(self, datetime_series):
        """Calling ``tshift`` at all must emit a FutureWarning (GH#11631)."""
        # GH#11631
        with tm.assert_produces_warning(FutureWarning):
            datetime_series.tshift()
    def test_period_index_series_shift_with_freq(self):
        """``shift(freq="infer")`` on a PeriodIndex Series round-trips."""
        ps = tm.makePeriodSeries()
        shifted = ps.shift(1, freq="infer")
        unshifted = shifted.shift(-1, freq="infer")
        # A +1/-1 round trip with inferred freq is a no-op.
        tm.assert_series_equal(unshifted, ps)
        # Explicit business-day freq (string or offset) matches the inferred one.
        shifted2 = ps.shift(freq="B")
        tm.assert_series_equal(shifted, shifted2)
        shifted3 = ps.shift(freq=BDay())
        tm.assert_series_equal(shifted, shifted3)
def test_datetime_series_shift_with_freq(self, datetime_series):
shifted = datetime_series.shift(1, freq="infer")
unshifted = shifted.shift(-1, freq="infer")
tm.assert_series_equal(datetime_series, unshifted)
shifted2 = datetime_series.shift(freq=datetime_series.index.freq)
tm.assert_series_equal(shifted, shifted2)
inferred_ts = Series(
datetime_series.values, Index(np.asarray(datetime_series.index)), name="ts"
)
shifted = inferred_ts.shift(1, freq="infer")
expected = datetime_series.shift(1, freq="infer")
expected.index = expected.index._with_freq(None)
tm.assert_series_equal(shifted, expected)
unshifted = shifted.shift(-1, freq="infer")
tm.assert_series_equal(unshifted, inferred_ts)
    def test_period_index_series_shift_with_freq_error(self):
        """A freq that conflicts with the PeriodIndex freq must raise."""
        ps = tm.makePeriodSeries()
        msg = "Given freq M does not match PeriodIndex freq B"
        with pytest.raises(ValueError, match=msg):
            ps.shift(freq="M")
    def test_datetime_series_shift_with_freq_error(self, datetime_series):
        """``shift(freq="infer")`` must raise when no freq can be inferred."""
        no_freq = datetime_series[[0, 5, 7]]
        msg = "Freq was not set in the index hence cannot be inferred"
        with pytest.raises(ValueError, match=msg):
            no_freq.shift(freq="infer")
def test_shift_int(self, datetime_series):
ts = datetime_series.astype(int)
shifted = ts.shift(1)
expected = ts.astype(float).shift(1)
tm.assert_series_equal(shifted, expected)
    def test_shift_object_non_scalar_fill(self):
        """Non-scalar fill_value is rejected except for object dtype."""
        # shift requires scalar fill_value except for object dtype
        ser = Series(range(3))
        with pytest.raises(ValueError, match="fill_value must be a scalar"):
            ser.shift(1, fill_value=[])
        df = ser.to_frame()
        with pytest.raises(ValueError, match="fill_value must be a scalar"):
            df.shift(1, fill_value=np.arange(3))
        # Object dtype may take any Python object as the fill.
        obj_ser = ser.astype(object)
        result = obj_ser.shift(1, fill_value={})
        assert result[0] == {}
        obj_df = obj_ser.to_frame()
        result = obj_df.shift(1, fill_value={})
        assert result.iloc[0, 0] == {}
def test_shift_categorical(self):
# GH#9416
s = Series(["a", "b", "c", "d"], dtype="category")
tm.assert_series_equal(s.iloc[:-1], s.shift(1).shift(-1).dropna())
sp1 = s.shift(1)
tm.assert_index_equal(s.index, sp1.index)
assert np.all(sp1.values.codes[:1] == -1)
assert np.all(s.values.codes[:-1] == sp1.values.codes[1:])
sn2 = s.shift(-2)
tm.assert_index_equal(s.index, sn2.index)
assert np.all(sn2.values.codes[-2:] == -1)
assert np.all(s.values.codes[2:] == sn2.values.codes[:-2])
tm.assert_index_equal(s.values.categories, sp1.values.categories)
tm.assert_index_equal(s.values.categories, sn2.values.categories)
    def test_shift_dt64values_int_fill_deprecated(self):
        """Integer fill_value on datetime64 values warns before casting (GH#31971)."""
        # GH#31971
        ser = Series([pd.Timestamp("2020-01-01"), pd.Timestamp("2020-01-02")])
        with tm.assert_produces_warning(FutureWarning):
            result = ser.shift(1, fill_value=0)
        # The deprecated behaviour casts 0 to Timestamp(0), i.e. the epoch.
        expected = Series([pd.Timestamp(0), ser[0]])
        tm.assert_series_equal(result, expected)
    @pytest.mark.parametrize("periods", [1, 2, 3, 4])
    def test_shift_preserve_freqstr(self, periods):
        """Shifting by an offset string keeps the index freq (GH#21275)."""
        # GH#21275
        ser = Series(
            range(periods),
            index=pd.date_range("2016-1-1 00:00:00", periods=periods, freq="H"),
        )
        # One 2-hour step moves every timestamp by two hours but leaves
        # the hourly freq on the index untouched.
        result = ser.shift(1, "2H")
        expected = Series(
            range(periods),
            index=pd.date_range("2016-1-1 02:00:00", periods=periods, freq="H"),
        )
        tm.assert_series_equal(result, expected)
    @pytest.mark.parametrize(
        "input_data, output_data",
        [(np.empty(shape=(0,)), []), (np.ones(shape=(2,)), [np.nan, 1.0])],
    )
    def test_shift_non_writable_array(self, input_data, output_data):
        """A read-only ndarray backing a Series must still be shiftable."""
        # GH21049 Verify whether non writable numpy array is shiftable
        input_data.setflags(write=False)
        result = Series(input_data).shift(1)
        expected = Series(output_data, dtype="float64")
        tm.assert_series_equal(result, expected)
| jreback/pandas | pandas/tests/series/methods/test_shift.py | Python | bsd-3-clause | 13,266 | 0.00098 |
"""Common base class for Redis-based distributed worker systems.
This software is released under an MIT/X11 open source license.
Copyright 2012-2014 Diffeo, Inc.
"""
from __future__ import absolute_import
import logging
import socket
import redis
from rejester.exceptions import ProgrammerError
logger = logging.getLogger(__name__)
class RedisBase(object):
    """Common base class for Redis-based distributed worker systems.
    This class stores common metadata for systems based on the Redis
    in-memory database (http://redis.io/).
    The work being done is identified by two strings, the _application name_
    and the _namespace_. These two strings are concatenated together and
    prepended to most Redis keys by ``_namespace()``. To avoid leaking
    database space, it is important to clean up the namespace, for instance
    with ``delete_namespace()``, when the application is done.
    """
    def __init__(self, config):
        """Initialize the registry using a configuration object.
        ``config`` should be a dictionary with the following keys:
        ``registry_addresses``
            list of ``host:port`` for the Redis server(s)
        ``app_name``
            application name (typically fixed, e.g. "rejester")
        ``namespace``
            application invocation namespace name (should be unique per run)
        Raises :exc:`rejester.exceptions.ProgrammerError` if
        ``registry_addresses`` or ``app_name`` is missing.
        """
        super(RedisBase, self).__init__()
        self.config = config
        if 'registry_addresses' not in config:
            raise ProgrammerError('registry_addresses not set')
        # Only the first configured address is used; any others are ignored.
        redis_address, redis_port = config['registry_addresses'][0].split(':')
        redis_port = int(redis_port)
        # Local IP a connection to Redis would originate from; identifies
        # this worker host to other processes.
        self._local_ip = self._ipaddress(redis_address, redis_port)
        if 'app_name' not in config:
            raise ProgrammerError('app_name must be specified to configure Registry')
        self._namespace_str = config['app_name'] + '_' + config['namespace']
        # Shared connection pool; methods build redis.Redis clients on it.
        self.pool = redis.ConnectionPool(host=redis_address, port=redis_port, db=0)
    def _ipaddress(self, host, port):
        # Determine the local IP used to reach (host, port).  "Connecting"
        # a UDP socket sends no traffic; it only selects a route.
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        s.connect((host, port))
        local_ip = s.getsockname()[0]
        s.close()
        return local_ip
    def delete_namespace(self):
        '''Remove all keys from the namespace
        '''
        conn = redis.Redis(connection_pool=self.pool)
        # NOTE: KEYS is O(N) over the whole database and blocks the server;
        # tolerable here because it only runs at teardown.
        keys = conn.keys("%s*" % self._namespace_str)
        # Delete in batches of 10000 to keep each DEL command bounded.
        # (``xrange``: this module is Python 2 code.)
        for i in xrange(0, len(keys), 10000):
            conn.delete(*keys[i:i+10000])
        logger.debug('tearing down %r', self._namespace_str)
    def _namespace(self, name):
        """Return *name* prefixed with the app/namespace string."""
        return "%s_%s" % (self._namespace_str, name)
| diffeo/rejester | rejester/_redis.py | Python | mit | 2,666 | 0.0015 |
import platform
from copy import *
from ctypes import *
# ctypes mirrors of the C structs used by the zTemplate shared library.
# The classes are declared empty first and their ``_fields_`` assigned
# afterwards, so that the self-/mutually-referential pointer types
# (PARAM_P, VALUE_P, LIST_P) can be created in between.
class Param(Structure): #Forward declaration
    pass
class Value(Structure):
    pass
class StringValue(Structure):
    pass
class BoolValue(Structure):
    pass
class NumberValue(Structure):
    pass
class ListValue(Structure):
    pass
PARAM_P = POINTER(Param)
VALUE_P = POINTER(Value)
LIST_P = POINTER(ListValue)
# Tagged value: ``type`` selects which concrete *Value struct ``val``
# points to (1=string, 2=bool, 3=number, 4=list; see zTemplate.handle_type).
Value._fields_ = [
    ("type", c_uint),
    ("val", c_void_p)
]
StringValue._fields_ = [
    ("value", c_char_p)
]
BoolValue._fields_ = [
    ("value", c_bool)
]
NumberValue._fields_ = [
    ("value", c_int)
]
# Singly linked list node: ``next`` is NULL-terminated on the C side.
ListValue._fields_ = [
    ("value", VALUE_P),
    ("next", LIST_P)
]
# Singly linked key/value parameter list passed to the render functions.
Param._fields_ = [
    ("key", c_char_p),
    ("value", VALUE_P),
    ("next", PARAM_P)
]
class zTemplate(object):
    """ctypes wrapper around the zTemplate shared library.

    Converts Python parameter dicts into the linked Param/Value C
    structures the library expects, then calls its render entry points.
    """
    def __init__(self):
        # Pick the platform-specific shared library from ./bin.
        if platform.system() == "Windows":
            self.lib = cdll.LoadLibrary("bin/zTemplate.dll")
        else:
            self.lib = cdll.LoadLibrary("bin/zTemplate.so")
        self.lib.render.restype = c_char_p
        # NOTE(review): ctypes spells this ``argtypes`` (with an s); these
        # assignments set an unused attribute, so no argument checking
        # actually happens -- confirm and fix upstream.
        self.lib.render.argtype = [c_char_p, PARAM_P]
        self.lib.render_text.restype = c_char_p
        # NOTE(review): this second line looks like it was meant to set
        # ``render_text``'s argument types, not ``render``'s again.
        self.lib.render.argtype = [c_char_p, PARAM_P]
    def handle_type(self, value):
        """Wrap a Python value in a Value struct (recursively for lists).

        The concrete structs are appended to ``self.Values`` so they stay
        alive until the C call completes.
        """
        v = Value()
        if type(value) == list:
            v.type = 4
            # Build the linked list back-to-front so each node can point
            # at the previously built one; ``lv`` ends up as the head.
            rev = value[:]
            rev.reverse()
            prev_item = None
            for item in rev:
                lv = ListValue()
                self.Values.append(lv)
                lv.value = VALUE_P(self.handle_type(item))
                if prev_item != None:
                    lv.next = LIST_P(prev_item)
                prev_item = lv
            # NOTE(review): an empty list leaves ``lv`` unbound and raises
            # NameError here -- confirm intended behaviour for [].
            v.val = cast(byref(lv), c_void_p)
        elif type(value) == dict:
            # NOTE(review): dicts are silently ignored; ``v.val`` stays
            # NULL.  Flattening to "key->sub" happens in construct_params.
            pass
        elif type(value) == str:
            sv = StringValue()
            sv.value = value.encode("UTF-8")
            self.Values.append(sv)
            v.type = 1
            v.val = cast(byref(sv), c_void_p)
        elif type(value) == bool:
            bv = BoolValue()
            bv.value = value
            self.Values.append(bv)
            v.type = 2
            v.val = cast(byref(bv), c_void_p)
        elif type(value) == int:
            nv = NumberValue()
            nv.value = value
            self.Values.append(nv)
            v.type = 3
            v.val = cast(byref(nv), c_void_p)
        else:
            print("Unhandled type %s" % type(value))
        return v
    def render(self, file, params = {}):
        """Render template *file* with *params* via the C library."""
        root = self.construct_params(params)
        return self.lib.render(file.encode("UTF-8"), byref(root))
    def render_text(self, text, params = {}):
        """Render the template string *text* with *params*."""
        root = self.construct_params(params)
        return self.lib.render_text(text.encode("UTF-8"), byref(root))
    def construct_params(self, params):
        """Build the C Param linked list from a Python dict.

        One level of dict nesting is flattened to "key->subkey" entries.
        Returns the (dummy-headed) root Param node.
        """
        root = Param()
        cursor = root
        self.Values = [] #Just to keep our value structs not destroyed
        for key, value in params.items():
            if type(value) == dict:
                for name, member in value.items():
                    p = Param()
                    p.key = ("%s->%s" % (key, name)).encode("UTF-8")
                    v = self.handle_type(member)
                    p.value = VALUE_P(v)
                    cursor.next = PARAM_P(p)
                    cursor = p
            else:
                p = Param()
                p.key = key.encode("UTF-8")
                v = self.handle_type(value)
                p.value = VALUE_P(v)
                cursor.next = PARAM_P(p)
                cursor = p
        return root
return root | zaibacu/zTemplate | lib/zTemplate.py | Python | mit | 2,885 | 0.038475 |
import unittest
import traveler
class MainTest(unittest.TestCase):
def test_base(self):
self.assertTrue(True)
def setUp(self):
self.poller = traveler.Poller()
def test_poller(self):
j = self.poller.load()
self.assertTrue(isinstance(j, str))
# Allow running this file directly: ``python test_poller.py``.
if __name__ == '__main__':
    unittest.main()
| MrTrustworthy/traveler | tests/test_poller.py | Python | mit | 340 | 0.002941 |
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown Copyright 2012-6 Met Office.
#
# This file is part of Rose, a framework for meteorological suites.
#
# Rose is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Rose is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Rose. If not, see <http://www.gnu.org/licenses/>.
# -----------------------------------------------------------------------------
import copy
import os
import rose.config
import rose.config_tree
import rose.env
import rose.macro
import rose.macros.rule
import rose.resource
class TriggerMacro(rose.macro.MacroBaseRoseEdit):
    """Load, check and apply metadata trigger dependencies.

    A metadata "trigger" declares that a child setting is only enabled
    when a parent setting has one of a set of allowed values; otherwise
    the child becomes trigger-ignored.  ``transform`` applies those
    states to a config; ``validate`` reports settings whose ignored
    state disagrees with the triggers.

    NOTE(review): this module uses Python 2 idioms (list-returning
    ``dict.keys()``/``dict.items()``) that must be preserved.
    """
    ERROR_BAD_EXPR = "Invalid trigger expression: {0}"
    ERROR_BAD_STATE = "State should be {0}"
    ERROR_CYCLIC = 'Cyclic dependency detected: {0} to {1}'
    ERROR_DUPL_TRIG = "Badly defined trigger - {0} is 'duplicate'"
    ERROR_MISSING_METADATA = 'No metadata entry found'
    WARNING_STATE_CHANGED = '{0} -> {1}'
    IGNORED_STATUS_PARENT = 'from state of parent: {0}'
    IGNORED_STATUS_VALUE = ('from parent value: {0} '
                            'is not {2} ({1})')
    IGNORED_STATUS_VALUES = ('from parent value: {0} with {1} '
                             'is not in the allowed values: {2}')
    PARENT_VALUE = 'value {0}'
    # Class-level cache of rule evaluations, shared across instances and
    # bounded by MAX_STORED_RULE_CHECKS.
    _evaluated_rule_checks = {}
    MAX_STORED_RULE_CHECKS = 10000
    def _setup_triggers(self, meta_config):
        """Build the trigger lookup tables from *meta_config*.

        ``trigger_family_lookup`` maps each parent setting id to a dict
        of {child id: list of activating values}; an empty value list is
        normalised to ``[None]``, meaning "any value".
        """
        self.trigger_family_lookup = {}
        self._id_is_duplicate = {}  # Speedup dictionary.
        self.enabled_dict = {}
        self.evaluator = rose.macros.rule.RuleEvaluator()
        self.rec_rule = rose.macros.rule.REC_EXPR_IS_THIS_RULE
        for setting_id, sect_node in meta_config.value.items():
            if sect_node.is_ignored():
                continue
            opt_node = sect_node.get([rose.META_PROP_TRIGGER], no_ignore=True)
            if opt_node is not None:
                expr = opt_node.value
                id_value_dict = rose.variable.parse_trigger_expression(expr)
                for trig_id, values in id_value_dict.items():
                    if values == []:
                        id_value_dict.update({trig_id: [None]})
                self.trigger_family_lookup.update({setting_id: id_value_dict})
        self._trigger_involved_ids = self.get_all_ids()
    def transform(self, config, meta_config=None):
        """Apply metadata trigger expressions to variables."""
        self.reports = []
        meta_config = self._load_meta_config(config, meta_config)
        self._setup_triggers(meta_config)
        self.enabled_dict = {}
        self.ignored_dict = {}
        enabled = rose.config.ConfigNode.STATE_NORMAL
        trig_ignored = rose.config.ConfigNode.STATE_SYST_IGNORED
        user_ignored = rose.config.ConfigNode.STATE_USER_IGNORED
        state_map = {enabled: 'enabled ',
                     trig_ignored: 'trig-ignored',
                     user_ignored: 'user-ignored'}
        change_list = []  # NOTE(review): never used.
        id_list = []
        prev_ignoreds = {trig_ignored: [], user_ignored: []}
        # First pass: record every setting id and its current ignored state.
        for keylist, node in config.walk():
            if len(keylist) == 1:
                n_id = keylist[0]
            else:
                n_id = self._get_id_from_section_option(*keylist)
            id_list.append(n_id)
            if node.state in prev_ignoreds:
                prev_ignoreds[node.state].append(n_id)
        # Second pass: evaluate every trigger, filling enabled/ignored dicts.
        for var_id in self.trigger_family_lookup:
            self.update(var_id, config, meta_config)
        # Third pass: apply the computed states and report changes.
        for var_id in id_list:
            section, option = self._get_section_option_from_id(var_id)
            node = config.get([section, option])
            old, new = None, None
            if var_id in self.ignored_dict:
                node.state = trig_ignored
                if not any([var_id in v for k, v in prev_ignoreds.items()]):
                    old, new = state_map[enabled], state_map[trig_ignored]
            elif var_id in prev_ignoreds[trig_ignored]:
                node.state = enabled
                old, new = state_map[trig_ignored], state_map[enabled]
            elif (var_id in prev_ignoreds[user_ignored] and
                    var_id in self._trigger_involved_ids):
                # User-ignored settings involved in triggers are re-enabled;
                # the trigger logic owns their state.
                node.state = enabled
                old, new = state_map[user_ignored], state_map[enabled]
            if old != new:
                info = self.WARNING_STATE_CHANGED.format(old, new)
                if option is None:
                    value = None
                else:
                    value = node.value
                self.add_report(section, option, value, info)
        return config, self.reports
    def update(self, var_id, config_data, meta_config):
        """Update enabled and ignored ids starting with var_id.
        var_id - a setting id to start the triggering update at.
        config_data - a rose.config.ConfigNode or a dictionary that
        looks like this:
        {"sections":
            {"namelist:foo": rose.section.Section instance,
             "env": rose.section.Section instance},
         "variables":
            {"namelist:foo": [rose.variable.Variable instance,
                              rose.variable.Variable instance],
             "env": [rose.variable.Variable instance]
            }
        }
        meta_config - a rose.config.ConfigNode.
        Returns the list of ids examined during the walk.
        """
        has_ignored_parent = True
        config_sections = self._get_config_sections(config_data)
        config_sections_duplicate_map = self._get_duplicate_config_sections(
            config_data, config_sections=config_sections)
        # Duplicate sections expand one id into one id per instance.
        start_ids = [var_id]
        alt_ids = self._get_id_duplicates(
            var_id, config_data, meta_config,
            config_sections_duplicate_map=config_sections_duplicate_map
        )
        if alt_ids:
            start_ids = alt_ids
        id_stack = []
        for start_id in start_ids:
            if (start_id in self.enabled_dict and
                    start_id not in self.ignored_dict):
                has_ignored_parent = False
            if not sum([start_id in v for v in
                        self.trigger_family_lookup.values()]):
                # Not triggered by anything, so it cannot be parent-ignored.
                has_ignored_parent = False
            section, option = self._get_section_option_from_id(start_id)
            is_node_present = self._get_config_has_id(config_data, start_id)
            if section in self.ignored_dict and option is not None:
                # An option inside an ignored section is ignored too.
                has_ignored_parent = True
            has_ignored_parent = has_ignored_parent or not is_node_present
            id_stack.append((start_id, has_ignored_parent))
        update_id_list = []
        # Breadth-ish walk: pop the head, push children at position 1.
        while id_stack:
            this_id, has_ignored_parent = id_stack[0]
            alt_ids = self._get_id_duplicates(
                this_id, config_data, meta_config,
                config_sections_duplicate_map=config_sections_duplicate_map
            )
            if alt_ids:
                this_id = alt_ids.pop(0)
                for alt_id in alt_ids:
                    id_stack.insert(1, (alt_id, has_ignored_parent))
            is_duplicate = self._check_is_id_dupl(this_id, meta_config)  # NOTE(review): unused.
            # Triggered sections need their options to trigger sub children.
            if this_id in config_sections:
                options = []  # NOTE(review): never used.
                for option in self._get_config_section_options(config_data,
                                                               this_id):
                    skip_id = self._get_id_from_section_option(
                        this_id, option)
                    if skip_id in self.trigger_family_lookup:
                        id_stack.insert(1, (skip_id, has_ignored_parent))
            update_id_list.append(this_id)
            if not self.check_is_id_trigger(this_id, meta_config):
                id_stack.pop(0)
                continue
            if not has_ignored_parent:
                section, option = self._get_section_option_from_id(this_id)
                is_node_present = self._get_config_has_id(
                    config_data, this_id)
                value = self._get_config_id_value(
                    config_data, this_id)
                if option is None and is_node_present:
                    # A present section counts as "has a value".
                    value = True
            # Check the children of this id
            id_val_map = self._get_family_dict(
                this_id, config_data, meta_config)
            for child_id, vals in id_val_map.items():
                if has_ignored_parent or value is None:
                    # Parent ignored or absent: child is trigger-ignored.
                    help_text = self.IGNORED_STATUS_PARENT.format(this_id)
                    self.ignored_dict.setdefault(child_id, {})
                    self.ignored_dict[child_id].update({this_id: help_text})
                    if child_id in self.enabled_dict:
                        child_list = self.enabled_dict[child_id]
                        if this_id in child_list:
                            child_list.remove(this_id)
                        if not child_list:
                            self.enabled_dict.pop(child_id)
                    id_stack.insert(1, (child_id, True))
                else:  # Enabled parent
                    if vals == [None]:
                        # Enabled parent with a value, don't care what it is.
                        self.enabled_dict.setdefault(child_id, [])
                        if this_id not in self.enabled_dict[child_id]:
                            self.enabled_dict[child_id].append(this_id)
                        if this_id in self.ignored_dict.get(child_id, {}):
                            self.ignored_dict[child_id].pop(this_id)
                        if (child_id in self.ignored_dict and
                                self.ignored_dict[child_id] == {}):
                            self.ignored_dict.pop(child_id)
                        id_stack.insert(1, (child_id, False))
                    elif not self._check_values_ok(value, this_id, vals):
                        # Enabled parent, with the wrong values.
                        repr_value = self.PARENT_VALUE.format(value)
                        if len(vals) == 1:
                            help_text = self.IGNORED_STATUS_VALUE.format(
                                this_id, repr_value, repr(vals[0]))
                        else:
                            help_text = self.IGNORED_STATUS_VALUES.format(
                                this_id, repr_value, repr(vals))
                        self.ignored_dict.setdefault(child_id, {})
                        self.ignored_dict[child_id].update(
                            {this_id: help_text})
                        if child_id in self.enabled_dict:
                            child_list = self.enabled_dict[child_id]
                            if this_id in child_list:
                                child_list.remove(this_id)
                            if not child_list:
                                self.enabled_dict.pop(child_id)
                        id_stack.insert(1, (child_id, True))
                    else:
                        # Enabled parent, value is ok.
                        self.enabled_dict.setdefault(child_id, [])
                        if this_id not in self.enabled_dict[child_id]:
                            self.enabled_dict[child_id].append(this_id)
                        if this_id in self.ignored_dict.get(child_id, {}):
                            self.ignored_dict[child_id].pop(this_id)
                        if (child_id in self.ignored_dict and
                                self.ignored_dict[child_id] == {}):
                            self.ignored_dict.pop(child_id)
                        id_stack.insert(1, (child_id, False))
            id_stack.pop(0)
        return update_id_list
    def validate(self, config, meta_config=None):
        """Report settings whose state disagrees with the triggers."""
        self.reports = []
        if meta_config is None:
            meta_config = rose.config.ConfigNode()
        if not hasattr(self, 'trigger_family_lookup'):
            self._setup_triggers(meta_config)
        enabled = rose.config.ConfigNode.STATE_NORMAL
        trig_ignored = rose.config.ConfigNode.STATE_SYST_IGNORED
        user_ignored = rose.config.ConfigNode.STATE_USER_IGNORED
        state_map = {enabled: 'enabled ',
                     trig_ignored: 'trig-ignored',
                     user_ignored: 'user-ignored'}
        # Structural problems (cycles, bad expressions) trump state checks.
        invalid_trigger_reports = self.validate_dependencies(config,
                                                             meta_config)
        if invalid_trigger_reports:
            return invalid_trigger_reports
        # Run the transform on a copy; anything it changed is an error here.
        macro_config = copy.deepcopy(config)
        trig_config, reports = self.transform(macro_config, meta_config)
        transform_reports = copy.deepcopy(reports)
        del self.reports[:]
        for report in transform_reports:
            config_node = config.get([report.section, report.option])  # NOTE(review): unused.
            trig_config_node = trig_config.get([report.section, report.option])
            if report.option is None:
                value = None
            else:
                value = trig_config_node.value
            after_state_string = state_map[trig_config_node.state].strip()
            info = self.ERROR_BAD_STATE.format(after_state_string)
            self.add_report(report.section, report.option,
                            value, info)
        return self.reports
    def validate_dependencies(self, config, meta_config):
        """Validate the trigger setup - e.g. check for cyclic dependencies."""
        self.reports = []
        if meta_config is None:
            meta_config = rose.config.ConfigNode()
        if not hasattr(self, 'trigger_family_lookup'):
            self._setup_triggers(meta_config)
        config_sections = config.value.keys()
        meta_settings = [k for k in meta_config.value.keys()
                         if not meta_config.value[k].is_ignored()]
        # allowed_repetitions bounds how many times an id may legitimately
        # recur in a walk (once per trigger that references it).
        allowed_repetitions = {}
        trigger_ids = self.trigger_family_lookup.keys()
        trigger_ids.sort()
        for var_id in trigger_ids:
            allowed_repetitions[var_id] = 0
        for id_value_dict in self.trigger_family_lookup.values():
            for var_id in id_value_dict:
                allowed_repetitions.setdefault(var_id, 0)
                allowed_repetitions[var_id] += 1
        for start_id in trigger_ids:
            id_value_dict = self._get_family_dict(start_id, config,
                                                  meta_config)
            triggered_ids = id_value_dict.keys()
            triggered_ids.sort()
            if self._check_is_id_dupl(start_id, meta_config):
                # A duplicate section may only trigger within itself.
                st_sect, st_opt = self._get_section_option_from_id(start_id)
                for tr_id in triggered_ids:
                    tr_sect, tr_opt = self._get_section_option_from_id(tr_id)
                    if tr_sect != st_sect:
                        return self._get_error_report_for_id(
                            start_id, config,
                            self.ERROR_DUPL_TRIG.format(st_sect))
            # Syntax-check any logical-rule value expressions.
            for value_list in id_value_dict.values():
                for string in [s for s in value_list if s is not None]:
                    if self.rec_rule.search(string):
                        try:
                            self.evaluate_trig_rule(string, start_id, '')
                        except rose.macros.rule.RuleValueError:
                            continue
                        except Exception:
                            return self._get_error_report_for_id(
                                start_id, config,
                                self.ERROR_BAD_EXPR.format(string))
            # Walk the dependents, looking for cycles and missing metadata.
            stack = [(start_id, triggered_ids)]
            id_list = []
            while stack:
                var_id, child_ids = stack[0]
                base_id = self._get_stripped_id(var_id, meta_config)
                if base_id not in meta_settings:
                    return self._get_error_report_for_id(
                        var_id, config, self.ERROR_MISSING_METADATA)
                id_list.append(var_id)
                child_ids.sort()
                if var_id in config_sections:
                    child_ids += config.get([var_id]).value.keys()
                for child_id in child_ids:
                    base_id = self._get_stripped_id(child_id, meta_config)
                    if base_id not in meta_settings:
                        return self._get_error_report_for_id(
                            child_id, config, self.ERROR_MISSING_METADATA)
                    if child_id in self.trigger_family_lookup:
                        grandchildren = (
                            self.trigger_family_lookup[child_id].keys())
                        grandchildren.sort()
                        stack.insert(1, (child_id, grandchildren))
                        if (id_list.count(child_id) + 1 >
                                allowed_repetitions[child_id] and
                                id_list.count(child_id) >= 2):
                            # Then it may be looping cyclically.
                            duplicate_seq = self._get_dup_sequence(id_list,
                                                                  child_id)
                            if duplicate_seq:
                                return self._get_error_report_for_id(
                                    var_id, config,
                                    self.ERROR_CYCLIC.format(child_id, var_id))
                stack.pop(0)
        return []
    def _get_duplicate_config_sections(self, config_data,
                                       config_sections=None):
        """Map each base duplicate section name to its indexed instances."""
        if config_sections is None:
            config_sections = self._get_config_sections(config_data)
        config_sections_duplicate_map = {}
        for section in config_sections:
            if "(" in section:
                # e.g. "namelist:foo(1)" -> base "namelist:foo".
                base_section = section.split("(")[0]
                config_sections_duplicate_map.setdefault(base_section, [])
                config_sections_duplicate_map[base_section].append(section)
        return config_sections_duplicate_map
    def _get_family_dict(self, setting_id, config_data, meta_config):
        """Return {child id: allowed values} for *setting_id*'s trigger,
        with duplicate-section ids translated to concrete instances.
        """
        if self._check_is_id_dupl(setting_id, meta_config):
            # Look up via the stripped (base) id, then re-index children
            # that live in the same duplicate section instance.
            sect, opt = self._get_section_option_from_id(setting_id)
            base_sect = rose.macro.REC_ID_STRIP.sub("", sect)
            trig_id = self._get_id_from_section_option(base_sect, opt)
            items = self.trigger_family_lookup.get(trig_id, {}).items()
            for i, (child_id, vals) in enumerate(items):
                ch_sect, ch_opt = self._get_section_option_from_id(child_id)
                if rose.macro.REC_ID_STRIP.sub("", ch_sect) == base_sect:
                    new_id = self._get_id_from_section_option(sect, ch_opt)
                    items[i] = (new_id, vals)
            return dict(items)
        items = self.trigger_family_lookup.get(setting_id, {}).items()
        dupl_adjusted_items = []
        while items:
            child_id, vals = items.pop(0)
            alt_ids = self._get_id_duplicates(
                child_id, config_data, meta_config)
            if alt_ids:
                for alt_id in alt_ids:
                    dupl_adjusted_items.append((alt_id, vals))
            else:
                dupl_adjusted_items.append((child_id, vals))
        return dict(dupl_adjusted_items)
    def _get_id_duplicates(self, setting_id, config_data, meta_config,
                           config_sections_duplicate_map=None):
        """Return one id per duplicate section instance of *setting_id*."""
        dupl_ids = []
        if self._check_is_id_dupl(setting_id, meta_config):
            sect, opt = self._get_section_option_from_id(setting_id)
            if config_sections_duplicate_map is None:
                config_sections_duplicate_map = (
                    self._get_duplicate_config_sections(config_data))
            for section in config_sections_duplicate_map.get(sect, []):
                new_id = self._get_id_from_section_option(section, opt)
                dupl_ids.append(new_id)
        return dupl_ids
    def _check_is_id_dupl(self, setting_id, meta_config):
        """Return True if *setting_id* belongs to a duplicate section.

        Results are memoised in ``self._id_is_duplicate``.
        """
        if setting_id not in self._id_is_duplicate:
            sect, opt = self._get_section_option_from_id(setting_id)
            # Note: when modifier metadata ticket goes in, change the regex.
            sect = rose.macro.REC_ID_STRIP.sub("", sect)
            node = meta_config.get([sect, rose.META_PROP_DUPLICATE])
            self._id_is_duplicate[setting_id] = (
                node is not None and node.value == rose.META_PROP_VALUE_TRUE)
        return self._id_is_duplicate[setting_id]
    def _get_stripped_id(self, setting_id, meta_config):
        """Return *setting_id* with any duplicate-section index removed."""
        if self._check_is_id_dupl(setting_id, meta_config):
            sect, opt = self._get_section_option_from_id(setting_id)
            base_sect = rose.macro.REC_ID_STRIP.sub("", sect)
            return self._get_id_from_section_option(base_sect, opt)
        return setting_id
    def check_is_id_trigger(self, setting_id, meta_config):
        """Return True if *setting_id* triggers other settings."""
        return (self._get_stripped_id(setting_id, meta_config) in
                self.trigger_family_lookup)
    def _get_error_report_for_id(self, variable_id, config, error_string):
        """Add an error report for *variable_id* and return all reports."""
        section, option = self._get_section_option_from_id(variable_id)
        node = config.get([section, option])
        value = None if node is None else node.value
        self.add_report(section, option, value, error_string)
        return self.reports
    def _get_dup_sequence(self, id_list, child_id):
        """Check that the last two sequences for child_id are not equal."""
        id_copy_list = [i for i in id_list]
        id_copy_list.reverse()
        index_1 = id_copy_list.index(child_id)
        if index_1 == 0:
            return id_copy_list
        index_2 = id_copy_list.index(child_id, index_1 + 1)
        # Identical consecutive visit sequences imply a genuine cycle.
        if (id_copy_list[:index_1] == id_copy_list[index_1 + 1: index_2]):
            return [i for i in reversed(id_copy_list[:index_2])]
        return []
    def _check_values_ok(self, value, setting_id, allowed_values):
        """Check whether a value of setting_id matches any allowed values."""
        for string in allowed_values:
            if value is not None and self.rec_rule.search(string):
                # Logical-rule expression, e.g. "this > 0".
                if self.evaluate_trig_rule(string, setting_id, value):
                    return True
            else:
                if string == value:
                    return True
        # Unresolved environment variables are given the benefit of the doubt.
        return rose.env.contains_env_var(value)
    def evaluate_trig_rule(self, rule, setting_id, value):
        """Launch an evaluation of a custom trigger expression."""
        try:
            return self._evaluated_rule_checks[(rule, value)]
        except KeyError:
            # Evaluate against a minimal single-setting config, then cache.
            section, option = self._get_section_option_from_id(setting_id)
            tiny_config = rose.config.ConfigNode()
            tiny_config.set([section, option], value)
            tiny_meta_config = rose.config.ConfigNode()
            check_failed = self.evaluator.evaluate_rule(
                rule, setting_id, tiny_config, tiny_meta_config)
            if len(self._evaluated_rule_checks) > self.MAX_STORED_RULE_CHECKS:
                # Evict an arbitrary entry to bound the cache size.
                self._evaluated_rule_checks.popitem()
            self._evaluated_rule_checks[(rule, value)] = check_failed
            return check_failed
    def get_all_ids(self):
        """Return all setting ids involved in the triggers."""
        ids = []
        for trigger_id in self.trigger_family_lookup.keys():
            ids.append(trigger_id)
        for id_value_dict in self.trigger_family_lookup.values():
            for triggered_id in id_value_dict:
                if triggered_id not in ids:
                    ids.append(triggered_id)
        return ids
| kaday/rose | lib/python/rose/macros/trigger.py | Python | gpl-3.0 | 24,332 | 0 |
import io
import os
import requests
import shutil
import sys
import zipfile
from waxe_image import __version__
# GitHub API endpoint listing the releases of the waxe-image repository.
API_RELEASES_URL = 'https://api.github.com/repos/waxe/waxe-image/releases'
# Default output directory for the extracted Angular build (overridable
# via the first command-line argument; see main()).
NG_BUILD_FOLDER = 'website'
def main(argv=sys.argv):
    """Download and unpack the Angular build matching this release.

    Usage: ``get_ng_build [TARGET_DIR]``.  The ``waxe-image-ng.zip`` asset
    attached to the GitHub release whose tag equals
    ``waxe_image.__version__`` is downloaded and extracted into TARGET_DIR
    (default ``website``), replacing any existing directory.
    """
    if len(argv) > 2:
        print('Too many arguments')
        sys.exit(1)

    global NG_BUILD_FOLDER
    if len(argv) == 2:
        NG_BUILD_FOLDER = argv[1]

    # Start from a clean target directory.
    if os.path.isdir(NG_BUILD_FOLDER):
        shutil.rmtree(NG_BUILD_FOLDER)
    if os.path.exists(NG_BUILD_FOLDER):
        # A plain file (or something unremovable) is in the way.
        print('There is an issue with the folder %s' % NG_BUILD_FOLDER)
        sys.exit(1)

    r = requests.get(API_RELEASES_URL)
    if r.status_code != 200:
        raise ValueError('Bad status code %s' % r.status_code)

    releases = r.json()
    release = None
    for rel in releases:
        if rel['tag_name'] == __version__:
            release = rel
            break
    if not release:
        raise Exception('No release found for the current version %s' %
                        __version__)

    ng_asset = None
    for asset in release['assets']:
        if 'waxe-image-ng.zip' in asset['browser_download_url']:
            ng_asset = asset
            break
    # Explicit error instead of `assert`, which is stripped under -O.
    if ng_asset is None:
        raise Exception('No waxe-image-ng.zip asset found for version %s' %
                        __version__)

    url = ng_asset['browser_download_url']
    r = requests.get(url, stream=True)
    if r.status_code != 200:
        raise ValueError('Bad status code %s' % r.status_code)

    # BUG FIX: the zip payload is binary (`r.content` is bytes), so it must
    # be wrapped in io.BytesIO -- io.StringIO raises a TypeError here.
    z = zipfile.ZipFile(io.BytesIO(r.content))
    z.extractall(NG_BUILD_FOLDER)
| waxe/waxe-image | waxe_image/scripts/get_ng_build.py | Python | mit | 1,483 | 0 |
# Importing and instantiating Gitlab once forces the lazy creation of the
# manager classes (e.g. ProjectManager) that the mocks below patch.
import gitlab
from .. import mock, wrapped_config_get
gitlab.Gitlab("") # instantiation necessary to discover gitlab ProjectManager
class _GitlabProject:
    """In-memory stand-in for a python-gitlab Project object.

    Exposes just the attributes the code under test touches: ``commits``
    (CI job statuses for ref "my_ref"), ``tags`` and ``releases``.
    *status* selects the simulated pipeline outcome: "pending",
    "failure", "allow_failure" or "success".
    """
    def __init__(self, status):
        self.commits = {"my_ref": self._Commit(status)}
        self.tags = self._Tags()
        self.releases = self._Releases()
    class _Commit:
        """Fake commit exposing its CI job statuses."""
        def __init__(self, status):
            self.statuses = self._Statuses(status)
        class _Statuses:
            """Fake status manager; ``list()`` returns canned job dicts."""
            def __init__(self, status):
                # Each scenario is a fixed list of job result dicts.
                if status == "pending":
                    self.jobs = [
                        {
                            "name": "good_job",
                            "status": "passed",
                            "allow_failure": False,
                        },
                        {
                            "name": "slow_job",
                            "status": "pending",
                            "allow_failure": False,
                        },
                    ]
                elif status == "failure":
                    self.jobs = [
                        {
                            "name": "good_job",
                            "status": "passed",
                            "allow_failure": False,
                        },
                        {"name": "bad_job", "status": "failed", "allow_failure": False},
                    ]
                elif status == "allow_failure":
                    # One failed job, but it is allowed to fail.
                    self.jobs = [
                        {
                            "name": "notsobad_job",
                            "status": "failed",
                            "allow_failure": True,
                        },
                        {
                            "name": "good_job2",
                            "status": "passed",
                            "allow_failure": False,
                        },
                    ]
                elif status == "success":
                    self.jobs = [
                        {
                            "name": "good_job1",
                            "status": "passed",
                            "allow_failure": True,
                        },
                        {
                            "name": "good_job2",
                            "status": "passed",
                            "allow_failure": False,
                        },
                    ]
            def list(self):
                return self.jobs
    class _Tags:
        """Fake tag manager; only "vmy_good_tag"/"vmy_locked_tag" exist."""
        def __init__(self):
            pass
        def get(self, version):
            if version == "vmy_good_tag":
                return self._Tag()
            elif version == "vmy_locked_tag":
                # Locked tags reject release-description updates.
                return self._Tag(locked=True)
            else:
                raise gitlab.exceptions.GitlabGetError
        class _Tag:
            """Fake tag; raises on update when locked."""
            def __init__(self, locked=False):
                self.locked = locked
            def set_release_description(self, changelog):
                if self.locked:
                    raise gitlab.exceptions.GitlabUpdateError
    class _Releases:
        """Fake release manager; creation succeeds only for known tags."""
        def __init__(self):
            pass
        def create(self, input):
            if input["name"] and input["tag_name"]:
                if (
                    input["tag_name"] == "vmy_good_tag"
                    or input["tag_name"] == "vmy_locked_tag"
                ):
                    return self._Release()
            raise gitlab.exceptions.GitlabCreateError
        class _Release:
            """Fake created release object (no behaviour needed)."""
            def __init__(self, locked=False):
                pass
def mock_gitlab(status="success"):
    """Decorator factory wiring up a fake gitlab API for the given CI status."""
    patchers = [
        mock.patch("os.environ", {"GL_TOKEN": "token"}),
        mock.patch(
            "semantic_release.hvcs.config.get", wrapped_config_get(hvcs="gitlab")
        ),
        mock.patch("gitlab.Gitlab.auth"),
        mock.patch(
            "gitlab.v4.objects.ProjectManager",
            return_value={"owner/repo": _GitlabProject(status)},
        ),
    ]

    def wraps(func):
        # Apply in reverse so the first patcher ends up outermost,
        # mimicking stacked @decorator syntax.
        decorated = func
        for patcher in reversed(patchers):
            decorated = patcher(decorated)
        return decorated

    return wraps
| relekang/python-semantic-release | tests/mocks/mock_gitlab.py | Python | mit | 4,092 | 0.000489 |
# -*- coding: utf-8 -*-
def classeq(x, y):
    """Return True when *x* and *y* are instances of exactly the same class."""
    return x.__class__ == y.__class__
class Element(object):
    """Empty marker base class; subclasses elsewhere add behaviour."""
| chaosim/dao | dao/base.py | Python | gpl-3.0 | 113 | 0.035398 |
N, M = map(int, input().split())  # mat height and width (must not exceed 6 code lines)
# Top half: growing runs of the '.|.' motif, dash-padded to the full width.
top = [('.|.' * i).center(M, '-') for i in range(1, N, 2)]
# Bottom half walks the same odd counts back down.
bottom = [('.|.' * i).center(M, '-') for i in range(N - 2, -1, -2)]
print('\n'.join(top + ['WELCOME'.center(M, '-')] + bottom))
| avtomato/HackerRank | Python/_03_Strings/_09_Designer_Door_Mat/solution.py | Python | mit | 333 | 0.006006 |
from django import template
from django.template import TOKEN_VAR, TOKEN_BLOCK, TOKEN_COMMENT, TOKEN_TEXT, \
BLOCK_TAG_START, VARIABLE_TAG_START, VARIABLE_TAG_END, BLOCK_TAG_END
register = template.Library()
class RawNode(template.Node):
    """Template node that emits a pre-captured chunk of template source."""

    def __init__(self, data):
        self.data = data

    def render(self, context):
        # No escaping or variable resolution: the stored text is the output.
        return self.data
@register.tag
def raw(parser, token):
    """
    Render as just text everything between ``{% raw %}`` and ``{% endraw %}``.
    """
    ENDRAW = 'endraw'
    pieces = []
    while parser.tokens:
        tok = parser.next_token()
        if tok.token_type == TOKEN_BLOCK and tok.contents == ENDRAW:
            return RawNode(u''.join(pieces))
        if tok.token_type == TOKEN_VAR:
            pieces.append('%s %s %s' % (VARIABLE_TAG_START, tok.contents,
                                        VARIABLE_TAG_END))
        elif tok.token_type == TOKEN_BLOCK:
            pieces.append('%s %s %s' % (BLOCK_TAG_START, tok.contents,
                                        BLOCK_TAG_END))
        elif tok.token_type == TOKEN_TEXT:
            pieces.append(tok.contents)
        # TOKEN_COMMENT tokens are dropped: django.template does not keep
        # comment contents.
    parser.unclosed_block_tag([ENDRAW])
@register.simple_tag
def start_block():
    """Return the literal block-tag opener so templates can print it."""
    return u'{%'
@register.simple_tag
def end_block():
    """Return the literal block-tag closer so templates can print it."""
    return u'%}'
| redsolution/redsolution-cms | redsolutioncms/templatetags/redsolutioncms_tags.py | Python | gpl-3.0 | 1,289 | 0.006206 |
import datetime
import os
import json
import re
import psycopg2 as dbapi2
from flask import Flask
from flask import redirect
from flask import request
from flask import render_template
from flask.helpers import url_for
from store import Store
from fixture import *
from sponsors import *
from curlers import *
from clubs import *
from psycopg2.tests import dbapi20
class Clubs:
    """Plain value object mirroring one row of the CLUBS table.

    ``place`` carries the COUNTRY_ID foreign key stored in the PLACES column.
    """

    def __init__(self, name, place, year, chair, number_of_members, rewardnumber):
        self.name = name
        self.place = place
        self.year = year
        self.chair = chair
        self.number_of_members = number_of_members
        self.rewardnumber = rewardnumber
def init_clubs_db(cursor):
    """Create the CLUBS table (if missing) and load the sample rows.

    PLACES is a foreign key into COUNTRIES(COUNTRY_ID), so COUNTRIES must
    exist before this runs.
    NOTE(review): add_test_data() runs unconditionally, so calling this
    against an existing database inserts the sample rows again.
    """
    cursor.execute( """CREATE TABLE IF NOT EXISTS CLUBS (
    ID SERIAL,
    NAME VARCHAR(80) NOT NULL,
    PLACES INTEGER NOT NULL REFERENCES COUNTRIES(COUNTRY_ID) ON DELETE CASCADE ON UPDATE CASCADE,
    YEAR NUMERIC(4) NOT NULL,
    CHAIR VARCHAR(80) NOT NULL,
    NUMBER_OF_MEMBERS INTEGER NOT NULL,
    REWARDNUMBER INTEGER,
    PRIMARY KEY(ID)
    )""")
    add_test_data(cursor)
def add_test_data(cursor):
    """Insert four sample club rows used as development/demo data.

    The numeric second value of each row is the COUNTRY_ID foreign key.
    """
    cursor.execute("""
    INSERT INTO CLUBS
    (NAME, PLACES, YEAR, CHAIR, NUMBER_OF_MEMBERS, REWARDNUMBER) VALUES (
    'Orlando Curling Club',
    1,
    2014,
    'Bryan Pittard',
    '7865',
    '0');
    INSERT INTO CLUBS
    (NAME, PLACES, YEAR, CHAIR, NUMBER_OF_MEMBERS, REWARDNUMBER) VALUES (
    'Wausau Curling Club',
    1,
    1896,
    'Jennie Moran',
    '54403',
    '11');
    INSERT INTO CLUBS
    (NAME, PLACES, YEAR, CHAIR, NUMBER_OF_MEMBERS, REWARDNUMBER) VALUES (
    'Fenerbahçe',
    3,
    2011,
    'Aziz Yıldırım',
    '9002',
    '1');
    INSERT INTO CLUBS
    (NAME, PLACES, YEAR, CHAIR, NUMBER_OF_MEMBERS, REWARDNUMBER) VALUES (
    'Galatasaray',
    3,
    2000,
    'Dursun Aydın Ozbek',
    '17864',
    '5'
    )""")
def add_club(app, request, club):
    """Insert *club* as a new row in the CLUBS table.

    ``request`` is unused but kept for call-site compatibility.  Database
    errors are logged and rolled back rather than propagated, matching the
    best-effort style of the other helpers in this module.
    """
    connection = dbapi2.connect(app.config['dsn'])
    try:
        # The original created two cursors back to back, leaking the first.
        cursor = connection.cursor()
        try:
            cursor.execute("""INSERT INTO CLUBS
                (NAME, PLACES, YEAR, CHAIR, NUMBER_OF_MEMBERS, REWARDNUMBER) VALUES (
                %s,
                %s,
                %s,
                %s,
                %s,
                %s
                )""", (club.name, club.place, club.year,
                       club.chair, club.number_of_members, club.rewardnumber))
            connection.commit()  # commit only after a successful insert
        finally:
            cursor.close()
    except dbapi2.Error as e:
        # Rollback belongs to the connection; the original called the
        # non-existent cursor.rollback(), which masked the real error.
        print(e.pgerror)
        connection.rollback()
    finally:
        connection.close()
def delete_club(app, id):
    """Delete the club row with primary key *id*; commits on success.

    Database errors are logged and rolled back rather than propagated.
    """
    connection = dbapi2.connect(app.config['dsn'])
    try:
        cursor = connection.cursor()
        try:
            cursor.execute('DELETE FROM CLUBS WHERE ID = %s', (id,))
            connection.commit()
        finally:
            cursor.close()
    except dbapi2.Error as e:
        # Rollback belongs to the connection; the original called the
        # non-existent cursor.rollback().
        print(e.pgerror)
        connection.rollback()
    finally:
        connection.close()
def get_clubs_page(app):
    """Flask view body for the clubs list page.

    GET renders the list with the country choices; POST dispatches on which
    submit button is present in the form ("add", "delete" or "search").
    NOTE(review): a POST without any of the known form keys falls through
    and implicitly returns None -- confirm the route only receives these
    three forms.
    """
    if request.method == 'GET':
        now = datetime.datetime.now()
        clubs = get_all_clubs(app)
        countries = get_country_names(app)
        return render_template('clubs.html',
                               clubs=clubs, countries=countries,
                               current_time=now.ctime())
    elif "add" in request.form:
        # Build a Clubs value object straight from the submitted form fields.
        club = Clubs(request.form['name'],
                     request.form['place'],
                     request.form['year'],
                     request.form['chair'],
                     request.form['number_of_members'],
                     request.form['rewardnumber'])
        add_club(app, request, club)
        return redirect(url_for('clubs_page'))
    elif "delete" in request.form:
        # Assumes checkbox inputs are named with a 9-character prefix before
        # the numeric club id (line[9:]) -- TODO confirm against the template.
        for line in request.form:
            if "checkbox" in line:
                delete_club(app, int(line[9:]))
        return redirect(url_for('clubs_page'))
    elif 'search' in request.form:
        clubs = search_club(app, request.form['club_to_search'])
        return render_template('clubs_search_page.html', clubs = clubs)
def get_clubs_edit_page(app,club_id):
    """Flask view body for editing one club: GET shows the form, POST saves.

    The POST branch reads the row id from the hidden ``id`` form field, not
    from *club_id*.
    """
    if request.method == 'GET':
        now = datetime.datetime.now()
        club = get_club(app, club_id)
        countries = get_country_names(app)
        return render_template('clubs_edit_page.html', current_time=now.ctime(), club=club, countries=countries)
    if request.method == 'POST':
        # Rebuild the value object from the submitted form and persist it.
        club = Clubs(request.form['name'],
                     request.form['place'],
                     request.form['year'],
                     request.form['chair'],
                     request.form['number_of_members'],
                     request.form['rewardnumber'])
        update_club(app, request.form['id'], club)
        return redirect(url_for('clubs_page'))
def get_country_names(app):
    """Return all (COUNTRY_ID, COUNTRY_NAME) pairs for the club forms.

    Returns an empty list on database errors instead of raising NameError
    (the original left ``countries`` unbound on the failure path).
    """
    countries = []
    connection = dbapi2.connect(app.config['dsn'])
    try:
        cursor = connection.cursor()
        try:
            cursor.execute('SELECT COUNTRY_ID,COUNTRY_NAME FROM COUNTRIES')
            countries = cursor.fetchall()
        finally:
            cursor.close()
    except dbapi2.Error as e:
        print(e.pgerror)
        connection.rollback()
    finally:
        connection.close()
    return countries
def get_club(app, club_id):
    """Fetch one club row (joined with its country name) by primary key.

    Returns the row tuple, or None when no such club exists or on error.
    """
    club = None
    connection = dbapi2.connect(app.config['dsn'])
    try:
        cursor = connection.cursor()
        try:
            # Query parameters must be a sequence: the original passed the
            # bare club_id, which psycopg2 rejects (or misinterprets for
            # strings).
            cursor.execute('''
            SELECT C.ID, C.NAME, S.COUNTRY_NAME, C.YEAR, C.CHAIR, C.NUMBER_OF_MEMBERS, C.REWARDNUMBER
            FROM CLUBS AS C,COUNTRIES AS S
            WHERE (
            C.ID=%s AND C.PLACES=S.COUNTRY_ID
            )
            ''', (club_id,))
            club = cursor.fetchone()
        finally:
            cursor.close()
    except dbapi2.Error as e:
        # Rollback belongs to the connection; cursor.rollback() (as in the
        # original) does not exist.
        print(e.pgerror)
        connection.rollback()
    finally:
        connection.close()
    return club
def update_club(app, id, club):
    """Overwrite the club row *id* with the values held by *club*.

    Commits on success; logs and rolls back on database errors.
    """
    connection = dbapi2.connect(app.config['dsn'])
    try:
        cursor = connection.cursor()
        try:
            cursor.execute("""
            UPDATE CLUBS
            SET NAME = %s,
            PLACES = %s,
            YEAR = %s,
            CHAIR=%s,
            NUMBER_OF_MEMBERS=%s,
            REWARDNUMBER= %s
            WHERE ID= %s
            """, (club.name, club.place, club.year,
                  club.chair, club.number_of_members, club.rewardnumber, id))
            connection.commit()
        finally:
            cursor.close()
    except dbapi2.Error as e:
        # Rollback belongs to the connection; the original called the
        # non-existent cursor.rollback().
        print(e.pgerror)
        connection.rollback()
    finally:
        connection.close()
def get_all_clubs(app):
    """Return every club row joined with its country name.

    Returns an empty list on database errors instead of raising NameError
    (the original left ``clubs`` unbound on the failure path).  The stray
    ``print(1)`` debug statement has been removed.
    """
    clubs = []
    connection = dbapi2.connect(app.config['dsn'])
    try:
        cursor = connection.cursor()
        try:
            cursor.execute('''
            SELECT C.ID, C.NAME, K.COUNTRY_NAME, C.YEAR, C.CHAIR, C.NUMBER_OF_MEMBERS, C.REWARDNUMBER
            FROM CLUBS AS C, COUNTRIES AS K
            WHERE C.PLACES=K.COUNTRY_ID
            ''')
            clubs = cursor.fetchall()
        finally:
            cursor.close()
    except dbapi2.Error as e:
        # Rollback belongs to the connection; cursor.rollback() does not
        # exist.
        print(e.pgerror)
        connection.rollback()
    finally:
        connection.close()
    return clubs
def search_club(app, name):
    """Case-insensitive search for clubs whose name matches *name* exactly.

    Returns matching rows joined with their country name; an empty list on
    database errors (the original left ``clubs`` unbound on failure).
    """
    clubs = []
    connection = dbapi2.connect(app.config['dsn'])
    try:
        cursor = connection.cursor()
        try:
            cursor.execute("""
            SELECT C.ID, C.NAME, S.COUNTRY_NAME, C.YEAR, C.CHAIR, C.NUMBER_OF_MEMBERS, C.REWARDNUMBER
            FROM CLUBS AS C , COUNTRIES AS S
            WHERE(
            UPPER(C.NAME)=UPPER(%s) AND
            C.PLACES=S.COUNTRY_ID
            )""", (name,))
            clubs = cursor.fetchall()
        finally:
            cursor.close()
    except dbapi2.Error as e:  # fixed: original said ``bapi2`` (NameError)
        print(e.pgerror)
        connection.rollback()
    finally:
        connection.close()
    return clubs
| itucsdb1509/itucsdb1509 | clubs.py | Python | gpl-3.0 | 8,354 | 0.003953 |
# coding=utf-8
# Copyright 2022 The Trax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for trax.shapes."""
from absl.testing import absltest
import numpy as np
from trax import shapes
from trax.shapes import ShapeDtype
class ShapesTest(absltest.TestCase):
  """Unit tests for ShapeDtype and the helpers in trax.shapes."""

  def test_constructor_and_read_properties(self):
    spec = ShapeDtype((2, 3), np.int32)
    self.assertEqual(spec.shape, (2, 3))
    self.assertEqual(spec.dtype, np.int32)

  def test_default_dtype_is_float32(self):
    spec = ShapeDtype((2, 3))
    self.assertEqual(spec.shape, (2, 3))
    self.assertEqual(spec.dtype, np.float32)

  def test_signature_on_ndarray(self):
    arr = np.array([[2, 3, 5, 7],
                    [11, 13, 17, 19]],
                   dtype=np.int16)
    spec = shapes.signature(arr)
    self.assertEqual(spec.shape, (2, 4))
    self.assertEqual(spec.dtype, np.int16)

  def test_shape_dtype_repr(self):
    spec = ShapeDtype((2, 3))
    self.assertEqual(
        '{}'.format(spec),
        "ShapeDtype{shape:(2, 3), dtype:<class 'numpy.float32'>}")

  def test_splice_signatures(self):
    s1, s2, s3, s4, s5 = [ShapeDtype((n,)) for n in range(1, 6)]
    # Signatures can be ShapeDtype instances, tuples of 2+ ShapeDtype
    # instances, or empty tuples.
    spliced = shapes.splice_signatures(s1, (s2, s3, s4), (), s5)
    self.assertEqual(spliced, (s1, s2, s3, s4, s5))

  def test_len_signature(self):
    """Signatures of all sizes should give correct length when asked."""
    x1 = np.array([1, 2, 3])
    x2 = np.array([10, 20, 30])
    # pylint: disable=g-generic-assert
    self.assertEqual(len(shapes.signature(())), 0)
    self.assertEqual(len(shapes.signature(x1)), 1)  # bare array, NOT a tuple
    self.assertEqual(len(shapes.signature((x1, x2))), 2)
    # pylint: enable=g-generic-assert
# Allow running this test module directly from the command line.
if __name__ == '__main__':
  absltest.main()
| google/trax | trax/shapes_test.py | Python | apache-2.0 | 2,632 | 0.00304 |
import functools
import itertools
import types
import unittest
import six
from chainer.testing import _bundle
from chainer import utils
def _param_to_str(obj):
if isinstance(obj, type):
return obj.__name__
return repr(obj)
def _shorten(s, maxlen):
# Shortens the string down to maxlen, by replacing the middle part with
# a 3-dots string '...'.
ellipsis = '...'
if len(s) <= maxlen:
return s
n1 = (maxlen - len(ellipsis)) // 2
n2 = maxlen - len(ellipsis) - n1
s = s[:n1] + ellipsis + s[-n2:]
assert len(s) == maxlen
return s
def _make_class_name(base_class_name, i_param, param):
    """Build a readable class name for one combination of parameters."""
    SINGLE_PARAM_MAXLEN = 100  # Length limit of a single parameter value
    PARAMS_MAXLEN = 5000  # Length limit of the whole parameters part
    pieces = []
    for key, value in param.items():
        rendered = _shorten(_param_to_str(value), SINGLE_PARAM_MAXLEN)
        pieces.append('{}={}'.format(key, rendered))
    param_strs = _shorten(', '.join(pieces), PARAMS_MAXLEN)
    return '{}_param_{}_{{{}}}'.format(base_class_name, i_param, param_strs)
def _parameterize_test_case_generator(base, params):
    # Defines the logic to generate parameterized test case classes.
    #
    # Yields (class_name, members_dict, method_generator) triples, one per
    # parameter dict, for consumption by _bundle.
    #
    # NOTE(review): __str__ and method_generator close over the loop
    # variable ``param`` (late binding); this is only safe if the consumer
    # materializes each yielded class before advancing the generator --
    # confirm that _bundle does so.
    for i, param in enumerate(params):
        cls_name = _make_class_name(base.__name__, i, param)
        def __str__(self):
            name = base.__str__(self)
            return '%s parameter: %s' % (name, param)
        mb = {'__str__': __str__}
        for k, v in six.iteritems(param):
            if isinstance(v, types.FunctionType):
                # Wrap function-valued parameters so they become methods
                # that ignore ``self``; create_new_v binds the current v
                # eagerly (it is called immediately below).
                def create_new_v():
                    f = v
                    def new_v(self, *args, **kwargs):
                        return f(*args, **kwargs)
                    return new_v
                mb[k] = create_new_v()
            else:
                mb[k] = v
        def method_generator(base_method):
            # Generates a wrapped test method
            # Bind to a new variable.
            param2 = param
            @functools.wraps(base_method)
            def new_method(self, *args, **kwargs):
                try:
                    return base_method(self, *args, **kwargs)
                except unittest.SkipTest:
                    raise
                except Exception as e:
                    # Re-raise with the parameter values appended so the
                    # failing combination is visible in the report.
                    s = six.StringIO()
                    s.write('Parameterized test failed.\n\n')
                    s.write('Base test method: {}.{}\n'.format(
                        base.__name__, base_method.__name__))
                    s.write('Test parameters:\n')
                    for k, v in six.iteritems(param2):
                        s.write('  {}: {}\n'.format(k, v))
                    utils._raise_from(e.__class__, s.getvalue(), e)
            return new_method
        yield (cls_name, mb, method_generator)
def parameterize(*params):
    # TODO(niboshi): Add documentation
    def make_cases(base):
        return _parameterize_test_case_generator(base, params)

    return _bundle.make_decorator(make_cases)
def _values_to_dicts(names, values):
    """Turn a comma-separated name string and value rows into param dicts."""
    assert isinstance(names, six.string_types)
    assert isinstance(values, (tuple, list))

    keys = names.split(',')

    def pair_up(row):
        # A single name takes the whole row as its value; otherwise the row
        # must be a sequence matching the names one-to-one.
        if len(keys) == 1:
            return [(keys[0], row)]
        assert isinstance(row, (tuple, list)) and len(keys) == len(row)
        return zip(keys, row)

    return [dict(pair_up(row)) for row in values]
def from_pytest_parameterize(names, values):
    # Pytest-style parameterization.
    # TODO(niboshi): Add documentation
    param_dicts = _values_to_dicts(names, values)
    return param_dicts
def parameterize_pytest(names, values):
    # Pytest-style parameterization.
    # TODO(niboshi): Add documentation
    param_dicts = from_pytest_parameterize(names, values)
    return parameterize(*param_dicts)
def product(parameter):
    # TODO(niboshi): Add documentation
    if isinstance(parameter, dict):
        # Normalize the dict form to the list-of-lists form and recurse.
        return product([
            _values_to_dicts(names, values)
            for names, values in sorted(parameter.items())])
    if isinstance(parameter, list):
        # list of lists of dicts
        if any(not isinstance(elem, list) for elem in parameter):
            raise TypeError('parameter must be list of lists of dicts')
        if any(not isinstance(d, dict) for group in parameter for d in group):
            raise TypeError('parameter must be list of lists of dicts')
        combined = []
        for combo in itertools.product(*parameter):
            merged = {}
            for d in combo:
                merged.update(d)
            combined.append(merged)
        return combined
    raise TypeError(
        'parameter must be either dict or list. Actual: {}'.format(
            type(parameter)))
def product_dict(*parameters):
    # TODO(niboshi): Add documentation
    combined = []
    for dicts in itertools.product(*parameters):
        merged = {}
        # Later dicts win on key collisions, as in the original
        # comprehension's left-to-right iteration order.
        for dic in dicts:
            for k, v in six.iteritems(dic):
                merged[k] = v
        combined.append(merged)
    return combined
| okuta/chainer | chainer/testing/parameterized.py | Python | mit | 4,970 | 0.000201 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.