| text (string, lengths 4 to 1.02M) | meta (dict) |
|---|---|
import copy
from django.contrib.gis.db.models import GeometryField
from django.contrib.gis.db.models.sql import AreaField, DistanceField
from django.test import SimpleTestCase
class FieldsTests(SimpleTestCase):
def test_area_field_deepcopy(self):
field = AreaField(None)
self.assertEqual(copy.deepcopy(field), field)
def test_distance_field_deepcopy(self):
field = DistanceField(None)
self.assertEqual(copy.deepcopy(field), field)
class GeometryFieldTests(SimpleTestCase):
def test_deconstruct_empty(self):
field = GeometryField()
*_, kwargs = field.deconstruct()
self.assertEqual(kwargs, {"srid": 4326})
def test_deconstruct_values(self):
field = GeometryField(
srid=4067,
dim=3,
geography=True,
extent=(
50199.4814,
6582464.0358,
-50000.0,
761274.6247,
7799839.8902,
50000.0,
),
tolerance=0.01,
)
*_, kwargs = field.deconstruct()
self.assertEqual(
kwargs,
{
"srid": 4067,
"dim": 3,
"geography": True,
"extent": (
50199.4814,
6582464.0358,
-50000.0,
761274.6247,
7799839.8902,
50000.0,
),
"tolerance": 0.01,
},
)
|
{
"content_hash": "3eeb28ff817eeaade6778e5ce7445db5",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 69,
"avg_line_length": 27.732142857142858,
"alnum_prop": 0.49452672247263363,
"repo_name": "monetate/django",
"id": "933514fee7a2450d84648d3e2572114262a7420e",
"size": "1553",
"binary": false,
"copies": "17",
"ref": "refs/heads/main",
"path": "tests/gis_tests/test_fields.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "91986"
},
{
"name": "HTML",
"bytes": "238949"
},
{
"name": "JavaScript",
"bytes": "157441"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Procfile",
"bytes": "47"
},
{
"name": "Python",
"bytes": "16193262"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "392"
}
],
"symlink_target": ""
}
|
from validator.BaseValidator import BaseValidator
class ConditionalValidator(BaseValidator):
operator = None  # should be a lambda expression which returns a boolean value
message = "This value is not valid"
def validate(self, fieldA, fieldB):
fieldA = super(ConditionalValidator, self).validate(fieldA)
fieldB = super(ConditionalValidator, self).validate(fieldB)
return self.operator(fieldA, fieldB)
def __init__(self, params):
super(ConditionalValidator, self).__init__(params)
if 'fieldB' in params:
self.fieldB = params.get('fieldB')
else:
raise ValueError("Missing conditional field parameter")
if 'operator' in params:
self.operator = eval(params.get('operator'))
else:
raise ValueError("Missing operator parameter")
if self.operator.__name__ != "<lambda>":
raise ValueError("Operator should be an lambda function")
|
{
"content_hash": "124e7203dbfd029521d9b15b49f458eb",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 80,
"avg_line_length": 33.689655172413794,
"alnum_prop": 0.6550665301944729,
"repo_name": "mkesicki/excel_validator",
"id": "a50e607ebb361fff54c16acfacb2e6d1ef1ccb0e",
"size": "977",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "validator/ConditionalValidator.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "99"
},
{
"name": "Python",
"bytes": "16646"
},
{
"name": "Shell",
"bytes": "2189"
}
],
"symlink_target": ""
}
|
from .extdocker import *
from .helpers import *
|
{
"content_hash": "46107d47602ea9b94cd123eeba9d29e5",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 24,
"avg_line_length": 23.5,
"alnum_prop": 0.7659574468085106,
"repo_name": "dtwardow/docker-py-helpers",
"id": "c61742198690a7b3283c678c68e57b06e9d0271d",
"size": "47",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "extdocker/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "15199"
}
],
"symlink_target": ""
}
|
if True:
print('Hello')
|
{
"content_hash": "9c30329c0540642ac4c82b6512dfc271",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 18,
"avg_line_length": 14,
"alnum_prop": 0.5714285714285714,
"repo_name": "Evmorov/ruby-coffeescript",
"id": "fd509f2656e39aaedbba8bc96c198ee3bf4373f1",
"size": "28",
"binary": false,
"copies": "2",
"ref": "refs/heads/source",
"path": "code/python/conditional_if.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "474"
},
{
"name": "CoffeeScript",
"bytes": "11785"
},
{
"name": "HTML",
"bytes": "1915"
},
{
"name": "JavaScript",
"bytes": "70"
},
{
"name": "Ruby",
"bytes": "15174"
}
],
"symlink_target": ""
}
|
import logging
import sys
from ming.orm import ThreadLocalORMSession
from pylons import tmpl_context as c, app_globals as g
from allura import model as M
from allura.lib import helpers as h
from allura.lib import utils
log = logging.getLogger(__name__)
def main(options):
log.addHandler(logging.StreamHandler(sys.stdout))
log.setLevel(getattr(logging, options.log_level.upper()))
nbhd = M.Neighborhood.query.get(name=options.neighborhood)
if not nbhd:
return 'Invalid neighborhood "%s".' % options.neighborhood
admin_role = M.ProjectRole.by_name(
'Admin', project=nbhd.neighborhood_project)
nbhd_admin = admin_role.users_with_role(
project=nbhd.neighborhood_project)[0].user
log.info('Making updates as neighborhood admin "%s"' % nbhd_admin.username)
q = {'neighborhood_id': nbhd._id,
'is_nbhd_project': False, 'deleted': False}
private_count = public_count = 0
for projects in utils.chunked_find(M.Project, q):
for p in projects:
role_anon = M.ProjectRole.upsert(name='*anonymous',
project_id=p.root_project._id)
if M.ACE.allow(role_anon._id, 'read') not in p.acl:
if options.test:
log.info('Would be made public: "%s"' % p.shortname)
else:
log.info('Making public: "%s"' % p.shortname)
p.acl.append(M.ACE.allow(role_anon._id, 'read'))
with h.push_config(c, project=p, user=nbhd_admin):
ThreadLocalORMSession.flush_all()
g.post_event('project_updated')
private_count += 1
else:
log.info('Already public: "%s"' % p.shortname)
public_count += 1
log.info('Already public: %s' % public_count)
if options.test:
log.info('Would be made public: %s' % private_count)
else:
log.info('Made public: %s' % private_count)
return 0
def parser():
import argparse
parser = argparse.ArgumentParser(
description='Make all projects in a neighborhood public.')
parser.add_argument('neighborhood', metavar='NEIGHBORHOOD', type=str,
help='Neighborhood name.')
parser.add_argument('--test', dest='test', default=False,
action='store_true',
help='Run in test mode (no updates will be applied).')
parser.add_argument('--log', dest='log_level', default='INFO',
help='Log level (DEBUG, INFO, WARNING, ERROR, CRITICAL).')
return parser
def parse_options():
return parser().parse_args()
if __name__ == '__main__':
sys.exit(main(parse_options()))
|
{
"content_hash": "6fdfbde6a0f4aeb0a62277b456c7810f",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 82,
"avg_line_length": 36.93333333333333,
"alnum_prop": 0.5902527075812274,
"repo_name": "heiths/allura",
"id": "9cc6494cd3ec1e7da0e6116316f8e2b6763a4034",
"size": "3640",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "scripts/publicize-neighborhood.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "6142"
},
{
"name": "CSS",
"bytes": "173671"
},
{
"name": "HTML",
"bytes": "751039"
},
{
"name": "JavaScript",
"bytes": "1136845"
},
{
"name": "Makefile",
"bytes": "7788"
},
{
"name": "Puppet",
"bytes": "6872"
},
{
"name": "Python",
"bytes": "4238265"
},
{
"name": "RAML",
"bytes": "26153"
},
{
"name": "Ruby",
"bytes": "7006"
},
{
"name": "Shell",
"bytes": "131827"
},
{
"name": "XSLT",
"bytes": "3357"
}
],
"symlink_target": ""
}
|
"""
Goal: Define the routes for general pages
@authors:
Andrei Sura <sura.andrei@gmail.com>
Ruchi Vivek Desai <ruchivdesai@gmail.com>
Sanath Pasumarthy <sanath@ufl.edu>
@see https://flask-login.readthedocs.org/en/latest/
@see https://pythonhosted.org/Flask-Principal/
"""
import hashlib
import base64
import datetime
import uuid
from flask import current_app
from flask import redirect
from flask import render_template
from flask import request
from flask import session
from flask import url_for
from app.models.log_entity import LogEntity
from app.models.web_session_entity import WebSessionEntity
from app.models.user_agent_entity import UserAgentEntity
from wtforms import Form, TextField, PasswordField, HiddenField, validators
from flask_login import LoginManager
from flask_login import login_user, logout_user, current_user
from flask_principal import \
Identity, AnonymousIdentity, identity_changed, identity_loaded, RoleNeed
from app.main import app
from app import utils
from app.models.user_entity import UserEntity
# set the login manager for the app
login_manager = LoginManager(app)
# Possible options: strong, basic, None
login_manager.session_protection = "strong"
login_manager.login_message = ""
login_manager.login_message_category = "info"
@login_manager.user_loader
def load_user(user_id):
"""Return the user from the database"""
return UserEntity.get_by_id(user_id)
@login_manager.unauthorized_handler
def unauthorized():
""" Returns a message for the unauthorized users """
return 'Please <a href="{}">login</a> first.'.format(url_for('index'))
@app.errorhandler(403)
def page_not_found(e):
"""
Redirect to the login page when probing a protected resource before login
"""
return redirect(url_for('index') + "?next={}".format(request.url))
class LoginForm(Form):
""" Declare the validation rules for the login form """
next = HiddenField(default='')
# email = TextField('Email')
email = TextField('Email',
[validators.Required(),
validators.Length(min=4, max=25)])
password = PasswordField('Password',
[validators.Required(),
validators.Length(min=6, max=25)])
def get_user_agent():
"""Find an existing user agent or insert a new one"""
# The raw user agent string received from the browser
uag = request.user_agent
hash = utils.compute_text_md5(uag.string)
# The entity representing the user agent
user_agent = UserAgentEntity.get_by_hash(hash)
if user_agent is None:
platform = uag.platform if uag.platform is not None else ''
browser = uag.browser if uag.browser is not None else ''
version = uag.version if uag.version is not None else ''
language = uag.language if uag.language is not None else ''
user_agent = UserAgentEntity.create(user_agent=uag.string,
hash=hash,
platform=platform,
browser=browser,
version=version,
language=language)
return user_agent
@app.before_request
def check_session_id():
"""
Generate a UUID and store it in the session
as well as in the WebSession table.
"""
user_agent = get_user_agent()
if 'uuid' not in session:
session['uuid'] = str(uuid.uuid4())
WebSessionEntity.create(session_id=session['uuid'],
user_id=current_user.get_id(),
ip=request.remote_addr,
date_time=datetime.datetime.now(),
user_agent=user_agent)
return
if current_user.is_authenticated():
# update the user_id on the first request after login is completed
session_id = session['uuid']
web_session = WebSessionEntity.get_by_session_id(session_id)
if web_session is not None:
web_session = WebSessionEntity.update(
web_session,
user_id=current_user.get_id())
else:
app.logger.error("No row found for sess_id: {}".format(session_id))
@app.route('/', methods=['POST', 'GET'])
def index():
""" Render the login page"""
if app.config['LOGIN_USING_SHIB_AUTH']:
return render_login_shib()
return render_login_local()
def render_login_local():
""" Render the login page with username/pass
@see #index()
@see #render_login_shib()
"""
if current_user.is_authenticated():
return redirect(get_role_landing_page())
uuid = session['uuid']
form = LoginForm(request.form)
if request.method == 'POST' and form.validate():
email = form.email.data.strip() if form.email.data else ""
password = form.password.data.strip() if form.password.data else ""
app.logger.debug("{} password: {}".format(email, password))
app.logger.debug("Checking email: {}".format(email))
user = UserEntity.query.filter_by(email=email).first()
if user:
app.logger.debug("Found user object: {}".format(user))
else:
utils.flash_error("No such email: {}".format(email))
LogEntity.login(uuid, "No such email: {}".format(email))
return redirect(url_for('index'))
# if utils.is_valid_auth(app.config['SECRET_KEY'], auth.uathSalt,
# password, auth.uathPassword):
if '' == user.password_hash:
app.logger.info('Log login event for: {}'.format(user))
LogEntity.login(uuid, 'Successful login via email/password')
login_user(user, remember=False, force=False)
# Tell Flask-Principal that the identity has changed
identity_changed.send(current_app._get_current_object(),
identity=Identity(user.get_id()))
return redirect(get_role_landing_page())
else:
app.logger.info('Incorrect pass for: {}'.format(user))
LogEntity.login_error(uuid, 'Incorrect pass for: {}'.format(user))
# When sending a GET request render the login form
return render_template('index.html', form=form,
next_page=request.args.get('next'))
@app.route('/loginExternalAuth', methods=['POST', 'GET'])
def shibb_redirect():
"""
Redirect to the local shibboleth instance where
we can pass the return path.
This route is reached when the user clicks the "Login" button.
Note: This is equivalent to Apache's syntax:
Redirect seeother /loginExternalAuth /Shibboleth.sso/Login?target=...
@see #index()
@see #shibb_return()
"""
next_page = "/Shibboleth.sso/Login?target={}"\
.format(url_for('shibb_return'))
return redirect(next_page)
@app.route('/loginExternalAuthReturn', methods=['POST', 'GET'])
def shibb_return():
"""
Read the Shibboleth headers returned by the IdP after
the user entered the username/password.
If the `eduPersonPrincipalName` (aka Eppn) for the user matches the
usrEmail of an active user then let the user in,
otherwise let them see the login page.
@see #shibb_redirect()
"""
if current_user.is_authenticated():
# next_page = request.args.get('next') or get_role_landing_page()
return redirect(get_role_landing_page())
# fresh login...
uuid = session['uuid']
email = request.headers['Mail']
glid = request.headers['Glid'] # Gatorlink ID
app.logger.debug("Checking if email: {} is registered for glid: {}"
.format(email, glid))
user = UserEntity.query.filter_by(email=email).first()
if not user:
utils.flash_error("No such user: {}".format(email))
LogEntity.login_error(uuid,
"Shibboleth user is not registered for this app")
return redirect(url_for('index'))
if not user.is_active():
utils.flash_error("Inactive user: {}".format(email))
LogEntity.login_error(uuid, 'Inactive user tried to login')
return redirect(url_for('index'))
if user.is_expired():
utils.flash_error("User account for {} expired on {}"
.format(email, user.access_expires_at))
LogEntity.login_error(uuid, 'Expired user tried to login')
return redirect(url_for('index'))
# Log it
app.logger.info('Successful login via Shibboleth for: {}'.format(user))
LogEntity.login(uuid, 'Successful login via Shibboleth')
login_user(user, remember=False, force=False)
# Tell Flask-Principal that the identity has changed
identity_changed.send(current_app._get_current_object(),
identity=Identity(user.get_id()))
next_page = get_role_landing_page()
return redirect(next_page)
def render_login_shib():
""" Render the login page with button redirecting to
Shibboleth /loginExternalAuth path
"""
return render_template('login_shib.html', form=request.form)
def get_role_landing_page():
"""
Get the landing page for a user with specific role
:return None if the user has no roles
"""
next_page = request.form.get('next')
# Per Chris's request all users land on the same page
if next_page is not None and next_page != 'None':
return next_page
return url_for('dashboard')
@identity_loaded.connect_via(app)
def on_identity_loaded(sender, identity):
""" Describe what `needs` does this identity provide
@TODO: add unit tests
http://stackoverflow.com/questions/16712321/unit-testing-a-flask-principal-application
"""
if current_user.__class__.__name__ == 'AnonymousUserMixin':
return
identity.user = current_user
if hasattr(current_user, 'roles'):
for role in current_user.roles:
# app.logger.debug("Provide role: {}".format(role))
identity.provides.add(RoleNeed(role.name))
@login_manager.request_loader
def load_user_from_request(req):
""" To support login from both a url argument and from Basic Auth
using the Authorization header
@TODO: use for api requests?
Need to add column `UserAuth.uathApiKey`
"""
# first, try to login using the api_key url arg
api_key = req.args.get('api_key')
if not api_key:
# next, try to login using Basic Auth
api_key = req.headers.get('Authorization')
if api_key:
api_key = api_key.replace('Basic ', '', 1)
try:
api_key = base64.b64decode(api_key)
except TypeError:
pass
if api_key:
md5 = hashlib.md5()
md5.update(api_key)
app.logger.debug("trying api_key: {}".format(md5.digest()))
user = UserEntity.query.filter_by(api_key=api_key).first()
return user
# finally, return None if neither of the api_keys is valid
return None
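# Illustrative request shapes accepted by load_user_from_request() above (the
# key value is a made-up example):
#   GET /some/page?api_key=SECRETKEY
#   GET /some/page  with header  "Authorization: Basic U0VDUkVUS0VZ"
# base64.b64decode("U0VDUkVUS0VZ") == "SECRETKEY"; the decoded key is used to
# look up UserEntity by api_key, while the md5 digest is only written to the
# debug log.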
@app.route('/logout')
def logout():
""" Destroy the user session and redirect to the home page
Shib:
https://shib.ncsu.edu/docs/logout.html
https://wiki.shibboleth.net/confluence/display/CONCEPT/SLOIssues
"""
# Log the logout
if 'uuid' in session:
LogEntity.logout(session['uuid'])
logout_user()
# Remove session keys set by Flask-Principal, and `uuid` key set manually
for key in ('identity.name', 'identity.auth_type', 'uuid'):
session.pop(key, None)
# Tell Flask-Principal the user is anonymous
identity_changed.send(current_app._get_current_object(),
identity=AnonymousIdentity())
return redirect('/')
|
{
"content_hash": "0df268d545460709940f2fa15e205468",
"timestamp": "",
"source": "github",
"line_count": 349,
"max_line_length": 94,
"avg_line_length": 33.88538681948424,
"alnum_prop": 0.6256553357009978,
"repo_name": "indera/barebones-flask-app",
"id": "c83d20217fb3651450af37e2163f80fefa587505",
"size": "11826",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "app/routes/pages.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "5186"
},
{
"name": "Cucumber",
"bytes": "156"
},
{
"name": "HTML",
"bytes": "20265"
},
{
"name": "JavaScript",
"bytes": "33377"
},
{
"name": "Python",
"bytes": "80559"
},
{
"name": "Shell",
"bytes": "599"
}
],
"symlink_target": ""
}
|
import os
import tempfile
from pyoram.util.virtual_heap import \
SizedVirtualHeap
from pyoram.encrypted_storage.encrypted_heap_storage import \
EncryptedHeapStorage
def main():
#
# get a unique filename in the current directory
#
fid, tmpname = tempfile.mkstemp(dir=os.getcwd())
os.close(fid)
os.remove(tmpname)
print("Storage Name: %s" % (tmpname))
key_size = 32
header_data = b'a message'
heap_base = 3
heap_height = 2
block_size = 8
blocks_per_bucket = 4
initialize = lambda i: \
bytes(bytearray([i] * block_size * blocks_per_bucket))
vheap = SizedVirtualHeap(
heap_base,
heap_height,
blocks_per_bucket=blocks_per_bucket)
with EncryptedHeapStorage.setup(
tmpname,
block_size,
heap_height,
key_size=key_size,
header_data=header_data,
heap_base=heap_base,
blocks_per_bucket=blocks_per_bucket,
initialize=initialize) as f:
assert tmpname == f.storage_name
assert f.header_data == header_data
print(f.read_path(vheap.random_bucket()))
key = f.key
assert os.path.exists(tmpname)
with EncryptedHeapStorage(tmpname, key=key) as f:
assert tmpname == f.storage_name
assert f.header_data == header_data
print(f.read_path(vheap.random_bucket()))
#
# cleanup
#
os.remove(tmpname)
if __name__ == "__main__":
main() # pragma: no cover
|
{
"content_hash": "dd0974769ec3f254e870757c704f96f6",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 73,
"avg_line_length": 27.31578947368421,
"alnum_prop": 0.588310854206808,
"repo_name": "ghackebeil/PyORAM",
"id": "3ea59164275c86433f98e90fcdd63b28b3f753af",
"size": "1557",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/encrypted_heap_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "551"
},
{
"name": "Python",
"bytes": "410159"
},
{
"name": "Shell",
"bytes": "374"
}
],
"symlink_target": ""
}
|
import pytest
from virtualenv.run import session_via_cli
@pytest.mark.parametrize(
("args", "download"),
[([], False), (["--no-download"], False), (["--never-download"], False), (["--download"], True)],
)
def test_download_cli_flag(args, download, tmp_path):
session = session_via_cli(args + [str(tmp_path)])
assert session.seeder.download is download
|
{
"content_hash": "9d6dbf941382bf6ba57d8d72a75923b8",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 101,
"avg_line_length": 30.916666666666668,
"alnum_prop": 0.6549865229110512,
"repo_name": "pypa/virtualenv",
"id": "3344c74384409415815c4b9b1d9948b4aa57c37c",
"size": "371",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "tests/unit/seed/embed/test_base_embed.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1512"
},
{
"name": "C",
"bytes": "1135"
},
{
"name": "Nu",
"bytes": "3265"
},
{
"name": "PowerShell",
"bytes": "1810"
},
{
"name": "Python",
"bytes": "459846"
},
{
"name": "Shell",
"bytes": "6706"
}
],
"symlink_target": ""
}
|
from django.core.management.base import BaseCommand, CommandError
import shortuuid
from flowjs.models import FlowFile
from shutil import copyfile
from cbh_core_ws.parser import get_sheetnames, get_sheet
from cbh_datastore_ws.resources import DataPointClassificationResource, AttachmentResource, DataPointProjectFieldResource, FlowFileResource
from django.http import HttpRequest
from django.test import TestCase, RequestFactory
from django.contrib.auth.models import User
import json
class Command(BaseCommand):
def handle(self, *args, **options):
if len(args) != 2:
raise CommandError(
'Usage: python manage.py import_spreadsheet [filename] [data point classification id]')
from flowjs.models import FlowFile
# Here we fake the flowfile system by creating an identifier
two_letterg = shortuuid.ShortUUID()
two_letterg.set_alphabet("ABCDEFGHJKLMNPQRSTUVWXYZ")
code = two_letterg.random(length=20)
ff = FlowFile.objects.create(
identifier=code, original_filename=args[0])
copyfile(args[0], ff.path)
fact = RequestFactory()
request = fact.get(
"/dev/datastore/cbh_flowfiles/%s/?format=json" % code)
request.user = User.objects.get(pk=1)
ffr = FlowFileResource()
resp = ffr.get_detail(request, identifier=code)
data = json.loads(resp.content)
print "Please paste the required sheet name:"
# Need to add sheetnames to the flowfile API so can choose before
# creating the attachment as you need one attachment per sheet
for sheetname in data["sheet_names"]:
print sheetname
sheetname = raw_input()
fact = RequestFactory()
request = fact.post("/dev/datastore/cbh_attachments/?format=json", json.dumps({
"flowfile": "/dev/datastore/cbh_flowfiles/%s" % code,
"data_point_classification": "/dev/datastore/cbh_datapoint_classifications/" + args[1],
"chosen_data_form_config": "/dev/datastore/cbh_data_form_config/2",
"sheet_name": sheetname
}), content_type="application/json")
request.user = User.objects.get(pk=1)
ar = AttachmentResource()
resp = ar.post_list(request)
data = json.loads(resp.content)
print data["resource_uri"]
for index, field in enumerate(data["attachment_custom_field_config"]["project_data_fields"]):
field["attachment_field_mapped_to"] = field[
"mapped_to_form"]["titleMap"][index]["value"]
fact = RequestFactory()
request = fact.patch("/dev/datastore/cbh_datapoint_fields/?format=json", json.dumps({
"objects": data["attachment_custom_field_config"]["project_data_fields"]
}), content_type="application/json")
request.user = User.objects.get(pk=1)
pfr = DataPointProjectFieldResource()
resp = pfr.patch_list(request)
print resp.status_code
fact = RequestFactory()
request = fact.post(
data["resource_uri"] + "/save_temporary_data",
"{}",
content_type="application/json",
)
request.user = User.objects.get(pk=1)
ar.post_save_temp_data(request, pk=data["id"])
# document
# assay
# activities
|
{
"content_hash": "b91debeff01adccf9deeebd261f7fa1c",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 139,
"avg_line_length": 41.098765432098766,
"alnum_prop": 0.6491438870531692,
"repo_name": "thesgc/cbh_datastore_ws",
"id": "ecd33fb365e12af5bcdf011e99773d208e23b821",
"size": "3329",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cbh_datastore_ws/management/commands/import_spreadsheet.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "661"
},
{
"name": "Makefile",
"bytes": "1264"
},
{
"name": "Python",
"bytes": "112458"
}
],
"symlink_target": ""
}
|
import requests
from config import PROXY, TG_TOKEN
__author__ = 'ihciah'
CALLBACK = "https://ihc.im/" + TG_TOKEN # Modify this url to your callback url.
url = "https://api.telegram.org/bot%s/setWebhook" % TG_TOKEN
res = requests.post(url, {"url": CALLBACK}, proxies=PROXY)
print res.content
|
{
"content_hash": "ebdeb737a0264b41b6b2cabc07202964",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 80,
"avg_line_length": 29.5,
"alnum_prop": 0.7050847457627119,
"repo_name": "ihciah/AndroidSMSRelay",
"id": "a3aa1d445db6c69db18b265f49e2d8c2c5660aa0",
"size": "354",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "utils/setwebhooks.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "14381"
},
{
"name": "Shell",
"bytes": "235"
}
],
"symlink_target": ""
}
|
"""Semantic Protobufs are serialization agnostic, rich data types."""
import copy
import cStringIO
import json
import struct
from google.protobuf import descriptor_pb2
from google.protobuf import descriptor
from google.protobuf import text_format
import logging
from grr.lib import rdfvalue
from grr.lib import registry
from grr.lib import type_info
from grr.lib import utils
from grr.lib.rdfvalues import proto2
from grr.proto import semantic_pb2
# pylint: disable=super-init-not-called
# We copy these here to remove dependency on the protobuf library.
TAG_TYPE_BITS = 3 # Number of bits used to hold type info in a proto tag.
TAG_TYPE_MASK = (1 << TAG_TYPE_BITS) - 1 # 0x7
# These numbers identify the wire type of a protocol buffer value.
# We use the least-significant TAG_TYPE_BITS bits of the varint-encoded
# tag-and-type to store one of these WIRETYPE_* constants.
# These values must match WireType enum in google/protobuf/wire_format.h.
WIRETYPE_VARINT = 0
WIRETYPE_FIXED64 = 1
WIRETYPE_LENGTH_DELIMITED = 2
WIRETYPE_START_GROUP = 3
WIRETYPE_END_GROUP = 4
WIRETYPE_FIXED32 = 5
_WIRETYPE_MAX = 5
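# Worked example (illustrative, not part of the original source): a field with
# field_number 1 and WIRETYPE_LENGTH_DELIMITED has tag
# (1 << TAG_TYPE_BITS) | 2 == 0x0A, so it is encoded as the single varint byte
# "\x0a"; masking with TAG_TYPE_MASK recovers the wire type and shifting right
# by TAG_TYPE_BITS recovers the field number.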
# The following are the varint encoding/decoding functions taken from the
# protobuf library. Placing them in this file allows us to remove dependency on
# the standard protobuf library.
ORD_MAP = dict((chr(x), x) for x in range(0, 256))
CHR_MAP = dict((x, chr(x)) for x in range(0, 256))
HIGH_CHR_MAP = dict((x, chr(0x80 | x)) for x in range(0, 256))
# Some optimizations to get rid of AND operations below since they are really
# slow in Python.
ORD_MAP_AND_0X80 = dict((chr(x), x & 0x80) for x in range(0, 256))
ORD_MAP_AND_0X7F = dict((chr(x), x & 0x7F) for x in range(0, 256))
# This function is HOT.
def ReadTag(buf, pos):
"""Read a tag from the buffer, and return a (tag_bytes, new_pos) tuple."""
try:
start = pos
while ORD_MAP_AND_0X80[buf[pos]]:
pos += 1
pos += 1
return (buf[start:pos], pos)
except IndexError:
raise ValueError("Invalid tag")
# This function is HOT.
def VarintWriter(write, value):
"""Convert an integer to a varint and write it using the write function."""
if value < 0:
raise ValueError("Varint can not encode a negative number.")
bits = value & 0x7f
value >>= 7
while value:
write(HIGH_CHR_MAP[bits])
bits = value & 0x7f
value >>= 7
return write(CHR_MAP[bits])
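# Illustrative check (not part of the original source): VarintWriter(out.write, 300)
# emits "\xac\x02"; 300 is 0b100101100, whose low seven bits (0x2C) are written
# with the continuation bit set (0xAC), followed by the remaining bits (0x02).
# VarintReader("\xac\x02", 0) returns (300, 2).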
def SignedVarintWriter(write, value):
"""Encode a signed integer as a zigzag encoded signed integer."""
if value < 0:
value += (1 << 64)
bits = value & 0x7f
value >>= 7
while value:
write(HIGH_CHR_MAP[bits])
bits = value & 0x7f
value >>= 7
return write(CHR_MAP[bits])
# This function is HOT.
def VarintReader(buf, pos):
"""A 64 bit decoder from google.protobuf.internal.decoder."""
result = 0
shift = 0
while 1:
b = buf[pos]
result |= (ORD_MAP_AND_0X7F[b] << shift)
pos += 1
if not ORD_MAP_AND_0X80[b]:
return (result, pos)
shift += 7
if shift >= 64:
raise rdfvalue.DecodeError("Too many bytes when decoding varint.")
def SignedVarintReader(buf, pos):
"""A Signed 64 bit decoder from google.protobuf.internal.decoder."""
result = 0
shift = 0
while 1:
b = buf[pos]
result |= (ORD_MAP_AND_0X7F[b] << shift)
pos += 1
if not ORD_MAP_AND_0X80[b]:
if result > 0x7fffffffffffffff:
result -= (1 << 64)
return (result, pos)
shift += 7
if shift >= 64:
raise rdfvalue.DecodeError("Too many bytes when decoding varint.")
class ProtoType(type_info.TypeInfoObject):
"""A specific type descriptor for protobuf fields.
This is an abstract class - do not instantiate directly.
"""
# Must be overridden by implementations.
wire_type = None
# We cache the serialized version of the tag here so we just need to do a
# string comparison instead of decoding the tag each time.
tag_data = None
# The semantic type of the object described by this descriptor.
type = None
# The type name according to the .proto domain specific language.
proto_type_name = "string"
# A field may be defined but not added to the container immediately. In that
# case we wait for late binding to resolve the target and then bind the field
# to the protobuf descriptor set only when its target is resolved.
late_bound = False
# The Semantic protobuf class which owns this field descriptor.
owner = None
# This flag indicates if the default should be set into the owner protobuf on
# access.
set_default_on_access = False
def __init__(self, field_number=None, required=False, labels=None,
set_default_on_access=None, **kwargs):
super(ProtoType, self).__init__(**kwargs)
self.field_number = field_number
self.required = required
if set_default_on_access is not None:
self.set_default_on_access = set_default_on_access
self.labels = labels or []
if field_number is None:
raise type_info.TypeValueError("No valid field number specified.")
self.CalculateTags()
def Copy(self, field_number=None):
"""Returns a copy of descriptor, optionally changing the field number."""
result = copy.copy(self)
if field_number is not None:
result.field_number = field_number
result.CalculateTags()
return result
def CalculateTags(self):
# In Python, varint encoding is expensive, so we want to move as much of the
# hard work as possible from the Write() methods, which are called frequently,
# to the type descriptor constructor, which is only called once (at protobuf
# declaration time). Pre-calculating the tag makes for faster serialization.
self.tag = self.field_number << 3 | self.wire_type
tmp = cStringIO.StringIO()
VarintWriter(tmp.write, self.tag)
self.tag_data = tmp.getvalue()
def IsDirty(self, unused_python_format):
"""Return and clear the dirty state of the python object."""
return False
def Write(self, stream, value):
"""Encode the tag and value into the stream.
Note that value should already be in wire format.
This function is HOT.
Args:
stream: The stream to write on.
value: This is the value to write encoded according to the specific wire
format of this type.
"""
raise NotImplementedError()
def Read(self, buff, index):
"""Read a value from the buffer.
Note that reading into the wire format should be as fast as possible.
This function is HOT.
Args:
buff: A string to read from.
index: Where to start reading from.
Returns:
A value encoded in wire format specific to this type.
"""
raise NotImplementedError()
def ConvertFromWireFormat(self, value, container=None):
"""Convert value from the internal type to the real type.
When data is being parsed, it might be quicker to store it in a different
format internally. This is because we must parse all tags, but only decode
those fields which are being accessed.
This function is called when we retrieve a field on access, so we only pay
the penalty once, and cache the result.
This function is HOT.
Args:
value: A parameter stored in the wire format for this type.
container: The protobuf that contains this field.
Returns:
The parameter encoded in the python format representation.
"""
_ = container
return value
def ConvertToWireFormat(self, value):
"""Convert the parameter into the internal storage format.
This function is the inverse of ConvertFromWireFormat().
This function is HOT.
Args:
value: A python format representation of the value as coerced by the
Validate() method. This is type specific, but always the same.
Returns:
The parameter encoded in the wire format representation.
"""
return value
def _FormatDescriptionComment(self):
result = "".join(["\n // %s\n"%x for x in self.description.splitlines()])
return result
def _FormatDefault(self):
return " [default = %s]" % self.GetDefault()
def _FormatField(self):
result = " optional %s %s = %s%s" % (
self.proto_type_name, self.name, self.field_number,
self._FormatDefault())
return result + ";\n"
def Definition(self):
"""Return a string with the definition of this field."""
return self._FormatDescriptionComment() + self._FormatField()
def Format(self, value):
"""A Generator for display lines representing value."""
yield str(value)
def Validate(self, value, container=None):
"""Validate the value."""
_ = container
return value
def GetDefault(self, container=None):
_ = container
return self.default
def __str__(self):
return "<Field %s (%s) of %s: field_number: %s>" % (
self.name, self.__class__.__name__, self.owner.__name__,
self.field_number)
def SetOwner(self, owner):
self.owner = owner
class ProtoUnknown(ProtoType):
"""A type descriptor for unknown fields.
We keep unknown fields with this type descriptor so we can re-serialize them
later. This way, if we parse a protobuf with fields we don't know, we preserve
them upon serialization.
"""
def __init__(self, encoded_tag=None, **unused_kwargs):
self.encoded_tag = encoded_tag
def Write(self, stream, value):
stream.write(self.encoded_tag)
stream.write(value)
class ProtoString(ProtoType):
"""A string encoded in a protobuf."""
wire_type = WIRETYPE_LENGTH_DELIMITED
# This descriptor describes unicode strings.
type = rdfvalue.RDFString
def __init__(self, default=u"", **kwargs):
# Strings default to "" if not specified.
super(ProtoString, self).__init__(**kwargs)
# Ensure the default is a unicode object.
if default is not None:
self.default = utils.SmartUnicode(default)
def Validate(self, value, **_):
"""Validates a python format representation of the value."""
# We only accept a base string, unicode object or RDFString here.
if not (value.__class__ is str or value.__class__ is unicode or
value.__class__ is rdfvalue.RDFString):
raise type_info.TypeValueError("%s not a valid string" % value)
# A String means a unicode String. We must be dealing with unicode strings
# here and the input must be encodable as a unicode object.
try:
return unicode(value)
except UnicodeError:
raise type_info.TypeValueError("Not a valid unicode string")
def Write(self, stream, value):
stream.write(self.tag_data)
VarintWriter(stream.write, len(value))
stream.write(value)
def Read(self, buff, index):
length, index = VarintReader(buff, index)
return buff[index:index+length], index+length
def ConvertFromWireFormat(self, value, container=None):
"""Internally strings are utf8 encoded."""
try:
return unicode(value, "utf8")
except UnicodeError:
raise rdfvalue.DecodeError("Unicode decoding error")
def ConvertToWireFormat(self, value):
"""Internally strings are utf8 encoded."""
return value.encode("utf8")
def Definition(self):
"""Return a string with the definition of this field."""
return self._FormatDescriptionComment() + self._FormatField()
def _FormatDefault(self):
if self.GetDefault():
return " [default = %r]" % self.GetDefault()
else:
return ""
def Format(self, value):
yield repr(value)
class ProtoBinary(ProtoType):
"""A binary string encoded in a protobuf."""
wire_type = WIRETYPE_LENGTH_DELIMITED
# This descriptor describes strings.
type = rdfvalue.RDFString
proto_type_name = "bytes"
def __init__(self, default="", **kwargs):
# Byte strings default to "" if not specified.
super(ProtoBinary, self).__init__(**kwargs)
# Ensure the default is a string object.
if default is not None:
self.default = utils.SmartStr(default)
def Validate(self, value, **_):
if value.__class__ is not str:
raise type_info.TypeValueError("%s not a valid string" % value)
return value
def Write(self, stream, value):
stream.write(self.tag_data)
VarintWriter(stream.write, len(value))
stream.write(value)
def Read(self, buff, index):
length, index = VarintReader(buff, index)
return buff[index:index+length], index+length
def Definition(self):
"""Return a string with the definition of this field."""
return self._FormatDescriptionComment() + self._FormatField()
def Format(self, value):
yield repr(value)
def _FormatDefault(self):
if self.GetDefault():
return " [default = %r]" % self.GetDefault()
else:
return ""
class ProtoUnsignedInteger(ProtoType):
"""An unsigned VarInt encoded in the protobuf."""
wire_type = WIRETYPE_VARINT
# This descriptor describes integers.
type = rdfvalue.RDFInteger
proto_type_name = "uint64"
def __init__(self, default=0, **kwargs):
# Integers default to 0 if not specified.
super(ProtoUnsignedInteger, self).__init__(default=default, **kwargs)
def Validate(self, value, **_):
try:
return int(value)
except ValueError:
raise type_info.TypeValueError("Invalid value %s for Integer" % value)
def Write(self, stream, value):
stream.write(self.tag_data)
VarintWriter(stream.write, value)
def Read(self, buff, index):
return VarintReader(buff, index)
def _FormatDefault(self):
if self.GetDefault():
return " [default = %r]" % self.GetDefault()
else:
return ""
class ProtoSignedInteger(ProtoUnsignedInteger):
"""A signed VarInt encoded in the protobuf.
Note: signed VarInts are more expensive than unsigned VarInts.
"""
proto_type_name = "int64"
def Write(self, stream, value):
stream.write(self.tag_data)
SignedVarintWriter(stream.write, value)
def Read(self, buff, index):
return SignedVarintReader(buff, index)
class ProtoFixed32(ProtoUnsignedInteger):
"""A 32 bit fixed unsigned integer.
The wire format is a 4 byte string, while the python type is a long.
"""
_size = 4
proto_type_name = "sfixed32"
wire_type = WIRETYPE_FIXED32
def Write(self, stream, value):
stream.write(self.tag_data)
stream.write(value)
def Read(self, buff, index):
return buff[index:index+self._size], index+self._size
def ConvertToWireFormat(self, value):
return struct.pack("<L", long(value))
def ConvertFromWireFormat(self, value, container=None):
return struct.unpack("<L", value)[0]
class ProtoFixed64(ProtoFixed32):
_size = 8
proto_type_name = "sfixed64"
wire_type = WIRETYPE_FIXED64
def ConvertToWireFormat(self, value):
return struct.pack("<Q", long(value))
def ConvertFromWireFormat(self, value, container=None):
return struct.unpack("<Q", value)[0]
class ProtoFixedU32(ProtoFixed32):
"""A 32 bit fixed unsigned integer.
The wire format is a 4 byte string, while the python type is a long.
"""
proto_type_name = "fixed32"
def ConvertToWireFormat(self, value):
return struct.pack("<l", long(value))
def ConvertFromWireFormat(self, value, container=None):
return struct.unpack("<l", value)[0]
class ProtoFloat(ProtoFixed32):
"""A float.
The wire format is a 4 byte string, while the python type is a float.
"""
proto_type_name = "float"
def Validate(self, value, **_):
if not isinstance(value, (int, long, float)):
raise type_info.TypeValueError("Invalid value %s for Float" % value)
return value
def ConvertToWireFormat(self, value):
return struct.pack("<f", float(value))
def ConvertFromWireFormat(self, value, container=None):
return struct.unpack("<f", value)[0]
class ProtoDouble(ProtoFixed64):
"""A double.
The wire format is a 8 byte string, while the python type is a float.
"""
proto_type_name = "double"
def Validate(self, value, **_):
if not isinstance(value, (int, long, float)):
raise type_info.TypeValueError("Invalid value %s for Integer" % value)
return value
def ConvertToWireFormat(self, value):
return struct.pack("<d", float(value))
def ConvertFromWireFormat(self, value, container=None):
return struct.unpack("<d", value)[0]
class Enum(int):
"""A class that wraps enums.
Enums are just integers, except when printed they have a name.
"""
def __new__(cls, val, name=None, description=None):
instance = super(Enum, cls).__new__(cls, val)
instance.name = name or str(val)
instance.description = description
return instance
def __eq__(self, other):
return int(self) == other or self.name == other
def __str__(self):
return self.name
def __unicode__(self):
return unicode(self.name)
class ProtoEnum(ProtoSignedInteger):
"""An enum native proto type.
This is really encoded as an integer but only certain values are allowed.
"""
def __init__(self, default=None, enum_name=None, enum=None,
enum_descriptions=None, **kwargs):
super(ProtoEnum, self).__init__(**kwargs)
if enum_name is None:
raise type_info.TypeValueError("Enum groups must be given a name.")
self.enum_name = enum_name
self.proto_type_name = enum_name
if isinstance(enum, EnumContainer):
enum = enum.enum_dict
for v in enum.itervalues():
if not (v.__class__ is int or v.__class__ is long):
raise type_info.TypeValueError("Enum values must be integers.")
self.enum_container = EnumContainer(
name=enum_name, descriptions=enum_descriptions, **(enum or {}))
self.enum = self.enum_container.enum_dict
self.reverse_enum = self.enum_container.reverse_enum
# Ensure the default is a valid enum value.
if default is not None:
self.default = self.Validate(default)
def GetDefault(self, container=None):
_ = container
return Enum(self.default, name=self.reverse_enum.get(self.default))
def Validate(self, value, **_):
"""Check that value is a valid enum."""
# None is a valid value - it means the field is not set.
if value is None:
return
# If the value is a string we need to try to convert it to an integer.
checked_value = value
if isinstance(value, basestring):
checked_value = self.enum.get(value)
if checked_value is None:
raise type_info.TypeValueError(
"Value %s is not a valid enum value for field %s" % (
value, self.name))
return Enum(checked_value, name=self.reverse_enum.get(checked_value))
def Definition(self):
"""Return a string with the definition of this field."""
result = self._FormatDescriptionComment()
result += " enum %s {\n" % self.enum_name
for k, v in sorted(self.reverse_enum.items()):
result += " %s = %s;\n" % (v, k)
result += " }\n"
result += self._FormatField()
return result
def Format(self, value):
yield self.reverse_enum.get(value, str(value))
def ConvertToWireFormat(self, value):
return int(value)
def ConvertFromWireFormat(self, value, container=None):
return Enum(value, name=self.reverse_enum.get(value))
class EnumValue(Enum):
"""Backwards compatibility for stored data.
This class is necessary for reading data created with GRR server version
0.2.9-1 and earlier. It can be removed when we can drop support for this old
data.
"""
pass
class ProtoBoolean(ProtoEnum):
"""A Boolean."""
def __init__(self, **kwargs):
super(ProtoBoolean, self).__init__(
enum_name="Bool", enum=dict(True=1, False=0), **kwargs)
self.proto_type_name = "bool"
class ProtoNested(ProtoType):
"""A nested RDFProtoStruct inside the field."""
wire_type = WIRETYPE_START_GROUP
closing_tag_data = None
# When we access a nested protobuf we automatically create it and assign it to
# the owner protobuf.
set_default_on_access = True
def __init__(self, nested=None, **kwargs):
super(ProtoNested, self).__init__(**kwargs)
# Nested can refer to a target RDFProtoStruct by name.
if isinstance(nested, basestring):
self.proto_type_name = nested
# Try to resolve the type it names
self.type = getattr(rdfvalue, nested, None)
# We do not know about this type yet. Implement Late Binding.
if self.type is None:
self.late_bound = True
# Register a late binding callback.
rdfvalue.RegisterLateBindingCallback(nested, self.LateBind)
# Or it can be an subclass of RDFProtoStruct.
elif issubclass(nested, RDFProtoStruct):
self.type = nested
self.proto_type_name = nested.__name__
else:
raise type_info.TypeValueError(
"Only RDFProtoStructs can be nested, not %s" % nested.__name__)
def CalculateTags(self):
super(ProtoNested, self).CalculateTags()
# Pre-calculate the closing tag data.
self.closing_tag = ((self.field_number << 3) | WIRETYPE_END_GROUP)
tmp = cStringIO.StringIO()
VarintWriter(tmp.write, self.closing_tag)
self.closing_tag_data = tmp.getvalue()
def LateBind(self, target=None):
"""Late binding callback.
This method is called on this field descriptor when the target RDFValue
class is finally defined. It gives the field descriptor an opportunity to
initialize after the point of definition.
Args:
target: The target nested class.
Raises:
TypeError: If the target class is not of the expected type.
"""
if not issubclass(target, RDFProtoStruct):
raise TypeError("Field %s expects a protobuf, but target is %s" %
self, target)
self.late_bound = False
# The target type is now resolved.
self.type = target
# Register us in our owner.
self.owner.AddDescriptor(self)
def IsDirty(self, proto):
"""Return and clear the dirty state of the python object."""
if proto.dirty:
return True
for python_format, _, type_descriptor in proto.GetRawData().itervalues():
if python_format is not None and type_descriptor.IsDirty(python_format):
proto.dirty = True
return True
return False
def GetDefault(self, container=None):
"""When a nested proto is accessed, default to an empty one."""
return self.type()
def Validate(self, value, **_):
# We may coerce it to the correct type.
if value.__class__ is not self.type:
try:
value = self.type(value)
except rdfvalue.InitializeError:
raise type_info.TypeValueError(
"Field %s must be of type %s" % (self.name, self.type.__name__))
return value
def Write(self, stream, value):
"""Serialize the nested protobuf value into the stream."""
stream.write(self.tag_data)
raw_data = value.GetRawData()
for name in raw_data:
python_format, wire_format, type_descriptor = raw_data[name]
if wire_format is None or (python_format and
type_descriptor.IsDirty(python_format)):
wire_format = type_descriptor.ConvertToWireFormat(python_format)
# We do not bother to cache the wire format because usually a protobuf
# is only serialized once and then discarded, so keeping the wire
# formats around does not give a good cache hit rate.
type_descriptor.Write(stream, wire_format)
stream.write(self.closing_tag_data)
def Skip(self, encoded_tag, buff, index):
"""Skip the field at index."""
tag_type = ORD_MAP[encoded_tag[0]] & TAG_TYPE_MASK
# We don't need to actually understand the data; we just need to figure out
# where the end of the unknown field is so we can preserve it. When we
# write these fields back (with their encoded tag) they should still be
# valid.
if tag_type == WIRETYPE_VARINT:
_, index = ReadTag(buff, index)
elif tag_type == WIRETYPE_FIXED64:
index += 8
elif tag_type == WIRETYPE_FIXED32:
index += 4
elif tag_type == WIRETYPE_LENGTH_DELIMITED:
length, start = VarintReader(buff, index)
index = start + length
# Skip an entire nested protobuf - This calls into Skip() recursively.
elif tag_type == WIRETYPE_START_GROUP:
start = index
while index < len(buff):
group_encoded_tag, index = ReadTag(buff, index)
if (ORD_MAP[group_encoded_tag[0]] & TAG_TYPE_MASK ==
WIRETYPE_END_GROUP):
break
# Recursive call to skip the next field.
index = self.Skip(group_encoded_tag, buff, index)
else:
raise rdfvalue.DecodeError("Unexpected Tag.")
# The data to be written includes the encoded_tag and the decoded data
# together.
return index
def ReadIntoObject(self, buff, index, value_obj, length=None):
"""Reads all tags until the next end group and store in the value_obj."""
raw_data = value_obj.GetRawData()
buffer_len = length or len(buff)
while index < buffer_len:
encoded_tag, index = ReadTag(buff, index)
# This represents the closing tag group for the enclosing protobuf.
if encoded_tag == self.closing_tag_data:
break
type_info_obj = value_obj.type_infos_by_encoded_tag.get(encoded_tag)
# If the tag is not found we need to skip it. Skipped fields are
# inaccessible to this actual object, because they have no type info
# describing them, however they are still stored in the raw data
# representation because they will be re-serialized back. This way
# programs which simply read protobufs and write them back do not need to
# know all the fields, some of which were defined in a later version of
# the application. In order to avoid having to worry about repeated fields
# here, we just insert them into the raw data dict with a key which should
# be unique.
if type_info_obj is None:
start = index
end = self.Skip(encoded_tag, buff, start)
# Record an unknown field as a generic ProtoType. The key is unique and
# ensures we do not collide the dict on repeated fields of the encoded
# tag. Note that this field is not really accessible using Get() and
# does not have a python format representation. It will be written back
# using the same wire format it was read with.
raw_data[index] = (None, buff[start:end],
ProtoUnknown(encoded_tag=encoded_tag))
index = end
continue
value, index = type_info_obj.Read(buff, index)
if type_info_obj.__class__ is ProtoList:
value_obj.Get(type_info_obj.name).Append(wire_format=value)
else:
raw_data[type_info_obj.name] = (None, value, type_info_obj)
return index
def Read(self, buff, index):
"""Parse a nested protobuf."""
# Make new instance and parse the data into it.
result = self.type()
index = self.ReadIntoObject(buff, index, result)
return result, index
def Definition(self):
"""Return a string with the definition of this field."""
return self._FormatDescriptionComment() + self._FormatField()
def _FormatField(self):
result = " optional %s %s = %s" % (self.proto_type_name,
self.name, self.field_number)
return result + ";\n"
def Format(self, value):
for line in value.Format():
yield " %s" % line
class ProtoEmbedded(ProtoNested):
"""A field may be embedded as a serialized protobuf.
Embedding is more efficient than nesting since the embedded protobuf does not
need to be parsed at all if the user does not access any elements in it.
Embedded protobufs are simply serialized as bytes using the wire format
WIRETYPE_LENGTH_DELIMITED. Hence the wire format is a simple python string,
but the python format representation is an RDFProtoStruct.
"""
wire_type = WIRETYPE_LENGTH_DELIMITED
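# Illustrative contrast with ProtoNested (not part of the original source):
# for field_number 1, ProtoNested brackets the payload between the group tags
# (1 << 3) | WIRETYPE_START_GROUP == 0x0B and (1 << 3) | WIRETYPE_END_GROUP == 0x0C,
# whereas ProtoEmbedded writes (1 << 3) | WIRETYPE_LENGTH_DELIMITED == 0x0A
# followed by a varint length, so the embedded bytes can be skipped without
# being parsed.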
def ConvertFromWireFormat(self, value, container=None):
"""The wire format is simply a string."""
result = self.type()
self.ReadIntoObject(value, 0, result)
return result
def ConvertToWireFormat(self, value):
"""Encode the nested protobuf into wire format."""
output = cStringIO.StringIO()
for entry in value.GetRawData().itervalues():
python_format, wire_format, type_descriptor = entry
if wire_format is None or (python_format and
type_descriptor.IsDirty(python_format)):
wire_format = type_descriptor.ConvertToWireFormat(python_format)
type_descriptor.Write(output, wire_format)
return output.getvalue()
def Write(self, stream, value):
"""Serialize this protobuf as an embedded protobuf."""
stream.write(self.tag_data)
VarintWriter(stream.write, len(value))
stream.write(value)
def Read(self, buff, index):
length, index = VarintReader(buff, index)
return buff[index:index+length], index+length
class ProtoDynamicEmbedded(ProtoType):
"""An embedded field which has a dynamic type."""
wire_type = WIRETYPE_LENGTH_DELIMITED
set_default_on_access = True
def __init__(self, dynamic_cb=None, **kwargs):
"""Initialize the type descriptor.
We call the dynamic_method to know which type should be used to decode the
embedded bytestream.
Args:
dynamic_cb: A callback to be used to return the class to parse the
embedded data. We pass the callback our container.
**kwargs: Passthrough.
"""
super(ProtoDynamicEmbedded, self).__init__(**kwargs)
self._type = dynamic_cb
def ConvertFromWireFormat(self, value, container=None):
"""The wire format is simply a string."""
return self._type(container)(value)
def ConvertToWireFormat(self, value):
"""Encode the nested protobuf into wire format."""
return value.SerializeToString()
def Write(self, stream, value):
"""Serialize this protobuf as an embedded protobuf."""
stream.write(self.tag_data)
VarintWriter(stream.write, len(value))
stream.write(value)
def Read(self, buff, index):
length, index = VarintReader(buff, index)
return buff[index:index+length], index+length
def Validate(self, value, container=None):
required_type = self._type(container)
if required_type and not isinstance(value, required_type):
raise ValueError("Expected value of type %s" % required_type)
return value
def GetDefault(self, container=None):
cls = self._type(container or self.owner())
if cls is not None:
return cls()
def Format(self, value):
for line in value.Format():
yield " %s" % line
class RepeatedFieldHelper(object):
"""A helper for the RDFProto to handle repeated fields.
This helper is intended to only be constructed from the RDFProto class.
"""
__metaclass__ = registry.MetaclassRegistry
dirty = False
def __init__(self, wrapped_list=None, type_descriptor=None, container=None):
"""Constructor.
Args:
wrapped_list: The list within the protobuf which we wrap.
type_descriptor: A type descriptor describing the type of the list
elements.
container: The protobuf which contains this repeated field.
Raises:
AttributeError: If parameters are not valid.
"""
if wrapped_list is None:
self.wrapped_list = []
elif wrapped_list.__class__ is RepeatedFieldHelper:
self.wrapped_list = wrapped_list.wrapped_list
else:
self.wrapped_list = wrapped_list
if type_descriptor is None:
raise AttributeError("type_descriptor not specified.")
self.type_descriptor = type_descriptor
self.container = container
def IsDirty(self):
"""Is this repeated item dirty?
This is used to invalidate any caches that our owners have of us.
Returns:
True if this object is dirty.
"""
if self.dirty:
return True
# If any of the items is dirty we are also dirty.
for item in self.wrapped_list:
if self.type_descriptor.IsDirty(item[0]):
self.dirty = True
return True
return False
def Copy(self):
return RepeatedFieldHelper(wrapped_list=self.wrapped_list[:],
type_descriptor=self.type_descriptor)
def Append(self, rdf_value=utils.NotAValue, wire_format=None, **kwargs):
"""Append the value to our internal list."""
if rdf_value is utils.NotAValue:
if wire_format is None:
rdf_value = self.type_descriptor.type(**kwargs)
else:
rdf_value = None
else:
# Coerce the value to the required type.
try:
rdf_value = self.type_descriptor.Validate(rdf_value, **kwargs)
except (TypeError, ValueError) as e:
raise type_info.TypeValueError(
"Assignment value must be %s, but %s can not "
"be coerced. Error: %s" % (self.type_descriptor.proto_type_name,
type(rdf_value), e))
self.wrapped_list.append((rdf_value, wire_format))
return rdf_value
def Pop(self, item):
result = self[item]
self.wrapped_list.pop(item)
return result
def Extend(self, iterable):
for i in iterable:
self.Append(rdf_value=i)
append = utils.Proxy("Append")
remove = utils.Proxy("Remove")
def __getitem__(self, item):
# Ensure we handle slices as well.
if item.__class__ is slice:
result = []
for i in range(*item.indices(len(self))):
result.append(self.wrapped_list[i])
return self.__class__(
wrapped_list=result, type_descriptor=self.type_descriptor)
python_format, wire_format = self.wrapped_list[item]
if python_format is None:
python_format = self.type_descriptor.ConvertFromWireFormat(
wire_format, container=self.container)
self.wrapped_list[item] = (python_format, wire_format)
return python_format
def __len__(self):
return len(self.wrapped_list)
def __ne__(self, other):
return not self == other # pylint: disable=g-comparison-negation
def __eq__(self, other):
if len(self) != len(other):
return False
for x, y in zip(self, other):
if x != y:
return False
return True
def __str__(self):
result = []
result.append("'%s': [" % self.type_descriptor.name)
for element in self:
for line in self.type_descriptor.Format(element):
result.append(" %s" % line)
result.append("]")
return "\n".join(result)
def __unicode__(self):
return utils.SmartUnicode(str(self))
def Validate(self):
for x in self:
x.Validate()
class ProtoList(ProtoType):
"""A repeated type."""
set_default_on_access = True
def __init__(self, delegate, **kwargs):
self.delegate = delegate
if not isinstance(delegate, ProtoType):
raise AttributeError(
"Delegate class must derive from ProtoType, not %s" %
delegate.__class__.__name__)
# If our delegate is late bound we must also be late bound. This means that
# the repeated field is not registered in the owner protobuf just
# yet. However, we do not actually need to register a late binding callback
# ourselves, since the delegate field descriptor already did this. We simply
# wait until the delegate calls our AddDescriptor() method and then we call
# our own owner's AddDescriptor() method to ensure we re-register.
self.late_bound = delegate.late_bound
self.wire_type = delegate.wire_type
super(ProtoList, self).__init__(name=delegate.name,
description=delegate.description,
field_number=delegate.field_number,
friendly_name=delegate.friendly_name)
def IsDirty(self, value):
return value.IsDirty()
def GetDefault(self, container=None):
# By default an empty RepeatedFieldHelper.
return RepeatedFieldHelper(type_descriptor=self.delegate,
container=container)
def Validate(self, value, **_):
"""Check that value is a list of the required type."""
# Assigning from same kind can allow us to skip verification since all
# elements in a RepeatedFieldHelper already are coerced to the delegate
# type. In that case we just make a copy. This only works when the value
# wraps the same type as us.
if (value.__class__ is RepeatedFieldHelper and
value.type_descriptor is self.delegate):
result = value.Copy()
# Make sure the base class finds the value valid.
else:
# The value may be a generator here, so we just iterate over it.
result = RepeatedFieldHelper(type_descriptor=self.delegate)
result.Extend(value)
return result
def Write(self, stream, value):
for python_format, wire_format in value.wrapped_list:
if wire_format is None or (python_format and
value.type_descriptor.IsDirty(python_format)):
wire_format = value.type_descriptor.ConvertToWireFormat(python_format)
value.type_descriptor.Write(stream, wire_format)
def Read(self, buff, index):
return self.delegate.Read(buff, index)
def Format(self, value):
yield "["
for element in value:
for line in self.delegate.Format(element):
yield " %s" % line
yield "]"
def _FormatField(self):
result = " repeated %s %s = %s" % (
self.delegate.proto_type_name, self.name, self.field_number)
return result + ";\n"
def SetOwner(self, owner):
self.owner = owner
# We are the owner for the delegate field descriptor.
self.delegate.SetOwner(self)
def AddDescriptor(self, field_desc):
"""This method will be called by our delegate during late binding."""
# Just relay it up to our owner.
self.late_bound = False
self.delegate = field_desc
self.wire_type = self.delegate.wire_type
self.owner.AddDescriptor(self)
class ProtoRDFValue(ProtoBinary):
"""Serialize arbitrary rdfvalue members.
RDFValue members can be serialized in a number of different ways according to
  their preferred data_store_type member. We map each data_store_type
  description onto a suitable primitive protobuf encoding. We therefore use a
  delegate type descriptor to best convert
from the RDFValue to the wire type. For example, an RDFDatetime is best
represented as an integer (number of microseconds since the epoch). Hence
RDFDatetime.SerializeToDataStore() will return an integer, and the delegate
will be ProtoUnsignedInteger().
To convert from the RDFValue python type to the delegate's wire type we
therefore need to make two conversions:
1) Our python format is the RDFValue -> intermediate data store format using
RDFValue.SerializeToDataStore(). This will produce a python object which is
the correct python format for the delegate primitive type descriptor.
2) Use the delegate to obtain the wire format of its own python type
(i.e. self.delegate.ConvertToWireFormat())
NOTE: The default value for an RDFValue is None. It is impossible for us to
know how to instantiate a valid default value without being told by the
user. This is unlike the default value for strings or ints which are "" and 0
respectively.
"""
# We delegate encoding/decoding to a primitive field descriptor based on the
# semantic type's data_store_type attribute.
primitive_desc = None
# We store our args here so we can use the same args to initialize the
# delegate descriptor.
_kwargs = None
_PROTO_DATA_STORE_LOOKUP = dict(
bytes=ProtoBinary,
unsigned_integer=ProtoUnsignedInteger,
integer=ProtoUnsignedInteger,
signed_integer=ProtoSignedInteger,
string=ProtoString)
def __init__(self, rdf_type=None, default=None, **kwargs):
super(ProtoRDFValue, self).__init__(default=default, **kwargs)
self._kwargs = kwargs
if isinstance(rdf_type, basestring):
self.original_proto_type_name = self.proto_type_name = rdf_type
# Try to resolve the type it names
self.type = getattr(rdfvalue, rdf_type, None)
# We do not know about this type yet. Implement Late Binding.
if self.type is None:
self.late_bound = True
# Register a late binding callback.
rdfvalue.RegisterLateBindingCallback(rdf_type, self.LateBind)
else:
# The semantic type was found successfully.
self._GetPrimitiveEncoder()
    # Or it can be a subclass of RDFValue.
elif issubclass(rdf_type, rdfvalue.RDFValue):
self.type = rdf_type
self.original_proto_type_name = self.proto_type_name = rdf_type.__name__
self._GetPrimitiveEncoder()
else:
      raise type_info.TypeValueError("An rdf_type must be specified.")
def LateBind(self, target=None):
"""Bind the field descriptor to the owner once the target is defined."""
self.type = target
self._GetPrimitiveEncoder()
# Now re-add the descriptor to the owner protobuf.
self.late_bound = False
self.owner.AddDescriptor(self)
def _GetPrimitiveEncoder(self):
"""Finds the primitive encoder according to the type's data_store_type."""
# Decide what should the primitive type be for packing the target rdfvalue
# into the protobuf and create a delegate descriptor to control that.
primitive_cls = self._PROTO_DATA_STORE_LOOKUP[self.type.data_store_type]
self.primitive_desc = primitive_cls(**self._kwargs)
# Our wiretype is the same as the delegate's.
self.wire_type = self.primitive_desc.wire_type
self.proto_type_name = self.primitive_desc.proto_type_name
# Recalculate our tags.
self.CalculateTags()
def GetDefault(self, container=None):
_ = container
# We must return an instance of our type. This allows the field to be
# initialized with a string default.
if self.default is not None and self.default.__class__ is not self.type:
self.default = self.Validate(self.default)
return self.default
def IsDirty(self, python_format):
"""Return the dirty state of the python object."""
return python_format.dirty
def Definition(self):
return ("\n // Semantic Type: %s" %
self.type.__name__) + self.primitive_desc.Definition()
def Read(self, buff, index):
return self.primitive_desc.Read(buff, index)
def Write(self, buff, index):
return self.primitive_desc.Write(buff, index)
def Validate(self, value, **_):
# Try to coerce into the correct type:
if value.__class__ is not self.type:
try:
value = self.type(value)
except rdfvalue.DecodeError as e:
raise type_info.TypeValueError(e)
return value
def ConvertFromWireFormat(self, value, container=None):
# Wire format should be compatible with the data_store_type for the
# rdfvalue. We use the delegate primitive descriptor to perform the
# conversion.
value = self.primitive_desc.ConvertFromWireFormat(
value, container=container)
result = self.type(value)
return result
def ConvertToWireFormat(self, value):
return self.primitive_desc.ConvertToWireFormat(value.SerializeToDataStore())
def Copy(self, field_number=None):
"""Returns descriptor copy, optionally changing field number."""
new_args = self._kwargs.copy()
if field_number is not None:
new_args["field_number"] = field_number
return ProtoRDFValue(rdf_type=self.original_proto_type_name,
default=getattr(self, "default", None),
**new_args)
def _FormatField(self):
result = " optional %s %s = %s" % (self.proto_type_name,
self.name, self.field_number)
return result + ";\n"
def Format(self, value):
yield "%s:" % self.type.__name__
for line in str(value).splitlines():
yield " %s" % line
def __str__(self):
return "<Field %s (Sem Type: %s) of %s: field_number: %s>" % (
self.name, self.proto_type_name, self.owner.__name__,
self.field_number)
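# Illustrative sketch (not part of the original module) of the two step
# conversion described in the ProtoRDFValue docstring, for an RDFDatetime
# field (the timestamp value below is made up):
#
#   python format (RDFDatetime instance)
#     --SerializeToDataStore()-->         1400000000000000   (data store format)
#     --delegate ConvertToWireFormat()--> varint wire bytes  (wire format)
#
# Reading a field reverses the chain: the delegate's ConvertFromWireFormat()
# recovers the data store value, which is then passed to the RDFDatetime
# constructor (see ConvertFromWireFormat() above).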
class AbstractSerializer(object):
"""A serializer which parses to/from the intermediate python objects."""
def SerializeToString(self, value):
"""Serialize the RDFStruct object into a string."""
def ParseFromString(self, value_obj, string):
"""Parse the string and set attributes in the value_obj."""
class JsonSerializer(AbstractSerializer):
"""A serializer based on Json."""
def _SerializedToIntermediateForm(self, data):
"""Convert to an intermediate form suitable for JSON encoding.
Since JSON is unable to encode arbitrary data, we need to convert the data
into something which is valid JSON.
Args:
      data: Arbitrary data from the RDFStruct's internal representation.
Returns:
      A JSON serializable object which can, in turn, be reversed using the
      _ParseFromIntermediateForm() method.
Raises:
ValueError: If data can not be suitably encoded.
"""
# These types can be serialized by json.
if isinstance(data, (int, long, unicode)):
return data
# We encode an RDFStruct as a dict.
elif isinstance(data, rdfvalue.RDFStruct):
result = dict(__n=data.__class__.__name__)
for entry in data.GetRawData().itervalues():
python_format, wire_format, type_descriptor = entry
if wire_format is None or (python_format and
type_descriptor.IsDirty(python_format)):
wire_format = type_descriptor.ConvertToWireFormat(python_format)
result[type_descriptor.field_number] = (
self._SerializedToIntermediateForm(wire_format))
return result
# A RepeatedFieldHelper is serialized as a list of objects.
elif isinstance(data, RepeatedFieldHelper):
return [self._SerializedToIntermediateForm(x) for x in data]
# A byte string must be encoded for json since it can not encode arbitrary
# binary data.
elif isinstance(data, str):
return data.encode("base64")
# Should never get here.
raise ValueError("Unable to serialize internal type %s" % data)
def SerializeToString(self, data):
"""Convert the internal data structure to json compatible form."""
return json.dumps(self._SerializedToIntermediateForm(data))
def _ParseFromIntermediateForm(self, data):
"""Convert from Intermediate JSON form to a python object."""
result = {}
for k, v in data.iteritems():
if isinstance(v, (int, long, unicode)):
result[k] = v
elif isinstance(v, dict):
rdfvalue_class = self.classes.get(v["t"])
        # Just ignore RDFValues we don't understand.
if rdfvalue_class is not None:
tmp = result[k] = rdfvalue_class()
tmp.SetRawData(self._ParseFromIntermediateForm(v["d"]))
elif isinstance(v, str):
result[k] = v.decode("base64")
return result
def ParseFromString(self, value_obj, string):
value_obj.SetRawData(self._ParseFromIntermediateForm(json.loads(string)))
class RDFStructMetaclass(rdfvalue.RDFValueMetaclass):
"""A metaclass which registers new RDFProtoStruct instances."""
def __init__(cls, name, bases, env_dict): # pylint: disable=no-self-argument
super(RDFStructMetaclass, cls).__init__(name, bases, env_dict)
cls.type_infos = type_info.TypeDescriptorSet()
# Keep track of the late bound fields.
cls.late_bound_type_infos = {}
cls.type_infos_by_field_number = {}
cls.type_infos_by_encoded_tag = {}
# Build the class by parsing an existing protobuf class.
if cls.protobuf is not None:
proto2.DefineFromProtobuf(cls, cls.protobuf)
# Pre-populate the class using the type_infos class member.
if cls.type_description is not None:
for field_desc in cls.type_description:
cls.AddDescriptor(field_desc)
# Allow the class to suppress some fields.
if cls.suppressions:
cls.type_infos = cls.type_infos.Remove(*cls.suppressions)
cls._class_attributes = set(dir(cls))
class RDFStruct(rdfvalue.RDFValue):
"""An RDFValue object which contains fields like a struct.
Struct members contain values such as integers, strings etc. These are stored
in an internal data structure.
  A value can be in one of two states: the wire format, a serialized form that
  closely resembles what appears on the wire, and the decoded (python) format,
  the native python representation of the value. The idea is that converting a
  serialized wire encoding into the wire format is as cheap as possible, and
  likewise converting a python object into the python representation is very
  cheap.
Lazy evaluation occurs when we need to obtain the python representation of a
decoded field. This allows us to skip the evaluation of complex data.
For example, suppose we have a protobuf with several "string" fields
(i.e. unicode objects). The wire format for a "string" field is a UTF8 encoded
binary string, but the python object is a unicode object.
Normally when parsing the protobuf we can extract the wire format
representation very cheaply, but conversion to a unicode object is quite
  expensive. If the user never accesses the specific field, we can keep the
internal representation in wire format and not convert it to a unicode object.
"""
__metaclass__ = RDFStructMetaclass
# This can be populated with a type_info.TypeDescriptorSet() object to
# initialize the class.
type_description = None
# This class can be defined using the protobuf definition language (e.g. a
# .proto file). If defined here, we parse the .proto file for the message with
# the exact same class name and add the field descriptions from it.
definition = None
# This class can be defined in terms of an existing annotated regular
# protobuf. See RDFProtoStruct.DefineFromProtobuf().
protobuf = None
# This is where the type infos are constructed.
type_infos = None
# Mark as dirty each time we modify this object.
dirty = False
_data = None
# This is the serializer which will be used by this class. It can be
  # interchanged or overridden as required.
_serializer = JsonSerializer()
# A list of fields which will be removed from this class's type descriptor
# set.
suppressions = []
def __init__(self, initializer=None, age=None, **kwargs):
# Maintain the order so that parsing and serializing a proto does not change
# the serialized form.
self._data = {}
self._age = age
for arg, value in kwargs.iteritems():
if not hasattr(self.__class__, arg):
if arg in self.late_bound_type_infos:
raise AttributeError(
"Field %s refers to an as yet undefined Semantic Type." %
self.late_bound_type_infos[arg])
raise AttributeError(
"Proto %s has no field %s" % (self.__class__.__name__, arg))
      # Call setattr to allow the class to define @property pseudo fields which
# can also be initialized.
setattr(self, arg, value)
if initializer is None:
return
elif initializer.__class__ is self.__class__:
self.ParseFromString(initializer.SerializeToString())
elif initializer.__class__ is str:
try:
self.ParseFromString(initializer)
except rdfvalue.DecodeError:
logging.error("Unable to parse: %s.", initializer.encode("hex")[:2048])
raise
else:
raise ValueError("%s can not be initialized from %s" % (
self.__class__.__name__, type(initializer)))
def Clear(self):
"""Clear all the fields."""
self._data = {}
def HasField(self, field_name):
"""Checks if the field exists."""
return field_name in self._data
def _CopyRawData(self):
new_raw_data = {}
# We need to copy all entries in _data. Those entries are tuples of
# - an object (if it has already been deserialized)
# - the serialized object (if it has been serialized)
# - the type_info.
# To copy this, it's easiest to just copy the serialized object if it
# exists. We have to make sure though that the object is not a protobuf.
# If it is, someone else might have changed the subobject and the
# serialization is not accurate anymore. This is indicated by the dirty
# flag. Type_infos can be just copied by reference.
for name, (obj, serialized, t_info) in self._data.iteritems():
if serialized is None:
obj = copy.copy(obj)
else:
try:
if t_info.IsDirty(obj):
obj, serialized = copy.copy(obj), None
else:
obj = None
except AttributeError:
obj = None
new_raw_data[name] = (obj, serialized, t_info)
return new_raw_data
def Copy(self):
"""Make an efficient copy of this protobuf."""
result = self.__class__()
result.SetRawData(self._CopyRawData())
# The copy should have the same age as us.
result.age = self.age
return result
def __deepcopy__(self, memo):
result = self.__class__()
result.SetRawData(copy.deepcopy(self._data, memo))
return result
def GetRawData(self):
"""Retrieves the raw python representation of the object.
This is normally only used by serializers which are tightly coupled with the
raw data representation. External users should not make use of the internal
raw data structures.
Returns:
the raw python object representation (a dict).
"""
return self._data
def ListFields(self):
"""Iterates over the fields which are actually set.
Yields:
a tuple of (type_descriptor, value) for each field which is set.
"""
for type_descriptor in self.type_infos:
if type_descriptor.name in self._data:
yield type_descriptor, self.Get(type_descriptor.name)
def SetRawData(self, data):
self._data = data
self.dirty = True
def SerializeToString(self):
return self._serializer.SerializeToString(self)
def ParseFromString(self, string):
self._serializer.ParseFromString(self, string)
self.dirty = True
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
if len(self._data) != len(other.GetRawData()):
return False
for field in self._data:
if self.Get(field) != other.Get(field):
return False
return True
def __ne__(self, other):
return not self == other # pylint: disable=g-comparison-negation
def Format(self):
"""Format a message in a human readable way."""
yield "message %s {" % self.__class__.__name__
for k, (python_format, wire_format,
type_descriptor) in sorted(self.GetRawData().items()):
if python_format is None:
python_format = type_descriptor.ConvertFromWireFormat(
wire_format, container=self)
# Skip printing of unknown fields.
if isinstance(k, basestring):
prefix = utils.SmartStr(k) + " :"
for line in type_descriptor.Format(python_format):
yield " %s %s" % (prefix, line)
prefix = ""
yield "}"
def __str__(self):
return "\n".join(self.Format())
def __unicode__(self):
return utils.SmartUnicode(str(self))
def __dir__(self):
"""Add the virtualized fields to the console's tab completion."""
return (dir(super(RDFStruct, self)) +
[x.name for x in self.type_infos])
def _Set(self, value, type_descriptor):
"""Validate the value and set the attribute with it."""
attr = type_descriptor.name
# A value of None means we clear the field.
if value is None:
self._data.pop(attr, None)
return
# Validate the value and obtain the python format representation.
value = type_descriptor.Validate(value, container=self)
# Store the lazy value object.
self._data[attr] = (value, None, type_descriptor)
# Make sure to invalidate our parent's cache if needed.
self.dirty = True
return value
def Set(self, attr, value):
"""Sets the attribute in to the value."""
type_info_obj = self.type_infos.get(attr)
if type_info_obj is None:
raise AttributeError("Field %s is not known." % attr)
return self._Set(value, type_info_obj)
def SetWireFormat(self, attr, value):
"""Sets the attribute providing the serialized representation."""
type_info_obj = self.type_infos.get(attr)
if type_info_obj is None:
raise AttributeError("Field %s is not known." % attr)
self._data[attr] = (None, value, type_info_obj)
# Make sure to invalidate our parent's cache if needed.
self.dirty = True
def Get(self, attr):
"""Retrieve the attribute specified."""
entry = self._data.get(attr)
    # We don't have this field, try the defaults.
if entry is None:
type_descriptor = self.type_infos.get(attr)
if type_descriptor is None:
raise AttributeError("'%s' object has no attribute '%s'" % (
self.__class__.__name__, attr))
# Assign the default value now.
default = type_descriptor.GetDefault(container=self)
if default is None:
return
if type_descriptor.set_default_on_access:
default = self.Set(attr, default)
return default
python_format, wire_format, type_descriptor = entry
# Decode on demand and cache for next time.
if python_format is None:
python_format = type_descriptor.ConvertFromWireFormat(
wire_format, container=self)
self._data[attr] = (python_format, wire_format, type_descriptor)
return python_format
def GetWireFormat(self, attr):
"""Retrieve the attribute specified in serialized form."""
entry = self._data.get(attr)
    # We don't have this field, try the defaults.
if entry is None:
return ""
python_format, wire_format, type_descriptor = entry
if wire_format is None:
wire_format = python_format.SerializeToDataStore()
self._data[attr] = (python_format, wire_format, type_descriptor)
return wire_format
@classmethod
def AddDescriptor(cls, field_desc):
if not isinstance(field_desc, ProtoType):
raise type_info.TypeValueError(
"%s field '%s' should be of type ProtoType" % (
cls.__name__, field_desc.name))
cls.type_infos_by_field_number[field_desc.field_number] = field_desc
cls.type_infos.Append(field_desc)
def __getstate__(self):
"""Support the pickle protocol."""
return dict(data=self.SerializeToString())
def __setstate__(self, data):
"""Support the pickle protocol."""
self._data = {}
self.ParseFromString(data["data"])
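# Illustrative sketch (not part of the original module) of the lazy decoding
# described in the RDFStruct docstring. Field data lives in self._data as
# (python_format, wire_format, type_descriptor) tuples:
#
#   s.SetWireFormat("title", serialized_value)  # nothing is decoded here
#   s.Get("title")   # ConvertFromWireFormat() runs now and the result is cached
#   s.Get("title")   # the cached python object is returned directly
#
# The field name "title" and serialized_value are placeholders for illustration.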
class ProtobufType(ProtoNested):
"""A type descriptor for the top level protobuf."""
def __init__(self):
self.tag_data = ""
self.closing_tag_data = ""
class ProtocolBufferSerializer(AbstractSerializer):
"""A serializer based on protocol buffers."""
def __init__(self):
self.protobuf = ProtobufType()
def SerializeToString(self, data):
"""Serialize the RDFProtoStruct object into a string."""
stream = cStringIO.StringIO()
self.protobuf.Write(stream, data)
return stream.getvalue()
def ParseFromString(self, value_obj, string):
self.protobuf.ReadIntoObject(string, 0, value_obj)
class EnumContainer(object):
"""A data class to hold enum objects."""
def __init__(self, name=None, descriptions=None, **kwargs):
descriptions = descriptions or {}
self.enum_dict = {}
self.reverse_enum = {}
self.name = name
for k, v in kwargs.items():
v = Enum(v, name=k, description=descriptions.get(k, None))
self.enum_dict[k] = v
self.reverse_enum[v] = k
setattr(self, k, v)
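# Illustrative sketch (not part of the original module): EnumContainer groups
# named Enum values and keeps a reverse mapping from value back to name. The
# "Color" names below are hypothetical:
#
#   colors = EnumContainer(name="Color", RED=1, BLUE=2,
#                          descriptions=dict(RED="The colour red."))
#   colors.RED                        # an Enum instance with value 1
#   colors.reverse_enum[colors.BLUE]  # -> "BLUE"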
class RDFProtoStruct(RDFStruct):
"""An RDFStruct which uses protobufs for serialization.
This implementation is faster than the standard protobuf library.
"""
_serializer = ProtocolBufferSerializer()
# TODO(user): if a semantic proto defines a field with the same name as
# these class variables under some circumstances the proto default value will
# be set incorrectly. Figure out a way to make this safe.
shortest_encoded_tag = 0
longest_encoded_tag = 0
# If set to a standard proto2 generated class, we introspect it and extract
# type descriptors from it. This allows this implementation to use an
# annotated .proto file to define semantic types.
protobuf = None
# This mapping is used to provide concrete implementations for semantic types
# annotated in the .proto file. This is a dict with keys being the semantic
# names, and values being the concrete implementations for these types.
# By default include standard semantic objects. Additional objects can be
# added if needed.
_dependencies = dict(RDFURN=rdfvalue.RDFURN,
RDFDatetime=rdfvalue.RDFDatetime)
def AsPrimitiveProto(self):
"""Return an old style protocol buffer object."""
if self.protobuf:
result = self.protobuf()
result.ParseFromString(self.SerializeToString())
return result
def AsDict(self):
result = {}
for desc in self.type_infos:
if self.HasField(desc.name):
result[desc.name] = getattr(self, desc.name)
return result
def ToPrimitiveDict(self):
return self._ToPrimitive(self.AsDict())
def _ToPrimitive(self, value):
if isinstance(value, RepeatedFieldHelper):
return list(self._ToPrimitive(v) for v in value)
elif isinstance(value, rdfvalue.Dict):
new_val = value.ToDict()
return dict((k, self._ToPrimitive(v)) for k, v in new_val.items())
elif isinstance(value, dict):
return dict((k, self._ToPrimitive(v)) for k, v in value.items())
elif isinstance(value, RDFProtoStruct):
return self._ToPrimitive(value.AsDict())
elif isinstance(value, Enum):
return str(value)
else:
return value
def __nonzero__(self):
return bool(self._data)
@classmethod
def EmitProto(cls):
"""Emits .proto file definitions."""
result = "message %s {\n" % cls.__name__
for _, desc in sorted(cls.type_infos_by_field_number.items()):
result += desc.Definition()
result += "}\n"
return result
@classmethod
def _MakeDescriptor(cls, package_name, desc_proto, file_desc_proto,
descriptors=None):
"""Creates a protobuf descriptor out of DescriptorProto."""
descriptors = descriptors or dict()
full_message_name = [package_name, desc_proto.name]
file_descriptor = descriptor.FileDescriptor(
file_desc_proto.name, file_desc_proto.package,
serialized_pb=file_desc_proto.SerializeToString())
# Create Descriptors for enum types
enum_types = {}
for enum_proto in desc_proto.enum_type:
full_name = ".".join(full_message_name + [enum_proto.name])
values = []
for index, enum_val in enumerate(enum_proto.value):
values.append(descriptor.EnumValueDescriptor(
enum_val.name, index, enum_val.number))
enum_desc = descriptor.EnumDescriptor(enum_proto.name, full_name,
None, values)
enum_types[full_name] = enum_desc
fields = []
for field_proto in desc_proto.field:
full_name = ".".join(full_message_name + [field_proto.name])
enum_desc = None
message_desc = None
if field_proto.HasField("type_name"):
type_name = field_proto.type_name
full_type_name = ".".join(full_message_name +
[type_name[type_name.rfind(".") + 1:]])
if full_type_name in enum_types:
enum_desc = enum_types[full_type_name]
elif type_name in descriptors:
message_desc = descriptors[type_name]
# Else type_name references a non-local type, which isn't implemented
field = descriptor.FieldDescriptor(
field_proto.name, full_name, field_proto.number - 1,
field_proto.number, field_proto.type,
descriptor.FieldDescriptor.ProtoTypeToCppProtoType(field_proto.type),
field_proto.label, None, message_desc, enum_desc, None, False, None,
options=field_proto.options, has_default_value=False)
fields.append(field)
desc_name = ".".join(full_message_name)
return descriptor.Descriptor(desc_proto.name, desc_name, None, None, fields,
[], enum_types.values(), [],
file=file_descriptor)
PRIMITIVE_TYPE_MAPPING = {
"string": descriptor_pb2.FieldDescriptorProto.TYPE_STRING,
"bytes": descriptor_pb2.FieldDescriptorProto.TYPE_BYTES,
"uint64": descriptor_pb2.FieldDescriptorProto.TYPE_UINT64,
"int64": descriptor_pb2.FieldDescriptorProto.TYPE_INT32,
"float": descriptor_pb2.FieldDescriptorProto.TYPE_FLOAT,
"double": descriptor_pb2.FieldDescriptorProto.TYPE_DOUBLE,
"bool": descriptor_pb2.FieldDescriptorProto.TYPE_BOOL
}
@classmethod
def EmitProtoDescriptor(cls, package_name):
file_descriptor = descriptor_pb2.FileDescriptorProto()
file_descriptor.name = cls.__name__.lower() + ".proto"
file_descriptor.package = package_name
descriptors = dict()
message_type = file_descriptor.message_type.add()
message_type.name = cls.__name__
for number, desc in sorted(cls.type_infos_by_field_number.items()):
# Name 'metadata' is reserved to store ExportedMetadata value.
field = None
if (isinstance(desc, type_info.ProtoEnum) and
not isinstance(desc, type_info.ProtoBoolean)):
field = message_type.field.add()
field.type = descriptor_pb2.FieldDescriptorProto.TYPE_ENUM
field.type_name = desc.enum_name
if desc.enum_name not in [x.name for x in message_type.enum_type]:
enum_type = message_type.enum_type.add()
enum_type.name = desc.enum_name
for key, value in desc.enum.iteritems():
enum_type_value = enum_type.value.add()
enum_type_value.name = key
enum_type_value.number = value
elif isinstance(desc, type_info.ProtoEmbedded):
field = message_type.field.add()
field.type = descriptor_pb2.FieldDescriptorProto.TYPE_MESSAGE
if hasattr(desc.type, "protobuf"):
field.type_name = "." + desc.type.protobuf.DESCRIPTOR.full_name
descriptors[field.type_name] = desc.type.protobuf.DESCRIPTOR
# Register import of a proto file containing embedded protobuf
# definition.
if (desc.type.protobuf.DESCRIPTOR.file.name not in
file_descriptor.dependency):
file_descriptor.dependency.append(
desc.type.protobuf.DESCRIPTOR.file.name)
else:
raise NotImplementedError("Can't emit proto descriptor for values "
"with nested non-protobuf-based values.")
elif isinstance(desc, type_info.ProtoList):
field = message_type.field.add()
field.type = descriptor_pb2.FieldDescriptorProto.TYPE_MESSAGE
if hasattr(desc.type, "protobuf"):
field.type_name = "." + desc.type.protobuf.DESCRIPTOR.full_name
else:
raise NotImplementedError("Can't emit proto descriptor for values "
"with repeated non-protobuf-based values.")
field.label = descriptor_pb2.FieldDescriptorProto.LABEL_REPEATED
else:
field = message_type.field.add()
field.type = cls.PRIMITIVE_TYPE_MAPPING[desc.proto_type_name]
if field:
field.name = desc.name
field.number = number
if not field.HasField("label"):
field.label = descriptor_pb2.FieldDescriptorProto.LABEL_OPTIONAL
return cls._MakeDescriptor(package_name, message_type, file_descriptor,
descriptors=descriptors)
def Validate(self):
"""Validates the semantic protobuf for internal consistency.
Derived classes can override this method to ensure the proto is sane
(e.g. required fields, or any arbitrary condition). This method is called
prior to serialization. Note that it is not necessary to validate fields
against their semantic types - it is impossible to set fields which are
invalid. This function is more intended to validate the entire protobuf for
internal consistency.
Raises:
type_info.TypeValueError if the proto is invalid.
"""
@classmethod
def FromTextFormat(cls, text):
"""Parse this object from a text representation."""
tmp = cls.protobuf() # pylint: disable=not-callable
text_format.Merge(text, tmp)
return cls(tmp.SerializeToString())
@classmethod
def AddDescriptor(cls, field_desc):
"""Register this descriptor with the Proto Struct."""
if not isinstance(field_desc, ProtoType):
raise type_info.TypeValueError(
"%s field '%s' should be of type ProtoType" % (
cls.__name__, field_desc.name))
# Ensure the field descriptor knows the class that owns it.
field_desc.SetOwner(cls)
# If the field is late bound we do not really add it to the descriptor set
# yet. We must wait for the LateBindingPlaceHolder() to add it later.
if field_desc.late_bound:
# Keep track of unbound fields.
cls.late_bound_type_infos[field_desc.name] = field_desc
return
# Ensure this field number is unique:
if field_desc.field_number in cls.type_infos_by_field_number:
raise type_info.TypeValueError(
"Field number %s for field %s is not unique in %s" % (
field_desc.field_number, field_desc.name, cls.__name__))
# We store an index of the type info by tag values to speed up parsing.
cls.type_infos_by_field_number[field_desc.field_number] = field_desc
cls.type_infos_by_encoded_tag[field_desc.tag_data] = field_desc
cls.type_infos.Append(field_desc)
cls.late_bound_type_infos.pop(field_desc.name, None)
# Add direct accessors only if the class does not already have them.
if not hasattr(cls, field_desc.name):
# This lambda is a class method so pylint: disable=protected-access
# This is much faster than __setattr__/__getattr__
setattr(cls, field_desc.name, property(
lambda self: self.Get(field_desc.name),
lambda self, x: self._Set(x, field_desc),
None, field_desc.description))
class SemanticDescriptor(RDFProtoStruct):
"""A semantic protobuf describing the .proto extension."""
protobuf = semantic_pb2.SemanticDescriptor
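# Illustrative sketch (not part of the original module): besides wrapping an
# annotated .proto message via the "protobuf" class attribute (as
# SemanticDescriptor does above), an RDFProtoStruct can be declared directly
# from a type_description set. The field names and descriptor arguments below
# are assumptions for illustration only:
#
#   class SampleStruct(RDFProtoStruct):
#     type_description = type_info.TypeDescriptorSet(
#         ProtoString(name="title", field_number=1,
#                     description="A human readable title."),
#         ProtoUnsignedInteger(name="count", field_number=2))
#
#   s = SampleStruct(title="example")
#   s.count = 5
#   assert SampleStruct(s.SerializeToString()) == s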
|
{
"content_hash": "7e86a805a71f2f846a5d458484d932a7",
"timestamp": "",
"source": "github",
"line_count": 2187,
"max_line_length": 80,
"avg_line_length": 32.19844535893919,
"alnum_prop": 0.6687778692947826,
"repo_name": "defaultnamehere/grr",
"id": "701393754588c7629427cb041f4c42b6fe4597f2",
"size": "70440",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/rdfvalues/structs.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "227"
},
{
"name": "C++",
"bytes": "55149"
},
{
"name": "CSS",
"bytes": "36345"
},
{
"name": "JavaScript",
"bytes": "831633"
},
{
"name": "Makefile",
"bytes": "5939"
},
{
"name": "Python",
"bytes": "4541648"
},
{
"name": "Shell",
"bytes": "31077"
}
],
"symlink_target": ""
}
|
import os
from json import dumps
from django.conf import settings
from django.test import TestCase
from django.core.files.storage import default_storage
from django.utils.datastructures import MultiValueDict
from django.core.files.uploadedfile import InMemoryUploadedFile
from django_images.models import Image
from django_images.forms import ImageForm
from django_images.forms import MultipleFormatImageForm
from .models import TestImage
class TestDjangoImages(TestCase):
def test_validation(self):
"""Validate an image against a complex format"""
filepath = os.path.join(settings.BASE_DIR, 'middle.jpeg')
with open(filepath) as f:
# prepare form data
image = InMemoryUploadedFile(
f,
'image',
'middle.jpeg',
'image/jpeg',
42, # not significant for the test
'utf-8'
)
files = MultiValueDict()
files['image'] = image
post = MultiValueDict()
post['name'] = 'test image'
# create form
form = ImageForm(TestImage, post, files)
# validate resize operation
v = form.is_valid()
self.assertTrue(v)
    def test_multi_format_validation(self):
        """Validate an image using the multiple format form"""
filepath = os.path.join(settings.BASE_DIR, 'big.jpeg')
with open(filepath) as f:
# prepare form data
image = InMemoryUploadedFile(
f,
'image',
'big.jpeg',
'image/jpeg',
42, # not significant for the test
'utf-8'
)
files = MultiValueDict()
files['image'] = image
post = MultiValueDict()
post['name'] = 'test image'
post['fmt'] = 'TestImage'
# create form
form = MultipleFormatImageForm(Image.formats(), post, files)
# validate resize operation
v = form.is_valid()
self.assertTrue(v)
def test_resize_big_image_in_background_format(self):
"""Test resizing of big enough image to background format"""
filepath = os.path.join(settings.BASE_DIR, 'big.jpeg')
with open(filepath) as f:
# prepare form data
image = InMemoryUploadedFile(
f,
'image',
'big.jpeg',
'image/jpeg',
42, # not significant for the test
'utf-8'
)
files = MultiValueDict()
files['image'] = image
post = MultiValueDict()
post['name'] = 'test image'
# create form
form = ImageForm(TestImage, post, files)
# validate resize operation
self.assertTrue(form.is_valid())
# execute resize operation
image = form.save()
for size in ('og', 'lg', 'md', 'sm', 'xs'):
filepath = getattr(image, size)['filepath']
filepath = os.path.join(settings.MEDIA_ROOT, filepath)
self.assertTrue(os.path.exists(filepath))
    def test_model_api(self):
        """Test that the Image model behaves correctly"""
image = TestImage(
uid='42',
json_xs=dumps(dict(
width=100,
height=100,
filepath='42_xs.ext',
)),
json_sm=dumps(dict(
width=100,
height=100,
filepath='42_sm.ext',
)),
json_md=dumps(dict(
width=100,
height=100,
filepath='42_md.ext',
)),
json_lg=dumps(dict(
width=100,
height=100,
filepath='42_lg.ext',
)),
json_og=dumps(dict(
width=100,
height=100,
filepath='42.ext',
))
)
self.assertEqual(
image.xs['url'],
'http://example.com/media/42_xs.ext'
)
self.assertEqual(
image.sm['url'],
'http://example.com/media/42_sm.ext'
)
self.assertEqual(
image.md['url'],
'http://example.com/media/42_md.ext'
)
self.assertEqual(
image.lg['url'],
'http://example.com/media/42_lg.ext'
)
self.assertEqual(
image.og['url'],
'http://example.com/media/42.ext'
)
    def test_fail_to_resize_small_image_in_background_format(self):
        """Test that validation fails for an image that is too small to resize"""
filepath = os.path.join(settings.BASE_DIR, 'small.jpeg')
with open(filepath) as f:
# prepare form data
image = InMemoryUploadedFile(
f,
'image',
'small.jpeg',
'image/jpeg',
42, # not significant for the test
'utf-8'
)
files = MultiValueDict()
files['image'] = image
post = MultiValueDict()
post['ptype'] = 1
post['name'] = 'test with small.jpeg'
# create form
form = ImageForm(TestImage, post, files)
# validate resize operation
self.assertFalse(form.is_valid())
def test_generate_unique_filename(self):
"""Test that two images with same size and same name
can be stored on disk"""
def create_image():
filepath = os.path.join(settings.BASE_DIR, 'big.jpeg')
with open(filepath) as f:
# prepare form data
image = InMemoryUploadedFile(
f,
'image',
'big.jpeg',
'image/jpeg',
42, # not significant for the test
'utf-8'
)
files = MultiValueDict()
files['image'] = image
post = MultiValueDict()
post['name'] = 'test image'
# create form
form = ImageForm(TestImage, post, files)
# validate resize operation
form.is_valid()
# execute resize operation
image = form.save()
return image
# create two times the same image:
one = create_image()
two = create_image()
self.assertTrue(one.og['url'] != two.og['url'])
    def test_delete_image(self):
        """Test that deleting an image removes its files from storage"""
def create_image():
filepath = os.path.join(settings.BASE_DIR, 'big.jpeg')
with open(filepath) as f:
# prepare form data
image = InMemoryUploadedFile(
f,
'image',
'big.jpeg',
'image/jpeg',
42, # not significant for the test
'utf-8'
)
files = MultiValueDict()
files['image'] = image
post = MultiValueDict()
post['ptype'] = 1
post['name'] = 'test with big.jpeg'
# create form
form = ImageForm(TestImage, post, files)
# validate resize operation
form.is_valid()
# execute resize operation
image = form.save()
return image
# create two times the same image:
one = create_image()
self.assertTrue(default_storage.exists(one.og['filepath']))
one.delete()
self.assertFalse(default_storage.exists(one.og['filepath']))
|
{
"content_hash": "ae40e8d42250baab787c492bb69e4266",
"timestamp": "",
"source": "github",
"line_count": 242,
"max_line_length": 72,
"avg_line_length": 32.81404958677686,
"alnum_prop": 0.4850774461654703,
"repo_name": "amirouche/django-images",
"id": "cc954d5579e45e84fd0e4bfcebd06e9fb3d91389",
"size": "7941",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "640"
},
{
"name": "HTML",
"bytes": "2125"
},
{
"name": "Python",
"bytes": "33962"
},
{
"name": "Shell",
"bytes": "33"
}
],
"symlink_target": ""
}
|
from django.db import models
# Create your models here.
class Status(models.Model):
    lastchange = models.CharField(max_length=90, db_column='LastChange')  # Field name made lowercase.
smbhosts = models.IntegerField(db_column='SMBHosts') # Field name made lowercase.
ftphosts = models.IntegerField(db_column='FTPHosts') # Field name made lowercase.
directories = models.IntegerField(db_column='Directories') # Field name made lowercase.
files = models.IntegerField(db_column='Files') # Field name made lowercase.
filesize = models.BigIntegerField(db_column='FileSize') # Field name made lowercase.
queries = models.IntegerField(db_column='Queries') # Field name made lowercase.
updatinghost = models.IntegerField(db_column='UpdatingHost') # Field name made lowercase.
id = models.IntegerField(primary_key=True, db_column='ID') # Field name made lowercase.
class Meta:
db_table = u'status'
class Status2(models.Model):
id = models.IntegerField(primary_key=True, db_column='ID') # Field name made lowercase.
time = models.DateTimeField(db_column='Time') # Field name made lowercase.
queries = models.IntegerField(db_column='Queries') # Field name made lowercase.
onlinehosts = models.IntegerField(db_column='OnlineHosts') # Field name made lowercase.
class Meta:
db_table = u'status2'
class Log(models.Model):
id = models.IntegerField(primary_key=True,db_column="LID")
# Time - timestamp of logged activity
time = models.IntegerField(db_column="Time",max_length=10) # !! Turn this into datefield after production
# SearchString - site activities, categorized by start word
# 'Browse:' - user navigating deep browse.
# 'Search:' - user making regular search query
# 'Search [MOVIE]:' - user searching through the movies search
# 'Search [MUSIC]:' - user searching through the music search
# 'Search [SHOW]:' - user searching through the show search
# 'Request:' - user making request
# 'Complete:' - user completing request
# 'EditReq:' - User editing request
# 'Poll:' - User creating poll
searchstring = models.CharField(max_length=255,db_column="SearchString")
# Client - IP of user
client = models.CharField(max_length=64,db_column="Client")
    # Duration - purpose unclear; retained as-is from the legacy schema.
duration = models.FloatField(max_length=10, db_column="Duration", default=0)
    # Found - also unclear; retained as-is from the legacy schema.
found = models.IntegerField(max_length=10,db_column="Found", default=0)
# Hits - how many results from a search query
hits = models.IntegerField(max_length=10,db_column="Hits", default=0)
# Position - what page of results, multiplied by PERPAGE
position = models.IntegerField(max_length=10,db_column='Position', default=0)
# Mode - which index searched
# 1. Files
# 2. Dirs
# 3. FilesDirs
mode = models.IntegerField(max_length=3,db_column="Mode",default=0)
# HostType - type of share searched
# 1: SMB & FTP
# 2: FTP
# 3: SMB <- the only one we use.
hosttype = models.IntegerField(max_length=3,db_column="HostType",default=0)
# Flags - means different things for different activities, I think
# Search: describes type of file searched for
# 1: all files
# 2: audio files
# 3: video files
# 4: software
# 5: text
# 6: images
flags = models.IntegerField(max_length=3, db_column="Flags",default=0)
date = models.IntegerField(max_length=10, db_column="Date",default=0)
minsize = models.IntegerField(max_length=10,db_column="MinSize",default=0)
maxsize = models.IntegerField(max_length=10,db_column="MaxSize",default=0)
class Meta:
db_table = u'log'
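# Illustrative helper (not part of the original schema): shows how the
# SearchString prefixes documented above are typically used to pull one kind
# of activity out of the log table. The default prefix is an assumption based
# on the comments on the Log model.
def recent_activity(prefix='Search:', limit=50):
    """Return the most recent Log rows whose SearchString starts with prefix."""
    return Log.objects.filter(searchstring__startswith=prefix).order_by('-time')[:limit]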
|
{
"content_hash": "74f9229c863bb0180d39da6eda05b9f1",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 109,
"avg_line_length": 44.54545454545455,
"alnum_prop": 0.6678571428571428,
"repo_name": "blampe/M2M",
"id": "db0e862b6bf54f1b1ab1ebb75e56671c5a1a3325",
"size": "3920",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "m2m/stats/models.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "754736"
},
{
"name": "Java",
"bytes": "6333"
},
{
"name": "JavaScript",
"bytes": "21268"
},
{
"name": "PHP",
"bytes": "18"
},
{
"name": "Python",
"bytes": "6374305"
},
{
"name": "Shell",
"bytes": "4721"
}
],
"symlink_target": ""
}
|
import numpy as np
class Node(object):
"""
Base class for nodes in the network.
Arguments:
`inbound_nodes`: A list of nodes with edges into this node.
"""
def __init__(self, inbound_nodes=[]):
"""
Node's constructor (runs when the object is instantiated). Sets
properties that all nodes need.
"""
# A list of nodes with edges into this node.
self.inbound_nodes = inbound_nodes
# The eventual value of this node. Set by running
# the forward() method.
self.value = None
# A list of nodes that this node outputs to.
self.outbound_nodes = []
# New property! Keys are the inputs to this node and
# their values are the partials of this node with
# respect to that input.
self.gradients = {}
# Sets this node as an outbound node for all of
# this node's inputs.
for node in inbound_nodes:
node.outbound_nodes.append(self)
def forward(self):
"""
Every node that uses this class as a base class will
need to define its own `forward` method.
"""
raise NotImplementedError
def backward(self):
"""
Every node that uses this class as a base class will
need to define its own `backward` method.
"""
raise NotImplementedError
class Input(Node):
"""
A generic input into the network.
"""
def __init__(self):
# The base class constructor has to run to set all
# the properties here.
#
# The most important property on an Input is value.
# self.value is set during `topological_sort` later.
Node.__init__(self)
def forward(self):
# Do nothing because nothing is calculated.
pass
def backward(self):
# An Input node has no inputs so the gradient (derivative)
# is zero.
# The key, `self`, is reference to this object.
self.gradients = {self: 0}
# Weights and bias may be inputs, so you need to sum
# the gradient from output gradients.
for n in self.outbound_nodes:
self.gradients[self] += n.gradients[self]
class Linear(Node):
"""
Represents a node that performs a linear transform.
"""
def __init__(self, X, W, b):
# The base class (Node) constructor. Weights and bias
# are treated like inbound nodes.
Node.__init__(self, [X, W, b])
def forward(self):
"""
Performs the math behind a linear transform.
"""
X = self.inbound_nodes[0].value
W = self.inbound_nodes[1].value
b = self.inbound_nodes[2].value
self.value = np.dot(X, W) + b
def backward(self):
"""
Calculates the gradient based on the output values.
"""
# Initialize a partial for each of the inbound_nodes.
self.gradients = {n: np.zeros_like(n.value) for n in self.inbound_nodes}
# Cycle through the outputs. The gradient will change depending
# on each output, so the gradients are summed over all outputs.
for n in self.outbound_nodes:
# Get the partial of the cost with respect to this node.
grad_cost = n.gradients[self]
# Set the partial of the loss with respect to this node's inputs.
self.gradients[self.inbound_nodes[0]] += np.dot(grad_cost, self.inbound_nodes[1].value.T)
# Set the partial of the loss with respect to this node's weights.
self.gradients[self.inbound_nodes[1]] += np.dot(self.inbound_nodes[0].value.T, grad_cost)
# Set the partial of the loss with respect to this node's bias.
self.gradients[self.inbound_nodes[2]] += np.sum(grad_cost, axis=0, keepdims=False)
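# Note (illustrative, not part of the original module): in Linear.backward()
# above, if X has shape (m, n_in), W has shape (n_in, n_out) and grad_cost has
# shape (m, n_out), the three accumulated partials have shapes (m, n_in),
# (n_in, n_out) and (n_out,), matching X, W and b respectively.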
class Sigmoid(Node):
"""
Represents a node that performs the sigmoid activation function.
"""
def __init__(self, node):
# The base class constructor.
Node.__init__(self, [node])
def _sigmoid(self, x):
"""
This method is separate from `forward` because it
will be used with `backward` as well.
`x`: A numpy array-like object.
"""
return 1. / (1. + np.exp(-x))
def forward(self):
"""
Perform the sigmoid function and set the value.
"""
input_value = self.inbound_nodes[0].value
self.value = self._sigmoid(input_value)
def backward(self):
"""
Calculates the gradient using the derivative of
the sigmoid function.
"""
# Initialize the gradients to 0.
self.gradients = {n: np.zeros_like(n.value) for n in self.inbound_nodes}
# Sum the partial with respect to the input over all the outputs.
for n in self.outbound_nodes:
grad_cost = n.gradients[self]
sigmoid = self.value
self.gradients[self.inbound_nodes[0]] += sigmoid * (1 - sigmoid) * grad_cost
class MSE(Node):
def __init__(self, y, a):
"""
The mean squared error cost function.
Should be used as the last node for a network.
"""
# Call the base class' constructor.
Node.__init__(self, [y, a])
def forward(self):
"""
Calculates the mean squared error.
"""
# NOTE: We reshape these to avoid possible matrix/vector broadcast
# errors.
#
# For example, if we subtract an array of shape (3,) from an array of shape
# (3,1) we get an array of shape(3,3) as the result when we want
# an array of shape (3,1) instead.
#
        # Making both arrays (3,1) ensures the result is (3,1) and does
# an elementwise subtraction as expected.
y = self.inbound_nodes[0].value.reshape(-1, 1)
a = self.inbound_nodes[1].value.reshape(-1, 1)
self.m = self.inbound_nodes[0].value.shape[0]
# Save the computed output for backward.
self.diff = y - a
self.value = np.mean(self.diff ** 2)
def backward(self):
"""
Calculates the gradient of the cost.
"""
self.gradients[self.inbound_nodes[0]] = (2 / self.m) * self.diff
self.gradients[self.inbound_nodes[1]] = (-2 / self.m) * self.diff
class MathStuff:
@staticmethod
def topological_sort(feed_dict):
"""
Sort the nodes in topological order using Kahn's Algorithm.
`feed_dict`: A dictionary where the key is a `Input` Node and the value is the respective value feed to that Node.
Returns a list of sorted nodes.
"""
input_nodes = [n for n in feed_dict.keys()]
G = {}
nodes = [n for n in input_nodes]
while len(nodes) > 0:
n = nodes.pop(0)
if n not in G:
G[n] = {'in': set(), 'out': set()}
for m in n.outbound_nodes:
if m not in G:
G[m] = {'in': set(), 'out': set()}
G[n]['out'].add(m)
G[m]['in'].add(n)
nodes.append(m)
L = []
S = set(input_nodes)
while len(S) > 0:
n = S.pop()
if isinstance(n, Input):
n.value = feed_dict[n]
L.append(n)
for m in n.outbound_nodes:
G[n]['out'].remove(m)
G[m]['in'].remove(n)
# if no other incoming edges add to S
if len(G[m]['in']) == 0:
S.add(m)
return L
@staticmethod
def forward_and_backward(graph):
"""
Performs a forward pass and a backward pass through a list of sorted Nodes.
Arguments:
`graph`: The result of calling `topological_sort`.
"""
# Forward pass
for n in graph:
n.forward()
# Backward pass
# see: https://docs.python.org/2.3/whatsnew/section-slices.html
for n in graph[::-1]:
n.backward()
@staticmethod
def sgd_update(trainables, learning_rate=1e-2):
r"""
Updates the value of each trainable with SGD.
The equation is given by:
.. math:: x=x-\alpha *\frac{\partial cost}{\partial x}
        where :math:`\alpha` is the learning rate
The line
.. code-block:: python
t.value -= learning_rate * partial
Is given by
        .. math:: {x}' = x - \alpha \nabla C
Arguments:
`trainables`: A list of `Input` Nodes representing weights/biases.
`learning_rate`: The learning rate.
"""
# Change the trainable's value by subtracting the learning rate
# multiplied by the partial of the cost with respect to this
# trainable.
for t in trainables:
partial = t.gradients[t]
t.value -= learning_rate * partial
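# Illustrative sketch (not part of the original module): wiring up a tiny
# network with the classes above and taking a single SGD step. The input
# values and shapes below are arbitrary.
def _example_training_step():
    X, W, b, y = Input(), Input(), Input(), Input()
    cost = MSE(y, Sigmoid(Linear(X, W, b)))
    feed_dict = {
        X: np.array([[-1., -2.], [-1., -2.]]),  # 2 samples, 2 features
        W: np.array([[2.], [3.]]),              # (2, 1) weight matrix
        b: np.array([-3.]),                     # bias
        y: np.array([1., 2.]),                  # targets
    }
    graph = MathStuff.topological_sort(feed_dict)
    MathStuff.forward_and_backward(graph)       # forward pass, then gradients
    MathStuff.sgd_update([W, b], learning_rate=1e-2)
    return cost.value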
|
{
"content_hash": "6dc7bd9109b998d6a16a8e30eb2f76d5",
"timestamp": "",
"source": "github",
"line_count": 284,
"max_line_length": 122,
"avg_line_length": 31.5,
"alnum_prop": 0.5602503912363067,
"repo_name": "akshaybabloo/Car-ND",
"id": "20af5fc64a04d8565fd2b10af2712846351b9f7e",
"size": "8946",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Term_1/MinFlow_2/MiniFlow_challenge/MiniFLow.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "551"
},
{
"name": "Jupyter Notebook",
"bytes": "16855408"
},
{
"name": "Python",
"bytes": "367767"
}
],
"symlink_target": ""
}
|
import unittest, random, sys, time, re
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_browse as h2b, h2o_import as h2i, h2o_glm
def write_syn_dataset(csvPathname, rowCount, colCount, SEED):
r1 = random.Random(SEED)
    # derive a second random stream from the original SEED, for repeatability.
SEED2 = r1.randint(0, sys.maxint)
r2 = random.Random(SEED2)
dsf = open(csvPathname, "w+")
# complete separation
for i in range(rowCount):
# not using colCount. Just one col
rowData = []
ri1 = i
rowTotal = ri1
rowData.append(ri1)
if i > (rowCount/2):
result = 1
else:
result = 0
rowData.append(str(result))
### print colCount, rowTotal, result
rowDataCsv = ",".join(map(str,rowData))
dsf.write(rowDataCsv + "\n")
dsf.close()
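# For example, write_syn_dataset(csvPathname, 6, 1, seed) produces a file with
# complete separation on the single predictor column (the classic case where
# logistic regression coefficients fail to converge):
#   0,0
#   1,0
#   2,0
#   3,0
#   4,1
#   5,1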
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
global SEED
SEED = h2o.setup_random_seed()
h2o.init(1,use_flatfile=True)
@classmethod
def tearDownClass(cls):
### time.sleep(3600)
h2o.tear_down_cloud()
def test_GLM2_convergence_2(self):
SYNDATASETS_DIR = h2o.make_syn_dir()
tryList = [
(100, 1, 'cD', 300),
# (100, 100, 'cE', 300),
# (100, 200, 'cF', 300),
# (100, 300, 'cG', 300),
# (100, 400, 'cH', 300),
# (100, 500, 'cI', 300),
]
### h2b.browseTheCloud()
lenNodes = len(h2o.nodes)
USEKNOWNFAILURE = False
for (rowCount, colCount, hex_key, timeoutSecs) in tryList:
SEEDPERFILE = random.randint(0, sys.maxint)
csvFilename = 'syn_%s_%sx%s.csv' % (SEEDPERFILE,rowCount,colCount)
csvPathname = SYNDATASETS_DIR + '/' + csvFilename
print "\nCreating random", csvPathname
write_syn_dataset(csvPathname, rowCount, colCount, SEEDPERFILE)
if USEKNOWNFAILURE:
csvFilename = 'failtoconverge_100x50.csv'
csvPathname = 'logreg/' + csvFilename
parseResult = h2i.import_parse(path=csvPathname, hex_key=hex_key, timeoutSecs=10, schema='put')
print "Parse result['destination_key']:", parseResult['destination_key']
inspect = h2o_cmd.runInspect(None, parseResult['destination_key'])
print "\n" + csvFilename
y = colCount
kwargs = {
'max_iter': 40,
'lambda': 1e-1,
'alpha': 0.5,
'n_folds': 0,
'beta_epsilon': 1e-4,
}
if USEKNOWNFAILURE:
kwargs['response'] = 50
else:
kwargs['response'] = y
emsg = None
for i in range(3):
start = time.time()
glm = h2o_cmd.runGLM(parseResult=parseResult, timeoutSecs=timeoutSecs, **kwargs)
print 'glm #', i, 'end on', csvPathname, 'took', time.time() - start, 'seconds'
# we can pass the warning, without stopping in the test, so we can
# redo it in the browser for comparison
(warnings, coefficients, intercept) = h2o_glm.simpleCheckGLM(self,
glm, None, allowFailWarning=True, **kwargs)
if 1==0:
print "\n", "\ncoefficients in col order:"
# since we're loading the x50 file all the time..the real colCount
# should be 50 (0 to 49)
if USEKNOWNFAILURE:
showCols = 50
else:
showCols = colCount
for c in range(showCols):
print "%s:\t%s" % (c, coefficients[c])
print "intercept:\t", intercept
# gets the failed to converge, here, after we see it in the browser too
x = re.compile("[Ff]ailed")
if warnings:
print "warnings:", warnings
for w in warnings:
print "w:", w
if (re.search(x,w)):
# first
if emsg is None: emsg = w
print w
if emsg: break
if not h2o.browse_disable:
h2b.browseJsonHistoryAsUrlLastMatch("Inspect")
time.sleep(5)
h2b.browseJsonHistoryAsUrlLastMatch("GLM")
time.sleep(5)
# gets the failed to converge, here, after we see it in the browser too
if emsg is not None:
raise Exception(emsg)
if __name__ == '__main__':
h2o.unit_main()
|
{
"content_hash": "e1ef09a499965795d3a6b683087a72b8",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 107,
"avg_line_length": 35.56521739130435,
"alnum_prop": 0.5008149959250203,
"repo_name": "eg-zhang/h2o-2",
"id": "fed8f9cedfc2e4ffa3cae2b8dbdb16bf1b1ceee8",
"size": "4908",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "py/testdir_single_jvm/test_GLM2_convergence_2.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "7065"
},
{
"name": "C",
"bytes": "2461"
},
{
"name": "CSS",
"bytes": "216906"
},
{
"name": "CoffeeScript",
"bytes": "205094"
},
{
"name": "Emacs Lisp",
"bytes": "7446"
},
{
"name": "Groovy",
"bytes": "518"
},
{
"name": "HTML",
"bytes": "177967"
},
{
"name": "Java",
"bytes": "5177683"
},
{
"name": "JavaScript",
"bytes": "42958"
},
{
"name": "Makefile",
"bytes": "50927"
},
{
"name": "PHP",
"bytes": "8490"
},
{
"name": "Perl",
"bytes": "22594"
},
{
"name": "Python",
"bytes": "3244626"
},
{
"name": "R",
"bytes": "1631216"
},
{
"name": "Ruby",
"bytes": "299"
},
{
"name": "Scala",
"bytes": "39365"
},
{
"name": "Shell",
"bytes": "189829"
}
],
"symlink_target": ""
}
|
from pbr import version
__version__ = version.VersionInfo('python-magnetodbclient').version_string()
|
{
"content_hash": "690399f8d6cc30720d63f00598f54bf6",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 76,
"avg_line_length": 25.75,
"alnum_prop": 0.7669902912621359,
"repo_name": "aostapenko/python-magnetodbclient",
"id": "8043bec97e651c3b6249ed65112d8747c3e3ad1b",
"size": "850",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "magnetodbclient/version.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "143024"
}
],
"symlink_target": ""
}
|
from enum import Enum,EnumMeta
SymEnum = None  # forward declaration; the class is defined below
class SymEnumValue(object):
    "Class used to define SymEnum members that have additional attributes."
def __init__(self, value, externalName=None):
self.value = value
self.externalName = externalName
class _SysEnumExternalNameMap(object):
"Mapping between internal and external member names"
def __init__(self):
self.intToExt = {}
self.extToInt = {}
def add(self, intName, extName):
self.intToExt[intName] = extName
self.extToInt[extName] = intName
    def toExtName(self, intName):
        "return the name unchanged if there is no mapping"
return self.intToExt.get(intName, intName)
    def toIntName(self, extName):
        "return the name unchanged if there is no mapping"
return self.extToInt.get(extName, extName)
class SymEnumMeta(EnumMeta):
    """Metaclass for SymEnum that implements looking up members by their
    string name."""
@staticmethod
    def __symEnumValueUpdate(classdict, name, extNameMap):
        "record info about a member specified with a SymEnumValue and update its value in classdict"
symValue = classdict[name]
classdict[name] = symValue.value
extNameMap.add(name, symValue.externalName)
@staticmethod
def __symEnumDerivedNew(metacls, cls, bases, classdict):
"update class fields defined as SymEnumValue to register external names"
extNameMap = classdict["__extNameMap__"] = _SysEnumExternalNameMap()
for name in classdict.iterkeys():
if isinstance(classdict[name], SymEnumValue):
SymEnumMeta.__symEnumValueUpdate(classdict, name, extNameMap)
return EnumMeta.__new__(metacls, cls, bases, classdict)
def __new__(metacls, cls, bases, classdict):
if SymEnum in bases:
return SymEnumMeta.__symEnumDerivedNew(metacls, cls, bases, classdict)
else:
return EnumMeta.__new__(metacls, cls, bases, classdict)
    def __call__(cls, value, names=None, module=None, typ=None):
        "look up a value object, either by name or by value"
if (names is None) and isinstance(value, str):
# map string name to instance, check for external name
value = cls.__extNameMap__.toIntName(value)
member = cls._member_map_.get(value)
if member is None:
raise ValueError("'%s' is not a member or alias of %s" % (value, cls.__name__))
else:
return member
else:
return EnumMeta.__call__(cls, value, names, module, typ)
class SymEnum(Enum):
"""
    Base class for symbolic enumerations. These are easily converted between
    string values and Enum objects. This supports construction from string
    values, and str() returns the value without the class name. Aliases can be
added using the Enum approach of:
val = 1
valalias = val
To handle string values that are not valid Python member names, an external
name maybe associated with a field using a SymEnumValue object
utr5 = SymEnumValue(1, "5'UTR")
Either field name or external name maybe used to obtain a value. The external
name is returned with str().
"""
__metaclass__ = SymEnumMeta
def __str__(self):
return self.__extNameMap__.toExtName(self.name)
def __le__(self, other):
if isinstance(other, SymEnum):
return self.value <= other.value
else:
return self.value <= other
def __lt__(self, other):
if isinstance(other, SymEnum):
return self.value < other.value
else:
return self.value < other
def __ge__(self, other):
if isinstance(other, SymEnum):
return self.value >= other.value
else:
return self.value >= other
def __gt__(self, other):
if isinstance(other, SymEnum):
return self.value > other.value
else:
return self.value > other
def __eq__(self, other):
if isinstance(other, SymEnum):
return self.value == other.value
else:
return False
def __ne__(self, other):
if isinstance(other, SymEnum):
return self.value != other.value
else:
return True
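# Illustrative usage sketch (hypothetical Strand class; assumes Python 2 with
# the enum34 backport, matching this module's use of iterkeys/__metaclass__):
if __name__ == "__main__":
    class Strand(SymEnum):
        plus = 1
        minus = 2
        utr5 = SymEnumValue(3, "5'UTR")
    # str() returns the registered external name when one exists
    assert str(Strand.utr5) == "5'UTR"
    # construction accepts the member name, the external name, or the value
    assert Strand("utr5") is Strand.utr5
    assert Strand("5'UTR") is Strand.utr5
    assert Strand(1) is Strand.plus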
|
{
"content_hash": "deda80dcd2fd7f73b3ceebc1bbf575cf",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 95,
"avg_line_length": 35.28455284552845,
"alnum_prop": 0.6248847926267281,
"repo_name": "ifiddes/pycbio",
"id": "64336153389808a4879918e7fe01e902e4c15837",
"size": "4432",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pycbio/sys/symEnum.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Gnuplot",
"bytes": "112679"
},
{
"name": "Makefile",
"bytes": "7284"
},
{
"name": "Python",
"bytes": "714944"
},
{
"name": "Shell",
"bytes": "186"
}
],
"symlink_target": ""
}
|
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Award'
db.create_table('videos_award', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('video', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['videos.Video'], null=True, blank=True)),
('preview', self.gf('django.db.models.fields.files.ImageField')(max_length=100)),
('category', self.gf('django.db.models.fields.CharField')(max_length=50)),
('region', self.gf('django.db.models.fields.CharField')(max_length=50)),
('award_type', self.gf('django.db.models.fields.CharField')(max_length=50)),
))
db.send_create_signal('videos', ['Award'])
def backwards(self, orm):
# Deleting model 'Award'
db.delete_table('videos_award')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'videos.award': {
'Meta': {'object_name': 'Award'},
'award_type': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'category': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'preview': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'region': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.Video']", 'null': 'True', 'blank': 'True'})
},
'videos.video': {
'Meta': {'object_name': 'Video'},
'bitly_link_db': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'category': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 2, 28, 0, 0)', 'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'judge_mark': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'region': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'shortlink': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'default': "'unsent'", 'max_length': '10'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'upload_url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'views': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'votes': ('django.db.models.fields.BigIntegerField', [], {'default': '0'})
}
}
complete_apps = ['videos']
|
{
"content_hash": "1f1357838de428e8e157a2d8085202df",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 182,
"avg_line_length": 69.9,
"alnum_prop": 0.5526943252265141,
"repo_name": "mozilla/firefox-flicks",
"id": "9636fc55bfb6a0ab9a5d27dd978a331e72f0632d",
"size": "6315",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flicks/videos/migrations/0010_auto__add_award.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "68358"
},
{
"name": "HTML",
"bytes": "337116"
},
{
"name": "JavaScript",
"bytes": "44816"
},
{
"name": "Puppet",
"bytes": "6653"
},
{
"name": "Python",
"bytes": "4166155"
},
{
"name": "Shell",
"bytes": "2409"
}
],
"symlink_target": ""
}
|
import logging
import string
import urllib
import urllib2
from command import Command
import utils
class Request(urllib2.Request):
"""Extends the urllib2.Request to support all HTTP request types."""
def __init__(self, url, data=None, method=None):
"""Initialise a new HTTP request.
Args:
url - String for the URL to send the request to.
data - Data to send with the request.
"""
if method is None:
method = data is not None and 'POST' or 'GET'
elif method != 'POST' and method != 'PUT':
data = None
self._method = method
urllib2.Request.__init__(self, url, data=data)
def get_method(self):
"""Returns the HTTP method used by this request."""
return self._method
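# Illustrative behaviour sketch (hypothetical values, not part of the original
# file): a request built as Request(url, data='{}', method='DELETE') reports
# get_method() == 'DELETE' and drops the body, while Request(url, data='{}')
# defaults to a POST that carries the supplied data.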
class Response(object):
"""Represents an HTTP response.
Attributes:
fp - File object for the response body.
code - The HTTP status code returned by the server.
headers - A dictionary of headers returned by the server.
url - URL of the retrieved resource represented by this Response.
"""
def __init__(self, fp, code, headers, url):
"""Initialise a new Response.
Args:
fp - The response body file object.
code - The HTTP status code returned by the server.
headers - A dictionary of headers returned by the server.
url - URL of the retrieved resource represented by this Response.
"""
self.fp = fp
self.read = fp.read
self.code = code
self.headers = headers
self.url = url
def close(self):
"""Close the response body file object."""
self.read = None
self.fp = None
def info(self):
"""Returns the response headers."""
return self.headers
def geturl(self):
"""Returns the URL for the resource returned in this response."""
return self.url
class HttpErrorHandler(urllib2.HTTPDefaultErrorHandler):
"""A custom HTTP error handler.
Used to return Response objects instead of raising an HTTPError exception.
"""
def http_error_default(self, req, fp, code, msg, headers):
"""Default HTTP error handler.
Args:
req - The original Request object.
fp - The response body file object.
code - The HTTP status code returned by the server.
msg - The HTTP status message returned by the server.
headers - The response headers.
Returns:
A new Response object.
"""
return Response(fp, code, headers, req.get_full_url())
class RemoteConnection(object):
"""A connection with the Remote WebDriver server.
Communicates with the server using the WebDriver wire protocol:
http://code.google.com/p/selenium/wiki/JsonWireProtocol
"""
def __init__(self, remote_server_addr):
self._url = remote_server_addr
self._commands = {
Command.NEW_SESSION: ('POST', '/session'),
Command.QUIT: ('DELETE', '/session/$sessionId'),
Command.GET_CURRENT_WINDOW_HANDLE:
('GET', '/session/$sessionId/window_handle'),
Command.GET_WINDOW_HANDLES:
('GET', '/session/$sessionId/window_handles'),
Command.GET: ('POST', '/session/$sessionId/url'),
Command.GO_FORWARD: ('POST', '/session/$sessionId/forward'),
Command.GO_BACK: ('POST', '/session/$sessionId/back'),
Command.REFRESH: ('POST', '/session/$sessionId/refresh'),
Command.EXECUTE_SCRIPT: ('POST', '/session/$sessionId/execute'),
Command.GET_CURRENT_URL: ('GET', '/session/$sessionId/url'),
Command.GET_TITLE: ('GET', '/session/$sessionId/title'),
Command.GET_PAGE_SOURCE: ('GET', '/session/$sessionId/source'),
Command.SCREENSHOT: ('GET', '/session/$sessionId/screenshot'),
Command.SET_BROWSER_VISIBLE:
('POST', '/session/$sessionId/visible'),
Command.IS_BROWSER_VISIBLE: ('GET', '/session/$sessionId/visible'),
Command.FIND_ELEMENT: ('POST', '/session/$sessionId/element'),
Command.FIND_ELEMENTS: ('POST', '/session/$sessionId/elements'),
Command.GET_ACTIVE_ELEMENT:
('POST', '/session/$sessionId/element/active'),
Command.FIND_CHILD_ELEMENT:
('POST', '/session/$sessionId/element/$id/element'),
Command.FIND_CHILD_ELEMENTS:
('POST', '/session/$sessionId/element/$id/elements'),
Command.CLICK_ELEMENT: ('POST', '/session/$sessionId/element/$id/click'),
Command.CLEAR_ELEMENT: ('POST', '/session/$sessionId/element/$id/clear'),
Command.SUBMIT_ELEMENT: ('POST', '/session/$sessionId/element/$id/submit'),
Command.GET_ELEMENT_TEXT: ('GET', '/session/$sessionId/element/$id/text'),
Command.SEND_KEYS_TO_ELEMENT:
('POST', '/session/$sessionId/element/$id/value'),
Command.GET_ELEMENT_VALUE:
('GET', '/session/$sessionId/element/$id/value'),
Command.GET_ELEMENT_TAG_NAME:
('GET', '/session/$sessionId/element/$id/name'),
Command.IS_ELEMENT_SELECTED:
('GET', '/session/$sessionId/element/$id/selected'),
Command.SET_ELEMENT_SELECTED:
('POST', '/session/$sessionId/element/$id/selected'),
Command.TOGGLE_ELEMENT:
('POST', '/session/$sessionId/element/$id/toggle'),
Command.IS_ELEMENT_ENABLED:
('GET', '/session/$sessionId/element/$id/enabled'),
Command.IS_ELEMENT_DISPLAYED:
('GET', '/session/$sessionId/element/$id/displayed'),
Command.HOVER_OVER_ELEMENT:
('POST', '/session/$sessionId/element/$id/hover'),
Command.GET_ELEMENT_LOCATION:
('GET', '/session/$sessionId/element/$id/location'),
Command.GET_ELEMENT_LOCATION_ONCE_SCROLLED_INTO_VIEW:
('GET', '/session/$sessionId/element/$id/location_in_view'),
Command.GET_ELEMENT_SIZE:
('GET', '/session/$sessionId/element/$id/size'),
Command.GET_ELEMENT_ATTRIBUTE:
('GET', '/session/$sessionId/element/$id/attribute/$name'),
Command.ELEMENT_EQUALS:
('GET', '/session/$sessionId/element/$id/equals/$other'),
Command.GET_ALL_COOKIES: ('GET', '/session/$sessionId/cookie'),
Command.ADD_COOKIE: ('POST', '/session/$sessionId/cookie'),
Command.DELETE_ALL_COOKIES:
('DELETE', '/session/$sessionId/cookie'),
Command.DELETE_COOKIE:
('DELETE', '/session/$sessionId/cookie/$name'),
Command.SWITCH_TO_FRAME: ('POST', '/session/$sessionId/frame'),
Command.SWITCH_TO_WINDOW: ('POST', '/session/$sessionId/window'),
Command.CLOSE: ('DELETE', '/session/$sessionId/window'),
Command.DRAG_ELEMENT:
('POST', '/session/$sessionId/element/$id/drag'),
Command.GET_SPEED: ('GET', '/session/$sessionId/speed'),
Command.SET_SPEED: ('POST', '/session/$sessionId/speed'),
Command.GET_ELEMENT_VALUE_OF_CSS_PROPERTY:
('GET', '/session/$sessionId/element/$id/css/$propertyName')
}
def execute(self, command, params):
"""Send a command to the remote server.
        Any path substitutions required for the URL mapped to the command should be
included in the command parameters.
Args:
command - A string specifying the command to execute.
params - A dictionary of named parameters to send with the command as
its JSON payload.
"""
command_info = self._commands[command]
assert command_info is not None, 'Unrecognised command %s' % command
data = utils.dump_json(params)
path = string.Template(command_info[1]).substitute(params)
url = '%s%s' % (self._url, path)
return self._request(url, method=command_info[0], data=data)
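    # Illustrative expansion (hypothetical IDs): for Command.GET_ELEMENT_TEXT with
    # params {'sessionId': 'abc123', 'id': ':wdc:5'}, the template
    # '/session/$sessionId/element/$id/text' becomes
    # '/session/abc123/element/:wdc:5/text' and is issued as a GET request.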
def _request(self, url, data=None, method=None):
"""Send an HTTP request to the remote server.
Args:
method - A string for the HTTP method to send the request with.
url - The URL to send the request to.
          data - The message body to send.
Returns:
A dictionary with the server's parsed JSON response.
"""
logging.debug('%s %s %s' % (method, url, data))
request = Request(url, data=data, method=method)
request.add_header('Accept', 'application/json')
opener = urllib2.build_opener(urllib2.HTTPRedirectHandler(),
HttpErrorHandler())
response = opener.open(request)
try:
if response.code > 399 and response.code < 500:
return {'status': response.code, 'value': response.read()}
body = response.read().replace('\x00', '').strip()
if body:
data = utils.load_json(body.strip())
assert type(data) is dict, (
'Invalid server response body: %s' % body)
assert 'status' in data, (
'Invalid server response; no status: %s' % body)
assert 'value' in data, (
'Invalid server response; no value: %s' % body)
return data
finally:
response.close()
|
{
"content_hash": "159e5c5ad8e7f8e2ffe23c140aee8a60",
"timestamp": "",
"source": "github",
"line_count": 229,
"max_line_length": 87,
"avg_line_length": 42.03056768558952,
"alnum_prop": 0.5785974025974026,
"repo_name": "mfazekas/safaridriver",
"id": "84e87049c05a47bf5072c9ed39d70599e1a29645",
"size": "10249",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "remote/client/src/py/remote_connection.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ASP",
"bytes": "853"
},
{
"name": "C",
"bytes": "17707"
},
{
"name": "C#",
"bytes": "1142439"
},
{
"name": "C++",
"bytes": "9351513"
},
{
"name": "Java",
"bytes": "4942599"
},
{
"name": "JavaScript",
"bytes": "11288115"
},
{
"name": "Objective-C",
"bytes": "210758"
},
{
"name": "Python",
"bytes": "2142248"
},
{
"name": "Ruby",
"bytes": "181980"
},
{
"name": "Shell",
"bytes": "7226"
}
],
"symlink_target": ""
}
|
#!/usr/bin/env python3
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# PREREQ: installer_prereq.py
import os
import re
import sys
import json
import glob
import shutil
import string
import random
import requests
import argparse
import platform
import subprocess
from art import *
from os import path
from pprint import pprint
from google.cloud import storage
from googleapiclient import discovery
from colorama import Fore, Back, Style
from python_terraform import Terraform
from oauth2client.client import GoogleCredentials
ACTION_CREATE_DEPLOYMENT = "1"
ACTION_UPDATE_DEPLOYMENT = "2"
ACTION_DELETE_DEPLOYMENT = "3"
ACTION_LIST_DEPLOYMENT = "4"
def main(varcontents={}, module_name=None, action=None, projid=None, tfbucket=None, check=None):
orgid = ""
folderid = ""
billing_acc = ""
currentusr = ""
setup_path = os.getcwd()
# Setting "gcloud auth application-default" to deploy RAD Lab Modules
currentusr = radlabauth(currentusr)
# Setting up Project-ID
projid = set_proj(projid)
# Checking for User Permissions
if check == True:
launcherperm(projid, currentusr)
# Listing / Selecting from available RAD Lab modules
if module_name is None:
module_name = list_modules()
# Checking Module specific permissions
if check == True:
moduleperm(projid, module_name, currentusr)
# Validating user input Terraform variables against selected module
validate_tfvars(varcontents, module_name)
# Select Action to perform
if action is None or action == "":
action = select_action().strip()
# Setting up required attributes for any RAD Lab module deployment
env_path, tfbucket, orgid, billing_acc, folderid, randomid = module_deploy_common_settings(action, module_name, setup_path, varcontents, projid, tfbucket)
# Utilizing Terraform Wrapper for init / apply / destroy
env(action, orgid, billing_acc, folderid, env_path, randomid, tfbucket, projid)
print("\nGCS Bucket storing Terrafrom Configs: " + tfbucket + "\n")
print("\nTERRAFORM DEPLOYMENT COMPLETED!!!\n")
def radlabauth(currentusr):
try:
token = subprocess.Popen(["gcloud auth application-default print-access-token"], shell=True, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL).stdout.read().strip().decode('utf-8')
r = requests.get('https://www.googleapis.com/oauth2/v3/tokeninfo?access_token=' + token)
currentusr = r.json()["email"]
# Setting Credentials for non Cloud Shell CLI
if (platform.system() != 'Linux' and platform.processor() != '' and not platform.system().startswith('cs-')):
# countdown(5)
# Adding Execution handling if GOOGLE_APPLICATION_CREDENTIALS is set to Empty.
try:
del os.environ['GOOGLE_APPLICATION_CREDENTIALS']
except:
pass
x = input("\nWould you like to proceed the RAD Lab deployment with user - " + Fore.YELLOW + currentusr + Style.RESET_ALL + ' ?\n[1] Yes\n[2] No\n' + Fore.YELLOW + Style.BRIGHT + 'Choose a number : ' + Style.RESET_ALL).strip()
if (x == '1'):
pass
elif (x == '2'):
print("\nLogin with User account with which you would like to deploy RAD Lab Modules...\n")
os.system("gcloud auth application-default login")
else:
currentusr = '0'
except:
# Adding Execution handling if GOOGLE_APPLICATION_CREDENTIALS is set to Empty.
if (platform.system() != 'Linux' and platform.processor() != '' and not platform.system().startswith('cs-')):
try:
del os.environ['GOOGLE_APPLICATION_CREDENTIALS']
except:
pass
print("\nLogin with User account with which you would like to deploy RAD Lab Modules...\n")
os.system("gcloud auth application-default login")
finally:
if (currentusr == '0'):
sys.exit(Fore.RED + "\nError Occured - INVALID choice.\n")
else:
token = subprocess.Popen(["gcloud auth application-default print-access-token"], shell=True, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL).stdout.read().strip().decode('utf-8')
r = requests.get('https://www.googleapis.com/oauth2/v3/tokeninfo?access_token=' + token)
currentusr = r.json()["email"]
os.system("gcloud config set account " + currentusr)
print(
"\nUser to deploy RAD Lab Modules (Selected) : " + Fore.GREEN + Style.BRIGHT + currentusr + Style.RESET_ALL)
return currentusr
def set_proj(projid):
if projid is None:
projid = os.popen("gcloud config list --format 'value(core.project)' 2>/dev/null").read().strip()
if (projid != ""):
select_proj = input("\nWhich Project would you like to use for RAD Lab management (Example - Creating/Utilizing GCS bucket where Terraform states will be stored) ? :" + "\n[1] Currently set project - " + Fore.GREEN + projid + Style.RESET_ALL + "\n[2] Enter a different Project ID" + Fore.YELLOW + Style.BRIGHT + "\nChoose a number for the RAD Lab management Project" + Style.RESET_ALL + ': ').strip()
if (select_proj == '2'):
projid = input(Fore.YELLOW + Style.BRIGHT + "Enter the Project ID" + Style.RESET_ALL + ': ').strip()
elif (select_proj != '1' and select_proj != '2'):
sys.exit(Fore.RED + "\nError Occured - INVALID choice.\n")
else:
projid = input(Fore.YELLOW + Style.BRIGHT + "\nEnter the Project ID for RAD Lab management" + Style.RESET_ALL + ': ').strip()
else:
pass
os.system("gcloud config set project " + projid)
os.system("gcloud auth application-default set-quota-project " + projid )
print("\nProject ID (Selected) : " + Fore.GREEN + Style.BRIGHT + projid + Style.RESET_ALL)
return projid
def launcherperm(projid, currentusr):
# Hardcoded Project level required RAD Lab Launcher roles
launcherprojroles = ['roles/storage.admin', 'roles/serviceusage.serviceUsageConsumer']
# Hardcoded Org level required RAD Lab Launcher roles
launcherorgroles = ['roles/iam.organizationRoleViewer']
credentials = GoogleCredentials.get_application_default()
service0 = discovery.build('cloudresourcemanager', 'v3', credentials=credentials)
request0 = service0.projects().getIamPolicy(resource='projects/' + projid)
response0 = request0.execute()
projiam = True
for role in launcherprojroles:
rolefound = False
ownerrole = False
for y in range(len(response0['bindings'])):
# print("ROLE --->")
# print(response0['bindings'][y]['role'])
# print("MEMBERS --->")
# print(response0['bindings'][y]['members'])
# Check for Owner role on RAD Lab Management Project
if (response0['bindings'][y]['role'] == 'roles/owner' and 'user:' + currentusr in response0['bindings'][y]['members']):
rolefound = True
ownerrole = True
print("\n" + currentusr + " has roles/owner role for RAD Lab Management Project: " + projid)
break
# Check for Required roles on RAD Lab Management Project
elif (response0['bindings'][y]['role'] == role):
rolefound = True
if ('user:' + currentusr not in response0['bindings'][y]['members']):
projiam = False
sys.exit(
Fore.RED + "\nError Occured - RADLAB LAUNCHER PERMISSION ISSUE | " + role + " permission missing...\n(Review https://github.com/GoogleCloudPlatform/rad-lab/tree/main/radlab-launcher#iam-permissions-prerequisites for more details)\n" + Style.RESET_ALL)
else:
pass
if rolefound == False:
sys.exit(
Fore.RED + "\nError Occured - RADLAB LAUNCHER PERMISSION ISSUE | " + role + " permission missing...\n(Review https://github.com/GoogleCloudPlatform/rad-lab/tree/main/radlab-launcher#iam-permissions-prerequisites for more details)\n" + Style.RESET_ALL)
if (ownerrole == True):
break
if projiam == True:
print(Fore.GREEN + '\nRADLAB LAUNCHER - Project Permission check passed' + Style.RESET_ALL)
service1 = discovery.build('cloudresourcemanager', 'v3', credentials=credentials)
request1 = service1.projects().get(name='projects/' + projid)
response1 = request1.execute()
if 'parent' in response1.keys():
service2 = discovery.build('cloudresourcemanager', 'v3', credentials=credentials)
org = findorg(response1['parent'])
request2 = service2.organizations().getIamPolicy(resource=org)
response2 = request2.execute()
orgiam = True
for role in launcherorgroles:
rolefound = False
for x in range(len(response2['bindings'])):
# print("ROLE --->")
# print(response2['bindings'][x]['role'])
# print("MEMBERS --->")
# print(response2['bindings'][x]['members'])
if (role == response2['bindings'][x]['role']):
rolefound = True
if ('user:' + currentusr not in response2['bindings'][x]['members']):
orgiam = False
sys.exit(Fore.RED + "\nError Occured - RADLAB LAUNCHER PERMISSION ISSUE | " + role + " permission missing...\n(Review https://github.com/GoogleCloudPlatform/rad-lab/tree/main/radlab-launcher#iam-permissions-prerequisites for more details)\n" + Style.RESET_ALL)
else:
pass
if rolefound == False:
sys.exit(Fore.RED + "\nError Occured - RADLAB LAUNCHER PERMISSION ISSUE | " + role + " permission missing...\n(Review https://github.com/GoogleCloudPlatform/rad-lab/tree/main/radlab-launcher#iam-permissions-prerequisites for more details)\n" + Style.RESET_ALL)
if orgiam == True:
print(Fore.GREEN + '\nRADLAB LAUNCHER - Organization Permission check passed' + Style.RESET_ALL)
else:
print(Fore.YELLOW + '\nRADLAB LAUNCHER - Skipping Organization Permission check. No Organization associated with the project: ' + projid + Style.RESET_ALL)
def findorg(parent):
if 'folders' in parent:
credentials = GoogleCredentials.get_application_default()
s = discovery.build('cloudresourcemanager', 'v3', credentials=credentials)
req = s.folders().get(name=parent)
res = req.execute()
return findorg(res['parent'])
else:
# print(Fore.GREEN + "Org identified: " + Style.BRIGHT + parent + Style.RESET_ALL)
return parent
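# Illustrative walk (hypothetical resource names): a project parented under
# folders/222 -> folders/111 -> organizations/999 resolves to 'organizations/999'
# after two recursive folder lookups.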
def moduleperm(projid, module_name, currentusr):
# Check if any of the org policy is used in orgpolicy.tf
setorgpolicy = True
try:
## Finding policy variables in orgpolicy.tf
with open(os.path.dirname(os.getcwd()) + '/modules/' + module_name + '/orgpolicy.tf', "r") as file:
policy_vars = []
for line in file:
if ('count' in line and 'var.' in line and '||' not in line):
policy_vars.append(line[line.find("var.") + len("var."):line.find("?")].strip())
# print("Org Policy Variables:")
# print(policy_vars)
## [CHECK 1] Checking for commented orgpolicy resource in orgpolicy.tf
numCommentedOrgPolicy = 0
for policy in policy_vars:
with open(os.path.dirname(os.getcwd()) + '/modules/' + module_name + '/orgpolicy.tf', "r") as file:
for line in file:
# Finding Org policy resource block
if ('count' in line and 'var.' + policy in line and '?' in line):
# Checking for commented resource block line
if (line.startswith('#') or line.startswith('//')):
numCommentedOrgPolicy = numCommentedOrgPolicy + 1
# If No. of commented Org Policies are equal to total policies; No Org policy set
if (numCommentedOrgPolicy == len(policy_vars)):
setorgpolicy = False
## [CHECK 2] Checking if policy variables in variables.tf are set to 'false'
numDisabledOrgPolicyVar = 0
for var in policy_vars:
varblock = ""
block = False
with open(os.path.dirname(os.getcwd()) + '/modules/' + module_name + '/variables.tf', "r") as file:
for line in file:
if (var in line):
block = True
elif ('}' in line):
block = False
if (block == True):
varblock = varblock + line
# print(varblock + '}')
# Count number of disabled policies
if ('false' in varblock.split('default')[1]):
numDisabledOrgPolicyVar = numDisabledOrgPolicyVar + 1
# If No. of disabled Org Policies are equal to total policies; No Org policy set
if (numDisabledOrgPolicyVar == len(policy_vars)):
setorgpolicy = False
## [CHECK 3] Checking if policy variables in variables.tf are commented
numCommentedOrgPolicyVar = 0
for var in policy_vars:
with open(os.path.dirname(os.getcwd()) + '/modules/' + module_name + '/variables.tf', "r") as file:
for line in file:
# Finding Org policy resource block
if ('variable' in line and policy in line):
# Checking for commented resource block line
if (line.startswith('#') or line.startswith('//') or line.startswith('/*')):
numCommentedOrgPolicyVar = numCommentedOrgPolicyVar + 1
# If No. of commented Org Policies Variables are equal to total policies; No Org policy set
if (numCommentedOrgPolicyVar == len(policy_vars)):
setorgpolicy = False
except:
setorgpolicy = False
# Check if reusing project
create_project = True
try:
## Finding 'create_project' variable in variables.tf
varblock = ""
block = False
with open(os.path.dirname(os.getcwd()) + '/modules/' + module_name + '/variables.tf', "r") as file:
for line in file:
if ('create_project' in line):
block = True
elif ('}' in line):
block = False
if (block == True):
varblock = varblock + line
# print(varblock + '}')
if ('false' in varblock.split('default')[1]):
create_project = False
except Exception as e:
print(e)
print("\nSET ORG POLICY: " + str(setorgpolicy))
print("CREATE PROJECT: " + str(create_project))
# Scrape out Module specific permissions for the module
try:
with open(os.path.dirname(os.getcwd()) + '/modules/' + module_name + '/README.md', "r") as file:
section = False
orgroles = []
projroles = []
for line in file:
if (line.startswith("## IAM Permissions Prerequisites")):
section = True
# Identifying Roles if New Project is supposed to be created
if (create_project == True):
if (section == True and line.startswith('- Parent: `')):
orgroles.append(re.search("\`(.*?)\`", line).group(1))
if (section == True and line.startswith('- Project: `')):
projroles.append(re.search("\`(.*?)\`", line).group(1))
# Identifying Roles if Reusing any Existing project
else:
                    if (section == True and line.startswith('- `')):
projroles.append(re.search("\`(.*?)\`", line).group(1))
if (line.startswith('#') and not line.startswith("## IAM Permissions Prerequisites")):
section = False
# Removing optional role 'roles/orgpolicy.policyAdmin' if Org Policy is not set
if (setorgpolicy == False and 'roles/orgpolicy.policyAdmin' in orgroles):
orgroles.remove('roles/orgpolicy.policyAdmin')
    except:
        orgroles = []  # ensure role lists exist even when the README cannot be parsed
        projroles = []
        print(Fore.RED + 'IAM Permissions Prerequisites are missing in the README.md or the README.md file does not exist for module : ' + module_name + Style.RESET_ALL)
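    # Illustrative README layout parsed above (hypothetical role names):
    #   ## IAM Permissions Prerequisites
    #   - Parent: `roles/resourcemanager.projectCreator`
    #   - Project: `roles/compute.admin`
    # "Parent" lines feed orgroles and "Project" lines feed projroles when a
    # new project is being created.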
# Check Module permissions permission
credentials = GoogleCredentials.get_application_default()
service = discovery.build('cloudresourcemanager', 'v3', credentials=credentials)
# Check Project level permissions
if len(projroles) != 0:
# print("Project Roles to check:")
# print(projroles)
# print("/*************** PROJECT IAM POLICY *************/")
request1 = service.projects().getIamPolicy(resource='projects/' + projid)
response1 = request1.execute()
projiam = True
for role in projroles:
rolefound = False
for y in range(len(response1['bindings'])):
# print("ROLE --->")
# print(response1['bindings'][y]['role'])
# print("MEMBERS --->")
# print(response1['bindings'][y]['members'])
if (role == response1['bindings'][y]['role']):
rolefound = True
if ('user:' + currentusr not in response1['bindings'][y]['members']):
projiam = False
sys.exit(Fore.RED + "\nError Occured - RADLAB MODULE PERMISSION ISSUE | " + role + " permission missing...\n(Review https://github.com/GoogleCloudPlatform/rad-lab/tree/main/modules/" + module_name + "#iam-permissions-prerequisites for more details)\n" + Style.RESET_ALL)
else:
pass
if rolefound == False:
sys.exit(Fore.RED + "\nError Occured - RADLAB MODULE PERMISSION ISSUE | " + role + " permission missing...\n(Review https://github.com/GoogleCloudPlatform/rad-lab/tree/main/modules/" + module_name + "#iam-permissions-prerequisites for more details)\n" + Style.RESET_ALL)
if projiam == True:
print(Fore.GREEN + '\nRADLAB MODULE (' + module_name + ')- Project Permission check passed' + Style.RESET_ALL)
# Check Org level permissions
if len(orgroles) != 0:
# print("Org Roles to check:")
# print(orgroles)
request = service.projects().get(name='projects/' + projid)
response = request.execute()
if 'parent' in response.keys():
# print("/*************** ORG IAM POLICY *************/")
org = findorg(response['parent'])
request2 = service.organizations().getIamPolicy(resource=org)
response2 = request2.execute()
# pprint(response2)
orgiam = True
for role in orgroles:
rolefound = False
for x in range(len(response2['bindings'])):
# print("ROLE --->")
# print(response2['bindings'][x]['role'])
# print("MEMBERS --->")
# print(response2['bindings'][x]['members'])
if (role == response2['bindings'][x]['role']):
rolefound = True
if ('user:' + currentusr not in response2['bindings'][x]['members']):
orgiam = False
sys.exit(Fore.RED + "\nError Occured - RADLAB MODULE (" + module_name + ") PERMISSION ISSUE | " + role + " permission missing...\n(Review https://github.com/GoogleCloudPlatform/rad-lab/tree/main/modules/" + module_name + "#iam-permissions-prerequisites for more details)\n" + Style.RESET_ALL)
else:
pass
if rolefound == False:
sys.exit(Fore.RED + "\nError Occured - RADLAB MODULE (" + module_name + ") PERMISSION ISSUE | " + role + " permission missing...\n(Review https://github.com/GoogleCloudPlatform/rad-lab/tree/main/modules/" + module_name + "#iam-permissions-prerequisites for more details)\n" + Style.RESET_ALL)
if orgiam == True:
print(Fore.GREEN + '\nRADLAB MODULE (' + module_name + ') - Organization Permission check passed' + Style.RESET_ALL)
else:
            print(Fore.YELLOW + '\nRADLAB MODULE (' + module_name + ') - Skipping Organization Permission check. No Organization associated with the project: ' + projid + Style.RESET_ALL)
def env(action, orgid, billing_acc, folderid, env_path, deployment_id, tfbucket, projid):
tr = Terraform(working_dir=env_path)
return_code, stdout, stderr = tr.init_cmd(capture_output=False)
if (action == ACTION_CREATE_DEPLOYMENT or action == ACTION_UPDATE_DEPLOYMENT):
return_code, stdout, stderr = tr.apply_cmd(capture_output=False, auto_approve=True, var={'organization_id': orgid, 'billing_account_id': billing_acc, 'deployment_id': deployment_id})
return_code, stdout, stderr = tr.apply_cmd(refresh=True, capture_output=False, auto_approve=True, var={'organization_id': orgid, 'billing_account_id': billing_acc, 'deployment_id': deployment_id})
elif (action == ACTION_DELETE_DEPLOYMENT):
return_code, stdout, stderr = tr.destroy_cmd(capture_output=False, auto_approve=True, var={'organization_id': orgid, 'billing_account_id': billing_acc, 'deployment_id': deployment_id})
# return_code - 0 Success & 1 Error
if (return_code == 1):
print(stderr)
sys.exit(Fore.RED + Style.BRIGHT + "\nError Occured - Deployment failed for ID: " + deployment_id + "\n" + "Retry using above Deployment ID" + Style.RESET_ALL)
else:
target_path = 'radlab/' + env_path.split('/')[len(env_path.split('/')) - 1] + '/deployments'
if (action == ACTION_CREATE_DEPLOYMENT or action == ACTION_UPDATE_DEPLOYMENT):
if glob.glob(env_path + '/*.tf'):
upload_from_directory(projid, env_path, '/*.tf', tfbucket, target_path)
if glob.glob(env_path + '/*.json'):
upload_from_directory(projid, env_path, '/*.json', tfbucket, target_path)
if glob.glob(env_path + '/elk'):
upload_from_directory(projid, env_path, '/elk/**', tfbucket, target_path)
if glob.glob(env_path + '/scripts'):
upload_from_directory(projid, env_path, '/scripts/**', tfbucket, target_path)
if glob.glob(env_path + '/templates'):
upload_from_directory(projid, env_path, '/templates/**', tfbucket, target_path)
elif (action == ACTION_DELETE_DEPLOYMENT):
deltfgcs(tfbucket, 'radlab/' + env_path.split('/')[len(env_path.split('/')) - 1], projid)
# Deleting Local deployment config
shutil.rmtree(env_path)
def upload_from_directory(projid, directory_path: str, content: str, dest_bucket_name: str, dest_blob_name: str):
rel_paths = glob.glob(directory_path + content, recursive=True)
bucket = storage.Client(project=projid).get_bucket(dest_bucket_name)
for local_file in rel_paths:
file = local_file.replace(directory_path, '')
remote_path = f'{dest_blob_name}/{"/".join(file.split(os.sep)[1:])}'
if os.path.isfile(local_file):
blob = bucket.blob(remote_path)
blob.upload_from_filename(local_file)
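# Illustrative mapping (hypothetical paths): with directory_path ending in
# '/deployments/data_science_l8b3' and a matched local file
# '.../deployments/data_science_l8b3/scripts/build.sh', the object lands at
# '<dest_blob_name>/scripts/build.sh' in the destination bucket.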
def select_action():
action = input(
"\nAction to perform for RAD Lab Deployment ?\n[1] Create New\n[2] Update\n[3] Delete\n[4] List\n" + Fore.YELLOW + Style.BRIGHT + "Choose a number for the RAD Lab Module Deployment Action" + Style.RESET_ALL + ': ').strip()
if (action == ACTION_CREATE_DEPLOYMENT or action == ACTION_UPDATE_DEPLOYMENT or action == ACTION_DELETE_DEPLOYMENT or action == ACTION_LIST_DEPLOYMENT):
return action
else:
sys.exit(Fore.RED + "\nError Occured - INVALID choice.\n")
def basic_input(orgid, billing_acc, folderid, randomid):
print("\nEnter following info to start the setup and use the user which have Project Owner & Billing Account User roles:-")
# Selecting Org ID
if (orgid == ''):
orgid = getorgid()
# Org ID Validation
if (orgid.strip() and orgid.strip().isdecimal() == False):
sys.exit(Fore.RED + "\nError Occured - INVALID ORG ID\n")
print("\nOrg ID (Selected) : " + Fore.GREEN + Style.BRIGHT + orgid + Style.RESET_ALL)
# Selecting Folder ID
if (folderid == ''):
x = input("\nSet Folder ID ?\n[1] Enter Manually\n[2] Skip setting Folder ID\n" + Fore.YELLOW + Style.BRIGHT + "Choose a number for your choice" + Style.RESET_ALL + ': ').strip()
if (x == '1'):
folderid = input(Fore.YELLOW + Style.BRIGHT + "\nFolder ID" + Style.RESET_ALL + ': ').strip()
elif (x == '2'):
print("Skipped setting Folder ID...")
else:
sys.exit(Fore.RED + "\nError Occured - INVALID CHOICE\n" + Style.RESET_ALL)
# Folder ID Validation
if (folderid.strip() and folderid.strip().isdecimal() == False):
sys.exit(Fore.RED + "\nError Occured - INVALID FOLDER ID ACCOUNT\n")
print("\nFolder ID (Selected) : " + Fore.GREEN + Style.BRIGHT + folderid + Style.RESET_ALL)
# Selecting Billing Account
if (billing_acc == ''):
billing_acc = getbillingacc()
print("\nBilling Account (Selected) : " + Fore.GREEN + Style.BRIGHT + billing_acc + Style.RESET_ALL)
# Billing Account Validation
if (billing_acc.count('-') != 2):
sys.exit(Fore.RED + "\nError Occured - INVALID Billing Account\n")
# Create Random Deployment ID
if (randomid == ''):
randomid = get_random_alphanumeric_string(4)
return orgid, billing_acc, folderid, randomid
def create_env(env_path, orgid, billing_acc, folderid):
my_path = env_path + '/env.json'
envjson = [
{
"orgid": orgid,
"billing_acc": billing_acc,
"folderid": folderid
}
]
with open(my_path, 'w') as file:
json.dump(envjson, file, indent=4)
def get_env(env_path):
# Read orgid / billing acc / folder id from env.json
my_path = env_path + '/env.json'
# Opening JSON file
f = open(my_path, )
# returns JSON object as a dictionary
data = json.load(f)
orgid = data[0]['orgid']
billing_acc = data[0]['billing_acc']
folderid = data[0]['folderid']
# Closing file
f.close()
return orgid, billing_acc, folderid
def setlocaldeployment(tfbucket, prefix, env_path, projid):
if (blob_exists(tfbucket, prefix, projid)):
# Checking if 'deployment' folder exist in local. If YES, delete the same.
delifexist(env_path)
# Creating Local directory
os.makedirs(env_path)
# Copy Terraform deployment configs from GCS to Local
if (download_blob(projid, tfbucket, prefix, env_path) == True):
print("Terraform state downloaded to local...")
else:
print(Fore.RED + "\nError Occured whiled downloading Deployment Configs from GCS. Checking if the deployment exist locally...\n")
elif (os.path.isdir(env_path)):
print("Terraform state exist locally...")
else:
sys.exit(Fore.RED + "\nThe deployment with the entered ID do not exist !\n")
def download_blob(projid, tfbucket, prefix, env_path):
"""Downloads a blob from the bucket."""
try:
bucket_dir = 'radlab/' + prefix + '/deployments/'
local_dir = env_path + '/'
storage_client = storage.Client(project=projid)
bucket = storage_client.get_bucket(tfbucket)
blobs = bucket.list_blobs(prefix=bucket_dir) # Get list of files
for blob in blobs:
content = blob.name.replace(bucket_dir, '')
# Create Nested Folders structure in Local Directory
if '/' in content:
if (os.path.isdir(local_dir + os.path.dirname(content)) == False):
os.makedirs(local_dir + os.path.dirname(content))
# Download file
blob.download_to_filename(local_dir + content) # Download
return True
except:
return False
def get_random_alphanumeric_string(length):
letters_and_digits = string.ascii_lowercase + string.digits
result_str = ''.join((random.choice(letters_and_digits) for i in range(length)))
# print("Random alphanumeric String is:", result_str)
return result_str
def getbillingacc():
x = input("\nSet Billing Account ?\n[1] Enter Manually\n[2] Select from the List\n" + Fore.YELLOW + Style.BRIGHT + "Choose a number for your choice" + Style.RESET_ALL + ': ').strip()
if (x == '1'):
billing_acc = input(Fore.YELLOW + Style.BRIGHT + "Enter the Billing Account ( Example format - ABCDEF-GHIJKL-MNOPQR )" + Style.RESET_ALL + ': ').strip()
return billing_acc
elif (x == '2'):
credentials = GoogleCredentials.get_application_default()
service = discovery.build('cloudbilling', 'v1', credentials=credentials)
request = service.billingAccounts().list()
response = request.execute()
# print(response['billingAccounts'])
print("\nList of Billing account you have access to: \n")
billing_accounts = []
# Print out Billing accounts
for x in range(len(response['billingAccounts'])):
print("[" + str(x + 1) + "] " + response['billingAccounts'][x]['name'] + " " + response['billingAccounts'][x]['displayName'])
billing_accounts.append(response['billingAccounts'][x]['name'])
# Take user input and get the corresponding item from the list
inp = int(input(Fore.YELLOW + Style.BRIGHT + "Choose a number for Billing Account" + Style.RESET_ALL + ': '))
if inp in range(1, len(billing_accounts) + 1):
inp = billing_accounts[inp - 1]
billing_acc = inp.split('/')
# print(billing_acc[1])
return billing_acc[1]
else:
sys.exit(Fore.RED + "\nError Occured - INVALID BILLING ACCOUNT\n")
else:
sys.exit(Fore.RED + "\nError Occured - INVALID CHOICE\n" + Style.RESET_ALL)
def getorgid():
x = input("\nSet Org ID ?\n[1] Enter Manually\n[2] Select from the List\n[3] Skip setting Org ID\n" + Fore.YELLOW + Style.BRIGHT + "Choose a number for your choice" + Style.RESET_ALL + ': ').strip()
if (x == '1'):
orgid = input(Fore.YELLOW + Style.BRIGHT + "Enter the Org ID ( Example format - 1234567890 )" + Style.RESET_ALL + ': ').strip()
return orgid
elif (x == '2'):
credentials = GoogleCredentials.get_application_default()
service = discovery.build('cloudresourcemanager', 'v1beta1', credentials=credentials)
request = service.organizations().list()
response = request.execute()
# pprint(response)
print("\nList of Org ID you have access to: \n")
org_ids = []
# Print out Org IDs accounts
for x in range(len(response['organizations'])):
print("[" + str(x + 1) + "] " + response['organizations'][x]['organizationId'] + " " + response['organizations'][x]['displayName'] + " " + response['organizations'][x]['lifecycleState'])
org_ids.append(response['organizations'][x]['organizationId'])
# Take user input and get the corresponding item from the list
inp = int(input(Fore.YELLOW + Style.BRIGHT + "Choose a number for Organization ID" + Style.RESET_ALL + ': '))
if inp in range(1, len(org_ids) + 1):
orgid = org_ids[inp - 1]
# print(orgid)
return orgid
else:
sys.exit(Fore.RED + "\nError Occured - INVALID ORG ID SELECTED\n" + Style.RESET_ALL)
elif (x == '3'):
print("Skipped setting Org ID...")
return ''
else:
sys.exit(Fore.RED + "\nError Occured - INVALID CHOICE\n" + Style.RESET_ALL)
def delifexist(env_path):
# print(os.path.isdir(env_path))
if (os.path.isdir(env_path)):
shutil.rmtree(env_path)
def getbucket(action, projid):
"""Lists all buckets."""
storage_client = storage.Client(project=projid)
bucketoption = ''
if (action == ACTION_CREATE_DEPLOYMENT):
bucketoption = input("\nWant to use existing GCS Bucket for Terraform configs or Create Bucket ?:\n[1] Use Existing Bucket\n[2] Create New Bucket\n" + Fore.YELLOW + Style.BRIGHT + "Choose a number for your choice" + Style.RESET_ALL + ': ').strip()
if (bucketoption == '1' or action == ACTION_UPDATE_DEPLOYMENT or action == ACTION_DELETE_DEPLOYMENT or action == ACTION_LIST_DEPLOYMENT):
try:
buckets = storage_client.list_buckets()
barray = []
x = 0
print("\nSelect a bucket for Terraform Configs & States... \n")
# Print out Buckets in the default project
for bucket in buckets:
print("[" + str(x + 1) + "] " + bucket.name)
barray.append(bucket.name)
x = x + 1
# Take user input and get the corresponding item from the list
try:
inp = int(input(Fore.YELLOW + Style.BRIGHT + "Choose a number for Bucket Name" + Style.RESET_ALL + ': '))
except:
print(Fore.RED + "\nINVALID or NO OPTION SELECTED FOR BUCKET NAME.\n\nEnter the Bucket name Manually...\n" + Style.RESET_ALL)
if inp in range(1, len(barray) + 1):
tfbucket = barray[inp - 1]
return tfbucket
else:
print(Fore.RED + "\nINVALID or NO OPTION SELECTED FOR BUCKET NAME.\n\nEnter the Bucket name Manually...\n" + Style.RESET_ALL)
sys.exit(1)
except Exception as e:
print(e)
# except:
# tfbucket = input(Fore.YELLOW + Style.BRIGHT +"Enter the GCS Bucket name where Terraform Configs & States will be stored"+ Style.RESET_ALL + ": ")
# tfbucket = tfbucket.lower().strip()
# return tfbucket
elif (bucketoption == '2'):
print("CREATE BUCKET")
bucketprefix = input(Fore.YELLOW + Style.BRIGHT + "\nEnter the prefix for the bucket name i.e. radlab-[PREFIX] " + Style.RESET_ALL + ': ')
# Creates the new bucket
# Note: These samples create a bucket in the default US multi-region with a default storage class of Standard Storage.
# To create a bucket outside these defaults, see [Creating storage buckets](https://cloud.google.com/storage/docs/creating-buckets).
bucket = storage_client.create_bucket('radlab-' + bucketprefix)
print("Bucket {} created.".format(bucket.name))
return bucket.name
else:
sys.exit(Fore.RED + "\nInvalid Choice")
def settfstategcs(env_path, prefix, tfbucket, projid):
prefix = "radlab/" + prefix + "/terraform_state"
# Validate Terraform Bucket ID
client = storage.Client(project=projid)
try:
bucket = client.get_bucket(tfbucket)
# print(bucket)
except:
sys.exit(Fore.RED + "\nError Occured - INVALID BUCKET NAME or NO ACCESS\n" + Style.RESET_ALL)
# Create backend.tf file
f = open(env_path + '/backend.tf', 'w+')
f.write('terraform {\n backend "gcs"{\n bucket="' + tfbucket + '"\n prefix="' + prefix + '"\n }\n}')
f.close()
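# For reference (illustrative bucket name and deployment id), the generated
# backend.tf looks like:
#   terraform {
#    backend "gcs"{
#    bucket="my-radlab-bucket"
#    prefix="radlab/data_science_l8b3/terraform_state"
#    }
#   }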
def deltfgcs(tfbucket, prefix, projid):
storage_client = storage.Client(project=projid)
bucket = storage_client.get_bucket(tfbucket)
blobs = bucket.list_blobs(prefix=prefix)
for blob in blobs:
blob.delete()
def blob_exists(tfbucket, prefix, projid):
storage_client = storage.Client(project=projid)
bucket = storage_client.get_bucket(tfbucket)
blob = bucket.blob('radlab/' + prefix + '/deployments/main.tf')
# print(blob.exists())
return blob.exists()
def list_radlab_deployments(tfbucket, module_name, projid):
"""Lists all the blobs in the bucket that begin with the prefix."""
storage_client = storage.Client(project=projid)
bucket = storage_client.get_bucket(tfbucket)
iterator = bucket.list_blobs(prefix='radlab/', delimiter='/')
response = iterator._get_next_page_response()
print("\nPlease find the list of existing " + module_name + " module deployments below:\n")
for prefix in response['prefixes']:
if module_name in prefix:
print(Fore.GREEN + Style.BRIGHT + prefix.split('/')[1] + Style.RESET_ALL)
def list_modules():
modules = [s.replace(os.path.dirname(os.getcwd()) + '/modules/', "") for s in glob.glob(os.path.dirname(os.getcwd()) + '/modules/*')]
modules = sorted(modules)
c = 1
print_list = ''
# Printing List of Modules
for module in modules:
first_line = ''
# Fetch Module name
try:
with open(os.path.dirname(os.getcwd()) + '/modules/' + module + '/README.md', "r") as file:
first_line = file.readline()
except:
print(Fore.RED + 'Missing README.md file for module: ' + module + Style.RESET_ALL)
print_list = print_list + "[" + str(c) + "] " + first_line.strip() + Fore.GREEN + " (" + module + ")\n" + Style.RESET_ALL
c = c + 1
# Selecting Module
try:
selected_module = input("\nList of available RAD Lab modules:\n" + print_list + "[" + str(c) + "] Exit\n" + Fore.YELLOW + Style.BRIGHT + "Choose a number for the RAD Lab Module" + Style.RESET_ALL + ': ').strip()
selected_module = int(selected_module)
except:
sys.exit(Fore.RED + "\nInvalid module")
# Validating User Module selection
if selected_module > 0 and selected_module < c:
# print(modules)
module_name = modules[selected_module - 1]
print("\nRAD Lab Module (selected) : " + Fore.GREEN + Style.BRIGHT + module_name + Style.RESET_ALL)
return module_name
elif selected_module == c:
sys.exit(Fore.GREEN + "\nExiting Installer")
else:
sys.exit(Fore.RED + "\nInvalid module")
def module_deploy_common_settings(action, module_name, setup_path, varcontents, projid, tfbucket):
# Get Terraform Bucket Details
if tfbucket is None:
tfbucket = getbucket(action, projid)
print("\nGCS bucket for Terraform config & state (Selected) : " + Fore.GREEN + Style.BRIGHT + tfbucket + Style.RESET_ALL)
# Setting Org ID, Billing Account, Folder ID
if (action == ACTION_CREATE_DEPLOYMENT):
# Check for any overides of basic inputs from terraform.tfvars file
orgid, billing_acc, folderid, randomid = check_basic_inputs_tfvars(varcontents)
# Getting Base Inputs
orgid, billing_acc, folderid, randomid = basic_input(orgid, billing_acc, folderid, randomid)
# Set environment path as deployment directory
prefix = module_name + '_' + randomid
env_path = setup_path + '/deployments/' + prefix
# Checking if 'deployment' folder exist in local. If YES, delete the same.
delifexist(env_path)
# Copy module directory
shutil.copytree(os.path.dirname(os.getcwd()) + '/modules/' + module_name, env_path)
# Set Terraform states remote backend as GCS
settfstategcs(env_path, prefix, tfbucket, projid)
        # Create terraform.tfvars file with the user-provided variables
create_tfvars(env_path, varcontents)
# Create file with billing/org/folder details
create_env(env_path, orgid, billing_acc, folderid)
print("\nCREATING DEPLOYMENT...")
return env_path, tfbucket, orgid, billing_acc, folderid, randomid
elif (action == ACTION_UPDATE_DEPLOYMENT or action == ACTION_DELETE_DEPLOYMENT):
# List Existing Deployments
list_radlab_deployments(tfbucket, module_name, projid)
# Get Deployment ID
randomid = input(Fore.YELLOW + Style.BRIGHT + "\nEnter RAD Lab Module Deployment ID (example 'l8b3' is the id for module deployment with name - data_science_l8b3)" + Style.RESET_ALL + ': ')
randomid = randomid.strip()
# Validating Deployment ID
if (len(randomid) == 4 and randomid.isalnum()):
# Set environment path as deployment directory
prefix = module_name + '_' + randomid
env_path = setup_path + '/deployments/' + prefix
# Setting Local Deployment
setlocaldeployment(tfbucket, prefix, env_path, projid)
else:
sys.exit(Fore.RED + "\nInvalid deployment ID!\n")
# Get env values
orgid, billing_acc, folderid = get_env(env_path)
# Set Terraform states remote backend as GCS
settfstategcs(env_path, prefix, tfbucket, projid)
# Create file with billing/org/folder details and user input variables
if os.path.exists(env_path + '/terraform.tfvars'):
os.remove(env_path + '/terraform.tfvars')
create_tfvars(env_path, varcontents)
if (action == ACTION_UPDATE_DEPLOYMENT):
print("\nUPDATING DEPLOYMENT...")
if (action == ACTION_DELETE_DEPLOYMENT):
print("\nDELETING DEPLOYMENT...")
return env_path, tfbucket, orgid, billing_acc, folderid, randomid
elif (action == ACTION_LIST_DEPLOYMENT):
list_radlab_deployments(tfbucket, module_name, projid)
sys.exit()
else:
sys.exit(Fore.RED + "\nInvalid RAD Lab Module Action selected")
def validate_tfvars(varcontents, module_name):
keys = list(varcontents.keys())
if keys:
print("Variables in file:")
print(keys)
for key in keys:
status = False
try:
with open(os.path.dirname(os.getcwd()) + '/modules/' + module_name + '/variables.tf', 'r') as myfile:
for line in myfile:
if ('variable "' + key + '"' in line):
# print (key + ": Found")
status = True
break
except:
sys.exit(Fore.RED + 'variables.tf missing for module: ' + module_name)
# Check if an invalid variable is passed!
if (status == False):
sys.exit(
                Fore.RED + 'Variable: ' + key + ' passed in input file, does not exist in variables.tf file of ' + module_name + ' module.')
# print(varcontents)
return True
def create_tfvars(env_path, varcontents):
# Check if any variable exist
if (bool(varcontents) == True):
# Creating terraform.tfvars file in deployment folder
f = open(env_path + "/terraform.tfvars", "w+")
for var in varcontents:
f.write(var.strip() + "=" + varcontents[var].strip() + "\n")
f.close()
else:
print("Skipping creation of terraform.tfvars as no input file for variables...")
def check_basic_inputs_tfvars(varcontents):
try:
orgid = varcontents['organization_id'].strip('"')
except:
orgid = ''
try:
billing_acc = varcontents['billing_account_id'].strip('"')
except:
billing_acc = ''
try:
folderid = varcontents['folder_id'].strip('"')
except:
folderid = ''
try:
randomid = varcontents['deployment_id'].strip('"')
except:
randomid = ''
return orgid, billing_acc, folderid, randomid
def fetchvariables(filecontents):
variables = {}
# Check if there is any variable; If NOT do not create terraform.tfvars file
for x in filecontents:
# Skipping for commented lines
if x.startswith('#') or x.startswith('//'):
continue
elif (len(x.split("=")) == 2):
x = x.strip()
# print(x)
variables[x.split("=")[0].strip()] = x.split("=")[1].strip()
# print(variables)
if (bool(variables) == True):
return variables
else:
sys.exit(Fore.RED + 'No variables in the input file')
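# Illustrative var-file format parsed above (hypothetical values): a line such as
#   billing_account_id = "ABCDEF-GHIJKL-MNOPQR"
# becomes {'billing_account_id': '"ABCDEF-GHIJKL-MNOPQR"'}, while lines starting
# with '#' or '//' are skipped.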
if __name__ == "__main__":
try:
print('\n' + text2art("RADLAB", font="larry3d"))
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--rad-project', dest="projid", help="RAD Lab management GCP Project.", required=False)
parser.add_argument('-b', '--rad-bucket', dest="tfbucket", help="RAD Lab management GCS Bucket where Terraform states for the modules will be stored.", required=False)
parser.add_argument('-m', '--module', dest="module_name", choices=sorted([s.replace(os.path.dirname(os.getcwd()) + '/modules/', "") for s in glob.glob(os.path.dirname(os.getcwd()) + '/modules/*')]), help="RADLab Module name under ../../modules folder", required=False)
parser.add_argument('-a', '--action', dest="action", choices=['create', 'update', 'delete', 'list'], help="Type of action you want to perform for the selected RADLab module.", required=False)
parser.add_argument('-f', '--varfile', dest="file", type=argparse.FileType('r', encoding='UTF-8'), help="Input file (with complete path) for terraform.tfvars contents.", required=False)
parser.add_argument('-dc', '--disable-perm-check', dest="disable_perm_check", action='store_false', help="Flag to disable RAD Lab permissions pre-check.", required=False)
args = parser.parse_args()
# File Argument
if args.file is not None:
print("Checking input file...")
filecontents = args.file.readlines()
variables = fetchvariables(filecontents)
else:
variables = {}
# Action Argument
if args.action == 'create':
action = ACTION_CREATE_DEPLOYMENT
elif args.action == 'update':
action = ACTION_UPDATE_DEPLOYMENT
elif args.action == 'delete':
action = ACTION_DELETE_DEPLOYMENT
elif args.action == 'list':
action = ACTION_LIST_DEPLOYMENT
else:
action = None
main(variables, args.module_name, action, args.projid, args.tfbucket, args.disable_perm_check)
except Exception as e:
print(e)
|
{
"content_hash": "50bb382348c282ba395cd199d2e2b838",
"timestamp": "",
"source": "github",
"line_count": 1071,
"max_line_length": 412,
"avg_line_length": 43.69094304388422,
"alnum_prop": 0.6051332464257474,
"repo_name": "GoogleCloudPlatform/rad-lab",
"id": "a3996b1267cdc597f465899676f6a067161ab95d",
"size": "46793",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "radlab-launcher/radlab.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1095"
},
{
"name": "Dockerfile",
"bytes": "3360"
},
{
"name": "HCL",
"bytes": "358305"
},
{
"name": "HTML",
"bytes": "4568"
},
{
"name": "JavaScript",
"bytes": "12416"
},
{
"name": "Jupyter Notebook",
"bytes": "349435"
},
{
"name": "Python",
"bytes": "93759"
},
{
"name": "Shell",
"bytes": "40564"
},
{
"name": "Smarty",
"bytes": "2950"
},
{
"name": "TypeScript",
"bytes": "278297"
}
],
"symlink_target": ""
}
|
import logging
from torch.optim import SGD, Adam, ASGD, Adamax, Adadelta, Adagrad, RMSprop
logger = logging.getLogger("ptsemseg")
key2opt = {
"sgd": SGD,
"adam": Adam,
"asgd": ASGD,
"adamax": Adamax,
"adadelta": Adadelta,
"adagrad": Adagrad,
"rmsprop": RMSprop,
}
def get_optimizer(cfg):
if cfg["training"]["optimizer"] is None:
logger.info("Using SGD optimizer")
return SGD
else:
opt_name = cfg["training"]["optimizer"]["name"]
if opt_name not in key2opt:
raise NotImplementedError("Optimizer {} not implemented".format(opt_name))
logger.info("Using {} optimizer".format(opt_name))
return key2opt[opt_name]
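# Illustrative usage sketch (not part of the original module); the cfg layout
# mirrors what get_optimizer() expects and the parameter values are hypothetical.
def _demo_get_optimizer(model_params):
    cfg = {"training": {"optimizer": {"name": "adam", "lr": 1e-3}}}
    optimizer_cls = get_optimizer(cfg)  # resolves to torch.optim.Adam
    # Keys other than "name" are typically forwarded as keyword arguments.
    return optimizer_cls(model_params, lr=cfg["training"]["optimizer"]["lr"])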
|
{
"content_hash": "7fce082c16222b53e276d01d86a9e50e",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 86,
"avg_line_length": 24.517241379310345,
"alnum_prop": 0.6216596343178622,
"repo_name": "meetshah1995/pytorch-semseg",
"id": "3d7996726b302474e49f3d49995027c680c7a18b",
"size": "711",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ptsemseg/optimizers/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "173693"
}
],
"symlink_target": ""
}
|
from honeybadger.fake_connection import send_notice
from testfixtures import log_capture
import json
@log_capture()
def test_send_notice_logging(l):
config = {'api_key': 'aaa'}
payload = json.dumps({'test': 'payload'})
send_notice(config, payload)
l.check(
('honeybadger.fake_connection', 'INFO', 'Development mode is enabled; this error will be reported if it occurs after you deploy your app.'),
('honeybadger.fake_connection', 'DEBUG', 'The config used is {} with payload {}'.format(config, payload)))
|
{
"content_hash": "5c822c8b2876948b1e58181ac500c1bf",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 148,
"avg_line_length": 36.06666666666667,
"alnum_prop": 0.6931608133086876,
"repo_name": "honeybadger-io/honeybadger-python",
"id": "c69e289a10902d056a62430d0154764ef40be73b",
"size": "541",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "honeybadger/tests/test_fake_connection.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "81557"
},
{
"name": "Shell",
"bytes": "721"
}
],
"symlink_target": ""
}
|
import base64
from Crypto.Hash import HMAC
from Crypto import Random
from trove_guestagent.openstack.common.gettextutils import _ # noqa
from trove_guestagent.openstack.common import importutils
class CryptoutilsException(Exception):
"""Generic Exception for Crypto utilities."""
message = _("An unknown error occurred in crypto utils.")
class CipherBlockLengthTooBig(CryptoutilsException):
"""The block size is too big."""
def __init__(self, requested, permitted):
msg = _("Block size of %(given)d is too big, max = %(maximum)d")
message = msg % {'given': requested, 'maximum': permitted}
        super(CipherBlockLengthTooBig, self).__init__(message)
class HKDFOutputLengthTooLong(CryptoutilsException):
"""The amount of Key Material asked is too much."""
def __init__(self, requested, permitted):
msg = _("Length of %(given)d is too long, max = %(maximum)d")
message = msg % {'given': requested, 'maximum': permitted}
        super(HKDFOutputLengthTooLong, self).__init__(message)
class HKDF(object):
"""An HMAC-based Key Derivation Function implementation (RFC5869)
    This class creates an object that allows keys to be derived using HKDF.
"""
def __init__(self, hashtype='SHA256'):
self.hashfn = importutils.import_module('Crypto.Hash.' + hashtype)
self.max_okm_length = 255 * self.hashfn.digest_size
def extract(self, ikm, salt=None):
"""An extract function that can be used to derive a robust key given
weak Input Key Material (IKM) which could be a password.
Returns a pseudorandom key (of HashLen octets)
:param ikm: input keying material (ex a password)
:param salt: optional salt value (a non-secret random value)
"""
if salt is None:
salt = '\x00' * self.hashfn.digest_size
return HMAC.new(salt, ikm, self.hashfn).digest()
def expand(self, prk, info, length):
"""An expand function that will return arbitrary length output that can
be used as keys.
Returns a buffer usable as key material.
:param prk: a pseudorandom key of at least HashLen octets
:param info: optional string (can be a zero-length string)
:param length: length of output keying material (<= 255 * HashLen)
"""
if length > self.max_okm_length:
raise HKDFOutputLengthTooLong(length, self.max_okm_length)
N = (length + self.hashfn.digest_size - 1) / self.hashfn.digest_size
okm = ""
tmp = ""
for block in range(1, N + 1):
tmp = HMAC.new(prk, tmp + info + chr(block), self.hashfn).digest()
okm += tmp
return okm[:length]
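# Illustrative sketch (not part of the original module) of the extract-then-expand
# flow; the password, salt and info strings below are hypothetical.
def _demo_hkdf():
    hkdf = HKDF(hashtype='SHA256')
    prk = hkdf.extract('weak input keying material', salt='random-salt-bytes')
    # Derive 32 bytes of output keying material for a hypothetical label.
    return hkdf.expand(prk, 'encryption key', 32)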
MAX_CB_SIZE = 256
class SymmetricCrypto(object):
"""Symmetric Key Crypto object.
This class creates a Symmetric Key Crypto object that can be used
to encrypt, decrypt, or sign arbitrary data.
:param enctype: Encryption Cipher name (default: AES)
:param hashtype: Hash/HMAC type name (default: SHA256)
"""
def __init__(self, enctype='AES', hashtype='SHA256'):
self.cipher = importutils.import_module('Crypto.Cipher.' + enctype)
self.hashfn = importutils.import_module('Crypto.Hash.' + hashtype)
def new_key(self, size):
return Random.new().read(size)
def encrypt(self, key, msg, b64encode=True):
"""Encrypt the provided msg and returns the cyphertext optionally
base64 encoded.
Uses AES-128-CBC with a Random IV by default.
The plaintext is padded to reach blocksize length.
The last byte of the block is the length of the padding.
The length of the padding does not include the length byte itself.
:param key: The Encryption key.
:param msg: the plain text.
:returns encblock: a block of encrypted data.
"""
iv = Random.new().read(self.cipher.block_size)
cipher = self.cipher.new(key, self.cipher.MODE_CBC, iv)
# CBC mode requires a fixed block size. Append padding and length of
# padding.
if self.cipher.block_size > MAX_CB_SIZE:
raise CipherBlockLengthTooBig(self.cipher.block_size, MAX_CB_SIZE)
r = len(msg) % self.cipher.block_size
padlen = self.cipher.block_size - r - 1
msg += '\x00' * padlen
msg += chr(padlen)
enc = iv + cipher.encrypt(msg)
if b64encode:
enc = base64.b64encode(enc)
return enc
def decrypt(self, key, msg, b64decode=True):
"""Decrypts the provided ciphertext, optionally base 64 encoded, and
returns the plaintext message, after padding is removed.
Uses AES-128-CBC with an IV by default.
:param key: The Encryption key.
        :param msg: the ciphertext; the first block is the IV
"""
if b64decode:
msg = base64.b64decode(msg)
iv = msg[:self.cipher.block_size]
cipher = self.cipher.new(key, self.cipher.MODE_CBC, iv)
padded = cipher.decrypt(msg[self.cipher.block_size:])
l = ord(padded[-1]) + 1
plain = padded[:-l]
return plain
def sign(self, key, msg, b64encode=True):
"""Signs a message string and returns a base64 encoded signature.
Uses HMAC-SHA-256 by default.
:param key: The Signing key.
:param msg: the message to sign.
"""
h = HMAC.new(key, msg, self.hashfn)
out = h.digest()
if b64encode:
out = base64.b64encode(out)
return out
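# Illustrative round trip (not part of the original module): encrypt() prepends
# the IV and pads the plaintext; decrypt() strips both. The message is hypothetical.
def _demo_symmetric_round_trip():
    crypto = SymmetricCrypto()
    key = crypto.new_key(16)  # 16 bytes -> AES-128
    ciphertext = crypto.encrypt(key, 'hello world')
    return crypto.decrypt(key, ciphertext)  # -> 'hello world'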
|
{
"content_hash": "37121d47fa911923f1f683ce4308928f",
"timestamp": "",
"source": "github",
"line_count": 163,
"max_line_length": 79,
"avg_line_length": 34.15950920245399,
"alnum_prop": 0.6312859195402298,
"repo_name": "denismakogon/trove-guestagent",
"id": "fb5bf3dd23793cd136ed1ab243c436f4fa5c1fa5",
"size": "6219",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "trove_guestagent/openstack/common/crypto/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "19900"
},
{
"name": "JavaScript",
"bytes": "7403"
},
{
"name": "Python",
"bytes": "1023022"
}
],
"symlink_target": ""
}
|
import unittest
import devtools_monitor
from page_track import PageTrack
class MockDevToolsConnection(object):
def __init__(self):
self.stop_has_been_called = False
def RegisterListener(self, name, listener):
pass
def StopMonitoring(self):
self.stop_has_been_called = True
class PageTrackTest(unittest.TestCase):
_EVENTS = [{'method': 'Page.frameStartedLoading',
'params': {'frameId': '1234.1'}},
{'method': 'Page.frameAttached',
'params': {'frameId': '1234.12', 'parentFrameId': '1234.1'}},
{'method': 'Page.frameStartedLoading',
'params': {'frameId': '1234.12'}},
{'method': 'Page.frameStoppedLoading',
'params': {'frameId': '1234.12'}},
{'method': 'Page.frameStoppedLoading',
'params': {'frameId': '1234.1'}}]
def testAsksMonitoringToStop(self):
devtools_connection = MockDevToolsConnection()
page_track = PageTrack(devtools_connection)
for msg in PageTrackTest._EVENTS[:-1]:
page_track.Handle(msg['method'], msg)
self.assertFalse(devtools_connection.stop_has_been_called)
msg = PageTrackTest._EVENTS[-1]
page_track.Handle(msg['method'], msg)
self.assertTrue(devtools_connection.stop_has_been_called)
def testUnknownParent(self):
page_track = PageTrack(None)
msg = {'method': 'Page.frameAttached',
'params': {'frameId': '1234.12', 'parentFrameId': '1234.1'}}
with self.assertRaises(AssertionError):
page_track.Handle(msg['method'], msg)
def testStopsLoadingUnknownFrame(self):
page_track = PageTrack(None)
msg = {'method': 'Page.frameStoppedLoading',
'params': {'frameId': '1234.12'}}
with self.assertRaises(AssertionError):
page_track.Handle(msg['method'], msg)
def testGetMainFrameId(self):
devtools_connection = MockDevToolsConnection()
page_track = PageTrack(devtools_connection)
for msg in PageTrackTest._EVENTS:
page_track.Handle(msg['method'], msg)
self.assertEquals('1234.1', page_track.GetMainFrameId())
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "1169828415f3311ee145659c9b352004",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 75,
"avg_line_length": 35.049180327868854,
"alnum_prop": 0.6449953227315248,
"repo_name": "highweb-project/highweb-webcl-html5spec",
"id": "3056d99f8ac2cbbe7817c8d83cab058b2570ed43",
"size": "2301",
"binary": false,
"copies": "15",
"ref": "refs/heads/highweb-20160310",
"path": "tools/android/loading/page_track_unittest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
"""Atom class, used in Structure objects."""
import numpy
import warnings
import copy
from Bio.PDB.Entity import DisorderedEntityWrapper
from Bio.PDB.PDBExceptions import PDBConstructionWarning
from Bio.PDB.Vector import Vector
from Bio.Data import IUPACData
class Atom(object):
def __init__(self, name, coord, bfactor, occupancy, altloc, fullname, serial_number,
element=None):
"""
Atom object.
The Atom object stores atom name (both with and without spaces),
coordinates, B factor, occupancy, alternative location specifier
and (optionally) anisotropic B factor and standard deviations of
B factor and positions.
@param name: atom name (eg. "CA"). Note that spaces are normally stripped.
@type name: string
@param coord: atomic coordinates (x,y,z)
@type coord: Numeric array (Float0, size 3)
@param bfactor: isotropic B factor
@type bfactor: number
@param occupancy: occupancy (0.0-1.0)
@type occupancy: number
@param altloc: alternative location specifier for disordered atoms
@type altloc: string
@param fullname: full atom name, including spaces, e.g. " CA ". Normally
these spaces are stripped from the atom name.
@type fullname: string
@param element: atom element, e.g. "C" for Carbon, "HG" for mercury,
        @type element: uppercase string (or None if unknown)
"""
self.level="A"
# Reference to the residue
self.parent=None
# the atomic data
self.name=name # eg. CA, spaces are removed from atom name
self.fullname=fullname # e.g. " CA ", spaces included
self.coord=coord
self.bfactor=bfactor
self.occupancy=occupancy
self.altloc=altloc
self.full_id=None # (structure id, model id, chain id, residue id, atom id)
self.id=name # id of atom is the atom name (e.g. "CA")
self.disordered_flag=0
self.anisou_array=None
self.siguij_array=None
self.sigatm_array=None
self.serial_number=serial_number
        # Dictionary that keeps additional properties
self.xtra={}
assert not element or element == element.upper(), element
self.element = self._assign_element(element)
self.mass = self._assign_atom_mass()
def _assign_element(self, element):
"""Tries to guess element from atom name if not recognised."""
if not element or element.capitalize() not in IUPACData.atom_weights:
# Inorganic elements have their name shifted left by one position
# (is a convention in PDB, but not part of the standard).
# isdigit() check on last two characters to avoid mis-assignment of
# hydrogens atoms (GLN HE21 for example)
if self.fullname[0] != " " and not self.fullname[2:].isdigit():
putative_element = self.name.strip()
else:
# Hs may have digit in [0]
if self.name[0].isdigit():
putative_element = self.name[1]
else:
putative_element = self.name[0]
if putative_element.capitalize() in IUPACData.atom_weights:
msg = "Used element %r for Atom (name=%s) with given element %r" \
% (putative_element, self.name, element)
element = putative_element
else:
msg = "Could not assign element %r for Atom (name=%s) with given element %r" \
% (putative_element, self.name, element)
element = ""
warnings.warn(msg, PDBConstructionWarning)
return element
def _assign_atom_mass(self):
# Needed for Bio/Struct/Geometry.py C.O.M. function
if self.element:
return IUPACData.atom_weights[self.element.capitalize()]
else:
return float('NaN')
# Special methods
def __repr__(self):
"Print Atom object as <Atom atom_name>."
return "<Atom %s>" % self.get_id()
def __sub__(self, other):
"""
Calculate distance between two atoms.
Example:
>>> distance=atom1-atom2
@param other: the other atom
@type other: L{Atom}
"""
diff=self.coord-other.coord
return numpy.sqrt(numpy.dot(diff,diff))
# set methods
def set_serial_number(self, n):
self.serial_number=n
def set_bfactor(self, bfactor):
self.bfactor=bfactor
def set_coord(self, coord):
self.coord=coord
def set_altloc(self, altloc):
self.altloc=altloc
def set_occupancy(self, occupancy):
self.occupancy=occupancy
def set_sigatm(self, sigatm_array):
"""
Set standard deviation of atomic parameters.
The standard deviation of atomic parameters consists
of 3 positional, 1 B factor and 1 occupancy standard
deviation.
@param sigatm_array: standard deviations of atomic parameters.
@type sigatm_array: Numeric array (length 5)
"""
self.sigatm_array=sigatm_array
def set_siguij(self, siguij_array):
"""
Set standard deviations of anisotropic temperature factors.
@param siguij_array: standard deviations of anisotropic temperature factors.
@type siguij_array: Numeric array (length 6)
"""
self.siguij_array=siguij_array
def set_anisou(self, anisou_array):
"""
Set anisotropic B factor.
@param anisou_array: anisotropic B factor.
@type anisou_array: Numeric array (length 6)
"""
self.anisou_array=anisou_array
# Public methods
def flag_disorder(self):
"""Set the disordered flag to 1.
The disordered flag indicates whether the atom is disordered or not.
"""
self.disordered_flag=1
def is_disordered(self):
"Return the disordered flag (1 if disordered, 0 otherwise)."
return self.disordered_flag
def set_parent(self, parent):
"""Set the parent residue.
Arguments:
o parent - Residue object
"""
self.parent=parent
def detach_parent(self):
"Remove reference to parent."
self.parent=None
def get_sigatm(self):
"Return standard deviation of atomic parameters."
return self.sigatm_array
def get_siguij(self):
"Return standard deviations of anisotropic temperature factors."
return self.siguij_array
def get_anisou(self):
"Return anisotropic B factor."
return self.anisou_array
def get_parent(self):
"Return parent residue."
return self.parent
def get_serial_number(self):
return self.serial_number
def get_name(self):
"Return atom name."
return self.name
def get_id(self):
"Return the id of the atom (which is its atom name)."
return self.id
def get_full_id(self):
"""Return the full id of the atom.
The full id of an atom is the tuple
(structure id, model id, chain id, residue id, atom name, altloc).
"""
return self.parent.get_full_id()+((self.name, self.altloc),)
def get_coord(self):
"Return atomic coordinates."
return self.coord
def get_bfactor(self):
"Return B factor."
return self.bfactor
def get_occupancy(self):
"Return occupancy."
return self.occupancy
def get_fullname(self):
"Return the atom name, including leading and trailing spaces."
return self.fullname
def get_altloc(self):
"Return alternative location specifier."
return self.altloc
def get_level(self):
return self.level
def transform(self, rot, tran):
"""
Apply rotation and translation to the atomic coordinates.
Example:
>>> rotation=rotmat(pi, Vector(1,0,0))
>>> translation=array((0,0,1), 'f')
>>> atom.transform(rotation, translation)
@param rot: A right multiplying rotation matrix
@type rot: 3x3 Numeric array
@param tran: the translation vector
@type tran: size 3 Numeric array
"""
self.coord=numpy.dot(self.coord, rot)+tran
def get_vector(self):
"""
Return coordinates as Vector.
@return: coordinates as 3D vector
@rtype: Vector
"""
x,y,z=self.coord
return Vector(x,y,z)
def copy(self):
"""
Create a copy of the Atom.
Parent information is lost.
"""
# Do a shallow copy then explicitly copy what needs to be deeper.
shallow = copy.copy(self)
shallow.detach_parent()
shallow.set_coord(copy.copy(self.get_coord()))
shallow.xtra = self.xtra.copy()
return shallow
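# Illustrative sketch (not part of the original module): building two atoms by
# hand and measuring their distance with the overloaded subtraction operator.
# The names and coordinates below are hypothetical.
def _demo_atom_distance():
    ca = Atom("CA", numpy.array((0.0, 0.0, 0.0), "f"), 20.0, 1.0, " ", " CA ", 1, element="C")
    cb = Atom("CB", numpy.array((1.0, 2.0, 2.0), "f"), 20.0, 1.0, " ", " CB ", 2, element="C")
    return ca - cb  # Euclidean distance, 3.0 for these coordinates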
class DisorderedAtom(DisorderedEntityWrapper):
"""
This class contains all Atom objects that represent the same disordered
atom. One of these atoms is "selected" and all method calls not caught
by DisorderedAtom are forwarded to the selected Atom object. In that way, a
DisorderedAtom behaves exactly like a normal Atom. By default, the selected
Atom object represents the Atom object with the highest occupancy, but a
different Atom object can be selected by using the disordered_select(altloc)
method.
"""
def __init__(self, id):
"""
Arguments:
o id - string, atom name
"""
self.last_occupancy=-1
DisorderedEntityWrapper.__init__(self, id)
# Special methods
def __repr__(self):
return "<Disordered Atom %s>" % self.get_id()
def disordered_add(self, atom):
"Add a disordered atom."
# Add atom to dict, use altloc as key
atom.flag_disorder()
# set the residue parent of the added atom
residue=self.get_parent()
atom.set_parent(residue)
altloc=atom.get_altloc()
occupancy=atom.get_occupancy()
self[altloc]=atom
if occupancy>self.last_occupancy:
self.last_occupancy=occupancy
self.disordered_select(altloc)
|
{
"content_hash": "d1ad21626879f1d1f394c508d09c5826",
"timestamp": "",
"source": "github",
"line_count": 332,
"max_line_length": 94,
"avg_line_length": 31.44578313253012,
"alnum_prop": 0.600287356321839,
"repo_name": "bryback/quickseq",
"id": "c9764357f159fab588ef52ec41a43fb7ef889b06",
"size": "10681",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "genescript/Bio/PDB/Atom.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "272327"
},
{
"name": "JavaScript",
"bytes": "61962"
},
{
"name": "Python",
"bytes": "4049558"
}
],
"symlink_target": ""
}
|
import getpass, poplib, sys
if len(sys.argv) != 3:
print 'usage: %s hostname user' % sys.argv[0]
exit(2)
hostname, user = sys.argv[1:]
passwd = getpass.getpass()
p = poplib.POP3_SSL(hostname) # or "POP3" if SSL is not supported
try:
p.user(user)
p.pass_(passwd)
except poplib.error_proto, e:
print "Login failed:", e
else:
status = p.stat()
print "You have %d messages totaling %d bytes" % status
finally:
p.quit()
|
{
"content_hash": "a0ea395464af7fe7ea3351bd45cef60d",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 66,
"avg_line_length": 22.55,
"alnum_prop": 0.6407982261640798,
"repo_name": "jac2130/BayesGame",
"id": "db2fadba46fcb6c40ae53b189d153133b5b380c2",
"size": "536",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "foundations-of-python-network-programming/python2/14/popconn.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "ASP",
"bytes": "95"
},
{
"name": "C#",
"bytes": "1110"
},
{
"name": "CSS",
"bytes": "2118"
},
{
"name": "HTML",
"bytes": "166635"
},
{
"name": "JavaScript",
"bytes": "751618"
},
{
"name": "PHP",
"bytes": "339"
},
{
"name": "Perl",
"bytes": "3136"
},
{
"name": "Python",
"bytes": "1821680"
},
{
"name": "Shell",
"bytes": "1630"
},
{
"name": "Smarty",
"bytes": "7840"
}
],
"symlink_target": ""
}
|
from typing import Optional
# Definition for a binary tree node.
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
class InitialRecursiveSolution:
def isBalanced(self, root: Optional[TreeNode]) -> bool:
def check(node: Optional[TreeNode]) -> int:
if not node:
return 0
if (left := check(node.left)) == -1 or (right := check(node.right)) == -1:
return -1
if abs(left - right) > 1:
return -1
return 1 + max(left, right)
return check(root) >= 0
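# Illustrative usage sketch (not part of the original solution): a three-node
# tree is balanced, while a left-skewed chain of four nodes is not.
def _demo_is_balanced() -> bool:
    balanced = TreeNode(1, TreeNode(2), TreeNode(3))
    skewed = TreeNode(1, TreeNode(2, TreeNode(3, TreeNode(4))))
    solver = InitialRecursiveSolution()
    return solver.isBalanced(balanced) and not solver.isBalanced(skewed)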
|
{
"content_hash": "006bb4920bc22e072b1af24aebd1d72b",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 86,
"avg_line_length": 24.51851851851852,
"alnum_prop": 0.5377643504531722,
"repo_name": "vilisimo/ads",
"id": "780a8a685257400dc96671799dfc1a5780511ae5",
"size": "1207",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/leetcode/easy/ex0110.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "80324"
},
{
"name": "Python",
"bytes": "183126"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
__all__ = ['general','plot','stats','text']
from . import general, plot, stats, text
|
{
"content_hash": "14d0eeeeb714fd09e40092b2e40b4879",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 43,
"avg_line_length": 41.333333333333336,
"alnum_prop": 0.6693548387096774,
"repo_name": "mattjj/pybasicbayes",
"id": "ed6fb6df75d06313f4d24356855e82c8f3e7c651",
"size": "124",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "pybasicbayes/util/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "283289"
}
],
"symlink_target": ""
}
|
from os import path
from setuptools import setup
setup(
name="tucan",
version="0.8",
description="New Grades Notification Script for TuCaN",
long_description=open(path.join(path.dirname(__file__), "README.rst")).read(),
url="http://github.com/fhirschmann/tucan",
author="Fabian Hirschmann",
author_email="fabian@hirschmann.email",
license="MIT",
platforms="any",
install_requires=[
"lxml",
"mechanize",
],
keywords="tucan tu darmstadt technische universität",
scripts=["bin/tucan"],
)
|
{
"content_hash": "2e699da1a0c28bc307dd29b77f74462c",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 82,
"avg_line_length": 26.428571428571427,
"alnum_prop": 0.6504504504504505,
"repo_name": "fhirschmann/tucan",
"id": "62925aa577e69ad3ae791b706dfaa7d8993a380c",
"size": "581",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4025"
}
],
"symlink_target": ""
}
|
"""
"""
__author__ = "Jérôme Samson"
__copyright__ = "Copyright 2014, Mikros Image"
import os
import sys
import csv
import time
import datetime
from optparse import OptionParser
import numpy as np
import pygal
from pygal.style import *
try:
import simplejson as json
except ImportError:
import json
from octopus.dispatcher import settings
from octopus.core import singletonconfig
from pulitools.common import roundTime
from pulitools.common import lowerQuartile, higherQuartile
###########################################################################################################################
# Data example:
# {
# "prod":{
# "ddd" : { "jobs":15, "err":1, "paused":2, "ready/blocked":10, "running":2, "allocatedRN":5, "readyCommandCount":15},
# "dior_tea" : { "jobs":1, "err":0, "paused":0, "ready/blocked":0, "running":1, "allocatedRN":1, "readyCommandCount":15},
# },
# "user":{
# "brr" : { "jobs":15, "err":1, "paused":2, "ready/blocked":10, "running":2 , "allocatedRN":5, "readyCommandCount":15},
# "bho" : { "jobs":1, "err":0, "paused":0, "ready/blocked":0, "running":1 , "allocatedRN":1, "readyCommandCount":15},
# "lap" : { "jobs":1, "err":0, "paused":0, "ready/blocked":0, "running":1 , "allocatedRN":1, "readyCommandCount":15},
# },
# "step":{
# ...
# },
# "type":{
# ...
# },
# "total": { "jobs":15, "err":1, "paused":2, "ready/blocked":10, "running":2 , "allocatedRN":5, "readyCommandCount":150}
# "requestDate": "Wed Apr 2 12:16:01 2014"
# }
def process_args():
'''
Manages arguments parsing definition and help information
'''
usage = "usage: %prog [general options] [restriction list] [output option]"
desc="""Displays information.
"""
parser = OptionParser(usage=usage, description=desc, version="%prog 0.1" )
parser.add_option( "-f", action="store", dest="sourceFile", default=os.path.join(settings.LOGDIR, "usage_stats.log"), help="Source file" )
parser.add_option( "-o", action="store", dest="outputFile", default="./queue_avg.svg", help="Target output file." )
parser.add_option( "-v", action="store_true", dest="verbose", help="Verbose output" )
parser.add_option( "-s", action="store", dest="rangeIn", type="int", help="Start range is N hours in past", default=3 )
parser.add_option( "-e", action="store", dest="rangeOut", type="int", help="End range is N hours in past (mus be lower than '-s option'", default=0 )
parser.add_option( "-t", "--title", action="store", dest="title", help="Indicates a title", default="Queue usage over time")
parser.add_option( "-r", "--res", action="store", dest="resolution", type="int", help="Indicates ", default=10 )
parser.add_option( "--stack", action="store_true", dest="stacked", default=False)
parser.add_option( "--line", action="store_true", dest="line", default=True)
parser.add_option( "--log", action="store_true", dest="logarithmic", help="Display graph with a logarithmic scale", default=False )
parser.add_option( "--scale", action="store", dest="scaleEvery", type="int", help="Indicates the number of scale values to display", default=8 )
options, args = parser.parse_args()
return options, args
if __name__ == "__main__":
options, args = process_args()
VERBOSE = options.verbose
if VERBOSE:
print "Command options: %s" % options
print "Command arguments: %s" % args
if options.rangeIn < options.rangeOut:
print "Invalid start/end range"
sys.exit()
startDate = time.time() - 3600 * options.rangeIn
endDate = time.time() - 3600 * options.rangeOut
if VERBOSE:
print "Loading stats: %r " % options.sourceFile
print " - from: %r " % datetime.date.fromtimestamp(startDate)
print " - to: %r " % datetime.date.fromtimestamp(endDate)
print "Start."
strScale=[]
scale=[]
tot=[]
totErr=[]
totRun=[]
totPaused=[]
totReady=[]
# arrRnByProd = np.array()
rnByProd = {}
log = []
#
# Load json log and filter by date
#
with open(options.sourceFile, "r" ) as f:
for line in f:
data = json.loads(line)
if (startDate < data['requestDate'] and data['requestDate'] <= endDate):
log.append( json.loads(line) )
for i, data in enumerate(log):
eventDate = datetime.datetime.fromtimestamp( data['requestDate'] )
# tot.append(data["total"]["jobs"])
totErr.append(data["total"]["err"])
totPaused.append(data["total"]["paused"])
totReady.append(data["total"]["ready"])
totRun.append(data["total"]["running"])
# for key, val in data["prod"].items():
# if key not in rnByProd:
# rnByProd[key] = np.array( [0]*len(log) )
# rnByProd[key][i] = val["allocatedRN"]
scale.append( eventDate )
if VERBOSE:
print "Num events: %d" % len(scale)
# for key,val in rnByProd.items():
# print "%s - %r - %s" % (key, len(val), val)
print "Creating graph."
stepSize = len(scale) / options.resolution
newshape = (options.resolution, stepSize)
useableSize = len(scale) - ( len(scale) % options.resolution )
err = np.array(totErr[-useableSize:])
paused = np.array(totPaused[-useableSize:])
ready = np.array(totReady[-useableSize:])
run = np.array(totRun[-useableSize:])
avg_err= np.mean( np.reshape(err, newshape), axis=1)
avg_paused= np.mean( np.reshape(paused, newshape), axis=1)
avg_ready= np.mean( np.reshape(ready, newshape), axis=1)
avg_run= np.mean( np.reshape(run, newshape), axis=1)
# # med= np.median(data, axis=1)
# # amin= np.min(data, axis=1)
# # amax= np.max(data, axis=1)
# # q1= lowerQuartile(data)
# # q2= higherQuartile(data)
# # std= np.std(data, axis=1)
strScale = [''] * options.resolution
tmpscale = np.reshape(scale[-useableSize:], newshape)
# # print ("tmp scale %d = %r" % (len(tmpscale), tmpscale) )
# # print ("str scale %d = %r" % (len(strScale), strScale) )
for i,date in enumerate(tmpscale[::len(tmpscale)/options.scaleEvery]):
newIndex = i*len(tmpscale)/options.scaleEvery
if newIndex < len(strScale):
strScale[newIndex] = date[0].strftime('%H:%M')
strScale[0] = scale[0].strftime('%Y-%m-%d %H:%M')
strScale[-1] = scale[-1].strftime('%Y-%m-%d %H:%M')
# if VERBOSE:
# print ("newshape %d = %r" % (len(newshape), newshape) )
# print ("avg %d = %r" % (len(avg_working), avg_working) )
# print ("scale %d = %r" % (len(strScale), strScale) )
if options.stacked:
avg_usage = pygal.StackedLine( x_label_rotation=30,
include_x_axis=True,
logarithmic=False,
show_dots=False,
width=800,
height=300,
fill=True,
interpolate='hermite',
interpolation_parameters={'type': 'cardinal', 'c': 1.0},
interpolation_precision=3,
style=RedBlueStyle
)
else:
avg_usage = pygal.Line( x_label_rotation=30,
include_x_axis=True,
logarithmic=False,
show_dots=True,
width=800,
height=300,
interpolate='hermite',
interpolation_parameters={'type': 'cardinal', 'c': 1.0},
interpolation_precision=3,
style=RedBlueStyle
)
avg_usage.title = options.title
avg_usage.x_labels = strScale
avg_usage.add('Error', avg_err )
avg_usage.add('Paused', avg_paused )
avg_usage.add('Running', avg_run )
avg_usage.add('Ready', avg_ready )
avg_usage.render_to_file( options.outputFile )
if VERBOSE:
print "Done."
|
{
"content_hash": "d2d42acece8d53899a15dcaa6db69898",
"timestamp": "",
"source": "github",
"line_count": 230,
"max_line_length": 153,
"avg_line_length": 36.278260869565216,
"alnum_prop": 0.5504554170661553,
"repo_name": "smaragden/OpenRenderManagement",
"id": "6d179818b830693b8b6e1f627751ec01fe634a6e",
"size": "8370",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/pulitools/stats/display_queue_usage.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "2443"
},
{
"name": "Python",
"bytes": "889392"
},
{
"name": "Shell",
"bytes": "5347"
}
],
"symlink_target": ""
}
|
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants.backend.jvm.targets.exportable_jvm_library import ExportableJvmLibrary
class JavaLibrary(ExportableJvmLibrary):
"""A Java library.
Normally has conceptually-related sources; invoking the ``compile`` goal
on this target compiles Java and generates classes. Invoking the ``jar``
goal on this target creates a ``.jar``; but that's an unusual thing to do.
Instead, a ``jvm_binary`` might depend on this library; that binary is a
more sensible thing to bundle.
:API: public
"""
def __init__(self, *args, **kwargs):
"""
:param provides: The ``artifact``
to publish that represents this target outside the repo.
:param resources: An optional list of file paths (DEPRECATED) or
``resources`` targets (which in turn point to file paths). The paths
indicate text file resources to place in this module's jar.
"""
super(JavaLibrary, self).__init__(*args, **kwargs)
self.add_labels('java')
|
{
"content_hash": "b816a9e5b121a34fad2ee44f9059b100",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 93,
"avg_line_length": 39.535714285714285,
"alnum_prop": 0.6964769647696477,
"repo_name": "dbentley/pants",
"id": "78ecc48ca64644a1b82ef9c329d6d086f85fda83",
"size": "1254",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/python/pants/backend/jvm/targets/java_library.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "781"
},
{
"name": "CSS",
"bytes": "11572"
},
{
"name": "Cucumber",
"bytes": "919"
},
{
"name": "GAP",
"bytes": "2459"
},
{
"name": "Go",
"bytes": "1569"
},
{
"name": "HTML",
"bytes": "64699"
},
{
"name": "Java",
"bytes": "290988"
},
{
"name": "JavaScript",
"bytes": "31040"
},
{
"name": "Protocol Buffer",
"bytes": "3783"
},
{
"name": "Python",
"bytes": "4277407"
},
{
"name": "Scala",
"bytes": "84066"
},
{
"name": "Shell",
"bytes": "50882"
},
{
"name": "Thrift",
"bytes": "2898"
}
],
"symlink_target": ""
}
|
"""
Cluster of helper functions.
"""
import pickle
from glob import glob
import cv2
import numpy as np
from scipy.misc import imresize, imread
import os
# Constants
OUTPUT_DIR = os.path.abspath('output')
CALIBRATION_PATH_PICKLE = os.path.abspath('output' + os.sep + 'calibration.p')
CAL_IMAGE_PATH = os.path.abspath('camera_cal' + os.sep + 'calibration*.jpg')
ROWS, COLS = (6, 9)
CAL_IMAGE_SIZE = (720, 1280, 3)
"""
1. Calibrate the camera.
"""
class CalibrateCamera:
def __init__(self, image_shape, calibration):
"""
        Removes lens distortion.
Parameters
----------
image_shape: tuple
Width and height of the image.
calibration: dict
Calibrated image.
"""
self.objpoints = calibration['objpoints']
self.imgpoints = calibration['imgpoints']
self.image_shape = image_shape
self.ret, self.mtx, self.dist, self.rvecs, self.tvecs = cv2.calibrateCamera(self.objpoints, self.imgpoints, image_shape, None, None)
def undistort(self, img):
"""
        The function transforms an image to compensate for radial and tangential lens distortion.
Parameters
----------
img: ndarray
Image.
Returns
-------
ndarray: ndarray
Undistorted image.
"""
return cv2.undistort(img, self.mtx, self.dist, None, self.mtx)
def get_camera_calibration():
"""
    If a pickled calibration is present at the location given in `CALIBRATION_PATH_PICKLE`, open the pickle and
    return the data; if not, call `_calculate_camera_calibration` and cache the result.
"""
if not os.path.isfile(CALIBRATION_PATH_PICKLE):
if not os.path.isdir(OUTPUT_DIR):
os.makedirs(OUTPUT_DIR)
calibration = _calculate_camera_calibration(CAL_IMAGE_PATH, ROWS, COLS)
with open(CALIBRATION_PATH_PICKLE, 'wb') as file:
pickle.dump(calibration, file=file)
else:
with open(CALIBRATION_PATH_PICKLE, "rb") as file:
calibration = pickle.load(file)
return calibration
def _calculate_camera_calibration(path_pattern, rows, cols):
"""
Based on the chessboard images located in `camera_cal`, calculate the camera calibration.
Parameters
----------
path_pattern: str
Path pattern for Glob.
rows: int
Number of rows on chessboard.
cols: int
Number of columns on chess board.
Returns
-------
calibration: dict
A dictionary of `cv2.calibrateCamera`
"""
objp = np.zeros((rows * cols, 3), np.float32)
objp[:, :2] = np.mgrid[0:cols, 0:rows].T.reshape(-1, 2)
objpoints = []
imgpoints = []
images = glob(path_pattern)
cal_images = np.zeros((len(images), *CAL_IMAGE_SIZE), dtype=np.uint8)
successful_count = 0
for idx, fname in enumerate(images):
img = imread(fname)
if img.shape[0] != CAL_IMAGE_SIZE[0] or img.shape[1] != CAL_IMAGE_SIZE[1]:
img = imresize(img, CAL_IMAGE_SIZE)
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
ret, corners = cv2.findChessboardCorners(gray, (cols, rows), None)
if ret:
successful_count += 1
objpoints.append(objp)
imgpoints.append(corners)
img = cv2.drawChessboardCorners(img, (cols, rows), corners, ret)
cal_images[idx] = img
print("%s/%s camera calibration images processed." % (successful_count, len(images)))
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, CAL_IMAGE_SIZE[:-1], None, None)
calibration = {'objpoints': objpoints,
'imgpoints': imgpoints,
'cal_images': cal_images,
'mtx': mtx,
'dist': dist,
'rvecs': rvecs,
'tvecs': tvecs}
return calibration
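# Illustrative sketch (not part of the original helpers): wiring the cached
# calibration into CalibrateCamera to undistort a single RGB frame. The image
# is assumed to match CAL_IMAGE_SIZE, and (height, width) is passed as
# image_shape to mirror how this module already calls cv2.calibrateCamera.
def _demo_undistort(img):
    calibration = get_camera_calibration()
    camera = CalibrateCamera(img.shape[:2], calibration)
    return camera.undistort(img)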
"""
2. Change the perspective
"""
class PerspectiveTransformer:
"""
Helps to change the perspective of the image.
"""
def __init__(self, src, dst):
"""
Parameters
----------
src: ndarray
Source coordinates.
dst: ndarray
Destination coordinates.
"""
self.src = src
self.dst = dst
self.M = cv2.getPerspectiveTransform(src, dst)
self.M_inv = cv2.getPerspectiveTransform(dst, src)
def transform(self, img):
"""
Transform the image using CV2's `warpPerspective`.
Parameters
----------
img: ndarray
Image.
Returns
-------
image: ndarray
Transformed image.
"""
return cv2.warpPerspective(img, self.M, (img.shape[1], img.shape[0]), flags=cv2.INTER_LINEAR)
def inverse_transform(self, img):
"""
Inverse transform the image using CV2's `warpPerspective`.
Parameters
----------
img: ndarray
Image.
Returns
-------
image: ndarray
Transformed image.
"""
return cv2.warpPerspective(img, self.M_inv, (img.shape[1], img.shape[0]), flags=cv2.INTER_LINEAR)
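# Illustrative sketch (not part of the original helpers): a bird's-eye warp of a
# 1280x720 frame. The source/destination corner points below are hypothetical and
# would normally be tuned to the camera mounting and lane geometry.
def _demo_birds_eye(img):
    src = np.float32([[253, 697], [585, 456], [700, 456], [1061, 690]])
    dst = np.float32([[303, 697], [303, 0], [1011, 0], [1011, 690]])
    transformer = PerspectiveTransformer(src, dst)
    warped = transformer.transform(img)  # road plane seen from above
    return transformer.inverse_transform(warped)  # back to the camera view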
"""
3. Generate images.
"""
def generate_lane_mask(img, v_cutoff=0):
"""
    Generate a binary mask image with the lane pixels selected.
Parameters
----------
img: ndarray
Image.
    v_cutoff: int
        Vertical row index; rows above it are ignored when building the mask.
Returns
-------
mask: ndarray
A binary Numpy array of the masked image.
"""
window = img[v_cutoff:, :, :]
yuv = cv2.cvtColor(window, cv2.COLOR_RGB2YUV)
yuv = 255 - yuv
hls = cv2.cvtColor(window, cv2.COLOR_RGB2HLS)
chs = np.stack((yuv[:, :, 1], yuv[:, :, 2], hls[:, :, 2]), axis=2)
gray = np.mean(chs, 2)
s_x = abs_sobel(gray, orient='x', kernel_size=3)
s_y = abs_sobel(gray, orient='y', kernel_size=3)
grad_dir = gradient_direction(s_x, s_y)
grad_mag = gradient_magnitude(s_x, s_y)
ylw = extract_yellow(window)
highlights = extract_highlights(window[:, :, 0])
mask = np.zeros(img.shape[:-1], dtype=np.uint8)
mask[v_cutoff:, :][((s_x >= 25) & (s_x <= 255) &
(s_y >= 25) & (s_y <= 255)) |
((grad_mag >= 30) & (grad_mag <= 512) &
(grad_dir >= 0.2) & (grad_dir <= 1.)) |
(ylw == 255) |
(highlights == 255)] = 1
mask = binary_noise_reduction(mask, 4)
return mask
def abs_sobel(img_ch, orient='x', kernel_size=3):
"""
Takes the absolute values of Sobel derivative.
Parameters
----------
img_ch
orient: str
        Orientation of the derivative: `x` or `y`.
kernel_size: int
Kernel size.
Returns
-------
abs_sobel: ndarray
Absolute array of Sobel derivative.
"""
if orient == 'x':
axis = (1, 0)
elif orient == 'y':
axis = (0, 1)
else:
raise ValueError('orient has to be "x" or "y" not "%s"' % orient)
sobel = cv2.Sobel(img_ch, -1, *axis, ksize=kernel_size)
abs_sobel = np.absolute(sobel)
return abs_sobel
def gradient_magnitude(sobel_x, sobel_y):
"""
Calculates the magnitude of the gradient.
Parameters
----------
sobel_x
sobel_y
Returns
-------
    abs_grad_mag: ndarray
"""
abs_grad_mag = np.sqrt(sobel_x ** 2 + sobel_y ** 2)
return abs_grad_mag.astype(np.uint16)
def gradient_direction(sobel_x, sobel_y):
"""
    Calculates the direction of the gradient. NaN values caused by zero division will be replaced
by the maximum value (np.pi / 2).
Parameters
----------
sobel_x
sobel_y
Returns
-------
abs_grad_dir: ndarray
"""
abs_grad_dir = np.absolute(np.arctan(sobel_y / sobel_x))
abs_grad_dir[np.isnan(abs_grad_dir)] = np.pi / 2
return abs_grad_dir.astype(np.float32)
def extract_yellow(img):
"""
Mask all yellow pixels.
Parameters
----------
img
Returns
-------
mask: ndarray
Masked image
"""
hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
mask = cv2.inRange(hsv, (20, 50, 150), (40, 255, 255))
return mask
def extract_highlights(img, p=99.9):
"""
Get the selected highlights from the image.
Parameters
----------
img
p: float
Percentile to compute
Returns
-------
mask: ndarray
Masked image
"""
p = int(np.percentile(img, p) - 30)
mask = cv2.inRange(img, p, 255)
return mask
def binary_noise_reduction(img, thresh):
"""
Parameters
----------
img
thresh
Returns
-------
img: ndarray
Filtered image.
"""
k = np.array([[1, 1, 1],
[1, 0, 1],
[1, 1, 1]])
nb_neighbours = cv2.filter2D(img, ddepth=-1, kernel=k)
img[nb_neighbours < thresh] = 0
return img
"""
4. Detect the lines on the road based on image generated.
"""
class Line:
"""
    Detect the lane lines on the road.
"""
def __init__(self, n_frames=1, x=None, y=None):
"""
Parameters
----------
n_frames: int
Number of frames to smooth
x: list
`X` coordinates
y: list
`Y` coordinates
"""
# Frame memory
self.n_frames = n_frames
# was the line detected in the last iteration?
self.detected = False
# number of pixels added per frame
self.n_pixel_per_frame = []
# x values of the last n fits of the line
self.recent_xfitted = []
# average x values of the fitted line over the last n iterations
self.bestx = None
# polynomial coefficients averaged over the last n iterations
self.best_fit = None
# polynomial coefficients for the most recent fit
self.current_fit = None
# Polynom for the current coefficients
self.current_fit_poly = None
# Polynom for the average coefficients over the last n iterations
self.best_fit_poly = None
# radius of curvature of the line in some units
self.radius_of_curvature = None
# distance in meters of vehicle center from the line
self.line_base_pos = None
# difference in fit coefficients between last and new fits
self.diffs = np.array([0, 0, 0], dtype='float')
# x values for detected line pixels
self.allx = None
# y values for detected line pixels
self.ally = None
if x is not None:
self.update(x, y)
def update(self, x, y):
"""
Update the lines.
Parameters
----------
x: list
List of `X` values
y: list
List of `Y` values
"""
assert len(x) == len(y), 'x and y have to be the same size'
self.allx = x
self.ally = y
self.n_pixel_per_frame.append(len(self.allx))
self.recent_xfitted.extend(self.allx)
if len(self.n_pixel_per_frame) > self.n_frames:
n_x_to_remove = self.n_pixel_per_frame.pop(0)
self.recent_xfitted = self.recent_xfitted[n_x_to_remove:]
self.bestx = np.mean(self.recent_xfitted)
self.current_fit = np.polyfit(self.allx, self.ally, 2)
if self.best_fit is None:
self.best_fit = self.current_fit
else:
self.best_fit = (self.best_fit * (self.n_frames - 1) + self.current_fit) / self.n_frames
self.current_fit_poly = np.poly1d(self.current_fit)
self.best_fit_poly = np.poly1d(self.best_fit)
def is_current_fit_parallel(self, other_line, threshold=(0, 0)):
"""
        Check whether the current fits of this line and another line are roughly parallel.
Parameters
----------
other_line
threshold: tuple
            Thresholds for the allowed difference of the first two polynomial coefficients.
Returns
-------
is_parallel: bool
True or False
"""
first_coefi_dif = np.abs(self.current_fit[0] - other_line.current_fit[0])
second_coefi_dif = np.abs(self.current_fit[1] - other_line.current_fit[1])
is_parallel = first_coefi_dif < threshold[0] and second_coefi_dif < threshold[1]
return is_parallel
def get_current_fit_distance(self, other_line):
"""
Gets the distance between the current fit polynomials of two lines
Parameters
----------
other_line
Returns
-------
abs: ndarray
Distance.
"""
return np.abs(self.current_fit_poly(719) - other_line.current_fit_poly(719))
def get_best_fit_distance(self, other_line):
"""
Gets the distance between the best fit polynomials of two lines
Parameters
----------
other_line
Returns
-------
absolute: ndarray
Absolute value.
"""
return np.abs(self.best_fit_poly(719) - other_line.best_fit_poly(719))
def calc_curvature(fit_cr):
"""
Calculates the curvature of a line in meters
Parameters
----------
fit_cr
Returns
-------
curve_radius: float
Curvature of line.
"""
# Define conversions in x and y from pixels space to meters
ym_per_pix = 30 / 720 # meters per pixel in y dimension
    xm_per_pix = 3.7 / 700  # meters per pixel in x dimension
y = np.array(np.linspace(0, 719, num=10))
x = np.array([fit_cr(x) for x in y])
y_eval = np.max(y)
fit_cr = np.polyfit(y * ym_per_pix, x * xm_per_pix, 2)
curve_radius = ((1 + (2 * fit_cr[0] * y_eval / 2. + fit_cr[1]) ** 2) ** 1.5) / np.absolute(2 * fit_cr[0])
return curve_radius
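# Illustrative sketch (not part of the original helpers): curvature of a gentle
# bend described by a hypothetical second-order polynomial x = f(y) in pixel space.
def _demo_curvature():
    fit = np.poly1d([1e-4, -0.1, 400.0])  # made-up coefficients
    return calc_curvature(fit)  # radius of curvature in meters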
if __name__ == '__main__':
import matplotlib.pyplot as plt
from scipy import misc
import os
image = misc.imread('test_images' + os.sep + 'test2.jpg')
masked_image = generate_lane_mask(image)
plt.imshow(masked_image, cmap='gray')
plt.show()
|
{
"content_hash": "a1d8fd7bcfa6c5edc517321bef87e098",
"timestamp": "",
"source": "github",
"line_count": 568,
"max_line_length": 140,
"avg_line_length": 24.79225352112676,
"alnum_prop": 0.551839227382474,
"repo_name": "akshaybabloo/Car-ND",
"id": "036ecdd769a6819a89364ecbf5f894179607de3f",
"size": "14082",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Project_4/old_process/helper.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "551"
},
{
"name": "Jupyter Notebook",
"bytes": "16855408"
},
{
"name": "Python",
"bytes": "367767"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from .instrumentation.wsgi import InstanaWSGIMiddleware
# Alias for historical name
iWSGIMiddleware = InstanaWSGIMiddleware
|
{
"content_hash": "69b19c75f3d1a9843c814358a59c359f",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 55,
"avg_line_length": 27.5,
"alnum_prop": 0.8363636363636363,
"repo_name": "instana/python-sensor",
"id": "666991c3a73c6f9e9f851e09e4bb24f2e9a3b9ff",
"size": "231",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "instana/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "419"
},
{
"name": "HTML",
"bytes": "154"
},
{
"name": "Python",
"bytes": "1056302"
}
],
"symlink_target": ""
}
|
"""Tests for tfx.tools.cli.handler.beam_dag_runner_patcher."""
from unittest import mock
import tensorflow as tf
from tfx.orchestration import pipeline as tfx_pipeline
from tfx.orchestration.beam import beam_dag_runner
from tfx.tools.cli.handler import beam_dag_runner_patcher
_PIPELINE_NAME = 'pipeline1'
class BeamDagRunnerPatcherTest(tf.test.TestCase):
@mock.patch.object(beam_dag_runner.BeamDagRunner, 'run', autospec=True)
def testPatcher(self, mock_run):
patcher = beam_dag_runner_patcher.BeamDagRunnerPatcher()
with patcher.patch() as context:
beam_dag_runner.BeamDagRunner().run(
tfx_pipeline.Pipeline(_PIPELINE_NAME, ''))
mock_run.assert_not_called()
self.assertEqual(context[patcher.PIPELINE_NAME], _PIPELINE_NAME)
if __name__ == '__main__':
tf.test.main()
|
{
"content_hash": "c4c75d7a84ab4ec3bbfa88165e051f61",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 73,
"avg_line_length": 31.423076923076923,
"alnum_prop": 0.7319461444308446,
"repo_name": "tensorflow/tfx",
"id": "8dc24c85c242a91c9ddf6c8b588e6bab99b1bc08",
"size": "1413",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tfx/tools/cli/handler/beam_dag_runner_patcher_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "7405"
},
{
"name": "Jupyter Notebook",
"bytes": "38579"
},
{
"name": "Python",
"bytes": "6009050"
},
{
"name": "Shell",
"bytes": "34056"
},
{
"name": "Starlark",
"bytes": "20324"
}
],
"symlink_target": ""
}
|
from __future__ import print_function, division
from collections import defaultdict
from nltk.compat import Counter
from nltk.tag import TaggerI
from nltk.tbl import Feature, Template
from nltk import jsontags
######################################################################
# Brill Templates
######################################################################
@jsontags.register_tag
class Word(Feature):
"""
Feature which examines the text (word) of nearby tokens.
"""
json_tag = 'nltk.tag.brill.Word'
@staticmethod
def extract_property(tokens, index):
"""@return: The given token's text."""
return tokens[index][0]
@jsontags.register_tag
class Pos(Feature):
"""
Feature which examines the tags of nearby tokens.
"""
json_tag = 'nltk.tag.brill.Pos'
@staticmethod
def extract_property(tokens, index):
"""@return: The given token's tag."""
return tokens[index][1]
def nltkdemo18():
"""
Return 18 templates, from the original nltk demo, in multi-feature syntax
"""
return [
Template(Pos([-1])),
Template(Pos([1])),
Template(Pos([-2])),
Template(Pos([2])),
Template(Pos([-2, -1])),
Template(Pos([1, 2])),
Template(Pos([-3, -2, -1])),
Template(Pos([1, 2, 3])),
Template(Pos([-1]), Pos([1])),
Template(Word([-1])),
Template(Word([1])),
Template(Word([-2])),
Template(Word([2])),
Template(Word([-2, -1])),
Template(Word([1, 2])),
Template(Word([-3, -2, -1])),
Template(Word([1, 2, 3])),
Template(Word([-1]), Word([1])),
]
def nltkdemo18plus():
"""
Return 18 templates, from the original nltk demo, and additionally a few
multi-feature ones (the motivation is easy comparison with nltkdemo18)
"""
return nltkdemo18() + [
Template(Word([-1]), Pos([1])),
Template(Pos([-1]), Word([1])),
Template(Word([-1]), Word([0]), Pos([1])),
Template(Pos([-1]), Word([0]), Word([1])),
Template(Pos([-1]), Word([0]), Pos([1])),
]
def fntbl37():
"""
Return 37 templates taken from the postagging task of the
fntbl distribution http://www.cs.jhu.edu/~rflorian/fntbl/
(37 is after excluding a handful which do not condition on Pos[0];
fntbl can do that but the current nltk implementation cannot.)
"""
return [
Template(Word([0]), Word([1]), Word([2])),
Template(Word([-1]), Word([0]), Word([1])),
Template(Word([0]), Word([-1])),
Template(Word([0]), Word([1])),
Template(Word([0]), Word([2])),
Template(Word([0]), Word([-2])),
Template(Word([1, 2])),
Template(Word([-2, -1])),
Template(Word([1, 2, 3])),
Template(Word([-3, -2, -1])),
Template(Word([0]), Pos([2])),
Template(Word([0]), Pos([-2])),
Template(Word([0]), Pos([1])),
Template(Word([0]), Pos([-1])),
Template(Word([0])),
Template(Word([-2])),
Template(Word([2])),
Template(Word([1])),
Template(Word([-1])),
Template(Pos([-1]), Pos([1])),
Template(Pos([1]), Pos([2])),
Template(Pos([-1]), Pos([-2])),
Template(Pos([1])),
Template(Pos([-1])),
Template(Pos([-2])),
Template(Pos([2])),
Template(Pos([1, 2, 3])),
Template(Pos([1, 2])),
Template(Pos([-3, -2, -1])),
Template(Pos([-2, -1])),
Template(Pos([1]), Word([0]), Word([1])),
Template(Pos([1]), Word([0]), Word([-1])),
Template(Pos([-1]), Word([-1]), Word([0])),
Template(Pos([-1]), Word([0]), Word([1])),
Template(Pos([-2]), Pos([-1])),
Template(Pos([1]), Pos([2])),
Template(Pos([1]), Pos([2]), Word([1]))
]
def brill24():
"""
Return 24 templates of the seminal TBL paper, Brill (1995)
"""
return [
Template(Pos([-1])),
Template(Pos([1])),
Template(Pos([-2])),
Template(Pos([2])),
Template(Pos([-2, -1])),
Template(Pos([1, 2])),
Template(Pos([-3, -2, -1])),
Template(Pos([1, 2, 3])),
Template(Pos([-1]), Pos([1])),
Template(Pos([-2]), Pos([-1])),
Template(Pos([1]), Pos([2])),
Template(Word([-1])),
Template(Word([1])),
Template(Word([-2])),
Template(Word([2])),
Template(Word([-2, -1])),
Template(Word([1, 2])),
Template(Word([-1, 0])),
Template(Word([0, 1])),
Template(Word([0])),
Template(Word([-1]), Pos([-1])),
Template(Word([1]), Pos([1])),
Template(Word([0]), Word([-1]), Pos([-1])),
Template(Word([0]), Word([1]), Pos([1])),
]
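# Illustrative sketch (not part of the original module): plugging a template set
# into nltk's BrillTaggerTrainer. The corpus split and parameter values are
# hypothetical; any TaggerI (e.g. a unigram tagger) can serve as initial tagger.
def _demo_train_brill(initial_tagger, train_sents):
    from nltk.tag.brill_trainer import BrillTaggerTrainer
    trainer = BrillTaggerTrainer(initial_tagger, brill24(), trace=0)
    # Learn at most 100 transformation rules from the tagged training sentences.
    return trainer.train(train_sents, max_rules=100)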
def describe_template_sets():
"""
    Print the available template sets in this demo, with a short description.
"""
import inspect
import sys
# a bit of magic to get all functions in this module
templatesets = inspect.getmembers(sys.modules[__name__], inspect.isfunction)
for (name, obj) in templatesets:
if name == "describe_template_sets":
continue
print(name, obj.__doc__, "\n")
######################################################################
# The Brill Tagger
######################################################################
@jsontags.register_tag
class BrillTagger(TaggerI):
"""
Brill's transformational rule-based tagger. Brill taggers use an
initial tagger (such as ``tag.DefaultTagger``) to assign an initial
tag sequence to a text; and then apply an ordered list of
transformational rules to correct the tags of individual tokens.
These transformation rules are specified by the ``TagRule``
interface.
Brill taggers can be created directly, from an initial tagger and
a list of transformational rules; but more often, Brill taggers
are created by learning rules from a training corpus, using one
of the TaggerTrainers available.
"""
json_tag = 'nltk.tag.BrillTagger'
def __init__(self, initial_tagger, rules, training_stats=None):
"""
:param initial_tagger: The initial tagger
:type initial_tagger: TaggerI
:param rules: An ordered list of transformation rules that
should be used to correct the initial tagging.
:type rules: list(TagRule)
:param training_stats: A dictionary of statistics collected
during training, for possible later use
:type training_stats: dict
"""
self._initial_tagger = initial_tagger
self._rules = tuple(rules)
self._training_stats = training_stats
def encode_json_obj(self):
return self._initial_tagger, self._rules, self._training_stats
@classmethod
def decode_json_obj(cls, obj):
_initial_tagger, _rules, _training_stats = obj
return cls(_initial_tagger, _rules, _training_stats)
def rules(self):
"""
Return the ordered list of transformation rules that this tagger has learnt
:return: the ordered list of transformation rules that correct the initial tagging
:rtype: list of Rules
"""
return self._rules
def train_stats(self, statistic=None):
"""
Return a named statistic collected during training, or a dictionary of all
available statistics if no name given
:param statistic: name of statistic
:type statistic: str
:return: some statistic collected during training of this tagger
:rtype: any (but usually a number)
"""
if statistic is None:
return self._training_stats
else:
return self._training_stats.get(statistic)
def tag(self, tokens):
# Inherit documentation from TaggerI
# Run the initial tagger.
tagged_tokens = self._initial_tagger.tag(tokens)
# Create a dictionary that maps each tag to a list of the
# indices of tokens that have that tag.
tag_to_positions = defaultdict(set)
for i, (token, tag) in enumerate(tagged_tokens):
tag_to_positions[tag].add(i)
# Apply each rule, in order. Only try to apply rules at
# positions that have the desired original tag.
for rule in self._rules:
# Find the positions where it might apply
positions = tag_to_positions.get(rule.original_tag, [])
# Apply the rule at those positions.
changed = rule.apply(tagged_tokens, positions)
# Update tag_to_positions with the positions of tags that
# were modified.
for i in changed:
tag_to_positions[rule.original_tag].remove(i)
tag_to_positions[rule.replacement_tag].add(i)
return tagged_tokens
def print_template_statistics(self, test_stats=None, printunused=True):
"""
Print a list of all templates, ranked according to efficiency.
If test_stats is available, the templates are ranked according to their
relative contribution (summed for all rules created from a given template,
weighted by score) to the performance on the test set. If no test_stats, then
statistics collected during training are used instead. There is also
an unweighted measure (just counting the rules). This is less informative,
though, as many low-score rules will appear towards end of training.
:param test_stats: dictionary of statistics collected during testing
:type test_stats: dict of str -> any (but usually numbers)
:param printunused: if True, print a list of all unused templates
:type printunused: bool
:return: None
:rtype: None
"""
tids = [r.templateid for r in self._rules]
train_stats = self.train_stats()
trainscores = train_stats['rulescores']
assert len(trainscores) == len(tids), "corrupt statistics: " \
"{0} train scores for {1} rules".format(trainscores, tids)
template_counts = Counter(tids)
weighted_traincounts = Counter()
for (tid, score) in zip(tids, trainscores):
weighted_traincounts[tid] += score
tottrainscores = sum(trainscores)
# det_tplsort() is for deterministic sorting;
# the otherwise convenient Counter.most_common() unfortunately
# does not break ties deterministically
# between python versions and will break cross-version tests
def det_tplsort(tpl_value):
return (tpl_value[1], repr(tpl_value[0]))
def print_train_stats():
print("TEMPLATE STATISTICS (TRAIN) {0} templates, {1} rules)".format(
len(template_counts),
len(tids))
)
print("TRAIN ({tokencount:7d} tokens) initial {initialerrors:5d} {initialacc:.4f} "
"final: {finalerrors:5d} {finalacc:.4f} ".format(**train_stats))
head = "#ID | Score (train) | #Rules | Template"
print(head, "\n", "-" * len(head), sep="")
train_tplscores = sorted(weighted_traincounts.items(), key=det_tplsort, reverse=True)
for (tid, trainscore) in train_tplscores:
s = "{0} | {1:5d} {2:5.3f} |{3:4d} {4:.3f} | {5}".format(
tid,
trainscore,
trainscore/tottrainscores,
template_counts[tid],
template_counts[tid]/len(tids),
Template.ALLTEMPLATES[int(tid)],
)
print(s)
def print_testtrain_stats():
testscores = test_stats['rulescores']
print("TEMPLATE STATISTICS (TEST AND TRAIN) ({0} templates, {1} rules)".format(
len(template_counts),
len(tids)),
)
print("TEST ({tokencount:7d} tokens) initial {initialerrors:5d} {initialacc:.4f} "
"final: {finalerrors:5d} {finalacc:.4f} ".format(**test_stats))
print("TRAIN ({tokencount:7d} tokens) initial {initialerrors:5d} {initialacc:.4f} "
"final: {finalerrors:5d} {finalacc:.4f} ".format(**train_stats))
weighted_testcounts = Counter()
for (tid, score) in zip(tids, testscores):
weighted_testcounts[tid] += score
tottestscores = sum(testscores)
head = "#ID | Score (test) | Score (train) | #Rules | Template"
print(head, "\n", "-" * len(head), sep="")
test_tplscores = sorted(weighted_testcounts.items(), key=det_tplsort, reverse=True)
for (tid, testscore) in test_tplscores:
s = "{0:s} |{1:5d} {2:6.3f} | {3:4d} {4:.3f} |{5:4d} {6:.3f} | {7:s}".format(
tid,
testscore,
testscore/tottestscores,
weighted_traincounts[tid],
weighted_traincounts[tid]/tottrainscores,
template_counts[tid],
template_counts[tid]/len(tids),
Template.ALLTEMPLATES[int(tid)],
)
print(s)
def print_unused_templates():
usedtpls = set([int(tid) for tid in tids])
unused = [(tid, tpl) for (tid, tpl) in enumerate(Template.ALLTEMPLATES) if tid not in usedtpls]
print("UNUSED TEMPLATES ({0})".format(len(unused)))
for (tid, tpl) in unused:
print("{0:03d} {1:s}".format(tid, tpl))
if test_stats is None:
print_train_stats()
else:
print_testtrain_stats()
print()
if printunused:
print_unused_templates()
print()
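        # Illustrative note (added for clarity; ``tagger`` is a placeholder for a
        # trained instance of this class): the train-only report is printed with
        # ``tagger.print_template_statistics(printunused=False)``; passing the
        # test_stats dict returned by batch_tag_incremental() adds the test columns.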
def batch_tag_incremental(self, sequences, gold):
"""
Tags by applying each rule to the entire corpus (rather than all rules to a
single sequence). The point is to collect statistics on the test set for
individual rules.
NOTE: This is inefficient (does not build any index, so will traverse the entire
corpus N times for N rules) -- usually you would not care about statistics for
individual rules and thus use batch_tag() instead
:param sequences: lists of token sequences (sentences, in some applications) to be tagged
:type sequences: list of list of strings
:param gold: the gold standard
:type gold: list of list of strings
:returns: tuple of (tagged_sequences, ordered list of rule scores (one for each rule))
"""
def counterrors(xs):
return sum(t[1] != g[1] for pair in zip(xs, gold) for (t, g) in zip(*pair))
testing_stats = {}
testing_stats['tokencount'] = sum(len(t) for t in sequences)
testing_stats['sequencecount'] = len(sequences)
tagged_tokenses = [self._initial_tagger.tag(tokens) for tokens in sequences]
testing_stats['initialerrors'] = counterrors(tagged_tokenses)
testing_stats['initialacc'] = 1 - testing_stats['initialerrors']/testing_stats['tokencount']
# Apply each rule to the entire corpus, in order
errors = [testing_stats['initialerrors']]
for rule in self._rules:
for tagged_tokens in tagged_tokenses:
rule.apply(tagged_tokens)
errors.append(counterrors(tagged_tokenses))
testing_stats['rulescores'] = [err0 - err1 for (err0, err1) in zip(errors, errors[1:])]
testing_stats['finalerrors'] = errors[-1]
testing_stats['finalacc'] = 1 - testing_stats['finalerrors']/testing_stats['tokencount']
return (tagged_tokenses, testing_stats)
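# Hedged usage sketch, added for illustration; ``trained_tagger`` and the other
# names below are placeholders, not part of the original API. It shows how
# batch_tag_incremental() and print_template_statistics() are meant to combine:
# collect per-rule statistics on a held-out corpus, then print the joint report.
def _example_report_on_test_set(trained_tagger, test_sequences, test_gold):  # pragma: no cover
    """Collect per-rule test statistics and print the combined template report."""
    _tagged, test_stats = trained_tagger.batch_tag_incremental(test_sequences, test_gold)
    trained_tagger.print_template_statistics(test_stats, printunused=False)
    return test_stats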
if __name__ == "__main__":
import doctest
doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE)
|
{
"content_hash": "4167ebbafda1fa8dcc001392617d3d45",
"timestamp": "",
"source": "github",
"line_count": 419,
"max_line_length": 107,
"avg_line_length": 37.9928400954654,
"alnum_prop": 0.5635404233934292,
"repo_name": "zimmermegan/smarda",
"id": "3aa46f87bcc73ac055bdb80368addcca5680ae2e",
"size": "16254",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "nltk-3.0.3/nltk/tag/brill.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "7308"
},
{
"name": "GLSL",
"bytes": "381901"
},
{
"name": "Groff",
"bytes": "1093743"
},
{
"name": "Java",
"bytes": "392985"
},
{
"name": "JavaScript",
"bytes": "30326"
},
{
"name": "Makefile",
"bytes": "769"
},
{
"name": "Perl",
"bytes": "7219"
},
{
"name": "Python",
"bytes": "3899088"
},
{
"name": "Shell",
"bytes": "1072"
},
{
"name": "XSLT",
"bytes": "5485"
}
],
"symlink_target": ""
}
|
import unittest
from tests.util.global_reactor import TEST_SWITCHES
from tests.util.protocol_util import SshTester, TelnetTester
class RoutingEngineTest(unittest.TestCase):
def test_2_ssh(self):
conf = TEST_SWITCHES["brocade"]
tester1 = SshTester("ssh-1", "127.0.0.1", conf["ssh"], u'root', u'root')
tester2 = SshTester("ssh-2", "127.0.0.1", conf["ssh"], u'root', u'root')
tester1.connect()
tester1.write("enable")
tester1.read("Password:")
tester1.write_invisible(conf["extra"]["password"])
tester1.read("SSH@my_switch#")
tester1.write("skip-page-display")
tester1.read("SSH@my_switch#")
tester2.connect()
tester1.write("skip-page-display")
tester1.read("SSH@my_switch#")
tester2.write("enable")
tester2.read("Password:")
tester2.write_invisible(conf["extra"]["password"])
tester2.read("SSH@my_switch#")
tester2.write("configure terminal")
tester2.read("SSH@my_switch(config)#")
tester1.write("skip-page-display")
tester1.read("SSH@my_switch#")
tester2.write("exit")
tester2.read("SSH@my_switch#")
tester1.write("exit")
tester1.read_eof()
tester1.disconnect()
tester2.write("exit")
tester2.read_eof()
tester2.disconnect()
def test_2_telnet(self):
conf = TEST_SWITCHES["cisco"]
tester1 = TelnetTester("telnet-1", "127.0.0.1", conf["telnet"], 'root', 'root')
tester2 = TelnetTester("telnet-2", "127.0.0.1", conf["telnet"], 'root', 'root')
tester1.connect()
tester1.write("enable")
tester1.read("Password: ")
tester1.write_invisible(conf["extra"]["password"])
tester1.read("my_switch#")
tester1.write("terminal length 0")
tester1.read("my_switch#")
tester2.connect()
tester1.write("terminal length 0")
tester1.read("my_switch#")
tester2.write("enable")
tester2.read("Password: ")
tester2.write_invisible(conf["extra"]["password"])
tester2.read("my_switch#")
tester2.write("configure terminal")
tester2.readln("Enter configuration commands, one per line. End with CNTL/Z.")
tester2.read("my_switch(config)#")
tester1.write("terminal length 0")
tester1.read("my_switch#")
tester2.write("exit")
tester2.read("my_switch#")
tester1.write("exit")
tester1.read_eof()
tester1.disconnect()
tester2.write("exit")
tester2.read_eof()
tester2.disconnect()
|
{
"content_hash": "1c8a0f1833d26eb98523b4e2dd315db2",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 87,
"avg_line_length": 31.08235294117647,
"alnum_prop": 0.593111279333838,
"repo_name": "internaphosting/fake-switches",
"id": "d91b144dc1f193b928a63a7cd04835940961d4ad",
"size": "2642",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tests/test_simultaneous_connections.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "587681"
}
],
"symlink_target": ""
}
|
from datetime import datetime, timedelta
from NodeDefender.db.sql import SQL, EventModel, iCPEModel, SensorModel
import NodeDefender
def latest(icpe, sensor):
return EventModel.query.join(iCPEModel).join(SensorModel).\
filter(iCPEModel.mac_address == icpe).\
filter(SensorModel.mac_address == sensor).first()
def get(icpe, sensor, limit = None):
if limit is None:
limit = 10
return SQL.session.query(EventModel).\
join(EventModel.sensor).\
join(EventModel.icpe).\
filter(iCPEModel.mac_address == icpe).\
filter(SensorModel.sensor_id == sensor).\
order_by(EventModel.date.desc()).limit(int(limit)).all()
def average(icpe, sensor, time_ago = None):
if time_ago is None:
time_ago = (datetime.now() - timedelta(days=1))
total_events = SQL.session.query(EventModel).\
join(EventModel.sensor).\
join(EventModel.icpe).\
filter(iCPEModel.mac_address == icpe).\
filter(SensorModel.sensor_id == sensor).\
filter(EventModel.date > time_ago).all()
ret_data = {}
ret_data['icpe'] = icpe
ret_data['sensor'] = sensor
ret_data['total'] = len(total_events)
ret_data['critical'] = len([event for event in total_events if
event.critical])
ret_data['normal'] = len([event for event in total_events if
event.normal])
return ret_data
def put(mac, sensor_id, commandclass, commandclasstype, state, value):
icpe = NodeDefender.db.icpe.get_sql(mac)
sensor = NodeDefender.db.sensor.get_sql(mac, sensor_id)
commandclass = NodeDefender.db.commandclass.\
get_sql(mac, sensor_id, classname = commandclass)
if commandclass is None:
return
event = EventModel(state, value)
event.node = icpe.node
event.icpe = icpe
event.sensor = sensor
event.commandclass = commandclass
if commandclasstype:
event.commandclasstype = NodeDefender.db.commandclass.\
get_type(mac, sensor_id, commandclass.name, commandclasstype)
SQL.session.add(event)
SQL.session.commit()
return True
|
{
"content_hash": "612a0d39ac73a06c06979e80d145a30f",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 77,
"avg_line_length": 35.15873015873016,
"alnum_prop": 0.6275395033860045,
"repo_name": "CTSNE/NodeDefender",
"id": "b2b02f9986366c8543574e01d529d97134561671",
"size": "2215",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "NodeDefender/db/data/sensor/event.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5419"
},
{
"name": "HTML",
"bytes": "188223"
},
{
"name": "JavaScript",
"bytes": "2861"
},
{
"name": "Python",
"bytes": "290127"
}
],
"symlink_target": ""
}
|
import os
import textwrap
from pip._vendor.six.moves.urllib import parse as urllib_parse
from tests.lib import pyversion
def test_find_links_relative_path(script, data):
"""Test find-links as a relative path."""
result = script.pip(
'install',
'parent==0.1',
'--no-index',
'--find-links',
'packages/',
cwd=data.root,
)
egg_info_folder = (
script.site_packages / 'parent-0.1-py%s.egg-info' % pyversion
)
initools_folder = script.site_packages / 'parent'
assert egg_info_folder in result.files_created, str(result)
assert initools_folder in result.files_created, str(result)
def test_find_links_requirements_file_relative_path(script, data):
"""Test find-links as a relative path to a reqs file."""
script.scratch_path.joinpath("test-req.txt").write_text(textwrap.dedent("""
--no-index
--find-links=%s
parent==0.1
""" % data.packages.replace(os.path.sep, '/')))
result = script.pip(
'install',
'-r',
script.scratch_path / "test-req.txt",
cwd=data.root,
)
egg_info_folder = (
script.site_packages / 'parent-0.1-py%s.egg-info' % pyversion
)
initools_folder = script.site_packages / 'parent'
assert egg_info_folder in result.files_created, str(result)
assert initools_folder in result.files_created, str(result)
def test_install_from_file_index_hash_link(script, data):
"""
Test that a pkg can be installed from a file:// index using a link with a
hash
"""
result = script.pip('install', '-i', data.index_url(), 'simple==1.0')
egg_info_folder = (
script.site_packages / 'simple-1.0-py%s.egg-info' % pyversion
)
assert egg_info_folder in result.files_created, str(result)
def test_file_index_url_quoting(script, data):
"""
Test url quoting of file index url with a space
"""
index_url = data.index_url(urllib_parse.quote("in dex"))
result = script.pip(
'install', '-vvv', '--index-url', index_url, 'simple'
)
assert (script.site_packages / 'simple') in result.files_created, (
str(result.stdout)
)
assert (
script.site_packages / 'simple-1.0-py%s.egg-info' % pyversion
) in result.files_created, str(result)
|
{
"content_hash": "1852dd68d851bb32ba93b4ee72d5521d",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 79,
"avg_line_length": 31.671232876712327,
"alnum_prop": 0.6237024221453287,
"repo_name": "xavfernandez/pip",
"id": "5a31db8b4826004d65c7c26d2d1ac8c096de3bd5",
"size": "2312",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/functional/test_install_index.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Gherkin",
"bytes": "1618"
},
{
"name": "HTML",
"bytes": "2625"
},
{
"name": "PowerShell",
"bytes": "2137"
},
{
"name": "Python",
"bytes": "1629000"
},
{
"name": "Shell",
"bytes": "2109"
}
],
"symlink_target": ""
}
|
import unittest
from typing import List
from unittest import mock
from uuid import UUID
import pytest
from google.api_core.exceptions import AlreadyExists, GoogleAPICallError
from google.cloud.exceptions import NotFound
from google.cloud.pubsub_v1.types import ReceivedMessage
from googleapiclient.errors import HttpError
from parameterized import parameterized
from airflow.providers.google.cloud.hooks.pubsub import PubSubException, PubSubHook
from airflow.version import version
BASE_STRING = 'airflow.providers.google.common.hooks.base_google.{}'
PUBSUB_STRING = 'airflow.providers.google.cloud.hooks.pubsub.{}'
EMPTY_CONTENT = b''
TEST_PROJECT = 'test-project'
TEST_TOPIC = 'test-topic'
TEST_SUBSCRIPTION = 'test-subscription'
TEST_UUID = UUID('cf4a56d2-8101-4217-b027-2af6216feb48')
TEST_MESSAGES = [
{'data': b'Hello, World!', 'attributes': {'type': 'greeting'}},
{'data': b'Knock, knock'},
{'attributes': {'foo': ''}},
]
EXPANDED_TOPIC = f'projects/{TEST_PROJECT}/topics/{TEST_TOPIC}'
EXPANDED_SUBSCRIPTION = f'projects/{TEST_PROJECT}/subscriptions/{TEST_SUBSCRIPTION}'
LABELS = {'airflow-version': 'v' + version.replace('.', '-').replace('+', '-')}
def mock_init(
self,
gcp_conn_id,
delegate_to=None,
impersonation_chain=None,
):
pass
class TestPubSubHook(unittest.TestCase):
def setUp(self):
with mock.patch(BASE_STRING.format('GoogleBaseHook.__init__'), new=mock_init):
self.pubsub_hook = PubSubHook(gcp_conn_id='test')
def _generate_messages(self, count) -> List[ReceivedMessage]:
return [
ReceivedMessage(
ack_id=str(i),
message={
"data": f'Message {i}'.encode(),
"attributes": {"type": "generated message"},
},
)
for i in range(1, count + 1)
]
@mock.patch(
"airflow.providers.google.cloud.hooks.pubsub.PubSubHook.client_info", new_callable=mock.PropertyMock
)
@mock.patch("airflow.providers.google.cloud.hooks.pubsub.PubSubHook._get_credentials")
@mock.patch("airflow.providers.google.cloud.hooks.pubsub.PublisherClient")
def test_publisher_client_creation(self, mock_client, mock_get_creds, mock_client_info):
assert self.pubsub_hook._client is None
result = self.pubsub_hook.get_conn()
mock_client.assert_called_once_with(
credentials=mock_get_creds.return_value, client_info=mock_client_info.return_value
)
assert mock_client.return_value == result
assert self.pubsub_hook._client == result
@mock.patch(
"airflow.providers.google.cloud.hooks.pubsub.PubSubHook.client_info", new_callable=mock.PropertyMock
)
@mock.patch("airflow.providers.google.cloud.hooks.pubsub.PubSubHook._get_credentials")
@mock.patch("airflow.providers.google.cloud.hooks.pubsub.SubscriberClient")
def test_subscriber_client_creation(self, mock_client, mock_get_creds, mock_client_info):
assert self.pubsub_hook._client is None
result = self.pubsub_hook.subscriber_client
mock_client.assert_called_once_with(
credentials=mock_get_creds.return_value, client_info=mock_client_info.return_value
)
assert mock_client.return_value == result
@mock.patch(PUBSUB_STRING.format('PubSubHook.get_conn'))
def test_create_nonexistent_topic(self, mock_service):
create_method = mock_service.return_value.create_topic
self.pubsub_hook.create_topic(project_id=TEST_PROJECT, topic=TEST_TOPIC)
create_method.assert_called_once_with(
request=dict(name=EXPANDED_TOPIC, labels=LABELS, message_storage_policy=None, kms_key_name=None),
retry=None,
timeout=None,
metadata=(),
)
@mock.patch(PUBSUB_STRING.format('PubSubHook.get_conn'))
def test_delete_topic(self, mock_service):
delete_method = mock_service.return_value.delete_topic
self.pubsub_hook.delete_topic(project_id=TEST_PROJECT, topic=TEST_TOPIC)
delete_method.assert_called_once_with(
request=dict(topic=EXPANDED_TOPIC), retry=None, timeout=None, metadata=()
)
@mock.patch(PUBSUB_STRING.format('PubSubHook.get_conn'))
def test_delete_nonexisting_topic_failifnotexists(self, mock_service):
mock_service.return_value.delete_topic.side_effect = NotFound(
f'Topic does not exists: {EXPANDED_TOPIC}'
)
with pytest.raises(PubSubException) as ctx:
self.pubsub_hook.delete_topic(project_id=TEST_PROJECT, topic=TEST_TOPIC, fail_if_not_exists=True)
assert str(ctx.value) == f'Topic does not exist: {EXPANDED_TOPIC}'
@mock.patch(PUBSUB_STRING.format('PubSubHook.get_conn'))
def test_delete_topic_api_call_error(self, mock_service):
mock_service.return_value.delete_topic.side_effect = GoogleAPICallError(
f'Error deleting topic: {EXPANDED_TOPIC}'
)
with pytest.raises(PubSubException):
self.pubsub_hook.delete_topic(project_id=TEST_PROJECT, topic=TEST_TOPIC, fail_if_not_exists=True)
@mock.patch(PUBSUB_STRING.format('PubSubHook.get_conn'))
def test_create_preexisting_topic_failifexists(self, mock_service):
mock_service.return_value.create_topic.side_effect = AlreadyExists(
f'Topic already exists: {TEST_TOPIC}'
)
with pytest.raises(PubSubException) as ctx:
self.pubsub_hook.create_topic(project_id=TEST_PROJECT, topic=TEST_TOPIC, fail_if_exists=True)
assert str(ctx.value) == f'Topic already exists: {TEST_TOPIC}'
@mock.patch(PUBSUB_STRING.format('PubSubHook.get_conn'))
def test_create_preexisting_topic_nofailifexists(self, mock_service):
mock_service.return_value.create_topic.side_effect = AlreadyExists(
f'Topic already exists: {EXPANDED_TOPIC}'
)
self.pubsub_hook.create_topic(project_id=TEST_PROJECT, topic=TEST_TOPIC)
@mock.patch(PUBSUB_STRING.format('PubSubHook.get_conn'))
def test_create_topic_api_call_error(self, mock_service):
mock_service.return_value.create_topic.side_effect = GoogleAPICallError(
f'Error creating topic: {TEST_TOPIC}'
)
with pytest.raises(PubSubException):
self.pubsub_hook.create_topic(project_id=TEST_PROJECT, topic=TEST_TOPIC, fail_if_exists=True)
@mock.patch(PUBSUB_STRING.format('PubSubHook.subscriber_client'))
def test_create_nonexistent_subscription(self, mock_service):
create_method = mock_service.create_subscription
response = self.pubsub_hook.create_subscription(
project_id=TEST_PROJECT, topic=TEST_TOPIC, subscription=TEST_SUBSCRIPTION
)
create_method.assert_called_once_with(
request=dict(
name=EXPANDED_SUBSCRIPTION,
topic=EXPANDED_TOPIC,
push_config=None,
ack_deadline_seconds=10,
retain_acked_messages=None,
message_retention_duration=None,
labels=LABELS,
enable_message_ordering=False,
expiration_policy=None,
filter=None,
dead_letter_policy=None,
retry_policy=None,
),
retry=None,
timeout=None,
metadata=(),
)
assert TEST_SUBSCRIPTION == response
@mock.patch(PUBSUB_STRING.format('PubSubHook.subscriber_client'))
def test_create_subscription_different_project_topic(self, mock_service):
create_method = mock_service.create_subscription
response = self.pubsub_hook.create_subscription(
project_id=TEST_PROJECT,
topic=TEST_TOPIC,
subscription=TEST_SUBSCRIPTION,
subscription_project_id='a-different-project',
)
expected_subscription = f'projects/a-different-project/subscriptions/{TEST_SUBSCRIPTION}'
create_method.assert_called_once_with(
request=dict(
name=expected_subscription,
topic=EXPANDED_TOPIC,
push_config=None,
ack_deadline_seconds=10,
retain_acked_messages=None,
message_retention_duration=None,
labels=LABELS,
enable_message_ordering=False,
expiration_policy=None,
filter=None,
dead_letter_policy=None,
retry_policy=None,
),
retry=None,
timeout=None,
metadata=(),
)
assert TEST_SUBSCRIPTION == response
@mock.patch(PUBSUB_STRING.format('PubSubHook.subscriber_client'))
def test_delete_subscription(self, mock_service):
self.pubsub_hook.delete_subscription(project_id=TEST_PROJECT, subscription=TEST_SUBSCRIPTION)
delete_method = mock_service.delete_subscription
delete_method.assert_called_once_with(
request=dict(subscription=EXPANDED_SUBSCRIPTION), retry=None, timeout=None, metadata=()
)
@mock.patch(PUBSUB_STRING.format('PubSubHook.subscriber_client'))
def test_delete_nonexisting_subscription_failifnotexists(self, mock_service):
mock_service.delete_subscription.side_effect = NotFound(
f'Subscription does not exists: {EXPANDED_SUBSCRIPTION}'
)
with pytest.raises(PubSubException) as ctx:
self.pubsub_hook.delete_subscription(
project_id=TEST_PROJECT, subscription=TEST_SUBSCRIPTION, fail_if_not_exists=True
)
assert str(ctx.value) == f'Subscription does not exist: {EXPANDED_SUBSCRIPTION}'
@mock.patch(PUBSUB_STRING.format('PubSubHook.subscriber_client'))
def test_delete_subscription_api_call_error(self, mock_service):
mock_service.delete_subscription.side_effect = GoogleAPICallError(
f'Error deleting subscription {EXPANDED_SUBSCRIPTION}'
)
with pytest.raises(PubSubException):
self.pubsub_hook.delete_subscription(
project_id=TEST_PROJECT, subscription=TEST_SUBSCRIPTION, fail_if_not_exists=True
)
@mock.patch(PUBSUB_STRING.format('PubSubHook.subscriber_client'))
@mock.patch(PUBSUB_STRING.format('uuid4'), new_callable=mock.Mock(return_value=lambda: TEST_UUID))
def test_create_subscription_without_subscription_name(self, mock_uuid, mock_service):
create_method = mock_service.create_subscription
expected_name = EXPANDED_SUBSCRIPTION.replace(TEST_SUBSCRIPTION, f'sub-{TEST_UUID}')
response = self.pubsub_hook.create_subscription(project_id=TEST_PROJECT, topic=TEST_TOPIC)
create_method.assert_called_once_with(
request=dict(
name=expected_name,
topic=EXPANDED_TOPIC,
push_config=None,
ack_deadline_seconds=10,
retain_acked_messages=None,
message_retention_duration=None,
labels=LABELS,
enable_message_ordering=False,
expiration_policy=None,
filter=None,
dead_letter_policy=None,
retry_policy=None,
),
retry=None,
timeout=None,
metadata=(),
)
assert f'sub-{TEST_UUID}' == response
@mock.patch(PUBSUB_STRING.format('PubSubHook.subscriber_client'))
def test_create_subscription_with_ack_deadline(self, mock_service):
create_method = mock_service.create_subscription
response = self.pubsub_hook.create_subscription(
project_id=TEST_PROJECT, topic=TEST_TOPIC, subscription=TEST_SUBSCRIPTION, ack_deadline_secs=30
)
create_method.assert_called_once_with(
request=dict(
name=EXPANDED_SUBSCRIPTION,
topic=EXPANDED_TOPIC,
push_config=None,
ack_deadline_seconds=30,
retain_acked_messages=None,
message_retention_duration=None,
labels=LABELS,
enable_message_ordering=False,
expiration_policy=None,
filter=None,
dead_letter_policy=None,
retry_policy=None,
),
retry=None,
timeout=None,
metadata=(),
)
assert TEST_SUBSCRIPTION == response
@mock.patch(PUBSUB_STRING.format('PubSubHook.subscriber_client'))
def test_create_subscription_with_filter(self, mock_service):
create_method = mock_service.create_subscription
response = self.pubsub_hook.create_subscription(
project_id=TEST_PROJECT,
topic=TEST_TOPIC,
subscription=TEST_SUBSCRIPTION,
filter_='attributes.domain="com"',
)
create_method.assert_called_once_with(
request=dict(
name=EXPANDED_SUBSCRIPTION,
topic=EXPANDED_TOPIC,
push_config=None,
ack_deadline_seconds=10,
retain_acked_messages=None,
message_retention_duration=None,
labels=LABELS,
enable_message_ordering=False,
expiration_policy=None,
filter='attributes.domain="com"',
dead_letter_policy=None,
retry_policy=None,
),
retry=None,
timeout=None,
metadata=(),
)
assert TEST_SUBSCRIPTION == response
@mock.patch(PUBSUB_STRING.format('PubSubHook.subscriber_client'))
def test_create_subscription_failifexists(self, mock_service):
mock_service.create_subscription.side_effect = AlreadyExists(
f'Subscription already exists: {EXPANDED_SUBSCRIPTION}'
)
with pytest.raises(PubSubException) as ctx:
self.pubsub_hook.create_subscription(
project_id=TEST_PROJECT, topic=TEST_TOPIC, subscription=TEST_SUBSCRIPTION, fail_if_exists=True
)
assert str(ctx.value) == f'Subscription already exists: {EXPANDED_SUBSCRIPTION}'
@mock.patch(PUBSUB_STRING.format('PubSubHook.subscriber_client'))
def test_create_subscription_api_call_error(self, mock_service):
mock_service.create_subscription.side_effect = GoogleAPICallError(
f'Error creating subscription {EXPANDED_SUBSCRIPTION}'
)
with pytest.raises(PubSubException):
self.pubsub_hook.create_subscription(
project_id=TEST_PROJECT, topic=TEST_TOPIC, subscription=TEST_SUBSCRIPTION, fail_if_exists=True
)
@mock.patch(PUBSUB_STRING.format('PubSubHook.subscriber_client'))
def test_create_subscription_nofailifexists(self, mock_service):
mock_service.create_subscription.side_effect = AlreadyExists(
f'Subscription already exists: {EXPANDED_SUBSCRIPTION}'
)
response = self.pubsub_hook.create_subscription(
project_id=TEST_PROJECT, topic=TEST_TOPIC, subscription=TEST_SUBSCRIPTION
)
assert TEST_SUBSCRIPTION == response
@mock.patch(PUBSUB_STRING.format('PubSubHook.get_conn'))
def test_publish(self, mock_service):
publish_method = mock_service.return_value.publish
self.pubsub_hook.publish(project_id=TEST_PROJECT, topic=TEST_TOPIC, messages=TEST_MESSAGES)
calls = [
mock.call(topic=EXPANDED_TOPIC, data=message.get("data", b''), **message.get('attributes', {}))
for message in TEST_MESSAGES
]
        publish_method.assert_has_calls(calls)
@mock.patch(PUBSUB_STRING.format('PubSubHook.get_conn'))
def test_publish_api_call_error(self, mock_service):
publish_method = mock_service.return_value.publish
publish_method.side_effect = GoogleAPICallError(f'Error publishing to topic {EXPANDED_SUBSCRIPTION}')
with pytest.raises(PubSubException):
self.pubsub_hook.publish(project_id=TEST_PROJECT, topic=TEST_TOPIC, messages=TEST_MESSAGES)
@mock.patch(PUBSUB_STRING.format('PubSubHook.subscriber_client'))
def test_pull(self, mock_service):
pull_method = mock_service.pull
pulled_messages = []
for i, msg in enumerate(TEST_MESSAGES):
pulled_messages.append({'ackId': i, 'message': msg})
pull_method.return_value.received_messages = pulled_messages
response = self.pubsub_hook.pull(
project_id=TEST_PROJECT, subscription=TEST_SUBSCRIPTION, max_messages=10
)
pull_method.assert_called_once_with(
request=dict(
subscription=EXPANDED_SUBSCRIPTION,
max_messages=10,
return_immediately=False,
),
retry=None,
timeout=None,
metadata=(),
)
assert pulled_messages == response
@mock.patch(PUBSUB_STRING.format('PubSubHook.subscriber_client'))
def test_pull_no_messages(self, mock_service):
pull_method = mock_service.pull
pull_method.return_value.received_messages = []
response = self.pubsub_hook.pull(
project_id=TEST_PROJECT, subscription=TEST_SUBSCRIPTION, max_messages=10
)
pull_method.assert_called_once_with(
request=dict(
subscription=EXPANDED_SUBSCRIPTION,
max_messages=10,
return_immediately=False,
),
retry=None,
timeout=None,
metadata=(),
)
assert [] == response
@parameterized.expand(
[
(exception,)
for exception in [
HttpError(resp={'status': '404'}, content=EMPTY_CONTENT),
GoogleAPICallError("API Call Error"),
]
]
)
@mock.patch(PUBSUB_STRING.format('PubSubHook.subscriber_client'))
def test_pull_fails_on_exception(self, exception, mock_service):
pull_method = mock_service.pull
pull_method.side_effect = exception
with pytest.raises(PubSubException):
self.pubsub_hook.pull(project_id=TEST_PROJECT, subscription=TEST_SUBSCRIPTION, max_messages=10)
pull_method.assert_called_once_with(
request=dict(
subscription=EXPANDED_SUBSCRIPTION,
max_messages=10,
return_immediately=False,
),
retry=None,
timeout=None,
metadata=(),
)
@mock.patch(PUBSUB_STRING.format('PubSubHook.subscriber_client'))
def test_acknowledge_by_ack_ids(self, mock_service):
ack_method = mock_service.acknowledge
self.pubsub_hook.acknowledge(
project_id=TEST_PROJECT, subscription=TEST_SUBSCRIPTION, ack_ids=['1', '2', '3']
)
ack_method.assert_called_once_with(
request=dict(
subscription=EXPANDED_SUBSCRIPTION,
ack_ids=['1', '2', '3'],
),
retry=None,
timeout=None,
metadata=(),
)
@mock.patch(PUBSUB_STRING.format('PubSubHook.subscriber_client'))
def test_acknowledge_by_message_objects(self, mock_service):
ack_method = mock_service.acknowledge
self.pubsub_hook.acknowledge(
project_id=TEST_PROJECT,
subscription=TEST_SUBSCRIPTION,
messages=self._generate_messages(3),
)
ack_method.assert_called_once_with(
request=dict(
subscription=EXPANDED_SUBSCRIPTION,
ack_ids=['1', '2', '3'],
),
retry=None,
timeout=None,
metadata=(),
)
@parameterized.expand(
[
(exception,)
for exception in [
HttpError(resp={'status': '404'}, content=EMPTY_CONTENT),
GoogleAPICallError("API Call Error"),
]
]
)
@mock.patch(PUBSUB_STRING.format('PubSubHook.subscriber_client'))
def test_acknowledge_fails_on_exception(self, exception, mock_service):
ack_method = mock_service.acknowledge
ack_method.side_effect = exception
with pytest.raises(PubSubException):
self.pubsub_hook.acknowledge(
project_id=TEST_PROJECT, subscription=TEST_SUBSCRIPTION, ack_ids=['1', '2', '3']
)
ack_method.assert_called_once_with(
request=dict(
subscription=EXPANDED_SUBSCRIPTION,
ack_ids=['1', '2', '3'],
),
retry=None,
timeout=None,
metadata=(),
)
@parameterized.expand(
[
(messages,)
for messages in [
[{"data": b'test'}],
[{"data": b''}],
[{"data": b'test', "attributes": {"weight": "100kg"}}],
[{"data": b'', "attributes": {"weight": "100kg"}}],
[{"attributes": {"weight": "100kg"}}],
]
]
)
def test_messages_validation_positive(self, messages):
PubSubHook._validate_messages(messages)
@parameterized.expand(
[
([("wrong type",)], "Wrong message type. Must be a dictionary."),
([{"wrong_key": b'test'}], "Wrong message. Dictionary must contain 'data' or 'attributes'."),
([{"data": 'wrong string'}], "Wrong message. 'data' must be send as a bytestring"),
([{"data": None}], "Wrong message. 'data' must be send as a bytestring"),
(
[{"attributes": None}],
"Wrong message. If 'data' is not provided 'attributes' must be a non empty dictionary.",
),
(
[{"attributes": "wrong string"}],
"Wrong message. If 'data' is not provided 'attributes' must be a non empty dictionary.",
),
]
)
def test_messages_validation_negative(self, messages, error_message):
with pytest.raises(PubSubException) as ctx:
PubSubHook._validate_messages(messages)
assert str(ctx.value) == error_message
|
{
"content_hash": "848af263ba158f9d2cba56ebdf25c2f0",
"timestamp": "",
"source": "github",
"line_count": 543,
"max_line_length": 110,
"avg_line_length": 41.101289134438304,
"alnum_prop": 0.6125996953132001,
"repo_name": "mistercrunch/airflow",
"id": "027c2b2f3d89eb99a617f6accd1691b2bba488cc",
"size": "23106",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "tests/providers/google/cloud/hooks/test_pubsub.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "36341"
},
{
"name": "HTML",
"bytes": "99243"
},
{
"name": "JavaScript",
"bytes": "891460"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "773270"
},
{
"name": "Shell",
"bytes": "5659"
}
],
"symlink_target": ""
}
|
"""
Created on Thu May 4 06:47:16 2017
@author: lucas
"""
import tweepy
import pandas as pd
import numpy as np
import time
def authenticate_twitter_api(
credentials_path="../private_data/twitter_credentials.csv"):
"""Athenticates with twitter api
:credentials_path: path to a csv with a single row consisting
of an entry for consumer_key, consumer_secret, access_key,
and access_secret
"""
credentials_df = pd.read_csv(str(credentials_path))
consumer_key = credentials_df['consumer_key'][0]
consumer_secret = credentials_df['consumer_secret'][0]
access_key = credentials_df['access_key'][0]
access_secret = credentials_df['access_secret'][0]
OAUTH_KEYS = {'consumer_key': consumer_key,
'consumer_secret': consumer_secret,
'access_token_key': access_key,
'access_token_secret': access_secret}
auth = tweepy.OAuthHandler(OAUTH_KEYS['consumer_key'],
OAUTH_KEYS['consumer_secret'])
api = tweepy.API(auth, wait_on_rate_limit=True,
wait_on_rate_limit_notify=True)
return (auth, api)
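# Illustrative only: the credentials file read above is assumed to be a one-row
# CSV whose column names match the keys used in authenticate_twitter_api();
# the values are placeholders, e.g.
#
#     consumer_key,consumer_secret,access_key,access_secret
#     YOUR_CONSUMER_KEY,YOUR_CONSUMER_SECRET,YOUR_ACCESS_KEY,YOUR_ACCESS_SECRET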
def get_twitter_followers(users_list,
credentials_path="./private_data/twitter_credentials.csv",
followers_path="./private_data/twitter_followers.csv"):
"""Retrieves Twitter followers given a list of Twitter usernames.
:users_list: list of Twitter usernames
    :credentials_path: path to a csv of twitter api credentials
:followers_path: path to write csv of followers to
"""
auth, api = authenticate_twitter_api(credentials_path)
followers_df = pd.DataFrame()
for name in users_list:
# Build list of followers, storing in csv after each
# user in case it breaks at some point.
print(name)
count = 0
ids = []
for page in tweepy.Cursor(api.followers_ids, screen_name=name).pages():
ids.extend(page)
print(count)
count += 1
time.sleep(70)
current_followers_df = pd.DataFrame()
current_followers_df['userId'] = ids
followers_df = pd.concat([followers_df, current_followers_df])
followers_df.to_csv(followers_path, index=False)
return(followers_df)
def get_twitter_users_data(users_list,
credentials_path="./private_data/twitter_credentials.csv",
users_path="./private_data/twitter_users.csv"):
"""Retrieves Twitter followers given a list of Twitter usernames.
:users_list: list of Twitter ids
:credentials_path: dataframe of twitter api credentials
:followers_path: path to write csv of followers to
"""
auth, api = authenticate_twitter_api(credentials_path)
number_users = len(users_list)
user_data = []
count = 0
# Iterate through users in groups of 100
    # Use ceil so a trailing group of fewer than 100 ids is not skipped.
    for i in range(int(np.ceil(number_users / 100))):
        current_users = users_list[100*i:min(100*(i+1), len(users_list))]
        try:
            user_data += api.lookup_users(current_users)
        except Exception:
            pass
count += 1
print(count)
users_df = pd.DataFrame()
users_df['userData'] = user_data
users_df.to_csv(users_path, index=False)
time.sleep(70)
return(users_df)
def retrieve_tweet(tweepy_user):
"""Returns twitter user's latest status if it exists
:tweepy_user: tweepy api user object
"""
try:
return(tweepy_user.status.text)
except:
return("")
def parse_twitter_profiles(users):
"""Parses data returned from Tweepy and returns only
necessary data for this project
Returns pandas dataframe with parsed data
:users: Pandas series of type tweepy.models.User
"""
twit_id = users.apply(lambda x: x.id)
screen_name = users.apply(lambda x: x.screen_name)
name = users.apply(lambda x: x.name)
location = users.apply(lambda x: x.location)
language = users.apply(lambda x: x.lang)
description = users.apply(lambda x: x.description)
tweet = users.apply(lambda x: retrieve_tweet(x))
parsed_profiles = pd.DataFrame()
parsed_profiles['twit_id'] = twit_id
parsed_profiles['screen_name'] = screen_name
parsed_profiles['name'] = name
parsed_profiles['location'] = location
parsed_profiles['language'] = language
parsed_profiles['description'] = description
parsed_profiles['tweet'] = tweet
return(parsed_profiles)
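# Hedged usage sketch (added for illustration; the function and argument names
# are placeholders, not part of the original module). It chains the helpers
# above: fetch follower ids for some seed accounts, hydrate them into user
# objects, then reduce the profiles to the columns used in this project.
def _example_follower_pipeline(seed_usernames):  # pragma: no cover
    followers_df = get_twitter_followers(seed_usernames)
    users_df = get_twitter_users_data(list(followers_df['userId']))
    return parse_twitter_profiles(users_df['userData'])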
|
{
"content_hash": "81253257919de9c6a28f898cd1592692",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 85,
"avg_line_length": 36.60162601626016,
"alnum_prop": 0.6363838294091515,
"repo_name": "lgallen/morton_wordvectors",
"id": "a8a5408d35966012215ac92dac935dcccd9d67b8",
"size": "4549",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tweety/tweety.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "317791"
},
{
"name": "Jupyter Notebook",
"bytes": "60023"
},
{
"name": "Python",
"bytes": "10676"
}
],
"symlink_target": ""
}
|
INSTANCE_ID = 'instance_id'
TENANT_ID = 'tenant_id'
TENANT_NAME = 'tenant_name'
HOST_NAME = 'host_name'
# Network attributes
NET_ID = 'id'
NET_NAME = 'name'
NET_VLAN_ID = 'vlan_id'
NET_VLAN_NAME = 'vlan_name'
NET_PORTS = 'ports'
CREDENTIAL_ID = 'credential_id'
CREDENTIAL_NAME = 'credential_name'
CREDENTIAL_USERNAME = 'user_name'
CREDENTIAL_PASSWORD = 'password'
CREDENTIAL_TYPE = 'type'
MASKED_PASSWORD = '********'
USERNAME = 'username'
PASSWORD = 'password'
LOGGER_COMPONENT_NAME = "cisco_plugin"
NEXUS_PLUGIN = 'nexus_plugin'
VSWITCH_PLUGIN = 'vswitch_plugin'
DEVICE_IP = 'device_ip'
NETWORK_ADMIN = 'network_admin'
NETWORK = 'network'
PORT = 'port'
BASE_PLUGIN_REF = 'base_plugin_ref'
CONTEXT = 'context'
SUBNET = 'subnet'
#### N1Kv CONSTANTS
# Special vlan_id value in n1kv_vlan_allocations table indicating flat network
FLAT_VLAN_ID = -1
# Topic for tunnel notifications between the plugin and agent
TUNNEL = 'tunnel'
# Maximum VXLAN range configurable for one network profile.
MAX_VXLAN_RANGE = 1000000
# Values for network_type
NETWORK_TYPE_FLAT = 'flat'
NETWORK_TYPE_VLAN = 'vlan'
NETWORK_TYPE_VXLAN = 'vxlan'
NETWORK_TYPE_LOCAL = 'local'
NETWORK_TYPE_NONE = 'none'
NETWORK_TYPE_TRUNK = 'trunk'
NETWORK_TYPE_MULTI_SEGMENT = 'multi-segment'
# Values for network sub_type
NETWORK_TYPE_OVERLAY = 'overlay'
NETWORK_SUBTYPE_NATIVE_VXLAN = 'native_vxlan'
NETWORK_SUBTYPE_TRUNK_VLAN = NETWORK_TYPE_VLAN
NETWORK_SUBTYPE_TRUNK_VXLAN = NETWORK_TYPE_OVERLAY
# Prefix for VM Network name
VM_NETWORK_NAME_PREFIX = 'vmn_'
DEFAULT_HTTP_TIMEOUT = 15
SET = 'set'
INSTANCE = 'instance'
PROPERTIES = 'properties'
NAME = 'name'
ID = 'id'
POLICY = 'policy'
TENANT_ID_NOT_SET = 'TENANT_ID_NOT_SET'
ENCAPSULATIONS = 'encapsulations'
STATE = 'state'
ONLINE = 'online'
MAPPINGS = 'mappings'
MAPPING = 'mapping'
SEGMENTS = 'segments'
SEGMENT = 'segment'
BRIDGE_DOMAIN_SUFFIX = '_bd'
LOGICAL_NETWORK_SUFFIX = '_log_net'
ENCAPSULATION_PROFILE_SUFFIX = '_profile'
UUID_LENGTH = 36
# Nexus vlan and vxlan segment range
NEXUS_VLAN_RESERVED_MIN = 3968
NEXUS_VLAN_RESERVED_MAX = 4047
NEXUS_VXLAN_MIN = 4096
NEXUS_VXLAN_MAX = 16000000
|
{
"content_hash": "f238a41433eb9182c386ba5c1a60f8a9",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 78,
"avg_line_length": 23.395604395604394,
"alnum_prop": 0.7271019257867544,
"repo_name": "gopal1cloud/neutron",
"id": "9d8d7d5314ea5d4fb60235a2ef513383ba65857b",
"size": "2838",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "neutron/plugins/cisco/common/cisco_constants.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1451"
},
{
"name": "Python",
"bytes": "9138456"
},
{
"name": "Shell",
"bytes": "9202"
}
],
"symlink_target": ""
}
|
import PyBool_builder as pbb
import PyBool_public_interface as pb
import copy
#This one memoizes.
class MemoizeMutable:
"""
Memoize(fn) - an instance which acts like fn but memoizes its arguments
Will work on functions with mutable arguments (slower than Memoize)
"""
def __init__(self, fn):
self.fn = fn
self.memo = {}
def __call__(self, *args):
import cPickle
str = cPickle.dumps(args)
if not self.memo.has_key(str):
self.memo[str] = self.fn(*args)
return self.memo[str]
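# Illustrative use of MemoizeMutable (the wrapped function name below is a
# placeholder; ite_build() wraps _ite() the same way further down):
#
#     slow_fn = MemoizeMutable(slow_fn)
#
# Repeated calls with equal (possibly mutable) arguments are then served from
# the cache, at the cost of pickling the arguments on every call.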
##############################################################################
#Public Method
##############################################################################
def ite_build(bdd):
"""
Public Function that initializes the main expression for the
recursive ite function and takes care of the corner cases (all
True and all False)
"""
expr = copy.deepcopy(bdd["expr"])
expr = pb.simplify(expr)
F,G,H = _parse_ite(expr)
ite = MemoizeMutable(_ite)
x = ite(bdd, F, G, H)
#Corner Case
if bdd["u"] == 1:
bdd["u"] = x
##############################################################################
#Private Methods
##############################################################################
def _ite(bdd,F,G,H):
"""
    Main recursive method. Follows Bryant's paper with a few
added heuristics which are noted.
"""
####################
#Possible Base Cases
####################
    #If G and H are constants:
#
#Base case if F is a variable or constant
#Else Parse F into FGH form.
if _is_const(G,True) and _is_const(H,False):
if F["type"] == "var":
return _ite_mk(bdd, F["name"][0] ,1,0)
elif F["type"] == "const":
return 1 if F["value"] else 0
else:
F,G,H = _parse_ite(F)
elif _is_const(H,True) and _is_const(G,False):
if F["type"] == "var":
return _ite_mk(bdd, F["name"][0] ,0,1)
elif F["type"] == "const":
return 0 if F["value"] else 1
else:
F,G,H = _parse_ite(F)
####################
#if H and G are equal and constant
#Just return what H and G are (my heuristic)
elif _is_const(G, False) and _is_const(H, False):
return 0
elif _is_const(G,True) and _is_const(H,True):
return 1
####################
#If F is a const, then we only have to consider
#Either G or H. Base case if they're variable otherwise
#parse them into F, G, H
elif _is_const(F,True):
if G["type"] == "var":
return _ite_mk(bdd,G["name"][0],1,0)
else:
F,G,H = _parse_ite(G)
elif _is_const(F,False):
if H["type"] == "var":
return _ite_mk(bdd, H["name"][0],1,0)
else:
F,G,H = _parse_ite(H)
####################
#Find the top variable.
v = top_variable(bdd, F,G,H)
#create new expressions with variable propagated
Fv, Gv, Hv = copy.deepcopy(F), copy.deepcopy(G), copy.deepcopy(H)
Fv = pb.propagate(Fv, (v, True))
Gv = pb.propagate(Gv, (v, True))
Hv = pb.propagate(Hv, (v, True))
Fnv = pb.propagate(F, (v, False))
Gnv = pb.propagate(G, (v, False))
Hnv = pb.propagate(H, (v, False))
#Recursively find T (then) and E (else) nodes
T = _ite(bdd, Fv, Gv, Hv)
E = _ite(bdd, Fnv, Gnv, Hnv)
#If they're the same, then we don't need to make a new
#node
if T == E:
return T
#make a new node and return it
R = _ite_mk(bdd,v,T,E)
return R
def _parse_ite(expr):
"""
This method takes in any expression EXPR and parses it into
a F, G, H expressions such that expr is the same as ite(F,G,H).
This is possible because every boolean function can be represented
in ite form. See Bryant's paper for formulas.
"""
if expr["type"] == "const":
v = expr["value"]
return pbb.mk_const_expr(v),pbb.mk_const_expr(v),pbb.mk_const_expr(v)
if expr["type"] == "var":
return expr, pbb.mk_const_expr(True), pbb.mk_const_expr(False)
if expr["type"] == "neg":
return expr["expr"], pbb.mk_const_expr(False), pbb.mk_const_expr(True)
if expr["type"] == "and":
return expr["expr1"], expr["expr2"], pbb.mk_const_expr(False)
if expr["type"] == "or":
return expr["expr1"], pbb.mk_const_expr(True), expr["expr2"]
if expr["type"] == "impl":
return expr["expr1"], expr["expr2"], pbb.mk_const_expr(True)
if expr["type"] == "xor":
return expr["expr1"], pbb.mk_neg_expr(expr["expr2"]), expr["expr2"]
if expr["type"] == "eqv":
return expr["expr1"], expr["expr2"], pbb.mk_neg_expr(expr["expr2"])
def _ite_mk(bdd, v, t, e):
"""
Special mk method for the ite operator.
(Could possibly be improved by passing in a minimum
variable index so the entire var_order list doesn't have to be
    traversed each time.)
"""
#Get the index
i = bdd["var_order"].index(v) + 1
#Have we seen it before?
if (i,t,e) in bdd["h_table"]:
return bdd["h_table"][(i,t,e)]
#Make new Node
u = bdd["u"] + 1
bdd["h_table"][(i,t,e)] = u
bdd["t_table"][u] = (i,t,e)
bdd["u"] = u
return u
def top_variable(bdd, F,G,H):
"""
Given 3 expressions (F, G, H) and a bdd dictionary, returns the top
variable in the three expressions.
(Could possibly be faster by passing a minimum argument so that
the whole var_order list doesn't have to be traversed)
"""
#Make a big list with all the variables in it.
exp_vars = pb.get_vars(F)
exp_vars.extend(pb.get_vars(G))
exp_vars.extend(pb.get_vars(H))
#Turn it into a set.
exp_vars = set(exp_vars)
#Traverse through all the variables in
#var_order and return the one that appears
#first.
for x in bdd["var_order"]:
if x in exp_vars:
return x
#else return None.
return None
def _is_const(expr, value):
"""
Helper Method, given an expression, returns True
if the expression is a constant and is equal to
the value passed in with VALUE
"""
if expr["type"] == "const":
return value == expr["value"]
return False
|
{
"content_hash": "36f8630d9a417c7cad194af537491155",
"timestamp": "",
"source": "github",
"line_count": 219,
"max_line_length": 78,
"avg_line_length": 29.06392694063927,
"alnum_prop": 0.5302435192458759,
"repo_name": "KingsleyZ/PyBool",
"id": "445722dee2954b81264d470a49e93e2ee7e654ce",
"size": "6929",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "python/BDD_V2/BDD_V2/include/BDD_ite1.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "160136"
},
{
"name": "JavaScript",
"bytes": "12598"
},
{
"name": "PHP",
"bytes": "41103"
},
{
"name": "Python",
"bytes": "123789"
}
],
"symlink_target": ""
}
|
'''\
Welcome to the Salt repl which exposes the execution environment of a minion in
a pre-configured Python shell
__opts__, __salt__, __grains__, and __pillar__ are available.
Jinja can be tested with full access to the above structures in the usual way:
JINJA("""\\
I am {{ salt['cmd.run']('whoami') }}.
{% if otherstuff %}
Some other stuff here
{% endif %}
""", otherstuff=True)
A history file is maintained in ~/.saltsh_history.
Completion behavior can be customized via the ~/.inputrc file.
'''
# pylint: disable=file-perms
# Import python libs
from __future__ import absolute_import
import atexit
import os
import readline
import sys
from code import InteractiveConsole
# Import salt libs
import salt.client
import salt.config
import salt.loader
import salt.output
import salt.pillar
import salt.runner
# Import 3rd party libs
import jinja2
from salt.ext.six.moves import builtins # pylint: disable=import-error
# pylint: disable=W0611
# These are imported to be available in the spawned shell
import yaml
import pprint
HISTFILE = '{HOME}/.saltsh_history'.format(**os.environ)
def savehist():
'''
Save the history file
'''
readline.write_history_file(HISTFILE)
def get_salt_vars():
'''
Return all the Salt-usual double-under data structures for a minion
'''
# Create the Salt __opts__ variable
__opts__ = salt.config.client_config(
os.environ.get('SALT_MINION_CONFIG', '/etc/salt/minion'))
# Populate grains if it hasn't been done already
if 'grains' not in __opts__ or not __opts__['grains']:
__opts__['grains'] = salt.loader.grains(__opts__)
# file_roots and pillar_roots should be set in the minion config
if 'file_client' not in __opts__ or not __opts__['file_client']:
__opts__['file_client'] = 'local'
# ensure we have a minion id
if 'id' not in __opts__ or not __opts__['id']:
__opts__['id'] = 'saltsh_mid'
# Populate template variables
__salt__ = salt.loader.minion_mods(__opts__)
__grains__ = __opts__['grains']
if __opts__['file_client'] == 'local':
__pillar__ = salt.pillar.get_pillar(
__opts__,
__grains__,
__opts__.get('id'),
__opts__.get('environment'),
).compile_pillar()
else:
__pillar__ = {}
JINJA = lambda x, **y: jinja2.Template(x).render( # pylint: disable=C0103,W0612
grains=__grains__,
salt=__salt__,
opts=__opts__,
pillar=__pillar__,
**y)
return locals()
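# Illustrative only (module and grain names here are common Salt defaults, not
# guaranteed on every minion): inside the spawned shell the structures returned
# above can be exercised directly, e.g.
#
#     __salt__['test.ping']()
#     __grains__['os']
#     JINJA("I am running on {{ grains['os'] }}")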
def main():
'''
The main entry point
'''
salt_vars = get_salt_vars()
def salt_outputter(value):
'''
Use Salt's outputters to print values to the shell
'''
if value is not None:
builtins._ = value
salt.output.display_output(value, '', salt_vars['__opts__'])
sys.displayhook = salt_outputter
# Set maximum number of items that will be written to the history file
readline.set_history_length(300)
if os.path.exists(HISTFILE):
readline.read_history_file(HISTFILE)
atexit.register(savehist)
atexit.register(lambda: sys.stdout.write('Salt you later!\n'))
saltrepl = InteractiveConsole(locals=salt_vars)
saltrepl.interact(banner=__doc__)
if __name__ == '__main__':
main()
|
{
"content_hash": "386f2ebda3934a34073ee9d0826e88c9",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 84,
"avg_line_length": 25.022058823529413,
"alnum_prop": 0.6112253893623274,
"repo_name": "stephane-martin/salt-debian-packaging",
"id": "4167cebc924e2a2ac5c31f4c2c85ae848bbd9aaa",
"size": "3449",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "salt-2016.3.2/tests/saltsh.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "13798"
},
{
"name": "C",
"bytes": "986"
},
{
"name": "Groff",
"bytes": "13634346"
},
{
"name": "HTML",
"bytes": "39558"
},
{
"name": "Makefile",
"bytes": "20902"
},
{
"name": "NSIS",
"bytes": "22316"
},
{
"name": "PowerShell",
"bytes": "38719"
},
{
"name": "Python",
"bytes": "40857506"
},
{
"name": "SaltStack",
"bytes": "58278"
},
{
"name": "Scheme",
"bytes": "1790"
},
{
"name": "Shell",
"bytes": "829927"
},
{
"name": "Tcl",
"bytes": "6532"
},
{
"name": "TeX",
"bytes": "11632"
}
],
"symlink_target": ""
}
|
import generator
import pyncs
def Build(spec):
model_type = spec["model_type"]
    parameters = {}
for key, generator_spec in spec.items():
if key == "model_type":
continue
value_generator = generator.Build(generator_spec)
value_generator.thisown = False
parameters[str(key)] = value_generator
return pyncs.ModelParameters(str(model_type), pyncs.string_to_generator_map(parameters))
|
{
"content_hash": "95712d7f03e1b149ecceaa1f4f817d51",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 90,
"avg_line_length": 30.533333333333335,
"alnum_prop": 0.7139737991266376,
"repo_name": "BrainComputationLab/ncs",
"id": "ccdb0f4149c5fcd4bf957082f92432f63d994e9b",
"size": "458",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "applications/simulator/model_parameters.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C++",
"bytes": "470604"
},
{
"name": "CMake",
"bytes": "11688"
},
{
"name": "Cuda",
"bytes": "84303"
},
{
"name": "Protocol Buffer",
"bytes": "1492"
},
{
"name": "Python",
"bytes": "92897"
}
],
"symlink_target": ""
}
|
import numpy as np
from numba import cuda
from numba.cuda.testing import unittest
class TestArrayAttr(unittest.TestCase):
    def test_contiguous_2d(self):
ary = np.arange(10)
cary = ary.reshape(2, 5)
fary = np.asfortranarray(cary)
dcary = cuda.to_device(cary)
dfary = cuda.to_device(fary)
self.assertTrue(dcary.is_c_contiguous())
self.assertTrue(not dfary.is_c_contiguous())
self.assertTrue(not dcary.is_f_contiguous())
self.assertTrue(dfary.is_f_contiguous())
    def test_contiguous_3d(self):
ary = np.arange(20)
cary = ary.reshape(2, 5, 2)
fary = np.asfortranarray(cary)
dcary = cuda.to_device(cary)
dfary = cuda.to_device(fary)
self.assertTrue(dcary.is_c_contiguous())
self.assertTrue(not dfary.is_c_contiguous())
self.assertTrue(not dcary.is_f_contiguous())
self.assertTrue(dfary.is_f_contiguous())
    def test_contiguous_4d(self):
ary = np.arange(60)
cary = ary.reshape(2, 5, 2, 3)
fary = np.asfortranarray(cary)
dcary = cuda.to_device(cary)
dfary = cuda.to_device(fary)
self.assertTrue(dcary.is_c_contiguous())
self.assertTrue(not dfary.is_c_contiguous())
self.assertTrue(not dcary.is_f_contiguous())
self.assertTrue(dfary.is_f_contiguous())
def test_ravel_c(self):
ary = np.arange(60)
reshaped = ary.reshape(2, 5, 2, 3)
expect = reshaped.ravel(order='C')
dary = cuda.to_device(reshaped)
dflat = dary.ravel()
flat = dflat.copy_to_host()
self.assertTrue(flat.ndim == 1)
self.assertTrue(np.all(expect == flat))
def test_ravel_f(self):
ary = np.arange(60)
reshaped = np.asfortranarray(ary.reshape(2, 5, 2, 3))
expect = reshaped.ravel(order='F')
dary = cuda.to_device(reshaped)
dflat = dary.ravel(order='F')
flat = dflat.copy_to_host()
self.assertTrue(flat.ndim == 1)
self.assertTrue(np.all(expect == flat))
def test_reshape_c(self):
ary = np.arange(10)
expect = ary.reshape(2, 5)
dary = cuda.to_device(ary)
dary_reshaped = dary.reshape(2, 5)
got = dary_reshaped.copy_to_host()
self.assertTrue(np.all(expect == got))
def test_reshape_f(self):
ary = np.arange(10)
expect = ary.reshape(2, 5, order='F')
dary = cuda.to_device(ary)
dary_reshaped = dary.reshape(2, 5, order='F')
got = dary_reshaped.copy_to_host()
self.assertTrue(np.all(expect == got))
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "357a4f5ecd8adcabc812cb66c3654fe6",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 61,
"avg_line_length": 32.91358024691358,
"alnum_prop": 0.5926481620405101,
"repo_name": "pitrou/numba",
"id": "7bb586b15e0124ccd5060b952192548730318342",
"size": "2666",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "numba/cuda/tests/cudadrv/test_array_attr.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "2212"
},
{
"name": "C",
"bytes": "241911"
},
{
"name": "C++",
"bytes": "17024"
},
{
"name": "Cuda",
"bytes": "214"
},
{
"name": "HTML",
"bytes": "98846"
},
{
"name": "PowerShell",
"bytes": "3153"
},
{
"name": "Python",
"bytes": "3236740"
},
{
"name": "Shell",
"bytes": "120"
}
],
"symlink_target": ""
}
|
"""Implementation of acl command for cloud storage providers."""
from __future__ import absolute_import
from gslib import aclhelpers
from gslib.cloud_api import AccessDeniedException
from gslib.cloud_api import BadRequestException
from gslib.cloud_api import Preconditions
from gslib.cloud_api import ServiceException
from gslib.command import Command
from gslib.command import SetAclExceptionHandler
from gslib.command import SetAclFuncWrapper
from gslib.command_argument import CommandArgument
from gslib.cs_api_map import ApiSelector
from gslib.exception import CommandException
from gslib.help_provider import CreateHelpText
from gslib.storage_url import StorageUrlFromString
from gslib.third_party.storage_apitools import storage_v1_messages as apitools_messages
from gslib.util import NO_MAX
from gslib.util import Retry
from gslib.util import UrlsAreForSingleProvider
_SET_SYNOPSIS = """
gsutil acl set [-f] [-r] [-a] file-or-canned_acl_name url...
"""
_GET_SYNOPSIS = """
gsutil acl get url
"""
_CH_SYNOPSIS = """
gsutil acl ch [-f] [-r] -u|-g|-d|-p <grant>... url...
where each <grant> is one of the following forms:
-u <id|email>:<perm>
-g <id|email|domain|All|AllAuth>:<perm>
-p <viewers|editors|owners>-<project number>
-d <id|email|domain|All|AllAuth>
"""
_GET_DESCRIPTION = """
<B>GET</B>
The "acl get" command gets the ACL text for a bucket or object, which you can
save and edit for the acl set command.
"""
_SET_DESCRIPTION = """
<B>SET</B>
The "acl set" command allows you to set an Access Control List on one or
more buckets and objects. The simplest way to use it is to specify one of
the canned ACLs, e.g.,:
gsutil acl set private gs://bucket
If you want to make an object or bucket publicly readable or writable, it is
recommended to use "acl ch", to avoid accidentally removing OWNER permissions.
See "gsutil help acl ch" for details.
See "gsutil help acls" for a list of all canned ACLs.
If you want to define more fine-grained control over your data, you can
retrieve an ACL using the "acl get" command, save the output to a file, edit
the file, and then use the "acl set" command to set that ACL on the buckets
and/or objects. For example:
gsutil acl get gs://bucket/file.txt > acl.txt
Make changes to acl.txt such as adding an additional grant, then:
gsutil acl set acl.txt gs://cats/file.txt
Note that you can set an ACL on multiple buckets or objects at once,
for example:
gsutil acl set acl.txt gs://bucket/*.jpg
If you have a large number of ACLs to update you might want to use the
gsutil -m option, to perform a parallel (multi-threaded/multi-processing)
update:
gsutil -m acl set acl.txt gs://bucket/*.jpg
Note that multi-threading/multi-processing is only done when the named URLs
refer to objects. gsutil -m acl set gs://bucket1 gs://bucket2 will run the
acl set operations sequentially.
<B>SET OPTIONS</B>
The "set" sub-command has the following options
-R, -r Performs "acl set" request recursively, to all objects under
the specified URL.
-a Performs "acl set" request on all object versions.
-f Normally gsutil stops at the first error. The -f option causes
it to continue when it encounters errors. If some of the ACLs
couldn't be set, gsutil's exit status will be non-zero even if
this flag is set. This option is implicitly set when running
"gsutil -m acl...".
"""
_CH_DESCRIPTION = """
<B>CH</B>
The "acl ch" (or "acl change") command updates access control lists, similar
in spirit to the Linux chmod command. You can specify multiple access grant
additions and deletions in a single command run; all changes will be made
atomically to each object in turn. For example, if the command requests
deleting one grant and adding a different grant, the ACLs being updated will
never be left in an intermediate state where one grant has been deleted but
the second grant not yet added. Each change specifies a user or group grant
to add or delete, and for grant additions, one of R, W, O (for the
permission to be granted). A more formal description is provided in a later
section; below we provide examples.
<B>CH EXAMPLES</B>
Examples for "ch" sub-command:
Grant anyone on the internet READ access to the object example-object:
gsutil acl ch -u AllUsers:R gs://example-bucket/example-object
NOTE: By default, publicly readable objects are served with a Cache-Control
header allowing such objects to be cached for 3600 seconds. If you need to
ensure that updates become visible immediately, you should set a
Cache-Control header of "Cache-Control:private, max-age=0, no-transform" on
such objects. For help doing this, see "gsutil help setmeta".
Grant anyone on the internet WRITE access to the bucket example-bucket
(WARNING: this is not recommended as you will be responsible for the content):
gsutil acl ch -u AllUsers:W gs://example-bucket
Grant the user john.doe@example.com WRITE access to the bucket
example-bucket:
gsutil acl ch -u john.doe@example.com:WRITE gs://example-bucket
Grant the group admins@example.com OWNER access to all jpg files in
the top level of example-bucket:
gsutil acl ch -g admins@example.com:O gs://example-bucket/*.jpg
Grant the owners of project example-project-123 WRITE access to the bucket
example-bucket:
gsutil acl ch -p owners-example-project-123:W gs://example-bucket
NOTE: You can replace 'owners' with 'viewers' or 'editors' to grant access
to a project's viewers/editors respectively.
Grant the user with the specified canonical ID READ access to all objects
in example-bucket that begin with folder/:
gsutil acl ch -r \\
-u 84fac329bceSAMPLE777d5d22b8SAMPLE785ac2SAMPLE2dfcf7c4adf34da46:R \\
gs://example-bucket/folder/
Grant the service account foo@developer.gserviceaccount.com WRITE access to
the bucket example-bucket:
gsutil acl ch -u foo@developer.gserviceaccount.com:W gs://example-bucket
Grant all users from the `Google Apps
<https://www.google.com/work/apps/business/>`_ domain my-domain.org READ
access to the bucket gcs.my-domain.org:
gsutil acl ch -g my-domain.org:R gs://gcs.my-domain.org
Remove any current access by john.doe@example.com from the bucket
example-bucket:
gsutil acl ch -d john.doe@example.com gs://example-bucket
If you have a large number of objects to update, enabling multi-threading
with the gsutil -m flag can significantly improve performance. The
following command adds OWNER for admin@example.org using
multi-threading:
gsutil -m acl ch -r -u admin@example.org:O gs://example-bucket
Grant READ access to everyone from my-domain.org and to all authenticated
users, and grant OWNER to admin@mydomain.org, for the buckets
my-bucket and my-other-bucket, with multi-threading enabled:
gsutil -m acl ch -r -g my-domain.org:R -g AllAuth:R \\
-u admin@mydomain.org:O gs://my-bucket/ gs://my-other-bucket
<B>CH ROLES</B>
You may specify the following roles with either their shorthand or
their full name:
R: READ
W: WRITE
O: OWNER
<B>CH ENTITIES</B>
There are four different entity types: Users, Groups, All Authenticated Users,
and All Users.
Users are added with -u and a plain ID or email address, as in
"-u john-doe@gmail.com:r". Note: Service Accounts are considered to be users.
Groups are like users, but specified with the -g flag, as in
"-g power-users@example.com:fc". Groups may also be specified as a full
domain, as in "-g my-company.com:r".
AllAuthenticatedUsers and AllUsers are specified directly, as
in "-g AllUsers:R" or "-g AllAuthenticatedUsers:O". These are case
insensitive, and may be shortened to "all" and "allauth", respectively.
Removing roles is specified with the -d flag and an ID, email
address, domain, or one of AllUsers or AllAuthenticatedUsers.
Many entities' roles can be specified on the same command line, allowing
bundled changes to be executed in a single run. This will reduce the number of
requests made to the server.
<B>CH OPTIONS</B>
The "ch" sub-command has the following options
-d Remove all roles associated with the matching entity.
-f Normally gsutil stops at the first error. The -f option causes
it to continue when it encounters errors. With this option the
gsutil exit status will be 0 even if some ACLs couldn't be
changed.
-g Add or modify a group entity's role.
-p Add or modify a project viewers/editors/owners role.
-R, -r Performs acl ch request recursively, to all objects under the
specified URL.
-u Add or modify a user entity's role.
"""
_SYNOPSIS = (_SET_SYNOPSIS + _GET_SYNOPSIS.lstrip('\n') +
_CH_SYNOPSIS.lstrip('\n') + '\n\n')
_DESCRIPTION = ("""
The acl command has three sub-commands:
""" + '\n'.join([_GET_DESCRIPTION, _SET_DESCRIPTION, _CH_DESCRIPTION]))
_DETAILED_HELP_TEXT = CreateHelpText(_SYNOPSIS, _DESCRIPTION)
_get_help_text = CreateHelpText(_GET_SYNOPSIS, _GET_DESCRIPTION)
_set_help_text = CreateHelpText(_SET_SYNOPSIS, _SET_DESCRIPTION)
_ch_help_text = CreateHelpText(_CH_SYNOPSIS, _CH_DESCRIPTION)
def _ApplyExceptionHandler(cls, exception):
cls.logger.error('Encountered a problem: %s', exception)
cls.everything_set_okay = False
def _ApplyAclChangesWrapper(cls, url_or_expansion_result, thread_state=None):
cls.ApplyAclChanges(url_or_expansion_result, thread_state=thread_state)
class AclCommand(Command):
"""Implementation of gsutil acl command."""
# Command specification. See base class for documentation.
command_spec = Command.CreateCommandSpec(
'acl',
command_name_aliases=['getacl', 'setacl', 'chacl'],
usage_synopsis=_SYNOPSIS,
min_args=2,
max_args=NO_MAX,
supported_sub_args='afRrg:u:d:p:',
file_url_ok=False,
provider_url_ok=False,
urls_start_arg=1,
gs_api_support=[ApiSelector.XML, ApiSelector.JSON],
gs_default_api=ApiSelector.JSON,
argparse_arguments={
'set': [
CommandArgument.MakeFileURLOrCannedACLArgument(),
CommandArgument.MakeZeroOrMoreCloudURLsArgument()
],
'get': [
CommandArgument.MakeNCloudURLsArgument(1)
],
'ch': [
CommandArgument.MakeZeroOrMoreCloudURLsArgument()
],
}
)
# Help specification. See help_provider.py for documentation.
help_spec = Command.HelpSpec(
help_name='acl',
help_name_aliases=['getacl', 'setacl', 'chmod', 'chacl'],
help_type='command_help',
help_one_line_summary='Get, set, or change bucket and/or object ACLs',
help_text=_DETAILED_HELP_TEXT,
subcommand_help_text={
'get': _get_help_text, 'set': _set_help_text, 'ch': _ch_help_text},
)
def _CalculateUrlsStartArg(self):
if not self.args:
self.RaiseWrongNumberOfArgumentsException()
if (self.args[0].lower() == 'set') or (self.command_alias_used == 'setacl'):
return 1
else:
return 0
def _SetAcl(self):
"""Parses options and sets ACLs on the specified buckets/objects."""
self.continue_on_error = False
if self.sub_opts:
for o, unused_a in self.sub_opts:
if o == '-a':
self.all_versions = True
elif o == '-f':
self.continue_on_error = True
elif o == '-r' or o == '-R':
self.recursion_requested = True
else:
self.RaiseInvalidArgumentException()
try:
self.SetAclCommandHelper(SetAclFuncWrapper, SetAclExceptionHandler)
except AccessDeniedException, unused_e:
self._WarnServiceAccounts()
raise
if not self.everything_set_okay:
raise CommandException('ACLs for some objects could not be set.')
def _ChAcl(self):
"""Parses options and changes ACLs on the specified buckets/objects."""
self.parse_versions = True
self.changes = []
self.continue_on_error = False
if self.sub_opts:
for o, a in self.sub_opts:
if o == '-f':
self.continue_on_error = True
elif o == '-g':
if 'gserviceaccount.com' in a:
raise CommandException(
'Service accounts are considered users, not groups; please use '
'"gsutil acl ch -u" instead of "gsutil acl ch -g"')
self.changes.append(
aclhelpers.AclChange(a, scope_type=aclhelpers.ChangeType.GROUP))
elif o == '-p':
self.changes.append(
aclhelpers.AclChange(a, scope_type=aclhelpers.ChangeType.PROJECT))
elif o == '-u':
self.changes.append(
aclhelpers.AclChange(a, scope_type=aclhelpers.ChangeType.USER))
elif o == '-d':
self.changes.append(aclhelpers.AclDel(a))
elif o == '-r' or o == '-R':
self.recursion_requested = True
else:
self.RaiseInvalidArgumentException()
if not self.changes:
raise CommandException(
'Please specify at least one access change '
'with the -g, -u, or -d flags')
if (not UrlsAreForSingleProvider(self.args) or
StorageUrlFromString(self.args[0]).scheme != 'gs'):
raise CommandException(
'The "{0}" command can only be used with gs:// URLs'.format(
self.command_name))
self.everything_set_okay = True
self.ApplyAclFunc(_ApplyAclChangesWrapper, _ApplyExceptionHandler,
self.args)
if not self.everything_set_okay:
raise CommandException('ACLs for some objects could not be set.')
def _RaiseForAccessDenied(self, url):
self._WarnServiceAccounts()
raise CommandException('Failed to set acl for %s. Please ensure you have '
'OWNER-role access to this resource.' % url)
@Retry(ServiceException, tries=3, timeout_secs=1)
def ApplyAclChanges(self, name_expansion_result, thread_state=None):
"""Applies the changes in self.changes to the provided URL.
Args:
name_expansion_result: NameExpansionResult describing the target object.
thread_state: If present, gsutil Cloud API instance to apply the changes.
"""
if thread_state:
gsutil_api = thread_state
else:
gsutil_api = self.gsutil_api
url = name_expansion_result.expanded_storage_url
if url.IsBucket():
bucket = gsutil_api.GetBucket(url.bucket_name, provider=url.scheme,
fields=['acl', 'metageneration'])
current_acl = bucket.acl
elif url.IsObject():
gcs_object = gsutil_api.GetObjectMetadata(
url.bucket_name, url.object_name, provider=url.scheme,
generation=url.generation,
fields=['acl', 'generation', 'metageneration'])
current_acl = gcs_object.acl
if not current_acl:
self._RaiseForAccessDenied(url)
modification_count = 0
for change in self.changes:
modification_count += change.Execute(url, current_acl, 'acl', self.logger)
if modification_count == 0:
self.logger.info('No changes to %s', url)
return
try:
if url.IsBucket():
preconditions = Preconditions(meta_gen_match=bucket.metageneration)
bucket_metadata = apitools_messages.Bucket(acl=current_acl)
gsutil_api.PatchBucket(url.bucket_name, bucket_metadata,
preconditions=preconditions,
provider=url.scheme, fields=['id'])
else: # Object
preconditions = Preconditions(gen_match=gcs_object.generation,
meta_gen_match=gcs_object.metageneration)
object_metadata = apitools_messages.Object(acl=current_acl)
gsutil_api.PatchObjectMetadata(
url.bucket_name, url.object_name, object_metadata,
preconditions=preconditions, provider=url.scheme,
generation=url.generation)
except BadRequestException as e:
# Don't retry on bad requests, e.g. invalid email address.
raise CommandException('Received bad request from server: %s' % str(e))
except AccessDeniedException:
self._RaiseForAccessDenied(url)
self.logger.info('Updated ACL on %s', url)
def RunCommand(self):
"""Command entry point for the acl command."""
action_subcommand = self.args.pop(0)
self.ParseSubOpts(check_args=True)
self.def_acl = False
if action_subcommand == 'get':
self.GetAndPrintAcl(self.args[0])
elif action_subcommand == 'set':
self._SetAcl()
elif action_subcommand in ('ch', 'change'):
self._ChAcl()
else:
raise CommandException(('Invalid subcommand "%s" for the %s command.\n'
'See "gsutil help acl".') %
(action_subcommand, self.command_name))
return 0
|
{
"content_hash": "730f2651b6171d2ed8ee8b58bfad27e5",
"timestamp": "",
"source": "github",
"line_count": 459,
"max_line_length": 87,
"avg_line_length": 37.21568627450981,
"alnum_prop": 0.6762674159934434,
"repo_name": "ltilve/ChromiumGStreamerBackend",
"id": "a63dcfe717b87411df801cd938abc140a2595273",
"size": "17702",
"binary": false,
"copies": "12",
"ref": "refs/heads/master",
"path": "tools/telemetry/third_party/gsutilz/gslib/commands/acl.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AppleScript",
"bytes": "6973"
},
{
"name": "Arduino",
"bytes": "464"
},
{
"name": "Assembly",
"bytes": "37073"
},
{
"name": "Batchfile",
"bytes": "8451"
},
{
"name": "C",
"bytes": "9568645"
},
{
"name": "C++",
"bytes": "246813997"
},
{
"name": "CSS",
"bytes": "943687"
},
{
"name": "DM",
"bytes": "60"
},
{
"name": "Groff",
"bytes": "2494"
},
{
"name": "HTML",
"bytes": "27371019"
},
{
"name": "Java",
"bytes": "15348315"
},
{
"name": "JavaScript",
"bytes": "20872607"
},
{
"name": "Makefile",
"bytes": "70983"
},
{
"name": "Objective-C",
"bytes": "2029825"
},
{
"name": "Objective-C++",
"bytes": "10156554"
},
{
"name": "PHP",
"bytes": "97817"
},
{
"name": "PLpgSQL",
"bytes": "182741"
},
{
"name": "Perl",
"bytes": "63937"
},
{
"name": "Protocol Buffer",
"bytes": "494625"
},
{
"name": "Python",
"bytes": "8594611"
},
{
"name": "Shell",
"bytes": "486464"
},
{
"name": "Standard ML",
"bytes": "5106"
},
{
"name": "XSLT",
"bytes": "418"
},
{
"name": "nesC",
"bytes": "18347"
}
],
"symlink_target": ""
}
|
from django.shortcuts import render_to_response
from django.shortcuts import redirect
from django.template import RequestContext
from django.http import HttpResponseRedirect
|
{
"content_hash": "9e2ceb3be59c013182752e1b37f88f8c",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 47,
"avg_line_length": 43.25,
"alnum_prop": 0.8786127167630058,
"repo_name": "xtornasol512/phyroserver",
"id": "162cefa38e380ee7933a2f0367ab1bad6557fcbe",
"size": "197",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "back-end/phyrosite/phyrosite/blog/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "64863"
},
{
"name": "JavaScript",
"bytes": "229209"
},
{
"name": "Python",
"bytes": "20885"
},
{
"name": "Shell",
"bytes": "202"
}
],
"symlink_target": ""
}
|
"""
Digit classification
====================
"""
import numpy as np
import matplotlib.pyplot as plt
import deeppy as dp
# Fetch MNIST data
dataset = dp.dataset.MNIST()
x_train, y_train, x_test, y_test = dataset.data(flat=True, dp_dtypes=True)
# Normalize pixel intensities
scaler = dp.StandardScaler()
x_train = scaler.fit_transform(x_train)
x_test = scaler.transform(x_test)
# Prepare network inputs
batch_size = 128
train_input = dp.SupervisedInput(x_train, y_train, batch_size=batch_size)
test_input = dp.Input(x_test)
# Setup network
weight_gain = 2.0
weight_decay = 0.0005
net = dp.NeuralNetwork(
layers=[
dp.FullyConnected(
n_out=1024,
weights=dp.Parameter(dp.AutoFiller(weight_gain),
weight_decay=weight_decay),
),
dp.ReLU(),
dp.FullyConnected(
n_out=1024,
weights=dp.Parameter(dp.AutoFiller(weight_gain),
weight_decay=weight_decay),
),
dp.ReLU(),
dp.FullyConnected(
n_out=dataset.n_classes,
weights=dp.Parameter(dp.AutoFiller()),
),
],
loss=dp.SoftmaxCrossEntropy(),
)
# Train network
n_epochs = [50, 15]
learn_rate = 0.05
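# Two training phases: 50 epochs at the base learning rate, then 15 epochs at
# one tenth of it (learn_rate / 10**i below).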
for i, epochs in enumerate(n_epochs):
trainer = dp.StochasticGradientDescent(
max_epochs=epochs,
learn_rule=dp.Momentum(learn_rate=learn_rate/10**i, momentum=0.94),
)
trainer.train(net, train_input)
# Evaluate on test data
error = np.mean(net.predict(test_input) != y_test)
print('Test error rate: %.4f' % error)
# Plot dataset examples
def plot_img(img, title):
plt.figure()
plt.imshow(img, cmap='gray', interpolation='nearest')
plt.axis('off')
plt.title(title)
plt.tight_layout()
imgs = np.reshape(x_train[:63, ...], (-1, 28, 28))
plot_img(dp.misc.img_tile(dp.misc.img_stretch(imgs)),
'Dataset examples')
# Plot learned features in first layer
w = np.array(net.layers[0].weights.array)
w = np.reshape(w.T, (-1,) + dataset.img_shape)
w = w[np.argsort(np.std(w, axis=(1, 2)))[-64:]]
plot_img(dp.misc.img_tile(dp.misc.img_stretch(w)),
'Examples of features learned')
|
{
"content_hash": "8f17237974a97c36f843efa8161a619b",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 75,
"avg_line_length": 26.397590361445783,
"alnum_prop": 0.6211775445002282,
"repo_name": "MohammedWasim/deeppy",
"id": "40e0c68d9008850a090b7eab1da1901ef2954aa7",
"size": "2214",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "examples/mlp_mnist.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "86832"
}
],
"symlink_target": ""
}
|
import os
import re
import sys
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
# Managed by bumpversion
version = '0.4.4'
def convert_md_to_rst(filename):
try:
import pypandoc
rst = pypandoc.convert('{}.md'.format(filename), 'rst')
except (IOError, ImportError):
rst = ''
return rst
if sys.argv[-1] == 'publish':
try:
import wheel
print("Wheel version: ", wheel.__version__)
except ImportError:
print('Wheel library missing. Please run "pip install wheel"')
sys.exit()
os.system('python setup.py sdist upload')
os.system('python setup.py bdist_wheel upload')
sys.exit()
if sys.argv[-1] == 'tag':
print("Tagging the version on git:")
os.system("git tag -a %s -m 'version %s'" % (version, version))
os.system("git push --tags")
sys.exit()
readme = convert_md_to_rst('README')
history = convert_md_to_rst('HISTORY')
setup(
name='django-jsonsuit',
version=version,
description="""Django goodies to dress JSON data in a suit.""",
long_description=readme + '\n\n' + history,
author='Marc Zimmermann',
author_email='tooreht@gmail.com',
url='https://github.com/tooreht/django-jsonsuit',
packages=[
'jsonsuit',
],
include_package_data=True,
install_requires=[
'django>=1.8',
],
license="MIT",
zip_safe=False,
keywords='django-jsonsuit',
classifiers=[
'Development Status :: 3 - Alpha',
'Framework :: Django',
'Framework :: Django :: 1.8',
'Framework :: Django :: 1.9',
'Framework :: Django :: 1.10',
'Framework :: Django :: 1.11',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
)
|
{
"content_hash": "5833315d22aee996863116853dc22144",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 70,
"avg_line_length": 27.68354430379747,
"alnum_prop": 0.5916780978509374,
"repo_name": "tooreht/django-jsonsuit",
"id": "b2507244604b02af8fe20b2017b349680b33cd55",
"size": "2233",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "19591"
},
{
"name": "HTML",
"bytes": "1148"
},
{
"name": "JavaScript",
"bytes": "2058"
},
{
"name": "Makefile",
"bytes": "1763"
},
{
"name": "Python",
"bytes": "15651"
}
],
"symlink_target": ""
}
|
r"""
Contains the class library that was generated while working on
SPG's engineering challenge.
"""
from gdelt_data_processor import (
endpoints,
event_data,
last_updated_data,
data_recipient,
load_gdelt_event_data,
)
|
{
"content_hash": "9c9b3c40c32e6b466453b33179268e49",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 62,
"avg_line_length": 17.285714285714285,
"alnum_prop": 0.7024793388429752,
"repo_name": "AustinTSchaffer/DailyProgrammer",
"id": "df4c50db4402c43707ec801b776ca33c211addcf",
"size": "242",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "InterviewChallenges/GDELT-Event-Data-Processor/gdelt_data_processor/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "9482"
},
{
"name": "C#",
"bytes": "11127"
},
{
"name": "Dockerfile",
"bytes": "308"
},
{
"name": "F#",
"bytes": "26762"
},
{
"name": "HCL",
"bytes": "2461"
},
{
"name": "HTML",
"bytes": "824"
},
{
"name": "Java",
"bytes": "22830"
},
{
"name": "Julia",
"bytes": "3416"
},
{
"name": "Lua",
"bytes": "6296"
},
{
"name": "Python",
"bytes": "284314"
},
{
"name": "Rust",
"bytes": "1517"
},
{
"name": "Shell",
"bytes": "871"
}
],
"symlink_target": ""
}
|
from pprint import pprint
from django.contrib.auth.mixins import PermissionRequiredMixin
from django.shortcuts import render
from django.views import View
from fo2.connections import db_cursor_so
from utils.functions.views import cleanned_fields_to_context
from estoque import classes, forms, models
class Transferencia(PermissionRequiredMixin, View):
Form_class = forms.TransferenciaForm
template_name = 'estoque/transferencia.html'
title_name = 'Movimentações'
cleanned_fields_to_context = cleanned_fields_to_context
def __init__(self):
self.permission_required = 'estoque.can_transferencia'
self.context = {'titulo': self.title_name}
def valid_tipo(self):
try:
tip_mov = models.TipoMovStq.objects.get(codigo=self.kwargs['tipo'])
except models.TipoMovStq.DoesNotExist as e:
self.context.update({
'erro_input': True,
'erro_msg':
f'Tipo de movimento de estoque "{self.kwargs["tipo"]}" '
'não cadastrado.',
})
return
return tip_mov
def get_tipo(self):
self.tip_mov = self.valid_tipo()
if self.tip_mov:
self.context.update({
'tipo': self.tip_mov.codigo,
'titulo': self.tip_mov.descricao,
})
def mount_context(self):
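        # Build the Transfere helper from the cleaned form fields and, when
        # 'executa' is posted, run the stock transfer.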
try:
transf = classes.Transfere(
self.cursor, self.request, self.tip_mov,
*(self.context[f] for f in [
'nivel', 'ref', 'tam',
'cores' if self.context['cores'] else 'cor',
'qtd', 'deposito_origem', 'deposito_destino',
'nova_ref', 'novo_tam',
'novas_cores' if self.context['novas_cores'] else 'nova_cor',
'num_doc', 'descricao']),
)
except Exception as e:
self.context.update({
'erro_input': True,
'erro_msg': e,
})
return
self.context.update({
'mov_origem': self.tip_mov.trans_saida != 0,
'mov_destino': self.tip_mov.trans_entrada != 0,
'itens_saida': transf.itens_saida,
'itens_entrada': transf.itens_entrada,
'num_doc': transf.num_doc,
})
if 'executa' in self.request.POST:
try:
transf.exec()
except Exception as e:
self.context.update({
'erro_exec': True,
'erro_msg': e,
})
return
self.context.update({
'sucesso_msg': f"{self.context['titulo']} executada."
})
def get(self, request, *args, **kwargs):
self.request = request
self.cursor = db_cursor_so(request)
self.get_tipo()
if not self.tip_mov:
return render(request, self.template_name, self.context)
self.context['form'] = self.Form_class(
cursor=self.cursor, user=request.user, tipo_mov=self.tip_mov)
return render(request, self.template_name, self.context)
def post(self, request, *args, **kwargs):
self.request = request
self.cursor = db_cursor_so(request)
self.get_tipo()
self.context['form'] = self.Form_class(
request.POST, cursor=self.cursor, user=self.request.user, tipo_mov=self.tip_mov)
if self.context['form'].is_valid():
self.cleanned_fields_to_context()
if self.tip_mov:
self.mount_context()
self.context['form'] = self.Form_class(
self.context, cursor=self.cursor, user=self.request.user, tipo_mov=self.tip_mov)
else:
self.context.update({
'erro_input': True,
'erro_msg':
'Erro no preenchimento dos campos.',
})
return render(request, self.template_name, self.context)
|
{
"content_hash": "fc6374939b7e8e6ddfdbd1c127908073",
"timestamp": "",
"source": "github",
"line_count": 115,
"max_line_length": 96,
"avg_line_length": 35.10434782608696,
"alnum_prop": 0.5405003715630419,
"repo_name": "anselmobd/fo2",
"id": "00890fd6c406f070ccc49d9e8cf1ddd38dc98ddb",
"size": "4040",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/estoque/views/transferencia.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "160899"
},
{
"name": "HTML",
"bytes": "855985"
},
{
"name": "JavaScript",
"bytes": "203109"
},
{
"name": "PLSQL",
"bytes": "2762"
},
{
"name": "Python",
"bytes": "3228268"
},
{
"name": "Shell",
"bytes": "2161"
}
],
"symlink_target": ""
}
|
"""
The configuration file used by :py:mod:`agar.config` implementations and other libraries using the
`google.appengine.api.lib_config`_ configuration library. Configuration overrides go in this file.
"""
from env_setup import setup; setup()
##############################################################################
# AGAR SETTINGS
##############################################################################
# Root level WSGI application modules that 'agar.url.uri_for()' will search
agar_url_APPLICATIONS = ['main']
|
{
"content_hash": "2e6ba3f40e79bed03338ac52fe80c4c0",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 98,
"avg_line_length": 44,
"alnum_prop": 0.5359848484848485,
"repo_name": "agostodev/substrate",
"id": "49714be2d1a632f3563427968a604f8eff8fb304",
"size": "528",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "app/appengine_config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "106"
},
{
"name": "Python",
"bytes": "395239"
}
],
"symlink_target": ""
}
|
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.errors import AnsibleParserError, AnsibleError, AnsibleAssertionError
from ansible.module_utils.six import iteritems, string_types
from ansible.module_utils._text import to_text
from ansible.parsing.splitter import parse_kv, split_args
from ansible.plugins.loader import module_loader, action_loader
from ansible.template import Templar
from ansible.utils.sentinel import Sentinel
# For filtering out modules correctly below
FREEFORM_ACTIONS = frozenset((
'command',
'win_command',
'shell',
'win_shell',
'script',
'raw'
))
RAW_PARAM_MODULES = FREEFORM_ACTIONS.union((
'include',
'include_vars',
'include_tasks',
'include_role',
'import_tasks',
'import_role',
'add_host',
'group_by',
'set_fact',
'meta',
))
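# Actions that may legitimately carry a free-form '_raw_params' argument
# (checked in parse() below).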
BUILTIN_TASKS = frozenset((
'meta',
'include',
'include_tasks',
'include_role',
'import_tasks',
'import_role'
))
class ModuleArgsParser:
"""
There are several ways a module and argument set can be expressed:
# legacy form (for a shell command)
- action: shell echo hi
# common shorthand for local actions vs delegate_to
- local_action: shell echo hi
# most commonly:
- copy: src=a dest=b
# legacy form
- action: copy src=a dest=b
# complex args form, for passing structured data
- copy:
src: a
dest: b
# gross, but technically legal
- action:
module: copy
args:
src: a
dest: b
# Standard YAML form for command-type modules. In this case, the args specified
# will act as 'defaults' and will be overridden by any args specified
# in one of the other formats (complex args under the action, or
# parsed from the k=v string
- command: 'pwd'
args:
chdir: '/tmp'
This class has some of the logic to canonicalize these into the form
- module: <module_name>
delegate_to: <optional>
args: <args>
Args may also be munged for certain shell command parameters.
"""
def __init__(self, task_ds=None, collection_list=None):
task_ds = {} if task_ds is None else task_ds
if not isinstance(task_ds, dict):
raise AnsibleAssertionError("the type of 'task_ds' should be a dict, but is a %s" % type(task_ds))
self._task_ds = task_ds
self._collection_list = collection_list
def _split_module_string(self, module_string):
'''
when module names are expressed like:
action: copy src=a dest=b
the first part of the string is the name of the module
and the rest are strings pertaining to the arguments.
'''
tokens = split_args(module_string)
if len(tokens) > 1:
return (tokens[0], " ".join(tokens[1:]))
else:
return (tokens[0], "")
def _normalize_parameters(self, thing, action=None, additional_args=None):
'''
arguments can be fuzzy. Deal with all the forms.
'''
additional_args = {} if additional_args is None else additional_args
# final args are the ones we'll eventually return, so first update
# them with any additional args specified, which have lower priority
# than those which may be parsed/normalized next
final_args = dict()
if additional_args:
if isinstance(additional_args, string_types):
templar = Templar(loader=None)
if templar._contains_vars(additional_args):
final_args['_variable_params'] = additional_args
else:
raise AnsibleParserError("Complex args containing variables cannot use bare variables (without Jinja2 delimiters), "
"and must use the full variable style ('{{var_name}}')")
elif isinstance(additional_args, dict):
final_args.update(additional_args)
else:
raise AnsibleParserError('Complex args must be a dictionary or variable string ("{{var}}").')
# how we normalize depends if we figured out what the module name is
# yet. If we have already figured it out, it's a 'new style' invocation.
# otherwise, it's not
if action is not None:
args = self._normalize_new_style_args(thing, action)
else:
(action, args) = self._normalize_old_style_args(thing)
# this can occasionally happen, simplify
if args and 'args' in args:
tmp_args = args.pop('args')
if isinstance(tmp_args, string_types):
tmp_args = parse_kv(tmp_args)
args.update(tmp_args)
# only internal variables can start with an underscore, so
# we don't allow users to set them directly in arguments
if args and action not in FREEFORM_ACTIONS:
for arg in args:
arg = to_text(arg)
if arg.startswith('_ansible_'):
raise AnsibleError("invalid parameter specified for action '%s': '%s'" % (action, arg))
# finally, update the args we're going to return with the ones
# which were normalized above
if args:
final_args.update(args)
return (action, final_args)
def _normalize_new_style_args(self, thing, action):
'''
deals with fuzziness in new style module invocations
accepting key=value pairs and dictionaries, and returns
a dictionary of arguments
possible example inputs:
'echo hi', 'shell'
{'region': 'xyz'}, 'ec2'
standardized outputs like:
{ _raw_params: 'echo hi', _uses_shell: True }
'''
if isinstance(thing, dict):
# form is like: { xyz: { x: 2, y: 3 } }
args = thing
elif isinstance(thing, string_types):
# form is like: copy: src=a dest=b
check_raw = action in FREEFORM_ACTIONS
args = parse_kv(thing, check_raw=check_raw)
elif thing is None:
# this can happen with modules which take no params, like ping:
args = None
else:
raise AnsibleParserError("unexpected parameter type in action: %s" % type(thing), obj=self._task_ds)
return args
def _normalize_old_style_args(self, thing):
'''
deals with fuzziness in old-style (action/local_action) module invocations
returns tuple of (module_name, dictionary_args)
possible example inputs:
{ 'shell' : 'echo hi' }
'shell echo hi'
{'module': 'ec2', 'x': 1 }
standardized outputs like:
('ec2', { 'x': 1} )
'''
action = None
args = None
if isinstance(thing, dict):
# form is like: action: { module: 'copy', src: 'a', dest: 'b' }
thing = thing.copy()
if 'module' in thing:
action, module_args = self._split_module_string(thing['module'])
args = thing.copy()
check_raw = action in FREEFORM_ACTIONS
args.update(parse_kv(module_args, check_raw=check_raw))
del args['module']
elif isinstance(thing, string_types):
# form is like: action: copy src=a dest=b
(action, args) = self._split_module_string(thing)
check_raw = action in FREEFORM_ACTIONS
args = parse_kv(args, check_raw=check_raw)
else:
# need a dict or a string, so giving up
raise AnsibleParserError("unexpected parameter type in action: %s" % type(thing), obj=self._task_ds)
return (action, args)
def parse(self):
'''
Given a task in one of the supported forms, parses and returns
returns the action, arguments, and delegate_to values for the
task, dealing with all sorts of levels of fuzziness.
'''
thing = None
action = None
delegate_to = self._task_ds.get('delegate_to', Sentinel)
args = dict()
# This is the standard YAML form for command-type modules. We grab
# the args and pass them in as additional arguments, which can/will
# be overwritten via dict updates from the other arg sources below
additional_args = self._task_ds.get('args', dict())
# We can have one of action, local_action, or module specified
# action
if 'action' in self._task_ds:
# an old school 'action' statement
thing = self._task_ds['action']
action, args = self._normalize_parameters(thing, action=action, additional_args=additional_args)
# local_action
if 'local_action' in self._task_ds:
# local_action is similar but also implies a delegate_to
if action is not None:
raise AnsibleParserError("action and local_action are mutually exclusive", obj=self._task_ds)
thing = self._task_ds.get('local_action', '')
delegate_to = 'localhost'
action, args = self._normalize_parameters(thing, action=action, additional_args=additional_args)
# module: <stuff> is the more new-style invocation
# walk the input dictionary to see we recognize a module name
for (item, value) in iteritems(self._task_ds):
if item in BUILTIN_TASKS or action_loader.has_plugin(item, collection_list=self._collection_list) or \
module_loader.has_plugin(item, collection_list=self._collection_list):
# finding more than one module name is a problem
if action is not None:
raise AnsibleParserError("conflicting action statements: %s, %s" % (action, item), obj=self._task_ds)
action = item
thing = value
action, args = self._normalize_parameters(thing, action=action, additional_args=additional_args)
# if we didn't see any module in the task at all, it's not a task really
if action is None:
if 'ping' not in module_loader:
raise AnsibleParserError("The requested action was not found in configured module paths. "
"Additionally, core modules are missing. If this is a checkout, "
"run 'git pull --rebase' to correct this problem.",
obj=self._task_ds)
else:
raise AnsibleParserError("no action detected in task. This often indicates a misspelled module name, or incorrect module path.",
obj=self._task_ds)
elif args.get('_raw_params', '') != '' and action not in RAW_PARAM_MODULES:
templar = Templar(loader=None)
raw_params = args.pop('_raw_params')
if templar._contains_vars(raw_params):
args['_variable_params'] = raw_params
else:
raise AnsibleParserError("this task '%s' has extra params, which is only allowed in the following modules: %s" % (action,
", ".join(RAW_PARAM_MODULES)),
obj=self._task_ds)
return (action, args, delegate_to)
|
{
"content_hash": "8b25f5ff2d06031889ab8fab1ab394a8",
"timestamp": "",
"source": "github",
"line_count": 303,
"max_line_length": 160,
"avg_line_length": 38.16831683168317,
"alnum_prop": 0.5814094249891916,
"repo_name": "SergeyCherepanov/ansible",
"id": "d23f2f2b9d1596ebe4bb9a75834afcd502adc1e7",
"size": "12300",
"binary": false,
"copies": "13",
"ref": "refs/heads/master",
"path": "ansible/ansible/parsing/mod_args.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Shell",
"bytes": "824"
}
],
"symlink_target": ""
}
|
import unittest
from django.db import models
from django.test.client import Client
from django.contrib.auth.models import User, Group
from django.contrib.comments.models import Comment
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site
from actstream.signals import action
from actstream.models import Action, Follow, follow, user_stream, model_stream, actor_stream
from testapp.models import Player
class ActivityTestCase(unittest.TestCase):
def setUp(self):
self.group = Group.objects.get_or_create(name='CoolGroup')[0]
self.user1 = User.objects.get_or_create(username='admin')[0]
self.user1.set_password('admin')
self.user1.is_superuser = self.user1.is_active = self.user1.is_staff = True
self.user1.save()
self.user2 = User.objects.get_or_create(username='Two')[0]
# User1 joins group
self.user1.groups.add(self.group)
action.send(self.user1,verb='joined',target=self.group)
# User1 follows User2
follow(self.user1, self.user2)
# User2 joins group
self.user2.groups.add(self.group)
action.send(self.user2,verb='joined',target=self.group)
# User2 follows group
follow(self.user2, self.group)
# User1 comments on group
action.send(self.user1,verb='commented on',target=self.group)
comment = Comment.objects.get_or_create(
user = self.user1,
content_type = ContentType.objects.get_for_model(self.group),
object_pk = self.group.pk,
comment = 'Sweet Group!',
site = Site.objects.get_current()
)[0]
# Group responds to comment
action.send(self.group,verb='responded to',target=comment)
self.client = Client()
def test_user1(self):
self.assertEqual(map(unicode, actor_stream(self.user1)),
[u'admin commented on CoolGroup 0 minutes ago', u'admin started following Two 0 minutes ago', u'admin joined CoolGroup 0 minutes ago'])
def test_user2(self):
self.assertEqual(map(unicode, actor_stream(self.user2)),
[u'Two started following CoolGroup 0 minutes ago', u'Two joined CoolGroup 0 minutes ago'])
def test_group(self):
self.assertEqual(map(unicode, actor_stream(self.group)),
[u'CoolGroup responded to admin: Sweet Group!... 0 minutes ago'])
def test_stream(self):
self.assertEqual(map(unicode, user_stream(self.user1)),
[u'Two started following CoolGroup 0 minutes ago', u'Two joined CoolGroup 0 minutes ago'])
self.assertEqual(map(unicode, user_stream(self.user2)),
[u'CoolGroup responded to admin: Sweet Group!... 0 minutes ago'])
def test_rss(self):
rss = self.client.get('/feed/').content
self.assert_(rss.startswith('<?xml version="1.0" encoding="utf-8"?>\n<rss xmlns:atom="http://www.w3.org/2005/Atom" version="2.0">'))
self.assert_(rss.find('Activity feed for your followed actors')>-1)
def test_atom(self):
atom = self.client.get('/feed/atom/').content
self.assert_(atom.startswith('<?xml version="1.0" encoding="utf-8"?>\n<feed xmlns="http://www.w3.org/2005/Atom" xml:lang="en-us">'))
self.assert_(atom.find('Activity feed for your followed actors')>-1)
def test_zombies(self):
from random import choice, randint
humans = [Player.objects.create() for i in range(10)]
zombies = [Player.objects.create(state=1) for _ in range(2)]
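        # Zombies repeatedly convert random humans, emitting a 'killed' action
        # for each conversion, until no humans remain.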
while len(humans):
for z in zombies:
if not len(humans): break
victim = choice(humans)
humans.pop(humans.index(victim))
victim.state = 1
victim.save()
zombies.append(victim)
action.send(z,verb='killed',target=victim)
self.assertEqual(map(unicode,model_stream(Player))[:5],
map(unicode,Action.objects.order_by('-timestamp')[:5]))
def tearDown(self):
from django.core.serializers import serialize
for i,m in enumerate((Comment,ContentType,Player,Follow,Action,User,Group)):
f = open('testdata%d.json'%i,'w')
f.write(serialize('json',m.objects.all()))
f.close()
Action.objects.all().delete()
Comment.objects.all().delete()
Player.objects.all().delete()
User.objects.all().delete()
Group.objects.all().delete()
Follow.objects.all().delete()
|
{
"content_hash": "d836af7e716e20fc975d86277e27f5f3",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 147,
"avg_line_length": 43.03669724770642,
"alnum_prop": 0.6175655510552122,
"repo_name": "whatsthehubbub/playpilots",
"id": "34005e0313bba56fad92eff43e253fe2292f302c",
"size": "4691",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ebi/actstream/tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "65908"
},
{
"name": "HTML",
"bytes": "209253"
},
{
"name": "JavaScript",
"bytes": "57686"
},
{
"name": "Python",
"bytes": "294667"
}
],
"symlink_target": ""
}
|
import os
import shutil
import tempfile
from bcbio.utils import file_exists
from bcbio.bam import is_paired, _get_sort_order, sort
from bcbio.pipeline import datadict as dd
from bcbio.provenance import do
from bcbio.distributed.transaction import file_transaction, tx_tmpdir
from bcbio.pipeline import config_utils
from bcbio.rnaseq import gtf
from bcbio.log import logger
def run(data):
"""Quantitaive isoforms expression by eXpress"""
name = dd.get_sample_name(data)
in_bam = dd.get_transcriptome_bam(data)
config = data['config']
if not in_bam:
logger.info("Transcriptome-mapped BAM file not found, skipping eXpress.")
return data
out_dir = os.path.join(dd.get_work_dir(data), "express", name)
out_file = os.path.join(out_dir, name + ".xprs")
express = config_utils.get_program("express", data['config'])
strand = _set_stranded_flag(in_bam, data)
if not file_exists(out_file):
gtf_fasta = gtf.gtf_to_fasta(dd.get_gtf_file(data), dd.get_ref_file(data))
with tx_tmpdir(data) as tmp_dir:
with file_transaction(out_dir) as tx_out_dir:
bam_file = _prepare_bam_file(in_bam, tmp_dir, config)
cmd = ("{express} --no-update-check -o {tx_out_dir} {strand} {gtf_fasta} {bam_file}")
do.run(cmd.format(**locals()), "Run express on %s." % in_bam, {})
shutil.move(os.path.join(out_dir, "results.xprs"), out_file)
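    # Pull single columns out of results.xprs (0-based): column 7 holds
    # effective counts, 14 TPM and 10 FPKM.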
eff_count_file = _get_column(out_file, out_file.replace(".xprs", "_eff.counts"), 7)
tpm_file = _get_column(out_file, out_file.replace("xprs", "tpm"), 14)
fpkm_file = _get_column(out_file, out_file.replace("xprs", "fpkm"), 10)
data = dd.set_express_counts(data, eff_count_file)
data = dd.set_express_tpm(data, tpm_file)
data = dd.set_express_fpkm(data, fpkm_file)
return data
def _get_column(in_file, out_file, column):
"""Subset one column from a file
"""
with file_transaction(out_file) as tx_out_file:
with open(in_file) as in_handle:
with open(tx_out_file, 'w') as out_handle:
for line in in_handle:
cols = line.strip().split("\t")
if line.find("eff_count") > 0:
continue
number = cols[column]
if column == 7:
number = int(round(float(number), 0))
out_handle.write("%s\t%s\n" % (cols[1], number))
return out_file
def _set_stranded_flag(bam_file, data):
strand_flag = {"unstranded": "",
"firststrand": "--rf-stranded",
"secondstrand": "--fr-stranded",
"firststrand-s": "--r-stranded",
"secondstrand-s": "--f-stranded"}
stranded = dd.get_strandedness(data)
    assert stranded in strand_flag, ("%s is not a valid strandedness value. "
                                     "Valid values are 'firststrand', "
                                     "'secondstrand' and 'unstranded'" % (stranded))
if stranded != "unstranded" and not is_paired(bam_file):
stranded += "-s"
flag = strand_flag[stranded]
return flag
def _prepare_bam_file(bam_file, tmp_dir, config):
"""
    Sort the BAM file by query name if it is currently sorted by coordinate.
"""
sort_mode = _get_sort_order(bam_file, config)
if sort_mode != "queryname":
bam_file = sort(bam_file, config, "queryname")
return bam_file
def isoform_to_gene_name(gtf_file, out_file=None):
"""
produce a table of isoform -> gene mappings for loading into EBSeq
"""
if not out_file:
out_file = tempfile.NamedTemporaryFile(delete=False).name
if file_exists(out_file):
return out_file
db = gtf.get_gtf_db(gtf_file)
line_format = "{transcript}\t{gene}\n"
with file_transaction(out_file) as tx_out_file:
with open(tx_out_file, "w") as out_handle:
for feature in db.features_of_type('transcript'):
transcript = feature['transcript_id'][0]
gene = feature['gene_id'][0]
out_handle.write(line_format.format(**locals()))
return out_file
|
{
"content_hash": "aee94c51bb350ee6e2798572e2186601",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 101,
"avg_line_length": 42.5360824742268,
"alnum_prop": 0.6025206010664081,
"repo_name": "Cyberbio-Lab/bcbio-nextgen",
"id": "23963c91c9cfc41b7b312bbea2f4dd3412043861",
"size": "4126",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "bcbio/rnaseq/express.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1656627"
},
{
"name": "Ruby",
"bytes": "624"
},
{
"name": "Shell",
"bytes": "14377"
}
],
"symlink_target": ""
}
|
import os
from django.core.exceptions import ValidationError
from django.db import models
from django.utils.translation import ugettext_lazy as _
from model_utils import Choices
from x_file_accel_redirects.conf import settings
class AccelRedirect(models.Model):
FILENAME_SOLVERS = Choices(
        (1, 'remainder', _(u'Everything after the last "/" is treated as the filename')),
(2, 'none', _(u'Do not try processing filenames (e.g. for service)')),
)
description = models.CharField(
max_length=64,
)
prefix = models.CharField(
_(u"URL prefix"),
help_text=_(u'URL prefix for accel_view that will be handled with this config'),
default='media',
max_length=64,
unique=True,
)
login_required = models.BooleanField(
_(u"Login required"),
help_text=_(u"Protect files with authentication"),
default=True,
)
internal_path = models.CharField(
_(u"Internal path"),
help_text=_(
u"Path that is served by nginx as internal to use in X-Accel-Redirect "
u"header. Actual Accell will be "
u"'{internal_path}/{path_in_url_after_prefix}'"
),
max_length=64,
)
serve_document_root = models.CharField(
_(u"Document root"),
help_text=_(u"Path with actual files to serve manualy when settings.X_FILE_ACCEL is False"),
default='',
blank=True,
max_length=64,
)
filename_solver = models.PositiveSmallIntegerField(
choices=FILENAME_SOLVERS,
default=FILENAME_SOLVERS.remainder,
)
class Meta:
verbose_name = _(u'Accel-Redirect config')
verbose_name_plural = _(u'Accel-Redirect configs')
db_table = 'xfar_accelredirect'
def __unicode__(self):
return self.description
def clean(self):
if settings.X_FILE_ACCEL and not self.serve_document_root:
raise ValidationError(_(u'X_FILE_ACCEL is disabled! Please set serve_document_root field.'))
if self.prefix.find('/') >= 0:
raise ValidationError(u"prefix should not contain slashes")
def get_filename(self, filepath):
if self.filename_solver == self.FILENAME_SOLVERS.remainder:
return filepath.split('/')[-1]
elif self.filename_solver == self.FILENAME_SOLVERS.none:
return None
else:
raise ValueError(
u'Something wrong with filename_solver value! processing of '
u'filename_solver "%s" is not implemented' % self.filename_solver
)
def process(self, filepath):
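        # Prepare the attachment filename for the Content-Disposition header and
        # the internal path used in the X-Accel-Redirect header.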
self.filepath = filepath
self.disposition_header = "attachment; filename={0}".format(self.get_filename(filepath))
self.accel_path = os.path.join(self.internal_path, filepath)
|
{
"content_hash": "dd21e34c91630aaee3e55d530a4eec01",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 104,
"avg_line_length": 34.21686746987952,
"alnum_prop": 0.6232394366197183,
"repo_name": "42cc/django-x-file-accel",
"id": "e8dbc2b078ff89c38a5bf2dc905fa436ad475c3f",
"size": "2864",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "x_file_accel_redirects/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "9983"
}
],
"symlink_target": ""
}
|
from nltk.parse import load_parser
from nltk.draw.tree import draw_trees
from util import skolemize
from nltk.sem import logic
"""
An implementation of the Hole Semantics model, following Blackburn and Bos,
Representation and Inference for Natural Language (CSLI, 2005).
The semantic representations are built by the grammar hole.fcfg.
This module contains driver code to read in sentences and parse them
according to a hole semantics grammar.
After parsing, the semantic representation is in the form of an underspecified
representation that is not easy to read. We use a "plugging" algorithm to
convert that representation into first-order logic formulas.
"""
# Note that in this code there may be multiple types of trees being referred to:
#
# 1. parse trees
# 2. the underspecified representation
# 3. first-order logic formula trees
# 4. the search space when plugging (search tree)
#
class Constants(object):
ALL = 'ALL'
EXISTS = 'EXISTS'
NOT = 'NOT'
AND = 'AND'
OR = 'OR'
IMP = 'IMP'
IFF = 'IFF'
PRED = 'PRED'
LEQ = 'LEQ'
HOLE = 'HOLE'
LABEL = 'LABEL'
MAP = {ALL: lambda v,e: logic.AllExpression(v.variable, e),
EXISTS: lambda v,e: logic.ExistsExpression(v.variable, e),
NOT: logic.NegatedExpression,
AND: logic.AndExpression,
OR: logic.OrExpression,
IMP: logic.ImpExpression,
IFF: logic.IffExpression,
PRED: logic.ApplicationExpression}
class HoleSemantics(object):
"""
This class holds the broken-down components of a hole semantics, i.e. it
extracts the holes, labels, logic formula fragments and constraints out of
    a big conjunction of such predicates, as produced by the hole semantics grammar. It
then provides some operations on the semantics dealing with holes, labels
and finding legal ways to plug holes with labels.
"""
def __init__(self, usr):
"""
Constructor. `usr' is a C{logic.sem.Expression} representing an
Underspecified Representation Structure (USR). A USR has the following
special predicates:
ALL(l,v,n),
EXISTS(l,v,n),
AND(l,n,n),
OR(l,n,n),
IMP(l,n,n),
IFF(l,n,n),
PRED(l,v,n,v[,v]*) where the brackets and star indicate zero or more repetitions,
LEQ(n,n),
HOLE(n),
LABEL(n)
where l is the label of the node described by the predicate, n is either
a label or a hole, and v is a variable.
"""
self.holes = set()
self.labels = set()
self.fragments = {} # mapping of label -> formula fragment
self.constraints = set() # set of Constraints
self._break_down(usr)
self.top_most_labels = self._find_top_most_labels()
self.top_hole = self._find_top_hole()
def is_node(self, x):
"""
Return true if x is a node (label or hole) in this semantic
representation.
"""
return x in (self.labels | self.holes)
def _break_down(self, usr):
"""
Extract holes, labels, formula fragments and constraints from the hole
semantics underspecified representation (USR).
"""
if isinstance(usr, logic.AndExpression):
self._break_down(usr.first)
self._break_down(usr.second)
elif isinstance(usr, logic.ApplicationExpression):
func, args = usr.uncurry()
if func.variable.name == Constants.LEQ:
self.constraints.add(Constraint(args[0], args[1]))
elif func.variable.name == Constants.HOLE:
self.holes.add(args[0])
elif func.variable.name == Constants.LABEL:
self.labels.add(args[0])
else:
label = args[0]
assert not self.fragments.has_key(label)
self.fragments[label] = (func, args[1:])
else:
raise ValueError(usr.node)
def _find_top_nodes(self, node_list):
top_nodes = node_list.copy()
for f in self.fragments.itervalues():
#the label is the first argument of the predicate
args = f[1]
for arg in args:
if arg in node_list:
top_nodes.discard(arg)
return top_nodes
def _find_top_most_labels(self):
"""
Return the set of labels which are not referenced directly as part of
another formula fragment. These will be the top-most labels for the
subtree that they are part of.
"""
return self._find_top_nodes(self.labels)
def _find_top_hole(self):
"""
Return the hole that will be the top of the formula tree.
"""
top_holes = self._find_top_nodes(self.holes)
assert len(top_holes) == 1 # it must be unique
return top_holes.pop()
def pluggings(self):
"""
Calculate and return all the legal pluggings (mappings of labels to
holes) of this semantics given the constraints.
"""
record = []
self._plug_nodes([(self.top_hole, [])], self.top_most_labels, {}, record)
return record
def _plug_nodes(self, queue, potential_labels, plug_acc, record):
"""
Plug the nodes in `queue' with the labels in `potential_labels'.
Each element of `queue' is a tuple of the node to plug and the list of
ancestor holes from the root of the graph to that node.
`potential_labels' is a set of the labels which are still available for
plugging.
`plug_acc' is the incomplete mapping of holes to labels made on the
current branch of the search tree so far.
`record' is a list of all the complete pluggings that we have found in
total so far. It is the only parameter that is destructively updated.
"""
if queue != []:
(node, ancestors) = queue[0]
if node in self.holes:
# The node is a hole, try to plug it.
self._plug_hole(node, ancestors, queue[1:], potential_labels, plug_acc, record)
else:
assert node in self.labels
# The node is a label. Replace it in the queue by the holes and
# labels in the formula fragment named by that label.
args = self.fragments[node][1]
head = [(a, ancestors) for a in args if self.is_node(a)]
self._plug_nodes(head + queue[1:], potential_labels, plug_acc, record)
else:
raise Exception('queue empty')
def _plug_hole(self, hole, ancestors0, queue, potential_labels0,
plug_acc0, record):
"""
Try all possible ways of plugging a single hole.
See _plug_nodes for the meanings of the parameters.
"""
# Add the current hole we're trying to plug into the list of ancestors.
assert hole not in ancestors0
ancestors = [hole] + ancestors0
# Try each potential label in this hole in turn.
for l in potential_labels0:
# Is the label valid in this hole?
if self._violates_constraints(l, ancestors):
continue
plug_acc = plug_acc0.copy()
plug_acc[hole] = l
potential_labels = potential_labels0.copy()
potential_labels.remove(l)
if len(potential_labels) == 0:
# No more potential labels. That must mean all the holes have
# been filled so we have found a legal plugging so remember it.
#
# Note that the queue might not be empty because there might
# be labels on there that point to formula fragments with
# no holes in them. _sanity_check_plugging will make sure
# all holes are filled.
self._sanity_check_plugging(plug_acc, self.top_hole, [])
record.append(plug_acc)
else:
# Recursively try to fill in the rest of the holes in the
# queue. The label we just plugged into the hole could have
                # holes of its own, so it is appended to the queue. Putting it on
# the end of the queue gives us a breadth-first search, so that
# all the holes at level i of the formula tree are filled
# before filling level i+1.
# A depth-first search would work as well since the trees must
# be finite but the bookkeeping would be harder.
self._plug_nodes(queue + [(l, ancestors)], potential_labels, plug_acc, record)
def _violates_constraints(self, label, ancestors):
"""
Return True if the `label' cannot be placed underneath the holes given
by the set `ancestors' because it would violate the constraints imposed
on it.
"""
for c in self.constraints:
if c.lhs == label:
if c.rhs not in ancestors:
return True
return False
def _sanity_check_plugging(self, plugging, node, ancestors):
"""
Make sure that a given plugging is legal. We recursively go through
each node and make sure that no constraints are violated.
We also check that all holes have been filled.
"""
if node in self.holes:
ancestors = [node] + ancestors
label = plugging[node]
else:
label = node
assert label in self.labels
for c in self.constraints:
if c.lhs == label:
assert c.rhs in ancestors
args = self.fragments[label][1]
for arg in args:
if self.is_node(arg):
self._sanity_check_plugging(plugging, arg, [label] + ancestors)
def formula_tree(self, plugging):
"""
Return the first-order logic formula tree for this underspecified
representation using the plugging given.
"""
return self._formula_tree(plugging, self.top_hole)
def _formula_tree(self, plugging, node):
if node in plugging:
return self._formula_tree(plugging, plugging[node])
elif self.fragments.has_key(node):
pred,args = self.fragments[node]
children = [self._formula_tree(plugging, arg) for arg in args]
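            # Fold the node's constructor over its children left to right, e.g.
            # AND with children [a, b, c] becomes AndExpression(AndExpression(a, b), c).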
return reduce(Constants.MAP[pred.variable.name], children)
else:
return node
class Constraint(object):
"""
This class represents a constraint of the form (L =< N),
where L is a label and N is a node (a label or a hole).
"""
def __init__(self, lhs, rhs):
self.lhs = lhs
self.rhs = rhs
def __eq__(self, other):
if self.__class__ == other.__class__:
return self.lhs == other.lhs and self.rhs == other.rhs
else:
return False
def __ne__(self, other):
return not (self == other)
def __hash__(self):
return hash(repr(self))
def __repr__(self):
return '(%s < %s)' % (self.lhs, self.rhs)
def hole_readings(sentence, grammar_filename=None, verbose=False):
if not grammar_filename:
grammar_filename = 'grammars/sample_grammars/hole.fcfg'
if verbose: print 'Reading grammar file', grammar_filename
parser = load_parser(grammar_filename)
# Parse the sentence.
tokens = sentence.split()
trees = parser.nbest_parse(tokens)
if verbose: print 'Got %d different parses' % len(trees)
all_readings = []
for tree in trees:
# Get the semantic feature from the top of the parse tree.
sem = tree.node['SEM'].simplify()
# Print the raw semantic representation.
if verbose: print 'Raw: ', sem
# Skolemize away all quantifiers. All variables become unique.
while isinstance(sem, logic.LambdaExpression):
sem = sem.term
skolemized = skolemize(sem)
if verbose: print 'Skolemized:', skolemized
# Break the hole semantics representation down into its components
# i.e. holes, labels, formula fragments and constraints.
hole_sem = HoleSemantics(skolemized)
# Maybe show the details of the semantic representation.
if verbose:
print 'Holes: ', hole_sem.holes
print 'Labels: ', hole_sem.labels
print 'Constraints: ', hole_sem.constraints
print 'Top hole: ', hole_sem.top_hole
print 'Top labels: ', hole_sem.top_most_labels
print 'Fragments:'
for (l,f) in hole_sem.fragments.items():
print '\t%s: %s' % (l, f)
# Find all the possible ways to plug the formulas together.
pluggings = hole_sem.pluggings()
# Build FOL formula trees using the pluggings.
readings = map(hole_sem.formula_tree, pluggings)
# Print out the formulas in a textual format.
if verbose:
for i,r in enumerate(readings):
print
print '%d. %s' % (i, r)
print
all_readings.extend(readings)
return all_readings
if __name__ == '__main__':
for r in hole_readings('a dog barks'): print r
print
for r in hole_readings('every girl chases a dog'): print r
|
{
"content_hash": "929eddb03809e38120afc5679caaf081",
"timestamp": "",
"source": "github",
"line_count": 357,
"max_line_length": 95,
"avg_line_length": 37.55182072829132,
"alnum_prop": 0.5916007757720424,
"repo_name": "tadgh/ArgoRevisit",
"id": "f3c85cb2400566ac2cfeb66feece22f5b4fcbc69",
"size": "13631",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "third_party/nltk/sem/hole.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "123941"
},
{
"name": "Shell",
"bytes": "69"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, unicode_literals
import logging
from django import get_version
from django.contrib.admin.options import ModelAdmin
from django.contrib.admin.sites import AdminSite
from django.db.models import CharField, Model
from django.forms import Form, ModelForm
from django.utils import unittest
from mock import patch
from suds import WebFault
from . import VATIN, fields, models
VALID_VIES = 'DE284754038'
VALID_VIES_COUNTRY_CODE = 'DE'
VALID_VIES_NUMBER = '284754038'
VALID_VIES_IE = ['1234567X', '1X23456X', '1234567XX', ]
class VIESModel(Model):
vat = models.VATINField()
class EmptyVIESModel(Model):
name = CharField(default='John Doe', max_length=50)
vat = models.VATINField(blank=True, null=True)
class VIESModelForm(ModelForm):
class Meta:
model = VIESModel
exclude = []
class EmptyVIESModelForm(ModelForm):
class Meta:
model = EmptyVIESModel
exclude = []
class VIESForm(Form):
vat = fields.VATINField()
class EmptyVIESForm(Form):
vat = fields.VATINField(required=False)
custom_error = {
'invalid_vat': 'This VAT number is not valid'
}
class VIESFormCustomError(Form):
vat = fields.VATINField(error_messages=custom_error)
custom_error_16 = {
'invalid_vat': '%(value)s is not a valid European VAT.'
}
class VIESFormCustomError16(Form):
vat = fields.VATINField(error_messages=custom_error_16)
class VIESTestCase(unittest.TestCase):
def setUp(self):
pass
def test_creation(self):
try:
VATIN(VALID_VIES_COUNTRY_CODE, VALID_VIES_NUMBER)
except Exception as e:
self.fail(e.message)
def test_verified(self):
with self.assertRaises(ValueError):
VATIN('xx', VALID_VIES_NUMBER)
def test_country_code_getter(self):
v = VATIN(VALID_VIES_COUNTRY_CODE.lower(), VALID_VIES_NUMBER)
self.assertEqual(v.country_code, VALID_VIES_COUNTRY_CODE)
def test_is_valid(self):
v = VATIN(VALID_VIES_COUNTRY_CODE, VALID_VIES_NUMBER)
self.assertTrue(v.is_valid())
def test_result(self):
v = VATIN(VALID_VIES_COUNTRY_CODE, VALID_VIES_NUMBER)
self.assertFalse(hasattr(v, 'result'))
self.assertTrue(v.is_valid())
# v should have a result now
self.assertTrue(hasattr(v, 'result'))
self.assertEqual(v.result['countryCode'], VALID_VIES_COUNTRY_CODE)
self.assertEqual(v.result['vatNumber'], VALID_VIES_NUMBER)
def test_ie_regex_validation(self):
import re
SRE_MATCH_TYPE = type(re.match("", ""))
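        # re.match returns a match object (not True) on success, so capture its
        # type for the isinstance check below.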
for vn in VALID_VIES_IE:
v = VATIN('IE', vn)
assert isinstance(
v._validate(),
SRE_MATCH_TYPE), 'Validation failed for {}'.format(vn)
@patch('vies.Client')
def test_raises_when_suds_WebFault(self, mock_client):
"""Raise an error if suds raises a WebFault."""
mock_checkVat = mock_client.return_value.service.checkVat
mock_checkVat.side_effect = WebFault(500, 'error')
v = VATIN(VALID_VIES_COUNTRY_CODE, VALID_VIES_NUMBER)
logging.getLogger('vies').setLevel(logging.CRITICAL)
with self.assertRaises(WebFault):
v.is_valid()
logging.getLogger('vies').setLevel(logging.NOTSET)
mock_checkVat.assert_called_with(
VALID_VIES_COUNTRY_CODE,
VALID_VIES_NUMBER)
class ModelTestCase(unittest.TestCase):
def setUp(self):
pass
def test_create(self):
"""Object is correctly created."""
vies = VIESModel.objects.create(vat=VALID_VIES)
self.assertNotEqual(VIESModel.objects.count(), 0)
self.assertEqual(vies.vat, VALID_VIES)
def test_save(self):
"""Object is correctly saved."""
vies_saved = VIESModel()
vies_saved.vat = VALID_VIES
vies_saved.save()
vies_received = VIESModel.objects.get(pk=vies_saved.pk)
self.assertNotEqual(VIESModel.objects.count(), 0)
self.assertEqual(vies_received.vat, VALID_VIES)
class ModelFormTestCase(unittest.TestCase):
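    # The VATIN form field is rendered with separate country-code and number
    # sub-widgets, which is why form data is passed as 'vat_0' and 'vat_1'.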
def test_is_valid(self):
"""Form is valid."""
form = VIESModelForm({
'vat_0': VALID_VIES_COUNTRY_CODE,
'vat_1': VALID_VIES_NUMBER})
self.assertTrue(form.is_valid())
vies = form.save()
self.assertEqual(vies.vat, VALID_VIES)
def test_is_not_valid_country(self):
"""Invalid country."""
form = VIESModelForm({
'vat_0': 'xx',
'vat_1': VALID_VIES_NUMBER})
self.assertFalse(form.is_valid())
def test_is_not_valid_numbers(self):
"""Invalid number."""
form = VIESModelForm({
'vat_0': VALID_VIES_COUNTRY_CODE,
'vat_1': 'xx123+-'})
self.assertFalse(form.is_valid())
def test_is_not_valid(self):
"""Invalid number."""
form = VIESModelForm({'vat_0': 'GB', 'vat_1': '000000000'})
self.assertFalse(form.is_valid())
def test_save(self):
"""Form is saved."""
form = VIESModelForm({
'vat_0': VALID_VIES_COUNTRY_CODE,
'vat_1': VALID_VIES_NUMBER})
self.assertTrue(form.is_valid())
vies_saved = form.save()
vies_received = VIESModel.objects.get(pk=vies_saved.pk)
self.assertEqual(vies_received, vies_saved)
self.assertNotEqual(VIESModel.objects.count(), 0)
self.assertEqual(vies_received.vat, VALID_VIES)
def test_empty(self):
form = EmptyVIESModelForm({'name': 'Eva'})
self.assertTrue(form.is_valid())
def test_is_valid_and_has_vatinData(self):
"""Valid VATINFields' vatinData() return result dict."""
form = VIESModelForm({'vat_0': 'NL', 'vat_1': '124851903B01'})
self.assertEqual(form.fields['vat'].vatinData(), None)
form.is_valid()
data = form.fields['vat'].vatinData()
self.assertEqual(data['name'], 'JIETER')
def test_invalid_error_message(self):
form = VIESForm({'vat_0': 'NL', 'vat_1': '0000000000'})
self.assertFalse(form.is_valid())
self.assertEqual(form.errors['vat'][0],
'Not a valid European VAT number.')
def test_custom_invalid_error_message(self):
form = VIESFormCustomError({'vat_0': 'NL',
'vat_1': '0000000000'})
self.assertFalse(form.is_valid())
self.assertEqual(form.errors['vat'][0],
'This VAT number is not valid')
def test_custom_invalid_error_message_with_value(self):
form = VIESFormCustomError16({'vat_0': 'NL', 'vat_1': '0000000000'})
self.assertFalse(form.is_valid())
if get_version() > '1.6':
self.assertEqual(form.errors['vat'][0],
'NL0000000000 is not a valid European VAT.')
class MockRequest(object):
pass
request = MockRequest()
class AdminTestCase(unittest.TestCase):
def setUp(self):
self.site = AdminSite()
def test_VATINField_admin(self):
"""Admin form is generated."""
ma = ModelAdmin(VIESModel, self.site)
try:
ma.get_form(request)
except Exception as e:
self.fail(e.message)
|
{
"content_hash": "ca1166c8826c81a98ff6bd37fe4f627c",
"timestamp": "",
"source": "github",
"line_count": 251,
"max_line_length": 76,
"avg_line_length": 29.08764940239044,
"alnum_prop": 0.615669086426517,
"repo_name": "chripede/django-vies",
"id": "b19ea120e5eca207dcc2e2b3ab4507bd41297899",
"size": "7325",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vies/tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "19544"
}
],
"symlink_target": ""
}
|
choice = 0
cur_val_pow = 0
min_val_pow = 0
max_val_pow = 9750
cur_pct_pow = 0
min_pct_pow = 0
max_pct_pow = 100
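# The device reports power as a raw value in [0, 9750]; the loop below converts
# interactively between that raw value and a 0-100 percentage with a simple
# linear mapping (value = pct * max_val_pow / max_pct_pow).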
while choice != -1:
choice = int(input('Are you entering a (1) Power Percentage or a (2) Power Value? (Enter -1 to quit): '))
if choice == -1:
break
elif choice == 1:
while (cur_pct_pow != -1):
cur_pct_pow = int(input('Enter a power percentage between ' + str(min_pct_pow) + ' and ' + str(max_pct_pow) + ' (or -1 to return to input type selection): '))
if cur_pct_pow == -1:
break
elif cur_pct_pow > max_pct_pow or cur_pct_pow < min_pct_pow:
print('Invalid percentage entered. Percentage must be between ' + str(min_pct_pow) + ' and ' + str(max_pct_pow) + '. Please try again.')
else:
pct_to_val = (cur_pct_pow * max_val_pow) / max_pct_pow
print('Power Value: ' + str(pct_to_val))
elif choice == 2:
while (cur_val_pow != -1):
cur_val_pow = int(input('Enter a power value between ' + str(min_val_pow) + ' and ' + str(max_val_pow) + ' (or -1 to return to input type selection): '))
if cur_val_pow == -1:
break
elif cur_val_pow > max_val_pow or cur_val_pow < min_val_pow:
print('Invalid value entered. Value must be between ' + str(min_val_pow) + ' and ' + str(max_val_pow) + '. Please try again.')
else:
                # round-half-up integer percentage: floor(100*v/max_val_pow + 0.5)
                val_to_pct = (200*cur_val_pow + max_val_pow) // (2*max_val_pow)
print('Power Percentage: ' + str(val_to_pct))
else:
print('Invalid choice entered. Please try again or enter -1 to quit.')
|
{
"content_hash": "5733c323b42597a2b5212d8bb2d39ab7",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 170,
"avg_line_length": 49.3235294117647,
"alnum_prop": 0.5438282647584973,
"repo_name": "emakris1/Testing",
"id": "df95197b9b89ad68cdc56fe5c906fd65fe4d1208",
"size": "1697",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python/int_test.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1404"
},
{
"name": "C++",
"bytes": "3080"
},
{
"name": "Java",
"bytes": "1296"
},
{
"name": "Python",
"bytes": "2777"
}
],
"symlink_target": ""
}
|
"""
Bug tracker tools.
"""
import abc
import urllib
from docutils import nodes
from .tool import Tool, Role
class BugTracker(Tool):
"""Abstract class for bug tracker tools."""
__metaclass__ = abc.ABCMeta
def __init__(self, project_name):
self.project_name = project_name
super(BugTracker, self).__init__()
def update(self):
"""Nothing has to be done to update bug tracker tools."""
@abc.abstractproperty
def base_url(self):
"""Base URL of the bug tracker service."""
raise NotImplementedError
@property
def bug_link(self):
"""Link to the bug tracker interface."""
raise NotImplementedError
class Github(BugTracker):
"""Github bug tracker tool."""
base_url = 'https://github.com/'
@property
def bug_link(self):
return '%s%s/issues' % (self.base_url, self.project_name)
class Redmine(BugTracker):
"""Redmine bug tracker tool."""
def __init__(self, project_name, base_url):
super(Redmine, self).__init__(project_name)
self._base_url = base_url
def update(self):
"""Update the local repository."""
repository_url = '%sprojects/%s/repository' % (
self._base_url, self.project_name)
urllib.urlopen(repository_url)
@property
def base_url(self):
return self._base_url
@property
def bug_link(self):
return '%sprojects/%s/issues' % (self.base_url, self.project_name)
class BugLink(Role):
"""Link to the bug tracker."""
def run(self, name, rawtext, text, lineno, inliner, options=None,
content=None):
return [nodes.reference('', text, refuri=self.tool.bug_link)], []
|
{
"content_hash": "26739aff1cb95123ce33e8a252411eb0",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 74,
"avg_line_length": 24.797101449275363,
"alnum_prop": 0.614260666277031,
"repo_name": "Kozea/sitenco",
"id": "33c4ec789441bd1e0e2120dbd91d7b0f60bcd468",
"size": "1711",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sitenco/config/bug_tracker.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1380"
},
{
"name": "Makefile",
"bytes": "576"
},
{
"name": "Python",
"bytes": "25002"
}
],
"symlink_target": ""
}
|
import sys
from unittest import mock
from neutron.tests import base
from neutron.tests import post_mortem_debug
class TestTesttoolsExceptionHandler(base.BaseTestCase):
def test_exception_handler(self):
try:
self.fail()
except Exception:
exc_info = sys.exc_info()
with mock.patch('traceback.print_exception') as mock_print_exception:
with mock.patch('pdb.post_mortem') as mock_post_mortem:
with mock.patch.object(post_mortem_debug,
'get_ignored_traceback',
return_value=mock.Mock()):
post_mortem_debug.get_exception_handler('pdb')(exc_info)
# traceback will become post_mortem_debug.FilteredTraceback
filtered_exc_info = (exc_info[0], exc_info[1], mock.ANY)
mock_print_exception.assert_called_once_with(*filtered_exc_info)
mock_post_mortem.assert_called_once_with(mock.ANY)
def test__get_debugger(self):
def import_mock(name, *args):
mod_mock = mock.Mock()
mod_mock.__name__ = name
mod_mock.post_mortem = mock.Mock()
return mod_mock
with mock.patch('builtins.__import__', side_effect=import_mock):
pdb_debugger = post_mortem_debug._get_debugger('pdb')
pudb_debugger = post_mortem_debug._get_debugger('pudb')
self.assertEqual('pdb', pdb_debugger.__name__)
self.assertEqual('pudb', pudb_debugger.__name__)
class TestFilteredTraceback(base.BaseTestCase):
def test_filter_traceback(self):
tb1 = mock.Mock()
tb2 = mock.Mock()
tb1.tb_next = tb2
tb2.tb_next = None
ftb1 = post_mortem_debug.FilteredTraceback(tb1, tb2)
for attr in ['lasti', 'lineno', 'frame']:
attr_name = 'tb_%s' % attr
self.assertEqual(getattr(tb1, attr_name, None),
getattr(ftb1, attr_name, None))
self.assertIsNone(ftb1.tb_next)
class TestGetIgnoredTraceback(base.BaseTestCase):
def _test_get_ignored_traceback(self, ignored_bit_array, expected):
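        # ignored_bit_array marks which frames of a mocked traceback chain look
        # like unittest internals ('__unittest' in f_globals); `expected` is the
        # index of the first frame of the trailing ignored chain, or None.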
root_tb = mock.Mock()
tb = root_tb
tracebacks = [tb]
for x in range(len(ignored_bit_array) - 1):
tb.tb_next = mock.Mock()
tb = tb.tb_next
tracebacks.append(tb)
tb.tb_next = None
tb = root_tb
for ignored in ignored_bit_array:
if ignored:
tb.tb_frame.f_globals = ['__unittest']
else:
tb.tb_frame.f_globals = []
tb = tb.tb_next
actual = post_mortem_debug.get_ignored_traceback(root_tb)
if expected is not None:
expected = tracebacks[expected]
self.assertEqual(expected, actual)
def test_no_ignored_tracebacks(self):
self._test_get_ignored_traceback([0, 0, 0], None)
def test_single_member_trailing_chain(self):
self._test_get_ignored_traceback([0, 0, 1], 2)
def test_two_member_trailing_chain(self):
self._test_get_ignored_traceback([0, 1, 1], 1)
def test_first_traceback_ignored(self):
self._test_get_ignored_traceback([1, 0, 0], None)
def test_middle_traceback_ignored(self):
self._test_get_ignored_traceback([0, 1, 0], None)
|
{
"content_hash": "a6c51887237eac1c3a57a45aefe53f3c",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 77,
"avg_line_length": 35.305263157894736,
"alnum_prop": 0.586463923673226,
"repo_name": "mahak/neutron",
"id": "15244d949615c0cd1a85f74164e887ec15befcc4",
"size": "3956",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "neutron/tests/unit/tests/test_post_mortem_debug.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jinja",
"bytes": "2773"
},
{
"name": "Mako",
"bytes": "1047"
},
{
"name": "Python",
"bytes": "15942116"
},
{
"name": "Ruby",
"bytes": "1257"
},
{
"name": "Shell",
"bytes": "83270"
}
],
"symlink_target": ""
}
|
import json
import sys
import logging
import requests
import os
import datetime
from flask import Flask, jsonify, request, make_response
from flask_cache import Cache
from dateutil import parser
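# Weather Underground REST API building blocks; request URLs are assembled as
# base URL + API key + feature path + "pws:" + station id + ".json".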
httpSuccessCode = 200
wundergroundBaseUrl = "http://api.wunderground.com/api/"
wundergroundConditions = "/conditions/q/"
wundergroundHourly = "/hourly/q/"
wundergroundHistory = "/history/q/"
wundergroundPwsPrefix = "pws:"
wundergroundJsonSuffix = ".json"
wundergroundDefaultApiKey = '8c7c951afe2e2a3d'
app = Flask(__name__)
# define the cache config keys (could be in a settings file to support memcached)
app.config['CACHE_TYPE'] = 'simple'
# register the cache instance and bind it
app.cache = Cache(app)
# stubbed test data - for backward compat. with older UI
amWeatherData = [
{
'time': '06:00',
'temperatureF': 25,
'windSpeedMph': 15,
'windDirection' : 'SSW',
'precipitation': 'Snow'
},
{
'time': '07:00',
'temperatureF': 27,
'windSpeedMph': 13,
'windDirection' : 'SSW',
'precipitation': 'Snow'
},
{
'time': '08:00',
'temperatureF': 31,
'windSpeedMph': 11,
'windDirection' : 'SSW',
'precipitation': 'WintryMix'
}
]
# stubbed test data - for backward compat. with older UI
pmWeatherData = [
{
'time': '16:00',
'temperatureF': 35,
'windSpeedMph': 7,
'windDirection' : 'N',
'precipitation': 'None'
},
{
'time': '17:00',
'temperatureF': 34,
'windSpeedMph': 9,
'windDirection' : 'N',
'precipitation': 'Rain'
},
{
'time': '18:00',
'temperatureF': 33,
'windSpeedMph': 12,
'windDirection' : 'N',
'precipitation': 'Sleet'
}
]
# stubbed test data, also serves as response data structure definition
returnJson = {
'error' : '',
'input' : {
'toMidPoint' : "07:00",
'fromMidPoint' : "17:30"
},
'info' : {
'asOf' : "Mar 1, 3:01pm",
'tempStationLoc' : 'Chicago Bronzeville, Chicago, Illinois',
'windStationLoc' : 'U.S. Cellular Field/Bridgeport, Chicago, Illinois'
},
'today' : {
'to' : {
'now' : {
'tempF' : 1,
'windSpeedMph' : 10,
'windGustMph' : 12,
'windDirection' : 'N',
'precipInHr' : 0.1,
'humidityPct' : 50,
'conditions' : 'clear'
},
'midpoint' : {
'tempF' : 2,
'windSpeedMph' : 11,
'windGustMph' : 12,
'windDirection' : 'NNE',
'precipInHr' : 0,
'humidityPct' : 50,
'conditions' : 'cloudy'
}
},
'from' : {
'before' : {
'tempF' : 3,
'windSpeedMph' : 12,
'windGustMph' : 12,
'windDirection' : 'NE',
'precipInHr' : 0.5,
'humidityPct' : 50,
'conditions' : 'rain'
},
'midpoint' : {
'tempF' : 4,
'windSpeedMph' : 13,
'windGustMph' : 14,
'windDirection' : 'E',
'precipInHr' : 0.2,
'humidityPct' : 50,
'conditions' : 'snow'
}
}
},
'tomorrow' : {
'to' : {
'before' : {
'tempF' : 5,
'windSpeedMph' : 14,
'windGustMph' : 14,
'windDirection' : 'ESE',
'precipInHr' : 0.1,
'humidityPct' : 50,
'conditions' : 'hail'
},
'midpoint' : {
'tempF' : 6,
'windSpeedMph' : 15,
'windGustMph' : 16,
'windDirection' : 'SE',
'precipInHr' : 0.1,
'humidityPct' : 50,
'conditions' : 'sleet'
}
},
'from' : {
'before' : {
'tempF' : 7,
'windSpeedMph' : 16,
'windGustMph' : 18,
'windDirection' : 'S',
'precipInHr' : 0.1,
'humidityPct' : 50,
'conditions' : 'wintrymix'
},
'midpoint' : {
'tempF' : 8,
'windSpeedMph' : 17,
'windGustMph' : 19,
'windDirection' : 'SW',
'precipInHr' : 0.1,
'humidityPct' : 50,
'conditions' : 'snow'
}
}
}
}
# endpoints
# legacy endpoint for backward-compat. with older UI
@app.route('/today/amrush', methods=['GET'])
def get_amRush():
return jsonify({'samples': amWeatherData})
# legacy endpoint for backward-compat. with older UI
@app.route('/today/pmrush', methods=['GET'])
def get_pmRush():
return jsonify({'samples': pmWeatherData})
# get commute am / pm weather for today & tomorrow
@app.route('/commuteWeatherTodayTomorrow', methods=['GET'])
@app.cache.cached(timeout=30) # cache this endpoint for 30sec
def get_commuteWeatherTodayTomorrow():
try:
wundergroundApiKey = os.environ.get('WUNDERGROUND_API_KEY')
if (wundergroundApiKey is None or wundergroundApiKey == ''):
wundergroundApiKey = wundergroundDefaultApiKey
# get params
windStation = request.args.get('windStation')
tempStation = request.args.get('tempStation')
toMidpoint = request.args.get('toMidpoint')
fromMidpoint = request.args.get('fromMidpoint')
# get current conditions
tempStationConditions = get_weather_api_data(wundergroundBaseUrl + wundergroundApiKey + wundergroundConditions + wundergroundPwsPrefix + tempStation + wundergroundJsonSuffix)
windStationConditions = get_weather_api_data(wundergroundBaseUrl + wundergroundApiKey + wundergroundConditions + wundergroundPwsPrefix + windStation + wundergroundJsonSuffix)
# get forecast
forecast = get_weather_api_data(wundergroundBaseUrl + wundergroundApiKey + wundergroundHourly + wundergroundPwsPrefix + tempStation + wundergroundJsonSuffix)
# populate return info
returnJson['input']['toMidPoint'] = toMidpoint
returnJson['input']['fromMidPoint'] = fromMidpoint
# use dateutil parser for greater flexibility in time format (ignore date component)
toMidpointTime = parser.parse(toMidpoint)
fromMidpointTime = parser.parse(fromMidpoint)
asOfString = tempStationConditions['current_observation']['observation_time_rfc822']
asOfDatetime = parser.parse(asOfString)
returnJson['info']['asOf'] = asOfString
returnJson['info']['tempStationLoc'] = tempStationConditions['current_observation']['observation_location']['full']
returnJson['info']['windStationLoc'] = windStationConditions['current_observation']['observation_location']['full']
# always populate today to now
set_from_current(tempStationConditions, windStationConditions, returnJson['today']['to']['now'] )
# populate today to midpoint
point = returnJson['today']['to']['midpoint']
set_default_data(point)
hourlyForecast = get_hourlyForecastIfExists(forecast, asOfDatetime, 0, 0, toMidpointTime)
if (hourlyForecast is not None):
set_from_hourly_forecast(hourlyForecast, point)
# populate today from before
point = returnJson['today']['from']['before']
set_default_data(point)
hourlyForecast = get_hourlyForecastIfExists(forecast, asOfDatetime, 0, -1, fromMidpointTime)
if (hourlyForecast is not None):
set_from_hourly_forecast(hourlyForecast, point)
# populate today from midpoint
point = returnJson['today']['from']['midpoint']
set_default_data(point)
hourlyForecast = get_hourlyForecastIfExists(forecast, asOfDatetime, 0, 0, fromMidpointTime)
if (hourlyForecast is not None):
set_from_hourly_forecast(hourlyForecast, point)
# tomorrow to before
point = returnJson['tomorrow']['to']['before']
set_default_data(point)
hourlyForecast = get_hourlyForecastIfExists(forecast, asOfDatetime, 1, -1, toMidpointTime)
if (hourlyForecast is not None):
set_from_hourly_forecast(hourlyForecast, point)
# tomorrow to midpoint
point = returnJson['tomorrow']['to']['midpoint']
set_default_data(point)
hourlyForecast = get_hourlyForecastIfExists(forecast, asOfDatetime, 1, 0, toMidpointTime)
if (hourlyForecast is not None):
            set_from_hourly_forecast(hourlyForecast, point)
        # tomorrow from before
point = returnJson['tomorrow']['from']['before']
set_default_data(point)
hourlyForecast = get_hourlyForecastIfExists(forecast, asOfDatetime, 1, -1, fromMidpointTime)
if (hourlyForecast is not None):
set_from_hourly_forecast(hourlyForecast, point)
# tomorrow from midpoint
point = returnJson['tomorrow']['from']['midpoint']
set_default_data(point)
hourlyForecast = get_hourlyForecastIfExists(forecast, asOfDatetime, 1, 0, fromMidpointTime)
if (hourlyForecast is not None):
set_from_hourly_forecast(hourlyForecast, point)
# return
return jsonify({'weatherData' : returnJson})
except Exception as e:
return make_response(jsonify({'weatherData' : { 'error': str(e)}}), 500)
# find an hourly forecast (if available) for the specified current day offset, hr offset
def get_hourlyForecastIfExists(forecast, asOfDatetime, dayOffset, hrOffset, midpointTime):
# TODO: consider a worst-case temp/wind return for the hr after midpoint
# TODO: Consider taking into account minute component of midpointTime
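    # Build the target timestamp: shift the observation date by dayOffset days,
    # pin the hour to the commute midpoint, then apply hrOffset. The hourly
    # forecast list is then scanned for a matching calendar day and hour.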
calced_datetime = (asOfDatetime + datetime.timedelta(days=dayOffset)).replace(hour=midpointTime.hour)
calced_datetime += datetime.timedelta(hours=hrOffset)
for h in forecast['hourly_forecast']:
f_hr = int(h['FCTTIME']['hour'])
f_day = int(h['FCTTIME']['mday'])
if (f_hr == calced_datetime.hour and f_day == calced_datetime.day):
return h
return None
# set default "no data available" data
def set_default_data(target):
target['conditions'] = '-'
target['humidityPct'] = -1
target['precipInHr'] = -1
target['tempF'] = -1
target['windDirection'] = '-'
target['windGustMph'] = -1
target['windSpeedMph'] = -1
# set return json structure from current conditions structure
def set_from_current(tempStation, windStation, target):
target['conditions'] = tempStation['current_observation']['weather']
target['humidityPct'] = tempStation['current_observation'][
'relative_humidity'].replace('%', '')
target['precipInHr'] = tempStation['current_observation']['precip_1hr_in']
target['tempF'] = tempStation['current_observation']['temp_f']
target['windDirection'] = windStation['current_observation']['wind_dir']
target['windGustMph'] = windStation['current_observation']['wind_gust_mph']
target['windSpeedMph'] = windStation['current_observation']['wind_mph']
# set return json structure from forecast structure
def set_from_hourly_forecast(forecastHour, target):
# TODO: look at 'snow' *or* 'qpf'
target['conditions'] = forecastHour['condition']
target['humidityPct'] = forecastHour['humidity']
target['precipInHr'] = forecastHour['qpf']['english']
target['tempF'] = forecastHour['temp']['english']
target['windDirection'] = forecastHour['wdir']['dir']
target['windGustMph'] = 0
target['windSpeedMph'] = forecastHour['wspd']['english']
# get weather api data for specified url
def get_weather_api_data(url):
response = requests.get(url)
responseJson = response.json()
if (response.status_code != httpSuccessCode):
raise Exception('non-success code ' + str(response.status_code) + ' invoking: ' + url)
if ('error' in responseJson['response']):
raise Exception('error "' + responseJson['response']['error']['description'] + '" invoking: ' + url)
return responseJson
if __name__ == '__main__':
app.run(debug=False, host='0.0.0.0')
|
{
"content_hash": "94fcda346ee5211fe41069b598925867",
"timestamp": "",
"source": "github",
"line_count": 344,
"max_line_length": 182,
"avg_line_length": 36.19767441860465,
"alnum_prop": 0.5918727915194346,
"repo_name": "brogersyh/Dockerfiles-for-Linux",
"id": "94e42c83259276630c6294e9fc6058cca8fd003d",
"size": "12516",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python-REST-Service/public-service.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "12516"
}
],
"symlink_target": ""
}
|
from __future__ import division, print_function
import numpy as np
import scipy.stats
import os
import logging
from astropy.tests.helper import pytest
from astropy.modeling import models
from astropy.modeling.fitting import _fitter_to_model_params
from stingray import Powerspectrum
from stingray.modeling import ParameterEstimation, PSDParEst, \
OptimizationResults, SamplingResults
from stingray.modeling import PSDPosterior, set_logprior, PSDLogLikelihood, \
LogLikelihood
try:
from statsmodels.tools.numdiff import approx_hess
comp_hessian = True
except ImportError:
comp_hessian = False
try:
import emcee
can_sample = True
except ImportError:
can_sample = False
class LogLikelihoodDummy(LogLikelihood):
def __init__(self, x, y, model):
LogLikelihood.__init__(self, x, y, model)
def evaluate(self, parse, neg=False):
return np.nan
class TestParameterEstimation(object):
@classmethod
def setup_class(cls):
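        # Simulate a flat (white-noise) Leahy-normalized periodogram: for a
        # single averaged segment (m = 1) the powers are chi^2 with 2 degrees
        # of freedom, i.e. exponentially distributed with mean 2.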
m = 1
nfreq = 100000
freq = np.arange(nfreq)
noise = np.random.exponential(size=nfreq)
power = noise * 2.0
ps = Powerspectrum()
ps.freq = freq
ps.power = power
ps.m = m
ps.df = freq[1] - freq[0]
ps.norm = "leahy"
cls.ps = ps
cls.a_mean, cls.a_var = 2.0, 1.0
cls.model = models.Const1D()
p_amplitude = lambda amplitude: \
scipy.stats.norm(loc=cls.a_mean, scale=cls.a_var).pdf(amplitude)
cls.priors = {"amplitude": p_amplitude}
cls.lpost = PSDPosterior(cls.ps.freq, cls.ps.power, cls.model,
m=cls.ps.m)
cls.lpost.logprior = set_logprior(cls.lpost, cls.priors)
def test_par_est_initializes(self):
pe = ParameterEstimation()
def test_parest_stores_max_post_correctly(self):
"""
Make sure the keyword for Maximum A Posteriori fits is stored correctly
as a default.
"""
pe = ParameterEstimation()
assert pe.max_post is True, "max_post should be set to True as a default."
def test_object_works_with_loglikelihood_object(self):
llike = PSDLogLikelihood(self.ps.freq, self.ps.power,
self.model, m=self.ps.m)
pe = ParameterEstimation()
res = pe.fit(llike, [2.0])
pass
def test_fit_fails_when_object_is_not_posterior_or_likelihood(self):
x = np.ones(10)
y = np.ones(10)
pe = ParameterEstimation()
with pytest.raises(TypeError):
res = pe.fit(x, y)
def test_fit_fails_without_lpost_or_t0(self):
pe = ParameterEstimation()
with pytest.raises(TypeError):
res = pe.fit()
def test_fit_fails_without_t0(self):
pe = ParameterEstimation()
with pytest.raises(TypeError):
res = pe.fit(np.ones(10))
def test_fit_fails_with_incorrect_number_of_parameters(self):
pe = ParameterEstimation()
t0 = [1, 2]
with pytest.raises(ValueError):
res = pe.fit(self.lpost, t0)
def test_fit_method_works_with_correct_parameter(self):
pe = ParameterEstimation()
t0 = [2.0]
res = pe.fit(self.lpost, t0)
def test_fit_method_returns_optimization_results_object(self):
pe = ParameterEstimation()
t0 = [2.0]
res = pe.fit(self.lpost, t0)
assert isinstance(res,
OptimizationResults), "res must be of type OptimizationResults"
def test_fit_method_fails_with_too_many_tries(self):
lpost = LogLikelihoodDummy(self.ps.freq, self.ps.power, self.model)
pe = ParameterEstimation()
t0 = [2.0]
with pytest.raises(Exception):
res = pe.fit(lpost, t0, neg=True)
def test_compute_lrt_fails_when_garbage_goes_in(self):
pe = ParameterEstimation()
t0 = [2.0]
with pytest.raises(TypeError):
pe.compute_lrt(self.lpost, t0, None, t0)
with pytest.raises(ValueError):
pe.compute_lrt(self.lpost, t0[:-1], self.lpost, t0)
def test_compute_lrt_sets_max_post_to_false(self):
t0 = [2.0]
pe = ParameterEstimation(max_post=True)
assert pe.max_post is True
delta_deviance = pe.compute_lrt(self.lpost, t0, self.lpost, t0)
assert pe.max_post is False
def test_compute_lrt_computes_deviance_correctly(self):
t0 = [2.0]
pe = ParameterEstimation()
delta_deviance, opt1, opt2 = pe.compute_lrt(self.lpost, t0,
self.lpost, t0)
assert delta_deviance < 1e-7
@pytest.mark.skipif("not can_sample")
def test_sampler_runs(self):
pe = ParameterEstimation()
if os.path.exists("test_corner.pdf"):
os.unlink("test_corner.pdf")
sample_res = pe.sample(self.lpost, [2.0], nwalkers=100, niter=10,
burnin=50, print_results=True, plot=True)
assert os.path.exists("test_corner.pdf")
assert sample_res.acceptance > 0.25
assert isinstance(sample_res, SamplingResults)
@pytest.mark.skipif("can_sample")
def test_sample_raises_error_without_emcee(self):
pe = ParameterEstimation()
with pytest.raises(ImportError):
sample_res = pe.sample(self.lpost, [2.0])
def test_simulate_lrt_fails_in_superclass(self):
pe = ParameterEstimation()
with pytest.raises(NotImplementedError):
pe.simulate_lrts(None, None, None, None, None)
class TestOptimizationResults(object):
@classmethod
def setup_class(cls):
m = 1
nfreq = 100000
freq = np.arange(nfreq)
noise = np.random.exponential(size=nfreq)
power = noise * 2.0
ps = Powerspectrum()
ps.freq = freq
ps.power = power
ps.m = m
ps.df = freq[1] - freq[0]
ps.norm = "leahy"
cls.ps = ps
cls.a_mean, cls.a_var = 2.0, 1.0
cls.model = models.Const1D()
p_amplitude = lambda amplitude: \
scipy.stats.norm(loc=cls.a_mean, scale=cls.a_var).pdf(amplitude)
cls.priors = {"amplitude": p_amplitude}
cls.lpost = PSDPosterior(cls.ps.freq, cls.ps.power,
cls.model, m=cls.ps.m)
cls.lpost.logprior = set_logprior(cls.lpost, cls.priors)
cls.fitmethod = "BFGS"
cls.max_post = True
cls.t0 = [2.0]
cls.neg = True
cls.opt = scipy.optimize.minimize(cls.lpost, cls.t0,
method=cls.fitmethod,
args=cls.neg, tol=1.e-10)
def test_object_initializes(self):
res = OptimizationResults(self.lpost, self.opt, neg=self.neg)
def test_object_has_right_attributes(self):
res = OptimizationResults(self.lpost, self.opt, neg=self.neg)
assert hasattr(res, "p_opt")
assert hasattr(res, "result")
assert hasattr(res, "deviance")
assert hasattr(res, "aic")
assert hasattr(res, "bic")
assert hasattr(res, "model")
assert isinstance(res.model, models.Const1D)
def test_p_opt_is_correct(self):
res = OptimizationResults(self.lpost, self.opt, neg=self.neg)
assert res.p_opt == self.opt.x, "res.p_opt must be the same as opt.x!"
assert np.isclose(res.p_opt[0], 2.0, atol=0.1, rtol=0.1)
def test_model_is_same_as_in_lpost(self):
res = OptimizationResults(self.lpost, self.opt, neg=self.neg)
assert res.model == self.lpost.model
def test_result_is_same_as_in_opt(self):
res = OptimizationResults(self.lpost, self.opt, neg=self.neg)
assert res.result == self.opt.fun
def test_compute_model_works_correctly(self):
res = OptimizationResults(self.lpost, self.opt, neg=self.neg)
mean_model = np.ones_like(self.lpost.x) * self.opt.x[0]
assert np.all(
res.mfit == mean_model), "res.model should be exactly " \
"the model for the data."
def test_compute_criteria_computes_criteria_correctly(self):
res = OptimizationResults(self.lpost, self.opt, neg=self.neg)
test_aic = 169440.83719024697
test_bic = 169245.62163088709
test_deviance = 337950.6823459795
assert np.isclose(res.aic, test_aic, atol=0.1, rtol=0.1)
assert np.isclose(res.bic, test_bic, atol=0.1, rtol=0.1)
assert np.isclose(res.deviance, test_deviance, atol=0.1, rtol=0.1)
def test_merit_calculated_correctly(self):
res = OptimizationResults(self.lpost, self.opt, neg=self.neg)
test_merit = 98770.654981073574
assert np.isclose(res.merit, test_merit, atol=0.1, rtol=0.1)
def test_res_is_of_correct_type(self):
pe = ParameterEstimation()
t0 = [2.0]
res = pe.fit(self.lpost, t0)
assert isinstance(res, OptimizationResults)
class OptimizationResultsSubclassDummy(OptimizationResults):
def __init__(self, lpost, res, neg):
self.neg = neg
if res is not None:
self.result = res.fun
self.p_opt = res.x
else:
self.result = None
self.p_opt = None
self.model = lpost.model
class TestOptimizationResultInternalFunctions(object):
@classmethod
def setup_class(cls):
m = 1
nfreq = 100000
freq = np.linspace(1, 1000, nfreq)
np.random.seed(100) # set the seed for the random number generator
noise = np.random.exponential(size=nfreq)
cls.model = models.PowerLaw1D() + models.Const1D()
cls.model.x_0_0.fixed = True
cls.alpha_0 = 2.0
cls.amplitude_0 = 100.0
cls.amplitude_1 = 2.0
cls.model.alpha_0 = cls.alpha_0
cls.model.amplitude_0 = cls.amplitude_0
cls.model.amplitude_1 = cls.amplitude_1
p = cls.model(freq)
power = noise * p
ps = Powerspectrum()
ps.freq = freq
ps.power = power
ps.m = m
ps.df = freq[1] - freq[0]
ps.norm = "leahy"
cls.ps = ps
cls.a_mean, cls.a_var = 2.0, 1.0
cls.a2_mean, cls.a2_var = 100.0, 10.0
p_amplitude_1 = lambda amplitude: \
scipy.stats.norm(loc=cls.a_mean, scale=cls.a_var).pdf(amplitude)
p_alpha_0 = lambda alpha: \
scipy.stats.uniform(0.0, 5.0).pdf(alpha)
p_amplitude_0 = lambda amplitude: \
scipy.stats.norm(loc=cls.a2_mean, scale=cls.a2_var).pdf(
amplitude)
cls.priors = {"amplitude_1": p_amplitude_1,
"amplitude_0": p_amplitude_0,
"alpha_0": p_alpha_0}
cls.lpost = PSDPosterior(cls.ps.freq, cls.ps.power,
cls.model, m=cls.ps.m)
cls.lpost.logprior = set_logprior(cls.lpost, cls.priors)
cls.fitmethod = "BFGS"
cls.max_post = True
cls.t0 = [cls.amplitude_0, cls.alpha_0, cls.amplitude_1]
cls.neg = True
cls.opt = scipy.optimize.minimize(cls.lpost, cls.t0,
method=cls.fitmethod,
args=cls.neg, tol=1.e-5)
cls.optres = OptimizationResultsSubclassDummy(cls.lpost, cls.opt,
neg=True)
def test_compute_model(self):
optres = OptimizationResultsSubclassDummy(self.lpost, self.opt,
neg=True)
optres._compute_model(self.lpost)
assert hasattr(optres,
"mfit"), "OptimizationResult object should have mfit " \
"attribute at this point!"
_fitter_to_model_params(self.model, self.opt.x)
mfit_test = self.model(self.lpost.x)
assert np.all(optres.mfit == mfit_test)
@pytest.mark.skipif("comp_hessian")
def test_compute_covariance_without_comp_hessian(self):
optres = OptimizationResultsSubclassDummy(self.lpost, None,
neg=True)
optres._compute_covariance(self.lpost, None)
assert optres.cov is None
assert optres.err is None
def test_compute_statistics_computes_mfit(self):
optres = OptimizationResultsSubclassDummy(self.lpost, self.opt,
neg=True)
assert hasattr(optres, "mfit") is False
optres._compute_statistics(self.lpost)
assert hasattr(optres, "mfit")
def test_compute_statistics_computes_all_statistics(self):
optres = OptimizationResultsSubclassDummy(self.lpost, self.opt,
neg=True)
optres._compute_statistics(self.lpost)
assert hasattr(optres, "merit")
assert hasattr(optres, "dof")
assert hasattr(optres, "sexp")
assert hasattr(optres, "ssd")
assert hasattr(optres, "sobs")
def test_compute_statistics_returns_correct_values(self):
test_merit = 99765.718448514497
test_dof = 99997.0
test_sexp = 600000.0
test_ssd = 1095.4451150103323
test_sobs = -154.901207861497
optres = OptimizationResultsSubclassDummy(self.lpost, self.opt,
neg=True)
optres._compute_statistics(self.lpost)
assert np.isclose(test_merit, optres.merit, atol=0.01, rtol=0.01)
assert test_dof == optres.dof
assert test_sexp == optres.sexp
assert test_ssd == optres.ssd
assert np.isclose(test_sobs, optres.sobs, atol=0.01, rtol=0.01)
def test_compute_criteria_returns_correct_attributes(self):
optres = OptimizationResultsSubclassDummy(self.lpost, self.opt,
neg=True)
optres._compute_criteria(self.lpost)
assert hasattr(optres, "aic")
assert hasattr(optres, "bic")
assert hasattr(optres, "deviance")
def test_compute_criteria_returns_correct_values(self):
optres = OptimizationResultsSubclassDummy(self.lpost, self.opt,
neg=True)
optres._compute_criteria(self.lpost)
test_aic = 170988.9174964963
test_bic = 171017.4562728912
test_deviance = 341954.16168676887
assert np.isclose(test_aic, optres.aic)
assert np.isclose(test_bic, optres.bic)
assert np.isclose(test_deviance, optres.deviance)
def test_compute_covariance_with_hess_inverse(self):
optres = OptimizationResultsSubclassDummy(self.lpost, self.opt,
neg=True)
optres._compute_covariance(self.lpost, self.opt)
assert np.all(optres.cov == np.asarray(self.opt.hess_inv))
assert np.all(optres.err == np.sqrt(np.diag(self.opt.hess_inv)))
def test_compute_covariance_without_hess_inverse(self):
fitmethod = "powell"
opt = scipy.optimize.minimize(self.lpost, self.t0,
method=fitmethod,
args=self.neg, tol=1.e-10)
optres = OptimizationResultsSubclassDummy(self.lpost, opt,
neg=True)
optres._compute_covariance(self.lpost, opt)
if comp_hessian:
phess = approx_hess(opt.x, self.lpost)
hess_inv = np.linalg.inv(phess)
assert np.all(optres.cov == hess_inv)
assert np.all(optres.err == np.sqrt(np.diag(np.abs(hess_inv))))
else:
assert optres.cov is None
assert optres.err is None
def test_print_summary_works(self, logger, caplog):
fitmethod = "powell"
opt = scipy.optimize.minimize(self.lpost, self.t0,
method=fitmethod,
args=self.neg, tol=1.e-10)
optres = OptimizationResultsSubclassDummy(self.lpost, opt,
neg=True)
optres._compute_covariance(self.lpost, self.opt)
optres.print_summary(self.lpost)
assert 'Parameter amplitude_0' in caplog.text
assert "Parameter x_0_0" in caplog.text
assert "Parameter alpha_0" in caplog.text
assert "Parameter amplitude_1" in caplog.text
assert "(Fixed)" in caplog.text
assert "Fitting statistics" in caplog.text
assert "number of data points" in caplog.text
assert "Deviance [-2 log L] D =" in caplog.text
assert "The Akaike Information Criterion of " \
"the model is" in caplog.text
assert "The Bayesian Information Criterion of " \
"the model is" in caplog.text
assert "The figure-of-merit function for this model" in caplog.text
assert "Summed Residuals S =" in caplog.text
assert "Expected S" in caplog.text
assert "merit function" in caplog.text
if can_sample:
class SamplingResultsDummy(SamplingResults):
def __init__(self, sampler, ci_min=0.05, ci_max=0.95):
# store all the samples
self.samples = sampler.flatchain
self.nwalkers = np.float(sampler.chain.shape[0])
self.niter = np.float(sampler.iterations)
# store number of dimensions
self.ndim = sampler.dim
# compute and store acceptance fraction
self.acceptance = np.nanmean(sampler.acceptance_fraction)
self.L = self.acceptance * self.samples.shape[0]
class TestSamplingResults(object):
@classmethod
def setup_class(cls):
m = 1
nfreq = 100000
freq = np.arange(nfreq)
noise = np.random.exponential(size=nfreq)
power = noise * 2.0
ps = Powerspectrum()
ps.freq = freq
ps.power = power
ps.m = m
ps.df = freq[1] - freq[0]
ps.norm = "leahy"
cls.ps = ps
cls.a_mean, cls.a_var = 2.0, 1.0
cls.model = models.Const1D()
p_amplitude = lambda amplitude: \
scipy.stats.norm(loc=cls.a_mean, scale=cls.a_var).pdf(
amplitude)
cls.priors = {"amplitude": p_amplitude}
cls.lpost = PSDPosterior(cls.ps.freq, cls.ps.power,
cls.model, m=cls.ps.m)
cls.lpost.logprior = set_logprior(cls.lpost, cls.priors)
cls.fitmethod = "BFGS"
cls.max_post = True
cls.t0 = [2.0]
cls.neg = True
pe = ParameterEstimation()
res = pe.fit(cls.lpost, cls.t0)
cls.nwalkers = 100
cls.niter = 200
np.random.seed(200)
p0 = np.array(
[np.random.multivariate_normal(res.p_opt, res.cov) for
i in range(cls.nwalkers)])
cls.sampler = emcee.EnsembleSampler(cls.nwalkers,
len(res.p_opt), cls.lpost,
args=[False], threads=1)
_, _, _ = cls.sampler.run_mcmc(p0, cls.niter)
def test_can_sample_is_true(self):
assert can_sample
def test_sample_results_object_initializes(self):
SamplingResults(self.sampler)
def test_sample_results_produces_attributes(self):
s = SamplingResults(self.sampler)
assert s.samples.shape[0] == self.nwalkers * self.niter
def test_sampling_results_acceptance_ratio(self):
s = SamplingResults(self.sampler)
assert s.acceptance > 0.25
assert np.isclose(s.L,
s.acceptance * self.nwalkers * self.niter)
def test_check_convergence_works(self):
s = SamplingResultsDummy(self.sampler)
s._check_convergence(self.sampler)
assert hasattr(s, "rhat")
def test_rhat_computes_correct_answer(self):
s = SamplingResults(self.sampler)
rhat_test = 3.81886815e-06
assert np.isclose(rhat_test, s.rhat[0], atol=0.001, rtol=0.001)
def test_infer_works(self):
s = SamplingResultsDummy(self.sampler)
s._infer()
assert hasattr(s, "mean")
assert hasattr(s, "std")
assert hasattr(s, "ci")
def test_infer_computes_correct_values(self):
s = SamplingResults(self.sampler)
test_mean = 2.00190793
test_std = 0.00195719
test_ci = [[1.99435539], [1.9971502]]
assert np.isclose(test_mean, s.mean[0], atol=0.01, rtol=0.01)
assert np.isclose(test_std, s.std[0], atol=0.01, rtol=0.01)
assert np.all(np.isclose(test_ci, s.ci, atol=0.01, rtol=0.01))
@pytest.fixture()
def logger():
logger = logging.getLogger('Some.Logger')
logger.setLevel(logging.INFO)
return logger
class TestPSDParEst(object):
@classmethod
def setup_class(cls):
m = 1
nfreq = 100000
freq = np.linspace(1, 10.0, nfreq)
rng = np.random.RandomState(100) # set the seed for the random number generator
noise = rng.exponential(size=nfreq)
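        # QPO-like test spectrum: a narrow Lorentzian (x_0 = 2, fwhm = 0.1) on
        # top of a flat Leahy noise level of 2, multiplied by exponential noise.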
cls.model = models.Lorentz1D() + models.Const1D()
cls.x_0_0 = 2.0
cls.fwhm_0 = 0.1
cls.amplitude_0 = 100.0
cls.amplitude_1 = 2.0
cls.model.x_0_0 = cls.x_0_0
cls.model.fwhm_0 = cls.fwhm_0
cls.model.amplitude_0 = cls.amplitude_0
cls.model.amplitude_1 = cls.amplitude_1
p = cls.model(freq)
np.random.seed(400)
power = noise*p
ps = Powerspectrum()
ps.freq = freq
ps.power = power
ps.m = m
ps.df = freq[1]-freq[0]
ps.norm = "leahy"
cls.ps = ps
cls.a_mean, cls.a_var = 2.0, 1.0
cls.a2_mean, cls.a2_var = 100.0, 10.0
p_amplitude_1 = lambda amplitude: \
scipy.stats.norm(loc=cls.a_mean, scale=cls.a_var).pdf(amplitude)
p_x_0_0 = lambda alpha: \
scipy.stats.uniform(0.0, 5.0).pdf(alpha)
p_fwhm_0 = lambda alpha: \
scipy.stats.uniform(0.0, 0.5).pdf(alpha)
p_amplitude_0 = lambda amplitude: \
scipy.stats.norm(loc=cls.a2_mean, scale=cls.a2_var).pdf(amplitude)
cls.priors = {"amplitude_1": p_amplitude_1,
"amplitude_0": p_amplitude_0,
"x_0_0": p_x_0_0,
"fwhm_0": p_fwhm_0}
cls.lpost = PSDPosterior(cls.ps.freq, cls.ps.power,
cls.model, m=cls.ps.m)
cls.lpost.logprior = set_logprior(cls.lpost, cls.priors)
cls.fitmethod = "BFGS"
cls.max_post = True
cls.t0 = [cls.x_0_0, cls.fwhm_0, cls.amplitude_0, cls.amplitude_1]
cls.neg = True
def test_fitting_with_ties_and_bounds(self, capsys):
double_f = lambda model : model.x_0_0 * 2
model = self.model.copy()
model += models.Lorentz1D(amplitude=model.amplitude_0,
x_0 = model.x_0_0 * 2,
fwhm = model.fwhm_0)
model.x_0_0 = self.model.x_0_0
model.amplitude_0 = self.model.amplitude_0
model.amplitude_1 = self.model.amplitude_1
model.fwhm_0 = self.model.fwhm_0
model.x_0_2.tied = double_f
model.fwhm_0.bounds = [0, 10]
model.amplitude_0.fixed = True
p = model(self.ps.freq)
noise = np.random.exponential(size=len(p))
power = noise*p
ps = Powerspectrum()
ps.freq = self.ps.freq
ps.power = power
ps.m = self.ps.m
ps.df = self.ps.df
ps.norm = "leahy"
pe = PSDParEst(ps, fitmethod="TNC")
llike = PSDLogLikelihood(ps.freq, ps.power, model)
true_pars = [self.x_0_0, self.fwhm_0,
self.amplitude_1,
model.amplitude_2.value,
model.fwhm_2.value]
res = pe.fit(llike, true_pars, neg=True)
compare_pars = [self.x_0_0, self.fwhm_0,
self.amplitude_1,
model.amplitude_2.value,
model.fwhm_2.value]
assert np.allclose(compare_pars, res.p_opt, rtol=0.5)
def test_par_est_initializes(self):
pe = PSDParEst(self.ps)
def test_parest_stores_max_post_correctly(self):
"""
Make sure the keyword for Maximum A Posteriori fits is stored correctly
as a default.
"""
pe = PSDParEst(self.ps)
assert pe.max_post is True, "max_post should be set to True as a default."
def test_fit_fails_when_object_is_not_posterior_or_likelihood(self):
x = np.ones(10)
y = np.ones(10)
pe = PSDParEst(self.ps)
with pytest.raises(TypeError):
res = pe.fit(x, y)
def test_fit_fails_without_lpost_or_t0(self):
pe = PSDParEst(self.ps)
with pytest.raises(TypeError):
res = pe.fit()
def test_fit_fails_without_t0(self):
pe = PSDParEst(self.ps)
with pytest.raises(TypeError):
res = pe.fit(np.ones(10))
def test_fit_fails_with_incorrect_number_of_parameters(self):
pe = PSDParEst(self.ps)
t0 = [1,2]
with pytest.raises(ValueError):
res = pe.fit(self.lpost, t0)
def test_fit_method_works_with_correct_parameter(self):
pe = PSDParEst(self.ps)
lpost = PSDPosterior(self.ps.freq, self.ps.power,
self.model, self.priors, m=self.ps.m)
t0 = [2.0, 1, 1, 1]
res = pe.fit(lpost, t0)
def test_fit_method_returns_optimization_results_object(self):
pe = PSDParEst(self.ps)
t0 = [2.0, 1, 1, 1]
res = pe.fit(self.lpost, t0)
assert isinstance(res, OptimizationResults), "res must be of type " \
"OptimizationResults"
def test_plotfits_leahy(self):
pe = PSDParEst(self.ps)
t0 = [2.0, 1, 1, 1]
lpost = PSDPosterior(self.ps.freq, self.ps.power,
self.model, self.priors, m=self.ps.m)
res = pe.fit(lpost, t0)
pe.plotfits(res, save_plot=True)
assert os.path.exists("test_ps_fit.png")
os.unlink("test_ps_fit.png")
def test_plotfits_log_leahy(self):
pe = PSDParEst(self.ps)
t0 = [2.0, 1, 1, 1]
res = pe.fit(self.lpost, t0)
pe.plotfits(res, save_plot=True, log=True)
assert os.path.exists("test_ps_fit.png")
os.unlink("test_ps_fit.png")
def test_plotfits_rms(self):
t0 = [2.0, 1, 1, 1]
ps = Powerspectrum()
ps.freq = self.ps.freq
ps.power = self.ps.power
ps.m = self.ps.m
ps.df = self.ps.df
ps.norm = "rms"
pe = PSDParEst(ps)
res = pe.fit(self.lpost, t0)
pe.plotfits(res, res2=res, save_plot=True)
assert os.path.exists("test_ps_fit.png")
os.unlink("test_ps_fit.png")
def test_plotfits_log_rms(self):
ps = Powerspectrum()
ps.freq = self.ps.freq
ps.power = self.ps.power
ps.m = self.ps.m
ps.df = self.ps.df
ps.norm = "rms"
pe = PSDParEst(ps)
t0 = [2.0, 1, 1, 1]
res = pe.fit(self.lpost, t0)
pe.plotfits(res, res2=res, save_plot=True, log=True)
assert os.path.exists("test_ps_fit.png")
os.unlink("test_ps_fit.png")
def test_plotfits_pow(self):
t0 = [2.0, 1, 1, 1]
ps = Powerspectrum()
ps.freq = self.ps.freq
ps.power = self.ps.power
ps.m = self.ps.m
ps.df = self.ps.df
ps.norm = "none"
pe = PSDParEst(ps)
res = pe.fit(self.lpost, t0)
pe.plotfits(res, res2=res, save_plot=True)
assert os.path.exists("test_ps_fit.png")
os.unlink("test_ps_fit.png")
def test_plotfits_log_pow(self):
ps = Powerspectrum()
ps.freq = self.ps.freq
ps.power = self.ps.power
ps.m = self.ps.m
ps.df = self.ps.df
ps.norm = "none"
pe = PSDParEst(ps)
t0 = [2.0, 1, 1, 1]
res = pe.fit(self.lpost, t0)
pe.plotfits(res, res2=res, save_plot=True, log=True)
assert os.path.exists("test_ps_fit.png")
os.unlink("test_ps_fit.png")
def test_compute_lrt_fails_when_garbage_goes_in(self):
pe = PSDParEst(self.ps)
t0 = [2.0, 1, 1, 1]
with pytest.raises(TypeError):
pe.compute_lrt(self.lpost, t0, None, t0)
with pytest.raises(ValueError):
pe.compute_lrt(self.lpost, t0[:-1], self.lpost, t0)
def test_compute_lrt_sets_max_post_to_false(self):
t0 = [2.0, 1, 1, 1]
pe = PSDParEst(self.ps, max_post=True)
assert pe.max_post is True
delta_deviance, _, _ = pe.compute_lrt(self.lpost, t0, self.lpost, t0)
assert pe.max_post is False
def test_compute_lrt_computes_deviance_correctly(self):
t0 = [2.0, 1, 1, 1]
pe = PSDParEst(self.ps, max_post=True)
# MB: This is a little too random
delta_deviance, _, _ = pe.compute_lrt(self.lpost, t0,
self.lpost, t0)
assert np.absolute(delta_deviance) < 1.5e-4
@pytest.mark.skipif("not can_sample")
def test_sampler_runs(self):
pe = PSDParEst(self.ps)
sample_res = pe.sample(self.lpost, [2.0, 0.1, 100, 2.0], nwalkers=50,
niter=10, burnin=15, print_results=True,
plot=True)
assert os.path.exists("test_corner.pdf")
os.unlink("test_corner.pdf")
assert sample_res.acceptance > 0.25
assert isinstance(sample_res, SamplingResults)
def test_generate_model_data(self):
pe = PSDParEst(self.ps)
m = self.model
_fitter_to_model_params(m, self.t0)
model = m(self.ps.freq)
pe_model = pe._generate_model(self.lpost, [2.0, 0.1, 100, 2.0])
assert np.allclose(model, pe_model)
def generate_data_rng_object_works(self):
pe = PSDParEst(self.ps)
sim_data1 = pe._generate_data(self.lpost,
[2.0, 0.1, 100.0, 2.0],
seed=1)
sim_data2 = pe._generate_data(self.lpost,
[2.0, 0.1, 100.0, 2.0],
seed=1)
assert np.allclose(sim_data1.power, sim_data2.power)
def test_generate_data_produces_correct_distribution(self):
model = models.Const1D()
model.amplitude = 2.0
p = model(self.ps.freq)
seed = 100
rng = np.random.RandomState(seed)
noise = rng.exponential(size=len(p))
power = noise*p
ps = Powerspectrum()
ps.freq = self.ps.freq
ps.power = power
ps.m = 1
ps.df = self.ps.freq[1]-self.ps.freq[0]
ps.norm = "leahy"
lpost = PSDLogLikelihood(ps.freq, ps.power, model, m=1)
pe = PSDParEst(ps)
rng2 = np.random.RandomState(seed)
sim_data = pe._generate_data(lpost, [2.0], rng2)
assert np.allclose(ps.power, sim_data.power)
def test_generate_model_breaks_with_wrong_input(self):
pe = PSDParEst(self.ps)
with pytest.raises(AssertionError):
pe_model = pe._generate_model([1, 2, 3, 4], [1, 2, 3, 4])
def test_generate_model_breaks_for_wrong_number_of_parameters(self):
pe = PSDParEst(self.ps)
with pytest.raises(AssertionError):
pe_model = pe._generate_model(self.lpost, [1, 2, 3])
def test_compute_lrt_works(self):
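        # Fit a constant model and a power law + constant model to pure white
        # noise, then check that the observed likelihood ratio and the
        # simulated LRT values stay in the range expected when the simpler
        # model is the true one.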
m = 1
nfreq = 100000
freq = np.linspace(1, 10, nfreq)
rng = np.random.RandomState(100)
noise = rng.exponential(size=nfreq)
model = models.Const1D()
model.amplitude = 2.0
p = model(freq)
power = noise * p
ps = Powerspectrum()
ps.freq = freq
ps.power = power
ps.m = m
ps.df = freq[1] - freq[0]
ps.norm = "leahy"
        loglike = PSDLogLikelihood(ps.freq, ps.power, model, m=1)
s_all = np.atleast_2d(np.ones(10) * 2.0).T
model2 = models.PowerLaw1D() + models.Const1D()
model2.x_0_0.fixed = True
loglike2 = PSDLogLikelihood(ps.freq, ps.power, model2, 1)
pe = PSDParEst(ps)
lrt_obs, res1, res2 = pe.compute_lrt(loglike, [2.0], loglike2,
[2.0, 1.0, 2.0], neg=True)
lrt_sim = pe.simulate_lrts(s_all, loglike, [2.0], loglike2,
[2.0, 1.0, 2.0], max_post=False,
seed=100)
assert (lrt_obs > 0.4) and (lrt_obs < 0.6)
assert np.all(lrt_sim < 10.0) and np.all(lrt_sim > 0.01)
def test_compute_lrt_fails_with_wrong_input(self):
pe = PSDParEst(self.ps)
with pytest.raises(AssertionError):
lrt_sim = pe.simulate_lrts(np.arange(10), self.lpost, [1, 2, 3, 4],
[1, 2, 3, 4], [1, 2, 3, 4])
def test_pvalue_calculated_correctly(self):
a = [1, 1, 1, 2]
obs_val = 1.5
pe = PSDParEst(self.ps)
pval = pe._compute_pvalue(obs_val, a)
assert np.isclose(pval, 1./len(a))
def test_calibrate_lrt_fails_without_lpost_objects(self):
pe = PSDParEst(self.ps)
with pytest.raises(TypeError):
pval = pe.calibrate_lrt(self.lpost, [1, 2, 3, 4],
np.arange(10), np.arange(4))
def test_calibrate_lrt_fails_with_wrong_parameters(self):
pe = PSDParEst(self.ps)
with pytest.raises(ValueError):
pval = pe.calibrate_lrt(self.lpost, [1, 2, 3, 4],
self.lpost, [1, 2, 3])
def test_calibrate_lrt_works_as_expected(self):
m = 1
nfreq = 100000
freq = np.linspace(1, 10, nfreq)
rng = np.random.RandomState(100)
noise = rng.exponential(size=nfreq)
model = models.Const1D()
model.amplitude = 2.0
p = model(freq)
power = noise * p
ps = Powerspectrum()
ps.freq = freq
ps.power = power
ps.m = m
ps.df = freq[1] - freq[0]
ps.norm = "leahy"
loglike = PSDLogLikelihood(ps.freq, ps.power, model, m=1)
s_all = np.atleast_2d(np.ones(10) * 2.0).T
model2 = models.PowerLaw1D() + models.Const1D()
model2.x_0_0.fixed = True
loglike2 = PSDLogLikelihood(ps.freq, ps.power, model2, 1)
pe = PSDParEst(ps)
pval = pe.calibrate_lrt(loglike, [2.0], loglike2,
[2.0, 1.0, 2.0], sample=s_all,
max_post=False, nsim=10,
seed=100)
assert pval > 0.001
def test_calibrate_lrt_works_with_mvn(self):
m = 1
nfreq = 100000
freq = np.linspace(1, 10, nfreq)
rng = np.random.RandomState(100)
noise = rng.exponential(size=nfreq)
model = models.Const1D()
model.amplitude = 2.0
p = model(freq)
power = noise * p
ps = Powerspectrum()
ps.freq = freq
ps.power = power
ps.m = m
ps.df = freq[1] - freq[0]
ps.norm = "leahy"
loglike = PSDLogLikelihood(ps.freq, ps.power, model, m=1)
model2 = models.PowerLaw1D() + models.Const1D()
model2.x_0_0.fixed = True
loglike2 = PSDLogLikelihood(ps.freq, ps.power, model2, 1)
pe = PSDParEst(ps)
pval = pe.calibrate_lrt(loglike, [2.0], loglike2,
[2.0, 1.0, 2.0], sample=None,
max_post=False, nsim=10,
seed=100)
assert pval > 0.001
@pytest.mark.skipif("not can_sample")
def test_calibrate_lrt_works_with_sampling(self):
m = 1
nfreq = 100000
freq = np.linspace(1, 10, nfreq)
rng = np.random.RandomState(100)
noise = rng.exponential(size=nfreq)
model = models.Const1D()
model.amplitude = 2.0
p = model(freq)
power = noise * p
ps = Powerspectrum()
ps.freq = freq
ps.power = power
ps.m = m
ps.df = freq[1] - freq[0]
ps.norm = "leahy"
lpost = PSDPosterior(ps.freq, ps.power, model, m=1)
p_amplitude_1 = lambda amplitude: \
scipy.stats.norm(loc=2.0, scale=1.0).pdf(amplitude)
p_alpha_0 = lambda alpha: \
scipy.stats.uniform(0.0, 5.0).pdf(alpha)
p_amplitude_0 = lambda amplitude: \
scipy.stats.norm(loc=self.a2_mean, scale=self.a2_var).pdf(
amplitude)
priors = {"amplitude": p_amplitude_1}
priors2 = {"amplitude_1": p_amplitude_1,
"amplitude_0": p_amplitude_0,
"alpha_0": p_alpha_0}
lpost.logprior = set_logprior(lpost, priors)
model2 = models.PowerLaw1D() + models.Const1D()
model2.x_0_0.fixed = True
lpost2 = PSDPosterior(ps.freq, ps.power, model2, 1)
lpost2.logprior = set_logprior(lpost2, priors2)
pe = PSDParEst(ps)
pval = pe.calibrate_lrt(lpost, [2.0], lpost2,
[2.0, 1.0, 2.0], sample=None,
max_post=True, nsim=10, nwalkers=100,
burnin=100, niter=20,
seed=100)
assert pval > 0.001
def test_find_highest_outlier_works_as_expected(self):
mp_ind = 5
max_power = 1000.0
ps = Powerspectrum()
ps.freq = np.arange(10)
ps.power = np.ones_like(ps.freq)
ps.power[mp_ind] = max_power
ps.m = 1
ps.df = ps.freq[1]-ps.freq[0]
ps.norm = "leahy"
pe = PSDParEst(ps)
max_x, max_ind = pe._find_outlier(ps.freq, ps.power, max_power)
assert np.isclose(max_x, ps.freq[mp_ind])
assert max_ind == mp_ind
def test_compute_highest_outlier_works(self):
mp_ind = 5
max_power = 1000.0
ps = Powerspectrum()
ps.freq = np.arange(10)
ps.power = np.ones_like(ps.freq)
ps.power[mp_ind] = max_power
ps.m = 1
ps.df = ps.freq[1]-ps.freq[0]
ps.norm = "leahy"
model = models.Const1D()
p_amplitude = lambda amplitude: \
scipy.stats.norm(loc=1.0, scale=1.0).pdf(
amplitude)
priors = {"amplitude": p_amplitude}
lpost = PSDPosterior(ps.freq, ps.power, model, 1)
lpost.logprior = set_logprior(lpost, priors)
pe = PSDParEst(ps)
res = pe.fit(lpost, [1.0])
res.mfit = np.ones_like(ps.freq)
max_y, max_x, max_ind = pe._compute_highest_outlier(lpost, res)
assert np.isclose(max_y[0], 2*max_power)
assert np.isclose(max_x[0], ps.freq[mp_ind])
assert max_ind == mp_ind
def test_simulate_highest_outlier_works(self):
m = 1
nfreq = 100000
seed = 100
freq = np.linspace(1, 10, nfreq)
rng = np.random.RandomState(seed)
noise = rng.exponential(size=nfreq)
model = models.Const1D()
model.amplitude = 2.0
p = model(freq)
power = noise * p
ps = Powerspectrum()
ps.freq = freq
ps.power = power
ps.m = m
ps.df = freq[1] - freq[0]
ps.norm = "leahy"
nsim = 10
loglike = PSDLogLikelihood(ps.freq, ps.power, model, m=1)
s_all = np.atleast_2d(np.ones(nsim) * 2.0).T
pe = PSDParEst(ps)
res = pe.fit(loglike, [2.0], neg=True)
maxpow_sim = pe.simulate_highest_outlier(s_all, loglike, [2.0],
max_post=False, seed=seed)
assert maxpow_sim.shape[0] == nsim
assert np.all(maxpow_sim > 20.00) and np.all(maxpow_sim < 31.0)
def test_calibrate_highest_outlier_works(self):
m = 1
nfreq = 100000
seed = 100
freq = np.linspace(1, 10, nfreq)
rng = np.random.RandomState(seed)
noise = rng.exponential(size=nfreq)
model = models.Const1D()
model.amplitude = 2.0
p = model(freq)
power = noise * p
ps = Powerspectrum()
ps.freq = freq
ps.power = power
ps.m = m
ps.df = freq[1] - freq[0]
ps.norm = "leahy"
nsim = 10
loglike = PSDLogLikelihood(ps.freq, ps.power, model, m=1)
s_all = np.atleast_2d(np.ones(nsim) * 2.0).T
pe = PSDParEst(ps)
pval = pe.calibrate_highest_outlier(loglike, [2.0], sample=s_all,
max_post=False, seed=seed)
assert pval > 0.001
def test_calibrate_highest_outlier_works_with_mvn(self):
m = 1
nfreq = 100000
seed = 100
freq = np.linspace(1, 10, nfreq)
rng = np.random.RandomState(seed)
noise = rng.exponential(size=nfreq)
model = models.Const1D()
model.amplitude = 2.0
p = model(freq)
power = noise * p
ps = Powerspectrum()
ps.freq = freq
ps.power = power
ps.m = m
ps.df = freq[1] - freq[0]
ps.norm = "leahy"
nsim = 10
loglike = PSDLogLikelihood(ps.freq, ps.power, model, m=1)
pe = PSDParEst(ps)
pval = pe.calibrate_highest_outlier(loglike, [2.0], sample=None,
max_post=False, seed=seed,
nsim=nsim)
assert pval > 0.001
@pytest.mark.skipif("not can_sample")
def test_calibrate_highest_outlier_works_with_sampling(self):
m = 1
nfreq = 100000
seed = 100
freq = np.linspace(1, 10, nfreq)
rng = np.random.RandomState(seed)
noise = rng.exponential(size=nfreq)
model = models.Const1D()
model.amplitude = 2.0
p = model(freq)
power = noise * p
ps = Powerspectrum()
ps.freq = freq
ps.power = power
ps.m = m
ps.df = freq[1] - freq[0]
ps.norm = "leahy"
nsim = 10
lpost = PSDPosterior(ps.freq, ps.power, model, m=1)
p_amplitude = lambda amplitude: \
scipy.stats.norm(loc=1.0, scale=1.0).pdf(
amplitude)
priors = {"amplitude": p_amplitude}
lpost.logprior = set_logprior(lpost, priors)
pe = PSDParEst(ps)
pval = pe.calibrate_highest_outlier(lpost, [2.0], sample=None,
max_post=True, seed=seed,
nsim=nsim, niter=20, nwalkers=100,
burnin=100)
assert pval > 0.001
|
{
"content_hash": "b8f1c5c05709a07f6344965cfdf010ec",
"timestamp": "",
"source": "github",
"line_count": 1387,
"max_line_length": 89,
"avg_line_length": 31.346070656092287,
"alnum_prop": 0.5453458150286359,
"repo_name": "pabell/stingray",
"id": "677ff3ce5726a0a8d8dda9b4f365c48d43012c5e",
"size": "43477",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "stingray/modeling/tests/test_parameterestimation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "689248"
}
],
"symlink_target": ""
}
|
import paramiko
import os
def open_ssh_session(ip_address, dns_address, login, passw):
    '''Open an SSH session to the device, preferring the IP address over the DNS name.'''
ssh = paramiko.SSHClient()
# https://github.com/onyxfish/relay/issues/11
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    if ip_address:  # a device record carries either an IP address or a DNS name, so use whichever one is set
ssh.connect(ip_address, username=login, password=passw)
else:
ssh.connect(dns_address, username=login, password=passw)
return ssh
def create_backup_folder(STATIC_ROOT, corp_folder, site_folder, device_folder):
'''create folder for future save location of the file'''
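    # Backups are stored under:
    #   <STATIC_ROOT>/customers/<corp_folder>/<site_folder>/backups/<device_folder>/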
path_to_backup_file = "%s/customers/%s/%s/backups/%s" % (STATIC_ROOT, corp_folder,
site_folder, device_folder)
if not os.path.exists(path_to_backup_file):
os.makedirs(path_to_backup_file)
return path_to_backup_file
# def save_config_to_file(path_to_backup_file, file_name, stdout):
# ''' save output data into file that will be used as backup '''
# with open(os.path.join(path_to_backup_file, str(file_name)), 'w') as f:
# f.write(str(stdout.read()))
|
{
"content_hash": "4d0c816ec11c2d9528977521c10e9f40",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 110,
"avg_line_length": 40.29032258064516,
"alnum_prop": 0.655724579663731,
"repo_name": "Landver/netmon",
"id": "0780ad5e83310d9f5ae3aee468e5c66d4a976744",
"size": "1249",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/backups/common_scripts.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2142"
},
{
"name": "HTML",
"bytes": "43874"
},
{
"name": "JavaScript",
"bytes": "3841"
},
{
"name": "Python",
"bytes": "76302"
},
{
"name": "Shell",
"bytes": "4188"
}
],
"symlink_target": ""
}
|
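# mitmproxy inline script example: append a "mitmproxy=rocks" field to every
# urlencoded form body before the request is forwarded.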
def request(context, flow):
if "application/x-www-form-urlencoded" in flow.request.headers["content-type"]:
form = flow.request.get_form_urlencoded()
form["mitmproxy"] = ["rocks"]
flow.request.set_form_urlencoded(form)
|
{
"content_hash": "e10fb2971ef8bd7474d9e2a375ca8d37",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 83,
"avg_line_length": 49.2,
"alnum_prop": 0.6707317073170732,
"repo_name": "devasia1000/anti_adblock",
"id": "3d93e392d7f2d0d8f599692e3eab60999c509fdb",
"size": "247",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "examples/modify_form.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "183315"
},
{
"name": "JavaScript",
"bytes": "1393841"
},
{
"name": "Python",
"bytes": "810543"
},
{
"name": "Shell",
"bytes": "3540"
}
],
"symlink_target": ""
}
|
from .network_interfaces_operations import NetworkInterfacesOperations
from .application_gateways_operations import ApplicationGatewaysOperations
from .express_route_circuit_authorizations_operations import ExpressRouteCircuitAuthorizationsOperations
from .express_route_circuit_peerings_operations import ExpressRouteCircuitPeeringsOperations
from .express_route_circuits_operations import ExpressRouteCircuitsOperations
from .express_route_service_providers_operations import ExpressRouteServiceProvidersOperations
from .load_balancers_operations import LoadBalancersOperations
from .network_security_groups_operations import NetworkSecurityGroupsOperations
from .security_rules_operations import SecurityRulesOperations
from .network_watchers_operations import NetworkWatchersOperations
from .packet_captures_operations import PacketCapturesOperations
from .public_ip_addresses_operations import PublicIPAddressesOperations
from .route_filters_operations import RouteFiltersOperations
from .route_filter_rules_operations import RouteFilterRulesOperations
from .route_tables_operations import RouteTablesOperations
from .routes_operations import RoutesOperations
from .bgp_service_communities_operations import BgpServiceCommunitiesOperations
from .usages_operations import UsagesOperations
from .virtual_networks_operations import VirtualNetworksOperations
from .subnets_operations import SubnetsOperations
from .virtual_network_peerings_operations import VirtualNetworkPeeringsOperations
from .virtual_network_gateways_operations import VirtualNetworkGatewaysOperations
from .virtual_network_gateway_connections_operations import VirtualNetworkGatewayConnectionsOperations
from .local_network_gateways_operations import LocalNetworkGatewaysOperations
__all__ = [
'NetworkInterfacesOperations',
'ApplicationGatewaysOperations',
'ExpressRouteCircuitAuthorizationsOperations',
'ExpressRouteCircuitPeeringsOperations',
'ExpressRouteCircuitsOperations',
'ExpressRouteServiceProvidersOperations',
'LoadBalancersOperations',
'NetworkSecurityGroupsOperations',
'SecurityRulesOperations',
'NetworkWatchersOperations',
'PacketCapturesOperations',
'PublicIPAddressesOperations',
'RouteFiltersOperations',
'RouteFilterRulesOperations',
'RouteTablesOperations',
'RoutesOperations',
'BgpServiceCommunitiesOperations',
'UsagesOperations',
'VirtualNetworksOperations',
'SubnetsOperations',
'VirtualNetworkPeeringsOperations',
'VirtualNetworkGatewaysOperations',
'VirtualNetworkGatewayConnectionsOperations',
'LocalNetworkGatewaysOperations',
]
|
{
"content_hash": "c6018bfa6f47aef46f0764c108dfca52",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 104,
"avg_line_length": 51.549019607843135,
"alnum_prop": 0.8535564853556485,
"repo_name": "AutorestCI/azure-sdk-for-python",
"id": "9fc62b57d75af13897e776c1f2d493c9e60ff59c",
"size": "3103",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "azure-mgmt-network/azure/mgmt/network/v2016_12_01/operations/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34619070"
}
],
"symlink_target": ""
}
|
"""Tests for io_utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import six
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras.utils import io_utils
from tensorflow.python.platform import test
class TestIOUtils(keras_parameterized.TestCase):
def test_ask_to_proceed_with_overwrite(self):
with test.mock.patch.object(six.moves, 'input') as mock_log:
mock_log.return_value = 'y'
self.assertTrue(io_utils.ask_to_proceed_with_overwrite('/tmp/not_exists'))
mock_log.return_value = 'n'
self.assertFalse(
io_utils.ask_to_proceed_with_overwrite('/tmp/not_exists'))
mock_log.side_effect = ['m', 'y']
self.assertTrue(io_utils.ask_to_proceed_with_overwrite('/tmp/not_exists'))
mock_log.side_effect = ['m', 'n']
self.assertFalse(
io_utils.ask_to_proceed_with_overwrite('/tmp/not_exists'))
def test_path_to_string(self):
class PathLikeDummy(object):
def __fspath__(self):
return 'dummypath'
dummy = object()
if sys.version_info >= (3, 4):
from pathlib import Path # pylint:disable=g-import-not-at-top
# conversion of PathLike
self.assertEqual(io_utils.path_to_string(Path('path')), 'path')
if sys.version_info >= (3, 6):
self.assertEqual(io_utils.path_to_string(PathLikeDummy()), 'dummypath')
# pass-through, works for all versions of python
self.assertEqual(io_utils.path_to_string('path'), 'path')
self.assertIs(io_utils.path_to_string(dummy), dummy)
if __name__ == '__main__':
test.main()
|
{
"content_hash": "fc08d6357795a3c3ed387ded01e0f210",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 80,
"avg_line_length": 30.10909090909091,
"alnum_prop": 0.6708937198067633,
"repo_name": "cxxgtxy/tensorflow",
"id": "a0ead4ee623c4974866f9e6c155aac14d1a78338",
"size": "2345",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "tensorflow/python/keras/utils/io_utils_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "7481"
},
{
"name": "C",
"bytes": "186817"
},
{
"name": "C++",
"bytes": "24882047"
},
{
"name": "CMake",
"bytes": "164374"
},
{
"name": "Go",
"bytes": "854846"
},
{
"name": "HTML",
"bytes": "564161"
},
{
"name": "Java",
"bytes": "307246"
},
{
"name": "Jupyter Notebook",
"bytes": "1833659"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "37393"
},
{
"name": "Objective-C",
"bytes": "7037"
},
{
"name": "Objective-C++",
"bytes": "64142"
},
{
"name": "Protocol Buffer",
"bytes": "225621"
},
{
"name": "Python",
"bytes": "22009999"
},
{
"name": "Shell",
"bytes": "341543"
},
{
"name": "TypeScript",
"bytes": "797437"
}
],
"symlink_target": ""
}
|
"""\
grip.command
~~~~~~~~~~~~
Implements the command-line interface for Grip.
Usage:
grip [options] [<path>] [<address>]
grip -V | --version
grip -h | --help
Where:
<path> is a file to render or a directory containing README.md (- for stdin)
<address> is what to listen on, of the form <host>[:<port>], or just <port>
Options:
--user-content Render as user-content like comments or issues.
--context=<repo> The repository context, only taken into account
when using --user-content.
--user=<username> A GitHub username for API authentication. If used
without the --pass option, an upcoming password
input will be necessary.
--pass=<password> A GitHub password or auth token for API auth.
--wide Renders wide, i.e. when the side nav is collapsed.
This only takes effect when --user-content is used.
--clear Clears the cached styles and assets and exits.
--export Exports to <path>.html or README.md instead of
serving, optionally using [<address>] as the out
file (- for stdout).
    --no-inline           Link to styles instead of inlining them when using --export.
-b --browser Open a tab in the browser after the server starts.
--api-url=<url> Specify a different base URL for the github API,
for example that of a Github Enterprise instance.
Default is the public API: https://api.github.com
--title=<title> Manually sets the page's title.
The default is the filename.
--norefresh Do not automatically refresh the Readme content when
the file changes.
--quiet Do not print to the terminal.
"""
from __future__ import print_function
import sys
import mimetypes
import socket
import errno
from docopt import docopt
from getpass import getpass
from path_and_address import resolve, split_address
from . import __version__
from .api import clear_cache, export, serve
from .exceptions import ReadmeNotFoundError
usage = '\n\n\n'.join(__doc__.split('\n\n\n')[1:])
version = 'Grip ' + __version__
def main(argv=None, force_utf8=True, patch_svg=True):
"""
The entry point of the application.
"""
if force_utf8 and sys.version_info[0] == 2:
reload(sys) # noqa
sys.setdefaultencoding('utf-8')
if patch_svg and sys.version_info[0] == 2 and sys.version_info[1] <= 6:
mimetypes.add_type('image/svg+xml', '.svg')
if argv is None:
argv = sys.argv[1:]
# Show specific errors
if '-a' in argv or '--address' in argv:
print('Use grip [options] <path> <address> instead of -a')
print('See grip -h for details')
return 2
if '-p' in argv or '--port' in argv:
print('Use grip [options] [<path>] [<hostname>:]<port> instead of -p')
print('See grip -h for details')
return 2
# Parse options
args = docopt(usage, argv=argv, version=version)
# Handle printing version with -V (docopt handles --version)
if args['-V']:
print(version)
return 0
# Clear the cache
if args['--clear']:
clear_cache()
return 0
# Get password from prompt if necessary
password = args['--pass']
if args['--user'] and not password:
password = getpass()
# Export to a file instead of running a server
if args['--export']:
try:
export(args['<path>'], args['--user-content'], args['--context'],
args['--user'], password, False, args['--wide'],
not args['--no-inline'], args['<address>'],
args['--api-url'], args['--title'], args['--quiet'])
return 0
except ReadmeNotFoundError as ex:
print('Error:', ex)
return 1
# Parse arguments
path, address = resolve(args['<path>'], args['<address>'])
host, port = split_address(address)
# Validate address
    if address and not host and port is None:
        print('Error: Invalid address', repr(address))
        return 2
# Run server
try:
serve(path, host, port, args['--user-content'], args['--context'],
args['--user'], password, False, args['--wide'], False,
args['--api-url'], args['--title'], not args['--norefresh'],
args['--browser'], args['--quiet'], None)
return 0
except ReadmeNotFoundError as ex:
print('Error:', ex)
return 1
except socket.error as ex:
print('Error:', ex)
if ex.errno == errno.EADDRINUSE:
print('This port is in use. Is a grip server already running? '
'Stop that instance or specify another port here.')
return 1
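# Illustrative wiring only: the installed `grip` console script normally calls
# main(); invoking this module directly would amount to the following.
if __name__ == '__main__':
    sys.exit(main())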
|
{
"content_hash": "a73cc93a66a00f1eec64435fbb3059d6",
"timestamp": "",
"source": "github",
"line_count": 139,
"max_line_length": 78,
"avg_line_length": 34.510791366906474,
"alnum_prop": 0.5899520533666875,
"repo_name": "joeyespo/grip",
"id": "3fcb0ded4c172d84d24fdfa8660f77d01a1b3b50",
"size": "4797",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "grip/command.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "9661"
},
{
"name": "Python",
"bytes": "81960"
}
],
"symlink_target": ""
}
|
"""Tests for functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.python.framework import function
from tensorflow.python.ops import functional_ops
class FunctionTest(tf.test.TestCase):
def _mat(self, x):
return np.array([x]).astype("float32").reshape([1, 1])
def testBasic(self):
g = tf.Graph()
# Define a function
# foo(a:float, b:float, c:float)->u:float,v:float,w:float
# u = matmul(a, b) + c
# v = u^2
# w = u + v
# TODO(zhifengc): replaces w/ a nicer @decorator sugar.
foo = tf.Graph()
with foo.as_default():
a = tf.placeholder(tf.float32, name="a")
b = tf.placeholder(tf.float32, name="b")
c = tf.placeholder(tf.float32, name="c")
u = tf.add(tf.matmul(a, b), c, name="u")
v = tf.square(u, name="v")
w = tf.add_n([u, v], name="w")
fdef = function.graph_to_function_def(foo, "foo", [a, b, c], [u, v, w])
g._add_function(fdef)
# Compute 2 * 3 + 4 and its square.
with g.as_default(), tf.Session() as sess:
two = tf.constant(self._mat(2.0), name="two")
three = tf.constant(self._mat(3.0), name="three")
four = tf.constant(self._mat(4.0), name="four")
# TODO(zhifengc): w/ @decorator sugar, we will just do:
# y, s, t = foo_func(two, three, four)
# The graph contains two ops each of which calls foo.
u0, v0, w0 = g.create_op("foo",
[two, three, four],
[tf.float32, tf.float32, tf.float32],
compute_shapes=False).outputs
u1, v1, w1 = g.create_op("foo",
[four, two, three],
[tf.float32, tf.float32, tf.float32],
compute_shapes=False).outputs
# Checks some property of the graph def.
gdef = g.as_graph_def()
self.assertEqual(len(gdef.node), 5) # 5 nodes added.
self.assertEqual(len(gdef.library.function), 1) # 1 function is defined.
for _ in xrange(10):
        # Run the graph, which is basically two function calls.
ans_u0, ans_v0, ans_w0, ans_u1, ans_v1, ans_w1 = sess.run([u0, v0, w0,
u1, v1, w1])
self.assertAllEqual(ans_u0, self._mat(10.0)) # 2 * 3 + 4 = 10
self.assertAllEqual(ans_v0, self._mat(100.0)) # 10^2 = 100
self.assertAllEqual(ans_w0, self._mat(110.0)) # 100 + 10 = 110
self.assertAllEqual(ans_u1, self._mat(11.0)) # 4 * 2 + 3 = 11
self.assertAllEqual(ans_v1, self._mat(121.0)) # 11^2 = 121
self.assertAllEqual(ans_w1, self._mat(132.0)) # 11 + 121 = 132
def testDefineFunction2Args(self):
def APlus2B(a, b):
return a + b * 2
with tf.Graph().as_default():
f_def = function.define_function(APlus2B, {"a": tf.float32,
"b": tf.float32})
one = tf.constant([1.0])
two = tf.constant([2.0])
call = function.call_function(f_def, one, two)
self.assertEquals("APlus2B", call.op.name)
with tf.Session() as sess:
self.assertAllEqual([5.0], sess.run(call))
def testGradientFunc(self):
def XSquarePlusOne(x):
return x * x + 1.0
def XSquarePlusOneGrad(x, dy):
dx = functional_ops._symbolic_gradient(input=[x, dy],
Tout=[tf.float32],
f="XSquarePlusOne",
name="dx")
return dx
g = tf.Graph()
with g.as_default():
f = function.define_function(XSquarePlusOne, {"x": tf.float32})
g = function.define_function(XSquarePlusOneGrad, {"x": tf.float32,
"dy": tf.float32})
epsilon = tf.constant([0.1])
two = tf.constant([2.0])
call_f = function.call_function(f, two)
call_g = function.call_function(g, two, epsilon)
with tf.Session() as sess:
self.assertAllClose([5.0], sess.run(call_f))
self.assertAllClose([0.4], sess.run(call_g))
def testTanhSymGrad(self):
g = tf.Graph()
with g.as_default():
@function.Defun(x=tf.float32)
def Forward(x):
return tf.reduce_sum(tf.tanh(x))
x = tf.placeholder(tf.float32)
y = Forward(x)
dx = tf.gradients([y], [x])
inp = np.array([-1, 1, 2, -2], dtype=np.float32)
feed = {x: inp}
cfg = tf.ConfigProto(
graph_options=tf.GraphOptions(
optimizer_options=tf.OptimizerOptions(
opt_level=tf.OptimizerOptions.L1,
do_function_inlining=True)))
with tf.Session(graph=g, config=cfg) as sess:
out, = sess.run(dx, feed)
self.assertAllClose(1 - np.square(np.tanh(inp)), out)
def testSymGradShape(self):
g = tf.Graph()
with g.as_default():
x = tf.placeholder(tf.float32, [25, 4])
y = tf.placeholder(tf.float32, [200, 100])
dz = tf.placeholder(tf.float32, [1])
      # We assume Foo is a function of (x, y) -> (z). Then, Foo's
# gradient function is (x, y, dz) -> (dx, dy). dx's shape
# should be the same as x's; and dy's shape should be the same
# as y's.
dx, dy = functional_ops._symbolic_gradient(input=[x, y, dz],
Tout=[tf.float32] * 2,
f="Foo")
self.assertEquals(x.get_shape(), dx.get_shape())
self.assertEquals(y.get_shape(), dy.get_shape())
def testDefineFunctionNoArgs(self):
def AConstant():
return tf.constant([42])
with tf.Graph().as_default():
f_def = function.define_function(AConstant, {})
call = function.call_function(f_def)
self.assertEquals("AConstant", call.op.name)
with tf.Session() as sess:
self.assertAllEqual([42], sess.run(call))
def testDefineFunctionNames(self):
def Foo(a):
return a + 1
with tf.Graph().as_default():
f_def = function.define_function(Foo, {"a": tf.float32})
one = tf.constant([1.0])
call1 = function.call_function(f_def, one)
self.assertEquals("Foo", call1.op.name)
call2 = function.call_function(f_def, one)
self.assertEquals("Foo_1", call2.op.name)
call3 = function.call_function(f_def, one, name="mine")
self.assertEquals("mine", call3.op.name)
with tf.name_scope("my"):
call4 = function.call_function(f_def, one, name="precious")
self.assertEquals("my/precious", call4.op.name)
def testDefineErrors(self):
def NoResult():
pass
def VarArgs(*unused_b):
return tf.constant([1])
def DefaultArg(unused_a=12):
return tf.constant([1])
def KwArgs(**unused_kwargs):
return tf.constant([1])
def PlusMinus(a, b):
return a + b, b - a
with tf.Graph().as_default():
with self.assertRaisesRegexp(ValueError, "return at least one tensor"):
function.define_function(NoResult, {})
with self.assertRaisesRegexp(ValueError, "plain arglists are supported"):
function.define_function(VarArgs, {})
with self.assertRaisesRegexp(ValueError, "plain arglists are supported"):
function.define_function(DefaultArg, {})
with self.assertRaisesRegexp(ValueError, "plain arglists are supported"):
function.define_function(KwArgs, {})
with self.assertRaisesRegexp(ValueError, "specified input types"):
function.define_function(PlusMinus, {})
with self.assertRaisesRegexp(ValueError, "specified input types"):
function.define_function(PlusMinus, {"c": tf.float32})
with self.assertRaisesRegexp(ValueError, "type for argument: b"):
function.define_function(PlusMinus, {"a": tf.float32,
"c": tf.float32})
with self.assertRaisesRegexp(ValueError, "specified input types"):
function.define_function(PlusMinus, {"a": tf.float32,
"b": tf.float32,
"c": tf.float32})
def testCallErrors(self):
def Const():
return tf.constant(1)
def PlusOne(a):
return a + 1
def PlusMinus(a, b):
return a + b, b - a
with tf.Graph().as_default():
one = tf.constant([1])
two = tf.constant([2])
const = function.define_function(Const, {})
plus_one = function.define_function(PlusOne, {"a": tf.int32})
plus_minus = function.define_function(PlusMinus, {"a": tf.int32,
"b": tf.int32})
function.call_function(const)
with self.assertRaisesRegexp(ValueError, "arguments: 0"):
function.call_function(const, one)
with self.assertRaisesRegexp(ValueError, "arguments: 0"):
function.call_function(const, one, two)
with self.assertRaisesRegexp(ValueError, "arguments: 1"):
function.call_function(plus_one)
function.call_function(plus_one, one)
with self.assertRaisesRegexp(ValueError, "arguments: 1"):
function.call_function(plus_one, one, two)
with self.assertRaisesRegexp(ValueError, "arguments: 2"):
function.call_function(plus_minus)
with self.assertRaisesRegexp(ValueError, "arguments: 2"):
function.call_function(plus_minus, one)
function.call_function(plus_minus, one, two)
function.call_function(plus_one, one, name="p1")
with self.assertRaisesRegexp(ValueError, "Unknown keyword arguments"):
function.call_function(plus_one, one, device="/gpu:0")
def testFunctionDecorator(self):
with tf.Graph().as_default():
@function.Defun(b=tf.float32)
def Minus1(b):
return b - 1.0
two = tf.constant([2.])
call1 = Minus1(two)
self.assertEquals("Minus1", call1.op.name)
# pylint: disable=unexpected-keyword-arg
call2 = Minus1(call1, name="next")
# pylint:enable=unexpected-keyword-arg
self.assertEquals("next", call2.op.name)
with tf.Session() as sess:
self.assertAllEqual([1], sess.run(call1))
self.assertAllEqual([0], sess.run(call2))
def testNestedFunction(self):
with tf.Graph().as_default():
@function.Defun(x=tf.float32)
def Cube(x):
return x * x * x
@function.Defun(x=tf.float32, y=tf.float32)
def CubeXPlusY(x, y):
return Cube(x) + y
z = CubeXPlusY(tf.constant(3.0), tf.constant(-2.0))
with self.test_session():
self.assertAllEqual(z.eval(), 25.0)
class UnrollLSTMTest(tf.test.TestCase):
BATCH_SIZE = 16
LSTM_DIMS = 32
NUM_UNROLL = 20
def _Weights(self):
dims = self.LSTM_DIMS
return tf.random_uniform([2 * dims, 4 * dims], -1, 1, seed=123456)
def _Input(self):
return tf.random_uniform(
[self.NUM_UNROLL, self.BATCH_SIZE, self.LSTM_DIMS],
seed=654321)
# Helper to construct a LSTM cell graph.
@classmethod
def LSTMCell(cls, x, mprev, cprev, weights):
xm = tf.concat(1, [x, mprev])
i_i, i_g, f_g, o_g = tf.split(1, 4, tf.matmul(xm, weights))
new_c = tf.sigmoid(f_g) * cprev + tf.sigmoid(i_g) * tf.tanh(i_i)
new_c = tf.clip_by_value(new_c, -50.0, 50.0)
new_m = tf.sigmoid(o_g) * tf.tanh(new_c)
return new_m, new_c
def _BuildForward(self, weights, inp, mode="cell"):
def Loop(cell, w, i):
x = tf.unpack(i, self.NUM_UNROLL)
m = tf.zeros_like(x[0])
c = tf.zeros_like(x[0])
for i in range(self.NUM_UNROLL):
m, c = cell(x[i], m, c, w)
return m
cell = UnrollLSTMTest.LSTMCell
if mode == "complete":
# Constructs the complete graph in python.
return Loop(cell, weights, inp)
cell = function.Defun(x=tf.float32,
mprev=tf.float32,
cprev=tf.float32,
weights=tf.float32)(cell)
if mode == "cell":
# Just represent the LSTM as a function.
return Loop(cell, weights, inp)
if mode == "loop":
# Wraps the whole loop as a function.
@function.Defun(w=tf.float32, i=tf.float32)
def LSTMLoop(w, i):
return Loop(cell, w, i)
return LSTMLoop(weights, inp)
if mode == "loop10":
# Wraps 10 lstm steps into one function, and the whole loop
# into another calling the formers.
# Groups 10 steps at a time):
# TODO(zhifengc): Any way to make the syntax less hideous?
@function.Defun(m=tf.float32,
c=tf.float32,
w=tf.float32,
x0=tf.float32,
x1=tf.float32,
x2=tf.float32,
x3=tf.float32,
x4=tf.float32,
x5=tf.float32,
x6=tf.float32,
x7=tf.float32,
x8=tf.float32,
x9=tf.float32)
def Loop10(w, m, c, x0, x1, x2, x3, x4, x5, x6, x7, x8, x9):
for x in [x0, x1, x2, x3, x4, x5, x6, x7, x8, x9]:
m, c = cell(x, m, c, w)
return m, c
@function.Defun(weights=tf.float32, inp=tf.float32)
def LSTMLoop10(weights, inp):
x = tf.unpack(inp, self.NUM_UNROLL)
m = tf.zeros_like(x[0])
c = tf.zeros_like(x[0])
assert self.NUM_UNROLL % 10 == 0
for i in range(0, self.NUM_UNROLL, 10):
m, c = Loop10(weights, m, c, *x[i:i + 10])
return m
return LSTMLoop10(weights, inp)
def _OptimizerOptions(self):
ret = []
for cse in [False, True]:
for inline in [False, True]:
for cfold in [False, True]:
ret.append(tf.ConfigProto(graph_options=tf.GraphOptions(
optimizer_options=tf.OptimizerOptions(
opt_level=tf.OptimizerOptions.L0,
do_common_subexpression_elimination=cse,
do_function_inlining=inline,
do_constant_folding=cfold))))
return ret
def testUnrollLSTM(self):
# Run one step of the unrolled lstm graph.
def RunForward(mode, cfg=None):
print("mode = ", mode)
g = tf.Graph()
start = time.time()
with g.as_default():
weights = self._Weights()
inp = self._Input()
m = self._BuildForward(weights, inp, mode)
gdef = g.as_graph_def()
finish = time.time()
print("time: ", finish - start, " txt size: ", len(str(gdef)),
"gdef bin size: ", len(gdef.SerializeToString()))
with g.as_default(), tf.Session(config=cfg) as sess:
return sess.run(m)
mv0 = RunForward("complete")
for cfg in self._OptimizerOptions():
print("cfg = ", cfg)
mv1 = RunForward("cell", cfg)
mv2 = RunForward("loop", cfg)
mv3 = RunForward("loop10", cfg)
self.assertAllClose(mv0, mv1, rtol=1e-4)
self.assertAllClose(mv0, mv2, rtol=1e-4)
self.assertAllClose(mv0, mv3, rtol=1e-4)
def testUnrollLSTMGrad(self):
# Run one step of the unrolled lstm graph.
def RunForwardBackward(mode, cfg=None):
print("mode = ", mode)
g = tf.Graph()
start = time.time()
with g.as_default():
weights = self._Weights()
inp = self._Input()
m = self._BuildForward(weights, inp, mode)
loss = tf.reduce_sum(tf.square(m))
dw = tf.gradients([loss], [weights])
gdef = g.as_graph_def()
finish = time.time()
print("time: ", finish - start, " txt size: ", len(str(gdef)),
"gdef bin size: ", len(gdef.SerializeToString()))
with g.as_default(), tf.Session(config=cfg) as sess:
return sess.run(dw)
d0 = RunForwardBackward("complete")
for cfg in self._OptimizerOptions():
print("cfg = ", cfg)
d1 = RunForwardBackward("cell", cfg)
d2 = RunForwardBackward("loop", cfg)
d3 = RunForwardBackward("loop10", cfg)
self.assertAllClose(d0, d1, rtol=1e-4)
self.assertAllClose(d0, d2, rtol=1e-4)
self.assertAllClose(d0, d3, rtol=1e-4)
if __name__ == "__main__":
tf.test.main()
|
{
"content_hash": "d65772bad7a05f5df78c983362b854b0",
"timestamp": "",
"source": "github",
"line_count": 460,
"max_line_length": 79,
"avg_line_length": 35.517391304347825,
"alnum_prop": 0.5686742563349247,
"repo_name": "RyanYoung25/tensorflow",
"id": "6728d31a25472f721d0e4a0bc47bfbe8dc1cb340",
"size": "17014",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/framework/function_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "151630"
},
{
"name": "C++",
"bytes": "6922849"
},
{
"name": "CMake",
"bytes": "29325"
},
{
"name": "CSS",
"bytes": "107"
},
{
"name": "HTML",
"bytes": "657597"
},
{
"name": "Java",
"bytes": "50361"
},
{
"name": "JavaScript",
"bytes": "16175"
},
{
"name": "Jupyter Notebook",
"bytes": "777942"
},
{
"name": "Objective-C",
"bytes": "1288"
},
{
"name": "Protocol Buffer",
"bytes": "89536"
},
{
"name": "Python",
"bytes": "3835693"
},
{
"name": "Shell",
"bytes": "66697"
},
{
"name": "TypeScript",
"bytes": "329009"
}
],
"symlink_target": ""
}
|
import numpy as np
from scipy.spatial.distance import pdist, squareform
from .tasks import _fast_geo
from .constants import ISPY3
from tree_distance import PhyloTree
GOLDEN = (np.sqrt(5)-1)/2
# Functions for OutOfSampleMDS
def _eigen(mtx, inverse=False):
if np.allclose(mtx, mtx.T):
fn = np.linalg.eigh
else:
fn = np.linalg.eig
vals, vecs = fn(mtx)
ix = np.argsort(vals)[::-1]
vals = vals[ix]
vecs = vecs[:,ix]
if inverse:
return vecs, vals, np.linalg.inv(vecs)
return vecs, vals
def _new_mean(mtx, sum_, size, new_row, index):
old_row = mtx[index]
return (sum_ - 2*old_row.sum() + old_row[index] + 2*new_row.sum() - new_row[index]) / size
def _new_rowmean(mtx, rowsum, nrow, new_row, index):
old_row = mtx[index]
tmp = rowsum - old_row + new_row
tmp[index] = new_row.sum()
return tmp / nrow
### Functions for OptimiseDistanceFit
def g_(x,a,c):
"""
Calculate vector of equations of residuals,
evaluated at x
G[i] = Sum_j (x[j] - a[i,j])^2 - C[i]**2
where:
x is the point we're trying to fit (in M dimensions),
a is the (N) already embedded points (in M dimensions),
C is the expected distance
"""
return ((x-a)**2).sum(1) - c**2
def g(x,a,c):
"""
Christophe's suggestion for residuals,
G[i] = Sqrt(Sum_j (x[j] - a[i,j])^2) - C[i]
"""
return np.sqrt(((x-a)**2).sum(1)) - c
def f(x, a, c):
""" Objective function (sum of squared residuals) """
v = g(x, a, c)
return v.dot(v)
def jac_(x,a):
""" Jacobian matrix of partial derivatives """
return 2 * (x-a)
def jac(x,a):
""" Jacobian matrix given Christophe's suggestion of f """
return (x-a) / np.sqrt(((x-a)**2).sum(1))[:,np.newaxis]
def gradient(x, a, c):
""" J'.G """
return jac(x, a).T.dot(g(x, a, c))
def hessian(x, a):
""" J'.J """
j = jac(x, a)
return j.T.dot(j)
def grad_desc_update(x, a, c, step=0.01):
"""
Given a value of x, return a better x
using gradient descent
"""
return x - step * gradient(x,a,c)
def newton_update(x, a, c, step=1.0):
"""
Given a value of x, return a better x
    using Gauss-Newton
"""
return x - step*np.linalg.inv(hessian(x, a)).dot(gradient(x, a, c))
def levenberg_marquardt_update(x, a, c, damping=0.001):
"""
Given a value of x, return a better x
    using Levenberg-Marquardt
"""
hess = hessian(x, a)
return x - np.linalg.inv(hess + damping*np.diag(hess)).dot(gradient(x, a, c))
def golden_section_search(fn, a, b, tolerance=1e-5):
"""
    Golden-section search (based on the Wikipedia implementation)
    to find the minimum of fn on [a, b]
    fn: a strictly unimodal function on [a, b]
    example:
    >>> f = lambda x: (x-2)**2
    >>> x = golden_section_search(f, 1, 5)
>>> x
2.000009644875678
"""
c = b - GOLDEN*(b-a)
d = a + GOLDEN*(b-a)
while abs(c-d) > tolerance:
fc, fd = fn(c), fn(d)
if fc < fd:
b = d
d = c #fd=fc;fc=f(c)
c = b - GOLDEN*(b-a)
else:
a = c
c = d #fc=fd;fd=f(d)
d = a + GOLDEN*(b-a)
return (b+a)/2
def optimise_newton(x, a, c, tolerance=0.001):
"""
    Optimise value of x using Gauss-Newton
"""
x_new = x
x_old = x-1 # dummy value
while np.abs(x_new - x_old).sum() > tolerance:
x_old = x_new
x_new = newton_update(x_old, a, c)
return x_new
def optimise_levenberg_marquardt(x, a, c, damping=0.001, tolerance=0.001):
"""
Optimise value of x using levenberg-marquardt
"""
x_new = x
x_old = x-1 # dummy value
f_old = f(x_new, a, c)
while np.abs(x_new - x_old).sum() > tolerance:
x_old = x_new
x_tmp = levenberg_marquardt_update(x_old, a, c, damping)
f_new = f(x_tmp, a, c)
if f_new < f_old:
            damping = max(damping/10., 1e-20)  # shrink damping, but keep it positive
x_new = x_tmp
f_old = f_new
else:
damping *= 10.
return x_new
def optimise_gradient_descent(x, a, c, tolerance=0.001):
"""
Optimise value of x using gradient descent
"""
x_new = x
x_old = x-1 # dummy value
while np.abs(x_new - x_old).sum() > tolerance:
x_old = x_new
step_size = golden_section_search(lambda step: f(grad_desc_update(x_old, a, c, step), a, c), -1.0, 1.0)
x_new = grad_desc_update(x_old, a, c, step_size)
return x_new
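# Minimal numerical sketch of the optimisers above (values are illustrative only):
# place three anchor points, measure distances from a known target, then recover
# the target from the distances alone.
def _demo_optimise_newton():
    anchors = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]])
    target = np.array([0.25, 0.4])
    dists = np.sqrt(((anchors - target)**2).sum(1))
    start = anchors.mean(0)  # crude initial guess at the centroid
    fitted = optimise_newton(start, anchors, dists)
    return fitted, f(fitted, anchors, dists)  # fitted ~= target, objective ~= 0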
### Functions to add bootstraps to collections
def run_optimise_bootstrap_coords(boot_collection, ref_collection, ref_coords, task=_fast_geo, rooted=False, **kwargs):
fit = np.empty((len(boot_collection), ref_coords.shape[1]))
if ISPY3:
query_trees = [PhyloTree(tree.encode(), rooted) for tree in boot_collection.trees]
ref_trees = [PhyloTree(tree.encode(), rooted) for tree in ref_collection.trees]
else:
query_trees = [PhyloTree(tree, rooted) for tree in boot_collection.trees]
ref_trees = [PhyloTree(tree, rooted) for tree in ref_collection.trees]
for i, tree in enumerate(query_trees):
ref_dists = np.array([task(tree, ref_tree, False) for ref_tree in ref_trees])
opt = OptimiseDistanceFit(ref_coords.values, ref_dists)
fit[i] = opt.newton(**kwargs)
return fit
def run_out_of_sample_mds(boot_collection, ref_collection, ref_distance_matrix, index, dimensions, task=_fast_geo, rooted=False, **kwargs):
"""
index = index of the locus the bootstrap sample corresponds to - only important if
using recalc=True in kwargs
"""
fit = np.empty((len(boot_collection), dimensions))
if ISPY3:
query_trees = [PhyloTree(tree.encode(), rooted) for tree in boot_collection.trees]
ref_trees = [PhyloTree(tree.encode(), rooted) for tree in ref_collection.trees]
else:
query_trees = [PhyloTree(tree, rooted) for tree in boot_collection.trees]
ref_trees = [PhyloTree(tree, rooted) for tree in ref_collection.trees]
for i, tree in enumerate(query_trees):
distvec = np.array([task(tree, ref_tree, False) for ref_tree in ref_trees])
oos = OutOfSampleMDS(ref_distance_matrix)
fit[i] = oos.fit(index, distvec, dimensions=dimensions, **kwargs)
return fit
def run_analytical_fit(boot_collection, ref_collection, ref_coords, task=_fast_geo, rooted=False, **kwargs):
fit = np.empty((len(boot_collection), ref_coords.shape[1]))
if ISPY3:
query_trees = [PhyloTree(tree.encode(), rooted) for tree in boot_collection.trees]
ref_trees = [PhyloTree(tree.encode(), rooted) for tree in ref_collection.trees]
else:
query_trees = [PhyloTree(tree, rooted) for tree in boot_collection.trees]
ref_trees = [PhyloTree(tree, rooted) for tree in ref_collection.trees]
for i, tree in enumerate(query_trees):
ref_dists = np.array([task(tree, ref_tree, False) for ref_tree in ref_trees])
aft = AnalyticalFit(ref_coords.values, **kwargs)
fit[i] = aft.fit(ref_dists)
return fit
### Functions to assess closeness of fitted distances to reference
def stress(ref_cds, est_cds):
"""
Kruskal's stress
"""
ref_dists = pdist(ref_cds)
est_dists = pdist(est_cds)
return np.sqrt(((ref_dists - est_dists)**2).sum() / (ref_dists**2).sum())
def stress_dm(ref_distance_matrix, est_cds):
ref_dists = squareform(ref_distance_matrix)
est_dists = pdist(est_cds)
return np.sqrt(((ref_dists - est_dists)**2).sum() / (ref_dists**2).sum())
def rmsd(ref_cds, est_cds):
"""
Root-mean-squared-difference
"""
ref_dists = pdist(ref_cds)
est_dists = pdist(est_cds)
return np.sqrt(((ref_dists - est_dists)**2).mean())
def rmsd_dm(ref_distance_matrix, est_cds):
ref_dists = squareform(ref_distance_matrix)
est_dists = pdist(est_cds)
return np.sqrt(((ref_dists - est_dists)**2).mean())
### Classes
class OptimiseDistanceFit(object):
"""
Usage:
    opt = OptimiseDistanceFit(coords, dists, [method]='adjacent'|'pairwise')
opt.newton([start_x]=analytical_estimate, [tolerance]=1.0e-6)
"""
def __init__(self, reference_coords, reference_dists, **kwargs):
"""
Set up OptimiseDistanceFit with coordinates
of reference points to fit to, and reference
distances to find closest match to
"""
self._a = reference_coords
self._c = reference_dists
self._analytical_fitter = AnalyticalFit(reference_coords, **kwargs)
def residuals(self, x):
"""
Calculate vector of equations of residuals evaluated
at x
"""
return g(x, self._a, self._c)
def objective_fn(self, x):
"""
Calculate objective function at x
"""
return f(x, self._a, self._c)
def jacobian(self, x):
"""
Evaluate jacobian matrix at x
"""
return jac(x, self._a)
def gradient(self, x):
"""
Evaluate gradient vector at x
"""
return gradient(x, self._a, self._c)
def hessian(self, x):
"""
Evaluate hessian matrix at x
"""
return hessian(x, self._a)
def newton(self, start_x=None, tolerance=1.0e-6):
"""
Optimise value of x using newton gauss
"""
if start_x is None:
start_x = self._analytical_fitter.fit(self._c)
return optimise_newton(start_x, self._a, self._c, tolerance)
def gradient_descent(self, start_x=None, tolerance=1.0e-6):
"""
Optimise value of x using gradient descent
"""
if start_x is None:
start_x = self._analytical_fitter.fit(self._c)
return optimise_gradient_descent(start_x, self._a, self._c, tolerance)
def levenberg_marquardt(self, start_x=None, damping=1.0e-3, tolerance=1.0e-6):
"""
Optimise value of x using levenberg marquardt
"""
if start_x is None:
start_x = self._analytical_fitter.fit(self._c)
        return optimise_levenberg_marquardt(start_x, self._a, self._c, damping, tolerance)
class OutOfSampleMDS(object):
def __init__(self, distance_matrix):
"""
Store quantities calculated from a distance matrix,
including the CMDS coordinate matrix
"""
# Calculate all requirements once and store
self.dmsq = distance_matrix**2
self.rows, self.cols = distance_matrix.shape
self.rowsum = (self.dmsq).sum(1)
self.sum = (self.dmsq).sum()
self.rowmean = (self.rowsum / self.rows)
self.mean = self.sum / (self.rows*self.cols)
B = -0.5 * (self.dmsq
- self.rowmean[np.newaxis]
- self.rowmean[:,np.newaxis]
+ self.mean)
U, l = _eigen(B)
# l=l.astype(np.complex)
l = np.clip(l, np.finfo(l.dtype).eps, np.inf)
self.mult_factor = U*(1.0/np.sqrt(l))
self.coords = U*np.sqrt(l)
# self.mult_factor = self.mult_factor.astype(np.float)
def new_B_row(self, row_index, distvec, recalc_mean=False):
if recalc_mean:
rowsum = distvec.sum()
mean = _new_mean(self.dmsq, self.sum, self.rows*self.cols, distvec, row_index)
rowmean = _new_rowmean(self.dmsq, self.rowsum, self.rows, distvec, row_index)
else:
mean = self.mean
rowmean = self.rowmean
b_row = -0.5 * (distvec - rowmean - rowmean[row_index] + mean)
return b_row
def new_coords(self, b_row):
return np.dot(b_row, self.mult_factor)
def fit(self, index, distvec, recalc=False, dimensions=3):
"""
Replace distance matrix values at row/column index with
distances in distvec, and compute new coordinates.
Optionally use distvec to update means and (potentially)
get a better estimate.
distvec values should be plain distances, not squared
distances.
"""
brow = self.new_B_row(index, distvec**2, recalc)
return self.new_coords(brow)[:dimensions]
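# Minimal sketch of OutOfSampleMDS (made-up data): embed random points via their
# distance matrix, then re-fit one of them from a plain distance vector.
def _demo_out_of_sample_mds(n=10, dim=3, seed=0):
    rng = np.random.RandomState(seed)
    points = rng.rand(n, dim)
    dm = squareform(pdist(points))
    oos = OutOfSampleMDS(dm)
    return oos.fit(0, dm[0], dimensions=dim)  # coordinates fitted for point 0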
class AnalyticalFit(object):
"""
Fit coords (x,y,[z]) so that distances from reference coordinates
match closest to reference distances
2D case -
reference coords = |a, b|
|c, d|
|e, f|
reference dists = [P, Q, R]
Fit new coords (x, y):
(x-a)^2 + (y-b)^2 = P^2 (eq.1) | This is an overdetermined
(x-c)^2 + (y-d)^2 = Q^2 (eq.2) | system of simultaneous,
(x-e)^2 + (y-f)^2 = R^2 (eq.3) | non-linear equations
Construct overdetermined system of simultaneous *linear* equations
by subtracting equations from each other - quadratic terms cancel
(eq.4) = (eq.1) - (eq.2)
(eq.5) = (eq.2) - (eq.3)
(eq.6) = (eq.3) - (eq.1)
(2c-2a)x + (2d-2b)y = P^2 - Q^2 - a^2 - b^2 + c^2 + d^2 (eq.4)
(2e-2c)x + (2f-2d)y = Q^2 - R^2 - c^2 - d^2 + e^2 + f^2 (eq.5)
(2a-2e)x + (2b-2f)y = R^2 - P^2 - e^2 - f^2 + a^2 + b^2 (eq.6)
Best-fit solution found by least squares:
(A is the matrix of coefficients on the left sides of eq.4--6,
b is the vector of values on the rhs of eq.4--6
A and the part of b that depends only on coords can be
calculated once and stored)
Ax = b
A'Ax = A'b
x = (A'A)^-1 A'b
"""
def __init__(self, ref_crds, method='adjacent'):
"""
Construct A, part of b that depends on coords only,
and Moore-Penrose pseudoinverse of A (i.e. (A'A)^-1A')
"""
if method == 'adjacent':
self._A, self._partial_b = self._make_A_and_part_of_b_adjacent(ref_crds)
self._pinvA = np.linalg.pinv(self._A)
elif method == 'pairwise':
self._A, self._partial_b = self._make_A_and_part_of_b_pairwise(ref_crds)
self._pinvA = np.linalg.pinv(self._A)
else:
raise ValueError('Unrecognised method {}'.format(method))
self._method = method
def _all_pairwise_comps(self, mtx):
nrow = mtx.shape[0]
ix_a, ix_b = np.triu_indices(nrow, 1)
return mtx[ix_a], mtx[ix_b]
def _rotate_rows(self, mtx):
"""
rotate the matrix so all rows move up,
and top row moves to bottom
"""
return np.roll(mtx, -1, 0)
def _make_A_and_part_of_b_adjacent(self, ref_crds):
"""
Make A and part of b. See docstring of this class
for answer to "What are A and b?"
"""
rot = self._rotate_rows(ref_crds)
A = 2*(rot - ref_crds)
partial_b = (rot**2 - ref_crds**2).sum(1)
return A, partial_b
def _make_A_and_part_of_b_pairwise(self, ref_crds):
m, n = self._all_pairwise_comps(ref_crds)
A = 2*(n-m)
partial_b = (n**2 - m**2).sum(1)
return A, partial_b
def _analytical_fit_adjacent(self, ref_dists):
"""
Fit coords (x,y,[z]) so that distances from reference coordinates
match closest to reference distances
"""
dists = ref_dists**2
rot_dists = self._rotate_rows(dists)
b = dists - rot_dists + self._partial_b
self._b = b
return self._pinvA.dot(b)
def _analytical_fit_pairwise(self, ref_dists):
dists = ref_dists**2
d1, d2 = self._all_pairwise_comps(dists)
b = d1 - d2 + self._partial_b
self._b = b
return self._pinvA.dot(b)
def fit(self, ref_dists):
if self._method == 'adjacent':
return self._analytical_fit_adjacent(ref_dists)
elif self._method == 'pairwise':
return self._analytical_fit_pairwise(ref_dists)
else:
            raise ValueError('Unrecognised method {}'.format(self._method))
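# Worked sketch of the linear least-squares fit described in the class docstring
# (coordinates and distances below are illustrative, not real data).
def _demo_analytical_fit():
    ref_crds = np.array([[0.0, 0.0], [2.0, 0.0], [0.0, 2.0]])
    true_point = np.array([0.5, 1.0])
    ref_dists = np.sqrt(((ref_crds - true_point)**2).sum(1))
    fitter = AnalyticalFit(ref_crds, method='adjacent')
    return fitter.fit(ref_dists)  # should recover true_point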
class BootstrapFitter(object):
"""
Automate process of adding bootstraps to sample
"""
pass
|
{
"content_hash": "5a4bee8ef247a2a779c1b5c12c8210c4",
"timestamp": "",
"source": "github",
"line_count": 487,
"max_line_length": 139,
"avg_line_length": 32.6570841889117,
"alnum_prop": 0.579728370221328,
"repo_name": "kgori/treeCl",
"id": "af509df7aea947befd68d458844661ed88d8cb20",
"size": "15904",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "treeCl/bootstrap.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "41097"
},
{
"name": "C++",
"bytes": "1731778"
},
{
"name": "CMake",
"bytes": "2686"
},
{
"name": "Jupyter Notebook",
"bytes": "76541"
},
{
"name": "Makefile",
"bytes": "251"
},
{
"name": "Python",
"bytes": "428585"
},
{
"name": "Shell",
"bytes": "468"
}
],
"symlink_target": ""
}
|
"""
Testing the generation of the classes for the rest api
"""
import pytest
import responses
from rest_client.generate_classes import generate_classes
from rest_client.generate_classes import get_provided_classes, get_class_meta
from rest_client.generate_classes import get_objects
from .rest_responses import PRODUCT_GET_ARGS, PRODUCT_OPTIONS_ARGS
from .rest_responses import PRODUCT_GET_KWARGS, PRODUCT_OPTIONS_KWARGS
from .rest_responses import PRODUCT_LIST_GET_ARGS, PRODUCT_LIST_GET_KWARGS
from .rest_responses import PRODUCT_MEAS_GET_KWARGS
from .rest_responses import MEAS_OPTIONS_ARGS, MEAS_OPTIONS_KWARGS
@responses.activate # pylint: disable=E1101
def test_getting_classes():
responses.add(*PRODUCT_GET_ARGS, **PRODUCT_MEAS_GET_KWARGS) # pylint: disable=E1101
classes = get_provided_classes('http://127.0.0.1:8000/api/', 'me', 'you')
assert len(classes) == 2
for cls in classes:
assert cls in ['Product', 'Measurement']
@responses.activate # pylint: disable=E1101
def test_getting_meta():
responses.add(*PRODUCT_OPTIONS_ARGS, # pylint: disable=E1101
**PRODUCT_OPTIONS_KWARGS)
resp_meta = get_class_meta('http://127.0.0.1:8000/api/',
'Product', 'me', 'you')
meta = {'product_name': {'type': 'string', 'required': True,
'read_only': False, 'label': 'Product name',
'max_length': 30}}
assert resp_meta == meta
@responses.activate # pylint: disable=E1101
def test_create_classes():
responses.add(*PRODUCT_GET_ARGS, **PRODUCT_GET_KWARGS) # pylint: disable=E1101
responses.add(*PRODUCT_OPTIONS_ARGS, **PRODUCT_OPTIONS_KWARGS) # pylint: disable=E1101
cls_names = generate_classes('http://127.0.0.1:8000/api/', 'me', 'you')
assert cls_names == ['Product']
try:
from rest_client.generate_classes import Product
except ImportError:
        pytest.fail("Class couldn't be imported")
prod = Product('', '')
assert hasattr(prod, 'product_name')
assert hasattr(prod, 'product_name_field')
assert hasattr(prod, 'url')
assert hasattr(prod.product_name_field, 'required')
assert hasattr(prod.product_name_field, 'read_only')
assert hasattr(prod.product_name_field, 'max_length')
assert hasattr(prod.product_name_field, 'label')
assert prod.product_name_field.label == 'Product name'
assert prod.product_name_field.max_length == 30
assert prod.product_name_field.required
assert not prod.product_name_field.read_only
with pytest.raises(ValueError):
prod.product_name = "This string is longer than 30 characters"
@responses.activate # pylint: disable=E1101
def test_properties():
responses.add(*PRODUCT_GET_ARGS, # pylint: disable=E1101
**PRODUCT_MEAS_GET_KWARGS)
responses.add(*MEAS_OPTIONS_ARGS, # pylint: disable=E1101
**MEAS_OPTIONS_KWARGS)
responses.add(*PRODUCT_OPTIONS_ARGS, # pylint: disable=E1101
**PRODUCT_OPTIONS_KWARGS)
_ = generate_classes('http://127.0.0.1:8000/api/', 'me', 'you')
try:
from rest_client.generate_classes import Measurement
except ImportError:
        pytest.fail("Class couldn't be imported")
meas = Measurement('', '')
meas.date = 42
meas.examiner = 'Bert the bread'
assert meas.date == 42
assert meas.date_field.yvalue == 42
assert meas.examiner == 'Bert the bread'
assert meas.examiner_field.yvalue == 'Bert the bread'
@responses.activate # pylint: disable=E1101
def test_get_objects():
responses.add(*PRODUCT_LIST_GET_ARGS, **PRODUCT_LIST_GET_KWARGS) # pylint: disable=E1101
obj_list = get_objects('http://127.0.0.1:8000/api/',
'Product', 'me', 'you')
assert len(obj_list) == 3
product_names = [obj.product_name for obj in obj_list]
for i, obj in enumerate(obj_list):
assert 'product{0}'.format(i+1) in product_names
prod_index = obj.product_name[-1]
assert obj.url == 'http://127.0.0.1:8000/api/product/{0}/'.format(
prod_index)
@responses.activate # pylint: disable=E1101
def test_get_fields():
responses.add(*PRODUCT_GET_ARGS, **PRODUCT_GET_KWARGS) # pylint: disable=E1101
responses.add(*PRODUCT_OPTIONS_ARGS, **PRODUCT_OPTIONS_KWARGS) # pylint: disable=E1101
cls_names = generate_classes('http://127.0.0.1:8000/api/', 'me', 'you')
assert cls_names == ['Product']
try:
from rest_client.generate_classes import Product
except ImportError:
        pytest.fail("Class couldn't be imported")
prod = Product('', '')
fields = prod.get_fields()
assert fields == ['product_name']
@responses.activate # pylint: disable=E1101
def test_patch_object():
responses.add(*PRODUCT_LIST_GET_ARGS, # pylint: disable=E1101
**PRODUCT_LIST_GET_KWARGS)
responses.add(responses.PATCH, 'http://127.0.0.1:8000/api/product/1/') # pylint: disable=E1101
obj_list = get_objects('http://127.0.0.1:8000/api/',
'Product', 'me', 'you')
obj = obj_list[0]
obj.product_name = 'Spam and eggs'
obj.patch()
assert len(responses.calls) == 2 # pylint: disable=E1101
body = responses.calls[1].request.body # pylint: disable=E1101
assert body == b'{"product_name": "Spam and eggs"}'
with pytest.raises(OSError):
obj_list[1].patch()
|
{
"content_hash": "345d7763d40552818124c3b62fdfe5fc",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 99,
"avg_line_length": 42,
"alnum_prop": 0.6552233296419343,
"repo_name": "RedBeardCode/QDjConChart",
"id": "cace301e39ae04ee39c3258baa2411e4bccb71f4",
"size": "5468",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_class_generation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "31332"
}
],
"symlink_target": ""
}
|
from pprint import pprint
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
cls = get_driver(Provider.NTTA)
driver = cls("my username", "my password", region="ntta-na")
pprint(driver.list_nodes())
|
{
"content_hash": "7e9b3fc982ec2edf32d07debe739b022",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 60,
"avg_line_length": 27.11111111111111,
"alnum_prop": 0.7745901639344263,
"repo_name": "mistio/libcloud",
"id": "406ba728af7d3c0e19110349c8951220ed69138f",
"size": "244",
"binary": false,
"copies": "3",
"ref": "refs/heads/trunk",
"path": "docs/examples/compute/ntta/instantiate_driver.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1819"
},
{
"name": "HTML",
"bytes": "2545"
},
{
"name": "PowerShell",
"bytes": "410"
},
{
"name": "Python",
"bytes": "9067225"
},
{
"name": "Shell",
"bytes": "12994"
}
],
"symlink_target": ""
}
|
from django.core.urlresolvers import reverse
from django.template import defaultfilters as filters
from django.utils import http
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext_lazy
from horizon_lib import exceptions
from horizon_lib import tables
from openstack_horizon import api
from openstack_horizon import policy
class AddPoolLink(tables.LinkAction):
name = "addpool"
verbose_name = _("Add Pool")
url = "horizon:project:loadbalancers:addpool"
classes = ("ajax-modal",)
icon = "plus"
policy_rules = (("network", "create_pool"),)
class AddVipLink(tables.LinkAction):
name = "addvip"
verbose_name = _("Add VIP")
classes = ("ajax-modal",)
icon = "plus"
policy_rules = (("network", "create_vip"),)
def get_link_url(self, pool):
base_url = reverse("horizon:project:loadbalancers:addvip",
kwargs={'pool_id': pool.id})
return base_url
def allowed(self, request, datum=None):
if datum and datum.vip_id:
return False
return True
class AddMemberLink(tables.LinkAction):
name = "addmember"
verbose_name = _("Add Member")
url = "horizon:project:loadbalancers:addmember"
classes = ("ajax-modal",)
icon = "plus"
policy_rules = (("network", "create_member"),)
class AddMonitorLink(tables.LinkAction):
name = "addmonitor"
verbose_name = _("Add Monitor")
url = "horizon:project:loadbalancers:addmonitor"
classes = ("ajax-modal",)
icon = "plus"
policy_rules = (("network", "create_health_monitor"),)
class DeleteVipLink(policy.PolicyTargetMixin, tables.DeleteAction):
name = "deletevip"
policy_rules = (("network", "delete_vip"),)
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Delete VIP",
u"Delete VIPs",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Scheduled deletion of VIP",
u"Scheduled deletion of VIPs",
count
)
def allowed(self, request, datum=None):
if datum and not datum.vip_id:
return False
return True
class DeletePoolLink(policy.PolicyTargetMixin, tables.DeleteAction):
name = "deletepool"
policy_rules = (("network", "delete_pool"),)
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Delete Pool",
u"Delete Pools",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Scheduled deletion of Pool",
u"Scheduled deletion of Pools",
count
)
def allowed(self, request, datum=None):
if datum and datum.vip_id:
return False
return True
class DeleteMonitorLink(policy.PolicyTargetMixin,
tables.DeleteAction):
name = "deletemonitor"
policy_rules = (("network", "delete_health_monitor"),)
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Delete Monitor",
u"Delete Monitors",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Scheduled deletion of Monitor",
u"Scheduled deletion of Monitors",
count
)
class DeleteMemberLink(policy.PolicyTargetMixin, tables.DeleteAction):
name = "deletemember"
policy_rules = (("network", "delete_member"),)
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Delete Member",
u"Delete Members",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Scheduled deletion of Member",
u"Scheduled deletion of Members",
count
)
class UpdatePoolLink(policy.PolicyTargetMixin, tables.LinkAction):
name = "updatepool"
verbose_name = _("Edit Pool")
classes = ("ajax-modal", "btn-update",)
policy_rules = (("network", "update_pool"),)
def get_link_url(self, pool):
base_url = reverse("horizon:project:loadbalancers:updatepool",
kwargs={'pool_id': pool.id})
return base_url
class UpdateVipLink(policy.PolicyTargetMixin, tables.LinkAction):
name = "updatevip"
verbose_name = _("Edit VIP")
classes = ("ajax-modal", "btn-update",)
policy_rules = (("network", "update_vip"),)
def get_link_url(self, pool):
base_url = reverse("horizon:project:loadbalancers:updatevip",
kwargs={'vip_id': pool.vip_id})
return base_url
def allowed(self, request, datum=None):
if datum and not datum.vip_id:
return False
return True
class UpdateMemberLink(policy.PolicyTargetMixin, tables.LinkAction):
name = "updatemember"
verbose_name = _("Edit Member")
classes = ("ajax-modal", "btn-update",)
policy_rules = (("network", "update_member"),)
def get_link_url(self, member):
base_url = reverse("horizon:project:loadbalancers:updatemember",
kwargs={'member_id': member.id})
return base_url
class UpdateMonitorLink(policy.PolicyTargetMixin, tables.LinkAction):
name = "updatemonitor"
verbose_name = _("Edit Monitor")
classes = ("ajax-modal", "btn-update",)
policy_rules = (("network", "update_health_monitor"),)
def get_link_url(self, monitor):
base_url = reverse("horizon:project:loadbalancers:updatemonitor",
kwargs={'monitor_id': monitor.id})
return base_url
def get_vip_link(pool):
if pool.vip_id:
return reverse("horizon:project:loadbalancers:vipdetails",
args=(http.urlquote(pool.vip_id),))
else:
return None
class AddPMAssociationLink(policy.PolicyTargetMixin,
tables.LinkAction):
name = "addassociation"
verbose_name = _("Associate Monitor")
url = "horizon:project:loadbalancers:addassociation"
classes = ("ajax-modal",)
icon = "plus"
policy_rules = (("network", "create_pool_health_monitor"),)
def allowed(self, request, datum=None):
try:
tenant_id = request.user.tenant_id
monitors = api.lbaas.pool_health_monitor_list(request,
tenant_id=tenant_id)
for m in monitors:
if m.id not in datum['health_monitors']:
return True
except Exception:
exceptions.handle(request,
_('Failed to retrieve health monitors.'))
return False
class DeletePMAssociationLink(policy.PolicyTargetMixin,
tables.LinkAction):
name = "deleteassociation"
verbose_name = _("Disassociate Monitor")
url = "horizon:project:loadbalancers:deleteassociation"
classes = ("ajax-modal", "btn-danger")
icon = "remove"
policy_rules = (("network", "delete_pool_health_monitor"),)
def allowed(self, request, datum=None):
if datum and not datum['health_monitors']:
return False
return True
class PoolsTable(tables.DataTable):
name = tables.Column("name",
verbose_name=_("Name"),
link="horizon:project:loadbalancers:pooldetails")
description = tables.Column('description', verbose_name=_("Description"))
provider = tables.Column('provider', verbose_name=_("Provider"),
filters=(lambda v: filters.default(v, _('N/A')),))
subnet_name = tables.Column('subnet_name', verbose_name=_("Subnet"))
protocol = tables.Column('protocol', verbose_name=_("Protocol"))
status = tables.Column('status', verbose_name=_("Status"))
vip_name = tables.Column('vip_name', verbose_name=_("VIP"),
link=get_vip_link)
class Meta:
name = "poolstable"
verbose_name = _("Pools")
table_actions = (AddPoolLink, DeletePoolLink)
row_actions = (UpdatePoolLink, AddVipLink, UpdateVipLink,
DeleteVipLink, AddPMAssociationLink,
DeletePMAssociationLink, DeletePoolLink)
def get_pool_link(member):
return reverse("horizon:project:loadbalancers:pooldetails",
args=(http.urlquote(member.pool_id),))
def get_member_link(member):
return reverse("horizon:project:loadbalancers:memberdetails",
args=(http.urlquote(member.id),))
class MembersTable(tables.DataTable):
address = tables.Column('address',
verbose_name=_("IP Address"),
link=get_member_link,
attrs={'data-type': "ip"})
protocol_port = tables.Column('protocol_port',
verbose_name=_("Protocol Port"))
weight = tables.Column('weight',
verbose_name=_("Weight"))
pool_name = tables.Column('pool_name',
verbose_name=_("Pool"), link=get_pool_link)
status = tables.Column('status', verbose_name=_("Status"))
class Meta:
name = "memberstable"
verbose_name = _("Members")
table_actions = (AddMemberLink, DeleteMemberLink)
row_actions = (UpdateMemberLink, DeleteMemberLink)
def get_monitor_details(monitor):
if monitor.type in ('HTTP', 'HTTPS'):
return ("%(http_method)s %(url_path)s => %(codes)s" %
{'http_method': monitor.http_method,
'url_path': monitor.url_path,
'codes': monitor.expected_codes})
else:
return _("-")
class MonitorsTable(tables.DataTable):
monitor_type = tables.Column(
"type", verbose_name=_("Monitor Type"),
link="horizon:project:loadbalancers:monitordetails")
delay = tables.Column("delay", verbose_name=_("Delay"))
timeout = tables.Column("timeout", verbose_name=_("Timeout"))
max_retries = tables.Column("max_retries", verbose_name=_("Max Retries"))
details = tables.Column(get_monitor_details, verbose_name=_("Details"))
class Meta:
name = "monitorstable"
verbose_name = _("Monitors")
table_actions = (AddMonitorLink, DeleteMonitorLink)
row_actions = (UpdateMonitorLink, DeleteMonitorLink)
|
{
"content_hash": "0d54e0ec063bc6e8dd691d110b710e59",
"timestamp": "",
"source": "github",
"line_count": 328,
"max_line_length": 79,
"avg_line_length": 32.06707317073171,
"alnum_prop": 0.5975470621791215,
"repo_name": "mrunge/openstack_horizon",
"id": "596a1a419ac060e00aa8e160c24a7b3c499a346f",
"size": "11141",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "openstack_horizon/dashboards/project/loadbalancers/tables.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "63809"
},
{
"name": "JavaScript",
"bytes": "40"
},
{
"name": "Python",
"bytes": "3460539"
},
{
"name": "Shell",
"bytes": "16000"
}
],
"symlink_target": ""
}
|
import controllers
from app import app
def run_server():
app.run()
if __name__ == "__main__":
run_server()
|
{
"content_hash": "f3bcf01364e5a008de3d60ff60225184",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 26,
"avg_line_length": 11.9,
"alnum_prop": 0.5882352941176471,
"repo_name": "drkitty/checklist",
"id": "76839701f860d40902e380afaad43889c9af0234",
"size": "142",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "runserver.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2580"
},
{
"name": "JavaScript",
"bytes": "4827"
},
{
"name": "Python",
"bytes": "5596"
},
{
"name": "Shell",
"bytes": "387"
}
],
"symlink_target": ""
}
|
import pytest
import httpretty
from requests.exceptions import HTTPError
from .fixtures import pretty_api
from zipa import api_test_com as t
@pytest.mark.httpretty
def test_iter_returns_single_object(pretty_api):
t.config.secure = False
for item in t.item['a']:
assert item.name == 'a'
@pytest.mark.httpretty
def test_iter_completes(pretty_api):
items = []
t.config.secure = False
for i in t.list:
items.append(i)
assert items == [{u'item1': u'name1'}, {u'item2': u'name2'},
{u'item3': u'name3'}, {u'item4': u'name4'},
{u'item5': u'name5'}]
@pytest.mark.httpretty
def test_iter_next_link_is_error(pretty_api):
items = []
t.config.secure = False
with pytest.raises(HTTPError):
for item in t.list.first:
items.append(item)
|
{
"content_hash": "36a7271ce0e63a561ae7676cfeb93b20",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 64,
"avg_line_length": 23.47222222222222,
"alnum_prop": 0.621301775147929,
"repo_name": "PressLabs/zipa",
"id": "cdc422f23aa03acc6e4e154cff44051b47b9da67",
"size": "845",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_resource_iter.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "669"
},
{
"name": "Python",
"bytes": "23969"
}
],
"symlink_target": ""
}
|
from functools import partial
from django.shortcuts import redirect
from django.conf import settings
__all__ = [
'LoginRequiredMiddleware',
]
class LoginRequiredMiddleware():
'''
    This middleware flips Django's default login_required usage: by default, all views require login.
    Use the @public decorator to make a view publicly accessible.
'''
def process_view(self, request, view_func, view_args, view_kwargs):
'''Checks to see if the view is publically accessible or not'''
while isinstance(view_func, partial): # if partial - use original function for authorization
view_func = view_func.func
if not self.is_public(view_func):
if request.user.is_authenticated(): # only extended checks are needed
return None
return self.redirect_to_login(request.get_full_path()) # => login page
def redirect_to_login(self, original_target, login_url=settings.LOGIN_URL):
'''Redirects to the login page, with a get param for next'''
return redirect("%s?%s=%s" % (login_url, 'next', original_target))
    def is_public(self, function):
        '''Checks to see if a view has been flagged with the @public decorator'''
        result = False
        try: # cached flag is present
            return function.is_public_view
        except AttributeError: # cache is not set yet
            # always treat Django's own views as public
            if function.__module__.startswith('django'):
                result = True
            # skip modules that have been deemed public
elif 'public_modules' in settings.PRIMER:
for module in settings.PRIMER['public_modules']:
if function.__module__.startswith(module):
result = True
break
try: # try to recreate cache
function.is_public_view = result
except AttributeError:
pass
return result
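# Illustrative sketch (hypothetical helper, not part of the original package):
# the @public decorator referenced in the docstring above only needs to flag
# the view function so that is_public() finds the cached attribute on its
# first lookup.
def public(view_func):
    '''Hypothetical decorator marking a view as publicly accessible.'''
    view_func.is_public_view = True
    return view_func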
|
{
"content_hash": "23be19f89f8d19506e60cfe9b9b36994",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 104,
"avg_line_length": 39.074074074074076,
"alnum_prop": 0.5753554502369668,
"repo_name": "jamesmfriedman/django-primer",
"id": "17eb1bb10254140a81c1012a157259d09bc4e8d3",
"size": "2110",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "primer/middleware/login_required_middleware.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "210640"
},
{
"name": "JavaScript",
"bytes": "76063"
},
{
"name": "PHP",
"bytes": "232"
},
{
"name": "Python",
"bytes": "137085"
},
{
"name": "Shell",
"bytes": "4521"
}
],
"symlink_target": ""
}
|
"""
This module gathers tree-based methods, including decision, regression and
randomized trees. Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <g.louppe@gmail.com>
# Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Brian Holt <bdholt1@gmail.com>
# Noel Dawe <noel@dawe.me>
# Satrajit Gosh <satrajit.ghosh@gmail.com>
# Joly Arnaud <arnaud.v.joly@gmail.com>
# Fares Hedayati <fares.hedayati@gmail.com>
#
# Licence: BSD 3 clause
from __future__ import division
import numbers
from abc import ABCMeta
from abc import abstractmethod
from math import ceil
import numpy as np
from scipy.sparse import issparse
from ._criterion import Criterion
from ._splitter import Splitter
from ._tree import BestFirstTreeBuilder
from ._tree import DepthFirstTreeBuilder
from ._tree import Tree
from . import _tree, _splitter, _criterion
from ..base import BaseEstimator
from ..base import ClassifierMixin
from ..base import RegressorMixin
from ..exceptions import NotFittedError
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import check_array
from ..utils import check_random_state
from ..utils import compute_sample_weight
from ..utils.multiclass import check_classification_targets
__all__ = ["DecisionTreeClassifier",
"DecisionTreeRegressor",
"ExtraTreeClassifier",
"ExtraTreeRegressor"]
# =============================================================================
# Types and constants
# =============================================================================
DTYPE = _tree.DTYPE
DOUBLE = _tree.DOUBLE
CRITERIA_CLF = {"gini": _criterion.Gini, "entropy": _criterion.Entropy}
CRITERIA_REG = {"mse": _criterion.MSE, "friedman_mse": _criterion.FriedmanMSE}
DENSE_SPLITTERS = {"best": _splitter.BestSplitter,
"random": _splitter.RandomSplitter}
SPARSE_SPLITTERS = {"best": _splitter.BestSparseSplitter,
"random": _splitter.RandomSparseSplitter}
# =============================================================================
# Base decision tree
# =============================================================================
class BaseDecisionTree(six.with_metaclass(ABCMeta, BaseEstimator,
_LearntSelectorMixin)):
"""Base class for decision trees.
Warning: This class should not be used directly.
Use derived classes instead.
"""
@abstractmethod
def __init__(self,
criterion,
splitter,
max_depth,
min_samples_split,
min_samples_leaf,
min_weight_fraction_leaf,
max_features,
max_leaf_nodes,
random_state,
class_weight=None,
presort=False):
self.criterion = criterion
self.splitter = splitter
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.random_state = random_state
self.max_leaf_nodes = max_leaf_nodes
self.class_weight = class_weight
self.presort = presort
self.n_features_ = None
self.n_outputs_ = None
self.classes_ = None
self.n_classes_ = None
self.tree_ = None
self.max_features_ = None
def fit(self, X, y, sample_weight=None, check_input=True,
X_idx_sorted=None):
"""Build a decision tree from the training set (X, y).
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (class labels in classification, real numbers in
regression). In the regression case, use ``dtype=np.float64`` and
``order='C'`` for maximum efficiency.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
check_input : boolean, (default=True)
            Allows bypassing several input checks.
            Don't use this parameter unless you know what you are doing.
X_idx_sorted : array-like, shape = [n_samples, n_features], optional
            The indexes of the sorted training input samples. If many trees
are grown on the same dataset, this allows the ordering to be
cached between trees. If None, the data will be sorted here.
            Don't use this parameter unless you know what you are doing.
Returns
-------
self : object
Returns self.
"""
random_state = check_random_state(self.random_state)
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csc")
y = check_array(y, ensure_2d=False, dtype=None)
if issparse(X):
X.sort_indices()
if X.indices.dtype != np.intc or X.indptr.dtype != np.intc:
raise ValueError("No support for np.int64 index based "
"sparse matrices")
# Determine output settings
n_samples, self.n_features_ = X.shape
is_classification = isinstance(self, ClassifierMixin)
y = np.atleast_1d(y)
expanded_class_weight = None
if y.ndim == 1:
            # reshape is necessary to preserve the data contiguity;
            # [:, np.newaxis] does not preserve it.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
if is_classification:
check_classification_targets(y)
y = np.copy(y)
self.classes_ = []
self.n_classes_ = []
if self.class_weight is not None:
y_original = np.copy(y)
y_encoded = np.zeros(y.shape, dtype=np.int)
for k in range(self.n_outputs_):
classes_k, y_encoded[:, k] = np.unique(y[:, k],
return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
y = y_encoded
if self.class_weight is not None:
expanded_class_weight = compute_sample_weight(
self.class_weight, y_original)
else:
self.classes_ = [None] * self.n_outputs_
self.n_classes_ = [1] * self.n_outputs_
self.n_classes_ = np.array(self.n_classes_, dtype=np.intp)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
# Check parameters
max_depth = ((2 ** 31) - 1 if self.max_depth is None
else self.max_depth)
max_leaf_nodes = (-1 if self.max_leaf_nodes is None
else self.max_leaf_nodes)
if isinstance(self.min_samples_leaf, (numbers.Integral, np.integer)):
min_samples_leaf = self.min_samples_leaf
else: # float
min_samples_leaf = int(ceil(self.min_samples_leaf * n_samples))
if isinstance(self.min_samples_split, (numbers.Integral, np.integer)):
min_samples_split = self.min_samples_split
else: # float
min_samples_split = int(ceil(self.min_samples_split * n_samples))
min_samples_split = max(2, min_samples_split)
min_samples_split = max(min_samples_split, 2 * min_samples_leaf)
if isinstance(self.max_features, six.string_types):
if self.max_features == "auto":
if is_classification:
max_features = max(1, int(np.sqrt(self.n_features_)))
else:
max_features = self.n_features_
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features_)))
elif self.max_features == "log2":
max_features = max(1, int(np.log2(self.n_features_)))
else:
raise ValueError(
'Invalid value for max_features. Allowed string '
'values are "auto", "sqrt" or "log2".')
elif self.max_features is None:
max_features = self.n_features_
elif isinstance(self.max_features, (numbers.Integral, np.integer)):
max_features = self.max_features
else: # float
if self.max_features > 0.0:
max_features = max(1,
int(self.max_features * self.n_features_))
else:
max_features = 0
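        # e.g. max_features=0.5 with 10 features considers
        # max(1, int(0.5 * 10)) = 5 features at each split (illustration only).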
self.max_features_ = max_features
if len(y) != n_samples:
raise ValueError("Number of labels=%d does not match "
"number of samples=%d" % (len(y), n_samples))
if not (0. < self.min_samples_split <= 1. or
2 <= self.min_samples_split):
raise ValueError("min_samples_split must be in at least 2"
" or in (0, 1], got %s" % min_samples_split)
if not (0. < self.min_samples_leaf <= 0.5 or
1 <= self.min_samples_leaf):
raise ValueError("min_samples_leaf must be at least than 1 "
"or in (0, 0.5], got %s" % min_samples_leaf)
if not 0 <= self.min_weight_fraction_leaf <= 0.5:
raise ValueError("min_weight_fraction_leaf must in [0, 0.5]")
if max_depth <= 0:
raise ValueError("max_depth must be greater than zero. ")
if not (0 < max_features <= self.n_features_):
raise ValueError("max_features must be in (0, n_features]")
if not isinstance(max_leaf_nodes, (numbers.Integral, np.integer)):
raise ValueError("max_leaf_nodes must be integral number but was "
"%r" % max_leaf_nodes)
if -1 < max_leaf_nodes < 2:
raise ValueError(("max_leaf_nodes {0} must be either smaller than "
"0 or larger than 1").format(max_leaf_nodes))
if sample_weight is not None:
if (getattr(sample_weight, "dtype", None) != DOUBLE or
not sample_weight.flags.contiguous):
sample_weight = np.ascontiguousarray(
sample_weight, dtype=DOUBLE)
if len(sample_weight.shape) > 1:
raise ValueError("Sample weights array has more "
"than one dimension: %d" %
len(sample_weight.shape))
if len(sample_weight) != n_samples:
raise ValueError("Number of weights=%d does not match "
"number of samples=%d" %
(len(sample_weight), n_samples))
if expanded_class_weight is not None:
if sample_weight is not None:
sample_weight = sample_weight * expanded_class_weight
else:
sample_weight = expanded_class_weight
# Set min_weight_leaf from min_weight_fraction_leaf
if self.min_weight_fraction_leaf != 0. and sample_weight is not None:
min_weight_leaf = (self.min_weight_fraction_leaf *
np.sum(sample_weight))
else:
min_weight_leaf = 0.
presort = self.presort
# Allow presort to be 'auto', which means True if the dataset is dense,
# otherwise it will be False.
if self.presort == 'auto' and issparse(X):
presort = False
elif self.presort == 'auto':
presort = True
if presort is True and issparse(X):
raise ValueError("Presorting is not supported for sparse "
"matrices.")
# If multiple trees are built on the same dataset, we only want to
# presort once. Splitters now can accept presorted indices if desired,
# but do not handle any presorting themselves. Ensemble algorithms
# which desire presorting must do presorting themselves and pass that
# matrix into each tree.
if X_idx_sorted is None and presort:
X_idx_sorted = np.asfortranarray(np.argsort(X, axis=0),
dtype=np.int32)
if presort and X_idx_sorted.shape != X.shape:
raise ValueError("The shape of X (X.shape = {}) doesn't match "
"the shape of X_idx_sorted (X_idx_sorted"
".shape = {})".format(X.shape,
X_idx_sorted.shape))
# Build tree
criterion = self.criterion
if not isinstance(criterion, Criterion):
if is_classification:
criterion = CRITERIA_CLF[self.criterion](self.n_outputs_,
self.n_classes_)
else:
criterion = CRITERIA_REG[self.criterion](self.n_outputs_)
SPLITTERS = SPARSE_SPLITTERS if issparse(X) else DENSE_SPLITTERS
splitter = self.splitter
if not isinstance(self.splitter, Splitter):
splitter = SPLITTERS[self.splitter](criterion,
self.max_features_,
min_samples_leaf,
min_weight_leaf,
random_state,
self.presort)
self.tree_ = Tree(self.n_features_, self.n_classes_, self.n_outputs_)
# Use BestFirst if max_leaf_nodes given; use DepthFirst otherwise
if max_leaf_nodes < 0:
builder = DepthFirstTreeBuilder(splitter, min_samples_split,
min_samples_leaf,
min_weight_leaf,
max_depth)
else:
builder = BestFirstTreeBuilder(splitter, min_samples_split,
min_samples_leaf,
min_weight_leaf,
max_depth,
max_leaf_nodes)
builder.build(self.tree_, X, y, sample_weight, X_idx_sorted)
if self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
def _validate_X_predict(self, X, check_input):
"""Validate X whenever one tries to predict, apply, predict_proba"""
if self.tree_ is None:
raise NotFittedError("Estimator not fitted, "
"call `fit` before exploiting the model.")
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csr")
if issparse(X) and (X.indices.dtype != np.intc or
X.indptr.dtype != np.intc):
raise ValueError("No support for np.int64 index based "
"sparse matrices")
n_features = X.shape[1]
if self.n_features_ != n_features:
raise ValueError("Number of features of the model must "
"match the input. Model n_features is %s and "
"input n_features is %s "
% (self.n_features_, n_features))
return X
def predict(self, X, check_input=True):
"""Predict class or regression value for X.
For a classification model, the predicted class for each sample in X is
returned. For a regression model, the predicted value based on X is
returned.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
            Allows bypassing several input checks.
            Don't use this parameter unless you know what you are doing.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes, or the predict values.
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
n_samples = X.shape[0]
# Classification
if isinstance(self, ClassifierMixin):
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
predictions = np.zeros((n_samples, self.n_outputs_))
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(
np.argmax(proba[:, k], axis=1),
axis=0)
return predictions
# Regression
else:
if self.n_outputs_ == 1:
return proba[:, 0]
else:
return proba[:, :, 0]
def apply(self, X, check_input=True):
"""
Returns the index of the leaf that each sample is predicted as.
.. versionadded:: 0.17
Parameters
----------
X : array_like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
            Allows bypassing several input checks.
            Don't use this parameter unless you know what you are doing.
Returns
-------
X_leaves : array_like, shape = [n_samples,]
For each datapoint x in X, return the index of the leaf x
ends up in. Leaves are numbered within
``[0; self.tree_.node_count)``, possibly with gaps in the
numbering.
"""
X = self._validate_X_predict(X, check_input)
return self.tree_.apply(X)
def decision_path(self, X, check_input=True):
"""Return the decision path in the tree
Parameters
----------
X : array_like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
            Allows bypassing several input checks.
            Don't use this parameter unless you know what you are doing.
Returns
-------
indicator : sparse csr array, shape = [n_samples, n_nodes]
            Return a node indicator matrix where non-zero elements
            indicate that the samples go through the nodes.
"""
X = self._validate_X_predict(X, check_input)
return self.tree_.decision_path(X)
@property
def feature_importances_(self):
"""Return the feature importances.
The importance of a feature is computed as the (normalized) total
reduction of the criterion brought by that feature.
It is also known as the Gini importance.
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.tree_ is None:
raise NotFittedError("Estimator not fitted, call `fit` before"
" `feature_importances_`.")
return self.tree_.compute_feature_importances()
# =============================================================================
# Public estimators
# =============================================================================
class DecisionTreeClassifier(BaseDecisionTree, ClassifierMixin):
"""A decision tree classifier.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
    valid partition of the node samples is found, even if it requires
    effectively inspecting more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
class_weight : dict, list of dicts, "balanced" or None, optional (default=None)
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
presort : bool, optional (default=False)
Whether to presort the data to speed up the finding of best splits in
fitting. For the default settings of a decision tree on large
datasets, setting this to true may slow down the training process.
When using either a smaller dataset or a restricted depth, this may
speed up the training.
Attributes
----------
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem),
or a list of arrays of class labels (multi-output problem).
feature_importances_ : array of shape = [n_features]
The feature importances. The higher, the more important the
feature. The importance of a feature is computed as the (normalized)
total reduction of the criterion brought by that feature. It is also
known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_classes_ : int or list
The number of classes (for single output problems),
or a list containing the number of classes for each
output (for multi-output problems).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
See also
--------
DecisionTreeRegressor
References
----------
.. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.model_selection import cross_val_score
>>> from sklearn.tree import DecisionTreeClassifier
>>> clf = DecisionTreeClassifier(random_state=0)
>>> iris = load_iris()
>>> cross_val_score(clf, iris.data, iris.target, cv=10)
... # doctest: +SKIP
...
array([ 1. , 0.93..., 0.86..., 0.93..., 0.93...,
0.93..., 0.93..., 1. , 0.93..., 1. ])
"""
def __init__(self,
criterion="gini",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None,
class_weight=None,
presort=False):
super(DecisionTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
random_state=random_state,
presort=presort)
def predict_proba(self, X, check_input=True):
"""Predict class probabilities of the input samples X.
The predicted class probability is the fraction of samples of the same
class in a leaf.
        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.
        check_input : boolean, (default=True)
            Allows bypassing several input checks.
            Don't use this parameter unless you know what you are doing.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
if self.n_outputs_ == 1:
proba = proba[:, :self.n_classes_]
normalizer = proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba /= normalizer
return proba
else:
all_proba = []
for k in range(self.n_outputs_):
proba_k = proba[:, k, :self.n_classes_[k]]
normalizer = proba_k.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba_k /= normalizer
all_proba.append(proba_k)
return all_proba
def predict_log_proba(self, X):
"""Predict class log-probabilities of the input samples X.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in range(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
class DecisionTreeRegressor(BaseDecisionTree, RegressorMixin):
"""A decision tree regressor.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : string, optional (default="mse")
The function to measure the quality of a split. The only supported
criterion is "mse" for the mean squared error, which is equal to
variance reduction as feature selection criterion.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
    valid partition of the node samples is found, even if it requires
    effectively inspecting more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
presort : bool, optional (default=False)
Whether to presort the data to speed up the finding of best splits in
fitting. For the default settings of a decision tree on large
datasets, setting this to true may slow down the training process.
When using either a smaller dataset or a restricted depth, this may
speed up the training.
Attributes
----------
feature_importances_ : array of shape = [n_features]
The feature importances.
The higher, the more important the feature.
The importance of a feature is computed as the
(normalized) total reduction of the criterion brought
by that feature. It is also known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
See also
--------
DecisionTreeClassifier
References
----------
.. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_boston
>>> from sklearn.model_selection import cross_val_score
>>> from sklearn.tree import DecisionTreeRegressor
>>> boston = load_boston()
>>> regressor = DecisionTreeRegressor(random_state=0)
>>> cross_val_score(regressor, boston.data, boston.target, cv=10)
... # doctest: +SKIP
...
array([ 0.61..., 0.57..., -0.34..., 0.41..., 0.75...,
0.07..., 0.29..., 0.33..., -1.42..., -1.77...])
"""
def __init__(self,
criterion="mse",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None,
presort=False):
super(DecisionTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state,
presort=presort)
class ExtraTreeClassifier(DecisionTreeClassifier):
"""An extremely randomized tree classifier.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
    `max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
Read more in the :ref:`User Guide <tree>`.
See also
--------
ExtraTreeRegressor, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="gini",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
max_leaf_nodes=None,
class_weight=None):
super(ExtraTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
random_state=random_state)
class ExtraTreeRegressor(DecisionTreeRegressor):
"""An extremely randomized tree regressor.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
    `max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
Read more in the :ref:`User Guide <tree>`.
See also
--------
ExtraTreeClassifier, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="mse",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
max_leaf_nodes=None):
super(ExtraTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state)
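# Usage sketch (illustrative only, not part of this module): extra trees are
# meant to be used inside ensembles, so a typical pattern would be, e.g.:
#
#     from sklearn.ensemble import BaggingClassifier
#     from sklearn.tree import ExtraTreeClassifier
#     clf = BaggingClassifier(ExtraTreeClassifier(random_state=0),
#                             n_estimators=10)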
|
{
"content_hash": "a296289f559379452f98a30bb640a5c1",
"timestamp": "",
"source": "github",
"line_count": 1024,
"max_line_length": 83,
"avg_line_length": 39.5009765625,
"alnum_prop": 0.573067319340404,
"repo_name": "DailyActie/Surrogate-Model",
"id": "fbc32e8613f06baeede79887646e86f7b7fd4d67",
"size": "40449",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "01-codes/scikit-learn-master/sklearn/tree/tree.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Awk",
"bytes": "345"
},
{
"name": "Batchfile",
"bytes": "18746"
},
{
"name": "C",
"bytes": "13004913"
},
{
"name": "C++",
"bytes": "14692003"
},
{
"name": "CMake",
"bytes": "72831"
},
{
"name": "CSS",
"bytes": "303488"
},
{
"name": "Fortran",
"bytes": "7339415"
},
{
"name": "HTML",
"bytes": "854774"
},
{
"name": "Java",
"bytes": "38854"
},
{
"name": "JavaScript",
"bytes": "2432846"
},
{
"name": "Jupyter Notebook",
"bytes": "829689"
},
{
"name": "M4",
"bytes": "1379"
},
{
"name": "Makefile",
"bytes": "48708"
},
{
"name": "Matlab",
"bytes": "4346"
},
{
"name": "Objective-C",
"bytes": "567"
},
{
"name": "PHP",
"bytes": "93585"
},
{
"name": "Pascal",
"bytes": "1449"
},
{
"name": "Perl",
"bytes": "1152272"
},
{
"name": "PowerShell",
"bytes": "17042"
},
{
"name": "Python",
"bytes": "34668203"
},
{
"name": "Roff",
"bytes": "5925"
},
{
"name": "Ruby",
"bytes": "92498"
},
{
"name": "Shell",
"bytes": "94698"
},
{
"name": "TeX",
"bytes": "156540"
},
{
"name": "TypeScript",
"bytes": "41691"
}
],
"symlink_target": ""
}
|
from ...online_crowdsourcing import *
import json
import yaml
import math
import os
import numpy as np
import random
import urllib
import time
BASE_URL = "sbranson.no-ip.org/bluebirds"
NUM_COLS = 3
# Crowdsourcing for binary classification. Incorporates a worker skill and image difficulty model
class CrowdDatasetBinaryClassification(CrowdDataset):
def __init__(self, **kwds):
super(CrowdDatasetBinaryClassification, self).__init__(**kwds)
self._CrowdImageClass_ = CrowdImageBinaryClassification
self._CrowdWorkerClass_ = CrowdWorkerBinaryClassification
self._CrowdLabelClass_ = CrowdLabelBinaryClassification
        if 'estimate_priors_automatically' not in kwds: self.estimate_priors_automatically = False  #True
        self.prob_present_given_present_beta, self.prob_not_present_given_not_present_beta, self.prob_correct_beta = 10, 10, 20  # beta priors used when estimating per-worker parameters, regularizing them toward the global priors
self.prior_beta = 15
self.prob_present_beta = 2.5
self.prob_not_present_given_not_present_prior = self.prob_not_present_given_not_present = .8 # The prior probability an annotator says the class is not present when it is actually not present
self.prob_present_given_present_prior = self.prob_present_given_present = .8 # The prior probability an annotator says the class is present when it is actually present
self.prob_present_prior = self.prob_present = .5 # The prior probability that the class is present
self.prob_correct_prior = self.prob_correct = .8
self.skill_names = ['Prob Correct Given Present', 'Prob Correct Given Not Present']
name = self.name if self.name and len(self.name) > 0 else "object"
        self.hit_params = {'object_name': name}
dollars_per_hour, sec_per_click, sec_per_hour = 8, 1.2, 3600
self.reward = 0.15
self.images_per_hit = int(math.ceil(self.reward/dollars_per_hour*sec_per_hour/(sec_per_click*self.prob_present_prior)))
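        # Worked example (illustration only): with the defaults above,
        # 0.15 / 8 * 3600 / (1.2 * 0.5) = 112.5, so images_per_hit = ceil(112.5) = 113.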
self.description = self.title = "Click on images where " + ('an ' if name[0].lower() in ['a','e','i','o','u'] else 'a ') + name + " is present"
self.keywords = "images,labelling,present," + name
self.html_template_dir = 'html/binary'
def copy_parameters_from(self, dataset, full=True):
super(CrowdDatasetBinaryClassification, self).copy_parameters_from(dataset, full=full)
if full:
self.prob_not_present_given_not_present = dataset.prob_not_present_given_not_present
self.prob_present_given_present = dataset.prob_present_given_present
self.prob_present = dataset.prob_present
self.prob_correct = dataset.prob_correct
self.estimate_priors_automatically = False
def estimate_priors(self, gt_dataset=None):
num_present, num_not_present = self.prior_beta*self.prob_present_prior, self.prior_beta*(1-self.prob_present_prior)
num_not_present_given_not_present, num_present_given_not_present = self.prior_beta*self.prob_not_present_given_not_present_prior, self.prior_beta*(1-self.prob_not_present_given_not_present_prior)
num_present_given_present, num_not_present_given_present = self.prior_beta*self.prob_present_given_present_prior, self.prior_beta*(1-self.prob_present_given_present_prior)
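        # Worked example (illustration only): with the default prior_beta = 15
        # and prob_present_prior = 0.5, the class-presence counts start at
        # 7.5 "present" vs 7.5 "not present" pseudo-observations.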
self.initialize_parameters(avoid_if_finished=True)
for i in self.images:
has_cv = (1 if (self.cv_worker and self.cv_worker.id in self.images[i].z) else 0)
if len(self.images[i].z)-has_cv < 1:
continue
if not gt_dataset is None:
y = gt_dataset.images[i].y.label
else:
#self.images[i].predict_true_labels(avoid_if_finished=True)
y = self.images[i].y.soft_label if hasattr(self.images[i].y,"soft_label") else self.images[i].y.label
for w in self.images[i].z:
if not self.images[i].z[w].is_computer_vision():
z = self.images[i].z[w].label
num_present += y
num_not_present += 1-y
num_not_present_given_not_present += (1-y)*(1-z)
num_present_given_not_present += (1-y)*z
num_present_given_present += y*z
num_not_present_given_present += y*(1-z)
self.prob_present = min(.9999,max(.0001,float(num_present)/max(0.0001,num_present+num_not_present)))
self.prob_not_present_given_not_present = min(.9999,max(.0001,float(num_not_present_given_not_present)/max(0.0001,num_not_present_given_not_present+num_present_given_not_present)))
self.prob_present_given_present = min(.9999,max(.0001,float(num_present_given_present)/max(0.0001,num_present_given_present+num_not_present_given_present)))
self.prob_correct = min(.9999,max(.0001,float(num_present_given_present + num_not_present_given_not_present)/max(0.0001,num_present_given_present+num_not_present_given_present + num_not_present_given_not_present+num_present_given_not_present)))
def initialize_parameters(self, avoid_if_finished=False):
for w in self.workers:
self.workers[w].prob_not_present_given_not_present = self.prob_not_present_given_not_present
self.workers[w].prob_present_given_present = self.prob_present_given_present
self.workers[w].prob_present = self.prob_present
self.workers[w].prob_correct = self.prob_correct
class CrowdImageBinaryClassification(CrowdImage):
def __init__(self, id, params):
super(CrowdImageBinaryClassification, self).__init__(id, params)
self.original_name = id
def crowdsource_simple(self, avoid_if_finished=False):
if avoid_if_finished and self.finished:
return
num, numT = 0, 0
for w in self.z:
if not self.z[w].is_computer_vision():
num += self.z[w].label
numT += 1
label = (1.0 if (num>numT/2.0 or (num==numT/2.0 and random.random()>.5)) else 0.0)
self.y = CrowdLabelBinaryClassification(self, None, label=label)
self.y.soft_label = label if num!=numT/2.0 else .5
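        # For example (illustration only): 3 of 5 workers answering "present"
        # gives label 1.0 and soft_label 1.0, while a 2-2 tie is broken at
        # random and soft_label is set to 0.5.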
def predict_true_labels(self, avoid_if_finished=False):
if avoid_if_finished and self.finished:
return
if not self.cv_pred is None and not self.params.naive_computer_vision:
ll_not_present,ll_present = math.log(1-self.cv_pred.prob), math.log(self.cv_pred.prob)
else:
ll_not_present,ll_present = math.log(1-self.params.prob_present), math.log(self.params.prob_present)
for w in self.z:
if not self.z[w].is_computer_vision() or self.params.naive_computer_vision:
ll_present += math.log(self.z[w].worker.prob_present_given_present if self.z[w].label==1 else 1-self.z[w].worker.prob_present_given_present)
ll_not_present += math.log(self.z[w].worker.prob_not_present_given_not_present if self.z[w].label==0 else 1-self.z[w].worker.prob_not_present_given_not_present)
self.y = CrowdLabelBinaryClassification(self, None, label=(1.0 if (ll_present>ll_not_present or(ll_present==ll_not_present and random.random()>.5)) else 0.0))
#ll_not_present,ll_present = ll_not_present-.5*math.log(1-self.params.prob_present), ll_present-.5*math.log(self.params.prob_present)
self.ll_present, self.ll_not_present = ll_present, ll_not_present
m = max(ll_present,ll_not_present)
self.prob = math.exp(ll_present-m)/(math.exp(ll_not_present-m)+math.exp(ll_present-m))
self.risk = self.prob*(1-self.y.label) + (1-self.prob)*self.y.label
self.y.soft_label = self.prob
def compute_log_likelihood(self):
y = self.y.soft_label if hasattr(self.y,"soft_label") else self.y.label
        return (1-y)*math.log(1-self.cv_pred.prob)+y*math.log(self.cv_pred.prob) if self.cv_pred is not None else (1-y)*math.log(1-self.params.prob_present)+y*math.log(self.params.prob_present)
# Estimate difficulty parameters
def estimate_parameters(self, avoid_if_finished=False):
if (avoid_if_finished and self.finished) or len(self.z)<=1:
return
def check_finished(self, set_finished=True):
if self.finished:
return True
self.risk = self.prob*(1-self.y.label) + (1-self.prob)*self.y.label
finished = self.risk <= self.params.min_risk
if set_finished: self.finished = finished
return finished
class CrowdWorkerBinaryClassification(CrowdWorker):
def __init__(self, id, params):
super(CrowdWorkerBinaryClassification,self).__init__(id, params)
self.skill = None
self.prob_not_present_given_not_present = params.prob_not_present_given_not_present
self.prob_present_given_present = params.prob_present_given_present
self.prob_present = params.prob_present
self.prob_correct = params.prob_correct
def compute_log_likelihood(self):
ll = ((self.params.prob_present_given_present*self.params.prob_present_given_present_beta-1)*math.log(self.prob_present_given_present) +
((1-self.params.prob_present_given_present)*self.params.prob_present_given_present_beta-1)*math.log(1-self.prob_present_given_present))
ll += ((self.params.prob_not_present_given_not_present*self.params.prob_not_present_given_not_present_beta-1)*math.log(self.prob_not_present_given_not_present) +
((1-self.params.prob_not_present_given_not_present)*self.params.prob_not_present_given_not_present_beta-1)*math.log(1-self.prob_not_present_given_not_present))
return ll
# Estimate worker skill parameters
def estimate_parameters(self, avoid_if_finished=False):
# For each worker, we have binomial distributions for 1) the probability a worker thinks a
# class is present if the class is present in the ground truth, 2) the probability a worker thinks a
# class is present if the class is not present in the ground truth. Each of these distributions has a
# Beta prior from the distribution of all workers pooled together
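        # Worked example (illustration only): with the default
        # prob_present_given_present_beta = 10 and prob_present_given_present = 0.8,
        # each worker starts from 8 pseudo-answers of "present" out of 10
        # pseudo-images whose estimated label is "present", before their own
        # labels are added below.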
num_present = self.params.prob_present_given_present_beta
num_present_given_present = self.params.prob_present_given_present_beta*self.params.prob_present_given_present
num_not_present = self.params.prob_not_present_given_not_present_beta
num_not_present_given_not_present = self.params.prob_not_present_given_not_present_beta*self.params.prob_not_present_given_not_present
num_worker_present = 0
for i in self.images:
y = self.images[i].y.soft_label if hasattr(self.images[i].y,"soft_label") else self.images[i].y.label
num_present += y
num_not_present += 1-y
num_present_given_present += y*self.images[i].z[self.id].label
num_not_present_given_not_present += (1-y)*(1-self.images[i].z[self.id].label)
num_worker_present += self.images[i].z[self.id].label
self.num_present, self.num_not_present, self.num_present_given_present, self.num_not_present_given_not_present, self.num_worker_present = num_present, num_not_present, num_present_given_present, num_not_present_given_not_present, num_worker_present
#b = 5
#self.prob_correct = (num_present_given_present + num_not_present_given_not_present) / (num_present+num_not_present)
#num_present_given_present, num_present, num_not_present_given_not_present, num_not_present = num_present_given_present+self.prob_correct*b, num_present+b, num_not_present_given_not_present+self.prob_correct*b, num_not_present+b
beta = min(self.params.prob_present_beta, num_present+num_not_present)
self.prob_present = float(num_worker_present) / max(0.0001,len(self.images))
num_present_given_present += self.prob_present*beta
num_present += beta
num_not_present_given_not_present += (1-self.prob_present)*beta
num_not_present += beta
self.prob_present_given_present = float(num_present_given_present) / num_present
self.prob_not_present_given_not_present = float(num_not_present_given_not_present) / num_not_present
self.skill = [self.prob_present_given_present, self.prob_not_present_given_not_present]
class CrowdLabelBinaryClassification(CrowdLabel):
def __init__(self, image, worker, label=None):
super(CrowdLabelBinaryClassification, self).__init__(image, worker)
self.label = label
self.gtype = 'binary'
def compute_log_likelihood(self):
y = self.image.y.soft_label if hasattr(self.image.y,"soft_label") else self.image.y.label
z = self.label
return (math.log(self.worker.prob_present_given_present)*y*z + math.log(self.worker.prob_not_present_given_not_present)*(1-y)*(1-z) +
math.log(1-self.worker.prob_present_given_present)*y*(1-z) + math.log(1-self.worker.prob_not_present_given_not_present)*(1-y)*z)
def loss(self, y):
return abs(self.label-y.label)
def parse(self, data):
super(CrowdLabelBinaryClassification, self).parse(data)
self.label = float(self.label)
|
{
"content_hash": "ff7fc4cbf2b13af2b35f8780d10c2ca9",
"timestamp": "",
"source": "github",
"line_count": 212,
"max_line_length": 252,
"avg_line_length": 60.094339622641506,
"alnum_prop": 0.6972527472527472,
"repo_name": "sbranson/online_crowdsourcing",
"id": "a87b66d732ee252bfc942312ee32589e4032fa75",
"size": "12740",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "crowdsourcing/annotation_types/classification/binary.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "15706"
},
{
"name": "Python",
"bytes": "444456"
}
],
"symlink_target": ""
}
|
"""The Python datastore API used by app developers.
Defines Entity, Query, and Iterator classes, as well as methods for all of the
datastore's calls. Also defines conversions between the Python classes and
their PB counterparts.
The datastore errors are defined in the datastore_errors module. That module is
only required to avoid circular imports. datastore imports datastore_types,
which needs BadValueError, so it can't be defined in datastore.
"""
import heapq
import itertools
import logging
import os
import re
import sys
import threading
import traceback
from xml.sax import saxutils
from google.appengine.api import apiproxy_stub_map
from google.appengine.api import capabilities
from google.appengine.api import datastore_errors
from google.appengine.api import datastore_types
from google.appengine.datastore import datastore_pb
from google.appengine.datastore import datastore_query
from google.appengine.datastore import datastore_rpc
from google.appengine.datastore import entity_pb
MAX_ALLOWABLE_QUERIES = 30
MAXIMUM_RESULTS = 1000
DEFAULT_TRANSACTION_RETRIES = 3
READ_CAPABILITY = capabilities.CapabilitySet('datastore_v3')
WRITE_CAPABILITY = capabilities.CapabilitySet(
'datastore_v3',
capabilities=['write'])
_MAX_INDEXED_PROPERTIES = 20000
_MAX_ID_BATCH_SIZE = datastore_rpc._MAX_ID_BATCH_SIZE
Key = datastore_types.Key
typename = datastore_types.typename
STRONG_CONSISTENCY = datastore_rpc.Configuration.STRONG_CONSISTENCY
EVENTUAL_CONSISTENCY = datastore_rpc.Configuration.EVENTUAL_CONSISTENCY
_MAX_INT_32 = 2**31-1
def NormalizeAndTypeCheck(arg, types):
"""Normalizes and type checks the given argument.
Args:
arg: an instance or iterable of the given type(s)
types: allowed type or tuple of types
Returns:
A (list, bool) tuple. The list is a normalized, shallow copy of the
argument. The boolean is True if the argument was a sequence, False
if it was a single object.
Raises:
AssertionError: types includes list or tuple.
BadArgumentError: arg is not an instance or sequence of one of the given
types.
"""
if not isinstance(types, (list, tuple)):
types = (types,)
assert list not in types and tuple not in types
if isinstance(arg, types):
return [arg], False
else:
if isinstance(arg, basestring):
raise datastore_errors.BadArgumentError(
'Expected an instance or iterable of %s; received %s (a %s).' %
(types, arg, typename(arg)))
try:
arg_list = list(arg)
except TypeError:
raise datastore_errors.BadArgumentError(
'Expected an instance or iterable of %s; received %s (a %s).' %
(types, arg, typename(arg)))
for val in arg_list:
if not isinstance(val, types):
raise datastore_errors.BadArgumentError(
'Expected one of %s; received %s (a %s).' %
(types, val, typename(val)))
return arg_list, True
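# For example (illustration only): NormalizeAndTypeCheck(5, int) returns
# ([5], False), while NormalizeAndTypeCheck((5, 6), int) returns ([5, 6], True).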
def NormalizeAndTypeCheckKeys(keys):
"""Normalizes and type checks that the given argument is a valid key or keys.
A wrapper around NormalizeAndTypeCheck() that accepts strings, Keys, and
Entities, and normalizes to Keys.
Args:
keys: a Key or sequence of Keys
Returns:
A (list of Keys, bool) tuple. See NormalizeAndTypeCheck.
Raises:
BadArgumentError: arg is not an instance or sequence of one of the given
types.
"""
keys, multiple = NormalizeAndTypeCheck(keys, (basestring, Entity, Key))
keys = [_GetCompleteKeyOrError(key) for key in keys]
return (keys, multiple)
def _GetConfigFromKwargs(kwargs, convert_rpc=False,
config_class=datastore_rpc.Configuration):
"""Get a Configuration object from the keyword arguments.
This is purely an internal helper for the various public APIs below
such as Get().
Args:
kwargs: A dict containing the keyword arguments passed to a public API.
    convert_rpc: If an rpc should be converted or passed on directly.
config_class: The config class that should be generated.
Returns:
A UserRPC instance, or a Configuration instance, or None.
Raises:
TypeError if unexpected keyword arguments are present.
"""
if not kwargs:
return None
rpc = kwargs.pop('rpc', None)
if rpc is not None:
if not isinstance(rpc, apiproxy_stub_map.UserRPC):
raise datastore_errors.BadArgumentError(
'rpc= argument should be None or a UserRPC instance')
if 'config' in kwargs:
raise datastore_errors.BadArgumentError(
'Expected rpc= or config= argument but not both')
if not convert_rpc:
if kwargs:
raise datastore_errors.BadArgumentError(
'Unexpected keyword arguments: %s' % ', '.join(kwargs))
return rpc
read_policy = getattr(rpc, 'read_policy', None)
kwargs['config'] = datastore_rpc.Configuration(
deadline=rpc.deadline, read_policy=read_policy,
config=_GetConnection().config)
return config_class(**kwargs)
class _BaseIndex(object):
BUILDING, SERVING, DELETING, ERROR = range(4)
ASCENDING = datastore_query.PropertyOrder.ASCENDING
DESCENDING = datastore_query.PropertyOrder.DESCENDING
def __init__(self, index_id, kind, has_ancestor, properties):
"""Construct a datastore index instance.
Args:
index_id: Required long; Uniquely identifies the index
kind: Required string; Specifies the kind of the entities to index
has_ancestor: Required boolean; indicates if the index supports a query
that filters entities by the entity group parent
properties: Required list of (string, int) tuples; The entity properties
to index. First item in a tuple is the property name and the second
item is the sorting direction (ASCENDING|DESCENDING).
The order of the properties is based on the order in the index.
"""
argument_error = datastore_errors.BadArgumentError
datastore_types.ValidateInteger(index_id, 'index_id', argument_error,
zero_ok=True)
datastore_types.ValidateString(kind, 'kind', argument_error, empty_ok=True)
if not isinstance(properties, (list, tuple)):
raise argument_error('properties must be a list or a tuple')
for idx, index_property in enumerate(properties):
if not isinstance(index_property, (list, tuple)):
raise argument_error('property[%d] must be a list or a tuple' % idx)
if len(index_property) != 2:
raise argument_error('property[%d] length should be 2 but was %d' %
(idx, len(index_property)))
datastore_types.ValidateString(index_property[0], 'property name',
argument_error)
_BaseIndex.__ValidateEnum(index_property[1],
(self.ASCENDING, self.DESCENDING),
'sort direction')
self.__id = long(index_id)
self.__kind = kind
self.__has_ancestor = bool(has_ancestor)
self.__properties = properties
@staticmethod
def __ValidateEnum(value, accepted_values, name='value',
exception=datastore_errors.BadArgumentError):
datastore_types.ValidateInteger(value, name, exception)
if not value in accepted_values:
raise exception('%s should be one of %s but was %d' %
(name, str(accepted_values), value))
def _Id(self):
"""Returns the index id, a long."""
return self.__id
def _Kind(self):
"""Returns the index kind, a string. Empty string ('') if none."""
return self.__kind
def _HasAncestor(self):
"""Indicates if this is an ancestor index, a boolean."""
return self.__has_ancestor
def _Properties(self):
"""Returns the index properties. a tuple of
(index name as a string, [ASCENDING|DESCENDING]) tuples.
"""
return self.__properties
def __eq__(self, other):
return self.__id == other.__id
def __ne__(self, other):
return self.__id != other.__id
def __hash__(self):
return hash(self.__id)
class Index(_BaseIndex):
"""A datastore index."""
Id = _BaseIndex._Id
Kind = _BaseIndex._Kind
HasAncestor = _BaseIndex._HasAncestor
Properties = _BaseIndex._Properties
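# Illustrative example (hypothetical values): an index with id 1 over the
# "Greeting" kind, without an ancestor and ordered by "date" descending,
# could be constructed as
#   Index(1, 'Greeting', False, [('date', Index.DESCENDING)])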
class DatastoreAdapter(datastore_rpc.AbstractAdapter):
"""Adapter between datatypes defined here (Entity etc.) and protobufs.
See the base class in datastore_rpc.py for more docs.
"""
index_state_mappings = {
entity_pb.CompositeIndex.ERROR: Index.ERROR,
entity_pb.CompositeIndex.DELETED: Index.DELETING,
entity_pb.CompositeIndex.READ_WRITE: Index.SERVING,
entity_pb.CompositeIndex.WRITE_ONLY: Index.BUILDING
}
index_direction_mappings = {
entity_pb.Index_Property.ASCENDING: Index.ASCENDING,
entity_pb.Index_Property.DESCENDING: Index.DESCENDING
}
def key_to_pb(self, key):
return key._Key__reference
def pb_to_key(self, pb):
return Key._FromPb(pb)
def entity_to_pb(self, entity):
return entity._ToPb()
def pb_to_entity(self, pb):
return Entity._FromPb(pb)
def pb_to_index(self, pb):
index_def = pb.definition()
properties = [(property.name().decode('utf-8'),
DatastoreAdapter.index_direction_mappings.get(property.direction()))
for property in index_def.property_list()]
index = Index(pb.id(), index_def.entity_type().decode('utf-8'),
index_def.ancestor(), properties)
state = DatastoreAdapter.index_state_mappings.get(pb.state())
return index, state
_adapter = DatastoreAdapter()
_thread_local = threading.local()
_ENV_KEY = '__DATASTORE_CONNECTION_INITIALIZED__'
def __InitConnection():
"""Internal method to make sure the connection state has been initialized."""
if os.getenv(_ENV_KEY) and hasattr(_thread_local, 'connection_stack'):
return
_thread_local.connection_stack = [datastore_rpc.Connection(adapter=_adapter)]
os.environ[_ENV_KEY] = '1'
def _GetConnection():
"""Internal method to retrieve a datastore connection local to the thread."""
__InitConnection()
return _thread_local.connection_stack[-1]
def _SetConnection(connection):
"""Internal method to replace the current thread local connection."""
__InitConnection()
_thread_local.connection_stack[-1] = connection
def _PushConnection(new_connection):
"""Internal method to save the current connection and sets a new one.
Args:
new_connection: The connection to set.
"""
__InitConnection()
_thread_local.connection_stack.append(new_connection)
def _PopConnection():
"""Internal method to restores the previous connection.
Returns:
The current connection.
"""
assert len(_thread_local.connection_stack) >= 2
return _thread_local.connection_stack.pop()
def _MakeSyncCall(service, call, request, response, config=None):
"""The APIProxy entry point for a synchronous API call.
Args:
service: For backwards compatibility, must be 'datastore_v3'.
call: String representing which function to call.
request: Protocol buffer for the request.
response: Protocol buffer for the response.
config: Optional Configuration to use for this request.
Returns:
Response protocol buffer. Caller should always use returned value
which may or may not be same as passed in 'response'.
Raises:
apiproxy_errors.Error or a subclass.
"""
conn = _GetConnection()
if isinstance(request, datastore_pb.Query):
conn._set_request_read_policy(request, config)
conn._set_request_transaction(request)
rpc = conn._make_rpc_call(config, call, request, response)
conn.check_rpc_success(rpc)
return response
def CreateRPC(service='datastore_v3',
deadline=None, callback=None, read_policy=None):
"""Create an rpc for use in configuring datastore calls.
NOTE: This functions exists for backwards compatibility. Please use
CreateConfig() instead. NOTE: the latter uses 'on_completion',
which is a function taking an argument, wherease CreateRPC uses
'callback' which is a function without arguments.
Args:
service: Optional string; for backwards compatibility, must be
'datastore_v3'.
deadline: Optional int or float, deadline for calls in seconds.
callback: Optional callable, a callback triggered when this rpc
completes; takes no arguments.
read_policy: Optional read policy; set to EVENTUAL_CONSISTENCY to
enable eventually consistent reads (i.e. reads that may be
satisfied from an older version of the datastore in some cases).
The default read policy may have to wait until in-flight
transactions are committed.
Returns:
A UserRPC instance.
"""
assert service == 'datastore_v3'
conn = _GetConnection()
config = None
if deadline is not None:
config = datastore_rpc.Configuration(deadline=deadline)
rpc = conn._create_rpc(config)
rpc.callback = callback
if read_policy is not None:
rpc.read_policy = read_policy
return rpc
def CreateConfig(**kwds):
"""Create a Configuration object for use in configuring datastore calls.
This configuration can be passed to most datastore calls using the
'config=...' argument.
Args:
deadline: Optional deadline; default None (which means the
system default deadline will be used, typically 5 seconds).
on_completion: Optional callback function; default None. If
specified, it will be called with a UserRPC object as argument
when an RPC completes.
read_policy: Optional read policy; set to EVENTUAL_CONSISTENCY to
enable eventually consistent reads (i.e. reads that may be
satisfied from an older version of the datastore in some cases).
The default read policy may have to wait until in-flight
transactions are committed.
**kwds: Other keyword arguments as long as they are supported by
datastore_rpc.Configuration().
Returns:
A datastore_rpc.Configuration instance.
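  Example (illustrative only; 'key' is a placeholder Key):
    > config = CreateConfig(deadline=10)
    > entity = Get(key, config=config)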
"""
return datastore_rpc.Configuration(**kwds)
def CreateTransactionOptions(**kwds):
"""Create a configuration object for use in configuring transactions.
This configuration can be passed as run_in_transaction_option's first
argument.
Args:
deadline: Optional deadline; default None (which means the
system default deadline will be used, typically 5 seconds).
on_completion: Optional callback function; default None. If
specified, it will be called with a UserRPC object as argument
when an RPC completes.
xg: set to true to allow cross-group transactions (high replication
datastore only)
retries: set the number of retries for a transaction
**kwds: Other keyword arguments as long as they are supported by
datastore_rpc.TransactionOptions().
Returns:
A datastore_rpc.TransactionOptions instance.
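  Example (illustrative only; txn_function is a placeholder function):
    > options = CreateTransactionOptions(xg=True, retries=3)
    > result = RunInTransactionOptions(options, txn_function)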
"""
return datastore_rpc.TransactionOptions(**kwds)
def PutAsync(entities, **kwargs):
"""Asynchronously store one or more entities in the datastore.
Identical to datastore.Put() except returns an asynchronous object. Call
get_result() on the return value to block on the call and get the results.
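  Example (illustrative only; entity is a placeholder Entity):
    > rpc = PutAsync(entity)
    > # ... do other work while the put is in flight ...
    > key = rpc.get_result()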
"""
extra_hook = kwargs.pop('extra_hook', None)
config = _GetConfigFromKwargs(kwargs)
if getattr(config, 'read_policy', None) == EVENTUAL_CONSISTENCY:
raise datastore_errors.BadRequestError(
'read_policy is only supported on read operations.')
entities, multiple = NormalizeAndTypeCheck(entities, Entity)
for entity in entities:
if entity.is_projection():
raise datastore_errors.BadRequestError(
'Cannot put a partial entity: %s' % entity)
if not entity.kind() or not entity.app():
raise datastore_errors.BadRequestError(
'App and kind must not be empty, in entity: %s' % entity)
def local_extra_hook(keys):
num_keys = len(keys)
num_entities = len(entities)
if num_keys != num_entities:
raise datastore_errors.InternalError(
'Put accepted %d entities but returned %d keys.' %
(num_entities, num_keys))
for entity, key in zip(entities, keys):
if entity._Entity__key._Key__reference != key._Key__reference:
assert not entity._Entity__key.has_id_or_name()
entity._Entity__key._Key__reference.CopyFrom(key._Key__reference)
if multiple:
result = keys
else:
result = keys[0]
if extra_hook:
return extra_hook(result)
return result
return _GetConnection().async_put(config, entities, local_extra_hook)
def Put(entities, **kwargs):
"""Store one or more entities in the datastore.
The entities may be new or previously existing. For new entities, Put() will
fill in the app id and key assigned by the datastore.
If the argument is a single Entity, a single Key will be returned. If the
argument is a list of Entity, a list of Keys will be returned.
Args:
entities: Entity or list of Entities
config: Optional Configuration to use for this request, must be specified
as a keyword argument.
Returns:
Key or list of Keys
Raises:
TransactionFailedError, if the Put could not be committed.
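  Example (illustrative only; 'Person' is a placeholder kind):
    > entity = Entity('Person')
    > entity['name'] = 'Ryan'
    > key = Put(entity)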
"""
return PutAsync(entities, **kwargs).get_result()
def GetAsync(keys, **kwargs):
"""Asynchronously retrieves one or more entities from the datastore.
Identical to datastore.Get() except returns an asynchronous object. Call
get_result() on the return value to block on the call and get the results.
"""
extra_hook = kwargs.pop('extra_hook', None)
config = _GetConfigFromKwargs(kwargs)
keys, multiple = NormalizeAndTypeCheckKeys(keys)
def local_extra_hook(entities):
if multiple:
result = entities
else:
if entities[0] is None:
raise datastore_errors.EntityNotFoundError()
result = entities[0]
if extra_hook:
return extra_hook(result)
return result
return _GetConnection().async_get(config, keys, local_extra_hook)
def Get(keys, **kwargs):
"""Retrieves one or more entities from the datastore.
Retrieves the entity or entities with the given key(s) from the datastore
and returns them as fully populated Entity objects, as defined below. If
there is an error, raises a subclass of datastore_errors.Error.
If keys is a single key or string, an Entity will be returned, or
EntityNotFoundError will be raised if no existing entity matches the key.
However, if keys is a list or tuple, a list of entities will be returned
that corresponds to the sequence of keys. It will include entities for keys
that were found and None placeholders for keys that were not found.
Args:
keys: Key or string or list of Keys or strings
config: Optional Configuration to use for this request, must be specified
as a keyword argument.
Returns:
Entity or list of Entity objects
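  Example (illustrative only; key, key1 and key2 are placeholder Keys):
    > entity = Get(key)
    > entities = Get([key1, key2])  # None placeholders for missing keys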
"""
return GetAsync(keys, **kwargs).get_result()
def GetIndexesAsync(**kwargs):
"""Asynchronously retrieves the application indexes and their states.
Identical to GetIndexes() except returns an asynchronous object. Call
get_result() on the return value to block on the call and get the results.
"""
extra_hook = kwargs.pop('extra_hook', None)
config = _GetConfigFromKwargs(kwargs)
def local_extra_hook(result):
if extra_hook:
return extra_hook(result)
return result
return _GetConnection().async_get_indexes(config, local_extra_hook)
def GetIndexes(**kwargs):
"""Retrieves the application indexes and their states.
Args:
config: Optional Configuration to use for this request, must be specified
as a keyword argument.
Returns:
A list of (Index, Index.[BUILDING|SERVING|DELETING|ERROR]) tuples.
An index can be in the following states:
      Index.BUILDING: Index is being built and therefore cannot serve queries
      Index.SERVING: Index is ready to service queries
      Index.DELETING: Index is being deleted
      Index.ERROR: Index encountered an error in the BUILDING state
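  Example (illustrative only):
    > for index, state in GetIndexes():
    >   print index.Kind(), state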
"""
return GetIndexesAsync(**kwargs).get_result()
def DeleteAsync(keys, **kwargs):
"""Asynchronously deletes one or more entities from the datastore.
Identical to datastore.Delete() except returns an asynchronous object. Call
get_result() on the return value to block on the call.
"""
config = _GetConfigFromKwargs(kwargs)
if getattr(config, 'read_policy', None) == EVENTUAL_CONSISTENCY:
raise datastore_errors.BadRequestError(
'read_policy is only supported on read operations.')
keys, _ = NormalizeAndTypeCheckKeys(keys)
return _GetConnection().async_delete(config, keys)
def Delete(keys, **kwargs):
"""Deletes one or more entities from the datastore. Use with care!
Deletes the given entity(ies) from the datastore. You can only delete
entities from your app. If there is an error, raises a subclass of
datastore_errors.Error.
Args:
# the primary key(s) of the entity(ies) to delete
keys: Key or string or list of Keys or strings
config: Optional Configuration to use for this request, must be specified
as a keyword argument.
Raises:
TransactionFailedError, if the Delete could not be committed.
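  Example (illustrative only; key1 and key2 are placeholder Keys):
    > Delete(key1)
    > Delete([key1, key2])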
"""
return DeleteAsync(keys, **kwargs).get_result()
class Entity(dict):
"""A datastore entity.
Includes read-only accessors for app id, kind, and primary key. Also
provides dictionary-style access to properties.
"""
__projection = False
def __init__(self, kind, parent=None, _app=None, name=None, id=None,
unindexed_properties=[], namespace=None, **kwds):
"""Constructor. Takes the kind and transaction root, which cannot be
changed after the entity is constructed, and an optional parent. Raises
BadArgumentError or BadKeyError if kind is invalid or parent is not an
existing Entity or Key in the datastore.
Args:
# this entity's kind
kind: string
# if provided, this entity's parent. Its key must be complete.
parent: Entity or Key
# if provided, this entity's name.
name: string
# if provided, this entity's id.
id: integer
# if provided, a sequence of property names that should not be indexed
# by the built-in single property indices.
unindexed_properties: list or tuple of strings
namespace: string
# if provided, overrides the default namespace_manager setting.
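    Example (illustrative only; 'Person' is a placeholder kind):
      > parent = Entity('Person', name='ryan')
      > child = Entity('Person', parent=parent, name='asher')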
"""
ref = entity_pb.Reference()
_app = datastore_types.ResolveAppId(_app)
ref.set_app(_app)
_namespace = kwds.pop('_namespace', None)
if kwds:
raise datastore_errors.BadArgumentError(
'Excess keyword arguments ' + repr(kwds))
if namespace is None:
namespace = _namespace
elif _namespace is not None:
raise datastore_errors.BadArgumentError(
"Must not set both _namespace and namespace parameters.")
datastore_types.ValidateString(kind, 'kind',
datastore_errors.BadArgumentError)
if parent is not None:
parent = _GetCompleteKeyOrError(parent)
if _app != parent.app():
raise datastore_errors.BadArgumentError(
" %s doesn't match parent's app %s" %
(_app, parent.app()))
if namespace is None:
namespace = parent.namespace()
elif namespace != parent.namespace():
raise datastore_errors.BadArgumentError(
" %s doesn't match parent's namespace %s" %
(namespace, parent.namespace()))
ref.CopyFrom(parent._Key__reference)
namespace = datastore_types.ResolveNamespace(namespace)
datastore_types.SetNamespace(ref, namespace)
last_path = ref.mutable_path().add_element()
last_path.set_type(kind.encode('utf-8'))
if name is not None and id is not None:
raise datastore_errors.BadArgumentError(
"Cannot set both name and id on an Entity")
if name is not None:
datastore_types.ValidateString(name, 'name')
last_path.set_name(name.encode('utf-8'))
if id is not None:
datastore_types.ValidateInteger(id, 'id')
last_path.set_id(id)
self.set_unindexed_properties(unindexed_properties)
self.__key = Key._FromPb(ref)
def app(self):
"""Returns the name of the application that created this entity, a
string or None if not set.
"""
return self.__key.app()
def namespace(self):
"""Returns the namespace of this entity, a string or None."""
return self.__key.namespace()
def kind(self):
"""Returns this entity's kind, a string."""
return self.__key.kind()
def is_saved(self):
"""Returns if this entity has been saved to the datastore."""
last_path = self.__key._Key__reference.path().element_list()[-1]
return ((last_path.has_name() ^ last_path.has_id()) and
self.__key.has_id_or_name())
def is_projection(self):
"""Returns if this entity is a projection from full entity.
Projected entities:
- may not contain all properties from the original entity;
- only contain single values for lists;
- may not contain values with the same type as the original entity.
"""
return self.__projection
def key(self):
"""Returns this entity's primary key, a Key instance."""
return self.__key
def parent(self):
"""Returns this entity's parent, as a Key. If this entity has no parent,
returns None.
"""
return self.key().parent()
def entity_group(self):
"""Returns this entity's entity group as a Key.
    Note that the returned Key will be incomplete if this is a root entity
and its key is incomplete.
"""
return self.key().entity_group()
def unindexed_properties(self):
"""Returns this entity's unindexed properties, as a frozenset of strings."""
return getattr(self, '_Entity__unindexed_properties', [])
def set_unindexed_properties(self, unindexed_properties):
unindexed_properties, multiple = NormalizeAndTypeCheck(unindexed_properties, basestring)
if not multiple:
raise datastore_errors.BadArgumentError(
'unindexed_properties must be a sequence; received %s (a %s).' %
(unindexed_properties, typename(unindexed_properties)))
for prop in unindexed_properties:
datastore_types.ValidateProperty(prop, None)
self.__unindexed_properties = frozenset(unindexed_properties)
def __setitem__(self, name, value):
"""Implements the [] operator. Used to set property value(s).
If the property name is the empty string or not a string, raises
BadPropertyError. If the value is not a supported type, raises
BadValueError.
"""
datastore_types.ValidateProperty(name, value)
dict.__setitem__(self, name, value)
def setdefault(self, name, value):
"""If the property exists, returns its value. Otherwise sets it to value.
If the property name is the empty string or not a string, raises
BadPropertyError. If the value is not a supported type, raises
BadValueError.
"""
datastore_types.ValidateProperty(name, value)
return dict.setdefault(self, name, value)
def update(self, other):
"""Updates this entity's properties from the values in other.
If any property name is the empty string or not a string, raises
BadPropertyError. If any value is not a supported type, raises
BadValueError.
"""
for name, value in other.items():
self.__setitem__(name, value)
def copy(self):
"""The copy method is not supported.
"""
raise NotImplementedError('Entity does not support the copy() method.')
def ToXml(self):
"""Returns an XML representation of this entity. Atom and gd:namespace
properties are converted to XML according to their respective schemas. For
more information, see:
http://www.atomenabled.org/developers/syndication/
http://code.google.com/apis/gdata/common-elements.html
This is *not* optimized. It shouldn't be used anywhere near code that's
performance-critical.
"""
xml = u'<entity kind=%s' % saxutils.quoteattr(self.kind())
if self.__key.has_id_or_name():
xml += ' key=%s' % saxutils.quoteattr(str(self.__key))
xml += '>'
if self.__key.has_id_or_name():
xml += '\n <key>%s</key>' % self.__key.ToTagUri()
properties = self.keys()
if properties:
properties.sort()
xml += '\n ' + '\n '.join(self._PropertiesToXml(properties))
xml += '\n</entity>\n'
return xml
def _PropertiesToXml(self, properties):
""" Returns a list of the XML representations of each of the given
properties. Ignores properties that don't exist in this entity.
Arg:
properties: string or list of strings
Returns:
list of strings
"""
xml_properties = []
for propname in properties:
if not self.has_key(propname):
continue
propname_xml = saxutils.quoteattr(propname)
values = self[propname]
if not isinstance(values, list):
values = [values]
proptype = datastore_types.PropertyTypeName(values[0])
proptype_xml = saxutils.quoteattr(proptype)
escaped_values = self._XmlEscapeValues(propname)
open_tag = u'<property name=%s type=%s>' % (propname_xml, proptype_xml)
close_tag = u'</property>'
xml_properties += [open_tag + val + close_tag for val in escaped_values]
return xml_properties
def _XmlEscapeValues(self, property):
""" Returns a list of the XML-escaped string values for the given property.
Raises an AssertionError if the property doesn't exist.
Arg:
property: string
Returns:
list of strings
"""
assert self.has_key(property)
xml = []
values = self[property]
if not isinstance(values, list):
values = [values]
for val in values:
if hasattr(val, 'ToXml'):
xml.append(val.ToXml())
else:
if val is None:
xml.append('')
else:
xml.append(saxutils.escape(unicode(val)))
return xml
def ToPb(self):
"""Converts this Entity to its protocol buffer representation.
Returns:
entity_pb.Entity
"""
return self._ToPb(False)
def _ToPb(self, mark_key_as_saved=True):
"""Converts this Entity to its protocol buffer representation. Not
intended to be used by application developers.
Returns:
entity_pb.Entity
"""
pb = entity_pb.EntityProto()
pb.mutable_key().CopyFrom(self.key()._ToPb())
last_path = pb.key().path().element_list()[-1]
if mark_key_as_saved and last_path.has_name() and last_path.has_id():
last_path.clear_id()
group = pb.mutable_entity_group()
if self.__key.has_id_or_name():
root = pb.key().path().element(0)
group.add_element().CopyFrom(root)
properties = self.items()
properties.sort()
for (name, values) in properties:
      prop_pbs = datastore_types.ToPropertyPb(name, values)
      if not isinstance(prop_pbs, list):
        prop_pbs = [prop_pbs]
      for prop in prop_pbs:
if ((prop.has_meaning() and
prop.meaning() in datastore_types._RAW_PROPERTY_MEANINGS) or
name in self.unindexed_properties()):
pb.raw_property_list().append(prop)
else:
pb.property_list().append(prop)
if pb.property_size() > _MAX_INDEXED_PROPERTIES:
raise datastore_errors.BadRequestError(
'Too many indexed properties for entity %r.' % self.key())
return pb
@staticmethod
def FromPb(pb, validate_reserved_properties=True,
default_kind='<not specified>'):
"""Static factory method. Returns the Entity representation of the
given protocol buffer (datastore_pb.Entity).
Args:
pb: datastore_pb.Entity or str encoding of a datastore_pb.Entity
validate_reserved_properties: deprecated
default_kind: str, the kind to use if the pb has no key.
Returns:
Entity: the Entity representation of pb
"""
if isinstance(pb, str):
real_pb = entity_pb.EntityProto()
real_pb.ParsePartialFromString(pb)
pb = real_pb
return Entity._FromPb(
pb, require_valid_key=False, default_kind=default_kind)
@staticmethod
def _FromPb(pb, require_valid_key=True, default_kind='<not specified>'):
"""Static factory method. Returns the Entity representation of the
given protocol buffer (datastore_pb.Entity). Not intended to be used by
application developers.
The Entity PB's key must be complete. If it isn't, an AssertionError is
raised.
Args:
# a protocol buffer Entity
pb: datastore_pb.Entity
default_kind: str, the kind to use if the pb has no key.
Returns:
# the Entity representation of the argument
Entity
"""
if not pb.key().path().element_size():
pb.mutable_key().CopyFrom(Key.from_path(default_kind, 0)._ToPb())
last_path = pb.key().path().element_list()[-1]
if require_valid_key:
assert last_path.has_id() ^ last_path.has_name()
if last_path.has_id():
assert last_path.id() != 0
else:
assert last_path.has_name()
assert last_path.name()
unindexed_properties = [unicode(p.name(), 'utf-8')
for p in pb.raw_property_list()]
if pb.key().has_name_space():
namespace = pb.key().name_space()
else:
namespace = ''
e = Entity(unicode(last_path.type(), 'utf-8'),
unindexed_properties=unindexed_properties,
_app=pb.key().app(), namespace=namespace)
ref = e.__key._Key__reference
ref.CopyFrom(pb.key())
temporary_values = {}
for prop_list in (pb.property_list(), pb.raw_property_list()):
for prop in prop_list:
if prop.meaning() == entity_pb.Property.INDEX_VALUE:
e.__projection = True
try:
value = datastore_types.FromPropertyPb(prop)
        except (AssertionError, AttributeError, TypeError, ValueError):
raise datastore_errors.Error(
'Property %s is corrupt in the datastore:\n%s' %
(prop.name(), traceback.format_exc()))
multiple = prop.multiple()
if multiple:
value = [value]
name = prop.name()
cur_value = temporary_values.get(name)
if cur_value is None:
temporary_values[name] = value
elif not multiple or not isinstance(cur_value, list):
raise datastore_errors.Error(
'Property %s is corrupt in the datastore; it has multiple '
'values, but is not marked as multiply valued.' % name)
else:
cur_value.extend(value)
for name, value in temporary_values.iteritems():
decoded_name = unicode(name, 'utf-8')
datastore_types.ValidateReadProperty(decoded_name, value)
dict.__setitem__(e, decoded_name, value)
return e
class Query(dict):
"""A datastore query.
(Instead of this, consider using appengine.ext.gql.Query! It provides a
query language interface on top of the same functionality.)
Queries are used to retrieve entities that match certain criteria, including
app id, kind, and property filters. Results may also be sorted by properties.
App id and kind are required. Only entities from the given app, of the given
type, are returned. If an ancestor is set, with Ancestor(), only entities
with that ancestor are returned.
Property filters are used to provide criteria based on individual property
values. A filter compares a specific property in each entity to a given
value or list of possible values.
An entity is returned if its property values match *all* of the query's
filters. In other words, filters are combined with AND, not OR. If an
entity does not have a value for a property used in a filter, it is not
returned.
Property filters map filter strings of the form '<property name> <operator>'
to filter values. Use dictionary accessors to set property filters, like so:
> query = Query('Person')
> query['name ='] = 'Ryan'
> query['age >='] = 21
  This query returns all Person entities where the name property is 'Ryan'
  and the age property is at least 21.
Another way to build this query is:
> query = Query('Person')
> query.update({'name =': 'Ryan', 'age >=': 21})
The supported operators are =, >, <, >=, and <=. Only one inequality
filter may be used per query. Any number of equals filters may be used in
a single Query.
A filter value may be a list or tuple of values. This is interpreted as
multiple filters with the same filter string and different values, all ANDed
together. For example, this query returns everyone with the tags "google"
and "app engine":
> Query('Person', {'tag =': ('google', 'app engine')})
Result entities can be returned in different orders. Use the Order()
method to specify properties that results will be sorted by, and in which
direction.
Note that filters and orderings may be provided at any time before the query
is run. When the query is fully specified, Run() runs the query and returns
an iterator. The query results can be accessed through the iterator.
A query object may be reused after it's been run. Its filters and
orderings can be changed to create a modified query.
If you know how many result entities you need, use Get() to fetch them:
> query = Query('Person', {'age >': 21})
> for person in query.Get(4):
> print 'I have four pints left. Have one on me, %s!' % person['name']
If you don't know how many results you need, or if you need them all, you
can get an iterator over the results by calling Run():
> for person in Query('Person', {'age >': 21}).Run():
> print 'Have a pint on me, %s!' % person['name']
Get() is more efficient than Run(), so use Get() whenever possible.
Finally, the Count() method returns the number of result entities matched by
the query. The returned count is cached; successive Count() calls will not
re-scan the datastore unless the query is changed.
"""
ASCENDING = datastore_query.PropertyOrder.ASCENDING
DESCENDING = datastore_query.PropertyOrder.DESCENDING
ORDER_FIRST = datastore_query.QueryOptions.ORDER_FIRST
ANCESTOR_FIRST = datastore_query.QueryOptions.ANCESTOR_FIRST
FILTER_FIRST = datastore_query.QueryOptions.FILTER_FIRST
OPERATORS = {'==': datastore_query.PropertyFilter._OPERATORS['=']}
OPERATORS.update(datastore_query.PropertyFilter._OPERATORS)
INEQUALITY_OPERATORS = datastore_query.PropertyFilter._INEQUALITY_OPERATORS
UPPERBOUND_INEQUALITY_OPERATORS = frozenset(['<', '<='])
FILTER_REGEX = re.compile(
'^\s*([^\s]+)(\s+(%s)\s*)?$' % '|'.join(OPERATORS),
re.IGNORECASE | re.UNICODE)
__kind = None
__app = None
__namespace = None
__orderings = None
__ancestor_pb = None
__distinct = False
__group_by = None
__index_list_source = None
__cursor_source = None
__compiled_query_source = None
__filter_order = None
__filter_counter = 0
__inequality_prop = None
__inequality_count = 0
def __init__(self, kind=None, filters={}, _app=None, keys_only=False,
compile=True, cursor=None, namespace=None, end_cursor=None,
projection=None, distinct=None, _namespace=None):
"""Constructor.
Raises BadArgumentError if kind is not a string. Raises BadValueError or
BadFilterError if filters is not a dictionary of valid filters.
Args:
namespace: string, the namespace to query.
kind: string, the kind of entities to query, or None.
filters: dict, initial set of filters.
keys_only: boolean, if keys should be returned instead of entities.
projection: iterable of property names to project.
distinct: boolean, if projection should be distinct.
compile: boolean, if the query should generate cursors.
cursor: datastore_query.Cursor, the start cursor to use.
end_cursor: datastore_query.Cursor, the end cursor to use.
_namespace: deprecated, use namespace instead.
"""
if namespace is None:
namespace = _namespace
elif _namespace is not None:
raise datastore_errors.BadArgumentError(
"Must not set both _namespace and namespace parameters.")
if kind is not None:
datastore_types.ValidateString(kind, 'kind',
datastore_errors.BadArgumentError)
self.__kind = kind
self.__orderings = []
self.__filter_order = {}
self.update(filters)
self.__app = datastore_types.ResolveAppId(_app)
self.__namespace = datastore_types.ResolveNamespace(namespace)
self.__query_options = datastore_query.QueryOptions(
keys_only=keys_only,
produce_cursors=compile,
start_cursor=cursor,
end_cursor=end_cursor,
projection=projection)
if distinct:
if not self.__query_options.projection:
raise datastore_errors.BadQueryError(
'cannot specify distinct without a projection')
self.__distinct = True
self.__group_by = self.__query_options.projection
def Order(self, *orderings):
"""Specify how the query results should be sorted.
Result entities will be sorted by the first property argument, then by the
second, and so on. For example, this:
> query = Query('Person')
> query.Order('bday', ('age', Query.DESCENDING))
sorts everyone in order of their birthday, starting with January 1.
People with the same birthday are sorted by age, oldest to youngest.
The direction for each sort property may be provided; if omitted, it
defaults to ascending.
Order() may be called multiple times. Each call resets the sort order
from scratch.
If an inequality filter exists in this Query it must be the first property
passed to Order. Any number of sort orders may be used after the
    inequality filter property. Without inequality filters, any number of
    sort orders may be specified.
Entities with multiple values for an order property are sorted by their
lowest value.
Note that a sort order implies an existence filter! In other words,
Entities without the sort order property are filtered out, and *not*
included in the query results.
    If the sort order property has different types in different entities - i.e.,
if bob['id'] is an int and fred['id'] is a string - the entities will be
grouped first by the property type, then sorted within type. No attempt is
made to compare property values across types.
Raises BadArgumentError if any argument is of the wrong format.
Args:
# the properties to sort by, in sort order. each argument may be either a
# string or (string, direction) 2-tuple.
Returns:
# this query
Query
"""
orderings = list(orderings)
for (order, i) in zip(orderings, range(len(orderings))):
if not (isinstance(order, basestring) or
(isinstance(order, tuple) and len(order) in [2, 3])):
raise datastore_errors.BadArgumentError(
'Order() expects strings or 2- or 3-tuples; received %s (a %s). ' %
(order, typename(order)))
if isinstance(order, basestring):
order = (order,)
datastore_types.ValidateString(order[0], 'sort order property',
datastore_errors.BadArgumentError)
property = order[0]
direction = order[-1]
if direction not in (Query.ASCENDING, Query.DESCENDING):
if len(order) == 3:
raise datastore_errors.BadArgumentError(
'Order() expects Query.ASCENDING or DESCENDING; received %s' %
str(direction))
direction = Query.ASCENDING
if (self.__kind is None and
(property != datastore_types.KEY_SPECIAL_PROPERTY or
direction != Query.ASCENDING)):
raise datastore_errors.BadArgumentError(
'Only %s ascending orders are supported on kindless queries' %
datastore_types.KEY_SPECIAL_PROPERTY)
orderings[i] = (property, direction)
if (orderings and self.__inequality_prop and
orderings[0][0] != self.__inequality_prop):
raise datastore_errors.BadArgumentError(
'First ordering property must be the same as inequality filter '
'property, if specified for this query; received %s, expected %s' %
(orderings[0][0], self.__inequality_prop))
self.__orderings = orderings
return self
def Hint(self, hint):
"""Sets a hint for how this query should run.
The query hint gives us information about how best to execute your query.
Currently, we can only do one index scan, so the query hint should be used
    to indicate which index we should scan against.
Use FILTER_FIRST if your first filter will only match a few results. In
this case, it will be most efficient to scan against the index for this
property, load the results into memory, and apply the remaining filters
and sort orders there.
Similarly, use ANCESTOR_FIRST if the query's ancestor only has a few
descendants. In this case, it will be most efficient to scan all entities
below the ancestor and load them into memory first.
Use ORDER_FIRST if the query has a sort order and the result set is large
or you only plan to fetch the first few results. In that case, we
shouldn't try to load all of the results into memory; instead, we should
scan the index for this property, which is in sorted order.
Note that hints are currently ignored in the v3 datastore!
Arg:
one of datastore.Query.[ORDER_FIRST, ANCESTOR_FIRST, FILTER_FIRST]
Returns:
# this query
Query
"""
if hint is not self.__query_options.hint:
self.__query_options = datastore_query.QueryOptions(
hint=hint, config=self.__query_options)
return self
def Ancestor(self, ancestor):
"""Sets an ancestor for this query.
This restricts the query to only return result entities that are descended
from a given entity. In other words, all of the results will have the
ancestor as their parent, or parent's parent, or etc.
Raises BadArgumentError or BadKeyError if parent is not an existing Entity
or Key in the datastore.
Args:
# the key must be complete
ancestor: Entity or Key
Returns:
# this query
Query
"""
self.__ancestor_pb = _GetCompleteKeyOrError(ancestor)._ToPb()
return self
def IsKeysOnly(self):
"""Returns True if this query is keys only, false otherwise."""
return self.__query_options.keys_only
def GetQueryOptions(self):
"""Returns a datastore_query.QueryOptions for the current instance."""
return self.__query_options
def GetQuery(self):
"""Returns a datastore_query.Query for the current instance."""
return datastore_query.Query(app=self.__app,
namespace=self.__namespace,
kind=self.__kind,
ancestor=self.__ancestor_pb,
filter_predicate=self.GetFilterPredicate(),
order=self.GetOrder(),
group_by=self.__group_by)
def GetOrder(self):
"""Gets a datastore_query.Order for the current instance.
Returns:
datastore_query.Order or None if there are no sort orders set on the
current Query.
"""
orders = [datastore_query.PropertyOrder(property, direction)
for property, direction in self.__orderings]
if orders:
return datastore_query.CompositeOrder(orders)
return None
def GetFilterPredicate(self):
"""Returns a datastore_query.FilterPredicate for the current instance.
Returns:
datastore_query.FilterPredicate or None if no filters are set on the
current Query.
"""
ordered_filters = [(i, f) for f, i in self.__filter_order.iteritems()]
ordered_filters.sort()
property_filters = []
for _, filter_str in ordered_filters:
if filter_str not in self:
continue
values = self[filter_str]
match = self._CheckFilter(filter_str, values)
name = match.group(1)
op = match.group(3)
if op is None or op == '==':
op = '='
property_filters.append(datastore_query.make_filter(name, op, values))
if property_filters:
return datastore_query.CompositeFilter(
datastore_query.CompositeFilter.AND,
property_filters)
return None
def GetDistinct(self):
"""Returns True if the current instance is distinct.
Returns:
A boolean indicating if the distinct flag is set.
"""
return self.__distinct
def GetIndexList(self):
"""Get the index list from the last run of this query.
Returns:
A list of indexes used by the last run of this query.
Raises:
AssertionError: The query has not yet been run.
"""
index_list_function = self.__index_list_source
if index_list_function:
return index_list_function()
raise AssertionError('No index list available because this query has not '
'been executed')
def GetCursor(self):
"""Get the cursor from the last run of this query.
The source of this cursor varies depending on what the last call was:
- Run: A cursor that points immediately after the last result pulled off
the returned iterator.
- Get: A cursor that points immediately after the last result in the
returned list.
- Count: A cursor that points immediately after the last result counted.
Returns:
A datastore_query.Cursor object that can be used in subsequent query
requests.
Raises:
AssertionError: The query has not yet been run or cannot be compiled.
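    Example (illustrative only; 'Person' is a placeholder kind):
      > query = Query('Person')
      > results = list(query.Run(limit=10))
      > cursor = query.GetCursor()
      > more = Query('Person', cursor=cursor)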
"""
cursor_function = self.__cursor_source
if cursor_function:
cursor = cursor_function()
if cursor:
return cursor
raise AssertionError('No cursor available, either this query has not '
'been executed or there is no compilation '
'available for this kind of query')
def GetBatcher(self, config=None):
"""Runs this query and returns a datastore_query.Batcher.
This is not intended to be used by application developers. Use Get()
instead!
Args:
config: Optional Configuration to use for this request.
Returns:
# an iterator that provides access to the query results
Iterator
"""
query_options = self.GetQueryOptions().merge(config)
if self.__distinct and query_options.projection != self.__group_by:
raise datastore_errors.BadArgumentError(
'cannot override projection when distinct is set')
return self.GetQuery().run(_GetConnection(), query_options)
def Run(self, **kwargs):
"""Runs this query.
If a filter string is invalid, raises BadFilterError. If a filter value is
invalid, raises BadValueError. If an IN filter is provided, and a sort
order on another property is provided, raises BadQueryError.
If you know in advance how many results you want, use limit=#. It's
more efficient.
Args:
kwargs: Any keyword arguments accepted by datastore_query.QueryOptions().
Returns:
# an iterator that provides access to the query results
Iterator
"""
config = _GetConfigFromKwargs(kwargs, convert_rpc=True,
config_class=datastore_query.QueryOptions)
itr = Iterator(self.GetBatcher(config=config))
self.__index_list_source = itr.GetIndexList
self.__cursor_source = itr.cursor
self.__compiled_query_source = itr._compiled_query
return itr
def Get(self, limit, offset=0, **kwargs):
"""Deprecated, use list(Run(...)) instead.
Args:
limit: int or long representing the maximum number of entities to return.
offset: int or long representing the number of entities to skip
kwargs: Any keyword arguments accepted by datastore_query.QueryOptions().
Returns:
# a list of entities
[Entity, ...]
"""
if limit is None:
kwargs.setdefault('batch_size', _MAX_INT_32)
return list(self.Run(limit=limit, offset=offset, **kwargs))
def Count(self, limit=1000, **kwargs):
"""Returns the number of entities that this query matches.
Args:
      limit: a number or None. If there are more results than this, stop short
and just return this number. Providing this argument makes the count
operation more efficient.
config: Optional Configuration to use for this request.
Returns:
The number of results.
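    Example (illustrative only; 'Person' is a placeholder kind):
      > num_adults = Query('Person', {'age >=': 18}).Count(limit=100)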
"""
original_offset = kwargs.pop('offset', 0)
if limit is None:
offset = _MAX_INT_32
else:
offset = min(limit + original_offset, _MAX_INT_32)
kwargs['limit'] = 0
kwargs['offset'] = offset
config = _GetConfigFromKwargs(kwargs, convert_rpc=True,
config_class=datastore_query.QueryOptions)
batch = self.GetBatcher(config=config).next()
self.__index_list_source = (
lambda: [index for index, state in batch.index_list])
self.__cursor_source = lambda: batch.cursor(0)
self.__compiled_query_source = lambda: batch._compiled_query
return max(0, batch.skipped_results - original_offset)
def __iter__(self):
raise NotImplementedError(
'Query objects should not be used as iterators. Call Run() first.')
def __getstate__(self):
state = self.__dict__.copy()
state['_Query__index_list_source'] = None
state['_Query__cursor_source'] = None
state['_Query__compiled_query_source'] = None
return state
def __setstate__(self, state):
if '_Query__query_options' not in state:
state['_Query__query_options'] = datastore_query.QueryOptions(
keys_only=state.pop('_Query__keys_only'),
produce_cursors=state.pop('_Query__compile'),
start_cursor=state.pop('_Query__cursor'),
end_cursor=state.pop('_Query__end_cursor'))
self.__dict__ = state
def __setitem__(self, filter, value):
"""Implements the [] operator. Used to set filters.
If the filter string is empty or not a string, raises BadFilterError. If
the value is not a supported type, raises BadValueError.
"""
if isinstance(value, tuple):
value = list(value)
datastore_types.ValidateProperty(' ', value)
match = self._CheckFilter(filter, value)
property = match.group(1)
operator = match.group(3)
dict.__setitem__(self, filter, value)
if (operator in self.INEQUALITY_OPERATORS and
property != datastore_types._UNAPPLIED_LOG_TIMESTAMP_SPECIAL_PROPERTY):
if self.__inequality_prop is None:
self.__inequality_prop = property
else:
assert self.__inequality_prop == property
self.__inequality_count += 1
if filter not in self.__filter_order:
self.__filter_order[filter] = self.__filter_counter
self.__filter_counter += 1
def setdefault(self, filter, value):
"""If the filter exists, returns its value. Otherwise sets it to value.
If the property name is the empty string or not a string, raises
BadPropertyError. If the value is not a supported type, raises
BadValueError.
"""
datastore_types.ValidateProperty(' ', value)
self._CheckFilter(filter, value)
return dict.setdefault(self, filter, value)
def __delitem__(self, filter):
"""Implements the del [] operator. Used to remove filters.
"""
dict.__delitem__(self, filter)
del self.__filter_order[filter]
match = Query.FILTER_REGEX.match(filter)
property = match.group(1)
operator = match.group(3)
if operator in self.INEQUALITY_OPERATORS:
assert self.__inequality_count >= 1
assert property == self.__inequality_prop
self.__inequality_count -= 1
if self.__inequality_count == 0:
self.__inequality_prop = None
def update(self, other):
"""Updates this query's filters from the ones in other.
If any filter string is invalid, raises BadFilterError. If any value is
not a supported type, raises BadValueError.
"""
for filter, value in other.items():
self.__setitem__(filter, value)
def copy(self):
"""The copy method is not supported.
"""
raise NotImplementedError('Query does not support the copy() method.')
def _CheckFilter(self, filter, values):
"""Type check a filter string and list of values.
Raises BadFilterError if the filter string is empty, not a string, or
invalid. Raises BadValueError if the value type is not supported.
Args:
filter: String containing the filter text.
values: List of associated filter values.
Returns:
re.MatchObject (never None) that matches the 'filter'. Group 1 is the
property name, group 3 is the operator. (Group 2 is unused.)
"""
try:
match = Query.FILTER_REGEX.match(filter)
if not match:
raise datastore_errors.BadFilterError(
'Could not parse filter string: %s' % str(filter))
except TypeError:
raise datastore_errors.BadFilterError(
'Could not parse filter string: %s' % str(filter))
property = match.group(1)
operator = match.group(3)
if operator is None:
operator = '='
if isinstance(values, tuple):
values = list(values)
elif not isinstance(values, list):
values = [values]
if isinstance(values[0], datastore_types._RAW_PROPERTY_TYPES):
raise datastore_errors.BadValueError(
'Filtering on %s properties is not supported.' % typename(values[0]))
if (operator in self.INEQUALITY_OPERATORS and
property != datastore_types._UNAPPLIED_LOG_TIMESTAMP_SPECIAL_PROPERTY):
if self.__inequality_prop and property != self.__inequality_prop:
raise datastore_errors.BadFilterError(
'Only one property per query may have inequality filters (%s).' %
', '.join(self.INEQUALITY_OPERATORS))
elif len(self.__orderings) >= 1 and self.__orderings[0][0] != property:
raise datastore_errors.BadFilterError(
'Inequality operators (%s) must be on the same property as the '
'first sort order, if any sort orders are supplied' %
', '.join(self.INEQUALITY_OPERATORS))
if (self.__kind is None and
property != datastore_types.KEY_SPECIAL_PROPERTY and
property != datastore_types._UNAPPLIED_LOG_TIMESTAMP_SPECIAL_PROPERTY):
raise datastore_errors.BadFilterError(
'Only %s filters are allowed on kindless queries.' %
datastore_types.KEY_SPECIAL_PROPERTY)
if property == datastore_types._UNAPPLIED_LOG_TIMESTAMP_SPECIAL_PROPERTY:
if self.__kind:
raise datastore_errors.BadFilterError(
'Only kindless queries can have %s filters.' %
datastore_types._UNAPPLIED_LOG_TIMESTAMP_SPECIAL_PROPERTY)
      if operator not in self.UPPERBOUND_INEQUALITY_OPERATORS:
raise datastore_errors.BadFilterError(
'Only %s operators are supported with %s filters.' % (
self.UPPERBOUND_INEQUALITY_OPERATORS,
datastore_types._UNAPPLIED_LOG_TIMESTAMP_SPECIAL_PROPERTY))
if property in datastore_types._SPECIAL_PROPERTIES:
if property == datastore_types.KEY_SPECIAL_PROPERTY:
for value in values:
if not isinstance(value, Key):
raise datastore_errors.BadFilterError(
'%s filter value must be a Key; received %s (a %s)' %
(datastore_types.KEY_SPECIAL_PROPERTY, value, typename(value)))
return match
def _Run(self, limit=None, offset=None,
prefetch_count=None, next_count=None, **kwargs):
"""Deprecated, use Run() instead."""
return self.Run(limit=limit, offset=offset,
prefetch_size=prefetch_count, batch_size=next_count,
**kwargs)
def _ToPb(self, limit=None, offset=None, count=None):
query_options = datastore_query.QueryOptions(
config=self.GetQueryOptions(),
limit=limit,
offset=offset,
batch_size=count)
return self.GetQuery()._to_pb(_GetConnection(), query_options)
def _GetCompiledQuery(self):
"""Returns the internal-only pb representation of the last query run.
Do not use.
Raises:
AssertionError: Query not compiled or not yet executed.
"""
compiled_query_function = self.__compiled_query_source
if compiled_query_function:
compiled_query = compiled_query_function()
if compiled_query:
return compiled_query
raise AssertionError('No compiled query available, either this query has '
'not been executed or there is no compilation '
'available for this kind of query')
GetCompiledQuery = _GetCompiledQuery
GetCompiledCursor = GetCursor
def AllocateIdsAsync(model_key, size=None, **kwargs):
"""Asynchronously allocates a range of IDs.
Identical to datastore.AllocateIds() except returns an asynchronous object.
Call get_result() on the return value to block on the call and get the
results.
"""
max = kwargs.pop('max', None)
config = _GetConfigFromKwargs(kwargs)
if getattr(config, 'read_policy', None) == EVENTUAL_CONSISTENCY:
raise datastore_errors.BadRequestError(
'read_policy is only supported on read operations.')
keys, _ = NormalizeAndTypeCheckKeys(model_key)
if len(keys) > 1:
raise datastore_errors.BadArgumentError(
'Cannot allocate IDs for more than one model key at a time')
rpc = _GetConnection().async_allocate_ids(config, keys[0], size, max)
return rpc
def AllocateIds(model_key, size=None, **kwargs):
"""Allocates a range of IDs of size or with max for the given key.
Allocates a range of IDs in the datastore such that those IDs will not
be automatically assigned to new entities. You can only allocate IDs
for model keys from your app. If there is an error, raises a subclass of
datastore_errors.Error.
Either size or max must be provided but not both. If size is provided then a
range of the given size is returned. If max is provided then the largest
range of ids that are safe to use with an upper bound of max is returned (can
be an empty range).
Max should only be provided if you have an existing numeric id range that you
want to reserve, e.g. bulk loading entities that already have IDs. If you
don't care about which IDs you receive, use size instead.
Args:
model_key: Key or string to serve as a model specifying the ID sequence
in which to allocate IDs
size: integer, number of IDs to allocate.
max: integer, upper bound of the range of IDs to allocate.
config: Optional Configuration to use for this request.
Returns:
(start, end) of the allocated range, inclusive.
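  Example (illustrative only; 'Person' is a placeholder kind):
    > start, end = AllocateIds(Key.from_path('Person', 1), size=100)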
"""
return AllocateIdsAsync(model_key, size, **kwargs).get_result()
class MultiQuery(Query):
"""Class representing a query which requires multiple datastore queries.
This class is actually a subclass of datastore.Query as it is intended to act
like a normal Query object (supporting the same interface).
Does not support keys only queries, since it needs whole entities in order
to merge sort them. (That's not true if there are no sort orders, or if the
sort order is on __key__, but allowing keys only queries in those cases, but
not in others, would be confusing.)
"""
def __init__(self, bound_queries, orderings):
if len(bound_queries) > MAX_ALLOWABLE_QUERIES:
raise datastore_errors.BadArgumentError(
'Cannot satisfy query -- too many subqueries (max: %d, got %d).'
' Probable cause: too many IN/!= filters in query.' %
(MAX_ALLOWABLE_QUERIES, len(bound_queries)))
projection = (bound_queries and
bound_queries[0].GetQueryOptions().projection)
for query in bound_queries:
if projection != query.GetQueryOptions().projection:
raise datastore_errors.BadQueryError(
'All queries must have the same projection.')
if query.IsKeysOnly():
raise datastore_errors.BadQueryError(
'MultiQuery does not support keys_only.')
self.__projection = projection
self.__bound_queries = bound_queries
self.__orderings = orderings
self.__compile = False
def __str__(self):
res = 'MultiQuery: '
for query in self.__bound_queries:
res = '%s %s' % (res, str(query))
return res
def Get(self, limit, offset=0, **kwargs):
"""Deprecated, use list(Run(...)) instead.
Args:
limit: int or long representing the maximum number of entities to return.
offset: int or long representing the number of entities to skip
kwargs: Any keyword arguments accepted by datastore_query.QueryOptions().
Returns:
A list of entities with at most "limit" entries (less if the query
completes before reading limit values).
"""
if limit is None:
kwargs.setdefault('batch_size', _MAX_INT_32)
return list(self.Run(limit=limit, offset=offset, **kwargs))
class SortOrderEntity(object):
"""Allow entity comparisons using provided orderings.
The iterator passed to the constructor is eventually consumed via
    calls to GetNext(), which generate new SortOrderEntity objects with the
same orderings.
"""
def __init__(self, entity_iterator, orderings):
"""Ctor.
Args:
entity_iterator: an iterator of entities which will be wrapped.
orderings: an iterable of (identifier, order) pairs. order
should be either Query.ASCENDING or Query.DESCENDING.
"""
self.__entity_iterator = entity_iterator
self.__entity = None
self.__min_max_value_cache = {}
try:
self.__entity = entity_iterator.next()
except StopIteration:
pass
else:
self.__orderings = orderings
def __str__(self):
return str(self.__entity)
def GetEntity(self):
"""Gets the wrapped entity."""
return self.__entity
def GetNext(self):
"""Wrap and return the next entity.
The entity is retrieved from the iterator given at construction time.
"""
return MultiQuery.SortOrderEntity(self.__entity_iterator,
self.__orderings)
def CmpProperties(self, that):
"""Compare two entities and return their relative order.
Compares self to that based on the current sort orderings and the
key orders between them. Returns negative, 0, or positive depending on
whether self is less, equal to, or greater than that. This
comparison returns as if all values were to be placed in ascending order
(highest value last). Only uses the sort orderings to compare (ignores
keys).
Args:
that: SortOrderEntity
Returns:
Negative if self < that
Zero if self == that
Positive if self > that
"""
if not self.__entity:
return cmp(self.__entity, that.__entity)
for (identifier, order) in self.__orderings:
value1 = self.__GetValueForId(self, identifier, order)
value2 = self.__GetValueForId(that, identifier, order)
result = cmp(value1, value2)
if order == Query.DESCENDING:
result = -result
if result:
return result
return 0
def __GetValueForId(self, sort_order_entity, identifier, sort_order):
value = _GetPropertyValue(sort_order_entity.__entity, identifier)
if isinstance(value, list):
entity_key = sort_order_entity.__entity.key()
if (entity_key, identifier) in self.__min_max_value_cache:
value = self.__min_max_value_cache[(entity_key, identifier)]
elif sort_order == Query.DESCENDING:
value = min(value)
else:
value = max(value)
self.__min_max_value_cache[(entity_key, identifier)] = value
return value
def __cmp__(self, that):
"""Compare self to that w.r.t. values defined in the sort order.
Compare an entity with another, using sort-order first, then the key
order to break ties. This can be used in a heap to have faster min-value
lookup.
Args:
that: other entity to compare to
Returns:
negative: if self is less than that in sort order
zero: if self is equal to that in sort order
positive: if self is greater than that in sort order
"""
property_compare = self.CmpProperties(that)
if property_compare:
return property_compare
else:
return cmp(self.__entity.key(), that.__entity.key())
def _ExtractBounds(self, config):
"""This function extracts the range of results to consider.
Since MultiQuery dedupes in memory, we must apply the offset and limit in
memory. The results that should be considered are
results[lower_bound:upper_bound].
We also pass the offset=0 and limit=upper_bound to the base queries to
optimize performance.
Args:
config: The base datastore_query.QueryOptions.
Returns:
a tuple consisting of the lower_bound and upper_bound to impose in memory
and the config to use with each bound query. The upper_bound may be None.
"""
if config is None:
return 0, None, None
lower_bound = config.offset or 0
upper_bound = config.limit
if lower_bound:
if upper_bound is not None:
upper_bound = min(lower_bound + upper_bound, _MAX_INT_32)
config = datastore_query.QueryOptions(offset=0,
limit=upper_bound,
config=config)
return lower_bound, upper_bound, config
def __GetProjectionOverride(self, config):
"""Returns a tuple of (original projection, projeciton override).
If projection is None, there is no projection. If override is None,
projection is sufficent for this query.
"""
    projection = datastore_query.QueryOptions.projection(config)
    if projection is None:
      projection = self.__projection
if not projection:
return None, None
override = set()
for prop, _ in self.__orderings:
if prop not in projection:
override.add(prop)
if not override:
return projection, None
return projection, projection + tuple(override)
def Run(self, **kwargs):
"""Return an iterable output with all results in order.
Merge sort the results. First create a list of iterators, then walk
    through them and yield results in order.
Args:
kwargs: Any keyword arguments accepted by datastore_query.QueryOptions().
Returns:
An iterator for the result set.
"""
config = _GetConfigFromKwargs(kwargs, convert_rpc=True,
config_class=datastore_query.QueryOptions)
if config and config.keys_only:
raise datastore_errors.BadRequestError(
'keys only queries are not supported by multi-query.')
lower_bound, upper_bound, config = self._ExtractBounds(config)
projection, override = self.__GetProjectionOverride(config)
if override:
config = datastore_query.QueryOptions(projection=override, config=config)
results = []
count = 1
log_level = logging.DEBUG - 1
for bound_query in self.__bound_queries:
logging.log(log_level, 'Running query #%i' % count)
results.append(bound_query.Run(config=config))
count += 1
def GetDedupeKey(sort_order_entity):
if projection:
return (sort_order_entity.GetEntity().key(),
frozenset(sort_order_entity.GetEntity().iteritems()))
else:
return sort_order_entity.GetEntity().key()
def IterateResults(results):
"""Iterator function to return all results in sorted order.
Iterate over the array of results, yielding the next element, in
sorted order. This function is destructive (results will be empty
when the operation is complete).
Args:
results: list of result iterators to merge and iterate through
Yields:
The next result in sorted order.
"""
result_heap = []
for result in results:
heap_value = MultiQuery.SortOrderEntity(result, self.__orderings)
if heap_value.GetEntity():
heapq.heappush(result_heap, heap_value)
used_keys = set()
while result_heap:
if upper_bound is not None and len(used_keys) >= upper_bound:
break
top_result = heapq.heappop(result_heap)
dedupe_key = GetDedupeKey(top_result)
if dedupe_key not in used_keys:
result = top_result.GetEntity()
if override:
for key in result.keys():
if key not in projection:
del result[key]
yield result
used_keys.add(dedupe_key)
results_to_push = []
while result_heap:
          next_result = heapq.heappop(result_heap)
          if dedupe_key != GetDedupeKey(next_result):
            results_to_push.append(next_result)
            break
          else:
            results_to_push.append(next_result.GetNext())
results_to_push.append(top_result.GetNext())
for popped_result in results_to_push:
if popped_result.GetEntity():
heapq.heappush(result_heap, popped_result)
it = IterateResults(results)
try:
for _ in xrange(lower_bound):
it.next()
except StopIteration:
pass
return it
def Count(self, limit=1000, **kwargs):
"""Return the number of matched entities for this query.
Will return the de-duplicated count of results. Will call the more
efficient Get() function if a limit is given.
Args:
limit: maximum number of entries to count (for any result > limit, return
limit).
config: Optional Configuration to use for this request.
Returns:
count of the number of entries returned.
"""
kwargs['limit'] = limit
config = _GetConfigFromKwargs(kwargs, convert_rpc=True,
config_class=datastore_query.QueryOptions)
projection, override = self.__GetProjectionOverride(config)
if not projection:
config = datastore_query.QueryOptions(keys_only=True, config=config)
elif override:
config = datastore_query.QueryOptions(projection=override, config=config)
lower_bound, upper_bound, config = self._ExtractBounds(config)
used_keys = set()
for bound_query in self.__bound_queries:
for result in bound_query.Run(config=config):
if projection:
dedupe_key = (result.key(),
tuple(result.iteritems()))
else:
dedupe_key = result
used_keys.add(dedupe_key)
if upper_bound and len(used_keys) >= upper_bound:
return upper_bound - lower_bound
return max(0, len(used_keys) - lower_bound)
def GetIndexList(self):
raise AssertionError('No index_list available for a MultiQuery (queries '
'using "IN" or "!=" operators)')
def GetCursor(self):
raise AssertionError('No cursor available for a MultiQuery (queries '
'using "IN" or "!=" operators)')
def _GetCompiledQuery(self):
"""Internal only, do not use."""
raise AssertionError('No compilation available for a MultiQuery (queries '
'using "IN" or "!=" operators)')
def __setitem__(self, query_filter, value):
"""Add a new filter by setting it on all subqueries.
If any of the setting operations raise an exception, the ones
that succeeded are undone and the exception is propagated
upward.
Args:
query_filter: a string of the form "property operand".
value: the value that the given property is compared against.
"""
saved_items = []
for index, query in enumerate(self.__bound_queries):
saved_items.append(query.get(query_filter, None))
try:
query[query_filter] = value
except:
for q, old_value in itertools.izip(self.__bound_queries[:index],
saved_items):
if old_value is not None:
q[query_filter] = old_value
else:
del q[query_filter]
raise
def __delitem__(self, query_filter):
"""Delete a filter by deleting it from all subqueries.
If a KeyError is raised during the attempt, it is ignored, unless
every subquery raised a KeyError. If any other exception is
raised, any deletes will be rolled back.
Args:
query_filter: the filter to delete.
Raises:
KeyError: No subquery had an entry containing query_filter.
"""
subquery_count = len(self.__bound_queries)
keyerror_count = 0
saved_items = []
for index, query in enumerate(self.__bound_queries):
try:
saved_items.append(query.get(query_filter, None))
del query[query_filter]
except KeyError:
keyerror_count += 1
except:
for q, old_value in itertools.izip(self.__bound_queries[:index],
saved_items):
if old_value is not None:
q[query_filter] = old_value
raise
if keyerror_count == subquery_count:
raise KeyError(query_filter)
def __iter__(self):
return iter(self.__bound_queries)
GetCompiledCursor = GetCursor
GetCompiledQuery = _GetCompiledQuery
def RunInTransaction(function, *args, **kwargs):
"""Runs a function inside a datastore transaction.
Runs the user-provided function inside transaction, retries default
number of times.
Args:
function: a function to be run inside the transaction on all remaining
arguments
*args: positional arguments for function.
**kwargs: keyword arguments for function.
Returns:
the function's return value, if any
Raises:
TransactionFailedError, if the transaction could not be committed.
"""
return RunInTransactionOptions(None, function, *args, **kwargs)
def RunInTransactionCustomRetries(retries, function, *args, **kwargs):
"""Runs a function inside a datastore transaction.
Runs the user-provided function inside transaction, with a specified
number of retries.
Args:
retries: number of retries (not counting the initial try)
function: a function to be run inside the transaction on all remaining
arguments
*args: positional arguments for function.
**kwargs: keyword arguments for function.
Returns:
the function's return value, if any
Raises:
TransactionFailedError, if the transaction could not be committed.
"""
options = datastore_rpc.TransactionOptions(retries=retries)
return RunInTransactionOptions(options, function, *args, **kwargs)
def RunInTransactionOptions(options, function, *args, **kwargs):
"""Runs a function inside a datastore transaction.
Runs the user-provided function inside a full-featured, ACID datastore
transaction. Every Put, Get, and Delete call in the function is made within
the transaction. All entities involved in these calls must belong to the
same entity group. Queries are supported as long as they specify an
ancestor belonging to the same entity group.
The trailing arguments are passed to the function as positional arguments.
If the function returns a value, that value will be returned by
RunInTransaction. Otherwise, it will return None.
The function may raise any exception to roll back the transaction instead of
committing it. If this happens, the transaction will be rolled back and the
exception will be re-raised up to RunInTransaction's caller.
If you want to roll back intentionally, but don't have an appropriate
exception to raise, you can raise an instance of datastore_errors.Rollback.
It will cause a rollback, but will *not* be re-raised up to the caller.
The function may be run more than once, so it should be idempotent. It
should avoid side effects, and it shouldn't have *any* side effects that
aren't safe to occur multiple times. This includes modifying the arguments,
since they persist across invocations of the function. However, this doesn't
include Put, Get, and Delete calls, of course.
Example usage:
> def decrement(key, amount=1):
> counter = datastore.Get(key)
> counter['count'] -= amount
> if counter['count'] < 0: # don't let the counter go negative
> raise datastore_errors.Rollback()
> datastore.Put(counter)
>
> counter = datastore.Query('Counter', {'name': 'foo'})
> datastore.RunInTransaction(decrement, counter.key(), amount=5)
Transactions satisfy the traditional ACID properties. They are:
- Atomic. All of a transaction's operations are executed or none of them are.
- Consistent. The datastore's state is consistent before and after a
transaction, whether it committed or rolled back. Invariants such as
"every entity has a primary key" are preserved.
- Isolated. Transactions operate on a snapshot of the datastore. Other
    datastore operations do not see intermediate effects of the transaction;
they only see its effects after it has committed.
- Durable. On commit, all writes are persisted to the datastore.
Nested transactions are not supported.
Args:
options: TransactionOptions specifying options (number of retries, etc) for
this transaction
function: a function to be run inside the transaction on all remaining
arguments
*args: positional arguments for function.
**kwargs: keyword arguments for function.
Returns:
the function's return value, if any
Raises:
TransactionFailedError, if the transaction could not be committed.
"""
options = datastore_rpc.TransactionOptions(options)
if IsInTransaction():
if options.propagation in (None, datastore_rpc.TransactionOptions.NESTED):
raise datastore_errors.BadRequestError(
'Nested transactions are not supported.')
elif options.propagation is datastore_rpc.TransactionOptions.INDEPENDENT:
txn_connection = _PopConnection()
try:
return RunInTransactionOptions(options, function, *args, **kwargs)
finally:
_PushConnection(txn_connection)
return function(*args, **kwargs)
if options.propagation is datastore_rpc.TransactionOptions.MANDATORY:
raise datastore_errors.BadRequestError('Requires an existing transaction.')
retries = options.retries
if retries is None:
retries = DEFAULT_TRANSACTION_RETRIES
conn = _GetConnection()
_PushConnection(None)
try:
for _ in range(0, retries + 1):
_SetConnection(conn.new_transaction(options))
ok, result = _DoOneTry(function, args, kwargs)
if ok:
return result
finally:
_PopConnection()
raise datastore_errors.TransactionFailedError(
'The transaction could not be committed. Please try again.')
def _DoOneTry(function, args, kwargs):
"""Helper to call a function in a transaction, once.
Args:
function: The function to call.
*args: Tuple of positional arguments.
**kwargs: Dict of keyword arguments.
"""
try:
result = function(*args, **kwargs)
except:
original_exception = sys.exc_info()
try:
_GetConnection().rollback()
except Exception:
logging.exception('Exception sending Rollback:')
type, value, trace = original_exception
if isinstance(value, datastore_errors.Rollback):
return True, None
else:
raise type, value, trace
else:
if _GetConnection().commit():
return True, result
else:
logging.warning('Transaction collision. Retrying... %s', '')
return False, None
def _MaybeSetupTransaction(request, keys):
"""Begin a transaction, if necessary, and populate it in the request.
This API exists for internal backwards compatibility, primarily with
api/taskqueue/taskqueue.py.
Args:
request: A protobuf with a mutable_transaction() method.
keys: Unused.
Returns:
A transaction if we're inside a transaction, otherwise None
"""
return _GetConnection()._set_request_transaction(request)
def IsInTransaction():
"""Determine whether already running in transaction.
Returns:
True if already running in transaction, else False.
"""
return isinstance(_GetConnection(), datastore_rpc.TransactionalConnection)
def Transactional(_func=None, **kwargs):
"""A decorator that makes sure a function is run in a transaction.
Defaults propagation to datastore_rpc.TransactionOptions.ALLOWED, which means
any existing transaction will be used in place of creating a new one.
WARNING: Reading from the datastore while in a transaction will not see any
changes made in the same transaction. If the function being decorated relies
  on seeing all changes made in the calling scope, set
propagation=datastore_rpc.TransactionOptions.NESTED.
Args:
_func: do not use.
**kwargs: TransactionOptions configuration options.
Returns:
A wrapper for the given function that creates a new transaction if needed.
"""
if _func is not None:
return Transactional()(_func)
if not kwargs.pop('require_new', None):
kwargs.setdefault('propagation', datastore_rpc.TransactionOptions.ALLOWED)
options = datastore_rpc.TransactionOptions(**kwargs)
def outer_wrapper(func):
def inner_wrapper(*args, **kwds):
return RunInTransactionOptions(options, func, *args, **kwds)
return inner_wrapper
return outer_wrapper
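# Illustrative usage of the Transactional decorator above; the function and
# entity names here are hypothetical, not part of this module.
#
# @Transactional
# def increment(counter_key, delta=1):
#   counter = Get(counter_key)
#   counter['count'] += delta
#   Put(counter)
#
# @Transactional(retries=2)
# def increment_with_retries(counter_key):
#   increment(counter_key)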
@datastore_rpc._positional(1)
def NonTransactional(_func=None, allow_existing=True):
"""A decorator that insures a function is run outside a transaction.
If there is an existing transaction (and allow_existing=True), the existing
transaction is paused while the function is executed.
Args:
_func: do not use
allow_existing: If false, throw an exception if called from within a
transaction
Returns:
A wrapper for the decorated function that ensures it runs outside a
transaction.
"""
if _func is not None:
return NonTransactional()(_func)
def outer_wrapper(func):
def inner_wrapper(*args, **kwds):
if not IsInTransaction():
return func(*args, **kwds)
if not allow_existing:
raise datastore_errors.BadRequestError(
'Function cannot be called from within a transaction.')
txn_connection = _PopConnection()
try:
return func(*args, **kwds)
finally:
_PushConnection(txn_connection)
return inner_wrapper
return outer_wrapper
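# Illustrative usage of NonTransactional; the decorated function below is
# hypothetical. It always executes outside any surrounding transaction,
# pausing an existing one when allow_existing is True.
#
# @NonTransactional
# def record_audit_entry(message):
#   entry = Entity('AuditEntry')
#   entry['message'] = message
#   Put(entry)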
def _GetCompleteKeyOrError(arg):
"""Expects an Entity or a Key, and returns the corresponding Key. Raises
BadArgumentError or BadKeyError if arg is a different type or is incomplete.
Args:
arg: Entity or Key
Returns:
Key
"""
if isinstance(arg, Key):
key = arg
elif isinstance(arg, basestring):
key = Key(arg)
elif isinstance(arg, Entity):
key = arg.key()
elif not isinstance(arg, Key):
raise datastore_errors.BadArgumentError(
'Expects argument to be an Entity or Key; received %s (a %s).' %
(arg, typename(arg)))
assert isinstance(key, Key)
if not key.has_id_or_name():
raise datastore_errors.BadKeyError('Key %r is not complete.' % key)
return key
def _GetPropertyValue(entity, property):
"""Returns an entity's value for a given property name.
Handles special properties like __key__ as well as normal properties.
Args:
entity: datastore.Entity
property: str; the property name
Returns:
property value. For __key__, a datastore_types.Key.
Raises:
KeyError, if the entity does not have the given property.
"""
if property in datastore_types._SPECIAL_PROPERTIES:
if property == datastore_types._UNAPPLIED_LOG_TIMESTAMP_SPECIAL_PROPERTY:
raise KeyError(property)
assert property == datastore_types.KEY_SPECIAL_PROPERTY
return entity.key()
else:
return entity[property]
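# Example of _GetPropertyValue (illustrative only; the entity is hypothetical):
#
#   entity = Entity('Person', name='bob')
#   entity['age'] = 42
#   _GetPropertyValue(entity, 'age')        # == 42
#   _GetPropertyValue(entity, '__key__')    # == entity.key()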
def _AddOrAppend(dictionary, key, value):
"""Adds the value to the existing values in the dictionary, if any.
If dictionary[key] doesn't exist, sets dictionary[key] to value.
If dictionary[key] is not a list, sets dictionary[key] to [old_value, value].
If dictionary[key] is a list, appends value to that list.
Args:
dictionary: a dict
key, value: anything
"""
if key in dictionary:
existing_value = dictionary[key]
if isinstance(existing_value, list):
existing_value.append(value)
else:
dictionary[key] = [existing_value, value]
else:
dictionary[key] = value
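# Example of _AddOrAppend (illustrative only):
#
#   d = {}
#   _AddOrAppend(d, 'tag', 'a')   # d == {'tag': 'a'}
#   _AddOrAppend(d, 'tag', 'b')   # d == {'tag': ['a', 'b']}
#   _AddOrAppend(d, 'tag', 'c')   # d == {'tag': ['a', 'b', 'c']}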
class Iterator(datastore_query.ResultsIterator):
"""Thin wrapper of datastore_query.ResultsIterator.
  Deprecated, do not use, only for backwards compatibility.
"""
def _Next(self, count=None):
if count is None:
count = 20
result = []
for r in self:
if len(result) >= count:
        break
result.append(r)
return result
def GetCompiledCursor(self, query):
return self.cursor()
def GetIndexList(self):
"""Returns the list of indexes used to perform the query."""
tuple_index_list = super(Iterator, self).index_list()
return [index for index, state in tuple_index_list]
_Get = _Next
index_list = GetIndexList
DatastoreRPC = apiproxy_stub_map.UserRPC
GetRpcFromKwargs = _GetConfigFromKwargs
_CurrentTransactionKey = IsInTransaction
_ToDatastoreError = datastore_rpc._ToDatastoreError
_DatastoreExceptionFromErrorCodeAndDetail = datastore_rpc._DatastoreExceptionFromErrorCodeAndDetail
|
{
"content_hash": "16508a749d35c7e8281f00b0e225ba15",
"timestamp": "",
"source": "github",
"line_count": 2865,
"max_line_length": 99,
"avg_line_length": 31.386387434554972,
"alnum_prop": 0.6731278218900825,
"repo_name": "ychen820/microblog",
"id": "f01e9bb2162fa947b2d8ed78fae1c002dd190e3a",
"size": "90527",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "y/google-cloud-sdk/platform/google_appengine/google/appengine/api/datastore.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "414229"
},
{
"name": "CSS",
"bytes": "257787"
},
{
"name": "Emacs Lisp",
"bytes": "4733"
},
{
"name": "Groff",
"bytes": "1236200"
},
{
"name": "HTML",
"bytes": "2617468"
},
{
"name": "JavaScript",
"bytes": "1106437"
},
{
"name": "Makefile",
"bytes": "15714"
},
{
"name": "Objective-C",
"bytes": "26302"
},
{
"name": "PHP",
"bytes": "2511443"
},
{
"name": "Perl",
"bytes": "1109010"
},
{
"name": "Python",
"bytes": "71588489"
},
{
"name": "R",
"bytes": "548"
},
{
"name": "Shell",
"bytes": "49796"
},
{
"name": "TeX",
"bytes": "3149"
},
{
"name": "VimL",
"bytes": "5645"
}
],
"symlink_target": ""
}
|
"""Handle remote configuration inputs specified via S3 buckets.
"""
import os
import yaml
from bcbio.distributed import objectstore
from bcbio import utils
def load_s3(sample_config):
"""Move a sample configuration locally, providing remote upload.
"""
with objectstore.open(sample_config) as in_handle:
config = yaml.load(in_handle)
r_sample_config = objectstore.parse_remote(sample_config)
config["upload"] = {"method": "s3",
"dir": os.path.join(os.pardir, "final"),
"bucket": r_sample_config.bucket,
"folder": os.path.join(os.path.dirname(r_sample_config.key), "final")}
region = r_sample_config.region or objectstore.default_region(sample_config)
if region:
config["upload"]["region"] = region
if not os.access(os.pardir, os.W_OK | os.X_OK):
raise IOError("Cannot write to the parent directory of work directory %s\n"
"bcbio wants to store prepared uploaded files to %s\n"
"We recommend structuring your project in a project specific directory structure\n"
"with a specific work directory (mkdir -p your-project/work && cd your-project/work)."
% (os.getcwd(), os.path.join(os.pardir, "final")))
config = _add_jar_resources(config, sample_config)
out_file = os.path.join(utils.safe_makedir(os.path.join(os.getcwd(), "config")),
os.path.basename(r_sample_config.key))
with open(out_file, "w") as out_handle:
yaml.dump(config, out_handle, default_flow_style=False, allow_unicode=False)
return out_file
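# Illustrative call of load_s3; the bucket and key below are hypothetical.
# The remote YAML is fetched, its upload section rewritten for S3, and a
# local copy written under <cwd>/config/.
#
# local_config = load_s3("s3://my-bucket/my-project/config/sample_config.yaml")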
def _add_jar_resources(config, sample_config):
"""Find uploaded jars for GATK and MuTect relative to input file.
Automatically puts these into the configuration file to make them available
for downstream processing. Searches for them in the specific project folder
and also a global jar directory for a bucket.
"""
base, rest = sample_config.split("//", 1)
for dirname in [os.path.join("%s//%s" % (base, rest.split("/")[0]), "jars"),
os.path.join(os.path.dirname(sample_config), "jars")]:
for fname in objectstore.list(dirname):
if fname.lower().find("genomeanalysistk") >= 0:
prog = "gatk"
elif fname.lower().find("mutect") >= 0:
prog = "mutect"
else:
prog = None
if prog:
if "resources" not in config:
config["resources"] = {}
if prog not in config["resources"]:
config["resources"][prog] = {}
config["resources"][prog]["jar"] = str(fname)
return config
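# Illustrative shape of the resources section produced above; the jar paths
# are hypothetical examples of what objectstore.list might return.
#
# config["resources"] == {
#     "gatk": {"jar": "s3://my-bucket/my-project/jars/GenomeAnalysisTK.jar"},
#     "mutect": {"jar": "s3://my-bucket/jars/muTect-1.1.7.jar"},
# }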
|
{
"content_hash": "49619955aea6e4d327168d82a17d0713",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 108,
"avg_line_length": 47.05084745762712,
"alnum_prop": 0.5979827089337176,
"repo_name": "brainstorm/bcbio-nextgen-vm",
"id": "4d2a4c1b51da7357ee166b15e9453695b74f3450",
"size": "2776",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "bcbiovm/aws/config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "144833"
},
{
"name": "Shell",
"bytes": "597"
}
],
"symlink_target": ""
}
|
import random
from django.core.management.base import BaseCommand
from django.contrib.auth.models import User
from ...models import (
Transcript, TranscriptPhraseVote
)
from ...tasks import update_transcript_stats
class Command(BaseCommand):
help = 'Creates random votes for 5 phrases in a random transcript'
def handle(self, *args, **options):
users = User.objects.all()[:5]
transcript = Transcript.objects.random_transcript(
in_progress=False
).first()
phrases = transcript.phrases.all()[:5]
for phrase in phrases:
for user in users:
TranscriptPhraseVote.objects.create(
transcript_phrase=phrase,
user=user,
upvote=random.choice([True, False])
)
update_transcript_stats(transcript)
|
{
"content_hash": "72ab0b1163630fd32af8a667ec9c8080",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 70,
"avg_line_length": 30.892857142857142,
"alnum_prop": 0.623121387283237,
"repo_name": "WGBH/FixIt",
"id": "c66e9c7d17d0d52e9f800e1c389d1aa23d77c64f",
"size": "865",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mla_game/apps/transcript/management/commands/fake_game_one_gameplay.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "58374"
},
{
"name": "HTML",
"bytes": "33239"
},
{
"name": "JavaScript",
"bytes": "145391"
},
{
"name": "Python",
"bytes": "161920"
}
],
"symlink_target": ""
}
|
from bottle import route, static_file, debug, run, get, redirect
#from bottle import post, request
import os, re, inspect
import json
#enable bottle debug
debug(True)
# WebApp route path
routePath = '/RpiMonitor'
# get directory of WebApp (pyWebMOC.py's dir)
rootPath = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
RRDDIR = rootPath + '/data'
@route(routePath)
def rootHome():
#return redirect(routePath+'/index.html')
return redirect(routePath+'/index.html')
@route(routePath + '/<filename:re:.*\.html>')
def html_file(filename):
return static_file(filename, root=rootPath)
@get(routePath + '/assets/<filepath:path>')
def assets_file(filepath):
return static_file(filepath, root=rootPath+'/assets')
@get(routePath + '/data/<filepath:path>')
def rrd_file(filepath):
return static_file(filepath, root=rootPath+'/data')
#get system rrdfiles
@get(routePath + '/sysrrd')
def sysRRDFile():
sysrrdlist = ["cpustatus.rrd", "meminfo.rrd", "uptime.rrd"]
return json.dumps({"rrdfiles":sysrrdlist})
#get network rrd files
@get(routePath + '/netrrd')
def getNetworkRRD():
global RRDDIR
flist = [f for f in os.listdir(RRDDIR) if re.match('^interface-\w*\.rrd', f)]
print flist
return json.dumps({"rrdflies":flist})
#get HDD rrd files
@get(routePath + '/hddrrd')
def getHDDRRD():
global RRDDIR
hdd_files = [f for f in os.listdir(RRDDIR) if re.match('^hdd-\w*\.rrd', f)]
return json.dumps({"rrdflies":hdd_files})
#get mount point rrd files
@get(routePath + '/mountrrd')
def getMountRRD():
flist = [f for f in os.listdir(RRDDIR) if re.match('^mount-\w*\.rrd', f)]
return json.dumps({"rrdflies":flist})
run(host='localhost', port=9999, reloader=True) #debug
|
{
"content_hash": "5ee7972eb3321b4330d76da3676d1bc7",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 84,
"avg_line_length": 28.80327868852459,
"alnum_prop": 0.6898121798520205,
"repo_name": "dmitryikh/rpi-monitor",
"id": "d05c729b7bb21810629eb12c0a0eb64e5e489749",
"size": "1780",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "web-server/rpi_monitor_web.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "411"
},
{
"name": "JavaScript",
"bytes": "315603"
},
{
"name": "Python",
"bytes": "17052"
}
],
"symlink_target": ""
}
|
from sys import maxsize
class Group:
def __init__(self, name=None, header=None, footer=None, gr_id=None):
self.name = name
self.header = header
self.footer = footer
self.id = gr_id
def __repr__(self):
return "%s:%s; %s; %s" % (self.id, self.name, self.header, self.footer)
def __eq__(self, other):
return (self.id is None or other.id is None or self.id == other.id) and self.name == other.name
def id_or_max(gr):
if gr.id:
return int(gr.id)
else:
return maxsize
|
{
"content_hash": "ecff25aa658966f0416e8b543267a1d4",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 103,
"avg_line_length": 28.45,
"alnum_prop": 0.5553602811950791,
"repo_name": "lyudmill/python_training",
"id": "2a956017117cee47193833b9eb15d97c6bb7795f",
"size": "569",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "model/group.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Gherkin",
"bytes": "563"
},
{
"name": "Python",
"bytes": "54951"
}
],
"symlink_target": ""
}
|
import unittest
from tkp.db.configstore import store_config, fetch_config
from tkp.db import rollback, execute, Database
from tkp.db.general import insert_dataset
from tkp.testutil.decorators import requires_database
from sqlalchemy.exc import IntegrityError
config = {'section1': {'key1': 'value1', 'key2': 2},
'section2': {'key3': 'value3', 'key4': 4},
'section3': {'key5': 0.20192, 'key4': 0.000001}}
@requires_database()
class TestConfigStore(unittest.TestCase):
def setUp(self):
description = "TestConfigStore"
self.dataset_id = insert_dataset(description)
def tearDown(self):
rollback()
def test_config_store(self):
store_config(config, self.dataset_id)
query1 = """
select count(id)
from config
where section=%s and key=%s and value=%s and dataset=%s
"""
args1 = ('section1', 'key1', 'value1', self.dataset_id)
num1 = execute(query1, args1).rowcount
self.assertEquals(num1, 1)
args2 = ('section2', 'key3', 'value3', self.dataset_id)
num2 = execute(query1, args2).rowcount
self.assertEquals(num2, 1)
def test_bad_type(self):
"""
        We don't allow storing types not in tkp.db.configstore.types
"""
bad_config = {'section4': {'key_badtype': ['bad, type']}}
self.assertRaises(TypeError, store_config, bad_config, self.dataset_id)
def test_bad_type_in_db(self):
"""
fetch_config should raise TypeError if invalid type in DB
"""
q = """
INSERT INTO config (dataset, section, key, value, type)
VALUES (%s, 'bad', 'type', '[]', 'list');
"""
execute(q, (self.dataset_id,))
self.assertRaises(TypeError, fetch_config, self.dataset_id)
def test_config_fetch(self):
store_config(config, self.dataset_id)
fetched_config = fetch_config(self.dataset_id)
self.assertEquals(config, fetched_config)
def test_double_store(self):
"""
storing the same data twice should fail
"""
store_config(config, self.dataset_id)
database = Database()
with self.assertRaises(IntegrityError):
store_config(config, self.dataset_id)
|
{
"content_hash": "d69eedcc310f2fd578fdbeeb1168fc54",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 79,
"avg_line_length": 33.8955223880597,
"alnum_prop": 0.6129458388375165,
"repo_name": "mkuiack/tkp",
"id": "831c1ecffe729694f332d2934635d15a4f2980b7",
"size": "2271",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/test_database/test_configstore.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "PLpgSQL",
"bytes": "18823"
},
{
"name": "Python",
"bytes": "903657"
},
{
"name": "Shell",
"bytes": "588"
}
],
"symlink_target": ""
}
|
from pyjamas import DOM
from pyjamas import Factory
from pyjamas.ui.Widget import Widget
class Frame(Widget):
_props = [("url", "Url", "Url", None),
]
def __init__(self, url="", Element=None, **kwargs):
kwargs['Url'] = kwargs.get('Url', url)
self.setElement(Element or DOM.createIFrame())
Widget.__init__(self, **kwargs)
@classmethod
def _getProps(self):
return Widget._getProps() + self._props
def getUrl(self):
return DOM.getAttribute(self.getElement(), "src")
def setUrl(self, url):
return DOM.setAttribute(self.getElement(), "src", url)
Factory.registerClass('pyjamas.ui.Frame', 'Frame', Frame)
|
{
"content_hash": "c7a32b01663f334e205c759a89065380",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 62,
"avg_line_length": 25.62962962962963,
"alnum_prop": 0.619942196531792,
"repo_name": "damoti/pyjx-gwt",
"id": "fad39743fffbafa6d63ad6ac625cdcb0f0cda4ff",
"size": "1351",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "gwt/pyjamas/ui/Frame.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2985"
},
{
"name": "JavaScript",
"bytes": "5695"
},
{
"name": "Python",
"bytes": "1783449"
}
],
"symlink_target": ""
}
|
import os
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
PROJECT_ROOT = os.path.dirname(os.path.dirname(__file__))
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(PROJECT_ROOT, 'example.db'),
}
}
SITE_ID = 1
# Make this unique, and don't share it with anybody.
SECRET_KEY = '5$f%)&a4tc*bg(79+ku!7o$kri-duw99@hq_)va^_kaw9*l)!7'
# Language
# TIME_ZONE = 'America/Chicago'
LANGUAGE_CODE = 'en-us'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Paths
MEDIA_ROOT = ''
MEDIA_URL = '/media/'
STATIC_ROOT = ''
STATIC_URL = '/static/'
# Apps
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'example.urls'
WSGI_APPLICATION = 'example.wsgi.application'
TEMPLATE_DIRS = ()
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.admin',
'django.contrib.contenttypes',
'django.contrib.sessions',
#'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'polymorphic', # needed if you want to use the polymorphic admin
'pexp', # this Django app is for testing and experimentation; not needed otherwise
)
# Logging configuration
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
|
{
"content_hash": "f2f91a049a4ad37e920c2038f5026503",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 98,
"avg_line_length": 23.48,
"alnum_prop": 0.6316013628620102,
"repo_name": "fusionbox/django_polymorphic",
"id": "05a5c1f5b47ef9c55d50fa82073d59d1280f3e6c",
"size": "2348",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "example/example/settings.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "147785"
},
{
"name": "Shell",
"bytes": "5120"
}
],
"symlink_target": ""
}
|
from __future__ import with_statement
import contextlib
import sys
from .interfaces import Connectable
from .interfaces import ExceptionContext
from .util import _distill_params
from .util import _distill_params_20
from .util import TransactionalContext
from .. import exc
from .. import inspection
from .. import log
from .. import util
from ..sql import compiler
from ..sql import util as sql_util
"""Defines :class:`_engine.Connection` and :class:`_engine.Engine`.
"""
_EMPTY_EXECUTION_OPTS = util.immutabledict()
class Connection(Connectable):
"""Provides high-level functionality for a wrapped DB-API connection.
**This is the SQLAlchemy 1.x.x version** of the :class:`_engine.Connection`
class. For the :term:`2.0 style` version, which features some API
differences, see :class:`_future.Connection`.
The :class:`_engine.Connection` object is procured by calling
the :meth:`_engine.Engine.connect` method of the :class:`_engine.Engine`
object, and provides services for execution of SQL statements as well
as transaction control.
The Connection object is **not** thread-safe. While a Connection can be
shared among threads using properly synchronized access, it is still
possible that the underlying DBAPI connection may not support shared
access between threads. Check the DBAPI documentation for details.
The Connection object represents a single DBAPI connection checked out
    from the connection pool. In this state, the connection pool has no effect
upon the connection, including its expiration or timeout state. For the
connection pool to properly manage connections, connections should be
returned to the connection pool (i.e. ``connection.close()``) whenever the
connection is not in use.
.. index::
single: thread safety; Connection
"""
_is_future = False
_sqla_logger_namespace = "sqlalchemy.engine.Connection"
# used by sqlalchemy.engine.util.TransactionalContext
_trans_context_manager = None
def __init__(
self,
engine,
connection=None,
close_with_result=False,
_branch_from=None,
_execution_options=None,
_dispatch=None,
_has_events=None,
_allow_revalidate=True,
):
"""Construct a new Connection."""
self.engine = engine
self.dialect = engine.dialect
self.__branch_from = _branch_from
if _branch_from:
# branching is always "from" the root connection
assert _branch_from.__branch_from is None
self._dbapi_connection = connection
self._execution_options = _execution_options
self._echo = _branch_from._echo
self.should_close_with_result = False
self.dispatch = _dispatch
self._has_events = _branch_from._has_events
else:
self._dbapi_connection = (
connection
if connection is not None
else engine.raw_connection()
)
self._transaction = self._nested_transaction = None
self.__savepoint_seq = 0
self.__in_begin = False
self.should_close_with_result = close_with_result
self.__can_reconnect = _allow_revalidate
self._echo = self.engine._should_log_info()
if _has_events is None:
# if _has_events is sent explicitly as False,
# then don't join the dispatch of the engine; we don't
# want to handle any of the engine's events in that case.
self.dispatch = self.dispatch._join(engine.dispatch)
self._has_events = _has_events or (
_has_events is None and engine._has_events
)
assert not _execution_options
self._execution_options = engine._execution_options
if self._has_events or self.engine._has_events:
self.dispatch.engine_connect(self, _branch_from is not None)
@util.memoized_property
def _message_formatter(self):
if "logging_token" in self._execution_options:
token = self._execution_options["logging_token"]
return lambda msg: "[%s] %s" % (token, msg)
else:
return None
def _log_info(self, message, *arg, **kw):
fmt = self._message_formatter
if fmt:
message = fmt(message)
self.engine.logger.info(message, *arg, **kw)
def _log_debug(self, message, *arg, **kw):
fmt = self._message_formatter
if fmt:
message = fmt(message)
self.engine.logger.debug(message, *arg, **kw)
@property
def _schema_translate_map(self):
return self._execution_options.get("schema_translate_map", None)
def schema_for_object(self, obj):
"""Return the schema name for the given schema item taking into
account current schema translate map.
"""
name = obj.schema
schema_translate_map = self._execution_options.get(
"schema_translate_map", None
)
if (
schema_translate_map
and name in schema_translate_map
and obj._use_schema_map
):
return schema_translate_map[name]
else:
return name
def _branch(self):
"""Return a new Connection which references this Connection's
engine and connection; but does not have close_with_result enabled,
and also whose close() method does nothing.
.. deprecated:: 1.4 the "branching" concept will be removed in
SQLAlchemy 2.0 as well as the "Connection.connect()" method which
is the only consumer for this.
The Core uses this very sparingly, only in the case of
custom SQL default functions that are to be INSERTed as the
primary key of a row where we need to get the value back, so we have
to invoke it distinctly - this is a very uncommon case.
Userland code accesses _branch() when the connect()
method is called. The branched connection
acts as much as possible like the parent, except that it stays
connected when a close() event occurs.
"""
return self.engine._connection_cls(
self.engine,
self._dbapi_connection,
_branch_from=self.__branch_from if self.__branch_from else self,
_execution_options=self._execution_options,
_has_events=self._has_events,
_dispatch=self.dispatch,
)
def _generate_for_options(self):
"""define connection method chaining behavior for execution_options"""
if self._is_future:
return self
else:
c = self.__class__.__new__(self.__class__)
c.__dict__ = self.__dict__.copy()
return c
def __enter__(self):
return self
def __exit__(self, type_, value, traceback):
self.close()
def execution_options(self, **opt):
r""" Set non-SQL options for the connection which take effect
during execution.
For a "future" style connection, this method returns this same
:class:`_future.Connection` object with the new options added.
For a legacy connection, this method returns a copy of this
:class:`_engine.Connection` which references the same underlying DBAPI
connection, but also defines the given execution options which will
take effect for a call to
:meth:`execute`. As the new :class:`_engine.Connection` references the
same underlying resource, it's usually a good idea to ensure that
the copies will be discarded immediately, which is implicit if used
as in::
result = connection.execution_options(stream_results=True).\
execute(stmt)
Note that any key/value can be passed to
:meth:`_engine.Connection.execution_options`,
and it will be stored in the
``_execution_options`` dictionary of the :class:`_engine.Connection`.
It
is suitable for usage by end-user schemes to communicate with
event listeners, for example.
The keywords that are currently recognized by SQLAlchemy itself
include all those listed under :meth:`.Executable.execution_options`,
as well as others that are specific to :class:`_engine.Connection`.
:param autocommit: Available on: Connection, statement.
When True, a COMMIT will be invoked after execution
when executed in 'autocommit' mode, i.e. when an explicit
transaction is not begun on the connection. Note that this
is **library level, not DBAPI level autocommit**. The DBAPI
connection will remain in a real transaction unless the
"AUTOCOMMIT" isolation level is used.
.. deprecated:: 1.4 The "autocommit" execution option is deprecated
and will be removed in SQLAlchemy 2.0. See
:ref:`migration_20_autocommit` for discussion.
:param compiled_cache: Available on: Connection.
A dictionary where :class:`.Compiled` objects
will be cached when the :class:`_engine.Connection`
compiles a clause
expression into a :class:`.Compiled` object. This dictionary will
supersede the statement cache that may be configured on the
:class:`_engine.Engine` itself. If set to None, caching
is disabled, even if the engine has a configured cache size.
Note that the ORM makes use of its own "compiled" caches for
some operations, including flush operations. The caching
used by the ORM internally supersedes a cache dictionary
specified here.
:param logging_token: Available on: :class:`_engine.Connection`,
:class:`_engine.Engine`.
Adds the specified string token surrounded by brackets in log
messages logged by the connection, i.e. the logging that's enabled
either via the :paramref:`_sa.create_engine.echo` flag or via the
``logging.getLogger("sqlalchemy.engine")`` logger. This allows a
per-connection or per-sub-engine token to be available which is
useful for debugging concurrent connection scenarios.
.. versionadded:: 1.4.0b2
.. seealso::
:ref:`dbengine_logging_tokens` - usage example
:paramref:`_sa.create_engine.logging_name` - adds a name to the
name used by the Python logger object itself.
:param isolation_level: Available on: :class:`_engine.Connection`.
Set the transaction isolation level for the lifespan of this
:class:`_engine.Connection` object.
Valid values include those string
values accepted by the :paramref:`_sa.create_engine.isolation_level`
parameter passed to :func:`_sa.create_engine`. These levels are
semi-database specific; see individual dialect documentation for
valid levels.
The isolation level option applies the isolation level by emitting
statements on the DBAPI connection, and **necessarily affects the
original Connection object overall**, not just the copy that is
returned by the call to :meth:`_engine.Connection.execution_options`
method. The isolation level will remain at the given setting until
the DBAPI connection itself is returned to the connection pool, i.e.
the :meth:`_engine.Connection.close` method on the original
:class:`_engine.Connection` is called,
where an event handler will emit
additional statements on the DBAPI connection in order to revert the
isolation level change.
.. warning:: The ``isolation_level`` execution option should
**not** be used when a transaction is already established, that
is, the :meth:`_engine.Connection.begin`
method or similar has been
called. A database cannot change the isolation level on a
transaction in progress, and different DBAPIs and/or
SQLAlchemy dialects may implicitly roll back or commit
the transaction, or not affect the connection at all.
.. note:: The ``isolation_level`` execution option is implicitly
reset if the :class:`_engine.Connection` is invalidated, e.g. via
the :meth:`_engine.Connection.invalidate` method, or if a
disconnection error occurs. The new connection produced after
the invalidation will not have the isolation level re-applied
to it automatically.
.. seealso::
:paramref:`_sa.create_engine.isolation_level`
- set per :class:`_engine.Engine` isolation level
:meth:`_engine.Connection.get_isolation_level`
- view current level
:ref:`SQLite Transaction Isolation <sqlite_isolation_level>`
:ref:`PostgreSQL Transaction Isolation <postgresql_isolation_level>`
:ref:`MySQL Transaction Isolation <mysql_isolation_level>`
:ref:`SQL Server Transaction Isolation <mssql_isolation_level>`
:ref:`session_transaction_isolation` - for the ORM
:param no_parameters: When ``True``, if the final parameter
list or dictionary is totally empty, will invoke the
statement on the cursor as ``cursor.execute(statement)``,
not passing the parameter collection at all.
Some DBAPIs such as psycopg2 and mysql-python consider
percent signs as significant only when parameters are
present; this option allows code to generate SQL
containing percent signs (and possibly other characters)
that is neutral regarding whether it's executed by the DBAPI
or piped into a script that's later invoked by
command line tools.
:param stream_results: Available on: Connection, statement.
Indicate to the dialect that results should be
"streamed" and not pre-buffered, if possible. This is a limitation
of many DBAPIs. The flag is currently understood within a subset
of dialects within the PostgreSQL and MySQL categories, and
may be supported by other third party dialects as well.
.. seealso::
:ref:`engine_stream_results`
:param schema_translate_map: Available on: Connection, Engine.
A dictionary mapping schema names to schema names, that will be
applied to the :paramref:`_schema.Table.schema` element of each
:class:`_schema.Table`
encountered when SQL or DDL expression elements
are compiled into strings; the resulting schema name will be
converted based on presence in the map of the original name.
.. versionadded:: 1.1
.. seealso::
:ref:`schema_translating`
.. seealso::
:meth:`_engine.Engine.execution_options`
:meth:`.Executable.execution_options`
:meth:`_engine.Connection.get_execution_options`
""" # noqa
c = self._generate_for_options()
c._execution_options = c._execution_options.union(opt)
if self._has_events or self.engine._has_events:
self.dispatch.set_connection_execution_options(c, opt)
self.dialect.set_connection_execution_options(c, opt)
return c
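    # Illustrative use of execution_options(); the engine, text() construct and
    # isolation level below are hypothetical examples, not part of this module.
    #
    #   with engine.connect() as conn:
    #       autocommit_conn = conn.execution_options(
    #           isolation_level="AUTOCOMMIT"
    #       )
    #       autocommit_conn.execute(text("VACUUM"))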
def get_execution_options(self):
"""Get the non-SQL options which will take effect during execution.
.. versionadded:: 1.3
.. seealso::
:meth:`_engine.Connection.execution_options`
"""
return self._execution_options
@property
def closed(self):
"""Return True if this connection is closed."""
# note this is independent for a "branched" connection vs.
# the base
return self._dbapi_connection is None and not self.__can_reconnect
@property
def invalidated(self):
"""Return True if this connection was invalidated."""
# prior to 1.4, "invalid" was stored as a state independent of
# "closed", meaning an invalidated connection could be "closed",
# the _dbapi_connection would be None and closed=True, yet the
# "invalid" flag would stay True. This meant that there were
# three separate states (open/valid, closed/valid, closed/invalid)
# when there is really no reason for that; a connection that's
# "closed" does not need to be "invalid". So the state is now
# represented by the two facts alone.
if self.__branch_from:
return self.__branch_from.invalidated
return self._dbapi_connection is None and not self.closed
@property
def connection(self):
"""The underlying DB-API connection managed by this Connection.
This is a SQLAlchemy connection-pool proxied connection
which then has the attribute
:attr:`_pool._ConnectionFairy.dbapi_connection` that refers to the
actual driver connection.
.. seealso::
:ref:`dbapi_connections`
"""
if self._dbapi_connection is None:
try:
return self._revalidate_connection()
except (exc.PendingRollbackError, exc.ResourceClosedError):
raise
except BaseException as e:
self._handle_dbapi_exception(e, None, None, None, None)
else:
return self._dbapi_connection
def get_isolation_level(self):
"""Return the current isolation level assigned to this
:class:`_engine.Connection`.
This will typically be the default isolation level as determined
        by the dialect, unless the
:paramref:`.Connection.execution_options.isolation_level`
feature has been used to alter the isolation level on a
per-:class:`_engine.Connection` basis.
This attribute will typically perform a live SQL operation in order
to procure the current isolation level, so the value returned is the
actual level on the underlying DBAPI connection regardless of how
this state was set. Compare to the
:attr:`_engine.Connection.default_isolation_level` accessor
which returns the dialect-level setting without performing a SQL
query.
.. versionadded:: 0.9.9
.. seealso::
:attr:`_engine.Connection.default_isolation_level`
- view default level
:paramref:`_sa.create_engine.isolation_level`
- set per :class:`_engine.Engine` isolation level
:paramref:`.Connection.execution_options.isolation_level`
- set per :class:`_engine.Connection` isolation level
"""
try:
return self.dialect.get_isolation_level(self.connection)
except BaseException as e:
self._handle_dbapi_exception(e, None, None, None, None)
@property
def default_isolation_level(self):
"""The default isolation level assigned to this
:class:`_engine.Connection`.
This is the isolation level setting that the
:class:`_engine.Connection`
has when first procured via the :meth:`_engine.Engine.connect` method.
This level stays in place until the
:paramref:`.Connection.execution_options.isolation_level` is used
to change the setting on a per-:class:`_engine.Connection` basis.
Unlike :meth:`_engine.Connection.get_isolation_level`,
this attribute is set
ahead of time from the first connection procured by the dialect,
        so no SQL query is invoked when this accessor is called.
.. versionadded:: 0.9.9
.. seealso::
:meth:`_engine.Connection.get_isolation_level`
- view current level
:paramref:`_sa.create_engine.isolation_level`
- set per :class:`_engine.Engine` isolation level
:paramref:`.Connection.execution_options.isolation_level`
- set per :class:`_engine.Connection` isolation level
"""
return self.dialect.default_isolation_level
def _invalid_transaction(self):
if self.invalidated:
raise exc.PendingRollbackError(
"Can't reconnect until invalid %stransaction is rolled "
"back."
% (
"savepoint "
if self._nested_transaction is not None
else ""
),
code="8s2b",
)
else:
assert not self._is_future
raise exc.PendingRollbackError(
"This connection is on an inactive %stransaction. "
"Please rollback() fully before proceeding."
% (
"savepoint "
if self._nested_transaction is not None
else ""
),
code="8s2a",
)
def _revalidate_connection(self):
if self.__branch_from:
return self.__branch_from._revalidate_connection()
if self.__can_reconnect and self.invalidated:
if self._transaction is not None:
self._invalid_transaction()
self._dbapi_connection = self.engine.raw_connection(
_connection=self
)
return self._dbapi_connection
raise exc.ResourceClosedError("This Connection is closed")
@property
def _still_open_and_dbapi_connection_is_valid(self):
return self._dbapi_connection is not None and getattr(
self._dbapi_connection, "is_valid", False
)
@property
def info(self):
"""Info dictionary associated with the underlying DBAPI connection
referred to by this :class:`_engine.Connection`, allowing user-defined
data to be associated with the connection.
The data here will follow along with the DBAPI connection including
after it is returned to the connection pool and used again
in subsequent instances of :class:`_engine.Connection`.
"""
return self.connection.info
@util.deprecated_20(":meth:`.Connection.connect`")
def connect(self, close_with_result=False):
"""Returns a branched version of this :class:`_engine.Connection`.
The :meth:`_engine.Connection.close` method on the returned
:class:`_engine.Connection` can be called and this
:class:`_engine.Connection` will remain open.
This method provides usage symmetry with
:meth:`_engine.Engine.connect`, including for usage
with context managers.
"""
return self._branch()
def invalidate(self, exception=None):
"""Invalidate the underlying DBAPI connection associated with
this :class:`_engine.Connection`.
An attempt will be made to close the underlying DBAPI connection
immediately; however if this operation fails, the error is logged
but not raised. The connection is then discarded whether or not
close() succeeded.
Upon the next use (where "use" typically means using the
:meth:`_engine.Connection.execute` method or similar),
this :class:`_engine.Connection` will attempt to
procure a new DBAPI connection using the services of the
:class:`_pool.Pool` as a source of connectivity (e.g.
a "reconnection").
If a transaction was in progress (e.g. the
:meth:`_engine.Connection.begin` method has been called) when
:meth:`_engine.Connection.invalidate` method is called, at the DBAPI
level all state associated with this transaction is lost, as
the DBAPI connection is closed. The :class:`_engine.Connection`
will not allow a reconnection to proceed until the
:class:`.Transaction` object is ended, by calling the
:meth:`.Transaction.rollback` method; until that point, any attempt at
continuing to use the :class:`_engine.Connection` will raise an
:class:`~sqlalchemy.exc.InvalidRequestError`.
This is to prevent applications from accidentally
        continuing ongoing transactional operations despite the
fact that the transaction has been lost due to an
invalidation.
The :meth:`_engine.Connection.invalidate` method,
just like auto-invalidation,
will at the connection pool level invoke the
:meth:`_events.PoolEvents.invalidate` event.
:param exception: an optional ``Exception`` instance that's the
          reason for the invalidation. It is passed along to event handlers
and logging functions.
.. seealso::
:ref:`pool_connection_invalidation`
"""
if self.__branch_from:
return self.__branch_from.invalidate(exception=exception)
if self.invalidated:
return
if self.closed:
raise exc.ResourceClosedError("This Connection is closed")
if self._still_open_and_dbapi_connection_is_valid:
self._dbapi_connection.invalidate(exception)
self._dbapi_connection = None
def detach(self):
"""Detach the underlying DB-API connection from its connection pool.
E.g.::
with engine.connect() as conn:
conn.detach()
conn.execute(text("SET search_path TO schema1, schema2"))
# work with connection
# connection is fully closed (since we used "with:", can
# also call .close())
This :class:`_engine.Connection` instance will remain usable.
When closed
(or exited from a context manager context as above),
the DB-API connection will be literally closed and not
returned to its originating pool.
This method can be used to insulate the rest of an application
from a modified state on a connection (such as a transaction
isolation level or similar).
"""
self._dbapi_connection.detach()
def _autobegin(self):
self.begin()
def begin(self):
"""Begin a transaction and return a transaction handle.
The returned object is an instance of :class:`.Transaction`.
This object represents the "scope" of the transaction,
which completes when either the :meth:`.Transaction.rollback`
or :meth:`.Transaction.commit` method is called.
.. tip::
The :meth:`_engine.Connection.begin` method is invoked when using
the :meth:`_engine.Engine.begin` context manager method as well.
All documentation that refers to behaviors specific to the
:meth:`_engine.Connection.begin` method also apply to use of the
:meth:`_engine.Engine.begin` method.
Legacy use: nested calls to :meth:`.begin` on the same
:class:`_engine.Connection` will return new :class:`.Transaction`
objects that represent an emulated transaction within the scope of the
enclosing transaction, that is::
trans = conn.begin() # outermost transaction
trans2 = conn.begin() # "nested"
trans2.commit() # does nothing
trans.commit() # actually commits
Calls to :meth:`.Transaction.commit` only have an effect
when invoked via the outermost :class:`.Transaction` object, though the
:meth:`.Transaction.rollback` method of any of the
:class:`.Transaction` objects will roll back the
transaction.
.. tip::
The above "nesting" behavior is a legacy behavior specific to
:term:`1.x style` use and will be removed in SQLAlchemy 2.0. For
notes on :term:`2.0 style` use, see
:meth:`_future.Connection.begin`.
.. seealso::
:meth:`_engine.Connection.begin_nested` - use a SAVEPOINT
:meth:`_engine.Connection.begin_twophase` -
use a two phase /XID transaction
:meth:`_engine.Engine.begin` - context manager available from
:class:`_engine.Engine`
"""
if self._is_future:
assert not self.__branch_from
elif self.__branch_from:
return self.__branch_from.begin()
if self.__in_begin:
# for dialects that emit SQL within the process of
# dialect.do_begin() or dialect.do_begin_twophase(), this
# flag prevents "autobegin" from being emitted within that
# process, while allowing self._transaction to remain at None
# until it's complete.
return
elif self._transaction is None:
self._transaction = RootTransaction(self)
return self._transaction
else:
if self._is_future:
raise exc.InvalidRequestError(
"a transaction is already begun for this connection"
)
else:
return MarkerTransaction(self)
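    # Illustrative 1.x-style use of begin(); the engine and table below are
    # hypothetical.
    #
    #   with engine.connect() as conn:
    #       trans = conn.begin()
    #       try:
    #           conn.execute(some_table.insert(), {"x": 1})
    #           trans.commit()
    #       except Exception:
    #           trans.rollback()
    #           raise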
def begin_nested(self):
"""Begin a nested transaction (i.e. SAVEPOINT) and return a
transaction handle, assuming an outer transaction is already
established.
Nested transactions require SAVEPOINT support in the
underlying database. Any transaction in the hierarchy may
``commit`` and ``rollback``, however the outermost transaction
still controls the overall ``commit`` or ``rollback`` of the
        transaction as a whole.
The legacy form of :meth:`_engine.Connection.begin_nested` method has
alternate behaviors based on whether or not the
:meth:`_engine.Connection.begin` method was called previously. If
:meth:`_engine.Connection.begin` was not called, then this method will
behave the same as the :meth:`_engine.Connection.begin` method and
return a :class:`.RootTransaction` object that begins and commits a
real transaction - **no savepoint is invoked**. If
:meth:`_engine.Connection.begin` **has** been called, and a
:class:`.RootTransaction` is already established, then this method
returns an instance of :class:`.NestedTransaction` which will invoke
and manage the scope of a SAVEPOINT.
.. tip::
The above mentioned behavior of
:meth:`_engine.Connection.begin_nested` is a legacy behavior
specific to :term:`1.x style` use. In :term:`2.0 style` use, the
:meth:`_future.Connection.begin_nested` method instead autobegins
the outer transaction that can be committed using
"commit-as-you-go" style; see
:meth:`_future.Connection.begin_nested` for migration details.
.. versionchanged:: 1.4.13 The behavior of
:meth:`_engine.Connection.begin_nested`
as returning a :class:`.RootTransaction` if
:meth:`_engine.Connection.begin` were not called has been restored
as was the case in 1.3.x versions; in previous 1.4.x versions, an
outer transaction would be "autobegun" but would not be committed.
.. seealso::
:meth:`_engine.Connection.begin`
:meth:`_engine.Connection.begin_twophase`
"""
if self._is_future:
assert not self.__branch_from
elif self.__branch_from:
return self.__branch_from.begin_nested()
if self._transaction is None:
if not self._is_future:
util.warn_deprecated_20(
"Calling Connection.begin_nested() in 2.0 style use will "
"return a NestedTransaction (SAVEPOINT) in all cases, "
"that will not commit the outer transaction. For code "
"that is cross-compatible between 1.x and 2.0 style use, "
"ensure Connection.begin() is called before calling "
"Connection.begin_nested()."
)
return self.begin()
else:
self._autobegin()
return NestedTransaction(self)
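    # Illustrative use of begin_nested(); assumes SAVEPOINT support in the
    # backend and a hypothetical table.
    #
    #   with engine.connect() as conn:
    #       outer = conn.begin()
    #       conn.execute(some_table.insert(), {"id": 1})
    #       savepoint = conn.begin_nested()
    #       conn.execute(some_table.insert(), {"id": 2})
    #       savepoint.rollback()   # undoes only the second INSERT
    #       outer.commit()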
def begin_twophase(self, xid=None):
"""Begin a two-phase or XA transaction and return a transaction
handle.
The returned object is an instance of :class:`.TwoPhaseTransaction`,
which in addition to the methods provided by
:class:`.Transaction`, also provides a
:meth:`~.TwoPhaseTransaction.prepare` method.
:param xid: the two phase transaction id. If not supplied, a
random id will be generated.
.. seealso::
:meth:`_engine.Connection.begin`
:meth:`_engine.Connection.begin_twophase`
"""
if self.__branch_from:
return self.__branch_from.begin_twophase(xid=xid)
if self._transaction is not None:
raise exc.InvalidRequestError(
"Cannot start a two phase transaction when a transaction "
"is already in progress."
)
if xid is None:
xid = self.engine.dialect.create_xid()
return TwoPhaseTransaction(self, xid)
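    # Illustrative use of begin_twophase(); requires a dialect with two-phase
    # support, and the table below is hypothetical.
    #
    #   with engine.connect() as conn:
    #       xa = conn.begin_twophase()
    #       conn.execute(some_table.insert(), {"x": 1})
    #       xa.prepare()
    #       xa.commit()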
def recover_twophase(self):
return self.engine.dialect.do_recover_twophase(self)
def rollback_prepared(self, xid, recover=False):
self.engine.dialect.do_rollback_twophase(self, xid, recover=recover)
def commit_prepared(self, xid, recover=False):
self.engine.dialect.do_commit_twophase(self, xid, recover=recover)
def in_transaction(self):
"""Return True if a transaction is in progress."""
if self.__branch_from is not None:
return self.__branch_from.in_transaction()
return self._transaction is not None and self._transaction.is_active
def in_nested_transaction(self):
"""Return True if a transaction is in progress."""
if self.__branch_from is not None:
return self.__branch_from.in_nested_transaction()
return (
self._nested_transaction is not None
and self._nested_transaction.is_active
)
def _is_autocommit(self):
return (
self._execution_options.get("isolation_level", None)
== "AUTOCOMMIT"
)
def get_transaction(self):
"""Return the current root transaction in progress, if any.
.. versionadded:: 1.4
"""
if self.__branch_from is not None:
return self.__branch_from.get_transaction()
return self._transaction
def get_nested_transaction(self):
"""Return the current nested transaction in progress, if any.
.. versionadded:: 1.4
"""
if self.__branch_from is not None:
return self.__branch_from.get_nested_transaction()
return self._nested_transaction
def _begin_impl(self, transaction):
assert not self.__branch_from
if self._echo:
self._log_info("BEGIN (implicit)")
self.__in_begin = True
if self._has_events or self.engine._has_events:
self.dispatch.begin(self)
try:
self.engine.dialect.do_begin(self.connection)
except BaseException as e:
self._handle_dbapi_exception(e, None, None, None, None)
finally:
self.__in_begin = False
def _rollback_impl(self):
assert not self.__branch_from
if self._has_events or self.engine._has_events:
self.dispatch.rollback(self)
if self._still_open_and_dbapi_connection_is_valid:
if self._echo:
if self._is_autocommit():
self._log_info(
"ROLLBACK using DBAPI connection.rollback(), "
"DBAPI should ignore due to autocommit mode"
)
else:
self._log_info("ROLLBACK")
try:
self.engine.dialect.do_rollback(self.connection)
except BaseException as e:
self._handle_dbapi_exception(e, None, None, None, None)
def _commit_impl(self, autocommit=False):
assert not self.__branch_from
# AUTOCOMMIT isolation-level is a dialect-specific concept, however
# if a connection has this set as the isolation level, we can skip
# the "autocommit" warning as the operation will do "autocommit"
# in any case
if autocommit and not self._is_autocommit():
util.warn_deprecated_20(
"The current statement is being autocommitted using "
"implicit autocommit, which will be removed in "
"SQLAlchemy 2.0. "
"Use the .begin() method of Engine or Connection in order to "
"use an explicit transaction for DML and DDL statements."
)
if self._has_events or self.engine._has_events:
self.dispatch.commit(self)
if self._echo:
if self._is_autocommit():
self._log_info(
"COMMIT using DBAPI connection.commit(), "
"DBAPI should ignore due to autocommit mode"
)
else:
self._log_info("COMMIT")
try:
self.engine.dialect.do_commit(self.connection)
except BaseException as e:
self._handle_dbapi_exception(e, None, None, None, None)
def _savepoint_impl(self, name=None):
assert not self.__branch_from
if self._has_events or self.engine._has_events:
self.dispatch.savepoint(self, name)
if name is None:
self.__savepoint_seq += 1
name = "sa_savepoint_%s" % self.__savepoint_seq
if self._still_open_and_dbapi_connection_is_valid:
self.engine.dialect.do_savepoint(self, name)
return name
def _rollback_to_savepoint_impl(self, name):
assert not self.__branch_from
if self._has_events or self.engine._has_events:
self.dispatch.rollback_savepoint(self, name, None)
if self._still_open_and_dbapi_connection_is_valid:
self.engine.dialect.do_rollback_to_savepoint(self, name)
def _release_savepoint_impl(self, name):
assert not self.__branch_from
if self._has_events or self.engine._has_events:
self.dispatch.release_savepoint(self, name, None)
if self._still_open_and_dbapi_connection_is_valid:
self.engine.dialect.do_release_savepoint(self, name)
def _begin_twophase_impl(self, transaction):
assert not self.__branch_from
if self._echo:
self._log_info("BEGIN TWOPHASE (implicit)")
if self._has_events or self.engine._has_events:
self.dispatch.begin_twophase(self, transaction.xid)
if self._still_open_and_dbapi_connection_is_valid:
self.__in_begin = True
try:
self.engine.dialect.do_begin_twophase(self, transaction.xid)
except BaseException as e:
self._handle_dbapi_exception(e, None, None, None, None)
finally:
self.__in_begin = False
def _prepare_twophase_impl(self, xid):
assert not self.__branch_from
if self._has_events or self.engine._has_events:
self.dispatch.prepare_twophase(self, xid)
if self._still_open_and_dbapi_connection_is_valid:
assert isinstance(self._transaction, TwoPhaseTransaction)
try:
self.engine.dialect.do_prepare_twophase(self, xid)
except BaseException as e:
self._handle_dbapi_exception(e, None, None, None, None)
def _rollback_twophase_impl(self, xid, is_prepared):
assert not self.__branch_from
if self._has_events or self.engine._has_events:
self.dispatch.rollback_twophase(self, xid, is_prepared)
if self._still_open_and_dbapi_connection_is_valid:
assert isinstance(self._transaction, TwoPhaseTransaction)
try:
self.engine.dialect.do_rollback_twophase(
self, xid, is_prepared
)
except BaseException as e:
self._handle_dbapi_exception(e, None, None, None, None)
def _commit_twophase_impl(self, xid, is_prepared):
assert not self.__branch_from
if self._has_events or self.engine._has_events:
self.dispatch.commit_twophase(self, xid, is_prepared)
if self._still_open_and_dbapi_connection_is_valid:
assert isinstance(self._transaction, TwoPhaseTransaction)
try:
self.engine.dialect.do_commit_twophase(self, xid, is_prepared)
except BaseException as e:
self._handle_dbapi_exception(e, None, None, None, None)
def _autorollback(self):
if self.__branch_from:
self.__branch_from._autorollback()
if not self.in_transaction():
self._rollback_impl()
def _warn_for_legacy_exec_format(self):
util.warn_deprecated_20(
"The connection.execute() method in "
"SQLAlchemy 2.0 will accept parameters as a single "
"dictionary or a "
"single sequence of dictionaries only. "
"Parameters passed as keyword arguments, tuples or positionally "
"oriented dictionaries and/or tuples "
"will no longer be accepted."
)
def close(self):
"""Close this :class:`_engine.Connection`.
This results in a release of the underlying database
resources, that is, the DBAPI connection referenced
internally. The DBAPI connection is typically restored
back to the connection-holding :class:`_pool.Pool` referenced
by the :class:`_engine.Engine` that produced this
:class:`_engine.Connection`. Any transactional state present on
the DBAPI connection is also unconditionally released via
the DBAPI connection's ``rollback()`` method, regardless
of any :class:`.Transaction` object that may be
outstanding with regards to this :class:`_engine.Connection`.
After :meth:`_engine.Connection.close` is called, the
:class:`_engine.Connection` is permanently in a closed state,
and will allow no further operations.
"""
if self.__branch_from:
assert not self._is_future
util.warn_deprecated_20(
"The .close() method on a so-called 'branched' connection is "
"deprecated as of 1.4, as are 'branched' connections overall, "
"and will be removed in a future release. If this is a "
"default-handling function, don't close the connection."
)
self._dbapi_connection = None
self.__can_reconnect = False
return
if self._transaction:
self._transaction.close()
skip_reset = True
else:
skip_reset = False
if self._dbapi_connection is not None:
conn = self._dbapi_connection
# as we just closed the transaction, close the connection
# pool connection without doing an additional reset
if skip_reset:
conn._close_no_reset()
else:
conn.close()
# There is a slight chance that conn.close() may have
# triggered an invalidation here in which case
# _dbapi_connection would already be None, however usually
# it will be non-None here and in a "closed" state.
self._dbapi_connection = None
self.__can_reconnect = False
def scalar(self, object_, *multiparams, **params):
"""Executes and returns the first column of the first row.
The underlying result/cursor is closed after execution.
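        E.g., an illustrative sketch; the table name is hypothetical::

            from sqlalchemy import text

            count = conn.scalar(text("SELECT count(*) FROM some_table"))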
"""
return self.execute(object_, *multiparams, **params).scalar()
def scalars(self, object_, *multiparams, **params):
"""Executes and returns a scalar result set, which yields scalar values
from the first column of each row.
This method is equivalent to calling :meth:`_engine.Connection.execute`
to receive a :class:`_result.Result` object, then invoking the
:meth:`_result.Result.scalars` method to produce a
:class:`_result.ScalarResult` instance.
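        E.g., an illustrative sketch; the table and column names are
        hypothetical::

            from sqlalchemy import text

            for name in conn.scalars(text("SELECT name FROM some_table")):
                print(name)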
:return: a :class:`_result.ScalarResult`
.. versionadded:: 1.4.24
"""
return self.execute(object_, *multiparams, **params).scalars()
def execute(self, statement, *multiparams, **params):
r"""Executes a SQL statement construct and returns a
:class:`_engine.CursorResult`.
:param statement: The statement to be executed. May be
one of:
* a plain string (deprecated)
* any :class:`_expression.ClauseElement` construct that is also
a subclass of :class:`.Executable`, such as a
:func:`_expression.select` construct
* a :class:`.FunctionElement`, such as that generated
by :data:`.func`, will be automatically wrapped in
a SELECT statement, which is then executed.
* a :class:`.DDLElement` object
* a :class:`.DefaultGenerator` object
* a :class:`.Compiled` object
.. deprecated:: 2.0 passing a string to
:meth:`_engine.Connection.execute` is
deprecated and will be removed in version 2.0. Use the
:func:`_expression.text` construct with
:meth:`_engine.Connection.execute`, or the
:meth:`_engine.Connection.exec_driver_sql`
method to invoke a driver-level
SQL string.
:param \*multiparams/\**params: represent bound parameter
values to be used in the execution. Typically,
the format is either a collection of one or more
dictionaries passed to \*multiparams::
conn.execute(
table.insert(),
{"id":1, "value":"v1"},
{"id":2, "value":"v2"}
)
...or individual key/values interpreted by \**params::
conn.execute(
table.insert(), id=1, value="v1"
)
In the case that a plain SQL string is passed, and the underlying
DBAPI accepts positional bind parameters, a collection of tuples
or individual values in \*multiparams may be passed::
conn.execute(
"INSERT INTO table (id, value) VALUES (?, ?)",
(1, "v1"), (2, "v2")
)
conn.execute(
"INSERT INTO table (id, value) VALUES (?, ?)",
1, "v1"
)
        Note that above, the use of a question mark "?" or other
symbol is contingent upon the "paramstyle" accepted by the DBAPI
in use, which may be any of "qmark", "named", "pyformat", "format",
"numeric". See `pep-249
<https://www.python.org/dev/peps/pep-0249/>`_ for details on
paramstyle.
To execute a textual SQL statement which uses bound parameters in a
DBAPI-agnostic way, use the :func:`_expression.text` construct.
.. deprecated:: 2.0 use of tuple or scalar positional parameters
is deprecated. All params should be dicts or sequences of dicts.
Use :meth:`.exec_driver_sql` to execute a plain string with
tuple or scalar positional parameters.
"""
if isinstance(statement, util.string_types):
util.warn_deprecated_20(
"Passing a string to Connection.execute() is "
"deprecated and will be removed in version 2.0. Use the "
"text() construct, "
"or the Connection.exec_driver_sql() method to invoke a "
"driver-level SQL string."
)
return self._exec_driver_sql(
statement,
multiparams,
params,
_EMPTY_EXECUTION_OPTS,
future=False,
)
try:
meth = statement._execute_on_connection
except AttributeError as err:
util.raise_(
exc.ObjectNotExecutableError(statement), replace_context=err
)
else:
return meth(self, multiparams, params, _EMPTY_EXECUTION_OPTS)
def _execute_function(self, func, multiparams, params, execution_options):
"""Execute a sql.FunctionElement object."""
return self._execute_clauseelement(
func.select(), multiparams, params, execution_options
)
def _execute_default(
self,
default,
multiparams,
params,
# migrate is calling this directly :(
execution_options=_EMPTY_EXECUTION_OPTS,
):
"""Execute a schema.ColumnDefault object."""
execution_options = self._execution_options.merge_with(
execution_options
)
distilled_parameters = _distill_params(self, multiparams, params)
if self._has_events or self.engine._has_events:
(
default,
distilled_params,
event_multiparams,
event_params,
) = self._invoke_before_exec_event(
default, distilled_parameters, execution_options
)
try:
conn = self._dbapi_connection
if conn is None:
conn = self._revalidate_connection()
dialect = self.dialect
ctx = dialect.execution_ctx_cls._init_default(
dialect, self, conn, execution_options
)
except (exc.PendingRollbackError, exc.ResourceClosedError):
raise
except BaseException as e:
self._handle_dbapi_exception(e, None, None, None, None)
ret = ctx._exec_default(None, default, None)
if self.should_close_with_result:
self.close()
if self._has_events or self.engine._has_events:
self.dispatch.after_execute(
self,
default,
event_multiparams,
event_params,
execution_options,
ret,
)
return ret
def _execute_ddl(self, ddl, multiparams, params, execution_options):
"""Execute a schema.DDL object."""
execution_options = ddl._execution_options.merge_with(
self._execution_options, execution_options
)
distilled_parameters = _distill_params(self, multiparams, params)
if self._has_events or self.engine._has_events:
(
ddl,
distilled_params,
event_multiparams,
event_params,
) = self._invoke_before_exec_event(
ddl, distilled_parameters, execution_options
)
exec_opts = self._execution_options.merge_with(execution_options)
schema_translate_map = exec_opts.get("schema_translate_map", None)
dialect = self.dialect
compiled = ddl.compile(
dialect=dialect, schema_translate_map=schema_translate_map
)
ret = self._execute_context(
dialect,
dialect.execution_ctx_cls._init_ddl,
compiled,
None,
execution_options,
compiled,
)
if self._has_events or self.engine._has_events:
self.dispatch.after_execute(
self,
ddl,
event_multiparams,
event_params,
execution_options,
ret,
)
return ret
def _invoke_before_exec_event(
self, elem, distilled_params, execution_options
):
if len(distilled_params) == 1:
event_multiparams, event_params = [], distilled_params[0]
else:
event_multiparams, event_params = distilled_params, {}
for fn in self.dispatch.before_execute:
elem, event_multiparams, event_params = fn(
self,
elem,
event_multiparams,
event_params,
execution_options,
)
if event_multiparams:
distilled_params = list(event_multiparams)
if event_params:
raise exc.InvalidRequestError(
"Event handler can't return non-empty multiparams "
"and params at the same time"
)
elif event_params:
distilled_params = [event_params]
else:
distilled_params = []
return elem, distilled_params, event_multiparams, event_params
def _execute_clauseelement(
self, elem, multiparams, params, execution_options
):
"""Execute a sql.ClauseElement object."""
execution_options = elem._execution_options.merge_with(
self._execution_options, execution_options
)
distilled_params = _distill_params(self, multiparams, params)
has_events = self._has_events or self.engine._has_events
if has_events:
(
elem,
distilled_params,
event_multiparams,
event_params,
) = self._invoke_before_exec_event(
elem, distilled_params, execution_options
)
if distilled_params:
# ensure we don't retain a link to the view object for keys()
# which links to the values, which we don't want to cache
keys = sorted(distilled_params[0])
for_executemany = len(distilled_params) > 1
else:
keys = []
for_executemany = False
dialect = self.dialect
schema_translate_map = execution_options.get(
"schema_translate_map", None
)
compiled_cache = execution_options.get(
"compiled_cache", self.engine._compiled_cache
)
compiled_sql, extracted_params, cache_hit = elem._compile_w_cache(
dialect=dialect,
compiled_cache=compiled_cache,
column_keys=keys,
for_executemany=for_executemany,
schema_translate_map=schema_translate_map,
linting=self.dialect.compiler_linting | compiler.WARN_LINTING,
)
ret = self._execute_context(
dialect,
dialect.execution_ctx_cls._init_compiled,
compiled_sql,
distilled_params,
execution_options,
compiled_sql,
distilled_params,
elem,
extracted_params,
cache_hit=cache_hit,
)
if has_events:
self.dispatch.after_execute(
self,
elem,
event_multiparams,
event_params,
execution_options,
ret,
)
return ret
def _execute_compiled(
self,
compiled,
multiparams,
params,
execution_options=_EMPTY_EXECUTION_OPTS,
):
"""Execute a sql.Compiled object.
TODO: why do we have this? likely deprecate or remove
"""
execution_options = compiled.execution_options.merge_with(
self._execution_options, execution_options
)
distilled_parameters = _distill_params(self, multiparams, params)
if self._has_events or self.engine._has_events:
(
compiled,
distilled_params,
event_multiparams,
event_params,
) = self._invoke_before_exec_event(
compiled, distilled_parameters, execution_options
)
dialect = self.dialect
ret = self._execute_context(
dialect,
dialect.execution_ctx_cls._init_compiled,
compiled,
distilled_parameters,
execution_options,
compiled,
distilled_parameters,
None,
None,
)
if self._has_events or self.engine._has_events:
self.dispatch.after_execute(
self,
compiled,
event_multiparams,
event_params,
execution_options,
ret,
)
return ret
def _exec_driver_sql(
self, statement, multiparams, params, execution_options, future
):
execution_options = self._execution_options.merge_with(
execution_options
)
distilled_parameters = _distill_params(self, multiparams, params)
if not future:
if self._has_events or self.engine._has_events:
(
statement,
distilled_params,
event_multiparams,
event_params,
) = self._invoke_before_exec_event(
statement, distilled_parameters, execution_options
)
dialect = self.dialect
ret = self._execute_context(
dialect,
dialect.execution_ctx_cls._init_statement,
statement,
distilled_parameters,
execution_options,
statement,
distilled_parameters,
)
if not future:
if self._has_events or self.engine._has_events:
self.dispatch.after_execute(
self,
statement,
event_multiparams,
event_params,
execution_options,
ret,
)
return ret
def _execute_20(
self,
statement,
parameters=None,
execution_options=_EMPTY_EXECUTION_OPTS,
):
args_10style, kwargs_10style = _distill_params_20(parameters)
try:
meth = statement._execute_on_connection
except AttributeError as err:
util.raise_(
exc.ObjectNotExecutableError(statement), replace_context=err
)
else:
return meth(self, args_10style, kwargs_10style, execution_options)
def exec_driver_sql(
self, statement, parameters=None, execution_options=None
):
r"""Executes a SQL statement construct and returns a
:class:`_engine.CursorResult`.
:param statement: The statement str to be executed. Bound parameters
must use the underlying DBAPI's paramstyle, such as "qmark",
"pyformat", "format", etc.
:param parameters: represent bound parameter values to be used in the
execution. The format is one of: a dictionary of named parameters,
a tuple of positional parameters, or a list containing either
dictionaries or tuples for multiple-execute support.
E.g. multiple dictionaries::
conn.exec_driver_sql(
"INSERT INTO table (id, value) VALUES (%(id)s, %(value)s)",
[{"id":1, "value":"v1"}, {"id":2, "value":"v2"}]
)
Single dictionary::
conn.exec_driver_sql(
"INSERT INTO table (id, value) VALUES (%(id)s, %(value)s)",
dict(id=1, value="v1")
)
Single tuple::
conn.exec_driver_sql(
"INSERT INTO table (id, value) VALUES (?, ?)",
(1, 'v1')
)
.. note:: The :meth:`_engine.Connection.exec_driver_sql` method does
not participate in the
:meth:`_events.ConnectionEvents.before_execute` and
:meth:`_events.ConnectionEvents.after_execute` events. To
intercept calls to :meth:`_engine.Connection.exec_driver_sql`, use
:meth:`_events.ConnectionEvents.before_cursor_execute` and
:meth:`_events.ConnectionEvents.after_cursor_execute`.
.. seealso::
:pep:`249`
"""
args_10style, kwargs_10style = _distill_params_20(parameters)
return self._exec_driver_sql(
statement,
args_10style,
kwargs_10style,
execution_options,
future=True,
)
def _execute_context(
self,
dialect,
constructor,
statement,
parameters,
execution_options,
*args,
**kw
):
"""Create an :class:`.ExecutionContext` and execute, returning
a :class:`_engine.CursorResult`."""
branched = self
if self.__branch_from:
# if this is a "branched" connection, do everything in terms
# of the "root" connection, *except* for .close(), which is
# the only feature that branching provides
self = self.__branch_from
try:
conn = self._dbapi_connection
if conn is None:
conn = self._revalidate_connection()
context = constructor(
dialect, self, conn, execution_options, *args, **kw
)
except (exc.PendingRollbackError, exc.ResourceClosedError):
raise
except BaseException as e:
self._handle_dbapi_exception(
e, util.text_type(statement), parameters, None, None
)
if (
self._transaction
and not self._transaction.is_active
or (
self._nested_transaction
and not self._nested_transaction.is_active
)
):
self._invalid_transaction()
elif self._trans_context_manager:
TransactionalContext._trans_ctx_check(self)
if self._is_future and self._transaction is None:
self._autobegin()
context.pre_exec()
if dialect.use_setinputsizes:
context._set_input_sizes()
cursor, statement, parameters = (
context.cursor,
context.statement,
context.parameters,
)
if not context.executemany:
parameters = parameters[0]
if self._has_events or self.engine._has_events:
for fn in self.dispatch.before_cursor_execute:
statement, parameters = fn(
self,
cursor,
statement,
parameters,
context,
context.executemany,
)
if self._echo:
self._log_info(statement)
stats = context._get_cache_stats()
if not self.engine.hide_parameters:
self._log_info(
"[%s] %r",
stats,
sql_util._repr_params(
parameters, batches=10, ismulti=context.executemany
),
)
else:
self._log_info(
"[%s] [SQL parameters hidden due to hide_parameters=True]"
% (stats,)
)
evt_handled = False
try:
if context.executemany:
if self.dialect._has_events:
for fn in self.dialect.dispatch.do_executemany:
if fn(cursor, statement, parameters, context):
evt_handled = True
break
if not evt_handled:
self.dialect.do_executemany(
cursor, statement, parameters, context
)
elif not parameters and context.no_parameters:
if self.dialect._has_events:
for fn in self.dialect.dispatch.do_execute_no_params:
if fn(cursor, statement, context):
evt_handled = True
break
if not evt_handled:
self.dialect.do_execute_no_params(
cursor, statement, context
)
else:
if self.dialect._has_events:
for fn in self.dialect.dispatch.do_execute:
if fn(cursor, statement, parameters, context):
evt_handled = True
break
if not evt_handled:
self.dialect.do_execute(
cursor, statement, parameters, context
)
if self._has_events or self.engine._has_events:
self.dispatch.after_cursor_execute(
self,
cursor,
statement,
parameters,
context,
context.executemany,
)
context.post_exec()
result = context._setup_result_proxy()
if not self._is_future:
should_close_with_result = branched.should_close_with_result
if not result._soft_closed and should_close_with_result:
result._autoclose_connection = True
if (
# usually we're in a transaction so avoid relatively
# expensive / legacy should_autocommit call
self._transaction is None
and context.should_autocommit
):
self._commit_impl(autocommit=True)
# for "connectionless" execution, we have to close this
# Connection after the statement is complete.
# legacy stuff.
if should_close_with_result and context._soft_closed:
assert not self._is_future
# CursorResult already exhausted rows / has no rows.
# close us now
branched.close()
except BaseException as e:
self._handle_dbapi_exception(
e, statement, parameters, cursor, context
)
return result
def _cursor_execute(self, cursor, statement, parameters, context=None):
"""Execute a statement + params on the given cursor.
Adds appropriate logging and exception handling.
This method is used by DefaultDialect for special-case
executions, such as for sequences and column defaults.
The path of statement execution in the majority of cases
terminates at _execute_context().
"""
if self._has_events or self.engine._has_events:
for fn in self.dispatch.before_cursor_execute:
statement, parameters = fn(
self, cursor, statement, parameters, context, False
)
if self._echo:
self._log_info(statement)
self._log_info("[raw sql] %r", parameters)
try:
for fn in (
()
if not self.dialect._has_events
else self.dialect.dispatch.do_execute
):
if fn(cursor, statement, parameters, context):
break
else:
self.dialect.do_execute(cursor, statement, parameters, context)
except BaseException as e:
self._handle_dbapi_exception(
e, statement, parameters, cursor, context
)
if self._has_events or self.engine._has_events:
self.dispatch.after_cursor_execute(
self, cursor, statement, parameters, context, False
)
def _safe_close_cursor(self, cursor):
"""Close the given cursor, catching exceptions
        and turning them into log warnings.
"""
try:
cursor.close()
except Exception:
# log the error through the connection pool's logger.
self.engine.pool.logger.error(
"Error closing cursor", exc_info=True
)
_reentrant_error = False
_is_disconnect = False
def _handle_dbapi_exception(
self, e, statement, parameters, cursor, context
):
exc_info = sys.exc_info()
is_exit_exception = util.is_exit_exception(e)
if not self._is_disconnect:
self._is_disconnect = (
isinstance(e, self.dialect.dbapi.Error)
and not self.closed
and self.dialect.is_disconnect(
e,
self._dbapi_connection if not self.invalidated else None,
cursor,
)
) or (is_exit_exception and not self.closed)
invalidate_pool_on_disconnect = not is_exit_exception
if self._reentrant_error:
util.raise_(
exc.DBAPIError.instance(
statement,
parameters,
e,
self.dialect.dbapi.Error,
hide_parameters=self.engine.hide_parameters,
dialect=self.dialect,
ismulti=context.executemany
if context is not None
else None,
),
with_traceback=exc_info[2],
from_=e,
)
self._reentrant_error = True
try:
# non-DBAPI error - if we already got a context,
# or there's no string statement, don't wrap it
should_wrap = isinstance(e, self.dialect.dbapi.Error) or (
statement is not None
and context is None
and not is_exit_exception
)
if should_wrap:
sqlalchemy_exception = exc.DBAPIError.instance(
statement,
parameters,
e,
self.dialect.dbapi.Error,
hide_parameters=self.engine.hide_parameters,
connection_invalidated=self._is_disconnect,
dialect=self.dialect,
ismulti=context.executemany
if context is not None
else None,
)
else:
sqlalchemy_exception = None
newraise = None
if (
self._has_events or self.engine._has_events
) and not self._execution_options.get(
"skip_user_error_events", False
):
ctx = ExceptionContextImpl(
e,
sqlalchemy_exception,
self.engine,
self,
cursor,
statement,
parameters,
context,
self._is_disconnect,
invalidate_pool_on_disconnect,
)
for fn in self.dispatch.handle_error:
try:
# handler returns an exception;
# call next handler in a chain
per_fn = fn(ctx)
if per_fn is not None:
ctx.chained_exception = newraise = per_fn
except Exception as _raised:
# handler raises an exception - stop processing
newraise = _raised
break
if self._is_disconnect != ctx.is_disconnect:
self._is_disconnect = ctx.is_disconnect
if sqlalchemy_exception:
sqlalchemy_exception.connection_invalidated = (
ctx.is_disconnect
)
# set up potentially user-defined value for
# invalidate pool.
invalidate_pool_on_disconnect = (
ctx.invalidate_pool_on_disconnect
)
if should_wrap and context:
context.handle_dbapi_exception(e)
if not self._is_disconnect:
if cursor:
self._safe_close_cursor(cursor)
with util.safe_reraise(warn_only=True):
self._autorollback()
if newraise:
util.raise_(newraise, with_traceback=exc_info[2], from_=e)
elif should_wrap:
util.raise_(
sqlalchemy_exception, with_traceback=exc_info[2], from_=e
)
else:
util.raise_(exc_info[1], with_traceback=exc_info[2])
finally:
del self._reentrant_error
if self._is_disconnect:
del self._is_disconnect
if not self.invalidated:
dbapi_conn_wrapper = self._dbapi_connection
if invalidate_pool_on_disconnect:
self.engine.pool._invalidate(dbapi_conn_wrapper, e)
self.invalidate(e)
if self.should_close_with_result:
assert not self._is_future
self.close()
@classmethod
def _handle_dbapi_exception_noconnection(cls, e, dialect, engine):
exc_info = sys.exc_info()
is_disconnect = dialect.is_disconnect(e, None, None)
should_wrap = isinstance(e, dialect.dbapi.Error)
if should_wrap:
sqlalchemy_exception = exc.DBAPIError.instance(
None,
None,
e,
dialect.dbapi.Error,
hide_parameters=engine.hide_parameters,
connection_invalidated=is_disconnect,
)
else:
sqlalchemy_exception = None
newraise = None
if engine._has_events:
ctx = ExceptionContextImpl(
e,
sqlalchemy_exception,
engine,
None,
None,
None,
None,
None,
is_disconnect,
True,
)
for fn in engine.dispatch.handle_error:
try:
# handler returns an exception;
# call next handler in a chain
per_fn = fn(ctx)
if per_fn is not None:
ctx.chained_exception = newraise = per_fn
except Exception as _raised:
# handler raises an exception - stop processing
newraise = _raised
break
if sqlalchemy_exception and is_disconnect != ctx.is_disconnect:
sqlalchemy_exception.connection_invalidated = (
is_disconnect
) = ctx.is_disconnect
if newraise:
util.raise_(newraise, with_traceback=exc_info[2], from_=e)
elif should_wrap:
util.raise_(
sqlalchemy_exception, with_traceback=exc_info[2], from_=e
)
else:
util.raise_(exc_info[1], with_traceback=exc_info[2])
def _run_ddl_visitor(self, visitorcallable, element, **kwargs):
"""run a DDL visitor.
This method is only here so that the MockConnection can change the
options given to the visitor so that "checkfirst" is skipped.
"""
visitorcallable(self.dialect, self, **kwargs).traverse_single(element)
@util.deprecated(
"1.4",
"The :meth:`_engine.Connection.transaction` "
"method is deprecated and will be "
"removed in a future release. Use the :meth:`_engine.Engine.begin` "
"context manager instead.",
)
def transaction(self, callable_, *args, **kwargs):
r"""Execute the given function within a transaction boundary.
The function is passed this :class:`_engine.Connection`
as the first argument, followed by the given \*args and \**kwargs,
e.g.::
def do_something(conn, x, y):
conn.execute(text("some statement"), {'x':x, 'y':y})
conn.transaction(do_something, 5, 10)
The operations inside the function are all invoked within the
context of a single :class:`.Transaction`.
Upon success, the transaction is committed. If an
exception is raised, the transaction is rolled back
before propagating the exception.
.. note::
The :meth:`.transaction` method is superseded by
the usage of the Python ``with:`` statement, which can
be used with :meth:`_engine.Connection.begin`::
with conn.begin():
conn.execute(text("some statement"), {'x':5, 'y':10})
As well as with :meth:`_engine.Engine.begin`::
with engine.begin() as conn:
conn.execute(text("some statement"), {'x':5, 'y':10})
.. seealso::
:meth:`_engine.Engine.begin` - engine-level transactional
context
:meth:`_engine.Engine.transaction` - engine-level version of
:meth:`_engine.Connection.transaction`
"""
kwargs["_sa_skip_warning"] = True
trans = self.begin()
try:
ret = self.run_callable(callable_, *args, **kwargs)
trans.commit()
return ret
except:
with util.safe_reraise():
trans.rollback()
@util.deprecated(
"1.4",
"The :meth:`_engine.Connection.run_callable` "
"method is deprecated and will "
"be removed in a future release. Invoke the callable function "
"directly, passing the Connection.",
)
def run_callable(self, callable_, *args, **kwargs):
r"""Given a callable object or function, execute it, passing
a :class:`_engine.Connection` as the first argument.
The given \*args and \**kwargs are passed subsequent
to the :class:`_engine.Connection` argument.
This function, along with :meth:`_engine.Engine.run_callable`,
allows a function to be run with a :class:`_engine.Connection`
or :class:`_engine.Engine` object without the need to know
which one is being dealt with.
"""
return callable_(self, *args, **kwargs)
class ExceptionContextImpl(ExceptionContext):
"""Implement the :class:`.ExceptionContext` interface."""
def __init__(
self,
exception,
sqlalchemy_exception,
engine,
connection,
cursor,
statement,
parameters,
context,
is_disconnect,
invalidate_pool_on_disconnect,
):
self.engine = engine
self.connection = connection
self.sqlalchemy_exception = sqlalchemy_exception
self.original_exception = exception
self.execution_context = context
self.statement = statement
self.parameters = parameters
self.is_disconnect = is_disconnect
self.invalidate_pool_on_disconnect = invalidate_pool_on_disconnect
class Transaction(TransactionalContext):
"""Represent a database transaction in progress.
The :class:`.Transaction` object is procured by
calling the :meth:`_engine.Connection.begin` method of
:class:`_engine.Connection`::
from sqlalchemy import create_engine
engine = create_engine("postgresql://scott:tiger@localhost/test")
connection = engine.connect()
trans = connection.begin()
connection.execute(text("insert into x (a, b) values (1, 2)"))
trans.commit()
The object provides :meth:`.rollback` and :meth:`.commit`
methods in order to control transaction boundaries. It
also implements a context manager interface so that
the Python ``with`` statement can be used with the
:meth:`_engine.Connection.begin` method::
with connection.begin():
connection.execute(text("insert into x (a, b) values (1, 2)"))
The Transaction object is **not** threadsafe.
.. seealso::
:meth:`_engine.Connection.begin`
:meth:`_engine.Connection.begin_twophase`
:meth:`_engine.Connection.begin_nested`
.. index::
single: thread safety; Transaction
"""
__slots__ = ()
_is_root = False
def __init__(self, connection):
raise NotImplementedError()
def _do_deactivate(self):
"""do whatever steps are necessary to set this transaction as
"deactive", however leave this transaction object in place as far
as the connection's state.
for a "real" transaction this should roll back the transaction
and ensure this transaction is no longer a reset agent.
this is used for nesting of marker transactions where the marker
can set the "real" transaction as rolled back, however it stays
in place.
for 2.0 we hope to remove this nesting feature.
"""
raise NotImplementedError()
@property
def _deactivated_from_connection(self):
"""True if this transaction is totally deactivated from the connection
and therefore can no longer affect its state.
"""
raise NotImplementedError()
def _do_close(self):
raise NotImplementedError()
def _do_rollback(self):
raise NotImplementedError()
def _do_commit(self):
raise NotImplementedError()
@property
def is_valid(self):
return self.is_active and not self.connection.invalidated
def close(self):
"""Close this :class:`.Transaction`.
        If this transaction is the base transaction in a begin/commit
        nesting, the transaction will be rolled back.  Otherwise, the
        method simply returns.
This is used to cancel a Transaction without affecting the scope of
an enclosing transaction.
"""
try:
self._do_close()
finally:
assert not self.is_active
def rollback(self):
"""Roll back this :class:`.Transaction`.
The implementation of this may vary based on the type of transaction in
use:
* For a simple database transaction (e.g. :class:`.RootTransaction`),
it corresponds to a ROLLBACK.
* For a :class:`.NestedTransaction`, it corresponds to a
"ROLLBACK TO SAVEPOINT" operation.
* For a :class:`.TwoPhaseTransaction`, DBAPI-specific methods for two
phase transactions may be used.
"""
try:
self._do_rollback()
finally:
assert not self.is_active
def commit(self):
"""Commit this :class:`.Transaction`.
The implementation of this may vary based on the type of transaction in
use:
* For a simple database transaction (e.g. :class:`.RootTransaction`),
it corresponds to a COMMIT.
* For a :class:`.NestedTransaction`, it corresponds to a
"RELEASE SAVEPOINT" operation.
* For a :class:`.TwoPhaseTransaction`, DBAPI-specific methods for two
phase transactions may be used.
"""
try:
self._do_commit()
finally:
assert not self.is_active
def _get_subject(self):
return self.connection
def _transaction_is_active(self):
return self.is_active
def _transaction_is_closed(self):
return not self._deactivated_from_connection
class MarkerTransaction(Transaction):
"""A 'marker' transaction that is used for nested begin() calls.
    .. deprecated:: 1.4 The "future" connection for SQLAlchemy 2.0 does not support this pattern.
"""
__slots__ = ("connection", "_is_active", "_transaction")
def __init__(self, connection):
assert connection._transaction is not None
if not connection._transaction.is_active:
raise exc.InvalidRequestError(
"the current transaction on this connection is inactive. "
"Please issue a rollback first."
)
assert not connection._is_future
util.warn_deprecated_20(
"Calling .begin() when a transaction is already begun, creating "
"a 'sub' transaction, is deprecated "
"and will be removed in 2.0. See the documentation section "
"'Migrating from the nesting pattern' for background on how "
"to migrate from this pattern."
)
self.connection = connection
if connection._trans_context_manager:
TransactionalContext._trans_ctx_check(connection)
if connection._nested_transaction is not None:
self._transaction = connection._nested_transaction
else:
self._transaction = connection._transaction
self._is_active = True
@property
def _deactivated_from_connection(self):
return not self.is_active
@property
def is_active(self):
return self._is_active and self._transaction.is_active
def _deactivate(self):
self._is_active = False
def _do_close(self):
# does not actually roll back the root
self._deactivate()
def _do_rollback(self):
# does roll back the root
if self._is_active:
try:
self._transaction._do_deactivate()
finally:
self._deactivate()
def _do_commit(self):
self._deactivate()
class RootTransaction(Transaction):
"""Represent the "root" transaction on a :class:`_engine.Connection`.
This corresponds to the current "BEGIN/COMMIT/ROLLBACK" that's occurring
for the :class:`_engine.Connection`. The :class:`_engine.RootTransaction`
is created by calling upon the :meth:`_engine.Connection.begin` method, and
remains associated with the :class:`_engine.Connection` throughout its
active span. The current :class:`_engine.RootTransaction` in use is
    accessible via the :meth:`_engine.Connection.get_transaction` method of
:class:`_engine.Connection`.
In :term:`2.0 style` use, the :class:`_future.Connection` also employs
"autobegin" behavior that will create a new
:class:`_engine.RootTransaction` whenever a connection in a
non-transactional state is used to emit commands on the DBAPI connection.
The scope of the :class:`_engine.RootTransaction` in 2.0 style
use can be controlled using the :meth:`_future.Connection.commit` and
:meth:`_future.Connection.rollback` methods.
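    E.g., an illustrative sketch using the 1.x style API::

        with engine.connect() as conn:
            trans = conn.begin()   # a RootTransaction
            assert conn.get_transaction() is trans
            trans.commit()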
"""
_is_root = True
__slots__ = ("connection", "is_active")
def __init__(self, connection):
assert connection._transaction is None
if connection._trans_context_manager:
TransactionalContext._trans_ctx_check(connection)
self.connection = connection
self._connection_begin_impl()
connection._transaction = self
self.is_active = True
def _deactivate_from_connection(self):
if self.is_active:
assert self.connection._transaction is self
self.is_active = False
elif self.connection._transaction is not self:
util.warn("transaction already deassociated from connection")
@property
def _deactivated_from_connection(self):
return self.connection._transaction is not self
def _do_deactivate(self):
# called from a MarkerTransaction to cancel this root transaction.
# the transaction stays in place as connection._transaction, but
# is no longer active and is no longer the reset agent for the
# pooled connection. the connection won't support a new begin()
# until this transaction is explicitly closed, rolled back,
# or committed.
assert self.connection._transaction is self
if self.is_active:
self._connection_rollback_impl()
# handle case where a savepoint was created inside of a marker
# transaction that refers to a root. nested has to be cancelled
# also.
if self.connection._nested_transaction:
self.connection._nested_transaction._cancel()
self._deactivate_from_connection()
def _connection_begin_impl(self):
self.connection._begin_impl(self)
def _connection_rollback_impl(self):
self.connection._rollback_impl()
def _connection_commit_impl(self):
self.connection._commit_impl()
def _close_impl(self, try_deactivate=False):
try:
if self.is_active:
self._connection_rollback_impl()
if self.connection._nested_transaction:
self.connection._nested_transaction._cancel()
finally:
if self.is_active or try_deactivate:
self._deactivate_from_connection()
if self.connection._transaction is self:
self.connection._transaction = None
assert not self.is_active
assert self.connection._transaction is not self
def _do_close(self):
self._close_impl()
def _do_rollback(self):
self._close_impl(try_deactivate=True)
def _do_commit(self):
if self.is_active:
assert self.connection._transaction is self
try:
self._connection_commit_impl()
finally:
# whether or not commit succeeds, cancel any
# nested transactions, make this transaction "inactive"
# and remove it as a reset agent
if self.connection._nested_transaction:
self.connection._nested_transaction._cancel()
self._deactivate_from_connection()
# ...however only remove as the connection's current transaction
# if commit succeeded. otherwise it stays on so that a rollback
# needs to occur.
self.connection._transaction = None
else:
if self.connection._transaction is self:
self.connection._invalid_transaction()
else:
raise exc.InvalidRequestError("This transaction is inactive")
assert not self.is_active
assert self.connection._transaction is not self
class NestedTransaction(Transaction):
"""Represent a 'nested', or SAVEPOINT transaction.
The :class:`.NestedTransaction` object is created by calling the
:meth:`_engine.Connection.begin_nested` method of
:class:`_engine.Connection`.
When using :class:`.NestedTransaction`, the semantics of "begin" /
"commit" / "rollback" are as follows:
* the "begin" operation corresponds to the "BEGIN SAVEPOINT" command, where
the savepoint is given an explicit name that is part of the state
of this object.
* The :meth:`.NestedTransaction.commit` method corresponds to a
"RELEASE SAVEPOINT" operation, using the savepoint identifier associated
with this :class:`.NestedTransaction`.
* The :meth:`.NestedTransaction.rollback` method corresponds to a
"ROLLBACK TO SAVEPOINT" operation, using the savepoint identifier
associated with this :class:`.NestedTransaction`.
    The rationale for mimicking the semantics of an outer transaction in
    terms of savepoints is so that code may deal with a "savepoint"
    transaction and an "outer" transaction in an agnostic way.
.. seealso::
:ref:`session_begin_nested` - ORM version of the SAVEPOINT API.
"""
__slots__ = ("connection", "is_active", "_savepoint", "_previous_nested")
def __init__(self, connection):
assert connection._transaction is not None
if connection._trans_context_manager:
TransactionalContext._trans_ctx_check(connection)
self.connection = connection
self._savepoint = self.connection._savepoint_impl()
self.is_active = True
self._previous_nested = connection._nested_transaction
connection._nested_transaction = self
def _deactivate_from_connection(self, warn=True):
if self.connection._nested_transaction is self:
self.connection._nested_transaction = self._previous_nested
elif warn:
util.warn(
"nested transaction already deassociated from connection"
)
@property
def _deactivated_from_connection(self):
return self.connection._nested_transaction is not self
def _cancel(self):
# called by RootTransaction when the outer transaction is
# committed, rolled back, or closed to cancel all savepoints
# without any action being taken
self.is_active = False
self._deactivate_from_connection()
if self._previous_nested:
self._previous_nested._cancel()
def _close_impl(self, deactivate_from_connection, warn_already_deactive):
try:
if self.is_active and self.connection._transaction.is_active:
self.connection._rollback_to_savepoint_impl(self._savepoint)
finally:
self.is_active = False
if deactivate_from_connection:
self._deactivate_from_connection(warn=warn_already_deactive)
assert not self.is_active
if deactivate_from_connection:
assert self.connection._nested_transaction is not self
def _do_deactivate(self):
self._close_impl(False, False)
def _do_close(self):
self._close_impl(True, False)
def _do_rollback(self):
self._close_impl(True, True)
def _do_commit(self):
if self.is_active:
try:
self.connection._release_savepoint_impl(self._savepoint)
finally:
# nested trans becomes inactive on failed release
# unconditionally. this prevents it from trying to
# emit SQL when it rolls back.
self.is_active = False
# but only de-associate from connection if it succeeded
self._deactivate_from_connection()
else:
if self.connection._nested_transaction is self:
self.connection._invalid_transaction()
else:
raise exc.InvalidRequestError(
"This nested transaction is inactive"
)
class TwoPhaseTransaction(RootTransaction):
"""Represent a two-phase transaction.
A new :class:`.TwoPhaseTransaction` object may be procured
using the :meth:`_engine.Connection.begin_twophase` method.
The interface is the same as that of :class:`.Transaction`
with the addition of the :meth:`prepare` method.
"""
__slots__ = ("connection", "is_active", "xid", "_is_prepared")
def __init__(self, connection, xid):
self._is_prepared = False
self.xid = xid
super(TwoPhaseTransaction, self).__init__(connection)
def prepare(self):
"""Prepare this :class:`.TwoPhaseTransaction`.
After a PREPARE, the transaction can be committed.
"""
if not self.is_active:
raise exc.InvalidRequestError("This transaction is inactive")
self.connection._prepare_twophase_impl(self.xid)
self._is_prepared = True
def _connection_begin_impl(self):
self.connection._begin_twophase_impl(self)
def _connection_rollback_impl(self):
self.connection._rollback_twophase_impl(self.xid, self._is_prepared)
def _connection_commit_impl(self):
self.connection._commit_twophase_impl(self.xid, self._is_prepared)
class Engine(Connectable, log.Identified):
"""
Connects a :class:`~sqlalchemy.pool.Pool` and
:class:`~sqlalchemy.engine.interfaces.Dialect` together to provide a
source of database connectivity and behavior.
This is the **SQLAlchemy 1.x version** of :class:`_engine.Engine`. For
the :term:`2.0 style` version, which includes some API differences,
see :class:`_future.Engine`.
An :class:`_engine.Engine` object is instantiated publicly using the
:func:`~sqlalchemy.create_engine` function.
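    E.g.::

        from sqlalchemy import create_engine

        engine = create_engine("postgresql://scott:tiger@localhost/test")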
.. seealso::
:doc:`/core/engines`
:ref:`connections_toplevel`
"""
_execution_options = _EMPTY_EXECUTION_OPTS
_has_events = False
_connection_cls = Connection
_sqla_logger_namespace = "sqlalchemy.engine.Engine"
_is_future = False
_schema_translate_map = None
def __init__(
self,
pool,
dialect,
url,
logging_name=None,
echo=None,
query_cache_size=500,
execution_options=None,
hide_parameters=False,
):
self.pool = pool
self.url = url
self.dialect = dialect
if logging_name:
self.logging_name = logging_name
self.echo = echo
self.hide_parameters = hide_parameters
if query_cache_size != 0:
self._compiled_cache = util.LRUCache(
query_cache_size, size_alert=self._lru_size_alert
)
else:
self._compiled_cache = None
log.instance_logger(self, echoflag=echo)
if execution_options:
self.update_execution_options(**execution_options)
def _lru_size_alert(self, cache):
if self._should_log_info:
self.logger.info(
"Compiled cache size pruning from %d items to %d. "
"Increase cache size to reduce the frequency of pruning.",
len(cache),
cache.capacity,
)
@property
def engine(self):
return self
def clear_compiled_cache(self):
"""Clear the compiled cache associated with the dialect.
This applies **only** to the built-in cache that is established
via the :paramref:`_engine.create_engine.query_cache_size` parameter.
It will not impact any dictionary caches that were passed via the
        :paramref:`.Connection.execution_options.compiled_cache` parameter.
.. versionadded:: 1.4
"""
if self._compiled_cache:
self._compiled_cache.clear()
def update_execution_options(self, **opt):
r"""Update the default execution_options dictionary
of this :class:`_engine.Engine`.
The given keys/values in \**opt are added to the
default execution options that will be used for
all connections. The initial contents of this dictionary
can be sent via the ``execution_options`` parameter
to :func:`_sa.create_engine`.
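        E.g., an illustrative sketch; support for the ``isolation_level``
        option depends on the backend in use::

            engine.update_execution_options(isolation_level="READ COMMITTED")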
.. seealso::
:meth:`_engine.Connection.execution_options`
:meth:`_engine.Engine.execution_options`
"""
self._execution_options = self._execution_options.union(opt)
self.dispatch.set_engine_execution_options(self, opt)
self.dialect.set_engine_execution_options(self, opt)
def execution_options(self, **opt):
"""Return a new :class:`_engine.Engine` that will provide
:class:`_engine.Connection` objects with the given execution options.
The returned :class:`_engine.Engine` remains related to the original
:class:`_engine.Engine` in that it shares the same connection pool and
other state:
* The :class:`_pool.Pool` used by the new :class:`_engine.Engine`
is the
same instance. The :meth:`_engine.Engine.dispose`
method will replace
the connection pool instance for the parent engine as well
as this one.
* Event listeners are "cascaded" - meaning, the new
:class:`_engine.Engine`
inherits the events of the parent, and new events can be associated
with the new :class:`_engine.Engine` individually.
* The logging configuration and logging_name is copied from the parent
:class:`_engine.Engine`.
The intent of the :meth:`_engine.Engine.execution_options` method is
to implement "sharding" schemes where multiple :class:`_engine.Engine`
objects refer to the same connection pool, but are differentiated
by options that would be consumed by a custom event::
primary_engine = create_engine("mysql://")
shard1 = primary_engine.execution_options(shard_id="shard1")
shard2 = primary_engine.execution_options(shard_id="shard2")
Above, the ``shard1`` engine serves as a factory for
:class:`_engine.Connection`
objects that will contain the execution option
``shard_id=shard1``, and ``shard2`` will produce
:class:`_engine.Connection`
objects that contain the execution option ``shard_id=shard2``.
An event handler can consume the above execution option to perform
a schema switch or other operation, given a connection. Below
we emit a MySQL ``use`` statement to switch databases, at the same
time keeping track of which database we've established using the
:attr:`_engine.Connection.info` dictionary,
which gives us a persistent
storage space that follows the DBAPI connection::
from sqlalchemy import event
from sqlalchemy.engine import Engine
shards = {"default": "base", shard_1: "db1", "shard_2": "db2"}
@event.listens_for(Engine, "before_cursor_execute")
def _switch_shard(conn, cursor, stmt,
params, context, executemany):
shard_id = conn._execution_options.get('shard_id', "default")
current_shard = conn.info.get("current_shard", None)
if current_shard != shard_id:
cursor.execute("use %s" % shards[shard_id])
conn.info["current_shard"] = shard_id
.. seealso::
:meth:`_engine.Connection.execution_options`
- update execution options
on a :class:`_engine.Connection` object.
:meth:`_engine.Engine.update_execution_options`
- update the execution
options for a given :class:`_engine.Engine` in place.
:meth:`_engine.Engine.get_execution_options`
"""
return self._option_cls(self, opt)
def get_execution_options(self):
"""Get the non-SQL options which will take effect during execution.
        .. versionadded:: 1.3
.. seealso::
:meth:`_engine.Engine.execution_options`
"""
return self._execution_options
@property
def name(self):
"""String name of the :class:`~sqlalchemy.engine.interfaces.Dialect`
in use by this :class:`Engine`."""
return self.dialect.name
@property
def driver(self):
"""Driver name of the :class:`~sqlalchemy.engine.interfaces.Dialect`
in use by this :class:`Engine`."""
return self.dialect.driver
echo = log.echo_property()
def __repr__(self):
return "Engine(%r)" % (self.url,)
def dispose(self):
"""Dispose of the connection pool used by this
:class:`_engine.Engine`.
This has the effect of fully closing all **currently checked in**
database connections. Connections that are still checked out
        will **not** be closed; however, they will no longer be associated
with this :class:`_engine.Engine`,
so when they are closed individually,
eventually the :class:`_pool.Pool` which they are associated with will
be garbage collected and they will be closed out fully, if
not already closed on checkin.
A new connection pool is created immediately after the old one has
been disposed. This new pool, like all SQLAlchemy connection pools,
does not make any actual connections to the database until one is
first requested, so as long as the :class:`_engine.Engine`
isn't used again,
no new connections will be made.
.. seealso::
:ref:`engine_disposal`
"""
self.pool.dispose()
self.pool = self.pool.recreate()
self.dispatch.engine_disposed(self)
def _execute_default(
self, default, multiparams=(), params=util.EMPTY_DICT
):
with self.connect() as conn:
return conn._execute_default(default, multiparams, params)
@contextlib.contextmanager
def _optional_conn_ctx_manager(self, connection=None):
if connection is None:
with self.connect() as conn:
yield conn
else:
yield connection
class _trans_ctx(object):
def __init__(self, conn, transaction, close_with_result):
self.conn = conn
self.transaction = transaction
self.close_with_result = close_with_result
def __enter__(self):
self.transaction.__enter__()
return self.conn
def __exit__(self, type_, value, traceback):
try:
self.transaction.__exit__(type_, value, traceback)
finally:
if not self.close_with_result:
self.conn.close()
def begin(self, close_with_result=False):
"""Return a context manager delivering a :class:`_engine.Connection`
with a :class:`.Transaction` established.
E.g.::
with engine.begin() as conn:
conn.execute(
text("insert into table (x, y, z) values (1, 2, 3)")
)
conn.execute(text("my_special_procedure(5)"))
Upon successful operation, the :class:`.Transaction`
is committed. If an error is raised, the :class:`.Transaction`
is rolled back.
Legacy use only: the ``close_with_result`` flag is normally ``False``,
and indicates that the :class:`_engine.Connection` will be closed when
the operation is complete. When set to ``True``, it indicates the
:class:`_engine.Connection` is in "single use" mode, where the
:class:`_engine.CursorResult` returned by the first call to
:meth:`_engine.Connection.execute` will close the
:class:`_engine.Connection` when that :class:`_engine.CursorResult` has
exhausted all result rows.
.. seealso::
:meth:`_engine.Engine.connect` - procure a
:class:`_engine.Connection` from
an :class:`_engine.Engine`.
:meth:`_engine.Connection.begin` - start a :class:`.Transaction`
for a particular :class:`_engine.Connection`.
"""
if self._connection_cls._is_future:
conn = self.connect()
else:
conn = self.connect(close_with_result=close_with_result)
try:
trans = conn.begin()
except:
with util.safe_reraise():
conn.close()
return Engine._trans_ctx(conn, trans, close_with_result)
@util.deprecated(
"1.4",
"The :meth:`_engine.Engine.transaction` "
"method is deprecated and will be "
"removed in a future release. Use the :meth:`_engine.Engine.begin` "
"context "
"manager instead.",
)
def transaction(self, callable_, *args, **kwargs):
r"""Execute the given function within a transaction boundary.
The function is passed a :class:`_engine.Connection` newly procured
from :meth:`_engine.Engine.connect` as the first argument,
followed by the given \*args and \**kwargs.
e.g.::
def do_something(conn, x, y):
conn.execute(text("some statement"), {'x':x, 'y':y})
engine.transaction(do_something, 5, 10)
The operations inside the function are all invoked within the
context of a single :class:`.Transaction`.
Upon success, the transaction is committed. If an
exception is raised, the transaction is rolled back
before propagating the exception.
.. note::
The :meth:`.transaction` method is superseded by
the usage of the Python ``with:`` statement, which can
be used with :meth:`_engine.Engine.begin`::
with engine.begin() as conn:
conn.execute(text("some statement"), {'x':5, 'y':10})
.. seealso::
:meth:`_engine.Engine.begin` - engine-level transactional
context
:meth:`_engine.Connection.transaction`
- connection-level version of
:meth:`_engine.Engine.transaction`
"""
kwargs["_sa_skip_warning"] = True
with self.connect() as conn:
return conn.transaction(callable_, *args, **kwargs)
@util.deprecated(
"1.4",
"The :meth:`_engine.Engine.run_callable` "
"method is deprecated and will be "
"removed in a future release. Use the :meth:`_engine.Engine.begin` "
"context manager instead.",
)
def run_callable(self, callable_, *args, **kwargs):
r"""Given a callable object or function, execute it, passing
a :class:`_engine.Connection` as the first argument.
The given \*args and \**kwargs are passed subsequent
to the :class:`_engine.Connection` argument.
This function, along with :meth:`_engine.Connection.run_callable`,
allows a function to be run with a :class:`_engine.Connection`
or :class:`_engine.Engine` object without the need to know
which one is being dealt with.
"""
kwargs["_sa_skip_warning"] = True
with self.connect() as conn:
return conn.run_callable(callable_, *args, **kwargs)
def _run_ddl_visitor(self, visitorcallable, element, **kwargs):
with self.begin() as conn:
conn._run_ddl_visitor(visitorcallable, element, **kwargs)
@util.deprecated_20(
":meth:`_engine.Engine.execute`",
alternative="All statement execution in SQLAlchemy 2.0 is performed "
"by the :meth:`_engine.Connection.execute` method of "
":class:`_engine.Connection`, "
"or in the ORM by the :meth:`.Session.execute` method of "
":class:`.Session`.",
)
def execute(self, statement, *multiparams, **params):
"""Executes the given construct and returns a
:class:`_engine.CursorResult`.
The arguments are the same as those used by
:meth:`_engine.Connection.execute`.
Here, a :class:`_engine.Connection` is acquired using the
:meth:`_engine.Engine.connect` method, and the statement executed
with that connection. The returned :class:`_engine.CursorResult`
is flagged
such that when the :class:`_engine.CursorResult` is exhausted and its
underlying cursor is closed, the :class:`_engine.Connection`
created here
will also be closed, which allows its associated DBAPI connection
resource to be returned to the connection pool.
"""
connection = self.connect(close_with_result=True)
return connection.execute(statement, *multiparams, **params)
@util.deprecated_20(
":meth:`_engine.Engine.scalar`",
alternative="All statement execution in SQLAlchemy 2.0 is performed "
"by the :meth:`_engine.Connection.execute` method of "
":class:`_engine.Connection`, "
"or in the ORM by the :meth:`.Session.execute` method of "
":class:`.Session`; the :meth:`_future.Result.scalar` "
"method can then be "
"used to return a scalar result.",
)
def scalar(self, statement, *multiparams, **params):
"""Executes and returns the first column of the first row.
The underlying result/cursor is closed after execution.
"""
return self.execute(statement, *multiparams, **params).scalar()
def _execute_clauseelement(
self,
elem,
multiparams=None,
params=None,
execution_options=_EMPTY_EXECUTION_OPTS,
):
connection = self.connect(close_with_result=True)
return connection._execute_clauseelement(
elem, multiparams, params, execution_options
)
def _execute_compiled(
self,
compiled,
multiparams,
params,
execution_options=_EMPTY_EXECUTION_OPTS,
):
connection = self.connect(close_with_result=True)
return connection._execute_compiled(
compiled, multiparams, params, execution_options
)
def connect(self, close_with_result=False):
"""Return a new :class:`_engine.Connection` object.
The :class:`_engine.Connection` object is a facade that uses a DBAPI
connection internally in order to communicate with the database. This
connection is procured from the connection-holding :class:`_pool.Pool`
referenced by this :class:`_engine.Engine`. When the
:meth:`_engine.Connection.close` method of the
:class:`_engine.Connection` object
is called, the underlying DBAPI connection is then returned to the
connection pool, where it may be used again in a subsequent call to
:meth:`_engine.Engine.connect`.
"""
return self._connection_cls(self, close_with_result=close_with_result)
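    # Typical usage sketch: the returned Connection is normally used as a
    # context manager so the underlying DBAPI connection is released back to
    # the pool as soon as the block exits, e.g.
    #
    #     with engine.connect() as conn:
    #         conn.execute(...)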
@util.deprecated(
"1.4",
"The :meth:`_engine.Engine.table_names` "
"method is deprecated and will be "
"removed in a future release. Please refer to "
":meth:`_reflection.Inspector.get_table_names`.",
)
def table_names(self, schema=None, connection=None):
"""Return a list of all table names available in the database.
:param schema: Optional, retrieve names from a non-default schema.
:param connection: Optional, use a specified connection.
"""
with self._optional_conn_ctx_manager(connection) as conn:
insp = inspection.inspect(conn)
return insp.get_table_names(schema)
@util.deprecated(
"1.4",
"The :meth:`_engine.Engine.has_table` "
"method is deprecated and will be "
"removed in a future release. Please refer to "
":meth:`_reflection.Inspector.has_table`.",
)
def has_table(self, table_name, schema=None):
"""Return True if the given backend has a table of the given name.
.. seealso::
:ref:`metadata_reflection_inspector` - detailed schema inspection
using the :class:`_reflection.Inspector` interface.
:class:`.quoted_name` - used to pass quoting information along
with a schema identifier.
"""
with self._optional_conn_ctx_manager(None) as conn:
insp = inspection.inspect(conn)
return insp.has_table(table_name, schema=schema)
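    # _wrap_pool_connect runs a pool checkout and translates DBAPI-level
    # errors: with no Connection available yet the error is handed to the
    # dialect-aware handler, otherwise it is re-raised unchanged.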
def _wrap_pool_connect(self, fn, connection):
dialect = self.dialect
try:
return fn()
except dialect.dbapi.Error as e:
if connection is None:
Connection._handle_dbapi_exception_noconnection(
e, dialect, self
)
else:
util.raise_(
sys.exc_info()[1], with_traceback=sys.exc_info()[2]
)
def raw_connection(self, _connection=None):
"""Return a "raw" DBAPI connection from the connection pool.
The returned object is a proxied version of the DBAPI
connection object used by the underlying driver in use.
The object will have all the same behavior as the real DBAPI
connection, except that its ``close()`` method will result in the
connection being returned to the pool, rather than being closed
for real.
This method provides direct DBAPI connection access for
special situations when the API provided by
:class:`_engine.Connection`
is not needed. When a :class:`_engine.Connection` object is already
present, the DBAPI connection is available using
the :attr:`_engine.Connection.connection` accessor.
.. seealso::
:ref:`dbapi_connections`
"""
return self._wrap_pool_connect(self.pool.connect, _connection)
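    # Usage sketch: the caller is responsible for closing the proxied
    # connection so it is returned to the pool, e.g.
    #
    #     dbapi_conn = engine.raw_connection()
    #     try:
    #         cursor = dbapi_conn.cursor()
    #         ...
    #     finally:
    #         dbapi_conn.close()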
class OptionEngineMixin(object):
_sa_propagate_class_events = False
def __init__(self, proxied, execution_options):
self._proxied = proxied
self.url = proxied.url
self.dialect = proxied.dialect
self.logging_name = proxied.logging_name
self.echo = proxied.echo
self._compiled_cache = proxied._compiled_cache
self.hide_parameters = proxied.hide_parameters
log.instance_logger(self, echoflag=self.echo)
# note: this will propagate events that are assigned to the parent
# engine after this OptionEngine is created. Since we share
# the events of the parent we also disallow class-level events
# to apply to the OptionEngine class directly.
#
# the other way this can work would be to transfer existing
# events only, using:
# self.dispatch._update(proxied.dispatch)
#
# that might be more appropriate however it would be a behavioral
# change for logic that assigns events to the parent engine and
# would like it to take effect for the already-created sub-engine.
self.dispatch = self.dispatch._join(proxied.dispatch)
self._execution_options = proxied._execution_options
self.update_execution_options(**execution_options)
def _get_pool(self):
return self._proxied.pool
def _set_pool(self, pool):
self._proxied.pool = pool
pool = property(_get_pool, _set_pool)
def _get_has_events(self):
return self._proxied._has_events or self.__dict__.get(
"_has_events", False
)
def _set_has_events(self, value):
self.__dict__["_has_events"] = value
_has_events = property(_get_has_events, _set_has_events)
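# OptionEngine is the variant handed back by Engine.execution_options(): it
# shares the parent engine's pool, dialect and event dispatch while carrying
# its own copy of the execution options (a brief summary; see
# Engine.execution_options elsewhere in this module for the full behavior).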
class OptionEngine(OptionEngineMixin, Engine):
pass
Engine._option_cls = OptionEngine
|
{
"content_hash": "f201e28f1411a7235203c15b5afbe90f",
"timestamp": "",
"source": "github",
"line_count": 3323,
"max_line_length": 84,
"avg_line_length": 35.729762263015346,
"alnum_prop": 0.5894045312894803,
"repo_name": "monetate/sqlalchemy",
"id": "2444b5c7fe1a3d351d956e75e52a0a61dec683e6",
"size": "118965",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/sqlalchemy/engine/base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "49142"
},
{
"name": "Python",
"bytes": "11790244"
}
],
"symlink_target": ""
}
|
"""Change the reference to Qt frameworks.
Typical usage:
% change_qt_reference_mac.py --qtdir=/path/to/qtdir/ \
--target=/path/to/target.app/Contents/MacOS/target
"""
__author__ = "horo"
import optparse
import os
from util import PrintErrorAndExit
from util import RunOrDie
def ParseOption():
"""Parse command line options."""
parser = optparse.OptionParser()
parser.add_option('--qtdir', dest='qtdir')
parser.add_option('--target', dest='target')
(opts, _) = parser.parse_args()
return opts
def main():
opt = ParseOption()
if not opt.qtdir:
PrintErrorAndExit('--qtdir option is mandatory.')
if not opt.target:
PrintErrorAndExit('--target option is mandatory.')
qtdir = os.path.abspath(opt.qtdir)
target = os.path.abspath(opt.target)
# Changes the reference to QtCore framework from the target application
cmd = ["install_name_tool", "-change",
"%s/lib/QtCore.framework/Versions/4/QtCore" % qtdir,
"@executable_path/../Frameworks/QtCore.framework/Versions/4/QtCore",
"%s" % target]
RunOrDie(cmd)
# Changes the reference to QtGui framework from the target application
cmd = ["install_name_tool", "-change",
"%s/lib/QtGui.framework/Versions/4/QtGui" % qtdir,
"@executable_path/../Frameworks/QtGui.framework/Versions/4/QtGui",
"%s" % target]
RunOrDie(cmd)
if __name__ == '__main__':
main()
|
{
"content_hash": "a863234383565eb1287fed5caa758631",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 77,
"avg_line_length": 24.75438596491228,
"alnum_prop": 0.6654854712969526,
"repo_name": "takahashikenichi/mozc",
"id": "0305b84ce7cb40fdd9d547d2f63198efdb327eda",
"size": "2969",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/build_tools/change_qt_reference_mac.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "200335"
},
{
"name": "C++",
"bytes": "10808666"
},
{
"name": "CSS",
"bytes": "26088"
},
{
"name": "Emacs Lisp",
"bytes": "80074"
},
{
"name": "HTML",
"bytes": "266980"
},
{
"name": "Java",
"bytes": "2751856"
},
{
"name": "JavaScript",
"bytes": "919906"
},
{
"name": "Makefile",
"bytes": "3754"
},
{
"name": "Objective-C",
"bytes": "34833"
},
{
"name": "Objective-C++",
"bytes": "227200"
},
{
"name": "Protocol Buffer",
"bytes": "112300"
},
{
"name": "Python",
"bytes": "1056960"
},
{
"name": "QMake",
"bytes": "861"
},
{
"name": "Shell",
"bytes": "9928"
},
{
"name": "Yacc",
"bytes": "2104"
}
],
"symlink_target": ""
}
|
import os
import shutil
import sys
import random
import tempfile
import mlflow
from mlflow import (
log_metric,
log_param,
log_artifacts,
get_artifact_uri,
active_run,
get_tracking_uri,
log_artifact,
MlflowClient,
)
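# What this example exercises, in short: log a few params/metrics against the
# configured tracking URI, read the run back through MlflowClient, and upload
# artifacts from a temporary directory that is cleaned up afterwards.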
if __name__ == "__main__":
print("Running {} with tracking URI {}".format(sys.argv[0], get_tracking_uri()))
log_param("param1", 5)
log_metric("foo", 5)
log_metric("foo", 6)
log_metric("foo", 7)
log_metric("random_int", random.randint(0, 100))
run_id = active_run().info.run_id
# Get run metadata & data from the tracking server
service = MlflowClient()
run = service.get_run(run_id)
print("Metadata & data for run with UUID %s: %s" % (run_id, run))
local_dir = tempfile.mkdtemp()
message = "test artifact written during run %s within artifact URI %s\n" % (
active_run().info.run_id,
get_artifact_uri(),
)
try:
file_path = os.path.join(local_dir, "some_output_file.txt")
with open(file_path, "w") as handle:
handle.write(message)
log_artifacts(local_dir, "some_subdir")
log_artifact(file_path, "another_dir")
finally:
shutil.rmtree(local_dir)
|
{
"content_hash": "18f8465c9f5b6d55df47ce6e03257ef1",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 84,
"avg_line_length": 28.46511627906977,
"alnum_prop": 0.6160130718954249,
"repo_name": "mlflow/mlflow",
"id": "00103d71594ca41b64c5f9c32b203b628910fd1a",
"size": "1224",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/remote_store/remote_server.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "24965"
},
{
"name": "Dockerfile",
"bytes": "1206"
},
{
"name": "HTML",
"bytes": "16439"
},
{
"name": "Java",
"bytes": "276538"
},
{
"name": "JavaScript",
"bytes": "3606345"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "6057051"
},
{
"name": "R",
"bytes": "202454"
},
{
"name": "Scala",
"bytes": "39353"
},
{
"name": "Shell",
"bytes": "27246"
},
{
"name": "TSQL",
"bytes": "211"
},
{
"name": "TypeScript",
"bytes": "313772"
}
],
"symlink_target": ""
}
|
from setuptools import find_packages, setup
import os
from aima import __version__ as version
from aima import __authors__, __github_url__
from aima import __doc__ as description
from aima import __name__ as package_name
print('Installing package named {0}. . .'.format(package_name))
try:
from pip.req import parse_requirements
requirements = list(parse_requirements('requirements.txt'))
except:
requirements = []
install_requires=[str(req).split(' ')[0].strip() for req in requirements if req.req and not req.url]
print('requires: %r' % install_requires)
dependency_links=[req.url for req in requirements if req.url]
print('dependencies: %r' % dependency_links)
long_description = 'Python packages implementing the algorithms and example code in the textbook "Artificial Intelligence: A Modern Approach" by Norvig and Russell.'
try:
long_description = open(os.path.join(os.path.dirname(__file__), 'README.rst')).read()
except:
pass
EXCLUDE_FROM_PACKAGES = []
setup(
name = package_name,
packages=find_packages(exclude=EXCLUDE_FROM_PACKAGES), #[package_name],
include_package_data = True, # install non-.py files listed in MANIFEST.in (.js, .html, .txt, .md, etc)
install_requires = install_requires,
dependency_links = dependency_links,
version = version,
description = description,
long_description = long_description,
author = ', '.join(__authors__),
author_email = "peter.norvig@gmail.com",
#tests_require = ['doctest'],
#test_suite = 'setuptest.setuptest.SetupTestSuite',
#cmdclass = {'test': test},
url = __github_url__,
download_url = "%s/tarball/%s" % (__github_url__, version),
keywords = ["ai", "ml", "artificial intelligence", "machine intelligence", "norvig", "russell", "agent", "bot", "book", "textbook", "algorithm", "machine-learning", "search"],
classifiers = [
"Programming Language :: Python",
"Programming Language :: Python :: 2.5",
"Development Status :: 3 - Alpha",
"Environment :: Console",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Topic :: Scientific/Engineering :: Visualization",
"Topic :: Scientific/Engineering :: Mathematics",
],
)
|
{
"content_hash": "88c490b88415674d6320d343d4b6fc4c",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 179,
"avg_line_length": 40.96666666666667,
"alnum_prop": 0.6663954434499593,
"repo_name": "hobson/aima",
"id": "602f1dc51d2118a1f4659088e9a51fb05d4329d3",
"size": "2525",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "4430"
},
{
"name": "Makefile",
"bytes": "562"
},
{
"name": "Python",
"bytes": "249470"
},
{
"name": "Shell",
"bytes": "278"
}
],
"symlink_target": ""
}
|
"""
==========================
Display images for MEG
==========================
"""
import numpy as np
from expyfun import visual, ExperimentController
from expyfun.io import write_hdf5
import time
from PIL import Image
import os
from os import path as op
import glob
# background color
testing = False
test_trig = [15]
if testing:
bgcolor = [0., 0., 0., 1.]
else:
bgcolor = [0., 0., 0., 1.]
# Paths to images
#basedir = '/home/jyeatman/projects/MEG/images/'
basedir = os.path.join('C:\\Users\\neuromag\\Desktop\\jason\\wordStim')
if not os.path.isdir(basedir):
basedir = os.path.join('/mnt/diskArray/projects/MEG/wordStim')
""" Words, False fonts (Korean), Faces, Objects """
imagedirs = ['word_c254_p0', 'word_c254_p50', 'word_c254_p80', 'bigram_c254_p20']
nimages = [30, 30, 30, 30] # number of images in each category
if len(nimages) == 1: # Does nothing....
nimages = np.repeat(nimages, len(imagedirs))
n_totalimages = sum(nimages)
# ISIs to be used. Must divide evenly into nimages
isis = np.arange(1., 1.51, 0.1) #np.arange(.62, .84, .02)
imduration = 0.8 # Image duration 800 ms
s = .5 # Image scale
# Create a vector of ISIs in a random order. One ISI for each image
rng = np.random.RandomState(int(time.time()))
ISI = np.tile(isis, int(np.ceil(n_totalimages/len(isis)))+1)
rng.shuffle(ISI)
ISI = ISI[:(n_totalimages)]
total_time = sum(ISI) + sum(nimages)*imduration  # in seconds
n_flickers = int(total_time*2)+1 # every 500 ms
n_target = int(0.2*n_flickers)
fix_seq = np.zeros(n_flickers)
fix_seq[:n_target] = 1
rng.shuffle(fix_seq)
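# Spread the targets out: if two target flickers end up adjacent, clear the
# second one and re-insert a target a few flickers later.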
for i in range(0,len(fix_seq)-3):
if (fix_seq[i] + fix_seq[i+1]) == 2:
fix_seq[i+1] = 0
if fix_seq[i+3] == 0:
fix_seq[i+3] = 1
elif fix_seq[i+4] == 0:
fix_seq[i+4] = 1
# Create a vector of dot colors for each ISI
c = ['g', 'b', 'y', 'c']
k = 0
m = 0
fix_color = []
for i in range(0,len(fix_seq)):
if fix_seq[i] == 1:
fix_color.append('r')
else:
fix_color.append(c[k])
k += 1
k = np.mod(k,4)
if k == 0:
rng.shuffle(c)
while fix_color[i] == c[0]:
rng.shuffle(c)
# Create a vector marking the category of each image
imtype = []
for i in range(1, len(imagedirs)+1):
imtype.extend(np.tile(i, nimages[i-1]))
rng.shuffle(imtype)
# Build the path structure to each image in each image directory. Each image
# category is an entry into the list. The categories are in sequential order
# matching imorder, but the images within each category are random
templist = []
tempnumber = []
c = -1
for imname in imagedirs:
c = c+1
# Temporary variable with image names in order
tmp = sorted(glob.glob(os.path.join(basedir, imname, '*')))
# Randomly grab nimages from the list
n = rng.randint(0, len(tmp), nimages[c])
tmp2 = []
for i in n:
tmp2.append(tmp[i])
# Add the random image list to an entry in imagelist
templist.extend(tmp2)
# record the image number
tempnumber.extend(n)
assert len(templist[-1]) > 0
temp_list = np.arange(0,len(templist))
rng.shuffle(temp_list)
imagelist = []
imnumber = []
for i in temp_list:
imagelist.append(templist[i])
imnumber.append(tempnumber[i])
# Start instance of the experiment controller
with ExperimentController('ShowImages', full_screen=True) as ec:
#write_hdf5(op.splitext(ec.data_fname)[0] + '_trials.hdf5',
# dict(imorder_shuf=imorder_shuf,
# imtype_shuf=imtype_shuf))
    fr = 1/ec.estimate_screen_fs()  # Frame duration (s) from the estimated refresh rate
realRR = ec.estimate_screen_fs()
realRR = round(realRR)
adj = fr/2 # Adjustment factor for accurate flip
# Wait to fill the screen
ec.set_visible(False)
# Set the background color to gray
ec.set_background_color(bgcolor)
n_frames = round(total_time * realRR)
img_frames = round(imduration*realRR)
jitter = np.arange(0,realRR*0.2) # 0~200 ms jitter
temp_flicker = np.arange(0,n_frames,int(realRR/2)) # Get temp_flicker frames: every .5 s
delay = []
for i in np.arange(0,len(temp_flicker)):
rng.shuffle(jitter)
delay.append(jitter[0])
frame_flicker = temp_flicker + delay #
frame_img = [0]
for i in np.arange(0,len(ISI)):
frame_img.append(frame_img[i] + img_frames + int(ISI[i]*realRR))
frame_img = frame_img[:-1]
# load up the image stack. The images in img_buffer are in the sequential
# non-shuffled order
img = []
for im in imagelist:
img_buffer = np.array(Image.open(im), np.uint8) / 255.
if img_buffer.ndim == 2:
img_buffer = np.tile(img_buffer[:, :, np.newaxis], [1, 1, 3])
img.append(visual.RawImage(ec, img_buffer, scale=s))
ec.check_force_quit()
# make a blank image
blank = visual.RawImage(ec, np.tile(bgcolor[0], np.multiply([s, s, 1], (121, 245, 3))))
bright = visual.RawImage(ec, np.tile([1.], np.multiply([s, s, 1], (121, 245, 3))))
# Calculate stimulus size
d_pix = -np.diff(ec._convert_units([[3., 0.], [3., 0.]], 'deg', 'pix'), axis=-1)
# do the drawing, then flip
ec.set_visible(True)
frametimes = []
buttons = []
ec.listen_presses()
last_flip = -1
# Create a fixation dot
fix = visual.FixationDot(ec, colors=('k', 'k'))
fix.set_radius(4, 0, 'pix')
fix.draw()
    # Display the instructions (held for 1 second before the run starts).
# They will be different depending on the run number
if int(ec.session) % 2:
t = visual.Text(ec,text='Button press when the dot turns red - Ignore images',pos=[0,.1],font_size=40,color='k')
else:
t = visual.Text(ec,text='Button press for fake word',pos=[0,.1],font_size=40,color='k')
t.draw()
ec.flip()
ec.wait_secs(1.0)
# Show images
count = 0 # This is for testing...
# Initial blank
init_blanktime = 1.
fix.set_colors(colors=('k', 'k'))
blank.draw(), fix.draw()
ec.write_data_line('dotcolorFix', 'k')
last_flip = ec.flip()
# The iterable 'trial' randomizes the order of everything since it is
# drawn from imorder_shuf
trial = 0
frame = 0
flicker = 0
imageframe = []
stampframe = []
trigger = 0
t0 = time.time()
while frame < n_frames-1:
# if frame == frame_flicker[flicker]:
# fix.set_colors(colors=(fix_color[flicker],fix_color[flicker]))
# ec.write_data_line('dotcolorFix', fix_color[flicker])
# if flicker < len(frame_flicker)-2:
# flicker += 1
if frame == frame_img[trial]:
# ec.write_data_line('imnumber', imnumber[trial])
# ec.write_data_line('imtype', imtype[trial])
# trig = imtype[trial]
trigger = 1
bright.draw()
trial += 1
# ec.stamp_triggers(1, check='int4')
else:
trigger = 0
blank.draw()
# ec.stamp_triggers(2,check='int4')
# count = (count + 1) % 2
# blank.draw()
# else:
# img[trial].draw()
# fix.draw()
# Mark the log file of the trial type
# ec.write_data_line('imnumber', imnumber[trial])
# ec.write_data_line('imtype', imtype[trial])
# ec.write_data_line('dotcolorIm', dcolor[trial])
# The image is flipped ISI milliseconds after the blank
if trigger:
ec.stamp_triggers(1,check='int4',wait_for_last=False)
# stampframe.append(frame)
last_flip = ec.flip()
ec.get_presses()
frametimes.append(last_flip)
ec.check_force_quit()
frame += 1
# Now the experiment is over and we show 5 seconds of blank
    print("\n\n Elapsed time: %0.4f secs" % (time.time()-t0))
    print("\n\n Targeted time: %0.4f secs" % total_time)
blank.draw(), fix.draw()
ec.flip()
ec.wait_secs(5.0)
pressed = ec.get_presses() # relative_to=0.0
|
{
"content_hash": "3e6a666c858e1fc429ca12fbd5fe9fe5",
"timestamp": "",
"source": "github",
"line_count": 250,
"max_line_length": 120,
"avg_line_length": 31.984,
"alnum_prop": 0.5947973986993497,
"repo_name": "yeatmanlab/BrainTools",
"id": "5f2b4159839d47a92ac201d960c18692d275bb91",
"size": "7996",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "experiments/r21/test2_r21_event_related.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Forth",
"bytes": "587"
},
{
"name": "M",
"bytes": "62"
},
{
"name": "MATLAB",
"bytes": "527044"
},
{
"name": "Objective-C",
"bytes": "291"
},
{
"name": "Python",
"bytes": "1025956"
}
],
"symlink_target": ""
}
|
import os
import time
import pandas as pd
import cmapPy.pandasGEXpress.write_gctx as write_gctx
import cmapPy.pandasGEXpress.write_gct as write_gct
import cmapPy.pandasGEXpress.parse as parse
import cmapPy.pandasGEXpress.subset_gctoo as sg
# for storing timing results
gct_times = {}
gctx_times = {}
# large input gctx; see notes above for more info about this
big_gctoo = parse.parse("/path/to/large/gctx/file")
# column and row spaces to test writing on
col_spaces = [96, 384, 1536, 3000, 6000, 12000, 24000, 48000, 100000]
row_spaces = [978, 10174]
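# Each (column, row) combination below is written once as .gct and once as
# .gctx, the elapsed write time is recorded, and the temporary file removed.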
for c in col_spaces:
for r in row_spaces:
curr_gctoo = sg.subset_gctoo(big_gctoo, ridx = range(0, r), cidx=range(0,c))
# gct writing
out_fname = "write_test_n" + str(c) + "x" + str(r) + ".gct"
start = time.clock()
write_gct.write(curr_gctoo, out_fname)
end = time.clock()
elapsed_time = end - start
gct_times[out_fname] = elapsed_time
os.remove(out_fname)
# gctx writing
out_fname = "write_test_n" + str(c) + "x" + str(r) + ".gctx"
start = time.clock()
write_gctx.write(curr_gctoo, out_fname)
end = time.clock()
elapsed_time = end - start
gctx_times[out_fname] = elapsed_time
os.remove(out_fname)
# write results to file
gct_df = pd.DataFrame(pd.Series(gct_times))
gctx_df = pd.DataFrame(pd.Series(gctx_times))
write_times_df = pd.concat([gct_df, gctx_df])
write_times_df.columns = ["write_time"]
write_times_df.to_csv("python_writing_results.txt", sep="\t")
|
{
"content_hash": "2631f2b69b0ff91127eaa98d10f9398c",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 78,
"avg_line_length": 32.28888888888889,
"alnum_prop": 0.6971782518926359,
"repo_name": "cmap/cmapPy",
"id": "317f3b4f29f269efaf30d80c7f8f48ccdeac06bc",
"size": "1758",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "performance_testing/python_write_timing.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "178"
},
{
"name": "Jupyter Notebook",
"bytes": "96474"
},
{
"name": "Python",
"bytes": "492786"
}
],
"symlink_target": ""
}
|
"""restructuredText Exporter class"""
#-----------------------------------------------------------------------------
# Copyright (c) 2013, the IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from IPython.config import Config
from .templateexporter import TemplateExporter
#-----------------------------------------------------------------------------
# Classes
#-----------------------------------------------------------------------------
class RSTExporter(TemplateExporter):
"""
Exports restructured text documents.
"""
def _file_extension_default(self):
return '.rst'
def _template_file_default(self):
return 'rst'
output_mimetype = 'text/restructuredtext'
@property
def default_config(self):
c = Config({'ExtractOutputPreprocessor':{'enabled':True}})
c.merge(super(RSTExporter,self).default_config)
return c
|
{
"content_hash": "da2520151b2ab0697e378a4946128a9f",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 78,
"avg_line_length": 31.625,
"alnum_prop": 0.433201581027668,
"repo_name": "initNirvana/Easyphotos",
"id": "731e9785e62a589b713fb880e762982b33e4f695",
"size": "1265",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "env/lib/python3.4/site-packages/IPython/nbconvert/exporters/rst.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "5939"
},
{
"name": "CSS",
"bytes": "13653"
},
{
"name": "HTML",
"bytes": "129191"
},
{
"name": "JavaScript",
"bytes": "1401324"
},
{
"name": "Python",
"bytes": "11874458"
},
{
"name": "Shell",
"bytes": "3668"
},
{
"name": "Smarty",
"bytes": "21402"
}
],
"symlink_target": ""
}
|
from django.core.urlresolvers import reverse
from django.test import Client
from funfactory.helpers import urlparams
from nose.tools import eq_, ok_
from mozillians.common.tests import TestCase, requires_login, requires_vouch
from mozillians.groups.models import GroupMembership
from mozillians.groups.tests import GroupFactory, GroupAliasFactory, SkillFactory
from mozillians.users.tests import UserFactory
class ShowTests(TestCase):
def setUp(self):
self.group = GroupFactory.create()
self.url = reverse('groups:show_group', kwargs={'url': self.group.url})
self.user_1 = UserFactory.create()
self.user_2 = UserFactory.create()
self.group.add_member(self.user_2.userprofile)
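        # user_1 stays outside the group; user_2 is a full member, so the
        # group's member listing contains exactly one profile.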
def test_show_user_not_in_group(self):
with self.login(self.user_1) as client:
response = client.get(self.url, follow=True)
eq_(response.status_code, 200)
context = response.context
eq_(context['group'], self.group)
eq_(context['in_group'], False)
eq_(context['people'].paginator.count, 1)
eq_(context['people'][0].userprofile, self.user_2.userprofile)
ok_(not context['is_pending'])
def test_show_user_in_group(self):
"""Test show() for a user within the group."""
with self.login(self.user_2) as client:
response = client.get(self.url, follow=True)
eq_(response.status_code, 200)
context = response.context
eq_(context['group'], self.group)
eq_(context['in_group'], True)
eq_(context['people'].paginator.count, 1)
eq_(context['people'][0].userprofile, self.user_2.userprofile)
ok_(not context['is_pending'])
def test_show_pending_user(self):
# Make user 2 pending
GroupMembership.objects.filter(userprofile=self.user_2.userprofile,
group=self.group).update(status=GroupMembership.PENDING)
with self.login(self.user_2) as client:
response = client.get(self.url, follow=True)
eq_(response.status_code, 200)
context = response.context
eq_(context['group'], self.group)
eq_(context['in_group'], False)
eq_(context['people'].paginator.count, 1)
eq_(context['people'][0].userprofile, self.user_2.userprofile)
ok_(context['is_pending'])
def test_show_empty_group(self):
group = GroupFactory.create()
url = reverse('groups:show_group', kwargs={'url': group.url})
with self.login(self.user_1) as client:
response = client.get(url, follow=True)
eq_(response.status_code, 200)
context = response.context
eq_(context['people'].paginator.count, 0)
ok_(not context['is_pending'])
def test_show_review_terms_pending(self):
group = GroupFactory.create(terms='Example terms')
user = UserFactory.create()
group.add_member(user.userprofile, status=GroupMembership.PENDING_TERMS)
url = reverse('groups:show_group', kwargs={'url': group.url})
with self.login(user) as client:
response = client.get(url, follow=True)
eq_(response.status_code, 200)
self.assertTemplateUsed(response, 'groups/terms.html')
def test_show_review_terms_accepted(self):
group = GroupFactory.create(terms='Example terms')
user = UserFactory.create()
group.add_member(user.userprofile, status=GroupMembership.MEMBER)
url = reverse('groups:show_group', kwargs={'url': group.url})
with self.login(user) as client:
response = client.get(url, follow=True)
eq_(response.status_code, 200)
self.assertTemplateUsed(response, 'groups/group.html')
def test_show_group_members_sorted(self):
"""
Test show() where group members are sorted in alphabetical
ascending order.
"""
group = GroupFactory.create()
user_1 = UserFactory.create(userprofile={'full_name': 'Carol'})
user_2 = UserFactory.create(userprofile={'full_name': 'Alice'})
user_3 = UserFactory.create(userprofile={'full_name': 'Bob'})
group.add_member(user_1.userprofile)
group.add_member(user_2.userprofile)
group.add_member(user_3.userprofile)
url = reverse('groups:show_group', kwargs={'url': group.url})
with self.login(user_1) as client:
response = client.get(url, follow=True)
eq_(response.status_code, 200)
people = response.context['people']
eq_(people[0].userprofile, user_2.userprofile)
eq_(people[1].userprofile, user_3.userprofile)
eq_(people[2].userprofile, user_1.userprofile)
def test_show_common_skills(self):
"""Show most common skills first."""
user_1 = UserFactory.create()
user_2 = UserFactory.create()
user_3 = UserFactory.create()
user_4 = UserFactory.create()
group = GroupFactory.create()
group.add_member(user_1.userprofile)
group.add_member(user_2.userprofile)
group.add_member(user_3.userprofile)
group.add_member(user_4.userprofile)
skill_1 = SkillFactory.create()
skill_2 = SkillFactory.create()
skill_3 = SkillFactory.create()
skill_4 = SkillFactory.create()
skill_3.members.add(user_1.userprofile)
skill_3.members.add(user_2.userprofile)
skill_3.members.add(user_3.userprofile)
skill_3.members.add(user_4.userprofile)
skill_2.members.add(user_2.userprofile)
skill_2.members.add(user_3.userprofile)
skill_2.members.add(user_4.userprofile)
skill_4.members.add(user_3.userprofile)
skill_4.members.add(user_4.userprofile)
skill_1.members.add(user_1.userprofile)
users = UserFactory.create_batch(5)
for user in users:
skill_4.members.add(user.userprofile)
url = reverse('groups:show_group', kwargs={'url': group.url})
with self.login(user_1) as client:
response = client.get(url, follow=True)
eq_(response.status_code, 200)
skills = response.context['skills']
eq_(skills[0], skill_3)
eq_(skills[1], skill_2)
eq_(skills[2], skill_4)
ok_(skill_1 not in skills)
@requires_login()
def test_show_anonymous(self):
client = Client()
client.get(self.url, follow=True)
@requires_vouch()
def test_show_unvouched(self):
user = UserFactory.create(vouched=False)
with self.login(user) as client:
client.get(self.url, follow=True)
def test_nonexistant_group(self):
url = reverse('groups:show_group', kwargs={'url': 'invalid'})
with self.login(self.user_1) as client:
response = client.get(url, follow=True)
eq_(response.status_code, 404)
def test_alias_redirection(self):
user = UserFactory.create()
group = GroupFactory.create()
group_alias = GroupAliasFactory.create(alias=group)
url = reverse('groups:show_group', kwargs={'url': group_alias.url})
with self.login(user) as client:
response = client.get(url, follow=True)
eq_(response.status_code, 200)
eq_(response.context['group'], group)
def test_show_leave_button_value_with_curator(self):
curator_user = UserFactory.create()
group = GroupFactory.create()
group.curators.add(curator_user.userprofile)
user = UserFactory.create()
group.add_member(user.userprofile)
url = reverse('groups:show_group', kwargs={'url': group.url})
with self.login(curator_user) as client:
response = client.get(url, follow=True)
eq_(response.status_code, 200)
eq_(response.context['show_leave_button'], False)
with self.login(user) as client:
response = client.get(url, follow=True)
eq_(response.status_code, 200)
eq_(response.context['show_leave_button'], True)
def test_show_leave_button_value_without_curator(self):
group = GroupFactory.create()
user = UserFactory.create()
group.add_member(user.userprofile)
url = reverse('groups:show_group', kwargs={'url': group.url})
with self.login(user) as client:
response = client.get(url, follow=True)
eq_(response.status_code, 200)
eq_(response.context['show_leave_button'], True)
ok_(not response.context['is_pending'])
def test_show_leave_button_value_members_cant_leave(self):
"""
Don't show leave button for a group whose members_can_leave flag
is False, even for group member
"""
group = GroupFactory.create(members_can_leave=False)
user = UserFactory.create()
group.add_member(user.userprofile)
url = reverse('groups:show_group', kwargs={'url': group.url})
with self.login(user) as client:
response = client.get(url, follow=True)
eq_(response.status_code, 200)
eq_(response.context['show_leave_button'], False)
ok_(not response.context['is_pending'])
def test_show_leave_button_value_members_can_leave(self):
"""
Do show leave button for a group whose members_can_leave flag
is True, for group member
"""
group = GroupFactory.create(members_can_leave=True)
user = UserFactory.create()
group.add_member(user.userprofile)
url = reverse('groups:show_group', kwargs={'url': group.url})
with self.login(user) as client:
response = client.get(url, follow=True)
eq_(response.status_code, 200)
eq_(response.context['show_leave_button'], True)
ok_(not response.context['is_pending'])
def test_show_leave_button_value_members_can_leave_non_member(self):
"""
Don't show leave button for a group whose members_can_leave flag
is True, if not group member
"""
group = GroupFactory.create(members_can_leave=True)
user = UserFactory.create()
url = reverse('groups:show_group', kwargs={'url': group.url})
with self.login(user) as client:
response = client.get(url, follow=True)
eq_(response.status_code, 200)
eq_(response.context['show_leave_button'], False)
ok_(not response.context['is_pending'])
def test_show_join_button_accepting_members_yes(self):
group = GroupFactory.create(accepting_new_members='yes')
user = UserFactory.create()
url = reverse('groups:show_group', kwargs={'url': group.url})
with self.login(user) as client:
response = client.get(url, follow=True)
eq_(response.status_code, 200)
eq_(response.context['show_join_button'], True)
ok_(not response.context['is_pending'])
def test_show_join_button_accepting_members_yes_member(self):
group = GroupFactory.create(accepting_new_members='yes')
user = UserFactory.create()
group.add_member(user.userprofile)
url = reverse('groups:show_group', kwargs={'url': group.url})
with self.login(user) as client:
response = client.get(url, follow=True)
eq_(response.status_code, 200)
eq_(response.context['show_join_button'], False)
def test_show_join_button_accepting_members_by_request(self):
        group = GroupFactory.create(accepting_new_members='by_request')
user = UserFactory.create()
url = reverse('groups:show_group', kwargs={'url': group.url})
with self.login(user) as client:
response = client.get(url, follow=True)
eq_(response.status_code, 200)
eq_(response.context['show_join_button'], True)
def test_show_join_button_accepting_members_by_request_member(self):
        group = GroupFactory.create(accepting_new_members='by_request')
user = UserFactory.create()
group.add_member(user.userprofile)
url = reverse('groups:show_group', kwargs={'url': group.url})
with self.login(user) as client:
response = client.get(url, follow=True)
eq_(response.status_code, 200)
eq_(response.context['show_join_button'], False)
def test_show_join_button_accepting_members_no(self):
group = GroupFactory.create(accepting_new_members='no')
user = UserFactory.create()
url = reverse('groups:show_group', kwargs={'url': group.url})
with self.login(user) as client:
response = client.get(url, follow=True)
eq_(response.status_code, 200)
eq_(response.context['show_join_button'], False)
def test_show_leave_button_value_skill(self):
skill = SkillFactory.create()
user = UserFactory.create()
skill.members.add(user.userprofile)
url = reverse('groups:show_skill', kwargs={'url': skill.url})
with self.login(user) as client:
response = client.get(url, follow=True)
eq_(response.status_code, 200)
eq_(response.context['show_leave_button'], True)
ok_(not response.context['is_pending'])
def test_show_filter_accepting_new_members_no(self):
self.group.curators.add(self.user_1.userprofile)
self.group.accepting_new_members = 'no'
self.group.save()
with self.login(self.user_1) as client:
response = client.get(self.url, follow=True)
ok_('membership_filter_form' in response.context)
eq_(response.context['membership_filter_form'], None)
def test_show_filter_accepting_new_members_yes(self):
self.group.curators.add(self.user_1.userprofile)
self.group.accepting_new_members = 'yes'
self.group.save()
with self.login(self.user_1) as client:
response = client.get(self.url, follow=True)
ok_('membership_filter_form' in response.context)
eq_(response.context['membership_filter_form'], None)
def test_show_filter_accepting_new_members_by_request(self):
self.group.curators.add(self.user_1.userprofile)
self.group.accepting_new_members = 'by_request'
self.group.save()
with self.login(self.user_1) as client:
response = client.get(self.url, follow=True)
ok_('membership_filter_form' in response.context)
ok_(response.context['membership_filter_form'])
def test_remove_button_confirms(self):
"""GET to remove_member view displays confirmation"""
# Make user 1 the group curator so they can remove users
self.group.curators.add(self.user_1.userprofile)
self.group.save()
group_url = reverse('groups:show_group', prefix='/en-US/', args=[self.group.url])
next_url = "%s?filtr=members" % group_url
# We must request the full path, with language, or the
# LanguageMiddleware will convert the request to GET.
url = reverse('groups:remove_member', prefix='/en-US/',
kwargs=dict(url=self.group.url, user_pk=self.user_2.userprofile.pk))
with self.login(self.user_1) as client:
response = client.get(url, data={'next_url': next_url}, follow=True)
self.assertTemplateUsed(response, 'groups/confirm_remove_member.html')
# make sure context variable next_url was populated properly
eq_(response.context['next_url'], next_url)
# Still a member
ok_(self.group.has_member(self.user_2.userprofile))
def test_post_remove_button_removes(self):
"""POST to remove_member view removes member"""
# Make user 1 the group curator so they can remove users
self.group.curators.add(self.user_1.userprofile)
self.group.accepting_new_members = 'by_request'
self.group.save()
group_url = reverse('groups:show_group', prefix='/en-US/', args=[self.group.url])
next_url = "%s?filtr=members" % group_url
# We must request the full path, with language, or the
# LanguageMiddleware will convert the request to GET.
url = reverse('groups:remove_member', prefix='/en-US/',
kwargs=dict(url=self.group.url, user_pk=self.user_2.userprofile.pk))
with self.login(self.user_1) as client:
response = client.post(url, data={'next_url': next_url}, follow=True)
self.assertTemplateNotUsed(response, 'groups/confirm_remove_member.html')
# make sure filter members is active
membership_filter_form = response.context['membership_filter_form']
eq_(membership_filter_form.cleaned_data['filtr'], 'members')
# Not a member anymore
ok_(not self.group.has_member(self.user_2.userprofile))
def test_confirm_user(self):
"""POST to confirm user view changes member from pending to member"""
# Make user 1 the group curator so they can remove users
self.group.curators.add(self.user_1.userprofile)
self.group.accepting_new_members = 'by_request'
self.group.save()
group_url = reverse('groups:show_group', prefix='/en-US/', args=[self.group.url])
next_url = "%s?filtr=pending_members" % group_url
# Make user 2 pending
GroupMembership.objects.filter(userprofile=self.user_2.userprofile,
group=self.group).update(status=GroupMembership.PENDING)
ok_(self.group.has_pending_member(self.user_2.userprofile))
# We must request the full path, with language, or the
# LanguageMiddleware will convert the request to GET.
url = reverse('groups:confirm_member', prefix='/en-US/',
kwargs=dict(url=self.group.url, user_pk=self.user_2.userprofile.pk))
with self.login(self.user_1) as client:
response = client.post(url, data={'next_url': next_url}, follow=True)
self.assertTemplateNotUsed(response, 'groups/confirm_remove_member.html')
# make sure filter pending_members is active
membership_filter_form = response.context['membership_filter_form']
eq_(membership_filter_form.cleaned_data['filtr'], 'pending_members')
# Now a member
ok_(self.group.has_member(self.user_2.userprofile))
def test_filter_members_only(self):
"""Filter `m` will filter out members that are only pending"""
# Make user 1 the group curator so they can see requests
self.group.curators.add(self.user_1.userprofile)
self.group.accepting_new_members = 'by_request'
self.group.save()
# Make user 2 a full member
self.group.add_member(self.user_2.userprofile, GroupMembership.MEMBER)
member_membership = self.group.groupmembership_set.get(userprofile__user=self.user_2)
# Make user 3 a pending member
self.user_3 = UserFactory.create()
self.group.add_member(self.user_3.userprofile, GroupMembership.PENDING)
pending_membership = self.group.groupmembership_set.get(userprofile__user=self.user_3)
url = urlparams(self.url, filtr='members')
with self.login(self.user_1) as client:
response = client.get(url, follow=True)
people = response.context['people'].object_list
ok_(member_membership in people, people)
ok_(pending_membership not in people)
def test_filter_pending_only(self):
"""Filter `r` will show only member requests (pending)"""
# Make user 1 the group curator so they can see requests
self.group.curators.add(self.user_1.userprofile)
self.group.accepting_new_members = 'by_request'
self.group.save()
# Make user 2 a full member
self.group.add_member(self.user_2.userprofile, GroupMembership.MEMBER)
member_membership = self.group.groupmembership_set.get(userprofile__user=self.user_2)
# Make user 3 a pending member
self.user_3 = UserFactory.create()
self.group.add_member(self.user_3.userprofile, GroupMembership.PENDING)
pending_membership = self.group.groupmembership_set.get(userprofile__user=self.user_3)
url = urlparams(self.url, filtr='pending_members')
with self.login(self.user_1) as client:
response = client.get(url, follow=True)
people = response.context['people'].object_list
ok_(member_membership not in people, people)
ok_(pending_membership in people)
def test_filter_both(self):
"""If they specify both filters, they get all the members"""
# Make user 1 the group curator so they can see requests
self.group.curators.add(self.user_1.userprofile)
self.group.accepting_new_members = 'by_request'
self.group.save()
# Make user 2 a full member
self.group.add_member(self.user_2.userprofile, GroupMembership.MEMBER)
member_membership = self.group.groupmembership_set.get(userprofile__user=self.user_2)
# Make user 3 a pending member
self.user_3 = UserFactory.create()
self.group.add_member(self.user_3.userprofile, GroupMembership.PENDING)
pending_membership = self.group.groupmembership_set.get(userprofile__user=self.user_3)
url = urlparams(self.url, filtr='all')
with self.login(self.user_1) as client:
response = client.get(url, follow=True)
people = response.context['people'].object_list
ok_(member_membership in people, people)
ok_(pending_membership in people)
def test_filter_pending_ignored_when_accepting_new_members_yes(self):
"""
Filter `pending_members` will be ignored if group is not accepting
new members by request
"""
# Make user 1 the group curator so they can see requests
self.group.curators.add(self.user_1.userprofile)
self.group.accepting_new_members = 'yes'
self.group.save()
# Make user 2 a full member
self.group.add_member(self.user_2.userprofile, GroupMembership.MEMBER)
member_membership = self.group.groupmembership_set.get(userprofile__user=self.user_2)
url = urlparams(self.url, filtr='pending_members')
with self.login(self.user_1) as client:
response = client.get(url, follow=True)
people = response.context['people'].object_list
ok_(member_membership in people)
class TermsTests(TestCase):
def test_review_terms_page(self):
group = GroupFactory.create(terms='Example terms')
user = UserFactory.create()
group.add_member(user.userprofile, status=GroupMembership.PENDING_TERMS)
url = reverse('groups:review_terms', kwargs={'url': group.url}, prefix='/en-US/')
with self.login(user) as client:
response = client.get(url, follow=True)
eq_(response.status_code, 200)
self.assertTemplateUsed(response, 'groups/terms.html')
def test_accept_review_terms(self):
group = GroupFactory.create(terms='Example terms')
user = UserFactory.create()
group.add_member(user.userprofile, status=GroupMembership.PENDING_TERMS)
url = reverse('groups:review_terms', kwargs={'url': group.url}, prefix='/en-US/')
membership = GroupMembership.objects.get(group=group, userprofile=user.userprofile)
eq_(membership.status, GroupMembership.PENDING_TERMS)
data = {
'terms_accepted': True
}
with self.login(user) as client:
response = client.post(url, data=data, follow=True)
eq_(response.status_code, 200)
self.assertTemplateUsed(response, 'groups/group.html')
membership = GroupMembership.objects.get(group=group, userprofile=user.userprofile)
eq_(membership.status, GroupMembership.MEMBER)
def test_deny_review_terms(self):
group = GroupFactory.create(terms='Example terms')
user = UserFactory.create()
group.add_member(user.userprofile, GroupMembership.PENDING_TERMS)
url = reverse('groups:review_terms', kwargs={'url': group.url}, prefix='/en-US/')
membership = GroupMembership.objects.get(group=group, userprofile=user.userprofile)
eq_(membership.status, GroupMembership.PENDING_TERMS)
data = {
'terms_accepted': False
}
with self.login(user) as client:
response = client.post(url, data=data, follow=True)
eq_(response.status_code, 200)
self.assertTemplateUsed(response, 'groups/group.html')
membership = GroupMembership.objects.filter(group=group, userprofile=user.userprofile)
ok_(not membership.exists())
|
{
"content_hash": "0ff1da78bc19ad4d828415eb4d427736",
"timestamp": "",
"source": "github",
"line_count": 569,
"max_line_length": 95,
"avg_line_length": 43.300527240773285,
"alnum_prop": 0.641326406364153,
"repo_name": "anistark/mozillians",
"id": "b10f77a7b14bf951efc1950cdeec4e988a3b7d2b",
"size": "24638",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "mozillians/groups/tests/test_views/test_show.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "1986"
},
{
"name": "CSS",
"bytes": "210671"
},
{
"name": "HTML",
"bytes": "184994"
},
{
"name": "JavaScript",
"bytes": "154038"
},
{
"name": "Makefile",
"bytes": "478"
},
{
"name": "Python",
"bytes": "9184371"
},
{
"name": "Shell",
"bytes": "7758"
}
],
"symlink_target": ""
}
|
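# A small calculator built around a strategy-style design: Calculadora keeps
# a registry of Operacao objects keyed by their sign, and CalculadoraInfixa
# reads the operands and the sign from stdin. Extra operations can be plugged
# in through adicionar_operacao(); a hypothetical extension would look like:
#
#     class Multiplicacao(Operacao):
#         def calcular(self, op1, op2):
#             return op1 * op2
#
#     calculadora.adicionar_operacao("*", Multiplicacao())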
class Calculadora():
def __init__(self):
self.op1 = None
self.op2 = None
self.sinal = None
self.__operacoes = {}
def calcular(self):
operacao_escolhida = self.__operacoes.get(self.sinal)
return operacao_escolhida.calcular(self.op1, self.op2)
def operacoes_disponiveis(self):
return set(self.__operacoes.keys())
def adicionar_operacao(self, sinal, operacao):
self.__operacoes[sinal] = operacao
def obter_entradas(self):
raise NotImplementedError("obter_entradas é abstrato")
class CalculadoraInfixa(Calculadora):
def obter_entradas(self):
self.op1 = float(input("Insira o primeiro operando: "))
print("Operaçoes disponiveis: %s" % self.operacoes_disponiveis())
self.sinal = input("Insira a operacao: ")
self.op2 = float(input("Insira o segundo operando: "))
class Operacao():
def calcular(self, op1, op2):
raise NotImplementedError("calcular é abstrato")
class Adicao(Operacao):
def calcular(self, op1, op2):
return op1+op2
class Subtracao(Operacao):
def calcular(self, op1, op2):
return op1-op2
calculadora = CalculadoraInfixa()
calculadora.adicionar_operacao("+", Adicao())
calculadora.adicionar_operacao("-", Subtracao())
calculadora.obter_entradas()
print("O resultado é %s" % calculadora.calcular())
|
{
"content_hash": "815f4b4fd5fca8bb2f2fd2ae3db9224d",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 73,
"avg_line_length": 27.12280701754386,
"alnum_prop": 0.5847347994825356,
"repo_name": "renzon/evento-python-para-javeiros",
"id": "6ac5339ea2b774d0d516d320575e784a0e5d4f1b",
"size": "1550",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "projetopython/calculadora.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "4396"
},
{
"name": "Python",
"bytes": "2732"
}
],
"symlink_target": ""
}
|
"""
This file (test_crud.py) contains the unit tests for
CRUD operations in hydrus.data.crud.
"""
import random
import uuid
from hydra_python_core.doc_writer import HydraLink, DocUrl
import hydrus.data.crud as crud
from hydrus.data.exceptions import PropertyNotGiven
from tests.conftest import gen_dummy_object
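# The arguments used below (drone_doc, drone_doc_parsed_classes, session,
# constants) are assumed to be pytest fixtures provided by the test suite's
# conftest; gen_dummy_object builds a random instance of a parsed class so
# each test can insert fresh data.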
def test_crud_insert_response_is_str(drone_doc_parsed_classes, drone_doc, session,
init_db_for_crud_tests):
"""Test CRUD insert response is string"""
object_ = gen_dummy_object(random.choice(drone_doc_parsed_classes), drone_doc)
id_ = str(uuid.uuid4())
response = crud.insert(drone_doc, object_=object_, id_=id_, session=session)
assert isinstance(response, str)
def test_crud_get_returns_correct_object(drone_doc_parsed_classes, drone_doc, session):
"""Test CRUD get returns correct object"""
object_ = gen_dummy_object(random.choice(drone_doc_parsed_classes), drone_doc)
id_ = str(uuid.uuid4())
response = crud.insert(drone_doc, object_=object_, id_=id_, session=session)
object_ = crud.get(id_=id_, type_=object_['@type'], session=session, api_name='api')
assert isinstance(response, str)
assert object_['@id'].split('/')[-1] == id_
def test_get_for_nested_obj(drone_doc_parsed_classes, drone_doc, session, constants):
"""Test CRUD get operation for object that can contain other objects."""
expanded_base_url = DocUrl.doc_url
for class_ in drone_doc_parsed_classes:
for prop in drone_doc.parsed_classes[class_]['class'].supportedProperty:
if not isinstance(prop.prop, HydraLink):
if expanded_base_url in prop.prop:
dummy_obj = gen_dummy_object(class_, drone_doc)
nested_class = prop.prop.split(expanded_base_url)[1]
obj_id = str(uuid.uuid4())
response = crud.insert(drone_doc, object_=dummy_obj, id_=obj_id, session=session)
object_ = crud.get(id_=obj_id, type_=class_, session=session,
api_name='api')
assert prop.title in object_
nested_obj_id = object_[prop.title]
nested_obj = crud.get(id_=nested_obj_id, type_=nested_class,
session=session, api_name='api')
assert nested_obj['@id'].split('/')[-1] == nested_obj_id
break
def test_searching_over_collection_elements(drone_doc_parsed_classes, drone_doc, session):
"""Test searching over collection elements."""
expanded_base_url = DocUrl.doc_url
for class_ in drone_doc_parsed_classes:
target_property_1 = ''
target_property_2 = ''
for prop in drone_doc.parsed_classes[class_]['class'].supportedProperty:
if isinstance(prop.prop, HydraLink):
continue
# Find nested object so we can test searching of elements by
# properties of nested objects.
if expanded_base_url in prop.prop:
object_ = gen_dummy_object(class_, drone_doc)
# Setting property of a nested object as target
for property_ in object_[prop.title]:
if property_ != '@type':
object_[prop.title][property_] = 'target_1'
target_property_1 = '{}[{}]'.format(
prop.title, property_)
break
break
            elif target_property_1 != '':
for property_ in object_:
if property_ != '@type':
object_[property_] = 'target_2'
target_property_2 = property_
break
break
        if target_property_1 != '' and target_property_2 != '':
# Set search parameters
search_params = {
target_property_1: 'target_1',
target_property_2: 'target_2'
}
obj_id = str(uuid.uuid4())
response = crud.insert(
drone_doc, object_=object_, id_=obj_id, session=session)
search_result = crud.get_collection(API_NAME='api', type_=class_,
session=session, paginate=True,
page_size=5, search_params=search_params)
assert len(search_result['members']) > 0
search_item_id = search_result['members'][0]['@id'].split(
'/')[-1]
assert search_item_id == obj_id
break
def test_update_on_object(drone_doc_parsed_classes, drone_doc, session):
"""Test CRUD update on object"""
random_class = random.choice(drone_doc_parsed_classes)
object_ = gen_dummy_object(random_class, drone_doc)
new_object = gen_dummy_object(random_class, drone_doc)
id_ = str(uuid.uuid4())
insert_response = crud.insert(drone_doc, object_=object_, id_=id_, session=session)
update_response = crud.update(
drone_doc,
id_=id_,
type_=object_['@type'],
object_=new_object,
session=session,
api_name='api')
test_object = crud.get(id_=id_, type_=object_['@type'], session=session, api_name='api')
assert isinstance(insert_response, str)
assert isinstance(update_response, str)
assert insert_response == update_response
assert test_object['@id'].split('/')[-1] == id_
def test_delete_on_object(drone_doc_parsed_classes, drone_doc, session):
"""Test CRUD delete on object"""
object_ = gen_dummy_object(random.choice(drone_doc_parsed_classes), drone_doc)
id_ = str(uuid.uuid4())
insert_response = crud.insert(drone_doc, object_=object_, id_=id_, session=session)
assert isinstance(insert_response, str)
delete_response = crud.delete(id_=id_, type_=object_['@type'], session=session)
response_code = None
try:
get_response = crud.get(
id_=id_,
type_=object_['@type'],
session=session,
api_name='api')
except Exception as e:
error = e.get_HTTP()
response_code = error.code
assert 404 == response_code
def test_get_on_wrong_id(drone_doc_parsed_classes, drone_doc, session):
"""Test CRUD get when wrong/undefined ID is given."""
id_ = str(uuid.uuid4())
type_ = random.choice(drone_doc_parsed_classes)
response_code = None
try:
get_response = crud.get(id_=id_, type_=type_, session=session, api_name='api')
except Exception as e:
error = e.get_HTTP()
response_code = error.code
assert 404 == response_code
def test_delete_on_wrong_id(drone_doc_parsed_classes, drone_doc, session):
"""Test CRUD delete when wrong/undefined ID is given."""
object_ = gen_dummy_object(random.choice(
drone_doc_parsed_classes), drone_doc)
id_ = str(uuid.uuid4())
insert_response = crud.insert(drone_doc, object_=object_, id_=id_, session=session)
response_code = None
try:
delete_response = crud.delete(id_=999, type_=object_['@type'], session=session)
except Exception as e:
error = e.get_HTTP()
response_code = error.code
assert 404 == response_code
assert isinstance(insert_response, str)
assert insert_response == id_
def test_insert_used_id(drone_doc_parsed_classes, drone_doc, session):
"""Test CRUD insert when used ID is given."""
object_ = gen_dummy_object(random.choice(drone_doc_parsed_classes), drone_doc)
id_ = str(uuid.uuid4())
insert_response = crud.insert(drone_doc, object_=object_, id_=id_, session=session)
response_code = None
try:
insert_response = crud.insert(
drone_doc, object_=object_, id_=id_, session=session)
except Exception as e:
error = e.get_HTTP()
response_code = error.code
assert 400 == response_code
def test_get_on_wrong_type(session):
"""Test CRUD get when wrong/undefined class is given."""
id_ = str(uuid.uuid4())
type_ = 'otherClass'
response_code = None
try:
get_response = crud.get(id_=id_, type_=type_, session=session, api_name='api')
except Exception as e:
error = e.get_HTTP()
assert 400 == error.code
def test_delete_on_wrong_type(drone_doc_parsed_classes, drone_doc, session):
"""Test CRUD delete when wrong/undefined class is given."""
object_ = gen_dummy_object(random.choice(drone_doc_parsed_classes), drone_doc)
id_ = str(uuid.uuid4())
insert_response = crud.insert(drone_doc, object_=object_, id_=id_, session=session)
assert isinstance(insert_response, str)
assert insert_response == id_
response_code = None
try:
delete_response = crud.delete(id_=id_, type_='otherClass', session=session)
except Exception as e:
error = e.get_HTTP()
response_code = error.code
assert 400 == response_code
def test_insert_on_wrong_type(drone_doc_parsed_classes, drone_doc, session):
"""Test CRUD insert when wrong/undefined class is given."""
object_ = gen_dummy_object(random.choice(drone_doc_parsed_classes), drone_doc)
id_ = str(uuid.uuid4())
object_['@type'] = 'otherClass'
response_code = None
try:
insert_response = crud.insert(drone_doc, object_=object_, id_=id_, session=session)
except Exception as e:
error = e.get_HTTP()
response_code = error.code
assert 400 == response_code
def test_insert_multiple_id(drone_doc_parsed_classes, drone_doc, session):
"""Test CRUD insert when multiple ID's are given """
objects = list()
ids = '{},{}'.format(str(uuid.uuid4()), str(uuid.uuid4()))
ids_list = ids.split(',')
for index in range(len(ids_list)):
object = gen_dummy_object(random.choice(drone_doc_parsed_classes), drone_doc)
objects.append(object)
insert_response = crud.insert_multiple(drone_doc, objects_=objects, session=session, id_=ids)
for id_ in ids_list:
assert id_ in insert_response
def test_delete_multiple_id(drone_doc_parsed_classes, drone_doc, session):
"""Test CRUD insert when multiple ID's are given """
objects = list()
ids = '{},{}'.format(str(uuid.uuid4()), str(uuid.uuid4()))
random_class = random.choice(drone_doc_parsed_classes)
for index in range(len(ids.split(','))):
object = gen_dummy_object(random_class, drone_doc)
objects.append(object)
insert_response = crud.insert_multiple(drone_doc, objects_=objects, session=session, id_=ids)
delete_response = crud.delete_multiple(id_=ids, type_=random_class, session=session)
response_code = None
id_list = ids.split(',')
try:
for index in range(len(id_list)):
get_response = crud.get(
id_=id_list[index],
type_=objects[index]['@type'],
session=session,
api_name='api')
except Exception as e:
error = e.get_HTTP()
response_code = error.code
assert 404 == response_code
def test_insert_when_property_not_given(drone_doc_parsed_classes, drone_doc,
session, constants):
"""Test CRUD insert operation when a required foreign key
property of that resource(column in the table) not given"""
expanded_base_url = DocUrl.doc_url
for class_ in drone_doc_parsed_classes:
for prop in drone_doc.parsed_classes[class_]['class'].supportedProperty:
if not isinstance(prop.prop, HydraLink) and expanded_base_url in prop.prop:
dummy_obj = gen_dummy_object(class_, drone_doc)
nested_prop_title = prop.title
continue
# remove the foreign key resource on purpose for testing
dummy_obj.pop(nested_prop_title)
id_ = str(uuid.uuid4())
try:
insert_response = crud.insert(drone_doc, object_=dummy_obj, id_=id_, session=session)
except PropertyNotGiven as e:
error = e.get_HTTP()
response_code = error.code
assert 400 == response_code
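# Illustrative sketch (assumption, not part of the original suite): the
# try/except + response_code pattern used above could equivalently be written
# with pytest.raises, e.g.:
#
#     with pytest.raises(Exception) as exc_info:
#         crud.insert(drone_doc, object_=object_, id_=id_, session=session)
#     assert exc_info.value.get_HTTP().code == 400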
|
{
"content_hash": "a87e11142917c889e92e01ca82cf80f0",
"timestamp": "",
"source": "github",
"line_count": 291,
"max_line_length": 101,
"avg_line_length": 41.96907216494845,
"alnum_prop": 0.6036190944075984,
"repo_name": "HTTP-APIs/hydrus",
"id": "1887016274c94c392062e55597da16b370fdfe3e",
"size": "12213",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "tests/unit/test_crud.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "607"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "404729"
}
],
"symlink_target": ""
}
|
"""
This is a self-generated test created by scaffolding.py.
You will need to fill it in with all your necessities.
Safe hacking :).
"""
from zope.interface import implements
from twisted.python import usage
from twisted.plugin import IPlugin
from ooni.plugoo.tests import ITest, OONITest
from ooni.plugoo.assets import Asset
from ooni.protocols import http
from ooni.utils import log
class httptArgs(usage.Options):
optParameters = [['urls', 'f', None, 'Urls file'],
['url', 'u', 'http://torproject.org/', 'Test single site'],
['resume', 'r', 0, 'Resume at this index'],
['rules', 'y', None, 'Specify the redirect rules file']]
class httptTest(http.HTTPTest):
implements(IPlugin, ITest)
shortName = "httpt"
description = "httpt"
requirements = None
options = httptArgs
blocking = False
def testPattern(self, value, pattern, type):
if type == 'eq':
return value == pattern
elif type == 're':
import re
            return bool(re.match(pattern, value))
else:
return None
def testPatterns(self, patterns, location):
test_result = False
        if isinstance(patterns, list):
for pattern in patterns:
test_result |= self.testPattern(location, pattern['value'], pattern['type'])
else:
test_result |= self.testPattern(location, patterns['value'], patterns['type'])
return test_result
def testRules(self, rules, location):
result = {}
blocked = False
for rule, value in rules.items():
current_rule = {}
current_rule['name'] = value['name']
current_rule['patterns'] = value['patterns']
current_rule['test'] = self.testPatterns(value['patterns'], location)
blocked |= current_rule['test']
result[rule] = current_rule
result['blocked'] = blocked
return result
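    # Illustrative sketch (assumption, not from the original plugin): testRules
    # expects a mapping of rule name -> {'name': ..., 'patterns': ...}, where
    # 'patterns' is either a single {'value', 'type'} dict or a list of them, e.g.:
    #
    #     rules = {
    #         'block_page': {
    #             'name': 'ISP block page',
    #             'patterns': [{'value': 'http://block.example/.*', 'type': 're'}],
    #         }
    #     }
    #     redirect_result = self.testRules(rules, location)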
def processRedirect(self, location):
self.result['redirect'] = None
try:
rules_file = self.local_options['rules']
import yaml
            rules = yaml.safe_load(open(rules_file))
log.msg("Testing rules %s" % rules)
redirect = self.testRules(rules, location)
self.result['redirect'] = redirect
except TypeError:
log.msg("No rules file. Got a redirect, but nothing to do.")
def control(self, experiment_result, args):
print self.response
print self.request
# What you return here ends up inside of the report.
log.msg("Running control")
return {}
def load_assets(self):
if self.local_options and self.local_options['urls']:
return {'url': Asset(self.local_options['urls'])}
else:
return {}
# We need to instantiate it otherwise getPlugins does not detect it
# XXX Find a way to load plugins without instantiating them.
#httpt = httptTest(None, None, None)
|
{
"content_hash": "af44a718b7b73a141c34e32191462d46",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 92,
"avg_line_length": 33,
"alnum_prop": 0.5918762088974855,
"repo_name": "hackerberry/ooni-probe",
"id": "358f1eaae634af0f8a952b370a17bbe99aa4ae30",
"size": "3102",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "old-to-be-ported-code/old-api/httpt.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "519235"
},
{
"name": "Shell",
"bytes": "10418"
}
],
"symlink_target": ""
}
|
from rest_framework import serializers
from task_board.tasks.models import Task, TaskStatuses
class TaskSerializer(serializers.ModelSerializer):
created_by_username = serializers.StringRelatedField(source='created_by')
accomplished_by_username = serializers.StringRelatedField(source='accomplished_by')
status_readable = serializers.ReadOnlyField(source='get_status_display')
class Meta:
model = Task
fields = ['id', 'name', 'description', 'status',
'created_by', 'created_by_username',
'accomplished_by', 'accomplished_by_username',
'status_readable']
def __init__(self, *args, **kwargs):
# specify the user who accomplished the task if the status 'done' is set
if 'data' in kwargs and 'context' in kwargs and 'request' in kwargs['context']:
request = kwargs['context']['request']
data = kwargs['data']
if 'status' in data and str(data['status']) == str(TaskStatuses.DONE):
data = kwargs['data'].copy()
data.update({'accomplished_by': request.user.pk})
kwargs['data'] = data
super(TaskSerializer, self).__init__(*args, **kwargs)
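# Minimal usage sketch (assumption, not part of the original module): a DRF view
# that passes the incoming request into the serializer context, so __init__ above
# can read request.user when a task is submitted with status DONE.
from rest_framework import generics
class TaskListCreateView(generics.ListCreateAPIView):
    queryset = Task.objects.all()
    serializer_class = TaskSerializer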
|
{
"content_hash": "81178f5c3b9de09081724a29177a04b8",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 87,
"avg_line_length": 42.58620689655172,
"alnum_prop": 0.6234817813765182,
"repo_name": "AlexanderKaluzhny/taskboard",
"id": "8e31b2dac8489336cd1e06cecbc5aadc0d2594bb",
"size": "1235",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "task_board/tasks/serializers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3415"
},
{
"name": "HTML",
"bytes": "37109"
},
{
"name": "JavaScript",
"bytes": "80420"
},
{
"name": "Python",
"bytes": "71447"
},
{
"name": "Shell",
"bytes": "4497"
}
],
"symlink_target": ""
}
|
from __future__ import annotations
from typing import NamedTuple
from marshmallow import Schema, fields
from airflow.api_connexion.schemas.common_schema import (
ClassReferenceSchema,
ColorField,
TimeDeltaSchema,
WeightRuleField,
)
from airflow.api_connexion.schemas.dag_schema import DAGSchema
from airflow.models.operator import Operator
class TaskSchema(Schema):
"""Task schema"""
class_ref = fields.Method("_get_class_reference", dump_only=True)
operator_name = fields.Method("_get_operator_name", dump_only=True)
task_id = fields.String(dump_only=True)
owner = fields.String(dump_only=True)
start_date = fields.DateTime(dump_only=True)
end_date = fields.DateTime(dump_only=True)
trigger_rule = fields.String(dump_only=True)
extra_links = fields.List(
fields.Nested(ClassReferenceSchema), dump_only=True, attribute="operator_extra_links"
)
depends_on_past = fields.Boolean(dump_only=True)
wait_for_downstream = fields.Boolean(dump_only=True)
retries = fields.Number(dump_only=True)
queue = fields.String(dump_only=True)
pool = fields.String(dump_only=True)
pool_slots = fields.Number(dump_only=True)
execution_timeout = fields.Nested(TimeDeltaSchema, dump_only=True)
retry_delay = fields.Nested(TimeDeltaSchema, dump_only=True)
retry_exponential_backoff = fields.Boolean(dump_only=True)
priority_weight = fields.Number(dump_only=True)
weight_rule = WeightRuleField(dump_only=True)
ui_color = ColorField(dump_only=True)
ui_fgcolor = ColorField(dump_only=True)
template_fields = fields.List(fields.String(), dump_only=True)
sub_dag = fields.Nested(DAGSchema, dump_only=True)
downstream_task_ids = fields.List(fields.String(), dump_only=True)
params = fields.Method('get_params', dump_only=True)
is_mapped = fields.Boolean(dump_only=True)
def _get_class_reference(self, obj):
result = ClassReferenceSchema().dump(obj)
return result.data if hasattr(result, "data") else result
def _get_operator_name(self, obj):
return obj.operator_name
@staticmethod
def get_params(obj):
"""Get the Params defined in a Task"""
params = obj.params
return {k: v.dump() for k, v in params.items()}
class TaskCollection(NamedTuple):
"""List of Tasks with metadata"""
tasks: list[Operator]
total_entries: int
class TaskCollectionSchema(Schema):
"""Schema for TaskCollection"""
tasks = fields.List(fields.Nested(TaskSchema))
total_entries = fields.Int()
task_schema = TaskSchema()
task_collection_schema = TaskCollectionSchema()
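# Usage sketch (assumption, for illustration only; EmptyOperator is just an example
# operator and is not used elsewhere in this module):
#
#     from airflow.operators.empty import EmptyOperator
#     task = EmptyOperator(task_id="example")
#     single = task_schema.dump(task)
#     collection = task_collection_schema.dump(
#         TaskCollection(tasks=[task], total_entries=1))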
|
{
"content_hash": "b0a4299f1be7671f66fa1f6aa9836f89",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 93,
"avg_line_length": 33.94871794871795,
"alnum_prop": 0.7077039274924471,
"repo_name": "nathanielvarona/airflow",
"id": "89c1fa68c52acd22010b09df35f097eee34426a4",
"size": "3433",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "airflow/api_connexion/schemas/task_schema.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25980"
},
{
"name": "Dockerfile",
"bytes": "70681"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "173025"
},
{
"name": "JavaScript",
"bytes": "142848"
},
{
"name": "Jinja",
"bytes": "38895"
},
{
"name": "Jupyter Notebook",
"bytes": "5482"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "23169682"
},
{
"name": "R",
"bytes": "313"
},
{
"name": "Shell",
"bytes": "211967"
},
{
"name": "TypeScript",
"bytes": "484556"
}
],
"symlink_target": ""
}
|
from eutester import Eutester
import boto
from boto.ec2.regioninfo import RegionInfo
class CFNops(Eutester):
def __init__(self,
endpoint=None,
path=None,
port=None,
region=None,
credpath=None,
aws_access_key_id=None,
aws_secret_access_key=None,
is_secure=True,
boto_debug=0):
self.aws_access_key_id = aws_access_key_id
self.aws_secret_access_key = aws_secret_access_key
self.user_id = None
self.account_id = None
self.connection = None
super(CFNops, self).__init__(credpath=credpath)
self.setup_cfn_connection(endpoint=endpoint,
path=path,
port=port,
region=region,
aws_access_key_id=self.aws_access_key_id,
aws_secret_access_key=self.aws_secret_access_key,
is_secure=True,
boto_debug=0)
def setup_cfn_connection(self,
endpoint=None,
path="/",
port=443,
region=None,
aws_access_key_id=None,
aws_secret_access_key=None,
is_secure=True,
boto_debug=0):
cfn_region = RegionInfo()
if region:
self.debug("Check region: " + str(region))
try:
if not endpoint:
cfn_region.endpoint = "cloudformation.{0}.amazonaws.com".format(region)
else:
cfn_region.endpoint = endpoint
except KeyError:
                raise Exception('Unknown region: %s' % region)
else:
cfn_region.name = 'eucalyptus'
if endpoint:
cfn_region.endpoint = endpoint
else:
cfn_region.endpoint = self.get_cfn_ip()
try:
cfn_connection_args = { 'aws_access_key_id' : aws_access_key_id,
'aws_secret_access_key': aws_secret_access_key,
'is_secure': is_secure,
'debug':boto_debug,
'port' : port,
'path' : path,
'region' : cfn_region}
self.debug("Attempting to create cloudformation connection to " + self.get_cfn_ip() + ':' + str(port) + path)
self.connection = boto.connect_cloudformation(**cfn_connection_args)
except Exception, e:
self.critical("Was unable to create cloudformation connection because of exception: " + str(e))
def create_stack(self, stack_name, template_body, template_url=None, parameters=None):
self.info("Creating stack: {0}".format(stack_name))
self.connection.create_stack(stack_name, template_body, template_url=template_url, parameters=parameters)
def delete_stack(self, stack_name):
self.info("Deleting stack: {0}".format(stack_name))
self.connection.delete_stack(stack_name)
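# Usage sketch (assumption; the endpoint, credentials and template path below are
# placeholders, not real values):
#
#     cfn = CFNops(endpoint='cloudformation.example.com',
#                  aws_access_key_id='AKIA...',
#                  aws_secret_access_key='...')
#     cfn.create_stack('test-stack', template_body=open('template.json').read())
#     cfn.delete_stack('test-stack')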
|
{
"content_hash": "adf3f39f5a52296619ca4a7fcba734fc",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 121,
"avg_line_length": 43.166666666666664,
"alnum_prop": 0.4781704781704782,
"repo_name": "nephomaniac/eutester",
"id": "83a3000745186cec52f2ad997bf6f0cf6fee4f9b",
"size": "4849",
"binary": false,
"copies": "1",
"ref": "refs/heads/reorg1",
"path": "eutester/aws/cloudformation/cfnops.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Erlang",
"bytes": "15990"
},
{
"name": "Groovy",
"bytes": "162726"
},
{
"name": "HTML",
"bytes": "1792"
},
{
"name": "Java",
"bytes": "863698"
},
{
"name": "Python",
"bytes": "2054823"
},
{
"name": "RobotFramework",
"bytes": "4827"
}
],
"symlink_target": ""
}
|