text
stringlengths 4
1.02M
| meta
dict |
|---|---|
from tornado.gen import coroutine
from tornado.ioloop import IOLoop
from smpipi.tornado import ESME
def async_run(func):
ioloop = IOLoop()
ioloop.make_current()
work = coroutine(func)
ioloop.run_sync(work, timeout=5)
def test_bind(smsc):
@async_run
def work():
esme = ESME()
yield esme.connect('127.0.0.1', 30001)
resp = yield esme.wait_for(esme.bind_transceiver('boo', 'foo'))
assert resp.command_status == 0
esme.send_message(short_message='close')
yield esme.run()
def test_delivery(smsc):
@async_run
def work():
@coroutine
def deliver(request, response, reply):
assert request.short_message == b'foo'
reply()
resp = yield esme.wait_for(esme.send_message(short_message='close'))
assert resp.command_status == 0
esme = ESME()
esme.on_deliver = deliver
yield esme.connect('127.0.0.1', 30001)
esme.send_message(short_message='deliver foo')
yield esme.run()
|
{
"content_hash": "26c8dcadb8082fef2e6fd28bb8153b2c",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 80,
"avg_line_length": 26.25,
"alnum_prop": 0.6057142857142858,
"repo_name": "baverman/smpipi",
"id": "8d61ff0fc9349695df60f1cb4c0817d3c6855c87",
"size": "1050",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_tornado.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "46248"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('project', '0063_auto_20180803_1155'),
]
operations = [
migrations.AddField(
model_name='assessment',
name='is_a_committee_score',
field=models.BooleanField(default=False, help_text='If only one reviewer is selected, indicate that they are providing scores on behalf of a committee.'),
),
]
|
{
"content_hash": "1575f5de5cc2727e90ed7bf9d2357742",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 166,
"avg_line_length": 28.333333333333332,
"alnum_prop": 0.6450980392156863,
"repo_name": "unicef/un-partner-portal",
"id": "f60a02a25ab75fd7ce36a7ac46d4634152dd824d",
"size": "584",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "backend/unpp_api/apps/project/migrations/0064_assessment_is_a_committee_score.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "468629"
},
{
"name": "Dockerfile",
"bytes": "2303"
},
{
"name": "HTML",
"bytes": "49027"
},
{
"name": "JavaScript",
"bytes": "2199879"
},
{
"name": "Python",
"bytes": "1322681"
},
{
"name": "Shell",
"bytes": "4734"
},
{
"name": "Smarty",
"bytes": "751"
}
],
"symlink_target": ""
}
|
from .PBXItem import *
class PBXFileReference(PBX_Base_Reference):
def __init__(self, identifier, dictionary):
super(self.__class__, self).__init__(identifier, dictionary)
|
{
"content_hash": "2e997ae06077d2c12e9c70c7c308fa8a",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 68,
"avg_line_length": 36.8,
"alnum_prop": 0.6902173913043478,
"repo_name": "samdmarshall/pbProj",
"id": "5f6502772db4f15a02fabd8398a0379757672f89",
"size": "184",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "pbProj/PBXFileReference.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "26268"
}
],
"symlink_target": ""
}
|
"""Main program.
This file is based on @Greut (https://github.com/greut) 's main file
in his TravisBot project (https://github.com/greut/travisbot)
"""
import asyncio
import logging
import shutil
import discord
from discord.ext import commands
from pyfiglet import Figlet
from bot import AutomaBot
from tools import load_params
from web import make_app
def is_owner(ctx):
"""Check owner."""
if isinstance(ctx.message.channel, discord.PrivateChannel):
# Yes, I keep a "backdoor" in the bot
author = ctx.message.author
return author.name + "#" + author.discriminator == "Maël Pedretti#1416"
return ctx.message.channel.server.owner == ctx.message.author
async def main(token, queue, channel, prefix, desc):
"""Run main program."""
bot = AutomaBot(get=queue, update_channel=channel,
command_prefix=prefix,
description=desc, self_bot=False)
@bot.command(pass_context=True)
@commands.check(is_owner)
async def sleep(ctx):
await bot.change_presence(status=discord.Status.dnd, afk=True)
msg = 'Going to sleep. See you :wave:'
for comm in bot.commands:
if comm is not "wakeup":
bot.commands[comm].enabled = False
await bot.say(msg)
@bot.command(pass_context=True, hidden=True)
@commands.check(is_owner)
async def wakeup(ctx):
for comm in bot.commands:
if comm is not "wakeup":
bot.commands[comm].enabled = True
await bot.change_presence(status=discord.Status.online, afk=False)
msg = 'Goooooooooood morniiing vietnammmmmm :bomb:'
await bot.say(msg)
await bot.start(token)
if __name__ == "__main__":
"""Catch main function."""
params = load_params(param="bot")
HOST = params['HOST']
PORT = params['PORT']
token = params['token']
channel = params['update_channel_id']
prefix = params['bot_command_prefix']
desc = params['bot_description']
debug = False
queue = asyncio.Queue()
app = make_app(queue.put)
loop = asyncio.get_event_loop()
if debug:
loop.set_debug(True)
logging.getLogger('asyncio').setLevel(logging.DEBUG)
handler = app.make_handler()
loop.run_until_complete(app.startup())
server = loop.create_server(handler, host=HOST, port=PORT)
try:
srv = loop.run_until_complete(server)
terminal_width = shutil.get_terminal_size((80, 20))[0]
TOPBAR = f"/{'#' * (terminal_width - 2)}\\\n"
print(TOPBAR)
print(Figlet(font='banner').renderText(' AUTOMABOT'))
print("\\" + "#" * (terminal_width-2) + "/")
print(TOPBAR)
print(f"{f' Listening on: {HOST}:{PORT} ': ^{terminal_width}}")
loop.run_until_complete(main(token, queue.get, channel, prefix, desc))
except KeyboardInterrupt:
pass
srv.close()
loop.close()
|
{
"content_hash": "f44a609636ee870813aa873fb5fcdc43",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 79,
"avg_line_length": 29.575757575757574,
"alnum_prop": 0.6263661202185792,
"repo_name": "WyllVern/AutomaBot",
"id": "81feefcc96cb7c92a25c7f7631a03b6efd13030a",
"size": "2929",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "automabot/__main__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "15633"
},
{
"name": "Shell",
"bytes": "166"
}
],
"symlink_target": ""
}
|
"""Unit tests."""
import pytest
from google.rpc import status_pb2
from google.cloud import spanner_admin_database_v1
from google.cloud.spanner_admin_database_v1.proto import spanner_database_admin_pb2
from google.iam.v1 import iam_policy_pb2
from google.iam.v1 import policy_pb2
from google.longrunning import operations_pb2
from google.protobuf import empty_pb2
class MultiCallableStub(object):
"""Stub for the grpc.UnaryUnaryMultiCallable interface."""
def __init__(self, method, channel_stub):
self.method = method
self.channel_stub = channel_stub
def __call__(self, request, timeout=None, metadata=None, credentials=None):
self.channel_stub.requests.append((self.method, request))
response = None
if self.channel_stub.responses:
response = self.channel_stub.responses.pop()
if isinstance(response, Exception):
raise response
if response:
return response
class ChannelStub(object):
"""Stub for the grpc.Channel interface."""
def __init__(self, responses=[]):
self.responses = responses
self.requests = []
def unary_unary(self,
method,
request_serializer=None,
response_deserializer=None):
return MultiCallableStub(method, self)
class CustomException(Exception):
pass
class TestDatabaseAdminClient(object):
def test_list_databases(self):
# Setup Expected Response
next_page_token = ''
databases_element = {}
databases = [databases_element]
expected_response = {
'next_page_token': next_page_token,
'databases': databases
}
expected_response = spanner_database_admin_pb2.ListDatabasesResponse(
**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
client = spanner_admin_database_v1.DatabaseAdminClient(channel=channel)
# Setup Request
parent = client.instance_path('[PROJECT]', '[INSTANCE]')
paged_list_response = client.list_databases(parent)
resources = list(paged_list_response)
assert len(resources) == 1
assert expected_response.databases[0] == resources[0]
assert len(channel.requests) == 1
expected_request = spanner_database_admin_pb2.ListDatabasesRequest(
parent=parent)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_list_databases_exception(self):
channel = ChannelStub(responses=[CustomException()])
client = spanner_admin_database_v1.DatabaseAdminClient(channel=channel)
# Setup request
parent = client.instance_path('[PROJECT]', '[INSTANCE]')
paged_list_response = client.list_databases(parent)
with pytest.raises(CustomException):
list(paged_list_response)
def test_create_database(self):
# Setup Expected Response
name = 'name3373707'
expected_response = {'name': name}
expected_response = spanner_database_admin_pb2.Database(
**expected_response)
operation = operations_pb2.Operation(
name='operations/test_create_database', done=True)
operation.response.Pack(expected_response)
# Mock the API response
channel = ChannelStub(responses=[operation])
client = spanner_admin_database_v1.DatabaseAdminClient(channel=channel)
# Setup Request
parent = client.instance_path('[PROJECT]', '[INSTANCE]')
create_statement = 'createStatement552974828'
response = client.create_database(parent, create_statement)
result = response.result()
assert expected_response == result
assert len(channel.requests) == 1
expected_request = spanner_database_admin_pb2.CreateDatabaseRequest(
parent=parent, create_statement=create_statement)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_create_database_exception(self):
# Setup Response
error = status_pb2.Status()
operation = operations_pb2.Operation(
name='operations/test_create_database_exception', done=True)
operation.error.CopyFrom(error)
# Mock the API response
channel = ChannelStub(responses=[operation])
client = spanner_admin_database_v1.DatabaseAdminClient(channel=channel)
# Setup Request
parent = client.instance_path('[PROJECT]', '[INSTANCE]')
create_statement = 'createStatement552974828'
response = client.create_database(parent, create_statement)
exception = response.exception()
assert exception.errors[0] == error
def test_get_database(self):
# Setup Expected Response
name_2 = 'name2-1052831874'
expected_response = {'name': name_2}
expected_response = spanner_database_admin_pb2.Database(
**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
client = spanner_admin_database_v1.DatabaseAdminClient(channel=channel)
# Setup Request
name = client.database_path('[PROJECT]', '[INSTANCE]', '[DATABASE]')
response = client.get_database(name)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = spanner_database_admin_pb2.GetDatabaseRequest(
name=name)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_get_database_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
client = spanner_admin_database_v1.DatabaseAdminClient(channel=channel)
# Setup request
name = client.database_path('[PROJECT]', '[INSTANCE]', '[DATABASE]')
with pytest.raises(CustomException):
client.get_database(name)
def test_update_database_ddl(self):
# Setup Expected Response
expected_response = {}
expected_response = empty_pb2.Empty(**expected_response)
operation = operations_pb2.Operation(
name='operations/test_update_database_ddl', done=True)
operation.response.Pack(expected_response)
# Mock the API response
channel = ChannelStub(responses=[operation])
client = spanner_admin_database_v1.DatabaseAdminClient(channel=channel)
# Setup Request
database = client.database_path('[PROJECT]', '[INSTANCE]',
'[DATABASE]')
statements = []
response = client.update_database_ddl(database, statements)
result = response.result()
assert expected_response == result
assert len(channel.requests) == 1
expected_request = spanner_database_admin_pb2.UpdateDatabaseDdlRequest(
database=database, statements=statements)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_update_database_ddl_exception(self):
# Setup Response
error = status_pb2.Status()
operation = operations_pb2.Operation(
name='operations/test_update_database_ddl_exception', done=True)
operation.error.CopyFrom(error)
# Mock the API response
channel = ChannelStub(responses=[operation])
client = spanner_admin_database_v1.DatabaseAdminClient(channel=channel)
# Setup Request
database = client.database_path('[PROJECT]', '[INSTANCE]',
'[DATABASE]')
statements = []
response = client.update_database_ddl(database, statements)
exception = response.exception()
assert exception.errors[0] == error
def test_drop_database(self):
channel = ChannelStub()
client = spanner_admin_database_v1.DatabaseAdminClient(channel=channel)
# Setup Request
database = client.database_path('[PROJECT]', '[INSTANCE]',
'[DATABASE]')
client.drop_database(database)
assert len(channel.requests) == 1
expected_request = spanner_database_admin_pb2.DropDatabaseRequest(
database=database)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_drop_database_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
client = spanner_admin_database_v1.DatabaseAdminClient(channel=channel)
# Setup request
database = client.database_path('[PROJECT]', '[INSTANCE]',
'[DATABASE]')
with pytest.raises(CustomException):
client.drop_database(database)
def test_get_database_ddl(self):
# Setup Expected Response
expected_response = {}
expected_response = spanner_database_admin_pb2.GetDatabaseDdlResponse(
**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
client = spanner_admin_database_v1.DatabaseAdminClient(channel=channel)
# Setup Request
database = client.database_path('[PROJECT]', '[INSTANCE]',
'[DATABASE]')
response = client.get_database_ddl(database)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = spanner_database_admin_pb2.GetDatabaseDdlRequest(
database=database)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_get_database_ddl_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
client = spanner_admin_database_v1.DatabaseAdminClient(channel=channel)
# Setup request
database = client.database_path('[PROJECT]', '[INSTANCE]',
'[DATABASE]')
with pytest.raises(CustomException):
client.get_database_ddl(database)
def test_set_iam_policy(self):
# Setup Expected Response
version = 351608024
etag = b'21'
expected_response = {'version': version, 'etag': etag}
expected_response = policy_pb2.Policy(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
client = spanner_admin_database_v1.DatabaseAdminClient(channel=channel)
# Setup Request
resource = client.database_path('[PROJECT]', '[INSTANCE]',
'[DATABASE]')
policy = {}
response = client.set_iam_policy(resource, policy)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = iam_policy_pb2.SetIamPolicyRequest(
resource=resource, policy=policy)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_set_iam_policy_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
client = spanner_admin_database_v1.DatabaseAdminClient(channel=channel)
# Setup request
resource = client.database_path('[PROJECT]', '[INSTANCE]',
'[DATABASE]')
policy = {}
with pytest.raises(CustomException):
client.set_iam_policy(resource, policy)
def test_get_iam_policy(self):
# Setup Expected Response
version = 351608024
etag = b'21'
expected_response = {'version': version, 'etag': etag}
expected_response = policy_pb2.Policy(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
client = spanner_admin_database_v1.DatabaseAdminClient(channel=channel)
# Setup Request
resource = client.database_path('[PROJECT]', '[INSTANCE]',
'[DATABASE]')
response = client.get_iam_policy(resource)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = iam_policy_pb2.GetIamPolicyRequest(
resource=resource)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_get_iam_policy_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
client = spanner_admin_database_v1.DatabaseAdminClient(channel=channel)
# Setup request
resource = client.database_path('[PROJECT]', '[INSTANCE]',
'[DATABASE]')
with pytest.raises(CustomException):
client.get_iam_policy(resource)
def test_test_iam_permissions(self):
# Setup Expected Response
expected_response = {}
expected_response = iam_policy_pb2.TestIamPermissionsResponse(
**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
client = spanner_admin_database_v1.DatabaseAdminClient(channel=channel)
# Setup Request
resource = client.database_path('[PROJECT]', '[INSTANCE]',
'[DATABASE]')
permissions = []
response = client.test_iam_permissions(resource, permissions)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = iam_policy_pb2.TestIamPermissionsRequest(
resource=resource, permissions=permissions)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_test_iam_permissions_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
client = spanner_admin_database_v1.DatabaseAdminClient(channel=channel)
# Setup request
resource = client.database_path('[PROJECT]', '[INSTANCE]',
'[DATABASE]')
permissions = []
with pytest.raises(CustomException):
client.test_iam_permissions(resource, permissions)
|
{
"content_hash": "7e22f3c1eead155cac4f96295b5e087b",
"timestamp": "",
"source": "github",
"line_count": 397,
"max_line_length": 83,
"avg_line_length": 36.65239294710327,
"alnum_prop": 0.6288914851212976,
"repo_name": "tseaver/gcloud-python",
"id": "b3f9d90cea0868e0cbc0ee5358ab78ddb56b1324",
"size": "15152",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "spanner/tests/unit/gapic/v1/test_database_admin_client_v1.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "PowerShell",
"bytes": "7195"
},
{
"name": "Protocol Buffer",
"bytes": "93642"
},
{
"name": "Python",
"bytes": "2874989"
},
{
"name": "Shell",
"bytes": "4436"
}
],
"symlink_target": ""
}
|
from copy import deepcopy
import httplib as http
import time
import mock
import pytest
import pytz
import datetime
from nose.tools import * # noqa
from tests.base import OsfTestCase, fake
from osf_tests.factories import (
UserFactory, NodeFactory, ProjectFactory,
AuthUserFactory, RegistrationFactory
)
from addons.wiki.tests.factories import WikiFactory, WikiVersionFactory
from website.exceptions import NodeStateError
from addons.wiki import settings
from addons.wiki import views
from addons.wiki.exceptions import InvalidVersionError
from addons.wiki.models import WikiPage, WikiVersion, render_content
from addons.wiki.utils import (
get_sharejs_uuid, generate_private_uuid, share_db, delete_share_doc,
migrate_uuid, format_wiki_version, serialize_wiki_settings, serialize_wiki_widget
)
from framework.auth import Auth
from django.utils import timezone
from addons.wiki.utils import to_mongo_key
from .config import EXAMPLE_DOCS, EXAMPLE_OPS
pytestmark = pytest.mark.django_db
# forward slashes are not allowed, typically they would be replaced with spaces
SPECIAL_CHARACTERS_ALL = u'`~!@#$%^*()-=_+ []{}\|/?.df,;:''"'
SPECIAL_CHARACTERS_ALLOWED = u'`~!@#$%^*()-=_+ []{}\|?.df,;:''"'
class TestWikiViews(OsfTestCase):
def setUp(self):
super(TestWikiViews, self).setUp()
self.user = AuthUserFactory()
self.project = ProjectFactory(is_public=True, creator=self.user)
self.consolidate_auth = Auth(user=self.project.creator)
def test_wiki_url_get_returns_200(self):
url = self.project.web_url_for('project_wiki_view', wname='home')
res = self.app.get(url)
assert_equal(res.status_code, 200)
def test_wiki_url_404_with_no_write_permission(self): # and not public
url = self.project.web_url_for('project_wiki_view', wname='somerandomid')
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 200)
res = self.app.get(url, expect_errors=True)
assert_equal(res.status_code, 404)
@mock.patch('addons.wiki.utils.broadcast_to_sharejs')
def test_wiki_deleted_404_with_no_write_permission(self, mock_sharejs):
self.project.update_node_wiki('funpage', 'Version 1', Auth(self.user))
self.project.save()
url = self.project.web_url_for('project_wiki_view', wname='funpage')
res = self.app.get(url)
assert_equal(res.status_code, 200)
delete_url = self.project.api_url_for('project_wiki_delete', wname='funpage')
self.app.delete(delete_url, auth=self.user.auth)
res = self.app.get(url, expect_errors=True)
assert_equal(res.status_code, 404)
def test_wiki_url_with_path_get_returns_200(self):
self.project.update_node_wiki('funpage', 'Version 1', Auth(self.user))
self.project.update_node_wiki('funpage', 'Version 2', Auth(self.user))
self.project.save()
url = self.project.web_url_for(
'project_wiki_view',
wname='funpage',
) + '?view&compare=1&edit'
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 200)
def test_wiki_url_with_edit_get_redirects_to_no_edit_params_with_no_write_permission(self):
self.project.update_node_wiki('funpage', 'Version 1', Auth(self.user))
self.project.update_node_wiki('funpage', 'Version 2', Auth(self.user))
self.project.save()
url = self.project.web_url_for(
'project_wiki_view',
wname='funpage',
compare=1,
)
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 200)
# Public project, can_view, redirects without edit params
url = self.project.web_url_for(
'project_wiki_view',
wname='funpage',
) + '?edit'
res = self.app.get(url).maybe_follow()
assert_equal(res.status_code, 200)
# Check publicly editable
wiki = self.project.get_addon('wiki')
wiki.set_editing(permissions=True, auth=self.consolidate_auth, log=True)
res = self.app.get(url, auth=AuthUserFactory().auth, expect_errors=False)
assert_equal(res.status_code, 200)
# Check publicly editable but not logged in
res = self.app.get(url, expect_errors=True)
assert_equal(res.status_code, 401)
def test_wiki_url_for_pointer_returns_200(self):
# TODO: explain how this tests a pointer
project = ProjectFactory(is_public=True)
self.project.add_pointer(project, Auth(self.project.creator), save=True)
url = self.project.web_url_for('project_wiki_view', wname='home')
res = self.app.get(url)
assert_equal(res.status_code, 200)
@pytest.mark.skip('#TODO: Fix or mock mongodb for sharejs')
def test_wiki_draft_returns_200(self):
url = self.project.api_url_for('wiki_page_draft', wname='somerandomid')
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 200)
def test_wiki_content_returns_200(self):
url = self.project.api_url_for('wiki_page_content', wname='somerandomid')
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 200)
@mock.patch('addons.wiki.models.WikiVersion.rendered_before_update', new_callable=mock.PropertyMock)
def test_wiki_content_rendered_before_update(self, mock_rendered_before_update):
content = 'Some content'
self.project.update_node_wiki('somerandomid', content, Auth(self.user))
self.project.save()
mock_rendered_before_update.return_value = True
url = self.project.api_url_for('wiki_page_content', wname='somerandomid')
res = self.app.get(url, auth=self.user.auth)
assert_true(res.json['rendered_before_update'])
mock_rendered_before_update.return_value = False
res = self.app.get(url, auth=self.user.auth)
assert_false(res.json['rendered_before_update'])
def test_wiki_url_for_component_returns_200(self):
component = NodeFactory(parent=self.project, is_public=True)
url = component.web_url_for('project_wiki_view', wname='home')
res = self.app.get(url)
assert_equal(res.status_code, 200)
def test_project_wiki_edit_post(self):
self.project.update_node_wiki(
'home',
content='old content',
auth=Auth(self.project.creator)
)
url = self.project.web_url_for('project_wiki_edit_post', wname='home')
res = self.app.post(url, {'content': 'new content'}, auth=self.user.auth).follow()
assert_equal(res.status_code, 200)
self.project.reload()
# page was updated with new content
new_wiki = self.project.get_wiki_version('home')
assert_equal(new_wiki.content, 'new content')
def test_project_wiki_edit_post_with_new_wname_and_no_content(self):
# note: forward slashes not allowed in page_name
page_name = fake.catch_phrase().replace('/', ' ')
old_wiki_page_count = WikiVersion.objects.all().count()
url = self.project.web_url_for('project_wiki_edit_post', wname=page_name)
# User submits to edit form with no content
res = self.app.post(url, {'content': ''}, auth=self.user.auth).follow()
assert_equal(res.status_code, 200)
new_wiki_page_count = WikiVersion.objects.all().count()
# A new wiki page was created in the db
assert_equal(new_wiki_page_count, old_wiki_page_count + 1)
# Node now has the new wiki page associated with it
self.project.reload()
new_page = self.project.get_wiki_version(page_name)
assert_is_not_none(new_page)
def test_project_wiki_edit_post_with_new_wname_and_content(self):
# note: forward slashes not allowed in page_name
page_name = fake.catch_phrase().replace('/', ' ')
page_content = fake.bs()
old_wiki_page_count = WikiVersion.objects.all().count()
url = self.project.web_url_for('project_wiki_edit_post', wname=page_name)
# User submits to edit form with no content
res = self.app.post(url, {'content': page_content}, auth=self.user.auth).follow()
assert_equal(res.status_code, 200)
new_wiki_page_count = WikiVersion.objects.all().count()
# A new wiki page was created in the db
assert_equal(new_wiki_page_count, old_wiki_page_count + 1)
# Node now has the new wiki page associated with it
self.project.reload()
new_page = self.project.get_wiki_version(page_name)
assert_is_not_none(new_page)
# content was set
assert_equal(new_page.content, page_content)
def test_project_wiki_edit_post_with_non_ascii_title(self):
# regression test for https://github.com/CenterForOpenScience/openscienceframework.org/issues/1040
# wname doesn't exist in the db, so it will be created
new_wname = u'øˆ∆´ƒøßå√ß'
url = self.project.web_url_for('project_wiki_edit_post', wname=new_wname)
res = self.app.post(url, {'content': 'new content'}, auth=self.user.auth).follow()
assert_equal(res.status_code, 200)
self.project.reload()
wiki = self.project.get_wiki_page(new_wname)
assert_equal(wiki.page_name, new_wname)
# updating content should return correct url as well.
res = self.app.post(url, {'content': 'updated content'}, auth=self.user.auth).follow()
assert_equal(res.status_code, 200)
def test_project_wiki_edit_post_with_special_characters(self):
new_wname = 'title: ' + SPECIAL_CHARACTERS_ALLOWED
new_wiki_content = 'content: ' + SPECIAL_CHARACTERS_ALL
url = self.project.web_url_for('project_wiki_edit_post', wname=new_wname)
res = self.app.post(url, {'content': new_wiki_content}, auth=self.user.auth).follow()
assert_equal(res.status_code, 200)
self.project.reload()
wiki = self.project.get_wiki_version(new_wname)
assert_equal(wiki.wiki_page.page_name, new_wname)
assert_equal(wiki.content, new_wiki_content)
assert_equal(res.status_code, 200)
def test_wiki_edit_get_home(self):
url = self.project.web_url_for('project_wiki_view', wname='home')
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 200)
def test_project_wiki_view_scope(self):
self.project.update_node_wiki('home', 'Version 1', Auth(self.user))
self.project.update_node_wiki('home', 'Version 2', Auth(self.user))
self.project.save()
url = self.project.web_url_for('project_wiki_view', wname='home', view=2)
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 200)
url = self.project.web_url_for('project_wiki_view', wname='home', view=3)
res = self.app.get(url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
url = self.project.web_url_for('project_wiki_view', wname='home', view=0)
res = self.app.get(url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
def test_project_wiki_compare_returns_200(self):
self.project.update_node_wiki('home', 'updated content', Auth(self.user))
self.project.save()
url = self.project.web_url_for('project_wiki_view', wname='home') + '?compare'
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 200)
def test_project_wiki_compare_scope(self):
self.project.update_node_wiki('home', 'Version 1', Auth(self.user))
self.project.update_node_wiki('home', 'Version 2', Auth(self.user))
self.project.save()
url = self.project.web_url_for('project_wiki_view', wname='home', compare=2)
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 200)
url = self.project.web_url_for('project_wiki_view', wname='home', compare=3)
res = self.app.get(url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
url = self.project.web_url_for('project_wiki_view', wname='home', compare=0)
res = self.app.get(url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
def test_wiki_page_creation_strips_whitespace(self):
# Regression test for:
# https://github.com/CenterForOpenScience/openscienceframework.org/issues/1080
# wname has a trailing space
url = self.project.web_url_for('project_wiki_view', wname='cupcake ')
res = self.app.post(url, {'content': 'blah'}, auth=self.user.auth).follow()
assert_equal(res.status_code, 200)
self.project.reload()
wiki = self.project.get_wiki_version('cupcake')
assert_is_not_none(wiki)
def test_wiki_validate_name(self):
url = self.project.api_url_for('project_wiki_validate_name', wname='Capslock')
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 200)
def test_wiki_validate_name_creates_blank_page(self):
url = self.project.api_url_for('project_wiki_validate_name', wname='newpage', auth=self.consolidate_auth)
self.app.get(url, auth=self.user.auth)
self.project.reload()
assert_is_not_none(self.project.get_wiki_page('newpage'))
def test_wiki_validate_name_collision_doesnt_clear(self):
self.project.update_node_wiki('oldpage', 'some text', self.consolidate_auth)
url = self.project.api_url_for('project_wiki_validate_name', wname='oldpage', auth=self.consolidate_auth)
res = self.app.get(url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 409)
url = self.project.api_url_for('wiki_page_content', wname='oldpage', auth=self.consolidate_auth)
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.json['wiki_content'], 'some text')
def test_wiki_validate_name_cannot_create_home(self):
url = self.project.api_url_for('project_wiki_validate_name', wname='home')
res = self.app.get(url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 409)
def test_project_wiki_validate_name_mixed_casing(self):
url = self.project.api_url_for('project_wiki_validate_name', wname='CaPsLoCk')
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 200)
self.project.update_node_wiki('CaPsLoCk', 'hello', self.consolidate_auth)
assert_equal(self.project.get_wiki_page('CaPsLoCk').page_name, 'CaPsLoCk')
def test_project_wiki_validate_name_display_correct_capitalization(self):
url = self.project.api_url_for('project_wiki_validate_name', wname='CaPsLoCk')
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_in('CaPsLoCk', res)
def test_project_wiki_validate_name_conflict_different_casing(self):
url = self.project.api_url_for('project_wiki_validate_name', wname='CAPSLOCK')
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 200)
self.project.update_node_wiki('CaPsLoCk', 'hello', self.consolidate_auth)
url = self.project.api_url_for('project_wiki_validate_name', wname='capslock')
res = self.app.get(url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 409)
def test_project_dashboard_shows_no_wiki_content_text(self):
# Regression test for:
# https://github.com/CenterForOpenScience/openscienceframework.org/issues/1104
project = ProjectFactory(creator=self.user)
url = project.web_url_for('view_project')
res = self.app.get(url, auth=self.user.auth)
assert_in('Add important information, links, or images here to describe your project.', res)
def test_project_dashboard_wiki_wname_get_shows_non_ascii_characters(self):
# Regression test for:
# https://github.com/CenterForOpenScience/openscienceframework.org/issues/1104
text = u'你好'
self.project.update_node_wiki('home', text, Auth(self.user))
# can view wiki preview from project dashboard
url = self.project.web_url_for('view_project')
res = self.app.get(url, auth=self.user.auth)
assert_in(text, res)
def test_project_wiki_home_api_route(self):
url = self.project.api_url_for('project_wiki_home')
res = self.app.get(url, auth=self.user.auth)
assert_equals(res.status_code, 302)
# TODO: should this route exist? it redirects you to the web_url_for, not api_url_for.
# page_url = self.project.api_url_for('project_wiki_view', wname='home')
# assert_in(page_url, res.location)
def test_project_wiki_home_web_route(self):
page_url = self.project.web_url_for('project_wiki_view', wname='home', _guid=True)
url = self.project.web_url_for('project_wiki_home')
res = self.app.get(url, auth=self.user.auth)
assert_equals(res.status_code, 302)
assert_in(page_url, res.location)
def test_wiki_id_url_get_returns_302_and_resolves(self):
name = 'page by id'
self.project.update_node_wiki(name, 'some content', Auth(self.project.creator))
page = self.project.get_wiki_page(name)
page_url = self.project.web_url_for('project_wiki_view', wname=page.page_name, _guid=True)
url = self.project.web_url_for('project_wiki_id_page', wid=page._primary_key, _guid=True)
res = self.app.get(url)
assert_equal(res.status_code, 302)
assert_in(page_url, res.location)
res = res.follow()
assert_equal(res.status_code, 200)
assert_in(page_url, res.request.url)
def test_wiki_id_url_get_returns_404(self):
url = self.project.web_url_for('project_wiki_id_page', wid='12345', _guid=True)
res = self.app.get(url, expect_errors=True)
assert_equal(res.status_code, 404)
def test_home_is_capitalized_in_web_view(self):
url = self.project.web_url_for('project_wiki_home', wid='home', _guid=True)
res = self.app.get(url, auth=self.user.auth).follow(auth=self.user.auth)
page_name_elem = res.html.find('span', {'id': 'pageName'})
assert_in('Home', page_name_elem.text)
def test_wiki_widget_no_content(self):
res = serialize_wiki_widget(self.project)
assert_is_none(res['wiki_content'])
def test_wiki_widget_short_content_no_cutoff(self):
short_content = 'a' * 150
self.project.update_node_wiki('home', short_content, Auth(self.user))
res = serialize_wiki_widget(self.project)
assert_in(short_content, res['wiki_content'])
assert_not_in('...', res['wiki_content'])
assert_false(res['more'])
def test_wiki_widget_long_content_cutoff(self):
long_content = 'a' * 600
self.project.update_node_wiki('home', long_content, Auth(self.user))
res = serialize_wiki_widget(self.project)
assert_less(len(res['wiki_content']), 520) # wiggle room for closing tags
assert_in('...', res['wiki_content'])
assert_true(res['more'])
def test_wiki_widget_with_multiple_short_pages_has_more(self):
project = ProjectFactory(is_public=True, creator=self.user)
short_content = 'a' * 150
project.update_node_wiki('home', short_content, Auth(self.user))
project.update_node_wiki('andanotherone', short_content, Auth(self.user))
res = serialize_wiki_widget(project)
assert_true(res['more'])
@mock.patch('addons.wiki.models.WikiVersion.rendered_before_update', new_callable=mock.PropertyMock)
def test_wiki_widget_rendered_before_update(self, mock_rendered_before_update):
# New pages use js renderer
mock_rendered_before_update.return_value = False
self.project.update_node_wiki('home', 'updated content', Auth(self.user))
res = serialize_wiki_widget(self.project)
assert_false(res['rendered_before_update'])
# Old pages use a different version of js render
mock_rendered_before_update.return_value = True
res = serialize_wiki_widget(self.project)
assert_true(res['rendered_before_update'])
def test_read_only_users_cannot_view_edit_pane(self):
url = self.project.web_url_for('project_wiki_view', wname='home')
# No write permissions
res = self.app.get(url)
assert_equal(res.status_code, 200)
assert_not_in('data-osf-panel="Edit"', res.text)
# Write permissions
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_in('data-osf-panel="Edit"', res.text)
# Publicly editable
wiki = self.project.get_addon('wiki')
wiki.set_editing(permissions=True, auth=self.consolidate_auth, log=True)
res = self.app.get(url, auth=AuthUserFactory().auth)
assert_equal(res.status_code, 200)
assert_in('data-osf-panel="Edit"', res.text)
# Publicly editable but not logged in
res = self.app.get(url)
assert_equal(res.status_code, 200)
assert_not_in('data-osf-panel="Edit"', res.text)
def test_wiki_widget_not_show_in_registration_for_contributor(self):
registration = RegistrationFactory(project=self.project)
res = self.app.get(
registration.web_url_for('view_project'),
auth=self.user.auth
)
assert_equal(res.status_code, 200)
assert_not_in('Add important information, links, or images here to describe your project.', res.text)
class TestViewHelpers(OsfTestCase):
def setUp(self):
super(TestViewHelpers, self).setUp()
self.project = ProjectFactory()
self.wname = 'New page'
self.project.update_node_wiki(self.wname, 'some content', Auth(self.project.creator))
def test_get_wiki_web_urls(self):
urls = views._get_wiki_web_urls(self.project, self.wname)
assert_equal(urls['base'], self.project.web_url_for('project_wiki_home', _guid=True))
assert_equal(urls['edit'], self.project.web_url_for('project_wiki_view', wname=self.wname, _guid=True))
assert_equal(urls['home'], self.project.web_url_for('project_wiki_home', _guid=True))
assert_equal(urls['page'], self.project.web_url_for('project_wiki_view', wname=self.wname, _guid=True))
def test_get_wiki_api_urls(self):
urls = views._get_wiki_api_urls(self.project, self.wname)
assert_equal(urls['base'], self.project.api_url_for('project_wiki_home'))
assert_equal(urls['delete'], self.project.api_url_for('project_wiki_delete', wname=self.wname))
assert_equal(urls['rename'], self.project.api_url_for('project_wiki_rename', wname=self.wname))
assert_equal(urls['content'], self.project.api_url_for('wiki_page_content', wname=self.wname))
assert_equal(urls['settings'], self.project.api_url_for('edit_wiki_settings'))
class TestWikiDelete(OsfTestCase):
def setUp(self):
super(TestWikiDelete, self).setUp()
creator = AuthUserFactory()
self.project = ProjectFactory(is_public=True, creator=creator)
self.consolidate_auth = Auth(user=self.project.creator)
self.auth = creator.auth
self.project.update_node_wiki('Elephants', 'Hello Elephants', self.consolidate_auth)
self.project.update_node_wiki('Lions', 'Hello Lions', self.consolidate_auth)
self.elephant_wiki = self.project.get_wiki_page('Elephants')
self.lion_wiki = self.project.get_wiki_page('Lions')
@mock.patch('addons.wiki.utils.broadcast_to_sharejs')
def test_project_wiki_delete(self, mock_shrejs):
page = self.elephant_wiki
assert_equal(page.page_name.lower(), 'elephants')
assert_equal(page.deleted, None)
url = self.project.api_url_for(
'project_wiki_delete',
wname='Elephants'
)
mock_now = datetime.datetime(2017, 3, 16, 11, 00, tzinfo=pytz.utc)
with mock.patch.object(timezone, 'now', return_value=mock_now):
self.app.delete(
url,
auth=self.auth
)
self.project.reload()
page.reload()
assert_equal(page.deleted, mock_now)
@mock.patch('addons.wiki.utils.broadcast_to_sharejs')
def test_project_wiki_delete_w_valid_special_characters(self, mock_sharejs):
# TODO: Need to understand why calling update_node_wiki with failure causes transaction rollback issue later
# with assert_raises(NameInvalidError):
# self.project.update_node_wiki(SPECIAL_CHARACTERS_ALL, 'Hello Special Characters', self.consolidate_auth)
self.project.update_node_wiki(SPECIAL_CHARACTERS_ALLOWED, 'Hello Special Characters', self.consolidate_auth)
self.special_characters_wiki = self.project.get_wiki_page(SPECIAL_CHARACTERS_ALLOWED)
assert_equal(self.special_characters_wiki.page_name, SPECIAL_CHARACTERS_ALLOWED)
url = self.project.api_url_for(
'project_wiki_delete',
wname=SPECIAL_CHARACTERS_ALLOWED
)
mock_now = datetime.datetime(2017, 3, 16, 11, 00, tzinfo=pytz.utc)
with mock.patch.object(timezone, 'now', return_value=mock_now):
self.app.delete(
url,
auth=self.auth
)
self.project.reload()
self.special_characters_wiki.reload()
assert_equal(self.special_characters_wiki.deleted, mock_now)
@mock.patch('addons.wiki.utils.broadcast_to_sharejs')
def test_wiki_versions_do_not_reappear_after_delete(self, mock_sharejs):
# Creates a wiki page
self.project.update_node_wiki('Hippos', 'Hello hippos', self.consolidate_auth)
# Edits it two times
wiki_page = self.project.get_wiki_page('Hippos')
assert_equal(wiki_page.deleted, None)
assert_equal(wiki_page.current_version_number, 1)
self.project.update_node_wiki('Hippos', 'Hello hippopotamus', self.consolidate_auth)
wiki_page.reload()
assert_equal(wiki_page.current_version_number, 2)
# Deletes the wiki page
mock_now = datetime.datetime(2017, 3, 16, 11, 00, tzinfo=pytz.utc)
with mock.patch.object(timezone, 'now', return_value=mock_now):
self.project.delete_node_wiki('Hippos', self.consolidate_auth)
wiki_page.reload()
assert_equal(wiki_page.deleted, mock_now)
# Creates new wiki with same name as deleted wiki
self.project.update_node_wiki('Hippos', 'Hello again hippos', self.consolidate_auth)
wiki_page = self.project.get_wiki_page('Hippos')
assert_equal(wiki_page.current_version_number, 1)
self.project.update_node_wiki('Hippos', 'Hello again hippopotamus', self.consolidate_auth)
wiki_page.reload()
assert_equal(wiki_page.current_version_number, 2)
class TestWikiRename(OsfTestCase):
def setUp(self):
super(TestWikiRename, self).setUp()
creator = AuthUserFactory()
self.project = ProjectFactory(is_public=True, creator=creator)
self.consolidate_auth = Auth(user=self.project.creator)
self.auth = creator.auth
self.project.update_node_wiki('home', 'Hello world', self.consolidate_auth)
self.page_name = 'page2'
self.project.update_node_wiki(self.page_name, 'content', self.consolidate_auth)
self.project.save()
self.page = self.project.get_wiki_version(self.page_name)
self.wiki = self.project.get_wiki_page('home')
self.url = self.project.api_url_for(
'project_wiki_rename',
wname=self.page_name,
)
@mock.patch('addons.wiki.utils.broadcast_to_sharejs')
def test_rename_wiki_page_valid(self, mock_sharejs, new_name=u'away'):
self.app.put_json(
self.url,
{'value': new_name},
auth=self.auth
)
self.project.reload()
old_wiki = self.project.get_wiki_version(self.page_name)
assert_false(old_wiki)
new_wiki = self.project.get_wiki_version(new_name)
assert_true(new_wiki)
assert_equal(new_wiki.wiki_page._primary_key, self.page.wiki_page._primary_key)
assert_equal(new_wiki.content, self.page.content)
assert_equal(new_wiki.identifier, self.page.identifier)
def test_rename_wiki_page_invalid(self, new_name=u'invalid/name'):
res = self.app.put_json(
self.url,
{'value': new_name},
auth=self.auth,
expect_errors=True,
)
assert_equal(http.BAD_REQUEST, res.status_code)
assert_equal(res.json['message_short'], 'Invalid name')
assert_equal(res.json['message_long'], 'Page name cannot contain forward slashes.')
self.project.reload()
old_wiki = self.project.get_wiki_page(self.page_name)
assert_true(old_wiki)
def test_rename_wiki_page_duplicate(self):
self.project.update_node_wiki('away', 'Hello world', self.consolidate_auth)
new_name = 'away'
res = self.app.put_json(
self.url,
{'value': new_name},
auth=self.auth,
expect_errors=True,
)
assert_equal(res.status_code, 409)
def test_rename_wiki_name_not_found(self):
url = self.project.api_url_for('project_wiki_rename', wname='not_found_page_name')
res = self.app.put_json(url, {'value': 'new name'},
auth=self.auth, expect_errors=True)
assert_equal(res.status_code, 404)
def test_cannot_rename_wiki_page_to_home(self):
user = AuthUserFactory()
# A fresh project where the 'home' wiki page has no content
project = ProjectFactory(creator=user)
project.update_node_wiki('Hello', 'hello world', Auth(user=user))
url = project.api_url_for('project_wiki_rename', wname='Hello')
res = self.app.put_json(url, {'value': 'home'}, auth=user.auth, expect_errors=True)
assert_equal(res.status_code, 409)
def test_rename_wiki_name_with_value_missing(self):
# value is missing
res = self.app.put_json(self.url, {}, auth=self.auth, expect_errors=True)
assert_equal(res.status_code, 400)
def test_rename_wiki_page_duplicate_different_casing(self):
# attempt to rename 'page2' from setup to different case of 'away'.
old_name = 'away'
new_name = 'AwAy'
self.project.update_node_wiki(old_name, 'Hello world', self.consolidate_auth)
res = self.app.put_json(
self.url,
{'value': new_name},
auth=self.auth,
expect_errors=True
)
assert_equal(res.status_code, 409)
@mock.patch('addons.wiki.utils.broadcast_to_sharejs')
def test_rename_wiki_page_same_name_different_casing(self, mock_sharejs):
old_name = 'away'
new_name = 'AWAY'
self.project.update_node_wiki(old_name, 'Hello world', self.consolidate_auth)
url = self.project.api_url_for('project_wiki_rename', wname=old_name)
res = self.app.put_json(
url,
{'value': new_name},
auth=self.auth,
expect_errors=False
)
assert_equal(res.status_code, 200)
def test_cannot_rename_home_page(self):
url = self.project.api_url_for('project_wiki_rename', wname='home')
res = self.app.put_json(url, {'value': 'homelol'}, auth=self.auth, expect_errors=True)
assert_equal(res.status_code, 400)
@mock.patch('addons.wiki.utils.broadcast_to_sharejs')
def test_can_rename_to_a_deleted_page(self, mock_sharejs):
self.project.delete_node_wiki(self.page_name, self.consolidate_auth)
self.project.save()
# Creates a new page
self.project.update_node_wiki('page3', 'moarcontent', self.consolidate_auth)
self.project.save()
# Renames the wiki to the deleted page
url = self.project.api_url_for('project_wiki_rename', wname='page3')
res = self.app.put_json(url, {'value': self.page_name}, auth=self.auth)
assert_equal(res.status_code, 200)
def test_rename_wiki_page_with_valid_html(self):
# script is not an issue since data is sanitized via bleach or mako before display.
self.test_rename_wiki_page_valid(new_name=u'<html>hello<html>')
def test_rename_wiki_page_with_invalid_html(self):
# script is not an issue since data is sanitized via bleach or mako before display.
# with that said routes still do not accept forward slashes
self.test_rename_wiki_page_invalid(new_name=u'<html>hello</html>')
def test_rename_wiki_page_with_non_ascii_title(self):
self.test_rename_wiki_page_valid(new_name=u'øˆ∆´ƒøßå√ß')
def test_rename_wiki_page_with_valid_special_character_title(self):
self.test_rename_wiki_page_valid(new_name=SPECIAL_CHARACTERS_ALLOWED)
def test_rename_wiki_page_with_invalid_special_character_title(self):
self.test_rename_wiki_page_invalid(new_name=SPECIAL_CHARACTERS_ALL)
class TestWikiLinks(OsfTestCase):
def test_links(self):
user = AuthUserFactory()
project = ProjectFactory(creator=user)
wiki_page = WikiFactory(
user=user,
node=project,
)
wiki = WikiVersionFactory(
content='[[wiki2]]',
wiki_page=wiki_page,
)
assert_in(
'/{}/wiki/wiki2/'.format(project._id),
wiki.html(project),
)
# Regression test for https://sentry.osf.io/osf/production/group/310/
def test_bad_links(self):
content = u'<span></span><iframe src="http://httpbin.org/"></iframe>'
user = AuthUserFactory()
node = ProjectFactory()
wiki_page = WikiFactory(
user=user,
node=node,
)
wiki = WikiVersionFactory(
content=content,
wiki_page=wiki_page,
)
expected = render_content(content, node)
assert_equal(
'<p><span></span><iframe src="<a href="http://httpbin.org/" rel="nofollow">http://httpbin.org/</a>"></iframe></p>',
wiki.html(node)
)
class TestWikiUuid(OsfTestCase):
def setUp(self):
super(TestWikiUuid, self).setUp()
self.user = AuthUserFactory()
self.project = ProjectFactory(is_public=True, creator=self.user)
self.wname = 'foo.bar'
self.wkey = to_mongo_key(self.wname)
def test_uuid_generated_once(self):
assert_is_none(self.project.wiki_private_uuids.get(self.wkey))
url = self.project.web_url_for('project_wiki_view', wname=self.wname)
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 200)
self.project.reload()
private_uuid = self.project.wiki_private_uuids.get(self.wkey)
assert_true(private_uuid)
assert_not_in(private_uuid, res.body)
assert_in(get_sharejs_uuid(self.project, self.wname), res.body)
# Revisit page; uuid has not changed
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 200)
self.project.reload()
assert_equal(private_uuid, self.project.wiki_private_uuids.get(self.wkey))
def test_uuid_not_visible_without_write_permission(self):
self.project.update_node_wiki(self.wname, 'some content', Auth(self.user))
self.project.save()
assert_is_none(self.project.wiki_private_uuids.get(self.wkey))
url = self.project.web_url_for('project_wiki_view', wname=self.wname)
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 200)
self.project.reload()
private_uuid = self.project.wiki_private_uuids.get(self.wkey)
assert_true(private_uuid)
assert_not_in(private_uuid, res.body)
assert_in(get_sharejs_uuid(self.project, self.wname), res.body)
# Users without write permission should not be able to access
res = self.app.get(url)
assert_equal(res.status_code, 200)
assert_not_in(get_sharejs_uuid(self.project, self.wname), res.body)
def test_uuid_not_generated_without_write_permission(self):
self.project.update_node_wiki(self.wname, 'some content', Auth(self.user))
self.project.save()
assert_is_none(self.project.wiki_private_uuids.get(self.wkey))
url = self.project.web_url_for('project_wiki_view', wname=self.wname)
res = self.app.get(url)
assert_equal(res.status_code, 200)
self.project.reload()
private_uuid = self.project.wiki_private_uuids.get(self.wkey)
assert_is_none(private_uuid)
def test_uuids_differ_between_pages(self):
wname1 = 'foo.bar'
url1 = self.project.web_url_for('project_wiki_view', wname=wname1)
res1 = self.app.get(url1, auth=self.user.auth)
assert_equal(res1.status_code, 200)
wname2 = 'bar.baz'
url2 = self.project.web_url_for('project_wiki_view', wname=wname2)
res2 = self.app.get(url2, auth=self.user.auth)
assert_equal(res2.status_code, 200)
self.project.reload()
uuid1 = get_sharejs_uuid(self.project, wname1)
uuid2 = get_sharejs_uuid(self.project, wname2)
assert_not_equal(uuid1, uuid2)
assert_in(uuid1, res1)
assert_in(uuid2, res2)
assert_not_in(uuid1, res2)
assert_not_in(uuid2, res1)
def test_uuids_differ_between_forks(self):
url = self.project.web_url_for('project_wiki_view', wname=self.wname)
project_res = self.app.get(url, auth=self.user.auth)
assert_equal(project_res.status_code, 200)
self.project.reload()
fork = self.project.fork_node(Auth(self.user))
assert_true(fork.is_fork_of(self.project))
fork_url = fork.web_url_for('project_wiki_view', wname=self.wname)
fork_res = self.app.get(fork_url, auth=self.user.auth)
assert_equal(fork_res.status_code, 200)
fork.reload()
# uuids are not copied over to forks
assert_not_equal(
self.project.wiki_private_uuids.get(self.wkey),
fork.wiki_private_uuids.get(self.wkey)
)
project_uuid = get_sharejs_uuid(self.project, self.wname)
fork_uuid = get_sharejs_uuid(fork, self.wname)
assert_not_equal(project_uuid, fork_uuid)
assert_in(project_uuid, project_res)
assert_in(fork_uuid, fork_res)
assert_not_in(project_uuid, fork_res)
assert_not_in(fork_uuid, project_res)
@pytest.mark.skip('#TODO: Fix or mock mongodb for sharejs')
@mock.patch('addons.wiki.utils.broadcast_to_sharejs')
def test_migration_does_not_affect_forks(self, mock_sharejs):
original_uuid = generate_private_uuid(self.project, self.wname)
self.project.update_node_wiki(self.wname, 'Hello world', Auth(self.user))
fork = self.project.fork_node(Auth(self.user))
assert_equal(fork.wiki_private_uuids.get(self.wkey), None)
migrate_uuid(self.project, self.wname)
assert_not_equal(original_uuid, self.project.wiki_private_uuids.get(self.wkey))
assert_equal(fork.wiki_private_uuids.get(self.wkey), None)
@mock.patch('addons.wiki.utils.broadcast_to_sharejs')
def test_uuid_persists_after_delete(self, mock_sharejs):
assert_is_none(self.project.wiki_private_uuids.get(self.wkey))
# Create wiki page
self.project.update_node_wiki(self.wname, 'Hello world', Auth(self.user))
# Visit wiki edit page
edit_url = self.project.web_url_for('project_wiki_view', wname=self.wname)
res = self.app.get(edit_url, auth=self.user.auth)
assert_equal(res.status_code, 200)
self.project.reload()
original_private_uuid = self.project.wiki_private_uuids.get(self.wkey)
original_sharejs_uuid = get_sharejs_uuid(self.project, self.wname)
# Delete wiki
delete_url = self.project.api_url_for('project_wiki_delete', wname=self.wname)
res = self.app.delete(delete_url, auth=self.user.auth)
assert_equal(res.status_code, 200)
self.project.reload()
assert_equal(original_private_uuid, self.project.wiki_private_uuids.get(self.wkey))
# Revisit wiki edit page
res = self.app.get(edit_url, auth=self.user.auth)
assert_equal(res.status_code, 200)
self.project.reload()
assert_equal(original_private_uuid, self.project.wiki_private_uuids.get(self.wkey))
assert_in(original_sharejs_uuid, res.body)
@mock.patch('addons.wiki.utils.broadcast_to_sharejs')
def test_uuid_persists_after_rename(self, mock_sharejs):
new_wname = 'barbaz'
new_wkey = to_mongo_key(new_wname)
assert_is_none(self.project.wiki_private_uuids.get(self.wkey))
assert_is_none(self.project.wiki_private_uuids.get(new_wkey))
# Create wiki page
self.project.update_node_wiki(self.wname, 'Hello world', Auth(self.user))
wiki_page = self.project.get_wiki_page(self.wname)
# Visit wiki edit page
original_edit_url = self.project.web_url_for('project_wiki_view', wname=self.wname)
res = self.app.get(original_edit_url, auth=self.user.auth)
assert_equal(res.status_code, 200)
self.project.reload()
original_private_uuid = self.project.wiki_private_uuids.get(self.wkey)
original_sharejs_uuid = get_sharejs_uuid(self.project, self.wname)
# Rename wiki
rename_url = self.project.api_url_for('project_wiki_rename', wname=self.wname)
res = self.app.put_json(
rename_url,
{'value': new_wname, 'pk': wiki_page._id},
auth=self.user.auth,
)
assert_equal(res.status_code, 200)
self.project.reload()
assert_is_none(self.project.wiki_private_uuids.get(self.wkey))
assert_equal(original_private_uuid, self.project.wiki_private_uuids.get(new_wkey))
# Revisit original wiki edit page
res = self.app.get(original_edit_url, auth=self.user.auth)
assert_equal(res.status_code, 200)
self.project.reload()
assert_not_equal(original_private_uuid, self.project.wiki_private_uuids.get(self.wkey))
assert_not_in(original_sharejs_uuid, res.body)
@pytest.mark.skip('#TODO: Fix or mock mongodb for sharejs')
class TestWikiShareJSMongo(OsfTestCase):
@classmethod
def setUpClass(cls):
super(TestWikiShareJSMongo, cls).setUpClass()
cls._original_sharejs_db_name = settings.SHAREJS_DB_NAME
settings.SHAREJS_DB_NAME = 'sharejs_test'
def setUp(self):
super(TestWikiShareJSMongo, self).setUp()
self.user = AuthUserFactory()
self.project = ProjectFactory(is_public=True, creator=self.user)
self.wname = 'foo.bar'
self.wkey = to_mongo_key(self.wname)
self.private_uuid = generate_private_uuid(self.project, self.wname)
self.sharejs_uuid = get_sharejs_uuid(self.project, self.wname)
# Create wiki page
self.project.update_node_wiki(self.wname, 'Hello world', Auth(self.user))
self.wiki_page = self.project.get_wiki_page(self.wname)
# Insert mongo data for current project/wiki
self.db = share_db()
example_uuid = EXAMPLE_DOCS[0]['_id']
self.example_docs = deepcopy(EXAMPLE_DOCS)
self.example_docs[0]['_id'] = self.sharejs_uuid
self.db.docs.insert(self.example_docs)
self.example_ops = deepcopy(EXAMPLE_OPS)
for item in self.example_ops:
item['_id'] = item['_id'].replace(example_uuid, self.sharejs_uuid)
item['name'] = item['name'].replace(example_uuid, self.sharejs_uuid)
self.db.docs_ops.insert(self.example_ops)
@mock.patch('addons.wiki.utils.broadcast_to_sharejs')
def test_migrate_uuid(self, mock_sharejs):
migrate_uuid(self.project, self.wname)
assert_is_none(self.db.docs.find_one({'_id': self.sharejs_uuid}))
assert_is_none(self.db.docs_ops.find_one({'name': self.sharejs_uuid}))
new_sharejs_uuid = get_sharejs_uuid(self.project, self.wname)
assert_equal(
EXAMPLE_DOCS[0]['_data'],
self.db.docs.find_one({'_id': new_sharejs_uuid})['_data']
)
assert_equal(
len([item for item in self.example_ops if item['name'] == self.sharejs_uuid]),
len([item for item in self.db.docs_ops.find({'name': new_sharejs_uuid})])
)
@mock.patch('addons.wiki.utils.broadcast_to_sharejs')
def test_migrate_uuid_no_mongo(self, mock_sharejs):
# Case where no edits have been made to the wiki
wname = 'bar.baz'
wkey = to_mongo_key(wname)
share_uuid = generate_private_uuid(self.project, wname)
sharejs_uuid = get_sharejs_uuid(self.project, wname)
self.project.update_node_wiki(wname, 'Hello world', Auth(self.user))
migrate_uuid(self.project, wname)
assert_not_equal(share_uuid, self.project.wiki_private_uuids.get(wkey))
assert_is_none(self.db.docs.find_one({'_id': sharejs_uuid}))
assert_is_none(self.db.docs_ops.find_one({'name': sharejs_uuid}))
@mock.patch('addons.wiki.utils.broadcast_to_sharejs')
def test_migrate_uuid_updates_node(self, mock_sharejs):
migrate_uuid(self.project, self.wname)
assert_not_equal(self.private_uuid, self.project.wiki_private_uuids[self.wkey])
@mock.patch('addons.wiki.utils.broadcast_to_sharejs')
def test_manage_contributors_updates_uuid(self, mock_sharejs):
user = UserFactory()
self.project.add_contributor(
contributor=user,
permissions=['read', 'write', 'admin'],
auth=Auth(user=self.user),
)
self.project.save()
assert_equal(self.private_uuid, self.project.wiki_private_uuids[self.wkey])
# Removing admin permission does nothing
self.project.manage_contributors(
user_dicts=[
{'id': user._id, 'permission': 'write', 'visible': True},
{'id': self.user._id, 'permission': 'admin', 'visible': True},
],
auth=Auth(user=self.user),
save=True,
)
assert_equal(self.private_uuid, self.project.wiki_private_uuids[self.wkey])
# Removing write permission migrates uuid
self.project.manage_contributors(
user_dicts=[
{'id': user._id, 'permission': 'read', 'visible': True},
{'id': self.user._id, 'permission': 'admin', 'visible': True},
],
auth=Auth(user=self.user),
save=True,
)
assert_not_equal(self.private_uuid, self.project.wiki_private_uuids[self.wkey])
@mock.patch('addons.wiki.utils.broadcast_to_sharejs')
def test_delete_share_doc(self, mock_sharejs):
delete_share_doc(self.project, self.wname)
assert_is_none(self.db.docs.find_one({'_id': self.sharejs_uuid}))
assert_is_none(self.db.docs_ops.find_one({'name': self.sharejs_uuid}))
@mock.patch('addons.wiki.utils.broadcast_to_sharejs')
def test_delete_share_doc_updates_node(self, mock_sharejs):
assert_equal(self.private_uuid, self.project.wiki_private_uuids[self.wkey])
delete_share_doc(self.project, self.wname)
assert_not_in(self.wkey, self.project.wiki_private_uuids)
def test_get_draft(self):
# draft is current with latest wiki save
current_content = self.wiki_page.get_draft(self.project)
assert_equals(current_content, self.wiki_page.content)
# modify the sharejs wiki page contents and ensure we
# return the draft contents
new_content = 'I am a teapot'
new_time = int(time.time() * 1000) + 10000
new_version = self.example_docs[0]['_v'] + 1
self.db.docs.update(
{'_id': self.sharejs_uuid},
{'$set': {
'_v': new_version,
'_m.mtime': new_time,
'_data': new_content
}}
)
current_content = self.wiki_page.get_draft(self.project)
assert_equals(current_content, new_content)
def tearDown(self):
super(TestWikiShareJSMongo, self).tearDown()
self.db.drop_collection('docs')
self.db.drop_collection('docs_ops')
@classmethod
def tearDownClass(cls):
share_db().connection.drop_database(settings.SHAREJS_DB_NAME)
settings.SHARE_DATABASE_NAME = cls._original_sharejs_db_name
class TestWikiUtils(OsfTestCase):
def setUp(self):
super(TestWikiUtils, self).setUp()
self.project = ProjectFactory()
def test_get_sharejs_uuid(self):
wname = 'foo.bar'
wname2 = 'bar.baz'
private_uuid = generate_private_uuid(self.project, wname)
sharejs_uuid = get_sharejs_uuid(self.project, wname)
# Provides consistent results
assert_equal(sharejs_uuid, get_sharejs_uuid(self.project, wname))
# Provides obfuscation
assert_not_in(wname, sharejs_uuid)
assert_not_in(sharejs_uuid, wname)
assert_not_in(private_uuid, sharejs_uuid)
assert_not_in(sharejs_uuid, private_uuid)
# Differs based on share uuid provided
assert_not_equal(sharejs_uuid, get_sharejs_uuid(self.project, wname2))
# Differs across projects and forks
project = ProjectFactory()
assert_not_equal(sharejs_uuid, get_sharejs_uuid(project, wname))
fork = self.project.fork_node(Auth(self.project.creator))
assert_not_equal(sharejs_uuid, get_sharejs_uuid(fork, wname))
def test_generate_share_uuid(self):
wname = 'bar.baz'
wkey = to_mongo_key(wname)
assert_is_none(self.project.wiki_private_uuids.get(wkey))
share_uuid = generate_private_uuid(self.project, wname)
self.project.reload()
assert_equal(self.project.wiki_private_uuids[wkey], share_uuid)
new_uuid = generate_private_uuid(self.project, wname)
self.project.reload()
assert_not_equal(share_uuid, new_uuid)
assert_equal(self.project.wiki_private_uuids[wkey], new_uuid)
def test_format_wiki_version(self):
assert_is_none(format_wiki_version(None, 5, False))
assert_is_none(format_wiki_version('', 5, False))
assert_equal(format_wiki_version('3', 5, False), 3)
assert_equal(format_wiki_version('4', 5, False), 'previous')
assert_equal(format_wiki_version('5', 5, False), 'current')
assert_equal(format_wiki_version('previous', 5, False), 'previous')
assert_equal(format_wiki_version('current', 5, False), 'current')
assert_equal(format_wiki_version('preview', 5, True), 'preview')
assert_equal(format_wiki_version('current', 0, False), 'current')
assert_equal(format_wiki_version('preview', 0, True), 'preview')
with assert_raises(InvalidVersionError):
format_wiki_version('1', 0, False)
with assert_raises(InvalidVersionError):
format_wiki_version('previous', 0, False)
with assert_raises(InvalidVersionError):
format_wiki_version('6', 5, False)
with assert_raises(InvalidVersionError):
format_wiki_version('0', 5, False)
with assert_raises(InvalidVersionError):
format_wiki_version('preview', 5, False)
with assert_raises(InvalidVersionError):
format_wiki_version('nonsense', 5, True)
class TestPublicWiki(OsfTestCase):
def setUp(self):
super(TestPublicWiki, self).setUp()
self.project = ProjectFactory()
self.consolidate_auth = Auth(user=self.project.creator)
self.user = AuthUserFactory()
def test_addon_on_children(self):
parent = ProjectFactory()
node = NodeFactory(parent=parent, category='project')
sub_component = NodeFactory(parent=node)
parent.delete_addon('wiki', self.consolidate_auth)
node.delete_addon('wiki', self.consolidate_auth)
sub_component.delete_addon('wiki', self.consolidate_auth)
NodeFactory(parent=node)
has_addon_on_child_node =\
node.has_addon_on_children('wiki')
assert_true(has_addon_on_child_node)
def test_check_user_has_addon_excludes_deleted_components(self):
parent = ProjectFactory()
parent.delete_addon('wiki', self.consolidate_auth)
node = NodeFactory(parent=parent, category='project')
mock_now = datetime.datetime(2017, 3, 16, 11, 00, tzinfo=pytz.utc)
with mock.patch.object(timezone, 'now', return_value=mock_now):
node.delete_addon('wiki', self.consolidate_auth)
sub_component = NodeFactory(parent=node)
sub_component.is_deleted = True
sub_component.save()
has_addon_on_child_node =\
node.has_addon_on_children('wiki')
assert_false(has_addon_on_child_node)
def test_set_editing(self):
parent = ProjectFactory()
node = NodeFactory(parent=parent, category='project', is_public=True)
wiki = node.get_addon('wiki')
# Set as publicly editable
wiki.set_editing(permissions=True, auth=self.consolidate_auth, log=True)
assert_true(wiki.is_publicly_editable)
assert_equal(node.logs.latest().action, 'made_wiki_public')
# Try to set public when the wiki is already public
with assert_raises(NodeStateError):
wiki.set_editing(permissions=True, auth=self.consolidate_auth, log=False)
# Turn off public editing
wiki.set_editing(permissions=False, auth=self.consolidate_auth, log=True)
assert_false(wiki.is_publicly_editable)
assert_equal(node.logs.latest().action, 'made_wiki_private')
node = NodeFactory(parent=parent, category='project')
wiki = node.get_addon('wiki')
# Try to set to private wiki already private
with assert_raises(NodeStateError):
wiki.set_editing(permissions=False, auth=self.consolidate_auth, log=False)
# Try to set public when the project is private
with assert_raises(NodeStateError):
wiki.set_editing(permissions=True, auth=self.consolidate_auth, log=False)
def test_serialize_wiki_settings(self):
node = NodeFactory(parent=self.project, creator=self.user, is_public=True)
node.get_addon('wiki').set_editing(
permissions=True, auth=self.consolidate_auth, log=True)
data = serialize_wiki_settings(self.user, [node])
expected = [{
'node': {
'id': node._id,
'title': node.title,
'url': node.url,
},
'children': [
{
'select': {
'title': 'permission',
'permission': 'public'
},
}
],
'kind': 'folder',
'nodeType': 'component',
'category': 'hypothesis',
'permissions': {'view': True}
}]
assert_equal(data, expected)
def test_serialize_wiki_settings(self):
node = NodeFactory(parent=self.project, creator=self.user, is_public=True)
node.get_addon('wiki').set_editing(
permissions=True, auth=self.consolidate_auth, log=True)
node.add_pointer(self.project, Auth(self.user))
node.save()
data = serialize_wiki_settings(self.user, [node])
expected = [{
'node': {
'id': node._id,
'title': node.title,
'url': node.url,
'is_public': True
},
'children': [
{
'select': {
'title': 'permission',
'permission': 'public'
},
}
],
'kind': 'folder',
'nodeType': 'component',
'category': 'hypothesis',
'permissions': {'view': True,
'admin': True}
}]
assert_equal(data, expected)
def test_serialize_wiki_settings_disabled_wiki(self):
node = NodeFactory(parent=self.project, creator=self.user)
node.delete_addon('wiki', self.consolidate_auth)
data = serialize_wiki_settings(self.user, [node])
expected = [{'node':
{'url': node.url,
'is_public': False,
'id': node._id,
'title': node.title},
'category': 'hypothesis',
'kind': 'folder',
'nodeType': 'component',
'children': [],
'permissions': {'admin': True,
'view': True}
}]
assert_equal(data, expected)
class TestWikiMenu(OsfTestCase):
def setUp(self):
super(TestWikiMenu, self).setUp()
self.user = UserFactory()
self.project = ProjectFactory(creator=self.user, is_public=True)
self.component = NodeFactory(creator=self.user, parent=self.project, is_public=True)
self.consolidate_auth = Auth(user=self.project.creator)
self.non_contributor = UserFactory()
def test_format_home_wiki_page_no_content(self):
data = views.format_home_wiki_page(self.project)
expected = {
'page': {
'url': self.project.web_url_for('project_wiki_home'),
'name': 'Home',
'id': 'None',
}
}
assert_equal(data, expected)
def test_format_project_wiki_pages_contributor(self):
self.project.update_node_wiki('home', 'content here', self.consolidate_auth)
self.project.update_node_wiki('zoo', 'koala', self.consolidate_auth)
home_page = self.project.get_wiki_page(name='home')
zoo_page = self.project.get_wiki_page(name='zoo')
data = views.format_project_wiki_pages(self.project, self.consolidate_auth)
expected = [
{
'page': {
'url': self.project.web_url_for('project_wiki_view', wname='home', _guid=True),
'name': 'Home',
'id': home_page._primary_key,
}
},
{
'page': {
'url': self.project.web_url_for('project_wiki_view', wname='zoo', _guid=True),
'name': 'zoo',
'id': zoo_page._primary_key,
}
}
]
assert_equal(data, expected)
def test_format_project_wiki_pages_no_content_non_contributor(self):
self.project.update_node_wiki('home', 'content here', self.consolidate_auth)
self.project.update_node_wiki('zoo', '', self.consolidate_auth)
home_page = self.project.get_wiki_version(name='home')
data = views.format_project_wiki_pages(self.project, auth=Auth(self.non_contributor))
expected = [
{
'page': {
'url': self.project.web_url_for('project_wiki_view', wname='home', _guid=True),
'name': 'Home',
'id': home_page.wiki_page._primary_key,
}
}
]
assert_equal(data, expected)
def test_format_component_wiki_pages_contributor(self):
self.component.update_node_wiki('home', 'home content', self.consolidate_auth)
self.component.update_node_wiki('zoo', 'koala', self.consolidate_auth)
zoo_page = self.component.get_wiki_page(name='zoo')
expected = [
{
'page': {
'name': self.component.title,
'url': self.component.web_url_for('project_wiki_view', wname='home', _guid=True),
},
'children': [
{
'page': {
'url': self.component.web_url_for('project_wiki_view', wname='home', _guid=True),
'name': 'Home',
'id': self.component._primary_key,
}
},
{
'page': {
'url': self.component.web_url_for('project_wiki_view', wname='zoo', _guid=True),
'name': 'zoo',
'id': zoo_page._primary_key,
},
}
],
'kind': 'component',
'category': self.component.category,
'pointer': False,
}
]
data = views.format_component_wiki_pages(node=self.project, auth=self.consolidate_auth)
assert_equal(data, expected)
def test_format_component_wiki_pages_no_content_non_contributor(self):
data = views.format_component_wiki_pages(node=self.project, auth=Auth(self.non_contributor))
expected = []
assert_equal(data, expected)
def test_project_wiki_grid_data(self):
self.project.update_node_wiki('home', 'project content', self.consolidate_auth)
self.component.update_node_wiki('home', 'component content', self.consolidate_auth)
data = views.project_wiki_grid_data(auth=self.consolidate_auth, wname='home', node=self.project)
expected = [
{
'title': 'Project Wiki Pages',
'kind': 'folder',
'type': 'heading',
'children': views.format_project_wiki_pages(node=self.project, auth=self.consolidate_auth),
},
{
'title': 'Component Wiki Pages',
'kind': 'folder',
'type': 'heading',
'children': views.format_component_wiki_pages(node=self.project, auth=self.consolidate_auth)
}
]
assert_equal(data, expected)
|
{
"content_hash": "3b3efd0921243e4227a2783cee203af6",
"timestamp": "",
"source": "github",
"line_count": 1422,
"max_line_length": 139,
"avg_line_length": 43.919127988748244,
"alnum_prop": 0.6233967943893808,
"repo_name": "icereval/osf.io",
"id": "29c22449a012eb85cb7a077bd96b10f1d6e677fc",
"size": "62544",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "addons/wiki/tests/test_wiki.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "108526"
},
{
"name": "HTML",
"bytes": "261937"
},
{
"name": "JavaScript",
"bytes": "1856123"
},
{
"name": "Mako",
"bytes": "691640"
},
{
"name": "Python",
"bytes": "8331919"
},
{
"name": "VCL",
"bytes": "13885"
}
],
"symlink_target": ""
}
|
from .agendadiaria import AgendaDiaria
from .analisis import Analisis
from .cupo import Cupo
from .documento import Documento
from .empleado import Empleado
from .telefono import Telefono
from .fichapaciente import FichaPaciente
from .institucionmedica import InstitucionMedica
from .laboratorioexterno import LaboratorioExterno
from .reserva_analisis import ReservaAnalisis
from .sucursal import Sucursal
from .tipoanalisis import TipoAnalisis
from .ubicacion_geografica import UbicacionGeografica
from .paciente import Paciente
class Creador(object):
"""
Clase encargada de crear cada uno de los objetos de la capa de negocio.
Note:
Suministra los métodos necesarios para crear.
Debe ser invocada de forma estática.
Args:
Ninguno
Attributes:
Ninguno
"""
@staticmethod
def agendadiaria(**kwargs):
return AgendaDiaria()
@staticmethod
def analisis(**kwargs):
return Analisis()
@staticmethod
def cupo(**kwargs):
return Cupo()
@staticmethod
def documento(**kwargs):
return Documento(pk=kwargs["pk"], numero=kwargs["numero_documento"],
tipo=kwargs["tipo_documento"])
@staticmethod
def telefono(**kwargs):
return Telefono(pk=kwargs["pk"], numero=kwargs["numero"], baja=0)
@staticmethod
def fichapaciente(**kwargs):
return FichaPaciente()
@staticmethod
def institucionmedica(**kwargs):
obj_ubicacion_geo = Creador.ubicacion_geografica(
pk=kwargs["obj_ubicacion_geo"][0],
ciudad=kwargs["obj_ubicacion_geo"][1],
departamento=kwargs["obj_ubicacion_geo"][2],
baja=kwargs["obj_ubicacion_geo"][3])
return InstitucionMedica(pk=kwargs["pk"], nombre=kwargs["nombre"],
direccion=kwargs["domicilio"],
obj_ubicacion_geo=obj_ubicacion_geo)
@staticmethod
def laboratorioexterno(**kwargs):
return LaboratorioExterno()
@staticmethod
def empleado(**kwargs):
obj_documento = Creador.documento(pk=kwargs["documento"][0],
numero_documento=kwargs["documento"]
[2], tipo_documento=kwargs
["documento"][1])
col_telefonos = []
for tel in kwargs["col_telefonos"]:
obj_telefono = Creador.telefono(pk=tel[0], numero=tel[1])
col_telefonos.append(obj_telefono)
obj_sucursal = kwargs["obj_sucursal"]
if not isinstance(kwargs["obj_sucursal"], Sucursal):
obj_sucursal = Creador.sucursal(pk=kwargs["obj_sucursal"][0],
domicilio=kwargs["obj_sucursal"][1],
col_telefonos=kwargs["obj_sucursal"][4],
ubicacion_geo=kwargs["obj_sucursal"][2],
baja=False)
obj_ubicacion_geo = kwargs["obj_ubicacion_geo"]
if not isinstance(kwargs["obj_ubicacion_geo"], UbicacionGeografica):
obj_ubicacion_geo = Creador.ubicacion_geografica(
pk=kwargs["obj_ubicacion_geo"][0],
ciudad=kwargs["obj_ubicacion_geo"][1],
departamento=kwargs["obj_ubicacion_geo"][2],
baja=kwargs["obj_ubicacion_geo"][3])
# kwargs["obj_ubicacion_geo"] trae un objeto
return Empleado(pk=kwargs["pk"], pk_persona=kwargs["pk_persona"],
tipo=kwargs["tipo"],
nombres=kwargs["nombres"],
apellidos=kwargs["apellidos"],
col_telefonos=col_telefonos,
domicilio=kwargs["domicilio"],
obj_documento=obj_documento,
obj_ubicacion_geo=obj_ubicacion_geo,
obj_sucursal=obj_sucursal,
baja=False)
@staticmethod
def paciente(**kwargs):
obj_documento = Creador.documento(pk=kwargs["documento"][0],
numero_documento=kwargs["documento"]
[2], tipo_documento=kwargs
["documento"][1])
col_telefonos = []
for tel in kwargs["col_telefonos"]:
obj_telefono = Creador.telefono(pk=tel[0], numero=tel[1])
col_telefonos.append(obj_telefono)
obj_ubicacion_geo = Creador.ubicacion_geografica(
pk=kwargs["ubicacion_geo"][0], ciudad=kwargs
["ubicacion_geo"][1], departamento=kwargs["ubicacion_geo"]
[2], baja=kwargs["ubicacion_geo"][3])
# kwargs["obj_ubicacion_geo"] trae un objeto
return Paciente(pk=kwargs["pk"],
pk_persona=kwargs["pk_persona"],
activo=True, penalizado=False,
nombres=kwargs["nombres"],
apellidos=kwargs["apellidos"],
col_telefonos=col_telefonos,
domicilio=kwargs["domicilio"],
obj_documento=obj_documento,
obj_ubicacion_geo=obj_ubicacion_geo,
obj_ficha_paciente=None, baja=False)
@staticmethod
def reserva_analisis(**kwargs):
return ReservaAnalisis()
@staticmethod
def sucursal(**kwargs):
col_telefonos = []
for tel in kwargs["col_telefonos"]:
if not isinstance(tel, Telefono):
obj_telefono = Creador.telefono(pk=tel[0], numero=tel[1])
col_telefonos.append(obj_telefono)
else:
col_telefonos.append(tel) # tel es un objeto Telefono
obj_ubicacion_geo = Creador.ubicacion_geografica(
pk=kwargs["ubicacion_geo"][0], ciudad=kwargs
["ubicacion_geo"][1], departamento=kwargs["ubicacion_geo"]
[2], baja=kwargs["ubicacion_geo"][3])
# kwargs["obj_ubicacion_geo"] trae un objeto
return Sucursal(pk=kwargs["pk"],
domicilio=kwargs["domicilio"],
col_telefonos=col_telefonos,
obj_ubicacion_geo=obj_ubicacion_geo,
baja=False)
@staticmethod
def tipoanalisis(**kwargs):
return TipoAnalisis()
@staticmethod
def ubicacion_geografica(**kwargs):
return UbicacionGeografica(kwargs['pk'], kwargs['ciudad'],
kwargs['departamento'], kwargs['baja'])
|
{
"content_hash": "59cd3beb7148d2d863ef345f50e682ab",
"timestamp": "",
"source": "github",
"line_count": 166,
"max_line_length": 78,
"avg_line_length": 39.8433734939759,
"alnum_prop": 0.5606289688539462,
"repo_name": "gabofer82/taller_programacion_2017",
"id": "94a5836dd42da3e42fb64fdfad1ae11b768e20b8",
"size": "6640",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Programa/negocio/fabrica.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "286708"
}
],
"symlink_target": ""
}
|
"""Tests for iAqualink integration."""
import asyncio
import logging
from unittest.mock import AsyncMock, patch
from iaqualink.device import (
AqualinkAuxToggle,
AqualinkBinarySensor,
AqualinkDevice,
AqualinkLightToggle,
AqualinkSensor,
AqualinkThermostat,
)
from iaqualink.exception import AqualinkServiceException
from homeassistant.components.binary_sensor import DOMAIN as BINARY_SENSOR_DOMAIN
from homeassistant.components.climate import DOMAIN as CLIMATE_DOMAIN
from homeassistant.components.iaqualink.const import UPDATE_INTERVAL
from homeassistant.components.light import DOMAIN as LIGHT_DOMAIN
from homeassistant.components.sensor import DOMAIN as SENSOR_DOMAIN
from homeassistant.components.switch import DOMAIN as SWITCH_DOMAIN
from homeassistant.config_entries import ConfigEntryState
from homeassistant.const import ATTR_ASSUMED_STATE, STATE_ON, STATE_UNAVAILABLE
from homeassistant.util import dt as dt_util
from tests.common import async_fire_time_changed
from tests.components.iaqualink.conftest import get_aqualink_device, get_aqualink_system
async def _ffwd_next_update_interval(hass):
now = dt_util.utcnow()
async_fire_time_changed(hass, now + UPDATE_INTERVAL)
await hass.async_block_till_done()
async def test_setup_login_exception(hass, config_entry):
"""Test setup encountering a login exception."""
config_entry.add_to_hass(hass)
with patch(
"homeassistant.components.iaqualink.AqualinkClient.login",
side_effect=AqualinkServiceException,
):
await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
assert config_entry.state is ConfigEntryState.SETUP_ERROR
async def test_setup_login_timeout(hass, config_entry):
"""Test setup encountering a timeout while logging in."""
config_entry.add_to_hass(hass)
with patch(
"homeassistant.components.iaqualink.AqualinkClient.login",
side_effect=asyncio.TimeoutError,
):
await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
assert config_entry.state is ConfigEntryState.SETUP_RETRY
async def test_setup_systems_exception(hass, config_entry):
"""Test setup encountering an exception while retrieving systems."""
config_entry.add_to_hass(hass)
with patch(
"homeassistant.components.iaqualink.AqualinkClient.login",
return_value=None,
), patch(
"homeassistant.components.iaqualink.AqualinkClient.get_systems",
side_effect=AqualinkServiceException,
):
await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
assert config_entry.state is ConfigEntryState.SETUP_RETRY
async def test_setup_no_systems_recognized(hass, config_entry):
"""Test setup ending in no systems recognized."""
config_entry.add_to_hass(hass)
with patch(
"homeassistant.components.iaqualink.AqualinkClient.login",
return_value=None,
), patch(
"homeassistant.components.iaqualink.AqualinkClient.get_systems",
return_value={},
):
await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
assert config_entry.state is ConfigEntryState.SETUP_ERROR
async def test_setup_devices_exception(hass, config_entry, client):
"""Test setup encountering an exception while retrieving devices."""
config_entry.add_to_hass(hass)
system = get_aqualink_system(client)
systems = {system.serial: system}
with patch(
"homeassistant.components.iaqualink.AqualinkClient.login",
return_value=None,
), patch(
"homeassistant.components.iaqualink.AqualinkClient.get_systems",
return_value=systems,
), patch.object(
system, "get_devices"
) as mock_get_devices:
mock_get_devices.side_effect = AqualinkServiceException
await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
assert config_entry.state is ConfigEntryState.SETUP_RETRY
async def test_setup_all_good_no_recognized_devices(hass, config_entry, client):
"""Test setup ending in no devices recognized."""
config_entry.add_to_hass(hass)
system = get_aqualink_system(client)
systems = {system.serial: system}
device = get_aqualink_device(system, AqualinkDevice, data={"name": "dev_1"})
devices = {device.name: device}
with patch(
"homeassistant.components.iaqualink.AqualinkClient.login",
return_value=None,
), patch(
"homeassistant.components.iaqualink.AqualinkClient.get_systems",
return_value=systems,
), patch.object(
system, "get_devices"
) as mock_get_devices:
mock_get_devices.return_value = devices
await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
assert config_entry.state is ConfigEntryState.LOADED
assert len(hass.states.async_entity_ids(BINARY_SENSOR_DOMAIN)) == 0
assert len(hass.states.async_entity_ids(CLIMATE_DOMAIN)) == 0
assert len(hass.states.async_entity_ids(LIGHT_DOMAIN)) == 0
assert len(hass.states.async_entity_ids(SENSOR_DOMAIN)) == 0
assert len(hass.states.async_entity_ids(SWITCH_DOMAIN)) == 0
assert await hass.config_entries.async_unload(config_entry.entry_id)
await hass.async_block_till_done()
assert config_entry.state is ConfigEntryState.NOT_LOADED
async def test_setup_all_good_all_device_types(hass, config_entry, client):
"""Test setup ending in one device of each type recognized."""
config_entry.add_to_hass(hass)
system = get_aqualink_system(client)
systems = {system.serial: system}
devices = [
get_aqualink_device(system, AqualinkAuxToggle, data={"name": "aux_1"}),
get_aqualink_device(
system, AqualinkBinarySensor, data={"name": "freeze_protection"}
),
get_aqualink_device(system, AqualinkLightToggle, data={"name": "aux_2"}),
get_aqualink_device(system, AqualinkSensor, data={"name": "ph"}),
get_aqualink_device(
system, AqualinkThermostat, data={"name": "pool_set_point"}
),
]
devices = {d.name: d for d in devices}
system.get_devices = AsyncMock(return_value=devices)
with patch(
"homeassistant.components.iaqualink.AqualinkClient.login",
return_value=None,
), patch(
"homeassistant.components.iaqualink.AqualinkClient.get_systems",
return_value=systems,
):
await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
assert config_entry.state is ConfigEntryState.LOADED
assert len(hass.states.async_entity_ids(BINARY_SENSOR_DOMAIN)) == 1
assert len(hass.states.async_entity_ids(CLIMATE_DOMAIN)) == 1
assert len(hass.states.async_entity_ids(LIGHT_DOMAIN)) == 1
assert len(hass.states.async_entity_ids(SENSOR_DOMAIN)) == 1
assert len(hass.states.async_entity_ids(SWITCH_DOMAIN)) == 1
assert await hass.config_entries.async_unload(config_entry.entry_id)
await hass.async_block_till_done()
assert config_entry.state is ConfigEntryState.NOT_LOADED
async def test_multiple_updates(hass, config_entry, caplog, client):
"""Test all possible results of online status transition after update."""
config_entry.add_to_hass(hass)
system = get_aqualink_system(client)
systems = {system.serial: system}
system.get_devices = AsyncMock(return_value={})
caplog.set_level(logging.WARNING)
with patch(
"homeassistant.components.iaqualink.AqualinkClient.login",
return_value=None,
), patch(
"homeassistant.components.iaqualink.AqualinkClient.get_systems",
return_value=systems,
):
await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
assert config_entry.state is ConfigEntryState.LOADED
def set_online_to_true():
system.online = True
def set_online_to_false():
system.online = False
system.update = AsyncMock()
# True -> True
system.online = True
caplog.clear()
system.update.side_effect = set_online_to_true
await _ffwd_next_update_interval(hass)
assert len(caplog.records) == 0
# True -> False
system.online = True
caplog.clear()
system.update.side_effect = set_online_to_false
await _ffwd_next_update_interval(hass)
assert len(caplog.records) == 0
# True -> None / ServiceException
system.online = True
caplog.clear()
system.update.side_effect = AqualinkServiceException
await _ffwd_next_update_interval(hass)
assert len(caplog.records) == 1
assert "Failed" in caplog.text
# False -> False
system.online = False
caplog.clear()
system.update.side_effect = set_online_to_false
await _ffwd_next_update_interval(hass)
assert len(caplog.records) == 0
# False -> True
system.online = False
caplog.clear()
system.update.side_effect = set_online_to_true
await _ffwd_next_update_interval(hass)
assert len(caplog.records) == 1
assert "Reconnected" in caplog.text
# False -> None / ServiceException
system.online = False
caplog.clear()
system.update.side_effect = AqualinkServiceException
await _ffwd_next_update_interval(hass)
assert len(caplog.records) == 1
assert "Failed" in caplog.text
# None -> None / ServiceException
system.online = None
caplog.clear()
system.update.side_effect = AqualinkServiceException
await _ffwd_next_update_interval(hass)
assert len(caplog.records) == 0
# None -> True
system.online = None
caplog.clear()
system.update.side_effect = set_online_to_true
await _ffwd_next_update_interval(hass)
assert len(caplog.records) == 1
assert "Reconnected" in caplog.text
# None -> False
system.online = None
caplog.clear()
system.update.side_effect = set_online_to_false
await _ffwd_next_update_interval(hass)
assert len(caplog.records) == 0
assert await hass.config_entries.async_unload(config_entry.entry_id)
await hass.async_block_till_done()
assert config_entry.state is ConfigEntryState.NOT_LOADED
async def test_entity_assumed_and_available(hass, config_entry, client):
"""Test assumed_state and_available properties for all values of online."""
config_entry.add_to_hass(hass)
system = get_aqualink_system(client)
systems = {system.serial: system}
light = get_aqualink_device(
system, AqualinkLightToggle, data={"name": "aux_1", "state": "1"}
)
devices = {d.name: d for d in [light]}
system.get_devices = AsyncMock(return_value=devices)
system.update = AsyncMock()
with patch(
"homeassistant.components.iaqualink.AqualinkClient.login",
return_value=None,
), patch(
"homeassistant.components.iaqualink.AqualinkClient.get_systems",
return_value=systems,
):
await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
assert len(hass.states.async_entity_ids(LIGHT_DOMAIN)) == 1
name = f"{LIGHT_DOMAIN}.{light.name}"
# None means maybe.
light.system.online = None
await _ffwd_next_update_interval(hass)
state = hass.states.get(name)
assert state.state == STATE_UNAVAILABLE
assert state.attributes.get(ATTR_ASSUMED_STATE) is True
light.system.online = False
await _ffwd_next_update_interval(hass)
state = hass.states.get(name)
assert state.state == STATE_UNAVAILABLE
assert state.attributes.get(ATTR_ASSUMED_STATE) is True
light.system.online = True
await _ffwd_next_update_interval(hass)
state = hass.states.get(name)
assert state.state == STATE_ON
assert state.attributes.get(ATTR_ASSUMED_STATE) is None
|
{
"content_hash": "064d0367aa90e06e597d309f2b93fd2f",
"timestamp": "",
"source": "github",
"line_count": 354,
"max_line_length": 88,
"avg_line_length": 33.90677966101695,
"alnum_prop": 0.6999083562442723,
"repo_name": "rohitranjan1991/home-assistant",
"id": "3a35804f447e65dfc94f6be5215e04645f62c15c",
"size": "12003",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "tests/components/iaqualink/test_init.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1017265"
},
{
"name": "Python",
"bytes": "1051086"
},
{
"name": "Shell",
"bytes": "3946"
}
],
"symlink_target": ""
}
|
import requests
import json
class TheTVDB(object):
token = None
def __init__(self, username, api_key, user_key):
self.username = username
self.api_key = api_key
self.user_key = user_key
def get_imdb_id(self, tvdb_id):
# TODO Cache
if not self.token:
self._refresh_token()
url = "https://api.thetvdb.com/series/{tvdb_id}".format(
tvdb_id=tvdb_id)
headers = {
'Authorization': 'Bearer {token}'.format(token=self.token)
}
r = requests.get(url, headers=headers)
if r.status_code == 200:
tv_show = r.json()
return tv_show['data']['imdbId']
else:
return None
def _refresh_token(self):
data = {
'apikey': self.api_key,
'userkey': self.user_key,
'username': self.username,
}
url = "https://api.thetvdb.com/login"
r = requests.post(url, json=data)
if r.status_code == 200:
result = r.json()
self.token = result['token']
else:
return None
def get_tvdb_from_imdb(self, imdb_id):
# TODO Cache
if not self.token:
self._refresh_token()
params = {
'imdbId': imdb_id
}
url = "https://api.thetvdb.com/search/series"
headers = {
'Authorization': 'Bearer {token}'.format(token=self.token)
}
r = requests.get(url, headers=headers, params=params)
if r.status_code == 200:
item = json.loads(r.text)
return item.get('data')[0] if item and item.get('data') else None
else:
return None
|
{
"content_hash": "fcbcd0325f3bf4cc1c9446c5a819b736",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 77,
"avg_line_length": 25.776119402985074,
"alnum_prop": 0.5118702953097858,
"repo_name": "adamgot/python-plexlibrary",
"id": "90613f0f919bb53c206c7301a3e22d9cb7fa6528",
"size": "1751",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plexlibrary/tvdb.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "64256"
}
],
"symlink_target": ""
}
|
"""
Django settings for test_project project.
Generated by 'django-admin startproject' using Django 1.10.4.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = ')7hd%peodgqm&5+zx@rko!5y1wtb$=nhf9pa+q_u+tqi^hs*(p'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Application definition
INSTALLED_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'loginurl'
]
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
|
{
"content_hash": "78e51b9de56dcc30dc58f6d1af32a069",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 72,
"avg_line_length": 25.933333333333334,
"alnum_prop": 0.7146529562982005,
"repo_name": "uploadcare/django-loginurl",
"id": "560d48e27e6625d13ab727f4a5c660c06258ba14",
"size": "1167",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_project/test_project/settings.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "24423"
}
],
"symlink_target": ""
}
|
import glob
import os
path = os.path.abspath("../output/matrizesConfusao")
filtro = "*.csv"
path = os.path.join(path, filtro)
files = glob.glob(path)
#exemplo de arquivo a ser consumido
#[20155293416]-test-with-training[[201552795125]-experimento[class-10-uterances-10-num_loc_masc-8-num_loc_fem-8].csv]-assertiveness[[6.66666666666667 0 75 0 50 68.75 12.5 0 18.75 100]%]-user[F4]
class Experimento:
def __init__ (self, name) :
self.name = name
self._assertiveness = None
self._counter = 0
def plus_assertiveness (self, assertiveness) :
self._counter += 1
if self._assertiveness is None :
self._assertiveness = [ 0.0 for val in range(len(assertiveness)) ]
for index, value in enumerate(self._assertiveness) :
self._assertiveness[index] = value + float(assertiveness[index])
def mean (self) :
mean = [ (assertiveness / self._counter) for assertiveness in self._assertiveness ]
return mean
def single_mean (self) :
return (sum(self.mean()) / len(self.mean()))
def merge () :
'''
Merge all files
'''
experimentos = {}
print "Amount of files: ", len(files)
for file in files:
info = file.split("experimento")
name, assertiveness = info[1].split(".csv")[0], info[1].split("[[")[1].split("]%]")[0].split(" ")
if experimentos.get(name) is None :
e = Experimento(name)
e.plus_assertiveness(assertiveness)
experimentos[name] = e
else :
e = experimentos[name]
e.plus_assertiveness(assertiveness)
experimentos[name] = e
print "Reduced to", len(experimentos.keys())
return dict([ (k, v) for k, v in experimentos.items() ])
show = merge().values()
show.sort(key=lambda obj: - obj.single_mean())
for v in show:
print v.name, [ round(val, 2) for val in v.mean()], round(v.single_mean(),2)
|
{
"content_hash": "0221f6609b043fd537c2f4e0b516f64a",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 194,
"avg_line_length": 23.263157894736842,
"alnum_prop": 0.6713800904977375,
"repo_name": "LarryPavanery/tcc-si-facisa",
"id": "62740f1ecdcd51726e9426db2085339e804d6388",
"size": "1792",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/merge_teste_expetimentos.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Matlab",
"bytes": "9034"
},
{
"name": "Python",
"bytes": "1792"
}
],
"symlink_target": ""
}
|
from __future__ import (absolute_import, print_function, division)
class Model(object):
pass
|
{
"content_hash": "112dadf2d5ce864397634746b5bc0895",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 66,
"avg_line_length": 19.8,
"alnum_prop": 0.7171717171717171,
"repo_name": "zbuc/imaghost",
"id": "ce8fd8bb4e5fb88848de484a9808efc67642894b",
"size": "99",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "models/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Go",
"bytes": "860"
},
{
"name": "HTML",
"bytes": "405"
},
{
"name": "Python",
"bytes": "11566"
},
{
"name": "Shell",
"bytes": "231"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.core.exceptions import PermissionDenied
from django.shortcuts import render
def template_failure(request, status=403, **kwargs):
""" Renders a SAML-specific template with general authentication error description. """
return render(request, 'djangosaml2/login_error.html', status=status, using='django')
def exception_failure(request, exc_class=PermissionDenied, **kwargs):
""" Rather than using a custom SAML specific template that is rendered on failure,
this makes use of a standard exception handling machinery present in Django
and thus ends up rendering a project-wide error page for Permission Denied exceptions.
"""
raise exc_class
|
{
"content_hash": "479e43d373ab9edb813d1c0709732620",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 91,
"avg_line_length": 42.588235294117645,
"alnum_prop": 0.7651933701657458,
"repo_name": "cloudera/hue",
"id": "ce993ecabe7a32cbaecc9d5d15fd8074fa0a4d0e",
"size": "896",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "desktop/core/ext-py/djangosaml2-0.16.11/djangosaml2/acs_failures.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ABAP",
"bytes": "962"
},
{
"name": "ActionScript",
"bytes": "1133"
},
{
"name": "Ada",
"bytes": "99"
},
{
"name": "Assembly",
"bytes": "2347"
},
{
"name": "AutoHotkey",
"bytes": "720"
},
{
"name": "BASIC",
"bytes": "2884"
},
{
"name": "Batchfile",
"bytes": "143575"
},
{
"name": "C",
"bytes": "5129166"
},
{
"name": "C#",
"bytes": "83"
},
{
"name": "C++",
"bytes": "718011"
},
{
"name": "COBOL",
"bytes": "4"
},
{
"name": "CSS",
"bytes": "680715"
},
{
"name": "Cirru",
"bytes": "520"
},
{
"name": "Clojure",
"bytes": "794"
},
{
"name": "Closure Templates",
"bytes": "1072"
},
{
"name": "CoffeeScript",
"bytes": "403"
},
{
"name": "ColdFusion",
"bytes": "86"
},
{
"name": "Common Lisp",
"bytes": "632"
},
{
"name": "Cython",
"bytes": "1016963"
},
{
"name": "D",
"bytes": "324"
},
{
"name": "Dart",
"bytes": "489"
},
{
"name": "Dockerfile",
"bytes": "13576"
},
{
"name": "EJS",
"bytes": "752"
},
{
"name": "Eiffel",
"bytes": "375"
},
{
"name": "Elixir",
"bytes": "692"
},
{
"name": "Elm",
"bytes": "487"
},
{
"name": "Emacs Lisp",
"bytes": "411907"
},
{
"name": "Erlang",
"bytes": "487"
},
{
"name": "Forth",
"bytes": "979"
},
{
"name": "FreeMarker",
"bytes": "1017"
},
{
"name": "G-code",
"bytes": "521"
},
{
"name": "GAP",
"bytes": "29873"
},
{
"name": "GLSL",
"bytes": "512"
},
{
"name": "Genshi",
"bytes": "946"
},
{
"name": "Gherkin",
"bytes": "699"
},
{
"name": "Go",
"bytes": "641"
},
{
"name": "Groovy",
"bytes": "1080"
},
{
"name": "HTML",
"bytes": "28328425"
},
{
"name": "Haml",
"bytes": "920"
},
{
"name": "Handlebars",
"bytes": "173"
},
{
"name": "Haskell",
"bytes": "512"
},
{
"name": "Haxe",
"bytes": "447"
},
{
"name": "HiveQL",
"bytes": "43"
},
{
"name": "Io",
"bytes": "140"
},
{
"name": "Java",
"bytes": "457398"
},
{
"name": "JavaScript",
"bytes": "39181239"
},
{
"name": "Jinja",
"bytes": "356"
},
{
"name": "Julia",
"bytes": "210"
},
{
"name": "LSL",
"bytes": "2080"
},
{
"name": "Lean",
"bytes": "213"
},
{
"name": "Less",
"bytes": "396102"
},
{
"name": "Lex",
"bytes": "218764"
},
{
"name": "Liquid",
"bytes": "1883"
},
{
"name": "LiveScript",
"bytes": "5747"
},
{
"name": "Lua",
"bytes": "78382"
},
{
"name": "M4",
"bytes": "1751"
},
{
"name": "MATLAB",
"bytes": "203"
},
{
"name": "Makefile",
"bytes": "1025937"
},
{
"name": "Mako",
"bytes": "3644004"
},
{
"name": "Mask",
"bytes": "597"
},
{
"name": "Myghty",
"bytes": "936"
},
{
"name": "Nix",
"bytes": "2212"
},
{
"name": "OCaml",
"bytes": "539"
},
{
"name": "Objective-C",
"bytes": "2672"
},
{
"name": "OpenSCAD",
"bytes": "333"
},
{
"name": "PHP",
"bytes": "662"
},
{
"name": "PLSQL",
"bytes": "29403"
},
{
"name": "PLpgSQL",
"bytes": "6006"
},
{
"name": "Pascal",
"bytes": "84273"
},
{
"name": "Perl",
"bytes": "4327"
},
{
"name": "PigLatin",
"bytes": "371"
},
{
"name": "PowerShell",
"bytes": "6235"
},
{
"name": "Procfile",
"bytes": "47"
},
{
"name": "Pug",
"bytes": "584"
},
{
"name": "Python",
"bytes": "92881549"
},
{
"name": "R",
"bytes": "2445"
},
{
"name": "Roff",
"bytes": "484108"
},
{
"name": "Ruby",
"bytes": "1098"
},
{
"name": "Rust",
"bytes": "495"
},
{
"name": "SCSS",
"bytes": "78508"
},
{
"name": "Sass",
"bytes": "770"
},
{
"name": "Scala",
"bytes": "1541"
},
{
"name": "Scheme",
"bytes": "559"
},
{
"name": "Shell",
"bytes": "249165"
},
{
"name": "Smarty",
"bytes": "130"
},
{
"name": "SourcePawn",
"bytes": "948"
},
{
"name": "Stylus",
"bytes": "682"
},
{
"name": "Tcl",
"bytes": "899"
},
{
"name": "TeX",
"bytes": "165743"
},
{
"name": "Thrift",
"bytes": "341963"
},
{
"name": "Twig",
"bytes": "761"
},
{
"name": "TypeScript",
"bytes": "1241396"
},
{
"name": "VBScript",
"bytes": "938"
},
{
"name": "VHDL",
"bytes": "830"
},
{
"name": "Vala",
"bytes": "485"
},
{
"name": "Verilog",
"bytes": "274"
},
{
"name": "Vim Snippet",
"bytes": "226931"
},
{
"name": "Vue",
"bytes": "350385"
},
{
"name": "XQuery",
"bytes": "114"
},
{
"name": "XSLT",
"bytes": "522199"
},
{
"name": "Yacc",
"bytes": "1070437"
},
{
"name": "jq",
"bytes": "4"
}
],
"symlink_target": ""
}
|
def __bootstrap__():
global __bootstrap__, __loader__, __file__
import sys, pkg_resources, imp
__file__ = pkg_resources.resource_filename(__name__,'_sqlite.so')
__loader__ = None; del __bootstrap__, __loader__
imp.load_dynamic(__name__,__file__)
__bootstrap__()
|
{
"content_hash": "af732db54a0bc1877e1354e8acaf29ae",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 68,
"avg_line_length": 39.57142857142857,
"alnum_prop": 0.592057761732852,
"repo_name": "2013Commons/HUE-SHARK",
"id": "f8336143c2da5bfbb3aa3cd0a4a006617324f123",
"size": "277",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "build/env/lib/python2.7/site-packages/pysqlite-2.5.5-py2.7-linux-i686.egg/pysqlite2/_sqlite.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "207947"
},
{
"name": "C",
"bytes": "9992379"
},
{
"name": "C++",
"bytes": "199612"
},
{
"name": "CSS",
"bytes": "419753"
},
{
"name": "Emacs Lisp",
"bytes": "3171"
},
{
"name": "Java",
"bytes": "3683071"
},
{
"name": "JavaScript",
"bytes": "1076553"
},
{
"name": "Perl",
"bytes": "138710"
},
{
"name": "Python",
"bytes": "40522057"
},
{
"name": "SQL",
"bytes": "522"
},
{
"name": "Shell",
"bytes": "27739"
},
{
"name": "TeX",
"bytes": "126420"
},
{
"name": "XSLT",
"bytes": "190688"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
import mock
import os
from django.conf import settings
def pytest_configure(config):
# HACK: Only needed for testing!
os.environ.setdefault('_SENTRY_SKIP_CONFIGURATION', '1')
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'sentry.conf.server')
if not settings.configured:
# only configure the db if its not already done
test_db = os.environ.get('DB', 'postgres')
if test_db == 'mysql':
settings.DATABASES['default'].update({
'ENGINE': 'django.db.backends.mysql',
'NAME': 'sentry',
'USER': 'root',
'HOST': '127.0.0.1',
})
# mysql requires running full migration all the time
settings.SOUTH_TESTS_MIGRATE = True
elif test_db == 'postgres':
settings.DATABASES['default'].update({
'ENGINE': 'sentry.db.postgres',
'USER': 'postgres',
'NAME': 'sentry',
})
# postgres requires running full migration all the time
# since it has to install stored functions which come from
# an actual migration.
settings.SOUTH_TESTS_MIGRATE = True
elif test_db == 'sqlite':
settings.DATABASES['default'].update({
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
})
settings.SOUTH_TESTS_MIGRATE = os.environ.get('SENTRY_SOUTH_TESTS_MIGRATE', '1') == '1'
else:
raise RuntimeError('oops, wrong database: %r' % test_db)
settings.TEMPLATE_DEBUG = True
# Disable static compiling in tests
settings.STATIC_BUNDLES = {}
# override a few things with our test specifics
settings.INSTALLED_APPS = tuple(settings.INSTALLED_APPS) + (
'tests',
)
# Need a predictable key for tests that involve checking signatures
settings.SENTRY_PUBLIC = False
if not settings.SENTRY_CACHE:
settings.SENTRY_CACHE = 'sentry.cache.django.DjangoCache'
settings.SENTRY_CACHE_OPTIONS = {}
# This speeds up the tests considerably, pbkdf2 is by design, slow.
settings.PASSWORD_HASHERS = [
'django.contrib.auth.hashers.MD5PasswordHasher',
]
# Replace real sudo middleware with our mock sudo middleware
# to assert that the user is always in sudo mode
middleware = list(settings.MIDDLEWARE_CLASSES)
sudo = middleware.index('sentry.middleware.sudo.SudoMiddleware')
middleware[sudo] = 'sentry.testutils.middleware.SudoMiddleware'
settings.MIDDLEWARE_CLASSES = tuple(middleware)
# enable draft features
settings.SENTRY_OPTIONS['mail.enable-replies'] = True
settings.SENTRY_ALLOW_ORIGIN = '*'
settings.SENTRY_TSDB = 'sentry.tsdb.inmemory.InMemoryTSDB'
settings.SENTRY_TSDB_OPTIONS = {}
settings.BROKER_BACKEND = 'memory'
settings.BROKER_URL = None
settings.CELERY_ALWAYS_EAGER = False
settings.CELERY_EAGER_PROPAGATES_EXCEPTIONS = True
settings.DEBUG_VIEWS = True
settings.DISABLE_RAVEN = True
settings.CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
}
}
if not hasattr(settings, 'SENTRY_OPTIONS'):
settings.SENTRY_OPTIONS = {}
settings.SENTRY_OPTIONS.update({
'redis.clusters': {
'default': {
'hosts': {
0: {
'db': 9,
},
},
},
},
'mail.backend': 'django.core.mail.backends.locmem.EmailBackend',
'system.url-prefix': 'http://testserver',
})
# django mail uses socket.getfqdn which doesn't play nice if our
# networking isn't stable
patcher = mock.patch('socket.getfqdn', return_value='localhost')
patcher.start()
from sentry.runner.initializer import (
bootstrap_options, configure_structlog, initialize_receivers, fix_south,
bind_cache_to_option_store)
bootstrap_options(settings)
configure_structlog()
fix_south(settings)
bind_cache_to_option_store()
initialize_receivers()
from sentry.plugins import plugins
from sentry.plugins.utils import TestIssuePlugin2
plugins.register(TestIssuePlugin2)
from sentry.utils.redis import clusters
with clusters.get('default').all() as client:
client.flushdb()
# force celery registration
from sentry.celery import app # NOQA
# disable DISALLOWED_IPS
from sentry import http
http.DISALLOWED_IPS = set()
def pytest_runtest_teardown(item):
from sentry.app import tsdb
tsdb.flush()
from sentry.utils.redis import clusters
with clusters.get('default').all() as client:
client.flushdb()
from celery.task.control import discard_all
discard_all()
|
{
"content_hash": "38a8e4044be70f09283c0d179192c2b4",
"timestamp": "",
"source": "github",
"line_count": 159,
"max_line_length": 99,
"avg_line_length": 30.842767295597483,
"alnum_prop": 0.6217373572593801,
"repo_name": "alexm92/sentry",
"id": "0d35032db496cbca5d1d56be965dabd41060765b",
"size": "4904",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/sentry/utils/pytest/sentry.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "156715"
},
{
"name": "HTML",
"bytes": "191265"
},
{
"name": "JavaScript",
"bytes": "457236"
},
{
"name": "Makefile",
"bytes": "4689"
},
{
"name": "Python",
"bytes": "7262450"
}
],
"symlink_target": ""
}
|
import unittest
from airflow.utils.trigger_rule import TriggerRule
class TestTriggerRule(unittest.TestCase):
def test_valid_trigger_rules(self):
assert TriggerRule.is_valid(TriggerRule.ALL_SUCCESS)
assert TriggerRule.is_valid(TriggerRule.ALL_FAILED)
assert TriggerRule.is_valid(TriggerRule.ALL_DONE)
assert TriggerRule.is_valid(TriggerRule.ONE_SUCCESS)
assert TriggerRule.is_valid(TriggerRule.ONE_FAILED)
assert TriggerRule.is_valid(TriggerRule.NONE_FAILED)
assert TriggerRule.is_valid(TriggerRule.NONE_FAILED_OR_SKIPPED)
assert TriggerRule.is_valid(TriggerRule.NONE_SKIPPED)
assert TriggerRule.is_valid(TriggerRule.DUMMY)
assert TriggerRule.is_valid(TriggerRule.ALWAYS)
assert len(TriggerRule.all_triggers()) == 10
|
{
"content_hash": "b8951dae2439f2001a0b1956dedba6b4",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 71,
"avg_line_length": 45.05555555555556,
"alnum_prop": 0.7410604192355117,
"repo_name": "dhuang/incubator-airflow",
"id": "9a03808ac56fb5a17a9e5a0eaed09008c0661282",
"size": "1599",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/utils/test_trigger_rule.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "109698"
},
{
"name": "HTML",
"bytes": "264851"
},
{
"name": "JavaScript",
"bytes": "1988427"
},
{
"name": "Mako",
"bytes": "1037"
},
{
"name": "Python",
"bytes": "3357958"
},
{
"name": "Shell",
"bytes": "34442"
}
],
"symlink_target": ""
}
|
import re
from inaugurator import sh
class Network:
_CONFIG_SCRIPT_PATH = "/etc/udhcp_script.sh"
_NR_PING_ATTEMPTS = 20
def __init__(self, macAddress, ipAddress, netmask, gateway):
self._gateway = gateway
interfacesTable = self._interfacesTable()
assert macAddress.lower() in interfacesTable
interfaceName = interfacesTable[macAddress.lower()]
sh.run("/usr/sbin/ifconfig %s %s netmask %s" % (interfaceName, ipAddress, netmask))
sh.run("busybox route add default gw %s" % self._gateway)
self._validateLinkIsUp()
def _validateLinkIsUp(self):
print "Waiting for the connection to actually be up by pinging %s..." % (self._gateway,)
linkIsUp = False
for attemptIdx in xrange(self._NR_PING_ATTEMPTS):
attemptNr = attemptIdx + 1
try:
result = sh.run("busybox ping -w 1 -c 1 %s" % (self._gateway,))
linkIsUp = True
print "Ping attempt #%d succeeded." % (attemptNr,)
break
except:
print "Ping attempt #%d failed." % (attemptNr,)
if not linkIsUp:
raise Expception("No response from %s when trying to test if link was up" % (self._gateway,))
def _interfacesTable(self):
REGEX = re.compile(r'\d+:\s+([^:]+):\s+.*\s+link/ether\s+((?:[a-fA-F0-9]{2}:){5}[a-fA-F0-9]{2})')
ipOutput = sh.run("/usr/sbin/ip -o link")
return {mac.lower(): interface for interface, mac in REGEX.findall(ipOutput)}
|
{
"content_hash": "6717fd82e5a016c26590026c39176633",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 105,
"avg_line_length": 42.861111111111114,
"alnum_prop": 0.5904082955281919,
"repo_name": "eliran-stratoscale/inaugurator",
"id": "71ad2ed0237b0bb30b7e7eda3359decfa20edcbb",
"size": "1543",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "inaugurator/network.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Erlang",
"bytes": "2391"
},
{
"name": "Makefile",
"bytes": "2273"
},
{
"name": "Python",
"bytes": "229688"
},
{
"name": "Shell",
"bytes": "2542"
}
],
"symlink_target": ""
}
|
import json
import os
import pretend
import pytest
from static_precompiler import compilers, exceptions, utils
def test_compile_file(monkeypatch, tmpdir):
monkeypatch.setattr("static_precompiler.settings.ROOT", tmpdir.strpath)
convert_urls = pretend.call_recorder(lambda *args: None)
monkeypatch.setattr("static_precompiler.utils.convert_urls", convert_urls)
compiler = compilers.Stylus()
assert compiler.compile_file("styles/stylus/A.styl") == "COMPILED/styles/stylus/A.css"
full_output_path = compiler.get_full_output_path("styles/stylus/A.styl")
assert convert_urls.calls == [pretend.call(full_output_path, "styles/stylus/A.styl")]
assert os.path.exists(full_output_path)
with open(full_output_path) as compiled:
assert compiled.read() == """p {
color: #f00;
}
"""
def test_sourcemap(monkeypatch, tmpdir):
monkeypatch.setattr("static_precompiler.settings.ROOT", tmpdir.strpath)
monkeypatch.setattr("static_precompiler.utils.convert_urls", lambda *args: None)
compiler = compilers.Stylus(sourcemap_enabled=False)
compiler.compile_file("styles/stylus/A.styl")
full_output_path = compiler.get_full_output_path("styles/stylus/A.styl")
assert not os.path.exists(full_output_path + ".map")
compiler = compilers.Stylus(sourcemap_enabled=True)
compiler.compile_file("styles/stylus/A.styl")
full_output_path = compiler.get_full_output_path("styles/stylus/A.styl")
assert os.path.exists(full_output_path + ".map")
sourcemap = json.loads(open(full_output_path + ".map").read())
assert sourcemap["sourceRoot"] == "../../../styles/stylus"
assert sourcemap["sources"] == ["F.styl"]
assert sourcemap["file"] == "A.css"
def test_compile_source():
compiler = compilers.Stylus()
assert utils.fix_line_breaks(compiler.compile_source("p\n color: red;")) == "p {\n color: #f00;\n}\n\n"
with pytest.raises(exceptions.StaticCompilationError):
assert compiler.compile_source("broken")
def test_find_imports():
source = """
@import " "
@import "foo.styl"
@import 'foo'
@import "foo.css"
@import "http://foo.com/bar"
@import "https://foo.com/bar"
@import url(foo)
@import url(http://fonts.googleapis.com/css?family=Arvo:400,700,400italic,700italic)
@import url("http://fonts.googleapis.com/css?family=Open+Sans:300italic,400italic,600italic,700italic,400,700,600,300")
@require "foo.styl"
@require "foo/*"
"""
expected = [
"foo",
"foo.styl",
"foo/*",
]
compiler = compilers.Stylus()
assert compiler.find_imports(source) == expected
def test_locate_imported_file(monkeypatch):
compiler = compilers.Stylus()
root = os.path.dirname(__file__)
existing_files = set()
for f in ("A/B.styl", "C.styl"):
existing_files.add(os.path.join(root, "static", utils.normalize_path(f)))
monkeypatch.setattr("os.path.exists", lambda x: x in existing_files)
assert compiler.locate_imported_file("A", "B.styl") == "A/B.styl"
assert compiler.locate_imported_file("", "C.styl") == "C.styl"
with pytest.raises(exceptions.StaticCompilationError):
compiler.locate_imported_file("", "Z.styl")
def test_find_dependencies():
compiler = compilers.Stylus()
assert compiler.find_dependencies("styles/stylus/A.styl") == [
"styles/stylus/B/C.styl",
"styles/stylus/D.styl",
"styles/stylus/E/F.styl",
"styles/stylus/E/index.styl",
]
with pytest.raises(exceptions.StaticCompilationError):
compiler.find_dependencies("styles/stylus/broken1.styl")
with pytest.raises(exceptions.StaticCompilationError):
compiler.find_dependencies("styles/stylus/broken2.styl")
with pytest.raises(exceptions.StaticCompilationError):
compiler.find_dependencies("styles/stylus/broken3.styl")
|
{
"content_hash": "030d4161d3b5da0d65d0e852c3f01e0f",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 119,
"avg_line_length": 31.5,
"alnum_prop": 0.6880041634139995,
"repo_name": "liumengjun/django-static-precompiler",
"id": "f831292aee4cbc8c637f0bab63ecb0e589e58cc9",
"size": "3859",
"binary": false,
"copies": "1",
"ref": "refs/heads/lab",
"path": "static_precompiler/tests/test_stylus.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "981"
},
{
"name": "CoffeeScript",
"bytes": "166"
},
{
"name": "HTML",
"bytes": "421"
},
{
"name": "JavaScript",
"bytes": "58"
},
{
"name": "LiveScript",
"bytes": "28"
},
{
"name": "Python",
"bytes": "128244"
},
{
"name": "Ruby",
"bytes": "147"
},
{
"name": "Shell",
"bytes": "1244"
}
],
"symlink_target": ""
}
|
days=10
entries_per_day=100000
# file size calculations
# format entries_per_day file_size in MB
# csv 10000000 830 M file
# json 1000000 169 M
# json 3200000 500 M
log_format="csv"
#log_format="json"
## --- end config
import os
import datetime as dt
import random
import json
domains = ['facebook.com', 'yahoo.com', 'google.com', 'zynga.com', 'wikipedia.org', 'sf.craigslist.org', 'twitter.com', 'amazon.com', 'flickr.com', 'cnn.com', 'usatoday.com', 'npr.org', 'foxnews.com', 'comedycentral.com', 'youtube.com', 'hulu.com', 'bbc.co.uk', 'nytimes.com', 'sfgate.com', 'funnyordie.com']
actions = ['viewed', 'clicked', 'blocked']
total_users = 100000
total_ips = 1000
total_sessions = 100
total_campaigns = 20
# overwrite this function to customize log generation
def generate_log(timestamp):
user = "user_%d" % random.randint(1,total_users)
action = random.choice(actions)
domain = random.choice(domains)
ip_address = "ip_%d" % random.randint(1,total_ips)
campaign = "campaign_%d" % random.randint(1,total_campaigns)
session = "session_%s" % random.randint(1,total_sessions)
#cost is in cents, could be zero
cost = random.randint(1,200) - 20
if (cost < 0):
cost = 0
#csv
if (log_format == 'csv'):
logline = "%s,%s,%s,%s,%s,%s,%s,%s" % (timestamp, ip_address, user, action, domain, campaign, cost, session)
# generate JSON format
if (log_format == 'json'):
dict={'timestamp': timestamp, 'ip': ip_address, 'user': user, 'action': action, 'domain': domain, 'campaign':campaign, 'cost': cost, 'session': session}
logline = json.dumps(dict)
#print logline
return logline
#main
## --- script main
if __name__ == '__main__':
time_inc_ms = int ((24.0*3600*1000)/entries_per_day)
#print "time inc ms", time_inc_ms
#epoch = dt.datetime.fromtimestamp(0)
epoch = dt.datetime(1970,1,1)
year_start = dt.datetime(2015, 1, 1)
for day in range(0, days):
day_delta = dt.timedelta(days=day)
start_ts = year_start + day_delta
#end_ts = dt.datetime(start_ts.year, start_ts.month, start_ts.day, 23, 59, 59)
end_ts = dt.datetime(start_ts.year, start_ts.month, start_ts.day+1, 0, 0, 0)
filename = start_ts.strftime("%Y-%m-%d")
if (log_format == 'csv'):
filename = filename + ".csv"
if (log_format == 'json'):
filename = filename + ".json"
#print start_ts
#print end_ts
last_ts = start_ts
with open(filename, "w") as fout:
print "generating log ", filename
while (last_ts < end_ts):
delta_since_epoch = last_ts - epoch
millis = int((delta_since_epoch.microseconds + (delta_since_epoch.seconds + delta_since_epoch.days * 24 * 3600) * 10**6) / 1e3)
#print "last ts", last_ts
#print "millis", millis
logline = generate_log(millis)
fout.write(logline + "\n")
last_ts = last_ts + dt.timedelta(milliseconds=time_inc_ms)
|
{
"content_hash": "c08d5b4434ac198dfe24c9bb90df098d",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 356,
"avg_line_length": 33.05494505494506,
"alnum_prop": 0.6163563829787234,
"repo_name": "hongchhe/myhadoop",
"id": "31578309e6249b4808591a2ace8c1a39ed36d5f7",
"size": "3258",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "spark/data/clickstream/gen-clickstream.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "31363"
},
{
"name": "Shell",
"bytes": "23240"
}
],
"symlink_target": ""
}
|
from distutils.core import setup
from setuptools import find_packages
setup(
name='adminactionview',
version='0.2',
author='Michiel De Paepe',
author_email='michiel.de.paepe@gmail.com',
packages=find_packages(),
url='https://github.com/MichielDePaepe/adminactionview',
license='MIT licence, see LICENCE.txt',
description='Add an intermediate page to the django admin when preforming actions. Here you can define extra parameters for your action',
long_description=open('README.txt').read(),
include_package_data=True,
zip_safe=False,
)
|
{
"content_hash": "43fc9a4b6e5bfbef192c3ef47e0b6cd8",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 141,
"avg_line_length": 36.3125,
"alnum_prop": 0.7246127366609294,
"repo_name": "MichielDePaepe/adminactionview",
"id": "54487296b2604e7844c8e82d2d027fa6d4968218",
"size": "605",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "485"
},
{
"name": "Python",
"bytes": "9105"
}
],
"symlink_target": ""
}
|
"""This example gets all active activities.
"""
# Import appropriate modules from the client library.
from googleads import ad_manager
def main(client):
# Initialize appropriate service.
activity_service = client.GetService('ActivityService', version='v202208')
# Create a statement to select activities.
statement = (ad_manager.StatementBuilder(version='v202208')
.Where('status = :status')
.WithBindVariable('status', 'ACTIVE'))
# Retrieve a small amount of activities at a time, paging
# through until all activities have been retrieved.
while True:
response = activity_service.getActivitiesByStatement(statement.ToStatement(
))
if 'results' in response and len(response['results']):
for activity in response['results']:
# Print out some information for each activity.
print('Activity with ID "%d", name "%s", and type "%s" was found.\n' %
(activity['id'], activity['name'], activity['type']))
statement.offset += statement.limit
else:
break
print('\nNumber of results found: %s' % response['totalResultSetSize'])
if __name__ == '__main__':
# Initialize client object.
ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage()
main(ad_manager_client)
|
{
"content_hash": "1e5b5367c99f7b29090806b264780586",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 79,
"avg_line_length": 35.666666666666664,
"alnum_prop": 0.6814641744548287,
"repo_name": "googleads/googleads-python-lib",
"id": "2d98022cd6c42f78d8681807cd61dcb72a5ffe5d",
"size": "1905",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "examples/ad_manager/v202208/activity_service/get_active_activities.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "403821"
}
],
"symlink_target": ""
}
|
"""
raven.contrib.celery
~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2012 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import logging
from celery.exceptions import SoftTimeLimitExceeded
from celery.signals import (
after_setup_logger, task_failure, task_prerun, task_postrun
)
from raven.handlers.logging import SentryHandler
class CeleryFilter(logging.Filter):
def filter(self, record):
# Context is fixed in Celery 3.x so use internal flag instead
extra_data = getattr(record, 'data', {})
if not isinstance(extra_data, dict):
return record.funcName != '_log_error'
# Fallback to funcName for Celery 2.5
return extra_data.get('internal', record.funcName != '_log_error')
def register_signal(client, ignore_expected=False):
SentryCeleryHandler(client, ignore_expected=ignore_expected).install()
def register_logger_signal(client, logger=None, loglevel=logging.ERROR):
filter_ = CeleryFilter()
handler = SentryHandler(client)
handler.setLevel(loglevel)
handler.addFilter(filter_)
def process_logger_event(sender, logger, loglevel, logfile, format,
colorize, **kw):
# Attempt to find an existing SentryHandler, and if it exists ensure
# that the CeleryFilter is installed.
# If one is found, we do not attempt to install another one.
for h in logger.handlers:
if type(h) == SentryHandler:
h.addFilter(filter_)
return False
logger.addHandler(handler)
after_setup_logger.connect(process_logger_event, weak=False)
class SentryCeleryHandler(object):
def __init__(self, client, ignore_expected=False):
self.client = client
self.ignore_expected = ignore_expected
def install(self):
task_prerun.connect(self.handle_task_prerun, weak=False)
task_postrun.connect(self.handle_task_postrun, weak=False)
task_failure.connect(self.process_failure_signal, weak=False)
def uninstall(self):
task_prerun.disconnect(self.handle_task_prerun)
task_postrun.disconnect(self.handle_task_postrun)
task_failure.disconnect(self.process_failure_signal)
def process_failure_signal(self, sender, task_id, args, kwargs, einfo, **kw):
if self.ignore_expected and hasattr(sender, 'throws') and isinstance(einfo.exception, sender.throws):
return
# This signal is fired inside the stack so let raven do its magic
if isinstance(einfo.exception, SoftTimeLimitExceeded):
fingerprint = ['celery', 'SoftTimeLimitExceeded', sender]
else:
fingerprint = None
self.client.captureException(
extra={
'task_id': task_id,
'task': sender,
'args': args,
'kwargs': kwargs,
},
fingerprint=fingerprint,
)
def handle_task_prerun(self, sender, task_id, task, **kw):
self.client.transaction.push(task.name)
def handle_task_postrun(self, sender, task_id, task, **kw):
self.client.transaction.pop(task.name)
|
{
"content_hash": "964f7e1e035a8bd9e96b5c4492b9f830",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 109,
"avg_line_length": 34.58510638297872,
"alnum_prop": 0.6508766533374346,
"repo_name": "lightopa/Aiopa-Battles",
"id": "edf2c42837f3c1e0b68b720b1617b5b33be145a6",
"size": "3251",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lib/raven/contrib/celery/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "277"
},
{
"name": "Python",
"bytes": "1245305"
}
],
"symlink_target": ""
}
|
import argparse, sys
from argparse import RawTextHelpFormatter
import pysam
__author__ = "Author (email@site.com)"
__version__ = "$Revision: 0.0.1 $"
__date__ = "$Date: 2013-05-09 14:31 $"
# --------------------------------------
# define functions
def get_args():
parser = argparse.ArgumentParser(formatter_class=RawTextHelpFormatter, description="\
bamheadrg.py\n\
author: " + __author__ + "\n\
version: " + __version__ + "\n\
description: Inject readgroup info")
parser.add_argument('-r', '--readgroup', default=None, required=False, help='Read group(s) to extract (comma separated)')
parser.add_argument('-d', '--donor', type=str, required=True, help='Donor BAM/SAM file to extract read group info')
parser.add_argument('-S', '--donor_is_sam', required=False, action='store_true', help='Donor file is SAM')
parser.add_argument('recipient', nargs='?', type=argparse.FileType('r'), default=None,
help='SAM file to inject header lines into. If \'-\' or absent then defaults to stdin.')
# parse the arguments
args = parser.parse_args()
# if no input, check if part of pipe and if so, read stdin.
if args.recipient == None:
if sys.stdin.isatty():
parser.print_help()
exit(1)
else:
args.recipient = sys.stdin
# send back the user input
return args
# extract read group information from header of original bam
def extract_rg_info(donor, donor_is_sam, rgs_to_extract):
if donor_is_sam:
bam = pysam.Samfile(donor, 'r', check_sq=False)
else:
bam = pysam.Samfile(donor, 'rb', check_sq=False)
rg_out = list()
for line in bam.text.split('\n'):
if line[:3] == "@RG":
v = line.rstrip().split('\t')
readgroup = dict(x.split(':',1) for x in v[1:])
# strip out any illegal fields
readgroup_clean = dict()
legal_fields = ['ID', 'CN', 'DS', 'DT', 'FO', 'KS', 'LB',
'PG', 'PI', 'PL', 'PU', 'SM']
for field in readgroup:
if (field in legal_fields
and readgroup[field].strip() != ""):
readgroup_clean[field] = readgroup[field]
# add to clean readgroups
if not rgs_to_extract or readgroup_clean['ID'] in rgs_to_extract:
rg_out.append(readgroup_clean)
bam.close()
return rg_out
# add read group info to header of new sam file
def bamheadrg(recipient, rg_out):
in_header = True
for line in recipient:
if in_header:
if line[0] != '@':
for readgroup in rg_out:
print '@RG\t' + '\t'.join([':'.join((t,readgroup[t])) for t in readgroup])
in_header = False
print line.rstrip()
return
# --------------------------------------
# main function
def main():
# parse the command line args
args = get_args()
if args.readgroup:
rgs_to_extract = args.readgroup.split(',')
else:
rgs_to_extract = None
# extract specified readgroups from original bam file
rg_out = extract_rg_info(args.donor, args.donor_is_sam, rgs_to_extract)
# add extracted readgroups to new sam file
bamheadrg(args.recipient, rg_out)
# close the input file
args.recipient.close()
# initialize the script
if __name__ == '__main__':
try:
sys.exit(main())
except IOError, e:
if e.errno != 32: # ignore SIGPIPE
raise
|
{
"content_hash": "4f1262cc15177286d41bd39938309872",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 125,
"avg_line_length": 33.76923076923077,
"alnum_prop": 0.5700455580865603,
"repo_name": "hall-lab/bamkit",
"id": "62c8e7c1c117bf65bc4aa27ebfd9764a568ea76f",
"size": "3535",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "bamheadrg.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "36225"
},
{
"name": "Shell",
"bytes": "99"
}
],
"symlink_target": ""
}
|
"""Configuration of Flask-OAuthlib provider."""
from datetime import datetime, timedelta
from flask import current_app, g
from flask_login import current_user
from flask_oauthlib.provider import OAuth2Provider
from flask_principal import Identity, identity_changed
from flask_security.utils import verify_password
from importlib_metadata import version
from invenio_db import db
from werkzeug.local import LocalProxy
from .models import Client, Token
from .scopes import email_scope
oauth2 = OAuth2Provider()
datastore = LocalProxy(lambda: current_app.extensions["security"].datastore)
@oauth2.usergetter
def get_user(email, password, *args, **kwargs):
"""Get user for grant type password.
Needed for grant type 'password'. Note, grant type password is by default
disabled.
:param email: User email.
:param password: Password.
:returns: The user instance or ``None``.
"""
user = datastore.find_user(email=email)
if user and user.active and verify_password(password, user.password):
return user
@oauth2.tokengetter
def get_token(access_token=None, refresh_token=None):
"""Load an access token.
Add support for personal access tokens compared to flask-oauthlib.
If the access token is ``None``, it looks for the refresh token.
:param access_token: The access token. (Default: ``None``)
:param refresh_token: The refresh token. (Default: ``None``)
:returns: The token instance or ``None``.
"""
if access_token:
t = Token.query.filter_by(access_token=access_token).first()
elif refresh_token:
t = (
Token.query.join(Token.client)
.filter(
Token.refresh_token == refresh_token,
Token.is_personal == False, # noqa
Client.is_confidential == True,
)
.first()
)
else:
return None
return t if t and t.user.active else None
@oauth2.clientgetter
def get_client(client_id):
"""Load the client.
Needed for grant_type client_credentials.
Add support for OAuth client_credentials access type, with user
inactivation support.
:param client_id: The client ID.
:returns: The client instance or ``None``.
"""
client = Client.query.get(client_id)
if client and client.user.active:
return client
@oauth2.tokensetter
def save_token(token, request, *args, **kwargs):
"""Token persistence.
:param token: A dictionary with the token data.
:param request: The request instance.
:returns: A :class:`invenio_oauth2server.models.Token` instance.
"""
# Exclude the personal access tokens which doesn't expire.
user = request.user if request.user else current_user
# Add user information in token endpoint response.
# Currently, this is the only way to have the access to the user of the
# token as well as the token response.
token.update(user={"id": user.get_id()})
# Add email if scope granted.
if email_scope.id in token.scopes:
token["user"].update(
email=user.email,
email_verified=user.confirmed_at is not None,
)
tokens = Token.query.filter_by(
client_id=request.client.client_id,
user_id=user.id,
is_personal=False,
)
# make sure that every client has only one token connected to a user
if tokens:
for tk in tokens:
db.session.delete(tk)
db.session.commit()
expires_in = token.get("expires_in")
expires = datetime.utcnow() + timedelta(seconds=int(expires_in))
tok = Token(
access_token=token["access_token"],
refresh_token=token.get("refresh_token"),
token_type=token["token_type"],
_scopes=token["scope"],
expires=expires,
client_id=request.client.client_id,
user_id=user.id,
is_personal=False,
)
db.session.add(tok)
db.session.commit()
return tok
@oauth2.after_request
def login_oauth2_user(valid, oauth):
"""Log in a user after having been verified."""
if valid:
oauth.user.login_via_oauth2 = True
# Flask-login==0.6.2 changed the way the user is saved i.e uses `flask.g`
# To keep backwards compatibility we fallback to the previous implementation
# for earlier versions.
if version("flask-login") <= "0.6.1":
from flask import _request_ctx_stack
_request_ctx_stack.top.user = oauth.user
else:
g._login_user = oauth.user
identity_changed.send(
current_app._get_current_object(), identity=Identity(oauth.user.id)
)
return valid, oauth
|
{
"content_hash": "093fcba486dd76be444e6932b1a7f81b",
"timestamp": "",
"source": "github",
"line_count": 152,
"max_line_length": 84,
"avg_line_length": 30.79605263157895,
"alnum_prop": 0.6530655842768639,
"repo_name": "inveniosoftware/invenio-oauth2server",
"id": "350d2cbf3ec91d0f792da54fcb8762a0046bd7ae",
"size": "4916",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "invenio_oauth2server/provider.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "44530"
},
{
"name": "Python",
"bytes": "175445"
},
{
"name": "Shell",
"bytes": "823"
}
],
"symlink_target": ""
}
|
import json
import six
_transform_registry = {}
def register_transform(from_type, func):
_transform_registry[from_type] = func
class BaseModel(object):
def __str__(self):
str_dict = {}
for key, value in six.iteritems(self.__dict__):
if key.startswith('_'):
continue
str_dict[key] = value
return json.dumps(str_dict)
def __repr__(self):
return self.__str__()
@classmethod
def transform(cls, from_model):
if isinstance(from_model, list):
if from_model:
key = type(from_model[0])
else:
return []
elif not from_model:
return
else:
key = type(from_model)
func = _transform_registry[key]
if isinstance(from_model, list):
return [func(item) for item in from_model]
return func(from_model)
class Server(BaseModel):
def __init__(self, uuid=None, name=None, flavor=None, image=None,
networks=None):
self.uuid = uuid
self.name = name
self.flavor = flavor
self.image = image
self.networks = networks or []
|
{
"content_hash": "6f72365b497514207bf95590da45100a",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 69,
"avg_line_length": 24.448979591836736,
"alnum_prop": 0.5383973288814691,
"repo_name": "omninubes/nubes",
"id": "402f10fb33748061283595c0b19b4d608f1e96be",
"size": "1198",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nubes/common/models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "8164"
}
],
"symlink_target": ""
}
|
from cobra.core.loading import feature_hidden
from cobra.views.decorators import permissions_required
class Application(object):
"""
Base application class.
This is subclassed by each app to provide a customisable container for an
app's views and permissions.
"""
#: Namespace name
name = None
#: A name that allows the functionality within this app to be disabled
hidable_feature_name = None
#: Maps view names to lists of permissions. We expect tuples of
#: lists as dictionary values. A list is a set of permissions that all
#: needto be fulfilled (AND). Only one set of permissions has to be
#: fulfilled (OR).
#: If there's only one set of permissions, as a shortcut, you can also
#: just define one list.
permissions_map = {}
#: Default permission for any view not in permissions_map
default_permissions = None
def __init__(self, app_name=None, **kwargs):
self.app_name = app_name
# Set all kwargs as object attributes
for key, value in kwargs.items():
setattr(self, key, value)
def get_urls(self):
"""
Return the url patterns for this app.
"""
return []
def post_process_urls(self, urlpatterns):
"""
Customise URL patterns.
This method allows decorators to be wrapped around an apps URL
patterns.
By default, this only allows custom decorators to be specified, but you
could override this method to do anything you want.
Args:
urlpatterns (list): A list of URL patterns
"""
# Test if this the URLs in the Application instance should be
# available. If the feature is hidden then we don't include the URLs.
if feature_hidden(self.hidable_feature_name):
return []
for pattern in urlpatterns:
if hasattr(pattern, 'url_patterns'):
self.post_process_urls(pattern.url_patterns)
if not hasattr(pattern, '_callback'):
continue
# Look for a custom decorator
decorator = self.get_url_decorator(pattern)
if decorator:
# Nasty way of modifying a RegexURLPattern
pattern._callback = decorator(pattern._callback)
return urlpatterns
def get_permissions(self, url):
"""
Return a list of permissions for a given URL name
Args:
url (str): A URL name (eg ``basket.basket``)
Returns:
list: A list of permission strings.
"""
# url namespaced?
if url is not None and ':' in url:
view_name = url.split(':')[1]
else:
view_name = url
return self.permissions_map.get(view_name, self.default_permissions)
def get_url_decorator(self, pattern):
"""
Return the appropriate decorator for the view function with the passed
URL name. Mainly used for access-protecting views.
It's possible to specify:
- no permissions necessary: use None
- a set of permissions: use a list
- two set of permissions (`or`): use a two-tuple of lists
See permissions_required decorator for details
"""
permissions = self.get_permissions(pattern.name)
if permissions:
return permissions_required(permissions)
@property
def urls(self):
# We set the application and instance namespace here
return self.get_urls(), self.app_name, self.name
|
{
"content_hash": "c71569b03f811a82327587aeee8aa1d6",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 79,
"avg_line_length": 32.76146788990825,
"alnum_prop": 0.616353962475497,
"repo_name": "lyoniionly/django-cobra",
"id": "722f53cd797b35264954d362674a7f3f4ce1578c",
"size": "3571",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/cobra/core/application.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "745958"
},
{
"name": "HTML",
"bytes": "254436"
},
{
"name": "JavaScript",
"bytes": "2679541"
},
{
"name": "Python",
"bytes": "1440198"
},
{
"name": "Shell",
"bytes": "893"
},
{
"name": "XSLT",
"bytes": "24882"
}
],
"symlink_target": ""
}
|
from typing import Union, Any, cast
from abc import ABC
from abc import abstractmethod
import eagerpy as ep
from ..devutils import flatten
from ..devutils import atleast_kd
from ..distances import l2, linf
from .base import FixedEpsilonAttack
from .base import Criterion
from .base import Model
from .base import T
from .base import get_criterion
from .base import get_is_adversarial
from .base import raise_if_kwargs
from ..external.clipping_aware_rescaling import l2_clipping_aware_rescaling
from .base import verify_input_bounds
class BaseAdditiveNoiseAttack(FixedEpsilonAttack, ABC):
def run(
self,
model: Model,
inputs: T,
criterion: Union[Criterion, Any] = None,
*,
epsilon: float,
**kwargs: Any,
) -> T:
raise_if_kwargs(kwargs)
x, restore_type = ep.astensor_(inputs)
del inputs, criterion, kwargs
verify_input_bounds(x, model)
min_, max_ = model.bounds
p = self.sample_noise(x)
epsilons = self.get_epsilons(x, p, epsilon, min_=min_, max_=max_)
x = x + epsilons * p
x = x.clip(min_, max_)
return restore_type(x)
@abstractmethod
def sample_noise(self, x: ep.Tensor) -> ep.Tensor:
raise NotImplementedError
@abstractmethod
def get_epsilons(
self, x: ep.Tensor, p: ep.Tensor, epsilon: float, min_: float, max_: float
) -> ep.Tensor:
raise NotImplementedError
class L2Mixin:
distance = l2
def get_epsilons(
self, x: ep.Tensor, p: ep.Tensor, epsilon: float, min_: float, max_: float
) -> ep.Tensor:
norms = flatten(p).norms.l2(axis=-1)
return epsilon / atleast_kd(norms, p.ndim)
class L2ClippingAwareMixin:
distance = l2
def get_epsilons(
self, x: ep.Tensor, p: ep.Tensor, epsilon: float, min_: float, max_: float
) -> ep.Tensor:
return cast(
ep.Tensor, l2_clipping_aware_rescaling(x, p, epsilon, a=min_, b=max_)
)
class LinfMixin:
distance = linf
def get_epsilons(
self, x: ep.Tensor, p: ep.Tensor, epsilon: float, min_: float, max_: float
) -> ep.Tensor:
norms = flatten(p).max(axis=-1)
return epsilon / atleast_kd(norms, p.ndim)
class GaussianMixin:
def sample_noise(self, x: ep.Tensor) -> ep.Tensor:
return x.normal(x.shape)
class UniformMixin:
def sample_noise(self, x: ep.Tensor) -> ep.Tensor:
return x.uniform(x.shape, -1, 1)
class L2AdditiveGaussianNoiseAttack(L2Mixin, GaussianMixin, BaseAdditiveNoiseAttack):
"""Samples Gaussian noise with a fixed L2 size."""
pass
class L2AdditiveUniformNoiseAttack(L2Mixin, UniformMixin, BaseAdditiveNoiseAttack):
"""Samples uniform noise with a fixed L2 size."""
pass
class L2ClippingAwareAdditiveGaussianNoiseAttack(
L2ClippingAwareMixin, GaussianMixin, BaseAdditiveNoiseAttack
):
"""Samples Gaussian noise with a fixed L2 size after clipping.
The implementation is based on [#Rauber20]_.
References:
.. [#Rauber20] Jonas Rauber, Matthias Bethge
"Fast Differentiable Clipping-Aware Normalization and Rescaling"
https://arxiv.org/abs/2007.07677
"""
pass
class L2ClippingAwareAdditiveUniformNoiseAttack(
L2ClippingAwareMixin, UniformMixin, BaseAdditiveNoiseAttack
):
"""Samples uniform noise with a fixed L2 size after clipping.
The implementation is based on [#Rauber20]_.
References:
.. [#Rauber20] Jonas Rauber, Matthias Bethge
"Fast Differentiable Clipping-Aware Normalization and Rescaling"
https://arxiv.org/abs/2007.07677
"""
pass
class LinfAdditiveUniformNoiseAttack(LinfMixin, UniformMixin, BaseAdditiveNoiseAttack):
"""Samples uniform noise with a fixed L-infinity size"""
pass
class BaseRepeatedAdditiveNoiseAttack(FixedEpsilonAttack, ABC):
def __init__(self, *, repeats: int = 100, check_trivial: bool = True):
self.repeats = repeats
self.check_trivial = check_trivial
def run(
self,
model: Model,
inputs: T,
criterion: Union[Criterion, Any] = None,
*,
epsilon: float,
**kwargs: Any,
) -> T:
raise_if_kwargs(kwargs)
x0, restore_type = ep.astensor_(inputs)
criterion_ = get_criterion(criterion)
del inputs, criterion, kwargs
verify_input_bounds(x0, model)
is_adversarial = get_is_adversarial(criterion_, model)
min_, max_ = model.bounds
result = x0
if self.check_trivial:
found = is_adversarial(result)
else:
found = ep.zeros(x0, len(result)).bool()
for _ in range(self.repeats):
if found.all():
break
p = self.sample_noise(x0)
epsilons = self.get_epsilons(x0, p, epsilon, min_=min_, max_=max_)
x = x0 + epsilons * p
x = x.clip(min_, max_)
is_adv = is_adversarial(x)
is_new_adv = ep.logical_and(is_adv, ep.logical_not(found))
result = ep.where(atleast_kd(is_new_adv, x.ndim), x, result)
found = ep.logical_or(found, is_adv)
return restore_type(result)
@abstractmethod
def sample_noise(self, x: ep.Tensor) -> ep.Tensor:
raise NotImplementedError
@abstractmethod
def get_epsilons(
self, x: ep.Tensor, p: ep.Tensor, epsilon: float, min_: float, max_: float
) -> ep.Tensor:
raise NotImplementedError
class L2RepeatedAdditiveGaussianNoiseAttack(
L2Mixin, GaussianMixin, BaseRepeatedAdditiveNoiseAttack
):
"""Repeatedly samples Gaussian noise with a fixed L2 size.
Args:
repeats : How often to sample random noise.
check_trivial : Check whether original sample is already adversarial.
"""
pass
class L2RepeatedAdditiveUniformNoiseAttack(
L2Mixin, UniformMixin, BaseRepeatedAdditiveNoiseAttack
):
"""Repeatedly samples uniform noise with a fixed L2 size.
Args:
repeats : How often to sample random noise.
check_trivial : Check whether original sample is already adversarial.
"""
pass
class L2ClippingAwareRepeatedAdditiveGaussianNoiseAttack(
L2ClippingAwareMixin, GaussianMixin, BaseRepeatedAdditiveNoiseAttack
):
"""Repeatedly samples Gaussian noise with a fixed L2 size after clipping.
The implementation is based on [#Rauber20]_.
References:
.. [#Rauber20] Jonas Rauber, Matthias Bethge
"Fast Differentiable Clipping-Aware Normalization and Rescaling"
https://arxiv.org/abs/2007.07677
Args:
repeats : How often to sample random noise.
check_trivial : Check whether original sample is already adversarial.
"""
pass
class L2ClippingAwareRepeatedAdditiveUniformNoiseAttack(
L2ClippingAwareMixin, UniformMixin, BaseRepeatedAdditiveNoiseAttack
):
"""Repeatedly samples uniform noise with a fixed L2 size after clipping.
The implementation is based on [#Rauber20]_.
References:
.. [#Rauber20] Jonas Rauber, Matthias Bethge
"Fast Differentiable Clipping-Aware Normalization and Rescaling"
https://arxiv.org/abs/2007.07677
Args:
repeats : How often to sample random noise.
check_trivial : Check whether original sample is already adversarial.
"""
pass
class LinfRepeatedAdditiveUniformNoiseAttack(
LinfMixin, UniformMixin, BaseRepeatedAdditiveNoiseAttack
):
"""Repeatedly samples uniform noise with a fixed L-infinity size.
Args:
repeats : How often to sample random noise.
check_trivial : Check whether original sample is already adversarial.
"""
pass
|
{
"content_hash": "9af614f5850bb31b8478674fd17eea42",
"timestamp": "",
"source": "github",
"line_count": 284,
"max_line_length": 87,
"avg_line_length": 27.390845070422536,
"alnum_prop": 0.6547114024938938,
"repo_name": "bethgelab/foolbox",
"id": "29d55f0b4c82bc2600bed2294354c5f529d92875",
"size": "7779",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "foolbox/attacks/additive_noise.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "668"
},
{
"name": "Jupyter Notebook",
"bytes": "23091"
},
{
"name": "Makefile",
"bytes": "2670"
},
{
"name": "Python",
"bytes": "405918"
},
{
"name": "TeX",
"bytes": "3946"
}
],
"symlink_target": ""
}
|
from typing import AnyStr, Dict
from pyre_extensions import safe_json
from backend.common.datafeed_parsers.exceptions import ParserInputException
from backend.common.models.keys import EventKey, MatchKey
from backend.common.models.match import Match
class JSONMatchVideoParser:
"""
Take a dict of match partial -> youtube video ID
Returns a dict of match key -> youtube ID
"""
@staticmethod
def parse(event_key: EventKey, videos_json: AnyStr) -> Dict[MatchKey, str]:
video_dict = safe_json.loads(videos_json, Dict[str, str])
bad_match_ids = list(
filter(
lambda match_partial: not Match.validate_key_name(
f"{event_key}_{match_partial}"
),
video_dict.keys(),
)
)
if bad_match_ids:
raise ParserInputException(f"Invalid match IDs provided: {bad_match_ids}")
return {
f"{event_key}_{match_partial}": video_id
for match_partial, video_id in video_dict.items()
}
|
{
"content_hash": "e67fd48a2a672c31c8f353b58ffadf58",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 86,
"avg_line_length": 31.323529411764707,
"alnum_prop": 0.6169014084507042,
"repo_name": "the-blue-alliance/the-blue-alliance",
"id": "50f26aaf9f2ec0db84f922b4f5da2389f7a0ba0c",
"size": "1065",
"binary": false,
"copies": "1",
"ref": "refs/heads/py3",
"path": "src/backend/api/api_trusted_parsers/json_match_video_parser.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "359032"
},
{
"name": "Dockerfile",
"bytes": "2503"
},
{
"name": "HTML",
"bytes": "5877313"
},
{
"name": "JavaScript",
"bytes": "755910"
},
{
"name": "Less",
"bytes": "244218"
},
{
"name": "PHP",
"bytes": "10727"
},
{
"name": "Pug",
"bytes": "1857"
},
{
"name": "Python",
"bytes": "4321885"
},
{
"name": "Ruby",
"bytes": "4677"
},
{
"name": "Shell",
"bytes": "27698"
}
],
"symlink_target": ""
}
|
import logging
import os
import re
import urlparse
from subprocess import PIPE
import traceback
import git
from django_git.management.commands.git_pull_utils.connectivity_manager import ConnectivityManager
from djangoautoconf.local_key_manager import get_local_key
from ufs_tools.app_framework import find_app_in_folders
from ufs_tools.short_decorator.ignore_exception import ignore_exc
log = logging.getLogger(__name__)
class RemoteRepo(object):
def __init__(self, remote_repo):
self.remote_repo = remote_repo
self.sync_msg = None
@staticmethod
def get_ref_name(ref):
return ref.name.split('/')[-1]
def sync_all_remote_branches(self, branch):
for remote_ref in self.remote_repo.refs:
log.info("remote ref: %s" % remote_ref) # origin/master
self.pull_and_push_changes(branch, remote_ref, self.remote_repo)
def pull(self, remote_branch_name):
print('pulling changes: %s' % remote_branch_name)
# Added istream to avoid error: WindowsError: [Error 6] The handle is invalid
try:
self.remote_repo.pull(remote_branch_name, istream=PIPE)
except AssertionError:
log.error('assert error may be caused by inconsistent log format between git and gitpython')
except git.GitCommandError, e:
log.error("%s: GitCommandError: %s" % (self.remote_repo.url, str(e)))
except Exception, e:
traceback.print_exc()
def push(self, branch, remote_ref):
log.info('pushing changes')
try:
self.remote_repo.push(remote_ref.__str__().split('/')[-1],
istream=PIPE)
except Exception, e:
traceback.print_exc()
def pull_and_push_changes(self, branch, remote_ref):
# print remote_ref#gitbucket/20130313_diagram_rewrite
if branch.name in self.get_ref_name(remote_ref):
old_commit_hexsha = branch.commit.hexsha
log.info('remote commit: %s, %s' % (remote_ref.commit, remote_ref.commit.message))
self.pull(self.get_ref_name(remote_ref))
if branch.commit.hexsha != old_commit_hexsha:
msg = '%s: new code pulled: %s' % (self.remote_repo.url, branch.commit.message)
self.report_result(msg)
if branch.commit.hexsha != remote_ref.commit.hexsha:
log.info('different to remote')
log.info('latest remote log: %s' % unicode(remote_ref.commit.message))
self.push(branch, remote_ref)
if branch.commit.hexsha == remote_ref.commit.hexsha:
msg = '%s sync done. Latest local log: %s' % (self.remote_repo.url, branch.commit.message)
self.report_result(msg)
else:
msg = '%s sync failed.' % remote_ref.repo.working_dir
def report_result(self, message):
log.info(message)
self.sync_msg = message
class GitSynchronizer(object):
def __init__(self, full_path, callback=None):
self.full_path = full_path
self.https_proxy_server = get_local_key("proxy_setting.https_proxy_server", "django_git")
self.connectivity_manager = ConnectivityManager()
self.sync_msg = None
self.call_back = callback
def pull_all_branches(self):
r = git.Repo(self.full_path)
print "processing: ", self.full_path
local_active_branch = r.active_branch
log.info('current branch: %s, %s' % (local_active_branch.name, local_active_branch.commit))
for remote_repo in r.remotes:
log.info("remote repo: %s" % unicode(remote_repo))
if self.is_proxy_needed(remote_repo):
self.set_proxy_env()
else:
self.unset_proxy_env()
self.process_remote_repo(local_active_branch, remote_repo)
def process_remote_repo(self, branch, remote_repo):
if self.is_valid_url(remote_repo.url) and (not self.is_ignored(remote_repo.url)):
if self.is_repo_ref_valid(remote_repo):
for remote_ref in remote_repo.refs:
log.info("remote branch:" + unicode(remote_ref).encode('utf8', 'replace'))
# self.pull_and_push_changes(branch, remote_branch, remote_repo)
pulling_repo = RemoteRepo(remote_repo)
pulling_repo.pull_and_push_changes(branch, remote_ref)
sync_message = pulling_repo.sync_msg
self.show_notification(sync_message)
else:
log.error("No valid pulling_repo url, repo is not synchronized")
@ignore_exc
def show_notification(self, sync_message):
if (not (self.call_back is None)) and (not (sync_message is None)):
self.call_back(u"%s: %s" % (self.full_path, sync_message))
# noinspection PyMethodMayBeStatic
def get_server(self, url):
r = urlparse.urlparse(url)
return "%s://%s" % (r.scheme, r.hostname)
def is_proxy_needed(self, repo):
server_url = self.get_server(repo.url)
return not self.connectivity_manager.is_connectable(server_url)
def set_proxy_env(self):
os.environ["https_proxy"] = self.https_proxy_server
@staticmethod
def unset_proxy_env():
try:
del os.environ["https_proxy"]
except:
pass
@staticmethod
def is_repo_ref_valid(remote_repo):
# if hasattr(i, "refs"):
# print 'no refs attr'
# print "length:", len(i.refs)
is_ref_valid = True
try:
len(remote_repo.refs)
except AssertionError, e:
import traceback
traceback.print_exc()
print remote_repo
is_ref_valid = False
return is_ref_valid
@staticmethod
def is_ignored(url):
# if "https" in url:
# print 'ignoring :', url
# return True
# else:
# return False
return False
@staticmethod
def is_valid_url(url):
if re.match("http(s)*://.+:.+@.+$", url) is None:
return False
return True
try:
from repo import proj_list, git_path
except:
repo = []
git_path = 'C:\\Program Files (x86)\\Git\\bin'
git_path = 'C:\\Users\\weijia\\AppData\\Local\\Programs\\Git\\bin'
def add_git_to_path():
folders = get_local_key("git_path.git_folder", "django_git")
folders.append('C:\\Program Files (x86)\\Git\\bin')
os.environ['PATH'] += ";"+find_app_in_folders(folders, "git.exe")
# print os.environ['PATH']
if os.name == 'nt':
add_git_to_path()
def main():
for path in proj_list:
print "processing:", path
p = GitSynchronizer(path)
p.pull_all_branches()
if __name__ == '__main__':
add_git_to_path()
main()
|
{
"content_hash": "fba4fb208f2a718c308bae361fdb4a84",
"timestamp": "",
"source": "github",
"line_count": 196,
"max_line_length": 110,
"avg_line_length": 35.08673469387755,
"alnum_prop": 0.5940090155591101,
"repo_name": "weijia/django-git",
"id": "9f8bf5b2a45706318ef2b423a232fd2f8e49eef1",
"size": "6877",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_git/management/commands/git_pull_utils/git_synchronizer.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "661"
},
{
"name": "Makefile",
"bytes": "1240"
},
{
"name": "Python",
"bytes": "26530"
}
],
"symlink_target": ""
}
|
"""
The VMware API utility module.
"""
import netaddr
import suds
def get_moref(value, type_):
"""Get managed object reference.
:param value: value of the managed object
:param type_: type of the managed object
:returns: managed object reference with given value and type
"""
moref = suds.sudsobject.Property(value)
moref._type = type_
return moref
def build_selection_spec(client_factory, name):
"""Builds the selection spec.
:param client_factory: factory to get API input specs
:param name: name for the selection spec
:returns: selection spec
"""
sel_spec = client_factory.create('ns0:SelectionSpec')
sel_spec.name = name
return sel_spec
def build_traversal_spec(client_factory, name, type_, path, skip, select_set):
"""Builds the traversal spec.
:param client_factory: factory to get API input specs
:param name: name for the traversal spec
:param type_: type of the managed object
:param path: property path of the managed object
:param skip: whether or not to filter the object identified by param path
:param select_set: set of selection specs specifying additional objects
to filter
:returns: traversal spec
"""
traversal_spec = client_factory.create('ns0:TraversalSpec')
traversal_spec.name = name
traversal_spec.type = type_
traversal_spec.path = path
traversal_spec.skip = skip
traversal_spec.selectSet = select_set
return traversal_spec
def build_recursive_traversal_spec(client_factory):
"""Builds recursive traversal spec to traverse managed object hierarchy.
:param client_factory: factory to get API input specs
:returns: recursive traversal spec
"""
visit_folders_select_spec = build_selection_spec(client_factory,
'visitFolders')
# Next hop from Datacenter
dc_to_hf = build_traversal_spec(client_factory,
'dc_to_hf',
'Datacenter',
'hostFolder',
False,
[visit_folders_select_spec])
dc_to_vmf = build_traversal_spec(client_factory,
'dc_to_vmf',
'Datacenter',
'vmFolder',
False,
[visit_folders_select_spec])
# Next hop from HostSystem
h_to_vm = build_traversal_spec(client_factory,
'h_to_vm',
'HostSystem',
'vm',
False,
[visit_folders_select_spec])
# Next hop from ComputeResource
cr_to_h = build_traversal_spec(client_factory,
'cr_to_h',
'ComputeResource',
'host',
False,
[])
cr_to_ds = build_traversal_spec(client_factory,
'cr_to_ds',
'ComputeResource',
'datastore',
False,
[])
rp_to_rp_select_spec = build_selection_spec(client_factory, 'rp_to_rp')
rp_to_vm_select_spec = build_selection_spec(client_factory, 'rp_to_vm')
cr_to_rp = build_traversal_spec(client_factory,
'cr_to_rp',
'ComputeResource',
'resourcePool',
False,
[rp_to_rp_select_spec,
rp_to_vm_select_spec])
# Next hop from ClusterComputeResource
ccr_to_h = build_traversal_spec(client_factory,
'ccr_to_h',
'ClusterComputeResource',
'host',
False,
[])
ccr_to_ds = build_traversal_spec(client_factory,
'ccr_to_ds',
'ClusterComputeResource',
'datastore',
False,
[])
ccr_to_rp = build_traversal_spec(client_factory,
'ccr_to_rp',
'ClusterComputeResource',
'resourcePool',
False,
[rp_to_rp_select_spec,
rp_to_vm_select_spec])
# Next hop from ResourcePool
rp_to_rp = build_traversal_spec(client_factory,
'rp_to_rp',
'ResourcePool',
'resourcePool',
False,
[rp_to_rp_select_spec,
rp_to_vm_select_spec])
rp_to_vm = build_traversal_spec(client_factory,
'rp_to_vm',
'ResourcePool',
'vm',
False,
[rp_to_rp_select_spec,
rp_to_vm_select_spec])
# Get the assorted traversal spec which takes care of the objects to
# be searched for from the rootFolder
traversal_spec = build_traversal_spec(client_factory,
'visitFolders',
'Folder',
'childEntity',
False,
[visit_folders_select_spec,
h_to_vm,
dc_to_hf,
dc_to_vmf,
cr_to_ds,
cr_to_h,
cr_to_rp,
ccr_to_h,
ccr_to_ds,
ccr_to_rp,
rp_to_rp,
rp_to_vm])
return traversal_spec
def build_property_spec(client_factory, type_='VirtualMachine',
properties_to_collect=None, all_properties=False):
"""Builds the property spec.
:param client_factory: factory to get API input specs
:param type_: type of the managed object
:param properties_to_collect: names of the managed object properties to be
collected while traversal filtering
:param all_properties: whether all properties of the managed object need
to be collected
:returns: property spec
"""
if not properties_to_collect:
properties_to_collect = ['name']
property_spec = client_factory.create('ns0:PropertySpec')
property_spec.all = all_properties
property_spec.pathSet = properties_to_collect
property_spec.type = type_
return property_spec
def build_object_spec(client_factory, root_folder, traversal_specs):
"""Builds the object spec.
:param client_factory: factory to get API input specs
:param root_folder: root folder reference; the starting point of traversal
:param traversal_specs: filter specs required for traversal
:returns: object spec
"""
object_spec = client_factory.create('ns0:ObjectSpec')
object_spec.obj = root_folder
object_spec.skip = False
object_spec.selectSet = traversal_specs
return object_spec
def build_property_filter_spec(client_factory, property_specs, object_specs):
"""Builds the property filter spec.
:param client_factory: factory to get API input specs
:param property_specs: property specs to be collected for filtered objects
:param object_specs: object specs to identify objects to be filtered
:returns: property filter spec
"""
property_filter_spec = client_factory.create('ns0:PropertyFilterSpec')
property_filter_spec.propSet = property_specs
property_filter_spec.objectSet = object_specs
return property_filter_spec
def get_objects(vim, type_, max_objects, properties_to_collect=None,
all_properties=False):
"""Get all managed object references of the given type.
It is the caller's responsibility to continue or cancel retrieval.
:param vim: Vim object
:param type_: type of the managed object
:param max_objects: maximum number of objects that should be returned in
a single call
:param properties_to_collect: names of the managed object properties to be
collected
:param all_properties: whether all properties of the managed object need to
be collected
:returns: all managed object references of the given type
:raises: VimException, VimFaultException, VimAttributeException,
VimSessionOverLoadException, VimConnectionException
"""
if not properties_to_collect:
properties_to_collect = ['name']
client_factory = vim.client.factory
recur_trav_spec = build_recursive_traversal_spec(client_factory)
object_spec = build_object_spec(client_factory,
vim.service_content.rootFolder,
[recur_trav_spec])
property_spec = build_property_spec(
client_factory,
type_=type_,
properties_to_collect=properties_to_collect,
all_properties=all_properties)
property_filter_spec = build_property_filter_spec(client_factory,
[property_spec],
[object_spec])
options = client_factory.create('ns0:RetrieveOptions')
options.maxObjects = max_objects
return vim.RetrievePropertiesEx(vim.service_content.propertyCollector,
specSet=[property_filter_spec],
options=options)
def get_object_properties(vim, moref, properties_to_collect):
"""Get properties of the given managed object.
:param vim: Vim object
:param moref: managed object reference
:param properties_to_collect: names of the managed object properties to be
collected
:returns: properties of the given managed object
:raises: VimException, VimFaultException, VimAttributeException,
VimSessionOverLoadException, VimConnectionException
"""
if moref is None:
return None
client_factory = vim.client.factory
all_properties = (properties_to_collect is None or
len(properties_to_collect) == 0)
property_spec = build_property_spec(
client_factory,
type_=moref._type,
properties_to_collect=properties_to_collect,
all_properties=all_properties)
object_spec = build_object_spec(client_factory, moref, [])
property_filter_spec = build_property_filter_spec(client_factory,
[property_spec],
[object_spec])
options = client_factory.create('ns0:RetrieveOptions')
options.maxObjects = 1
retrieve_result = vim.RetrievePropertiesEx(
vim.service_content.propertyCollector,
specSet=[property_filter_spec],
options=options)
cancel_retrieval(vim, retrieve_result)
return retrieve_result.objects
def _get_token(retrieve_result):
"""Get token from result to obtain next set of results.
:retrieve_result: Result of RetrievePropertiesEx API call
:returns: token to obtain next set of results; None if no more results.
"""
return getattr(retrieve_result, 'token', None)
def cancel_retrieval(vim, retrieve_result):
"""Cancels the retrieve operation if necessary.
:param vim: Vim object
:param retrieve_result: result of RetrievePropertiesEx API call
:raises: VimException, VimFaultException, VimAttributeException,
VimSessionOverLoadException, VimConnectionException
"""
token = _get_token(retrieve_result)
if token:
collector = vim.service_content.propertyCollector
vim.CancelRetrievePropertiesEx(collector, token=token)
def continue_retrieval(vim, retrieve_result):
"""Continue retrieving results, if available.
:param vim: Vim object
:param retrieve_result: result of RetrievePropertiesEx API call
:raises: VimException, VimFaultException, VimAttributeException,
VimSessionOverLoadException, VimConnectionException
"""
token = _get_token(retrieve_result)
if token:
collector = vim.service_content.propertyCollector
return vim.ContinueRetrievePropertiesEx(collector, token=token)
def get_object_property(vim, moref, property_name):
"""Get property of the given managed object.
:param vim: Vim object
:param moref: managed object reference
:param property_name: name of the property to be retrieved
:returns: property of the given managed object
:raises: VimException, VimFaultException, VimAttributeException,
VimSessionOverLoadException, VimConnectionException
"""
props = get_object_properties(vim, moref, [property_name])
prop_val = None
if props:
prop = None
if hasattr(props[0], 'propSet'):
# propSet will be set only if the server provides value
# for the field
prop = props[0].propSet
if prop:
prop_val = prop[0].val
return prop_val
def get_soap_url(protocol, host, path='sdk'):
"""Return ESX/VC server's SOAP service URL.
:param protocol: https or http
:param host: server IP address[:port] or host name[:port]
:param path: path part of the SOAP URL
:returns: SOAP service URL
"""
if netaddr.valid_ipv6(host):
return '%s://[%s]/%s' % (protocol, host, path)
return '%s://%s/%s' % (protocol, host, path)
|
{
"content_hash": "1e784969b0d5740e9979b21fbdaccafb",
"timestamp": "",
"source": "github",
"line_count": 366,
"max_line_length": 79,
"avg_line_length": 40.23770491803279,
"alnum_prop": 0.5397569090785632,
"repo_name": "JioCloud/oslo.vmware",
"id": "c7194c5a1298635e037162d4ec1037c48b221eaa",
"size": "15359",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "oslo/vmware/vim_util.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['RelativeDifference'] , ['MovingMedian'] , ['Seasonal_Hour'] , ['ARX'] );
|
{
"content_hash": "45f0f549a23238c2165dccf2fd4fdc12",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 95,
"avg_line_length": 42,
"alnum_prop": 0.7261904761904762,
"repo_name": "antoinecarme/pyaf",
"id": "329dbb4297b83acc74c3100ed7775bc44a002386",
"size": "168",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/model_control/detailed/transf_RelativeDifference/model_control_one_enabled_RelativeDifference_MovingMedian_Seasonal_Hour_ARX.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6773299"
},
{
"name": "Procfile",
"bytes": "24"
},
{
"name": "Python",
"bytes": "54209093"
},
{
"name": "R",
"bytes": "807"
},
{
"name": "Shell",
"bytes": "3619"
}
],
"symlink_target": ""
}
|
class interface(object):
specs={'port':1234,
'ip':'127.0.0.1'}
specsGui={'port':{'name':'Port','tooltip':'port for rtp streaming'},
'ip':{'name':'IP','tooltip':'ip for rtp streaming'}}
|
{
"content_hash": "81a51f7d92d276832ce74579288e3840",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 72,
"avg_line_length": 22.3,
"alnum_prop": 0.5336322869955157,
"repo_name": "schristakidis/p2ner",
"id": "3d2587ae680787538cefe377618a97b08affd99b",
"size": "842",
"binary": false,
"copies": "1",
"ref": "refs/heads/development",
"path": "p2ner/components/output/gstoutput/gstoutput/interface.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "303"
},
{
"name": "Python",
"bytes": "1319300"
}
],
"symlink_target": ""
}
|
"""
Structure Residues
==================
"""
import logging
from collections import defaultdict
from copy import deepcopy
from Bio.Alphabet import IUPAC
from Bio.PDB import Polypeptide
from Bio.PDB.HSExposure import ExposureCN, HSExposureCA, HSExposureCB
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
import numpy as np
from Bio.PDB import Selection
from Bio.PDB import NeighborSearch
import ssbio.protein.sequence.utils
from ssbio.protein.structure.utils.structureio import StructureIO
log = logging.getLogger(__name__)
def search_ss_bonds(model, threshold=3.0):
""" Searches S-S bonds based on distances
between atoms in the structure (first model only).
Average distance is 2.05A. Threshold is 3A default.
Returns iterator with tuples of residues.
ADAPTED FROM JOAO RODRIGUES' BIOPYTHON GSOC PROJECT (http://biopython.org/wiki/GSOC2010_Joao)
"""
# Taken from http://docs.python.org/library/itertools.html
# Python 2.4 does not include itertools.combinations
def combinations(iterable, r):
# combinations('ABCD', 2) --> AB AC AD BC BD CD
# combinations(range(4), 3) --> 012 013 023 123
pool = tuple(iterable)
n = len(pool)
if r > n:
return
indices = list(range(r))
yield tuple(pool[i] for i in indices)
while True:
for i in reversed(range(r)):
if indices[i] != i + n - r:
break
else:
return
indices[i] += 1
for j in range(i + 1, r):
indices[j] = indices[j - 1] + 1
yield tuple(pool[i] for i in indices)
cysteines = [r for r in model.get_residues() if r.get_resname() == 'CYS']
pairs = combinations(cysteines, 2) # Iterator with pairs
bridges = []
for cys_pair in pairs:
try:
if cys_pair[0]['SG'] - cys_pair[1]['SG'] < threshold:
bridges.append(cys_pair)
except KeyError: # This will occur when a CYS residue is missing a SG atom for some reason
log.error('{}: no SG atom found for one or both of the cysteine residues {}'.format(model, cys_pair))
continue
infodict = {}
if bridges:
infodict = defaultdict(list)
for disulfide_bridge in bridges:
residue1 = disulfide_bridge[0]
residue2 = disulfide_bridge[1]
chain = residue1.get_parent().id
infodict[chain].append((residue1.get_full_id()[3], residue2.get_full_id()[3]))
return infodict
def residue_distances(res_1_num, res_1_chain, res_2_num, res_2_chain, model):
"""Distance between the last atom of 2 residues"""
res1 = model[res_1_chain][res_1_num].child_list[-1]
res2 = model[res_2_chain][res_2_num].child_list[-1]
distance = res1 - res2
return distance
def resname_in_proximity(resname, model, chains, resnums, threshold=5):
"""Search within the proximity of a defined list of residue numbers and their chains for any specifed residue name.
Args:
resname (str): Residue name to search for in proximity of specified chains + resnums
model: Biopython Model object
chains (str, list): Chain ID or IDs to check
resnums (int, list): Residue numbers within the chain to check
threshold (float): Cutoff in Angstroms for returning True if a RESNAME is near
Returns:
bool: True if a RESNAME is within the threshold cutoff
"""
residues = [r for r in model.get_residues() if r.get_resname() == resname]
chains = ssbio.utils.force_list(chains)
resnums = ssbio.utils.force_list(resnums)
for chain in chains:
for resnum in resnums:
my_residue_last_atom = model[chain][resnum].child_list[-1]
for rz in residues:
distance = rz.child_list[-1] - my_residue_last_atom
if distance < threshold:
# print(resnum, rz, distance)
return True
return False
def within(resnum, angstroms, chain_id, model, use_ca=False, custom_coord=None):
"""See: https://www.biostars.org/p/1816/ https://www.biostars.org/p/269579/
Args:
resnum (int):
angstroms (float):
chain_id (str):
model (Model):
use_ca (bool): If the alpha-carbon atom should be used for the search, otherwise use the last atom of the residue
custom_coord (list): Custom XYZ coordinate to get within
Returns:
list: List of Bio.PDB.Residue.Residue objects
"""
# XTODO: documentation
# TODO: should have separate method for within a normal residue (can use "resnum" with a int) or a custom coord,
# where you don't need to specify resnum
atom_list = Selection.unfold_entities(model, 'A')
ns = NeighborSearch(atom_list)
if custom_coord: # a list of XYZ coord
target_atom_coord = np.array(custom_coord, 'f')
else:
target_residue = model[chain_id][resnum]
if use_ca:
target_atom = target_residue['CA']
else:
target_atom = target_residue.child_list[-1]
target_atom_coord = np.array(target_atom.get_coord(), 'f')
neighbors = ns.search(target_atom_coord, angstroms)
residue_list = Selection.unfold_entities(neighbors, 'R')
return residue_list
def get_structure_seqrecords(model):
"""Get a dictionary of a PDB file's sequences.
Special cases include:
- Insertion codes. In the case of residue numbers like "15A", "15B", both residues are written out. Example: 9LPR
- HETATMs. Currently written as an "X", or unknown amino acid.
Args:
model: Biopython Model object of a Structure
Returns:
list: List of SeqRecords
"""
structure_seq_records = []
# Loop over each chain of the PDB
for chain in model:
tracker = 0
chain_seq = ''
chain_resnums = []
# Loop over the residues
for res in chain.get_residues():
# NOTE: you can get the residue number too
res_id = res.id
res_num = res_id[1]
res_icode = res_id[2]
# Double check if the residue name is a standard residue
# If it is not a standard residue (ie. selenomethionine),
# it will be filled in with an X on the next iteration)
if Polypeptide.is_aa(res, standard=True):
end_tracker = res_num
res_aa_one = Polypeptide.three_to_one(res.get_resname())
# Tracker to fill in X's
if end_tracker != (tracker + 1):
if res_icode != ' ':
chain_seq += res_aa_one
chain_resnums.append(res_num)
tracker = end_tracker + 1
continue
else:
multiplier = (end_tracker - tracker - 1)
chain_seq += 'X' * multiplier
# Residue numbers for unresolved or nonstandard residues are Infinite
chain_resnums.extend([float("Inf")] * multiplier)
chain_seq += res_aa_one
chain_resnums.append(res_num)
tracker = end_tracker
else:
continue
chain_seq_record = SeqRecord(Seq(chain_seq, IUPAC.protein), id=chain.get_id())
chain_seq_record.letter_annotations['structure_resnums'] = chain_resnums
structure_seq_records.append(chain_seq_record)
return structure_seq_records
def get_structure_seqs(pdb_file, file_type):
"""Get a dictionary of a PDB file's sequences.
Special cases include:
- Insertion codes. In the case of residue numbers like "15A", "15B", both residues are written out. Example: 9LPR
- HETATMs. Currently written as an "X", or unknown amino acid.
Args:
pdb_file: Path to PDB file
Returns:
dict: Dictionary of:
{chain_id: sequence}
"""
# TODO: Please check out capitalization of chain IDs in mmcif files. example: 5afi - chain "l" is present but
# it seems like biopython capitalizes it to chain L
# Get the first model
my_structure = StructureIO(pdb_file)
model = my_structure.first_model
structure_seqs = {}
# Loop over each chain of the PDB
for chain in model:
chain_seq = ''
tracker = 0
# Loop over the residues
for res in chain.get_residues():
# NOTE: you can get the residue number too
# res_num = res.id[1]
# Double check if the residue name is a standard residue
# If it is not a standard residue (ie. selenomethionine),
# it will be filled in with an X on the next iteration)
if Polypeptide.is_aa(res, standard=True):
full_id = res.get_full_id()
end_tracker = full_id[3][1]
i_code = full_id[3][2]
aa = Polypeptide.three_to_one(res.get_resname())
# Tracker to fill in X's
if end_tracker != (tracker + 1):
if i_code != ' ':
chain_seq += aa
tracker = end_tracker + 1
continue
else:
chain_seq += 'X' * (end_tracker - tracker - 1)
chain_seq += aa
tracker = end_tracker
else:
continue
structure_seqs[chain.get_id()] = chain_seq
return structure_seqs
def match_structure_sequence(orig_seq, new_seq, match='X', fill_with='X', ignore_excess=False):
"""Correct a sequence to match inserted X's in a structure sequence
This is useful for mapping a sequence obtained from structural tools like MSMS or DSSP
to the sequence obtained by the get_structure_seqs method.
Examples:
>>> structure_seq = 'XXXABCDEF'
>>> prop_list = [4, 5, 6, 7, 8, 9]
>>> match_structure_sequence(structure_seq, prop_list)
['X', 'X', 'X', 4, 5, 6, 7, 8, 9]
>>> match_structure_sequence(structure_seq, prop_list, fill_with=float('Inf'))
[inf, inf, inf, 4, 5, 6, 7, 8, 9]
>>> structure_seq = '---ABCDEF---'
>>> prop_list = ('H','H','H','C','C','C')
>>> match_structure_sequence(structure_seq, prop_list, match='-', fill_with='-')
('-', '-', '-', 'H', 'H', 'H', 'C', 'C', 'C', '-', '-', '-')
>>> structure_seq = 'ABCDEF---'
>>> prop_list = 'HHHCCC'
>>> match_structure_sequence(structure_seq, prop_list, match='-', fill_with='-')
'HHHCCC---'
>>> structure_seq = 'AXBXCXDXEXF'
>>> prop_list = ['H', 'H', 'H', 'C', 'C', 'C']
>>> match_structure_sequence(structure_seq, prop_list, match='X', fill_with='X')
['H', 'X', 'H', 'X', 'H', 'X', 'C', 'X', 'C', 'X', 'C']
Args:
orig_seq (str, Seq, SeqRecord): Sequence to match to
new_seq (str, tuple, list): Sequence to fill in
match (str): What to match
fill_with: What to fill in when matches are found
ignore_excess (bool): If excess sequence on the tail end of new_seq should be ignored
Returns:
str, tuple, list: new_seq which will match the length of orig_seq
"""
if len(orig_seq) == len(new_seq):
log.debug('Lengths already equal, nothing to fill in')
return new_seq
if not ignore_excess:
if len(orig_seq) < len(new_seq):
raise ValueError('Original sequence has a length less than the sequence provided to match to')
else:
log.debug('New sequence will be truncated to length of original sequence - information may be lost!')
if not isinstance(new_seq, str) and not isinstance(new_seq, tuple) and not isinstance(new_seq, list):
raise ValueError('Invalid sequence provided, must be string, tuple, or list')
orig_seq = ssbio.protein.sequence.utils.cast_to_str(orig_seq)
new_thing = deepcopy(new_seq)
if isinstance(new_seq, tuple):
new_thing = list(new_thing)
for i, s in enumerate(orig_seq):
if s == match:
if isinstance(new_thing, str):
new_thing = new_thing[:i] + fill_with + new_thing[i:]
if isinstance(new_thing, list):
new_thing.insert(i, fill_with)
new_thing = new_thing[:len(orig_seq)]
if isinstance(new_seq, tuple):
new_thing = tuple(new_thing)
return new_thing
def site_centroid(residues, model):
"""Get the XYZ coordinate of the center of a list of residues.
Args:
residues: List of residue numbers
pdb_file: Path to PDB file
Returns:
tuple: (X, Y, Z) coordinate of centroid
"""
pass
def distance_to_site(residue_of_interest, residues, model):
"""Calculate the distance between an amino acid and a group of amino acids.
Args:
residue_of_interest: Residue number you are interested in (ie. a mutation)
residues: List of residue numbers
Returns:
float: Distance (in Angstroms) to the group of residues
"""
centroid = site_centroid(residues, residue_of_interest)
pass
# TODO: half sphere exposure
def hse_output(pdb_file, file_type):
"""
The solvent exposure of an amino acid residue is important for analyzing,
understanding and predicting aspects of protein structure and function [73].
A residue's solvent exposure can be classified as four categories: exposed, partly exposed,
buried and deeply buried residues. Hamelryck et al. [73] established a new 2D measure that provides a
different view of solvent exposure, i.e. half-sphere exposure (HSE). By conceptually dividing the sphere
of a residue into two halves- HSE-up and HSE-down, HSE provides a more detailed description of an amino
acid residue's spatial neighborhood. HSE is calculated by the hsexpo module implemented in the BioPython
package [74] from a PDB file.
http://onlinelibrary.wiley.com/doi/10.1002/prot.20379/abstract
Args:
pdb_file:
Returns:
"""
# Get the first model
my_structure = StructureIO(pdb_file)
model = my_structure.first_model
# Calculate HSEalpha
exp_ca = HSExposureCA(model)
# Calculate HSEbeta
exp_cb = HSExposureCB(model)
# Calculate classical coordination number
exp_fs = ExposureCN(model)
return
# def magni(a, b, c):
# """Calculate the magnitude of distance vector
# """
# return pow((pow(a, 2) + pow(b, 2) + pow(c, 2)), 1.0 / 2.0)
# @cachetools.func.ttl_cache(maxsize=256)
# def calculate_res_distance(res_1, res_2, pdb_file):
# """Calculate distance of one residue number to another in a PDB file
#
# Args:
# res_1: Residue number 1
# res_2: Residue number 2
# pdb_file: Path to PDB file
#
# Returns:
#
# """
#
# my_structure = StructureIO(pdb_file)
# model = my_structure.first_model
#
# res_list = PDB.Selection.unfold_entities(model, 'R')
#
# ires_list = []
# res_chk_1 = ''
# res_chk_2 = ''
# for j in res_list:
# if j.id[1] in [res_1, res_2] and j.resname != 'HOH':
# ires_list.append(j)
# if res_chk_1 == '' and res_chk_2 == '':
# res_chk_1 = j.id[1]
# else:
# res_chk_2 = j.id[1]
#
# paired = ssbio.utils.combinations(ires_list, 2)
# try:
# for k in paired:
# chainA = PDB.Selection.unfold_entities(k[0], 'C')[0]
# chainB = PDB.Selection.unfold_entities(k[1], 'C')[0]
# vec = list(
# np.array([x.get_coord() for x in k[0]]).mean(axis=0) - np.array([x.get_coord() for x in k[1]]).mean(
# axis=0))
# distance = magni(vec[0], vec[1], vec[2])
#
# return distance
# except UnboundLocalError:
# log.error("Unknown interaction")
# return None
|
{
"content_hash": "e0cb05304647c99aa6e242188cd1de82",
"timestamp": "",
"source": "github",
"line_count": 472,
"max_line_length": 121,
"avg_line_length": 34.00423728813559,
"alnum_prop": 0.591214953271028,
"repo_name": "nmih/ssbio",
"id": "e2270d76d27a37172a19db2a8aba8064334d8cb1",
"size": "16050",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ssbio/protein/structure/properties/residues.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "3957"
},
{
"name": "Perl",
"bytes": "170187"
},
{
"name": "Python",
"bytes": "2101974"
},
{
"name": "Scheme",
"bytes": "8711"
}
],
"symlink_target": ""
}
|
"""Testing utilities for PIX."""
import inspect
import types
from typing import Sequence, Tuple
def get_public_functions(
root_module: types.ModuleType) -> Sequence[Tuple[str, types.FunctionType]]:
"""Returns `(function_name, function)` for all functions of `root_module`."""
fns = []
for name in dir(root_module):
o = getattr(root_module, name)
if inspect.isfunction(o):
fns.append((name, o))
return fns
def get_public_symbols(
root_module: types.ModuleType) -> Sequence[Tuple[str, types.FunctionType]]:
"""Returns `(symbol_name, symbol)` for all symbols of `root_module`."""
fns = []
for name in getattr(root_module, '__all__'):
o = getattr(root_module, name)
fns.append((name, o))
return fns
|
{
"content_hash": "0d62e95de8424dd25dcac799e76dd3d3",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 79,
"avg_line_length": 28.692307692307693,
"alnum_prop": 0.6702412868632708,
"repo_name": "deepmind/dm_pix",
"id": "e615fd265ccac2963c11dbe746380399e0110969",
"size": "1361",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dm_pix/_src/test_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "110452"
},
{
"name": "Shell",
"bytes": "1439"
}
],
"symlink_target": ""
}
|
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Entry',
fields=[
(u'id', models.AutoField(verbose_name=u'ID', serialize=False, auto_created=True, primary_key=True)),
('title', models.CharField(max_length=200, unique_for_date='published_on')),
('author', models.ForeignKey(to=settings.AUTH_USER_MODEL, to_field=u'id', editable=False)),
('published_on', models.DateTimeField(auto_now_add=True)),
('edited_on', models.DateTimeField(auto_now=True)),
('text', models.TextField()),
('slug', models.SlugField(editable=False)),
],
options={
u'verbose_name_plural': 'Entries',
},
bases=(models.Model,),
),
]
|
{
"content_hash": "9c3e7d870f7508e11fadb57cba8b2c69",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 116,
"avg_line_length": 36.82142857142857,
"alnum_prop": 0.565470417070805,
"repo_name": "ateoto/django-words",
"id": "5b63b7fa6f8ca7bc484640b246f3071677d06a7c",
"size": "1048",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "words/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2211"
},
{
"name": "Python",
"bytes": "27435"
}
],
"symlink_target": ""
}
|
__author__ = 'ShJashiashvili'
|
{
"content_hash": "0414816e9020a07279b44cd9c51c3480",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 29,
"avg_line_length": 30,
"alnum_prop": 0.6666666666666666,
"repo_name": "unixxxx/simplecms",
"id": "4a08984999a35251479c81ae4388e54593656264",
"size": "30",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "admin/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "832"
},
{
"name": "Python",
"bytes": "17926"
}
],
"symlink_target": ""
}
|
"""
******
Layout
******
Node positioning algorithms for graph drawing.
"""
# Copyright (C) 2004-2015 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
import collections
import networkx as nx
__author__ = """Aric Hagberg (hagberg@lanl.gov)\nDan Schult(dschult@colgate.edu)"""
__all__ = ['circular_layout',
'random_layout',
'shell_layout',
'spring_layout',
'spectral_layout',
'fruchterman_reingold_layout']
def process_params(G, center, dim):
# Some boilerplate code.
import numpy as np
if not isinstance(G, nx.Graph):
empty_graph = nx.Graph()
empty_graph.add_nodes_from(G)
G = empty_graph
if center is None:
center = np.zeros(dim)
else:
center = np.asarray(center)
if len(center) != dim:
msg = "length of center coordinates must match dimension of layout"
raise ValueError(msg)
return G, center
def random_layout(G, dim=2, center=None):
"""Position nodes uniformly at random in the unit square.
For every node, a position is generated by choosing each of dim
coordinates uniformly at random on the interval [0.0, 1.0).
NumPy (http://scipy.org) is required for this function.
Parameters
----------
G : NetworkX graph or list of nodes
A position will be assigned to every node in G.
dim : int
Dimension of layout.
center : array-like or None
Coordinate pair around which to center the layout.
Returns
-------
pos : dict
A dictionary of positions keyed by node
Examples
--------
>>> G = nx.lollipop_graph(4, 3)
>>> pos = nx.random_layout(G)
"""
import numpy as np
G, center = process_params(G, center, dim)
shape = (len(G), dim)
pos = np.random.random(shape) + center
pos = pos.astype(np.float32)
pos = dict(zip(G, pos))
return pos
def circular_layout(G, dim=2, scale=1, center=None):
# dim=2 only
"""Position nodes on a circle.
Parameters
----------
G : NetworkX graph or list of nodes
dim : int
Dimension of layout, currently only dim=2 is supported
scale : float
Scale factor for positions
center : array-like or None
Coordinate pair around which to center the layout.
Returns
-------
dict :
A dictionary of positions keyed by node
Examples
--------
>>> G=nx.path_graph(4)
>>> pos=nx.circular_layout(G)
Notes
-----
This algorithm currently only works in two dimensions and does not
try to minimize edge crossings.
"""
import numpy as np
G, center = process_params(G, center, dim)
if len(G) == 0:
pos = {}
elif len(G) == 1:
pos = {G.nodes()[0]: center}
else:
# Discard the extra angle since it matches 0 radians.
theta = np.linspace(0, 1, len(G) + 1)[:-1] * 2 * np.pi
theta = theta.astype(np.float32)
pos = np.column_stack([np.cos(theta), np.sin(theta)])
pos = _rescale_layout(pos, scale=scale) + center
pos = dict(zip(G, pos))
return pos
def shell_layout(G, nlist=None, dim=2, scale=1, center=None):
"""Position nodes in concentric circles.
Parameters
----------
G : NetworkX graph or list of nodes
nlist : list of lists
List of node lists for each shell.
dim : int
Dimension of layout, currently only dim=2 is supported
scale : float
Scale factor for positions
center : array-like or None
Coordinate pair around which to center the layout.
Returns
-------
dict :
A dictionary of positions keyed by node
Examples
--------
>>> G = nx.path_graph(4)
>>> shells = [[0], [1,2,3]]
>>> pos = nx.shell_layout(G, shells)
Notes
-----
This algorithm currently only works in two dimensions and does not
try to minimize edge crossings.
"""
import numpy as np
G, center = process_params(G, center, dim)
if len(G) == 0:
return {}
elif len(G) == 1:
return {G.nodes()[0]: center}
if nlist is None:
# draw the whole graph in one shell
nlist = [list(G.nodes())]
if len(nlist[0]) == 1:
# single node at center
radius = 0.0
else:
# else start at r=1
radius = 1.0
npos={}
for nodes in nlist:
# Discard the extra angle since it matches 0 radians.
theta = np.linspace(0, 1, len(nodes) + 1)[:-1] * 2 * np.pi
theta = theta.astype(np.float32)
pos = np.column_stack([np.cos(theta), np.sin(theta)])
pos = _rescale_layout(pos, scale=scale * radius / len(nlist)) + center
npos.update(zip(nodes, pos))
radius += 1.0
return npos
def fruchterman_reingold_layout(G,dim=2,k=None,
pos=None,
fixed=None,
iterations=50,
weight='weight',
scale=1.0,
center=None):
"""Position nodes using Fruchterman-Reingold force-directed algorithm.
Parameters
----------
G : NetworkX graph or list of nodes
dim : int
Dimension of layout
k : float (default=None)
Optimal distance between nodes. If None the distance is set to
1/sqrt(n) where n is the number of nodes. Increase this value
to move nodes farther apart.
pos : dict or None optional (default=None)
Initial positions for nodes as a dictionary with node as keys
and values as a list or tuple. If None, then use random initial
positions.
fixed : list or None optional (default=None)
Nodes to keep fixed at initial position.
iterations : int optional (default=50)
Number of iterations of spring-force relaxation
weight : string or None optional (default='weight')
The edge attribute that holds the numerical value used for
the edge weight. If None, then all edge weights are 1.
scale : float (default=1.0)
Scale factor for positions. The nodes are positioned
in a box of size [0,scale] x [0,scale].
center : array-like or None
Coordinate pair around which to center the layout.
Returns
-------
dict :
A dictionary of positions keyed by node
Examples
--------
>>> G=nx.path_graph(4)
>>> pos=nx.spring_layout(G)
# The same using longer function name
>>> pos=nx.fruchterman_reingold_layout(G)
"""
import numpy as np
G, center = process_params(G, center, dim)
if fixed is not None:
nfixed = dict(zip(G, range(len(G))))
fixed = np.asarray([nfixed[v] for v in fixed])
if pos is not None:
# Determine size of existing domain to adjust initial positions
dom_size = max(flatten(pos.values()))
shape = (len(G), dim)
pos_arr = np.random.random(shape) * dom_size + center
for i,n in enumerate(G):
if n in pos:
pos_arr[i] = np.asarray(pos[n])
else:
pos_arr=None
if len(G) == 0:
return {}
if len(G) == 1:
return {G.nodes()[0]: center}
try:
# Sparse matrix
if len(G) < 500: # sparse solver for large graphs
raise ValueError
A = nx.to_scipy_sparse_matrix(G,weight=weight,dtype='f')
if k is None and fixed is not None:
# We must adjust k by domain size for layouts that are not near 1x1
nnodes,_ = A.shape
k = dom_size / np.sqrt(nnodes)
pos = _sparse_fruchterman_reingold(A, dim, k, pos_arr, fixed, iterations)
except:
A = nx.to_numpy_matrix(G,weight=weight)
if k is None and fixed is not None:
# We must adjust k by domain size for layouts that are not near 1x1
nnodes,_ = A.shape
k = dom_size / np.sqrt(nnodes)
pos = _fruchterman_reingold(A, dim, k, pos_arr, fixed, iterations)
if fixed is None:
pos = _rescale_layout(pos, scale=scale) + center
pos = dict(zip(G,pos))
return pos
spring_layout=fruchterman_reingold_layout
def _fruchterman_reingold(A, dim=2, k=None, pos=None, fixed=None,
iterations=50):
# Position nodes in adjacency matrix A using Fruchterman-Reingold
# Entry point for NetworkX graph is fruchterman_reingold_layout()
try:
import numpy as np
except ImportError:
raise ImportError("_fruchterman_reingold() requires numpy: http://scipy.org/ ")
try:
nnodes,_=A.shape
except AttributeError:
raise nx.NetworkXError(
"fruchterman_reingold() takes an adjacency matrix as input")
A=np.asarray(A) # make sure we have an array instead of a matrix
if pos is None:
# random initial positions
pos=np.asarray(np.random.random((nnodes,dim)),dtype=A.dtype)
else:
# make sure positions are of same type as matrix
pos=pos.astype(A.dtype)
# optimal distance between nodes
if k is None:
k=np.sqrt(1.0/nnodes)
# the initial "temperature" is about .1 of domain area (=1x1)
# this is the largest step allowed in the dynamics.
# We need to calculate this in case our fixed positions force our domain
# to be much bigger than 1x1
t = max(max(pos.T[0]) - min(pos.T[0]), max(pos.T[1]) - min(pos.T[1]))*0.1
# simple cooling scheme.
# linearly step down by dt on each iteration so last iteration is size dt.
dt=t/float(iterations+1)
delta = np.zeros((pos.shape[0],pos.shape[0],pos.shape[1]),dtype=A.dtype)
# the inscrutable (but fast) version
# this is still O(V^2)
# could use multilevel methods to speed this up significantly
for iteration in range(iterations):
# matrix of difference between points
for i in range(pos.shape[1]):
delta[:,:,i]= pos[:,i,None]-pos[:,i]
# distance between points
distance=np.sqrt((delta**2).sum(axis=-1))
# enforce minimum distance of 0.01
distance=np.where(distance<0.01,0.01,distance)
# displacement "force"
displacement=np.transpose(np.transpose(delta)*\
(k*k/distance**2-A*distance/k))\
.sum(axis=1)
# update positions
length=np.sqrt((displacement**2).sum(axis=1))
length=np.where(length<0.01,0.1,length)
delta_pos=np.transpose(np.transpose(displacement)*t/length)
if fixed is not None:
# don't change positions of fixed nodes
delta_pos[fixed]=0.0
pos+=delta_pos
# cool temperature
t-=dt
return pos
def _sparse_fruchterman_reingold(A, dim=2, k=None, pos=None, fixed=None,
iterations=50):
# Position nodes in adjacency matrix A using Fruchterman-Reingold
# Entry point for NetworkX graph is fruchterman_reingold_layout()
# Sparse version
try:
import numpy as np
except ImportError:
raise ImportError("_sparse_fruchterman_reingold() requires numpy: http://scipy.org/ ")
try:
nnodes,_=A.shape
except AttributeError:
raise nx.NetworkXError(
"fruchterman_reingold() takes an adjacency matrix as input")
try:
from scipy.sparse import spdiags,coo_matrix
except ImportError:
raise ImportError("_sparse_fruchterman_reingold() scipy numpy: http://scipy.org/ ")
# make sure we have a LIst of Lists representation
try:
A=A.tolil()
except:
A=(coo_matrix(A)).tolil()
if pos is None:
# random initial positions
pos=np.asarray(np.random.random((nnodes,dim)),dtype=A.dtype)
else:
# make sure positions are of same type as matrix
pos=pos.astype(A.dtype)
# no fixed nodes
if fixed is None:
fixed=[]
# optimal distance between nodes
if k is None:
k=np.sqrt(1.0/nnodes)
# the initial "temperature" is about .1 of domain area (=1x1)
# this is the largest step allowed in the dynamics.
t=0.1
# simple cooling scheme.
# linearly step down by dt on each iteration so last iteration is size dt.
dt=t/float(iterations+1)
displacement=np.zeros((dim,nnodes))
for iteration in range(iterations):
displacement*=0
# loop over rows
for i in range(A.shape[0]):
if i in fixed:
continue
# difference between this row's node position and all others
delta=(pos[i]-pos).T
# distance between points
distance=np.sqrt((delta**2).sum(axis=0))
# enforce minimum distance of 0.01
distance=np.where(distance<0.01,0.01,distance)
# the adjacency matrix row
Ai=np.asarray(A.getrowview(i).toarray())
# displacement "force"
displacement[:,i]+=\
(delta*(k*k/distance**2-Ai*distance/k)).sum(axis=1)
# update positions
length=np.sqrt((displacement**2).sum(axis=0))
length=np.where(length<0.01,0.1,length)
pos+=(displacement*t/length).T
# cool temperature
t-=dt
return pos
def spectral_layout(G, dim=2, weight='weight', scale=1, center=None):
"""Position nodes using the eigenvectors of the graph Laplacian.
Parameters
----------
G : NetworkX graph or list of nodes
dim : int
Dimension of layout
weight : string or None optional (default='weight')
The edge attribute that holds the numerical value used for
the edge weight. If None, then all edge weights are 1.
scale : float
Scale factor for positions
center : array-like or None
Coordinate pair around which to center the layout.
Returns
-------
dict :
A dictionary of positions keyed by node
Examples
--------
>>> G=nx.path_graph(4)
>>> pos=nx.spectral_layout(G)
Notes
-----
Directed graphs will be considered as undirected graphs when
positioning the nodes.
For larger graphs (>500 nodes) this will use the SciPy sparse
eigenvalue solver (ARPACK).
"""
# handle some special cases that break the eigensolvers
import numpy as np
G, center = process_params(G, center, dim)
if len(G) <= 2:
if len(G) == 0:
pos = np.array([])
elif len(G) == 1:
pos = np.array([center])
else:
pos = np.array([np.zeros(dim), np.array(center)*2.0])
return dict(zip(G,pos))
try:
# Sparse matrix
if len(G)< 500: # dense solver is faster for small graphs
raise ValueError
A = nx.to_scipy_sparse_matrix(G, weight=weight, dtype='d')
# Symmetrize directed graphs
if G.is_directed():
A = A + np.transpose(A)
pos = _sparse_spectral(A,dim)
except (ImportError, ValueError):
# Dense matrix
A = nx.to_numpy_matrix(G, weight=weight)
# Symmetrize directed graphs
if G.is_directed():
A = A + np.transpose(A)
pos = _spectral(A, dim)
pos = _rescale_layout(pos, scale) + center
pos = dict(zip(G,pos))
return pos
def _spectral(A, dim=2):
# Input adjacency matrix A
# Uses dense eigenvalue solver from numpy
try:
import numpy as np
except ImportError:
raise ImportError("spectral_layout() requires numpy: http://scipy.org/ ")
try:
nnodes,_=A.shape
except AttributeError:
raise nx.NetworkXError(\
"spectral() takes an adjacency matrix as input")
# form Laplacian matrix
# make sure we have an array instead of a matrix
A=np.asarray(A)
I=np.identity(nnodes,dtype=A.dtype)
D=I*np.sum(A,axis=1) # diagonal of degrees
L=D-A
eigenvalues,eigenvectors=np.linalg.eig(L)
# sort and keep smallest nonzero
index=np.argsort(eigenvalues)[1:dim+1] # 0 index is zero eigenvalue
return np.real(eigenvectors[:,index])
def _sparse_spectral(A,dim=2):
# Input adjacency matrix A
# Uses sparse eigenvalue solver from scipy
# Could use multilevel methods here, see Koren "On spectral graph drawing"
try:
import numpy as np
from scipy.sparse import spdiags
except ImportError:
raise ImportError("_sparse_spectral() requires scipy & numpy: http://scipy.org/ ")
try:
from scipy.sparse.linalg.eigen import eigsh
except ImportError:
# scipy <0.9.0 names eigsh differently
from scipy.sparse.linalg import eigen_symmetric as eigsh
try:
nnodes,_=A.shape
except AttributeError:
raise nx.NetworkXError(\
"sparse_spectral() takes an adjacency matrix as input")
# form Laplacian matrix
data=np.asarray(A.sum(axis=1).T)
D=spdiags(data,0,nnodes,nnodes)
L=D-A
k=dim+1
# number of Lanczos vectors for ARPACK solver.What is the right scaling?
ncv=max(2*k+1,int(np.sqrt(nnodes)))
# return smallest k eigenvalues and eigenvectors
eigenvalues,eigenvectors=eigsh(L,k,which='SM',ncv=ncv)
index=np.argsort(eigenvalues)[1:k] # 0 index is zero eigenvalue
return np.real(eigenvectors[:,index])
def _rescale_layout(pos,scale=1):
# rescale to (-scale,scale) in all axes
# shift origin to (0,0)
lim=0 # max coordinate for all axes
for i in range(pos.shape[1]):
pos[:,i]-=pos[:,i].mean()
lim=max(pos[:,i].max(),lim)
# rescale to (-scale,scale) in all directions, preserves aspect
for i in range(pos.shape[1]):
pos[:,i]*=scale/lim
return pos
# fixture for nose tests
def setup_module(module):
from nose import SkipTest
try:
import numpy
except:
raise SkipTest("NumPy not available")
try:
import scipy
except:
raise SkipTest("SciPy not available")
def flatten(l):
try:
bs = basestring
except NameError:
# Py3k
bs = str
for el in l:
if isinstance(el, collections.Iterable) and not isinstance(el, bs):
for sub in flatten(el):
yield sub
else:
yield el
|
{
"content_hash": "093e8d2c482c17de41182d2eb53d96d1",
"timestamp": "",
"source": "github",
"line_count": 619,
"max_line_length": 94,
"avg_line_length": 29.7156704361874,
"alnum_prop": 0.5978036316190062,
"repo_name": "OrkoHunter/networkx",
"id": "9bbc6f98e3a542560c1c4c5d6d0df4de7601fc87",
"size": "18394",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "networkx/drawing/layout.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1786"
},
{
"name": "PowerShell",
"bytes": "3311"
},
{
"name": "Python",
"bytes": "3089811"
}
],
"symlink_target": ""
}
|
from models.usuario import Usuario
from config import Config
class Tesis:
tabla = "tesis"
def __init__(self, id, nombre, jurado_id=0, director_id=0):
self.id = id
self.nombre = nombre
self.jurado = Usuario.getById(jurado_id)
self.director = Usuario.getById(director_id)
@staticmethod
def create(nombre):
query = " INSERT INTO %s (id, nombre) VALUES (sequence_tesis.nextval, '%s')" % (Tesis.tabla, str(nombre))
cursor = Config.getCursor()
try:
cursor.execute(query)
cursor.execute("select sequence_tesis.currval from DUAL")
except Exception, e:
print e
print "No es posible guardar objeto"
id = cursor.fetchone()
return Tesis(id[0],nombre)
def setDirector(self, director):
query = "UPDATE %s SET director_id=%s WHERE id=%s" % (Tesis.tabla, str(director.id), str(self.id))
cursor = Config.getCursor()
try:
cursor.execute(query)
return True
except Exception, e:
print e
return False
def setJurado(self, jurado):
query = "UPDATE %s SET jurado_id=%s WHERE id=%s" % (Tesis.tabla, str(jurado.id), str(self.id))
cursor = Config.getCursor()
try:
cursor.execute(query)
return True
except Exception, e:
print e
return False
def getJurado(self):
return None
def getDirector(self):
return None
@staticmethod
def getById(id):
query = "SELECT * FROM %s WHERE id=%s" (Tesis.tabla, id)
cursor = Config.getCursor()
try:
cursor.execute(query)
row = cursor.fetchone()
except Exception, e:
print e
return None
return Tesis(row[0], row[3], row[1], row[2])
|
{
"content_hash": "3c9bec75da4e2160019a904e2848e0a1",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 113,
"avg_line_length": 29.515625,
"alnum_prop": 0.5579671784012705,
"repo_name": "carojasq/Evaluaciones-bases-de-datos-2",
"id": "2d2da44b971bf4a3cb16078b76bb205a2f0c32c7",
"size": "1889",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "models/tesis.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5049"
},
{
"name": "HTML",
"bytes": "13584"
},
{
"name": "JavaScript",
"bytes": "196"
},
{
"name": "Python",
"bytes": "51582"
}
],
"symlink_target": ""
}
|
"""
This module provides a library of Visual classes, which are drawable objects
intended to encapsulate simple graphic objects such as lines, meshes, points,
2D shapes, images, text, etc.
These classes define only the OpenGL machinery and connot be used directly in
a scenegraph. For scenegraph use, see the complementary Visual+Node classes
defined in vispy.scene.
"""
from .axis import AxisVisual # noqa
from .box import BoxVisual # noqa
from .cube import CubeVisual # noqa
from .ellipse import EllipseVisual # noqa
from .gridlines import GridLinesVisual # noqa
from .image import ImageVisual # noqa
from .gridmesh import GridMeshVisual # noqa
from .histogram import HistogramVisual # noqa
from .infinite_line import InfiniteLineVisual # noqa
from .isocurve import IsocurveVisual # noqa
from .isoline import IsolineVisual # noqa
from .isosurface import IsosurfaceVisual # noqa
from .line import LineVisual, ArrowVisual # noqa
from .linear_region import LinearRegionVisual # noqa
from .line_plot import LinePlotVisual # noqa
from .markers import MarkersVisual, marker_types # noqa
from .mesh import MeshVisual # noqa
from .plane import PlaneVisual # noqa
from .polygon import PolygonVisual # noqa
from .rectangle import RectangleVisual # noqa
from .regular_polygon import RegularPolygonVisual # noqa
from .scrolling_lines import ScrollingLinesVisual # noqa
from .spectrogram import SpectrogramVisual # noqa
from .sphere import SphereVisual # noqa
from .surface_plot import SurfacePlotVisual # noqa
from .text import TextVisual # noqa
from .tube import TubeVisual # noqa
from .visual import BaseVisual, Visual, CompoundVisual # noqa
from .volume import VolumeVisual # noqa
from .xyz_axis import XYZAxisVisual # noqa
from .border import _BorderVisual # noqa
from .colorbar import ColorBarVisual # noqa
from .graphs import GraphVisual # noqa
from .windbarb import WindbarbVisual # noqa
|
{
"content_hash": "d63c029d512d064b3c02314a572a5623",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 77,
"avg_line_length": 43.65909090909091,
"alnum_prop": 0.7917751171264966,
"repo_name": "Eric89GXL/vispy",
"id": "f58334292b1df94feb4843b57294da51c148cbe8",
"size": "2080",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vispy/visuals/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "143081"
},
{
"name": "GLSL",
"bytes": "195460"
},
{
"name": "JavaScript",
"bytes": "5007"
},
{
"name": "Makefile",
"bytes": "1638"
},
{
"name": "PowerShell",
"bytes": "4078"
},
{
"name": "Python",
"bytes": "2461885"
}
],
"symlink_target": ""
}
|
from msrest.serialization import Model
class OAuth2PermissionGrant(Model):
"""OAuth2PermissionGrant.
:param odatatype: Microsoft.DirectoryServices.OAuth2PermissionGrant
:type odatatype: str
:param client_id: The id of the resource's service principal granted
consent to impersonate the user when accessing the resource (represented
by the resourceId property).
:type client_id: str
:param object_id: The id of the permission grant
:type object_id: str
:param consent_type: Indicates if consent was provided by the
administrator (on behalf of the organization) or by an individual.
Possible values include: 'AllPrincipals', 'Principal'
:type consent_type: str or ~azure.graphrbac.models.ConsentType
:param principal_id: When consent type is Principal, this property
specifies the id of the user that granted consent and applies only for
that user.
:type principal_id: str
:param resource_id: Object Id of the resource you want to grant
:type resource_id: str
:param scope: Specifies the value of the scope claim that the resource
application should expect in the OAuth 2.0 access token. For example,
User.Read
:type scope: str
:param start_time: Start time for TTL
:type start_time: str
:param expiry_time: Expiry time for TTL
:type expiry_time: str
"""
_attribute_map = {
'odatatype': {'key': 'odata\\.type', 'type': 'str'},
'client_id': {'key': 'clientId', 'type': 'str'},
'object_id': {'key': 'objectId', 'type': 'str'},
'consent_type': {'key': 'consentType', 'type': 'str'},
'principal_id': {'key': 'principalId', 'type': 'str'},
'resource_id': {'key': 'resourceId', 'type': 'str'},
'scope': {'key': 'scope', 'type': 'str'},
'start_time': {'key': 'startTime', 'type': 'str'},
'expiry_time': {'key': 'expiryTime', 'type': 'str'},
}
def __init__(self, *, odatatype: str=None, client_id: str=None, object_id: str=None, consent_type=None, principal_id: str=None, resource_id: str=None, scope: str=None, start_time: str=None, expiry_time: str=None, **kwargs) -> None:
super(OAuth2PermissionGrant, self).__init__(**kwargs)
self.odatatype = odatatype
self.client_id = client_id
self.object_id = object_id
self.consent_type = consent_type
self.principal_id = principal_id
self.resource_id = resource_id
self.scope = scope
self.start_time = start_time
self.expiry_time = expiry_time
|
{
"content_hash": "22131ab6e5638a78e184069692e3e59c",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 235,
"avg_line_length": 45.01754385964912,
"alnum_prop": 0.6484801247077163,
"repo_name": "Azure/azure-sdk-for-python",
"id": "cb745c9195ffec0312a3468c31518fa5ccbcc3cb",
"size": "3040",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/graphrbac/azure-graphrbac/azure/graphrbac/models/oauth2_permission_grant_py3.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
}
|
"""
Filters that accept a `CommandLineInterface` as argument.
"""
from __future__ import unicode_literals
from .base import Filter
__all__ = (
'HasArg',
'HasCompletions',
'HasFocus',
'InFocusStack',
'HasSearch',
'HasSelection',
'HasValidationError',
'IsAborting',
'IsDone',
'IsMultiline',
'IsReadOnly',
'IsReturning',
'RendererHeightIsKnown',
)
class HasFocus(Filter):
"""
Enable when this buffer has the focus.
"""
def __init__(self, buffer_name):
self.buffer_name = buffer_name
def __call__(self, cli):
return cli.focus_stack.current == self.buffer_name
def __repr__(self):
return 'HasFocus(%r)' % self.buffer_name
class InFocusStack(Filter):
"""
Enable when this buffer appears on the focus stack.
"""
def __init__(self, buffer_name):
self.buffer_name = buffer_name
def __call__(self, cli):
return self.buffer_name in cli.focus_stack
def __repr__(self):
return 'InFocusStack(%r)' % self.buffer_name
class HasSelection(Filter):
"""
Enable when the current buffer has a selection.
"""
def __call__(self, cli):
return bool(cli.current_buffer.selection_state)
def __repr__(self):
return 'HasSelection()'
class HasCompletions(Filter):
"""
Enable when the current buffer has completions.
"""
def __call__(self, cli):
return cli.current_buffer.complete_state is not None
def __repr__(self):
return 'HasCompletions()'
class IsMultiline(Filter):
"""
Enable in multiline mode.
"""
def __call__(self, cli):
return cli.current_buffer.is_multiline()
def __repr__(self):
return 'IsMultiline()'
class IsReadOnly(Filter):
"""
True when the current buffer is read only.
"""
def __call__(self, cli):
return cli.current_buffer.read_only()
def __repr__(self):
return 'IsReadOnly()'
class HasValidationError(Filter):
"""
Current buffer has validation error.
"""
def __call__(self, cli):
return cli.current_buffer.validation_error is not None
def __repr__(self):
return 'HasValidationError()'
class HasArg(Filter):
"""
Enable when the input processor has an 'arg'.
"""
def __call__(self, cli):
return cli.input_processor.arg is not None
def __repr__(self):
return 'HasArg()'
class HasSearch(Filter):
"""
Incremental search is active.
"""
def __call__(self, cli):
return cli.is_searching
def __repr__(self):
return 'HasSearch()'
class IsReturning(Filter):
"""
When a return value has been set.
"""
def __call__(self, cli):
return cli.is_returning
def __repr__(self):
return 'IsReturning()'
class IsAborting(Filter):
"""
True when aborting. (E.g. Control-C pressed.)
"""
def __call__(self, cli):
return cli.is_aborting
def __repr__(self):
return 'IsAborting()'
class IsExiting(Filter):
"""
True when exiting. (E.g. Control-D pressed.)
"""
def __call__(self, cli):
return cli.is_exiting
def __repr__(self):
return 'IsExiting()'
class IsDone(Filter):
"""
True when the CLI is returning, aborting or exiting.
"""
def __call__(self, cli):
return cli.is_done
def __repr__(self):
return 'IsDone()'
class RendererHeightIsKnown(Filter):
"""
Only True when the renderer knows it's real height.
(On VT100 terminals, we have to wait for a CPR response, before we can be
sure of the available height between the cursor position and the bottom of
the terminal. And usually it's nicer to wait with drawing bottom toolbars
until we receive the height, in order to avoid flickering -- first drawing
somewhere in the middle, and then again at the bottom.)
"""
def __call__(self, cli):
return cli.renderer.height_is_known
def __repr__(self):
return 'RendererHeightIsKnown()'
|
{
"content_hash": "b6d6d1a6b9a3e7353a16bb5c827cce47",
"timestamp": "",
"source": "github",
"line_count": 187,
"max_line_length": 78,
"avg_line_length": 21.828877005347593,
"alnum_prop": 0.5999510044096031,
"repo_name": "amjith/python-prompt-toolkit",
"id": "f542be817ab3e073833d3abe590176aee764e7b5",
"size": "4082",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "prompt_toolkit/filters/cli.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "594539"
}
],
"symlink_target": ""
}
|
import os
import ycm_core
# These are the compilation flags that will be used in case there's no
# compilation database set (by default, one is not set).
# CHANGE THIS LIST OF FLAGS. YES, THIS IS THE DROID YOU HAVE BEEN LOOKING FOR.
flags = [
'-Wall',
'-Wextra',
'-Werror',
'-Wno-long-long',
'-Wno-variadic-macros',
'-DNDEBUG',
# THIS IS IMPORTANT! Without a "-std=<something>" flag, clang won't know which
# language to use when compiling headers. So it will guess. Badly. So C++
# headers will be compiled as C headers. You don't want that so ALWAYS specify
# a "-std=<something>".
# For a C project, you would set this to something like 'c99' instead of
# 'c++11'.
'-std=c++11',
# ...and the same thing goes for the magic -x option which specifies the
# language that the files to be compiled are written in. This is mostly
# relevant for c++ headers.
# For a C project, you would set this to 'c' instead of 'c++'.
'-x',
'c++',
'-Isrc',
]
# Set this to the absolute path to the folder (NOT the file!) containing the
# compile_commands.json file to use that instead of 'flags'. See here for
# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html
#
# Most projects will NOT need to set this to anything; you can just change the
# 'flags' list of compilation flags. Notice that YCM itself uses that approach.
compilation_database_folder = ''
if os.path.exists( compilation_database_folder ):
database = ycm_core.CompilationDatabase( compilation_database_folder )
else:
database = None
SOURCE_EXTENSIONS = [ '.cpp', '.cxx', '.cc', '.c', '.m', '.mm' ]
def DirectoryOfThisScript():
return os.path.dirname( os.path.abspath( __file__ ) )
def MakeRelativePathsInFlagsAbsolute( flags, working_directory ):
if not working_directory:
return list( flags )
new_flags = []
make_next_absolute = False
path_flags = [ '-isystem', '-I', '-iquote', '--sysroot=' ]
for flag in flags:
new_flag = flag
if make_next_absolute:
make_next_absolute = False
if not flag.startswith( '/' ):
new_flag = os.path.join( working_directory, flag )
for path_flag in path_flags:
if flag == path_flag:
make_next_absolute = True
break
if flag.startswith( path_flag ):
path = flag[ len( path_flag ): ]
new_flag = path_flag + os.path.join( working_directory, path )
break
if new_flag:
new_flags.append( new_flag )
return new_flags
def IsHeaderFile( filename ):
extension = os.path.splitext( filename )[ 1 ]
return extension in [ '.h', '.hxx', '.hpp', '.hh' ]
def GetCompilationInfoForFile( filename ):
# The compilation_commands.json file generated by CMake does not have entries
# for header files. So we do our best by asking the db for flags for a
# corresponding source file, if any. If one exists, the flags for that file
# should be good enough.
if IsHeaderFile( filename ):
basename = os.path.splitext( filename )[ 0 ]
for extension in SOURCE_EXTENSIONS:
replacement_file = basename + extension
if os.path.exists( replacement_file ):
compilation_info = database.GetCompilationInfoForFile(
replacement_file )
if compilation_info.compiler_flags_:
return compilation_info
return None
return database.GetCompilationInfoForFile( filename )
def FlagsForFile( filename, **kwargs ):
if database:
# Bear in mind that compilation_info.compiler_flags_ does NOT return a
# python list, but a "list-like" StringVec object
compilation_info = GetCompilationInfoForFile( filename )
if not compilation_info:
return None
final_flags = MakeRelativePathsInFlagsAbsolute(
compilation_info.compiler_flags_,
compilation_info.compiler_working_dir_ )
else:
relative_to = DirectoryOfThisScript()
final_flags = MakeRelativePathsInFlagsAbsolute( flags, relative_to )
return {
'flags': final_flags,
'do_cache': True
}
|
{
"content_hash": "ec5915734bb1d3d90bc9c0a5b20af1c8",
"timestamp": "",
"source": "github",
"line_count": 121,
"max_line_length": 79,
"avg_line_length": 32.62809917355372,
"alnum_prop": 0.6859169199594731,
"repo_name": "roman-kashitsyn/libdocset",
"id": "04e044718f381e122fbe8c6b507463dc447d10d9",
"size": "3948",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": ".ycm_extra_conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "39113"
},
{
"name": "C++",
"bytes": "7875"
},
{
"name": "Python",
"bytes": "3948"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from st2tests.mocks.sensor import MockSensorWrapper
from st2tests.mocks.sensor import MockSensorService
from st2tests.pack_resource import BasePackResourceTestCase
__all__ = ["BaseSensorTestCase"]
class BaseSensorTestCase(BasePackResourceTestCase):
"""
Base class for sensor tests.
This class provides some utility methods for verifying that a trigger has
been dispatched, etc.
"""
sensor_cls = None
def setUp(self):
super(BaseSensorTestCase, self).setUp()
class_name = self.sensor_cls.__name__
sensor_wrapper = MockSensorWrapper(pack="tests", class_name=class_name)
self.sensor_service = MockSensorService(sensor_wrapper=sensor_wrapper)
def get_sensor_instance(self, config=None, poll_interval=None):
"""
Retrieve instance of the sensor class.
"""
kwargs = {"sensor_service": self.sensor_service}
if config:
kwargs["config"] = config
if poll_interval is not None:
kwargs["poll_interval"] = poll_interval
instance = self.sensor_cls(**kwargs) # pylint: disable=not-callable
return instance
def get_dispatched_triggers(self):
return self.sensor_service.dispatched_triggers
def get_last_dispatched_trigger(self):
return self.sensor_service.dispatched_triggers[-1]
def assertTriggerDispatched(self, trigger, payload=None, trace_context=None):
"""
Assert that the trigger with the provided values has been dispatched.
:param trigger: Name of the trigger.
:type trigger: ``str``
:param paylod: Trigger payload (optional). If not provided, only trigger name is matched.
type: payload: ``object``
:param trace_context: Trigger trace context (optional). If not provided, only trigger name
is matched.
type: payload: ``object``
"""
dispatched_triggers = self.get_dispatched_triggers()
for item in dispatched_triggers:
trigger_matches = item["trigger"] == trigger
if payload:
payload_matches = item["payload"] == payload
else:
payload_matches = True
if trace_context:
trace_context_matches = item["trace_context"] == trace_context
else:
trace_context_matches = True
if trigger_matches and payload_matches and trace_context_matches:
return True
msg = 'Trigger "%s" hasn\'t been dispatched' % (trigger)
raise AssertionError(msg)
|
{
"content_hash": "2279b374ebea426d6272e0e9cbf9ee47",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 98,
"avg_line_length": 33.45569620253165,
"alnum_prop": 0.6341278849791903,
"repo_name": "nzlosh/st2",
"id": "52c0451f45fb3a0b409d0d7264e02e6398292537",
"size": "3271",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "st2tests/st2tests/sensors.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "198"
},
{
"name": "JavaScript",
"bytes": "444"
},
{
"name": "Jinja",
"bytes": "174532"
},
{
"name": "Makefile",
"bytes": "75242"
},
{
"name": "PowerShell",
"bytes": "856"
},
{
"name": "Python",
"bytes": "6453910"
},
{
"name": "Shell",
"bytes": "93607"
},
{
"name": "Starlark",
"bytes": "7236"
}
],
"symlink_target": ""
}
|
"""Subprocress execution
This module holds a subclass of subprocess.Popen with our own required
features, mainly that we get access to the subprocess output while it
is running rather than just at the end. This makes it easiler to show
progress information and filter output in real time.
"""
import errno
import os
import pty
import select
import subprocess
import sys
import unittest
# Import these here so the caller does not need to import subprocess also.
PIPE = subprocess.PIPE
STDOUT = subprocess.STDOUT
PIPE_PTY = -3 # Pipe output through a pty
stay_alive = True
class Popen(subprocess.Popen):
"""Like subprocess.Popen with ptys and incremental output
This class deals with running a child process and filtering its output on
both stdout and stderr while it is running. We do this so we can monitor
progress, and possibly relay the output to the user if requested.
The class is similar to subprocess.Popen, the equivalent is something like:
Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
But this class has many fewer features, and two enhancement:
1. Rather than getting the output data only at the end, this class sends it
to a provided operation as it arrives.
2. We use pseudo terminals so that the child will hopefully flush its output
to us as soon as it is produced, rather than waiting for the end of a
line.
Use CommunicateFilter() to handle output from the subprocess.
"""
def __init__(self, args, stdin=None, stdout=PIPE_PTY, stderr=PIPE_PTY,
shell=False, cwd=None, env=None, **kwargs):
"""Cut-down constructor
Args:
args: Program and arguments for subprocess to execute.
stdin: See subprocess.Popen()
stdout: See subprocess.Popen(), except that we support the sentinel
value of cros_subprocess.PIPE_PTY.
stderr: See subprocess.Popen(), except that we support the sentinel
value of cros_subprocess.PIPE_PTY.
shell: See subprocess.Popen()
cwd: Working directory to change to for subprocess, or None if none.
env: Environment to use for this subprocess, or None to inherit parent.
kwargs: No other arguments are supported at the moment. Passing other
arguments will cause a ValueError to be raised.
"""
stdout_pty = None
stderr_pty = None
if stdout == PIPE_PTY:
stdout_pty = pty.openpty()
stdout = os.fdopen(stdout_pty[1])
if stderr == PIPE_PTY:
stderr_pty = pty.openpty()
stderr = os.fdopen(stderr_pty[1])
super(Popen, self).__init__(args, stdin=stdin,
stdout=stdout, stderr=stderr, shell=shell, cwd=cwd, env=env,
**kwargs)
# If we're on a PTY, we passed the slave half of the PTY to the subprocess.
# We want to use the master half on our end from now on. Setting this here
# does make some assumptions about the implementation of subprocess, but
# those assumptions are pretty minor.
# Note that if stderr is STDOUT, then self.stderr will be set to None by
# this constructor.
if stdout_pty is not None:
self.stdout = os.fdopen(stdout_pty[0])
if stderr_pty is not None:
self.stderr = os.fdopen(stderr_pty[0])
# Insist that unit tests exist for other arguments we don't support.
if kwargs:
raise ValueError("Unit tests do not test extra args - please add tests")
def CommunicateFilter(self, output):
"""Interact with process: Read data from stdout and stderr.
This method runs until end-of-file is reached, then waits for the
subprocess to terminate.
The output function is sent all output from the subprocess and must be
defined like this:
def Output([self,] stream, data)
Args:
stream: the stream the output was received on, which will be
sys.stdout or sys.stderr.
data: a string containing the data
Note: The data read is buffered in memory, so do not use this
method if the data size is large or unlimited.
Args:
output: Function to call with each fragment of output.
Returns:
A tuple (stdout, stderr, combined) which is the data received on
stdout, stderr and the combined data (interleaved stdout and stderr).
Note that the interleaved output will only be sensible if you have
set both stdout and stderr to PIPE or PIPE_PTY. Even then it depends on
the timing of the output in the subprocess. If a subprocess flips
between stdout and stderr quickly in succession, by the time we come to
read the output from each we may see several lines in each, and will read
all the stdout lines, then all the stderr lines. So the interleaving
may not be correct. In this case you might want to pass
stderr=cros_subprocess.STDOUT to the constructor.
This feature is still useful for subprocesses where stderr is
rarely used and indicates an error.
Note also that if you set stderr to STDOUT, then stderr will be empty
and the combined output will just be the same as stdout.
"""
read_set = []
write_set = []
stdout = None # Return
stderr = None # Return
if self.stdin:
# Flush stdio buffer. This might block, if the user has
# been writing to .stdin in an uncontrolled fashion.
self.stdin.flush()
if input:
write_set.append(self.stdin)
else:
self.stdin.close()
if self.stdout:
read_set.append(self.stdout)
stdout = []
if self.stderr and self.stderr != self.stdout:
read_set.append(self.stderr)
stderr = []
combined = []
input_offset = 0
while read_set or write_set:
try:
rlist, wlist, _ = select.select(read_set, write_set, [], 0.2)
except select.error, e:
if e.args[0] == errno.EINTR:
continue
raise
if not stay_alive:
self.terminate()
if self.stdin in wlist:
# When select has indicated that the file is writable,
# we can write up to PIPE_BUF bytes without risk
# blocking. POSIX defines PIPE_BUF >= 512
chunk = input[input_offset : input_offset + 512]
bytes_written = os.write(self.stdin.fileno(), chunk)
input_offset += bytes_written
if input_offset >= len(input):
self.stdin.close()
write_set.remove(self.stdin)
if self.stdout in rlist:
data = ""
# We will get an error on read if the pty is closed
try:
data = os.read(self.stdout.fileno(), 1024)
except OSError:
pass
if data == "":
self.stdout.close()
read_set.remove(self.stdout)
else:
stdout.append(data)
combined.append(data)
if output:
output(sys.stdout, data)
if self.stderr in rlist:
data = ""
# We will get an error on read if the pty is closed
try:
data = os.read(self.stderr.fileno(), 1024)
except OSError:
pass
if data == "":
self.stderr.close()
read_set.remove(self.stderr)
else:
stderr.append(data)
combined.append(data)
if output:
output(sys.stderr, data)
# All data exchanged. Translate lists into strings.
if stdout is not None:
stdout = ''.join(stdout)
else:
stdout = ''
if stderr is not None:
stderr = ''.join(stderr)
else:
stderr = ''
combined = ''.join(combined)
# Translate newlines, if requested. We cannot let the file
# object do the translation: It is based on stdio, which is
# impossible to combine with select (unless forcing no
# buffering).
if self.universal_newlines and hasattr(file, 'newlines'):
if stdout:
stdout = self._translate_newlines(stdout)
if stderr:
stderr = self._translate_newlines(stderr)
self.wait()
return (stdout, stderr, combined)
# Just being a unittest.TestCase gives us 14 public methods. Unless we
# disable this, we can only have 6 tests in a TestCase. That's not enough.
#
# pylint: disable=R0904
class TestSubprocess(unittest.TestCase):
"""Our simple unit test for this module"""
class MyOperation:
"""Provides a operation that we can pass to Popen"""
def __init__(self, input_to_send=None):
"""Constructor to set up the operation and possible input.
Args:
input_to_send: a text string to send when we first get input. We will
add \r\n to the string.
"""
self.stdout_data = ''
self.stderr_data = ''
self.combined_data = ''
self.stdin_pipe = None
self._input_to_send = input_to_send
if input_to_send:
pipe = os.pipe()
self.stdin_read_pipe = pipe[0]
self._stdin_write_pipe = os.fdopen(pipe[1], 'w')
def Output(self, stream, data):
"""Output handler for Popen. Stores the data for later comparison"""
if stream == sys.stdout:
self.stdout_data += data
if stream == sys.stderr:
self.stderr_data += data
self.combined_data += data
# Output the input string if we have one.
if self._input_to_send:
self._stdin_write_pipe.write(self._input_to_send + '\r\n')
self._stdin_write_pipe.flush()
def _BasicCheck(self, plist, oper):
"""Basic checks that the output looks sane."""
self.assertEqual(plist[0], oper.stdout_data)
self.assertEqual(plist[1], oper.stderr_data)
self.assertEqual(plist[2], oper.combined_data)
# The total length of stdout and stderr should equal the combined length
self.assertEqual(len(plist[0]) + len(plist[1]), len(plist[2]))
def test_simple(self):
"""Simple redirection: Get process list"""
oper = TestSubprocess.MyOperation()
plist = Popen(['ps']).CommunicateFilter(oper.Output)
self._BasicCheck(plist, oper)
def test_stderr(self):
"""Check stdout and stderr"""
oper = TestSubprocess.MyOperation()
cmd = 'echo fred >/dev/stderr && false || echo bad'
plist = Popen([cmd], shell=True).CommunicateFilter(oper.Output)
self._BasicCheck(plist, oper)
self.assertEqual(plist [0], 'bad\r\n')
self.assertEqual(plist [1], 'fred\r\n')
def test_shell(self):
"""Check with and without shell works"""
oper = TestSubprocess.MyOperation()
cmd = 'echo test >/dev/stderr'
self.assertRaises(OSError, Popen, [cmd], shell=False)
plist = Popen([cmd], shell=True).CommunicateFilter(oper.Output)
self._BasicCheck(plist, oper)
self.assertEqual(len(plist [0]), 0)
self.assertEqual(plist [1], 'test\r\n')
def test_list_args(self):
"""Check with and without shell works using list arguments"""
oper = TestSubprocess.MyOperation()
cmd = ['echo', 'test', '>/dev/stderr']
plist = Popen(cmd, shell=False).CommunicateFilter(oper.Output)
self._BasicCheck(plist, oper)
self.assertEqual(plist [0], ' '.join(cmd[1:]) + '\r\n')
self.assertEqual(len(plist [1]), 0)
oper = TestSubprocess.MyOperation()
# this should be interpreted as 'echo' with the other args dropped
cmd = ['echo', 'test', '>/dev/stderr']
plist = Popen(cmd, shell=True).CommunicateFilter(oper.Output)
self._BasicCheck(plist, oper)
self.assertEqual(plist [0], '\r\n')
def test_cwd(self):
"""Check we can change directory"""
for shell in (False, True):
oper = TestSubprocess.MyOperation()
plist = Popen('pwd', shell=shell, cwd='/tmp').CommunicateFilter(oper.Output)
self._BasicCheck(plist, oper)
self.assertEqual(plist [0], '/tmp\r\n')
def test_env(self):
"""Check we can change environment"""
for add in (False, True):
oper = TestSubprocess.MyOperation()
env = os.environ
if add:
env ['FRED'] = 'fred'
cmd = 'echo $FRED'
plist = Popen(cmd, shell=True, env=env).CommunicateFilter(oper.Output)
self._BasicCheck(plist, oper)
self.assertEqual(plist [0], add and 'fred\r\n' or '\r\n')
def test_extra_args(self):
"""Check we can't add extra arguments"""
self.assertRaises(ValueError, Popen, 'true', close_fds=False)
def test_basic_input(self):
"""Check that incremental input works
We set up a subprocess which will prompt for name. When we see this prompt
we send the name as input to the process. It should then print the name
properly to stdout.
"""
oper = TestSubprocess.MyOperation('Flash')
prompt = 'What is your name?: '
cmd = 'echo -n "%s"; read name; echo Hello $name' % prompt
plist = Popen([cmd], stdin=oper.stdin_read_pipe,
shell=True).CommunicateFilter(oper.Output)
self._BasicCheck(plist, oper)
self.assertEqual(len(plist [1]), 0)
self.assertEqual(plist [0], prompt + 'Hello Flash\r\r\n')
def test_isatty(self):
"""Check that ptys appear as terminals to the subprocess"""
oper = TestSubprocess.MyOperation()
cmd = ('if [ -t %d ]; then echo "terminal %d" >&%d; '
'else echo "not %d" >&%d; fi;')
both_cmds = ''
for fd in (1, 2):
both_cmds += cmd % (fd, fd, fd, fd, fd)
plist = Popen(both_cmds, shell=True).CommunicateFilter(oper.Output)
self._BasicCheck(plist, oper)
self.assertEqual(plist [0], 'terminal 1\r\n')
self.assertEqual(plist [1], 'terminal 2\r\n')
# Now try with PIPE and make sure it is not a terminal
oper = TestSubprocess.MyOperation()
plist = Popen(both_cmds, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
shell=True).CommunicateFilter(oper.Output)
self._BasicCheck(plist, oper)
self.assertEqual(plist [0], 'not 1\n')
self.assertEqual(plist [1], 'not 2\n')
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "bcbf15b77234aa37a8f899026436600e",
"timestamp": "",
"source": "github",
"line_count": 389,
"max_line_length": 88,
"avg_line_length": 39.552699228791774,
"alnum_prop": 0.579682828545431,
"repo_name": "Mogran/u-boot-2016.09-FS2416",
"id": "0fc4a06b5066af43b4207e58d4c9ec33d3e43044",
"size": "15717",
"binary": false,
"copies": "88",
"ref": "refs/heads/master",
"path": "tools/patman/cros_subprocess.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "223006"
},
{
"name": "Awk",
"bytes": "205"
},
{
"name": "Batchfile",
"bytes": "3387"
},
{
"name": "C",
"bytes": "31578117"
},
{
"name": "C++",
"bytes": "2361003"
},
{
"name": "CSS",
"bytes": "1316"
},
{
"name": "Lex",
"bytes": "6684"
},
{
"name": "Makefile",
"bytes": "199005"
},
{
"name": "Objective-C",
"bytes": "49575"
},
{
"name": "Perl",
"bytes": "287598"
},
{
"name": "Python",
"bytes": "148457"
},
{
"name": "Shell",
"bytes": "46384"
},
{
"name": "Yacc",
"bytes": "15395"
}
],
"symlink_target": ""
}
|
from cherrycommon.excel import XLS
import unittest
class ExcelTest(unittest.TestCase):
def test_cell_names(self):
self.assertEqual(XLS.get_cell_name(0, 0), 'A1')
self.assertEqual(XLS.get_cell_name(25, 0), 'Z1')
self.assertEqual(XLS.get_cell_name(26, 9), 'AA10')
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "d1692fad0d978009a44039920d732203",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 58,
"avg_line_length": 26.23076923076923,
"alnum_prop": 0.6422287390029325,
"repo_name": "sunrize531/cherry-common",
"id": "9751a8c0c8b2ea6ab7e76ad5d28d1a1f5baa177c",
"size": "341",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_excel.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "93337"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
import plotly
import plotly.graph_objs as go
def validate_coerce_fig_to_dict(fig, validate):
from plotly.basedatatypes import BaseFigure
if isinstance(fig, BaseFigure):
fig_dict = fig.to_dict()
elif isinstance(fig, dict):
if validate:
# This will raise an exception if fig is not a valid plotly figure
fig_dict = plotly.graph_objs.Figure(fig).to_plotly_json()
else:
fig_dict = fig
else:
raise ValueError(
"""
The fig parameter must be a dict or Figure.
Received value of type {typ}: {v}""".format(
typ=type(fig), v=fig
)
)
return fig_dict
def validate_coerce_output_type(output_type):
if output_type == "Figure" or output_type == go.Figure:
cls = go.Figure
elif output_type == "FigureWidget" or (
hasattr(go, "FigureWidget") and output_type == go.FigureWidget
):
cls = go.FigureWidget
else:
raise ValueError(
"""
Invalid output type: {output_type}
Must be one of: 'Figure', 'FigureWidget'"""
)
return cls
|
{
"content_hash": "c1448eb15b578044afb5ab3eed042e96",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 78,
"avg_line_length": 27.761904761904763,
"alnum_prop": 0.5969125214408233,
"repo_name": "plotly/python-api",
"id": "b3b376e9d89cf558d22bbfaaef45f4094517c067",
"size": "1166",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/io/_utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, unicode_literals
from django.contrib.staticfiles import finders
from ..base import BaseTestCase
class StaticFilesPanelTestCase(BaseTestCase):
def setUp(self):
super(StaticFilesPanelTestCase, self).setUp()
self.panel = self.toolbar.get_panel_by_id('StaticFilesPanel')
def test_default_case(self):
self.panel.process_request(self.request)
self.panel.process_response(self.request, self.response)
self.assertIn('django.contrib.staticfiles.finders.'
'AppDirectoriesFinder', self.panel.content)
self.assertIn('django.contrib.staticfiles.finders.'
'FileSystemFinder (2 files)', self.panel.content)
self.assertEqual(self.panel.num_used, 0)
self.assertNotEqual(self.panel.num_found, 0)
self.assertEqual(self.panel.get_staticfiles_apps(),
['django.contrib.admin', 'debug_toolbar'])
self.assertEqual(self.panel.get_staticfiles_dirs(),
finders.FileSystemFinder().locations)
|
{
"content_hash": "99d6f06092529d0c3735500c657b72e2",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 71,
"avg_line_length": 41.96153846153846,
"alnum_prop": 0.6672777268560953,
"repo_name": "marcoantoniooliveira/labweb",
"id": "be4f267053d8c93be7db410a1e5ff8d0da957428",
"size": "1108",
"binary": false,
"copies": "30",
"ref": "refs/heads/master",
"path": "oscar/lib/python2.7/site-packages/tests/panels/test_staticfiles.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "5939"
},
{
"name": "CSS",
"bytes": "1534157"
},
{
"name": "CoffeeScript",
"bytes": "21"
},
{
"name": "JavaScript",
"bytes": "2968822"
},
{
"name": "LiveScript",
"bytes": "6103"
},
{
"name": "Puppet",
"bytes": "3507"
},
{
"name": "Python",
"bytes": "30402832"
},
{
"name": "Shell",
"bytes": "10782"
},
{
"name": "TeX",
"bytes": "56626"
},
{
"name": "XSLT",
"bytes": "49764"
}
],
"symlink_target": ""
}
|
from tempfile import mkdtemp
from settings import *
INSTALLED_APPS += [
'multipleindex',
]
HAYSTACK_CONNECTIONS = {
'default': {
'ENGINE': 'haystack.backends.solr_backend.SolrEngine',
'URL': 'http://localhost:8983/solr',
},
'whoosh': {
'ENGINE': 'haystack.backends.whoosh_backend.WhooshEngine',
'PATH': mkdtemp(prefix='haystack-multipleindex-whoosh-tests-'),
'EXCLUDED_INDEXES': ['multipleindex.search_indexes.BarIndex'],
},
'filtered_whoosh': {
'ENGINE': 'haystack.backends.whoosh_backend.WhooshEngine',
'PATH': mkdtemp(prefix='haystack-multipleindex-filtered-whoosh-tests-'),
'EXCLUDED_INDEXES': ['multipleindex.search_indexes.BarIndex'],
},
}
HAYSTACK_SIGNAL_PROCESSOR = 'haystack.signals.RealtimeSignalProcessor'
|
{
"content_hash": "3eacc519a8905748b5414d75ec8da8c3",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 80,
"avg_line_length": 31.26923076923077,
"alnum_prop": 0.6654366543665436,
"repo_name": "speedplane/django-haystack",
"id": "59dc62effa83c6f0380c287d8c2ae062950214ee",
"size": "813",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tests/multipleindex_settings.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "1431"
},
{
"name": "Python",
"bytes": "713583"
},
{
"name": "Shell",
"bytes": "2888"
}
],
"symlink_target": ""
}
|
import json
class Task(object):
'''An individual task base.'''
# Internal task_id
_id = None
# Internal task state
_state = 'WAIT'
# & channel name
_channel = None
# Whether this task should be cleaned up (pushed to end queue)
_cleanup = True
# Any context for this task
_context = None
# Redis & helpers public objects
redis = None
helpers = None
class Error(Exception):
'''
An exception which, when raised, puts this task in the ``ERROR`` state.
'''
pass
def __init__(self, **kwargs):
pass
# Tasks which don't define a stop are assumed not to spawn any sub-greenlets
# this is called before we kill the task's greenlet (running task.start)
def stop(self):
pass
def emit(self, event, data=None):
'''
Emit task events -> pubsub channel.
'''
self.redis.publish(self._channel, json.dumps({
'event': event,
'data': data
}))
|
{
"content_hash": "68e6b56c7e853163cb5a0f3547051b17",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 80,
"avg_line_length": 21.375,
"alnum_prop": 0.5672514619883041,
"repo_name": "Oxygem/pytask",
"id": "7126f5f626ba7734ff8ef755389b94378a572093",
"size": "1092",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "pytask/task.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "29809"
},
{
"name": "Shell",
"bytes": "241"
}
],
"symlink_target": ""
}
|
from os import path, urandom
basedir = path.abspath(path.dirname(__file__))
dbname = "dbname.db"
SQLALCHEMY_DATABASE_URI = "sqlite:///" + path.join(basedir, dbname)
SQLALCHEMY_ECHO = False
SQLALCHEMY_TRACK_MODIFICATIONS = False
DEBUG = False
TESTING = False
SECRET_KEY = urandom(30)
|
{
"content_hash": "b906e818da0b2e34c270d2cbd2978137",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 67,
"avg_line_length": 23.833333333333332,
"alnum_prop": 0.7342657342657343,
"repo_name": "JungWinter/yellowid-flask",
"id": "917c505f9e786e8e51115f88ca795971bb66b5b1",
"size": "286",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "20052"
}
],
"symlink_target": ""
}
|
import logging
from optparse import Values
from typing import List
from pip._vendor.packaging.utils import canonicalize_name
from pip._internal.cli.base_command import Command
from pip._internal.cli.req_command import SessionCommandMixin, warn_if_run_as_root
from pip._internal.cli.status_codes import SUCCESS
from pip._internal.exceptions import InstallationError
from pip._internal.req import parse_requirements
from pip._internal.req.constructors import (
install_req_from_line,
install_req_from_parsed_requirement,
)
from pip._internal.utils.misc import protect_pip_from_modification_on_windows
logger = logging.getLogger(__name__)
class UninstallCommand(Command, SessionCommandMixin):
"""
Uninstall packages.
pip is able to uninstall most installed packages. Known exceptions are:
- Pure distutils packages installed with ``python setup.py install``, which
leave behind no metadata to determine what files were installed.
- Script wrappers installed by ``python setup.py develop``.
"""
usage = """
%prog [options] <package> ...
%prog [options] -r <requirements file> ..."""
def add_options(self) -> None:
self.cmd_opts.add_option(
'-r', '--requirement',
dest='requirements',
action='append',
default=[],
metavar='file',
help='Uninstall all the packages listed in the given requirements '
'file. This option can be used multiple times.',
)
self.cmd_opts.add_option(
'-y', '--yes',
dest='yes',
action='store_true',
help="Don't ask for confirmation of uninstall deletions.")
self.parser.insert_option_group(0, self.cmd_opts)
def run(self, options: Values, args: List[str]) -> int:
session = self.get_default_session(options)
reqs_to_uninstall = {}
for name in args:
req = install_req_from_line(
name, isolated=options.isolated_mode,
)
if req.name:
reqs_to_uninstall[canonicalize_name(req.name)] = req
else:
logger.warning(
"Invalid requirement: %r ignored -"
" the uninstall command expects named"
" requirements.",
name,
)
for filename in options.requirements:
for parsed_req in parse_requirements(
filename,
options=options,
session=session):
req = install_req_from_parsed_requirement(
parsed_req,
isolated=options.isolated_mode
)
if req.name:
reqs_to_uninstall[canonicalize_name(req.name)] = req
if not reqs_to_uninstall:
raise InstallationError(
f'You must give at least one requirement to {self.name} (see '
f'"pip help {self.name}")'
)
protect_pip_from_modification_on_windows(
modifying_pip="pip" in reqs_to_uninstall
)
for req in reqs_to_uninstall.values():
uninstall_pathset = req.uninstall(
auto_confirm=options.yes, verbose=self.verbosity > 0,
)
if uninstall_pathset:
uninstall_pathset.commit()
warn_if_run_as_root()
return SUCCESS
|
{
"content_hash": "6889ec9411efcd51e43829d725bd37f3",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 82,
"avg_line_length": 34.8,
"alnum_prop": 0.5818965517241379,
"repo_name": "sonntagsgesicht/regtest",
"id": "c590627eaa0a12a063bd2eb8a84e2935e11f6e71",
"size": "3480",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": ".aux/venv/lib/python3.9/site-packages/pip/_internal/commands/uninstall.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "13888"
}
],
"symlink_target": ""
}
|
from jsonrpclib.SimpleJSONRPCServer import SimpleJSONRPCServer
import threading
class RPCApi():
functions = []
def __init__(self, config):
self.config = config
self.server = SimpleJSONRPCServer((self.config['rpc_host'], self.config['rpc_port']))
self.server.timeout = self.config['rpc_timeout'] if "rpc_timeout" in config else 1
self.register_function(self.list_functions, "list_functions")
def register_functions(self, **kwargs):
"""Registers functions with the server."""
for function_name in kwargs:
function = kwargs[function_name]
self.register_function(function, function_name)
def register_function(self, function, function_name):
"""Registers a single function with the server."""
self.server.register_function(function, function_name)
self.functions.append(function_name)
def list_functions(self):
"""An externally accessible function returning all the registered function names"""
return list(set(self.functions))
def poll(self):
"""Serves one request from the waiting requests and returns"""
self.server.handle_request()
def run(self):
"""Blocks execution and runs the server till the program shutdown"""
self.server.serve_forever()
def start_thread(self):
"""Starts self.run() in a separate thread"""
self.thread = threading.Thread(target=self.run)
self.thread.daemon = True
self.thread.start()
|
{
"content_hash": "f05c0c1ee35fbe7f7c59142ef151c7fe",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 93,
"avg_line_length": 37.36585365853659,
"alnum_prop": 0.6592689295039165,
"repo_name": "CRImier/pyLCI",
"id": "51f9bb8f978161fff883a7fa29faf4bf7ea1ed53",
"size": "1532",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "utils/rpc_api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "269884"
},
{
"name": "Shell",
"bytes": "961"
}
],
"symlink_target": ""
}
|
"""
sentry.web.forms.fields
~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import six
from django.forms.widgets import RadioFieldRenderer, TextInput, Widget
from django.forms.util import flatatt
from django.forms import (
Field, CharField, IntegerField, TypedChoiceField, ValidationError
)
from django.utils.encoding import force_unicode
from django.utils.html import format_html
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
from sentry.models import User
class CustomTypedChoiceField(TypedChoiceField):
# A patched version of TypedChoiceField which correctly validates a 0
# as a real input that may be invalid
# See https://github.com/django/django/pull/3774
def validate(self, value):
"""
Validates that the input is in self.choices.
"""
super(CustomTypedChoiceField, self).validate(value)
# this will validate itself twice due to the internal ChoiceField
# validation
if value is not None and not self.valid_value(value):
raise ValidationError(
self.error_messages['invalid_choice'],
code='invalid_choice',
params={'value': value},
)
class RangeInput(TextInput):
input_type = 'range'
class RadioFieldRenderer(RadioFieldRenderer):
"""
This is identical to Django's builtin widget, except that
it renders as a Bootstrap2 compatible widget. Would be great if
we didn't have to create this stupid code, but Django widgets are not
flexible.
"""
def render(self):
return mark_safe(u'\n<div class="inputs-list">%s</div>\n' % u'\n'.join([force_unicode(w) for w in self]))
class UserField(CharField):
class widget(TextInput):
def render(self, name, value, attrs=None):
if not attrs:
attrs = {}
if 'placeholder' not in attrs:
attrs['placeholder'] = 'username'
if isinstance(value, six.integer_types):
value = User.objects.get(id=value).username
return super(UserField.widget, self).render(name, value, attrs)
def clean(self, value):
value = super(UserField, self).clean(value)
if not value:
return None
try:
return User.objects.get(username=value)
except User.DoesNotExist:
raise ValidationError(_('Invalid username'))
class RangeField(IntegerField):
widget = RangeInput
def __init__(self, *args, **kwargs):
self.step_value = kwargs.pop('step_value', None)
super(RangeField, self).__init__(*args, **kwargs)
def widget_attrs(self, widget):
attrs = super(RangeField, self).widget_attrs(widget)
attrs.setdefault('min', self.min_value)
attrs.setdefault('max', self.max_value)
attrs.setdefault('step', self.step_value)
return attrs
class ReadOnlyTextWidget(Widget):
def render(self, name, value, attrs):
final_attrs = self.build_attrs(attrs)
if not value:
value = mark_safe("<em>%s</em>" % _("Not set"))
return format_html("<div{0}>{1}</div>", flatatt(final_attrs), value)
class ReadOnlyTextField(Field):
widget = ReadOnlyTextWidget
def __init__(self, *args, **kwargs):
kwargs.setdefault("required", False)
super(ReadOnlyTextField, self).__init__(*args, **kwargs)
def bound_data(self, data, initial):
# Always return initial because the widget doesn't
# render an input field.
return initial
|
{
"content_hash": "f49d2a0e338c1e56026a9e7bbd2ce2a7",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 113,
"avg_line_length": 33.008849557522126,
"alnum_prop": 0.6445040214477212,
"repo_name": "camilonova/sentry",
"id": "b8722edf59b9ee158677fe845996d925604416d5",
"size": "3730",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/sentry/web/forms/fields.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "575532"
},
{
"name": "JavaScript",
"bytes": "608497"
},
{
"name": "Makefile",
"bytes": "7296"
},
{
"name": "Python",
"bytes": "4505014"
},
{
"name": "Shell",
"bytes": "4106"
}
],
"symlink_target": ""
}
|
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('ebooks', '0047_auto_20190605_0743'),
]
operations = [
migrations.RemoveField(
model_name='section',
name='super_title',
),
]
|
{
"content_hash": "b04b95a235392472eb3a64165c135888",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 46,
"avg_line_length": 18.933333333333334,
"alnum_prop": 0.5633802816901409,
"repo_name": "flavoi/diventi",
"id": "ffa5980e33f54ce60d2a7add14a6715e997adec5",
"size": "333",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "diventi/ebooks/migrations/0048_remove_section_super_title.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "385265"
},
{
"name": "Procfile",
"bytes": "46"
},
{
"name": "Python",
"bytes": "826530"
}
],
"symlink_target": ""
}
|
"""
Support for the Broadlink RM2 Pro (only temperature) and A1 devices.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.broadlink/
"""
from datetime import timedelta
import binascii
import logging
import socket
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (CONF_HOST, CONF_MAC,
CONF_MONITORED_CONDITIONS,
CONF_NAME, TEMP_CELSIUS, CONF_TIMEOUT)
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['broadlink==0.3']
_LOGGER = logging.getLogger(__name__)
CONF_UPDATE_INTERVAL = 'update_interval'
DEVICE_DEFAULT_NAME = 'Broadlink sensor'
DEFAULT_TIMEOUT = 10
SENSOR_TYPES = {
'temperature': ['Temperature', TEMP_CELSIUS],
'air_quality': ['Air Quality', ' '],
'humidity': ['Humidity', '%'],
'light': ['Light', ' '],
'noise': ['Noise', ' ']
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_NAME, default=DEVICE_DEFAULT_NAME): vol.Coerce(str),
vol.Optional(CONF_MONITORED_CONDITIONS, default=[]):
vol.All(cv.ensure_list, [vol.In(SENSOR_TYPES)]),
vol.Optional(CONF_UPDATE_INTERVAL, default=timedelta(seconds=300)): (
vol.All(cv.time_period, cv.positive_timedelta)),
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_MAC): cv.string,
vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): cv.positive_int
})
# pylint: disable=unused-argument
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Setup the Broadlink device sensors."""
mac = config.get(CONF_MAC).encode().replace(b':', b'')
mac_addr = binascii.unhexlify(mac)
broadlink_data = BroadlinkData(
config.get(CONF_UPDATE_INTERVAL),
config.get(CONF_HOST),
mac_addr, config.get(CONF_TIMEOUT))
dev = []
for variable in config[CONF_MONITORED_CONDITIONS]:
dev.append(BroadlinkSensor(
config.get(CONF_NAME),
broadlink_data,
variable))
add_devices(dev)
class BroadlinkSensor(Entity):
"""Representation of a Broadlink device sensor."""
def __init__(self, name, broadlink_data, sensor_type):
"""Initialize the sensor."""
self._name = "%s %s" % (name, SENSOR_TYPES[sensor_type][0])
self._state = None
self._type = sensor_type
self._broadlink_data = broadlink_data
self._unit_of_measurement = SENSOR_TYPES[sensor_type][1]
self.update()
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit this state is expressed in."""
return self._unit_of_measurement
def update(self):
"""Get the latest data from the sensor."""
self._broadlink_data.update()
if self._broadlink_data.data is None:
return
self._state = self._broadlink_data.data[self._type]
class BroadlinkData(object):
"""Representation of a Broadlink data object."""
def __init__(self, interval, ip_addr, mac_addr, timeout):
"""Initialize the data object."""
import broadlink
self.data = None
self._device = broadlink.a1((ip_addr, 80), mac_addr)
self._device.timeout = timeout
self.update = Throttle(interval)(self._update)
if not self._auth():
_LOGGER.error("Failed to connect to device.")
def _update(self, retry=2):
try:
self.data = self._device.check_sensors_raw()
except socket.timeout as error:
if retry < 1:
_LOGGER.error(error)
return
if not self._auth():
return
return self._update(max(0, retry-1))
def _auth(self, retry=2):
try:
auth = self._device.auth()
except socket.timeout:
auth = False
if not auth and retry > 0:
return self._auth(max(0, retry-1))
return auth
|
{
"content_hash": "4608d89e69dc2646f7552b450868a749",
"timestamp": "",
"source": "github",
"line_count": 134,
"max_line_length": 74,
"avg_line_length": 32.1865671641791,
"alnum_prop": 0.6234639462091351,
"repo_name": "eagleamon/home-assistant",
"id": "5fda261b61cd79ba1071404712546bea67896e9e",
"size": "4314",
"binary": false,
"copies": "6",
"ref": "refs/heads/dev",
"path": "homeassistant/components/sensor/broadlink.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1510047"
},
{
"name": "Python",
"bytes": "5066084"
},
{
"name": "Ruby",
"bytes": "379"
},
{
"name": "Shell",
"bytes": "14079"
}
],
"symlink_target": ""
}
|
from .base import * # NOQA
import logging.config
# For security and performance reasons, DEBUG is turned off
DEBUG = False
TEMPLATE_DEBUG = False
# Must mention ALLOWED_HOSTS in production!
# ALLOWED_HOSTS = ["Student_Council_Website.com"]
# Cache the templates in memory for speed-up
loaders = [
('django.template.loaders.cached.Loader', [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
]),
]
TEMPLATES[0]['OPTIONS'].update({"loaders": loaders})
TEMPLATES[0].update({"APP_DIRS": False})
# Define STATIC_ROOT for the collectstatic command
STATIC_ROOT = join(BASE_DIR, '..', 'site', 'static')
# Log everything to the logs directory at the top
LOGFILE_ROOT = join(dirname(BASE_DIR), 'logs')
# Reset logging
LOGGING_CONFIG = None
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format': "[%(asctime)s] %(levelname)s [%(pathname)s:%(lineno)s] %(message)s",
'datefmt': "%d/%b/%Y %H:%M:%S"
},
'simple': {
'format': '%(levelname)s %(message)s'
},
},
'handlers': {
'proj_log_file': {
'level': 'DEBUG',
'class': 'logging.FileHandler',
'filename': join(LOGFILE_ROOT, 'project.log'),
'formatter': 'verbose'
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'simple'
}
},
'loggers': {
'project': {
'handlers': ['proj_log_file'],
'level': 'DEBUG',
},
}
}
logging.config.dictConfig(LOGGING)
|
{
"content_hash": "c350c1f7665246eede199d9a2fba6699",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 90,
"avg_line_length": 26.96825396825397,
"alnum_prop": 0.5632725132430841,
"repo_name": "IE-NITK/NITK-Student-Council-Website",
"id": "58b679043bfa7c205ce7ca2531e4c277d7d16d78",
"size": "1827",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/Student_Council_Website/settings/production.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "206993"
},
{
"name": "HTML",
"bytes": "158348"
},
{
"name": "JavaScript",
"bytes": "268338"
},
{
"name": "Python",
"bytes": "56768"
}
],
"symlink_target": ""
}
|
"""This example demonstrates how to subscribe to topics with WAMP."""
import logging
import sys
from asphalt.core import ContainerComponent, Context, run_application
from asphalt.wamp.context import EventContext
logger = logging.getLogger(__name__)
def subscriber(ctx: EventContext, message: str):
logger.info('Received message from %s: %s', ctx.topic, message)
class SubscriberComponent(ContainerComponent):
async def start(self, ctx: Context):
self.add_component('wamp')
await super().start(ctx)
topic = sys.argv[1]
await ctx.wamp.subscribe(subscriber, topic)
logger.info('Subscribed to topic: %s', topic)
if len(sys.argv) < 2:
print('Usage: {} <topic>'.format(sys.argv[0]), file=sys.stderr)
sys.exit(1)
run_application(SubscriberComponent(), logging=logging.INFO)
|
{
"content_hash": "811ed72db367051ad64482d05f1eae8d",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 69,
"avg_line_length": 27.8,
"alnum_prop": 0.7050359712230215,
"repo_name": "asphalt-framework/asphalt-wamp",
"id": "485e5fd82a8c4298dfe0a783078b2ebf964667d2",
"size": "834",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/pubsub/subscriber.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "71440"
}
],
"symlink_target": ""
}
|
"""
<DefineSource>
@Date : Fri Nov 14 13:20:38 2014 \n
@Author : Erwan Ledoux \n\n
</DefineSource>
The Doer defines instances that are going to decorate a big family of classes in this framework.
Staying on the idea, that one module should associate
one class, now a decorated class by a Doer should have a NameStr that is
a DoStr and express also method a method with the name <DoStr>[0].lower()+<DoStr>[1:]
All the attributes that are controlling this method process are <DoingStr><MiddleStr><TypeStr>
and all the ones resetted during the method are <DoneStr><MiddleStr><TypeStr>.
This helps a lot for defining a fisrt level of objects that are acting like input-output controllers.
"""
#<DefineAugmentation>
import ShareYourSystem as SYS
BaseModuleStr="ShareYourSystem.Standards.Classors.Defaultor")
DecorationModule=BaseModule
SYS.setSubModule(globals())
#</DefineAugmentation>
#<ImportSpecificModules>
import collections
import inspect
import six
#</ImportSpecificModules>
#<DefineDoStrsList>
DoStrsList=["Doer","Do","Doing","Done"]
#<DefineDoStrsList>
#<DefineLocals>
DoingPrefixStr='_'
DoingDecorationStr='@'
DoingDoMethodStr='do_'
#</DefineLocals>
#<DefineFunctions>
def getDoerStrWithKeyStr(_KeyStr):
#Check
if len(_KeyStr)>0:
#Split the Str into words
WordStrsList=SYS.getWordStrsListWithStr(_KeyStr)
if len(WordStrsList)>0:
PrefixWordStr="".join(WordStrsList[:-1])
LastWordStr=WordStrsList[-1]
#debug
'''
print('Doer getDoerStrWithKeyStr')
print('PrefixWordStr is '+str(PrefixWordStr))
print('LastWordStr is '+str(LastWordStr))
print('')
'''
if LastWordStr[0] in ["P","p"] and LastWordStr[1:]=="roperty":
return PrefixWordStr+LastWordStr[0]+"ropertize"
#Default return
return _KeyStr+'er'
#Return ""
return ""
def getDoStrWithDoerStr(_DoerStr):
#Check
if len(_DoerStr)>0:
#Split the Str into words
WordStrsList=SYS.getWordStrsListWithStr(_DoerStr)
if len(WordStrsList)>0:
PrefixWordStr="".join(WordStrsList[:-1])
LastWordStr=WordStrsList[-1]
#debug
'''
print('Doer getDoStrWithDoerStr')
print('PrefixWordStr is '+str(PrefixWordStr))
print('LastWordStr is '+str(LastWordStr))
print('')
'''
if LastWordStr[0] in ["A","a"] and LastWordStr[1:]=="pplyier":
return PrefixWordStr+LastWordStr[0]+"pply"
elif LastWordStr[0] in ["M","m"] and LastWordStr[1:]=="ultiplier":
return PrefixWordStr+LastWordStr[0]+"ultiply"
elif LastWordStr[0] in ["A","a"] and LastWordStr[1:]=="ttributer":
return PrefixWordStr+LastWordStr[0]+"ttribute"
elif LastWordStr[0] in ["A","a"] and LastWordStr[1:]=="nalyzer":
return PrefixWordStr+LastWordStr[0]+"nalyze"
elif LastWordStr[0] in ["I","i"] and LastWordStr[1:]=="nstancer":
return PrefixWordStr+LastWordStr[0]+"nstance"
elif LastWordStr[0] in ["C","c"] and LastWordStr[1:]=="oncluder":
return PrefixWordStr+LastWordStr[0]+"onclude"
elif LastWordStr[0] in ["N","n"] and LastWordStr[1:]=="oder":
return PrefixWordStr+LastWordStr[0]+"ode"
elif LastWordStr[0] in ["S","s"] and LastWordStr[1:]=="tructurer":
return PrefixWordStr+LastWordStr[0]+"tructure"
elif LastWordStr[0] in ["M","m"] and LastWordStr[1:]=="erger":
return PrefixWordStr+LastWordStr[0]+"erge"
elif LastWordStr[0] in ["R","r"] and LastWordStr[1:]=="unner":
return PrefixWordStr+LastWordStr[0]+"un"
elif LastWordStr[0] in ["D","d"] and LastWordStr[1:]=="ynamizer":
return PrefixWordStr+LastWordStr[0]+"ynamize"
elif LastWordStr[0] in ["M","m"] and LastWordStr[1:]=="obilizer":
return PrefixWordStr+LastWordStr[0]+"obilize"
elif LastWordStr[0] in ["S","s"] and LastWordStr[1:]=="ettler":
return PrefixWordStr+LastWordStr[0]+"ettle"
elif LastWordStr[0] in ["S","s"] and LastWordStr[1:]=="aver":
return PrefixWordStr+LastWordStr[0]+"ave"
elif LastWordStr[0] in ["D","d"] and LastWordStr[1:]=="atabaser":
return PrefixWordStr+LastWordStr[0]+"atabase"
elif LastWordStr[0] in ["F","f"] and LastWordStr[1:]=="indoer":
return PrefixWordStr+LastWordStr[0]+"ind"
elif LastWordStr[0] in ["P","p"] and LastWordStr[1:]=="roducer":
return PrefixWordStr+LastWordStr[0]+"roduce"
elif LastWordStr[0] in ["S","s"] and LastWordStr[1:]=="imulater":
return PrefixWordStr+LastWordStr[0]+"imulate"
elif LastWordStr[0] in ["C","c"] and LastWordStr[1:]=="apturer":
return PrefixWordStr+LastWordStr[0]+"apture"
elif LastWordStr[0] in ["C","c"] and LastWordStr[1:]=="loser":
return PrefixWordStr+LastWordStr[0]+"lose"
elif LastWordStr[0] in ["F","f"] and LastWordStr[1:]=="igurer":
return PrefixWordStr+LastWordStr[0]+"igure"
elif LastWordStr[0] in ["M","m"] and LastWordStr[1:]=="oniter":
return PrefixWordStr+LastWordStr[0]+"onit"
elif LastWordStr[0] in ["O","o"] and LastWordStr[1:]=="bserver":
return PrefixWordStr+LastWordStr[0]+"bserve"
elif LastWordStr[0] in ["P","p"] and LastWordStr[1:]=="opulater":
return PrefixWordStr+LastWordStr[0]+"opulate"
elif LastWordStr[0] in ["C","c"] and LastWordStr[1:]=="oupler":
return PrefixWordStr+LastWordStr[0]+"ouple"
elif LastWordStr[0] in ["S","s"] and LastWordStr[1:]=="canner":
return PrefixWordStr+LastWordStr[0]+"can"
elif LastWordStr[0] in ["R","r"] and LastWordStr[1:]=="etriever":
return PrefixWordStr+LastWordStr[0]+"etrieve"
elif LastWordStr[0] in ["S","s"] and LastWordStr[1:]=="haper":
return PrefixWordStr+LastWordStr[0]+"hape"
elif LastWordStr[0] in ["H","h"] and LastWordStr[1:]=="ierarchizer":
return PrefixWordStr+LastWordStr[0]+"ierarchize"
elif LastWordStr[0] in ["E","e"] and LastWordStr[1:]=="xecuter":
return PrefixWordStr+LastWordStr[0]+"xecute"
elif LastWordStr[0] in ["R","r"] and LastWordStr[1:]=="ecuperater":
return PrefixWordStr+LastWordStr[0]+"ecuperate"
elif LastWordStr[0] in ["C","c"] and LastWordStr[1:]=="loner":
return PrefixWordStr+LastWordStr[0]+"lone"
elif LastWordStr[0] in ["T","t"] and LastWordStr[1:]=="abler":
return PrefixWordStr+LastWordStr[0]+"able"
elif LastWordStr[0] in ["I","i"] and LastWordStr[1:]=="temizer":
return PrefixWordStr+LastWordStr[0]+"temize"
elif LastWordStr[0] in ["U","u"] and LastWordStr[1:]=="pdater":
return PrefixWordStr+LastWordStr[0]+"pdate"
elif LastWordStr[0] in ["C","c"] and LastWordStr[1:]=="oupler":
return PrefixWordStr+LastWordStr[0]+"ouple"
elif LastWordStr[0] in ["P","p"] and LastWordStr[1:]=="oker":
return PrefixWordStr+LastWordStr[0]+"oke"
elif LastWordStr[0] in ["G","g"] and LastWordStr[1:]=="uider":
return PrefixWordStr+LastWordStr[0]+"uide"
elif LastWordStr[0] in ["A","a"] and LastWordStr[1:]=="xer":
return PrefixWordStr+LastWordStr[0]+"xe"
elif LastWordStr[0] in ["R","r"] and LastWordStr[1:]=="ater":
return PrefixWordStr+LastWordStr[0]+"ate"
elif LastWordStr[0] in ["S","s"] and LastWordStr[1:]=="harer":
return PrefixWordStr+LastWordStr[0]+"hare"
elif LastWordStr[0] in ["R","r"] and LastWordStr[1:]=="esetter":
return PrefixWordStr+LastWordStr[0]+"eset"
elif LastWordStr[0] in ["W","w"] and LastWordStr[1:]=="riter":
return PrefixWordStr+LastWordStr[0]+"rite"
elif LastWordStr[0] in ["R","r"] and LastWordStr[1:]=="eadmer":
return PrefixWordStr+LastWordStr[0]+"eadme"
elif LastWordStr[0] in ["J","j"] and LastWordStr[1:]=="oiner":
return PrefixWordStr+LastWordStr[0]+"oin"
elif LastWordStr[0] in ["R","r"] and LastWordStr[1:]=="outer":
return PrefixWordStr+LastWordStr[0]+"oute"
elif LastWordStr[0] in ["W","w"] and LastWordStr[1:]=="eaver":
return PrefixWordStr+LastWordStr[0]+"eave"
elif LastWordStr[0] in ["D","d"] and LastWordStr[1:]=="ebugger":
return PrefixWordStr+LastWordStr[0]+"ebug"
elif LastWordStr[0] in ["D","d"] and LastWordStr[1:]=="eleter":
return PrefixWordStr+LastWordStr[0]+"elete"
elif LastWordStr[0] in ["I","i"] and LastWordStr[1:]=="mitater":
return PrefixWordStr+LastWordStr[0]+"mitate"
elif LastWordStr[0] in ["M","m"] and LastWordStr[1:]=="oduler":
return PrefixWordStr+LastWordStr[0]+"odule"
elif LastWordStr[0] in ["S","s"] and LastWordStr[1:]=="torer":
return PrefixWordStr+LastWordStr[0]+"tore"
elif LastWordStr[0] in ["F","f"] and LastWordStr[1:]=="iler":
return PrefixWordStr+LastWordStr[0]+"ile"
elif LastWordStr[0] in ["P","p"] and LastWordStr[1:]=="rinter":
return '_'+PrefixWordStr+LastWordStr[0]+"rint"
elif LastWordStr[0] in ["F","f"] and LastWordStr[1:]=="eaturer":
return PrefixWordStr+LastWordStr[0]+"eature"
elif LastWordStr[0] in ["D","d"] and LastWordStr[1:]=="eriver":
return PrefixWordStr+LastWordStr[0]+"erive"
elif LastWordStr[0] in ["G","g"] and LastWordStr[1:]=="rabber":
return PrefixWordStr+LastWordStr[0]+"rab"
elif LastWordStr[0] in ["F","f"] and LastWordStr[1:]=="lattener":
return PrefixWordStr+LastWordStr[0]+"latten"
elif LastWordStr[0] in ["M","m"] and LastWordStr[1:]=="odulizer":
return PrefixWordStr+LastWordStr[0]+"odulize"
elif LastWordStr[0] in ["M","m"] and LastWordStr[1:]=="aker":
return PrefixWordStr+LastWordStr[0]+"ake"
elif LastWordStr[0] in ["F","f"] and LastWordStr[1:]=="indor":
return PrefixWordStr+LastWordStr[0]+"ind"
elif LastWordStr[0] in ["M","m"] and LastWordStr[1:]=="apper":
return PrefixWordStr+LastWordStr[0]+"ap"
elif LastWordStr[0] in ["F","f"] and LastWordStr[1:]=="unctor":
return PrefixWordStr+LastWordStr[0]+"unc"
elif LastWordStr[0] in ["S","s"] and LastWordStr[1:]=="etter":
return PrefixWordStr+LastWordStr[0]+"et"
elif LastWordStr[0] in ["G","g"] and LastWordStr[1:]=="etter":
return PrefixWordStr+LastWordStr[0]+"et"
elif LastWordStr[0] in ["P","p"] and LastWordStr[1:]=="ropertiser":
return PrefixWordStr+LastWordStr[0]+"ropertize"
#Default return
return _DoerStr[:-2]
#Return ""
return ""
def getDoerStrWithDoStr(_DoStr):
#Check
if len(_DoStr)>0:
#Split the Str into words
WordStrsList=SYS.getWordStrsListWithStr(_DoStr)
if len(WordStrsList)>0:
PrefixWordStr="".join(WordStrsList[:-1])
LastWordStr=WordStrsList[-1]
#debug
'''
print('Doer getDoerStrWithDoStr')
print('PrefixWordStr is '+str(PrefixWordStr))
print('LastWordStr is '+str(LastWordStr))
print('')
'''
if LastWordStr[0] in ["A","a"] and LastWordStr[1:]=="pply":
return PrefixWordStr+LastWordStr[0]+"pplyier"
elif LastWordStr[0] in ["M","m"] and LastWordStr[1:]=="ultiply":
return PrefixWordStr+LastWordStr[0]+"ultiplier"
elif LastWordStr[0] in ["N","n"] and LastWordStr[1:]=="ode":
return PrefixWordStr+LastWordStr[0]+"oder"
elif LastWordStr[0] in ["S","s"] and LastWordStr[1:]=="ave":
return PrefixWordStr+LastWordStr[0]+"aver"
elif LastWordStr[0] in ["C","c"] and LastWordStr[1:]=="apture":
return PrefixWordStr+LastWordStr[0]+"apturer"
elif LastWordStr[0] in ["R","r"] and LastWordStr[1:]=="eset":
return PrefixWordStr+LastWordStr[0]+"esetter"
elif LastWordStr[0] in ["C","c"] and LastWordStr[1:]=="ouple":
return PrefixWordStr+LastWordStr[0]+"oupler"
elif LastWordStr[0] in ["S","s"] and LastWordStr[1:]=="cann":
return PrefixWordStr+LastWordStr[0]+"canner"
elif LastWordStr[0] in ["F","f"] and LastWordStr[1:]=="igure":
return PrefixWordStr+LastWordStr[0]+"igurer"
elif LastWordStr[0] in ["C","c"] and LastWordStr[1:]=="lose":
return PrefixWordStr+LastWordStr[0]+"loser"
elif LastWordStr[0] in ["P","p"] and LastWordStr[1:]=="oke":
return PrefixWordStr+LastWordStr[0]+"oker"
elif LastWordStr[0] in ["R","r"] and LastWordStr[1:]=="un":
return PrefixWordStr+LastWordStr[0]+"unner"
elif LastWordStr[0] in ["C","c"] and LastWordStr[1:]=="lone":
return PrefixWordStr+LastWordStr[0]+"loner"
elif LastWordStr[0] in ["M","m"] and LastWordStr[1:]=="onit":
return PrefixWordStr+LastWordStr[0]+"oniter"
elif LastWordStr[0] in ["P","p"] and LastWordStr[1:]=="roduce":
return PrefixWordStr+LastWordStr[0]+"roducer"
elif LastWordStr[0] in ["O","o"] and LastWordStr[1:]=="bserve":
return PrefixWordStr+LastWordStr[0]+"bserver"
elif LastWordStr[0] in ["I","i"] and LastWordStr[1:]=="nstance":
return PrefixWordStr+LastWordStr[0]+"nstancer"
elif LastWordStr[0] in ["M","m"] and LastWordStr[1:]=="obilize":
return PrefixWordStr+LastWordStr[0]+"obilizer"
elif LastWordStr[0] in ["S","s"] and LastWordStr[1:]=="hape":
return PrefixWordStr+LastWordStr[0]+"haper"
elif LastWordStr[0] in ["R","r"] and LastWordStr[1:]=="oute":
return PrefixWordStr+LastWordStr[0]+"outer"
elif LastWordStr[0] in ["I","i"] and LastWordStr[1:]=="temize":
return PrefixWordStr+LastWordStr[0]+"temizer"
elif LastWordStr[0] in ["S","s"] and LastWordStr[1:]=="imulate":
return PrefixWordStr+LastWordStr[0]+"imulater"
elif LastWordStr[0] in ["P","p"] and LastWordStr[1:]=="opulate":
return PrefixWordStr+LastWordStr[0]+"opulater"
elif LastWordStr[0] in ["R","r"] and LastWordStr[1:]=="etrieve":
return PrefixWordStr+LastWordStr[0]+"etriever"
elif LastWordStr[0] in ["D","d"] and LastWordStr[1:]=="ynamize":
return PrefixWordStr+LastWordStr[0]+"ynamizer"
elif LastWordStr[0] in ["G","g"] and LastWordStr[1:]=="uide":
return PrefixWordStr+LastWordStr[0]+"uider"
elif LastWordStr[0] in ["R","r"] and LastWordStr[1:]=="ate":
return PrefixWordStr+LastWordStr[0]+"ater"
elif LastWordStr[0] in ["C","c"] and LastWordStr[1:]=="ouple":
return PrefixWordStr+LastWordStr[0]+"oupler"
elif LastWordStr[0] in ["S","s"] and LastWordStr[1:]=="ettle":
return PrefixWordStr+LastWordStr[0]+"ettler"
elif LastWordStr[0] in ["A","a"] and LastWordStr[1:]=="xe":
return PrefixWordStr+LastWordStr[0]+"xer"
elif LastWordStr[0] in ["J","j"] and LastWordStr[1:]=="oin":
return PrefixWordStr+LastWordStr[0]+"oiner"
elif LastWordStr[0] in ["W","w"] and LastWordStr[1:]=="eave":
return PrefixWordStr+LastWordStr[0]+"eaver"
elif LastWordStr[0] in ["R","r"] and LastWordStr[1:]=="ecuperate":
return PrefixWordStr+LastWordStr[0]+"ecuperater"
elif LastWordStr[0] in ["M","m"] and LastWordStr[1:]=="erge":
return PrefixWordStr+LastWordStr[0]+"erger"
elif LastWordStr[0] in ["C","c"] and LastWordStr[1:]=="onclude":
return PrefixWordStr+LastWordStr[0]+"oncluder"
elif LastWordStr[0] in ["H","h"] and LastWordStr[1:]=="ierarchize":
return PrefixWordStr+LastWordStr[0]+"ierarchizer"
elif LastWordStr[0] in ["A","a"] and LastWordStr[1:]=="nalyze":
return PrefixWordStr+LastWordStr[0]+"nalyzer"
elif LastWordStr[0] in ["F","f"] and LastWordStr[1:]=="ind":
return PrefixWordStr+LastWordStr[0]+"indoer"
elif LastWordStr[0] in ["S","s"] and LastWordStr[1:]=="tore":
return PrefixWordStr+LastWordStr[0]+"torer"
elif LastWordStr[0] in ["F","f"] and LastWordStr[1:]=="eature":
return PrefixWordStr+LastWordStr[0]+"eaturer"
elif LastWordStr[0] in ["D","d"] and LastWordStr[1:]=="atabase":
return PrefixWordStr+LastWordStr[0]+"atabaser"
elif LastWordStr[0] in ["S","s"] and LastWordStr[1:]=="tructure":
return PrefixWordStr+LastWordStr[0]+"tructurer"
elif LastWordStr[0] in ["E","e"] and LastWordStr[1:]=="xecute":
return PrefixWordStr+LastWordStr[0]+"xecuter"
elif LastWordStr[0] in ["T","t"] and LastWordStr[1:]=="able":
return PrefixWordStr+LastWordStr[0]+"abler"
elif LastWordStr[0] in ["U","u"] and LastWordStr[1:]=="pdate":
return PrefixWordStr+LastWordStr[0]+"pdater"
elif LastWordStr[0] in ["M","m"] and LastWordStr[1:]=="odule":
return PrefixWordStr+LastWordStr[0]+"oduler"
elif LastWordStr[0] in ["R","r"] and LastWordStr[1:]=="eadme":
return PrefixWordStr+LastWordStr[0]+"eadmer"
elif LastWordStr[0] in ["A","a"] and LastWordStr[1:]=="ttribute":
return PrefixWordStr+LastWordStr[0]+"ttributer"
elif LastWordStr[0] in ["S","s"] and LastWordStr[1:]=="hare":
return PrefixWordStr+LastWordStr[0]+"harer"
elif LastWordStr[0] in ["I","i"] and LastWordStr[1:]=="mitate":
return PrefixWordStr+LastWordStr[0]+"mitater"
elif LastWordStr[0] in ["D","d"] and LastWordStr[1:]=="elete":
return PrefixWordStr+LastWordStr[0]+"eleter"
elif LastWordStr[0] in ["F","f"] and LastWordStr[1:]=="ile":
return PrefixWordStr+LastWordStr[0]+"iler"
elif LastWordStr[0] in ["D","d"] and LastWordStr[1:]=="ebug":
return PrefixWordStr+LastWordStr[0]+"ebugger"
elif LastWordStr[0]=='_' and LastWordStr[1] in ["P","p"
] and LastWordStr[1:]=="rint":
return '_'+PrefixWordStr+LastWordStr[1]+"rinter"
elif LastWordStr[0] in ["G","g"] and LastWordStr[1:]=="rab":
return PrefixWordStr+LastWordStr[0]+"rabber"
elif LastWordStr[0] in ["D","d"] and LastWordStr[1:]=="erive":
return PrefixWordStr+LastWordStr[0]+"eriver"
elif LastWordStr[0] in ["M","m"] and LastWordStr[1:]=="odulize":
return PrefixWordStr+LastWordStr[0]+"odulizer"
elif LastWordStr[0] in ["F","f"] and LastWordStr[1:]=="latten":
return PrefixWordStr+LastWordStr[0]+"lattener"
elif LastWordStr[0] in ["M","m"] and LastWordStr[1:]=="ake":
return PrefixWordStr+LastWordStr[0]+"aker"
elif LastWordStr[0] in ["F","f"] and LastWordStr[1:]=="ind":
return PrefixWordStr+LastWordStr[0]+"indor"
elif LastWordStr[0] in ["M","m"] and LastWordStr[1:]=="ap":
return PrefixWordStr+LastWordStr[0]+"apper"
elif LastWordStr[0] in ["F","f"] and LastWordStr[1:]=="unc":
return PrefixWordStr+LastWordStr[0]+"unctor"
elif LastWordStr[0] in ["S","s"] and LastWordStr[1:]=="et":
return PrefixWordStr+LastWordStr[0]+"etter"
elif LastWordStr[0] in ["G","g"] and LastWordStr[1:]=="et":
return PrefixWordStr+LastWordStr[0]+"etter"
elif LastWordStr[0] in ["P","p"] and LastWordStr[1:]=="ropertize":
return PrefixWordStr+LastWordStr[0]+"ropertiser"
#Default return
if _DoStr[-1]!='e':
return _DoStr+'er'
else:
return _DoStr+'r'
#Return ""
return ""
def getDoneStrWithDoStr(_DoStr):
#Check
if len(_DoStr)>0:
#Split the Str into words
WordStrsList=SYS.getWordStrsListWithStr(_DoStr)
if len(WordStrsList)>0:
PrefixWordStr="".join(WordStrsList[:-1])
LastWordStr=WordStrsList[-1]
#debug
'''
print('Doer getDoneStrWithDoStr')
print('PrefixWordStr is '+str(PrefixWordStr))
print('LastWordStr is '+str(LastWordStr))
print('')
'''
if LastWordStr[0] in ["A","a"] and LastWordStr[1:]=='pply':
return PrefixWordStr+LastWordStr[0]+'pplied'
elif LastWordStr[0] in ["M","m"] and LastWordStr[1:]=="ultiply":
return PrefixWordStr+LastWordStr[0]+"ultiplied"
elif LastWordStr[0]=='_' and LastWordStr[1] in ["P","p"
] and LastWordStr[1:]=="rint":
return PrefixWordStr+LastWordStr[0]+"rinted"
elif LastWordStr[0] in ["M","m"] and LastWordStr[1:]=="odulize":
return PrefixWordStr+LastWordStr[0]+"odulized"
elif LastWordStr[0] in ["T","t"] and LastWordStr[1:]=="able":
return PrefixWordStr+LastWordStr[0]+"abled"
elif LastWordStr[0] in ["C","c"] and LastWordStr[1:]=="lone":
return PrefixWordStr+LastWordStr[0]+"loned"
elif LastWordStr[0] in ["A","a"] and LastWordStr[1:]=="ttention":
return PrefixWordStr+LastWordStr[0]+"ttentioned"
elif LastWordStr[0] in ["O","o"] and LastWordStr[1:]=="bserve":
return PrefixWordStr+LastWordStr[0]+"bserved"
elif LastWordStr[0] in ["R","r"] and LastWordStr[1:]=="oute":
return PrefixWordStr+LastWordStr[0]+"outed"
elif LastWordStr[0] in ["R","r"] and LastWordStr[1:]=="eset":
return PrefixWordStr+LastWordStr[0]+"esetted"
elif LastWordStr[0] in ["C","c"] and LastWordStr[1:]=="apture":
return PrefixWordStr+LastWordStr[0]+"aptured"
elif LastWordStr[0] in ["S","s"] and LastWordStr[1:]=="ave":
return PrefixWordStr+LastWordStr[0]+"aved"
elif LastWordStr[0] in ["G","g"] and LastWordStr[1:]=="uide":
return PrefixWordStr+LastWordStr[0]+"uided"
elif LastWordStr[0] in ["F","f"] and LastWordStr[1:]=="igure":
return PrefixWordStr+LastWordStr[0]+"igured"
elif LastWordStr[0] in ["M","m"] and LastWordStr[1:]=="onit":
return PrefixWordStr+LastWordStr[0]+"onitered"
elif LastWordStr[0] in ["C","c"] and LastWordStr[1:]=="ouple":
return PrefixWordStr+LastWordStr[0]+"oupled"
elif LastWordStr[0] in ["R","r"] and LastWordStr[1:]=="un":
return PrefixWordStr+LastWordStr[0]+"unned"
elif LastWordStr[0] in ["M","m"] and LastWordStr[1:]=="obilize":
return PrefixWordStr+LastWordStr[0]+"obilized"
elif LastWordStr[0] in ["R","r"] and LastWordStr[1:]=="ecuperate":
return PrefixWordStr+LastWordStr[0]+"ecuperated"
elif LastWordStr[0] in ["W","w"] and LastWordStr[1:]=="eave":
return PrefixWordStr+LastWordStr[0]+"eaved"
elif LastWordStr[0] in ["F","f"] and LastWordStr[1:]=="eature":
return PrefixWordStr+LastWordStr[0]+"eatured"
elif LastWordStr[0] in ["R","r"] and LastWordStr[1:]=="ate":
return PrefixWordStr+LastWordStr[0]+"ated"
elif LastWordStr[0] in ["S","s"] and LastWordStr[1:]=="cann":
return PrefixWordStr+LastWordStr[0]+"canner"
elif LastWordStr[0] in ["S","s"] and LastWordStr[1:]=="hape":
return PrefixWordStr+LastWordStr[0]+"haped"
elif LastWordStr[0] in ["J","j"] and LastWordStr[1:]=="oin":
return PrefixWordStr+LastWordStr[0]+"oined"
elif LastWordStr[0] in ["H","h"] and LastWordStr[1:]=="ierarchize":
return PrefixWordStr+LastWordStr[0]+"ierarchized"
elif LastWordStr[0] in ["I","i"] and LastWordStr[1:]=="nstance":
return PrefixWordStr+LastWordStr[0]+"nstanced"
elif LastWordStr[0] in ["R","r"] and LastWordStr[1:]=="etrieve":
return PrefixWordStr+LastWordStr[0]+"etrieved"
elif LastWordStr[0] in ["S","s"] and LastWordStr[1:]=="tore":
return PrefixWordStr+LastWordStr[0]+"tored"
elif LastWordStr[0] in ["M","m"] and LastWordStr[1:]=="erge":
return PrefixWordStr+LastWordStr[0]+"erged"
elif LastWordStr[0] in ["S","s"] and LastWordStr[1:]=="ettle":
return PrefixWordStr+LastWordStr[0]+"ettled"
elif LastWordStr[0] in ["U","u"] and LastWordStr[1:]=="pdate":
return PrefixWordStr+LastWordStr[0]+"pdated"
elif LastWordStr[0] in ["S","s"] and LastWordStr[1:]=="tructure":
return PrefixWordStr+LastWordStr[0]+"tructured"
elif LastWordStr[0] in ["D","d"] and LastWordStr[1:]=="atabase":
return PrefixWordStr+LastWordStr[0]+"atabased"
elif LastWordStr[0] in ["A","a"] and LastWordStr[1:]=="nalyze":
return PrefixWordStr+LastWordStr[0]+"nalyzed"
elif LastWordStr[0] in ["E","e"] and LastWordStr[1:]=="xecute":
return PrefixWordStr+LastWordStr[0]+"xecuted"
elif LastWordStr[0] in ["N","n"] and LastWordStr[1:]=="ode":
return PrefixWordStr+LastWordStr[0]+"oded"
elif LastWordStr[0] in ["R","r"] and LastWordStr[1:]=="eadme":
return PrefixWordStr+LastWordStr[0]+"eadmed"
elif LastWordStr[0] in ["C","c"] and LastWordStr[1:]=="onclude":
return PrefixWordStr+LastWordStr[0]+"oncluded"
elif LastWordStr[0] in ["C","c"] and LastWordStr[1:]=="lose":
return PrefixWordStr+LastWordStr[0]+"losed"
elif LastWordStr[0] in ["S","s"] and LastWordStr[1:]=="hare":
return PrefixWordStr+LastWordStr[0]+"hared"
elif LastWordStr[0] in ["A","a"] and LastWordStr[1:]=="ttribute":
return PrefixWordStr+LastWordStr[0]+"ttributed"
elif LastWordStr[0] in ["F","f"] and LastWordStr[1:]=="ile":
return PrefixWordStr+LastWordStr[0]+"iled"
elif LastWordStr[0] in ["D","d"] and LastWordStr[1:]=="ebug":
return PrefixWordStr+LastWordStr[0]+"ebugged"
elif LastWordStr[0] in ["D","d"] and LastWordStr[1:]=="eleter":
return PrefixWordStr+LastWordStr[0]+"elete"
elif LastWordStr[0] in ["D","d"] and LastWordStr[1:]=="erive":
return PrefixWordStr+LastWordStr[0]+"erived"
elif LastWordStr[0] in ["G","g"] and LastWordStr[1:]=="rab":
return PrefixWordStr+LastWordStr[0]+"rabbed"
elif LastWordStr[0] in ["F","f"] and LastWordStr[1:]=="latten":
return PrefixWordStr+LastWordStr[0]+"lattened"
elif LastWordStr[0] in ["I","i"] and LastWordStr[1:]=='nit':
return PrefixWordStr+LastWordStr[0]+'nitiated'
elif LastWordStr[0] in ["B","b"] and LastWordStr[1:]=="rian":
return PrefixWordStr+LastWordStr[0]+"rianed"
elif LastWordStr[0] in ["C","c"] and LastWordStr[1:]=="ondition":
return PrefixWordStr+LastWordStr[0]+"onditioned"
elif LastWordStr[0] in ["M","m"] and LastWordStr[1:]=="ap":
return PrefixWordStr+LastWordStr[0]+"apped"
elif LastWordStr[0] in ["F","f"] and LastWordStr[1:]=='unc':
return PrefixWordStr+LastWordStr[0]+'uncted'
elif LastWordStr[0] in ['D','d']and LastWordStr[1:]=='o':
return PrefixWordStr+LastWordStr[0]+'one'
elif LastWordStr[0] in ['P','p'] and LastWordStr[1:]=='arameter':
return PrefixWordStr+LastWordStr[0]+'arameterized'
elif LastWordStr[0] in ['F','f'] and LastWordStr[1:]=='ind':
return PrefixWordStr+LastWordStr[0]+'ound'
elif LastWordStr[0] in ['M','m'] and LastWordStr[1:]=='ake':
return PrefixWordStr+LastWordStr[0]+'ade'
elif LastWordStr[0] in ['F','f'] and LastWordStr[1:]=='ind':
return PrefixWordStr+LastWordStr[0]+'ound'
elif LastWordStr[0] in ['S','s'] and LastWordStr[1:]=='et':
return PrefixWordStr+LastWordStr[0]+'etted'
elif LastWordStr[0] in ['G','g'] and LastWordStr[1:]=='et':
return PrefixWordStr+LastWordStr[0]+'etted'
#Default return
if _DoStr[-1] in ['n']:
return _DoStr+_DoStr[-1]+"ed"
elif _DoStr[-1] not in ['e','y']:
return _DoStr+"ed"
elif _DoStr[-1]=='y':
return _DoStr[:-1]+'ied'
else:
return _DoStr[:-1]+'ed'
#Return ""
return ""
def getDoingStrWithDoneStr(_DoneStr):
#Check
if len(_DoneStr)>0:
#Split the Str into words
WordStrsList=SYS.getWordStrsListWithStr(_DoneStr)
if len(WordStrsList)>0:
PrefixWordStr="".join(WordStrsList[:-1])
LastWordStr=WordStrsList[-1]
#debug
'''
print('Doer getDoingStrWithDoneStr')
print('PrefixWordStr is '+str(PrefixWordStr))
print('LastWordStr is '+str(LastWordStr))
print('')
'''
if LastWordStr[0] in ['M','m'] and LastWordStr[1:]=='ade':
return PrefixWordStr+LastWordStr[0]+'aking'
elif LastWordStr[0] in ['F','f'] and LastWordStr[1:]=='ound':
return PrefixWordStr+LastWordStr[0]+'inding'
elif LastWordStr[0] in ["F","f"] and LastWordStr[1:]=="lattened":
return PrefixWordStr+LastWordStr[0]+"lattening"
elif LastWordStr[0] in ["D","d"] and LastWordStr[1:]=="erived":
return PrefixWordStr+LastWordStr[0]+"eriving"
elif LastWordStr[0] in ["M","m"] and LastWordStr[1:]=="ultiplied":
return PrefixWordStr+LastWordStr[0]+"ultiplying"
elif LastWordStr[0] in ["M","m"] and LastWordStr[1:]=="odulized":
return PrefixWordStr+LastWordStr[0]+"odulizing"
elif LastWordStr[0] in ["G","g"] and LastWordStr[1:]=="rabbed":
return PrefixWordStr+LastWordStr[0]+"rabbing"
#Default return
return _DoneStr[:-2]+"ing" if _DoneStr[-3]!='i' else _DoneStr[:-3]+'ying'
#Return ""
return ""
def getDoStrWithDoneStr(_DoneStr):
#Check
if len(_DoneStr)>0:
if _DoneStr=='Parameterized':
return 'Parameter'
elif _DoneStr=='Found':
return 'Find'
elif len(_DoneStr)>0:
if _DoneStr[-3] in ['y']:
return _DoneStr[:-3]+'y'
elif _DoneStr in ['Structured']:
return _DoneStr[:-1]
#Return ""
return ""
#</DefineFunctions>
#<DefineClass>
@DecorationClass()
class DoerClass(BaseClass):
def default_init(self,
_DoClass=None,
_DoingGetBool=False,
**_KwargVariablesDict
):
#Call the parent init method
BaseClass.__init__(self,**_KwargVariablesDict)
def __call__(self,_Class):
#debug
'''
print('Doer l.247 __call__ method')
print('_Class is ',_Class)
print('')
'''
#Call the parent init method
BaseClass.__call__(self,_Class)
#Do
self.do(_Class)
#Debug
'''
print('do is done')
print('')
'''
#Return
return _Class
def do(self,_Class):
#set
self.DoClass=_Class
#debug
'''
print("Doer l.337 : self.DoClass is ",self.DoClass)
print('')
'''
#alias
DoClass=self.DoClass
#Definition
DoerStr=DoClass.NameStr
DoStr=getDoStrWithDoerStr(DoerStr)
DoMethodStr=DoStr[0].lower()+DoStr[1:] if DoStr[0]!='_' else '_'+DoStr[1].lower()+DoStr[2:]
DoneStr=getDoneStrWithDoStr(DoStr if DoStr[0]!='_' else DoStr[1:])
DoingStr=getDoingStrWithDoneStr(DoneStr)
LocalVariablesDict=vars()
#debug
print('Doer l.132 : DoerStr is '+DoerStr)
print('DoStr is '+DoStr)
print('DoMethodStr is '+DoMethodStr)
print('DoingStr is '+DoingStr)
print('DoneStr is '+DoneStr)
print('')
#set
map(
lambda __KeyStr:
setattr(DoClass,__KeyStr,LocalVariablesDict[__KeyStr]),
['DoerStr','DoStr','DoneStr','DoingStr']
)
#set a lists that will contain the tempory setting items during a call of the <do> method in the instance
DoClass.DoneAttributesOrderedDict=collections.OrderedDict()
DoClass.DoneNotAttributesOrderedDict=collections.OrderedDict()
#Check
if hasattr(DoClass,'DefaultAttributeItemTuplesList'):
#Debug
'''
print('Doer l.383')
print('DoClass.DefaultAttributeItemTuplesList is ',_Class.DefaultAttributeItemTuplesList)
print('')
'''
#Check for doing and done keyStrs
DoClass.DoneAttributeVariablesOrderedDict=collections.OrderedDict(SYS._filter(
lambda __DefaultAttributeTuple:
__DefaultAttributeTuple[0].startswith(DoneStr),
DoClass.DefaultAttributeItemTuplesList
))
DoClass.DoingAttributeVariablesOrderedDict=collections.OrderedDict(SYS._filter(
lambda __DefaultAttributeTuple:
__DefaultAttributeTuple[0].startswith(DoingStr),
DoClass.DefaultAttributeItemTuplesList
))
#Definition
DoMethodKeyStr=DoingDoMethodStr+DoMethodStr
#Debug
'''
print('Doer l.401')
print('DoClass.DoneAttributeVariablesOrderedDict is ',DoClass.DoneAttributeVariablesOrderedDict)
print('DoClass.DoingAttributeVariablesOrderedDict is ',DoClass.DoingAttributeVariablesOrderedDict)
print('DoMethodKeyStr is ',DoMethodKeyStr)
print('')
'''
#Check
if hasattr(DoClass,DoMethodKeyStr):
#Debug
'''
print('There is a DoMethod here already')
print('')
'''
#Get
DoneUnboundFunction=getattr(
DoClass,
DoMethodKeyStr
).im_func
else:
#Debug
'''
print('There is no DoMethod here')
print('')
'''
#Define
def DefaultDoneUnboundFunction(
_InstanceVariable,
*_LiargVariablesList,
**_KwargVariablesDict
):
return _InstanceVariable
#Definition of a default function
DoneUnboundFunction=DefaultDoneUnboundFunction
#debug
'''
print('DoneUnboundFunction is '+str(DoneUnboundFunction))
print('')
'''
#Definition of an initiating method for the mutable done variables
def initDo(_InstanceVariable,*_LiargVariablesList,**_KwargVariablesDict):
#debug
'''
print('Doer l.393 inside of the function initDo')
print('InstanceVariable is ',_InstanceVariable)
print('_LiargVariablesList is ',_LiargVariablesList)
print('_KwargVariablesDict is ',_KwargVariablesDict)
print('')
'''
#Definition of the DoneKwargTuplesList
DoneKwargTuplesList=map(
lambda __KwargTuple:
(
DoingStr+DoingPrefixStr.join(
__KwargTuple[0].split(DoingPrefixStr)[1:]),
__KwargTuple[1]
) if __KwargTuple[0].startswith(DoingPrefixStr)
else __KwargTuple,
_KwargVariablesDict.items()
)
#Check
if len(DoneKwargTuplesList)>0:
#group by
[DoClass.DoneAttributeTuplesList,DoClass.DoneNotAttributeTupleItemsList]=SYS.groupby(
lambda __AttributeTuple:
hasattr(_InstanceVariable,__AttributeTuple[0]),
DoneKwargTuplesList
)
#set in the instance the corresponding kwarged arguments
map(
lambda __AttributeTuple:
#set direct explicit attributes
_InstanceVariable.__setattr__(*__AttributeTuple),
DoClass.DoneAttributeTuplesList
)
#Define
DoneKwargDict=dict(DoClass.DoneNotAttributeTupleItemsList)
else:
#Define
DoneKwargDict={}
#map
TypeClassesList=map(
lambda __DoneKeyStr:
SYS.getTypeClassWithTypeStr(
SYS.getTypeStrWithKeyStr(__DoneKeyStr)
) if getattr(_InstanceVariable,__DoneKeyStr
)==None else None.__class__,
_Class.DoingAttributeVariablesOrderedDict.keys(
)+_Class.DoneAttributeVariablesOrderedDict.keys()
)
#debug
'''
print('TypeClassesList is '+str(TypeClassesList))
print('')
'''
#set in the instance
map(
lambda __DoneKeyStr,__TypeClass:
setattr(
_InstanceVariable,
__DoneKeyStr,
__TypeClass()
) if __TypeClass!=None.__class__ else None,
DoClass.DoingAttributeVariablesOrderedDict.keys(
)+DoClass.DoneAttributeVariablesOrderedDict.keys(),
TypeClassesList
)
#debug
'''
print('Doer l.476 we are going to call the DoneUnboundFunction')
print('DoneUnboundFunction is ',DoneUnboundFunction)
print('')
'''
#Return the call of the defined do method
if len(DoneKwargDict)>0:
return DoneUnboundFunction(
_InstanceVariable,
*_LiargVariablesList,
**DoneKwargDict
)
else:
return DoneUnboundFunction(
_InstanceVariable,
*_LiargVariablesList
)
#Link
DoingMethodKeyStr='init'+DoClass.NameStr
setattr(DoClass,DoingMethodKeyStr,initDo)
#Definition of the ExecStr that will define the function
DoneExecStr="def DoerFunction(_InstanceVariable,"
DoneExecStr+=",".join(
map(
lambda __KeyStr:
DoingPrefixStr+__KeyStr+"=None",
DoClass.DoingAttributeVariablesOrderedDict.keys()
)
)
DoneExecStr+="," if DoneExecStr[-1]!="," else ""
DoneExecStr+="*_LiargVariablesList,"
DoneExecStr+="**_KwargVariablesDict):\n\t"
#set in the DoneAttributeTuplesList
#Debug part
#DoneExecStr+='\n\tprint("In DoerFunction with DoneUnboundFunction '+str(DoneUnboundFunction)+' ") '
'''
DoneExecStr+="\n\t#Debug"
DoneExecStr+=('\n\t'+';\n\t'.join(
map(
lambda __KeyStr:
'print("In DoerFunction, '+DoingPrefixStr+__KeyStr+' is ",'+DoingPrefixStr+__KeyStr+')',
_Class.DoingAttributeVariablesOrderedDict.keys()
)
)+";") if len(_Class.DoingAttributeVariablesOrderedDict.keys())>0 else ''
DoneExecStr+='\n\tprint("In DoerFunction, _LiargVariablesList is ",_LiargVariablesList);'
DoneExecStr+='\n\tprint("In DoerFunction, _KwargVariablesDict is ",_KwargVariablesDict);\n\t'
'''
#Set the doing variables
DoneExecStr+="\n\t#set the doing variables"
DoneExecStr+="\n\tDoneAttributesOrderedDict=_InstanceVariable.__class__.DoneAttributesOrderedDict"
DoneExecStr+="\n\tif '"+DoMethodStr+"' not in DoneAttributesOrderedDict:DoneAttributesOrderedDict['"+DoMethodStr+"']=SYS.collections.OrderedDict()"
DoneExecStr+="\n\tDoneSpecificAttributesOrderedDict=DoneAttributesOrderedDict['"+DoMethodStr+"']"
DoneExecStr+=("\n"+";\n".join(
map(
lambda __KeyStr:
"\n".join(
[
"\tif "+DoingPrefixStr+__KeyStr+"!=None:",
"\t\t_InstanceVariable."+__KeyStr+"="+DoingPrefixStr+__KeyStr,
"\t\tDoneSpecificAttributesOrderedDict['"+__KeyStr+"']="+DoingPrefixStr+__KeyStr,
"\telse:",
"\t\tDoneSpecificAttributesOrderedDict['"+__KeyStr+"']=None"
]
),
DoClass.DoingAttributeVariablesOrderedDict.keys()
)
)+";\n") if len(
DoClass.DoingAttributeVariablesOrderedDict.keys())>0 else ''
#Give to the class this part (it can serve after for imitating methods...)
DoneExecStrKeyStr=DoClass.NameStr+'DoneExecStr'
setattr(DoClass,DoneExecStrKeyStr,DoneExecStr)
#Call the initDo method
DoneExecStr+="\n" if DoneExecStr[-1]!="\n" else ""
DoneExecStr+="\n\t#return\n\t"
#Check
setattr(DoClass,'DoingGetBool',self.DoingGetBool)
if self.DoingGetBool==False:
#Return the _InstanceVariable if it is not a getter object
DoneExecStr+="_InstanceVariable.init"+DoClass.NameStr+"("
DoneExecStr+="*_LiargVariablesList,"
DoneExecStr+="**_KwargVariablesDict);\n\t"
DoneExecStr+="return _InstanceVariable\n"
else:
#Return the output of the do method
DoneExecStr+="return _InstanceVariable."+DoingMethodKeyStr+"("
DoneExecStr+="*_LiargVariablesList,"
DoneExecStr+="**_KwargVariablesDict)\n"
#debug
'''
print('DoneExecStr is ')
print(DoneExecStr)
print('')
'''
#exec
six.exec_(DoneExecStr)
#set the name
locals(
)['DoerFunction'
].__name__='DoerFunction'+DoingDecorationStr+DoMethodStr+' with DoneUnboundFunction '+str(DoneUnboundFunction)
locals(
)['DoerFunction'
].DoneUnboundFunction=DoneUnboundFunction
#Debug
'''
print('l. 907 Doer')
print('DoClass is ',DoClass)
print('DoMethodStr is ',DoMethodStr)
print('DoneUnboundFunction is ',DoneUnboundFunction)
print("locals()['DoerFunction'] is ",locals()['DoerFunction'])
print('')
'''
#set a specific name
setattr(DoClass,DoMethodStr,locals()['DoerFunction'])
#set a unspecific do method
setattr(DoClass,'setDoneVariables',locals()['DoerFunction'])
#Add to the KeyStrsList
DoClass.KeyStrsList+=[
'DoerStr',
'DoStr',
'DoneStr',
'DoingStr',
'DoneAttributeVariablesOrderedDict',
'DoingAttributeVariablesOrderedDict',
DoneExecStrKeyStr,
'DoingGetBool',
'DoneAttributeTuplesList',
'DoneNotAttributeTupleItemsList'
]
#</DefineClass>
|
{
"content_hash": "517815ec639b0f23da7939bc405a4ac3",
"timestamp": "",
"source": "github",
"line_count": 988,
"max_line_length": 150,
"avg_line_length": 38.22570850202429,
"alnum_prop": 0.6674610109354727,
"repo_name": "Ledoux/ShareYourSystem",
"id": "215e00ae475de5d1834ddf7bdbbe3b0542b3260b",
"size": "37791",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Pythonlogy/ShareYourSystem/Standards/Classors/Doer/Drafts/__init__OOO.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "86"
},
{
"name": "C++",
"bytes": "4244220"
},
{
"name": "CSS",
"bytes": "142769"
},
{
"name": "CoffeeScript",
"bytes": "37331"
},
{
"name": "HTML",
"bytes": "36211676"
},
{
"name": "JavaScript",
"bytes": "2147968"
},
{
"name": "Jupyter Notebook",
"bytes": "7930602"
},
{
"name": "Makefile",
"bytes": "6362"
},
{
"name": "PHP",
"bytes": "11096341"
},
{
"name": "Python",
"bytes": "5700092"
},
{
"name": "Ruby",
"bytes": "60"
},
{
"name": "Scala",
"bytes": "2412"
},
{
"name": "Shell",
"bytes": "2525"
},
{
"name": "Swift",
"bytes": "154"
},
{
"name": "TeX",
"bytes": "2556"
},
{
"name": "XSLT",
"bytes": "20993"
}
],
"symlink_target": ""
}
|
import logging
import sys
from django.conf import settings
def init_logging():
if settings.DO_LOGGING:
logging.basicConfig(filename=settings.LOG_FILENAME, level=logging.DEBUG, format="%(asctime)s - %(levelname)s - %(message)s"
)
def debug(msg):
logging.debug(msg)
logInitDone=False
if not logInitDone:
logInitDone = True
init_logging()
|
{
"content_hash": "a7ba9e2392b5537b2f1c54d5640584fe",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 131,
"avg_line_length": 22.6875,
"alnum_prop": 0.71900826446281,
"repo_name": "Woseseltops/signbank",
"id": "5247783d040635049fc82b02d424eec117655977",
"size": "364",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "signbank/log.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "439986"
},
{
"name": "HTML",
"bytes": "187182"
},
{
"name": "JavaScript",
"bytes": "709951"
},
{
"name": "PHP",
"bytes": "1052"
},
{
"name": "Python",
"bytes": "513314"
}
],
"symlink_target": ""
}
|
import datetime
try:
import json
except ImportError:
import simplejson as json
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
"""No-op to help upgrading from CDH 4.7.0 to CDH 5.x"""
def backwards(self, orm):
"""No-op to help downgrade CDH 5.x to CDH 4.7.0"""
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'oozie.bundle': {
'Meta': {'object_name': 'Bundle', '_ormbases': ['oozie.Job']},
'coordinators': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['oozie.Coordinator']", 'through': "orm['oozie.BundledCoordinator']", 'symmetrical': 'False'}),
'job_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Job']", 'unique': 'True', 'primary_key': 'True'}),
'kick_off_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 4, 30, 18, 44, 38, 149068)'})
},
'oozie.bundledcoordinator': {
'Meta': {'object_name': 'BundledCoordinator'},
'bundle': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oozie.Bundle']"}),
'coordinator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oozie.Coordinator']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parameters': ('django.db.models.fields.TextField', [], {'default': '\'[{"name":"oozie.use.system.libpath","value":"true"}]\''})
},
'oozie.coordinator': {
'Meta': {'object_name': 'Coordinator', '_ormbases': ['oozie.Job']},
'concurrency': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'end': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 5, 3, 18, 44, 38, 145158)'}),
'execution': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'frequency_number': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
'frequency_unit': ('django.db.models.fields.CharField', [], {'default': "'days'", 'max_length': '20'}),
'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Job']", 'unique': 'True', 'primary_key': 'True'}),
'start': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 4, 30, 18, 44, 38, 145114)'}),
'throttle': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'timeout': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'timezone': ('django.db.models.fields.CharField', [], {'default': "'America/Los_Angeles'", 'max_length': '24'}),
'workflow': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oozie.Workflow']", 'null': 'True'})
},
'oozie.datainput': {
'Meta': {'object_name': 'DataInput'},
'coordinator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oozie.Coordinator']"}),
'dataset': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Dataset']", 'unique': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'})
},
'oozie.dataoutput': {
'Meta': {'object_name': 'DataOutput'},
'coordinator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oozie.Coordinator']"}),
'dataset': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Dataset']", 'unique': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'})
},
'oozie.dataset': {
'Meta': {'object_name': 'Dataset'},
'advanced_end_instance': ('django.db.models.fields.CharField', [], {'default': "'0'", 'max_length': '128', 'blank': 'True'}),
'advanced_start_instance': ('django.db.models.fields.CharField', [], {'default': "'0'", 'max_length': '128'}),
'coordinator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oozie.Coordinator']"}),
'description': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
'done_flag': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64', 'blank': 'True'}),
'frequency_number': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
'frequency_unit': ('django.db.models.fields.CharField', [], {'default': "'days'", 'max_length': '20'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance_choice': ('django.db.models.fields.CharField', [], {'default': "'default'", 'max_length': '10'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'start': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 4, 30, 18, 44, 38, 145988)'}),
'timezone': ('django.db.models.fields.CharField', [], {'default': "'America/Los_Angeles'", 'max_length': '24'}),
'uri': ('django.db.models.fields.CharField', [], {'default': "'/data/${YEAR}${MONTH}${DAY}'", 'max_length': '1024'})
},
'oozie.decision': {
'Meta': {'object_name': 'Decision'},
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'})
},
'oozie.decisionend': {
'Meta': {'object_name': 'DecisionEnd'},
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'})
},
'oozie.distcp': {
'Meta': {'object_name': 'DistCp'},
'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'params': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'prepares': ('django.db.models.fields.TextField', [], {'default': "'[]'"})
},
'oozie.email': {
'Meta': {'object_name': 'Email'},
'body': ('django.db.models.fields.TextField', [], {'default': "''"}),
'cc': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'subject': ('django.db.models.fields.TextField', [], {'default': "''"}),
'to': ('django.db.models.fields.TextField', [], {'default': "''"})
},
'oozie.end': {
'Meta': {'object_name': 'End'},
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'})
},
'oozie.fork': {
'Meta': {'object_name': 'Fork'},
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'})
},
'oozie.fs': {
'Meta': {'object_name': 'Fs'},
'chmods': ('django.db.models.fields.TextField', [], {'default': "'[]'", 'blank': 'True'}),
'deletes': ('django.db.models.fields.TextField', [], {'default': "'[]'", 'blank': 'True'}),
'mkdirs': ('django.db.models.fields.TextField', [], {'default': "'[]'", 'blank': 'True'}),
'moves': ('django.db.models.fields.TextField', [], {'default': "'[]'", 'blank': 'True'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'touchzs': ('django.db.models.fields.TextField', [], {'default': "'[]'", 'blank': 'True'})
},
'oozie.generic': {
'Meta': {'object_name': 'Generic'},
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'xml': ('django.db.models.fields.TextField', [], {'default': "''"})
},
'oozie.history': {
'Meta': {'object_name': 'History'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'job': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oozie.Job']"}),
'oozie_job_id': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'properties': ('django.db.models.fields.TextField', [], {}),
'submission_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'submitter': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'oozie.hive': {
'Meta': {'object_name': 'Hive'},
'archives': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'files': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_properties': ('django.db.models.fields.TextField', [], {'default': '\'[{"name":"oozie.hive.defaults","value":"hive-site.xml"}]\''}),
'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'params': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'prepares': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'script_path': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'oozie.java': {
'Meta': {'object_name': 'Java'},
'archives': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'args': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'capture_output': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'files': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'jar_path': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'java_opts': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
'main_class': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'prepares': ('django.db.models.fields.TextField', [], {'default': "'[]'"})
},
'oozie.job': {
'Meta': {'object_name': 'Job'},
'deployment_dir': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_shared': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True', 'blank': 'True'}),
'is_trashed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True', 'blank': 'True'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'parameters': ('django.db.models.fields.TextField', [], {'default': '\'[{"name":"oozie.use.system.libpath","value":"true"}]\''}),
'schema_version': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'oozie.join': {
'Meta': {'object_name': 'Join'},
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'})
},
'oozie.kill': {
'Meta': {'object_name': 'Kill'},
'message': ('django.db.models.fields.CharField', [], {'default': "'Action failed, error message[${wf:errorMessage(wf:lastErrorNode())}]'", 'max_length': '256'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'})
},
'oozie.link': {
'Meta': {'object_name': 'Link'},
'child': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'parent_node'", 'to': "orm['oozie.Node']"}),
'comment': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'child_node'", 'to': "orm['oozie.Node']"})
},
'oozie.mapreduce': {
'Meta': {'object_name': 'Mapreduce'},
'archives': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'files': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'jar_path': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True'}),
'prepares': ('django.db.models.fields.TextField', [], {'default': "'[]'"})
},
'oozie.node': {
'Meta': {'object_name': 'Node'},
'children': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'parents'", 'symmetrical': 'False', 'through': "orm['oozie.Link']", 'to': "orm['oozie.Node']"}),
'description': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'node_type': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'workflow': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oozie.Workflow']"})
},
'oozie.pig': {
'Meta': {'object_name': 'Pig'},
'archives': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'files': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'params': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'prepares': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'script_path': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'oozie.shell': {
'Meta': {'object_name': 'Shell'},
'archives': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'capture_output': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'command': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'files': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'params': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'prepares': ('django.db.models.fields.TextField', [], {'default': "'[]'"})
},
'oozie.sqoop': {
'Meta': {'object_name': 'Sqoop'},
'archives': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'files': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'params': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'prepares': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'script_path': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'})
},
'oozie.ssh': {
'Meta': {'object_name': 'Ssh'},
'capture_output': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'command': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'host': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'params': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'user': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'oozie.start': {
'Meta': {'object_name': 'Start'},
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True'})
},
'oozie.streaming': {
'Meta': {'object_name': 'Streaming'},
'archives': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'files': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'mapper': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'reducer': ('django.db.models.fields.CharField', [], {'max_length': '512'})
},
'oozie.subworkflow': {
'Meta': {'object_name': 'SubWorkflow'},
'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'propagate_configuration': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'sub_workflow': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oozie.Workflow']"})
},
'oozie.workflow': {
'Meta': {'object_name': 'Workflow', '_ormbases': ['oozie.Job']},
'end': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'end_workflow'", 'null': 'True', 'to': "orm['oozie.End']"}),
'is_single': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Job']", 'unique': 'True', 'primary_key': 'True'}),
'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
'managed': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'start': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'start_workflow'", 'null': 'True', 'to': "orm['oozie.Start']"})
}
}
complete_apps = ['oozie']
|
{
"content_hash": "9bf6d5e1ed70854d993a100c55ffe455",
"timestamp": "",
"source": "github",
"line_count": 317,
"max_line_length": 194,
"avg_line_length": 78.27129337539432,
"alnum_prop": 0.5300660970498146,
"repo_name": "yongshengwang/builthue",
"id": "c7bc5711ae997f45f721b4dd5fdb1c1a10542e9c",
"size": "24830",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/oozie/src/oozie/migrations/0022_change_examples_path_format.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "207947"
},
{
"name": "C",
"bytes": "10774013"
},
{
"name": "C++",
"bytes": "184593"
},
{
"name": "CSS",
"bytes": "655282"
},
{
"name": "Emacs Lisp",
"bytes": "14875"
},
{
"name": "GAP",
"bytes": "11337"
},
{
"name": "Java",
"bytes": "3080564"
},
{
"name": "JavaScript",
"bytes": "2418037"
},
{
"name": "Makefile",
"bytes": "86977"
},
{
"name": "Perl",
"bytes": "161801"
},
{
"name": "PigLatin",
"bytes": "282"
},
{
"name": "Prolog",
"bytes": "4590"
},
{
"name": "Python",
"bytes": "29990389"
},
{
"name": "Shell",
"bytes": "38643"
},
{
"name": "TeX",
"bytes": "129526"
},
{
"name": "Thrift",
"bytes": "99710"
},
{
"name": "XSLT",
"bytes": "367778"
}
],
"symlink_target": ""
}
|
from forms import PhoneNumberForm
ff = PhoneNumberForm()
ff.set(dict(country='CA', telephone='+41 22 774 0306'))
ff.validate()
ff.errors
ff['country'] = 'CH'
ff.validate()
|
{
"content_hash": "179f60050b52550471fbf4ae89e95da5",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 55,
"avg_line_length": 21.625,
"alnum_prop": 0.7167630057803468,
"repo_name": "scooterXL/flatland-europython-2010",
"id": "aec1f9fc84a23ac063f5714d3df4dc8e130e0488",
"size": "173",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/custom_validator.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "125913"
},
{
"name": "Python",
"bytes": "35808"
},
{
"name": "Shell",
"bytes": "134"
}
],
"symlink_target": ""
}
|
"""
Example of how to trace a Fresnel Zone Plate
by Lucia Alianelli and Manuel Sanchez del Rio.
1) The source is from get_beam(), is a collimated source of squared cross section (800 microns),
monochromatic (1.54 A)
2) fresnel_zone_plane() calculates a fzp, centered at (0,0). It returns a new shadow3 beam
containing the beam after the fzp at the same plane of fzp.
The fzp parameters are: inner zone radius: 12.4 microns, diameter:
619 microns
focal distance (at nominal wavelength 1.54 A): 100 cm
3) main() does:
i) create the source with get_beam()
ii) Traces a FZP placed at the same source plane
iii) retraces noth the source and the focused source and displays both results.
One can see hoe the FWZ focuses well the beam
srio@esrf.eu - Written. Translated from macro_fresnelzonplate example in ShadowVUI
"""
import Shadow
import numpy
def get_beam():
#
# Python script to run shadow3. Created automatically with ShadowTools.make_python_script_from_list().
#
# write (1) or not (0) SHADOW files start.xx end.xx star.xx
iwrite = 0
#
# initialize shadow3 source (oe0) and beam
#
beam = Shadow.Beam()
oe0 = Shadow.Source()
oe1 = Shadow.OE()
#
# Define variables. See meaning of variables in:
# https://raw.githubusercontent.com/srio/shadow3/master/docs/source.nml
# https://raw.githubusercontent.com/srio/shadow3/master/docs/oe.nml
#
oe0.FSOUR = 1
oe0.HDIV1 = 1e-08
oe0.HDIV2 = 1e-08
oe0.IDO_VX = 0
oe0.IDO_VZ = 0
oe0.IDO_X_S = 0
oe0.IDO_Y_S = 0
oe0.IDO_Z_S = 0
oe0.PH1 = 1.54
oe0.VDIV1 = 1e-08
oe0.VDIV2 = 1e-08
oe0.WXSOU = 0.08
oe0.WZSOU = 0.08
#Run SHADOW to create the source
if iwrite:
oe0.write("start.00")
beam.genSource(oe0)
if iwrite:
oe0.write("end.00")
beam.write("begin.dat")
return beam
def fresnel_zone_plane(beam_in,
DDm = 618. , # FZP diameter in microns
nomlambdaA = 1.54 , # nominal wavelength in Angstrom
focal = 100. , # focal distance (cm)
R0m = 12.4 , # inner zone radius (microns)
):
"""
Fresnel zone plate. Simple calculation
Coded by Lucia Alianelli (alianell@ill.fr) and
Manuel Sanchez del Rio (srio@esrf.fr)
This shadow3 script calculates the effect of a Fresnel Zone Plate
It supposes the fzp is on top of a screen plane
centered on the optical axis.
:param beam: FZP diameter in microns
:param DDm: nominal wavelength in Angstrom
:param nomlambdaA:
:param focal: focal distance (cm)
:param R0m: inner zone radius (microns)
:return:
"""
#
# Widget_Control,/HourGlass ; create an horglass icon during calculation
# change units to cm
#
DD = DDm*1.e-4 # cm
R0 = R0m*1.e-4 # cm
nomlambda = nomlambdaA*1.e-8 # cm
beam = beam_in.duplicate()
#
# reading Shadow file variables
#
#
lambda1 = beam.getshonecol(19) # lambda in Angstroms
x = beam.getshonecol(1)
z = beam.getshonecol(3)
xpin = beam.getshonecol(4)
zpin = beam.getshonecol(6)
#
#
# ;
#
Kmod = 2 * numpy.pi / lambda1 # wavevector modulus in Angstrom-1
r = numpy.sqrt(x**2. + z**2.) # distance to center
Kxin = Kmod * xpin
Kzin = Kmod * zpin
nrays = x.size
n = numpy.zeros(nrays)
d = numpy.zeros(nrays)
#
# calculate n (index of n-th zone) and d (radius if the nth zone minus
# radius of the n-a zone)
#
# Rays that arrive onto the inner zone
# IN are the indices of rays that arrive inside the inner zone
IN = numpy.where(r <= R0)
IN = numpy.array(IN)
if IN.size > 0:
n[IN] = 0.0
d[IN] = 0.0
# Rays that arrive outside the inner zone
# (see formulas in A.G. Michette, "X-ray science and technology"
# Institute of Physics Publishing (1993))
OUT = numpy.where(r >= R0)
OUT = numpy.array(OUT)
if OUT.size > 0:
n[OUT] = (r[OUT]**2 - R0**2) / (nomlambda * focal)
d[OUT] = numpy.sqrt((n[OUT]+.5) * nomlambda * focal + R0**2) - \
numpy.sqrt((n[OUT]-.5) * nomlambda * focal + R0**2)
# computing G (the "grating" wavevector in Angstrom^-1)
dA = d * 1.e8 # Angstrom
Gx = -numpy.pi / dA * (x/r)
Gz = -numpy.pi / dA * (z/r)
# computing kout
Kxout = Kxin + Gx
Kzout = Kzin + Gz
xpout = Kxout / Kmod
zpout = Kzout / Kmod
# Handle rays that arrive outside the FZP
# flag for lost rays
LOST = numpy.where(r > DD/2)
LOST = numpy.array(LOST)
if LOST.size > 0:
beam.rays[LOST,9] = -100.0
beam.rays[:,3] = xpout
beam.rays[:,4] = numpy.sqrt(1 - xpout**2 - zpout**2)
beam.rays[:,5] = zpout
return beam
if __name__ == "__main__":
# get source
beam = get_beam()
# apply FZP ay the spurce position
beam_out = fresnel_zone_plane(beam)
# propagate both source and beam after FZP until focal plane at 1m
beam.retrace(100)
beam_out.retrace(100.0)
# make plots
Shadow.ShadowTools.plotxy(beam,1,3,nbins=101,nolost=1,title="Without FZP - Propagated source - Real space")
Shadow.ShadowTools.plotxy(beam_out,1,3,nbins=101,nolost=1,title="With FZP - Focused - Real space")
|
{
"content_hash": "fb8746d42b4433447841335cdc00b123",
"timestamp": "",
"source": "github",
"line_count": 207,
"max_line_length": 111,
"avg_line_length": 26.357487922705314,
"alnum_prop": 0.6009897360703812,
"repo_name": "srio/shadow3-scripts",
"id": "74a1d8b59fab5a85c1f87749bb38a0310604f97e",
"size": "5456",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fresnel_zone_plate.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "MATLAB",
"bytes": "1499"
},
{
"name": "Python",
"bytes": "1088020"
},
{
"name": "Shell",
"bytes": "724"
}
],
"symlink_target": ""
}
|
"""Config flow for yolink."""
from __future__ import annotations
from collections.abc import Mapping
import logging
from typing import Any
from homeassistant.config_entries import ConfigEntry
from homeassistant.data_entry_flow import FlowResult
from homeassistant.helpers import config_entry_oauth2_flow
from .const import DOMAIN
class OAuth2FlowHandler(
config_entry_oauth2_flow.AbstractOAuth2FlowHandler, domain=DOMAIN
):
"""Config flow to handle yolink OAuth2 authentication."""
DOMAIN = DOMAIN
_reauth_entry: ConfigEntry | None = None
@property
def logger(self) -> logging.Logger:
"""Return logger."""
return logging.getLogger(__name__)
@property
def extra_authorize_data(self) -> dict:
"""Extra data that needs to be appended to the authorize url."""
scopes = ["create"]
return {"scope": " ".join(scopes)}
async def async_step_reauth(self, entry_data: Mapping[str, Any]) -> FlowResult:
"""Perform reauth upon an API authentication error."""
self._reauth_entry = self.hass.config_entries.async_get_entry(
self.context["entry_id"]
)
return await self.async_step_reauth_confirm()
async def async_step_reauth_confirm(self, user_input=None) -> FlowResult:
"""Dialog that informs the user that reauth is required."""
if user_input is None:
return self.async_show_form(step_id="reauth_confirm")
return await self.async_step_user()
async def async_oauth_create_entry(self, data: dict) -> FlowResult:
"""Create an oauth config entry or update existing entry for reauth."""
if existing_entry := self._reauth_entry:
self.hass.config_entries.async_update_entry(
existing_entry, data=existing_entry.data | data
)
await self.hass.config_entries.async_reload(existing_entry.entry_id)
return self.async_abort(reason="reauth_successful")
return self.async_create_entry(title="YoLink", data=data)
async def async_step_user(
self, user_input: dict[str, Any] | None = None
) -> FlowResult:
"""Handle a flow start."""
existing_entry = await self.async_set_unique_id(DOMAIN)
if existing_entry and not self._reauth_entry:
return self.async_abort(reason="already_configured")
return await super().async_step_user(user_input)
|
{
"content_hash": "4f6831e818837a92bdfdd76d4a0876cf",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 83,
"avg_line_length": 37.96875,
"alnum_prop": 0.6641975308641975,
"repo_name": "w1ll1am23/home-assistant",
"id": "128cd6cb35c3624a6b4cf5b952c4219e0b6ced12",
"size": "2430",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "homeassistant/components/yolink/config_flow.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2963"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "52277012"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
}
|
"""Functions useful to all element classes."""
import sys
import argparse
from subprocess import Popen, PIPE, STDOUT, CalledProcessError
# ==============================================================================
__version__ = "0.1"
__copyright__ = "Copyright 2017, devops.center"
__credits__ = ["Bob Lozano", "Gregg Jensen"]
__license__ = ' \
# Copyright 2014-2017 devops.center llc \
# \
# Licensed under the Apache License, Version 2.0 (the "License"); \
# you may not use this file except in compliance with the License. \
# You may obtain a copy of the License at \
# \
# http://www.apache.org/licenses/LICENSE-2.0 \
# \
# Unless required by applicable law or agreed to in writing, software \
# distributed under the License is distributed on an "AS IS" BASIS, \
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. \
# See the License for the specific language governing permissions and \
# limitations under the License. \
# '
__status__ = "Development"
# ==============================================================================
class ElementBase(object):
"""Serves base functionality for all element classes."""
def __init__(self, nameIn, argList):
"""Constructor for the base class."""
self.name = nameIn
# if "CONFIG_FILE" in argList:
# self.readConfigFile(argList["CONFIG_FILE"])
if "APP_NAME" in argList:
self.appName = argList["APP_NAME"]
if "ORGANIZATION" in argList:
self.profile = argList["ORGANIZATION"]
self.organization = argList["ORGANIZATION"]
if "REGION" in argList:
self.region = argList["REGION"]
if "ENV" in argList:
self.env = argList["ENV"]
if "SUFFIX" in argList:
self.suffix = argList["SUFFIX"]
if "STACK" in argList:
self.stack = argList["STACK"]
if "STACK_DIR" in argList:
self.stackDir = argList["STACK_DIR"]
if "PGVERSION" in argList:
self.postgresVersion = argList["PGVERSION"]
# print("ElementBase args: {}".format(argList))
# def readConfigFile(self, theFileName):
# """Read the config file for the key/value pairs listing the paths."""
# # check to see if the config file exists
# try:
# tmpFileHandle = open(theFileName, 'r')
# tmpFileHandle.close()
# except IOError:
# print("Unable to access the config file: {}".format(theFileName))
# sys.exit(1)
#
# self.config = ConfigParser()
# self.config.read(theFileName)
#
# for keyName, aValue in self.config.items("settings"):
# if not hasattr(self, keyName):
# setattr(self, keyName, aValue)
def runScript(self, shellScript):
"""Execute the passed in shell script."""
print(self.__class__.__name__ + " EXECUTING: " + shellScript)
# NOTE: this is a python3
with Popen(shellScript, shell=True, stdout=PIPE, bufsize=1,
universal_newlines=True) as p:
for line in p.stdout:
print(line, end='') # process line here
if p.returncode != 0:
raise CalledProcessError(p.returncode, p.args)
sys.exit(1)
def priorToRun(self):
"""Execute steps prior to running."""
return
def postRunning(self):
"""Execute steps after the run has completed."""
return
def checkArgs():
"""Check the command line arguments."""
parser = argparse.ArgumentParser(
description=('comment'))
parser.parse_args()
def main(argv):
"""Main code goes here."""
checkArgs()
if __name__ == "__main__":
main(sys.argv[1:])
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
|
{
"content_hash": "79e0d43272b009a4d482da5d9610b26b",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 80,
"avg_line_length": 38.0990990990991,
"alnum_prop": 0.5240009458500827,
"repo_name": "devopscenter/dcStack",
"id": "63b332c32cf4547c9a95b721ff969e47e65e5662",
"size": "4252",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "instancebuilder/elementbase.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "85453"
},
{
"name": "Go",
"bytes": "487"
},
{
"name": "JavaScript",
"bytes": "368"
},
{
"name": "Python",
"bytes": "84891"
},
{
"name": "Shell",
"bytes": "377652"
}
],
"symlink_target": ""
}
|
"""Tests Google Test's throw-on-failure mode with exceptions disabled.
This script invokes gtest_throw_on_failure_test_ (a program written with
Google Test) with different environments and command line flags.
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import gtest_test_utils
# Constants.
# The command line flag for enabling/disabling the throw-on-failure mode.
THROW_ON_FAILURE = 'gtest_throw_on_failure'
# Path to the gtest_throw_on_failure_test_ program, compiled with
# exceptions disabled.
EXE_PATH = gtest_test_utils.GetTestExecutablePath(
'gtest_throw_on_failure_test_')
# Utilities.
def SetEnvVar(env_var, value):
"""Sets an environment variable to a given value; unsets it when the
given value is None.
"""
env_var = env_var.upper()
if value is not None:
os.environ[env_var] = value
elif env_var in os.environ:
del os.environ[env_var]
def Run(command):
"""Runs a command; returns True/False if its exit code is/isn't 0."""
print 'Running "%s". . .' % ' '.join(command)
p = gtest_test_utils.Subprocess(command)
return p.exited and p.exit_code == 0
# The tests. TODO(wan@google.com): refactor the class to share common
# logic with code in gtest_break_on_failure_unittest.py.
class ThrowOnFailureTest(gtest_test_utils.TestCase):
"""Tests the throw-on-failure mode."""
def RunAndVerify(self, env_var_value, flag_value, should_fail):
"""Runs gtest_throw_on_failure_test_ and verifies that it does
(or does not) exit with a non-zero code.
Args:
env_var_value: value of the GTEST_BREAK_ON_FAILURE environment
variable; None if the variable should be unset.
flag_value: value of the --gtest_break_on_failure flag;
None if the flag should not be present.
should_fail: True iff the program is expected to fail.
"""
SetEnvVar(THROW_ON_FAILURE, env_var_value)
if env_var_value is None:
env_var_value_msg = ' is not set'
else:
env_var_value_msg = '=' + env_var_value
if flag_value is None:
flag = ''
elif flag_value == '0':
flag = '--%s=0' % THROW_ON_FAILURE
else:
flag = '--%s' % THROW_ON_FAILURE
command = [EXE_PATH]
if flag:
command.append(flag)
if should_fail:
should_or_not = 'should'
else:
should_or_not = 'should not'
failed = not Run(command)
SetEnvVar(THROW_ON_FAILURE, None)
msg = ('when %s%s, an assertion failure in "%s" %s cause a non-zero '
'exit code.' %
(THROW_ON_FAILURE, env_var_value_msg, ' '.join(command),
should_or_not))
self.assert_(failed == should_fail, msg)
def testDefaultBehavior(self):
"""Tests the behavior of the default mode."""
self.RunAndVerify(env_var_value=None, flag_value=None, should_fail=False)
def testThrowOnFailureEnvVar(self):
"""Tests using the GTEST_THROW_ON_FAILURE environment variable."""
self.RunAndVerify(env_var_value='0',
flag_value=None,
should_fail=False)
self.RunAndVerify(env_var_value='1',
flag_value=None,
should_fail=True)
def testThrowOnFailureFlag(self):
"""Tests using the --gtest_throw_on_failure flag."""
self.RunAndVerify(env_var_value=None,
flag_value='0',
should_fail=False)
self.RunAndVerify(env_var_value=None,
flag_value='1',
should_fail=True)
def testThrowOnFailureFlagOverridesEnvVar(self):
"""Tests that --gtest_throw_on_failure overrides GTEST_THROW_ON_FAILURE."""
self.RunAndVerify(env_var_value='0',
flag_value='0',
should_fail=False)
self.RunAndVerify(env_var_value='0',
flag_value='1',
should_fail=True)
self.RunAndVerify(env_var_value='1',
flag_value='0',
should_fail=False)
self.RunAndVerify(env_var_value='1',
flag_value='1',
should_fail=True)
if __name__ == '__main__':
gtest_test_utils.Main()
|
{
"content_hash": "0d6b521e2a68ee68abc85692f088a8e7",
"timestamp": "",
"source": "github",
"line_count": 141,
"max_line_length": 79,
"avg_line_length": 30.886524822695037,
"alnum_prop": 0.5908151549942595,
"repo_name": "2coding/Codec",
"id": "7ede7854ad0dc31f8b11597a8b2cbcb87e54ed84",
"size": "5937",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "thirdparty/gtest-1.6.0/test/gtest_throw_on_failure_test.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "54600"
},
{
"name": "C++",
"bytes": "19889"
},
{
"name": "Objective-C",
"bytes": "4802"
}
],
"symlink_target": ""
}
|
'''
Copyright (c) 2010 openpyxl
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
@license: http://www.opensource.org/licenses/mit-license.php
@author: Eric Gazoni
'''
from openpyxl.shared.xmltools import Element, SubElement, get_document_content
from openpyxl.chart import Chart, ErrorBar
class ChartWriter(object):
def __init__(self, chart):
self.chart = chart
def write(self):
""" write a chart """
root = Element('c:chartSpace',
{'xmlns:c':"http://schemas.openxmlformats.org/drawingml/2006/chart",
'xmlns:a':"http://schemas.openxmlformats.org/drawingml/2006/main",
'xmlns:r':"http://schemas.openxmlformats.org/officeDocument/2006/relationships"})
SubElement(root, 'c:lang', {'val':self.chart.lang})
self._write_chart(root)
self._write_print_settings(root)
self._write_shapes(root)
return get_document_content(root)
def _write_chart(self, root):
chart = self.chart
ch = SubElement(root, 'c:chart')
self._write_title(ch)
plot_area = SubElement(ch, 'c:plotArea')
layout = SubElement(plot_area, 'c:layout')
mlayout = SubElement(layout, 'c:manualLayout')
SubElement(mlayout, 'c:layoutTarget', {'val':'inner'})
SubElement(mlayout, 'c:xMode', {'val':'edge'})
SubElement(mlayout, 'c:yMode', {'val':'edge'})
SubElement(mlayout, 'c:x', {'val':str(chart._get_margin_left())})
SubElement(mlayout, 'c:y', {'val':str(chart._get_margin_top())})
SubElement(mlayout, 'c:w', {'val':str(chart.width)})
SubElement(mlayout, 'c:h', {'val':str(chart.height)})
if chart.type == Chart.SCATTER_CHART:
subchart = SubElement(plot_area, 'c:scatterChart')
SubElement(subchart, 'c:scatterStyle', {'val':str('lineMarker')})
else:
if chart.type == Chart.BAR_CHART:
subchart = SubElement(plot_area, 'c:barChart')
SubElement(subchart, 'c:barDir', {'val':'col'})
else:
subchart = SubElement(plot_area, 'c:lineChart')
SubElement(subchart, 'c:grouping', {'val':chart.grouping})
self._write_series(subchart)
SubElement(subchart, 'c:marker', {'val':'1'})
SubElement(subchart, 'c:axId', {'val':str(chart.x_axis.id)})
SubElement(subchart, 'c:axId', {'val':str(chart.y_axis.id)})
if chart.type == Chart.SCATTER_CHART:
self._write_axis(plot_area, chart.x_axis, 'c:valAx')
else:
self._write_axis(plot_area, chart.x_axis, 'c:catAx')
self._write_axis(plot_area, chart.y_axis, 'c:valAx')
self._write_legend(ch)
SubElement(ch, 'c:plotVisOnly', {'val':'1'})
def _write_title(self, chart):
if self.chart.title != '':
title = SubElement(chart, 'c:title')
tx = SubElement(title, 'c:tx')
rich = SubElement(tx, 'c:rich')
SubElement(rich, 'a:bodyPr')
SubElement(rich, 'a:lstStyle')
p = SubElement(rich, 'a:p')
pPr = SubElement(p, 'a:pPr')
SubElement(pPr, 'a:defRPr')
r = SubElement(p, 'a:r')
SubElement(r, 'a:rPr', {'lang':self.chart.lang})
t = SubElement(r, 'a:t').text = self.chart.title
SubElement(title, 'c:layout')
def _write_axis(self, plot_area, axis, label):
ax = SubElement(plot_area, label)
SubElement(ax, 'c:axId', {'val':str(axis.id)})
scaling = SubElement(ax, 'c:scaling')
SubElement(scaling, 'c:orientation', {'val':axis.orientation})
if label == 'c:valAx':
SubElement(scaling, 'c:max', {'val':str(axis.max)})
SubElement(scaling, 'c:min', {'val':str(axis.min)})
SubElement(ax, 'c:axPos', {'val':axis.position})
if label == 'c:valAx':
SubElement(ax, 'c:majorGridlines')
SubElement(ax, 'c:numFmt', {'formatCode':"General", 'sourceLinked':'1'})
if axis.title != '':
title = SubElement(ax, 'c:title')
tx = SubElement(title, 'c:tx')
rich = SubElement(tx, 'c:rich')
SubElement(rich, 'a:bodyPr')
SubElement(rich, 'a:lstStyle')
p = SubElement(rich, 'a:p')
pPr = SubElement(p, 'a:pPr')
SubElement(pPr, 'a:defRPr')
r = SubElement(p, 'a:r')
SubElement(r, 'a:rPr', {'lang':self.chart.lang})
t = SubElement(r, 'a:t').text = axis.title
SubElement(title, 'c:layout')
SubElement(ax, 'c:tickLblPos', {'val':axis.tick_label_position})
SubElement(ax, 'c:crossAx', {'val':str(axis.cross)})
SubElement(ax, 'c:crosses', {'val':axis.crosses})
if axis.auto:
SubElement(ax, 'c:auto', {'val':'1'})
if axis.label_align:
SubElement(ax, 'c:lblAlgn', {'val':axis.label_align})
if axis.label_offset:
SubElement(ax, 'c:lblOffset', {'val':str(axis.label_offset)})
if label == 'c:valAx':
if self.chart.type == Chart.SCATTER_CHART:
SubElement(ax, 'c:crossBetween', {'val':'midCat'})
else:
SubElement(ax, 'c:crossBetween', {'val':'between'})
SubElement(ax, 'c:majorUnit', {'val':str(axis.unit)})
def _write_series(self, subchart):
for i, serie in enumerate(self.chart._series):
ser = SubElement(subchart, 'c:ser')
SubElement(ser, 'c:idx', {'val':str(i)})
SubElement(ser, 'c:order', {'val':str(i)})
if serie.legend:
tx = SubElement(ser, 'c:tx')
self._write_serial(tx, serie.legend)
if serie.color:
sppr = SubElement(ser, 'c:spPr')
if self.chart.type == Chart.BAR_CHART:
# fill color
fillc = SubElement(sppr, 'a:solidFill')
SubElement(fillc, 'a:srgbClr', {'val':serie.color})
# edge color
ln = SubElement(sppr, 'a:ln')
fill = SubElement(ln, 'a:solidFill')
SubElement(fill, 'a:srgbClr', {'val':serie.color})
if serie.error_bar:
self._write_error_bar(ser, serie)
marker = SubElement(ser, 'c:marker')
SubElement(marker, 'c:symbol', {'val':serie.marker})
if serie.labels:
cat = SubElement(ser, 'c:cat')
self._write_serial(cat, serie.labels)
if self.chart.type == Chart.SCATTER_CHART:
if serie.xvalues:
xval = SubElement(ser, 'c:xVal')
self._write_serial(xval, serie.xvalues)
yval = SubElement(ser, 'c:yVal')
self._write_serial(yval, serie.values)
else:
val = SubElement(ser, 'c:val')
self._write_serial(val, serie.values)
def _write_serial(self, node, serie, literal=False):
cache = serie._get_cache()
if isinstance(cache[0], basestring):
typ = 'str'
else:
typ = 'num'
if not literal:
if typ == 'num':
ref = SubElement(node, 'c:numRef')
else:
ref = SubElement(node, 'c:strRef')
SubElement(ref, 'c:f').text = serie._get_ref()
if typ == 'num':
data = SubElement(ref, 'c:numCache')
else:
data = SubElement(ref, 'c:strCache')
else:
data = SubElement(node, 'c:numLit')
if typ == 'num':
SubElement(data, 'c:formatCode').text = 'General'
if literal:
values = (1,)
else:
values = cache
SubElement(data, 'c:ptCount', {'val':str(len(values))})
for j, val in enumerate(values):
point = SubElement(data, 'c:pt', {'idx':str(j)})
SubElement(point, 'c:v').text = str(val)
def _write_error_bar(self, node, serie):
flag = {ErrorBar.PLUS_MINUS:'both',
ErrorBar.PLUS:'plus',
ErrorBar.MINUS:'minus'}
eb = SubElement(node, 'c:errBars')
SubElement(eb, 'c:errBarType', {'val':flag[serie.error_bar.type]})
SubElement(eb, 'c:errValType', {'val':'cust'})
plus = SubElement(eb, 'c:plus')
self._write_serial(plus, serie.error_bar.values,
literal=(serie.error_bar.type==ErrorBar.MINUS))
minus = SubElement(eb, 'c:minus')
self._write_serial(minus, serie.error_bar.values,
literal=(serie.error_bar.type==ErrorBar.PLUS))
def _write_legend(self, chart):
legend = SubElement(chart, 'c:legend')
SubElement(legend, 'c:legendPos', {'val':self.chart.legend.position})
SubElement(legend, 'c:layout')
def _write_print_settings(self, root):
settings = SubElement(root, 'c:printSettings')
SubElement(settings, 'c:headerFooter')
margins = dict([(k, str(v)) for (k,v) in self.chart.print_margins.iteritems()])
SubElement(settings, 'c:pageMargins', margins)
SubElement(settings, 'c:pageSetup')
def _write_shapes(self, root):
if self.chart._shapes:
SubElement(root, 'c:userShapes', {'r:id':'rId1'})
def write_rels(self, drawing_id):
root = Element('Relationships', {'xmlns' : 'http://schemas.openxmlformats.org/package/2006/relationships'})
attrs = {'Id' : 'rId1',
'Type' : 'http://schemas.openxmlformats.org/officeDocument/2006/relationships/chartUserShapes',
'Target' : '../drawings/drawing%s.xml' % drawing_id }
SubElement(root, 'Relationship', attrs)
return get_document_content(root)
|
{
"content_hash": "62d680994063347a3f09f0a096cf45b9",
"timestamp": "",
"source": "github",
"line_count": 273,
"max_line_length": 115,
"avg_line_length": 41.84249084249084,
"alnum_prop": 0.5385625492427558,
"repo_name": "chronossc/openpyxl",
"id": "5b500f976f6c2500fce13c1e5814589535bfd31e",
"size": "11439",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "openpyxl/writer/charts.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "303430"
},
{
"name": "Shell",
"bytes": "4269"
}
],
"symlink_target": ""
}
|
"""Tests for collection domain objects and methods defined on them."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import datetime
from constants import constants
from core.domain import collection_domain
from core.domain import collection_services
from core.tests import test_utils
import feconf
import python_utils
import utils
# Dictionary-like data structures within sample YAML must be formatted
# alphabetically to match string equivalence with the YAML generation
# methods tested below.
#
# If evaluating differences in YAML, conversion to dict form via
# utils.dict_from_yaml can isolate differences quickly.
SAMPLE_YAML_CONTENT = (
"""category: A category
language_code: en
nodes:
- exploration_id: an_exploration_id
objective: An objective
schema_version: %d
tags: []
title: A title
""") % (feconf.CURRENT_COLLECTION_SCHEMA_VERSION)
class CollectionChangeTests(test_utils.GenericTestBase):
def test_collection_change_object_with_missing_cmd(self):
with self.assertRaisesRegexp(
utils.ValidationError, 'Missing cmd key in change dict'):
collection_domain.CollectionChange({'invalid': 'data'})
def test_collection_change_object_with_invalid_cmd(self):
with self.assertRaisesRegexp(
utils.ValidationError, 'Command invalid is not allowed'):
collection_domain.CollectionChange({'cmd': 'invalid'})
def test_collection_change_object_with_missing_attribute_in_cmd(self):
with self.assertRaisesRegexp(
utils.ValidationError, (
'The following required attributes are missing: '
'exploration_id, new_value')):
collection_domain.CollectionChange({
'cmd': 'edit_collection_node_property',
'property_name': 'category',
'old_value': 'old_value'
})
def test_collection_change_object_with_extra_attribute_in_cmd(self):
with self.assertRaisesRegexp(
utils.ValidationError, (
'The following extra attributes are present: invalid')):
collection_domain.CollectionChange({
'cmd': 'edit_collection_node_property',
'exploration_id': 'exploration_id',
'property_name': 'category',
'old_value': 'old_value',
'new_value': 'new_value',
'invalid': 'invalid'
})
def test_collection_change_object_with_invalid_collection_property(self):
with self.assertRaisesRegexp(
utils.ValidationError, (
'Value for property_name in cmd edit_collection_property: '
'invalid is not allowed')):
collection_domain.CollectionChange({
'cmd': 'edit_collection_property',
'property_name': 'invalid',
'old_value': 'old_value',
'new_value': 'new_value',
})
def test_collection_change_object_with_create_new(self):
col_change_object = collection_domain.CollectionChange({
'cmd': 'create_new',
'category': 'category',
'title': 'title'
})
self.assertEqual(col_change_object.cmd, 'create_new')
self.assertEqual(col_change_object.category, 'category')
self.assertEqual(col_change_object.title, 'title')
def test_collection_change_object_with_add_collection_node(self):
col_change_object = collection_domain.CollectionChange({
'cmd': 'add_collection_node',
'exploration_id': 'exploration_id',
})
self.assertEqual(col_change_object.cmd, 'add_collection_node')
self.assertEqual(col_change_object.exploration_id, 'exploration_id')
def test_collection_change_object_with_delete_collection_node(self):
col_change_object = collection_domain.CollectionChange({
'cmd': 'delete_collection_node',
'exploration_id': 'exploration_id',
})
self.assertEqual(col_change_object.cmd, 'delete_collection_node')
self.assertEqual(col_change_object.exploration_id, 'exploration_id')
def test_collection_change_object_with_swap_nodes(self):
col_change_object = collection_domain.CollectionChange({
'cmd': 'swap_nodes',
'first_index': 'first_index',
'second_index': 'second_index'
})
self.assertEqual(col_change_object.cmd, 'swap_nodes')
self.assertEqual(col_change_object.first_index, 'first_index')
self.assertEqual(col_change_object.second_index, 'second_index')
def test_collection_change_object_with_edit_collection_property(self):
col_change_object = collection_domain.CollectionChange({
'cmd': 'edit_collection_property',
'property_name': 'category',
'new_value': 'new_value',
'old_value': 'old_value'
})
self.assertEqual(col_change_object.cmd, 'edit_collection_property')
self.assertEqual(col_change_object.property_name, 'category')
self.assertEqual(col_change_object.new_value, 'new_value')
self.assertEqual(col_change_object.old_value, 'old_value')
def test_collection_change_object_with_edit_collection_node_property(self):
col_change_object = collection_domain.CollectionChange({
'cmd': 'edit_collection_node_property',
'exploration_id': 'exploration_id',
'property_name': 'title',
'new_value': 'new_value',
'old_value': 'old_value'
})
self.assertEqual(col_change_object.cmd, 'edit_collection_node_property')
self.assertEqual(col_change_object.exploration_id, 'exploration_id')
self.assertEqual(col_change_object.property_name, 'title')
self.assertEqual(col_change_object.new_value, 'new_value')
self.assertEqual(col_change_object.old_value, 'old_value')
def test_collection_change_object_with_migrate_schema_to_latest_version(
self):
col_change_object = collection_domain.CollectionChange({
'cmd': 'migrate_schema_to_latest_version',
'from_version': 'from_version',
'to_version': 'to_version',
})
self.assertEqual(
col_change_object.cmd, 'migrate_schema_to_latest_version')
self.assertEqual(col_change_object.from_version, 'from_version')
self.assertEqual(col_change_object.to_version, 'to_version')
def test_collection_change_object_with_add_collection_skill(self):
col_change_object = collection_domain.CollectionChange({
'cmd': 'add_collection_skill',
'name': 'name'
})
self.assertEqual(col_change_object.cmd, 'add_collection_skill')
self.assertEqual(col_change_object.name, 'name')
def test_collection_change_object_with_delete_collection_skill(self):
col_change_object = collection_domain.CollectionChange({
'cmd': 'delete_collection_skill',
'skill_id': 'skill_id'
})
self.assertEqual(col_change_object.cmd, 'delete_collection_skill')
self.assertEqual(col_change_object.skill_id, 'skill_id')
def test_collection_change_object_with_add_question_id_to_skill(self):
col_change_object = collection_domain.CollectionChange({
'cmd': 'add_question_id_to_skill',
'skill_id': 'skill_id',
'question_id': 'question_id'
})
self.assertEqual(col_change_object.cmd, 'add_question_id_to_skill')
self.assertEqual(col_change_object.skill_id, 'skill_id')
self.assertEqual(col_change_object.question_id, 'question_id')
def test_collection_change_object_with_remove_question_id_from_skill(self):
col_change_object = collection_domain.CollectionChange({
'cmd': 'remove_question_id_from_skill',
'skill_id': 'skill_id',
'question_id': 'question_id'
})
self.assertEqual(col_change_object.cmd, 'remove_question_id_from_skill')
self.assertEqual(col_change_object.skill_id, 'skill_id')
self.assertEqual(col_change_object.question_id, 'question_id')
def test_to_dict(self):
col_change_dict = {
'cmd': 'remove_question_id_from_skill',
'skill_id': 'skill_id',
'question_id': 'question_id'
}
col_change_object = collection_domain.CollectionChange(col_change_dict)
self.assertEqual(col_change_object.to_dict(), col_change_dict)
class CollectionDomainUnitTests(test_utils.GenericTestBase):
"""Test the collection domain object."""
COLLECTION_ID = 'collection_id'
EXPLORATION_ID = 'exp_id_0'
def setUp(self):
super(CollectionDomainUnitTests, self).setUp()
self.save_new_valid_collection(
self.COLLECTION_ID, 'user@example.com', title='Title',
category='Category', objective='Objective',
exploration_id=self.EXPLORATION_ID)
self.collection = collection_services.get_collection_by_id(
self.COLLECTION_ID)
def _assert_validation_error(self, expected_error_substring):
"""Checks that the collection passes strict validation."""
with self.assertRaisesRegexp(
utils.ValidationError, expected_error_substring):
self.collection.validate()
def test_initial_validation(self):
"""Test validating a new, valid collection."""
self.collection.validate()
def test_title_validation(self):
self.collection.title = 0
self._assert_validation_error('Expected title to be a string')
def test_category_validation(self):
self.collection.category = 0
self._assert_validation_error('Expected category to be a string')
def test_objective_validation(self):
self.collection.objective = ''
self._assert_validation_error('objective must be specified')
self.collection.objective = 0
self._assert_validation_error('Expected objective to be a string')
def test_language_code_validation(self):
self.collection.language_code = ''
self._assert_validation_error('language must be specified')
self.collection.language_code = 0
self._assert_validation_error('Expected language code to be a string')
self.collection.language_code = 'xz'
self._assert_validation_error('Invalid language code')
def test_tags_validation(self):
self.collection.tags = 'abc'
self._assert_validation_error('Expected tags to be a list')
self.collection.tags = [2, 3]
self._assert_validation_error('Expected each tag to be a string')
self.collection.tags = ['', 'tag']
self._assert_validation_error('Tags should be non-empty')
self.collection.tags = ['234']
self._assert_validation_error(
'Tags should only contain lowercase letters and spaces')
self.collection.tags = [' abc']
self._assert_validation_error(
'Tags should not start or end with whitespace')
self.collection.tags = ['abc def']
self._assert_validation_error(
'Adjacent whitespace in tags should be collapsed')
self.collection.tags = ['abc', 'abc']
self._assert_validation_error(
'Expected tags to be unique, but found duplicates')
def test_schema_version_validation(self):
self.collection.schema_version = 'some_schema_version'
self._assert_validation_error('Expected schema version to be an int')
self.collection.schema_version = 100
self._assert_validation_error(
'Expected schema version to be %s' %
feconf.CURRENT_COLLECTION_SCHEMA_VERSION)
def test_nodes_validation(self):
self.collection.nodes = {}
self._assert_validation_error('Expected nodes to be a list')
self.collection.nodes = [
collection_domain.CollectionNode.from_dict({
'exploration_id': '0'
}),
collection_domain.CollectionNode.from_dict({
'exploration_id': '0'
})
]
self._assert_validation_error(
'There are explorations referenced in the collection more than '
'once.')
def test_initial_explorations_validation(self):
# Having no collection nodes is fine for non-strict validation.
self.collection.nodes = []
self.collection.validate(strict=False)
# But it's not okay for strict validation.
self._assert_validation_error(
'Expected to have at least 1 exploration in the collection.')
def test_metadata_validation(self):
self.collection.title = ''
self.collection.objective = ''
self.collection.category = ''
self.collection.nodes = []
# Having no title is fine for non-strict validation.
self.collection.validate(strict=False)
# But it's not okay for strict validation.
self._assert_validation_error(
'A title must be specified for the collection.')
self.collection.title = 'A title'
# Having no objective is fine for non-strict validation.
self.collection.validate(strict=False)
# But it's not okay for strict validation.
self._assert_validation_error(
'An objective must be specified for the collection.')
self.collection.objective = 'An objective'
# Having no category is fine for non-strict validation.
self.collection.validate(strict=False)
# But it's not okay for strict validation.
self._assert_validation_error(
'A category must be specified for the collection.')
self.collection.category = 'A category'
# Having no exploration is fine for non-strict validation.
self.collection.validate(strict=False)
# But it's not okay for strict validation.
self._assert_validation_error(
'Expected to have at least 1 exploration in the collection.')
self.collection.add_node('exp_id_1')
# Now the collection passes both strict and non-strict validation.
self.collection.validate(strict=False)
self.collection.validate(strict=True)
def test_collection_node_exploration_id_validation(self):
# Validate CollectionNode's exploration_id.
collection_node0 = self.collection.get_node('exp_id_0')
collection_node0.exploration_id = 2
self._assert_validation_error('Expected exploration ID to be a string')
def test_is_demo_property(self):
"""Test the is_demo property."""
demo = collection_domain.Collection.create_default_collection('0')
self.assertEqual(demo.is_demo, True)
notdemo1 = collection_domain.Collection.create_default_collection('a')
self.assertEqual(notdemo1.is_demo, False)
notdemo2 = collection_domain.Collection.create_default_collection(
'abcd')
self.assertEqual(notdemo2.is_demo, False)
def test_collection_export_import(self):
"""Test that to_dict and from_dict preserve all data within an
collection.
"""
self.save_new_valid_exploration(
'0', 'user@example.com', end_state_name='End')
collection = collection_domain.Collection.create_default_collection(
'0', title='title', category='category', objective='objective')
collection_dict = collection.to_dict()
collection_from_dict = collection_domain.Collection.from_dict(
collection_dict)
self.assertEqual(collection_from_dict.to_dict(), collection_dict)
def test_add_delete_swap_nodes(self):
"""Test that add_node, delete_node and swap_nodes fail in the correct
situations.
"""
collection = collection_domain.Collection.create_default_collection(
'0')
self.assertEqual(len(collection.nodes), 0)
collection.add_node('test_exp')
self.assertEqual(len(collection.nodes), 1)
with self.assertRaisesRegexp(
ValueError,
'Exploration is already part of this collection: test_exp'
):
collection.add_node('test_exp')
collection.add_node('another_exp')
self.assertEqual(len(collection.nodes), 2)
collection.swap_nodes(0, 1)
self.assertEqual(collection.nodes[0].exploration_id, 'another_exp')
self.assertEqual(collection.nodes[1].exploration_id, 'test_exp')
with self.assertRaisesRegexp(
ValueError,
'Both indices point to the same collection node.'
):
collection.swap_nodes(0, 0)
collection.delete_node('another_exp')
self.assertEqual(len(collection.nodes), 1)
with self.assertRaisesRegexp(
ValueError,
'Exploration is not part of this collection: another_exp'
):
collection.delete_node('another_exp')
collection.delete_node('test_exp')
self.assertEqual(len(collection.nodes), 0)
def test_update_collection_contents_from_model(self):
versioned_collection_contents = {
'schema_version': 1,
'collection_contents': {}
}
collection_domain.Collection.update_collection_contents_from_model(
versioned_collection_contents, 1)
self.assertEqual(versioned_collection_contents['schema_version'], 2)
self.assertEqual(
versioned_collection_contents['collection_contents'], {})
def test_update_collection_contents_from_model_with_invalid_schema_version(
self):
versioned_collection_contents = {
'schema_version': feconf.CURRENT_COLLECTION_SCHEMA_VERSION,
'collection_contents': {}
}
with self.assertRaisesRegexp(
Exception,
'Collection is version .+ but current collection schema version '
'is %d' % feconf.CURRENT_COLLECTION_SCHEMA_VERSION):
collection_domain.Collection.update_collection_contents_from_model(
versioned_collection_contents,
feconf.CURRENT_COLLECTION_SCHEMA_VERSION)
def test_serialize_and_deserialize_returns_unchanged_collection(self):
"""Checks that serializing and then deserializing a default collection
works as intended by leaving the collection unchanged.
"""
self.assertEqual(
self.collection.to_dict(),
collection_domain.Collection.deserialize(
self.collection.serialize()).to_dict())
class ExplorationGraphUnitTests(test_utils.GenericTestBase):
"""Test the general structure of explorations within a collection."""
def test_initial_explorations(self):
"""Any exploration without prerequisites should be an initial
exploration.
"""
collection = collection_domain.Collection.create_default_collection(
'collection_id')
# If there are no explorations in the collection, there can be no
# initial explorations.
self.assertEqual(collection.nodes, [])
self.assertEqual(collection.first_exploration_id, None)
# A freshly added exploration will be an initial one.
collection.add_node('exp_id_0')
self.assertEqual(collection.first_exploration_id, 'exp_id_0')
# Having prerequisites will make an exploration no longer initial.
collection.add_node('exp_id_1')
self.assertEqual(len(collection.nodes), 2)
self.assertEqual(collection.first_exploration_id, 'exp_id_0')
def test_next_explorations(self):
"""Explorations should be suggested based on their index in the node
list.
"""
collection = collection_domain.Collection.create_default_collection(
'collection_id')
# There should be no next explorations for an empty collection.
self.assertEqual(collection.get_next_exploration_id([]), None)
# If a new exploration is added, the next exploration IDs should be the
# same as the initial exploration.
collection.add_node('exp_id_0')
self.assertEqual(collection.get_next_exploration_id([]), 'exp_id_0')
self.assertEqual(
collection.first_exploration_id,
collection.get_next_exploration_id([]))
# Completing the only exploration of the collection should lead to no
# available explorations thereafter.
self.assertEqual(
collection.get_next_exploration_id(['exp_id_0']), None)
# If another exploration has been added, then the first exploration
# should be the next one to complete.
collection.add_node('exp_id_1')
self.assertEqual(collection.get_next_exploration_id(
['exp_id_0']), 'exp_id_1')
# If another exploration is added, then based on explorations
# completed, the correct exploration should be shown as the next one.
collection.add_node('exp_id_2')
self.assertEqual(
collection.get_next_exploration_id([]), 'exp_id_0')
self.assertEqual(
collection.get_next_exploration_id(['exp_id_0']), 'exp_id_1')
self.assertEqual(
collection.get_next_exploration_id(['exp_id_0', 'exp_id_1']),
'exp_id_2')
# If all explorations have been completed, none should be suggested.
self.assertEqual(
collection.get_next_exploration_id(
['exp_id_0', 'exp_id_1', 'exp_id_2']), None)
def test_next_explorations_in_sequence(self):
collection = collection_domain.Collection.create_default_collection(
'collection_id')
exploration_id = 'exp_id_0'
collection.add_node(exploration_id)
# Completing the only exploration of the collection should lead to no
# available explorations thereafter.
self.assertEqual(
collection.get_next_exploration_id_in_sequence(exploration_id),
None)
collection.add_node('exp_id_1')
collection.add_node('exp_id_2')
self.assertEqual(
collection.get_next_exploration_id_in_sequence(exploration_id),
'exp_id_1')
self.assertEqual(
collection.get_next_exploration_id_in_sequence('exp_id_1'),
'exp_id_2')
def test_nodes_are_in_playble_order(self):
# Create collection.
collection = collection_domain.Collection.create_default_collection(
'collection_id')
# There should be an empty node list in playable order for an empty
# collection.
self.assertEqual(collection.nodes, [])
# Add nodes to collection.
collection.add_node('exp_id_0')
collection.add_node('exp_id_1')
collection.add_node('exp_id_2')
sorted_nodes = collection.nodes
expected_explorations_ids = ['exp_id_0', 'exp_id_1', 'exp_id_2']
observed_exploration_ids = [
node.exploration_id for node in sorted_nodes]
self.assertEqual(expected_explorations_ids, observed_exploration_ids)
def test_next_explorations_with_invalid_exploration_ids(self):
collection = collection_domain.Collection.create_default_collection(
'collection_id')
collection.add_node('exp_id_1')
# There should be one suggested exploration to complete by default.
self.assertEqual(collection.get_next_exploration_id([]), 'exp_id_1')
# If an invalid exploration ID is passed to get_next_exploration_id(),
# it should be ignored. This tests the situation where an exploration
# is deleted from a collection after being completed by a user.
self.assertEqual(
collection.get_next_exploration_id(['fake_exp_id']), 'exp_id_1')
class YamlCreationUnitTests(test_utils.GenericTestBase):
"""Test creation of collections from YAML files."""
COLLECTION_ID = 'a_collection_id'
EXPLORATION_ID = 'an_exploration_id'
def test_yaml_import_and_export(self):
"""Test the from_yaml() and to_yaml() methods."""
self.save_new_valid_exploration(
self.EXPLORATION_ID, 'user@example.com', end_state_name='End')
collection = collection_domain.Collection.create_default_collection(
self.COLLECTION_ID, title='A title', category='A category',
objective='An objective')
collection.add_node(self.EXPLORATION_ID)
self.assertEqual(len(collection.nodes), 1)
collection.validate()
yaml_content = collection.to_yaml()
self.assertEqual(yaml_content, SAMPLE_YAML_CONTENT)
collection2 = collection_domain.Collection.from_yaml(
'collection2', yaml_content)
self.assertEqual(len(collection2.nodes), 1)
yaml_content_2 = collection2.to_yaml()
self.assertEqual(yaml_content_2, yaml_content)
# Should not be able to create a collection from no YAML content.
with self.assertRaisesRegexp(
Exception, 'Please ensure that you are uploading a YAML text file, '
'not a zip file. The YAML parser returned the following error: '
'\'NoneType\' object has no attribute \'read\''):
collection_domain.Collection.from_yaml('collection3', None)
def test_from_yaml_with_no_schema_version_specified_raises_error(self):
collection = collection_domain.Collection(
self.COLLECTION_ID, 'title', 'category', 'objective', 'en', [],
None, [], 0)
yaml_content = collection.to_yaml()
with self.assertRaisesRegexp(
Exception, 'Invalid YAML file: no schema version specified.'):
collection_domain.Collection.from_yaml(
self.COLLECTION_ID, yaml_content)
def test_from_yaml_with_invalid_schema_version_raises_error(self):
collection = collection_domain.Collection(
self.COLLECTION_ID, 'title', 'category', 'objective', 'en', [],
0, [], 0)
yaml_content = collection.to_yaml()
with self.assertRaisesRegexp(
Exception,
'Sorry, we can only process v1 to .+ collection YAML files at '
'present.'):
collection_domain.Collection.from_yaml(
self.COLLECTION_ID, yaml_content)
class SchemaMigrationMethodsUnitTests(test_utils.GenericTestBase):
"""Tests the presence of appropriate schema migration methods in the
Collection domain object class.
"""
def test_correct_collection_contents_schema_conversion_methods_exist(self):
"""Test that the right collection_contents schema conversion methods
exist.
"""
current_collection_schema_version = (
feconf.CURRENT_COLLECTION_SCHEMA_VERSION)
for version_num in python_utils.RANGE(
1, current_collection_schema_version):
self.assertTrue(hasattr(
collection_domain.Collection,
'_convert_collection_contents_v%s_dict_to_v%s_dict' % (
version_num, version_num + 1)))
self.assertFalse(hasattr(
collection_domain.Collection,
'_convert_collection_contents_v%s_dict_to_v%s_dict' % (
current_collection_schema_version,
current_collection_schema_version + 1)))
def test_correct_collection_schema_conversion_methods_exist(self):
"""Test that the right collection schema conversion methods exist."""
current_collection_schema_version = (
feconf.CURRENT_COLLECTION_SCHEMA_VERSION)
for version_num in python_utils.RANGE(
1, current_collection_schema_version):
self.assertTrue(hasattr(
collection_domain.Collection,
'_convert_v%s_dict_to_v%s_dict' % (
version_num, version_num + 1)))
self.assertFalse(hasattr(
collection_domain.Collection,
'_convert_v%s_dict_to_v%s_dict' % (
current_collection_schema_version,
current_collection_schema_version + 1)))
class SchemaMigrationUnitTests(test_utils.GenericTestBase):
"""Test migration methods for yaml content."""
YAML_CONTENT_V1 = (
"""category: A category
nodes:
- acquired_skills:
- Skill1
- Skill2
exploration_id: Exp1
prerequisite_skills: []
- acquired_skills: []
exploration_id: Exp2
prerequisite_skills:
- Skill1
objective: ''
schema_version: 1
title: A title
""")
YAML_CONTENT_V2 = (
"""category: A category
language_code: en
nodes:
- acquired_skills:
- Skill1
- Skill2
exploration_id: Exp1
prerequisite_skills: []
- acquired_skills: []
exploration_id: Exp2
prerequisite_skills:
- Skill1
objective: ''
schema_version: 2
tags: []
title: A title
""")
YAML_CONTENT_V3 = (
"""category: A category
language_code: en
nodes:
- acquired_skills:
- Skill1
- Skill2
exploration_id: Exp1
prerequisite_skills: []
- acquired_skills: []
exploration_id: Exp2
prerequisite_skills:
- Skill1
objective: ''
schema_version: 2
tags: []
title: A title
""")
YAML_CONTENT_V4 = (
"""category: A category
language_code: en
next_skill_id: 2
nodes:
- acquired_skill_ids:
- skill0
- skill1
exploration_id: Exp1
prerequisite_skill_ids: []
- acquired_skill_ids: []
exploration_id: Exp2
prerequisite_skill_ids:
- skill0
objective: ''
schema_version: 4
skills:
skill0:
name: Skill1
question_ids: []
skill1:
name: Skill2
question_ids: []
tags: []
title: A title
""")
YAML_CONTENT_V5 = (
"""category: A category
language_code: en
next_skill_index: 2
nodes:
- acquired_skill_ids:
- skill0
- skill1
exploration_id: Exp1
prerequisite_skill_ids: []
- acquired_skill_ids: []
exploration_id: Exp2
prerequisite_skill_ids:
- skill0
objective: ''
schema_version: 5
skills:
skill0:
name: Skill1
question_ids: []
skill1:
name: Skill2
question_ids: []
tags: []
title: A title
""")
YAML_CONTENT_V6 = (
"""category: A category
language_code: en
nodes:
- exploration_id: Exp1
- exploration_id: Exp2
objective: ''
schema_version: 6
tags: []
title: A title
""")
_LATEST_YAML_CONTENT = YAML_CONTENT_V6
def test_load_from_v1(self):
"""Test direct loading from a v1 yaml file."""
self.save_new_valid_exploration(
'Exp1', 'user@example.com', end_state_name='End')
collection = collection_domain.Collection.from_yaml(
'cid', self.YAML_CONTENT_V1)
self.assertEqual(collection.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v2(self):
"""Test direct loading from a v2 yaml file."""
self.save_new_valid_exploration(
'Exp1', 'user@example.com', end_state_name='End')
collection = collection_domain.Collection.from_yaml(
'cid', self.YAML_CONTENT_V2)
self.assertEqual(collection.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v3(self):
"""Test direct loading from a v3 yaml file."""
self.save_new_valid_exploration(
'Exp1', 'user@example.com', end_state_name='End')
collection = collection_domain.Collection.from_yaml(
'cid', self.YAML_CONTENT_V3)
self.assertEqual(collection.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v4(self):
"""Test direct loading from a v4 yaml file."""
self.save_new_valid_exploration(
'Exp1', 'user@example.com', end_state_name='End')
collection = collection_domain.Collection.from_yaml(
'cid', self.YAML_CONTENT_V4)
self.assertEqual(collection.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v5(self):
"""Test direct loading from a v5 yaml file."""
self.save_new_valid_exploration(
'Exp1', 'user@example.com', end_state_name='End')
collection = collection_domain.Collection.from_yaml(
'cid', self.YAML_CONTENT_V5)
self.assertEqual(collection.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v6(self):
"""Test direct loading from a v6 yaml file."""
self.save_new_valid_exploration(
'Exp1', 'user@example.com', end_state_name='End')
collection = collection_domain.Collection.from_yaml(
'cid', self.YAML_CONTENT_V6)
self.assertEqual(collection.to_yaml(), self._LATEST_YAML_CONTENT)
class CollectionSummaryTests(test_utils.GenericTestBase):
def setUp(self):
super(CollectionSummaryTests, self).setUp()
current_time = datetime.datetime.utcnow()
self.collection_summary_dict = {
'category': 'category',
'status': constants.ACTIVITY_STATUS_PRIVATE,
'community_owned': True,
'viewer_ids': ['viewer_id'],
'version': 1,
'editor_ids': ['editor_id'],
'title': 'title',
'collection_model_created_on': current_time,
'tags': [],
'collection_model_last_updated': current_time,
'contributor_ids': ['contributor_id'],
'language_code': 'en',
'objective': 'objective',
'contributors_summary': {},
'id': 'col_id',
'owner_ids': ['owner_id']
}
self.collection_summary = collection_domain.CollectionSummary(
'col_id', 'title', 'category', 'objective', 'en', [],
constants.ACTIVITY_STATUS_PRIVATE, True, ['owner_id'],
['editor_id'], ['viewer_id'], ['contributor_id'], {}, 1, 1,
current_time, current_time)
def test_collection_summary_gets_created(self):
self.assertEqual(
self.collection_summary.to_dict(), self.collection_summary_dict)
def test_validation_passes_with_valid_properties(self):
self.collection_summary.validate()
def test_validation_fails_with_invalid_title(self):
self.collection_summary.title = 0
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected title to be a string, received 0'):
self.collection_summary.validate()
def test_validation_fails_with_invalid_category(self):
self.collection_summary.category = 0
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected category to be a string, received 0'):
self.collection_summary.validate()
def test_validation_fails_with_invalid_objective(self):
self.collection_summary.objective = 0
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected objective to be a string, received 0'):
self.collection_summary.validate()
def test_validation_fails_with_missing_language_code(self):
self.collection_summary.language_code = None
with self.assertRaisesRegexp(
utils.ValidationError,
'A language must be specified \\(in the \'Settings\' tab\\).'):
self.collection_summary.validate()
def test_validation_fails_with_invalid_language_code(self):
self.collection_summary.language_code = 1
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected language code to be a string, received 1'):
self.collection_summary.validate()
def test_validation_fails_with_unallowed_language_code(self):
self.collection_summary.language_code = 'invalid'
with self.assertRaisesRegexp(
utils.ValidationError, 'Invalid language code: invalid'):
self.collection_summary.validate()
def test_validation_fails_with_invalid_tags(self):
self.collection_summary.tags = 'tags'
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected tags to be a list, received tags'):
self.collection_summary.validate()
def test_validation_fails_with_invalid_tag_in_tags(self):
self.collection_summary.tags = ['tag', 2]
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected each tag to be a string, received \'2\''):
self.collection_summary.validate()
def test_validation_fails_with_empty_tag_in_tags(self):
self.collection_summary.tags = ['', 'abc']
with self.assertRaisesRegexp(
utils.ValidationError, 'Tags should be non-empty'):
self.collection_summary.validate()
def test_validation_fails_with_unallowed_characters_in_tag(self):
self.collection_summary.tags = ['123', 'abc']
with self.assertRaisesRegexp(
utils.ValidationError, (
'Tags should only contain lowercase '
'letters and spaces, received \'123\'')):
self.collection_summary.validate()
def test_validation_fails_with_whitespace_in_tag_start(self):
self.collection_summary.tags = [' ab', 'abc']
with self.assertRaisesRegexp(
utils.ValidationError,
'Tags should not start or end with whitespace, received \' ab\''):
self.collection_summary.validate()
def test_validation_fails_with_whitespace_in_tag_end(self):
self.collection_summary.tags = ['ab ', 'abc']
with self.assertRaisesRegexp(
utils.ValidationError,
'Tags should not start or end with whitespace, received \'ab \''):
self.collection_summary.validate()
def test_validation_fails_with_adjacent_whitespace_in_tag(self):
self.collection_summary.tags = ['a b', 'abc']
with self.assertRaisesRegexp(
utils.ValidationError, (
'Adjacent whitespace in tags should '
'be collapsed, received \'a b\'')):
self.collection_summary.validate()
def test_validation_fails_with_duplicate_tags(self):
self.collection_summary.tags = ['abc', 'abc', 'ab']
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected tags to be unique, but found duplicates'):
self.collection_summary.validate()
def test_validation_fails_with_invalid_status(self):
self.collection_summary.status = 0
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected status to be string, received 0'):
self.collection_summary.validate()
def test_validation_fails_with_invalid_community_owned(self):
self.collection_summary.community_owned = 0
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected community_owned to be bool, received 0'):
self.collection_summary.validate()
def test_validation_fails_with_invalid_contributors_summary(self):
self.collection_summary.contributors_summary = 0
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected contributors_summary to be dict, received 0'):
self.collection_summary.validate()
def test_validation_fails_with_invalid_owner_ids_type(self):
self.collection_summary.owner_ids = 0
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected owner_ids to be list, received 0'):
self.collection_summary.validate()
def test_validation_fails_with_invalid_owner_id_in_owner_ids(self):
self.collection_summary.owner_ids = ['1', 2, '3']
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected each id in owner_ids to be string, received 2'):
self.collection_summary.validate()
def test_validation_fails_with_invalid_editor_ids_type(self):
self.collection_summary.editor_ids = 0
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected editor_ids to be list, received 0'):
self.collection_summary.validate()
def test_validation_fails_with_invalid_editor_id_in_editor_ids(self):
self.collection_summary.editor_ids = ['1', 2, '3']
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected each id in editor_ids to be string, received 2'):
self.collection_summary.validate()
def test_validation_fails_with_invalid_viewer_ids_type(self):
self.collection_summary.viewer_ids = 0
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected viewer_ids to be list, received 0'):
self.collection_summary.validate()
def test_validation_fails_with_invalid_viewer_id_in_viewer_ids(self):
self.collection_summary.viewer_ids = ['1', 2, '3']
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected each id in viewer_ids to be string, received 2'):
self.collection_summary.validate()
def test_validation_fails_with_invalid_contributor_ids_type(self):
self.collection_summary.contributor_ids = 0
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected contributor_ids to be list, received 0'):
self.collection_summary.validate()
def test_validation_fails_with_invalid_contributor_id_in_contributor_ids(
self):
self.collection_summary.contributor_ids = ['1', 2, '3']
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected each id in contributor_ids to be string, received 2'):
self.collection_summary.validate()
def test_is_private(self):
self.assertTrue(self.collection_summary.is_private())
self.collection_summary = collection_domain.CollectionSummary(
'col_id', 'title', 'category', 'objective', 'en', [],
constants.ACTIVITY_STATUS_PUBLIC, True, ['owner_id'],
['editor_id'], ['viewer_id'], ['contributor_id'], {}, 1, 1,
datetime.datetime.utcnow(), datetime.datetime.utcnow())
self.assertFalse(self.collection_summary.is_private())
def test_is_solely_owned_by_user_one_owner(self):
self.assertTrue(
self.collection_summary.is_solely_owned_by_user('owner_id'))
self.assertFalse(
self.collection_summary.is_solely_owned_by_user('other_id'))
self.collection_summary = collection_domain.CollectionSummary(
'col_id', 'title', 'category', 'objective', 'en', [],
constants.ACTIVITY_STATUS_PUBLIC, True, ['other_id'],
['editor_id'], ['viewer_id'], ['contributor_id'], {}, 1, 1,
datetime.datetime.utcnow(), datetime.datetime.utcnow())
self.assertFalse(
self.collection_summary.is_solely_owned_by_user('owner_id'))
self.assertTrue(
self.collection_summary.is_solely_owned_by_user('other_id'))
def test_is_solely_owned_by_user_multiple_owners(self):
self.assertTrue(
self.collection_summary.is_solely_owned_by_user('owner_id'))
self.assertFalse(
self.collection_summary.is_solely_owned_by_user('other_id'))
self.collection_summary = collection_domain.CollectionSummary(
'col_id', 'title', 'category', 'objective', 'en', [],
constants.ACTIVITY_STATUS_PUBLIC, True, ['owner_id', 'other_id'],
['editor_id'], ['viewer_id'], ['contributor_id'], {}, 1, 1,
datetime.datetime.utcnow(), datetime.datetime.utcnow())
self.assertFalse(
self.collection_summary.is_solely_owned_by_user('owner_id'))
self.assertFalse(
self.collection_summary.is_solely_owned_by_user('other_id'))
def test_is_solely_owned_by_user_other_users(self):
self.assertFalse(
self.collection_summary.is_solely_owned_by_user('editor_id'))
self.assertFalse(
self.collection_summary.is_solely_owned_by_user('viewer_id'))
self.assertFalse(
self.collection_summary.is_solely_owned_by_user('contributor_id'))
|
{
"content_hash": "10c0f21cbe005a92850cb79e5591cc9c",
"timestamp": "",
"source": "github",
"line_count": 1130,
"max_line_length": 80,
"avg_line_length": 39.18230088495575,
"alnum_prop": 0.6346327581534014,
"repo_name": "prasanna08/oppia",
"id": "63c855aac942b5cefd1e7cc76a12f82d460ab59f",
"size": "44899",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "core/domain/collection_domain_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "97795"
},
{
"name": "HTML",
"bytes": "1128491"
},
{
"name": "JavaScript",
"bytes": "733121"
},
{
"name": "Python",
"bytes": "9362251"
},
{
"name": "Shell",
"bytes": "10639"
},
{
"name": "TypeScript",
"bytes": "6077851"
}
],
"symlink_target": ""
}
|
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from tests import IntegrationTestCase
from tests.holodeck import Request
from twilio.base.exceptions import TwilioException
from twilio.http.response import Response
class WebChannelTestCase(IntegrationTestCase):
def test_list_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.flex_api.v1.web_channel.list()
self.holodeck.assert_has_request(Request(
'get',
'https://flex-api.twilio.com/v1/WebChannels',
))
def test_read_full_response(self):
self.holodeck.mock(Response(
200,
'''
{
"meta": {
"page": 0,
"page_size": 50,
"first_page_url": "https://flex-api.twilio.com/v1/WebChannels?PageSize=50&Page=0",
"previous_page_url": null,
"url": "https://flex-api.twilio.com/v1/WebChannels?PageSize=50&Page=0",
"next_page_url": null,
"key": "flex_chat_channels"
},
"flex_chat_channels": [
{
"flex_flow_sid": "FOaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"sid": "CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"date_created": "2016-08-01T22:10:40Z",
"date_updated": "2016-08-01T22:10:40Z",
"url": "https://flex-api.twilio.com/v1/WebChannels/CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
}
]
}
'''
))
actual = self.client.flex_api.v1.web_channel.list()
self.assertIsNotNone(actual)
def test_read_empty_response(self):
self.holodeck.mock(Response(
200,
'''
{
"meta": {
"page": 0,
"page_size": 50,
"first_page_url": "https://flex-api.twilio.com/v1/WebChannels?PageSize=50&Page=0",
"previous_page_url": null,
"url": "https://flex-api.twilio.com/v1/WebChannels?PageSize=50&Page=0",
"next_page_url": null,
"key": "flex_chat_channels"
},
"flex_chat_channels": []
}
'''
))
actual = self.client.flex_api.v1.web_channel.list()
self.assertIsNotNone(actual)
def test_fetch_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.flex_api.v1.web_channel("CHXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").fetch()
self.holodeck.assert_has_request(Request(
'get',
'https://flex-api.twilio.com/v1/WebChannels/CHXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
))
def test_fetch_response(self):
self.holodeck.mock(Response(
200,
'''
{
"flex_flow_sid": "FOaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"sid": "CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"date_created": "2016-08-01T22:10:40Z",
"date_updated": "2016-08-01T22:10:40Z",
"url": "https://flex-api.twilio.com/v1/WebChannels/CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
}
'''
))
actual = self.client.flex_api.v1.web_channel("CHXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").fetch()
self.assertIsNotNone(actual)
def test_create_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.flex_api.v1.web_channel.create(flex_flow_sid="FOXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX", identity="identity", customer_friendly_name="customer_friendly_name", chat_friendly_name="chat_friendly_name")
values = {
'FlexFlowSid': "FOXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX",
'Identity': "identity",
'CustomerFriendlyName': "customer_friendly_name",
'ChatFriendlyName': "chat_friendly_name",
}
self.holodeck.assert_has_request(Request(
'post',
'https://flex-api.twilio.com/v1/WebChannels',
data=values,
))
def test_create_response(self):
self.holodeck.mock(Response(
201,
'''
{
"flex_flow_sid": "FOaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"sid": "CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"date_created": "2016-08-01T22:10:40Z",
"date_updated": "2016-08-01T22:10:40Z",
"url": "https://flex-api.twilio.com/v1/WebChannels/CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
}
'''
))
actual = self.client.flex_api.v1.web_channel.create(flex_flow_sid="FOXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX", identity="identity", customer_friendly_name="customer_friendly_name", chat_friendly_name="chat_friendly_name")
self.assertIsNotNone(actual)
def test_update_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.flex_api.v1.web_channel("CHXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").update()
self.holodeck.assert_has_request(Request(
'post',
'https://flex-api.twilio.com/v1/WebChannels/CHXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
))
def test_update_response(self):
self.holodeck.mock(Response(
200,
'''
{
"flex_flow_sid": "FOaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"sid": "CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"date_created": "2016-08-01T22:10:40Z",
"date_updated": "2016-08-01T22:10:40Z",
"url": "https://flex-api.twilio.com/v1/WebChannels/CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
}
'''
))
actual = self.client.flex_api.v1.web_channel("CHXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").update()
self.assertIsNotNone(actual)
def test_delete_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.flex_api.v1.web_channel("CHXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").delete()
self.holodeck.assert_has_request(Request(
'delete',
'https://flex-api.twilio.com/v1/WebChannels/CHXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
))
def test_delete_response(self):
self.holodeck.mock(Response(
204,
None,
))
actual = self.client.flex_api.v1.web_channel("CHXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").delete()
self.assertTrue(actual)
|
{
"content_hash": "8d223d43244fea507cc26788640e65ba",
"timestamp": "",
"source": "github",
"line_count": 199,
"max_line_length": 222,
"avg_line_length": 36.01005025125628,
"alnum_prop": 0.5597264861847614,
"repo_name": "twilio/twilio-python",
"id": "dcbb89fea9421245a73c67a53135df72ab2d1038",
"size": "7181",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/integration/flex_api/v1/test_web_channel.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "234"
},
{
"name": "Makefile",
"bytes": "2157"
},
{
"name": "Python",
"bytes": "11241545"
}
],
"symlink_target": ""
}
|
"""The tests for the TCP binary sensor platform."""
from datetime import timedelta
from unittest.mock import call, patch
import pytest
from homeassistant.const import STATE_OFF, STATE_ON
from homeassistant.setup import async_setup_component
from homeassistant.util.dt import utcnow
from tests.common import assert_setup_component, async_fire_time_changed
import tests.components.tcp.test_sensor as test_tcp
BINARY_SENSOR_CONFIG = test_tcp.TEST_CONFIG["sensor"]
TEST_CONFIG = {"binary_sensor": BINARY_SENSOR_CONFIG}
TEST_ENTITY = "binary_sensor.test_name"
@pytest.fixture(name="mock_socket")
def mock_socket_fixture():
"""Mock the socket."""
with patch(
"homeassistant.components.tcp.sensor.socket.socket"
) as mock_socket, patch(
"homeassistant.components.tcp.sensor.select.select",
return_value=(True, False, False),
):
# yield the return value of the socket context manager
yield mock_socket.return_value.__enter__.return_value
@pytest.fixture
def now():
"""Return datetime UTC now."""
return utcnow()
async def test_setup_platform_valid_config(hass, mock_socket):
"""Check a valid configuration."""
with assert_setup_component(1, "binary_sensor"):
assert await async_setup_component(hass, "binary_sensor", TEST_CONFIG)
await hass.async_block_till_done()
async def test_setup_platform_invalid_config(hass, mock_socket):
"""Check the invalid configuration."""
with assert_setup_component(0):
assert await async_setup_component(
hass,
"binary_sensor",
{"binary_sensor": {"platform": "tcp", "porrt": 1234}},
)
await hass.async_block_till_done()
async def test_state(hass, mock_socket, now):
"""Check the state and update of the binary sensor."""
mock_socket.recv.return_value = b"off"
assert await async_setup_component(hass, "binary_sensor", TEST_CONFIG)
await hass.async_block_till_done()
state = hass.states.get(TEST_ENTITY)
assert state
assert state.state == STATE_OFF
assert mock_socket.connect.called
assert mock_socket.connect.call_args == call(
(BINARY_SENSOR_CONFIG["host"], BINARY_SENSOR_CONFIG["port"])
)
assert mock_socket.send.called
assert mock_socket.send.call_args == call(BINARY_SENSOR_CONFIG["payload"].encode())
assert mock_socket.recv.called
assert mock_socket.recv.call_args == call(BINARY_SENSOR_CONFIG["buffer_size"])
mock_socket.recv.return_value = b"on"
async_fire_time_changed(hass, now + timedelta(seconds=45))
await hass.async_block_till_done()
state = hass.states.get(TEST_ENTITY)
assert state
assert state.state == STATE_ON
|
{
"content_hash": "60b5b0f70701be78ed91210c44df0a52",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 87,
"avg_line_length": 32.734939759036145,
"alnum_prop": 0.6919396393080604,
"repo_name": "adrienbrault/home-assistant",
"id": "21dd84b189211674cc224216a38181abcb2e4cbf",
"size": "2717",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "tests/components/tcp/test_binary_sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1795"
},
{
"name": "Python",
"bytes": "32021043"
},
{
"name": "Shell",
"bytes": "4900"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from .client import Client, get_metadata # noqa
|
{
"content_hash": "69f1a1bce8dbdb0887cb875714e007dd",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 48,
"avg_line_length": 29.666666666666668,
"alnum_prop": 0.7528089887640449,
"repo_name": "eillarra/irekia",
"id": "8d3efcf59f1ae679322dc231bb9a8466aabe9ee1",
"size": "89",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "irekia/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "14716"
}
],
"symlink_target": ""
}
|
from django.conf.urls.defaults import *
from django.conf import settings
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
urlpatterns = patterns('',
#
(r'^facebook/', include('django_facebook.urls')),
#what to do with these?
(r'^accounts/', include('django_facebook.auth_urls')),
# Example:
# (r'^django_facebook_test/', include('django_facebook_test.foo.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
# (r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
# (r'^admin/', include(admin.site.urls)),
)
if settings.MODE == 'userena':
urlpatterns += patterns('',
(r'^accounts/', include('userena.urls')),
)
elif settings.MODE == 'django_registration':
urlpatterns += patterns('',
(r'^accounts/', include(
'registration.backends.default.urls')),
)
if settings.DEBUG:
urlpatterns += patterns('',
url(r'^media/(?P<path>.*)$', 'django.views.static.serve', {
'document_root': settings.MEDIA_ROOT,
}),
)
|
{
"content_hash": "3bcf717732f91c0cc05e49544b166978",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 95,
"avg_line_length": 40.425,
"alnum_prop": 0.4551638837353123,
"repo_name": "fogcitymarathoner/djfb",
"id": "45dfcd10d7ab93596a4f33633ed577b3ed2fc07b",
"size": "1617",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "facebook_example/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "98423"
},
{
"name": "Python",
"bytes": "672000"
},
{
"name": "Shell",
"bytes": "4697"
}
],
"symlink_target": ""
}
|
import pytest
import txaio
def test_is_future_generic(framework):
'''
Returning an immediate value from as_future
'''
f = txaio.create_future('result')
assert txaio.is_future(f)
def test_is_future_coroutine(framework_aio):
'''
Returning an immediate value from as_future
'''
pytest.importorskip('asyncio') # 'aio' might be using trollius
from asyncio import coroutine
@coroutine
def some_coroutine():
yield 'answer'
obj = some_coroutine()
assert txaio.is_future(obj)
def test_is_called(framework):
f = txaio.create_future_success(None)
assert txaio.is_called(f)
|
{
"content_hash": "9190a75117c936ccd3cb1e6503ae3c6d",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 67,
"avg_line_length": 21.433333333333334,
"alnum_prop": 0.6671850699844479,
"repo_name": "crossbario/txaio",
"id": "fcadc7f026ec8e8af8ede05481283d8cccccdb0a",
"size": "1937",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "test/test_is_future.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1285"
},
{
"name": "Python",
"bytes": "146564"
},
{
"name": "Shell",
"bytes": "2121"
}
],
"symlink_target": ""
}
|
def setColorCount(qimage, color_count):
"""Compatibility function btw. Qt4 and Qt5"""
try:
qimage.setColorCount(color_count) # Qt 4.6 and later
except AttributeError:
qimage.setNumColors(color_count)
def colorCount(qimage):
"""Compatibility function btw. Qt4 and Qt5"""
try:
return qimage.colorCount() # Qt 4.6 and later
except AttributeError:
return qimage.numColors()
def sizeInBytes(qimage):
"""Compatibility function btw. Qt4, Qt5, and Qt6"""
try:
return qimage.sizeInBytes() # Qt 5.10 and later
except AttributeError:
try:
return qimage.byteCount() # Qt 4.6 and later
except AttributeError:
return qimage.numBytes()
# deprecated name for backwards compatibility
setNumColors = setColorCount
numColors = colorCount
numBytes = sizeInBytes
|
{
"content_hash": "62fadad3ef11424d751d1a84687d4506",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 60,
"avg_line_length": 25.5,
"alnum_prop": 0.6689734717416378,
"repo_name": "hmeine/qimage2ndarray",
"id": "002a0e422c61477684ced96264afd4bda93b8a4b",
"size": "869",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/compat.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "1508"
},
{
"name": "Python",
"bytes": "55237"
}
],
"symlink_target": ""
}
|
import numpy as np
import pandas.compat as compat
import pandas as pd
class TablePlotter(object):
"""
Layout some DataFrames in vertical/horizontal layout for explanation.
Used in merging.rst
"""
def __init__(self, cell_width=0.37, cell_height=0.25, font_size=7.5):
self.cell_width = cell_width
self.cell_height = cell_height
self.font_size = font_size
def _shape(self, df):
"""
Calculate table chape considering index levels.
"""
row, col = df.shape
return row + df.columns.nlevels, col + df.index.nlevels
def _get_cells(self, left, right, vertical):
"""
Calculate appropriate figure size based on left and right data.
"""
if vertical:
# calculate required number of cells
vcells = max(sum(self._shape(l)[0] for l in left),
self._shape(right)[0])
hcells = (max(self._shape(l)[1] for l in left) +
self._shape(right)[1])
else:
vcells = max([self._shape(l)[0] for l in left] +
[self._shape(right)[0]])
hcells = sum([self._shape(l)[1] for l in left] +
[self._shape(right)[1]])
return hcells, vcells
def plot(self, left, right, labels=None, vertical=True):
"""
Plot left / right DataFrames in specified layout.
Parameters
----------
left : list of DataFrames before operation is applied
right : DataFrame of operation result
labels : list of str to be drawn as titles of left DataFrames
vertical : bool
If True, use vertical layout. If False, use horizontal layout.
"""
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
if not isinstance(left, list):
left = [left]
left = [self._conv(l) for l in left]
right = self._conv(right)
hcells, vcells = self._get_cells(left, right, vertical)
if vertical:
figsize = self.cell_width * hcells, self.cell_height * vcells
else:
# include margin for titles
figsize = self.cell_width * hcells, self.cell_height * vcells
fig = plt.figure(figsize=figsize)
if vertical:
gs = gridspec.GridSpec(len(left), hcells)
# left
max_left_cols = max(self._shape(l)[1] for l in left)
max_left_rows = max(self._shape(l)[0] for l in left)
for i, (l, label) in enumerate(zip(left, labels)):
ax = fig.add_subplot(gs[i, 0:max_left_cols])
self._make_table(ax, l, title=label,
height=1.0 / max_left_rows)
# right
ax = plt.subplot(gs[:, max_left_cols:])
self._make_table(ax, right, title='Result', height=1.05 / vcells)
fig.subplots_adjust(top=0.9, bottom=0.05, left=0.05, right=0.95)
else:
max_rows = max(self._shape(df)[0] for df in left + [right])
height = 1.0 / np.max(max_rows)
gs = gridspec.GridSpec(1, hcells)
# left
i = 0
for l, label in zip(left, labels):
sp = self._shape(l)
ax = fig.add_subplot(gs[0, i:i + sp[1]])
self._make_table(ax, l, title=label, height=height)
i += sp[1]
# right
ax = plt.subplot(gs[0, i:])
self._make_table(ax, right, title='Result', height=height)
fig.subplots_adjust(top=0.85, bottom=0.05, left=0.05, right=0.95)
return fig
def _conv(self, data):
"""Convert each input to appropriate for table outplot"""
if isinstance(data, pd.Series):
if data.name is None:
data = data.to_frame(name='')
else:
data = data.to_frame()
data = data.fillna('NaN')
return data
def _insert_index(self, data):
# insert is destructive
data = data.copy()
idx_nlevels = data.index.nlevels
if idx_nlevels == 1:
data.insert(0, 'Index', data.index)
else:
for i in range(idx_nlevels):
data.insert(i, 'Index{0}'.format(i),
data.index._get_level_values(i))
col_nlevels = data.columns.nlevels
if col_nlevels > 1:
col = data.columns._get_level_values(0)
values = [data.columns._get_level_values(i).values
for i in range(1, col_nlevels)]
col_df = pd.DataFrame(values)
data.columns = col_df.columns
data = pd.concat([col_df, data])
data.columns = col
return data
def _make_table(self, ax, df, title, height=None):
if df is None:
ax.set_visible(False)
return
import pandas.plotting as plotting
idx_nlevels = df.index.nlevels
col_nlevels = df.columns.nlevels
# must be convert here to get index levels for colorization
df = self._insert_index(df)
tb = plotting.table(ax, df, loc=9)
tb.set_fontsize(self.font_size)
if height is None:
height = 1.0 / (len(df) + 1)
props = tb.properties()
for (r, c), cell in compat.iteritems(props['celld']):
if c == -1:
cell.set_visible(False)
elif r < col_nlevels and c < idx_nlevels:
cell.set_visible(False)
elif r < col_nlevels or c < idx_nlevels:
cell.set_facecolor('#AAAAAA')
cell.set_height(height)
ax.set_title(title, size=self.font_size)
ax.axis('off')
class _WritableDoc(type):
# Remove this when Python2 support is dropped
# __doc__ is not mutable for new-style classes in Python2, which means
# we can't use @Appender to share class docstrings. This can be used
# with `add_metaclass` to make cls.__doc__ mutable.
pass
if __name__ == "__main__":
import matplotlib.pyplot as plt
p = TablePlotter()
df1 = pd.DataFrame({'A': [10, 11, 12],
'B': [20, 21, 22],
'C': [30, 31, 32]})
df2 = pd.DataFrame({'A': [10, 12],
'C': [30, 32]})
p.plot([df1, df2], pd.concat([df1, df2]),
labels=['df1', 'df2'], vertical=True)
plt.show()
df3 = pd.DataFrame({'X': [10, 12],
'Z': [30, 32]})
p.plot([df1, df3], pd.concat([df1, df3], axis=1),
labels=['df1', 'df2'], vertical=False)
plt.show()
idx = pd.MultiIndex.from_tuples([(1, 'A'), (1, 'B'), (1, 'C'),
(2, 'A'), (2, 'B'), (2, 'C')])
col = pd.MultiIndex.from_tuples([(1, 'A'), (1, 'B')])
df3 = pd.DataFrame({'v1': [1, 2, 3, 4, 5, 6],
'v2': [5, 6, 7, 8, 9, 10]},
index=idx)
df3.columns = col
p.plot(df3, df3, labels=['df3'])
plt.show()
|
{
"content_hash": "cf39b8210df2869362fea9b1c88efe8b",
"timestamp": "",
"source": "github",
"line_count": 206,
"max_line_length": 77,
"avg_line_length": 34.46116504854369,
"alnum_prop": 0.516410762079166,
"repo_name": "MJuddBooth/pandas",
"id": "4aee0a2e5350e0ad1229ca9d209d34a925a4b482",
"size": "7099",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "pandas/util/_doctools.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "4879"
},
{
"name": "C",
"bytes": "406766"
},
{
"name": "C++",
"bytes": "17248"
},
{
"name": "HTML",
"bytes": "606963"
},
{
"name": "Makefile",
"bytes": "529"
},
{
"name": "Python",
"bytes": "14858932"
},
{
"name": "Shell",
"bytes": "29575"
},
{
"name": "Smarty",
"bytes": "2040"
}
],
"symlink_target": ""
}
|
"""Provide a class for loading data from URL's that handles basic
authentication"""
ident = '$Id: URLopener.py 541 2004-01-31 04:20:06Z warnes $'
from version import __version__
from Config import Config
from urllib import FancyURLopener
class URLopener(FancyURLopener):
username = None
passwd = None
def __init__(self, username=None, passwd=None, *args, **kw):
FancyURLopener.__init__( self, *args, **kw)
self.username = username
self.passwd = passwd
def prompt_user_passwd(self, host, realm):
return self.username, self.passwd
|
{
"content_hash": "b57b14dbcfdabb8cebfeb643547804dc",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 65,
"avg_line_length": 25.391304347826086,
"alnum_prop": 0.678082191780822,
"repo_name": "asfaltboy/jirash",
"id": "2c04c8685418d2126a2bc4f59fd108bd016a9763",
"size": "584",
"binary": false,
"copies": "294",
"ref": "refs/heads/experimental",
"path": "deps/SOAPpy/URLopener.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "129054"
},
{
"name": "Shell",
"bytes": "87"
}
],
"symlink_target": ""
}
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tests for Azure Blob Storage client.
"""
# pytype: skip-file
import logging
import unittest
# Protect against environments where azure library is not available.
# pylint: disable=wrong-import-order, wrong-import-position
try:
from apache_beam.io.azure import blobstorageio
except ImportError:
blobstorageio = None # type: ignore[assignment]
# pylint: enable=wrong-import-order, wrong-import-position
@unittest.skipIf(blobstorageio is None, 'Azure dependencies are not installed')
class TestAZFSPathParser(unittest.TestCase):
BAD_AZFS_PATHS = [
'azfs://'
'azfs://storage-account/'
'azfs://storage-account/**'
'azfs://storage-account/**/*'
'azfs://container'
'azfs:///name'
'azfs:///'
'azfs:/blah/container/name'
'azfs://ab/container/name'
'azfs://accountwithmorethan24chars/container/name'
'azfs://***/container/name'
'azfs://storageaccount/my--container/name'
'azfs://storageaccount/CONTAINER/name'
'azfs://storageaccount/ct/name'
]
def test_azfs_path(self):
self.assertEqual(
blobstorageio.parse_azfs_path(
'azfs://storageaccount/container/name', get_account=True),
('storageaccount', 'container', 'name'))
self.assertEqual(
blobstorageio.parse_azfs_path(
'azfs://storageaccount/container/name/sub', get_account=True),
('storageaccount', 'container', 'name/sub'))
def test_bad_azfs_path(self):
for path in self.BAD_AZFS_PATHS:
self.assertRaises(ValueError, blobstorageio.parse_azfs_path, path)
self.assertRaises(
ValueError,
blobstorageio.parse_azfs_path,
'azfs://storageaccount/container/')
def test_azfs_path_blob_optional(self):
self.assertEqual(
blobstorageio.parse_azfs_path(
'azfs://storageaccount/container/name',
blob_optional=True,
get_account=True), ('storageaccount', 'container', 'name'))
self.assertEqual(
blobstorageio.parse_azfs_path(
'azfs://storageaccount/container/',
blob_optional=True,
get_account=True), ('storageaccount', 'container', ''))
def test_bad_azfs_path_blob_optional(self):
for path in self.BAD_AZFS_PATHS:
self.assertRaises(ValueError, blobstorageio.parse_azfs_path, path, True)
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
unittest.main()
|
{
"content_hash": "5068538d67299c5e940e939c00b1eeee",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 79,
"avg_line_length": 35.285714285714285,
"alnum_prop": 0.6863905325443787,
"repo_name": "robertwb/incubator-beam",
"id": "262a75b756a814e869369cc7dcb3de75813aeccc",
"size": "3211",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "sdks/python/apache_beam/io/azure/blobstorageio_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "1598"
},
{
"name": "C",
"bytes": "3869"
},
{
"name": "CSS",
"bytes": "4957"
},
{
"name": "Cython",
"bytes": "59582"
},
{
"name": "Dart",
"bytes": "541526"
},
{
"name": "Dockerfile",
"bytes": "48191"
},
{
"name": "FreeMarker",
"bytes": "7933"
},
{
"name": "Go",
"bytes": "4688736"
},
{
"name": "Groovy",
"bytes": "888171"
},
{
"name": "HCL",
"bytes": "101646"
},
{
"name": "HTML",
"bytes": "164685"
},
{
"name": "Java",
"bytes": "38649211"
},
{
"name": "JavaScript",
"bytes": "105966"
},
{
"name": "Jupyter Notebook",
"bytes": "55818"
},
{
"name": "Kotlin",
"bytes": "209531"
},
{
"name": "Lua",
"bytes": "3620"
},
{
"name": "Python",
"bytes": "9785295"
},
{
"name": "SCSS",
"bytes": "312814"
},
{
"name": "Sass",
"bytes": "19336"
},
{
"name": "Scala",
"bytes": "1429"
},
{
"name": "Shell",
"bytes": "336583"
},
{
"name": "Smarty",
"bytes": "2618"
},
{
"name": "Thrift",
"bytes": "3260"
},
{
"name": "TypeScript",
"bytes": "181369"
}
],
"symlink_target": ""
}
|
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from horizon import forms
from horizon.utils import functions as utils
from openstack_dashboard.dashboards.settings.user import forms as user_forms
class UserSettingsView(forms.ModalFormView):
form_class = user_forms.UserSettingsForm
template_name = 'settings/user/settings.html'
page_title = _("User Settings")
def get_initial(self):
return {
'language': self.request.session.get(
settings.LANGUAGE_COOKIE_NAME,
self.request.COOKIES.get(settings.LANGUAGE_COOKIE_NAME,
self.request.LANGUAGE_CODE)),
'timezone': self.request.session.get(
'django_timezone',
self.request.COOKIES.get('django_timezone', 'UTC')),
'pagesize': utils.get_page_size(self.request),
'instance_log_length': utils.get_log_length(self.request)}
def form_valid(self, form):
return form.handle(self.request, form.cleaned_data)
|
{
"content_hash": "ce53c9ba2e521bb2d1329ac2e4b540cf",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 76,
"avg_line_length": 39.925925925925924,
"alnum_prop": 0.650278293135436,
"repo_name": "tsufiev/horizon",
"id": "6f81435948ca3943435083784541ae544e151511",
"size": "1683",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "openstack_dashboard/dashboards/settings/user/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "1536"
},
{
"name": "CSS",
"bytes": "70531"
},
{
"name": "HTML",
"bytes": "420092"
},
{
"name": "JavaScript",
"bytes": "277460"
},
{
"name": "Makefile",
"bytes": "588"
},
{
"name": "Python",
"bytes": "4417610"
},
{
"name": "Shell",
"bytes": "18318"
}
],
"symlink_target": ""
}
|
"""Module for logging subtasks.
"""
import abc
from typing import Any, List, Mapping, Optional
import dm_env
from dm_robotics.agentflow import subtask
from dm_robotics.agentflow.loggers import types
from dm_robotics.agentflow.loggers import utils
import numpy as np
class Aggregator(abc.ABC):
"""Base class for data-aggregators for SubTaskLogger.
An `Aggregator` handles the job of accumulating data to log from parent and
child timesteps & actions within a subtask.
"""
@abc.abstractmethod
def accumulate(self, parent_timestep: dm_env.TimeStep,
parent_action: np.ndarray, agent_timestep: dm_env.TimeStep,
agent_action: np.ndarray) -> Optional[Mapping[str, Any]]:
"""Step aggregator and optionally return a dict of information to log.
Args:
parent_timestep: The timestep passed to the SubTask by its parent.
parent_action: The action being returned to the parent. Typically an
exteneded or modified version of `agent_action`.
agent_timestep: The timestep this subtask passed to its agent. Typically a
reduced or modified version of `parent_timestep`.
agent_action: The action returned by the agent this step.
Returns:
A dictionary of information that can be passed to an acme logger. Can also
return None, which skips logging this step.
"""
pass
class EpisodeReturnAggregator(Aggregator):
"""An Aggregator that computes episode return and length when subtask ends."""
def __init__(self,
additional_discount: float = 1.,
return_name: str = 'episode_return',
length_name: str = 'episode_length'):
self._additional_discount = additional_discount
self._return_name = return_name
self._length_name = length_name
self._episode_rewards = [] # type: List[float]
self._episode_discounts = [] # type: List[float]
def accumulate(self, parent_timestep: dm_env.TimeStep,
parent_action: np.ndarray, agent_timestep: dm_env.TimeStep,
agent_action: np.ndarray) -> Optional[Mapping[str, Any]]:
if agent_timestep.first():
self._episode_rewards.clear()
self._episode_discounts.clear()
if agent_timestep.reward is None or agent_timestep.discount is None:
return # Some environments omit reward and discount on first step.
self._episode_rewards.append(agent_timestep.reward)
self._episode_discounts.append(agent_timestep.discount)
if agent_timestep.last():
return {
self._return_name: utils.compute_return(
self._episode_rewards,
np.array(self._episode_discounts) * self._additional_discount),
self._length_name: len(self._episode_rewards)
}
return
class SubTaskLogger(subtask.SubTaskObserver):
"""A subtask observer that logs agent performance to an Acme logger."""
def __init__(self, logger: types.Logger, aggregator: Aggregator):
"""Initialize SubTaskLogger."""
self._logger = logger
self._aggregator = aggregator
def step(self, parent_timestep: dm_env.TimeStep, parent_action: np.ndarray,
agent_timestep: dm_env.TimeStep, agent_action: np.ndarray) -> None:
# Fetch current data to log.
data = self._aggregator.accumulate(parent_timestep, agent_action,
agent_timestep, agent_action)
# Log the given results.
if data is not None:
self._logger.write(data)
|
{
"content_hash": "6e29e9c2efa1a354eaf7ca72ce7abd5f",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 80,
"avg_line_length": 36.9468085106383,
"alnum_prop": 0.677512237258854,
"repo_name": "deepmind/dm_robotics",
"id": "b38bbfc59ab010d524e87772067d0d478c08a86e",
"size": "4079",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "py/agentflow/loggers/subtask_logger.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "479450"
},
{
"name": "CMake",
"bytes": "34173"
},
{
"name": "Jupyter Notebook",
"bytes": "106284"
},
{
"name": "Python",
"bytes": "1413203"
},
{
"name": "Shell",
"bytes": "3244"
}
],
"symlink_target": ""
}
|
"""
gridsearch to find best values for max_depth and min_samples_leaf in
a decision tree that predicts all the output measures.
"""
import numpy as np
import time
from sklearn import tree
from sklearn.metrics import mean_squared_error
from sklearn.grid_search import GridSearchCV
from lignet_utils import gen_train_test
from constants import Y_COLUMNS
script_start_time = time.time()
x_train, x_test, y_train, y_test, x_scaler, y_scaler = gen_train_test()
dtr_full = tree.DecisionTreeRegressor(max_depth=20, min_samples_leaf=5)
# Set up the gridsearch
param_grid = {'max_depth': range(10, 40),
'min_samples_leaf': range(1, 15),
}
grid_search = GridSearchCV(dtr_full, param_grid, verbose=0, n_jobs=20,
pre_dispatch='2*n_jobs',
scoring='mean_squared_error')
# search the network parameters using a subset of the entire training set
grid_search.fit(x_train[:, :], y_train[:, :])
script_running_time = time.time() - script_start_time
with open('decision_tree_gridsearch_report.txt', 'w+') as report:
print >>report, ('took % sec\n\n' % script_running_time)
for entry in grid_search.grid_scores_:
print >>report, entry
print >>report, '\n\nbest: %s' % grid_search.best_params_
|
{
"content_hash": "50db0b71f63dc5e70daa9df942019fa7",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 73,
"avg_line_length": 32.1,
"alnum_prop": 0.6752336448598131,
"repo_name": "houghb/lignet",
"id": "35aa54d88ce0e023a5d34058bd8062f81265a172",
"size": "1284",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gridsearches/gridsearch_decision_tree.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "5246007"
},
{
"name": "Python",
"bytes": "56220"
},
{
"name": "Shell",
"bytes": "1350"
}
],
"symlink_target": ""
}
|
from os.path import join
import datetime
import logging
import time
# third party
import requests
# local modules
import fc.utils as utils
logger = logging.getLogger()
# WARNING level and worse for requests
logging.getLogger('requests').setLevel(logging.WARNING)
def process_one_email(q, count, id_val, dt, email):
""" Submit an email address to the Full Contact Person API and process
the responses
Process the response object based on the return status code
Parameters
----------
q : an instance of a Priority Queue
count : the count from the original placement in the queue
id_valm : id associated with email address
dt : datetime when the id was created
email : email address
Returns
-------
null
"""
# import global
from fc import (OUT_DIR,
RETRY_TIME)
dt = dt.split()[0]
logger.info(('Post | email: {_email} id: {_id}'
' | {_email} posted to the Full Contact Person API')
.format(_email=email, _id=id_val))
# blocking operation - not to worry as each request is
# its own thread
r = query_person('email', email)
# log results
# if status code is not in 200, 202, 404 then the
# header values are not available
if r.status_code in (200, 202, 404):
post_msg = ('Return | email: {_email} id: {_id}'
' | return status code: {_status}'
' | datetime: {_dt}'
' | rate limit: {_rl} calls / 60 seconds'
' | rate limit remaining: '
'{_rlrem} calls / {_rlres} seconds')
post_msg = post_msg.format(_email=email,
_id=id_val,
_status=r.status_code,
_dt=r.headers['Date'],
_rl=r.headers['X-Rate-Limit-Limit'],
_rlrem=r.headers['X-Rate-Limit-Remaining'],
_rlres=r.headers['X-Rate-Limit-Reset'])
else:
post_msg = ('Return | email: {_email} id: {_id}'
' | return status code: {_status}')
post_msg = post_msg.format(_email=email,
_id=id_val,
_status=r.status_code)
logger.info(post_msg)
out_file = join(OUT_DIR,
'{_dt}_{_id}.json'.format(_dt=dt,
_id=id_val))
logging_desc = ('Results | email: {_email} id: {_id}'
' | status {_status}')
logging_desc = logging_desc.format(_email=email,
_id=id_val,
_status=r.status_code)
# process responses
if r.status_code == 200:
logging_desc += ' | success | writing to {_dt}_{_id}.json'
logging_desc = \
logging_desc.format(_dt=dt, _id=id)
logger.info(logging_desc)
utils.write_json(r.json(), out_file)
elif r.status_code == 202:
logging_desc += (' | request is being processed'
' | adding email: {_email} id: {_id}'
' back to the queue and waiting {_retry}'
' seconds before resubmitting')
logging_desc = logging_desc.format(_email=email,
_id=id_val,
_retry=RETRY_TIME)
logger.info(logging_desc)
utils.write_json(r.json(), out_file)
# adding back to the queue
execute_time = time.time() + RETRY_TIME
q.put((execute_time, count, id_val, dt, email))
elif r.status_code == 400:
logging_desc += ' | bad / malformed request'
logger.info(logging_desc)
utils.write_json(r.json(), out_file)
elif r.status_code == 403:
logging_desc += (' | forbidden'
' | api key is invalid, missing, or exceeded quota')
logger.info(logging_desc)
utils.write_json(r.json(), out_file)
elif r.status_code == 404:
logging_desc += (' | not found'
' | person searched in the past 24 hours'
' and nothing was found')
logger.info(logging_desc)
utils.write_json(r.json(), out_file)
elif r.status_code == 405:
logging_desc += (' | method not allowed'
' | queried the API with an unsupported HTTP method')
logger.info(logging_desc)
utils.write_json(r.json(), out_file)
elif r.status_code == 410:
logging_desc += ' | gone | the resource cannot be found'
logger.info(logging_desc)
utils.write_json(r.json(), out_file)
elif r.status_code == 422:
logging_desc += ' | invalid ==> invalid or missing API query parameter'
logger.info(logging_desc)
utils.write_json(r.json(), out_file)
elif r.status_code == 500:
logging_desc += (' | internal server error'
' | an unexpected error at Full Contact; please contact'
'support@fullcontact.com')
logger.info(logging_desc)
utils.write_json(r.json(), out_file)
elif r.status_code == 503:
logging_desc += (' | service temporarily down'
' | check the Retry-After header')
logger.info(logging_desc)
utils.write_json(r.json(), out_file)
def query_person(lookup, lookup_value):
""" Query the Full Contact Person API
Parameters
----------
lookup : lookup type
possible values include email, phone, or twitter handle
lookup_value : lookup value associated with lookup
for instance, if the type of lookup is email then provide the
email string
Returns
-------
r : requests object
"""
API_KEY = utils.get_api_key('fc_key')
URL = 'https://api.fullcontact.com/v2/person.json'
headers = {'X-FullContact-APIKey': API_KEY}
# parameters
parameters = {'prettyPrint': 'true'}
if lookup == 'email':
parameters['email'] = lookup_value
elif lookup == 'phone':
no_dash_paren = string.maketrans('', '', '()-')
parameters['phone'] = '+1' + lookup_value.translate(no_dash_paren)
elif lookup == 'twitter':
parameters['twitter'] = lookup_value
else:
raise ValueError('lookup should be one of email, phone, or twitter')
# post request
r = requests.post(URL, headers=headers, params=parameters)
return r
|
{
"content_hash": "31ab4ae22f54bf1eb221d6e10ce69ab7",
"timestamp": "",
"source": "github",
"line_count": 194,
"max_line_length": 81,
"avg_line_length": 34.11340206185567,
"alnum_prop": 0.5314294348745845,
"repo_name": "curtisalexander/fc",
"id": "5b1efcfe587a4530e3f82127e35d4a6a5e1dabc8",
"size": "6637",
"binary": false,
"copies": "1",
"ref": "refs/heads/public",
"path": "fc/person.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "32372"
}
],
"symlink_target": ""
}
|
"""
===============================================================================
Original code copyright (C) 2009-2022 Rudolf Cardinal (rudolf@pobox.com).
This file is part of cardinal_pythonlib.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
===============================================================================
**Functions to inspect and copy SQLAlchemy ORM objects.**
"""
from typing import (
Dict,
Generator,
List,
Set,
Tuple,
Type,
TYPE_CHECKING,
Union,
)
# noinspection PyProtectedMember
from sqlalchemy.ext.declarative.base import _get_immediate_cls_attr
from sqlalchemy.inspection import inspect
from sqlalchemy.orm.base import class_mapper
from sqlalchemy.orm.mapper import Mapper
from sqlalchemy.orm.relationships import RelationshipProperty
from sqlalchemy.orm.session import Session
from sqlalchemy.sql.schema import Column, MetaData
from sqlalchemy.sql.type_api import TypeEngine
from sqlalchemy.sql.visitors import VisitableType
from sqlalchemy.util import OrderedProperties
from cardinal_pythonlib.classes import gen_all_subclasses
from cardinal_pythonlib.enumlike import OrderedNamespace
from cardinal_pythonlib.dicts import reversedict
from cardinal_pythonlib.logs import get_brace_style_log_with_null_handler
if TYPE_CHECKING:
from sqlalchemy.orm.state import InstanceState
from sqlalchemy.sql.schema import Table
log = get_brace_style_log_with_null_handler(__name__)
# =============================================================================
# Creating ORM objects conveniently, etc.
# =============================================================================
def coltype_as_typeengine(
coltype: Union[VisitableType, TypeEngine]
) -> TypeEngine:
"""
Instances of SQLAlchemy column types are subclasses of ``TypeEngine``.
It's possible to specify column types either as such instances, or as the
class type. This function ensures that such classes are converted to
instances.
To explain: you can specify columns like
.. code-block:: python
a = Column("a", Integer)
b = Column("b", Integer())
c = Column("c", String(length=50))
isinstance(Integer, TypeEngine) # False
isinstance(Integer(), TypeEngine) # True
isinstance(String(length=50), TypeEngine) # True
type(Integer) # <class 'sqlalchemy.sql.visitors.VisitableType'>
type(Integer()) # <class 'sqlalchemy.sql.sqltypes.Integer'>
type(String) # <class 'sqlalchemy.sql.visitors.VisitableType'>
type(String(length=50)) # <class 'sqlalchemy.sql.sqltypes.String'>
This function coerces things to a :class:`TypeEngine`.
"""
if isinstance(coltype, TypeEngine):
return coltype
return coltype() # type: TypeEngine
# =============================================================================
# SqlAlchemyAttrDictMixin
# =============================================================================
class SqlAlchemyAttrDictMixin(object):
"""
Mixin to:
- get a plain dictionary-like object (with attributes so we can use ``x.y``
rather than ``x['y']``) from an SQLAlchemy ORM object
- make a nice ``repr()`` default, maintaining field order
See https://stackoverflow.com/questions/2537471 and in particular
https://stackoverflow.com/questions/2441796.
"""
def get_attrdict(self) -> OrderedNamespace:
"""
Returns what looks like a plain object with the values of the
SQLAlchemy ORM object.
"""
# noinspection PyUnresolvedReferences
columns = self.__table__.columns.keys()
values = (getattr(self, x) for x in columns)
zipped = zip(columns, values)
return OrderedNamespace(zipped)
def __repr__(self) -> str:
return "<{classname}({kvp})>".format(
classname=type(self).__name__,
kvp=", ".join(
f"{k}={v!r}" for k, v in self.get_attrdict().items()
),
)
@classmethod
def from_attrdict(cls, attrdict: OrderedNamespace) -> object:
"""
Builds a new instance of the ORM object from values in an attrdict.
"""
dictionary = attrdict.__dict__
# noinspection PyArgumentList
return cls(**dictionary)
# =============================================================================
# Traverse ORM relationships (SQLAlchemy ORM)
# =============================================================================
def walk_orm_tree(
obj,
debug: bool = False,
seen: Set = None,
skip_relationships_always: List[str] = None,
skip_relationships_by_tablename: Dict[str, List[str]] = None,
skip_all_relationships_for_tablenames: List[str] = None,
skip_all_objects_for_tablenames: List[str] = None,
) -> Generator[object, None, None]:
"""
Starting with a SQLAlchemy ORM object, this function walks a
relationship tree, yielding each of the objects once.
To skip attributes by name, put the attribute name(s) in
``skip_attrs_always``. To skip by table name, pass
``skip_attrs_by_tablename`` as e.g.
.. code-block:: python
{'sometable': ['attr1_to_skip', 'attr2_to_skip']}
Args:
obj: the SQLAlchemy ORM object to walk
debug: be verbose
seen: usually ``None``, but can be a set of objects marked as "already
seen"; if an object is in this set, it is skipped
skip_relationships_always: relationships are skipped if the
relationship has a name in this (optional) list
skip_relationships_by_tablename: optional dictionary mapping table
names (keys) to relationship attribute names (values); if the
"related table"/"relationship attribute" pair are in this
dictionary, the relationship is skipped
skip_all_relationships_for_tablenames: relationships are skipped if the
the related table has a name in this (optional) list
skip_all_objects_for_tablenames: if the object belongs to a table whose
name is in this (optional) list, the object is skipped
Yields:
SQLAlchemy ORM objects (including the starting object)
"""
# http://docs.sqlalchemy.org/en/latest/faq/sessions.html#faq-walk-objects
skip_relationships_always = (
skip_relationships_always or []
) # type: List[str] # noqa
skip_relationships_by_tablename = (
skip_relationships_by_tablename or {}
) # type: Dict[str, List[str]] # noqa
skip_all_relationships_for_tablenames = (
skip_all_relationships_for_tablenames or []
) # type: List[str] # noqa
skip_all_objects_for_tablenames = (
skip_all_objects_for_tablenames or []
) # type: List[str] # noqa
stack = [obj]
if seen is None:
seen = set()
while stack:
obj = stack.pop(0)
if obj in seen:
continue
tablename = obj.__tablename__
if tablename in skip_all_objects_for_tablenames:
continue
seen.add(obj)
if debug:
log.debug("walk: yielding {!r}", obj)
yield obj
insp = inspect(obj) # type: InstanceState
for (
relationship
) in insp.mapper.relationships: # type: RelationshipProperty # noqa
attrname = relationship.key
# Skip?
if attrname in skip_relationships_always:
continue
if tablename in skip_all_relationships_for_tablenames:
continue
if (
tablename in skip_relationships_by_tablename
and attrname in skip_relationships_by_tablename[tablename]
):
continue
# Process relationship
if debug:
log.debug("walk: following relationship {}", relationship)
related = getattr(obj, attrname)
if debug and related:
log.debug("walk: queueing {!r}", related)
if relationship.uselist:
stack.extend(related)
elif related is not None:
stack.append(related)
# =============================================================================
# deepcopy an SQLAlchemy object
# =============================================================================
# Use case: object X is in the database; we want to clone it to object Y,
# which we can then save to the database, i.e. copying all SQLAlchemy field
# attributes of X except its PK. We also want it to copy anything that is
# dependent upon X, i.e. traverse relationships.
#
# https://groups.google.com/forum/#!topic/sqlalchemy/wb2M_oYkQdY
# https://groups.google.com/forum/#!searchin/sqlalchemy/cascade%7Csort:date/sqlalchemy/eIOkkXwJ-Ms/JLnpI2wJAAAJ # noqa
def copy_sqla_object(
obj: object,
omit_fk: bool = True,
omit_pk: bool = True,
omit_attrs: List[str] = None,
debug: bool = False,
) -> object:
"""
Given an SQLAlchemy object, creates a new object (FOR WHICH THE OBJECT
MUST SUPPORT CREATION USING ``__init__()`` WITH NO PARAMETERS), and copies
across all attributes, omitting PKs (by default), FKs (by default), and
relationship attributes (always omitted).
Args:
obj: the object to copy
omit_fk: omit foreign keys (FKs)?
omit_pk: omit primary keys (PKs)?
omit_attrs: attributes (by name) not to copy
debug: be verbose
Returns:
a new copy of the object
"""
omit_attrs = omit_attrs or [] # type: List[str]
cls = type(obj)
mapper = class_mapper(cls)
newobj = cls() # not: cls.__new__(cls)
rel_keys = set([c.key for c in mapper.relationships])
prohibited = rel_keys
if omit_pk:
pk_keys = set([c.key for c in mapper.primary_key])
prohibited |= pk_keys
if omit_fk:
fk_keys = set([c.key for c in mapper.columns if c.foreign_keys])
prohibited |= fk_keys
prohibited |= set(omit_attrs)
if debug:
log.debug("copy_sqla_object: skipping: {}", prohibited)
for k in [
p.key for p in mapper.iterate_properties if p.key not in prohibited
]:
try:
value = getattr(obj, k)
if debug:
log.debug(
"copy_sqla_object: processing attribute {} = {}", k, value
)
setattr(newobj, k, value)
except AttributeError:
if debug:
log.debug("copy_sqla_object: failed attribute {}", k)
pass
return newobj
def rewrite_relationships(
oldobj: object,
newobj: object,
objmap: Dict[object, object],
debug: bool = False,
skip_table_names: List[str] = None,
) -> None:
"""
A utility function only.
Used in copying objects between SQLAlchemy sessions.
Both ``oldobj`` and ``newobj`` are SQLAlchemy instances. The instance
``newobj`` is already a copy of ``oldobj`` but we wish to rewrite its
relationships, according to the map ``objmap``, which maps old to new
objects.
For example:
- Suppose a source session has a Customer record and a Sale record
containing ``sale.customer_id``, a foreign key to Customer.
- We may have corresponding Python SQLAlchemy ORM objects
``customer_1_src`` and ``sale_1_src``.
- We copy them into a destination database, where their Python ORM objects
are ``customer_1_dest`` and ``sale_1_dest``.
- In the process we set up an object map looking like:
.. code-block:: none
Old session New session
-------------------------------
customer_1_src customer_1_dest
sale_1_src sale_1_dest
- Now, we wish to make ``sale_1_dest`` have a relationship to
``customer_1_dest``, in the same way that ``sale_1_src`` has a
relationship to ``customer_1_src``. This function will modify
``sale_1_dest`` accordingly, given this object map. It will observe that
``sale_1_src`` (here ``oldobj``) has a relationship to
``customer_1_src``; it will note that ``objmap`` maps ``customer_1_src``
to ``customer_1_dest``; it will create the relationship from
``sale_1_dest`` (here ``newobj``) to ``customer_1_dest``.
Args:
oldobj: SQLAlchemy ORM object to read from
newobj: SQLAlchemy ORM object to write to
objmap: dictionary mapping "source" objects to their corresponding
"destination" object.
debug: be verbose
skip_table_names: if a related table's name is in this (optional) list,
that relationship is skipped
"""
skip_table_names = skip_table_names or [] # type: List[str]
insp = inspect(oldobj) # type: InstanceState
# insp.mapper.relationships is of type
# sqlalchemy.utils._collections.ImmutableProperties, which is basically
# a sort of AttrDict.
for (
attrname_rel
) in (
insp.mapper.relationships.items()
): # type: Tuple[str, RelationshipProperty] # noqa
attrname = attrname_rel[0]
rel_prop = attrname_rel[1]
if rel_prop.viewonly:
if debug:
log.debug("Skipping viewonly relationship")
continue # don't attempt to write viewonly relationships # noqa
related_class = rel_prop.mapper.class_
related_table_name = related_class.__tablename__ # type: str
if related_table_name in skip_table_names:
if debug:
log.debug(
"Skipping relationship for related table {!r}",
related_table_name,
)
continue
# The relationship is an abstract object (so getting the
# relationship from the old object and from the new, with e.g.
# newrel = newinsp.mapper.relationships[oldrel.key],
# yield the same object. All we need from it is the key name.
# rel_key = rel.key # type: str
# ... but also available from the mapper as attrname, above
related_old = getattr(oldobj, attrname)
if rel_prop.uselist:
related_new = [objmap[r] for r in related_old]
elif related_old is not None:
related_new = objmap[related_old]
else:
related_new = None
if debug:
log.debug(
"rewrite_relationships: relationship {} -> {}",
attrname,
related_new,
)
setattr(newobj, attrname, related_new)
def deepcopy_sqla_objects(
startobjs: List[object],
session: Session,
flush: bool = True,
debug: bool = False,
debug_walk: bool = True,
debug_rewrite_rel: bool = False,
objmap: Dict[object, object] = None,
) -> None:
"""
Makes a copy of the specified SQLAlchemy ORM objects, inserting them into a
new session.
This function operates in several passes:
1. Walk the ORM tree through all objects and their relationships, copying
every object thus found (via :func:`copy_sqla_object`, without their
relationships), and building a map from each source-session object to
its equivalent destination-session object.
2. Work through all the destination objects, rewriting their relationships
(via :func:`rewrite_relationships`) so they relate to each other (rather
than their source-session brethren).
3. Insert all the destination-session objects into the destination session.
For this to succeed, every object must take an ``__init__`` call with no
arguments (see :func:`copy_sqla_object`). (We can't specify the required
``args``/``kwargs``, since we are copying a tree of arbitrary objects.)
Args:
startobjs: SQLAlchemy ORM objects to copy
session: destination SQLAlchemy :class:`Session` into which to insert
the copies
flush: flush the session when we've finished?
debug: be verbose?
debug_walk: be extra verbose when walking the ORM tree?
debug_rewrite_rel: be extra verbose when rewriting relationships?
objmap: starting object map from source-session to destination-session
objects (see :func:`rewrite_relationships` for more detail);
usually ``None`` to begin with.
"""
if objmap is None:
objmap = {} # keys = old objects, values = new objects
if debug:
log.debug("deepcopy_sqla_objects: pass 1: create new objects")
# Pass 1: iterate through all objects. (Can't guarantee to get
# relationships correct until we've done this, since we don't know whether
# or where the "root" of the PK tree is.)
seen = set()
for startobj in startobjs:
for oldobj in walk_orm_tree(startobj, seen=seen, debug=debug_walk):
if debug:
log.debug("deepcopy_sqla_objects: copying {}", oldobj)
newobj = copy_sqla_object(oldobj, omit_pk=True, omit_fk=True)
# Don't insert the new object into the session here; it may trigger
# an autoflush as the relationships are queried, and the new
# objects are not ready for insertion yet (as their relationships
# aren't set).
# Note also the session.no_autoflush option:
# "sqlalchemy.exc.OperationalError: (raised as a result of Query-
# invoked autoflush; consider using a session.no_autoflush block if
# this flush is occurring prematurely)..."
objmap[oldobj] = newobj
# Pass 2: set all relationship properties.
if debug:
log.debug("deepcopy_sqla_objects: pass 2: set relationships")
for oldobj, newobj in objmap.items():
if debug:
log.debug("deepcopy_sqla_objects: newobj: {}", newobj)
rewrite_relationships(oldobj, newobj, objmap, debug=debug_rewrite_rel)
# Now we can do session insert.
if debug:
log.debug("deepcopy_sqla_objects: pass 3: insert into session")
for newobj in objmap.values():
session.add(newobj)
# Done
if debug:
log.debug("deepcopy_sqla_objects: done")
if flush:
session.flush()
def deepcopy_sqla_object(
startobj: object,
session: Session,
flush: bool = True,
debug: bool = False,
debug_walk: bool = False,
debug_rewrite_rel: bool = False,
objmap: Dict[object, object] = None,
) -> object:
"""
Makes a copy of the object, inserting it into ``session``.
Uses :func:`deepcopy_sqla_objects` (q.v.).
A problem is the creation of duplicate dependency objects if you call it
repeatedly.
Optionally, if you pass the objmap in (which maps old to new objects), you
can call this function repeatedly to clone a related set of objects...
... no, that doesn't really work, as it doesn't visit parents before
children. The :func:`cardinal_pythonlib.sqlalchemy.merge_db.merge_db`
function does that properly.
Args:
startobj: SQLAlchemy ORM object to deep-copy
session: see :func:`deepcopy_sqla_objects`
flush: see :func:`deepcopy_sqla_objects`
debug: see :func:`deepcopy_sqla_objects`
debug_walk: see :func:`deepcopy_sqla_objects`
debug_rewrite_rel: see :func:`deepcopy_sqla_objects`
objmap: see :func:`deepcopy_sqla_objects`
Returns:
the copied object matching ``startobj``
"""
if objmap is None:
objmap = {} # keys = old objects, values = new objects
deepcopy_sqla_objects(
startobjs=[startobj],
session=session,
flush=flush,
debug=debug,
debug_walk=debug_walk,
debug_rewrite_rel=debug_rewrite_rel,
objmap=objmap,
)
return objmap[startobj] # returns the new object matching startobj
# =============================================================================
# Get Columns from an ORM instance
# =============================================================================
def gen_columns(obj) -> Generator[Tuple[str, Column], None, None]:
"""
Asks a SQLAlchemy ORM object: "what are your SQLAlchemy columns?"
Yields tuples of ``(attr_name, Column)`` from an SQLAlchemy ORM object
instance. Also works with the corresponding SQLAlchemy ORM class. Examples:
.. code-block:: python
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.sql.schema import Column
from sqlalchemy.sql.sqltypes import Integer
Base = declarative_base()
class MyClass(Base):
__tablename__ = "mytable"
pk = Column("pk", Integer, primary_key=True, autoincrement=True)
a = Column("a", Integer)
x = MyClass()
list(gen_columns(x))
list(gen_columns(MyClass))
"""
mapper = obj.__mapper__ # type: Mapper
assert (
mapper
), f"gen_columns called on {obj!r} which is not an SQLAlchemy ORM object"
colmap = mapper.columns # type: OrderedProperties
if not colmap:
return
for attrname, column in colmap.items():
# NB: column.name is the SQL column name, not the attribute name
yield attrname, column
# Don't bother using
# cls = obj.__class_
# for attrname in dir(cls):
# cls_attr = getattr(cls, attrname)
# # ... because, for columns, these will all be instances of
# # sqlalchemy.orm.attributes.InstrumentedAttribute.
def get_pk_attrnames(obj) -> List[str]:
"""
Asks an SQLAlchemy ORM object: "what are your primary key(s)?"
Args:
obj: SQLAlchemy ORM object
Returns:
list of attribute names of primary-key columns
"""
return [
attrname for attrname, column in gen_columns(obj) if column.primary_key
]
def gen_columns_for_uninstrumented_class(
cls: Type
) -> Generator[Tuple[str, Column], None, None]:
"""
Generate ``(attr_name, Column)`` tuples from an UNINSTRUMENTED class, i.e.
one that does not inherit from ``declarative_base()``. Use this for mixins
of that kind.
SUBOPTIMAL. May produce warnings like:
.. code-block:: none
SAWarning: Unmanaged access of declarative attribute id from non-mapped class GenericTabletRecordMixin
Try to use :func:`gen_columns` instead.
""" # noqa
for attrname in dir(cls):
potential_column = getattr(cls, attrname)
if isinstance(potential_column, Column):
yield attrname, potential_column
def attrname_to_colname_dict(cls) -> Dict[str, str]:
"""
Asks an SQLAlchemy class how its attribute names correspond to database
column names.
Args:
cls: SQLAlchemy ORM class
Returns:
a dictionary mapping attribute names to database column names
"""
attr_col = {} # type: Dict[str, str]
for attrname, column in gen_columns(cls):
attr_col[attrname] = column.name
return attr_col
def colname_to_attrname_dict(cls) -> Dict[str, str]:
return reversedict(attrname_to_colname_dict(cls))
# =============================================================================
# Get relationships from an ORM instance
# =============================================================================
def gen_relationships(
obj
) -> Generator[Tuple[str, RelationshipProperty, Type], None, None]:
"""
Yields tuples of ``(attrname, RelationshipProperty, related_class)``
for all relationships of an ORM object.
The object 'obj' can be EITHER an instance OR a class.
"""
insp = inspect(obj) # type: InstanceState
# insp.mapper.relationships is of type
# sqlalchemy.utils._collections.ImmutableProperties, which is basically
# a sort of AttrDict.
for (
attrname,
rel_prop,
) in (
insp.mapper.relationships.items()
): # type: Tuple[str, RelationshipProperty] # noqa
# noinspection PyUnresolvedReferences
related_class = rel_prop.mapper.class_
# log.critical("gen_relationships: attrname={!r}, "
# "rel_prop={!r}, related_class={!r}, rel_prop.info={!r}",
# attrname, rel_prop, related_class, rel_prop.info)
yield attrname, rel_prop, related_class
# =============================================================================
# Inspect ORM objects (SQLAlchemy ORM)
# =============================================================================
def get_orm_columns(cls: Type) -> List[Column]:
"""
Gets :class:`Column` objects from an SQLAlchemy ORM class.
Does not provide their attribute names.
"""
mapper = inspect(cls) # type: Mapper
# ... returns InstanceState if called with an ORM object
# http://docs.sqlalchemy.org/en/latest/orm/session_state_management.html#session-object-states # noqa
# ... returns Mapper if called with an ORM class
# http://docs.sqlalchemy.org/en/latest/orm/mapping_api.html#sqlalchemy.orm.mapper.Mapper # noqa
colmap = mapper.columns # type: OrderedProperties
return colmap.values()
def get_orm_column_names(cls: Type, sort: bool = False) -> List[str]:
"""
Gets column names (that is, database column names) from an SQLAlchemy
ORM class.
"""
colnames = [col.name for col in get_orm_columns(cls)]
return sorted(colnames) if sort else colnames
# =============================================================================
# Inspect metadata (SQLAlchemy ORM)
# =============================================================================
def get_table_names_from_metadata(metadata: MetaData) -> List[str]:
"""
Returns all database table names found in an SQLAlchemy :class:`MetaData`
object.
"""
return [table.name for table in metadata.tables.values()]
def get_metadata_from_orm_class_or_object(cls: Type) -> MetaData:
"""
Returns the :class:`MetaData` object from an SQLAlchemy ORM class or
instance.
"""
# noinspection PyUnresolvedReferences
table = cls.__table__ # type: Table
return table.metadata
def gen_orm_classes_from_base(base: Type) -> Generator[Type, None, None]:
"""
From an SQLAlchemy ORM base class, yield all the subclasses (except those
that are abstract).
If you begin with the proper :class`Base` class, then this should give all
ORM classes in use.
"""
for cls in gen_all_subclasses(base):
if _get_immediate_cls_attr(cls, "__abstract__", strict=True):
# This is SQLAlchemy's own way of detecting abstract classes; see
# sqlalchemy.ext.declarative.base
continue # NOT an ORM class
yield cls
def get_orm_classes_by_table_name_from_base(base: Type) -> Dict[str, Type]:
"""
Given an SQLAlchemy ORM base class, returns a dictionary whose keys are
table names and whose values are ORM classes.
If you begin with the proper :class`Base` class, then this should give all
tables and ORM classes in use.
"""
# noinspection PyUnresolvedReferences
return {cls.__tablename__: cls for cls in gen_orm_classes_from_base(base)}
|
{
"content_hash": "dc69212bd38c14af154fe1a2e94d960c",
"timestamp": "",
"source": "github",
"line_count": 776,
"max_line_length": 119,
"avg_line_length": 35.72164948453608,
"alnum_prop": 0.6071067821067822,
"repo_name": "RudolfCardinal/pythonlib",
"id": "a4224084c78870b90238dd432daa2d52f0f866d5",
"size": "27790",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cardinal_pythonlib/sqlalchemy/orm_inspect.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1987146"
},
{
"name": "Shell",
"bytes": "111"
}
],
"symlink_target": ""
}
|
"""
### Creating a MongoQuery
`MongoQuery` is the main tool that lets you execute JSON Query Objects against an SqlAlchemy-handled database.
There are two ways to use it:
1. Construct `MongoQuery` manually, giving it your model:
```python
from mongosql import MongoQuery
from .models import User # Your model
ssn = Session()
# Create a MongoQuery, using an initial Query (possibly, with some initial filtering applied)
mq = MongoQuery(User).from_query(ssn.query(User))
```
2. Use the convenience mixin for your Base:
```python
from sqlalchemy.ext.declarative import declarative_base
from mongosql import MongoSqlBase
Base = declarative_base(cls=(MongoSqlBase,))
class User(Base):
#...
```
Using this Base, your models will have a shortcut method which returns a `MongoQuery`:
```python
User.mongoquery(session)
User.mongoquery(query)
```
With `mongoquery()`, you can construct a query from a session:
```python
mq = User.mongoquery(session)
```
.. or from an [sqlalchemy.orm.Query](https://docs.sqlalchemy.org/en/latest/orm/query.html),
which allows you to apply some initial filtering:
```python
mq = User.mongoquery(
session.query(User).filter_by(active=True) # Only query active users
)
```
### Reusable
A `MongoQuery` object itself is not reusable: it can make just one query.
However, it makes sense to save some initialization and keep it ready for new requests.
For performance reasons, this has to be done manually with the `Reusable` wrapper:
```python
mq_factory = Reusable(User.mongoquery(session))
```
The wrapped object has all the methods of a `MongoQuery`, but will make a proper copy when used.
Think of it as a factory.
### Querying: `MongoQuery.query()`
Once a `MongoQuery` is prepared, you can give it a QueryObject:
```python
# QueryObject
query_object = {
'filter': {
'sex': 'f',
'age': { '$gte': 18, '$lte': 25 }, # 18..25 years
},
'order': ['weight+'], # slims first
'limit': 50, # just enough :)
}
# MongoQuery
mq = User.mongoquery(ssn).query(**query_object)
```
### Getting Results: `MongoQuery.end()`
Because `MongoQuery` is just a wrapper around an SqlAlchemy's `Query`, you can get that `Query`
and get results out of it:
```python
# By calling the `MongoQuery.end()` method, you get an SqlAlchemy `Query`:
q = mq.end() # SqlALchemy Query
# Execute the query and fetch results
girls = q.all()
```
### Getting All Sorts of Results
Let's remember that the Query generated by MongoQuery can return three sorts of results:
1. Entities. When the API user has requested an entity of a list of them.
3. Integer. When the API user has used `{count: 1}`.
2. Tuples. This is what you get when the API user has used the [Aggregate Operation](#aggregate-operation).
`MongoQuery` has three methods that help you detect what you get:
1. `MongoQuery.result_contains_entities()`
2. `MongoQuery.result_is_scalar()`
3. `MongoQuery.result_is_tuples()`
Here's how to use it:
```python
def get_result(mq: MongoQuery, query: Query):
# Handle: Query Object has count
if mq.result_is_scalar():
return {'count': query.scalar()}
# Handle: Query Object has group_by and yields tuples
if mq.result_is_tuples():
# zip() column names together with the values, and make it into a dict
return {
'results': [dict(zip(row.keys(), row))
for row in query]
}
# Regular result: entities
return {
'users': query.all()
}
```
Most likely, you won't need to handle that at all: just use [CRUD Helpers](#crud-helpers)
that implement most of this logic for you.
"""
from copy import copy
from sqlalchemy import inspect, exc as sa_exc
from sqlalchemy.orm import Query, Load, defaultload
from mongosql import RuntimeQueryError, BaseMongoSqlException
from .bag import ModelPropertyBags
from . import handlers
from .exc import InvalidQueryError
from .util import MongoQuerySettingsHandler, CountingQuery
from typing import Union, Mapping, Iterable, Tuple, Any
from sqlalchemy.orm import Session
from sqlalchemy.ext.declarative import DeclarativeMeta
from sqlalchemy.orm import RelationshipProperty
from .util import MongoQuerySettingsDict
class MongoQuery:
""" MongoQuery is a wrapper around SqlAlchemy's `Query` that can safely execute JSON Query Objects """
# The class to use for getting structural data from a model
_MODEL_PROPERTY_BAGS_CLS = ModelPropertyBags
def __init__(self, model: DeclarativeMeta, handler_settings: Union[Mapping, MongoQuerySettingsDict, None] = None):
""" Init a MongoDB-style query
:param model: SqlAlchemy model to make a MongoSQL query for.
:param handler_settings: Settings for Query Object handlers. See MongoQuerySettingsDict
"""
# Aliases?
if inspect(model).is_aliased_class:
raise TypeError('MongoQuery does not accept aliases. '
'If you want to query an alias, do it like this: '
'MongoQuery(User).aliased(aliased(User))')
# Init with the model
self.model = model # model, or its alias (when used with self.aliased())
self.bags = self._MODEL_PROPERTY_BAGS_CLS.for_model(self.model)
# Initialize the settings
self.handler_settings = self._init_handler_settings(handler_settings or {})
self._query_options = {
# See: options()
'no_limit_offset': False,
}
# Initialized later
self._query = None # type: Query | None
self._parent_mongoquery = None # type: MongoQuery | None
self.input_value = None # type: dict | None
# Get ready: Query object handlers
self._init_query_object_handlers()
# Load interface join path
# These are just the defaults ; as_relation() will override them when working with
# deeper relationships
self._join_path = ()
self._as_relation = Load(self.model)
# Cached MongoQuery objects for nested relationships
self._nested_mongoqueries = dict() # type: dict[str, MongoQuery]
# NOTE: keep in mind that this object is copy()ed in order to make it reusable.
# This means that every property that can't be safely reused has to be copy()ied manually
# inside the __copy__() method.
# A good example is the `_as_relation()` method: if not copied properly, subsequent queries
# will inherit all option()s from the previous queries and lead to all sorts of weird effects!
# So, whenever you add a property to this object, make sure you understand its copy() behavior.
def __copy__(self) -> 'MongoQuery':
""" MongoQuery can be reused: wrap it with Reusable() which performs the automatic copy()
It actually makes sense to have reusable MongoQuery because there's plenty of settings
you don't want to parse over ang over again.
This method implements proper copying so that this MongoQuery can be reused.
"""
cls = self.__class__
result = cls.__new__(cls)
result.__dict__.update(self.__dict__)
# Copy Query Object handlers
for name in self.HANDLER_ATTR_NAMES:
setattr(result, name, copy(getattr(result, name)))
# Copy mutable objects
result._query_options = result._query_options.copy()
# Re-initialize properties that can't be copied
self.as_relation(None) # reset the Load() interface. Outside code will have to set it up properly
self._query = None
return result
def options(self, *, no_limit_offset=False):
""" Set options for this query to alter its behavior
Args:
no_limit_offset: Disable putting LIMIT/OFFSET on the query.
This is useful when you already have a MongoQuery configured but want to make
an unrestricted query with the same settings.
Note that this setting only has effect on the immediate query; it does not remove limits/offsets
from nested queries (i.e. queries for related objects)
"""
# Option: no limit offset
assert isinstance(no_limit_offset, bool)
self._query_options['no_limit_offset'] = no_limit_offset
# Can apply immediately
self.handler_limit.skip_this_handler = no_limit_offset
return self
def from_query(self, query: Query) -> 'MongoQuery':
""" Specify a custom sqlalchemy query to work with.
It can have, say, initial filtering already applied to it.
It no default query is provided, _from_query() will use the default.
:param query: Initial sqlalchemy query to work with (e.g. with initial filters pre-applied)
:type query: sqlalchemy.orm.Query
"""
self._query = query
return self
def with_session(self, ssn: Session) -> 'MongoQuery':
""" Query with the given sqlalchemy Session
Args:
ssn: The SqlAlchemy `Session` to use for querying
"""
self._query = self._from_query().with_session(ssn)
return self
def as_relation(self, join_path: Union[Tuple[RelationshipProperty], None] = None):
""" Handle a model in relationship with another model
This internal method is used when working with deeper relations.
For example, when you're querying `User`, who has `User.articles`, and you want to specify lazyload() on
the fields of that article, you can't just do `lazyload(User.articles)` ; you have to tell sqlalchemy that
you actually mean a model that is going to be loaded through a relationship.
You do it this way:
defaultload(models.User.articles).lazyload(models.Article)
Then SqlAlchemy will know that you actually mean a related model.
To achieve this, we keep track of nested relations in the form of `join_path`.
`self._as_relation` is the Load() interface for chaining methods for deeper relationships.
:param join_path: A tuple of relationships leading to this query.
"""
if join_path:
self._join_path = join_path
self._as_relation = defaultload(*self._join_path)
else:
# Set default
# This behavior is used by the __copy__() method to reset the attribute
self._join_path = ()
self._as_relation = Load(self.model)
return self
def as_relation_of(self, mongoquery: 'MongoQuery', relationship: RelationshipProperty) -> 'MongoQuery':
""" Handle the query as a sub-query handling a relationship
This is used by the MongoJoin handler to build queries to related models.
:param mongoquery: The parent query
:param relationship: The relationship
"""
return self.as_relation(mongoquery._join_path + (relationship,))
def aliased(self, model: DeclarativeMeta) -> 'MongoQuery':
""" Make a query to an aliased model instead.
This is used by MongoJoin handler to issue subqueries.
Note that the method modifies the current object and does not make a copy!
Note: should always be called after as_relation_of(), not before!
:param model: Aliased model
"""
# Aliased bags
self.bags = self.bags.aliased(model)
self.model = model
# Aliased loader interface
# Currently, our join path looks like this: [..., User]
# Now, when we're using an alias instead, we have to replace that last element with an alias too
# SqlAlchemy 1.2.x used to work well without doing it;
# SqlAlchemy 1.3.x now requires adapting a relationship by using of_type() on it.
# See: https://github.com/sqlalchemy/sqlalchemy/issues/4566
if self._join_path: # not empty
# Okay. First. Replace the last element on the join path with the aliased model's relationship
new_join_path = self._join_path[0:-1] + (self._join_path[-1].of_type(model),)
# Second. Apply the new join path
self.as_relation(new_join_path)
else: # empty
self._as_relation = Load(self.model) # use the alias
# Aliased handlers
for handler_name in self.HANDLER_ATTR_NAMES:
setattr(self, handler_name,
getattr(self, handler_name).aliased(model))
return self
def query(self, **query_object: Any) -> 'MongoQuery':
""" Build a MongoSql query from an object
:param query_object: The Query Object to execute.
:raises InvalidQueryError: unknown Query Object operations provided (extra keys)
:raises InvalidQueryError: syntax error for any of the Query Object sections
:raises InvalidColumnError: Invalid column name provided in the input
:raises InvalidRelationError: Invalid relationship name provided in the input
:rtype: MongoQuery
"""
# Prepare Query Object
for handler_name, handler in self._handlers():
query_object = handler.input_prepare_query_object(query_object)
# Check if Query Object keys are all right
invalid_keys = set(query_object.keys()) - self.HANDLER_NAMES
if invalid_keys:
raise InvalidQueryError('Unknown Query Object operations: {}'.format(', '.join(invalid_keys)))
# Store
self.input_value = query_object
# Bind every handler with ourselves
# We do it as a separate step because some handlers want other handlers in a pristine condition.
# Namely, MongoAggregate wants to copy MongoFilter before it receives any input.
for handler_name, handler in self._handlers():
handler.with_mongoquery(self)
# Process every field with its method
# Every handler should be invoked because they may have defaults even when no input was provided
for handler_name, handler in self._handlers_ordered_for_query_method():
# Query Object value for this handler
input_value = query_object.get(handler_name, None)
# Disabled handlers exception
# But only test that if there actually was any input
if input_value is not None:
self._raise_if_handler_is_not_enabled(handler_name)
# Use the handler
# Run it even when it does not have any input
handler.input(input_value)
# Done
return self
def end(self) -> Query:
""" Get the resulting sqlalchemy `Query` object """
# The query
q = self._from_query()
# Apply every handler
for handler_name, handler in self._handlers_ordered_for_end_method():
if not handler.skip_this_handler:
# Apply the handler
try:
q = handler.alter_query(q, as_relation=self._as_relation)
# Enrich SqlAlchemy errors with MongoSQL context (because it's very difficult to debug its cryptic messages)
except sa_exc.SQLAlchemyError as e:
# Get model name by backtracing MongoQuery objects
model_name = []
mq = self
while mq is not None:
model_name.append(mq.bags.model_name)
mq = mq._parent_mongoquery
model_name = ' -> '.join(reversed(model_name))
# Finally, raise one rich error
raise RuntimeQueryError(f'Error processing MongoQuery({model_name}).{handler_name}: {e}') from e
return q
def end_count(self) -> CountingQuery:
""" Get the result, and also count the total number of rows.
Be aware that the cost will be substantially higher than without the total number,
but still cheaper than two separate queries.
Numbers: this gives about 50% boost to small result sets, and about 15% boost to larger result sets.
See [CountingQuery](#countingqueryquery) for more details.
Example:
```python
q = User.mongoquery(ssn).query(...).end_count()
# Get the count
q.count # -> 127
# Get results
list(q) # -> [User, ...]
# (!) only one actual SQL query was made
```
"""
# Get the query and wrap it with a counting query
return CountingQuery(self.end())
# Extra features
def result_contains_entities(self) -> bool:
""" Test whether the result will contain entities.
This is normally the case in the absence of 'aggregate', 'group', and 'count' queries.
"""
return self.handler_aggregate.is_input_empty() and \
self.handler_group.is_input_empty() and \
self.handler_count.is_input_empty()
def result_is_scalar(self) -> bool:
""" Test whether the result is a scalar value, like with count
In this case, you'll fetch it like this:
MongoQuery(...).end().scalar()
"""
return not self.handler_count.is_input_empty()
def result_is_tuples(self) -> bool:
""" Test whether the result is a list of keyed tuples, like with group_by
In this case, you might fetch it like this:
res = MongoQuery(...).end()
return [dict(zip(row.keys(), row)) for row in res], None
"""
return not self.handler_aggregate.is_input_empty() or \
not self.handler_group.is_input_empty()
def ensure_loaded(self, *cols: Iterable[str]) -> 'MongoQuery':
""" Ensure the given columns, relationships, and related columns are loaded
Despite any projections and joins the user may be doing, make sure that the given `cols` are loaded.
This will ensure that every column is loaded, every relationship is joined, and none of those is included
into `projection` and `pluck_instance`.
This method is to be used by the application code to handle the following situation:
* The API user has requested only fields 'a', 'b', 'c' to be loaded
* The application code needs field 'd' for its operation
* The user does not want to see no 'd' in the output.
Solution: use ensure_loaded('d'), and then pluck_instance()
Limitations:
1. If the user has requested filtering on a relationship, you can't use ensure_loaded() on it.
This method will raise an InvalidQueryError().
This makes sense, because if your application code relies on the presence of a certain relationship,
it certainly needs it fully loaded, and unfiltered.
2. If the request contains no entities (e.g. 'group' or 'aggregate' handlers are used),
this method would throw an AssertionError
If all you need is just to know whether something is loaded or not, use MongoQuery.__contains__() instead.
Remember that every time you use ensure_loaded() on a relationship, you disable the possibility of filtering for it!
:param cols: Column names ('age'), Relation names ('articles'), or Related column names ('articles.name')
:raises InvalidQueryError: cannot merge because the relationship has a filter on it
:raises ValueError: invalid column or relationship name given.
It does not throw `InvalidColumnError` because that's likely your error, not an error of the API user :)
"""
assert self.result_contains_entities(), 'Cannot use ensure_loaded() on a result set that does not contain entities'
# Tell columns and relationships apart
columns = []
relations = {}
for name in cols:
# Tell apart
if name in self.bags.related_columns:
# A related column will initialize a projection
relation_name, column_name = name.split('.', 1)
relations.setdefault(relation_name, {})
relations[relation_name].setdefault('project', {})
relations[relation_name]['project'][column_name] = 1
elif name in self.bags.relations:
# A relation will init an empty object
relations.setdefault(name, {})
elif name in self.bags.columns:
# A column will just be loaded
columns.append(name)
else:
raise ValueError('Invalid column or relation name given to ensure_loaded(): {!r}'
.format(name))
# Load all them
try:
self.handler_project.merge(columns, quietly=True, strict=True)
self.handler_join.merge(relations, quietly=True, strict=True)
except InvalidQueryError as e:
raise InvalidQueryError('Failed to process ensure_loaded({}): {}'.format(cols, str(e))) from e
# Done
return self
def get_final_query_object(self) -> dict:
""" Get the final Query Object dict (after all handlers have applied their defaults).
This Query Object will contain the name of every single handler, including those that were not given any input.
"""
ret = {
name: handler.get_final_input_value()
for name, handler in self._handlers()
}
# Fix limit: keys 'skip' and 'limit'
ret.update(ret['limit'])
# Done
return ret
def get_projection_tree(self) -> dict:
""" Get a projection-like dict that maps every included column to 1,
and every relationship to a nested projection dict.
Example:
```python
MongoQuery(User).query(join={'articles': dict(project=('id',))}).handler_join.projection
#-> {'articles': {'id': 1}}
```
This is mainly useful for debugging nested Query Objects.
Returns:
dict: the projection
"""
ret = {}
ret.update(self.handler_project.projection)
ret.update(self.handler_join.get_projection_tree())
ret.update(self.handler_joinf.get_projection_tree())
return ret
def get_full_projection_tree(self) -> dict:
""" Get a full projection tree that mentions every column, but only those relationships that are loaded
:rtype: dict
"""
ret = {}
ret.update(self.handler_project.get_full_projection())
ret.update(self.handler_join.get_full_projection_tree())
ret.update(self.handler_joinf.get_full_projection_tree())
return ret
def pluck_instance(self, instance: object) -> dict:
""" Pluck an sqlalchemy instance and make it into a dict
This method should be used to prepare an object for JSON encoding.
This makes sure that only the properties explicitly requested by the user get included
into the result, and *not* the properties that your code may have loaded.
Projection and Join properties are considered.
:param instance: object
:rtype: dict
"""
if not isinstance(instance, self.bags.model): # bags.model, because self.model may be aliased
raise ValueError('This MongoQuery.pluck_instance() expects {}, but {} was given'
.format(self.bags.model, type(instance)))
# First, projection will do what it wants.
# By the way, it will also generate a dictionary
dct = self.handler_project.pluck_instance(instance)
# Now, the joins will add more fields
dct.update(self.handler_join.pluck_instance(instance))
dct.update(self.handler_joinf.pluck_instance(instance))
# Seems like there's no one else?
# Done.
return dct
def __contains__(self, key: str) -> bool:
""" Test if a property is going to be loaded by this query """
return key in self.handler_project or key in self.handler_join
def __repr__(self):
return 'MongoQuery({})'.format(str(self.model))
# region Query Object handlers
# This section initializes every Query Object handler, one per method.
# Doing it this way enables you to override the way they are initialized, and use a custom query class with
# custom settings.
_QO_HANDLER_PROJECT = handlers.MongoProject
_QO_HANDLER_SORT = handlers.MongoSort
_QO_HANDLER_GROUP = handlers.MongoGroup
_QO_HANDLER_JOIN = handlers.MongoJoin
_QO_HANDLER_JOINF = handlers.MongoFilteringJoin
_QO_HANDLER_FILTER = handlers.MongoFilter
_QO_HANDLER_AGGREGATE = handlers.MongoAggregate # Use MongoAggregateInsecure for backwards compatibility
_QO_HANDLER_LIMIT = handlers.MongoLimit
_QO_HANDLER_COUNT = handlers.MongoCount
HANDLER_NAMES = frozenset(('project',
'sort',
'group',
'join',
'joinf',
'filter',
'aggregate',
'limit',
'count'))
HANDLER_ATTR_NAMES = frozenset('handler_'+name
for name in HANDLER_NAMES)
def _handlers(self):
""" Get the list of all (handler_name, handler) """
return (
# Note that the ordering of these handlers may actually influence the way queries are processed!
# Considerations for the input() method:
# 1. 'project' before 'join'
# Because 'project' will try to send relationships to the 'join' handler,
# and MongoJoin has to have input() already called by then.
# NOTE: this is the only handler that has preferences for its input() method.
# Because other handlers do not care, and this one does, the best way to bring it down
# to the bottom is to use reversed(self._handlers()).
#
# Considerations for the alter_query() method:
# 1. 'limit' after 'order_by':
# 'order_by' does not like limits
# 2. 'join' after 'filter' and 'limit'
# Because 'join' handler may make it into a subquery,
# and at that point is has to have all filters and limits applied
# 3. 'aggregate' before 'sort', 'group', 'filter'
# Because aggregate handler uses Query.select_from(), which can only be applied to a query
# without any clauses like WHERE, ORDER BY, GROUP BY
# 4. 'sort' before 'join'
# Because join makes a subquery, and it has to contain ordering within it.
# 5. 'limit' after everything
# Because it will wrap everything into a subquery, which has a different name.
# However, 'join' and 'joinf' somehow manage to handle the situation, so the requirement is restated:
# "after everything", but can be before "join".
# *. There may be others that the author is not aware of... yet.
('project', self.handler_project),
('aggregate', self.handler_aggregate),
('sort', self.handler_sort),
('group', self.handler_group),
('filter', self.handler_filter),
('limit', self.handler_limit),
('join', self.handler_join),
('joinf', self.handler_joinf),
('count', self.handler_count)
)
def _handlers_ordered_for_query_method(self):
""" Handlers in an order suitable for the query() method """
# reversed() is applied as a hack to move 'project' below 'join'.
return reversed(self._handlers())
def _handlers_ordered_for_end_method(self):
""" Handlers in an order suitable for the end() method """
return self._handlers()
# for IDE completion
handler_project = None # type: handlers.MongoProject
handler_sort = None # type: handlers.MongoSort
handler_group = None # type: handlers.MongoGroup
handler_join = None # type: handlers.MongoJoin
handler_joinf = None # type: handlers.MongoJoinf
handler_filter = None # type: handlers.MongoFilter
handler_aggregate = None # type: handlers.MongoAggregate
handler_limit = None # type: handlers.MongoLimit
handler_count = None # type: handlers.MongoCount
def _init_query_object_handlers(self):
""" Initialize every Query Object handler """
for name in self.HANDLER_NAMES:
# Every handler: name, attr, clas
handler_attr_name = 'handler_' + name
handler_cls_attr_name = '_QO_HANDLER_' + name.upper()
handler_cls = getattr(self, handler_cls_attr_name)
# Use _init_handler()
setattr(self, handler_attr_name,
self._init_handler(name, handler_cls)
)
# Check settings
self.handler_settings.raise_if_invalid_handler_settings(self)
def _init_handler(self, handler_name: str, handler_cls: type):
""" Init a handler, and load its settings """
handler_settings = self.handler_settings.get_settings(handler_name, handler_cls)
return handler_cls(self.model, self.bags, **handler_settings)
# endregion
# region Internals
def _init_handler_settings(self, handler_settings: Mapping) -> MongoQuerySettingsHandler:
""" Initialize: handler settings """
# A special case for 'join'
if handler_settings.get('join_enabled', True) is False:
# If 'join' is explicitly disabled, disable 'joinf' as well
# This is for security so that one doesn't forget to disable them both.
handler_settings['joinf_enabled'] = False
# A special case for 'raiseload'
if handler_settings.pop('raiseload', False):
# Can't combine
assert not handler_settings.get('raiseload_col', False)
assert not handler_settings.get('raiseload_rel', False)
# Both True
handler_settings['raiseload_col'] = True
handler_settings['raiseload_rel'] = True
# Create the object
hso = MongoQuerySettingsHandler(handler_settings)
hso.validate_related_settings(self.bags)
# Done
return hso
def _from_query(self) -> Query:
""" Get the query to work with, or initialize one
When the time comes to build an actual SqlAlchemy query, we're going to use the query that the user has
provided with from_query(). If none was provided, we'll use the default one.
"""
return self._query or Query([self.model])
def _init_mongoquery_for_related_model(self, relationship_name: str) -> 'MongoQuery':
""" Create a MongoQuery object for a model, related through a relationship with the given name.
This method configures queries made on related models.
Note that this method is only called once for every relationship.
See: _get_nested_mongoquery() for more info
"""
# Get the relationship
# There must be no exceptions here, because JoinHandler is the only guy using this method,
# and it should already have validated relationship name.
# Meaning, we can be pretty sure `relationship_name` exists
target_model = self.bags.relations.get_target_model(relationship_name)
# Make a new MongoQuery
handler_settings = self.handler_settings.settings_for_nested_mongoquery(relationship_name, target_model)
mongoquery = self.__class__(target_model, handler_settings)
# Done
return mongoquery
def _get_nested_mongoquery(self, relationship_name: str) -> 'MongoQuery':
""" Get a MongoQuery for a nested model (through a relationship)
Remember that the 'join' operation support nested queries!
And those queries also support projections, filters, joins, and whatnot.
This method will correctly load nested configuration from self.handler_settings,
which enables you to set up your security and preferences for queries on related models.
Example:
mq = MongoQuery(Comment, dict(
allowed_relations=('author',), # only allow one relationship to be joined
related={
'author': dict( # settings for queries on this relationship
join=False, # disable further joins
force_exclude=('password',) # can't get it
)
}
))
In this case, the API user won't be able to get the password by join()ing to it from other entities.
Note that this method does not call as_relation() nor aliased().
You'll have to do it yourself.
"""
# If there's no nested MongoQuery inited, make one
if relationship_name not in self._nested_mongoqueries:
self._nested_mongoqueries[relationship_name] = self._init_mongoquery_for_related_model(relationship_name)
# Get a cached nested MongoQuery
nested_mq = self._nested_mongoqueries[relationship_name]
# Make a copy, set as_relation() properly, put an alias on it
nested_mq = copy(nested_mq)
# Parent relationship to self
nested_mq._parent_mongoquery = self
# Done
return nested_mq
def _raise_if_handler_is_not_enabled(self, handler_name: str):
""" Raise an error if a handler is not enabled.
This is used by:
* query() method, to raise errors when a user provides input to a disabled handler
* MongoProject.input() method, which feeds MongoJoin with projections, and has to check settings
:return:
"""
self.handler_settings.raise_if_not_handler_enabled(self.bags.model_name, handler_name)
# endregion
|
{
"content_hash": "edff6d6317a9b83edad8b60c5da99db6",
"timestamp": "",
"source": "github",
"line_count": 837,
"max_line_length": 128,
"avg_line_length": 41.157706093189965,
"alnum_prop": 0.6231240384336265,
"repo_name": "kolypto/py-mongosql",
"id": "1fe75ddca726c1c19838feae5358b0120cc16591",
"size": "34449",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mongosql/query.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "704"
},
{
"name": "Python",
"bytes": "669556"
},
{
"name": "Shell",
"bytes": "156"
}
],
"symlink_target": ""
}
|
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import json
import re
import subprocess
from collections import namedtuple
from pants.base.workunit import WorkUnit, WorkUnitLabel
from pants.task.task import Task
from pants.util.memo import memoized_method, memoized_property
from twitter.common.collections.orderedset import OrderedSet
from pants.contrib.go.subsystems.go_distribution import GoDistribution
from pants.contrib.go.targets.go_binary import GoBinary
from pants.contrib.go.targets.go_library import GoLibrary
from pants.contrib.go.targets.go_local_source import GoLocalSource
from pants.contrib.go.targets.go_remote_library import GoRemoteLibrary
from pants.contrib.go.targets.go_target import GoTarget
class GoTask(Task):
@classmethod
def subsystem_dependencies(cls):
return super(GoTask, cls).subsystem_dependencies() + (GoDistribution.Factory,)
@staticmethod
def is_binary(target):
return isinstance(target, GoBinary)
@staticmethod
def is_local_lib(target):
return isinstance(target, GoLibrary)
@staticmethod
def is_remote_lib(target):
return isinstance(target, GoRemoteLibrary)
@staticmethod
def is_local_src(target):
return isinstance(target, GoLocalSource)
@staticmethod
def is_go(target):
return isinstance(target, GoTarget)
@memoized_property
def go_dist(self):
return GoDistribution.Factory.global_instance().create()
@memoized_property
def import_oracle(self):
"""Return an import oracle that can help look up and categorize imports.
:rtype: :class:`ImportOracle`
"""
return ImportOracle(go_dist=self.go_dist, workunit_factory=self.context.new_workunit)
@memoized_property
def goos_goarch(self):
"""Return concatenated $GOOS and $GOARCH environment variables, separated by an underscore.
Useful for locating where the Go compiler is placing binaries ("$GOPATH/pkg/$GOOS_$GOARCH").
:rtype: string
"""
return '{goos}_{goarch}'.format(goos=self._lookup_go_env_var('GOOS'),
goarch=self._lookup_go_env_var('GOARCH'))
def _lookup_go_env_var(self, var):
return self.go_dist.create_go_cmd('env', args=[var]).check_output().strip()
class ImportOracle(object):
"""Answers questions about Go imports."""
class ListDepsError(Exception):
"""Indicates a problem listing import paths for one or more packages."""
def __init__(self, go_dist, workunit_factory):
self._go_dist = go_dist
self._workunit_factory = workunit_factory
@memoized_property
def go_stdlib(self):
"""Return the set of all Go standard library import paths.
:rtype: frozenset of string
"""
out = self._go_dist.create_go_cmd('list', args=['std']).check_output()
return frozenset(out.strip().split())
# This simple regex mirrors the behavior of the relevant go code in practice (see
# repoRootForImportDynamic and surrounding code in
# https://github.com/golang/go/blob/7bc40ffb05d8813bf9b41a331b45d37216f9e747/src/cmd/go/vcs.go).
_remote_import_re = re.compile('[^.]+(?:\.[^.]+)+\/')
def is_remote_import(self, import_path):
"""Whether the specified import_path denotes a remote import."""
return self._remote_import_re.match(import_path) is not None
def is_go_internal_import(self, import_path):
"""Return `True` if the given import path will be satisfied directly by the Go distribution.
For example, both the go standard library ("archive/tar", "bufio", "fmt", etc.) and "C" imports
are satisfiable by a Go distribution via linking of internal Go code and external c standard
library code respectively.
:rtype: bool
"""
# The "C" package is a psuedo-package that links through to the c stdlib, see:
# http://blog.golang.org/c-go-cgo
return import_path == 'C' or import_path in self.go_stdlib
class ImportListing(namedtuple('ImportListing', ['pkg_name',
'imports',
'test_imports',
'x_test_imports'])):
"""Represents all the imports of a given package."""
@property
def all_imports(self):
"""Return all imports for this package, including any test imports.
:rtype: list of string
"""
return list(OrderedSet(self.imports + self.test_imports + self.x_test_imports))
@memoized_method
def list_imports(self, pkg, gopath=None):
"""Return a listing of the dependencies of the given package.
:param string pkg: The package whose files to list all dependencies of.
:param string gopath: An optional $GOPATH which points to a Go workspace containing `pkg`.
:returns: The import listing for `pkg` that represents all its dependencies.
:rtype: :class:`ImportOracle.ImportListing`
:raises: :class:`ImportOracle.ListDepsError` if there was a problem listing the dependencies
of `pkg`.
"""
go_cmd = self._go_dist.create_go_cmd('list', args=['-json', pkg], gopath=gopath)
with self._workunit_factory(pkg, cmd=str(go_cmd), labels=[WorkUnitLabel.TOOL]) as workunit:
# TODO(John Sirois): It would be nice to be able to tee the stdout to the workunit to we have
# a capture of the json available for inspection in the server console.
process = go_cmd.spawn(stdout=subprocess.PIPE, stderr=workunit.output('stderr'))
out, _ = process.communicate()
returncode = process.returncode
workunit.set_outcome(WorkUnit.SUCCESS if returncode == 0 else WorkUnit.FAILURE)
if returncode != 0:
raise self.ListDepsError('Problem listing imports for {}: {} failed with exit code {}'
.format(pkg, go_cmd, returncode))
data = json.loads(out)
# XTestImports are for black box tests. These test files live inside the package dir but
# declare a different package and thus can only access the public members of the package's
# production code. This style of test necessarily means the test file will import the main
# package. For pants, this would lead to a cyclic self-dependency, so we omit the main
# package as implicitly included as its own dependency.
x_test_imports = [i for i in data.get('XTestImports', []) if i != pkg]
return self.ImportListing(pkg_name=data.get('Name'),
imports=data.get('Imports', []),
test_imports=data.get('TestImports', []),
x_test_imports=x_test_imports)
|
{
"content_hash": "604c8ded90c072585c965dc0c6531338",
"timestamp": "",
"source": "github",
"line_count": 164,
"max_line_length": 99,
"avg_line_length": 40.71341463414634,
"alnum_prop": 0.6794967799910139,
"repo_name": "peiyuwang/pants",
"id": "efa670fd49e01097d814c823e10797a0a84b4d6f",
"size": "6824",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "contrib/go/src/python/pants/contrib/go/tasks/go_task.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "781"
},
{
"name": "CSS",
"bytes": "9444"
},
{
"name": "GAP",
"bytes": "1283"
},
{
"name": "Gherkin",
"bytes": "919"
},
{
"name": "Go",
"bytes": "1746"
},
{
"name": "HTML",
"bytes": "78744"
},
{
"name": "Java",
"bytes": "463179"
},
{
"name": "JavaScript",
"bytes": "30784"
},
{
"name": "Protocol Buffer",
"bytes": "4749"
},
{
"name": "Python",
"bytes": "5586816"
},
{
"name": "Rust",
"bytes": "168825"
},
{
"name": "Scala",
"bytes": "79707"
},
{
"name": "Shell",
"bytes": "64292"
},
{
"name": "Thrift",
"bytes": "2183"
}
],
"symlink_target": ""
}
|
import datetime
import decimal
from sqlalchemy.engine import create_engine
from sqlalchemy.schema import Table, MetaData, Column
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import types, func, case, inspect
from sqlalchemy.sql import expression, select, literal_column
from sqlalchemy.exc import NoSuchTableError
from sqlalchemy.orm import sessionmaker
import packaging.version
from pytz import timezone
import pytest
import sqlalchemy
import sqlalchemy_bigquery
ONE_ROW_CONTENTS_EXPANDED = [
588,
datetime.datetime(2013, 10, 10, 11, 27, 16, tzinfo=timezone("UTC")),
"W 52 St & 11 Ave",
40.76727216,
decimal.Decimal("40.76727216"),
False,
datetime.date(2013, 10, 10),
datetime.datetime(2013, 10, 10, 11, 27, 16),
datetime.time(11, 27, 16),
b"\xef",
{"age": 100, "name": "John Doe"},
"John Doe",
100,
{"record": {"age": 200, "name": "John Doe 2"}},
{"age": 200, "name": "John Doe 2"},
"John Doe 2",
200,
[1, 2, 3],
]
ONE_ROW_CONTENTS = [
588,
datetime.datetime(2013, 10, 10, 11, 27, 16, tzinfo=timezone("UTC")),
"W 52 St & 11 Ave",
40.76727216,
decimal.Decimal("40.76727216"),
False,
datetime.date(2013, 10, 10),
datetime.datetime(2013, 10, 10, 11, 27, 16),
datetime.time(11, 27, 16),
b"\xef",
{"name": "John Doe", "age": 100},
{"record": {"name": "John Doe 2", "age": 200}},
[1, 2, 3],
]
ONE_ROW_CONTENTS_DML = [
588,
datetime.datetime(2013, 10, 10, 11, 27, 16, tzinfo=timezone("UTC")),
"test",
40.76727216,
decimal.Decimal("40.76727216"),
False,
datetime.date(2013, 10, 10),
datetime.datetime(2013, 10, 10, 11, 27, 16),
datetime.time(11, 27, 16),
"test_bytes",
]
SAMPLE_COLUMNS = [
{"name": "integer", "type": types.Integer(), "nullable": True, "default": None},
{"name": "timestamp", "type": types.TIMESTAMP(), "nullable": True, "default": None},
{"name": "string", "type": types.String(), "nullable": True, "default": None},
{"name": "float", "type": types.Float(), "nullable": True, "default": None},
{"name": "numeric", "type": types.Numeric(), "nullable": True, "default": None},
{"name": "boolean", "type": types.Boolean(), "nullable": True, "default": None},
{"name": "date", "type": types.DATE(), "nullable": True, "default": None},
{"name": "datetime", "type": types.DATETIME(), "nullable": True, "default": None},
{"name": "time", "type": types.TIME(), "nullable": True, "default": None},
{"name": "bytes", "type": types.BINARY(), "nullable": True, "default": None},
{
"name": "record",
"type": sqlalchemy_bigquery.STRUCT(name=types.String, age=types.Integer),
"nullable": True,
"default": None,
"comment": "In Standard SQL this data type is a STRUCT<name STRING, age INT64>.",
},
{"name": "record.name", "type": types.String(), "nullable": True, "default": None},
{"name": "record.age", "type": types.Integer(), "nullable": True, "default": None},
{
"name": "nested_record",
"type": sqlalchemy_bigquery.STRUCT(
record=sqlalchemy_bigquery.STRUCT(name=types.String, age=types.Integer)
),
"nullable": True,
"default": None,
},
{
"name": "nested_record.record",
"type": sqlalchemy_bigquery.STRUCT(name=types.String, age=types.Integer),
"nullable": True,
"default": None,
},
{
"name": "nested_record.record.name",
"type": types.String(),
"nullable": True,
"default": None,
},
{
"name": "nested_record.record.age",
"type": types.Integer(),
"nullable": True,
"default": None,
},
{
"name": "array",
"type": types.ARRAY(types.Integer()),
"nullable": True,
"default": None,
},
]
@pytest.fixture(scope="session")
def engine_using_test_dataset(bigquery_dataset):
engine = create_engine(f"bigquery:///{bigquery_dataset}", echo=True)
return engine
@pytest.fixture(scope="session")
def engine_with_location():
engine = create_engine("bigquery://", echo=True, location="asia-northeast1")
return engine
@pytest.fixture(scope="session")
def table(engine, bigquery_dataset):
return Table(f"{bigquery_dataset}.sample", MetaData(bind=engine), autoload=True)
@pytest.fixture(scope="session")
def table_using_test_dataset(engine_using_test_dataset):
return Table("sample", MetaData(bind=engine_using_test_dataset), autoload=True)
@pytest.fixture(scope="session")
def table_one_row(engine, bigquery_dataset):
return Table(
f"{bigquery_dataset}.sample_one_row", MetaData(bind=engine), autoload=True
)
@pytest.fixture(scope="session")
def table_dml(engine, bigquery_empty_table):
return Table(bigquery_empty_table, MetaData(bind=engine), autoload=True)
@pytest.fixture(scope="session")
def session(engine):
Session = sessionmaker(bind=engine)
session = Session()
return session
@pytest.fixture(scope="session")
def session_using_test_dataset(engine_using_test_dataset):
Session = sessionmaker(bind=engine_using_test_dataset)
session = Session()
return session
@pytest.fixture(scope="session")
def inspector(engine):
return inspect(engine)
@pytest.fixture(scope="session")
def inspector_using_test_dataset(engine_using_test_dataset):
return inspect(engine_using_test_dataset)
@pytest.fixture(scope="session")
def query():
def query(table):
col1 = literal_column("TIMESTAMP_TRUNC(timestamp, DAY)").label(
"timestamp_label"
)
col2 = func.sum(table.c.integer)
# Test rendering of nested labels. Full expression should render in SELECT, but
# ORDER/GROUP BY should use label only.
col3 = (
func.sum(func.sum(table.c.integer.label("inner")).label("outer"))
.over()
.label("outer")
)
query = (
select([col1, col2, col3])
.where(col1 < "2017-01-01 00:00:00")
.group_by(col1)
.order_by(col2)
)
return query
return query
def test_engine_with_dataset(engine_using_test_dataset, bigquery_dataset):
rows = engine_using_test_dataset.execute("SELECT * FROM sample_one_row").fetchall()
assert list(rows[0]) == ONE_ROW_CONTENTS
table_one_row = Table(
"sample_one_row", MetaData(bind=engine_using_test_dataset), autoload=True
)
rows = table_one_row.select(use_labels=True).execute().fetchall()
assert list(rows[0]) == ONE_ROW_CONTENTS_EXPANDED
table_one_row = Table(
f"{bigquery_dataset}.sample_one_row",
MetaData(bind=engine_using_test_dataset),
autoload=True,
)
rows = table_one_row.select(use_labels=True).execute().fetchall()
# verify that we are pulling from the specifically-named dataset,
# instead of pulling from the default dataset of the engine (which
# does not have this table at all)
assert list(rows[0]) == ONE_ROW_CONTENTS_EXPANDED
def test_dataset_location(
engine_with_location, bigquery_dataset, bigquery_regional_dataset
):
rows = engine_with_location.execute(
f"SELECT * FROM {bigquery_regional_dataset}.sample_one_row"
).fetchall()
assert list(rows[0]) == ONE_ROW_CONTENTS
def test_reflect_select(table, table_using_test_dataset):
for table in [table, table_using_test_dataset]:
assert table.comment == "A sample table containing most data types."
assert len(table.c) == 18
assert isinstance(table.c.integer, Column)
assert isinstance(table.c.integer.type, types.Integer)
assert isinstance(table.c.timestamp.type, types.TIMESTAMP)
assert isinstance(table.c.string.type, types.String)
assert isinstance(table.c.float.type, types.Float)
assert isinstance(table.c.boolean.type, types.Boolean)
assert isinstance(table.c.date.type, types.DATE)
assert isinstance(table.c.datetime.type, types.DATETIME)
assert isinstance(table.c.time.type, types.TIME)
assert isinstance(table.c.bytes.type, types.BINARY)
assert isinstance(table.c["record.age"].type, types.Integer)
assert isinstance(table.c["record.name"].type, types.String)
assert isinstance(table.c["nested_record.record.age"].type, types.Integer)
assert isinstance(table.c["nested_record.record.name"].type, types.String)
assert isinstance(table.c.array.type, types.ARRAY)
# Force unique column labels using `use_labels` below to deal
# with BQ sometimes complaining about duplicate column names
# when a destination table is specified, even though no
# destination table is specified. When this test was written,
# `use_labels` was forced by the dialect.
rows = table.select(use_labels=True).execute().fetchall()
assert len(rows) == 1000
def test_content_from_raw_queries(engine, bigquery_dataset):
rows = engine.execute(f"SELECT * FROM {bigquery_dataset}.sample_one_row").fetchall()
assert list(rows[0]) == ONE_ROW_CONTENTS
def test_record_content_from_raw_queries(engine, bigquery_dataset):
rows = engine.execute(
f"SELECT record.name FROM {bigquery_dataset}.sample_one_row"
).fetchall()
assert rows[0][0] == "John Doe"
def test_content_from_reflect(engine, table_one_row):
rows = table_one_row.select(use_labels=True).execute().fetchall()
assert list(rows[0]) == ONE_ROW_CONTENTS_EXPANDED
def test_unicode(engine, table_one_row):
unicode_str = "白人看不懂"
returned_str = sqlalchemy.select(
[expression.bindparam("好", unicode_str)], from_obj=table_one_row,
).scalar()
assert returned_str == unicode_str
def test_reflect_select_shared_table(engine):
one_row = Table(
"bigquery-public-data.samples.natality", MetaData(bind=engine), autoload=True
)
row = one_row.select().limit(1).execute().first()
assert len(row) >= 1
def test_reflect_table_does_not_exist(engine, bigquery_dataset):
with pytest.raises(NoSuchTableError):
Table(
f"{bigquery_dataset}.table_does_not_exist",
MetaData(bind=engine),
autoload=True,
)
assert (
Table(
f"{bigquery_dataset}.table_does_not_exist", MetaData(bind=engine)
).exists()
is False
)
def test_reflect_dataset_does_not_exist(engine):
with pytest.raises(NoSuchTableError):
Table(
"dataset_does_not_exist.table_does_not_exist",
MetaData(bind=engine),
autoload=True,
)
def test_tables_list(engine, engine_using_test_dataset, bigquery_dataset):
tables = engine.table_names()
assert f"{bigquery_dataset}.sample" in tables
assert f"{bigquery_dataset}.sample_one_row" in tables
assert f"{bigquery_dataset}.sample_view" not in tables
tables = engine_using_test_dataset.table_names()
assert "sample" in tables
assert "sample_one_row" in tables
assert "sample_view" not in tables
def test_group_by(session, table, session_using_test_dataset, table_using_test_dataset):
"""labels in SELECT clause should be correclty formatted (dots are replaced with underscores)"""
for session, table in [
(session, table),
(session_using_test_dataset, table_using_test_dataset),
]:
result = (
session.query(table.c.string, func.count(table.c.integer))
.group_by(table.c.string)
.all()
)
assert len(result) > 0
def test_nested_labels(engine, table):
col = table.c.integer
exprs = [
sqlalchemy.func.sum(
sqlalchemy.func.sum(col.label("inner")).label("outer")
).over(),
sqlalchemy.func.sum(
sqlalchemy.case([[sqlalchemy.literal(True), col.label("inner")]]).label(
"outer"
)
),
sqlalchemy.func.sum(
sqlalchemy.func.sum(
sqlalchemy.case([[sqlalchemy.literal(True), col.label("inner")]]).label(
"middle"
)
).label("outer")
).over(),
]
for expr in exprs:
sql = str(expr.compile(engine))
assert "inner" not in sql
assert "middle" not in sql
assert "outer" not in sql
def test_session_query(
session, table, session_using_test_dataset, table_using_test_dataset
):
for session, table in [
(session, table),
(session_using_test_dataset, table_using_test_dataset),
]:
col_concat = func.concat(table.c.string).label("concat")
result = (
session.query(
table.c.string,
col_concat,
func.avg(table.c.integer),
func.sum(
case([(table.c.boolean == sqlalchemy.literal(True), 1)], else_=0)
),
)
.group_by(table.c.string, col_concat)
.having(func.avg(table.c.integer) > 10)
).all()
assert len(result) > 0
def test_labels(session, table, session_using_test_dataset, table_using_test_dataset):
for session, table in [
(session, table),
(session_using_test_dataset, table_using_test_dataset),
]:
result = session.query(
# Valid
table.c.string.label("abc"),
# Invalid, needs to start with underscore
table.c.string.label("123"),
# Valid
table.c.string.label("_123abc"),
# Invalid, contains illegal characters
table.c.string.label("!@#$%^&*()~`"),
)
result = result.all()
assert len(result) > 0
def test_custom_expression(
engine, engine_using_test_dataset, table, table_using_test_dataset, query
):
"""GROUP BY clause should use labels instead of expressions"""
q = query(table)
result = engine.execute(q).fetchall()
assert len(result) > 0
q = query(table_using_test_dataset)
result = engine_using_test_dataset.execute(q).fetchall()
assert len(result) > 0
def test_compiled_query_literal_binds(
engine, engine_using_test_dataset, table, table_using_test_dataset, query
):
q = query(table)
compiled = q.compile(engine, compile_kwargs={"literal_binds": True})
result = engine.execute(compiled).fetchall()
assert len(result) > 0
q = query(table_using_test_dataset)
compiled = q.compile(
engine_using_test_dataset, compile_kwargs={"literal_binds": True}
)
result = engine_using_test_dataset.execute(compiled).fetchall()
assert len(result) > 0
@pytest.mark.parametrize(
["column", "processed"],
[
(types.String(), "STRING"),
(types.NUMERIC(), "NUMERIC"),
(types.ARRAY(types.String), "ARRAY<STRING>"),
],
)
def test_compile_types(engine, column, processed):
result = engine.dialect.type_compiler.process(column)
assert result == processed
def test_joins(session, table, table_one_row):
result = (
session.query(table.c.string, func.count(table_one_row.c.integer))
.join(table_one_row, table_one_row.c.string == table.c.string)
.group_by(table.c.string)
.all()
)
assert len(result) > 0
def test_querying_wildcard_tables(engine):
table = Table(
"bigquery-public-data.noaa_gsod.gsod*", MetaData(bind=engine), autoload=True
)
rows = table.select().limit(1).execute().first()
assert len(rows) > 0
def test_dml(engine, session, table_dml):
# test insert
engine.execute(table_dml.insert(ONE_ROW_CONTENTS_DML))
result = table_dml.select(use_labels=True).execute().fetchall()
assert len(result) == 1
# test update
session.query(table_dml).filter(table_dml.c.string == "test").update(
{"string": "updated_row"}, synchronize_session=False
)
updated_result = table_dml.select(use_labels=True).execute().fetchone()
assert updated_result[table_dml.c.string] == "updated_row"
# test delete
session.query(table_dml).filter(table_dml.c.string == "updated_row").delete(
synchronize_session=False
)
result = table_dml.select(use_labels=True).execute().fetchall()
assert len(result) == 0
def test_create_table(engine, bigquery_dataset):
meta = MetaData()
Table(
f"{bigquery_dataset}.test_table_create",
meta,
Column("integer_c", sqlalchemy.Integer, doc="column description"),
Column("float_c", sqlalchemy.Float),
Column("decimal_c", sqlalchemy.DECIMAL),
Column("string_c", sqlalchemy.String),
Column("text_c", sqlalchemy.Text),
Column("boolean_c", sqlalchemy.Boolean),
Column("timestamp_c", sqlalchemy.TIMESTAMP),
Column("datetime_c", sqlalchemy.DATETIME),
Column("date_c", sqlalchemy.DATE),
Column("time_c", sqlalchemy.TIME),
Column("binary_c", sqlalchemy.BINARY),
bigquery_description="test table description",
bigquery_friendly_name="test table name",
)
meta.create_all(engine)
meta.drop_all(engine)
# Test creating tables with declarative_base
Base = declarative_base()
class TableTest(Base):
__tablename__ = f"{bigquery_dataset}.test_table_create2"
integer_c = Column(sqlalchemy.Integer, primary_key=True)
float_c = Column(sqlalchemy.Float)
Base.metadata.create_all(engine)
Base.metadata.drop_all(engine)
def test_schemas_names(inspector, inspector_using_test_dataset, bigquery_dataset):
datasets = inspector.get_schema_names()
assert f"{bigquery_dataset}" in datasets
datasets = inspector_using_test_dataset.get_schema_names()
assert f"{bigquery_dataset}" in datasets
def test_table_names_in_schema(
inspector, inspector_using_test_dataset, bigquery_dataset
):
tables = inspector.get_table_names(bigquery_dataset)
assert f"{bigquery_dataset}.sample" in tables
assert f"{bigquery_dataset}.sample_one_row" in tables
assert f"{bigquery_dataset}.sample_dml_empty" in tables
assert f"{bigquery_dataset}.sample_view" not in tables
assert len(tables) == 3
tables = inspector_using_test_dataset.get_table_names()
assert "sample" in tables
assert "sample_one_row" in tables
assert "sample_dml_empty" in tables
assert "sample_view" not in tables
assert len(tables) == 3
def test_view_names(inspector, inspector_using_test_dataset, bigquery_dataset):
view_names = inspector.get_view_names()
assert f"{bigquery_dataset}.sample_view" in view_names
assert f"{bigquery_dataset}.sample" not in view_names
view_names = inspector_using_test_dataset.get_view_names()
assert "sample_view" in view_names
assert "sample" not in view_names
def test_get_indexes(inspector, inspector_using_test_dataset, bigquery_dataset):
for _ in [f"{bigquery_dataset}.sample", f"{bigquery_dataset}.sample_one_row"]:
indexes = inspector.get_indexes(f"{bigquery_dataset}.sample")
assert len(indexes) == 2
assert indexes[0] == {
"name": "partition",
"column_names": ["timestamp"],
"unique": False,
}
assert indexes[1] == {
"name": "clustering",
"column_names": ["integer", "string"],
"unique": False,
}
def test_get_columns(inspector, inspector_using_test_dataset, bigquery_dataset):
columns_without_schema = inspector.get_columns(f"{bigquery_dataset}.sample")
columns_schema = inspector.get_columns("sample", bigquery_dataset)
columns_queries = [columns_without_schema, columns_schema]
for columns in columns_queries:
for i, col in enumerate(columns):
sample_col = SAMPLE_COLUMNS[i]
assert col["comment"] == sample_col.get("comment")
assert col["default"] == sample_col["default"]
assert col["name"] == sample_col["name"]
assert col["nullable"] == sample_col["nullable"]
assert (
col["type"].__class__.__name__ == sample_col["type"].__class__.__name__
)
columns_without_schema = inspector_using_test_dataset.get_columns("sample")
columns_schema = inspector_using_test_dataset.get_columns(
"sample", bigquery_dataset
)
columns_queries = [columns_without_schema, columns_schema]
for columns in columns_queries:
for i, col in enumerate(columns):
sample_col = SAMPLE_COLUMNS[i]
assert col["comment"] == sample_col.get("comment")
assert col["default"] == sample_col["default"]
assert col["name"] == sample_col["name"]
assert col["nullable"] == sample_col["nullable"]
assert (
col["type"].__class__.__name__ == sample_col["type"].__class__.__name__
)
@pytest.mark.parametrize(
"provided_schema_name,provided_table_name,client_project",
[
("dataset", "table", "project"),
(None, "dataset.table", "project"),
(None, "project.dataset.table", "other_project"),
("project", "dataset.table", "other_project"),
("project.dataset", "table", "other_project"),
],
)
def test_table_reference(
dialect, provided_schema_name, provided_table_name, client_project
):
ref = dialect._table_reference(
provided_schema_name, provided_table_name, client_project
)
assert ref.table_id == "table"
assert ref.dataset_id == "dataset"
assert ref.project == "project"
@pytest.mark.parametrize(
"provided_schema_name,provided_table_name,client_project",
[
("project.dataset", "other_dataset.table", "project"),
("project.dataset", "other_project.dataset.table", "project"),
("project.dataset.something_else", "table", "project"),
(None, "project.dataset.table.something_else", "project"),
],
)
def test_invalid_table_reference(
dialect, provided_schema_name, provided_table_name, client_project
):
with pytest.raises(ValueError):
dialect._table_reference(
provided_schema_name, provided_table_name, client_project
)
def test_has_table(engine, engine_using_test_dataset, bigquery_dataset):
assert engine.has_table("sample", bigquery_dataset) is True
assert engine.has_table(f"{bigquery_dataset}.sample") is True
assert engine.has_table(f"{bigquery_dataset}.nonexistent_table") is False
assert engine.has_table("nonexistent_table", "nonexistent_dataset") is False
assert engine_using_test_dataset.has_table("sample") is True
assert engine_using_test_dataset.has_table("sample", bigquery_dataset) is True
assert engine_using_test_dataset.has_table(f"{bigquery_dataset}.sample") is True
assert engine_using_test_dataset.has_table("sample_alt") is False
def test_distinct_188(engine, bigquery_dataset):
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer
from sqlalchemy.orm import sessionmaker
Base = declarative_base()
class MyTable(Base):
__tablename__ = f"{bigquery_dataset}.test_distinct_188"
id = Column(Integer, primary_key=True)
my_column = Column(Integer)
MyTable.__table__.create(engine)
Session = sessionmaker(bind=engine)
db = Session()
db.add_all([MyTable(id=i, my_column=i % 2) for i in range(9)])
db.commit()
expected = [(0,), (1,)]
assert sorted(db.query(MyTable.my_column).distinct().all()) == expected
assert (
sorted(
db.query(
sqlalchemy.distinct(MyTable.my_column).label("just_a_random_label")
).all()
)
== expected
)
assert sorted(db.query(sqlalchemy.distinct(MyTable.my_column)).all()) == expected
@pytest.mark.skipif(
packaging.version.parse(sqlalchemy.__version__) < packaging.version.parse("1.4"),
reason="requires sqlalchemy 1.4 or higher",
)
def test_huge_in():
engine = sqlalchemy.create_engine("bigquery://")
conn = engine.connect()
try:
assert list(
conn.execute(
sqlalchemy.select([sqlalchemy.literal(-1).in_(list(range(99999)))])
)
) == [(False,)]
except Exception:
error = True
else:
error = False
assert not error, "execution failed"
@pytest.mark.skipif(
packaging.version.parse(sqlalchemy.__version__) < packaging.version.parse("1.4"),
reason="unnest (and other table-valued-function) support required version 1.4",
)
def test_unnest(engine, bigquery_dataset):
from sqlalchemy import select, func, String
from sqlalchemy_bigquery import ARRAY
conn = engine.connect()
metadata = MetaData()
table = Table(
f"{bigquery_dataset}.test_unnest", metadata, Column("objects", ARRAY(String)),
)
metadata.create_all(engine)
conn.execute(
table.insert(), [dict(objects=["a", "b", "c"]), dict(objects=["x", "y"])]
)
query = select([func.unnest(table.c.objects).alias("foo_objects").column])
compiled = str(query.compile(engine))
assert " ".join(compiled.strip().split()) == (
f"SELECT `foo_objects`"
f" FROM"
f" `{bigquery_dataset}.test_unnest` `{bigquery_dataset}.test_unnest_1`,"
f" unnest(`{bigquery_dataset}.test_unnest_1`.`objects`) AS `foo_objects`"
)
assert sorted(r[0] for r in conn.execute(query)) == ["a", "b", "c", "x", "y"]
|
{
"content_hash": "f6b045d32508ceb3737f7437752e2de8",
"timestamp": "",
"source": "github",
"line_count": 752,
"max_line_length": 100,
"avg_line_length": 33.962765957446805,
"alnum_prop": 0.631440877055599,
"repo_name": "mxmzdlv/pybigquery",
"id": "564c5e68f4844c01df075f1d357d99f9ccc44400",
"size": "26686",
"binary": false,
"copies": "1",
"ref": "refs/heads/owl-bot-update-lock-0e18b9475fbeb12d9ad4302283171edebb6baf2dfca1bd215ee3b34ed79d95d7",
"path": "tests/system/test_sqlalchemy_bigquery.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "49667"
},
{
"name": "Shell",
"bytes": "1084"
}
],
"symlink_target": ""
}
|
import sys
import types
import unittest
import inspect
import datetime
from test.test_support import TESTFN, run_unittest
from test import inspect_fodder as mod
from test import inspect_fodder2 as mod2
# Functions tested in this suite:
# ismodule, isclass, ismethod, isfunction, istraceback, isframe, iscode,
# isbuiltin, isroutine, isgenerator, isgeneratorfunction, getmembers,
# getdoc, getfile, getmodule, getsourcefile, getcomments, getsource,
# getclasstree, getargspec, getargvalues, formatargspec, formatargvalues,
# currentframe, stack, trace, isdatadescriptor
modfile = mod.__file__
if modfile.endswith(('c', 'o')):
modfile = modfile[:-1]
import __builtin__
try:
1/0
except:
tb = sys.exc_traceback
git = mod.StupidGit()
class IsTestBase(unittest.TestCase):
predicates = set([inspect.isbuiltin, inspect.isclass, inspect.iscode,
inspect.isframe, inspect.isfunction, inspect.ismethod,
inspect.ismodule, inspect.istraceback,
inspect.isgenerator, inspect.isgeneratorfunction])
def istest(self, predicate, exp):
obj = eval(exp)
self.failUnless(predicate(obj), '%s(%s)' % (predicate.__name__, exp))
for other in self.predicates - set([predicate]):
if predicate == inspect.isgeneratorfunction and\
other == inspect.isfunction:
continue
self.failIf(other(obj), 'not %s(%s)' % (other.__name__, exp))
def generator_function_example(self):
for i in xrange(2):
yield i
class TestPredicates(IsTestBase):
def test_sixteen(self):
count = len(filter(lambda x:x.startswith('is'), dir(inspect)))
# This test is here for remember you to update Doc/library/inspect.rst
# which claims there are 16 such functions
expected = 16
err_msg = "There are %d (not %d) is* functions" % (count, expected)
self.assertEqual(count, expected, err_msg)
def test_excluding_predicates(self):
self.istest(inspect.isbuiltin, 'sys.exit')
self.istest(inspect.isbuiltin, '[].append')
self.istest(inspect.isclass, 'mod.StupidGit')
self.istest(inspect.iscode, 'mod.spam.func_code')
self.istest(inspect.isframe, 'tb.tb_frame')
self.istest(inspect.isfunction, 'mod.spam')
self.istest(inspect.ismethod, 'mod.StupidGit.abuse')
self.istest(inspect.ismethod, 'git.argue')
self.istest(inspect.ismodule, 'mod')
self.istest(inspect.istraceback, 'tb')
self.istest(inspect.isdatadescriptor, '__builtin__.file.closed')
self.istest(inspect.isdatadescriptor, '__builtin__.file.softspace')
self.istest(inspect.isgenerator, '(x for x in xrange(2))')
self.istest(inspect.isgeneratorfunction, 'generator_function_example')
if hasattr(types, 'GetSetDescriptorType'):
self.istest(inspect.isgetsetdescriptor,
'type(tb.tb_frame).f_locals')
else:
self.failIf(inspect.isgetsetdescriptor(type(tb.tb_frame).f_locals))
if hasattr(types, 'MemberDescriptorType'):
self.istest(inspect.ismemberdescriptor, 'datetime.timedelta.days')
else:
self.failIf(inspect.ismemberdescriptor(datetime.timedelta.days))
def test_isroutine(self):
self.assert_(inspect.isroutine(mod.spam))
self.assert_(inspect.isroutine([].count))
class TestInterpreterStack(IsTestBase):
def __init__(self, *args, **kwargs):
unittest.TestCase.__init__(self, *args, **kwargs)
git.abuse(7, 8, 9)
def test_abuse_done(self):
self.istest(inspect.istraceback, 'git.ex[2]')
self.istest(inspect.isframe, 'mod.fr')
def test_stack(self):
self.assert_(len(mod.st) >= 5)
self.assertEqual(mod.st[0][1:],
(modfile, 16, 'eggs', [' st = inspect.stack()\n'], 0))
self.assertEqual(mod.st[1][1:],
(modfile, 9, 'spam', [' eggs(b + d, c + f)\n'], 0))
self.assertEqual(mod.st[2][1:],
(modfile, 43, 'argue', [' spam(a, b, c)\n'], 0))
self.assertEqual(mod.st[3][1:],
(modfile, 39, 'abuse', [' self.argue(a, b, c)\n'], 0))
def test_trace(self):
self.assertEqual(len(git.tr), 3)
self.assertEqual(git.tr[0][1:], (modfile, 43, 'argue',
[' spam(a, b, c)\n'], 0))
self.assertEqual(git.tr[1][1:], (modfile, 9, 'spam',
[' eggs(b + d, c + f)\n'], 0))
self.assertEqual(git.tr[2][1:], (modfile, 18, 'eggs',
[' q = y / 0\n'], 0))
def test_frame(self):
args, varargs, varkw, locals = inspect.getargvalues(mod.fr)
self.assertEqual(args, ['x', 'y'])
self.assertEqual(varargs, None)
self.assertEqual(varkw, None)
self.assertEqual(locals, {'x': 11, 'p': 11, 'y': 14})
self.assertEqual(inspect.formatargvalues(args, varargs, varkw, locals),
'(x=11, y=14)')
def test_previous_frame(self):
args, varargs, varkw, locals = inspect.getargvalues(mod.fr.f_back)
self.assertEqual(args, ['a', 'b', 'c', 'd', ['e', ['f']]])
self.assertEqual(varargs, 'g')
self.assertEqual(varkw, 'h')
self.assertEqual(inspect.formatargvalues(args, varargs, varkw, locals),
'(a=7, b=8, c=9, d=3, (e=4, (f=5,)), *g=(), **h={})')
class GetSourceBase(unittest.TestCase):
# Subclasses must override.
fodderFile = None
def __init__(self, *args, **kwargs):
unittest.TestCase.__init__(self, *args, **kwargs)
self.source = file(inspect.getsourcefile(self.fodderFile)).read()
def sourcerange(self, top, bottom):
lines = self.source.split("\n")
return "\n".join(lines[top-1:bottom]) + "\n"
def assertSourceEqual(self, obj, top, bottom):
self.assertEqual(inspect.getsource(obj),
self.sourcerange(top, bottom))
class TestRetrievingSourceCode(GetSourceBase):
fodderFile = mod
def test_getclasses(self):
classes = inspect.getmembers(mod, inspect.isclass)
self.assertEqual(classes,
[('FesteringGob', mod.FesteringGob),
('MalodorousPervert', mod.MalodorousPervert),
('ParrotDroppings', mod.ParrotDroppings),
('StupidGit', mod.StupidGit)])
tree = inspect.getclasstree([cls[1] for cls in classes], 1)
self.assertEqual(tree,
[(mod.ParrotDroppings, ()),
(mod.StupidGit, ()),
[(mod.MalodorousPervert, (mod.StupidGit,)),
[(mod.FesteringGob, (mod.MalodorousPervert,
mod.ParrotDroppings))
]
]
])
def test_getfunctions(self):
functions = inspect.getmembers(mod, inspect.isfunction)
self.assertEqual(functions, [('eggs', mod.eggs),
('spam', mod.spam)])
def test_getdoc(self):
self.assertEqual(inspect.getdoc(mod), 'A module docstring.')
self.assertEqual(inspect.getdoc(mod.StupidGit),
'A longer,\n\nindented\n\ndocstring.')
self.assertEqual(inspect.getdoc(git.abuse),
'Another\n\ndocstring\n\ncontaining\n\ntabs')
def test_cleandoc(self):
self.assertEqual(inspect.cleandoc('An\n indented\n docstring.'),
'An\nindented\ndocstring.')
def test_getcomments(self):
self.assertEqual(inspect.getcomments(mod), '# line 1\n')
self.assertEqual(inspect.getcomments(mod.StupidGit), '# line 20\n')
def test_getmodule(self):
# Check actual module
self.assertEqual(inspect.getmodule(mod), mod)
# Check class (uses __module__ attribute)
self.assertEqual(inspect.getmodule(mod.StupidGit), mod)
# Check a method (no __module__ attribute, falls back to filename)
self.assertEqual(inspect.getmodule(mod.StupidGit.abuse), mod)
# Do it again (check the caching isn't broken)
self.assertEqual(inspect.getmodule(mod.StupidGit.abuse), mod)
# Check a builtin
self.assertEqual(inspect.getmodule(str), sys.modules["__builtin__"])
# Check filename override
self.assertEqual(inspect.getmodule(None, modfile), mod)
def test_getsource(self):
self.assertSourceEqual(git.abuse, 29, 39)
self.assertSourceEqual(mod.StupidGit, 21, 46)
def test_getsourcefile(self):
self.assertEqual(inspect.getsourcefile(mod.spam), modfile)
self.assertEqual(inspect.getsourcefile(git.abuse), modfile)
def test_getfile(self):
self.assertEqual(inspect.getfile(mod.StupidGit), mod.__file__)
def test_getmodule_recursion(self):
from types import ModuleType
name = '__inspect_dummy'
m = sys.modules[name] = ModuleType(name)
m.__file__ = "<string>" # hopefully not a real filename...
m.__loader__ = "dummy" # pretend the filename is understood by a loader
exec "def x(): pass" in m.__dict__
self.assertEqual(inspect.getsourcefile(m.x.func_code), '<string>')
del sys.modules[name]
inspect.getmodule(compile('a=10','','single'))
class TestDecorators(GetSourceBase):
fodderFile = mod2
def test_wrapped_decorator(self):
self.assertSourceEqual(mod2.wrapped, 14, 17)
def test_replacing_decorator(self):
self.assertSourceEqual(mod2.gone, 9, 10)
class TestOneliners(GetSourceBase):
fodderFile = mod2
def test_oneline_lambda(self):
# Test inspect.getsource with a one-line lambda function.
self.assertSourceEqual(mod2.oll, 25, 25)
def test_threeline_lambda(self):
# Test inspect.getsource with a three-line lambda function,
# where the second and third lines are _not_ indented.
self.assertSourceEqual(mod2.tll, 28, 30)
def test_twoline_indented_lambda(self):
# Test inspect.getsource with a two-line lambda function,
# where the second line _is_ indented.
self.assertSourceEqual(mod2.tlli, 33, 34)
def test_onelinefunc(self):
# Test inspect.getsource with a regular one-line function.
self.assertSourceEqual(mod2.onelinefunc, 37, 37)
def test_manyargs(self):
# Test inspect.getsource with a regular function where
# the arguments are on two lines and _not_ indented and
# the body on the second line with the last arguments.
self.assertSourceEqual(mod2.manyargs, 40, 41)
def test_twolinefunc(self):
# Test inspect.getsource with a regular function where
# the body is on two lines, following the argument list and
# continued on the next line by a \\.
self.assertSourceEqual(mod2.twolinefunc, 44, 45)
def test_lambda_in_list(self):
# Test inspect.getsource with a one-line lambda function
# defined in a list, indented.
self.assertSourceEqual(mod2.a[1], 49, 49)
def test_anonymous(self):
# Test inspect.getsource with a lambda function defined
# as argument to another function.
self.assertSourceEqual(mod2.anonymous, 55, 55)
class TestBuggyCases(GetSourceBase):
fodderFile = mod2
def test_with_comment(self):
self.assertSourceEqual(mod2.with_comment, 58, 59)
def test_multiline_sig(self):
self.assertSourceEqual(mod2.multiline_sig[0], 63, 64)
def test_nested_class(self):
self.assertSourceEqual(mod2.func69().func71, 71, 72)
def test_one_liner_followed_by_non_name(self):
self.assertSourceEqual(mod2.func77, 77, 77)
def test_one_liner_dedent_non_name(self):
self.assertSourceEqual(mod2.cls82.func83, 83, 83)
def test_with_comment_instead_of_docstring(self):
self.assertSourceEqual(mod2.func88, 88, 90)
def test_method_in_dynamic_class(self):
self.assertSourceEqual(mod2.method_in_dynamic_class, 95, 97)
# Helper for testing classify_class_attrs.
def attrs_wo_objs(cls):
return [t[:3] for t in inspect.classify_class_attrs(cls)]
class TestClassesAndFunctions(unittest.TestCase):
def test_classic_mro(self):
# Test classic-class method resolution order.
class A: pass
class B(A): pass
class C(A): pass
class D(B, C): pass
expected = (D, B, A, C)
got = inspect.getmro(D)
self.assertEqual(expected, got)
def test_newstyle_mro(self):
# The same w/ new-class MRO.
class A(object): pass
class B(A): pass
class C(A): pass
class D(B, C): pass
expected = (D, B, C, A, object)
got = inspect.getmro(D)
self.assertEqual(expected, got)
def assertArgSpecEquals(self, routine, args_e, varargs_e = None,
varkw_e = None, defaults_e = None,
formatted = None):
args, varargs, varkw, defaults = inspect.getargspec(routine)
self.assertEqual(args, args_e)
self.assertEqual(varargs, varargs_e)
self.assertEqual(varkw, varkw_e)
self.assertEqual(defaults, defaults_e)
if formatted is not None:
self.assertEqual(inspect.formatargspec(args, varargs, varkw, defaults),
formatted)
def test_getargspec(self):
self.assertArgSpecEquals(mod.eggs, ['x', 'y'], formatted = '(x, y)')
self.assertArgSpecEquals(mod.spam,
['a', 'b', 'c', 'd', ['e', ['f']]],
'g', 'h', (3, (4, (5,))),
'(a, b, c, d=3, (e, (f,))=(4, (5,)), *g, **h)')
def test_getargspec_method(self):
class A(object):
def m(self):
pass
self.assertArgSpecEquals(A.m, ['self'])
def test_getargspec_sublistofone(self):
def sublistOfOne((foo,)): return 1
self.assertArgSpecEquals(sublistOfOne, [['foo']])
def fakeSublistOfOne((foo)): return 1
self.assertArgSpecEquals(fakeSublistOfOne, ['foo'])
def test_classify_oldstyle(self):
class A:
def s(): pass
s = staticmethod(s)
def c(cls): pass
c = classmethod(c)
def getp(self): pass
p = property(getp)
def m(self): pass
def m1(self): pass
datablob = '1'
attrs = attrs_wo_objs(A)
self.assert_(('s', 'static method', A) in attrs, 'missing static method')
self.assert_(('c', 'class method', A) in attrs, 'missing class method')
self.assert_(('p', 'property', A) in attrs, 'missing property')
self.assert_(('m', 'method', A) in attrs, 'missing plain method')
self.assert_(('m1', 'method', A) in attrs, 'missing plain method')
self.assert_(('datablob', 'data', A) in attrs, 'missing data')
class B(A):
def m(self): pass
attrs = attrs_wo_objs(B)
self.assert_(('s', 'static method', A) in attrs, 'missing static method')
self.assert_(('c', 'class method', A) in attrs, 'missing class method')
self.assert_(('p', 'property', A) in attrs, 'missing property')
self.assert_(('m', 'method', B) in attrs, 'missing plain method')
self.assert_(('m1', 'method', A) in attrs, 'missing plain method')
self.assert_(('datablob', 'data', A) in attrs, 'missing data')
class C(A):
def m(self): pass
def c(self): pass
attrs = attrs_wo_objs(C)
self.assert_(('s', 'static method', A) in attrs, 'missing static method')
self.assert_(('c', 'method', C) in attrs, 'missing plain method')
self.assert_(('p', 'property', A) in attrs, 'missing property')
self.assert_(('m', 'method', C) in attrs, 'missing plain method')
self.assert_(('m1', 'method', A) in attrs, 'missing plain method')
self.assert_(('datablob', 'data', A) in attrs, 'missing data')
class D(B, C):
def m1(self): pass
attrs = attrs_wo_objs(D)
self.assert_(('s', 'static method', A) in attrs, 'missing static method')
self.assert_(('c', 'class method', A) in attrs, 'missing class method')
self.assert_(('p', 'property', A) in attrs, 'missing property')
self.assert_(('m', 'method', B) in attrs, 'missing plain method')
self.assert_(('m1', 'method', D) in attrs, 'missing plain method')
self.assert_(('datablob', 'data', A) in attrs, 'missing data')
# Repeat all that, but w/ new-style classes.
def test_classify_newstyle(self):
class A(object):
def s(): pass
s = staticmethod(s)
def c(cls): pass
c = classmethod(c)
def getp(self): pass
p = property(getp)
def m(self): pass
def m1(self): pass
datablob = '1'
attrs = attrs_wo_objs(A)
self.assert_(('s', 'static method', A) in attrs, 'missing static method')
self.assert_(('c', 'class method', A) in attrs, 'missing class method')
self.assert_(('p', 'property', A) in attrs, 'missing property')
self.assert_(('m', 'method', A) in attrs, 'missing plain method')
self.assert_(('m1', 'method', A) in attrs, 'missing plain method')
self.assert_(('datablob', 'data', A) in attrs, 'missing data')
class B(A):
def m(self): pass
attrs = attrs_wo_objs(B)
self.assert_(('s', 'static method', A) in attrs, 'missing static method')
self.assert_(('c', 'class method', A) in attrs, 'missing class method')
self.assert_(('p', 'property', A) in attrs, 'missing property')
self.assert_(('m', 'method', B) in attrs, 'missing plain method')
self.assert_(('m1', 'method', A) in attrs, 'missing plain method')
self.assert_(('datablob', 'data', A) in attrs, 'missing data')
class C(A):
def m(self): pass
def c(self): pass
attrs = attrs_wo_objs(C)
self.assert_(('s', 'static method', A) in attrs, 'missing static method')
self.assert_(('c', 'method', C) in attrs, 'missing plain method')
self.assert_(('p', 'property', A) in attrs, 'missing property')
self.assert_(('m', 'method', C) in attrs, 'missing plain method')
self.assert_(('m1', 'method', A) in attrs, 'missing plain method')
self.assert_(('datablob', 'data', A) in attrs, 'missing data')
class D(B, C):
def m1(self): pass
attrs = attrs_wo_objs(D)
self.assert_(('s', 'static method', A) in attrs, 'missing static method')
self.assert_(('c', 'method', C) in attrs, 'missing plain method')
self.assert_(('p', 'property', A) in attrs, 'missing property')
self.assert_(('m', 'method', B) in attrs, 'missing plain method')
self.assert_(('m1', 'method', D) in attrs, 'missing plain method')
self.assert_(('datablob', 'data', A) in attrs, 'missing data')
def test_main():
run_unittest(TestDecorators, TestRetrievingSourceCode, TestOneliners,
TestBuggyCases,
TestInterpreterStack, TestClassesAndFunctions, TestPredicates)
if __name__ == "__main__":
test_main()
|
{
"content_hash": "c89fb41767b5fdbf324267e1516d8176",
"timestamp": "",
"source": "github",
"line_count": 498,
"max_line_length": 83,
"avg_line_length": 39.407630522088354,
"alnum_prop": 0.5881783439490446,
"repo_name": "MalloyPower/parsing-python",
"id": "32eeb57d31e8eb4b67f1a8005d2089c57eab62fb",
"size": "19625",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "front-end/testsuite-python-lib/Python-2.6/Lib/test/test_inspect.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1963"
},
{
"name": "Lex",
"bytes": "238458"
},
{
"name": "Makefile",
"bytes": "4513"
},
{
"name": "OCaml",
"bytes": "412695"
},
{
"name": "Python",
"bytes": "17319"
},
{
"name": "Rascal",
"bytes": "523063"
},
{
"name": "Yacc",
"bytes": "429659"
}
],
"symlink_target": ""
}
|
""" """
# Standard library modules.
# Third party modules.
import pytest
# Local modules.
from pyxray.cbook import ProgressMixin, ProgressReportMixin
# Globals and constants variables.
class MockProgress(ProgressMixin):
pass
class MockProgressReport(ProgressReportMixin):
pass
@pytest.fixture
def progress():
return MockProgress()
def test_progress_update(progress):
progress.update(50)
assert progress.progress == 50
@pytest.fixture
def progress_report(progress):
report = MockProgressReport()
report.add_reporthook(lambda p: progress.update(p))
return report
def test_progress_report_update(progress_report, progress):
assert progress.progress == 0
progress_report.update(50)
assert progress_report.progress == 50
assert progress.progress == 50
|
{
"content_hash": "3571fed1a1a2fc514cedca1fa72fdd6d",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 59,
"avg_line_length": 18.066666666666666,
"alnum_prop": 0.7355473554735548,
"repo_name": "ppinard/pyxray",
"id": "be71516c513d2de2d9188d3edd408fa0c7da24f9",
"size": "835",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/test_cbook.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "280994"
}
],
"symlink_target": ""
}
|
import datetime
from tapz.panels.options import PanelOptions
from tapz.panels.intervals import Day, Hour
from tapz.site import site
from tapz import exceptions, tasks
COOKIE_INTERVAL = 'interval'
class PanelMeta(type):
"""
Metaclass for all Panels
"""
def __new__(cls, name, bases, attrs):
super_new = super(PanelMeta, cls).__new__
parents = [b for b in bases if isinstance(b, PanelMeta)]
if not parents:
# If this isn't a subclass of Panel, don't do anything special.
return super_new(cls, name, bases, attrs)
new_class = super_new(cls, name, bases, {})
meta = attrs.pop('Meta', None)
new_class.add_to_class('_meta', PanelOptions(meta))
for attr_name, attr_value in attrs.items():
new_class.add_to_class(attr_name, attr_value)
site.register(new_class)
return new_class
def add_to_class(cls, name, value):
"""
Add the value to the class object either by calling
contribute_to_class or setting it directly
"""
if hasattr(value, 'contribute_to_class'):
value.contribute_to_class(cls, name)
else:
setattr(cls, name, value)
class Panel(object):
__metaclass__ = PanelMeta
@classmethod
def queue_event(cls, data):
"""
Queues an event that this panel will later process
"""
tasks.add_event.apply_async(
args=(cls._meta.event_type, data),
routing_key=cls._meta.routing_key
)
def add_event(self, data):
"""
Another event named `name` just occured (`data` contains all the
information for that event), process and store it.
"""
cleaned_data = self.clean(data)
# slice dimensions
dimensions = {}
for dim_name, dim in self._meta.dimensions.items():
dimensions[dim_name] = dim.split(cleaned_data)
site.storage.insert(self._meta.event_type, cleaned_data, dimensions)
def clean(self, data):
"""
Clean the data input data. If a method exists named ``clean_<field-name>``
then call it
"""
cleaned_data = {}
for key, value in data.iteritems():
clean_method = 'clean_%s' % key
if hasattr(self, clean_method):
cleaned_data[key] = getattr(self, clean_method)(value)
else:
cleaned_data[key] = value
return cleaned_data
def get_chart_data(self, rows, columns=None, aggregation=None, filters=None):
"""
Return data grib obtained from the OLAP storage. Result is a two
dimensional array containg rows (list of columns, dummy column if no
column_dimensions).
"""
return site.storage.aggregate(
self._meta.event_type,
aggregation=aggregation,
filters=filters,
rows=rows,
columns=columns
)
def get_last_instance(self):
return site.storage.get_last_instance(self._meta.event_type)
def get_data(self, dimensions, limit=None):
"""
Return all the data for this panel, if `limit` is given, only limit to
last `limit` records.
"""
pass
def add_cookie(self, request, name, value):
"""
Adds a cookie to the cookie queue
"""
request._new_cookies.append((name, value))
def get_response(self, request, sub_call=None):
"""
Dispatches a subcall to the panel instance.
Returns a template name and dictionary of context to render
"""
if not sub_call:
sub_call = 'index'
method = 'call_%s' % sub_call.replace('-', '_')
if not hasattr(self, method):
raise exceptions.PanelMethodDoesNotExist("Missing method %s on panel: %s" % \
(method, self.__class__.__name__))
request._new_cookies = []
context = {
'panels': site.get_panels(),
'current_panel': site.make_meta(self),
}
response = getattr(self, method)(request, context)
for name, value in request._new_cookies:
response.set_cookie(name, value=value, max_age=60*60*24*365)
return response
def get_filters(self, request, context):
"""
Get global filters set for this panel.
"""
return {}
def get_row_dimensions(self, request, context):
"""
Most (all?) of the time panels will display data based on time.
"""
dates = self.get_date_range(request, context)
return [{'timestamp': d} for d in dates]
def get_column_dimensions(self, request, context):
return []
def get_date_range(self, request, context):
"""
Get a range of dates based on the current request
"""
intervals = {
'month': (Day, datetime.timedelta(days=30)),
'day': (Hour, datetime.timedelta(days=1)),
}
i = request.GET.get('interval', request.COOKIES.get(COOKIE_INTERVAL, None))
i = i in intervals and i or 'month'
interval, delta = intervals[i]
end_date = datetime.datetime.now()
start_date = end_date - delta
self.add_cookie(request, COOKIE_INTERVAL, i)
context['current_interval'] = i
context['detail_interval'] = interval
rng = interval.range(start_date, end_date)
context['date_range'] = interval.display_format(rng)
context['packed_date_range'] = interval.pack_format(rng)
prev_rng = interval.range(start_date - delta, start_date)
context['previous_packed_date_range'] = interval.pack_format(prev_rng)
return context['packed_date_range']
|
{
"content_hash": "223a8f2cc04fbd02b2f07672f961459c",
"timestamp": "",
"source": "github",
"line_count": 167,
"max_line_length": 89,
"avg_line_length": 34.952095808383234,
"alnum_prop": 0.5782079835531951,
"repo_name": "teopeurt/tapz",
"id": "ba87e5b1441219f499c5a43b52f07e1a4bfcaa4e",
"size": "5837",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tapz/panels/base.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
import os
import time
from slackclient import SlackClient
token = os.environ['SLACK_API_TOKEN']
sc = SlackClient(token)
checkforconnection()
def getlastmsg():
last_three = sc.api_call("channels.history", channel="C3B6Z61U3", count="1")
for m in last_three["messages"]:
print(m["user"] + ": " + m["text"])
sc.api_call("chat.postMessage", as_user="true:", channel="C3B6Z61U3", text="greeting")
'''
sc.rtm_send_message(channel="python", message="test")
while(i != "shutdown_bot")
i = m["text"]
sc.api_call("chat.postMessage", as_user="true:", channel="C3B6Z61U3", text="greeting")
'''
def checkforconnection():
if sc.rtm_connect():
print("Bot connected and running!")
else:
print("Connection failed. Invalid Slack token or bot ID?")
if __name__ == "__main__":
|
{
"content_hash": "7e2b41e69fa1ce4ce906f1bed08a5684",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 90,
"avg_line_length": 28.6,
"alnum_prop": 0.6142191142191142,
"repo_name": "xaner4/Slackbot",
"id": "3d056dface1eef382501691a22edd44e4586dc2c",
"size": "858",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "slackbot.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1578"
}
],
"symlink_target": ""
}
|
from collections import defaultdict
from datetime import timedelta
from django.db.models.aggregates import Count
from django.http import HttpResponse
from corehq.apps.commtrack.models import StockState
from corehq.apps.locations.models import SQLLocation
from corehq.apps.products.models import SQLProduct
from corehq.apps.reports.cache import request_cache
from corehq.apps.reports.generic import GenericTabularReport
from custom.ilsgateway.tanzania.reports.utils import link_format
from dimagi.utils.decorators.memoized import memoized
from corehq.apps.reports.datatables import DataTablesHeader, DataTablesColumn
from corehq.apps.reports.graph_models import Axis
from custom.common import ALL_OPTION
from custom.ewsghana.filters import ProductByProgramFilter, ViewReportFilter, EWSDateFilter, \
EWSRestrictionLocationFilter
from custom.ewsghana.reports.stock_levels_report import StockLevelsReport, InventoryManagementData, \
StockLevelsLegend, FacilityReportData, InputStock, UsersData
from custom.ewsghana.reports import MultiReport, EWSData, EWSMultiBarChart, ProductSelectionPane, EWSLineChart
from casexml.apps.stock.models import StockTransaction
from custom.ewsghana.utils import get_descendants, make_url, get_second_week, get_country_id, get_supply_points
class ProductAvailabilityData(EWSData):
show_chart = True
show_table = False
slug = 'product_availability'
@property
def title(self):
if not self.location:
return ""
location_type = self.location.location_type.name.lower()
if location_type == 'country':
return "Product availability - National Aggregate"
elif location_type == 'region':
return "Product availability - Regional Aggregate"
elif location_type == 'district':
return "Product availability - District Aggregate"
@property
def headers(self):
return []
@property
def rows(self):
rows = []
if self.config['location_id']:
locations = get_descendants(self.config['location_id'])
unique_products = self.unique_products(locations, all=True).order_by('code')
for product in unique_products:
with_stock = self.config['with_stock'].get(product.product_id, 0)
without_stock = self.config['without_stock'].get(product.product_id, 0)
without_data = self.config['all'] - with_stock - without_stock
rows.append({"product_code": product.code,
"product_name": product.name,
"total": self.config['all'],
"with_stock": with_stock,
"without_stock": without_stock,
"without_data": without_data})
return rows
@property
def chart_config(self):
return {
'label_color': {
"Stocked out": "#a30808",
"Not Stocked out": "#7aaa7a",
"No Stock Data": "#efde7f"
},
'div': "product_availability_summary_plot_placeholder",
'legenddiv': "product_availability_summary_legend",
'xaxistitle': "Products",
'yaxistitle': "Facilities",
}
@property
def charts(self):
product_availability = self.rows
if product_availability:
def convert_product_data_to_stack_chart(rows, chart_config):
ret_data = []
for k in ['Stocked out', 'Not Stocked out', 'No Stock Data']:
def calculate_percent(x, y):
return float(x) / float((y or 1))
datalist = []
for row in rows:
total = row['total']
if k == 'No Stock Data':
datalist.append([row['product_code'], calculate_percent(row['without_data'], total),
row['product_name']])
elif k == 'Stocked out':
datalist.append([row['product_code'], calculate_percent(row['without_stock'], total),
row['product_name']])
elif k == 'Not Stocked out':
datalist.append([row['product_code'], calculate_percent(row['with_stock'], total),
row['product_name']])
ret_data.append({'color': chart_config['label_color'][k], 'label': k, 'data': datalist})
return ret_data
chart = EWSMultiBarChart('', x_axis=Axis('Products'), y_axis=Axis('', '%'))
chart.rotateLabels = -45
chart.marginBottom = 120
chart.stacked = False
chart.tooltipFormat = " on "
chart.forceY = [0, 1]
chart.product_code_map = {
sql_product.code: sql_product.name
for sql_product in SQLProduct.objects.filter(domain=self.domain)
}
chart.is_rendered_as_email = self.config.get('is_rendered_as_email', False)
for row in convert_product_data_to_stack_chart(product_availability, self.chart_config):
chart.add_dataset(row['label'], [
{'x': r[0], 'y': r[1], 'name': r[2]}
for r in sorted(row['data'], key=lambda x: x[0])], color=row['color']
)
return [chart]
return []
class MonthOfStockProduct(EWSData):
slug = 'mos_product'
show_chart = False
show_table = True
use_datatables = True
default_rows = 25
@property
def title(self):
if not self.location:
return ""
if self.config['export']:
return "Current MOS by Product"
location_type = self.location.location_type.name.lower()
if location_type == 'country':
return "Current MOS by Product - CMS, RMS, and Teaching Hospitals"
elif location_type == 'region':
return "Current MOS by Product - RMS and Teaching Hospitals"
elif location_type == 'district':
return "Current MOS by Product"
@property
def headers(self):
headers = DataTablesHeader(DataTablesColumn('Location'))
for product in self.unique_products(
get_supply_points(self.config['domain'], self.config['location_id']), all=(not self.config['export'])
):
if not self.config['export']:
headers.add_column(DataTablesColumn(product.code))
else:
headers.add_column(DataTablesColumn(u'{} ({})'.format(product.name, product.code)))
return headers
@property
def rows(self):
rows = []
unique_products = self.unique_products(
get_supply_points(self.config['domain'], self.config['location_id']), all=(not self.config['export'])
)
if self.config['location_id']:
for case_id, products in self.config['months_of_stock'].iteritems():
sp = SQLLocation.objects.get(supply_point_id=case_id)
if sp.location_type.administrative:
cls = StockLevelsReport
else:
cls = StockStatus
url = make_url(
cls,
self.config['domain'],
'?location_id=%s&filter_by_program=%s&startdate=%s&enddate=%s&report_type=%s',
(sp.location_id, self.config['program'] or ALL_OPTION, self.config['startdate'].date(),
self.config['enddate'].date(), self.config['report_type'])
)
row = [
link_format(sp.name, url) if not self.config.get('is_rendered_as_email', False) else sp.name
]
for p in unique_products:
product_data = products.get(p.product_id)
if product_data:
value = '%.1f' % product_data
else:
value = '-'
row.append(value)
rows.append(row)
return rows
class StockoutsProduct(EWSData):
slug = 'stockouts_product'
show_chart = True
show_table = False
chart_x_label = 'Months'
chart_y_label = 'Facility count'
title = 'Stockout by Product'
@property
def headers(self):
return []
@property
def rows(self):
rows = {}
if self.config['location_id']:
supply_points = get_descendants(self.config['location_id'])
products = self.unique_products(supply_points, all=True)
code_name_map = {}
for product in products:
rows[product.code] = []
code_name_map[product.code] = product.name
enddate = self.config['enddate']
startdate = self.config['startdate'] if 'custom_date' in self.config else enddate - timedelta(days=90)
for d in get_second_week(startdate, enddate):
txs = list(StockTransaction.objects.filter(
case_id__in=list(supply_points.values_list('supply_point_id', flat=True)),
sql_product__in=list(products),
report__date__range=[d['start_date'], d['end_date']],
report__domain=self.config['domain'],
type='stockonhand',
stock_on_hand=0
).values('sql_product__code').annotate(count=Count('case_id')))
for product in products:
if not any([product.code == tx['sql_product__code'] for tx in txs]):
rows[product.code].append({'x': d['start_date'], 'y': 0})
for tx in txs:
rows[tx['sql_product__code']].append(
{
'x': d['start_date'],
'y': tx['count'],
'name': code_name_map[tx['sql_product__code']]
}
)
return rows
@property
def charts(self):
rows = self.rows
if self.show_chart:
chart = EWSLineChart("Stockout by Product", x_axis=Axis(self.chart_x_label, dateFormat='%b %Y'),
y_axis=Axis(self.chart_y_label, 'd'))
chart.x_axis_uses_dates = True
chart.tooltipFormat = True
chart.is_rendered_as_email = self.config['is_rendered_as_email']
for key, value in rows.iteritems():
chart.add_dataset(key, value)
return [chart]
return []
class StockoutTable(EWSData):
slug = 'stockouts_product_table'
show_chart = False
show_table = True
@property
def title(self):
if not self.location:
return ""
if self.config['export']:
return 'Stockouts'
location_type = self.location.location_type.name.lower()
if location_type == 'country':
return "Stockouts - CMS, RMS, and Teaching Hospitals"
elif location_type == 'region':
return "Stockouts - RMS and Teaching Hospitals"
elif location_type == 'district':
return "Stockouts"
@property
def headers(self):
return DataTablesHeader(
DataTablesColumn('Location'),
DataTablesColumn('Stockouts')
)
@property
def rows(self):
rows = []
if self.config['location_id']:
product_id_to_name = {
product_id: product_name
for (product_id, product_name) in self.config['unique_products'].values_list('product_id', 'name')
}
for supply_point in self.config['stockout_table_supply_points']:
products_set = self.config['stockouts'].get(supply_point.supply_point_id)
url = link_format(supply_point.name, make_url(
StockLevelsReport,
self.config['domain'],
'?location_id=%s&startdate=%s&enddate=%s',
(supply_point.location_id, self.config['startdate'], self.config['enddate'])
))
if products_set:
rows.append(
[url if not self.config.get('is_rendered_as_email') else supply_point.name, ', '.join(
product_id_to_name[product_id] for product_id in products_set
)]
)
else:
rows.append(
[url if not self.config.get('is_rendered_as_email') else supply_point.name, '-']
)
return rows
class StockStatus(MultiReport):
name = 'Stock status'
title = 'Stock Status'
slug = 'stock_status'
fields = [EWSRestrictionLocationFilter, ProductByProgramFilter, EWSDateFilter, ViewReportFilter]
split = False
exportable = True
is_exportable = True
is_rendered_as_email = False
def unique_products(self, locations):
return SQLProduct.objects.filter(
pk__in=locations.values_list('_products', flat=True)
).exclude(is_archived=True)
def get_stock_transactions_for_supply_points_and_products(self, supply_points, unique_products,
**additional_params):
return StockTransaction.objects.filter(
type='stockonhand',
case_id__in=list(supply_points.values_list('supply_point_id', flat=True)),
report__domain=self.report_config['domain'],
report__date__lte=self.report_config['enddate'],
report__date__gte=self.report_config['startdate'],
product_id__in=list(unique_products.values_list('product_id', flat=True)),
**additional_params
).distinct('case_id', 'product_id').order_by('case_id', 'product_id', '-report__date').values_list(
'case_id', 'product_id'
)
def get_stockouts_for_supply_points_and_products(self, supply_points, unique_products):
return self.get_stock_transactions_for_supply_points_and_products(
supply_points,
unique_products,
stock_on_hand=0
)
def stockouts_data(self):
supply_points = get_supply_points(self.report_config['domain'], self.report_config['location_id'])
if not supply_points:
return {}
unique_products = self.unique_products(supply_points)
transactions = self.get_stockouts_for_supply_points_and_products(
supply_points, unique_products
).values_list('case_id', 'product_id')
stockouts = defaultdict(set)
for (case_id, product_id) in transactions:
stockouts[case_id].add(product_id)
return {
'stockouts': stockouts,
'unique_products': unique_products,
'stockout_table_supply_points': supply_points
}
def data(self):
locations = self.report_location.get_descendants()
locations_ids = locations.values_list('supply_point_id', flat=True)
if not locations_ids:
return {}
unique_products = self.unique_products(locations)
transactions = self.get_stock_transactions_for_supply_points_and_products(
locations_ids, unique_products
).values_list('case_id', 'product_id', 'report__date', 'stock_on_hand')
current_mos_locations = get_supply_points(self.report_config['domain'], self.report_config['location_id'])
current_mos_locations_ids = set(
current_mos_locations.values_list('supply_point_id', flat=True)
)
stock_states = StockState.objects.filter(
sql_product__domain=self.domain,
case_id__in=current_mos_locations_ids
)
product_case_with_stock = defaultdict(set)
product_case_without_stock = defaultdict(set)
months_of_stock = defaultdict(lambda: defaultdict(dict))
stock_state_map = {
(stock_state.case_id, stock_state.product_id):
stock_state.get_monthly_consumption() if stock_state.daily_consumption else None
for stock_state in stock_states
}
stockouts = defaultdict(set)
for (case_id, product_id, date, stock_on_hand) in transactions:
if stock_on_hand > 0:
product_case_with_stock[product_id].add(case_id)
if case_id in current_mos_locations_ids:
stock_state_dict = stock_state_map.get((case_id, product_id))
if stock_state_dict:
months_of_stock[case_id][product_id] = stock_on_hand / stock_state_dict
else:
months_of_stock[case_id][product_id] = None
else:
product_case_without_stock[product_id].add(case_id)
if case_id in current_mos_locations_ids:
stockouts[case_id].add(product_id)
months_of_stock[case_id][product_id] = 0
not_reporting_locations = [
case_id
for case_id in current_mos_locations_ids
if case_id not in months_of_stock
]
sohs = StockTransaction.objects.filter(
report__date__lte=self.report_config['enddate'],
case_id__in=not_reporting_locations
).order_by('case_id', 'product_id', '-report__date').distinct('case_id', 'product_id').values_list(
'case_id',
'product_id',
'stock_on_hand'
)
sohs_dict = {(case_id, product_id): stock_on_hand for case_id, product_id, stock_on_hand in sohs}
for case_id in not_reporting_locations:
if case_id not in months_of_stock:
for product in unique_products:
soh = sohs_dict.get((case_id, product.product_id))
consumption = stock_state_map.get((case_id, product.product_id))
if soh and consumption:
months_of_stock[case_id][product.product_id] = soh / consumption
return {
'without_stock': {
product_id: len(case_list)
for product_id, case_list in product_case_without_stock.iteritems()
},
'with_stock': {
product_id: len(case_list)
for product_id, case_list in product_case_with_stock.iteritems()
},
'all': locations.count(),
'months_of_stock': months_of_stock,
'stockouts': stockouts,
'unique_products': unique_products,
'stockout_table_supply_points': current_mos_locations
}
@property
def report_config(self):
program = self.request.GET.get('filter_by_program')
products = self.request.GET.getlist('filter_by_product')
location_id = self.request.GET.get('location_id')
return dict(
domain=self.domain,
startdate=self.datespan.startdate_utc,
enddate=self.datespan.enddate_utc,
location_id=location_id if location_id else get_country_id(self.domain),
program=program if program != ALL_OPTION else None,
products=products if products and products[0] != ALL_OPTION else [],
report_type=self.request.GET.get('report_type', None),
user=self.request.couch_user,
export=False,
is_rendered_as_email=self.is_rendered_as_email
)
@property
def data_providers(self):
config = self.report_config
report_type = self.request.GET.get('report_type', None)
if self.is_reporting_type():
self.split = True
if self.is_rendered_as_email:
return [FacilityReportData(config)]
else:
return [
FacilityReportData(config),
StockLevelsLegend(config),
InputStock(config),
UsersData(config),
InventoryManagementData(config),
ProductSelectionPane(config, hide_columns=False)
]
self.split = False
if report_type == 'stockouts':
config.update(self.stockouts_data())
return [
ProductSelectionPane(config=config, hide_columns=False),
StockoutsProduct(config=config),
StockoutTable(config=config)
]
elif report_type == 'asi':
config.update(self.data())
return [
ProductSelectionPane(config=config),
ProductAvailabilityData(config=config),
MonthOfStockProduct(config=config),
StockoutsProduct(config=config),
StockoutTable(config=config)
]
else:
config.update(self.data())
return [
ProductSelectionPane(config=config),
ProductAvailabilityData(config=config),
MonthOfStockProduct(config=config)
]
@property
def export_table(self):
if self.is_reporting_type():
return super(StockStatus, self).export_table
report_type = self.request.GET.get('report_type', None)
config = self.report_config
config.update(self.data())
config['export'] = True
if report_type == 'stockouts' or not report_type:
r = MonthOfStockProduct(config=config)
return [self._export(r.title, r.headers, r.rows)]
else:
reports = [
MonthOfStockProduct(config=config),
StockoutTable(config=config)
]
return [self._export(r.title, r.headers, r.rows) for r in reports]
def _export(self, export_sheet_name, headers, formatted_rows, total_row=None):
def _unformat_row(row):
return [col.get("sort_key", col) if isinstance(col, dict) else col for col in row]
table = headers.as_export_table
rows = [_unformat_row(row) for row in formatted_rows]
for row in rows:
row[0] = GenericTabularReport._strip_tags(row[0])
replace = ''
for k, v in enumerate(table[0]):
if v != ' ':
replace = v
else:
table[0][k] = replace
table.extend(rows)
if total_row:
table.append(_unformat_row(total_row))
return [export_sheet_name, self._report_info + table]
@property
@request_cache()
def print_response(self):
"""
Returns the report for printing.
"""
self.is_rendered_as_email = True
self.use_datatables = False
self.override_template = "ewsghana/stock_status_print_report.html"
return HttpResponse(self._async_context()['report'])
|
{
"content_hash": "1300cf7898ec7c1e31498476a8205e9b",
"timestamp": "",
"source": "github",
"line_count": 569,
"max_line_length": 114,
"avg_line_length": 40.60632688927944,
"alnum_prop": 0.5571521315732525,
"repo_name": "puttarajubr/commcare-hq",
"id": "6da1d08f7c56b0348b95456e0cd714e26faf7a8a",
"size": "23105",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "custom/ewsghana/reports/specific_reports/stock_status_report.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ActionScript",
"bytes": "15950"
},
{
"name": "CSS",
"bytes": "581878"
},
{
"name": "HTML",
"bytes": "2790361"
},
{
"name": "JavaScript",
"bytes": "2572023"
},
{
"name": "Makefile",
"bytes": "3999"
},
{
"name": "Python",
"bytes": "11275678"
},
{
"name": "Shell",
"bytes": "23890"
}
],
"symlink_target": ""
}
|
"""
Django settings for Szkiz project.
Generated by 'django-admin startproject' using Django 1.9.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'u#rojt=%r5zo#f4wsr(f8boy)$puwxw9e)+dbs#&-_j6u!$1yu'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
if DEBUG:
ALLOWED_HOSTS = ['*']
else:
ALLOWED_HOSTS = ['77.55.236.168', 'maciejczuk.pl', 'www.maciejczuk.pl']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'main'
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'Szkiz.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'Szkiz.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
if DEBUG:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'db.sqlite3'
}
}
else:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'HOST': 'localhost',
'NAME': 'collector',
'USER': 'collector_user',
'PASSWORD': 'collector'
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [os.path.join(BASE_DIR, "static")]
#STATIC_ROOT = 'static/'
|
{
"content_hash": "b008dbcc654e87fc8a22facbf0ae00b5",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 91,
"avg_line_length": 26.28985507246377,
"alnum_prop": 0.6634509371554576,
"repo_name": "EricFelixLuther/Szkiz",
"id": "e790bbf853f6010cd34ba1c222c0ac94933de823",
"size": "3628",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Szkiz/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1239"
},
{
"name": "HTML",
"bytes": "6809"
},
{
"name": "JavaScript",
"bytes": "6474"
},
{
"name": "Python",
"bytes": "79995"
}
],
"symlink_target": ""
}
|
CONSTANT = "SOME OTHER CONSTANT"
|
{
"content_hash": "a5fbd94442e0a608e82e7dcba683308f",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 32,
"avg_line_length": 33,
"alnum_prop": 0.7575757575757576,
"repo_name": "zpao/buck",
"id": "abcde58fb5b77505f65a47a5b705437d01a7ad3d",
"size": "33",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "test/com/facebook/buck/features/python/testdata/python_binary/external_sources/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1796"
},
{
"name": "C",
"bytes": "250514"
},
{
"name": "CSS",
"bytes": "56119"
},
{
"name": "Dockerfile",
"bytes": "2094"
},
{
"name": "HTML",
"bytes": "11770"
},
{
"name": "Java",
"bytes": "33190146"
},
{
"name": "JavaScript",
"bytes": "931240"
},
{
"name": "Kotlin",
"bytes": "310039"
},
{
"name": "Lex",
"bytes": "14469"
},
{
"name": "Makefile",
"bytes": "1704"
},
{
"name": "PowerShell",
"bytes": "2154"
},
{
"name": "Python",
"bytes": "2153057"
},
{
"name": "Shell",
"bytes": "43626"
},
{
"name": "Smalltalk",
"bytes": "194"
},
{
"name": "Thrift",
"bytes": "18638"
}
],
"symlink_target": ""
}
|
from pyface.ui.qt4.gui import *
|
{
"content_hash": "9bd2aa0c1ccf782e941110cade574eef",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 31,
"avg_line_length": 32,
"alnum_prop": 0.75,
"repo_name": "enthought/etsproxy",
"id": "03d19dd393e69b836e3a585132016107796a2a6f",
"size": "47",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "enthought/pyface/ui/qt4/gui.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "363714"
}
],
"symlink_target": ""
}
|
import unittest
from openmdao.main.api import Component, VariableTree, Assembly, set_as_top
from openmdao.main.datatypes.api import Float, VarTree
class TstContainer(VariableTree):
dummy1 = Float(1.0)
class TstComponent(Component):
dummy_data = VarTree(TstContainer(), iotype='in')
dummy_data_out = VarTree(TstContainer(), iotype='out')
dummyin = Float(iotype='in')
def execute(self):
self.dummy_data_out = self.dummy_data.copy()
class TstAssembly(Assembly):
def configure(self):
self.add('comp', TstComponent())
self.create_passthrough('comp.dummy_data.dummy1')
self.create_passthrough('comp.dummy_data_out.dummy1', 'dummy1_out')
self.driver.workflow.add('comp')
class TstAssembly2(Assembly):
def configure(self):
self.add('comp', TstComponent())
self.create_passthrough('comp.dummy_data')
self.create_passthrough('comp.dummy_data_out', 'dummy1_out')
self.driver.workflow.add('comp')
class VarTreePassthroughTestCase(unittest.TestCase):
def test_vartree_passthrough(self):
# Tests that we can create a passthrough of one variable in a tree
blah = set_as_top(TstAssembly())
blah.dummy1 = 5.0
self.assertEqual(blah.dummy1_out, 1.0)
blah.run()
self.assertEqual(blah.comp.dummy_data.dummy1, 5.0)
self.assertEqual(blah.dummy1, 5.0)
self.assertEqual(blah.dummy1_out, 5.0)
def test_vartree_passthrough(self):
# Tests that we can create a passthrough of an entire variable tree
blah = set_as_top(TstAssembly2())
blah.dummy_data.dummy1 = 5.0
self.assertEqual(blah.dummy1_out.dummy1, 1.0)
blah.run()
self.assertEqual(blah.comp.dummy_data.dummy1, 5.0)
self.assertEqual(blah.dummy_data.dummy1, 5.0)
self.assertEqual(blah.dummy1_out.dummy1, 5.0)
def test_get_attributes(self):
# Tests the attributres dictionary for passthrough trees
blah = set_as_top(TstAssembly2())
attrs = blah.get_attributes(True)
self.assertTrue({'indent': 0,
'name': 'dummy_data',
'vt': 'vt',
'implicit': '',
'connected': '',
'connection_types': 0,
'ttype': 'vartree',
'type': 'TstContainer',
'id': 'dummy_data',
'target': 'comp.dummy_data'} in attrs['Inputs'])
self.assertTrue({'indent': 1,
'name': 'dummy1',
'parent': 'dummy_data',
'value': 1.0,
'high': None,
'connected': '',
'low': None,
'type': 'float',
'id': 'dummy_data.dummy1',
'assumed_default': False} in attrs['Inputs'])
self.assertTrue({'indent': 0,
'name': 'dummy1_out',
'vt': 'vt',
'implicit': '',
'connected': '',
'connection_types': 0,
'ttype': 'vartree',
'type': 'TstContainer',
'id': 'dummy1_out',
'target': 'comp.dummy_data_out'} in attrs['Outputs'])
self.assertTrue({'indent': 1,
'name': 'dummy1',
'parent': 'dummy1_out',
'value': 1.0,
'high': None,
'connected': '',
'low': None,
'type': 'float',
'id': 'dummy1_out.dummy1',
'assumed_default': False} in attrs['Outputs'])
blah = set_as_top(TstAssembly())
attrs = blah.get_attributes(True)
self.assertTrue({'indent': 0,
'name': 'dummy1',
'value': 1.0,
'high': None,
'implicit': '',
'connected': '',
'connection_types': 0,
'target': 'comp.dummy_data.dummy1',
'low': None,
'type': 'float',
'id': 'dummy1',
'assumed_default': False} in attrs['Inputs'])
self.assertTrue({'indent': 0,
'name': 'dummy1_out',
'value': 1.0,
'high': None,
'implicit': '',
'connected': '',
'connection_types': 0,
'target': 'comp.dummy_data_out.dummy1',
'low': None,
'type': 'float',
'id': 'dummy1_out',
'assumed_default': False} in attrs['Outputs'])
if __name__ == "__main__":
unittest.main()
|
{
"content_hash": "9f0704e51c0376f0426c1da278fac7af",
"timestamp": "",
"source": "github",
"line_count": 132,
"max_line_length": 78,
"avg_line_length": 38.97727272727273,
"alnum_prop": 0.45558794946550046,
"repo_name": "DailyActie/Surrogate-Model",
"id": "ff39a616a31f0f501fc393995bfd4cfe671ff3c8",
"size": "5145",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "01-codes/OpenMDAO-Framework-dev/openmdao.main/src/openmdao/main/test/test_vartree_passthrough.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Awk",
"bytes": "345"
},
{
"name": "Batchfile",
"bytes": "18746"
},
{
"name": "C",
"bytes": "13004913"
},
{
"name": "C++",
"bytes": "14692003"
},
{
"name": "CMake",
"bytes": "72831"
},
{
"name": "CSS",
"bytes": "303488"
},
{
"name": "Fortran",
"bytes": "7339415"
},
{
"name": "HTML",
"bytes": "854774"
},
{
"name": "Java",
"bytes": "38854"
},
{
"name": "JavaScript",
"bytes": "2432846"
},
{
"name": "Jupyter Notebook",
"bytes": "829689"
},
{
"name": "M4",
"bytes": "1379"
},
{
"name": "Makefile",
"bytes": "48708"
},
{
"name": "Matlab",
"bytes": "4346"
},
{
"name": "Objective-C",
"bytes": "567"
},
{
"name": "PHP",
"bytes": "93585"
},
{
"name": "Pascal",
"bytes": "1449"
},
{
"name": "Perl",
"bytes": "1152272"
},
{
"name": "PowerShell",
"bytes": "17042"
},
{
"name": "Python",
"bytes": "34668203"
},
{
"name": "Roff",
"bytes": "5925"
},
{
"name": "Ruby",
"bytes": "92498"
},
{
"name": "Shell",
"bytes": "94698"
},
{
"name": "TeX",
"bytes": "156540"
},
{
"name": "TypeScript",
"bytes": "41691"
}
],
"symlink_target": ""
}
|
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 128 , FREQ = 'D', seed = 0, trendtype = "PolyTrend", cycle_length = 7, transform = "None", sigma = 0.0, exog_count = 100, ar_order = 12);
|
{
"content_hash": "5567e55f1a797957e49d5a3643f0b629",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 161,
"avg_line_length": 37.285714285714285,
"alnum_prop": 0.7011494252873564,
"repo_name": "antoinecarme/pyaf",
"id": "f986bc6266186f6687ed6138a84043a0b1f0b478",
"size": "261",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/artificial/transf_None/trend_PolyTrend/cycle_7/ar_12/test_artificial_128_None_PolyTrend_7_12_100.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6773299"
},
{
"name": "Procfile",
"bytes": "24"
},
{
"name": "Python",
"bytes": "54209093"
},
{
"name": "R",
"bytes": "807"
},
{
"name": "Shell",
"bytes": "3619"
}
],
"symlink_target": ""
}
|
"""
CoolFoursquareBro twitter bot, replies to foursquare tweets by a specific user with 'cool story bro' responses.
Copyright (C) 2013 Barry O'Neill
Author: Barry O'Neill <barry@barryoneill.net>
URL: <https://github.com/barryoneill/CoolFoursquareBro>
For license information, see LICENSE.txt
"""
from __future__ import print_function, unicode_literals
import twitter
import logging
import shelve
required_config_keys = ['twitter_consumer_key', 'twitter_consumer_secret',
'twitter_access_token', 'twitter_access_token_secret',
'db', 'target_userid']
class CoolFoursquareBro(object):
def __init__(self, config):
"""
Dictionary containing configuration, see 'coolfoursquarebro-sample.yaml' for options
"""
self.config = config
if not config:
raise ValueError('Required config parameter missing')
# catch config errors early
missing_keys = set(required_config_keys) - set(config.keys())
if missing_keys:
raise KeyError('Following config keys are missing: {}'.format(', '.join(missing_keys)))
if not all(key in config for key in required_config_keys):
raise KeyError('Invalid configuration, required keys: {}'.format(required_config_keys))
self.log = logging.getLogger(__name__)
self.twitter_api = twitter.Api(consumer_key=self.config['twitter_consumer_key'],
consumer_secret=self.config['twitter_consumer_secret'],
access_token_key=self.config['twitter_access_token'],
access_token_secret=self.config['twitter_access_token_secret'])
@staticmethod
def __is_foursquare_tweet(tweet):
"""
returns True if the supplied twitter.Status object is a foursquare tweet
"""
# rather than pattern match the tweet text, I'm going to crudely search for 'foursquare' in the client info
return 'foursquare' in tweet.source.lower()
@staticmethod
def __create_unique_token(tweet):
"""
Given this app generally sends the same 'cool story bro' message, twitter will reject all but the first
with a 187 'duplicate' status code. This method should return a token which (appended to the reply to the
provided twitter.Status) will make it unique.
This impl is a bit of a hack. I make list[10] of different unicode chars that generate space characters.
and return a string where each digit in the id is mapped to the corresponding space.
"""
spaces0to9 = ['\u0020', '\u00A0', '\u2000', '\u2001', '\u2002',
'\u2003', '\u2004', '\u2005', '\u2006', '\u2007']
# other spaces that can be used if some clients start rendering displayable chars
# '\u2008', '\u2009', '\u200A', '\u200B', '\u202F', '\u205F', '\u3000'
number_str = str(tweet.id)
token_chars = []
for c in number_str:
token_chars += spaces0to9[int(c)]
return ''.join(token_chars)
def __create_response_text(self, tweet):
"""
generate the response text for the supplied twitter.Status
"""
# default to 'cool story bro', or whatever the config says the default is
resp_text = self.config.get('coolstory_responsedefault', 'cool story, bro')
# override, if the user has mapped a keyword match to an alternate text
if 'coolstory_responses' in self.config:
responses = self.config['coolstory_responses']
for (place, response) in responses.items():
if place.lower() in tweet.text.lower():
resp_text = response
break
# '@{handle} cool story bro {uniquetoken}'
return u'@{} {} {}'.format(tweet.user.screen_name, resp_text, self.__create_unique_token(tweet))
def cool_story_bro(self, dry_run=False):
"""
Query the user's recent tweets, and if a foursquare tweet is found, reply to it. Records the
last seen tweet info in a file, so the next run doesn't repeat itself.
Set dry_run=True to prevent sending of tweets (last seen tweet is still recorded & obeyed)
"""
db_lastseen_key = 'LAST_SEEN_ID'.encode('ascii') # 'shelf' module struggles with unicode
db = shelve.open(self.config['db'], writeback=False)
target_userid = self.config['target_userid']
# pick up at the last tweet we saw. If none, start at the configured tweet. If none, query max 20 back
last_seen_id = db.get(db_lastseen_key, self.config.get('lastseen_startval', 0))
self.log.debug('Querying tweets for user {} since ID {}'.format(target_userid, last_seen_id))
tweets = self.twitter_api.GetUserTimeline(target_userid, count=10,
since_id=last_seen_id, include_rts=False,
trim_user=False, exclude_replies=True)
if not tweets:
self.log.info('No tweets since last check')
else:
self.log.debug('Processing {} candidate tweets'.format(len(tweets)))
# ensure oldest to newest
tweets.sort(key=lambda tweet: tweet.id, reverse=False)
# persist the 'max' id before doing anything - better to miss out on a few opportunities to be clever
# than to spam the user on repeat calls if something failed on a previous run.
newest = tweets[-1]
db[db_lastseen_key] = newest.id
self.log.info('Marked tweet {} as the max ID, text:{}'.format(newest.id, newest.text))
fs_tweets = [t for t in tweets if self.__is_foursquare_tweet(t)]
if not fs_tweets:
self.log.info('No foursquare tweets.')
else:
self.log.info('Got {} foursquare tweets!'.format(len(fs_tweets)))
for fs_tweet in fs_tweets:
resp_txt = self.__create_response_text(fs_tweet)
log_msg = 'text:\'{}\' in response to id:{} msg:\'{}\''.format(resp_txt, fs_tweet.id, fs_tweet.text)
if dry_run:
self.log.info('[dry run, no tweet sent]{}'.format(log_msg))
else:
self.log.warn('[sending tweet]{}'.format(log_msg))
resp_tweet = self.twitter_api.PostUpdate(resp_txt, in_reply_to_status_id=fs_tweet.id)
self.log.debug("tweet sent, id: {}".format(resp_tweet.id))
|
{
"content_hash": "8bb7f73615b4c0bfc6f00d1ad38a18f4",
"timestamp": "",
"source": "github",
"line_count": 154,
"max_line_length": 120,
"avg_line_length": 43.18181818181818,
"alnum_prop": 0.5984962406015037,
"repo_name": "barryoneill/CoolFoursquareBro",
"id": "f38400a9984ff7f3d4549464e8604b01c64f4227",
"size": "6650",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "coolfoursquarebro.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "8298"
}
],
"symlink_target": ""
}
|
"""SCons.Executor
A module for executing actions with specific lists of target and source
Nodes.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Executor.py 3842 2008/12/20 22:59:52 scons"
import string
from SCons.Debug import logInstanceCreation
import SCons.Errors
import SCons.Memoize
class Executor:
"""A class for controlling instances of executing an action.
This largely exists to hold a single association of an action,
environment, list of environment override dictionaries, targets
and sources for later processing as needed.
"""
if SCons.Memoize.use_memoizer:
__metaclass__ = SCons.Memoize.Memoized_Metaclass
memoizer_counters = []
def __init__(self, action, env=None, overridelist=[{}],
targets=[], sources=[], builder_kw={}):
if __debug__: logInstanceCreation(self, 'Executor.Executor')
self.set_action_list(action)
self.pre_actions = []
self.post_actions = []
self.env = env
self.overridelist = overridelist
self.targets = targets
self.sources = sources[:]
self.sources_need_sorting = False
self.builder_kw = builder_kw
self._memo = {}
def set_action_list(self, action):
import SCons.Util
if not SCons.Util.is_List(action):
if not action:
import SCons.Errors
raise SCons.Errors.UserError, "Executor must have an action."
action = [action]
self.action_list = action
def get_action_list(self):
return self.pre_actions + self.action_list + self.post_actions
memoizer_counters.append(SCons.Memoize.CountValue('get_build_env'))
def get_build_env(self):
"""Fetch or create the appropriate build Environment
for this Executor.
"""
try:
return self._memo['get_build_env']
except KeyError:
pass
# Create the build environment instance with appropriate
# overrides. These get evaluated against the current
# environment's construction variables so that users can
# add to existing values by referencing the variable in
# the expansion.
overrides = {}
for odict in self.overridelist:
overrides.update(odict)
import SCons.Defaults
env = self.env or SCons.Defaults.DefaultEnvironment()
build_env = env.Override(overrides)
self._memo['get_build_env'] = build_env
return build_env
def get_build_scanner_path(self, scanner):
"""Fetch the scanner path for this executor's targets and sources.
"""
env = self.get_build_env()
try:
cwd = self.targets[0].cwd
except (IndexError, AttributeError):
cwd = None
return scanner.path(env, cwd, self.targets, self.get_sources())
def get_kw(self, kw={}):
result = self.builder_kw.copy()
result.update(kw)
return result
def do_nothing(self, target, kw):
return 0
def do_execute(self, target, kw):
"""Actually execute the action list."""
env = self.get_build_env()
kw = self.get_kw(kw)
status = 0
for act in self.get_action_list():
status = apply(act, (self.targets, self.get_sources(), env), kw)
if isinstance(status, SCons.Errors.BuildError):
status.executor = self
raise status
elif status:
msg = "Error %s" % status
raise SCons.Errors.BuildError(
errstr=msg,
node=self.targets,
executor=self,
action=act)
return status
# use extra indirection because with new-style objects (Python 2.2
# and above) we can't override special methods, and nullify() needs
# to be able to do this.
def __call__(self, target, **kw):
return self.do_execute(target, kw)
def cleanup(self):
self._memo = {}
def add_sources(self, sources):
"""Add source files to this Executor's list. This is necessary
for "multi" Builders that can be called repeatedly to build up
a source file list for a given target."""
self.sources.extend(sources)
self.sources_need_sorting = True
def get_sources(self):
if self.sources_need_sorting:
self.sources = SCons.Util.uniquer_hashables(self.sources)
self.sources_need_sorting = False
return self.sources
def prepare(self):
"""
Preparatory checks for whether this Executor can go ahead
and (try to) build its targets.
"""
for s in self.get_sources():
if s.missing():
msg = "Source `%s' not found, needed by target `%s'."
raise SCons.Errors.StopError, msg % (s, self.targets[0])
def add_pre_action(self, action):
self.pre_actions.append(action)
def add_post_action(self, action):
self.post_actions.append(action)
# another extra indirection for new-style objects and nullify...
def my_str(self):
env = self.get_build_env()
get = lambda action, t=self.targets, s=self.get_sources(), e=env: \
action.genstring(t, s, e)
return string.join(map(get, self.get_action_list()), "\n")
def __str__(self):
return self.my_str()
def nullify(self):
self.cleanup()
self.do_execute = self.do_nothing
self.my_str = lambda S=self: ''
memoizer_counters.append(SCons.Memoize.CountValue('get_contents'))
def get_contents(self):
"""Fetch the signature contents. This is the main reason this
class exists, so we can compute this once and cache it regardless
of how many target or source Nodes there are.
"""
try:
return self._memo['get_contents']
except KeyError:
pass
env = self.get_build_env()
get = lambda action, t=self.targets, s=self.get_sources(), e=env: \
action.get_contents(t, s, e)
result = string.join(map(get, self.get_action_list()), "")
self._memo['get_contents'] = result
return result
def get_timestamp(self):
"""Fetch a time stamp for this Executor. We don't have one, of
course (only files do), but this is the interface used by the
timestamp module.
"""
return 0
def scan_targets(self, scanner):
self.scan(scanner, self.targets)
def scan_sources(self, scanner):
if self.sources:
self.scan(scanner, self.get_sources())
def scan(self, scanner, node_list):
"""Scan a list of this Executor's files (targets or sources) for
implicit dependencies and update all of the targets with them.
This essentially short-circuits an N*M scan of the sources for
each individual target, which is a hell of a lot more efficient.
"""
env = self.get_build_env()
deps = []
if scanner:
for node in node_list:
node.disambiguate()
s = scanner.select(node)
if not s:
continue
path = self.get_build_scanner_path(s)
deps.extend(node.get_implicit_deps(env, s, path))
else:
kw = self.get_kw()
for node in node_list:
node.disambiguate()
scanner = node.get_env_scanner(env, kw)
if not scanner:
continue
scanner = scanner.select(node)
if not scanner:
continue
path = self.get_build_scanner_path(scanner)
deps.extend(node.get_implicit_deps(env, scanner, path))
deps.extend(self.get_implicit_deps())
for tgt in self.targets:
tgt.add_to_implicit(deps)
def _get_unignored_sources_key(self, ignore=()):
return tuple(ignore)
memoizer_counters.append(SCons.Memoize.CountDict('get_unignored_sources', _get_unignored_sources_key))
def get_unignored_sources(self, ignore=()):
ignore = tuple(ignore)
try:
memo_dict = self._memo['get_unignored_sources']
except KeyError:
memo_dict = {}
self._memo['get_unignored_sources'] = memo_dict
else:
try:
return memo_dict[ignore]
except KeyError:
pass
sourcelist = self.get_sources()
if ignore:
idict = {}
for i in ignore:
idict[i] = 1
sourcelist = filter(lambda s, i=idict: not i.has_key(s), sourcelist)
memo_dict[ignore] = sourcelist
return sourcelist
def _process_sources_key(self, func, ignore=()):
return (func, tuple(ignore))
memoizer_counters.append(SCons.Memoize.CountDict('process_sources', _process_sources_key))
def process_sources(self, func, ignore=()):
memo_key = (func, tuple(ignore))
try:
memo_dict = self._memo['process_sources']
except KeyError:
memo_dict = {}
self._memo['process_sources'] = memo_dict
else:
try:
return memo_dict[memo_key]
except KeyError:
pass
result = map(func, self.get_unignored_sources(ignore))
memo_dict[memo_key] = result
return result
def get_implicit_deps(self):
"""Return the executor's implicit dependencies, i.e. the nodes of
the commands to be executed."""
result = []
build_env = self.get_build_env()
for act in self.get_action_list():
result.extend(act.get_implicit_deps(self.targets, self.get_sources(), build_env))
return result
nullenv = None
def get_NullEnvironment():
"""Use singleton pattern for Null Environments."""
global nullenv
import SCons.Util
class NullEnvironment(SCons.Util.Null):
import SCons.CacheDir
_CacheDir_path = None
_CacheDir = SCons.CacheDir.CacheDir(None)
def get_CacheDir(self):
return self._CacheDir
if not nullenv:
nullenv = NullEnvironment()
return nullenv
class Null:
"""A null Executor, with a null build Environment, that does
nothing when the rest of the methods call it.
This might be able to disapper when we refactor things to
disassociate Builders from Nodes entirely, so we're not
going to worry about unit tests for this--at least for now.
"""
def __init__(self, *args, **kw):
if __debug__: logInstanceCreation(self, 'Executor.Null')
self.targets = kw['targets']
def get_build_env(self):
return get_NullEnvironment()
def get_build_scanner_path(self):
return None
def cleanup(self):
pass
def prepare(self):
pass
def get_unignored_sources(self, *args, **kw):
return tuple(())
def get_action_list(self):
return []
def __call__(self, *args, **kw):
return 0
def get_contents(self):
return ''
def _morph(self):
"""Morph this Null executor to a real Executor object."""
self.__class__ = Executor
self.__init__([], targets=self.targets)
# The following methods require morphing this Null Executor to a
# real Executor object.
def add_pre_action(self, action):
self._morph()
self.add_pre_action(action)
def add_post_action(self, action):
self._morph()
self.add_post_action(action)
def set_action_list(self, action):
self._morph()
self.set_action_list(action)
|
{
"content_hash": "d3b7d3dcce79e3f5993a59f3aeeccd03",
"timestamp": "",
"source": "github",
"line_count": 393,
"max_line_length": 106,
"avg_line_length": 33.040712468193384,
"alnum_prop": 0.6006931074316519,
"repo_name": "james-dibble/Embedded-Systems-Assignment",
"id": "a37da0719e2e0e3c527809df8c5c5b5c03240b8b",
"size": "12985",
"binary": false,
"copies": "12",
"ref": "refs/heads/master",
"path": "EmbeddedSystems.Applications/EmbeddedSystems.Client/jsoncpp-src-0.6.0-rc2/scons-local-1.2.0/SCons/Executor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ASP",
"bytes": "480"
},
{
"name": "C",
"bytes": "5203"
},
{
"name": "C#",
"bytes": "207641"
},
{
"name": "C++",
"bytes": "330101"
},
{
"name": "CSS",
"bytes": "1175"
},
{
"name": "IDL",
"bytes": "1618"
},
{
"name": "Python",
"bytes": "1803809"
}
],
"symlink_target": ""
}
|
from .resource import Resource
class RouteTable(Resource):
"""Route table resource.
Variables are only populated by the server, and will be ignored when
sending a request.
:param id: Resource ID.
:type id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param location: Resource location.
:type location: str
:param tags: Resource tags.
:type tags: dict[str, str]
:param routes: Collection of routes contained within a route table.
:type routes: list[~azure.mgmt.network.v2017_03_01.models.Route]
:ivar subnets: A collection of references to subnets.
:vartype subnets: list[~azure.mgmt.network.v2017_03_01.models.Subnet]
:param provisioning_state: The provisioning state of the resource.
Possible values are: 'Updating', 'Deleting', and 'Failed'.
:type provisioning_state: str
:param etag: Gets a unique read-only string that changes whenever the
resource is updated.
:type etag: str
"""
_validation = {
'name': {'readonly': True},
'type': {'readonly': True},
'subnets': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'routes': {'key': 'properties.routes', 'type': '[Route]'},
'subnets': {'key': 'properties.subnets', 'type': '[Subnet]'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
}
def __init__(self, id=None, location=None, tags=None, routes=None, provisioning_state=None, etag=None):
super(RouteTable, self).__init__(id=id, location=location, tags=tags)
self.routes = routes
self.subnets = None
self.provisioning_state = provisioning_state
self.etag = etag
|
{
"content_hash": "7a9b4919f376a1ecaf36437d6fbbd3ea",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 107,
"avg_line_length": 37.2,
"alnum_prop": 0.6065493646138808,
"repo_name": "AutorestCI/azure-sdk-for-python",
"id": "4a2c9d6c9e3535c4a1eb31e4c596c5171cbbb0b0",
"size": "2520",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "azure-mgmt-network/azure/mgmt/network/v2017_03_01/models/route_table.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34619070"
}
],
"symlink_target": ""
}
|
from kv1_811 import *
from inserter import insert,version_imported
from bs4 import BeautifulSoup
import urllib2
from datetime import datetime,timedelta
import logging
from settings.const import *
logger = logging.getLogger("importer")
def getDataSource():
return { '1' : {
'operator_id' : 'QBUZZ',
'name' : 'Qbuzz KV1',
'description' : 'Qbuzz KV1 leveringen',
'email' : None,
'url' : None}}
def getOperator():
return { 'QBUZZ' : {'privatecode' : 'QBUZZ',
'operator_id' : 'QBUZZ',
'name' : 'Qbuzz',
'phone' : '0900-7289965',
'url' : 'http://www.qbuzz.nl',
'timezone' : 'Europe/Amsterdam',
'language' : 'nl'},
'UOV' : {'privatecode' : 'UOV',
'operator_id' : 'UOV',
'name' : 'U-OV',
'phone' : '0900-5252241',
'url' : 'http://www.u-ov.info',
'timezone' : 'Europe/Amsterdam',
'language' : 'nl'}}
def getMergeStrategies(conn):
cur = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
cur.execute("""
SELECT 'UNITCODE' as type,dataownercode||':'||organizationalunitcode as unitcode,min(validdate) as fromdate,max(validdate) as todate FROM operday
GROUP BY dataownercode,organizationalunitcode
""")
rows = cur.fetchall()
cur.close()
return rows
def setLineColors():
conn = psycopg2.connect(database_connect)
cur = conn.cursor()
cur.execute("""
--QLINK
update line set color_shield = '00be5c' where operator_id = 'QBUZZ:g502';
update line set color_text = '000000' where operator_id = 'QBUZZ:g502';
update line set color_shield = '185099' where operator_id = 'QBUZZ:g503';
update line set color_text = 'ffffff' where operator_id = 'QBUZZ:g503';
update line set color_shield = '6ed1f6' where operator_id = 'QBUZZ:g554';
update line set color_text = '000000' where operator_id = 'QBUZZ:g554';
update line set color_shield = '7e1c90' where operator_id = 'QBUZZ:g505';
update line set color_text = 'ffffff' where operator_id = 'QBUZZ:g505';
update line set color_shield = 'd81118' where operator_id = 'QBUZZ:g506';
update line set color_text = 'ffffff' where operator_id = 'QBUZZ:g506';
update line set color_shield = 'fdd205' where operator_id = 'QBUZZ:g507';
update line set color_text = '000000' where operator_id = 'QBUZZ:g507';
update line set color_shield = 'dd9345' where operator_id = 'QBUZZ:g508';
update line set color_text = '000000' where operator_id = 'QBUZZ:g508';
update line set color_shield = 'f468bb' where operator_id = 'QBUZZ:g509';
update line set color_text = '000000' where operator_id = 'QBUZZ:g509';
update line set color_shield = 'ec008c' where operator_id = 'QBUZZ:g501';
update line set color_text = '000000' where operator_id = 'QBUZZ:g501';
update line set color_shield = 'ed028d' where operator_id = 'QBUZZ:g512';
update line set color_text = '000000' where operator_id = 'QBUZZ:g512';
update line set color_shield = 'f35e18' where operator_id = 'QBUZZ:g565';
update line set color_text = '000000' where operator_id = 'QBUZZ:g565';
update line set color_shield = 'f35e18' where operator_id = 'QBUZZ:g515';
update line set color_text = '000000' where operator_id = 'QBUZZ:g515';
update line set color_shield = 'd81118' where operator_id = 'QBUZZ:g516';
update line set color_text = 'ffffff' where operator_id = 'QBUZZ:g516';
update line set color_shield = 'f68512' where operator_id = 'QBUZZ:g517';
update line set color_text = '000000' where operator_id = 'QBUZZ:g517';
""")
cur.close()
conn.commit()
conn.close()
def import_zip(path,filename,version):
meta,conn = load(path,filename,point_from_pool=True)
try:
cur = conn.cursor()
cur.execute("SELECT COUNT(*) FROM link WHERE transporttype = 'TRAM'")
if pool_generation_enabled and cur.fetchone()[0] > 0:
cur.execute("""
UPDATE pool_utram set linkvalidfrom = (SELECT DISTINCT validfrom FROM LINK where transporttype = 'TRAM');
update point set locationx_ew = '135335', locationy_ns = '451223' where locationx_ew = '135639' and locationy_ns = '451663';
update point set locationx_ew = '134669', locationy_ns = '450853' where locationx_ew = '134591' and locationy_ns = '450911';
update point set locationx_ew = '133029', locationy_ns = '447900' where locationx_ew = '132473' and locationy_ns = '448026';
update point set locationx_ew = '132907', locationy_ns = '447965' where locationx_ew = '132672' and locationy_ns = '448044';
update point set locationx_ew = '135335', locationy_ns = '451314' where locationx_ew = '135533' and locationy_ns = '451628';
update point set locationx_ew = '134356', locationy_ns = '448631' where locationx_ew = '134318' and locationy_ns = '448697';
update point set locationx_ew = '131710', locationy_ns = '448728' where locationx_ew = '131731' and locationy_ns = '448705';
insert into POINT (SELECT * from point_utram);
insert into POOL (SELECT * FROM pool_utram WHERE userstopcodebegin||':'||userstopcodeend in (SELECT userstopcodebegin||':'||userstopcodeend));""")
data = {}
data['OPERATOR'] = getOperator()
data['MERGESTRATEGY'] = []#getMergeStrategies(conn)
data['DATASOURCE'] = getDataSource()
data['VERSION'] = {}
data['VERSION']['1'] = {'privatecode' : 'QBUZZ:'+filename,
'datasourceref' : '1',
'operator_id' : 'QBUZZ:'+filename,
'startdate' : meta['startdate'],
'enddate' : meta['enddate'],
'description' : filename}
data['DESTINATIONDISPLAY'] = getDestinationDisplays(conn)
data['LINE'] = getLines(conn)
data['STOPPOINT'] = getStopPoints(conn)
data['STOPAREA'] = getStopAreas(conn)
data['AVAILABILITYCONDITION'] = getAvailabilityConditionsUsingOperday(conn)
data['PRODUCTCATEGORY'] = getBISONproductcategories()
data['ADMINISTRATIVEZONE'] = getAdministrativeZones(conn)
timedemandGroupRefForJourney,data['TIMEDEMANDGROUP'] = calculateTimeDemandGroups(conn)
routeRefForPattern,data['ROUTE'] = clusterPatternsIntoRoute(conn,getPool811)
data['JOURNEYPATTERN'] = getJourneyPatterns(routeRefForPattern,conn,data['ROUTE'])
data['JOURNEY'] = getJourneys(timedemandGroupRefForJourney,conn)
data['NOTICEASSIGNMENT'] = {}
data['NOTICE'] = {}
data['NOTICEGROUP'] = {}
insert(data)
conn.close()
setLineColors()
except:
raise
def download(url,filename):
u = urllib2.urlopen(url)
f = open('/tmp/'+filename, 'wb')
meta = u.info()
file_size = int(meta.getheaders("Content-Length")[0])
print "Downloading: %s Bytes: %s" % (filename, file_size)
file_size_dl = 0
block_sz = 8192
while True:
buffer = u.read(block_sz)
if not buffer:
break
file_size_dl += len(buffer)
f.write(buffer)
status = r"%10d [%3.2f%%]" % (file_size_dl, file_size_dl * 100. / file_size)
status = status + chr(8)*(len(status)+1)
print status,
print
f.close()
import_zip('/tmp',filename,None)
url = 'http://kv1.openov.nl/QBUZZ/'
def sync():
f = urllib2.urlopen(url+'?order=d')
soup = BeautifulSoup(f.read())
for link in soup.find_all('a'):
link = link.get('href')
filename = urllib2.unquote(link)
if '.zip' in link.lower():
if not version_imported('QBUZZ:'+filename):
try:
logger.info('Importing :'+filename)
download(url+link,filename)
except Exception as e:
logger.error(filename,exc_info=True)
pass
|
{
"content_hash": "0f6aeb2da7da8387fee8ef71cb773a68",
"timestamp": "",
"source": "github",
"line_count": 171,
"max_line_length": 146,
"avg_line_length": 47.953216374269005,
"alnum_prop": 0.605,
"repo_name": "bliksemlabs/bliksemintegration",
"id": "465b22bd319331cbaad9588c2d124824cf654cd4",
"size": "8200",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "importers/qbuzz.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "PLSQL",
"bytes": "4719"
},
{
"name": "PLpgSQL",
"bytes": "15144"
},
{
"name": "Python",
"bytes": "494673"
},
{
"name": "Shell",
"bytes": "438"
}
],
"symlink_target": ""
}
|
from ConnectionServer import ConnectionServer
from Connection import Connection
|
{
"content_hash": "3250309376c9b656b00c1687bd59b513",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 45,
"avg_line_length": 40,
"alnum_prop": 0.9,
"repo_name": "kustomzone/Fuzium",
"id": "5bd29c6e2ff5403446f834916c6a6e1994e762f7",
"size": "80",
"binary": false,
"copies": "23",
"ref": "refs/heads/master",
"path": "core/src/Connection/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1204"
},
{
"name": "C",
"bytes": "34092"
},
{
"name": "CSS",
"bytes": "373182"
},
{
"name": "CoffeeScript",
"bytes": "88917"
},
{
"name": "HTML",
"bytes": "123191"
},
{
"name": "JavaScript",
"bytes": "2133526"
},
{
"name": "Python",
"bytes": "2843920"
},
{
"name": "Shell",
"bytes": "898"
}
],
"symlink_target": ""
}
|
people = list()
people.append("Kiwi")
people.append(["Nanu", "Nani"])
people.append("Mama")
print people
# Using "in", return boolean
print "Kiwi" in people # True
print "Kiwi Dakri" in people # False
|
{
"content_hash": "0b37ecd75c9d9c6843a26b1bf4fe532b",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 36,
"avg_line_length": 22.444444444444443,
"alnum_prop": 0.698019801980198,
"repo_name": "rahulbohra/Python-Basic",
"id": "1d57e71697bae7b4df0ec3529e975ac552ef6d7e",
"size": "202",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "61_list_methods.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "21539"
}
],
"symlink_target": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.