| code (string, 3–1.05M chars) | repo_name (string, 5–104 chars) | path (string, 4–251 chars) | language (string, 1 class) | license (string, 15 classes) | size (int64, 3–1.05M) |
|---|---|---|---|---|---|
import sys
import glob
import serial
import json
from datetime import datetime
import random
import time
class OutOfRangeError(Exception):
def __init__(self, name="sensor reading", value=0):
self.name = name
self.value = value
def __repr__(self):
return (f"the reading of {self.name} is out of range, "
f"read value is : {self.value}")
def __str__(self):
return (f"the reading of {self.name} is out of range, "
f"read value is : {self.value}")
def get_random_data():  # Maybe move to an external file; not a great approach for testing. Idea: spawn a thread from main.
time.sleep(4)
data = dict(time=datetime.utcnow().strftime('%H:%M:%S %d/%m/%Y'),
temp=random.randint(-20, 50), hum=random.randint(0, 95), co2=random.randint(0, 20000))
return json.dumps(data).encode()
def serial_ports():
# Adapted from a version found on Stack Overflow; may need refactoring.
""" Lists serial port names
:raises EnvironmentError:
On unsupported or unknown platforms
:returns:
The first available serial port, or "loop://" if none is found
"""
if sys.platform.startswith('win'):
ports = ['COM%s' % (i + 1) for i in range(256)]
elif sys.platform.startswith('linux') or sys.platform.startswith('cygwin'):
# this excludes your current terminal "/dev/tty"
ports = glob.glob('/dev/tty[A-Za-z]*')
elif sys.platform.startswith('darwin'):
ports = glob.glob('/dev/tty.*')
else:
raise EnvironmentError('Unsupported platform')
result = []
for port in ports:
try:
s = serial.Serial(port)
s.close()
result.append(port)
except (OSError, serial.SerialException):
pass
if len(result) == 0:
# No usable port was found; fall back to pyserial's loopback test URL
return "loop://"
else:
return result[0]
| mone27/deware | deware/core/utilis.py | Python | gpl-3.0 | 1,882 |
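Editor's note: `serial_ports()` returns either a real device name or pyserial's `loop://` test URL, so callers should open it with `serial.serial_for_url`, which accepts both. A minimal usage sketch (hypothetical, not part of the repository):

```python
# Hypothetical usage of the helpers above: open whatever port was found.
import serial

port_name = serial_ports()  # real port name, or "loop://" if none found
with serial.serial_for_url(port_name, baudrate=9600, timeout=5) as port:
    port.write(get_random_data())  # loop:// echoes writes back as reads
    print(port.readline())
```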
from os import environ
MY_NUMBER = environ['MY_NUMBER']
MY_NAME = environ['MY_NAME']
| hwayne/safehouse | contact/env.py | Python | apache-2.0 | 86 |
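Editor's note: indexing `os.environ` raises a bare `KeyError` when a variable is unset. A hedged alternative sketch (the helper name is hypothetical) that fails with a clearer message:

```python
from os import environ

def require_env(name):
    """Return the variable's value, or fail with an explicit message."""
    try:
        return environ[name]
    except KeyError:
        raise RuntimeError('required environment variable %r is not set' % name)

# MY_NUMBER = require_env('MY_NUMBER')
```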
# Copyright 2016 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Increase time resolution for status reports
Revision ID: 014
Create Date: 2016-04-28
"""
from alembic import op
import sqlalchemy.dialects.mysql as sa_mysql
# revision identifiers, used by Alembic.
revision = '014'
down_revision = '013'
MYSQL_ENGINE = 'InnoDB'
MYSQL_CHARSET = 'utf8'
def _check_dbms(engine):
dialect = engine.dialect.dialect_description
version = engine.dialect.server_version_info
if dialect.startswith('mysql') and version >= (5, 6, 4):
return True
if 'MariaDB' in version and version >= (5, 3):
return True
return False
def upgrade():
engine = op.get_bind()
if _check_dbms(engine):
with op.batch_alter_table('status') as batch_op:
batch_op.alter_column(
'created', type_=sa_mysql.DATETIME(fsp=6), nullable=False)
batch_op.alter_column(
'updated', type_=sa_mysql.DATETIME(fsp=6), nullable=False)
def downgrade():
engine = op.get_bind()
if _check_dbms(engine):
with op.batch_alter_table('status') as batch_op:
batch_op.alter_column(
'created', type_=sa_mysql.DATETIME(), nullable=False)
batch_op.alter_column(
'updated', type_=sa_mysql.DATETIME(), nullable=False)
| openstack/murano | murano/db/migration/alembic_migrations/versions/014_increase_status_time_resolution.py | Python | apache-2.0 | 1,863 |
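Editor's note: the migration gates on the engine version because fractional-second `DATETIME(fsp=6)` columns require MySQL 5.6.4+ (or MariaDB 5.3+). A standalone sketch of that gate, with assumed inputs:

```python
# Standalone sketch of the version gate used by _check_dbms above.
def supports_fractional_seconds(dialect_description, version_info):
    if dialect_description.startswith('mysql') and version_info >= (5, 6, 4):
        return True
    if 'MariaDB' in str(version_info) and version_info >= (5, 3):
        return True
    return False

assert supports_fractional_seconds('mysql+pymysql', (5, 7, 0))
assert not supports_fractional_seconds('mysql+pymysql', (5, 5, 40))
```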
#/******************************************************************************
#* Portions Copyright (C) 2007 Novell, Inc. All rights reserved.
#*
#* Redistribution and use in source and binary forms, with or without
#* modification, are permitted provided that the following conditions are met:
#*
#* - Redistributions of source code must retain the above copyright notice,
#* this list of conditions and the following disclaimer.
#*
#* - Redistributions in binary form must reproduce the above copyright notice,
#* this list of conditions and the following disclaimer in the documentation
#* and/or other materials provided with the distribution.
#*
#* - Neither the name of Novell, Inc. nor the names of its
#* contributors may be used to endorse or promote products derived from this
#* software without specific prior written permission.
#*
#* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS''
#* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
#* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
#* ARE DISCLAIMED. IN NO EVENT SHALL Novell, Inc. OR THE CONTRIBUTORS
#* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
#* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
#* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
#* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
#* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
#* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
#* POSSIBILITY OF SUCH DAMAGE.
#*
#* Author: Brad Nicholes (bnicholes novell.com)
#******************************************************************************/
import os
OBSOLETE_POPEN = False
try:
import subprocess
except ImportError:
import popen2
OBSOLETE_POPEN = True
import threading
import time
_WorkerThread = None #Worker thread object
_glock = threading.Lock() #Synchronization lock
_refresh_rate = 30 #Refresh rate of the netstat data
#Global dictionary storing the counts of the last connection state
# read from the netstat output
_conns = {'tcp_established': 0,
'tcp_listen': 0,
'tcp_timewait':0,
'tcp_closewait':0,
'tcp_synsent':0,
'tcp_synrecv':0,
'tcp_synwait':0,
'tcp_finwait1':0,
'tcp_finwait2':0,
'tcp_closed':0,
'tcp_lastack':0,
'tcp_closing':0,
'tcp_unknown':0}
def TCP_Connections(name):
'''Return the requested connection type status.'''
global _WorkerThread
if _WorkerThread is None:
print 'Error: No netstat data gathering thread created for metric %s' % name
return 0
if not _WorkerThread.running and not _WorkerThread.shuttingdown:
try:
_WorkerThread.start()
except (AssertionError, RuntimeError):
pass
#Read the last connection total for the state requested. The metric
# name passed in matches the dictionary slot for the state value.
_glock.acquire()
ret = int(_conns[name])
_glock.release()
return ret
#Metric descriptions
_descriptors = [{'name': 'tcp_established',
'call_back': TCP_Connections,
'time_max': 20,
'value_type': 'uint',
'units': 'Sockets',
'slope': 'both',
'format': '%u',
'description': 'Total number of established TCP connections',
'groups': 'network',
},
{'name': 'tcp_listen',
'call_back': TCP_Connections,
'time_max': 20,
'value_type': 'uint',
'units': 'Sockets',
'slope': 'both',
'format': '%u',
'description': 'Total number of listening TCP connections',
'groups': 'network',
},
{'name': 'tcp_timewait',
'call_back': TCP_Connections,
'time_max': 20,
'value_type': 'uint',
'units': 'Sockets',
'slope': 'both',
'format': '%u',
'description': 'Total number of time_wait TCP connections',
'groups': 'network',
},
{'name': 'tcp_closewait',
'call_back': TCP_Connections,
'time_max': 20,
'value_type': 'uint',
'units': 'Sockets',
'slope': 'both',
'format': '%u',
'description': 'Total number of close_wait TCP connections',
'groups': 'network',
},
{'name': 'tcp_synsent',
'call_back': TCP_Connections,
'time_max': 20,
'value_type': 'uint',
'units': 'Sockets',
'slope': 'both',
'format': '%u',
'description': 'Total number of syn_sent TCP connections',
'groups': 'network',
},
{'name': 'tcp_synrecv',
'call_back': TCP_Connections,
'time_max': 20,
'value_type': 'uint',
'units': 'Sockets',
'slope': 'both',
'format': '%u',
'description': 'Total number of syn_recv TCP connections',
'groups': 'network',
},
{'name': 'tcp_synwait',
'call_back': TCP_Connections,
'time_max': 20,
'value_type': 'uint',
'units': 'Sockets',
'slope': 'both',
'format': '%u',
'description': 'Total number of syn_wait TCP connections',
'groups': 'network',
},
{'name': 'tcp_finwait1',
'call_back': TCP_Connections,
'time_max': 20,
'value_type': 'uint',
'units': 'Sockets',
'slope': 'both',
'format': '%u',
'description': 'Total number of fin_wait1 TCP connections',
'groups': 'network',
},
{'name': 'tcp_finwait2',
'call_back': TCP_Connections,
'time_max': 20,
'value_type': 'uint',
'units': 'Sockets',
'slope': 'both',
'format': '%u',
'description': 'Total number of fin_wait2 TCP connections',
'groups': 'network',
},
{'name': 'tcp_closed',
'call_back': TCP_Connections,
'time_max': 20,
'value_type': 'uint',
'units': 'Sockets',
'slope': 'both',
'format': '%u',
'description': 'Total number of closed TCP connections',
'groups': 'network',
},
{'name': 'tcp_lastack',
'call_back': TCP_Connections,
'time_max': 20,
'value_type': 'uint',
'units': 'Sockets',
'slope': 'both',
'format': '%u',
'description': 'Total number of last_ack TCP connections',
'groups': 'network',
},
{'name': 'tcp_closing',
'call_back': TCP_Connections,
'time_max': 20,
'value_type': 'uint',
'units': 'Sockets',
'slope': 'both',
'format': '%u',
'description': 'Total number of closing TCP connections',
'groups': 'network',
},
{'name': 'tcp_unknown',
'call_back': TCP_Connections,
'time_max': 20,
'value_type': 'uint',
'units': 'Sockets',
'slope': 'both',
'format': '%u',
'description': 'Total number of unknown TCP connections',
'groups': 'network',
}]
class NetstatThread(threading.Thread):
'''This thread continually gathers the current states of the tcp socket
connections on the machine. The refresh rate is controlled by the
RefreshRate parameter that is passed in through the gmond.conf file.'''
def __init__(self):
threading.Thread.__init__(self)
self.running = False
self.shuttingdown = False
self.popenChild = None
def shutdown(self):
self.shuttingdown = True
if self.popenChild != None:
try:
self.popenChild.wait()
except OSError, e:
if e.errno == 10: # No child processes
pass
if not self.running:
return
self.join()
def run(self):
global _conns, _refresh_rate
#Array positions for the connection type and state data
# acquired from the netstat output.
tcp_at = 0
tcp_state_at = 5
#Make a temporary copy of the tcp connection dictionary.
tempconns = _conns.copy()
#Set the state of the running thread
self.running = True
#Continue running until a shutdown event is indicated
while not self.shuttingdown:
if self.shuttingdown:
break
#Zero out the temporary connection state dictionary.
for conn in tempconns:
tempconns[conn] = 0
#Call the netstat utility and split the output into separate lines
if not OBSOLETE_POPEN:
self.popenChild = subprocess.Popen(["netstat", '-t', '-a', '-n'], stdout=subprocess.PIPE)
lines = self.popenChild.communicate()[0].split('\n')
else:
self.popenChild = popen2.Popen3("netstat -t -a -n")
lines = self.popenChild.fromchild.readlines()
try:
self.popenChild.wait()
except OSError, e:
if e.errno == 10: # No child process
continue
#Iterate through the netstat output looking for the 'tcp' keyword in the tcp_at
# position and the state information in the tcp_state_at position. Count each
# occurrence of each state.
for tcp in lines:
# skip empty lines
if tcp == '':
continue
line = tcp.split()
if line[tcp_at] == 'tcp':
if line[tcp_state_at] == 'ESTABLISHED':
tempconns['tcp_established'] += 1
elif line[tcp_state_at] == 'LISTEN':
tempconns['tcp_listen'] += 1
elif line[tcp_state_at] == 'TIME_WAIT':
tempconns['tcp_timewait'] += 1
elif line[tcp_state_at] == 'CLOSE_WAIT':
tempconns['tcp_closewait'] += 1
elif line[tcp_state_at] == 'SYN_SENT':
tempconns['tcp_synsent'] += 1
elif line[tcp_state_at] == 'SYN_RECV':
tempconns['tcp_synrecv'] += 1
elif line[tcp_state_at] == 'SYN_WAIT':
tempconns['tcp_synwait'] += 1
elif line[tcp_state_at] == 'FIN_WAIT1':
tempconns['tcp_finwait1'] += 1
elif line[tcp_state_at] == 'FIN_WAIT2':
tempconns['tcp_finwait2'] += 1
elif line[tcp_state_at] == 'CLOSED':
tempconns['tcp_closed'] += 1
elif line[tcp_state_at] == 'LAST_ACK':
tempconns['tcp_lastack'] += 1
elif line[tcp_state_at] == 'CLOSING':
tempconns['tcp_closing'] += 1
elif line[tcp_state_at] == 'UNKNOWN':
tempconns['tcp_unknown'] += 1
#Acquire a lock and copy the temporary connection state dictionary
# to the global state dictionary.
_glock.acquire()
for conn in _conns:
_conns[conn] = tempconns[conn]
_glock.release()
#Wait for the refresh_rate period before collecting the netstat data again.
if not self.shuttingdown:
time.sleep(_refresh_rate)
#Set the current state of the thread after a shutdown has been indicated.
self.running = False
def metric_init(params):
'''Initialize the tcp connection status module and create the
metric definition dictionary object for each metric.'''
global _refresh_rate, _WorkerThread
#Read the refresh_rate from the gmond.conf parameters.
if 'RefreshRate' in params:
_refresh_rate = int(params['RefreshRate'])
#Start the worker thread
_WorkerThread = NetstatThread()
#Return the metric descriptions to Gmond
return _descriptors
def metric_cleanup():
'''Clean up the metric module.'''
#Tell the worker thread to shutdown
_WorkerThread.shutdown()
#This code is for debugging and unit testing
if __name__ == '__main__':
params = {'RefreshRate': '20'}  # key must match the name metric_init checks
metric_init(params)
while True:
try:
for d in _descriptors:
v = d['call_back'](d['name'])
print 'value for %s is %u' % (d['name'], v)
time.sleep(5)
except KeyboardInterrupt:
os._exit(1)
| ClodoCorp/debian-ganglia | gmond/python_modules/network/tcpconn.py | Python | bsd-3-clause | 12,782 |
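Editor's note: the module above is Python 2 (`print` statements, `except OSError, e`). A compact Python 3 sketch of the same state tally, without the worker thread (assumes `netstat` is on PATH):

```python
import subprocess
from collections import Counter

def tcp_state_counts():
    """Count TCP connection states from one `netstat -t -a -n` run."""
    out = subprocess.run(['netstat', '-t', '-a', '-n'],
                         capture_output=True, text=True).stdout
    states = Counter()
    for line in out.splitlines():
        fields = line.split()
        if len(fields) > 5 and fields[0] == 'tcp':
            states[fields[5]] += 1  # sixth column is the socket state
    return states

# print(tcp_state_counts().get('ESTABLISHED', 0))
```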
from cerberus import Validator
def keys_and_values_are_strings(d):
"""
Checks to see if the keys and values of this dict are strings.
:param d: The Dictionary to check
:return: Boolean, True if all the keys and values are strings.
"""
schema = {
'a_dict': {
'keyschema': {'type': 'string'},
'valueschema': {'type': 'string'}}
}
v = Validator(schema)
return v.validate({'a_dict': d})
| chrisbrake/PythonSandbox | sanitiser/sanitiser.py | Python | bsd-3-clause | 453 |
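Editor's note: a quick usage check for the validator above (relies on Cerberus's boolean `validate` return):

```python
assert keys_and_values_are_strings({'a': 'b'})
assert not keys_and_values_are_strings({'a': 1})
assert not keys_and_values_are_strings({1: 'b'})
```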
"""Authentication testing resources."""
__copyright__ = "Copyright (C) 2014 Ivan D Vasin"
__docformat__ = "restructuredtext"
import bedframe as _bedframe
import bedframe.auth as _bedframe_auth
import bedframe.webtypes as _webtypes
class Welcome(_bedframe.WebResource):
@_bedframe.webmethod(auth_provisions=_webtypes.nonweb)
def __init__(self, auth_provisions=_bedframe_auth.SECPROV_CLIENT_AUTH):
super(Welcome, self).__init__()
self._auth_provisions = auth_provisions
@property
def auth_provisions(self):
return self._auth_provisions
@_bedframe.webmethod(_webtypes.unicode)
def get(self):
self.ensure_auth()
return 'Welcome, {}'.format(self.current_auth_info.user)
| nisavid/testbed | testbed/resources/_auth.py | Python | lgpl-3.0 | 736 |
#Exercise 21: Functions can Return Something
def add (a, b):
print "ADDING %d + %d" % (a, b)
return a + b
def subtract(a, b):
print "SUBTRACTING %d - %d" % (a, b)
return a - b
def multiply(a, b):
print "MULTIPLYING %d * %d" % (a, b)
return a * b
def divide(a, b):
print "DIVIDING %d / %d" % (a, b)
return a / b
print "Let's do some math with just functions!"
age = add(30, 5)
height = subtract(78, 4)
weight = multiply(90, 2)
iq = divide(100, 2)
print "Age: %d, Height: %d, Weight: %d, IQ: %d" % (age, height, weight, iq)
# A puzzle for the extra credit, type it in anyway.
print "Here is a puzzle."
what = add(age, subtract(height, add(multiply(weight, divide(iq, 2)),100)))
print "That becomes: ", what, "Can you do it by hand?"
print "Here's my version of the simple math problem 5 * 4 + 9 / 3 * 2"
print" or otherwise shown as (5 * 4) + ((9 / 3) * 2)"
print "equals: %d" % add(multiply(5, 4), multiply(divide(9, 3), 2) )
| jessehagberg/python-playground | ex21.py | Python | cc0-1.0 | 977 |
from django.contrib.sitemaps import Sitemap
from merchandise.music.models import Album, Single, Track
from people.constants import STATUS
from people.models import Group, Idol
class BaseSitemap(Sitemap):
changefreq = 'daily'
def lastmod(self, obj):
return obj.modified
class AlbumSitemap(BaseSitemap):
priority = 0.6
def items(self):
return Album.objects.all()
class GroupSitemap(BaseSitemap):
def items(self):
return Group.objects.all()
def priority(self, obj):
if obj.status == STATUS.active:
return 0.75
return 0.5
class IdolSitemap(BaseSitemap):
def items(self):
return Idol.objects.all()
def priority(self, obj):
if obj.status == STATUS.active:
return 0.75
return 0.5
class SingleSitemap(BaseSitemap):
priority = 0.6
def items(self):
return Single.objects.all()
class TrackSitemap(BaseSitemap):
priority = 0.5
def items(self):
return Track.objects.originals()
| hello-base/web | apps/sitemaps.py | Python | apache-2.0 | 1,036 |
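Editor's note: sitemap classes like these are typically registered in the URLconf via Django's contrib view. A sketch under that assumption (import paths follow modern Django; older projects would use `url()`):

```python
from django.contrib.sitemaps.views import sitemap
from django.urls import path

sitemaps = {
    'albums': AlbumSitemap,
    'groups': GroupSitemap,
    'idols': IdolSitemap,
    'singles': SingleSitemap,
    'tracks': TrackSitemap,
}

urlpatterns = [
    # Serves all sections defined above at /sitemap.xml
    path('sitemap.xml', sitemap, {'sitemaps': sitemaps}, name='sitemap'),
]
```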
import decimal
from decimal import Decimal
from unittest import TestCase
from StringIO import StringIO
import simplejson as json
class TestDecimal(TestCase):
NUMS = "1.0", "10.00", "1.1", "1234567890.1234567890", "500"
def dumps(self, obj, **kw):
sio = StringIO()
json.dump(obj, sio, **kw)
res = json.dumps(obj, **kw)
self.assertEquals(res, sio.getvalue())
return res
def loads(self, s, **kw):
sio = StringIO(s)
res = json.loads(s, **kw)
self.assertEquals(res, json.load(sio, **kw))
return res
def test_decimal_encode(self):
for d in map(Decimal, self.NUMS):
self.assertEquals(self.dumps(d, use_decimal=True), str(d))
def test_decimal_decode(self):
for s in self.NUMS:
self.assertEquals(self.loads(s, parse_float=Decimal), Decimal(s))
def test_decimal_roundtrip(self):
for d in map(Decimal, self.NUMS):
# The type might not be the same (int and Decimal) but they
# should still compare equal.
self.assertEquals(
self.loads(
self.dumps(d, use_decimal=True), parse_float=Decimal),
d)
self.assertEquals(
self.loads(
self.dumps([d], use_decimal=True), parse_float=Decimal),
[d])
def test_decimal_defaults(self):
d = Decimal('1.1')
# use_decimal=True is the default
self.assertRaises(TypeError, json.dumps, d, use_decimal=False)
self.assertEqual('1.1', json.dumps(d))
self.assertEqual('1.1', json.dumps(d, use_decimal=True))
self.assertRaises(TypeError, json.dump, d, StringIO(),
use_decimal=False)
sio = StringIO()
json.dump(d, sio)
self.assertEqual('1.1', sio.getvalue())
sio = StringIO()
json.dump(d, sio, use_decimal=True)
self.assertEqual('1.1', sio.getvalue())
def test_decimal_reload(self):
# Simulate a subinterpreter that reloads the Python modules but not
# the C code https://github.com/simplejson/simplejson/issues/34
global Decimal
Decimal = reload(decimal).Decimal
import simplejson.encoder
simplejson.encoder.Decimal = Decimal
self.test_decimal_roundtrip()
| sauloal/cufflinksviewer | venvwin/Lib/site-packages/simplejson-2.6.2-py2.7.egg/simplejson/tests/test_decimal.py | Python | mit | 2,357 |
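Editor's note: the lossless round trip these tests exercise, in one line each, using simplejson's documented `use_decimal` / `parse_float` hooks:

```python
from decimal import Decimal
import simplejson as json

s = json.dumps(Decimal('1.1'), use_decimal=True)  # '1.1', exactly
assert json.loads(s, parse_float=Decimal) == Decimal('1.1')
```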
__author__ = 'Conscience'
from django.conf.urls import url
from django.contrib import admin
from . import views
urlpatterns = [
url(r'^$', views.post_list, name='post_list'),
url(r'^post/(?P<pk>[0-9]+)/$', views.post_detail, name='post_detail'),
url(r'^post/$', views.post_list, name='post_list'),
url(r'^post/new/$', views.post_new, name='post_new'),
url(r'^post/(?P<pk>[0-9]+)/edit/$', views.post_edit, name='post_edit'),
]
| Rivares/MyBlog | blog/urls.py | Python | apache-2.0 | 446 |
# utf-8
import re
import requests
import configparser
from pathlib import Path
from twython import Twython, TwythonError
from wand.image import Image
class TweetProvider:
is_text_empty = True
photo = None
text = None
special = False
def __init__(self, twitter, tweet):
assert isinstance(twitter, Twython)
self.twitter = twitter
self.tweet = tweet
if self.tweet:
self.user = self.tweet['user']
self.screen_name = self.user['screen_name']
self.user_id_str = self.user['id_str']
self.tweet_id_str = tweet['id_str']
if self.twitter and self.tweet:
self.set_text()
self.set_mock_image()
def set_text(self):
text = self.tweet['text']
mentions = re.match('(^(@\w+\s)*)', text).group(0)
text = re.sub(r'<', '&lt;', re.sub(r'>', '&gt;', text))
text = re.sub(r'@', 'ҩ', re.sub(r'^(@\w+\s)*', '', re.sub(r'https://t.co/\S*', '', text)))
text = re.sub(r'(\w)\W*?(\w?)', lambda m: m.group(1).lower() + m.group(2).upper(), text)
self.text = '@' + self.screen_name + ' ' + mentions + text
text = re.sub(r'\W', '', text)
if len(text) > 0:
self.is_text_empty = False
def set_mock_image(self):
imageprovider = ImageProvider(tweet=self.tweet)
self.photo = imageprovider.return_photo()
self.special = imageprovider.special
def fire_tweet(self):
uploaded_photo = self.twitter.upload_media(media=self.photo)
print('INFO: Dropping tweet with text...')
print(self.text)
try:
self.twitter.update_status(status=self.text,
in_reply_to_status_id=self.tweet_id_str,
media_ids=[uploaded_photo['media_id']])
except TwythonError as twython_exception:
print(twython_exception)
class TimelineProvider:
user_tl = None
def __init__(self, twitter, screen_name, config):
assert isinstance(twitter, Twython)
assert isinstance(screen_name, str)
assert isinstance(config, configparser.RawConfigParser)
self.twitter = twitter
self.screen_name = screen_name
self.config = config
self.get_user_tl()
def get_user_tl(self):
last_tweet = ''
try:
last_tweet = self.config.get('lasttweets', self.screen_name)
except configparser.Error:
print('No last tweet saved for ' + self.screen_name)
try:
if last_tweet:
self.user_tl = self.twitter.get_user_timeline(screen_name=self.screen_name, count=10,
include_rts=False, since_id=last_tweet)
else:
self.user_tl = self.twitter.get_user_timeline(screen_name=self.screen_name, count=10,
include_rts=False)
except TwythonError as twython_exception:
error_str = str(twython_exception.error_code)
print('ERROR ' + error_str + ' for ' + self.screen_name)
if twython_exception.error_code == 404:
print('Screen name does not exist (anymore)')
user_id = None
try:
user_id = self.config.get('victims', self.screen_name)
except configparser.Error:
print('No user id saved')
if user_id:
print('Trying with user id: ' + user_id)
try:
if last_tweet:
self.user_tl = self.twitter.get_user_timeline(id_str=user_id, count=10,
include_rts=False, since_id=last_tweet)
else:
self.user_tl = self.twitter.get_user_timeline(id_str=user_id, count=10,
include_rts=False)
except TwythonError as twython_exception:
print(twython_exception)
def return_user_tl(self):
return self.user_tl
class ImageProvider:
photo = None
special = False
def __init__(self, tweet):
self.tweet = tweet
if self.tweet:
user = self.tweet['user']
self.screen_name = user['screen_name']
self.set_photo()
def set_photo(self):
try:
media_url = self.tweet['entities']['media'][0]['media_url']
if re.search(r'\.jpg$', media_url):
mock = Image(filename='mock.png')
response = requests.get(media_url)
img = Image(blob=bytes(response.content))
img.format = 'jpeg'
img.composite(image=mock, left=0, top=0)
img.save(filename='temp.jpg')
self.photo = open('temp.jpg', 'rb')
self.special = True
except KeyError:
self.photo = None
if not self.special:
special_photo = Path(self.screen_name + '.jpg')
if special_photo.is_file():
self.photo = open(self.screen_name + '.jpg', 'rb')
print('INFO: ' + self.screen_name + '.jpg exists')
else:
self.photo = open('mock.jpg', 'rb')
def return_photo(self):
return self.photo
| kuboktaeder/mockbot | localclasses.py | Python | agpl-3.0 | 5,508 |
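Editor's note: the densest line in `set_text` above is the case-flipping substitution. A standalone demo of what it does (alternating "mocking" case):

```python
import re

def mock_case(text):
    # Lowercase one word character, uppercase the next, repeat.
    return re.sub(r'(\w)\W*?(\w?)',
                  lambda m: m.group(1).lower() + m.group(2).upper(), text)

assert mock_case('hello world') == 'hElLo wOrLd'
```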
"""
The Plaid API
The Plaid REST API. Please see https://plaid.com/docs/api for more details. # noqa: E501
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from plaid.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
class TransferUserAddressInRequest(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'street': (str,), # noqa: E501
'city': (str,), # noqa: E501
'region': (str,), # noqa: E501
'postal_code': (str,), # noqa: E501
'country': (str,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'street': 'street', # noqa: E501
'city': 'city', # noqa: E501
'region': 'region', # noqa: E501
'postal_code': 'postal_code', # noqa: E501
'country': 'country', # noqa: E501
}
_composed_schemas = {}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""TransferUserAddressInRequest - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
street (str): The street number and name (i.e., \"100 Market St.\").. [optional] # noqa: E501
city (str): Ex. \"San Francisco\". [optional] # noqa: E501
region (str): The state or province (e.g., \"CA\").. [optional] # noqa: E501
postal_code (str): The postal code (e.g., \"94103\").. [optional] # noqa: E501
country (str): A two-letter country code (e.g., \"US\").. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
| plaid/plaid-python | plaid/model/transfer_user_address_in_request.py | Python | mit | 7,478 |
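Editor's note: the generated model is constructed with keyword arguments matching `attribute_map`; a sketch with example values taken from the docstring above:

```python
address = TransferUserAddressInRequest(
    street='100 Market St.',
    city='San Francisco',
    region='CA',
    postal_code='94103',
    country='US',
)
```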
"""
kombu.pidbox
===============
Generic process mailbox.
"""
from __future__ import absolute_import, unicode_literals
import socket
import warnings
from collections import defaultdict, deque
from contextlib import contextmanager
from copy import copy
from itertools import count
from threading import local
from time import time
from . import Exchange, Queue, Consumer, Producer
from .clocks import LamportClock
from .common import maybe_declare, oid_from
from .exceptions import InconsistencyError
from .five import range
from .log import get_logger
from .utils import cached_property, uuid, reprcall
from .utils.functional import maybe_evaluate
REPLY_QUEUE_EXPIRES = 10
W_PIDBOX_IN_USE = """\
A node named {node.hostname} is already using this process mailbox!
Maybe you forgot to shutdown the other node or did not do so properly?
Or if you meant to start multiple nodes on the same host please make sure
you give each node a unique node name!
"""
__all__ = ['Node', 'Mailbox']
logger = get_logger(__name__)
debug, error = logger.debug, logger.error
class Node(object):
#: hostname of the node.
hostname = None
#: the :class:`Mailbox` this is a node for.
mailbox = None
#: map of method name/handlers.
handlers = None
#: current context (passed on to handlers)
state = None
#: current channel.
channel = None
def __init__(self, hostname, state=None, channel=None,
handlers=None, mailbox=None):
self.channel = channel
self.mailbox = mailbox
self.hostname = hostname
self.state = state
self.adjust_clock = self.mailbox.clock.adjust
if handlers is None:
handlers = {}
self.handlers = handlers
def Consumer(self, channel=None, no_ack=True, accept=None, **options):
queue = self.mailbox.get_queue(self.hostname)
def verify_exclusive(name, messages, consumers):
if consumers:
warnings.warn(W_PIDBOX_IN_USE.format(node=self))
queue.on_declared = verify_exclusive
return Consumer(
channel or self.channel, [queue], no_ack=no_ack,
accept=self.mailbox.accept if accept is None else accept,
**options
)
def handler(self, fun):
self.handlers[fun.__name__] = fun
return fun
def on_decode_error(self, message, exc):
error('Cannot decode message: %r', exc, exc_info=1)
def listen(self, channel=None, callback=None):
consumer = self.Consumer(channel=channel,
callbacks=[callback or self.handle_message],
on_decode_error=self.on_decode_error)
consumer.consume()
return consumer
def dispatch(self, method, arguments=None,
reply_to=None, ticket=None, **kwargs):
arguments = arguments or {}
debug('pidbox received method %s [reply_to:%s ticket:%s]',
reprcall(method, (), kwargs=arguments), reply_to, ticket)
handle = reply_to and self.handle_call or self.handle_cast
try:
reply = handle(method, arguments)
except SystemExit:
raise
except Exception as exc:
error('pidbox command error: %r', exc, exc_info=1)
reply = {'error': repr(exc)}
if reply_to:
self.reply({self.hostname: reply},
exchange=reply_to['exchange'],
routing_key=reply_to['routing_key'],
ticket=ticket)
return reply
def handle(self, method, arguments={}):
return self.handlers[method](self.state, **arguments)
def handle_call(self, method, arguments):
return self.handle(method, arguments)
def handle_cast(self, method, arguments):
return self.handle(method, arguments)
def handle_message(self, body, message=None):
destination = body.get('destination')
if message:
self.adjust_clock(message.headers.get('clock') or 0)
if not destination or self.hostname in destination:
return self.dispatch(**body)
dispatch_from_message = handle_message
def reply(self, data, exchange, routing_key, ticket, **kwargs):
self.mailbox._publish_reply(data, exchange, routing_key, ticket,
channel=self.channel,
serializer=self.mailbox.serializer)
class Mailbox(object):
node_cls = Node
exchange_fmt = '%s.pidbox'
reply_exchange_fmt = 'reply.%s.pidbox'
#: Name of application.
namespace = None
#: Connection (if bound).
connection = None
#: Exchange type (usually direct, or fanout for broadcast).
type = 'direct'
#: mailbox exchange (init by constructor).
exchange = None
#: exchange to send replies to.
reply_exchange = None
#: Only accepts json messages by default.
accept = ['json']
#: Message serializer
serializer = None
def __init__(self, namespace,
type='direct', connection=None, clock=None,
accept=None, serializer=None, producer_pool=None):
self.namespace = namespace
self.connection = connection
self.type = type
self.clock = LamportClock() if clock is None else clock
self.exchange = self._get_exchange(self.namespace, self.type)
self.reply_exchange = self._get_reply_exchange(self.namespace)
self._tls = local()
self.unclaimed = defaultdict(deque)
self.accept = self.accept if accept is None else accept
self.serializer = self.serializer if serializer is None else serializer
self._producer_pool = producer_pool
def __call__(self, connection):
bound = copy(self)
bound.connection = connection
return bound
def Node(self, hostname=None, state=None, channel=None, handlers=None):
hostname = hostname or socket.gethostname()
return self.node_cls(hostname, state, channel, handlers, mailbox=self)
def call(self, destination, command, kwargs={},
timeout=None, callback=None, channel=None):
return self._broadcast(command, kwargs, destination,
reply=True, timeout=timeout,
callback=callback,
channel=channel)
def cast(self, destination, command, kwargs={}):
return self._broadcast(command, kwargs, destination, reply=False)
def abcast(self, command, kwargs={}):
return self._broadcast(command, kwargs, reply=False)
def multi_call(self, command, kwargs={}, timeout=1,
limit=None, callback=None, channel=None):
return self._broadcast(command, kwargs, reply=True,
timeout=timeout, limit=limit,
callback=callback,
channel=channel)
def get_reply_queue(self):
oid = self.oid
return Queue(
'%s.%s' % (oid, self.reply_exchange.name),
exchange=self.reply_exchange,
routing_key=oid,
durable=False,
auto_delete=True,
queue_arguments={
'x-expires': int(REPLY_QUEUE_EXPIRES * 1000),
},
)
@cached_property
def reply_queue(self):
return self.get_reply_queue()
def get_queue(self, hostname):
return Queue('%s.%s.pidbox' % (hostname, self.namespace),
exchange=self.exchange,
durable=False,
auto_delete=True)
@contextmanager
def producer_or_acquire(self, producer=None, channel=None):
if producer:
yield producer
elif self.producer_pool:
with self.producer_pool.acquire() as producer:
yield producer
else:
yield Producer(channel, auto_declare=False)
def _publish_reply(self, reply, exchange, routing_key, ticket,
channel=None, producer=None, **opts):
chan = channel or self.connection.default_channel
exchange = Exchange(exchange, exchange_type='direct',
delivery_mode='transient',
durable=False)
with self.producer_or_acquire(producer, chan) as producer:
try:
producer.publish(
reply, exchange=exchange, routing_key=routing_key,
declare=[exchange], headers={
'ticket': ticket, 'clock': self.clock.forward(),
},
**opts
)
except InconsistencyError:
# queue probably deleted and no one is expecting a reply.
pass
def _publish(self, type, arguments, destination=None,
reply_ticket=None, channel=None, timeout=None,
serializer=None, producer=None):
message = {'method': type,
'arguments': arguments,
'destination': destination}
chan = channel or self.connection.default_channel
exchange = self.exchange
if reply_ticket:
maybe_declare(self.reply_queue(channel))
message.update(ticket=reply_ticket,
reply_to={'exchange': self.reply_exchange.name,
'routing_key': self.oid})
serializer = serializer or self.serializer
with self.producer_or_acquire(producer, chan) as producer:
producer.publish(
message, exchange=exchange.name, declare=[exchange],
headers={'clock': self.clock.forward(),
'expires': time() + timeout if timeout else 0},
serializer=serializer,
)
def _broadcast(self, command, arguments=None, destination=None,
reply=False, timeout=1, limit=None,
callback=None, channel=None, serializer=None):
if destination is not None and \
not isinstance(destination, (list, tuple)):
raise ValueError(
'destination must be a list/tuple not {0}'.format(
type(destination)))
arguments = arguments or {}
reply_ticket = reply and uuid() or None
chan = channel or self.connection.default_channel
# Set reply limit to number of destinations (if specified)
if limit is None and destination:
limit = destination and len(destination) or None
serializer = serializer or self.serializer
self._publish(command, arguments, destination=destination,
reply_ticket=reply_ticket,
channel=chan,
timeout=timeout,
serializer=serializer)
if reply_ticket:
return self._collect(reply_ticket, limit=limit,
timeout=timeout,
callback=callback,
channel=chan)
def _collect(self, ticket,
limit=None, timeout=1, callback=None,
channel=None, accept=None):
if accept is None:
accept = self.accept
chan = channel or self.connection.default_channel
queue = self.reply_queue
consumer = Consumer(channel, [queue], accept=accept, no_ack=True)
responses = []
unclaimed = self.unclaimed
adjust_clock = self.clock.adjust
try:
return unclaimed.pop(ticket)
except KeyError:
pass
def on_message(body, message):
# ticket header added in kombu 2.5
header = message.headers.get
adjust_clock(header('clock') or 0)
expires = header('expires')
if expires and time() > expires:
return
this_id = header('ticket', ticket)
if this_id == ticket:
if callback:
callback(body)
responses.append(body)
else:
unclaimed[this_id].append(body)
consumer.register_callback(on_message)
try:
with consumer:
for i in limit and range(limit) or count():
try:
self.connection.drain_events(timeout=timeout)
except socket.timeout:
break
return responses
finally:
chan.after_reply_message_received(queue.name)
def _get_exchange(self, namespace, type):
return Exchange(self.exchange_fmt % namespace,
type=type,
durable=False,
delivery_mode='transient')
def _get_reply_exchange(self, namespace):
return Exchange(self.reply_exchange_fmt % namespace,
type='direct',
durable=False,
delivery_mode='transient')
@cached_property
def oid(self):
try:
return self._tls.OID
except AttributeError:
oid = self._tls.OID = oid_from(self)
return oid
@cached_property
def producer_pool(self):
return maybe_evaluate(self._producer_pool)
| Elastica/kombu | kombu/pidbox.py | Python | bsd-3-clause | 13,438 |
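Editor's note: a minimal pidbox sketch under assumed names (the handler, hostname, and in-memory transport are hypothetical stand-ins for a real broker setup):

```python
from kombu import Connection, pidbox

mailbox = pidbox.Mailbox('myapp', type='direct')

def ping(state, **kwargs):
    # Handlers receive the node state plus the dispatched arguments.
    return 'pong'

with Connection('memory://') as conn:
    bound = mailbox(conn)  # bind the mailbox to a connection
    node = bound.Node('worker1@example.com',
                      channel=conn.channel(),
                      handlers={'ping': ping})
    node.listen()                                # consume command messages
    bound.cast(['worker1@example.com'], 'ping')  # fire-and-forget command
    conn.drain_events(timeout=1)                 # delivers 'ping' to the handler
```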
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import with_statement
import os
import sys
import time
import twisted
from buildbot.scripts import start
from buildbot.test.util import compat
from buildbot.test.util import dirs
from buildbot.test.util import misc
from buildbot.test.util.flaky import flaky
from twisted.internet.utils import getProcessOutputAndValue
from twisted.python import versions
from twisted.trial import unittest
def mkconfig(**kwargs):
config = {
'quiet': False,
'basedir': os.path.abspath('basedir'),
'nodaemon': False,
}
config.update(kwargs)
return config
fake_master_tac = """\
from twisted.application import service
from twisted.internet import reactor
from twisted.python import log
application = service.Application('highscore')
class App(service.Service):
def startService(self):
service.Service.startService(self)
log.msg("BuildMaster is running") # heh heh heh
reactor.callLater(0, reactor.stop)
app = App()
app.setServiceParent(application)
# isBuildmasterDir wants to see this -> Application('buildmaster')
"""
class TestStart(misc.StdoutAssertionsMixin, dirs.DirsMixin, unittest.TestCase):
def setUp(self):
self.setUpDirs('basedir')
with open(os.path.join('basedir', 'buildbot.tac'), 'wt') as f:
f.write(fake_master_tac)
self.setUpStdoutAssertions()
def tearDown(self):
self.tearDownDirs()
# tests
def test_start_not_basedir(self):
self.assertEqual(start.start(mkconfig(basedir='doesntexist')), 1)
self.assertInStdout('invalid buildmaster directory')
def runStart(self, **config):
args = [
'-c',
'from buildbot.scripts.start import start; start(%r)' % (mkconfig(**config),),
]
env = os.environ.copy()
env['PYTHONPATH'] = os.pathsep.join(sys.path)
return getProcessOutputAndValue(sys.executable, args=args, env=env)
def test_start_no_daemon(self):
d = self.runStart(nodaemon=True)
@d.addCallback
def cb(res):
self.assertEquals(res, ('', '', 0))
print res
return d
def test_start_quiet(self):
d = self.runStart(quiet=True)
@d.addCallback
def cb(res):
self.assertEquals(res, ('', '', 0))
print res
return d
@flaky(bugNumber=2760)
@compat.skipUnlessPlatformIs('posix')
def test_start(self):
d = self.runStart()
@d.addCallback
def cb(xxx_todo_changeme):
(out, err, rc) = xxx_todo_changeme
self.assertEqual((rc, err), (0, ''))
self.assertSubstring('BuildMaster is running', out)
@d.addBoth
def flush(x):
# wait for the pidfile to go away after the reactor.stop
# in buildbot.tac takes effect
pidfile = os.path.join('basedir', 'twistd.pid')
while os.path.exists(pidfile):
time.sleep(0.01)
return x
return d
if twisted.version <= versions.Version('twisted', 9, 0, 0):
test_start.skip = test_start_quiet.skip = "Skipping due to spurious PotentialZombieWarning."
# the remainder of this script does obscene things:
# - forks
# - shells out to tail
# - starts and stops the reactor
# so testing it will be *far* more pain than is worthwhile
| zozo123/buildbot | master/buildbot/test/unit/test_scripts_start.py | Python | gpl-3.0 | 4,104 |
"""Setup file for the s3bp package."""
# This file is part of s3bp.
# https://github.com/shaypal5/s3bp
# Licensed under the MIT license:
# http://www.opensource.org/licenses/MIT-license
# Copyright (c) 2016, Shay Palachy <shaypal5@gmail.com>
from setuptools import setup, find_packages
import versioneer
setup(
name='s3bp',
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
description='Read and write Python objects from/to S3.',
license='MIT',
author='Shay Palachy',
author_email='shaypal5@gmail.com',
url='https://github.com/shaypal5/s3bp',
# download_url='https://github.com/shaypal5/s3po/tarball/0.1.1',
packages=find_packages(),
install_requires=[
'botocore',
'boto3',
'python-dateutil',
'pyyaml',
'pandas',
'feather-format'
],
keywords=['pandas', 'dataframe', 's3'],
classifiers=[],
)
| shaypal5/s3bp | setup.py | Python | mit | 920 |
import time
import hashlib
import isodate
import logging
from util import *
from twisted.internet import task
from twisted.internet.defer import inlineCallbacks, returnValue
from autobahn.websocket import WebSocketServerProtocol, WebSocketServerFactory
from autobahn.resource import WebSocketResource
SEND_REGISTRATIONS = 10
YOUTUBE_STATS_URL = 'https://www.googleapis.com/youtube/v3/videos?part=contentDetails,statistics&id={id}&key={api_key}'
SOUNDCLOUD_STATS_URL = 'http://api.soundcloud.com/tracks/{id}?client_id={api_key}'
class BillyRadioProtocol(WebSocketServerProtocol):
def onConnect(self, request):
self.factory.join(request.peer, self)
def onMessage(self, payload, isBinary):
if not isBinary:
json_payload = json.loads(payload)
type = json_payload.get('type', None)
if type == 'register' and 'name' in json_payload and 'radio_id' in json_payload:
reactor.callLater(0, self.factory.register, self.peer, json_payload['name'], json_payload['radio_id'])
elif type == 'unregister' and 'radio_id' in json_payload:
self.factory.unregister(self.peer, json_payload['radio_id'])
def connectionLost(self, reason):
WebSocketServerProtocol.connectionLost(self, reason)
self.factory.leave(self.peer)
class BillyRadioFactory(WebSocketServerFactory):
def __init__(self, *args, **kwargs):
self.database = kwargs.pop('database')
self.config = kwargs.pop('config')
WebSocketServerFactory.__init__(self, *args, **kwargs)
self.logger = logging.getLogger(__name__)
self.connections = {}
self.stations = {}
task.LoopingCall(self.fetch_playlist).start(300, now=False)
task.LoopingCall(self.send_status).start(30)
def join(self, peer, connection):
self.connections[peer] = connection
self.logger.debug('Peer %s joined', peer)
def leave(self, peer):
if peer in self.connections:
del self.connections[peer]
self.logger.debug('Peer %s left', peer)
for radio_id in self.stations.keys():
self.unregister(peer, radio_id)
def send(self, message, peers=None):
if peers is not None:
connections = [self.connections[peer] for peer in peers if peer in self.connections]
else:
connections = self.connections.values()
for connection in connections:
connection.sendMessage(json.dumps(message))
def send_status(self, radio_id=None, peers=None):
stations = {radio_id: self.stations[radio_id]} if radio_id is not None else self.stations
for radio_id, station in stations.iteritems():
if station.start_time > 0:
self.send({'type': 'status',
'radio_id': radio_id,
'position': station.get_play_position()}, peers or station.get_peers())
def send_data(self, radio_id, peers=None):
station = self.stations[radio_id]
self.send({'type': 'data',
'radio_id': radio_id,
'tracks': station.get_tracks(),
'registrations': station.get_registrations()}, peers)
@inlineCallbacks
def register(self, peer, user_name, radio_id):
if radio_id not in self.stations:
self.stations[radio_id] = BillyRadioStation(radio_id, self.config, self.database)
yield self.stations[radio_id].update_tracks()
station = self.stations[radio_id]
peers = station.get_peers()
station.register(peer, user_name)
self.send_data(radio_id, [peer])
self.send_status(radio_id, [peer])
registration = station.get_registration(peer)
message = {'type': 'registered',
'user_id': registration['user_id'],
'user_name': registration['user_name'],
'radio_id': radio_id,
'time': registration['time']}
self.send(message, peers=peers)
def unregister(self, peer, radio_id):
station = self.stations[radio_id]
peers = station.get_peers()
registration = station.unregister(peer)
if registration:
message = {'type': 'unregistered',
'user_id': registration['user_id'],
'radio_id': radio_id}
self.send(message, peers=peers)
@inlineCallbacks
def fetch_playlist(self):
self.logger.info('Checking tracks')
for radio_id, station in self.stations.iteritems():
updated = yield station.update_tracks()
if updated:
# Notify everyone
self.send_data(radio_id)
self.send_status(radio_id)
self.logger.info('Done checking tracks')
class BillyRadioStation(object):
def __init__(self, radio_id, config, database):
self.logger = logging.getLogger(__name__)
self.start_time = 0
self.radio_id = radio_id
self.config = config
self.database = database
self.listeners = {}
self.tracks = []
radio = self.database.get_radio(self.radio_id)
self.session_id = radio['session_id']
self.playlist_name = radio['playlist_name']
def register(self, peer, user_name):
self.listeners[peer] = {'user_id': hashlib.sha1(str(peer)).hexdigest(),
'user_name': user_name,
'time': int(time.time())}
def unregister(self, peer):
return self.listeners.pop(peer, None)
def get_registration(self, peer):
return self.listeners.get(peer, None)
def get_registrations(self, limit=SEND_REGISTRATIONS):
return sorted(self.listeners.values(), key=lambda x: x['time'])[:limit]
def get_play_position(self):
play_position = int((time.time() - self.start_time))
track_index = 0
track = self.tracks[0]
while play_position > track['duration']:
play_position -= track['duration']
track_index += 1
track_index = track_index % len(self.tracks)
track = self.tracks[track_index]
return (track_index, play_position)
@inlineCallbacks
def update_tracks(self):
self.logger.info('Checking tracks for radio %s', self.radio_id)
session = self.database.get_session(self.session_id)
tracks = session['playlists'][self.playlist_name]['tracks']
# Only allow youtube tracks for now
tracks = [track for track in tracks if track['link'].startswith('youtube:')]
# Have the tracks changed?
if [t['link'] for t in self.tracks] != [t['link'] for t in tracks]:
# Update stats
yt_api_key = self.config.get('sources', 'youtube_api_key')
sc_api_key = self.config.get('sources', 'soundcloud_api_key')
for track in tracks:
url = YOUTUBE_STATS_URL.format(api_key=yt_api_key, id=track['link'][8:])
response = yield get_request(url)
response_dict = response.json
track['duration'] = int(isodate.parse_duration(response_dict['items'][0]['contentDetails']['duration']).total_seconds())
track['musicinfo'] = track.get('musicinfo', {})
track['musicinfo']['playback_count'] = response_dict['items'][0]['statistics']['viewCount']
track['musicinfo']['comment_count'] = response_dict['items'][0]['statistics']['commentCount']
# Update tracks
self.tracks = tracks
self.start_time = int(time.time())
returnValue(True)
returnValue(False)
def get_tracks(self):
return self.tracks
def get_peers(self):
return self.listeners.keys()
| egbertbouman/billy | server/radio.py | Python | mit | 7,871 |
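Editor's note: `get_play_position` above walks elapsed wall-clock time through the track durations, wrapping the index with modulo so the playlist loops. The same logic in isolation:

```python
def play_position(elapsed, durations):
    """Return (track_index, seconds_into_track) for a looping playlist."""
    index = 0
    while elapsed > durations[index]:
        elapsed -= durations[index]
        index = (index + 1) % len(durations)
    return index, elapsed

assert play_position(250, [100, 100, 100]) == (2, 50)
assert play_position(350, [100, 100, 100]) == (0, 50)  # wraps around
```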
# ***************************************************************************
# * (c) 2020 Eliud Cabrera Castillo <e.cabrera-castillo@tum.de> *
# * *
# * This file is part of the FreeCAD CAx development system. *
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU Lesser General Public License (LGPL) *
# * as published by the Free Software Foundation; either version 2 of *
# * the License, or (at your option) any later version. *
# * for detail see the LICENCE text file. *
# * *
# * FreeCAD is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU Library General Public License for more details. *
# * *
# * You should have received a copy of the GNU Library General Public *
# * License along with FreeCAD; if not, write to the Free Software *
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
# * USA *
# * *
# ***************************************************************************
"""Provides utility functions that wrap around the Console methods.
The Console module has long function names, so we define some shorthands
that are suitable for use in every workbench. These shorthands also include
a newline character at the end of the string, so it doesn't have to be
added manually.
"""
## @package messages
# \ingroup draftutils
# \brief Provides utility functions that wrap around the Console methods.
## \addtogroup draftutils
# @{
import FreeCAD as App
def _msg(text, end="\n"):
"""Write messages to the console including the line ending."""
App.Console.PrintMessage(text + end)
def _wrn(text, end="\n"):
"""Write warnings to the console including the line ending."""
App.Console.PrintWarning(text + end)
def _err(text, end="\n"):
"""Write errors to the console including the line ending."""
App.Console.PrintError(text + end)
def _log(text, end="\n"):
"""Write messages to the log file including the line ending."""
App.Console.PrintLog(text + end)
## @}
| sanguinariojoe/FreeCAD | src/Mod/Draft/draftutils/messages.py | Python | lgpl-2.1 | 2,740 |
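Editor's note: inside a FreeCAD session the shorthands are drop-in replacements for the long Console calls:

```python
_msg("Placing object")     # App.Console.PrintMessage("Placing object\n")
_wrn("Overlapping edges")  # App.Console.PrintWarning("Overlapping edges\n")
_err("Operation failed")   # App.Console.PrintError("Operation failed\n")
```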
#!/usr/bin/env python
# encoding: utf-8
"""
Utility script to generate/modify Firebreath plug-in projects.
Original Author(s): Ben Loveridge, Richard Bateman
Created: 14 December 2009
License: Dual license model; choose one of two:
New BSD License
http://www.opensource.org/licenses/bsd-license.php
- or -
GNU Lesser General Public License, version 2.1
http://www.gnu.org/licenses/lgpl-2.1.html
Copyright 2009 Packet Pass, Inc. and the Firebreath development team
"""
import os, re, sys, time, uuid
from fbgen.gen_templates import *
from optparse import OptionParser
from ConfigParser import SafeConfigParser
def getTemplateFiles(basePath, origPath=None):
"""
Obtains the location to the template files. Discovers any newly added files automatically.
@param basePath location from which to start searching for files.
@param origPath used to strip path information from the returned values. Defaults to None.
@returns array of strings each entry representing a single file.
"""
if origPath is None:
origPath = basePath
plen = len(origPath) + len(os.path.sep)
files = []
for filename in os.listdir(basePath):
tmpName = os.path.join(basePath, filename)
if filename == '.' or filename == ".." or tmpName is None:
continue
if os.path.isdir(tmpName):
files.extend(getTemplateFiles(tmpName, origPath) )
else:
files.append(tmpName[plen:])
return files
def createDir(dirName):
"""
Creates a directory, even if it has to create parent directories to do so
"""
parentDir = os.path.dirname(dirName)
print "Parent of %s is %s" % (dirName, parentDir)
if os.path.isdir(parentDir):
print "Creating dir %s" % dirName
os.mkdir(dirName)
else:
createDir(parentDir)
createDir(dirName)
def Main():
"""
Parse the commandline and execute the appropriate actions.
"""
# Define the command-line interface via OptionParser
usage = "usage: %prog [options]"
parser = OptionParser(usage)
parser.add_option("-p", "--plugin-name", dest = "pluginName")
parser.add_option("-i", "--plugin-identifier", dest = "pluginIdent",
help = "3 or more alphanumeric characters (underscores allowed after first position)")
parser.add_option("-c", "--company-name", dest = "companyName")
parser.add_option("-d", "--company-domain", dest = "companyDomain")
parser.add_option("-g", "--disable-gui", dest = "disableGUI")
options, args = parser.parse_args()
if options.pluginName and options.pluginIdent and options.companyName and options.companyDomain:
options.interactive = False
else:
options.interactive = True
scriptDir = os.path.dirname(os.path.abspath(__file__) )
cfgFilename = os.path.join(scriptDir, ".fbgen.cfg")
cfgFile = SafeConfigParser()
cfgFile.read(cfgFilename)
# Instantiate the appropriate classes
plugin = Plugin(name = options.pluginName, ident = options.pluginIdent, disable_gui = options.disableGUI)
plugin.readCfg(cfgFile)
company = Company(name = options.companyName)
company.readCfg(cfgFile)
if options.interactive:
try:
plugin.promptValues()
company.promptValues()
except KeyboardInterrupt:
print "" # get off of the line where the KeyboardInterrupt happened
sys.exit(0) # terminate gracefully
plugin.updateCfg(cfgFile)
company.updateCfg(cfgFile)
guid = GUID(ident = plugin.ident, domain = company.domain)
# Generate the guids needed by the templates
generatedGuids = AttrDictSimple()
generatedGuids.GUIDS_TYPELIB = guid.generate("TYPELIB")
generatedGuids.GUIDS_CONTROLIF = guid.generate("CONTROLIF")
generatedGuids.GUIDS_CONTROL = guid.generate("CONTROL")
generatedGuids.GUIDS_JSIF = guid.generate("JSIF")
generatedGuids.GUIDS_JSOBJ = guid.generate("JSOBJ")
generatedGuids.GUIDS_EVTSRC = guid.generate("EVTSRC")
generatedGuids.GUIDS_INSTPROD = guid.generate("INSTPROD")
generatedGuids.GUIDS_INSTUPGR = guid.generate("INSTUPGR")
generatedGuids.GUIDS_INSTUPGR64 = guid.generate("INSTUPGR64")
generatedGuids.GUIDS_companydircomp = guid.generate("companydircomp")
generatedGuids.GUIDS_installdircomp = guid.generate("installdircomp")
# Time-related values used in templates
templateTime = AttrDictSimple(YEAR = time.strftime("%Y"))
# Save configuration for another go
cfgFile.write(open(cfgFilename, "wb") )
# Make sure we can get into the projects directory
basePath = os.path.join(scriptDir, "projects")
if not os.path.isdir(basePath):
try:
os.mkdir(basePath)
except:
print "Unable to create directory", basePath
sys.exit(1)
# Try to create a directory for this project
projPath = os.path.abspath(os.path.join(basePath, "%s" % plugin.ident))
if os.path.isdir(projPath):
try:
overwrite = raw_input("\nDirectory already exists. Continue anyway? [y/N] ")
except KeyboardInterrupt:
print "" # get off of the line where the KeyboardInterrupt happened
sys.exit(0) # terminate gracefully
if len(overwrite) == 0 or overwrite[0] not in ("Y", "y"):
print "\nAborting"
sys.exit(1)
else:
try:
os.mkdir(projPath)
        except OSError:
print "Failed to create project directory", projPath
sys.exit(1)
print "\nProcessing templates"
srcDir = os.path.join(scriptDir, "fbgen", "src")
srcDirLen = len(srcDir) + len(os.path.sep)
templateFiles = getTemplateFiles(srcDir)
for tpl in templateFiles:
try:
tplPath, tplFilename = os.path.split(tpl)
if tplFilename.startswith("Template"):
tplFilename = tplFilename.replace("Template", plugin.ident, 1)
if tplPath:
filename = os.path.join(projPath, tplPath, tplFilename)
else:
filename = os.path.join(projPath, tplFilename)
dirname = os.path.dirname(filename)
if not os.path.isdir(dirname):
createDir(dirname)
tplFile = os.path.join("fbgen", "src", tpl)
print tplFile
template = Template(tplFile)
            # Special case for binary files: copy bytes through unmodified
            if tplFilename == "background.png":
                input = open(tplFile, "rb")
                output = open(filename, "wb")
                output.write(input.read())
                input.close()
                output.close()
            else:
                f = open(filename, "wb")
                f.write(template.process(plugin, company, guid, generatedGuids, templateTime))
                f.close()
print " Processed", tpl
except:
print " Error processing", tpl
raise
print "Done. Files placed in", projPath
if __name__ == "__main__":
Main()
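# Example non-interactive invocation (option flags from the OptionParser
# definitions above; the argument values are illustrative only):
#
#   python fbgen.py -p MyPlugin -i myplugin -c "Example Co" -d example.com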
|
eSDK/esdk_uc_control_js
|
open_src/firebreath/fbgen.py
|
Python
|
apache-2.0
| 7,056
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
################################################################################
#
# RMG - Reaction Mechanism Generator
#
# Copyright (c) 2002-2010 Prof. William H. Green (whgreen@mit.edu) and the
# RMG Team (rmg_dev@mit.edu)
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the 'Software'),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
################################################################################
"""
This module contains the :class:`RMGDatabase` class, which is the primary class
for working with the RMG database.
"""
import os.path
from base import ForbiddenStructures
from thermo import ThermoDatabase
from transport import TransportDatabase
from kinetics import KineticsDatabase
from statmech import StatmechDatabase
from solvation import SolvationDatabase
# Module-level variable to store the (only) instance of RMGDatabase in use.
database = None
################################################################################
class RMGDatabase:
"""
The primary class for working with the RMG database.
"""
def __init__(self):
self.thermo = None
self.transport = None
self.forbiddenStructures = None
self.kinetics = None
self.statmech = None
self.solvation = None
# Store the newly created database in the module.
global database
# assert database is None, "Should only make one instance of RMGDatabase because it's stored as a module-level variable."
if database is None:
database = self
else:
import logging
logging.warning("Should only make one instance of RMGDatabase because it's stored as a module-level variable!")
logging.warning("Unexpected behaviour may result!")
def load(self,
path,
thermoLibraries=None,
transportLibraries=None,
reactionLibraries=None,
seedMechanisms=None,
kineticsFamilies=None,
kineticsDepositories=None,
statmechLibraries=None,
depository=True,
solvation=True,
):
"""
Load the RMG database from the given `path` on disk, where `path`
points to the top-level folder of the RMG database. If none of the
optional arguments are provided, then the entire database will be
loaded. You can use the optional arguments to specify that only certain
components of the database be loaded.
"""
self.loadThermo(os.path.join(path, 'thermo'), thermoLibraries, depository)
self.loadTransport(os.path.join(path, 'transport'), transportLibraries)
self.loadForbiddenStructures(os.path.join(path, 'forbiddenStructures.py'))
self.loadKinetics(os.path.join(path, 'kinetics'),
reactionLibraries,
seedMechanisms,
kineticsFamilies,
kineticsDepositories
)
self.loadStatmech(os.path.join(path, 'statmech'), statmechLibraries, depository)
if solvation:
self.loadSolvation(os.path.join(path, 'solvation'))
def loadThermo(self, path, thermoLibraries=None, depository=True):
"""
Load the RMG thermo database from the given `path` on disk, where
`path` points to the top-level folder of the RMG thermo database.
"""
self.thermo = ThermoDatabase()
self.thermo.load(path, thermoLibraries, depository)
def loadTransport(self, path, transportLibraries=None):
"""
Load the RMG transport database from the given 'path' on disk, where
'path' points to the top-level folder of the RMG transport database.
"""
self.transport = TransportDatabase()
self.transport.load(path, transportLibraries)
def loadForbiddenStructures(self, path):
"""
Load the RMG forbidden structures from the given `path` on disk, where
`path` points to the forbidden structures file.
"""
self.forbiddenStructures = ForbiddenStructures()
self.forbiddenStructures.load(path)
def loadKinetics(self,
path,
reactionLibraries=None,
seedMechanisms=None,
kineticsFamilies=None,
kineticsDepositories=None
):
"""
Load the RMG kinetics database from the given `path` on disk, where
`path` points to the top-level folder of the RMG kinetics database.
"""
kineticsLibraries = []
libraryOrder = []
if seedMechanisms is None and reactionLibraries is None:
kineticsLibraries = None
if seedMechanisms is not None:
for library in seedMechanisms:
kineticsLibraries.append(library)
libraryOrder.append((library,'Seed'))
if reactionLibraries is not None:
for library in reactionLibraries:
kineticsLibraries.append(library)
libraryOrder.append((library,'Reaction Library'))
self.kinetics = KineticsDatabase()
self.kinetics.libraryOrder = libraryOrder
self.kinetics.load(path,
families=kineticsFamilies,
libraries=kineticsLibraries,
depositories=kineticsDepositories
)
def loadSolvation(self, path):
"""
Load the RMG solvation database from the given `path` on disk, where
`path` points to the top-level folder of the RMG solvation database.
"""
self.solvation = SolvationDatabase()
self.solvation.load(path)
def loadStatmech(self, path, statmechLibraries=None, depository=True):
"""
Load the RMG statmech database from the given `path` on disk, where
`path` points to the top-level folder of the RMG statmech database.
"""
self.statmech = StatmechDatabase()
self.statmech.load(path, statmechLibraries, depository)
def loadOld(self, path):
"""
Load the old RMG database from the given `path` on disk, where `path`
points to the top-level folder of the old RMG database.
"""
self.thermo = ThermoDatabase()
self.thermo.loadOld(path)
self.transport = TransportDatabase()
#self.transport.loadOld(path) # Currently no loadOld import function available for transport groups
self.forbiddenStructures = ForbiddenStructures()
self.forbiddenStructures.loadOld(os.path.join(path, 'ForbiddenStructures.txt'))
self.kinetics = KineticsDatabase()
self.kinetics.loadOld(path)
self.statmech = StatmechDatabase()
self.statmech.loadOld(path)
def save(self, path):
"""
Save the RMG database to the given `path` on disk.
"""
if not os.path.exists(path): os.makedirs(path)
self.forbiddenStructures.save(os.path.join(path, 'forbiddenStructures.py'))
self.thermo.save(os.path.join(path, 'thermo'))
self.kinetics.save(os.path.join(path, 'kinetics'))
self.statmech.save(os.path.join(path, 'statmech'))
self.solvation.save(os.path.join(path, 'solvation'))
self.transport.save(os.path.join(path, 'transport'))
def saveOld(self, path):
"""
Save the old RMG database to the given `path` on disk.
"""
if not os.path.exists(path): os.makedirs(path)
self.thermo.saveOld(path)
self.transport.saveOld(path)
self.forbiddenStructures.saveOld(os.path.join(path, 'ForbiddenStructures.txt'))
self.kinetics.saveOld(path)
self.statmech.saveOld(path)
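# Minimal usage sketch (the path is illustrative and assumes a checkout of
# the RMG-database repository; per the load() docstring, omitting the
# optional arguments loads the entire database):
#
#   database = RMGDatabase()
#   database.load('/path/to/RMG-database/input')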
|
faribas/RMG-Py
|
rmgpy/data/rmg.py
|
Python
|
mit
| 8,998
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import re
def to_rust_ident(name):
name = name.replace("-", "_")
if name in ["static", "super", "box", "move"]: # Rust keywords
name += "_"
return name
def to_camel_case(ident):
return re.sub("(^|_|-)([a-z])", lambda m: m.group(2).upper(), ident.strip("_").strip("-"))
class Keyword(object):
def __init__(self, name, values, gecko_constant_prefix=None,
gecko_enum_prefix=None,
extra_gecko_values=None, extra_servo_values=None):
self.name = name
self.values = values.split()
if gecko_constant_prefix and gecko_enum_prefix:
raise TypeError("Only one of gecko_constant_prefix and gecko_enum_prefix can be specified")
self.gecko_constant_prefix = gecko_constant_prefix or \
"NS_STYLE_" + self.name.upper().replace("-", "_")
self.gecko_enum_prefix = gecko_enum_prefix
self.extra_gecko_values = (extra_gecko_values or "").split()
self.extra_servo_values = (extra_servo_values or "").split()
def gecko_values(self):
return self.values + self.extra_gecko_values
def servo_values(self):
return self.values + self.extra_servo_values
def values_for(self, product):
if product == "gecko":
return self.gecko_values()
elif product == "servo":
return self.servo_values()
else:
raise Exception("Bad product: " + product)
def gecko_constant(self, value):
if self.gecko_enum_prefix:
if value == "none":
return self.gecko_enum_prefix + "::None_"
else:
parts = value.replace("-moz-", "").split("-")
parts = [p.title() for p in parts]
return self.gecko_enum_prefix + "::" + "".join(parts)
else:
return self.gecko_constant_prefix + "_" + value.replace("-moz-", "").replace("-", "_").upper()
def needs_cast(self):
return self.gecko_enum_prefix is None
class Longhand(object):
def __init__(self, style_struct, name, animatable=None, derived_from=None, keyword=None,
predefined_type=None, custom_cascade=False, experimental=False, internal=False,
need_clone=False, need_index=False, gecko_ffi_name=None, depend_on_viewport_size=False):
self.name = name
self.keyword = keyword
self.predefined_type = predefined_type
self.ident = to_rust_ident(name)
self.camel_case = to_camel_case(self.ident)
self.style_struct = style_struct
self.experimental = ("layout.%s.enabled" % name) if experimental else None
self.custom_cascade = custom_cascade
self.internal = internal
self.need_index = need_index
self.gecko_ffi_name = gecko_ffi_name or "m" + self.camel_case
self.depend_on_viewport_size = depend_on_viewport_size
self.derived_from = (derived_from or "").split()
        # Require an explicit value: a plain bool default would make it too
        # easy to forget to specify whether a property is animatable.
        if animatable is None:
            raise TypeError("animatable should be specified for " + name)
if isinstance(animatable, bool):
self.animatable = animatable
else:
assert animatable == "True" or animatable == "False"
self.animatable = animatable == "True"
# NB: Animatable implies clone because a property animation requires a
# copy of the computed value.
#
# See components/style/helpers/animated_properties.mako.rs.
self.need_clone = need_clone or self.animatable
class Shorthand(object):
def __init__(self, name, sub_properties, experimental=False, internal=False):
self.name = name
self.ident = to_rust_ident(name)
self.camel_case = to_camel_case(self.ident)
self.derived_from = None
self.experimental = ("layout.%s.enabled" % name) if experimental else None
self.sub_properties = sub_properties
self.internal = internal
class Method(object):
def __init__(self, name, return_type=None, arg_types=None, is_mut=False):
self.name = name
self.return_type = return_type
self.arg_types = arg_types or []
self.is_mut = is_mut
def arg_list(self):
args = ["_: " + x for x in self.arg_types]
args = ["&mut self" if self.is_mut else "&self"] + args
return ", ".join(args)
def signature(self):
sig = "fn %s(%s)" % (self.name, self.arg_list())
if self.return_type:
sig = sig + " -> " + self.return_type
return sig
def declare(self):
return self.signature() + ";"
def stub(self):
return self.signature() + "{ unimplemented!() }"
class StyleStruct(object):
def __init__(self, name, inherited, gecko_name=None, additional_methods=None):
self.gecko_struct_name = "Gecko" + name
self.name = name
self.name_lower = name.lower()
self.ident = to_rust_ident(self.name_lower)
self.longhands = []
self.inherited = inherited
self.gecko_name = gecko_name or name
self.gecko_ffi_name = "nsStyle" + self.gecko_name
self.additional_methods = additional_methods or []
class PropertiesData(object):
def __init__(self, product):
self.product = product
self.style_structs = []
self.current_style_struct = None
self.longhands = []
self.longhands_by_name = {}
self.derived_longhands = {}
self.shorthands = []
def new_style_struct(self, *args, **kwargs):
style_struct = StyleStruct(*args, **kwargs)
self.style_structs.append(style_struct)
self.current_style_struct = style_struct
def active_style_structs(self):
return [s for s in self.style_structs if s.additional_methods or s.longhands]
def declare_longhand(self, name, products="gecko servo", **kwargs):
products = products.split()
if self.product not in products:
return
        longhand = Longhand(self.current_style_struct, name, **kwargs)
        self.current_style_struct.longhands.append(longhand)
        self.longhands.append(longhand)
        self.longhands_by_name[name] = longhand
        for name in longhand.derived_from:
            self.derived_longhands.setdefault(name, []).append(longhand)
        return longhand
def declare_shorthand(self, name, sub_properties, products="gecko servo", *args, **kwargs):
products = products.split()
if self.product not in products:
return
sub_properties = [self.longhands_by_name[s] for s in sub_properties]
shorthand = Shorthand(name, sub_properties, *args, **kwargs)
self.shorthands.append(shorthand)
return shorthand
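# Illustrative sketch (the property name and values are made up) of how
# Keyword derives Gecko constants when no enum prefix is given:
#
#   k = Keyword("border-style", "none solid dashed")
#   k.gecko_constant("solid")                      # -> "NS_STYLE_BORDER_STYLE_SOLID"
#   to_camel_case(to_rust_ident("border-style"))   # -> "BorderStyle"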
|
sliz1/servo
|
components/style/properties/data.py
|
Python
|
mpl-2.0
| 7,031
|
import time
import os
import sys
try:
    iterations = int(sys.argv[1])
except (IndexError, ValueError):
    iterations = 10
try:
    file = sys.argv[2]
    testFile = open(file, "r")
    testFile.close()
except (IndexError, IOError):
    file = "test.txt"
times = []
messages = []
messages.append("Selected test file: " + file)
messages.append("")
def display(iteration):
os.system("clear")
for line in messages:
print(line)
    if iteration != -1:
        counter = ""
        for i in range(iterations):
            if i + 1 <= iteration:
                counter = counter + "|"
            else:
                counter = counter + "-"
        print("\nIteration " + str(iteration) + "/" + str(iterations) + " " + counter)
for i in range(iterations):
display(i + 1)
currentTime = time.time()
os.system('cat ' + file + ' | python3 MACparser.py > /dev/null')
times.append(time.time() - currentTime)
messages.append("Average execution time: " + str(round(sum(times)/float(len(times)), 3)))
times = []
for i in range(iterations):
display(i + 1)
currentTime = time.time()
os.system('cat ' + file + ' | python3 MACparser.py -p > /dev/null')
times.append(time.time() - currentTime)
messages.append("Average execution time with partial flag: " + str(round(sum(times)/float(len(times)), 3)))
display(-1)
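# Example run: time 20 iterations against a custom capture file (both
# arguments are optional, as the try/except blocks above allow; the
# file name here is illustrative):
#
#   python3 test.py 20 sample_capture.txt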
|
0xGREG/MACparser
|
test.py
|
Python
|
gpl-3.0
| 1,317
|
''' Tests for netcdf '''
from __future__ import division, print_function, absolute_import
import os
from os.path import join as pjoin, dirname
import shutil
import tempfile
import warnings
from io import BytesIO
from glob import glob
from contextlib import contextmanager
import numpy as np
from numpy.testing import (assert_, assert_allclose, assert_raises,
assert_equal, run_module_suite)
from scipy.io.netcdf import netcdf_file
from scipy._lib._tmpdirs import in_tempdir
TEST_DATA_PATH = pjoin(dirname(__file__), 'data')
N_EG_ELS = 11 # number of elements for example variable
VARTYPE_EG = 'b' # var type for example variable
@contextmanager
def make_simple(*args, **kwargs):
f = netcdf_file(*args, **kwargs)
f.history = 'Created for a test'
f.createDimension('time', N_EG_ELS)
time = f.createVariable('time', VARTYPE_EG, ('time',))
time[:] = np.arange(N_EG_ELS)
time.units = 'days since 2008-01-01'
f.flush()
yield f
f.close()
def check_simple(ncfileobj):
'''Example fileobj tests '''
assert_equal(ncfileobj.history, b'Created for a test')
time = ncfileobj.variables['time']
assert_equal(time.units, b'days since 2008-01-01')
assert_equal(time.shape, (N_EG_ELS,))
assert_equal(time[-1], N_EG_ELS-1)
def test_read_write_files():
# test round trip for example file
cwd = os.getcwd()
try:
tmpdir = tempfile.mkdtemp()
os.chdir(tmpdir)
with make_simple('simple.nc', 'w') as f:
pass
# read the file we just created in 'a' mode
with netcdf_file('simple.nc', 'a') as f:
check_simple(f)
# add something
f._attributes['appendRan'] = 1
# To read the NetCDF file we just created::
with netcdf_file('simple.nc') as f:
# Using mmap is the default
assert_(f.use_mmap)
check_simple(f)
assert_equal(f._attributes['appendRan'], 1)
# Read it in append (and check mmap is off)
with netcdf_file('simple.nc', 'a') as f:
assert_(not f.use_mmap)
check_simple(f)
assert_equal(f._attributes['appendRan'], 1)
# Now without mmap
with netcdf_file('simple.nc', mmap=False) as f:
# Using mmap is the default
assert_(not f.use_mmap)
check_simple(f)
# To read the NetCDF file we just created, as file object, no
# mmap. When n * n_bytes(var_type) is not divisible by 4, this
# raised an error in pupynere 1.0.12 and scipy rev 5893, because
# calculated vsize was rounding up in units of 4 - see
# http://www.unidata.ucar.edu/software/netcdf/docs/netcdf.html
with open('simple.nc', 'rb') as fobj:
with netcdf_file(fobj) as f:
# by default, don't use mmap for file-like
assert_(not f.use_mmap)
check_simple(f)
# Read file from fileobj, with mmap
with open('simple.nc', 'rb') as fobj:
with netcdf_file(fobj, mmap=True) as f:
assert_(f.use_mmap)
check_simple(f)
# Again read it in append mode (adding another att)
with open('simple.nc', 'r+b') as fobj:
with netcdf_file(fobj, 'a') as f:
assert_(not f.use_mmap)
check_simple(f)
f.createDimension('app_dim', 1)
var = f.createVariable('app_var', 'i', ('app_dim',))
var[:] = 42
# And... check that app_var made it in...
with netcdf_file('simple.nc') as f:
check_simple(f)
assert_equal(f.variables['app_var'][:], 42)
    finally:
        os.chdir(cwd)
        shutil.rmtree(tmpdir)
def test_read_write_sio():
eg_sio1 = BytesIO()
with make_simple(eg_sio1, 'w') as f1:
str_val = eg_sio1.getvalue()
eg_sio2 = BytesIO(str_val)
with netcdf_file(eg_sio2) as f2:
check_simple(f2)
# Test that error is raised if attempting mmap for sio
eg_sio3 = BytesIO(str_val)
assert_raises(ValueError, netcdf_file, eg_sio3, 'r', True)
# Test 64-bit offset write / read
eg_sio_64 = BytesIO()
with make_simple(eg_sio_64, 'w', version=2) as f_64:
str_val = eg_sio_64.getvalue()
eg_sio_64 = BytesIO(str_val)
with netcdf_file(eg_sio_64) as f_64:
check_simple(f_64)
assert_equal(f_64.version_byte, 2)
# also when version 2 explicitly specified
eg_sio_64 = BytesIO(str_val)
with netcdf_file(eg_sio_64, version=2) as f_64:
check_simple(f_64)
assert_equal(f_64.version_byte, 2)
def test_read_example_data():
# read any example data files
for fname in glob(pjoin(TEST_DATA_PATH, '*.nc')):
with netcdf_file(fname, 'r') as f:
pass
with netcdf_file(fname, 'r', mmap=False) as f:
pass
def test_itemset_no_segfault_on_readonly():
# Regression test for ticket #1202.
# Open the test file in read-only mode.
with warnings.catch_warnings():
warnings.simplefilter("ignore")
filename = pjoin(TEST_DATA_PATH, 'example_1.nc')
with netcdf_file(filename, 'r') as f:
time_var = f.variables['time']
# time_var.assignValue(42) should raise a RuntimeError--not seg. fault!
assert_raises(RuntimeError, time_var.assignValue, 42)
def test_write_invalid_dtype():
dtypes = ['int64', 'uint64']
if np.dtype('int').itemsize == 8: # 64-bit machines
dtypes.append('int')
if np.dtype('uint').itemsize == 8: # 64-bit machines
dtypes.append('uint')
with netcdf_file(BytesIO(), 'w') as f:
f.createDimension('time', N_EG_ELS)
for dt in dtypes:
assert_raises(ValueError, f.createVariable, 'time', dt, ('time',))
def test_flush_rewind():
stream = BytesIO()
with make_simple(stream, mode='w') as f:
        f.createDimension('x', 4)
v = f.createVariable('v', 'i2', ['x'])
v[:] = 1
f.flush()
len_single = len(stream.getvalue())
f.flush()
len_double = len(stream.getvalue())
assert_(len_single == len_double)
def test_dtype_specifiers():
# Numpy 1.7.0-dev had a bug where 'i2' wouldn't work.
# Specifying np.int16 or similar only works from the same commit as this
# comment was made.
with make_simple(BytesIO(), mode='w') as f:
f.createDimension('x',4)
f.createVariable('v1', 'i2', ['x'])
f.createVariable('v2', np.int16, ['x'])
f.createVariable('v3', np.dtype(np.int16), ['x'])
def test_ticket_1720():
io = BytesIO()
items = [0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9]
with netcdf_file(io, 'w') as f:
f.history = 'Created for a test'
f.createDimension('float_var', 10)
float_var = f.createVariable('float_var', 'f', ('float_var',))
float_var[:] = items
float_var.units = 'metres'
f.flush()
contents = io.getvalue()
io = BytesIO(contents)
with netcdf_file(io, 'r') as f:
assert_equal(f.history, b'Created for a test')
float_var = f.variables['float_var']
assert_equal(float_var.units, b'metres')
assert_equal(float_var.shape, (10,))
assert_allclose(float_var[:], items)
def test_mmaps_segfault():
filename = pjoin(TEST_DATA_PATH, 'example_1.nc')
with warnings.catch_warnings():
warnings.simplefilter("error")
with netcdf_file(filename, mmap=True) as f:
x = f.variables['lat'][:]
# should not raise warnings
del x
def doit():
with netcdf_file(filename, mmap=True) as f:
return f.variables['lat'][:]
# should not crash
with warnings.catch_warnings():
warnings.simplefilter("ignore")
x = doit()
x.sum()
def test_zero_dimensional_var():
io = BytesIO()
with make_simple(io, 'w') as f:
v = f.createVariable('zerodim', 'i2', [])
# This is checking that .isrec returns a boolean - don't simplify it
# to 'assert not ...'
assert v.isrec is False, v.isrec
f.flush()
def test_byte_gatts():
# Check that global "string" atts work like they did before py3k
# unicode and general bytes confusion
with in_tempdir():
filename = 'g_byte_atts.nc'
f = netcdf_file(filename, 'w')
f._attributes['holy'] = b'grail'
f._attributes['witch'] = 'floats'
f.close()
f = netcdf_file(filename, 'r')
assert_equal(f._attributes['holy'], b'grail')
assert_equal(f._attributes['witch'], b'floats')
f.close()
def test_open_append():
# open 'w' put one attr
with in_tempdir():
filename = 'append_dat.nc'
f = netcdf_file(filename, 'w')
f._attributes['Kilroy'] = 'was here'
f.close()
        # open again in 'a', read the att and add a new one
f = netcdf_file(filename, 'a')
assert_equal(f._attributes['Kilroy'], b'was here')
f._attributes['naughty'] = b'Zoot'
f.close()
# open yet again in 'r' and check both atts
f = netcdf_file(filename, 'r')
assert_equal(f._attributes['Kilroy'], b'was here')
assert_equal(f._attributes['naughty'], b'Zoot')
f.close()
def test_maskandscale():
t = np.linspace(20, 30, 15)
t[3] = 100
tm = np.ma.masked_greater(t, 99)
fname = pjoin(TEST_DATA_PATH, 'example_2.nc')
with netcdf_file(fname, maskandscale=True) as f:
Temp = f.variables['Temperature']
assert_equal(Temp.missing_value, 9999)
assert_equal(Temp.add_offset, 20)
assert_equal(Temp.scale_factor, np.float32(0.01))
found = Temp[:].compressed()
del Temp # Remove ref to mmap, so file can be closed.
expected = np.round(tm.compressed(), 2)
assert_allclose(found, expected)
with in_tempdir():
newfname = 'ms.nc'
f = netcdf_file(newfname, 'w', maskandscale=True)
f.createDimension('Temperature', len(tm))
temp = f.createVariable('Temperature', 'i', ('Temperature',))
temp.missing_value = 9999
temp.scale_factor = 0.01
temp.add_offset = 20
temp[:] = tm
f.close()
with netcdf_file(newfname, maskandscale=True) as f:
Temp = f.variables['Temperature']
assert_equal(Temp.missing_value, 9999)
assert_equal(Temp.add_offset, 20)
assert_equal(Temp.scale_factor, np.float32(0.01))
expected = np.round(tm.compressed(), 2)
found = Temp[:].compressed()
del Temp
assert_allclose(found, expected)
if __name__ == "__main__":
run_module_suite()
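# Quick illustration (not part of the test suite) of the round trip the
# helpers above exercise:
#
#   with make_simple(BytesIO(), 'w') as f:
#       assert f.variables['time'][-1] == N_EG_ELS - 1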
|
jlcarmic/producthunt_simulator
|
venv/lib/python2.7/site-packages/scipy/io/tests/test_netcdf.py
|
Python
|
mit
| 10,833
|
import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Logit'] , ['MovingAverage'] , ['Seasonal_MonthOfYear'] , ['MLP'] );
|
antoinecarme/pyaf
|
tests/model_control/detailed/transf_Logit/model_control_one_enabled_Logit_MovingAverage_Seasonal_MonthOfYear_MLP.py
|
Python
|
bsd-3-clause
| 163
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
PredicatePanel.py
---------------------
Date : January 2015
Copyright : (C) 2015 by Arnaud Morvan
Email : arnaud dot morvan at camptocamp dot com
Contributors : Arnaud Morvan
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Arnaud Morvan'
__date__ = 'January 2015'
__copyright__ = '(C) 2015, Arnaud Morvan'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from qgis.PyQt import uic
from qgis.PyQt.QtWidgets import QCheckBox
from qgis.core import Qgis, QgsVectorLayer, QgsWkbTypes
from processing.core.parameters import ParameterGeometryPredicate
pluginPath = os.path.split(os.path.dirname(__file__))[0]
WIDGET, BASE = uic.loadUiType(
os.path.join(pluginPath, 'ui', 'widgetGeometryPredicateSelector.ui'))
class GeometryPredicateSelectionPanel(BASE, WIDGET):
unusablePredicates = {
QgsWkbTypes.PointGeometry: {
QgsWkbTypes.PointGeometry: ('touches', 'crosses'),
QgsWkbTypes.LineGeometry: ('equals', 'contains', 'overlaps'),
QgsWkbTypes.PolygonGeometry: ('equals', 'contains', 'overlaps')
},
QgsWkbTypes.LineGeometry: {
QgsWkbTypes.PointGeometry: ('equals', 'within', 'overlaps'),
QgsWkbTypes.LineGeometry: [],
QgsWkbTypes.PolygonGeometry: ('equals', 'contains', 'overlaps')
},
QgsWkbTypes.PolygonGeometry: {
QgsWkbTypes.PointGeometry: ('equals', 'within', 'overlaps'),
QgsWkbTypes.LineGeometry: ('equals', 'within', 'overlaps'),
            QgsWkbTypes.PolygonGeometry: ('crosses',)  # trailing comma: one-element tuple, not a string
}
}
    def __init__(self,
                 enabledPredicates=ParameterGeometryPredicate.predicates,
                 rows=4):
        super(GeometryPredicateSelectionPanel, self).__init__(None)
        self.setupUi(self)
        self.enabledPredicates = enabledPredicates
        self.leftLayer = None
        self.rightLayer = None
        self.setRows(rows)
        self.updatePredicates()
def onLeftLayerChange(self):
sender = self.sender()
self.leftLayer = sender.itemData(sender.currentIndex())
self.updatePredicates()
def onRightLayerChange(self):
sender = self.sender()
self.rightLayer = sender.itemData(sender.currentIndex())
self.updatePredicates()
def updatePredicates(self):
if (isinstance(self.leftLayer, QgsVectorLayer)
and isinstance(self.rightLayer, QgsVectorLayer)):
leftType = self.leftLayer.geometryType()
rightType = self.rightLayer.geometryType()
unusablePredicates = self.unusablePredicates[leftType][rightType]
else:
unusablePredicates = []
for predicate in ParameterGeometryPredicate.predicates:
widget = self.getWidget(predicate)
            widget.setEnabled(predicate in self.enabledPredicates
                              and predicate not in unusablePredicates)
def setRows(self, rows):
widgets = []
for predicate in ParameterGeometryPredicate.predicates:
widget = self.getWidget(predicate)
self.gridLayout.removeWidget(widget)
widgets.append(widget)
        for i in range(len(widgets)):
            widget = widgets[i]
            # integer division so the column index stays an int on Python 3
            self.gridLayout.addWidget(widget, i % rows, i // rows)
def getWidget(self, predicate):
return self.findChild(QCheckBox, predicate + 'Box')
def value(self):
values = []
for predicate in ParameterGeometryPredicate.predicates:
widget = self.getWidget(predicate)
if widget.isEnabled() and widget.isChecked():
values.append(predicate)
return values
def setValue(self, values):
if values:
for predicate in ParameterGeometryPredicate.predicates:
widget = self.getWidget(predicate)
widget.setChecked(predicate in values)
return True
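# Minimal usage sketch (predicate names taken from the table above; the
# panel is normally embedded in a Processing dialog):
#
#   panel = GeometryPredicateSelectionPanel()
#   panel.setValue(['within', 'contains'])
#   selected = panel.value()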
|
alexbruy/QGIS
|
python/plugins/processing/gui/GeometryPredicateSelectionPanel.py
|
Python
|
gpl-2.0
| 4,731
|
# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Check that all of the certs on all service endpoints validate.
"""
import unittest
import boto.rds
class CertVerificationTest(unittest.TestCase):
rds = True
ssl = True
def test_certs(self):
for region in boto.rds.regions():
c = region.connect()
c.get_all_dbinstances()
|
lochiiconnectivity/boto
|
tests/integration/rds/test_cert_verification.py
|
Python
|
mit
| 1,501
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
import tendenci.apps.social_auth.fields
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Association',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('server_url', models.CharField(max_length=255)),
('handle', models.CharField(max_length=255)),
('secret', models.CharField(max_length=255)),
('issued', models.IntegerField()),
('lifetime', models.IntegerField()),
('assoc_type', models.CharField(max_length=64)),
],
),
migrations.CreateModel(
name='Nonce',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('server_url', models.CharField(max_length=255)),
('timestamp', models.IntegerField()),
('salt', models.CharField(max_length=40)),
],
),
migrations.CreateModel(
name='UserSocialAuth',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('provider', models.CharField(max_length=32)),
('uid', models.CharField(max_length=255)),
('extra_data', tendenci.apps.social_auth.fields.JSONField(blank=True)),
('user', models.ForeignKey(related_name='social_auth', to=settings.AUTH_USER_MODEL)),
],
),
migrations.AlterUniqueTogether(
name='usersocialauth',
unique_together=set([('provider', 'uid')]),
),
]
|
alirizakeles/tendenci
|
tendenci/apps/social_auth/migrations/0001_initial.py
|
Python
|
gpl-3.0
| 2,008
|
# -*- coding: utf-8 -*-
from copy import deepcopy
from time import time
from django.conf import settings
from django.db.models import Q
from django.db.models.base import ModelBase
from django.utils import tree
from django.utils.encoding import force_unicode
from haystack.constants import DJANGO_CT, VALID_FILTERS, FILTER_SEPARATOR, DEFAULT_ALIAS
from haystack.exceptions import MoreLikeThisError, FacetingError
from haystack.models import SearchResult
from haystack.utils.loading import UnifiedIndex
VALID_GAPS = ['year', 'month', 'day', 'hour', 'minute', 'second']
def log_query(func):
"""
A decorator for pseudo-logging search queries. Used in the ``SearchBackend``
to wrap the ``search`` method.
"""
def wrapper(obj, query_string, *args, **kwargs):
start = time()
try:
return func(obj, query_string, *args, **kwargs)
finally:
stop = time()
if settings.DEBUG:
from haystack import connections
connections[obj.connection_alias].queries.append({
'query_string': query_string,
'additional_args': args,
'additional_kwargs': kwargs,
'time': "%.3f" % (stop - start),
})
return wrapper
class EmptyResults(object):
hits = 0
docs = []
def __len__(self):
return 0
def __getitem__(self, k):
if isinstance(k, slice):
return []
else:
raise IndexError("It's not here.")
class BaseSearchBackend(object):
"""
Abstract search engine base class.
"""
# Backends should include their own reserved words/characters.
RESERVED_WORDS = []
RESERVED_CHARACTERS = []
def __init__(self, connection_alias, **connection_options):
self.connection_alias = connection_alias
self.timeout = connection_options.get('TIMEOUT', 10)
self.include_spelling = connection_options.get('INCLUDE_SPELLING', False)
self.batch_size = connection_options.get('BATCH_SIZE', 1000)
def update(self, index, iterable):
"""
Updates the backend when given a SearchIndex and a collection of
documents.
This method MUST be implemented by each backend, as it will be highly
specific to each one.
"""
raise NotImplementedError
def remove(self, obj_or_string):
"""
Removes a document/object from the backend. Can be either a model
instance or the identifier (i.e. ``app_name.model_name.id``) in the
event the object no longer exists.
This method MUST be implemented by each backend, as it will be highly
specific to each one.
"""
raise NotImplementedError
    def clear(self, models=None, commit=True):
"""
Clears the backend of all documents/objects for a collection of models.
This method MUST be implemented by each backend, as it will be highly
specific to each one.
"""
raise NotImplementedError
@log_query
def search(self, query_string, sort_by=None, start_offset=0, end_offset=None,
fields='', highlight=False, facets=None, date_facets=None, query_facets=None,
narrow_queries=None, spelling_query=None,
limit_to_registered_models=None, result_class=None, **kwargs):
"""
Takes a query to search on and returns dictionary.
The query should be a string that is appropriate syntax for the backend.
The returned dictionary should contain the keys 'results' and 'hits'.
The 'results' value should be an iterable of populated SearchResult
objects. The 'hits' should be an integer count of the number of matched
results the search backend found.
This method MUST be implemented by each backend, as it will be highly
specific to each one.
"""
raise NotImplementedError
def prep_value(self, value):
"""
Hook to give the backend a chance to prep an attribute value before
sending it to the search engine. By default, just force it to unicode.
"""
return force_unicode(value)
def more_like_this(self, model_instance, additional_query_string=None, result_class=None):
"""
Takes a model object and returns results the backend thinks are similar.
This method MUST be implemented by each backend, as it will be highly
specific to each one.
"""
raise NotImplementedError("Subclasses must provide a way to fetch similar record via the 'more_like_this' method if supported by the backend.")
def build_schema(self, fields):
"""
Takes a dictionary of fields and returns schema information.
This method MUST be implemented by each backend, as it will be highly
specific to each one.
"""
raise NotImplementedError("Subclasses must provide a way to build their schema.")
def build_models_list(self):
"""
Builds a list of models for searching.
The ``search`` method should use this and the ``django_ct`` field to
narrow the results (unless the user indicates not to). This helps ignore
any results that are not currently handled models and ensures
consistent caching.
"""
from haystack import connections
models = []
for model in connections[self.connection_alias].get_unified_index().get_indexed_models():
models.append(u"%s.%s" % (model._meta.app_label, model._meta.module_name))
return models
# Alias for easy loading within SearchQuery objects.
SearchBackend = BaseSearchBackend
class SearchNode(tree.Node):
"""
Manages an individual condition within a query.
Most often, this will be a lookup to ensure that a certain word or phrase
appears in the documents being indexed. However, it also supports filtering
types (such as 'lt', 'gt', 'in' and others) for more complex lookups.
This object creates a tree, with children being a list of either more
``SQ`` objects or the expressions/values themselves.
"""
AND = 'AND'
OR = 'OR'
default = AND
def __repr__(self):
return '<SQ: %s %s>' % (self.connector, self.as_query_string(self._repr_query_fragment_callback))
def _repr_query_fragment_callback(self, field, filter_type, value):
return '%s%s%s=%s' % (field, FILTER_SEPARATOR, filter_type, force_unicode(value).encode('utf8'))
def as_query_string(self, query_fragment_callback):
"""
Produces a portion of the search query from the current SQ and its
children.
"""
result = []
for child in self.children:
if hasattr(child, 'as_query_string'):
result.append(child.as_query_string(query_fragment_callback))
else:
expression, value = child
field, filter_type = self.split_expression(expression)
result.append(query_fragment_callback(field, filter_type, value))
conn = ' %s ' % self.connector
query_string = conn.join(result)
if query_string:
if self.negated:
query_string = 'NOT (%s)' % query_string
elif len(self.children) != 1:
query_string = '(%s)' % query_string
return query_string
def split_expression(self, expression):
"""Parses an expression and determines the field and filter type."""
parts = expression.split(FILTER_SEPARATOR)
field = parts[0]
if len(parts) == 1 or parts[-1] not in VALID_FILTERS:
filter_type = 'exact'
else:
filter_type = parts.pop()
return (field, filter_type)
class SQ(Q, SearchNode):
"""
Manages an individual condition within a query.
Most often, this will be a lookup to ensure that a certain word or phrase
appears in the documents being indexed. However, it also supports filtering
types (such as 'lt', 'gt', 'in' and others) for more complex lookups.
"""
pass
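# Illustrative sketch of combining SQ objects (mirroring Django's Q syntax);
# the field names are made up:
#
#   sq = SQ(content='hello') & ~SQ(author='bob')
#   # repr(sq) renders roughly as:
#   #   <SQ: AND (content__exact=hello AND NOT (author__exact=bob))>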
class BaseSearchQuery(object):
"""
A base class for handling the query itself.
This class acts as an intermediary between the ``SearchQuerySet`` and the
``SearchBackend`` itself.
The ``SearchQuery`` object maintains a tree of ``SQ`` objects. Each ``SQ``
object supports what field it looks up against, what kind of lookup (i.e.
the __'s), what value it's looking for, if it's a AND/OR/NOT and tracks
any children it may have. The ``SearchQuery.build_query`` method starts with
the root of the tree, building part of the final query at each node until
the full final query is ready for the ``SearchBackend``.
Backends should extend this class and provide implementations for
``build_query_fragment``, ``clean`` and ``run``. See the ``solr`` backend for an example
implementation.
"""
def __init__(self, using=DEFAULT_ALIAS):
self.query_filter = SearchNode()
self.order_by = []
self.models = set()
self.boost = {}
self.start_offset = 0
self.end_offset = None
self.highlight = False
self.facets = set()
self.date_facets = {}
self.query_facets = []
self.narrow_queries = set()
self._raw_query = None
self._raw_query_params = {}
self._more_like_this = False
self._mlt_instance = None
self._results = None
self._hit_count = None
self._facet_counts = None
self._spelling_suggestion = None
self.result_class = SearchResult
from haystack import connections
self._using = using
self.backend = connections[self._using].get_backend()
def __str__(self):
return self.build_query()
def __getstate__(self):
"""For pickling."""
obj_dict = self.__dict__.copy()
del(obj_dict['backend'])
return obj_dict
def __setstate__(self, obj_dict):
"""For unpickling."""
from haystack import connections
self.__dict__.update(obj_dict)
self.backend = connections[self._using].get_backend()
def has_run(self):
"""Indicates if any query has been been run."""
return None not in (self._results, self._hit_count)
def build_params(self, spelling_query=None):
"""Generates a list of params to use when searching."""
kwargs = {
'start_offset': self.start_offset,
}
if self.order_by:
kwargs['sort_by'] = self.order_by
if self.end_offset is not None:
kwargs['end_offset'] = self.end_offset
if self.highlight:
kwargs['highlight'] = self.highlight
if self.facets:
kwargs['facets'] = list(self.facets)
if self.date_facets:
kwargs['date_facets'] = self.date_facets
if self.query_facets:
kwargs['query_facets'] = self.query_facets
if self.narrow_queries:
kwargs['narrow_queries'] = self.narrow_queries
if spelling_query:
kwargs['spelling_query'] = spelling_query
if self.boost:
kwargs['boost'] = self.boost
if self.result_class:
kwargs['result_class'] = self.result_class
return kwargs
def run(self, spelling_query=None):
"""Builds and executes the query. Returns a list of search results."""
final_query = self.build_query()
kwargs = self.build_params(spelling_query=spelling_query)
results = self.backend.search(final_query, **kwargs)
self._results = results.get('results', [])
self._hit_count = results.get('hits', 0)
self._facet_counts = self.post_process_facets(results)
self._spelling_suggestion = results.get('spelling_suggestion', None)
def run_mlt(self):
"""
Executes the More Like This. Returns a list of search results similar
to the provided document (and optionally query).
"""
if self._more_like_this is False or self._mlt_instance is None:
raise MoreLikeThisError("No instance was provided to determine 'More Like This' results.")
kwargs = {
'result_class': self.result_class,
}
additional_query_string = self.build_query()
results = self.backend.more_like_this(self._mlt_instance, additional_query_string, **kwargs)
self._results = results.get('results', [])
self._hit_count = results.get('hits', 0)
def run_raw(self):
"""Executes a raw query. Returns a list of search results."""
kwargs = self.build_params()
kwargs.update(self._raw_query_params)
results = self.backend.search(self._raw_query, **kwargs)
self._results = results.get('results', [])
self._hit_count = results.get('hits', 0)
self._facet_counts = results.get('facets', {})
self._spelling_suggestion = results.get('spelling_suggestion', None)
def get_count(self):
"""
Returns the number of results the backend found for the query.
If the query has not been run, this will execute the query and store
the results.
"""
if self._hit_count is None:
# Limit the slice to 10 so we get a count without consuming
# everything.
if not self.end_offset:
self.end_offset = 10
if self._more_like_this:
# Special case for MLT.
self.run_mlt()
elif self._raw_query:
# Special case for raw queries.
self.run_raw()
else:
self.run()
return self._hit_count
def get_results(self):
"""
Returns the results received from the backend.
If the query has not been run, this will execute the query and store
the results.
"""
if self._results is None:
if self._more_like_this:
# Special case for MLT.
self.run_mlt()
elif self._raw_query:
# Special case for raw queries.
self.run_raw()
else:
self.run()
return self._results
def get_facet_counts(self):
"""
Returns the facet counts received from the backend.
If the query has not been run, this will execute the query and store
the results.
"""
if self._facet_counts is None:
self.run()
return self._facet_counts
def get_spelling_suggestion(self, preferred_query=None):
"""
Returns the spelling suggestion received from the backend.
If the query has not been run, this will execute the query and store
the results.
"""
if self._spelling_suggestion is None:
self.run(spelling_query=preferred_query)
return self._spelling_suggestion
def boost_fragment(self, boost_word, boost_value):
"""Generates query fragment for boosting a single word/value pair."""
return "%s^%s" % (boost_word, boost_value)
def matching_all_fragment(self):
"""Generates the query that matches all documents."""
return '*'
def build_query(self):
"""
Interprets the collected query metadata and builds the final query to
be sent to the backend.
"""
query = self.query_filter.as_query_string(self.build_query_fragment)
if not query:
# Match all.
query = self.matching_all_fragment()
if len(self.models):
models = sorted(['%s:%s.%s' % (DJANGO_CT, model._meta.app_label, model._meta.module_name) for model in self.models])
models_clause = ' OR '.join(models)
if query != self.matching_all_fragment():
final_query = '(%s) AND (%s)' % (query, models_clause)
else:
final_query = models_clause
else:
final_query = query
if self.boost:
boost_list = []
for boost_word, boost_value in self.boost.items():
boost_list.append(self.boost_fragment(boost_word, boost_value))
final_query = "%s %s" % (final_query, " ".join(boost_list))
return final_query
def combine(self, rhs, connector=SQ.AND):
if connector == SQ.AND:
self.add_filter(rhs.query_filter)
elif connector == SQ.OR:
self.add_filter(rhs.query_filter, use_or=True)
# Methods for backends to implement.
def build_query_fragment(self, field, filter_type, value):
"""
Generates a query fragment from a field, filter type and a value.
Must be implemented in backends as this will be highly backend specific.
"""
raise NotImplementedError("Subclasses must provide a way to generate query fragments via the 'build_query_fragment' method.")
# Standard methods to alter the query.
def clean(self, query_fragment):
"""
Provides a mechanism for sanitizing user input before presenting the
value to the backend.
A basic (override-able) implementation is provided.
"""
words = query_fragment.split()
cleaned_words = []
for word in words:
if word in self.backend.RESERVED_WORDS:
                word = word.lower()
for char in self.backend.RESERVED_CHARACTERS:
word = word.replace(char, '\\%s' % char)
cleaned_words.append(word)
return ' '.join(cleaned_words)
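    # For example, with RESERVED_WORDS = ['AND'] and RESERVED_CHARACTERS = ['('],
    # clean('foo AND (bar') returns 'foo and \\(bar'.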
def add_filter(self, query_filter, use_or=False):
"""
Adds a SQ to the current query.
"""
# TODO: consider supporting add_to_query callbacks on q objects
if use_or:
connector = SQ.OR
else:
connector = SQ.AND
if self.query_filter and query_filter.connector != SQ.AND and len(query_filter) > 1:
self.query_filter.start_subtree(connector)
subtree = True
else:
subtree = False
for child in query_filter.children:
if isinstance(child, tree.Node):
self.query_filter.start_subtree(connector)
self.add_filter(child)
self.query_filter.end_subtree()
else:
expression, value = child
self.query_filter.add((expression, value), connector)
connector = query_filter.connector
if query_filter.negated:
self.query_filter.negate()
if subtree:
self.query_filter.end_subtree()
def add_order_by(self, field):
"""Orders the search result by a field."""
self.order_by.append(field)
def clear_order_by(self):
"""
Clears out all ordering that has been already added, reverting the
query to relevancy.
"""
self.order_by = []
def add_model(self, model):
"""
Restricts the query requiring matches in the given model.
This builds upon previous additions, so you can limit to multiple models
by chaining this method several times.
"""
if not isinstance(model, ModelBase):
raise AttributeError('The model being added to the query must derive from Model.')
self.models.add(model)
def set_limits(self, low=None, high=None):
"""Restricts the query by altering either the start, end or both offsets."""
if low is not None:
self.start_offset = int(low)
if high is not None:
self.end_offset = int(high)
def clear_limits(self):
"""Clears any existing limits."""
self.start_offset, self.end_offset = 0, None
def add_boost(self, term, boost_value):
"""Adds a boosted term and the amount to boost it to the query."""
self.boost[term] = boost_value
def raw_search(self, query_string, **kwargs):
"""
Runs a raw query (no parsing) against the backend.
This method causes the SearchQuery to ignore the standard query
generating facilities, running only what was provided instead.
Note that any kwargs passed along will override anything provided
to the rest of the ``SearchQuerySet``.
"""
self._raw_query = query_string
self._raw_query_params = kwargs
def more_like_this(self, model_instance):
"""
Allows backends with support for "More Like This" to return results
similar to the provided instance.
"""
self._more_like_this = True
self._mlt_instance = model_instance
def add_highlight(self):
"""Adds highlighting to the search results."""
self.highlight = True
def add_field_facet(self, field):
"""Adds a regular facet on a field."""
from haystack import connections
self.facets.add(connections[self._using].get_unified_index().get_facet_fieldname(field))
def add_date_facet(self, field, start_date, end_date, gap_by, gap_amount=1):
"""Adds a date-based facet on a field."""
from haystack import connections
        if gap_by not in VALID_GAPS:
raise FacetingError("The gap_by ('%s') must be one of the following: %s." % (gap_by, ', '.join(VALID_GAPS)))
details = {
'start_date': start_date,
'end_date': end_date,
'gap_by': gap_by,
'gap_amount': gap_amount,
}
self.date_facets[connections[self._using].get_unified_index().get_facet_fieldname(field)] = details
def add_query_facet(self, field, query):
"""Adds a query facet on a field."""
from haystack import connections
self.query_facets.append((connections[self._using].get_unified_index().get_facet_fieldname(field), query))
def add_narrow_query(self, query):
"""
Narrows a search to a subset of all documents per the query.
Generally used in conjunction with faceting.
"""
self.narrow_queries.add(query)
def set_result_class(self, klass):
"""
Sets the result class to use for results.
Overrides any previous usages. If ``None`` is provided, Haystack will
revert back to the default ``SearchResult`` object.
"""
if klass is None:
klass = SearchResult
self.result_class = klass
def post_process_facets(self, results):
# Handle renaming the facet fields. Undecorate and all that.
from haystack import connections
revised_facets = {}
field_data = connections[self._using].get_unified_index().all_searchfields()
for facet_type, field_details in results.get('facets', {}).items():
temp_facets = {}
for field, field_facets in field_details.items():
fieldname = field
if field in field_data and hasattr(field_data[field], 'get_facet_for_name'):
fieldname = field_data[field].get_facet_for_name()
temp_facets[fieldname] = field_facets
revised_facets[facet_type] = temp_facets
return revised_facets
def using(self, using=None):
"""
Allows for overriding which connection should be used. This
disables the use of routers when performing the query.
If ``None`` is provided, it has no effect on what backend is used.
"""
return self._clone(using=using)
def _reset(self):
"""
Resets the instance's internal state to appear as though no query has
been run before. Only need to tweak a few variables we check.
"""
self._results = None
self._hit_count = None
self._facet_counts = None
self._spelling_suggestion = None
def _clone(self, klass=None, using=None):
if using is None:
using = self._using
else:
from haystack import connections
klass = connections[using].query
if klass is None:
klass = self.__class__
clone = klass(using=using)
clone.query_filter = deepcopy(self.query_filter)
clone.order_by = self.order_by[:]
clone.models = self.models.copy()
clone.boost = self.boost.copy()
clone.highlight = self.highlight
clone.facets = self.facets.copy()
clone.date_facets = self.date_facets.copy()
clone.query_facets = self.query_facets[:]
clone.narrow_queries = self.narrow_queries.copy()
clone.start_offset = self.start_offset
clone.end_offset = self.end_offset
clone.result_class = self.result_class
clone._raw_query = self._raw_query
clone._raw_query_params = self._raw_query_params
return clone
class BaseEngine(object):
backend = BaseSearchBackend
query = BaseSearchQuery
unified_index = UnifiedIndex
def __init__(self, using=None):
if using is None:
using = DEFAULT_ALIAS
self.using = using
self.options = settings.HAYSTACK_CONNECTIONS.get(self.using, {})
self.queries = []
self._index = None
def get_backend(self):
return self.backend(self.using, **self.options)
def get_query(self):
return self.query(using=self.using)
def reset_queries(self):
self.queries = []
def get_unified_index(self):
if self._index is None:
self._index = self.unified_index()
return self._index
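# Sketch of wiring up a concrete engine (the class names are illustrative;
# see the solr backend mentioned in BaseSearchQuery's docstring for a real
# implementation):
#
#   class MyEngine(BaseEngine):
#       backend = MySearchBackend   # subclass of BaseSearchBackend
#       query = MySearchQuery       # subclass of BaseSearchQuery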
|
gregplaysguitar/django-haystack
|
haystack/backends/__init__.py
|
Python
|
bsd-3-clause
| 26,733
|
from .models import MongoConnectionModel
from .fields import MongoField
|
kgritesh/django-mongo-connect-field
|
mongo_connect_field/__init__.py
|
Python
|
mit
| 71
|
from sha3 import sha3_256
from ethereum.utils import big_endian_to_int
def sha3(seed):
return sha3_256(bytes(seed)).digest()
# colors
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
def DEBUG(*args, **kargs):
print(FAIL + repr(args) + repr(kargs) + ENDC)
colors = ['\033[9%dm' % i for i in range(0, 7)]
colors += ['\033[4%dm' % i for i in range(1, 8)]
num_colors = len(colors)
def cstr(num, txt):
if isinstance(num, bytes):
num = big_endian_to_int(num)
return '%s%s%s' % (colors[num % len(colors)], txt, ENDC)
def cprint(num, txt):
print cstr(num, txt)
def phx(x):
return x.encode('hex')[:8]
if __name__ == '__main__':
for i in range(len(colors)):
cprint(i, 'test')
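# Example: cstr(3, 'node') wraps the text in the fourth foreground color
# and resets afterwards, i.e. '\033[93mnode\033[0m'.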
|
HydraChain/hydrachain
|
hydrachain/consensus/utils.py
|
Python
|
mit
| 750
|
class SuperClass(object):
def __init__(self):
pass
|
caot/intellij-community
|
python/testData/refactoring/pullup/properties/SuperClass.py
|
Python
|
apache-2.0
| 63
|
#!/usr/bin/env python3
import numpy as np
import pandas as pd
import json
import treetaggerwrapper
import nltk
import string
import re
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.ensemble import RandomForestClassifier
from sklearn import datasets
from sklearn.utils import shuffle
from sklearn.metrics import accuracy_score
from nltk import word_tokenize
from nltk.corpus import stopwords
nltk.download('stopwords')
# Italian snowball stemmer: reduces inflected forms (plurals etc.) to stems
stemmer = nltk.stem.snowball.ItalianStemmer(ignore_stopwords=True)
def loadData(fileName):
with open(fileName) as trainFile:
commentsData = json.load(trainFile)
commentsFrame = pd.DataFrame.from_dict(commentsData['comments'], orient='columns')
# dataframe conversion into list type to feed it into CountVectorizer fit_transform
return commentsFrame[['body']].values.flatten().tolist()
def cleanData(corpus):
stopList = stopwords.words('italian')
# tagger = treetaggerwrapper.TreeTagger(TAGLANG='it')
# removes apostrophes and splits into words
filteredCorpus = [phrase.replace('l\'', 'la ').replace('un\'', 'una ').replace('m\'', 'mi ').replace('t\'', 'ti ').replace('c\'', 'ci ').replace('v\'', 'vi ').replace('s\'', 'si ').lower().split() for phrase in corpus]
# remove punctuation
    filteredCorpus = [[re.sub(r"[^\w\d'\s]+", ' ', word) for word in phrase] for phrase in filteredCorpus]
# lemmatize words
# filteredCorpus = [tagger.make_tags(unicode(phrase,"utf-8")) for phrase in filteredCorpus]
# remove stopwords and join the words back into one string separated by space
filteredCorpus = [" ".join([word for word in phrase if word not in stopList and not word.isdigit()]) for phrase in filteredCorpus]
return filteredCorpus
def splitDataForTraining(data):
# splits data into trainData, crossValidationData and testData
# according to split percentages
split = [60, 20, 20]
trainStart = 0
trainEnd = int(len(data) * split[0] / 100)
crossValidationStart = int(len(data) * split[0] / 100)
crossValidationEnd = int(len(data) * (split[0] + split[1]) / 100)
testStart = int(len(data) * (split[0] + split[1]) / 100)
testEnd = int(len(data))
dataTrain = [data[i] for i in range(trainStart, trainEnd)]
dataCrossValidation = [data[i] for i in range(crossValidationStart, crossValidationEnd)]
dataTest = [data[i] for i in range(testStart, testEnd)]
return dataTrain, dataCrossValidation, dataTest
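# e.g. with 100 comments, splitDataForTraining returns slices of 60 / 20 / 20
# items for train / cross-validation / test respectively.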
# adds language stemmer to count vectorizer by overriding build_analyser()
# class StemmedCountVectorizer(CountVectorizer):
#
# def build_analyzer(self):
# italianStemmer = nltk.stem.SnowballStemmer('italian')
# analyzer = super(StemmedCountVectorizer, self).build_analyzer()
# return lambda doc: ([italianStemmer.stem(w) for w in analyzer(doc)])
def stemWords(doc):
analyzer = CountVectorizer(lowercase=False).build_analyzer()
return (stemmer.stem(w) for w in analyzer(doc))
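# Indicative sketch of what stemWords yields (exact stems depend on the NLTK
# version, so treat these values as illustrative only):
#   list(stemWords('giocatori bravi')) -> e.g. ['giocator', 'brav']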
def tokenize(text):
# text = "".join([ch for ch in text if ch not in string.punctuation])
    text = re.sub(r"(^|\W)\d+($|\W)", " ", text)
    tokens = nltk.word_tokenize(text)
    stems = [stemmer.stem(token) for token in tokens]
return stems
# input files with rejected (bad) and approved (good) comments
fileBad = 'not_approved.json'
fileGood = 'approved.json'
# loads comments
goodComments = loadData(fileGood)
badComments = loadData(fileBad)
print(sorted(goodComments[:20]))
# cleans comments
goodComments = cleanData(goodComments)
badComments = cleanData(badComments)
print(goodComments[:20])
# adds labels for each comment: 0 for good, 1 for bad
goodResults = [0 for x in goodComments]
badResults = [1 for x in badComments]
# shuffles the datasets
comments, results = shuffle(goodComments + badComments, goodResults + badResults, random_state=0)
(commentsTrain, commentsCrossValidation, commentsTest) = splitDataForTraining(comments)
(resultsTrain, resultsCrossValidation, resultsTest) = splitDataForTraining(results)
print("Train comments size: ", len(commentsTrain))
# http://www.nltk.org/_modules/nltk/stem/snowball.html
# Creating bag-of-words using CountVectorizer
vectorizer = CountVectorizer(analyzer=stemWords,
stop_words=stopwords.words('italian'),
tokenizer=tokenize,
strip_accents=None,
preprocessor=None,
max_features=None,
min_df=0,
ngram_range=(1, 2)
)
# fit_transform() does two functions: First, it fits the model
# and learns the vocabulary; second, it transforms our training data
# into feature vectors. The input to fit_transform should be a list of
# strings.
Xtrain = vectorizer.fit_transform(commentsTrain).toarray()
Ytrain = resultsTrain
Xtest = vectorizer.transform(commentsTest).toarray()
Ytest = resultsTest
print("Xtrain size: ", len(Xtrain))
print("Ytrain size: ", len(Ytrain))
print("Xtest size: ", len(Xtest))
print("Ytest size: ", len(Ytest))
# Take a look at the words in the vocabulary
vocabulary = vectorizer.get_feature_names()
print("Vocabulary features count: ", len(vocabulary))
print("Vocabulary features: ", sorted(vocabulary)[:30])
# For each, print the vocabulary word and the number of times it
# appears in the training set
# for tag, count in zip(vocabulary, dist):
# print count, tag
# exit
sampleLeafOptions = [1, 3, 9, 30, 90, 300, 900]
estimatorsOptions = [1, 3, 9, 30, 90, 300, 900]
depthOptions = [1, 3, 9, 30, 90, 300, 900]
singleOption = [1]
estimators = estimatorsOptions[3]
leafSize = sampleLeafOptions[0]
depth = depthOptions[6]
parameters = singleOption
# for leafSize in sampleLeafOptions :
for param in parameters:
# Lastly, classify the text using random forest tree classifier.
classifier = RandomForestClassifier(
n_jobs=4,
max_depth=depth,
min_samples_split=2,
min_samples_leaf=leafSize,
max_leaf_nodes=None,
n_estimators=estimators,
random_state=1
)
classifier.fit(Xtrain, Ytrain)
print("__________________")
print("leafSize: ", leafSize)
print("Feature importances: ", classifier.feature_importances_)
# print("Random Forest score: ", classifier.score(Xtest, Ytest))
# Train and Test Accuracy
print("Train Accuracy :: ", accuracy_score(Ytrain, classifier.predict(Xtrain)))
print("Test Accuracy :: ", accuracy_score(Ytest, classifier.predict(Xtest)))
# Find how to print/plot AUC and ROC
prediction = classifier.predict(
vectorizer.transform([
'sei semplicemente un idiota, stai zitto e vergognati',
'hai rotto le p@lle, perchè non vai a quel paese?',
'quel giocatore mi sembra davvero poco bravo, io lo terrei sempre in panchina',
'bravo lui, scarsi tutti gli altri. Non potevano fare meglio di così'
]).toarray()
)
print("Prediction for aggressive phrase [should be 1, 1, 0, 0]: ", prediction)
# Copy the results to a pandas dataframe with an "id" column and
# a "sentiment" column
# output = pd.DataFrame(data={"id": test["id"], "sentiment": prediction})
# Use pandas to write the comma-separated output file
# output.to_csv("Bag_of_Words_model.csv", index=False, quoting=3)
|
drinky78/minosse
|
commenti.py
|
Python
|
mit
| 7,376
|
# This code is so you can run the samples without installing the package
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
#
import cocos
from cocos.director import director
import pyglet
class KeyDisplay(cocos.layer.Layer):
is_event_handler = True #: enable pyglet's events
def __init__(self):
super( KeyDisplay, self ).__init__()
self.text = cocos.text.Label("", x=100, y=280 )
# To keep track of which keys are pressed:
self.keys_pressed = set()
self.update_text()
self.add(self.text)
def update_text(self):
key_names = [pyglet.window.key.symbol_string (k) for k in self.keys_pressed]
text = 'Keys: '+','.join (key_names)
# Update self.text
self.text.element.text = text
def on_key_press (self, key, modifiers):
"""This function is called when a key is pressed.
'key' is a constant indicating which key was pressed.
'modifiers' is a bitwise or of several constants indicating which
modifiers are active at the time of the press (ctrl, shift, capslock, etc.)
"""
self.keys_pressed.add (key)
self.update_text()
def on_key_release (self, key, modifiers):
"""This function is called when a key is released.
'key' is a constant indicating which key was pressed.
'modifiers' is a bitwise or of several constants indicating which
modifiers are active at the time of the press (ctrl, shift, capslock, etc.)
Constants are the ones from pyglet.window.key
"""
self.keys_pressed.remove (key)
self.update_text()
class MouseDisplay(cocos.layer.Layer):
# If you want that your layer receives events
# you must set this variable to 'True',
# otherwise it won't receive any event.
is_event_handler = True
def __init__(self):
super( MouseDisplay, self ).__init__()
self.posx = 100
self.posy = 240
self.text = cocos.text.Label('No mouse events yet', font_size=18, x=self.posx, y=self.posy )
self.add( self.text )
def update_text (self, x, y):
text = 'Mouse @ %d,%d' % (x, y)
self.text.element.text = text
self.text.element.x = self.posx
self.text.element.y = self.posy
def on_mouse_motion (self, x, y, dx, dy):
"""Called when the mouse moves over the app window with no button pressed
(x, y) are the physical coordinates of the mouse
(dx, dy) is the distance vector covered by the mouse pointer since the
last call.
"""
self.update_text (x, y)
def on_mouse_drag (self, x, y, dx, dy, buttons, modifiers):
"""Called when the mouse moves over the app window with some button(s) pressed
(x, y) are the physical coordinates of the mouse
(dx, dy) is the distance vector covered by the mouse pointer since the
last call.
'buttons' is a bitwise or of pyglet.window.mouse constants LEFT, MIDDLE, RIGHT
'modifiers' is a bitwise or of pyglet.window.key modifier constants
(values like 'SHIFT', 'OPTION', 'ALT')
"""
self.update_text (x, y)
def on_mouse_press (self, x, y, buttons, modifiers):
"""This function is called when any mouse button is pressed
(x, y) are the physical coordinates of the mouse
'buttons' is a bitwise or of pyglet.window.mouse constants LEFT, MIDDLE, RIGHT
'modifiers' is a bitwise or of pyglet.window.key modifier constants
(values like 'SHIFT', 'OPTION', 'ALT')
"""
self.posx, self.posy = director.get_virtual_coordinates (x, y)
self.update_text (x,y)
if __name__ == "__main__":
director.init(resizable=True)
# Run a scene with our event displayers:
director.run( cocos.scene.Scene( KeyDisplay(), MouseDisplay() ) )
|
shadowmint/nwidget
|
lib/cocos2d-0.5.5/samples/handling_events.py
|
Python
|
apache-2.0
| 4,085
|
# --------------------------------------------------------
# Fully Convolutional Instance-aware Semantic Segmentation
# Copyright (c) 2017 Microsoft
# Licensed under The Apache-2.0 License [see LICENSE for details]
# Written by Yi Li
# --------------------------------------------------------
from skimage.draw import polygon
import numpy as np
import cv2
def encodeMask(M):
"""
Encode binary mask M using run-length encoding.
:param M (bool 2D array) : binary mask to encode
:return: R (object RLE) : run-length encoding of binary mask
"""
[h, w] = M.shape
M = M.flatten(order='F')
N = len(M)
counts_list = []
pos = 0
# counts
counts_list.append(1)
diffs = np.logical_xor(M[0:N - 1], M[1:N])
for diff in diffs:
if diff:
pos += 1
counts_list.append(1)
else:
counts_list[pos] += 1
# if array starts from 1. start with 0 counts for 0
if M[0] == 1:
counts_list = [0] + counts_list
return {'size': [h, w],
'counts': counts_list,
}
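# Worked example (illustrative): M = [[0, 1], [1, 1]] flattens column-major to
# [0, 1, 1, 1], giving counts [1, 3] (one 0, then three 1s); had M started
# with a 1, a leading 0 count would be prepended as above.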
def mask_voc2coco(voc_masks, voc_boxes, im_height, im_width, binary_thresh = 0.4):
from dataset.pycocotools.mask import encode as encodeMask_c
num_pred = len(voc_masks)
assert(num_pred==voc_boxes.shape[0])
mask_img = np.zeros((im_height, im_width, num_pred), dtype=np.uint8, order='F')
for i in xrange(num_pred):
pred_box = np.round(voc_boxes[i, :4]).astype(int)
pred_mask = voc_masks[i]
pred_mask = cv2.resize(pred_mask.astype(np.float32), (pred_box[2] - pred_box[0] + 1, pred_box[3] - pred_box[1] + 1))
mask_img[pred_box[1]:pred_box[3]+1, pred_box[0]:pred_box[2]+1, i] = pred_mask >= binary_thresh
coco_mask = encodeMask_c(mask_img)
return coco_mask
|
vincentlooi/FCIS
|
lib/utils/mask_voc2coco.py
|
Python
|
apache-2.0
| 1,803
|
"""
API's subpackage:
Tools to assist Django
"""
def open_Django_Proj(path_to_proj):
"""
    To run methods that work with Django objects, we
    need to make Python aware of the Django project
"""
import os, sys
# This is so Django knows where to find stuff.
os.environ.setdefault(
"DJANGO_SETTINGS_MODULE",
"{}.settings".format(os.path.basename(path_to_proj))
)
sys.path.append(path_to_proj)
# This is so my local_settings.py gets loaded.
os.chdir(path_to_proj)
# This is so models get loaded.
from django.core.wsgi import get_wsgi_application
return get_wsgi_application()
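# Hypothetical usage (path illustrative): loading the WSGI application makes
# the project's settings and models importable from standalone scripts:
#   application = open_Django_Proj('/path/to/myproject')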
def list_djg_apps(path_to_django_proj):
"""
    List the Django apps available in a Django Project
"""
import os
from gasp.oss.info import list_folders_subfiles
# Get project name
projectName = os.path.basename(path_to_django_proj)
# List folders and files in the folders
projFolders = list_folders_subfiles(
path_to_django_proj, files_format='.py',
only_filename=True
)
apps = []
    # Check if the folder is an app
for folder in projFolders:
if os.path.basename(folder) == projectName:
continue
if '__init__.py' in projFolders[folder] or \
'apps.py' in projFolders[folder]:
apps.append(os.path.basename(folder))
return apps
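# Hypothetical usage (path illustrative): folders holding an __init__.py or
# apps.py, other than the settings package, are reported as apps:
#   apps = list_djg_apps('/path/to/myproject')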
|
JoaquimPatriarca/senpy-for-gis
|
gasp/djg/__init__.py
|
Python
|
gpl-3.0
| 1,455
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2009-2011 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from qmf.console import Session
from sys import exit, argv, stdin
import time
cmds = ['SetJobAttribute', 'HoldJob', 'ReleaseJob', 'RemoveJob']
cmdarg = len(argv) > 1 and argv[1]
jobid = len(argv) > 2 and argv[2]
url = len(argv) > 3 and argv[3] or "amqp://localhost:5672"
if cmdarg not in cmds:
print "error unknown command: ", cmdarg
print "available commands are: ",cmds
exit(1)
try:
    session = Session()
broker = session.addBroker(url)
schedulers = session.getObjects(_class="scheduler", _package="com.redhat.grid")
except Exception, e:
print "unable to access broker or scheduler"
exit(1)
result = schedulers[0]._invoke(cmdarg,[jobid,"test"],[None])
if result.status:
print "error invoking: ", cmdarg
print result.text
session.delBroker(broker)
exit(1)
session.delBroker(broker)
|
djw8605/htcondor
|
src/condor_contrib/mgmt/qmf/test/jobcontrol.py
|
Python
|
apache-2.0
| 1,434
|
pybabel extract -F babel.cfg -o messages.pot .
pybabel init -i messages.pot -d translations -l zh_Hans_CN
#pybabel update -i messages.pot -d translations
pybabel compile -d translations
|
raysinensis/tcgaAPP
|
babelscript.py
|
Python
|
mit
| 186
|
import numpy as np
import tensorflow as tf
import tensorflow.contrib.slim as slim
from utils import conv2d_flipkernel, adjecent_matrix, adjecent_sparse
def dot(x, y, sparse=False):
if sparse:
return tf.sparse_tensor_dense_matmul(x, y)
else:
return tf.matmul(x, y)
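# Minimal sketch of the sparse/dense switch above (shapes and values are
# placeholders, not part of the model):
#   sp = tf.SparseTensor(indices=[[0, 0], [1, 1]], values=[1.0, 1.0],
#                        dense_shape=[2, 2])
#   y = dot(sp, tf.ones([2, 2]), sparse=True)  # sparse_tensor_dense_matmul path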
def kernel_net(coord, weight, config):
x = tf.reshape(coord, [-1, 8])
x = slim.fully_connected(x, 32, activation_fn=tf.nn.relu, weights_initializer=tf.truncated_normal_initializer(stddev=0.01), scope="fc_1")
x = slim.fully_connected(x, 64, activation_fn=tf.nn.relu, weights_initializer=tf.truncated_normal_initializer(stddev=0.01), scope="fc_2")
x = slim.fully_connected(x, 1, activation_fn=None, weights_initializer=tf.truncated_normal_initializer(stddev=0.01), scope="fc_3")
return tf.reshape(x, [-1, config.nodesize, config.nodesize])
def ir_Block(X, Adj, Dist, Support, Coord, S, config):
ka = config.ka
k = config.k # Number of value iterations performed
t = config.t
ch_i = config.ch_i # Channels in input layer
ch_h = config.ch_h # Channels in initial hidden layer
ch_q = config.ch_q # Channels in q layer (~actions)
state_batch_size = config.statebatchsize # k+1 state inputs for each channel
img_s = config.nodesize
diag = np.zeros([img_s, img_s])
np.fill_diagonal(diag, 1.0)
DO_SHARE=None
degree = []
P = []
for i in range(ch_q):
with tf.variable_scope('p_'+str(i)):
coeff = kernel_net(Coord, Dist, config)
coeff = coeff * Support
P.append(coeff)
P_fb = []
for i in range(ch_q):
with tf.variable_scope('pb_'+str(i)):
coeff = kernel_net(Coord, Dist, config)
coeff = coeff * Support
P_fb.append(coeff)
P = tf.transpose(tf.stack(P), [1,0,2,3])
P_fb = tf.transpose(tf.stack(P_fb), [1,0,2,3])
r_ = X
r_repeat = []
for j in range(ch_q):
r_repeat.append(r_)
r_repeat = tf.stack(r_repeat)
r_repeat = tf.transpose(r_repeat, [1,0,2])
r_repeat = tf.expand_dims(r_repeat, axis=-1)
q = tf.matmul(P, r_repeat)
v = tf.reduce_max(q, axis=1, keep_dims=True, name="v")
v_ = tf.reshape(v, [-1, img_s])
v_repeat = []
for i in range(ch_q):
v_repeat.append(v_)
v_repeat = tf.stack(v_repeat)
v_repeat = tf.transpose(v_repeat, [1,0,2])
v_repeat = tf.expand_dims(v_repeat, axis=-1)
for i in range(0, k-1):
q1 = tf.matmul(P, r_repeat)
q2 = tf.matmul(P_fb, v_repeat)
q = q1 + q2
v = tf.reduce_max(q, axis=1, keep_dims=True, name="v")
v_ = tf.reshape(v, [-1, img_s])
v_repeat = []
for j in range(ch_q):
v_repeat.append(v_)
v_repeat = tf.stack(v_repeat)
v_repeat = tf.transpose(v_repeat, [1,0,2])
v_repeat = tf.expand_dims(v_repeat, axis=-1)
q1 = tf.matmul(P, r_repeat)
q2 = tf.matmul(P_fb, v_repeat)
q = q1 + q2
bs = tf.shape(q)[0]
rprn = tf.reshape(tf.tile(tf.reshape(tf.range(bs), [-1, 1]), [1, state_batch_size]), [-1])
ins = tf.cast(tf.reshape(S, [-1]), tf.int32)
idx_in = tf.transpose(tf.stack([ins, rprn]), [1, 0])
v_idx = tf.gather_nd(tf.transpose(Adj, [2,0,1]), idx_in, name="v_out")
v_out_rp = []
for j in range(state_batch_size):
v_out_rp.append(v_)
v_out_rp = tf.stack(v_out_rp)
v_out_rp = tf.transpose(v_out_rp, [1,0,2])
v_out_rp = tf.reshape(v_out_rp, [-1, img_s])
logits = tf.multiply(v_idx, v_out_rp)
# add logits
# output = 2.0*np.pi*tf.nn.sigmoid(tf.matmul(q_out_l1, w_o))
# softmax output weights
# logits = tf.matmul(q_out, w_o)
output = tf.nn.softmax(logits, name="output")
return v_, logits, output
|
sufengniu/GVIN
|
irregular/IL/model.py
|
Python
|
mit
| 3,747
|
import time
import simplejson as json
import threading
import socket
from .request import Request, extended_decoder_hook
from .rpc_wrapper import RPCWrapper
from mfp.utils import QuittableThread
from mfp import log
from .worker_pool import WorkerPool
def blather(func):
from datetime import datetime
def inner(self, *args, **kwargs):
if self.node_id in (1, None):
print("%s DEBUG %s -- enter" % (datetime.now(), func.__name__))
rv = func(self, *args, **kwargs)
if self.node_id in (1, None):
print("%s DEBUG %s -- leave (%s)" % (datetime.now(), func.__name__, rv))
return rv
return inner
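# Illustrative use of the tracing decorator above (method name hypothetical;
# any method of an object with a node_id attribute qualifies):
#   @blather
#   def manage(self, peer_id, sock): ...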
class RPCHost (QuittableThread):
'''
RPCHost -- create and manage connections and proxy objects. Both client and
server need an RPCHost, one per process.
'''
SYNC_MAGIC = b"[ SYNC ]"
class SyncError (Exception):
pass
class RecvError (Exception):
pass
class RPCError (Exception):
pass
class RequestError (Exception):
pass
def __init__(self, status_cb=None):
QuittableThread.__init__(self)
# FIXME -- one lock/condition per RPCHost means lots of
# unneeded waking up if lots of requests are queued
self.lock = threading.Lock()
self.condition = threading.Condition(self.lock)
self.pending = {}
self.node_id = None
self.fdsockets = {}
self.status_cb = status_cb
self.served_classes = {}
self.managed_sockets = {}
self.peers_by_socket = {}
self.read_workers = WorkerPool(self.dispatch_rpcdata, 15)
def __repr__(self):
return "<RPCHost node=%s>" % self.node_id
def manage(self, peer_id, sock):
if peer_id not in self.managed_sockets:
self.managed_sockets[peer_id] = sock
self.peers_by_socket[sock] = peer_id
self.notify_peer(peer_id)
if self.status_cb:
cbthread = QuittableThread(target=self.status_cb, args=(peer_id, "manage"))
cbthread.start()
def unmanage(self, peer_id):
if peer_id in self.managed_sockets:
# remove this peer as a publisher for any classes
for clsname, cls in RPCWrapper.rpctype.items():
if peer_id in cls.publishers:
cls.publishers.remove(peer_id)
oldsock = self.managed_sockets[peer_id]
del self.managed_sockets[peer_id]
del self.peers_by_socket[oldsock]
if oldsock.fileno() in self.fdsockets:
del self.fdsockets[oldsock.fileno()]
if self.status_cb:
cbthread = QuittableThread(target=self.status_cb, args=(peer_id, "unmanage"))
cbthread.start()
def notify_peer(self, peer_id):
req = Request("publish", dict(classes=list(self.served_classes.keys())))
self.put(req, peer_id)
self.wait(req)
def notify_all(self):
for peer_id in self.managed_sockets:
self.notify_peer(peer_id)
def publish(self, cls):
'''
RPCHost.publish: Register a class as constructable on this end
'''
self.served_classes[cls.__name__] = cls
cls.local = True
self.notify_all()
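    # Sketch of publish/subscribe (RemoteThing is a hypothetical RPCWrapper
    # subclass): the serving side calls host.publish(RemoteThing); a peer then
    # calls host.subscribe(RemoteThing) and can construct instances remotely.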
def unpublish(self, cls):
cls.local = False
cls.rpchost = None
del self.served_classes[cls.__name__]
self.notify_all()
def subscribe(self, cls):
'''
RPCHost.subscribe: Wait for a class to become available
'''
while not cls.publishers:
time.sleep(0.1)
def put(self, req, peer_id):
from datetime import datetime
# find the right socket
sock = self.managed_sockets.get(peer_id)
if sock is None:
print("[%s] RPCHost.put: peer_id %s has no mapped socket" % (datetime.now(),
peer_id))
print(req.serialize())
raise Exception()
# is this a request? if so, put it in the pending dict
if req.method is not None:
if req.request_id is not None:
self.pending[req.request_id] = req
req.state = Request.SUBMITTED
# write the data to the socket
try:
jdata = req.serialize()
with self.lock:
sock.send(self.SYNC_MAGIC)
sock.send(b"% 8d" % len(jdata))
sock.send(jdata.encode())
except Exception as e:
print("[%s] RPCHost.put: SEND error: %s" % (datetime.now(), e))
print(req.serialize())
raise Exception()
def wait(self, req, timeout=None):
import datetime
endtime = None
if timeout is not None:
endtime = datetime.datetime.now() + datetime.timedelta(seconds=timeout)
with self.lock:
while req.state not in (Request.RESPONSE_RCVD, Request.RPC_ERROR):
self.condition.wait(0.1)
if self.join_req:
return False
elif timeout is not None and datetime.datetime.now() > endtime:
log.warning("rpc_host: Request timed out after %s sec -- %s" %
(timeout, req))
raise Exception()
if req.state == Request.RPC_ERROR:
raise RPCHost.RPCError()
def dispatch_rpcdata(self, rpc_worker, rpcdata):
try:
json_data, peer_id = rpcdata
obj = json.loads(json_data, object_hook=extended_decoder_hook)
except Exception as e:
log.error("Can't parse JSON for peer:", peer_id, json_data)
log.debug_traceback()
return True
req = Request.from_dict(obj)
# is someone waiting on this response?
rpc_worker.data = "Request %s started" % req.request_id
if req.is_response() and req.request_id in self.pending:
rpc_worker.data = "Request %s processing response" % req.request_id
oldreq = self.pending.get(req.request_id)
del self.pending[req.request_id]
oldreq.result = req.result
oldreq.state = req.state
oldreq.diagnostic = req.diagnostic
with self.lock:
self.condition.notify()
elif req.is_request():
rpc_worker.data = "Request %s calling local %s (%s)" % (req.request_id, req.method, req.params)
# actually call the local handler
self.handle_request(req, peer_id)
# and send back the response
rpc_worker.data = "Request %s sending response" % req.request_id
if req.request_id is not None:
self.put(req, peer_id)
rpc_worker.data = "Request %s done (%s)" % (req.request_id, req.method)
return True
def run(self):
'''
RPCHost.run: perform IO on managed sockets, dispatch data
'''
self.read_workers.start()
if RPCWrapper.rpchost is None:
RPCWrapper.rpchost = self
import select
self.fdsockets = {}
while not self.join_req:
rdy = None
for s in self.managed_sockets.values():
if s.fileno() not in self.fdsockets:
self.fdsockets[s.fileno()] = s
try:
sockets = list(self.fdsockets.keys())
if sockets:
rdy, _w, _x = select.select(list(self.fdsockets.keys()), [], [], 0.1)
else:
time.sleep(0.1)
except Exception as e:
print("select exception:", e)
if not rdy:
continue
syncbytes = 8
sync = b''
for rsock in rdy:
jdata = b''
retry = 1
while retry:
sock = self.fdsockets.get(rsock)
if sock is None:
retry = 0
jdata = None
continue
try:
sync = sync[syncbytes:]
syncbit = sock.recv(syncbytes)
if not syncbit:
raise self.RecvError()
sync += syncbit
if sync != RPCHost.SYNC_MAGIC:
syncbytes = 1
retry = 1
raise self.SyncError()
else:
syncbytes = 8
retry = 0
jlen = sock.recv(8)
jlen = int(jlen)
recvlen = 0
while recvlen < jlen:
jdata += sock.recv(jlen-recvlen)
recvlen = len(jdata)
if recvlen < jlen:
log.warning("RPCHost: got short packet (%d of %d)"
% (recvlen, jlen))
except RPCHost.SyncError as e:
log.warning("RPCHost: sync error, resyncing")
pass
except (socket.error, RPCHost.RecvError) as e:
log.warning("RPCHost: communication error")
retry = 0
jdata = None
deadpeer = self.peers_by_socket[sock]
self.unmanage(deadpeer)
except Exception as e:
log.error("RPCHost: unhandled exception", type(e), e)
log.debug(jdata)
log.debug_traceback()
retry = 0
jdata = b""
if jdata is not None and len(jdata) >= jlen:
peer_id = self.peers_by_socket.get(sock)
self.read_workers.submit((jdata, peer_id))
if self.node_id == 0:
peers = list(self.managed_sockets.keys())
for node in peers:
req = Request("exit_request", {})
self.put(req, node)
self.wait(req)
del self.managed_sockets[node]
elif 0 in self.managed_sockets:
req = Request("exit_notify", {})
self.put(req, 0)
self.wait(req)
for clsname, cls in list(self.served_classes.items()):
self.unpublish(cls)
for clsname, cls in RPCWrapper.rpctype.items():
cls.publishers = []
if RPCWrapper.rpchost == self:
RPCWrapper.rpchost = None
self.read_workers.finish()
def handle_request(self, req, peer_id):
from datetime import datetime
method = req.method
rpcdata = req.params
rpcid = rpcdata.get('rpcid')
args = rpcdata.get('args') or []
kwargs = rpcdata.get('kwargs') or {}
req.state = Request.RESPONSE_DONE
req.diagnostic['local_call_started'] = str(datetime.now())
if method == 'create':
factory = RPCWrapper.rpctype.get(rpcdata.get('type'))
if factory:
obj = factory(*args, **kwargs)
req.result = (True, obj.rpcid)
else:
req.result = (RPCWrapper.NO_CLASS, None)
elif method == 'delete':
del RPCWrapper.objects[rpcid]
req.result = (True, None)
elif method == 'call':
obj = RPCWrapper.rpcobj.get(rpcid)
try:
retval = obj.call_locally(rpcdata)
req.result = (RPCWrapper.METHOD_OK, retval)
except RPCWrapper.MethodNotFound as e:
req.result = (RPCWrapper.NO_METHOD, None)
except RPCWrapper.MethodFailed as e:
req.result = (RPCWrapper.METHOD_FAILED, e.traceback)
except Exception as e:
import traceback
einfo = ("Method call failed rpcid=%s node=%s\nobj=%s data=%s\n" %
(rpcid, peer_id, obj, rpcdata))
req.result = (RPCWrapper.METHOD_FAILED, einfo + traceback.format_exc())
elif method == 'publish':
for clsname in req.params.get("classes"):
cls = RPCWrapper.rpctype.get(clsname)
if cls is not None:
cls.publishers.append(peer_id)
if self.status_cb:
cbthread = QuittableThread(target=self.status_cb,
args=(peer_id, "publish",
req.params.get("classes"),
req.params.get("pubdata")))
cbthread.start()
req.result = (True, None)
elif method == "ready":
req.result = (True, peer_id)
elif method == "exit_request":
if not self.join_req:
self.finish()
req.request_id = None
elif method == "exit_notify":
self.unmanage(peer_id)
# FIXME: exit_notify should cause patches to be closed
req.request_id = None
elif method == "node_status":
pass
else:
print("rpc_wrapper: WARNING: no handler for method '%s'" % method)
print("call data:", rpcid, method, rpcdata)
req.method = None
req.params = None
req.diagnostic['local_call_complete'] = str(datetime.now())
|
bgribble/mfp
|
mfp/rpc/rpc_host.py
|
Python
|
gpl-2.0
| 13,690
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("orgs", "0014_auto_20150722_1419")]
operations = [
migrations.CreateModel(
name="Boundary",
fields=[
("id", models.AutoField(verbose_name="ID", serialize=False, auto_created=True, primary_key=True)),
(
"osm_id",
models.CharField(help_text="This is the OSM id for this administrative boundary", max_length=15),
),
("name", models.CharField(help_text="The name of our administrative boundary", max_length=128)),
(
"level",
models.IntegerField(
help_text="The level of the boundary, 0 for country, 1 for state, 2 for district"
),
),
(
"geometry",
models.TextField(
help_text="The json representing the geometry type and coordinates of the boundaries",
verbose_name="Geometry",
),
),
(
"org",
models.ForeignKey(
related_name="boundaries", on_delete=models.PROTECT, verbose_name="Organization", to="orgs.Org"
),
),
(
"parent",
models.ForeignKey(
related_name="children",
on_delete=models.PROTECT,
to="locations.Boundary",
help_text="The parent to this political boundary if any",
null=True,
),
),
],
),
migrations.AlterUniqueTogether(name="boundary", unique_together=set([("org", "osm_id")])),
]
|
rapidpro/ureport
|
ureport/locations/migrations/0001_initial.py
|
Python
|
agpl-3.0
| 2,060
|
# -*- coding: utf-8 -*-
from django.utils.translation import gettext as _
import plotly.offline as plotly
import plotly.graph_objs as go
from reports import utils
def diaperchange_lifetimes(changes):
"""
Create a graph showing how long diapers last (time between changes).
:param changes: a QuerySet of Diaper Change instances.
    :returns: a tuple of the graph's html and javascript.
"""
changes = changes.order_by('time')
durations = []
last_change = changes.first()
for change in changes[1:]:
duration = change.time - last_change.time
if duration.seconds > 0:
durations.append(duration)
last_change = change
trace = go.Box(
y=[round(d.seconds/3600, 2) for d in durations],
name=_('Changes'),
jitter=0.3,
pointpos=-1.8,
boxpoints='all'
)
layout_args = utils.default_graph_layout_options()
layout_args['height'] = 800
layout_args['title'] = _('<b>Diaper Lifetimes</b>')
layout_args['yaxis']['title'] = _('Time between changes (hours)')
layout_args['yaxis']['zeroline'] = False
layout_args['yaxis']['dtick'] = 1
fig = go.Figure({
'data': [trace],
'layout': go.Layout(**layout_args)
})
output = plotly.plot(fig, output_type='div', include_plotlyjs=False)
return utils.split_graph_output(output)
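# Illustrative call (model and queryset names assumed, not part of this module):
#   changes = DiaperChange.objects.filter(child=child)
#   graph_html, graph_js = diaperchange_lifetimes(changes)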
|
cdubz/babybuddy
|
reports/graphs/diaperchange_lifetimes.py
|
Python
|
bsd-2-clause
| 1,378
|
from datetime import date, timedelta
from django.contrib import admin
from django.test import TestCase
from .admin import CorporateMemberAdmin
from .models import CorporateMember
class CorporateMemberAdminTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.member = CorporateMember.objects.create(
display_name='Corporation',
billing_name='foo',
billing_email='c@example.com',
contact_email='c@example.com',
membership_level=2,
)
def test_membership_expires(self):
today = date.today()
yesterday = date.today() - timedelta(days=1)
plus_thirty_one_days = today + timedelta(days=31)
modeladmin = CorporateMemberAdmin(CorporateMember, admin.site)
self.assertIsNone(modeladmin.membership_expires(self.member))
self.member.invoice_set.create(amount=500)
self.assertIsNone(modeladmin.membership_expires(self.member))
self.member.invoice_set.create(amount=500, expiration_date=yesterday)
self.assertIn('red', modeladmin.membership_expires(self.member))
self.member.invoice_set.create(amount=500, expiration_date=today)
self.assertIn('orange', modeladmin.membership_expires(self.member))
self.member.invoice_set.create(amount=500, expiration_date=plus_thirty_one_days)
self.assertIn('green', modeladmin.membership_expires(self.member))
|
nanuxbe/django
|
members/test_admin.py
|
Python
|
bsd-3-clause
| 1,432
|
import sys
import pymongo
import requests
import itertools
import genemania
from itertools import islice
from app.util import set_status, create_edges_index, cleanup_edges
from app.status import Status
import app
log = app.get_logger('humannet')
def parse(columns, metadata, lines):
status = Status('networks', logger=log).n(len(lines)).start()
for idx, line in enumerate(lines):
status.log(idx)
tokens = line.split('\t')
if not len(tokens) == len(columns) + 3:
continue
source = tokens[0]
target = tokens[1]
# humannet composite score
#score = float(tokens[-1])
for column, token in itertools.izip(columns, tokens[2:-1]):
try:
# individual edge score
score = float(token)
metadata[column]['count'] += 1
yield {
'source': source,
'target': target,
'score': score,
'meta': metadata[column]['_id']
}
except ValueError:
pass
status.stop()
def main():
client = pymongo.MongoClient()
db = client.networks
# collection stores metadata about source networks
meta = db.meta
# collection stores edge data
edges = db.edges
# create index, if necessary
create_edges_index()
# get list of previously loaded networks to delete, if any
_ids = [result['_id'] for result in meta.find({'collection': 'humannet'})]
# From http://www.functionalnet.org/humannet/HumanNet.v1.evidence_code.txt:
# File format: [gene1] [gene2] [CE-CC] [CE-CX] [CE-GT] [CE-LC] [CE-YH] [DM-PI] [HS-CC] [HS-CX] [HS-DC] [HS-GN] [HS-LC] [HS-MS] [HS-PG] [HS-YH] [SC-CC] [SC-CX] [SC-GT] [SC-LC] [SC-MS] [SC-TS] [SC-YH] [IntNet]
# CE-CC = Co-citation of worm gene
# CE-CX = Co-expression among worm genes
# CE-GT = Worm genetic interactions
# CE-LC = Literature curated worm protein physical interactions
# CE-YH = High-throughput yeast 2-hybrid assays among worm genes
# DM-PI = Fly protein physical interactions
# HS-CC = Co-citation of human genes
# HS-CX = Co-expression among human genes
# HS-DC = Co-occurrence of domains among human proteins
# HS-GN = Gene neighbourhoods of bacterial and archaeal orthologs of human genes
# HS-LC = Literature curated human protein physical interactions
# HS-MS = human protein complexes from affinity purification/mass spectrometry
# HS-PG = Co-inheritance of bacterial and archaeal orthologs of human genes
# HS-YH = High-throughput yeast 2-hybrid assays among human genes
# SC-CC = Co-citation of yeast genes
# SC-CX = Co-expression among yeast genes
# SC-GT = Yeast genetic interactions
# SC-LC = Literature curated yeast protein physical interactions
# SC-MS = Yeast protein complexes from affinity purification/mass spectrometry
# SC-TS = Yeast protein interactions inferred from tertiary structures of complexes
# SC-YH = High-throughput yeast 2-hybrid assays among yeast genes
# IntNet = Integrated network (HumanNet)
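# A (hypothetical) data line therefore looks like:
#   "5290\t207\t\t0.9\t...\t2.5"
# i.e. two gene ids, 21 tab-separated evidence scores (blank where absent),
# then the integrated IntNet score; parse() above pairs tokens[2:-1] with the
# column names below.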
columns = [
'co-citation of worm gene',
'co-expression among worm genes',
'worm genetic interactions',
'literature curated worm protein physical interactions',
'high-throughput yeast 2-hybrid assays among worm genes',
'fly protein physical interactions',
'co-citation of human genes',
'co-expression among human genes',
'co-occurrence of domains among human proteins',
'gene neighbourhoods of bacterial and archaeal orthologs of human genes',
'literature curated human protein physical interactions',
'human protein complexes from affinity purification/mass spectrometry',
'co-inheritance of bacterial and archaeal orthologs of human genes',
'high-throughput yeast 2-hybrid assays among human genes',
'co-citation of yeast genes',
'co-expression among yeast genes',
'yeast genetic interactions',
'literature curated yeast protein physical interactions',
'yeast protein complexes from affinity purification/mass spectrometry',
'yeast protein interactions inferred from tertiary structures of complexes',
'high-throughput yeast 2-hybrid assays among yeast genes'
]
metadata = {}
for column in columns:
m = {
'collection': 'humannet',
'name': column,
'count': 0
}
set_status(m, 'parsing')
m['_id'] = meta.insert_one(m).inserted_id
metadata[column] = m
url = 'http://www.functionalnet.org/humannet/HumanNet.v1.join.txt'
log.info('reading network list from %s', url)
r = requests.get(url)
lines = list(r.iter_lines())
count = 0
iterator = parse(columns, metadata, lines)
while True:
records = [record for record in islice(iterator, 1000)]
if len(records) > 0:
name_to_id = genemania.id_lookup_table(set(it['source'] for it in records) | set(it['target'] for it in records))
for record in records:
source = name_to_id[record['source']]
if source is None:
log.warning('unknown source %s', record['source'])
record['source'] = source
target = name_to_id[record['target']]
if target is None:
log.warning('unknown target %s', record['target'])
record['target'] = target
records = [record for record in records if record['source'] is not None and record['target'] is not None]
count += len(records)
edges.insert_many(records)
log.debug('inserted %d edges (%d total)', len(records), count)
else:
break
for m in metadata.itervalues():
set_status(m, 'success')
meta.replace_one({'_id': m['_id']}, m)
if len(_ids) > 0:
log.info('dropping old network metadata')
meta.delete_many({'_id': {'$in': _ids}})
cleanup_edges()
return 0
if __name__ == '__main__':
sys.exit(main())
|
ucsd-ccbb/Oncolist
|
src/restLayer/app/humannet.py
|
Python
|
mit
| 6,211
|
import types
import croi
class AttrTest(object):
class_field0 = 'class_field'
class_field1 = 0
@classmethod
def class_method0(cls):
pass
@classmethod
def class_method1(cls):
pass
@property
def property0(self):
return 'property0'
@property
def property1(self):
return 'property1'
def __init__(self):
self.instance_field0 = 'instance_field'
self.instance_field1 = 0
def instance_method0(self):
pass
def instance_method1(self):
pass
def test_public_attrs_field_methods():
test = AttrTest()
instance_fields = ['instance_field0', 'instance_field1']
instance_properties = ['property0', 'property1']
instance_members = ['instance_field0', 'instance_field1', 'property0', 'property1']
instance_methods = ['instance_method0', 'instance_method1']
instance_attrs = [
'instance_field0', 'instance_field1',
'instance_method0', 'instance_method1',
'property0', 'property1',
]
assert croi.instance_fields(test) == instance_fields
assert croi.instance_properties(test) == instance_properties
assert croi.instance_members(test) == instance_members
assert croi.instance_methods(test) == instance_methods
assert croi.instance_attrs(test) == instance_attrs
class_fields = ['class_field0', 'class_field1']
class_methods = ['class_method0', 'class_method1']
assert croi.class_fields(test) == class_fields
assert croi.class_methods(test) == class_methods
assert croi.class_attrs(test) == class_fields + class_methods
|
dacjames/croi
|
croi/test/test_reflection.py
|
Python
|
mit
| 1,617
|
"""an example a python console"""
import pygame
from pygame.locals import *
# the following line is not needed if pgu is installed
import sys; sys.path.insert(0, "..")
import traceback
from pgu import gui
from pgu import html
class StringStream:
def __init__(self):
self._data = ''
def write(self,data):
self._data = self._data+data
_lines = self._data.split("\n")
for line in _lines[:-1]:
lines.tr()
lines.td(gui.Label(str(line)),align=-1)
self._data = _lines[-1:][0]
_locals = {}
def lkey(_event):
e = _event
if e.key == K_RETURN:
_stdout = sys.stdout
s = sys.stdout = StringStream()
val = line.value
line.value = ''
line.focus()
print('>>> '+val)
try:
code = compile(val,'<string>','single')
eval(code,globals(),_locals)
except:
e_type,e_value,e_traceback = sys.exc_info()
print('Traceback (most recent call last):')
traceback.print_tb(e_traceback,None,s)
print(e_type,e_value)
sys.stdout = _stdout
app = gui.Desktop()
t = gui.Table(width=500,height=400)
t.tr()
lines = gui.Table()
box = gui.ScrollArea(lines,500,380)
t.td(box)
t.tr()
line = gui.Input(size=49)
line.connect(gui.KEYDOWN,lkey)
t.td(line)
t.tr()
class Hack(gui.Spacer):
def resize(self,width=None,height=None):
box.set_vertical_scroll(65535)
return 1,1
t.td(Hack(1,1))
app.connect(gui.QUIT,app.quit,None)
app.run(t)
|
danstoner/python_experiments
|
pgu/examples/gui17.py
|
Python
|
gpl-2.0
| 1,557
|
__all__ = [
'AbstractBasicAuthHandler',
'AbstractDigestAuthHandler',
'BaseHandler',
'Browser',
'BrowserStateError',
'CacheFTPHandler',
'ContentTooShortError',
'Cookie',
'CookieJar',
'CookiePolicy',
'DefaultCookiePolicy',
'DefaultFactory',
'FTPHandler',
'Factory',
'FileCookieJar',
'FileHandler',
'FormNotFoundError',
'FormsFactory',
'HTTPBasicAuthHandler',
'HTTPCookieProcessor',
'HTTPDefaultErrorHandler',
'HTTPDigestAuthHandler',
'HTTPEquivProcessor',
'HTTPError',
'HTTPErrorProcessor',
'HTTPHandler',
'HTTPPasswordMgr',
'HTTPPasswordMgrWithDefaultRealm',
'HTTPProxyPasswordMgr',
'HTTPRedirectDebugProcessor',
'HTTPRedirectHandler',
'HTTPRefererProcessor',
'HTTPRefreshProcessor',
'HTTPResponseDebugProcessor',
'HTTPRobotRulesProcessor',
'HTTPSClientCertMgr',
'HeadParser',
'History',
'LWPCookieJar',
'Link',
'LinkNotFoundError',
'LinksFactory',
'LoadError',
'MSIECookieJar',
'MozillaCookieJar',
'OpenerDirector',
'OpenerFactory',
'ParseError',
'ProxyBasicAuthHandler',
'ProxyDigestAuthHandler',
'ProxyHandler',
'Request',
'RobotExclusionError',
'RobustFactory',
'RobustFormsFactory',
'RobustLinksFactory',
'RobustTitleFactory',
'SeekableResponseOpener',
'TitleFactory',
'URLError',
'USE_BARE_EXCEPT',
'UnknownHandler',
'UserAgent',
'UserAgentBase',
'XHTMLCompatibleHeadParser',
'__version__',
'build_opener',
'install_opener',
'lwp_cookie_str',
'make_response',
'request_host',
'response_seek_wrapper', # XXX deprecate in public interface?
'seek_wrapped_response', # XXX should probably use this internally in place of response_seek_wrapper()
'str2time',
'urlopen',
'urlretrieve',
'urljoin',
# ClientForm API
'AmbiguityError',
'ControlNotFoundError',
'FormParser',
'ItemCountError',
'ItemNotFoundError',
'LocateError',
'Missing',
'ParseFile',
'ParseFileEx',
'ParseResponse',
'ParseResponseEx',
'ParseString',
'XHTMLCompatibleFormParser',
# deprecated
'CheckboxControl',
'Control',
'FileControl',
'HTMLForm',
'HiddenControl',
'IgnoreControl',
'ImageControl',
'IsindexControl',
'Item',
'Label',
'ListControl',
'PasswordControl',
'RadioControl',
'ScalarControl',
'SelectControl',
'SubmitButtonControl',
'SubmitControl',
'TextControl',
'TextareaControl',
]
import logging
import sys
from _version import __version__
# high-level stateful browser-style interface
from _mechanize import \
Browser, History, \
BrowserStateError, LinkNotFoundError, FormNotFoundError
# configurable URL-opener interface
from _useragent import UserAgentBase, UserAgent
from _html import \
Link, \
Factory, DefaultFactory, RobustFactory, \
FormsFactory, LinksFactory, TitleFactory, \
RobustFormsFactory, RobustLinksFactory, RobustTitleFactory
# urllib2 work-alike interface. This is a superset of the urllib2 interface.
from _urllib2 import *
import _urllib2
if hasattr(_urllib2, "HTTPSHandler"):
__all__.append("HTTPSHandler")
del _urllib2
# misc
from _http import HeadParser
from _http import XHTMLCompatibleHeadParser
from _opener import ContentTooShortError, OpenerFactory, urlretrieve
from _response import \
response_seek_wrapper, seek_wrapped_response, make_response
from _rfc3986 import urljoin
from _util import http2time as str2time
# cookies
from _clientcookie import Cookie, CookiePolicy, DefaultCookiePolicy, \
CookieJar, FileCookieJar, LoadError, request_host_lc as request_host, \
effective_request_host
from _lwpcookiejar import LWPCookieJar, lwp_cookie_str
# 2.4 raises SyntaxError due to generator / try/finally use
if sys.version_info[:2] > (2,4):
try:
import sqlite3
except ImportError:
pass
else:
from _firefox3cookiejar import Firefox3CookieJar
from _mozillacookiejar import MozillaCookieJar
from _msiecookiejar import MSIECookieJar
# forms
from _form import (
AmbiguityError,
ControlNotFoundError,
FormParser,
ItemCountError,
ItemNotFoundError,
LocateError,
Missing,
ParseError,
ParseFile,
ParseFileEx,
ParseResponse,
ParseResponseEx,
ParseString,
XHTMLCompatibleFormParser,
# deprecated
CheckboxControl,
Control,
FileControl,
HTMLForm,
HiddenControl,
IgnoreControl,
ImageControl,
IsindexControl,
Item,
Label,
ListControl,
PasswordControl,
RadioControl,
ScalarControl,
SelectControl,
SubmitButtonControl,
SubmitControl,
TextControl,
TextareaControl,
)
# If you hate the idea of turning bugs into warnings, do:
# import mechanize; mechanize.USE_BARE_EXCEPT = False
USE_BARE_EXCEPT = True
logger = logging.getLogger("mechanize")
if logger.level is logging.NOTSET:
logger.setLevel(logging.CRITICAL)
del logger
|
mzdaniel/oh-mainline
|
vendor/packages/mechanize/mechanize/__init__.py
|
Python
|
agpl-3.0
| 5,098
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2003-2009 Edgewall Software
# Copyright (C) 2003-2005 Jonas Borgström <jonas@edgewall.com>
# Copyright (C) 2005 Christopher Lenz <cmlenz@gmx.de>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Jonas Borgström <jonas@edgewall.com>
# Christopher Lenz <cmlenz@gmx.de>
from __future__ import with_statement
from datetime import datetime
from trac.core import *
from trac.resource import Resource, ResourceSystem
from trac.util.datefmt import from_utimestamp, to_utimestamp, utc
from trac.util.translation import _
from trac.wiki.api import WikiSystem, validate_page_name
class WikiPage(object):
"""Represents a wiki page (new or existing)."""
realm = 'wiki'
def __init__(self, env, name=None, version=None, db=None):
self.env = env
if isinstance(name, Resource):
self.resource = name
name = self.resource.id
else:
if version:
version = int(version) # must be a number or None
self.resource = Resource('wiki', name, version)
self.name = name
if name:
self._fetch(name, version, db)
else:
self.version = 0
self.text = self.comment = self.author = ''
self.time = None
self.readonly = 0
self.old_text = self.text
self.old_readonly = self.readonly
def _fetch(self, name, version=None, db=None):
if version is not None:
sql = """SELECT version, time, author, text, comment, readonly
FROM wiki WHERE name=%s AND version=%s"""
args = (name, int(version))
else:
sql = """SELECT version, time, author, text, comment, readonly
FROM wiki WHERE name=%s ORDER BY version DESC LIMIT 1"""
args = (name,)
for version, time, author, text, comment, readonly in \
self.env.db_query(sql, args):
self.version = int(version)
self.author = author
self.time = from_utimestamp(time)
self.text = text
self.comment = comment
self.readonly = int(readonly) if readonly else 0
break
else:
self.version = 0
self.text = self.comment = self.author = ''
self.time = None
self.readonly = 0
exists = property(lambda self: self.version > 0)
def delete(self, version=None, db=None):
"""Delete one or all versions of a page.
:since 1.0: the `db` parameter is no longer needed and will be removed
in version 1.1.1
"""
assert self.exists, "Cannot delete non-existent page"
with self.env.db_transaction as db:
if version is None:
# Delete a wiki page completely
db("DELETE FROM wiki WHERE name=%s", (self.name,))
self.env.log.info("Deleted page %s", self.name)
else:
# Delete only a specific page version
db("DELETE FROM wiki WHERE name=%s and version=%s",
(self.name, version))
self.env.log.info("Deleted version %d of page %s", version,
self.name)
if version is None or version == self.version:
self._fetch(self.name, None)
if not self.exists:
# Invalidate page name cache
del WikiSystem(self.env).pages
# Delete orphaned attachments
from trac.attachment import Attachment
Attachment.delete_all(self.env, 'wiki', self.name)
# Let change listeners know about the deletion
if not self.exists:
for listener in WikiSystem(self.env).change_listeners:
listener.wiki_page_deleted(self)
ResourceSystem(self.env).resource_deleted(self)
else:
for listener in WikiSystem(self.env).change_listeners:
if hasattr(listener, 'wiki_page_version_deleted'):
listener.wiki_page_version_deleted(self)
ResourceSystem(self.env).resource_version_deleted(self)
def save(self, author, comment, remote_addr, t=None, db=None):
"""Save a new version of a page.
:since 1.0: the `db` parameter is no longer needed and will be removed
in version 1.1.1
"""
if not validate_page_name(self.name):
raise TracError(_("Invalid Wiki page name '%(name)s'",
name=self.name))
new_text = self.text != self.old_text
if not new_text and self.readonly == self.old_readonly:
raise TracError(_("Page not modified"))
t = t or datetime.now(utc)
with self.env.db_transaction as db:
if new_text:
db("""INSERT INTO wiki (name, version, time, author, ipnr,
text, comment, readonly)
VALUES (%s,%s,%s,%s,%s,%s,%s,%s)
""", (self.name, self.version + 1, to_utimestamp(t),
author, remote_addr, self.text, comment,
self.readonly))
self.version += 1
self.resource = self.resource(version=self.version)
else:
db("UPDATE wiki SET readonly=%s WHERE name=%s",
(self.readonly, self.name))
if self.version == 1:
# Invalidate page name cache
del WikiSystem(self.env).pages
self.author = author
self.comment = comment
self.time = t
for listener in WikiSystem(self.env).change_listeners:
if self.version == 1:
listener.wiki_page_added(self)
else:
listener.wiki_page_changed(self, self.version, t, comment,
author, remote_addr)
context=dict(
version=self.version,
time=t,
comment=comment,
author=author,
remote_addr=remote_addr)
if self.version == 1:
ResourceSystem(self.env).resource_created(self, context)
else:
old_values = dict()
if self.readonly != self.old_readonly:
old_values["readonly"] = self.old_readonly
if self.text != self.old_text:
old_values["text"] = self.old_text
ResourceSystem(self.env).resource_changed(
self,
old_values,
context)
self.old_readonly = self.readonly
self.old_text = self.text
def rename(self, new_name):
"""Rename wiki page in-place, keeping the history intact.
Renaming a page this way will eventually leave dangling references
        to the old page - which literally doesn't exist anymore.
"""
assert self.exists, "Cannot rename non-existent page"
if not validate_page_name(new_name):
raise TracError(_("Invalid Wiki page name '%(name)s'",
name=new_name))
old_name = self.name
with self.env.db_transaction as db:
new_page = WikiPage(self.env, new_name)
if new_page.exists:
raise TracError(_("Can't rename to existing %(name)s page.",
name=new_name))
db("UPDATE wiki SET name=%s WHERE name=%s", (new_name, old_name))
# Invalidate page name cache
del WikiSystem(self.env).pages
# Reparent attachments
from trac.attachment import Attachment
Attachment.reparent_all(self.env, 'wiki', old_name, 'wiki',
new_name)
self.name = new_name
self.env.log.info('Renamed page %s to %s', old_name, new_name)
for listener in WikiSystem(self.env).change_listeners:
if hasattr(listener, 'wiki_page_renamed'):
listener.wiki_page_renamed(self, old_name)
ResourceSystem(self.env).resource_changed(
self,
dict(name=old_name)
)
def get_history(self, db=None):
"""Retrieve the edit history of a wiki page.
:since 1.0: the `db` parameter is no longer needed and will be removed
in version 1.1.1
"""
for version, ts, author, comment, ipnr in self.env.db_query("""
SELECT version, time, author, comment, ipnr FROM wiki
WHERE name=%s AND version<=%s ORDER BY version DESC
""", (self.name, self.version)):
yield version, from_utimestamp(ts), author, comment, ipnr
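# Sketch of typical use (env is a trac Environment; names illustrative):
#   page = WikiPage(env, 'WikiStart')
#   if page.exists:
#       for version, ts, author, comment, ipnr in page.get_history():
#           ...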
|
apache/bloodhound
|
trac/trac/wiki/model.py
|
Python
|
apache-2.0
| 9,151
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# measurement.py
#
# Copyright 2014 Manu Varkey <manuvarkey@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
from gi.repository import Gtk, Gdk, GLib
import copy, logging, importlib, sys
# local files import
from .. import misc
# Get logger object
log = logging.getLogger()
class Measurement:
"""Stores a Measurement groups"""
def __init__(self, model = None):
if model is not None:
self.caption = model[0]
self.items = []
class_list = ['MeasurementItemHeading',
'MeasurementItemCustom',
'MeasurementItemAbstract']
for item_model in model[1]:
if item_model[0] in class_list:
item_type = globals()[item_model[0]]
item = item_type()
item.set_model(item_model)
self.items.append(item)
else:
self.caption = ''
self.items = []
def append_item(self,item):
self.items.append(item)
def insert_item(self,index,item):
self.items.insert(index,item)
def remove_item(self,index):
del(self.items[index])
def __setitem__(self, index, value):
self.items[index] = value
def __getitem__(self, index):
return self.items[index]
def set_caption(self,caption):
self.caption = caption
def get_caption(self):
return self.caption
def length(self):
return len(self.items)
def get_model(self, clean=False):
"""Get data model
Arguments:
clean: Removes static items if True
"""
items_model = []
for item in self.items:
items_model.append(item.get_model(clean))
return ['Measurement', [self.caption, items_model]]
def get_net_measurement(self):
# Fill in values from measurement items
self.paths = dict()
self.qtys = dict()
self.sums = dict()
for slno, item in enumerate(self.items):
if not isinstance(item, MeasurementItemHeading):
for itemno, qty in zip(item.itemnos, item.get_total()):
if itemno not in self.paths:
self.paths[itemno] = []
self.qtys[itemno] = []
self.sums[itemno] = 0
self.paths[itemno].append(slno)
self.qtys[itemno].append(qty)
self.sums[itemno] += qty
return (self.paths, self.qtys, self.sums)
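    # Illustrative return shape (itemnos and quantities hypothetical): with
    # itemno '1.1' measured by items 2 and 5,
    #   paths -> {'1.1': [2, 5]}, qtys -> {'1.1': [10.0, 2.5]}, sums -> {'1.1': 12.5}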
def set_model(self, model):
"""Set data model"""
if model[0] == 'Measurement':
self.__init__(model[1])
def get_spreadsheet_buffer(self, schedule, codes, start_row):
spreadsheet = misc.Spreadsheet()
row = start_row
# Set datas of children
for slno, item in enumerate(self.items):
item_sheet = item.get_spreadsheet_buffer([slno+1], schedule, codes, row)
spreadsheet.append(item_sheet)
row = row + item_sheet.length()
return spreadsheet
def clear(self):
self.items = []
def get_text(self):
return "<b>Measurement captioned." + misc.clean_markup(self.caption) + "</b>"
def get_tooltip(self):
return None
def print_item(self):
print(" " + "Measurement captioned " + self.caption)
for item in self.items:
item.print_item()
class MeasurementItem:
"""Base class for storing Measurement items"""
def __init__(self, itemnos=None, records=None, remark="", item_remarks=None):
if itemnos is None:
itemnos = []
if records is None:
records = []
if item_remarks is None:
item_remarks = []
self.itemnos = itemnos
self.records = records
self.remark = remark
self.item_remarks = item_remarks
def set_item(self,index,itemno):
self.itemnos[index] = itemno
def get_item(self,index):
return self.itemnos[index]
def append_record(self,record):
self.records.append(record)
def insert_record(self,index,record):
self.records.insert(index,record)
def remove_record(self,index):
del(self.records[index])
def __setitem__(self, index, value):
self.records[index] = value
def __getitem__(self, index):
return self.records[index]
def set_remark(self,remark):
self.remark = remark
def get_remark(self):
return self.remark
def length(self):
return len(self.records)
def clear(self):
self.itemnos = []
self.records = []
self.remark = ''
self.item_remarks = []
class MeasurementItemHeading(MeasurementItem):
"""Stores an item heading"""
def __init__(self, model=None):
if model is not None:
MeasurementItem.__init__(self,remark=model[0])
else:
MeasurementItem.__init__(self)
def get_model(self, clean=False):
"""Get data model
Arguments:
clean: Dummy variable
"""
model = ['MeasurementItemHeading', [self.remark]]
return model
def set_model(self, model):
"""Set data model"""
if model[0] == 'MeasurementItemHeading':
self.__init__(model[1])
def get_spreadsheet_buffer(self, path, schedule, codes, row):
spreadsheet = misc.Spreadsheet()
spreadsheet.append_data([[str(path), self.remark], [None]], bold=True, wrap_text=True)
return spreadsheet
def get_text(self):
heading = self.remark.splitlines()[0]
return "<b>" + misc.clean_markup(heading) + "</b>"
def get_tooltip(self):
return None
def print_item(self):
print(" " + self.remark)
class RecordCustom:
"""An individual record of a MeasurementItemCustom"""
def __init__(self, items, cust_funcs, total_func, columntypes):
self.data_string = items
self.data = []
# Populate Data
for x,columntype in zip(self.data_string,columntypes):
if columntype not in [misc.MEAS_DESC, misc.MEAS_CUST]:
try:
num = eval(x)
self.data.append(num)
except:
self.data.append(0)
else:
self.data.append(0)
self.cust_funcs = cust_funcs
self.total_func = total_func
self.columntypes = columntypes
self.total = self.find_total()
def get_model(self):
"""Get data model"""
return self.data_string
def get_model_rendered(self, row=None):
"""Get data model with results of custom functions included for rendering"""
item = self.get_model()
rendered_item = []
for item_elem, columntype, render_func in zip(item, self.columntypes, self.cust_funcs):
try:
if item_elem != "" or columntype == misc.MEAS_CUST:
if columntype == misc.MEAS_CUST:
try:
# Try for numerical values
value = float(render_func(item, row))
except:
# If evaluation fails gracefully fallback to string
value = render_func(item, row)
rendered_item.append(value)
if columntype == misc.MEAS_DESC:
rendered_item.append(item_elem)
elif columntype == misc.MEAS_NO:
value = int(eval(item_elem)) if item_elem not in ['0','0.0'] else 0
rendered_item.append(value)
elif columntype == misc.MEAS_L:
value = round(eval(item_elem),3) if item_elem not in ['0','0.0'] else 0
rendered_item.append(value)
else:
rendered_item.append(None)
except TypeError:
rendered_item.append(None)
log.warning('RecordCustom - Wrong value loaded in item - ' + str(item_elem))
return rendered_item
def set_model(self, items, cust_funcs, total_func, columntypes):
"""Set data model"""
self.__init__(items, cust_funcs, total_func, columntypes)
def find_total(self):
return self.total_func(self.data)
def find_custom(self,index):
return self.cust_funcs[index](self.data)
def print_item(self):
print(" " + str([self.data_string,self.total]))
class MeasurementItemCustom(MeasurementItem):
"""Stores a custom record set [As per plugin loaded]"""
def __init__(self, data = None, plugin=None):
self.name = ''
self.itemtype = None
self.itemnos_mask = []
self.captions = []
self.columntypes = []
self.cust_funcs = []
self.total_func_item = None
self.total_func = None
# For user data support
self.captions_udata = []
self.columntypes_udata = []
self.user_data = None
self.dimensions = None
# Read description from file
if plugin is not None:
try:
spec = importlib.util.spec_from_file_location(plugin, misc.abs_path('meas_templates', plugin+'.py'))
module = importlib.util.module_from_spec(spec)
sys.modules[spec.name] = module
spec.loader.exec_module(module)
self.custom_object = module.CustomItem()
self.name = self.custom_object.name
self.itemtype = plugin
self.itemnos_mask = self.custom_object.itemnos_mask
self.captions = self.custom_object.captions
self.columntypes = self.custom_object.columntypes
self.cust_funcs = self.custom_object.cust_funcs
self.total_func_item = self.custom_object.total_func_item
self.total_func = self.custom_object.total_func
# For user data support
self.captions_udata = self.custom_object.captions_udata
self.columntypes_udata = self.custom_object.columntypes_udata
self.user_data = self.custom_object.user_data_default
self.dimensions = self.custom_object.dimensions
            except Exception:
                log.error('Error loading plugin - MeasurementItemCustom - ' + str(plugin))
        if data is not None:
itemnos = data[0]
records = []
for item_model in data[1]:
item = RecordCustom(item_model, self.cust_funcs,
self.total_func_item, self.columntypes)
records.append(item)
remark = data[2]
item_remarks = data[3]
self.user_data = data[4]
MeasurementItem.__init__(self, itemnos, records, remark, item_remarks)
else:
MeasurementItem.__init__(self, [None]*self.item_width(), [],
'', ['']*self.item_width())
else:
MeasurementItem.__init__(self)
def model_width(self):
"""Returns number of columns being measured"""
return len(self.columntypes)
def item_width(self):
"""Returns number of itemnos being measured"""
return len(self.itemnos_mask)
def get_model(self, clean=False):
"""Get data model
Arguments:
            clean: Unused dummy argument (kept for interface compatibility)
"""
item_schedule = []
for item in self.records:
item_schedule.append(item.get_model())
data = [self.itemnos, item_schedule, self.remark, self.item_remarks,
self.user_data, self.itemtype]
return ['MeasurementItemCustom', data]
def set_model(self, model):
"""Set data model"""
if model[0] == 'MeasurementItemCustom':
self.clear()
self.__init__(model[1], model[1][5])
def get_spreadsheet_buffer(self, path, schedule, codes, s_row):
spreadsheet = misc.Spreadsheet()
row = 1
# Item no and description
for slno, key in enumerate(self.itemnos):
if key in codes.keys():
itemno = codes[key]
else:
itemno = None
if itemno is not None:
spreadsheet.append_data([[str(path), 'Item No:' + itemno, self.item_remarks[slno]]], bold=True, wrap_text=True)
spreadsheet.append_data([[None, schedule[itemno][1]]])
row = row + 2
# Remarks columns
if self.remark != '':
spreadsheet.append_data([[None, 'Remarks: ' + self.remark]], bold=True)
row = row + 1
# Data rows
spreadsheet.append_data([[None], [None] + self.captions], bold=True)
row = row + 1
for slno, record in enumerate(self.records,1):
values = record.get_model_rendered(slno)
spreadsheet.append_data([[slno] + values])
row = row + 1
# User data
if self.captions_udata:
spreadsheet.append_data([[None], [None, 'User Data Captions'] + self.captions_udata], bold=True)
spreadsheet.append_data([[None, 'User Datas'] + self.user_data])
row = row + 2
# Total values
spreadsheet.append_data([[None, 'TOTAL'] + self.get_total(), [None]], bold=True)
row = row + 2
return spreadsheet
def print_item(self):
print(" Item No." + str(self.itemnos))
for i in range(self.length()):
self[i].print_item()
print(" " + "Total: " + str(self.get_total()))
def get_total(self):
if self.total_func is not None:
return self.total_func(self.records,self.user_data)
else:
return []
def get_text(self):
total = self.get_total()
return self.name + ": "+ self.remark + ", #" + \
str(self.length()) + ", Σ " + str(total)
def get_tooltip(self):
if self.remark != "":
return "Remark: " + self.remark
else:
return None
|
manuvarkey/GEstimator
|
estimator/data/measurement.py
|
Python
|
gpl-3.0
| 15,031
|
import bson.json_util
import json
from bson import ObjectId
import string
import tangelo
from os import listdir
from os.path import isfile, join
# start annotations as an empty dictionary, each type will have a key and its
# own dictionary indexed by name/identifier
annotations = dict()
annotations['entities'] = dict()
annotations['relations'] = dict()
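# Illustrative shape once a file has been parsed (identifiers and values below
# are hypothetical):
#   annotations['entities']  -> {'T1': 'gold nanoparticle', 'T2': '20 nm'}
#   annotations['relations'] -> {'R1': {'type': 'PropertyValue', 'arg1': 'T1', 'arg2': 'T2'}}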
# needed for cleaning the file to get rid of special characters
def remove_non_ascii(text):
    return ''.join(i for i in text if ord(i) < 128)
# tests if this particular row's identifier shows it is a relation (not an entity)
def isRelation(ident):
return (ident[0] == 'R')
def foundEntity(ident,value):
# the entity name had non-ascii characters and always an extra \n at the end
annotations['entities'][ident] = remove_non_ascii(value)[:-1]
def foundBinaryRelation(ident,name,arg1,arg2):
annotations['relations'][ident] = {'type': name, 'arg1': arg1, 'arg2':arg2}
# this routine looks through the entities and returns the value that matches a property
# when it has been assigned through a relationship in the text
def returnValueIdentForAttributeIdentity(ident):
for rel in annotations['relations']:
if annotations['relations'][rel]['type'] == 'PropertyValue':
if annotations['relations'][rel]['arg1'] == ident:
return annotations['relations'][rel]['arg2']
elif annotations['relations'][rel]['arg2'] == ident:
return annotations['relations'][rel]['arg1']
# this loop goes through all annotations and outputs them in a format compatible with the nanomaterial
# registry or further processing. It uses two passes: first pass outputs properties which are assigned
# to specific nanomaterials through the "NanoProperty" relation.
def processAnnotations(materialname,materialid):
outstring = ''
outtable = []
# put in the header row for the CSV
entry = ['ID','Name','Measurement','Value']
outtable.append(entry)
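    # each data row appended below follows the same layout; an illustrative
    # (hypothetical) row: ['42', 'AuNP-1', 'diameter', '20 nm']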
# now loop through all the relations and output rows for each
for rel in annotations['relations']:
#print rel
# find the properties actually assigned to a nanomaterial
if annotations['relations'][rel]['type'] == 'NanoProperty':
# still need to check for out of order args. This assumes it is material, property
entName = annotations['entities'][annotations['relations'][rel]['arg1']]
attrib = annotations['entities'][annotations['relations'][rel]['arg2']]
attribIdent = annotations['relations'][rel]['arg2']
# we know somewhere else in the rules, is a value for this property, so find this rule.
# but if there isn't a rule, this might be a dangling particle/property pair, skip it if no
# match is found
            try:
                valueIdent = returnValueIdentForAttributeIdentity(attribIdent)
                value = annotations['entities'][valueIdent]
                entry = [materialid, entName, attrib, value]
                outtable.append(entry)
            except KeyError:
                # no PropertyValue relation resolved for this property; skip it
                pass
print "output properties and values:"
for rel in annotations['relations']:
#print rel
# find the properties actually assigned to a nanomaterial. We are trying a complex
# reference to look for the entity
if annotations['relations'][rel]['type'] == 'PropertyValue':
# actually need to check for out of order args
            try:
                propName = annotations['entities'][annotations['relations'][rel]['arg1']]
                value = annotations['entities'][annotations['relations'][rel]['arg2']]
                entry = [materialid, materialname, propName, value]
                outtable.append(entry)
            except KeyError:
                # one of the arguments did not resolve to a known entity; skip it
                pass
#print outtable
return outtable
# create a simple service that looks for files in a directory and returns ones with the
# type extension .ann
def run(filename=None,materialname=None,materialid=None,directory=None):
# Create an empty response object.
response = {}
# assign default directory if none is provided
#if directory == None:
# directory = '/Users/clisle/code/brat-v1.3_Crunchy_Frog/data/nano_papers/'
#if filename == None:
# filename = '01-body.ann'
# if the user assigned an ID, it is to be assigned to all unassigned records.
# Therefore, any records that don't have a particle name specifically annotated will
# have derived values assigned.
outputID = int(materialid) if materialid != '' else None
outputname = materialname if materialname != '' else None
f = open(directory+'/'+filename)
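    # brat .ann lines are tab-separated; illustrative (hypothetical) examples:
    #   entity:   "T1\tNanoparticle 0 17\tgold nanoparticle\n"
    #   relation: "R1\tNanoProperty Arg1:T1 Arg2:T4\n"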
for line in f:
#print line
splits = line.split('\t')
ident = splits[0]
name = splits[1].split(' ')[0]
if isRelation(ident):
arg1 = splits[1].split(' ')[1].split(':')[1]
arg2 = splits[1].split(' ')[2].split(':')[1].split('\n')[0]
            # defensively trim a trailing newline if one survived the split
            arg2 = arg2.rstrip('\n')
foundBinaryRelation(ident,name,arg1,arg2)
else:
# after the second tab is the identifier value
value = splits[2]
foundEntity(ident, value)
annotations = processAnnotations(outputname,outputID)
f.close()
# Pack the results into the response object, and return it.
response['result'] = annotations
# Return the response object.
#tangelo.log(str(response))
return bson.json_util.dumps(response)
|
curtislisle/nanomaterial-dashboard
|
nanoUtilities/service/exportBratAnnotation.py
|
Python
|
apache-2.0
| 5,907
|
"""Tests for configuration provider linking."""
from dependency_injector import containers, providers
class Core(containers.DeclarativeContainer):
config = providers.Configuration("core")
value_getter = providers.Callable(lambda _: _, config.value)
class Services(containers.DeclarativeContainer):
config = providers.Configuration("services")
value_getter = providers.Callable(lambda _: _, config.value)
def test():
root_config = providers.Configuration("main")
core = Core(config=root_config.core)
services = Services(config=root_config.services)
root_config.override(
{
"core": {
"value": "core",
},
"services": {
"value": "services",
},
},
)
assert core.config() == {"value": "core"}
assert core.config.value() == "core"
assert core.value_getter() == "core"
assert services.config() == {"value": "services"}
assert services.config.value() == "services"
assert services.value_getter() == "services"
def test_double_override():
root_config = providers.Configuration("main")
core = Core(config=root_config.core)
services = Services(config=root_config.services)
root_config.override(
{
"core": {
"value": "core1",
},
"services": {
"value": "services1",
},
},
)
root_config.override(
{
"core": {
"value": "core2",
},
"services": {
"value": "services2",
},
},
)
assert core.config() == {"value": "core2"}
assert core.config.value() == "core2"
assert core.value_getter() == "core2"
assert services.config() == {"value": "services2"}
assert services.config.value() == "services2"
assert services.value_getter() == "services2"
def test_reset_overriding_cache():
# See: https://github.com/ets-labs/python-dependency-injector/issues/428
class Core(containers.DeclarativeContainer):
config = providers.Configuration()
greetings = providers.Factory(str, config.greeting)
class Application(containers.DeclarativeContainer):
config = providers.Configuration()
core = providers.Container(
Core,
config=config,
)
greetings = providers.Factory(str, config.greeting)
container = Application()
container.config.set("greeting", "Hello World")
assert container.greetings() == "Hello World"
assert container.core.greetings() == "Hello World"
container.config.set("greeting", "Hello Bob")
assert container.greetings() == "Hello Bob"
assert container.core.greetings() == "Hello Bob"
def test_reset_overriding_cache_for_option():
# See: https://github.com/ets-labs/python-dependency-injector/issues/428
class Core(containers.DeclarativeContainer):
config = providers.Configuration()
greetings = providers.Factory(str, config.greeting)
class Application(containers.DeclarativeContainer):
config = providers.Configuration()
core = providers.Container(
Core,
config=config.option,
)
greetings = providers.Factory(str, config.option.greeting)
container = Application()
container.config.set("option.greeting", "Hello World")
assert container.greetings() == "Hello World"
assert container.core.greetings() == "Hello World"
container.config.set("option.greeting", "Hello Bob")
assert container.greetings() == "Hello Bob"
assert container.core.greetings() == "Hello Bob"
|
rmk135/dependency_injector
|
tests/unit/providers/configuration/test_config_linking_py2_py3.py
|
Python
|
bsd-3-clause
| 3,698
|
# imports
import pandas as pd
from sklearn import model_selection, preprocessing, metrics
import argparse
import torch
import torch.nn as nn
import numpy as np
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.utils.data import DataLoader
# data: here we will use the MovieLens dataset of movie ratings
class MovieDataset():
def __init__(self, users, movies, ratings):
self.users = users
self.movies = movies
self.ratings = ratings
def __len__(self):
return len(self.users)
def __getitem__(self,item):
user = self.users[item]
movie = self.movies[item]
rating = self.ratings[item]
x = torch.tensor([user, movie], dtype=torch.long)
y = torch.tensor([rating], dtype=torch.float)
return x, y
class Net(nn.Module):
def __init__(self,num_users,num_movies):
super().__init__()
self.user_embed = nn.Embedding(num_users,32)
self.movie_embed = nn.Embedding(num_movies,32)
self.output = nn.Linear(64,1)
def forward(self, x):
user_embeds = self.user_embed(x[:,0])
movie_embeds = self.movie_embed(x[:,1])
output = torch.cat([user_embeds, movie_embeds], dim=1)
output = self.output(output)
return output
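# A minimal sketch of what Net computes, using hypothetical sizes and indices:
# each (user, movie) pair maps to two 32-d embeddings, concatenated to 64-d
# and projected to a single predicted rating:
#   net = Net(num_users=10, num_movies=20)
#   x = torch.tensor([[3, 17]])   # batch of one: user 3, movie 17
#   y_hat = net(x)                # shape (1, 1)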
def loss_batch(model, loss_func, xb, yb, opt=None):
loss = loss_func(model(xb), yb)
if opt is not None:
loss.backward()
opt.step()
opt.zero_grad()
return loss.item(), len(xb)
def fit(args, model, loss_func, opt, scheduler, train_dl, valid_dl):
    # infer the device from the model so every batch is moved to match it
    device = next(model.parameters()).device
    for epoch in range(args.epochs):
        # train
        model.train()
        for xb, yb in train_dl:
            loss_batch(model, loss_func, xb.to(device), yb.to(device), opt)
        # eval
        model.eval()
        with torch.no_grad():
            losses, nums = zip(
                *[loss_batch(model, loss_func, xb.to(device), yb.to(device)) for xb, yb in valid_dl]
            )
val_loss = np.sum(np.multiply(losses, nums)) / np.sum(nums)
scheduler.step(val_loss)
print(epoch, val_loss)
def get_data(train_ds, valid_ds, bs):
return (
DataLoader(train_ds, batch_size=bs, shuffle=True),
DataLoader(valid_ds, batch_size=bs * 2),
)
if __name__ == '__main__':
# Training settings
parser = argparse.ArgumentParser(description='PyTorch Example')
    parser.add_argument('--batch-size', type=int, default=128, metavar='N',
                        help='input batch size for training (default: 128)')
parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
help='input batch size for testing (default: 1000)')
    parser.add_argument('--epochs', type=int, default=60, metavar='N',
                        help='number of epochs to train (default: 60)')
parser.add_argument('--lr', type=float, default=3e-4, metavar='LR',
help='learning rate (default: 3e-4)')
parser.add_argument('--gamma', type=float, default=0.7, metavar='M',
help='Learning rate step gamma (default: 0.7)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
parser.add_argument('--log-interval', type=int, default=10, metavar='N',
help='how many batches to wait before logging training status')
parser.add_argument('--save-model', type=bool, default=False,
help='For Saving the current Model')
args = parser.parse_args()
# GPU / CPU set up
use_cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
device = torch.device("cuda" if use_cuda else "cpu")
    kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}  # note: not currently forwarded to the DataLoaders
# Data
df = pd.read_csv('./data/ml-latest-small/ratings.csv')
# userId, movieId rating timestamp
lbl_user = preprocessing.LabelEncoder()
lbl_movie = preprocessing.LabelEncoder()
df.userId = lbl_user.fit_transform(df.userId.values)
df.movieId = lbl_movie.fit_transform(df.movieId.values)
df_train, df_valid = model_selection.train_test_split(df, test_size=0.1, random_state=42, stratify=df.rating.values)
train_dataset = MovieDataset(users=df_train.userId.values,movies=df_train.movieId.values,ratings=df_train.rating.values)
valid_dataset = MovieDataset(users=df_valid.userId.values,movies=df_valid.movieId.values,ratings=df_valid.rating.values)
train_dl, valid_dl = get_data(train_dataset, valid_dataset, args.batch_size)
# Model
model = Net(num_users=len(lbl_user.classes_),num_movies=len(lbl_movie.classes_)).to(device)
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
print(model)
n = count_parameters(model)
print("Number of parameters: %s" % n)
optimizer = torch.optim.SGD(model.parameters(), lr=args.lr)
loss_func = nn.MSELoss()
scheduler = ReduceLROnPlateau(optimizer, 'min',patience=5, min_lr = args.lr/100., verbose=True)
#scheduler = StepLR(optimizer, step_size=1, gamma=args.gamma)
fit(args, model, loss_func, optimizer, scheduler, train_dl, valid_dl)
# Save
if args.save_model:
torch.save(model.state_dict(), "saved_model.pt")
|
asengsta/DataScience
|
pytorch/train_model.py
|
Python
|
gpl-3.0
| 5,557
|
from __future__ import absolute_import
import inspect
import collections
import six
from six.moves import zip
TYPE_MAP = {
'complex64': 'complex', 'complex': 'complex',
'float32': 'float', 'float': 'float',
'int32': 'int', 'uint32': 'int',
'int16': 'short', 'uint16': 'short',
'int8': 'byte', 'uint8': 'byte',
}
BlockIO = collections.namedtuple('BlockIO', 'name cls params sinks sources doc callbacks')
def _ports(sigs, msgs):
ports = list()
for i, dtype in enumerate(sigs):
port_type = TYPE_MAP.get(dtype.base.name, None)
if not port_type:
raise ValueError("Can't map {0!r} to GRC port type".format(dtype))
vlen = dtype.shape[0] if len(dtype.shape) > 0 else 1
ports.append((str(i), port_type, vlen))
for msg_key in msgs:
if msg_key == 'system':
continue
ports.append((msg_key, 'message', 1))
return ports
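# Illustrative (hypothetical) result for a block with in_sig=(numpy.float32,)
# and one message port named 'msg_in':
#   [('0', 'float', 1), ('msg_in', 'message', 1)]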
def _find_block_class(source_code, cls):
ns = {}
try:
exec(source_code, ns)
except Exception as e:
raise ValueError("Can't interpret source code: " + str(e))
for var in six.itervalues(ns):
if inspect.isclass(var) and issubclass(var, cls):
return var
raise ValueError('No python block class found in code')
def extract(cls):
try:
from gnuradio import gr
import pmt
except ImportError:
raise EnvironmentError("Can't import GNU Radio")
if not inspect.isclass(cls):
cls = _find_block_class(cls, gr.gateway.gateway_block)
spec = inspect.getargspec(cls.__init__)
init_args = spec.args[1:]
defaults = [repr(arg) for arg in (spec.defaults or ())]
doc = cls.__doc__ or cls.__init__.__doc__ or ''
cls_name = cls.__name__
if len(defaults) + 1 != len(spec.args):
raise ValueError("Need all __init__ arguments to have default values")
try:
instance = cls()
except Exception as e:
raise RuntimeError("Can't create an instance of your block: " + str(e))
name = instance.name()
params = list(zip(init_args, defaults))
def settable(attr):
try:
return callable(getattr(cls, attr).fset) # check for a property with setter
except AttributeError:
return attr in instance.__dict__ # not dir() - only the instance attribs
callbacks = [attr for attr in dir(instance) if attr in init_args and settable(attr)]
sinks = _ports(instance.in_sig(),
pmt.to_python(instance.message_ports_in()))
sources = _ports(instance.out_sig(),
pmt.to_python(instance.message_ports_out()))
return BlockIO(name, cls_name, params, sinks, sources, doc, callbacks)
if __name__ == '__main__':
blk_code = """
import numpy as np
from gnuradio import gr
import pmt
class blk(gr.sync_block):
def __init__(self, param1=None, param2=None, param3=None):
"Test Docu"
gr.sync_block.__init__(
self,
name='Embedded Python Block',
in_sig = (np.float32,),
out_sig = (np.float32,np.complex64,),
)
self.message_port_register_in(pmt.intern('msg_in'))
self.message_port_register_out(pmt.intern('msg_out'))
self.param1 = param1
self._param2 = param2
self._param3 = param3
@property
def param2(self):
return self._param2
@property
def param3(self):
return self._param3
@param3.setter
def param3(self, value):
self._param3 = value
def work(self, inputs_items, output_items):
return 10
"""
from pprint import pprint
pprint(dict(extract(blk_code)._asdict()))
|
iohannez/gnuradio
|
grc/core/utils/epy_block_io.py
|
Python
|
gpl-3.0
| 3,687
|
# Copyright (c) 2012-2013, Mark Peek <mark@peek.org>
# All rights reserved.
#
# See LICENSE file for full license.
from aws import Action
service_name = 'AWS CloudFormation'
prefix = 'cloudformation'
class CloudformationAction(Action):
def __init__(self, action=None):
self.prefix = prefix
self.action = action
CancelUpdateStack = CloudformationAction('CancelUpdateStack')
CreateStack = CloudformationAction('CreateStack')
DeleteStack = CloudformationAction('DeleteStack')
DescribeStackEvents = CloudformationAction('DescribeStackEvents')
DescribeStackResource = CloudformationAction('DescribeStackResource')
DescribeStackResources = CloudformationAction('DescribeStackResources')
DescribeStacks = CloudformationAction('DescribeStacks')
EstimateTemplateCost = CloudformationAction('EstimateTemplateCost')
GetStackPolicy = CloudformationAction('GetStackPolicy')
GetTemplate = CloudformationAction('GetTemplate')
GetTemplateSummary = CloudformationAction('GetTemplateSummary')
ListStackResources = CloudformationAction('ListStackResources')
ListStacks = CloudformationAction('ListStacks')
SetStackPolicy = CloudformationAction('SetStackPolicy')
SignalResource = CloudformationAction('SignalResource')
UpdateStack = CloudformationAction('UpdateStack')
ValidateTemplate = CloudformationAction('ValidateTemplate')
|
craigbruce/awacs
|
awacs/cloudformation.py
|
Python
|
bsd-2-clause
| 1,335
|
'''
Created on Dec 12, 2011
@author: sean
'''
from ..asttools import Visitor
import ast
#FIXME: add tests
class CopyVisitor(Visitor):
'''
Copy only ast nodes and lists
'''
def visitDefault(self, node):
Node = type(node)
new_node = Node()
for _field in Node._fields:
if hasattr(node, _field):
field = getattr(node, _field)
if isinstance(field, (list, tuple)):
new_list = []
for item in field:
if isinstance(item, ast.AST):
new_item = self.visit(item)
else:
new_item = item
new_list.append(new_item)
setattr(new_node, _field, new_list)
elif isinstance(field, ast.AST):
setattr(new_node, _field, self.visit(field))
else:
setattr(new_node, _field, field)
for _attr in node._attributes:
if hasattr(node, _attr):
setattr(new_node, _attr, getattr(node, _attr))
return new_node
def copy_node(node):
return CopyVisitor().visit(node)
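# Usage sketch (hypothetical input): deep-copy a parsed expression so that
# mutating the copy leaves the original tree untouched:
#   tree = ast.parse('a + b')
#   tree_copy = copy_node(tree)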
|
jasonyaw/SFrame
|
oss_src/unity/python/sframe/meta/asttools/visitors/copy_tree.py
|
Python
|
bsd-3-clause
| 1,264
|
from stemming.porter2 import stem
from os import listdir
from os.path import isfile, join
from operator import itemgetter
import string
import sys
table = string.maketrans(string.punctuation+"0123456789", " ")
if len(sys.argv) != 2:
print "Usage: python main.py input_directory"
exit()
files = [f for f in listdir(sys.argv[1]) if isfile(join(sys.argv[1], f))] #get files
words = []
new_words = []
newest_words = []
inverted_index = {}
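# inverted_index maps each stemmed word to [[filename, term_frequency], ...];
# illustrative (hypothetical) shape: {'comput': [['doc1.txt', 3], ['doc2.txt', 0]]}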
unique_word_list = []
stopwords = open("./input/stopword.txt", "r").read().split()
for file in files: #for each file do
for word in open(sys.argv[1]+'/'+file).read().translate(table).split(): #for each word
if word.lower() not in stopwords: #remove stopwords
new_words.append(word.lower())
for word in new_words: #do stemming
newest_words.append(stem(word))
new_words = []
for word in newest_words: #remove stopwords
if word not in stopwords:
new_words.append(word)
newest_words = []
for word in new_words:
if word not in unique_word_list:
unique_word_list.append(word)
        try:
            # the word is already in the index; add this file's count
            inverted_index[word].append([file, new_words.count(word)])
        except KeyError:
            inverted_index[word] = [[file, new_words.count(word)]]
unique_word_list = []
new_words = []
flag = 0 #adding entries of files with 0 count of words
tf_index_on_disk = inverted_index # note: this binds a second name to the same dict (alias, not a copy)
for file in files:
for word in tf_index_on_disk:
for element in tf_index_on_disk[word]:
if file in element:
flag = 1
break
if flag == 0:
tf_index_on_disk[word].append([file, 0])
flag = 0
x = 0
new_index = {}
for i in sorted(inverted_index):
print i,
d = 10 - len(i)
while x < d:
print "",
x = x + 1
x = 0
new_index[i] = sorted(inverted_index[i], key=itemgetter(0))
print sorted(inverted_index[i], key=itemgetter(1), reverse=True)
write_file = open("./input/tf_modelDemo", "w")
write_file.write(str(new_index))
write_file.close()
|
TusharAgey/seventhsem
|
Info_Retrieval/main.py
|
Python
|
gpl-3.0
| 2,025
|
"""
Django settings for mosteit_django project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '^s(=^0szb81p=gxi7zgr4%8p&l_61=(y%w^(6n1q2=o!vag-1d'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
    'forum',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'mosteit_django.urls'
WSGI_APPLICATION = 'mosteit_django.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR+'/mosteit_forum.sqlite',
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
|
mmosteit/django_forum
|
mosteit_django/settings.py
|
Python
|
gpl-3.0
| 2,027
|
#v+
#!/usr/bin/env python
import imaplib
import email
def checkMessages(incoming_server, user, password):
has_data = False
subject = ''
obj = imaplib.IMAP4_SSL(incoming_server, '993')
obj.login(user, password)
obj.select('Inbox')
    typ, data = obj.search(None, 'UnSeen')
string = data[0]
msg_count = len(string.split(' '))
index = string.find(' ')
if not index == -1:
string = string[:string.find(' ')]
if(msg_count >= 1 and len(string) > 0):
        obj.store(string, '+FLAGS', r'\Seen')  # mark the message as read
typ, data = obj.fetch(string, '(RFC822)')
#TODO: filter by imei subject
#get attachment
text = data[0][1]
msg = email.message_from_string(text)
for part in msg.walk():
if part.get_content_maintype() == 'multipart':
continue
if part.get('Content-Disposition') is None:
continue
filename = part.get_filename()
data = part.get_payload(decode=True)
has_data = True
if not data:
has_data = False
data = []
subject = ''
msg = ''
continue
subject = msg['Subject']
return data,subject,has_data,max(msg_count - 1,0)
#def main():
# checkMessages('imap.gmail.com','jmalsbury.personal@gmail.com','sweet525',0)
#if __name__ == '__main__':
# main()
|
astronewts/Flight1
|
misc/allaloft/groundstation/python/imap_stuff.py
|
Python
|
gpl-3.0
| 1,482
|
from unittest import TestCase
from sorting.merge import merge_list, merge_sort, merge_sort_recursive
class TestSortingMerge(TestCase):
def test_merge_list(self):
left = [1, 4, 6]
right = [2, 3, 5]
result = merge_list(left, right)
self.assertEquals(6, len(result))
for i in range(len(result)):
self.assertEquals(i + 1, result[i])
left = [1, 4, 6, 7, 8, 9]
right = [2, 3, 5]
result = merge_list(left, right)
self.assertEquals(9, len(result))
for i in range(len(result)):
self.assertEquals(i + 1, result[i])
def test_merge_sort(self):
array = [1, 7, 5, 4, 6, 8, 5, 3, 9]
result = merge_sort(array)
self.assertEquals(9, len(result))
self.assertEquals(1, result[0])
self.assertEquals(3, result[1])
self.assertEquals(4, result[2])
self.assertEquals(5, result[3])
self.assertEquals(5, result[4])
self.assertEquals(6, result[5])
self.assertEquals(7, result[6])
self.assertEquals(8, result[7])
self.assertEquals(9, result[8])
def test_merge_sort_recursive(self):
array = [1, 7, 5, 4, 6, 8, 5, 3, 9, 8]
result = merge_sort_recursive(array)
self.assertEquals(10, len(result))
self.assertEquals(1, result[0])
self.assertEquals(3, result[1])
self.assertEquals(4, result[2])
self.assertEquals(5, result[3])
self.assertEquals(5, result[4])
self.assertEquals(6, result[5])
self.assertEquals(7, result[6])
self.assertEquals(8, result[7])
self.assertEquals(8, result[8])
self.assertEquals(9, result[9])
|
narfman0/challenges
|
algorithms/tests/sorting/test_merge.py
|
Python
|
gpl-3.0
| 1,709
|
#!/usr/local/bin/python
import socket
import time
import exceptions as e
from config import *
def change_nick(irc):
    irc.send("NICK " + cfg['botnick'] + "\r\n")
    x = "_"
    while "Nickname is already in use" in irc.recv(4096):
        # keep appending underscores until the server accepts a nick
        irc.send("NICK " + cfg['botnick'] + x + "\r\n")
        x += "_"
        time.sleep(1)
def check_config():
for x,y in cfg.iteritems():
if not y:
if x not in cfg['optional_cfg']:
raise e.ConfigError("Found an error in config.py file")
def connect():
    '''Connects to the server through a socket,
    identifies the bot to the server and to NickServ,
    and returns the connected socket'''
    irc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)  # defines the socket
    irc.connect((cfg['server'], cfg['port']))
    irc.send("USER " + cfg['botnick'] + " 8 * :" + cfg['realname'] + "\r\n")
    name_bot(irc)
    irc.send("JOIN " + cfg['channels'] + "\r\n")
    return irc
def name_bot(irc):
    change_nick(irc)
    irc.send("PRIVMSG nickserv :identify %s %s\r\n" % (cfg['botnick'], cfg['password']))
def parse_cmd(text):
    resp = text.split()
    return resp
|
gerardduenas/geni
|
functions.py
|
Python
|
mit
| 1,109
|
"""
Basic profiling code for working out where we're spending our time
Invoke with:
./manage.py shell -c 'from matrixstore.profile import profile; profile()'
Currently set up to profile the total spending code, but easy to adapt to
profile other functions (e.g. PPU savings - see commented out code)
"""
from cProfile import Profile
import datetime
import time
# from frontend.price_per_unit.savings import get_all_savings_for_orgs
from api.views_spending import _get_total_prescribing_entries
def test():
# get_all_savings_for_orgs("2019-11-01", "ccg", ["99C"])
# get_all_savings_for_orgs("2019-11-01", "all_standard_practices", [None])
list(_get_total_prescribing_entries(["02"]))
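# To profile a different code path, swap the body of test() for any callable,
# e.g. one of the commented PPU savings calls above.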
def profile():
num_attempts = 5
attempts = []
for _ in range(num_attempts):
profiler = Profile()
start = time.time()
profiler.runcall(test)
duration = time.time() - start
attempts.append((duration, profiler))
attempts.sort()
profile_file = "profile.{}.prof".format(
datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
)
attempts[0][1].dump_stats(profile_file)
print(
"{}s (best of {}), profile saved as: {}".format(
attempts[0][0], num_attempts, profile_file
)
)
|
ebmdatalab/openprescribing
|
openprescribing/matrixstore/profile.py
|
Python
|
mit
| 1,279
|
# -*- coding: utf-8 -*-
##############################################################################
#
# GNU Health: The Free Health and Hospital Information System
# Copyright (C) 2008-2013 Luis Falcon <falcon@gnu.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from trytond.pool import Pool
from .school import *
from .student import *
def register():
Pool.register(
School,
Student,
Configuration,
module='school', type_='model'
)
Pool.register(
SchoolReport,
module='school', type_='report'
)
|
kret0s/gnuhealth-live
|
tryton/server/trytond-3.8.3/trytond/modules/school/__init__.py
|
Python
|
gpl-3.0
| 1,280
|
from django.template import TemplateSyntaxError
from django.test import SimpleTestCase
from ..utils import setup
class CycleTagTests(SimpleTestCase):
@setup({'cycle01': '{% cycle a %}'})
def test_cycle01(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('cycle01')
@setup({'cycle05': '{% cycle %}'})
def test_cycle05(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('cycle05')
@setup({'cycle06': '{% cycle a %}'})
def test_cycle06(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('cycle06')
@setup({'cycle07': '{% cycle a,b,c as foo %}{% cycle bar %}'})
def test_cycle07(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('cycle07')
@setup({'cycle10': "{% cycle 'a' 'b' 'c' as abc %}{% cycle abc %}"})
def test_cycle10(self):
output = self.engine.render_to_string('cycle10')
self.assertEqual(output, 'ab')
@setup({'cycle11': "{% cycle 'a' 'b' 'c' as abc %}{% cycle abc %}{% cycle abc %}"})
def test_cycle11(self):
output = self.engine.render_to_string('cycle11')
self.assertEqual(output, 'abc')
@setup({'cycle12': "{% cycle 'a' 'b' 'c' as abc %}{% cycle abc %}{% cycle abc %}{% cycle abc %}"})
def test_cycle12(self):
output = self.engine.render_to_string('cycle12')
self.assertEqual(output, 'abca')
@setup({'cycle13': "{% for i in test %}{% cycle 'a' 'b' %}{{ i }},{% endfor %}"})
def test_cycle13(self):
output = self.engine.render_to_string('cycle13', {'test': list(range(5))})
self.assertEqual(output, 'a0,b1,a2,b3,a4,')
@setup({'cycle14': '{% cycle one two as foo %}{% cycle foo %}'})
def test_cycle14(self):
output = self.engine.render_to_string('cycle14', {'one': '1', 'two': '2'})
self.assertEqual(output, '12')
@setup({'cycle15': '{% for i in test %}{% cycle aye bee %}{{ i }},{% endfor %}'})
def test_cycle15(self):
output = self.engine.render_to_string('cycle15', {'test': list(range(5)), 'aye': 'a', 'bee': 'b'})
self.assertEqual(output, 'a0,b1,a2,b3,a4,')
@setup({'cycle16': '{% cycle one|lower two as foo %}{% cycle foo %}'})
def test_cycle16(self):
output = self.engine.render_to_string('cycle16', {'one': 'A', 'two': '2'})
self.assertEqual(output, 'a2')
@setup({'cycle17': "{% cycle 'a' 'b' 'c' as abc silent %}"
"{% cycle abc %}{% cycle abc %}{% cycle abc %}{% cycle abc %}"})
def test_cycle17(self):
output = self.engine.render_to_string('cycle17')
self.assertEqual(output, '')
@setup({'cycle18': "{% cycle 'a' 'b' 'c' as foo invalid_flag %}"})
def test_cycle18(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('cycle18')
@setup({'cycle19': "{% cycle 'a' 'b' as silent %}{% cycle silent %}"})
def test_cycle19(self):
output = self.engine.render_to_string('cycle19')
self.assertEqual(output, 'ab')
@setup({'cycle20': '{% cycle one two as foo %} & {% cycle foo %}'})
def test_cycle20(self):
output = self.engine.render_to_string('cycle20', {'two': 'C & D', 'one': 'A & B'})
self.assertEqual(output, 'A & B & C & D')
@setup({'cycle21': '{% filter force_escape %}'
'{% cycle one two as foo %} & {% cycle foo %}{% endfilter %}'})
def test_cycle21(self):
output = self.engine.render_to_string('cycle21', {'two': 'C & D', 'one': 'A & B'})
self.assertEqual(output, 'A &amp; B & C &amp; D')
@setup({'cycle22': "{% for x in values %}{% cycle 'a' 'b' 'c' as abc silent %}{{ x }}{% endfor %}"})
def test_cycle22(self):
output = self.engine.render_to_string('cycle22', {'values': [1, 2, 3, 4]})
self.assertEqual(output, '1234')
@setup({'cycle23': "{% for x in values %}"
"{% cycle 'a' 'b' 'c' as abc silent %}{{ abc }}{{ x }}{% endfor %}"})
def test_cycle23(self):
output = self.engine.render_to_string('cycle23', {'values': [1, 2, 3, 4]})
self.assertEqual(output, 'a1b2c3a4')
@setup({
'cycle24': "{% for x in values %}"
"{% cycle 'a' 'b' 'c' as abc silent %}{% include 'included-cycle' %}{% endfor %}",
'included-cycle': '{{ abc }}',
})
def test_cycle24(self):
output = self.engine.render_to_string('cycle24', {'values': [1, 2, 3, 4]})
self.assertEqual(output, 'abca')
@setup({'cycle25': '{% cycle a as abc %}'})
def test_cycle25(self):
output = self.engine.render_to_string('cycle25', {'a': '<'})
self.assertEqual(output, '<')
@setup({'cycle26': '{% cycle a b as ab %}{% cycle ab %}'})
def test_cycle26(self):
output = self.engine.render_to_string('cycle26', {'a': '<', 'b': '>'})
self.assertEqual(output, '<>')
@setup({'cycle27': '{% autoescape off %}{% cycle a b as ab %}{% cycle ab %}{% endautoescape %}'})
def test_cycle27(self):
output = self.engine.render_to_string('cycle27', {'a': '<', 'b': '>'})
self.assertEqual(output, '<>')
@setup({'cycle28': '{% cycle a|safe b as ab %}{% cycle ab %}'})
def test_cycle28(self):
output = self.engine.render_to_string('cycle28', {'a': '<', 'b': '>'})
self.assertEqual(output, '<>')
@setup({
'cycle29': "{% cycle 'a' 'b' 'c' as cycler silent %}"
"{% for x in values %}"
"{% ifchanged x %}"
"{% cycle cycler %}{{ cycler }}"
"{% else %}"
"{{ cycler }}"
"{% endifchanged %}"
"{% endfor %}"
})
def test_cycle29(self):
"""
A named {% cycle %} tag works inside an {% ifchanged %} block and a
{% for %} loop.
"""
output = self.engine.render_to_string('cycle29', {'values': [1, 2, 3, 4, 5, 6, 7, 8, 8, 8, 9, 9]})
self.assertEqual(output, 'bcabcabcccaa')
@setup({
'cycle30': "{% cycle 'a' 'b' 'c' as cycler silent %}"
"{% for x in values %}"
"{% with doesnothing=irrelevant %}"
"{% ifchanged x %}"
"{% cycle cycler %}{{ cycler }}"
"{% else %}"
"{{ cycler }}"
"{% endifchanged %}"
"{% endwith %}"
"{% endfor %}"})
def test_cycle30(self):
"""
A {% with %} tag shouldn't reset the {% cycle %} variable.
"""
output = self.engine.render_to_string(
'cycle30', {
'irrelevant': 1,
'values': [1, 2, 3, 4, 5, 6, 7, 8, 8, 8, 9, 9]
})
self.assertEqual(output, 'bcabcabcccaa')
|
Beauhurst/django
|
tests/template_tests/syntax_tests/test_cycle.py
|
Python
|
bsd-3-clause
| 6,974
|
''' This script is prepared by Tyler Pubben and is licensed under the MIT license framework.
It is free to use and distribute however please reference http://www.tjscientific.com or my
GIT repository at https://github.com/tpubben/SequoiaStacking/'''
import numpy as np
import cv2
import os
def align_images(in_fldr, out_fldr, moving, fixed):
MIN_MATCH_COUNT = 10
moving_im = cv2.imread(moving, 0) # image to be distorted
fixed_im = cv2.imread(fixed, 0) # image to be matched
# Initiate SIFT detector
sift = cv2.xfeatures2d.SIFT_create()
# find the keypoints and descriptors with SIFT
kp1, des1 = sift.detectAndCompute(moving_im, None)
kp2, des2 = sift.detectAndCompute(fixed_im, None)
# use FLANN method to match keypoints. Brute force matches not appreciably better
# and added processing time is significant.
FLANN_INDEX_KDTREE = 0
index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
search_params = dict(checks=50)
flann = cv2.FlannBasedMatcher(index_params, search_params)
matches = flann.knnMatch(des1, des2, k=2)
    # store all the good matches following Lowe's ratio test: keep a match only
    # when its best distance is well below the second-best (factor 0.7 here)
good = []
for m, n in matches:
if m.distance < 0.7 * n.distance:
good.append(m)
if len(good) > MIN_MATCH_COUNT:
src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
h, w = moving_im.shape # shape of input images, needs to remain the same for output
outimg = cv2.warpPerspective(moving_im, M, (w, h))
return outimg
    else:
        print("Not enough matches are found for moving image")
        return None
ch1 = int(input('Which band do you want for channel 1 on output image? Green(1), Red(2), Red Edge(3) or NIR(4)'))
ch2 = int(input('Which band do you want for channel 2 on output image? Green(1), Red(2), Red Edge(3) or NIR(4)'))
ch3 = int(input('Which band do you want for channel 3 on output image? Green(1), Red(2), Red Edge(3) or NIR(4)'))
channel_order = [ch1,ch2,ch3]
output_folder = str(input('Enter path to output folder: '))
input_folder = str(input('Enter path to input folder: '))
image_list = sorted(f for f in os.listdir(input_folder) if os.path.isfile(os.path.join(input_folder, f)))  # sorted so each capture's four bands stay adjacent
image_tups = zip(*[image_list[i::4] for i in range(4)])
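# image_tups groups every 4 consecutive (sorted) files into one capture,
# assuming the camera writes the four band images with names that sort
# together, e.g. IMG_0001_1.TIF .. IMG_0001_4.TIF (illustrative names)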
# set the fixed image to minimize amount of translation that needs to occur
if 1 in channel_order and 2 in channel_order and 3 in channel_order:
fixed_image = 1
moving_im1 = 0
moving_im2 = 2
elif 2 in channel_order and 3 in channel_order and 4 in channel_order:
fixed_image = 2
moving_im1 = 1
moving_im2 = 3
elif 1 in channel_order and 3 in channel_order and 4 in channel_order:
fixed_image = 2
moving_im1 = 0
moving_im2 = 3
elif 1 in channel_order and 2 in channel_order and 4 in channel_order:
    fixed_image = 1
    moving_im1 = 0
    moving_im2 = 3
else:
    raise ValueError('Channel selection must include three distinct bands')
# iterate through each set of 4 images
for tup in image_tups:
band1 = align_images(input_folder, output_folder, os.path.join(input_folder, tup[moving_im1]),
os.path.join(input_folder, tup[fixed_image]))
band2 = align_images(input_folder, output_folder, os.path.join(input_folder, tup[moving_im2]),
os.path.join(input_folder, tup[fixed_image]))
band3 = cv2.imread(os.path.join(input_folder, tup[fixed_image]), 0)
merged = cv2.merge((band1, band2, band3))
cv2.imwrite(os.path.join(output_folder, tup[fixed_image][-30:-4]) + '_merged.jpg', merged)
|
tpubben/SequoiaStacking
|
parrotStacking.py
|
Python
|
mit
| 3,776
|
import os
import logging
import shutil
import zipfile
import xmltodict
from osgeo import ogr, osr
from django.core.exceptions import ValidationError
from django.core.files.uploadedfile import UploadedFile
from django.db import models, transaction
from django.utils.html import strip_tags
from django.template import Template, Context
from dominate.tags import legend, table, tbody, tr, th, div
from hs_core.models import Title
from hs_core.hydroshare import utils
from hs_core.hydroshare.resource import delete_resource_file
from hs_core.forms import CoverageTemporalForm
from hs_geographic_feature_resource.models import GeographicFeatureMetaDataMixin, \
OriginalCoverage, GeometryInformation, FieldInformation
from base import AbstractFileMetaData, AbstractLogicalFile
UNKNOWN_STR = "unknown"
class GeoFeatureFileMetaData(GeographicFeatureMetaDataMixin, AbstractFileMetaData):
# the metadata element models are from the geographic feature resource type app
model_app_label = 'hs_geographic_feature_resource'
def get_metadata_elements(self):
elements = super(GeoFeatureFileMetaData, self).get_metadata_elements()
elements += [self.originalcoverage, self.geometryinformation]
elements += list(self.fieldinformations.all())
return elements
@classmethod
def get_metadata_model_classes(cls):
metadata_model_classes = super(GeoFeatureFileMetaData, cls).get_metadata_model_classes()
metadata_model_classes['originalcoverage'] = OriginalCoverage
metadata_model_classes['geometryinformation'] = GeometryInformation
metadata_model_classes['fieldinformation'] = FieldInformation
return metadata_model_classes
def get_html(self):
"""overrides the base class function"""
html_string = super(GeoFeatureFileMetaData, self).get_html()
html_string += self.geometryinformation.get_html()
if self.spatial_coverage:
html_string += self.spatial_coverage.get_html()
if self.originalcoverage:
html_string += self.originalcoverage.get_html()
if self.temporal_coverage:
html_string += self.temporal_coverage.get_html()
html_string += self._get_field_informations_html()
template = Template(html_string)
context = Context({})
return template.render(context)
def _get_field_informations_html(self):
root_div = div(cls="col-md-12 col-sm-12 pull-left", style="margin-bottom:40px;")
with root_div:
legend('Field Information')
with table(style="width: 100%;"):
with tbody():
with tr(cls='row'):
th('Name')
th('Type')
th('Width')
th('Precision')
for field_info in self.fieldinformations.all():
field_info.get_html(pretty=False)
return root_div.render()
def get_html_forms(self, datatset_name_form=True):
"""overrides the base class function to generate html needed for metadata editing"""
root_div = div("{% load crispy_forms_tags %}")
with root_div:
super(GeoFeatureFileMetaData, self).get_html_forms()
with div(cls="col-lg-6 col-xs-12"):
div("{% crispy geometry_information_form %}")
with div(cls="col-lg-6 col-xs-12 col-md-pull-6", style="margin-top:40px;"):
div("{% crispy spatial_coverage_form %}")
with div(cls="col-lg-6 col-xs-12"):
div("{% crispy original_coverage_form %}")
template = Template(root_div.render())
context_dict = dict()
context_dict["geometry_information_form"] = self.get_geometry_information_form()
update_action = "/hydroshare/hsapi/_internal/GeoFeatureLogicalFile/{0}/{1}/{2}/update-file-metadata/"
create_action = "/hydroshare/hsapi/_internal/GeoFeatureLogicalFile/{0}/{1}/add-file-metadata/"
temp_cov_form = self.get_temporal_coverage_form()
if self.temporal_coverage:
form_action = update_action.format(self.logical_file.id, "coverage",
self.temporal_coverage.id)
temp_cov_form.action = form_action
else:
form_action = create_action.format(self.logical_file.id, "coverage")
temp_cov_form.action = form_action
context_dict["temp_form"] = temp_cov_form
context_dict['original_coverage_form'] = self.get_original_coverage_form()
context_dict['spatial_coverage_form'] = self.get_spatial_coverage_form()
context = Context(context_dict)
rendered_html = template.render(context)
rendered_html += self._get_field_informations_html()
return rendered_html
def get_geometry_information_form(self):
return GeometryInformation.get_html_form(resource=None, element=self.geometryinformation,
file_type=True, allow_edit=False)
def get_original_coverage_form(self):
return OriginalCoverage.get_html_form(resource=None, element=self.originalcoverage,
file_type=True, allow_edit=False)
@classmethod
def validate_element_data(cls, request, element_name):
"""overriding the base class method"""
# the only metadata that we are allowing for editing is the temporal coverage
element_name = element_name.lower()
if element_name != 'coverage' or 'start' not in request.POST:
err_msg = 'Data for temporal coverage is missing'
return {'is_valid': False, 'element_data_dict': None, "errors": err_msg}
element_form = CoverageTemporalForm(data=request.POST)
if element_form.is_valid():
return {'is_valid': True, 'element_data_dict': element_form.cleaned_data}
else:
return {'is_valid': False, 'element_data_dict': None, "errors": element_form.errors}
def add_to_xml_container(self, container):
"""Generates xml+rdf representation of all metadata elements associated with this
logical file type instance"""
container_to_add_to = super(GeoFeatureFileMetaData, self).add_to_xml_container(container)
if self.geometryinformation:
self.geometryinformation.add_to_xml_container(container_to_add_to)
for fieldinfo in self.fieldinformations.all():
fieldinfo.add_to_xml_container(container_to_add_to)
if self.originalcoverage:
self.originalcoverage.add_to_xml_container(container_to_add_to)
class GeoFeatureLogicalFile(AbstractLogicalFile):
metadata = models.OneToOneField(GeoFeatureFileMetaData, related_name="logical_file")
data_type = "GeographicFeature"
@classmethod
def get_allowed_uploaded_file_types(cls):
"""only .zip or .shp file can be set to this logical file group"""
# See Shapefile format:
# http://resources.arcgis.com/en/help/main/10.2/index.html#//005600000003000000
return (".zip", ".shp", ".shx", ".dbf", ".prj",
".sbx", ".sbn", ".cpg", ".xml", ".fbn",
".fbx", ".ain", ".aih", ".atx", ".ixs",
".mxs")
@classmethod
def get_allowed_storage_file_types(cls):
"""file types allowed in this logical file group are the followings"""
return [".shp", ".shx", ".dbf", ".prj",
".sbx", ".sbn", ".cpg", ".xml", ".fbn",
".fbx", ".ain", ".aih", ".atx", ".ixs",
".mxs"
]
@classmethod
def create(cls):
"""this custom method MUST be used to create an instance of this class"""
feature_metadata = GeoFeatureFileMetaData.objects.create(keywords=[])
return cls.objects.create(metadata=feature_metadata)
@property
def supports_resource_file_move(self):
"""resource files that are part of this logical file can't be moved"""
return False
@property
def supports_resource_file_add(self):
"""doesn't allow a resource file to be added"""
return False
@property
def supports_resource_file_rename(self):
"""resource files that are part of this logical file can't be renamed"""
return False
@property
def supports_delete_folder_on_zip(self):
"""does not allow the original folder to be deleted upon zipping of that folder"""
return False
@classmethod
def set_file_type(cls, resource, file_id, user):
"""
Sets a .shp or .zip resource file to GeoFeatureFile type
:param resource: an instance of resource type CompositeResource
:param file_id: id of the resource file to be set as GeoFeatureFile type
:param user: user who is setting the file type
:return:
"""
# had to import it here to avoid import loop
from hs_core.views.utils import create_folder, remove_folder
log = logging.getLogger()
# get the file from irods
res_file = utils.get_resource_file_by_id(resource, file_id)
if res_file is None or not res_file.exists:
raise ValidationError("File not found.")
if res_file.extension.lower() not in ('.zip', '.shp'):
raise ValidationError("Not a valid geographic feature file.")
if not res_file.has_generic_logical_file:
raise ValidationError("Selected file must be part of a generic file type.")
try:
meta_dict, shape_files, shp_res_files = extract_metadata_and_files(resource, res_file)
except ValidationError as ex:
log.exception(ex.message)
raise ex
# hold on to temp dir for final clean up
temp_dir = os.path.dirname(shape_files[0])
file_name = res_file.file_name
# file name without the extension
base_file_name = file_name[:-len(res_file.extension)]
xml_file = ''
for f in shape_files:
if f.lower().endswith('.shp.xml'):
xml_file = f
break
file_folder = res_file.file_folder
file_type_success = False
upload_folder = ''
msg = "GeoFeature file type. Error when setting file type. Error:{}"
with transaction.atomic():
# create a GeoFeature logical file object to be associated with
# resource files
logical_file = cls.create()
# by default set the dataset_name attribute of the logical file to the
# name of the file selected to set file type
logical_file.dataset_name = base_file_name
logical_file.save()
try:
# create a folder for the geofeature file type using the base file
# name as the name for the new folder
new_folder_path = cls.compute_file_type_folder(resource, file_folder,
base_file_name)
create_folder(resource.short_id, new_folder_path)
log.info("Folder created:{}".format(new_folder_path))
new_folder_name = new_folder_path.split('/')[-1]
if file_folder is None:
upload_folder = new_folder_name
else:
upload_folder = os.path.join(file_folder, new_folder_name)
# add all new files to the resource
files_to_add_to_resource = shape_files
for fl in files_to_add_to_resource:
uploaded_file = UploadedFile(file=open(fl, 'rb'),
name=os.path.basename(fl))
# the added resource file will be part of a new generic logical file by default
new_res_file = utils.add_file_to_resource(
resource, uploaded_file, folder=upload_folder
)
# delete the generic logical file object
if new_res_file.logical_file is not None:
# deleting the file level metadata object will delete the associated
# logical file object
new_res_file.logical_file.metadata.delete()
# make each resource file we added part of the logical file
logical_file.add_resource_file(new_res_file)
log.info("GeoFeature file type - files were added to the file type.")
add_metadata(resource, meta_dict, xml_file, logical_file)
log.info("GeoFeature file type and resource level metadata updated.")
# delete the original resource files used as part of setting file type
for fl in shp_res_files:
delete_resource_file(resource.short_id, fl.id, user)
log.info("Deleted original resource files.")
file_type_success = True
except Exception as ex:
msg = msg.format(ex.message)
log.exception(msg)
finally:
# remove temp dir
if os.path.isdir(temp_dir):
shutil.rmtree(temp_dir)
if not file_type_success and upload_folder:
# delete any new files uploaded as part of setting file type
folder_to_remove = os.path.join('data', 'contents', upload_folder)
remove_folder(user, resource.short_id, folder_to_remove)
log.info("Deleted newly created file type folder")
raise ValidationError(msg)
def extract_metadata_and_files(resource, res_file, file_type=True):
"""
validates shape files and extracts metadata
:param resource: an instance of BaseResource
:param res_file: an instance of ResourceFile
:param file_type: A flag to control if extraction being done for file type or resource type
:return: a dict of extracted metadata, a list file paths of shape related files on the
temp directory, a list of resource files retrieved from iRODS for this processing
"""
shape_files, shp_res_files = get_all_related_shp_files(resource, res_file, file_type=file_type)
temp_dir = os.path.dirname(shape_files[0])
if not _check_if_shape_files(shape_files):
if res_file.extension.lower() == '.shp':
err_msg = "One or more dependent shape files are missing at location: " \
"{folder_path} or one or more files are not of shape file type."
err_msg = err_msg.format(folder_path=res_file.short_path)
else:
err_msg = "One or more dependent shape files are missing in the selected zip file " \
"or one or more files are not of shape file type."
if os.path.isdir(temp_dir):
shutil.rmtree(temp_dir)
raise ValidationError(err_msg)
shp_file = ''
for f in shape_files:
if f.lower().endswith('.shp'):
shp_file = f
break
try:
meta_dict = extract_metadata(shp_file_full_path=shp_file)
return meta_dict, shape_files, shp_res_files
except Exception as ex:
# remove temp dir
if os.path.isdir(temp_dir):
shutil.rmtree(temp_dir)
if file_type:
msg = "GeoFeature file type. Error when setting file type. Error:{}"
else:
msg = "Failed to parse the .shp file. Error{}"
msg = msg.format(ex.message)
raise ValidationError(msg)
def add_metadata(resource, metadata_dict, xml_file, logical_file=None):
"""
creates/updates metadata at resource and file level
:param resource: an instance of BaseResource
:param metadata_dict: dict containing extracted metadata
:param xml_file: file path (on temp directory) of the xml file that is part of the
geo feature files
:param logical_file: an instance of GeoFeatureLogicalFile if metadata needs to be part of the
logical file
:return:
"""
# populate resource and logical file level metadata
target_obj = logical_file if logical_file is not None else resource
if "coverage" in metadata_dict.keys():
coverage_dict = metadata_dict["coverage"]['Coverage']
target_obj.metadata.coverages.all().filter(type='box').delete()
target_obj.metadata.create_element('coverage',
type=coverage_dict['type'],
value=coverage_dict['value'])
originalcoverage_dict = metadata_dict["originalcoverage"]['originalcoverage']
if target_obj.metadata.originalcoverage is not None:
target_obj.metadata.originalcoverage.delete()
target_obj.metadata.create_element('originalcoverage', **originalcoverage_dict)
field_info_array = metadata_dict["field_info_array"]
target_obj.metadata.fieldinformations.all().delete()
for field_info in field_info_array:
field_info_dict = field_info["fieldinformation"]
target_obj.metadata.create_element('fieldinformation', **field_info_dict)
geometryinformation_dict = metadata_dict["geometryinformation"]
if target_obj.metadata.geometryinformation is not None:
target_obj.metadata.geometryinformation.delete()
target_obj.metadata.create_element('geometryinformation', **geometryinformation_dict)
if xml_file:
shp_xml_metadata_list = parse_shp_xml(xml_file)
for shp_xml_metadata in shp_xml_metadata_list:
if 'description' in shp_xml_metadata:
# overwrite existing description metadata - at the resource level
if not resource.metadata.description:
abstract = shp_xml_metadata['description']['abstract']
resource.metadata.create_element('description',
abstract=abstract)
elif 'title' in shp_xml_metadata:
title = shp_xml_metadata['title']['value']
title_element = resource.metadata.title
if title_element.value.lower() == 'untitled resource':
resource.metadata.update_element('title', title_element.id, value=title)
if logical_file is not None:
logical_file.dataset_name = title
logical_file.save()
elif 'subject' in shp_xml_metadata:
# append new keywords to existing keywords - at the resource level
existing_keywords = [subject.value.lower() for
subject in resource.metadata.subjects.all()]
keyword = shp_xml_metadata['subject']['value']
if keyword.lower() not in existing_keywords:
resource.metadata.create_element('subject', value=keyword)
# add keywords at the logical file level
if logical_file is not None:
if keyword not in logical_file.metadata.keywords:
logical_file.metadata.keywords += [keyword]
logical_file.metadata.save()
def get_all_related_shp_files(resource, selected_resource_file, file_type):
"""
This helper function copies all the related shape files to a temp directory
and return a list of those temp file paths as well as a list of existing related
resource file objects
    :param resource: an instance of BaseResource to which the *selected_resource_file* belongs
    :param selected_resource_file: an instance of ResourceFile selected by the user to set
    GeoFeatureFile type (the file must be a .shp or a .zip file)
:param file_type: a flag (True/False) to control resource VS file type actions
:return: a list of temp file paths for all related shape files, and a list of corresponding
resource file objects
"""
def collect_shape_resource_files(res_file):
# compare without the file extension (-4)
if res_file.short_path.lower().endswith('.shp.xml'):
if selected_resource_file.short_path[:-4] == res_file.short_path[:-8]:
                shape_res_files.append(res_file)
elif selected_resource_file.short_path[:-4] == res_file.short_path[:-4]:
shape_res_files.append(res_file)
shape_temp_files = []
shape_res_files = []
temp_dir = ''
if selected_resource_file.extension.lower() == '.shp':
for f in resource.files.all():
if f.file_folder == selected_resource_file.file_folder:
if f.extension.lower() == '.xml' and not f.file_name.lower().endswith('.shp.xml'):
continue
if f.extension.lower() in GeoFeatureLogicalFile.get_allowed_storage_file_types():
if file_type:
if f.has_generic_logical_file:
collect_shape_resource_files(f)
else:
collect_shape_resource_files(f)
for f in shape_res_files:
temp_file = utils.get_file_from_irods(f)
if not temp_dir:
temp_dir = os.path.dirname(temp_file)
else:
file_temp_dir = os.path.dirname(temp_file)
dst_dir = os.path.join(temp_dir, os.path.basename(temp_file))
shutil.copy(temp_file, dst_dir)
shutil.rmtree(file_temp_dir)
temp_file = dst_dir
shape_temp_files.append(temp_file)
elif selected_resource_file.extension.lower() == '.zip':
temp_file = utils.get_file_from_irods(selected_resource_file)
temp_dir = os.path.dirname(temp_file)
if not zipfile.is_zipfile(temp_file):
if os.path.isdir(temp_dir):
shutil.rmtree(temp_dir)
raise ValidationError('Selected file is not a zip file')
zf = zipfile.ZipFile(temp_file, 'r')
zf.extractall(temp_dir)
zf.close()
for dirpath, _, filenames in os.walk(temp_dir):
for name in filenames:
if name == selected_resource_file.file_name:
# skip the user selected zip file
continue
file_path = os.path.abspath(os.path.join(dirpath, name))
shape_temp_files.append(file_path)
shape_res_files.append(selected_resource_file)
return shape_temp_files, shape_res_files
def _check_if_shape_files(files):
"""
    Checks whether the temp file paths in *files* constitute a valid set of shape files.
    The set must contain all of these file extensions: (shp, shx, dbf)
:param files: list of files located in temp directory in django
:return: True/False
"""
# Note: this is the original function (check_fn_for_shp) in geo feature resource receivers.py
# used by is_shapefiles
# at least needs to have 3 mandatory files: shp, shx, dbf
if len(files) >= 3:
# check that there are no files with same extension
file_extensions = set([os.path.splitext(os.path.basename(f).lower())[1] for f in files])
if len(file_extensions) != len(files):
return False
# check if there is the xml file
xml_file = ''
for f in files:
if f.lower().endswith('.shp.xml'):
xml_file = f
file_names = set([os.path.splitext(os.path.basename(f))[0] for f in files if
not f.lower().endswith('.shp.xml')])
if len(file_names) > 1:
# file names are not the same
return False
# check if xml file name matches with other file names
if xml_file:
# -8 for '.shp.xml'
if os.path.basename(xml_file)[:-8] not in file_names:
return False
for ext in file_extensions:
if ext not in GeoFeatureLogicalFile.get_allowed_storage_file_types():
return False
for ext in ('.shp', '.shx', '.dbf'):
if ext not in file_extensions:
return False
else:
return False
# test if we can open the shp file
shp_file = [f for f in files if f.lower().endswith('.shp')][0]
driver = ogr.GetDriverByName('ESRI Shapefile')
dataset = driver.Open(shp_file)
if dataset is None:
return False
dataset = None
return True
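# Editor's sketch (hypothetical file names, not part of the original module):
# _check_if_shape_files() requires at least .shp, .shx and .dbf files sharing
# one base name; a True result additionally requires that the .shp file opens
# with the OGR ESRI Shapefile driver, so the paths below must point at real
# files for the first call to succeed.
def _example_shape_file_check(temp_dir='/tmp/shapes'):
    complete_set = [os.path.join(temp_dir, 'rivers' + ext)
                    for ext in ('.shp', '.shx', '.dbf')]
    # fewer than 3 files fails immediately, before any OGR check
    missing_dbf = complete_set[:2]
    return _check_if_shape_files(complete_set), _check_if_shape_files(missing_dbf)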
def extract_metadata(shp_file_full_path):
"""
Collects metadata from a .shp file specified by *shp_file_full_path*
:param shp_file_full_path:
:return: returns a dict of collected metadata
"""
# TODO: Pabitra - try to simplify the logic in this function
try:
metadata_dict = {}
# wgs84 extent
parsed_md_dict = parse_shp(shp_file_full_path)
if parsed_md_dict["wgs84_extent_dict"]["westlimit"] != UNKNOWN_STR:
wgs84_dict = parsed_md_dict["wgs84_extent_dict"]
# if extent is a point, create point type coverage
if wgs84_dict["westlimit"] == wgs84_dict["eastlimit"] \
and wgs84_dict["northlimit"] == wgs84_dict["southlimit"]:
coverage_dict = {"Coverage": {"type": "point",
"value": {"east": wgs84_dict["eastlimit"],
"north": wgs84_dict["northlimit"],
"units": wgs84_dict["units"],
"projection": wgs84_dict["projection"]}
}}
else: # otherwise, create box type coverage
coverage_dict = {"Coverage": {"type": "box",
"value": parsed_md_dict["wgs84_extent_dict"]}}
metadata_dict["coverage"] = coverage_dict
# original extent
        origin_extent = parsed_md_dict["origin_extent_dict"]
        original_coverage_dict = {"originalcoverage": {
            "northlimit": origin_extent["northlimit"],
            "southlimit": origin_extent["southlimit"],
            "westlimit": origin_extent["westlimit"],
            "eastlimit": origin_extent["eastlimit"],
            "projection_string": parsed_md_dict["origin_projection_string"],
            "projection_name": parsed_md_dict["origin_projection_name"],
            "datum": parsed_md_dict["origin_datum"],
            "unit": parsed_md_dict["origin_unit"],
        }}
metadata_dict["originalcoverage"] = original_coverage_dict
# field
field_info_array = []
field_name_list = parsed_md_dict["field_meta_dict"]['field_list']
for field_name in field_name_list:
field_info_dict_item = {}
field_info_dict_item['fieldinformation'] = \
parsed_md_dict["field_meta_dict"]["field_attr_dict"][field_name]
field_info_array.append(field_info_dict_item)
metadata_dict['field_info_array'] = field_info_array
# geometry
geometryinformation = {"featureCount": parsed_md_dict["feature_count"],
"geometryType": parsed_md_dict["geometry_type"]}
metadata_dict["geometryinformation"] = geometryinformation
return metadata_dict
    except Exception:
        raise ValidationError("Failed to parse shapefiles.")
def parse_shp(shp_file_path):
"""
    :param shp_file_path: full file path of the .shp file
output dictionary format
shp_metadata_dict["origin_projection_string"]: original projection string
shp_metadata_dict["origin_projection_name"]: origin_projection_name
shp_metadata_dict["origin_datum"]: origin_datum
shp_metadata_dict["origin_unit"]: origin_unit
shp_metadata_dict["field_meta_dict"]["field_list"]: list [fieldname1, fieldname2...]
shp_metadata_dict["field_meta_dict"]["field_attr_dic"]:
dict {"fieldname": dict {
"fieldName":fieldName,
"fieldTypeCode":fieldTypeCode,
"fieldType":fieldType,
"fieldWidth:fieldWidth,
"fieldPrecision:fieldPrecision"
}
}
shp_metadata_dict["feature_count"]: feature count
shp_metadata_dict["geometry_type"]: geometry_type
shp_metadata_dict["origin_extent_dict"]:
dict{"west": east, "north":north, "east":east, "south":south}
shp_metadata_dict["wgs84_extent_dict"]:
dict{"west": east, "north":north, "east":east, "south":south}
"""
shp_metadata_dict = {}
# read shapefile
driver = ogr.GetDriverByName('ESRI Shapefile')
dataset = driver.Open(shp_file_path)
# get layer
layer = dataset.GetLayer()
# get spatialRef from layer
spatialRef_from_layer = layer.GetSpatialRef()
if spatialRef_from_layer is not None:
shp_metadata_dict["origin_projection_string"] = str(spatialRef_from_layer)
prj_name = spatialRef_from_layer.GetAttrValue('projcs')
if prj_name is None:
prj_name = spatialRef_from_layer.GetAttrValue('geogcs')
shp_metadata_dict["origin_projection_name"] = prj_name
shp_metadata_dict["origin_datum"] = spatialRef_from_layer.GetAttrValue('datum')
shp_metadata_dict["origin_unit"] = spatialRef_from_layer.GetAttrValue('unit')
else:
shp_metadata_dict["origin_projection_string"] = UNKNOWN_STR
shp_metadata_dict["origin_projection_name"] = UNKNOWN_STR
shp_metadata_dict["origin_datum"] = UNKNOWN_STR
shp_metadata_dict["origin_unit"] = UNKNOWN_STR
field_list = []
    field_attr_dict = {}
    field_meta_dict = {"field_list": field_list, "field_attr_dict": field_attr_dict}
shp_metadata_dict["field_meta_dict"] = field_meta_dict
# get Attributes
layerDefinition = layer.GetLayerDefn()
for i in range(layerDefinition.GetFieldCount()):
fieldName = layerDefinition.GetFieldDefn(i).GetName()
field_list.append(fieldName)
attr_dict = {}
field_meta_dict["field_attr_dict"][fieldName] = attr_dict
attr_dict["fieldName"] = fieldName
fieldTypeCode = layerDefinition.GetFieldDefn(i).GetType()
attr_dict["fieldTypeCode"] = fieldTypeCode
fieldType = layerDefinition.GetFieldDefn(i).GetFieldTypeName(fieldTypeCode)
attr_dict["fieldType"] = fieldType
fieldWidth = layerDefinition.GetFieldDefn(i).GetWidth()
attr_dict["fieldWidth"] = fieldWidth
fieldPrecision = layerDefinition.GetFieldDefn(i).GetPrecision()
attr_dict["fieldPrecision"] = fieldPrecision
# get layer extent
layer_extent = layer.GetExtent()
# get feature count
featureCount = layer.GetFeatureCount()
shp_metadata_dict["feature_count"] = featureCount
# get a feature from layer
feature = layer.GetNextFeature()
# get geometry from feature
geom = feature.GetGeometryRef()
# get geometry name
shp_metadata_dict["geometry_type"] = geom.GetGeometryName()
# reproject layer extent
# source SpatialReference
source = spatialRef_from_layer
# target SpatialReference
target = osr.SpatialReference()
target.ImportFromEPSG(4326)
# create two key points from layer extent
left_upper_point = ogr.Geometry(ogr.wkbPoint)
left_upper_point.AddPoint(layer_extent[0], layer_extent[3]) # left-upper
right_lower_point = ogr.Geometry(ogr.wkbPoint)
right_lower_point.AddPoint(layer_extent[1], layer_extent[2]) # right-lower
    # source map always has an extent, even if the projection is unknown
shp_metadata_dict["origin_extent_dict"] = {}
shp_metadata_dict["origin_extent_dict"]["westlimit"] = layer_extent[0]
shp_metadata_dict["origin_extent_dict"]["northlimit"] = layer_extent[3]
shp_metadata_dict["origin_extent_dict"]["eastlimit"] = layer_extent[1]
shp_metadata_dict["origin_extent_dict"]["southlimit"] = layer_extent[2]
# reproject to WGS84
shp_metadata_dict["wgs84_extent_dict"] = {}
if source is not None:
# define CoordinateTransformation obj
transform = osr.CoordinateTransformation(source, target)
# project two key points
left_upper_point.Transform(transform)
right_lower_point.Transform(transform)
shp_metadata_dict["wgs84_extent_dict"]["westlimit"] = left_upper_point.GetX()
shp_metadata_dict["wgs84_extent_dict"]["northlimit"] = left_upper_point.GetY()
shp_metadata_dict["wgs84_extent_dict"]["eastlimit"] = right_lower_point.GetX()
shp_metadata_dict["wgs84_extent_dict"]["southlimit"] = right_lower_point.GetY()
shp_metadata_dict["wgs84_extent_dict"]["projection"] = "WGS 84 EPSG:4326"
shp_metadata_dict["wgs84_extent_dict"]["units"] = "Decimal degrees"
else:
shp_metadata_dict["wgs84_extent_dict"]["westlimit"] = UNKNOWN_STR
shp_metadata_dict["wgs84_extent_dict"]["northlimit"] = UNKNOWN_STR
shp_metadata_dict["wgs84_extent_dict"]["eastlimit"] = UNKNOWN_STR
shp_metadata_dict["wgs84_extent_dict"]["southlimit"] = UNKNOWN_STR
shp_metadata_dict["wgs84_extent_dict"]["projection"] = UNKNOWN_STR
shp_metadata_dict["wgs84_extent_dict"]["units"] = UNKNOWN_STR
return shp_metadata_dict
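# Editor's sketch (hypothetical path, not part of the original module): how the
# dictionary returned by parse_shp() is typically consumed; the format string
# keeps the print Python 2/3 neutral.
def _example_parse_shp(shp_path='/tmp/shapes/rivers.shp'):
    md = parse_shp(shp_path)
    print('%s features of type %s' % (md['feature_count'], md['geometry_type']))
    # extent keys follow the layout documented in the parse_shp docstring
    return md['wgs84_extent_dict']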
def parse_shp_xml(shp_xml_full_path):
"""
    Parse an ArcGIS 10.X ESRI Shapefile Metadata XML file to extract metadata for the following
elements:
title
abstract
keywords
:param shp_xml_full_path: Expected fullpath to the .shp.xml file
:return: a list of metadata dict
"""
metadata = []
try:
if os.path.isfile(shp_xml_full_path):
with open(shp_xml_full_path) as fd:
xml_dict = xmltodict.parse(fd.read())
dataIdInfo_dict = xml_dict['metadata']['dataIdInfo']
if 'idCitation' in dataIdInfo_dict:
if 'resTitle' in dataIdInfo_dict['idCitation']:
if '#text' in dataIdInfo_dict['idCitation']['resTitle']:
title_value = dataIdInfo_dict['idCitation']['resTitle']['#text']
else:
title_value = dataIdInfo_dict['idCitation']['resTitle']
title_max_length = Title._meta.get_field('value').max_length
if len(title_value) > title_max_length:
title_value = title_value[:title_max_length-1]
title = {'title': {'value': title_value}}
metadata.append(title)
if 'idAbs' in dataIdInfo_dict:
description_value = strip_tags(dataIdInfo_dict['idAbs'])
description = {'description': {'abstract': description_value}}
metadata.append(description)
if 'searchKeys' in dataIdInfo_dict:
searchKeys_dict = dataIdInfo_dict['searchKeys']
if 'keyword' in searchKeys_dict:
keyword_list = []
if type(searchKeys_dict["keyword"]) is list:
keyword_list += searchKeys_dict["keyword"]
else:
keyword_list.append(searchKeys_dict["keyword"])
for k in keyword_list:
metadata.append({'subject': {'value': k}})
except Exception:
# Catch any exception silently and return an empty list
# Due to the variant format of ESRI Shapefile Metadata XML
# among different ArcGIS versions, an empty list will be returned
# if any exception occurs
metadata = []
finally:
return metadata
|
ResearchSoftwareInstitute/MyHPOM
|
hs_file_types/models/geofeature.py
|
Python
|
bsd-3-clause
| 36,730
|
# -*- coding: utf-8 -*-
"""
:copyright: (c) 2015 by Jan-Hendrik Dolling.
:license: Apache 2.0, see LICENSE for more details.
"""
import abc
import six
import json
import logging
from six import string_types
from collections import namedtuple
from configvalidator.tools.exceptions import LoadException, ValidatorException
from configvalidator.tools.parser import ParseObj
logger = logging.getLogger(__name__)
DATA_SECTION_FEATURE = {}
DATA_OPTION_FEATURE = {}
DATA_VALIDATOR = {}
GLOBAL_DATA = {}
def add_data(key, value):
"""Add a value to the global data store
Validators and Features can access this data.
If you create an object an *data* attribute is automatic added to the instance.
This data attribute hold all information that there president during initialization.
So it's possible to add additional meta data to Validators and Features.
Args:
key: The key under which that information is stored.
value: The information
"""
GLOBAL_DATA[key] = value
def remove_data(key):
"""remove a value from the global data store
This removes the data only for new instances.
The information remain available under the key for existing instances.
Args:
key: The key under which that information is stored.
"""
del GLOBAL_DATA[key]
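# Editor's sketch (hypothetical key/value, not part of the original module):
# data added here is snapshotted into the ``data`` attribute of every
# Validator/Feature instantiated afterwards; removing the key only affects
# instances created later.
def _example_global_data():
    add_data('db_host', 'localhost')
    try:
        pass  # instantiate validators/features here; each sees data['db_host']
    finally:
        remove_data('db_host')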
def load_validator(validator_name):
"""loads a validator class
Args:
validator_name: the validator name
Returns:
        A validator class which can then be instantiated
Raises:
        LoadException: if the validator_name is unknown
"""
try:
return DATA_VALIDATOR[validator_name]
except KeyError:
raise LoadException("no validator with the name {name}".format(name=validator_name))
def load_section_feature(feature_name):
try:
return DATA_SECTION_FEATURE[feature_name]
except KeyError:
raise LoadException(
"no Section feature with the name {name}".format(name=feature_name))
def load_option_feature(feature_name):
try:
return DATA_OPTION_FEATURE[feature_name]
except KeyError:
raise LoadException(
"no option feature with the name {name}".format(name=feature_name))
def load_validator_form_dict(option_dict):
validator_class_name = "default"
validator_class_dict = {}
if isinstance(option_dict, dict) and "validator" in option_dict and option_dict["validator"] is not None:
if isinstance(option_dict["validator"], string_types):
validator_class_name = option_dict["validator"]
else:
validator_class_dict = option_dict["validator"]
if "type" in validator_class_dict:
validator_class_name = validator_class_dict["type"]
del validator_class_dict["type"]
return load_validator(validator_class_name), validator_class_dict
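# Editor's note (illustrative): the three option-dict shapes accepted above,
# assuming a validator named 'int_check' has been registered.
#
#     load_validator_form_dict({})                          # default validator, {}
#     load_validator_form_dict({'validator': 'int_check'})  # named validator, {}
#     load_validator_form_dict(
#         {'validator': {'type': 'int_check', 'min': 0}})   # named validator, {'min': 0}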
def list_objects():
return dict(validators=[x for x in DATA_VALIDATOR],
option_features=[x for x in DATA_OPTION_FEATURE],
section_features=[x for x in DATA_SECTION_FEATURE])
def decorate_fn(func):
def with_check_input_is_string(self, value):
if not isinstance(value, string_types):
raise ValidatorException("input must be a string.")
return func(self, value)
return with_check_input_is_string
class CollectMetaclass(abc.ABCMeta):
"""Metaclass which safes the class, so that the loads methods can find them.
all classes with this metaclass are automatically collected
The then can be accessed with there name (which is the class attribute
name or the class name if the class has no attribute entry_name)
"""
def __init__(self, name, bases, dct):
"""
        Called when a new class is created.
        The method sets the "name" attribute if it is not already set.
        Unless the attribute *inactive* is True, the class is sorted into
        the module-level registries:
        - Validator subclasses into DATA_VALIDATOR
        - SectionFeature subclasses into DATA_SECTION_FEATURE
        - OptionFeature subclasses into DATA_OPTION_FEATURE
"""
super(CollectMetaclass, self).__init__(name, bases, dct)
if object in bases:
# skip base classes
return
if "name" not in dct:
self.name = name
if "inactive" not in dct or dct["inactive"] is not True:
if issubclass(self, Validator):
# only string input for validator functions
self.validate = decorate_fn(self.validate)
DATA_VALIDATOR[self.name] = self
if issubclass(self, SectionFeature):
DATA_SECTION_FEATURE[self.name] = self
if issubclass(self, OptionFeature):
DATA_OPTION_FEATURE[self.name] = self
def __call__(self, *args, **kwargs):
pars_obj = None
if len(args) > 0 and isinstance(args[0], ParseObj):
pars_obj = args[0]
args = args[1:]
res = self.__new__(self, *args, **kwargs)
if isinstance(res, self):
res.data = dict(GLOBAL_DATA)
if pars_obj is not None:
res.data.update(pars_obj.context_data)
res.__init__(*args, **kwargs)
return res
@six.add_metaclass(CollectMetaclass)
class Validator(object):
"""Superclass for Validator's
If you want to write your own Validator use this Superclass.
For Attribute information see Entry class.
a instance lives in one section/option from ini_validator dict
"""
@abc.abstractmethod
def validate(self, value):
"""determine if one input satisfies this validator.
IMPORTAND:
The input is always are String
Args:
value (String): the value to check if it suffused this Validator
Returns:
True or False dependent of if the input suffused the Validator.
"""
@six.add_metaclass(CollectMetaclass)
class SectionFeature(object):
def __init__(self, **kwargs):
"""
:param kwargs: parameter will be ignored
:return:
"""
@abc.abstractmethod
def parse_section(self, parse_obj, section_dict):
"""
:param parse_obj: parser object which stores the data
:param section_dict: the configuration dict for the current section
:return:
"""
@six.add_metaclass(CollectMetaclass)
class OptionFeature(object):
def __init__(self, **kwargs):
"""
:param kwargs: parameter will be ignored
:return:
"""
@abc.abstractmethod
def parse_option(self, parse_obj, option_dict):
"""
:param parse_obj: parser object which stores the data
:param option_dict: the configuration dict for the current option
:return:
"""
|
JanHendrikDolling/configvalidator
|
configvalidator/tools/basics.py
|
Python
|
apache-2.0
| 6,745
|
"""Builder for websites."""
import string
from regolith.dates import date_to_float
doc_date_key = lambda x: date_to_float(
x.get("year", 1970), x.get("month", "jan")
)
ene_date_key = lambda x: date_to_float(
x.get("end_year", 4242), x.get("end_month", "dec")
)
category_val = lambda x: x.get("category", "<uncategorized>")
level_val = lambda x: x.get("level", "<no-level>")
id_key = lambda x: x.get("_id", "")
def date_key(x):
if "end_year" in x:
v = date_to_float(
x["end_year"], x.get("end_month", "jan"), x.get("end_day", 0)
)
elif "year" in x:
v = date_to_float(x["year"], x.get("month", "jan"), x.get("day", 0))
elif "begin_year" in x:
v = date_to_float(
x["begin_year"], x.get("begin_month", "jan"), x.get("begin_day", 0)
)
else:
raise KeyError("could not find year in " + str(x))
return v
POSITION_LEVELS = {
"": -1,
"editor": -1,
"unknown": -1,
"undergraduate research assistant": 1,
"intern": 1,
"masters research assistant": 2,
"visiting student": 1,
"graduate research assistant": 3,
"teaching assistant": 3,
"research assistant": 2,
"post-doctoral scholar": 4,
"research fellow": 4,
"assistant scientist": 4,
"assistant lecturer": 4,
"lecturer": 5,
"research scientist": 4.5,
"associate scientist": 5,
"adjunct scientist": 5,
"senior assistant lecturer": 5,
"research associate": 5,
"reader": 5,
"ajunct professor": 5,
"adjunct professor": 5,
"consultant": 5,
"programer": 5,
"programmer": 5,
"visiting scientist": 6,
"research assistant professor": 4,
"assistant professor": 8,
"assistant physicist": 8,
"associate professor": 9,
"associate physicist": 9,
"professor emeritus": 9,
"visiting professor": 9,
"manager": 10,
"director": 10,
"scientist": 10,
"engineer": 10,
"physicist": 10,
"professor": 11,
"president": 10,
"distinguished professor": 12
}
def position_key(x):
"""Sorts a people based on thier position in the research group."""
pos = x.get("position", "").lower()
first_letter_last = x.get("name", "zappa").rsplit(None, 1)[-1][0].upper()
backward_position = 26 - string.ascii_uppercase.index(first_letter_last)
return POSITION_LEVELS.get(pos, -1), backward_position
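# Editor's sketch (hypothetical records, not part of the original module):
# date_key prefers end dates over plain years over begin dates, while
# position_key sorts by seniority level, breaking ties by the reverse
# alphabetical rank of the last name's first letter.
def _example_sort_keys():
    docs = [{"year": 2015, "month": "may"}, {"end_year": 2012, "end_month": "jan"}]
    people = [{"position": "professor", "name": "Ada Lovelace"},
              {"position": "intern", "name": "Grace Hopper"}]
    return (sorted(docs, key=date_key),                      # 2012 entry first
            sorted(people, key=position_key, reverse=True))  # professor first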
|
scopatz/regolith
|
regolith/sorters.py
|
Python
|
cc0-1.0
| 2,389
|
#!/usr/bin/python
#-*- coding: utf-8 -*-
import os
import sys
import re
import optparse
import myLib
import time
CHECK_FILE = ['AndroidManifest.xml', 'res', 'src', 'src_lib']
FILE_SUBFIX = ['.java', '.xml']
MENIFEST_FILE = 'AndroidManifest.xml'
STRING_FILE = 'res/values/strings.xml'
BUILD_RES_DIR = 'build_res/'
ASSETS_DIR = 'assets/'
ICON_RES_PATH = 'res/drawable-xhdpi/'
CONFIG_FILE = 'src/com/xstd/qm/Config.java'
LIB_CONFIGL_FILE = 'src_lib/mcuslib/src/com/plugin/common/utils/UtilsConfig.java'
init_optprarse = optparse.OptionParser(usage='python build.py [-d debug] [-c channel_code] [-t target_save] [-f force start day]')
init_optprarse.add_option('-d', '--debug', dest='debug')
init_optprarse.add_option('-t', '--targetPath', dest='target')
init_optprarse.add_option('-c', '--channel', dest='channel')
init_optprarse.add_option('-f', '--force', dest='forceday')
class ARGUMENTS_ERROR(Exception):
""" replace text failure
"""
class RES_ERROR(Exception):
""" build resource error
"""
# Get the current package name from the AndroidManifest
def __getPackageName():
if os.path.exists(MENIFEST_FILE):
with open(MENIFEST_FILE, 'r') as mfile:
for line in mfile:
m = re.search('package=\".*\"', line)
if m:
oldStr = m.group(0)
#print oldStr + ' left index = ' + str(oldStr.find('\"')) + ' right index = ' + str(oldStr.rfind('\"'))
return oldStr[oldStr.find('\"') + 1:oldStr.rfind('\"')]
return None
# Get the current versionName from the AndroidManifest
def __getVersionName():
if os.path.exists(MENIFEST_FILE):
with open(MENIFEST_FILE, 'r') as file:
for line in file:
m = re.search('android:versionName=\".*\"', line)
if m:
oldStr = m.group(0)
return oldStr[oldStr.find('\"') + 1:oldStr.rfind('\"')]
return None
# Replace text in filename. If filename is a file, replace in it directly;
# if filename is a directory, recursively replace in all files under it.
def __walk_replace_file(filename, old, new):
if filename == None or len(filename) == 0:
raise ARGUMENTS_ERROR()
if os.path.isfile(filename):
if __check_file_extend(filename):
            print 'found a file to replace, file : %s' % filename
            if os.path.basename(filename) != 'Config.java':
                myLib.replce_text_in_file(filename, old, new)
elif os.path.isdir(filename):
wpath = os.walk(filename)
for item in wpath:
files = item[2]
parentPath = item[0]
for f in files:
if __check_file_extend(f):
                    # Note: the Config file is special and must not be replaced
if f != 'Config.java':
                        print 'found a file to replace, file : %s/%s' % (parentPath, f)
myLib.replce_text_in_file('%s/%s' % (parentPath, f), old, new)
return True
# Check whether the current file is a .java or .xml file
def __check_file_extend(filename):
for end in FILE_SUBFIX:
if filename.endswith(end):
return True
return False
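# Editor's sketch (hypothetical package names, not part of the original
# script): rewrites every .java/.xml file under src/, skipping Config.java as
# noted above.
def __example_walk_replace():
    __walk_replace_file('src', 'com.example.old', 'com.example.new')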
def __replace_package_name(new_package_name):
if new_package_name == None or len(new_package_name) == 0:
raise ARGUMENTS_ERROR()
old_package = __getPackageName()
    print '[[replace.py]] try to replace old package : %s to new package : %s' % (old_package, new_package_name)
for item in CHECK_FILE:
__walk_replace_file(item, old_package, new_package_name)
return True
def __onceBuild(debug, channel, target, forceday):
print '//' + '*' * 30
print '|| begin once build for channel:%s to %s' %(channel, target)
print '\\' + '*' * 30
if forceday != None:
myLib.replce_text_in_file(CONFIG_FILE, 'FORCE_START_DAY\ =.*;', 'FORCE_START_DAY = %s;' % forceday)
if debug == 'false':
myLib.replce_text_in_file(CONFIG_FILE, 'DEBUG\ =.*;', 'DEBUG = %s;' % 'false')
myLib.replce_text_in_file(LIB_CONFIGL_FILE, 'UTILS_DEBUG\ =.*;', 'UTILS_DEBUG = %s;' % 'false')
if channel != None:
myLib.replce_text_in_file(CONFIG_FILE, 'CHANNEL_CODE\ =.*;', 'CHANNEL_CODE = \"%s\";' % channel)
myLib.replce_text_in_file(STRING_FILE, 'channel_code.*>', 'channel_code">%s</string>' % channel)
print '='*20 + ' build prepare finish ' + '='*20
print 'begin build now'
os.system('ant clean ; ant release')
if os.path.exists('bin/QuickSetting-release.apk') and target != None:
if not os.path.exists(target):
            os.makedirs(target)
version_name = __getVersionName()
target_apk_file = '%s_%s_%s_%s.apk' % ('QuickSetting', version_name, channel, time.strftime("%Y-%m-%d-%H-%M", time.localtime()))
os.system('cp -rf bin/QuickSetting-release.apk %s/%s' % (target, target_apk_file))
print 'backup the build target %s/%s success >>>>>>>>' % (target, target_apk_file)
print 'after build for channel : %s, just reset code ' % channel
os.system('git reset --hard HEAD')
print '-' * 40
print '-' * 40
def __main(args):
opt, arg = init_optprarse.parse_args(args)
debug = opt.debug
target = opt.target
channel = opt.channel
forceday = opt.forceday
if target == None or channel == None:
raise ARGUMENTS_ERROR()
__onceBuild(debug, channel, target, forceday)
return None
if __name__ == '__main__':
__main(sys.argv[1:])
|
xstd/quick_setting
|
build.py
|
Python
|
apache-2.0
| 5,506
|
from utils import create_newfig, create_moving_polygon, create_still_polygon, run_or_export
func_code = 'as'
func_name = 'test_one_moving_one_stationary_along_path_no_intr'
def setup_fig01():
fig, ax, renderer = create_newfig('{}01'.format(func_code), ylim=(-1, 7))
create_moving_polygon(fig, ax, renderer, ((0, 0), (0, 1), (1, 1), (1, 0)), (4, 3), 'none')
create_still_polygon(fig, ax, renderer, ((3, 1, 'botright'), (4, 1), (4, 0), (3, 0)), 'none')
return fig, ax, '{}01_{}'.format(func_code, func_name)
def setup_fig02():
fig, ax, renderer = create_newfig('{}02'.format(func_code), xlim=(-2, 12), ylim=(-1, 10))
create_moving_polygon(fig, ax, renderer, ((11, 5), (8, 8), (7, 7), (6, 3), (9, 3)), (-1, -3))
create_still_polygon(fig, ax, renderer, ((3.5, 8.5), (1.5, 8.5), (-0.5, 7.5), (0.5, 3.5), (1.5, 2.5), (4.5, 2.5), (5.5, 6.5)))
return fig, ax, '{}02_{}'.format(func_code, func_name)
def setup_fig03():
fig, ax, renderer = create_newfig('{}03'.format(func_code), xlim=(-3, 9), ylim=(-1, 15))
create_moving_polygon(fig, ax, renderer, ((0.5, 9.0), (-1.5, 8.0), (-1.5, 6.0), (1.5, 5.0), (2.5, 5.0), (2.5, 9.0)), (0, 5))
create_still_polygon(fig, ax, renderer, ((7.0, 6.0), (4.0, 5.0), (4.0, 3.0), (6.0, 2.0), (8.0, 3.0)))
return fig, ax, '{}03_{}'.format(func_code, func_name)
def setup_fig04():
fig, ax, renderer = create_newfig('{}04'.format(func_code), xlim=(-2, 12), ylim=(-3, 10))
create_moving_polygon(fig, ax, renderer, ((5.5, 4.5), (3.5, -1.5), (9.5, -1.5), (10.5, 0.5)), (-4, 0))
create_still_polygon(fig, ax, renderer, ((7.5, 8.5), (6.5, 5.5), (7.5, 4.5), (9.5, 4.5), (10.5, 7.5)))
return fig, ax, '{}04_{}'.format(func_code, func_name)
run_or_export(setup_fig01, setup_fig02, setup_fig03, setup_fig04)
|
OmkarPathak/pygorithm
|
imgs/test_geometry/test_extrapolated_intersection/as_test_one_moving_one_stationary_along_path_no_intr.py
|
Python
|
mit
| 1,807
|
# -*- coding: utf-8 -*-
from __future__ import with_statement, print_function, absolute_import
import os
from requests_oauthlib import OAuth1Session
def create_oauth_token(expiration=None, scope=None, key=None, secret=None, name=None, output=True):
"""
Script to obtain an OAuth token from Trello.
Must have TRELLO_API_KEY and TRELLO_API_SECRET set in your environment
To set the token's expiration, set TRELLO_EXPIRATION as a string in your
    environment settings (e.g. 'never'); otherwise it will default to 30 days.
More info on token scope here:
https://trello.com/docs/gettingstarted/#getting-a-token-from-a-user
"""
request_token_url = 'https://trello.com/1/OAuthGetRequestToken'
authorize_url = 'https://trello.com/1/OAuthAuthorizeToken'
access_token_url = 'https://trello.com/1/OAuthGetAccessToken'
expiration = expiration or os.environ.get('TRELLO_EXPIRATION', "30days")
scope = scope or os.environ.get('TRELLO_SCOPE', 'read,write')
trello_key = key or os.environ['TRELLO_API_KEY']
trello_secret = secret or os.environ['TRELLO_API_SECRET']
name = name or os.environ.get('TRELLO_NAME', 'py-trello')
# Step 1: Get a request token. This is a temporary token that is used for
# having the user authorize an access token and to sign the request to obtain
# said access token.
session = OAuth1Session(client_key=trello_key, client_secret=trello_secret)
response = session.fetch_request_token(request_token_url)
resource_owner_key, resource_owner_secret = response.get('oauth_token'), response.get('oauth_token_secret')
if output:
print("Request Token:")
print(" - oauth_token = %s" % resource_owner_key)
print(" - oauth_token_secret = %s" % resource_owner_secret)
print("")
# Step 2: Redirect to the provider. Since this is a CLI script we do not
# redirect. In a web application you would redirect the user to the URL
# below.
print("Go to the following link in your browser:")
print("{authorize_url}?oauth_token={oauth_token}&scope={scope}&expiration={expiration}&name={name}".format(
authorize_url=authorize_url,
oauth_token=resource_owner_key,
expiration=expiration,
scope=scope,
name=name
))
# After the user has granted access to you, the consumer, the provider will
# redirect you to whatever URL you have told them to redirect to. You can
# usually define this in the oauth_callback argument as well.
# Python 3 compatibility (raw_input was renamed to input)
try:
inputFunc = raw_input
except NameError:
inputFunc = input
accepted = 'n'
while accepted.lower() == 'n':
accepted = inputFunc('Have you authorized me? (y/n) ')
oauth_verifier = inputFunc('What is the PIN? ')
# Step 3: Once the consumer has redirected the user back to the oauth_callback
# URL you can request the access token the user has approved. You use the
# request token to sign this request. After this is done you throw away the
# request token and use the access token returned. You should store this
# access token somewhere safe, like a database, for future use.
session = OAuth1Session(client_key=trello_key, client_secret=trello_secret,
resource_owner_key=resource_owner_key, resource_owner_secret=resource_owner_secret,
verifier=oauth_verifier)
access_token = session.fetch_access_token(access_token_url)
if output:
print("Access Token:")
print(" - oauth_token = %s" % access_token['oauth_token'])
print(" - oauth_token_secret = %s" % access_token['oauth_token_secret'])
print("")
print("You may now access protected resources using the access tokens above.")
print("")
return access_token
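# Editor's sketch (hypothetical credentials, not part of the original module):
# callers can pass everything explicitly instead of relying on the TRELLO_*
# environment variables; the browser-authorization and PIN prompts above still
# happen interactively.
def _example_create_token(api_key, api_secret):
    return create_oauth_token(expiration='never', scope='read',
                              key=api_key, secret=api_secret,
                              name='example-app', output=False)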
if __name__ == '__main__':
create_oauth_token()
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
|
mehdy/py-trello
|
trello/util.py
|
Python
|
bsd-3-clause
| 4,017
|
import os
import gi
import math
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk, GObject, Gdk
from sicc import GitAssistant
gtk_builder_file = os.path.splitext(__file__)[0] + '.ui'
COLOR_CYCLE = [
('#ffffff'), ('#d6e685'),
('#8cc665'), ('#44a340'),
('#1e6823')
]
class SiccWindow(object):
def __init__(self, *args, **kwargs):
self.color_to_counter = {}
for color in COLOR_CYCLE:
self.color_to_counter[color] = 0
self.assistant = GitAssistant()
self.builder = Gtk.Builder()
self.builder.add_from_file(gtk_builder_file)
self.window = self.builder.get_object('main_window')
self.window.connect('destroy', self.signal_window_destroy)
self.grid = self.builder.get_object('calendar_grid')
self.entry = self.builder.get_object('date_entry')
self.entry.connect('changed', self.signal_entry_changed)
self.entry.set_text('2016')
self.export = self.builder.get_object('export')
self.export.connect('clicked', self.signal_export)
self.window.show_all()
def signal_window_destroy(self, _):
self.window.destroy()
Gtk.main_quit()
def populate_calendar(self, cols, last, beginning):
for child in self.grid.get_children():
self.grid.remove(child)
for i in range(cols + 1):
for j in range(7):
if i == cols and j >= last:
continue
button = Gtk.Button()
button.connect('clicked', self.signal_button_press)
rgb = Gdk.RGBA()
rgb.parse(COLOR_CYCLE[0])
button.override_background_color(Gtk.StateFlags.NORMAL, rgb)
self.grid.attach(button, i, j, 1, 1)
self.grid.show_all()
def signal_button_press(self, button):
curr = button.get_style_context().get_background_color(Gtk.StateFlags.NORMAL)
counter = 0
color = COLOR_CYCLE[counter]
rgb = Gdk.RGBA()
rgb.parse(color)
while not rgb.equal(curr):
counter = (counter + 1) % len(COLOR_CYCLE)
color = COLOR_CYCLE[counter]
rgb.parse(color)
rgb.parse(COLOR_CYCLE[(counter + 1) % len(COLOR_CYCLE)])
button.override_background_color(Gtk.StateFlags.NORMAL, rgb)
if counter % len(COLOR_CYCLE) != 0:
self.color_to_counter[COLOR_CYCLE[counter % len(COLOR_CYCLE)]] -= 1
if (counter + 1) % len(COLOR_CYCLE) != 0:
self.color_to_counter[COLOR_CYCLE[(counter + 1) % len(COLOR_CYCLE)]] += 1
def signal_entry_changed(self, text):
text = text.get_text()
if text.isnumeric() and int(text) > 1900 and int(text) < 3000:
self.year = int(text)
self.params = self.assistant.calculate_start_date(self.year)
self.populate_calendar(self.params[0], self.params[1], self.params[2])
def signal_export(self, _):
color_max = 0
counter = len(COLOR_CYCLE) - 1
        while counter > 0:
            if self.color_to_counter[COLOR_CYCLE[counter]] != 0:
                color_max = counter
                break
            counter -= 1
mask = self.get_mask()
for color in mask:
            if color != 0:
if color != math.ceil(int(color * 1.0 / color_max * 100) / 25):
print("Not a possible color scheme, still parsing...")
break
startday = self.params[2].toordinal()
self.assistant.generate_repo(startday, mask)
def get_mask(self):
mask = []
for i in range(self.params[0] + 1):
for j in range(7):
if i == self.params[0] and j >= self.params[1]:
break
button = self.grid.get_child_at(i, j)
curr = button.get_style_context().get_background_color(Gtk.StateFlags.NORMAL)
counter = 0
color = COLOR_CYCLE[counter]
rgb = Gdk.RGBA()
rgb.parse(color)
while not rgb.equal(curr):
counter = (counter + 1) % len(COLOR_CYCLE)
color = COLOR_CYCLE[counter]
rgb.parse(color)
mask.append(counter)
return mask
if __name__ == '__main__':
x = SiccWindow()
Gtk.main()
|
jacobj10/GitSicc
|
sicc_window.py
|
Python
|
mit
| 4,452
|
# MIT License
# Copyright (c) 2017 MassChallenge, Inc.
from django.urls import reverse
from impact.middleware.method_override_middleware import METHOD_OVERRIDE_HEADER
from impact.tests.api_test_case import APITestCase
from impact.tests.contexts import UserContext
class TestMethodOverrideMiddleware(APITestCase):
def test_patch_via_post(self):
context = UserContext()
user = context.user
with self.login(email=self.basic_user().email):
url = reverse("user_detail", args=[user.id])
new_first_name = "David"
self.client.post(
url,
**{METHOD_OVERRIDE_HEADER: "PATCH"},
data={"first_name": new_first_name})
user.refresh_from_db()
assert user.first_name == new_first_name
def test_patch_via_get_makes_no_change(self):
context = UserContext()
user = context.user
with self.login(email=self.basic_user().email):
url = reverse("user_detail", args=[user.id])
new_first_name = "David"
self.client.get(
url,
**{METHOD_OVERRIDE_HEADER: "PATCH"},
data={"first_name": new_first_name})
user.refresh_from_db()
assert user.first_name != new_first_name
|
masschallenge/impact-api
|
web/impact/impact/tests/test_method_override_middleware.py
|
Python
|
mit
| 1,313
|
# Copyright 2015 Tesora Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import getpass
from mock import call
from mock import DEFAULT
from mock import MagicMock
from mock import Mock
from mock import patch
import os
import tempfile
from trove.common.stream_codecs import IniCodec
from trove.guestagent.common.configuration import ConfigurationManager
from trove.guestagent.common.configuration import ImportOverrideStrategy
from trove.guestagent.common.configuration import OneFileOverrideStrategy
from trove.guestagent.common import operating_system
from trove.guestagent.common.operating_system import FileMode
from trove.tests.unittests import trove_testtools
class TestConfigurationManager(trove_testtools.TestCase):
@patch.multiple('trove.guestagent.common.operating_system',
read_file=DEFAULT, write_file=DEFAULT,
chown=DEFAULT, chmod=DEFAULT)
def test_read_write_configuration(self, read_file, write_file,
chown, chmod):
sample_path = Mock()
sample_owner = Mock()
sample_group = Mock()
sample_codec = MagicMock()
sample_requires_root = Mock()
sample_strategy = MagicMock()
sample_strategy.configure = Mock()
sample_strategy.parse_updates = Mock(return_value={})
manager = ConfigurationManager(
sample_path, sample_owner, sample_group, sample_codec,
requires_root=sample_requires_root,
override_strategy=sample_strategy)
manager.parse_configuration()
read_file.assert_called_with(sample_path, codec=sample_codec)
with patch.object(manager, 'parse_configuration',
return_value={'key1': 'v1', 'key2': 'v2'}):
self.assertEqual('v1', manager.get_value('key1'))
self.assertIsNone(manager.get_value('key3'))
sample_contents = Mock()
manager.save_configuration(sample_contents)
write_file.assert_called_with(
sample_path, sample_contents, as_root=sample_requires_root)
chown.assert_called_with(sample_path, sample_owner, sample_group,
as_root=sample_requires_root)
chmod.assert_called_with(
sample_path, FileMode.ADD_READ_ALL, as_root=sample_requires_root)
sample_data = {}
manager.apply_system_override(sample_data)
manager.apply_user_override(sample_data)
manager.apply_system_override(sample_data, change_id='sys1')
manager.apply_user_override(sample_data, change_id='usr1')
        sample_strategy.apply.assert_has_calls([
call(manager.SYSTEM_GROUP, manager.DEFAULT_CHANGE_ID, sample_data),
call(manager.USER_GROUP, manager.DEFAULT_CHANGE_ID, sample_data),
call(manager.SYSTEM_GROUP, 'sys1', sample_data),
call(manager.USER_GROUP, 'usr1', sample_data)
])
class TestConfigurationOverrideStrategy(trove_testtools.TestCase):
def setUp(self):
trove_testtools.TestCase.setUp(self)
self._temp_files_paths = []
self.chmod_patch = patch.object(
operating_system, 'chmod',
MagicMock(return_value=None))
self.chmod_patch_mock = self.chmod_patch.start()
self.addCleanup(self.chmod_patch.stop)
def tearDown(self):
trove_testtools.TestCase.tearDown(self)
        # Remove temporary files in LIFO order.
while self._temp_files_paths:
try:
os.remove(self._temp_files_paths.pop())
except Exception:
pass # Do not fail in cleanup.
def _create_temp_dir(self):
path = tempfile.mkdtemp()
self._temp_files_paths.append(path)
return path
def test_import_override_strategy(self):
# Data structures representing overrides.
# ('change id', 'values', 'expected import index',
# 'expected final import data')
# Distinct IDs within each group mean that there is one file for each
# override.
user_overrides_v1 = ('id1',
{'Section_1': {'name': 'sqrt(2)',
'value': '1.4142'}},
1,
{'Section_1': {'name': 'sqrt(2)',
'value': '1.4142'}}
)
user_overrides_v2 = ('id2',
{'Section_1': {'is_number': 'False'}},
2,
{'Section_1': {'is_number': 'False'}}
)
system_overrides_v1 = ('id1',
{'Section_1': {'name': 'e',
'value': '2.7183'}},
1,
{'Section_1': {'name': 'e',
'value': '2.7183'}}
)
system_overrides_v2 = ('id2',
{'Section_2': {'is_number': 'True'}},
2,
{'Section_2': {'is_number': 'True'}}
)
self._test_import_override_strategy(
[system_overrides_v1, system_overrides_v2],
[user_overrides_v1, user_overrides_v2], True)
# Same IDs within a group mean that the overrides get written into a
# single file.
user_overrides_v1 = ('id1',
{'Section_1': {'name': 'sqrt(2)',
'value': '1.4142'}},
1,
{'Section_1': {'name': 'sqrt(2)',
'is_number': 'False',
'value': '1.4142'}}
)
user_overrides_v2 = ('id1',
{'Section_1': {'is_number': 'False'}},
1,
{'Section_1': {'name': 'sqrt(2)',
'is_number': 'False',
'value': '1.4142'}}
)
system_overrides_v1 = ('id1',
{'Section_1': {'name': 'e',
'value': '2.7183'}},
1,
{'Section_1': {'name': 'e',
'value': '2.7183'},
'Section_2': {'is_number': 'True'}}
)
system_overrides_v2 = ('id1',
{'Section_2': {'is_number': 'True'}},
1,
{'Section_1': {'name': 'e',
'value': '2.7183'},
'Section_2': {'is_number': 'True'}}
)
self._test_import_override_strategy(
[system_overrides_v1, system_overrides_v2],
[user_overrides_v1, user_overrides_v2], False)
def _test_import_override_strategy(
self, system_overrides, user_overrides, test_multi_rev):
base_config_contents = {'Section_1': {'name': 'pi',
'is_number': 'True',
'value': '3.1415'}
}
codec = IniCodec()
current_user = getpass.getuser()
revision_dir = self._create_temp_dir()
with tempfile.NamedTemporaryFile() as base_config:
# Write initial config contents.
operating_system.write_file(
base_config.name, base_config_contents, codec)
strategy = ImportOverrideStrategy(revision_dir, 'ext')
strategy.configure(
base_config.name, current_user, current_user, codec, False)
self._assert_import_override_strategy(
strategy, system_overrides, user_overrides, test_multi_rev)
def _assert_import_override_strategy(
self, strategy, system_overrides, user_overrides, test_multi_rev):
def import_path_builder(
root, group_name, change_id, file_index, file_ext):
return os.path.join(
root, '%s-%03d-%s.%s'
% (group_name, file_index, change_id, file_ext))
# Apply and remove overrides sequentially.
##########################################
# Apply the overrides and verify the files as they are created.
self._apply_import_overrides(
strategy, 'system', system_overrides, import_path_builder)
self._apply_import_overrides(
strategy, 'user', user_overrides, import_path_builder)
# Verify the files again after applying all overrides.
self._assert_import_overrides(
strategy, 'system', system_overrides, import_path_builder)
self._assert_import_overrides(
strategy, 'user', user_overrides, import_path_builder)
# Remove the overrides and verify the files are gone.
self._remove_import_overrides(
strategy, 'user', user_overrides, import_path_builder)
self._remove_import_overrides(
strategy, 'system', user_overrides, import_path_builder)
# Remove a whole group.
##########################################
# Apply overrides first.
self._apply_import_overrides(
strategy, 'system', system_overrides, import_path_builder)
self._apply_import_overrides(
strategy, 'user', user_overrides, import_path_builder)
# Remove all user overrides and verify the files are gone.
self._remove_import_overrides(
strategy, 'user', None, import_path_builder)
# Assert that the system files are still there intact.
self._assert_import_overrides(
strategy, 'system', system_overrides, import_path_builder)
# Remove all system overrides and verify the files are gone.
self._remove_import_overrides(
strategy, 'system', None, import_path_builder)
if test_multi_rev:
# Remove at the end (only if we have multiple revision files).
##########################################
# Apply overrides first.
self._apply_import_overrides(
strategy, 'system', system_overrides, import_path_builder)
self._apply_import_overrides(
strategy, 'user', user_overrides, import_path_builder)
# Remove the last user and system overrides.
self._remove_import_overrides(
strategy, 'user', [user_overrides[-1]], import_path_builder)
self._remove_import_overrides(
strategy, 'system', [system_overrides[-1]],
import_path_builder)
# Assert that the first overrides are still there intact.
self._assert_import_overrides(
strategy, 'user', [user_overrides[0]], import_path_builder)
self._assert_import_overrides(
strategy, 'system', [system_overrides[0]], import_path_builder)
# Re-apply all overrides.
self._apply_import_overrides(
strategy, 'system', system_overrides, import_path_builder)
self._apply_import_overrides(
strategy, 'user', user_overrides, import_path_builder)
# This should overwrite the existing files and resume counting from
# their indices.
self._assert_import_overrides(
strategy, 'user', user_overrides, import_path_builder)
self._assert_import_overrides(
strategy, 'system', system_overrides, import_path_builder)
def _apply_import_overrides(
self, strategy, group_name, overrides, path_builder):
# Apply the overrides and immediately check the file and its contents.
for change_id, contents, index, _ in overrides:
strategy.apply(group_name, change_id, contents)
expected_path = path_builder(
strategy._revision_dir, group_name, change_id, index,
strategy._revision_ext)
self._assert_file_exists(expected_path, True)
def _remove_import_overrides(
self, strategy, group_name, overrides, path_builder):
if overrides:
# Remove the overrides and immediately check the file was removed.
for change_id, _, index, _ in overrides:
strategy.remove(group_name, change_id)
expected_path = path_builder(
strategy._revision_dir, group_name, change_id, index,
strategy._revision_ext)
self._assert_file_exists(expected_path, False)
else:
# Remove the entire group.
strategy.remove(group_name)
found = operating_system.list_files_in_directory(
strategy._revision_dir, pattern='^%s-.+$' % group_name)
self.assertEqual(set(), found, "Some import files from group '%s' "
"were not removed." % group_name)
def _assert_import_overrides(
self, strategy, group_name, overrides, path_builder):
        # Check all override files and their contents.
for change_id, _, index, expected in overrides:
expected_path = path_builder(
strategy._revision_dir, group_name, change_id, index,
strategy._revision_ext)
self._assert_file_exists(expected_path, True)
            # Assert the file contents match the expected values.
imported = operating_system.read_file(
expected_path, codec=strategy._codec)
self.assertEqual(expected, imported)
def _assert_file_exists(self, file_path, exists):
if exists:
self.assertTrue(os.path.exists(file_path),
"Revision import '%s' does not exist."
% file_path)
else:
self.assertFalse(os.path.exists(file_path),
"Revision import '%s' was not removed."
% file_path)
def test_get_value(self):
revision_dir = self._create_temp_dir()
self._assert_get_value(ImportOverrideStrategy(revision_dir, 'ext'))
self._assert_get_value(OneFileOverrideStrategy(revision_dir))
def _assert_get_value(self, override_strategy):
base_config_contents = {'Section_1': {'name': 'pi',
'is_number': 'True',
'value': '3.1415'}
}
config_overrides_v1a = {'Section_1': {'name': 'sqrt(2)',
'value': '1.4142'}
}
config_overrides_v2 = {'Section_1': {'name': 'e',
'value': '2.7183'},
'Section_2': {'foo': 'bar'}
}
config_overrides_v1b = {'Section_1': {'name': 'sqrt(4)',
'value': '2.0'}
}
codec = IniCodec()
current_user = getpass.getuser()
with tempfile.NamedTemporaryFile() as base_config:
# Write initial config contents.
operating_system.write_file(
base_config.name, base_config_contents, codec)
manager = ConfigurationManager(
base_config.name, current_user, current_user, codec,
requires_root=False, override_strategy=override_strategy)
# Test default value.
self.assertIsNone(manager.get_value('Section_2'))
self.assertEqual('foo', manager.get_value('Section_2', 'foo'))
# Test value before applying overrides.
self.assertEqual('pi', manager.get_value('Section_1')['name'])
self.assertEqual('3.1415', manager.get_value('Section_1')['value'])
# Test value after applying overrides.
manager.apply_user_override(config_overrides_v1a, change_id='id1')
self.assertEqual('sqrt(2)', manager.get_value('Section_1')['name'])
self.assertEqual('1.4142', manager.get_value('Section_1')['value'])
manager.apply_user_override(config_overrides_v2, change_id='id2')
self.assertEqual('e', manager.get_value('Section_1')['name'])
self.assertEqual('2.7183', manager.get_value('Section_1')['value'])
self.assertEqual('bar', manager.get_value('Section_2')['foo'])
# Editing change 'id1' become visible only after removing
# change 'id2', which overrides 'id1'.
manager.apply_user_override(config_overrides_v1b, change_id='id1')
self.assertEqual('e', manager.get_value('Section_1')['name'])
self.assertEqual('2.7183', manager.get_value('Section_1')['value'])
# Test value after removing overrides.
# The edited values from change 'id1' should be visible after
# removing 'id2'.
manager.remove_user_override(change_id='id2')
self.assertEqual('sqrt(4)', manager.get_value('Section_1')['name'])
self.assertEqual('2.0', manager.get_value('Section_1')['value'])
# Back to the base.
manager.remove_user_override(change_id='id1')
self.assertEqual('pi', manager.get_value('Section_1')['name'])
self.assertEqual('3.1415', manager.get_value('Section_1')['value'])
self.assertIsNone(manager.get_value('Section_2'))
# Test system overrides.
manager.apply_system_override(
config_overrides_v1b, change_id='id1')
self.assertEqual('sqrt(4)', manager.get_value('Section_1')['name'])
self.assertEqual('2.0', manager.get_value('Section_1')['value'])
# The system values should take precedence over the user
# override.
manager.apply_user_override(
config_overrides_v1a, change_id='id1')
self.assertEqual('sqrt(4)', manager.get_value('Section_1')['name'])
self.assertEqual('2.0', manager.get_value('Section_1')['value'])
# The user values should become visible only after removing the
# system change.
manager.remove_system_override(change_id='id1')
self.assertEqual('sqrt(2)', manager.get_value('Section_1')['name'])
self.assertEqual('1.4142', manager.get_value('Section_1')['value'])
# Back to the base.
manager.remove_user_override(change_id='id1')
self.assertEqual('pi', manager.get_value('Section_1')['name'])
self.assertEqual('3.1415', manager.get_value('Section_1')['value'])
self.assertIsNone(manager.get_value('Section_2'))
|
cp16net/trove
|
trove/tests/unittests/guestagent/test_configuration.py
|
Python
|
apache-2.0
| 19,830
|
import os
from os.path import join
from numpy.distutils.system_info import get_info
def configuration(parent_package='', top_path=None):
import numpy
from numpy.distutils.misc_util import Configuration
config = Configuration('utils', parent_package, top_path)
config.add_subpackage('sparsetools')
# cd fast needs CBLAS
blas_info = get_info('blas_opt', 0)
if (not blas_info) or (
('NO_ATLAS_INFO', 1) in blas_info.get('define_macros', [])):
cblas_libs = ['cblas']
blas_info.pop('libraries', None)
else:
cblas_libs = blas_info.pop('libraries', [])
libraries = []
if os.name == 'posix':
libraries.append('m')
cblas_libs.append('m')
config.add_extension('arraybuilder',
sources=['arraybuilder.c'])
config.add_extension('sparsefuncs',
sources=['sparsefuncs.c'],
libraries=libraries)
config.add_extension('arrayfuncs',
sources=['arrayfuncs.c'],
depends=[join('src', 'cholesky_delete.c')],
libraries=cblas_libs,
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
extra_compile_args=blas_info.pop('extra_compile_args', []),
**blas_info
)
config.add_extension(
'murmurhash',
sources=['murmurhash.c', join('src', 'MurmurHash3.cpp')],
include_dirs=['src'])
config.add_extension('graph_shortest_path',
sources=['graph_shortest_path.c'],
include_dirs=[numpy.get_include()])
config.add_extension('seq_dataset',
sources=['seq_dataset.c'],
include_dirs=[numpy.get_include()])
config.add_extension('weight_vector',
sources=['weight_vector.c'],
include_dirs=[numpy.get_include()],
libraries=libraries)
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
|
sgenoud/scikit-learn
|
sklearn/utils/setup.py
|
Python
|
bsd-3-clause
| 2,025
|
#!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Rolls third_party/boringssl/src in DEPS and updates generated build files."""
import os
import os.path
import shutil
import subprocess
import sys
SCRIPT_PATH = os.path.abspath(__file__)
SRC_PATH = os.path.dirname(os.path.dirname(os.path.dirname(SCRIPT_PATH)))
DEPS_PATH = os.path.join(SRC_PATH, 'DEPS')
BORINGSSL_PATH = os.path.join(SRC_PATH, 'third_party', 'boringssl')
BORINGSSL_SRC_PATH = os.path.join(BORINGSSL_PATH, 'src')
if not os.path.isfile(DEPS_PATH) or not os.path.isdir(BORINGSSL_SRC_PATH):
raise Exception('Could not find Chromium checkout')
# Pull OS_ARCH_COMBOS out of the BoringSSL script.
sys.path.append(os.path.join(BORINGSSL_SRC_PATH, 'util'))
import generate_build_files
GENERATED_FILES = [
'BUILD.generated.gni',
'BUILD.generated_tests.gni',
'boringssl.gypi',
'boringssl_tests.gypi',
'err_data.c',
]
def IsPristine(repo):
"""Returns True if a git checkout is pristine."""
cmd = ['git', 'diff', '--ignore-submodules']
return not (subprocess.check_output(cmd, cwd=repo).strip() or
subprocess.check_output(cmd + ['--cached'], cwd=repo).strip())
def RevParse(repo, rev):
"""Resolves a string to a git commit."""
return subprocess.check_output(['git', 'rev-parse', rev], cwd=repo).strip()
def UpdateDEPS(deps, from_hash, to_hash):
"""Updates all references of |from_hash| to |to_hash| in |deps|."""
with open(deps, 'rb') as f:
contents = f.read()
if from_hash not in contents:
raise Exception('%s not in DEPS' % from_hash)
contents = contents.replace(from_hash, to_hash)
with open(deps, 'wb') as f:
f.write(contents)
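# Editor's sketch (hypothetical target revision, not part of the original
# script): the core of a roll is a pristine check, a rev-parse of the target
# and a textual swap of the pinned hash in DEPS.
def _ExampleManualRoll(new_rev='origin/master'):
  head = RevParse(BORINGSSL_SRC_PATH, 'HEAD')
  target = RevParse(BORINGSSL_SRC_PATH, new_rev)
  if IsPristine(SRC_PATH) and head != target:
    UpdateDEPS(DEPS_PATH, head, target)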
def main():
if len(sys.argv) > 2:
    sys.stderr.write('Usage: %s [COMMIT]\n' % sys.argv[0])
return 1
if not IsPristine(SRC_PATH):
print >>sys.stderr, 'Chromium checkout not pristine.'
return 0
if not IsPristine(BORINGSSL_SRC_PATH):
print >>sys.stderr, 'BoringSSL checkout not pristine.'
return 0
if len(sys.argv) > 1:
commit = RevParse(BORINGSSL_SRC_PATH, sys.argv[1])
else:
subprocess.check_call(['git', 'fetch', 'origin'], cwd=BORINGSSL_SRC_PATH)
commit = RevParse(BORINGSSL_SRC_PATH, 'origin/master')
head = RevParse(BORINGSSL_SRC_PATH, 'HEAD')
if head == commit:
print 'BoringSSL already up-to-date.'
return 0
print 'Rolling BoringSSL from %s to %s...' % (head, commit)
UpdateDEPS(DEPS_PATH, head, commit)
# Checkout third_party/boringssl/src to generate new files.
subprocess.check_call(['git', 'checkout', commit], cwd=BORINGSSL_SRC_PATH)
# Clear the old generated files.
for (osname, arch, _, _, _) in generate_build_files.OS_ARCH_COMBOS:
path = os.path.join(BORINGSSL_PATH, osname + '-' + arch)
shutil.rmtree(path)
for file in GENERATED_FILES:
path = os.path.join(BORINGSSL_PATH, file)
os.unlink(path)
# Generate new ones.
subprocess.check_call(['python',
os.path.join(BORINGSSL_SRC_PATH, 'util',
'generate_build_files.py'),
'gn', 'gyp'],
cwd=BORINGSSL_PATH)
# Commit everything.
subprocess.check_call(['git', 'add', DEPS_PATH], cwd=SRC_PATH)
for (osname, arch, _, _, _) in generate_build_files.OS_ARCH_COMBOS:
path = os.path.join(BORINGSSL_PATH, osname + '-' + arch)
subprocess.check_call(['git', 'add', path], cwd=SRC_PATH)
for file in GENERATED_FILES:
path = os.path.join(BORINGSSL_PATH, file)
subprocess.check_call(['git', 'add', path], cwd=SRC_PATH)
message = """Roll src/third_party/boringssl/src %s..%s
https://boringssl.googlesource.com/boringssl/+log/%s..%s
BUG=none
""" % (head[:9], commit[:9], head, commit)
subprocess.check_call(['git', 'commit', '-m', message], cwd=SRC_PATH)
return 0
if __name__ == '__main__':
sys.exit(main())
|
axinging/chromium-crosswalk
|
third_party/boringssl/roll_boringssl.py
|
Python
|
bsd-3-clause
| 4,004
|
import json
import uuid
from collections import namedtuple
from functools import partial
import pytest
from flask import url_for
from freezegun import freeze_time
from tests import broadcast_message_json, sample_uuid, user_json
from tests.app.broadcast_areas.custom_polygons import BRISTOL, SKYE
from tests.conftest import (
SERVICE_ONE_ID,
create_active_user_approve_broadcasts_permissions,
create_active_user_create_broadcasts_permissions,
create_active_user_view_permissions,
create_platform_admin_user,
normalize_spaces,
)
sample_uuid = sample_uuid()
@pytest.mark.parametrize('endpoint, extra_args, expected_get_status, expected_post_status', (
(
'.broadcast_dashboard', {},
403, 405,
),
(
'.broadcast_dashboard_updates', {},
403, 405,
),
(
'.broadcast_dashboard_previous', {},
403, 405,
),
(
'.new_broadcast', {},
403, 403,
),
(
'.write_new_broadcast', {},
403, 403,
),
(
'.broadcast',
{'template_id': sample_uuid},
403, 405,
),
(
'.preview_broadcast_areas', {'broadcast_message_id': sample_uuid},
403, 405,
),
(
'.choose_broadcast_library', {'broadcast_message_id': sample_uuid},
403, 405,
),
(
'.choose_broadcast_area', {'broadcast_message_id': sample_uuid, 'library_slug': 'countries'},
403, 403,
),
(
'.remove_broadcast_area', {'broadcast_message_id': sample_uuid, 'area_slug': 'countries-E92000001'},
403, 405,
),
(
'.preview_broadcast_message', {'broadcast_message_id': sample_uuid},
403, 403,
),
(
'.view_current_broadcast', {'broadcast_message_id': sample_uuid},
403, 403,
),
(
'.view_previous_broadcast', {'broadcast_message_id': sample_uuid},
403, 405,
),
(
'.cancel_broadcast_message', {'broadcast_message_id': sample_uuid},
403, 403,
),
))
def test_broadcast_pages_403_without_permission(
client_request,
endpoint,
extra_args,
expected_get_status,
expected_post_status,
):
client_request.get(
endpoint,
service_id=SERVICE_ONE_ID,
_expected_status=expected_get_status,
**extra_args
)
client_request.post(
endpoint,
service_id=SERVICE_ONE_ID,
_expected_status=expected_post_status,
**extra_args
)
@pytest.mark.parametrize('user_is_platform_admin', [True, False])
@pytest.mark.parametrize('endpoint, extra_args, expected_get_status, expected_post_status', (
(
'.new_broadcast', {},
403, 403,
),
(
'.write_new_broadcast', {},
403, 403,
),
(
'.broadcast',
{'template_id': sample_uuid},
403, 405,
),
(
'.preview_broadcast_areas', {'broadcast_message_id': sample_uuid},
403, 405,
),
(
'.choose_broadcast_library', {'broadcast_message_id': sample_uuid},
403, 405,
),
(
'.choose_broadcast_area', {'broadcast_message_id': sample_uuid, 'library_slug': 'countries'},
403, 403,
),
(
'.remove_broadcast_area', {'broadcast_message_id': sample_uuid, 'area_slug': 'england'},
403, 405,
),
(
'.preview_broadcast_message', {'broadcast_message_id': sample_uuid},
403, 403,
),
))
def test_broadcast_pages_403_for_user_without_permission(
client_request,
service_one,
active_user_view_permissions,
platform_admin_user_no_service_permissions,
endpoint,
extra_args,
expected_get_status,
expected_post_status,
user_is_platform_admin
):
"""
Checks that users without permissions, including admin users, cannot create or edit broadcasts.
"""
service_one['permissions'] += ['broadcast']
if user_is_platform_admin:
client_request.login(platform_admin_user_no_service_permissions)
else:
client_request.login(active_user_view_permissions)
client_request.get(
endpoint,
service_id=SERVICE_ONE_ID,
_expected_status=expected_get_status,
**extra_args
)
client_request.post(
endpoint,
service_id=SERVICE_ONE_ID,
_expected_status=expected_post_status,
**extra_args
)
@pytest.mark.parametrize('user', [
create_active_user_view_permissions(),
create_platform_admin_user(),
create_active_user_create_broadcasts_permissions(),
])
def test_user_cannot_accept_broadcast_without_permission(
client_request,
service_one,
user,
):
service_one['permissions'] += ['broadcast']
client_request.login(user)
client_request.post(
'.approve_broadcast_message',
service_id=SERVICE_ONE_ID,
broadcast_message_id=sample_uuid,
_expected_status=403,
)
@pytest.mark.parametrize('user_is_platform_admin', [True, False])
def test_user_cannot_reject_broadcast_without_permission(
client_request,
service_one,
active_user_view_permissions,
platform_admin_user_no_service_permissions,
user_is_platform_admin
):
service_one['permissions'] += ['broadcast']
if user_is_platform_admin:
client_request.login(platform_admin_user_no_service_permissions)
else:
client_request.login(active_user_view_permissions)
client_request.get(
'.reject_broadcast_message',
service_id=SERVICE_ONE_ID,
broadcast_message_id=sample_uuid,
_expected_status=403,
)
def test_user_cannot_cancel_broadcast_without_permission(
mocker,
client_request,
service_one,
active_user_view_permissions,
):
"""
separate test for cancel_broadcast endpoint, because admin users are allowed to cancel broadcasts
"""
service_one['permissions'] += ['broadcast']
mocker.patch('app.user_api_client.get_user', return_value=active_user_view_permissions)
client_request.get(
'.cancel_broadcast_message',
service_id=SERVICE_ONE_ID,
_expected_status=403,
**{'broadcast_message_id': sample_uuid}
)
client_request.post(
'.cancel_broadcast_message',
service_id=SERVICE_ONE_ID,
_expected_status=403,
**{'broadcast_message_id': sample_uuid}
)
@pytest.mark.parametrize('endpoint, step_index, expected_link_text, expected_link_href', (
('.broadcast_tour', 1, 'Continue', partial(url_for, '.broadcast_tour', step_index=2)),
('.broadcast_tour', 2, 'Continue', partial(url_for, '.broadcast_tour', step_index=3)),
('.broadcast_tour', 3, 'Continue', partial(url_for, '.broadcast_tour', step_index=4)),
('.broadcast_tour', 4, 'Continue', partial(url_for, '.broadcast_tour', step_index=5)),
('.broadcast_tour', 5, 'Continue', partial(url_for, '.service_dashboard')),
('.broadcast_tour', 6, 'Continue', partial(url_for, '.service_dashboard')),
('.broadcast_tour_live', 1, 'Continue', partial(url_for, '.broadcast_tour_live', step_index=2)),
('.broadcast_tour_live', 2, 'Continue', partial(url_for, '.service_dashboard')),
))
def test_broadcast_tour_pages_have_continue_link(
client_request,
service_one,
endpoint,
step_index,
expected_link_text,
expected_link_href,
):
service_one['permissions'] += ['broadcast']
page = client_request.get(
endpoint,
service_id=SERVICE_ONE_ID,
step_index=step_index,
)
link = page.select_one('.banner-tour a')
assert normalize_spaces(link.text) == expected_link_text
assert link['href'] == expected_link_href(service_id=SERVICE_ONE_ID)
@pytest.mark.parametrize('endpoint, step_index', (
pytest.param('.broadcast_tour', 1, marks=pytest.mark.xfail),
pytest.param('.broadcast_tour', 2, marks=pytest.mark.xfail),
pytest.param('.broadcast_tour', 3, marks=pytest.mark.xfail),
pytest.param('.broadcast_tour', 4, marks=pytest.mark.xfail),
('.broadcast_tour', 5),
('.broadcast_tour', 6),
('.broadcast_tour_live', 1),
('.broadcast_tour_live', 2),
))
def test_some_broadcast_tour_pages_show_service_name(
client_request,
service_one,
endpoint,
step_index,
):
service_one['permissions'] += ['broadcast']
page = client_request.get(
endpoint,
service_id=SERVICE_ONE_ID,
step_index=step_index,
)
assert normalize_spaces(
page.select_one('.navigation-service').text
).startswith(
'service one Training'
)
@pytest.mark.parametrize(
'trial_mode, channel, allowed_broadcast_provider, selector, expected_text, expected_tagged_text',
(
(
True,
None,
'all',
'.navigation-service-type.navigation-service-type--training',
'service one Training Switch service',
'Training',
),
(
True,
'test',
'all',
'.navigation-service-type.navigation-service-type--training',
'service one Training Switch service',
'Training',
),
(
False,
'severe',
'all',
'.navigation-service-type.navigation-service-type--live',
'service one Live Switch service',
'Live',
),
(
False,
'operator',
'all',
'.navigation-service-type.navigation-service-type--operator',
'service one Operator Switch service',
'Operator',
),
(
False,
'operator',
'vodafone',
'.navigation-service-type.navigation-service-type--operator',
'service one Operator (Vodafone) Switch service',
'Operator (Vodafone)',
),
(
False,
'test',
'all',
'.navigation-service-type.navigation-service-type--test',
'service one Test Switch service',
'Test',
),
(
False,
'test',
'vodafone',
'.navigation-service-type.navigation-service-type--test',
'service one Test (Vodafone) Switch service',
'Test (Vodafone)',
),
(
False,
'government',
'all',
'.navigation-service-type.navigation-service-type--government',
'service one Government Switch service',
'Government',
),
(
False,
'government',
'vodafone',
'.navigation-service-type.navigation-service-type--government',
'service one Government (Vodafone) Switch service',
'Government (Vodafone)',
),
(
False,
'severe',
'vodafone',
'.navigation-service-type.navigation-service-type--live',
'service one Live (Vodafone) Switch service',
'Live (Vodafone)',
),
)
)
def test_broadcast_service_shows_channel_settings(
client_request,
service_one,
mock_get_no_broadcast_messages,
trial_mode,
allowed_broadcast_provider,
channel,
selector,
expected_text,
expected_tagged_text,
):
service_one['allowed_broadcast_provider'] = allowed_broadcast_provider
service_one['permissions'] += ['broadcast']
service_one['restricted'] = trial_mode
service_one['broadcast_channel'] = channel
page = client_request.get(
'.broadcast_dashboard',
service_id=SERVICE_ONE_ID,
)
assert normalize_spaces(
page.select_one('.navigation-service').text
) == (
expected_text
)
assert normalize_spaces(
page.select_one('.navigation-service').select_one(selector).text
) == (
expected_tagged_text
)
@pytest.mark.parametrize('endpoint, step_index', (
('.broadcast_tour', 0),
('.broadcast_tour', 7),
('.broadcast_tour_live', 0),
('.broadcast_tour_live', 3),
))
def test_broadcast_tour_page_404s_out_of_range(
client_request,
service_one,
endpoint,
step_index,
):
service_one['permissions'] += ['broadcast']
client_request.get(
endpoint,
service_id=SERVICE_ONE_ID,
step_index=step_index,
_expected_status=404,
)
def test_dashboard_redirects_to_broadcast_dashboard(
client_request,
service_one,
):
service_one['permissions'] += ['broadcast']
client_request.get(
'.service_dashboard',
service_id=SERVICE_ONE_ID,
_expected_redirect=url_for(
'.broadcast_dashboard',
service_id=SERVICE_ONE_ID,
_external=True,
),
    )
def test_empty_broadcast_dashboard(
client_request,
service_one,
mock_get_no_broadcast_messages,
):
service_one['permissions'] += ['broadcast']
page = client_request.get(
'.broadcast_dashboard',
service_id=SERVICE_ONE_ID,
)
assert normalize_spaces(page.select_one('h1').text) == (
'Current alerts'
)
assert [
normalize_spaces(row.text) for row in page.select('.table-empty-message')
] == [
'You do not have any current alerts',
]
@pytest.mark.parametrize('user', [
create_active_user_approve_broadcasts_permissions(),
create_active_user_create_broadcasts_permissions(),
])
@freeze_time('2020-02-20 02:20')
def test_broadcast_dashboard(
client_request,
service_one,
mock_get_broadcast_messages,
user,
):
service_one['permissions'] += ['broadcast']
client_request.login(user)
page = client_request.get(
'.broadcast_dashboard',
service_id=SERVICE_ONE_ID,
)
assert len(page.select('.ajax-block-container')) == len(page.select('h1')) == 1
assert [
normalize_spaces(row.text)
for row in page.select('.ajax-block-container')[0].select('.file-list')
] == [
'Half an hour ago This is a test Waiting for approval England Scotland',
'Hour and a half ago This is a test Waiting for approval England Scotland',
'Example template This is a test Live since today at 2:20am England Scotland',
'Example template This is a test Live since today at 1:20am England Scotland',
]
@pytest.mark.parametrize('user', [
create_platform_admin_user(),
create_active_user_view_permissions(),
create_active_user_approve_broadcasts_permissions(),
])
@pytest.mark.parametrize('endpoint', (
'.broadcast_dashboard', '.broadcast_dashboard_previous', '.broadcast_dashboard_rejected',
))
def test_broadcast_dashboard_does_not_have_button_if_user_does_not_have_permission_to_create_broadcast(
client_request,
service_one,
mock_get_broadcast_messages,
endpoint,
user,
):
client_request.login(user)
service_one['permissions'] += ['broadcast']
page = client_request.get(
endpoint,
service_id=SERVICE_ONE_ID,
)
assert not page.select('a.govuk-button')
@pytest.mark.parametrize('endpoint', (
'.broadcast_dashboard', '.broadcast_dashboard_previous', '.broadcast_dashboard_rejected',
))
def test_broadcast_dashboard_has_new_alert_button_if_user_has_permission_to_create_broadcasts(
client_request,
service_one,
mock_get_broadcast_messages,
active_user_create_broadcasts_permission,
endpoint,
):
client_request.login(active_user_create_broadcasts_permission)
service_one['permissions'] += ['broadcast']
page = client_request.get(
endpoint,
service_id=SERVICE_ONE_ID,
)
button = page.select_one(
'.js-stick-at-bottom-when-scrolling a.govuk-button.govuk-button--secondary'
)
assert normalize_spaces(button.text) == 'New alert'
assert button['href'] == url_for(
'main.new_broadcast',
service_id=SERVICE_ONE_ID,
)
@freeze_time('2020-02-20 02:20')
def test_broadcast_dashboard_json(
client_request,
service_one,
mock_get_broadcast_messages,
):
service_one['permissions'] += ['broadcast']
response = client_request.get_response(
'.broadcast_dashboard_updates',
service_id=SERVICE_ONE_ID,
)
json_response = json.loads(response.get_data(as_text=True))
assert json_response.keys() == {'current_broadcasts'}
assert 'Waiting for approval' in json_response['current_broadcasts']
assert 'Live since today at 2:20am' in json_response['current_broadcasts']
@pytest.mark.parametrize('user', [
create_active_user_approve_broadcasts_permissions(),
create_active_user_create_broadcasts_permissions(),
])
@freeze_time('2020-02-20 02:20')
def test_previous_broadcasts_page(
client_request,
service_one,
mock_get_broadcast_messages,
user,
):
service_one['permissions'] += ['broadcast']
client_request.login(user)
page = client_request.get(
'.broadcast_dashboard_previous',
service_id=SERVICE_ONE_ID,
)
assert normalize_spaces(page.select_one('main h1').text) == (
'Past alerts'
)
assert len(page.select('.ajax-block-container')) == 1
assert [
normalize_spaces(row.text)
for row in page.select('.ajax-block-container')[0].select('.file-list')
] == [
'Example template This is a test Yesterday at 2:20pm England Scotland',
'Example template This is a test Yesterday at 2:20am England Scotland',
]
@pytest.mark.parametrize('user', [
create_active_user_approve_broadcasts_permissions(),
create_active_user_create_broadcasts_permissions(),
])
@freeze_time('2020-02-20 02:20')
def test_rejected_broadcasts_page(
client_request,
service_one,
mock_get_broadcast_messages,
user,
):
service_one['permissions'] += ['broadcast']
client_request.login(user)
page = client_request.get(
'.broadcast_dashboard_rejected',
service_id=SERVICE_ONE_ID,
)
assert normalize_spaces(page.select_one('main h1').text) == (
'Rejected alerts'
)
assert len(page.select('.ajax-block-container')) == 1
assert [
normalize_spaces(row.text)
for row in page.select('.ajax-block-container')[0].select('.file-list')
] == [
'Example template This is a test Today at 1:20am England Scotland',
]
def test_new_broadcast_page(
client_request,
service_one,
active_user_create_broadcasts_permission,
):
service_one['permissions'] += ['broadcast']
client_request.login(active_user_create_broadcasts_permission)
page = client_request.get(
'.new_broadcast',
service_id=SERVICE_ONE_ID,
)
assert normalize_spaces(page.select_one('h1').text) == 'New alert'
form = page.select_one('form')
assert form['method'] == 'post'
assert 'action' not in form
assert [
(
choice.select_one('input')['name'],
choice.select_one('input')['value'],
normalize_spaces(choice.select_one('label').text),
)
for choice in form.select('.govuk-radios__item')
] == [
('content', 'freeform', 'Write your own message'),
('content', 'template', 'Use a template'),
]
@pytest.mark.parametrize('value, expected_redirect_endpoint', (
('freeform', 'main.write_new_broadcast'),
('template', 'main.choose_template'),
))
def test_new_broadcast_page_redirects(
client_request,
service_one,
active_user_create_broadcasts_permission,
value,
expected_redirect_endpoint,
):
service_one['permissions'] += ['broadcast']
client_request.login(active_user_create_broadcasts_permission)
client_request.post(
'.new_broadcast',
service_id=SERVICE_ONE_ID,
_data={
'content': value,
},
_expected_redirect=url_for(
expected_redirect_endpoint,
service_id=SERVICE_ONE_ID,
_external=True,
)
)
def test_write_new_broadcast_page(
client_request,
service_one,
active_user_create_broadcasts_permission,
):
service_one['permissions'] += ['broadcast']
client_request.login(active_user_create_broadcasts_permission)
page = client_request.get(
'.write_new_broadcast',
service_id=SERVICE_ONE_ID,
)
assert normalize_spaces(page.select_one('h1').text) == 'New alert'
form = page.select_one('form')
assert form['method'] == 'post'
assert 'action' not in form
assert normalize_spaces(page.select_one('label[for=name]').text) == 'Reference'
assert page.select_one('input[type=text]')['name'] == 'name'
assert normalize_spaces(page.select_one('label[for=template_content]').text) == 'Message'
assert page.select_one('textarea')['name'] == 'template_content'
assert page.select_one('textarea')['data-module'] == 'enhanced-textbox'
assert page.select_one('textarea')['data-highlight-placeholders'] == 'false'
assert (
page.select_one('[data-module=update-status]')['data-updates-url']
) == url_for(
'.count_content_length',
service_id=SERVICE_ONE_ID,
template_type='broadcast',
)
assert (
page.select_one('[data-module=update-status]')['data-target']
) == (
page.select_one('textarea')['id']
) == (
'template_content'
)
assert (
page.select_one('[data-module=update-status]')['aria-live']
) == (
'polite'
)
def test_write_new_broadcast_posts(
client_request,
service_one,
mock_create_broadcast_message,
fake_uuid,
active_user_create_broadcasts_permission,
):
service_one['permissions'] += ['broadcast']
client_request.login(active_user_create_broadcasts_permission)
client_request.post(
'.write_new_broadcast',
service_id=SERVICE_ONE_ID,
_data={
'name': 'My new alert',
'template_content': 'This is a test',
},
_expected_redirect=url_for(
'.choose_broadcast_library',
service_id=SERVICE_ONE_ID,
broadcast_message_id=fake_uuid,
_external=True,
),
)
mock_create_broadcast_message.assert_called_once_with(
service_id=SERVICE_ONE_ID,
reference='My new alert',
content='This is a test',
template_id=None,
)
@pytest.mark.parametrize('content, expected_error_message', (
('', 'Cannot be empty'),
('ŵ' * 616, 'Content must be 615 characters or fewer because it contains ŵ'),
('w' * 1_396, 'Content must be 1,395 characters or fewer'),
('hello ((name))', 'You can’t use ((double brackets)) to personalise this message'),
))
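# (615 vs 1,395: a character outside the GSM 7-bit set, such as ŵ, presumably
# forces the alert into UCS-2 encoding, which has a smaller per-message limit;
# hence the two different maximum lengths exercised above.)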
def test_write_new_broadcast_bad_content(
client_request,
service_one,
mock_create_broadcast_message,
active_user_create_broadcasts_permission,
content,
expected_error_message,
):
service_one['permissions'] += ['broadcast']
client_request.login(active_user_create_broadcasts_permission)
page = client_request.post(
'.write_new_broadcast',
service_id=SERVICE_ONE_ID,
_data={
'name': 'My new alert',
'template_content': content,
},
_expected_status=200,
)
assert normalize_spaces(
page.select_one('.error-message').text
) == (
expected_error_message
)
assert mock_create_broadcast_message.called is False
def test_broadcast_page(
client_request,
service_one,
fake_uuid,
mock_create_broadcast_message,
active_user_create_broadcasts_permission,
):
service_one['permissions'] += ['broadcast']
client_request.login(active_user_create_broadcasts_permission)
client_request.get(
'.broadcast',
service_id=SERVICE_ONE_ID,
template_id=fake_uuid,
_expected_redirect=url_for(
'.choose_broadcast_library',
service_id=SERVICE_ONE_ID,
broadcast_message_id=fake_uuid,
_external=True,
),
    )
@pytest.mark.parametrize('areas_selected, areas_listed, estimates', (
([
'ctry19-E92000001',
'ctry19-S92000003',
], [
'England Remove England',
'Scotland Remove Scotland',
], [
'An area of 100,000 square miles Will get the alert',
'An extra area of 6,000 square miles is Likely to get the alert',
'40,000,000 phones estimated',
]),
([
'wd20-E05003224',
'wd20-E05003225',
'wd20-E05003227',
'wd20-E05003228',
'wd20-E05003229',
], [
'Penrith Carleton Remove Penrith Carleton',
'Penrith East Remove Penrith East',
'Penrith Pategill Remove Penrith Pategill',
'Penrith South Remove Penrith South',
'Penrith West Remove Penrith West',
], [
'An area of 4 square miles Will get the alert',
'An extra area of 10 square miles is Likely to get the alert',
'9,000 to 10,000 phones',
]),
([
'lad20-E09000019',
], [
'Islington Remove Islington',
], [
'An area of 6 square miles Will get the alert',
'An extra area of 4 square miles is Likely to get the alert',
'200,000 to 600,000 phones',
]),
([
'ctyua19-E10000019',
], [
'Lincolnshire Remove Lincolnshire',
], [
'An area of 2,000 square miles Will get the alert',
'An extra area of 500 square miles is Likely to get the alert',
'500,000 to 600,000 phones',
]),
([
'ctyua19-E10000019',
'ctyua19-E10000023'
], [
'Lincolnshire Remove Lincolnshire', 'North Yorkshire Remove North Yorkshire',
], [
'An area of 6,000 square miles Will get the alert',
'An extra area of 1,000 square miles is Likely to get the alert',
'1,000,000 phones estimated',
]),
))
def test_preview_broadcast_areas_page(
mocker,
client_request,
service_one,
fake_uuid,
areas_selected,
areas_listed,
estimates,
active_user_create_broadcasts_permission,
):
service_one['permissions'] += ['broadcast']
mocker.patch(
'app.broadcast_message_api_client.get_broadcast_message',
return_value=broadcast_message_json(
id_=fake_uuid,
template_id=fake_uuid,
created_by_id=fake_uuid,
service_id=SERVICE_ONE_ID,
status='draft',
area_ids=areas_selected,
),
)
client_request.login(active_user_create_broadcasts_permission)
page = client_request.get(
'.preview_broadcast_areas',
service_id=SERVICE_ONE_ID,
broadcast_message_id=fake_uuid,
)
assert [
normalize_spaces(item.text)
for item in page.select('ul.area-list li.area-list-item')
] == areas_listed
assert len(page.select('#area-list-map')) == 1
assert [
normalize_spaces(item.text)
for item in page.select('.area-list-key')
] == estimates
@pytest.mark.parametrize('polygons, expected_list_items', (
(
[
[[1, 2], [3, 4], [5, 6]],
[[7, 8], [9, 10], [11, 12]],
],
[
'An area of 1,000 square miles Will get the alert',
'An extra area of 1,000 square miles is Likely to get the alert',
'Unknown number of phones',
]
),
(
[BRISTOL],
[
'An area of 4 square miles Will get the alert',
'An extra area of 3 square miles is Likely to get the alert',
'70,000 to 100,000 phones',
]
),
(
[SKYE],
[
'An area of 2,000 square miles Will get the alert',
'An extra area of 600 square miles is Likely to get the alert',
'3,000 to 4,000 phones',
]
),
))
def test_preview_broadcast_areas_page_with_custom_polygons(
mocker,
client_request,
service_one,
fake_uuid,
polygons,
expected_list_items,
active_user_create_broadcasts_permission,
):
service_one['permissions'] += ['broadcast']
mocker.patch(
'app.broadcast_message_api_client.get_broadcast_message',
return_value=broadcast_message_json(
id_=fake_uuid,
template_id=fake_uuid,
created_by_id=fake_uuid,
service_id=SERVICE_ONE_ID,
status='draft',
areas={
'names': ['Area one', 'Area two', 'Area three'],
'simple_polygons': polygons,
}
),
)
client_request.login(active_user_create_broadcasts_permission)
page = client_request.get(
'.preview_broadcast_areas',
service_id=SERVICE_ONE_ID,
broadcast_message_id=fake_uuid,
)
assert [
normalize_spaces(item.text)
for item in page.select('ul.area-list li.area-list-item')
] == [
'Area one Remove Area one', 'Area two Remove Area two', 'Area three Remove Area three',
]
assert len(page.select('#area-list-map')) == 1
assert [
normalize_spaces(item.text)
for item in page.select('.area-list-key')
] == expected_list_items
@pytest.mark.parametrize('area_ids, expected_list', (
([], [
'Countries',
'Demo areas',
'Local authorities',
'Test areas',
]),
([
# Countries have no parent areas
'ctry19-E92000001',
'ctry19-S92000003',
], [
'Countries',
'Demo areas',
'Local authorities',
'Test areas',
]),
([
# If you’ve chosen the whole of a county or unitary authority
# there’s no reason to also pick districts of it
'ctyua19-E10000013', # Gloucestershire, a county
'lad20-E06000052', # Cornwall, a unitary authority
], [
'Countries',
'Demo areas',
'Local authorities',
'Test areas',
]),
([
'wd20-E05004299', # Pitville, in Cheltenham, in Gloucestershire
'wd20-E05004290', # Benhall and the Reddings, in Cheltenham, in Gloucestershire
'wd20-E05010951', # Abbeymead, in Gloucester, in Gloucestershire
        'wd20-S13002775',  # Shetland Central, in Shetland Islands
'lad20-E07000037', # High Peak, a district in Derbyshire
], [
'Cheltenham',
'Derbyshire',
'Gloucester',
'Gloucestershire',
'Shetland Islands',
# ---
'Countries',
'Demo areas',
'Local authorities',
'Test areas',
]),
))
def test_choose_broadcast_library_page(
mocker,
client_request,
service_one,
fake_uuid,
active_user_create_broadcasts_permission,
area_ids,
expected_list,
):
service_one['permissions'] += ['broadcast']
mocker.patch(
'app.broadcast_message_api_client.get_broadcast_message',
return_value=broadcast_message_json(
id_=fake_uuid,
template_id=fake_uuid,
created_by_id=fake_uuid,
service_id=SERVICE_ONE_ID,
status='draft',
area_ids=area_ids,
),
)
client_request.login(active_user_create_broadcasts_permission)
page = client_request.get(
'.choose_broadcast_library',
service_id=SERVICE_ONE_ID,
broadcast_message_id=fake_uuid,
)
assert [
normalize_spaces(title.text)
for title in page.select('main a.govuk-link')
] == expected_list
assert normalize_spaces(page.select('.file-list-hint-large')[0].text) == (
'England, Northern Ireland, Scotland and Wales'
)
assert page.select_one('a.file-list-filename-large.govuk-link')['href'] == url_for(
'.choose_broadcast_area',
service_id=SERVICE_ONE_ID,
broadcast_message_id=fake_uuid,
library_slug='ctry19',
)
def test_suggested_area_has_correct_link(
mocker,
client_request,
service_one,
fake_uuid,
active_user_create_broadcasts_permission,
):
service_one['permissions'] += ['broadcast']
mocker.patch(
'app.broadcast_message_api_client.get_broadcast_message',
return_value=broadcast_message_json(
id_=fake_uuid,
template_id=fake_uuid,
created_by_id=fake_uuid,
service_id=SERVICE_ONE_ID,
status='draft',
area_ids=[
'wd20-E05004299', # Pitville, a ward of Cheltenham
],
),
)
client_request.login(active_user_create_broadcasts_permission)
page = client_request.get(
'.choose_broadcast_library',
service_id=SERVICE_ONE_ID,
broadcast_message_id=fake_uuid,
)
link = page.select_one('main a.govuk-link')
assert link.text == 'Cheltenham'
assert link['href'] == url_for(
'main.choose_broadcast_sub_area',
service_id=SERVICE_ONE_ID,
broadcast_message_id=fake_uuid,
library_slug='wd20-lad20-ctyua19',
area_slug='lad20-E07000078',
)
def test_choose_broadcast_area_page(
client_request,
service_one,
mock_get_draft_broadcast_message,
fake_uuid,
active_user_create_broadcasts_permission,
):
service_one['permissions'] += ['broadcast']
client_request.login(active_user_create_broadcasts_permission)
page = client_request.get(
'.choose_broadcast_area',
service_id=SERVICE_ONE_ID,
broadcast_message_id=fake_uuid,
library_slug='ctry19',
)
assert normalize_spaces(page.select_one('h1').text) == (
'Choose countries'
)
assert [
(
choice.select_one('input')['value'],
normalize_spaces(choice.select_one('label').text),
)
for choice in page.select('form[method=post] .govuk-checkboxes__item')
] == [
('ctry19-E92000001', 'England'),
('ctry19-N92000002', 'Northern Ireland'),
('ctry19-S92000003', 'Scotland'),
('ctry19-W92000004', 'Wales'),
]
def test_choose_broadcast_area_page_for_area_with_sub_areas(
client_request,
service_one,
mock_get_draft_broadcast_message,
fake_uuid,
active_user_create_broadcasts_permission,
):
service_one['permissions'] += ['broadcast']
client_request.login(active_user_create_broadcasts_permission)
page = client_request.get(
'.choose_broadcast_area',
service_id=SERVICE_ONE_ID,
broadcast_message_id=fake_uuid,
library_slug='wd20-lad20-ctyua19',
)
assert normalize_spaces(page.select_one('h1').text) == (
'Choose a local authority'
)
live_search = page.select_one("[data-module=live-search]")
assert live_search['data-targets'] == '.file-list-item'
assert live_search.select_one('input')['type'] == 'search'
partial_url_for = partial(
url_for,
'main.choose_broadcast_sub_area',
service_id=SERVICE_ONE_ID,
broadcast_message_id=fake_uuid,
library_slug='wd20-lad20-ctyua19',
)
choices = [
(
choice.select_one('a.file-list-filename-large')['href'],
normalize_spaces(choice.text),
)
for choice in page.select('.file-list-item')
]
assert len(choices) == 394
assert choices[0] == (partial_url_for(area_slug='lad20-S12000033'), 'Aberdeen City',)
    # Note: we don't populate the prev_area_slug query param, so the back link
    # will point back here rather than to a county page, even though Ashford
    # belongs to Kent.
assert choices[12] == (partial_url_for(area_slug='lad20-E07000105'), 'Ashford',)
assert choices[-1] == (partial_url_for(area_slug='lad20-E06000014'), 'York',)
def test_choose_broadcast_sub_area_page_for_district_shows_checkboxes_for_wards(
client_request,
service_one,
mock_get_draft_broadcast_message,
fake_uuid,
active_user_create_broadcasts_permission,
):
service_one['permissions'] += ['broadcast']
client_request.login(active_user_create_broadcasts_permission)
page = client_request.get(
'main.choose_broadcast_sub_area',
service_id=SERVICE_ONE_ID,
broadcast_message_id=fake_uuid,
library_slug='wd20-lad20-ctyua19',
area_slug='lad20-S12000033',
)
assert normalize_spaces(page.select_one('h1').text) == (
'Choose an area of Aberdeen City'
)
live_search = page.select_one("[data-module=live-search]")
assert live_search['data-targets'] == '#sub-areas .govuk-checkboxes__item'
assert live_search.select_one('input')['type'] == 'search'
all_choices = [
(
choice.select_one('input')['value'],
normalize_spaces(choice.select_one('label').text),
)
for choice in page.select('form[method=post] .govuk-checkboxes__item')
]
sub_choices = [
(
choice.select_one('input')['value'],
normalize_spaces(choice.select_one('label').text),
)
for choice in page.select('form[method=post] #sub-areas .govuk-checkboxes__item')
]
assert all_choices[:3] == [
('y', 'All of Aberdeen City'),
('wd20-S13002845', 'Airyhall/Broomhill/Garthdee'),
('wd20-S13002836', 'Bridge of Don'),
]
assert sub_choices[:3] == [
('wd20-S13002845', 'Airyhall/Broomhill/Garthdee'),
('wd20-S13002836', 'Bridge of Don'),
('wd20-S13002835', 'Dyce/Bucksburn/Danestone'),
]
assert all_choices[-1:] == sub_choices[-1:] == [
('wd20-S13002846', 'Torry/Ferryhill'),
]
@pytest.mark.parametrize('prev_area_slug, expected_back_link_url, expected_back_link_extra_kwargs', [
(
'ctyua19-E10000016',
'main.choose_broadcast_sub_area',
{
'area_slug': 'ctyua19-E10000016' # Kent
}
),
(
None,
'.choose_broadcast_area',
{}
)
])
def test_choose_broadcast_sub_area_page_for_district_has_back_link(
client_request,
service_one,
mock_get_draft_broadcast_message,
active_user_create_broadcasts_permission,
prev_area_slug,
expected_back_link_url,
expected_back_link_extra_kwargs
):
service_one['permissions'] += ['broadcast']
client_request.login(active_user_create_broadcasts_permission)
page = client_request.get(
'main.choose_broadcast_sub_area',
service_id=SERVICE_ONE_ID,
broadcast_message_id=str(uuid.UUID(int=0)),
library_slug='wd20-lad20-ctyua19',
area_slug='lad20-E07000105', # Ashford
prev_area_slug=prev_area_slug,
)
assert normalize_spaces(page.select_one('h1').text) == (
'Choose an area of Ashford'
)
back_link = page.select_one('.govuk-back-link')
assert back_link['href'] == url_for(
expected_back_link_url,
service_id=SERVICE_ONE_ID,
broadcast_message_id=str(uuid.UUID(int=0)),
library_slug='wd20-lad20-ctyua19',
**expected_back_link_extra_kwargs
)
def test_choose_broadcast_sub_area_page_for_county_shows_links_for_districts(
client_request,
service_one,
mock_get_draft_broadcast_message,
fake_uuid,
active_user_create_broadcasts_permission,
):
service_one['permissions'] += ['broadcast']
client_request.login(active_user_create_broadcasts_permission)
page = client_request.get(
'main.choose_broadcast_sub_area',
service_id=SERVICE_ONE_ID,
broadcast_message_id=fake_uuid,
library_slug='wd20-lad20-ctyua19',
area_slug='ctyua19-E10000016', # Kent
)
assert normalize_spaces(page.select_one('h1').text) == (
'Choose an area of Kent'
)
live_search = page.select_one("[data-module=live-search]")
assert live_search['data-targets'] == '.file-list-item'
assert live_search.select_one('input')['type'] == 'search'
all_choices_checkbox = [
(
choice.select_one('input')['value'],
normalize_spaces(choice.select_one('label').text),
)
for choice in page.select('form[method=post] .govuk-checkboxes__item')
]
districts = [
(
district['href'],
district.text,
)
for district in page.select('form[method=post] a')
]
assert all_choices_checkbox == [
('y', 'All of Kent'),
]
assert len(districts) == 12
assert districts[0][0] == url_for(
'main.choose_broadcast_sub_area',
service_id=SERVICE_ONE_ID,
broadcast_message_id=fake_uuid,
library_slug='wd20-lad20-ctyua19',
area_slug='lad20-E07000105',
prev_area_slug='ctyua19-E10000016', # Kent
)
assert districts[0][1] == 'Ashford'
assert districts[-1][0] == url_for(
'main.choose_broadcast_sub_area',
service_id=SERVICE_ONE_ID,
broadcast_message_id=fake_uuid,
library_slug='wd20-lad20-ctyua19',
area_slug='lad20-E07000116',
prev_area_slug='ctyua19-E10000016', # Kent
)
assert districts[-1][1] == 'Tunbridge Wells'
def test_add_broadcast_area(
client_request,
service_one,
mock_get_draft_broadcast_message,
mock_update_broadcast_message,
fake_uuid,
mocker,
active_user_create_broadcasts_permission,
):
service_one['permissions'] += ['broadcast']
polygon_class = namedtuple("polygon_class", ["as_coordinate_pairs_lat_long"])
coordinates = [[50.1, 0.1], [50.2, 0.2], [50.3, 0.2]]
polygons = polygon_class(as_coordinate_pairs_lat_long=coordinates)
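    # polygon_class is a stand-in for the real polygon object returned by
    # BroadcastMessage.get_polygons_from_areas; the code under test only
    # reads its as_coordinate_pairs_lat_long attribute.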
mock_get_polygons_from_areas = mocker.patch(
'app.models.broadcast_message.BroadcastMessage.get_polygons_from_areas',
return_value=polygons,
)
client_request.login(active_user_create_broadcasts_permission)
client_request.post(
'.choose_broadcast_area',
service_id=SERVICE_ONE_ID,
broadcast_message_id=fake_uuid,
library_slug='ctry19',
_data={
'areas': ['ctry19-E92000001', 'ctry19-W92000004']
}
)
mock_get_polygons_from_areas.assert_called_once_with(area_attribute='simple_polygons')
mock_update_broadcast_message.assert_called_once_with(
service_id=SERVICE_ONE_ID,
broadcast_message_id=fake_uuid,
data={
'areas': {
'ids': ['ctry19-E92000001', 'ctry19-S92000003', 'ctry19-W92000004'],
'names': ['England', 'Scotland', 'Wales'],
'aggregate_names': ['England', 'Scotland', 'Wales'],
'simple_polygons': coordinates
}
},
)
@pytest.mark.parametrize('post_data, expected_data', (
(
{
'select_all': 'y', 'areas': ['wd20-S13002845']
},
{
# wd20-S13002845 is ignored because the user chose ‘Select all…’
'ids': ['lad20-S12000033'],
'names': ['Aberdeen City'],
'aggregate_names': ['Aberdeen City']
}
),
(
{
'areas': ['wd20-S13002845', 'wd20-S13002836']
},
{
'ids': ['wd20-S13002845', 'wd20-S13002836'],
'names': ['Bridge of Don', 'Airyhall/Broomhill/Garthdee'],
'aggregate_names': ['Aberdeen City'],
}
),
))
def test_add_broadcast_sub_area_district_view(
client_request,
service_one,
mock_get_draft_broadcast_message,
mock_update_broadcast_message,
fake_uuid,
post_data,
expected_data,
mocker,
active_user_create_broadcasts_permission,
):
service_one['permissions'] += ['broadcast']
polygon_class = namedtuple("polygon_class", ["as_coordinate_pairs_lat_long"])
coordinates = [[50.1, 0.1], [50.2, 0.2], [50.3, 0.2]]
polygons = polygon_class(as_coordinate_pairs_lat_long=coordinates)
mock_get_polygons_from_areas = mocker.patch(
'app.models.broadcast_message.BroadcastMessage.get_polygons_from_areas',
return_value=polygons,
)
client_request.login(active_user_create_broadcasts_permission)
client_request.post(
'.choose_broadcast_sub_area',
service_id=SERVICE_ONE_ID,
broadcast_message_id=fake_uuid,
library_slug='wd20-lad20-ctyua19',
area_slug='lad20-S12000033',
_data=post_data,
)
# These two areas are on the broadcast already
expected_data['ids'] = ['ctry19-E92000001', 'ctry19-S92000003'] + expected_data['ids']
expected_data['names'] = ['England', 'Scotland'] + expected_data['names']
expected_data['aggregate_names'] = sorted(['England', 'Scotland'] + expected_data['aggregate_names'])
mock_get_polygons_from_areas.assert_called_once_with(area_attribute='simple_polygons')
mock_update_broadcast_message.assert_called_once_with(
service_id=SERVICE_ONE_ID,
broadcast_message_id=fake_uuid,
data={
'areas': {
'simple_polygons': coordinates,
**expected_data,
}
},
)
def test_add_broadcast_sub_area_county_view(
client_request,
service_one,
mock_get_draft_broadcast_message,
mock_update_broadcast_message,
fake_uuid,
mocker,
active_user_create_broadcasts_permission,
):
service_one['permissions'] += ['broadcast']
polygon_class = namedtuple("polygon_class", ["as_coordinate_pairs_lat_long"])
coordinates = [[50.1, 0.1], [50.2, 0.2], [50.3, 0.2]]
polygons = polygon_class(as_coordinate_pairs_lat_long=coordinates)
mock_get_polygons_from_areas = mocker.patch(
'app.models.broadcast_message.BroadcastMessage.get_polygons_from_areas',
return_value=polygons,
)
client_request.login(active_user_create_broadcasts_permission)
client_request.post(
'.choose_broadcast_sub_area',
service_id=SERVICE_ONE_ID,
broadcast_message_id=fake_uuid,
library_slug='wd20-lad20-ctyua19',
area_slug='ctyua19-E10000016', # Kent
_data={'select_all': 'y'},
)
mock_get_polygons_from_areas.assert_called_once_with(area_attribute='simple_polygons')
mock_update_broadcast_message.assert_called_once_with(
service_id=SERVICE_ONE_ID,
broadcast_message_id=fake_uuid,
data={
'areas': {
'simple_polygons': coordinates,
'ids': [
# These two areas are on the broadcast already
'ctry19-E92000001',
'ctry19-S92000003',
] + [
'ctyua19-E10000016'
],
'names': ['England', 'Scotland', 'Kent'],
'aggregate_names': ['England', 'Kent', 'Scotland']
}
},
)
def test_remove_broadcast_area_page(
client_request,
service_one,
mock_get_draft_broadcast_message,
mock_update_broadcast_message,
fake_uuid,
mocker,
active_user_create_broadcasts_permission,
):
service_one['permissions'] += ['broadcast']
polygon_class = namedtuple("polygon_class", ["as_coordinate_pairs_lat_long"])
coordinates = [[50.1, 0.1], [50.2, 0.2], [50.3, 0.2]]
polygons = polygon_class(as_coordinate_pairs_lat_long=coordinates)
mock_get_polygons_from_areas = mocker.patch(
'app.models.broadcast_message.BroadcastMessage.get_polygons_from_areas',
return_value=polygons,
)
client_request.login(active_user_create_broadcasts_permission)
client_request.get(
'.remove_broadcast_area',
service_id=SERVICE_ONE_ID,
broadcast_message_id=fake_uuid,
area_slug='ctry19-E92000001',
_expected_redirect=url_for(
'.preview_broadcast_areas',
service_id=SERVICE_ONE_ID,
broadcast_message_id=fake_uuid,
_external=True,
),
)
mock_get_polygons_from_areas.assert_called_once_with(area_attribute='simple_polygons')
mock_update_broadcast_message.assert_called_once_with(
service_id=SERVICE_ONE_ID,
broadcast_message_id=fake_uuid,
data={
'areas': {
'simple_polygons': coordinates,
'names': ['Scotland'],
'aggregate_names': ['Scotland'],
'ids': ['ctry19-S92000003']
},
},
)
def test_preview_broadcast_message_page(
client_request,
service_one,
mock_get_draft_broadcast_message,
fake_uuid,
active_user_create_broadcasts_permission,
):
service_one['permissions'] += ['broadcast']
client_request.login(active_user_create_broadcasts_permission)
page = client_request.get(
'.preview_broadcast_message',
service_id=SERVICE_ONE_ID,
broadcast_message_id=fake_uuid,
)
assert [
normalize_spaces(area.text)
for area in page.select('.area-list-item.area-list-item--unremoveable')
] == [
'England',
'Scotland',
]
assert normalize_spaces(
page.select_one('h2.broadcast-message-heading').text
) == (
'Emergency alert'
)
assert normalize_spaces(
page.select_one('.broadcast-message-wrapper').text
) == (
'Emergency alert '
'This is a test'
)
form = page.select_one('form')
assert form['method'] == 'post'
assert 'action' not in form
def test_start_broadcasting(
client_request,
service_one,
mock_get_draft_broadcast_message,
mock_update_broadcast_message_status,
fake_uuid,
active_user_create_broadcasts_permission,
):
service_one['permissions'] += ['broadcast']
client_request.login(active_user_create_broadcasts_permission)
client_request.post(
'.preview_broadcast_message',
service_id=SERVICE_ONE_ID,
broadcast_message_id=fake_uuid,
_expected_redirect=url_for(
'main.view_current_broadcast',
service_id=SERVICE_ONE_ID,
broadcast_message_id=fake_uuid,
_external=True,
),
    )
mock_update_broadcast_message_status.assert_called_once_with(
'pending-approval',
service_id=SERVICE_ONE_ID,
broadcast_message_id=fake_uuid,
)
@pytest.mark.parametrize('endpoint, created_by_api, extra_fields, expected_paragraphs', (
('.view_current_broadcast', False, {
'status': 'broadcasting',
'finishes_at': '2020-02-23T23:23:23.000000',
}, [
'Live since 20 February at 8:20pm Stop sending',
'Created by Alice and approved by Bob.',
'Broadcasting stops tomorrow at 11:23pm.'
]),
('.view_current_broadcast', True, {
'status': 'broadcasting',
'finishes_at': '2020-02-23T23:23:23.000000',
}, [
'Live since 20 February at 8:20pm Stop sending',
'Created from an API call and approved by Alice.',
'Broadcasting stops tomorrow at 11:23pm.'
]),
('.view_previous_broadcast', False, {
'status': 'broadcasting',
'finishes_at': '2020-02-22T22:20:20.000000', # 2 mins before now()
}, [
'Sent on 20 February at 8:20pm.',
'Created by Alice and approved by Bob.',
'Finished broadcasting today at 10:20pm.'
]),
('.view_previous_broadcast', True, {
'status': 'broadcasting',
'finishes_at': '2020-02-22T22:20:20.000000', # 2 mins before now()
}, [
'Sent on 20 February at 8:20pm.',
'Created from an API call and approved by Alice.',
'Finished broadcasting today at 10:20pm.'
]),
('.view_previous_broadcast', False, {
'status': 'completed',
'finishes_at': '2020-02-21T21:21:21.000000',
}, [
'Sent on 20 February at 8:20pm.',
'Created by Alice and approved by Bob.',
'Finished broadcasting yesterday at 9:21pm.',
]),
('.view_previous_broadcast', False, {
'status': 'cancelled',
'cancelled_by_id': sample_uuid,
'cancelled_at': '2020-02-21T21:21:21.000000',
}, [
'Sent on 20 February at 8:20pm.',
'Created by Alice and approved by Bob.',
'Stopped by Carol yesterday at 9:21pm.',
]),
('.view_previous_broadcast', False, {
'status': 'cancelled',
'cancelled_by_id': None,
'cancelled_at': '2020-02-21T21:21:21.000000',
}, [
'Sent on 20 February at 8:20pm.',
'Created by Alice and approved by Bob.',
'Stopped by an API call yesterday at 9:21pm.',
]),
('.view_rejected_broadcast', False, {
'status': 'rejected',
'updated_at': '2020-02-21T21:21:21.000000',
}, [
'Rejected yesterday at 9:21pm.',
'Created by Alice and approved by Bob.',
]),
))
@freeze_time('2020-02-22T22:22:22.000000')
def test_view_broadcast_message_page(
mocker,
client_request,
service_one,
active_user_view_permissions,
fake_uuid,
endpoint,
created_by_api,
extra_fields,
expected_paragraphs,
):
mocker.patch(
'app.broadcast_message_api_client.get_broadcast_message',
return_value=broadcast_message_json(
id_=fake_uuid,
service_id=SERVICE_ONE_ID,
template_id=fake_uuid,
created_by_id=None if created_by_api else fake_uuid,
approved_by_id=fake_uuid,
starts_at='2020-02-20T20:20:20.000000',
**extra_fields
),
)
mocker.patch('app.user_api_client.get_user', side_effect=[
active_user_view_permissions,
user_json(name='Alice'),
user_json(name='Bob'),
user_json(name='Carol'),
])
service_one['permissions'] += ['broadcast']
page = client_request.get(
endpoint,
service_id=SERVICE_ONE_ID,
broadcast_message_id=fake_uuid,
)
assert [
normalize_spaces(p.text) for p in page.select('main p.govuk-body')
] == expected_paragraphs
@pytest.mark.parametrize('endpoint', (
'.view_current_broadcast',
'.view_previous_broadcast',
'.view_rejected_broadcast',
))
@pytest.mark.parametrize('status, expected_highlighted_navigation_item, expected_back_link_endpoint', (
(
'pending-approval',
'Current alerts',
'.broadcast_dashboard',
),
(
'broadcasting',
'Current alerts',
'.broadcast_dashboard',
),
(
'completed',
'Past alerts',
'.broadcast_dashboard_previous',
),
(
'cancelled',
'Past alerts',
'.broadcast_dashboard_previous',
),
(
'rejected',
'Rejected alerts',
'.broadcast_dashboard_rejected',
),
))
@freeze_time('2020-02-22T22:22:22.000000')
def test_view_broadcast_message_shows_correct_highlighted_navigation(
mocker,
client_request,
service_one,
active_user_approve_broadcasts_permission,
fake_uuid,
endpoint,
status,
expected_highlighted_navigation_item,
expected_back_link_endpoint,
):
mocker.patch(
'app.broadcast_message_api_client.get_broadcast_message',
return_value=broadcast_message_json(
id_=fake_uuid,
service_id=SERVICE_ONE_ID,
template_id=fake_uuid,
created_by_id=fake_uuid,
approved_by_id=fake_uuid,
starts_at='2020-02-20T20:20:20.000000',
finishes_at='2021-12-21T21:21:21.000000',
cancelled_at='2021-01-01T01:01:01.000000',
updated_at='2021-01-01T01:01:01.000000',
status=status,
),
)
service_one['permissions'] += ['broadcast']
client_request.login(active_user_approve_broadcasts_permission)
page = client_request.get(
endpoint,
service_id=SERVICE_ONE_ID,
broadcast_message_id=fake_uuid,
_follow_redirects=True
)
assert normalize_spaces(
page.select_one('.navigation .selected').text
) == (
expected_highlighted_navigation_item
)
assert page.select_one('.govuk-back-link')['href'] == url_for(
expected_back_link_endpoint,
service_id=SERVICE_ONE_ID,
)
def test_view_pending_broadcast(
mocker,
client_request,
service_one,
fake_uuid,
active_user_approve_broadcasts_permission,
):
broadcast_creator = create_active_user_create_broadcasts_permissions(with_unique_id=True)
mocker.patch(
'app.broadcast_message_api_client.get_broadcast_message',
return_value=broadcast_message_json(
id_=fake_uuid,
service_id=SERVICE_ONE_ID,
template_id=fake_uuid,
created_by_id=broadcast_creator['id'],
finishes_at=None,
status='pending-approval',
),
)
client_request.login(active_user_approve_broadcasts_permission)
mocker.patch('app.user_api_client.get_user', side_effect=[
active_user_approve_broadcasts_permission, # Current user
broadcast_creator, # User who created broadcast
])
service_one['permissions'] += ['broadcast']
page = client_request.get(
'.view_current_broadcast',
service_id=SERVICE_ONE_ID,
broadcast_message_id=fake_uuid,
)
assert (
normalize_spaces(page.select_one('.banner').text)
) == (
'Test User Create Broadcasts Permission wants to broadcast Example template '
'No phones will get this alert. '
'Start broadcasting now Reject this alert'
)
assert not page.select('.banner input[type=checkbox]')
form = page.select_one('form.banner')
assert form['method'] == 'post'
assert 'action' not in form
assert form.select_one('button[type=submit]')
link = form.select_one('a.govuk-link.govuk-link--destructive')
assert link.text == 'Reject this alert'
assert link['href'] == url_for(
'.reject_broadcast_message',
service_id=SERVICE_ONE_ID,
broadcast_message_id=fake_uuid,
)
@pytest.mark.parametrize('extra_broadcast_json_fields, expected_banner_text', (
({'reference': 'ABC123'}, (
'Test User Create Broadcasts Permission wants to broadcast ABC123 '
'No phones will get this alert. '
'Start broadcasting now Reject this alert'
)),
({'cap_event': 'Severe flood warning', 'reference': 'ABC123'}, (
'Test User Create Broadcasts Permission wants to broadcast Severe flood warning '
'No phones will get this alert. '
'Start broadcasting now Reject this alert'
)),
))
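# (When cap_event is set, it appears in the banner in place of the reference.)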
def test_view_pending_broadcast_without_template(
mocker,
client_request,
service_one,
fake_uuid,
active_user_approve_broadcasts_permission,
extra_broadcast_json_fields,
expected_banner_text,
):
broadcast_creator = create_active_user_create_broadcasts_permissions(with_unique_id=True)
mocker.patch(
'app.broadcast_message_api_client.get_broadcast_message',
return_value=broadcast_message_json(
id_=fake_uuid,
service_id=SERVICE_ONE_ID,
template_id=None,
created_by_id=broadcast_creator['id'],
finishes_at=None,
status='pending-approval',
content='Uh-oh',
**extra_broadcast_json_fields,
),
)
client_request.login(active_user_approve_broadcasts_permission)
mocker.patch('app.user_api_client.get_user', side_effect=[
active_user_approve_broadcasts_permission, # Current user
broadcast_creator, # User who created broadcast
])
service_one['permissions'] += ['broadcast']
page = client_request.get(
'.view_current_broadcast',
service_id=SERVICE_ONE_ID,
broadcast_message_id=fake_uuid,
)
assert (
normalize_spaces(page.select_one('.banner').text)
) == expected_banner_text
assert (
normalize_spaces(page.select_one('.broadcast-message-wrapper').text)
) == (
'Emergency alert '
'Uh-oh'
)
def test_view_pending_broadcast_from_api_call(
mocker,
client_request,
service_one,
fake_uuid,
active_user_approve_broadcasts_permission,
):
mocker.patch(
'app.broadcast_message_api_client.get_broadcast_message',
return_value=broadcast_message_json(
id_=fake_uuid,
service_id=SERVICE_ONE_ID,
template_id=None,
created_by_id=None, # No user created this broadcast
finishes_at=None,
status='pending-approval',
reference='abc123',
content='Uh-oh',
),
)
service_one['permissions'] += ['broadcast']
client_request.login(active_user_approve_broadcasts_permission)
page = client_request.get(
'.view_current_broadcast',
service_id=SERVICE_ONE_ID,
broadcast_message_id=fake_uuid,
)
assert (
normalize_spaces(page.select_one('.banner').text)
) == (
'An API call wants to broadcast abc123 '
'No phones will get this alert. '
'Start broadcasting now Reject this alert'
)
assert (
normalize_spaces(page.select_one('.broadcast-message-wrapper').text)
) == (
'Emergency alert '
'Uh-oh'
)
@pytest.mark.parametrize('channel, expected_label_text', (
('test', (
'I understand this will alert anyone who has switched on the test channel'
)),
('operator', (
'I understand this will alert anyone who has switched on the operator channel'
)),
('severe', (
'I understand this will alert millions of people'
)),
('government', (
'I understand this will alert millions of people, even if they’ve opted out'
)),
))
def test_checkbox_to_confirm_non_training_broadcasts(
mocker,
client_request,
service_one,
fake_uuid,
active_user_approve_broadcasts_permission,
channel,
expected_label_text,
):
mocker.patch(
'app.broadcast_message_api_client.get_broadcast_message',
return_value=broadcast_message_json(
id_=fake_uuid,
service_id=SERVICE_ONE_ID,
template_id=None,
created_by_id=None,
status='pending-approval',
),
)
service_one['permissions'] += ['broadcast']
service_one['restricted'] = False
service_one['allowed_broadcast_provider'] = 'all'
service_one['broadcast_channel'] = channel
client_request.login(active_user_approve_broadcasts_permission)
page = client_request.get(
'.view_current_broadcast',
service_id=SERVICE_ONE_ID,
broadcast_message_id=fake_uuid,
)
label = page.select_one('form.banner label')
assert label['for'] == 'confirm'
    assert (
        normalize_spaces(label.text)
    ) == expected_label_text
assert page.select_one('form.banner input[type=checkbox]')['name'] == 'confirm'
assert page.select_one('form.banner input[type=checkbox]')['value'] == 'y'
def test_confirm_approve_non_training_broadcasts_errors_if_not_ticked(
mocker,
client_request,
service_one,
fake_uuid,
mock_update_broadcast_message,
mock_update_broadcast_message_status,
active_user_approve_broadcasts_permission,
):
mocker.patch(
'app.broadcast_message_api_client.get_broadcast_message',
return_value=broadcast_message_json(
id_=fake_uuid,
service_id=SERVICE_ONE_ID,
template_id=None,
created_by_id=None,
status='pending-approval',
),
)
service_one['permissions'] += ['broadcast']
service_one['restricted'] = False
service_one['allowed_broadcast_provider'] = 'all'
service_one['broadcast_channel'] = 'severe'
client_request.login(active_user_approve_broadcasts_permission)
page = client_request.post(
'.approve_broadcast_message',
service_id=SERVICE_ONE_ID,
broadcast_message_id=fake_uuid,
_data={},
_expected_status=200,
)
error_message = page.select_one('form.banner .govuk-error-message')
assert error_message['id'] == 'confirm-error'
assert normalize_spaces(error_message.text) == (
'Error: You need to confirm that you understand'
)
assert mock_update_broadcast_message.called is False
assert mock_update_broadcast_message_status.called is False
@freeze_time('2020-02-22T22:22:22.000000')
def test_can_approve_own_broadcast_in_training_mode(
mocker,
client_request,
service_one,
fake_uuid,
active_user_approve_broadcasts_permission,
):
mocker.patch(
'app.broadcast_message_api_client.get_broadcast_message',
return_value=broadcast_message_json(
id_=fake_uuid,
service_id=SERVICE_ONE_ID,
template_id=fake_uuid,
created_by_id=fake_uuid,
finishes_at='2020-02-23T23:23:23.000000',
status='pending-approval',
),
)
client_request.login(active_user_approve_broadcasts_permission)
mocker.patch('app.user_api_client.get_user', side_effect=[
active_user_approve_broadcasts_permission, # Current user
active_user_approve_broadcasts_permission, # User who created broadcast (the same)
])
service_one['permissions'] += ['broadcast']
page = client_request.get(
'.view_current_broadcast',
service_id=SERVICE_ONE_ID,
broadcast_message_id=fake_uuid,
)
assert (
normalize_spaces(page.select_one('.banner h1').text)
) == (
'Example template is waiting for approval'
)
assert (
normalize_spaces(page.select_one('.banner p').text)
) == (
'When you use a live account you’ll need another member of '
'your team to approve your alert.'
)
assert (
normalize_spaces(page.select_one('.banner details summary').text)
) == (
'Approve your own alert'
)
assert (
        normalize_spaces(page.select_one('.banner details').text)
) == (
'Approve your own alert '
'Because you’re in training mode you can approve your own '
'alerts, to see how it works. '
'No real alerts will be broadcast to anyone’s phone. '
'Start broadcasting now '
'Reject this alert'
)
form = page.select_one('.banner details form')
assert form['method'] == 'post'
assert 'action' not in form
assert normalize_spaces(form.select_one('button[type=submit]').text) == (
'Start broadcasting now'
)
link = page.select_one('.banner a.govuk-link.govuk-link--destructive')
assert link.text == 'Reject this alert'
assert link['href'] == url_for(
'.reject_broadcast_message',
service_id=SERVICE_ONE_ID,
broadcast_message_id=fake_uuid,
)
@freeze_time('2020-02-22T22:22:22.000000')
@pytest.mark.parametrize('user', [
create_active_user_approve_broadcasts_permissions(),
create_active_user_create_broadcasts_permissions(),
])
def test_cant_approve_own_broadcast_if_service_is_live(
mocker,
client_request,
service_one,
fake_uuid,
user,
):
service_one['restricted'] = False
mocker.patch(
'app.broadcast_message_api_client.get_broadcast_message',
return_value=broadcast_message_json(
id_=fake_uuid,
service_id=SERVICE_ONE_ID,
template_id=fake_uuid,
created_by_id=fake_uuid,
finishes_at='2020-02-23T23:23:23.000000',
status='pending-approval',
),
)
client_request.login(user)
mocker.patch('app.user_api_client.get_user', side_effect=[
user, # Current user
user, # User who created broadcast (the same)
])
service_one['permissions'] += ['broadcast']
page = client_request.get(
'.view_current_broadcast',
service_id=SERVICE_ONE_ID,
broadcast_message_id=fake_uuid,
)
assert (
normalize_spaces(page.select_one('.banner h1').text)
) == (
'Example template is waiting for approval'
)
assert (
normalize_spaces(page.select_one('.banner p').text)
) == (
'You need another member of your team to approve your alert.'
)
assert not page.select('form')
link = page.select_one('.banner a.govuk-link.govuk-link--destructive')
assert link.text == 'Discard this alert'
assert link['href'] == url_for(
'.reject_broadcast_message',
service_id=SERVICE_ONE_ID,
broadcast_message_id=fake_uuid,
)
@freeze_time('2020-02-22T22:22:22.000000')
@pytest.mark.parametrize("user_is_platform_admin", [True, False])
def test_view_only_user_cant_approve_broadcast_created_by_someone_else(
mocker,
client_request,
service_one,
active_user_create_broadcasts_permission,
active_user_view_permissions,
platform_admin_user_no_service_permissions,
fake_uuid,
user_is_platform_admin
):
mocker.patch(
'app.broadcast_message_api_client.get_broadcast_message',
return_value=broadcast_message_json(
id_=fake_uuid,
service_id=SERVICE_ONE_ID,
template_id=fake_uuid,
created_by_id=fake_uuid,
finishes_at='2020-02-23T23:23:23.000000',
status='pending-approval',
),
)
if user_is_platform_admin:
current_user = platform_admin_user_no_service_permissions
else:
current_user = active_user_view_permissions
mocker.patch('app.user_api_client.get_user', side_effect=[
current_user, # Current user
active_user_create_broadcasts_permission, # User who created broadcast
])
service_one['permissions'] += ['broadcast']
page = client_request.get(
'.view_current_broadcast',
service_id=SERVICE_ONE_ID,
broadcast_message_id=fake_uuid,
)
assert (
normalize_spaces(page.select_one('.banner').text)
) == (
'This alert is waiting for approval '
'You don’t have permission to approve alerts.'
)
assert not page.select_one('form')
assert not page.select_one('.banner a')
def test_view_only_user_cant_approve_broadcasts_they_created(
mocker,
client_request,
service_one,
fake_uuid,
active_user_create_broadcasts_permission,
active_user_view_permissions,
):
mocker.patch(
'app.broadcast_message_api_client.get_broadcast_message',
return_value=broadcast_message_json(
id_=fake_uuid,
service_id=SERVICE_ONE_ID,
template_id=fake_uuid,
created_by_id=fake_uuid,
finishes_at='2020-02-23T23:23:23.000000',
status='pending-approval',
),
)
client_request.login(active_user_view_permissions)
# active_user_view_permissions and active_user_create_broadcasts_permission have the same
# id. This mocks the same user being returned, but with different permissions each time.
mocker.patch('app.user_api_client.get_user', side_effect=[
active_user_view_permissions, # Current user
active_user_create_broadcasts_permission, # User who created broadcast
])
service_one['permissions'] += ['broadcast']
    service_one['restricted'] = False
page = client_request.get(
'.view_current_broadcast',
service_id=SERVICE_ONE_ID,
broadcast_message_id=fake_uuid,
)
assert (
normalize_spaces(page.select_one('.banner').text)
) == (
'This alert is waiting for approval '
'You don’t have permission to approve alerts.'
)
assert not page.select_one('form')
assert not page.select_one('.banner a')
@pytest.mark.parametrize('is_service_training_mode,banner_text', [
(
True,
('This alert is waiting for approval '
'Another member of your team needs to approve this alert. '
'This service is in training mode. No real alerts will be sent. '
'Reject this alert')
),
(
False,
('This alert is waiting for approval '
'Another member of your team needs to approve this alert. '
'Reject this alert')
)
])
def test_user_without_approve_permission_cant_approve_broadcast_created_by_someone_else(
mocker,
client_request,
service_one,
active_user_create_broadcasts_permission,
fake_uuid,
is_service_training_mode,
banner_text,
):
current_user = create_active_user_create_broadcasts_permissions(with_unique_id=True)
mocker.patch(
'app.broadcast_message_api_client.get_broadcast_message',
return_value=broadcast_message_json(
id_=fake_uuid,
service_id=SERVICE_ONE_ID,
template_id=fake_uuid,
created_by_id=fake_uuid,
finishes_at='2020-02-23T23:23:23.000000',
status='pending-approval',
),
)
client_request.login(current_user)
mocker.patch('app.user_api_client.get_user', side_effect=[
current_user, # Current user
active_user_create_broadcasts_permission, # User who created broadcast
])
service_one['permissions'] += ['broadcast']
service_one['restricted'] = is_service_training_mode
page = client_request.get(
'.view_current_broadcast',
service_id=SERVICE_ONE_ID,
broadcast_message_id=fake_uuid,
)
assert (
normalize_spaces(page.select_one('.banner').text)
) == banner_text
assert not page.select_one('form')
link = page.select_one('.banner a')
assert link['href'] == url_for('.reject_broadcast_message',
service_id=SERVICE_ONE_ID,
broadcast_message_id=fake_uuid)
def test_user_without_approve_permission_cant_approve_broadcast_they_created(
mocker,
client_request,
service_one,
fake_uuid,
active_user_create_broadcasts_permission,
):
mocker.patch(
'app.broadcast_message_api_client.get_broadcast_message',
return_value=broadcast_message_json(
id_=fake_uuid,
service_id=SERVICE_ONE_ID,
template_id=fake_uuid,
created_by_id=active_user_create_broadcasts_permission['id'],
finishes_at=None,
status='pending-approval',
),
)
client_request.login(active_user_create_broadcasts_permission)
mocker.patch('app.user_api_client.get_user', side_effect=[
active_user_create_broadcasts_permission, # Current user
        active_user_create_broadcasts_permission, # Same user created the broadcast
])
service_one['permissions'] += ['broadcast']
page = client_request.get(
'.view_current_broadcast',
service_id=SERVICE_ONE_ID,
broadcast_message_id=fake_uuid,
)
assert (
normalize_spaces(page.select_one('.banner').text)
) == (
'Example template is waiting for approval '
'You need another member of your team to approve this alert. '
'This service is in training mode. No real alerts will be sent. '
'Discard this alert'
)
assert not page.select('.banner input[type=checkbox]')
link = page.select_one('a.govuk-link.govuk-link--destructive')
assert link.text == 'Discard this alert'
assert link['href'] == url_for(
'.reject_broadcast_message',
service_id=SERVICE_ONE_ID,
broadcast_message_id=fake_uuid,
)
@pytest.mark.parametrize('channel, expected_finishes_at', (
# 4 hours later
('operator', '2020-02-23T02:22:22'),
('test', '2020-02-23T02:22:22'),
# 22 hours 30 minutes later
('severe', '2020-02-23T20:52:22'),
('government', '2020-02-23T20:52:22'),
(None, '2020-02-23T20:52:22'), # Training mode
))
@pytest.mark.parametrize(
'trial_mode, initial_status, post_data, expected_approval, expected_redirect',
(
(True, 'draft', {}, False, partial(
url_for,
'.view_current_broadcast',
broadcast_message_id=sample_uuid,
)),
(True, 'pending-approval', {}, True, partial(
url_for,
'.broadcast_tour',
step_index=6,
)),
(False, 'pending-approval', {'confirm': 'y'}, True, partial(
url_for,
'.view_current_broadcast',
broadcast_message_id=sample_uuid,
)),
(True, 'rejected', {}, False, partial(
url_for,
'.view_current_broadcast',
broadcast_message_id=sample_uuid,
)),
(True, 'broadcasting', {}, False, partial(
url_for,
'.view_current_broadcast',
broadcast_message_id=sample_uuid,
)),
(True, 'cancelled', {}, False, partial(
url_for,
'.view_current_broadcast',
broadcast_message_id=sample_uuid,
)),
)
)
@freeze_time('2020-02-22T22:22:22.000000')
def test_confirm_approve_broadcast(
mocker,
client_request,
service_one,
fake_uuid,
mock_update_broadcast_message,
mock_update_broadcast_message_status,
active_user_approve_broadcasts_permission,
initial_status,
post_data,
expected_approval,
trial_mode,
expected_redirect,
channel,
expected_finishes_at,
):
mocker.patch(
'app.broadcast_message_api_client.get_broadcast_message',
return_value=broadcast_message_json(
id_=fake_uuid,
service_id=SERVICE_ONE_ID,
template_id=fake_uuid,
created_by_id=fake_uuid,
finishes_at='2020-02-23T23:23:23.000000',
status=initial_status,
),
)
service_one['restricted'] = trial_mode
service_one['permissions'] += ['broadcast']
service_one['broadcast_channel'] = channel
client_request.login(active_user_approve_broadcasts_permission)
client_request.post(
'.approve_broadcast_message',
service_id=SERVICE_ONE_ID,
broadcast_message_id=fake_uuid,
_expected_redirect=expected_redirect(
service_id=SERVICE_ONE_ID,
_external=True,
),
_data=post_data,
)
if expected_approval:
mock_update_broadcast_message.assert_called_once_with(
service_id=SERVICE_ONE_ID,
broadcast_message_id=fake_uuid,
data={
'starts_at': '2020-02-22T22:22:22',
'finishes_at': expected_finishes_at,
},
)
mock_update_broadcast_message_status.assert_called_once_with(
'broadcasting',
service_id=SERVICE_ONE_ID,
broadcast_message_id=fake_uuid,
)
else:
assert mock_update_broadcast_message.called is False
assert mock_update_broadcast_message_status.called is False
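# Illustrative sketch (not part of the application under test; the helper
# name is hypothetical): the parametrized expected_finishes_at values above
# assume broadcasts on the operator and test channels default to a 4 hour
# duration, while the severe and government channels (and training mode)
# default to 22 hours 30 minutes.
def _expected_default_finishes_at(starts_at, channel):
    from datetime import timedelta
    if channel in ('operator', 'test'):
        return starts_at + timedelta(hours=4)
    return starts_at + timedelta(hours=22, minutes=30)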
@pytest.mark.parametrize('user', (
create_active_user_create_broadcasts_permissions(),
create_active_user_approve_broadcasts_permissions(),
))
@freeze_time('2020-02-22T22:22:22.000000')
def test_reject_broadcast(
mocker,
client_request,
service_one,
fake_uuid,
mock_update_broadcast_message,
mock_update_broadcast_message_status,
user,
):
mocker.patch(
'app.broadcast_message_api_client.get_broadcast_message',
return_value=broadcast_message_json(
id_=fake_uuid,
service_id=SERVICE_ONE_ID,
template_id=fake_uuid,
created_by_id=fake_uuid,
finishes_at='2020-02-23T23:23:23.000000',
status='pending-approval',
),
)
service_one['permissions'] += ['broadcast']
client_request.login(user)
client_request.get(
'.reject_broadcast_message',
service_id=SERVICE_ONE_ID,
broadcast_message_id=fake_uuid,
_expected_redirect=url_for(
'.broadcast_dashboard',
service_id=SERVICE_ONE_ID,
_external=True,
)
)
assert mock_update_broadcast_message.called is False
mock_update_broadcast_message_status.assert_called_once_with(
'rejected',
service_id=SERVICE_ONE_ID,
broadcast_message_id=fake_uuid,
)
@pytest.mark.parametrize('user', [
create_active_user_create_broadcasts_permissions(),
create_active_user_approve_broadcasts_permissions(),
])
@pytest.mark.parametrize('initial_status', (
'draft',
'rejected',
'broadcasting',
'cancelled',
))
@freeze_time('2020-02-22T22:22:22.000000')
def test_cant_reject_broadcast_in_wrong_state(
mocker,
client_request,
service_one,
mock_get_broadcast_template,
fake_uuid,
mock_update_broadcast_message,
mock_update_broadcast_message_status,
user,
initial_status,
):
mocker.patch(
'app.broadcast_message_api_client.get_broadcast_message',
return_value=broadcast_message_json(
id_=fake_uuid,
service_id=SERVICE_ONE_ID,
template_id=fake_uuid,
created_by_id=fake_uuid,
finishes_at='2020-02-23T23:23:23.000000',
status=initial_status,
),
)
service_one['permissions'] += ['broadcast']
client_request.login(user)
client_request.get(
'.reject_broadcast_message',
service_id=SERVICE_ONE_ID,
broadcast_message_id=fake_uuid,
_expected_redirect=url_for(
'.view_current_broadcast',
service_id=SERVICE_ONE_ID,
broadcast_message_id=fake_uuid,
_external=True,
)
)
assert mock_update_broadcast_message.called is False
assert mock_update_broadcast_message_status.called is False
@pytest.mark.parametrize('endpoint', (
'.view_current_broadcast',
'.view_previous_broadcast',
))
def test_no_view_page_for_draft(
client_request,
service_one,
mock_get_draft_broadcast_message,
fake_uuid,
endpoint,
):
service_one['permissions'] += ['broadcast']
client_request.get(
endpoint,
service_id=SERVICE_ONE_ID,
broadcast_message_id=fake_uuid,
_expected_status=404,
)
@pytest.mark.parametrize('user', (
create_active_user_create_broadcasts_permissions(),
create_active_user_approve_broadcasts_permissions(),
create_platform_admin_user(),
))
def test_cancel_broadcast(
client_request,
service_one,
mock_get_live_broadcast_message,
mock_update_broadcast_message_status,
fake_uuid,
user,
):
"""
users with 'send_messages' permissions and platform admins should be able to cancel broadcasts.
"""
service_one['permissions'] += ['broadcast']
client_request.login(user)
page = client_request.get(
'.cancel_broadcast_message',
service_id=SERVICE_ONE_ID,
broadcast_message_id=fake_uuid,
)
assert normalize_spaces(page.select_one('.banner-dangerous').text) == (
'Are you sure you want to stop this broadcast now? '
'Yes, stop broadcasting'
)
form = page.select_one('form')
assert form['method'] == 'post'
assert 'action' not in form
assert normalize_spaces(form.select_one('button[type=submit]').text) == (
'Yes, stop broadcasting'
)
assert mock_update_broadcast_message_status.called is False
assert url_for(
'.cancel_broadcast_message',
service_id=SERVICE_ONE_ID,
broadcast_message_id=fake_uuid,
) not in page
@pytest.mark.parametrize('user', [
create_platform_admin_user(),
create_active_user_create_broadcasts_permissions(),
create_active_user_approve_broadcasts_permissions(),
])
def test_confirm_cancel_broadcast(
client_request,
service_one,
mock_get_live_broadcast_message,
mock_update_broadcast_message_status,
fake_uuid,
user,
):
"""
Platform admins and users with any of the broadcast permissions can cancel broadcasts.
"""
service_one['permissions'] += ['broadcast']
client_request.login(user)
client_request.post(
'.cancel_broadcast_message',
service_id=SERVICE_ONE_ID,
broadcast_message_id=fake_uuid,
_expected_redirect=url_for(
'.view_previous_broadcast',
service_id=SERVICE_ONE_ID,
broadcast_message_id=fake_uuid,
_external=True,
),
)
mock_update_broadcast_message_status.assert_called_once_with(
'cancelled',
service_id=SERVICE_ONE_ID,
broadcast_message_id=fake_uuid,
)
@pytest.mark.parametrize('method', ('post', 'get'))
def test_cant_cancel_broadcast_in_a_different_state(
client_request,
service_one,
mock_get_draft_broadcast_message,
mock_update_broadcast_message_status,
fake_uuid,
active_user_create_broadcasts_permission,
method,
):
service_one['permissions'] += ['broadcast']
client_request.login(active_user_create_broadcasts_permission)
getattr(client_request, method)(
'.cancel_broadcast_message',
service_id=SERVICE_ONE_ID,
broadcast_message_id=fake_uuid,
_expected_redirect=url_for(
'.view_current_broadcast',
service_id=SERVICE_ONE_ID,
broadcast_message_id=fake_uuid,
_external=True,
),
)
assert mock_update_broadcast_message_status.called is False
|
alphagov/notifications-admin
|
tests/app/main/views/test_broadcast.py
|
Python
|
mit
| 85,153
|
#!/usr/bin/env python
"""
@package mi.dataset.param_dict
@file mi/dataset/param_dict.py
@author Emily Hahn
@brief Extend the protocol param dict to handle dataset encoding exceptions
"""
import re
from mi.core.instrument.protocol_param_dict import ProtocolParameterDict, ParameterDescription
from mi.core.instrument.protocol_param_dict import ParameterValue, ParameterDictVisibility
from mi.core.log import get_logger
log = get_logger()
class DatasetParameterValue(ParameterValue):
def clear_value(self):
"""
Ensure value is cleared to None
"""
self.value = None
class Parameter(object):
"""
A parameter dictionary item.
"""
def __init__(self, name, f_format, value=None, expiration=None):
"""
Parameter value constructor.
@param name The parameter name.
@param f_format The function that formats the parameter value for a set command.
@param value The parameter value (initializes to None).
"""
self.description = ParameterDescription(name,
menu_path_read=None,
submenu_read=None,
menu_path_write=None,
submenu_write=None,
multi_match=False,
visibility=ParameterDictVisibility.READ_WRITE,
direct_access=False,
startup_param=False,
default_value=None,
init_value=None,
get_timeout=10,
set_timeout=10,
display_name=None,
description=None,
type=None,
units=None,
value_description=None)
self.value = DatasetParameterValue(name, f_format, value=value,
expiration=expiration)
self.name = name
def update(self, input):
"""
        Attempt to update a parameter value. By default, this assumes the input
        will be the new value. In subclasses, this must be overridden to handle
a real string of data appropriately.
@param input A string that is the parameter value.
@retval True if an update was successful, False otherwise.
"""
self.value.set_value(input)
return True
def get_value(self, timestamp=None):
"""
Get the value of the parameter that has been stored in the ParameterValue
object.
@param timestamp timestamp to use for expiration calculation
@retval The actual data value if it is valid
@raises InstrumentParameterExpirationException If the value has expired
"""
return self.value.get_value(timestamp)
def clear_value(self):
"""
Clear the value in the parameter by setting it to None
"""
self.value.clear_value()
class RegexParameter(Parameter):
def __init__(self, name, pattern, f_getval, f_format, value=None,
regex_flags=None, expiration=None):
"""
Parameter value constructor.
@param name The parameter name.
@param pattern The regex that matches the parameter in line output.
        @param f_getval The function that extracts the value from a regex match.
@param f_format The function that formats the parameter value for a set command.
@param value The parameter value (initializes to None).
@param regex_flags Flags that should be passed to the regex in this
parameter. Should comply with regex compile() interface (XORed flags).
@throws TypeError if regex flags are bad
@see ProtocolParameterDict.add() for details of parameters
"""
Parameter.__init__(self, name, f_format, value=value, expiration=expiration)
self.pattern = pattern
        if regex_flags is None:
self.regex = re.compile(pattern)
else:
self.regex = re.compile(pattern, regex_flags)
self.f_getval = f_getval
def update(self, input):
"""
Attempt to update a parameter value. If the input string matches the
value regex, extract and update the dictionary value.
@param input A string possibly containing the parameter value.
@retval True if an update was successful, False otherwise.
"""
        if not isinstance(input, str):
match = self.regex.search(str(input))
else:
match = self.regex.search(input)
if match:
self.value.set_value(self.f_getval(match))
return True
else:
return False
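# Minimal usage sketch (illustrative only; names and data are hypothetical,
# and no expiration is assumed): a RegexParameter extracts a value from raw
# input via its regex and getter function.
def _regex_parameter_example():
    temperature = RegexParameter('temperature',
                                 r'temp=(\d+\.\d+)',
                                 lambda match: float(match.group(1)),
                                 str)
    temperature.update('temp=21.5 hum=40')  # returns True on a match
    return temperature.get_value()  # 21.5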
class DatasetParameterDict(ProtocolParameterDict):
"""
Dataset parameter dictionary. Manages, matches and formats parameters.
"""
def __init__(self):
"""
Constructor.
"""
super(DatasetParameterDict, self).__init__()
self._encoding_errors = []
def add(self, name, pattern, f_getval, f_format, value=None, regex_flags=None):
"""
Add a parameter object to the dictionary using a regex for extraction.
@param name The parameter name.
@param pattern The regex that matches the parameter in line output.
        @param f_getval The function that extracts the value from a regex match.
@param f_format The function that formats the parameter value for a set command.
@param regex_flags Flags that should be passed to the regex in this
parameter. Should comply with regex compile() interface (XORed flags).
"""
val = RegexParameter(name, pattern, f_getval, f_format, value=value, regex_flags=regex_flags)
self._param_dict[name] = val
def update(self, in_data):
"""
        Update the dictionary with a line of input. Iterate through all
        objects and attempt to match and update each parameter. Only the
        first match encountered is updated. If a list of target parameters
        is passed in, only those are iterated over, which limits updates
        to specific parameters.
        @param in_data A set of data to match to a dictionary object.
        @raise InstrumentParameterException on invalid target params
@raise KeyError on invalid parameter name
"""
params = self._param_dict.keys()
for name in params:
log.trace("update param dict name: %s", name)
try:
val = self._param_dict[name]
val.update(in_data)
except Exception as e:
# set the value to None if we failed
val.clear_value()
log.error("Dataset parameter dict error encoding Name:%s, set to None", name)
self._encoding_errors.append({name: None})
def get_encoding_errors(self):
"""
Return the encoding errors list
"""
return self._encoding_errors
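# Usage sketch (illustrative only; names and data are hypothetical): build a
# dictionary, feed it a line of data, then inspect the extracted value and
# any encoding errors that were recorded. Direct access to _param_dict is
# for illustration here, not a suggested pattern.
def _dataset_param_dict_example(line='depth=120'):
    param_dict = DatasetParameterDict()
    param_dict.add('depth', r'depth=(\d+)', lambda match: int(match.group(1)), int)
    param_dict.update(line)
    depth = param_dict._param_dict['depth'].get_value()
    return depth, param_dict.get_encoding_errors()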
|
janeen666/mi-instrument
|
mi/dataset/param_dict.py
|
Python
|
bsd-2-clause
| 7,444
|
#!/usr/bin/env python
# - coding: utf-8 -
# Copyright (C) 2014 Toms Baugis <toms.baugis@gmail.com>
"""Exploring symmetry. Feel free to add more handles!"""
import math
import random
from collections import defaultdict
from gi.repository import Gtk as gtk
from gi.repository import GObject as gobject
from gi.repository import Pango as pango
from lib import graphics
from lib.pytweener import Easing
import sprites
class Point(gobject.GObject):
__gsignals__ = {
"on-point-changed": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, ()),
}
def __init__(self, x=0, y=0):
gobject.GObject.__init__(self)
self.x = x
self.y = y
def __setattr__(self, name, val):
if hasattr(self, name) and getattr(self, name) == val:
return
gobject.GObject.__setattr__(self, name, val)
self.emit("on-point-changed")
def __repr__(self):
return "<%s x=%d, y=%d>" % (self.__class__.__name__, self.x, self.y)
def __iter__(self):
yield self.x
yield self.y
class Line(object):
def __init__(self, a, b, anchor=None):
self.a, self.b = a, b
# anchor can be either dot A or dot B
self.anchor = anchor or self.a
@property
def length(self):
return math.sqrt((self.a.x - self.b.x) ** 2 + (self.a.y - self.b.y) ** 2)
@property
def rotation(self):
a = self.anchor
b = self.b if a != self.b else self.a
return math.atan2(b.x - a.x, b.y - a.y)
def __setattr__(self, name, val):
if name == "rotation":
self.set_rotation(val)
else:
object.__setattr__(self, name, val)
def set_rotation(self, radians):
a = self.anchor
b = self.b if a != self.b else self.a
length = self.length
b.x = a.x + math.cos(radians - math.radians(90)) * length
b.y = a.y + math.sin(radians - math.radians(90)) * length
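# Quick sanity sketch (illustrative only): rotation is measured around the
# anchor point, so setting it swings the free end of the line. Here the
# free end moves from (0, 100) to (100, 0).
def _line_rotation_example():
    a, b = Point(0, 0), Point(0, 100)
    line = Line(a, b)
    line.rotation = math.radians(90)  # routed through __setattr__ to set_rotation
    return round(b.x), round(b.y)  # (100, 0)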
class SymmetricalRepeater(graphics.Sprite):
def __init__(self, sides, poly=None, **kwargs):
graphics.Sprite.__init__(self, **kwargs)
poly = poly or [(0, 0), (0, 0)]
self.master_poly = [Point(*coords) for coords in poly]
for point in self.master_poly:
point.connect("on-point-changed", self.on_master_changed)
self.sides = []
for i in range(sides):
side = [Point(*coords) for coords in poly]
self.sides.append(side)
for point in side:
point.connect("on-point-changed", self.on_side_changed)
self.connect("on-render", self.on_render)
def on_master_changed(self, point):
"""propagate to the kids"""
idx = self.master_poly.index(point)
for side in self.sides:
side[idx].x, side[idx].y = point.x, point.y
def on_side_changed(self, point):
self._sprite_dirty = True
def on_render(self, sprite):
angle = 360.0 / len(self.sides)
# debug
"""
self.graphics.save_context()
for i in range(len(self.sides)):
self.graphics.move_to(0, 0)
self.graphics.line_to(1000, 0)
self.graphics.rotate(math.radians(angle))
self.graphics.stroke("#3d3d3d")
self.graphics.restore_context()
"""
self.graphics.set_line_style(3)
for side in self.sides:
self.graphics.move_to(*side[0])
for point in side[1:]:
self.graphics.line_to(*point)
self.graphics.rotate(math.radians(angle))
self.graphics.stroke("#fafafa")
class Scene(graphics.Scene):
def __init__(self):
graphics.Scene.__init__(self, background_color="#333")
self.repeater2 = None
self.container = graphics.Sprite()
self.add_child(self.container)
self.connect("on-first-frame", self.on_first_frame)
self.connect("on-resize", self.on_resize)
def appear1(self, parent, callback):
def clone_grow(repeater, on_complete=None):
repeater2 = SymmetricalRepeater(len(repeater.sides),
repeater.master_poly)
parent.add_child(repeater2)
a, b = repeater2.master_poly
self.animate(a, x=diagonal, delay=0.3, duration=1.3)
self.animate(b, y=-diagonal, delay=0.3, duration=1.3)
if on_complete:
on_complete()
repeater = SymmetricalRepeater(4)
parent.add_child(repeater)
a, b = repeater.master_poly
# push the dots away at the beginning
a.x, b.x = 1000, 1000
size = 100
diagonal = math.sqrt(100**2 + 100**2)
graphics.chain(
# fly in
self.animate, {"sprite": a, "x": size, "duration": 1, "easing": Easing.Expo.ease_in_out},
self.animate, {"sprite": Line(a, b), "rotation": math.radians(-45), "duration": 0.8},
#clone_grow, {"repeater": repeater},
#repeater, {"rotation": math.radians(-45), "duration": 1.3, "delay": 0.3},
callback, {},
)
# parallel chains
graphics.chain(
self.animate, {"sprite": b, "x": size + diagonal, "duration": 1, "easing": Easing.Expo.ease_in_out},
)
def appear2(self, parent, callback):
size = 100
diagonal = math.sqrt((2 * size) ** 2)
repeater = SymmetricalRepeater(4)
parent.add_child(repeater)
poly = [(1000, 0), (size, 0), (1000, 0)]
repeater2 = SymmetricalRepeater(4, poly)
def appear21(on_complete=None):
parent.add_child(repeater2)
a, b, c = repeater2.master_poly
self.animate(Line(b, a), rotation=math.radians(-45), duration=0.7, easing=Easing.Expo.ease_in_out)
self.animate(Line(b, c), rotation=math.radians(225), duration=0.7, easing=Easing.Expo.ease_in_out)
repeater2.animate(rotation=math.radians(-90), duration=0.7, easing=Easing.Expo.ease_in_out,
on_complete=on_complete)
def disappear21(on_complete):
a, b = repeater.master_poly
c, d, e = repeater2.master_poly
graphics.chain(
self.animate, {"sprite": b, "x": 0, "duration": 0.6, "easing": Easing.Expo.ease_out},
on_complete, {}
)
self.animate(d, x=d.x + 3000, duration=2.3, easing=Easing.Expo.ease_out)
self.animate(c, x=c.x + 3000, duration=2.3, easing=Easing.Expo.ease_out)
self.animate(e, x=e.x + 3000, duration=2.3, easing=Easing.Expo.ease_out)
a, b = repeater.master_poly
# push the dots away at the beginning
a.x, b.x = 1000, 1000
def add_outline(on_complete=None):
self._add_outline(parent, on_complete)
graphics.chain(
# fly in
self.animate, {"sprite": a, "x": 0, "duration": 1.3},
appear21, {},
add_outline, {},
disappear21, {},
callback, {},
)
def _add_outline(self, parent, on_complete=None):
cube2 = graphics.Polygon([(100, 0), (0, -100), (-100, 0), (0, 100), (100, 0)],
stroke="#fafafa", line_width=3)
parent.add_child(cube2)
if on_complete:
on_complete()
def appear3(self, parent, callback):
repeater = SymmetricalRepeater(4)
parent.add_child(repeater)
size = 100
diagonal = math.sqrt(100**2 + 100**2)
def appear31(on_complete=None):
poly = [(size, 0), (size, 0), (size, 0)]
repeater2 = SymmetricalRepeater(4, poly)
parent.add_child(repeater2)
a, b, c = repeater2.master_poly
self.animate(a, x=0, y=size, duration=1)
self.animate(c, x=0, y=-size, duration=1, on_complete=on_complete)
a, b = repeater.master_poly
# push the dots away at the beginning
a.x, b.x = 1000, 1000
graphics.chain(
# fly in
self.animate, {"sprite": a, "x": 0, "duration": 1.3},
appear31, {},
callback, {},
)
graphics.chain(
# fly in
self.animate, {"sprite": b, "x": size, "duration": 1.3},
)
def on_first_frame(self, scene, context):
func = random.choice([self.appear1, self.appear2, self.appear3])
func(self.container, lambda: self.on_intro_done())
def on_resize(self, scene, event):
self.container.x = self.width / 2
self.container.y = self.height / 2
def on_intro_done(self):
container = self[0]
cube = graphics.Polygon([(100, 0), (0, -100), (-100, 0), (0, 100)],
fill="#fafafa", opacity=0)
title = sprites.Label("APX", size=200, y=150, opacity=0)
title.x = -title.width / 2
description = sprites.Label("A cousine of QIX\nPress <Space>!",
y=350, opacity=0, alignment=pango.Alignment.CENTER)
description.x = -description.width / 2
container.add_child(cube, title, description)
def announce_ready():
pass
graphics.chain(
cube, {"opacity": 1,
"duration": 0.7, "delay": 0.3, "easing": Easing.Sine.ease_in_out},
announce_ready, {}
)
        container.animate(y=150, duration=0.7, delay=0.3, easing=Easing.Sine.ease_out)
        title.animate(opacity=1, y=110, duration=0.7, delay=0.5, easing=Easing.Expo.ease_out)
        description.animate(opacity=1, y=300, duration=0.5, delay=0.5, easing=Easing.Expo.ease_out)
class BasicWindow:
def __init__(self):
window = gtk.Window()
window.set_default_size(600, 600)
window.connect("delete_event", lambda *args: gtk.main_quit())
self.scene = Scene()
window.add(self.scene)
window.show_all()
if __name__ == '__main__':
window = BasicWindow()
import signal
signal.signal(signal.SIGINT, signal.SIG_DFL) # gtk3 screws up ctrl+c
gtk.main()
|
jean/apx
|
apx/splash.py
|
Python
|
mit
| 10,146
|
from django.db import models
from django.contrib.auth.models import User
class Client(models.Model):
FEMALE = 'F'
MALE = 'M'
SEX_CHOICE = (
(FEMALE, 'F'),
(MALE, 'M'),
)
user = models.OneToOneField(User)
address = models.CharField(max_length=200, null=True)
preference = models.CharField(max_length=40)
sex = models.CharField(max_length=2, choices=SEX_CHOICE, default=" ")
# size = models.CharField(max_length=40, null=True)
def __str__(self):
return self.user.username
class Tailor(models.Model):
# #links UserProfile to django User model
user = models.OneToOneField(User)
rate = models.IntegerField(default=0)
phone_number = models.CharField(max_length=13, default=0)
    address = models.CharField(max_length=300, null=True)
specialty = models.CharField(max_length=300, null=True)
approved = models.BooleanField(default=False)
# company_size = models.CharField(max_length=300, null=True)
# sample_pics = models.CharField(max_length=300, null=True) #image field
def __str__(self):
return self.user.username
class Fabric(models.Model):
name = models.CharField(max_length=100)
cost = models.IntegerField(null=True)
sex = models.CharField(max_length=2, null=True)
pattern = models.CharField(max_length=150, null=True)
image_url = models.CharField(max_length=150)
description = models.CharField(max_length=200, null=True)
def __str__(self):
        return '%s %s' % (self.name, self.cost)
class SizeTable(models.Model):
size_value = models.CharField(max_length=15)
collar = models.CharField(max_length=10)
waist = models.CharField(max_length=10)
hips = models.CharField(max_length=10)
def __str__(self):
return self.size_value
class Order(models.Model):
client = models.ForeignKey(Client, null=True)
#--sizetable
sizetable = models.ForeignKey(SizeTable, null=True)
#sizetable
tailor = models.ForeignKey(Tailor, null=True)
fabric = models.CharField(max_length=50, null=True)
style = models.CharField(max_length=100, null=True)
main_order_id = models.CharField(max_length=15, null=True)
details = models.CharField(max_length=250, null=True) #*
delivery_option = models.CharField(max_length=100, null=True)
service_option = models.CharField(max_length=100, null=True)
sex = models.CharField(max_length=2, default=' ')
status = models.CharField(max_length=20, default='OPEN') #*
## final cost of order
    cost = models.IntegerField(default=0)
date = models.DateField(auto_now_add=True)
## Change to True on First Delete incase error was made
soft_delete = models.BooleanField(default=False)
class Style(models.Model):
name = models.CharField(max_length=50)
sex = models.CharField(max_length=3)
    cost = models.IntegerField(default=0)
pattern = models.CharField(max_length=150, null=True)
# image_url = models.CharField(max_length=150)
description = models.CharField(max_length=200, null=True)
    # manipulate images to give them equal size and make different versions before saving
style_img = models.ImageField(upload_to='styles', blank=True, default='no_image.png')
def __str__(self):
        return '%s N%s sex- %s' % (self.name, self.cost, self.sex)
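# Usage sketch (illustrative only; field values are hypothetical): wiring an
# order to its client, tailor and size entry via the foreign keys above.
def _create_example_order(client, tailor, size):
    return Order.objects.create(
        client=client,
        tailor=tailor,
        sizetable=size,
        fabric='Ankara',
        style='Agbada',
        sex='M',
        cost=15000,
    )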
|
avoajaugochukwu/suaveproject
|
suave/models.py
|
Python
|
mit
| 3,133
|
from __future__ import unicode_literals
import base64
import datetime
import hashlib
import json
import netrc
import os
import re
import socket
import sys
import time
import math
from ..compat import (
compat_cookiejar,
compat_cookies,
compat_etree_fromstring,
compat_getpass,
compat_http_client,
compat_os_name,
compat_str,
compat_urllib_error,
compat_urllib_parse_urlencode,
compat_urllib_request,
compat_urlparse,
)
from ..downloader.f4m import remove_encrypted_media
from ..utils import (
NO_DEFAULT,
age_restricted,
bug_reports_message,
clean_html,
compiled_regex_type,
determine_ext,
error_to_compat_str,
ExtractorError,
fix_xml_ampersands,
float_or_none,
int_or_none,
parse_iso8601,
RegexNotFoundError,
sanitize_filename,
sanitized_Request,
unescapeHTML,
unified_strdate,
unified_timestamp,
url_basename,
xpath_element,
xpath_text,
xpath_with_ns,
determine_protocol,
parse_duration,
mimetype2ext,
update_Request,
update_url_query,
parse_m3u8_attributes,
extract_attributes,
parse_codecs,
)
class InfoExtractor(object):
"""Information Extractor class.
Information extractors are the classes that, given a URL, extract
information about the video (or videos) the URL refers to. This
information includes the real video URL, the video title, author and
others. The information is stored in a dictionary which is then
passed to the YoutubeDL. The YoutubeDL processes this
information possibly downloading the video to the file system, among
other possible outcomes.
The type field determines the type of the result.
By far the most common value (and the default if _type is missing) is
"video", which indicates a single video.
For a video, the dictionaries must include the following fields:
id: Video identifier.
title: Video title, unescaped.
Additionally, it must contain either a formats entry or a url one:
formats: A list of dictionaries for each format available, ordered
from worst to best quality.
Potential fields:
* url Mandatory. The URL of the video file
* ext Will be calculated from URL if missing
* format A human-readable description of the format
("mp4 container with h264/opus").
                                 Calculated from the format_id, width, height,
and format_note fields if missing.
* format_id A short description of the format
("mp4_h264_opus" or "19").
Technically optional, but strongly recommended.
* format_note Additional info about the format
("3D" or "DASH video")
* width Width of the video, if known
* height Height of the video, if known
* resolution Textual description of width and height
* tbr Average bitrate of audio and video in KBit/s
* abr Average audio bitrate in KBit/s
* acodec Name of the audio codec in use
* asr Audio sampling rate in Hertz
* vbr Average video bitrate in KBit/s
* fps Frame rate
* vcodec Name of the video codec in use
* container Name of the container format
* filesize The number of bytes, if known in advance
* filesize_approx An estimate for the number of bytes
* player_url SWF Player URL (used for rtmpdump).
* protocol The protocol that will be used for the actual
download, lower-case.
"http", "https", "rtsp", "rtmp", "rtmpe",
"m3u8", "m3u8_native" or "http_dash_segments".
* preference Order number of this format. If this field is
present and not None, the formats get sorted
by this field, regardless of all other values.
-1 for default (order by other properties),
-2 or smaller for less than default.
< -1000 to hide the format (if there is
another one which is strictly better)
* language Language code, e.g. "de" or "en-US".
* language_preference Is this in the language mentioned in
the URL?
10 if it's what the URL is about,
-1 for default (don't know),
-10 otherwise, other values reserved for now.
* quality Order number of the video quality of this
format, irrespective of the file format.
-1 for default (order by other properties),
-2 or smaller for less than default.
* source_preference Order number for this video source
(quality takes higher priority)
-1 for default (order by other properties),
-2 or smaller for less than default.
* http_headers A dictionary of additional HTTP headers
to add to the request.
* stretched_ratio If given and not 1, indicates that the
video's pixels are not square.
width : height ratio as float.
* no_resume The server does not support resuming the
(HTTP or RTMP) download. Boolean.
url: Final video URL.
ext: Video filename extension.
format: The video format, defaults to ext (used for --get-format)
player_url: SWF Player URL (used for rtmpdump).
The following fields are optional:
alt_title: A secondary title of the video.
display_id An alternative identifier for the video, not necessarily
unique, but available before title. Typically, id is
something like "4234987", title "Dancing naked mole rats",
and display_id "dancing-naked-mole-rats"
thumbnails: A list of dictionaries, with the following entries:
* "id" (optional, string) - Thumbnail format ID
* "url"
* "preference" (optional, int) - quality of the image
* "width" (optional, int)
* "height" (optional, int)
* "resolution" (optional, string "{width}x{height"},
deprecated)
* "filesize" (optional, int)
thumbnail: Full URL to a video thumbnail image.
description: Full video description.
uploader: Full name of the video uploader.
license: License name the video is licensed under.
creator: The creator of the video.
release_date: The date (YYYYMMDD) when the video was released.
timestamp: UNIX timestamp of the moment the video became available.
upload_date: Video upload date (YYYYMMDD).
If not explicitly set, calculated from timestamp.
uploader_id: Nickname or id of the video uploader.
uploader_url: Full URL to a personal webpage of the video uploader.
location: Physical location where the video was filmed.
subtitles: The available subtitles as a dictionary in the format
{language: subformats}. "subformats" is a list sorted from
lower to higher preference, each element is a dictionary
with the "ext" entry and one of:
* "data": The subtitles file contents
* "url": A URL pointing to the subtitles file
"ext" will be calculated from URL if missing
automatic_captions: Like 'subtitles', used by the YoutubeIE for
automatically generated captions
duration: Length of the video in seconds, as an integer or float.
view_count: How many users have watched the video on the platform.
like_count: Number of positive ratings of the video
dislike_count: Number of negative ratings of the video
repost_count: Number of reposts of the video
    average_rating: Average rating given by users; the scale used depends on the webpage
comment_count: Number of comments on the video
comments: A list of comments, each with one or more of the following
properties (all but one of text or html optional):
* "author" - human-readable name of the comment author
* "author_id" - user ID of the comment author
* "id" - Comment ID
* "html" - Comment as HTML
* "text" - Plain text of the comment
* "timestamp" - UNIX timestamp of comment
* "parent" - ID of the comment this one is replying to.
Set to "root" to indicate that this is a
comment to the original video.
age_limit: Age restriction for the video, as an integer (years)
webpage_url: The URL to the video webpage, if given to youtube-dl it
should allow to get the same result again. (It will be set
by YoutubeDL if it's missing)
categories: A list of categories that the video falls in, for example
["Sports", "Berlin"]
tags: A list of tags assigned to the video, e.g. ["sweden", "pop music"]
is_live: True, False, or None (=unknown). Whether this video is a
live stream that goes on instead of a fixed-length video.
start_time: Time in seconds where the reproduction should start, as
specified in the URL.
end_time: Time in seconds where the reproduction should end, as
specified in the URL.
The following fields should only be used when the video belongs to some logical
chapter or section:
chapter: Name or title of the chapter the video belongs to.
chapter_number: Number of the chapter the video belongs to, as an integer.
chapter_id: Id of the chapter the video belongs to, as a unicode string.
The following fields should only be used when the video is an episode of some
series or programme:
series: Title of the series or programme the video episode belongs to.
season: Title of the season the video episode belongs to.
season_number: Number of the season the video episode belongs to, as an integer.
season_id: Id of the season the video episode belongs to, as a unicode string.
episode: Title of the video episode. Unlike mandatory video title field,
this field should denote the exact title of the video episode
without any kind of decoration.
episode_number: Number of the video episode within a season, as an integer.
episode_id: Id of the video episode, as a unicode string.
The following fields should only be used when the media is a track or a part of
a music album:
track: Title of the track.
track_number: Number of the track within an album or a disc, as an integer.
track_id: Id of the track (useful in case of custom indexing, e.g. 6.iii),
as a unicode string.
artist: Artist(s) of the track.
genre: Genre(s) of the track.
album: Title of the album the track belongs to.
album_type: Type of the album (e.g. "Demo", "Full-length", "Split", "Compilation", etc).
    album_artist: List of all artists who appeared on the album (e.g.
"Ash Borer / Fell Voices" or "Various Artists", useful for splits
and compilations).
disc_number: Number of the disc or other physical medium the track belongs to,
as an integer.
release_year: Year (YYYY) when the album was released.
Unless mentioned otherwise, the fields should be Unicode strings.
Unless mentioned otherwise, None is equivalent to absence of information.
_type "playlist" indicates multiple videos.
There must be a key "entries", which is a list, an iterable, or a PagedList
object, each element of which is a valid dictionary by this specification.
Additionally, playlists can have "title", "description" and "id" attributes
with the same semantics as videos (see above).
_type "multi_video" indicates that there are multiple videos that
    form a single show, for example multiple acts of an opera or TV episode.
It must have an entries key like a playlist and contain all the keys
required for a video at the same time.
_type "url" indicates that the video must be extracted from another
location, possibly by a different extractor. Its only required key is:
"url" - the next URL to extract.
The key "ie_key" can be set to the class name (minus the trailing "IE",
e.g. "Youtube") if the extractor class is known in advance.
Additionally, the dictionary may have any properties of the resolved entity
known in advance, for example "title" if the title of the referred video is
known ahead of time.
_type "url_transparent" entities have the same specification as "url", but
indicate that the given additional information is more precise than the one
associated with the resolved URL.
This is useful when a site employs a video service that hosts the video and
its technical metadata, but that video service does not embed a useful
title, description etc.
Subclasses of this one should re-define the _real_initialize() and
_real_extract() methods and define a _VALID_URL regexp.
Probably, they should also be added to the list of extractors.
Finally, the _WORKING attribute should be set to False for broken IEs
in order to warn the users and skip the tests.
"""
_ready = False
_downloader = None
_WORKING = True
def __init__(self, downloader=None):
"""Constructor. Receives an optional downloader."""
self._ready = False
self.set_downloader(downloader)
@classmethod
def suitable(cls, url):
"""Receives a URL and returns True if suitable for this IE."""
# This does not use has/getattr intentionally - we want to know whether
# we have cached the regexp for *this* class, whereas getattr would also
# match the superclass
if '_VALID_URL_RE' not in cls.__dict__:
cls._VALID_URL_RE = re.compile(cls._VALID_URL)
return cls._VALID_URL_RE.match(url) is not None
@classmethod
def _match_id(cls, url):
if '_VALID_URL_RE' not in cls.__dict__:
cls._VALID_URL_RE = re.compile(cls._VALID_URL)
m = cls._VALID_URL_RE.match(url)
assert m
return m.group('id')
@classmethod
def working(cls):
"""Getter method for _WORKING."""
return cls._WORKING
def initialize(self):
"""Initializes an instance (authentication, etc)."""
if not self._ready:
self._real_initialize()
self._ready = True
def extract(self, url):
"""Extracts URL information and returns it in list of dicts."""
try:
self.initialize()
return self._real_extract(url)
except ExtractorError:
raise
except compat_http_client.IncompleteRead as e:
raise ExtractorError('A network error has occurred.', cause=e, expected=True)
except (KeyError, StopIteration) as e:
raise ExtractorError('An extractor error has occurred.', cause=e)
def set_downloader(self, downloader):
"""Sets the downloader for this IE."""
self._downloader = downloader
def _real_initialize(self):
"""Real initialization process. Redefine in subclasses."""
pass
def _real_extract(self, url):
"""Real extraction process. Redefine in subclasses."""
pass
@classmethod
def ie_key(cls):
"""A string for getting the InfoExtractor with get_info_extractor"""
return compat_str(cls.__name__[:-2])
@property
def IE_NAME(self):
return compat_str(type(self).__name__[:-2])
def _request_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True, data=None, headers={}, query={}):
""" Returns the response handle """
if note is None:
self.report_download_webpage(video_id)
elif note is not False:
if video_id is None:
self.to_screen('%s' % (note,))
else:
self.to_screen('%s: %s' % (video_id, note))
if isinstance(url_or_request, compat_urllib_request.Request):
url_or_request = update_Request(
url_or_request, data=data, headers=headers, query=query)
else:
if query:
url_or_request = update_url_query(url_or_request, query)
if data is not None or headers:
url_or_request = sanitized_Request(url_or_request, data, headers)
try:
return self._downloader.urlopen(url_or_request)
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
if errnote is False:
return False
if errnote is None:
errnote = 'Unable to download webpage'
errmsg = '%s: %s' % (errnote, error_to_compat_str(err))
if fatal:
raise ExtractorError(errmsg, sys.exc_info()[2], cause=err)
else:
self._downloader.report_warning(errmsg)
return False
def _download_webpage_handle(self, url_or_request, video_id, note=None, errnote=None, fatal=True, encoding=None, data=None, headers={}, query={}):
""" Returns a tuple (page content as string, URL handle) """
# Strip hashes from the URL (#1038)
if isinstance(url_or_request, (compat_str, str)):
url_or_request = url_or_request.partition('#')[0]
urlh = self._request_webpage(url_or_request, video_id, note, errnote, fatal, data=data, headers=headers, query=query)
if urlh is False:
assert not fatal
return False
content = self._webpage_read_content(urlh, url_or_request, video_id, note, errnote, fatal, encoding=encoding)
return (content, urlh)
@staticmethod
def _guess_encoding_from_content(content_type, webpage_bytes):
m = re.match(r'[a-zA-Z0-9_.-]+/[a-zA-Z0-9_.-]+\s*;\s*charset=(.+)', content_type)
if m:
encoding = m.group(1)
else:
m = re.search(br'<meta[^>]+charset=[\'"]?([^\'")]+)[ /\'">]',
webpage_bytes[:1024])
if m:
encoding = m.group(1).decode('ascii')
elif webpage_bytes.startswith(b'\xff\xfe'):
encoding = 'utf-16'
else:
encoding = 'utf-8'
return encoding
def _webpage_read_content(self, urlh, url_or_request, video_id, note=None, errnote=None, fatal=True, prefix=None, encoding=None):
content_type = urlh.headers.get('Content-Type', '')
webpage_bytes = urlh.read()
if prefix is not None:
webpage_bytes = prefix + webpage_bytes
if not encoding:
encoding = self._guess_encoding_from_content(content_type, webpage_bytes)
if self._downloader.params.get('dump_intermediate_pages', False):
try:
url = url_or_request.get_full_url()
except AttributeError:
url = url_or_request
self.to_screen('Dumping request to ' + url)
dump = base64.b64encode(webpage_bytes).decode('ascii')
self._downloader.to_screen(dump)
if self._downloader.params.get('write_pages', False):
try:
url = url_or_request.get_full_url()
except AttributeError:
url = url_or_request
basen = '%s_%s' % (video_id, url)
if len(basen) > 240:
h = '___' + hashlib.md5(basen.encode('utf-8')).hexdigest()
basen = basen[:240 - len(h)] + h
raw_filename = basen + '.dump'
filename = sanitize_filename(raw_filename, restricted=True)
self.to_screen('Saving request to ' + filename)
# Working around MAX_PATH limitation on Windows (see
# http://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx)
if compat_os_name == 'nt':
absfilepath = os.path.abspath(filename)
if len(absfilepath) > 259:
filename = '\\\\?\\' + absfilepath
with open(filename, 'wb') as outf:
outf.write(webpage_bytes)
try:
content = webpage_bytes.decode(encoding, 'replace')
except LookupError:
content = webpage_bytes.decode('utf-8', 'replace')
if ('<title>Access to this site is blocked</title>' in content and
'Websense' in content[:512]):
msg = 'Access to this webpage has been blocked by Websense filtering software in your network.'
blocked_iframe = self._html_search_regex(
r'<iframe src="([^"]+)"', content,
'Websense information URL', default=None)
if blocked_iframe:
msg += ' Visit %s for more details' % blocked_iframe
raise ExtractorError(msg, expected=True)
if '<title>The URL you requested has been blocked</title>' in content[:512]:
msg = (
'Access to this webpage has been blocked by Indian censorship. '
'Use a VPN or proxy server (with --proxy) to route around it.')
block_msg = self._html_search_regex(
r'</h1><p>(.*?)</p>',
content, 'block message', default=None)
if block_msg:
msg += ' (Message: "%s")' % block_msg.replace('\n', ' ')
raise ExtractorError(msg, expected=True)
return content
def _download_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True, tries=1, timeout=5, encoding=None, data=None, headers={}, query={}):
""" Returns the data of the page as a string """
success = False
try_count = 0
while success is False:
try:
res = self._download_webpage_handle(url_or_request, video_id, note, errnote, fatal, encoding=encoding, data=data, headers=headers, query=query)
success = True
except compat_http_client.IncompleteRead as e:
try_count += 1
if try_count >= tries:
raise e
self._sleep(timeout, video_id)
if res is False:
return res
else:
content, _ = res
return content
def _download_xml(self, url_or_request, video_id,
note='Downloading XML', errnote='Unable to download XML',
transform_source=None, fatal=True, encoding=None, data=None, headers={}, query={}):
"""Return the xml as an xml.etree.ElementTree.Element"""
xml_string = self._download_webpage(
url_or_request, video_id, note, errnote, fatal=fatal, encoding=encoding, data=data, headers=headers, query=query)
if xml_string is False:
return xml_string
if transform_source:
xml_string = transform_source(xml_string)
return compat_etree_fromstring(xml_string.encode('utf-8'))
def _download_json(self, url_or_request, video_id,
note='Downloading JSON metadata',
errnote='Unable to download JSON metadata',
transform_source=None,
fatal=True, encoding=None, data=None, headers={}, query={}):
json_string = self._download_webpage(
url_or_request, video_id, note, errnote, fatal=fatal,
encoding=encoding, data=data, headers=headers, query=query)
if (not fatal) and json_string is False:
return None
return self._parse_json(
json_string, video_id, transform_source=transform_source, fatal=fatal)
def _parse_json(self, json_string, video_id, transform_source=None, fatal=True):
if transform_source:
json_string = transform_source(json_string)
try:
return json.loads(json_string)
except ValueError as ve:
errmsg = '%s: Failed to parse JSON ' % video_id
if fatal:
raise ExtractorError(errmsg, cause=ve)
else:
self.report_warning(errmsg + str(ve))
def report_warning(self, msg, video_id=None):
idstr = '' if video_id is None else '%s: ' % video_id
self._downloader.report_warning(
'[%s] %s%s' % (self.IE_NAME, idstr, msg))
def to_screen(self, msg):
"""Print msg to screen, prefixing it with '[ie_name]'"""
self._downloader.to_screen('[%s] %s' % (self.IE_NAME, msg))
def report_extraction(self, id_or_name):
"""Report information extraction."""
self.to_screen('%s: Extracting information' % id_or_name)
def report_download_webpage(self, video_id):
"""Report webpage download."""
self.to_screen('%s: Downloading webpage' % video_id)
def report_age_confirmation(self):
"""Report attempt to confirm age."""
self.to_screen('Confirming age')
def report_login(self):
"""Report attempt to log in."""
self.to_screen('Logging in')
@staticmethod
def raise_login_required(msg='This video is only available for registered users'):
raise ExtractorError(
'%s. Use --username and --password or --netrc to provide account credentials.' % msg,
expected=True)
@staticmethod
def raise_geo_restricted(msg='This video is not available from your location due to geo restriction'):
raise ExtractorError(
'%s. You might want to use --proxy to workaround.' % msg,
expected=True)
# Methods for following #608
@staticmethod
def url_result(url, ie=None, video_id=None, video_title=None):
"""Returns a URL that points to a page that should be processed"""
# TODO: ie should be the class used for getting the info
video_info = {'_type': 'url',
'url': url,
'ie_key': ie}
if video_id is not None:
video_info['id'] = video_id
if video_title is not None:
video_info['title'] = video_title
return video_info
@staticmethod
def playlist_result(entries, playlist_id=None, playlist_title=None, playlist_description=None):
"""Returns a playlist"""
video_info = {'_type': 'playlist',
'entries': entries}
if playlist_id:
video_info['id'] = playlist_id
if playlist_title:
video_info['title'] = playlist_title
if playlist_description:
video_info['description'] = playlist_description
return video_info
def _search_regex(self, pattern, string, name, default=NO_DEFAULT, fatal=True, flags=0, group=None):
"""
Perform a regex search on the given string, using a single or a list of
patterns returning the first matching group.
In case of failure return a default value or raise a WARNING or a
RegexNotFoundError, depending on fatal, specifying the field name.
"""
if isinstance(pattern, (str, compat_str, compiled_regex_type)):
mobj = re.search(pattern, string, flags)
else:
for p in pattern:
mobj = re.search(p, string, flags)
if mobj:
break
if not self._downloader.params.get('no_color') and compat_os_name != 'nt' and sys.stderr.isatty():
_name = '\033[0;34m%s\033[0m' % name
else:
_name = name
if mobj:
if group is None:
# return the first matching group
return next(g for g in mobj.groups() if g is not None)
else:
return mobj.group(group)
elif default is not NO_DEFAULT:
return default
elif fatal:
raise RegexNotFoundError('Unable to extract %s' % _name)
else:
self._downloader.report_warning('unable to extract %s' % _name + bug_reports_message())
return None
def _html_search_regex(self, pattern, string, name, default=NO_DEFAULT, fatal=True, flags=0, group=None):
"""
Like _search_regex, but strips HTML tags and unescapes entities.
"""
res = self._search_regex(pattern, string, name, default, fatal, flags, group)
if res:
return clean_html(res).strip()
else:
return res
def _get_login_info(self):
"""
Get the login info as (username, password)
It will look in the netrc file using the _NETRC_MACHINE value
If there's no info available, return (None, None)
"""
if self._downloader is None:
return (None, None)
username = None
password = None
downloader_params = self._downloader.params
# Attempt to use provided username and password or .netrc data
if downloader_params.get('username') is not None:
username = downloader_params['username']
password = downloader_params['password']
elif downloader_params.get('usenetrc', False):
try:
info = netrc.netrc().authenticators(self._NETRC_MACHINE)
if info is not None:
username = info[0]
password = info[2]
else:
raise netrc.NetrcParseError('No authenticators for %s' % self._NETRC_MACHINE)
except (IOError, netrc.NetrcParseError) as err:
self._downloader.report_warning('parsing .netrc: %s' % error_to_compat_str(err))
return (username, password)
def _get_tfa_info(self, note='two-factor verification code'):
"""
Get the two-factor authentication info
TODO - asking the user will be required for sms/phone verify
currently just uses the command line option
If there's no info available, return None
"""
if self._downloader is None:
return None
downloader_params = self._downloader.params
if downloader_params.get('twofactor') is not None:
return downloader_params['twofactor']
return compat_getpass('Type %s and press [Return]: ' % note)
# Helper functions for extracting OpenGraph info
@staticmethod
def _og_regexes(prop):
content_re = r'content=(?:"([^"]+?)"|\'([^\']+?)\'|\s*([^\s"\'=<>`]+?))'
property_re = (r'(?:name|property)=(?:\'og:%(prop)s\'|"og:%(prop)s"|\s*og:%(prop)s\b)'
% {'prop': re.escape(prop)})
template = r'<meta[^>]+?%s[^>]+?%s'
return [
template % (property_re, content_re),
template % (content_re, property_re),
]
@staticmethod
def _meta_regex(prop):
return r'''(?isx)<meta
(?=[^>]+(?:itemprop|name|property|id|http-equiv)=(["\']?)%s\1)
[^>]+?content=(["\'])(?P<content>.*?)\2''' % re.escape(prop)
def _og_search_property(self, prop, html, name=None, **kargs):
if name is None:
name = 'OpenGraph %s' % prop
escaped = self._search_regex(self._og_regexes(prop), html, name, flags=re.DOTALL, **kargs)
if escaped is None:
return None
return unescapeHTML(escaped)
def _og_search_thumbnail(self, html, **kargs):
return self._og_search_property('image', html, 'thumbnail URL', fatal=False, **kargs)
def _og_search_description(self, html, **kargs):
return self._og_search_property('description', html, fatal=False, **kargs)
def _og_search_title(self, html, **kargs):
return self._og_search_property('title', html, **kargs)
def _og_search_video_url(self, html, name='video url', secure=True, **kargs):
regexes = self._og_regexes('video') + self._og_regexes('video:url')
if secure:
regexes = self._og_regexes('video:secure_url') + regexes
return self._html_search_regex(regexes, html, name, **kargs)
def _og_search_url(self, html, **kargs):
return self._og_search_property('url', html, **kargs)
def _html_search_meta(self, name, html, display_name=None, fatal=False, **kwargs):
if not isinstance(name, (list, tuple)):
name = [name]
if display_name is None:
display_name = name[0]
return self._html_search_regex(
[self._meta_regex(n) for n in name],
html, display_name, fatal=fatal, group='content', **kwargs)
def _dc_search_uploader(self, html):
return self._html_search_meta('dc.creator', html, 'uploader')
def _rta_search(self, html):
# See http://www.rtalabel.org/index.php?content=howtofaq#single
if re.search(r'(?ix)<meta\s+name="rating"\s+'
r' content="RTA-5042-1996-1400-1577-RTA"',
html):
return 18
return 0
def _media_rating_search(self, html):
# See http://www.tjg-designs.com/WP/metadata-code-examples-adding-metadata-to-your-web-pages/
rating = self._html_search_meta('rating', html)
if not rating:
return None
RATING_TABLE = {
'safe for kids': 0,
'general': 8,
'14 years': 14,
'mature': 17,
'restricted': 19,
}
return RATING_TABLE.get(rating.lower())
def _family_friendly_search(self, html):
# See http://schema.org/VideoObject
family_friendly = self._html_search_meta('isFamilyFriendly', html)
if not family_friendly:
return None
RATING_TABLE = {
'1': 0,
'true': 0,
'0': 18,
'false': 18,
}
return RATING_TABLE.get(family_friendly.lower())
def _twitter_search_player(self, html):
return self._html_search_meta('twitter:player', html,
'twitter card player')
def _search_json_ld(self, html, video_id, expected_type=None, **kwargs):
json_ld = self._search_regex(
r'(?s)<script[^>]+type=(["\'])application/ld\+json\1[^>]*>(?P<json_ld>.+?)</script>',
html, 'JSON-LD', group='json_ld', **kwargs)
if not json_ld:
return {}
return self._json_ld(
json_ld, video_id, fatal=kwargs.get('fatal', True),
expected_type=expected_type)
def _json_ld(self, json_ld, video_id, fatal=True, expected_type=None):
if isinstance(json_ld, compat_str):
json_ld = self._parse_json(json_ld, video_id, fatal=fatal)
if not json_ld:
return {}
info = {}
if json_ld.get('@context') == 'http://schema.org':
item_type = json_ld.get('@type')
if expected_type is not None and expected_type != item_type:
return info
if item_type == 'TVEpisode':
info.update({
'episode': unescapeHTML(json_ld.get('name')),
'episode_number': int_or_none(json_ld.get('episodeNumber')),
'description': unescapeHTML(json_ld.get('description')),
})
part_of_season = json_ld.get('partOfSeason')
if isinstance(part_of_season, dict) and part_of_season.get('@type') == 'TVSeason':
info['season_number'] = int_or_none(part_of_season.get('seasonNumber'))
part_of_series = json_ld.get('partOfSeries')
if isinstance(part_of_series, dict) and part_of_series.get('@type') == 'TVSeries':
info['series'] = unescapeHTML(part_of_series.get('name'))
elif item_type == 'Article':
info.update({
'timestamp': parse_iso8601(json_ld.get('datePublished')),
'title': unescapeHTML(json_ld.get('headline')),
'description': unescapeHTML(json_ld.get('articleBody')),
})
elif item_type == 'VideoObject':
info.update({
'url': json_ld.get('contentUrl'),
'title': unescapeHTML(json_ld.get('name')),
'description': unescapeHTML(json_ld.get('description')),
'thumbnail': json_ld.get('thumbnailUrl'),
'duration': parse_duration(json_ld.get('duration')),
'timestamp': unified_timestamp(json_ld.get('uploadDate')),
'filesize': float_or_none(json_ld.get('contentSize')),
'tbr': int_or_none(json_ld.get('bitrate')),
'width': int_or_none(json_ld.get('width')),
'height': int_or_none(json_ld.get('height')),
})
return dict((k, v) for k, v in info.items() if v is not None)
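    # Illustrative example (hypothetical input): a minimal VideoObject block
    # such as
    #   {"@context": "http://schema.org", "@type": "VideoObject",
    #    "name": "Clip", "duration": "PT1M30S"}
    # would come back as roughly {'title': 'Clip', 'duration': 90}, assuming
    # parse_duration maps 'PT1M30S' to 90 seconds; None values are dropped.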
@staticmethod
def _hidden_inputs(html):
html = re.sub(r'<!--(?:(?!<!--).)*-->', '', html)
hidden_inputs = {}
for input in re.findall(r'(?i)<input([^>]+)>', html):
if not re.search(r'type=(["\'])(?:hidden|submit)\1', input):
continue
name = re.search(r'(?:name|id)=(["\'])(?P<value>.+?)\1', input)
if not name:
continue
value = re.search(r'value=(["\'])(?P<value>.*?)\1', input)
if not value:
continue
hidden_inputs[name.group('value')] = value.group('value')
return hidden_inputs
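    # For example (illustrative input): given
    #   <input type="hidden" name="token" value="abc123">
    # _hidden_inputs would return {'token': 'abc123'}; inputs without a
    # name/id attribute or without a value attribute are skipped.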
def _form_hidden_inputs(self, form_id, html):
form = self._search_regex(
r'(?is)<form[^>]+?id=(["\'])%s\1[^>]*>(?P<form>.+?)</form>' % form_id,
html, '%s form' % form_id, group='form')
return self._hidden_inputs(form)
def _sort_formats(self, formats, field_preference=None):
if not formats:
raise ExtractorError('No video formats found')
for f in formats:
# Automatically determine tbr when missing based on abr and vbr (improves
# formats sorting in some cases)
if 'tbr' not in f and f.get('abr') is not None and f.get('vbr') is not None:
f['tbr'] = f['abr'] + f['vbr']
def _formats_key(f):
# TODO remove the following workaround
from ..utils import determine_ext
if not f.get('ext') and 'url' in f:
f['ext'] = determine_ext(f['url'])
if isinstance(field_preference, (list, tuple)):
return tuple(
f.get(field)
if f.get(field) is not None
else ('' if field == 'format_id' else -1)
for field in field_preference)
preference = f.get('preference')
if preference is None:
preference = 0
if f.get('ext') in ['f4f', 'f4m']: # Not yet supported
preference -= 0.5
proto_preference = 0 if determine_protocol(f) in ['http', 'https'] else -0.1
if f.get('vcodec') == 'none': # audio only
preference -= 50
if self._downloader.params.get('prefer_free_formats'):
ORDER = ['aac', 'mp3', 'm4a', 'webm', 'ogg', 'opus']
else:
ORDER = ['webm', 'opus', 'ogg', 'mp3', 'aac', 'm4a']
ext_preference = 0
try:
audio_ext_preference = ORDER.index(f['ext'])
except ValueError:
audio_ext_preference = -1
else:
if f.get('acodec') == 'none': # video only
preference -= 40
if self._downloader.params.get('prefer_free_formats'):
ORDER = ['flv', 'mp4', 'webm']
else:
ORDER = ['webm', 'flv', 'mp4']
try:
ext_preference = ORDER.index(f['ext'])
except ValueError:
ext_preference = -1
audio_ext_preference = 0
return (
preference,
f.get('language_preference') if f.get('language_preference') is not None else -1,
f.get('quality') if f.get('quality') is not None else -1,
f.get('tbr') if f.get('tbr') is not None else -1,
f.get('filesize') if f.get('filesize') is not None else -1,
f.get('vbr') if f.get('vbr') is not None else -1,
f.get('height') if f.get('height') is not None else -1,
f.get('width') if f.get('width') is not None else -1,
proto_preference,
ext_preference,
f.get('abr') if f.get('abr') is not None else -1,
audio_ext_preference,
f.get('fps') if f.get('fps') is not None else -1,
f.get('filesize_approx') if f.get('filesize_approx') is not None else -1,
f.get('source_preference') if f.get('source_preference') is not None else -1,
f.get('format_id') if f.get('format_id') is not None else '',
)
formats.sort(key=_formats_key)
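    # Sketch of the resulting order (assumed inputs): formats are sorted
    # ascending, so the entry considered best comes last. With
    # prefer_free_formats unset, two otherwise equal 1080p formats in webm
    # and mp4 would sort with mp4 last (i.e. preferred), because the
    # non-free ORDER list ranks 'mp4' highest via ext_preference.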
def _check_formats(self, formats, video_id):
if formats:
formats[:] = filter(
lambda f: self._is_valid_url(
f['url'], video_id,
item='%s video format' % f.get('format_id') if f.get('format_id') else 'video'),
formats)
@staticmethod
def _remove_duplicate_formats(formats):
format_urls = set()
unique_formats = []
for f in formats:
if f['url'] not in format_urls:
format_urls.add(f['url'])
unique_formats.append(f)
formats[:] = unique_formats
def _is_valid_url(self, url, video_id, item='video'):
url = self._proto_relative_url(url, scheme='http:')
# For now assume non HTTP(S) URLs always valid
if not (url.startswith('http://') or url.startswith('https://')):
return True
try:
self._request_webpage(url, video_id, 'Checking %s URL' % item)
return True
except ExtractorError as e:
if isinstance(e.cause, compat_urllib_error.URLError):
self.to_screen(
'%s: %s URL is invalid, skipping' % (video_id, item))
return False
raise
def http_scheme(self):
""" Either "http:" or "https:", depending on the user's preferences """
return (
'http:'
if self._downloader.params.get('prefer_insecure', False)
else 'https:')
def _proto_relative_url(self, url, scheme=None):
if url is None:
return url
if url.startswith('//'):
if scheme is None:
scheme = self.http_scheme()
return scheme + url
else:
return url
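    # e.g. (illustrative): _proto_relative_url('//cdn.example.com/v.mp4')
    # yields 'https://cdn.example.com/v.mp4', or 'http://...' when the
    # prefer_insecure option is set; non-protocol-relative URLs pass through.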
def _sleep(self, timeout, video_id, msg_template=None):
if msg_template is None:
msg_template = '%(video_id)s: Waiting for %(timeout)s seconds'
msg = msg_template % {'video_id': video_id, 'timeout': timeout}
self.to_screen(msg)
time.sleep(timeout)
def _extract_f4m_formats(self, manifest_url, video_id, preference=None, f4m_id=None,
transform_source=lambda s: fix_xml_ampersands(s).strip(),
fatal=True, m3u8_id=None):
manifest = self._download_xml(
manifest_url, video_id, 'Downloading f4m manifest',
'Unable to download f4m manifest',
# Some manifests may be malformed, e.g. prosiebensat1 generated manifests
# (see https://github.com/rg3/youtube-dl/issues/6215#issuecomment-121704244)
transform_source=transform_source,
fatal=fatal)
if manifest is False:
return []
return self._parse_f4m_formats(
manifest, manifest_url, video_id, preference=preference, f4m_id=f4m_id,
transform_source=transform_source, fatal=fatal, m3u8_id=m3u8_id)
def _parse_f4m_formats(self, manifest, manifest_url, video_id, preference=None, f4m_id=None,
transform_source=lambda s: fix_xml_ampersands(s).strip(),
fatal=True, m3u8_id=None):
# currently youtube-dl cannot decode the playerVerificationChallenge as Akamai uses Adobe Alchemy
akamai_pv = manifest.find('{http://ns.adobe.com/f4m/1.0}pv-2.0')
if akamai_pv is not None and ';' in akamai_pv.text:
playerVerificationChallenge = akamai_pv.text.split(';')[0]
if playerVerificationChallenge.strip() != '':
return []
formats = []
manifest_version = '1.0'
media_nodes = manifest.findall('{http://ns.adobe.com/f4m/1.0}media')
if not media_nodes:
manifest_version = '2.0'
media_nodes = manifest.findall('{http://ns.adobe.com/f4m/2.0}media')
# Remove unsupported DRM protected media from final formats
# rendition (see https://github.com/rg3/youtube-dl/issues/8573).
media_nodes = remove_encrypted_media(media_nodes)
if not media_nodes:
return formats
base_url = xpath_text(
manifest, ['{http://ns.adobe.com/f4m/1.0}baseURL', '{http://ns.adobe.com/f4m/2.0}baseURL'],
'base URL', default=None)
if base_url:
base_url = base_url.strip()
bootstrap_info = xpath_element(
manifest, ['{http://ns.adobe.com/f4m/1.0}bootstrapInfo', '{http://ns.adobe.com/f4m/2.0}bootstrapInfo'],
'bootstrap info', default=None)
for i, media_el in enumerate(media_nodes):
tbr = int_or_none(media_el.attrib.get('bitrate'))
width = int_or_none(media_el.attrib.get('width'))
height = int_or_none(media_el.attrib.get('height'))
format_id = '-'.join(filter(None, [f4m_id, compat_str(i if tbr is None else tbr)]))
# If <bootstrapInfo> is present, the specified f4m is a
# stream-level manifest, and only set-level manifests may refer to
# external resources. See section 11.4 and section 4 of F4M spec
if bootstrap_info is None:
media_url = None
# @href is introduced in 2.0, see section 11.6 of F4M spec
if manifest_version == '2.0':
media_url = media_el.attrib.get('href')
if media_url is None:
media_url = media_el.attrib.get('url')
if not media_url:
continue
manifest_url = (
media_url if media_url.startswith('http://') or media_url.startswith('https://')
else ((base_url or '/'.join(manifest_url.split('/')[:-1])) + '/' + media_url))
            # If media_url is itself an f4m manifest, extract it recursively,
            # since the bitrates in the parent manifest (this one) and in the
            # media_url manifest may differ, which would prevent the f4m
            # downloader from resolving the format by the requested bitrate.
ext = determine_ext(manifest_url)
if ext == 'f4m':
f4m_formats = self._extract_f4m_formats(
manifest_url, video_id, preference=preference, f4m_id=f4m_id,
transform_source=transform_source, fatal=fatal)
                # Sometimes a stream-level manifest contains a single media
                # entry that carries no quality metadata (e.g.
                # http://matchtv.ru/#live-player), while the parent's media
                # entry in the set-level manifest may contain it. In such
                # cases we copy it from the parent.
if len(f4m_formats) == 1:
f = f4m_formats[0]
f.update({
'tbr': f.get('tbr') or tbr,
'width': f.get('width') or width,
'height': f.get('height') or height,
'format_id': f.get('format_id') if not tbr else format_id,
})
formats.extend(f4m_formats)
continue
elif ext == 'm3u8':
formats.extend(self._extract_m3u8_formats(
manifest_url, video_id, 'mp4', preference=preference,
m3u8_id=m3u8_id, fatal=fatal))
continue
formats.append({
'format_id': format_id,
'url': manifest_url,
'ext': 'flv' if bootstrap_info is not None else None,
'tbr': tbr,
'width': width,
'height': height,
'preference': preference,
})
return formats
def _m3u8_meta_format(self, m3u8_url, ext=None, preference=None, m3u8_id=None):
return {
'format_id': '-'.join(filter(None, [m3u8_id, 'meta'])),
'url': m3u8_url,
'ext': ext,
'protocol': 'm3u8',
'preference': preference - 1 if preference else -1,
'resolution': 'multiple',
'format_note': 'Quality selection URL',
}
def _extract_m3u8_formats(self, m3u8_url, video_id, ext=None,
entry_protocol='m3u8', preference=None,
m3u8_id=None, note=None, errnote=None,
fatal=True, live=False):
formats = [self._m3u8_meta_format(m3u8_url, ext, preference, m3u8_id)]
format_url = lambda u: (
u
if re.match(r'^https?://', u)
else compat_urlparse.urljoin(m3u8_url, u))
res = self._download_webpage_handle(
m3u8_url, video_id,
note=note or 'Downloading m3u8 information',
errnote=errnote or 'Failed to download m3u8 information',
fatal=fatal)
if res is False:
return []
m3u8_doc, urlh = res
m3u8_url = urlh.geturl()
        # We should try extracting formats only from master playlists [1], i.e.
        # playlists that describe the available qualities. Media playlists [2],
        # on the other hand, should be returned as is, since they contain just
        # the media without any quality renditions.
        # Fortunately, a master playlist can easily be distinguished from a
        # media playlist by the availability of particular tags. According to
        # [1, 2], master playlist tags MUST NOT appear in a media playlist and
        # vice versa. According to [3], the #EXT-X-TARGETDURATION tag is
        # REQUIRED for every media playlist and MUST NOT appear in a master
        # playlist, so we can reliably detect a media playlist by this criterion.
# 1. https://tools.ietf.org/html/draft-pantos-http-live-streaming-17#section-4.3.4
# 2. https://tools.ietf.org/html/draft-pantos-http-live-streaming-17#section-4.3.3
# 3. https://tools.ietf.org/html/draft-pantos-http-live-streaming-17#section-4.3.3.1
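        # Illustrative media playlist (hypothetical input):
        #   #EXTM3U
        #   #EXT-X-TARGETDURATION:10
        #   #EXTINF:9.009,
        #   http://example.com/first.ts
        # A master playlist would instead list #EXT-X-STREAM-INF variant
        # entries, which the loop below parses into separate formats.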
if '#EXT-X-TARGETDURATION' in m3u8_doc: # media playlist, return as is
return [{
'url': m3u8_url,
'format_id': m3u8_id,
'ext': ext,
'protocol': entry_protocol,
'preference': preference,
}]
last_info = None
last_media = None
for line in m3u8_doc.splitlines():
if line.startswith('#EXT-X-STREAM-INF:'):
last_info = parse_m3u8_attributes(line)
elif line.startswith('#EXT-X-MEDIA:'):
last_media = parse_m3u8_attributes(line)
elif line.startswith('#') or not line.strip():
continue
else:
if last_info is None:
formats.append({'url': format_url(line)})
continue
tbr = int_or_none(last_info.get('BANDWIDTH'), scale=1000)
format_id = []
if m3u8_id:
format_id.append(m3u8_id)
last_media_name = last_media.get('NAME') if last_media and last_media.get('TYPE') not in ('SUBTITLES', 'CLOSED-CAPTIONS') else None
                # Although the specification does not mention the NAME
                # attribute for EXT-X-STREAM-INF, it may still sometimes be
                # present.
stream_name = last_info.get('NAME') or last_media_name
                # The bandwidth of live streams may vary over time, making
                # format_id unpredictable, so it is better to keep the
                # provided format_id intact.
if not live:
format_id.append(stream_name if stream_name else '%d' % (tbr if tbr else len(formats)))
f = {
'format_id': '-'.join(format_id),
'url': format_url(line.strip()),
'tbr': tbr,
'ext': ext,
'fps': float_or_none(last_info.get('FRAME-RATE')),
'protocol': entry_protocol,
'preference': preference,
}
resolution = last_info.get('RESOLUTION')
if resolution:
width_str, height_str = resolution.split('x')
f['width'] = int(width_str)
f['height'] = int(height_str)
# Unified Streaming Platform
mobj = re.search(
r'audio.*?(?:%3D|=)(\d+)(?:-video.*?(?:%3D|=)(\d+))?', f['url'])
if mobj:
abr, vbr = mobj.groups()
abr, vbr = float_or_none(abr, 1000), float_or_none(vbr, 1000)
f.update({
'vbr': vbr,
'abr': abr,
})
f.update(parse_codecs(last_info.get('CODECS')))
if last_media is not None:
f['m3u8_media'] = last_media
last_media = None
formats.append(f)
last_info = {}
return formats
@staticmethod
def _xpath_ns(path, namespace=None):
if not namespace:
return path
out = []
for c in path.split('/'):
if not c or c == '.':
out.append(c)
else:
out.append('{%s}%s' % (namespace, c))
return '/'.join(out)
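    # Example (illustrative): _xpath_ns('./head/meta', 'http://www.w3.org/ns/SMIL')
    # returns './{http://www.w3.org/ns/SMIL}head/{http://www.w3.org/ns/SMIL}meta',
    # the fully qualified form ElementTree expects for namespaced lookups.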
def _extract_smil_formats(self, smil_url, video_id, fatal=True, f4m_params=None, transform_source=None):
smil = self._download_smil(smil_url, video_id, fatal=fatal, transform_source=transform_source)
if smil is False:
assert not fatal
return []
namespace = self._parse_smil_namespace(smil)
return self._parse_smil_formats(
smil, smil_url, video_id, namespace=namespace, f4m_params=f4m_params)
def _extract_smil_info(self, smil_url, video_id, fatal=True, f4m_params=None):
smil = self._download_smil(smil_url, video_id, fatal=fatal)
if smil is False:
return {}
return self._parse_smil(smil, smil_url, video_id, f4m_params=f4m_params)
def _download_smil(self, smil_url, video_id, fatal=True, transform_source=None):
return self._download_xml(
smil_url, video_id, 'Downloading SMIL file',
'Unable to download SMIL file', fatal=fatal, transform_source=transform_source)
def _parse_smil(self, smil, smil_url, video_id, f4m_params=None):
namespace = self._parse_smil_namespace(smil)
formats = self._parse_smil_formats(
smil, smil_url, video_id, namespace=namespace, f4m_params=f4m_params)
subtitles = self._parse_smil_subtitles(smil, namespace=namespace)
video_id = os.path.splitext(url_basename(smil_url))[0]
title = None
description = None
upload_date = None
for meta in smil.findall(self._xpath_ns('./head/meta', namespace)):
name = meta.attrib.get('name')
content = meta.attrib.get('content')
if not name or not content:
continue
if not title and name == 'title':
title = content
elif not description and name in ('description', 'abstract'):
description = content
elif not upload_date and name == 'date':
upload_date = unified_strdate(content)
thumbnails = [{
'id': image.get('type'),
'url': image.get('src'),
'width': int_or_none(image.get('width')),
'height': int_or_none(image.get('height')),
} for image in smil.findall(self._xpath_ns('.//image', namespace)) if image.get('src')]
return {
'id': video_id,
'title': title or video_id,
'description': description,
'upload_date': upload_date,
'thumbnails': thumbnails,
'formats': formats,
'subtitles': subtitles,
}
def _parse_smil_namespace(self, smil):
return self._search_regex(
r'(?i)^{([^}]+)?}smil$', smil.tag, 'namespace', default=None)
def _parse_smil_formats(self, smil, smil_url, video_id, namespace=None, f4m_params=None, transform_rtmp_url=None):
base = smil_url
for meta in smil.findall(self._xpath_ns('./head/meta', namespace)):
b = meta.get('base') or meta.get('httpBase')
if b:
base = b
break
formats = []
rtmp_count = 0
http_count = 0
m3u8_count = 0
srcs = []
media = smil.findall(self._xpath_ns('.//video', namespace)) + smil.findall(self._xpath_ns('.//audio', namespace))
for medium in media:
src = medium.get('src')
if not src or src in srcs:
continue
srcs.append(src)
bitrate = float_or_none(medium.get('system-bitrate') or medium.get('systemBitrate'), 1000)
filesize = int_or_none(medium.get('size') or medium.get('fileSize'))
width = int_or_none(medium.get('width'))
height = int_or_none(medium.get('height'))
proto = medium.get('proto')
ext = medium.get('ext')
src_ext = determine_ext(src)
streamer = medium.get('streamer') or base
if proto == 'rtmp' or streamer.startswith('rtmp'):
rtmp_count += 1
formats.append({
'url': streamer,
'play_path': src,
'ext': 'flv',
'format_id': 'rtmp-%d' % (rtmp_count if bitrate is None else bitrate),
'tbr': bitrate,
'filesize': filesize,
'width': width,
'height': height,
})
if transform_rtmp_url:
streamer, src = transform_rtmp_url(streamer, src)
formats[-1].update({
'url': streamer,
'play_path': src,
})
continue
src_url = src if src.startswith('http') else compat_urlparse.urljoin(base, src)
src_url = src_url.strip()
if proto == 'm3u8' or src_ext == 'm3u8':
m3u8_formats = self._extract_m3u8_formats(
src_url, video_id, ext or 'mp4', m3u8_id='hls', fatal=False)
if len(m3u8_formats) == 1:
m3u8_count += 1
m3u8_formats[0].update({
'format_id': 'hls-%d' % (m3u8_count if bitrate is None else bitrate),
'tbr': bitrate,
'width': width,
'height': height,
})
formats.extend(m3u8_formats)
continue
if src_ext == 'f4m':
f4m_url = src_url
if not f4m_params:
f4m_params = {
'hdcore': '3.2.0',
'plugin': 'flowplayer-3.2.0.1',
}
f4m_url += '&' if '?' in f4m_url else '?'
f4m_url += compat_urllib_parse_urlencode(f4m_params)
formats.extend(self._extract_f4m_formats(f4m_url, video_id, f4m_id='hds', fatal=False))
continue
if src_url.startswith('http') and self._is_valid_url(src, video_id):
http_count += 1
formats.append({
'url': src_url,
'ext': ext or src_ext or 'flv',
'format_id': 'http-%d' % (bitrate or http_count),
'tbr': bitrate,
'filesize': filesize,
'width': width,
'height': height,
})
continue
return formats
def _parse_smil_subtitles(self, smil, namespace=None, subtitles_lang='en'):
urls = []
subtitles = {}
for num, textstream in enumerate(smil.findall(self._xpath_ns('.//textstream', namespace))):
src = textstream.get('src')
if not src or src in urls:
continue
urls.append(src)
ext = textstream.get('ext') or mimetype2ext(textstream.get('type')) or determine_ext(src)
lang = textstream.get('systemLanguage') or textstream.get('systemLanguageName') or textstream.get('lang') or subtitles_lang
subtitles.setdefault(lang, []).append({
'url': src,
'ext': ext,
})
return subtitles
def _extract_xspf_playlist(self, playlist_url, playlist_id, fatal=True):
xspf = self._download_xml(
            playlist_url, playlist_id, 'Downloading xspf playlist',
'Unable to download xspf manifest', fatal=fatal)
if xspf is False:
return []
return self._parse_xspf(xspf, playlist_id)
def _parse_xspf(self, playlist, playlist_id):
NS_MAP = {
'xspf': 'http://xspf.org/ns/0/',
's1': 'http://static.streamone.nl/player/ns/0',
}
entries = []
for track in playlist.findall(xpath_with_ns('./xspf:trackList/xspf:track', NS_MAP)):
title = xpath_text(
track, xpath_with_ns('./xspf:title', NS_MAP), 'title', default=playlist_id)
description = xpath_text(
track, xpath_with_ns('./xspf:annotation', NS_MAP), 'description')
thumbnail = xpath_text(
track, xpath_with_ns('./xspf:image', NS_MAP), 'thumbnail')
duration = float_or_none(
xpath_text(track, xpath_with_ns('./xspf:duration', NS_MAP), 'duration'), 1000)
formats = [{
'url': location.text,
'format_id': location.get(xpath_with_ns('s1:label', NS_MAP)),
'width': int_or_none(location.get(xpath_with_ns('s1:width', NS_MAP))),
'height': int_or_none(location.get(xpath_with_ns('s1:height', NS_MAP))),
} for location in track.findall(xpath_with_ns('./xspf:location', NS_MAP))]
self._sort_formats(formats)
entries.append({
'id': playlist_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'duration': duration,
'formats': formats,
})
return entries
def _extract_mpd_formats(self, mpd_url, video_id, mpd_id=None, note=None, errnote=None, fatal=True, formats_dict={}):
res = self._download_webpage_handle(
mpd_url, video_id,
note=note or 'Downloading MPD manifest',
errnote=errnote or 'Failed to download MPD manifest',
fatal=fatal)
if res is False:
return []
mpd, urlh = res
mpd_base_url = re.match(r'https?://.+/', urlh.geturl()).group()
return self._parse_mpd_formats(
compat_etree_fromstring(mpd.encode('utf-8')), mpd_id, mpd_base_url, formats_dict=formats_dict)
def _parse_mpd_formats(self, mpd_doc, mpd_id=None, mpd_base_url='', formats_dict={}):
if mpd_doc.get('type') == 'dynamic':
return []
namespace = self._search_regex(r'(?i)^{([^}]+)?}MPD$', mpd_doc.tag, 'namespace', default=None)
def _add_ns(path):
return self._xpath_ns(path, namespace)
def is_drm_protected(element):
return element.find(_add_ns('ContentProtection')) is not None
def extract_multisegment_info(element, ms_parent_info):
ms_info = ms_parent_info.copy()
segment_list = element.find(_add_ns('SegmentList'))
if segment_list is not None:
segment_urls_e = segment_list.findall(_add_ns('SegmentURL'))
if segment_urls_e:
ms_info['segment_urls'] = [segment.attrib['media'] for segment in segment_urls_e]
initialization = segment_list.find(_add_ns('Initialization'))
if initialization is not None:
ms_info['initialization_url'] = initialization.attrib['sourceURL']
else:
segment_template = element.find(_add_ns('SegmentTemplate'))
if segment_template is not None:
start_number = segment_template.get('startNumber')
if start_number:
ms_info['start_number'] = int(start_number)
segment_timeline = segment_template.find(_add_ns('SegmentTimeline'))
if segment_timeline is not None:
s_e = segment_timeline.findall(_add_ns('S'))
if s_e:
ms_info['total_number'] = 0
for s in s_e:
ms_info['total_number'] += 1 + int(s.get('r', '0'))
else:
timescale = segment_template.get('timescale')
if timescale:
ms_info['timescale'] = int(timescale)
segment_duration = segment_template.get('duration')
if segment_duration:
ms_info['segment_duration'] = int(segment_duration)
media_template = segment_template.get('media')
if media_template:
ms_info['media_template'] = media_template
initialization = segment_template.get('initialization')
if initialization:
ms_info['initialization_url'] = initialization
else:
initialization = segment_template.find(_add_ns('Initialization'))
if initialization is not None:
ms_info['initialization_url'] = initialization.attrib['sourceURL']
return ms_info
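        # ms_info is a plain dict merged child-over-parent across
        # Period/AdaptationSet/Representation; depending on the manifest it
        # may carry 'segment_urls', 'initialization_url', 'media_template',
        # 'start_number', 'timescale', 'segment_duration' and 'total_number'.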
mpd_duration = parse_duration(mpd_doc.get('mediaPresentationDuration'))
formats = []
for period in mpd_doc.findall(_add_ns('Period')):
period_duration = parse_duration(period.get('duration')) or mpd_duration
period_ms_info = extract_multisegment_info(period, {
'start_number': 1,
'timescale': 1,
})
for adaptation_set in period.findall(_add_ns('AdaptationSet')):
if is_drm_protected(adaptation_set):
continue
adaption_set_ms_info = extract_multisegment_info(adaptation_set, period_ms_info)
for representation in adaptation_set.findall(_add_ns('Representation')):
if is_drm_protected(representation):
continue
representation_attrib = adaptation_set.attrib.copy()
representation_attrib.update(representation.attrib)
                    # According to page 41 of ISO/IEC 23009-1:2014 (the DASH
                    # spec), @mimeType is mandatory
mime_type = representation_attrib['mimeType']
content_type = mime_type.split('/')[0]
if content_type == 'text':
# TODO implement WebVTT downloading
pass
elif content_type == 'video' or content_type == 'audio':
base_url = ''
for element in (representation, adaptation_set, period, mpd_doc):
base_url_e = element.find(_add_ns('BaseURL'))
if base_url_e is not None:
base_url = base_url_e.text + base_url
if re.match(r'^https?://', base_url):
break
if mpd_base_url and not re.match(r'^https?://', base_url):
if not mpd_base_url.endswith('/') and not base_url.startswith('/'):
mpd_base_url += '/'
base_url = mpd_base_url + base_url
representation_id = representation_attrib.get('id')
lang = representation_attrib.get('lang')
url_el = representation.find(_add_ns('BaseURL'))
filesize = int_or_none(url_el.attrib.get('{http://youtube.com/yt/2012/10/10}contentLength') if url_el is not None else None)
f = {
'format_id': '%s-%s' % (mpd_id, representation_id) if mpd_id else representation_id,
'url': base_url,
'ext': mimetype2ext(mime_type),
'width': int_or_none(representation_attrib.get('width')),
'height': int_or_none(representation_attrib.get('height')),
'tbr': int_or_none(representation_attrib.get('bandwidth'), 1000),
'asr': int_or_none(representation_attrib.get('audioSamplingRate')),
'fps': int_or_none(representation_attrib.get('frameRate')),
'vcodec': 'none' if content_type == 'audio' else representation_attrib.get('codecs'),
'acodec': 'none' if content_type == 'video' else representation_attrib.get('codecs'),
'language': lang if lang not in ('mul', 'und', 'zxx', 'mis') else None,
'format_note': 'DASH %s' % content_type,
'filesize': filesize,
}
representation_ms_info = extract_multisegment_info(representation, adaption_set_ms_info)
if 'segment_urls' not in representation_ms_info and 'media_template' in representation_ms_info:
                        if 'total_number' not in representation_ms_info and 'segment_duration' in representation_ms_info:
segment_duration = float(representation_ms_info['segment_duration']) / float(representation_ms_info['timescale'])
representation_ms_info['total_number'] = int(math.ceil(float(period_duration) / segment_duration))
media_template = representation_ms_info['media_template']
media_template = media_template.replace('$RepresentationID$', representation_id)
media_template = re.sub(r'\$(Number|Bandwidth)\$', r'%(\1)d', media_template)
media_template = re.sub(r'\$(Number|Bandwidth)%([^$]+)\$', r'%(\1)\2', media_template)
                            media_template = media_template.replace('$$', '$')
representation_ms_info['segment_urls'] = [
media_template % {
'Number': segment_number,
'Bandwidth': representation_attrib.get('bandwidth')}
for segment_number in range(
representation_ms_info['start_number'],
representation_ms_info['total_number'] + representation_ms_info['start_number'])]
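                            # Template expansion sketch (hypothetical values):
                            # 'seg-$RepresentationID$-$Number%05d$.m4s' becomes
                            # the Python format 'seg-video1-%(Number)05d.m4s',
                            # which then expands to seg-video1-00001.m4s,
                            # seg-video1-00002.m4s, ... per segment number.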
if 'segment_urls' in representation_ms_info:
f.update({
'segment_urls': representation_ms_info['segment_urls'],
'protocol': 'http_dash_segments',
})
if 'initialization_url' in representation_ms_info:
initialization_url = representation_ms_info['initialization_url'].replace('$RepresentationID$', representation_id)
f.update({
'initialization_url': initialization_url,
})
if not f.get('url'):
f['url'] = initialization_url
try:
existing_format = next(
fo for fo in formats
if fo['format_id'] == representation_id)
except StopIteration:
full_info = formats_dict.get(representation_id, {}).copy()
full_info.update(f)
formats.append(full_info)
else:
existing_format.update(f)
else:
self.report_warning('Unknown MIME type %s in DASH manifest' % mime_type)
return formats
def _parse_html5_media_entries(self, base_url, webpage):
def absolute_url(video_url):
return compat_urlparse.urljoin(base_url, video_url)
def parse_content_type(content_type):
if not content_type:
return {}
ctr = re.search(r'(?P<mimetype>[^/]+/[^;]+)(?:;\s*codecs="?(?P<codecs>[^"]+))?', content_type)
if ctr:
mimetype, codecs = ctr.groups()
f = parse_codecs(codecs)
f['ext'] = mimetype2ext(mimetype)
return f
return {}
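        # For example (illustrative): parse_content_type(
        #     'video/mp4; codecs="avc1.42E01E, mp4a.40.2"')
        # yields roughly {'ext': 'mp4', 'vcodec': 'avc1.42E01E',
        # 'acodec': 'mp4a.40.2'}, with the codec keys supplied by parse_codecs.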
entries = []
for media_tag, media_type, media_content in re.findall(r'(?s)(<(?P<tag>video|audio)[^>]*>)(.*?)</(?P=tag)>', webpage):
media_info = {
'formats': [],
'subtitles': {},
}
media_attributes = extract_attributes(media_tag)
src = media_attributes.get('src')
if src:
media_info['formats'].append({
'url': absolute_url(src),
'vcodec': 'none' if media_type == 'audio' else None,
})
media_info['thumbnail'] = media_attributes.get('poster')
if media_content:
for source_tag in re.findall(r'<source[^>]+>', media_content):
source_attributes = extract_attributes(source_tag)
src = source_attributes.get('src')
if not src:
continue
f = parse_content_type(source_attributes.get('type'))
f.update({
'url': absolute_url(src),
'vcodec': 'none' if media_type == 'audio' else None,
})
media_info['formats'].append(f)
for track_tag in re.findall(r'<track[^>]+>', media_content):
track_attributes = extract_attributes(track_tag)
kind = track_attributes.get('kind')
if not kind or kind == 'subtitles':
src = track_attributes.get('src')
if not src:
continue
lang = track_attributes.get('srclang') or track_attributes.get('lang') or track_attributes.get('label')
media_info['subtitles'].setdefault(lang, []).append({
'url': absolute_url(src),
})
if media_info['formats']:
entries.append(media_info)
return entries
def _live_title(self, name):
""" Generate the title for a live video """
now = datetime.datetime.now()
now_str = now.strftime('%Y-%m-%d %H:%M')
return name + ' ' + now_str
def _int(self, v, name, fatal=False, **kwargs):
res = int_or_none(v, **kwargs)
if 'get_attr' in kwargs:
print(getattr(v, kwargs['get_attr']))
if res is None:
msg = 'Failed to extract %s: Could not parse value %r' % (name, v)
if fatal:
raise ExtractorError(msg)
else:
self._downloader.report_warning(msg)
return res
def _float(self, v, name, fatal=False, **kwargs):
res = float_or_none(v, **kwargs)
if res is None:
msg = 'Failed to extract %s: Could not parse value %r' % (name, v)
if fatal:
raise ExtractorError(msg)
else:
self._downloader.report_warning(msg)
return res
def _set_cookie(self, domain, name, value, expire_time=None):
cookie = compat_cookiejar.Cookie(
0, name, value, None, None, domain, None,
None, '/', True, False, expire_time, '', None, None, None)
self._downloader.cookiejar.set_cookie(cookie)
def _get_cookies(self, url):
""" Return a compat_cookies.SimpleCookie with the cookies for the url """
req = sanitized_Request(url)
self._downloader.cookiejar.add_cookie_header(req)
return compat_cookies.SimpleCookie(req.get_header('Cookie'))
def get_testcases(self, include_onlymatching=False):
t = getattr(self, '_TEST', None)
if t:
assert not hasattr(self, '_TESTS'), \
'%s has _TEST and _TESTS' % type(self).__name__
tests = [t]
else:
tests = getattr(self, '_TESTS', [])
for t in tests:
if not include_onlymatching and t.get('only_matching', False):
continue
t['name'] = type(self).__name__[:-len('IE')]
yield t
def is_suitable(self, age_limit):
""" Test whether the extractor is generally suitable for the given
age limit (i.e. pornographic sites are not, all others usually are) """
any_restricted = False
for tc in self.get_testcases(include_onlymatching=False):
if 'playlist' in tc:
tc = tc['playlist'][0]
is_restricted = age_restricted(
tc.get('info_dict', {}).get('age_limit'), age_limit)
if not is_restricted:
return True
any_restricted = any_restricted or is_restricted
return not any_restricted
def extract_subtitles(self, *args, **kwargs):
if (self._downloader.params.get('writesubtitles', False) or
self._downloader.params.get('listsubtitles')):
return self._get_subtitles(*args, **kwargs)
return {}
def _get_subtitles(self, *args, **kwargs):
raise NotImplementedError('This method must be implemented by subclasses')
@staticmethod
def _merge_subtitle_items(subtitle_list1, subtitle_list2):
""" Merge subtitle items for one language. Items with duplicated URLs
will be dropped. """
list1_urls = set([item['url'] for item in subtitle_list1])
ret = list(subtitle_list1)
ret.extend([item for item in subtitle_list2 if item['url'] not in list1_urls])
return ret
@classmethod
def _merge_subtitles(cls, subtitle_dict1, subtitle_dict2):
""" Merge two subtitle dictionaries, language by language. """
ret = dict(subtitle_dict1)
for lang in subtitle_dict2:
ret[lang] = cls._merge_subtitle_items(subtitle_dict1.get(lang, []), subtitle_dict2[lang])
return ret
def extract_automatic_captions(self, *args, **kwargs):
if (self._downloader.params.get('writeautomaticsub', False) or
self._downloader.params.get('listsubtitles')):
return self._get_automatic_captions(*args, **kwargs)
return {}
def _get_automatic_captions(self, *args, **kwargs):
raise NotImplementedError('This method must be implemented by subclasses')
def mark_watched(self, *args, **kwargs):
if (self._downloader.params.get('mark_watched', False) and
(self._get_login_info()[0] is not None or
self._downloader.params.get('cookiefile') is not None)):
self._mark_watched(*args, **kwargs)
def _mark_watched(self, *args, **kwargs):
raise NotImplementedError('This method must be implemented by subclasses')
def geo_verification_headers(self):
headers = {}
geo_verification_proxy = self._downloader.params.get('geo_verification_proxy')
if geo_verification_proxy:
headers['Ytdl-request-proxy'] = geo_verification_proxy
return headers
class SearchInfoExtractor(InfoExtractor):
"""
Base class for paged search queries extractors.
    They accept URLs in the format _SEARCH_KEY(|all|[1-9][0-9]*):{query}
Instances should define _SEARCH_KEY and _MAX_RESULTS.
"""
@classmethod
def _make_valid_url(cls):
return r'%s(?P<prefix>|[1-9][0-9]*|all):(?P<query>[\s\S]+)' % cls._SEARCH_KEY
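    # With _SEARCH_KEY = 'ytsearch' (hypothetical value), this pattern
    # matches 'ytsearch:cats' (one result), 'ytsearch5:cats' (five) and
    # 'ytsearchall:cats' (up to _MAX_RESULTS), mirroring _real_extract below.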
@classmethod
def suitable(cls, url):
return re.match(cls._make_valid_url(), url) is not None
def _real_extract(self, query):
mobj = re.match(self._make_valid_url(), query)
if mobj is None:
raise ExtractorError('Invalid search query "%s"' % query)
prefix = mobj.group('prefix')
query = mobj.group('query')
if prefix == '':
return self._get_n_results(query, 1)
elif prefix == 'all':
return self._get_n_results(query, self._MAX_RESULTS)
else:
n = int(prefix)
if n <= 0:
raise ExtractorError('invalid download number %s for query "%s"' % (n, query))
elif n > self._MAX_RESULTS:
self._downloader.report_warning('%s returns max %i results (you requested %i)' % (self._SEARCH_KEY, self._MAX_RESULTS, n))
n = self._MAX_RESULTS
return self._get_n_results(query, n)
def _get_n_results(self, query, n):
"""Get a specified number of results for a query"""
raise NotImplementedError('This method must be implemented by subclasses')
@property
def SEARCH_KEY(self):
return self._SEARCH_KEY
|
mdmdmdmdmd/pyyt
|
youtube_dl/extractor/common.py
|
Python
|
gpl-2.0
| 84,820
|
"""
Existing users can request to change their email address by
PUTing to ``/-/email/``:
.. testsetup::
>>> _ = getfixture('db_session')
>>> Principal = getfixture('principals').Principal
>>> _ = Principal(email=u'alice@foo.com', password=u'alice',
... firstname=u'Alice', lastname=u'Kingsleigh')
>>> browser = getfixture('browser')
>>> browser.put_json('http://example.com/-/login', {
... "login": "alice@foo.com",
... "password": "alice"
... }).json['status']
u'success'
.. doctest::
>>> browser.put_json('http://example.com/-/email/', {
... "email": "alice@bar.com",
... "password": "alice"
... }).json['status']
u'success'
"""
from colander import MappingSchema, SchemaNode, String
from colander import All, Email
from cornice.service import Service
from pyramid.httpexceptions import HTTPForbidden
from pyramid_mailer import get_mailer
from .. import _, security, utils, path
from . import signup, change_password, user_factory
class Schema(MappingSchema):
email = SchemaNode(String(), title=u'Email',
validator=All(Email(), signup.email_not_registered))
password = SchemaNode(String(), title=u'Current password',
validator=change_password.validate_current_password)
service = Service(name='email-change', path=path('email/{token:.*}'),
factory=user_factory)
def make_token(user, email):
tokenizer = security.make_tokenizer(salt=service.name)
return tokenizer((user.id.hex, email))
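# The token serializes the pair (user id hex, new email); change_email below
# deserializes it and applies the change only when the embedded id matches
# the authenticated user. (The exact tokenizer comes from the project's
# security module and is assumed to round-trip tuples.)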
def send_confirmation_mail(user, email, request):
url = request.route_url(service.name, token=make_token(user, email))
message = utils.render_mail(request=request, template='email_change',
recipients=[email], subject=_('Confirm your email address'),
data=dict(user=user, url=url, email=email))
get_mailer(request).send(message)
@service.put(schema=Schema, accept='application/json', permission='edit')
def request_email_change(request):
user = request.user
email = request.validated['email']
if not email == user.email:
send_confirmation_mail(user, email, request)
return dict(status='success')
@service.get(permission='edit')
def change_email(request):
factory = security.make_factory(salt=service.name)
id, email = factory(request) # token payload is (id, email)
user = request.user
if user is None or not user.id.hex == id:
raise HTTPForbidden
user.update(email=email)
return request.redirect(target='change_email.success')
|
pyfidelity/rest-seed
|
backend/backrest/views/change_email.py
|
Python
|
bsd-2-clause
| 2,534
|
import sys
import os.path
import setuptools # Fix distutils issues
from cffi import FFI
ffi = FFI()
mod_name = 'instrumental.drivers.cameras._pixelfly.errortext'
if sys.platform.startswith('win'):
ffi.set_source(mod_name, """
#define PCO_ERR_H_CREATE_OBJECT
#define PCO_ERRT_H_CREATE_OBJECT
#include <windows.h>
#include "PCO_errt.h"
""", include_dirs=[os.path.dirname(__file__)])
ffi.cdef("void PCO_GetErrorText(DWORD dwerr, char* pbuf, DWORD dwlen);")
else:
ffi.set_source(mod_name, '')
if __name__ == '__main__':
ffi.compile()
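# Usage sketch (assumption, not verified against the driver): after running
# this build script on Windows, the generated extension can be imported as
#   from instrumental.drivers.cameras._pixelfly import errortext
# and PCO error codes translated via errortext.lib.PCO_GetErrorText(...).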
|
mabuchilab/Instrumental
|
instrumental/drivers/cameras/_pixelfly/_cffi_build/build_errortext.py
|
Python
|
gpl-3.0
| 586
|
"""External routes."""
from .auth import login, logout, get_public_key
from .changelog import (
get_changelog,
get_metric_changelog,
get_subject_changelog,
get_source_changelog,
get_report_changelog,
)
from .datamodel import get_data_model
from .documentation import get_api
from .logo import get_logo
from .measurement import get_measurements, set_entity_attribute, stream_nr_measurements
from .metric import delete_metric, post_metric_attribute, post_metric_copy, post_metric_new, post_move_metric
from .notification import (
delete_notification_destination,
post_new_notification_destination,
post_notification_destination_attributes,
)
from .report import (
delete_report,
export_report_as_json,
export_report_as_pdf,
get_report,
post_report_import,
post_report_copy,
post_report_attribute,
post_report_issue_tracker_attribute,
post_report_new,
)
from .reports_overview import get_reports_overview, get_reports, post_reports_overview_attribute
from .server import get_server, QUALITY_TIME_VERSION
from .source import (
delete_source,
post_move_source,
post_source_attribute,
post_source_copy,
post_source_new,
post_source_parameter,
)
from .subject import (
delete_subject,
post_move_subject,
post_new_subject,
post_subject_attribute,
post_subject_copy,
get_subject_measurements,
)
|
ICTU/quality-time
|
components/server/src/external/routes/__init__.py
|
Python
|
apache-2.0
| 1,403
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from flask import Blueprint, redirect, url_for
routes = Blueprint('routes', __name__)
@routes.route('/')
def index():
return redirect(url_for('Airflow.index'))
|
wileeam/airflow
|
airflow/www/blueprints.py
|
Python
|
apache-2.0
| 956
|
# Copyright 2016 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
import nova.scheduler.utils
import nova.servicegroup
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.functional.api import client
from nova.tests.unit import cast_as_call
import nova.tests.unit.image.fake
from nova.tests.unit import policy_fixture
class TestServerGet(test.TestCase):
REQUIRES_LOCKING = True
def setUp(self):
super(TestServerGet, self).setUp()
self.useFixture(policy_fixture.RealPolicyFixture())
self.useFixture(nova_fixtures.NeutronFixture(self))
api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
api_version='v2.1'))
# The non-admin API client is fine to stay at 2.1 since it just creates
# and deletes the server.
self.api = api_fixture.api
self.admin_api = api_fixture.admin_api
# The admin API client needs to be at microversion 2.16 to exhibit the
# regression.
self.admin_api.microversion = '2.16'
# the image fake backend needed for image discovery
nova.tests.unit.image.fake.stub_out_image_service(self)
self.start_service('conductor')
self.flags(driver='chance_scheduler', group='scheduler')
self.start_service('scheduler')
self.compute = self.start_service('compute')
self.consoleauth = self.start_service('consoleauth')
self.useFixture(cast_as_call.CastAsCall(self.stubs))
self.addCleanup(nova.tests.unit.image.fake.FakeImageService_reset)
self.image_id = self.api.get_images()[0]['id']
self.flavor_id = self.api.get_flavors()[0]['id']
def test_list_deleted_instances(self):
"""Regression test for bug #1548980.
Before fixing this bug, listing deleted instances returned a 404
because lazy-loading services from a deleted instance failed. Now
we should be able to list the deleted instance and the host_state
attribute should be "".
"""
server = dict(name='server1',
imageRef=self.image_id,
flavorRef=self.flavor_id)
server = self.api.post_server({'server': server})
self.api.delete_server(server['id'])
# Wait 30 seconds for it to be gone.
for x in range(30):
try:
self.api.get_server(server['id'])
time.sleep(1)
except client.OpenStackApiNotFoundException:
break
else:
self.fail('Timed out waiting to delete server: %s' % server['id'])
servers = self.admin_api.get_servers(search_opts={'deleted': 1})
self.assertEqual(1, len(servers))
self.assertEqual(server['id'], servers[0]['id'])
        # host_status is returned in the 2.16 microversion and, since the
        # server is deleted, it should be the empty string.
self.assertEqual(0, len(servers[0]['host_status']))
|
hanlind/nova
|
nova/tests/functional/regressions/test_bug_1548980.py
|
Python
|
apache-2.0
| 3,498
|
# This would need MEMEX VPN identification
#imagenet_mean_npy_urldlpath = "https://isi.memexproxy.com/datacuimgsearch/sentibank/imagenet_mean.npy"
#tfsentibank_npy_urldlpath = "https://isi.memexproxy.com/datacuimgsearch/sentibank/tfdeepsentibank.npy"
imagenet_mean_npy_urldlpath = "https://www.dropbox.com/s/s5oqp801tgiktra/imagenet_mean.npy?dl=1"
tfsentibank_npy_urldlpath = "https://www.dropbox.com/s/3d938qmtm6kngoo/tfdeepsentibank.npy?dl=1"
|
svebk/DeepSentiBank_memex
|
cu_image_search/feature_extractor/__init__.py
|
Python
|
bsd-2-clause
| 445
|
# -*- coding: utf-8 -*-
"""
pygments.lexers.factor
~~~~~~~~~~~~~~~~~~~~~~
Lexers for the Factor language.
:copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, bygroups, default, words
from pygments.token import Text, Comment, Keyword, Name, String, Number
__all__ = ['FactorLexer']
class FactorLexer(RegexLexer):
"""
Lexer for the `Factor <http://factorcode.org>`_ language.
.. versionadded:: 1.4
"""
name = 'Factor'
aliases = ['factor']
filenames = ['*.factor']
mimetypes = ['text/x-factor']
flags = re.MULTILINE | re.UNICODE
builtin_kernel = words((
'-rot', '2bi', '2bi@', '2bi*', '2curry', '2dip', '2drop', '2dup', '2keep', '2nip',
'2over', '2tri', '2tri@', '2tri*', '3bi', '3curry', '3dip', '3drop', '3dup', '3keep',
'3tri', '4dip', '4drop', '4dup', '4keep', '<wrapper>', '=', '>boolean', 'clone',
'?', '?execute', '?if', 'and', 'assert', 'assert=', 'assert?', 'bi', 'bi-curry',
'bi-curry@', 'bi-curry*', 'bi@', 'bi*', 'boa', 'boolean', 'boolean?', 'both?',
'build', 'call', 'callstack', 'callstack>array', 'callstack?', 'clear', '(clone)',
'compose', 'compose?', 'curry', 'curry?', 'datastack', 'die', 'dip', 'do', 'drop',
'dup', 'dupd', 'either?', 'eq?', 'equal?', 'execute', 'hashcode', 'hashcode*',
'identity-hashcode', 'identity-tuple', 'identity-tuple?', 'if', 'if*',
'keep', 'loop', 'most', 'new', 'nip', 'not', 'null', 'object', 'or', 'over',
'pick', 'prepose', 'retainstack', 'rot', 'same?', 'swap', 'swapd', 'throw',
'tri', 'tri-curry', 'tri-curry@', 'tri-curry*', 'tri@', 'tri*', 'tuple',
'tuple?', 'unless', 'unless*', 'until', 'when', 'when*', 'while', 'with',
'wrapper', 'wrapper?', 'xor'), suffix=r'\s')
builtin_assocs = words((
'2cache', '<enum>', '>alist', '?at', '?of', 'assoc', 'assoc-all?',
'assoc-any?', 'assoc-clone-like', 'assoc-combine', 'assoc-diff',
'assoc-diff!', 'assoc-differ', 'assoc-each', 'assoc-empty?',
'assoc-filter', 'assoc-filter!', 'assoc-filter-as', 'assoc-find',
'assoc-hashcode', 'assoc-intersect', 'assoc-like', 'assoc-map',
'assoc-map-as', 'assoc-partition', 'assoc-refine', 'assoc-size',
'assoc-stack', 'assoc-subset?', 'assoc-union', 'assoc-union!',
'assoc=', 'assoc>map', 'assoc?', 'at', 'at+', 'at*', 'cache', 'change-at',
'clear-assoc', 'delete-at', 'delete-at*', 'enum', 'enum?', 'extract-keys',
'inc-at', 'key?', 'keys', 'map>assoc', 'maybe-set-at', 'new-assoc', 'of',
'push-at', 'rename-at', 'set-at', 'sift-keys', 'sift-values', 'substitute',
'unzip', 'value-at', 'value-at*', 'value?', 'values', 'zip'), suffix=r'\s')
builtin_combinators = words((
'2cleave', '2cleave>quot', '3cleave', '3cleave>quot', '4cleave',
'4cleave>quot', 'alist>quot', 'call-effect', 'case', 'case-find',
'case>quot', 'cleave', 'cleave>quot', 'cond', 'cond>quot', 'deep-spread>quot',
'execute-effect', 'linear-case-quot', 'no-case', 'no-case?', 'no-cond',
'no-cond?', 'recursive-hashcode', 'shallow-spread>quot', 'spread',
'to-fixed-point', 'wrong-values', 'wrong-values?'), suffix=r'\s')
builtin_math = words((
'-', '/', '/f', '/i', '/mod', '2/', '2^', '<', '<=', '<fp-nan>', '>',
'>=', '>bignum', '>fixnum', '>float', '>integer', '(all-integers?)',
'(each-integer)', '(find-integer)', '*', '+', '?1+',
'abs', 'align', 'all-integers?', 'bignum', 'bignum?', 'bit?', 'bitand',
'bitnot', 'bitor', 'bits>double', 'bits>float', 'bitxor', 'complex',
'complex?', 'denominator', 'double>bits', 'each-integer', 'even?',
'find-integer', 'find-last-integer', 'fixnum', 'fixnum?', 'float',
'float>bits', 'float?', 'fp-bitwise=', 'fp-infinity?', 'fp-nan-payload',
'fp-nan?', 'fp-qnan?', 'fp-sign', 'fp-snan?', 'fp-special?',
'if-zero', 'imaginary-part', 'integer', 'integer>fixnum',
'integer>fixnum-strict', 'integer?', 'log2', 'log2-expects-positive',
'log2-expects-positive?', 'mod', 'neg', 'neg?', 'next-float',
'next-power-of-2', 'number', 'number=', 'number?', 'numerator', 'odd?',
'out-of-fixnum-range', 'out-of-fixnum-range?', 'power-of-2?',
'prev-float', 'ratio', 'ratio?', 'rational', 'rational?', 'real',
'real-part', 'real?', 'recip', 'rem', 'sgn', 'shift', 'sq', 'times',
'u<', 'u<=', 'u>', 'u>=', 'unless-zero', 'unordered?', 'when-zero',
'zero?'), suffix=r'\s')
builtin_sequences = words((
'1sequence', '2all?', '2each', '2map', '2map-as', '2map-reduce', '2reduce',
'2selector', '2sequence', '3append', '3append-as', '3each', '3map', '3map-as',
'3sequence', '4sequence', '<repetition>', '<reversed>', '<slice>', '?first',
'?last', '?nth', '?second', '?set-nth', 'accumulate', 'accumulate!',
'accumulate-as', 'all?', 'any?', 'append', 'append!', 'append-as',
'assert-sequence', 'assert-sequence=', 'assert-sequence?',
'binary-reduce', 'bounds-check', 'bounds-check?', 'bounds-error',
'bounds-error?', 'but-last', 'but-last-slice', 'cartesian-each',
'cartesian-map', 'cartesian-product', 'change-nth', 'check-slice',
'check-slice-error', 'clone-like', 'collapse-slice', 'collector',
'collector-for', 'concat', 'concat-as', 'copy', 'count', 'cut', 'cut-slice',
'cut*', 'delete-all', 'delete-slice', 'drop-prefix', 'each', 'each-from',
'each-index', 'empty?', 'exchange', 'filter', 'filter!', 'filter-as', 'find',
'find-from', 'find-index', 'find-index-from', 'find-last', 'find-last-from',
'first', 'first2', 'first3', 'first4', 'flip', 'follow', 'fourth', 'glue', 'halves',
'harvest', 'head', 'head-slice', 'head-slice*', 'head*', 'head?',
'if-empty', 'immutable', 'immutable-sequence', 'immutable-sequence?',
'immutable?', 'index', 'index-from', 'indices', 'infimum', 'infimum-by',
'insert-nth', 'interleave', 'iota', 'iota-tuple', 'iota-tuple?', 'join',
'join-as', 'last', 'last-index', 'last-index-from', 'length', 'lengthen',
'like', 'longer', 'longer?', 'longest', 'map', 'map!', 'map-as', 'map-find',
'map-find-last', 'map-index', 'map-integers', 'map-reduce', 'map-sum',
'max-length', 'member-eq?', 'member?', 'midpoint@', 'min-length',
'mismatch', 'move', 'new-like', 'new-resizable', 'new-sequence',
'non-negative-integer-expected', 'non-negative-integer-expected?',
'nth', 'nths', 'pad-head', 'pad-tail', 'padding', 'partition', 'pop', 'pop*',
'prefix', 'prepend', 'prepend-as', 'produce', 'produce-as', 'product', 'push',
'push-all', 'push-either', 'push-if', 'reduce', 'reduce-index', 'remove',
'remove!', 'remove-eq', 'remove-eq!', 'remove-nth', 'remove-nth!', 'repetition',
'repetition?', 'replace-slice', 'replicate', 'replicate-as', 'rest',
'rest-slice', 'reverse', 'reverse!', 'reversed', 'reversed?', 'second',
'selector', 'selector-for', 'sequence', 'sequence-hashcode', 'sequence=',
'sequence?', 'set-first', 'set-fourth', 'set-last', 'set-length', 'set-nth',
'set-second', 'set-third', 'short', 'shorten', 'shorter', 'shorter?',
'shortest', 'sift', 'slice', 'slice-error', 'slice-error?', 'slice?',
'snip', 'snip-slice', 'start', 'start*', 'subseq', 'subseq?', 'suffix',
'suffix!', 'sum', 'sum-lengths', 'supremum', 'supremum-by', 'surround', 'tail',
'tail-slice', 'tail-slice*', 'tail*', 'tail?', 'third', 'trim',
'trim-head', 'trim-head-slice', 'trim-slice', 'trim-tail', 'trim-tail-slice',
'unclip', 'unclip-last', 'unclip-last-slice', 'unclip-slice', 'unless-empty',
'virtual-exemplar', 'virtual-sequence', 'virtual-sequence?', 'virtual@',
'when-empty'), suffix=r'\s')
builtin_namespaces = words((
'+@', 'change', 'change-global', 'counter', 'dec', 'get', 'get-global',
'global', 'inc', 'init-namespaces', 'initialize', 'is-global', 'make-assoc',
'namespace', 'namestack', 'off', 'on', 'set', 'set-global', 'set-namestack',
'toggle', 'with-global', 'with-scope', 'with-variable', 'with-variables'),
suffix=r'\s')
builtin_arrays = words((
'1array', '2array', '3array', '4array', '<array>', '>array', 'array',
'array?', 'pair', 'pair?', 'resize-array'), suffix=r'\s')
builtin_io = words((
'(each-stream-block-slice)', '(each-stream-block)',
'(stream-contents-by-block)', '(stream-contents-by-element)',
'(stream-contents-by-length-or-block)',
'(stream-contents-by-length)', '+byte+', '+character+',
'bad-seek-type', 'bad-seek-type?', 'bl', 'contents', 'each-block',
'each-block-size', 'each-block-slice', 'each-line', 'each-morsel',
'each-stream-block', 'each-stream-block-slice', 'each-stream-line',
'error-stream', 'flush', 'input-stream', 'input-stream?',
'invalid-read-buffer', 'invalid-read-buffer?', 'lines', 'nl',
'output-stream', 'output-stream?', 'print', 'read', 'read-into',
'read-partial', 'read-partial-into', 'read-until', 'read1', 'readln',
'seek-absolute', 'seek-absolute?', 'seek-end', 'seek-end?',
'seek-input', 'seek-output', 'seek-relative', 'seek-relative?',
'stream-bl', 'stream-contents', 'stream-contents*', 'stream-copy',
'stream-copy*', 'stream-element-type', 'stream-flush',
'stream-length', 'stream-lines', 'stream-nl', 'stream-print',
'stream-read', 'stream-read-into', 'stream-read-partial',
'stream-read-partial-into', 'stream-read-partial-unsafe',
'stream-read-unsafe', 'stream-read-until', 'stream-read1',
'stream-readln', 'stream-seek', 'stream-seekable?', 'stream-tell',
'stream-write', 'stream-write1', 'tell-input', 'tell-output',
'with-error-stream', 'with-error-stream*', 'with-error>output',
'with-input-output+error-streams',
'with-input-output+error-streams*', 'with-input-stream',
'with-input-stream*', 'with-output-stream', 'with-output-stream*',
'with-output>error', 'with-output+error-stream',
'with-output+error-stream*', 'with-streams', 'with-streams*',
'write', 'write1'), suffix=r'\s')
builtin_strings = words((
'1string', '<string>', '>string', 'resize-string', 'string',
'string?'), suffix=r'\s')
builtin_vectors = words((
'1vector', '<vector>', '>vector', '?push', 'vector', 'vector?'),
suffix=r'\s')
builtin_continuations = words((
'<condition>', '<continuation>', '<restart>', 'attempt-all',
'attempt-all-error', 'attempt-all-error?', 'callback-error-hook',
'callcc0', 'callcc1', 'cleanup', 'compute-restarts', 'condition',
'condition?', 'continuation', 'continuation?', 'continue',
'continue-restart', 'continue-with', 'current-continuation',
'error', 'error-continuation', 'error-in-thread', 'error-thread',
'ifcc', 'ignore-errors', 'in-callback?', 'original-error', 'recover',
'restart', 'restart?', 'restarts', 'rethrow', 'rethrow-restarts',
'return', 'return-continuation', 'thread-error-hook', 'throw-continue',
'throw-restarts', 'with-datastack', 'with-return'), suffix=r'\s')
tokens = {
'root': [
# factor allows a file to start with a shebang
(r'#!.*$', Comment.Preproc),
default('base'),
],
'base': [
(r'\s+', Text),
# defining words
(r'((?:MACRO|MEMO|TYPED)?:[:]?)(\s+)(\S+)',
bygroups(Keyword, Text, Name.Function)),
(r'(M:[:]?)(\s+)(\S+)(\s+)(\S+)',
bygroups(Keyword, Text, Name.Class, Text, Name.Function)),
(r'(C:)(\s+)(\S+)(\s+)(\S+)',
bygroups(Keyword, Text, Name.Function, Text, Name.Class)),
(r'(GENERIC:)(\s+)(\S+)',
bygroups(Keyword, Text, Name.Function)),
(r'(HOOK:|GENERIC#)(\s+)(\S+)(\s+)(\S+)',
bygroups(Keyword, Text, Name.Function, Text, Name.Function)),
(r'\(\s', Name.Function, 'stackeffect'),
(r';\s', Keyword),
# imports and namespaces
(r'(USING:)(\s+)',
bygroups(Keyword.Namespace, Text), 'vocabs'),
(r'(USE:|UNUSE:|IN:|QUALIFIED:)(\s+)(\S+)',
bygroups(Keyword.Namespace, Text, Name.Namespace)),
(r'(QUALIFIED-WITH:)(\s+)(\S+)(\s+)(\S+)',
bygroups(Keyword.Namespace, Text, Name.Namespace, Text, Name.Namespace)),
(r'(FROM:|EXCLUDE:)(\s+)(\S+)(\s+=>\s)',
bygroups(Keyword.Namespace, Text, Name.Namespace, Text), 'words'),
(r'(RENAME:)(\s+)(\S+)(\s+)(\S+)(\s+=>\s+)(\S+)',
bygroups(Keyword.Namespace, Text, Name.Function, Text, Name.Namespace, Text, Name.Function)),
(r'(ALIAS:|TYPEDEF:)(\s+)(\S+)(\s+)(\S+)',
bygroups(Keyword.Namespace, Text, Name.Function, Text, Name.Function)),
(r'(DEFER:|FORGET:|POSTPONE:)(\s+)(\S+)',
bygroups(Keyword.Namespace, Text, Name.Function)),
# tuples and classes
(r'(TUPLE:|ERROR:)(\s+)(\S+)(\s+<\s+)(\S+)',
bygroups(Keyword, Text, Name.Class, Text, Name.Class), 'slots'),
(r'(TUPLE:|ERROR:|BUILTIN:)(\s+)(\S+)',
bygroups(Keyword, Text, Name.Class), 'slots'),
(r'(MIXIN:|UNION:|INTERSECTION:)(\s+)(\S+)',
bygroups(Keyword, Text, Name.Class)),
(r'(PREDICATE:)(\s+)(\S+)(\s+<\s+)(\S+)',
bygroups(Keyword, Text, Name.Class, Text, Name.Class)),
(r'(C:)(\s+)(\S+)(\s+)(\S+)',
bygroups(Keyword, Text, Name.Function, Text, Name.Class)),
(r'(INSTANCE:)(\s+)(\S+)(\s+)(\S+)',
bygroups(Keyword, Text, Name.Class, Text, Name.Class)),
(r'(SLOT:)(\s+)(\S+)', bygroups(Keyword, Text, Name.Function)),
(r'(SINGLETON:)(\s+)(\S+)', bygroups(Keyword, Text, Name.Class)),
(r'SINGLETONS:', Keyword, 'classes'),
# other syntax
(r'(CONSTANT:|SYMBOL:|MAIN:|HELP:)(\s+)(\S+)',
bygroups(Keyword, Text, Name.Function)),
(r'SYMBOLS:\s', Keyword, 'words'),
(r'SYNTAX:\s', Keyword),
(r'ALIEN:\s', Keyword),
(r'(STRUCT:)(\s+)(\S+)', bygroups(Keyword, Text, Name.Class)),
(r'(FUNCTION:)(\s+\S+\s+)(\S+)(\s+\(\s+[^)]+\)\s)',
bygroups(Keyword.Namespace, Text, Name.Function, Text)),
(r'(FUNCTION-ALIAS:)(\s+)(\S+)(\s+\S+\s+)(\S+)(\s+\(\s+[^)]+\)\s)',
bygroups(Keyword.Namespace, Text, Name.Function, Text, Name.Function, Text)),
# vocab.private
(r'(?:<PRIVATE|PRIVATE>)\s', Keyword.Namespace),
# strings
(r'"""\s+(?:.|\n)*?\s+"""', String),
(r'"(?:\\\\|\\"|[^"])*"', String),
(r'\S+"\s+(?:\\\\|\\"|[^"])*"', String),
(r'CHAR:\s+(?:\\[\\abfnrstv]|[^\\]\S*)\s', String.Char),
# comments
(r'!\s+.*$', Comment),
(r'#!\s+.*$', Comment),
(r'/\*\s+(?:.|\n)*?\s\*/\s', Comment),
# boolean constants
(r'[tf]\s', Name.Constant),
# symbols and literals
(r'[\\$]\s+\S+', Name.Constant),
(r'M\\\s+\S+\s+\S+', Name.Constant),
# numbers
(r'[+-]?(?:[\d,]*\d)?\.(?:\d([\d,]*\d)?)?(?:[eE][+-]?\d+)?\s', Number),
(r'[+-]?\d(?:[\d,]*\d)?(?:[eE][+-]?\d+)?\s', Number),
(r'0x[a-fA-F\d](?:[a-fA-F\d,]*[a-fA-F\d])?(?:p\d([\d,]*\d)?)?\s', Number),
(r'NAN:\s+[a-fA-F\d](?:[a-fA-F\d,]*[a-fA-F\d])?(?:p\d([\d,]*\d)?)?\s', Number),
(r'0b[01]+\s', Number.Bin),
(r'0o[0-7]+\s', Number.Oct),
(r'(?:\d([\d,]*\d)?)?\+\d(?:[\d,]*\d)?/\d(?:[\d,]*\d)?\s', Number),
(r'(?:\-\d([\d,]*\d)?)?\-\d(?:[\d,]*\d)?/\d(?:[\d,]*\d)?\s', Number),
# keywords
(r'(?:deprecated|final|foldable|flushable|inline|recursive)\s',
Keyword),
# builtins
(builtin_kernel, Name.Builtin),
(builtin_assocs, Name.Builtin),
(builtin_combinators, Name.Builtin),
(builtin_math, Name.Builtin),
(builtin_sequences, Name.Builtin),
(builtin_namespaces, Name.Builtin),
(builtin_arrays, Name.Builtin),
(builtin_io, Name.Builtin),
(builtin_strings, Name.Builtin),
(builtin_vectors, Name.Builtin),
(builtin_continuations, Name.Builtin),
# everything else is text
(r'\S+', Text),
],
'stackeffect': [
(r'\s+', Text),
(r'\(\s+', Name.Function, 'stackeffect'),
(r'\)\s', Name.Function, '#pop'),
(r'--\s', Name.Function),
(r'\S+', Name.Variable),
],
'slots': [
(r'\s+', Text),
(r';\s', Keyword, '#pop'),
(r'(\{\s+)(\S+)(\s+[^}]+\s+\}\s)',
bygroups(Text, Name.Variable, Text)),
(r'\S+', Name.Variable),
],
'vocabs': [
(r'\s+', Text),
(r';\s', Keyword, '#pop'),
(r'\S+', Name.Namespace),
],
'classes': [
(r'\s+', Text),
(r';\s', Keyword, '#pop'),
(r'\S+', Name.Class),
],
'words': [
(r'\s+', Text),
(r';\s', Keyword, '#pop'),
(r'\S+', Name.Function),
],
}
|
tmm1/pygments.rb
|
vendor/pygments-main/pygments/lexers/factor.py
|
Python
|
mit
| 17,864
|
#! /usr/bin/env python
import figleaf
from figleaf import internals
from sets import Set as set
import sys
from cPickle import load
import os
from optparse import OptionParser
def main():
#### OPTIONS
parser = OptionParser()
parser.add_option('-c', '--coverage', nargs=1, action="store",
dest="coverage_file",
help = 'load coverage info from this file',
default='.figleaf_sections')
####
(options, args) = parser.parse_args(sys.argv[1:])
coverage_file = options.coverage_file
figleaf.load_pickled_coverage(open(coverage_file))
data = internals.CoverageData(figleaf._t)
full_cov = data.gather_files()
for filename in args:
annotate_file_with_sections(filename, data, full_cov)
def annotate_file_with_sections(short, data, full_cov):
full = os.path.abspath(short)
tags = {}
sections = data.gather_sections(full)
sections.update(data.gather_sections(short))
print data.sections
print '*** PROCESSING:', short, '\n\t==>', short + '.sections'
for tag, cov in sections.items():
if cov:
tags[tag] = cov
if not tags:
print '*** No coverage info for file', short
tag_names = tags.keys()
tag_names.sort()
tag_names.reverse()
tags["-- all coverage --"] = full_cov.get(full, set())
tag_names.insert(0, "-- all coverage --")
n_tags = len(tag_names)
fp = open(short + '.sections', 'w')
for i, tag in enumerate(tag_names):
fp.write('%s%s\n' % ('| ' * i, tag))
fp.write('| ' * n_tags)
fp.write('\n\n')
source = open(full)
for n, line in enumerate(source):
marks = ""
for tag in tag_names:
cov = tags[tag]
symbol = ' '
if (n+1) in cov:
symbol = '+ '
marks += symbol
fp.write('%s | %s' % (marks, line))
fp.close()
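# Illustrative output sketch (derived from the code above; file and tag
# names are hypothetical): for one section tag plus the synthetic
# "-- all coverage --" tag, `foo.py.sections` begins with a tag header,
# then one marker column per tag in front of each source line:
#
#   -- all coverage --
#   | section-A
#   | |
#
#   + +  | def foo():
#   +    |     return 1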
|
hornn/interviews
|
tools/bin/ext/figleaf/annotate_sections.py
|
Python
|
apache-2.0
| 1,955
|
"""
flask.ext.restless.search
~~~~~~~~~~~~~~~~~~~~~~~~~
Provides querying, searching, and function evaluation on SQLAlchemy models.
The most important functions in this module are the :func:`create_query`
and :func:`search` functions, which create a SQLAlchemy query object and
execute that query on a given model, respectively.
:copyright: 2011 by Lincoln de Sousa <lincoln@comum.org>
:copyright: 2012 Jeffrey Finkelstein <jeffrey.finkelstein@gmail.com>
:license: GNU AGPLv3+ or BSD
"""
import inspect
from sqlalchemy import and_ as AND
from sqlalchemy import or_ as OR
from sqlalchemy.ext.associationproxy import AssociationProxy
from sqlalchemy.orm.attributes import InstrumentedAttribute
from .helpers import unicode_keys_to_strings
from .helpers import session_query
from .helpers import get_related_association_proxy_model
def _sub_operator(model, argument, fieldname):
"""Recursively calls :func:`QueryBuilder._create_operation` when argument
is a dictionary of the form specified in :ref:`search`.
This function is for use with the ``has`` and ``any`` search operations.
"""
if isinstance(model, InstrumentedAttribute):
submodel = model.property.mapper.class_
elif isinstance(model, AssociationProxy):
submodel = get_related_association_proxy_model(model)
else: # TODO what to do here?
pass
if isinstance(argument, dict):
fieldname = argument['name']
operator = argument['op']
argument = argument['val']
relation = None
if '__' in fieldname:
fieldname, relation = fieldname.split('__')
return QueryBuilder._create_operation(submodel, fieldname, operator,
argument, relation)
# Support legacy has/any with implicit eq operator
return getattr(submodel, fieldname) == argument
#: The mapping from operator name (as accepted by the search method) to a
#: function which returns the SQLAlchemy expression corresponding to that
#: operator.
#:
#: Each of these functions accepts either one, two, or three arguments. The
#: first argument is the field object on which to apply the operator. The
#: second argument, where it exists, is either the second argument to the
#: operator or a dictionary as described below. The third argument, where it
#: exists, is the name of the field.
#:
#: For functions that accept three arguments, the second argument may be a
#: dictionary containing ``'name'``, ``'op'``, and ``'val'`` mappings so that
#: :func:`QueryBuilder._create_operation` may be applied recursively. For more
#: information and examples, see :ref:`search`.
#:
#: Some operations have multiple names. For example, the equality operation can
#: be described by the strings ``'=='``, ``'eq'``, ``'equals'``, etc.
OPERATORS = {
# Operators which accept a single argument.
'is_null': lambda f: f == None,
'is_not_null': lambda f: f != None,
# TODO what are these?
'desc': lambda f: f.desc,
'asc': lambda f: f.asc,
# Operators which accept two arguments.
'==': lambda f, a: f == a,
'eq': lambda f, a: f == a,
'equals': lambda f, a: f == a,
'equal_to': lambda f, a: f == a,
'!=': lambda f, a: f != a,
'ne': lambda f, a: f != a,
'neq': lambda f, a: f != a,
'not_equal_to': lambda f, a: f != a,
'does_not_equal': lambda f, a: f != a,
'>': lambda f, a: f > a,
'gt': lambda f, a: f > a,
'<': lambda f, a: f < a,
'lt': lambda f, a: f < a,
'>=': lambda f, a: f >= a,
'ge': lambda f, a: f >= a,
'gte': lambda f, a: f >= a,
'geq': lambda f, a: f >= a,
'<=': lambda f, a: f <= a,
'le': lambda f, a: f <= a,
'lte': lambda f, a: f <= a,
'leq': lambda f, a: f <= a,
'ilike': lambda f, a: f.ilike(a),
'like': lambda f, a: f.like(a),
'in': lambda f, a: f.in_(a),
'not_in': lambda f, a: ~f.in_(a),
# Operators which accept three arguments.
'has': lambda f, a, fn: f.has(_sub_operator(f, a, fn)),
'any': lambda f, a, fn: f.any(_sub_operator(f, a, fn)),
}
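# Hedged illustration (not part of the original module): given a
# hypothetical declarative model ``Person`` with an ``age`` column, an
# entry of ``OPERATORS`` yields the SQLAlchemy expression directly:
#
#     expression = OPERATORS['lt'](Person.age, 20)   # i.e. Person.age < 20
#     query = session.query(Person).filter(expression)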
class OrderBy(object):
"""Represents an "order by" in a SQL query expression."""
def __init__(self, field, direction='asc'):
"""Instantiates this object with the specified attributes.
`field` is the name of the field by which to order the result set.
`direction` is either ``'asc'`` or ``'desc'``, for "ascending" and
"descending", respectively.
"""
self.field = field
self.direction = direction
def __repr__(self):
"""Returns a string representation of this object."""
return '<OrderBy {}, {}>'.format(self.field, self.direction)
class Filter(object):
"""Represents a filter to apply to a SQL query.
A filter can be, for example, a comparison operator applied to a field of a
model and a value or a comparison applied to two fields of the same
model. For more information on possible filters, see :ref:`search`.
"""
def __init__(self, fieldname, operator, argument=None, otherfield=None):
"""Instantiates this object with the specified attributes.
`fieldname` is the name of the field of a model which will be on the
left side of the operator.
`operator` is the string representation of an operator to apply. The
full list of recognized operators can be found at :ref:`search`.
If `argument` is specified, it is the value to place on the right side
of the operator. If `otherfield` is specified, that field on the model
will be placed on the right side of the operator.
.. admonition:: About `argument` and `otherfield`
Some operators don't need either argument and some need exactly one.
However, this constructor will not raise any errors or otherwise
inform you of which situation you are in; it is basically just a
named tuple. Calling code must handle errors caused by missing
required arguments.
"""
self.fieldname = fieldname
self.operator = operator
self.argument = argument
self.otherfield = otherfield
def __repr__(self):
"""Returns a string representation of this object."""
return '<Filter {} {} {}>'.format(self.fieldname, self.operator,
self.argument or self.otherfield)
@staticmethod
def from_dictionary(dictionary):
"""Returns a new :class:`Filter` object with arguments parsed from
`dictionary`.
`dictionary` is a dictionary of the form::
{'name': 'age', 'op': 'lt', 'val': 20}
or::
{'name': 'age', 'op': 'lt', 'field': 'height'}
where ``dictionary['name']`` is the name of the field of the model on
which to apply the operator, ``dictionary['op']`` is the name of the
operator to apply, ``dictionary['val']`` is the value on the right to
which the operator will be applied, and ``dictionary['field']`` is the
name of the other field of the model to which the operator will be
applied.
"""
fieldname = dictionary.get('name')
operator = dictionary.get('op')
argument = dictionary.get('val')
otherfield = dictionary.get('field')
return Filter(fieldname, operator, argument, otherfield)
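# For example (hedged sketch mirroring the docstring above), a filter
# restricting ``age`` to values below 20 could be built with:
#
#     filt = Filter.from_dictionary({'name': 'age', 'op': 'lt', 'val': 20})
#     # repr(filt) -> '<Filter age lt 20>'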
class SearchParameters(object):
"""Aggregates the parameters for a search, including filters, search type,
limit, offset, and order by directives.
"""
def __init__(self, filters=None, limit=None, offset=None, order_by=None,
junction=None):
"""Instantiates this object with the specified attributes.
`filters` is a list of :class:`Filter` objects, representing filters to
be applied during the search.
`limit`, if not ``None``, specifies the maximum number of results to
return in the search.
`offset`, if not ``None``, specifies the number of initial results to
skip in the result set.
`order_by` is a list of :class:`OrderBy` objects, representing the
ordering directives to apply to the result set which matches the
search.
`junction` is either :func:`sqlalchemy.or_` or :func:`sqlalchemy.and_`
(if ``None``, this will default to :func:`sqlalchemy.and_`), specifying
how the filters should be interpreted (that is, as a disjunction or a
conjunction).
"""
self.filters = filters or []
self.limit = limit
self.offset = offset
self.order_by = order_by or []
self.junction = junction or AND
def __repr__(self):
"""Returns a string representation of the search parameters."""
template = ('<SearchParameters filters={}, order_by={}, limit={},'
' offset={}, junction={}>')
return template.format(self.filters, self.order_by, self.limit,
self.offset, self.junction.__name__)
@staticmethod
def from_dictionary(dictionary):
"""Returns a new :class:`SearchParameters` object with arguments parsed
from `dictionary`.
`dictionary` is a dictionary of the form::
{
'filters': [{'name': 'age', 'op': 'lt', 'val': 20}, ...],
'order_by': [{'field': 'age', 'direction': 'desc'}, ...]
'limit': 10,
'offset': 3,
'disjunction': True
}
where ``dictionary['filters']`` is the list of :class:`Filter` objects
(in dictionary form), ``dictionary['order_by']`` is the list of
:class:`OrderBy` objects (in dictionary form), ``dictionary['limit']``
is the maximum number of matching entries to return,
``dictionary['offset']`` is the number of initial entries to skip in
the matching result set, and ``dictionary['disjunction']`` is whether
the filters should be joined as a disjunction or conjunction.
The provided dictionary may have other key/value pairs, but they are
ignored.
"""
# for the sake of brevity...
from_dict = Filter.from_dictionary
filters = [from_dict(f) for f in dictionary.get('filters', [])]
# HACK In Python 2.5, unicode dictionary keys are not allowed.
order_by_list = dictionary.get('order_by', [])
order_by_list = (unicode_keys_to_strings(o) for o in order_by_list)
order_by = [OrderBy(**o) for o in order_by_list]
limit = dictionary.get('limit')
offset = dictionary.get('offset')
disjunction = dictionary.get('disjunction')
junction = OR if disjunction else AND
return SearchParameters(filters=filters, limit=limit, offset=offset,
order_by=order_by, junction=junction)
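# Hedged sketch (not from the original source): a complete search
# description parses in one call, with unknown keys silently ignored:
#
#     params = SearchParameters.from_dictionary({
#         'filters': [{'name': 'age', 'op': 'lt', 'val': 20}],
#         'order_by': [{'field': 'age', 'direction': 'desc'}],
#         'limit': 10,
#     })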
class QueryBuilder(object):
"""Provides a static function for building a SQLAlchemy query object based
on a :class:`SearchParameters` instance.
Use the static :meth:`create_query` method to create a SQLAlchemy query on
a given model.
"""
@staticmethod
def _create_operation(model, fieldname, operator, argument, relation=None):
"""Translates an operation described as a string to a valid SQLAlchemy
query parameter using a field or relation of the specified model.
More specifically, this translates the string representation of an
operation, for example ``'gt'``, to an expression corresponding to a
SQLAlchemy expression, ``field > argument``. The recognized operators
are given by the keys of :data:`OPERATORS`. For more information on
recognized search operators, see :ref:`search`.
If `relation` is not ``None``, the returned search parameter will
correspond to a search on the field named `fieldname` on the entity
related to `model` whose name, as a string, is `relation`.
`model` is an instance of a SQLAlchemy declarative model being
searched.
`fieldname` is the name of the field of `model` to which the operation
will be applied as part of the search. If `relation` is specified, the
operation will be applied to the field with name `fieldname` on the
entity related to `model` whose name, as a string, is `relation`.
`operator` is a string representing the operation which will be
executed between the field and the argument received, for example
``'gt'``, ``'lt'``, ``'like'``, ``'in'``, etc.
`argument` is the argument to which to apply the `operator`.
`relation` is the name of the relationship attribute of `model` to
which the operation will be applied as part of the search, or ``None``
if this function should not use a related entity in the search.
This function raises the following errors:
* :exc:`KeyError` if the `operator` is unknown (that is, not in
:data:`OPERATORS`)
* :exc:`TypeError` if an incorrect number of arguments is provided for
the operation (for example, if `operator` is `'=='` but no
`argument` is provided)
* :exc:`AttributeError` if no column with name `fieldname` or
`relation` exists on `model`
"""
# raises KeyError if operator not in OPERATORS
opfunc = OPERATORS[operator]
argspec = inspect.getargspec(opfunc)
# in Python 2.6 or later, this should be `argspec.args`
numargs = len(argspec[0])
# raises AttributeError if `fieldname` or `relation` does not exist
field = getattr(model, relation or fieldname)
# each of these will raise a TypeError if the wrong number of arguments
# is supplied to `opfunc`.
if numargs == 1:
return opfunc(field)
if argument is None:
raise TypeError
if numargs == 2:
return opfunc(field, argument)
return opfunc(field, argument, fieldname)
@staticmethod
def _create_filters(model, search_params):
"""Returns the list of operations on `model` specified in the
:attr:`filters` attribute on the `search_params` object.
`search_params` is an instance of the :class:`SearchParameters` class
whose fields represent the parameters of the search.
Raises one of :exc:`AttributeError`, :exc:`KeyError`, or
:exc:`TypeError` if there is a problem creating the query. See the
documentation for :func:`_create_operation` for more information.
"""
filters = []
for filt in search_params.filters:
fname = filt.fieldname
val = filt.argument
# get the relationship from the field name, if it exists
relation = None
if '__' in fname:
relation, fname = fname.split('__')
# get the other field to which to compare, if it exists
if filt.otherfield:
val = getattr(model, filt.otherfield)
# for the sake of brevity...
create_op = QueryBuilder._create_operation
param = create_op(model, fname, filt.operator, val, relation)
filters.append(param)
return filters
@staticmethod
def create_query(session, model, search_params):
"""Builds an SQLAlchemy query instance based on the search parameters
present in ``search_params``, an instance of :class:`SearchParameters`.
This method returns a SQLAlchemy query in which all matched instances
meet the requirements specified in ``search_params``.
`model` is a SQLAlchemy declarative model on which to create a query.
`search_params` is an instance of :class:`SearchParameters` which
specifies the filters, order, limit, offset, etc. of the query.
Building the query proceeds in this order:
1. filtering the query
2. ordering the query
3. limiting the query
4. offsetting the query
Raises one of :exc:`AttributeError`, :exc:`KeyError`, or
:exc:`TypeError` if there is a problem creating the query. See the
documentation for :func:`_create_operation` for more information.
"""
# Adding field filters
query = session_query(session, model)
# may raise exception here
filters = QueryBuilder._create_filters(model, search_params)
query = query.filter(search_params.junction(*filters))
# Order the search
for val in search_params.order_by:
field = getattr(model, val.field)
direction = getattr(field, val.direction)
query = query.order_by(direction())
# Limit it
if search_params.limit:
query = query.limit(search_params.limit)
if search_params.offset:
query = query.offset(search_params.offset)
return query
def create_query(session, model, searchparams):
"""Returns a SQLAlchemy query object on the given `model` where the search
for the query is defined by `searchparams`.
The returned query matches the set of all instances of `model` which meet
the parameters of the search given by `searchparams`. For more information
on search parameters, see :ref:`search`.
`model` is a SQLAlchemy declarative model representing the database model
to query.
`searchparams` is either a dictionary (as parsed from a JSON request from
the client, for example) or a :class:`SearchParameters` instance defining
the parameters of the query (as returned by
:func:`SearchParameters.from_dictionary`, for example).
"""
if isinstance(searchparams, dict):
searchparams = SearchParameters.from_dictionary(searchparams)
return QueryBuilder.create_query(session, model, searchparams)
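# Hedged usage sketch (assuming a hypothetical `Person` model and an open
# `session`): the dictionary form is converted internally, so both calls
# below build the same query:
#
#     q1 = create_query(session, Person,
#                       {'filters': [{'name': 'age', 'op': 'lt', 'val': 20}]})
#     q2 = create_query(session, Person, SearchParameters.from_dictionary(
#         {'filters': [{'name': 'age', 'op': 'lt', 'val': 20}]}))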
def search(session, model, search_params):
"""Performs the search specified by the given parameters on the model
specified in the constructor of this class.
This function essentially calls :func:`create_query` to create a query
which matches the set of all instances of ``model`` which meet the search
parameters defined in ``search_params``, then returns all results (or just
one if ``search_params['single'] == True``).
This function returns a single instance of the model matching the search
parameters if ``search_params['single']`` is ``True``, or a list of all
such instances otherwise. If ``search_params['single']`` is ``True``, then
this method will raise :exc:`sqlalchemy.orm.exc.NoResultFound` if no
results are found and :exc:`sqlalchemy.orm.exc.MultipleResultsFound` if
multiple results are found.
`model` is a SQLAlchemy declarative model class representing the database
model to query.
`search_params` is a dictionary containing all available search
parameters. For more information on available search parameters, see
:ref:`search`. Implementation note: this dictionary will be converted to a
:class:`SearchParameters` object when the :func:`create_query` function is
called.
"""
# `is_single` is True when 'single' is a key in ``search_params`` and its
# corresponding value is anything except those values which evaluate to
# False (False, 0, the empty string, the empty list, etc.).
is_single = search_params.get('single')
query = create_query(session, model, search_params)
if is_single:
# may raise NoResultFound or MultipleResultsFound
return query.one()
return query.all()
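# Hedged usage sketch (hypothetical `Person` model): `single` switches the
# result from a list to exactly one instance, with SQLAlchemy's
# NoResultFound/MultipleResultsFound raised on a mismatch:
#
#     people = search(session, Person, {'filters': [...]})   # list of rows
#     person = search(session, Person, {'filters': [...],
#                                       'single': True})     # exactly one row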
|
mahrz/kernkrieg
|
flask_restless/search.py
|
Python
|
mit
| 19,666
|
from google.appengine.ext import ndb
from django.db import models
from django.test import TestCase
import mock
from potatopage.paginator import (
DjangoNonrelPaginator,
GaeNdbPaginator,
EmptyPage
)
class DjangoNonrelPaginationModel(models.Model):
field1 = models.IntegerField()
class DjangoNonrelPaginatorTests(TestCase):
def setUp(self):
for i in xrange(12):
DjangoNonrelPaginationModel.objects.create(field1=i)
def test_basic_usage(self):
paginator = DjangoNonrelPaginator(DjangoNonrelPaginationModel.objects.all().order_by("field1"), 5)
page1 = paginator.page(1)
self.assertEqual(5, len(page1.object_list))
self.assertEqual(0, page1.object_list[0].field1)
self.assertTrue(page1.has_next())
self.assertFalse(page1.has_previous())
self.assertEqual([1, 2], page1.available_pages())
page2 = paginator.page(2)
self.assertEqual(5, len(page2.object_list))
self.assertEqual(5, page2.object_list[0].field1)
self.assertTrue(page2.has_next())
self.assertTrue(page2.has_previous())
self.assertEqual([1, 2, 3], page2.available_pages())
page3 = paginator.page(3)
self.assertEqual(2, len(page3.object_list))
self.assertEqual(10, page3.object_list[0].field1)
self.assertFalse(page3.has_next())
self.assertTrue(page3.has_previous())
self.assertEqual([2, 3], page3.available_pages())
self.assertRaises(EmptyPage, paginator.page, 4)
def test_cursor_caching(self):
paginator = DjangoNonrelPaginator(DjangoNonrelPaginationModel.objects.all().order_by("field1"), 5, batch_size=2)
paginator.page(3)
self.assertFalse(paginator.has_cursor_for_page(2))
self.assertFalse(paginator.has_cursor_for_page(3))
self.assertTrue(paginator.has_cursor_for_page(5))
paginator.page(1)
self.assertFalse(paginator.has_cursor_for_page(2))
self.assertTrue(paginator.has_cursor_for_page(3))
self.assertTrue(paginator.has_cursor_for_page(5))
with mock.patch("potatopage.paginator.DjangoNonrelPaginator._process_batch_hook") as mock_obj:
#Should now use the cached cursor
page3 = paginator.page(3)
#Should have been called with a cursor as the 3rd argument
self.assertTrue(mock_obj.call_args[0][2])
self.assertEqual(2, len(page3.object_list))
self.assertEqual(10, page3.object_list[0].field1)
def test_in_query(self):
paginator = DjangoNonrelPaginator(DjangoNonrelPaginationModel.objects.filter(field1__in=xrange(12)).all().order_by("field1"), 5)
page1 = paginator.page(1)
self.assertEqual(5, len(page1.object_list))
self.assertEqual(0, page1.object_list[0].field1)
self.assertTrue(page1.has_next())
self.assertFalse(page1.has_previous())
self.assertEqual([1, 2], page1.available_pages())
page2 = paginator.page(2)
self.assertEqual(5, len(page2.object_list))
self.assertEqual(5, page2.object_list[0].field1)
self.assertTrue(page2.has_next())
self.assertTrue(page2.has_previous())
self.assertEqual([1, 2, 3], page2.available_pages())
page3 = paginator.page(3)
self.assertEqual(2, len(page3.object_list))
self.assertEqual(10, page3.object_list[0].field1)
self.assertFalse(page3.has_next())
self.assertTrue(page3.has_previous())
self.assertEqual([2, 3], page3.available_pages())
self.assertRaises(EmptyPage, paginator.page, 4)
def test_total_items_count(self):
""" Test total items count
We don't know the real count until we reach the last page,
so we cannot say what the total number of items is straight away.
_get_known_items_count should always return the maximum known number of items (estimated or exact).
"""
per_page = 5
paginator = DjangoNonrelPaginator(DjangoNonrelPaginationModel.objects.all().order_by("field1"), per_page)
# get the first page
page = paginator.page(1)
self.assertEqual(page.paginator._get_known_items_count(), per_page) # estimated number
# go to the next (but not last) page
page = paginator.page(page.next_page_number())
self.assertEqual(page.paginator._get_known_items_count(), 2 * per_page) # estimated number
# go back to the first page
page = paginator.page(page.previous_page_number())
self.assertEqual(page.paginator._get_known_items_count(), 2 * per_page) # estimated number (from cache)
# go to the next (but not last) page
page = paginator.page(page.next_page_number())
self.assertEqual(page.paginator._get_known_items_count(), 2 * per_page) # estimated number (from cache)
# go to the last page
page = paginator.page(page.next_page_number())
self.assertEqual(page.paginator._get_known_items_count(), 2 * per_page + 2) # exact number
# go back to the previous page
page = paginator.page(page.previous_page_number())
self.assertEqual(page.paginator._get_known_items_count(), 2 * per_page + 2) # exact number (from cache)
class GaeNdbPaginationModel(ndb.Model):
field1 = ndb.IntegerProperty()
class GaeNdbPaginatorTests(TestCase):
def setUp(self):
for i in xrange(12):
pm = GaeNdbPaginationModel(field1=i)
pm.put()
def test_basic_usage(self):
paginator = GaeNdbPaginator(GaeNdbPaginationModel.query().order(GaeNdbPaginationModel.field1), 5)
page1 = paginator.page(1)
self.assertEqual(5, len(page1.object_list))
self.assertEqual(0, page1.object_list[0].field1)
self.assertTrue(page1.has_next())
self.assertFalse(page1.has_previous())
self.assertEqual([1, 2], page1.available_pages())
page2 = paginator.page(2)
self.assertEqual(5, len(page2.object_list))
self.assertEqual(5, page2.object_list[0].field1)
self.assertTrue(page2.has_next())
self.assertTrue(page2.has_previous())
self.assertEqual([1, 2, 3], page2.available_pages())
page3 = paginator.page(3)
self.assertEqual(2, len(page3.object_list))
self.assertEqual(10, page3.object_list[0].field1)
self.assertFalse(page3.has_next())
self.assertTrue(page3.has_previous())
self.assertEqual([2, 3], page3.available_pages())
self.assertRaises(EmptyPage, paginator.page, 4)
def test_cursor_caching(self):
paginator = GaeNdbPaginator(GaeNdbPaginationModel.query().order(GaeNdbPaginationModel.field1), 5, batch_size=2)
paginator.page(3)
self.assertFalse(paginator.has_cursor_for_page(2))
self.assertFalse(paginator.has_cursor_for_page(3))
self.assertTrue(paginator.has_cursor_for_page(5))
paginator.page(1)
self.assertFalse(paginator.has_cursor_for_page(2))
self.assertTrue(paginator.has_cursor_for_page(3))
self.assertTrue(paginator.has_cursor_for_page(5))
with mock.patch("potatopage.paginator.GaeNdbPaginator._process_batch_hook") as mock_obj:
#Should now use the cached cursor
page3 = paginator.page(3)
#Should have been called with a cursor as the 3rd argument
self.assertTrue(mock_obj.call_args[0][2])
self.assertEqual(2, len(page3.object_list))
self.assertEqual(10, page3.object_list[0].field1)
|
potatolondon/potatopage
|
tests.py
|
Python
|
mit
| 7,589
|
""" Main ngi_reports module
"""
__version__="0.1.13"
|
ewels/ngi_reports
|
ngi_reports/__init__.py
|
Python
|
mit
| 53
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# OpenCenter(TM) is Copyright 2013 by Rackspace US, Inc.
##############################################################################
#
# OpenCenter is licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. This
# version of OpenCenter includes Rackspace trademarks and logos, and in
# accordance with Section 6 of the License, the provision of commercial
# support services in conjunction with a version of OpenCenter which includes
# Rackspace trademarks and logos is prohibited. OpenCenter source code and
# details are available at: # https://github.com/rcbops/opencenter or upon
# written request.
#
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0 and a copy, including this
# notice, is available in the LICENSE file accompanying this software.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the # specific language governing permissions and limitations
# under the License.
#
##############################################################################
from util import OpenCenterTestCase
import opencenter.backends
class AstTests(OpenCenterTestCase):
def setUp(self):
if opencenter.backends.primitive_by_name('test.set_test_fact') is None:
opencenter.backends.load_specific_backend('tests.test',
'TestBackend')
self.container = self._model_create('nodes', name='container')
self.node1 = self._model_create('nodes', name='node1')
self.node2 = self._model_create('nodes', name='node2')
self._model_create('facts', node_id=self.node1['id'],
key='parent_id',
value=self.container['id'])
self._model_create('facts', node_id=self.node2['id'],
key='parent_id',
value=self.container['id'])
self._model_create('facts', node_id=self.node1['id'],
key='array_fact', value=[1, 2])
self._model_create('facts', node_id=self.node1['id'],
key='map_fact',
value={'1': '2', '3': '4', '9': '5'})
self._model_create('facts', node_id=self.node1['id'],
key='str_fact', value='azbycxdw')
self._model_create('facts', node_id=self.node1['id'],
key='node1', value=True)
self._model_create('facts', node_id=self.node1['id'],
key='selfref', value='node1')
def tearDown(self):
self._clean_all()
def test_int_equality(self):
result = self._model_filter('nodes', 'facts.parent_id=%d' %
self.container['id'])
self.app.logger.debug('result: %s' % result)
self.assertEquals(len(result), 2)
def test_str_equality(self):
result = self._model_filter('nodes', 'name=\'node1\'')
self.app.logger.debug('result: %s' % result)
self.assertEquals(len(result), 1)
self.assertTrue(result[0]['name'] == 'node1')
def test_str_format(self):
result = self._model_filter('nodes', 'name="node1"')
self.app.logger.debug('result: %s' % result)
self.assertEquals(len(result), 1)
self.assertTrue(result[0]['name'] == 'node1')
def test_other_str_format(self):
result = self._model_filter('nodes', "name='node1'")
self.app.logger.debug('result: %s' % result)
self.assertEquals(len(result), 1)
self.assertTrue(result[0]['name'] == 'node1')
def test_greater_than(self):
result = self._model_filter('nodes', 'facts.parent_id > 0')
self.app.logger.debug('result: %s' % result)
self.assertEquals(len(result), 2)
def test_less_than(self):
result = self._model_filter('nodes', 'facts.parent_id < 999')
self.app.logger.debug('result: %s' % result)
self.assertEquals(len(result), 2)
def test_identity_filter(self):
result = self._model_filter('nodes', 'true')
self.app.logger.debug('result: %s' % result)
self.assertEquals(len(result), 3)
def test_unary_not(self):
result = self._model_filter('nodes', 'facts.parent_id !< 999')
self.app.logger.debug('result: %s' % result)
self.assertEquals(len(result), 0)
def test_less_than_equal(self):
result = self._model_filter('nodes', 'facts.parent_id <= %s' %
self.container['id'])
self.app.logger.debug('result: %s' % result)
self.assertEquals(len(result), 2)
result = self._model_filter('nodes', 'facts.parent_id < %s' %
self.container['id'])
self.app.logger.debug('result: %s' % result)
self.assertEquals(len(result), 0)
def test_greater_than_equal(self):
result = self._model_filter('nodes', 'facts.parent_id >= %s' %
self.container['id'])
self.app.logger.debug('result: %s' % result)
self.assertEquals(len(result), 2)
result = self._model_filter('nodes', 'facts.parent_id > %s' %
self.container['id'])
self.app.logger.debug('result: %s' % result)
self.assertEquals(len(result), 0)
def test_in(self):
# we aren't testing inheritance here, that's in the inheritance
# tests.
result = self._model_filter('nodes', '2 in facts.array_fact')
self.app.logger.debug('result: %s' % result)
self.assertEquals(len(result), 1)
self.assertTrue(result[0]['name'] == 'node1')
def test_nth(self):
result = self._model_filter('nodes', 'nth(0, facts.array_fact) = 1')
self.app.logger.debug('result: %s' % result)
self.assertEquals(len(result), 1)
self.assertTrue(result[0]['name'] == 'node1')
result = self._model_filter('nodes', 'nth(2, facts.array_fact)')
self.app.logger.debug('result: %s' % result)
self.assertEqual(len(result), 0)
result = self._model_filter('nodes', 'nth("0", facts.array_fact)')
self.app.logger.debug('result: %s' % result)
self.assertEqual(len(result), 0)
self.assertRaises(RuntimeError, self._model_filter,
'nodes', 'nth(-1, facts.array_fact)')
def test_max(self):
result = self._model_filter('nodes', 'max(facts.array_fact) = 2')
self.app.logger.debug('result: %s' % result)
self.assertEquals(len(result), 1)
self.assertTrue(result[0]['name'] == 'node1')
result = self._model_filter('nodes', 'max(facts.map_fact) = "9"')
self.app.logger.debug('result: %s' % result)
self.assertEquals(len(result), 0)
result = self._model_filter('nodes', 'max(facts.str_fact) = "z"')
self.app.logger.debug('result: %s' % result)
self.assertEquals(len(result), 0)
def test_str(self):
self._model_create('facts', node_id=self.node1['id'],
key='int',
value=3)
result = self._model_filter('nodes', 'str(facts.int) = "3"')
self.app.logger.debug('result: %s' % result)
self.assertEquals(len(result), 1)
self.assertTrue(result[0]['name'] == 'node1')
self._model_create('facts', node_id=self.node1['id'],
key='empty',
value='')
result = self._model_filter('nodes', 'str(facts.empty) = ""')
self.app.logger.debug('result: %s' % result)
self.assertEquals(len(result), 0)
def test_int(self):
self._model_create('facts', node_id=self.node1['id'],
key='string',
value='3')
result = self._model_filter('nodes', 'int(facts.string) = 3')
self.app.logger.debug('result: %s' % result)
self.assertEquals(len(result), 1)
self.assertTrue(result[0]['name'] == 'node1')
self._model_create('facts', node_id=self.node1['id'],
key='zero_int',
value=0)
result = self._model_filter('nodes', 'int(facts.zero_int) = 0')
self.app.logger.debug('result: %s' % result)
self.assertEquals(len(result), 0)
self._model_create('facts', node_id=self.node1['id'],
key='one_int',
value=1)
result = self._model_filter('nodes', 'int(facts.one_int) = 1')
self.app.logger.debug('result: %s' % result)
self.assertEquals(len(result), 1)
self.assertTrue(result[0]['name'] == 'node1')
self._model_create('facts', node_id=self.node1['id'],
key='neg_int',
value=-1)
result = self._model_filter('nodes', 'int(facts.neg_int) < 0')
self.assertEqual(len(result), 1)
self.assertTrue(result[0]['name'] == 'node1')
# self.assertEquals(len(result), 1)
def test_count(self):
result = self._model_filter('nodes', 'count(facts.array_fact) = 2')
self.app.logger.debug('result: %s' % result)
self.assertEquals(len(result), 1)
self.assertTrue(result[0]['name'] == 'node1')
result = self._model_filter('nodes', 'count(facts.map_fact) = 3')
self.app.logger.debug('result: %s' % result)
self.assertEquals(len(result), 0)
result = self._model_filter('nodes', 'count(facts.str_fact) = 8')
self.app.logger.debug('result: %s' % result)
self.assertEquals(len(result), 0)
# self.assertEquals(len(result), 1)
def test_filter(self):
query = 'count(filter("nodes", "facts.parent_id=' \
'{facts.parent_id}")) > 1'
result = self._model_filter('nodes', query)
self.app.logger.debug('result: %s' % result)
self.assertEquals(len(result), 2)
def test_union(self):
query = 'count(union(facts.array_fact, 3)) > 1'
result = self._model_filter('nodes', query)
self.app.logger.debug('result: %s' % result)
self.assertEquals(len(result), 1)
self.assertTrue(result[0]['name'] == 'node1')
query = 'count(union(facts.array_fact, 2)) > 2'
result = self._model_filter('nodes', query)
self.app.logger.debug('result: %s' % result)
self.assertEquals(len(result), 0)
def test_remove(self):
query = 'count(remove(facts.array_fact, 2)) = 1'
result = self._model_filter('nodes', query)
self.app.logger.debug('result: %s' % result)
self.assertEquals(len(result), 1)
self.assertTrue(result[0]['name'] == 'node1')
self._model_create('facts', node_id=self.node1['id'],
key='dups_array',
value=[1, 1, 2, 3])
query = 'count(remove(facts.dups_array, 1)) = 3'
result = self._model_filter('nodes', query)
self.app.logger.debug('result: %s' % result)
self.assertEquals(len(result), 1)
self.assertTrue(result[0]['name'] == 'node1')
query = 'remove(facts.map_fact, "1")'
self.assertRaisesRegexp(SyntaxError, 'remove on non-list type',
self._model_filter, 'nodes', query)
def test_union_of_null(self):
query = '("node" in name) and (count(union(facts.array_fact, 3)) = 1)'
result = self._model_filter('nodes', query)
self.app.logger.debug('result: %s' % result)
# node 2
self.assertEquals(len(result), 1)
self.assertTrue(result[0]['name'] == 'node2')
def test_identifier_interpolation(self):
query = 'facts.{name} = true'
result = self._model_filter('nodes', query)
self.app.logger.debug('result: %s' % result)
self.assertEquals(len(result), 1)
self.assertTrue(result[0]['name'] == 'node1')
def test_string_interpolation(self):
query = 'facts.selfref = "{name}"'
result = self._model_filter('nodes', query)
self.app.logger.debug('result: %s' % result)
self.assertEquals(len(result), 1)
self.assertTrue(result[0]['name'] == 'node1')
def test_childof(self):
query = 'childof("container")'
result = self._model_filter('nodes', query)
self.assertEqual(len(result), 2)
query = 'childof("badname")'
result = self._model_filter('nodes', query)
self.assertEqual(len(result), 0)
self.node3 = self._model_create('nodes', name='node3')
self.node4 = self._model_create('nodes', name='node4')
self._model_create('facts', node_id=self.node3['id'],
key='parent_id', value=self.node4['id'])
self._model_create('facts', node_id=self.node4['id'],
key='parent_id', value=self.node3['id'])
query = 'childof("node2")'
result = self._model_filter('nodes', query)
#this tests that we do not get stuck in an infinite loop
self.assertEqual(len(result), 0)
def test_printf(self):
query = 'facts.str_fact = printf("azby%s", "cxdw")'
result = self._model_filter('nodes', query)
self.assertEquals(len(result), 1)
self.assertTrue(result[0]['name'] == 'node1')
# fix this by db abstraction...
# def test_017_relations(self):
# # this should actually work....
# result = self._model_filter('node', 'parent.name="%s"' %
# self.cluster['name'])
# self.app.logger.debug('result: %s' % result)
# self.assertEquals(len(result), len(self.nodes))
|
rcbops/opencenter
|
tests/test_ast.py
|
Python
|
apache-2.0
| 13,977
|
import re
################################################################################
# Human Command Protocol
################################################################################
class HumanCommandProtocol:
def __init__(self):
self.commands = (('^help', self.help),)
def help(self, *args):
"""
Returns help text as a list of lines.
"""
return ['describe controls']
def parse(self, text):
"""
Parse text and execute appropriate command.
"""
for (pattern, handler) in self.commands:
match = re.search(pattern, text)
if match:
return handler(text)
return ('Unknown command',)
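# Hedged usage sketch (not part of the original file):
#
#     hcp = HumanCommandProtocol()
#     hcp.parse('help')     # -> ['describe controls']
#     hcp.parse('dance')    # -> ('Unknown command',)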
|
michaeldove/abode
|
chat/protocol.py
|
Python
|
mit
| 735
|
import pickle
import random
import os
import numpy as np
'''
Note to Lukas:
As described in the mail, we would leave out this "old" if clause here,
because it would have been handled during the normal preprocessing
'''
old = True
# get absolute path of this file
file_path = ('/'.join(os.path.realpath(__file__).split("/")[:-2]))
#################### Get Pickle Data
def get_data(filename):
print('------>[*] Loading Pickle Data...')
try:
with open(filename, "rb") as f:
data = pickle.load(f)
print('------>[+] Pickle Data Loaded!')
except:
print('------>[-] Pickle Data Load failed!')
return data
#################### CelebA Class
'''
Purpose: Creates an object that is able to supply batches
'''
class CelebA():
def __init__(self, filename= file_path + "/data/pickle/celeba_pickle.dat"):
# Remember the pickle file so the whole data set ("whole_Data") can be reloaded
self.filename = filename
# Take samples out of "current" until it is too small to supply a batch,
# and then refill it from "whole_Data"
self.current = get_data(self.filename)
def get_batch(self, batch_size):
#In case we do not have enough samples in our "current" data
if len(self.current) < batch_size:
#missing equals the number of samples that we are still missing
missing = batch_size-len(self.current)
#take all samples that are left
batch = self.current
#shuffle "whole_Data" and assign it to the "current" data
self.current = get_data(self.filename)
random.shuffle(self.current)
#take the missing samples:
batch += self.current[:missing]
#delete those you took out from "current"
self.current = self.current[missing:]
#In case our "current" data is big enough
else:
#take your batch from "current"
batch = self.current[:batch_size]
#delete those you took out from "current"
self.current = self.current[batch_size:]
# temporary
if old:
for i in range(len(batch)):
batch[i] = np.expand_dims(batch[i],-1)
batch[i] = (batch[i]/255.0 - 0.5) * 2.0
return batch
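# Hedged usage sketch (not in the original file): repeatedly draw
# training batches; with `old` enabled each image gets a channel axis
# and is rescaled from [0, 255] to [-1, 1]:
#
#     celeba = CelebA()
#     batch = celeba.get_batch(64)   # list of 64 preprocessed samples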
|
D3vvy/iannwtf_DCGAN
|
functions/celeba_input.py
|
Python
|
gpl-3.0
| 2,015
|
#!/usr/bin/python
import sys, rospy, tf, actionlib
from geometry_msgs.msg import *
from tf.transformations import quaternion_from_euler
if __name__ == '__main__':
rospy.init_node('initial_localization')
pub = rospy.Publisher('initialpose', PoseWithCovarianceStamped, queue_size=1)
p = PoseWithCovarianceStamped()
p.header.frame_id = "map"
p.pose.pose.orientation = Quaternion(*quaternion_from_euler(0, 0, 0))
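# 6x6 row-major pose covariance over (x, y, z, roll, pitch, yaw); only
# the x, y and yaw variances are set to a non-zero value (0.1 each).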
p.pose.covariance = \
[ 0.1 , 0, 0, 0, 0, 0,
0 , 0.1 , 0, 0, 0, 0,
0 , 0 , 0, 0, 0, 0,
0 , 0 , 0, 0, 0, 0,
0 , 0 , 0, 0, 0, 0,
0 , 0 , 0, 0, 0, 0.1 ]
for t in range(0,5):
rospy.sleep(1)
pub.publish(p)
|
mkhuthir/catkin_ws
|
src/stockroom_bot/src/initial_localization.py
|
Python
|
gpl-3.0
| 688
|
from itertools import permutations
import pytest
import networkx as nx
from networkx.testing import almost_equal
class TestNeighborConnectivity(object):
def test_degree_p4(self):
G = nx.path_graph(4)
answer = {1: 2.0, 2: 1.5}
nd = nx.average_degree_connectivity(G)
assert nd == answer
D = G.to_directed()
answer = {2: 2.0, 4: 1.5}
nd = nx.average_degree_connectivity(D)
assert nd == answer
answer = {1: 2.0, 2: 1.5}
D = G.to_directed()
nd = nx.average_degree_connectivity(D, source='in', target='in')
assert nd == answer
D = G.to_directed()
nd = nx.average_degree_connectivity(D, source='in', target='in')
assert nd == answer
def test_degree_p4_weighted(self):
G = nx.path_graph(4)
G[1][2]['weight'] = 4
answer = {1: 2.0, 2: 1.8}
nd = nx.average_degree_connectivity(G, weight='weight')
assert nd == answer
answer = {1: 2.0, 2: 1.5}
nd = nx.average_degree_connectivity(G)
assert nd == answer
D = G.to_directed()
answer = {2: 2.0, 4: 1.8}
nd = nx.average_degree_connectivity(D, weight='weight')
assert nd == answer
answer = {1: 2.0, 2: 1.8}
D = G.to_directed()
nd = nx.average_degree_connectivity(D, weight='weight', source='in',
target='in')
assert nd == answer
D = G.to_directed()
nd = nx.average_degree_connectivity(D, source='in', target='out',
weight='weight')
assert nd == answer
def test_weight_keyword(self):
G = nx.path_graph(4)
G[1][2]['other'] = 4
answer = {1: 2.0, 2: 1.8}
nd = nx.average_degree_connectivity(G, weight='other')
assert nd == answer
answer = {1: 2.0, 2: 1.5}
nd = nx.average_degree_connectivity(G, weight=None)
assert nd == answer
D = G.to_directed()
answer = {2: 2.0, 4: 1.8}
nd = nx.average_degree_connectivity(D, weight='other')
assert nd == answer
answer = {1: 2.0, 2: 1.8}
D = G.to_directed()
nd = nx.average_degree_connectivity(D, weight='other', source='in',
target='in')
assert nd == answer
D = G.to_directed()
nd = nx.average_degree_connectivity(D, weight='other', source='in',
target='in')
assert nd == answer
def test_degree_barrat(self):
G = nx.star_graph(5)
G.add_edges_from([(5, 6), (5, 7), (5, 8), (5, 9)])
G[0][5]['weight'] = 5
nd = nx.average_degree_connectivity(G)[5]
assert nd == 1.8
nd = nx.average_degree_connectivity(G, weight='weight')[5]
assert almost_equal(nd, 3.222222, places=5)
nd = nx.k_nearest_neighbors(G, weight='weight')[5]
assert almost_equal(nd, 3.222222, places=5)
def test_zero_deg(self):
G = nx.DiGraph()
G.add_edge(1, 2)
G.add_edge(1, 3)
G.add_edge(1, 4)
c = nx.average_degree_connectivity(G)
assert c == {1: 0, 3: 1}
c = nx.average_degree_connectivity(G, source='in', target='in')
assert c == {0: 0, 1: 0}
c = nx.average_degree_connectivity(G, source='in', target='out')
assert c == {0: 0, 1: 3}
c = nx.average_degree_connectivity(G, source='in', target='in+out')
assert c == {0: 0, 1: 3}
c = nx.average_degree_connectivity(G, source='out', target='out')
assert c == {0: 0, 3: 0}
c = nx.average_degree_connectivity(G, source='out', target='in')
assert c == {0: 0, 3: 1}
c = nx.average_degree_connectivity(G, source='out', target='in+out')
assert c == {0: 0, 3: 1}
def test_in_out_weight(self):
G = nx.DiGraph()
G.add_edge(1, 2, weight=1)
G.add_edge(1, 3, weight=1)
G.add_edge(3, 1, weight=1)
for s, t in permutations(['in', 'out', 'in+out'], 2):
c = nx.average_degree_connectivity(G, source=s, target=t)
cw = nx.average_degree_connectivity(G, source=s, target=t,
weight='weight')
assert c == cw
def test_invalid_source(self):
with pytest.raises(ValueError):
G = nx.DiGraph()
nx.average_degree_connectivity(G, source='bogus')
def test_invalid_target(self):
with pytest.raises(ValueError):
G = nx.DiGraph()
nx.average_degree_connectivity(G, target='bogus')
def test_single_node(self):
# TODO Is this really the intended behavior for providing a
# single node as the argument `nodes`? Shouldn't the function
# just return the connectivity value itself?
G = nx.trivial_graph()
conn = nx.average_degree_connectivity(G, nodes=0)
assert conn == {0: 0}
|
sserrot/champion_relationships
|
venv/Lib/site-packages/networkx/algorithms/assortativity/tests/test_connectivity.py
|
Python
|
mit
| 5,020
|
import DeepFried2 as df
import scipy.io
import numpy as np
def model_head(fully_conv=True):
if fully_conv:
return [
df.SpatialConvolutionCUDNN( 512, 4096, 7, 7, border='valid'), df.ReLU(),
df.Dropout(0.5),
df.SpatialConvolutionCUDNN(4096, 4096, 1, 1, border='valid'), df.ReLU(),
df.Dropout(0.5),
df.SpatialConvolutionCUDNN(4096, 1000, 1, 1, border='valid'),
df.SpatialSoftMaxCUDNN(),
]
else:
return [
df.Reshape(-1, 512*7*7),
df.Linear(512*7*7, 4096), df.ReLU(),
df.Dropout(0.5),
df.Linear(4096, 4096), df.ReLU(),
df.Dropout(0.5),
df.Linear(4096, 1000),
df.SoftMax()
]
def params(large=True, fully_conv=True, fname=None):
# Thanks a lot to @317070 (Jonas Degrave) for this!
if large:
fname = fname or df.zoo.download('http://www.vlfeat.org/matconvnet/models/imagenet-vgg-verydeep-19.mat', saveas='vgg19-imagenet.mat', desc='vgg19-imagenet.mat')
layers = [0,2,5,7,10,12,14,16,19,21,23,25,28,30,32,34,37,39,41]
else:
fname = fname or df.zoo.download('http://www.vlfeat.org/matconvnet/models/imagenet-vgg-verydeep-16.mat', saveas='vgg16-imagenet.mat', desc='vgg16-imagenet.mat')
layers = [0,2,5,7,10,12,14,17,19,21,24,26,28,31,33,35]
params = []
mat = scipy.io.loadmat(fname)
for l in layers:
W = mat['layers'][0,l][0,0][0][0,0]
W = W.transpose(3,2,0,1)
b = mat['layers'][0,l][0,0][0][0,1]
b = b.squeeze()
params += [W, b]
# For the "classic" case of fully-connected layers as GEMM, we need to
# reshape the parameters into the matrices they are.
if not fully_conv:
params[-6] = params[-6].reshape(4096, -1).T
params[-4] = params[-4].squeeze().T
params[-2] = params[-2].squeeze().T
# The mean is actually a single scalar per color channel.
mean = mat['normalization'][0,0][0] # This is H,W,C
mean = np.mean(mean, axis=(0,1))
classes = np.array([cls[0] for cls in mat['classes'][0,0][0][0,:]])
return params, mean, classes
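# Hedged usage sketch (not in the original file): load the pretrained
# weights, then feed images with the per-channel mean subtracted:
#
#     weights, mean, classes = params(large=True, fully_conv=True)
#     # `weights` alternates W, b pairs in layer order; `mean` holds one
#     # value per color channel; `classes` maps class index -> label.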
|
elPistolero/DeepFried2
|
DeepFried2/zoo/vgg.py
|
Python
|
mit
| 2,179
|
__author__ = 'Allison MacLeay'
"""
Issues
---------
GD
Linear - My MSEs are unbelievably good, but predicted values
look good too
Logistic - Either my classifier function is completely wrong
or W is wrong. I assume I could use the linear reg function
but results are mostly less than .1 rather than .5 as
expected
"""
import CS6140_A_MacLeay.utils as utils
import CS6140_A_MacLeay.utils.Tree as mytree
import CS6140_A_MacLeay.utils.Stats as mystats
import CS6140_A_MacLeay.utils.GradientDescent as gd
import CS6140_A_MacLeay.utils.NNet as nnet
import CS6140_A_MacLeay.utils.Perceptron as perc
#import CS6140_A_MacLeay.Homeworks.HW4 as treeHW4
import HW4 as treeHW4
import hw1
import numpy as np
import pandas as pd
import sys
class Model_w():
def __init__(self, w_array=None):
self.w = w_array
def update(self, w_array):
if self.w is None:
self.w = w_array
else:
if len(w_array) != len(self.w):
print "ERROR!!! Lengths are not the same"
# Take the average
sum = 0
for i in range(len(w_array)):
self.w[i] = float(w_array[i] + self.w[i])/2
def dec_or_reg_tree(df_train, df_test, Y):
binary = utils.check_binary(df_train[Y])
if binary:
newtree = treeHW4.TreeOptimal(max_depth=1)
y = list(df_train[Y])
nondf_train = utils.pandas_to_data(df_train)
nondf_test = utils.pandas_to_data(df_test)
newtree.fit(nondf_train, y)
predict = newtree.predict(nondf_train)
error_train = mystats.get_error(predict, y, binary)
y = utils.pandas_to_data(df_test[Y])
predict = newtree.predict(nondf_test)
error_test = mystats.get_error(predict, y, binary)
else:
node = mytree.Node(np.ones(len(df_train)))
hw1.branch_node(node, df_train, 5, Y)
model = mytree.Tree(node)
predict = model.predict_obj()
error_train = mystats.get_error(predict, df_train[Y], binary)
node.presence = np.ones(len(df_test))
hw1.test_node(node, df_test, Y)
test_tree = mytree.Tree(node)
predict = test_tree.predict_obj()
error_test = mystats.get_error(predict, df_test[Y], binary)
return [error_train, error_test]
def linear_reg_errors(df_train, df_test, Y, ridge=False, sigmoid=False):
binary = utils.check_binary(df_train[Y])
error_train = linear_reg(df_train, Y, binary, ridge, sigmoid)
error_test = linear_reg(df_test, Y, binary, ridge, sigmoid)
return [error_train, error_test]
def linear_reg(df, Y, binary=False, ridge=False, sigmoid=False):
means = []
columns = [col for col in df.columns if (col != 'is_spam' and col != 'MEDV' and col != 'y')]
if ridge:
w = mystats.get_linridge_w(df[columns], df[Y], binary)
else:
for col in df.columns:
mean = df[col].mean()
means.append(mean)
df[col] -= mean
w = mystats.get_linreg_w(df[columns], df[Y])
print('w:')
print(w)
predict = mystats.predict(df[columns], w, binary, means=means)
error = mystats.get_error(predict, df[Y], binary)
return error
def k_folds_linear_gd(df_test, df_train, Y):
k = 10
df_test = gd.pandas_to_data(df_test)
k_folds = partition_folds(df_test, k)
model = Model_w()
theta = None
for ki in range(k - 1):
print 'k fold is {}'.format(ki)
data, truth = get_data_and_truth(k_folds[ki])
binary = True
model.update(gd.gradient(data, np.array(truth), .00001, max_iterations=5, binary=binary))
print model.w
if theta is None:
theta, max_acc = get_best_theta(data, truth, model.w, binary, False)
predict = gd.predict_data(data, model.w, binary, False, theta)
error = mystats.get_error(predict, truth, binary)
print 'Error for fold {} is {} with theta = {}'.format(ki, error, theta)
test, truth = get_data_and_truth(k_folds[k-1])
predict = gd.predict_data(test, model.w, binary, False, theta)
test_error = mystats.get_error(predict, truth, binary)
return [error, test_error]
def get_best_theta(data, truth, model, binary, logistic):
best_theta = None
max_acc = 0
modmin = min(model)
modmax = max(model)
for theta_i in range(100):
theta = modmin + float(theta_i)/(modmax - modmin)
predict = gd.predict_data(data, model, binary, False, theta)
acc = mystats.get_error(predict, truth, binary)
if best_theta is None:
best_theta = theta
max_acc = acc
elif acc > max_acc:
best_theta = theta
max_acc = acc
return best_theta, max_acc
def linear_gd_error(df, Y):
binary = utils.check_binary(df[Y])
model = gd.gradient(df, df[Y], .00001, max_iterations=50)
print model
predict = gd.predict(df, model, binary)
print predict
error = mystats.get_error(predict, df[Y], binary)
return error
def linear_gd(df_train, df_test, Y):
""" linear gradient descent """
binary = utils.check_binary(df_train[Y])
model = gd.gradient(df_train, df_train[Y], .00001, max_iterations=50)
print model
predict = gd.predict(df_train, model, binary)
print predict
error_train = mystats.get_error(predict, df_train[Y], binary)
predict = gd.predict(df_test, model, binary)
print predict
error_test = mystats.get_error(predict, df_test[Y], binary)
return [error_train, error_test]
def logistic_gd(df_train, df_test, Y):
""" logistic gradient descent """
binary = utils.check_binary(df_train[Y])
model = gd.logistic_gradient(df_train, df_train[Y], .1, max_iterations=5)
print model
predict = gd.predict(df_train, model, binary, True)
print predict
error_train = mystats.get_error(predict, df_train[Y], binary)
predict = gd.predict(df_test, model, binary, True)
print predict
error_test = mystats.get_error(predict, df_test[Y], binary)
return [error_train, error_test]
def print_results_1(spam, housing):
j = 0
line = ' '
fields = ['Dec or Reg', 'Linear Reg', 'Linear Ridge', 'Linear Grad', 'Log Grad']
for i in fields:
line += ' {} |'.format(i)
print line
line = ' Spam ACC train '
for i in spam:
line += ' {} |'.format(i[0])
print line
line = ' ACC test '
for i in spam:
line += ' {} |'.format(i[1])
print line
line = ' Housing MSE train '
for i in housing:
line += ' {} |'.format(i[0])
print line
line = ' MSE test '
for i in housing:
line += ' {} |'.format(i[1])
print line
def q_1():
h_test, h_train = utils.load_and_normalize_housing_set()
h_results = []
s_results = []
#h_results.append(dec_or_reg_tree(h_train, h_test, 'MEDV')) # MSE - 568 test- 448
#h_results.append(linear_reg_errors(h_train, h_test, 'MEDV')) # MSE - 27 test -14
#h_results.append(linear_reg_errors(h_train, h_test, 'MEDV', True)) # 24176 - 68289
#h_results.append(linear_gd(h_train, h_test, 'MEDV')) # works but MSE too low? .0022 - .0013
#h_results.append(logistic_gd(h_train, h_test, 'MEDV')) # 1.46e_13 - 1.17e+13
s_test, s_train = utils.split_test_and_train(utils.load_and_normalize_spam_data())
s_results.append(dec_or_reg_tree(s_train, s_test, 'is_spam')) # works .845 - .86
s_results.append(linear_reg_errors(s_train, s_test, 'is_spam')) # works .8609 - .903
s_results.append(linear_reg_errors(s_train, s_test, 'is_spam', True)) # works .8416 - .8543
s_results.append(k_folds_linear_gd(s_train, s_test, 'is_spam')) # does not work .6114 - .6114
s_results.append(logistic_gd(s_train, s_test, 'is_spam')) # returns perfect... 1- 1
print_results_1(s_results, h_results)
def q_2():
""" Perceptron """
test, train = utils.load_perceptron_data()
print test[4]
print train.head(5)
model = perc.Perceptron(train, 4, .05, 100)
def q_3():
print 'Run Neural Network for question 3 in homework 2'
nnet.run()
def homework2():
#q_1()
q_2()
#q_3()
def partition_folds(data, k):
if len(data) > k:
array = [[] for _ in range(k)]
else:
array = [[] for _ in range(len(data))]
for i in range(len(data)):
array[i % len(array)].append(data[i])
return array
def get_data_and_truth(data):
print data
x = []
truth = []
for r in range(len(data)):
row = data[r]
x.append(row[:-1])
truth.append(row[-1])
return x, truth
if __name__ =='__main__':
homework2()
|
alliemacleay/MachineLearning_CS6140
|
Homeworks/hw2_new.py
|
Python
|
mit
| 8,649
|
#
# Copyright 2014-2017 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Refer to the README and COPYING files for full details of the license
#
from __future__ import absolute_import
import errno
import io
import logging
import os
import select
import time
from . import constants
from . common import errors
from . common import osutils
from . config import config
from . common.time import monotonic_time
SUDO_NON_INTERACTIVE_FLAG = "-n"
# receive() source names
OUT = "out"
ERR = "err"
log = logging.getLogger("procutils")
class Error(errors.Base):
msg = ("Command {self.cmd} failed with rc={self.rc} out={self.out!r} "
"err={self.err!r}")
def __init__(self, cmd, rc, out, err):
self.cmd = cmd
self.rc = rc
self.out = out
self.err = err
class TimeoutExpired(errors.Base):
msg = "Timeout waiting for process pid={self.pid}"
def __init__(self, pid):
self.pid = pid
def nice(cmd, nice):
command = [constants.EXT_NICE, '-n', str(nice)]
command.extend(cmd)
return command
def ionice(cmd, ioclass, ioclassdata=None):
command = [constants.EXT_IONICE, '-c', str(ioclass)]
if ioclassdata is not None:
command.extend(('-n', str(ioclassdata)))
command.extend(cmd)
return command
def setsid(cmd):
command = [constants.EXT_SETSID]
command.extend(cmd)
return command
def sudo(cmd):
if os.geteuid() == 0:
return cmd
command = [constants.EXT_SUDO, SUDO_NON_INTERACTIVE_FLAG]
command.extend(cmd)
return command
def taskset(cmd, cpu_list):
command = [constants.EXT_TASKSET, "--cpu-list", ",".join(cpu_list)]
command.extend(cmd)
return command
_ANY_CPU = ["0-%d" % (os.sysconf('SC_NPROCESSORS_CONF') - 1)]
_USING_CPU_AFFINITY = config.get('vars', 'cpu_affinity') != ""
def wrap_command(command, with_ioclass=None, ioclassdata=None,
with_nice=None, with_setsid=False, with_sudo=False,
reset_cpu_affinity=True):
if with_ioclass is not None:
command = ionice(command, ioclass=with_ioclass,
ioclassdata=ioclassdata)
if with_nice is not None:
command = nice(command, nice=with_nice)
if with_setsid:
command = setsid(command)
if with_sudo:
command = sudo(command)
# warning: the order of commands matters. If we add taskset
# after sudo, we'll need to configure sudoers to allow both
# 'sudo <command>' and 'sudo taskset <command>', which is
# impractical. On the other hand, using 'taskset sudo <command>'
# is much simpler and delivers the same end result.
if reset_cpu_affinity and _USING_CPU_AFFINITY:
# only VDSM itself should be bound
command = taskset(command, _ANY_CPU)
return command
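# Illustrative sketch (editor's note, not part of the original module).
# Assuming EXT_TASKSET is "/usr/bin/taskset", EXT_SUDO is "/usr/bin/sudo",
# cpu_affinity is configured, the host has 4 CPUs, and we run as non-root:
#
#     wrap_command(["dd", "if=/dev/zero"], with_sudo=True)
#
# would return taskset as the outermost wrapper, per the ordering note above:
#
#     ["/usr/bin/taskset", "--cpu-list", "0-3",
#      "/usr/bin/sudo", "-n", "dd", "if=/dev/zero"]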
def receive(p, timeout=None, bufsize=io.DEFAULT_BUFFER_SIZE):
"""
    Receive data from a process, yielding data read from stdout and stderr
    until the process terminates or the timeout expires.
Unlike Popen.communicate(), this supports a timeout, and allows
reading both stdout and stderr with a single thread.
Example usage::
# Reading data from both stdout and stderr until process
# terminates:
for src, data in cmdutils.receive(p):
if src == cmdutils.OUT:
# handle output
elif src == cmdutils.ERR:
                # handle errors
# Receiving data with a timeout:
try:
received = list(cmdutils.receive(p, timeout=10))
except cmdutils.TimeoutExpired:
# handle timeout
Arguments:
p (`subprocess.Popen`): A subprocess created with
subprocess.Popen or cpopen.CPopen.
timeout (float): Number of seconds to wait for process. Timeout
resolution is limited by the resolution of
`common.time.monotonic_time`, typically 10 milliseconds.
bufsize (int): Number of bytes to read from the process in each
iteration.
Returns:
Generator of tuples (SRC, bytes). SRC may be either
`cmdutils.OUT` or `cmdutils.ERR`, and bytes is a bytes object
read from process stdout or stderr.
Raises:
`cmdutils.TimeoutExpired` if process did not terminate within
the specified timeout.
"""
if timeout is not None:
deadline = monotonic_time() + timeout
remaining = timeout
else:
deadline = None
remaining = None
fds = {}
if p.stdout:
fds[p.stdout.fileno()] = OUT
if p.stderr:
fds[p.stderr.fileno()] = ERR
if fds:
poller = select.poll()
for fd in fds:
poller.register(fd, select.POLLIN)
def discard(fd):
if fd in fds:
del fds[fd]
poller.unregister(fd)
while fds:
log.debug("Waiting for process (pid=%d, remaining=%s)",
p.pid, remaining)
            # Unlike all other time APIs, poll uses milliseconds
remaining_msec = remaining * 1000 if deadline else None
try:
ready = poller.poll(remaining_msec)
except select.error as e:
if e[0] != errno.EINTR:
raise
log.debug("Polling process (pid=%d) interrupted", p.pid)
else:
for fd, mode in ready:
if mode & select.POLLIN:
data = osutils.uninterruptible(os.read, fd, bufsize)
if not data:
log.debug("Fd %d closed, unregistering", fd)
discard(fd)
continue
yield fds[fd], data
else:
log.debug("Fd %d hangup/error, unregistering", fd)
discard(fd)
if deadline:
remaining = deadline - monotonic_time()
if remaining <= 0:
raise TimeoutExpired(p.pid)
_wait(p, deadline)
def _wait(p, deadline=None):
"""
Wait until process terminates, or if deadline is specified,
`common.time.monotonic_time` exceeds deadline.
Raises:
`cmdutils.TimeoutExpired` if process did not terminate within
deadline.
"""
log.debug("Waiting for process (pid=%d)", p.pid)
if deadline is None:
p.wait()
else:
        # We need to wait until the deadline; Popen.wait() does not support a
        # timeout. Python 3 busy-waits in this case with a sleep of 0.0005
        # seconds. In vdsm we cannot allow such busy loops, and we don't need
        # very exact wait times. This loop uses exponential backoff to detect
        # termination quickly if the process exits quickly, while avoiding a
        # busy loop if the process is stuck for a long time. The sleep
        # interval doubles from 0.00390625 up to 1.0 seconds, then stays at
        # 1.0 seconds until the deadline is reached.
timeout = 1.0 / 256
while p.poll() is None:
remaining = deadline - monotonic_time()
if remaining <= 0:
raise TimeoutExpired(p.pid)
time.sleep(min(timeout, remaining))
if timeout < 1.0:
timeout *= 2
log.debug("Process (pid=%d) terminated", p.pid)
|
EdDev/vdsm
|
lib/vdsm/cmdutils.py
|
Python
|
gpl-2.0
| 7,970
|
from setuptools import setup
setup(
    name='NFL_Draftkings',
    packages=['NFL_Draftkings'],
    version='2',
    description='Python wrapper around NFL api that calculates Draftkings scores for players',
    author='Kacper Adach',
    author_email='kacperadach@gmail.com',
    url='https://github.com/kacperadach/NFL_Draftkings',
    download_url='https://github.com/kacperadach/NFL_Draftkings/tarball/2',
    keywords=['nfl', 'draftkings', 'python', 'api', 'wrapper', 'player', 'scores', 'points'],
    classifiers=[],
)
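# Illustrative usage (editor's note, not part of the original file): with this
# setup.py a source distribution can be built and installed locally, e.g.:
#
#     python setup.py sdist
#     pip install dist/NFL_Draftkings-2.tar.gz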
|
kacperadach/draftkings_points_scripts
|
setup.py
|
Python
|
mit
| 526
|
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.signup, name='signup'),
]
|
jnyborg/nospammail
|
backend/login/urls.py
|
Python
|
apache-2.0
| 118
|
from hachoir_parser.video.asf import AsfFile
from hachoir_parser.video.flv import FlvFile
from hachoir_parser.video.mov import MovFile
from hachoir_parser.video.mpeg_video import MPEGVideoFile
from hachoir_parser.video.mpeg_ts import MPEG_TS
from hachoir_parser.video.avchd import AVCHDINDX, AVCHDMOBJ, AVCHDMPLS, AVCHDCLPI
|
kreatorkodi/repository.torrentbr
|
plugin.video.yatp/site-packages/hachoir_parser/video/__init__.py
|
Python
|
gpl-2.0
| 324
|