from debug_toolbar.panels.templates import TemplatesPanel as BaseTemplatesPanel
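# Compatibility shim: newer Django template objects expose a `backend`
# attribute where debug-toolbar's stats code expects `engine`, so the override
# below aliases one to the other before delegating.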
class TemplatesPanel(BaseTemplatesPanel):
def generate_stats(self, *args):
template = self.templates[0]['template']
if not hasattr(template, 'engine') and hasattr(template, 'backend'):
template.engine = template.backend
return super().generate_stats(*args)
|
from django.shortcuts import render, get_object_or_404
from vehicles.context_processor import global_context_processor
from vehicles.models import Vehicle, VehicleMake, Category
from settings.models import SliderImage
from django.core.paginator import Paginator, InvalidPage, EmptyPage
from dynamic_preferences.registries import global_preferences_registry
def home_page(request):
    # instantiate a manager for global preferences
global_preferences = global_preferences_registry.manager()
MAX_VEHICLES_TO_SHOW = global_preferences['homepage__number_of_vehicles']
MAX_CATEGORIES_TO_SHOW = 4
# get list of slider objects
sliders = SliderImage.objects.all()
# get categories to show on homepage
top_categories = Category.objects.get_home_page_categories()
if top_categories:
top_categories = top_categories[:MAX_CATEGORIES_TO_SHOW]
# get recently added vehicles
top_vehicles = Vehicle.objects.all().order_by(
'-timestamp').prefetch_related('images')
if top_vehicles:
top_vehicles = top_vehicles[:MAX_VEHICLES_TO_SHOW]
context = global_context_processor(locals())
return render(request, "home_page.html", context)
def exports_page(request):
context = global_context_processor(locals())
return render(request, "exports_page.html", context)
def how_to_buy(request):
context = global_context_processor(locals())
return render(request, "how_to_buy.html", context)
def category_page(request, slug):
# check if make slug parameter is passed into the url
vehicle_make_slug = request.GET.get('make', None)
# get category by slug
category = Category.objects.get_category_by_slug(slug)
# get all the vehicles by the category and make (if provided)
if vehicle_make_slug:
# get make by slug
make = VehicleMake.objects.get_make_by_slug(vehicle_make_slug)
if category:
vehicles_list = Vehicle.objects.get_vehicles_by_category_and_make(
category, make
).prefetch_related('images')
else:
vehicles_list = Vehicle.objects.get_vehicles_by_make(
make
).prefetch_related('images')
    else:
        # no make filter: get vehicles by category, or all of them if the category is not found
if category:
vehicles_list = Vehicle.objects.get_vehicles_by_category(
category
).prefetch_related('images')
else:
vehicles_list = Vehicle.objects.all().prefetch_related('images')
    # paginate the vehicle list at 16 items per page
paginator = Paginator(vehicles_list, 16)
try:
page = int(request.GET.get("page", '1'))
except ValueError:
page = 1
try:
vehicles = paginator.page(page)
except (InvalidPage, EmptyPage):
vehicles = paginator.page(paginator.num_pages)
makes = get_makes_in_category(category)
context = global_context_processor(locals())
return render(request, "categories_page.html", context)
def vehicle_detail_page(request, category_slug, vehicle_id, vehicle_slug):
# get vehicle details by vehicle_id
vehicle = get_object_or_404(Vehicle, id=vehicle_id)
related_vehicles = Vehicle.objects.get_vehicles_by_category(
vehicle.category)
return render(request, "detail_page.html", global_context_processor(locals()))
def get_makes_in_category(category):
makes_in_category = []
# get all the vehicle objects by category
vehicles_in_category = Vehicle.objects.get_vehicles_by_category(
category=category)
for vehicle in vehicles_in_category:
makes_in_category.append(vehicle.make)
# remove duplicate makes from the list
makes_in_category = list(set(makes_in_category))
makes_in_category = sorted(makes_in_category, key=lambda x: x.v_make)
return makes_in_category
|
import sys
import zlib, base64
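# Note: the block below appears to be machine-generated. The base64/zlib blob
# `_g` packs a grid (its first byte is the number of gzip-decompression
# rounds); gr/gw read and write cells of a 1000-wide grid, td/tm are zero-safe
# division and modulo, `s` is an operand stack, and _0 .. _21 are the states of
# a small state machine dispatched by the loop at the bottom of the file.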
_g = ("Ah+LCAAAAAAABACT7+ZgAAEWhre3/LNvG0iwP1i/yPTlUbXVqdvzlJoi+a3Lj8v6RJl1JZacmaK7/Otuf07ZXEnrN/zZZ+cdV4iexrfrz59Tftsevr0tcO7wz0oLK678"
+ "PLvaHVX/Lff8K6otFRbb/W/369X9D7+oMAiXlZWJlbEzGIQaM4yCUTAKRsEoGPzgnzcjw4w9ejJ35HS6A8KTT0zfPp3dVXBWrHr2qoXeofNfZVm8eZ31+0g2a93585ut"
+ "w3JN9984E/ele8axTZZS1/4XxB6I/8bdWrVmWqrMqqVnDpeUFEb23t0kFaTV171P99WmM7e/nr75LancfFrm1OPBq7oXnf9bc4u/fb3/3oIH/XuqLEPeHm7aK7k69NbU"
+ "j1ON+IS38DrntEX0b9Q9bSi3fJNHZfS+7LDknKDAKz+17ksmzxX7nszEf/ni27IX/L83eufKdO3eW73qcUGUSaGGf9fjO+ecNvY8rjv2ff2Hw4HBfJrnv1rKzVuvl26p"
+ "vrMvWfi4740pH/MS7p499OejfabZ97vdb3Nqb4b/3CLxyEjzg4Hnz617Yp9s/1T2f3VU6Pf2nZ5/lcKOCtzecu+YOz+jZzvnrad7/hg+31n1vtguPv/Tkp0Vh4u/824s"
+ "fMX7Q1acAQDKcaipZwcAAA==")
g = base64.b64decode(_g)[1:]
for i in range(ord(base64.b64decode(_g)[0])):
g = zlib.decompress(g, 16+zlib.MAX_WBITS)
g=list(map(ord, g))
def gr(x,y):
if(x>=0 and y>=0 and x<1000 and y<1515):
return g[y*1000 + x];
return 0;
def gw(x,y,v):
if(x>=0 and y>=0 and x<1000 and y<1515):
g[y*1000 + x]=v;
def td(a,b):
return ((0)if(b==0)else(a//b))
def tm(a,b):
return ((0)if(b==0)else(a%b))
s=[]
def sp():
global s
if (len(s) == 0):
return 0
return s.pop()
def sa(v):
global s
s.append(v)
def sr():
global s
if (len(s) == 0):
return 0
return s[-1]
def _0():
gw(2,0,1000)
gw(3,0,1500000)
sa(gr(3,0)-1)
sa(gr(3,0))
gw(tm(gr(3,0),gr(2,0)),(td(gr(3,0),gr(2,0)))+3,0)
return 1
def _1():
return (2)if(sp()!=0)else(3)
def _2():
sa(sr());
sa(0)
v0=sp()
v1=sp()
sa(v0)
sa(v1)
sa(tm(sr(),gr(2,0)))
v0=sp()
v1=sp()
sa(v0)
sa(v1)
sa(td(sp(),gr(2,0)))
sa(sp()+3)
v0=sp()
v1=sp()
gw(v1,v0,sp())
sa(sr()-1)
v0=sp()
v1=sp()
sa(v0)
sa(v1)
return 1
def _3():
gw(6,0,0)
gw(8,0,1)
sp();
return 4
def _4():
return (21)if(((gr(8,0)*gr(8,0)*4)+(gr(8,0)*6)+2)>gr(3,0))else(5)
def _5():
sa((gr(8,0)+1)*(gr(8,0)+1)*2)
sa(gr(8,0)+1)
gw(9,0,gr(8,0)+1)
return 6
def _6():
global t0
sa(sp()*gr(8,0)*2)
sa(sp()+sp());
t0=sp()
t0=(1)if(t0>gr(3,0))else(0)
return (20)if((t0)!=0)else(7)
def _7():
global t0
global t1
global t2
t0=(gr(9,0)*gr(9,0))-(gr(8,0)*gr(8,0))
gw(2,1,(gr(9,0)*gr(9,0))-(gr(8,0)*gr(8,0)))
t1=gr(8,0)*gr(9,0)*2
gw(3,1,gr(8,0)*gr(9,0)*2)
t1=t1+(gr(9,0)*gr(9,0))+(gr(8,0)*gr(8,0))
gw(4,1,(gr(9,0)*gr(9,0))+(gr(8,0)*gr(8,0)))
t2=t0+t1
gw(6,1,t2)
return (19)if(gr(2,1)>gr(3,1))else(8)
def _8():
sa(1)
sa((1)if(gr(6,1)>gr(3,0))else(0))
return 9
def _9():
return (18)if(sp()!=0)else(10)
def _10():
gw(8,1,sr()*((((gr(2,1)*7)+gr(3,1))*5)+gr(4,1)))
sa(sr()*gr(6,1))
sa(tm(sr(),gr(2,0)))
v0=sp()
v1=sp()
sa(v0)
sa(v1)
sa(td(sp(),gr(2,0)))
sa(sp()+3)
v0=sp()
sa(gr(sp(),v0))
sa(sr());
return (13)if(sp()!=0)else(11)
def _11():
sp();
sa(sr()*gr(6,1))
sa(gr(8,1))
v0=sp()
v1=sp()
sa(v0)
sa(v1)
sa(tm(sr(),gr(2,0)))
v0=sp()
v1=sp()
sa(v0)
sa(v1)
sa(td(sp(),gr(2,0)))
sa(sp()+3)
v0=sp()
v1=sp()
gw(v1,v0,sp())
gw(6,0,gr(6,0)+1)
return 12
def _12():
sa(sp()+1)
sa((1)if((sr()*gr(6,1))>gr(3,0))else(0))
return 9
def _13():
return (17)if((sr()-gr(8,1))!=0)else(14)
def _14():
sp();
sa(1)
return 15
def _15():
return (12)if(sp()!=0)else(16)
def _16():
sa(sr()*gr(6,1))
sa(-1)
v0=sp()
v1=sp()
sa(v0)
sa(v1)
sa(tm(sr(),gr(2,0)))
v0=sp()
v1=sp()
sa(v0)
sa(v1)
sa(td(sp(),gr(2,0)))
sa(sp()+3)
v0=sp()
v1=sp()
gw(v1,v0,sp())
gw(6,0,gr(6,0)-1)
return 12
def _17():
sa((1)if(sp()<0)else(0))
return 15
def _18():
sp();
sa((gr(9,0)+1)*(gr(9,0)+1)*2)
sa(gr(9,0)+1)
gw(9,0,gr(9,0)+1)
return 6
def _19():
global t0
t0=gr(2,1)
gw(2,1,gr(3,1))
gw(3,1,t0)
return 8
def _20():
gw(8,0,gr(8,0)+1)
return 4
def _21():
sys.stdout.write(str(gr(6,0))+" ")
sys.stdout.flush()
return 22
m=[_0,_1,_2,_3,_4,_5,_6,_7,_8,_9,_10,_11,_12,_13,_14,_15,_16,_17,_18,_19,_20,_21]
c=0
while c<22:
c=m[c]()
|
__version__ = '0.8.0'
__author__ = 'Steven Loria'
__license__ = 'MIT'
from webargs.core import Arg, WebargsError, ValidationError, Missing
__all__ = ['Arg', 'WebargsError', 'ValidationError', 'Missing']
|
import numpy
import numpy.linalg
def training(inputs, minvar=0.1):
"""Trains a naive-bayes classifier using inputs
Returns means and variances of the classifiers
"""
return numpy.mean(inputs, axis=0), numpy.maximum(minvar, numpy.var(inputs, axis=0))
def gaussian(input, mu, sigma2):
    """Calculates the Gaussian density value for each input in the array
    """
    # N(mu, sigma2) pdf: 1/sqrt(2*pi*sigma2) * exp(-(x - mu)^2 / (2*sigma2))
    return (1 / numpy.sqrt(2 * numpy.pi * sigma2)) * \
        numpy.exp(-((input - mu) ** 2) / (2 * sigma2))
def likelihood(inputs, means, variances):
    """Likelihood of each input row under the per-dimension Gaussians
    Each element should be in a row!
    """
out = numpy.ones(inputs.shape[0])
    for j in range(inputs.shape[1]):
if variances[j] != 0:
out = out * \
(gaussian (inputs[:,j], means[j], variances[j]))
return out
def naive_bayes(test, train):
"""Implements the whole naive bayes flow.
Returns a likelihood array
"""
m, v = training(train)
return likelihood(test, m, v)
def naive_bayes_multidimensional(test, train):
"""Naive bayes analysis keeping dimensions isolated
"""
m, v = training(train)
out = numpy.ones( (test.shape) )
    for i in range(test.shape[0]):
        for j in range(test.shape[1]):
out[i,j] = out[i,j] * \
(gaussian (test[i,j], m[j], v[j]))
return out
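# A minimal usage sketch (synthetic data, hypothetical shapes): train on
# samples drawn near the origin, then score test points row by row.
if __name__ == '__main__':
    numpy.random.seed(0)
    train = numpy.random.randn(100, 3)      # 100 samples, 3 features
    test = numpy.array([[0.0, 0.0, 0.0],    # near the training mean
                        [5.0, 5.0, 5.0]])   # far from it
    print(naive_bayes(test, train))         # first likelihood >> second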
|
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.home),
url(r'^interviewer/$', views.interviewer),
url(r'^candidate/$', views.candidate),
]
|
import pytest
from ezdxf.entities.appid import AppID
@pytest.fixture
def appid():
return AppID.new(
"FFFF",
dxfattribs={
"name": "EZDXF",
},
)
def test_name(appid):
assert appid.dxf.name == "EZDXF"
|
import os
from twilio.rest import Client
account = os.environ['TWILIO_ACCOUNT_SID']
token = os.environ['TWILIO_AUTH_TOKEN']
client = Client(account, token)
role = client.chat \
.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.roles("RLXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.fetch()
new_permissions = ['sendMediaMessage'] + (role.permissions or [])
role.update(permission=new_permissions)
print(role.friendly_name)
|
from flask import render_template, flash, redirect
from app import app
from .forms import Deck
@app.route('/submit', methods=('GET', 'POST'))
def submit():
form = Deck()
if form.validate_on_submit():
return redirect('/index')
return render_template('submit.html',
title='Create Card',
form=form)
@app.route('/')
@app.route('/index')
def index():
# This is displayed on client's web browser
user = {'nickname': 'Enrique Iglesias'} #fake user
decks = [
{
'title': 'GRE Words',
'cards': [
{
'word': 'combust',
'definition': 'to catch on fire'
},
{
'word': 'phaze',
'definition': 'to be affected'
}
]
},
{
'title': 'Food words',
'cards': [
{
'word': 'amuse bouche',
'definition': 'little serving'
},
{
'word': 'kimchii',
                    'definition': 'fermented cabbage'
}
]
}
]
return render_template('index.html',
title ='Home',
user=user,
posts=decks)
|
"""
rio.blueprints.api_1
~~~~~~~~~~~~~~~~~~~~~
"""
from flask import Blueprint
bp = Blueprint('api_1', __name__)
|
import math
from ..df import DocumentFrequencyVectorCreator
from . import InverseDocumentFrequencyVector
class InverseDocumentFrequencyVectorCreator(DocumentFrequencyVectorCreator):
"""Creates inverse-document-frequency vectors
Inherits from :class:`recommender.vector.abstractvector.VectorCreator`
    :parameter db_connection_str: connection string for a database built with :class:`recommender.vector.vectortablecreator.VectorTableCreator`
    :type db_connection_str: str
:raises: TypeError
"""
def __init__(self, db_connection_str):
super(InverseDocumentFrequencyVectorCreator, self).__init__(db_connection_str)
self._create_inverse_document_frequency_view()
def _create_vector(self, document_id=None):
vector = InverseDocumentFrequencyVector()
with self._get_db_connection() as conn:
cursor = conn.cursor()
self._create_log_function(conn)
values = self._get_vector_values_from_db(cursor)
for value in [] if values is None else values:
vector.add_to_vector(value)
return vector
def _get_vector_values_from_db(self, c):
c.execute(
'''
SELECT
[term_id]
, [name]
, [value]
FROM
[InverseDocumentFrequency]
;
''')
vector_values = []
for result in c.fetchall():
vector_values.append((result[0], result[1], result[2]))
return None if not vector_values else vector_values
def _create_log_function(self, conn):
conn.create_function('log10', 1, InverseDocumentFrequencyVectorCreator.log_10)
@staticmethod
def log_10(x):
"""simply a method calculating log_10 used by the view in :func:`_create_inverse_document_frequency_view`
"""
base = 10
return math.log(x, base)
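    # Together with the view below, this computes the standard idf weight:
    #     idf(t) = log10(N / df(t)),  N = total document count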
def _create_inverse_document_frequency_view(self):
"""Creates a view in the database required for building idf-vectors
"""
with self._get_db_connection() as conn:
self._create_log_function(conn)
c = conn.cursor()
c.execute(
'''
CREATE VIEW IF NOT EXISTS [InverseDocumentFrequency] AS
SELECT
[term_id]
, [name]
, log10
(
CAST ((SELECT [document_count] from [N]) AS REAL) / [df].[value]
)
AS [value]
FROM
[DocumentFrequency] AS [df]
ORDER BY
[term_id]
;
''')
|
from scripts.db_api import accident
def usa_query(hour):
return '''
SELECT count(*), (select count(*) from accident
join vehicle on(acc_id = accident.id)
where country = 'USA'
and vehicle.speed > accident.speed_limit
and vehicle.speed > -1
and accident.speed_limit > 0
and date_part('hour', timestamp) = {0}) as exceeded
from accident
where country = 'USA' and date_part('hour', timestamp) = {0};
'''.format(hour)
def get_value(age, dictionary):
if age not in dictionary:
return 0
return dictionary[age]
if __name__ == '__main__':
print('HOUR\tALL\tEXCEEDED')
    for i in range(24):
usa_count = accident.execute_query(usa_query(i))
print('{0}\t{1}\t{2}'.format(i, usa_count[0][0], usa_count[0][1]))
|
class TestDevice:
def __init__(self, cf):
self.type = cf.get('device_test_type', 'test')
self.host = ('test', 80)
self.mac = [1, 2, 3, 4, 5, 6]
def auth(self):
pass
# RM2/RM4
def check_temperature(self):
return 23.5
# RM4
def check_humidity(self):
return 56
def enter_learning(self):
pass
def check_data(self):
payload = bytearray(5)
payload[0] = 0xAA
payload[1] = 0xBB
payload[2] = 0xCC
payload[3] = 0xDD
payload[4] = 0xEE
return payload
def send_data(self, data):
pass
def check_sensors(self):
return {'temperature': 23.5, 'humidity': 36, 'light': 'dim', 'air_quality': 'normal', 'noise': 'noisy'}
def check_sensors_raw(self):
return {'temperature': 23.5, 'humidity': 36, 'light': 1, 'air_quality': 3, 'noise': 2}
def get_percentage(self):
return 33
def open(self):
pass
def get_state(self):
return {'pwr': 1, 'pwr1': 1, 'pwr2': 0, 'maxworktime': 60, 'maxworktime1': 60, 'maxworktime2': 0, 'idcbrightness': 50}
def check_power(self):
return {'s1': True, 's2': False, 's3': True, 's4': False}
|
import os
import stat
import socket
import paramiko
from transfert.statresult import stat_result
from transfert.resources._resource import _Resource
from transfert.exceptions import TransfertFileExistsError, TransfertFileNotFoundError
class SftpResource(_Resource):
    KNOWN_HOSTS_FILE = '~/.ssh/known_hosts'
GSS_AUTH = False
GSS_KEX = False
_DEFAULT_PORT = 22
def __init__(self, url):
_Resource.__init__(self, url)
self.__client = None
self._transport = None
self._fd = None
def exists(self):
try:
return self.isfile() or self.isdir()
except FileNotFoundError:
return False
def _get_hostkey(self):
try:
            host_keys = paramiko.util.load_host_keys(os.path.expanduser(self.KNOWN_HOSTS_FILE))
            htype = list(host_keys[self.url.host].keys())[0]
            return host_keys[self.url.host][htype]
except (IOError, KeyError):
return None
def _connect(self):
self._transport = paramiko.Transport((self.url.host, self.url.port or self._DEFAULT_PORT))
self._transport.connect(self._get_hostkey(),
self.url.user,
self.url.password,
gss_host=socket.getfqdn(self.url.host),
gss_auth=self.GSS_AUTH,
gss_kex=self.GSS_KEX)
self.__client = paramiko.SFTPClient.from_transport(self._transport)
self.__client.chdir()
def open(self, flags):
self._fd = self._client.open(self.url.path[1:], flags)
@property
def _client(self):
if self.__client is None:
self._connect()
return self.__client
def isfile(self):
try:
with self('r'):
return stat.S_ISREG(self.stat().st_mode)
except IOError:
return False
def isdir(self):
try:
with self('r'):
return stat.S_ISDIR(self.stat().st_mode)
except IOError:
return False
def listdir(self, path=None):
if self.isfile():
yield self
elif self.isdir():
for entry in self._client.listdir(self.url.path[1:] + '/'):
yield self.join(entry)
else:
raise FileNotFoundError(self)
def close(self):
if self._fd:
self._fd.close()
self._fd = None
if self._transport:
self._transport.close()
self._transport = None
if self.__client is not None:
self.__client.close()
self.__client = None
def stat(self):
stat_res = self._client.stat(self.url.path[1:])
return stat_result(
st_atime=stat_res.st_atime,
st_gid=stat_res.st_gid,
st_mode=stat_res.st_mode,
st_mtime=stat_res.st_mtime,
st_size=stat_res.st_size,
st_uid=stat_res.st_uid,
)
def size(self):
return self.stat().st_size
def delete(self):
if self.isfile():
self._client.remove(self.url.path[1:])
elif self.isdir():
self._client.rmdir(self.url.path[1:])
else:
raise TransfertFileNotFoundError(self)
def chmod(self, mode):
self._client.chmod(self.url.path[1:], mode)
def read(self, size):
return iter(lambda: self._fd.read(size), b'')
def write(self, data):
self._fd.write(data)
def mkdir(self, name=None):
# Can be optimized after connection pool setup
if name is None:
if self.isfile():
raise TransfertFileExistsError(self)
elif not self.isdir():
self._client.mkdir(self.url.path[1:])
return self
else:
dire = self.join(name)
if dire.isfile():
raise TransfertFileExistsError(self)
elif not dire.isdir():
return dire.mkdir()
return dire
def __del__(self):
self.close()
|
import unittest
import numpy as np
from collections import Counter
from diogenes.utils import remove_cols,cast_list_of_list_to_sa
import utils_for_tests
from numpy.random import rand
import diogenes.read
import diogenes.utils
from diogenes.modify import remove_cols_where
from diogenes.modify import col_val_eq
from diogenes.modify import col_val_eq_any
from diogenes.modify import col_fewer_than_n_nonzero
from diogenes.modify import where_all_are_true
from diogenes.modify import choose_rows_where
from diogenes.modify import remove_rows_where
from diogenes.modify import row_val_eq
from diogenes.modify import row_val_lt
from diogenes.modify import row_val_between
from diogenes.modify import combine_cols
from diogenes.modify import combine_sum
from diogenes.modify import combine_mean
from diogenes.modify import label_encode
from diogenes.modify import generate_bin
from diogenes.modify import normalize
from diogenes.modify import replace_missing_vals
from diogenes.modify import distance_from_point
class TestModify(unittest.TestCase):
def test_col_val_eq(self):
M = cast_list_of_list_to_sa(
[[1,2,3], [1,3,4], [1,4,5]],
col_names=['height','weight', 'age'])
arguments = [{'func': col_val_eq, 'vals': 1}]
M = remove_cols_where(M, arguments)
correct = cast_list_of_list_to_sa(
[[2,3], [3,4], [4,5]],
col_names=['weight', 'age'])
self.assertTrue(np.array_equal(M, correct))
def test_col_val_eq_any(self):
M = cast_list_of_list_to_sa(
[[1,2,3], [1,3,4], [1,4,5]],
col_names=['height','weight', 'age'])
arguments = [{'func': col_val_eq_any, 'vals': None}]
M = remove_cols_where(M, arguments)
correct = cast_list_of_list_to_sa(
[[2,3], [3,4], [4,5]],
col_names=['weight', 'age'])
self.assertTrue(np.array_equal(M, correct))
def test_col_fewer_than_n_nonzero(self):
M = cast_list_of_list_to_sa(
[[0,2,3], [0,3,4], [1,4,5]],
col_names=['height','weight', 'age'])
arguments = [{'func': col_fewer_than_n_nonzero, 'vals': 2}]
M = remove_cols_where(M, arguments)
correct = cast_list_of_list_to_sa(
[[2,3], [3,4], [4,5]],
col_names=['weight', 'age'])
self.assertTrue(np.array_equal(M, correct))
def test_label_encoding(self):
M = np.array(
[('a', 0, 'Martin'),
('b', 1, 'Tim'),
('b', 2, 'Martin'),
('c', 3, 'Martin')],
dtype=[('letter', 'O'), ('idx', int), ('name', 'O')])
ctrl = np.array(
[(0, 0, 0),
(1, 1, 1),
(1, 2, 0),
(2, 3, 0)],
dtype=[('letter', int), ('idx', int), ('name', int)])
ctrl_classes = {'letter': np.array(['a', 'b', 'c']),
'name': np.array(['Martin', 'Tim'])}
new_M, classes = label_encode(M)
self.assertTrue(np.array_equal(ctrl, new_M))
self.assertEqual(ctrl_classes.keys(), classes.keys())
for key in ctrl_classes:
self.assertTrue(np.array_equal(ctrl_classes[key], classes[key]))
def test_replace_missing_vals(self):
M = np.array([('a', 0, 0.0, 0.1),
('b', 1, 1.0, np.nan),
('', -999, np.nan, 0.0),
('d', 1, np.nan, 0.2),
('', -999, 2.0, np.nan)],
dtype=[('str', 'O'), ('int', int), ('float1', float),
('float2', float)])
ctrl = M.copy()
ctrl['float1'] = np.array([0.0, 1.0, -1.0, -1.0, 2.0])
ctrl['float2'] = np.array([0.1, -1.0, 0.0, 0.2, -1.0])
res = replace_missing_vals(M, 'constant', constant=-1.0)
self.assertTrue(np.array_equal(ctrl, res))
ctrl = M.copy()
ctrl['int'] = np.array([100, 1, -999, 1, -999])
ctrl['float1'] = np.array([100, 1.0, np.nan, np.nan, 2.0])
ctrl['float2'] = np.array([0.1, np.nan, 100, 0.2, np.nan])
res = replace_missing_vals(M, 'constant', missing_val=0, constant=100)
self.assertTrue(utils_for_tests.array_equal(ctrl, res))
ctrl = M.copy()
ctrl['int'] = np.array([0, 1, 1, 1, 1])
res = replace_missing_vals(M, 'most_frequent', missing_val=-999)
self.assertTrue(utils_for_tests.array_equal(ctrl, res))
ctrl = M.copy()
ctrl['float1'] = np.array([0.0, 1.0, 1.0, 1.0, 2.0])
ctrl['float2'] = np.array([0.1, 0.1, 0.0, 0.2, 0.1])
res = replace_missing_vals(M, 'mean', missing_val=np.nan)
self.assertTrue(utils_for_tests.array_equal(ctrl, res))
def test_generate_bin(self):
M = [1, 1, 1, 3, 3, 3, 5, 5, 5, 5, 2, 6]
ctrl = [0, 0, 0, 1, 1, 1, 2, 2, 2, 2, 0, 3]
self.assertTrue(np.array_equal(ctrl, generate_bin(M, 3)))
M = np.array([0.1, 3.0, 0.0, 1.2, 2.5, 1.7, 2])
ctrl = [0, 3, 0, 1, 2, 1, 2]
self.assertTrue(np.array_equal(ctrl, generate_bin(M, 3)))
def test_where_all_are_true(self):
M = [[1,2,3], [2,3,4], [3,4,5]]
        col_names = ['height', 'weight', 'age']
        labels = [0, 0, 1]
M = diogenes.utils.cast_list_of_list_to_sa(
M,
col_names=col_names)
        arguments = [{'func': row_val_eq, 'col_name': 'height', 'vals': 1},
{'func': row_val_lt, 'col_name': 'weight', 'vals': 3},
{'func': row_val_between, 'col_name': 'age', 'vals':
(3, 4)}]
res = where_all_are_true(
M,
arguments)
ctrl = np.array([True, False, False])
self.assertTrue(np.array_equal(res, ctrl))
def test_choose_rows_where(self):
M = [[1,2,3], [2,3,4], [3,4,5]]
        col_names = ['height', 'weight', 'age']
        labels = [0, 0, 1]
M = diogenes.utils.cast_list_of_list_to_sa(
M,
col_names=col_names)
        arguments = [{'func': row_val_eq, 'col_name': 'height', 'vals': 1},
{'func': row_val_lt, 'col_name': 'weight', 'vals': 3},
{'func': row_val_between, 'col_name': 'age', 'vals':
(3, 4)}]
res = choose_rows_where(
M,
arguments)
        ctrl = cast_list_of_list_to_sa([[1,2,3]], col_names=['height', 'weight', 'age'])
self.assertTrue(np.array_equal(res, ctrl))
def test_remove_rows_where(self):
M = [[1,2,3], [2,3,4], [3,4,5]]
        col_names = ['height', 'weight', 'age']
        labels = [0, 0, 1]
M = diogenes.utils.cast_list_of_list_to_sa(
M,
col_names=col_names)
        arguments = [{'func': row_val_eq, 'col_name': 'height', 'vals': 1},
{'func': row_val_lt, 'col_name': 'weight', 'vals': 3},
{'func': row_val_between, 'col_name': 'age', 'vals':
(3, 4)}]
res = remove_rows_where(
M,
arguments)
        ctrl = cast_list_of_list_to_sa([[2,3,4],[3,4,5]], col_names=['height', 'weight', 'age'])
self.assertTrue(np.array_equal(res, ctrl))
def test_combine_cols(self):
M = np.array(
[(0, 1, 2), (3, 4, 5), (6, 7, 8)],
dtype=[('f0', float), ('f1', float), ('f2', float)])
ctrl_sum = np.array([1, 7, 13])
ctrl_mean = np.array([1.5, 4.5, 7.5])
res_sum = combine_cols(M, combine_sum, ('f0', 'f1'))
res_mean = combine_cols(M, combine_mean, ('f1', 'f2'))
self.assertTrue(np.array_equal(res_sum, ctrl_sum))
self.assertTrue(np.array_equal(res_mean, ctrl_mean))
def test_normalize(self):
col = np.array([-2, -1, 0, 1, 2])
res, mean, stddev = normalize(col, return_fit=True)
self.assertTrue(np.allclose(np.std(res), 1.0))
self.assertTrue(np.allclose(np.mean(res), 0.0))
col = np.arange(10)
res = normalize(col, mean=mean, stddev=stddev)
self.assertTrue(np.allclose(res, (col - mean) / stddev))
def test_distance_from_point(self):
# Coords according to https://tools.wmflabs.org/geohack/
# Paris
lat_origin = 48.8567
lng_origin = 2.3508
# New York, Beijing, Jerusalem
lat_col = [40.7127, 39.9167, 31.7833]
lng_col = [-74.0059, 116.3833, 35.2167]
# According to http://www.movable-type.co.uk/scripts/latlong.html
# (Rounds to nearest km)
ctrl = np.array([5837, 8215, 3331])
res = distance_from_point(lat_origin, lng_origin, lat_col, lng_col)
# get it right within 1km
self.assertTrue(np.allclose(ctrl, res, atol=1, rtol=0))
if __name__ == '__main__':
unittest.main()
|
from .base import BaseType
class SavedActionApproval(BaseType):
_soap_tag = 'saved_action_approval'
def __init__(self):
BaseType.__init__(
self,
simple_properties={'id': int,
'name': str,
'approved_flag': int},
complex_properties={'metadata': MetadataList},
list_properties={},
)
self.id = None
self.name = None
self.approved_flag = None
self.metadata = None
# deferred import, presumably kept at module bottom to avoid a circular import
from .metadata_list import MetadataList
|
import unittest
import os
from os.path import dirname
import sys
import json
from rtree import index
from . import ROOT
from geotweet.mapreduce.utils.lookup import project, SpatialLookup
testdata = os.path.join(dirname(os.path.abspath(__file__)), 'testdata')
def read(geojson):
return json.loads(open(os.path.join(testdata, geojson), 'r').read())
"""
P53000
______
| |
| |
|______| Polygon 2
P3200
______
| |
| P |
|______| Polygon 2
"""
POLYGON_1 = read('polygon_102500_1.geojson')
POLYGON_2 = read('polygon_102500_2.geojson')
POINT_WITHIN = read('point_within.geojson')
POINT_53000M = read('point_53000m.geojson')
POINT_3200M = read('point_3200m.geojson')
def init_polygon_1_index():
location = SpatialLookup()
idx = index.Rtree()
polygon = location._build_obj(POLYGON_1)
location.data_store[1] = polygon
idx.insert(1, polygon['geometry'].bounds)
location.idx = idx
return location
def init_polygon_2_index():
location = init_polygon_1_index()
polygon = location._build_obj(POLYGON_2)
location.data_store[2] = polygon
location.idx.insert(2, polygon['geometry'].bounds)
return location
class GetObjectBasic(unittest.TestCase):
def setUp(self):
self.location = init_polygon_1_index()
def assert_found(self, point):
found = self.location.get_object(point)
error = "get_object failed to return object"
self.assertIsNotNone(found, error)
def assert_none(self, point):
found = self.location.get_object(point)
error = "get_object should return None: Actual < {0} >".format(found)
self.assertIsNone(found, error)
def test_basic(self):
self.assert_found(project(POINT_WITHIN['geometry']['coordinates']))
self.assert_none(project(POINT_3200M['geometry']['coordinates']))
self.assert_none(project(POINT_53000M['geometry']['coordinates']))
def test_buffer_none(self):
point = project(POINT_3200M['geometry']['coordinates'])
found = self.location.get_object(point, buffer_size=0)
self.assertIsNone(found)
def test_buffer_outside_buffer(self):
point = project(POINT_3200M['geometry']['coordinates'])
found = self.location.get_object(point, buffer_size=3000)
self.assertIsNone(found)
def test_buffer_within_buffer(self):
point = project(POINT_3200M['geometry']['coordinates'])
found = self.location.get_object(point, buffer_size=4000)
self.assertIsNotNone(found)
class GetObjectOrder(unittest.TestCase):
def setUp(self):
self.location = init_polygon_2_index()
def assert_found(self, point):
found = self.location.get_object(point)
error = "get_object failed to return object"
self.assertIsNotNone(found, error)
def assert_none(self, point):
found = self.location.get_object(point)
error = "get_object should return None: Actual < {0} >".format(found)
self.assertIsNone(found, error)
def test_buffer_nearest1(self):
point = project(POINT_WITHIN['geometry']['coordinates'])
found = self.location.get_object(point, buffer_size=100000)
self.assertIsNotNone(found, "get_object failed to return object")
error = "get_object failed to return object with id=polygon1: Actual < {0} >"
self.assertEqual('polygon1', found['id'], error.format(found['id']))
def test_buffer_nearest2(self):
point = project(POINT_3200M['geometry']['coordinates'])
found = self.location.get_object(point, buffer_size=100000)
self.assertIsNotNone(found, "get_object failed to return object")
error = "get_object failed to return object with id=polygon1: Actual < {0} >"
self.assertEqual('polygon1', found['id'], error.format(found['id']))
if __name__ == "__main__":
unittest.main()
|
import os
from PIL import Image
import glob
start_dir = "images/full_sprites/opaque/kanto/"
end_dir = "images/full_sprites/transparent/kanto/"
iconmap = os.listdir(start_dir)
print(len(iconmap))
for filename in iconmap:
image = Image.open(start_dir+filename)
image_width, image_height = image.size
print( "the image will by %d by %d" % (image_width, image_height))
print( "creating image...")
master = Image.new(
mode='RGBA',
size=(image_width, image_height),
color=(0,0,0,0)) # fully transparent
master.paste(image,(0,0))
data = master.getdata()
newdata = []
for item in data:
if item[0] == 255 and item[1] == 255 and item[2] == 255:
newdata.append((255,255,255,0))
else:
newdata.append(item)
master.putdata(newdata)
print( "saving master.jpg...")
master.save(end_dir+filename)
print( "saved!")
|
"""
Fields represent CronTrigger options which map to :class:`~datetime.datetime`
fields.
"""
from calendar import monthrange
from apscheduler.triggers.cron.expressions import (
AllExpression, RangeExpression, WeekdayPositionExpression, LastDayOfMonthExpression, WeekdayRangeExpression)
__all__ = ('MIN_VALUES', 'MAX_VALUES', 'DEFAULT_VALUES', 'BaseField', 'WeekField', 'DayOfMonthField', 'DayOfWeekField')
MIN_VALUES = {'year': 1970, 'month': 1, 'day': 1, 'week': 1, 'day_of_week': 0, 'hour': 0, 'minute': 0, 'second': 0}
MAX_VALUES = {'year': 2 ** 63, 'month': 12, 'day': 31, 'week': 53, 'day_of_week': 6, 'hour': 23, 'minute': 59,
              'second': 59}
DEFAULT_VALUES = {'year': '*', 'month': 1, 'day': 1, 'week': '*', 'day_of_week': '*', 'hour': 0, 'minute': 0,
'second': 0}
class BaseField(object):
REAL = True
COMPILERS = [AllExpression, RangeExpression]
def __init__(self, name, exprs, is_default=False):
self.name = name
self.is_default = is_default
self.compile_expressions(exprs)
def get_min(self, dateval):
return MIN_VALUES[self.name]
def get_max(self, dateval):
return MAX_VALUES[self.name]
def get_value(self, dateval):
return getattr(dateval, self.name)
def get_next_value(self, dateval):
smallest = None
for expr in self.expressions:
value = expr.get_next_value(dateval, self)
if smallest is None or (value is not None and value < smallest):
smallest = value
return smallest
def compile_expressions(self, exprs):
self.expressions = []
# Split a comma-separated expression list, if any
exprs = str(exprs).strip()
if ',' in exprs:
for expr in exprs.split(','):
self.compile_expression(expr)
else:
self.compile_expression(exprs)
def compile_expression(self, expr):
for compiler in self.COMPILERS:
match = compiler.value_re.match(expr)
if match:
compiled_expr = compiler(**match.groupdict())
self.expressions.append(compiled_expr)
return
raise ValueError('Unrecognized expression "%s" for field "%s"' % (expr, self.name))
def __str__(self):
expr_strings = (str(e) for e in self.expressions)
return ','.join(expr_strings)
def __repr__(self):
return "%s('%s', '%s')" % (self.__class__.__name__, self.name, self)
class WeekField(BaseField):
REAL = False
def get_value(self, dateval):
return dateval.isocalendar()[1]
class DayOfMonthField(BaseField):
COMPILERS = BaseField.COMPILERS + [WeekdayPositionExpression, LastDayOfMonthExpression]
def get_max(self, dateval):
return monthrange(dateval.year, dateval.month)[1]
class DayOfWeekField(BaseField):
REAL = False
COMPILERS = BaseField.COMPILERS + [WeekdayRangeExpression]
def get_value(self, dateval):
return dateval.weekday()
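# A small usage sketch (assuming the default expression grammar): a
# comma-separated spec compiles to one expression per part and round-trips
# through str():
#
#     field = BaseField('minute', '0,30')
#     str(field)  # -> '0,30'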
|
from django.utils.translation import ugettext_lazy as _
from reviewboard.admin.read_only import is_site_read_only_for
from reviewboard.reviews.actions import (BaseReviewRequestAction,
BaseReviewRequestMenuAction)
from reviewboard.reviews.features import general_comments_feature
from reviewboard.reviews.models import ReviewRequest
from reviewboard.site.urlresolvers import local_site_reverse
from reviewboard.urls import diffviewer_url_names
class CloseMenuAction(BaseReviewRequestMenuAction):
"""A menu action for closing the corresponding review request."""
action_id = 'close-review-request-action'
label = _('Close')
def should_render(self, context):
"""Return whether the action should render.
Args:
context (dict):
The current render context.
Returns:
bool:
Whether the action should render.
"""
review_request = context['review_request']
user = context['request'].user
return (review_request.status == ReviewRequest.PENDING_REVIEW and
not is_site_read_only_for(user) and
(context['request'].user.pk == review_request.submitter_id or
(context['perms']['reviews']['can_change_status'] and
review_request.public)))
class SubmitAction(BaseReviewRequestAction):
"""An action for submitting the review request."""
action_id = 'submit-review-request-action'
label = _('Submitted')
def should_render(self, context):
"""Return whether the action should render.
Args:
context (dict):
The current render context.
Returns:
bool:
Whether the action should render.
"""
return (context['review_request'].public and
not is_site_read_only_for(context['request'].user))
class DiscardAction(BaseReviewRequestAction):
"""An action for discarding the review request."""
action_id = 'discard-review-request-action'
label = _('Discarded')
class DeleteAction(BaseReviewRequestAction):
"""An action for permanently deleting the review request."""
action_id = 'delete-review-request-action'
label = _('Delete Permanently')
def should_render(self, context):
"""Return whether the action should render.
Args:
context (dict):
The current render context.
Returns:
bool:
Whether the action should render.
"""
return (context['perms']['reviews']['delete_reviewrequest'] and
not is_site_read_only_for(context['request'].user))
class UpdateMenuAction(BaseReviewRequestMenuAction):
"""A menu action for updating the corresponding review request."""
action_id = 'update-review-request-action'
label = _('Update')
def should_render(self, context):
"""Return whether the action should render.
Args:
context (dict):
The current render context.
Returns:
bool:
Whether the action should render.
"""
review_request = context['review_request']
user = context['request'].user
return (review_request.status == ReviewRequest.PENDING_REVIEW and
not is_site_read_only_for(user) and
(user.pk == review_request.submitter_id or
context['perms']['reviews']['can_edit_reviewrequest']))
class UploadDiffAction(BaseReviewRequestAction):
"""An action for updating/uploading a diff for the review request."""
action_id = 'upload-diff-action'
def get_label(self, context):
"""Return this action's label.
The label will change depending on whether or not the corresponding
review request already has a diff.
Args:
context (django.template.Context):
The collection of key-value pairs from the template.
Returns:
unicode: The label that displays this action to the user.
"""
review_request = context['review_request']
draft = review_request.get_draft(context['request'].user)
if (draft and draft.diffset) or review_request.get_diffsets():
return _('Update Diff')
return _('Upload Diff')
def should_render(self, context):
"""Return whether or not this action should render.
If the corresponding review request has a repository, then an upload
diff form exists, so we should render this UploadDiffAction.
Args:
context (django.template.Context):
The collection of key-value pairs available in the template
just before this action is to be rendered.
Returns:
bool: Determines if this action should render.
"""
return (context['review_request'].repository_id is not None and
not is_site_read_only_for(context['request'].user))
class UploadFileAction(BaseReviewRequestAction):
"""An action for uploading a file for the review request."""
action_id = 'upload-file-action'
label = _('Add File')
class DownloadDiffAction(BaseReviewRequestAction):
"""An action for downloading a diff from the review request."""
action_id = 'download-diff-action'
label = _('Download Diff')
def get_url(self, context):
"""Return this action's URL.
Args:
context (django.template.Context):
The collection of key-value pairs from the template.
Returns:
unicode: The URL to invoke if this action is clicked.
"""
match = context['request'].resolver_match
# We want to use a relative URL in the diff viewer as we will not be
# re-rendering the page when switching between revisions.
if match.url_name in diffviewer_url_names:
return 'raw/'
return local_site_reverse('raw-diff', context['request'], kwargs={
'review_request_id': context['review_request'].display_id,
})
def get_hidden(self, context):
"""Return whether this action should be initially hidden to the user.
Args:
context (django.template.Context):
The collection of key-value pairs from the template.
Returns:
bool: Whether this action should be initially hidden to the user.
"""
match = context['request'].resolver_match
if match.url_name in diffviewer_url_names:
return match.url_name == 'view-interdiff'
return super(DownloadDiffAction, self).get_hidden(context)
def should_render(self, context):
"""Return whether or not this action should render.
Args:
context (django.template.Context):
The collection of key-value pairs available in the template
just before this action is to be rendered.
Returns:
bool: Determines if this action should render.
"""
review_request = context['review_request']
request = context['request']
match = request.resolver_match
# If we're on a diff viewer page, then this DownloadDiffAction should
# initially be rendered, but possibly hidden.
if match.url_name in diffviewer_url_names:
return True
return review_request.repository_id is not None
class EditReviewAction(BaseReviewRequestAction):
"""An action for editing a review intended for the review request."""
action_id = 'review-action'
label = _('Review')
def should_render(self, context):
"""Return whether the action should render.
Args:
context (dict):
The current render context.
Returns:
bool:
Whether the action should render.
"""
user = context['request'].user
return (user.is_authenticated and
not is_site_read_only_for(user))
class AddGeneralCommentAction(BaseReviewRequestAction):
"""An action for adding a new general comment to a review."""
action_id = 'general-comment-action'
label = _('Add General Comment')
def should_render(self, context):
"""Return whether the action should render.
Args:
context (dict):
The current render context.
Returns:
bool:
Whether the action should render.
"""
request = context['request']
user = request.user
return (user.is_authenticated and
not is_site_read_only_for(user) and
general_comments_feature.is_enabled(request=request))
class ShipItAction(BaseReviewRequestAction):
"""An action for quickly approving the review request without comments."""
action_id = 'ship-it-action'
label = _('Ship It!')
def should_render(self, context):
"""Return whether the action should render.
Args:
context (dict):
The current render context.
Returns:
bool:
Whether the action should render.
"""
user = context['request'].user
return (user.is_authenticated and
not is_site_read_only_for(user))
def get_default_actions():
"""Return a copy of all the default actions.
Returns:
list of BaseReviewRequestAction: A copy of all the default actions.
"""
return [
CloseMenuAction([
SubmitAction(),
DiscardAction(),
DeleteAction(),
]),
UpdateMenuAction([
UploadDiffAction(),
UploadFileAction(),
]),
DownloadDiffAction(),
EditReviewAction(),
AddGeneralCommentAction(),
ShipItAction(),
]
|
"""
Retrieves menu from Drupal site
"""
from aashestrap.models import Menu
from django.core.management.base import BaseCommand
import urllib2
from BeautifulSoup import BeautifulSoup
from django.core.exceptions import ObjectDoesNotExist
class Command(BaseCommand):
def handle(self, *args, **options):
get_menu()
def get_menu():
# Try to retrieve the existing menu object
try:
menu = Menu.objects.get(pk=1)
# If there isn't one, instantiate one
except ObjectDoesNotExist:
menu = Menu(pk=1)
# Request aashe home page
request = urllib2.Request('http://www.aashe.org/')
response = urllib2.urlopen(request)
# Soup it
soup = BeautifulSoup(response)
# Search and extract the footer
results = soup.findAll(id="block-menu_block-3")
footer = results[0].__str__('utf8')
# Search and extract the navigation bar
results = soup.findAll(id="navigation")
header = results[0].__str__('utf8')
menu.footer = footer
menu.header = header
menu.save()
|
from __future__ import unicode_literals
from django.apps import AppConfig
class FileuploadConfig(AppConfig):
name = 'fileupload'
|
"""
Django settings for blog project.
Generated by 'django-admin startproject' using Django 1.9.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = 'sm@g)(fbwdh5wc*xe@j++m9rh^uza5se9a57c5ptwkg*b@ki0x'
DEBUG = True
ALLOWED_HOSTS = []
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'posts',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'blog.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'blog.wsgi.application'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
#'/var/www/static/',
]
STATIC_ROOT = os.path.join(os.path.dirname(BASE_DIR), "static_cdn")
MEDIA_URL = "/media/"
MEDIA_ROOT = os.path.join(os.path.dirname(BASE_DIR), "media_cdn")
|
import cPickle
import logging
import numpy
import os
import time
from collections import deque
from copy import deepcopy
from datetime import datetime
from pytz import timezone
from threading import Event, Thread
from coinbase.wallet.client import Client
from jarvis.utils.messaging.client import TwilioMessenger
from jarvis.modules.base import JarvisThreadedModule
def configure_debug_logging():
logging.basicConfig(level=logging.DEBUG)
def load_coinbase_config():
coinbase_key = os.environ.get('COINBASE_KEY')
coinbase_secret = os.environ.get('COINBASE_SECRET')
if not all([coinbase_key, coinbase_secret]):
raise Exception('Coinbase config not configured properly')
return (coinbase_key, coinbase_secret)
def load_from_file(path):
if os.path.exists(path):
with open(path,'r') as f:
return cPickle.loads(f.read())
return None
def store_to_file(path, obj):
with open(path,'w') as f:
f.write(cPickle.dumps(obj))
class CoinbaseClient(object):
def __init__(self):
self.api_key, self.api_secret = load_coinbase_config()
self.client = Client(self.api_key, self.api_secret)
def do(self, func, *args, **kwargs):
return getattr(self.client,func)(*args, **kwargs)
class TickerTimeseries(object):
def __init__(self, max_length, recent_cutoff,
load_path=None, poll_period=30, name=None):
self.timeseries = load_from_file(load_path)
if not self.timeseries:
self.timeseries = deque(maxlen=max_length)
self.large_movement_timeseries = deepcopy(self.timeseries)
self.recent_cutoff = recent_cutoff
self.max_length = max_length
self.poll_period = poll_period
self.name = name
def append(self, val):
self.timeseries.append(val)
self.large_movement_timeseries.append(val)
@property
def head(self):
return self.timeseries[-1]
@property
def tail(self):
return self.timeseries[0]
@property
def mean(self):
return numpy.mean(self.timeseries)
@property
def length(self):
return len(self.timeseries)
@classmethod
def anomaly(cls, series, recent_cutoff):
        '''
        Naive anomaly detection. Given a series, computes the
        standard deviation and returns True if any of the values
        in the last :recent_cutoff points are more than
        3 standard deviations above the mean.
        :series array of timeseries data
        :recent_cutoff only consider anomalies on the most recent points
        '''
std_dev = numpy.std(series)
mean = numpy.mean(series)
for point in series[-recent_cutoff:]:
abs_diff = abs(point - mean)
if abs_diff >= std_dev * 3 and abs_diff >= 3:
return True
return False
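    # Quick sketch: anomaly([0] * 10 + [100], 1) returns True, since the final
    # point sits ~3.2 population standard deviations above the mean.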
def is_anomalous(self):
# If we don't have enough data, don't do anything
if len(self.timeseries) < self.recent_cutoff:
return False
return self.anomaly(self.timeseries, self.recent_cutoff)
@classmethod
    def large_movement(cls, series):
if float(abs(series[0] - series[-1])) / series[0] > 0.03:
return True
return False
def is_large_movement(self):
if self.large_movement(self.large_movement_timeseries):
msg = MOVEMENT_NOTIFICATION % \
(self.name,
len(self.large_movement_timeseries) * self.poll_period / 60,
self.large_movement_timeseries[0],
self.large_movement_timeseries[-1])
self.large_movement_timeseries = deque(
[self.large_movement_timeseries[-1]],
maxlen=self.max_length)
return msg
return None
ANOMALY_NOTIFICATION = \
'''Anomalous bitcoin price activity detected. Mean price over the
past %d minutes is %.2f, current price is %.2f'''
MOVEMENT_NOTIFICATION = \
'''Large %s movement detected. Price %d minutes ago was %.2f,
current price is %.2f'''
class CoinbaseWatcher(object):
POLL_PERIOD = 30
RECENT_DATA = 60 * 5
MAX_LENGTH_MULTIPLE = 12 * 24
COOLDOWN_TICKS = 10
BTCTICKERPATH = "/tmp/bitccointicker"
ETHTICKERPATH = "/tmp/ethticker"
MSGPATH = "/tmp/bitcoinmsgs"
def __init__(self, stop):
recent_points = self.RECENT_DATA / self.POLL_PERIOD
self.twilio_client = TwilioMessenger()
self.coinbase_client = CoinbaseClient()
self.btc_timeseries = TickerTimeseries(
max_length=recent_points*self.MAX_LENGTH_MULTIPLE,
recent_cutoff=recent_points,
load_path=self.BTCTICKERPATH,
poll_period=self.POLL_PERIOD,
name='Bitcoin')
self.eth_timeseries = TickerTimeseries(
max_length=recent_points*self.MAX_LENGTH_MULTIPLE,
recent_cutoff=recent_points,
load_path=self.ETHTICKERPATH,
poll_period=self.POLL_PERIOD,
name='Ethereum')
self.cooldown = 0
self.stop = stop
self.sent_messages = load_from_file(self.MSGPATH)
if not self.sent_messages:
self.sent_messages = deque(maxlen=3)
@property
def raw_btc_timeseries(self):
return self.btc_timeseries.timeseries
@property
def raw_eth_timeseries(self):
return self.eth_timeseries.timeseries
@property
def in_cooldown(self):
self.cooldown = max(0,self.cooldown - 1)
if self.cooldown <= 0:
return False
return True
def initiate_cooldown(self):
self.cooldown = self.COOLDOWN_TICKS
def start(self):
while not self.stop.is_set():
try:
spot_price = self.coinbase_client.do(
'get_spot_price',currency_pair='BTC-USD')
self.btc_timeseries.append(float(spot_price['amount']))
# coinbase client doesn't actually support currency_pair
rates = self.coinbase_client.do('get_exchange_rates')
self.eth_timeseries.append(1 / float(rates['rates']['ETH']))
if not self.in_cooldown:
msg = self.btc_timeseries.is_large_movement()
if msg:
self.twilio_client.send_message(msg)
self.sent_messages.append((msg, time.time()))
self.initiate_cooldown()
msg = self.eth_timeseries.is_large_movement()
if msg:
self.twilio_client.send_message(msg)
self.sent_messages.append((msg, time.time()))
self.initiate_cooldown()
except Exception:
logging.exception("Exception in main loop")
time.sleep(self.POLL_PERIOD)
store_to_file(self.MSGPATH,self.sent_messages)
store_to_file(self.BTCTICKERPATH,self.btc_timeseries.timeseries)
store_to_file(self.ETHTICKERPATH,self.eth_timeseries.timeseries)
class CoinbaseWatcherModule(JarvisThreadedModule):
def init_module(self, event):
self.coinbase_watcher = CoinbaseWatcher(event)
return Thread(target=self.coinbase_watcher.start)
def get_recent_messages(self):
return [
(msg, self.convert_timestamp(timestamp)) \
for (msg,timestamp) in \
reversed(self.coinbase_watcher.sent_messages)
]
@classmethod
def convert_timestamp(cls, timestamp):
pacific = timezone("US/Pacific-New")
utc = timezone("UTC")
return utc.localize(datetime.utcfromtimestamp(
timestamp)).astimezone(pacific).strftime('%Y-%m-%d %H:%M:%S')
def get_bitcoin_ticker_timeseries(self):
seconds_per_point = self.coinbase_watcher.POLL_PERIOD
now = time.time()
return [
{
'date' : self.convert_timestamp(now-seconds_per_point*i),
'value' : val
} for i,val in enumerate(reversed(
self.coinbase_watcher.raw_btc_timeseries))
][::-1]
def get_eth_ticker_timeseries(self):
seconds_per_point = self.coinbase_watcher.POLL_PERIOD
now = time.time()
return [
{
'date' : self.convert_timestamp(now-seconds_per_point*i),
'value' : val
} for i,val in enumerate(reversed(
self.coinbase_watcher.raw_eth_timeseries))
][::-1]
if __name__ == '__main__':
configure_debug_logging()
    watcher = CoinbaseWatcher(Event())
watcher.start()
|
def NUMBER(value):
return ("NUMBER", value)
def NAME(value):
return ("NAME", value)
def SYMBOL(value):
return ("SYMBOL", value)
def SEMICOLON():
return ("SEMICOLON", )
def OPENPAREN():
return ("OPENPAREN", )
def CLOSEPAREN():
return ("CLOSEPAREN", )
def OPENBRACKET():
return ("OPENBRACKET", )
def CLOSEBRACKET():
return ("CLOSEBRACKET", )
def ASSIGNMENT():
return ("ASSIGNMENT", )
def EOF():
return ("EOF", )
def FUNCTIONDEF():
return ("FUNCTIONDEF", )
def FUNCTIONRETURN():
return ("FUNCTIONRETURN", )
|
"""
Talon.One API
The Talon.One API is used to manage applications and campaigns, as well as to integrate with your application. The operations in the _Integration API_ section are used to integrate with our platform, while the other operations are used to manage applications and campaigns. ### Where is the API? The API is available at the same hostname as these docs. For example, if you are reading this page at `https://mycompany.talon.one/docs/api/`, the URL for the [updateCustomerProfile][] operation is `https://mycompany.talon.one/v1/customer_profiles/id` [updateCustomerProfile]: #operation--v1-customer_profiles--integrationId--put # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import talon_one
from talon_one.models.update_loyalty_program import UpdateLoyaltyProgram # noqa: E501
from talon_one.rest import ApiException
class TestUpdateLoyaltyProgram(unittest.TestCase):
"""UpdateLoyaltyProgram unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def make_instance(self, include_optional):
"""Test UpdateLoyaltyProgram
include_option is a boolean, when False only required
params are included, when True both required and
optional params are included """
# model = talon_one.models.update_loyalty_program.UpdateLoyaltyProgram() # noqa: E501
if include_optional :
return UpdateLoyaltyProgram(
title = '0',
description = '0',
subscribed_applications = [
56
],
default_validity = '0',
default_pending = '0',
allow_subledger = True
)
else :
return UpdateLoyaltyProgram(
)
def testUpdateLoyaltyProgram(self):
"""Test UpdateLoyaltyProgram"""
inst_req_only = self.make_instance(include_optional=False)
inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
|
import os
import re
from . import utils
PARTIAL = re.compile(r'(?P<tag>{{>\s*(?P<name>.+?)\s*}})')
PARTIAL_CUSTOM = re.compile(r'^(?P<whitespace>\s*)(?P<tag>{{>\s*(?P<name>.+?)\s*}}(?(1)\r?\n?))', re.M)
def build(template, partials=None):
template = '{}\n'.format(template)
for regex in (PARTIAL_CUSTOM, PARTIAL):
for match in regex.finditer(template):
if partials is None:
substitution = get_template(match.group('name'))
else:
substitution = partials.get(match.group('name'), u'')
if substitution:
try:
substitution = '\n'.join('{}{}'.format(match.group('whitespace'), s) if s else s for s in substitution.split('\n'))
except IndexError:
pass
else:
substitution = substitution[len(match.group('whitespace')):]
template = template.replace(match.group('tag'), substitution)
return utils.purify(template)
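# A hedged usage sketch (hypothetical template and partials; assumes
# utils.purify only normalizes trailing whitespace):
#
#     build('Hello, {{> who }}!', partials={'who': 'world'})
#     # -> roughly 'Hello, world!'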
|
import socket as sk
from kivy.logger import Logger
def getWebsite():
return "www.google.com"
def getIpPort():
sock_info=sk.getaddrinfo(getWebsite(),80,proto=sk.IPPROTO_TCP)
return sock_info[0][-1]
def checkInternet():
sock=sk.socket()
sock.settimeout(1)
try:
sock.connect(getIpPort())
        sock.send(b'GET / HTTP/1.0\r\n\r\n')
resp=sock.recv(8)
sock.shutdown(1)
sock.close()
if(resp==b'HTTP/1.0'):
return True
else:
return False
except Exception as e:
Logger.error(e)
return False
def checkSpeed():
import psutil
import time
init=[psutil.net_io_counters().bytes_sent,psutil.net_io_counters().bytes_recv]
time.sleep(1)
final=[psutil.net_io_counters().bytes_sent,psutil.net_io_counters().bytes_recv]
readings=[(final[0]-init[0]),(final[1]-init[1])]
print(readings)
if readings[0] < 200 or readings[1] < 200:
return False
else:
return True
|
import os
import subprocess
import tempfile
from awscli.customizations.emr import constants
from awscli.customizations.emr import emrutils
from awscli.customizations.emr import sshutils
from awscli.customizations.emr.command import Command
KEY_PAIR_FILE_HELP_TEXT = '\nA value for the variable Key Pair File ' \
'can be set in the AWS CLI config file using the ' \
'"aws configure set emr.key_pair_file <value>" command.\n'
class Socks(Command):
NAME = 'socks'
DESCRIPTION = ('Create a socks tunnel on port 8157 from your machine '
'to the master.\n%s' % KEY_PAIR_FILE_HELP_TEXT)
ARG_TABLE = [
{'name': 'cluster-id', 'required': True,
'help_text': 'Cluster Id of cluster you want to ssh into'},
{'name': 'key-pair-file', 'required': True,
'help_text': 'Private key file to use for login'},
]
def _run_main_command(self, parsed_args, parsed_globals):
try:
master_dns = sshutils.validate_and_find_master_dns(
session=self._session,
parsed_globals=parsed_globals,
cluster_id=parsed_args.cluster_id)
key_file = parsed_args.key_pair_file
sshutils.validate_ssh_with_key_file(key_file)
f = tempfile.NamedTemporaryFile(delete=False)
if (emrutils.which('ssh') or emrutils.which('ssh.exe')):
command = ['ssh', '-o', 'StrictHostKeyChecking=no', '-o',
'ServerAliveInterval=10', '-ND', '8157', '-i',
parsed_args.key_pair_file, constants.SSH_USER +
'@' + master_dns]
else:
command = ['putty', '-ssh', '-i', parsed_args.key_pair_file,
constants.SSH_USER + '@' + master_dns, '-N', '-D',
'8157']
print(' '.join(command))
rc = subprocess.call(command)
return rc
except KeyboardInterrupt:
print('Disabling Socks Tunnel.')
return 0
class SSH(Command):
NAME = 'ssh'
DESCRIPTION = ('SSH into master node of the cluster.\n%s' %
KEY_PAIR_FILE_HELP_TEXT)
ARG_TABLE = [
{'name': 'cluster-id', 'required': True,
'help_text': 'Cluster Id of cluster you want to ssh into'},
{'name': 'key-pair-file', 'required': True,
'help_text': 'Private key file to use for login'},
{'name': 'command', 'help_text': 'Command to execute on Master Node'}
]
def _run_main_command(self, parsed_args, parsed_globals):
master_dns = sshutils.validate_and_find_master_dns(
session=self._session,
parsed_globals=parsed_globals,
cluster_id=parsed_args.cluster_id)
key_file = parsed_args.key_pair_file
sshutils.validate_ssh_with_key_file(key_file)
f = tempfile.NamedTemporaryFile(delete=False)
if (emrutils.which('ssh') or emrutils.which('ssh.exe')):
command = ['ssh', '-o', 'StrictHostKeyChecking=no', '-o',
'ServerAliveInterval=10', '-i',
parsed_args.key_pair_file, constants.SSH_USER +
'@' + master_dns, '-t']
if parsed_args.command:
command.append(parsed_args.command)
else:
command = ['putty', '-ssh', '-i', parsed_args.key_pair_file,
constants.SSH_USER + '@' + master_dns, '-t']
if parsed_args.command:
f.write(parsed_args.command)
f.write('\nread -n1 -r -p "Command completed. Press any key."')
command.append('-m')
command.append(f.name)
f.close()
print(' '.join(command))
rc = subprocess.call(command)
os.remove(f.name)
return rc
class Put(Command):
NAME = 'put'
DESCRIPTION = ('Put file onto the master node.\n%s' %
KEY_PAIR_FILE_HELP_TEXT)
ARG_TABLE = [
{'name': 'cluster-id', 'required': True,
'help_text': 'Cluster Id of cluster you want to put file onto'},
{'name': 'key-pair-file', 'required': True,
'help_text': 'Private key file to use for login'},
{'name': 'src', 'required': True,
'help_text': 'Source file path on local machine'},
{'name': 'dest', 'help_text': 'Destination file path on remote host'}
]
def _run_main_command(self, parsed_args, parsed_globals):
master_dns = sshutils.validate_and_find_master_dns(
session=self._session,
parsed_globals=parsed_globals,
cluster_id=parsed_args.cluster_id)
key_file = parsed_args.key_pair_file
sshutils.validate_scp_with_key_file(key_file)
if (emrutils.which('scp') or emrutils.which('scp.exe')):
command = ['scp', '-r', '-o StrictHostKeyChecking=no',
'-i', parsed_args.key_pair_file, parsed_args.src,
constants.SSH_USER + '@' + master_dns]
else:
command = ['pscp', '-scp', '-r', '-i', parsed_args.key_pair_file,
parsed_args.src, constants.SSH_USER + '@' + master_dns]
        # append the destination path; default to the source file's base name
if parsed_args.dest:
command[-1] = command[-1] + ":" + parsed_args.dest
else:
command[-1] = command[-1] + ":" + parsed_args.src.split('/')[-1]
print(' '.join(command))
rc = subprocess.call(command)
return rc
class Get(Command):
NAME = 'get'
DESCRIPTION = ('Get file from master node.\n%s' % KEY_PAIR_FILE_HELP_TEXT)
ARG_TABLE = [
{'name': 'cluster-id', 'required': True,
'help_text': 'Cluster Id of cluster you want to get file from'},
{'name': 'key-pair-file', 'required': True,
'help_text': 'Private key file to use for login'},
{'name': 'src', 'required': True,
'help_text': 'Source file path on remote host'},
{'name': 'dest', 'help_text': 'Destination file path on your machine'}
]
def _run_main_command(self, parsed_args, parsed_globals):
master_dns = sshutils.validate_and_find_master_dns(
session=self._session,
parsed_globals=parsed_globals,
cluster_id=parsed_args.cluster_id)
key_file = parsed_args.key_pair_file
sshutils.validate_scp_with_key_file(key_file)
if (emrutils.which('scp') or emrutils.which('scp.exe')):
command = ['scp', '-r', '-o StrictHostKeyChecking=no', '-i',
parsed_args.key_pair_file, constants.SSH_USER + '@' +
master_dns + ':' + parsed_args.src]
else:
command = ['pscp', '-scp', '-r', '-i', parsed_args.key_pair_file,
constants.SSH_USER + '@' + master_dns + ':' +
parsed_args.src]
if parsed_args.dest:
command.append(parsed_args.dest)
else:
command.append(parsed_args.src.split('/')[-1])
print(' '.join(command))
rc = subprocess.call(command)
return rc
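# Hedged usage sketch for the commands above (the cluster id and key path are
# placeholders, not values from this module):
#
#   aws emr socks --cluster-id j-XXXXXXXXXXXXX --key-pair-file ~/mykey.pem
#   aws emr ssh --cluster-id j-XXXXXXXXXXXXX --key-pair-file ~/mykey.pem \
#       --command 'hadoop fs -ls /'
#   aws emr put --cluster-id j-XXXXXXXXXXXXX --key-pair-file ~/mykey.pem \
#       --src ./job.py --dest /home/hadoop/job.py
#   aws emr get --cluster-id j-XXXXXXXXXXXXX --key-pair-file ~/mykey.pem \
#       --src /home/hadoop/output.log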
|
from __future__ import absolute_import
from __future__ import unicode_literals
import io
import os.path
import sqlite3
import mock
import pytest
import six
from pre_commit.store import _get_default_directory
from pre_commit.store import Store
from pre_commit.util import cmd_output
from pre_commit.util import cwd
from pre_commit.util import rmtree
from testing.fixtures import git_dir
from testing.util import get_head_sha
def test_our_session_fixture_works():
"""There's a session fixture which makes `Store` invariantly raise to
prevent writing to the home directory.
"""
with pytest.raises(AssertionError):
Store()
def test_get_default_directory_defaults_to_home():
    # Note we use the module-level one, which is not mocked
ret = _get_default_directory()
assert ret == os.path.join(os.path.expanduser('~/.cache'), 'pre-commit')
def test_adheres_to_xdg_specification():
with mock.patch.dict(
os.environ, {'XDG_CACHE_HOME': '/tmp/fakehome'},
):
ret = _get_default_directory()
assert ret == os.path.join('/tmp/fakehome', 'pre-commit')
def test_uses_environment_variable_when_present():
with mock.patch.dict(
os.environ, {'PRE_COMMIT_HOME': '/tmp/pre_commit_home'},
):
ret = _get_default_directory()
assert ret == '/tmp/pre_commit_home'
def test_store_require_created(store):
assert not os.path.exists(store.directory)
store.require_created()
# Should create the store directory
assert os.path.exists(store.directory)
# Should create a README file indicating what the directory is about
with io.open(os.path.join(store.directory, 'README')) as readme_file:
readme_contents = readme_file.read()
for text_line in (
'This directory is maintained by the pre-commit project.',
'Learn more: https://github.com/pre-commit/pre-commit',
):
assert text_line in readme_contents
def test_store_require_created_does_not_create_twice(store):
assert not os.path.exists(store.directory)
store.require_created()
# We intentionally delete the directory here so we can figure out if it
# calls it again.
rmtree(store.directory)
assert not os.path.exists(store.directory)
# Call require_created, this should not trigger a call to create
store.require_created()
assert not os.path.exists(store.directory)
def test_does_not_recreate_if_directory_already_exists(store):
assert not os.path.exists(store.directory)
# We manually create the directory.
# Note: we're intentionally leaving out the README file. This is so we can
# know that `Store` didn't call create
os.mkdir(store.directory)
open(store.db_path, 'a').close()
# Call require_created, this should not call create
store.require_created()
assert not os.path.exists(os.path.join(store.directory, 'README'))
def test_clone(store, tempdir_factory, log_info_mock):
path = git_dir(tempdir_factory)
with cwd(path):
cmd_output('git', 'commit', '--allow-empty', '-m', 'foo')
sha = get_head_sha(path)
cmd_output('git', 'commit', '--allow-empty', '-m', 'bar')
ret = store.clone(path, sha)
# Should have printed some stuff
assert log_info_mock.call_args_list[0][0][0].startswith(
'Initializing environment for ',
)
# Should return a directory inside of the store
assert os.path.exists(ret)
assert ret.startswith(store.directory)
# Directory should start with `repo`
_, dirname = os.path.split(ret)
assert dirname.startswith('repo')
# Should be checked out to the sha we specified
assert get_head_sha(ret) == sha
# Assert there's an entry in the sqlite db for this
with sqlite3.connect(store.db_path) as db:
path, = db.execute(
'SELECT path from repos WHERE repo = ? and ref = ?',
[path, sha],
).fetchone()
assert path == ret
def test_clone_cleans_up_on_checkout_failure(store):
try:
# This raises an exception because you can't clone something that
# doesn't exist!
store.clone('/i_dont_exist_lol', 'fake_sha')
except Exception as e:
assert '/i_dont_exist_lol' in six.text_type(e)
things_starting_with_repo = [
thing for thing in os.listdir(store.directory)
if thing.startswith('repo')
]
assert things_starting_with_repo == []
def test_clone_when_repo_already_exists(store):
# Create an entry in the sqlite db that makes it look like the repo has
# been cloned.
store.require_created()
with sqlite3.connect(store.db_path) as db:
db.execute(
'INSERT INTO repos (repo, ref, path) '
'VALUES ("fake_repo", "fake_ref", "fake_path")',
)
assert store.clone('fake_repo', 'fake_ref') == 'fake_path'
def test_require_created_when_directory_exists_but_not_db(store):
    # In versions <= 0.3.5 there was no sqlite db, so we need backward
    # compatibility
os.makedirs(store.directory)
store.require_created()
assert os.path.exists(store.db_path)
|
import sys, logging
import numpy as np
from math import ceil
from gseapy.stats import multiple_testing_correction
from joblib import delayed, Parallel
def enrichment_score(gene_list, correl_vector, gene_set, weighted_score_type=1,
nperm=1000, seed=None, single=False, scale=False):
"""This is the most important function of GSEApy. It has the same algorithm with GSEA and ssGSEA.
:param gene_list: The ordered gene list gene_name_list, rank_metric.index.values
:param gene_set: gene_sets in gmt file, please use gsea_gmt_parser to get gene_set.
    :param weighted_score_type: The same as GSEA's weighted score method. Weighting by the correlation
                            is a very reasonable choice that allows significant gene sets with less than perfect coherence.
                            options: 0 (classic), 1, 1.5, 2. Default: 1. If one is interested in penalizing sets for lack of
                            coherence, or in discovering sets with any type of nonrandom distribution of tags, a value p < 1
                            might be appropriate. On the other hand, if one uses sets with a large number of genes and only
                            a small subset of those is expected to be coherent, then one could consider using p > 1.
                            Our recommendation is to use p = 1 and use other settings only if you are very experienced
                            with the method and its behavior.
:param correl_vector: A vector with the correlations (e.g. signal to noise scores) corresponding to the genes in
the gene list. Or rankings, rank_metric.values
:param nperm: Only use this parameter when computing esnull for statistical testing. Set the esnull value
equal to the permutation number.
:param seed: Random state for initializing gene list shuffling. Default: seed=None
:return:
ES: Enrichment score (real number between -1 and +1)
ESNULL: Enrichment score calculated from random permutations.
Hits_Indices: Index of a gene in gene_list, if gene is included in gene_set.
     RES: Numerical vector containing the running enrichment score for all locations in the gene list.
"""
N = len(gene_list)
    # Test whether each element of a 1-D array is also present in a second array.
    # It's more intuitive here than the original enrichment_score source code.
    # use .astype to convert bool to integer
    tag_indicator = np.in1d(gene_list, gene_set, assume_unique=True).astype(int)  # notice that the sign is 0 (no tag) or 1 (tag)
if weighted_score_type == 0 :
correl_vector = np.repeat(1, N)
else:
correl_vector = np.abs(correl_vector)**weighted_score_type
# get indices of tag_indicator
hit_ind = np.flatnonzero(tag_indicator).tolist()
# if used for compute esnull, set esnull equal to permutation number, e.g. 1000
# else just compute enrichment scores
# set axis to 1, because we have 2D array
axis = 1
tag_indicator = np.tile(tag_indicator, (nperm+1,1))
correl_vector = np.tile(correl_vector,(nperm+1,1))
# gene list permutation
rs = np.random.RandomState(seed)
for i in range(nperm): rs.shuffle(tag_indicator[i])
# np.apply_along_axis(rs.shuffle, 1, tag_indicator)
    Nhit = tag_indicator.sum(axis=axis, keepdims=True)
sum_correl_tag = np.sum(correl_vector*tag_indicator, axis=axis, keepdims=True)
# compute ES score, the code below is identical to gsea enrichment_score method.
no_tag_indicator = 1 - tag_indicator
    Nmiss = N - Nhit
norm_tag = 1.0/sum_correl_tag
norm_no_tag = 1.0/Nmiss
RES = np.cumsum(tag_indicator * correl_vector * norm_tag - no_tag_indicator * norm_no_tag, axis=axis)
if scale: RES = RES / N
if single:
es_vec = RES.sum(axis=axis)
else:
max_ES, min_ES = RES.max(axis=axis), RES.min(axis=axis)
es_vec = np.where(np.abs(max_ES) > np.abs(min_ES), max_ES, min_ES)
# extract values
es, esnull, RES = es_vec[-1], es_vec[:-1], RES[-1,:]
return es, esnull, hit_ind, RES
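# Hedged usage sketch for enrichment_score (gene names and scores are
# illustrative, not part of this module):
#
#   gene_list = np.array(['TP53', 'EGFR', 'MYC', 'BRCA1', 'KRAS'])
#   correl_vector = np.array([2.1, 1.4, 0.8, -0.5, -1.7])
#   gene_set = ['TP53', 'KRAS']
#   es, esnull, hit_ind, RES = enrichment_score(
#       gene_list, correl_vector, gene_set, weighted_score_type=1,
#       nperm=100, seed=0)
#   # es is the observed score; esnull holds the 100 permutation scores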
def enrichment_score_tensor(gene_mat, cor_mat, gene_sets, weighted_score_type, nperm=1000,
seed=None, single=False, scale=False):
"""Next generation algorithm of GSEA and ssGSEA. Works for 3d array
:param gene_mat: the ordered gene list(vector) with or without gene indices matrix.
:param cor_mat: correlation vector or matrix (e.g. signal to noise scores)
corresponding to the genes in the gene list or matrix.
:param dict gene_sets: gmt file dict.
:param float weighted_score_type: weighting by the correlation.
options: 0(classic), 1, 1.5, 2. default:1 for GSEA and 0.25 for ssGSEA.
:param int nperm: permutation times.
:param bool scale: If True, normalize the scores by number of genes_mat.
:param bool single: If True, use ssGSEA algorithm, otherwise use GSEA.
    :param seed: Random state for initializing gene list shuffling.
Default: seed=None
:return: a tuple contains::
| ES: Enrichment score (real number between -1 and +1), for ssGSEA, set scale eq to True.
| ESNULL: Enrichment score calculated from random permutation.
| Hits_Indices: Indices of genes if genes are included in gene_set.
| RES: The running enrichment score for all locations in the gene list.
"""
rs = np.random.RandomState(seed)
    # gene_mat -> 1d: prerank, ssGSEA or 2d: GSEA
keys = sorted(gene_sets.keys())
if weighted_score_type == 0:
        # don't bother doing the calculation, just set to 1
cor_mat = np.ones(cor_mat.shape)
elif weighted_score_type > 0:
pass
else:
logging.error("Using negative values of weighted_score_type, not allowed")
raise ValueError("weighted_score_type should be postive numerics")
cor_mat = np.abs(cor_mat)
if cor_mat.ndim ==1:
# ssGSEA or Prerank
# genestes->M, genes->N, perm-> axis=2
N, M = len(gene_mat), len(keys)
# generate gene hits matrix
# for 1d ndarray of gene_mat, set assume_unique=True,
# means the input arrays are both assumed to be unique,
# which can speed up the calculation.
tag_indicator = np.vstack([np.in1d(gene_mat, gene_sets[key], assume_unique=True) for key in keys])
tag_indicator = tag_indicator.astype(int)
# index of hits
hit_ind = [ np.flatnonzero(tag).tolist() for tag in tag_indicator ]
# generate permutated hits matrix
perm_tag_tensor = np.repeat(tag_indicator, nperm+1).reshape((M,N,nperm+1))
# shuffle matrix, last matrix is not shuffled when nperm > 0
if nperm: np.apply_along_axis(lambda x: np.apply_along_axis(rs.shuffle,0,x),1, perm_tag_tensor[:,:,:-1])
# missing hits
no_tag_tensor = 1 - perm_tag_tensor
# calculate numerator, denominator of each gene hits
rank_alpha = (perm_tag_tensor*cor_mat[np.newaxis,:,np.newaxis])** weighted_score_type
elif cor_mat.ndim == 2:
# GSEA
# 2d ndarray, gene_mat and cor_mat are shuffled already
# reshape matrix
cor_mat = cor_mat.T
# gene_mat is a tuple contains (gene_name, permuate_gene_name_indices)
genes, genes_ind = gene_mat
# genestes->M, genes->N, perm-> axis=2
        # don't use assume_unique=True with np.isin() on 2-D arrays;
        # elements in gene_mat are not unique there and would give wrong results
tag_indicator = np.vstack([np.in1d(genes, gene_sets[key], assume_unique=True) for key in keys])
tag_indicator = tag_indicator.astype(int)
perm_tag_tensor = np.stack([tag.take(genes_ind).T for tag in tag_indicator], axis=0)
#index of hits
hit_ind = [ np.flatnonzero(tag).tolist() for tag in perm_tag_tensor[:,:,-1] ]
# nohits
no_tag_tensor = 1 - perm_tag_tensor
# calculate numerator, denominator of each gene hits
rank_alpha = (perm_tag_tensor*cor_mat[np.newaxis,:,:])** weighted_score_type
else:
logging.error("Program die because of unsupported input")
raise ValueError("Correlation vector or matrix (cor_mat) is not supported")
# Nhint = tag_indicator.sum(1)
# Nmiss = N - Nhint
axis=1
P_GW_denominator = np.sum(rank_alpha, axis=axis, keepdims=True)
P_NG_denominator = np.sum(no_tag_tensor, axis=axis, keepdims=True)
REStensor = np.cumsum(rank_alpha / P_GW_denominator - no_tag_tensor / P_NG_denominator, axis=axis)
# ssGSEA: scale es by gene numbers ?
# https://gist.github.com/gaoce/39e0907146c752c127728ad74e123b33
if scale: REStensor = REStensor / len(gene_mat)
if single:
#ssGSEA
esmatrix = REStensor.sum(axis=axis)
else:
#GSEA
esmax, esmin = REStensor.max(axis=axis), REStensor.min(axis=axis)
esmatrix = np.where(np.abs(esmax)>np.abs(esmin), esmax, esmin)
es, esnull, RES = esmatrix[:,-1], esmatrix[:,:-1], REStensor[:,:,-1]
return es, esnull, hit_ind, RES
def ranking_metric_tensor(exprs, method, permutation_num, pos, neg, classes,
ascending, seed=None, skip_last=False):
"""Build shuffled ranking matrix when permutation_type eq to phenotype.
Works for 3d array.
:param exprs: gene_expression DataFrame, gene_name indexed.
:param str method: calculate correlation or ranking. methods including:
1. 'signal_to_noise' (s2n) or 'abs_signal_to_noise' (abs_s2n).
2. 't_test'.
3. 'ratio_of_classes' (also referred to as fold change).
4. 'diff_of_classes'.
5. 'log2_ratio_of_classes'.
    :param int permutation_num: how many times the class labels are shuffled
:param str pos: one of labels of phenotype's names.
:param str neg: one of labels of phenotype's names.
:param list classes: a list of phenotype labels, to specify which column of
dataframe belongs to what class of phenotype.
:param bool ascending: bool. Sort ascending vs. descending.
:param seed: random_state seed
:param bool skip_last: (internal use only) whether to skip the permutation of the last rankings.
:return:
returns two 2d ndarray with shape (nperm, gene_num).
| cor_mat_indices: the indices of sorted and permutated (exclude last row) ranking matrix.
| cor_mat: sorted and permutated (exclude last row) ranking matrix.
"""
rs = np.random.RandomState(seed)
# S: samples, G: gene number
G, S = exprs.shape
# genes = exprs.index.values
expr_mat = exprs.values.T
perm_cor_tensor = np.tile(expr_mat, (permutation_num,1,1))
if skip_last:
# random shuffle on the first dim, the last matrix (expr_mat) is not shuffled
for arr in perm_cor_tensor[:-1]: rs.shuffle(arr)
else:
for arr in perm_cor_tensor: rs.shuffle(arr)
# metrics
classes = np.array(classes)
pos = classes == pos
neg = classes == neg
n_pos = np.sum(pos)
n_neg = np.sum(neg)
pos_cor_mean = perm_cor_tensor[:,pos,:].mean(axis=1)
neg_cor_mean = perm_cor_tensor[:,neg,:].mean(axis=1)
pos_cor_std = perm_cor_tensor[:,pos,:].std(axis=1, ddof=1)
neg_cor_std = perm_cor_tensor[:,neg,:].std(axis=1, ddof=1)
if method in ['signal_to_noise', 's2n']:
cor_mat = (pos_cor_mean - neg_cor_mean)/(pos_cor_std + neg_cor_std)
elif method in ['abs_signal_to_noise', 'abs_s2n']:
cor_mat = np.abs((pos_cor_mean - neg_cor_mean)/(pos_cor_std + neg_cor_std))
elif method == 't_test':
denom = np.sqrt((pos_cor_std**2)/n_pos + (neg_cor_std**2)/n_neg)
cor_mat = (pos_cor_mean - neg_cor_mean)/ denom
elif method == 'ratio_of_classes':
cor_mat = pos_cor_mean / neg_cor_mean
elif method == 'diff_of_classes':
cor_mat = pos_cor_mean - neg_cor_mean
elif method == 'log2_ratio_of_classes':
cor_mat = np.log2(pos_cor_mean / neg_cor_mean)
else:
logging.error("Please provide correct method name!!!")
raise LookupError("Input method: %s is not supported"%method)
    # return matrices of shape (permutation_num, gene_num)
cor_mat_ind = cor_mat.argsort()
# ndarray: sort in place
cor_mat.sort()
# genes_mat = genes.take(cor_mat_ind)
if ascending: return cor_mat_ind, cor_mat
# descending order of ranking and genes
# return genes_mat[:,::-1], cor_mat[:,::-1]
return cor_mat_ind[:, ::-1], cor_mat[:, ::-1]
def ranking_metric(df, method, pos, neg, classes, ascending):
"""The main function to rank an expression table. works for 2d array.
:param df: gene_expression DataFrame.
:param method: The method used to calculate a correlation or ranking. Default: 'log2_ratio_of_classes'.
Others methods are:
1. 'signal_to_noise' (s2n) or 'abs_signal_to_noise' (abs_s2n)
You must have at least three samples for each phenotype to use this metric.
The larger the signal-to-noise ratio, the larger the differences of the means (scaled by the standard deviations);
that is, the more distinct the gene expression is in each phenotype and the more the gene acts as a “class marker.”
2. 't_test'
Uses the difference of means scaled by the standard deviation and number of samples.
Note: You must have at least three samples for each phenotype to use this metric.
The larger the tTest ratio, the more distinct the gene expression is in each phenotype
and the more the gene acts as a “class marker.”
3. 'ratio_of_classes' (also referred to as fold change).
Uses the ratio of class means to calculate fold change for natural scale data.
4. 'diff_of_classes'
Uses the difference of class means to calculate fold change for natural scale data
5. 'log2_ratio_of_classes'
Uses the log2 ratio of class means to calculate fold change for natural scale data.
This is the recommended statistic for calculating fold change for log scale data.
:param str pos: one of labels of phenotype's names.
:param str neg: one of labels of phenotype's names.
    :param list classes: a list of phenotype labels, one per sample column.
:param bool ascending: bool or list of bool. Sort ascending vs. descending.
:return:
returns a pd.Series of correlation to class of each variable. Gene_name is index, and value is rankings.
visit here for more docs: http://software.broadinstitute.org/gsea/doc/GSEAUserGuideFrame.html
"""
# exclude any zero stds.
df_mean = df.groupby(by=classes, axis=1).mean()
df_std = df.groupby(by=classes, axis=1).std()
    # classes may be a plain list; compare as an array so the sums count labels
    classes = np.asarray(classes)
    n_pos = np.sum(classes == pos)
    n_neg = np.sum(classes == neg)
if method in ['signal_to_noise', 's2n']:
ser = (df_mean[pos] - df_mean[neg])/(df_std[pos] + df_std[neg])
elif method in ['abs_signal_to_noise', 'abs_s2n']:
ser = ((df_mean[pos] - df_mean[neg])/(df_std[pos] + df_std[neg])).abs()
elif method == 't_test':
ser = (df_mean[pos] - df_mean[neg])/ np.sqrt(df_std[pos]**2/n_pos+df_std[neg]**2/n_neg)
elif method == 'ratio_of_classes':
ser = df_mean[pos] / df_mean[neg]
elif method == 'diff_of_classes':
ser = df_mean[pos] - df_mean[neg]
elif method == 'log2_ratio_of_classes':
ser = np.log2(df_mean[pos] / df_mean[neg])
else:
logging.error("Please provide correct method name!!!")
raise LookupError("Input method: %s is not supported"%method)
ser = ser.sort_values(ascending=ascending)
return ser
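# Hedged usage sketch for ranking_metric (the toy expression table is
# illustrative only):
#
#   import pandas as pd
#   df = pd.DataFrame([[5., 6., 1., 2.], [2., 2., 8., 9.]],
#                     index=['geneA', 'geneB'],
#                     columns=['t1', 't2', 'c1', 'c2'])
#   classes = ['tumor', 'tumor', 'normal', 'normal']
#   rnk = ranking_metric(df, 'log2_ratio_of_classes', 'tumor', 'normal',
#                        classes, ascending=False)
#   # geneA ranks first: log2(5.5 / 1.5) > log2(2.0 / 8.5)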
def gsea_compute_tensor(data, gmt, n, weighted_score_type, permutation_type,
method, pheno_pos, pheno_neg, classes, ascending,
processes=1, seed=None, single=False, scale=False):
"""compute enrichment scores and enrichment nulls.
    This function will split large arrays into smaller pieces to avoid memory overflow.
:param data: preprocessed expression dataframe or a pre-ranked file if prerank=True.
:param dict gmt: all gene sets in .gmt file. need to call load_gmt() to get results.
:param int n: permutation number. default: 1000.
:param str method: ranking_metric method. see above.
:param str pheno_pos: one of labels of phenotype's names.
:param str pheno_neg: one of labels of phenotype's names.
:param list classes: a list of phenotype labels, to specify which column of dataframe belongs to what category of phenotype.
:param float weighted_score_type: default:1
:param bool ascending: sorting order of rankings. Default: False.
:param seed: random seed. Default: np.random.RandomState()
:param bool scale: if true, scale es by gene number.
:return: a tuple contains::
| zipped results of es, nes, pval, fdr.
| nested list of hit indices of input gene_list.
| nested list of ranked enrichment score of each input gene_sets.
| list of enriched terms
"""
w = weighted_score_type
subsets = sorted(gmt.keys())
genes_mat, cor_mat = data.index.values, data.values
base = 5 if data.shape[0] >= 5000 else 10
## phenotype permutation
    np.random.seed(seed) # control the random numbers
if permutation_type == "phenotype":
# shuffling classes and generate random correlation rankings
logging.debug("Start to permutate classes..............................")
        if (n + 1) % base == 0: # n+1: the last permutation is for the original ES calculation
num_bases = [ base ] * ((n + 1) // base)
skip_last = [0] * ( n // base) + [1] # last is not permuted
else:
num_bases = [ base ] * ((n + 1) // base) + [ (n +1) % base]
            skip_last = [0] * ((n + 1) // base) + [1] # the last block is not permuted
random_seeds = np.random.randint(np.iinfo(np.int32).max, size=len(num_bases))
genes_ind = []
cor_mat = []
# split permutation array into smaller blocks to save memory
temp_rnk = Parallel(n_jobs=processes, require='sharedmem')(delayed(ranking_metric_tensor)(
data, method, b, pheno_pos, pheno_neg, classes, ascending, se, skip)
for b, skip, se in zip(num_bases, skip_last, random_seeds))
for k, temp in enumerate(temp_rnk):
gi, cor = temp
genes_ind.append(gi)
cor_mat.append(cor)
genes_ind, cor_mat = np.vstack(genes_ind), np.vstack(cor_mat)
# convert to tuple
genes_mat = (data.index.values, genes_ind)
logging.debug("Start to compute es and esnulls........................")
# Prerank, ssGSEA, GSEA
es = []
RES = []
hit_ind = []
esnull = []
temp_esnu = []
# split gmt dataset, too
block = ceil(len(subsets) / base)
random_seeds = np.random.randint(np.iinfo(np.int32).max, size=block)
# split large array into smaller blocks to avoid memory overflow
i, m = 1, 0
gmt_block = []
    while i <= block:
        gmtrim = {k: gmt.get(k) for k in subsets[m:base * i]}
        gmt_block.append(gmtrim)
        m = base * i
        i += 1
    # each block gets its own seed; otherwise all workers would share the same seed value
## if permutation_type == "phenotype": n = 0
## NOTE for GSEA: cor_mat is 2d array, it won't permute again when call enrichment_score_tensor
temp_esnu = Parallel(n_jobs=processes, require='sharedmem')(delayed(enrichment_score_tensor)(
genes_mat, cor_mat, gmtrim, w, n, rs, single, scale)
for gmtrim, rs in zip(gmt_block, random_seeds))
    # temp_esnu is already a list of result tuples; unpack each one
for si, temp in enumerate(temp_esnu):
# e, enu, hit, rune = temp.get()
e, enu, hit, rune = temp
esnull.append(enu)
es.append(e)
RES.append(rune)
hit_ind += hit
    # concatenate results
es, esnull, RES = np.hstack(es), np.vstack(esnull), np.vstack(RES)
return gsea_significance(es, esnull), hit_ind, RES, subsets
def gsea_compute(data, gmt, n, weighted_score_type, permutation_type,
method, pheno_pos, pheno_neg, classes, ascending,
processes=1, seed=None, single=False, scale=False):
"""compute enrichment scores and enrichment nulls.
:param data: preprocessed expression dataframe or a pre-ranked file if prerank=True.
:param dict gmt: all gene sets in .gmt file. need to call load_gmt() to get results.
:param int n: permutation number. default: 1000.
:param str method: ranking_metric method. see above.
:param str pheno_pos: one of labels of phenotype's names.
:param str pheno_neg: one of labels of phenotype's names.
:param list classes: a list of phenotype labels, to specify which column of dataframe belongs to what category of phenotype.
:param float weighted_score_type: default:1
:param bool ascending: sorting order of rankings. Default: False.
:param seed: random seed. Default: np.random.RandomState()
:param bool scale: if true, scale es by gene number.
:return: a tuple contains::
| zipped results of es, nes, pval, fdr.
| nested list of hit indices of input gene_list.
| nested list of ranked enrichment score of each input gene_sets.
| list of enriched terms
"""
w = weighted_score_type
subsets = sorted(gmt.keys())
es = []
RES=[]
hit_ind=[]
esnull = [ [] for a in range(len(subsets)) ]
    np.random.seed(seed) # control the random numbers
logging.debug("Start to compute enrichment scores......................")
if permutation_type == "phenotype":
logging.debug("Start to permutate classes..............................")
# this version won't split large array into smaller ones
genes_mat, cor_mat = ranking_metric_tensor(exprs=data, method=method,
permutation_num=n+1,
pos=pheno_pos, neg=pheno_neg,
classes=classes,
ascending=ascending, seed=seed,
skip_last=True)
# compute es, esnulls. hits, RES
logging.debug("Start to compute enrichment nulls.......................")
es, esnull, hit_ind, RES = enrichment_score_tensor(gene_mat=genes_mat,
cor_mat=cor_mat,
gene_sets=gmt,
weighted_score_type=w,
nperm=n, seed=seed,
single=False, scale=False,)
else:
# Prerank, ssGSEA, GSEA with gene_set permutation
gl, cor_vec = data.index.values, data.values
logging.debug("Start to compute es and esnulls........................")
        ## this version doesn't split the large array into smaller ones
# es, esnull, hit_ind, RES = enrichment_score_tensor(gene_mat=gl,
# cor_mat=cor_vec,
# gene_sets=gmt,
# weighted_score_type=w,
# nperm=n, rs=rs
# single=single, scale=scale)
        # each gene set gets its own seed; otherwise all workers would share the same seed value
random_seeds= np.random.randint(np.iinfo(np.int32).max, size=len(subsets))
temp_esnu = Parallel(n_jobs=processes, require='sharedmem')(delayed(enrichment_score)(
gl, cor_vec, gmt.get(subset), w, n,
rs, single, scale)
for subset, rs in zip(subsets, random_seeds))
        # temp_esnu is already a list of result tuples; unpack each one
for si, temp in enumerate(temp_esnu):
e, enu, hit, rune = temp
esnull[si] = enu
es.append(e)
RES.append(rune)
hit_ind.append(hit)
return gsea_significance(es, esnull), hit_ind, RES, subsets
def normalize(es, esnull):
"""normalize the ES(S,pi) and the observed ES(S), separately rescaling
the positive and negative scores by dividing the mean of the ES(S,pi).
return: NES, NESnull
"""
    nEnrichmentScores = np.zeros(es.shape)
    nEnrichmentNulls = np.zeros(esnull.shape)
# esnullmean = np.zeros(es.shape)
# # calculate nESnulls
# for i in range(esnull.shape[0]):
# # NES
# enrNull = esnull[i]
# if es[i] >= 0:
# mes = enrNull[enrNull >= 0].mean()
# nEnrichmentScores[i] = es[i] / mes
# else:
# mes = enrNull[enrNull < 0 ].mean()
# nEnrichmentScores[i] = - es[i] / mes
# esnullmean[i] = mes
# # NESnull
# for j in range(esnull.shape[1]):
# if esnull[i,j] >= 0:
# nEnrichmentNulls[i,j] = esnull[i,j] / esnullmean[i]
# else:
# nEnrichmentNulls[i,j] = - esnull[i,j] / esnullmean[i]
esnull_pos = np.ma.MaskedArray(esnull, mask=(esnull<0)).mean(axis=1)
esnull_neg = np.ma.MaskedArray(esnull, mask=(esnull>=0)).mean(axis=1)
esnull_pos = np.array(esnull_pos)
esnull_neg = np.array(esnull_neg)
# NES
nEnrichmentScores = np.where(es>=0, es/esnull_pos, -es/esnull_neg)
# NES_NULL
nEnrichmentNulls = np.where(esnull>=0, esnull/esnull_pos[:,np.newaxis],
-esnull/esnull_neg[:,np.newaxis])
return nEnrichmentScores, nEnrichmentNulls
def gsea_pval(es, esnull):
"""Compute nominal p-value.
From article (PNAS):
estimate nominal p-value for S from esnull by using the positive
or negative portion of the distribution corresponding to the sign
of the observed ES(S).
"""
# to speed up, using numpy function to compute pval in parallel.
condlist = [ es < 0, es >=0]
choicelist = [(esnull < es.reshape(len(es),1)).sum(axis=1)/ (esnull < 0).sum(axis=1),
(esnull >= es.reshape(len(es),1)).sum(axis=1)/ (esnull >= 0).sum(axis=1)]
pvals = np.select(condlist, choicelist)
return pvals
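# Hedged numeric sketch: for an observed es of +0.6 the p-value is the share
# of positive null scores that are >= 0.6 (values are illustrative):
#
#   es = np.array([0.6])
#   esnull = np.array([[0.7, 0.5, 0.2, -0.3, -0.1]])
#   gsea_pval(es, esnull)  # -> array([0.333...]): 1 of 3 positive nulls >= 0.6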
def gsea_fdr(nEnrichmentScores, nEnrichmentNulls):
"""Create a histogram of all NES(S,pi) over all S and pi.
Use this null distribution to compute an FDR q value.
:param nEnrichmentScores: normalized ES
:param nEnrichmentNulls: normalized ESnulls
:return: FDR
"""
# FDR null distribution histogram
# vals = reduce(lambda x,y: x+y, nEnrichmentNulls, [])
# nvals = np.array(sorted(vals))
# or
nvals = np.sort(nEnrichmentNulls.flatten())
nnes = np.sort(nEnrichmentScores)
fdrs = []
# FDR computation
for i in range(len(nEnrichmentScores)):
nes = nEnrichmentScores[i]
# use the same pval method to calculate fdr
if nes >= 0:
allPos = int(len(nvals) - np.searchsorted(nvals, 0, side="left"))
allHigherAndPos = int(len(nvals) - np.searchsorted(nvals, nes, side="left"))
nesPos = len(nnes) - int(np.searchsorted(nnes, 0, side="left"))
nesHigherAndPos = len(nnes) - int(np.searchsorted(nnes, nes, side="left"))
# allPos = (nvals >= 0).sum()
# allHigherAndPos = (nvals >= nes).sum()
# nesPos = (nnes >=0).sum()
# nesHigherAndPos = (nnes >= nes).sum()
else:
allPos = int(np.searchsorted(nvals, 0, side="left"))
allHigherAndPos = int(np.searchsorted(nvals, nes, side="right"))
nesPos = int(np.searchsorted(nnes, 0, side="left"))
nesHigherAndPos = int(np.searchsorted(nnes, nes, side="right"))
# allPos = (nvals < 0).sum()
# allHigherAndPos = (nvals < nes).sum()
# nesPos = (nnes < 0).sum()
# nesHigherAndPos = (nnes < nes).sum()
try:
pi_norm = allHigherAndPos / float(allPos)
pi_obs = nesHigherAndPos / float(nesPos)
fdr = pi_norm / pi_obs
fdrs.append(fdr if fdr < 1 else 1.0)
        except ZeroDivisionError:
            fdrs.append(1000000000.0)
logging.debug("Statistical testing finished.............................")
return fdrs
def gsea_significance(enrichment_scores, enrichment_nulls):
"""Compute nominal pvals, normalized ES, and FDR q value.
    For a given NES(S) = NES* >= 0, the FDR is the ratio of the percentage of all (S,pi) with
    NES(S,pi) >= 0 whose NES(S,pi) >= NES*, divided by the percentage of
    observed S with NES(S) >= 0 whose NES(S) >= NES*, and similarly if NES(S) = NES* <= 0.
"""
    # Ignore division warnings: a zero-by-zero division is undetermined and yields NaN.
    np.seterr(divide='ignore', invalid='ignore')
# import warnings
# warnings.simplefilter("ignore")
es = np.array(enrichment_scores)
esnull = np.array(enrichment_nulls)
logging.debug("Start to compute pvals..................................")
# P-values.
pvals = gsea_pval(es, esnull).tolist()
logging.debug("Start to compute nes and nesnull........................")
# NES
nEnrichmentScores, nEnrichmentNulls = normalize(es, esnull)
logging.debug("Start to compute fdrs..................................")
# FDR
fdrs = gsea_fdr(nEnrichmentScores, nEnrichmentNulls)
    #TODO: use multiple testing correction for ssgsea? ssGSEA2.0 uses BH correction.
# https://github.com/broadinstitute/ssGSEA2.0/blob/master/src/ssGSEA2.0.R
# line 969
# fdrs, _ = multiple_testing_correction(pvals, alpha=0.05)
return zip(enrichment_scores, nEnrichmentScores, pvals, fdrs)
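# Hedged end-to-end sketch of the significance step (inputs are illustrative):
#
#   es = [0.6, -0.4]
#   esnull = [[0.5, 0.1, -0.2], [0.3, -0.5, -0.1]]
#   for es_i, nes, pval, fdr in gsea_significance(es, esnull):
#       print(es_i, nes, pval, fdr)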
|
import os
import csv
from collections import (
defaultdict as dd,
OrderedDict as od
)
from math import log
import datetime
from flask import (
Flask,
render_template,
g,
request,
redirect,
url_for,
send_from_directory,
flash,
jsonify,
make_response,
Markup,
Response
)
from flask_login import (
login_required,
login_user,
logout_user,
current_user
)
from packaging.version import Version
import gwadoc
import networkx as nx
from omw.utils.utils import fetch_sorted_meta_by_version
app = Flask(__name__)
app.config['REMEMBER_COOKIE_DURATION'] = datetime.timedelta(minutes=30)
app.config.from_object('config')
from .common_login import *
from .common_sql import *
from .omw_sql import *
from .wn_syntax import *
import omw.cli
login_manager.init_app(app)
@app.route("/login", methods=["GET", "POST"])
def login():
""" This login function checks if the username & password
match the admin.db; if the authentication is successful,
it passes the id of the user into login_user() """
if request.method == "POST" and \
"username" in request.form and \
"password" in request.form:
username = request.form["username"]
password = request.form["password"]
user = User.get(username)
        # If we found a user based on username, then compare that the submitted
        # password matches the password in the database. The password is stored
        # in a salted hash format, so you must hash the password before comparing it.
if user and hash_pass(password) == user.password:
login_user(user, remember=True)
# FIXME! Get this to work properly...
# return redirect(request.args.get("next") or url_for("index"))
return redirect(url_for("index"))
else:
flash(u"Invalid username, please try again.")
return render_template("login.html")
@app.route("/logout")
@login_required(role=0, group='open')
def logout():
logout_user()
return redirect(url_for("index"))
@app.before_request
def connect_dbs():
connect_admin()
connect_omw()
@app.teardown_appcontext
def teardown_dbs(exception):
db = g.pop('admin', None)
if db is not None:
db.close()
db = g.pop('omw', None)
if db is not None:
db.close()
def _get_cookie(name, default):
if name in request.cookies:
return request.cookies.get(name)
else:
return default
@app.route('/_thumb_up_id')
def thumb_up_id():
user = fetch_id_from_userid(current_user.id)
ili_id = request.args.get('ili_id', None)
rate = 1
r = rate_ili_id(ili_id, rate, user)
counts, up_who, down_who = f_rate_summary([ili_id])
html = """ <span style="color:green" title="Who voted up: {}">+{}</span><br>
<span style="color:red" title="Who voted down: {}">-{}</span>
""".format(up_who[int(ili_id)], counts[int(ili_id)]['up'],
down_who[int(ili_id)], counts[int(ili_id)]['down'])
return jsonify(result=html)
@app.route('/_thumb_down_id')
def thumb_down_id():
user = fetch_id_from_userid(current_user.id)
ili_id = request.args.get('ili_id', None)
rate = -1
r = rate_ili_id(ili_id, rate, user)
counts, up_who, down_who = f_rate_summary([ili_id])
html = """ <span style="color:green" title="Who voted up: {}">+{}</span><br>
<span style="color:red" title="Who voted down: {}">-{}</span>
""".format(up_who[int(ili_id)], counts[int(ili_id)]['up'],
down_who[int(ili_id)], counts[int(ili_id)]['down'])
return jsonify(result=html)
@app.route('/_comment_id')
def comment_id():
user = fetch_id_from_userid(current_user.id)
ili_id = request.args.get('ili_id', None)
comment = request.args.get('comment', None)
comment = str(Markup.escape(comment))
dbinsert = comment_ili_id(ili_id, comment, user)
return jsonify(result=dbinsert)
@app.route('/_detailed_id')
def detailed_id():
ili_id = request.args.get('ili_id', None)
rate_hist = fetch_rate_id([ili_id])
comm_hist = fetch_comment_id([ili_id])
users = fetch_allusers()
r_html = ""
for r, u, t in rate_hist[int(ili_id)]:
r_html += '{} ({} — {}): {} <br>'.format(
users[u]['full_name'], users[u]['userID'], t, r)
c_html = ""
for c, u, t in comm_hist[int(ili_id)]:
c_html += '{} ({} — {}): {} <br>'.format(
users[u]['full_name'], users[u]['userID'], t, c)
html = """
<td colspan="9">
<div style="width: 49%; float:left;">
<h6>Ratings</h6>
{}</div>
<div style="width: 49%; float:right;">
<h6>Comments</h6>
{}</div>
</td>""".format(r_html, c_html)
return jsonify(result=html)
@app.route('/_confirm_wn_upload')
def confirm_wn_upload_id():
"""
Ingest the uploaded wordnet into the database and return a report.
This happens when the user has confirmed they want to add a
validated wordnet.
"""
user = fetch_id_from_userid(current_user.id)
fn = request.args.get('fn', None)
report = ingest_wordnet(fn, user)
updateLabels()
return jsonify(result=report)
@app.route('/_add_new_project')
def add_new_project():
user = fetch_id_from_userid(current_user.id)
proj = request.args.get('proj_code', None)
proj = str(Markup.escape(proj))
if user and proj:
dbinsert = insert_new_project(proj, user)
return jsonify(result=dbinsert)
else:
return jsonify(result=False)
@app.route("/_load_lang_selector",methods=["GET"])
def omw_lang_selector():
selected_lang = int(_get_cookie('selected_lang', 1))
    selected_lang2 = int(_get_cookie('selected_lang2', 1))
lang_id, lang_code = fetch_langs()
html = '<select name="lang" style="font-size: 85%; width: 9em" required>'
for lid in lang_id.keys():
if selected_lang == lid:
html += """<option value="{}" selected>{}</option>
""".format(lid, lang_id[lid][1])
else:
html += """<option value="{}">{}</option>
""".format(lid, lang_id[lid][1])
html += '</select>'
html += '<select name="lang2" style="font-size: 85%; width: 9em" required>'
for lid in lang_id.keys():
if selected_lang2 == lid:
html += """<option value="{}" selected>{}</option>
""".format(lid, lang_id[lid][1])
else:
html += """<option value="{}">{}</option>
""".format(lid, lang_id[lid][1])
html += '</select>'
return jsonify(result=html)
@app.route('/_add_new_language')
def add_new_language():
user = fetch_id_from_userid(current_user.id)
bcp = request.args.get('bcp', None)
bcp = str(Markup.escape(bcp))
iso = request.args.get('iso', None)
iso = str(Markup.escape(iso))
name = request.args.get('name', None)
name = str(Markup.escape(name))
if bcp and name:
dbinsert = insert_new_language(bcp, iso, name, user)
return jsonify(result=dbinsert)
else:
return jsonify(result=False)
@app.route('/_load_proj_details')
def load_proj_details():
proj_id = request.args.get('proj', 0)
if proj_id:
proj_id = int(proj_id)
else:
proj_id = None
projs = fetch_proj()
srcs = fetch_src()
srcs_meta = fetch_src_meta()
html = str()
if proj_id:
i = 0
for src_id in srcs.keys():
if srcs[src_id][0] == projs[proj_id]:
i += 1
html += "<br><p><b>Source {}: {}-{}</b></p>".format(i,
projs[proj_id],srcs[src_id][1])
for attr, val in srcs_meta[src_id].items():
html += "<p style='margin-left: 40px'>"
html += attr + ": " + val
html += "</p>"
return jsonify(result=html)
@app.route('/_load_min_omw_concept/<ss>')
@app.route('/_load_min_omw_concept_ili/<ili_id>')
def min_omw_concepts(ss=None, ili_id=None):
if ili_id:
ss_ids = f_ss_id_by_ili_id(ili_id)
else:
ss_ids = [ss]
pos = fetch_pos()
langs_id, langs_code = fetch_langs()
ss, senses, defs, exes, links = fetch_ss_basic(ss_ids)
ssrels = fetch_ssrel()
selected_lang = int(_get_cookie('selected_lang', 1))
labels = fetch_labels( selected_lang, set(senses.keys()))
return jsonify(result=render_template('min_omw_concept.html',
pos = pos,
langs = langs_id,
senses=senses,
ss=ss,
links=links,
ssrels=ssrels,
defs=defs,
exes=exes,
labels=labels))
@app.route('/_load_min_omw_sense/<sID>')
def min_omw_sense(sID=None):
if sID:
s_id=int(sID)
langs_id, langs_code = fetch_langs()
pos = fetch_pos()
sense = fetch_sense(s_id)
forms=fetch_forms(sense[3])
selected_lang = int(_get_cookie('selected_lang', 1))
labels= fetch_labels(selected_lang,[sense[4]])
src_meta= fetch_src_meta()
src_sid=fetch_src_for_s_id([s_id])
sdefs = fetch_defs_by_sense([s_id])
if selected_lang in sdefs[s_id]:
sdef = sdefs[s_id][selected_lang] ## requested language
else:
sdef = sdefs[s_id][min(sdefs[s_id].keys())] ## a language
if not sdef:
sdef="no definition"
# return jsonify(result=render_template('omw_sense.html',
return jsonify(result=render_template('min_omw_sense.html',
s_id = s_id,
sdef=sdef,
sense = sense,
forms=forms,
langs = langs_id,
pos = pos,
labels = labels,
src_sid = src_sid,
src_meta = src_meta))
@app.route('/_report_val2', methods=['GET', 'POST'])
@login_required(role=0, group='open')
def report_val2():
filename = request.args.get('fn', None)
vr, filename, wn, wn_dtls = validateFile(current_user.id, filename)
return jsonify(result=render_template('validation-report.html',
vr=vr, wn=wn, wn_dtls=wn_dtls, filename=filename))
# validateFile()
# filename = request.args.get('fn', None)
# if filename:
# vr = val1_DTD(current_user, filename)
# if vr['dtd_val'] == True:
# html = "DTD PASSED"
# return jsonify(result=html)
# else:
# html = "DTD FAILED" + '<br>' + vr['dtd_val_errors']
# return jsonify(result=html)
# else:
# return jsonify(result="ERROR")
# return jsonify(result="TEST_VAL2")
@app.route('/', methods=['GET', 'POST'])
def index():
return render_template('index.html')
@app.route('/ili', methods=['GET', 'POST'])
def ili_welcome(name=None):
return render_template('ili_welcome.html')
@app.route('/omw', methods=['GET', 'POST'])
def omw_welcome(name=None):
projects = request.args.get('projects','current')
lang_id, lang_code = fetch_langs()
src_meta=fetch_src_meta()
### sort by language, project version (Newest first)
src_meta_sorted = fetch_sorted_meta_by_version(projects, src_meta, lang_id, lang_code)
return render_template('omw_welcome.html',
src_meta=src_meta_sorted,
lang_id=lang_id,
lang_code=lang_code,
licenses=licenses)
@app.route('/wordnet', methods=['GET', 'POST'])
def wordnet_license(name=None):
return render_template('wordnet_license.html')
@app.route('/omw_wns', methods=['GET', 'POST'])
def omw_wns(name=None):
projects = request.args.get('projects','current')
src_meta=fetch_src_meta()
stats = []
lang_id, lang_code = fetch_langs()
### sort by language name (1), id, version (FIXME -- reverse version)
keys=list(src_meta.keys())
keys.sort(key=lambda x: Version(src_meta[x]['version']),reverse=True) #Version
keys.sort(key=lambda x: src_meta[x]['id']) #id
keys.sort(key=lambda x: lang_id[lang_code['code'][src_meta[x]['language']]][1]) #Language
for k in keys:
if projects=='current': # only get the latest version
if src_meta[k]['version'] != max((src_meta[i]['version'] for i in src_meta
if src_meta[i]['id'] == src_meta[k]['id']),
key=lambda x: Version(x)):
continue
stats.append((src_meta[k], fetch_src_id_stats(k)))
return render_template('omw_wns.html',
stats=stats,
src_meta=src_meta,
lang_id=lang_id,
lang_code=lang_code,
licenses=licenses)
@app.route('/omw_stats', methods=['GET', 'POST'])
def omw_stats():
"""
statistics about wordnet as a big graph
"""
### get language
selected_lang = int(_get_cookie('selected_lang', 1))
### get hypernym graph
hypernym_dict=fetch_graph()
G = nx.DiGraph(hypernym_dict, name='OMW')
info = nx.info(G).splitlines()
cycles = list(nx.simple_cycles(G))
### get the synsets we need to label
sss = []
for c in cycles:
for ss in c:
sss.append(ss)
label = fetch_labels(selected_lang, sss)
return render_template('omw_stats.html',
info=info,
cycles=cycles,
label=label,
gwadoc=gwadoc)
@app.route("/useradmin",methods=["GET"])
@login_required(role=99, group='admin')
def useradmin():
users = fetch_allusers()
return render_template("useradmin.html", users=users)
@app.route("/langadmin",methods=["GET"])
@login_required(role=99, group='admin')
def langadmin():
lang_id, lang_code = fetch_langs()
return render_template("langadmin.html", langs=lang_id)
@app.route("/projectadmin",methods=["GET"])
@login_required(role=99, group='admin')
def projectadmin():
projs = fetch_proj()
srcs = fetch_src()
srcs_by_proj = dd(list)
for src_id in srcs: # should be in the right order, as versions must go up
srcs_by_proj[srcs[src_id][0]].append((srcs[src_id][1], src_id))
srcs_meta = fetch_src_meta()
return render_template("projectadmin.html",
projs=projs,
srcs_by_proj=srcs_by_proj,
srcs_meta=srcs_meta)
@app.route('/allconcepts', methods=['GET', 'POST'])
def allconcepts():
ili, ili_defs = fetch_ili()
rsumm, up_who, down_who = f_rate_summary(list(ili.keys()))
return render_template('concept-list.html', ili=ili,
rsumm=rsumm, up_who=up_who, down_who=down_who)
@app.route('/temporary', methods=['GET', 'POST'])
def temporary():
ili = fetch_ili_status(2)
rsumm, up_who, down_who = f_rate_summary(list(ili.keys()))
return render_template('concept-list.html', ili=ili,
rsumm=rsumm, up_who=up_who, down_who=down_who)
@app.route('/deprecated', methods=['GET', 'POST'])
def deprecated():
ili = fetch_ili_status(0)
rsumm, up_who, down_who = f_rate_summary(list(ili.keys()))
return render_template('concept-list.html', ili=ili,
rsumm=rsumm, up_who=up_who, down_who=down_who)
@app.route('/ili/concepts/<c>', methods=['GET', 'POST'])
def concepts_ili(c=None):
c = c.split(',')
ili, ili_defs = fetch_ili(c)
rsumm, up_who, down_who = f_rate_summary(list(ili.keys()))
return render_template('concept-list.html', ili=ili,
rsumm=rsumm, up_who=up_who, down_who=down_who)
@app.route('/ili/search', methods=['GET', 'POST'])
@app.route('/ili/search/<q>', methods=['GET', 'POST'])
def search_ili(q=None):
if q:
query = q
else:
query = request.form['query']
src_id = fetch_src()
kind_id = fetch_kind()
status_id = fetch_status()
ili = dict()
for c in query_omw("""SELECT * FROM ili WHERE def GLOB ?
""", [query]):
ili[c['id']] = (kind_id[c['kind_id']], c['def'],
src_id[c['origin_src_id']], c['src_key'],
status_id[c['status_id']], c['superseded_by_id'],
c['t'])
rsumm, up_who, down_who = f_rate_summary(list(ili.keys()))
return render_template('concept-list.html', ili=ili,
rsumm=rsumm, up_who=up_who, down_who=down_who)
@app.route('/upload', methods=['GET', 'POST'])
@login_required(role=0, group='open')
def upload():
return render_template('upload.html')
@app.route('/join', methods=['GET', 'POST'])
def join():
return render_template('join.html')
@app.route('/omw/uploads/<filename>')
def download_file(filename):
return send_from_directory(app.config['UPLOAD_FOLDER'],
filename, as_attachment=True)
@app.route('/ili/validation-report', methods=['GET', 'POST'])
@login_required(role=0, group='open')
def validationReport():
vr, filename, wn, wn_dtls = validateFile(current_user.id)
return render_template('validation-report.html',
vr=vr, wn=wn, wn_dtls=wn_dtls,
filename=filename)
@app.route('/ili/report', methods=['GET', 'POST'])
@login_required(role=0, group='open')
def report():
inputfile = request.files.get('file')
inputurl = request.form.get('url')
if inputfile:
thing, ftype = inputfile, 'webfile'
elif inputurl:
        thing, ftype = inputurl, 'url'
else:
thing, ftype = None, None
passed, filename = uploadFile(current_user.id, thing, ftype)
return render_template('report.html',
passed=passed,
filename=filename)
# return render_template('report.html')
@app.route('/omw/search', methods=['GET', 'POST'])
@app.route('/omw/search/<lang>/<q>', methods=['GET', 'POST'])
@app.route('/omw/search/<lang>,<lang2>/<q>', methods=['GET', 'POST'])
def search_omw(lang=None, lang2=None, q=None):
lang_dct, lang_code = fetch_langs()
if lang and q:
lang_id = int(lang_code['code'][lang])
if not lang2:
lang2 = lang
lang_id2 = int(lang_code['code'][lang2])
query = q
else:
lang_id = request.form['lang']
lang_id2 = request.form['lang2']
query = request.form['query']
query = query.strip()
sense = od()
lang_sense = dd(lambda: dd(list))
    if query[0].isalpha(): ### search for the initial character in both cases
if query[0].upper() != query[0].lower():
query = '['+query[0].upper() + query[0].lower()+']'+query[1:]
# GO FROM FORM TO SENSE, order results by pos
for s in query_omw("""
SELECT s.id as s_id, ss_id, wid, fid, lang_id, pos_id, lemma
FROM (SELECT w_id as wid, form.id as fid, lang_id, pos_id, lemma
FROM (SELECT id, lang_id, pos_id, lemma
FROM f WHERE lemma GLOB ? AND lang_id in (?,?)) as form
JOIN wf_link ON form.id = wf_link.f_id) word
JOIN s ON wid=w_id ORDER BY pos_id
""", (query, lang_id, lang_id2)):
sense[s['ss_id']] = [s['s_id'], s['wid'], s['fid'],
s['lang_id'], s['pos_id'], s['lemma']]
lang_sense[s['lang_id']][s['ss_id']] = [s['s_id'], s['wid'], s['fid'],
s['pos_id'], s['lemma']]
pos = fetch_pos()
ss, senses, defs, exes, links = fetch_ss_basic(sense.keys())
ili, ili_defs = fetch_ili([ss[k][0] for k in ss])
labels = fetch_labels(lang_id, set(senses.keys()))
projects = request.args.get('projects', 'current')
lang_idm, lang_codem = fetch_langs()
src_meta = fetch_src_meta()
src_meta_sorted = fetch_sorted_meta_by_version(projects, src_meta, lang_idm, lang_codem)
resp = make_response(render_template('omw_results.html',
query =query,
langsel = int(lang_id),
langsel2 = int(lang_id2),
pos = pos,
lang_dct = lang_dct,
sense=sense,
senses=senses,
ss=ss,
ili=ili,
links=links,
defs=defs,
exes=exes,
labels=labels,
src_meta=src_meta_sorted))
resp.set_cookie('selected_lang', str(lang_id))
resp.set_cookie('selected_lang2', str(lang_id2))
return resp
@app.route('/omw/core', methods=['GET', 'POST'])
def omw_core(): ### FIXME add lang as a parameter?
return render_template('omw_core.html')
@app.route('/omw/concepts/<ssID>', methods=['GET', 'POST'])
@app.route('/omw/concepts/ili/<iliID>', methods=['GET', 'POST'])
def concepts_omw(ssID=None, iliID=None):
if iliID:
ss_ids = f_ss_id_by_ili_id(iliID)
ili, ilidefs = fetch_ili([iliID])
else:
ss_ids = [ssID]
ili, ili_defs = dict(), dict()
pos = fetch_pos()
langs_id, langs_code = fetch_langs()
ss, senses, defs, exes, links = fetch_ss_basic(ss_ids)
if (not iliID) and int(ssID) in ss:
iliID = ss[int(ssID)][0]
ili, ilidefs = fetch_ili([iliID])
sss = list(ss.keys())
for s in links:
for l in links[s]:
sss.extend(links[s][l])
selected_lang = int(_get_cookie('selected_lang', 1))
selected_lang2 = int(_get_cookie('selected_lang2', 1))
labels = fetch_labels(selected_lang, set(sss))
ssrels = fetch_ssrel()
ss_srcs=fetch_src_for_ss_id(ss_ids)
src_meta=fetch_src_meta()
core_ss, core_ili = fetch_core()
s_ids = []
for x in senses:
for y in senses[x]:
for (s_id, lemma, freq) in senses[x][y]:
s_ids.append(s_id)
slinks = fetch_sense_links(s_ids)
## get the canonical form for each linked sense
srl = fetch_srel()
return render_template('omw_concept.html',
ssID=ssID,
iliID=iliID,
pos = pos,
langs = langs_id,
senses=senses,
slinks=slinks,
ss=ss,
links=links,
ssrels=ssrels,
defs=defs,
exes=exes,
ili=ili,
selected_lang = selected_lang,
selected_lang2 = selected_lang2,
labels=labels,
ss_srcs=ss_srcs,
src_meta=src_meta,
core=core_ss,
gwadoc=gwadoc,
srl=srl)
@app.route('/omw/senses/<sID>', methods=['GET', 'POST'])
def omw_sense(sID=None):
"""display a single sense (and its variants)"""
if sID:
langs_id, langs_code = fetch_langs()
pos = fetch_pos()
s_id=int(sID)
sense = fetch_sense(s_id)
slinks = fetch_sense_links([s_id])
forms=fetch_forms(sense[3])
selected_lang = int(_get_cookie('selected_lang',1))
labels= fetch_labels(selected_lang,[sense[4]])
src_meta= fetch_src_meta()
src_sid=fetch_src_for_s_id([s_id])
srel = fetch_srel()
## get the canonical form for each linked sense
slabel=fetch_sense_labels([x for v in slinks[int(s_id)].values() for x in v])
sdefs = fetch_defs_by_sense([s_id])
sdef = ''
if selected_lang in sdefs[s_id]:
sdef = sdefs[s_id][selected_lang] ## requested language
else:
            sdef = sdefs[s_id][min(sdefs[s_id].keys())] ## a language
return render_template('omw_sense.html',
s_id = sID,
sdef = sdef,
sense = sense,
slinks = slinks[s_id],
srel = srel,
forms=forms,
langs = langs_id,
pos = pos,
labels = labels,
slabel = slabel,
src_sid = src_sid,
src_meta = src_meta,
gwadoc=gwadoc)
@app.route('/omw/src/<proj>/<ver>/<originalkey>', methods=['GET', 'POST'])
def src_omw(proj=None, ver=None, originalkey=None):
try:
src_id = f_src_id_by_proj_ver(proj, ver)
    except Exception:
src_id = None
if src_id:
ss = fetch_ss_id_by_src_orginalkey(src_id, originalkey)
else:
ss = None
return concepts_omw(ss)
@app.route('/omw/src/<proj>/<ver>', methods=['GET', 'POST'])
def omw_wn(proj=None,ver=None):
"""
Present a page describing a single wordnet
"""
### default to full = false (short version)
full = request.args.get('full') in ['true', 'True']
if proj and ver:
try:
src_id = f_src_id_by_proj_ver(proj, ver)
        except Exception:
src_id = None
srcs_meta = fetch_src_meta()
src_info = srcs_meta[src_id]
if full and src_id: ### give more stats
ssrel_stats=fetch_ssrel_stats(src_id)
srel_stats=fetch_srel_stats(src_id)
else:
ssrel_stats= {}
srel_stats= {}
pos_stats= fetch_src_id_pos_stats(src_id)
# get the pos names
pos = fetch_pos()
# get the examples for the POS
pos_ids= [ pos_stats[p]['id'] for p in pos_stats ]
pos_exe = fetch_pos_id_ss_mf(pos_ids, src_id = src_id)
### get the wordnet lang
langs_id, langs_code = fetch_langs()
wn_lang = src_info['language']
wn_lang_id = langs_code['code'][wn_lang]
# Get the labels for the synsets
sss = set()
for p in pos_exe:
for (ss_id, freq) in pos_exe[p]:
sss.add(ss_id)
label= fetch_labels(wn_lang_id,sss)
return render_template('omw_wn.html',
proj = proj,
ver = ver,
src_id=src_id,
src_info=src_info,
ssrel_stats=ssrel_stats,
srel_stats=srel_stats,
pos=pos,
pos_stats= pos_stats,
pos_exe=pos_exe,
label=label,
src_stats=fetch_src_id_stats(src_id),
licenses=licenses,
gwadoc=gwadoc)
@app.route('/omw/src-latex/<proj>/<ver>', methods=['GET', 'POST'])
def omw_wn_latex(proj=None, ver=None,full=False):
if proj and ver:
try:
src_id = f_src_id_by_proj_ver(proj, ver)
        except Exception:
src_id = None
srcs_meta = fetch_src_meta()
src_info = srcs_meta[src_id]
if full and src_id:
ssrel_stats=fetch_ssrel_stats(src_id)
else:
ssrel_stats= {}
return render_template('omw_wn_latex.html',
proj = proj,
ver = ver,
src_id=src_id,
src_info=src_info,
ssrel_stats=ssrel_stats,
pos_stats= fetch_src_id_pos_stats(src_id),
src_stats=fetch_src_id_stats(src_id))
@app.route('/cili.tsv')
def generate_cili_tsv():
tsv = fetch_cili_tsv()
return Response(tsv, mimetype='text/tab-separated-values')
@app.route('/core.tsv')
def generate_core_tsv():
"""output a list of the core ili concepts
ToDO: sort by frequency"""
tsv="""# ili_id\n"""
core_ss, core_ili = fetch_core()
for ili in core_ili:
tsv += "i{}\n".format(ili)
return Response(tsv, mimetype='text/tab-separated-values')
@app.context_processor
def utility_processor():
def scale_freq(f, maxfreq=1000):
if f > 0:
return 100 + 100 * log(f)/log(maxfreq)
else:
return 100
return dict(scale_freq=scale_freq)
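# Hedged note on the scaling above: with maxfreq=1000, scale_freq(1) -> 100
# and scale_freq(1000) -> 200, so template font sizes grow logarithmically
# with frequency; zero frequencies fall back to the base size of 100.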
@app.route('/omw/doc/if', methods=['GET', 'POST'])
def omw_doc_if(name=None):
return render_template('doc/interface.html')
@app.route('/omw/doc/search', methods=['GET', 'POST'])
def omw_doc_search(name=None):
return render_template('doc/search.html')
@app.route('/omw/doc/validator', methods=['GET', 'POST'])
def omw_doc_validator(name=None):
return render_template('doc/validator.html')
@app.route('/omw/doc/feedback', methods=['GET', 'POST'])
def omw_doc_feedback(name=None):
return render_template('doc/feedback.html')
@app.route('/omw/doc/glob', methods=['GET', 'POST'])
def omw_doc_glob(name=None):
return render_template('doc/glob.html')
@app.route('/omw/doc/contribute', methods=['GET', 'POST'])
def omw_doc_contribute(name=None):
return render_template('doc/contribute.html')
@app.route('/omw/doc/feedback-doc', methods=['GET', 'POST'])
def omw_doc_feedback_documentation(name=None):
return render_template('doc/feedback_documentation.html')
@app.route('/omw/doc/upload', methods=['GET', 'POST'])
def omw_doc_upload(name=None):
return render_template('doc/upload.html',
title="Upload How-To")
@app.route('/omw/doc/metadata', methods=['GET', 'POST'])
def omw_doc_metadata():
licenses = fetch_licenses()
return render_template('doc/metadata.html',
licenses=licenses)
@app.route('/omw/doc/lmf', methods=['GET', 'POST'])
def omw_doc_lmf():
return render_template('doc/lmf.html')
@app.route('/omw/doc/', methods=['GET', 'POST'])
@app.route('/omw/doc/wn', methods=['GET', 'POST'])
def omw_doc_wn(name=None):
return render_template('doc/wn.html',
gwadoc=gwadoc)
@app.route('/omw/doc/pos', methods=['GET', 'POST'])
def omw_doc_pos(name=None):
"""
Provide dynamic documentation for the POS
    ToDo: maybe do this per src and/or per lang
"""
### get the interface language
selected_lang = int(_get_cookie('selected_lang',1))
# get the pos names
pos = fetch_pos()
# get the examples for the POS
pos_exe = fetch_pos_id_ss_mf(pos['id'].keys(),
num=5)
# Get the labels for the synsets
sss = set()
for p in pos_exe:
for (ss_id, freq) in pos_exe[p]:
sss.add(ss_id)
label= fetch_labels(selected_lang,sss)
pos_freq = fetch_pos_id_freq()
return render_template('doc/pos.html',
pos=pos,
pos_exe=pos_exe,
pos_freq=pos_freq,
label=label)
@app.route('/omw/doc/variants', methods=['GET', 'POST'])
def omw_doc_variants(name=None):
"""
Give some documentation on how variants are represented
"""
fma = fetch_form_meta_attr()
fmv = fetch_form_meta_val()
return render_template('doc/variants.html',
fma=fma,
fmv=fmv)
@app.route('/omw/doc/glossary', methods=['GET', 'POST'])
def omw_doc_glossary(name=None):
return render_template('doc/glossary.html',
gwadoc=gwadoc)
@app.route('/omw/doc/tsv2lmf', methods=['GET', 'POST'])
def omw_doc_tsv2lmf(name=None):
return render_template('doc/tsv2lmf.html',
gwadoc=gwadoc)
@app.route('/omw/doc/add-wn', methods=['GET', 'POST'])
def omw_doc_add_wn(name=None):
return render_template('doc/add-wn.html',
title="Add WN from the Command Line")
@app.route('/omw/doc/doc', methods=['GET', 'POST'])
def omw_doc_doc(name=None):
return render_template('doc/doc.html',
gwadoc=gwadoc)
if __name__ == '__main__':
app.run(debug=True, host='0.0.0.0', threaded=True)
|
from datetime import timedelta
import logging
from django.utils.timezone import now
from django.core.management.base import BaseCommand
from TWLight.users.models import Editor
from TWLight.users.helpers.editor_data import (
editor_global_userinfo,
editor_valid,
editor_enough_edits,
editor_not_blocked,
editor_bundle_eligible,
editor_account_old_enough,
)
logger = logging.getLogger(__name__)
class Command(BaseCommand):
help = "Updates editor info and Bundle eligibility for currently-eligible Editors."
def add_arguments(self, parser):
"""
Adds command arguments.
"""
parser.add_argument(
"--datetime",
action="store",
help="ISO datetime used for calculating eligibility. Defaults to now. Currently only used for backdating command runs in tests.",
)
parser.add_argument(
"--global_userinfo",
action="store",
help="Specify Wikipedia global_userinfo data. Defaults to fetching live data. Currently only used for faking command runs in tests.",
)
parser.add_argument(
"--timedelta_days",
action="store",
help="Number of days used to define 'recent' edits. Defaults to 30. Currently only used for faking command runs in tests.",
)
parser.add_argument(
"--wp_username",
action="store",
help="Specify a single editor to update. Other arguments and filters still apply.",
)
def handle(self, *args, **options):
"""
Updates editor info and Bundle eligibility for currently-eligible Editors.
Parameters
----------
args
options
Returns
-------
None
"""
# Default behavior is to use current datetime for timestamps to check all editors.
now_or_datetime = now()
datetime_override = None
timedelta_days = 0
wp_username = None
editors = Editor.objects.all()
# This may be overridden so that values may be treated as if they were valid for an arbitrary datetime.
# This is also passed to the model method.
if options["datetime"]:
datetime_override = now_or_datetime.fromisoformat(options["datetime"])
now_or_datetime = datetime_override
# These are used to limit the set of editors updated by the command.
# Nothing is passed to the model method.
if options["timedelta_days"]:
timedelta_days = int(options["timedelta_days"])
# Get editors that haven't been updated in the specified time range, with an option to limit on wp_username.
if timedelta_days:
editors = editors.exclude(
editorlogs__timestamp__gt=now_or_datetime
- timedelta(days=timedelta_days),
)
# Optional wp_username filter.
if options["wp_username"]:
editors = editors.filter(wp_username=str(options["wp_username"]))
# Iterator reduces memory footprint for large querysets
for editor in editors.iterator():
# T296853: avoid stale editor data while looping through big sets.
editor.refresh_from_db()
# `global_userinfo` data may be overridden.
if options["global_userinfo"]:
global_userinfo = options["global_userinfo"]
editor.check_sub(global_userinfo["id"])
# Default behavior is to fetch live `global_userinfo`
else:
global_userinfo = editor_global_userinfo(editor.wp_sub)
if global_userinfo:
editor.update_editcount(global_userinfo["editcount"], datetime_override)
# Determine editor validity.
editor.wp_enough_edits = editor_enough_edits(editor.wp_editcount)
editor.wp_not_blocked = editor_not_blocked(global_userinfo["merged"])
# We will only check if the account is old enough if the value is False
# Accounts that are already old enough will never cease to be old enough
if not editor.wp_account_old_enough:
editor.wp_account_old_enough = editor_account_old_enough(
editor.wp_registered
)
editor.wp_valid = editor_valid(
editor.wp_enough_edits,
editor.wp_account_old_enough,
# editor.wp_not_blocked can only be rechecked on login, so we're going with the existing value.
editor.wp_not_blocked,
editor.ignore_wp_blocks,
)
# Determine Bundle eligibility.
editor.wp_bundle_eligible = editor_bundle_eligible(editor)
# Save editor.
editor.save()
# Prune EditorLogs, with daily_prune_range set to only check the previous day to improve performance.
editor.prune_editcount(
current_datetime=datetime_override, daily_prune_range=2
)
# Update bundle authorizations.
editor.update_bundle_authorization()
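# Usage sketch (hypothetical invocation; the real command name is this module's
# filename, which is not shown here):
#     python manage.py <command_name> --timedelta_days 30 --wp_username "Example"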
|
import os
import shutil
from jinja2 import Environment, FileSystemLoader
from webassets import Environment as AssetsEnvironment
from webassets.ext.jinja2 import AssetsExtension
from webassets.loaders import YAMLLoader
class TemplateBuilder(object):
def __init__(self, path, output,
static_path='static', static_url='static',
asset_config='config.yml'):
self.path = path
self.output = output
self.output_path = os.path.join(path, output)
self.env = Environment(loader=FileSystemLoader(path),
extensions=[AssetsExtension])
try:
config_path = os.path.join(self.path, asset_config)
asset_config = YAMLLoader(config_path)
self.assets_env = asset_config.load_environment()
except IOError:
self.assets_env = AssetsEnvironment()
if 'directory' not in self.assets_env.config:
self.assets_env.directory = self.output_path
if 'url' not in self.assets_env.config:
self.assets_env.url = static_url
self.assets_env.load_path = [self.path]
self.env.assets_environment = self.assets_env
    def build_template(self, template, context=None):
        # NOTE: `context` is currently unused; templates are rendered without
        # extra variables.
        tmpl = self.env.get_template(template)
        dump_path = os.path.join(self.output_path, template)
        tmpl.stream().dump(dump_path)
def list_files(self):
templates, other = set(), set()
        if getattr(self.assets_env, '_named_bundles', None):
            bundles = [fp for name, bundle in self.assets_env._named_bundles.items()
                       for fp in bundle.contents]
else:
bundles = []
for dirpath, dirnames, filenames in os.walk(self.path):
for filename in filenames:
filepath = os.path.join(dirpath, filename) \
[len(self.path):].strip(os.path.sep).replace(os.path.sep, '/')
if filepath[:2] == './':
filepath = filepath[2:]
if self.output in filepath or filepath in bundles:
continue
elif '.html' in filepath:
templates.add(filepath)
else:
other.add(filepath)
return sorted(templates), sorted(bundles), sorted(other)
class SiteBuilder(object):
def __init__(self, path, output='public', tmpl_builder_class=TemplateBuilder, **kwargs):
self.path = path
self.output_path = os.path.join(path, output)
self.tmpl_builder = tmpl_builder_class(self.path, output, **kwargs)
def build(self):
if not os.path.exists(self.output_path):
os.mkdir(self.output_path)
templates, bundles, others = self.tmpl_builder.list_files()
for template in templates:
# XXX: for now we are not handling contexts
self.tmpl_builder.build_template(template)
for other in others:
dirname = os.path.join(self.output_path, os.path.dirname(other))
if not os.path.exists(dirname):
os.makedirs(dirname)
shutil.copyfile(os.path.join(self.path, other), os.path.join(self.output_path, other))
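# Minimal usage sketch (assumes a ./site directory of Jinja2 templates and an
# optional webassets config.yml alongside them):
#     builder = SiteBuilder('./site', output='public')
#     builder.build()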
|
from time import strftime
def append_new_end(word,user_end):
space_count = word.count('~') # count number of placeholders
if space_count > 0:
total_word = word[:-space_count] + user_end # supplied from raw input
else:
total_word = word
return total_word
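# Illustrative example: append_new_end('walk~~', 'ed') returns 'walked' --
# the two '~' placeholders are stripped and the user-typed ending is attached.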
def create_updated_array(text_complete,text_new,number_sentences):
sentence = 0
while sentence < number_sentences:
word = 0
print
print text_new[sentence]
print
while word < len(text_new[sentence]):
user_end = raw_input(text_new[sentence][word].encode('utf-8') + ' ')
total_word = append_new_end(text_new[sentence][word],user_end)
text_complete[sentence].append(total_word)
word += 1
sentence += 1
return text_complete
def print_output(text_complete,text_orig,number_sentences):
sentence = 0
while sentence < number_sentences:
contained = [x for x in text_complete[sentence] if x not in text_orig[sentence]]
print
print "Original Sentence: " ,
write_output(strftime("%Y-%m-%d %H:%M:%S"))
write_output('\n')
write_output("Original Sentence: ")
write_output('\n')
for each in text_orig[sentence]:
print each,
write_output(each.encode('utf-8') + ' '),
print
write_output('\n')
print
write_output("User Completed Sentence: ")
write_output('\n')
print "User completed text: " ,
for each in text_complete[sentence]:
print each,
write_output(each.encode('utf-8') + ' '),
print
print
write_output('\n')
write_output("User Mistakes: ")
write_output('\n')
print "User Mistakes: "
for each in contained:
print each
write_output(each.encode('utf-8') + ' '),
print
print
sentence += 1
def write_output(input_text):
with open('output.txt', 'a') as f:
f.write(input_text)
|
__revision__ = "test/Platform.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"
import TestSCons
test = TestSCons.TestSCons()
test.write('SConstruct', """
env = Environment()
Platform('cygwin')(env)
print "'%s'" % env['PROGSUFFIX']
assert env['SHELL'] == 'sh'
Platform('os2')(env)
print "'%s'" % env['PROGSUFFIX']
env.Platform('posix')
print "'%s'" % env['PROGSUFFIX']
Platform('win32')(env)
print "'%s'" % env['PROGSUFFIX']
SConscript('SConscript')
""")
test.write('SConscript', """
env = Environment()
Platform('cygwin')(env)
print "'%s'" % env['LIBSUFFIX']
Platform('os2')(env)
print "'%s'" % env['LIBSUFFIX']
env.Platform('posix')
print "'%s'" % env['LIBSUFFIX']
Platform('win32')(env)
print "'%s'" % env['LIBSUFFIX']
""")
expect = test.wrap_stdout(read_str = """'.exe'
'.exe'
''
'.exe'
'.a'
'.lib'
'.a'
'.lib'
""", build_str = "scons: `.' is up to date.\n")
test.run(arguments = ".", stdout = expect)
test.pass_test()
|
"""Run Monte Carlo simulations."""
from joblib import Parallel, delayed
from frbpoppy import Survey, CosmicPopulation, SurveyPopulation, pprint
from datetime import datetime
from copy import deepcopy
from glob import glob
import frbpoppy.paths
import os
import numpy as np
import pandas as pd
from tqdm import tqdm
import uuid
POP_SIZE = 5e7
class SimulationOverview:
"""Given values, return uid
Load from file, or make."""
def __init__(self, load_csv=True):
p = frbpoppy.paths.populations()
        self.filename = f'{p}mc/simulation_overview.csv'
if load_csv and os.path.isfile(self.filename):
self.load()
else:
self.df = pd.DataFrame()
def load(self):
self.df = pd.read_csv(self.filename, index_col=0)
self.df = self.df.loc[:, ~self.df.columns.str.contains('^Unnamed')]
def save(self):
self.df.to_csv(self.filename)
def append(self, df):
        self.df = pd.concat([self.df, df], ignore_index=True)
def map_surveys(self, ix, names):
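        # e.g. map_surveys([0, 1], ['parkes-htru', 'chime-frb']) rewrites the
        # numeric 'survey' column produced by np.meshgrid into survey names.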
mapping = dict(zip(ix, names))
self.df.replace({"survey": mapping}, inplace=True)
class MonteCarlo:
def __init__(self, pop_size=1e2, load_csv=True):
self.survey_names = ['parkes-htru',
'chime-frb',
'askap-incoh',
'wsrt-apertif']
self.load_csv = load_csv
self.pop_size = pop_size
self.survey_ix = [i for i in range(len(self.survey_names))]
self.surveys = self.set_up_surveys()
self.so = SimulationOverview(load_csv=self.load_csv)
self.set_up_dirs()
def set_up_surveys(self):
"""Set up surveys."""
surveys = []
for name in self.survey_names:
survey = Survey(name=name)
survey.set_beam(model='airy', n_sidelobes=1)
if name in ('chime-frb', 'wsrt-apertif', 'parkes-htru'):
survey.set_beam(model=name)
surveys.append(survey)
return surveys
def set_up_dirs(self, run=np.nan):
"""Create subdirectory for saving populations.
Returns True if directory had to be set up."""
f = f'{frbpoppy.paths.populations()}mc/'
if not os.path.isdir(f):
os.mkdir(f)
return True
if not np.isnan(run):
f = f'{frbpoppy.paths.populations()}mc/run_{run}/'
if not os.path.isdir(f):
os.mkdir(f)
return True
return False
def gen_par_set_1(self,
parallel=True,
lum_min=np.nan,
lum_max=np.nan,
w_mean=np.nan,
w_std=np.nan,
dm_igm_slope=np.nan,
dm_host=np.nan,
run=0):
alphas = np.linspace(-2.5, -1, 11)
sis = np.linspace(-2, 2, 11)
lis = np.linspace(-2, 0, 11)
# Put all options into a dataframe
if 'run' in self.so.df:
self.so.df = self.so.df[self.so.df.run != run]
opt = np.meshgrid(alphas, sis, lis, self.survey_ix)
options = np.array(opt).T.reshape(-1, 4)
df = pd.DataFrame(options, columns=('alpha', 'si', 'li', 'survey'))
df['run'] = run
df['par_set'] = 1
df['uuid'] = [uuid.uuid4() for _ in range(len(df.index))]
df['date'] = datetime.today()
self.so.append(df)
self.so.map_surveys(self.survey_ix, self.survey_names)
self.so.save()
# Remove previous par_set of the same number
if not self.set_up_dirs(run=run):
fs = f'{frbpoppy.paths.populations()}mc/run_{run}/*'
for f in glob(fs):
os.remove(f)
def iter_alpha(i):
alpha = alphas[i]
pop = CosmicPopulation.complex(self.pop_size)
pop.set_dist(model='vol_co', z_max=1.0, alpha=alpha)
pop.set_lum(model='constant', value=1)
if not np.isnan(w_mean):
pop.set_w(model='lognormal', mean=w_mean, std=w_std)
if not np.isnan(dm_igm_slope):
pop.set_dm_igm(model='ioka', slope=dm_igm_slope)
pop.set_dm_host(model='constant', value=dm_host)
pop.generate()
for si in sis:
pop.set_si(model='constant', value=si)
pop.gen_si()
for li in lis:
pop.set_lum(model='powerlaw',
low=1e40,
high=1e45, power=li)
                    if not np.isnan(lum_min):
                        pop.set_lum(model='powerlaw', low=lum_min,
                                    high=lum_max, power=li)
pop.gen_lum()
for survey in self.surveys:
surv_pop = SurveyPopulation(pop, survey)
# Get unique identifier
mask = (self.so.df.par_set == 1)
mask &= (self.so.df.run == run)
mask &= (self.so.df.alpha == alpha)
mask &= (self.so.df.si == si)
mask &= (self.so.df.li == li)
mask &= (self.so.df.survey == survey.name)
                        uid = self.so.df[mask].uuid.iloc[0]
                        surv_pop.name = f'mc/run_{run}/{uid}'
surv_pop.save()
if parallel:
n_cpu = min([3, os.cpu_count() - 1])
pprint(f'{os.cpu_count()} CPUs available')
r = range(len(alphas))
Parallel(n_jobs=n_cpu)(delayed(iter_alpha)(i) for i in tqdm(r))
else:
[iter_alpha(i) for i in tqdm(range(len(alphas)))]
def gen_par_set_2(self,
parallel=True,
alpha=-1.5,
si=0,
w_mean=np.nan,
w_std=np.nan,
dm_igm_slope=np.nan,
dm_host=np.nan,
run=np.nan):
lis = np.linspace(-1.5, 0, 11)
lum_mins = 10**np.linspace(38, 46, 11)
lum_maxs = 10**np.linspace(38, 46, 11)
# Put all options into a dataframe
        if 'run' in self.so.df:
            self.so.df = self.so.df[self.so.df.run != run]
opt = np.meshgrid(lis, lum_mins, lum_maxs, self.survey_ix)
options = np.array(opt).T.reshape(-1, 4)
cols = ('li', 'lum_min', 'lum_max', 'survey')
df = pd.DataFrame(options, columns=cols)
df['par_set'] = 2
df['run'] = run
df['uuid'] = [uuid.uuid4() for _ in range(len(df.index))]
df['date'] = datetime.today()
df = df[~(df.lum_max < df.lum_min)]
self.so.append(df)
self.so.map_surveys(self.survey_ix, self.survey_names)
self.so.save()
# Remove previous par_set of the same number
if not self.set_up_dirs(run=run):
fs = f'{frbpoppy.paths.populations()}mc/run_{run}/*'
for f in glob(fs):
os.remove(f)
pop = CosmicPopulation.complex(self.pop_size)
if not np.isnan(alpha):
pop.set_dist(model='vol_co', z_max=1.0, alpha=alpha)
pop.set_si(model='constant', value=si)
pop.set_lum(model='constant', value=1)
if not np.isnan(w_mean):
pop.set_w(model='lognormal', mean=w_mean, std=w_std)
if not np.isnan(dm_igm_slope):
pop.set_dm_igm(model='ioka', slope=dm_igm_slope)
pop.set_dm_host(model='constant', value=dm_host)
pop.generate()
def adapt_pop(e):
li, lum_min, lum_max = e
if lum_max < lum_min:
return
t_pop = deepcopy(pop)
t_pop.set_lum(model='powerlaw', low=lum_min, high=lum_max,
power=li)
t_pop.gen_lum()
for survey in self.surveys:
surv_pop = SurveyPopulation(t_pop, survey)
# Get unique identifier
mask = (self.so.df.par_set == 2)
mask &= (self.so.df.run == run)
mask &= (self.so.df.li == li)
mask &= (self.so.df.lum_min == lum_min)
mask &= (self.so.df.lum_max == lum_max)
mask &= (self.so.df.survey == survey.name)
                uid = self.so.df[mask].uuid.iloc[0]
                surv_pop.name = f'mc/run_{run}/{uid}'
surv_pop.save()
n_cpu = min([3, os.cpu_count() - 1])
pprint(f'{os.cpu_count()} CPUs available')
mg = np.meshgrid(lis, lum_mins, lum_maxs)
loop = np.array(mg).T.reshape(-1, 3)
if parallel:
Parallel(n_jobs=n_cpu)(delayed(adapt_pop)(e) for e in tqdm(loop))
else:
[adapt_pop(e) for e in tqdm(loop)]
def gen_par_set_3(self,
parallel=True,
alpha=-1.5,
si=0,
li=-1,
lum_min=1e40,
lum_max=1e40,
dm_igm_slope=np.nan,
dm_host=np.nan,
run=np.nan):
w_means = 10**np.linspace(-3, 1, 11)
w_stds = np.linspace(0, 3, 11)
# Put all options into a dataframe
        if 'run' in self.so.df:
            self.so.df = self.so.df[self.so.df.run != run]
opt = np.meshgrid(w_means, w_stds, self.survey_ix)
options = np.array(opt).T.reshape(-1, 3)
cols = ('w_mean', 'w_std', 'survey')
df = pd.DataFrame(options, columns=cols)
df['run'] = run
df['par_set'] = 3
df['uuid'] = [uuid.uuid4() for _ in range(len(df.index))]
df['date'] = datetime.today()
self.so.append(df)
self.so.map_surveys(self.survey_ix, self.survey_names)
self.so.save()
# Remove previous par_set of the same number
if not self.set_up_dirs(run=run):
fs = f'{frbpoppy.paths.populations()}mc/run_{run}/*'
for f in glob(fs):
os.remove(f)
pop = CosmicPopulation.complex(self.pop_size)
if not np.isnan(alpha):
pop.set_dist(model='vol_co', z_max=1.0, alpha=alpha)
pop.set_si(model='constant', value=si)
if not np.isnan(lum_min):
            pop.set_lum(model='powerlaw', low=lum_min, high=lum_max, power=li)
if not np.isnan(dm_igm_slope):
pop.set_dm_igm(model='ioka', slope=dm_igm_slope)
pop.set_dm_host(model='constant', value=dm_host)
pop.generate()
def adapt_pop(e):
w_mean, w_std = e
t_pop = deepcopy(pop)
t_pop.set_w(model='lognormal', mean=w_mean, std=w_std)
t_pop.gen_w()
for survey in self.surveys:
surv_pop = SurveyPopulation(t_pop, survey)
# Get unique identifier
mask = (self.so.df.par_set == 3)
mask &= (self.so.df.run == run)
mask &= (self.so.df.w_mean == w_mean)
mask &= (self.so.df.w_std == w_std)
mask &= (self.so.df.survey == survey.name)
                uid = self.so.df[mask].uuid.iloc[0]
                surv_pop.name = f'mc/run_{run}/{uid}'
surv_pop.save()
n_cpu = min([3, os.cpu_count() - 1])
pprint(f'{os.cpu_count()} CPUs available')
mg = np.meshgrid(w_means, w_stds)
loop = np.array(mg).T.reshape(-1, 2)
if parallel:
Parallel(n_jobs=n_cpu)(delayed(adapt_pop)(e) for e in tqdm(loop))
else:
[adapt_pop(e) for e in tqdm(loop)]
def gen_par_set_4(self,
parallel=True,
alpha=-1.5,
si=0,
li=-1,
lum_min=1e40,
lum_max=1e40,
w_mean=np.nan,
w_std=np.nan,
run=np.nan):
dm_igm_slopes = np.linspace(800, 1200, 11)
dm_hosts = np.linspace(0, 500, 11)
# Put all options into a dataframe
        if 'run' in self.so.df:
            self.so.df = self.so.df[self.so.df.run != run]
opt = np.meshgrid(dm_igm_slopes, dm_hosts, self.survey_ix)
options = np.array(opt).T.reshape(-1, 3)
cols = ('dm_igm_slope', 'dm_host', 'survey')
df = pd.DataFrame(options, columns=cols)
df['run'] = run
df['par_set'] = 4
df['uuid'] = [uuid.uuid4() for _ in range(len(df.index))]
df['date'] = datetime.today()
self.so.append(df)
self.so.map_surveys(self.survey_ix, self.survey_names)
self.so.save()
# Remove previous par_set of the same number
if not self.set_up_dirs(run=run):
fs = f'{frbpoppy.paths.populations()}mc/run_{run}/*'
for f in glob(fs):
os.remove(f)
pop = CosmicPopulation.complex(self.pop_size)
if not np.isnan(alpha):
pop.set_dist(model='vol_co', z_max=1.0, alpha=alpha)
pop.set_si(model='constant', value=si)
if not np.isnan(lum_min):
            pop.set_lum(model='powerlaw', low=lum_min, high=lum_max, power=li)
if not np.isnan(w_mean):
pop.set_w(model='lognormal', mean=w_mean, std=w_std)
pop.generate()
def adapt_pop(e):
dm_igm_slope, dm_host = e
t_pop = deepcopy(pop)
t_pop.set_dm_igm(model='ioka', slope=dm_igm_slope)
t_pop.gen_dm_igm()
t_pop.set_dm_host(model='constant', value=dm_host)
t_pop.gen_dm_host()
t_pop.frbs.dm = t_pop.frbs.dm_mw + t_pop.frbs.dm_igm
t_pop.frbs.dm += t_pop.frbs.dm_host
for survey in self.surveys:
surv_pop = SurveyPopulation(t_pop, survey)
# Get unique identifier
mask = (self.so.df.par_set == 4)
mask &= (self.so.df.run == run)
mask &= (self.so.df.dm_igm_slope == dm_igm_slope)
mask &= (self.so.df.dm_host == dm_host)
mask &= (self.so.df.survey == survey.name)
                uid = self.so.df[mask].uuid.iloc[0]
                surv_pop.name = f'mc/run_{run}/{uid}'
surv_pop.save()
n_cpu = min([4, os.cpu_count() - 1])
pprint(f'{os.cpu_count()} CPUs available')
mg = np.meshgrid(dm_igm_slopes, dm_hosts)
loop = np.array(mg).T.reshape(-1, 2)
if parallel:
Parallel(n_jobs=n_cpu)(delayed(adapt_pop)(e) for e in tqdm(loop))
else:
[adapt_pop(e) for e in tqdm(loop)]
|
try: from setuptools import setup
except ImportError: from distutils.core import setup
setup( long_description=open("README.rst").read(),
name="""tinypath""",
license="""MIT""",
author="""Karim Bahgat""",
author_email="""karim.bahgat.norway@gmail.com""",
py_modules=['tinypath'],
url="""http://github.com/karimbahgat/tinypath""",
version="""0.1.1""",
keywords="""paths files folders organizing""",
classifiers=['License :: OSI Approved', 'Programming Language :: Python', 'Development Status :: 4 - Beta', 'Intended Audience :: Developers', 'Intended Audience :: Science/Research', 'Intended Audience :: End Users/Desktop'],
description="""Tinypath is a tiny object-oriented file path module that provides only the most crucial and commonly needed functionality, making it easy to learn and efficient to use.""",
)
|
from south.db import db
from django.db import models
from askmeanything.models import *
class Migration:
def forwards(self, orm):
"Write your forwards migration here"
def backwards(self, orm):
"Write your backwards migration here"
models = {
'askmeanything.poll': {
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'polls'", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'open': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'question': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'askmeanything.response': {
'answer': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'poll': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'responses'", 'to': "orm['askmeanything.Poll']"}),
'votes': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'auth.group': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)"},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['askmeanything']
|
import os
from shutil import copyfile
from photomanip.metadata import ImageExif, SetExifTool
from nose import tools
ORIGINAL_IMAGE_FILENAME = 'photomanip/tests/turd_ferguson.jpeg'
TEST_IMAGE_FILENAME = 'photomanip/tests/image_exif_test.jpg'
ORIGINAL_PHOTO_FILENAME = 'photomanip/tests/test_photo_0.jpg'
TEST_PHOTO_01_FILENAME = 'photomanip/tests/image_exposure_test_01.jpg'
TEST_PHOTO_02_FILENAME = 'photomanip/tests/image_exposure_test_02.jpg'
class TestImageExif:
@classmethod
def setup_class(cls):
cls.image_exif = ImageExif()
copyfile(ORIGINAL_IMAGE_FILENAME, TEST_IMAGE_FILENAME)
copyfile(ORIGINAL_PHOTO_FILENAME, TEST_PHOTO_01_FILENAME)
copyfile(ORIGINAL_PHOTO_FILENAME, TEST_PHOTO_02_FILENAME)
@classmethod
def teardown_class(cls):
os.remove(TEST_IMAGE_FILENAME)
os.remove(TEST_PHOTO_01_FILENAME)
os.remove(TEST_PHOTO_02_FILENAME)
def get_stored_tags(self, tag_list, filename):
with SetExifTool() as et:
stored_tags = et.get_tags(tag_list, filename)
return stored_tags
def test_imageexif_generate_tag_list(self):
get_list = self.image_exif.get_list
# test get list
tag_list = self.image_exif._generate_tag_list(get_list)
tools.eq_(set(tag_list), set([
'EXIF:DateTimeOriginal',
'File:ImageHeight',
'IPTC:Keywords',
'EXIF:ExposureTime',
'File:ImageWidth']))
# test set list
tag_list = self.image_exif._generate_tag_list(get_list, True)
tools.eq_(tag_list, {
'date_created': 'EXIF:DateTimeOriginal={}',
'exposure_time': 'EXIF:ExposureTime={}',
'image_height': 'File:ImageHeight={}',
'image_width': 'File:ImageWidth={}',
'keywords': 'IPTC:Keywords={}'})
def test_set_image_metadata(self):
output_meta = {
"name": "Terd Ferguson",
"keywords": "one, two, three",
"caption": "suck it, trebeck",
}
result = self.image_exif.set_image_metadata(TEST_IMAGE_FILENAME,
output_meta)
tools.eq_(result, '1 image files updated\n')
check_tags = self.image_exif._generate_tag_list(output_meta.keys())
stored_tags = self.get_stored_tags(check_tags, TEST_IMAGE_FILENAME)
# now check if the metadata matches
for key, val in output_meta.items():
mapped_key = self.image_exif.metadata_map[key]
tools.eq_(val, stored_tags[mapped_key])
def test_calculate_exposure_time(self):
tag_list = self.image_exif._generate_tag_list(['exposure_time'])
stored_tags = self.get_stored_tags(tag_list, TEST_PHOTO_01_FILENAME)
tools.eq_(stored_tags['EXIF:ExposureTime'], 0.001333333333)
def test_get_tags_containing(self):
tag_list = self.image_exif._generate_tag_list(['keywords'])
stored_tags = self.get_stored_tags(tag_list, TEST_PHOTO_01_FILENAME)
result = self.image_exif.get_tags_containing(
stored_tags['IPTC:Keywords'], 'faceit365')
tools.eq_(result, 'faceit365:date=20190308')
def test_get_metadata_batch(self):
fname_list = [TEST_PHOTO_01_FILENAME, TEST_PHOTO_02_FILENAME]
meta_list = self.image_exif.get_metadata_batch(fname_list)
meta_list[0].pop('SourceFile')
meta_list[1].pop('SourceFile')
tools.eq_(meta_list[0], meta_list[1])
|
__author__ = 'jhala'
import os.path, time
import json
import logging
import logging.config
logging.config.fileConfig('logging.conf')
logger = logging.getLogger(__name__)
import re
appInfo='appinfo.json'
''' Helper Functions '''
''' get the file as an array of arrays ( header + rows and columns) '''
def fileInfo(fil):
fileArr=[]
for i in open(fil):
fileArr.append(i.strip().split(","))
return fileArr
''' Return the header as an array '''
def getHeader(fileArr):
    return fileArr[0] if fileArr else None
def fileLastTouchedTime(fileName):
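    # Return whichever of the file's mtime and ctime is most recent.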
mtim= int(os.path.getmtime(fileName))
ctim= int(os.path.getctime(fileName))
tims = [ mtim, ctim]
tims.sort()
return tims[len(tims)-1]
def getImageLocation():
f=open(appInfo,'r')
loc=json.load(f)
return loc['imageLocation']
def getImageDataLocation():
f=open(appInfo,'r')
loc=json.load(f)
return loc['imageData']
def getMatLabFeatureExtractScript():
f=open(appInfo,'r')
loc=json.load(f)
return loc['matlabFeatureExtractScript']
def getMatLabSemanticElementsScript():
f=open(appInfo,'r')
loc=json.load(f)
return loc['matlabSemanticElementsScript']
def getMatlabSemanticElementsOutputFile():
f=open(appInfo,'r')
loc=json.load(f)
return loc['matlabSemanticElementsOutputFile']
def removeMatlabSemanticElementsOutputFile():
f=getMatlabSemanticElementsOutputFile()
if os.path.exists(f) and os.path.isfile(f):
os.remove(f)
def getMatlabFeatureOutputFile():
f=open(appInfo,'r')
loc=json.load(f)
return loc['matlabFeatureOutputFile']
def getTestImageName():
f=open(appInfo,'r')
loc=json.load(f)
return loc['testImage']
def removeMatlabFeatureOutputFile():
f=getMatlabFeatureOutputFile()
if os.path.exists(f) and os.path.isfile(f):
os.remove(f)
def checkFileNameExists(filName):
return os.path.exists(filName) and os.path.isfile(filName)
def getMainImageFileList():
fileList=[]
epoch=time.mktime(time.strptime('1970','%Y'))
for root, dirs, files in os.walk(getImageLocation()):
#print root
#print dirs
for fil in files:
thisFileName=os.path.join(root, fil)
dataFileExists=False
imageFileNewerThanDataFile=False
dataFileRequiresUpdate=False
if isMainImageFile(thisFileName) and checkFileNameExists(thisFileName):
mainImageLastTouched=fileLastTouchedTime(thisFileName)
expectedDataFileName = os.path.join(getImageDataLocation(), os.path.basename(root)+'_'+fil+'.json')
if checkFileNameExists(expectedDataFileName ):
dataFileExists=True
dataFileLastTouched=fileLastTouchedTime(expectedDataFileName)
else:
dataFileExists=False
dataFileLastTouched=epoch
if dataFileExists and ( mainImageLastTouched > dataFileLastTouched) :
dataFileRequiresUpdate=True
if not dataFileExists:
dataFileRequiresUpdate=True
lcImageExists=False
lcImageName = getLCImageName(thisFileName)
if lcImageName != None:
lcImageExists=True
                fileList.append({
                    'lcImageExists': lcImageExists,
                    'lcImageName': lcImageName,
                    'dataFileRequiresUpdate': dataFileRequiresUpdate,
                    'imageFile': str(thisFileName),
                    'dataFile': expectedDataFileName,
                    'imageLastTouched': mainImageLastTouched,
                    'dataLastTouched': dataFileLastTouched,
                    'dataFileExists': dataFileExists,
                })
return fileList
def isMainImageFile(fileName):
    if re.search(r'\.jpg$', fileName, flags=re.IGNORECASE) and not re.search(r'LC\.jpg$', fileName, flags=re.IGNORECASE):
return True
else:
return False
def getLCImageName(imageFileName):
r=re.match("(.*)(.jpg)", imageFileName, flags=re.IGNORECASE)
if not r:
logger.error("Invalid image file name given" + imageFileName)
return None
else:
lcImageName = r.group(1) + "LC"+ r.group(2)
if checkFileNameExists(lcImageName):
return lcImageName
else:
logger.error('Image file does not exist: ' +lcImageName)
return None
|
from django.contrib.sites.models import Site
from django.utils._os import safe_join
from django.views.generic import TemplateView
from skin.conf import settings
from skin.template.loaders.util import get_site_skin
class TemplateSkinView(TemplateView):
"""
A view that extends Djangos base TemplateView to allow you to set up skins.
"""
skin_name = None
skin_path = None
def get_skin_name(self):
if self.skin_name is None:
return settings.SKIN_NAME
else:
return self.skin_name
def get_skin(self):
return get_site_skin(site=Site.objects.get_current(), name=self.get_skin_name())
def get_skin_path(self):
if self.skin_path is not None:
return self.skin_path
skin = self.get_skin()
if skin is not None:
return skin.path
else:
return None
def get_template_names(self):
template_names = super(TemplateSkinView, self).get_template_names()
skin_path = self.get_skin_path()
skin_template_names = []
if skin_path is not None:
for template_name in template_names:
skin_template_names.append(safe_join(skin_path, template_name))
return skin_template_names + template_names
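# Usage sketch (hypothetical template and skin names; skin-prefixed template
# paths are tried first, then the plain template names):
#     TemplateSkinView.as_view(template_name='home.html', skin_name='summer')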
|
import time
from direct.directnotify.DirectNotifyGlobal import *
from otp.ai.AIBaseGlobal import *
from toontown.building import DistributedBuildingAI
from toontown.building import GagshopBuildingAI
from toontown.building import HQBuildingAI
from toontown.building import KartShopBuildingAI
from toontown.building import PetshopBuildingAI
from toontown.hood import ZoneUtil
class DistributedBuildingMgrAI:
notify = directNotify.newCategory('DistributedBuildingMgrAI')
def __init__(self, air, branchId, dnaStore, trophyMgr):
self.air = air
self.branchId = branchId
self.canonicalBranchId = ZoneUtil.getCanonicalZoneId(self.branchId)
self.dnaStore = dnaStore
self.trophyMgr = trophyMgr
self.__buildings = {}
self.findAllLandmarkBuildings()
def cleanup(self):
for building in self.__buildings.values():
building.cleanup()
self.__buildings = {}
def isValidBlockNumber(self, blockNumber):
return blockNumber in self.__buildings
def isSuitBlock(self, blockNumber):
if not self.isValidBlockNumber(blockNumber):
return False
return self.__buildings[blockNumber].isSuitBlock()
def getSuitBlocks(self):
blocks = []
for blockNumber, building in self.__buildings.items():
if building.isSuitBlock():
blocks.append(blockNumber)
return blocks
def getEstablishedSuitBlocks(self):
blocks = []
for blockNumber, building in self.__buildings.items():
if building.isEstablishedSuitBlock():
blocks.append(blockNumber)
return blocks
def getToonBlocks(self):
blocks = []
for blockNumber, building in self.__buildings.items():
if isinstance(building, HQBuildingAI.HQBuildingAI):
continue
if isinstance(building, GagshopBuildingAI.GagshopBuildingAI):
continue
if isinstance(building, PetshopBuildingAI.PetshopBuildingAI):
continue
if isinstance(building, KartShopBuildingAI.KartShopBuildingAI):
continue
if not building.isSuitBlock():
blocks.append(blockNumber)
return blocks
def getBuildings(self):
return self.__buildings.values()
def getFrontDoorPoint(self, blockNumber):
if self.isValidBlockNumber(blockNumber):
return self.__buildings[blockNumber].getFrontDoorPoint()
def getBuildingTrack(self, blockNumber):
if self.isValidBlockNumber(blockNumber):
return self.__buildings[blockNumber].track
def getBuilding(self, blockNumber):
if self.isValidBlockNumber(blockNumber):
return self.__buildings[blockNumber]
def setFrontDoorPoint(self, blockNumber, point):
if self.isValidBlockNumber(blockNumber):
return self.__buildings[blockNumber].setFrontDoorPoint(point)
def getDNABlockLists(self):
blocks = []
hqBlocks = []
gagshopBlocks = []
petshopBlocks = []
kartshopBlocks = []
animBldgBlocks = []
for i in xrange(self.dnaStore.getNumBlockNumbers()):
blockNumber = self.dnaStore.getBlockNumberAt(i)
buildingType = self.dnaStore.getBlockBuildingType(blockNumber)
if buildingType == 'hq':
hqBlocks.append(blockNumber)
elif buildingType == 'gagshop':
gagshopBlocks.append(blockNumber)
elif buildingType == 'petshop':
petshopBlocks.append(blockNumber)
elif buildingType == 'kartshop':
kartshopBlocks.append(blockNumber)
elif buildingType == 'animbldg':
animBldgBlocks.append(blockNumber)
else:
blocks.append(blockNumber)
return (blocks, hqBlocks, gagshopBlocks, petshopBlocks, kartshopBlocks,
animBldgBlocks)
def findAllLandmarkBuildings(self):
backups = simbase.backups.load('blockinfo', (self.air.districtId, self.branchId), default={})
(blocks, hqBlocks, gagshopBlocks, petshopBlocks, kartshopBlocks,
animBldgBlocks) = self.getDNABlockLists()
for blockNumber in blocks:
self.newBuilding(blockNumber, backup=backups.get(blockNumber, None))
for blockNumber in animBldgBlocks:
self.newAnimBuilding(blockNumber, backup=backups.get(blockNumber, None))
for blockNumber in hqBlocks:
self.newHQBuilding(blockNumber)
for blockNumber in gagshopBlocks:
self.newGagshopBuilding(blockNumber)
for block in petshopBlocks:
self.newPetshopBuilding(block)
for block in kartshopBlocks:
self.newKartShopBuilding(block)
def newBuilding(self, blockNumber, backup=None):
building = DistributedBuildingAI.DistributedBuildingAI(
self.air, blockNumber, self.branchId, self.trophyMgr)
building.generateWithRequired(self.branchId)
if backup is not None:
state = backup.get('state', 'toon')
if ((state == 'suit') and simbase.air.wantCogbuildings) or (
(state == 'cogdo') and simbase.air.wantCogdominiums):
building.track = backup.get('track', 'c')
building.difficulty = backup.get('difficulty', 1)
building.numFloors = backup.get('numFloors', 1)
building.updateSavedBy(backup.get('savedBy'))
building.becameSuitTime = backup.get('becameSuitTime', time.mktime(time.gmtime()))
if (state == 'suit') and simbase.air.wantCogbuildings:
building.setState('suit')
elif (state == 'cogdo') and simbase.air.wantCogdominiums:
building.setState('cogdo')
else:
building.setState('toon')
else:
building.setState('toon')
else:
building.setState('toon')
self.__buildings[blockNumber] = building
return building
def newAnimBuilding(self, blockNumber, backup=None):
return self.newBuilding(blockNumber, backup=backup)
def newHQBuilding(self, blockNumber):
dnaStore = self.air.dnaStoreMap[self.canonicalBranchId]
exteriorZoneId = dnaStore.getZoneFromBlockNumber(blockNumber)
exteriorZoneId = ZoneUtil.getTrueZoneId(exteriorZoneId, self.branchId)
interiorZoneId = (self.branchId - (self.branchId%100)) + 500 + blockNumber
building = HQBuildingAI.HQBuildingAI(
self.air, exteriorZoneId, interiorZoneId, blockNumber)
self.__buildings[blockNumber] = building
return building
def newGagshopBuilding(self, blockNumber):
dnaStore = self.air.dnaStoreMap[self.canonicalBranchId]
exteriorZoneId = dnaStore.getZoneFromBlockNumber(blockNumber)
exteriorZoneId = ZoneUtil.getTrueZoneId(exteriorZoneId, self.branchId)
interiorZoneId = (self.branchId - (self.branchId%100)) + 500 + blockNumber
building = GagshopBuildingAI.GagshopBuildingAI(
self.air, exteriorZoneId, interiorZoneId, blockNumber)
self.__buildings[blockNumber] = building
return building
def newPetshopBuilding(self, blockNumber):
dnaStore = self.air.dnaStoreMap[self.canonicalBranchId]
exteriorZoneId = dnaStore.getZoneFromBlockNumber(blockNumber)
exteriorZoneId = ZoneUtil.getTrueZoneId(exteriorZoneId, self.branchId)
interiorZoneId = (self.branchId - (self.branchId%100)) + 500 + blockNumber
building = PetshopBuildingAI.PetshopBuildingAI(
self.air, exteriorZoneId, interiorZoneId, blockNumber)
self.__buildings[blockNumber] = building
return building
def newKartShopBuilding(self, blockNumber):
dnaStore = self.air.dnaStoreMap[self.canonicalBranchId]
exteriorZoneId = dnaStore.getZoneFromBlockNumber(blockNumber)
exteriorZoneId = ZoneUtil.getTrueZoneId(exteriorZoneId, self.branchId)
interiorZoneId = (self.branchId - (self.branchId%100)) + 500 + blockNumber
building = KartShopBuildingAI.KartShopBuildingAI(
self.air, exteriorZoneId, interiorZoneId, blockNumber)
self.__buildings[blockNumber] = building
return building
def save(self):
buildings = {}
for blockNumber in self.getSuitBlocks():
building = self.getBuilding(blockNumber)
backup = {
'state': building.fsm.getCurrentState().getName(),
'block': building.block,
'track': building.track,
'difficulty': building.difficulty,
'numFloors': building.numFloors,
'savedBy': building.savedBy,
'becameSuitTime': building.becameSuitTime
}
buildings[blockNumber] = backup
simbase.backups.save('blockinfo', (self.air.districtId, self.branchId), buildings)
|
__author__ = 'ivan.shynkarenka'
import argparse
from TTWebClient.TickTraderWebClient import TickTraderWebClient
def main():
parser = argparse.ArgumentParser(description='TickTrader Web API sample')
parser.add_argument('web_api_address', help='TickTrader Web API address')
args = parser.parse_args()
# Create instance of the TickTrader Web API client
client = TickTraderWebClient(args.web_api_address)
# Public currencies
currencies = client.get_public_all_currencies()
for c in currencies:
print('Currency: {0}'.format(c['Name']))
currency = client.get_public_currency(currencies[0]['Name'])
print("{0} currency precision: {1}".format(currency[0]['Name'], currency[0]['Precision']))
if __name__ == '__main__':
main()
|
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import spectator.core.fields
class Migration(migrations.Migration):
initial = True
dependencies = [
("spectator_core", "0001_initial"),
]
operations = [
migrations.CreateModel(
name="Publication",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"time_created",
models.DateTimeField(
auto_now_add=True,
help_text="The time this item was created in the database.",
),
),
(
"time_modified",
models.DateTimeField(
auto_now=True,
help_text="The time this item was last saved to the database.",
),
),
(
"title",
models.CharField(
help_text="e.g. 'Aurora' or 'Vol. 39 No. 4, 16 February 2017'.",
max_length=255,
),
),
(
"title_sort",
spectator.core.fields.NaturalSortField(
"title",
db_index=True,
default="",
editable=False,
help_text="e.g. 'clockwork orange, a' or 'world cities, the'.",
max_length=255,
),
),
(
"kind",
models.CharField(
choices=[("book", "Book"), ("periodical", "Periodical")],
default="book",
max_length=20,
),
),
(
"official_url",
models.URLField(
blank=True,
help_text="Official URL for this book/issue.",
max_length=255,
verbose_name="Official URL",
),
),
(
"isbn_uk",
models.CharField(
blank=True,
help_text="e.g. '0356500489'.",
max_length=20,
verbose_name="UK ISBN",
),
),
(
"isbn_us",
models.CharField(
blank=True,
help_text="e.g. '0316098094'.",
max_length=20,
verbose_name="US ISBN",
),
),
(
"notes_url",
models.URLField(
blank=True,
help_text="URL of your notes/review.",
max_length=255,
verbose_name="Notes URL",
),
),
],
options={"ordering": ("title_sort",)},
),
migrations.CreateModel(
name="PublicationRole",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"time_created",
models.DateTimeField(
auto_now_add=True,
help_text="The time this item was created in the database.",
),
),
(
"time_modified",
models.DateTimeField(
auto_now=True,
help_text="The time this item was last saved to the database.",
),
),
(
"role_name",
models.CharField(
blank=True,
help_text="e.g. 'Headliner', 'Support', 'Editor', 'Illustrator', 'Director', etc. Optional.", # noqa: E501
max_length=50,
),
),
(
"role_order",
models.PositiveSmallIntegerField(
default=1,
help_text="The order in which the Creators will be listed.",
),
),
(
"creator",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="publication_roles",
to="spectator_core.Creator",
),
),
(
"publication",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="roles",
to="spectator_reading.Publication",
),
),
],
options={"ordering": ("role_order", "role_name"), "abstract": False},
),
migrations.CreateModel(
name="PublicationSeries",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"time_created",
models.DateTimeField(
auto_now_add=True,
help_text="The time this item was created in the database.",
),
),
(
"time_modified",
models.DateTimeField(
auto_now=True,
help_text="The time this item was last saved to the database.",
),
),
(
"title",
models.CharField(
help_text="e.g. 'The London Review of Books'.", max_length=255
),
),
(
"title_sort",
spectator.core.fields.NaturalSortField(
"title",
db_index=True,
default="",
editable=False,
help_text="e.g. 'london review of books, the'.",
max_length=255,
),
),
(
"url",
models.URLField(
blank=True,
help_text="e.g. 'https://www.lrb.co.uk/'.",
max_length=255,
verbose_name="URL",
),
),
],
options={
"verbose_name_plural": "Publication series",
"ordering": ("title_sort",),
},
),
migrations.CreateModel(
name="Reading",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"time_created",
models.DateTimeField(
auto_now_add=True,
help_text="The time this item was created in the database.",
),
),
(
"time_modified",
models.DateTimeField(
auto_now=True,
help_text="The time this item was last saved to the database.",
),
),
("start_date", models.DateField(blank=True, null=True)),
(
"start_granularity",
models.PositiveSmallIntegerField(
choices=[(3, "Y-m-d"), (4, "Y-m"), (6, "Y")], default=3
),
),
("end_date", models.DateField(blank=True, null=True)),
(
"end_granularity",
models.PositiveSmallIntegerField(
choices=[(3, "Y-m-d"), (4, "Y-m"), (6, "Y")], default=3
),
),
(
"is_finished",
models.BooleanField(
default=False, help_text="Did you finish the publication?"
),
),
(
"publication",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to="spectator_reading.Publication",
),
),
],
options={"abstract": False},
),
migrations.AddField(
model_name="publication",
name="creators",
field=models.ManyToManyField(
related_name="publications",
through="spectator_reading.PublicationRole",
to="spectator_core.Creator",
),
),
migrations.AddField(
model_name="publication",
name="series",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to="spectator_reading.PublicationSeries",
),
),
]
|
from gluon.storage import Storage
settings = Storage()
settings.logon_methods = 'web2pyandjanrain'
settings.verification = False
settings.approval = False
|
import bottle
import settings
from controller import admin as admin_controller
from controller import email as email_controller
app = application = bottle.Bottle()
app.route(settings.BASEPATH, 'GET', admin_controller.index)
app.route(settings.BASEPATH + '/', 'GET', admin_controller.index)
app.route(
settings.BASEPATH + '/tasks/<id>',
'GET',
admin_controller.read_user_tasks
)
app.route(
settings.BASEPATH + '/update/<id>',
'POST',
admin_controller.update_self
)
email = bottle.Bottle()
app.mount(settings.EMAIL_PATH, email)
email.route('/', 'POST', email_controller.receive_email)
email.route('/', 'GET', email_controller.test_form)
email.route('', 'GET', email_controller.test_form)
admin = bottle.Bottle()
app.mount(settings.ADMIN_PATH, admin)
admin.route('/tasks', 'GET', admin_controller.read_tasks)
admin.route('/create', 'POST', admin_controller.create_person)
admin.route('/delete', 'POST', admin_controller.delete_people)
admin.route('/<id>', 'GET', admin_controller.read_person)
admin.route('/<id>', 'POST', admin_controller.update_person)
admin.route('/', 'GET', admin_controller.admin)
app.route(
settings.STATIC_PATH + '/<type>/<filename>',
'GET',
lambda **kwargs: bottle.static_file(
filename=kwargs['filename'], root='static/' + kwargs['type']
)
)
if __name__ == '__main__':
bottle.run(app=app, reloader=True, **settings.SERVER)
|
import vk
import json
from sentiment_classifiers import SentimentClassifier, binary_dict, files
class VkFeatureProvider(object):
def __init__(self):
self._vk_api = vk.API(vk.Session())
self._vk_delay = 0.3
self._clf = SentimentClassifier(files['binary_goods'], binary_dict)
def _vk_grace(self):
import time
time.sleep(self._vk_delay)
    def get_news(self, sources, amount=10):
        # entry for Alex Analysis tool
        result = []
        for source in sources:
            try:
                data = self._vk_api.wall.get(domain=source, count=amount, extended=1, fields='name')
                self._vk_grace()
            except Exception:
                return []
news = []
for node in data['wall'][1:]:
try:
if node['post_type'] != 'post':
continue
text = node['text']
#print('{}'.format(text.encode('utf-8')))
rate = self._clf.predict_text(text)[0]
news.append({'text' : '{}'.format(text.encode('utf-8')), 'rate' : rate})
except Exception as e:
print('Exception: {}'.format(e))
result.append({'source': data['groups'][0]['name'], 'news': news})
#return json.dumps(result)
return result
    # NOTE: a completely different feature, but personally very useful for me
def friends_intersect(self, uid_list):
result = None
try:
result = set(self._vk_api.friends.get(user_id=uid_list[0]))
self._vk_grace()
        except Exception:
            pass
for i, uid in enumerate(uid_list[1:]):
try:
tmp = set(self._vk_api.friends.get(user_id=uid))
self._vk_grace()
            except Exception:
                continue
if result is not None:
result = result.intersection(tmp)
else:
result = tmp
return result
def get_user_info(self, entry_uid, fname=None, lname=None):
try:
friend_list = self._vk_api.friends.get(user_id=entry_uid, fields='personal', name_case='nom')
self._vk_grace()
        except Exception:
            return []
return [x for x in friend_list
if (not fname or fname in x['first_name']) and (not lname or lname in x['last_name'])]
def get_uid_set_info(self, uid_set):
result = []
for friend_uid in uid_set:
try:
friend = self._vk_api.users.get(user_id=friend_uid, fields='sex,personal', name_case='nom')
self._vk_grace()
            except Exception:
                continue
result.append(friend)
return result
if __name__ == '__main__':
provider = VkFeatureProvider()
res = provider.get_news(['scientific.american'], 5)
print(res)
|
"""Settings to be used for running tests."""
from settings import *
INSTALLED_APPS.append('integration_tests')
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": ":memory:",
}
}
EMAIL_SUBJECT_PREFIX = '[test] '
EMAIL_BACKEND = 'django.core.mail.backends.locmem.EmailBackend'
SOUTH_TESTS_MIGRATE = False
|
import sublime, sublime_plugin, re
class EmmetCssFromOneLineCommand(sublime_plugin.TextCommand):
def run(self, edit):
view = self.view
line_region = view.line(view.sel()[0])
line_str = view.substr(line_region)
        left_padding = re.match(r'\s*', line_str).group(0)
# find commands in line
props_array = re.findall(r'([a-zA-Z0-9:!;().,?/\-+#]+)', line_str)
# Delete long string
view.replace(edit, line_region, '')
def runEmmet():
view.run_command("expand_abbreviation_by_tab")
# Processing first element
view.insert(edit, view.sel()[0].end(), left_padding + props_array[0])
runEmmet()
i = 1
while i < len(props_array):
view.insert(edit, view.sel()[0].end(), '\n' + left_padding + props_array[i])
runEmmet()
i += 1
|
try:
from ._models_py3 import Error
from ._models_py3 import Key
from ._models_py3 import KeyListResult
from ._models_py3 import KeyValue
from ._models_py3 import KeyValueListResult
from ._models_py3 import Label
from ._models_py3 import LabelListResult
except (SyntaxError, ImportError):
from ._models import Error # type: ignore
from ._models import Key # type: ignore
from ._models import KeyListResult # type: ignore
from ._models import KeyValue # type: ignore
from ._models import KeyValueListResult # type: ignore
from ._models import Label # type: ignore
from ._models import LabelListResult # type: ignore
from ._azure_app_configuration_enums import (
Enum4,
Enum5,
Get6ItemsItem,
Get7ItemsItem,
Head6ItemsItem,
Head7ItemsItem,
)
__all__ = [
'Error',
'Key',
'KeyListResult',
'KeyValue',
'KeyValueListResult',
'Label',
'LabelListResult',
'Enum4',
'Enum5',
'Get6ItemsItem',
'Get7ItemsItem',
'Head6ItemsItem',
'Head7ItemsItem',
]
|
"""
https://mitpress.mit.edu/sicp/full-text/book/book-Z-H-16.html#%_thm_2.60
"""
from Chapter2.themes.lisp_list_structured_data import car, cdr, cons, lisp_list, nil, print_lisp_list
from Chapter2.themes.sequences_as_conventional_interfaces import accumulate
def element_of_set(x, set):
"""Tests if x is element of set with a representation of sets that allows duplicates"""
if set is nil:
return False
if x == car(set):
return True
return element_of_set(x, cdr(set))
def adjoin_set(x, set):
"""Adds x to set"""
return cons(x, set)
def union_set(set1, set2):
"""Computes union of set1 and set2"""
return accumulate(adjoin_set, set2, set1)
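# With duplicates allowed, union is plain concatenation: e.g. the union of
# (1 2) and (2 3) is (1 2 2 3), so union_set needs no membership checks.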
def intersection_set(set1, set2):
"""Computes intersection of set1 and set2"""
if set1 is nil or set2 is nil:
return nil
if element_of_set(car(set1), set2):
return cons(car(set1), intersection_set(cdr(set1), set2))
return intersection_set(cdr(set1), set2)
def run_the_magic():
s1 = lisp_list(2, 3, 2, 1, 3, 2, 2)
s2 = lisp_list(1, 1, 3)
s3 = lisp_list(1, 2, 3)
print(element_of_set(3, s1))
print_lisp_list(adjoin_set(4, s1))
print_lisp_list(intersection_set(s1, s2))
print_lisp_list(union_set(s1, s2))
from timeit import Timer
t1_element_of = Timer(stmt='element_of_set(3, %(s1)s)' % locals(),
setup='from Chapter2.exercise2_60 import element_of_set')
t2_element_of = Timer(stmt='element_of_set(3, %(s1)s)' % locals(),
setup='from Chapter2.sets_as_unordered_lists import element_of_set')
t1_adjoin = Timer(stmt='adjoin_set(4, %(s1)s)' % locals(), setup='from Chapter2.exercise2_60 import adjoin_set')
t2_adjoin = Timer(stmt='adjoin_set(4, %(s3)s)' % locals(),
setup='from Chapter2.sets_as_unordered_lists import adjoin_set')
t1_intersection = Timer(stmt='intersection_set(%(s1)s, %(s2)s)' % locals(),
setup='from Chapter2.exercise2_60 import intersection_set')
t2_intersection = Timer(stmt='intersection_set(%(s1)s, %(s3)s)' % locals(),
setup='from Chapter2.sets_as_unordered_lists import intersection_set')
t1_union = Timer(stmt='union_set(%(s1)s, %(s2)s)' % locals(),
setup='from Chapter2.exercise2_60 import union_set')
t2_union = Timer(stmt='union_set(%(s1)s, %(s2)s)' % locals(),
setup='from Chapter2.exercise2_59 import union_set')
header = '-----------Timing for *%s* operation'
def do_timing(timer1, timer2, op_name):
print(header % op_name)
t1 = timer1.timeit()
t2 = timer2.timeit()
print('-> With duplicate: %s' % t1)
print('-> Without duplicate: %s' % t2)
do_timing(t1_element_of, t2_element_of, 'element_of_set')
do_timing(t1_adjoin, t2_adjoin, 'adjoin_set')
    do_timing(t1_intersection, t2_intersection, 'intersection_set')
do_timing(t1_union, t2_union, 'union_set')
print('The representation using unordered list with duplicates is better suited for applications where there are '
'many insertions in the data structure')
if __name__ == "__main__":
run_the_magic()
|
""" You've recently read "The Gold-Bug" by Edgar Allan Poe, and was so impressed by the cryptogram in it that
decided to try and decipher an encrypted text yourself. You asked your friend to encode a piece of text using
a substitution cipher, and now have an encryptedText that you'd like to decipher.
The encryption process in the story you read involves frequency analysis: it is known that letter 'e' is the
most frequent one in the English language, so it's pretty safe to assume that the most common character in the
encryptedText stands for 'e'. To begin with, implement a function that will find the most frequent character
in the given encryptedText.
Example
For encryptedText = "$~NmiNmim$/NVeirp@dlzrCCCCfFfQQQ", the output should be
frequencyAnalysis(encryptedText) = 'C'.
Letter 'C' appears in the text more than any other character (4 times), which is why it is the answer.
""
from collections import Counter # "Counter" is what CodeFights asks for
def frequencyAnalysis(encryptedText):
    return Counter(encryptedText).most_common(1)[0][0]  # CodeFights asks to change this line only
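# Example from the prompt above:
#     frequencyAnalysis("$~NmiNmim$/NVeirp@dlzrCCCCfFfQQQ")  # -> 'C'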
|
def True(
foo
):
True(
foo
)
def False(
foo
):
False(
foo
)
def None(
foo
):
None(
foo
)
def nonlocal (
foo
):
nonlocal(
foo
)
|
import tensorflow as tf
from tensorflow.contrib import slim as slim
from avb.ops import *
import math
def encoder(x, config, is_training=True):
df_dim = config['df_dim']
z_dim = config['z_dim']
a_dim = config['iaf_a_dim']
# Center x at 0
x = 2*x - 1
net = flatten_spatial(x)
net = slim.fully_connected(net, 300, activation_fn=tf.nn.softplus, scope="fc_0")
net = slim.fully_connected(net, 300, activation_fn=tf.nn.softplus, scope="fc_1")
zmean = slim.fully_connected(net, z_dim, activation_fn=None)
log_zstd = slim.fully_connected(net, z_dim, activation_fn=None)
a = slim.fully_connected(net, a_dim, activation_fn=None)
return zmean, log_zstd, a
|
import sys
sys.path.append("./")
import pandas as pd
import gensim
from utility.mongodb import MongoDBManager
from utility.sentence import segment, sent2vec
class Doc2Vector(object):
"""
    Convert documents to vectors
"""
def __init__(self):
"""
        :param keep_val: the configured threshold (currently unused)
"""
self.mongo_db = MongoDBManager()
def doc2vect(self):
"""
        Convert all documents to vectors and store them in the database
:return:
"""
model = gensim.models.Doc2Vec.load('./models/doc2vec_v1.model')
df_data = pd.read_excel("./data/new_prd.xlsx", names=["SysNo", "Title", "Content"])
content = []
title = []
for idx, row in df_data.iterrows():
seg_title = segment(row.Title)
seg_content = segment(row.Content)
            # convert to vectors
content_vect = sent2vec(model, ' '.join(seg_content))
title_vect = sent2vec(model, ' '.join(seg_title))
content_vect = map(str, content_vect.tolist())
title_vect = map(str, title_vect.tolist())
content.append({"_id": int(idx) + 1, "data": list(content_vect)})
title.append({"_id": int(idx) + 1, "data": list(title_vect)})
self.mongo_db.insert("content_vector", content)
self.mongo_db.insert("title_vector", title)
print("finished")
if __name__ == '__main__':
doc2vect = Doc2Vector()
doc2vect.doc2vect()
|
from __future__ import unicode_literals
from future.builtins import int
from collections import defaultdict
from django.core.urlresolvers import reverse
from django.template.defaultfilters import linebreaksbr, urlize
from mezzanine import template
from mezzanine.conf import settings
from mezzanine.generic.forms import ThreadedCommentForm
from mezzanine.generic.models import ThreadedComment
from mezzanine.utils.importing import import_dotted_path
from mezzanine.pages.models import Page, RichTextPage
register = template.Library()
@register.assignment_tag
def allpages():
    page_fields = ['content', 'created', 'description', 'expiry_date',
                   'gen_description', u'id', 'keywords', u'keywords_string',
                   'publish_date', 'short_url', 'slug', 'status', 'title',
                   'titles', 'updated']
output = []
    all_pages = RichTextPage.objects.all()
    for item in all_pages:
temp = {}
for fld in page_fields:
temp[fld] = getattr(item, fld)
output.append(temp)
return {
'pages': output
}
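# Hypothetical template usage for the assignment tag above (the load name
# depends on this module's file name):
#   {% load page_tags %}
#   {% allpages as ctx %}
#   {% for p in ctx.pages %}{{ p.title }}{% endfor %}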
@register.filter()
def remove_slash(value):
return '#' + value[1:-1]
@register.filter()
def lower(value):
return value.lower()
|
import _plotly_utils.basevalidators
class ComputedValidator(_plotly_utils.basevalidators.AnyValidator):
def __init__(self, plotly_name="computed", parent_name="layout", **kwargs):
super(ComputedValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
**kwargs
)
|
""" sysdiag
Pierre Haessig — September 2013
"""
from __future__ import division, print_function
def _create_name(name_list, base):
'''Returns a name (str) built on `base` that doesn't exist in `name_list`.
Useful for automatic creation of subsystems or wires
'''
base = str(base).strip()
if base == '':
# avoid having '' as name (although it would not break the code...)
raise ValueError('base name should not be empty!')
if base not in name_list:
return base
# Else: build another name by counting
i = 0
name = base + str(i)
while name in name_list:
i += 1
name = base + str(i)
return name
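# For example, _create_name(['W0', 'W1'], 'W') returns 'W' (the base itself is
# free), while _create_name(['W', 'W0'], 'W') returns 'W1'.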
class System(object):
'''Diagram description of a system
    a System is either an interconnection of subsystems
or an atomic element (a leaf of the tree)
'''
def __init__(self, name='root', parent=None):
self.name = name
# Parent system, if any (None for top-level):
self.parent = None
        # Children systems, if any (empty lists for leaf-level):
self.subsystems = []
self.wires = []
self.ports = []
self.params = {}
# If a parent system is provided, request its addition as a subsystem
if parent is not None:
parent.add_subsystem(self)
#end __init__()
def is_empty(self):
'''True if the System contains no subsystems and no wires'''
return (not self.subsystems) and (not self.wires)
@property
def ports_dict(self):
        '''dict of ports, whose keys are the names of the ports'''
return {p.name:p for p in self.ports}
@property
def subsystems_dict(self):
        '''dict of subsystems, whose keys are the names of the systems'''
return {s.name:s for s in self.subsystems}
def add_port(self, port, created_by_system = False):
'''add a Port to the System'''
if port in self.ports:
raise ValueError('port already added!')
# extract the port's name
name = port.name
port_names = [p.name for p in self.ports]
if name in port_names:
raise ValueError("port name '{}' already exists in {:s}!".format(
name, repr(self))
)
        # Add parent relationship and add to the ports list:
port.system = self
port._created_by_system = bool(created_by_system)
self.ports.append(port)
def del_port(self, port):
'''delete a Port of the System (and disconnect any connected wire)
'''
if (port.wire is not None) or (port.internal_wire is not None):
# TODO : implement the wire disconnection
raise NotImplementedError('Cannot yet delete a connected Port')
        # Remove from the ports list:
self.ports.remove(port)
def add_subsystem(self, subsys):
# 1) Check name uniqueness
name = subsys.name
subsys_names = [s.name for s in self.subsystems]
if name in subsys_names:
raise ValueError("system name '{}' already exists in {:s}!".format(
name, repr(self))
)
# 2) Add parent relationship and add to the system list
subsys.parent = self
self.subsystems.append(subsys)
def add_wire(self, wire):
# 1) Check name uniqueness
name = wire.name
wire_names = [w.name for w in self.wires]
if name in wire_names:
raise ValueError("wire name '{}' already exists in {:s}!".format(
name, repr(self))
)
        # Add parent relationship and add to the wires list:
wire.parent = self
self.wires.append(wire)
def create_name(self, category, base):
        '''Returns a name (str) built on `base` that doesn't exist
within the names of `category`.
'''
if category == 'subsystem':
components = self.subsystems
elif category == 'wire':
components = self.wires
else:
raise ValueError("Unknown category '{}'!".format(str(category)))
name_list = [c.name for c in components]
return _create_name(name_list, base)
def __repr__(self):
cls_name = self.__class__.__name__
s = "{:s}('{.name}')".format(cls_name, self)
return s
def __str__(self):
s = repr(self)
if self.parent:
s += '\n Parent: {:s}'.format(repr(self.parent))
if self.params:
s += '\n Parameters: {:s}'.format(str(self.params))
if self.ports:
s += '\n Ports: {:s}'.format(str(self.ports))
if self.subsystems:
            s += '\n Subsystems: {:s}'.format(str(self.subsystems))
return s
def __eq__(self, other):
        '''Systems compare equal if their class, `name` and `params` are equal,
        their lists of ports and wires are *similar*
        (see the `_is_similar` methods of Port and Wire),
        and their subsystems recursively compare equal.
        Parent systems are not compared (that would generate infinite recursion).
'''
if not isinstance(other, System):
return NotImplemented
# Basic similarity
basic_sim = self.__class__ == other.__class__ and \
self.name == other.name and \
self.params == other.params
if not basic_sim:
return False
# Port similarity: (sensitive to the order)
        ports_sim = len(self.ports) == len(other.ports) and \
                    all(p1._is_similar(p2) for (p1, p2)
                        in zip(self.ports, other.ports))
if not ports_sim:
return False
# Wires similarity
        wires_sim = len(self.wires) == len(other.wires) and \
                    all(w1._is_similar(w2) for (w1, w2)
                        in zip(self.wires, other.wires))
if not wires_sim:
return False
# Since everything matches, compare subsystems:
return self.subsystems == other.subsystems
# end __eq__()
def __ne__(self,other):
return not (self==other)
def _to_json(self):
'''convert the System instance to a JSON-serializable object
System is serialized with list of ports, subsystems and wires
but without connectivity information (e.g. no parent information)
ports created at the initialization of the system ("default ports")
are not serialized.
'''
# Filter out ports created at the initialization of the system
ports_list = [p for p in self.ports if not p._created_by_system]
cls_name = self.__module__ +'.'+ self.__class__.__name__
return {'__sysdiagclass__': 'System',
'__class__': cls_name,
'name':self.name,
'subsystems':self.subsystems,
'wires':self.wires,
'ports':ports_list,
'params':self.params
}
# end _to_json
def json_dump(self, output=None, indent=2, sort_keys=True):
'''dump (e.g. save) the System structure in json format
if `output` is None: return a json string
if `output` is a writable file: write in this file
'''
import json
if output is None:
return json.dumps(self, default=to_json, indent=indent, sort_keys=sort_keys)
else:
json.dump(self, output, default=to_json, indent=indent, sort_keys=sort_keys)
return
# end json_dump
class Port(object):
'''Port enables the connection of a System to a Wire
Each port has a `type` which only allows the connection of a Wire
of the same type.
    It also has a `direction` ('none', 'in', 'out') that is set
    at the class level.
private attribute `_created_by_system` tells whether the port was created
automatically by the system's class at initialization or by a custom code
(if True, the port is not serialized by its system).
'''
direction = 'none'
def __init__(self, name, ptype):
self.name = name
self.type = ptype
self.system = None
self.wire = None
self.internal_wire = None
self._created_by_system = False
def __repr__(self):
cls_name = self.__class__.__name__
s = '{:s}({:s}, {:s})'.format(cls_name, repr(self.name), repr(self.type))
return s
def __str__(self):
s = repr(self) + ' of ' + repr(self.system)
return s
def _is_similar(self, other):
'''Ports are *similar* if their class, `type` and `name` are equal.
(their parent system are not compared)
'''
if not isinstance(other, Port):
return NotImplemented
return self.__class__ == other.__class__ and \
self.type == other.type and \
self.name == other.name
def _to_json(self):
'''convert the Port instance to a JSON-serializable object
Ports are serialized without any connectivity information
'''
cls_name = self.__module__ +'.'+ self.__class__.__name__
return {'__sysdiagclass__': 'Port',
'__class__': cls_name,
'name':self.name,
'type':self.type
}
# end _to_json
class InputPort(Port):
'''Input Port'''
direction = 'in'
def __init__(self, name, ptype=''):
super(InputPort, self).__init__(name, ptype)
class OutputPort(Port):
'''Output Port'''
direction = 'out'
def __init__(self, name, ptype=''):
super(OutputPort, self).__init__(name, ptype)
class Wire(object):
'''Wire enables the interconnection of several Systems
through their Ports'''
def __init__(self, name, wtype, parent=None):
self.name = name
self.parent = None
self.type = wtype
self.ports = []
# If a parent system is provided, request its addition as a wire
if parent is not None:
parent.add_wire(self)
def is_connect_allowed(self, port, port_level, raise_error=False):
        '''Check that a connection between Wire `self` and a Port `port` is allowed.
Parameters
----------
`port`: the Port instance to connect to
`port_level`: whether `port` belongs to a 'sibling' (usual case) or a
'parent' system (to enable connections to the upper level)
`raise_error`: if True, raising an error replaces returning False
Returns
-------
allowed: True or False
'''
assert port_level in ['sibling', 'parent']
# Port availability (is there already a wire connected?):
if port_level == 'sibling':
connected_wire = port.wire
elif port_level == 'parent':
connected_wire = port.internal_wire
if connected_wire is not None:
if raise_error:
raise ValueError('port is already connected to '+\
'{:s}!'.format(repr(connected_wire)))
else:
return False
# Check parent relationship:
if port_level == 'sibling':
# Check that the wire and port.system are siblings:
if self.parent is not port.system.parent:
if raise_error:
raise ValueError('wire and port.system should have a common parent!')
else:
return False
elif port_level == 'parent':
# Check that the port.system is the parent of the wire:
if self.parent is not port.system:
if raise_error:
raise ValueError('port.system should be the parent of the wire!')
else:
return False
# Wire-Port Type checking:
if self.type == '':
# untyped wire: connection is always possible
return True
elif port.type == self.type:
return True
else:
# Incompatible types
if raise_error:
raise TypeError("Wire type '{:s}'".format(str(self.type)) + \
" and Port type '{:s}'".format(str(port.type)) + \
" are not compatible!")
else:
return False
def connect_port(self, port, port_level='sibling'):
'''Connect the Wire to a Port `port`'''
if port in self.ports:
            return  # Port is already connected
# Type checking:
self.is_connect_allowed(port, port_level, raise_error=True)
# Add parent relationship:
assert port_level in ['sibling', 'parent']
if port_level=='sibling':
port.wire = self
elif port_level == 'parent':
port.internal_wire = self
# Book keeping of ports:
self.ports.append(port)
@property
def ports_by_name(self):
'''triplet representation of port connections
(level, port.system.name, port.name)
(used for serialization)
'''
def port_triplet(p):
'''triplet representation (level, port.system.name, port.name)'''
if p.system is self.parent:
level = 'parent'
elif p.system.parent is self.parent:
level = 'sibling'
else:
raise ValueError('The system of Port {}'.format(repr(p)) +\
                    ' is neither a parent nor a sibling!')
return (level, p.system.name, p.name)
return [port_triplet(p) for p in self.ports]
def connect_by_name(self, s_name, p_name, level='sibling'):
        '''Connects the port named `p_name` of the system named `s_name`,
to be found at level `level` ('parent' or 'sibling' (default))
'''
# TODO (?) merge the notion of level in the name (make parent a reserved name)
assert level in ['sibling', 'parent']
# 1) find the system:
if level == 'parent':
syst = self.parent
assert self.parent.name == s_name
elif level == 'sibling':
syst = self.parent.subsystems_dict[s_name]
port = syst.ports_dict[p_name]
self.connect_port(port, level)
def __repr__(self):
cls_name = self.__class__.__name__
s = '{:s}({:s}, {:s})'.format(cls_name, repr(self.name), repr(self.type))
return s
def _is_similar(self, other):
'''Wires are *similar* if their class, `type` and `name` are equal
and if their connectivity (`ports_by_name`) is the same
(their parent system are not compared)
'''
if not isinstance(other, Wire):
return NotImplemented
return self.__class__ == other.__class__ and \
self.type == other.type and \
self.name == other.name and \
self.ports_by_name == other.ports_by_name
def _to_json(self):
'''convert the Wire instance to a JSON-serializable object
Wires are serialized with the port connectivity in tuples
(but parent relationship is not serialized)
'''
cls_name = self.__module__ +'.'+ self.__class__.__name__
return {'__sysdiagclass__': 'Wire',
'__class__': cls_name,
'name': self.name,
'type': self.type,
'ports': self.ports_by_name
}
# end _to_json
class SignalWire(Wire):
'''Signal Wire for the interconnection of several Systems
through their Input and Output Ports.
Each SignalWire can be connected to a unique Output Port (signal source)
and several Input Ports (signal sinks)
'''
def __init__(self, name, wtype='', parent=None):
super(SignalWire, self).__init__(name, wtype, parent)
def is_connect_allowed(self, port, port_level, raise_error=False):
        '''Check that a connection between SignalWire `self` and a Port `port`
is allowed.
Parameters
----------
`port`: the Port instance to connect to
`port_level`: whether `port` belongs to a 'sibling' (usual case) or a
'parent' system (to enable connections to the upper level)
`raise_error`: if True, raising an error replaces returning False
Returns
-------
allowed: True or False
'''
if port.direction not in ['in', 'out']:
if raise_error:
raise TypeError('Only Input/Output Port can be connected!')
else:
return False
def is_output(port, level):
'''an output port is either:
            * a sibling system's port with direction == 'out', or
            * a parent system's port with direction == 'in'
'''
if level=='detect':
wire = self
if wire.parent == port.system:
level = 'parent'
elif wire.parent == port.system.parent:
level = 'sibling'
else:
raise ValueError('Port is neither sibling nor parent')
is_out = (level=='sibling' and port.direction == 'out') or \
(level=='parent' and port.direction == 'in')
return is_out
# Now we have an I/O Port for sure:
if is_output(port, port_level):
# check that there is not already a signal source
other_ports = [p for p in self.ports if (is_output(p, 'detect')
and p is not port)]
if other_ports:
if raise_error:
raise ValueError('Only one output port can be connected!')
else:
return False
# Now the I/O aspect is fine. Launch some further checks:
return super(SignalWire, self).is_connect_allowed(port, port_level, raise_error)
def connect_systems(source, dest, s_pname, d_pname, wire_cls=Wire):
'''Connect systems `source` to `dest` using
port names `s_pname` and `d_pname`
with a wire of instance `wire_cls` (defaults to Wire)
The wire is created if necessary
Returns: the wire used for the connection
'''
# 1) find the ports
s_port = source.ports_dict[s_pname]
d_port = dest.ports_dict[d_pname]
    # 2) find a preexisting wire:
w = None
if s_port.wire is not None:
w = s_port.wire
elif d_port.wire is not None:
w = d_port.wire
else:
parent = s_port.system.parent
wname = parent.create_name('wire','W')
wtype = s_port.type
w = wire_cls(wname, wtype, parent)
# 3) Make the connection:
w.connect_port(s_port)
w.connect_port(d_port)
return w
def to_json(py_obj):
'''convert `py_obj` to JSON-serializable objects
`py_obj` should be an instance of `System`, `Wire` or `Port`
'''
if isinstance(py_obj, System):
return py_obj._to_json()
if isinstance(py_obj, Wire):
return py_obj._to_json()
if isinstance(py_obj, Port):
return py_obj._to_json()
raise TypeError(repr(py_obj) + ' is not JSON serializable')
import sys
def _str_to_class(mod_class):
    '''retrieves the class from a "module.class" string'''
    mod_name, cls_name = mod_class.rsplit('.', 1)  # allow dotted module paths
mod = sys.modules[mod_name]
return getattr(mod, cls_name)
def from_json(json_object):
'''deserializes a sysdiag json object'''
if '__sysdiagclass__' in json_object:
cls = _str_to_class(json_object['__class__'])
if json_object['__sysdiagclass__'] == 'Port':
port = cls(name = json_object['name'], ptype = json_object['type'])
return port
if json_object['__sysdiagclass__'] == 'System':
            # TODO: specialize the instantiation for each class using
# _from_json class methods
syst = cls(name = json_object['name'])
syst.params = json_object['params']
# add ports if any:
for p in json_object['ports']:
syst.add_port(p)
# add subsystems
for s in json_object['subsystems']:
syst.add_subsystem(s)
# add wires
for w_dict in json_object['wires']:
# 1) decode the wire:
w_cls = _str_to_class(w_dict['__class__'])
w = w_cls(name = w_dict['name'], wtype = w_dict['type'])
syst.add_wire(w)
# make the connections:
for level, s_name, p_name in w_dict['ports']:
w.connect_by_name(s_name, p_name, level)
# end for each wire
return syst
return json_object
def json_load(json_dump):
import json
syst = json.loads(json_dump, object_hook=from_json)
return syst
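# --- A minimal usage sketch (hypothetical example, not part of the library) ---
# It builds a tiny two-block diagram with the classes above, connects it with
# a SignalWire via connect_systems(), and round-trips it through JSON.
if __name__ == '__main__':
    root = System('root')
    amp = System('amp', parent=root)
    speaker = System('speaker', parent=root)
    amp.add_port(OutputPort('out'))
    speaker.add_port(InputPort('in'))
    wire = connect_systems(amp, speaker, 'out', 'in', wire_cls=SignalWire)
    print(wire.ports_by_name)  # [('sibling', 'amp', 'out'), ('sibling', 'speaker', 'in')]
    # The JSON round-trip should reproduce an equal System tree:
    assert json_load(root.json_dump()) == root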
|
import _plotly_utils.basevalidators
class HoverlabelValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(self, plotly_name="hoverlabel", parent_name="sankey", **kwargs):
super(HoverlabelValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Hoverlabel"),
data_docs=kwargs.pop(
"data_docs",
"""
align
Sets the horizontal alignment of the text
content within hover label box. Has an effect
            only if the hover label text spans two or
            more lines
alignsrc
Sets the source reference on Chart Studio Cloud
for align .
bgcolor
Sets the background color of the hover labels
for this trace
bgcolorsrc
Sets the source reference on Chart Studio Cloud
for bgcolor .
bordercolor
Sets the border color of the hover labels for
this trace.
bordercolorsrc
Sets the source reference on Chart Studio Cloud
for bordercolor .
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of
characters) of the trace name in the hover
labels for all traces. -1 shows the whole name
regardless of length. 0-3 shows the first 0-3
characters, and an integer >3 will show the
whole name if it is less than that many
characters, but if it is longer, will truncate
to `namelength - 3` characters and add an
ellipsis.
namelengthsrc
Sets the source reference on Chart Studio Cloud
for namelength .
""",
),
**kwargs
)
|
import ConfigParser
import os
import sys
import utils
site_list_location = os.path.dirname(__file__) + '/sitelist.txt'
parser = ConfigParser.RawConfigParser()
parser.read(os.path.dirname(__file__) + '/config.cfg')
general = dict(parser.items('general'))
gmail_account = dict(parser.items('gmail_account'))
write_error = parser.getboolean('logging', 'log_errors')
write_change = parser.getboolean('logging', 'log_changes')
write_unchanged = parser.getboolean('logging', 'log_unchanged')
receiver = parser.get('mailing', 'mail_receivers')
mail_error = parser.getboolean('mailing', 'send_errors')
mail_change = parser.getboolean('mailing', 'send_changes')
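# A sample config.cfg matching the keys read above (values are illustrative
# placeholders, not from the original project):
#
# [general]
# log_file = dns_updater.log
# ip_cache_file = ip_cache.txt
# update_urls = http://example.com/dns-update?hostname=myhost
#
# [gmail_account]
# gmail_user = user@gmail.com
# gmail_password = app-password
#
# [logging]
# log_errors = true
# log_changes = true
# log_unchanged = false
#
# [mailing]
# mail_receivers = admin@example.com
# send_errors = true
# send_changes = true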
tool_name = 'Dynamic DNS Updater'
logger = False
mailer = False
error_messages = {
'invalid_login' : 'Your Gmail username or password is incorrect.',
'logger_missing' : 'Problem writing to log file.',
'read_cache' : 'Problem reading from IP cache.',
'read_sitelist' : 'Problem reading the sitelist.',
'empty_url' : 'You have not provided an update URL.',
'check_ip' : 'Problem checking your IP address.',
'update_dns' : 'Problem updating your Dynamic DNS.'
}
def error_processor(code):
if write_error and logger: logger.log_error(error_messages[code])
if mail_error and mailer:
mailer.send_error(receiver, error_messages[code])
print '%s: Error - %s' % (tool_name, error_messages[code])
sys.exit()
if write_error or write_change or write_unchanged:
try: logger = utils.logger.Logger(general['log_file'])
except: logger = False
if mail_error or mail_change:
try: mailer = utils.mailer.Mailer(
gmail_account['gmail_user'],
gmail_account['gmail_password'])
except: error_processor('invalid_login')
if not logger and mailer:
if write_error or write_change or write_unchanged:
error_processor('logger_missing')
try: cacher = utils.cacher.Cacher(general['ip_cache_file'])
except: error_processor('read_cache')
try: checker = utils.checker.Checker(site_list_location)
except: error_processor('read_sitelist')
try: updater = utils.updater.Updater(general['update_urls'])
except: error_processor('empty_url')
old_ip = cacher.get_ip()
try: current_ip = checker.get_ip()
except: error_processor('check_ip')
if old_ip == current_ip:
if write_unchanged:
logger.log_no_change(old_ip)
print '%s: %s remains unchanged.' % (tool_name, old_ip)
sys.exit()
try: updater.update_dns()
except: error_processor('update_dns')
cacher.store_ip(current_ip)
print '%s: %s has been updated to %s' % (tool_name, old_ip, current_ip)
if write_change: logger.log_change(old_ip, current_ip)
if mail_change and mailer:
mailer.send_change(receiver, old_ip, current_ip)
|
import traceback # for logging exceptions
import logging
logging.getLogger().setLevel(logging.INFO) #before doing anything else, set the desired logging level, so all modules log correctly.
from ConfigParser import *
import RPi.GPIO as GPIO #provides pin support
import ATT_IOT as IOT #provide cloud support
from time import sleep #pause the app
import picamera
import cameraStreamer
import sys
import datetime # for generating a unique file name
ConfigName = 'rpicamera.config'
hasLISIPAROI = False
LISIPAROIPin = 4
streamer = None
camera = None
PreviewId = 1 # turn on/off preview on the stream server
RecordId = 2 # turn on/off recording on disk
StreamServerId = 3 # assign the destination to stream the video to.
ToggleLISIPAROIId = 4
PictureId = 5
_isPreview = False
_isRecording = False
def tryLoadConfig():
'load the config from file'
global hasLISIPAROI, LISIPAROIPin
c = ConfigParser()
if c.read(ConfigName):
#set up the ATT internet of things platform
IOT.DeviceId = c.get('cloud', 'deviceId')
IOT.ClientId = c.get('cloud', 'clientId')
IOT.ClientKey = c.get('cloud', 'clientKey')
        hasLISIPAROI = c.getboolean('camera', 'has LISIPAROI') #bool() of a non-empty string is always True, so parse properly
logging.info("has LISIPAROI:" + str(hasLISIPAROI) )
if hasLISIPAROI:
LISIPAROIPin = int(c.get('camera', 'LISIPAROI pin'))
logging.info("LISIPAROI pin:" + str(LISIPAROIPin) )
return True
else:
return False
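# A sample rpicamera.config matching the keys read above (placeholder values):
#
# [cloud]
# deviceId = <your ATT IOT device id>
# clientId = <your ATT IOT client id>
# clientKey = <your ATT IOT client key>
#
# [camera]
# has LISIPAROI = true
# LISIPAROI pin = 4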
def setupCamera():
    'create the camera responsible for recording video and the streamer object responsible for sending it to the server.'
global streamer, camera
camera = picamera.PiCamera()
camera.resolution = (640, 480)
camera.framerate = 30
streamer = cameraStreamer.CameraStreamer(camera)
def setBacklight(value):
'''turn on/off the backlight
value: string ('true' or 'false')
    returns: true when the input was successfully processed, otherwise false
'''
if value == "true":
GPIO.output(LISIPAROIPin, GPIO.HIGH)
elif value == "false":
GPIO.output(LISIPAROIPin, GPIO.LOW)
else:
print("unknown value: " + value)
    IOT.send(value, ToggleLISIPAROIId) #provide feedback to the cloud that the operation was successful
def setPreview(value):
    global _isPreview
    if _isRecording:
        print("recording not allowed during preview, shutting down recording.")
        setRecord(False)
    if value == "true":
        _isPreview = True
        streamer.start_preview()
    elif value == "false":
        _isPreview = False
        streamer.stop_preview()
    else:
        print("unknown value: " + value)
    IOT.send(value, PreviewId) #provide feedback to the cloud that the operation was successful
def setRecord(value):
    global _isRecording
    if _isPreview:
        print("preview not allowed during recording, shutting down preview.")
        setPreview(False)
    if value == "true":
        camera.resolution = (1920, 1080) #set to max resolution for recording
        camera.start_recording('video' + datetime.datetime.now().strftime("%d_%b_%Y_%H_%M_%S") + '.h264')
        _isRecording = True
    elif value == "false":
        camera.stop_recording()
        camera.resolution = (640, 480) #reset resolution for preview
        _isRecording = False
    else:
        print("unknown value: " + value)
    IOT.send(value, RecordId) #provide feedback to the cloud that the operation was successful
def takePicture():
    'take a single picture, max resolution'
prevWasPreview = _isPreview
prevWasRecording = _isRecording
if _isRecording:
print("record not allowed while taking picture.")
setRecord(False)
if not _isPreview:
print("preview required for taking picture.")
setPreview(True)
sleep(2) # if preview was not running yet, give it some time to startup
    camera.capture('picture' + datetime.datetime.now().strftime("%d_%b_%Y_%H_%M_%S") + '.jpg')
if prevWasPreview:
print("reactivating preview.")
setPreview(True)
elif prevWasRecording:
print("reactivating record.")
setRecord(True)
def on_message(id, value):
    if id.endswith(str(ToggleLISIPAROIId)):
        value = value.lower() #make certain that the value is in lower case, for 'True' vs 'true'
        setBacklight(value)
    elif id.endswith(str(PreviewId)):
        value = value.lower() #make certain that the value is in lower case, for 'True' vs 'true'
        setPreview(value)
    elif id.endswith(str(RecordId)):
        value = value.lower() #make certain that the value is in lower case, for 'True' vs 'true'
        setRecord(value)
    elif id.endswith(str(StreamServerId)):
        streamer.streamServerIp = value
        IOT.send(value, StreamServerId) #provide feedback to the cloud that the operation was successful
    elif id.endswith(str(PictureId)):
if value.lower() == "true":
takePicture()
else:
print("unknown actuator: " + id)
def setupCloud():
IOT.on_message = on_message
    #make certain that the device & its features are defined in the cloudapp
IOT.connect()
if hasLISIPAROI:
IOT.addAsset(ToggleLISIPAROIId, "LISIPAROI", "Control the light on the camera", False, "boolean")
IOT.addAsset(PreviewId, "Preview", "Show/close a preview on the monitor that is connected to the RPI", True, "boolean")
IOT.addAsset(RecordId, "Record", "Start/stop recording the video stream on sd-card", True, "boolean")
    IOT.addAsset(PictureId, "Picture", "take a picture (max resolution) and store on sd-card", True, "boolean")
IOT.addAsset(StreamServerId, "Stream server", "set the ip address of the server that manages the video", True, "string")
# get any previously defined settings
streamer.streamServerIp = IOT.getAssetState(StreamServerId)
if streamer.streamServerIp:
streamer.streamServerIp = streamer.streamServerIp['state']['value']
logging.info("sending stream to: " + streamer.streamServerIp)
else:
logging.info("no stream endpoint defined")
IOT.subscribe() #starts the bi-directional communication
# set current state of the device
IOT.send("false", ToggleLISIPAROIId)
IOT.send("false", PreviewId)
IOT.send("false", RecordId)
tryLoadConfig()
setupCamera() # needs to be done before setting up the cloud, because we will get the settings from the cloud and assign them to the camera.
setupCloud()
if hasLISIPAROI:
try:
#setup GPIO using Board numbering
#GPIO.setmode(GPIO.BCM)
GPIO.setmode(GPIO.BOARD)
#set up the pins
GPIO.setup(LISIPAROIPin, GPIO.OUT)
except:
logging.error(traceback.format_exc())
while True:
#main thread doesn't have to do much, all is handled on the thread calling the message handler (for the actuators)
sleep(5)
|
from time import sleep
import unittest2 as unittest
from tweepy.api import API
from tweepy.auth import OAuthHandler
from tweepy.models import Status
from tweepy.streaming import Stream, StreamListener
from config import create_auth
from test_utils import mock_tweet
from mock import MagicMock, patch
class MockStreamListener(StreamListener):
def __init__(self, test_case):
super(MockStreamListener, self).__init__()
self.test_case = test_case
self.status_count = 0
self.status_stop_count = 0
self.connect_cb = None
def on_connect(self):
if self.connect_cb:
self.connect_cb()
def on_timeout(self):
self.test_case.fail('timeout')
return False
def on_error(self, code):
print("response: %s" % code)
return True
def on_status(self, status):
self.status_count += 1
self.test_case.assertIsInstance(status, Status)
if self.status_stop_count == self.status_count:
return False
class TweepyStreamTests(unittest.TestCase):
def setUp(self):
self.auth = create_auth()
self.listener = MockStreamListener(self)
self.stream = Stream(self.auth, self.listener, timeout=3.0)
def tearDown(self):
self.stream.disconnect()
def test_userstream(self):
# Generate random tweet which should show up in the stream.
def on_connect():
API(self.auth).update_status(mock_tweet())
self.listener.connect_cb = on_connect
self.listener.status_stop_count = 1
self.stream.userstream()
self.assertEqual(self.listener.status_count, 1)
def test_userstream_with_params(self):
# Generate random tweet which should show up in the stream.
def on_connect():
API(self.auth).update_status(mock_tweet())
self.listener.connect_cb = on_connect
self.listener.status_stop_count = 1
self.stream.userstream(_with='user', replies='all', stall_warnings=True)
self.assertEqual(self.listener.status_count, 1)
def test_sample(self):
self.listener.status_stop_count = 10
self.stream.sample()
        self.assertEqual(self.listener.status_count,
                         self.listener.status_stop_count)
def test_filter_track(self):
self.listener.status_stop_count = 5
phrases = ['twitter']
self.stream.filter(track=phrases)
        self.assertEqual(self.listener.status_count,
                         self.listener.status_stop_count)
def test_track_encoding(self):
s = Stream(None, None)
s._start = lambda async: None
s.filter(track=[u'Caf\xe9'])
# Should be UTF-8 encoded
self.assertEqual(u'Caf\xe9'.encode('utf8'), s.parameters['track'])
def test_follow_encoding(self):
s = Stream(None, None)
s._start = lambda async: None
s.filter(follow=[u'Caf\xe9'])
# Should be UTF-8 encoded
self.assertEqual(u'Caf\xe9'.encode('utf8'), s.parameters['follow'])
class TweepyStreamBackoffTests(unittest.TestCase):
def setUp(self):
#bad auth causes twitter to return 401 errors
self.auth = OAuthHandler("bad-key", "bad-secret")
self.auth.set_access_token("bad-token", "bad-token-secret")
self.listener = MockStreamListener(self)
self.stream = Stream(self.auth, self.listener)
def tearDown(self):
self.stream.disconnect()
def test_exp_backoff(self):
self.stream = Stream(self.auth, self.listener, timeout=3.0,
retry_count=1, retry_time=1.0, retry_time_cap=100.0)
self.stream.sample()
# 1 retry, should be 4x the retry_time
self.assertEqual(self.stream.retry_time, 4.0)
def test_exp_backoff_cap(self):
self.stream = Stream(self.auth, self.listener, timeout=3.0,
retry_count=1, retry_time=1.0, retry_time_cap=3.0)
self.stream.sample()
# 1 retry, but 4x the retry_time exceeds the cap, so should be capped
self.assertEqual(self.stream.retry_time, 3.0)
mock_resp = MagicMock()
mock_resp.return_value.status = 420
@patch('httplib.HTTPConnection.getresponse', mock_resp)
def test_420(self):
self.stream = Stream(self.auth, self.listener, timeout=3.0, retry_count=0,
retry_time=1.0, retry_420=1.5, retry_time_cap=20.0)
self.stream.sample()
# no retries, but error 420, should be double the retry_420, not double the retry_time
self.assertEqual(self.stream.retry_time, 3.0)
|
import sys, math, os
import matplotlib.pyplot as plt
def main():
# Check that there's at least one argument
if len(sys.argv) < 2:
print("Usage python {} <file1> [<file2> ...]".format(sys.argv[0]))
return 1
# Automatically detect if decayed
if "decayed" in sys.argv[1]:
plotDecayed = True
else:
plotDecayed = False
# Read input file
fil = "finalGraph.in"
    # Default to None so the later `labs is None`/`lstyles is None` checks
    # work when the file is absent
    lstyles = None
    labs = None
    if os.path.isfile(fil):
        with open(fil, "r") as fread:
            lstyles = fread.readline().strip().split()
            labs = []
            for line in fread:
                labs.append(line.strip())
lowZ = 27 # Lowest z value to represent
# Read "species.dat" and store all the values in lists
species = "../../data/species.dat"
atomicNum = []; atomicMass = []; namesZ = {}
with open(species, "r") as fread:
for line in fread:
lnlst = line.split()
# Correct special names
if lnlst[1] == "d" or lnlst[2] == "0":
lnlst[1] = "h"
# Now relate positions with atomic numbers, atomic masses, and names
zNum = int(lnlst[0]) - int(lnlst[2])
atomicNum.append(zNum)
atomicMass.append(int(lnlst[0]))
namesZ[lnlst[1]] = zNum
# Read all initial solar values
solar = "../../data/solarVals.dat"
solarValues = {}
with open(solar, "r") as fread:
for line in fread:
lnlst = line.split()
isotName = lnlst[0] + lnlst[2]
# Add mass fraction value per atomic number
key = namesZ[lnlst[0]]; val = float(lnlst[1])*float(lnlst[2])
solarValues[key] = solarValues.get(key, 0) + val
# Go file by file
numDens = []
for archivo in sys.argv[1:]:
# Open file for reading
dens = []
fread = open(archivo, "r")
# Each line has mass, temperature, rho, radiat
# and elements in number fraction
newline = None
for line in fread:
if "#" in line:
continue
lnlst = line.split()
if len(lnlst) == 0:
if plotDecayed:
break
else:
continue
if not plotDecayed:
# Surface (newline[0] is the mass)
prevline = newline
newline = [float(x) for x in lnlst]
if newline[0] > 0.85:
break
if plotDecayed:
dens.append(float(lnlst[1]))
# Close file
fread.close()
# Calculate values of interest
if plotDecayed:
numDens.append(dens)
else:
numDens.append([(x + y)*0.5 for (x, y) in
zip(prevline[4:], newline[4:])])
    # Now calculate the AGB values and print the surface mass fractions
    # for each isotope
print("# Surface number fraction values")
agbValues = []
for ii in range(len(numDens)):
dic = {}
dens = numDens[ii]
# Print the model name
print("# {}".format(sys.argv[ii + 1]))
# Add the values for each element
for jj in range(len(atomicNum)):
key = atomicNum[jj]
dic[key] = dic.get(key, 0) + dens[jj]*atomicMass[jj]
# Print the number fraction
print(dens[jj])
agbValues.append(dic)
print("")
# Now identify iron:
ironNumber = namesZ["fe"]
# Now divide every element by iron
for dens in agbValues:
ironDens = dens[ironNumber]
for key in dens:
dens[key] /= ironDens
# Solar as well
ironDens = solarValues[ironNumber]
for key in solarValues:
solarValues[key] /= ironDens
# Now create the final values
finalValues = []
zList = [x for x in solarValues.keys()]
zList.sort()
for dens in agbValues:
thisDens = []
for key in zList:
if key < lowZ:
continue
val = math.log10(dens[key]/solarValues[key])
thisDens.append(val)
finalValues.append(thisDens)
# Create xaxis:
xx = [x for x in zList if x >= lowZ]
# Print final values
print("# [X/Fe] values")
for ii in range(len(sys.argv[1:])):
print("# {}".format(sys.argv[ii + 1]))
print("")
for jj in range(len(xx)):
print(xx[jj], finalValues[ii][jj])
print("")
# From zList create contIndx. This list contains a number of
# tuples with the first and last index of any contiguous sequence
indx = 1; first = 0
prevKey = None; contIndx = []
for key in xx:
if prevKey is None:
prevKey = key
continue
# Check if keys are contiguous
if key - prevKey > 1:
contIndx.append((first, indx))
first = indx
prevKey = key
indx += 1
# Add last tuple
contIndx.append((first, indx + 1))
# Begin plot
figure = plt.figure()
plt.xlabel("Atomic number Z", size = 14)
plt.ylabel("[X/Fe]", size = 14)
# Plot values
if labs is None:
labs = sys.argv[1:]
ii = 0
for dens in finalValues:
# Plot first range
first, last = contIndx[0]
if lstyles is None:
lin, = plt.plot(xx[first:last], dens[first:last],
label = labs[ii], lw = 2)
else:
lin, = plt.plot(xx[first:last], dens[first:last], lstyles[ii],
label = labs[ii], lw = 2)
# Get color and line style
col, lst = lin.get_color(), lin.get_linestyle()
colStyle = col + lst
for elem in contIndx[1:]:
first, last = elem
plt.plot(xx[first:last], dens[first:last], colStyle, lw = 2)
ii += 1
# Set floating text
namAtm = {"Co":27, "Ge":32, "Se":34, "Kr":36, "Sr":38, "Zr":40,
"Mo":42, "Pd":46, "Cd":48, "Sn":50, "Te":52, "Ba":56,
"Ce":58, "Nd":60, "Sm":62, "Gd":64, "Dy":66, "Er":68,
"Yb":70, "Hf":72, "W":74, "Os":76, "Hg":80, "Pb":82,
"Rb":37, "Cs":55}
rNamAtm = ["Rb", "Cs"]
for name in namAtm:
yVal = 0
for ii in range(len(xx)):
if xx[ii] == namAtm[name]:
yVal = finalValues[-1][ii]
break
plt.text(namAtm[name] - 0.5, yVal*1.01, name, size = 14)
if name in rNamAtm:
plt.plot(namAtm[name], yVal, "ro")
else:
plt.plot(namAtm[name], yVal, "ko")
plt.legend(loc=0, ncol = 2)
plt.text(30, 1.1, "3M$_\odot$", fontsize = 16)
plt.show()
if __name__ == "__main__":
main()
|
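# Abbreviate every word longer than 10 characters as
# <first letter><count of inner letters><last letter>, e.g. "localization" -> "l10n".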
T = int(raw_input())
while T != 0:
word = str(raw_input())
if len(word)>10:
        print word[0] + str(len(word) - 2) + word[-1]
else:
print word
T-=1
|
BOT_NAME = 'aCloudGuru'
SPIDER_MODULES = ['aCloudGuru.spiders']
NEWSPIDER_MODULE = 'aCloudGuru.spiders'
ROBOTSTXT_OBEY = True
|
import asyncio
import functools
import random
import time
from testing import Client
from testing import default_test_setup
from testing import gen_data
from testing import gen_points
from testing import gen_series
from testing import InsertError
from testing import PoolError
from testing import QueryError
from testing import run_test
from testing import Series
from testing import Server
from testing import ServerError
from testing import SiriDB
from testing import TestBase
from testing import UserAuthError
from testing import parse_args
TIME_PRECISION = 'ms'
class TestCompression(TestBase):
title = 'Test compression'
GEN_POINTS = functools.partial(
gen_points, n=100, time_precision=TIME_PRECISION)
async def _test_series(self, client):
result = await client.query('select * from "series float"')
self.assertEqual(result['series float'], self.series_float)
result = await client.query('select * from "series int"')
self.assertEqual(result['series int'], self.series_int)
result = await client.query(
'list series name, length, type, start, end')
result['series'].sort()
self.assertEqual(
result,
{'columns': ['name', 'length', 'type', 'start', 'end'],
'series': [[
'series float',
10000, 'float',
self.series_float[0][0],
self.series_float[-1][0]], [
'series int',
10000, 'integer',
self.series_int[0][0],
self.series_int[-1][0]],
]})
@default_test_setup(
1,
time_precision=TIME_PRECISION,
optimize_interval=500,
compression=True)
async def run(self):
await self.client0.connect()
self.series_float = gen_points(
tp=float, n=10000, time_precision=TIME_PRECISION, ts_gap='5m')
random.shuffle(self.series_float)
self.series_int = gen_points(
tp=int, n=10000, time_precision=TIME_PRECISION, ts_gap='5m')
random.shuffle(self.series_int)
self.assertEqual(
await self.client0.insert({
'series float': self.series_float,
'series int': self.series_int
}), {'success_msg': 'Successfully inserted 20000 point(s).'})
self.series_float.sort()
self.series_int.sort()
await self._test_series(self.client0)
await self.client0.query('drop series /.*/ set ignore_threshold true')
        # Create some random series and run 40 rounds of inserts, each over a
        # random subset of the series
series = gen_series(n=40)
for i in range(40):
await self.client0.insert_some_series(
series,
n=0.8,
timeout=0,
points=self.GEN_POINTS)
# Check the result
await self.assertSeries(self.client0, series)
for i in range(40):
await self.client0.insert_some_series(
series,
n=0.8,
timeout=0,
points=self.GEN_POINTS)
# Check the result
await self.assertSeries(self.client0, series)
self.client0.close()
result = await self.server0.stop()
self.assertTrue(result)
await self.server0.start(sleep=20)
await self.client0.connect()
# Check the result after rebooting the server
await self.assertSeries(self.client0, series)
if __name__ == '__main__':
random.seed(1)
parse_args()
run_test(TestCompression())
|
"""
Production Configurations
- Use djangosecure
- Use Amazon's S3 for storing static files and uploaded media
- Use mailgun to send emails
- Use Redis on Heroku
"""
from __future__ import absolute_import, unicode_literals
from boto.s3.connection import OrdinaryCallingFormat
from django.utils import six
from .common import * # noqa
SECRET_KEY = env('DJANGO_SECRET_KEY')
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
SECURE_HSTS_SECONDS = 60
SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool(
'DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS', default=True)
SECURE_CONTENT_TYPE_NOSNIFF = env.bool(
'DJANGO_SECURE_CONTENT_TYPE_NOSNIFF', default=True)
SECURE_BROWSER_XSS_FILTER = True
SESSION_COOKIE_SECURE = True
SESSION_COOKIE_HTTPONLY = True
SECURE_SSL_REDIRECT = env.bool('DJANGO_SECURE_SSL_REDIRECT', default=True)
CSRF_COOKIE_SECURE = True
CSRF_COOKIE_HTTPONLY = True
X_FRAME_OPTIONS = 'DENY'
ALLOWED_HOSTS = env.list('DJANGO_ALLOWED_HOSTS', default=['mertisconsulting.com'])
INSTALLED_APPS += ('gunicorn', )
INSTALLED_APPS += (
'storages',
)
AWS_ACCESS_KEY_ID = env('DJANGO_AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = env('DJANGO_AWS_SECRET_ACCESS_KEY')
AWS_STORAGE_BUCKET_NAME = env('DJANGO_AWS_STORAGE_BUCKET_NAME')
AWS_AUTO_CREATE_BUCKET = True
AWS_QUERYSTRING_AUTH = False
AWS_S3_CALLING_FORMAT = OrdinaryCallingFormat()
AWS_EXPIRY = 60 * 60 * 24 * 7
AWS_HEADERS = {
'Cache-Control': six.b('max-age=%d, s-maxage=%d, must-revalidate' % (
AWS_EXPIRY, AWS_EXPIRY))
}
from storages.backends.s3boto import S3BotoStorage
StaticRootS3BotoStorage = lambda: S3BotoStorage(location='static')
MediaRootS3BotoStorage = lambda: S3BotoStorage(location='media')
DEFAULT_FILE_STORAGE = 'config.settings.production.MediaRootS3BotoStorage'
MEDIA_URL = 'https://s3.amazonaws.com/%s/media/' % AWS_STORAGE_BUCKET_NAME
STATIC_URL = 'https://s3.amazonaws.com/%s/static/' % AWS_STORAGE_BUCKET_NAME
STATICFILES_STORAGE = 'config.settings.production.StaticRootS3BotoStorage'
AWS_PRELOAD_METADATA = True
INSTALLED_APPS = ('collectfast', ) + INSTALLED_APPS
DEFAULT_FROM_EMAIL = env('DJANGO_DEFAULT_FROM_EMAIL',
default='cpq-exporter <noreply@mertisconsulting.com>')
EMAIL_SUBJECT_PREFIX = env('DJANGO_EMAIL_SUBJECT_PREFIX', default='[cpq-exporter] ')
SERVER_EMAIL = env('DJANGO_SERVER_EMAIL', default=DEFAULT_FROM_EMAIL)
INSTALLED_APPS += ("anymail", )
ANYMAIL = {
"MAILGUN_API_KEY": env('DJANGO_MAILGUN_API_KEY'),
}
EMAIL_BACKEND = "anymail.backends.mailgun.MailgunBackend"
TEMPLATES[0]['OPTIONS']['loaders'] = [
('django.template.loaders.cached.Loader', [
'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader', ]),
]
DATABASES['default'] = env.db('DATABASE_URL')
CACHES = {
'default': {
'BACKEND': 'django_redis.cache.RedisCache',
'LOCATION': '{0}/{1}'.format(env('REDIS_URL', default='redis://127.0.0.1:6379'), 0),
'OPTIONS': {
'CLIENT_CLASS': 'django_redis.client.DefaultClient',
'IGNORE_EXCEPTIONS': True, # mimics memcache behavior.
# http://niwinz.github.io/django-redis/latest/#_memcached_exceptions_behavior
}
}
}
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s '
'%(process)d %(thread)d %(message)s'
},
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose',
},
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True
},
'django.security.DisallowedHost': {
'level': 'ERROR',
'handlers': ['console', 'mail_admins'],
'propagate': True
}
}
}
ADMIN_URL = env('DJANGO_ADMIN_URL')
|
from .base import * # TODO: import the relevant names instead of importing everything.
import cuda_convnet
import corrmm
|
"""
FILE: sample_recognize_business_cards.py
DESCRIPTION:
This sample demonstrates how to recognize fields on business cards.
See fields found on a business card here:
https://aka.ms/formrecognizer/businesscardfields
USAGE:
python sample_recognize_business_cards.py
Set the environment variables with your own values before running the sample:
1) AZURE_FORM_RECOGNIZER_ENDPOINT - the endpoint to your Cognitive Services resource.
2) AZURE_FORM_RECOGNIZER_KEY - your Form Recognizer API key
"""
import os
class RecognizeBusinessCardSample(object):
def recognize_business_card(self):
path_to_sample_forms = os.path.abspath(os.path.join(os.path.abspath(__file__),
"..", "..", "./sample_forms/business_cards/business-card-english.jpg"))
# [START recognize_business_cards]
from azure.core.credentials import AzureKeyCredential
from azure.ai.formrecognizer import FormRecognizerClient
endpoint = os.environ["AZURE_FORM_RECOGNIZER_ENDPOINT"]
key = os.environ["AZURE_FORM_RECOGNIZER_KEY"]
form_recognizer_client = FormRecognizerClient(
endpoint=endpoint, credential=AzureKeyCredential(key)
)
with open(path_to_sample_forms, "rb") as f:
poller = form_recognizer_client.begin_recognize_business_cards(business_card=f, locale="en-US")
business_cards = poller.result()
for idx, business_card in enumerate(business_cards):
print("--------Recognizing business card #{}--------".format(idx+1))
contact_names = business_card.fields.get("ContactNames")
if contact_names:
for contact_name in contact_names.value:
print("Contact First Name: {} has confidence: {}".format(
contact_name.value["FirstName"].value, contact_name.value["FirstName"].confidence
))
print("Contact Last Name: {} has confidence: {}".format(
contact_name.value["LastName"].value, contact_name.value["LastName"].confidence
))
company_names = business_card.fields.get("CompanyNames")
if company_names:
for company_name in company_names.value:
print("Company Name: {} has confidence: {}".format(company_name.value, company_name.confidence))
departments = business_card.fields.get("Departments")
if departments:
for department in departments.value:
print("Department: {} has confidence: {}".format(department.value, department.confidence))
job_titles = business_card.fields.get("JobTitles")
if job_titles:
for job_title in job_titles.value:
print("Job Title: {} has confidence: {}".format(job_title.value, job_title.confidence))
emails = business_card.fields.get("Emails")
if emails:
for email in emails.value:
print("Email: {} has confidence: {}".format(email.value, email.confidence))
websites = business_card.fields.get("Websites")
if websites:
for website in websites.value:
print("Website: {} has confidence: {}".format(website.value, website.confidence))
addresses = business_card.fields.get("Addresses")
if addresses:
for address in addresses.value:
print("Address: {} has confidence: {}".format(address.value, address.confidence))
mobile_phones = business_card.fields.get("MobilePhones")
if mobile_phones:
for phone in mobile_phones.value:
print("Mobile phone number: {} has confidence: {}".format(phone.value, phone.confidence))
faxes = business_card.fields.get("Faxes")
if faxes:
for fax in faxes.value:
print("Fax number: {} has confidence: {}".format(fax.value, fax.confidence))
work_phones = business_card.fields.get("WorkPhones")
if work_phones:
for work_phone in work_phones.value:
print("Work phone number: {} has confidence: {}".format(work_phone.value, work_phone.confidence))
other_phones = business_card.fields.get("OtherPhones")
if other_phones:
for other_phone in other_phones.value:
print("Other phone number: {} has confidence: {}".format(other_phone.value, other_phone.confidence))
# [END recognize_business_cards]
if __name__ == '__main__':
sample = RecognizeBusinessCardSample()
sample.recognize_business_card()
|
from time import sleep
import os
import shutil
import merfi
from merfi import logger
from merfi import util
from merfi.collector import RepoCollector
from merfi.backends import base
class RpmSign(base.BaseBackend):
help_menu = 'rpm-sign handler for signing files'
_help = """
Signs files with rpm-sign. Crawls a given path looking for Debian repos.
Note: this sub-command tells merfi to use Red Hat's internal signing tool
inconveniently named "rpm-sign", not the rpmsign(8) command that is a part of
the http://rpm.org open-source project.
%s
Options
--key Name of the key to use (see rpm-sign --list-keys)
--keyfile File path location of the public keyfile, for example
/etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release
or /etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-beta
--nat A NAT is between this system and the signing server.
Positional Arguments:
[path] The path to crawl for signing repos. Defaults to current
working directory
"""
executable = 'rpm-sign'
name = 'rpm-sign'
options = ['--key', '--keyfile', '--nat']
def clear_sign(self, path, command):
"""
When doing a "clearsign" with rpm-sign, the output goes to stdout, so
that needs to be captured and written to the default output file for
clear signed signatures (InRelease).
"""
logger.info('signing: %s' % path)
out, err, code = util.run_output(command)
# Sometimes rpm-sign will fail with this error. I've opened
# rhbz#1557014 to resolve this server-side. For now, sleep and retry
# as a workaround. These sleep/retry values are suggestions from the
# team that runs the signing service.
known_failure = "ERROR: unhandled exception occurred: ('')."
tries = 1
while known_failure in err and tries < 30:
logger.warning('hit known rpm-sign failure.')
tries += 1
logger.warning('sleeping, running try #%d in 30 seconds.' % tries)
            sleep(30)
out, err, code = util.run_output(command)
if code != 0:
for line in err.split('\n'):
logger.error('stderr: %s' % line)
for line in out.split('\n'):
logger.error('stdout: %s' % line)
            raise RuntimeError('rpm-sign non-zero exit code %d' % code)
if out.strip() == '':
for line in err.split('\n'):
logger.error('stderr: %s' % line)
logger.error('rpm-sign clearsign provided nothing on stdout')
raise RuntimeError('no clearsign signature available')
absolute_directory = os.path.dirname(os.path.abspath(path))
with open(os.path.join(absolute_directory, 'InRelease'), 'w') as f:
f.write(out)
def detached(self, command):
return util.run(command)
def sign(self):
self.keyfile = self.parser.get('--keyfile')
if self.keyfile:
self.keyfile = os.path.abspath(self.keyfile)
if not os.path.isfile(self.keyfile):
raise RuntimeError('%s is not a file' % self.keyfile)
logger.info('using keyfile "%s" as release.asc' % self.keyfile)
self.key = self.parser.get('--key')
if not self.key:
raise RuntimeError('specify a --key for signing')
logger.info('Starting path collection, looking for files to sign')
repos = RepoCollector(self.path)
if repos:
logger.info('%s repos found' % len(repos))
# FIXME: this should spit the actual verified command
logger.info('will sign with the following commands:')
logger.info('rpm-sign --key "%s" --detachsign Release --output Release.gpg' % self.key)
logger.info('rpm-sign --key "%s" --clearsign Release --output InRelease' % self.key)
else:
logger.warning('No paths found that matched')
for repo in repos:
# Debian "Release" files:
for path in repo.releases:
self.sign_release(path)
# Public key:
if self.keyfile:
logger.info('placing release.asc in %s' % repo.path)
if merfi.config.get('check'):
logger.info('[CHECKMODE] writing release.asc')
else:
shutil.copyfile(
self.keyfile,
os.path.join(repo.path, 'release.asc'))
def sign_release(self, path):
""" Sign a "Release" file from a Debian repo. """
if merfi.config.get('check'):
new_gpg_path = path.split('Release')[0]+'Release.gpg'
new_in_path = path.split('Release')[0]+'InRelease'
logger.info('[CHECKMODE] signing: %s' % path)
logger.info('[CHECKMODE] signed: %s' % new_gpg_path)
logger.info('[CHECKMODE] signed: %s' % new_in_path)
else:
os.chdir(os.path.dirname(path))
detached = ['rpm-sign', '--key', self.key, '--detachsign',
'Release', '--output', 'Release.gpg']
clearsign = ['rpm-sign', '--key', self.key, '--clearsign',
'Release']
if self.parser.has('--nat'):
detached.insert(1, '--nat')
clearsign.insert(1, '--nat')
logger.info('signing: %s' % path)
self.detached(detached)
self.clear_sign(path, clearsign)
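# Hypothetical command-line invocation (assumes merfi's CLI wiring, which is
# not shown in this module):
#   merfi rpm-sign --key mykey --keyfile /etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release /path/to/repos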
|
# -*- coding: utf-8 -*-
import arrow
import datetime
import ujson
import timeit
from flask.ext.login import login_required
from flask import (
Blueprint, render_template
)
from feedback.dashboard.vendorsurveys import (
get_rating_scale, get_surveys_by_role,
get_surveys_by_completion, get_surveys_by_purpose,
get_all_survey_responses, get_rating_by_lang,
get_rating_by_purpose, get_rating_by_role
)
from feedback.surveys.constants import SURVEY_DAYS
from feedback.surveys.models import Survey
from feedback.dashboard.permits import (
api_health, get_lifespan,
get_permit_types, trade,
get_master_permit_counts,
dump_socrata_api
)
blueprint = Blueprint(
"dashboard", __name__,
template_folder='../templates',
static_folder="../static"
)
def to_bucket(str_date):
''' Converts the DB string time to a MM-DD string format.
'''
result = arrow.get(str_date)
return result.strftime("%m-%d")
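# For example (assuming an arrow-parsable timestamp string in the DB):
#   to_bucket('2015-06-03T12:34:56') -> '06-03'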
@blueprint.route("/", methods=["GET", "POST"])
def home():
json_obj = {}
json_obj_home = {}
surveys_by_date = {}
surveys_date_array = []
surveys_value_array = []
for i in range(SURVEY_DAYS, -1, -1):
time_i = (datetime.date.today() - datetime.timedelta(i))
date_index = time_i.strftime("%m-%d")
surveys_by_date[date_index] = 0
surveys_date_array.append(date_index)
survey_table = get_all_survey_responses(SURVEY_DAYS)
sms_rows = [x.lang for x in survey_table if x.method == 'sms']
web_rows = [x.lang for x in survey_table if x.method == 'web']
# ANALYTICS CODE
for i in range(SURVEY_DAYS, -1, -1):
time_i = (datetime.date.today() - datetime.timedelta(i))
date_index = time_i.strftime("%m-%d")
surveys_value_array.append(
len([x for x in survey_table if to_bucket(x.date_submitted) == date_index]))
dashboard_collection_home = [
{
"id": "graph",
"title": "Surveys Submitted".format(SURVEY_DAYS),
"data": {
"graph": {
"datetime": {
"data": surveys_date_array
},
"series": [
{
"data": surveys_value_array
}
]
}
}
},
{
"title": "Satisfaction Rating".format(SURVEY_DAYS),
"data": "{0:.2f}".format(get_rating_scale(survey_table))
},
{
"title": "Survey Type".format(SURVEY_DAYS),
"data": {
"web_en": web_rows.count('en'),
"web_es": web_rows.count('es'),
"sms_en": sms_rows.count('en'),
"sms_es": sms_rows.count('es')
},
"labels": {
"web_en": "Web (English)",
"web_es": "Web (Spanish)",
"sms_en": "Text (English)",
"sms_es": "Text (Spanish)"
}
},
{},
{},
{},
{},
{},
{},
{},
{
"title": "Surveys by Survey Role",
"data": get_surveys_by_role(survey_table)
},
{},
{
"title": "How many completions?",
"data": get_surveys_by_completion(survey_table)
},
{
"title": "Respondents by Purpose",
"data": get_surveys_by_purpose(survey_table)
},
{
"title": "Ratings",
"data": {
"en": get_rating_by_lang(survey_table, 'en'),
"es": get_rating_by_lang(survey_table, 'es'),
"p1": get_rating_by_purpose(survey_table, 1),
"p2": get_rating_by_purpose(survey_table, 2),
"p3": get_rating_by_purpose(survey_table, 3),
"p4": get_rating_by_purpose(survey_table, 4),
"p5": get_rating_by_purpose(survey_table, 5),
"contractor": get_rating_by_role(survey_table, 1),
"architect": get_rating_by_role(survey_table, 2),
"permitconsultant": get_rating_by_role(survey_table, 3),
"homeowner": get_rating_by_role(survey_table, 4),
"bizowner": get_rating_by_role(survey_table, 5)
}
}
]
json_obj_home['daily_graph'] = ujson.dumps(dashboard_collection_home[0]['data']['graph'])
json_obj_home['surveys_type'] = ujson.dumps(dashboard_collection_home[2])
json_obj_home['survey_role'] = ujson.dumps(dashboard_collection_home[10])
json_obj_home['survey_complete'] = ujson.dumps(dashboard_collection_home[12])
json_obj_home['survey_purpose'] = ujson.dumps(dashboard_collection_home[13])
today = datetime.date.today()
return render_template(
"public/home.html",
api=1,
date=today.strftime('%B %d, %Y'),
json_obj=json_obj_home,
dash_obj=dashboard_collection_home,
resp_obj=survey_table,
title='Dashboard - Main'
)
@blueprint.route("/metrics", methods=["GET", "POST"])
def metrics():
json_obj = {}
surveys_by_date = {}
surveys_date_array = []
surveys_value_array = []
for i in range(SURVEY_DAYS, -1, -1):
time_i = (datetime.date.today() - datetime.timedelta(i))
date_index = time_i.strftime("%m-%d")
surveys_by_date[date_index] = 0
surveys_date_array.append(date_index)
survey_table = get_all_survey_responses(SURVEY_DAYS)
sms_rows = [x.lang for x in survey_table if x.method == 'sms']
web_rows = [x.lang for x in survey_table if x.method == 'web']
# ANALYTICS CODE
for i in range(SURVEY_DAYS, -1, -1):
time_i = (datetime.date.today() - datetime.timedelta(i))
date_index = time_i.strftime("%m-%d")
surveys_value_array.append(
len([x for x in survey_table if to_bucket(x.date_submitted) == date_index]))
dashboard_collection = [
{
"id": "graph",
"title": "Surveys Submitted".format(SURVEY_DAYS),
"data": {
"graph": {
"datetime": {
"data": surveys_date_array
},
"series": [
{
"data": surveys_value_array
}
]
}
}
},
{
"title": "Satisfaction Rating".format(SURVEY_DAYS),
"data": "{0:.2f}".format(get_rating_scale(survey_table))
},
{
"title": "Survey Type".format(SURVEY_DAYS),
"data": {
"web_en": web_rows.count('en'),
"web_es": web_rows.count('es'),
"sms_en": sms_rows.count('en'),
"sms_es": sms_rows.count('es')
},
"labels": {
"web_en": "Web (English)",
"web_es": "Web (Spanish)",
"sms_en": "Text (English)",
"sms_es": "Text (Spanish)"
}
},
{
"title": "Commercial",
"data": {
"nc": get_lifespan('nc'),
"rc": get_lifespan('rc'),
"s": get_lifespan('s')
}
},
{
"title": "Residential",
"data": {
"nr": get_lifespan('nr'),
"rr": get_lifespan('rr'),
"p": get_lifespan('p'),
"f": get_lifespan('f'),
"e": get_lifespan('e')
}
},
{
"title": "Average time from application date to permit issuance, Owner/Builder Permits, Last 30 Days",
"data": 0
},
{
"title": "Same Day Trade Permits",
"data": {
"PLUM": trade(30, 'PLUM'),
"BLDG": trade(30, 'BLDG'),
"ELEC": trade(30, 'ELEC'),
"FIRE": trade(30, 'FIRE'),
"ZIPS": trade(30, 'ZIPS')
}
},
{
"title": "(UNUSED) Avg Cost of an Open Residential Permit",
"data": 0
},
{
"title": "(UNUSED) Avg Cost of an Owner/Builder Permit",
"data": 0
},
{
"title": "Permits & sub-permits issued by type, Last 30 Days",
"data": get_permit_types()
},
{
"title": "Surveys by Survey Role",
"data": get_surveys_by_role(survey_table)
},
{
"title": "Master Permits Issued, Last 30 Days",
"data": get_master_permit_counts('permit_issued_date')
},
{
"title": "How many completions?",
"data": get_surveys_by_completion(survey_table)
},
{
"title": "Purpose",
"data": get_surveys_by_purpose(survey_table)
},
{
"title": "Ratings",
"data": {
"en": get_rating_by_lang(survey_table, 'en'),
"es": get_rating_by_lang(survey_table, 'es'),
"p1": get_rating_by_purpose(survey_table, 1),
"p2": get_rating_by_purpose(survey_table, 2),
"p3": get_rating_by_purpose(survey_table, 3),
"p4": get_rating_by_purpose(survey_table, 4),
"p5": get_rating_by_purpose(survey_table, 5),
"contractor": get_rating_by_role(survey_table, 1),
"architect": get_rating_by_role(survey_table, 2),
"permitconsultant": get_rating_by_role(survey_table, 3),
"homeowner": get_rating_by_role(survey_table, 4),
"bizowner": get_rating_by_role(survey_table, 5)
}
}
]
json_obj['daily_graph'] = ujson.dumps(dashboard_collection[0]['data']['graph'])
json_obj['surveys_type'] = ujson.dumps(dashboard_collection[2])
json_obj['permits_type'] = ujson.dumps(dashboard_collection[9])
json_obj['survey_role'] = ujson.dumps(dashboard_collection[10])
json_obj['survey_complete'] = ujson.dumps(dashboard_collection[12])
json_obj['survey_purpose'] = ujson.dumps(dashboard_collection[13])
json_obj['permits_rawjson'] = ujson.dumps(dump_socrata_api('p'))
json_obj['violations_rawjson'] = ujson.dumps(dump_socrata_api('v'))
json_obj['violations_locations_json'] = ujson.dumps(dump_socrata_api('vl'))
json_obj['violations_type_json'] = ujson.dumps(dump_socrata_api('vt'))
json_obj['violations_per_month_json'] = ujson.dumps(dump_socrata_api('vm'))
today = datetime.date.today()
return render_template(
"public/home-metrics.html",
api=api_health(),
date=today.strftime('%B %d, %Y'),
json_obj=json_obj,
dash_obj=dashboard_collection,
resp_obj=survey_table,
title='Dashboard - PIC Metrics'
)
@blueprint.route("/violations", methods=["GET", "POST"])
def violations():
json_obj = {}
surveys_by_date = {}
surveys_date_array = []
surveys_value_array = []
for i in range(SURVEY_DAYS, -1, -1):
time_i = (datetime.date.today() - datetime.timedelta(i))
date_index = time_i.strftime("%m-%d")
surveys_by_date[date_index] = 0
surveys_date_array.append(date_index)
survey_table = get_all_survey_responses(SURVEY_DAYS)
sms_rows = [x.lang for x in survey_table if x.method == 'sms']
web_rows = [x.lang for x in survey_table if x.method == 'web']
# ANALYTICS CODE
for i in range(SURVEY_DAYS, -1, -1):
time_i = (datetime.date.today() - datetime.timedelta(i))
date_index = time_i.strftime("%m-%d")
surveys_value_array.append(
len([x for x in survey_table if to_bucket(x.date_submitted) == date_index]))
dashboard_collection = [
{
"id": "graph",
"title": "Surveys Submitted".format(SURVEY_DAYS),
"data": {
"graph": {
"datetime": {
"data": surveys_date_array
},
"series": [
{
"data": surveys_value_array
}
]
}
}
},
{
"title": "Satisfaction Rating".format(SURVEY_DAYS),
"data": "{0:.2f}".format(get_rating_scale(survey_table))
},
{
"title": "Survey Type".format(SURVEY_DAYS),
"data": {
"web_en": web_rows.count('en'),
"web_es": web_rows.count('es'),
"sms_en": sms_rows.count('en'),
"sms_es": sms_rows.count('es')
},
"labels": {
"web_en": "Web (English)",
"web_es": "Web (Spanish)",
"sms_en": "Text (English)",
"sms_es": "Text (Spanish)"
}
},
{
"title": "Commercial",
"data": {
"nc": get_lifespan('nc'),
"rc": get_lifespan('rc'),
"s": get_lifespan('s')
}
},
{
"title": "Residential",
"data": {
"nr": get_lifespan('nr'),
"rr": get_lifespan('rr'),
"p": get_lifespan('p'),
"f": get_lifespan('f'),
"e": get_lifespan('e')
}
},
{
"title": "Average time from application date to permit issuance, Owner/Builder Permits, Last 30 Days",
"data": 0
},
{
"title": "Same Day Trade Permits",
"data": {
"PLUM": trade(30, 'PLUM'),
"BLDG": trade(30, 'BLDG'),
"ELEC": trade(30, 'ELEC'),
"FIRE": trade(30, 'FIRE'),
"ZIPS": trade(30, 'ZIPS')
}
},
{
"title": "(UNUSED) Avg Cost of an Open Residential Permit",
"data": 0
},
{
"title": "(UNUSED) Avg Cost of an Owner/Builder Permit",
"data": 0
},
{
"title": "Permits & sub-permits issued by type, Last 30 Days",
"data": get_permit_types()
},
{
"title": "Surveys by Survey Role",
"data": get_surveys_by_role(survey_table)
},
{
"title": "Master Permits Issued, Last 30 Days",
"data": get_master_permit_counts('permit_issued_date')
},
{
"title": "How many completions?",
"data": get_surveys_by_completion(survey_table)
},
{
"title": "Purpose",
"data": get_surveys_by_purpose(survey_table)
},
{
"title": "Ratings",
"data": {
"en": get_rating_by_lang(survey_table, 'en'),
"es": get_rating_by_lang(survey_table, 'es'),
"p1": get_rating_by_purpose(survey_table, 1),
"p2": get_rating_by_purpose(survey_table, 2),
"p3": get_rating_by_purpose(survey_table, 3),
"p4": get_rating_by_purpose(survey_table, 4),
"p5": get_rating_by_purpose(survey_table, 5),
"contractor": get_rating_by_role(survey_table, 1),
"architect": get_rating_by_role(survey_table, 2),
"permitconsultant": get_rating_by_role(survey_table, 3),
"homeowner": get_rating_by_role(survey_table, 4),
"bizowner": get_rating_by_role(survey_table, 5)
}
}
]
json_obj['daily_graph'] = ujson.dumps(dashboard_collection[0]['data']['graph'])
json_obj['surveys_type'] = ujson.dumps(dashboard_collection[2])
json_obj['permits_type'] = ujson.dumps(dashboard_collection[9])
json_obj['survey_role'] = ujson.dumps(dashboard_collection[10])
json_obj['survey_complete'] = ujson.dumps(dashboard_collection[12])
json_obj['survey_purpose'] = ujson.dumps(dashboard_collection[13])
json_obj['permits_rawjson'] = ujson.dumps(dump_socrata_api('p'))
json_obj['violations_rawjson'] = ujson.dumps(dump_socrata_api('v'))
json_obj['violations_locations_json'] = ujson.dumps(dump_socrata_api('vl'))
json_obj['violations_type_json'] = ujson.dumps(dump_socrata_api('vt'))
json_obj['violations_per_month_json'] = ujson.dumps(dump_socrata_api('vm'))
today = datetime.date.today()
return render_template(
"public/home-violations.html",
api=api_health(),
date=today.strftime('%B %d, %Y'),
json_obj=json_obj,
dash_obj=dashboard_collection,
resp_obj=survey_table,
title='Dashboard - Neighborhood Compliance'
)
@blueprint.route('/dashboard/feedback/', methods=['GET'])
def all_surveys():
survey_table = get_all_survey_responses(SURVEY_DAYS)
today = datetime.date.today()
return render_template(
"dashboard/all-surveys.html",
resp_obj=survey_table,
title='All Survey Responses',
date=today.strftime('%B %d, %Y')
)
@blueprint.route('/dashboard/feedback/<id>', methods=['GET'])
@login_required
def survey_detail(id):
survey = Survey.query.filter_by(id=id)
today = datetime.date.today()
return render_template(
"dashboard/survey-detail.html",
resp_obj=survey,
title='Permitting & Inspection Center User Survey Metrics: Detail',
date=today.strftime('%B %d, %Y'))
@blueprint.route("/dashboard/violations/", methods=['GET'])
def violations_detail():
json_obj = {}
json_obj['violations_type_json'] = ujson.dumps(dump_socrata_api('vt'))
today = datetime.date.today()
return render_template(
"public/violations-detail.html",
title='Violations by Type: Detail',
json_obj=json_obj,
date=today.strftime('%B %d, %Y'))
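# --- Sketch: possible shared helper (not part of the original routes) ---
# The home, metrics, and violations views above each rebuild the same date
# labels and daily survey counts. A helper like this (hypothetical name)
# could replace the duplicated loops; it assumes SURVEY_DAYS, to_bucket()
# and the survey rows behave exactly as they are used above.
def build_daily_survey_series(survey_table, days=SURVEY_DAYS):
    # Returns (date_labels, daily_counts) for the last `days` days,
    # oldest first, matching the arrays the routes feed to the graph widget.
    date_labels = []
    daily_counts = []
    for i in range(days, -1, -1):
        day = datetime.date.today() - datetime.timedelta(i)
        label = day.strftime("%m-%d")
        date_labels.append(label)
        daily_counts.append(
            len([x for x in survey_table if to_bucket(x.date_submitted) == label]))
    return date_labels, daily_counts
# Usage: surveys_date_array, surveys_value_array = build_daily_survey_series(survey_table)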
|
import unittest
import requests
class TranslationTests(unittest.TestCase):
def setUp(self):
self.url = 'http://127.0.0.1/api/translate'
def test_given_words(self):
"""Should pass for the basic test cases provided"""
test_words = ['pig', 'banana', 'trash', 'happy', 'duck', 'glove',
'eat', 'omelet', 'are']
expected_words = ['igpay', 'ananabay', 'ashtray', 'appyhay', 'uckday',
'oveglay', 'eatyay', 'omeletyay', 'areyay']
responses = [requests.post(self.url, x).text for x in test_words]
self.assertEqual(responses, expected_words,
'Should pass for the basic test cases provided')
def test_capitalization(self):
"""Should preserve capitalization in words"""
test_words = ['Capitalized', 'Words', 'Should', 'Work']
expected_words = ['Apitalizedcay', 'Ordsway', 'Ouldshay', 'Orkway']
responses = [requests.post(self.url, x).text for x in test_words]
self.assertEqual(responses, expected_words,
'Words should preserve their capitalization')
def test_sentences(self):
"""Should translate sentences with preserved punctuation"""
test_sentence = ('Long sentences should retain their capitalization, '
'as well as punctuation - hopefully!!')
expected_result = ('Onglay entencessay ouldshay etainray eirthay '
'apitalizationcay, asyay ellway asyay unctuationpay'
' - opefullyhay!!')
response = requests.post(self.url, test_sentence).text
self.assertEqual(response, expected_result,
'Should translate sentences accurately')
def test_edge_cases(self):
"""Should be able to handle words with no vowels"""
test_word = 'sky'
expected_result = 'skyay'
response = requests.post(self.url, test_word).text
self.assertEqual(response, expected_result,
'Should be able to translate words without vowels')
def test_error_cases(self):
"""Should return errors for invalid input"""
self.assertEqual(requests.post(self.url, '').status_code, 406,
'Should return HTTP/406 for empty strings')
def test_long_paragraphs(self):
"""Should translate long paragraphs with new lines intact"""
self.maxDiff = None
expected_result = ''
test_paragraph = ''
with open('tests/lorem_ipsum.txt') as input_paragraph:
test_paragraph = input_paragraph.read()
with open('tests/lorem_ipsum_translated.txt') as expected:
expected_result = expected.read()
response = requests.post(self.url, test_paragraph).text
self.assertEqual(response, expected_result,
'Should translate long paragraphs accurately')
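# A minimal sketch (an assumption, not the service's actual implementation)
# of the word-level rule these tests imply: vowel-initial words take "yay",
# otherwise the leading consonant cluster moves to the end followed by "ay",
# vowel-less words just take "ay", and capitalization is preserved.
def translate_word(word):
    vowels = 'aeiou'
    lower = word.lower()
    if lower[0] in vowels:
        result = lower + 'yay'
    else:
        first_vowel = next(
            (i for i, c in enumerate(lower) if c in vowels), None)
        if first_vowel is None:
            # No vowels at all (e.g. 'sky'): just append 'ay'.
            result = lower + 'ay'
        else:
            result = lower[first_vowel:] + lower[:first_vowel] + 'ay'
    return result.capitalize() if word[0].isupper() else result
# e.g. translate_word('pig') -> 'igpay', translate_word('Words') -> 'Ordsway',
#      translate_word('sky') -> 'skyay'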
if __name__ == '__main__':
unittest.main()
|
import datetime
__all__ = [
'info',
]
def info():
return {
'birthday': datetime.date(1992, 2, 10),
'class': 3,
'family_name_en': u'nakagawa',
'family_name_kana': u'なかがわ',
'first_name_en': u'haruka',
'first_name_kana': u'はるか',
'graduate_date': None,
'hometown': u'東京',
'name_en': u'Nakagawa Haruka',
'name_ja': u'仲川遥香',
'name_kana': u'なかがわ はるか',
'nick': u'はるごん',
'team': u'A',
}
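# Usage sketch: callers read fields off the returned dict, e.g.
#   info()['name_en']   # -> u'Nakagawa Haruka'
#   info()['birthday']  # -> datetime.date(1992, 2, 10)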
|
#!/usr/bin/env monkeyrunner
# Copyright 2010, The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from com.android.monkeyrunner import MonkeyRunner
# The format of the file we are parsing is very carefully constructed.
# Each line corresponds to a single command. The line is split into 2
# parts with a | character. Text to the left of the pipe denotes
# which command to run. The text to the right of the pipe is a python
# dictionary (it can be evaled into existence) that specifies the
# arguments for the command. In most cases, this directly maps to the
# keyword argument dictionary that could be passed to the underlying
# command.
# Lookup table to map command strings to functions that implement that
# command.
CMD_MAP = {
    'TOUCH': lambda dev, arg: dev.touch(**arg),
    'DRAG': lambda dev, arg: dev.drag(**arg),
    'PRESS': lambda dev, arg: dev.press(**arg),
    'TYPE': lambda dev, arg: dev.type(**arg),
    'WAIT': lambda dev, arg: MonkeyRunner.sleep(**arg)
    }
# Process a single file for the specified device.
def process_file(fp, device):
    for line in fp:
        (cmd, rest) = line.split('|')
        try:
            # Parse the pydict
            rest = eval(rest)
        except:
            print 'unable to parse options'
            continue
        if cmd not in CMD_MAP:
            print 'unknown command: ' + cmd
            continue
        CMD_MAP[cmd](device, rest)
def main():
    file = sys.argv[1]
    fp = open(file, 'r')
    device = MonkeyRunner.waitForConnection()
    process_file(fp, device)
    fp.close()
if __name__ == '__main__':
    main()
|
'''
modified by Chongxuan Li (chongxuanli1991@gmail.com)
'''
import sys
sys.path.append('..')
sys.path.append('../../data/')
import os, numpy as np
import scipy.io as sio
import time
import anglepy as ap
import anglepy.paramgraphics as paramgraphics
import anglepy.ndict as ndict
import theano
import theano.tensor as T
from collections import OrderedDict
import preprocessing as pp
import color
def zca_dec(zca_mean, zca_winv, data):
return zca_winv.dot(data) + zca_mean
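# zca_dec undoes ZCA whitening: the cifar10_zca branch below whitens with
# x_white = zca_w.dot(x - zca_mean), so this assumes zca_winv is the
# (pseudo-)inverse of zca_w.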
def labelToMat(y):
label = np.unique(y)
newy = np.zeros((len(y), len(label)))
for i in range(len(y)):
newy[i, y[i]] = 1
return newy.T
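# Usage sketch (hypothetical labels): labelToMat assumes integer labels in
# 0..K-1 and returns a (K, n_samples) one-hot matrix, e.g.
#   labelToMat(np.array([0, 2, 1]))
#   # -> [[1. 0. 0.]
#   #     [0. 0. 1.]
#   #     [0. 1. 0.]]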
def main(n_z, n_hidden, dataset, seed, comment, gfx=True):
# Initialize logdir
import time
pre_dir = 'models/gpulearn_z_x_mnist_96-(500, 500)'
if os.environ.has_key('pretrain') and bool(int(os.environ['pretrain'])) == True:
comment+='_pre-train'
if os.environ.has_key('prior') and bool(int(os.environ['prior'])) == True:
comment+='_prior'
pre_dir+='_prior'
if os.environ.has_key('cutoff'):
comment+=('_'+str(int(os.environ['cutoff'])))
if os.environ.has_key('train_residual') and bool(int(os.environ['train_residual'])) == True:
comment+='_train-residual'
pre_dir+='_train-residual'
if os.environ.has_key('sigma_square'):
comment+=('_'+str(float(os.environ['sigma_square'])))
pre_dir+=('_'+str(float(os.environ['sigma_square'])))
pre_dir+='/'
logdir = 'results/gpulearn_z_x_'+dataset+'_'+str(n_z)+'-'+str(n_hidden)+comment+'_'+str(int(time.time()))+'/'
if not os.path.exists(logdir): os.makedirs(logdir)
print 'logdir:', logdir
print 'gpulearn_z_x', n_z, n_hidden, dataset, seed
with open(logdir+'hook.txt', 'a') as f:
print >>f, 'learn_z_x', n_z, n_hidden, dataset, seed
np.random.seed(seed)
gfx_freq = 1
weight_decay = 0
# Init data
if dataset == 'mnist':
import anglepy.data.mnist as mnist
# MNIST
size = 28
train_x, train_y, valid_x, valid_y, test_x, test_y = mnist.load_numpy(size)
f_enc, f_dec = pp.Identity()
if os.environ.has_key('prior') and bool(int(os.environ['prior'])) == True:
color.printBlue('Loading prior')
mnist_prior = sio.loadmat('data/mnist_prior/mnist_prior.mat')
train_mean_prior = mnist_prior['z_train']
test_mean_prior = mnist_prior['z_test']
valid_mean_prior = mnist_prior['z_valid']
else:
train_mean_prior = np.zeros((n_z,train_x.shape[1]))
test_mean_prior = np.zeros((n_z,test_x.shape[1]))
valid_mean_prior = np.zeros((n_z,valid_x.shape[1]))
x = {'x': train_x.astype(np.float32), 'mean_prior': train_mean_prior.astype(np.float32)}
x_train = x
x_valid = {'x': valid_x.astype(np.float32), 'mean_prior': valid_mean_prior.astype(np.float32)}
x_test = {'x': test_x.astype(np.float32), 'mean_prior': test_mean_prior.astype(np.float32)}
L_valid = 1
dim_input = (size,size)
n_x = size*size
type_qz = 'gaussianmarg'
type_pz = 'gaussianmarg'
nonlinear = 'softplus'
type_px = 'bernoulli'
n_train = 50000
n_test = 10000
n_valid = 10000
n_batch = 1000
colorImg = False
bernoulli_x = True
byteToFloat = False
weight_decay = float(n_batch)/n_train
elif dataset == 'higgs':
size = 28
f_enc, f_dec = pp.Identity()
inputfile = 'data/higgs/HIGGS.csv'
print 'loading file.'
x = np.loadtxt(inputfile, dtype='f4', delimiter=',')
print 'done.'
y = x[:,0].reshape((-1,1))
x = x[:,1:]
x = np.array(x, dtype='float32')
y = np.array(y, dtype='float32')
n_train = 10000000
n_valid = 500000
n_test = 500000
n_batch = 1000
derived_feat = 'all'
if os.environ.has_key('derived_feat'):
derived_feat = os.environ['derived_feat']
color.printBlue(derived_feat)
if derived_feat == 'high':
# Only the 7 high level features.
x = x[:, 21:28]
elif derived_feat == 'low':
# Only the 21 raw features.
x = x[:, 0:21]
else:
pass
train_x = x[0:n_train, :].T
y_train = y[0:n_train, :]
valid_x = x[n_train:n_train+n_valid, :].T
y_valid = y[n_train:n_train+n_valid, :]
test_x = x[n_train+n_valid:n_train+n_valid+n_test, :].T
y_test = y[n_train+n_valid:n_train+n_valid+n_test, :]
n_y = 2
n_x = train_x.shape[0]
train_mean_prior = np.zeros((n_z,train_x.shape[1]))
test_mean_prior = np.zeros((n_z,test_x.shape[1]))
valid_mean_prior = np.zeros((n_z,valid_x.shape[1]))
x = {'x': train_x.astype(np.float32), 'mean_prior': train_mean_prior.astype(np.float32)}
x_train = x
x_valid = {'x': valid_x.astype(np.float32), 'mean_prior': valid_mean_prior.astype(np.float32)}
x_test = {'x': test_x.astype(np.float32), 'mean_prior': test_mean_prior.astype(np.float32)}
type_qz = 'gaussianmarg'
type_pz = 'gaussianmarg'
nonlinear = 'tanh'
if os.environ.has_key('nonlinear'):
nonlinear = os.environ['nonlinear']
color.printBlue(nonlinear)
L_valid = 1
dim_input = (1,size)
type_px = 'gaussian'
colorImg = False
bernoulli_x = False
byteToFloat = False
weight_decay = float(n_batch)/n_train
elif dataset == 'cifar10':
import anglepy.data.cifar10 as cifar10
size = 32
train_x, train_y, test_x, test_y = cifar10.load_numpy()
train_x = train_x.astype(np.float32).T
test_x = test_x.astype(np.float32).T
##
f_enc, f_dec = pp.Identity()
if os.environ.has_key('prior') and bool(int(os.environ['prior'])) == True:
color.printBlue('Loading prior')
cifar_prior = sio.loadmat('data/cifar10_prior/cifar10_prior.mat')
train_mean_prior = cifar_prior['z_train']
test_mean_prior = cifar_prior['z_test']
else:
train_mean_prior = np.zeros((n_z,train_x.shape[1]))
test_mean_prior = np.zeros((n_z,test_x.shape[1]))
x = {'x': train_x.astype(np.float32), 'mean_prior': train_mean_prior.astype(np.float32)}
x_train = x
x_test = {'x': test_x.astype(np.float32), 'mean_prior': test_mean_prior.astype(np.float32)}
x_valid = x_test
L_valid = 1
n_y = 10
dim_input = (size,size)
n_x = x['x'].shape[0]
type_qz = 'gaussianmarg'
type_pz = 'gaussianmarg'
nonlinear = 'softplus'
type_px = 'gaussian'
if os.environ.has_key('type_px'):
type_px = os.environ['type_px']
color.printBlue('Generative type: '+type_px)
n_train = 50000
n_test = 10000
n_batch = 5000
colorImg = True
bernoulli_x = False
byteToFloat = False
#weight_decay = float(n_batch)/n_train
elif dataset == 'cifar10_zca':
import anglepy.data.cifar10 as cifar10
size = 32
train_x, train_y, test_x, test_y = cifar10.load_numpy()
train_x = train_x.astype(np.float32).T
test_x = test_x.astype(np.float32).T
##
f_enc, f_dec = pp.Identity()
zca_mean, zca_w, zca_winv = cifar10.zca(train_x)
train_x = zca_w.dot(train_x-zca_mean)
test_x = zca_w.dot(test_x-zca_mean)
if os.environ.has_key('prior') and bool(int(os.environ['prior'])) == True:
color.printBlue('Loading prior')
cifar_prior = sio.loadmat('data/cifar10_prior/cifar10_prior.mat')
train_mean_prior = cifar_prior['z_train']
test_mean_prior = cifar_prior['z_test']
else:
train_mean_prior = np.zeros((n_z,train_x.shape[1]))
test_mean_prior = np.zeros((n_z,test_x.shape[1]))
x = {'x': train_x.astype(np.float32), 'mean_prior': train_mean_prior.astype(np.float32)}
x_train = x
x_test = {'x': test_x.astype(np.float32), 'mean_prior': test_mean_prior.astype(np.float32)}
x_valid = x_test
L_valid = 1
dim_input = (size,size)
n_y = 10
n_x = x['x'].shape[0]
type_qz = 'gaussianmarg'
type_pz = 'gaussianmarg'
nonlinear = 'softplus'
type_px = 'gaussian'
n_train = 50000
n_test = 10000
n_batch = 5000
colorImg = True
bernoulli_x = False
byteToFloat = False
if os.environ.has_key('type_px'):
type_px = os.environ['type_px']
color.printBlue('Generative type: '+type_px)
nonlinear = 'softplus'
elif dataset == 'mnist_basic':
# MNIST
size = 28
data_dir = os.environ['ML_DATA_PATH']+'/mnist_variations/'+'mnist_'
tmp = sio.loadmat(data_dir+'train.mat')
#color.printRed(data_dir+'train.mat')
train_x = tmp['x_train'].T
train_y = tmp['t_train'].T.astype(np.int32)
# validation 2000
valid_x = train_x[:,10000:]
valid_y = train_y[10000:]
train_x = train_x[:,:10000]
train_y = train_y[:10000]
tmp = sio.loadmat(data_dir+'test.mat')
test_x = tmp['x_test'].T
test_y = tmp['t_test'].T.astype(np.int32)
print train_x.shape
print train_y.shape
print test_x.shape
print test_y.shape
f_enc, f_dec = pp.Identity()
train_mean_prior = np.zeros((n_z,train_x.shape[1]))
test_mean_prior = np.zeros((n_z,test_x.shape[1]))
valid_mean_prior = np.zeros((n_z,valid_x.shape[1]))
'''
x = {'x': train_x.astype(np.float32), 'y': labelToMat(train_y).astype(np.float32)}
x_train = x
x_valid = {'x': valid_x.astype(np.float32), 'y': labelToMat(valid_y).astype(np.float32)}
x_test = {'x': test_x.astype(np.float32), 'y': labelToMat(test_y).astype(np.float32)}
'''
x = {'x': train_x.astype(np.float32), 'mean_prior': train_mean_prior.astype(np.float32)}
x_train = x
x_valid = {'x': valid_x.astype(np.float32), 'mean_prior': valid_mean_prior.astype(np.float32)}
x_test = {'x': test_x.astype(np.float32), 'mean_prior': test_mean_prior.astype(np.float32)}
L_valid = 1
dim_input = (size,size)
n_x = size*size
n_y = 10
type_qz = 'gaussianmarg'
type_pz = 'gaussianmarg'
nonlinear = 'softplus'
type_px = 'bernoulli'
n_train = 10000
n_valid = 2000
n_test = 50000
n_batch = 200
colorImg = False
bernoulli_x = True
byteToFloat = False
weight_decay = float(n_batch)/n_train
elif dataset == 'rectangle':
# MNIST
size = 28
data_dir = os.environ['ML_DATA_PATH']+'/mnist_variations/'+'rectangles_'
tmp = sio.loadmat(data_dir+'train.mat')
color.printRed(data_dir+'train.mat')
train_x = tmp['x_train'].T
train_y = tmp['t_train'].T.astype(np.int32)
# validation 2000
valid_x = train_x[:,1000:]
valid_y = train_y[1000:]
train_x = train_x[:,:1000]
train_y = train_y[:1000]
tmp = sio.loadmat(data_dir+'test.mat')
test_x = tmp['x_test'].T
test_y = tmp['t_test'].T.astype(np.int32)
print train_x.shape
print train_y.shape
print test_x.shape
print test_y.shape
f_enc, f_dec = pp.Identity()
train_mean_prior = np.zeros((n_z,train_x.shape[1]))
test_mean_prior = np.zeros((n_z,test_x.shape[1]))
valid_mean_prior = np.zeros((n_z,valid_x.shape[1]))
'''
x = {'x': train_x.astype(np.float32), 'y': labelToMat(train_y).astype(np.float32)}
x_train = x
x_valid = {'x': valid_x.astype(np.float32), 'y': labelToMat(valid_y).astype(np.float32)}
x_test = {'x': test_x.astype(np.float32), 'y': labelToMat(test_y).astype(np.float32)}
'''
x = {'x': train_x.astype(np.float32), 'mean_prior': train_mean_prior.astype(np.float32)}
x_train = x
x_valid = {'x': valid_x.astype(np.float32), 'mean_prior': valid_mean_prior.astype(np.float32)}
x_test = {'x': test_x.astype(np.float32), 'mean_prior': test_mean_prior.astype(np.float32)}
L_valid = 1
dim_input = (size,size)
n_x = size*size
n_y = 2
type_qz = 'gaussianmarg'
type_pz = 'gaussianmarg'
nonlinear = 'softplus'
type_px = 'bernoulli'
n_train = 1000
n_valid = 200
n_test = 50000
n_batch = 500
colorImg = False
bernoulli_x = True
byteToFloat = False
weight_decay = float(n_batch)/n_train
#print '3', n_x
elif dataset == 'convex':
# MNIST
size = 28
data_dir = os.environ['ML_DATA_PATH']+'/mnist_variations/'+'convex_'
tmp = sio.loadmat(data_dir+'train.mat')
train_x = tmp['x_train'].T
train_y = tmp['t_train'].T.astype(np.int32)
# validation 2000
valid_x = train_x[:,6000:]
valid_y = train_y[6000:]
train_x = train_x[:,:6000]
train_y = train_y[:6000]
tmp = sio.loadmat(data_dir+'test.mat')
test_x = tmp['x_test'].T
test_y = tmp['t_test'].T.astype(np.int32)
print train_x.shape
print train_y.shape
print test_x.shape
print test_y.shape
f_enc, f_dec = pp.Identity()
train_mean_prior = np.zeros((n_z,train_x.shape[1]))
test_mean_prior = np.zeros((n_z,test_x.shape[1]))
valid_mean_prior = np.zeros((n_z,valid_x.shape[1]))
'''
x = {'x': train_x.astype(np.float32), 'y': labelToMat(train_y).astype(np.float32)}
x_train = x
x_valid = {'x': valid_x.astype(np.float32), 'y': labelToMat(valid_y).astype(np.float32)}
x_test = {'x': test_x.astype(np.float32), 'y': labelToMat(test_y).astype(np.float32)}
'''
x = {'x': train_x.astype(np.float32), 'mean_prior': train_mean_prior.astype(np.float32)}
x_train = x
x_valid = {'x': valid_x.astype(np.float32), 'mean_prior': valid_mean_prior.astype(np.float32)}
x_test = {'x': test_x.astype(np.float32), 'mean_prior': test_mean_prior.astype(np.float32)}
L_valid = 1
dim_input = (size,size)
n_x = size*size
n_y = 2
type_qz = 'gaussianmarg'
type_pz = 'gaussianmarg'
nonlinear = 'softplus'
type_px = 'bernoulli'
n_train = 6000
n_valid = 2000
n_test = 50000
n_batch = 120
colorImg = False
bernoulli_x = True
byteToFloat = False
weight_decay = float(n_batch)/n_train
elif dataset == 'rectangle_image':
# MNIST
size = 28
data_dir = os.environ['ML_DATA_PATH']+'/mnist_variations/'+'rectangles_im_'
tmp = sio.loadmat(data_dir+'train.mat')
train_x = tmp['x_train'].T
train_y = tmp['t_train'].T.astype(np.int32)
# validation 2000
valid_x = train_x[:,10000:]
valid_y = train_y[10000:]
train_x = train_x[:,:10000]
train_y = train_y[:10000]
tmp = sio.loadmat(data_dir+'test.mat')
test_x = tmp['x_test'].T
test_y = tmp['t_test'].T.astype(np.int32)
print train_x.shape
print train_y.shape
print test_x.shape
print test_y.shape
f_enc, f_dec = pp.Identity()
train_mean_prior = np.zeros((n_z,train_x.shape[1]))
test_mean_prior = np.zeros((n_z,test_x.shape[1]))
valid_mean_prior = np.zeros((n_z,valid_x.shape[1]))
'''
x = {'x': train_x.astype(np.float32), 'y': labelToMat(train_y).astype(np.float32)}
x_train = x
x_valid = {'x': valid_x.astype(np.float32), 'y': labelToMat(valid_y).astype(np.float32)}
x_test = {'x': test_x.astype(np.float32), 'y': labelToMat(test_y).astype(np.float32)}
'''
x = {'x': train_x.astype(np.float32), 'mean_prior': train_mean_prior.astype(np.float32)}
x_train = x
x_valid = {'x': valid_x.astype(np.float32), 'mean_prior': valid_mean_prior.astype(np.float32)}
x_test = {'x': test_x.astype(np.float32), 'mean_prior': test_mean_prior.astype(np.float32)}
L_valid = 1
dim_input = (size,size)
n_x = size*size
n_y = 2
type_qz = 'gaussianmarg'
type_pz = 'gaussianmarg'
nonlinear = 'softplus'
type_px = 'bernoulli'
n_train = 10000
n_valid = 2000
n_test = 50000
n_batch = 200
colorImg = False
bernoulli_x = True
byteToFloat = False
weight_decay = float(n_batch)/n_train
elif dataset == 'mnist_rot':
# MNIST
size = 28
data_dir = os.environ['ML_DATA_PATH']+'/mnist_variations/'+'mnist_all_rotation_normalized_float_'
tmp = sio.loadmat(data_dir+'train.mat')
train_x = tmp['x_train'].T
train_y = tmp['t_train'].T.astype(np.int32)
# validation 2000
valid_x = train_x[:,10000:]
valid_y = train_y[10000:]
train_x = train_x[:,:10000]
train_y = train_y[:10000]
tmp = sio.loadmat(data_dir+'test.mat')
test_x = tmp['x_test'].T
test_y = tmp['t_test'].T.astype(np.int32)
print train_x.shape
print train_y.shape
print test_x.shape
print test_y.shape
train_mean_prior = np.zeros((n_z,train_x.shape[1]))
test_mean_prior = np.zeros((n_z,test_x.shape[1]))
valid_mean_prior = np.zeros((n_z,valid_x.shape[1]))
f_enc, f_dec = pp.Identity()
x = {'x': train_x.astype(np.float32), 'mean_prior': train_mean_prior.astype(np.float32)}
x_train = x
x_valid = {'x': valid_x.astype(np.float32), 'mean_prior': valid_mean_prior.astype(np.float32)}
x_test = {'x': test_x.astype(np.float32), 'mean_prior': test_mean_prior.astype(np.float32)}
L_valid = 1
dim_input = (size,size)
n_x = size*size
n_y = 10
type_qz = 'gaussianmarg'
type_pz = 'gaussianmarg'
nonlinear = 'softplus'
type_px = 'bernoulli'
n_train = 10000
n_valid = 2000
n_test = 50000
n_batch = 200
colorImg = False
bernoulli_x = True
byteToFloat = False
weight_decay = float(n_batch)/n_train
elif dataset == 'mnist_back_rand':
# MNIST
size = 28
data_dir = os.environ['ML_DATA_PATH']+'/mnist_variations/'+'mnist_background_random_'
tmp = sio.loadmat(data_dir+'train.mat')
train_x = tmp['x_train'].T
train_y = tmp['t_train'].T.astype(np.int32)
# validation 2000
valid_x = train_x[:,10000:]
valid_y = train_y[10000:]
train_x = train_x[:,:10000]
train_y = train_y[:10000]
tmp = sio.loadmat(data_dir+'test.mat')
test_x = tmp['x_test'].T
test_y = tmp['t_test'].T.astype(np.int32)
print train_x.shape
print train_y.shape
print test_x.shape
print test_y.shape
train_mean_prior = np.zeros((n_z,train_x.shape[1]))
test_mean_prior = np.zeros((n_z,test_x.shape[1]))
valid_mean_prior = np.zeros((n_z,valid_x.shape[1]))
f_enc, f_dec = pp.Identity()
x = {'x': train_x.astype(np.float32), 'mean_prior': train_mean_prior.astype(np.float32)}
x_train = x
x_valid = {'x': valid_x.astype(np.float32), 'mean_prior': valid_mean_prior.astype(np.float32)}
x_test = {'x': test_x.astype(np.float32), 'mean_prior': test_mean_prior.astype(np.float32)}
L_valid = 1
dim_input = (size,size)
n_x = size*size
n_y = 10
type_qz = 'gaussianmarg'
type_pz = 'gaussianmarg'
nonlinear = 'softplus'
type_px = 'bernoulli'
n_train = 10000
n_valid = 2000
n_test = 50000
n_batch = 200
colorImg = False
bernoulli_x = True
byteToFloat = False
weight_decay = float(n_batch)/n_train
elif dataset == 'mnist_back_image':
# MNIST
size = 28
data_dir = os.environ['ML_DATA_PATH']+'/mnist_variations/'+'mnist_background_images_'
tmp = sio.loadmat(data_dir+'train.mat')
train_x = tmp['x_train'].T
train_y = tmp['t_train'].T.astype(np.int32)
# validation 2000
valid_x = train_x[:,10000:]
valid_y = train_y[10000:]
train_x = train_x[:,:10000]
train_y = train_y[:10000]
tmp = sio.loadmat(data_dir+'test.mat')
test_x = tmp['x_test'].T
test_y = tmp['t_test'].T.astype(np.int32)
print train_x.shape
print train_y.shape
print test_x.shape
print test_y.shape
train_mean_prior = np.zeros((n_z,train_x.shape[1]))
test_mean_prior = np.zeros((n_z,test_x.shape[1]))
valid_mean_prior = np.zeros((n_z,valid_x.shape[1]))
f_enc, f_dec = pp.Identity()
x = {'x': train_x.astype(np.float32), 'mean_prior': train_mean_prior.astype(np.float32)}
x_train = x
x_valid = {'x': valid_x.astype(np.float32), 'mean_prior': valid_mean_prior.astype(np.float32)}
x_test = {'x': test_x.astype(np.float32), 'mean_prior': test_mean_prior.astype(np.float32)}
L_valid = 1
dim_input = (size,size)
n_x = size*size
n_y = 10
type_qz = 'gaussianmarg'
type_pz = 'gaussianmarg'
nonlinear = 'softplus'
type_px = 'bernoulli'
n_train = 10000
n_valid = 2000
n_test = 50000
n_batch = 200
colorImg = False
bernoulli_x = True
byteToFloat = False
weight_decay = float(n_batch)/n_train
elif dataset == 'mnist_back_image_rot':
# MNIST
size = 28
data_dir = os.environ['ML_DATA_PATH']+'/mnist_variations/'+'mnist_all_background_images_rotation_normalized_'
tmp = sio.loadmat(data_dir+'train.mat')
train_x = tmp['x_train'].T
train_y = tmp['t_train'].T.astype(np.int32)
# validation 2000
valid_x = train_x[:,10000:]
valid_y = train_y[10000:]
train_x = train_x[:,:10000]
train_y = train_y[:10000]
tmp = sio.loadmat(data_dir+'test.mat')
test_x = tmp['x_test'].T
test_y = tmp['t_test'].T.astype(np.int32)
print train_x.shape
print train_y.shape
print test_x.shape
print test_y.shape
train_mean_prior = np.zeros((n_z,train_x.shape[1]))
test_mean_prior = np.zeros((n_z,test_x.shape[1]))
valid_mean_prior = np.zeros((n_z,valid_x.shape[1]))
f_enc, f_dec = pp.Identity()
x = {'x': train_x.astype(np.float32), 'mean_prior': train_mean_prior.astype(np.float32)}
x_train = x
x_valid = {'x': valid_x.astype(np.float32), 'mean_prior': valid_mean_prior.astype(np.float32)}
x_test = {'x': test_x.astype(np.float32), 'mean_prior': test_mean_prior.astype(np.float32)}
L_valid = 1
dim_input = (size,size)
n_x = size*size
n_y = 10
type_qz = 'gaussianmarg'
type_pz = 'gaussianmarg'
nonlinear = 'softplus'
type_px = 'bernoulli'
n_train = 10000
n_valid = 2000
n_test = 50000
n_batch = 200
colorImg = False
bernoulli_x = True
byteToFloat = False
weight_decay = float(n_batch)/n_train
elif dataset == 'mnist_binarized':
#import anglepy.data.mnist_binarized as mnist_binarized
# MNIST
import anglepy.data.mnist as mnist
size = 28
data_dir = '/home/lichongxuan/regbayes2/data/mat_data/'+'binarized_mnist_'
tmp = sio.loadmat(data_dir+'train.mat')
train_x = tmp['x_train'].T
#train_y = tmp['t_train'].T.astype(np.int32)
tmp = sio.loadmat(data_dir+'test.mat')
test_x = tmp['x_test'].T
tmp = sio.loadmat(data_dir+'valid.mat')
#print tmp.keys()
valid_x = tmp['x_valid'].T
#test_y = tmp['t_test'].T.astype(np.int32)
f_enc, f_dec = pp.Identity()
train_mean_prior = np.zeros((n_z,train_x.shape[1]))
test_mean_prior = np.zeros((n_z,test_x.shape[1]))
valid_mean_prior = np.zeros((n_z,valid_x.shape[1]))
train_x = np.hstack((train_x, valid_x)).astype(np.float32)
train_mean_prior = np.hstack((train_mean_prior,valid_mean_prior)).astype(np.float32)
print train_mean_prior.shape
print train_x.shape
x = {'x': train_x.astype(np.float32), 'mean_prior':train_mean_prior.astype(np.float32)}
x_train = x
x_valid = {'x': test_x.astype(np.float32),'mean_prior':test_mean_prior.astype(np.float32)}
x_test = x_valid
L_valid = 1
dim_input = (28,28)
n_x = 28*28
n_y = 10
type_qz = 'gaussianmarg'
type_pz = 'gaussianmarg'
nonlinear = 'softplus'
type_px = 'bernoulli'
n_train = 60000
n_valid = 10000
n_batch = 1000
colorImg = False
bernoulli_x = False
byteToFloat = False
weight_decay = float(n_batch)/n_train
elif dataset == 'mnist_binarized_own':
#import anglepy.data.mnist_binarized as mnist_binarized
# MNIST
import anglepy.data.mnist as mnist
size = 28
data_dir = 'data/mnist_binarized_own/'+'binarized_mnist_'
tmp = sio.loadmat(data_dir+'train.mat')
train_x = tmp['train_x'].T
#train_y = tmp['t_train'].T.astype(np.int32)
tmp = sio.loadmat(data_dir+'test.mat')
test_x = tmp['test_x'].T
tmp = sio.loadmat(data_dir+'valid.mat')
#print tmp.keys()
valid_x = tmp['valid_x'].T
#test_y = tmp['t_test'].T.astype(np.int32)
f_enc, f_dec = pp.Identity()
train_mean_prior = np.zeros((n_z,train_x.shape[1]))
test_mean_prior = np.zeros((n_z,test_x.shape[1]))
valid_mean_prior = np.zeros((n_z,valid_x.shape[1]))
train_x = np.hstack((train_x, valid_x)).astype(np.float32)
train_mean_prior = np.hstack((train_mean_prior,valid_mean_prior)).astype(np.float32)
print train_mean_prior.shape
print train_x.shape
x = {'x': train_x.astype(np.float32), 'mean_prior':train_mean_prior.astype(np.float32)}
x_train = x
x_valid = {'x': test_x.astype(np.float32),'mean_prior':test_mean_prior.astype(np.float32)}
x_test = x_valid
L_valid = 1
dim_input = (28,28)
n_x = 28*28
n_y = 10
type_qz = 'gaussianmarg'
type_pz = 'gaussianmarg'
nonlinear = 'softplus'
type_px = 'bernoulli'
n_train = 60000
n_valid = 10000
n_batch = 1000
colorImg = False
bernoulli_x = False
byteToFloat = False
weight_decay = float(n_batch)/n_train
elif dataset == 'freyface':
# Frey's face
import anglepy.data.freyface as freyface
n_train = 1600
train_x = freyface.load_numpy()
np.random.shuffle(train_x)
x = {'x': train_x.T[:,0:n_train]}
x_valid = {'x': train_x.T[:,n_train:]}
L_valid = 1
dim_input = (28,20)
n_x = 20*28
type_qz = 'gaussianmarg'
type_pz = 'gaussianmarg'
type_px = 'bounded01'
nonlinear = 'tanh' #tanh works better with freyface #'softplus'
n_batch = 100
colorImg = False
bernoulli_x = False
byteToFloat = False
weight_decay = float(n_batch)/n_train
elif dataset == 'freyface_pca':
# Frey's face
import anglepy.data.freyface as freyface
n_train = 1600
train_x = freyface.load_numpy().T
np.random.shuffle(train_x.T)
f_enc, f_dec, _ = pp.PCA(train_x, 0.99)
train_x = f_enc(train_x)
x = {'x': train_x[:,0:n_train].astype(np.float32)}
x_valid = {'x': train_x[:,n_train:].astype(np.float32)}
L_valid = 1
dim_input = (28,20)
n_x = train_x.shape[0]
type_qz = 'gaussianmarg'
type_pz = 'gaussianmarg'
type_px = 'gaussian'
nonlinear = 'softplus'
n_batch = 100
colorImg = False
bernoulli_x = False
byteToFloat = False
elif dataset == 'freyface_bernoulli':
# Frey's face
import anglepy.data.freyface as freyface
n_train = 1600
train_x = freyface.load_numpy().T
np.random.shuffle(train_x.T)
x = {'x': train_x[:,0:n_train].astype(np.float32)}
x_valid = {'x': train_x[:,n_train:].astype(np.float32)}
L_valid = 1
dim_input = (28,20)
n_x = train_x.shape[0]
type_pz = 'gaussianmarg'
type_px = 'bernoulli'
nonlinear = 'softplus'
n_batch = 100
colorImg = False
bernoulli_x = False
byteToFloat = False
elif dataset == 'norb_48_24300_pca':
size = 48
train_x, train_y, test_x, test_y = np.load('data/norb/norb_48_24300.npy')
_x = {'x': train_x, 'y': train_y}
#ndict.shuffleCols(_x)
#train_x = _x['x']
#train_y = _x['y']
#print _x['x'][:,:10000].shape
# Do PCA
print 'pca'
f_enc, f_dec, pca_params = pp.PCA(_x['x'][:,:10000], cutoff=500, toFloat=False)
ndict.savez(pca_params, logdir+'pca_params')
print 'done'
train_mean_prior = np.zeros((n_z,train_x.shape[1]))
test_mean_prior = np.zeros((n_z,test_x.shape[1]))
x = {'x': f_enc(train_x).astype(np.float32), 'mean_prior' : train_mean_prior.astype(np.float32)}
x_valid = {'x': f_enc(test_x).astype(np.float32), 'mean_prior' : test_mean_prior.astype(np.float32)}
x_test = {'x': f_enc(test_x).astype(np.float32), 'mean_prior' : test_mean_prior.astype(np.float32)}
x_train = x
print x['x'].shape
print x['mean_prior'].shape
L_valid = 1
n_y = 5
n_x = x['x'].shape[0]
dim_input = (size,size)
type_qz = 'gaussianmarg'
type_pz = 'gaussianmarg'
type_px = 'gaussian'
nonlinear = 'softplus'
n_batch = 900 #23400/900 = 27
colorImg = False
#binarize = False
bernoulli_x = False
byteToFloat = False
weight_decay= float(n_batch)/train_x.shape[1]
elif dataset == 'norb':
# small NORB dataset
import anglepy.data.norb as norb
size = 48
train_x, train_y, test_x, test_y = norb.load_resized(size, binarize_y=True)
x = {'x': train_x.astype(np.float32)}
x_valid = {'x': test_x.astype(np.float32)}
L_valid = 1
n_x = train_x.shape[0]
dim_input = (size,size)
type_qz = 'gaussianmarg'
type_pz = 'gaussianmarg'
type_px = 'gaussian'
nonlinear = 'softplus'
n_batch = 900 #23400/900 = 27
colorImg = False
#binarize = False
byteToFloat = False
bernoulli_x = False
weight_decay= float(n_batch)/train_x.shape[1]
elif dataset == 'norb_pca':
# small NORB dataset
import anglepy.data.norb as norb
size = 48
train_x, train_y, test_x, test_y = norb.load_resized(size, binarize_y=True)
f_enc, f_dec, _ = pp.PCA(train_x, 0.999)
#f_enc, f_dec, _ = pp.normalize_random(train_x)
train_x = f_enc(train_x)
test_x = f_enc(test_x)
x = {'x': train_x.astype(np.float32)}
x_valid = {'x': test_x.astype(np.float32)}
L_valid = 1
n_x = train_x.shape[0]
dim_input = (size,size)
type_qz = 'gaussianmarg'
type_pz = 'gaussianmarg'
type_px = 'gaussian'
nonlinear = 'softplus'
n_batch = 900 #23400/900 = 27
colorImg = False
#binarize = False
bernoulli_x = False
byteToFloat = False
weight_decay= float(n_batch)/train_x.shape[1]
elif dataset == 'norb_normalized':
# small NORB dataset
import anglepy.data.norb as norb
size = 48
train_x, train_y, test_x, test_y = norb.load_resized(size, binarize_y=True)
#f_enc, f_dec, _ = pp.PCA(train_x, 0.99)
#f_enc, f_dec, _ = pp.normalize_random(train_x)
f_enc, f_dec, _ = pp.normalize(train_x)
train_x = f_enc(train_x)
test_x = f_enc(test_x)
x = {'x': train_x.astype(np.float32)}
x_valid = {'x': test_x.astype(np.float32)}
L_valid = 1
n_x = train_x.shape[0]
dim_input = (size,size)
type_qz = 'gaussianmarg'
type_pz = 'gaussianmarg'
type_px = 'gaussian'
nonlinear = 'softplus'
n_batch = 900 #23400/900 = 27
colorImg = False
#binarize = False
bernoulli_x = False
byteToFloat = False
weight_decay= float(n_batch)/train_x.shape[1]
elif dataset == 'svhn':
# SVHN dataset
#import anglepy.data.svhn as svhn
size = 32
train_x, train_y, test_x, test_y = np.load('data/svhn/svhn.npy')
#extra_x, extra_y = svhn.load_numpy_extra(False, binarize_y=True)
#x = {'x': np.hstack((train_x, extra_x)), 'y':np.hstack((train_y, extra_y))}
#ndict.shuffleCols(x)
x = {'x' : train_x, 'y': train_y}
print 'Performing PCA, can take a few minutes... '
cutoff = 300
if os.environ.has_key('cutoff'):
cutoff = int(os.environ['cutoff'])
color.printBlue('cutoff: '+str(cutoff))
f_enc, f_dec, pca_params = pp.PCA(x['x'][:,:10000], cutoff=cutoff, toFloat=True)
ndict.savez(pca_params, logdir+'pca_params')
print 'Done.'
n_y = 10
if os.environ.has_key('prior') and bool(int(os.environ['prior'])) == True:
color.printBlue('Loading prior')
train_mean_prior, train_y1, test_mean_prior, test_y1 = np.load('data/svhn/svhn_prior.npy')
print np.sum((train_y1 == train_y).astype(np.int32))
print np.sum((test_y1 == test_y).astype(np.int32))
else:
train_mean_prior = np.zeros((n_z,train_x.shape[1]))
test_mean_prior = np.zeros((n_z,test_x.shape[1]))
x = {'x': f_enc(x['x']).astype(np.float32), 'mean_prior':train_mean_prior.astype(np.float32)}
x_train = x
x_test = {'x': f_enc(test_x).astype(np.float32), 'mean_prior':test_mean_prior.astype(np.float32)}
x_valid = x_test
print x_train['x'].shape
print x_test['x'].shape
print train_y.shape
print test_y.shape
print x_train['mean_prior'].shape
print x_test['mean_prior'].shape
L_valid = 1
n_x = x['x'].shape[0]
dim_input = (size,size)
n_batch = 5000
n_train = 604388
n_valid = 26032
n_test = 26032
colorImg = True
bernoulli_x = False
byteToFloat = False
type_qz = 'gaussianmarg'
type_pz = 'gaussianmarg'
type_px = 'gaussian'
nonlinear = 'softplus'
else:
print 'invalid data set'
exit()
#print '2', n_x
# Construct model
from anglepy.models import GPUVAE_Z_X
learning_rate1 = 3e-4
if os.environ.has_key('stepsize'):
learning_rate1 = float(os.environ['stepsize'])
color.printBlue(str(learning_rate1))
if os.environ.has_key('preoption'):
pre = int(os.environ['preoption'])
if pre == 1:
updates = get_adam_optimizer(learning_rate=3e-4, decay1=0.9, decay2=0.999, weight_decay=0)
elif pre ==2:
updates = get_adam_optimizer(learning_rate=3e-4, decay1=0.9, decay2=0.999, weight_decay=weight_decay)
else:
raise Exception('Prepotion unknown')
with open(logdir+'hook.txt', 'a') as f:
print >>f, 'preoption ' + str(pre)
else:
updates = get_adam_optimizer(learning_rate=learning_rate1, weight_decay=weight_decay)
#print '1', n_x
model = GPUVAE_Z_X(updates, n_x, n_hidden, n_z, n_hidden[::-1], nonlinear, nonlinear, type_px, type_qz=type_qz, type_pz=type_pz, prior_sd=100, init_sd=1e-3)
if os.environ.has_key('pretrain') and bool(int(os.environ['pretrain'])) == True:
#dir = '/Users/dpkingma/results/learn_z_x_mnist_binarized_50-(500, 500)_mog_1412689061/'
#dir = '/Users/dpkingma/results/learn_z_x_svhn_bernoulli_300-(1000, 1000)_l1l2_sharing_and_1000HU_1412676966/'
#dir = '/Users/dpkingma/results/learn_z_x_svhn_bernoulli_300-(1000, 1000)_l1l2_sharing_and_1000HU_1412695481/'
#dir = '/Users/dpkingma/results/learn_z_x_mnist_binarized_50-(500, 500)_mog_1412695455/'
#dir = '/Users/dpkingma/results/gpulearn_z_x_svhn_pca_300-(500, 500)__1413904756/'
if len(n_hidden) == 1:
color.printBlue('pre-training-1-layer')
layer_str = '-500'
elif len(n_hidden) == 2:
color.printBlue('pre-training-2-layers')
layer_str = '-(500, 500)'
else:
raise Exception()
pre_str = 'models/gpulearn_z_x_'
if dataset == 'mnist':
#dir = pre_str + 'mnist_'+str(n_z)+layer_str+'_longrun/'
dir = 'models/mnist_z_x_50-500-500_longrun/'
elif dataset == 'mnist_rot':
dir = pre_str + 'mnist_rot_'+str(n_z)+layer_str+'_longrun/'
elif dataset == 'mnist_back_rand':
dir = pre_str + 'mnist_back_rand_'+str(n_z)+layer_str+'_longrun/'
elif dataset == 'mnist_back_image':
dir = pre_str + 'mnist_back_image_'+str(n_z)+layer_str+'_longrun/'
elif dataset == 'mnist_back_image_rot':
dir = pre_str + 'mnist_back_image_rot_'+str(n_z)+layer_str+'_longrun/'
elif dataset == 'rectangle':
dir = pre_str + 'rectangle_'+str(n_z)+layer_str+'_longrun/'
elif dataset == 'rectangle_image':
dir = pre_str + 'rectangle_image_'+str(n_z)+layer_str+'_longrun/'
elif dataset == 'convex':
dir = pre_str + 'convex_'+str(n_z)+layer_str+'_longrun/'
elif dataset == 'mnist_basic':
dir = pre_str + 'mnist_basic_'+str(n_z)+layer_str+'_longrun/'
if dataset == 'svhn':
if (os.environ.has_key('prior') and bool(int(os.environ['prior'])) == True):
print 'prior-------------------'
pre_dir = 'results/gpulearn_z_x_svhn_'+str(n_z)+'-500-500_prior_'+str(cutoff)+'_longrun/'
else:
pre_dir = 'results/gpulearn_z_x_svhn_'+str(n_z)+'-500-500_'+str(cutoff)+'_longrun/'
color.printBlue(pre_dir)
w = ndict.loadz(pre_dir+'w_best.ndict.tar.gz')
v = ndict.loadz(pre_dir+'v_best.ndict.tar.gz')
elif n_z == 50:
print 'n_z = 50', dir
w = ndict.loadz(dir+'w_best.ndict.tar.gz')
v = ndict.loadz(dir+'v_best.ndict.tar.gz')
else:
print 'n_z != 50'
w = ndict.loadz(pre_dir+'w_best.ndict.tar.gz')
v = ndict.loadz(pre_dir+'v_best.ndict.tar.gz')
ndict.set_value2(model.w, w)
ndict.set_value2(model.v, v)
# Some statistics for optimization
ll_valid_stats = [-1e99, 0]
# Progress hook
def hook(epoch, t, ll):
if epoch%10 != 0: return
n_batch_n = n_batch
if n_batch_n > n_valid:
n_batch_n = n_valid
ll_valid, _ = model.est_loglik(x_valid, n_samples=L_valid, n_batch=n_batch_n, byteToFloat=byteToFloat)
ll_test = ll_valid
#if not dataset == 'mnist_binarized':
if not dataset == 'svhn':
ll_test, _ = model.est_loglik(x_test, n_samples=L_valid, n_batch=n_batch, byteToFloat=byteToFloat)
# Log
ndict.savez(ndict.get_value(model.v), logdir+'v')
ndict.savez(ndict.get_value(model.w), logdir+'w')
def infer(data, n_batch=1000):
#print '--', n_batch
size = data['x'].shape[1]
res = np.zeros((sum(n_hidden), size))
res1 = np.zeros((n_z,size))
res2 = np.zeros((n_hidden[-1],size))
res3 = np.zeros((n_z,size))
for i in range(0, size, n_batch):
idx_to = min(size, i+n_batch)
x_batch = ndict.getCols(data, i, idx_to)
# may have bugs
nn_batch = idx_to - i
_x, _z, _z_confab = model.gen_xz(x_batch, {}, nn_batch)
x_samples = _z_confab['x']
for (hi, hidden) in enumerate(_z_confab['hidden']):
res[sum(n_hidden[:hi]):sum(n_hidden[:hi+1]),i:i+nn_batch] = hidden
res1[:,i:i+nn_batch] = _z_confab['mean']
res2[:,i:i+nn_batch] = _z_confab['hidden'][-1]
res3[:,i:i+nn_batch] = _z_confab['logvar']
#print '--'
return res, res1, res2, res3
#print '..', n_batch
#if not dataset == 'mnist_binarized':
if not dataset == 'svhn':
z_test, z_test1, z_test2, vv_test = infer(x_test)
z_train, z_train1, z_train2, vv_train = infer(x_train)
if ll_valid > ll_valid_stats[0]:
ll_valid_stats[0] = ll_valid
ll_valid_stats[1] = 0
ndict.savez(ndict.get_value(model.v), logdir+'v_best')
ndict.savez(ndict.get_value(model.w), logdir+'w_best')
#if not dataset == 'mnist_binarized':
if dataset == 'svhn':
pass
#np.save(logdir+'full_latent', ('z_test': z_test, 'train_y':train_y, 'test_y':test_y, 'z_train': z_train))
#np.save(logdir+'last_latent', ('z_test': z_test2, 'train_y':train_y, 'test_y':test_y, 'z_train': z_train2))
else:
sio.savemat(logdir+'full_latent.mat', {'z_test': z_test, 'train_y':train_y, 'test_y':test_y, 'z_train': z_train})
sio.savemat(logdir+'mean_latent.mat', {'z_test': z_test1, 'train_y':train_y, 'test_y':test_y, 'z_train': z_train1})
sio.savemat(logdir+'last_latent.mat', {'z_test': z_test2, 'train_y':train_y, 'test_y':test_y, 'z_train': z_train2})
else:
ll_valid_stats[1] += 1
# Stop when validation performance has not improved in the last 1000 evaluations
if ll_valid_stats[1] > 1000:
print "Finished"
with open(logdir+'hook.txt', 'a') as f:
print >>f, "Finished"
exit()
print epoch, t, ll, ll_valid, ll_test, ll_valid_stats
with open(logdir+'hook.txt', 'a') as f:
print >>f, epoch, t, ll, ll_valid, ll_test, ll_valid_stats
'''
if dataset != 'svhn':
l_t, px_t, pz_t, qz_t = model.test(x_train, n_samples=1, n_batch=n_batch, byteToFloat=byteToFloat)
print 'Elogpx', px_t, 'Elogpz', pz_t, '-Elogqz', qz_t
#sigma_square = float(os.environ['sigma_square'])
print 'var', np.mean(np.exp(vv_train)), 'q', np.mean(np.abs(z_train1)), 'p', np.mean(np.abs(train_mean_prior)), 'd', np.mean(np.abs(z_train1-train_mean_prior))
with open(logdir+'hook.txt', 'a') as f:
print >>f, 'Elogpx', px_t, 'Elogpz', pz_t, '-Elogqz', qz_t
print >>f, 'var', np.mean(np.exp(vv_train)), 'q', np.mean(np.abs(z_train1)), 'p', np.mean(np.abs(train_mean_prior)), 'd', np.mean(np.abs(z_train1-train_mean_prior))
'''
# Graphics
if gfx and epoch%gfx_freq == 0:
#tail = '.png'
tail = '-'+str(epoch)+'.png'
v = {i: model.v[i].get_value() for i in model.v}
w = {i: model.w[i].get_value() for i in model.w}
if 'pca' not in dataset and 'random' not in dataset and 'normalized' not in dataset and 'zca' not in dataset:
if 'w0' in v:
image = paramgraphics.mat_to_img(f_dec(v['w0'][:].T), dim_input, True, colorImg=colorImg)
image.save(logdir+'q_w0'+tail, 'PNG')
image = paramgraphics.mat_to_img(f_dec(w['out_w'][:]), dim_input, True, colorImg=colorImg)
image.save(logdir+'out_w'+tail, 'PNG')
if 'out_unif' in w:
image = paramgraphics.mat_to_img(f_dec(w['out_unif'].reshape((-1,1))), dim_input, True, colorImg=colorImg)
image.save(logdir+'out_unif'+tail, 'PNG')
if n_z == 2:
n_width = 10
import scipy.stats
z = {'z':np.zeros((2,n_width**2))}
for i in range(0,n_width):
for j in range(0,n_width):
z['z'][0,n_width*i+j] = scipy.stats.norm.ppf(float(i)/n_width+0.5/n_width)
z['z'][1,n_width*i+j] = scipy.stats.norm.ppf(float(j)/n_width+0.5/n_width)
x, _, _z = model.gen_xz({}, z, n_width**2)
if dataset == 'mnist':
x = 1 - _z['x']
image = paramgraphics.mat_to_img(f_dec(_z['x']), dim_input)
image.save(logdir+'2dmanifold'+tail, 'PNG')
else:
if 'norb' in dataset or dataset=='svhn':
nn_batch_nn = 64
else:
nn_batch_nn = 144
if not(os.environ.has_key('train_residual') and bool(int(os.environ['train_residual'])) == True) and (os.environ.has_key('prior') and bool(int(os.environ['prior'])) == True):
mp_in = np.random.randint(0,x_train['mean_prior'].shape[1],nn_batch_nn)
m_p = x_train['mean_prior'][:,mp_in]
s_s = 1
if os.environ.has_key('sigma_square'):
s_s = float(os.environ['sigma_square'])
x_samples = model.gen_xz_prior({}, {}, m_p, s_s, n_batch=nn_batch_nn)
x_samples = x_samples['x']
m_p1 = (np.ones((n_z, nn_batch_nn)).T * np.mean(x_train['mean_prior'], axis = 1)).T
x_samples1 = model.gen_xz_prior({}, {}, m_p1.astype(np.float32), s_s, n_batch=nn_batch_nn)
image = paramgraphics.mat_to_img(f_dec(x_samples1['x']), dim_input, colorImg=colorImg)
image.save(logdir+'mean_samples-prior'+tail, 'PNG')
x_samples11 = model.gen_xz_prior11({}, {}, m_p, s_s, n_batch=nn_batch_nn)
image = paramgraphics.mat_to_img(f_dec(x_samples11['x']), dim_input, colorImg=colorImg)
image.save(logdir+'prior-image'+tail, 'PNG')
else:
_x, _, _z_confab = model.gen_xz({}, {}, n_batch=nn_batch_nn)
x_samples = _z_confab['x']
image = paramgraphics.mat_to_img(f_dec(x_samples), dim_input, colorImg=colorImg)
image.save(logdir+'samples-prior'+tail, 'PNG')
#x_samples = _x['x']
#image = paramgraphics.mat_to_img(x_samples, dim_input, colorImg=colorImg)
#image.save(logdir+'samples2'+tail, 'PNG')
else:
# Model with preprocessing
if 'w0' in v:
tmp = f_dec(v['w0'][:].T)
#print dim_input
#print tmp.shape
if 'zca' in dataset or dataset=='svhn':
tmp = zca_dec(zca_mean, zca_winv, tmp)
image = paramgraphics.mat_to_img(tmp, dim_input, True, colorImg=colorImg)
image.save(logdir+'q_w0'+tail, 'PNG')
tmp = f_dec(w['out_w'][:])
if 'zca' in dataset:
tmp = zca_dec(zca_mean, zca_winv, tmp)
image = paramgraphics.mat_to_img(tmp, dim_input, True, colorImg=colorImg)
image.save(logdir+'out_w'+tail, 'PNG')
if dataset == 'svhn':
nn_batch_nn = 64
else:
nn_batch_nn = 144
if not(os.environ.has_key('train_residual') and bool(int(os.environ['train_residual'])) == True) and (os.environ.has_key('prior') and bool(int(os.environ['prior'])) == True):
mp_in = np.random.randint(0,x_train['mean_prior'].shape[1],nn_batch_nn)
m_p = x_train['mean_prior'][:,mp_in]
s_s = 1
if os.environ.has_key('sigma_square'):
s_s = float(os.environ['sigma_square'])
x_samples = model.gen_xz_prior({}, {}, m_p, s_s, n_batch=nn_batch_nn)
x_samples = zca_dec(zca_mean, zca_winv,x_samples['x'])
x_samples = np.minimum(np.maximum(x_samples, 0), 1)
x_samples11 = model.gen_xz_prior11({}, {}, m_p, s_s, n_batch=nn_batch_nn)
x_samples11 = zca_dec(zca_mean,zca_winv,x_samples11['x'])
x_samples11 = np.minimum(np.maximum(x_samples11, 0), 1)
image = paramgraphics.mat_to_img(x_samples11, dim_input, colorImg=colorImg)
image.save(logdir+'prior-image'+tail, 'PNG')
else:
_x, _z, _z_confab = model.gen_xz({}, {}, n_batch=nn_batch_nn)
x_samples = f_dec(_z_confab['x'])
x_samples = np.minimum(np.maximum(x_samples, 0), 1)
image = paramgraphics.mat_to_img(x_samples, dim_input, colorImg=colorImg)
image.save(logdir+'samples'+tail, 'PNG')
'''
def infer(data, n_batch=1000):
#print '--', n_batch
size = data['x'].shape[1]
res = np.zeros((sum(n_hidden), size))
res1 = np.zeros((n_z,size))
res2 = np.zeros((n_hidden[-1],size))
res3 = np.zeros((n_z,size))
for i in range(0, size, n_batch):
idx_to = min(size, i+n_batch)
x_batch = ndict.getCols(data, i, idx_to)
# may have bugs
nn_batch = idx_to - i
_x, _z, _z_confab = model.gen_xz(x_batch, {}, nn_batch)
x_samples = _z_confab['x']
for (hi, hidden) in enumerate(_z_confab['hidden']):
res[sum(n_hidden[:hi]):sum(n_hidden[:hi+1]),i:i+nn_batch] = hidden
res1[:,i:i+nn_batch] = _z_confab['mean']
res2[:,i:i+nn_batch] = _z_confab['hidden'][-1]
res3[:,i:i+nn_batch] = _z_confab['logvar']
#
return res, res1, res2, res3
#print n_batch
#if not dataset == 'mnist_binarized':
z_test, z_test1, z_test2, vv_test = infer(x_test)
z_train, z_train1, z_train2, vv_train = infer(x_train)
l_t, px_t, pz_t, qz_t = model.test(x_train, n_samples=1, n_batch=n_batch, byteToFloat=byteToFloat)
print 'Elogpx', px_t, 'Elogpz', pz_t, '-Elogqz', qz_t
#sigma_square = float(os.environ['sigma_square'])
print 'var', np.mean(np.exp(vv_train)), 'q', np.mean(np.abs(z_train1)), 'p', np.mean(np.abs(train_mean_prior)), 'd', np.mean(np.abs(z_train1-train_mean_prior))
with open(logdir+'hook.txt', 'a') as f:
print >>f, 'Elogpx', px_t, 'Elogpz', pz_t, '-Elogqz', qz_t
print >>f, 'var', np.mean(np.exp(vv_train)), 'q', np.mean(np.abs(z_train1)), 'p', np.mean(np.abs(train_mean_prior)), 'd', np.mean(np.abs(z_train1-train_mean_prior))
#if not dataset == 'mnist_binarized':
sio.savemat(logdir+'full_latent.mat', {'z_test': z_test, 'train_y':train_y, 'test_y':test_y, 'z_train': z_train})
sio.savemat(logdir+'mean_latent.mat', {'z_test': z_test1, 'train_y':train_y, 'test_y':test_y, 'z_train': z_train1})
sio.savemat(logdir+'last_latent.mat', {'z_test': z_test2, 'train_y':train_y, 'test_y':test_y, 'z_train': z_train2})
'''
# Optimize
#SFO
dostep = epoch_vae_adam(model, x, n_batch=n_batch, bernoulli_x=bernoulli_x, byteToFloat=byteToFloat)
loop_va(dostep, hook)
pass
def loop_va(doEpoch, hook, n_epochs=1201):
t0 = time.time()
for t in xrange(1, n_epochs):
L = doEpoch()
hook(t, time.time() - t0, L)
print 'Optimization loop finished'
def epoch_vae_adam(model, x, n_batch=100, convertImgs=False, bernoulli_x=False, byteToFloat=False):
print 'Variational Auto-Encoder', n_batch
def doEpoch():
from collections import OrderedDict
n_tot = x.itervalues().next().shape[1]
idx_from = 0
L = 0
while idx_from < n_tot:
idx_to = min(n_tot, idx_from+n_batch)
x_minibatch = ndict.getCols(x, idx_from, idx_to)
idx_from += n_batch
if byteToFloat: x_minibatch['x'] = x_minibatch['x'].astype(np.float32)/256.
if bernoulli_x: x_minibatch['x'] = np.random.binomial(n=1, p=x_minibatch['x']).astype(np.float32)
# Do gradient ascent step
L += model.evalAndUpdate(x_minibatch, {}).sum()
#model.profmode.print_summary()
L /= n_tot
return L
return doEpoch
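# Note: doEpoch streams column-minibatches through the model; with
# bernoulli_x the inputs are re-binarized each epoch by sampling every pixel
# ~ Bernoulli(p=x) (dynamic binarization), and L accumulates the summed
# lower bound, normalized by the number of datapoints.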
def get_adam_optimizer(learning_rate=0.001, decay1=0.1, decay2=0.001, weight_decay=0.0):
print 'AdaM', learning_rate, decay1, decay2, weight_decay
def shared32(x, name=None, borrow=False):
return theano.shared(np.asarray(x, dtype='float32'), name=name, borrow=borrow)
def get_optimizer(w, g):
updates = OrderedDict()
it = shared32(0.)
updates[it] = it + 1.
fix1 = 1.-(1.-decay1)**(it+1.) # To make estimates unbiased
fix2 = 1.-(1.-decay2)**(it+1.) # To make estimates unbiased
lr_t = learning_rate * T.sqrt(fix2) / fix1
for i in w:
gi = g[i]
if weight_decay > 0:
gi -= weight_decay * w[i] #T.tanh(w[i])
# mean_squared_grad := E[g^2]_{t-1}
mom1 = shared32(w[i].get_value() * 0.)
mom2 = shared32(w[i].get_value() * 0.)
# Update moments
mom1_new = mom1 + decay1 * (gi - mom1)
mom2_new = mom2 + decay2 * (T.sqr(gi) - mom2)
# Compute the effective gradient and effective learning rate
effgrad = mom1_new / (T.sqrt(mom2_new) + 1e-10)
effstep_new = lr_t * effgrad
# Do update
w_new = w[i] + effstep_new
# Apply update
updates[w[i]] = w_new
updates[mom1] = mom1_new
updates[mom2] = mom2_new
return updates
return get_optimizer
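# Usage sketch (names as above): the returned factory is what gets passed to
# GPUVAE_Z_X as `updates`; the model is assumed to call it with its shared
# parameters w and symbolic gradients g to build the Theano update dict:
#   updates = get_adam_optimizer(learning_rate=3e-4, weight_decay=weight_decay)
#   # inside the model (assumed): theano.function(..., updates=updates(w, g))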
|
import praw
import smtplib
import requests
import parsel
import re
import io
import json
import os
from email.header import Header
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from argparse import ArgumentParser
from premailer import Premailer
HEADERS = requests.utils.default_headers()
HEADERS.update({'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:52.0) Gecko/20100101 Firefox/52.0'})
SCRIPT_PATH = os.path.dirname(os.path.realpath(__file__))
REDDIT_CSS = os.path.join(SCRIPT_PATH, 'css', 'reddit.css')
def _concat_css(input_name, output):
with open(input_name, encoding='utf-8') as f:
output.write('\n<style>\n')
output.write(f.read())
output.write('\n</style>\n')
def _extract_external_css(selector):
for p in selector.xpath("/html/head/link[@rel='stylesheet']"):
href = re.sub(r"^//", r"https://", p.xpath("@href").extract_first())
sheet = requests.get(href, headers=HEADERS).text if href else ""
yield sheet
def weekly_page(subreddit, file, css=None):
if isinstance(file, str):
with open(file, 'w', encoding='utf-8') as f:
return weekly_page(subreddit, file=f, css=css)
r = requests.get("https://www.reddit.com/r/{}/top/?sort=top&t=week".format(subreddit),
headers=HEADERS)
if r.status_code != 200:
raise RuntimeError("Request status code is {}.".format(r.status_code))
if r.encoding.lower() != 'utf-8':
raise RuntimeError("Request didn't return a UTF-8 output.")
sel = parsel.Selector(text=r.text)
file.write('<!DOCTYPE html>')
file.write('<html>')
if css == 1: # Download External
file.write('<head>')
file.write('<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">')
for stylesheet in _extract_external_css(sel):
file.write('\n<style>\n')
file.write(stylesheet)
file.write('\n</style>\n')
file.write('</head>')
elif css == 2: # Keep External
head = sel.xpath("/html/head").extract_first()
head = re.sub(r'="//', '="https://', head)
file.write(head)
elif isinstance(css, str):
file.write('<head>')
file.write('<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">')
_concat_css(css, file)
file.write('</head>')
elif isinstance(css, list):
file.write('<head>')
file.write('<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">')
for c in css:
_concat_css(c, file)
file.write('</head>')
else:
file.write('<head>')
file.write('<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">')
file.write('</head>')
file.write('<body class="">')
file.write('<div class="content" role="main">')
for spacer in sel.xpath("/html/body/div[@class='content']/div[@class='spacer' and style]"):
content = spacer.extract()
content = re.sub(r'="//', r'="https://', content)
file.write(content)
file.write('</div>')
file.write('</body>')
file.write('</html>')
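# Usage sketch (hypothetical subreddit): write the weekly top posts of
# r/python to a standalone HTML file with the bundled stylesheet inlined:
#   weekly_page('python', 'python_weekly.html', css=REDDIT_CSS)
# css may also be 1 (download external stylesheets), 2 (keep external
# links), or a list of local CSS paths, per the branches above.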
def send_email(subject, to, message):
fromaddr = os.environ['REWE_SENDER']
frompass = os.environ['REWE_PASS']
msg = MIMEMultipart('alternative')
msg['Subject'] = Header(subject, 'utf-8')
msg['From'] = fromaddr
msg['To'] = to
msg.attach(MIMEText('Weekly Subreddit', 'plain'))
msg.attach(MIMEText(message, 'html'))
with smtplib.SMTP(host='smtp.gmail.com', port=587) as server:
server.ehlo()
server.starttls()
server.ehlo()
server.login(fromaddr, frompass)
server.sendmail(fromaddr, [to], msg.as_string())
def user_subreddits(token):
reddit = praw.Reddit(client_id=os.environ['REWE_APP_ID'],
client_secret=os.environ['REWE_APP_SECRET'],
user_agent='Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:52.0) Gecko/20100101 Firefox/52.0',
refresh_token=token)
return reddit.user.subreddits()
def send_newsletter(token, email):
for subreddit in user_subreddits(token):
subreddit = subreddit.display_name
with io.StringIO() as body:
print("Sending {} weekly for {}...".format(subreddit, email))
weekly_page(subreddit, body, css=REDDIT_CSS)
email_body = Premailer(body.getvalue(),
base_url='https://www.reddit.com',
disable_leftover_css=True).transform()
send_email(subject='Reddit weekly r/{}'.format(subreddit),
to=email, message=email_body)
def main(filepath):
with io.open(filepath, 'r') as file:
users = json.load(file)
for email in users:
token = users[email]
send_newsletter(token, email)
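# Expected shape of the --users JSON file (hypothetical tokens):
#   {"alice@example.com": "refresh-token-1",
#    "bob@example.com": "refresh-token-2"}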
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument('-u', '--users', required=True, help='load users and their tokens from a JSON file')
opt = parser.parse_args()
main(opt.users)
|
from __future__ import unicode_literals
from django.contrib import admin
from blog.models import Post
class PostAdmin(admin.ModelAdmin):
list_display = ['id', 'title', 'created', 'status']
list_filter = ('status', )
admin.site.register(Post, PostAdmin)
|
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Test for BitBucket PR 126:
SConf doesn't work well with 'io' module on pre-3.0 Python. This is because
io.StringIO (used by SCons.SConf.Streamer) accepts only unicode strings.
Non-unicode input causes it to raise an exception.
"""
import TestSCons
test = TestSCons.TestSCons()
test.write('SConstruct', """
def hello(target, source, env):
import traceback
try:
print 'hello!\\n' # this breaks the script
with open(env.subst('$TARGET', target = target),'w') as f:
f.write('yes')
except:
# write to file, as stdout/stderr is broken
traceback.print_exc(file=open('traceback','w'))
return 0
def CheckHello(context):
import sys
context.Display('Checking whether hello works... ')
stat,out = context.TryAction(hello,'','.in')
if stat and out:
context.Result(out)
else:
context.Result('failed')
return out
env = Environment()
cfg = Configure(env)
cfg.AddTest('CheckHello', CheckHello)
cfg.CheckHello()
env = cfg.Finish()
""")
test.run(arguments = '.')
test.must_contain_all_lines(test.stdout(), ['Checking whether hello works... yes'])
test.must_not_exist('traceback')
test.pass_test()
|
import os
import inspect
import vcr
def build_path(function):
return os.path.join(os.path.dirname(inspect.getfile(function)),
'cassettes',
function.__module__.split('.')[1],
function.__name__ + '.yml')
vcr = vcr.config.VCR(
func_path_generator=build_path,
cassette_library_dir='tests/cassettes',
match_on=['uri', 'method'],
decode_compressed_response=True,
record_mode='once'
)
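# Usage sketch (hypothetical test): use_cassette records HTTP traffic on the
# first run and replays it afterwards; build_path above places the cassette
# next to the test module under cassettes/<package>/<test_name>.yml.
#   @vcr.use_cassette
#   def test_fetch_user():
#       requests.get('https://api.example.com/user')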
|
import os
class Config(object):
SPOTIPY_REDIRECT_URI = os.environ['SPOTIPY_REDIRECT_URI']
SPOTIPY_CLIENT_ID = os.environ['SPOTIPY_CLIENT_ID']
SPOTIPY_CLIENT_SECRET = os.environ['SPOTIPY_CLIENT_SECRET']
SPOTIFY_ACCESS_SCOPE = 'playlist-modify-public playlist-modify-private playlist-read-private user-library-read'
###########
# Options #
###########
# TRACKS_PER_ARTIST #
# Number of tracks per artist to add to the playlist.
# I recommend 5 or less. Max is 10.
TRACKS_PER_ARTIST = 3
# COLLATE #
# By default, the playlist will be ordered like:
# - ARTIST A TRACK 1
# - ARTIST A TRACK 2
# - ARTIST A TRACK 3
# - ARTIST A TRACK 4
# - ARTIST A TRACK 5
# - ARTIST B TRACK 1
# - ARTIST B TRACK 2
# - ARTIST B TRACK 3
# ...
# if COLLATE is set to True, it will instead be ordered like so:
# - ARTIST A TRACK 1
# - ARTIST B TRACK 1
# - ARTIST C TRACK 1
# ...
# - ARTIST Z TRACK 1
# - ARTIST A TRACK 2
# - ARTIST B TRACK 2
# ...
COLLATE = False
# PUBLIC #
# Default False. Set True to make your generated playlist public.
PUBLIC = False
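# Usage sketch (assumes this module is importable as `config` and the
# SPOTIPY_* environment variables are exported):
#   from config import Config
#   scope = Config.SPOTIFY_ACCESS_SCOPE
#   per_artist = min(Config.TRACKS_PER_ARTIST, 10)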
|
candidates = set([str(a * b) for a in range(100, 1000) for b in range(100, 1000)])
candidates = filter(lambda x: x == x[::-1], candidates)
print max([int(x) for x in candidates])
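# The scan above solves Project Euler problem 4: the largest palindrome that
# is a product of two 3-digit numbers. With these bounds it prints 906609
# (913 * 993).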
|
from distutils.core import setup
import py2exe
build_dir = 'dist'  # assumed py2exe output directory
data_files = [('',['settings.ini',
'LICENSE',
'README.md']),
('sessions',[])]
options = {'py2exe': {
'dist_dir': build_dir}}
setup(
windows=['youtube_downloader.py'],
data_files=data_files,
options=options)
|
import random
import pytest
from microbial_ai.regulation import Event, Action, Memory
@pytest.fixture
def random_action():
return Action(type='fixed', phi={'rxn1': (random.random(), '+')})
@pytest.fixture
def random_event(random_action):
return Event(state=random.randint(0, 100), action=random_action,
next_state=random.randint(0, 100), reward=random.random())
@pytest.mark.usefixtures("random_event")
class TestMemory:
"""
Tests for the Memory class
"""
def test_initialization(self):
memory = Memory(1000)
assert memory.capacity == 1000
assert memory.idx == 0
def test_add_event(self, random_event):
memory = Memory(1000)
memory.add_event(random_event)
assert len(memory.memory) == 1
assert memory.idx == 1
for _ in range(1500):
memory.add_event(random_event)
assert len(memory.memory) == memory.capacity
assert memory.idx == (1000 - 500 + 1)
def test_sample(self, random_event):
memory = Memory(1000)
with pytest.raises(ValueError):
memory.sample(100)
for _ in range(400):
memory.add_event(random_event)
assert len(memory.sample(200)) == 200
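# The assertions above treat Memory as a fixed-capacity ring buffer: after
# 1501 inserts into capacity 1000 the oldest events have been overwritten
# and the write index has wrapped to 501 (== 1000 - 500 + 1).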
|
from .utils import do, do_ex, trace
from .version import meta
from os.path import abspath, realpath
FILES_COMMAND = 'git ls-files'
DEFAULT_DESCRIBE = 'git describe --dirty --tags --long --match *.*'
def parse(root, describe_command=DEFAULT_DESCRIBE):
real_root, _, ret = do_ex('git rev-parse --show-toplevel', root)
if ret:
return
trace('real root', real_root)
if abspath(realpath(real_root)) != abspath(realpath(root)):
return
rev_node, _, ret = do_ex('git rev-parse --verify --quiet HEAD', root)
if ret:
return meta('0.0')
rev_node = rev_node[:7]
out, err, ret = do_ex(describe_command, root)
if '-' not in out and '.' not in out:
revs = do('git rev-list HEAD', root)
count = revs.count('\n')
if ret:
out = rev_node
return meta('0.0', distance=count + 1, node=out)
if ret:
return
dirty = out.endswith('-dirty')
if dirty:
out = out.rsplit('-', 1)[0]
tag, number, node = out.rsplit('-', 2)
number = int(number)
if number:
return meta(tag, distance=number, node=node, dirty=dirty)
else:
return meta(tag, dirty=dirty, node=node)
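# Example (hypothetical describe output): 'v1.2-3-gabc1234-dirty' sets
# dirty=True, then rsplit('-', 2) yields tag='v1.2', distance=3,
# node='gabc1234', giving meta('v1.2', distance=3, node='gabc1234', dirty=True).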
|
import logging
import utils
import options
_Warning = logging.Warning
_Info = logging.Info
_site_setup = []
_user_setup = {}
_tools_setup = {}
_tools_post_setup = {}
def ResetSetup( site_setup = _site_setup,
user_setup = _user_setup,
tools_setup = _tools_setup,
tools_post_setup = _tools_post_setup ):
if __debug__:
_Info( "ResetSetup" )
del site_setup[:]
user_setup.clear()
tools_setup.clear()
tools_post_setup.clear()
def AddSiteSetup( setup_function, _site_setup = _site_setup, toList = utils.toList ):
_site_setup.append( setup_function )
def siteSetup( setup_function ):
AddSiteSetup( setup_function )
return setup_function
def SiteSetup( options, os_env ):
global _site_setup
for f in _site_setup:
if __debug__:
_Info( "Site setup: " + f.__name__ )
f( options = options, os_env = os_env )
UserSetup( options, os_env )
def AddUserSetup( setup_id, setup_function, user_setup = _user_setup ):
user_setup.setdefault( setup_id, [] ).append( setup_function )
def UserSetup( options, os_env, user_setup = _user_setup ):
for s in options.setup.Value():
if __debug__:
_Info( "User setup: " + s )
for f in user_setup.get( s, [] ):
f( options = options, os_env = os_env )
def AddToolSetup( tool_name, setup_function, tools_setup = _tools_setup, toList = utils.toList ):
tools_setup.setdefault( tool_name, [] ).append( setup_function )
def toolSetup( tool_name ):
def addToolSetup( setup_function ):
AddToolSetup( tool_name, setup_function )
return setup_function
return addToolSetup
def _tool_setup( tool_name, env, tools_setup = _tools_setup ):
options = env.get( 'AQL_OPTIONS' )
if options is None:
if __debug__:
_Warning( "Tool setup: No AQL_OPTIONS in env: " + id(env) )
return
options.SetEnv( env )
os_env = env['ENV']
setup_functions = tools_setup.get( tool_name, [] )
if __debug__:
if not setup_functions:
#~ _Info( "Setup tool: No setup for tool: " + tool_name )
return
for f in setup_functions:
if __debug__:
_Info( "Tool setup: " + tool_name + ' (' + f.__name__ + ')' )
if f( env = env, options = options, os_env = os_env ):
break
def AddToolPostSetup( tool_name, setup_function, tools_post_setup = _tools_post_setup ):
tools_post_setup.setdefault( tool_name, [] ).append( setup_function )
def toolPostSetup( tool_name ):
def addToolPostSetup( setup_function ):
AddToolPostSetup( tool_name, setup_function )
return setup_function
return addToolPostSetup
def _tool_post_setup( tool_name, env, tools_post_setup = _tools_post_setup ):
options = env.get( 'AQL_OPTIONS' )
if options is None:
return
options.SetEnv( env )
os_env = env['ENV']
setup_functions = tools_post_setup.get( tool_name, [] )
if __debug__:
if not setup_functions:
#~ _Info( "Tool post setup: No setup for tool: " + tool_name )
return
for f in setup_functions:
if __debug__:
_Info( "Tool post setup: " + tool_name + ' (' + f.__name__ + ')' )
f( env = env, options = options, os_env = os_env )
def _tool_exists( self, env ):
if self._aql_is_exist is None:
_tool_setup( self.name, env )
self._aql_is_exist = self._aql_exists( env )
return self._aql_is_exist
def _tool_generate( self, env ):
if self._aql_is_exist is None:
if not _tool_exists( self, env ):
_Warning( "Tool: '%s' has not been found, but it has been added." % (self.name) )
self._aql_generate( env )
_tool_post_setup( self.name, env )
def _init_tool( self, name, toolpath = None, **kw ):
    _SCons_Tool_Tool_init( self, name, toolpath if toolpath is not None else [], **kw )
self._aql_is_exist = None
self._aql_generate = self.generate
self._aql_exists = self.exists
self.exists = lambda env, self = self: _tool_exists( self, env )
self.generate = lambda env, self = self: _tool_generate( self, env )
import SCons.Tool
_SCons_Tool_Tool_init = SCons.Tool.Tool.__init__
SCons.Tool.Tool.__init__ = _init_tool
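# --- Hedged usage sketch (not part of the original module) ---
# A minimal example of registering setup callbacks with the decorators above.
# The function bodies are made up; real callbacks receive the aql options
# object, the OS environment dict, and (for tool setups) the SCons Environment.
#
# @siteSetup
# def mySiteSetup( options, os_env ):
#     os_env.setdefault( 'LANG', 'C' )
#
# @toolSetup( 'gcc' )
# def setupGcc( env, options, os_env ):
#     # returning a truthy value stops the remaining setup functions for this tool
#     return False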
|
import os
from src.core import prep
from sgprocessor import *
def ProcessSg(p, opts):
    if opts.anno:
if 'BEDDB' not in os.environ:
            p.error('$BEDDB is not set. See README')
str_path_sgfq = opts.sg
str_nm = os.path.basename(os.path.splitext(opts.sg)[0])
str_proj = 'aux'
str_path_proj = os.path.join(opts.tdir, str_proj)
if not os.path.exists(str_path_proj):
os.makedirs(str_path_proj)
str_path_sgpsam = os.path.join(str_path_proj, str_nm + '.sgpsam')
str_path_sgsam = os.path.join(str_path_proj, str_nm + '.sgsam')
str_path_sg = os.path.join(opts.tdir, str_nm + '.sg')
print('Mapping sgRNA seq to ref genome with Bwa...')
prep.CallBWA(str_path_sgfq, '', opts.ref, str_path_sgpsam, False, opts.thrd)
prep.FilterSam(str_path_sgpsam, str_path_sgsam, False)
print('Done')
print('Processing sgsam...')
OrganizeSgsam(str_path_sgsam, str_path_sg)
print('Done')
    if opts.anno:
str_path_sgbed = os.path.join(str_path_proj, str_nm + '.sgbed')
str_path_sgmap = os.path.join(str_path_proj, str_nm + '.sgmap')
str_path_sga = os.path.join(opts.tdir, str_nm + '.sga')
print('Annotating sgRNA...')
int_status = AnnotateSg(str_path_sgsam, opts.ref, str_path_sgbed, str_path_sgmap)
if int_status == 1:
print('Annotated with RefSeq')
        elif int_status == 2:
print('Annotated with RefSeq and UCSC Gene')
        elif int_status == 3:
print('Annotated with RefSeq, UCSC Gene and GENCODE')
elif int_status == 4:
print('Annotated with RefSeq and UCSC Gene')
print('Warning: Some are marked with None')
elif int_status == 5:
print('Annotated with RefSeq, UCSC Gene and GENCODE')
print('Warning: Some are marked with None')
print('Done')
print('Merging sg and sgmap...')
MergeSg(str_path_sg, str_path_sgmap, str_path_sga)
print('Done')
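# --- Hedged usage sketch (not part of the original module) ---
# ProcessSg expects an argparse-style parser `p` (used only for p.error) and
# an options namespace with the attributes read above. All paths are made up.
#
# import argparse
# p = argparse.ArgumentParser()
# opts = argparse.Namespace(anno=False, sg='library_sg.fq', tdir='/tmp/sgout',
#                           ref='hg38.fa', thrd=4)
# ProcessSg(p, opts)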
|
import pandas as pd
import numpy as np
from dateutil.relativedelta import relativedelta
def get_first_visit_date(data_patient):
''' Determines the first visit for a given patient'''
#IDEA Could be parallelized in Dask
data_patient['first_visit_date'] = min(data_patient.visit_date)
return data_patient
def subset_analysis_data(data, date_analysis):
''' Function that subsets the full dataset to only the data available for a certain analysis date'''
    if isinstance(data.date_entered.iloc[0], str):
data.date_entered = pd.to_datetime(data.date_entered)
data = data[data.date_entered < date_analysis]
return data
def subset_cohort(data, horizon_date, horizon_time, bandwidth):
    ''' Function that subsets data to the cohort that initiated care between (horizon_time + bandwidth) and horizon_time days before the horizon_date'''
horizon_date = pd.to_datetime(horizon_date)
data['first_visit_date'] = pd.to_datetime(data['first_visit_date'])
cohort_data = data[(data['first_visit_date'] >= horizon_date - relativedelta(days=horizon_time + bandwidth)) &
(data['first_visit_date'] < horizon_date - relativedelta(days=horizon_time))]
return cohort_data
def status_patient(data_patient, reference_date, grace_period):
''' Determines the status of a patient at a given reference_date, given the data available at a given analysis_date
TODO Also select the available data for Death and Transfer and other outcomes based on data entry time
'''
#IDEA Could be parallelized in Dask
data_patient = get_first_visit_date(data_patient)
date_out = pd.NaT
date_last_appointment = pd.to_datetime(max(data_patient.next_visit_date))
late_time = reference_date - date_last_appointment
    if late_time.days > grace_period:
        status = 'LTFU'
        date_out = date_last_appointment
    else:
        status = 'Followed'
    if pd.notna(data_patient.reasonDescEn.iloc[0]) and (pd.to_datetime(data_patient.discDate.iloc[0]) < reference_date):
status = data_patient.reasonDescEn.iloc[0]
date_out = pd.to_datetime(data_patient.discDate.iloc[0])
return pd.DataFrame([{'status': status,
'late_time': late_time,
'last_appointment': date_last_appointment,
'date_out':date_out ,
'first_visit_date':data_patient.first_visit_date.iloc[0],
'facility':data_patient.facility.iloc[0]}])
def horizon_outcome(data_cohort, reference_date, horizon_time):
# TODO Make sure dates are dates
data_cohort['first_visit_date'] = pd.to_datetime(data_cohort['first_visit_date']) #TODO This conversion should happen earlier
data_cohort.loc[:, 'horizon_date'] = data_cohort['first_visit_date'] + np.timedelta64(horizon_time, 'D')
data_cohort.loc[: , 'horizon_status'] = data_cohort['status']
# If the patient exited the cohort after his horizon date, still consider him followed
# BUG This is marginally invalid, for example if a patient was considered LTFU before he died
    data_cohort.loc[~(data_cohort['status'] == 'Followed') & (data_cohort['date_out'] > data_cohort['horizon_date']), 'horizon_status'] = 'Followed'
return data_cohort
def n_visits(data, month):
reporting_month = pd.to_datetime(data['visit_date']).dt.to_period('M')
n_vis = sum(reporting_month == month)
return n_vis
def make_report(data, reference_date, date_analysis, grace_period, horizon_time, cohort_width):
    assert reference_date <= date_analysis, 'You should not analyze a period before you have the data (the date of analysis must not precede the reference date)'
    if isinstance(reference_date, str):
        reference_date = pd.to_datetime(reference_date)
    if isinstance(date_analysis, str):
        date_analysis = pd.to_datetime(date_analysis)
report_data = subset_analysis_data(data, date_analysis)
if len(report_data) > 0:
month = reference_date.to_period('M') - 1
n_visits_month = report_data.groupby('facility').apply(n_visits, month)
        df_status = report_data.groupby('patient_id').apply(status_patient, reference_date, grace_period)
cohort_data = subset_cohort(df_status, reference_date, horizon_time, cohort_width)
# print(df_status.head())
        horizon_outcome_data = horizon_outcome(cohort_data, month, horizon_time)
transversal_reports = df_status.groupby('facility').status.value_counts()
longitudinal_reports = horizon_outcome_data.groupby('facility').status.value_counts()
out_reports = {'transversal':transversal_reports,
'longitudinal':longitudinal_reports,
'n_visits':n_visits_month}
return out_reports
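# --- Hedged usage sketch (not part of the original module) ---
# `data` is assumed to be a visit-level DataFrame with the columns used above
# (patient_id, facility, visit_date, next_visit_date, date_entered,
# reasonDescEn, discDate).
#
# reports = make_report(data,
#                       reference_date='2017-06-30',
#                       date_analysis='2017-09-30',
#                       grace_period=90,
#                       horizon_time=365,
#                       cohort_width=30)
# reports['transversal']    # per-facility counts of patient statuses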
|
import os
import sys
from Bio.Seq import Seq
def main(*args, **kwargs):
fpath = os.path.join(os.getcwd(), args[-2])
tmp = []
with open(fpath,'r') as f:
for line in f:
txt = line.strip()
tmp.append(txt)
S1 = set(tmp)
S2 = set([str(Seq(s).reverse_complement()) for s in tmp])
S = S1.union(S2)
res = []
for s in S:
res.append((s[:-1],s[1:]))
    for t1, t2 in res:
        print('(%s, %s)' % (t1, t2))
    out = os.path.join(os.getcwd(), args[-1])
    with open(out, 'w') as f:
        for t1, t2 in res:
            f.write('(%s, %s)\n' % (t1, t2))
if __name__ == '__main__':
main(*sys.argv)
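# --- Hedged usage note (not part of the original script) ---
# The script reads one k-mer per line, adds reverse complements (via
# Biopython's Seq.reverse_complement) and prints/writes the (prefix, suffix)
# overlap pairs. The file names below are made up; args[-2] is the input path
# and args[-1] the output path, both relative to the working directory:
#
#   python this_script.py kmers.txt edges.txt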
|
from ballot.models import OFFICE, CANDIDATE, POLITICIAN, MEASURE, KIND_OF_BALLOT_ITEM_CHOICES
from django.db import models
from exception.models import handle_exception, handle_record_found_more_than_one_exception,\
handle_record_not_saved_exception
import wevote_functions.admin
from wevote_functions.functions import convert_to_int, positive_value_exists
from wevote_settings.models import fetch_next_we_vote_id_quick_info_integer, \
fetch_next_we_vote_id_quick_info_master_integer, fetch_site_unique_id_prefix
SPANISH = 'es'
ENGLISH = 'en'
TAGALOG = 'tl'
VIETNAMESE = 'vi'
CHINESE = 'zh'
LANGUAGE_CHOICES = (
(ENGLISH, 'English'),
(SPANISH, 'Spanish'),
(TAGALOG, 'Tagalog'),
(VIETNAMESE, 'Vietnamese'),
(CHINESE, 'Chinese'),
)
NOT_SPECIFIED = 'not_specified'
BALLOTPEDIA = 'ballotpedia'
DIRECT_ENTRY = 'direct'
WIKIPEDIA = 'wikipedia'
SOURCE_SITE_CHOICES = (
(NOT_SPECIFIED, 'Not Specified'),
(BALLOTPEDIA, 'Ballotpedia'),
(DIRECT_ENTRY, 'Direct Entry'),
(WIKIPEDIA, 'Wikipedia'),
)
logger = wevote_functions.admin.get_logger(__name__)
class QuickInfo(models.Model):
"""
The information that shows when you click an info icon next to a ballot item
"""
    # We are relying on the built-in Django id field
# The we_vote_id identifier is unique across all We Vote sites, and allows us to share our org info with other
# organizations
# It starts with "wv" then we add on a database specific identifier like "3v" (WeVoteSetting.site_unique_id_prefix)
# then the string "info", and then a sequential integer like "123".
# We keep the last value in WeVoteSetting.we_vote_id_last_quick_info_integer
we_vote_id = models.CharField(
verbose_name="we vote permanent id", max_length=255, default=None, null=True, blank=True, unique=True)
# The language that this text is in
language = models.CharField(max_length=5, choices=LANGUAGE_CHOICES, default=ENGLISH)
info_text = models.TextField(null=True, blank=True)
info_html = models.TextField(null=True, blank=True)
ballot_item_display_name = models.CharField(verbose_name="text name for ballot item for quick display",
max_length=255, null=True, blank=True)
# See also more_info_credit_text
more_info_credit = models.CharField(max_length=15, choices=SOURCE_SITE_CHOICES, default=NOT_SPECIFIED,
null=True, blank=True)
# A link to any location with more information about this quick information
    more_info_url = models.URLField(blank=True, null=True, verbose_name='url with the full entry for this info')
last_updated = models.DateTimeField(verbose_name='date entered', null=True, auto_now=True) # TODO Convert to date_last_changed
# The unique id of the last person who edited this entry.
last_editor_we_vote_id = models.CharField(
verbose_name="last editor we vote id", max_length=255, null=True, blank=True, unique=False)
    # This is the office that the quick_info refers to.
    # Only one of contest_office, candidate, or contest_measure should be filled, not all three
contest_office_we_vote_id = models.CharField(
verbose_name="we vote permanent id for the contest_office", max_length=255, null=True, blank=True, unique=False)
    # This is the candidate/politician that the quick_info refers to.
    # Only one of candidate, contest_office, or contest_measure should be filled, not all three
candidate_campaign_we_vote_id = models.CharField(
verbose_name="we vote permanent id for the candidate", max_length=255, null=True,
blank=True, unique=False)
# Useful for queries based on Politicians
politician_we_vote_id = models.CharField(
verbose_name="we vote permanent id for politician", max_length=255, null=True,
blank=True, unique=False)
    # This is the measure/initiative/proposition that the quick_info refers to.
    # Only one of contest_measure, contest_office, or candidate should be filled, not all three
contest_measure_we_vote_id = models.CharField(
verbose_name="we vote permanent id for the contest_measure", max_length=255, null=True,
blank=True, unique=False)
# There are many ballot items that don't have (or need) a custom quick_info entry, and can reference a general
# entry. This field is the we_vote_id of the master quick_info entry that has the general text.
quick_info_master_we_vote_id = models.CharField(
verbose_name="we vote id of other entry which is the master", max_length=255, default=None, null=True,
blank=True, unique=True)
# The unique ID of the election containing this contest. (Provided by Google Civic)
google_civic_election_id = models.PositiveIntegerField(
verbose_name="google civic election id", default=0, null=True, blank=True)
def __unicode__(self):
return self.we_vote_id
class Meta:
ordering = ('last_updated',)
# We override the save function so we can auto-generate we_vote_id
def save(self, *args, **kwargs):
# Even if this organization came from another source we still need a unique we_vote_id
if self.we_vote_id:
self.we_vote_id = self.we_vote_id.strip().lower()
if self.we_vote_id == "" or self.we_vote_id is None: # If there isn't a value...
# ...generate a new id
site_unique_id_prefix = fetch_site_unique_id_prefix()
next_local_integer = fetch_next_we_vote_id_quick_info_integer()
# "wv" = We Vote
# site_unique_id_prefix = a generated (or assigned) unique id for one server running We Vote
# "info" = tells us this is a unique id for a quick_info entry
# next_integer = a unique, sequential integer for this server - not necessarily tied to database id
self.we_vote_id = "wv{site_unique_id_prefix}info{next_integer}".format(
site_unique_id_prefix=site_unique_id_prefix,
next_integer=next_local_integer,
)
super(QuickInfo, self).save(*args, **kwargs)
def is_english(self):
if self.language == ENGLISH:
return True
return False
def is_spanish(self):
if self.language == SPANISH:
return True
return False
def is_vietnamese(self):
if self.language == VIETNAMESE:
return True
return False
def is_chinese(self):
if self.language == CHINESE:
return True
return False
def is_tagalog(self):
if self.language == TAGALOG:
return True
return False
def get_kind_of_ballot_item(self):
if positive_value_exists(self.contest_office_we_vote_id):
return OFFICE
elif positive_value_exists(self.candidate_campaign_we_vote_id):
return CANDIDATE
elif positive_value_exists(self.politician_we_vote_id):
return POLITICIAN
elif positive_value_exists(self.contest_measure_we_vote_id):
return MEASURE
return None
def get_ballot_item_we_vote_id(self):
if positive_value_exists(self.contest_office_we_vote_id):
return self.contest_office_we_vote_id
elif positive_value_exists(self.candidate_campaign_we_vote_id):
return self.candidate_campaign_we_vote_id
elif positive_value_exists(self.politician_we_vote_id):
return self.politician_we_vote_id
elif positive_value_exists(self.contest_measure_we_vote_id):
return self.contest_measure_we_vote_id
return None
def more_info_credit_text(self):
if self.more_info_credit == BALLOTPEDIA:
return "Courtesy of Ballotpedia.org"
if self.more_info_credit == WIKIPEDIA:
return "Courtesy of Wikipedia.org"
return ""
class QuickInfoManager(models.Manager):
def __unicode__(self):
return "QuickInfoManager"
def fetch_we_vote_id_from_local_id(self, quick_info_id):
if positive_value_exists(quick_info_id):
results = self.retrieve_quick_info_from_id(quick_info_id)
if results['quick_info_found']:
quick_info = results['quick_info']
return quick_info.we_vote_id
else:
return None
else:
return None
def retrieve_contest_office_quick_info(self, contest_office_we_vote_id):
quick_info_id = 0
quick_info_we_vote_id = None
candidate_we_vote_id = None
politician_we_vote_id = None
contest_measure_we_vote_id = None
quick_info_manager = QuickInfoManager()
return quick_info_manager.retrieve_quick_info(
quick_info_id, quick_info_we_vote_id,
contest_office_we_vote_id,
candidate_we_vote_id,
politician_we_vote_id,
contest_measure_we_vote_id
)
def retrieve_candidate_quick_info(self, candidate_we_vote_id):
quick_info_id = 0
quick_info_we_vote_id = None
politician_we_vote_id = None
contest_measure_we_vote_id = None
contest_office_we_vote_id = None
quick_info_manager = QuickInfoManager()
return quick_info_manager.retrieve_quick_info(
quick_info_id, quick_info_we_vote_id,
contest_office_we_vote_id,
candidate_we_vote_id,
politician_we_vote_id,
contest_measure_we_vote_id
)
def retrieve_contest_measure_quick_info(self, contest_measure_we_vote_id):
quick_info_id = 0
quick_info_we_vote_id = None
candidate_we_vote_id = None
politician_we_vote_id = None
contest_office_we_vote_id = None
quick_info_manager = QuickInfoManager()
return quick_info_manager.retrieve_quick_info(
quick_info_id, quick_info_we_vote_id,
contest_office_we_vote_id,
candidate_we_vote_id,
politician_we_vote_id,
contest_measure_we_vote_id
)
def retrieve_quick_info_from_id(self, quick_info_id):
quick_info_we_vote_id = None
candidate_we_vote_id = None
politician_we_vote_id = None
contest_office_we_vote_id = None
contest_measure_we_vote_id = None
quick_info_manager = QuickInfoManager()
return quick_info_manager.retrieve_quick_info(
quick_info_id, quick_info_we_vote_id,
contest_office_we_vote_id,
candidate_we_vote_id,
politician_we_vote_id,
contest_measure_we_vote_id
)
def retrieve_quick_info_from_we_vote_id(self, quick_info_we_vote_id):
quick_info_id = 0
candidate_we_vote_id = None
politician_we_vote_id = None
contest_office_we_vote_id = None
contest_measure_we_vote_id = None
quick_info_manager = QuickInfoManager()
return quick_info_manager.retrieve_quick_info(
quick_info_id, quick_info_we_vote_id,
contest_office_we_vote_id,
candidate_we_vote_id,
politician_we_vote_id,
contest_measure_we_vote_id
)
def retrieve_quick_info(self, quick_info_id, quick_info_we_vote_id=None,
contest_office_we_vote_id=None,
candidate_we_vote_id=None,
politician_we_vote_id=None,
contest_measure_we_vote_id=None):
error_result = False
exception_does_not_exist = False
exception_multiple_object_returned = False
quick_info_on_stage = QuickInfo()
success = False
try:
if positive_value_exists(quick_info_id):
status = "RETRIEVE_QUICK_INFO_FOUND_WITH_QUICK_INFO_ID"
quick_info_on_stage = QuickInfo.objects.get(id=quick_info_id)
quick_info_id = quick_info_on_stage.id
success = True
elif positive_value_exists(quick_info_we_vote_id):
status = "RETRIEVE_QUICK_INFO_FOUND_WITH_WE_VOTE_ID"
quick_info_on_stage = QuickInfo.objects.get(we_vote_id=quick_info_we_vote_id)
quick_info_id = quick_info_on_stage.id
success = True
elif positive_value_exists(contest_office_we_vote_id):
status = "RETRIEVE_QUICK_INFO_FOUND_WITH_OFFICE_WE_VOTE_ID"
quick_info_on_stage = QuickInfo.objects.get(
contest_office_we_vote_id=contest_office_we_vote_id)
quick_info_id = quick_info_on_stage.id
success = True
elif positive_value_exists(candidate_we_vote_id):
status = "RETRIEVE_QUICK_INFO_FOUND_WITH_CANDIDATE_WE_VOTE_ID"
quick_info_on_stage = QuickInfo.objects.get(
candidate_campaign_we_vote_id=candidate_we_vote_id)
quick_info_id = quick_info_on_stage.id
success = True
elif positive_value_exists(politician_we_vote_id):
status = "RETRIEVE_QUICK_INFO_FOUND_WITH_POLITICIAN_WE_VOTE_ID"
quick_info_on_stage = QuickInfo.objects.get(
politician_we_vote_id=politician_we_vote_id)
quick_info_id = quick_info_on_stage.id
success = True
elif positive_value_exists(contest_measure_we_vote_id):
status = "RETRIEVE_QUICK_INFO_FOUND_WITH_MEASURE_WE_VOTE_ID"
quick_info_on_stage = QuickInfo.objects.get(
contest_measure_we_vote_id=contest_measure_we_vote_id)
quick_info_id = quick_info_on_stage.id
success = True
else:
status = "RETRIEVE_QUICK_INFO_INSUFFICIENT_VARIABLES"
except QuickInfo.MultipleObjectsReturned as e:
handle_record_found_more_than_one_exception(e, logger=logger)
error_result = True
exception_multiple_object_returned = True
success = False
status = "RETRIEVE_QUICK_INFO_MULTIPLE_FOUND"
except QuickInfo.DoesNotExist:
error_result = False
exception_does_not_exist = True
success = True
status = "RETRIEVE_QUICK_INFO_NONE_FOUND"
results = {
'success': success,
'status': status,
'error_result': error_result,
'DoesNotExist': exception_does_not_exist,
'MultipleObjectsReturned': exception_multiple_object_returned,
'quick_info_found': True if quick_info_id > 0 else False,
'quick_info_id': quick_info_id,
'quick_info_we_vote_id': quick_info_on_stage.we_vote_id,
'quick_info': quick_info_on_stage,
'is_chinese': quick_info_on_stage.is_chinese(),
'is_english': quick_info_on_stage.is_english(),
'is_spanish': quick_info_on_stage.is_spanish(),
'is_tagalog': quick_info_on_stage.is_tagalog(),
'is_vietnamese': quick_info_on_stage.is_vietnamese(),
}
return results
def retrieve_quick_info_list(self, google_civic_election_id, quick_info_search_str=''):
google_civic_election_id = convert_to_int(google_civic_election_id)
quick_info_list = []
quick_info_list_found = False
try:
quick_info_queryset = QuickInfo.objects.all()
if positive_value_exists(quick_info_search_str):
filters = []
# new_filter = Q(id__iexact=quick_info_search_str)
# filters.append(new_filter)
#
# new_filter = Q(ballot_location_display_name__icontains=quick_info_search_str)
# filters.append(new_filter)
# Add the first query
if len(filters):
final_filters = filters.pop()
# ...and "OR" the remaining items in the list
for item in filters:
final_filters |= item
quick_info_queryset = quick_info_queryset.filter(final_filters)
quick_info_queryset = quick_info_queryset.filter(
google_civic_election_id=google_civic_election_id)
# if positive_value_exists(state_code):
# quick_info_queryset = quick_info_queryset.filter(normalized_state__iexact=state_code)
quick_info_list = quick_info_queryset
if len(quick_info_list):
quick_info_list_found = True
status = 'QUICK_INFO_LIST_FOUND'
else:
status = 'NO_QUICK_INFO_LIST_FOUND'
except QuickInfo.DoesNotExist:
status = 'NO_QUICK_INFO_LIST_FOUND_DOES_NOT_EXIST'
quick_info_list = []
except Exception as e:
handle_exception(e, logger=logger)
status = 'FAILED retrieve_quick_info_list_for_election ' \
'{error} [type: {error_type}]'.format(error=e, error_type=type(e))
results = {
'success': True if quick_info_list_found else False,
'status': status,
'quick_info_list_found': quick_info_list_found,
'quick_info_list': quick_info_list,
}
return results
def update_or_create_quick_info(self, quick_info_id, quick_info_we_vote_id,
ballot_item_display_name,
contest_office_we_vote_id,
candidate_we_vote_id,
politician_we_vote_id,
contest_measure_we_vote_id,
info_html,
info_text,
language,
last_editor_we_vote_id,
quick_info_master_we_vote_id,
more_info_url,
more_info_credit,
google_civic_election_id
):
# Does a quick_info entry already exist?
quick_info_manager = QuickInfoManager()
results = quick_info_manager.retrieve_quick_info(quick_info_id, quick_info_we_vote_id,
contest_office_we_vote_id,
candidate_we_vote_id,
politician_we_vote_id,
contest_measure_we_vote_id)
quick_info_on_stage_found = False
quick_info_on_stage_id = 0
quick_info_on_stage = QuickInfo()
if results['quick_info_found']:
quick_info_on_stage = results['quick_info']
# Update this quick_info entry with new values - we do not delete because we might be able to use
# noinspection PyBroadException
try:
# Figure out if the update is a change to a master entry
if positive_value_exists(quick_info_master_we_vote_id):
uses_master_entry = True
elif (info_html is not False) or (info_text is not False) or (more_info_url is not False):
uses_master_entry = False
                elif positive_value_exists(quick_info_on_stage.info_text) or \
positive_value_exists(quick_info_on_stage.info_html) or \
positive_value_exists(quick_info_on_stage.more_info_url):
uses_master_entry = False
elif positive_value_exists(quick_info_on_stage.quick_info_master_we_vote_id):
uses_master_entry = True
else:
uses_master_entry = True
if ballot_item_display_name is not False:
quick_info_on_stage.ballot_item_display_name = ballot_item_display_name
if language is not False:
quick_info_on_stage.language = language
if last_editor_we_vote_id is not False:
quick_info_on_stage.last_editor_we_vote_id = last_editor_we_vote_id
if contest_office_we_vote_id is not False:
quick_info_on_stage.contest_office_we_vote_id = contest_office_we_vote_id
if candidate_we_vote_id is not False:
quick_info_on_stage.candidate_campaign_we_vote_id = candidate_we_vote_id
if politician_we_vote_id is not False:
quick_info_on_stage.politician_we_vote_id = politician_we_vote_id
if contest_measure_we_vote_id is not False:
quick_info_on_stage.contest_measure_we_vote_id = contest_measure_we_vote_id
if google_civic_election_id is not False:
quick_info_on_stage.google_civic_election_id = google_civic_election_id
if uses_master_entry:
if quick_info_master_we_vote_id is not False:
quick_info_on_stage.quick_info_master_we_vote_id = quick_info_master_we_vote_id
# Clear out unique entry values
quick_info_on_stage.info_text = ""
quick_info_on_stage.info_html = ""
quick_info_on_stage.more_info_url = ""
quick_info_on_stage.more_info_credit = NOT_SPECIFIED
else:
# If here, this is NOT a master entry
if info_text is not False:
quick_info_on_stage.info_text = info_text
if info_html is not False:
quick_info_on_stage.info_html = info_html
if more_info_url is not False:
quick_info_on_stage.more_info_url = more_info_url
if more_info_credit is not False:
quick_info_on_stage.more_info_credit = more_info_credit
# Clear out master entry value
quick_info_on_stage.quick_info_master_we_vote_id = ""
if google_civic_election_id is not False:
quick_info_on_stage.google_civic_election_id = google_civic_election_id
                # We don't need to update date_last_changed here because we set auto_now=True in the field
quick_info_on_stage.save()
quick_info_on_stage_id = quick_info_on_stage.id
quick_info_on_stage_found = True
status = 'QUICK_INFO_UPDATED'
            except Exception as e:
                status = 'FAILED_TO_UPDATE_QUICK_INFO'
                handle_record_not_saved_exception(e, logger=logger, exception_message_optional=status)
elif results['MultipleObjectsReturned']:
status = 'QUICK_INFO MultipleObjectsReturned'
elif results['DoesNotExist']:
try:
# Create new quick_info entry
if ballot_item_display_name is False:
ballot_item_display_name = ""
if language is False:
language = ENGLISH
if last_editor_we_vote_id is False:
last_editor_we_vote_id = ""
if contest_office_we_vote_id is False:
contest_office_we_vote_id = ""
if candidate_we_vote_id is False:
candidate_we_vote_id = ""
if politician_we_vote_id is False:
politician_we_vote_id = ""
if contest_measure_we_vote_id is False:
contest_measure_we_vote_id = ""
if google_civic_election_id is False:
google_civic_election_id = 0
# Master related data
if quick_info_master_we_vote_id is False:
quick_info_master_we_vote_id = ""
# Unique related data
if info_html is False:
info_html = ""
if info_text is False:
info_text = ""
if more_info_url is False:
more_info_url = ""
if more_info_credit is False:
more_info_credit = None
quick_info_on_stage = QuickInfo(
ballot_item_display_name=ballot_item_display_name,
contest_office_we_vote_id=contest_office_we_vote_id,
candidate_campaign_we_vote_id=candidate_we_vote_id,
politician_we_vote_id=politician_we_vote_id,
contest_measure_we_vote_id=contest_measure_we_vote_id,
info_html=info_html,
info_text=info_text,
language=language,
last_editor_we_vote_id=last_editor_we_vote_id,
quick_info_master_we_vote_id=quick_info_master_we_vote_id,
more_info_url=more_info_url,
more_info_credit=more_info_credit,
google_civic_election_id=google_civic_election_id
                # We don't need to update last_updated here because we set auto_now=True in the field
)
quick_info_on_stage.save()
quick_info_on_stage_id = quick_info_on_stage.id
quick_info_on_stage_found = True
status = 'CREATED_QUICK_INFO'
except Exception as e:
status = 'FAILED_TO_CREATE_NEW_QUICK_INFO'
handle_record_not_saved_exception(e, logger=logger, exception_message_optional=status)
else:
status = results['status']
results = {
'success': True if quick_info_on_stage_found else False,
'status': status,
'quick_info_found': quick_info_on_stage_found,
'quick_info_id': quick_info_on_stage_id,
'quick_info': quick_info_on_stage,
}
return results
def delete_quick_info(self, quick_info_id):
quick_info_id = convert_to_int(quick_info_id)
quick_info_deleted = False
try:
if quick_info_id:
results = self.retrieve_quick_info(quick_info_id)
if results['quick_info_found']:
quick_info = results['quick_info']
quick_info_id = quick_info.id
quick_info.delete()
quick_info_deleted = True
except Exception as e:
handle_exception(e, logger=logger)
results = {
'success': quick_info_deleted,
'quick_info_deleted': quick_info_deleted,
'quick_info_id': quick_info_id,
}
return results
class QuickInfoMaster(models.Model):
"""
Master data that can be applied to multiple ballot items
"""
    # We are relying on the built-in Django id field
# The we_vote_id identifier is unique across all We Vote sites, and allows us to share our org info with other
# organizations
# It starts with "wv" then we add on a database specific identifier like "3v" (WeVoteSetting.site_unique_id_prefix)
# then the string "infom" (for "info master"), and then a sequential integer like "123".
# We keep the last value in WeVoteSetting.we_vote_id_last_quick_info_master_integer
we_vote_id = models.CharField(
verbose_name="we vote permanent id", max_length=255, default=None, null=True, blank=True, unique=True)
# What kind of ballot item is this a master entry for? Mostly used so we can organize these entries
kind_of_ballot_item = models.CharField(max_length=10, choices=KIND_OF_BALLOT_ITEM_CHOICES, default=OFFICE)
# The language that this text is in
language = models.CharField(max_length=5, choices=LANGUAGE_CHOICES, default=ENGLISH)
info_text = models.TextField(null=True, blank=True)
info_html = models.TextField(null=True, blank=True)
master_entry_name = models.CharField(verbose_name="text name for quick info master entry",
max_length=255, null=True, blank=True)
more_info_credit = models.CharField(max_length=15, choices=SOURCE_SITE_CHOICES, default=BALLOTPEDIA,
null=True, blank=True)
# A link to any location with more information about this quick information
    more_info_url = models.URLField(blank=True, null=True, verbose_name='url with the full entry for this info')
last_updated = models.DateTimeField(verbose_name='date entered', null=True, auto_now=True) # TODO convert to date_last_changed
# The unique id of the last person who edited this entry.
last_editor_we_vote_id = models.CharField(
verbose_name="last editor we vote id", max_length=255, null=True, blank=True, unique=False)
def __unicode__(self):
return self.we_vote_id
class Meta:
ordering = ('last_updated',)
# We override the save function so we can auto-generate we_vote_id
def save(self, *args, **kwargs):
# Even if this organization came from another source we still need a unique we_vote_id
if self.we_vote_id:
self.we_vote_id = self.we_vote_id.strip().lower()
if self.we_vote_id == "" or self.we_vote_id is None: # If there isn't a value...
# ...generate a new id
site_unique_id_prefix = fetch_site_unique_id_prefix()
next_local_integer = fetch_next_we_vote_id_quick_info_master_integer()
# "wv" = We Vote
# site_unique_id_prefix = a generated (or assigned) unique id for one server running We Vote
# "infom" = tells us this is a unique id for a quick_info_master entry
# next_integer = a unique, sequential integer for this server - not necessarily tied to database id
self.we_vote_id = "wv{site_unique_id_prefix}infom{next_integer}".format(
site_unique_id_prefix=site_unique_id_prefix,
next_integer=next_local_integer,
)
super(QuickInfoMaster, self).save(*args, **kwargs)
def is_english(self):
if self.language == ENGLISH:
return True
return False
def is_spanish(self):
if self.language == SPANISH:
return True
return False
def is_vietnamese(self):
if self.language == VIETNAMESE:
return True
return False
def is_chinese(self):
if self.language == CHINESE:
return True
return False
def is_tagalog(self):
if self.language == TAGALOG:
return True
return False
def more_info_credit_text(self):
if self.more_info_credit == BALLOTPEDIA:
return "Courtesy of Ballotpedia.org"
if self.more_info_credit == WIKIPEDIA:
return "Courtesy of Wikipedia.org"
return ""
class QuickInfoMasterManager(models.Manager):
def __unicode__(self):
return "QuickInfoMasterManager"
def fetch_we_vote_id_from_local_id(self, quick_info_master_id):
if positive_value_exists(quick_info_master_id):
results = self.retrieve_quick_info_master_from_id(quick_info_master_id)
if results['quick_info_master_found']:
quick_info_master = results['quick_info_master']
return quick_info_master.we_vote_id
else:
return None
else:
return None
def retrieve_quick_info_master_from_id(self, quick_info_master_id):
quick_info_master_we_vote_id = None
quick_info_master_manager = QuickInfoMasterManager()
return quick_info_master_manager.retrieve_quick_info_master(quick_info_master_id, quick_info_master_we_vote_id)
def retrieve_quick_info_master_from_we_vote_id(self, quick_info_master_we_vote_id):
quick_info_master_id = 0
quick_info_master_manager = QuickInfoMasterManager()
return quick_info_master_manager.retrieve_quick_info_master(quick_info_master_id, quick_info_master_we_vote_id)
def retrieve_quick_info_master(self, quick_info_master_id, quick_info_master_we_vote_id=None):
error_result = False
exception_does_not_exist = False
exception_multiple_object_returned = False
quick_info_master = QuickInfoMaster()
success = False
try:
if positive_value_exists(quick_info_master_id):
status = "RETRIEVE_QUICK_INFO_MASTER_FOUND_WITH_ID"
quick_info_master = QuickInfoMaster.objects.get(id=quick_info_master_id)
quick_info_master_id = quick_info_master.id
success = True
elif positive_value_exists(quick_info_master_we_vote_id):
status = "RETRIEVE_QUICK_INFO_MASTER_FOUND_WITH_WE_VOTE_ID"
quick_info_master = QuickInfoMaster.objects.get(we_vote_id=quick_info_master_we_vote_id)
quick_info_master_id = quick_info_master.id
success = True
else:
status = "RETRIEVE_QUICK_INFO_MASTER_INSUFFICIENT_VARIABLES"
except QuickInfoMaster.MultipleObjectsReturned as e:
handle_record_found_more_than_one_exception(e, logger=logger)
error_result = True
exception_multiple_object_returned = True
success = False
status = "RETRIEVE_QUICK_INFO_MASTER_MULTIPLE_FOUND"
except QuickInfoMaster.DoesNotExist:
error_result = False
exception_does_not_exist = True
success = True
status = "RETRIEVE_QUICK_INFO_MASTER_NONE_FOUND"
results = {
'success': success,
'status': status,
'error_result': error_result,
'DoesNotExist': exception_does_not_exist,
'MultipleObjectsReturned': exception_multiple_object_returned,
'quick_info_master_found': True if quick_info_master_id > 0 else False,
'quick_info_master_id': quick_info_master_id,
'quick_info_master': quick_info_master,
}
return results
def update_or_create_quick_info_master(self, quick_info_master_id,
quick_info_master_we_vote_id,
master_entry_name,
info_html, info_text,
language,
kind_of_ballot_item,
last_editor_we_vote_id,
more_info_url,
more_info_credit,
):
# Does a quick_info_master entry already exist?
quick_info_master_manager = QuickInfoMasterManager()
if positive_value_exists(quick_info_master_id) or positive_value_exists(quick_info_master_we_vote_id):
results = quick_info_master_manager.retrieve_quick_info_master(quick_info_master_id,
quick_info_master_we_vote_id)
quick_info_master_found = results['quick_info_master_found']
else:
quick_info_master_found = False
if quick_info_master_found:
quick_info_master = results['quick_info_master']
# noinspection PyBroadException
try:
if master_entry_name is not False:
quick_info_master.master_entry_name = master_entry_name
if info_html is not False:
quick_info_master.info_html = info_html
if info_text is not False:
quick_info_master.info_text = info_text
if language is not False:
quick_info_master.language = language
if kind_of_ballot_item is not False:
quick_info_master.kind_of_ballot_item = kind_of_ballot_item
if last_editor_we_vote_id is not False:
quick_info_master.last_editor_we_vote_id = last_editor_we_vote_id
if more_info_url is not False:
quick_info_master.more_info_url = more_info_url
if more_info_credit is not False:
quick_info_master.more_info_credit = more_info_credit
                # We don't need to update date_last_changed here because we set auto_now=True in the field
quick_info_master.save()
quick_info_master_id = quick_info_master.id
quick_info_master_found = True
status = 'QUICK_INFO_MASTER_UPDATED'
            except Exception as e:
                status = 'FAILED_TO_UPDATE_QUICK_INFO_MASTER'
                handle_record_not_saved_exception(e, logger=logger, exception_message_optional=status)
else:
try:
# Create new quick_info_master entry
if master_entry_name is False:
master_entry_name = None
if info_html is False:
info_html = None
if info_text is False:
info_text = None
if language is False:
language = ENGLISH
if last_editor_we_vote_id is False:
last_editor_we_vote_id = None
if more_info_url is False:
more_info_url = None
if more_info_credit is False:
more_info_credit = None
quick_info_master = QuickInfoMaster(
master_entry_name=master_entry_name,
info_html=info_html,
info_text=info_text,
language=language,
kind_of_ballot_item=kind_of_ballot_item,
last_editor_we_vote_id=last_editor_we_vote_id,
more_info_url=more_info_url,
more_info_credit=more_info_credit,
                # We don't need to update last_updated here because we set auto_now=True in the field
)
quick_info_master.save()
quick_info_master_id = quick_info_master.id
quick_info_master_found = True
status = 'CREATED_QUICK_INFO_MASTER'
except Exception as e:
status = 'FAILED_TO_CREATE_NEW_QUICK_INFO_MASTER'
handle_record_not_saved_exception(e, logger=logger, exception_message_optional=status)
results = {
'success': True if quick_info_master_found else False,
'status': status,
'quick_info_master_found': quick_info_master_found,
'quick_info_master_id': quick_info_master_id,
'quick_info_master': quick_info_master,
}
return results
def delete_quick_info_master(self, quick_info_master_id):
quick_info_master_id = convert_to_int(quick_info_master_id)
quick_info_master_deleted = False
try:
if quick_info_master_id:
results = self.retrieve_quick_info_master(quick_info_master_id)
if results['quick_info_master_found']:
quick_info_master = results['quick_info_master']
quick_info_master_id = quick_info_master.id
quick_info_master.delete()
quick_info_master_deleted = True
except Exception as e:
handle_exception(e, logger=logger)
results = {
'success': quick_info_master_deleted,
'quick_info_master_deleted': quick_info_master_deleted,
'quick_info_master_id': quick_info_master_id,
}
return results
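# --- Hedged usage sketch (not part of the original module) ---
# Retrieving the quick info entry attached to a candidate; the we_vote_id
# below is made up.
#
# quick_info_manager = QuickInfoManager()
# results = quick_info_manager.retrieve_candidate_quick_info('wv02cand99999')
# if results['quick_info_found']:
#     quick_info = results['quick_info']
#     print(quick_info.get_kind_of_ballot_item(), quick_info.more_info_credit_text())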
|
from models.anchor import *
if __name__=='__main__':
# TEMP: Wipe existing anchors
# THIS IS TEMPORARY:
anchors = {'Vaccination', 'Vaccinations', 'Vaccine', 'Vaccines', 'Inoculation', 'Immunization', 'Shot', 'Chickenpox', 'Disease', 'Diseases', 'Hepatitis A', 'Hepatitis B', 'infection', 'infections', 'measles', 'outbreak', 'mumps', 'rabies', 'tetanus', 'virus', 'autism'}
seed = 'vaccination'
for anchor in anchors:
a = Anchor.getOrCreate(anchor)
a.findInstances()
a.save()
"""
query = {
"size": 0,
"query": {
"filtered": {
"query": {
"query_string": {
"query": "*",
"analyze_wildcard": True
}
}
}
},
"aggs": {
"2": {
"terms": {
"field": "title",
"size": 100,
"order": {
"_count": "desc"
}
}
}
}
}
response = es.search(index="crowdynews"', 'body=query)
retrieved = now()
anchors = {}
# go through each retrieved document
for hit in response['aggregations']['2']['buckets']:
key = hit['key']
if validKey(key):
anchors[key] = hit['doc_count']
addBulk(anchors)
"""
|
from bs4 import BeautifulSoup
import http.client as httplib
import codecs
import pickle
def stan_tag(criteria, server):
tagged = []
file_count = 47
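    # NOTE (assumption): the slice below resumes partway through the input,
    # presumably because earlier lines were tagged in previous runs and the
    # output file numbering continues at file_count = 47.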
for ix, c in enumerate(criteria[2250000:]):
# initialize list of sentences
sents = []
try:
# send text to server
server.request('', c)
res = BeautifulSoup(server.getresponse().read())
# loop through sentences to generate lists of tagged/lemmatized tuples
for sentence in res.findAll('sentence'):
sent_tag = []
for word in sentence.findAll('word'):
sent_tag.append((word.get_text(), word['pos'], word['lemma']))
sents.append(sent_tag)
        except Exception:
            print(c)
            print(ix)
            server = httplib.HTTPConnection('127.0.0.1:2020')
            sents.append(c)
# add sentence to tagged list
tagged.append(sents)
        # save every 50,000 lines
        if ix % 50000 == 0:
            print('Line: ', ix)
            print('File: ', file_count)
            print()
            with open('data/stanford_tagged/stanford_tagged_criteria_%d.pkl' % file_count, 'wb') as fout:
                pickle.dump(tagged, fout)
            file_count += 1
            del tagged
            tagged = []
    with open('data/stanford_tagged/stanford_tagged_criteria_%d.pkl' % file_count, 'wb') as fout:
        pickle.dump(tagged, fout)
    print('Complete')
def main():
server = httplib.HTTPConnection('127.0.0.1:2020')
criteria = codecs.open('data/stanford_sentence_list.csv','r').readlines()
stan_tag(criteria, server)
if __name__ == '__main__':
main()
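# --- Hedged usage note (not part of the original script) ---
# Assumes a CoreNLP-style tagging service is already listening on
# 127.0.0.1:2020 and answers each request with XML containing <sentence> and
# <word> elements that carry 'pos' and 'lemma' attributes, as parsed above.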
|
try:
from ._models_py3 import ARMBaseModel
from ._models_py3 import Address
from ._models_py3 import Alert
from ._models_py3 import AlertErrorDetails
from ._models_py3 import AlertList
from ._models_py3 import AsymmetricEncryptedSecret
from ._models_py3 import Authentication
from ._models_py3 import AzureContainerInfo
from ._models_py3 import BandwidthSchedule
from ._models_py3 import BandwidthSchedulesList
from ._models_py3 import ClientAccessRight
from ._models_py3 import CloudErrorBody
from ._models_py3 import ContactDetails
from ._models_py3 import DataBoxEdgeDevice
from ._models_py3 import DataBoxEdgeDeviceExtendedInfo
from ._models_py3 import DataBoxEdgeDeviceList
from ._models_py3 import DataBoxEdgeDevicePatch
from ._models_py3 import FileEventTrigger
from ._models_py3 import FileSourceInfo
from ._models_py3 import IoTDeviceInfo
from ._models_py3 import IoTRole
from ._models_py3 import Ipv4Config
from ._models_py3 import Ipv6Config
from ._models_py3 import Job
from ._models_py3 import JobErrorDetails
from ._models_py3 import JobErrorItem
from ._models_py3 import MetricDimensionV1
from ._models_py3 import MetricSpecificationV1
from ._models_py3 import MountPointMap
from ._models_py3 import NetworkAdapter
from ._models_py3 import NetworkAdapterPosition
from ._models_py3 import NetworkSettings
from ._models_py3 import Node
from ._models_py3 import NodeList
from ._models_py3 import Operation
from ._models_py3 import OperationDisplay
from ._models_py3 import OperationsList
from ._models_py3 import Order
from ._models_py3 import OrderList
from ._models_py3 import OrderStatus
from ._models_py3 import PeriodicTimerEventTrigger
from ._models_py3 import PeriodicTimerSourceInfo
from ._models_py3 import RefreshDetails
from ._models_py3 import Role
from ._models_py3 import RoleList
from ._models_py3 import RoleSinkInfo
from ._models_py3 import SecuritySettings
from ._models_py3 import ServiceSpecification
from ._models_py3 import Share
from ._models_py3 import ShareAccessRight
from ._models_py3 import ShareList
from ._models_py3 import Sku
from ._models_py3 import StorageAccountCredential
from ._models_py3 import StorageAccountCredentialList
from ._models_py3 import SymmetricKey
from ._models_py3 import TrackingInfo
from ._models_py3 import Trigger
from ._models_py3 import TriggerList
from ._models_py3 import UpdateDownloadProgress
from ._models_py3 import UpdateInstallProgress
from ._models_py3 import UpdateSummary
from ._models_py3 import UploadCertificateRequest
from ._models_py3 import UploadCertificateResponse
from ._models_py3 import User
from ._models_py3 import UserAccessRight
from ._models_py3 import UserList
except (SyntaxError, ImportError):
from ._models import ARMBaseModel # type: ignore
from ._models import Address # type: ignore
from ._models import Alert # type: ignore
from ._models import AlertErrorDetails # type: ignore
from ._models import AlertList # type: ignore
from ._models import AsymmetricEncryptedSecret # type: ignore
from ._models import Authentication # type: ignore
from ._models import AzureContainerInfo # type: ignore
from ._models import BandwidthSchedule # type: ignore
from ._models import BandwidthSchedulesList # type: ignore
from ._models import ClientAccessRight # type: ignore
from ._models import CloudErrorBody # type: ignore
from ._models import ContactDetails # type: ignore
from ._models import DataBoxEdgeDevice # type: ignore
from ._models import DataBoxEdgeDeviceExtendedInfo # type: ignore
from ._models import DataBoxEdgeDeviceList # type: ignore
from ._models import DataBoxEdgeDevicePatch # type: ignore
from ._models import FileEventTrigger # type: ignore
from ._models import FileSourceInfo # type: ignore
from ._models import IoTDeviceInfo # type: ignore
from ._models import IoTRole # type: ignore
from ._models import Ipv4Config # type: ignore
from ._models import Ipv6Config # type: ignore
from ._models import Job # type: ignore
from ._models import JobErrorDetails # type: ignore
from ._models import JobErrorItem # type: ignore
from ._models import MetricDimensionV1 # type: ignore
from ._models import MetricSpecificationV1 # type: ignore
from ._models import MountPointMap # type: ignore
from ._models import NetworkAdapter # type: ignore
from ._models import NetworkAdapterPosition # type: ignore
from ._models import NetworkSettings # type: ignore
from ._models import Node # type: ignore
from ._models import NodeList # type: ignore
from ._models import Operation # type: ignore
from ._models import OperationDisplay # type: ignore
from ._models import OperationsList # type: ignore
from ._models import Order # type: ignore
from ._models import OrderList # type: ignore
from ._models import OrderStatus # type: ignore
from ._models import PeriodicTimerEventTrigger # type: ignore
from ._models import PeriodicTimerSourceInfo # type: ignore
from ._models import RefreshDetails # type: ignore
from ._models import Role # type: ignore
from ._models import RoleList # type: ignore
from ._models import RoleSinkInfo # type: ignore
from ._models import SecuritySettings # type: ignore
from ._models import ServiceSpecification # type: ignore
from ._models import Share # type: ignore
from ._models import ShareAccessRight # type: ignore
from ._models import ShareList # type: ignore
from ._models import Sku # type: ignore
from ._models import StorageAccountCredential # type: ignore
from ._models import StorageAccountCredentialList # type: ignore
from ._models import SymmetricKey # type: ignore
from ._models import TrackingInfo # type: ignore
from ._models import Trigger # type: ignore
from ._models import TriggerList # type: ignore
from ._models import UpdateDownloadProgress # type: ignore
from ._models import UpdateInstallProgress # type: ignore
from ._models import UpdateSummary # type: ignore
from ._models import UploadCertificateRequest # type: ignore
from ._models import UploadCertificateResponse # type: ignore
from ._models import User # type: ignore
from ._models import UserAccessRight # type: ignore
from ._models import UserList # type: ignore
from ._data_box_edge_management_client_enums import (
AccountType,
AlertSeverity,
AuthenticationType,
AzureContainerDataFormat,
ClientPermissionType,
DataBoxEdgeDeviceStatus,
DataPolicy,
DayOfWeek,
DeviceType,
DownloadPhase,
EncryptionAlgorithm,
InstallRebootBehavior,
JobStatus,
JobType,
MetricAggregationType,
MetricCategory,
MetricUnit,
MonitoringStatus,
NetworkAdapterDHCPStatus,
NetworkAdapterRDMAStatus,
NetworkAdapterStatus,
NetworkGroup,
NodeStatus,
OrderState,
PlatformType,
RoleStatus,
RoleTypes,
SSLStatus,
ShareAccessProtocol,
ShareAccessType,
ShareStatus,
SkuName,
SkuTier,
TimeGrain,
TriggerEventType,
UpdateOperation,
UpdateOperationStage,
)
__all__ = [
'ARMBaseModel',
'Address',
'Alert',
'AlertErrorDetails',
'AlertList',
'AsymmetricEncryptedSecret',
'Authentication',
'AzureContainerInfo',
'BandwidthSchedule',
'BandwidthSchedulesList',
'ClientAccessRight',
'CloudErrorBody',
'ContactDetails',
'DataBoxEdgeDevice',
'DataBoxEdgeDeviceExtendedInfo',
'DataBoxEdgeDeviceList',
'DataBoxEdgeDevicePatch',
'FileEventTrigger',
'FileSourceInfo',
'IoTDeviceInfo',
'IoTRole',
'Ipv4Config',
'Ipv6Config',
'Job',
'JobErrorDetails',
'JobErrorItem',
'MetricDimensionV1',
'MetricSpecificationV1',
'MountPointMap',
'NetworkAdapter',
'NetworkAdapterPosition',
'NetworkSettings',
'Node',
'NodeList',
'Operation',
'OperationDisplay',
'OperationsList',
'Order',
'OrderList',
'OrderStatus',
'PeriodicTimerEventTrigger',
'PeriodicTimerSourceInfo',
'RefreshDetails',
'Role',
'RoleList',
'RoleSinkInfo',
'SecuritySettings',
'ServiceSpecification',
'Share',
'ShareAccessRight',
'ShareList',
'Sku',
'StorageAccountCredential',
'StorageAccountCredentialList',
'SymmetricKey',
'TrackingInfo',
'Trigger',
'TriggerList',
'UpdateDownloadProgress',
'UpdateInstallProgress',
'UpdateSummary',
'UploadCertificateRequest',
'UploadCertificateResponse',
'User',
'UserAccessRight',
'UserList',
'AccountType',
'AlertSeverity',
'AuthenticationType',
'AzureContainerDataFormat',
'ClientPermissionType',
'DataBoxEdgeDeviceStatus',
'DataPolicy',
'DayOfWeek',
'DeviceType',
'DownloadPhase',
'EncryptionAlgorithm',
'InstallRebootBehavior',
'JobStatus',
'JobType',
'MetricAggregationType',
'MetricCategory',
'MetricUnit',
'MonitoringStatus',
'NetworkAdapterDHCPStatus',
'NetworkAdapterRDMAStatus',
'NetworkAdapterStatus',
'NetworkGroup',
'NodeStatus',
'OrderState',
'PlatformType',
'RoleStatus',
'RoleTypes',
'SSLStatus',
'ShareAccessProtocol',
'ShareAccessType',
'ShareStatus',
'SkuName',
'SkuTier',
'TimeGrain',
'TriggerEventType',
'UpdateOperation',
'UpdateOperationStage',
]
|
import os
import web
import rediswebpy
from web.contrib.template import render_jinja
import misc
db = web.database(dbn='mysql', db='geeksoho', user='geeksoho', passwd='geeksoho')
urls = (
'/', 'index',
'/test', 'test'
)
class index:
"""Home"""
def GET(self):
# return pjax('jobs.html')
jobsList = GetJobs()
return render.jobs(jobsList=jobsList)
def POST(self):
data = web.input(title='', link='', company='', company_weibo='', company_website='', city='', salary='', intro='')
        CreateNewJob(data)
raise web.seeother('/')
class test:
"""test"""
def GET(self):
# return pjax('test.html')
return render.test()
def CreateNewJob(data):
db.insert(
'jobs',
title = data.title,
link = data.link,
company = data.company,
company_weibo = data.company_weibo,
company_website = data.company_website,
city = data.city,
salary = data.salary,
intro = data.intro)
def GetJobs():
return db.select('jobs', limit = 100, order='id DESC')
app = web.application(urls, globals())
web.config.debug = True
cache = False
session = web.session.Session(app, rediswebpy.RedisStore(), initializer={'count': 0})
render = render_jinja(
    'templates', # template directory path.
    encoding = 'utf-8', # encoding.
)
myFilters = {'filter_tags': misc.filter_tags,}
render._lookup.filters.update(myFilters)
if __name__ == "__main__":
web.wsgi.runwsgi = lambda func, addr=None: web.wsgi.runfcgi(func, addr)
app.run()
|
from azure.cli.testsdk import ScenarioTest, record_only, ResourceGroupPreparer
class TestClusterScenarios(ScenarioTest):
@record_only()
@ResourceGroupPreparer(name_prefix='cli_test_monitor_log_analytics_cluster_c', parameter_name='rg1', key='rg1', location='centralus')
def test_monitor_log_analytics_cluster_default(self, rg1):
new_cluster_name = self.create_random_name('clitest-cluster-', 20)
sku_capacity = 1000
self.kwargs.update({
'new_cluster_name': new_cluster_name,
'sku_capacity': sku_capacity
})
self.cmd("monitor log-analytics cluster create -g {rg1} -n {new_cluster_name} --sku-capacity {sku_capacity}",
checks=[])
self.cmd("monitor log-analytics cluster show -g {rg1} -n {new_cluster_name}", checks=[
self.check('provisioningState', 'Succeeded'),
self.check('name', new_cluster_name),
self.check('sku.capacity', sku_capacity)
])
new_sku_capacity = 2000
self.kwargs.update({
'sku_capacity': new_sku_capacity
})
self.cmd("monitor log-analytics cluster update -g {rg1} -n {new_cluster_name} "
"--sku-capacity {sku_capacity}",
checks=[
self.check('sku.capacity', new_sku_capacity)
])
self.cmd("monitor log-analytics cluster show -g {rg1} -n {new_cluster_name}", checks=[
self.check('provisioningState', 'Succeeded'),
self.check('sku.capacity', new_sku_capacity)
])
self.cmd("monitor log-analytics cluster list -g {rg1}", checks=[
self.check('length(@)', 1)
])
self.cmd("monitor log-analytics cluster delete -g {rg1} -n {new_cluster_name} -y", checks=[])
with self.assertRaisesRegex(SystemExit, '3'):
self.cmd('monitor log-analytics cluster show -g {rg1} -n {new_cluster_name}')
@record_only()
def test_monitor_log_analytics_cluster_update_key(self):
new_key_name = 'key2'
new_key_version = 'dc814576e6b34de69a10b186a4723035'
self.kwargs.update({
'rg': 'azure-cli-test-scus',
'key_name': new_key_name,
'key_version': new_key_version,
'key_vault_uri': 'https://yu-vault-1.vault.azure.net/',
'cluster_name': 'yu-test-cluster2'
})
self.cmd("monitor log-analytics cluster update -g {rg} -n {cluster_name} --key-name {key_name} "
"--key-vault-uri {key_vault_uri} --key-version {key_version}",
checks=[])
self.cmd("monitor log-analytics cluster wait -g {rg} -n {cluster_name} --updated", checks=[])
self.cmd("monitor log-analytics cluster show -g {rg} -n {cluster_name}", checks=[
self.check('provisioningState', 'Succeeded'),
self.check('keyVaultProperties.keyName', new_key_name),
self.check('keyVaultProperties.keyVersion', new_key_version)
])
|
import factory
from dominion.games.models import Game
class GameFactory(factory.django.DjangoModelFactory):
class Meta:
model = Game
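# --- Hedged usage sketch (not part of the original module) ---
# Typical factory_boy usage inside a Django test case (needs DB access):
#
# game = GameFactory()                   # build and save one Game
# games = GameFactory.create_batch(3)    # build and save three Games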
|
from django.core.management.base import BaseCommand
from aspc.courses.models import Schedule
from datetime import datetime, timedelta
from django.core.mail import EmailMultiAlternatives
from django.template.loader import get_template
from django.template import Context
from aspc.settings import EMAIL_HOST_USER
MIN_DAYS = 90
MAX_DAYS = 300
EMAIL_TITLE = "Have you taken these classes?"
class Command(BaseCommand):
args = ''
    help = 'solicits course reviews from users whose schedules were created between MIN_DAYS and MAX_DAYS days ago'
def handle(self, *args, **options):
plaintext = get_template('email/solicit_reviews.txt')
htmly = get_template('email/solicit_reviews.html')
schedules = Schedule.objects.filter(create_ts__lte=datetime.now()-timedelta(days=MIN_DAYS),
create_ts__gte=datetime.now()-timedelta(days=MAX_DAYS))
emails_sent = 0
for schedule in schedules:
try:
context = Context({'user': schedule.user, 'courses': schedule.sections.all()})
text_content = plaintext.render(context)
html_content = htmly.render(context)
user_data = schedule.user.user.all()
if user_data and user_data[0].subscribed_email:
msg = EmailMultiAlternatives(EMAIL_TITLE, text_content, EMAIL_HOST_USER, [schedule.user.email])
msg.attach_alternative(html_content, "text/html")
msg.send()
emails_sent += 1
except Exception as e:
self.stdout.write('Error: %s\n' % e)
        self.stdout.write('Successfully sent %s emails\n' % emails_sent)
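# --- Hedged usage note (not part of the original command) ---
# Run through manage.py under the module's file name (not shown here):
#
#   python manage.py <command_module_name>
#
# One email is sent per qualifying schedule whose owner is still subscribed.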
|
"""
The utility module.
"""
import traceback
def extract_traceback(exception):
"""
Utility function for extracting the traceback from an exception.
:param exception: The exception to extract the traceback from.
:return: The extracted traceback.
"""
return ''.join(traceback.format_tb(exception.__traceback__))
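# --- Hedged usage sketch (not part of the original module) ---
#
# try:
#     1 / 0
# except ZeroDivisionError as exc:
#     print(extract_traceback(exc))    # formatted traceback, one frame per line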
|
"""
@name: Modules/Computer/Nodes/nodes.py
@author: D. Brian Kimmel
@contact: D.BrianKimmel@gmail.com
@copyright: (c) 2014-2030 by D. Brian Kimmel
@license: MIT License
@note: Created on Mar 6, 2014
@summary: This module does everything for nodes.
Nodes are read in from the config Xml file.
Then node local is run to update the local node
Finally, the nodes are synced between each other.
"""
__updated__ = '2020-01-25'
__version_info__ = (20, 1, 24)
__version__ = '.'.join(map(str, __version_info__))
from Modules.Computer.Nodes.node_local import Api as localApi
from Modules.Computer.Nodes.node_sync import Api as syncApi
from Modules.Core.Utilities.debug_tools import PrettyFormatAny
from Modules.Core.Utilities import extract_tools
from Modules.Core import logging_pyh as Logger
LOG = Logger.getLogger('PyHouse.Nodes ')
class MqttActions:
"""
"""
def __init__(self, p_pyhouse_obj):
self.m_pyhouse_obj = p_pyhouse_obj
def decode(self, p_msg):
""" Decode the computer specific portions of the message and append them to the log string.
        @param p_msg: is the partially decoded Mqtt message json
        @param p_msg.Topic: is a list of topic part strings (pyhouse, housename have been dropped)
        @param p_msg.Payload: is the JSON payload
"""
l_topic = p_msg.UnprocessedTopic
p_msg.UnprocessedTopic = p_msg.UnprocessedTopic[1:]
p_msg.LogMessage += '\tNodes:\n'
l_topic = l_topic[0].lower()
if l_topic == 'sync':
syncApi(self.m_pyhouse_obj).DecodeMqttMessage(p_msg)
else:
p_msg.LogMessage += '\tUnknown sub-topic {}'.format(PrettyFormatAny.form(p_msg.Payload, 'Computer msg'))
LOG.warning('Unknown Node sub-topic: {}\n\tMsg: {}'.format(l_topic, p_msg.Payload))
class Yaml:
def load_yaml_config(self, p_pyhouse_obj):
"""
"""
pass
def save_yaml_config(self, p_pyhouse_obj):
"""
"""
pass
class Api():
m_pyhouse_obj = None
def __init__(self, p_pyhouse_obj):
self.m_pyhouse_obj = p_pyhouse_obj
self._add_storage()
self.m_local = localApi(p_pyhouse_obj)
self.m_sync = syncApi(p_pyhouse_obj)
LOG.info('Initialized - Version:{}'.format(__version__))
def _add_storage(self):
"""
"""
def LoadConfig(self):
""" Load the Node xml info.
"""
Yaml().load_yaml_config(self.m_pyhouse_obj)
# p_pyhouse_obj.Computer.Nodes = l_nodes
LOG.info('Loaded Config - Version:{}'.format(__version__))
return
def Start(self):
self.m_local.Start()
self.m_sync.Start()
LOG.info('Started - Version:{}'.format(__version__))
def SaveConfig(self):
# l_xml, l_count = nodesXml.write_nodes_xml(self.m_pyhouse_obj)
# p_xml.append(l_xml)
Yaml().save_yaml_config(self.m_pyhouse_obj)
LOG.info("Saved Config")
return
def Stop(self):
self.m_local.Stop()
self.m_sync.Stop()
|
"""
Created on Fri Oct 14 15:30:11 2016
@author: worm_rig
"""
import json
import os
import tables
from tierpsy.analysis.compress.compressVideo import compressVideo, initMasksGroups
from tierpsy.analysis.compress.selectVideoReader import selectVideoReader
from tierpsy.helper.misc import TimeCounter, print_flush
DFLT_SAVE_FULL_INTERVAL = 5000
DFLT_BUFFER_SIZE = 5
DFLT_MASK_PARAMS = {'min_area' : 50,
'max_area' : 500000000,
'thresh_C' : 15,
'thresh_block_size' : 61,
'dilation_size' : 7
}
def _getWormEncoderParams(fname):
def numOrStr(x):
x = x.strip()
try:
return int(x)
        except ValueError:
return x
if os.path.exists(fname):
with open(fname, 'r') as fid:
dd = fid.read().split('\n')
plugin_params = {a.strip() : numOrStr(b) for a,b in
[x.split('=') for x in dd if x and x[0].isalpha()]}
else:
plugin_params = {}
return plugin_params
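# Example (illustrative): a wormencoder.ini containing lines such as
#     MINBLOBSIZE=50
#     THRESHOLD_C=15
# parses to {'MINBLOBSIZE': 50, 'THRESHOLD_C': 15}. Integer values are
# converted with int(); anything else is kept as a stripped string, and lines
# that do not start with a letter are skipped.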
def _getReformatParams(plugin_params):
if plugin_params:
save_full_interval = plugin_params['UNMASKEDFRAMES']
buffer_size = plugin_params['MASK_RECALC_RATE']
mask_params = {'min_area' : plugin_params['MINBLOBSIZE'],
'max_area' : plugin_params['MAXBLOBSIZE'],
'thresh_C' : plugin_params['THRESHOLD_C'],
'thresh_block_size' : plugin_params['THRESHOLD_BLOCK_SIZE'],
'dilation_size' : plugin_params['DILATION_KERNEL_SIZE']}
else:
        # if an empty dictionary was given, return the default values
save_full_interval = DFLT_SAVE_FULL_INTERVAL
buffer_size = DFLT_BUFFER_SIZE
mask_params = DFLT_MASK_PARAMS
return save_full_interval, buffer_size, mask_params
def _isValidSource(original_file):
    try:
        with tables.File(original_file, 'r') as fid:
            fid.get_node('/mask')
            return True
    except (tables.exceptions.HDF5ExtError, tables.exceptions.NoSuchNodeError):
        # unreadable HDF5 file, or a readable file without a '/mask' node
        return False
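# Example (illustrative, file name is hypothetical): _isValidSource('rig.hdf5')
# is True only when the file opens as HDF5 and contains a '/mask' node.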
def reformatRigMaskedVideo(original_file, new_file, plugin_param_file, expected_fps, microns_per_pixel):
    plugin_params = _getWormEncoderParams(plugin_param_file)
    # base name (no directory, no extension) used in progress messages
    base_name = original_file.rpartition('.')[0].rpartition(os.sep)[-1]
if not _isValidSource(original_file):
print_flush(new_file + ' ERROR. File might be corrupt. ' + original_file)
return
save_full_interval, buffer_size, mask_params = _getReformatParams(plugin_params)
with tables.File(original_file, 'r') as fid_old, \
tables.File(new_file, 'w') as fid_new:
mask_old = fid_old.get_node('/mask')
tot_frames, im_height, im_width = mask_old.shape
        progress_timer = TimeCounter('Reformatting Gecko plugin hdf5 video.', tot_frames)
attr_params = dict(
expected_fps = expected_fps,
microns_per_pixel = microns_per_pixel,
is_light_background = True
)
mask_new, full_new, _ = initMasksGroups(fid_new, tot_frames, im_height, im_width,
attr_params, save_full_interval, is_expandable=False)
mask_new.attrs['plugin_params'] = json.dumps(plugin_params)
        # seed the output: the initial buffer is re-masked using the background
        # of the first masked frame (frame `buffer_size`)
        img_buff_ini = mask_old[:buffer_size]
        full_new[0] = img_buff_ini[0]
        mask_new[:buffer_size] = img_buff_ini * (mask_old[buffer_size] != 0)
        for frame in range(buffer_size, tot_frames):
            if frame % save_full_interval != 0:
                mask_new[frame] = mask_old[frame]
            else:
                # at the save-full interval the source frame is unmasked: keep a
                # copy in the full-frame array and re-mask it using the previous
                # frame's mask
                full_frame_n = frame // save_full_interval
                img = mask_old[frame]
                full_new[full_frame_n] = img
                mask_new[frame] = img * (mask_old[frame - 1] != 0)
if frame % 500 == 0:
# calculate the progress and put it in a string
progress_str = progress_timer.get_str(frame)
print_flush(base_name + ' ' + progress_str)
print_flush(
base_name +
' Compressed video done. Total time:' +
progress_timer.get_time_str())
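# Worked example (illustrative): with save_full_interval = 5000, frames 5000,
# 10000, ... are stored unmasked in the full-frame array at indices 1, 2, ...
# (index 0 holds frame 0 from the initial buffer); every other frame is copied
# to the masked array unchanged.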
def isGoodVideo(video_file):
    try:
        vid = selectVideoReader(video_file)
        # corrupt videos can create infinite loops, so it is better to test
        # the file before starting a large task
        vid.release()
        return True
    except OSError:
        # corrupt file, cannot read the size
        return False
def processVideo(video_file, masked_image_file, compress_vid_param):
if video_file.endswith('hdf5'):
plugin_param_file = os.path.join(os.path.dirname(video_file), 'wormencoder.ini')
expected_fps = compress_vid_param['expected_fps']
microns_per_pixel = compress_vid_param['microns_per_pixel']
reformatRigMaskedVideo(video_file, masked_image_file, plugin_param_file, expected_fps=expected_fps, microns_per_pixel=microns_per_pixel)
else:
compressVideo(video_file, masked_image_file, **compress_vid_param)
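# Illustrative call (names and values are assumptions, not from the original):
#
#     compress_vid_param = {'expected_fps': 25, 'microns_per_pixel': 10.0}
#     processVideo('rig_video.hdf5', 'masked_video.hdf5', compress_vid_param)
#
# An input ending in 'hdf5' is routed through reformatRigMaskedVideo; anything
# else is handed to compressVideo(video_file, masked_image_file, **compress_vid_param).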
if __name__ == '__main__':
    import argparse
    fname_wormencoder = os.path.join(os.path.dirname(__file__), 'wormencoder.ini')
    parser = argparse.ArgumentParser(description='Reformat the files produced by the Gecko plugin into the format used by tierpsy.')
    parser.add_argument('original_file', help='path of the original file produced by the plugin')
    parser.add_argument('new_file', help='new file name')
    parser.add_argument(
        '--plugin_param_file',
        default=fname_wormencoder,
        help='wormencoder file used by the Gecko plugin.')
    parser.add_argument(
        '--expected_fps',
        type=float,
        default=25,
        help='Expected recording rate in frames per second.')
    # reformatRigMaskedVideo also needs microns_per_pixel; the default here is
    # an assumption - pass the real spatial resolution when it is known
    parser.add_argument(
        '--microns_per_pixel',
        type=float,
        default=None,
        help='Spatial resolution of the recording in microns per pixel.')
    args = parser.parse_args()
    reformatRigMaskedVideo(**vars(args))
|