commit stringlengths 40 40 | subject stringlengths 1 1.49k | old_file stringlengths 4 311 | new_file stringlengths 4 311 | new_contents stringlengths 1 29.8k | old_contents stringlengths 0 9.9k | lang stringclasses 3 values | proba float64 0 1 |
|---|---|---|---|---|---|---|---|
9f9955ff920d88cb0c9dd7ce4abeaac54a1c4977 | add tests for the migration command | corehq/motech/repeaters/tests/test_repeaters_migration.py | corehq/motech/repeaters/tests/test_repeaters_migration.py | from django.core.management import call_command
from django.test import TestCase
from corehq.motech.dhis2.repeaters import (
SQLDhis2EntityRepeater,
SQLDhis2Repeater,
)
from corehq.motech.fhir.repeaters import SQLFHIRRepeater
from corehq.motech.models import ConnectionSettings
from corehq.motech.repeaters.dbaccessors import delete_all_repeaters
from corehq.motech.repeaters.expression.repeaters import (
SQLCaseExpressionRepeater,
)
from corehq.motech.repeaters.models import (
Repeater,
SQLAppStructureRepeater,
SQLCaseRepeater,
SQLCreateCaseRepeater,
SQLDataRegistryCaseUpdateRepeater,
SQLFormRepeater,
SQLLocationRepeater,
SQLReferCaseRepeater,
SQLRepeater,
SQLShortFormRepeater,
SQLUpdateCaseRepeater,
SQLUserRepeater,
)
from corehq.motech.openmrs.repeaters import SQLOpenmrsRepeater
from .data.repeaters import repeater_test_data
class TestMigrationCommand(TestCase):
@classmethod
def setUpClass(cls) -> None:
cls.conn = ConnectionSettings(id=1, url="http://url.com", domain='rtest')
cls.conn.save()
cls.couch_repeaters = []
for r in repeater_test_data:
r = Repeater.wrap(r)
r.save(sync_to_sql=False)
cls.couch_repeaters.append(r)
return super().setUpClass()
@classmethod
def tearDownClass(cls) -> None:
delete_all_repeaters()
return super().tearDownClass()
def test_case_repeater_docs_are_migrated(self):
call_command('migrate_caserepeater')
self._assert_repeaters_equality(SQLCaseRepeater, "CaseRepeater")
call_command('migrate_formrepeater')
self._assert_repeaters_equality(SQLFormRepeater, "FormRepeater")
call_command('migrate_shortformrepeater')
self._assert_repeaters_equality(SQLShortFormRepeater, "ShortFormRepeater")
call_command('migrate_createcaserepeater')
self._assert_repeaters_equality(SQLCreateCaseRepeater, "CreateCaseRepeater")
call_command('migrate_refercaserrepeater')
self._assert_repeaters_equality(SQLReferCaseRepeater, "ReferCaseRepeater")
call_command('migrate_dhis2repeater')
self._assert_repeaters_equality(SQLDhis2Repeater, "Dhis2Repeater")
call_command('migrate_userrepeater')
self._assert_repeaters_equality(SQLUserRepeater, "UserRepeater")
call_command('migrate_fhirrepeater')
self._assert_repeaters_equality(SQLFHIRRepeater, "FHIRRepeater")
call_command('migrate_appstructurerepeater')
self._assert_repeaters_equality(SQLAppStructureRepeater, "AppStructureRepeater")
call_command('migrate_caseexpressionrepeater')
self._assert_repeaters_equality(SQLCaseExpressionRepeater, "CaseExpressionRepeater")
call_command('migrate_dataregistrycaseupdaterepeater')
self._assert_repeaters_equality(SQLDataRegistryCaseUpdateRepeater, "DataRegistryCaseUpdateRepeater")
call_command('migrate_dhis2entityrepeater')
self._assert_repeaters_equality(SQLDhis2EntityRepeater, "Dhis2EntityRepeater")
call_command('migrate_openmrsrepeater')
self._assert_repeaters_equality(SQLOpenmrsRepeater, "OpenmrsRepeater")
call_command('migrate_locationrepeater')
self._assert_repeaters_equality(SQLLocationRepeater, "LocationRepeater")
call_command('migrate_updatecaserepeater')
self._assert_repeaters_equality(SQLUpdateCaseRepeater, "UpdateCaseRepeater")
# test for count
self.assertEqual(SQLRepeater.objects.count(), len(self.couch_repeaters))
def _assert_repeaters_equality(self, sql_class, doc_type):
sql_ids = set(sql_class.objects.all().values_list('repeater_id', flat=True))
couch_ids = {r._id for r in self._get_repeater_objects(doc_type)}
self.assertEqual(len(couch_ids), 2)
self.assertEqual(len(sql_ids), 2)
self.assertCountEqual(sql_ids, couch_ids)
self.assertEqual(sql_ids, couch_ids)
def _get_repeater_objects(self, repeater_type):
return [r for r in self.couch_repeaters if r.doc_type == repeater_type]
| Python | 0.000001 | |
f4bf1c83f55013051037b4380f1b579375bad3d7 | Add test for ContextAwareForm | backend/tests/api/test_forms.py | backend/tests/api/test_forms.py | import pytest
from api.forms import ContextAwareForm
from users.models import User
def test_cannot_use_form_context_if_its_not_passed():
class TestModelForm(ContextAwareForm):
class Meta:
model = User
fields = ('id',)
form = TestModelForm()
with pytest.raises(ValueError) as e:
form.context
assert str(e.value) == 'Make sure you pass the context when instancing the Form'
| Python | 0 | |
3d40378e0e42f62615199daf97a48f24d5b9eb12 | add basic test for LIS | test_lis.py | test_lis.py | import unittest
import lis
class TestLis(unittest.TestCase):
def test_basic(self):
l = lis.Lis()
answer = [[0, 4, 6, 9, 13, 15], [0, 2, 6, 9, 13, 15], [0, 4, 6, 9, 11, 15], [0, 2, 6, 9, 11, 15]]
self.assertEquals(answer, l.lis([0, 8, 4, 12, 2, 10, 6, 14, 1, 9, 5, 13, 3, 11, 7, 15]))
if __name__ == '__main__':
unittest.main() | Python | 0.000001 | |
e12cbd29d6180b79eb408f8f34709af609256552 | add missing file | src/spectrum/transfer.py | src/spectrum/transfer.py | """Linear systems"""
#import numpy
#from levinson import *
#from linear_prediction import *
__all__ = ["tf2zp"]
"""to be done
latc2tf Convert lattice filter parameters to transfer function form
polyscale Scale roots of polynomial
polystab Stabilize polynomial
residuez z-transform partial-fraction expansion
sos2ss Convert digital filter second-order section parameters to state-space form
sos2tf Convert digital filter second-order section data to transfer function form
sos2zp Convert digital filter second-order section parameters to zero-pole-gain form
ss2sos Convert digital filter state-space parameters to second-order sections form
ss2tf Convert state-space filter parameters to transfer function form
ss2zp Convert state-space filter parameters to zero-pole-gain form
tf2sos Convert digital filter transfer function data to second-order sections form
tf2ss Convert transfer function filter parameters to state-space form
zp2sos Convert zero-pole-gain filter parameters to second-order sections form
zp2ss Convert zero-pole-gain filter parameters to state-space form
zp2tf Convert zero-pole-gain filter parameters to transfer function form
"""
def tf2zp(b,a):
"""Convert transfer function filter parameters to zero-pole-gain form
Find the zeros, poles, and gains of this continuous-time system:
.. warning:; b and a must have the same length.
b = [2,3,0];
a = [1, 0.4, 1];
[z,p,k] = tf2zp(b,a) % Obtain zero-pole-gain form
z =
0
-1.5000
p =
-0.2000 + 0.9798i
-0.2000 - 0.9798i
k =
2
:param b:
:param a:
:param fill: If True, check that the length of a and b are the same. If not, create a copy of the shortest element and append zeros to it.
:return: z (zeros),p (poles) ,g (gain)
Convert transfer function f(x)=sum(b*x^n)/sum(a*x^n) to
zero-pole-gain form f(x)=g*prod(1-z*x)/prod(1-p*x)
## TODO: See if tf2ss followed by ss2zp gives better results. These
## TODO: are available from the control system toolbox. Note that
## TODO: the control systems toolbox doesn't bother, but instead uses
.. seealso:: scipy.signal.tf2zpk, which gives the same results but uses a different
algorithm (z^-1 instead of z).
"""
from numpy import roots, array
assert len(b) == len(a), "length of the vectors a and b must be identical. fill with zeros if needed."
g = b[0]/a[0]
z = roots(b)
p = roots(a)
return z, p, g
def eqtflength(b,a):
"""
:param b: list
:param a: lsit
.. todo: for arrays, this doc
"""
d = abs(len(b)-len(a))
if d!=0:
if len(a)>len(b):
b.extend([0.]*d)
elif len(b)>len(a):
a.extend([0.]*d)
return b,a
else:
return b,a
def tf2latc(num=[1.], den=[1.]):
"""Convert transfer function filter parameters to lattice filter form"""
if len(num) == 1:
k, v = allpole2latc(num, den)
def allpole2latc(num, den):
# All-pole filter, simply call poly2rc
k = poly2rc(den)
#v = [num;numpy.zeros(size(v))];
#return k, v
def latc2tf():
raise NotImplementedError
def latcfilt():
raise NotImplementedError
def tf2sos():
raise NotImplementedError
def tf2ss():
raise NotImplementedError
def tf2zpk(b, a):
"""Return zero, pole, gain (z,p,k) representation from a numerator,
denominator representation of a linear filter.
Convert zero-pole-gain filter parameters to transfer function form
:param ndarray b: numerator polynomial.
:param ndarray a: numerator and denominator polynomials.
:return:
* z : ndarray Zeros of the transfer function.
* p : ndarray Poles of the transfer function.
* k : float System gain.
If some values of b are too close to 0, they are removed. In that case, a
BadCoefficients warning is emitted.
.. doctest::
>>> from spectrum.transfer import tf2zpk
>>> [b, a] = scipy.signal.butter(3.,.4)
>>> tf2zpk(b,a)
.. seealso:: :func:`zpk2tf`
"""
import scipy.signal
z,p,k = scipy.signal.tf2zpk(b, a)
return z,p,k
def ss2zpk(a,b,c,d, input=0):
"""State-space representation to zero-pole-gain representation.
:param A, B, C, D : ndarray State-space representation of linear system.
:param int input: optional For multiple-input systems, the input to use.
:return:
* z, p : sequence Zeros and poles.
* k : float System gain.
"""
import scipy.signal
z,p,k = scipy.signal.ss2zpk(a,b,c,d, input=input)
return b, a
def zpk2tf(z, p, k):
"""Return polynomial transfer function representation from zeros and poles
:param ndarray z: Zeros of the transfer function.
:param ndarray p: Poles of the transfer function.
:param float k: System gain.
:return:
b : ndarray Numerator polynomial.
a : ndarray Numerator and denominator polynomials.
zpk2tf forms transfer function polynomials from the zeros, poles, and gains
of a system in factored form.
zpk2tf(z,p,k) finds a rational transfer function
.. math:: \frac{B(s)}{A(s)} = \frac{b_1 s^{n-1}+\dots b_{n-1}s+b_n}{a_1 s^{m-1}+\dots a_{m-1}s+a_m}
given a system in factored transfer function form
.. math:: H(s) = \frac{Z(s)}{P(s)} = k \frac{(s-z_1)(s-z_2)\dots(s-z_m)}{(s-p_1)(s-p_2)\dots(s-p_n)}
with p being the pole locations, and z the zero locations, with as many.
The gains for each numerator transfer function are in vector k.
The zeros and poles must be real or come in complex conjugate pairs.
The polynomial denominator coefficients are returned in row vector a and
the polynomial numerator coefficients are returned in matrix b, which has
as many rows as there are columns of z.
Inf values can be used as place holders in z if some columns have fewer zeros than others.
"""
import scipy.signal
b, a = scipy.signal.zpk2tf(z, p, k)
return b, a
def zpk2ss(z, p, k):
"""Zero-pole-gain representation to state-space representation
:param sequence z,p: Zeros and poles.
:param float k: System gain.
:return:
* A, B, C, D : ndarray State-space matrices.
"""
import scipy.signal
return scipy.signal.zpk2ss(z,p,k)
| Python | 0.000003 | |
71675f81214ea510c377abf23fe2a11dfb113717 | create module | pyAhocorasick/pyAhocorasick.py | pyAhocorasick/pyAhocorasick.py | #-*- encoding=utf-8 -*-
'''
Created on Mar 15, 2014
@author: tonyzhang
'''
| Python | 0.000001 | |
963866e795df42121f972ee2170ddeb890f7e5b7 | Create pytest test file | python-practice/test_arrays.py | python-practice/test_arrays.py | import arrays
# Reverse an array in place
def test_reverse_array():
input = [1, 2, 3]
assert arrays.reverse_array(input) == [3, 2, 1]
# Search a sorted list
def test_binary_search_no_list():
input_array = []
target = 1
assert arrays.binary_search(input_array, target) == -1
def test_binary_search_short_list_found():
input_array = [1]
target = 1
assert arrays.binary_search(input_array, target) == 0
def test_binary_search_short_list_not_found():
input_array = [1]
target = 10
assert arrays.binary_search(input_array, target) == -1
def test_binary_search_even_list():
input_array = [1, 4, 8, 10]
target = 4
assert arrays.binary_search(input_array, target) == 1
def test_binary_search_odd_list():
input_array = [1, 5, 10]
target = 1
assert arrays.binary_search(input_array, target) == 0
def test_binary_search_last_in_list():
input_array = [1, 5, 10]
target = 10
assert arrays.binary_search(input_array, target) == 2
def test_binary_search_not_in_list_big():
input_array = [1, 5, 10]
target = 100
assert arrays.binary_search(input_array, target) == -1
def test_binary_search_not_in_list_small():
input_array = [1, 5, 10]
target = -100
assert arrays.binary_search(input_array, target) == -1
| Python | 0.000001 | |
4932483b10876eddab39477063a9b8546e5e0f33 | Create a.py | a.py | a.py | a
| Python | 0.000489 | |
051bbd588e7ad20dd9a00918c437a86d46ba8f7e | Create transfer.py | transfer.py | transfer.py | #! /usr/bin/env python
#-*-coding:utf-8-*-
import MySQLdb
import psutil
import urllib
import time
import sys
import os
#########################################################################################################################
## MySQLdb : 在部署前需要确定系统安装了该python模块
## psutil : 在python中进行系统进程管理的模块
#########################################################################################################################
(TRANSFERSERVERCOMMAND, TRANSFERCLIENTCOMMAND, CMSSERVER, TMPDIR) = ("./transferserver", "./transferclient", 'http://cms.legaldaily.dev', './tmp', )
def serverwatchdog (): #
""" 内容接受服务器端监控程序:保证服务器端程序接受正常运行;查看系统传输监听进程是否在运行,若没有运行,启动监听进程 """
checkdirectory()
while True:
found = False
for process in psutil.get_process_list():
processname = process.name()
if processname==TRANSFERSERVERCOMMAND:
found = True
break
if not found:
os.system ( TRANSFERSERVERCOMMAND+' &' )
time.sleep(5)
def clientwatchdog ():
""" 内容发送程序监控进程:保证内容发送程序的正常运行;启动内容发送程序并监控内容发送程序是否运行,若没有运行,启动发送程序 """
checkdirectory ()
while True:
transfertask ()
time.sleep(5)
def checkdirectory ():
"""启动服务器端监听任务进程,如果当前目录不在适当的目录下,则给出错误提示并退出程序"""
if not os.path.isfile ( TRANSFERSERVERCOMMAND ):
print "transfer server command not compiled in current directory or run command not in current directory"
exit(0)
def transfertask ():
"""在发送的客户端运行传输任务,将需要发送的文件保存在指定的远程机器的指定位置"""
try:
if not os.path.isdir (TMPDIR):
os.mkdir(TMPDIR)
conn = MySQLdb.connect ( host='168.0.0.98', user='username', passwd='password', port=3306 )
cur = conn.cursor()
conn.select_db('cms')
cur.execute(' SELECT `uuid` FROM `cms_content_publish` WHERE `status`=1 ')
cur.scroll ( 0, mode='absolute' )
results = cur.fetchall()
for r in results:
cur.execute (' SELECT * FROM `cms_content_publish` WHERE `uuid`= %s ' % r[0] )
cur.scroll ( 0, mode='absolute' )
publish = cur.fetchone()
localurl = CMSSERVER+publish[12]
publicurl = '/var/www/cms/public'+publish[11]
serverhost = publish[8]
serverport = publish[9]
serverpswd = publish[10]
filename = TMPDIR + '/' + os.path.basename ( publicurl )
socket = urllib.urlopen(localurl)
fout = file ( filename, 'w' )
fout.write ( socket.read() )
transfercommand = "{0} {1} {2} {3} {4} {5}".format(TRANSFERCLIENTCOMMAND, filename, publicurl, serverhost, serverport, serverpswd)
deletecommand = 'DELETE FROM `cms_content_publish` WHERE `uuid`= {0} '.format ( r[0] )
# print transfercommand
# print deletecommand
os.system ( transfercommand ) # translate file to public server
#cur.execute ( deletecommand )
os.unlink ( filename ) # delete file that fetched from server
conn.commit()
cur.close()
conn.close()
except MySQLdb.Error,e:
print "Mysql Error %d: %s" % (e.args[0], e.args[1])
exit ()
if __name__=="__main__":
argc = len ( sys.argv )
if argc>1 :
clientwatchdog () # 客户端守护进程
else :
serverwatchdog () # 服务器端守护进程
| Python | 0.000001 | |
50e24b0445f259d975e5dd78dd34a8e760e4ed88 | Create SQLite database and table and insert data from CSV file | DB.py | DB.py | # Create a database
import sqlite3
import csv
from datetime import datetime
import sys
reload(sys)
sys.setdefaultencoding('utf8')
class createDB():
def readCSV(self, filename):
conn = sqlite3.connect('CIUK.db')
print 'DB Creation Successful!'
cur = conn.cursor()
# cur.execute('''DROP TABLE PRODUCTS;''')
cur.execute('''CREATE TABLE PRODUCTS
(ID INTEGER PRIMARY KEY AUTOINCREMENT,
TITLE TEXT NOT NULL,
DESCRIPTION TEXT NOT NULL,
PRICE INTEGER NOT NULL,
CREATED_AT TIMESTAMP,
UPDATED_AT TIMESTAMP);''')
print 'Table Creation Successful!'
with open(filename) as f:
reader = csv.reader(f)
for row in reader:
cur.execute("INSERT INTO PRODUCTS VALUES (null, ?, ?, ?, ?, ?);", (unicode(row[0]), unicode(row[1]), unicode(row[2]), datetime.now(), datetime.now()))
print 'Successfully read data from CSV file!'
conn.commit()
conn.close()
c = createDB().readCSV('products.csv') | Python | 0 | |
874c01374397014e7c99afd67f5680ed32f1c5c6 | Build and revision number script | bn.py | bn.py | import sys
from time import gmtime
year, mon, mday, hour, min, sec, wday, yday, isdst = gmtime()
bld = ((year - 2000) * 12 + mon - 1) * 100 + mday
rev = hour * 100 + min
print 'Your build and revision number for today is %d.%d.' % (bld, rev)
| Python | 0 | |
480b0bd80f65646da52824403ade92880af1af2e | Add circle ci settings | project/circleci_settings.py | project/circleci_settings.py | # -*- coding: utf-8 -*-
DEBUG = True
LOCAL_DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'circle_test',
'USER': 'circleci',
'PASSWORD': '',
'HOST': 'localhost',
'PORT': '5432',
}
}
LOCALLY_INSTALLED_APPS = [
]
ENABLE_EMAILS = False
LOCALLY_ALLOWED_HOSTS = [
]
ADMINS = []
| Python | 0.000001 | |
0b01ef18535941618f833b29c7f27198e7db96dd | Create lastfm.py | apis/lastfm.py | apis/lastfm.py | """ Contains functions to fetch API information from last.fm API."""
import logging
import youtube
from utilities import web
log = logging.getLogger(__name__)
def get_lastfm_chart(chart_items=5):
"""
Finds the currently most played tunes on last.fm and turns them in to a youtube list of tracks.
:param chart_items: int the amount of tracks we want.
:return: list[ dict{'type=youtube', 'video_id', 'int(video_time)', 'video_title'} ] or None on error.
"""
url = 'http://lastfm-ajax-vip1.phx1.cbsig.net/kerve/charts?nr=%s&type=track&format=json' % chart_items
lastfm = web.http_get(url, json=True)
log.debug('lastfm response %s' % lastfm)
if lastfm['json'] is not None:
if 'results' in lastfm['json']:
if 'track' in lastfm['json']['results']:
if len(lastfm['json']['results']['track']) is not 0:
yt_tracks = []
for track in lastfm['json']['results']['track']:
search_str = '%s - %s' % (track['artist'], track['name'])
yt = youtube.youtube_search(search_str)
log.info(yt)
if yt is not None:
yt_tracks.append(yt)
return yt_tracks
return None
def search_lastfm_by_tag(search_str, by_id=True, max_tunes=40):
"""
Search last.fm for tunes matching the search term and turns them in to a youtube list of tracks.
:param search_str: str the search term to search for.
:param by_id: bool if set to True, only tunes that have a youtube id will be added(recommended)
:param max_tunes: int the max amount of tunes to return.
:return: list[ dict{'type=youtube', 'video_id', 'int(video_time)', 'video_title'} ] or None on error.
"""
url = 'http://lastfm-ajax-vip1.phx1.cbsig.net/kerve/charts?nr=%s&type=track&f=tag:%s&format=json' % \
(max_tunes, search_str)
lastfm = web.http_get(url, json=True)
log.debug('lastfm response %s' % lastfm)
if lastfm['json'] is not None:
if 'track' in lastfm['json']['results']:
if len(lastfm['json']['results']['track']) is not 0:
yt_tracks = []
for track in lastfm['json']['results']['track']:
search_str = '%s - %s' % (track['artist'], track['name'])
if 'playlink' in track:
if 'data-youtube-id' in track['playlink']:
youtube_id = track['playlink']['data-youtube-id']
yt = youtube.youtube_time(youtube_id)
log.debug(yt)
if yt is not None:
yt_tracks.append(yt)
else:
if not by_id:
yt = youtube.youtube_search(search_str)
log.debug('search by search string: %s result: %s' % (search_str, yt))
if yt is not None:
yt_tracks.append(yt)
else:
if not by_id:
yt = youtube.youtube_search(search_str)
log.debug('search by search string: %s result: %s' % (search_str, yt))
if yt is not None:
yt_tracks.append(yt)
return yt_tracks
return None
def lastfm_listening_now(max_tunes, by_id=True):
"""
Gets a list of tunes other people using last.fm are listening to, and turns them in to a youtube list of tracks.
:param max_tunes: int the amount of tracks we want.
:param by_id: bool if set to True, only tunes that have a youtube id will be added(recommended)
:return: list[ dict{'type=youtube', 'video_id', 'int(video_time)', 'video_title'} ] or None on error.
"""
url = 'http://lastfm-ajax-vip1.phx1.cbsig.net/kerve/listeningnow?limit=%s&format=json' % max_tunes
lastfm = web.http_get(url, json=True)
log.debug('lastfm response %s' % lastfm)
if lastfm['json'] is not None:
if len(lastfm['json']['Users']) is not 0:
yt_tracks = []
for user in lastfm['json']['Users']:
if 'playlink' in user:
if 'data-youtube-id' in user['playlink']:
youtube_id = user['playlink']['data-youtube-id']
yt = youtube.youtube_time(youtube_id)
log.debug(yt)
if yt is not None:
yt_tracks.append(yt)
else:
if 'Track' in user:
search_str = '%s - %s' % (user['Track']['Artist'], user['Track']['Name'])
if not by_id:
yt = youtube.youtube_search(search_str)
log.debug('search by search string: %s result: %s' % (search_str, yt))
if yt is not None:
yt_tracks.append(yt)
return yt_tracks
return None
| Python | 0.000004 | |
339798bbed673253358866bf083e7d974f79956c | Make sure proper_count is populated by metainfo_series | flexget/plugins/metainfo/series.py | flexget/plugins/metainfo/series.py | import logging
from string import capwords
from flexget.plugin import priority, register_plugin
from flexget.utils.titles import SeriesParser
from flexget.utils.titles.parser import ParseWarning
import re
log = logging.getLogger('metanfo_series')
class MetainfoSeries(object):
"""
Check if entry appears to be a series, and populate series info if so.
"""
def validator(self):
from flexget import validator
return validator.factory('boolean')
# Run after series plugin so we don't try to re-parse it's entries
@priority(120)
def on_feed_metainfo(self, feed):
# Don't run if we are disabled
if not feed.config.get('metainfo_series', True):
return
for entry in feed.entries:
# If series plugin already parsed this, don't touch it.
if entry.get('series_name'):
continue
self.guess_entry(entry)
def guess_entry(self, entry, allow_seasonless=False):
"""Populates series_* fields for entries that are successfully parsed."""
if entry.get('series_parser') and entry['series_parser'].valid:
# Return true if we already parsed this, false if series plugin parsed it
return entry.get('series_guessed')
parser = self.guess_series(entry['title'], allow_seasonless=allow_seasonless)
if parser:
entry['series_name'] = parser.name
entry['series_season'] = parser.season
entry['series_episode'] = parser.episode
entry['series_id'] = parser.identifier
entry['series_guessed'] = True
entry['series_parser'] = parser
entry['proper'] = parser.proper
entry['proper_count'] = parser.proper_count
return True
return False
def guess_series(self, title, allow_seasonless=False):
"""Returns a valid series parser if this :title: appears to be a series"""
parser = SeriesParser(identified_by='ep', allow_seasonless=allow_seasonless)
# We need to replace certain characters with spaces to make sure episode parsing works right
# We don't remove anything, as the match positions should line up with the original title
clean_title = re.sub('[_.,\[\]\(\):]', ' ', title)
match = parser.parse_episode(clean_title)
if match:
if parser.parse_unwanted(clean_title):
return
elif match['match'].start() > 1:
# We start using the original title here, so we can properly ignore unwanted prefixes.
# Look for unwanted prefixes to find out where the series title starts
start = 0
prefix = re.match('|'.join(parser.ignore_prefixes), title)
if prefix:
start = prefix.end()
# If an episode id is found, assume everything before it is series name
name = title[start:match['match'].start()]
# Remove possible episode title from series name (anything after a ' - ')
name = name.split(' - ')[0]
# Replace some special characters with spaces
name = re.sub('[\._\(\) ]+', ' ', name).strip(' -')
# Normalize capitalization to title case
name = capwords(name)
# If we didn't get a series name, return
if not name:
return
parser.name = name
parser.data = title
try:
parser.parse(data=title)
except ParseWarning, pw:
log.debug('ParseWarning: %s' % pw.value)
if parser.valid:
return parser
register_plugin(MetainfoSeries, 'metainfo_series')
| import logging
from string import capwords
from flexget.plugin import priority, register_plugin
from flexget.utils.titles import SeriesParser
from flexget.utils.titles.parser import ParseWarning
import re
log = logging.getLogger('metanfo_series')
class MetainfoSeries(object):
"""
Check if entry appears to be a series, and populate series info if so.
"""
def validator(self):
from flexget import validator
return validator.factory('boolean')
# Run after series plugin so we don't try to re-parse it's entries
@priority(120)
def on_feed_metainfo(self, feed):
# Don't run if we are disabled
if not feed.config.get('metainfo_series', True):
return
for entry in feed.entries:
# If series plugin already parsed this, don't touch it.
if entry.get('series_name'):
continue
self.guess_entry(entry)
def guess_entry(self, entry, allow_seasonless=False):
"""Populates series_* fields for entries that are successfully parsed."""
if entry.get('series_parser') and entry['series_parser'].valid:
# Return true if we already parsed this, false if series plugin parsed it
return entry.get('series_guessed')
parser = self.guess_series(entry['title'], allow_seasonless=allow_seasonless)
if parser:
entry['series_name'] = parser.name
entry['series_season'] = parser.season
entry['series_episode'] = parser.episode
entry['series_id'] = parser.identifier
entry['series_guessed'] = True
entry['series_parser'] = parser
entry['proper'] = parser.proper
return True
return False
def guess_series(self, title, allow_seasonless=False):
"""Returns a valid series parser if this :title: appears to be a series"""
parser = SeriesParser(identified_by='ep', allow_seasonless=allow_seasonless)
# We need to replace certain characters with spaces to make sure episode parsing works right
# We don't remove anything, as the match positions should line up with the original title
clean_title = re.sub('[_.,\[\]\(\):]', ' ', title)
match = parser.parse_episode(clean_title)
if match:
if parser.parse_unwanted(clean_title):
return
elif match['match'].start() > 1:
# We start using the original title here, so we can properly ignore unwanted prefixes.
# Look for unwanted prefixes to find out where the series title starts
start = 0
prefix = re.match('|'.join(parser.ignore_prefixes), title)
if prefix:
start = prefix.end()
# If an episode id is found, assume everything before it is series name
name = title[start:match['match'].start()]
# Remove possible episode title from series name (anything after a ' - ')
name = name.split(' - ')[0]
# Replace some special characters with spaces
name = re.sub('[\._\(\) ]+', ' ', name).strip(' -')
# Normalize capitalization to title case
name = capwords(name)
# If we didn't get a series name, return
if not name:
return
parser.name = name
parser.data = title
try:
parser.parse(data=title)
except ParseWarning, pw:
log.debug('ParseWarning: %s' % pw.value)
if parser.valid:
return parser
register_plugin(MetainfoSeries, 'metainfo_series')
| Python | 0.000017 |
780e4eb03420d75c18d0b21b5e616f2952aeda41 | Test sending headers with end stream. | test/test_basic_logic.py | test/test_basic_logic.py | # -*- coding: utf-8 -*-
"""
test_basic_logic
~~~~~~~~~~~~~~~~
Test the basic logic of the h2 state machines.
"""
import h2.connection
from hyperframe import frame
class TestBasicConnection(object):
"""
Basic connection tests.
"""
example_request_headers = [
(':authority', 'example.com'),
(':path', '/'),
(':scheme', 'https'),
(':method', 'GET'),
]
def test_begin_connection(self):
c = h2.connection.H2Connection()
frames = c.send_headers_on_stream(1, self.example_request_headers)
assert len(frames) == 1
def test_sending_some_data(self):
c = h2.connection.H2Connection()
frames = c.send_headers_on_stream(1, self.example_request_headers)
frames.append(c.send_data_on_stream(1, b'test', end_stream=True))
assert len(frames) == 2
def test_receive_headers_frame(self):
f = frame.HeadersFrame(1)
f.data = b'fake headers'
f.flags = set(['END_STREAM', 'END_HEADERS'])
c = h2.connection.H2Connection()
assert c.receive_frame(f) is None
def test_send_headers_end_stream(self):
c = h2.connection.H2Connection()
frames = c.send_headers_on_stream(
1, self.example_request_headers, end_stream=True
)
assert len(frames) == 1
assert frames[-1].flags == set(['END_STREAM', 'END_HEADERS'])
| # -*- coding: utf-8 -*-
"""
test_basic_logic
~~~~~~~~~~~~~~~~
Test the basic logic of the h2 state machines.
"""
import h2.connection
from hyperframe import frame
class TestBasicConnection(object):
"""
Basic connection tests.
"""
example_request_headers = [
(':authority', 'example.com'),
(':path', '/'),
(':scheme', 'https'),
(':method', 'GET'),
]
def test_begin_connection(self):
c = h2.connection.H2Connection()
frames = c.send_headers_on_stream(1, self.example_request_headers)
assert len(frames) == 1
def test_sending_some_data(self):
c = h2.connection.H2Connection()
frames = c.send_headers_on_stream(1, self.example_request_headers)
frames.append(c.send_data_on_stream(1, b'test', end_stream=True))
assert len(frames) == 2
def test_receive_headers_frame(self):
f = frame.HeadersFrame(1)
f.data = b'fake headers'
f.flags = set(['END_STREAM', 'END_HEADERS'])
c = h2.connection.H2Connection()
assert c.receive_frame(f) is None
| Python | 0 |
8adac46cd59c562ec494508ad735843253adc1f2 | add frequencies benchmark | bench/test_frequencies.py | bench/test_frequencies.py | from toolz import frequencies, identity
data = range(1000)*1000
def test_frequencies():
frequencies(data)
| Python | 0.000001 | |
892740ce17c2906de996089f07f005c7812270ef | add init back | src/__init__.py | src/__init__.py | """ Source Files, and a location for Global Imports """
| Python | 0.000001 | |
94acf181f063808c2b6444dbc15ea40ee17bdee3 | print structure | bin/print_h5_structure.py | bin/print_h5_structure.py | import sys
file_name = sys.argv[1]
# python3 print_data_structure.py filename
import glob
import os
import numpy as n
import h5py # HDF5 support
f0 = h5py.File(file_name, "r")
def print_attr(h5item):
for attr in h5item:
print(attr, h5item[attr])
def print_all_key(h5item):
for key in h5item.keys():
print('========================================')
print(key, h5item[key])
print('- - - - - - - - - - - - - - - - - - - - ')
print_attr(h5item[key])
def print_data_structure(h5item):
print('+ + + + + + + HEADER + + + + + + + + +')
print_attr(h5item.attrs)
print('\n')
print('+ + + + + + + DATA + + + + + + + + + +')
print_all_key(h5item)
print_data_structure(f0)
| Python | 0.000004 | |
fea9e1e80d03b87c05eacd02b5440fc783eb456d | Fix buildfier | package_managers/apt_get/repos.bzl | package_managers/apt_get/repos.bzl | # Copyright 2017 Google Inc. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Rules that create additional apt-get repo files."""
load("@bazel_tools//tools/build_defs/pkg:pkg.bzl", "pkg_tar")
def _impl(ctx):
ctx.actions.write(ctx.outputs.out, content="%s\n" % ctx.attr.repo)
_generate_additional_repo = rule(
attrs = {
"repo": attr.string(doc = "Additional repo to add, in sources.list format"),
},
executable = False,
outputs = {
"out": "%{name}.list",
},
implementation = _impl,
)
def generate_additional_repos(name, repos):
all_repo_files=[]
for i, repo in enumerate(repos):
repo_name = "%s_%s" % (name, i)
all_repo_files.append(repo_name)
_generate_additional_repo(
name=repo_name,
repo=repo
)
pkg_tar(
name=name,
srcs=all_repo_files,
package_dir="/etc/apt/sources.list.d/"
)
"""Generates /etc/apt/sources.list.d/ files with the specified repos.
Args:
repos: List of repos to add in sources.list format.
"""
| # Copyright 2017 Google Inc. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Rules that create additional apt-get repo files."""
load("@bazel_tools//tools/build_defs/pkg:pkg.bzl", "pkg_tar")
def _impl(ctx):
ctx.actions.write(ctx.outputs.out, content="%s\n" % ctx.attr.repo)
_generate_additional_repo = rule(
attrs = {
"repo": attr.string(doc = "Additional repo to add, in sources.list format"),
},
executable = False,
outputs = {
"out": "%{name}.list",
},
implementation = _impl,
)
def generate_additional_repos(name, repos):
all_repo_files=[]
for i, repo in enumerate(repos):
repo_name = "%s_%s" % (name, i)
all_repo_files.append(repo_name)
_generate_additional_repo(
name=repo_name,
repo=repo
)
pkg_tar(
name=name,
srcs=all_repo_files,
package_dir="/etc/apt/sources.list.d/"
)
"""Generates /etc/apt/sources.list.d/ files with the specified repos.
Args:
repos: List of repos to add in sources.list format.
"""
| Python | 0.000001 |
05c103238d977fe8c5d6b614f21f581069373524 | Increase tidy column limit to 100 | src/etc/tidy.py | src/etc/tidy.py | #!/usr/bin/env python
# xfail-license
import sys, fileinput, subprocess, re
from licenseck import *
err=0
cols=100
# Be careful to support Python 2.4, 2.6, and 3.x here!
config_proc=subprocess.Popen([ "git", "config", "core.autocrlf" ],
stdout=subprocess.PIPE)
result=config_proc.communicate()[0]
true="true".encode('utf8')
autocrlf=result.strip() == true if result is not None else False
def report_error_name_no(name, no, s):
global err
print("%s:%d: %s" % (name, no, s))
err=1
def report_err(s):
report_error_name_no(fileinput.filename(), fileinput.filelineno(), s)
def report_warn(s):
print("%s:%d: %s" % (fileinput.filename(),
fileinput.filelineno(),
s))
def do_license_check(name, contents):
if not check_license(name, contents):
report_error_name_no(name, 1, "incorrect license")
file_names = [s for s in sys.argv[1:] if (not s.endswith("_gen.rs"))
and (not ".#" in s)]
current_name = ""
current_contents = ""
try:
for line in fileinput.input(file_names,
openhook=fileinput.hook_encoded("utf-8")):
if fileinput.filename().find("tidy.py") == -1:
if line.find("FIXME") != -1:
if re.search("FIXME.*#\d+", line) == None:
report_err("FIXME without issue number")
if line.find("TODO") != -1:
report_err("TODO is deprecated; use FIXME")
idx = line.find("// NOTE")
if idx != -1:
report_warn("NOTE" + line[idx + len("// NOTE"):])
if (line.find('\t') != -1 and
fileinput.filename().find("Makefile") == -1):
report_err("tab character")
if not autocrlf and line.find('\r') != -1:
report_err("CR character")
if line.endswith(" \n") or line.endswith("\t\n"):
report_err("trailing whitespace")
line_len = len(line)-2 if autocrlf else len(line)-1
if line_len > cols:
report_err("line longer than %d chars" % cols)
if fileinput.isfirstline() and current_name != "":
do_license_check(current_name, current_contents)
if fileinput.isfirstline():
current_name = fileinput.filename()
current_contents = ""
current_contents += line
if current_name != "":
do_license_check(current_name, current_contents)
except UnicodeDecodeError, e:
report_err("UTF-8 decoding error " + str(e))
sys.exit(err)
| #!/usr/bin/env python
# xfail-license
import sys, fileinput, subprocess, re
from licenseck import *
err=0
cols=78
# Be careful to support Python 2.4, 2.6, and 3.x here!
config_proc=subprocess.Popen([ "git", "config", "core.autocrlf" ],
stdout=subprocess.PIPE)
result=config_proc.communicate()[0]
true="true".encode('utf8')
autocrlf=result.strip() == true if result is not None else False
def report_error_name_no(name, no, s):
global err
print("%s:%d: %s" % (name, no, s))
err=1
def report_err(s):
report_error_name_no(fileinput.filename(), fileinput.filelineno(), s)
def report_warn(s):
print("%s:%d: %s" % (fileinput.filename(),
fileinput.filelineno(),
s))
def do_license_check(name, contents):
if not check_license(name, contents):
report_error_name_no(name, 1, "incorrect license")
file_names = [s for s in sys.argv[1:] if (not s.endswith("_gen.rs"))
and (not ".#" in s)]
current_name = ""
current_contents = ""
try:
for line in fileinput.input(file_names,
openhook=fileinput.hook_encoded("utf-8")):
if fileinput.filename().find("tidy.py") == -1:
if line.find("FIXME") != -1:
if re.search("FIXME.*#\d+", line) == None:
report_err("FIXME without issue number")
if line.find("TODO") != -1:
report_err("TODO is deprecated; use FIXME")
idx = line.find("// NOTE")
if idx != -1:
report_warn("NOTE" + line[idx + len("// NOTE"):])
if (line.find('\t') != -1 and
fileinput.filename().find("Makefile") == -1):
report_err("tab character")
if not autocrlf and line.find('\r') != -1:
report_err("CR character")
if line.endswith(" \n") or line.endswith("\t\n"):
report_err("trailing whitespace")
line_len = len(line)-2 if autocrlf else len(line)-1
if line_len > cols:
report_err("line longer than %d chars" % cols)
if fileinput.isfirstline() and current_name != "":
do_license_check(current_name, current_contents)
if fileinput.isfirstline():
current_name = fileinput.filename()
current_contents = ""
current_contents += line
if current_name != "":
do_license_check(current_name, current_contents)
except UnicodeDecodeError, e:
report_err("UTF-8 decoding error " + str(e))
sys.exit(err)
| Python | 0.000193 |
f5a561494ece69c32d4bbd3e23c435a0fe74788a | Add local enum capability (needed for contentwrapper) | processrunner/enum.py | processrunner/enum.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
# For use with deployment statuses
# https://stackoverflow.com/a/1695250
def enum(*sequential, **named):
"""An implementation of the Enum data type
Usage
myEnum= enum(
'Apple'
, 'Banana')
"""
enums = dict(zip(sequential, range(len(sequential))), **named)
reverse = dict((value, key) for key, value in list(enums.items()))
enums['reverse_mapping'] = reverse
return type(str('Enum'), (), enums) | Python | 0 | |
bb649f299538c76d555e30ac0d31e2560e0acd3e | Add test | tests/test_calculator.py | tests/test_calculator.py | import unittest
from app.calculator import Calculator
class TestCalculator(unittest.TestCase):
def setUp(self):
self.calc = Calculator()
def test_calculator_addition_method_returns_correct_result(self):
calc = Calculator()
result = calc.addition(2,2)
self.assertEqual(4, result)
def test_calculator_subtraction_method_returns_correct_result(self):
calc = Calculator()
result = calc.subtraction(4,2)
self.assertEqual(2, result)
| Python | 0.000005 | |
630309837989e79ba972358a3098df40892982f5 | Create rrd_ts_sync.py | rrd_ts_sync.py | rrd_ts_sync.py | #-------------------------------------------------------------------------------
#
# Controls shed weather station
#
# The MIT License (MIT)
#
# Copyright (c) 2015 William De Freitas
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
#-------------------------------------------------------------------------------
#!usr/bin/env python
#===============================================================================
# Import modules
#===============================================================================
import settings as s
import rrdtool
import thingspeak
#===============================================================================
# MAIN
#===============================================================================
def main():
# --- Set up thingspeak account ---
#Set up inital values for variables
thingspeak_write_api_key = ''
#Set up thingspeak account
thingspeak_acc = thingspeak.ThingspeakAcc(s.THINGSPEAK_HOST_ADDR,
s.THINGSPEAK_API_KEY_FILENAME,
s.THINGSPEAK_CHANNEL_ID)
#Create RRD files if none exist
if not os.path.exists(s.RRDTOOL_RRD_FILE):
return
# ========== Timed Loop ==========
try:
while True:
#Fetch values from rrd
data_values = rrdtool.fetch(s.RRDTOOL_RRD_FILE, 'LAST',
'-s', str(s.UPDATE_RATE * -2))
# --- Send data to thingspeak ---
#Create dictionary with field as key and value
sensor_data = {}
for key, value in sorted(sensors.items(), key=lambda e: e[1][0]):
sensor_data[value[s.TS_FIELD]] = value[s.VALUE]
response = thingspeak_acc.update_channel(sensor_data)
# ========== User exit command ==========
except KeyboardInterrupt:
sys.exit(0)
#===============================================================================
# Boiler plate
#===============================================================================
if __name__=='__main__':
main()
| Python | 0.000005 | |
b20a6ccc211060644ff3e6f89428420fa59f5a5d | add a couple of tests for the build_scripts command | tests/test_build_scripts.py | tests/test_build_scripts.py | """Tests for distutils.command.build_scripts."""
import os
import unittest
from distutils.command.build_scripts import build_scripts
from distutils.core import Distribution
from distutils.tests import support
class BuildScriptsTestCase(support.TempdirManager, unittest.TestCase):
def test_default_settings(self):
cmd = self.get_build_scripts_cmd("/foo/bar", [])
self.assert_(not cmd.force)
self.assert_(cmd.build_dir is None)
cmd.finalize_options()
self.assert_(cmd.force)
self.assertEqual(cmd.build_dir, "/foo/bar")
def test_build(self):
source = self.mkdtemp()
target = self.mkdtemp()
expected = self.write_sample_scripts(source)
cmd = self.get_build_scripts_cmd(target,
[os.path.join(source, fn)
for fn in expected])
cmd.finalize_options()
cmd.run()
built = os.listdir(target)
for name in expected:
self.assert_(name in built)
def get_build_scripts_cmd(self, target, scripts):
dist = Distribution()
dist.scripts = scripts
dist.command_obj["build"] = support.DummyCommand(
build_scripts=target,
force=1
)
return build_scripts(dist)
def write_sample_scripts(self, dir):
expected = []
expected.append("script1.py")
self.write_script(dir, "script1.py",
("#! /usr/bin/env python2.3\n"
"# bogus script w/ Python sh-bang\n"
"pass\n"))
expected.append("script2.py")
self.write_script(dir, "script2.py",
("#!/usr/bin/python\n"
"# bogus script w/ Python sh-bang\n"
"pass\n"))
expected.append("shell.sh")
self.write_script(dir, "shell.sh",
("#!/bin/sh\n"
"# bogus shell script w/ sh-bang\n"
"exit 0\n"))
return expected
def write_script(self, dir, name, text):
f = open(os.path.join(dir, name), "w")
f.write(text)
f.close()
def test_suite():
return unittest.makeSuite(BuildScriptsTestCase)
| Python | 0 | |
3cf30bac4d20dbebf6185351ba0c10426a489de9 | Add sanity linter to catch future use | tools/run_tests/sanity/check_channel_arg_usage.py | tools/run_tests/sanity/check_channel_arg_usage.py | #!/usr/bin/env python
# Copyright 2018 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import sys
os.chdir(os.path.join(os.path.dirname(sys.argv[0]), '../../..'))
# set of files that are allowed to use the raw GRPC_ARG_* types
_EXCEPTIONS = set([
'src/core/lib/channel/channel_args.cc',
'src/core/lib/channel/channel_args.h',
])
_BANNED = set([
"GRPC_ARG_POINTER",
])
errors = 0
num_files = 0
for root, dirs, files in os.walk('src/core'):
for filename in files:
num_files += 1
path = os.path.join(root, filename)
if path in _EXCEPTIONS: continue
with open(path) as f:
text = f.read()
for banned in _BANNED:
if banned in text:
print('Illegal use of "%s" in %s' % (banned, path))
errors += 1
assert errors == 0
# This check comes about from this issue:
# https://github.com/grpc/grpc/issues/15381
# Basically, a change rendered this script useless and we did not realize it.
# This dumb check ensures that this type of issue doesn't occur again.
assert num_files > 300 # we definitely have more than 300 files
| Python | 0 | |
51d0623da276aa60a0da4d48343f215f0c517a29 | Add module for ids2vecs | thinc/neural/ids2vecs.py | thinc/neural/ids2vecs.py | from ._classes.window_encode import MaxoutWindowEncode
| Python | 0 | |
16101618e41f136bf22d6c8c1c258ab42b0bb3ed | add ei algorithms | src/pymor/algorithms/ei.py | src/pymor/algorithms/ei.py | # This file is part of the pyMor project (http://www.pymor.org).
# Copyright Holders: Felix Albrecht, Rene Milk, Stephan Rave
# License: BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)
from __future__ import absolute_import, division, print_function
from numbers import Number
import math as m
import numpy as np
from scipy.linalg import solve_triangular
from pymor.core import getLogger, BasicInterface
from pymor.core.cache import Cachable, cached, DEFAULT_DISK_CONFIG
from pymor.la import VectorArrayInterface
from pymor.operators.ei import EmpiricalInterpolatedOperator
def generate_interpolation_data(evaluations, error_norm=None, target_error=None, max_interpolation_dofs=None):
assert isinstance(evaluations, VectorArrayInterface) or all(isinstance(ev, VectorArrayInterface) for ev in evaluations)
if isinstance(evaluations, VectorArrayInterface):
evaluations = (evaluations,)
logger = getLogger('pymor.algorithms.ei.generate_interpolation_data')
logger.info('Generating Interpolation Data ...')
interpolation_dofs = np.zeros((0,), dtype=np.int32)
collateral_basis = type(next(iter(evaluations))).empty(dim=next(iter(evaluations)).dim)
max_errs = []
while True:
max_err = -1.
for AU in evaluations:
if len(interpolation_dofs) > 0:
interpolation_coefficients = solve_triangular(interpolation_matrix, AU.components(interpolation_dofs).T,
lower=True, unit_diagonal=True).T
# interpolation_coefficients = np.linalg.solve(interpolation_matrix, AU.components(interpolation_dofs).T).T
AU_interpolated = collateral_basis.lincomb(interpolation_coefficients)
ERR = AU - AU_interpolated
else:
ERR = AU
errs = discretization.l2_norm(ERR) if error_norm is None else error_norm(ERR)
local_max_err_ind = np.argmax(errs)
local_max_err = errs[local_max_err_ind]
if local_max_err > max_err:
max_err = local_max_err
new_vec = ERR.copy(ind=local_max_err_ind)
logger.info('Maximum interpolation error with {} interpolation DOFs: {}'.format(len(interpolation_dofs),
max_err))
if target_error is not None and max_err <= target_error:
logger.info('Target error reached! Stopping extension loop.')
break
new_dof = new_vec.argmax_abs()[0]
if new_dof in interpolation_dofs:
logger.info('DOF {} selected twice for interplation! Stopping extension loop.'.format(new_dof))
break
new_vec *= 1 / new_vec.components([new_dof])[0]
interpolation_dofs = np.hstack((interpolation_dofs, new_dof))
collateral_basis.append(new_vec, remove_from_other=True)
interpolation_matrix = collateral_basis.components(interpolation_dofs).T
max_errs.append(max_err)
triangularity_error = np.max(np.abs(interpolation_matrix - np.tril(interpolation_matrix)))
logger.info('Interpolation matrix is not lower triangular with maximum error of {}'
.format(triangularity_error))
if len(interpolation_dofs) >= max_interpolation_dofs:
logger.info('Maximum number of interpolation DOFs reached. Stopping extension loop.')
break
logger.info('')
data = {'errors': max_errs}
return interpolation_dofs, collateral_basis, data
def interpolate_operators(discretization, operator_names, parameter_sample, error_norm=None,
target_error=None, max_interpolation_dofs=None, separately=False):
class EvaluationProvider(BasicInterface, Cachable):
# evil hack to prevent deadlock ...
from tempfile import gettempdir
from os.path import join
DEFAULT_MEMORY_CONFIG = {"backend": 'LimitedMemory', 'arguments.max_kbytes': 20000}
DISK_CONFIG = {"backend": 'LimitedFile',
"arguments.filename": join(gettempdir(), 'pymor.ei_cache.dbm'),
'arguments.max_keys': 2000}
def __init__(self, discretization, operator, sample, operator_sample):
Cachable.__init__(self, config=self.DEFAULT_MEMORY_CONFIG)
self.discretization = discretization
self.sample = sample
self.operator = operator
self.operator_sample = operator_sample
@cached
def data(self, k):
mu = self.sample[k]
mu_op = self.operator_sample[k]
return self.operator.apply(self.discretization.solve(mu=mu), mu=mu_op)
def __len__(self):
return len(self.sample)
def __getitem__(self, ind):
if not 0 <= ind < len(self.sample):
raise IndexError
return self.data(ind)
if isinstance(operator_names, str):
operator_names = (operator_names,)
if len(operator_names) > 1:
raise NotImplementedError
sample = tuple(parameter_sample)
operator_sample = tuple(discretization.map_parameter(mu, operator_names[0]) for mu in sample)
operator = discretization.operators[operator_names[0]]
evaluations = EvaluationProvider(discretization, operator, sample, operator_sample)
dofs, basis, data = generate_interpolation_data(evaluations, error_norm, target_error,
max_interpolation_dofs)
ei_operator = EmpiricalInterpolatedOperator(operator, dofs, basis)
ei_operators = discretization.operators.copy()
ei_operators['operator'] = ei_operator
ei_discretization = discretization.with_operators(ei_operators, name='{}_interpolated'.format(discretization.name))
return ei_discretization, data
| Python | 0.002161 | |
ada91bd1ed76d59b7ec41d765af188aed2f8fd62 | add a module for collecting Warnings | src/pymor/core/warnings.py | src/pymor/core/warnings.py | '''
Created on Nov 19, 2012
@author: r_milk01
'''
class CallOrderWarning(UserWarning):
'''I am raised when there's a preferred call order, but the user didn't follow it.
For an Example see pymor.discretizer.stationary.elliptic.cg
'''
pass | Python | 0 | |
03c1f7040cc971c6e05f79f537fc501c550edaa8 | Add back manage.py (doh). | manage.py | manage.py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
conf = os.path.dirname(__file__)
wafer = os.path.join(conf, '..', 'wafer')
sys.path.append(wafer)
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| Python | 0 | |
0b88d652ddb23a385e79bfccb1db89c954d7d27f | Set up Restaurant class | get_a_lunch_spot.py | get_a_lunch_spot.py | import json
restaurants_string = """[{
"name" : "sweetgreen"
}]"""
print restaurants_string
restaurants_json = json.loads(restaurants_string)
print restaurants_json
class Restaurant:
name = ""
def __init__(self, data):
self.name = self.getName(data)
def getName(self, data):
for i in data:
return i['name']
restaurant_response = Restaurant(restaurants_json)
print restaurant_response.name
class GetALunchSpotResponse:
restaurant = None
def _init_(self, intent):
self.restaurant = self.restaurants[0]
def generateStringResponse():
return "Why don't you go to: " + restaurant.name
| Python | 0.000003 | |
6757f4b74f29142b0be7e32b8bdf210d109056ea | Create missing.py | missing.py | missing.py | import tensorflow as tf
import glob as glob
import getopt
import sys
import cPickle as pkl
import numpy as np
opts, _ = getopt.getopt(sys.argv[1:],"",["chunk_file_path=", "comp_file_path=", "means_file_path=", "output_dir=", "input_dir="])
chunk_file_path = "../video_level_feat_v1/train*.tfrecord"
comp_file_path = "../video_level_feat_v1/train*.tfrecord"
means_file_path = "../video_level_feat_v1/means.pkl"
output_dir = "../video_level_feat_v1/"
input_dir = "../video_level_feat_v1/"
print(opts)
for opt, arg in opts:
if opt in ("--chunk_file_path"):
chunk_file_path = arg
if opt in ("--comp_file_path"):
comp_file_path = arg
if opt in ("--means_file_path"):
means_file_path = arg
if opt in ("--output_dir"):
output_dir = arg
if opt in ("--input_dir"):
input_dir = arg
# filepaths to do
f = file(chunk_file_path, 'rb')
records_chunk = pkl.load(f)
f.close()
# means
f = open(means_file_path, 'rb')
means = pkl.load(f)
f.close()
filepaths = [input_dir+x for x in records_chunk]
filepaths_queue = tf.train.string_input_producer(filepaths, num_epochs=1)
reader = tf.TFRecordReader()
_, serialized_example = reader.read(filepaths_queue)
features_format = {}
feature_names = []
for x in ['q0', 'q1', 'q2', 'q3', 'q4', 'mean', 'stddv', 'skew', 'kurt', 'iqr', 'rng', 'coeffvar', 'efficiency']:
features_format[x + '_rgb_frame'] = tf.FixedLenFeature([1024], tf.float32)
features_format[x + '_audio_frame'] = tf.FixedLenFeature([128], tf.float32)
feature_names.append(str(x + '_rgb_frame'))
feature_names.append(str(x + '_audio_frame'))
features_format['video_id'] = tf.FixedLenFeature([], tf.string)
features_format['labels'] = tf.VarLenFeature(tf.int64)
features_format['video_length'] = tf.FixedLenFeature([], tf.float32)
features = tf.parse_single_example(serialized_example,features=features_format)
start_time = time.time()
for record in records_chunk:
# filepaths done
f = file(comp_file_path, 'rb')
records_comp = pkl.load(f)
f.close()
if record in records_comp:
print(record + ' : Skipped')
print(len(records_comp)/float(len(records_chunk)))
continue
new_filepath = output_dir+record
writer = tf.python_io.TFRecordWriter(new_filepath)
with tf.Session() as sess:
init_op = tf.group(tf.global_variables_initializer(),tf.local_variables_initializer())
sess.run(init_op)
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(coord=coord)
counter = 0
try:
while True:
features_proc, = sess.run([features])
counter += 1
for feature_name in feature_names:
if np.isnan(proc_features[feature_name]).sum() > 0:
proc_features[feature_name][np.isnan(proc_features[feature_name])] = means[feature_name][np.isnan(proc_features[feature_name])]
elif np.isinf(proc_features[feature_name]).sum() > 0:
proc_features[feature_name][np.isinf(proc_features[feature_name])] = means[feature_name][np.isinf(proc_features[feature_name])]
# writing tfrecord v1
features_to_write = {key : value for key, value in features.items()}
features_to_write['video_id'] = [video_id]
features_to_write['video_length'] = [video_length]
features_to_write['labels'] = labels.values
tf_features_format = {}
for key, value in features_to_write.items():
if key == 'video_id':
tf_features_format[key] = tf.train.Feature(bytes_list=tf.train.BytesList(value=value))
elif key == 'labels':
tf_features_format[key] = tf.train.Feature(int64_list=tf.train.Int64List(value=value))
else:
tf_features_format[key] = tf.train.Feature(float_list=tf.train.FloatList(value=value))
example = tf.train.Example(features=tf.train.Features(feature=tf_features_format))
writer.write(example.SerializeToString())
if(counter%100000 == 1):
print(counter)
except tf.errors.OutOfRangeError, e:
coord.request_stop(e)
finally:
coord.request_stop()
coord.join(threads)
print(record + ' : Done')
records_comp[filepath] = 1
print(len(records_comp)/float(len(records_chunk)))
f = file(comp_file_path, 'wb')
pkl.dump(records_comp, f, protocol=pkl.HIGHEST_PROTOCOL)
f.close()
# writing tfrecord v1
writer.close()
print(time.time() - start_time)
| Python | 0.000063 | |
23e778c78c2d77a9eeb0904856429546e379f8b5 | change the version of openerp | bin/release.py | bin/release.py | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2004-2008 Tiny SPRL (http://tiny.be) All Rights Reserved.
#
# $Id$
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsability of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# garantees and support are strongly adviced to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
###############################################################################
name = 'openerp-server'
version = '4.3.99'
description = 'OpenERP Server'
long_desc = '''\
OpenERP is a complete ERP and CRM. The main features are accounting (analytic
and financial), stock management, sales and purchases management, tasks
automation, marketing campaigns, help desk, POS, etc. Technical features include
a distributed server, flexible workflows, an object database, a dynamic GUI,
customizable reports, and SOAP and XML-RPC interfaces.
'''
classifiers = """\
Development Status :: 5 - Production/Stable
License :: OSI Approved :: GNU General Public License Version 2 (GPL-2)
Programming Language :: Python
"""
url = 'http://www.openerp.com'
author = 'Tiny.be'
author_email = 'info@tiny.be'
license = 'GPL-2'
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| #!/usr/bin/env python
# -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2004-2008 Tiny SPRL (http://tiny.be) All Rights Reserved.
#
# $Id$
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsability of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# garantees and support are strongly adviced to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
###############################################################################
name = 'openerp-server'
version = '4.3.0'
description = 'OpenERP Server'
long_desc = '''\
OpenERP is a complete ERP and CRM. The main features are accounting (analytic
and financial), stock management, sales and purchases management, tasks
automation, marketing campaigns, help desk, POS, etc. Technical features include
a distributed server, flexible workflows, an object database, a dynamic GUI,
customizable reports, and SOAP and XML-RPC interfaces.
'''
classifiers = """\
Development Status :: 5 - Production/Stable
License :: OSI Approved :: GNU General Public License Version 2 (GPL-2)
Programming Language :: Python
"""
url = 'http://www.openerp.com'
author = 'Tiny.be'
author_email = 'info@tiny.be'
license = 'GPL-2'
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| Python | 0 |
23257c56b58c26694773fb12d3ba167de43bd43b | Add validate.py tool | validate.py | validate.py | import json
import sys
import urllib2
data=json.loads(open("bootstrap/{}.json".format(sys.argv[1])).read())
for f in data['storage']['files']:
if 'source' not in f['contents'] or 'http' not in f['contents']['source']:
continue
url = f['contents']['source']
digest = f['contents']['verification']['hash'].lstrip('sha512-')
print('{} {}'.format(url, digest))
print('Fetching {}..'.format(url))
response = urllib2.urlopen(url)
html = response.read()
with open('/tmp/{}'.format(digest), 'w+') as tmpfile:
tmpfile.write(html)
print('Wrote /tmp/{}'.format(digest))
# if 'source', fetch and compare with 'verification'['hash']
| Python | 0.000001 | |
dd1c49eb12bf69580a8727353aa19741059df6d5 | add 102 | vol3/102.py | vol3/102.py | import urllib2
if __name__ == "__main__":
ans = 0
for line in urllib2.urlopen('https://projecteuler.net/project/resources/p102_triangles.txt'):
ax, ay, bx, by, cx, cy = map(int, line.split(','))
a = ax * by - ay * bx > 0
b = bx * cy - by * cx > 0
c = cx * ay - cy * ax > 0
ans += a == b == c
print ans
| Python | 0.999996 | |
feac5a01059a95910c76a0de5f83ad2473cf09c8 | Create app.py | app.py | app.py | import os
import sys
import tweepy
import requests
import numpy as np
import json
import os
from __future__ import print_function
from future.standard_library import install_aliases
install_aliases()
from urllib.parse import urlparse, urlencode
from urllib.request import urlopen, Request
from urllib.error import HTTPError
from flask import Flask
from flask import request
from flask import make_response
# Flask app should start in global layout
app = Flask(__name__)
from keras.models import Sequential
from keras.layers import Dense
from textblob import TextBlob
# Where the csv file will live
FILE_NAME = 'historical.csv'
@app.route('/webhook', methods=['POST'])
def webhook():
req = request.get_json(silent=True, force=True)
print("Request:")
print(json.dumps(req, indent=4))
res = processRequest(req)
res = json.dumps(res, indent=4)
# print(res)
r = make_response(res)
r.headers['Content-Type'] = 'application/json'
return r
def processRequest(req):
quote=req.get("result").get("parameters").get("STOCK")
get_historical(quote)
res = stock_prediction()
return res
def get_historical(quote):
# Download our file from google finance
url = 'http://www.google.com/finance/historical?q=NASDAQ%3A'+quote+'&output=csv'
r = requests.get(url, stream=True)
if r.status_code != 400:
with open(FILE_NAME, 'wb') as f:
for chunk in r:
f.write(chunk)
return True
def stock_prediction():
# Collect data points from csv
dataset = []
with open(FILE_NAME) as f:
for n, line in enumerate(f):
if n != 0:
dataset.append(float(line.split(',')[1]))
dataset = np.array(dataset)
# Create dataset matrix (X=t and Y=t+1)
def create_dataset(dataset):
dataX = [dataset[n+1] for n in range(len(dataset)-2)]
return np.array(dataX), dataset[2:]
trainX, trainY = create_dataset(dataset)
# Create and fit Multilinear Perceptron model
model = Sequential()
model.add(Dense(8, input_dim=1, activation='relu'))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')
model.fit(trainX, trainY, nb_epoch=200, batch_size=2, verbose=2)
# Our prediction for tomorrow
prediction = model.predict(np.array([dataset[0]]))
result = 'The price will move from %s to %s' % (dataset[0], prediction[0][0])
return result
return {
"speech": result,
"displayText": ,
# "data": data,
# "contextOut": [],
}
# We have our file so we create the neural net and get the prediction
print stock_prediction()
# We are done so we delete the csv file
os.remove(FILE_NAME)
| Python | 0.000003 | |
1490f438693a5727c722d933a712c889d3c09556 | test where SSL proxying works | test/others/ProxyTest.py | test/others/ProxyTest.py | import urllib2
httpTarget = "http://www.collab.net"
httpsTargetTrusted = "https://ctf.open.collab.net/sf/sfmain/do/home"
httpsTargetUntrusted = "https://www.collab.net"
proxyHost = "cu182.cloud.sp.collab.net"
proxyPort = "80"
proxyUser = "proxyuser"
proxyPwd = "proxypass"
def main():
print "Testing proxy: %s\n" % (getProxyUrl(),)
testProxy(httpTarget)
testProxy(httpsTargetTrusted)
testProxy(httpsTargetUntrusted)
def getProxyUrl():
proxyUrl = "http://%s:%s@%s:%s" % (proxyUser, proxyPwd, proxyHost, proxyPort)
return proxyUrl
def testProxy(url):
req = urllib2.Request(url)
scheme = "https" if url.startswith("https") else "http"
# build a new opener that uses a proxy requiring authorization
proxy_support = urllib2.ProxyHandler({scheme : getProxyUrl()})
opener = urllib2.build_opener(proxy_support, urllib2.HTTPHandler)
# install it
urllib2.install_opener(opener)
try:
print "Testing proxy to target: %s ..." % (url, )
response = urllib2.urlopen(req)
if response.read():
print "Proxy connection was successful\n"
except IOError, e:
if hasattr(e, 'reason'):
print 'Failed to reach a server.'
print 'Reason: \n', e.reason
elif hasattr(e, 'code'):
print 'The server couldn\'t fulfill the request.'
print 'Error code: \n', e.code
if __name__ == "__main__":
main()
| Python | 0.000002 | |
d15ba379ec48c2c5bc76eeb0f8a514625f5b9e2f | add unit test to test whether the parallel implementation is alright | tests/test_gp_algebra.py | tests/test_gp_algebra.py | import pytest
import pickle
import os
import json
import numpy as np
from pytest import raises
from flare.gp import GaussianProcess
from flare.env import AtomicEnvironment
from flare.struc import Structure
from flare import mc_simple
from flare.otf_parser import OtfAnalysis
from flare.env import AtomicEnvironment
from flare.mc_simple import two_plus_three_body_mc, two_plus_three_body_mc_grad
from flare.mc_sephyps import two_plus_three_body_mc as two_plus_three_body_mc_multi
from flare.mc_sephyps import two_plus_three_body_mc_grad as two_plus_three_body_mc_grad_multi
from flare import gp_algebra, gp_algebra_multi
def get_random_training_set(nenv):
"""Create a random test structure """
np.random.seed(0)
cutoffs = np.array([0.8, 0.8])
hyps = np.array([1, 1, 1, 1, 1])
kernel = (two_plus_three_body_mc, two_plus_three_body_mc_grad)
kernel_m = (two_plus_three_body_mc_multi, two_plus_three_body_mc_grad_multi)
hyps_mask = {'nspec': 1,
'spec_mask': np.zeros(118, dtype=int),
'nbond': 1,
'bond_mask': np.array([0]),
'ntriplet': 1,
'triplet_mask': np.array([0])}
# create test data
cell = np.eye(3)
unique_species = [2, 1]
noa = 5
training_data = []
training_labels = []
for idenv in range(nenv):
positions = np.random.uniform(-1, 1, [noa,3])
species = np.random.randint(0, len(unique_species), noa)
struc = Structure(cell, species, positions)
training_data += [AtomicEnvironment(struc, 1, cutoffs)]
training_labels += [np.random.uniform(-1, 1, 3)]
return hyps, training_data, training_labels, kernel, cutoffs, \
kernel_m, hyps_mask
def test_ky_mat():
hyps, training_data, training_labels, kernel, cutoffs, \
kernel_m, hyps_mask = \
get_random_training_set(10)
func = [gp_algebra.get_ky_mat,
gp_algebra.get_ky_mat_par,
gp_algebra_multi.get_ky_mat,
gp_algebra_multi.get_ky_mat_par]
ky_mat0 = func[0](hyps, training_data,
training_labels, kernel[0], cutoffs)
# parallel version
ky_mat = func[1](hyps, training_data,
training_labels, kernel[0], cutoffs, no_cpus=2)
diff = (np.max(np.abs(ky_mat-ky_mat0)))
assert (diff==0), "parallel implementation is wrong"
# check multi hyps implementation
ky_mat = func[2](hyps, training_data,
training_labels, kernel_m[0], cutoffs, hyps_mask)
diff = (np.max(np.abs(ky_mat-ky_mat0)))
assert (diff==0), "multi hyps parameter implementation is wrong"
# check multi hyps parallel implementation
ky_mat = func[3](hyps, training_data,
training_labels, kernel_m[0], cutoffs,
hyps_mask, no_cpus=2)
diff = (np.max(np.abs(ky_mat-ky_mat0)))
assert (diff==0), "multi hyps parameter parallel "\
"implementation is wrong"
def test_ky_and_hyp():
hyps, training_data, training_labels, kernel, cutoffs, \
kernel_m, hyps_mask = \
get_random_training_set(10)
func = [gp_algebra.get_ky_and_hyp,
gp_algebra.get_ky_and_hyp_par,
gp_algebra_multi.get_ky_and_hyp,
gp_algebra_multi.get_ky_and_hyp_par]
hypmat_0, ky_mat0 = func[0](hyps, training_data,
training_labels, kernel[1], cutoffs)
# parallel version
hypmat, ky_mat = func[1](hyps, training_data,
training_labels, kernel[1], cutoffs, no_cpus=2)
diff = (np.max(np.abs(ky_mat-ky_mat0)))
assert (diff==0), "parallel implementation is wrong"
# check multi hyps implementation
hypmat, ky_mat = func[2](hyps, hyps_mask, training_data,
training_labels, kernel_m[1], cutoffs)
diff = (np.max(np.abs(ky_mat-ky_mat0)))
assert (diff==0), "multi hyps parameter implementation is wrong"
# check multi hyps parallel implementation
hypmat, ky_mat = func[3](hyps, hyps_mask, training_data,
training_labels, kernel_m[1], cutoffs,
no_cpus=2)
diff = (np.max(np.abs(ky_mat-ky_mat0)))
assert (diff==0), "multi hyps parameter parallel "\
"implementation is wrong"
| Python | 0 | |
aa1808c9a13894751953c8a1c816c89861e514d1 | Create new package. (#6061) | var/spack/repos/builtin/packages/r-iso/package.py | var/spack/repos/builtin/packages/r-iso/package.py | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RIso(RPackage):
"""Linear order and unimodal order (univariate) isotonic regression;
bivariate isotonic regression with linear order on both variables."""
homepage = "https://cran.r-project.org/package=Iso"
url = "https://cran.rstudio.com/src/contrib/Iso_0.0-17.tar.gz"
list_url = "https://cran.rstudio.com/src/contrib/Archive/Iso"
version('0.0-17', 'bf99821efb6a44fa75fdbf5e5c4c91e4')
| Python | 0 | |
384033b6b5d7a3b207c1360b896f70bfbc064caf | Update __init__.py | tendrl/integrations/gluster/sds_sync/__init__.py | tendrl/integrations/gluster/sds_sync/__init__.py | import etcd
import time
from tendrl.commons.objects.job import Job
from tendrl.commons import sds_sync
from tendrl.commons.utils import log_utils as logger
import uuid
class GlusterIntegrtaionsSyncThread(sds_sync.StateSyncThread):
def run(self):
logger.log(
"debug",
NS.get("publisher_id", None),
{"message": "%s running" % self.__class__.__name__}
)
while not self._complete.is_set():
try:
nodes = NS._int.client.read("/nodes")
except etcd.EtcdKeyNotFound:
time.sleep(int(NS.config.data.get("sync_interval", 10)))
continue
for node in nodes.leaves:
node_id = node.key.split('/')[-1]
try:
node_context = NS.tendrl.objects.NodeContext(
node_id=node_id
).load()
tendrl_context = NS.tendrl.objects.TendrlContext(
node_id=node_id
).load()
if node_context.status != "DOWN" or\
tendrl_context.sds_name != "gluster":
continue
# check if the node belongs to a cluster that is managed
cluster = NS.tendrl.objects.Cluster(
integration_id=tendrl_context.integration_id
).load()
if cluster.is_managed != "yes":
continue
# check if the bricks of this node are already
# marked as down
bricks = NS._int.client.read(
"clusters/{0}/Bricks/all/{1}".format(
tendrl_context.integration_id,
node_context.fqdn
)
)
bricks_marked_already = True
for brick in bricks.leaves:
brick_status = NS._int.client.read(
"{0}/status".format(brick.key)
).value
if brick_status != "Stopped":
bricks_marked_already = False
break
if bricks_marked_already:
continue
self.update_brick_status(
node_context.fqdn,
tendrl_context.integration_id,
"Stopped"
)
except etcd.EtcdKeyNotFound:
pass
time.sleep(int(NS.config.data.get("sync_interval", 10)))
def update_brick_status(self, fqdn, integration_id, status):
_job_id = str(uuid.uuid4())
_params = {
"TendrlContext.integration_id": integration_id,
"Node.fqdn": fqdn,
"Brick.status": status
}
_job_payload = {
"tags": [
"tendrl/integration/{0}".format(
integration_id
)
],
"run": "gluster.flows.UpdateBrickStatus",
"status": "new",
"parameters": _params,
"type": "sds"
}
Job(
job_id=_job_id,
status="new",
payload=_job_payload
).save()
| import etcd
import time
from tendrl.commons.objects.job import Job
from tendrl.commons import sds_sync
from tendrl.commons.utils import log_utils as logger
import uuid
class GlusterIntegrtaionsSyncThread(sds_sync.StateSyncThread):
def run(self):
logger.log(
"debug",
NS.get("publisher_id", None),
{"message": "%s running" % self.__class__.__name__}
)
while not self._complete.is_set():
time.sleep(int(NS.config.data.get("sync_interval", 10)))
try:
nodes = NS._int.client.read("/nodes")
except etcd.EtcdKeyNotFound:
return
for node in nodes.leaves:
node_id = node.key.split('/')[-1]
try:
node_context = NS.tendrl.objects.NodeContext(
node_id=node_id
).load()
tendrl_context = NS.tendrl.objects.TendrlContext(
node_id=node_id
).load()
if node_context.status != "DOWN" or\
tendrl_context.sds_name != "gluster":
continue
# check if the node belongs to a cluster that is managed
cluster = NS.tendrl.objects.Cluster(
integration_id=tendrl_context.integration_id
).load()
if cluster.is_managed != "yes":
continue
# check if the bricks of this node are already
# marked as down
bricks = NS._int.client.read(
"clusters/{0}/Bricks/all/{1}".format(
tendrl_context.integration_id,
node_context.fqdn
)
)
bricks_marked_already = True
for brick in bricks.leaves:
brick_status = NS._int.client.read(
"{0}/status".format(brick.key)
).value
if brick_status != "Stopped":
bricks_marked_already = False
break
if bricks_marked_already:
continue
self.update_brick_status(
node_context.fqdn,
tendrl_context.integration_id,
"Stopped"
)
except etcd.EtcdKeyNotFound:
pass
def update_brick_status(self, fqdn, integration_id, status):
_job_id = str(uuid.uuid4())
_params = {
"TendrlContext.integration_id": integration_id,
"Node.fqdn": fqdn,
"Brick.status": status
}
_job_payload = {
"tags": [
"tendrl/integration/{0}".format(
integration_id
)
],
"run": "gluster.flows.UpdateBrickStatus",
"status": "new",
"parameters": _params,
"type": "sds"
}
Job(
job_id=_job_id,
status="new",
payload=_job_payload
).save()
| Python | 0.000072 |
930274bb8ab10379f4c76618cccc604c9fe27996 | Update the test to match removed XLA:CPU device | tensorflow/python/eager/remote_cloud_tpu_test.py | tensorflow/python/eager/remote_cloud_tpu_test.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test that we can connect to a real Cloud TPU."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import flags
from absl.testing import absltest
from tensorflow.python.distribute.cluster_resolver import tpu_cluster_resolver
from tensorflow.python.eager import remote
from tensorflow.python.framework import config
from tensorflow.python.tpu import tpu_strategy_util
FLAGS = flags.FLAGS
flags.DEFINE_string('tpu', '', 'Name of TPU to connect to.')
flags.DEFINE_string('project', None, 'Name of GCP project with TPU.')
flags.DEFINE_string('zone', None, 'Name of GCP zone with TPU.')
flags.DEFINE_integer('num_tpu_devices', 8, 'The expected number of TPUs.')
DEVICES_PER_TASK = 8
EXPECTED_DEVICES_PRE_CONNECT = [
'/device:CPU:0',
]
EXPECTED_NEW_DEVICES_AFTER_CONNECT_TEMPLATES = [
'/job:worker/replica:0/task:{task}/device:CPU:0',
'/job:worker/replica:0/task:{task}/device:XLA_CPU:0',
'/job:worker/replica:0/task:{task}/device:TPU_SYSTEM:0',
'/job:worker/replica:0/task:{task}/device:TPU:0',
'/job:worker/replica:0/task:{task}/device:TPU:1',
'/job:worker/replica:0/task:{task}/device:TPU:2',
'/job:worker/replica:0/task:{task}/device:TPU:3',
'/job:worker/replica:0/task:{task}/device:TPU:4',
'/job:worker/replica:0/task:{task}/device:TPU:5',
'/job:worker/replica:0/task:{task}/device:TPU:6',
'/job:worker/replica:0/task:{task}/device:TPU:7',
]
class RemoteCloudTPUTest(absltest.TestCase):
"""Test that we can connect to a real Cloud TPU."""
def test_connect(self):
# Log full diff on failure.
self.maxDiff = None # pylint:disable=invalid-name
self.assertCountEqual(
EXPECTED_DEVICES_PRE_CONNECT,
[device.name for device in config.list_logical_devices()])
resolver = tpu_cluster_resolver.TPUClusterResolver(
tpu=FLAGS.tpu, zone=FLAGS.zone, project=FLAGS.project
)
remote.connect_to_cluster(resolver)
expected_devices = EXPECTED_DEVICES_PRE_CONNECT
for task in range(FLAGS.num_tpu_devices // DEVICES_PER_TASK):
expected_devices.extend([
template.format(task=task)
for template in EXPECTED_NEW_DEVICES_AFTER_CONNECT_TEMPLATES
])
self.assertCountEqual(
expected_devices,
[device.name for device in config.list_logical_devices()])
tpu_strategy_util.initialize_tpu_system(resolver)
if __name__ == '__main__':
absltest.main()
| # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test that we can connect to a real Cloud TPU."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import flags
from absl.testing import absltest
from tensorflow.python.distribute.cluster_resolver import tpu_cluster_resolver
from tensorflow.python.eager import remote
from tensorflow.python.framework import config
from tensorflow.python.tpu import tpu_strategy_util
FLAGS = flags.FLAGS
flags.DEFINE_string('tpu', '', 'Name of TPU to connect to.')
flags.DEFINE_string('project', None, 'Name of GCP project with TPU.')
flags.DEFINE_string('zone', None, 'Name of GCP zone with TPU.')
flags.DEFINE_integer('num_tpu_devices', 8, 'The expected number of TPUs.')
DEVICES_PER_TASK = 8
EXPECTED_DEVICES_PRE_CONNECT = [
'/device:CPU:0',
'/device:XLA_CPU:0',
]
EXPECTED_NEW_DEVICES_AFTER_CONNECT_TEMPLATES = [
'/job:worker/replica:0/task:{task}/device:CPU:0',
'/job:worker/replica:0/task:{task}/device:XLA_CPU:0',
'/job:worker/replica:0/task:{task}/device:TPU_SYSTEM:0',
'/job:worker/replica:0/task:{task}/device:TPU:0',
'/job:worker/replica:0/task:{task}/device:TPU:1',
'/job:worker/replica:0/task:{task}/device:TPU:2',
'/job:worker/replica:0/task:{task}/device:TPU:3',
'/job:worker/replica:0/task:{task}/device:TPU:4',
'/job:worker/replica:0/task:{task}/device:TPU:5',
'/job:worker/replica:0/task:{task}/device:TPU:6',
'/job:worker/replica:0/task:{task}/device:TPU:7',
]
class RemoteCloudTPUTest(absltest.TestCase):
"""Test that we can connect to a real Cloud TPU."""
def test_connect(self):
# Log full diff on failure.
self.maxDiff = None # pylint:disable=invalid-name
self.assertCountEqual(
EXPECTED_DEVICES_PRE_CONNECT,
[device.name for device in config.list_logical_devices()])
resolver = tpu_cluster_resolver.TPUClusterResolver(
tpu=FLAGS.tpu, zone=FLAGS.zone, project=FLAGS.project
)
remote.connect_to_cluster(resolver)
expected_devices = EXPECTED_DEVICES_PRE_CONNECT
for task in range(FLAGS.num_tpu_devices // DEVICES_PER_TASK):
expected_devices.extend([
template.format(task=task)
for template in EXPECTED_NEW_DEVICES_AFTER_CONNECT_TEMPLATES
])
self.assertCountEqual(
expected_devices,
[device.name for device in config.list_logical_devices()])
tpu_strategy_util.initialize_tpu_system(resolver)
if __name__ == '__main__':
absltest.main()
| Python | 0 |
4dd36d68225311e328cc4a909b3c56bf9b6e8e53 | Create picture.py | picture.py | picture.py | from ggame import App
myapp = App()
myapp.run()
| Python | 0.000002 | |
b3a3376e90d1eede9b2d33d0a4965c1f4920f20a | Add a memoizer | memoize.py | memoize.py |
# from http://code.activestate.com/recipes/578231-probably-the-fastest-memoization-decorator-in-the-/
__all__ = ["memoize"]
def memoize(f):
""" Memoization decorator for a function taking one or more arguments. """
class memodict(dict):
def __getitem__(self, *key):
return dict.__getitem__(self, key)
def __missing__(self, key):
ret = self[key] = f(*key)
return ret
return memodict().__getitem__
| Python | 0.000022 | |
9a902049212ceea29b7b0e440acd33e3c63c7beb | Add timer tests script. | scripts/examples/02-Board-Control/timer_tests.py | scripts/examples/02-Board-Control/timer_tests.py | # Timer Test Example
#
# This example tests all the timers.
import time
from pyb import Pin, Timer, LED
blue_led = LED(3)
# Note: functions that allocate memory are Not allowed in callbacks
def tick(timer):
blue_led.toggle()
print("")
for i in range(1, 18):
try:
print("Testing TIM%d... "%(i), end="")
tim = Timer(i, freq=10) # create a timer object using timer 4 - trigger at 1Hz
tim.callback(tick) # set the callback to our tick function
time.sleep(1000)
tim.deinit()
except ValueError as e:
print(e)
continue
print("done!")
| Python | 0 | |
11447d409756f2bcd459a6db9d51967358272780 | move flags to constant module | hotline/constant.py | hotline/constant.py | # -*- coding: utf-8 -*-
class flags(object):
DontHide = object()
Hide = object()
_list = [DontHide, Hide]
| Python | 0.000002 | |
1840239be19af9599094c44f7b502509a87065b2 | Commit the initial codes | lambda.py | lambda.py | #!/usr/bin/env python
import abc
import sys
# constants
KEYWORDS = {"\\", "."}
# classes
## error
class Error:
def __init__(self, message):
self.message = message
def __str__(self):
return self.message
def append_message(self, message):
self.message = message + "\n" + self.message
## AST
class AstNode(metaclass=abc.ABCMeta):
@abc.abstractmethod
def eval(self, env : dict):
return NotImplemented
@abc.abstractmethod
def __str__(self):
return NotImplemented
class Variable(AstNode):
def __init__(self, name):
self.name = name
def __str__(self):
return self.name
def eval(self, env):
if name in env:
return env[name]
return Error("A variable, {} is not defined.".format(name))
class LambdaAbstraction(AstNode):
def __init__(self, argument : str, body):
self.__argument = argument
self.__body = body
def __str__(self):
return "\\" + self.argument + "." + str(self.__body)
@property
def argument(self):
return self.__argument
@property
def body(self):
return self.__body
def eval(self, env):
return self
class FunctionApplication(AstNode):
def __init__(self, right_expression, left_expression):
assert isinstance(right_expression, LambdaAbstraction)
self.right_expression = right_expression
self.left_expression = left_expression
def __str__(self):
return str(self.right_expression) + " " + str(self.left_expression)
def eval(self, env):
env[self.right_expression.argument] = left_expression.eval(env)
return self.right_expression.body.eval(env)
## parser
class Parser:
def parse(self, text):
self.text = text
expression = self.expression() # DEBUG
exit("OK") # DEBUG
result, _ = self.expression()(0)
return result
def expression(self):
return self.term()
def term(self):
def parser(pos):
result, pos = choice(self.function_application(),
self.variable(),
self.lambda_abstraction())(pos)
if isinstance(result, Error):
return result.append_message("A term is expected."), pos
return result, pos
return parser
def variable(self):
def parser(pos):
result, pos = self.identifier()(pos)
if isinstance(result, Error):
return result.append_message("A variable is expected."), pos
return Variable(result), pos
return parser
def lambda_abstraction(self):
def parser(pos):
results, pos = sequence(self.keyword("\\"),
self.identifier(),
self.keyword("."))(pos)
if isinstance(results, Error):
return results.append_message("A lambda abstraction is expected."), pos
result, pos = self.expression()(pos)
if isinstance(result, Error):
return result.append_message("An expression is expected."), pos
return LambdaAbstraction(results[1], result), pos
return parser
def function_application(self):
def parser(pos):
result_1, pos = self.expression()(pos)
if isinstance(result_1, Error):
return result_1.append_message("An expression is expected."), pos
result_2, pos = self.expression()(pos)
if isinstance(result_2, Error):
return result_2.append_message("An expression is expected."), pos
return FunctionApplication(result_1, result_2), pos
return parser
def identifier(self):
def parser(pos):
_, pos = self.blanks()(pos)
identifier = self.text[pos:].split()[0]
if len(identifier) > 0 \
and all(not identifier.startswith(keyword) for keyword in KEYWORDS):
return identifier, pos + len(identifier)
return Error("An identifier is expected."), pos
return parser
def keyword(self, keyword):
assert keyword in KEYWORDS
def parser(pos):
_, pos = self.blanks()(pos)
if self.text[:len(keyword)] == keyword:
return keyword, pos + len(keyword)
return Error("A keyword, \"{}\" is expected.".format(keyword)), pos
return parser
def blanks(self):
def parser(pos):
while self.text[pos] in {" ", "\t", "\n"}:
pos += 1
return None, pos
return parser
# functions
def choice(*parsers):
def parser(pos):
for parser in parsers:
result, pos = parser(pos)
if not isinstance(result, Error):
return result, pos
return result, pos
return parser
def sequence(*parsers):
def parser(pos):
results = []
for parser in parsers:
result, pos = parser(pos)
if isinstance(result, Error):
return result, pos
results.append(result)
return results, pos
return parser
def recursed(parser_generator, *arguments):
def parser(pos):
return parser_generator(*arguments)(pos)
return parser
def interpret(text):
return Parser().parse(text).eval({})
## utils
def usage():
exit("usage: {} [<file>]".format(sys.argv[0]))
# main routine
def main():
args = sys.argv[1:]
if len(args) == 0:
print(interpret(input()))
elif len(args) == 1:
with open(args[0]) as f:
print(interpret(f.read()))
usage()
if __name__ == "__main__":
main()
| Python | 0.999635 | |
0295a1cbad1dfa2443e6b8e8d639b7d845adaebf | Add lc0225_implement_stack_using_queues.py | lc0225_implement_stack_using_queues.py | lc0225_implement_stack_using_queues.py | """Leetcode 225. Implement Stack using Queues
Easy
URL: https://leetcode.com/problems/implement-stack-using-queues/
Implement the following operations of a stack using queues.
- push(x) -- Push element x onto stack.
- pop() -- Removes the element on top of the stack.
- top() -- Get the top element.
- empty() -- Return whether the stack is empty.
Example:
MyStack stack = new MyStack();
stack.push(1);
stack.push(2);
stack.top(); // returns 2
stack.pop(); // returns 2
stack.empty(); // returns false
Notes:
- You must use only standard operations of a queue -- which means only
push to back, peek/pop from front, size, and is empty operations are valid.
- Depending on your language, queue may not be supported natively. You may simulate
a queue by using a list or deque (double-ended queue), as long as you use only
standard operations of a queue.
- You may assume that all operations are valid (for example, no pop or top
operations will be called on an empty stack).
"""
class MyStack(object):
def __init__(self):
"""
Initialize your data structure here.
"""
pass
def push(self, x):
"""
Push element x onto stack.
:type x: int
:rtype: None
"""
pass
def pop(self):
"""
Removes the element on top of the stack and returns that element.
:rtype: int
"""
pass
def top(self):
"""
Get the top element.
:rtype: int
"""
pass
def empty(self):
"""
Returns whether the stack is empty.
:rtype: bool
"""
pass
def main():
# obj = MyStack()
# obj.push(x)
# param_2 = obj.pop()
# param_3 = obj.top()
# param_4 = obj.empty()
pass
if __name__ == '__main__':
main()
| Python | 0.000002 | |
0a48d3dc2286db9cff5ec757d8b5f0b45f35ab7d | update : some minor fixes | ptop/interfaces/__init__.py | ptop/interfaces/__init__.py | from .GUI import PtopGUI | Python | 0 | |
261b2477cf6f086028a1028c7d8a02f1b1631018 | add solution for Jump Game | src/jumpGame.py | src/jumpGame.py | class Solution:
# @param A, a list of integers
# @return a boolean
def canJump(self, A):
if not A:
return False
max_dist = 0
for i in xrange(len(A)):
if i > max_dist:
return False
max_dist = max(max_dist, i+A[i])
return True
| Python | 0 | |
5c0805edd7d54a070b7ce1942eadfc0b3ff2874b | Update memory.py | src/collectors/memory/memory.py | src/collectors/memory/memory.py | # coding=utf-8
"""
This class collects data on memory utilization
Note that MemFree may report no memory free. This may not actually be the case,
as memory is allocated to Buffers and Cache as well. See
[this link](http://www.linuxatemyram.com/) for more details.
#### Dependencies
* /proc/meminfo or psutil
"""
import diamond.collector
import diamond.convertor
import os
try:
import psutil
psutil # workaround for pyflakes issue #13
except ImportError:
psutil = None
_KEY_MAPPING = [
'MemTotal',
'MemFree',
'Buffers',
'Cached',
'Active',
'Dirty',
'Inactive',
'Shmem',
'SwapTotal',
'SwapFree',
'SwapCached',
'VmallocTotal',
'VmallocUsed',
'VmallocChunk',
'Committed_AS',
]
class MemoryCollector(diamond.collector.Collector):
PROC = '/proc/meminfo'
def get_default_config_help(self):
config_help = super(MemoryCollector, self).get_default_config_help()
config_help.update({
'detailed': 'Set to True to Collect all the nodes',
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(MemoryCollector, self).get_default_config()
config.update({
'enabled': 'True',
'path': 'memory',
'method': 'Threaded',
# Collect all the nodes or just a few standard ones?
# Uncomment to enable
#'detailed': 'True'
})
return config
def collect(self):
"""
Collect memory stats
"""
if os.access(self.PROC, os.R_OK):
file = open(self.PROC)
data = file.read()
file.close()
for line in data.splitlines():
try:
name, value, units = line.split()
name = name.rstrip(':')
value = int(value)
if (name not in _KEY_MAPPING
and 'detailed' not in self.config):
continue
for unit in self.config['byte_unit']:
value = diamond.convertor.binary.convert(value=value,
oldUnit=units,
newUnit=unit)
self.publish(name, value, metric_type='GAUGE')
# TODO: We only support one unit node here. Fix it!
break
except ValueError:
continue
return True
else:
if not psutil:
self.log.error('Unable to import psutil')
self.log.error('No memory metrics retrieved')
return None
phymem_usage = psutil.phymem_usage()
virtmem_usage = psutil.virtmem_usage()
units = 'B'
for unit in self.config['byte_unit']:
value = diamond.convertor.binary.convert(
value=phymem_usage.total, oldUnit=units, newUnit=unit)
self.publish('MemTotal', value, metric_type='GAUGE')
value = diamond.convertor.binary.convert(
value=phymem_usage.free, oldUnit=units, newUnit=unit)
self.publish('MemFree', value, metric_type='GAUGE')
value = diamond.convertor.binary.convert(
value=virtmem_usage.total, oldUnit=units, newUnit=unit)
self.publish('SwapTotal', value, metric_type='GAUGE')
value = diamond.convertor.binary.convert(
value=virtmem_usage.free, oldUnit=units, newUnit=unit)
self.publish('SwapFree', value, metric_type='GAUGE')
# TODO: We only support one unit node here. Fix it!
break
return True
return None
| # coding=utf-8
"""
This class collects data on memory utilization
Note that MemFree may report no memory free. This may not actually be the case,
as memory is allocated to Buffers and Cache as well. See
[this link](http://www.linuxatemyram.com/) for more details.
#### Dependencies
* /proc/meminfo or psutil
"""
import diamond.collector
import diamond.convertor
import os
try:
import psutil
psutil # workaround for pyflakes issue #13
except ImportError:
psutil = None
_KEY_MAPPING = [
'MemTotal',
'MemFree',
'Buffers',
'Cached',
'Active',
'Dirty',
'Inactive',
'Shmem',
'SwapTotal',
'SwapFree',
'SwapCached',
'VmallocTotal',
'VmallocUsed',
'VmallocChunk'
]
class MemoryCollector(diamond.collector.Collector):
PROC = '/proc/meminfo'
def get_default_config_help(self):
config_help = super(MemoryCollector, self).get_default_config_help()
config_help.update({
'detailed': 'Set to True to Collect all the nodes',
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(MemoryCollector, self).get_default_config()
config.update({
'enabled': 'True',
'path': 'memory',
'method': 'Threaded',
# Collect all the nodes or just a few standard ones?
# Uncomment to enable
#'detailed': 'True'
})
return config
def collect(self):
"""
Collect memory stats
"""
if os.access(self.PROC, os.R_OK):
file = open(self.PROC)
data = file.read()
file.close()
for line in data.splitlines():
try:
name, value, units = line.split()
name = name.rstrip(':')
value = int(value)
if (name not in _KEY_MAPPING
and 'detailed' not in self.config):
continue
for unit in self.config['byte_unit']:
value = diamond.convertor.binary.convert(value=value,
oldUnit=units,
newUnit=unit)
self.publish(name, value, metric_type='GAUGE')
# TODO: We only support one unit node here. Fix it!
break
except ValueError:
continue
return True
else:
if not psutil:
self.log.error('Unable to import psutil')
self.log.error('No memory metrics retrieved')
return None
phymem_usage = psutil.phymem_usage()
virtmem_usage = psutil.virtmem_usage()
units = 'B'
for unit in self.config['byte_unit']:
value = diamond.convertor.binary.convert(
value=phymem_usage.total, oldUnit=units, newUnit=unit)
self.publish('MemTotal', value, metric_type='GAUGE')
value = diamond.convertor.binary.convert(
value=phymem_usage.free, oldUnit=units, newUnit=unit)
self.publish('MemFree', value, metric_type='GAUGE')
value = diamond.convertor.binary.convert(
value=virtmem_usage.total, oldUnit=units, newUnit=unit)
self.publish('SwapTotal', value, metric_type='GAUGE')
value = diamond.convertor.binary.convert(
value=virtmem_usage.free, oldUnit=units, newUnit=unit)
self.publish('SwapFree', value, metric_type='GAUGE')
# TODO: We only support one unit node here. Fix it!
break
return True
return None
| Python | 0 |
99f9f14039749f8e3e4340d6bf0e0394e3483ca2 | add basic propfind test | protocol/test_protocol_propfind.py | protocol/test_protocol_propfind.py | from smashbox.utilities import *
from smashbox.utilities.hash_files import *
from smashbox.protocol import *
@add_worker
def main(step):
d = make_workdir()
reset_owncloud_account()
URL = oc_webdav_url()
ls_prop_desktop20(URL,depth=0)
logger.info("Passed 1")
ls_prop_desktop20(URL,depth=1)
logger.info("Passed 2")
ls_prop_desktop17(URL,depth=0)
logger.info("Passed 3")
ls_prop_desktop17(URL,depth=1)
logger.info("Passed 4")
all_prop_android(URL,depth=0)
logger.info("Passed 5")
all_prop_android(URL,depth=1)
logger.info("Passed 6")
| Python | 0 | |
6a4f9568560608f1a1a1a78b0968b4968c4990c7 | add agent.builtin tests. | test/unit/agent/test_builtin.py | test/unit/agent/test_builtin.py |
# Copyright (c) 2014 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public
# License as published by the Free Software Foundation; either version
# 2 of the License (GPLv2) or (at your option) any later version.
# There is NO WARRANTY for this software, express or implied,
# including the implied warranties of MERCHANTABILITY,
# NON-INFRINGEMENT, or FITNESS FOR A PARTICULAR PURPOSE. You should
# have received a copy of GPLv2 along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
from unittest import TestCase
from mock import Mock, patch
from gofer.agent.builtin import indent, signature
def remote(fn):
fn.gofer = {}
return fn
with patch('gofer.agent.builtin.remote', remote):
from gofer.agent.builtin import Admin
class Dog(object):
@staticmethod
@remote
def fn(name, age):
pass
@remote
def fn1(self, words):
pass
@remote
def fn2(self, words, *others):
pass
def fn3(self, words, *others, **keywords):
pass
class Plugin(object):
def __init__(self, name, enabled, dispatcher):
self.name = name
self._enabled = enabled
self.dispatcher = dispatcher
def enabled(self):
return self._enabled
class Action(object):
def __init__(self, name, interval):
self._name = name
self.interval = interval
def name(self):
return self._name
class Module(object):
@remote
def bar(self, n):
pass
@remote
def bar1(self, age):
pass
def bar2(self):
pass
HELP = """\
Plugins:
<plugin> animals
Classes:
<class> admin
methods:
cancel(sn, criteria)
hello()
help()
<class> dog
methods:
fn1(words)
fn2(words, *others)
Functions:
bar(n)
bar1(age)
Actions:
report {'hours': 24}
reboot {'minutes': 10}\
"""
class TestUtils(TestCase):
def test_indent(self):
fmt = 'My %s has nine lives'
cat = 'cat'
s = indent(fmt, 4, cat)
self.assertEqual(s, ' ' + fmt % cat)
def test_signature(self):
# function
fn = Dog.fn
self.assertEqual(signature(fn.__name__, fn), 'fn(name, age)')
# method
fn = Dog.fn1
self.assertEqual(signature(fn.__name__, fn), 'fn1(words)')
# method with varargs
fn = Dog.fn2
self.assertEqual(signature(fn.__name__, fn), 'fn2(words, *others)')
# method with varargs and keywords
fn = Dog.fn3
self.assertEqual(signature(fn.__name__, fn), 'fn3(words, *others, **keywords)')
class TestAdmin(TestCase):
@patch('gofer.agent.builtin.Tracker')
def test_cancel_sn(self, tracker):
sn = '1234'
admin = Admin()
canceled = admin.cancel(sn=sn)
tracker.return_value.cancel.assert_called_once_with(sn)
self.assertEqual(canceled, [tracker.return_value.cancel.return_value])
@patch('gofer.agent.builtin.Builder')
@patch('gofer.agent.builtin.Tracker')
def test_cancel_criteria(self, tracker, builder):
sn = '1234'
name = 'joe'
criteria = {'eq': name}
tracker.return_value.find.return_value = [sn]
# test
admin = Admin()
canceled = admin.cancel(criteria=criteria)
# validation
builder.return_value.build.assert_called_once_with(criteria)
tracker.return_value.cancel.assert_called_once_with(sn)
self.assertEqual(canceled, [tracker.return_value.cancel.return_value])
def test_hello(self):
admin = Admin()
self.assertEqual(admin.hello(), 'Hello, I am gofer agent')
@patch('gofer.agent.builtin.inspect.isfunction')
@patch('gofer.agent.builtin.inspect.ismodule')
@patch('gofer.agent.builtin.Actions')
@patch('gofer.agent.builtin.Plugin')
def test_help(self, plugin, actions, is_mod, is_fn):
is_mod.side_effect = lambda thing: thing == Module
is_fn.return_value = True
actions.return_value.collated.return_value = [
Action('report', dict(hours=24)),
Action('reboot', dict(minutes=10)),
]
dispatcher = Mock(catalog={
'admin': Admin,
'dog': Dog,
'mod': Module,
})
plugins = [
Plugin('animals', True, dispatcher),
Plugin('fish', False, None),
]
plugin.all.return_value = plugins
# test
admin = Admin()
s = admin.help()
# validation
self.assertEqual(s, HELP % {'plugin': plugins[0].name})
| Python | 0 | |
6bbdd1d9d60b03429dc2bc1ff3ba5d06353fad9a | Add a Bug class. | libzilla/bug.py | libzilla/bug.py | class Bug:
def __init__(self,
bug_number,
comment=None,
resolution=None,
status=None):
self.bug_number = bug_number
self.resolution = resolution
self.status = status
self.comment = comment
def __str__(self):
return """Bug #: [%s]
RESOLUTION: [%s]
STATUS: [%s]
Comment: %s""" % (
self.bug_number,
self.resolution,
self.status,
self.comment
)
| Python | 0 | |
2dad699b9281fea70c5e078166236f3175ef739a | add parse2.py file to take advantage of new patXML interface | parse2.py | parse2.py | #!/usr/bin/env python
import logging
# http://docs.python.org/howto/argparse.html
import argparse
import os
import datetime
import re
import mmap
import contextlib
import multiprocessing
import itertools
import sys
sys.path.append( '.' )
sys.path.append( './lib/' )
import shutil
from patXML import XMLPatent
from patXML import uniasc
from fwork import *
regex = re.compile(r"""
([<][?]xml[ ]version.*?[>] #all XML starts with ?xml
.*?
[<][/]us[-]patent[-]grant[>]) #and here is the end tag
""", re.I+re.S+re.X)
def list_files(directories, patentroot, xmlregex):
"""
Returns listing of all files within all directories relative to patentroot
whose filenames match xmlregex
"""
files = [patentroot+'/'+directory+'/'+fi for directory in directories for fi in \
os.listdir(patentroot+'/'+directory) \
if re.search(xmlregex, fi, re.I) != None]
return files
def parse_file(filename):
parsed_xmls = []
size = os.stat(filename).st_size
with open(filename,'r') as f:
with contextlib.closing(mmap.mmap(f.fileno(), size, access=mmap.ACCESS_READ)) as m:
parsed_xmls.extend(regex.findall(m))
return parsed_xmls
def parallel_parse(filelist):
pool = multiprocessing.Pool(multiprocessing.cpu_count())
return list(itertools.chain(*pool.imap_unordered(parse_file, filelist)))
# setup argparse
parser = argparse.ArgumentParser(description=\
'Specify source directory/directories for xml files to be parsed')
parser.add_argument('--directory','-d', type=str, nargs='+', default='',
help='comma separated list of directories relative to $PATENTROOT that \
parse.py will search for .xml files')
parser.add_argument('--patentroot','-p', type=str, nargs='?',
default=os.environ['PATENTROOT'] \
if os.environ.has_key('PATENTROOT') else '/',
help='root directory of all patent files/directories')
parser.add_argument('--xmlregex','-x', type=str,
nargs='?', default=r"ipg\d{6}.xml",
help='regex used to match xml files in each directory')
parser.add_argument('--verbosity', '-v', type = int,
nargs='?', default=0,
help='Set the level of verbosity for the computation. The higher the \
verbosity level, the less restrictive the print policy. 0 (default) \
= error, 1 = warning, 2 = info, 3 = debug')
# double check that variables are actually set
# we ignore the verbosity argument when determining
# if any variables have been set by the user
specified = [arg for arg in sys.argv if arg.startswith('-')]
nonverbose = [opt for opt in specified if '-v' not in opt]
if len(nonverbose)==0:
parser.print_help()
sys.exit(1)
# parse arguments and assign values
args = parser.parse_args()
DIRECTORIES = args.directory
XMLREGEX = args.xmlregex
PATENTROOT = args.patentroot
# adjust verbosity levels based on specified input
logging_levels = {0: logging.ERROR,
1: logging.WARNING,
2: logging.INFO,
3: logging.DEBUG}
VERBOSITY = logging_levels[args.verbosity]
logfile = "./" + 'xml-parsing.log'
logging.basicConfig(filename=logfile, level=VERBOSITY)
t1 = datetime.datetime.now()
#get a listing of all files within the directory that follow the naming pattern
files = list_files(DIRECTORIES, PATENTROOT, XMLREGEX)
logging.info("Total files: %d" % (len(files)))
# list of parsed xml strings
parsed_xmls = parallel_parse(files)
logging.info(" - Total Patents: %d" % (len(parsed_xmls)))
tables = ["assignee", "citation", "class", "inventor", "patent",\
"patdesc", "lawyer", "sciref", "usreldoc"]
total_count = 0
total_patents = 0
for filename in parsed_xmls:
xmllist = []
count = 0
patents = 0
for i, x in enumerate(parsed_xmls):
try:
xmllist.append(XMLPatent(x)) #TODO: this is the slow part (parallelize)
patents += 1
except Exception as inst:
#print type(inst)
logging.error(type(inst))
logging.error(" - Error: %s (%d) %s" % (filename, i, x[175:200]))
count += 1
#print xmllist
logging.info(" - number of patents: %d %s ", len(xmllist), datetime.datetime.now()-t1)
logging.info( " - number of errors: %d", count)
total_count += count
total_patents += patents
for table in tables:
# Cut the chaining here to better parameterize the call, allowing
# the databases to be built in place
# (/var/share/patentdata/patents/<year>)
# outdb = PATENTROOT + "/" + table
x.insertcallbacks[table]()
logging.info(" - %s", datetime.datetime.now()-t1)
logging.info(" - total errors: %d", total_count)
logging.info(" - total patents: %d", total_patents)
| Python | 0 | |
17bd35df32dd68d1cbc0fe73fcda186d13a66db0 | Add run.py | run.py | run.py | #!/usr/bin/env python3
#
# Copyright (c) 2014 Mark Samman <https://github.com/marksamman/pylinkshortener>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import threading
from app import app
from websocket import websocketThread
if __name__ == "__main__":
threading.Thread(target=websocketThread).start()
app.run()
| Python | 0.000009 | |
f1debd46a0d7d8442490f320602bbcb1702d529c | Create name_parser.py | name_parser.py | name_parser.py | #!/usr/bin/env python
def ParseFullName(full_name):
"""name_parser.ParseFullName
Purpose: Advanced name field Parseting that recognizes most multi-word last
names.
INPUTS:
full_name -- A string containing the name field to be parsed
OUTPUTS:
names -- A list of name dictionaries. Each name dictionary contains
a single person's first and last names.
ASSUMPTIONS:
- Each person has both a first and last name.
- full_name contains no salutations (Mr., Mrs., etc.), suffixes (Sr., Jr., etc.),
words in parentheses (like nicknames), or initials.
- Middle names are treated as part of a multi-word first name.
- Only multi-word last names with certain common prefixes (de, von, st., ect.)
are recognized.
- full_name contains either two or one name, following one of these formats:
1. <first1> <conjunction> <first2> <last>
2. <first1> <last 1> <conjunction> <first 2> <last2>
3. <first> <last>
where <first#> and <last#> may be multi-word names
"""
## separate the name field based on the conjunction
conjunction = None
if full_name.find(" and ") > 0:
conjunction = " and "
elif full_name.find(" And ") > 0:
conjunction = " And "
elif full_name.find(" & ") > 0:
conjunction = " & "
if conjunction != None:
full_name_list = full_name.split(conjunction)
## if parents have same last name, then there is only one name
## before the conjuction
## TBD - does not handle the multi-word first name case
if len(full_name_list[0].split(" ")) == 1:
answer = ParseType1Name(full_name_list)
else:
answer = ParseType2Name(full_name_list)
else:
answer = ParseType3Name(full_name)
return answer
def ParseType1Name(name_list):
"""name_parser.ParseType1Name
Puprpose: Parses a name field with the format <first1> <conjunction> <first2> <last>
INPUTS:
name_list -- A list of names in format ['<first1>','<first2> <last>']
OUTPUTS:
names -- A list of name dictionaries. Each name dictionary contains
a single person's first and last names.
"""
## last name for this type of name field resides in the name after the conjuction
second_name = ParseType3Name(name_list[1])
first_name = [{'first': name_list[0].strip(),
'last' : second_name[0]['last']}]
return first_name + second_name
def ParseType2Name(name_list):
"""name_parser.ParseType2Name
Puprpose: Parses a name field with the format <first1> <last1> <conjunction> <first2> <last2>
INPUTS:
name_list -- A list of names in format ['<first1> <last1>','<first2> <last2>']
OUTPUTS:
names -- A list of name dictionaries. Each name dictionary contains
a single person's first and last names.
"""
answer = ParseType3Name(name_list[0])
answer += ParseType3Name(name_list[1])
return answer
def ParseType3Name(full_name):
"""name_parser.ParseType3Name
Puprpose: Parses a name field with the format <first> <last>
INPUTS:
name_list -- A list of names in format ['<first> <last>']
OUTPUTS:
name -- A list of a name dictionary containing the person's first and last names.
"""
last_processed = -1
fname = ""
lname = ""
## split into words
name_parts = full_name.strip().split(" ")
## concat the first name
for i in range(len(name_parts)-1):
word = name_parts[i]
## move on to parsing the last name if we find an indicator of a compound
## last name (Von, Van, etc) we use i > 0 to allow for rare cases where an
## indicator is actually the first name (like "Von Fabella")
if IsCompoundLastName(word) and i > 0:
break;
last_processed = i
fname += " " + word.title()
## check that we have more than 1 word in our string
if len(name_parts) > 1:
## concat the last name
lname = ""
for i in range(last_processed+1,len(name_parts)):
lname += " " + name_parts[i].title()
else:
## otherwise, single word strings are assumed to be first names
fname = name_parts[0].title()
## return the various parts in an array
name = [{'first': fname.strip(),
'last' : lname.strip()}]
return name
## detect compound last names like "Von Fange"
def IsCompoundLastName(word):
## these are some common prefixes that identify a compound last names
prefix_words = ('vere','von','van','de','del','della','di','da','d',\
'pietro','vanden','du','st.','st','la','ter','o')
return word.lower() in prefix_words
def main():
test_names = ("Jane Doe", \
"Zane D Souza", \
"Stephanie de Rayal", \
"Edward de Sa", \
"Stewart O Flynn",\
"Claude van Damme",
"John and Jane Smith",
"John And Jane Smith",
"John & Jane Smith",
"Joe Schmoe and Betty Davis",
"Joan and Marc Edward Vlasic",
"Marc Edward and Joan Vlasic") ## TBD - expect this one will not parse correctly
for name in test_names:
answer = ParseFullName(name)
print answer
if __name__ == '__main__':
main()
| Python | 0 | |
7613285ba24990d62cf2273387a143aa74ce8bb0 | add shortcut to send sms message | nexmo/utils.py | nexmo/utils.py | from .libpynexmo.nexmomessage import NexmoMessage
from django.conf import settings
def send_message(to, message):
"""Shortcut to send a sms using libnexmo api.
Usage:
>>> from nexmo import send_message
>>> send_message('+33612345678', 'My sms message body')
"""
params = {
'username': settings.NEXMO_USERNAME,
'password': settings.NEXMO_PASSWORD,
'type': 'unicode',
'from': settings.NEXMO_FROM,
'to': to,
'text': message.encode('utf-8'),
}
sms = NexmoMessage(params)
response = sms.send_request()
return response
| Python | 0.000001 | |
49b68f35bb6555eaad7cd5e3bfeb4e7fadb500ba | Add intermediate tower 4 | pythonwarrior/towers/intermediate/level_004.py | pythonwarrior/towers/intermediate/level_004.py | # ----
# |C s |
# | @ S|
# |C s>|
# ----
level.description("Your ears become more in tune with the surroundings. "
"Listen to find enemies and captives!")
level.tip("Use warrior.listen to find spaces with other units, "
"and warrior.direction_of to determine what direction they're in.")
level.clue("Walk towards an enemy or captive with "
"warrior.walk_(warrior.direction_of(warrior.listen()[0])), "
"once len(warrior.listen()) == 0 then head for the stairs.")
level.time_bonus(55)
level.ace_score(144)
level.size(4, 3)
level.stairs(3, 2)
def add_abilities(warrior):
warrior.add_abilities('listen')
warrior.add_abilities('direction_of')
level.warrior(1, 1, 'east', func=add_abilities)
level.unit('captive', 0, 0, 'east')
level.unit('captive', 0, 2, 'east')
level.unit('sludge', 2, 0, 'south')
level.unit('thick_sludge', 3, 1, 'west')
level.unit('sludge', 2, 2, 'north')
| Python | 0.998117 | |
7662d0a0701381b37f60d42e7dbf04d7950c18ad | add management command to print out duplicate bihar tasks | custom/bihar/management/commands/bihar_cleanup_tasks.py | custom/bihar/management/commands/bihar_cleanup_tasks.py | import csv
from django.core.management.base import BaseCommand
from corehq.apps.hqcase.utils import get_cases_in_domain
from dimagi.utils.decorators.log_exception import log_exception
class Command(BaseCommand):
"""
Creates the backlog of repeat records that were dropped when bihar repeater
infrastructure went down.
"""
@log_exception()
def handle(self, *args, **options):
domain = 'care-bihar'
root_types = ('cc_bihar_pregnancy', 'cc_bihar_newborn')
TASK_TYPE = 'task'
# loop through all mother cases, then all child cases
# for each case get all associated tasks
# if any duplicates found, clean up / print them
with open('bihar-duplicate-tasks.csv', 'wb') as f:
writer = csv.writer(f, dialect=csv.excel)
_dump_headings(writer)
for case_type in root_types:
for parent_case in get_cases_in_domain(domain, case_type):
try:
tasks = filter(lambda subcase: subcase.type == TASK_TYPE, parent_case.get_subcases())
if tasks:
types = [_task_id(t) for t in tasks]
unique_types = set(types)
if len(unique_types) != len(tasks):
for type_being_checked in unique_types:
matching_cases = [t for t in tasks if _task_id(t) == type_being_checked]
if len(matching_cases) > 1:
_dump(parent_case, matching_cases, writer)
except Exception, e:
print 'error with case %s (%s)' % (parent_case._id, e)
def _task_id(task_case):
id = getattr(task_case, 'task_id', None)
if id is None:
print '%s has no task id' % task_case._id
return id
def _dump_headings(csv_writer):
csv_writer.writerow([
'parent case id',
'task case id',
'task id',
'date created',
'closed?',
'keep?',
])
def _dump(parent, tasklist, csv_writer):
tasklist = sorted(tasklist, key=lambda case: (not case.closed, case.opened_on))
for i, task in enumerate(tasklist):
csv_writer.writerow([
parent._id,
task._id,
_task_id(task),
task.opened_on,
task.closed,
i==0,
])
| Python | 0.000001 | |
7d4cdd3fbbf52143d0b1297a6695ce6b336b2b9d | Add a preprocessor script that does n-gram expansion based on either POS tags or words. For every expansion we start with a word unigram, and then try to extend to a part of speech tag or a word. | util/extend.py | util/extend.py | #!/usr/bin/python
#
# Word and part of speech expansion preprocessor script.
#
import math
import sys
def extractWordTags(line):
wordTags = []
return wordTags
def expansionFactor(badFreq):
return 1 + math.exp(-0.5 * float(badFreq))
def readCorpus(filename):
position = 0
words = dict()
tags = dict()
corpusFile = open(filename, 'r')
for line in corpusFile:
lineParts = line.strip().split()
for part in lineParts:
(word, tag) = part.rsplit('/', 1)
if not words.has_key(word):
words[word] = set()
words[word].add(position)
if not tags.has_key(tag):
tags[tag] = set()
tags[tag].add(position)
position += 1
return (words, tags)
def extractWordTags(line):
lineParts = line.strip().split()
return map(lambda x: x.rsplit('/', 1), lineParts)
def expandCorpus(filename, wordsOk, tagsOk, wordsErr, tagsErr, sentFile, formFile):
corpusFile = open(filename, 'r')
for line in corpusFile:
wordTags = extractWordTags(line)
for i in range(len(wordTags)):
ngram = [wordTags[i][0]]
okIdx = wordsOk.get(wordTags[i][0], set())
errIdx = wordsErr[wordTags[i][0]]
susp = float(len(errIdx)) / (len(errIdx) + len(okIdx))
for j in range(i + 1, len(wordTags)):
newOkIdx = set(map(lambda x: x + 1, okIdx))
newErrIdx = set(map(lambda x: x + 1, errIdx))
# Expand with a tag?
tag = wordTags[j][1]
tagOkIdx = tagsOk.get(tag, set()).intersection(newOkIdx)
tagErrIdx = tagsErr.get(tag, set()).intersection(newErrIdx)
if len(tagErrIdx) + len(tagOkIdx) == 0:
newSusp = 0.0
else:
newSusp = float(len(tagErrIdx)) / (len(tagErrIdx) + len(tagOkIdx))
ef = expansionFactor(len(tagErrIdx))
if newSusp > susp * ef:
ngram.append(tag)
okIdx = tagOkIdx
errIdx = tagErrIdx
susp = newSusp
continue
# Try word expansion
word = wordTags[j][0]
wordOkIdx = wordsOk.get(word, set()).intersection(newOkIdx)
wordErrIdx = wordsErr.get(word, set()).intersection(newErrIdx)
if len(wordErrIdx) + len(wordOkIdx) == 0:
break
else:
newSusp = float(len(wordErrIdx)) / (len(wordErrIdx) + len(wordOkIdx))
ef = expansionFactor(len(wordErrIdx))
if newSusp > susp:
ngram.append(word)
okIdx = wordOkIdx
errIdx = wordErrIdx
susp = newSusp
continue
break
ngramStr = ('_').join(ngram)
formFile.write("%s %f %d %d\n" % (ngramStr, susp, len(okIdx), len(errIdx)))
sentFile.write(('_').join(ngram))
sentFile.write(' ')
sentFile.write('\n')
if __name__ == "__main__":
if len(sys.argv) != 5:
print "Usage: %s good bad sent_out forms_out" % sys.argv[0]
sys.exit(1)
sys.stderr.write("Constructing hashtables...")
(wordsOk, tagsOk) = readCorpus(sys.argv[1])
(wordsErr, tagsErr) = readCorpus(sys.argv[2])
sys.stderr.write(" done!\n")
sentFile = open(sys.argv[3], "w")
formFile = open(sys.argv[4], "w")
expandCorpus(sys.argv[2], wordsOk, tagsOk, wordsErr, tagsErr, sentFile,
formFile)
| Python | 0.000004 | |
c9f5c9bf8fffe4f513fc8564eb92ca86a76f4087 | add cluster | mypy/cluster.py | mypy/cluster.py | import os
from os.path import split
import numpy as np
from scipy import sparse
from scipy.io import loadmat
# import matplotlib.pyplot as plt
from mne.stats.cluster_level import _find_clusters
base_dir = split(split(__file__)[0])[0]
chan_path = os.path.join(base_dir, 'data', 'chan')
# read channel connectivity
def get_chan_conn():
pth = os.path.join(chan_path, 'BioSemi64_chanconn.mat')
return loadmat(pth)['chan_conn'].astype('bool')
# plt.imshow(chan_conn, interpolation='none')
# plt.show()
def cluster_1d(data, connectivity=None):
if connectivity is not None:
connectivity = sparse.coo_matrix(connectivity)
return _find_clusters(data, 0.5, connectivity=connectivity)
def cluster_3d(matrix, chan_conn):
'''
parameters
----------
matrix - 3d matrix: channels by dim2 by dim3
chan_conn - 2d boolean matrix with information about
channel adjacency. If chann_conn[i, j] is True that
means channel i and j are adjacent.
returns
-------
clusters - 3d integer matrix with cluster labels
'''
# matrix has to be bool
assert matrix.dtype == np.bool
# nested import
from skimage.measure import label
# label each channel separately
clusters = np.zeros(matrix.shape, dtype='int')
max_cluster_id = 0
n_chan = matrix.shape[0]
for ch in range(n_chan):
clusters[ch, :, :] = label(matrix[ch, :, :],
connectivity=1, background=False)
# relabel so that layers do not have same cluster ids
num_clusters = clusters[ch, :, :].max()
clusters[ch, clusters[ch,:]>0] += max_cluster_id
max_cluster_id += num_clusters
# unrolled views into clusters for ease of channel comparison:
unrolled = [clusters[ch, :].ravel() for ch in range(n_chan)]
# check channel neighbours and merge clusters across channels
for ch in range(n_chan-1): # last chan will be already checked
ch1 = unrolled[ch]
ch1_ind = np.where(ch1)[0]
if ch1_ind.shape[0] == 0:
continue # no clusters, no fun...
# get unchecked neighbours
neighbours = np.where(chan_conn[ch+1:, ch])[0]
if neighbours.shape[0] > 0:
neighbours += ch + 1
for ngb in neighbours:
ch2 = unrolled[ngb]
for ind in ch1_ind:
# relabel clusters if adjacent and not the same id
if ch2[ind] and not (ch1[ind] == ch2[ind]):
c1 = min(ch1[ind], ch2[ind])
c2 = max(ch1[ind], ch2[ind])
clusters[clusters==c2] = c1
return clusters
def relabel_mat(mat, label_map):
'''change values in a matrix of integers such that mapping given
in label_map dict is fulfilled
parameters
----------
mat - numpy array of integers
label_map - dictionary, how to remap integer labels
returns
-------
mat_relab - relabeled numpy array
'''
mat_relab = mat.copy()
for k, v in label_map.items():
mat_relab[mat == k] = v
return mat_relab
| Python | 0.000002 | |
245879ce699b275edc3ee17e4cba1146241f25de | Add GLib mainllop transport for xmlrpcserver | wizbit/xmlrpcdeferred.py | wizbit/xmlrpcdeferred.py | import gobject
import xmlrpclib
class XMLRPCDeferred (gobject.GObject):
"""Object representing the delayed result of an XML-RPC
request.
.is_ready: bool
True when the result is received; False before then.
.value : any
Once is_ready=True, this attribute contains the result of the
request. If this value is an instance of the xmlrpclib.Fault
class, then some exception occurred during the request's
processing.
"""
__gsignals__ = {
'ready': (gobject.SIGNAL_RUN_FIRST, gobject.TYPE_NONE, ())
}
def __init__ (self, transport, http):
self.__gobject_init__()
self.transport = transport
self.http = http
self.value = None
self.is_ready = False
sock = self.http._conn.sock
self.src_id = gobject.io_add_watch(sock,
gobject.IO_IN | gobject.IO_HUP,
self.handle_io)
def handle_io (self, source, condition):
# Triggered when there's input available on the socket.
# The assumption is that all the input will be available
# relatively quickly.
self.read()
# Returning false prevents this callback from being triggered
# again. We also remove the monitoring of this file
# descriptor.
gobject.source_remove(self.src_id)
return False
def read (self):
errcode, errmsg, headers = self.http.getreply()
if errcode != 200:
raise ProtocolError(
host + handler,
errcode, errmsg,
headers
)
try:
result = xmlrpclib.Transport._parse_response(self.transport,
self.http.getfile(), None)
except xmlrpclib.Fault, exc:
result = exc
self.value = result
self.is_ready = True
self.emit('ready')
def __len__ (self):
# XXX egregious hack!!!
# The code in xmlrpclib.ServerProxy calls len() on the object
# returned by the transport, and if it's of length 1 returns
# the contained object. Therefore, this __len__ method
# returns a completely fake length of 2.
return 2
class GXMLRPCTransport (xmlrpclib.Transport):
def request(self, host, handler, request_body, verbose=0):
# issue XML-RPC request
h = self.make_connection(host)
if verbose:
h.set_debuglevel(1)
self.send_request(h, handler, request_body)
self.send_host(h, host)
self.send_user_agent(h)
self.send_content(h, request_body)
self.verbose = verbose
return XMLRPCDeferred(self, h)
| Python | 0 | |
0880d067f478ba6474e433e620a1e48e23ed9c34 | Add nginx+uWSGI for 10% perf improvement over gunicorn | wsgi/setup_nginxuwsgi.py | wsgi/setup_nginxuwsgi.py | import subprocess
import multiprocessing
import os
bin_dir = os.path.expanduser('~/FrameworkBenchmarks/installs/py2/bin')
config_dir = os.path.expanduser('~/FrameworkBenchmarks/config')
NCPU = multiprocessing.cpu_count()
def start(args):
try:
subprocess.check_call('sudo /usr/local/nginx/sbin/nginx -c ' +
config_dir + '/nginx_uwsgi.conf', shell=True)
# Run in the background, but keep stdout/stderr for easy debugging
subprocess.Popen(bin_dir + '/uwsgi --ini ' + config_dir + '/uwsgi.ini' +
' --processes ' + str(NCPU) +
' --wsgi hello:app',
shell=True, cwd='wsgi')
return 0
except subprocess.CalledProcessError:
return 1
def stop():
subprocess.call('sudo /usr/local/nginx/sbin/nginx -s stop', shell=True)
subprocess.call(bin_dir + '/uwsgi --ini ' + config_dir + '/uwsgi_stop.ini', shell=True)
return 0
| Python | 0 | |
8fe4b03d7944f2facf8f261d440e7220c4d1228b | add file | Python/mapexp.py | Python/mapexp.py | #!/usr/bin/env python
"""
map(fun, iterable,...)
Return a list of the results of applying the function to the items of
the argument sequence(s). If more than one sequence is given, the
function is called with an argument list consisting of the corresponding
item of each sequence, substituting None for missing values when not all
sequences have the same length. If the function is None, return a list of the items of the sequence (or a list of tuples if more than one sequenc:e).
"""
li1 = [1, 2, 3, 4, 5]
li2 = [1, 2, 3, 4, 7]
li3 = [2, 3, 4, "hehe"]
#return a list of sum of corresponding element in each list
ret = map(lambda x,y : x + y, li1, li2)
print ret
ret = map(lambda x, y : str(x) + str(y), li2, li3 )
print ret
#return a tubple consis of corresponding items from two list
ret = map(None, li2, li3)
print ret
#convert to list of list
ret = map(list, ret)
print ret
#flat list
la = []
for e in ret:
la += e
print la
#flat list
ret = reduce(lambda x, y: x + y, ret)
print ret
| Python | 0.000001 | |
6dd4b6ee9b9457d2362404aac71fd73e907bf535 | Add Timeline class. | source/vistas/ui/controls/timeline.py | source/vistas/ui/controls/timeline.py | import datetime
from bisect import insort
class TimelineFilter:
pass # Todo: implement
class Timeline:
_global_timeline = None
@classmethod
def app(cls):
""" Global timeline """
if cls._global_timeline is None:
cls._global_timeline = Timeline()
return cls._global_timeline
def __init__(self, start_time=None, end_time=None, current_time=None):
self._start_time = start_time
self._end_time = end_time
self._current_time = current_time
self._min_step = None
self._timestamps = []
# Todo: implement TimelineFilter
# self._filtered_timestamps = [] # potentially unneeded, since we can filter on the fly now
# self._use_filter = False
@property
def timestamps(self):
# return self._filtered_timestamps if self._use_filter else self._timestamps
return self._timestamps
@property
def num_timestamps(self):
return len(self._timestamps)
@property
def start_time(self):
return self._start_time
@start_time.setter
def start_time(self, value: datetime.datetime):
self._start_time = value
# Todo: TimelineEvent?
@property
def end_time(self):
return self._end_time
@end_time.setter
def end_time(self, value):
self._end_time = value
# Todo: TimelineEvent?
@property
def current_time(self):
return self._current_time
@current_time.setter
def current_time(self, value: datetime.datetime):
if value not in self._timestamps:
if value > self._timestamps[-1]:
value = self._timestamps[-1]
elif value < self._timestamps[0]:
value = self._timestamps[0]
else:
# Go to nearest floor step
value = list(filter(lambda x: x > value, self._timestamps))[0]
self._current_time = value
# Todo: TimelineEvent?
@property
def min_step(self):
return self._min_step
def reset(self):
self._timestamps = []
self._start_time, self._end_time, self._current_time = [datetime.datetime.fromtimestamp(0)] * 3
def add_timestamp(self, timestamp: datetime.datetime):
if timestamp not in self._timestamps:
if timestamp > self._timestamps[-1]:
self.end_time = timestamp
elif timestamp < self._timestamps[0]:
self.start_time = timestamp
insort(self._timestamps, timestamp) # unique and sorted
# recalculate smallest timedelta
self._min_step = self._timestamps[-1] - self._timestamps[0]
for i in range(len(self._timestamps) - 1):
diff = self._timestamps[i+1] - self._timestamps[i+1]
self._min_step = diff if diff < self._min_step else self._min_step
def index_at_time(self, time: datetime.datetime):
return self._timestamps.index(time)
@property
def current_index(self):
return self.index_at_time(self._current_time)
def time_at_index(self, index):
return self.timestamps[index]
| Python | 0 | |
560d82cd4b7de72a4fada77b0fe13bfb1caa9790 | package script | package.py | package.py | import os
import sys
for root, dirs, files in os.walk(os.path.dirname(os.path.abspath(__file__))):
for name in files:
if name.endswith((".java")):
file = open(name, "r")
lines = file.readlines()
file.close()
file = open(name, "w")
for line in lines:
if "package" not in line:
file.write(line)
file.close()
#filename = "hello.java"
#file = open(filename, "r")
#lines = file.readlines()
#file.close()
#file = open(filename, "w")
#for line in lines:
# if "package" not in line:
# file.write(line)
#
#file.close()
| Python | 0.000002 | |
cc9ce576a33c60acc9f60f12b42e56f474b760ac | Add json_jinja renderer | salt/renderers/json_jinja.py | salt/renderers/json_jinja.py | '''
The default rendering engine, yaml_jinja, this renderer will take a yaml file
with the jinja template and render it to a high data format for salt states.
'''
# Import python libs
import os
import json
# Import Third Party libs
from jinja2 import Template
def render(template):
'''
Render the data passing the functions and grains into the rendering system
'''
if not os.path.isfile(template):
return {}
passthrough = {}
passthrough.update(__salt__)
passthrough.update(__grains__)
template = Template(open(template, 'r').read())
json_data = template.render(**passthrough)
return json.loads(json_data)
| Python | 0.000668 | |
e35711d368faadaa017186200092297f264648fe | Add web ui | nodes/web_ui.py | nodes/web_ui.py | #!/usr/bin/env python
import roslib; roslib.load_manifest('rospilot')
import rospy
from pymavlink import mavutil
import rospilot.msg
from BaseHTTPServer import BaseHTTPRequestHandler,HTTPServer
PORT_NUMBER = 8085
armed = None
#This class will handles any incoming request from
#the browser
class HttpHandler(BaseHTTPRequestHandler):
#Handler for the GET requests
def do_GET(self):
self.send_response(200)
self.send_header('Content-type','text/html')
self.send_header('Refresh','1;url=/')
self.end_headers()
# Send the html message
self.wfile.write(
"<html>"
"<body>"
"<h2" + (" style='color:red;'" if armed else "") + ">"
+ ("ARMED" if armed else "DISARMED") +
"</h2>"
"<a href='/arm'>"
+ ("disarm" if armed else "arm") +
"</a>"
"</body>"
"</html>")
if 'arm' in self.path:
node.send_arm(not armed)
return
class WebUiNode:
def __init__(self):
self.pub_set_mode = rospy.Publisher('set_mode', rospilot.msg.BasicMode)
rospy.Subscriber("basic_status", rospilot.msg.BasicMode, self.handle_status)
self.http_server = HTTPServer(('', PORT_NUMBER), HttpHandler)
def handle_status(self, data):
global armed
armed = data.armed
def send_arm(self, arm):
self.pub_set_mode.publish(arm)
def run(self):
rospy.init_node('rospilot_webui')
rospy.loginfo("Running")
while not rospy.is_shutdown():
self.http_server.handle_request()
self.http_server.close()
if __name__ == '__main__':
node = WebUiNode()
node.run()
| Python | 0.000001 | |
c8ede03a393ae1287a9a34e86af40cd6a8b3027b | add missing module (RBL-3757) | mint/targets.py | mint/targets.py | #
# Copyright (c) 2008 rPath, Inc.
#
# All Rights Reserved
#
from mint import database, mint_error
import simplejson
class TargetsTable(database.KeyedTable):
name = 'Targets'
key = 'targetId'
fields = ('targetId', 'targetType', 'targetName')
def addTarget(self, targetType, targetName):
cu = self.db.cursor()
targetId = self.getTargetId(targetType, targetName, None)
if targetId:
raise mint_error.TargetExists( \
"Target named '%s' of type '%s' already exists",
targetName, targetType)
cu = cu.execute("INSERT INTO Targets (targetType, targetName) VALUES(?, ?)", targetType, targetName)
self.db.commit()
return cu.lastid()
def getTargetId(self, targetType, targetName, default = -1):
cu = self.db.cursor()
cu.execute("""SELECT targetId FROM Targets WHERE targetType=?
AND targetName=?""", targetType, targetName)
res = cu.fetchone()
if res:
return res[0]
if default == -1:
raise mint_error.TargetMissing("No target named '%s' of type '%s'",
targetName, targetType)
return default
def deleteTarget(self, targetId):
cu = self.db.cursor()
cu.execute("DELETE FROM Targets WHERE targetId=?", targetId)
self.db.commit()
class TargetDataTable(database.DatabaseTable):
name = 'TargetData'
fields = ('targetId', 'name', 'value')
def addTargetData(self, targetId, targetData):
cu = self.db.cursor()
# perhaps check the id to be certain it's unique
for name, value in targetData.iteritems():
value = simplejson.dumps(value)
cu.execute("INSERT INTO TargetData VALUES(?, ?, ?)",
targetId, name, value)
self.db.commit()
def getTargetData(self, targetId):
cu = self.db.cursor()
cu.execute("SELECT name, value FROM TargetData WHERE targetId=?",
targetId)
res = {}
for name, value in cu.fetchall():
res[name] = simplejson.loads(value)
return res
def deleteTargetData(self, targetId):
cu = self.db.cursor()
cu.execute("DELETE FROM TargetData WHERE targetId=?", targetId)
self.db.commit()
| Python | 0 | |
3709bcbd421d82f9404ab3b054989546d95c006f | Fix another broken sc2reader.plugins reference. | sc2reader/scripts/sc2json.py | sc2reader/scripts/sc2json.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals, division
import sc2reader
from sc2reader.factories.plugins.replay import toJSON
def main():
import argparse
parser = argparse.ArgumentParser(description="Prints replay data to a json string.")
parser.add_argument('--indent', '-i', type=int, default=None, help="The per-line indent to use when printing a human readable json string")
parser.add_argument('--encoding', '-e', type=str, default='UTF-8', help="The character encoding use..")
parser.add_argument('path', metavar='path', type=str, nargs=1, help="Path to the replay to serialize.")
args = parser.parse_args()
factory = sc2reader.factories.SC2Factory()
factory.register_plugin("Replay", toJSON(encoding=args.encoding, indent=args.indent))
replay_json = factory.load_replay(args.path[0])
print(replay_json)
if __name__ == '__main__':
main()
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals, division
import sc2reader
from sc2reader.plugins.replay import toJSON
def main():
import argparse
parser = argparse.ArgumentParser(description="Prints replay data to a json string.")
parser.add_argument('--indent', '-i', type=int, default=None, help="The per-line indent to use when printing a human readable json string")
parser.add_argument('--encoding', '-e', type=str, default='UTF-8', help="The character encoding use..")
parser.add_argument('path', metavar='path', type=str, nargs=1, help="Path to the replay to serialize.")
args = parser.parse_args()
factory = sc2reader.factories.SC2Factory()
factory.register_plugin("Replay", toJSON(encoding=args.encoding, indent=args.indent))
replay_json = factory.load_replay(args.path[0])
print(replay_json)
if __name__ == '__main__':
main()
| Python | 0 |
c3c559f893e31e728a429cf446039781cea1f25d | Add unit tests for `%tensorflow_version` | tests/test_tensorflow_magics.py | tests/test_tensorflow_magics.py | # Copyright 2019 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the `%tensorflow_version` magic."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import unittest
from google.colab import _tensorflow_magics
class TensorflowMagicsTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
super(TensorflowMagicsTest, cls).setUpClass()
cls._original_version = _tensorflow_magics._tf_version
cls._original_sys_path = sys.path[:]
def setUp(self):
super(TensorflowMagicsTest, self).setUp()
_tensorflow_magics._tf_version = self._original_version
sys.path[:] = self._original_sys_path
def test_switch_1x_to_2x(self):
_tensorflow_magics._tensorflow_version("2.x")
tf2_path = _tensorflow_magics._available_versions["2.x"]
self.assertEqual(sys.path[1:], self._original_sys_path)
self.assertTrue(sys.path[0].startswith(tf2_path), (sys.path[0], tf2_path))
def test_switch_back(self):
_tensorflow_magics._tensorflow_version("2.x")
_tensorflow_magics._tensorflow_version("1.x")
self.assertEqual(sys.path, self._original_sys_path)
if __name__ == "__main__":
unittest.main()
| Python | 0 | |
cca75f45b80b72fdd4db9c06544474a2f6a40aa0 | Add a hack for importing Item data from a postgresql dump. | kirppu/management/commands/import_old_item_data.py | kirppu/management/commands/import_old_item_data.py | from collections import defaultdict
from decimal import Decimal
import sys
import re
from django.core.exceptions import ObjectDoesNotExist
from django.db import transaction
from django.utils.dateparse import parse_datetime
from django.core.management.base import BaseCommand
try:
from typing import Dict, List
except ImportError:
class _AbstractType(object):
def __getitem__(self, item): pass
Dict = _AbstractType()
List = _AbstractType()
from kirppu.models import Item, Vendor
from kirppuauth.models import User
# noinspection SpellCheckingInspection,PyPep8Naming
class PostgreDumpParser(object):
def __init__(self, file_name):
self._file_name = file_name
self._data = defaultdict(list) # type: Dict[str, List[Dict[str, str]]]
@property
def data(self):
return self._data
def parse(self, handle=None):
with (handle or open(self._file_name, "r")) as stream:
current_line_data = None
for line in stream:
if line.endswith("\n"):
line = line[:-1]
if current_line_data is not None:
if line == "\\.":
current_line_data = None
continue
if self.parse_STDIN(line, *current_line_data):
continue
if line.strip() == "":
continue
if line.startswith("COPY "):
current_line_data = self.parse_COPY(line)
@staticmethod
def parse_COPY(line):
m = re.match("COPY (?P<table>[\w_]+) \((?P<columns>(?:\w+, )*\w+)\) FROM stdin;", line)
if m is None:
raise ValueError("Not understood copy: " + line)
table = m.group("table")
columns = m.group("columns").split(", ")
return table, columns
def parse_STDIN(self, line, table, columns):
parts = line.split("\t")
assert len(parts) == len(columns), "Sizes differ: {} != {}: {}".format(len(parts), len(columns), line)
data = {
column: value
for column, value in zip(columns, parts)
}
self._data[table].append(data)
class TypeConverter(object):
@staticmethod
def int(inp):
return int(inp) if inp != "\\N" else None
@staticmethod
def str(inp):
return str(inp) if inp != "\\N" else None
@staticmethod
def bool(inp):
return (True, False)["tf".index(inp)] if inp != "\\N" else None
@staticmethod
def decimal(inp):
return Decimal(inp) if inp != "\\N" else None
@staticmethod
def datetime(inp):
return parse_datetime(inp) if inp != "\\N" else None
ItemColumnTypes = {
"hidden": "bool",
"lost_property": "bool",
"box_id": "int",
"vendor_id": "int",
"code": "str",
"abandoned": "bool",
"type": "str",
"price": "decimal",
"printed": "bool",
"adult": "str",
"itemtype": "str",
"name": "str",
"id": "int",
}
UserColumnTypes = {
"password": "str",
"last_login": "datetime",
"is_superuser": "bool",
"username": "str",
"first_name": "str",
"last_name": "str",
"email": "str",
"is_staff": "bool",
"is_active": "bool",
"date_joined": "datetime",
"phone": "str",
"last_checked": "datetime",
"id": "int",
}
VendorColumnTypes = {
"terms_accepted": "str",
"id": "int",
}
# noinspection SpellCheckingInspection
class DbConverter(object):
def __init__(self, table_name):
self._table_name = table_name
self._result = []
def parse(self, data):
p = getattr(self, "_parse_" + self._table_name)
return [
p(row)
for row in data
]
@staticmethod
def _parse_kirppu_item(row):
attrs = {
col: getattr(TypeConverter, ItemColumnTypes[col])(row[col])
for col in [
"hidden",
"lost_property",
"box_id",
"vendor_id",
"code",
"abandoned",
"type",
"price",
"printed",
"adult",
"itemtype",
"name",
]
}
r = Item(**attrs)
return r
@staticmethod
def _parse_kirppuauth_user(row):
attrs = {
col: getattr(TypeConverter, UserColumnTypes[col])(row[col])
for col in [
"password",
"last_login",
"is_superuser",
"username",
"first_name",
"last_name",
"email",
"is_staff",
"is_active",
"date_joined",
"phone",
"last_checked",
]
}
r = User(**attrs)
return r
@staticmethod
def _parse_kirppu_vendor(row):
attrs = {
col: getattr(TypeConverter, VendorColumnTypes[col])(row[col])
for col in [
]
}
r = Vendor(**attrs)
return r
class Command(BaseCommand):
help = r"""Import Item data from PostgreSQL dump from stdin or from a file that has been pre-processed.
Do not use unless you know how this works.
One part of Item-data pre-work: grep -P '^\d+\t[^\t]+\t[^\t]+\t[^\t]+\t\w\w\t\w+\t\w\t\w\t@@@\t.*$'
"""
def add_arguments(self, parser):
parser.add_argument("file", type=str, nargs="?")
def handle(self, *args, **options):
f_name = options.get("file")
if f_name is None:
parser = PostgreDumpParser("stdin")
parser.parse(sys.stdin)
else:
parser = PostgreDumpParser(f_name)
parser.parse()
results = {}
for table, data in parser.data.items():
converter = DbConverter(table)
results[table] = converter.parse(data)
user = results["kirppuauth_user"]
assert len(user) == 1
user = user[0]
vendor = results["kirppu_vendor"]
assert len(vendor) == 1
vendor = vendor[0]
items = results["kirppu_item"]
assert len(items) > 0
with transaction.atomic():
# Create the user if it doesn't exist. user is predefined for exception to use.
try:
user = User.objects.get(username=user.username)
except ObjectDoesNotExist:
user.save()
# Create vendor if it doesn't exist. vendor is predefined for exception to use.
try:
vendor = Vendor.objects.get(user=user)
except ObjectDoesNotExist:
vendor.user = user
vendor.save()
# TODO: Create boxes..
# Create items for the vendor.
for item in items:
item.vendor = vendor
item.save()
for table in results.keys():
# noinspection PyProtectedMember
print("\n".join("{} {}: {}".format(r._meta.object_name, r.pk, str(r)) for r in results[table]))
| Python | 0 | |
fe879d7f6f56db410eb5d3d9aeb5691d020661c7 | Create shackbullet.py | shackbullet.py | shackbullet.py | #Import the modules
import requests
import json
import uuid
#Edit these to your shacknews login credentials
shackname = 'Username'
shackpass = 'ShackPassword'
pushbulletkey = 'access token from https://www.pushbullet.com/account'
#Fun Bbegins
#generate uuid from namespace
uuid = uuid.uuid5(uuid.NAMESPACE_DNS, 'winchatty.com')
#setup registration payload
payload = { 'id' : uuid, 'name' : 'shackbullet', 'username' : shackname, 'password' : shackpass }
#register this client
r = requests.post("https://winchatty.com/v2/notifications/registerRichClient", data=payload)
#We are setup so start waiting for notifications
#setup checking payload
payload = { 'clientId' : uuid }
bulletheaders = { 'Authorization' : 'Bearer ' + pushbulletkey }
#main loop to check for notifications
while True:
#wait for notifications
r = requests.post("http://notifications.winchatty.com/v2/notifications/waitForNotification", data=payload)
#got one, now setup the payload for pushbullet
bulletpayload = { 'type' : 'link', 'title' : data['messages'][0]['subject'] + ': ' + data['messages'][0]['body'], 'body' : data['messages'][0]['body'], 'url' : 'http://www.shacknews.com/chatty?id=' + str(data['messages'][0]['postId']) + '#item_' + str(data['messages'][0]['postId']) }
#send the notification to pushbullet
r = requests.post("https://api.pushbullet.com/v2/pushes", headers=bulletheaders, data=bulletpayload)
| Python | 0.000109 | |
dc4a3ec9a8bb042cef115005d8ebf11dc2c5889e | Longest increasing subsequence in the list | longest_increasing_subsequence.py | longest_increasing_subsequence.py | l = [3,4,5,9,8,1,2,7,7,7,7,7,7,7,6,0,1]
empty = []
one = [1]
two = [2,1]
three = [1,0,2,3]
tricky = [1,2,3,0,-2,-1]
ring = [3,4,5,0,1,2]
internal = [9,1,2,3,4,5,0]
# consider your list as a ring, continuous and infinite
def longest_increasing_subsequence(l):
length = len(l)
if length == 0: return 0 # list is empty
i, tmp, longest = [0, 1, 1]
# 1 < tmp means that ring is finished, but the sequence continue to increase
while i < length or 1 < tmp:
# compare elements on the ring
if l[i%length] < l[(i+1)%length]:
tmp += 1
else:
if longest < tmp: longest = tmp
tmp = 1
i += 1
return longest
print("0 == " + str(longest_increasing_subsequence(empty)))
print("1 == " + str(longest_increasing_subsequence(one)))
print("2 == " + str(longest_increasing_subsequence(two)))
print("3 == " + str(longest_increasing_subsequence(three)))
print("5 == " + str(longest_increasing_subsequence(tricky)))
print("5 == " + str(longest_increasing_subsequence(internal)))
print("6 == " + str(longest_increasing_subsequence(ring)))
print("6 == " + str(longest_increasing_subsequence(l)))
| Python | 1 | |
0b419a71a414e605af57029286b627e286c5df47 | add session | controller/addSession.py | controller/addSession.py | #!/usr/local/bin/python3
"""
created_by: Aninda Manocha
created_date: 3/5/2015
last_modified_by: Aninda Manocha
last_modified_date: 3/5/2015
"""
import constants
import utils
import json
from sql.session import Session
from sql.user import User
#Format of session
#requestType: addSession
#token: "string"
#ip: "string"
#user: User
def iChooseU(json):
thisUser = utils.findUser(json)
token = json["token"]
ip = json["ip"]
user = json["user"]
theUser = User.get(user["id"])[0]
newSession = Session.noID(token, ip, user, ACTIVE)
newSession.add()
return utils.successJson(json)
| Python | 0 | |
d83e30cc2ec46eeb2f7c27c26e0fc3d2d3e6de90 | add an environment checker | scripts/check_environment.py | scripts/check_environment.py | """
Something to run to make sure our machine is up to snuff!
"""
import pg
import xlwt | Python | 0 | |
ef78460a3303216f424247203cf0b5e1ecc88197 | Add test for ticket #1074. | scipy/optimize/tests/test_regression.py | scipy/optimize/tests/test_regression.py | """Regression tests for optimize.
"""
from numpy.testing import *
import numpy as np
class TestRegression(TestCase):
def test_newton_x0_is_0(self):
"""Ticket #1074"""
import scipy.optimize
tgt = 1
res = scipy.optimize.newton(lambda x: x - 1, 0)
assert_almost_equal(res, tgt)
| Python | 0.000005 | |
c654841595fd679c511d2d3b91c2edc9335c78cc | Create Quiz-Problem8.py | Quiz-Problem8.py | Quiz-Problem8.py | # PROBLEM 8
def satisfiesF(L):
"""
Assumes L is a list of strings
Assume function f is already defined for you and it maps a string to a Boolean
Mutates L such that it contains all of the strings, s, originally in L such
that f(s) returns True, and no other elements
Returns the length of L after mutation
"""
harsh = L[:]
for x in harsh:
if f(x) == False:
a = L.index(x)
del L[a]
return len(L)
run_satisfiesF(L, satisfiesF)
| Python | 0.000001 | |
aa3cf6a383c38a9f17172ae2a754a8e67243e318 | add new form to bulk import indicators from json file | corehq/apps/indicators/forms.py | corehq/apps/indicators/forms.py | from django import forms
from django.utils.translation import ugettext_noop, ugettext as _
from bootstrap3_crispy import bootstrap as twbs
from bootstrap3_crispy.helper import FormHelper
from bootstrap3_crispy import layout as crispy
from corehq.apps.style.crispy import FormActions
class ImportIndicatorsFromJsonFileForm(forms.Form):
json_file = forms.FileField(
label=ugettext_noop("Exported File"),
required=False,
)
override_existing = forms.BooleanField(
label=_("Override Existing Indicators"),
required=False,
)
def __init__(self, *args, **kwargs):
super(ImportIndicatorsFromJsonFileForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_method = 'POST'
self.helper.form_class = 'form-horizontal'
self.helper.label_class = 'col-lg-2'
self.helper.field_class = 'col-lg-8'
self.helper.layout = crispy.Layout(
crispy.Field('json_file'),
crispy.Field('override_existing'),
FormActions(
twbs.StrictButton(_("Import Indicators"),
type='submit',
css_class='btn-primary'),
),
)
| Python | 0 | |
42fbeda997d5c8f67231ee5c8f420a7140870c26 | Add gce_img module for utilizing GCE image resources | lib/ansible/modules/extras/cloud/google/gce_img.py | lib/ansible/modules/extras/cloud/google/gce_img.py | #!/usr/bin/python
# Copyright 2015 Google Inc. All Rights Reserved.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
"""An Ansible module to utilize GCE image resources."""
import sys
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.gce import *
try:
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
from libcloud.common.google import GoogleBaseError
from libcloud.common.google import ResourceNotFoundError
_ = Provider.GCE
except ImportError:
print('failed=True '
"msg='libcloud with GCE support is required for this module.'")
sys.exit(1)
DOCUMENTATION = '''
---
module: gce_img
short_description: utilize GCE image resources
description:
- This module can create and delete GCE private images from gzipped
compressed tarball containing raw disk data or from existing detached
disks in any zone. U(https://cloud.google.com/compute/docs/images)
options:
name:
description:
- the name of the image to create
required: true
default: null
aliases: []
source:
description:
- the source disk or the Google Cloud Storage URI to create the image from
required: false
default: null
aliases: []
state:
description:
- desired state of the image
required: false
default: "present"
choices: ["active", "present", "absent", "deleted"]
aliases: []
zone:
description:
- the zone of the disk specified by source
required: false
default: "us-central1-a"
aliases: []
service_account_email:
version_added: "1.6"
description:
- service account email
required: false
default: null
aliases: []
pem_file:
version_added: "1.6"
description:
- path to the pem file associated with the service account email
required: false
default: null
aliases: []
project_id:
version_added: "1.6"
description:
- your GCE project ID
required: false
default: null
aliases: []
requirements: [ "libcloud" ]
author: Peter Tan <ptan@google.com>
'''
EXAMPLES = '''
# Create an image named test-image from the disk 'test-disk' in zone us-central1-a.
- gce_img:
name: test-image
source: test-disk
zone: us-central1-a
state: present
# Delete an image named test-image in zone us-central1-a.
- gce_img:
name: test-image
zone: us-central1-a
state: deleted
'''
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(required=True),
source=dict(),
state=dict(default='present'),
zone=dict(default='us-central1-a'),
service_account_email=dict(),
pem_file=dict(),
project_id=dict(),
)
)
gce = gce_connect(module)
name = module.params.get('name')
source = module.params.get('source')
state = module.params.get('state')
zone = module.params.get('zone')
changed = False
try:
image = gce.ex_get_image(name)
except GoogleBaseError, e:
module.fail_json(msg=str(e), changed=False)
# user wants to create an image.
if state in ['active', 'present'] and not image:
if not source:
module.fail_json(msg='Must supply a source', changed=False)
if source.startswith('https://storage.googleapis.com'):
# source is a Google Cloud Storage URI
volume = source
else:
try:
volume = gce.ex_get_volume(source, zone)
except ResourceNotFoundError:
module.fail_json(msg='Disk %s not found in zone %s' % (source, zone),
changed=False)
except GoogleBaseError, e:
module.fail_json(msg=str(e), changed=False)
try:
image = gce.ex_create_image(name, volume)
changed = True
except GoogleBaseError, e:
module.fail_json(msg=str(e), changed=False)
# user wants to delete the image.
if state in ['absent', 'deleted'] and image:
try:
gce.ex_delete_image(image)
changed = True
except GoogleBaseError, e:
module.fail_json(msg=str(e), changed=False)
module.exit_json(changed=changed, name=name)
sys.exit(0)
main()
| Python | 0 | |
04f851706b4384add01e6cd41f31305d587f7a36 | Create pushbullet.py | pushbullet.py | pushbullet.py | # Python unofficial Pushbullet client
# (C) 2015 Patrick Lambert - http://dendory.net - Provided under MIT License
import urllib.request
import sys
api_key = "XXXXXXXXX"
title = "My Title"
message = "My Body"
def notify(key, title, text):
post_params = {
'type': 'note',
'title': title,
'body': text
}
post_args = urllib.parse.urlencode(post_params)
data = post_args.encode()
request = urllib.request.Request(url='https://api.pushbullet.com/v2/pushes', headers={'Authorization': 'Bearer ' + key}, data=data)
result = urllib.request.urlopen(request)
return result.read().decode('utf-8')
if '-key' in sys.argv:
api_key = sys.argv[sys.argv.index('-key')+1]
if '-title' in sys.argv:
title = sys.argv[sys.argv.index('-title')+1]
if '-message' in sys.argv:
message = sys.argv[sys.argv.index('-message')+1]
print(notify(api_key, title, message))
| Python | 0.00003 | |
57b19c56b8be8c8131cc3d98cb9f30da3398412b | create a reporting helper for logging channel info | remoto/log.py | remoto/log.py |
def reporting(conn, result):
log_map = {'debug': conn.logger.debug, 'error': conn.logger.error}
while True:
try:
received = result.receive()
level_received, message = received.items()[0]
log_map[level_received](message.strip('\n'))
except EOFError:
break
| Python | 0 | |
371df7c27fa1c4130214c58ececa83b0e0b6b165 | Create palindrome3.py | palindrome3.py | palindrome3.py | palindrome3 = lambda x: str(x) == str(x)[::-1]
| Python | 0.000034 | |
5129dd5de6f4a8c0451adbb5631940bb82b51a26 | Add a script to enact updates to positions & alternative names | mzalendo/kenya/management/commands/kenya_apply_updates.py | mzalendo/kenya/management/commands/kenya_apply_updates.py | from collections import defaultdict
import csv
import datetime
import errno
import hmac
import hashlib
import itertools
import json
import os
import re
import requests
import sys
from django.core.management.base import NoArgsCommand, CommandError
from django.template.defaultfilters import slugify
from django_date_extensions.fields import ApproximateDate
from settings import IEBC_API_ID, IEBC_API_SECRET
from optparse import make_option
from core.models import Place, PlaceKind, Person, ParliamentarySession, Position, PositionTitle, Organisation, OrganisationKind
from iebc_api import *
data_directory = os.path.join(sys.path[0], 'kenya', '2013-election-data')
headings = ['Place Name',
'Place Type',
'Race Type',
'Old?',
'Existing Aspirant Position ID',
'Existing Aspirant Person ID',
'Existing Aspirant External ID',
'Existing Aspirant Legal Name',
'Existing Aspirant Other Names',
'API Normalized Name',
'API Code',
'Action']
class Command(NoArgsCommand):
help = 'Update the database with aspirants from the IEBC website'
option_list = NoArgsCommand.option_list + (
make_option('--commit', action='store_true', dest='commit', help='Actually update the database'),
)
def handle_noargs(self, **options):
csv_filename = os.path.join(data_directory, 'positions-to-end-delete-and-alternative-names.csv')
with open(csv_filename) as fp:
reader = csv.DictReader(fp)
for row in reader:
alternative_names_to_add = row['Alternative Names To Add']
if not alternative_names_to_add:
continue
position = Position.objects.get(pk=row["Existing Aspirant Position ID"])
if alternative_names_to_add == '[endpos]':
position.end_date = yesterday_approximate_date
maybe_save(position, **options)
elif alternative_names_to_add == '[delpos]':
if options['commit']:
position.delete()
else:
print "------------------------------------------------------------------------"
print alternative_names_to_add
names_to_add = [an.title().strip() for an in alternative_names_to_add.split(', ')]
for n in names_to_add:
person = Person.objects.get(pk=row['Existing Aspirant Person ID'])
person.add_alternative_name(n)
maybe_save(person, **options)
# for each county, representative, ward:
# for each contest_type:
# get all the current aspirants
# for each aspirant:
# find each other aspirant that has this aspirant as an alternative name
# (make a mapping of (person with multiple names) => all people whose names match those)
# for each value in that mapping, check that they have the same API CODE
# set the key person's API CODE
# check that there's no extra data attached to the values peope, then remove the position + the person
# check - if we're deleting a position, because there's already an older one there, make sure any IEBC code of the former is applied to the latter
| Python | 0 | |
67e7d530b4b4ffa86c9f147751cf17828e024cba | add migration to create job table | migrations/versions/5706baf73b01_add_jobs_table.py | migrations/versions/5706baf73b01_add_jobs_table.py | """Add jobs table
Revision ID: 5706baf73b01
Revises: 6bd350cf4748
Create Date: 2016-09-14 15:53:50.394610
"""
# revision identifiers, used by Alembic.
revision = '5706baf73b01'
down_revision = '6bd350cf4748'
from alembic import op
import sqlalchemy as sa
import server
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('job',
sa.Column('created', sa.DateTime(timezone=True), server_default=sa.text('now()'), nullable=False),
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('updated', sa.DateTime(timezone=True), nullable=True),
sa.Column('status', sa.Enum('queued', 'running', 'finished', name='status'), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=False),
sa.Column('course_id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=255), nullable=False),
sa.Column('description', sa.Text(), nullable=False),
sa.Column('failed', sa.Boolean(), nullable=False),
sa.Column('log', sa.Text(), nullable=True),
sa.ForeignKeyConstraint(['course_id'], ['course.id'], name=op.f('fk_job_course_id_course')),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], name=op.f('fk_job_user_id_user')),
sa.PrimaryKeyConstraint('id', name=op.f('pk_job'))
)
op.create_index(op.f('ix_job_course_id'), 'job', ['course_id'], unique=False)
op.create_index(op.f('ix_job_user_id'), 'job', ['user_id'], unique=False)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_job_user_id'), table_name='job')
op.drop_index(op.f('ix_job_course_id'), table_name='job')
op.drop_table('job')
### end Alembic commands ###
| Python | 0.000001 | |
ec841c86348302dc67b48af93d2b3b5c8fb96b6e | add list | misc/list.py | misc/list.py | #!/usr/bin/env python
# Python 3: List comprehensions
fruits = ['Banana', 'Apple', 'Lime']
loud_fruits = [fruit.upper() for fruit in fruits]
print(loud_fruits)
# List and the enumerate function
print(list(enumerate(fruits)))
| Python | 0.000016 | |
427fa7f57776a73b5ec0e5045114d5ac330e6a57 | Create misc.py | misc/misc.py | misc/misc.py | Python | 0 | ||
a3da01a026018ae1c612fa16d2e382ac8bbd4f6b | Add pptxsanity.py and first binary release | pptxsanity.py | pptxsanity.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# With code by Eric Jang ericjang2004@gmail.com
TIMEOUT=6 # URL request timeout in seconds
SKIP200=1
from pptx import Presentation
import sys
import re
import os
import shutil
import glob
import tempfile
import urllib2
import signal
from zipfile import ZipFile
from xml.dom.minidom import parse
import platform
# Remove trailing unwanted characters from URL's
# This is a recursive function. Did I do it well? I don't know.
def striptrailingchar(s):
if s[-1] == "." or s[-1] == ")" or s[-1] == "," or s[-1] == ";" or s[-1] == "\\":
s = striptrailingchar(s[0:-1])
elif s[-5:] == """:
s = striptrailingchar(s[0:-5])
else:
pass
return s
def parseslidenotes(pptxfile):
urls = []
tmpd = tempfile.mkdtemp()
ZipFile(pptxfile).extractall(path=tmpd, pwd=None)
path = tmpd + '/ppt/notesSlides/'
for infile in glob.glob(os.path.join(path, '*.xml')):
#parse each XML notes file from the notes folder.
dom = parse(infile)
noteslist = dom.getElementsByTagName('a:t')
#separate last element of noteslist for use as the slide marking.
slideNumber = noteslist.pop()
slideNumber = slideNumber.toxml().replace('<a:t>', '').replace('</a:t>', '')
#start with this empty string to build the presenter note itself
text = ''
for node in noteslist:
xmlTag = node.toxml()
xmlData = xmlTag.replace('<a:t>', '').replace('</a:t>', '')
#concatenate the xmlData to the text for the particular slideNumber index.
text += " " + xmlData
# Convert to ascii to simplify
text = text.encode('ascii', 'ignore')
urlmatches = re.findall(urlmatchre,text)
if len(urlmatches) > 0:
for match in urlmatches: # Now it's a tuple
for urlmatch in match:
if urlmatch != '':
urls.append(striptrailingchar(urlmatch))
# Remove all the files created with unzip
shutil.rmtree(tmpd)
return urls
# Parse the text on slides using the python-pptx module, return URLs
def parseslidetext(prs):
urls = []
nexttitle = False
for slide in prs.slides:
text_runs = []
for shape in slide.shapes:
try:
if not shape.has_text_frame:
continue
except AttributeError:
sys.stderr.write("Error: Please upgrade your version of python-pptx: pip uninstall python-pptx ; pip install python-pptx\n")
sys.exit(-1)
for paragraph in shape.text_frame.paragraphs:
for run in paragraph.runs:
text_runs.append(run.text)
for text in text_runs:
if text == None : continue
try:
m = re.match(urlmatchre,text)
except IndexError,TypeError:
continue
if m != None:
url = striptrailingchar(m.groups()[0])
if url not in urls:
urls.append(url)
return urls
def signal_exit(signal, frame):
sys.exit(0)
if __name__ == "__main__":
if (len(sys.argv) != 2):
print "Validate URLs in the notes and slides of a PowerPoint pptx file."
print "Check GitHub for updates: http://github.com/joswr1ght/pptxsanity\n"
if (platform.system() == 'Windows'):
print "Usage: pptxsanity.exe [pptx file]"
else:
print "Usage: pptxsanity.py [pptx file]"
sys.exit(1)
signal.signal(signal.SIGINT, signal_exit)
try:
prs = Presentation(sys.argv[1])
except Exception:
sys.stderr.write("Invalid PPTX file: " + sys.argv[1] + "\n")
# This may be the most insane regex I've ever seen. It's very comprehensive, but it's too aggressive for
# what I want. It matches arp:remote in ettercap -TqM arp:remote // //, so I'm using something simpler
#urlmatchre = re.compile(r"""((?:[a-z][\w-]+:(?:/{1,3}|[a-z0-9%])|www\d{0,3}[.]|[a-z0-9.\-]+[.][a-z]{2,4}/)(?:[^\s()<>]+|(([^\s()<>]+|(([^\s()<>]+)))*))+(?:(([^\s()<>]+|(([^\s()<>]+)))*)|[^\s`!()[]{};:'".,<>?«»“”‘’]))""", re.DOTALL)
urlmatchre = re.compile(r'((https?://[^\s<>"]+|www\.[^\s<>"]+))',re.DOTALL)
privateaddr = re.compile(r'(\S+127\.)|(\S+192\.168\.)|(\S+10\.)|(\S+172\.1[6-9]\.)|(\S+172\.2[0-9]\.)|(\S+172\.3[0-1]\.)|(\S+::1)')
SKIP200=int(os.getenv('SKIP200', 1))
urls = []
urls += parseslidetext(prs)
urls += parseslidenotes(sys.argv[1])
# De-duplicate URL's
urls = list(set(urls))
for url in urls:
url = url.encode('ascii', 'ignore')
# Add default URI for www.anything
if url[0:3] == "www": url="http://"+url
# Skip private IP addresses
if re.match(privateaddr,url): continue
try:
headers = { 'User-Agent' : 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:35.0) Gecko/20100101 Firefox/35.0' }
#ul=urllib2.urlopen(url, timeout=TIMEOUT)
req=urllib2.Request(url, None, headers)
ul=urllib2.urlopen(req, timeout=10)
code=ul.getcode()
if code == 200 and SKIP200 == 1:
continue
print str(code) + " : " + url
except Exception, e:
try:
print str(e.code) + " : " + url
except Exception:
print "ERR : " + url
| Python | 0 | |
0612769b07e88eae9865a16f1ae8162502fe65f9 | Add senate evacuation | 2016/1c/senate_evacuation.py | 2016/1c/senate_evacuation.py | #!/usr/bin/env python
from __future__ import print_function
def parse_senates(senates_str):
return [int(_) for _ in senates_str.split(' ')]
def get_evacuation_plan(senates):
if not isinstance(senates, list):
raise TypeError
num_parties = len(senates)
remaining_senates = senates[:]
evacuation = []
while sum(remaining_senates) > 0:
sorted_index = get_sorted_index(remaining_senates)
party_index0, party_index1 = sorted_index[:2]
if remaining_senates[party_index0] > 0:
evacuated_party0 = get_party(party_index0)
evacuation.append(evacuated_party0)
if remaining_senates[party_index1] > 0:
evacuated_party1 = get_party(party_index1)
evacuation.append(evacuated_party1)
evacuation.append(' ')
remaining_senates[party_index0] += -1
remaining_senates[party_index1] += -1
evacuation_plan = ''.join(evacuation)[:-1]
if evacuation_plan[-2] == ' ':
evacuation_plan = evacuation_plan[:-3] + ' ' + evacuation_plan[-3] + evacuation_plan[-1]
return evacuation_plan
def get_sorted_index(seq):
return sorted(range(len(seq)), key=lambda i:-seq[i])
def get_party(party_index):
return chr(party_index + 65)
if __name__ == '__main__':
import os
samples = ['2 2',
'3 2 2',
'1 1 2',
'2 3 1']
for sample in samples:
senates = parse_senates(sample)
print(get_evacuation_plan(senates))
data_files = ['A-small-practice', 'A-large-practice']
for f in data_files:
with open(os.path.join(os.path.dirname(os.path.realpath(__file__)),
'{0}.in'.format(f)), 'r') as input_file:
lines = input_file.readlines()
input_count = int(lines[0].replace('\n' ,''))
inputs = [line.replace('\n', '') for line in lines[2::2]]
i = 1
with open(os.path.join(os.path.dirname(os.path.realpath(__file__)),
'{0}.out'.format(f)), 'w') as output_file:
for in_ in inputs:
senates = parse_senates(in_)
output_file.write('Case #{0}: {1}\n'.format(i, get_evacuation_plan(senates)))
i += 1
| Python | 0.000367 | |
aabb57148cced94b31109b46adf83a43ca23f7a3 | allow to apply std functions back | mosql/std.py | mosql/std.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''It applies the standard functions to :mod:`mosql.util`.
The usage:
::
import mosql.std
If you want to patch again:
::
mosql.std.patch()
It will replace the functions in :mod:`mosql.util` with original standard functions.
'''
import mosql.util
def patch():
mosql.util.escape = mosql.util.std_escape
mosql.util.format_param = mosql.util.std_format_param
mosql.util.delimit_identifier = mosql.util.std_delimit_identifier
mosql.util.stringify_bool = mosql.util.std_stringify_bool
mosql.util.escape_identifier = mosql.util.std_escape_identifier
patch() # patch it when load this module
| Python | 0 | |
69892066449a40322a34b3a7b8e60e3fa99eef41 | Create deobfuscator.py | ver.-0.1/deobfuscator.py | ver.-0.1/deobfuscator.py | Python | 0.000015 | ||
3c396700a52571d5aae2a12fac601f063a7af761 | Add missing add_master.py. | devops/deployment/add_master.py | devops/deployment/add_master.py | #!/usr/bin/env python
# script to add minion config
import yaml
import sys
import os
f=open("/etc/salt/minion", 'r')
settings=yaml.load(f)
f.close()
ip=os.environ["MASTER_IP"]
if settings["master"].__class__ == str:
settings["master"] = [settings["master"]]
settings["master"] = [ip]
#if not ip in settings["master"]:
# settings["master"].insert(0, ip)
f=open("/etc/salt/minion", 'w')
f.write(yaml.dump(settings))
f.close()
print "Success:"
| Python | 0 | |
00a059f172e1d6214d858370829e1034c2742ce4 | add gevent run script | run_gevent.py | run_gevent.py | from gevent.monkey import patch_all; patch_all()
from gevent.wsgi import WSGIServer
from pypi_notifier import create_app
app = create_app('ProductionConfig')
http_server = WSGIServer(('0.0.0.0', 5001), app)
http_server.serve_forever()
| Python | 0 | |
916b7fe4ee4c3c5c55278927a7116a4d1e0ad6d1 | Add solarized256.py | solarized256.py | solarized256.py | # -*- coding: utf-8 -*-
"""
solarized256
------------
A Pygments style inspired by Solarized's 256 color mode.
:copyright: (c) 2011 by Hank Gay, (c) 2012 by John Mastro.
:license: BSD, see LICENSE for more details.
"""
from pygments.style import Style
from pygments.token import Token, Comment, Name, Keyword, Generic, Number, \
Operator, String
BASE03 = "#1c1c1c"
BASE02 = "#262626"
BASE01 = "#4e4e4e"
BASE00 = "#585858"
BASE0 = "#808080"
BASE1 = "#8a8a8a"
BASE2 = "#d7d7af"
BASE3 = "#ffffd7"
YELLOW = "#af8700"
ORANGE = "#d75f00"
RED = "#af0000"
MAGENTA = "#af005f"
VIOLET = "#5f5faf"
BLUE = "#0087ff"
CYAN = "#00afaf"
GREEN = "#5f8700"
class Solarized256Style(Style):
background_color = BASE03
styles = {
Keyword: GREEN,
Keyword.Constant: ORANGE,
Keyword.Declaration: BLUE,
Keyword.Namespace: ORANGE,
#Keyword.Pseudo
Keyword.Reserved: BLUE,
Keyword.Type: RED,
#Name
Name.Attribute: BASE1,
Name.Builtin: BLUE,
Name.Builtin.Pseudo: BLUE,
Name.Class: BLUE,
Name.Constant: ORANGE,
Name.Decorator: BLUE,
Name.Entity: ORANGE,
Name.Exception: YELLOW,
Name.Function: BLUE,
#Name.Label
#Name.Namespace
#Name.Other
Name.Tag: BLUE,
Name.Variable: BLUE,
#Name.Variable.Class
#Name.Variable.Global
#Name.Variable.Instance
#Literal
#Literal.Date
String: CYAN,
String.Backtick: BASE01,
String.Char: CYAN,
String.Doc: CYAN,
#String.Double
String.Escape: RED,
String.Heredoc: CYAN,
#String.Interpol
#String.Other
String.Regex: RED,
#String.Single
#String.Symbol
Number: CYAN,
#Number.Float
#Number.Hex
#Number.Integer
#Number.Integer.Long
#Number.Oct
Operator: BASE1,
Operator.Word: GREEN,
#Punctuation: ORANGE,
Comment: BASE01,
#Comment.Multiline
Comment.Preproc: GREEN,
#Comment.Single
Comment.Special: GREEN,
#Generic
Generic.Deleted: CYAN,
Generic.Emph: 'italic',
Generic.Error: RED,
Generic.Heading: ORANGE,
Generic.Inserted: GREEN,
#Generic.Output
#Generic.Prompt
Generic.Strong: 'bold',
Generic.Subheading: ORANGE,
#Generic.Traceback
Token: BASE1,
Token.Other: ORANGE,
}
| Python | 0 | |
6cab10d19386911f33aaca660a9e1a35751b18ee | broke github api url naming scheme. fixed | post_qr.py | post_qr.py | import praw
import OAuth2Util
import time
import requests
import json
import os
import humanize
# sweet mother of imports
def make_qr(repo):
"""
Takes a github url, uses the github api to get the direct download url and size, and uses google api to make a qr.
It returns the link to the qr (not on imgur) and the formatted file size
"""
repo = repo.rsplit('releases', 1)[0] # cut the url up to /releases/
repo = repo[18::]
print(repo)
req = requests.get("https://api.github.com/repos" + repo + "releases/latest") # change to proper api format
data = json.loads(req.text)
for item in data['assets']:
if ".cia" in item['name']: # if the download links have cia, make qr, else return None
url = item["browser_download_url"] # search for keys containing url and size
file_size = item['size']
file_size = humanize.naturalsize(file_size)
qr_url = ('https://chart.googleapis.com/chart?chs=300x300&cht=qr&chl=' + url + '&choe=UTF-8')
return qr_url, file_size
else:
return None
r = praw.Reddit('3DS Homebrew QR Poster for /r/3DSHacks v0.3'
'By /u/Im_Soul')
o = OAuth2Util.OAuth2Util(r) # create reddit oauth
# o.refresh()
if not os.path.isfile("posts_scanned.txt"): # check for posts_scanned.txt, if not, make empty list to store ids
posts_scanned = [] # if so, import the ids stored to the file
else:
with open("posts_scanned.txt", "r") as f:
posts_scanned = f.read()
posts_scanned = posts_scanned.split("\n")
posts_scanned = list(filter(None, posts_scanned))
subreddit = r.get_subreddit('3dshacks') # subreddit to scan
for submission in subreddit.get_new(limit=5): # get 5 posts
if submission.id not in posts_scanned: # check if we already checked the id
if 'github.com' in submission.url: # check if url is github
link_to_release = submission.url
if "release" in submission.url: # check if it's a release (bad way of doing it)
finished = make_qr(link_to_release)
if finished is not None: # if 'make_qr()' was a success
comment = '[QR Code (' + finished[1] + ')](' + finished[0] + ')' + '\n ***** \n Made by /u/Im_Soul' # comment formatting
submission.add_comment(comment)
print("Replied to ", submission.id, " on ", time.asctime(time.localtime(time.time()))) # run log
posts_scanned.append(submission.id) # add id to list
with open("posts_scanned.txt", "w") as f: # write from the list to the file
for post_id in posts_scanned:
f.write(post_id + "\n") | import praw
import OAuth2Util
import time
import requests
import json
import os
import humanize
# sweet mother of imports
def make_qr(repo):
"""
Takes a github url, uses the github api to get the direct download url and size, and uses google api to make a qr.
It returns the link to the qr (not on imgur) and the formatted file size
"""
if 'tag' in repo:
repo = repo.rsplit('tag', 1)[0] # cut the url up to /tag/
repo = repo[18::] # cut out www.~~~ blah up to /user/repo
else:
repo = repo.rsplit('releases', 1)[0] # cut the url up to /tag/
repo = repo[18::]
req = requests.get("https://api.github.com/repos" + repo + "latest") # change to proper api format
data = json.loads(req.text)
for item in data['assets']:
if ".cia" in item['name']: # if the download links have cia, make qr, else return None
url = item["browser_download_url"] # search for keys containing url and size
file_size = item['size']
file_size = humanize.naturalsize(file_size)
qr_url = ('https://chart.googleapis.com/chart?chs=300x300&cht=qr&chl=' + url + '&choe=UTF-8')
return qr_url, file_size
else:
return None
r = praw.Reddit('3DS Homebrew QR Poster for /r/3DSHacks v0.3'
'By /u/Im_Soul')
o = OAuth2Util.OAuth2Util(r) # create reddit oauth
# o.refresh()
if not os.path.isfile("posts_scanned.txt"): # check for posts_scanned.txt, if not, make empty list to store ids
posts_scanned = [] # if so, import the ids stored to the file
else:
with open("posts_scanned.txt", "r") as f:
posts_scanned = f.read()
posts_scanned = posts_scanned.split("\n")
posts_scanned = list(filter(None, posts_scanned))
subreddit = r.get_subreddit('3dshacks') # subreddit to scan
for submission in subreddit.get_new(limit=5): # get 5 posts
if submission.id not in posts_scanned: # check if we already checked the id
if 'github.com' in submission.url: # check if url is github
link_to_release = submission.url
if "release" in submission.url: # check if it's a release (bad way of doing it)
finished = make_qr(link_to_release)
if finished is not None: # if 'make_qr()' was a success
comment = '[QR Code (' + finished[1] + ')](' + finished[0] + ')' + '\n ***** \n Made by /u/Im_Soul' # comment formatting
submission.add_comment(comment)
print("Replied to ", submission.id, " on ", time.asctime(time.localtime(time.time()))) # run log
posts_scanned.append(submission.id) # add id to list
with open("posts_scanned.txt", "w") as f: # write from the list to the file
for post_id in posts_scanned:
f.write(post_id + "\n") | Python | 0.999656 |
07a122374abb60140e05b09f49ef942bd14c05f6 | add missed migration | measure_mate/migrations/0026_auto_20160531_0716.py | measure_mate/migrations/0026_auto_20160531_0716.py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-05-31 07:16
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
import re
class Migration(migrations.Migration):
dependencies = [
('measure_mate', '0025_auto_20160516_0046'),
]
operations = [
migrations.AlterField(
model_name='tag',
name='name',
field=models.SlugField(unique=True, validators=[django.core.validators.RegexValidator(re.compile(b'[A-Z]'), "Enter a valid 'slug' consisting of lowercase letters, numbers, underscores or hyphens.", b'invalid', True)], verbose_name=b'Tag Name'),
),
]
| Python | 0 | |
d3b4053c2ef39eda9246af2000bdf9460730b33b | convert json to git-versionable versions which current TinyDB user can live with. | prettifyjson.py | prettifyjson.py | #!/usr/bin/env python
from os import path
import sys
import json
if len(sys.argv) > 1:
print("usage:\n\t{} < your_json_file > your_prettified_json_file".format(
path.basename(sys.argv[0])))
sys.exit(1)
json.dump(json.load(sys.stdin), sys.stdout, indent=2)
| Python | 0 | |
caf0b00bc21208515d0ddded3cbb934735d45939 | add migration to create new model fields | coupons/migrations/0004_auto_20151105_1456.py | coupons/migrations/0004_auto_20151105_1456.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('coupons', '0003_auto_20150416_0617'),
]
operations = [
migrations.CreateModel(
name='CouponUser',
fields=[
('id', models.AutoField(serialize=False, auto_created=True, primary_key=True, verbose_name='ID')),
('redeemed_at', models.DateTimeField(blank=True, verbose_name='Redeemed at', null=True)),
],
),
migrations.AddField(
model_name='coupon',
name='user_limit',
field=models.PositiveIntegerField(verbose_name='User limit', default=1),
),
migrations.AlterField(
model_name='coupon',
name='type',
field=models.CharField(choices=[('monetary', 'Money based coupon'), ('percentage', 'Percentage discount'), ('virtual_currency', 'Virtual currency')], verbose_name='Type', max_length=20),
),
migrations.AddField(
model_name='couponuser',
name='coupon',
field=models.ForeignKey(related_name='users', to='coupons.Coupon'),
),
migrations.AddField(
model_name='couponuser',
name='user',
field=models.ForeignKey(null=True, to=settings.AUTH_USER_MODEL, blank=True, verbose_name='User'),
),
]
| Python | 0 | |
b181f2a57d57caaa6e53e193e88002a15e284fd0 | add the missing file. i are senior dvlpr | cryptography/hazmat/backends/openssl/utils.py | cryptography/hazmat/backends/openssl/utils.py | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
import six
def _truncate_digest(digest, order_bits):
digest_len = len(digest)
if 8 * digest_len > order_bits:
digest_len = (order_bits + 7) // 8
digest = digest[:digest_len]
if 8 * digest_len > order_bits:
rshift = 8 - (order_bits & 0x7)
assert rshift > 0 and rshift < 8
mask = 0xFF >> rshift << rshift
# Set the bottom rshift bits to 0
digest = digest[:-1] + six.int2byte(six.indexbytes(digest, -1) & mask)
return digest
| Python | 0.000523 | |
4a0e00574fc551dde74db1a817229eeb23c4e0a8 | Create prueba.py | prueba.py | prueba.py | from flask import Flask
from flask import request
import os
import xml.etree.ElementTree as ET
from threading import Thread
import email_lib
app = Flask(__name__)
xml = ""
def send_email(xml):
print "2"
email_lib.prueba()
print xml
email_lib.email_alert(customer_email,iccid, admin_details[1])
return None
@app.route('/webhook', methods=['POST','GET'])
def webhook():
print "webhook"
global xml
xml = "hola"
t = Thread(target=send_email, args=(xml,))
t.start()
print "acabando"
#Jasper resend the notification unless it receives a status 200 confirming the reception
return '',200
@app.route('/response', methods=['POST','GET'])
def response():
print xml #Comprobar como comparto la variable.
return "Acabamos de procesar su peticion, en breve recibira un email con los detalles"
if __name__ == '__main__':
port = int(os.getenv('PORT', 5000))
app.run(debug=True, port=port, host='0.0.0.0', threaded=True)
| from flask import Flask
from flask import request
import os
import xml.etree.ElementTree as ET
from threading import Thread
import email_lib
app = Flask(__name__)
xml = ""
def send_email(xml):
print "2"
email_lib.prueba()
print xml
return None
@app.route('/webhook', methods=['POST','GET'])
def webhook():
print "webhook"
global xml
xml = "hola"
t = Thread(target=send_email, args=(xml,))
t.start()
print "acabando"
#Jasper resend the notification unless it receives a status 200 confirming the reception
return '',200
@app.route('/response', methods=['POST','GET'])
def response():
print xml #Comprobar como comparto la variable.
return "Acabamos de procesar su peticion, en breve recibira un email con los detalles"
if __name__ == '__main__':
port = int(os.getenv('PORT', 5000))
app.run(debug=True, port=port, host='0.0.0.0', threaded=True)
| Python | 0.000008 |
7e4fd6b92040788bda1760cb261730f627ca6a10 | Add example from listing 7.6 | ch7/profile_read.py | ch7/profile_read.py | '''
Listing 7.6: Profiling data transfer
'''
import numpy as np
import pyopencl as cl
import pyopencl.array
import utility
from time import sleep
NUM_VECTORS = 8192
NUM_ITERATIONS = 2000
kernel_src = '''
__kernel void profile_read(__global char16 *c, int num) {
for(int i=0; i<num; i++) {
c[i] = (char16)(5);
}
}
'''
# Get device and context, create command queue and program
dev = utility.get_default_device()
context = cl.Context(devices=[dev], properties=None, dev_type=None, cache_dir=None)
# Create a command queue with the profiling flag enabled
queue = cl.CommandQueue(context, dev, properties=cl.command_queue_properties.PROFILING_ENABLE)
# Build program in the specified context using the kernel source code
prog = cl.Program(context, kernel_src)
try:
prog.build(options=['-Werror'], devices=[dev], cache_dir=None)
except:
print('Build log:')
print(prog.get_build_info(dev, cl.program_build_info.LOG))
raise
# Data
c = np.empty(shape=(NUM_VECTORS,), dtype=cl.array.vec.char16)
# Create output buffer
c_buff = cl.Buffer(context, cl.mem_flags.WRITE_ONLY, size=c.nbytes)
# Enqueue kernel (with argument specified directly)
global_size = (1,)
local_size = None
# Execute the kernel repeatedly using enqueue_read
read_time = 0.0
for i in range(NUM_ITERATIONS):
# __call__(queue, global_size, local_size, *args, global_offset=None, wait_for=None, g_times_l=False)
# Store kernel execution event (return value)
kernel_event = prog.profile_read(queue, global_size, local_size, c_buff, np.int32(NUM_VECTORS))
# Enqueue command to copy from buffers to host memory
# Store data transfer event (return value)
prof_event = cl.enqueue_copy(queue, dest=c, src=c_buff, is_blocking=True)
read_time += prof_event.profile.end - prof_event.profile.start
# Execute the kernel repeatedly using enqueue_map_buffer
map_time = 0.0
for i in range(NUM_ITERATIONS):
# __call__(queue, global_size, local_size, *args, global_offset=None, wait_for=None, g_times_l=False)
# Store kernel execution event (return value)
kernel_event = prog.profile_read(queue, global_size, local_size, c_buff, np.int32(NUM_VECTORS))
# Enqueue command to map from buffer two to host memory
(result_array, prof_event) = cl.enqueue_map_buffer(queue,
buf=c_buff,
flags=cl.map_flags.READ,
offset=0,
shape=(NUM_VECTORS,),
dtype=cl.array.vec.char16)
map_time += prof_event.profile.end - prof_event.profile.start
# Release the mapping (is this necessary?)
result_array.base.release(queue)
# Print averaged results
print('Average read time (ms): {}'.format(read_time / ( NUM_ITERATIONS * 1000)))
print('Average map time (ms): {}'.format(map_time / ( NUM_ITERATIONS * 1000))) | Python | 0 | |
8ecac8170a3f9323f76aa9252a9d3b2f57f7660c | Include duration in analysis output | ceam/analysis.py | ceam/analysis.py | # ~/ceam/ceam/analysis.py
import argparse
import pandas as pd
import numpy as np
def confidence(seq):
mean = np.mean(seq)
std = np.std(seq)
runs = len(seq)
interval = (1.96*std)/np.sqrt(runs)
return mean, mean-interval, mean+interval
def difference_with_confidence(a, b):
mean_diff = np.mean(a) - np.mean(b)
interval = 1.96*np.sqrt(np.std(a)**2/len(a)+np.std(b)**2/len(b))
return mean_diff, int(mean_diff-interval), int(mean_diff+interval)
def analyze_results(results):
intervention = results[results.intervention == True]
non_intervention = results[results.intervention == False]
i_dalys = intervention.ylds + intervention.ylls
ni_dalys = non_intervention.ylds + non_intervention.ylls
print('Total runs', len(intervention))
print('Mean duration', results.duration.mean())
print('DALYs (intervention)', confidence(i_dalys), 'DALYs (non-intervention)', confidence(ni_dalys))
print('DALYs averted', difference_with_confidence(ni_dalys,i_dalys))
print('Total Intervention Cost', confidence(intervention.intervention_cost))
print('Cost per DALY', confidence(intervention.intervention_cost.values/(ni_dalys.values-i_dalys.values)))
print('IHD Count (intervention)',confidence(intervention.ihd_count), 'IHD Count (non-intervention)', confidence(non_intervention.ihd_count))
print('Stroke Count (intervention)',confidence(intervention.hemorrhagic_stroke_count), 'Stroke Count (non-intervention)', confidence(non_intervention.hemorrhagic_stroke_count))
print('Healthcare Access Events per year (intervention):', confidence((intervention.general_healthcare_access+intervention.followup_healthcare_access)/20))
print('Healthcare Access Events per year (non-non_intervention):', confidence((non_intervention.general_healthcare_access+non_intervention.followup_healthcare_access)/20))
def dump_results(results, path):
results.to_csv(path)
def load_results(paths):
results = pd.DataFrame()
for path in paths:
results = results.append(pd.read_csv(path))
return results
def main():
import sys
analyze_results(load_results(sys.argv[1:]))
if __name__ == '__main__':
main()
# End.
| # ~/ceam/ceam/analysis.py
import argparse
import pandas as pd
import numpy as np
def confidence(seq):
mean = np.mean(seq)
std = np.std(seq)
runs = len(seq)
interval = (1.96*std)/np.sqrt(runs)
return mean, mean-interval, mean+interval
def difference_with_confidence(a, b):
mean_diff = np.mean(a) - np.mean(b)
interval = 1.96*np.sqrt(np.std(a)**2/len(a)+np.std(b)**2/len(b))
return mean_diff, int(mean_diff-interval), int(mean_diff+interval)
def analyze_results(results):
intervention = results[results.intervention == True]
non_intervention = results[results.intervention == False]
i_dalys = intervention.ylds + intervention.ylls
ni_dalys = non_intervention.ylds + non_intervention.ylls
print('Total runs', len(intervention))
print('DALYs (intervention)', confidence(i_dalys), 'DALYs (non-intervention)', confidence(ni_dalys))
print('DALYs averted', difference_with_confidence(ni_dalys,i_dalys))
print('Total Intervention Cost', confidence(intervention.intervention_cost))
print('Cost per DALY', confidence(intervention.intervention_cost.values/(ni_dalys.values-i_dalys.values)))
print('IHD Count (intervention)',confidence(intervention.ihd_count), 'IHD Count (non-intervention)', confidence(non_intervention.ihd_count))
print('Stroke Count (intervention)',confidence(intervention.hemorrhagic_stroke_count), 'Stroke Count (non-intervention)', confidence(non_intervention.hemorrhagic_stroke_count))
print('Healthcare Access Events per year (intervention):', confidence((intervention.general_healthcare_access+intervention.followup_healthcare_access)/20))
print('Healthcare Access Events per year (non-non_intervention):', confidence((non_intervention.general_healthcare_access+non_intervention.followup_healthcare_access)/20))
def dump_results(results, path):
results.to_csv(path)
def load_results(paths):
results = pd.DataFrame()
for path in paths:
results = results.append(pd.read_csv(path))
return results
def main():
import sys
analyze_results(load_results(sys.argv[1:]))
if __name__ == '__main__':
main()
# End.
| Python | 0.000012 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.