commit stringlengths 40 40 | subject stringlengths 4 1.73k | repos stringlengths 5 127k | old_file stringlengths 2 751 | new_file stringlengths 2 751 | new_contents stringlengths 1 8.98k | old_contents stringlengths 0 6.59k | license stringclasses 13
values | lang stringclasses 23
values |
|---|---|---|---|---|---|---|---|---|
5aafda5603ca6a5d2b1b6cfcae41132a46382c86 | add memcached example, closes #344 | smn/onadata,hnjamba/onaclone,mainakibui/kobocat,spatialdev/onadata,piqoni/onadata,kobotoolbox/kobocat,spatialdev/onadata,kobotoolbox/kobocat,GeoODK/onadata,qlands/onadata,qlands/onadata,awemulya/fieldsight-kobocat,piqoni/onadata,hnjamba/onaclone,spatialdev/onadata,GeoODK/onadata,hnjamba/onaclone,GeoODK/onadata,awemulya/fieldsight-kobocat,smn/onadata,hnjamba/onaclone,kobotoolbox/kobocat,jomolinare/kobocat,mainakibui/kobocat,sounay/flaminggo-test,mainakibui/kobocat,smn/onadata,mainakibui/kobocat,spatialdev/onadata,jomolinare/kobocat,piqoni/onadata,sounay/flaminggo-test,qlands/onadata,sounay/flaminggo-test,awemulya/fieldsight-kobocat,jomolinare/kobocat,sounay/flaminggo-test,GeoODK/onadata,qlands/onadata,awemulya/fieldsight-kobocat,piqoni/onadata,jomolinare/kobocat,smn/onadata,kobotoolbox/kobocat | onadata/settings/production_example.py | onadata/settings/production_example.py | from common import * # nopep8
# this setting file will not work on "runserver" -- it needs a server for
# static files
DEBUG = False
# override to set the actual location for the production static and media
# directories
MEDIA_ROOT = '/var/formhub-media'
STATIC_ROOT = "/srv/formhub-static"
STATICFILES_DIRS = (
os.path.join(PROJECT_ROOT, "static"),
)
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
# your actual production settings go here...,.
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'formhub',
'USER': 'formhub_prod',
# the password must be stored in an environment variable
'PASSWORD': os.environ['FORMHUB_PROD_PW'],
# the server name may be in env
'HOST': os.environ.get("FORMHUB_DB_SERVER", 'dbserver.yourdomain.org'),
'OPTIONS': {
# note: this option obsolete starting with django 1.6
'autocommit': True,
}
},
'gis': {
'ENGINE': 'django.contrib.gis.db.backends.postgis',
'NAME': 'phis',
'USER': 'staff',
# the password must be stored in an environment variable
'PASSWORD': os.environ['PHIS_PW'],
'HOST': 'gisserver.yourdomain.org',
'OPTIONS': {
'autocommit': True,
}
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'Africa/Lagos'
TOUCHFORMS_URL = 'http://localhost:9000/'
MONGO_DATABASE = {
'HOST': 'localhost',
'PORT': 27017,
'NAME': 'formhub',
'USER': '',
'PASSWORD': ''
}
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'mlfs33^s1l4xf6a36$0#j%dd*sisfo6HOktYXB9y'
# Caching
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.PyLibMCCache',
'LOCATION': '127.0.0.1:11211',
}
}
| from common import * # nopep8
# this setting file will not work on "runserver" -- it needs a server for
# static files
DEBUG = False
# override to set the actual location for the production static and media
# directories
MEDIA_ROOT = '/var/formhub-media'
STATIC_ROOT = "/srv/formhub-static"
STATICFILES_DIRS = (
os.path.join(PROJECT_ROOT, "static"),
)
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
# your actual production settings go here...,.
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'formhub',
'USER': 'formhub_prod',
# the password must be stored in an environment variable
'PASSWORD': os.environ['FORMHUB_PROD_PW'],
# the server name may be in env
'HOST': os.environ.get("FORMHUB_DB_SERVER", 'dbserver.yourdomain.org'),
'OPTIONS': {
# note: this option obsolete starting with django 1.6
'autocommit': True,
}
},
'gis': {
'ENGINE': 'django.contrib.gis.db.backends.postgis',
'NAME': 'phis',
'USER': 'staff',
# the password must be stored in an environment variable
'PASSWORD': os.environ['PHIS_PW'],
'HOST': 'gisserver.yourdomain.org',
'OPTIONS': {
'autocommit': True,
}
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'Africa/Lagos'
TOUCHFORMS_URL = 'http://localhost:9000/'
MONGO_DATABASE = {
'HOST': 'localhost',
'PORT': 27017,
'NAME': 'formhub',
'USER': '',
'PASSWORD': ''
}
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'mlfs33^s1l4xf6a36$0#j%dd*sisfo6HOktYXB9y'
| bsd-2-clause | Python |
5c3eaede26381babac281dfa0d9bec3ebe911ba8 | Add tool for building custom packages | HotelsDotCom/rainbow,linuxl0ver/rainbow,linuxl0ver/rainbow,metasyn/rainbow,ccampbell/rainbow,HotelsDotCom/rainbow,cybrox/rainbow,jeremykenedy/rainbow,cybrox/rainbow,segmentio/rainbow,jeremykenedy/rainbow,ptigas/rainbow,greyhwndz/rainbow,linuxl0ver/rainbow,jeremykenedy/rainbow,javipepe/rainbow,greyhwndz/rainbow,metasyn/rainbow,greyhwndz/rainbow,javipepe/rainbow,javipepe/rainbow,segmentio/rainbow,ccampbell/rainbow,ptigas/rainbow,HotelsDotCom/rainbow,metasyn/rainbow,cybrox/rainbow | util/compile.py | util/compile.py | #!/usr/bin/env python
import sys, os, subprocess
sys.argv.pop(0)
languages = sys.argv
languages.sort()
js_path = os.path.dirname(__file__) + '/../js/'
js_files_to_include = [js_path + 'rainbow.js']
included_languages = []
for language in languages:
path = js_path + 'language/' + language + '.js'
if not os.path.isfile(path):
print "no file for language: ",language
continue
included_languages.append(language)
js_files_to_include.append(path)
print 'waiting for closure compiler...'
proc = subprocess.Popen(['java', '-jar', '/usr/local/compiler-latest/compiler.jar', '--compilation_level', 'ADVANCED_OPTIMIZATIONS'] + js_files_to_include, stdout = subprocess.PIPE, stderr = subprocess.PIPE)
output, err = proc.communicate()
file_name = 'rainbow' + ('+' + '+'.join(included_languages) if len(included_languages) else '') + '.min.js'
print 'writing to file:',file_name
new_file = js_path + file_name
file = open(new_file, "w")
file.write(output)
file.close()
| apache-2.0 | Python | |
86c4bc1a3aa8c29293e5d77dfbd66ea72f58d832 | add some python tests | zshipko/libtwombly,zshipko/libtwombly,zshipko/libtwombly | py/test.py | py/test.py | import unittest
from twombly import *
class TestDrawing(unittest.TestCase):
def setUp(self):
self.image = new_image(1200, 800)
self.drawing = draw(self.image)
def test_new_drawing(self):
self.assertIsNotNone(self.image)
self.assertEquals(self.image.shape[0], 800)
self.assertEquals(self.image.shape[1], 1200)
self.assertEquals(self.image.shape[2], 3)
self.assertIsInstance(self.drawing, Drawing)
def test_draw_line(self):
self.drawing.set_antialias(False)
self.drawing.set_color(255, 0, 255)
self.drawing.move_to(10, 10)
self.drawing.line_to(120, 80)
self.drawing.set_line_width(10)
self.drawing.stroke()
self.assertEquals(self.image[11, 11][0], 255)
self.assertEquals(self.image[79, 119][0], 255)
self.assertEquals(self.image[79, 119][0], 255)
def test_draw_rect(self):
self.drawing.set_color(0, 255, 0)
self.drawing.rect(150, 120, 180, 170)
self.drawing.fill()
self.assertEquals(self.image[150, 150][1], 255)
self.assertEquals(self.image[150, 150][0], self.image[150, 150][2])
if __name__ == '__main__':
unittest.main()
| mit | Python | |
fa12247a378e172b75db8d04378883c9d1231449 | Add tests for children sorting in the admin panel. | lektor/lektor,lektor/lektor,lektor/lektor,lektor/lektor | tests/test_api.py | tests/test_api.py | import os
from operator import itemgetter
import pytest
from flask import json
from lektor.admin import WebAdmin
@pytest.fixture
def children_records_data():
"""Returns test values for children records' `id`, `title`, and `pub_date` fields."""
return (
{'id': '1', 'title': '1 is the first number', 'pub_date': '2016-07-11'},
{'id': '2', 'title': 'Must be the Second item in a row', 'pub_date': '2017-05-03'},
{'id': '3', 'title': 'Z is the last letter', 'pub_date': '2017-05-03'},
{'id': '4', 'title': 'Some random string', 'pub_date': '2018-05-21'},
)
@pytest.fixture(scope='function', autouse=True)
def prepare_stub_data(scratch_project, children_records_data):
"""Creates folders, models, test object and its children records."""
tree = scratch_project.tree
with open(os.path.join(tree, 'models', 'mymodel.ini'), 'w') as f:
f.write(
'[children]\n'
'order_by = -pub_date, title\n'
)
with open(os.path.join(tree, 'models', 'mychildmodel.ini'), 'w') as f:
f.write(
'[fields.title]\n'
'type = string\n'
'[fields.pub_date]\n'
'type = date'
)
os.mkdir(os.path.join(tree, 'content', 'myobj'))
with open(os.path.join(tree, 'content', 'myobj', 'contents.lr'), 'w') as f:
f.write(
'_model: mymodel\n'
'---\n'
'title: My Test Object\n'
)
for record in children_records_data:
os.mkdir(os.path.join(tree, 'content', 'myobj', record['id']))
with open(os.path.join(tree, 'content', 'myobj', record['id'], 'contents.lr'), 'w') as f:
f.write(
'_model: mychildmodel\n'
'---\n'
'title: %s\n'
'---\n'
'pub_date: %s' % (record['title'], record['pub_date'])
)
def test_children_sorting_via_api(scratch_project, scratch_env, children_records_data):
webadmin = WebAdmin(scratch_env, output_path=scratch_project.tree)
data = json.loads(webadmin.test_client().get('/admin/api/recordinfo?path=/myobj').data)
children_records_ids_provided_by_api = list(map(itemgetter('id'), data['children']))
records_ordered_by_title = sorted(children_records_data, key=itemgetter('title'))
ordered_records = sorted(records_ordered_by_title, key=itemgetter('pub_date'), reverse=True)
assert list(map(itemgetter('id'), ordered_records)) == children_records_ids_provided_by_api
| bsd-3-clause | Python | |
46735fedcff6c7ee37ecce0912b44fbdb09338e5 | Add test | angr/cle | tests/test_hex.py | tests/test_hex.py | #!/usr/bin/env python
import logging
import nose
import os
import cle
TEST_BASE = os.path.join(os.path.dirname(os.path.realpath(__file__)),
os.path.join('..', '..', 'binaries'))
def test_macho():
"""
Basic smoke-test for the Mach-O loader
:return:
"""
machofile = os.path.join(TEST_BASE, 'tests', 'armel', 'i2c_master_read-arduino_mzero.hex')
ld = cle.Loader(machofile, auto_load_libs=False, main_opts={'custom_arch':"ARMEL"})
nose.tools.assert_true(isinstance(ld.main_bin,cle.Hex))
nose.tools.assert_equals(ld.main_bin.os, 'unknown')
nose.tools.assert_equals(ld.main_bin.entry,0x44cd)
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
test_macho()
| bsd-2-clause | Python | |
923a90c8678c82a66233008d943f05e865b53ad8 | Add geom_density tests | has2k1/plotnine,has2k1/plotnine | ggplot/tests/test_geom_density.py | ggplot/tests/test_geom_density.py | from __future__ import absolute_import, division, print_function
import numpy as np
import pandas as pd
from .. import ggplot, aes, geom_density
from .conftest import cleanup
n = 6 # Some even number greater than 2
# ladder: 0 1 times, 1 2 times, 2 3 times, ...
df = pd.DataFrame({'x': np.repeat(range(n+1), range(n+1)),
'z': np.repeat(range(n//2), range(3, n*2, 4))})
@cleanup
def test_basic():
p = ggplot(df, aes('x', fill='factor(z)'))
p1 = p + geom_density(kernel='gaussian', alpha=.3)
p2 = p + geom_density(kernel='gaussian', alpha=.3, trim=True)
p3 = p + geom_density(kernel='triangular', alpha=.3) # other
assert p1 == 'gaussian'
assert p2 == 'gaussian-trimmed'
assert p3 == 'triangular'
| mit | Python | |
296044328ba9e9670db7b051d0160b08aa459414 | Create code.py | NikhilDhyani/GetLyrics | code/code.py | code/code.py | import requests
import re
import webbrowser
import os
from bs4 import BeautifulSoup
flag=0
name = (raw_input("please enter the name to search\n>"))
response = requests.get("https://www.google.co.in/search?&q="+name+"lyrics")
if response.status_code==200:
data = response.text
soup = BeautifulSoup(data,"html.parser")
#print(data)
for x in soup.find_all("h3",{"class":"r"}):
if flag==0:
for link in x.find_all('a'):
#print(link)
y=link.get('href')
if re.search(r'http://www.metrolyrics.com/',y):
flag=1
s = y
start = s.find('q=')+2
end = s.find('&sa', start)
url_extract = s[start:end]
response = requests.get(url_extract)
data = response.text
soup = BeautifulSoup(data,"html.parser")
for div in soup.find_all('div',{"class":"js-lyric-text"}):
for p in soup.find_all("p",{"class":"verse"}):
print("\n")
print(p.getText())
if re.search(r'http://www.lyricsmint.com/',y):
flag=1
s = y
start = s.find('q=')+2
end = s.find('&sa', start)
url_extract = s[start:end]
response = requests.get(url_extract)
data = response.text
soup = BeautifulSoup(data,"html.parser")
for div in soup.find_all('div',{"id":"lyric"}):
for p in div.find_all("p"):
print("\n")
print(p.getText())
if re.search(r'http://www.lyricsted.com/',y):
flag=1
s = y
start = s.find('q=')+2
end = s.find('&sa', start)
url_extract = s[start:end]
response = requests.get(url_extract)
data = response.text
soup = BeautifulSoup(data,"html.parser")
for div in soup.find_all('div',{"class":"entry-content group"}):
for p in div.find_all("p"):
text=p.getText()
if not re.search(r'lyrics',text):
print("\n")
print(p.getText())
| mit | Python | |
41df6d294b17b6b21af1d32e3c74c3ad1e0591bb | Add custom login form | fmorgner/django-maccman,fmorgner/django-maccman | maccman/forms.py | maccman/forms.py | from django.contrib.auth.forms import AuthenticationForm
from django.forms import forms
from django.utils.translation import gettext as _
from .models import UserProfile
class UserProfileAuthenticationForm(AuthenticationForm):
def confirm_login_allowed(self, user):
try:
UserProfile.objects.get(user=user)
except UserProfile.DoesNotExist:
raise forms.ValidationError(_('No profile found for this user'),
code='no_profile')
| bsd-3-clause | Python | |
c8fd1c96e629e3330dd11416a0eb7432118f1191 | rename for convenience | techtonik/pydotorg.pypi,techtonik/pydotorg.pypi | tools/demodata.py | tools/demodata.py | #!/usr/bin/python
import sys, os, urllib
root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(root)
import admin, store, config
cfg = config.Config(root+'/config.ini')
st = store.Store(cfg)
# classifiers
for c in urllib.urlopen("http://pypi.python.org/pypi?%3Aaction=list_classifiers").read().splitlines():
admin.add_classifier(st, c)
# Demo data starts here
# an admin
otk = st.store_user('fred', 'fredpw', 'fred@python.test')
st.delete_otk(otk)
st.add_role('fred', 'Admin', None)
# an owner
otk = st.store_user('barney', 'barneypw', 'barney@python.test')
st.delete_otk(otk)
# package spam
st.set_user('barney', '127.0.0.1', True)
for version in ('0.8', '0.9', '1.0'):
st.store_package('spam', version, {
'author':'Barney Geroellheimer',
'author_email':'barney@python.test',
'homepage':'http://spam.python.test/',
'license':'GPL',
'summary':'The spam package',
'description':'Does anybody want to provide real data here?',
'classifiers':["Development Status :: 6 - Mature",
"Programming Language :: Python :: 2"],
'_pypi_hidden':False
})
# package eggs
for version in ('0.1', '0.2', '0.3', '0.4'):
st.store_package('eggs', version, {
'author':'Barney Geroellheimer',
'author_email':'barney@python.test',
'homepage':'http://eggs.python.test/',
'license':'GPL',
'summary':'The eggs package',
'description':'Does anybody want to provide real data here?',
'classifiers':["Development Status :: 3 - Alpha",
"Programming Language :: Python :: 3"],
'_pypi_hidden':version!='0.4'
})
st.commit()
| bsd-3-clause | Python | |
4047c59ed58dc8fef037ecc589bb7d82df967d13 | Create __init__.py | ggreco77/GWsky | GWsky/__init__.py | GWsky/__init__.py | from .GWsky import GWsky
from .version import __version__
| bsd-2-clause | Python | |
e478afec3d706f6d5784937fd33c6feb32ea0218 | Create lastfm.py | TingPing/plugins,TingPing/plugins | HexChat/lastfm.py | HexChat/lastfm.py | from __future__ import print_function
import sys
import json
if sys.version_info[0] == 2:
import urllib2 as urllib_error
import urllib as urllib_request
else:
import urllib.error as urllib_error
import urllib.request as urllib_request
import hexchat
__module_name__ = 'lastfm'
__module_author__ = 'TingPing'
__module_version__ = '0'
__module_description__ = 'Tell others what you are playing on last.fm'
lfm_help = """Lastfm Usage:
LFM <username>
LFM -e"""
USERNAME = hexchat.get_pluginpref('lfm_username')
KEY = '4847f738e6b34c0dc20b13fe42ea008e'
def lfm_cb(word, word_eol, userdata):
global USERNAME
echo = False
if len(word) == 2:
if word[1] == '-e':
echo = True
else:
USERNAME = word[1]
hexchat.set_pluginpref('lfm_username', word[1])
print('Lastfm: Username set to {}'.format(word[1]))
return hexchat.EAT_ALL
if not USERNAME:
print('Lastfm: No username set, use /lfm <username> to set it')
return hexchat.EAT_ALL
url = 'http://ws.audioscrobbler.com/2.0/?method=user.getrecentTracks&user={}&api_key={}&format=json'.format(USERNAME, KEY)
try:
response = urllib_request.urlopen(url)
text = response.read().decode('utf-8')
response.close()
except urllib_error.HTTPError as err:
print('Lastfm Error: {}'.format(err))
return hexchat.EAT_ALL
data = json.loads(text)
track = data['recenttracks']['track'][0]
if not '@attr' in track or not track['@attr']['nowplaying']:
print('Lastfm: No song currently playing')
return hexchat.EAT_ALL
title = track['name']
artist = track['artist']['#text']
album = track['album']['#text']
if echo:
cmd = 'echo Lastfm: {} by {} on {}.'.format(title, artist, album)
elif hexchat.get_pluginpref('lfm_say'):
cmd = 'say Now playing {} by {} on {}.'.format(title, artist, album)
else:
cmd = 'me is now playing {} by {} on {}.'.format(title, artist, album)
hexchat.command(cmd)
return hexchat.EAT_ALL
hexchat.hook_command('lfm', lfm_cb, help=lfm_help)
| mit | Python | |
1de1ca40d73d5d8937593c26bd97f4c9d9461c87 | Add tests for account-tag. | ProgVal/irctest | irctest/server_tests/test_account_tag.py | irctest/server_tests/test_account_tag.py | """
<http://ircv3.net/specs/extensions/account-tag-3.2.html>
"""
from irctest import cases
from irctest.client_mock import NoMessageException
from irctest.basecontrollers import NotImplementedByController
class AccountTagTestCase(cases.BaseServerTestCase, cases.OptionalityHelper):
def connectRegisteredClient(self, nick):
self.addClient()
self.sendLine(2, 'CAP LS 302')
capabilities = self.getCapLs(2)
assert 'sasl' in capabilities
self.sendLine(2, 'AUTHENTICATE PLAIN')
m = self.getMessage(2, filter_pred=lambda m:m.command != 'NOTICE')
self.assertMessageEqual(m, command='AUTHENTICATE', params=['+'],
fail_msg='Sent “AUTHENTICATE PLAIN”, server should have '
'replied with “AUTHENTICATE +”, but instead sent: {msg}')
self.sendLine(2, 'AUTHENTICATE amlsbGVzAGppbGxlcwBzZXNhbWU=')
m = self.getMessage(2, filter_pred=lambda m:m.command != 'NOTICE')
self.assertMessageEqual(m, command='900',
fail_msg='Did not send 900 after correct SASL authentication.')
self.sendLine(2, 'USER f * * :*')
self.sendLine(2, 'NICK {}'.format(nick))
self.sendLine(2, 'CAP END')
self.skipToWelcome(2)
@cases.SpecificationSelector.requiredBySpecification('IRCv3.2')
@cases.OptionalityHelper.skipUnlessHasMechanism('PLAIN')
def testPrivmsg(self):
try:
self.connectClient('foo', capabilities=['account-tag'])
except AssertionError:
raise NotImplementedByController('account-tag')
self.getMessages(1)
self.controller.registerUser(self, 'jilles', 'sesame')
self.connectRegisteredClient('bar')
self.sendLine(2, 'PRIVMSG foo :hi')
self.getMessages(2)
m = self.getMessage(1)
self.assertMessageEqual(m, command='PRIVMSG', # RPL_MONONLINE
fail_msg='Sent non-730 (RPL_MONONLINE) message after '
'“bar” sent a PRIVMSG: {msg}')
self.assertIn('account', m.tags, m,
fail_msg='PRIVMSG by logged in nick '
'does not contain an account tag: {msg}')
self.assertEqual(m.tags['account'], 'jilles', m,
fail_msg='PRIVMSG by logged in nick '
'does not contain the correct account tag (should be '
'“jilles”): {msg}')
@cases.SpecificationSelector.requiredBySpecification('IRCv3.2')
@cases.OptionalityHelper.skipUnlessHasMechanism('PLAIN')
def testMonitor(self):
try:
self.connectClient('foo', capabilities=['account-tag'])
except AssertionError:
raise NotImplementedByController('account-tag')
if 'MONITOR' not in self.server_support:
raise NotImplementedByController('MONITOR')
self.sendLine(1, 'MONITOR + bar')
self.getMessages(1)
self.controller.registerUser(self, 'jilles', 'sesame')
self.connectRegisteredClient('bar')
m = self.getMessage(1)
self.assertMessageEqual(m, command='730', # RPL_MONONLINE
fail_msg='Sent non-730 (RPL_MONONLINE) message after '
'monitored nick “bar” connected: {msg}')
self.assertEqual(len(m.params), 2, m,
fail_msg='Invalid number of params of RPL_MONONLINE: {msg}')
self.assertEqual(m.params[1].split('!')[0], 'bar',
fail_msg='730 (RPL_MONONLINE) with bad target after “bar” '
'connects: {msg}')
self.assertIn('account', m.tags, m,
fail_msg='730 (RPL_MONONLINE) sent because of logged in nick '
'does not contain an account tag: {msg}')
self.assertEqual(m.tags['account'], 'jilles', m,
fail_msg='730 (RPL_MONONLINE) sent because of logged in nick '
'does not contain the correct account tag (should be '
'“jilles”): {msg}')
| mit | Python | |
21b5fbfc2b400e70ed51876d6046320c974b666d | fix issue #57: add plugin for imagebam | regosen/gallery_get | gallery_plugins/plugin_imagebam.py | gallery_plugins/plugin_imagebam.py | # Plugin for gallery_get.
# Each definition can be one of the following:
# - a string
# - a regex string
# - a function that takes source as a parameter and returns an array or a string. (You may assume that re and urllib are already imported.)
# If you comment out a parameter, it will use the default defined in __init__.py
# identifier (default = name of this plugin after "plugin_") : If there's a match, we'll attempt to download images using this plugin.
# title: parses the gallery page for a title. This will be the folder name of the output gallery.
# redirect: if the links in the gallery page go to an html instead of an image, use this to parse the gallery page.
# direct_links: if redirect is non-empty, this parses each redirect page for a single image. Otherwise, this parses the gallery page for all images.
direct_links = r'meta property="og:image" content="(.+?)"'
# same_filename (default=False): if True, uses filename specified on remote link. Otherwise, creates own filename with incremental index.
| mit | Python | |
17335a6165b6070ceb4c4f1e6bb1d5a6a8bf08f2 | Add AttachedNotesMixin | OmeGak/indico,pferreir/indico,mvidalgarcia/indico,DirkHoffmann/indico,indico/indico,ThiefMaster/indico,pferreir/indico,indico/indico,indico/indico,mvidalgarcia/indico,indico/indico,mic4ael/indico,mic4ael/indico,DirkHoffmann/indico,pferreir/indico,OmeGak/indico,DirkHoffmann/indico,ThiefMaster/indico,OmeGak/indico,mvidalgarcia/indico,ThiefMaster/indico,pferreir/indico,OmeGak/indico,DirkHoffmann/indico,mic4ael/indico,ThiefMaster/indico,mvidalgarcia/indico,mic4ael/indico | indico/core/db/sqlalchemy/notes.py | indico/core/db/sqlalchemy/notes.py | # This file is part of Indico.
# Copyright (C) 2002 - 2016 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from indico.modules.events.notes.models.notes import EventNote
from indico.util.caching import memoize_request
class AttachedNotesMixin(object):
"""Allows for easy retrieval of structured information about
items attached to the object"""
# When set to ``True`` .has_note preload all notes that exist for the same event
# Should be set to False when not applicable (no object.event property)
PRELOAD_EVENT_NOTES = False
@property
@memoize_request
def has_note(self):
return EventNote.get_for_linked_object(self, preload_event=self.PRELOAD_EVENT_NOTES) is not None
| mit | Python | |
f35d5251500268235598e4dbd2ddf91c994e1632 | Fix for django-tagging and automcomplete | DjangoAdminHackers/ixxy-admin-utils,DjangoAdminHackers/ixxy-admin-utils | ixxy_admin_utils/custom_widgets.py | ixxy_admin_utils/custom_widgets.py | from dal_select2_tagging.widgets import TaggingSelect2
from django import VERSION
from tagging.utils import parse_tag_input
class IxxyTaggingSelect2(TaggingSelect2):
# TaggingSelect2 doesn't handle spaces as delimeters in tags
# Django-tagging has a function we can use that just works
def render_options(self, *args):
"""Render only selected tags."""
selected_choices_arg = 1 if VERSION < (1, 10) else 0
selected_choices = args[selected_choices_arg]
if selected_choices:
selected_choices = parse_tag_input(selected_choices)
options = [
'<option value="%s" selected="selected">%s</option>' % (c, c)
for c in selected_choices
]
return '\n'.join(options) | mit | Python | |
0d4ea4493484e2d34468e205f392d2025e6c2f87 | Add tower 9 | arbylee/python-warrior | towers/beginner/level_009.py | towers/beginner/level_009.py | # -----------
# |>Ca @ S wC|
# -----------
level.description("Time to hone your skills and apply all of the abilities that you have learned.")
level.tip("Watch your back.")
level.clue("Don't just keep shooting the bow while you are being attacked from behind.")
level.time_bonus(40)
level.ace_score(100)
level.size(11, 1)
level.stairs(0, 0)
level.warrior(5, 0, 'east')
level.unit('captive', 1, 0, 'east')
level.unit('archer', 2, 0, 'east')
level.unit('thick_sludge', 7, 0, 'west')
level.unit('wizard', 9, 0, 'west')
level.unit('captive', 10, 0, 'west')
| mit | Python | |
8c0c52712833de31aeb2c4a48c8f4300966f5366 | Add lc230_kth_smallest_element_in_a_bst.py | bowen0701/algorithms_data_structures | lc230_kth_smallest_element_in_a_bst.py | lc230_kth_smallest_element_in_a_bst.py | """Leetcode 230. Kth Smallest Element in a BST
Medium
URL: https://leetcode.com/problems/kth-smallest-element-in-a-bst/
Given a binary search tree, write a function kthSmallest to find the
kth smallest element in it.
Note:
You may assume k is always valid, 1 <= k <= BST's total elements.
Example 1:
Input: root = [3,1,4,null,2], k = 1
3
/ \
1 4
\
2
Output: 1
Example 2:
Input: root = [5,3,6,2,4,null,null,1], k = 3
5
/ \
3 6
/ \
2 4
/
1
Output: 3
Follow up:
What if the BST is modified (insert/delete operations) often and
you need to find the kth smallest frequently?
How would you optimize the kthSmallest routine?
"""
# Definition for a binary tree node.
class TreeNode(object):
def __init__(self, val):
self.val = val
self.left = None
self.right = None
class Solution(object):
def kthSmallest(self, root, k):
"""
:type root: TreeNode
:type k: int
:rtype: int
"""
pass
def main():
pass
if __name__ == '__main__':
main()
| bsd-2-clause | Python | |
432f69910cb6f963a99b3ed2215c8b6476e8f71a | Add first version of script to merge results according to cases | deib-polimi/Spark-Log-Parser,deib-polimi/Spark-Log-Parser | merge_results.py | merge_results.py | #! /usr/bin/env python3
## Copyright 2018 Eugenio Gianniti
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
import csv
import re
import sys
from argparse import ArgumentParser
from collections import defaultdict
def parse_arguments (argv = None):
parser = ArgumentParser (description = "summarize results by case")
msg = "a file containing the comparison of simulations and real measures"
parser.add_argument ("comparisons", help = msg)
msg = "a file containing cases (Make syntax)"
parser.add_argument ("cases", help = msg)
return parser.parse_args (argv)
def parse_case (lineno, line):
if not hasattr (parse_case, "name_re"):
parse_case.name_re = re.compile ("^\s*(\w+)\s*:")
match = parse_case.name_re.match (line)
if match:
name = match.group (1)
else:
raise SyntaxError ("missing name in case file, line {}:\n{}".
format (lineno, line))
_, _, values_str = line.partition (":")
# By enforcing the sorting we are sure that the minimum distance
# configuration is always the smallest in case of ties
values = sorted (int (cores) for cores in values_str.split ())
return name, values
def cases_from_file (filename):
with open (filename) as infile:
lines = infile.readlines ()
cases = dict (parse_case (idx, line)
for idx, line in enumerate (lines, start = 1))
return cases
def arrange_cases (filename):
cases = cases_from_file (filename)
all_cores = cases["all"]
del cases["all"]
pairings = dict ()
for name, values in cases.items ():
pairs = dict ()
for core in all_cores:
closest = min (values, key = lambda c: abs (c - core))
pairs[core] = closest
pairings[name] = pairs
return pairings
def parse_comparisons (filename):
data = defaultdict (dict)
with open (filename) as infile:
reader = csv.DictReader (infile)
experiment = re.compile (
"(?P<executors>\d+)_(?P<cpus>\d+)_\d+[mMgG]_(?P<datasize>\d+)")
for row in reader:
model = int (row["ModelCores"])
query = row["Query"]
match = experiment.fullmatch (row["Experiment"])
cores = int (match["executors"]) * int (match["cpus"])
data[query][(cores, model)] = 100 * float (row["Error[1]"])
return data
def avg (numbers):
result = 0.
count = 0
for n in numbers:
result += n
count += 1
return result / count
def arrange_results (errors, pairings):
results = list ()
for case, pairs in pairings.items ():
for query, partials in errors.items ():
training = avg (abs (partials[(cores, model)])
for cores, model in pairs.items ()
if cores == model)
test = avg (abs (partials[(cores, model)])
for cores, model in pairs.items ()
if cores != model)
results.append ({
"Case": case,
"Query": query,
"Training MAPE": training,
"Test MAPE": test
})
return results
def write_table(results):
    """Emit the result rows as CSV on standard output."""
    columns = ["Case", "Query", "Training MAPE", "Test MAPE"]
    writer = csv.DictWriter(sys.stdout, columns)
    writer.writeheader()
    for row in results:
        writer.writerow(row)
if __name__ == "__main__":
    # Keep the original evaluation order: cases file first, then comparisons.
    cli = parse_arguments()
    case_pairings = arrange_cases(cli.cases)
    comparison_errors = parse_comparisons(cli.comparisons)
    write_table(arrange_results(comparison_errors, case_pairings))
| apache-2.0 | Python | |
2475916e0d17e4b03bb93970b88555a229a5f3c0 | add tests for ninefold | andrewsomething/libcloud,Scalr/libcloud,Kami/libcloud,Kami/libcloud,ZuluPro/libcloud,StackPointCloud/libcloud,ByteInternet/libcloud,apache/libcloud,erjohnso/libcloud,andrewsomething/libcloud,ByteInternet/libcloud,Scalr/libcloud,mistio/libcloud,pquentin/libcloud,pquentin/libcloud,ZuluPro/libcloud,Kami/libcloud,apache/libcloud,erjohnso/libcloud,mistio/libcloud,apache/libcloud,pquentin/libcloud,vongazman/libcloud,vongazman/libcloud,StackPointCloud/libcloud,andrewsomething/libcloud,mistio/libcloud,Scalr/libcloud,ZuluPro/libcloud,erjohnso/libcloud,vongazman/libcloud,ByteInternet/libcloud,StackPointCloud/libcloud | libcloud/test/storage/test_ninefold.py | libcloud/test/storage/test_ninefold.py | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import base64
from libcloud.utils.py3 import b
from libcloud.storage.drivers.ninefold import NinefoldStorageDriver
from libcloud.test.storage.test_atmos import AtmosMockHttp, AtmosTests
class NinefoldTests(AtmosTests, unittest.TestCase):
def setUp(self):
NinefoldStorageDriver.connectionCls.conn_class = AtmosMockHttp
NinefoldStorageDriver.path = ''
AtmosMockHttp.type = None
AtmosMockHttp.upload_created = False
self.driver = NinefoldStorageDriver('dummy', base64.b64encode(b('dummy')))
self._remove_test_file()
| apache-2.0 | Python | |
5a0d37668c76c7ed77347f2d59cb5b7a20284385 | Add names updater | Pat-Laugh/gitignore-modifier | names_updater.py | names_updater.py | #!/usr/bin/env python
# Copyright 2017 Patrick Laughrea
# Licensed under the Apache License, Version 2.0
# Usage/design notes kept as a plain module-level string constant (not the
# module docstring) so it can be referenced programmatically as README.
README = '''
## Purpose
This is a script to update names in the `gitignore.py` file.
## Requirements
The gitignore repository must have been cloned on the machine at `~/gitignore`.
The directory where this script is run must contain the `gitignore.py` file to
update.
## Concept
The `gitignore.py` file is read to determine where the `names` variable is
located. Its location is considered as the first occurence of an unindented
`names` variable.
`gitignore.py` is then used to call:
- `local set ~/gitignore`, to set the local path
- `local call git pull`, to update the directory's contents
- `dict`, to get all the key-values
- `local reset`, to reset the local path
The option `dict` returns the key-values of all the gitignore templates. If the
old and new values differ, then the `names` variable in the `gitignore.py` file
is set to be equal to the new value, after it's been stripped of all spaces.
'''
import sys, os, subprocess, ast
def main(argc, argv):
    """Synchronise the `names` table inside gitignore.py with the cloned repo.

    argc/argv are accepted by the entry point but not used.
    """
    # Operate relative to this script's own directory.
    script_dir = os.path.dirname(os.path.abspath(__file__))
    os.chdir(script_dir)
    target = 'gitignore.py'
    with open(target, 'r') as source:
        lines = source.readlines()
    repo_path = os.path.join(os.path.expanduser('~'), 'gitignore')
    names_line = get_location_names(lines)
    # Drive gitignore.py's own CLI: point it at the clone, refresh it,
    # dump the template dictionary, then restore the previous local path.
    print_and_call('./%s local set %s' % (target, repo_path))
    print_and_call('./%s local call git pull' % target)
    fresh_names = ast.literal_eval(print_and_call('./%s dict' % target))
    print_and_call('./%s local reset' % target)
    current_names = get_old_names(lines[names_line])
    if current_names == fresh_names:
        print('Names are up to date.')
    else:
        # Rewrite only the `names = {...}` line, with all spaces removed.
        lines[names_line] = 'names = %s\n' % str(fresh_names).replace(' ', '')
        with open(target, 'w') as sink:
            sink.writelines(lines)
        print('Names updated.')
def get_location_names(lines):
    """Return the index of the first line that starts with "names".

    Replaces the manual counter loop with enumerate/startswith.  When no
    such line exists, len(lines) is returned (matching the original
    fall-through behaviour).
    """
    for index, line in enumerate(lines):
        if line.startswith('names'):
            return index
    return len(lines)
def print_and_call(cmd):
    """Echo *cmd*, run it (split on whitespace), and return its stdout bytes."""
    print(cmd)
    argv = cmd.split()
    return subprocess.check_output(argv)
def get_old_names(line):
    """Evaluate the dict literal starting at the first '{' on *line*."""
    start = line.find('{')
    return ast.literal_eval(line[start:])
if __name__ == '__main__':
main(len(sys.argv), sys.argv) | apache-2.0 | Python | |
3bdc692133c7e9e56fb3eeb77d0951ce21a225ec | Create ds_string_word_pattern.py | ngovindaraj/Python | leetcode/ds_string_word_pattern.py | leetcode/ds_string_word_pattern.py |
# @file Word Pattern
# @brief Check whether pattern letters and words form a bijection
# https://leetcode.com/problems/word-pattern/
'''
Given a pattern and a string str, find if str follows the same pattern.
Here follow means a full match, such that there is a bijection between
a letter in pattern and a non-empty word in str.
Examples:
pattern = "abba", str = "dog cat cat dog" should return true.
pattern = "abba", str = "dog cat cat fish" should return false.
pattern = "aaaa", str = "dog cat cat dog" should return false.
pattern = "abba", str = "dog dog dog dog" should return false.
Notes:
You may assume pattern contains only lowercase letters, and str contains
lowercase letters separated by a single space.
'''
| mit | Python | |
36843505e1f8d80768c75487faf7cef20294936b | Add migration file | uccser/cs-unplugged,uccser/cs-unplugged,uccser/cs-unplugged,uccser/cs-unplugged | csunplugged/topics/migrations/0047_curriculumarea_colour.py | csunplugged/topics/migrations/0047_curriculumarea_colour.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-05-05 09:20
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the nullable ``colour`` field to the CurriculumArea model."""

    # Must run after the glossary-definition migration in this app.
    dependencies = [
        ('topics', '0046_glossarydefinition'),
    ]

    operations = [
        # Nullable so existing rows need no default value on upgrade.
        migrations.AddField(
            model_name='curriculumarea',
            name='colour',
            field=models.CharField(max_length=15, null=True),
        ),
    ]
| mit | Python | |
f7c0241445cf21d15ea640b199826025b3a85aa6 | Create Enforce2FAMiddleware unit tests | dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq | corehq/apps/users/tests/test_middleware.py | corehq/apps/users/tests/test_middleware.py | from django.test import TestCase
from django.contrib.auth import get_user_model
from django.test.client import Client
from django_otp.middleware import OTPMiddleware
from corehq.util.test_utils import flag_enabled
from corehq.apps.users.models import CouchUser
from corehq.apps.users.middleware import Enforce2FAMiddleware
import mock
class TestTwoFactorMiddleware(TestCase):
    """Exercise Enforce2FAMiddleware.process_view for each 2FA state.

    Two fixture requests are built: one for an /account/ URL (always
    exempt from enforcement) and one for a non-account URL.  The couch
    user's `two_factor_disabled` property is mocked per test.
    """

    def setUp(self):
        self.account_request = self.create_request(request_url="/account/",
                                                   username="test_1@test.com",
                                                   password="123")
        self.non_account_request = self.create_request(request_url="/not_account/",
                                                       username="test_2@test.com",
                                                       password="123")

    @classmethod
    def create_request(cls, request_url, username, password):
        """Build a logged-in WSGI request with an OTP-processed user attached."""
        # Initialize request
        request = Client().get(request_url).wsgi_request
        # Create user
        request.user = get_user_model().objects.create_user(username=username, email=username, password=password)
        username = request.user.get_username()
        # Create couch user
        request.couch_user = CouchUser()
        # Login
        assert Client().login(username=username, password=password)
        # Activate middleware (sets request.user.otp_device / is_verified)
        OTPMiddleware().process_request(request)
        return request

    @classmethod
    def enable_two_factor_for_user(cls, request):
        # Any truthy otp_device makes django-otp treat the user as verified.
        request.user.otp_device = "test_device"

    @classmethod
    def call_process_view_with_couch_mock(cls, request, disable_two_factor):
        """Run process_view with CouchUser.two_factor_disabled mocked."""
        with mock.patch('corehq.apps.users.models.CouchUser.two_factor_disabled',
                        new_callable=mock.PropertyMock,
                        return_value=disable_two_factor):
            response = Enforce2FAMiddleware().process_view(request, "test_view_func",
                                                           "test_view_args", "test_view_kwargs")
            return response

    @flag_enabled('TWO_FACTOR_SUPERUSER_ROLLOUT')
    def test_process_view_permission_denied(self):
        # No OTP device and 2FA not disabled -> 403 with the OTP-required page.
        request = self.non_account_request
        with mock.patch('corehq.apps.users.models.CouchUser.two_factor_disabled',
                        new_callable=mock.PropertyMock,
                        return_value=False):
            response = Enforce2FAMiddleware().process_view(request, "test_view_func",
                                                           "test_view_args", "test_view_kwargs")
        self.assertEqual(response.status_code, 403)
        self.assertEqual(response._request, request)
        self.assertEqual(response.template_name, 'two_factor/core/otp_required.html')

    @flag_enabled('TWO_FACTOR_SUPERUSER_ROLLOUT')
    def test_process_view_two_factor_enabled(self):
        # User has an OTP device -> middleware lets the view proceed (None).
        request = self.non_account_request
        self.enable_two_factor_for_user(request)
        response = self.call_process_view_with_couch_mock(request, disable_two_factor=False)
        self.assertEqual(response, None)

    @flag_enabled('TWO_FACTOR_SUPERUSER_ROLLOUT')
    def test_process_view_couch_user_two_factor_disabled(self):
        # Couch user opted out of 2FA -> no enforcement.
        request = self.non_account_request
        response = self.call_process_view_with_couch_mock(request, disable_two_factor=True)
        self.assertEqual(response, None)

    @flag_enabled('TWO_FACTOR_SUPERUSER_ROLLOUT')
    def test_process_view_account_url(self):
        # /account/ URLs are exempt even when 2FA would otherwise be required.
        request = self.account_request
        response = self.call_process_view_with_couch_mock(request, disable_two_factor=False)
        self.assertEqual(response, None)
| bsd-3-clause | Python | |
79170b41e947557dad23ef2ea93934075548dc32 | Add reactor example with ODEs implemented in Python | Cantera/cantera-svn,Cantera/cantera-svn,Cantera/cantera-svn,Cantera/cantera-svn,Cantera/cantera-svn,Cantera/cantera-svn | interfaces/cython/cantera/examples/reactors/custom.py | interfaces/cython/cantera/examples/reactors/custom.py | """
Solve a constant pressure ignition problem where the governing equations are
implemented in Python.
This demonstrates an approach for solving problems where Cantera's reactor
network model cannot be configured to describe the system in question. Here,
Cantera is used for evaluating thermodynamic properties and kinetic rates while
an external ODE solver is used to integrate the resulting equations. In this
case, the SciPy wrapper for VODE is used, which uses the same variable-order BDF
methods as the Sundials CVODES solver used by Cantera.
"""
import cantera as ct
import numpy as np
import scipy.integrate
class ReactorOde(object):
    """Right-hand side of the constant-pressure reactor ODE system.

    The state vector is y = [T, Y_1, ..., Y_K]; the Cantera phase object
    supplies thermodynamic properties and kinetic rates.
    """

    def __init__(self, gas):
        # Keep the phase object and its (constant) pressure as system data.
        self.gas = gas
        self.P = gas.P

    def __call__(self, t, y):
        """Evaluate y' = f(t, y)."""
        gas = self.gas
        gas.set_unnormalized_mass_fractions(y[1:])
        gas.TP = y[0], self.P
        density = gas.density
        rates = gas.net_production_rates
        # Energy balance at constant pressure:
        # dT/dt = -sum_k(h_k * wdot_k) / (rho * cp)
        temperature_dot = -(np.dot(gas.partial_molar_enthalpies, rates) /
                            (density * gas.cp))
        species_dot = rates * gas.molecular_weights / density
        return np.hstack((temperature_dot, species_dot))
# Build the gas phase and set the initial condition.
gas = ct.Solution('gri30.xml')
# Initial condition
gas.TPX = 1001, ct.one_atm, 'H2:2,O2:1,N2:4'
y0 = np.hstack((gas.T, gas.Y))
# Set up objects representing the ODE and the solver
ode = ReactorOde(gas)
solver = scipy.integrate.ode(ode)
solver.set_integrator('vode', method='bdf', with_jacobian=True)
solver.set_initial_value(y0, 0.0)
# Integrate the equations, keeping T(t) and Y(k,t)
t_end = 1e-3
t_out = [0.0]
T_out = [gas.T]
Y_out = [gas.Y]
dt = 1e-5
while solver.successful() and solver.t < t_end:
    solver.integrate(solver.t + dt)
    # NOTE(review): gas.T / gas.Y here reflect the *last RHS evaluation*
    # inside the solver step, not solver.y at solver.t — confirm whether the
    # gas state should be reset from solver.y before recording.
    t_out.append(solver.t)
    T_out.append(gas.T)
    Y_out.append(gas.Y)
Y_out = np.array(Y_out).T
# Plot the results (skipped gracefully when matplotlib is unavailable).
try:
    import matplotlib.pyplot as plt
    L1 = plt.plot(t_out, T_out, color='r', label='T', lw=2)
    plt.xlabel('time (s)')
    plt.ylabel('Temperature (K)')
    plt.twinx()
    L2 = plt.plot(t_out, Y_out[gas.species_index('OH')], label='OH', lw=2)
    plt.ylabel('Mass Fraction')
    plt.legend(L1+L2, [line.get_label() for line in L1+L2], loc='lower right')
    plt.show()
except ImportError:
    print('Matplotlib not found. Unable to plot results.')
| bsd-3-clause | Python | |
d9d59666695ef9bdfce6fc4d8d886d5b492c546b | add code to submit oozie jobs via rest api | usc-isi-i2/mydig-webservice,usc-isi-i2/mydig-webservice,usc-isi-i2/mydig-webservice,usc-isi-i2/mydig-webservice | ws/jobs/manage_oozie_jobs.py | ws/jobs/manage_oozie_jobs.py | # Memex cluster oozie url - http://10.1.94.54:11000/oozie
import requests
class OozieJobs(object):
    """Minimal client for the Oozie workflow REST API."""

    def __init__(self, oozie_url='http://localhost:11000/oozie'):
        self.oozie_url = oozie_url

    def submit_oozie_jobs(self, config_xml):
        """POST a job configuration (an open file object, binary mode) and
        return the raw HTTP response."""
        endpoint = '{}/v1/jobs'.format(self.oozie_url)
        return requests.post(endpoint, files={'file': config_xml})

    def manage_job(self, job_id, action):
        """PUT a lifecycle action for *job_id* and return the response.

        Valid actions: 'start', 'suspend', 'resume', 'kill', 'rerun'.
        """
        endpoint = '{}/v1/job/{}?action={}'.format(self.oozie_url, job_id, action)
        return requests.put(endpoint)
"""
Sample config.xml
<configuration>
<property>
<name>user.name</name>
<value>rkanter</value>
</property>
<property>
<name>oozie.wf.application.path</name>
<value>${nameNode}/user/${user.name}/${examplesRoot}/apps/no-op</value>
</property>
<property>
<name>queueName</name>
<value>default</value>
</property>
<property>
<name>nameNode</name>
<value>hdfs://localhost:8020</value>
</property>
<property>
<name>jobTracker</name>
<value>localhost:8021</value>
</property>
<property>
<name>examplesRoot</name>
<value>examples</value>
</property>
</configuration>
""" | mit | Python | |
193a2cd23d074e346829caaed17194c9a130c134 | Add example app ipxactwriter.py | csquaredphd/ipyxact,olofk/ipyxact,csquaredphd/ipyxact,olofk/ipyxact,csquaredphd/ipyxact | ipxactwriter.py | ipxactwriter.py | import ipyxact.ipyxact as ipyxact
class Signal(object):
    """Plain value object describing one bus signal.

    width == 0 means a scalar (no vector range); *low* is the lowest bit
    index and *asc* selects an ascending bit range.
    """
    def __init__(self, name, width=0, low=0, asc=False):
        self.name, self.width = name, width
        self.low, self.asc = low, asc
class Vector(ipyxact.Vector):
    """IP-XACT vector whose left/right bounds follow the bit ordering.

    Descending (default): left = high bit, right = low bit.
    Ascending: the bounds are swapped.
    """
    def __init__(self, width=0, low=0, asc=False):
        high = low + width - 1
        if asc:
            self.left, self.right = low, high
        else:
            self.left, self.right = high, low
class Port(ipyxact.Port):
    """IP-XACT port with a wire of the given direction and an optional
    vector range (only attached when width > 0)."""
    def __init__(self, name, direction, width=0, low=0, asc=False):
        self.name = name
        wire = ipyxact.Wire()
        wire.direction = direction
        if width > 0:
            wire.vector = Vector(width, low, asc)
        self.wire = wire
class WBBusInterface(ipyxact.BusInterface):
    """Wishbone B3 bus interface with opencores bus/abstraction types."""

    def __init__(self, name, mode):
        super(WBBusInterface, self).__init__()
        self.name = name
        self.set_mode(mode)
        abstractionType = ipyxact.AbstractionType()
        abstractionType.vendor = "org.opencores"
        abstractionType.library = "wishbone"
        abstractionType.name = "wishbone.absDef"
        abstractionType.version = "b3"
        self.abstractionType = abstractionType
        busType = ipyxact.BusType()
        busType.vendor = "org.opencores"
        busType.library = "wishbone"
        busType.name = "wishbone"
        busType.version = "b3"
        self.busType = busType
        self.portMaps = ipyxact.PortMaps()

    def _add_port_map(self, physical_name, logical_name, width):
        """Append one physical<->logical map; vectors only when width > 0."""
        portMap = ipyxact.PortMap()
        physicalPort = ipyxact.PhysicalPort()
        physicalPort.name = physical_name
        if width > 0:
            physicalPort.vector = Vector(width)
        portMap.physicalPort = physicalPort
        logicalPort = ipyxact.LogicalPort()
        logicalPort.name = logical_name
        if width > 0:
            logicalPort.vector = Vector(width)
        portMap.logicalPort = logicalPort
        # BUG FIX: the original appended to the module-global `busif`, so
        # connect() only worked on the single instance created at script
        # level; appending to self makes the class usable generally.
        self.portMaps.portMap.append(portMap)

    def connect(self, prefix):
        """Map the physical {prefix}_* pins onto the Wishbone logical ports."""
        for p in WB_MASTER_PORTS:
            self._add_port_map("{}_{}_i".format(prefix, p.name),
                               "{}_o".format(p.name), p.width)
        for p in WB_SLAVE_PORTS:
            self._add_port_map("{}_{}_o".format(prefix, p.name),
                               "{}_i".format(p.name), p.width)
# Signals driven by a Wishbone B3 master (address, write data, byte
# select, control/strobe and burst-type hints).
WB_MASTER_PORTS = [Signal('adr', 32),
                   Signal('dat', 32),
                   Signal('sel', 4),
                   Signal('we'),
                   Signal('cyc'),
                   Signal('stb'),
                   Signal('cti', 3),
                   Signal('bte', 2)]
# Signals driven by a Wishbone B3 slave (read data and termination flags).
WB_SLAVE_PORTS = [Signal('dat', 32),
                  Signal('ack'),
                  Signal('err'),
                  Signal('rty')]
# Build a SPIRIT 1.5 IP-XACT description of the interconnect component.
ipxact = ipyxact.Ipxact()
ipxact.nsver = '1.5'
ipxact.nskey = 'spirit'
ipxact.nsval = 'http://www.spiritconsortium.org/XMLSchema/SPIRIT/1.5'
ns = {ipxact.nskey : ipxact.nsval}  # NOTE(review): `ns` appears unused here
ipxact.component.vendor = 'opencores'
ipxact.component.library = 'ip'
ipxact.component.name = 'autointercon'
ipxact.component.version = '0'
ipxact.component.model = ipyxact.Model()
# Physical ports: clock/reset plus one wbs_ram_* pin per Wishbone signal.
ports = ipyxact.Ports()
clk = Port('wb_clk_i', 'in')
rst = Port('wb_rst_i', 'in')
ports.port.append(clk)
ports.port.append(rst)
for p in WB_MASTER_PORTS:
    mp = Port('wbs_ram_{}_i'.format(p.name), 'in', p.width)
    ports.port.append(mp)
for p in WB_SLAVE_PORTS:
    mp = Port('wbs_ram_{}_o'.format(p.name), 'out', p.width)
    ports.port.append(mp)
ipxact.component.model.ports = ports
# One mirrored-master Wishbone bus interface mapped onto those pins.
ipxact.component.busInterfaces = ipyxact.BusInterfaces()
busif = WBBusInterface("wb", "mirroredMaster")
busif.connect("wbs_ram")
ipxact.component.busInterfaces.busInterface.append(busif)
ipxact.write('wb_intercon.xml')
| mit | Python | |
1cb8f468325825bcadac1d5126a1415e57e8d314 | add test runner for notebooks | fabianrost84/ipycache,ihrke/ipycache,rossant/ipycache | ipynb_runner.py | ipynb_runner.py | """
Script for running ipython notebooks.
"""
from IPython.nbformat.current import read
from IPython.kernel import KernelManager
import argparse
from pprint import pprint
import sys
def get_ncells(nb):
    """Return the number of code cells in notebook *nb*."""
    return sum(1
               for ws in nb.worksheets
               for cell in ws.cells
               if cell.cell_type == "code")
# NOTE: this script uses Python 2 print statements throughout.
# input arguments parsing
parser = argparse.ArgumentParser(description="Run an IPython Notebook")
parser.add_argument("notebook",
                    type=str,
                    help='notebook to run')
parser.add_argument("-v", "--verbose", help="increase output verbosity",
                    action="store_true")
parser.add_argument("-b", "--break-at-error", help="stop at error",
                    action="store_true")
parser.add_argument("-s", "--summary", help="print summary",
                    action="store_true")
args = parser.parse_args()
# add .ipynb extension if not given (any '.' in the name suppresses it)
notebook = '{name}{ext}'.format(name=args.notebook, ext=''
                                if '.' in args.notebook else '.ipynb')
if args.verbose:
    print 'Checking: {}'.format(notebook)
nb = read(open(notebook), 'json')
# starting up kernel
km = KernelManager()
km.start_kernel()
kc = km.client()
kc.start_channels()
shell=kc.shell_channel
ncells=get_ncells(nb)
nerrors=0 # accumulate number of errors
nsucc=0
# loop over cells, executing each code cell on the kernel in turn
icell=1
for ws in nb.worksheets:
    for cell in ws.cells:
        if cell.cell_type == 'code':
            if args.verbose:
                print "Cell:%i/%i> "%(icell,ncells),
            icell+=1
            shell.execute(cell.input)
            msg=shell.get_msg()
            status=msg['content']['status']
            if args.verbose:
                print status
            if status=='ok':
                nsucc+=1
                continue
            nerrors+=1
            if args.verbose:
                print "="*80
                print msg['content']['ename'], ":", msg['content']['evalue']
                print "{0:-^80}".format("<CODE>")
                print cell.input
                print "{0:-^80}".format("</CODE>")
                for m in msg['content']['traceback']:
                    print m
                print "="*80
            if args.break_at_error:
                # NOTE(review): this break only exits the inner cell loop,
                # not the outer worksheet loop — confirm intended behaviour.
                break
if args.summary:
    print "{0:#^80}".format(" Summary: %s "%args.notebook)
    print "Num Errors : ", nerrors
    print "Num Successes: ", nsucc
    print "Num Cells : ", ncells
# kernel cleanup
kc.stop_channels()
km.shutdown_kernel(now=True)
# exit status -1 signals that at least one cell failed
sys.exit(-1 if nerrors>0 else 0)
| bsd-3-clause | Python | |
fa855ae48a15cfd9b1e01227952a29249faa55d0 | Create longest-uncommon-subsequence-i.py | kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,yiwen-luo/LeetCode,jaredkoontz/leetcode,yiwen-luo/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,yiwen-luo/LeetCode,jaredkoontz/leetcode,tudennis/LeetCode---kamyu104-11-24-2015,jaredkoontz/leetcode,jaredkoontz/leetcode,yiwen-luo/LeetCode,kamyu104/LeetCode,kamyu104/LeetCode,kamyu104/LeetCode,kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,jaredkoontz/leetcode,yiwen-luo/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015 | Python/longest-uncommon-subsequence-i.py | Python/longest-uncommon-subsequence-i.py | # Time: O(min(a, b))
# Space: O(1)
# Given a group of two strings, you need to find the longest uncommon subsequence of this group of two strings.
# The longest uncommon subsequence is defined as the longest subsequence of one of these strings
# and this subsequence should not be any subsequence of the other strings.
#
# A subsequence is a sequence that can be derived from one sequence
# by deleting some characters without changing the order of the remaining elements.
# Trivially, any string is a subsequence of itself and an empty string is a subsequence of any string.
#
# The input will be two strings, and the output needs to be the length of the longest uncommon subsequence.
# If the longest uncommon subsequence doesn't exist, return -1.
#
# Example 1:
# Input: "aba", "cdc"
# Output: 3
# Explanation: The longest uncommon subsequence is "aba" (or "cdc"),
# because "aba" is a subsequence of "aba",
# but not a subsequence of any other strings in the group of two strings.
# Note:
#
# Both strings' lengths will not exceed 100.
# Only letters from a ~ z will appear in input strings.
class Solution(object):
    def findLUSlength(self, a, b):
        """Length of the longest uncommon subsequence of two strings.

        Identical strings share every subsequence, so the answer is -1;
        otherwise the longer string itself is an uncommon subsequence.

        :type a: str
        :type b: str
        :rtype: int
        """
        return -1 if a == b else max(len(a), len(b))
| mit | Python | |
f61d0bffc71c85c3ea57dfae09c1a0275a42e1b0 | Create network.py | thewhitetulip/SamplePythonScripts | network.py | network.py | #uses scapy, you need to have scapy installed
from scapy.all import *
# Python 2 script: raw_input and print statements.
ip = raw_input('Enter IP address')
try:
    # ARP who-has broadcast for the given IP; srp returns (answered, unanswered).
    # NOTE(review): the double assignment `alive, dead = alive,dead=...` is
    # redundant — a single `alive, dead = srp(...)` would suffice.
    alive, dead = alive,dead=srp(Ether(dst="ff:ff:ff:ff:ff:ff")/ARP(pdst=ip), timeout=2, verbose=0)
    print "MAC - IP"
    print alive[0][1].hwsrc # will print the MAC address of the IP address you specify in the ip
except:
    # NOTE(review): bare except silently swallows all failures (no reply,
    # missing privileges, bad input) — consider narrowing and reporting.
    pass
| mit | Python | |
4eeb2f120f42cb0af1cb8aca9c95b98a1c568d18 | add july class, and first script of pyplot | JasonWayne/machine-learning-from-scratch | july-ml-course/first_of_pyplot.py | july-ml-course/first_of_pyplot.py | import matplotlib.pyplot as plt
import math
if __name__ == "__main__":
# prepare data
x = [float(i) / 100 for i in range(1, 300)]
y = [math.log(i) for i in x]
# plot
plt.plot(x, y, 'r-', linewidth=3, label='log curve')
# prepare data
p1 = 20
p2 = 175
a = [x[p1], x[p2]]
b = [y[p1], y[p2]]
# plot
plt.plot(a, b, 'g-', linewidth=2)
plt.plot(a, b, 'b*', markersize=15, alpha=0.5)
# config
plt.legend(loc='upper left')
plt.grid(True)
plt.xlabel('x')
plt.ylabel('log(x)')
# show
plt.show()
| mit | Python | |
9d60de90e49231f64e2fe1719077a3b58c582faa | Create lcm.py | travisrobinson/coursera-specializations,travisrobinson/coursera-specializations,travisrobinson/coursera-specializations,travisrobinson/coursera-specializations,travisrobinson/coursera-specializations,travisrobinson/coursera-specializations,travisrobinson/coursera-specializations | data-structures-and-algorithms/algorithmic-toolbox/week-2/lcm.py | data-structures-and-algorithms/algorithmic-toolbox/week-2/lcm.py | # Uses python3
import sys
def gcd(a, b):
while b!= 0:
(a,b) = (b,a%b)
return a
def main():
a,b = map(int,sys.stdin.readline().split())
c = gcd(a, b)
d = (a*b)//c
print(d)
main()
| cc0-1.0 | Python | |
51d15d4c94ad9daeea15a559c9201a560971ad10 | Create english_to_hack3r.py | meyersbs/misc_nlp_scripts | english_to_hack3r.py | english_to_hack3r.py | # @AUTHOR: Benjamin Meyers
# @DESCRIPTION: Try to write code to convert text into hAck3r, using regular
# expressions and substitution, where e → 3, i → 1, o → 0,
# l → |, s → 5, . → 5w33t!, ate → 8. Normalize the text to
# lowercase before converting it. Add more substitutions of your
# own. Now try to map s to two different values: $ for
# word-initial s, and 5 for word-internal s.
import re, sys
def file_to_hacker(input_file):
with open(input_file, 'r') as f:
for line in f:
temp_line = line.lower().strip('.,:;!?')
temp_line = re.sub(r'\ss', ' $', temp_line)
temp_line = re.sub(r'ate', '8', temp_line)
temp_line = re.sub(r'for', '4', temp_line)
temp_line = re.sub(r'too', '2', temp_line)
temp_line = re.sub(r'to', '2', temp_line)
temp_line = re.sub(r'e', '3', temp_line)
temp_line = re.sub(r'i', '1', temp_line)
temp_line = re.sub(r'o', '0', temp_line)
temp_line = re.sub(r'l', '|', temp_line)
temp_line = re.sub(r's', '5', temp_line)
temp_line = re.sub(r'\.', '5w33t!', temp_line)
print(temp_line)
def text_to_hacker(input_text):
    """Print each line of *input_text* converted to hAck3r-speak.

    Same rule table and ordering as file_to_hacker, applied per line of
    the given string.
    """
    rules = [(r'\ss', ' $'), (r'ate', '8'), (r'for', '4'), (r'too', '2'),
             (r'to', '2'), (r'e', '3'), (r'i', '1'), (r'o', '0'),
             (r'l', '|'), (r's', '5'), (r'\.', '5w33t!')]
    for line in input_text.split('\n'):
        hacked = line.lower().strip('.,:;!?')
        for pattern, replacement in rules:
            hacked = re.sub(pattern, replacement, hacked)
        print(hacked)
def main():
    """Dispatch on the mode flag: -f converts a file, -t converts literal text."""
    mode = sys.argv[1]
    if mode == '-f':
        file_to_hacker(sys.argv[2])
    elif mode == '-t':
        text_to_hacker(sys.argv[2])
    else:
        sys.exit("Invalid command.")
# Only run the CLI when executed directly, not on import.
if __name__ == "__main__":
    main()
| mit | Python | |
47ae456b3a4252d7c838219e3ebd15e049316891 | Add three shutters and open/close functions | NSLS-II-CHX/ipython_ophyd,NSLS-II-CHX/ipython_ophyd | profile_collection/startup/25-shutter.py | profile_collection/startup/25-shutter.py | from __future__ import print_function
import epics
import logging
from ophyd.controls import EpicsSignal
from ophyd.controls.signal import SignalGroup
class Shutter(SignalGroup):
    """Shutter driven by two EPICS command PVs with matching status PVs.

    Writing 1 to the open (resp. close) command PV actuates the shutter.
    """

    def __init__(self, open=None, open_status=None,
                 close=None, close_status=None):
        super(Shutter, self).__init__()
        # Keyword names (including `open`) are part of the caller-facing API.
        self.add_signal(EpicsSignal(open_status, write_pv=open, alias='_open'))
        self.add_signal(EpicsSignal(close_status, write_pv=close, alias='_close'))

    def open(self):
        """Request the shutter to open."""
        self._open.value = 1

    def close(self):
        """Request the shutter to close."""
        self._close.value = 1
# Front-end / FOE shutter instances, wired to their beamline PVs.
foe_sh = Shutter(open='XF:11ID-PPS{Sh:FE}Cmd:Opn-Cmd',
                 open_status='XF:11ID-PPS{Sh:FE}Cmd:Opn-Sts',
                 close='XF:11ID-PPS{Sh:FE}Cmd:Cls-Cmd',
                 close_status='XF:11ID-PPS{Sh:FE}Cmd:Cls-Sts')
fe_sh = Shutter(open='XF:11IDA-PPS{PSh}Cmd:Opn-Cmd',
                open_status='XF:11IDA-PPS{PSh}Cmd:Opn-Sts',
                close='XF:11IDA-PPS{PSh}Cmd:Cls-Cmd',
                close_status='XF:11IDA-PPS{PSh}Cmd:Cls-Sts')
class FastShutter(EpicsSignal):
    """Single-PV shutter: writing 1 opens it, 0 closes it."""
    def open(self):
        self.put(1)
    def close(self):
        self.put(0)
# Fast shutter driven through the Zebra soft input bit.
fast_sh = FastShutter('XF:11IDB-ES{Zebra}:SOFT_IN:B0',
                      rw=True, name='fast_sh')
| bsd-2-clause | Python | |
7e0561c01a6e7e321b51dace78a772e6cba61e41 | Rename and add | vicyangworld/AutoOfficer | CmdFormat.py | CmdFormat.py | import ctypes
import os
STD_INPUT_HANDLE = -10
STD_OUTPUT_HANDLE= -11
STD_ERROR_HANDLE = -12
FOREGROUND_BLACK = 0x0
FOREGROUND_BLUE = 0x01 # text color contains blue.
FOREGROUND_GREEN= 0x02 # text color contains green.
FOREGROUND_RED = 0x04 # text color contains red.
FOREGROUND_INTENSITY = 0x08 # text color is intensified.
BACKGROUND_BLUE = 0x10 # background color contains blue.
BACKGROUND_GREEN= 0x20 # background color contains green.
BACKGROUND_RED = 0x40 # background color contains red.
BACKGROUND_INTENSITY = 0x80 # background color is intensified.
class CmdFormat(object):
	"""Colored console output helper for the Windows cmd window.

	Uses the Win32 SetConsoleTextAttribute API via ctypes; Windows-only
	(ctypes.windll is unavailable elsewhere).
	"""
	# Evaluated once at class-creation time and baked in as the default
	# `handle` argument of set_cmd_color below.
	std_out_handle = ctypes.windll.kernel32.GetStdHandle(STD_OUTPUT_HANDLE)
	def __init__(self, WinTitle="Console Window",\
		color=FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE | FOREGROUND_INTENSITY,\
		):
		super( CmdFormat, self).__init__()
		self.WinTitle = WinTitle
		# Sets the console window's title via the shell `title` command.
		os.system("title " + WinTitle)
	def set_cmd_color(self, color, handle=std_out_handle):
		# NOTE(review): `bool` shadows the builtin; returns the API's
		# nonzero-on-success value.
		bool = ctypes.windll.kernel32.SetConsoleTextAttribute(handle, color)
		return bool
	def reset_color(self):
		# Back to bright white (red|green|blue|intensity).
		self.set_cmd_color(FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE | FOREGROUND_INTENSITY)
	def print_white_text(self,print_text,end='\n'):
		self.reset_color()
		print(print_text,end=end)
	def print_red_text(self, print_text,end='\n'):
		# 4|8 == FOREGROUND_RED | FOREGROUND_INTENSITY
		self.set_cmd_color(4 | 8)
		print(print_text,end=end)
		self.reset_color()
	def print_green_input_text(self, print_text):
		"""Prompt in green and return the user's input."""
		self.set_cmd_color(FOREGROUND_GREEN | FOREGROUND_INTENSITY)
		c = input(print_text)
		self.reset_color()
		return c
	def print_green_text(self, print_text,end='\n'):
		self.set_cmd_color(FOREGROUND_GREEN | FOREGROUND_INTENSITY)
		print(print_text,end=end)
		self.reset_color()
	def print_yellow_text(self, print_text,end='\n'):
		# 6|8 == red|green (yellow) plus intensity
		self.set_cmd_color(6 | 8)
		print(print_text,end=end)
		self.reset_color()
	def print_blue_text(self, print_text,end='\n'):
		# 1|10 mixes blue with green|intensity bits (renders light blue)
		self.set_cmd_color(1 | 10)
		print(print_text,end=end)
		self.reset_color()
# Manual demo: prints a sample line in each color, then waits for Enter.
if __name__ == '__main__':
	clr = CmdFormat("Window Title")
	clr.set_cmd_color(FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE | FOREGROUND_INTENSITY)
	clr.print_red_text('red')
	clr.print_green_text("green")
	clr.print_green_input_text("input: ")
	clr.print_blue_text('blue')
	clr.print_yellow_text('yellow')
	input()
99ab0354c5e0812020d983fbb0480d8b7b649645 | add django migrations | Organice/djangocms-maps,Organice/djangocms-maps,Organice/djangocms-maps,Organice/djangocms-maps | djangocms_googlemap/migrations/0002_auto_20160621_0926.py | djangocms_googlemap/migrations/0002_auto_20160621_0926.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adjust GoogleMap plugin fields: explicit parent-link related_name
    and corrected "route planner" labels/defaults."""

    dependencies = [
        ('djangocms_googlemap', '0001_initial'),
    ]

    operations = [
        # Give the CMSPlugin parent link an app-qualified related_name.
        migrations.AlterField(
            model_name='googlemap',
            name='cmsplugin_ptr',
            field=models.OneToOneField(parent_link=True, related_name='djangocms_googlemap_googlemap', auto_created=True, primary_key=True, serialize=False, to='cms.CMSPlugin'),
        ),
        # Verbose-name fix ("route planner") and explicit default.
        migrations.AlterField(
            model_name='googlemap',
            name='route_planer',
            field=models.BooleanField(default=False, verbose_name='route planner'),
        ),
        migrations.AlterField(
            model_name='googlemap',
            name='route_planer_title',
            field=models.CharField(default='Calculate your fastest way to here', max_length=150, null=True, verbose_name='route planner title', blank=True),
        ),
    ]
| bsd-3-clause | Python | |
5f8282c4533386e8d1e5d2dbdbe030be958540ab | Add not committed file globaleaks/handlers/robots.py | vodkina/GlobaLeaks,vodkina/GlobaLeaks,vodkina/GlobaLeaks,vodkina/GlobaLeaks | backend/globaleaks/handlers/robots.py | backend/globaleaks/handlers/robots.py | # -*- coding: UTF-8
# public
# ****
#
# Implementation of classes handling the HTTP request to /node, public
# exposed API.
from twisted.internet.defer import inlineCallbacks, returnValue
from globaleaks import models, LANGUAGES_SUPPORTED
from globaleaks.handlers.admin.files import db_get_file
from globaleaks.handlers.base import BaseHandler
from globaleaks.models import l10n
from globaleaks.models.config import NodeFactory
from globaleaks.models.l10n import NodeL10NFactory
from globaleaks.orm import transact_ro
from globaleaks.rest.apicache import GLApiCache
from globaleaks.settings import GLSettings
from globaleaks.utils.sets import disjoint_union
from globaleaks.utils.structures import get_localized_values
@transact_ro
def serialize_ahmia(store, language):
    """Build the Ahmia.fi descriptor from the node's public configuration."""
    node = NodeFactory(store).public_export()
    descriptor = {
        'title': node['name'],
        'description': NodeL10NFactory(store).get_val(language, 'description'),
        'keywords': '%s (GlobaLeaks instance)' % node['name'],
        'relation': node['public_site'],
        'language': node['default_language'],
        'contactInformation': u'',
        'type': 'GlobaLeaks'
    }
    return descriptor
class AhmiaDescriptionHandler(BaseHandler):
    @BaseHandler.transport_security_check("unauth")
    @BaseHandler.unauthenticated
    @inlineCallbacks
    def get(self):
        """
        Get the ahmia.fi descriptor (404 when the ahmia option is off).
        """
        if not GLSettings.memory_copy.ahmia:
            # Bare yield keeps this a generator for inlineCallbacks even on
            # the early-exit path.
            yield
            self.set_status(404)
            return

        # Serve from the API cache, computing via serialize_ahmia on miss.
        ret = yield GLApiCache.get('ahmia', self.request.language,
                                   serialize_ahmia, self.request.language)
        self.write(ret)
class RobotstxtHandler(BaseHandler):
    @BaseHandler.transport_security_check("unauth")
    @BaseHandler.unauthenticated
    def get(self):
        """Serve robots.txt, permitting crawlers only when indexing is enabled."""
        self.set_header('Content-Type', 'text/plain')
        if GLSettings.memory_copy.allow_indexing:
            policy = "Allow: /"
        else:
            policy = "Disallow: /"
        self.write("User-agent: *\n")
        self.write(policy)
class SitemapHandler(BaseHandler):
    """Serve sitemap.xml listing the public landing and submission pages."""
    @BaseHandler.transport_security_check("unauth")
    @BaseHandler.unauthenticated
    def get(self):
        """
        Get the sitemap.xml
        """
        if not GLSettings.memory_copy.allow_indexing:
            # indexing disabled: pretend the resource does not exist
            self.set_status(404)
            return
        self.set_header('Content-Type', 'text/xml')
        self.write("<?xml version='1.0' encoding='UTF-8' ?>\n" +
                   "<urlset\n" +
                   " xmlns='http://www.sitemaps.org/schemas/sitemap/0.9'\n" +
                   " xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance'\n" +
                   " xsi:schemaLocation='http://www.sitemaps.org/schemas/sitemap/0.9\n" +
                   " http://www.sitemaps.org/schemas/sitemap/0.9/sitemap.xsd'>\n" +
                   " <url>\n" +
                   " <loc>" + GLSettings.memory_copy.public_site + "/</loc>\n" +
                   " <changefreq>weekly</changefreq>\n" +
                   " <priority>1.00</priority>\n" +
                   " </url>\n" +
                   " <url>\n" +
                   " <loc>" + GLSettings.memory_copy.public_site + "/#/submission</loc>\n" +
                   " <changefreq>weekly</changefreq>\n" +
                   " <priority>1.00</priority>\n" +
                   " </url>\n" +
                   "</urlset>")
| agpl-3.0 | Python | |
22929adf17184037bff4be87e872a1f39aeda2ff | add migrations | praekelt/molo,praekelt/molo,praekelt/molo,praekelt/molo | molo/core/migrations/0036_auto_add_related_section_to_article.py | molo/core/migrations/0036_auto_add_related_section_to_article.py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-05-27 14:17
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import modelcluster.fields
class Migration(migrations.Migration):
    """Add the article<->section link table and extend language choices."""
    dependencies = [
        ('wagtailcore', '0028_merge'),
        ('core', '0035_content_rotation_settings'),
    ]
    operations = [
        # Ordered link table so an article can also appear under extra sections.
        migrations.CreateModel(
            name='ArticlePageRelatedSections',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sort_order', models.IntegerField(blank=True, editable=False, null=True)),
                ('page', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='related_sections', to='core.ArticlePage')),
                ('section', models.ForeignKey(blank=True, help_text='Section that this page also belongs too', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailcore.Page')),
            ],
            options={
                'ordering': ['sort_order'],
                'abstract': False,
            },
        ),
        # Extend the site language choices (adds South-African languages).
        migrations.AlterField(
            model_name='sitelanguage',
            name='locale',
            field=models.CharField(choices=[(b'af', b'Afrikaans'), (b'ar', b'Arabic'), (b'ast', b'Asturian'), (b'az', b'Azerbaijani'), (b'bg', b'Bulgarian'), (b'be', b'Belarusian'), (b'bn', b'Bengali'), (b'br', b'Breton'), (b'bs', b'Bosnian'), (b'ca', b'Catalan'), (b'cs', b'Czech'), (b'cy', b'Welsh'), (b'da', b'Danish'), (b'de', b'German'), (b'el', b'Greek'), (b'en', b'English'), (b'en-au', b'Australian English'), (b'en-gb', b'British English'), (b'eo', b'Esperanto'), (b'es', b'Spanish'), (b'es-ar', b'Argentinian Spanish'), (b'es-co', b'Colombian Spanish'), (b'es-mx', b'Mexican Spanish'), (b'es-ni', b'Nicaraguan Spanish'), (b'es-ve', b'Venezuelan Spanish'), (b'et', b'Estonian'), (b'eu', b'Basque'), (b'fa', b'Persian'), (b'fi', b'Finnish'), (b'fr', b'French'), (b'fy', b'Frisian'), (b'ga', b'Irish'), (b'gd', b'Scottish Gaelic'), (b'gl', b'Galician'), (b'he', b'Hebrew'), (b'hi', b'Hindi'), (b'hr', b'Croatian'), (b'hu', b'Hungarian'), (b'ia', b'Interlingua'), (b'id', b'Indonesian'), (b'io', b'Ido'), (b'is', b'Icelandic'), (b'it', b'Italian'), (b'ja', b'Japanese'), (b'ka', b'Georgian'), (b'kk', b'Kazakh'), (b'km', b'Khmer'), (b'kn', b'Kannada'), (b'ko', b'Korean'), (b'lb', b'Luxembourgish'), (b'lt', b'Lithuanian'), (b'lv', b'Latvian'), (b'mk', b'Macedonian'), (b'ml', b'Malayalam'), (b'mn', b'Mongolian'), (b'mr', b'Marathi'), (b'my', b'Burmese'), (b'nb', b'Norwegian Bokmal'), (b'ne', b'Nepali'), (b'nl', b'Dutch'), (b'nn', b'Norwegian Nynorsk'), (b'os', b'Ossetic'), (b'pa', b'Punjabi'), (b'pl', b'Polish'), (b'pt', b'Portuguese'), (b'pt-br', b'Brazilian Portuguese'), (b'ro', b'Romanian'), (b'ru', b'Russian'), (b'sk', b'Slovak'), (b'sl', b'Slovenian'), (b'sq', b'Albanian'), (b'sr', b'Serbian'), (b'sr-latn', b'Serbian Latin'), (b'sv', b'Swedish'), (b'sw', b'Swahili'), (b'ta', b'Tamil'), (b'te', b'Telugu'), (b'th', b'Thai'), (b'tr', b'Turkish'), (b'tt', b'Tatar'), (b'udm', b'Udmurt'), (b'uk', b'Ukrainian'), (b'ur', b'Urdu'), (b'vi', b'Vietnamese'), (b'zh-hans', b'Simplified Chinese'), 
            (b'zh-hant', b'Traditional Chinese'), (b'zu', 'Zulu'), (b'xh', 'Xhosa'), (b'st', 'Sotho'), (b've', 'Venda'), (b'tn', 'Tswana'), (b'ts', 'Tsonga'), (b'ss', 'Swati'), (b'nr', 'Ndebele')], help_text='Site language', max_length=255, verbose_name='language name'),
        ),
    ]
| bsd-2-clause | Python | |
db333070b61caef62ea5b68e5bf75f92d879db74 | Add regression test for bug #1922053 | mahak/nova,openstack/nova,mahak/nova,openstack/nova,openstack/nova,mahak/nova | nova/tests/functional/regressions/test_bug_1922053.py | nova/tests/functional/regressions/test_bug_1922053.py | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.tests.functional import integrated_helpers
class ForceUpWithDoneEvacuations(integrated_helpers._IntegratedTestBase):
    """Regression test for bug 1922053.
    This regression test aims to assert the behaviour of n-api when forcing a
    compute service up when it is associated with evacuation migration
    records still marked as `done`. This suggests that the compute service
    was not correctly fenced when the evacuation was requested and has not
    restarted since allowing the evacuation migration records to move to a
    state of `completed`.
    """
    ADMIN_API = True
    microversion = 'latest'
    def _create_test_server(self, compute_host):
        """Boot a server pinned to `compute_host` via the 2.74+ 'host' hint."""
        return self._create_server(host=compute_host, networks='none')
    def _force_down_compute(self, hostname):
        """Mark the nova-compute service on `hostname` as forced-down."""
        compute_id = self.api.get_services(
            host=hostname, binary='nova-compute')[0]['id']
        self.api.put_service(compute_id, {'forced_down': 'true'})
    def _force_up_compute(self, hostname):
        """Clear the forced-down flag on `hostname`'s nova-compute service."""
        compute_id = self.api.get_services(
            host=hostname, binary='nova-compute')[0]['id']
        self.api.put_service(compute_id, {'forced_down': 'false'})
    def test_force_up_with_done_evacuation_records(self):
        """End-to-end reproduction of the bug described in the class docstring."""
        # Launch a second compute to host the evacuated instance
        self._start_compute('compute2')
        # Create a test server to evacuate
        server = self._create_test_server('compute')
        # Assert we've landed on the first compute
        self.assertEqual('compute', server['OS-EXT-SRV-ATTR:host'])
        # Force down the first compute to allow the evacuation
        self._force_down_compute('compute')
        # Evacuate then assert the instance moves to compute2 and that the
        # migration record is moved to done
        server = self._evacuate_server(
            server,
            expected_host='compute2',
            expected_migration_status='done'
        )
        # FIXME(lyarwood): This is bug #1922053, this shouldn't be allowed with
        # `done` evacuation migration records still associated with the host.
        # Replace this with the following assertion once fixed:
        # ex = self.assertRaises(
        #    client.OpenStackApiException,
        #    self._force_up_compute,
        #    'compute',
        # )
        # self.assertEqual(400, ex.response.status_code)
        self._force_up_compute('compute')
        # Assert that the evacuation migration record remains `done`
        self._wait_for_migration_status(server, ["done"])
        # Restart the source compute to move the migration record along
        self.computes['compute'].stop()
        self.computes['compute'].start()
        # Assert that the evacuation migration record is now `completed`
        self._wait_for_migration_status(server, ["completed"])
class ForceUpWithDoneEvacuationsv252(ForceUpWithDoneEvacuations):
    """Regression test for bug 1922053 using microversion 2.52.
    Required as the PUT /os-services/force-down API used by this test via
    self.api.force_down_service is superseeded by PUT /os-services/{service_id}
    API used by our parent ForceUpWithDoneEvacuations class from >=2.53.
    This test also uses the 'availability_zone' parameter to force the server
    to spawn on the first compute as 'host' is only available from 2.74.
    """
    microversion = '2.52'
    def _create_test_server(self, compute_host):
        # 'host' hint is unavailable pre-2.74; pin via the availability zone.
        return self._create_server(az='nova:compute', networks='none')
    def _force_down_compute(self, hostname):
        # pre-2.53 force-down API takes the hostname/binary pair directly
        self.api.force_down_service(hostname, 'nova-compute', forced_down=True)
    def _force_up_compute(self, hostname):
        self.api.force_down_service(
            hostname, 'nova-compute', forced_down=False)
| apache-2.0 | Python | |
ecdd4aa60fadd392cdb1cfbdc178d742d260d640 | Add a new example | Dentosal/ImbaPython | examples/example1.py | examples/example1.py | import imba
# Install (and make importable) the packages this demo relies on.
imba.install(["json", "requests"])
# NOTE(review): .fetch() on a plain string is presumably functionality added
# by imba (it is not standard Python) — confirm against the imba docs.
my_ip = "http://httpbin.org/ip".fetch().json["origin"]
print(my_ip)
| mit | Python | |
1d021cd7b52ecc4d9684dc607a1ff9f0b8181b37 | Add python script to read receiver file | RaphaelPoncet/2016-macs2-projet-hpc | read_receiver.py | read_receiver.py | #! /usr/bin/env python
import numpy
from matplotlib import pyplot as plt
import matplotlib.colors as colors
import os
import sys
import seaborn
def ParseVariableBinaryHeader(header):
    """Parse a variable header line of the form:

        # <name> <dtype> <nb_components> <nx> <ny> <nz>

    :param header: the raw header line (trailing newline allowed)
    :return: (name, dtype, nb_components, nx, ny, nz) with the last four
        fields converted to int
    :raises ValueError: if the line is too short or does not start with '#'
    """
    tokens = header.strip("\n").split()
    # Explicit exception instead of the original bare assert, which both gave
    # a useless message and disappears entirely under `python -O`.
    if len(tokens) < 7 or tokens[0] != '#':
        raise ValueError("malformed variable header: %r" % header)
    name, dtype = tokens[1], tokens[2]
    nb_components, nx, ny, nz = (int(t) for t in tokens[3:7])
    return name, dtype, nb_components, nx, ny, nz
# --- Load the receiver file (ASCII header + float payload) ----------------
receiver_filename = "output/receivers.dat"
receiver_file = open(receiver_filename, 'r')
readlines = receiver_file.read().split("\n")
receiver_file.close()
temp_filename = "tmp.binary"
tempfile = open(temp_filename, 'wb')
# Parse header
header = readlines[0]
name, dtype, nb_components, nx, ny, nz = ParseVariableBinaryHeader(header)
# Write data without header
for line in readlines[1:]:
    tempfile.write(line + "\n")
tempfile.close()
# Re-read the headerless payload as a flat float64 array, then clean up.
tempfile = open(temp_filename, 'rb')
data = numpy.fromfile(tempfile, dtype = 'float_')
tempfile.close()
if os.path.exists(temp_filename):
    print "Removing temporary file " + str(temp_filename)
    os.remove(temp_filename)
print data.shape, nx, nz
# nz rows by nx columns -- presumably time samples x receivers; TODO confirm
data = data.reshape(nz, nx)
# symmetric colour scale around zero
amplitude_max = max(numpy.amax(data), - numpy.amin(data))
print "amplitude_max=", amplitude_max
# Receiver trace indices to highlight, with their plot colours.
rcv_ids = {'rcv1': (nx / 2, 'blue'),
           'rcv2': (nx / 4, 'red'),
           'rcv3': (3 * nx / 5, 'green'),
#           'rcv4': (1200, 'orange'),
#           'rcv5': (800, 'purple'),
           }
# Figure 1: the whole gather as an image with receiver locations overlaid.
plt.figure()
with seaborn.axes_style("dark"):
    cmap = 'gray'
    plt.imshow(data, cmap = cmap, interpolation = 'none', aspect = 'auto', vmin = - 0.1 * amplitude_max, vmax = 0.1 * amplitude_max)
for key, value in rcv_ids.iteritems():
    rcv_id, color = value
    plt.plot([rcv_id, rcv_id], [0.0, nz], color = color, linewidth = 2)
plt.xlim([0,nx])
plt.ylim([nz,0])
# Figure 2: the selected traces, vertically offset so they do not overlap.
plt.figure()
cnt = 1
for key, value in rcv_ids.iteritems():
    rcv_id, color = value
    # alternate offsets above/below zero: 0, -2A, +2A, -4A, ...
    offset = numpy.power(-1.0, cnt) * (2.0 * amplitude_max) * (cnt / 2)
    print offset
    plt.plot(offset + data[:, rcv_id], color = color, linewidth = 2, label = key)
    cnt += 1
plt.legend()
plt.show()
| apache-2.0 | Python | |
a737844028a576f08a63767a42da92b53b567a67 | add missing script | sunlightlabs/read_FEC,sunlightlabs/read_FEC,sunlightlabs/read_FEC,sunlightlabs/read_FEC | fecreader/summary_data/management/commands/set_committee_candidate_linkage.py | fecreader/summary_data/management/commands/set_committee_candidate_linkage.py | # Sets the curated_candidate_overlay field in committee_overlay from the Authorized_Candidate_Committees model. Use pop_auth_committees to set the data in Authorized_Candidate_Committees itself.
from django.core.management.base import BaseCommand, CommandError
from summary_data.models import Authorized_Candidate_Committees, Candidate_Overlay, Committee_Overlay
class Command(BaseCommand):
help = "Sets the curated_candidate_overlay field in committee_overlay from the Authorized_Candidate_Committees model"
requires_model_validation = False
def handle(self, *args, **options):
accs = Authorized_Candidate_Committees.objects.all()
for acc in accs:
como = cando = None
try:
como = Committee_Overlay.objects.get(fec_id=acc.committee_id)
except Committee_Overlay.DoesNotExist:
print "Missing committee overlay for %s %s" % (acc.committee_name, acc.committee_id)
continue
try:
cando = Candidate_Overlay.objects.get(fec_id=acc.candidate_id)
except Candidate_Overlay.DoesNotExist:
print "Missing candidate overlay for candidate--committee name is:%s committee_id is: %s" % (acc.committee_name, acc.candidate_id)
continue
como.curated_candidate = cando
como.save() | bsd-3-clause | Python | |
8a9490a962936c4695fbe639b45807aba012d102 | Create LongSubWRC_001.py | cc13ny/algo,Chasego/cod,cc13ny/algo,cc13ny/Allin,cc13ny/Allin,Chasego/cod,Chasego/codi,Chasego/codirit,cc13ny/Allin,Chasego/cod,cc13ny/algo,Chasego/codi,cc13ny/algo,Chasego/codirit,cc13ny/Allin,Chasego/codirit,Chasego/codi,Chasego/codirit,Chasego/cod,Chasego/cod,cc13ny/algo,Chasego/codi,Chasego/codi,Chasego/codirit,cc13ny/Allin | leetcode/003-Longest-Substring-Without-Repeating-Characters/LongSubWRC_001.py | leetcode/003-Longest-Substring-Without-Repeating-Characters/LongSubWRC_001.py | class Solution:
# @return an integer
def lengthOfLongestSubstring(self, s):
start, maxlen = 0, 0
dict = {s[i]: -1 for i in range(len(s))}
for i in range(len(s)):
if dict[s[i]] != -1:
while start <= dict[s[i]]:
dict[s[start]] = -1
start += 1
if i - start + 1 > maxlen: maxlen = i - start + 1
dict[s[i]] = i
return maxlen
| mit | Python | |
1c2d41bebbadfe41cad72e950864dcbfcdc938c0 | test file following new format with C code and header, missing decorators for getting dll handles and hypothesis-based testing | pleiszenburg/zugbruecke | tests/test_devide.py | tests/test_devide.py | # -*- coding: utf-8 -*-
"""
ZUGBRUECKE
Calling routines in Windows DLLs from Python scripts running on unixlike systems
https://github.com/pleiszenburg/zugbruecke
tests/test_devide.py: Tests by reference argument passing (int pointer)
Required to run on platform / side: [UNIX, WINE]
Copyright (C) 2017-2019 Sebastian M. Ernst <ernst@pleiszenburg.de>
<LICENSE_BLOCK>
The contents of this file are subject to the GNU Lesser General Public License
Version 2.1 ("LGPL" or "License"). You may not use this file except in
compliance with the License. You may obtain a copy of the License at
https://www.gnu.org/licenses/old-licenses/lgpl-2.1.txt
https://github.com/pleiszenburg/zugbruecke/blob/master/LICENSE
Software distributed under the License is distributed on an "AS IS" basis,
WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License for the
specific language governing rights and limitations under the License.
</LICENSE_BLOCK>
"""
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# C
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
HEADER = """
int __stdcall DEMODLL divide_int(
int a,
int b,
int *remainder
);
"""
SOURCE = """
int __stdcall DEMODLL divide_int(
int a,
int b,
int *remainder
)
{
int quot = a / b;
*remainder = a % b;
return quot;
}
"""
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# IMPORT
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
from .lib.ctypes import ctypes
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# TEST(s)
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# get handle, generate a and b
# @...
def test_devide(dll_handle, x, y):
	"""Integer division through the DLL must agree with Python's divmod."""
	c_divide = dll_handle.divide_int
	c_divide.argtypes = (ctypes.c_int, ctypes.c_int, ctypes.POINTER(ctypes.c_int))
	c_divide.restype = ctypes.c_int
	remainder = ctypes.c_int()
	quotient = c_divide(x, y, remainder)
	assert divmod(x, y) == (quotient, remainder.value)
| lgpl-2.1 | Python | |
2dc8ae91713fdd73ac1e835dcb191714c2e93593 | Add unittests for some methods of runner | lordappsec/ooni-probe,juga0/ooni-probe,lordappsec/ooni-probe,Karthikeyan-kkk/ooni-probe,juga0/ooni-probe,0xPoly/ooni-probe,lordappsec/ooni-probe,kdmurray91/ooni-probe,juga0/ooni-probe,0xPoly/ooni-probe,kdmurray91/ooni-probe,lordappsec/ooni-probe,juga0/ooni-probe,Karthikeyan-kkk/ooni-probe,0xPoly/ooni-probe,Karthikeyan-kkk/ooni-probe,0xPoly/ooni-probe,kdmurray91/ooni-probe,Karthikeyan-kkk/ooni-probe,kdmurray91/ooni-probe | tests/test_runner.py | tests/test_runner.py | from twisted.trial import unittest
from ooni.inputunit import InputUnit
from ooni.nettest import NetTestCase
from ooni.reporter import OReporter
from ooni.runner import loadTestsAndOptions, runTestCasesWithInputUnit
class DummyTestCase(NetTestCase):
    """Minimal two-method net test used as a fixture by TestRunner below."""
    def test_a(self):
        self.report['bar'] = 'bar'
    def test_b(self):
        self.report['foo'] = 'foo'
class DummyTestCasePP(DummyTestCase):
    """DummyTestCase variant that also writes a post-processed entry."""
    def postProcessor(self, report):
        self.report['antani'] = 'sblinda'
class DummyReporter(OReporter):
    """Reporter that collects entries in memory instead of writing them out."""
    # Class-level list: tests rebind a fresh [] on the instance before use.
    dummy_report = []
    def createReport(self, options):
        pass
    def writeReportEntry(self, entry):
        self.dummy_report.append(entry)
class TestRunner(unittest.TestCase):
    """Exercises loadTestsAndOptions and runTestCasesWithInputUnit."""
    def test_load_test_and_options(self):
        # NOTE(review): input_unit is unused in this test.
        input_unit = InputUnit([0,1,2,3,4])
        cmd_line_options = {}
        # loadTestsAndOptions yields (test_case, method_name) pairs
        test_cases, options = loadTestsAndOptions([DummyTestCase],
                cmd_line_options)
        self.assertEqual(test_cases[0][1], 'test_b')
        self.assertEqual(test_cases[1][1], 'test_a')
    def test_run_testcase_with_input_unit(self):
        oreporter = DummyReporter()
        oreporter.dummy_report = []
        def done(result):
            # 5 inputs x 2 test methods = 10 report entries, alternating
            # test_b ('foo') then test_a ('bar')
            report = oreporter.dummy_report
            self.assertEqual(len(report), 5*2)
            for idx, entry in enumerate(oreporter.dummy_report):
                if idx % 2 == 0:
                    self.assertEqual(entry['report']['foo'], 'foo')
                else:
                    self.assertEqual(entry['report']['bar'], 'bar')
        input_unit = InputUnit([0,1,2,3,4])
        cmd_line_options = {'collector': None}
        test_cases, options = loadTestsAndOptions([DummyTestCase],
                cmd_line_options)
        d = runTestCasesWithInputUnit(test_cases, input_unit, oreporter)
        d.addBoth(done)
        return d
    def test_with_post_processing(self):
        oreporter = DummyReporter()
        oreporter.dummy_report = []
        def done(result):
            # 1 input x 2 methods + 1 'summary' post-processing entry
            report = oreporter.dummy_report
            self.assertEqual(len(report), 3)
            for entry in report:
                if entry['test_name'] == 'summary':
                    self.assertEqual(entry['report'], {'antani': 'sblinda'})
        input_unit = InputUnit([None])
        cmd_line_options = {'collector': None}
        test_cases, options = loadTestsAndOptions([DummyTestCasePP],
                cmd_line_options)
        d = runTestCasesWithInputUnit(test_cases, input_unit, oreporter)
        d.addBoth(done)
        return d
| bsd-2-clause | Python | |
5e5e58b705d30df62423ec8bb6018c6807114580 | Add the app config for osf registrations | laurenbarker/SHARE,aaxelb/SHARE,aaxelb/SHARE,zamattiac/SHARE,zamattiac/SHARE,laurenbarker/SHARE,CenterForOpenScience/SHARE,laurenbarker/SHARE,aaxelb/SHARE,zamattiac/SHARE,CenterForOpenScience/SHARE,CenterForOpenScience/SHARE | providers/io/osf/registrations/apps.py | providers/io/osf/registrations/apps.py | from share.provider import ProviderAppConfig
from .harvester import OSFRegistrationsHarvester
class AppConfig(ProviderAppConfig):
    """Registers the OSF Registrations harvester with the SHARE framework."""
    name = 'providers.io.osf.registrations'
    version = '0.0.1'
    title = 'osf_registrations'
    long_title = 'Open Science Framework Registrations'
    home_page = 'http://api.osf.io/registrations/'
    harvester = OSFRegistrationsHarvester
| apache-2.0 | Python | |
04fa680f4be4afc44dc0df3834b096d8fa7a05ac | Add tests for nbgrader update | jupyter/nbgrader,jhamrick/nbgrader,jupyter/nbgrader,jhamrick/nbgrader,jupyter/nbgrader,jupyter/nbgrader,jhamrick/nbgrader,jhamrick/nbgrader,jupyter/nbgrader | nbgrader/tests/apps/test_nbgrader_update.py | nbgrader/tests/apps/test_nbgrader_update.py | from os.path import join
from .. import run_nbgrader
from .base import BaseTestApp
class TestNbGraderUpdate(BaseTestApp):
    def test_help(self):
        """Does the help display without error?"""
        run_nbgrader(["update", "--help-all"])
    def test_no_args(self):
        """Is there an error if no arguments are given?"""
        run_nbgrader(["update"], retcode=1)
    def test_update(self, db, course_dir):
        """Updating v0 notebook metadata should unblock assign/autograde."""
        with open("nbgrader_config.py", "a") as fh:
            fh.write("""c.NbGrader.db_assignments = [dict(name='ps1', duedate='2015-02-02 14:58:23.948203 PST')]\n""")
            fh.write("""c.NbGrader.db_students = [dict(id="foo"), dict(id="bar")]""")
        self._copy_file(join("files", "test-v0.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb"))
        # assign fails (retcode 1) while the notebook still has v0 metadata
        run_nbgrader(["assign", "ps1", "--db", db], retcode=1)
        # now update the metadata
        run_nbgrader(["update", course_dir])
        # now assign should succeed
        run_nbgrader(["assign", "ps1", "--db", db])
        # autograde should fail on old metadata, too
        self._copy_file(join("files", "test-v0.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p1.ipynb"))
        run_nbgrader(["autograde", "ps1", "--db", db], retcode=1)
        # now update the metadata
        run_nbgrader(["update", course_dir])
        # now autograde should succeed
        run_nbgrader(["autograde", "ps1", "--db", db])
| bsd-3-clause | Python | |
48d6c81e79e867d448380a087f81b034b162d5bf | Add GostTrack implementation | Acruxx/ARS | GostTrack.py | GostTrack.py | class GostTrack:
    def __init__(self) -> None:
        """Create an empty track."""
        # number of positions recorded so far
        self.list_size = 0
        # index of the next position handed out by get_next_pos()
        self.current_pos = 0
        # recorded (ang, rad) tuples, in insertion order
        self.pos_list = []
    def add_pos(self, ang, rad):
        """Append a polar position (angle `ang`, radius `rad`) to the track."""
        self.list_size += 1
        self.pos_list.append((ang, rad))
def get_next_pos(self):
if self.list_size == self.list_size:
self.current_pos += 1
return self.pos_list[self.current_pos-1];
    def reset_count(self):
        """Rewind iteration so get_next_pos() starts from the first entry."""
        self.current_pos = 0;
if __name__ == '__main__':
    # Smoke test: record two positions, walk them, rewind, walk again.
    h = GostTrack()
    h.add_pos(20, 30)
    h.add_pos(34, 76)
    print(h.get_next_pos())
    print(h.get_next_pos())
    h.reset_count()
    print(h.get_next_pos())
    print(h.get_next_pos())
| mit | Python | |
6e74d412681f36b921d84c0cc72929be0a3df828 | Use threadlocals to store middleware data instead of the request | kbussell/django-auditlog,chris-griffin/django-auditlog,johnrtipton/django-auditlog,jjkester/django-auditlog,robmagee/django-auditlog,Zmeylol/auditlog,rauleb/django-auditlog | src/auditlog/middleware.py | src/auditlog/middleware.py | from __future__ import unicode_literals
import threading
import time
from django.conf import settings
from django.db.models.signals import pre_save
from django.utils.functional import curry
from django.db.models.loading import get_model
from auditlog.models import LogEntry
threadlocal = threading.local()
class AuditlogMiddleware(object):
    """
    Middleware to couple the request's user to log items. This is accomplished by currying the signal receiver with the
    user from the request (or None if the user is not authenticated).
    """
    def process_request(self, request):
        """
        Gets the current user from the request and prepares and connects a signal receiver with the user already
        attached to it.
        """
        # Initialize thread local storage
        threadlocal.auditlog = {
            'signal_duid': (self.__class__, time.time()),
            'remote_addr': request.META.get('REMOTE_ADDR'),
        }
        # In case of proxy, set 'original' address
        if request.META.get('HTTP_X_FORWARDED_FOR'):
            # first entry in X-Forwarded-For is the originating client
            threadlocal.auditlog['remote_addr'] = request.META.get('HTTP_X_FORWARDED_FOR').split(',')[0]
        # Connect signal for automatic logging
        if hasattr(request, 'user') and hasattr(request.user, 'is_authenticated') and request.user.is_authenticated():
            set_actor = curry(self.set_actor, request.user)
            # weak=False: the curried receiver has no other reference and
            # would otherwise be garbage-collected immediately
            pre_save.connect(set_actor, sender=LogEntry, dispatch_uid=threadlocal.auditlog['signal_duid'], weak=False)
    def process_response(self, request, response):
        """
        Disconnects the signal receiver to prevent it from staying active.
        """
        if hasattr(threadlocal, 'auditlog'):
            pre_save.disconnect(sender=LogEntry, dispatch_uid=threadlocal.auditlog['signal_duid'])
        return response
    def process_exception(self, request, exception):
        """
        Disconnects the signal receiver to prevent it from staying active in case of an exception.
        """
        if hasattr(threadlocal, 'auditlog'):
            pre_save.disconnect(sender=LogEntry, dispatch_uid=threadlocal.auditlog['signal_duid'])
        # returning None lets Django's normal exception handling continue
        return None
    @staticmethod
    def set_actor(user, sender, instance, **kwargs):
        """
        Signal receiver with an extra, required 'user' kwarg. This method becomes a real (valid) signal receiver when
        it is curried with the actor.
        """
        try:
            app_label, model_name = settings.AUTH_USER_MODEL.split('.')
            auth_user_model = get_model(app_label, model_name)
        except ValueError:
            # AUTH_USER_MODEL not in 'app.Model' form: fall back to auth.User
            auth_user_model = get_model('auth', 'user')
        if sender == LogEntry and isinstance(user, auth_user_model) and instance.actor is None:
            instance.actor = user
| from __future__ import unicode_literals
import time
from django.conf import settings
from django.db.models.signals import pre_save
from django.utils.functional import curry
from django.db.models.loading import get_model
from auditlog.models import LogEntry
class AuditlogMiddleware(object):
    """
    Middleware to couple the request's user to log items. This is accomplished by currying the signal receiver with the
    user from the request (or None if the user is not authenticated).
    """
    def process_request(self, request):
        """
        Gets the current user from the request and prepares and connects a signal receiver with the user already
        attached to it.
        """
        if hasattr(request, 'user') and hasattr(request.user, 'is_authenticated') and request.user.is_authenticated():
            user = request.user
            # timestamp makes the dispatch_uid unique per request
            request.auditlog_ts = time.time()
            set_actor = curry(self.set_actor, user)
            pre_save.connect(set_actor, sender=LogEntry, dispatch_uid=(self.__class__, request.auditlog_ts), weak=False)
    def process_response(self, request, response):
        """
        Disconnects the signal receiver to prevent it from staying active.
        """
        # Disconnecting the signal receiver is required because it will not be garbage collected (non-weak reference)
        if hasattr(request, 'auditlog_ts'):
            pre_save.disconnect(sender=LogEntry, dispatch_uid=(self.__class__, request.auditlog_ts))
        return response
    def process_exception(self, request, exception):
        """
        Disconnects the signal receiver to prevent it from staying active in case of an exception.
        """
        if hasattr(request, 'auditlog_ts'):
            pre_save.disconnect(sender=LogEntry, dispatch_uid=(self.__class__, request.auditlog_ts))
        # returning None resumes Django's default exception handling
        return None
    @staticmethod
    def set_actor(user, sender, instance, **kwargs):
        """
        Signal receiver with an extra, required 'user' kwarg. This method becomes a real (valid) signal receiver when
        it is curried with the actor.
        """
        try:
            app_label, model_name = settings.AUTH_USER_MODEL.split('.')
            auth_user_model = get_model(app_label, model_name)
        except ValueError:
            # AUTH_USER_MODEL not in 'app.Model' form: fall back to auth.User
            auth_user_model = get_model('auth', 'user')
        if sender == LogEntry and isinstance(user, auth_user_model) and instance.actor is None:
            instance.actor = user
| mit | Python |
9df5fc2493e38a30f44edcad97d0aebd415f97be | Add doosabin/generate_doosabin_test_data.py | rstebbing/subdivision,rstebbing/subdivision | doosabin/generate_doosabin_test_data.py | doosabin/generate_doosabin_test_data.py | # generate_doosabin_test_data.py
# Imports
from cStringIO import StringIO
from functools import partial
import numpy as np
np.random.seed(1337)
import doosabin
from verification.common import example_extraordinary_patch
s = StringIO()
def savetxt(type_, fmt, name, X):
    """Write `X` to the module-level buffer `s` as a C array initialiser:

        const <type_> <name>[] = { ... };

    Values are formatted with `fmt`; values within a row are separated by
    ", ", every row but the last ends with ",", and each row is on its own
    (4-space indented) line.
    """
    X = np.atleast_2d(X)
    s.write('const %s %s[] = {\n' % (type_, name))
    for i, xi in enumerate(X):
        s.write('    ')
        for j, xij in enumerate(xi):
            s.write(fmt % xij)
            if i >= len(X) - 1 and j >= len(xi) - 1:
                # very last value: just close the line
                s.write('\n')
            else:
                s.write(',')
                s.write(' ' if j < len(xi) - 1 else '\n')
    s.write('};\n')
def savetxt_MapConstMatrixXd(name, X):
    """Emit `X` as a double array plus an Eigen MapConstMatrixXd wrapper.

    NOTE(review): the dimensions are passed as (cols, rows) — presumably
    because the C++ side maps the row-major dump as a column-major matrix;
    confirm against the consumers of doosabin_test_data.
    """
    savetxt('double', '%+.6e', '{0}Data'.format(name), X)
    s.write('const MapConstMatrixXd {0}({0}Data, {1}, {2});\n'.format(
        name, X.shape[1], X.shape[0]))
# Sample points: cell-centred 3x3 grid of (u, v) parameters in [0, 1)^2.
t, step = np.linspace(0.0, 1.0, 3, endpoint=False, retstep=True)
t += 0.5 * step
U = np.dstack(np.broadcast_arrays(t[:, np.newaxis], t)).reshape(-1, 2)
savetxt_MapConstMatrixXd('kU', U)
s.write('\n')
# One jittered extraordinary patch per valence N, plus basis evaluations.
for N in [3, 4, 5, 6]:
    T, X = example_extraordinary_patch(N, return_mesh=True)
    X += 0.1 * np.random.randn(X.size).reshape(X.shape)
    x0 = np.mean(X, axis=0)  # NOTE(review): unused — left-over?
    # add a z coordinate so the patch is non-planar
    X = np.c_[X, -np.linalg.norm(X, axis=1)]
    # Flatten the face list to [num_faces, len(f0), f0..., len(f1), f1...].
    T_ = [len(T)]
    for t in T:
        T_.append(len(t))
        T_.extend(t)
    T_ = np.array(T_, dtype=np.int32)
    savetxt('int', '%d', 'kT%dData' % N, T_)
    s.write(('const doosabin::FaceArray kT{0} = '
             'InitialiseFaceArray(kT{0}Data, {1});\n\n').format(N, len(T_)))
    savetxt_MapConstMatrixXd('kX%d' % N, X)
    s.write('\n')
    # Position, du and du-du biquadratic B-spline bases at every sample u.
    names_powers_and_basis_functions = [
        ('M', 0, doosabin.biquadratic_bspline_position_basis),
        ('Mu', 1, doosabin.biquadratic_bspline_du_basis),
        ('Muu', 2, doosabin.biquadratic_bspline_du_du_basis)]
    for n, p, b in names_powers_and_basis_functions:
        P = np.array([doosabin.recursive_evaluate(p, b, N, u, X)
                      for u in U])
        savetxt_MapConstMatrixXd('k%s%d' % (n, N), P)
        s.write('\n')
with open('doosabin_test_data.txt', 'w') as fp:
    fp.write(s.getvalue())
| mit | Python | |
ba6f426ca3f1eaf909cfa6257673a1800cc1c8fe | add download url script | rboman/progs,rboman/progs,rboman/progs,rboman/progs,rboman/progs,rboman/progs,rboman/progs,rboman/progs,rboman/progs,rboman/progs | sandbox/webapi/dropboxapi_download_url.py | sandbox/webapi/dropboxapi_download_url.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import json
import requests
import os.path
def get_api_token():
    """Read the Dropbox API token (first line) from the local token file."""
    # NOTE(review): the backslashes in this Windows path only work because
    # \D, \B, \d are not recognised string escapes; a raw string would be
    # safer — the literal itself is runtime data and is left untouched here.
    with open('E:\Dropbox\Bin\dropbox_api_token.txt') as f:
        token = f.readline().rstrip()
    #print "token='{}'".format(token)
    return token
def download(url, filename=None):
    """Ask the Dropbox API to fetch `url` server-side into /<filename>.

    The save_url endpoint is asynchronous: a successful call returns an
    async_job_id (see the sample response below), not the finished file.

    :param url: remote file for Dropbox to download into the account
    :param filename: target name in the Dropbox root; defaults to the URL's
        basename
    """
    # https://www.dropbox.com/developers/documentation/http/documentation#files-save_url
    # curl -X POST https://api.dropboxapi.com/2/files/save_url \
    #     --header "Authorization: Bearer " \
    #     --header "Content-Type: application/json" \
    #     --data "{\"path\": \"/a.txt\",\"url\": \"http://example.com/a.txt\"}"
    if not filename:
        filename = os.path.basename(url)
    apiurl = 'https://api.dropboxapi.com/2/files/save_url'
    token = get_api_token()
    print 'requesting download of', filename
    r = requests.post(apiurl, headers={
        "Authorization": 'Bearer %s' % token,
        "Content-Type": 'application/json'
    }, json={  # "json" instead of "data"
        'path' : '/%s' % filename,  # should start with a '/'
        'url': url
    })
    #r.raise_for_status()
    print 'r.status_code =', r.status_code
    # print 'r.headers =', r.headers
    # print 'r.encoding =', r.encoding
    # print 'r.url =', r.url
    print 'r.text =', r.text
    # if r.status_code==200:
    #     print 'r.json() =', r.json()
    # sample successful exchange, kept for reference:
    # r.status_code = 200
    # r.headers = {'X-Content-Type-Options': 'nosniff', 'Content-Encoding': 'gzip', 'Transfer-Encoding': 'chunked', 'X-Server-Response-Time': '571', 'Vary': 'Accept-Encoding', 'Server': 'nginx', 'X-Envoy-Upstream-Service-Time': '579', 'Connection': 'keep-alive', 'X-Dropbox-Request-Id': '03665620068c9845c23a1cc92890f9dd', 'Pragma': 'no-cache', 'Cache-Control': 'no-cache',
    # 'Date': 'Sun, 03 May 2020 15:07:10 GMT', 'X-Frame-Options': 'SAMEORIGIN', 'Content-Type': 'application/json'}
    # r.encoding = None
    # r.url = https://api.dropboxapi.com/2/files/save_url
    # r.text = {".tag": "async_job_id", "async_job_id": "2fzY2UcViSYAAAAAAAS9tg"}
    # r.json() = {u'.tag': u'async_job_id', u'async_job_id': u'2fzY2UcViSYAAAAAAAS9tg'}
if __name__=="__main__":
    # Example: queue a Debian live ISO for server-side download into Dropbox.
    url='https://cdimage.debian.org/debian-cd/current-live/amd64/iso-hybrid/debian-live-10.3.0-amd64-standard.iso'
    download(url)
| apache-2.0 | Python | |
7eff8eb8e72b7b23d2a8440f7aa97694625932dc | Add methods for processing raw data from pickled file | borgarlie/TDT4501-Specialization-Project | seq2seq_summarization/process_raw_data.py | seq2seq_summarization/process_raw_data.py | import pickle
import re
def process_text(text):
    """
    Normalize raw article text into space-separated lower-case tokens.

    Cleanup pipeline, in order:
      * strip HTML tags,
      * split punctuation (. , ! ?) off the surrounding words,
      * lower-case everything,
      * drop characters outside the allowed alphabet,
      * mask every digit with '#',
      * collapse whitespace runs (incl. tabs/newlines) to single spaces,
      * append an explicit '<EOS>' marker.

    :param text: raw article text (may contain HTML markup)
    :return: the cleaned text, ending in ' <EOS>'
    """
    # The tag-removal and punctuation-splitting passes, applied in order.
    splitting_passes = [
        ("<.*?>", " "),                     # drop HTML tags
        ('(?<=[^?!.0-9])(?=[.,!?])', ' '),  # space before punctuation following a word
        ('(?=[,])', ' '),                   # space before every comma
        ('(?=\. )', ' '),                   # space before a sentence-final dot
    ]
    for pattern, replacement in splitting_passes:
        text = re.sub(pattern, replacement, text)
    text = text.lower()
    text = re.sub("[^A-Za-z0-9 .!?,øæå]+", "", text)  # keep only the allowed alphabet
    text = re.sub('[0-9]', '#', text)                 # mask digits
    text = " ".join(text.split())                     # collapse all whitespace
    return text + " <EOS>"
class Article:
    """Wrapper around one raw article dict: validated tags, title and body.

    Raises ValueError when the article payload is missing, lacks required
    fields, or its processed body exceeds ``max_words`` words.
    """

    def __init__(self, article_dict, max_words):
        # The raw payload stores the article object under key -1.
        art = article_dict[-1]
        if art is None:
            raise ValueError("Article is of type None")
        if "tags" not in art:
            raise ValueError("Tags not present")
        if "title" not in art:
            raise ValueError("Title not present")
        if "body" not in art:
            raise ValueError("Body not present")
        tags = art["tags"]
        self.tags = []
        for obj in tags:
            if "urlPattern" not in obj:
                continue
            self.tags.append(obj["urlPattern"])  # urlPattern vs. displayName
        self.title = process_text(art["title"])
        self.body = process_text(art["body"])
        if len(self.body.split(" ")) > max_words:
            raise ValueError("body too large")

    def __str__(self):
        text = "Tags: \n" + self.tags.__str__() + "\n"
        text += "Title: \n" + self.title + "\n"
        text += "Body: \n" + self.body + "\n"
        return text

    def __repr__(self):
        # Bug fix: the original called __str__() without returning it, so
        # repr(article) evaluated to None.
        return self.__str__()
def has_tag(article, tag):
    """Return True when *tag* appears in the article's tag list."""
    return any(existing == tag for existing in article.tags)
def get_articles_from_pickle_file(path, max_words=150):
    """Load pickled article dicts from *path* and wrap each in an Article.

    Articles that fail validation (or whose body exceeds *max_words* words)
    are counted and skipped. Progress and totals are reported on stdout.
    """
    with open(path, 'rb') as handle:
        print("Loading data")
        raw_data = pickle.load(handle)
        print("Done loading")
        parsed = []
        error_count = 0
        for value in raw_data.values():
            try:
                parsed.append(Article(value, max_words))
            except ValueError:
                error_count += 1
        print("Done processing data")
        print("total errors = %d" % error_count)
        print("Total articles without error = %d" % len(parsed))
        return parsed
def count_total_tags(articles):
    """Count how often each tag occurs across *articles* and print the
    tags sorted by descending frequency, one ``tag count`` line each.
    """
    from collections import Counter
    print("Counting tags: ")
    # Counter replaces the hand-rolled dict-based counting loop.
    tags = Counter(tag for item in articles for tag in item.tags)
    print("Done counting tags")
    print("Sorting tags")
    # most_common() sorts by count descending with ties in insertion order,
    # matching the stable sorted(..., key=tags.get, reverse=True) it replaces.
    for tag, count in tags.most_common():
        print(tag, count)
    print("Done printing tags")
def save_articles_for_single_tag(articles, tag, relative_path):
    """Dump article bodies and titles into two newline-separated text
    files, ``<tag>.article.txt`` and ``<tag>.title.txt``, under
    *relative_path*.
    """
    with open(relative_path + tag + '.article.txt', 'w') as body_file:
        body_file.writelines(item.body + "\n" for item in articles)
    with open(relative_path + tag + '.title.txt', 'w') as title_file:
        title_file.writelines(item.title + "\n" for item in articles)
def count_articles_in_max_length_range(articles, start, end):
    """For each limit i in [start, end) print ``i count`` where *count* is
    the number of articles whose body has fewer than i space-separated words.

    The word counts are computed once and sorted, so each limit is answered
    with a binary search; the original re-split every article body for
    every limit (O(n*m) splits).
    """
    from bisect import bisect_left
    word_counts = sorted(len(item.body.split(" ")) for item in articles)
    range_dict = {}
    for i in range(start, end):
        # bisect_left gives the number of word counts strictly below i.
        range_dict[i] = bisect_left(word_counts, i)
    for k in sorted(range_dict.keys()):
        print(k, range_dict[k])
def filter_list_with_single_tag(articles, tag):
    """Return the subset of *articles* carrying *tag* (see has_tag)."""
    return [article for article in articles if has_tag(article, tag)]
if __name__ == '__main__':
    # Example driver: extract all "politi" articles whose processed body is
    # at most 150 words and write them next to the pickled dump.
    tag = "politi"
    max_words = 150
    articles = get_articles_from_pickle_file('../data/articles2_nor/total.pkl', max_words)
    filtered = filter_list_with_single_tag(articles, tag)
    save_articles_for_single_tag(filtered, tag, '../data/articles2_nor/')
    print("Done")
| mit | Python | |
729fda094a195c4c24846e637fcba70b5d416f18 | add .py version of upcoming2posts | thehackerwithin/berkeley,thehackerwithin/berkeley,thehackerwithin/berkeley,thehackerwithin/berkeley,thehackerwithin/berkeley,thehackerwithin/berkeley | docs/upcoming2posts.py | docs/upcoming2posts.py |
# coding: utf-8
# # upcoming2posts
#
# By Stuart Geiger (@staeiou), MIT license
#
# This is a script you run the day after THW, which changes yesterday's file from "upcoming" to "posts" so that the next week's topic shows on the main page.
# In[78]:
# In[1]:
import datetime
from datetime import timedelta
import os
import glob
import re
# In[ ]:
# In[2]:
today = datetime.date.today()
yesterday = today - timedelta(1)
# In[3]:
if yesterday.isoweekday() == 2:
yesterday_str = yesterday.strftime("%Y-%m-%d")
# In[4]:
filename = glob.glob("_posts/" + yesterday_str + "*")[0]
# In[5]:
with open(filename, "r") as file:
file_text = file.read()
file_text
# In[6]:
file_text = file_text.replace('category: upcoming', 'category: posts')
file_text = file_text.replace('category:upcoming', 'category: posts')
file_text
# In[7]:
with open(filename, "w") as file:
file.write(file_text)
| bsd-3-clause | Python | |
c7a9ed8771039be0fa8288cf0cca14f44a1f3d35 | Add timeslice example | tamasgal/km3pipe,tamasgal/km3pipe | examples/offline_analysis/timeslices.py | examples/offline_analysis/timeslices.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: ts=4 sw=4 et
"""
==================
Reading Timeslices
==================
This examples show how to access L0, L1, L2 and SN timeslice streams using
km3pipe. Note that you need an activated Jpp environment and jppy installed
(http://git.km3net.de/km3py/jppy.git).
Both are available in Lyon, wherebei ``jppy`` is included in the group
python installation.
A general note on timeslice streams: in older Jpp versions there was only
one timeslice stream present in the files. This was later separated into
L0, L1, L2 and SN streams. A convenient tool to check the availability of
streams is ``JPrintTree -f FILENAME``::
> JPrintTree -f KM3NeT_00000014_00004451.root
KM3NeT_00000014_00004451.root
KM3NET_TIMESLICE KM3NETDAQ::JDAQTimeslice 2808 8416 [MB]
KM3NET_EVENT KM3NETDAQ::JDAQEvent 70 0 [MB]
KM3NET_SUMMARYSLICE KM3NETDAQ::JDAQSummaryslice 2808 4 [MB]
Here is an example with split timeslice streams::
> JPrintTree -f KM3NeT_00000029_00003242.root
KM3NeT_00000029_00003242.root
KM3NET_TIMESLICE KM3NETDAQ::JDAQTimeslice 0 0 [MB]
KM3NET_TIMESLICE_L0 KM3NETDAQ::JDAQTimesliceL0 0 0 [MB]
KM3NET_TIMESLICE_L1 KM3NETDAQ::JDAQTimesliceL1 5390 319 [MB]
KM3NET_TIMESLICE_L2 KM3NETDAQ::JDAQTimesliceL2 0 0 [MB]
KM3NET_TIMESLICE_SN KM3NETDAQ::JDAQTimesliceSN 107910 162 [MB]
KM3NET_EVENT KM3NETDAQ::JDAQEvent 21445 24 [MB]
KM3NET_SUMMARYSLICE KM3NETDAQ::JDAQSummaryslice 107910 109 [MB]
"""
ROOT_FILENAME = "KM3NeT_00000014_00004451.root"
import km3pipe as kp
####################################################################
# The timeslice pump is used to convert the timeslice objects in
# the ROOT file into numpy recarrays. We explicitly set the stream
# to an empty string, since we are opening an older file.
#
# Here is how to access the hits.
# The TimeslicePump converts the timeslice objects in the ROOT file into
# numpy recarrays; stream='' selects the single unsplit timeslice stream
# found in older Jpp files (see the module docstring above).
pump = kp.io.jpp.TimeslicePump(filename=ROOT_FILENAME, stream='')
for blob in pump:
    # Each blob exposes the timeslice hits under the 'TSHits' key.
    hits = blob['TSHits']
| mit | Python | |
5ef616d0563f9e4f29ef7eaa3c163d24cf3e131f | Add simple integration test for memory profiler | jitseniesen/spyder-memory-profiler,jitseniesen/spyder-memory-profiler,spyder-ide/spyder.memory_profiler | spyder_memory_profiler/widgets/tests/test_memoryprofiler.py | spyder_memory_profiler/widgets/tests/test_memoryprofiler.py | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) Spyder Project Developers
#
# Licensed under the terms of the MIT License
# (see LICENSE.txt for details)
# -----------------------------------------------------------------------------
"""Tests for memoryprofiler.py."""
# Standard library imports
import os
# Third party imports
from pytestqt import qtbot
from qtpy.QtCore import Qt
from spyder.utils.qthelpers import qapplication
MAIN_APP = qapplication()
# Local imports
from spyder_memory_profiler.widgets.memoryprofiler import MemoryProfilerWidget
try:
from unittest.mock import Mock
except ImportError:
from mock import Mock # Python 2
TEST_SCRIPT = \
"""@profile
def foo():
a = [1] * (10 ** 6)
b = [2] * (2 * 10 ** 7)
del b
return a
foo()"""
def test_profile_and_display_results(qtbot, tmpdir, monkeypatch):
    """Run profiler on simple script and check that results are okay."""
    os.chdir(tmpdir.strpath)
    script_path = tmpdir.join('test_foo.py').strpath
    with open(script_path, 'w') as script_file:
        script_file.write(TEST_SCRIPT)
    message_box = Mock()
    monkeypatch.setattr('spyder_memory_profiler.widgets.memoryprofiler.QMessageBox',
                        message_box)
    widget = MemoryProfilerWidget(None)
    qtbot.addWidget(widget)
    widget.analyze(script_path)
    qtbot.wait(2000)  # give the profiler subprocess time to finish
    message_box.assert_not_called()
    tree = widget.datatree
    assert tree.topLevelItemCount() == 1  # exactly one profiled function
    func_item = tree.topLevelItem(0)
    assert func_item.data(0, Qt.DisplayRole).startswith('foo ')
    assert func_item.childCount() == 6
    for row in range(6):
        # Column 0 of each child holds the 1-based line number.
        assert func_item.child(row).data(0, Qt.DisplayRole) == row + 1

    def increment(row):
        # Column 2 shows the increment as 'xxx MiB'; strip the unit suffix.
        return float(func_item.child(row).data(2, Qt.DisplayRole)[:-4])

    assert 7 <= increment(2) <= 8
    assert 150 <= increment(3) <= 160
    assert -160 <= increment(4) <= -150
    assert increment(5) == 0
| mit | Python | |
6b3d2ae77f3c1eb9b960cf01e1a94ec6fdbf6632 | load information from res1.mat | judithyueli/pyFKF | CO2simulation.py | CO2simulation.py | import scipy.io
import numpy as np
# class CO2simulation(object):
# def __init__(self, resolution):
# """
# resolution: low(59x55), medium(117x109), large(217x234)
# """
# Load Vp1.mat, Vp2.mat, Vp3.mat into
resolution = 'low'
if resolution is 'low':
Vp_dict = scipy.io.loadmat('./data/Res1.mat')
elif resolution is 'medium':
Vp_dict = scipy.io.loadmat('./data/Res2.mat')
elif resolution is 'high':
Vp_dict = scipy.io.loadmat('./data/Res3.mat')
else:
print 'select resolution among low, medium and high'
x_true_array = Vp_dict['truemodel']
x_loc_array = Vp_dict['xc']
y_loc_array = Vp_dict['yc']
H_mtx = Vp_dict['H']
x_true = [] #np.array(41)
for i in range(41):
x_true.append(x_true_array[i][0])
print x_true[40].shape
print type (x_true_array)
print len(x_true)
| mit | Python | |
58af5137225855ffbfa62a03dc1015570ef0d203 | Purge beffer just in case before getting the result | DMOJ/site,Phoenix1369/site,Phoenix1369/site,monouno/site,Minkov/site,Minkov/site,Minkov/site,monouno/site,monouno/site,monouno/site,Phoenix1369/site,monouno/site,DMOJ/site,DMOJ/site,Minkov/site,DMOJ/site,Phoenix1369/site | judge/math_parser.py | judge/math_parser.py | from HTMLParser import HTMLParser
import re
from django.conf import settings
MATHTEX_CGI = getattr(settings, 'MATHTEX_CGI', 'http://www.forkosh.com/mathtex.cgi')
inline_math = re.compile(r'~(.*?)~|\\\((.*?)\\\)')
display_math = re.compile(r'\$\$(.*?)\$\$|\\\[(.*?)\\\]')
REPLACES = [
(u'\u2264', r'\le'),
(u'\u2265', r'\ge'),
(u'\u2026', '...'),
(u'\u2212', '-'),
('≤', r'\le'),
('≤', r'\ge'),
(r'\lt', '<'),
(r'\gt', '>'),
]
def format_math(math):
for a, b in REPLACES:
math = math.replace(a, b)
return math
class MathHTMLParser(HTMLParser):
@classmethod
def convert(cls, html):
converter = cls()
converter.feed(html)
return converter.result
def __init__(self):
HTMLParser.__init__(self)
self.new_page = []
self.data_buffer = []
def _sub_inline(self, match):
return self.inline_math(format_math(match.group(1) or match.group(2)))
def _sub_display(self, match):
return self.display_math(format_math(match.group(1) or match.group(2)))
def inline_math(self, math):
raise NotImplementedError()
def display_math(self, math):
raise NotImplementedError()
@property
def result(self):
self.purge_buffer()
return ''.join(self.new_page)
def purge_buffer(self):
if self.data_buffer:
buffer = ''.join(self.data_buffer)
buffer = inline_math.sub(self._sub_inline, buffer)
buffer = display_math.sub(self._sub_display, buffer)
self.new_page.append(buffer)
del self.data_buffer[:]
def handle_starttag(self, tag, attrs):
self.purge_buffer()
self.new_page.append('<%s%s>' % (tag, ' '.join([''] + ['%s="%s"' % p for p in attrs])))
def handle_endtag(self, tag):
self.purge_buffer()
self.new_page.append('</%s>' % tag)
def handle_data(self, data):
self.data_buffer.append(data)
def handle_entityref(self, name):
self.data_buffer.append('&%s;' % name)
def handle_charref(self, name):
self.data_buffer.append('&#%s;' % name)
| from HTMLParser import HTMLParser
import re
from django.conf import settings
MATHTEX_CGI = getattr(settings, 'MATHTEX_CGI', 'http://www.forkosh.com/mathtex.cgi')
inline_math = re.compile(r'~(.*?)~|\\\((.*?)\\\)')
display_math = re.compile(r'\$\$(.*?)\$\$|\\\[(.*?)\\\]')
REPLACES = [
(u'\u2264', r'\le'),
(u'\u2265', r'\ge'),
(u'\u2026', '...'),
(u'\u2212', '-'),
('≤', r'\le'),
('≤', r'\ge'),
(r'\lt', '<'),
(r'\gt', '>'),
]
def format_math(math):
for a, b in REPLACES:
math = math.replace(a, b)
return math
class MathHTMLParser(HTMLParser):
@classmethod
def convert(cls, html):
converter = cls()
converter.feed(html)
return converter.result
def __init__(self):
HTMLParser.__init__(self)
self.new_page = []
self.data_buffer = []
def _sub_inline(self, match):
return self.inline_math(format_math(match.group(1) or match.group(2)))
def _sub_display(self, match):
return self.display_math(format_math(match.group(1) or match.group(2)))
def inline_math(self, math):
raise NotImplementedError()
def display_math(self, math):
raise NotImplementedError()
@property
def result(self):
return ''.join(self.new_page)
def purge_buffer(self):
if self.data_buffer:
buffer = ''.join(self.data_buffer)
buffer = inline_math.sub(self._sub_inline, buffer)
buffer = display_math.sub(self._sub_display, buffer)
self.new_page.append(buffer)
del self.data_buffer[:]
def handle_starttag(self, tag, attrs):
self.purge_buffer()
self.new_page.append('<%s%s>' % (tag, ' '.join([''] + ['%s="%s"' % p for p in attrs])))
def handle_endtag(self, tag):
self.purge_buffer()
self.new_page.append('</%s>' % tag)
def handle_data(self, data):
self.data_buffer.append(data)
def handle_entityref(self, name):
self.data_buffer.append('&%s;' % name)
def handle_charref(self, name):
self.data_buffer.append('&#%s;' % name)
| agpl-3.0 | Python |
21295e723abbff6c9a04c19179e7c61658f36418 | Create clickhouse_system_metrics.py | site24x7/plugins,site24x7/plugins,site24x7/plugins | clickhouse/clickhouse_system_metrics/clickhouse_system_metrics.py | clickhouse/clickhouse_system_metrics/clickhouse_system_metrics.py | #!/usr/bin/python
'''
Site24x7 Plugin to monitor Clickhouse Asynchronous metrics
'''
from clickhouse_driver import Client
import json
import argparse
### Query: SELECT event, value FROM system.events
### Ignored metrics
### "BackgroundBufferFlushSchedulePoolTask", "BackgroundDistributedSchedulePoolTask", "BackgroundMovePoolTask", "BackgroundPoolTask", "BackgroundSchedulePoolTask",
### "CacheDictionaryUpdateQueueBatches", "CacheDictionaryUpdateQueueKeys", "DictCacheRequests", "DiskSpaceReservedForMerge", "DistributedFilesToInsert",
### "DistributedSend", "EphemeralNode", "GlobalThread", "GlobalThreadActive", "HTTPConnection", "InterserverConnection", "LocalThread", "LocalThreadActive",
### "MemoryTracking", "MemoryTrackingForMerges", "MemoryTrackingInBackgroundBufferFlushSchedulePool",
### "MemoryTrackingInBackgroundDistributedSchedulePool", "MemoryTrackingInBackgroundMoveProcessingPool",
### "MemoryTrackingInBackgroundProcessingPool", "MemoryTrackingInBackgroundSchedulePool", "PartMutation",
### "QueryPreempted", "QueryThread", "RWLockActiveReaders", "RWLockActiveWriters",
### "Revision", "SendExternalTables", "SendScalars", "StorageBufferBytes","TCPConnection", "VersionInteger",, "ZooKeeperWatch"
METRICS = ["ContextLockWait", "DelayedInserts", "Merge", "MySQLConnection", "OpenFileForRead", "OpenFileForWrite", "PostgreSQLConnection", "Query",
"RWLockWaitingReaders", "RWLockWaitingWriters", "Read", "ReadonlyReplica", "ReplicatedChecks", "ReplicatedFetch", "ReplicatedSend",
"StorageBufferRows", "Write", "ZooKeeperRequest", "ZooKeeperSession"]
class ClickHouse:
    """Connects to a ClickHouse server (connection parameters come from the
    command line) and exposes the monitored subset of system.metrics as a
    plain dict.
    """

    def __init__(self):
        pass

    def _connect_(self):
        """Parse the CLI connection arguments and return a driver Client."""
        parser = argparse.ArgumentParser()
        parser.add_argument('--hostname', help='hostname', nargs='?', default='localhost')
        parser.add_argument('--port', help='port number', type=int, nargs='?', default=9000)
        parser.add_argument('--database', help='database', nargs='?', default='default')
        parser.add_argument('--user', help='user', nargs='?', default='default')
        parser.add_argument('--password', help='password', nargs='?', default='')
        parser.add_argument('--timeout', help='connection timeout', type=int, nargs='?', default=10)
        args = parser.parse_args()
        return Client(host=args.hostname, port=args.port, database=args.database,
                      user=args.user, password=args.password, connect_timeout=args.timeout)

    def _get_data_(self):
        """Fetch system.metrics and return only the metrics named in METRICS."""
        QUERY = 'SELECT metric,value FROM system.metrics'
        connection = self._connect_()
        self.metrics = dict(connection.execute(QUERY))
        connection.disconnect()
        # A set gives O(1) membership tests (METRICS is a list), and the
        # comprehension replaces the loop plus the redundant dict() copy.
        wanted = set(METRICS)
        return {key: value for key, value in self.metrics.items() if key in wanted}
if __name__ == '__main__':
    # Site24x7 plugin entry point: print the filtered metrics as pretty JSON.
    clickhouse = ClickHouse()
    result = clickhouse._get_data_()
    print(json.dumps(result, indent=4, sort_keys=True))
| bsd-2-clause | Python | |
e5304549600ab58687c97f58aa37e2734b55c735 | add a command to clear task metas | dstufft/jutils | crate_project/apps/crate/management/commands/clear_celery_meta.py | crate_project/apps/crate/management/commands/clear_celery_meta.py | import redis
from django.conf import settings
from django.core.management.base import BaseCommand
class Command(BaseCommand):
def handle(self, *args, **options):
r = redis.StrictRedis(host=settings.GONDOR_REDIS_HOST, port=settings.GONDOR_REDIS_PORT, password=settings.GONDOR_REDIS_PASSWORD)
i = 0
for key in r.keys("celery-*"):
r.delete(key)
i += 1
print "%d keys cleared" % i
| bsd-2-clause | Python | |
0bda6b7c6b77144c9a356d3e5846b365d2ce4600 | add TextRenderer protocol | mozman/ezdxf,mozman/ezdxf,mozman/ezdxf,mozman/ezdxf,mozman/ezdxf | src/ezdxf/addons/drawing/text_renderer.py | src/ezdxf/addons/drawing/text_renderer.py | # Copyright (c) 2022, Manfred Moitzi
# License: MIT License
from typing import TypeVar
from typing_extensions import Protocol
from ezdxf.tools.fonts import FontFace, FontMeasurements
from ezdxf.path import Path
T = TypeVar("T")
class TextRenderer(Protocol):
"""Minimal requirement to be usable as a universal text renderer, for more
information see usage in PillowBackend().
Implementations:
- MplTextRenderer
- QtTextRenderer
"""
def get_font_properties(self, font: FontFace) -> T:
...
def get_font_measurements(self, font_properties: T) -> FontMeasurements:
...
def get_scale(self, cap_height: float, font_properties: T) -> float:
...
def get_text_line_width(
self, text: str, cap_height: float, font: FontFace = None
) -> float:
...
def get_ezdxf_path(self, text: str, font_properties: T) -> Path:
...
| mit | Python | |
e05345f3133313c77ebf1dd26c8a44a5d4d98cfe | add test_TreeReaderPackage.py | alphatwirl/alphatwirl,alphatwirl/alphatwirl,alphatwirl/alphatwirl,TaiSakuma/AlphaTwirl,TaiSakuma/AlphaTwirl,alphatwirl/alphatwirl | tests/test_TreeReaderPackage.py | tests/test_TreeReaderPackage.py | #!/usr/bin/env python
from AlphaTwirl import TreeReaderPackage
import unittest
##____________________________________________________________________________||
class MockReader(object):
pass
##____________________________________________________________________________||
class MockCollector(object):
def __init__(self):
self.readers = [ ]
self.collected = False
def addReader(self, datasetName, reader):
self.readers.append((datasetName, reader))
def collect(self):
self.collected = True
##____________________________________________________________________________||
class TestTreeReaderPackage(unittest.TestCase):
def test_make(self):
collector = MockCollector()
package = TreeReaderPackage(MockReader, collector)
reader = package.make("data1")
self.assertIsInstance(reader, MockReader)
self.assertEqual([("data1", reader)], collector.readers)
def test_collect(self):
collector = MockCollector()
package = TreeReaderPackage(MockReader, collector)
self.assertFalse(collector.collected)
package.collect()
self.assertTrue(collector.collected)
##____________________________________________________________________________||
| bsd-3-clause | Python | |
2ab5dc1d9f2f19811e168aef90df086bcb618909 | Create /tests02_treat/ with __init__.py | ToFuProject/tofu,Didou09/tofu,ToFuProject/tofu | tests/tests02_treat/__init__.py | tests/tests02_treat/__init__.py | """
This module contains tests for tofu.geom in its structured version
"""
# Nose-specific
from nose import with_setup # optional
#######################################################
#
# Setup and Teardown
#
#######################################################
def setup_module(module):
    """Print the test-section banner (runs once before this module)."""
    banner_lines = (
        "",  # newline after the nose progress dots
        "",
        "############################################",
        "############################################",
        " test02_treat",
        "############################################",
        "",
    )
    for banner_line in banner_lines:
        print(banner_line)
def teardown_module(module):
    """Module-level teardown (runs once after all tests in this file).

    Currently a no-op; the commented-out lines show the intended cleanup of
    the vessel save files once those tests exist.
    """
    #os.remove(VesTor.Id.SavePath + VesTor.Id.SaveName + '.npz')
    #os.remove(VesLin.Id.SavePath + VesLin.Id.SaveName + '.npz')
    #print ("teardown_module after everything in this file")
    #print ("") # this is to get a newline
    pass
| mit | Python | |
60ae1e1288a34e56fe42191e2f496e67a293e808 | Add conversion tool for apocraphya - messy regexes | kiwiheretic/logos-v2,kiwiheretic/logos-v2,kiwiheretic/logos-v2,kiwiheretic/logos-v2 | tools/convert_apo.py | tools/convert_apo.py | # convert TRK apo books to cancelbot format
import os
import csv
import re
import pdb
def convert_file(filename_in, filename_out):
print "Converting %s to %s" % (filename_in, filename_out)
fin = open(filename_in,"r")
fout = open(filename_out + ".txt","w")
chapter = 1 # some files don't have chapter numbers
for line in fin.readlines():
mch1 = re.match("\d+\s\w+\s(\d+):\d+-\d+", line, re.I)
# chapter regexes (Macabees)
if mch1:
print line
chapter = int(mch1.group(1))
continue
# verse regexes
mch1 = re.match("\[(\d+)\]\s+(.*)", line, re.I)
mch2 = re.match("(\d+):(\d+)\s+(.*)", line, re.I)
mch3 = re.match("\s(\d+)\.\s+(.*)", line, re.I)
mch4 = re.match("(\d+)\s(\w.*)", line, re.I)
notmch4 = re.match("(\d+)\s\w+\s\d+:\d+-\d+", line, re.I)
mch5 = re.match("\((\d+)\)\s+(.*)", line, re.I)
if mch1:
verse = int(mch1.group(1))
text = mch1.group(2)
fout.write("%d:%d %s\n" % (chapter, verse, text))
elif mch3:
verse = int(mch3.group(1))
text = mch3.group(2)
fout.write("%d:%d %s\n" % (chapter, verse, text))
elif mch4 and not notmch4:
verse = int(mch4.group(1))
text = mch4.group(2)
fout.write("%d:%d %s\n" % (chapter, verse, text))
elif mch5:
verse = int(mch5.group(1))
text = mch5.group(2)
fout.write("%d:%d %s\n" % (chapter, verse, text))
elif mch2:
chapter = int(mch2.group(1))
verse = int(mch2.group(2))
text = mch2.group(3)
fout.write("%d:%d %s\n" % (chapter, verse, text))
fout.close()
fin.close()
if __name__ == "__main__":
base = os.path.dirname(os.path.realpath(__file__))
outdir = os.path.join(base, 'output')
trans_file = os.path.join(base, 'trans_file.csv')
if not os.path.exists(outdir):
os.mkdir(outdir)
if not os.path.exists(trans_file):
fh = open(trans_file, "w")
for f in os.listdir(base):
if os.path.isfile(os.path.join(base, f)):
fh.write( "\"%s\", \"\"\n" % (f,))
fh.close()
else:
with open(trans_file, 'rb') as csvfile:
csvreader = csv.reader(csvfile)
for row in csvreader:
fn_in = row[0]
fn_out = row[1]
convert_file(os.path.join(base,fn_in),
os.path.join(outdir,fn_out))
| apache-2.0 | Python | |
d5fdbc97dbf64e2a49a898644f03b2ead27c5095 | Add an interface for ContainerRegistry | onitake/Uranium,onitake/Uranium | UM/Settings/ContainerRegistry.py | UM/Settings/ContainerRegistry.py | # Copyright (c) 2016 Ultimaker B.V.
# Uranium is released under the terms of the AGPLv3 or higher.
from UM.PluginRegistry import PluginRegistry
## Central class to manage all Setting containers.
#
#
class ContainerRegistry:
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._containers = []
PluginRegistry.getInstance().addType("settings_container", self.addContainerType)
## Find all DefinitionContainer objects matching certain criteria.
#
# \param filter \type{dict} A dictionary containing keys and values that need to match the metadata of the DefinitionContainer.
def findDefinitionContainers(self, filter):
return []
## Find all InstanceContainer objects matching certain criteria.
#
# \param filter \type{dict} A dictionary containing keys and values that need to match the metadata of the InstanceContainer.
def findInstanceContainers(self, filter):
return []
## Find all ContainerStack objects matching certain criteria.
#
# \param filter \type{dict} A dictionary containing keys and values that need to match the metadata of the ContainerStack.
def findContainerStacks(self, filter):
return []
## Add a container type that will be used to serialize/deserialize containers.
#
# \param container An instance of the container type to add.
def addContainerType(self, container):
pass
## Get the singleton instance for this class.
@classmethod
def getInstance(cls):
if not cls.__instance:
cls.__instance = ContainerRegistry()
return cls.__instance
__instance = None
| agpl-3.0 | Python | |
548cf8f797abae99658573c289747cc498be3a5a | add dispatch | talkincode/toughlib | toughlib/dispatch.py | toughlib/dispatch.py | #!/usr/bin/env python
# coding=utf-8
import types
class EventDispatcher:
def __init__(self, prefix="event_"):
self.prefix = prefix
self.callbacks = {}
def sub(self, name, meth):
self.callbacks.setdefault(name, []).append(meth)
def register(self, obj):
from twisted.python import reflect
d = {}
reflect.accumulateMethods(obj, d, self.prefix)
for k,v in d.items():
self.registerHandler(k, v)
def pub(self, name, *args, **kwargs):
for cb in self.callbacks[name]:
cb(*args, **kwargs)
dispatch = EventDispatcher()
sub = dispatch.sub
pub = dispatch.pub
register = dispatch.register
| mit | Python | |
c8e4fc3cea9cb294a609eccc5024a25fc20ddc29 | Add tests for failedloginblocker | mysociety/django-failedloginblocker | tests.py | tests.py | from django.test import TestCase
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
class FailedLoginBlockerTest(TestCase):
def setUp(self):
self.test_user = User.objects.create_user('test_user', 'test@example.com', 'password')
self.login_url = reverse('login')
def test_blocked_after_three_failed_login_attempts(self):
login_params = {
'username': self.test_user.username,
'password': 'wrong_password',
}
resp = self.client.post(self.login_url, login_params)
self.assertEqual(200, resp.status_code)
resp = self.client.post(self.login_url, login_params)
self.assertEqual(200, resp.status_code)
resp = self.client.post(self.login_url, login_params)
self.assertEqual(200, resp.status_code)
resp = self.client.post(self.login_url, login_params)
self.assertEqual(403, resp.status_code)
| bsd-3-clause | Python | |
71fd282aed8b299e3365c84df6d9a7b282157d45 | Update shopping_app | gr1d99/shopping-list,gr1d99/shopping-list,gr1d99/shopping-list | shopping_app.py | shopping_app.py | import configparser
import os
from flask import Flask
from views import (IndexView, AddItemsView, DashboardView,
LoginView, Logout, RegisterView,
RemoveShoppingList, ShoppingListDetail)
BASEDIR = os.path.dirname(os.path.abspath(__file__))
app = Flask(__name__)
config = configparser.ConfigParser()
config.read(BASEDIR + '/secret.ini')
# app urls
app.add_url_rule('/', view_func=IndexView.as_view('index'))
app.add_url_rule('/dashboard', view_func=DashboardView.as_view('dashboard'))
app.add_url_rule('/login', view_func=LoginView.as_view('login'))
app.add_url_rule('/logout', view_func=Logout.as_view('logout'))
app.add_url_rule('/register', view_func=RegisterView.as_view('register'))
app.add_url_rule('/create-shopping-list', view_func=AddItemsView.as_view('create-shopping-list'))
app.add_url_rule('/shopping-list-detail/', view_func=ShoppingListDetail.as_view('shopping-list-detail'))
app.add_url_rule('/remove-shopping-list', view_func=RemoveShoppingList.as_view('remove-shopping-list'))
# app conf
app.secret_key = os.environ.get('SECRET_KEY', config['SECRET_KEY']['KEY'])
if __name__ == '__main__':
app.run()
| import configparser
import os
from flask import Flask
from views import (IndexView, AddItemsView, DashboardView,
LoginView, Logout, RegisterView,
RemoveShoppingList, ShoppingListDetail)
BASEDIR = os.path.dirname(os.path.abspath(__file__))
app = Flask(__name__)
config = configparser.ConfigParser()
config.read(BASEDIR + '/secret.ini')
# app urls
app.add_url_rule('/', view_func=IndexView.as_view('index'))
app.add_url_rule('/dashboard', view_func=DashboardView.as_view('dashboard'))
app.add_url_rule('/login', view_func=LoginView.as_view('login'))
app.add_url_rule('/logout', view_func=Logout.as_view('logout'))
app.add_url_rule('/register', view_func=RegisterView.as_view('register'))
app.add_url_rule('/create-shopping-list', view_func=AddItemsView.as_view('create-shopping-list'))
app.add_url_rule('/shopping-list-detail/', view_func=ShoppingListDetail.as_view('shopping-list-detail'))
app.add_url_rule('/remove-shopping-list', view_func=RemoveShoppingList.as_view('remove-shopping-list'))
# app conf
app.secret_key = config['SECRET_KEY']['KEY']
if __name__ == '__main__':
app.run()
| mit | Python |
f1c49a22a57f81a2d9358507c98e6527861ba3f3 | add project tag command | macarthur-lab/seqr,ssadedin/seqr,macarthur-lab/xbrowse,ssadedin/seqr,macarthur-lab/xbrowse,macarthur-lab/seqr,ssadedin/seqr,macarthur-lab/xbrowse,macarthur-lab/seqr,macarthur-lab/xbrowse,ssadedin/seqr,macarthur-lab/xbrowse,macarthur-lab/seqr,macarthur-lab/seqr,ssadedin/seqr,macarthur-lab/xbrowse | seqr/management/commands/add_project_tag.py | seqr/management/commands/add_project_tag.py | from django.core.management.base import BaseCommand, CommandError
from django.db.models.query_utils import Q
from seqr.models import Project, VariantTagType
from seqr.model_utils import create_seqr_model
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('--project', help='Project for tag.', required=True)
parser.add_argument('--name', help='Tag name', required=True)
parser.add_argument('--order', help='Order in project tag list', required=True)
parser.add_argument('--category', help='Category (optional)')
parser.add_argument('--description', help='Description (optional)')
parser.add_argument('--color', help='Color (optional)')
def handle(self, *args, **options):
project_name = options['project']
tag_options = {k: options[k] or '' for k in ['name', 'order', 'category', 'description', 'color']}
project = Project.objects.get(Q(name=project_name) | Q(guid=project_name))
if VariantTagType.objects.filter(name__iexact=options['name']).filter(Q(project=project) | Q(project__isnull=True)):
raise CommandError('Tag "{}" already exists for project {}'.format(options['name'], project_name))
create_seqr_model(VariantTagType, project=project, **tag_options)
| agpl-3.0 | Python | |
e762fc5b9ceb1fda0c61bec64a5ec795af66e1f0 | add migration to update Meta options | gpodder/mygpo,gpodder/mygpo,gpodder/mygpo,gpodder/mygpo | mygpo/podcasts/migrations/0021_meta.py | mygpo/podcasts/migrations/0021_meta.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Auto-generated schema migration: updates only the Meta options of the
    # podcasts models (no database columns are touched by AlterModelOptions).

    dependencies = [
        ('podcasts', '0020_extend_episode_mimetypes'),
        ('contenttypes', '__first__'),
    ]

    operations = [
        # Episodes list newest-first by default.
        migrations.AlterModelOptions(
            name='episode',
            options={'ordering': ['-released']},
        ),
        # Human-readable names for the admin.
        migrations.AlterModelOptions(
            name='mergeduuid',
            options={'verbose_name': 'Merged UUID', 'verbose_name_plural': 'Merged UUIDs'},
        ),
        # Slugs and URLs are ordered by their explicit 'order' field.
        migrations.AlterModelOptions(
            name='slug',
            options={'ordering': ['order']},
        ),
        migrations.AlterModelOptions(
            name='url',
            options={'ordering': ['order'], 'verbose_name': 'URL', 'verbose_name_plural': 'URLs'},
        ),
    ]
| agpl-3.0 | Python | |
c6efc8f73c264dd57a7d248d0aa33858863de826 | Update clipboard_pygame.py | jegger/kivy,Ramalus/kivy,MiyamotoAkira/kivy,viralpandey/kivy,jegger/kivy,kivy/kivy,CuriousLearner/kivy,darkopevec/kivy,denys-duchier/kivy,thezawad/kivy,kived/kivy,Ramalus/kivy,CuriousLearner/kivy,habibmasuro/kivy,jkankiewicz/kivy,xiaoyanit/kivy,xiaoyanit/kivy,dirkjot/kivy,angryrancor/kivy,rnixx/kivy,MiyamotoAkira/kivy,kivy/kivy,arcticshores/kivy,edubrunaldi/kivy,cbenhagen/kivy,manthansharma/kivy,bionoid/kivy,KeyWeeUsr/kivy,cbenhagen/kivy,bob-the-hamster/kivy,iamutkarshtiwari/kivy,jffernandez/kivy,jffernandez/kivy,jegger/kivy,autosportlabs/kivy,habibmasuro/kivy,dirkjot/kivy,ernstp/kivy,bionoid/kivy,denys-duchier/kivy,arlowhite/kivy,JohnHowland/kivy,manashmndl/kivy,inclement/kivy,el-ethan/kivy,gonzafirewall/kivy,JohnHowland/kivy,xiaoyanit/kivy,iamutkarshtiwari/kivy,CuriousLearner/kivy,matham/kivy,LogicalDash/kivy,habibmasuro/kivy,yoelk/kivy,viralpandey/kivy,youprofit/kivy,Cheaterman/kivy,vitorio/kivy,iamutkarshtiwari/kivy,Ramalus/kivy,autosportlabs/kivy,angryrancor/kivy,kived/kivy,KeyWeeUsr/kivy,adamkh/kivy,matham/kivy,gonzafirewall/kivy,vipulroxx/kivy,ernstp/kivy,viralpandey/kivy,manashmndl/kivy,bionoid/kivy,youprofit/kivy,VinGarcia/kivy,Shyam10/kivy,Shyam10/kivy,thezawad/kivy,aron-bordin/kivy,bionoid/kivy,manashmndl/kivy,jehutting/kivy,andnovar/kivy,akshayaurora/kivy,darkopevec/kivy,bob-the-hamster/kivy,jkankiewicz/kivy,kivy/kivy,LogicalDash/kivy,adamkh/kivy,ernstp/kivy,Farkal/kivy,adamkh/kivy,LogicalDash/kivy,MiyamotoAkira/kivy,bhargav2408/kivy,kived/kivy,KeyWeeUsr/kivy,LogicalDash/kivy,matham/kivy,bliz937/kivy,tony/kivy,vitorio/kivy,thezawad/kivy,Cheaterman/kivy,yoelk/kivy,jegger/kivy,JohnHowland/kivy,youprofit/kivy,edubrunaldi/kivy,rafalo1333/kivy,Shyam10/kivy,janssen/kivy,rafalo1333/kivy,denys-duchier/kivy,gonzafirewall/kivy,akshayaurora/kivy,mSenyor/kivy,janssen/kivy,Cheaterman/kivy,manthansharma/kivy,arcticshores/kivy,Farkal/kivy,aron-bordin/kivy,aron-bordin/kivy,arlowhite/kivy,JohnHowland
/kivy,mSenyor/kivy,edubrunaldi/kivy,tony/kivy,arcticshores/kivy,aron-bordin/kivy,vipulroxx/kivy,angryrancor/kivy,adamkh/kivy,VinGarcia/kivy,Farkal/kivy,akshayaurora/kivy,arlowhite/kivy,KeyWeeUsr/kivy,inclement/kivy,cbenhagen/kivy,darkopevec/kivy,yoelk/kivy,mSenyor/kivy,angryrancor/kivy,jffernandez/kivy,matham/kivy,vipulroxx/kivy,VinGarcia/kivy,xpndlabs/kivy,gonzafirewall/kivy,rafalo1333/kivy,andnovar/kivy,Shyam10/kivy,manthansharma/kivy,xpndlabs/kivy,bob-the-hamster/kivy,arcticshores/kivy,dirkjot/kivy,bhargav2408/kivy,darkopevec/kivy,bob-the-hamster/kivy,yoelk/kivy,Cheaterman/kivy,bhargav2408/kivy,janssen/kivy,dirkjot/kivy,rnixx/kivy,denys-duchier/kivy,el-ethan/kivy,jkankiewicz/kivy,tony/kivy,Farkal/kivy,jehutting/kivy,bliz937/kivy,manthansharma/kivy,jkankiewicz/kivy,inclement/kivy,jffernandez/kivy,jehutting/kivy,vipulroxx/kivy,janssen/kivy,MiyamotoAkira/kivy,vitorio/kivy,rnixx/kivy,ernstp/kivy,bliz937/kivy,el-ethan/kivy,autosportlabs/kivy,andnovar/kivy,xpndlabs/kivy | kivy/core/clipboard/clipboard_pygame.py | kivy/core/clipboard/clipboard_pygame.py | '''
Clipboard Pygame: an implementation of the Clipboard using pygame.scrap.
'''
__all__ = ('ClipboardPygame', )
from kivy.utils import platform
from kivy.compat import PY2
from kivy.core.clipboard import ClipboardBase
if platform not in ('win', 'linux', 'macosx'):
raise SystemError('unsupported platform for pygame clipboard')
try:
import pygame
import pygame.scrap
except:
raise
class ClipboardPygame(ClipboardBase):
    """Clipboard backend built on top of ``pygame.scrap``."""

    # pygame.scrap must be initialised exactly once per process.
    _is_init = False

    def init(self):
        """Initialise pygame's clipboard support on first use (idempotent)."""
        if not ClipboardPygame._is_init:
            pygame.scrap.init()
            ClipboardPygame._is_init = True

    def get(self, mimetype='text/plain'):
        """Return the current clipboard contents for *mimetype*."""
        self.init()
        contents = pygame.scrap.get(mimetype)
        # NOTE(review): on Python 2, scrap.get() returns a byte string and
        # .encode('utf-8') triggers an implicit ascii decode for non-ASCII
        # data -- confirm this is the intended round-trip.
        if PY2:
            contents = contents.encode('utf-8')
        return contents

    def put(self, data, mimetype='text/plain'):
        """Store *data* on the clipboard under *mimetype*."""
        self.init()
        # OS X pastes arrive with a single trailing NUL byte; drop it.
        if platform == 'macosx' and data.endswith('\x00'):
            data = data[:-1]
        pygame.scrap.put(mimetype, data)

    def get_types(self):
        """Return the list of mimetypes currently available on the clipboard."""
        self.init()
        return pygame.scrap.get_types()
| '''
Clipboard Pygame: an implementation of the Clipboard using pygame.scrap.
'''
__all__ = ('ClipboardPygame', )
from kivy.utils import platform
from kivy.compat import PY2
from kivy.core.clipboard import ClipboardBase
if platform not in ('win', 'linux', 'macosx'):
raise SystemError('unsupported platform for pygame clipboard')
try:
import pygame
import pygame.scrap
except:
raise
class ClipboardPygame(ClipboardBase):
_is_init = False
def init(self):
if ClipboardPygame._is_init:
return
pygame.scrap.init()
ClipboardPygame._is_init = True
def get(self, mimetype='text/plain'):
self.init()
<<<<<<< Updated upstream
text = pygame.scrap.get(mimetype)
if PY2:
text = text.encode('utf-8')
return text
=======
return pygame.scrap.get(mimetype).encode('utf-8')
>>>>>>> Stashed changes
def put(self, data, mimetype='text/plain'):
self.init()
if platform == 'macosx' and data.endswith('\x00'):
data = data[:-1]
pygame.scrap.put(mimetype, data)
def get_types(self):
self.init()
return pygame.scrap.get_types()
| mit | Python |
efd3fbf51be25b46e3d5dd0cb76aaab60de7e4c8 | Add a file allowing you to try it manually | 3DHubs/Ranch | tryit.py | tryit.py | import json
from ranch import Address, InvalidAddressException
# Interactively build a ranch Address from a country-data spec file,
# prompting for one field at a time until the address validates.
filename = input('Read data from: [data/export.json] ')
if filename == '':
    filename = 'data/export.json'
with open(filename, 'r') as data:
    specs = json.load(data)
a = Address(specs)
while not a.is_valid():
    fields = a.get_field_types()
    # Prompt for the first field that has not been filled in yet; if all
    # listed fields are set, fall back to the last one (presumably the
    # next field ranch expects -- TODO confirm against the ranch API).
    last_field = fields[-1]
    if len(fields) > 1:
        for field in fields[:-1]:
            if field[0] not in a.fields:
                last_field = field
                break
    try:
        # field[0] is the field key; invalid values are reported and re-asked.
        a.set_field(last_field[0], input(str(last_field[0]) + ': '))
    except InvalidAddressException as e:
        print('Error:', str(e))
print(a)
| apache-2.0 | Python | |
02a2d195ffe1c14fc04eb58fb62ee8dbdee70318 | add Xray | JohanComparat/nbody-npt-functions,JohanComparat/nbody-npt-functions | multidark/bin_SMHMr/add_Ms_Xray.py | multidark/bin_SMHMr/add_Ms_Xray.py | import StellarMass
import XrayLuminosity
import numpy as n
from scipy.stats import norm
from scipy.integrate import quad
from scipy.interpolate import interp1d
import glob
import astropy.io.fits as fits
import os
import time
import numpy as n
import sys
# set up box, and redshift
# Python 2 batch script: adds stellar-mass and X-ray lambda-SAR columns to
# MultiDark halo catalogues.  NOTE(review): the file imports numpy twice and
# everything after the sys.exit() below (up to the file loop) is dead code.
# set up box, and redshift
aexp = 0.74230
z = 1./0.74230 -1.
fileList = n.array(glob.glob(os.path.join(os.environ['MD04'], "snapshots", "hlist_" + str(aexp) + "_SAM_Nb_*.fits" )))
fileList.sort()
# set up the stellar mass computation
sm = StellarMass.StellarMass()
mhs = n.logspace(10,15,99)
ratio = sm.SMHMr(mhs,0.)
stellar_mass = sm.meanSM(mhs,0.)
print sm.sample_Ms( mhs, 0., scatter = 0.15 )
# set up the x ray lambda SAR
xr = XrayLuminosity.XrayLuminosity()
logMs = n.arange(6.5,12.5,0.01)
cdfs_interpolations = []
XXS = n.arange(32,36.1,0.1)
# Build, per stellar mass, an inverse-CDF interpolator used below to draw
# lambda_SAR values by inverse-transform sampling.
for jj,mass in enumerate(logMs):
    pd = lambda ll : xr.psi(ll, logM=mass, z=z)
    # NOTE(review): this local 'norm' shadows scipy.stats.norm imported at
    # the top of the file (norm.rvs is used again inside the file loop).
    norm = quad( pd, 32, 36)[0]
    cdfs_interpolations.append( interp1d(n.array([quad( pd, 32, X)[0] for X in XXS ])/norm,XXS) )
cdfs_interpolations = n.array(cdfs_interpolations)
# Execution stops here; the four statements below are unreachable and
# reference undefined names (cdf, cdf_norm, logM, pdn) -- NameError if run.
sys.exit()
print cdf(XXS, logM)
print cdf_norm(logM)
x = n.arange(29,37, 0.1)
y = pdn(x)
# loop on the files
ii=0
for fileName in fileList:
    t0=time.time()
    outFile = os.path.join(os.environ['MD04'], "catalogs", os.path.basename(fileName)[:-5] + ".Ms.fits")
    print outFile
    hd = fits.open(fileName)
    Nhalo=len(hd[1].data['mvir'])
    # Stellar masses from the mean SMHM relation plus 0.15 dex Gaussian scatter.
    Mgal_mvir_Mo13 = norm.rvs( loc = sm.meanSM(10**hd[1].data['mvir'], z), scale = 0.15 )
    # Inverse-transform sample lambda_SAR per halo from the interpolators above.
    randomX = n.random.rand(len(Mgal_mvir_Mo13))
    indexes = n.searchsorted(logMs,Mgal_mvir_Mo13)
    lambda_sar_Bo16 = n.array([ cdfs_interpolations[indexes[ii]](randomX[ii]) for ii in range(Nhalo) ])
    col0 = fits.Column(name='Mgal_mvir_Mo13',format='D', array = Mgal_mvir_Mo13 )
    col1 = fits.Column(name='Mgal_m200c_Mo13',format='D', array = norm.rvs( loc = sm.meanSM(10**hd[1].data['m200c'], z), scale = 0.15 ) )
    col2 = fits.Column(name='lambda_sar_Bo16',format='D', array = lambda_sar_Bo16 )
    col3 = fits.Column(name='Lx_cluster',format='D', array = n.ones(Nhalo) )
    #define the table hdu
    colArray = []
    for col in hd[1].columns :
        colArray.append(col)
    colArray.append(col0)
    colArray.append(col1)
    colArray.append(col2)
    colArray.append(col3)
    hdu_cols = fits.ColDefs(colArray)
    tb_hdu = fits.BinTableHDU.from_columns( hdu_cols )
    #define the header
    prihdr = fits.Header()
    prihdr['author'] = 'JC'
    prihdu = fits.PrimaryHDU(header=prihdr)
    #writes the file
    thdulist = fits.HDUList([prihdu, tb_hdu])
    #os.system("rm "+self.snl[ii][:-5]+"_PM_Nb_"+str(Nb)+".fits")
    # Overwrite any existing output file before writing.
    if os.path.isfile(outFile):
        os.system("rm "+outFile)
    thdulist.writeto(outFile)
    print time.time()-t0
| cc0-1.0 | Python | |
de438ae338f0fff48edff1e148ae860bf9354fc3 | Create 2-motors.py | CamJam-EduKit/EduKit3 | Code/2-motors.py | Code/2-motors.py | # CamJam EduKit 3 - Robotics
# CamJam EduKit 3 - Robotics
# Worksheet 2 - Motor Test Code
# Drive both motors forwards for one second, then release the GPIO pins.
import RPi.GPIO as GPIO # Import the GPIO Library
import time # Import the Time library

# Use Broadcom (BCM) pin numbering and silence channel-reuse warnings.
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)

# Motor-driver inputs: left motor on pins 7/8, right motor on pins 9/10.
MOTOR_PINS = (7, 8, 9, 10)

# Configure every motor pin as an output...
for pin in MOTOR_PINS:
    GPIO.setup(pin, GPIO.OUT)
# ...and start with both motors stopped (all inputs low).
for pin in MOTOR_PINS:
    GPIO.output(pin, 0)

# Turn the right motor forwards (pin 9 low, pin 10 high).
GPIO.output(9, 0)
GPIO.output(10, 1)
# Turn the left motor forwards (pin 7 low, pin 8 high).
GPIO.output(7, 0)
GPIO.output(8, 1)

# Let the motors run for one second.
time.sleep(1)

# Reset the GPIO pins (turns off motors too)
GPIO.cleanup()
| mit | Python | |
d32414164552f48226842176e05229f6895e3c1d | Add test utility for creating users. | CTPUG/wafer,CTPUG/wafer,CTPUG/wafer,CTPUG/wafer | wafer/tests/utils.py | wafer/tests/utils.py | """Utilities for testing wafer."""
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Permission
def create_user(username, email=None, superuser=False, perms=()):
    """Create a test user (optionally a superuser) with the given permissions.

    The password is derived from the username ("<username>_password") and the
    email defaults to "<username>@example.com".
    """
    user_model = get_user_model()
    factory = (user_model.objects.create_superuser if superuser
               else user_model.objects.create_user)
    if email is None:
        email = "%s@example.com" % username
    user = factory(username, email, "%s_password" % username)
    for codename in perms:
        user.user_permissions.add(Permission.objects.get(codename=codename))
    if perms:
        # Re-fetch so the instance's cached permission set is up to date.
        user = user_model.objects.get(pk=user.pk)
    return user
| isc | Python | |
fb616d3e5e9db85a107b130293944fa53c262649 | add engine_saki | jay-tyler/data-petting-zoo,jay-tyler/data-petting-zoo,jay-tyler/data-petting-zoo | engine_saki.py | engine_saki.py | import pandas as pd
import numpy as np
from pandas import Series, DataFrame
import re
from rules import name_rules, geofeat_rules
# Warn if a value is being assigned to a copy
pd.set_option('mode.chained_assignment', 'warn')
# Imports of needed data
gb = pd.read_pickle("data/pickles/gb.pk1")
def setgb(file_path):
    """Read the GB.txt file from geonames and return an appropriately
    filtered DataFrame"""
    # NOTE(review): read_table treats the first line as a header row by
    # default; geonames dumps have no header -- confirm this is intended.
    gb = pd.read_table(file_path)
    # Column layout of the geonames export (http://download.geonames.org).
    column_names = ['geoid', 'name', 'asciiname', 'altname', 'lat', 'long',
        'feature_class', 'feature_code', 'country_code', 'cc2', 'adm1', 'adm2',
        'adm3', 'adm4', 'pop', 'elev', 'delev', 'timezone', 'moddate']
    gb.columns = column_names
    # Removing rows that correspond to Cyprus, N. Ireland, cruft etc.
    remove_these_adm1 = ['05', '00', '01', 'NIR', '03']
    for i in remove_these_adm1:
        gb = gb[gb.adm1 != i]
    # Setting a list of alternate names from altname string field
    gb['ls_altname'] = gb['altname'].dropna().apply(lambda x: x.split(','))
    # Pick only feature codes that correspond to towns and cities, see
    # http://www.geonames.org/export/codes.html
    gb = gb[gb.feature_code.isin(geofeat_rules)]
    # Re-index 0..n-1 after filtering.
    gb.index = range(len(gb))
    return gb
def setfam(dfin):
    """Tag each place name with the name families whose regexes it matches.

    Works on a copy of *dfin* (despite the original "in-place" wording) and
    adds a 'ls_namefam' column holding a list of matching family keys, or
    NaN when nothing matched.
    Note: this only acts on the 'name' field. Use another function to setup
    altname families.
    TODO: This is a one run deal; not idempotent. Fix.
    """
    df = dfin.copy()
    df["ls_namefam"] = np.nan
    # Iterate over rows in dataframe
    for index, row in df.iterrows():
        # For each row, check each name_rule (dict of family-key -> regex list;
        # .iteritems() makes this Python 2 only).
        for namekey, ls_regex in name_rules.iteritems():
            result = patinstr(row.loc['name'], ls_regex)
            if result:
                # Cells hold Python lists; first hit replaces the NaN,
                # later hits append to the same list object in place.
                # NOTE(review): storing lists in DataFrame cells via .loc
                # relies on subtle pandas behaviour -- confirm on upgrade.
                cur = df.loc[index, 'ls_namefam']
                if not isinstance(cur, list):
                    df.loc[index, 'ls_namefam'] = list([namekey])
                else:
                    df.loc[index, 'ls_namefam'].append(namekey)
    return df
# list([name]) if not isinstance(cur, list) else cur.append(name)
# gb['ls_altname'].map(lambda x:patinls(x, patlist))
def setupalt(df):
    """Build a frame of alternate names linked to their parent name.

    NOTE(review): this function looks unfinished -- see the flags below; the
    commented-out loop appears to be the intended implementation.
    """
    df_alt = df.copy()
    df_alt.drop(['altname'], inplace=True, axis=1)
    df_alt.drop(['ls_altname'], inplace=True, axis=1)
    df_alt['parent'] = np.nan
    column_names = ['geoid', 'name', 'parent', 'asciiname', 'lat', 'long',
        'feature_class', 'feature_code', 'country_code', 'cc2',
        'adm1', 'adm2', 'adm3', 'adm4', 'pop', 'elev', 'delev',
        'timezone', 'moddate']
    df_alt = df_alt[column_names]
    # for index, row in df.iterrows():
    #     parent_name = row['name']
    #     for a_name in row['ls_altname']:
    #         df_alt.ix['name', ] = a_name
    #         df_alt.ix['parent', ] = parent_name
    for index, row in df.iterrows():
        # NOTE(review): parent_name is assigned but never used, and the inner
        # range(len(df)) loop overwrites EVERY row's 'name' with the current
        # altname, so only the last altname processed survives.  Also mutating
        # 'row' does not write back to df, and .ix is long deprecated.
        parent_name = row['name']
        for a_name in row['ls_altname']:
            row['name'] = a_name
            for i in range(len(df)):
                df_alt.ix[i, 'name'] = a_name
    return df_alt
def patinls(slist, patlist):
    """Scan the strings in *slist* and return the first ``re.match`` produced
    by any regex in *patlist*, or None when nothing matches.

    Non-iterable input returns None instead of raising.  Note that
    ``re.match`` anchors at the start of each string, so despite the original
    "substring" wording this is a prefix test.
    """
    try:
        strings = iter(slist)
    except TypeError:
        return None
    for candidate in strings:
        for pattern in patlist:
            hit = re.match(pattern, candidate)
            if hit:
                return hit
    return None
def patinstr(string, patlist):
    """Try each regex in *patlist* against *string* and return the first
    ``re.match`` obtained, or None when no pattern matches (or the list is
    empty).

    Note: ``re.match`` anchors at the beginning of the string, so despite
    the original "substring" wording this is a prefix test.
    """
    for pattern in patlist:
        hit = re.match(pattern, string)
        if hit:
            return hit
    return None
if __name__ == "__main__":
    # Smoke-run: parse the pristine geonames dump, tag name families, and
    # show the rows that matched at least one family (Python 2 print).
    gbf = 'data/pristine/GB.txt'
    gb = setgb(gbf)
    other = setfam(gb)
    print other['ls_namefam'].dropna()
# def setup_df_alt(df):
# df = df.copy()
# for index, row in df.iterrows():
# add'parent' = np.nan
# df_alt.append(row[slice])
# parent_name = row['name']
# for aname in row['is_altname']:
# row['name'] = aname
# df_alt.append(row[slice])
# return de_alt
| mit | Python | |
76d2119cd6b065dddc85046e302f55188ae1207b | Create test_package.py | sebi06/BioFormatsRead,dvdmena/BioFormatsRead | test_package.py | test_package.py | import numpy as np
import os
import bfimage as bf
# Python 2 demo script: read a CZI image with bfimage/BioFormats and print
# its metadata plus the shape of the 6D image array.  Requires the hardcoded
# test file below to exist.
filename = r'C:\Python_ZEN_Output\Experiment-454.czi'
imgbase = os.path.basename(filename)
imgdir = os.path.dirname(filename)
## get image meta-information
MetaInfo = bf.bftools.get_relevant_metainfo_wrapper(filename)
img6d = bf.bftools.getImage6D(filename, MetaInfo['Sizes'])
## show relevant image Meta-Information
print '\n'
print 'Image Directory      : ', imgdir
print 'Image Filename       : ', imgbase
print 'Images Dim Sizes     : ', MetaInfo['Sizes']
print 'Dimension Order*     : ', MetaInfo['DimOrder BF']
print 'Dimension Order CZI  : ', MetaInfo['OrderCZI']
print 'Total Series Number  : ', MetaInfo['TotalSeries']
print 'Image Dimensions     : ', MetaInfo['TotalSeries'], MetaInfo['SizeT'], MetaInfo['SizeZ'], MetaInfo['SizeC'],\
    MetaInfo['SizeY'], MetaInfo['SizeX']
print 'Scaling XYZ [micron] : ', MetaInfo['XScale'], MetaInfo['YScale'], MetaInfo['ZScale']
print 'Objective M-NA-Imm   : ', MetaInfo['ObjMag'], MetaInfo['NA'], MetaInfo['Immersion']
print 'Objective Name       : ', MetaInfo['ObjModel']
print 'Detector Name        : ', MetaInfo['DetName']
print 'Ex. Wavelengths [nm] : ', MetaInfo['WLEx']
print 'Em. Wavelengths [nm] : ', MetaInfo['WLEm']
print 'Channel Description  : ', MetaInfo['ChDesc']
print 'Array Shape 6D       : ', np.shape(img6d)
| bsd-2-clause | Python | |
089c1529a74cbe8f294b5ef4e81ec144a44a5a7b | Add broker object tests. | AdamGagorik/pydarkstar,LegionXI/pydarkstar | tests/broker.py | tests/broker.py | """
.. moduleauthor:: Adam Gagorik <adam.gagorik@gmail.com>
"""
import unittest
import pydarkstar.logutils
import pydarkstar.database
import pydarkstar.broker
import pydarkstar.rc
pydarkstar.logutils.setDebug()
class TestDarkObject(unittest.TestCase):
    # NOTE(review): despite the name, this case exercises Broker, not
    # DarkObject (likely copied from another test module).  It requires a
    # reachable MySQL server configured via pydarkstar.rc.

    def setUp(self):
        # Fresh database connection per test, using the rc-file credentials.
        self.db = pydarkstar.database.Database.pymysql(**pydarkstar.rc.sql)

    def test_init(self):
        # Constructing a Broker must not raise.
        pydarkstar.broker.Broker(self.db)
if __name__ == '__main__':
unittest.main() | mit | Python | |
aae317c01772b46cad96adaab5c7611897b1ade5 | add configuration for NDK 25 and Android API 32 (12L) | opencv/opencv,opencv/opencv,opencv/opencv,opencv/opencv,opencv/opencv,opencv/opencv,opencv/opencv,opencv/opencv,opencv/opencv,opencv/opencv | platforms/android/ndk-25.config.py | platforms/android/ndk-25.config.py | # Docs: https://developer.android.com/ndk/guides/cmake#android_native_api_level
# NOTE(review): this config is exec()'d by OpenCV's Android build script,
# which supplies `os` and the `ABI` class in the execution namespace --
# confirm against platforms/android/build_sdk.py.
ANDROID_NATIVE_API_LEVEL = int(os.environ.get('ANDROID_NATIVE_API_LEVEL', 32))
# CMake variables shared by every ABI below; all SDK levels default to 32
# (Android 12L) but can be overridden from the environment.
cmake_common_vars = {
    # Docs: https://source.android.com/docs/setup/about/build-numbers
    # Docs: https://developer.android.com/studio/publish/versioning
    'ANDROID_COMPILE_SDK_VERSION': os.environ.get('ANDROID_COMPILE_SDK_VERSION', 32),
    'ANDROID_TARGET_SDK_VERSION': os.environ.get('ANDROID_TARGET_SDK_VERSION', 32),
    'ANDROID_MIN_SDK_VERSION': os.environ.get('ANDROID_MIN_SDK_VERSION', ANDROID_NATIVE_API_LEVEL),
    # Docs: https://developer.android.com/studio/releases/gradle-plugin
    'ANDROID_GRADLE_PLUGIN_VERSION': '7.3.1',
    'GRADLE_VERSION': '7.5.1',
    'KOTLIN_PLUGIN_VERSION': '1.5.20',
}
# The ABIs to build; the first argument is the split/ordering id.
ABIs = [
    ABI("2", "armeabi-v7a", None, ndk_api_level=ANDROID_NATIVE_API_LEVEL, cmake_vars=cmake_common_vars),
    ABI("3", "arm64-v8a",   None, ndk_api_level=ANDROID_NATIVE_API_LEVEL, cmake_vars=cmake_common_vars),
    ABI("5", "x86_64",      None, ndk_api_level=ANDROID_NATIVE_API_LEVEL, cmake_vars=cmake_common_vars),
    ABI("4", "x86",         None, ndk_api_level=ANDROID_NATIVE_API_LEVEL, cmake_vars=cmake_common_vars),
]
| apache-2.0 | Python | |
b62b0c79e5da2312867ce44078e6098edcc09a5c | Implement more unit tests | amicks/Speculator | speculator/tests/unit/test_poloniex.py | speculator/tests/unit/test_poloniex.py | import unittest
from speculator.utils import poloniex
# https://poloniex.com/public?command=returnChartData¤cyPair=USDT_BTC&start=1483228800&end=1483272000&period=86400
EXPECTED_RESPONSE = [{'close': 999.36463982,
'date': 1483228800,
'high': 1008.54999326,
'low': 957.02,
'open': 965.00000055,
'quoteVolume': 1207.33863593,
'volume': 1196868.2615889,
'weightedAverage': 991.32772361},
{'close': 1019.00000076,
'date': 1483315200,
'high': 1034.32896003,
'low': 994.00000044,
'open': 999.92218873,
'quoteVolume': 1818.58703006,
'volume': 1847781.3863449,
'weightedAverage': 1016.0533182}]
EXPECTED_CHANGES = [EXPECTED_RESPONSE[1]['close'] - EXPECTED_RESPONSE[0]['close']]
class PoloniexTest(unittest.TestCase):
    # NOTE(review): test_chart_json performs a live HTTP request to the
    # Poloniex API, so it is network-dependent and can break if the API
    # changes or the endpoint disappears.

    def test_chart_json(self):
        ny_midnight = 1483228800 # 01/01/2017, 00:00
        # NOTE(review): despite the name, this is midnight of Jan 2nd, not
        # noon (see the inline comment).
        ny_noon = 1483315200 # 01/02/2017, 00:00
        period = 86400  # one candlestick per day
        currency_pair = 'USDT_BTC'
        http_response = poloniex.chart_json(ny_midnight, ny_noon, period, currency_pair)
        self.assertEqual(http_response, EXPECTED_RESPONSE)

    def test_parse_changes(self):
        # Close-to-close deltas of the canned two-day response.
        self.assertEqual(poloniex.parse_changes(EXPECTED_RESPONSE), EXPECTED_CHANGES)

    def test_get_gains_losses(self):
        # Gains are non-negative changes; losses are strictly negative ones.
        res = {'gains': [g for g in EXPECTED_CHANGES if g >= 0],
               'losses': [l for l in EXPECTED_CHANGES if l < 0]}
        self.assertEqual(poloniex.get_gains_losses(EXPECTED_CHANGES), res)
| mit | Python | |
e6e1a737e729a40dbda35a41fc861720f79c6907 | Fix OSS build. | facebook/SoLoader,facebook/SoLoader,facebook/SoLoader | build_defs/fb_core_android_library.bzl | build_defs/fb_core_android_library.bzl | """OSS shim of the internal fb_core_android_library macros."""
def fb_core_android_library(**kwargs):
    # Open-source stand-in: forward every argument unchanged to the native
    # Buck android_library rule (the internal macro adds FB-only behaviour).
    native.android_library(**kwargs)
| apache-2.0 | Python | |
251607873e6da532040882a9873c47c7d7998dcf | Add __main__.py file to drupdate package | jalama/drupdates | drupdates/__main__.py | drupdates/__main__.py | from drupdates import main
main()
| mit | Python | |
374428e14e4291856659b0043cd01b51e0aca141 | Add main.py example | openmv/openmv,tianzhihen/openmv,tianzhihen/openmv,kwagyeman/openmv,SmartArduino/openmv,openmv/openmv,SmartArduino/openmv,kwagyeman/openmv,SmartArduino/openmv,iabdalkader/openmv,kwagyeman/openmv,tianzhihen/openmv,openmv/openmv,openmv/openmv,iabdalkader/openmv,iabdalkader/openmv,tianzhihen/openmv,SmartArduino/openmv,kwagyeman/openmv,iabdalkader/openmv | usr/examples/main.py | usr/examples/main.py | import led, time
# Blink the blue LED in a double-flash pattern until the host opens the USB
# virtual COM port (VCP), then fall through and exit.
# NOTE(review): on OpenMV's MicroPython, time.sleep takes milliseconds --
# confirm for the firmware version in use.
while (vcp_is_connected()==False):
    led.on(led.BLUE)
    time.sleep(150)
    led.off(led.BLUE)
    time.sleep(100)
    led.on(led.BLUE)
    time.sleep(150)
    led.off(led.BLUE)
    time.sleep(600)
| mit | Python | |
38a20b4746230d9e6b62d9a93aaa4102087d20e2 | Add User Handler | kkstu/Torweb,kkstu/Torweb | handler/user.py | handler/user.py | #!/usr/bin/python
# -*- coding:utf-8 -*-
# Powered By KK Studio
from BaseHandler import BaseHandler
class LoginHandler(BaseHandler):
    """Serve the login page."""
    def get(self):
        # GET /login -> render the login template; form handling lives elsewhere.
        self.render('user/login.html')
class RegisterHandler(BaseHandler):
    """Serve the user registration page."""
    def get(self):
        # GET /register -> render the registration template.
        self.render('user/register.html')
4c17b9e487e3e423f77d15fcefc9e78694d3b806 | Add sql2csv utility | Jobava/csvkit,elcritch/csvkit,gepuro/csvkit,KarrieK/csvkit,reubano/csvkit,arowla/csvkit,bmispelon/csvkit,kyeoh/csvkit,dannguyen/csvkit,cypreess/csvkit,aequitas/csvkit,nriyer/csvkit,bradparks/csvkit__query_join_filter_CSV_cli,haginara/csvkit,moradology/csvkit,barentsen/csvkit,tlevine/csvkit,archaeogeek/csvkit,matterker/csvkit,onyxfish/csvkit,wireservice/csvkit,Tabea-K/csvkit,doganmeh/csvkit,jpalvarezf/csvkit,unpingco/csvkit,themiurgo/csvkit,wjr1985/csvkit,snuggles08/csvkit | csvkit/utilities/sql2csv.py | csvkit/utilities/sql2csv.py | #!/usr/bin/env python
from argparse import FileType
import sys
from csvkit import CSVKitWriter
from csvkit import sql
from csvkit import table
from csvkit.cli import CSVKitUtility
class SQL2CSV(CSVKitUtility):
    """csvkit utility: run an SQL query against a database and emit CSV."""
    description = 'Execute an SQL query on a database and output the result to a CSV file.'
    # Input-parsing flags inherited from CSVKitUtility that make no sense
    # here (the input is SQL, not CSV) and are therefore suppressed.
    override_flags = 'f,b,d,e,H,p,q,S,t,u,z,zero'.split(',')

    def add_arguments(self):
        self.argparser.add_argument('--db', dest='connection_string', default='sqlite://',
            help='An sqlalchemy connection string to connect to a database.',)
        self.argparser.add_argument('file', metavar="FILE", nargs='?', type=FileType('rb'), default=sys.stdin,
            help='The file to use as SQL query. If both FILE and QUERY are omitted, query will be read from STDIN.')
        self.argparser.add_argument('-q', '--query', default=None,
            help="The SQL query to execute. If specified, it overrides FILE and STDIN.")
        self.argparser.add_argument('-H', '--no-header-row', dest='no_header_row', action='store_true',
            help='Do not output column names.')
        # Neutralise the CSV-reader defaults the base class would otherwise
        # apply to the (non-CSV) input stream.
        self.argparser.set_defaults(
            delimiter=None,
            doublequote=None,
            escapechar=None,
            encoding='utf-8',
            maxfieldsize=None,
            quotechar=None,
            quoting=None,
            skipinitialspace=None,
            tabs=None,
        )

    def main(self):
        """Connect, execute the query, and stream the result set as CSV."""
        try:
            engine, metadata = sql.get_connection(self.args.connection_string)
        except ImportError:
            # Re-raise with installation hints for the missing DB driver.
            raise ImportError('You don\'t appear to have the necessary database backend installed for connection string you\'re trying to use.. Available backends include:\n\nPostgresql:\tpip install psycopg2\nMySQL:\t\tpip install MySQL-python\n\nFor details on connection strings and other backends, please see the SQLAlchemy documentation on dialects at: \n\nhttp://www.sqlalchemy.org/docs/dialects/\n\n')
        conn = engine.connect()
        # -q/--query wins; otherwise read the whole query from FILE/STDIN.
        if self.args.query:
            query = self.args.query.strip()
        else:
            query = ""
            for line in self.args.file:
                query += line
        rows = conn.execute(query)
        output = CSVKitWriter(self.output_file, **self.writer_kwargs)
        if not self.args.no_header_row:
            # NOTE(review): rows._metadata.keys is a private SQLAlchemy
            # attribute; rows.keys() is the public equivalent -- confirm
            # before upgrading SQLAlchemy.
            output.writerow(rows._metadata.keys)
        for row in rows:
            output.writerow(row)
        conn.close()
def launch_new_instance():
    # Console-script entry point (referenced from the package's setup.py).
    utility = SQL2CSV()
    utility.main()

if __name__ == "__main__":
    launch_new_instance()
| mit | Python | |
1c659e3f58a23beabaab2a2aaa917fe7d35bf4e3 | add a setup_package for ALMA data (needed for the cycle0 table) | ceb8/astroquery,ceb8/astroquery,imbasimba/astroquery,imbasimba/astroquery | astroquery/alma/setup_package.py | astroquery/alma/setup_package.py | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
def get_package_data():
    """Return the package-data mapping for ``astroquery.alma``.

    The setup machinery calls this to learn which non-Python files ship
    with the package (here, the Cycle 0 ASDM-to-delivery mapping table).
    """
    data_files = [
        os.path.join('data', 'cycle0_delivery_asdm_mapping.txt'),
    ]
    return {'astroquery.alma': data_files}
| bsd-3-clause | Python | |
b43501a6f0a01330f5c65a1998319e4edd96d864 | Create IRC2Interview.py | junh1024/IRC2Interview | IRC2Interview.py | IRC2Interview.py | from io import StringIO
text = """
<junh1024> Both upscaled to 2/3 ch
<junh1024> I upscale to 6/7 ch this time
<junh1024> In prev 2, i made fake stereo width, this time, it's all real stereo, but i chopped it up lots
<junh1024> so 3xstereo vs 3xmono
<drf|Desktop> I mean
<drf|Desktop> I'm still fine with just making it actual mono
<drf|Desktop> it sounds just as good
"""
text=text.replace(" i "," I ")#capitalize Is
nicktokennumber = 0 #0 = first token
lastnick=""
lines = text.splitlines() #split by line
output = StringIO() #riced string
for line in lines:
tokenizedline = line.split(' ')
try:
if tokenizedline[nicktokennumber] != lastnick: #different nick
# print(tokenizedline[nicktokennumber])
# print(lastnick)
output.write('\r\n' + tokenizedline[nicktokennumber] + ':')
except:
continue
for word in range(nicktokennumber+1,len(tokenizedline)):
output.write(' '+ tokenizedline[word]) #print the the rest of the lines
output.write('.') #add full top
lastnick=tokenizedline[nicktokennumber]
# print(lastnick)
outputstring=output.getvalue()
print(outputstring)
| agpl-3.0 | Python | |
2ab367a28919763d2ec2ebb435b51c1545c16a6f | add s3stat tool | capitalone/cloud-custodian,VeritasOS/cloud-custodian,ocampocj/cloud-custodian,sixfeetup/cloud-custodian,FireballDWF/cloud-custodian,stevenmjo/cloud-custodian,jeffastorey/cloud-custodian,alfredgamulo/cloud-custodian,gwh59/cloud-custodian,scotwk/cloud-custodian,jimmyraywv/cloud-custodian,siddartha1992/cloud-custodian,JohnTheodore/cloud-custodian,taohungyang/cloud-custodian,ewbankkit/cloud-custodian,ewbankkit/cloud-custodian,thisisshi/cloud-custodian,kapilt/cloud-custodian,FireballDWF/cloud-custodian,capitalone/cloud-custodian,alfredgamulo/cloud-custodian,taohungyang/cloud-custodian,Sutto/cloud-custodian,taohungyang/cloud-custodian,jdubs/cloud-custodian,andrewalexander/cloud-custodian,thisisshi/cloud-custodian,kapilt/cloud-custodian,capitalone/cloud-custodian,Sutto/cloud-custodian,alfredgamulo/cloud-custodian,kapilt/cloud-custodian,thisisshi/cloud-custodian,ewbankkit/cloud-custodian,Sutto/cloud-custodian,JohnTheodore/cloud-custodian,scotwk/cloud-custodian,kapilt/cloud-custodian,FireballDWF/cloud-custodian,ocampocj/cloud-custodian,capitalone/cloud-custodian,ocampocj/cloud-custodian,FireballDWF/cloud-custodian,taohungyang/cloud-custodian,ocampocj/cloud-custodian,RyanWolfe/cloud-custodian,ewbankkit/cloud-custodian,Sutto/cloud-custodian | tools/s3stat.py | tools/s3stat.py | from datetime import datetime, timedelta
import boto3
import json
import pprint
import logging
import os
def bucket_info(c, bucket):
    """Return {'Bucket', 'ObjectCount', 'Size', 'SizeGB'} for *bucket*.

    *c* is a boto3 CloudWatch client; object count and size come from the
    daily AWS/S3 storage metrics (averages of the most recent datapoint).
    """
    result = {'Bucket': bucket}
    # Object count over the last day.
    # NOTE(review): Period=60*24*24 is 34560 s, not the daily 86400 s the
    # storage metrics are published at -- presumably a typo; confirm.
    response = c.get_metric_statistics(
        Namespace='AWS/S3',
        MetricName='NumberOfObjects',
        Dimensions=[
            {'Name': 'BucketName',
             'Value': bucket},
            {'Name': 'StorageType',
             'Value': 'AllStorageTypes'}
        ],
        StartTime=datetime.now().replace(hour=0, minute=0, second=0, microsecond=0) - timedelta(1),
        EndTime=datetime.now().replace(hour=0, minute=0, second=0, microsecond=0),
        Period=60*24*24,
        Statistics=['Average'])
    if not response['Datapoints']:
        result['ObjectCount'] = 0
    else:
        result['ObjectCount'] = response['Datapoints'][0]['Average']
    # Bucket size (standard storage class only), looking back 10 days --
    # NOTE(review): asymmetric with the 1-day window above; presumably a
    # wider net to find the latest datapoint -- confirm intent.
    response = c.get_metric_statistics(
        Namespace='AWS/S3',
        MetricName='BucketSizeBytes',
        Dimensions=[
            {'Name': 'BucketName',
             'Value': bucket},
            {'Name': 'StorageType',
             'Value': 'StandardStorage'},
        ],
        StartTime=datetime.now().replace(hour=0, minute=0, second=0, microsecond=0) - timedelta(10),
        EndTime=datetime.now().replace(hour=0, minute=0, second=0, microsecond=0),
        Period=60*24*24,
        Statistics=['Average'])
    if not response['Datapoints']:
        result['Size'] = 0
        result['SizeGB'] = 0
    else:
        result['Size'] = response['Datapoints'][0]['Average']
        result['SizeGB'] = result['Size'] / (1024.0 * 1024 * 1024)
    return result
def main():
    """Print per-bucket and total S3 object/size stats as JSON (Python 2)."""
    logging.basicConfig(level=logging.INFO)
    # NOTE(review): 'bucket' is read but never used -- filtering by the
    # BUCKET env var was presumably intended; confirm.
    bucket = os.environ.get('BUCKET')
    s = boto3.Session()
    cw = s.client('cloudwatch')
    s3 = s.client('s3')
    buckets = s3.list_buckets()['Buckets']
    results = {'buckets':[]}
    size_count = obj_count = 0.0
    for b in buckets:
        i = bucket_info(cw, b['Name'])
        results['buckets'].append(i)
        obj_count += i['ObjectCount']
        size_count += i['SizeGB']
    results['TotalObjects'] = obj_count
    results['TotalSizeGB'] = size_count
    print json.dumps(results, indent=2)

if __name__ == '__main__':
    main()
| apache-2.0 | Python | |
710a4db6c9e50f09de3359b8b53b3003e41e8480 | Add a simple watchdog python script. | sitedyno/krad_radio,sitedyno/krad_radio,sitedyno/krad_radio,sitedyno/krad_radio | watchdog.py | watchdog.py | #!/usr/bin/env
'''Watchdog script to keep a krad_radio instance running'''
import subprocess
import time
class Daemon:
    """Supervise a krad_radio_daemon process, restarting it whenever it exits."""

    def __init__(self):
        # Executable and station name handed to it on every (re)start.
        self.cmd = 'krad_radio_daemon'
        self.station = 'test'
        self.running = False

    def log(self, msg):
        """Print *msg* prefixed with a UTC timestamp."""
        # Bug fix: the month directive is '%m'; the original used '%M'
        # (minutes), producing dates like "2014-37-01" in the timestamp.
        # The parenthesised print form works on both Python 2 and 3.
        timestamp = time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime())
        print('--- %s - %s ---\n' % (timestamp, msg))

    def run(self, cmd=None):
        """Launch *cmd* (default: self.cmd) in a loop until self.running is cleared.

        subprocess.call blocks until the child exits; each exit code is
        logged and the daemon is immediately started again.
        """
        if not cmd:
            cmd = self.cmd
        self.running = True
        while self.running:
            self.log('running %s' % cmd)
            ret = subprocess.call([cmd, self.station])
            self.log('finished return ' + str(ret))
if __name__ == '__main__':
job = Daemon();
job.run()
| isc | Python | |
f30d459b8527074e50de504695491ad17bb18f0e | Add tests for exclude passing | saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt | tests/pytests/unit/states/test_saltmod.py | tests/pytests/unit/states/test_saltmod.py | import pytest
import salt.modules.saltutil as saltutil
import salt.states.saltmod as saltmod
from tests.support.mock import create_autospec, patch
@pytest.fixture(autouse=True)
def setup_loader(request):
    # Give the saltmod state module a minimal __opts__ so it loads outside a
    # real master/minion; autouse applies it to every test in this module.
    setup_loader_modules = {saltmod: {"__opts__": {"__role": "testsuite"}}}
    with pytest.helpers.loader_mock(request, setup_loader_modules) as loader_mock:
        yield loader_mock
@pytest.fixture
def fake_cmd():
    # Replace saltutil.cmd inside saltmod's __salt__ with an autospec'd mock
    # so tests can inspect exactly what saltmod.state() forwards to it.
    fake_cmd = create_autospec(saltutil.cmd)
    with patch.dict(saltmod.__salt__, {"saltutil.cmd": fake_cmd}):
        yield fake_cmd
@pytest.mark.parametrize(
    "exclude",
    [True, False],
)
def test_exclude_parameter_gets_passed(exclude, fake_cmd):
    """
    Ensure the ``exclude`` keyword given to salt.states.saltmod.state() is
    forwarded verbatim (for both True and False) to saltutil.cmd.
    """
    args = ("webserver_setup", "webserver2")
    expected_exclude = exclude
    kwargs = {
        "tgt_type": "glob",
        "exclude": expected_exclude,
        "highstate": True,
    }
    saltmod.state(*args, **kwargs)
    # call_args[1] is the keyword-argument dict of the recorded call.
    call = fake_cmd.call_args[1]
    assert call["kwarg"]["exclude"] == expected_exclude
def test_exclude_parameter_is_not_passed_if_not_provided(fake_cmd):
    """When ``exclude`` is omitted it must not appear in the kwargs that
    salt.states.saltmod.state() forwards to ``saltutil.cmd`` (preserves
    pre-existing behavior for callers that never set it)."""
    args = ("webserver_setup", "webserver2")
    kwargs_without_exclude = {
        "tgt_type": "glob",
        "highstate": True,
    }
    saltmod.state(*args, **kwargs_without_exclude)
    call = fake_cmd.call_args[1]
    assert "exclude" not in call["kwarg"]
| apache-2.0 | Python | |
b7532ac5daa922fd9cf64b0cee01afedc7452101 | Remove hardcoded IP | zdanek/FrameworkBenchmarks,zhuochenKIDD/FrameworkBenchmarks,RockinRoel/FrameworkBenchmarks,circlespainter/FrameworkBenchmarks,testn/FrameworkBenchmarks,ashawnbandy-te-tfb/FrameworkBenchmarks,zapov/FrameworkBenchmarks,greenlaw110/FrameworkBenchmarks,leafo/FrameworkBenchmarks,lcp0578/FrameworkBenchmarks,circlespainter/FrameworkBenchmarks,kbrock/FrameworkBenchmarks,waiteb3/FrameworkBenchmarks,Synchro/FrameworkBenchmarks,jetty-project/FrameworkBenchmarks,zloster/FrameworkBenchmarks,doom369/FrameworkBenchmarks,k-r-g/FrameworkBenchmarks,fabianmurariu/FrameworkBenchmarks,donovanmuller/FrameworkBenchmarks,actframework/FrameworkBenchmarks,mfirry/FrameworkBenchmarks,actframework/FrameworkBenchmarks,denkab/FrameworkBenchmarks,Verber/FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,sgml/FrameworkBenchmarks,lcp0578/FrameworkBenchmarks,kbrock/FrameworkBenchmarks,zloster/FrameworkBenchmarks,kostya-sh/FrameworkBenchmarks,sanjoydesk/FrameworkBenchmarks,knewmanTE/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,doom369/FrameworkBenchmarks,kbrock/FrameworkBenchmarks,jaguililla/FrameworkBenchmarks,RockinRoel/FrameworkBenchmarks,methane/FrameworkBenchmarks,ashawnbandy-te-tfb/FrameworkBenchmarks,raziel057/FrameworkBenchmarks,hperadin/FrameworkBenchmarks,torhve/FrameworkBenchmarks,sgml/FrameworkBenchmarks,alubbe/FrameworkBenchmarks,hperadin/FrameworkBenchmarks,RockinRoel/FrameworkBenchmarks,knewmanTE/FrameworkBenchmarks,mfirry/FrameworkBenchmarks,julienschmidt/FrameworkBenchmarks,sanjoydesk/FrameworkBenchmarks,sagenschneider/FrameworkBenchmarks,actframework/FrameworkBenchmarks,alubbe/FrameworkBenchmarks,Ocramius/FrameworkBenchmarks,xitrum-framework/FrameworkBenchmarks,Eyepea/FrameworkBenchmarks,jamming/FrameworkBenchmarks,thousandsofthem/FrameworkBenchmarks,sanjoydesk/FrameworkBenchmarks,sagenschneider/FrameworkBenchmarks,zloster/FrameworkBenchmarks,Rayne/FrameworkBenchmarks,kostya-sh/FrameworkBenchmarks,thousandsofthem/F
rameworkBenchmarks,zapov/FrameworkBenchmarks,zapov/FrameworkBenchmarks,Ocramius/FrameworkBenchmarks,alubbe/FrameworkBenchmarks,donovanmuller/FrameworkBenchmarks,victorbriz/FrameworkBenchmarks,hamiltont/FrameworkBenchmarks,yunspace/FrameworkBenchmarks,methane/FrameworkBenchmarks,victorbriz/FrameworkBenchmarks,markkolich/FrameworkBenchmarks,zloster/FrameworkBenchmarks,testn/FrameworkBenchmarks,zhuochenKIDD/FrameworkBenchmarks,saturday06/FrameworkBenchmarks,diablonhn/FrameworkBenchmarks,F3Community/FrameworkBenchmarks,sagenschneider/FrameworkBenchmarks,ratpack/FrameworkBenchmarks,psfblair/FrameworkBenchmarks,kbrock/FrameworkBenchmarks,nkasvosve/FrameworkBenchmarks,marko-asplund/FrameworkBenchmarks,leafo/FrameworkBenchmarks,k-r-g/FrameworkBenchmarks,torhve/FrameworkBenchmarks,dmacd/FB-try1,saturday06/FrameworkBenchmarks,Rydgel/FrameworkBenchmarks,zane-techempower/FrameworkBenchmarks,youprofit/FrameworkBenchmarks,xitrum-framework/FrameworkBenchmarks,yunspace/FrameworkBenchmarks,steveklabnik/FrameworkBenchmarks,yunspace/FrameworkBenchmarks,jetty-project/FrameworkBenchmarks,thousandsofthem/FrameworkBenchmarks,herloct/FrameworkBenchmarks,yunspace/FrameworkBenchmarks,sgml/FrameworkBenchmarks,raziel057/FrameworkBenchmarks,zdanek/FrameworkBenchmarks,sanjoydesk/FrameworkBenchmarks,waiteb3/FrameworkBenchmarks,markkolich/FrameworkBenchmarks,grob/FrameworkBenchmarks,kellabyte/FrameworkBenchmarks,zapov/FrameworkBenchmarks,sanjoydesk/FrameworkBenchmarks,sgml/FrameworkBenchmarks,Dith3r/FrameworkBenchmarks,steveklabnik/FrameworkBenchmarks,raziel057/FrameworkBenchmarks,s-ludwig/FrameworkBenchmarks,valyala/FrameworkBenchmarks,seem-sky/FrameworkBenchmarks,valyala/FrameworkBenchmarks,Eyepea/FrameworkBenchmarks,circlespainter/FrameworkBenchmarks,seem-sky/FrameworkBenchmarks,xitrum-framework/FrameworkBenchmarks,diablonhn/FrameworkBenchmarks,testn/FrameworkBenchmarks,psfblair/FrameworkBenchmarks,zane-techempower/FrameworkBenchmarks,circlespainter/FrameworkBenchmarks,yunspace/FrameworkBenchma
rks,joshk/FrameworkBenchmarks,zhuochenKIDD/FrameworkBenchmarks,zhuochenKIDD/FrameworkBenchmarks,Rydgel/FrameworkBenchmarks,martin-g/FrameworkBenchmarks,diablonhn/FrameworkBenchmarks,PermeAgility/FrameworkBenchmarks,yunspace/FrameworkBenchmarks,RockinRoel/FrameworkBenchmarks,Verber/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,valyala/FrameworkBenchmarks,xitrum-framework/FrameworkBenchmarks,joshk/FrameworkBenchmarks,F3Community/FrameworkBenchmarks,RockinRoel/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,actframework/FrameworkBenchmarks,ashawnbandy-te-tfb/FrameworkBenchmarks,martin-g/FrameworkBenchmarks,jaguililla/FrameworkBenchmarks,Verber/FrameworkBenchmarks,julienschmidt/FrameworkBenchmarks,khellang/FrameworkBenchmarks,diablonhn/FrameworkBenchmarks,Rayne/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,Dith3r/FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,ashawnbandy-te-tfb/FrameworkBenchmarks,zloster/FrameworkBenchmarks,nbrady-techempower/FrameworkBenchmarks,lcp0578/FrameworkBenchmarks,mfirry/FrameworkBenchmarks,RockinRoel/FrameworkBenchmarks,raziel057/FrameworkBenchmarks,raziel057/FrameworkBenchmarks,torhve/FrameworkBenchmarks,knewmanTE/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,alubbe/FrameworkBenchmarks,khellang/FrameworkBenchmarks,jetty-project/FrameworkBenchmarks,zloster/FrameworkBenchmarks,martin-g/FrameworkBenchmarks,victorbriz/FrameworkBenchmarks,kostya-sh/FrameworkBenchmarks,Synchro/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,methane/FrameworkBenchmarks,Rydgel/FrameworkBenchmarks,psfblair/FrameworkBenchmarks,circlespainter/FrameworkBenchmarks,k-r-g/FrameworkBenchmarks,F3Community/FrameworkBenchmarks,youprofit/FrameworkBenchmarks,grob/FrameworkBenchmarks,jetty-project/FrameworkBenchmarks,valyala/FrameworkBenchmarks,zdanek/FrameworkBenchmarks,donovanmuller/FrameworkBenchmarks,valyala/FrameworkBenchmarks,kostya-sh/FrameworkBenchmarks,diablonhn/FrameworkBenchmarks,jetty-project/FrameworkBenchmarks,zane-techempower/FrameworkBenchmarks,k-r-g/
FrameworkBenchmarks,knewmanTE/FrameworkBenchmarks,seem-sky/FrameworkBenchmarks,dmacd/FB-try1,kostya-sh/FrameworkBenchmarks,zloster/FrameworkBenchmarks,fabianmurariu/FrameworkBenchmarks,donovanmuller/FrameworkBenchmarks,khellang/FrameworkBenchmarks,raziel057/FrameworkBenchmarks,greg-hellings/FrameworkBenchmarks,diablonhn/FrameworkBenchmarks,jaguililla/FrameworkBenchmarks,greg-hellings/FrameworkBenchmarks,khellang/FrameworkBenchmarks,zapov/FrameworkBenchmarks,Rayne/FrameworkBenchmarks,Ocramius/FrameworkBenchmarks,donovanmuller/FrameworkBenchmarks,jebbstewart/FrameworkBenchmarks,fabianmurariu/FrameworkBenchmarks,thousandsofthem/FrameworkBenchmarks,valyala/FrameworkBenchmarks,zdanek/FrameworkBenchmarks,Rydgel/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,F3Community/FrameworkBenchmarks,Eyepea/FrameworkBenchmarks,nathana1/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,greenlaw110/FrameworkBenchmarks,jamming/FrameworkBenchmarks,sanjoydesk/FrameworkBenchmarks,mfirry/FrameworkBenchmarks,greg-hellings/FrameworkBenchmarks,zloster/FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,PermeAgility/FrameworkBenchmarks,hperadin/FrameworkBenchmarks,thousandsofthem/FrameworkBenchmarks,youprofit/FrameworkBenchmarks,hamiltont/FrameworkBenchmarks,jamming/FrameworkBenchmarks,jebbstewart/FrameworkBenchmarks,leafo/FrameworkBenchmarks,lcp0578/FrameworkBenchmarks,sagenschneider/FrameworkBenchmarks,zapov/FrameworkBenchmarks,donovanmuller/FrameworkBenchmarks,Eyepea/FrameworkBenchmarks,nathana1/FrameworkBenchmarks,doom369/FrameworkBenchmarks,jaguililla/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,kellabyte/FrameworkBenchmarks,Verber/FrameworkBenchmarks,nbrady-techempower/FrameworkBenchmarks,ashawnbandy-te-tfb/FrameworkBenchmarks,jaguililla/FrameworkBenchmarks,Verber/FrameworkBenchmarks,sanjoydesk/FrameworkBenchmarks,greenlaw110/FrameworkBenchmarks,Eyepea/FrameworkBenchmarks,julienschmidt/FrameworkBenchmarks,Jesterovskiy/FrameworkBenchmarks,Jesterovskiy/FrameworkBenchmarks,sgml/FrameworkBe
nchmarks,zloster/FrameworkBenchmarks,RockinRoel/FrameworkBenchmarks,waiteb3/FrameworkBenchmarks,dmacd/FB-try1,herloct/FrameworkBenchmarks,Eyepea/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,jamming/FrameworkBenchmarks,kellabyte/FrameworkBenchmarks,greenlaw110/FrameworkBenchmarks,s-ludwig/FrameworkBenchmarks,greenlaw110/FrameworkBenchmarks,denkab/FrameworkBenchmarks,F3Community/FrameworkBenchmarks,jamming/FrameworkBenchmarks,actframework/FrameworkBenchmarks,Synchro/FrameworkBenchmarks,jaguililla/FrameworkBenchmarks,valyala/FrameworkBenchmarks,seem-sky/FrameworkBenchmarks,raziel057/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,zhuochenKIDD/FrameworkBenchmarks,ashawnbandy-te-tfb/FrameworkBenchmarks,denkab/FrameworkBenchmarks,torhve/FrameworkBenchmarks,kellabyte/FrameworkBenchmarks,dmacd/FB-try1,methane/FrameworkBenchmarks,Ocramius/FrameworkBenchmarks,greg-hellings/FrameworkBenchmarks,herloct/FrameworkBenchmarks,zhuochenKIDD/FrameworkBenchmarks,ratpack/FrameworkBenchmarks,thousandsofthem/FrameworkBenchmarks,steveklabnik/FrameworkBenchmarks,donovanmuller/FrameworkBenchmarks,yunspace/FrameworkBenchmarks,zdanek/FrameworkBenchmarks,circlespainter/FrameworkBenchmarks,circlespainter/FrameworkBenchmarks,kbrock/FrameworkBenchmarks,circlespainter/FrameworkBenchmarks,greg-hellings/FrameworkBenchmarks,khellang/FrameworkBenchmarks,doom369/FrameworkBenchmarks,diablonhn/FrameworkBenchmarks,jetty-project/FrameworkBenchmarks,leafo/FrameworkBenchmarks,Verber/FrameworkBenchmarks,markkolich/FrameworkBenchmarks,sxend/FrameworkBenchmarks,jetty-project/FrameworkBenchmarks,greg-hellings/FrameworkBenchmarks,sxend/FrameworkBenchmarks,Rayne/FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,sagenschneider/FrameworkBenchmarks,ratpack/FrameworkBenchmarks,doom369/FrameworkBenchmarks,Synchro/FrameworkBenchmarks,s-ludwig/FrameworkBenchmarks,alubbe/FrameworkBenchmarks,marko-asplund/FrameworkBenchmarks,Rayne/FrameworkBenchmarks,torhve/FrameworkBenchmarks,sxend/FrameworkBenchmarks,actframewor
k/FrameworkBenchmarks,marko-asplund/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,nathana1/FrameworkBenchmarks,waiteb3/FrameworkBenchmarks,markkolich/FrameworkBenchmarks,zapov/FrameworkBenchmarks,F3Community/FrameworkBenchmarks,doom369/FrameworkBenchmarks,martin-g/FrameworkBenchmarks,doom369/FrameworkBenchmarks,leafo/FrameworkBenchmarks,PermeAgility/FrameworkBenchmarks,steveklabnik/FrameworkBenchmarks,Dith3r/FrameworkBenchmarks,victorbriz/FrameworkBenchmarks,alubbe/FrameworkBenchmarks,Synchro/FrameworkBenchmarks,greenlaw110/FrameworkBenchmarks,ratpack/FrameworkBenchmarks,Jesterovskiy/FrameworkBenchmarks,sxend/FrameworkBenchmarks,knewmanTE/FrameworkBenchmarks,joshk/FrameworkBenchmarks,raziel057/FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,zhuochenKIDD/FrameworkBenchmarks,zane-techempower/FrameworkBenchmarks,nathana1/FrameworkBenchmarks,steveklabnik/FrameworkBenchmarks,RockinRoel/FrameworkBenchmarks,torhve/FrameworkBenchmarks,yunspace/FrameworkBenchmarks,julienschmidt/FrameworkBenchmarks,s-ludwig/FrameworkBenchmarks,Synchro/FrameworkBenchmarks,kellabyte/FrameworkBenchmarks,nbrady-techempower/FrameworkBenchmarks,julienschmidt/FrameworkBenchmarks,xitrum-framework/FrameworkBenchmarks,Synchro/FrameworkBenchmarks,circlespainter/FrameworkBenchmarks,F3Community/FrameworkBenchmarks,xitrum-framework/FrameworkBenchmarks,psfblair/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,psfblair/FrameworkBenchmarks,valyala/FrameworkBenchmarks,raziel057/FrameworkBenchmarks,fabianmurariu/FrameworkBenchmarks,sxend/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,marko-asplund/FrameworkBenchmarks,grob/FrameworkBenchmarks,zdanek/FrameworkBenchmarks,RockinRoel/FrameworkBenchmarks,Dith3r/FrameworkBenchmarks,kbrock/FrameworkBenchmarks,Rydgel/FrameworkBenchmarks,nbrady-techempower/FrameworkBenchmarks,s-ludwig/FrameworkBenchmarks,Jesterovskiy/FrameworkBenchmarks,mfirry/FrameworkBenchmarks,zhuochenKIDD/FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,hamiltont/FrameworkBenchmarks
,sanjoydesk/FrameworkBenchmarks,circlespainter/FrameworkBenchmarks,zane-techempower/FrameworkBenchmarks,kostya-sh/FrameworkBenchmarks,k-r-g/FrameworkBenchmarks,k-r-g/FrameworkBenchmarks,xitrum-framework/FrameworkBenchmarks,hamiltont/FrameworkBenchmarks,jaguililla/FrameworkBenchmarks,fabianmurariu/FrameworkBenchmarks,Ocramius/FrameworkBenchmarks,nathana1/FrameworkBenchmarks,ratpack/FrameworkBenchmarks,RockinRoel/FrameworkBenchmarks,zloster/FrameworkBenchmarks,herloct/FrameworkBenchmarks,testn/FrameworkBenchmarks,psfblair/FrameworkBenchmarks,sanjoydesk/FrameworkBenchmarks,nkasvosve/FrameworkBenchmarks,s-ludwig/FrameworkBenchmarks,saturday06/FrameworkBenchmarks,zapov/FrameworkBenchmarks,psfblair/FrameworkBenchmarks,donovanmuller/FrameworkBenchmarks,zapov/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,zane-techempower/FrameworkBenchmarks,nbrady-techempower/FrameworkBenchmarks,denkab/FrameworkBenchmarks,kostya-sh/FrameworkBenchmarks,joshk/FrameworkBenchmarks,greg-hellings/FrameworkBenchmarks,joshk/FrameworkBenchmarks,knewmanTE/FrameworkBenchmarks,steveklabnik/FrameworkBenchmarks,sagenschneider/FrameworkBenchmarks,lcp0578/FrameworkBenchmarks,alubbe/FrameworkBenchmarks,leafo/FrameworkBenchmarks,actframework/FrameworkBenchmarks,k-r-g/FrameworkBenchmarks,doom369/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,fabianmurariu/FrameworkBenchmarks,ratpack/FrameworkBenchmarks,F3Community/FrameworkBenchmarks,julienschmidt/FrameworkBenchmarks,seem-sky/FrameworkBenchmarks,steveklabnik/FrameworkBenchmarks,zloster/FrameworkBenchmarks,dmacd/FB-try1,stefanocasazza/FrameworkBenchmarks,greg-hellings/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,sanjoydesk/FrameworkBenchmarks,sagenschneider/FrameworkBenchmarks,nkasvosve/FrameworkBenchmarks,mfirry/FrameworkBenchmarks,dmacd/FB-try1,denkab/FrameworkBenchmarks,denkab/FrameworkBenchmarks,markkolich/FrameworkBenchmarks,greenlaw110/FrameworkBenchmarks,hamiltont/FrameworkBenchmarks,PermeAgility/FrameworkBenchmarks,methane/FrameworkBenchmarks,Eye
pea/FrameworkBenchmarks,alubbe/FrameworkBenchmarks,mfirry/FrameworkBenchmarks,nathana1/FrameworkBenchmarks,xitrum-framework/FrameworkBenchmarks,doom369/FrameworkBenchmarks,xitrum-framework/FrameworkBenchmarks,thousandsofthem/FrameworkBenchmarks,zhuochenKIDD/FrameworkBenchmarks,grob/FrameworkBenchmarks,torhve/FrameworkBenchmarks,victorbriz/FrameworkBenchmarks,kbrock/FrameworkBenchmarks,victorbriz/FrameworkBenchmarks,PermeAgility/FrameworkBenchmarks,ashawnbandy-te-tfb/FrameworkBenchmarks,jebbstewart/FrameworkBenchmarks,diablonhn/FrameworkBenchmarks,Rayne/FrameworkBenchmarks,greenlaw110/FrameworkBenchmarks,lcp0578/FrameworkBenchmarks,k-r-g/FrameworkBenchmarks,jaguililla/FrameworkBenchmarks,nathana1/FrameworkBenchmarks,waiteb3/FrameworkBenchmarks,markkolich/FrameworkBenchmarks,hperadin/FrameworkBenchmarks,kellabyte/FrameworkBenchmarks,saturday06/FrameworkBenchmarks,kostya-sh/FrameworkBenchmarks,kellabyte/FrameworkBenchmarks,greg-hellings/FrameworkBenchmarks,raziel057/FrameworkBenchmarks,Ocramius/FrameworkBenchmarks,Synchro/FrameworkBenchmarks,Rayne/FrameworkBenchmarks,PermeAgility/FrameworkBenchmarks,julienschmidt/FrameworkBenchmarks,youprofit/FrameworkBenchmarks,seem-sky/FrameworkBenchmarks,greenlaw110/FrameworkBenchmarks,hperadin/FrameworkBenchmarks,yunspace/FrameworkBenchmarks,waiteb3/FrameworkBenchmarks,martin-g/FrameworkBenchmarks,seem-sky/FrameworkBenchmarks,zane-techempower/FrameworkBenchmarks,nbrady-techempower/FrameworkBenchmarks,sanjoydesk/FrameworkBenchmarks,Jesterovskiy/FrameworkBenchmarks,zane-techempower/FrameworkBenchmarks,nathana1/FrameworkBenchmarks,nbrady-techempower/FrameworkBenchmarks,Jesterovskiy/FrameworkBenchmarks,doom369/FrameworkBenchmarks,jebbstewart/FrameworkBenchmarks,herloct/FrameworkBenchmarks,testn/FrameworkBenchmarks,victorbriz/FrameworkBenchmarks,kbrock/FrameworkBenchmarks,zloster/FrameworkBenchmarks,youprofit/FrameworkBenchmarks,PermeAgility/FrameworkBenchmarks,yunspace/FrameworkBenchmarks,denkab/FrameworkBenchmarks,valyala/FrameworkBen
chmarks,jetty-project/FrameworkBenchmarks,jaguililla/FrameworkBenchmarks,martin-g/FrameworkBenchmarks,nbrady-techempower/FrameworkBenchmarks,zloster/FrameworkBenchmarks,jamming/FrameworkBenchmarks,nbrady-techempower/FrameworkBenchmarks,greenlaw110/FrameworkBenchmarks,dmacd/FB-try1,greenlaw110/FrameworkBenchmarks,youprofit/FrameworkBenchmarks,jetty-project/FrameworkBenchmarks,nbrady-techempower/FrameworkBenchmarks,diablonhn/FrameworkBenchmarks,jebbstewart/FrameworkBenchmarks,Jesterovskiy/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,diablonhn/FrameworkBenchmarks,markkolich/FrameworkBenchmarks,doom369/FrameworkBenchmarks,sgml/FrameworkBenchmarks,hperadin/FrameworkBenchmarks,s-ludwig/FrameworkBenchmarks,zdanek/FrameworkBenchmarks,sxend/FrameworkBenchmarks,Rayne/FrameworkBenchmarks,sxend/FrameworkBenchmarks,leafo/FrameworkBenchmarks,grob/FrameworkBenchmarks,nbrady-techempower/FrameworkBenchmarks,Ocramius/FrameworkBenchmarks,lcp0578/FrameworkBenchmarks,sxend/FrameworkBenchmarks,Verber/FrameworkBenchmarks,k-r-g/FrameworkBenchmarks,nbrady-techempower/FrameworkBenchmarks,jebbstewart/FrameworkBenchmarks,PermeAgility/FrameworkBenchmarks,zane-techempower/FrameworkBenchmarks,mfirry/FrameworkBenchmarks,Dith3r/FrameworkBenchmarks,marko-asplund/FrameworkBenchmarks,Jesterovskiy/FrameworkBenchmarks,mfirry/FrameworkBenchmarks,marko-asplund/FrameworkBenchmarks,donovanmuller/FrameworkBenchmarks,nkasvosve/FrameworkBenchmarks,lcp0578/FrameworkBenchmarks,Jesterovskiy/FrameworkBenchmarks,sgml/FrameworkBenchmarks,Dith3r/FrameworkBenchmarks,dmacd/FB-try1,zane-techempower/FrameworkBenchmarks,methane/FrameworkBenchmarks,zhuochenKIDD/FrameworkBenchmarks,steveklabnik/FrameworkBenchmarks,kbrock/FrameworkBenchmarks,knewmanTE/FrameworkBenchmarks,nkasvosve/FrameworkBenchmarks,Dith3r/FrameworkBenchmarks,ratpack/FrameworkBenchmarks,sxend/FrameworkBenchmarks,valyala/FrameworkBenchmarks,methane/FrameworkBenchmarks,testn/FrameworkBenchmarks,hamiltont/FrameworkBenchmarks,Eyepea/FrameworkBenchmarks,lcp0578
/FrameworkBenchmarks,mfirry/FrameworkBenchmarks,alubbe/FrameworkBenchmarks,Eyepea/FrameworkBenchmarks,jamming/FrameworkBenchmarks,saturday06/FrameworkBenchmarks,grob/FrameworkBenchmarks,hamiltont/FrameworkBenchmarks,s-ludwig/FrameworkBenchmarks,sxend/FrameworkBenchmarks,ratpack/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,kostya-sh/FrameworkBenchmarks,F3Community/FrameworkBenchmarks,methane/FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,xitrum-framework/FrameworkBenchmarks,actframework/FrameworkBenchmarks,doom369/FrameworkBenchmarks,herloct/FrameworkBenchmarks,kellabyte/FrameworkBenchmarks,doom369/FrameworkBenchmarks,Dith3r/FrameworkBenchmarks,zapov/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,Dith3r/FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,nathana1/FrameworkBenchmarks,thousandsofthem/FrameworkBenchmarks,denkab/FrameworkBenchmarks,nathana1/FrameworkBenchmarks,marko-asplund/FrameworkBenchmarks,sagenschneider/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,jaguililla/FrameworkBenchmarks,saturday06/FrameworkBenchmarks,markkolich/FrameworkBenchmarks,waiteb3/FrameworkBenchmarks,Dith3r/FrameworkBenchmarks,k-r-g/FrameworkBenchmarks,jebbstewart/FrameworkBenchmarks,nkasvosve/FrameworkBenchmarks,yunspace/FrameworkBenchmarks,zloster/FrameworkBenchmarks,Verber/FrameworkBenchmarks,steveklabnik/FrameworkBenchmarks,methane/FrameworkBenchmarks,psfblair/FrameworkBenchmarks,saturday06/FrameworkBenchmarks,jamming/FrameworkBenchmarks,alubbe/FrameworkBenchmarks,mfirry/FrameworkBenchmarks,k-r-g/FrameworkBenchmarks,nathana1/FrameworkBenchmarks,grob/FrameworkBenchmarks,steveklabnik/FrameworkBenchmarks,jetty-project/FrameworkBenchmarks,sgml/FrameworkBenchmarks,jamming/FrameworkBenchmarks,fabianmurariu/FrameworkBenchmarks,k-r-g/FrameworkBenchmarks,jebbstewart/FrameworkBenchmarks,leafo/FrameworkBenchmarks,victorbriz/FrameworkBenchmarks,yunspace/FrameworkBenchmarks,Verber/FrameworkBenchmarks,zane-techempower/FrameworkBenchmarks,steveklabnik/FrameworkBenchmarks,k-r-g/Fr
ameworkBenchmarks,Eyepea/FrameworkBenchmarks,youprofit/FrameworkBenchmarks,xitrum-framework/FrameworkBenchmarks,Eyepea/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,hamiltont/FrameworkBenchmarks,diablonhn/FrameworkBenchmarks,s-ludwig/FrameworkBenchmarks,thousandsofthem/FrameworkBenchmarks,nathana1/FrameworkBenchmarks,PermeAgility/FrameworkBenchmarks,Rayne/FrameworkBenchmarks,ashawnbandy-te-tfb/FrameworkBenchmarks,zapov/FrameworkBenchmarks,zhuochenKIDD/FrameworkBenchmarks,Dith3r/FrameworkBenchmarks,nkasvosve/FrameworkBenchmarks,sagenschneider/FrameworkBenchmarks,victorbriz/FrameworkBenchmarks,Rydgel/FrameworkBenchmarks,psfblair/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,PermeAgility/FrameworkBenchmarks,joshk/FrameworkBenchmarks,zloster/FrameworkBenchmarks,Rayne/FrameworkBenchmarks,knewmanTE/FrameworkBenchmarks,greg-hellings/FrameworkBenchmarks,khellang/FrameworkBenchmarks,jamming/FrameworkBenchmarks,joshk/FrameworkBenchmarks,zapov/FrameworkBenchmarks,lcp0578/FrameworkBenchmarks,jetty-project/FrameworkBenchmarks,Synchro/FrameworkBenchmarks,khellang/FrameworkBenchmarks,khellang/FrameworkBenchmarks,F3Community/FrameworkBenchmarks,sxend/FrameworkBenchmarks,lcp0578/FrameworkBenchmarks,zane-techempower/FrameworkBenchmarks,F3Community/FrameworkBenchmarks,martin-g/FrameworkBenchmarks,raziel057/FrameworkBenchmarks,youprofit/FrameworkBenchmarks,waiteb3/FrameworkBenchmarks,xitrum-framework/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,jebbstewart/FrameworkBenchmarks,jetty-project/FrameworkBenchmarks,knewmanTE/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,RockinRoel/FrameworkBenchmarks,Synchro/FrameworkBenchmarks,Dith3r/FrameworkBenchmarks,thousandsofthem/FrameworkBenchmarks,markkolich/FrameworkBenchmarks,sagenschneider/FrameworkBenchmarks,psfblair/FrameworkBenchmarks,martin-g/FrameworkBenchmarks,nkasvosve/FrameworkBenchmarks,Ocramius/FrameworkBenchmarks,hperadin/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,actframework/FrameworkBenchmarks,jebbstewart/FrameworkBenchmar
ks,fabianmurariu/FrameworkBenchmarks,lcp0578/FrameworkBenchmarks,herloct/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,Jesterovskiy/FrameworkBenchmarks,zdanek/FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,greenlaw110/FrameworkBenchmarks,julienschmidt/FrameworkBenchmarks,lcp0578/FrameworkBenchmarks,joshk/FrameworkBenchmarks,sxend/FrameworkBenchmarks,saturday06/FrameworkBenchmarks,hperadin/FrameworkBenchmarks,zdanek/FrameworkBenchmarks,martin-g/FrameworkBenchmarks,martin-g/FrameworkBenchmarks,actframework/FrameworkBenchmarks,jaguililla/FrameworkBenchmarks,marko-asplund/FrameworkBenchmarks,sgml/FrameworkBenchmarks,RockinRoel/FrameworkBenchmarks,Rayne/FrameworkBenchmarks,testn/FrameworkBenchmarks,greg-hellings/FrameworkBenchmarks,nkasvosve/FrameworkBenchmarks,raziel057/FrameworkBenchmarks,greenlaw110/FrameworkBenchmarks,Rayne/FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,Dith3r/FrameworkBenchmarks,thousandsofthem/FrameworkBenchmarks,julienschmidt/FrameworkBenchmarks,circlespainter/FrameworkBenchmarks,raziel057/FrameworkBenchmarks,Jesterovskiy/FrameworkBenchmarks,greenlaw110/FrameworkBenchmarks,marko-asplund/FrameworkBenchmarks,sagenschneider/FrameworkBenchmarks,s-ludwig/FrameworkBenchmarks,seem-sky/FrameworkBenchmarks,raziel057/FrameworkBenchmarks,leafo/FrameworkBenchmarks,RockinRoel/FrameworkBenchmarks,saturday06/FrameworkBenchmarks,PermeAgility/FrameworkBenchmarks,xitrum-framework/FrameworkBenchmarks,fabianmurariu/FrameworkBenchmarks,testn/FrameworkBenchmarks,Eyepea/FrameworkBenchmarks,actframework/FrameworkBenchmarks,valyala/FrameworkBenchmarks,zdanek/FrameworkBenchmarks,Rydgel/FrameworkBenchmarks,yunspace/FrameworkBenchmarks,circlespainter/FrameworkBenchmarks,markkolich/FrameworkBenchmarks,waiteb3/FrameworkBenchmarks,fabianmurariu/FrameworkBenchmarks,methane/FrameworkBenchmarks,nkasvosve/FrameworkBenchmarks,zhuochenKIDD/FrameworkBenchmarks,mfirry/FrameworkBenchmarks,F3Community/FrameworkBenchmarks,jamming/FrameworkBenchmarks,jaguililla/FrameworkBen
chmarks,hamiltont/FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,Jesterovskiy/FrameworkBenchmarks,markkolich/FrameworkBenchmarks,Rayne/FrameworkBenchmarks,kellabyte/FrameworkBenchmarks,ratpack/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,sgml/FrameworkBenchmarks,herloct/FrameworkBenchmarks,PermeAgility/FrameworkBenchmarks,nbrady-techempower/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,mfirry/FrameworkBenchmarks,herloct/FrameworkBenchmarks,knewmanTE/FrameworkBenchmarks,jetty-project/FrameworkBenchmarks,khellang/FrameworkBenchmarks,psfblair/FrameworkBenchmarks,mfirry/FrameworkBenchmarks,zane-techempower/FrameworkBenchmarks,torhve/FrameworkBenchmarks,fabianmurariu/FrameworkBenchmarks,RockinRoel/FrameworkBenchmarks,doom369/FrameworkBenchmarks,julienschmidt/FrameworkBenchmarks,denkab/FrameworkBenchmarks,khellang/FrameworkBenchmarks,kostya-sh/FrameworkBenchmarks,torhve/FrameworkBenchmarks,Rydgel/FrameworkBenchmarks,knewmanTE/FrameworkBenchmarks,nkasvosve/FrameworkBenchmarks,jamming/FrameworkBenchmarks,mfirry/FrameworkBenchmarks,Dith3r/FrameworkBenchmarks,greenlaw110/FrameworkBenchmarks,hamiltont/FrameworkBenchmarks,Verber/FrameworkBenchmarks,steveklabnik/FrameworkBenchmarks,PermeAgility/FrameworkBenchmarks,actframework/FrameworkBenchmarks,F3Community/FrameworkBenchmarks,Verber/FrameworkBenchmarks,nbrady-techempower/FrameworkBenchmarks,Eyepea/FrameworkBenchmarks,Synchro/FrameworkBenchmarks,grob/FrameworkBenchmarks,leafo/FrameworkBenchmarks,joshk/FrameworkBenchmarks,Synchro/FrameworkBenchmarks,sxend/FrameworkBenchmarks,martin-g/FrameworkBenchmarks,xitrum-framework/FrameworkBenchmarks,circlespainter/FrameworkBenchmarks,denkab/FrameworkBenchmarks,Jesterovskiy/FrameworkBenchmarks,nkasvosve/FrameworkBenchmarks,testn/FrameworkBenchmarks,sagenschneider/FrameworkBenchmarks,seem-sky/FrameworkBenchmarks,jetty-project/FrameworkBenchmarks,ratpack/FrameworkBenchmarks,jebbstewart/FrameworkBenchmarks,dmacd/FB-try1,leafo/FrameworkBenchmarks,fabianmurariu/FrameworkBenchmarks,d
onovanmuller/FrameworkBenchmarks,jetty-project/FrameworkBenchmarks,markkolich/FrameworkBenchmarks,s-ludwig/FrameworkBenchmarks,zapov/FrameworkBenchmarks,k-r-g/FrameworkBenchmarks,youprofit/FrameworkBenchmarks,Rydgel/FrameworkBenchmarks,herloct/FrameworkBenchmarks,grob/FrameworkBenchmarks,ashawnbandy-te-tfb/FrameworkBenchmarks,jebbstewart/FrameworkBenchmarks,testn/FrameworkBenchmarks,Rydgel/FrameworkBenchmarks,valyala/FrameworkBenchmarks,sgml/FrameworkBenchmarks,greg-hellings/FrameworkBenchmarks,waiteb3/FrameworkBenchmarks,thousandsofthem/FrameworkBenchmarks,donovanmuller/FrameworkBenchmarks,thousandsofthem/FrameworkBenchmarks,sgml/FrameworkBenchmarks,grob/FrameworkBenchmarks,marko-asplund/FrameworkBenchmarks,zane-techempower/FrameworkBenchmarks,Ocramius/FrameworkBenchmarks,k-r-g/FrameworkBenchmarks,donovanmuller/FrameworkBenchmarks,sagenschneider/FrameworkBenchmarks,Rydgel/FrameworkBenchmarks,sxend/FrameworkBenchmarks,hperadin/FrameworkBenchmarks,donovanmuller/FrameworkBenchmarks,nbrady-techempower/FrameworkBenchmarks,ratpack/FrameworkBenchmarks,kostya-sh/FrameworkBenchmarks,denkab/FrameworkBenchmarks,alubbe/FrameworkBenchmarks,sagenschneider/FrameworkBenchmarks,grob/FrameworkBenchmarks,actframework/FrameworkBenchmarks,hperadin/FrameworkBenchmarks,ashawnbandy-te-tfb/FrameworkBenchmarks,ashawnbandy-te-tfb/FrameworkBenchmarks,raziel057/FrameworkBenchmarks,actframework/FrameworkBenchmarks,thousandsofthem/FrameworkBenchmarks,k-r-g/FrameworkBenchmarks,youprofit/FrameworkBenchmarks,zapov/FrameworkBenchmarks,diablonhn/FrameworkBenchmarks,Dith3r/FrameworkBenchmarks,kostya-sh/FrameworkBenchmarks,youprofit/FrameworkBenchmarks,methane/FrameworkBenchmarks,kbrock/FrameworkBenchmarks,diablonhn/FrameworkBenchmarks,kellabyte/FrameworkBenchmarks,seem-sky/FrameworkBenchmarks,hperadin/FrameworkBenchmarks,jetty-project/FrameworkBenchmarks,torhve/FrameworkBenchmarks,circlespainter/FrameworkBenchmarks,hamiltont/FrameworkBenchmarks,steveklabnik/FrameworkBenchmarks,youprofit/FrameworkBench
marks,zdanek/FrameworkBenchmarks,diablonhn/FrameworkBenchmarks,s-ludwig/FrameworkBenchmarks,marko-asplund/FrameworkBenchmarks,sgml/FrameworkBenchmarks,hamiltont/FrameworkBenchmarks,kellabyte/FrameworkBenchmarks,s-ludwig/FrameworkBenchmarks,waiteb3/FrameworkBenchmarks,sanjoydesk/FrameworkBenchmarks,zhuochenKIDD/FrameworkBenchmarks,martin-g/FrameworkBenchmarks,seem-sky/FrameworkBenchmarks,khellang/FrameworkBenchmarks,testn/FrameworkBenchmarks,raziel057/FrameworkBenchmarks,psfblair/FrameworkBenchmarks,victorbriz/FrameworkBenchmarks,sgml/FrameworkBenchmarks,Eyepea/FrameworkBenchmarks,RockinRoel/FrameworkBenchmarks,kbrock/FrameworkBenchmarks,saturday06/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,hperadin/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,jetty-project/FrameworkBenchmarks,leafo/FrameworkBenchmarks,jaguililla/FrameworkBenchmarks,sagenschneider/FrameworkBenchmarks,sagenschneider/FrameworkBenchmarks,Rayne/FrameworkBenchmarks,torhve/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,knewmanTE/FrameworkBenchmarks,youprofit/FrameworkBenchmarks,joshk/FrameworkBenchmarks,s-ludwig/FrameworkBenchmarks,zdanek/FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,methane/FrameworkBenchmarks,Rydgel/FrameworkBenchmarks,marko-asplund/FrameworkBenchmarks,sanjoydesk/FrameworkBenchmarks,jamming/FrameworkBenchmarks,Verber/FrameworkBenchmarks,testn/FrameworkBenchmarks,greg-hellings/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,yunspace/FrameworkBenchmarks,F3Community/FrameworkBenchmarks,fabianmurariu/FrameworkBenchmarks,RockinRoel/FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,Rydgel/FrameworkBenchmarks,nkasvosve/FrameworkBenchmarks,waiteb3/FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,saturday06/FrameworkBenchmarks,Ocramius/FrameworkBenchmarks,PermeAgility/FrameworkBenchmarks,k-r-g/FrameworkBenchmarks,sxend/FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,testn/FrameworkBenchmarks,herloct/FrameworkBenchmarks,waiteb3/FrameworkBenchmarks,khellang/FrameworkB
enchmarks,victorbriz/FrameworkBenchmarks,khellang/FrameworkBenchmarks,kellabyte/FrameworkBenchmarks,methane/FrameworkBenchmarks,nkasvosve/FrameworkBenchmarks,waiteb3/FrameworkBenchmarks,nathana1/FrameworkBenchmarks,zloster/FrameworkBenchmarks,julienschmidt/FrameworkBenchmarks,Rydgel/FrameworkBenchmarks,martin-g/FrameworkBenchmarks,jetty-project/FrameworkBenchmarks,psfblair/FrameworkBenchmarks,doom369/FrameworkBenchmarks,grob/FrameworkBenchmarks,Synchro/FrameworkBenchmarks,steveklabnik/FrameworkBenchmarks,dmacd/FB-try1,martin-g/FrameworkBenchmarks,greenlaw110/FrameworkBenchmarks,jebbstewart/FrameworkBenchmarks,markkolich/FrameworkBenchmarks,joshk/FrameworkBenchmarks,saturday06/FrameworkBenchmarks,kostya-sh/FrameworkBenchmarks,saturday06/FrameworkBenchmarks,ratpack/FrameworkBenchmarks,knewmanTE/FrameworkBenchmarks,herloct/FrameworkBenchmarks,Jesterovskiy/FrameworkBenchmarks,doom369/FrameworkBenchmarks,dmacd/FB-try1,jaguililla/FrameworkBenchmarks,alubbe/FrameworkBenchmarks,knewmanTE/FrameworkBenchmarks,nbrady-techempower/FrameworkBenchmarks,saturday06/FrameworkBenchmarks,dmacd/FB-try1,Dith3r/FrameworkBenchmarks,herloct/FrameworkBenchmarks,sxend/FrameworkBenchmarks,victorbriz/FrameworkBenchmarks,sxend/FrameworkBenchmarks,torhve/FrameworkBenchmarks,Verber/FrameworkBenchmarks,steveklabnik/FrameworkBenchmarks,nbrady-techempower/FrameworkBenchmarks,sanjoydesk/FrameworkBenchmarks,s-ludwig/FrameworkBenchmarks,grob/FrameworkBenchmarks,mfirry/FrameworkBenchmarks,victorbriz/FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,nathana1/FrameworkBenchmarks,denkab/FrameworkBenchmarks,jamming/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,doom369/FrameworkBenchmarks,valyala/FrameworkBenchmarks,jaguililla/FrameworkBenchmarks,lcp0578/FrameworkBenchmarks,zloster/FrameworkBenchmarks,RockinRoel/FrameworkBenchmarks,marko-asplund/FrameworkBenchmarks,jebbstewart/FrameworkBenchmarks,hperadin/FrameworkBenchmarks,actframework/FrameworkBenchmarks,Dith3r/FrameworkBenchmarks,nbrady-techempower/F
rameworkBenchmarks,martin-g/FrameworkBenchmarks,Verber/FrameworkBenchmarks,youprofit/FrameworkBenchmarks,sxend/FrameworkBenchmarks,seem-sky/FrameworkBenchmarks,denkab/FrameworkBenchmarks,greenlaw110/FrameworkBenchmarks,raziel057/FrameworkBenchmarks,zapov/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,ashawnbandy-te-tfb/FrameworkBenchmarks,Eyepea/FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,victorbriz/FrameworkBenchmarks,methane/FrameworkBenchmarks,knewmanTE/FrameworkBenchmarks,psfblair/FrameworkBenchmarks,joshk/FrameworkBenchmarks,alubbe/FrameworkBenchmarks,valyala/FrameworkBenchmarks,ashawnbandy-te-tfb/FrameworkBenchmarks,greg-hellings/FrameworkBenchmarks,joshk/FrameworkBenchmarks,grob/FrameworkBenchmarks,zhuochenKIDD/FrameworkBenchmarks,Synchro/FrameworkBenchmarks,hamiltont/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,Jesterovskiy/FrameworkBenchmarks,Ocramius/FrameworkBenchmarks,zdanek/FrameworkBenchmarks,zapov/FrameworkBenchmarks,herloct/FrameworkBenchmarks,joshk/FrameworkBenchmarks,Eyepea/FrameworkBenchmarks,ashawnbandy-te-tfb/FrameworkBenchmarks,PermeAgility/FrameworkBenchmarks,sagenschneider/FrameworkBenchmarks,yunspace/FrameworkBenchmarks,testn/FrameworkBenchmarks,steveklabnik/FrameworkBenchmarks,kellabyte/FrameworkBenchmarks,ashawnbandy-te-tfb/FrameworkBenchmarks,fabianmurariu/FrameworkBenchmarks,nathana1/FrameworkBenchmarks,greenlaw110/FrameworkBenchmarks,marko-asplund/FrameworkBenchmarks,martin-g/FrameworkBenchmarks,s-ludwig/FrameworkBenchmarks,Jesterovskiy/FrameworkBenchmarks,markkolich/FrameworkBenchmarks,seem-sky/FrameworkBenchmarks,actframework/FrameworkBenchmarks,alubbe/FrameworkBenchmarks,herloct/FrameworkBenchmarks,steveklabnik/FrameworkBenchmarks,kostya-sh/FrameworkBenchmarks,zloster/FrameworkBenchmarks,zdanek/FrameworkBenchmarks,donovanmuller/FrameworkBenchmarks,kbrock/FrameworkBenchmarks,Ocramius/FrameworkBenchmarks,julienschmidt/FrameworkBenchmarks,denkab/FrameworkBenchmarks,khellang/FrameworkBenchmarks | flask/app.py | 
flask/app.py | from flask import Flask, jsonify, request
from flask.ext.sqlalchemy import SQLAlchemy
from sqlalchemy import create_engine
from random import randint
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql://benchmarkdbuser:benchmarkdbpass@DBHOSTNAME:3306/hello_world'
db = SQLAlchemy(app)
dbraw_engine = create_engine(app.config['SQLALCHEMY_DATABASE_URI'])
class World(db.Model):
    # ORM row for the benchmark's "World" table: an integer primary key
    # plus a single random-integer payload column.
    __tablename__ = "World"
    id = db.Column(db.Integer, primary_key=True)
    randomNumber = db.Column(db.Integer)

    # http://stackoverflow.com/questions/7102754/jsonify-a-sqlalchemy-result-set-in-flask
    @property
    def serialize(self):
        """Return object data in easily serializeable format"""
        return {
            'id' : self.id,
            'randomNumber': self.randomNumber
        }
@app.route("/json")
def hello():
    """JSON-serialization test endpoint: returns a fixed greeting object."""
    return jsonify({"message": "Hello, World!"})
@app.route("/db")
def get_random_world():
    """Fetch `queries` random World rows via the ORM and return them as JSON."""
    count = int(request.args.get("queries", 1))
    worlds = [World.query.get(randint(1, 10000)).serialize
              for _ in range(count)]
    return jsonify(worlds=worlds)
@app.route("/dbs")
def get_random_world_single():
    """Fetch one random World row via the ORM and return it as JSON."""
    row = World.query.get(randint(1, 10000))
    return jsonify(worlds=[row.serialize])
@app.route("/dbraw")
def get_random_world_raw():
    """Fetch `queries` random world rows over a raw SQLAlchemy connection.

    Improvements over the original: the SQL uses a DBAPI bound parameter
    instead of string concatenation, and the connection is released even
    when a query raises.
    """
    connection = dbraw_engine.connect()
    try:
        num_queries = int(request.args.get("queries", 1))
        worlds = []
        for _ in range(num_queries):
            wid = randint(1, 10000)
            # Parameterized query -- never build SQL by string concatenation.
            result = connection.execute(
                "SELECT * FROM world WHERE id = %s", (wid,)).fetchone()
            worlds.append({'id': result[0], 'randomNumber': result[1]})
    finally:
        connection.close()
    return jsonify(worlds=worlds)
@app.route("/dbsraw")
def get_random_world_single_raw():
    """Fetch one random world row over a raw SQLAlchemy connection.

    Fixes a syntax error in the original (a stray ')' inside the list
    literal), parameterizes the SQL, and guarantees the connection is
    closed via try/finally.
    """
    connection = dbraw_engine.connect()
    try:
        wid = randint(1, 10000)
        result = connection.execute(
            "SELECT * FROM world WHERE id = %s", (wid,)).fetchone()
        worlds = [{'id': result[0], 'randomNumber': result[1]}]
    finally:
        connection.close()
    return jsonify(worlds=worlds)
if __name__ == "__main__":
app.run()
| from flask import Flask, jsonify, request
from flask.ext.sqlalchemy import SQLAlchemy
from sqlalchemy import create_engine
from random import randint
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql://benchmarkdbuser:benchmarkdbpass@192.168.0.12:3306/hello_world'
db = SQLAlchemy(app)
dbraw_engine = create_engine(app.config['SQLALCHEMY_DATABASE_URI'])
class World(db.Model):
    # ORM row for the benchmark's "World" table: an integer primary key
    # plus a single random-integer payload column.
    __tablename__ = "World"
    id = db.Column(db.Integer, primary_key=True)
    randomNumber = db.Column(db.Integer)

    # http://stackoverflow.com/questions/7102754/jsonify-a-sqlalchemy-result-set-in-flask
    @property
    def serialize(self):
        """Return object data in easily serializeable format"""
        return {
            'id' : self.id,
            'randomNumber': self.randomNumber
        }
@app.route("/json")
def hello():
    """Return the fixed JSON-serialization test payload."""
    return jsonify({"message": "Hello, World!"})
@app.route("/db")
def get_random_world():
    """Fetch `queries` random World rows via the ORM and return them as JSON."""
    count = int(request.args.get("queries", 1))
    worlds = [World.query.get(randint(1, 10000)).serialize
              for _ in range(count)]
    return jsonify(worlds=worlds)
@app.route("/dbs")
def get_random_world_single():
    """Fetch a single random World row via the ORM and return it as JSON."""
    return jsonify(worlds=[World.query.get(randint(1, 10000)).serialize])
@app.route("/dbraw")
def get_random_world_raw():
    """Fetch `queries` random world rows over a raw SQLAlchemy connection.

    Improvements over the original: the SQL uses a DBAPI bound parameter
    instead of string concatenation, and the connection is released even
    when a query raises.
    """
    connection = dbraw_engine.connect()
    try:
        num_queries = int(request.args.get("queries", 1))
        worlds = []
        for _ in range(num_queries):
            wid = randint(1, 10000)
            # Parameterized query -- never build SQL by string concatenation.
            result = connection.execute(
                "SELECT * FROM world WHERE id = %s", (wid,)).fetchone()
            worlds.append({'id': result[0], 'randomNumber': result[1]})
    finally:
        connection.close()
    return jsonify(worlds=worlds)
@app.route("/dbsraw")
def get_random_world_single_raw():
    """Fetch one random world row over a raw SQLAlchemy connection.

    Improvements over the original: parameterized SQL instead of string
    concatenation, and the connection is closed via try/finally so it is
    not leaked when the query raises.
    """
    connection = dbraw_engine.connect()
    try:
        wid = randint(1, 10000)
        result = connection.execute(
            "SELECT * FROM world WHERE id = %s", (wid,)).fetchone()
        worlds = [{'id': result[0], 'randomNumber': result[1]}]
    finally:
        connection.close()
    return jsonify(worlds=worlds)
if __name__ == "__main__":
app.run()
| bsd-3-clause | Python |
5c003984a9fa0776e926ced2727b87284e37dbd3 | Create flotation.py | aknh9189/code | flotation.py | flotation.py |
# coding: utf-8

# IPython notebook export (Python 2): density-by-flotation lab.  Weights are
# added to a floating vessel and the displaced liquid volume is recorded; the
# slope of displacement vs. mass yields each liquid's density.

# In[1]:

import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import numpy as np
get_ipython().magic(u'matplotlib inline')


# In[2]:

# Incremental displacement readings (mL) per added weight for water and
# alcohol, and the cumulative mass (g) at each reading.
water = [0,2,2,3,1.5,1.5,3,2,2,2,2,2.5,2]
alc = [0,2.5,2.5,2.5,2.5,3,2.5,2.5]
weight = [20.9+(0.41*5*x) for x in range(0,13)]
# Cumulative displacement, offset by the initial reading (22 mL / 28 mL).
actWater = [(22)+sum(water[:x+1]) for x in range(0,len(water))]
actalc = [(28)+sum(alc[:x+1]) for x in range(0,len(alc))]
print actalc
# Fit displacement vs. mass; slope is mL/g, so 1/(slope*0.001) gives
# density in g/L, numerically equal to kg/m^3.
slopeAlc, intercept = np.polyfit(weight[:len(actalc)], actalc, 1) #mL/g
slopeWater, interssss = np.polyfit(weight, actWater, 1)
print slopeWater,slopeAlc
densityWater = 1/(slopeWater * 0.001)
densityAlc = 1/(slopeAlc * 0.001)
print densityWater, densityAlc


# In[3]:

# Percent error against accepted densities (kg/m^3).
actualWater = 1000
actualAlc = 789
pErrorWater = (abs(actualWater-densityWater)/actualWater) * 100
pErrorAlc = (abs(actualAlc-densityAlc)/actualAlc) *100
print pErrorWater, pErrorAlc


# In[4]:

# Scatter plot: displacement vs. mass for both liquids.
plt.figure()
plt.plot(weight,actWater,"o")
plt.plot(weight[:len(actalc)],actalc,"o")
plt.xlabel("Mass (g)")
plt.ylabel("Displacement (mL)")


# In[5]:

# Sanity check of np.polyfit on a known line (expected slope 2).
x = [0,1,2,3,4]
y = [0,0.5,1,1.5,2]
plt.figure()
plt.plot(y,x)
slope,inter = np.polyfit(y,x,1)
print slope


# In[9]:

# Unit conversion: kg/m^3 -> g/cm^3 (factor 1e-3).
densityAlc * (1/100.0**3) *1000


# In[ ]:
| mit | Python | |
0c4f140d887e339c8182abb3caf6def3406e700d | add new package at v2.3.3 (#22043) | LLNL/spack,LLNL/spack,LLNL/spack,LLNL/spack,LLNL/spack | var/spack/repos/builtin/packages/professor/package.py | var/spack/repos/builtin/packages/professor/package.py | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Professor(Package):
    """Professor Monte-Carlo tuning package"""

    homepage = "https://professor.hepforge.org/"
    url = "https://professor.hepforge.org/downloads/?f=Professor-2.3.3.tar.gz"

    maintainers = ['mjk655']

    version('2.3.3', sha256='60c5ba00894c809e2c31018bccf22935a9e1f51c0184468efbdd5d27b211009f')

    depends_on('wxwidgets')
    depends_on('yoda')
    depends_on('eigen')
    depends_on('py-cython')
    depends_on('py-iminuit')
    depends_on('py-matplotlib')

    def install(self, spec, prefix):
        # Plain Makefile build: compile in the source tree, then install
        # into the Spack-provided prefix.
        make()
        make('PREFIX={0}'.format(prefix), "install")
| lgpl-2.1 | Python | |
6143408507468c1718999bc2bc16d7e394741e29 | Add unit test for TimedProc regression | saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt | tests/unit/utils/test_timed_subprocess.py | tests/unit/utils/test_timed_subprocess.py | # -*- coding: utf-8 -*-
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
# Import Salt Testing libs
from tests.support.unit import TestCase
# Import salt libs
import salt.utils.timed_subprocess as timed_subprocess
class TestTimedSubprocess(TestCase):
    def test_timedproc_with_shell_true_and_list_args(self):
        '''
        This test confirms the fix for the regression introduced in 1f7d50d.
        The TimedProc dunder init would result in a traceback if the args were
        passed as a list and shell=True was set.
        '''
        # Constructing TimedProc *is* the assertion here: before the fix,
        # __init__ itself raised for list args combined with shell=True.
        p = timed_subprocess.TimedProc(['echo', 'foo'], shell=True)
        del p  # Don't need this anymore
| apache-2.0 | Python | |
6b882d76e7ea141b12adb0464555d0f7526002fb | add word2vec skipgrams | raul-jr3/dope-learning | word2vec/gram_skip.py | word2vec/gram_skip.py | from keras.layers.core import Dense, Reshape
from keras.layers.embeddings import Embedding
from keras.models import Sequential
from keras.preprocessing.text import *
from keras.preprocessing.sequence import skipgrams
text = "I love green eggs and ham ."
tokenizer = Tokenizer()
tokenizer.fit_on_texts([text])
word2id = tokenizer.word_index
id2word = {v:k for k, v in word2id.items()}
wids = [word2id[w] for w in text_to_word_sequence(text)]
pairs, labels = skipgrams(wids, len(word2id))
print(len(pairs), len(labels))
for i in range(10):
print("({:s} ({:d}), {:s} ({:d})) -> {:d}".format(
id2word[pairs[i][0]], pairs[i][0],
id2word[pairs[i][1]], pairs[i][1],
labels[i]
))
vocab_size = 5000
embed_size = 300
word_model = Sequential()
word_model.add(Embedding(vocab_size, embed_size,
embeddings_initializer="glorot_uniform",
input_length = 1))
word_model.add(Reshape((embed_size, )))
context_model = Sequential()
context_model.add(Embedding(vocab_size, embed_size,
embeddings_initializer="glorot_uniform",
input_length = 1))
context_model.add(Reshape((embed_size, )))
model = Sequential()
model.add(word_model)
model.add(context_model)
model.add(Dense(1, kernel_initializer="glorot_uniform", activation = "sigmoid"))
model.compile(loss="mse", optimizer="adam")
| mit | Python | |
e6cf2eb5bcc164e84672d1c2c2f653da6406220e | add example of querying a specific server | tomlanyon/dnspython | examples/query_specific.py | examples/query_specific.py | #!/usr/bin/env python
# Two ways of querying a specific nameserver.
from __future__ import print_function
import dns.message
import dns.rdataclass
import dns.rdatatype
import dns.query
# This way is just like nslookup/dig:
qname = dns.name.from_text('amazon.com')
q = dns.message.make_query(qname, dns.rdatatype.NS)
print('The query is:')
print(q)
print('')
r = dns.query.udp(q, '8.8.8.8')
print('The response is:')
print(r)
print('')
print('The nameservers are:')
ns_rrset = r.find_rrset(r.answer, qname, dns.rdataclass.IN, dns.rdatatype.NS)
for rr in ns_rrset:
print(rr.target)
print('')
print('')
# A higher-level way
import dns.resolver
resolver = dns.resolver.Resolver(configure=False)
resolver.nameservers = ['8.8.8.8']
answer = dns.resolver.query('amazon.com', 'NS')
print('The nameservers are:')
for rr in answer:
print(rr.target)
| isc | Python | |
5e93c27a8ba8fed12ae871b49e295cf773b3443a | Add the get_arguments function in include | softwaresaved/international-survey | survey_creation/include/get_arguments.py | survey_creation/include/get_arguments.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import getopt
"""
Short script to parse
the argments from the command line
"""
def get_arguments(argv):
    """Parse command-line options and return the tuple (year, country).

    argv: argument list, typically sys.argv[1:].  Recognized options are
    -c/--country and -y/--year; -h prints usage and exits successfully.
    Exits with status 2 (printing usage to stderr) on malformed options
    or when either required value is missing.
    """
    usage = 'run.py -c <country> -y <year>'
    country = None
    year = None
    try:
        opts, args = getopt.getopt(argv, 'hc:y:', ['country=', 'year='])
    except getopt.GetoptError:
        # Unknown option or missing option argument.
        sys.stderr.write(usage + '\n')
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print(usage)
            sys.exit()
        elif opt in ('-c', '--country'):
            country = arg
        elif opt in ('-y', '--year'):
            year = arg
    if country and year:
        return year, country
    # One or both required values are missing: report on stderr and fail.
    sys.stderr.write('Need a country and a year. Please use the following '
                     'command:\n\t' + usage + '\n')
    sys.exit(2)
| bsd-3-clause | Python | |
4d6543c3860806730e2f3b7a6534418f0e75ff81 | add solution for Sort List | zhyu/leetcode,zhyu/leetcode | src/sortList.py | src/sortList.py | # Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
    # @param head, a ListNode
    # @return a ListNode
    def sortList(self, head):
        # Merge sort on a singly linked list: O(n log n) time, no extra
        # list allocation, O(log n) recursion depth.
        if head is None or head.next is None:
            return head
        # Locate the middle node with slow/fast pointers.
        fast, slow = head, head
        while fast.next and fast.next.next:
            slow = slow.next
            fast = fast.next.next
        # Split after `slow`.  The right-hand tuple is evaluated first, so
        # this detaches the second half (slow.next = None) and advances
        # `slow` to that half's old head in one statement -- order matters.
        slow.next, slow = None, slow.next
        # Sort both halves recursively, then merge them.
        slow, fast = self.sortList(head), self.sortList(slow)
        return self.merge(slow, fast)

    def merge(self, head1, head2):
        # Standard two-list merge using a dummy head node `p`/`res`.
        if head1 is None:
            return head2
        if head2 is None:
            return head1
        p = ListNode(0)
        res = p
        while head1 and head2:
            if head1.val < head2.val:
                p.next = head1
                head1 = head1.next
            else:
                p.next = head2
                head2 = head2.next
            p = p.next
        # Append whichever list still has nodes left.
        if head1:
            p.next = head1
        elif head2:
            p.next = head2
        return res.next
| mit | Python | |
7f2f8afa2fcc187374608dbca111ae73dfc9b800 | Create new git-todo script using argparse. | alice1017/gitTools | git-todo2.py | git-todo2.py | #!/usr/bin/env python
#coding: utf-8
import os
import sys
import argparse
from util import core
from util import adjust
from util import objects
from util.git import *
from util.color import *
from util.objects import Todo
from subprocess import Popen, PIPE
from StringIO import StringIO
from datetime import datetime
from sys import exit as kill
from gettext import gettext as _
parser = argparse.ArgumentParser(description="You can manage to \
What you want to do on your git repository.")
subparsers = parser.add_subparsers(
title="git-todo commands",
dest="commands",
help="You can use these commands.")
cmd_init = subparsers.add_parser(
"init",
help="Program prepare for maneging your todo.")
cmd_add = subparsers.add_parser(
"add",
help="You can add new task. \
Please write task content at argument.")
cmd_add.add_argument(
"content",
action="store",
help="your task content string. If you want to write \
long string what contains spaces, please put \
double-quotation(\") to left and right side of long string.")
cmd_ls = subparsers.add_parser(
"ls",
help="You can show all todo with any filter.")
cmd_ls.add_argument(
"--filter",
action="store",
dest="filter:content",
help="This is todo filter.\
You can make the filter by string concatenate \
the filter name and filter contents by colon. \
(ex. date:2012/12/31, status:open, etc.)")
cmd_ls.add_argument(
"--sortby",
action="store",
dest="element:order",
help="This option sort todo by sort element.\
You can make the sort element by string concatenate \
the sorter element name and sort order by colon. \
(ex. date:ascending, index:Descending, etc.)")
class ArgumentNamespace(object):
    """argparse namespace that expands "name:content" option values.

    A dest such as "filter:content" combined with a value such as
    "status:open" is stored as attribute "filter-status" = "open";
    likewise "element:order" with "date:asc" becomes "sortby-date" = "asc".
    Plain attributes are stored unchanged.  None values (options the user
    did not pass) are dropped so vars(self) lists only supplied options.

    Fixes over the original: str.index() raised ValueError for every
    plain attribute name without a colon (e.g. "commands", which argparse
    always sets), and parser.error was referenced but never called.
    """

    def __setattr__(self, key, value):
        if value is None:
            # argparse assigns None for unset optional arguments; keep
            # those out of the namespace entirely (original behavior).
            return
        if ":" in key and ":" not in value:
            # A composite dest requires a composite "name:content" value.
            parser.error("expected a value of the form name:content "
                         "for option '%s'" % key)
        if key == "filter:content":
            filter_name, filter_content = value.split(":")
            self.__dict__["filter-%s" % filter_name] = filter_content
        elif key == "element:order":
            sorter_name, sort_order = value.split(":")
            self.__dict__["sortby-%s" % sorter_name] = sort_order
        else:
            self.__dict__[key] = value

    def __repr__(self):
        # items() works on both Python 2 and 3 (original used iteritems).
        return "ArgumentNamespace(%s)" % ", ".join(
            [k + "='%s'" % v for k, v in vars(self).items()])
if __name__ == "__main__":
args = parser.parse_args(namespace=ArgumentNamespace())
print args
| mit | Python | |
1f4aadc77bbc5e75db2737d5c580a887f5114945 | Add files via upload | jinro221/opseilen | LinkNaarRules.py | LinkNaarRules.py | import pygame
import sys
import os
from pygame.locals import *
pygame.init()
# Class spelregels openen
class spelregels:
def open_spelregel(self, open1):
self.open1 = img1
# Openen spelregels vanuit bestand (Emma's computer)
img1 = pygame.image.load(os.path.join('/Users/emmadrost/Documents/Development/python/spelregels.bmp'))
# W en H zijn juiste groote voor openen document 600-900
white = (0, 0, 0)
w = 600
h = 900
screen = pygame.display.set_mode((w, h))
screen.fill((white))
running = True
# Openen scherm met spelregels
while running:
screen.fill((white))
screen.blit(img1,(0,0))
pygame.display.flip()
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
| mit | Python | |
754d4dc5feab67f55751ed07c706499e7d3dac89 | add Flask-Table hello.py | Akagi201/learning-python,Akagi201/learning-python,Akagi201/learning-python,Akagi201/learning-python,Akagi201/learning-python | flask/Flask-Table/hello.py | flask/Flask-Table/hello.py | # import things
from flask_table import Table, Col
# Declare your table
class ItemTable(Table):
    # One Col per rendered column: the constructor argument is the header
    # text; the attribute name is looked up on each row item.
    name = Col('Name')
    description = Col('Description')
# Get some objects
class Item(object):
    """Simple value object holding one table row: a name and a description."""

    def __init__(self, name, description):
        self.name, self.description = name, description
items = [Item('Name1', 'Description1'),
Item('Name2', 'Description2'),
Item('Name3', 'Description3')]
# Or, equivalently, some dicts
# items = [dict(name='Name1', description='Description1'),
# dict(name='Name2', description='Description2'),
# dict(name='Name3', description='Description3')]
# Or, more likely, load items from your database with something like
# items = ItemModel.query.all()
# Populate the table
table = ItemTable(items)
# Print the html
print(table.__html__())
# or just {{ table }} from within a Jinja template
| mit | Python | |
39fc0fe91ca4bf787ceeab9ff594168f70fe0dba | Add a new version of dump source that actually marches along the nodes | michael-okeefe/soep-sandbox | src/python2/dump_source.py | src/python2/dump_source.py | # Import the JModelica.org Python packages
import pymodelica
from pymodelica.compiler_wrappers import ModelicaCompiler
# Create a compiler and compiler target object
mc = ModelicaCompiler()
# Build trees as if for an FMU or Model Exchange v 1.0
#target = mc.create_target_object("me", "1.0")
source = mc.parse_model("CauerLowPassAnalog.mo")
indent_amount = 2
def dump(src, fid, indent=0, indent_amount=2):
    """Recursively write the AST to fid, one indented node name per line.

    src: node exposing getNodeName(), numChild and children.
    fid: writable file-like object.
    indent: current nesting depth.
    indent_amount: spaces per nesting level (generalized from the module
    global; the default of 2 preserves the original behavior).
    """
    ind = " " * (indent_amount * indent)
    try:
        fid.write(ind + src.getNodeName() + "\n")
    except Exception:
        # Some nodes do not expose getNodeName(); fall back to str(src).
        # (Was a bare except, which also swallowed KeyboardInterrupt.)
        fid.write(ind + "exception: " + str(src) + "\n")
    try:
        for idx in range(src.numChild):
            dump(src.children[idx], fid, indent + 1, indent_amount)
    except Exception:
        fid.write(ind + "(exception)\n")
# dump the filter instance
with open('out.txt', 'w') as fid:
dump(source, fid, 0)
print "DONE!"
| mit | Python |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.