commit stringlengths 40 40 | subject stringlengths 1 1.49k | old_file stringlengths 4 311 | new_file stringlengths 4 311 | new_contents stringlengths 1 29.8k | old_contents stringlengths 0 9.9k | lang stringclasses 3 values | proba float64 0 1 |
|---|---|---|---|---|---|---|---|
462cdfaf93f23e227b8da44e143a5ff9e8c047be | test futil for files | tests/test_futil.py | tests/test_futil.py | """Run doctests in pug.nlp.futil."""
from __future__ import print_function, absolute_import
import doctest
import pug.nlp.futil
from unittest import TestCase
class DoNothingTest(TestCase):
"""A useless TestCase to encourage Django unittests to find this module and run `load_tests()`."""
def test_example(self):
self.assertTrue(True)
def load_tests(loader, tests, ignore):
"""Run doctests for the pug.nlp.futil module"""
tests.addTests(doctest.DocTestSuite(pug.nlp.futil, optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE))
return tests
| Python | 0 | |
a1039c2e38243b64d2027621aa87ee020636f23b | Add initial test for routes. | tests/test_views.py | tests/test_views.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
import website
import unittest
import tempfile
class FPOTestCase(unittest.TestCase):
def test_homepage(self):
self.app = website.app.test_client()
resp = self.app.get('/')
self.assertEqual(resp.status_code, 200)
def test_admin(self):
self.app = website.app.test_client()
resp = self.app.get('/admin/')
self.assertEqual(resp.status_code, 200)
if __name__ == '__main__':
unittest.main()
| Python | 0 | |
6cebbd302556469dd4231d6252ec29c5d7c1165c | add script to convert data from Rime/luna-pinyin | data/convertdict.py | data/convertdict.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
def uniq(seq): # Dave Kirby
# Order preserving
seen = set()
return [x for x in seq if x not in seen and not seen.add(x)]
def pinyin(word):
N = len(word)
pos = 0
result = []
while pos < N:
for i in range(N, pos, -1):
frag = word[pos:i]
if frag in chdict:
result.append(sorted(chdict[frag], key=lambda x: -prob.get((frag, x), 0))[0])
break
pos = i
return ' '.join(result)
chdict = {}
prob = {}
started = False
# Pass 1: Load Pinyin and its probability from dict
with open('luna_pinyin.dict.yaml', 'r', encoding='utf-8') as f:
for ln in f:
ln = ln.strip()
if started and ln and ln[0] != '#':
l = ln.split('\t')
w, c = l[0], l[1]
if w in chdict:
chdict[w].append(c)
else:
chdict[w] = [c]
if len(l) == 3:
if l[2][-1] == '%':
p = float(l[2][:-1]) / 100
else:
p = float(l[2])
prob[(w, c)] = p
elif ln == '...':
started = True
essay = {}
# Pass 2: Load more words and word frequency
with open('essay.txt', 'r', encoding='utf-8') as f:
for ln in f:
word, freq = ln.strip().split('\t')
# add-one smoothing
essay[word] = int(freq) + 1
if len(word) > 1:
c = pinyin(word)
if word not in chdict:
chdict[word] = [c]
# Pass 3: Calculate (word, pinyin) pair frequency
final = []
for word, codes in chdict.items():
for code in codes:
freq = max(int(essay.get(word, 1) * prob.get((word, code), 1)), 1)
final.append((word, code, freq))
final.sort()
with open('pinyin_rime.txt', 'w', encoding='utf-8') as f:
for item in final:
f.write('%s\t%s\t%s\n' % item)
| Python | 0 | |
7553a438672ab68206f30204d572f89bd088e744 | Add files via upload | pylab.py | pylab.py | import numpy as np
import matplotlib.pyplot as plt
import sympy as sympy
import csv
masterDataList = []
with open('candata.csv', 'r') as f:
reader = csv.reader(f)
for row in reader:
commitList = list(reader)
masterDataList.append(commitList)
print(masterDataList[0][1][0])
"""number of lines of data from csv = n """
dataKind = 2
n = 4
integers = []
for r in range(0,n):
integers.append(r)
for a in integers:
print(masterDataList[0][a][dataKind])
""" note there is an extra shell list so format is for any item... masterDatList[0][time in s][type of item]
dataKind = input("Data measured {options are: gps time, gps lat, gps long, gps alt, gps speed, gps course, gyroX, gyroy, gyroZ, compass, temp, humidity, pressure, long dist, lat dist, alt dist} = ")
withRespectTo = input("Measuring with respect to _____ {options are: time, altitude}")
"""
"""once input mechanism known, can automate data input - for now random data"""
x = np.array([0, 1, 2, 3, 4,
5, 6, 7, 8, 9])
y = np.array([0, 9.8, 16, 17, 20,
23, 27, 34, 47, 70])
"""acceleration for y for windform"""
order = int(input("order = "))
"""order is the order of polynomial we can try to estimate the data, can oscillate
I've made it throw up a warning if the data and the order mismatch loads
and making an order > 100 polynomial will like start screwing up computer"""
coefficients = np.polyfit(x, y, order)
polynomial = np.poly1d(coefficients)
functionToIntegrate = ""
for coefficient in coefficients:
if str(coefficient).find("-") == -1:
coefficient = str(coefficient)
coefficient = "+ "+str(coefficient)
if str(coefficient).find("-") == 1:
coefficient = str(coefficient)
coefficient = "- "+str(coefficient)
functionToIntegrate = functionToIntegrate + str(coefficient)+"z "
print(functionToIntegrate)
""" NEED TO MAKE EXCEPTION FOR LAST COEFFICIENT """
powerArray = []
for p,l in enumerate(functionToIntegrate):
if l == "z":
powerArray.append(p)
print(powerArray)
exponentLengthCount = 0
for power in powerArray:
exponent = "**"+str(order)
exponentLengthCount = exponentLengthCount + len(exponent)
functionToIntegrate = functionToIntegrate[:(power+1+exponentLengthCount-(len(exponent)))] + exponent + functionToIntegrate[((power+1+exponentLengthCount-(len(exponent)))):]
order = order-1
print(functionToIntegrate)
xs = np.arange(0, 9, 1)
ys = polynomial(xs)
func = "z**3 + z"
sympy.init_printing(use_unicode=False, wrap_line=False, no_global=True)
z = sympy.Symbol('z')
indefiniteIntegral = sympy.integrate(func, z)
print(indefiniteIntegral)
plt.plot(x, y, 'x')
plt.plot(xs, ys)
plt.ylabel('y')
plt.xlabel('x')
plt.show()
| Python | 0 | |
41e3d696967b523d0d031a0a17d18c9804f455ee | Change G+ default type | djangocms_blog/settings.py | djangocms_blog/settings.py | # -*- coding: utf-8 -*-
from django.conf import settings
from meta_mixin import settings as meta_settings
BLOG_IMAGE_THUMBNAIL_SIZE = getattr(settings, 'BLOG_IMAGE_THUMBNAIL_SIZE', {
'size': '120x120',
'crop': True,
'upscale': False
})
BLOG_IMAGE_FULL_SIZE = getattr(settings, 'BLOG_IMAGE_FULL_SIZE', {
'size': '640x120',
'crop': True,
'upscale': False
})
BLOG_TAGCLOUD_MIN = getattr(settings, 'BLOG_TAGCLOUD_MIN', 1)
BLOG_TAGCLOUD_MAX = getattr(settings, 'BLOG_TAGCLOUD_MAX', 10)
BLOG_PAGINATION = getattr(settings, 'BLOG_PAGINATION', 10)
BLOG_LATEST_POSTS = getattr(settings, 'BLOG_LATEST_POSTS', 5)
BLOG_POSTS_LIST_TRUNCWORDS_COUNT = getattr(settings, 'BLOG_POSTS_LIST_TRUNCWORDS_COUNT', 100)
BLOG_TYPE = getattr(settings, 'BLOG_TYPE', 'Article')
BLOG_FB_TYPE = getattr(settings, 'BLOG_FB_TYPE', 'Article')
BLOG_FB_APPID = getattr(settings, 'BLOG_FB_APPID', meta_settings.FB_APPID)
BLOG_FB_PROFILE_ID = getattr(settings, 'BLOG_FB_PROFILE_ID', meta_settings.FB_PROFILE_ID)
BLOG_FB_PUBLISHER = getattr(settings, 'BLOG_FB_PUBLISHER', meta_settings.FB_PUBLISHER)
BLOG_FB_AUTHOR_URL = getattr(settings, 'BLOG_FB_AUTHOR_URL', 'get_author_url')
BLOG_FB_AUTHOR = getattr(settings, 'BLOG_FB_AUTHOR', 'get_author_name')
BLOG_TWITTER_TYPE = getattr(settings, 'BLOG_TWITTER_TYPE', 'Summary')
BLOG_TWITTER_SITE = getattr(settings, 'BLOG_TWITTER_SITE', meta_settings.TWITTER_SITE)
BLOG_TWITTER_AUTHOR = getattr(settings, 'BLOG_TWITTER_AUTHOR', 'get_author_twitter')
BLOG_GPLUS_TYPE = getattr(settings, 'BLOG_GPLUS_SCOPE_CATEGORY', 'Blog')
BLOG_GPLUS_AUTHOR = getattr(settings, 'BLOG_GPLUS_AUTHOR', 'get_author_gplus')
BLOG_ENABLE_COMMENTS = getattr(settings, 'BLOG_ENABLE_COMMENTS', True)
BLOG_USE_PLACEHOLDER = getattr(settings, 'BLOG_USE_PLACEHOLDER', True)
| # -*- coding: utf-8 -*-
from django.conf import settings
from meta_mixin import settings as meta_settings
BLOG_IMAGE_THUMBNAIL_SIZE = getattr(settings, 'BLOG_IMAGE_THUMBNAIL_SIZE', {
'size': '120x120',
'crop': True,
'upscale': False
})
BLOG_IMAGE_FULL_SIZE = getattr(settings, 'BLOG_IMAGE_FULL_SIZE', {
'size': '640x120',
'crop': True,
'upscale': False
})
BLOG_TAGCLOUD_MIN = getattr(settings, 'BLOG_TAGCLOUD_MIN', 1)
BLOG_TAGCLOUD_MAX = getattr(settings, 'BLOG_TAGCLOUD_MAX', 10)
BLOG_PAGINATION = getattr(settings, 'BLOG_PAGINATION', 10)
BLOG_LATEST_POSTS = getattr(settings, 'BLOG_LATEST_POSTS', 5)
BLOG_POSTS_LIST_TRUNCWORDS_COUNT = getattr(settings, 'BLOG_POSTS_LIST_TRUNCWORDS_COUNT', 100)
BLOG_TYPE = getattr(settings, 'BLOG_TYPE', 'Article')
BLOG_FB_TYPE = getattr(settings, 'BLOG_FB_TYPE', 'Article')
BLOG_FB_APPID = getattr(settings, 'BLOG_FB_APPID', meta_settings.FB_APPID)
BLOG_FB_PROFILE_ID = getattr(settings, 'BLOG_FB_PROFILE_ID', meta_settings.FB_PROFILE_ID)
BLOG_FB_PUBLISHER = getattr(settings, 'BLOG_FB_PUBLISHER', meta_settings.FB_PUBLISHER)
BLOG_FB_AUTHOR_URL = getattr(settings, 'BLOG_FB_AUTHOR_URL', 'get_author_url')
BLOG_FB_AUTHOR = getattr(settings, 'BLOG_FB_AUTHOR', 'get_author_name')
BLOG_TWITTER_TYPE = getattr(settings, 'BLOG_TWITTER_TYPE', 'Summary')
BLOG_TWITTER_SITE = getattr(settings, 'BLOG_TWITTER_SITE', meta_settings.TWITTER_SITE)
BLOG_TWITTER_AUTHOR = getattr(settings, 'BLOG_TWITTER_AUTHOR', 'get_author_twitter')
BLOG_GPLUS_TYPE = getattr(settings, 'BLOG_GPLUS_SCOPE_CATEGORY', 'Article')
BLOG_GPLUS_AUTHOR = getattr(settings, 'BLOG_GPLUS_AUTHOR', 'get_author_gplus')
BLOG_ENABLE_COMMENTS = getattr(settings, 'BLOG_ENABLE_COMMENTS', True)
BLOG_USE_PLACEHOLDER = getattr(settings, 'BLOG_USE_PLACEHOLDER', True)
| Python | 0 |
ab2b2c6f12e2e5ec53ac6d140919a343a74b7e3c | Update migration | django_afip/migrations/0017_receipt_issued_date.py | django_afip/migrations/0017_receipt_issued_date.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-06-10 13:33
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('afip', '0016_auto_20170529_2012'),
]
operations = [
migrations.AlterField(
model_name='receipt',
name='issued_date',
field=models.DateField(
help_text=(
'Can diverge up to 5 days for good, or 10 days otherwise'
),
verbose_name='issued date',
),
),
]
| Python | 0 | |
a52dd9d66ff7d9a29f6d635e5ca1a2a0584c267b | Add rosetta utils | rosetta_utils.py | rosetta_utils.py | # From: https://github.com/mbi/django-rosetta/issues/50
# Gunicorn may work with --reload option but it needs
# https://pypi.python.org/pypi/inotify package for performances
from django.dispatch import receiver
from rosetta.signals import post_save
import time
import os
@receiver(post_save)
def restart_server(sender, **kwargs):
os.system("./gunicorn.sh stop")
pass
| Python | 0.0015 | |
4de410b1ea93665f22874826ceebcea68737dde7 | Add permissions list | tlbot/permission.py | tlbot/permission.py | ###############################################################################
# TransportLayerBot: Permission List - All-in-one modular bot for Discord #
# Copyright (C) 2017 TransportLayer #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Affero General Public License as published #
# by the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU Affero General Public License for more details. #
# #
# You should have received a copy of the GNU Affero General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
###############################################################################
# Permissions as of 01 Aug 2017
# Values: https://discordapp.com/developers/docs/topics/permissions
CREATE_INSTANT_INVITE = 0x00000001 # Allows creation of instant invites
KICK_MEMBERS = 0x00000002 # Allows kicking members
BAN_MEMBERS = 0x00000004 # Allows banning members
ADMINISTRATOR = 0x00000008 # Allows all permissions and bypasses channel permission overwrites
MANAGE_CHANNELS = 0x00000010 # Allows management and editing of channels
MANAGE_GUILD = 0x00000020 # Allows management and editing of the guild
ADD_REACTIONS = 0x00000040 # Allows for the addition of reactions to messages
VIEW_AUDIT_LOG = 0x00000080 # Allows for viewing of audit logs
READ_MESSAGES = 0x00000400 # Allows reading messages in a channel. The channel will not appear for users without this permission
SEND_MESSAGES = 0x00000800 # Allows for sending messages in a channel
SEND_TTS_MESSAGES = 0x00001000 # Allows for sending of /tts messages
MANAGE_MESSAGES = 0x00002000 # Allows for deletion of other users messages
EMBED_LINKS = 0x00004000 # Links sent by this user will be auto-embedded
ATTACH_FILES = 0x00008000 # Allows for uploading images and files
READ_MESSAGE_HISTORY = 0x00010000 # Allows for reading of message history
MENTION_EVERYONE = 0x00020000 # Allows for using the @everyone tag to notify all users in a channel, and the @here tag to notify all online users in a channel
USE_EXTERNAL_EMOJIS = 0x00040000 # Allows the usage of custom emojis from other servers
CONNECT = 0x00100000 # Allows for joining of a voice channel
SPEAK = 0x00200000 # Allows for speaking in a voice channel
MUTE_MEMBERS = 0x00400000 # Allows for muting members in a voice channel
DEAFEN_MEMBERS = 0x00800000 # Allows for deafening of members in a voice channel
MOVE_MEMBERS = 0x01000000 # Allows for moving of members between voice channels
USE_VAD = 0x02000000 # Allows for using voice-activity-detection in a voice channel
CHANGE_NICKNAME = 0x04000000 # Allows for modification of own nickname
MANAGE_NICKNAMES = 0x08000000 # Allows for modification of other users nicknames
MANAGE_ROLES = 0x10000000 # Allows management and editing of roles
MANAGE_WEBHOOKS = 0x20000000 # Allows management and editing of webhooks
MANAGE_EMOJIS = 0x40000000 # Allows management and editing of emojis
| Python | 0.000001 | |
8410b027987f088b86989898b4fade5b0960886a | Solve problem 2 | problem002.py | problem002.py | #!/usr/bin/env python3
def fibs(maxnumber):
fib1, fib2 = 1, 2
while fib1 < maxnumber:
yield fib1
fib1, fib2 = fib2, fib1 + fib2
print(sum(f for f in fibs(4000000) if f % 2 == 0))
| Python | 0.999999 | |
278920272efd7ab959d7cad5b5f7d6c17935c7e6 | Add problem 35, circular primes | problem_35.py | problem_35.py | from math import sqrt
from time import time
PRIME_STATUS = {}
def is_prime(n):
if n == 2:
return True
if n % 2 == 0 or n <= 1:
return False
for i in range(3, int(sqrt(n))+1, 2):
if n % i == 0:
return False
return True
def check_prime_circles(num):
circles = []
s = str(num)
for i in range(len(s)):
circle = int(s[i:] + s[0:i])
circles.append(circle)
if circle not in PRIME_STATUS:
PRIME_STATUS[circle] = is_prime(circle)
if not PRIME_STATUS[circle]:
return False
return True
def main():
circular_primes = []
for num in range(2, 1000000):
if check_prime_circles(num):
circular_primes.append(num)
print 'Circular primes:', circular_primes
print 'Amount of circular primes:', len(circular_primes)
if __name__ == '__main__':
t = time()
main()
print 'Time:', time() - t
| Python | 0.000126 | |
dad430fd56b8be22bd1a3b9773f9948c3e305883 | Add unit tests for lazy strings | stringlike/test/lazy_tests.py | stringlike/test/lazy_tests.py | import sys
import os
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..')))
from stringlike.lazy import LazyString, CachedLazyString
from unittest import main, TestCase
class TestLazyString(TestCase):
def test_equality(self):
self.assertEqual(LazyString(lambda: 'abc'), 'abc')
def test_delay(self):
self.evaluateCount = 0
def func():
self.evaluateCount += 1
return 'abc'
lazyString = LazyString(func)
self.assertEqual(self.evaluateCount, 0)
self.assertEqual(lazyString, 'abc')
self.assertEqual(self.evaluateCount, 1)
self.assertEqual(lazyString, 'abc')
self.assertEqual(self.evaluateCount, 2)
class TestCachedLazyString(TestCase):
def test_equality(self):
self.assertEqual(CachedLazyString(lambda: 'abc'), 'abc')
def test_delay(self):
self.evaluateCount = 0
def func():
self.evaluateCount += 1
return 'abc'
cachedLazyString = CachedLazyString(func)
self.assertEqual(self.evaluateCount, 0)
self.assertEqual(cachedLazyString, 'abc')
self.assertEqual(self.evaluateCount, 1)
self.assertEqual(cachedLazyString, 'abc')
self.assertEqual(self.evaluateCount, 1)
if __name__ == '__main__':
main()
| Python | 0.000001 | |
458d2e55de4db6c9f72758b745245301ebd02f48 | Add solution 100 | 100_to_199/euler_100.py | 100_to_199/euler_100.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
Problem 100
If a box contains twenty-one coloured discs, composed of fifteen blue discs and six red discs, and two discs were taken at random, it can be seen that the probability of taking two blue discs, P(BB) = (15/21)ร(14/20) = 1/2.
The next such arrangement, for which there is exactly 50% chance of taking two blue discs at random, is a box containing eighty-five blue discs and thirty-five red discs.
By finding the first arrangement to contain over 1012 = 1,000,000,000,000 discs in total, determine the number of blue discs that the box would contain.
'''
from itertools import count
from math import sqrt, ceil
# https://oeis.org/A001542
def get_nominator(n):
a = ceil((((3 + 2 * sqrt(2)) ** n) - ((3 - 2 * sqrt(2)) ** n)) / (2 * sqrt(2)))
return a
# Actually Diophantine pairs.. https://oeis.org/A011900
def p100(): # Answer: 756872327473, 0.01s
L = 10 ** 12
n = 1
for i in count(1):
np = get_nominator(i // 2) # pattern is repeated
res = n * (n+np)
n = n + np
if res * 1.414 > L: # 15/21, 85/120 is around 1.414xxxx
print(res)
break
return
p100()
| Python | 0.998911 | |
c421024bfd1660685bb6ec6cb84a0369244627c5 | add celery module | service_mapper/celery.py | service_mapper/celery.py | from __future__ import absolute_import
import os
from celery import Celery
from django.conf import settings
# set the default Django settings module for the 'celery' program.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'service_mapper.settings')
app = Celery('service_mapper')
# Using a string here means the worker will not have to
# pickle the object when using Windows.
app.config_from_object('django.conf:settings')
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
@app.task(bind=True)
def debug_task(self):
print('Request: {0!r}'.format(self.request))
| Python | 0.000001 | |
2eb05eb7d42f1b14191cccba2563c2105fabaed1 | Add processing module | processing.py | processing.py | #!/usr/bin/env python
"""
Processing routines for the waveFlapper case.
"""
import foampy
import numpy as np
import matplotlib.pyplot as plt
width_2d = 0.1
width_3d = 3.66
def plot_force():
"""Plots the streamwise force on the paddle over time."""
def plot_moment():
data = foampy.load_forces_moments()
i = 10
t = data["time"][i:]
m = data["moment"]["pressure"]["z"] + data["moment"]["viscous"]["z"]
m = m[i:]*width_3d/width_2d
plt.figure()
plt.plot(t, m)
plt.xlabel("t (s)")
plt.ylabel("Flapper moment (Nm)")
print("Max moment from CFD =", m.max(), "Nm")
print("Theoretical max moment (including inertia) =", 5500*3.3, "Nm")
plt.show()
if __name__ == "__main__":
plot_moment()
| Python | 0.000001 | |
df0e285b6f8465eb273af50c242299c5601fa09f | Add a new example | examples/sanic_aiomysql_with_global_pool.py | examples/sanic_aiomysql_with_global_pool.py | # encoding: utf-8
"""
You need the aiomysql
"""
import asyncio
import os
import aiomysql
import uvloop
from sanic import Sanic
from sanic.response import json
database_name = os.environ['DATABASE_NAME']
database_host = os.environ['DATABASE_HOST']
database_user = os.environ['DATABASE_USER']
database_password = os.environ['DATABASE_PASSWORD']
app = Sanic()
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
async def get_pool(*args, **kwargs):
"""
the first param in *args is the global instance ,
so we can store our connection pool in it .
and it can be used by different request
:param args:
:param kwargs:
:return:
"""
args[0].pool = {
"aiomysql": await aiomysql.create_pool(host=database_host, user=database_user, password=database_password,
db=database_name,
maxsize=5)}
async with args[0].pool['aiomysql'].acquire() as conn:
async with conn.cursor() as cur:
await cur.execute('DROP TABLE IF EXISTS sanic_polls')
await cur.execute("""CREATE TABLE sanic_polls (
id serial primary key,
question varchar(50),
pub_date timestamp
);""")
for i in range(0, 100):
await cur.execute("""INSERT INTO sanic_polls
(id, question, pub_date) VALUES ({}, {}, now())
""".format(i, i))
@app.route("/")
async def test():
result = []
data = {}
async with app.pool['aiomysql'].acquire() as conn:
async with conn.cursor() as cur:
await cur.execute("SELECT question, pub_date FROM sanic_polls")
async for row in cur:
result.append({"question": row[0], "pub_date": row[1]})
if result or len(result) > 0:
data['data'] = res
return json(data)
if __name__ == '__main__':
app.run(host="127.0.0.1", workers=4, port=12000, before_start=get_pool)
| Python | 0.000102 | |
e7b6aef4db85c777463d2335107145b60b678ae2 | Create a new tour example | examples/tour_examples/maps_introjs_tour.py | examples/tour_examples/maps_introjs_tour.py | from seleniumbase import BaseCase
class MyTourClass(BaseCase):
def test_google_maps_tour(self):
self.open("https://www.google.com/maps/@42.3598616,-71.0912631,15z")
self.wait_for_element("#searchboxinput")
self.wait_for_element("#minimap")
self.wait_for_element("#zoom")
self.create_tour(theme="introjs")
self.add_tour_step("Welcome to Google Maps!",
title="โ
SeleniumBase Tours ๐")
self.add_tour_step("Type in a location here.", "#searchboxinput",
title="Search Box")
self.add_tour_step("Then click here to show it on the map.",
"#searchbox-searchbutton", alignment="bottom")
self.add_tour_step("Or click here to get driving directions.",
"#searchbox-directions", alignment="bottom")
self.add_tour_step("Use this button to switch to Satellite view.",
"#minimap div.widget-minimap", alignment="right")
self.add_tour_step("Click here to zoom in.", "#widget-zoom-in",
alignment="left")
self.add_tour_step("Or click here to zoom out.", "#widget-zoom-out",
alignment="left")
self.add_tour_step("Use the Menu button to see more options.",
".searchbox-hamburger-container", alignment="right")
self.add_tour_step("Or click here to see more Google apps.",
'[title="Google apps"]', alignment="left")
self.add_tour_step("Thanks for using SeleniumBase Tours!",
title="๐ End of Guided Tour ๐")
self.export_tour(filename="google_maps_introjs_tour.js")
self.play_tour()
| Python | 0.000012 | |
8ddc9333513a2e900ff61b6d2904db3e58635bb9 | add initial self_publish version | elm_self_publish.py | elm_self_publish.py | #! /usr/bin/env python
from __future__ import print_function
import sys
import json
import shutil
import argparse
def copy_package(location, destination):
shutil.copytree(location, destination)
def package_name(url):
""" get the package name from a github url """
project = url.split('/')[-1].split('.')[0]
user = url.split('/')[-2]
return {
"project": project,
"user": user
}
def self_publish(package_location, destination=".", quiet=False):
""" package_location should be the local package to install
"""
elm_package_file = "{location}/elm-package.json".format(location=package_location)
exact_deps_file = "{destination}/elm-stuff/exact-dependencies.json".format(
destination=destination,
location=package_location
)
with open(elm_package_file) as f:
elm_package = json.load(f)
package_details = package_name(elm_package['repository'])
version = elm_package['version']
place = package_details['user'] + '/' + package_details['project']
copy_package(package_location, '{destination}/elm-stuff/packages/{place}/{version}'.format(
place=place,
version=version,
destination=destination
))
with open(exact_deps_file) as f:
data = f.read()
package_info = {}
if data:
package_info = json.loads(data)
with open(exact_deps_file, 'w') as f:
package_info[place] = version
json.dump(package_info, f, sort_keys=False, indent=4)
with open(elm_package_file, 'w') as f:
elm_package['dependencies'][place] = version
json.dump(elm_package, f, sort_keys=False, indent=4)
def main():
parser = argparse.ArgumentParser(description='Publish a local package into your project')
parser.add_argument('--quiet', '-q', action='store_true', help='don\'t print anything', default=False)
parser.add_argument('package_location')
parser.add_argument('destination')
args = parser.parse_args()
self_publish(args.package_location, args.destination, quiet=args.quiet)
if __name__ == '__main__':
main()
| Python | 0 | |
a004611ceb3402c95675a749eb9a3db764c97e51 | Move cython_build_ext command to utils.distutils and put it to setup.cfg | edgedb/lang/common/distutils.py | edgedb/lang/common/distutils.py | ##
# Copyright (c) 2014 Sprymix Inc.
# All rights reserved.
#
# See LICENSE for details.
##
from distutils.command import build_ext as _build_ext
class cython_build_ext(_build_ext.build_ext):
def __init__(self, *args, **kwargs):
self._ctor_args = args
self._ctor_kwargs = kwargs
self._cython = None
def __getattribute__(self, name):
cython = object.__getattribute__(self, '_cython')
if cython is None:
from Cython.Distutils import build_ext
_ctor_args = object.__getattribute__(self, '_ctor_args')
_ctor_kwargs = object.__getattribute__(self, '_ctor_kwargs')
cython = build_ext(*_ctor_args, **_ctor_kwargs)
object.__setattr__(self, '_cython', cython)
return getattr(cython, name)
| Python | 0 | |
d05de03f258c215ce0a23023e5c15b057fbf7283 | add missing import | s2plib/fusion.py | s2plib/fusion.py | # Copyright (C) 2015, Carlo de Franchis <carlo.de-franchis@cmla.ens-cachan.fr>
# Copyright (C) 2015, Gabriele Facciolo <facciolo@cmla.ens-cachan.fr>
# Copyright (C) 2015, Enric Meinhardt <enric.meinhardt@cmla.ens-cachan.fr>
# Copyright (C) 2015, Julien Michel <julien.michel@cnes.fr>
from __future__ import print_function
import os
import sys
import shutil
import numpy as np
from osgeo import gdal
gdal.UseExceptions()
from s2plib.config import cfg
from s2plib import common
def average_if_close(x, threshold):
"""
"""
if np.nanmax(x) - np.nanmin(x) > threshold:
return np.nan
else:
return np.nanmedian(x)
def merge_n(output, inputs, offsets, averaging='average_if_close', threshold=1):
"""
Merge n images of equal sizes by taking the median/mean/min/max pixelwise.
Args:
inputs: list of paths to the input images
output: path to the output image
averaging: string containing the name of a function that accepts
1D arrays. It is applied to 1D slices of the stack of images along
the last axis. Possible values are, for instance np.min, np.max,
np.mean, np.median and their nanproof counterparts, ie np.nanmin,
np.nanmax, np.nanmean, np.nanmedian
"""
assert(len(inputs) == len(offsets))
# get input images size
if inputs:
f = gdal.Open(inputs[0])
w, h = f.RasterXSize, f.RasterYSize
f = None # this is the gdal way of closing files
# read input images and apply offsets
x = np.empty((h, w, len(inputs)))
for i, img in enumerate(inputs):
f = gdal.Open(img)
x[:, :, i] = f.GetRasterBand(1).ReadAsArray() - offsets[i]
f = None
if cfg['debug']:
common.rasterio_write('{}_registered.tif'.format(os.path.splitext(img)[0]),
x[:, :, i] + np.mean(offsets))
# apply the averaging operator
if averaging.startswith(('np.', 'numpy.')):
avg = np.apply_along_axis(getattr(sys.modules['numpy'], averaging.split('.')[1]),
axis=2, arr=x)
elif averaging == 'average_if_close':
avg = np.apply_along_axis(average_if_close, 2, x, threshold)
# add the mean offset
avg += np.mean(offsets)
# write the average to output
if inputs:
shutil.copy(inputs[0], output) # copy an input file to get the metadata
f = gdal.Open(output, gdal.GA_Update)
f.GetRasterBand(1).WriteArray(avg) # update the output file content
f = None
| # Copyright (C) 2015, Carlo de Franchis <carlo.de-franchis@cmla.ens-cachan.fr>
# Copyright (C) 2015, Gabriele Facciolo <facciolo@cmla.ens-cachan.fr>
# Copyright (C) 2015, Enric Meinhardt <enric.meinhardt@cmla.ens-cachan.fr>
# Copyright (C) 2015, Julien Michel <julien.michel@cnes.fr>
from __future__ import print_function
import os
import sys
import shutil
import numpy as np
from osgeo import gdal
gdal.UseExceptions()
from s2plib.config import cfg
def average_if_close(x, threshold):
"""
"""
if np.nanmax(x) - np.nanmin(x) > threshold:
return np.nan
else:
return np.nanmedian(x)
def merge_n(output, inputs, offsets, averaging='average_if_close', threshold=1):
"""
Merge n images of equal sizes by taking the median/mean/min/max pixelwise.
Args:
inputs: list of paths to the input images
output: path to the output image
averaging: string containing the name of a function that accepts
1D arrays. It is applied to 1D slices of the stack of images along
the last axis. Possible values are, for instance np.min, np.max,
np.mean, np.median and their nanproof counterparts, ie np.nanmin,
np.nanmax, np.nanmean, np.nanmedian
"""
assert(len(inputs) == len(offsets))
# get input images size
if inputs:
f = gdal.Open(inputs[0])
w, h = f.RasterXSize, f.RasterYSize
f = None # this is the gdal way of closing files
# read input images and apply offsets
x = np.empty((h, w, len(inputs)))
for i, img in enumerate(inputs):
f = gdal.Open(img)
x[:, :, i] = f.GetRasterBand(1).ReadAsArray() - offsets[i]
f = None
if cfg['debug']:
common.rasterio_write('{}_registered.tif'.format(os.path.splitext(img)[0]),
x[:, :, i] + np.mean(offsets))
# apply the averaging operator
if averaging.startswith(('np.', 'numpy.')):
avg = np.apply_along_axis(getattr(sys.modules['numpy'], averaging.split('.')[1]),
axis=2, arr=x)
elif averaging == 'average_if_close':
avg = np.apply_along_axis(average_if_close, 2, x, threshold)
# add the mean offset
avg += np.mean(offsets)
# write the average to output
if inputs:
shutil.copy(inputs[0], output) # copy an input file to get the metadata
f = gdal.Open(output, gdal.GA_Update)
f.GetRasterBand(1).WriteArray(avg) # update the output file content
f = None
| Python | 0.000001 |
bc235b15bbeacf7fee7e1d23a5d94b6271e33e41 | Add initial code | rpsls.py | rpsls.py | #!/usr/bin/python
from collections import OrderedDict
from random import choice, seed
from sys import exit
WEAPONS = OrderedDict([
('rock', 1),
('paper', 2),
('scissors', 3),
('lizard', 5),
('spock', 4)
])
EXPLANATIONS = {
'lizardlizard': 'Lizard equals lizard',
'lizardpaper': 'Lizard eats paper',
'lizardrock': 'Rock crushes lizard',
'lizardscissors': 'Scissors decapitate lizard',
'lizardspock': 'Lizard poisons spock',
'paperpaper': 'Paper equals paper',
'paperrock': 'Paper wraps rock',
'paperscissors': 'Scissors cut paper',
'paperspock': 'Paper disproves Spock',
'rockrock': 'Rock equals rock',
'rockscissors': 'Rock breaks scissors',
'rockspock': 'Spock vapourises rock',
'scissorsscissors': 'Scissors equal scissors',
'scissorsspock': 'Spock breaks scissors',
'spockspock': 'Spock equals Spock'
}
def do_battle(player_weapon, cpu_weapon):
explanation = EXPLANATIONS[''.join(sorted([player_weapon, cpu_weapon]))]
result = (WEAPONS[player_weapon] - WEAPONS[cpu_weapon]) % 5
if result == 0:
message = 'It\'s a draw.'
elif result % 2 == 0:
message = 'CPU wins!'
else:
message = 'Player wins!'
return '{}. {}'.format(explanation, message)
def is_valid_weapon(weapon):
return weapon in WEAPONS.keys()
def get_random_weapon():
seed()
return choice(WEAPONS.keys())
def run():
print 'Choose your weapon ({}), or quit:'.format(', '.join(WEAPONS.keys()))
player_weapon = raw_input('> ').lower()
if player_weapon == 'quit':
print 'Thanks for playing.'
exit()
if not is_valid_weapon(player_weapon):
print '\'{}\' is not a valid weapon, try again.\n'.format(player_weapon)
run()
cpu_weapon = get_random_weapon()
print '(Player) {} - vs - {} (CPU)'.format(player_weapon, cpu_weapon)
print '{}\n'.format(do_battle(player_weapon, cpu_weapon))
run()
if __name__ == '__main__':
run()
| Python | 0.000003 | |
43c74dc2dbe82a30f7a9b6c0403db39eb159fc96 | add control panel test for fetch | paystackapi/tests/test_cpanel.py | paystackapi/tests/test_cpanel.py | import httpretty
from paystackapi.tests.base_test_case import BaseTestCase
from paystackapi.cpanel import ControlPanel
class TestPage(BaseTestCase):
    """Tests for the ControlPanel API wrapper."""

    @httpretty.activate
    def test_fetch_payment_session_timeout(self):
        """fetch_payment_session_timeout() returns the decoded API payload."""
        httpretty.register_uri(
            # Fixed: httpretty exposes HTTP methods as upper-case
            # constants (httpretty.GET); lowercase 'get' is not one.
            httpretty.GET,
            self.endpoint_url("/integration/payment_session_timeout"),
            content_type='text/json',
            body='{"status": true, "message": "Payment session timeout retrieved"}',
            status=201,
        )

        response = ControlPanel.fetch_payment_session_timeout()
        self.assertTrue(response['status'])
| Python | 0 | |
233db6d2decad39c98bf5cbe8b974f93308bea16 | Create re.py | python2.7/re.py | python2.7/re.py | #/usr/bin/python
import re

# NOTE(review): if this script is saved as re.py and run from its own
# directory, "import re" resolves to this file rather than the stdlib
# module -- consider renaming the file.

#Shows how to test if a string matches a regular expression (yes/no) and uses more than one modifier
expression = re.compile(r"^\w+.+string", re.I | re.S) #compile the expression
if expression.match("A Simple String To Test"): #See if a string matches it
    print "Matched"
else:
    print "Did Not Match"

#Splitting with a regular expression
scalar_list = "item 1, item 2, item 3" #A text string delimitted by comma and variable whitespace
items = re.split(",\s+", scalar_list) #Splitting this up into an array called items (non-raw "\s" passes through unchanged)
print items[1] + ":" + items[0] #printing a couple of the elements

#Extraction/parsing
parse_this = "Text with some digits: 1234 and some hexidecimal deadbeef1337"
extractions = re.compile(r"[^\d]+(\d+).+\s([0-9a-f]+)$") #Our regex; groups we want in ()'s
peices = extractions.match(parse_this) #exec our re and result in peices
print "Number: " + peices.group(1) + " Hex:" + peices.group(2) #display both extracted groups
| Python | 0.000001 | |
d93916b1927f0ae099cee3cf93619d3113db147b | Add small example of basic anomaly detection w/peewee. | examples/anomaly_detection.py | examples/anomaly_detection.py | import math
from peewee import *
db = SqliteDatabase(':memory:')
class Reg(Model):
key = TextField()
value = IntegerField()
class Meta:
database = db
db.create_tables([Reg])
# Create a user-defined aggregate function suitable for computing the standard
# deviation of a series.
@db.aggregate('stddev')
class StdDev(object):
def __init__(self):
self.n = 0
self.values = []
def step(self, value):
self.n += 1
self.values.append(value)
def finalize(self):
if self.n < 2:
return 0
mean = sum(self.values) / self.n
sqsum = sum((i - mean) ** 2 for i in self.values)
return math.sqrt(sqsum / (self.n - 1))
values = [2, 3, 5, 2, 3, 12, 5, 3, 4, 1, 2, 1, -9, 3, 3, 5]
Reg.create_table()
Reg.insert_many([{'key': 'k%02d' % i, 'value': v}
for i, v in enumerate(values)]).execute()
# We'll calculate the mean and the standard deviation of the series in a common
# table expression, which will then be used by our query to find rows whose
# zscore exceeds a certain threshold.
cte = (Reg
.select(fn.avg(Reg.value), fn.stddev(Reg.value))
.cte('stats', columns=('series_mean', 'series_stddev')))
# The zscore is defined as the (value - mean) / stddev.
zscore = (Reg.value - cte.c.series_mean) / cte.c.series_stddev
# Find rows which fall outside of 2 standard deviations.
threshold = 2
query = (Reg
.select(Reg.key, Reg.value, zscore.alias('zscore'))
.from_(Reg, cte)
.where((zscore >= threshold) | (zscore <= -threshold))
.with_cte(cte))
for row in query:
print(row.key, row.value, round(row.zscore, 2))
db.close()
| Python | 0 | |
12b334983be4caf0ba97534b52f928180e31e564 | add quick script to release lock | release-lock.py | release-lock.py | from batch import Lock
# Force-release the distributed "charge-cards" lock, e.g. after a crashed
# worker left it held. Intended for manual, one-off use: releasing a lock
# that another process still holds is unsafe.
lock = Lock(key="charge-cards-lock")
lock.release()
| Python | 0 | |
687a186bd29eb1bef7a134fa5499c9b4c56abaa6 | Create setup.py | setup.py | setup.py | from distutils.core import setup
import py2exe, os, pygame
origIsSystemDLL = py2exe.build_exe.isSystemDLL
def isSystemDLL(pathname):
if os.path.basename(pathname).lower() in ["sdl_ttf.dll"]:
return 0
return origIsSystemDLL(pathname)
py2exe.build_exe.isSystemDLL = isSystemDLL
pygamedir = os.path.split(pygame.base.__file__)[0]
os.path.join(pygamedir, pygame.font.get_default_font()),
os.path.join(pygamedir, 'SDL.dll'),
os.path.join(pygamedir, 'SDL_ttf.dll')
setup(
console=["pick_a_number.py"],
options={
"py2exe":{
"packages": ["pygame"]
}
}
)
| Python | 0.000001 | |
77ca6d5e6ef7e07ede92fa2b4566a90c31fd7845 | Bump grappelli and filebrowser versions. | setup.py | setup.py |
from __future__ import with_statement
import os
exclude = ["mezzanine/project_template/dev.db",
"mezzanine/project_template/local_settings.py"]
exclude = dict([(e, None) for e in exclude])
for e in exclude:
if e.endswith(".py"):
try:
os.remove("%sc" % e)
except:
pass
try:
with open(e, "r") as f:
exclude[e] = (f.read(), os.stat(e))
os.remove(e)
except:
pass
from setuptools import setup, find_packages
from mezzanine import __version__ as version
install_requires = [
"django >= 1.3.1",
"filebrowser_safe == 0.2.4",
"grappelli_safe == 0.2.4",
]
try:
from PIL import Image, ImageOps
except ImportError:
install_requires += ["pillow"]
try:
setup(
name="Mezzanine",
version=version,
author="Stephen McDonald",
author_email="stephen.mc@gmail.com",
description="An open source content management platform built using "
"the Django framework.",
long_description=open("README.rst").read(),
license="BSD",
url="http://mezzanine.jupo.org/",
zip_safe=False,
include_package_data=True,
packages=find_packages(),
install_requires=install_requires,
entry_points="""
[console_scripts]
mezzanine-project=mezzanine.bin.mezzanine_project:create_project
""",
classifiers=[
"Development Status :: 4 - Beta",
"Environment :: Web Environment",
"Framework :: Django",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Internet :: WWW/HTTP :: Dynamic Content",
"Topic :: Internet :: WWW/HTTP :: WSGI",
"Topic :: Software Development :: Libraries :: "
"Application Frameworks",
"Topic :: Software Development :: Libraries :: Python Modules",
])
finally:
for e in exclude:
if exclude[e] is not None:
data, stat = exclude[e]
try:
with open(e, "w") as f:
f.write(data)
os.chown(e, stat.st_uid, stat.st_gid)
os.chmod(e, stat.st_mode)
except:
pass
|
from __future__ import with_statement
import os
exclude = ["mezzanine/project_template/dev.db",
"mezzanine/project_template/local_settings.py"]
exclude = dict([(e, None) for e in exclude])
for e in exclude:
if e.endswith(".py"):
try:
os.remove("%sc" % e)
except:
pass
try:
with open(e, "r") as f:
exclude[e] = (f.read(), os.stat(e))
os.remove(e)
except:
pass
from setuptools import setup, find_packages
from mezzanine import __version__ as version
install_requires = [
"django >= 1.3.1",
"filebrowser_safe == 0.2.3",
"grappelli_safe == 0.2.2",
]
try:
from PIL import Image, ImageOps
except ImportError:
install_requires += ["pillow"]
try:
setup(
name="Mezzanine",
version=version,
author="Stephen McDonald",
author_email="stephen.mc@gmail.com",
description="An open source content management platform built using "
"the Django framework.",
long_description=open("README.rst").read(),
license="BSD",
url="http://mezzanine.jupo.org/",
zip_safe=False,
include_package_data=True,
packages=find_packages(),
install_requires=install_requires,
entry_points="""
[console_scripts]
mezzanine-project=mezzanine.bin.mezzanine_project:create_project
""",
classifiers=[
"Development Status :: 4 - Beta",
"Environment :: Web Environment",
"Framework :: Django",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Internet :: WWW/HTTP :: Dynamic Content",
"Topic :: Internet :: WWW/HTTP :: WSGI",
"Topic :: Software Development :: Libraries :: "
"Application Frameworks",
"Topic :: Software Development :: Libraries :: Python Modules",
])
finally:
for e in exclude:
if exclude[e] is not None:
data, stat = exclude[e]
try:
with open(e, "w") as f:
f.write(data)
os.chown(e, stat.st_uid, stat.st_gid)
os.chmod(e, stat.st_mode)
except:
pass
| Python | 0 |
8dfdcfa0f1d13e810a6e56e0a031f15dbaba3656 | Use environment metadata for conditional dependencies | setup.py | setup.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import djangocms_blog
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
version = djangocms_blog.__version__
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist upload')
print("You probably want to also tag the version now:")
print(" git tag -a %s -m 'version %s'" % (version, version))
print(" git push --tags")
sys.exit()
readme = open('README.rst').read()
history = open('HISTORY.rst').read().replace('.. :changelog:', '')
setup(
name='djangocms-blog',
version=version,
description='A djangoCMS 3 blog application',
long_description=readme + '\n\n' + history,
author='Iacopo Spalletti',
author_email='i.spalletti@nephila.it',
url='https://github.com/nephila/djangocms-blog',
packages=[
'djangocms_blog',
],
include_package_data=True,
install_requires=[
'django-parler>=1.2',
'django-cms>=3.0',
'django-taggit',
'django-filer',
'pytz',
'django-taggit-templatetags',
'django-taggit-autosuggest',
'django-admin-enhancer',
'djangocms-text-ckeditor',
'cmsplugin-filer',
'django-meta>=0.2',
'django-meta-mixin>=0.1.1',
'south>=1.0.1',
],
extras_require={
":python_version=='3.3'": ['django-select2-py3'],
":python_version=='3.4'": ['django-select2-py3'],
":python_version=='2.6'": ['django-select2'],
":python_version=='2.7'": ['django-select2'],
},
license="BSD",
zip_safe=False,
keywords='djangocms-blog, blog, django, wordpress, multilingual',
test_suite='cms_helper.run',
classifiers=[
'Development Status :: 4 - Beta',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
)
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import djangocms_blog
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
version = djangocms_blog.__version__
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist upload')
print("You probably want to also tag the version now:")
print(" git tag -a %s -m 'version %s'" % (version, version))
print(" git push --tags")
sys.exit()
readme = open('README.rst').read()
history = open('HISTORY.rst').read().replace('.. :changelog:', '')
setup(
name='djangocms-blog',
version=version,
description='A djangoCMS 3 blog application',
long_description=readme + '\n\n' + history,
author='Iacopo Spalletti',
author_email='i.spalletti@nephila.it',
url='https://github.com/nephila/djangocms-blog',
packages=[
'djangocms_blog',
],
include_package_data=True,
install_requires=[
'django-parler>=1.2',
'django-cms>=3.0',
'django-taggit',
'django-filer',
'django-select2' if sys.version_info[0]==2 else 'django-select2-py3',
'pytz',
'django-taggit-templatetags',
'django-taggit-autosuggest',
'django-admin-enhancer',
'djangocms-text-ckeditor',
'cmsplugin-filer',
'django-meta>=0.2',
'django-meta-mixin>=0.1.1',
'south>=1.0.1',
],
license="BSD",
zip_safe=False,
keywords='djangocms-blog, blog, django, wordpress, multilingual',
test_suite='cms_helper.run',
classifiers=[
'Development Status :: 4 - Beta',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
)
| Python | 0 |
2e57e929db19ebd864680d4616eb1bba595f1e57 | Create setup.py | setup.py | setup.py | from distutils.core import setup
setup(
name = 'fram3w0rk-python',
packages = ['fram3w0rk-python'],
version = '0.5',
description = '"Class" effort to unify functions across 30 languages.',
author = 'Jonathan Lawton',
author_email = 'jlawton@lawtonsoft.com',
url = 'https://github.com/LawtonSoft/Fram3w0rk-Python',
download_url = 'https://github.com/LawtonSoft/Fram3work-Python/tarball/0.1',
keywords = ['fram3w0rk', 'mvc', 'web'],
classifiers = [],
)
| Python | 0.000001 | |
b0184d74d0f186662df8596f511f95e1130bcf20 | Add libffi package | rules/libffi.py | rules/libffi.py | import xyz
import os
import shutil
class Libffi(xyz.BuildProtocol):
pkg_name = 'libffi'
def configure(self, builder, config):
builder.host_lib_configure(config=config)
rules = Libffi()
| Python | 0 | |
e846a9c77f98e61287a37953fdbee570208dd2d5 | add setup.py for python packaging | setup.py | setup.py | """A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='pinyinflix',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version='1.0.0',
description='Converts mandarin chinese .srt files to pinyin-annotated .dfxp files that can be used with Netflix.',
long_description=long_description,
# The project's main homepage.
url='https://github.com/astromme/pinyinflix',
# Author details
author='Andrew Stromme',
author_email='andrew.stromme@gmail.com',
# Choose your license
license='APACHE2.0',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 5 - Production/Stable',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: Apache 2.0 License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
# What does your project relate to?
keywords='netflix subtitle subtitles subs chinese mandarin pinyin hanzi srt dfxp',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
#packages=find_packages(exclude=['contrib', 'docs', 'tests']),
# Alternatively, if you want to distribute just a my_module.py, uncomment
# this:
py_modules=["pinyinflix"],
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=['jieba', 'pinyin'],
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# for example:
# $ pip install -e .[dev,test]
# extras_require={
# 'dev': ['check-manifest'],
# 'test': ['coverage'],
# },
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
# package_data={
# 'sample': ['package_data.dat'],
# },
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages. See:
# http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
# data_files=[('my_data', ['data/data_file'])],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
entry_points={
'console_scripts': [
'pinyinflix=pinyinflix:main',
],
},
)
| Python | 0.000001 | |
b1d08df29b02c107bbb2f2edc9add0c6f486c530 | Add app | app.py | app.py | # coding: utf-8
import json
import flask
from flask import request
import telegram
# NOTE(review): rebinding __name__ makes Flask(__name__) name the app
# 'eth0_bot'; unconventional, but kept as the original intent.
__name__ = u'eth0_bot'
__author__ = u'Joker_Qyou'
__config__ = u'config.json'

app = flask.Flask(__name__)
app.debug = False

# Load bot settings; expects at least 'token' and 'server' keys.
with open(__config__, 'r') as cfr:
    config = json.loads(cfr.read())

# Fixed: 'token_info' was never defined (NameError at import time);
# the bot token comes from the loaded configuration.
bot = telegram.Bot(token=config['token'])
# NOTE(review): the webhook is registered as '<server>/<token>' but the
# route below only matches the token's last ':'-separated segment --
# confirm the two agree.
bot.setWebhook(u'%(server)s/%(token)s' % config)


# NOTE(review): Telegram delivers updates via POST; this route accepts
# only GET by default -- it probably needs methods=['POST'].
@app.route(u'/%s' % config.get('token').split(':')[-1])
def webhook():
    ''' WebHook API func '''
    # Fixed: flask.request has no .POST attribute (that is Django's API),
    # and the Python 2 print statement was a SyntaxError under Python 3.
    # Log the raw update body and acknowledge with a 200 response
    # (returning None would make Flask raise).
    print(request.data)
    return 'OK'
| Python | 0.000002 | |
69f787a69e400b69fa4aef2e49f6f03781304dae | Update setup.py. | setup.py | setup.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Alexander Shorin
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
from astm.version import __version__
try:
from setuptools import setup, find_packages
except ImportError:
from distutils.core import setup
# http://wiki.python.org/moin/Distutils/Cookbook/AutoPackageDiscovery
import os
def is_package(path):
return (
os.path.isdir(path) and
os.path.isfile(os.path.join(path, '__init__.py'))
)
def find_packages(path='.', base=""):
""" Find all packages in path """
packages = {}
for item in os.listdir(path):
dir = os.path.join(path, item)
if is_package(dir):
if base:
module_name = "%(base)s.%(item)s" % vars()
else:
module_name = item
packages[module_name] = dir
packages.update(find_packages(dir, module_name))
return packages
setup(
name = 'astm',
version = __version__,
description = 'Python implementation of ASTM E1381/1394 protocol.',
long_description = open('README').read(),
author = 'Alexander Shorin',
author_email = 'kxepal@gmail.com',
license = 'BSD',
url = 'http://code.google.com/p/python-astm',
install_requires = [],
test_suite = 'astm.tests',
zip_safe = True,
classifiers = [
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Scientific/Engineering :: Medical Science Apps.'
],
packages = find_packages(),
)
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Alexander Shorin
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
from astm.version import __version__
try:
from setuptools import setup, find_packages
except ImportError:
from distutils.core import setup
# http://wiki.python.org/moin/Distutils/Cookbook/AutoPackageDiscovery
import os
def is_package(path):
return (
os.path.isdir(path) and
os.path.isfile(os.path.join(path, '__init__.py'))
)
def find_packages(path='.', base=""):
""" Find all packages in path """
packages = {}
for item in os.listdir(path):
dir = os.path.join(path, item)
if is_package( dir ):
if base:
module_name = "%(base)s.%(item)s" % vars()
else:
module_name = item
packages[module_name] = dir
packages.update(find_packages(dir, module_name))
return packages
setup(
name = 'astm',
version = __version__,
description = 'Python implementation of ASTM E1381/1394 protocol.',
author = 'Alexander Shorin',
author_email = 'kxepal@gmail.com',
license = 'BSD',
url = 'http://code.google.com/p/python-astm',
install_requires = [],
test_suite = 'astm.tests',
zip_safe = True,
classifiers = [
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Scientific/Engineering :: Medical Science Apps.'
],
packages = find_packages(),
)
| Python | 0 |
19b6d71e17f616bed3566d5615b5938bbfe3a497 | Add setup.py | setup.py | setup.py | #!/usr/bin/env python
from distutils.core import setup
setup(name='hydrus',
      version='0.0.1',
      description='A space-based application for W3C HYDRA Draft',
      author='W3C HYDRA development group',
      author_email='public-hydra@w3.org',
      url='https://github.com/HTTP-APIs/hydrus',
      # Fixed: 'packages' must list the Python package directories shipped
      # by this project; 'flask==0.11' is a dependency specifier and
      # belongs in install_requires.
      packages=['hydrus'],
      # NOTE(review): install_requires is a setuptools feature; plain
      # distutils (imported above) only warns and ignores it -- consider
      # switching to 'from setuptools import setup'.
      install_requires=['flask==0.11'],
      )
| Python | 0.000001 | |
e2ae0798424d4aa0577e22d563646856866fbd1f | add setup.py file for pypi | setup.py | setup.py | import os
from setuptools import setup, find_packages
import versioncheck
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name='django-versioncheck',
version=versioncheck.__version__,
description='A small django app which tries to be annoying if your django version is outdated.',
long_description=read('README.md'),
license='MIT License',
author='Richard Stromer',
author_email='noxan@byteweaver.org',
url='https://github.com/noxan/django-versioncheck',
packages=find_packages(),
install_requires=[
'django',
],
)
| Python | 0 | |
d43bcc978b1d79a20820ab1df73bd69d5d3c100d | Add setup.py | setup.py | setup.py | from setuptools import find_packages
from setuptools import setup
VERSION = '0.0.1'
setup_args = dict(
name='BigQuery-Python',
description='Simple Python client for interacting with Google BigQuery.',
url='https://github.com/tylertreat/BigQuery-Python',
version=VERSION,
license='Apache',
packages=find_packages(),
include_package_data=True,
install_requires=['google-api-python-client', 'pyopenssl'],
author='Tyler Treat',
author_email='ttreat31@gmail.com',
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
],
)
if __name__ == '__main__':
setup(**setup_args)
| Python | 0.000001 | |
840e178a85da246d8357481a8e6ea5a8d87deef7 | Create setup.py | setup.py | setup.py | """
KonF'00'
~~~~~~~~
KonFoo is a Python Package for creating byte stream mappers in a declarative
way with as little code as necessary to help fighting the confusion with the
foo of the all too well-known memory dumps or binary data.
Setup
-----
.. code:: bash
$ pip install KonFoo
Links
-----
* `website <http://github.com/JoeVirtual/KonFoo/>`_
* `documentation <http://github.com/JoeVirtual/KonFoo/master/docs/>`_
* `development version
<http://github.com/JoeVirtual/KonFoo/master>`_
"""
import re
import ast
from setuptools import setup
_version_re = re.compile(r'__version__\s+=\s+(.*)')
with open('konfoo/__init__.py', 'rb') as f:
version = str(ast.literal_eval(_version_re.search(
f.read().decode('utf-8')).group(1)))
setup(
name='KonFoo',
version=version,
license='BSD',
author='Jochen Gerhaeusser',
author_email='jochen.gerhaeusser@freenet.de',
url='http://github.com/JoeVirtual/KonFoo',
description='A declarative byte stream mapping engine.',
long_description=__doc__,
packages=['konfoo'],
install_requires=[],
classifiers=[
'License :: BSD License',
'Programming Language :: Python :: 3',
]
)
| Python | 0.000001 | |
10ccc510deab5c97ce8a6c5ee57232c5e399986e | Add decision tree classifier attempt. | decision_tree.py | decision_tree.py | import pandas as pd
from sklearn import tree
# X = [[0, 1], [1, 1]]
# Y = [0, 1]
#clf = tree.DecisionTreeClassifier()
#clf = clf.fit(X, Y)
# Load the immunogenicity dataset.
# NOTE(review): absolute, user-specific path -- this only runs on the
# original author's machine.
data = pd.read_excel('/home/andre/sandbox/jhu-immuno/journal.pcbi.1003266.s001-2.XLS')
resp_cols = [ 'MHC' ]
# Encode the text label as a binary target.
data['y'] = data.Immunogenicity.map({'non-immunogenic': 0, 'immunogenic': 1 })
X = data[resp_cols]
Y = data.y
clf = tree.DecisionTreeClassifier()
# One-hot encode the MHC allele names for the tree.
# NOTE(review): the tree is fitted on the one-hot 'dummy' frame, so the
# raw-string X above ends up unused -- confirm that is intended.
dummy = pd.get_dummies(data.MHC)
clf.fit(dummy, Y)
from sklearn.externals.six import StringIO
# NOTE(review): StringIO is imported but never used; export_graphviz
# writes directly to the file 'decision_tree'.
f = tree.export_graphviz(clf, out_file = 'decision_tree')
efe596e3f935fe31af5bcbd8ef1afbb6750be123 | add a setup.py | setup.py | setup.py | """Set up the kd project"""
from setuptools import setup
import kd
setup(
name='kd',
version=kd.__version__,
url='https://github.com/jalanb/kd',
license='MIT License',
author='J Alan Brogan',
author_email='kd@al-got-rhythm.net',
description='kd is a smarter cd',
platforms='any',
classifiers=[
'Programming Language :: Python :: 2.7',
'Development Status :: 2 - Pre-Alpha',
'Natural Language :: English',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: MIT License',
'Operating System :: Unix',
'Topic :: System :: Shells',
],
test_suite='nose.collector',
tests_require=['nose'],
extras_require={
'docs': ['Sphinx'],
'testing': ['nose'],
}
)
| Python | 0.000001 | |
9220523e6bcac6b80410a099b2f2fd30d7cbb7d3 | Add first draft of setup.py | setup.py | setup.py | from setuptools import setup
setup(
name = 'pyAPT',
version = '0.1.0',
author = 'Christoph Weinsheimer',
author_email = 'christoph.weinsheimer@desy.de',
packages = ['pyAPT'],
scripts = [],
description = 'Controller module for Thorlabs motorized stages',
install_requires = [],
)
| Python | 0 | |
49a7fdc78cd71b75b1fbcc0023e428479ce38f41 | Implement a cryptographic hash function | sha_1.py | sha_1.py | #!/usr/local/bin/python
"""
sha_1.py
@author Elliot and Erica
"""
from cryptography_utilities import (wrap_bits_left, decimal_to_binary,
binary_to_decimal, pad_plaintext, block_split, bitwise_and,
bitwise_or, bitwise_xor, bitwise_not, hex_to_binary)
BLOCKSIZE = 512
SUB_BLOCKSIZE = 32
SHA_1_INTERVALS = 80
def add(*binaries):
    """Sum any number of binary strings modulo 2**32; return binary."""
    total = sum(binary_to_decimal(binary) % 2**32 for binary in binaries)
    return decimal_to_binary(total % 2**32)
def mixing_operation(interval, b, c, d):
    """Apply the round-dependent SHA-1 mixing function f_t to b, c, d.

    The function changes every 20 rounds: choose (0-19), parity (20-39),
    majority (40-59), parity again (60-79).
    """
    if interval > 79:
        raise Exception('Interval out of bounds')
    if 0 <= interval <= 19:
        # "Choose": bits of b select between c and d.
        return bitwise_or(bitwise_and(b, c),
                          bitwise_and(bitwise_not(b), d))
    if interval <= 39 or interval > 59:
        # "Parity" rounds (20-39 and 60-79).
        return bitwise_xor(b, c, d)
    # "Majority" rounds (40-59).
    return bitwise_or(bitwise_and(b, c),
                      bitwise_and(b, d),
                      bitwise_and(c, d))
def round_constant(interval):
    """Return the SHA-1 additive round constant K_t as a binary string."""
    if interval > 79:
        raise Exception('Interval out of bounds')
    # One constant per block of 20 rounds.
    if 0 <= interval <= 19:
        key = '5A827999'
    elif interval <= 39:
        key = '6ED9EBA1'
    elif interval <= 59:
        key = '8F1BBCDC'
    else:
        key = 'CA62C1D6'
    return hex_to_binary(key)
def sha_1_expansion(block):
    """Take a 512 bit binary message and convert it into a series of
    32 bit blocks.

    The 16 original 32-bit words are expanded to 80 (the SHA-1 message
    schedule) by XOR-ing earlier words and rotating left one bit.
    """
    sub_blocks = block_split(block, SUB_BLOCKSIZE)
    for interval in xrange(len(sub_blocks), SHA_1_INTERVALS):
        # W[t] = rotl1(W[t-3] ^ W[t-8] ^ W[t-14] ^ W[t-16])
        new_sub_block = bitwise_xor(sub_blocks[interval - 3],
                                    sub_blocks[interval - 8],
                                    sub_blocks[interval - 14],
                                    sub_blocks[interval - 16])
        sub_blocks.append(wrap_bits_left(new_sub_block, 1))
    return sub_blocks
def sha_1_compression(sub_registers, sub_blocks):
    """Combines a series of sub_blocks into a single 160-bit binary
    string. The sub-registers and sub_blocks parameters should be
    collections of 32-bit binary strings.
    """
    a, b, c, d, e = sub_registers
    for interval in xrange(SHA_1_INTERVALS):
        # a' = rotl5(a) + f(t, b, c, d) + e + W[t] + K[t]  (mod 2**32)
        new_a = add(wrap_bits_left(a, 5),
                    mixing_operation(interval, b, c, d),
                    e,
                    sub_blocks[interval],
                    round_constant(interval))
        # Rotate the registers: e<-d, d<-c, c<-rotl30(b), b<-a, a<-a'.
        e = d
        d = c
        c = wrap_bits_left(b, 30)
        b = a
        a = new_a
    # Fold the working registers back into the chaining state.
    # (Python 2: map() returns a list here.)
    return map(add, sub_registers, [a, b, c, d, e])
def sha_1(binary_message):
    """SHA-1 cryptographic hash function. Take a binary string of any
    length and output an obfuscated 160-bit binary hash."""
    padded_message = pad_plaintext(binary_message, BLOCKSIZE)
    # Initial hash values H0..H4 from the SHA-1 specification.
    sub_registers = [hex_to_binary(initial_register)
                     for initial_register
                     in ['67452301', 'EFCDAB89', '98BADCFE',
                         '10325476', 'C3D2E1F0']]
    # Process each 512-bit block, threading the 5-word state through.
    for block in block_split(padded_message, BLOCKSIZE):
        sub_blocks = sha_1_expansion(block)
        sub_registers = sha_1_compression(sub_registers, sub_blocks)
    return ''.join(sub_registers)
| Python | 0.999999 | |
d923548321961bad8dcbe15a31ceaeda79aae934 | Create xr.py | xr.py | xr.py | #!/usr/bin/env python
# -*- coding: utf8 -*-
'''Element Manager xr class'''
__author__ = "Arnis Civciss (arnis.civciss@lattelecom.lv)"
__copyright__ = "Copyright (c) 2012 Arnis Civciss"
#__version__ = "$Revision: 0.1 $"
#__date__ = "$Date: 2012/01/08 $"
#__license__ = ""
import re
from lib.telnet import Telnet
class CliError(Exception):
    '''iSAM command line error exception class.'''

    def __init__(self, command, output):
        '''Initialize cli exception. Join output into one string if it's a list.

        command -- the CLI command that failed
        output  -- error output; a list is flattened by joining the
                   repr() of each element
        '''
        self.command = command
        if isinstance(output, list):
            # repr() replaces the Python 2-only backtick syntax (`num`),
            # which is a SyntaxError on Python 3; behaviour is identical.
            self.output = ''.join(repr(num) for num in output)
        else:
            self.output = output

    def __str__(self):
        '''Return a friendly CLI error: the command and its error output.'''
        return "cli error in command: %s\nOutput: %s." % (self.command, self.output)
class EmXr(Telnet):
    '''XR Element Manager Class'''

    def __init__(self, **kwargs):
        '''Initialize the node.

        Keyword arguments actually honoured:
          host       -- node IP address (mandatory)
          debuglevel -- telnet debug level (default 100; 100 is debug on)

        All other connection parameters (user, password, prompts, port 23,
        15 s timeout, ...) are hard-coded below and cannot be overridden
        through kwargs.
        '''
        host = kwargs['host']
        debuglevel = kwargs.get('debuglevel', 100)
        # Hard-coded login/session parameters handed to the Telnet base class.
        user = 'user'
        passwd = 'password'
        login_wait = 'name:'
        password_wait = 'assword:'  # matches both 'Password:' and 'password:'
        prompt='#'
        timeout= 15
        port = 23
        enable_string=''
        enable_prompt='#'
        enable_wait = 'assword'
        init_command = 'terminal length 0'
        enable_passwd = ''
        # Patterns that mark a failed CLI command in device output.
        # NOTE(review): the bare '%' alternative matches any percent sign,
        # which makes the more specific alternatives before it redundant --
        # confirm this is intended.
        self.cli_err = re.compile('% Invalid input|% Bad IP|% Access denied|% No such configuration|%|Namespace is locked by another agent|Do you wish to proceed with this commit anyway', re.DOTALL)
        Telnet.__init__(self, host, user, passwd, login_wait, password_wait, port,
                        prompt, timeout, enable_string, enable_prompt, enable_passwd,
                        enable_wait, init_command, debuglevel)

    #def write_raw_sequence(self, seq):

    def open(self):
        # Open the telnet session, normalise the terminal, then derive the
        # device prompt/hostname from command output.
        out = Telnet.open(self)
        reh = re.compile('\n([^#]+)', re.DOTALL)
        out = Telnet.run_command(self, 'terminal exec prompt no-timestamp')
        out = Telnet.run_command(self, 'terminal monitor disable')
        # NOTE(review): reh.search(out) is assumed to match; if it does
        # not, .group(1) raises AttributeError -- verify on real output.
        if reh.search(out).group(1):
            # IOS XR prompts look like 'RP/0/...:hostname#'; take the
            # field after the colon as prompt and hostname.
            part = (reh.search(out).group(1)).rsplit(':')[1]
            self.prompt = part
            self.hostname = part

    def run_command(self, command):
        '''Runs any command on the node. Raises CliError in case of syntax errors.
        Returns the cleaned output string.'''
        out = Telnet.run_command(self, command)
        # Strip backspace characters (Python 2 str.translate signature).
        out = out.translate(None, '\b')
        if self.cli_err.search(out):
            raise CliError(command, out)
        return out
pass
try:
rtr = EmXr(host='192.168.140.1', debuglevel=0)
rtr.open()
print rtr.hostname
print rtr.prompt
out = rtr.run_command('show arp vrf ngn vlan 4005')
for key, value in arp.items():
print "Key %s, Value %s" % (key, value)
except CliError as e:
print 'Cli Error %s ' % e
# except CliError as e:
# print 'Cli Error %s ' % e
| Python | 0.00001 | |
31d018181c5183acadbe309a250aed17cbae5a28 | Create Add_Binary.py | Array/Add_Binary.py | Array/Add_Binary.py | Given two binary strings, return their sum (also a binary string).
For example,
a = "11"
b = "1"
Return "100".
class Solution:
    """LeetCode 67 -- Add Binary."""

    # @param a, a string
    # @param b, a string
    # @return a string
    def addBinary(self, a, b):
        """Return the binary-string sum of binary strings *a* and *b*.

        Walks both strings from the least-significant end, adding digit
        pairs plus a carry, exactly like column addition by hand.
        """
        len_a = len(a)
        len_b = len(b)
        result = []
        carry = 0
        for i in range(1, max(len_a, len_b) + 1):
            total = carry
            if i <= len_a:
                total += int(a[-i])
            if i <= len_b:
                total += int(b[-i])
            # Fixed: floor division (was "/") keeps the carry an int on
            # Python 3; true division made it a float and corrupted the
            # digits. divmod also avoids shadowing the builtin 'sum'.
            carry, bit = divmod(total, 2)
            result.append(str(bit))
        if carry:
            result.append('1')
        # Digits were collected least-significant first; append+reverse
        # replaces the original O(n^2) insert(0, ...) pattern.
        return ''.join(reversed(result))
| Python | 0.000002 | |
12192eca146dc1974417bd4fd2cf3722e0049910 | add arduino example | example/ard2rrd.py | example/ard2rrd.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Arduino UNO A0 value to RRD db
# - read an integer from a serial port and store it on RRD redis database
import serial
from pyRRD_Redis import RRD_redis, StepAddFunc

# Tag under which the A0 samples are stored in Redis.
TAG_NAME = 'arduino_a0'

# Serial link to the Arduino and the round-robin database it feeds.
ser = serial.Serial(port='/dev/ttyACM0', baudrate=9600, timeout=1)
rrd = RRD_redis('rrd:' + TAG_NAME, size=2048, step=1.0, add_func=StepAddFunc.avg)

# Sample forever: one integer line per reading, expected in 0..1023.
while True:
    try:
        raw = int(ser.readline())
        if not 0 <= raw <= 1023:
            raise ValueError
    except ValueError:
        raw = None
    if raw is not None:
        # Store with the 10-bit ADC value rescaled to 0..100 %.
        rrd.add_step(float(raw) * 100 / 1023)
| Python | 0.000046 | |
013ee19808dc86d29cb3aa86b38dc35fe98a5580 | add to and remove from /etc/hosts some agent node info so condor can recognise its workers | conpaas-services/src/conpaas/services/htcondor/manager/node_info.py | conpaas-services/src/conpaas/services/htcondor/manager/node_info.py | """
Copyright (c) 2010-2013, Contrail consortium.
All rights reserved.
Redistribution and use in source and binary forms,
with or without modification, are permitted provided
that the following conditions are met:
1. Redistributions of source code must retain the
above copyright notice, this list of conditions
and the following disclaimer.
2. Redistributions in binary form must reproduce
the above copyright notice, this list of
conditions and the following disclaimer in the
documentation and/or other materials provided
with the distribution.
3. Neither the name of the Contrail consortium nor the
names of its contributors may be used to endorse
or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
import os
import re
from functools import wraps
def test_rw_permissions(f):
"""
Checks the read/write permissions of the specified file
"""
@wraps(f)
def rw_check(thefile, *args, **kwargs):
if not os.access(thefile, os.R_OK | os.W_OK):
raise Exception("Cannot read/write file %s " % thefile)
else:
return f(thefile, *args, **kwargs)
return rw_check
@test_rw_permissions
def add_node_info(hostsfile, ip, vmid):
    """
    Append a "<ip> worker-<vmid>.htc" entry for a newly created agent
    to *hostsfile* (typically /etc/hosts).
    """
    # Context manager guarantees the handle is closed even if the
    # write fails (the original leaked the handle on error).
    with open(hostsfile, 'a') as targetfile:
        targetfile.write("%s worker-%s.htc\n" % (ip, vmid))
def remove_node_info(hostsfile, ip):
    """
    Remove the line registering agent *ip* from *hostsfile*.

    Matches on a literal line prefix.  The original passed the raw IP
    to re.search as a pattern, so the dots matched *any* character;
    startswith avoids that and closes both file handles deterministically.
    """
    with open(hostsfile) as source:
        contentlines = source.readlines()
    with open(hostsfile, 'w') as targetfile:
        for line in contentlines:
            if not line.startswith(ip):
                targetfile.write(line)
| Python | 0.000004 | |
2f47284b44ceef3c12990a4f9621062040fe6fcb | Add day 4 solution | day4.py | day4.py | #!/usr/bin/env python
from hashlib import md5
tests = ['abcdef', 'pqrstuv']
string = 'iwrupvqb'
for idx in range(10000000):
hash = md5((string + str(idx)).encode('ascii'))
if hash.hexdigest().startswith('000000'):
print(idx)
break
| Python | 0.001388 | |
e5008fdf481a80db3b5583d35e6fd369a28cd7ce | drop session_details for sessions | example/__init__.py | example/__init__.py | from pupa.scrape import Jurisdiction
from .people import PersonScraper
class Example(Jurisdiction):
    """Minimal example jurisdiction exercising the pupa scrape API."""

    jurisdiction_id = 'ocd-jurisdiction/country:us/state:ex/place:example'
    name = 'Example Legislature'
    url = 'http://example.com'
    provides = ['people']
    parties = [{'name': party}
               for party in ('Independent', 'Green', 'Bull-Moose')]
    sessions = [{'name': '2013', '_scraped_name': '2013'}]

    def get_scraper(self, session, scraper_type):
        # Only people scraping is supported; anything else yields None.
        if scraper_type == 'people':
            return PersonScraper
        return None

    def scrape_session_list(self):
        return ['2013']
| from pupa.scrape import Jurisdiction
from .people import PersonScraper
# Pre-refactor copy: session metadata lived in `session_details` before
# being renamed to the `sessions` list.
class Example(Jurisdiction):
jurisdiction_id = 'ocd-jurisdiction/country:us/state:ex/place:example'
name = 'Example Legislature'
url = 'http://example.com'
provides = ['people']
parties = [
{'name': 'Independent' },
{'name': 'Green' },
{'name': 'Bull-Moose'}
]
# Mapping of session name -> metadata (superseded by `sessions`).
session_details = {
'2013': {'_scraped_name': '2013'}
}
def get_scraper(self, session, scraper_type):
# Only people scraping is supported; other types return None.
if scraper_type == 'people':
return PersonScraper
def scrape_session_list(self):
return ['2013']
| Python | 0 |
863fbee6edc89b68412831677391bc51e41a1e03 | add combine program | final-project/code/combine.py | final-project/code/combine.py | #!/usr/bin/env python
import argparse
import os
import re
import time
import pandas as pd
import numpy as np
# Canonical output column order: x/y coordinate pairs for every facial
# keypoint predicted by the model.
COORD_COLUMNS = [
    "left_eye_center_x", "left_eye_center_y",
    "right_eye_center_x", "right_eye_center_y",
    "left_eye_inner_corner_x", "left_eye_inner_corner_y",
    "left_eye_outer_corner_x", "left_eye_outer_corner_y",
    "right_eye_inner_corner_x", "right_eye_inner_corner_y",
    "right_eye_outer_corner_x", "right_eye_outer_corner_y",
    "left_eyebrow_inner_end_x", "left_eyebrow_inner_end_y",
    "left_eyebrow_outer_end_x", "left_eyebrow_outer_end_y",
    "right_eyebrow_inner_end_x", "right_eyebrow_inner_end_y",
    "right_eyebrow_outer_end_x", "right_eyebrow_outer_end_y",
    "nose_tip_x", "nose_tip_y",
    "mouth_left_corner_x", "mouth_left_corner_y",
    "mouth_right_corner_x", "mouth_right_corner_y",
    "mouth_center_top_lip_x", "mouth_center_top_lip_y",
    "mouth_center_bottom_lip_x", "mouth_center_bottom_lip_y"]


def missing_cols_names():
    """Return one "missing_<keypoint>" column name per keypoint, in the
    same order the keypoints appear in COORD_COLUMNS."""
    base_names = [re.sub(r'_[xy]$', '', col) for col in COORD_COLUMNS]
    # Columns come in x/y pairs, so every even index yields one keypoint.
    unique_names = base_names[::2]
    assert set(unique_names) == set(base_names)
    return ['missing_' + name for name in unique_names]
def process(in_dir, in_filename, out_filepath):
candidate_sources = (
[d for d in os.listdir(in_dir)
if os.path.isdir(os.path.join(in_dir, d))])
sources = (
[d for d in candidate_sources if
os.path.exists(os.path.join(in_dir, d, in_filename))])
def process_file(source):
y_hat_path = os.path.join(in_dir, source, in_filename)
return pd.read_csv(y_hat_path, engine='c', index_col=0)
start_time = time.time()
print "Reading files"
frames = [process_file(s) for s in sources]
print [df.shape for df in frames]
print " took {:.3f}s".format(time.time() - start_time)
start_time = time.time()
print "Concatenating Dataframes"
result = pd.concat(frames, axis=1)
all_column_names = np.concatenate((COORD_COLUMNS, missing_cols_names()))
result.sort_index(inplace=True)
result = result[all_column_names]
print " took {:.3f}s".format(time.time() - start_time)
start_time = time.time()
print "Writing output to %s" % out_filepath
result.to_csv(out_filepath)
print " took {:.3f}s".format(time.time() - start_time)
def real_main(options):
    """Combine prediction and ground-truth CSVs for both data splits."""
    datasources = {
        "valid": {
            "pred": "last_layer_val.csv",
            "actual": "y_validate.csv"
        },
        "train": {
            "pred": "last_layer_train.csv",
            "actual": "y_train.csv"
        }
    }
    for source_name, source_dict in datasources.items():
        for type_name, filename in source_dict.items():
            out_name = "combined_" + "_".join([source_name, type_name]) + '.csv'
            process(options.in_dir, filename,
                    os.path.join(options.in_dir, out_name))
def main():
    """Parse command-line options and run the combine step."""
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument(
        '-d', '--dir', dest='in_dir', help="Input Directory", required=True)
    real_main(parser.parse_args())


if __name__ == "__main__":
    # missing_cols_names()
    main()
| Python | 0 | |
04bc7c9bfe017f981a73a55b51587343725a2159 | edit 2 | Floris/dexter.py | Floris/dexter.py | serie = {
'seasons': [
{
'name': 'S01',
'year': 2006,
'director': "James Manos Jr.",
'episodes': [
{'name': 's01e01', 'title': "Pilot"},
{'name': 's01e01', 'title': "Crocodile"},
{'name': 's01e01', 'title': "Popping Cherry"},
{'name': 's01e01', 'title': "Let's give the boy a hand"}
]
},
{
'name': 'S02',
'year': 2007,
'director': "James Manos Jr.",
'episodes': [
{'name': 's01e01', 'title': ""},
{'name': 's01e01', 'title': ""},
{'name': 's01e01', 'title': ""},
{'name': 's01e01', 'title': ""}
]
},
{
'name': 'S03',
'year': 2008,
'director': "James Manos Jr.",
'episodes': [
{'name': 's01e01', 'title': ""},
{'name': 's01e01', 'title': ""},
{'name': 's01e01', 'title': ""},
{'name': 's01e01', 'title': ""}
]
},
{
'name': 'S04',
'year': 2009,
'director': "James Manos Jr.",
'episodes': [
{'name': 's01e01', 'title': ""},
{'name': 's01e01', 'title': ""},
{'name': 's01e01', 'title': ""},
{'name': 's01e01', 'title': ""}
]
},
{
'name': 'S05',
'year': 2010,
'director': "James Manos Jr.",
'episodes': [
{'name': 's01e01', 'title': ""},
{'name': 's01e01', 'title': ""},
{'name': 's01e01', 'title': ""},
{'name': 's01e01', 'title': ""}
]
},
{
'name': 'S06',
'year': 2011,
'director': "James Manos Jr.",
'episodes': [
{'name': 's01e01', 'title': ""},
{'name': 's01e01', 'title': ""},
{'name': 's01e01', 'title': ""},
{'name': 's01e01', 'title': ""}
]
},
{
'name': 'S07',
'year': 2012,
'director': "James Manos Jr.",
'episodes': [
{'name': 's01e01', 'title': ""},
{'name': 's01e01', 'title': ""},
{'name': 's01e01', 'title': ""},
{'name': 's01e01', 'title': ""}
]
},
{
'name': 'S08',
'year': 2013,
'director': "James Manos Jr.",
'episodes': [
{'name': 's01e01', 'title': ""},
{'name': 's01e01', 'title': ""},
{'name': 's01e01', 'title': ""},
{'name': 's01e01', 'title': ""}
]
}
],
'main character': 'Dexter Morgan',
'title': 'Dexter'
} | Python | 0 | |
cd9c9080a00cc7e05b5ae4574dd39ddfc86fef3b | Create enc.py | enc.py | enc.py | #!/usr/bin/python
"""
Generate encrypted messages wrapped in a self-decrypting python script
usage: python enc.py password > out.py
where password is the encryption password and out.py is the message/script file
to decrypt use: python out.py password
this will print the message to stdout.
"""
import sys, random
def encrypt(key, msg):
encrypted = []
for i, c in enumerate(msg):
key_c = ord(key[i % len(key)])-32
msg_c = ord(c)-32
encrypted.append(chr(((msg_c + key_c) % 95)+32))
return ''.join(encrypted)
def decrypt(key, enc):
msg=[]
for i, c in enumerate(enc):
key_c = ord(key[i % len(key)])-32
enc_c = ord(c)-32
msg.append(chr(((enc_c - key_c) % 95)+32))
return ''.join(msg)
def check(enc):
is_good=True
for i, c in enumerate(enc):
is_good = is_good and (32 <= ord(c) <= 126)
return is_good
def make_randstr(msg_len):
sl = []
r = random.SystemRandom()
for i in range(msg_len):
sl.append(chr(r.randint(32,126)))
return ''.join(sl)
if __name__ == '__main__':
msg = sys.stdin.read().replace("\n","\\n").replace("\t","\\t")
randstr = make_randstr(len(msg))
key = encrypt(sys.argv[1], randstr)
encrypted = encrypt(key, msg)
decrypted = decrypt(key, encrypted)
if not msg == decrypted:
print msg
print decrypted
raise Exception("Encryption Fail")
print """
#!/usr/bin/python
import sys
def encrypt(key, msg):
encrypted = []
for i, c in enumerate(msg):
key_c = ord(key[i % len(key)])-32
msg_c = ord(c)-32
encrypted.append(chr(((msg_c + key_c) % 95)+32))
return ''.join(encrypted)
def decrypt(key, enc):
msg=[]
for i, c in enumerate(enc):
key_c = ord(key[i % len(key)])-32
enc_c = ord(c)-32
msg.append(chr(((enc_c - key_c) % 95)+32))
return ''.join(msg)
if __name__ == '__main__':"""
print "\trandstr = ", repr(randstr)
print "\tenc = ", repr(encrypted)
print "\tkey = encrypt(sys.argv[1], randstr)"
print "\tdecrypted = decrypt(key, enc).replace(\"\\\\n\",\"\\n\").replace(\"\\\\t\",\"\\t\")"
print "\tprint decrypted"
| Python | 0.000001 | |
8adfedd0c30fab796fccac6ec58c09e644a91b2f | Add script to shuffle paired fastq sequences. | shuffle_fastq.py | shuffle_fastq.py | # shuffles the sequences in a fastq file
import os
import random
from Bio import SeqIO
import fileinput
from argparse import ArgumentParser
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("--fq1", required="True")
parser.add_argument("--fq2", required="True")
args = parser.parse_args()
with open(args.fq1) as in_handle:
fq1 = [x for x in SeqIO.parse(in_handle, "fastq-sanger")]
with open(args.fq2) as in_handle:
fq2 = [x for x in SeqIO.parse(in_handle, "fastq-sanger")]
order = range(len(fq1))
random.shuffle(order)
fq1_name = os.path.splitext(args.fq1)[0]
fq2_name = os.path.splitext(args.fq2)[0]
with open(fq1_name + ".shuffled.fq", "wa") as fq1_handle, open(fq2_name + ".shuffled.fq", "wa") as fq2_handle:
for i in order:
fq1_handle.write(fq1[i].format("fastq-sanger"))
fq2_handle.write(fq2[i].format("fastq-sanger"))
| Python | 0 | |
f9e11b0e9eb5a69adaa2021499acf329023aca09 | Add Python bindings | ini.py | ini.py | from ctypes import POINTER, Structure, cdll, c_char_p, c_int, c_uint, byref
from sys import argv
def _checkOpen(result, func, arguments):
if result:
return result
else:
raise IOError("Failed to open INI file: '%s'" % arguments[0])
def _checkRead(result, func, arguments):
if result == -1:
raise SyntaxError("Error occured while parsing INI file")
return result
def _init():
class _INI(Structure):
pass
IniPtr = POINTER(_INI)
lib = cdll.LoadLibrary('libini.so.0')
ini_open = lib.ini_open
ini_open.restype = IniPtr
ini_open.archtypes = (c_char_p, )
ini_open.errcheck = _checkOpen
global _ini_open
_ini_open = ini_open
ini_close = lib.ini_close
ini_close.restype = None
ini_close.archtypes = (IniPtr, )
global _ini_close
_ini_close = ini_close
ini_next_section = lib.ini_next_section
ini_next_section.restype = c_int
ini_next_section.archtypes = (IniPtr, c_char_p)
ini_next_section.errcheck = _checkRead
global _ini_next_section
_ini_next_section = ini_next_section
ini_read_pair = lib.ini_read_pair
ini_read_pair.restype = c_int
ini_read_pair.archtypes = (IniPtr, c_char_p, c_char_p)
ini_read_pair.errcheck = _checkRead
global _ini_read_pair
_ini_read_pair = ini_read_pair
_init()
class INI(object):
def __init__(self, path):
self._ini = _ini_open(path)
def __del__(self):
_ini_close(self._ini)
def next_section(self):
s = c_char_p()
res = _ini_next_section(self._ini, byref(s))
if res == 1:
return s.value
def read_pair(self):
key = c_char_p()
val = c_char_p()
res = _ini_read_pair(self._ini, byref(key), byref(val))
if res == 1:
return (key.value, val.value)
return ((),())
def main():
if len(argv) != 2:
print "Usage: ini.py [INI_FILE]..."
return
ini = INI(argv[1])
while True:
name = ini.next_section()
if not name:
print 'End.'
break
print 'In section: ' + name
while True:
key, value = ini.read_pair()
if not key:
print 'End of section.'
break
print 'Reading key: ' + key + ' value: ' + value
if __name__ == '__main__':
main()
| Python | 0.000002 | |
b7f9e5555481ba4e34bcc12beecf540d3204a15f | Fix pep8 issue | raven/contrib/celery/__init__.py | raven/contrib/celery/__init__.py | """
raven.contrib.celery
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
try:
from celery.task import task
except ImportError:
from celery.decorators import task
from celery.signals import task_failure
from raven.base import Client
class CeleryMixin(object):
def send_encoded(self, message):
"Errors through celery"
self.send_raw.delay(message)
@task(routing_key='sentry')
def send_raw(self, message):
return super(CeleryMixin, self).send_encoded(message)
class CeleryClient(CeleryMixin, Client):
pass
def register_signal(client):
@task_failure.connect(weak=False)
def process_failure_signal(sender, task_id, exception, args, kwargs,
traceback, einfo, **kw):
client.captureException(
exc_info=einfo.exc_info,
extra={
'task_id': task_id,
'task': sender,
'args': args,
'kwargs': kwargs,
})
| """
raven.contrib.celery
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
try:
from celery.task import task
except ImportError:
from celery.decorators import task
from celery.signals import task_failure
from raven.base import Client
class CeleryMixin(object):
def send_encoded(self, message):
"Errors through celery"
self.send_raw.delay(message)
@task(routing_key='sentry')
def send_raw(self, message):
return super(CeleryMixin, self).send_encoded(message)
class CeleryClient(CeleryMixin, Client):
pass
def register_signal(client):
@task_failure.connect(weak=False)
def process_failure_signal(sender, task_id, exception, args, kwargs,
traceback, einfo, **kw):
client.captureException(
exc_info=einfo.exc_info,
extra={
'task_id': task_id,
'task': sender,
'args': args,
'kwargs': kwargs,
})
| Python | 0 |
dba14e6dfbaacf79d88f1be0b831488f45fc1bfc | Create coroutine.py | gateway/src/test/coroutine.py | gateway/src/test/coroutine.py | #!/usr/bin/python3.5
import asyncio
import time
now = lambda: time.time()
async def func(x):
print('Waiting for %d s' % x)
await asyncio.sleep(x)
return 'Done after {}s'.format(x)
start = now()
coro1 = func(1)
coro2 = func(2)
coro3 = func(4)
tasks = [
asyncio.ensure_future(coro1),
asyncio.ensure_future(coro2),
asyncio.ensure_future(coro3)
]
loop = asyncio.get_event_loop()
loop.run_until_complete(asyncio.wait(tasks))
for task in tasks:
print('Task return: ', task.result())
print('Program consumes: %f s' % (now() - start))
| Python | 0.000006 | |
b9d47f54b76345f0c8f7d486282fc416ba540aee | Add specs for ArgumentParser | tests/test_argument_parser.py | tests/test_argument_parser.py | import pytest
from codeclimate_test_reporter.components.argument_parser import ArgumentParser
def test_parse_args_default():
parsed_args = ArgumentParser().parse_args([])
assert(parsed_args.file == "./.coverage")
assert(parsed_args.token is None)
assert(parsed_args.stdout is False)
assert(parsed_args.debug is False)
assert(parsed_args.version is False)
def test_parse_args_with_options():
args = ["--version", "--debug", "--stdout", "--file", "file", "--token", "token"]
parsed_args = ArgumentParser().parse_args(args)
assert(parsed_args.debug)
assert(parsed_args.file == "file")
assert(parsed_args.token == "token")
assert(parsed_args.stdout)
assert(parsed_args.version)
| Python | 0 | |
614579c38bea10798d285ec2608650d36369020a | add test demonstrating duplicate stream handling | tests/test_invalid_streams.py | tests/test_invalid_streams.py | import fixtures
import dnfile
def test_duplicate_stream():
path = fixtures.DATA / "invalid-streams" / "duplicate-stream.exe"
dn = dnfile.dnPE(path)
assert "#US" in dn.net.metadata.streams
assert dn.net.user_strings.get_us(1).value == "BBBBBBBB" | Python | 0 | |
ffb5caf83055e734baf711366b6779ecb24a013c | Add script to generate other adobe themes | addons/adobe/clone.py | addons/adobe/clone.py | #!/usr/bin/env python
from PIL import Image, ImageEnhance
import PIL.ImageOps
import fnmatch
import shutil
import os
def globPath(path, pattern):
    """Recursively collect files under *path* whose basename matches
    the shell-style *pattern* (fnmatch semantics)."""
    matches = []
    for root, _dirs, files in os.walk(path):
        matches.extend(os.path.join(root, name)
                       for name in files
                       if fnmatch.fnmatch(name, pattern))
    return matches
def inverse(inpng, outpng):
    """Write a color-inverted copy of *inpng* to *outpng*, preserving
    the alpha channel of RGBA images (PIL's invert rejects alpha)."""
    image = Image.open(inpng)
    if image.mode != 'RGBA':
        PIL.ImageOps.invert(image).save(outpng)
        return
    r, g, b, a = image.split()
    inverted = PIL.ImageOps.invert(Image.merge('RGB', (r, g, b)))
    r2, g2, b2 = inverted.split()
    Image.merge('RGBA', (r2, g2, b2, a)).save(outpng)
def darken(inpng, outpng, darkness):
    """Scale every channel of *inpng* by *darkness* and save to *outpng*."""
    source = Image.open(inpng)
    source.point(lambda p: p * darkness).save(outpng)
def bright(inpng, outpng, brightness):
    """Save a copy of *inpng* with its brightness multiplied by
    *brightness* (>1 brightens, <1 darkens)."""
    source = Image.open(inpng)
    ImageEnhance.Brightness(source).enhance(brightness).save(outpng)
def makeClone(name, brightness):
    """Clone the current theme directory into ../<name>: PNGs are
    re-brightened, .tres resources get their addon paths rewritten,
    everything else is copied verbatim."""
    outdir = os.path.join("..", name)
    if not os.path.isdir(outdir):
        os.makedirs(outdir)
    for src in globPath('.', "**"):
        dest = os.path.join(outdir, src)
        destdir = os.path.dirname(dest)
        if not os.path.isdir(destdir):
            os.makedirs(destdir)
        if src.endswith(".png"):
            bright(src, dest, brightness)
        elif src.endswith(".tres"):
            text = open(src).read()
            text = text.replace("res://addons/adobe/",
                                "res://addons/{}/".format(name))
            with open(dest, 'w') as out:
                out.write(text)
        else:
            shutil.copy(src, dest)
# Generate the two theme variants from the base "adobe" assets.
makeClone("adobe_dark", 0.65)
makeClone("adobe_light", 1.35)
| Python | 0 | |
c5ecaef62d788b69446181c6ba495cb273bf98ef | Add rolling mean scatter plot example | altair/examples/scatter_with_rolling_mean.py | altair/examples/scatter_with_rolling_mean.py | """
Scatter Plot with Rolling Mean
------------------------------
A scatter plot with a rolling mean overlay. In this example a 30 day window
is used to calculate the mean of the maximum temperature around each date.
"""
# category: scatter plots
import altair as alt
from vega_datasets import data

# Daily Seattle weather observations (date, temp_max, ...).
source = data.seattle_weather()

# Rolling mean of the daily maximum temperature over a centered
# 31-day window (15 days on each side of every date).
line = alt.Chart(source).mark_line(
color='red',
size=3
).transform_window(
rolling_mean='mean(temp_max)',
frame=[-15, 15]
).encode(
x='date:T',
y='rolling_mean:Q'
)

# Raw daily maxima as points.
points = alt.Chart(source).mark_point().encode(
x='date:T',
y=alt.Y('temp_max:Q',
axis=alt.Axis(title='Max Temp'))
)

# Layer the rolling mean on top of the scatter.
points + line
| Python | 0.000002 | |
5ec793ffb8c260a02ab7da655b5f56ff3c3f5da7 | add find_anagrams.py | algo/find_anagrams.py | algo/find_anagrams.py | words = "oolf folo oolf lfoo fool oofl fool loof oofl folo abr bra bar rab rba abr arb bar abr abr"
words = [word.strip() for word in words.split(" ")]
anagrams = {}
for word in words:
sorted_word = ''.join(sorted(word))
anagrams[sorted_word] = anagrams.get(sorted_word, []) + [word]
print anagrams
| Python | 0.000321 | |
f830c778fd06e1548da0b87aafa778834005c64e | Add fls simprocedures | angr/procedures/win32/fiber_local_storage.py | angr/procedures/win32/fiber_local_storage.py | import angr
# Key under which the emulated fiber-local-storage table is kept in
# the simulation state's globals plugin.
KEY = 'win32_fls'


def mutate_dict(state):
    """Return a fresh copy of the FLS table, re-bound into *state* so
    the copy can be mutated without aliasing predecessor states."""
    table = dict(state.globals.get(KEY, {}))
    state.globals[KEY] = table
    return table


def has_index(state, idx):
    """True if *idx* is an allocated FLS slot in *state*."""
    return KEY in state.globals and idx in state.globals[KEY]
class FlsAlloc(angr.SimProcedure):
    """Model of FlsAlloc: hand out the next fiber-local-storage index."""

    def run(self, callback):
        # Callback support is not modeled; only a NULL callback is accepted.
        if not self.state.solver.is_true(callback == 0):
            raise angr.errors.SimValueError("Can't handle callback function in FlsAlloc")
        slots = mutate_dict(self.state)
        index = len(slots) + 1
        slots[index] = self.state.se.BVV(0, self.state.arch.bits)
        return index
class FlsFree(angr.SimProcedure):
    """Model of FlsFree: freeing a slot is modeled as zeroing it."""

    def run(self, index):
        zero = self.state.se.BVV(0, self.state.arch.bits)
        return self.inline_call(FlsSetValue, (index, zero)).ret_expr
class FlsSetValue(angr.SimProcedure):
    """Model of FlsSetValue: store *value* at a concrete FLS index."""

    def run(self, index, value):
        candidates = self.state.se.any_n_int(index, 2)
        if len(candidates) != 1:
            raise angr.errors.SimValueError("Can't handle symbolic index in FlsSetValue")
        concrete = candidates[0]
        if not has_index(self.state, concrete):
            return 0
        mutate_dict(self.state)[concrete] = value
        return 1
class FlsGetValue(angr.SimProcedure):
    """Model of FlsGetValue: read the value stored at a concrete FLS index."""

    def run(self, index):
        conc_indexs = self.state.se.any_n_int(index, 2)
        if len(conc_indexs) != 1:
            raise angr.errors.SimValueError("Can't handle symbolic index in FlsGetValue")
        conc_index = conc_indexs[0]
        if not has_index(self.state, conc_index):
            return 0
        # BUG FIX: the original read self.globals[KEY][conc_index], but
        # SimProcedure has no `globals` attribute -- the FLS table lives
        # on the simulation state, as in mutate_dict()/has_index().
        return self.state.globals[KEY][conc_index]
| Python | 0 | |
1bda23c9e6fee7815617a8ad7f64c80a32e223c5 | Add script for jira story point report. | scripts/jira.py | scripts/jira.py | #!/usr/bin/python
import sys
import os
import requests
import urllib
g_user = None
g_pass = None
g_sprint = None
def usage():
    """Print the usage banner and exit with status 1.

    Relies on the module-level g_script_name set by main().
    """
    print("")
    print("usage: " + g_script_name +
          " --user username --pass password --sprint sprintname")
    print("")
    sys.exit(1)
def unknown_arg(s):
    """Report an unrecognized command-line argument, then show the
    usage text and exit."""
    print("")
    print("ERROR: Unknown argument: " + s)
    print("")
    usage()
def parse_args(argv):
    """Parse command-line options into the module globals g_user,
    g_pass and g_sprint; exits via usage() on any error or omission."""
    global g_user
    global g_pass
    global g_sprint

    i = 1
    while (i < len(argv)):
        s = argv[i]
        if (s == "--user"):
            i += 1
            # BUG FIX: ">=" not ">" -- if the option is the last token,
            # i == len(argv) and argv[i] below would raise IndexError.
            if (i >= len(argv)):
                usage()
            g_user = argv[i]
        elif (s == "--pass"):
            i += 1
            if (i >= len(argv)):
                usage()
            g_pass = argv[i]
        elif (s == "--sprint"):
            i += 1
            if (i >= len(argv)):
                usage()
            g_sprint = argv[i]
        elif (s == "-h" or s == "--h" or s == "-help" or s == "--help"):
            usage()
        else:
            unknown_arg(s)
        i += 1

    # All three options are mandatory.
    if (g_user is None):
        usage()
    if (g_pass is None):
        usage()
    if (g_sprint is None):
        usage()
def main(argv):
    """
    Main program: query JIRA for every issue in the sprint and print
    the total story points per assignee.
    @return: none
    """
    global g_script_name
    g_script_name = os.path.basename(argv[0])
    parse_args(argv)

    url = ('https://0xdata.atlassian.net/rest/api/2/search?jql=sprint="'
           + urllib.quote(g_sprint) + '"&maxResults=1000')
    r = requests.get(url, auth=(g_user, g_pass))
    if (r.status_code != 200):
        print("ERROR: status code is " + str(r.status_code))
        sys.exit(1)

    j = r.json()
    issues = j[u'issues']
    story_points_map = {}
    for issue in issues:
        name = issue[u'fields'][u'assignee'][u'name']
        story_points = issue[u'fields'][u'customfield_10004']
        # Unestimated issues come back as None; count them as 0 points.
        if story_points is None:
            story_points = 0
        else:
            story_points = float(story_points)
        if name in story_points_map:
            story_points_map[name] = story_points_map[name] + story_points
        else:
            story_points_map[name] = story_points

    for key in sorted(story_points_map.keys()):
        value = story_points_map[key]
        # BUG FIX: the original wrote print("{}: {}").format(key, value),
        # which calls .format() on print's return value (None) under
        # Python 3 and prints the raw template under Python 2.
        print("{}: {}".format(key, value))


if __name__ == "__main__":
    main(sys.argv)
| Python | 0 | |
a24095964e32da33ea946b3c28bdc829a505585d | Add lidar example | lidar.py | lidar.py | """ Copyright 2021 CyberTech Labs Ltd.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. """
import math

# Display geometry and world-to-pixel scaling for the lidar plot.
w = 240
h = 280
scale = 0.5
waitTimer = 500
moveControl = 0
tickPerSecond = 1000 // waitTimer

# Drive back and forth while painting the lidar scan, until Up is pressed.
while not brick.keys().wasPressed(KeysEnum.Up):
    moveControl = (moveControl + 1) % (10 * tickPerSecond)
    # Reverse direction whenever the slow sine wave goes negative.
    power = -100 if math.sin(moveControl / tickPerSecond) < 0 else 100
    brick.motor('M3').setPower(power)
    brick.motor('M4').setPower(power)

    # Green background with a grey reference ray pointing right.
    pic = [0x008800] * (h * w)
    for j in range(w // 2, w):
        pic[h // 2 * w + j] = 0x888888

    # Plot each echo as a black pixel, clamped to the canvas bounds.
    data = brick.lidar().read()
    for i in range(360):
        distance = data[i]
        if distance == 0:
            continue
        theta = i * math.pi / 180
        x = distance * math.cos(theta)
        y = distance * math.sin(theta)
        x_px = min(w - 1, max(0, math.floor(x * scale + w / 2)))
        y_px = min(h - 1, max(0, math.floor(y * scale + h / 2)))
        pic[y_px * w + x_px] = 0

    brick.display().show(pic, w, h, 'rgb32')
    script.wait(waitTimer)

brick.stop()
| Python | 0.000001 | |
ccf1fb5d5ef1e2b12bc49afd260b1d2d0a166a43 | Prepare v2.20.7.dev | flexget/_version.py | flexget/_version.py | """
Current FlexGet version.
This is contained in a separate file so that it can be easily read by setup.py, and easily edited and committed by
release scripts in continuous integration. Should (almost) never be set manually.
The version should always be set to the <next release version>.dev
The jenkins release job will automatically strip the .dev for release,
and update the version again for continued development.
"""
__version__ = '2.20.7.dev'
| """
Current FlexGet version.
This is contained in a separate file so that it can be easily read by setup.py, and easily edited and committed by
release scripts in continuous integration. Should (almost) never be set manually.
The version should always be set to the <next release version>.dev
The jenkins release job will automatically strip the .dev for release,
and update the version again for continued development.
"""
__version__ = '2.20.6'
| Python | 0.000003 |
950e6b975323293ed8b73a5ffe8448072e0dac27 | Fix downloader | support/download.py | support/download.py | # A file downloader.
import contextlib, os, tempfile, timer, urllib2, urlparse
class Downloader:
def __init__(self, dir=None):
self.dir = dir
# Downloads a file and removes it when exiting a block.
# Usage:
# d = Downloader()
# with d.download(url) as f:
# use_file(f)
def download(self, url, cookie=None):
suffix = os.path.splitext(urlparse.urlsplit(url)[2])[1]
fd, filename = tempfile.mkstemp(suffix=suffix, dir=self.dir)
os.close(fd)
with timer.print_time('Downloading', url, 'to', filename):
opener = urllib2.build_opener()
if cookie:
opener.addheaders.append(('Cookie', cookie))
num_tries = 2
for i in range(num_tries):
try:
f = opener.open(url)
except urllib2.URLError, e:
print('Failed to open url', url)
continue
length = f.headers.get('content-length')
if not length:
print('Failed to get content-length')
continue
length = int(length)
with open(filename, 'wb') as out:
count = 0
while count < length:
data = f.read(1024 * 1024)
count += len(data)
out.write(data)
@contextlib.contextmanager
def remove(filename):
try:
yield filename
finally:
os.remove(filename)
return remove(filename)
| # A file downloader.
import contextlib, os, tempfile, timer, urllib2, urlparse
# Pre-fix copy: identical to the fixed version except that the
# temporary file was never actually removed (os.remove commented out).
class Downloader:
def __init__(self, dir=None):
# Directory for the temporary files (None = system default).
self.dir = dir
# Downloads a file and removes it when exiting a block.
# Usage:
# d = Downloader()
# with d.download(url) as f:
# use_file(f)
def download(self, url, cookie=None):
suffix = os.path.splitext(urlparse.urlsplit(url)[2])[1]
fd, filename = tempfile.mkstemp(suffix=suffix, dir=self.dir)
os.close(fd)
with timer.print_time('Downloading', url, 'to', filename):
opener = urllib2.build_opener()
if cookie:
opener.addheaders.append(('Cookie', cookie))
num_tries = 2
for i in range(num_tries):
try:
f = opener.open(url)
except urllib2.URLError, e:
print('Failed to open url', url)
continue
length = f.headers.get('content-length')
if not length:
print('Failed to get content-length')
continue
length = int(length)
with open(filename, 'wb') as out:
count = 0
while count < length:
data = f.read(1024 * 1024)
count += len(data)
out.write(data)
@contextlib.contextmanager
def remove(filename):
try:
yield filename
finally:
# Cleanup disabled here -- the later "Fix downloader" commit
# restored os.remove(filename).
pass #os.remove(filename)
return remove(filename)
| Python | 0.000001 |
7c84bfb5a37705cc824489b0c1c5aba415ccff6b | Split out of SWDCommon.py | DebugPort.py | DebugPort.py | class DebugPort:
ID_CODES = (
0x1BA01477, # EFM32
0x2BA01477, # STM32
0x0BB11477, # NUC1xx
)
def __init__ (self, swd):
self.swd = swd
# read the IDCODE
# Hugo: according to ARM DDI 0316D we should have 0x2B.. not 0x1B.., but
# 0x1B.. is what upstream used, so leave it in here...
idcode = self.idcode()
if idcode not in DebugPort.ID_CODES:
print "warning: unexpected idcode: ", idcode
# power shit up
self.swd.writeSWD(False, 1, 0x54000000)
if (self.status() >> 24) != 0xF4:
print "error powering up system"
sys.exit(1)
# get the SELECT register to a known state
self.select(0,0)
self.curAP = 0
self.curBank = 0
def idcode (self):
return self.swd.readSWD(False, 0)
def abort (self, orunerr, wdataerr, stickyerr, stickycmp, dap):
value = 0x00000000
value = value | (0x10 if orunerr else 0x00)
value = value | (0x08 if wdataerr else 0x00)
value = value | (0x04 if stickyerr else 0x00)
value = value | (0x02 if stickycmp else 0x00)
value = value | (0x01 if dap else 0x00)
self.swd.writeSWD(False, 0, value)
def status (self):
return self.swd.readSWD(False, 1)
def control (self, trnCount = 0, trnMode = 0, maskLane = 0, orunDetect = 0):
value = 0x54000000
value = value | ((trnCount & 0xFFF) << 12)
value = value | ((maskLane & 0x00F) << 8)
value = value | ((trnMode & 0x003) << 2)
value = value | (0x1 if orunDetect else 0x0)
self.swd.writeSWD(False, 1, value)
def select (self, apsel, apbank):
value = 0x00000000
value = value | ((apsel & 0xFF) << 24)
value = value | ((apbank & 0x0F) << 4)
self.swd.writeSWD(False, 2, value)
def readRB (self):
return self.swd.readSWD(False, 3)
def readAP (self, apsel, address):
adrBank = (address >> 4) & 0xF
adrReg = (address >> 2) & 0x3
if apsel != self.curAP or adrBank != self.curBank:
self.select(apsel, adrBank)
self.curAP = apsel
self.curBank = adrBank
return self.swd.readSWD(True, adrReg)
    def writeAP (self, apsel, address, data, ignore = False):
        """Write an AP register, re-writing SELECT only when the AP/bank changed."""
        adrBank = (address >> 4) & 0xF
        adrReg = (address >> 2) & 0x3
        if apsel != self.curAP or adrBank != self.curBank:
            self.select(apsel, adrBank)
            self.curAP = apsel
            self.curBank = adrBank
        self.swd.writeSWD(True, adrReg, data, ignore)
| Python | 0.000002 | |
aaa3e5296f1e22bb5960c553f5e5b42f64d216db | Create HMM.py | HMM.py | HMM.py | #By Mohit Minhas
import math
import numpy
#from sklearn.hmm import MultinomialHMM
#from hmmn import *
from hmmpy import *
from sklearn.cluster import k_means
#from scipy.cluster.vq import kmeans2
def get_xyz_data(path, name):
    """Load the x/y/z accelerometer CSVs for gesture *name* from *path*.

    Expects three files named ``<name>_x.csv``, ``<name>_y.csv`` and
    ``<name>_z.csv`` inside *path*.  Returns a numpy array of shape
    (3, rows, cols) stacking the x, y and z planes in that order.
    """
    import os  # local import keeps this fix self-contained
    planes = []
    for axis in ('x', 'y', 'z'):
        # os.path.join instead of the hard-coded '\\' separator so the
        # loader also works on non-Windows systems.
        fname = os.path.join(path, name + '_' + axis + '.csv')
        planes.append(numpy.genfromtxt(fname, delimiter=','))
    return numpy.array(planes)
"""
def emprob(M,N):
a=1/float(N)
E=numpy.zeros((M,N))
for i in xrange(M):
for j in xrange(N):
E[i][j]=a
return E
"""
def prior_transition_matrix(K, LR):
    """Prior transition matrix for a K-state left-right HMM.

    Each state may stay put or hop forward up to LR-1 states with equal
    probability 1/LR; the final rows are renormalised so every row still
    sums to one.  ``range`` replaces the Python-2-only ``xrange`` so the
    module also runs on Python 3 (behaviour is unchanged on Python 2).
    """
    w = 1 / float(LR)
    # Build on a (K+1)x(K+1) scratch matrix so the loops can keep the
    # original 1-based (MATLAB-style) indexing; row/col 0 is dropped below.
    P = numpy.multiply(w, numpy.identity(K + 1))
    for i in range(1, K - (LR - 1) + 1):
        for j in range(1, LR - 1 + 1):
            P[i][i + j] = w
    # Last LR-1 states cannot hop the full distance: spread the mass
    # evenly over the remaining reachable states.
    for i in range(K - (LR - 2), K + 1):
        for j in range(1, K - i + 1 + 1):
            P[i][i + (j - 1)] = 1 / float(K - i + 1)
    return P[1:, 1:]
def get_point_centroids(indata, K, D):
    """Cluster the per-frame mean positions into K centroids with k-means.

    indata has shape (D, n_frames, n_samples); the sample axis is averaged
    away so each frame contributes one D-dimensional mean point.
    ``range`` replaces the Python-2-only ``xrange`` so the module also runs
    on Python 3 (behaviour unchanged on Python 2).
    """
    mean = numpy.zeros((indata.shape[1], D))
    for n in range(0, (indata.shape[1])):
        for i in range(0, (indata.shape[2])):
            for j in range(0, D):
                mean[n][j] = mean[n][j] + indata[j][n][i]
        mean[n] = mean[n] / (indata.shape[2])
    # k_means returns (centroids, labels, inertia); only the centroid
    # coordinates are needed.  Ordering is arbitrary (random init);
    # raise n_jobs to speed this up.
    (centroids, x, y) = k_means(mean, K)
    return centroids
def get_point_clusters(data, centroids, D):
    """Assign every (frame, sample) point to its nearest centroid.

    data has shape (3, n_frames, n_samples); returns an integer array of
    shape (n_samples, n_frames) of centroid indices.  The distance is
    hard-coded to the three spatial planes (the original carried an
    ``#if (D==3)`` placeholder for other dimensionalities).
    ``range`` replaces the Python-2-only ``xrange`` for Python 3 support.
    """
    XClustered = [[] for x in range(data.shape[2])]
    K = centroids.shape[0]
    for n in range(0, (data.shape[1])):
        for i in range(0, (data.shape[2])):
            temp = numpy.zeros((K, 1))
            for j in range(0, K):
                # Euclidean distance between the point and centroid j.
                temp[j] = math.sqrt(
                    math.pow((centroids[j][0] - data[0][n][i]), 2)
                    + math.pow((centroids[j][1] - data[1][n][i]), 2)
                    + math.pow((centroids[j][2] - data[2][n][i]), 2))
            I = numpy.argmin(temp)
            XClustered[i].append(I)
    return numpy.array(XClustered)
def pr_hmm(o, a, b, pi):
    """Log-likelihood of an observation sequence under an HMM (forward algorithm).

    o  -- sequence of T observation symbols (0-based column indices into b)
    a  -- n x n state transition matrix, a[i][j] = P(j | i)
    b  -- emission matrix, b[state][symbol]
    pi -- length-n initial state distribution
    Returns log P(o | a, b, pi).

    The original MATLAB port never initialised the trellis ``m`` (NameError
    on every call) and mixed 1-based indexing with Python's 0-based
    sequences (reading o[T] out of range); both defects are fixed here.
    """
    n = len(a[0])
    T = len(o)
    m = numpy.zeros((T, n))  # forward trellis, m[t][state]
    for i in range(n):
        m[0][i] = b[i][o[0]] * pi[i]
    for t in range(T - 1):
        for j in range(n):
            z = 0.0
            for i in range(n):
                z = z + a[i][j] * m[t][i]
            m[t + 1][j] = z * b[j][o[t + 1]]
    p = 0.0
    for i in range(n):
        p = p + m[T - 1][i]
    return math.log(p)
# Model hyper-parameters: D spatial dimensions, M hidden states,
# N observation symbols, LR the left-right transition step width.
D=3
M=12
N=8
LR=2
train_gesture='x'
test_gesture='x'
gestureRecThreshold = 0
# Load raw accelerometer data, quantise it against N k-means centroids,
# and build the left-right prior transition matrix.
training = get_xyz_data('data/train',train_gesture)
testing = get_xyz_data('data/test',test_gesture)
centroids = get_point_centroids(training,N,D)
ATrainBinned = get_point_clusters(training,centroids,D)
ATestBinned = get_point_clusters(testing,centroids,D)
pP = prior_transition_matrix(M,LR)
#W=emprob(M,N)
#print ATrainBinned
#model=MultinomialHMM(n_components=M,startprob_prior=pP,n_iter=50)
#model.n_symbols=N
#print model.n_symbols
#model.fit(ATrainBinned)
#model=MultinomialHMM(ATrainBinned,pP,[1:N]',M,cyc,.00001) #ENTER
#logprob=model.score(ATestBinned)
#print logprob
# Train with Baum-Welch and score the first test sequence using the
# hmmpy implementation (HMM/baum_welch/forward come from the * import).
hmm=HMM(n_states=M,V=[0,1,2,3,4,5,6,7],A=pP)
print 'TRAINING'
print
baum_welch(hmm,ATrainBinned,graph=False,verbose=True)
print
print 'TESTING'
print
b=forward(hmm,ATestBinned[0])
print b
#model=DiscreteHmm(numstates=M,numclasses=N)
#model.learn(ATrainBinned,numsteps=ATrainBinned.shape[0])
"""
sumLik = 0.0
minLik = float('inf')
for j in xrange(0,len(ATrainBinned)):
    lik = pr_hmm(ATrainBinned[j],P,E.T,Pi)
    if (lik < minLik):
        minLik = lik
    sumLik = sumLik + lik
gestureRecThreshold = 2.0*sumLik/len(ATrainBinned)
print'\n\n********************************************************************\n'
print'Testing {0) sequences for a log likelihood greater than {1)\n'.format(len(ATestBinned),gestureRecThreshold)
print'********************************************************************\n\n'
recs = 0
tLL = numpy.zeros((len(ATestBinned),1))
for j in xrange(1,len(ATestBinned)):
    tLL[j][1] = pr_hmm(ATestBinned[j],P,E.T,Pi)
    if (tLL[j][1] > gestureRecThreshold):
        recs = recs + 1
        print 'Log likelihood: {0) > {1) (threshold) -- FOUND {2) GESTURE!\n'.format(tLL[j][1],gestureRecThreshold,test_gesture)
    else:
        print 'Log likelihood: {0} < {1} (threshold) -- NO {2} GESTURE.\n'.format(tLL[j][1],gestureRecThreshold,test_gesture)
print'Recognition success rate: {0) percent\n'.format(100*recs/len(ATestBinned))
"""
| Python | 0.000001 | |
bdd2fcfdc9444cdf2d74ac9397bd01bfe34f102a | Create a test for a quadtree-based gravity forward and inverse model. This ensures equivalent source modelling is accessible. | tests/pf/test_grav_inversion_linear_quadtree.py | tests/pf/test_grav_inversion_linear_quadtree.py | from __future__ import print_function
import unittest
import numpy as np
from SimPEG import (
utils,
maps,
regularization,
data_misfit,
optimization,
inverse_problem,
directives,
inversion,
)
from discretize.utils import mkvc, mesh_builder_xyz, refine_tree_xyz
from SimPEG.potential_fields import gravity
import shutil
np.random.seed(43)
class GravInvLinProblemTest(unittest.TestCase):
def setUp(self):
def simulate_topo(x, y, amplitude=50, scale_factor=100):
# Create synthetic Gaussian topography from a function
return amplitude * np.exp(
-0.5 * ((x / scale_factor) ** 2.0 + (y / scale_factor) ** 2.0)
)
# Create grid of points for topography
[xx, yy] = np.meshgrid(
np.linspace(-200.0, 200.0, 50), np.linspace(-200.0, 200.0, 50)
)
zz = simulate_topo(xx, yy)
topo_xyz = np.c_[mkvc(xx), mkvc(yy), mkvc(zz)]
# Create and array of observation points
altitude = 5
xr = np.linspace(-100.0, 100.0, 20)
yr = np.linspace(-100.0, 100.0, 20)
X, Y = np.meshgrid(xr, yr)
Z = simulate_topo(X, Y) + altitude
# Create a gravity survey
xyzLoc = np.c_[mkvc(X.T), mkvc(Y.T), mkvc(Z.T)]
rxLoc = gravity.Point(xyzLoc)
srcField = gravity.SourceField([rxLoc])
survey = gravity.Survey(srcField)
# Create a quadtree mesh
h = [5, 5]
padDist = np.ones((2, 2)) * 100
nCpad = [2, 4]
mesh = mesh_builder_xyz(
topo_xyz[:, :2], h, padding_distance=padDist, mesh_type="TREE",
)
mesh = refine_tree_xyz(
mesh,
xyzLoc[:, :2],
method="radial",
octree_levels=nCpad,
octree_levels_padding=nCpad,
finalize=True,
)
# elevations are Nx2 array of [bottom-southwest, top-northeast] corners
# Set tne to topo height at cell centers
z_tne = simulate_topo(mesh.cell_centers[:, 0], mesh.cell_centers[:, 1])
# Set bsw to 50 m below the lowest z_tne
z_bsw = np.full_like(z_tne, fill_value=z_tne.min() - 50.0)
mesh_elevations = np.c_[z_bsw, z_tne]
# Create a density model and generate data,
# with a block in a half space
self.model = utils.model_builder.addBlock(
mesh.gridCC, np.zeros(mesh.nC), np.r_[-20, -20], np.r_[20, 20], 0.3,
)
# Create reduced identity map. All cells are active in an quadtree
idenMap = maps.IdentityMap(nP=mesh.nC)
# Create the forward model operator
self.sim = gravity.Simulation3DIntegral(
mesh, survey=survey, rhoMap=idenMap, store_sensitivities="ram",
)
# Define the mesh cell heights independent from mesh
self.sim.Zn = mesh_elevations
data = self.sim.make_synthetic_data(
self.model, relative_error=0.0, noise_floor=0.01, add_noise=True
)
# Create a regularization
reg = regularization.Sparse(mesh, mapping=idenMap)
reg.norms = np.c_[0, 0, 0, 0]
reg.mref = np.zeros(mesh.nC)
# Data misfit function
dmis = data_misfit.L2DataMisfit(simulation=self.sim, data=data)
# Add directives to the inversion
opt = optimization.ProjectedGNCG(
maxIter=15, lower=-1.0, upper=1.0, maxIterLS=5, maxIterCG=5, tolCG=1e-4,
)
invProb = inverse_problem.BaseInvProblem(dmis, reg, opt, beta=1e3)
# Build directives
IRLS = directives.Update_IRLS(
f_min_change=1e-3, max_irls_iterations=20, beta_tol=1e-1, beta_search=False
)
sensitivity_weights = directives.UpdateSensitivityWeights()
update_Jacobi = directives.UpdatePreconditioner()
self.inv = inversion.BaseInversion(
invProb, directiveList=[IRLS, sensitivity_weights, update_Jacobi]
)
def test_grav_inverse(self):
# Run the inversion
mrec = self.inv.run(self.model)
residual = np.linalg.norm(mrec - self.model) / np.linalg.norm(self.model)
print(residual)
self.assertLess(residual, 0.7)
def tearDown(self):
# Clean up the working directory
if self.sim.store_sensitivities == "disk":
shutil.rmtree(self.sim.sensitivity_path)
if __name__ == "__main__":
unittest.main()
| Python | 0 | |
570aaad3da93f9252efb787a58bbe5151eff93d4 | Create run_ToolKit.py | 0.0.5/run_ToolKit.py | 0.0.5/run_ToolKit.py | # run_ToolKit.py
from modulos import main
if __name__ == "__main__":
    # Delegate to the package entry point.
    main.main()
| Python | 0.000002 | |
857ccf7f6cfed4e8663d635c119f8683c9ee09e0 | Add random choice plugin (with_random_choice) | lib/ansible/runner/lookup_plugins/random_choice.py | lib/ansible/runner/lookup_plugins/random_choice.py | # (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import subprocess
from ansible import utils, errors
import random
# useful for introducing chaos ... or just somewhat reasonably fair selection
# amongst available mirrors
#
# tasks:
# - debug: msg=$item
# with_random_choice:
# - one
# - two
# - three
class LookupModule(object):
    """Lookup plugin returning one randomly chosen element of the given terms."""
    def __init__(self, basedir=None, **kwargs):
        # basedir is kept for API parity with the other lookup plugins.
        self.basedir = basedir

    def run(self, terms, **kwargs):
        """Return a single-element list with one random term.

        A bare string counts as a one-item list, making the result
        deterministic in that case.  ``basestring`` only exists on
        Python 2, so fall back to ``str`` for Python 3 compatibility.
        """
        try:
            string_types = basestring  # Python 2
        except NameError:
            string_types = str         # Python 3
        if isinstance(terms, string_types):
            terms = [terms]
        return [random.choice(terms)]
| Python | 0 | |
b3f91806b525ddef50d541f937bed539f9bae20a | Use cache backend for sessions in deployed settings. | mezzanine/project_template/deploy/live_settings.py | mezzanine/project_template/deploy/live_settings.py |
DATABASES = {
"default": {
# Ends with "postgresql_psycopg2", "mysql", "sqlite3" or "oracle".
"ENGINE": "django.db.backends.postgresql_psycopg2",
# DB name or path to database file if using sqlite3.
"NAME": "%(proj_name)s",
# Not used with sqlite3.
"USER": "%(proj_name)s",
# Not used with sqlite3.
"PASSWORD": "%(db_pass)s",
# Set to empty string for localhost. Not used with sqlite3.
"HOST": "127.0.0.1",
# Set to empty string for default. Not used with sqlite3.
"PORT": "",
}
}
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTOCOL", "https")
CACHE_MIDDLEWARE_SECONDS = 60
CACHES = {
"default": {
"BACKEND": "django.core.cache.backends.memcached.MemcachedCache",
"LOCATION": "127.0.0.1:11211",
}
}
SESSION_ENGINE = "django.contrib.sessions.backends.cache"
|
DATABASES = {
"default": {
# Ends with "postgresql_psycopg2", "mysql", "sqlite3" or "oracle".
"ENGINE": "django.db.backends.postgresql_psycopg2",
# DB name or path to database file if using sqlite3.
"NAME": "%(proj_name)s",
# Not used with sqlite3.
"USER": "%(proj_name)s",
# Not used with sqlite3.
"PASSWORD": "%(db_pass)s",
# Set to empty string for localhost. Not used with sqlite3.
"HOST": "127.0.0.1",
# Set to empty string for default. Not used with sqlite3.
"PORT": "",
}
}
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTOCOL", "https")
CACHE_MIDDLEWARE_SECONDS = 60
CACHES = {
"default": {
"BACKEND": "django.core.cache.backends.memcached.MemcachedCache",
"LOCATION": "127.0.0.1:11211",
}
}
| Python | 0 |
62545500553443863d61d9e5ecc80307c745a227 | Add migration to remove non-{entity,classifier} dimensions from the database, and to recompute cubes if necessary | migrate/20110917T143029-remove-value-dimensions.py | migrate/20110917T143029-remove-value-dimensions.py | import logging
from openspending.lib import cubes
from openspending import migration, model, mongo
log = logging.getLogger(__name__)
def up():
group_args = ({'dataset':1}, {}, {'num': 0},
'function (x, acc) { acc.num += 1 }')
before = mongo.db.dimension.group(*group_args)
dims = model.dimension.find({'type': {'$nin': ['entity', 'classifier']}})
for d in dims:
log.info("Removing dimension: %s", d)
model.dimension.remove({'_id': d['_id']})
after = mongo.db.dimension.group(*group_args)
for bf, af in zip(before, after):
if int(bf['num']) != int(af['num']):
log.warn("Number of dimensions for dimension '%s' "
"changed. Recomputing cubes.", bf['dataset'])
ds = model.dataset.find_one({'name': bf['dataset']})
cubes.Cube.update_all_cubes(ds)
def down():
raise migration.IrreversibleMigrationError("Can't add back dimension "
"fields that we dropped!") | Python | 0.000001 | |
87cbdd44ee17ecc5951b6f062a160c9fad465053 | add BaiduMap | BaiduMap/__init__.py | BaiduMap/__init__.py | import png, numpy
import matplotlib.pyplot as plt
import json, urllib.request, collections.abc, os, sys
from urllib.parse import quote_plus
from collections import OrderedDict
AK = None
SERVER_URL = None
__location__ = os.path.join(os.getcwd(), os.path.dirname(os.path.realpath(__file__)))
with open(os.path.join(__location__, 'config.json'), 'r') as config:
x = json.load(config)
AK = x['ak']
SERVER_URL = x['server']
BASE_URL = "%s?ak=%s" % (SERVER_URL, AK)
class URLBuilder:
    """Incrementally build a URL query string on top of a base URL.

    Parameters registered via addParam() are kept in insertion order and
    only joined into a URL when generateURL() is called.
    """
    def __init__(self, base_url):
        if '?' not in base_url:
            base_url += '?'
        self.__base_url = base_url
        self.__url = base_url
        self.__attr = OrderedDict()

    def __addParam(self, name, value):
        # Only insert '&' when a parameter is already present; the original
        # produced "?&name=value" for the first parameter of a bare base URL.
        if not self.__url.endswith(('?', '&')):
            self.__url += '&'
        self.__url += "%s=%s" % (name, value)

    def __resetURL(self):
        self.__url = self.__base_url

    def addParam(self, name, value):
        """Set (or overwrite) a query parameter; names/values are stringified."""
        self.__attr[str(name)] = str(value)

    def removeParam(self, name):
        """Drop a parameter if present; unknown names are ignored."""
        self.__attr.pop(str(name), None)

    def generateURL(self):
        """Render the base URL plus all currently registered parameters."""
        self.__resetURL()
        for key, value in self.__attr.items():
            self.__addParam(key, value)
        return self.__url
def buildURL(width=None, height=None, certer=[], zoom=None, copyright=1, scale=2, bbox=[], markers=[], markerStyles=[], labels=[], labelStyles=[], paths=[], pathStyles=[]):
    """Assemble a static-map request URL from the given options.

    Only the simple options are encoded; the marker/label/path arguments
    are accepted for interface stability but not implemented yet.
    *certer* (sic -- the misspelling is kept for call compatibility) is
    the map centre, either a "lat,lng" string or a (lat, lng) pair.
    """
    url = URLBuilder(BASE_URL)
    if width:
        url.addParam('width', quote_plus(str(width)))
    if height:
        url.addParam('height', quote_plus(str(height)))
    if certer:
        # Fixed: the original referenced an undefined name ``center`` here
        # and raised NameError whenever a centre was supplied.
        if isinstance(certer, str):
            url.addParam('center', quote_plus(certer))
        elif isinstance(certer, collections.abc.Sequence):
            # '%'-formatting needs a tuple, not a list.
            url.addParam('center', quote_plus('%f,%f' % tuple(certer)))
    if zoom:
        url.addParam('zoom', quote_plus(str(zoom)))
    if copyright:
        url.addParam('copyright', quote_plus(str(copyright)))
    if scale:
        url.addParam('scale', quote_plus(str(scale)))
    if bbox:
        # Same tuple() fix as for the centre coordinates.
        url.addParam('bbox', quote_plus('%f,%f,%f,%f' % tuple(bbox)))
    if markers:
        pass  # not implemented
    if markerStyles:
        pass  # not implemented
    if labels:
        pass  # not implemented
    if labelStyles:
        pass  # not implemented
    if paths:
        pass  # not implemented
    if pathStyles:
        pass  # not implemented
    return url.generateURL()
def fetchImage(url):
    """Download a PNG from *url* and return it as a (rows, cols, planes) float array."""
    reader = png.Reader(file=urllib.request.urlopen(url))
    column_count, row_count, pixel_rows, meta = reader.asFloat()
    plane_count = meta['planes']
    flat = numpy.vstack([numpy.float_(row) for row in pixel_rows])
    return numpy.reshape(flat, (row_count, column_count, plane_count))
def plotMap(image):
    """Display the fetched map image with matplotlib."""
    # TODO: transparency and alignment tuning (original note was
    # mojibake-encoded Chinese, roughly "transparent / align").
    plt.imshow(image, alpha=0.5)
    plt.show()
c599b5d470cf80b964af1b261a11540516e120df | Add Dehnen smoothing as a wrapper | galpy/potential_src/DehnenSmoothWrapperPotential.py | galpy/potential_src/DehnenSmoothWrapperPotential.py | ###############################################################################
# DehnenSmoothWrapperPotential.py: Wrapper to smoothly grow a potential
###############################################################################
from galpy.potential_src.WrapperPotential import SimpleWrapperPotential
class DehnenSmoothWrapperPotential(SimpleWrapperPotential):
    """Wrapper that grows another potential's amplitude smoothly in time
    using Dehnen's quintic smoothing polynomial."""
    def __init__(self,amp=1.,pot=None,tform=-4.,tsteady=None,ro=None,vo=None):
        """
        NAME:
           __init__
        PURPOSE:
           initialize a DehnenSmoothWrapper Potential
        INPUT:
           amp - amplitude to be applied to the potential (default: 1.)
           pot - Potential instance or list thereof; the amplitude of this will be grown by this wrapper
           tform - start of growth
           tsteady - time from tform at which the potential is fully grown (default: -tform/2, st the perturbation is fully grown at tform/2)
        OUTPUT:
           (none)
        HISTORY:
           2017-06-26 - Started - Bovy (UofT)
        """
        SimpleWrapperPotential.__init__(self,amp=amp,pot=pot,ro=ro,vo=vo)
        self._tform= tform
        if tsteady is None:
            self._tsteady= self._tform/2.
        else:
            self._tsteady= self._tform+tsteady
        # No C implementation of this wrapper (yet).
        self.hasC= False
        self.hasC_dxdv= False
    def _smooth(self,t):
        """Smoothing factor in [0, 1]: 0 before tform, the quintic
        polynomial in xi = 2*(t-tform)/(tsteady-tform)-1 in between,
        and 1 once t >= tsteady."""
        #Calculate relevant time
        if t < self._tform:
            smooth= 0.
        elif t < self._tsteady:
            deltat= t-self._tform
            xi= 2.*deltat/(self._tsteady-self._tform)-1.
            smooth= (3./16.*xi**5.-5./8*xi**3.+15./16.*xi+.5)
        else: #bar is fully on
            smooth= 1.
        return smooth
    def _wrap(self,attribute,R,Z,phi=0.,t=0.):
        """Evaluate the wrapped potential's *attribute*, scaled by the
        current smoothing factor."""
        return self._smooth(t)\
            *self._wrap_pot_func(attribute)(self._pot,R,Z,phi=phi,t=t)
| Python | 0 | |
ddc61e8158fb1dfb33b30a19f7e9cd3be8eaf3a2 | add app.py | app.py | app.py | from flask import Flask
app = Flask(__name__)
if __name__ == "__main__":
    # Development settings: 0.0.0.0 exposes the server on all interfaces
    # and debug=True enables the reloader/debugger -- not for production.
    app.run(host='0.0.0.0', port=5000, debug=True)
| Python | 0.000003 | |
cab4b903b986a7f8bfe4955bf80190bb7f33b012 | Create bot.py | bot.py | bot.py | # -*- coding: utf-8 -*-
import twitter_key
import tweepy
import markovtweet
def auth():
    """Build an authenticated tweepy API client from the keys in twitter_key."""
    # Renamed the local from ``auth`` to ``handler``: it shadowed this
    # function's own name inside its body.
    handler = tweepy.OAuthHandler(twitter_key.CONSUMER_KEY, twitter_key.CONSUMER_SECRET)
    handler.set_access_token(twitter_key.ACCESS_TOKEN, twitter_key.ACCESS_SECRET)
    return tweepy.API(handler)
if __name__ == "__main__":
    api = auth()
    # Hand the authenticated client to the markov tweet loop.
    markovtweet.markovtweet(api)
| Python | 0.000001 | |
f1b11d2b111ef0b70f0babe6e025056ff1a68acc | Create InMoov.LeapMotionHandTracking.py | home/Alessandruino/InMoov.LeapMotionHandTracking.py | home/Alessandruino/InMoov.LeapMotionHandTracking.py | i01 = Runtime.createAndStart("i01","InMoov")
#Set here the port of your InMoov Left Hand Arduino , in this case COM5
leftHand = i01.startLeftHand("COM5")
#==============================
#Set the min/max values for fingers (thumb uses setMinMax, the other
#fingers use map -- both constrain the servo range per finger).
i01.leftHand.thumb.setMinMax( 0, 61)
i01.leftHand.index.map(0 , 89)
i01.leftHand.majeure.map(0 , 89)
i01.leftHand.ringFinger.map(0 , 104)
i01.leftHand.pinky.map(0 , 91)
#===============================
#Start the Leap Tracking
#NOTE(review): "starLeapTracking" looks misspelled but is presumably the
#MyRobotLab API name -- confirm before renaming.
i01.leftHand.starLeapTracking()
#stop leap tracking
#i01.leftHand.stopLeapTracking()
| Python | 0 | |
ddf940dc932c04ebd287085ec7d035a93ac5598f | add findmyiphone flask api | ios.py | ios.py | from pyicloud import PyiCloudService
from flask import Flask, jsonify, request, abort
api = PyiCloudService('nikisweeting@gmail.com')
app = Flask(__name__)
@app.route('/devices', methods=['GET'])
def device_list():
    """Return JSON metadata and last known location for every iCloud device."""
    devices = []
    for id, device in api.devices.items():
        # location() queries the Find-My-iPhone service; it can return
        # None when no fix is available, hence the conditional below.
        location_info = device.location()
        device_json = {
            'id': id,
            'name': device.data['name'],
            'model': device.data['deviceDisplayName'],
            'is_desktop': device.data['isMac'],
            'location': {
                'lat': location_info['latitude'],
                'lng': location_info['longitude'],
                'source': location_info['positionType'],
                'accuracy': location_info['horizontalAccuracy'],
                'is_old': location_info['isOld'],
                'is_accurate': not location_info['isInaccurate'],
                'timestamp': location_info['timeStamp'],
            } if location_info else None,
        }
        devices.append(device_json)
    return jsonify({'devices': devices})
@app.route('/alert', methods=['POST'])
def alert():
    """Play a sound or show a message on the device identified by form field 'id'."""
    target = api.devices.get(request.form['id'])
    subject = request.form.get('subject', '').strip()
    message = request.form.get('message', '').strip()
    sounds = request.form.get('sounds')
    if not target:
        abort(404)
    if message:
        target.display_message(subject=subject, message=message, sounds=bool(sounds))
    else:
        target.play_sound(subject=subject)
    return jsonify({'success': True, 'errors': []})
if __name__ == '__main__':
    # Flask development server only; use a WSGI server in production.
    app.run()
| Python | 0.000001 | |
81791b79fca6b23436518cf94b79175bd6ec06e7 | Create lcd.py | lcd.py | lcd.py | #!/usr/bin/python
#--------------------------------------
# ___ ___ _ ____
# / _ \/ _ \(_) __/__ __ __
# / , _/ ___/ /\ \/ _ \/ // /
# /_/|_/_/ /_/___/ .__/\_, /
# /_/ /___/
#
# lcd_i2c.py
# LCD test script using I2C backpack.
# Supports 16x2 and 20x4 screens.
#
# Author : Matt Hawkins
# Date : 20/09/2015
#
# http://www.raspberrypi-spy.co.uk/
#
# Copyright 2015 Matt Hawkins
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#--------------------------------------
import smbus
import time
import memcache
mc = memcache.Client(['127.0.0.1:11211'], debug=0)
# Define some device parameters
I2C_ADDR = 0x27 # I2C device address
LCD_WIDTH = 16 # Maximum characters per line
# Define some device constants
LCD_CHR = 1 # Mode - Sending data
LCD_CMD = 0 # Mode - Sending command
LCD_LINE_1 = 0x80 # LCD RAM address for the 1st line
LCD_LINE_2 = 0xC0 # LCD RAM address for the 2nd line
LCD_LINE_3 = 0x94 # LCD RAM address for the 3rd line
LCD_LINE_4 = 0xD4 # LCD RAM address for the 4th line
LCD_BACKLIGHT = 0x08 # On
#LCD_BACKLIGHT = 0x00 # Off
ENABLE = 0b00000100 # Enable bit
# Timing constants
E_PULSE = 0.0005
E_DELAY = 0.0005
#Open I2C interface
#bus = smbus.SMBus(0) # Rev 1 Pi uses 0
bus = smbus.SMBus(1) # Rev 2 Pi uses 1
def lcd_init():
    # Initialise display: the 0x33/0x32 pair switches the controller into
    # 4-bit mode, then configure entry mode, display on, line/font, clear.
    lcd_byte(0x33,LCD_CMD) # 110011 Initialise
    lcd_byte(0x32,LCD_CMD) # 110010 Initialise
    lcd_byte(0x06,LCD_CMD) # 000110 Cursor move direction
    lcd_byte(0x0C,LCD_CMD) # 001100 Display On,Cursor Off, Blink Off
    lcd_byte(0x28,LCD_CMD) # 101000 Data length, number of lines, font size
    lcd_byte(0x01,LCD_CMD) # 000001 Clear display
    time.sleep(E_DELAY)
def lcd_byte(bits, mode):
    # Push one byte to the display as two 4-bit nibbles over I2C.
    #   bits -- the data byte
    #   mode -- LCD_CHR (1) for data, LCD_CMD (0) for a command
    high_nibble = mode | (bits & 0xF0) | LCD_BACKLIGHT
    low_nibble = mode | ((bits << 4) & 0xF0) | LCD_BACKLIGHT
    for nibble in (high_nibble, low_nibble):
        bus.write_byte(I2C_ADDR, nibble)
        lcd_toggle_enable(nibble)
def lcd_toggle_enable(bits):
    # Toggle enable: pulse the ENABLE bit so the controller latches the
    # nibble currently on the data lines; E_DELAY/E_PULSE give settle time.
    time.sleep(E_DELAY)
    bus.write_byte(I2C_ADDR, (bits | ENABLE))
    time.sleep(E_PULSE)
    bus.write_byte(I2C_ADDR,(bits & ~ENABLE))
    time.sleep(E_DELAY)
def lcd_string(message, line):
    # Write one display line: pad (or truncate) the text to LCD_WIDTH chars.
    padded = message.ljust(LCD_WIDTH, " ")
    lcd_byte(line, LCD_CMD)
    for ch in padded[:LCD_WIDTH]:
        lcd_byte(ord(ch), LCD_CHR)
def main():
    # Main program block: show the six memcached distance readings on the
    # two LCD lines, alternating with a banner.
    lcd_init()
    while True:
        # Keys d1..d6 hold the sensor values.
        # NOTE(review): mc.get() returns None for a missing key, which
        # makes round() raise -- confirm the producer always sets d1-d6.
        fl = mc.get("d1")
        fc = mc.get("d2")
        fr = mc.get("d3")
        bl = mc.get("d6")
        bc = mc.get("d5")
        br = mc.get("d4")
        # Build real strings: the original comma expressions created
        # tuples, and lcd_string() then crashed on tuple.ljust().
        f = "FL:%d C:%d R:%d" % (round(fl), round(fc), round(fr))
        b = "BL:%d C:%d R:%d" % (round(bl), round(bc), round(br))
        lcd_string(f, LCD_LINE_1)
        lcd_string(b, LCD_LINE_2)
        time.sleep(0.5)
        # Send some more text
        lcd_string("> RPiSpy", LCD_LINE_1)
        lcd_string("> I2C LCD", LCD_LINE_2)
        time.sleep(3)
if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        pass
    finally:
        # Clear the display on exit.
        lcd_byte(0x01, LCD_CMD)
| Python | 0.000023 | |
bb7bb2e12d3ccbb55f0b0e6db5d0cb79c3ea8079 | Add missing migration for profile items. | km_api/know_me/migrations/0013_remove_profileitem_media_resource.py | km_api/know_me/migrations/0013_remove_profileitem_media_resource.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-08-01 14:16
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Remove the ``media_resource`` field from the ``profileitem`` model."""
    dependencies = [
        ('know_me', '0012_emergencyitem'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='profileitem',
            name='media_resource',
        ),
    ]
| Python | 0 | |
a0b9d1977b2aa2366a334231b4dd5dbe047d7122 | Add testcase for Category.can_create_events | indico/modules/categories/models/categories_test.py | indico/modules/categories/models/categories_test.py | # This file is part of Indico.
# Copyright (C) 2002 - 2016 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
import pytest
from indico.core.db.sqlalchemy.protection import ProtectionMode
@pytest.mark.parametrize(('protection_mode', 'creation_restricted', 'acl', 'allowed'), (
    # not restricted
    (ProtectionMode.public, False, None, True),
    (ProtectionMode.protected, False, None, False),
    (ProtectionMode.protected, False, {'read_access': True}, True),
    # restricted - authorized
    (ProtectionMode.protected, True, {'full_access': True}, True),
    (ProtectionMode.protected, True, {'roles': {'create'}}, True),
    # restricted - not authorized
    (ProtectionMode.public, True, None, False),
    (ProtectionMode.protected, True, None, False),
    (ProtectionMode.protected, True, {'read_access': True}, False)
))
def test_can_create_events(dummy_category, dummy_user, protection_mode, creation_restricted, acl, allowed):
    """Permission matrix: protection mode x creation restriction x user ACL."""
    dummy_category.protection_mode = protection_mode
    dummy_category.event_creation_restricted = creation_restricted
    if acl:
        dummy_category.update_principal(dummy_user, **acl)
    assert dummy_category.can_create_events(dummy_user) == allowed
def test_can_create_events_no_user(dummy_category):
    """Anonymous (None) users can never create events."""
    assert not dummy_category.can_create_events(None)
| Python | 0.000021 | |
eb54c75c0f5b7e909177777ce935358b7ac25def | Add zip and unzip to zip_file | py_sys/file/zip_file.py | py_sys/file/zip_file.py | # coding=utf-8
import os
import zipfile
class ZipFile(object):
    """Minimal helper to zip a file/directory tree and unzip an archive."""
    def __init__(self):
        pass

    def zip(self, dir_path, zip_file):
        """Recursively archive *dir_path* (a file or a directory) into *zip_file*.

        Entries are stored with paths relative to *dir_path*.  Fixes from
        the original: os.walk is used once (the old nested walk_dir both
        recursed itself *and* re-walked each subdirectory by bare name,
        duplicating entries), the deflate constant is the public
        zipfile.ZIP_DEFLATED instead of the private zipfile.zlib.DEFLATED,
        and the archive is closed even if write() raises.
        """
        file_list = []
        if os.path.isfile(dir_path):
            file_list.append(dir_path)
        else:
            for root, dirs, files in os.walk(dir_path):
                for _file in files:
                    file_list.append(os.path.join(root, _file))
        zf = zipfile.ZipFile(zip_file, "w", zipfile.ZIP_DEFLATED)
        try:
            for tar in file_list:
                arcname = tar[len(dir_path):]
                zf.write(tar, arcname)
        finally:
            zf.close()

    def unzip(self, zip_file, dir_path):
        """Extract *zip_file* under *dir_path*, creating directories as needed.

        Uses 0o777-style octal literals (the old 0777 form is a syntax
        error on Python 3) and os.makedirs so archives with nested,
        not-yet-created folders extract correctly.
        """
        if not os.path.exists(dir_path):
            os.makedirs(dir_path, 0o777)
        zf_obj = zipfile.ZipFile(zip_file)
        try:
            for zf_name in zf_obj.namelist():
                zf_name = zf_name.replace('\\', '/')
                ext_file = os.path.join(dir_path, zf_name)
                if zf_name.endswith('/'):
                    if not os.path.exists(ext_file):
                        os.makedirs(ext_file)
                else:
                    ext_dir = os.path.dirname(ext_file)
                    if not os.path.exists(ext_dir):
                        os.makedirs(ext_dir, 0o777)
                    out_file = open(ext_file, 'wb')
                    try:
                        out_file.write(zf_obj.read(zf_name))
                    finally:
                        out_file.close()
        finally:
            zf_obj.close()
| Python | 0.000001 | |
e27b005e5dc797e2326ab175ef947021c5a85cb7 | Add ptt.py | ptt.py | ptt.py | import telnetlib
import re
RN = '\r\n'
C_L = '\x0C'
C_Z = '\x1A'
ESC = '\x1B'
class PTT():
def __init__(self):
self.ptt = telnetlib.Telnet('ptt.cc')
self.where = 'login'
def login(self, username, password, dup=False):
self.__wait_til('่จปๅ: ', encoding='big5')
self.__send(username, ',', RN)
self.__wait_til('ๅฏ็ขผ: ', encoding='big5')
self.__send(password, RN)
index = self.__expect('ๆญก่ฟๆจๅๅบฆๆ่จช', '้่ค็ปๅ
ฅ', '่ซๅฟ้ ป็น็ปๅ
ฅ')[0]
if index == 2:
self.__send(RN)
index = self.__expect('ๆญก่ฟๆจๅๅบฆๆ่จช', '้่ค็ปๅ
ฅ')[0]
if index == 1:
self.__send('n' if dup else 'y', RN)
index = self.__expect('ๆญก่ฟๆจๅๅบฆๆ่จช')[0]
if index == -1:
print("Login failed")
self.close()
self.__send(RN)
index = self.__expect('ใไธปๅ่ฝ่กจใ', '้ฏ่ชคๅ่ฉฆ')[0]
if index == 1:
self.__send('y', RN)
# in menu now
self.where = 'menu'
def close(self):
self.ptt.close()
print('Connection closed')
def __wait_til(self, exp, encoding='utf-8', timeout=None):
return self.ptt.read_until(exp.encode(encoding), timeout)
def __send(self, *args):
s = ''.join(args)
self.ptt.write(s.encode())
def __expect(self, *args, encoding='utf-8', timeout=5):
exp_list = [exp.encode(encoding) for exp in args]
expect = self.ptt.expect(exp_list, timeout)
if expect[0] == -1:
raise TimeoutError(expect[2])
return expect
class TimeoutError(Exception):
    """Raised when an expected pattern is not seen before the telnet timeout.

    NOTE(review): shadows the Python 3 builtin TimeoutError -- consider renaming.
    """
    pass
if __name__ == '__main__':
pass
| Python | 0.000122 | |
eef2dff2855ef310dbdb6b864a92306cae724ed7 | add missing the missing file exceptions.py | pyecharts/exceptions.py | pyecharts/exceptions.py | class NoJsExtension(Exception):
pass
| Python | 0.000003 | |
0d3255f8a69fe5192cb36ee42a731293cfd09715 | Add VmCorTaxonPhenology Class | backend/geonature/core/gn_profiles/models.py | backend/geonature/core/gn_profiles/models.py | from geonature.utils.env import DB
from utils_flask_sqla.serializers import serializable
@serializable
class VmCorTaxonPhenology(DB.Model):
    """Model over gn_profiles.vm_cor_taxon_phenology (presumably a
    materialised view, per the ``vm_`` prefix -- confirm)."""
    __tablename__ = "vm_cor_taxon_phenology"
    __table_args__ = {"schema": "gn_profiles"}
    # NOTE(review): no primary_key column is declared -- verify the
    # SQLAlchemy mapping of this view works without one.
    cd_ref = DB.Column(DB.Integer)
    period = DB.Column(DB.Integer)
    id_nomenclature_life_stage = DB.Column(DB.Integer)
    id_altitude_range = DB.Column(DB.Integer)
    count_valid_data = DB.Column(DB.Integer)
| Python | 0 | |
18d40200224d68b0ce93c2710516ed63566b1ad3 | Add merge migration | osf/migrations/0127_merge_20180822_1927.py | osf/migrations/0127_merge_20180822_1927.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-08-22 19:27
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Empty merge migration reconciling the 0124 and 0126 branches."""
    dependencies = [
        ('osf', '0124_merge_20180816_1229'),
        ('osf', '0126_update_review_group_names'),
    ]
    operations = [
    ]
| Python | 0.000001 | |
a55fee4515c9e6187198a8fc27ec15e7786d5782 | Create utils.py | utils.py | utils.py | #!/usr/bin/env python
'''Python script that must be kept with all of these plugins'''
def color(color, message):
    '''Color foreground (or "fg/bg" pair) encoding for IRC messages.'''
    # All codes are two digits: a one-digit code followed by a digit in the
    # message would be re-parsed as a different colour.  lightgrey/silver
    # were previously '0' (rendered as white and ambiguous before digits);
    # the mIRC code for light grey / silver is 15.
    colors = {'white': '00', 'black': '01', 'blue': '02', 'navy': '02',
              'green': '03', 'red': '04', 'brown': '05', 'maroon': '05',
              'purple': '06', 'orange': '07', 'olive': '07', 'gold': '07',
              'yellow': '08', 'lightgreen': '09', 'lime': '09', 'teal': '10',
              'cyan': '11', 'lightblue': '12', 'royal': '12', 'lightpurple': '13',
              'pink': '13', 'fuchsia': '13', 'grey': '14', 'lightgrey': '15', 'silver': '15'}
    color = str(color).lower()
    message = str(message)
    if '/' in color:
        # "fg/bg" -> \x03FF,BB<text>\x03
        parts = color.split('/')
        message = '\x03' + colors[parts[0]] + ',' + colors[parts[1]] + message + '\x03'
    else:
        message = '\x03' + colors[color] + message + '\x03'
    return message
def bold(message):
    '''Wrap *message* in IRC bold control characters (0x02).'''
    return '\x02%s\x02' % (message,)
def italic(message):
    '''Wrap *message* in IRC italic control characters.

    NOTE(review): 0x16 is reverse-video on some clients (modern italics
    is 0x1D) -- kept as-is to preserve behaviour.
    '''
    return '\x16%s\x16' % (message,)
def underline(message):
    '''Wrap *message* in IRC underline control characters (0x1f).'''
    return '\x1f%s\x1f' % (message,)
| Python | 0.000001 | |
36e6ff93b270672e0918e5ac0d7f9698834ad6ae | add Pathfinder skeleton | game/pathfinding.py | game/pathfinding.py | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""pathfinding.py: """
class Pathfinder(object):
    """Skeleton path-finder over a size_x by size_y grid (not yet implemented)."""
    def __init__(self, size_x, size_y):
        self._size_x = size_x
        self._size_y = size_y
    def find_path(self, from_coords, to_coords):
        """Placeholder: intended to return a path from from_coords to to_coords."""
        pass
ca956d335ad6bf6e190869d98c7abb3b554dfa3d | Create TS3IdleBot.py | TS3IdleBot.py | TS3IdleBot.py | import telnetlib
import time
from config import config
def getClients():
    """Fetch the raw client list over the ServerQuery telnet session and
    return it as a list of entries, each split into its key=value fields,
    with the bot's own entry removed.
    """
    print "Getting a list of clients."
    telnet.write("clientlist -times\n")
    clients = telnet.read_until("msg=ok")
    # Normalise separators: fields become newline-separated; '|' still
    # separates individual client entries.
    clients = clients.replace(" ", "\n")
    clients = clients.replace("\r", "")
    clients = clients.split("|")
    cLen = len(clients)
    print clients
    for i in range(0, cLen):
        try:
            if config["botname"] in clients[i]:
                # NOTE(review): removing while indexing over the original
                # length shifts later entries; the IndexError handler below
                # is what keeps the loop from crashing after a removal.
                clients.remove(clients[i])
            else:
                clients[i] = clients[i].split("\n")
                clients[i] = filter(None,clients[i])
                # NOTE(review): cLen is decremented here but never read
                # again (the range bound was captured before the loop).
                cLen -= 1
        except IndexError:
            print "Somehow we've escaped the bounds of the loop. :O Skip it and we should be fine."
    return clients
def moveIdlers(clients):
print "Checking for idlers."
for i in range(0, len(clients)):
if float(clients[i][5].strip("client_idle_time=")) > float(config["idle"])*60000:
print "Moving user " + clients[i][3].replace("client_nickname=", "") + " to idle channel."
telnet.write("clientmove clid="+clients[i][0].strip("clid=")+ " cid=13\n")
telnet.read_until("msg=ok")
print "Done checking for idlers."
print "TS3IdleBot"
print "http://www.github.com/rmgr\n"
print "Exit TS3IdleBot with CTRL + C."
# Open the ServerQuery telnet session and authenticate.
print "Connecting to server " + config["host"]+ ":" + config["port"]
telnet = telnetlib.Telnet(config["host"],config["port"])
telnet.open(telnet.host, telnet.port)
telnet.write("login "+config["user"]+" "+config["pass"]+"\n")
telnet.read_until("msg=ok")
print "Connected successfully."
# Select which virtual server instance to operate on.
print "Using virtual server "+config["serverid"]
telnet.write("use sid="+config["serverid"] + "\n")
telnet.read_until("msg=ok")
print "Server selection successful."
# Name the query client so getClients() can filter out the bot itself.
print "Setting bot nickname as " + config["botname"] + "."
telnet.write("clientupdate client_nickname="+config["botname"]+"\n")
telnet.read_until("msg=ok")
print "Set successfully."
# Main loop: poll and move idlers every 5 minutes until Ctrl+C.
while True:
    try:
        clients = getClients()
        moveIdlers(clients)
        print "Sleeping for 5 minutes."
        time.sleep(300)
    except KeyboardInterrupt:
        print "Exiting TS3IdleBot"
        exit()
# NOTE(review): unreachable -- the loop only terminates via exit() above,
# so the logout below is never sent.
telnet.write("logout\n")
telnet.read_until("msg=ok")
telnet.close()
| Python | 0 | |
ae0ebdccfffffbad259842365712bd4b6e52fc8e | add test files for HDF5 class and read_feats function | sprocket/util/tests/test_hdf5.py | sprocket/util/tests/test_hdf5.py | from __future__ import division, print_function, absolute_import
import os
import unittest
import numpy as np
from sprocket.util.hdf5 import HDF5, read_feats
# Directory containing this test module; fixture paths are built from it.
dirpath = os.path.dirname(os.path.realpath(__file__))
# os.path.join discards all earlier components when one starts with '/',
# so the old '/data/test.h5' argument silently dropped dirpath.
listf = os.path.join(dirpath, 'data', 'test.h5')
class hdf5FunctionsTest(unittest.TestCase):
    """Round-trip tests for the HDF5 wrapper and the read_feats helper."""
    def test_HDF5(self):
        vec = np.random.rand(100)
        mat = np.random.rand(100).reshape(50, 2)
        # Write both arrays into a scratch HDF5 file.
        h5path = os.path.join(dirpath, 'data/test.h5')
        writer = HDF5(h5path, 'w')
        writer.save(vec, '1d')
        writer.save(mat, '2d')
        writer.close()
        # Read them back and compare against the originals.
        reader = HDF5(h5path, 'r')
        got1d = reader.read(ext='1d')
        got2d = reader.read(ext='2d')
        reader.close()
        assert np.allclose(got1d, vec)
        assert np.allclose(got2d, mat)
        # Exercise read_feats via a one-entry list file.
        listpath = os.path.join(dirpath, 'data/test.list')
        with open(listpath, 'w') as fp:
            fp.write('data/test')
        feats = read_feats(listpath, dirpath, ext='1d')
        assert np.allclose(feats[0], vec)
        # Clean up the scratch files.
        os.remove(h5path)
        os.remove(listpath)
| Python | 0 | |
26fcbefee171f8d56504a7eba121027f0c5be8b5 | Add migration for new overrides table | lms/djangoapps/grades/migrations/0013_persistentsubsectiongradeoverride.py | lms/djangoapps/grades/migrations/0013_persistentsubsectiongradeoverride.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Create the PersistentSubsectionGradeOverride table: nullable manual
    overrides of the earned/possible (all and graded) values, linked one to
    one to a PersistentSubsectionGrade row via its ``override`` relation."""
    dependencies = [
        ('grades', '0012_computegradessetting'),
    ]
    operations = [
        migrations.CreateModel(
            name='PersistentSubsectionGradeOverride',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('created', models.DateTimeField(auto_now_add=True, db_index=True)),
                ('modified', models.DateTimeField(auto_now=True, db_index=True)),
                ('earned_all_override', models.FloatField(null=True, blank=True)),
                ('possible_all_override', models.FloatField(null=True, blank=True)),
                ('earned_graded_override', models.FloatField(null=True, blank=True)),
                ('possible_graded_override', models.FloatField(null=True, blank=True)),
                ('grade', models.OneToOneField(related_name='override', to='grades.PersistentSubsectionGrade')),
            ],
        ),
    ]
| Python | 0 | |
e5d3fea99d58a1b02ebe84148d63330ea8d5c3a0 | Create WordLadder.py | WordLadder.py | WordLadder.py | '''
Given a source word, a target word, and an English dictionary, transform the
source word into the target word by changing/adding/removing one character
at a time, while keeping every intermediate word a valid English word.
Return the transformation chain that has the smallest number of
intermediate words.
'''
| Python | 0 | |
4ba2f92a9712530d084823dae52f54167f2f3afb | fix test source to work with empty msgs | new_pmlib/TestSimpleSource.py | new_pmlib/TestSimpleSource.py | #=========================================================================
# TestSimpleSource
#=========================================================================
# This class will output messages on a val/rdy interface from a
# predefined list.
#
from new_pymtl import *
from ValRdyBundle import OutValRdyBundle
class TestSimpleSource( Model ):

  #-----------------------------------------------------------------------
  # Constructor
  #-----------------------------------------------------------------------

  def __init__( s, nbits, msgs ):
    # nbits: width of the output port; msgs: messages replayed in order
    # over the val/rdy handshake.  done is raised once all are accepted.
    s.out = OutValRdyBundle( nbits )
    s.done = OutPort ( 1 )
    s.msgs = msgs
    # Index of the next message to present on s.out.
    s.idx = 0

  #-----------------------------------------------------------------------
  # Tick
  #-----------------------------------------------------------------------

  def elaborate_logic( s ):
    @s.tick
    def tick():
      # Handle reset
      if s.reset:
        # Guard: an empty message list has no msgs[0] to drive.
        if s.msgs:
          s.out.msg.next = s.msgs[0]
        s.out.val.next = False
        s.done.next = False
        return
      # Check if we have more messages to send.
      if ( s.idx == len(s.msgs) ):
        if s.msgs:
          s.out.msg.next = s.msgs[0]
        s.out.val.next = False
        s.done.next = True
        return
      # At the end of the cycle, we AND together the val/rdy bits to
      # determine if the output message transaction occured
      out_go = s.out.val and s.out.rdy
      # If the output transaction occured, then increment the index.
      if out_go:
        s.idx = s.idx + 1
      # The output message is always the indexed message in the list, or if
      # we are done then it is the first message again.
      if ( s.idx < len(s.msgs) ):
        s.out.msg.next = s.msgs[s.idx]
        s.out.val.next = True
        s.done.next = False
      else:
        s.out.msg.next = s.msgs[0]
        s.out.val.next = False
        s.done.next = True

  #-----------------------------------------------------------------------
  # Line tracing
  #-----------------------------------------------------------------------

  def line_trace( s ):
    # "(idx) <out bundle>" -- current position plus the port state.
    return "({:2}) {}".format( s.idx, s.out )
| #=========================================================================
# TestSimpleSource
#=========================================================================
# This class will output messages on a val/rdy interface from a
# predefined list.
#
from new_pymtl import *
from ValRdyBundle import OutValRdyBundle
class TestSimpleSource( Model ):

  #-----------------------------------------------------------------------
  # Constructor
  #-----------------------------------------------------------------------

  def __init__( s, nbits, msgs ):
    # nbits: width of the output port; msgs: messages replayed in order
    # over the val/rdy handshake.  done is raised once all are accepted.
    s.out = OutValRdyBundle( nbits )
    s.done = OutPort ( 1 )
    s.msgs = msgs
    # Index of the next message to present on s.out.
    s.idx = 0

  #-----------------------------------------------------------------------
  # Tick
  #-----------------------------------------------------------------------

  def elaborate_logic( s ):
    @s.tick
    def tick():
      # Handle reset
      if s.reset:
        # Guard: an empty message list has no msgs[0] to drive; the
        # unguarded access used to raise IndexError for empty sources.
        if s.msgs:
          s.out.msg.next = s.msgs[0]
        s.out.val.next = False
        s.done.next = False
        return
      # Check if we have more messages to send.
      if ( s.idx == len(s.msgs) ):
        if s.msgs:
          s.out.msg.next = s.msgs[0]
        s.out.val.next = False
        s.done.next = True
        return
      # At the end of the cycle, we AND together the val/rdy bits to
      # determine if the output message transaction occured
      out_go = s.out.val and s.out.rdy
      # If the output transaction occured, then increment the index.
      if out_go:
        s.idx = s.idx + 1
      # The output message is always the indexed message in the list, or if
      # we are done then it is the first message again.
      if ( s.idx < len(s.msgs) ):
        s.out.msg.next = s.msgs[s.idx]
        s.out.val.next = True
        s.done.next = False
      else:
        s.out.msg.next = s.msgs[0]
        s.out.val.next = False
        s.done.next = True

  #-----------------------------------------------------------------------
  # Line tracing
  #-----------------------------------------------------------------------

  def line_trace( s ):
    return "({:2}) {}".format( s.idx, s.out )
| Python | 0 |
75aabd425bd32a9467d7a06b250a0a5b1f5ba852 | Add more comments | application/serializer.py | application/serializer.py | '''
This module defines the field mappings used by the flask_restful marshaller
when returning bucket-list data to the user.
'''
from flask_restful import fields
# Shape of a single item inside a bucket list.
bucket_list_item_serializer = {
    'item_id': fields.Integer,
    'name': fields.String,
    'date_created': fields.DateTime,
    'date_modified': fields.DateTime,
    'done': fields.Boolean
}
# Shape of a bucket list, embedding its items via the serializer above.
bucket_list_serializer = {
    'id': fields.Integer,
    'name': fields.String,
    'items':fields.Nested(bucket_list_item_serializer),
    'created_by': fields.String,
    'date_created': fields.DateTime,
    'date_modified': fields.DateTime
}
da0f31d6ca5aa8f425c86b9c0caf965f062e1dba | test buying max clicks and gen clicks in the same test | functional-tests/suite6.py | functional-tests/suite6.py | from clickerft.cft import Cft
from time import sleep
class Suite4(Cft):

    def test_buy_target_max_and_gen(self):
        """Buy upgrades until clicks/generation reaches 4, then until the
        max-clicks capacity reaches 12, asserting each target is reached.
        """
        generation_target = 4
        while int(self.clicksPerGeneration.text) < generation_target:
            # Spend whatever we can afford this round.
            affordable = min(int(self.clicksOwned.text),
                             int(self.pincreaseClicksPerGeneration.text))
            for _ in xrange(affordable):
                self.increaseClicksPerGeneration.click()
        assert int(self.clicksPerGeneration.text) == generation_target

        max_target = 12
        while int(self.maxClicks.text) < max_target:
            affordable = min(int(self.clicksOwned.text),
                             int(self.pincreaseMaxClicks.text))
            for _ in xrange(affordable):
                self.increaseMaxClicks.click()
        assert int(self.maxClicks.text) == max_target
if __name__ == '__main__':
    # NOTE(review): instantiating Suite4 presumably runs the suite via
    # Cft's constructor -- confirm against the clickerft base class.
    Suite4()
| Python | 0 | |
78cbfc300a4623f4f5e3bd7726f43abdbb9ef0a3 | Add mysql_connector | pythonfiles/mysql_connector.py | pythonfiles/mysql_connector.py | import mysql.connector
from mysql.connector import errorcode
from abc import ABCMeta, abstractmethod
class connector(object):
    """Thin wrapper around a single mysql.connector connection.

    NOTE(review): credentials are hard-coded below; they should come from
    configuration before any real deployment.
    """
    _config = {
        'user': 'vda8888',
        'password': '123456',
        'host': '127.0.0.1',
        'database': 'test',
        'raise_on_warnings': True,
    }
    def __init__(self):
        # The live connection; created lazily by initConnection().
        self.cnx = None
    def initConnection(self):
        """Open the connection; failures are printed, not raised."""
        try:
            self.cnx = mysql.connector.connect(**connector._config)
        except mysql.connector.Error as err:
            if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
                print("Something is wrong with your user name or password")
            elif err.errno == errorcode.ER_BAD_DB_ERROR:
                print("Database does not exists")
            else:
                print(err)
    def commit(self):
        """Commit the current transaction."""
        self.cnx.commit()
    def get_cursor(self):
        """Return a new cursor from the underlying connection."""
        return self.cnx.cursor()
    def closeConnection(self):
        """Close the underlying connection."""
        self.cnx.close()
class table(metaclass=ABCMeta):
    """Abstract base for one SQL table; subclasses supply the table name,
    column list, and per-column SQL fragments via the abstract methods.

    NOTE(review): every query below is built by string concatenation of
    caller-supplied values -- wide open to SQL injection.  Parameterised
    queries (cursor.execute(sql, params)) should replace this scheme.
    """
    def __init__(self, connector):
        self._connector = connector
        # One cursor is held for the lifetime of the table object.
        self._cursor = self._connector.get_cursor()
        # Filled in by concrete subclasses.
        self.name = None
        self.column_names = []
    def get_cursor(self):
        """Return the cursor holding the results of the last query."""
        return self._cursor
    def destroy(self):
        """Release the cursor; must be called when done with the table."""
        if (self._cursor is not None):
            self._cursor.close()
    def view_full_table(self):
        """Run SELECT * over the whole table (results stay on the cursor)."""
        query = ("SELECT * FROM %s")
        self._cursor.execute(query % (self.name,))
    #This method must NOT be overridden
    def insert_row(self, inserted_dict):
        """INSERT one row; only columns present in inserted_dict are set."""
        addCommand = "INSERT INTO " + self.name + " ("
        field_count = 0
        field_index = 0
        # `end` accumulates the VALUES(...) half in parallel.
        end = ""
        #Add column names
        while (field_index < len(self.column_names)):
            current_value = inserted_dict.get(self.column_names[field_index])
            if (current_value is not None):
                field_count += 1
                if (field_count != 1):
                    addCommand += ", "
                    end += ", "
                addCommand += self.column_names[field_index]
                end += self.insert_row_add_expression(field_index, current_value)
            field_index += 1
        addCommand += ") VALUES ("
        addCommand += end + ")"
        # NOTE(review): debug print left in.
        print (addCommand)
        self.get_cursor().execute(addCommand)
        self._connector.commit()
    #This method must NOT be overridden
    def find_row(self, key_dictionary):
        """SELECT rows matching every column present in key_dictionary;
        results are left on the cursor for the caller to iterate."""
        query = "SELECT * FROM " + self.name
        field_count = 0 #Always start with 1 since 0 is assumed to be 0
        field_index = 0
        while (field_index < len(self.column_names)):
            current_value = key_dictionary.get(str(self.column_names[field_index]))
            if (current_value is not None):
                field_count += 1
                if (field_count == 1):
                    query += " WHERE "
                else:
                    query += " AND "
                query += self.column_names[field_index] + " "
                # NOTE(review): debug print left in.
                print (field_index)
                query += self.find_row_add_expression(field_index, current_value)
            field_index += 1
        print (query)
        self._cursor.execute(query)
    def delete_row(self, delete_dict):
        """DELETE rows matching every column present in delete_dict.
        Reuses find_row_add_expression for the WHERE fragments."""
        query = "DELETE FROM " + self.name
        field_count = 0 #Always start with 1 since 0 is assumed to be 0
        field_index = 0
        while (field_index < len(self.column_names)):
            current_value = delete_dict.get(str(self.column_names[field_index]))
            if (current_value is not None):
                field_count += 1
                if (field_count == 1):
                    query += " WHERE "
                else:
                    query += " AND "
                query += self.column_names[field_index] + " "
                print (field_index)
                query += self.find_row_add_expression(field_index, current_value)
            field_index += 1
        print (query)
        self._cursor.execute(query)
        self._connector.commit()
    @abstractmethod
    def find_row_add_expression(self, column_index, value):
        # Subclass hook: WHERE-clause fragment for one column/value pair.
        pass
    @abstractmethod
    def insert_row_add_expression(self, column_index, value):
        # Subclass hook: VALUES fragment for one column/value pair.
        pass
class words_table(table):
    """Concrete table over ``words``.  Numeric columns (id, rating) are
    matched and stored verbatim; text columns are matched with REGEXP and
    stored as quoted literals."""

    # Indices in column_names that hold numeric values.
    _NUMERIC = (0, 5)

    def __init__(self, connector):
        # Hand the connector to the generic base class.
        super().__init__(connector)
        self.name = "words"
        self.column_names = ["id", "name", "pos", "definition", "context", "rating", "french_name", "french_definition", "french_context"]

    def find_row_add_expression(self, column_index, value):
        """WHERE-clause fragment for one column (None for unknown indices)."""
        if column_index in self._NUMERIC:
            return "= " + str(value)
        if 0 <= column_index <= 8:
            return "REGEXP '" + value + "'"

    def insert_row_add_expression(self, column_index, value):
        """VALUES fragment for one column (None for unknown indices)."""
        if column_index in self._NUMERIC:
            return str(value)
        if 0 <= column_index <= 8:
            return "'" + value + "'"
#Always start with the following two lines
# NOTE(review): this rebinds the name `connector` from the class to an
# instance, so no further connector objects can be created afterwards.
connector = connector()
connector.initConnection()
# NOTE(review): likewise shadows the `table` base class.
table = words_table(connector)
key = {}
key["name"] = "dog"
key["pos"] = "animal"
table.view_full_table()
#table.find_row(key)
#table.insert_row(key)
#Once a query is done, results are stored in get_cursor(). We MUST read the results before moving on
for (result) in table.get_cursor():
    print (result)
#table.delete_row(key)
for (result) in table.get_cursor():
    print (result)
#Once finish using a table, MUST destroy it
table.destroy()
#Always end with this line
connector.closeConnection()
| Python | 0.000024 | |
158f04702b6c1dcda9981d8da05fe059e84c3f90 | Add example with churches. | examples/churches.py | examples/churches.py | # -*- coding: utf-8 -*-
'''
This script demonstrates using the AATProvider to get the concept of
Churches.
'''
from skosprovider_getty.providers import AATProvider

aat = AATProvider(metadata={'id': 'AAT'})

# 300007466 is the AAT identifier of the "churches" concept.
churches = aat.get_by_id(300007466)

# NOTE(review): `lang` is defined but never used below.
lang = ['en', 'nl', 'es', 'de']

print('Labels')
print('------')
for l in churches.labels:
    # NOTE(review): .decode('utf-8') implies byte-string labels (Python 2)
    # -- confirm against the skosprovider_getty version in use.
    print(l.language + ': ' + l.label.decode('utf-8') + ' [' + l.type + ']')

print('Notes')
print('-----')
for n in churches.notes:
    print(n.language + ': ' + n.note.decode('utf-8') + ' [' + n.type + ']')
| Python | 0 | |
7c82a2a8887d25ef86e5d0004cf0a0e0bc4b23ac | Create CodingContestTorontoParkingTickets2013.py | CodingContestTorontoParkingTickets2013.py | CodingContestTorontoParkingTickets2013.py | import re
from collections import defaultdict

# Pre-compiled regexp: first run of non-digit characters in the location
# field, i.e. the street name without the leading house number.
only_chars = re.compile(r'\D+').search


def tally_fines(lines):
    """Sum the fine amounts (field 4) per street name (field 7).

    Malformed rows (too few fields, non-numeric fine, or a location with
    no street name) are skipped.
    """
    totals = defaultdict(int)
    for line in lines:
        fields = line.split(',')
        try:
            street = only_chars(fields[7]).group(0).lstrip()
            totals[street] += int(fields[4])
        except (AttributeError, IndexError, ValueError):
            # The old bare `except: next` was a no-op catch-all; skip the
            # bad row explicitly and only for the expected failure modes.
            continue
    return totals


def main():
    """Report the street with the greatest total in parking fines."""
    with open('Parking_data.csv', 'r') as raw_data:
        # A bare `next` statement did nothing: actually consume the header.
        next(raw_data)
        totals = tally_fines(raw_data)
    highest_street = max(totals, key=totals.get)
    highest_fine = totals[highest_street]
    print('Highest revenue street: {0} with ${1}.'.format(highest_street, highest_fine))


if __name__ == '__main__':
    main()
| Python | 0.000001 | |
f8ee383cc3b3f1f9166627e81a64af4939e4de10 | add amqp style routing for virtual channels, allows memory backend to behave like amqp | example/topic.py | example/topic.py | from kombu.connection import BrokerConnection
from kombu.messaging import Exchange, Queue, Consumer, Producer
# configuration, normally in an ini file
exchange_name = "test.shane"
exchange_type = "topic"
exchange_durable = True
message_serializer = "json"
# NOTE(review): queue_name is never used; the consumers below pass their
# own queue names explicitly.
queue_name = "test.q"

# 1. setup the connection to the exchange
# hostname,userid,password,virtual_host not used with memory backend
cons_conn = BrokerConnection(hostname="localhost",
                         userid="guest",
                         password="guest",
                         virtual_host="/",
                         transport="memory")
cons_chan = cons_conn.channel()
cons_exch = Exchange(exchange_name, type=exchange_type, durable=exchange_durable)

# Separate connection/channel for the publishing side, declaring the same
# exchange on the in-memory broker.
pub_conn = BrokerConnection(hostname="localhost",
                         userid="guest",
                         password="guest",
                         virtual_host="/",
                         transport="memory")
pub_chan = pub_conn.channel()
pub_exch = Exchange(exchange_name, type=exchange_type, durable=exchange_durable)
# 2. setup the consumer, the consumer declares/creates the queue, if you
# publish to a queue before there is a consumer it will fail unless the queue
# was first created and is durable
class AConsumer:
    """Declare a queue bound to *key* on the shared exchange and start
    consuming, printing every message received on that binding."""

    def __init__(self, queue_name, key):
        self.queue = Queue(queue_name, exchange=cons_exch, routing_key=key)
        self.consumer = Consumer(cons_chan, [self.queue])
        self.consumer.consume()

        # The callback must be a closure inside __init__: it captures `key`
        # from this scope.  (It was previously dedented to class level,
        # where `key` and `self` are undefined and the register_callback
        # call sat unreachably inside its own body.)
        def mq_callback(message_data, message):
            print("%s: %r: %r" % (key, message.delivery_info, message_data,))
            #message.ack()

        self.consumer.register_callback(mq_callback)
c1 = AConsumer("test_1","test.1")
c2 = AConsumer("testing","test.ing")
# consumers can use simple pattern matching when defining a queue
c3 = AConsumer("test_all","test.*")

# 3. publish something to consume
# publishers always send to a specific route, the mq will route to the queues
producer = Producer(pub_chan, exchange=pub_exch, serializer=message_serializer)
producer.publish({"name": "Shane Caraveo", "username": "mixedpuppy"}, routing_key="test.1")
producer.publish({"name": "Micky Mouse", "username": "donaldduck"}, routing_key="test.ing")
# "test.foobar" matches only the wildcard "test.*" binding above.
producer.publish({"name": "Anonymous", "username": "whoami"}, routing_key="test.foobar")

def have_messages():
    """Total number of messages still queued on the consumer channel."""
    return sum([q.qsize() for q in cons_chan.queues.values()])

# 5. run the event loop
# (Python 2 syntax below: bare `print` statements, `except Exception, e`.)
while have_messages():
    try:
        cons_conn.drain_events()
    except KeyboardInterrupt:
        print
        print "quitting"
        break
    except Exception, e:
        import traceback
        print traceback.format_exc()
        break
| Python | 0 | |
aff827e9cc02bcee6cf8687e1dff65f39daaf6c6 | Add a failing test to the landing page to check for upcoming events. | workshops/test/test_landing_page.py | workshops/test/test_landing_page.py | from django.core.urlresolvers import reverse
from django.test import TestCase
from mock import patch
from datetime import date
class FakeDate(date):
    """A date replacement whose today() is pinned to 2013-12-07 so tests
    that depend on "today" are deterministic (see the @patch below)."""
    # The stray `pass` that sat before the classmethod was dead code.

    @classmethod
    def today(cls):
        return cls(2013, 12, 7)
@patch('workshops.models.datetime.date', FakeDate)
class TestLandingPage(TestCase):
    "Tests for the workshop landing page"
    # Event fixture queried below; "today" is pinned to 2013-12-07 by the
    # FakeDate patch, so the set of upcoming events is deterministic.
    fixtures = ['event_test']
    def test_has_upcoming_events(self):
        """Test that the landing page is passed some
        upcoming_events in the context.
        """
        response = self.client.get(reverse('index'))
        # This will fail if the context variable doesn't exist
        upcoming_events = response.context['upcoming_events']
        # There are 2 upcoming events
        assert len(upcoming_events) == 2
        # They should all start with upcoming
        assert all([e.slug[:8] == 'upcoming' for e in upcoming_events])
| Python | 0 | |
91918be596c83f468c6c940df7326896aa6082e7 | Fix stringify on multichoice forms | adagios/forms.py | adagios/forms.py | # -*- coding: utf-8 -*-
from django.utils.encoding import smart_str
from django import forms
class AdagiosForm(forms.Form):
    """Base class for all forms in this module.  Forms that use pynag in
    any way should inherit from this one.

    clean() is overridden so unicode keys and values are converted to byte
    strings; all other values pass through unchanged.
    """
    def clean(self):
        converted = {}
        for key, value in super(AdagiosForm, self).clean().items():
            if isinstance(key, unicode):
                key = smart_str(key)
            if isinstance(value, unicode):
                value = smart_str(value)
            converted[key] = value
        return converted
| # -*- coding: utf-8 -*-
from django.utils.encoding import smart_str
from django import forms
class AdagiosForm(forms.Form):
    """ Base class for all forms in this module. Forms that use pynag in any way should inherit from this one.
    """
    def clean(self):
        """Byte-encode unicode keys/values in cleaned_data.

        Only values that actually are unicode get converted; other types
        (e.g. the lists produced by multiple-choice fields) pass through
        unchanged -- the previous unconditional smart_str(v) stringified
        them, corrupting non-string cleaned values.
        """
        cleaned_data = {}
        tmp = super(AdagiosForm, self).clean()
        for k, v in tmp.items():
            if isinstance(k, unicode):
                k = smart_str(k)
            if isinstance(v, unicode):
                v = smart_str(v)
            cleaned_data[k] = v
        return cleaned_data
| Python | 0.000004 |
cb7bb1d9f24706f3cce2e9841595ee80ce7e2c7f | Implement GetKeyboardType | angr/procedures/win_user32/keyboard.py | angr/procedures/win_user32/keyboard.py | import angr
class GetKeyboardType(angr.SimProcedure):
    """Model of GetKeyboardType returning the values observed at the time
    of the original author's testing (4 / 0 / 12 for params 0 / 1 / 2)."""

    def run(self, param):
        # Check each concrete parameter value in order; anything else
        # (including symbolic input) falls through to 0.
        for arg, result in ((0, 4), (1, 0), (2, 12)):
            if self.state.solver.is_true(param == arg):
                return result
        return 0
| Python | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.