commit stringlengths 40 40 | subject stringlengths 4 1.73k | repos stringlengths 5 127k | old_file stringlengths 2 751 | new_file stringlengths 2 751 | new_contents stringlengths 1 8.98k | old_contents stringlengths 0 6.59k | license stringclasses 13
values | lang stringclasses 23
values |
|---|---|---|---|---|---|---|---|---|
a5b43e23008eaca8da9d7d740c3ce976b698b615 | Add Daniel Berlin's draft of a 'blame' script. | jmckaskill/subversion,jmckaskill/subversion,jmckaskill/subversion,jmckaskill/subversion,jmckaskill/subversion,jmckaskill/subversion,jmckaskill/subversion,jmckaskill/subversion | tools/examples/blame.py | tools/examples/blame.py | #!/usr/bin/env python2
#
# USAGE: annotate.py [-r REV] [-h DBHOME] repos-path
#
import sys
import os
import getopt
import difflib
from svn import fs, util
CHUNK_SIZE = 100000
def getfile(pool, path, rev=None, home='.'):
    """Annotate ("blame") *path* in the repository at *home*.

    Walks every revision up to *rev* (default: youngest), diffs each
    changed revision of the file against the previous text with
    difflib.ndiff, and records for each line the revision that
    introduced it.  Results are printed to stdout.

    Python 2 / SWIG-svn bindings code.
    """
    # A BDB repository keeps its filesystem under <home>/db; fall back to
    # treating *home* itself as the db directory.
    db_path = os.path.join(home, 'db')
    if not os.path.exists(db_path):
        db_path = home

    annotresult = {}  # line number -> (revision, line text)
    fsob = fs.new(pool)
    fs.open_berkeley(fsob, db_path)
    if rev is None:
        rev = fs.youngest_rev(fsob, pool)
    filedata = ''
    # Find the first revision in which the path exists.
    # NOTE(review): if the path never appears, `first` stays unbound and
    # the print below raises NameError -- confirm callers guarantee the
    # path exists.
    for i in xrange(0, rev+1):
        root = fs.revision_root(fsob, i, pool)
        if fs.check_path(root, path, pool) != util.svn_node_none:
            first = i
            break
    print "First revision is %d" % first
    print "Last revision is %d" % rev
    for i in xrange(first, rev+1):
        previousroot = root
        root = fs.revision_root(fsob, i, pool)
        # Skip revisions that did not change the file's contents.
        if i != first:
            if not fs.contents_changed(root, path, previousroot, path, pool):
                continue

        # Slurp the file text for this revision in CHUNK_SIZE pieces.
        file = fs.file_contents(root, path, pool)
        previousdata = filedata
        filedata = ''
        while 1:
            data = util.svn_stream_read(file, CHUNK_SIZE)
            if not data:
                break
            filedata = filedata + data

        print "Current revision is %d" % i
        diffresult = difflib.ndiff(previousdata.splitlines(1),
                                   filedata.splitlines(1))
        # Replay the ndiff: ' ' = unchanged (keep earliest revision),
        # '?' = hint line (ignore), '+'/'-' = attribute to this revision;
        # only non-deletions advance the output line counter.
        k = 0
        for j in diffresult:
            if j[0] == ' ':
                if annotresult.has_key(k):
                    k = k + 1
                    continue
                else:
                    annotresult[k] = (i, j[2:])
                    k = k + 1
                    continue
            elif j[0] == '?':
                continue
            annotresult[k] = (i, j[2:])
            if j[0] != '-':
                k = k + 1
    # Emit "Line N (rev R):text" for every annotated line.
    for x in xrange(len(annotresult.keys())):
        sys.stdout.write("Line %d (rev %d):%s" % (x,
                                                  annotresult[x][0],
                                                  annotresult[x][1]))
def usage():
    # Print the command synopsis and exit with a failure status.
    print "USAGE: annotate.py [-r REV] [-h DBHOME] repos-path"
    sys.exit(1)
def main():
    """Parse ``[-r REV] [-h DBHOME] repos-path`` from argv and hand
    getfile() to util.run_app, which supplies the APR pool."""
    options, arguments = getopt.getopt(sys.argv[1:], 'r:h:')
    if len(arguments) != 1:
        usage()

    rev = None
    home = '.'
    for flag, value in options:
        if flag == '-r':
            rev = int(value)
        elif flag == '-h':
            home = value

    util.run_app(getfile, arguments[0], rev, home)


if __name__ == '__main__':
    main()
| apache-2.0 | Python | |
8a2d86cb5d78d0865d5b0ed2e99fe79e4c739759 | Add calc_size tool | iychoi/biospectra,iychoi/biospectra,iychoi/biospectra | tools/calc_size.py | tools/calc_size.py | #! /usr/bin/env python
import os
import os.path
import sys
def _sumSize(path):
    # Size in bytes of a single file, echoed so the user sees a
    # per-file breakdown (Python 2 print statement).
    size = os.path.getsize(path)
    print path, "=", size, "bytes"
    return size
def sumSize(path):
    """Return the total size in bytes of *path*, recursing into
    directories and delegating single files to _sumSize()."""
    if not os.path.isdir(path):
        return _sumSize(path)
    total = 0
    for entry in os.listdir(path):
        total += sumSize(os.path.join(path, entry))
    return total
def main():
    # Total up the path given on the command line and report it in
    # several units.  Python 2: integer division truncates sub-unit
    # remainders, and print is a statement.
    sizeTotal = sumSize(sys.argv[1])
    print "total size", "=", sizeTotal, "bytes"
    print "total size", "=", sizeTotal/1024, "kilobytes"
    print "total size", "=", sizeTotal/1024/1024, "megabytes"
    print "total size", "=", sizeTotal/1024/1024/1024, "gigabytes"


if __name__ == "__main__":
    main()
| apache-2.0 | Python | |
b0b6afff93391bda296ed5ef87d122054f91eef5 | add testdb | vincentdavis/Colorado-Property-Data,vincentdavis/Colorado-Property-Data,vincentdavis/Colorado-Property-Data,vincentdavis/Colorado-Property-Data | dbtest.py | dbtest.py |
# NOTE(review): `MySQLDatabase` is used without any import in this
# snippet -- presumably `from peewee import MySQLDatabase` is needed
# for it to run; confirm against the project's ORM setup.
DB = MySQLDatabase('codatadb',
                   user='heteroskedastic1',
                   host='heteroskedastic1.mysql.pythonanywhere-services.com')
80b286daad1136dde91010a41970c89c22247b50 | add loan_estimate.py | fassake/bdd-security,cfarm/owning-a-home,imuchnik/owning-a-home,cfarm/owning-a-home,fna/owning-a-home,fassake/bdd-security,imuchnik/owning-a-home,CapeSepias/owning-a-home,amymok/owning-a-home,CapeSepias/owning-a-home,fassake/bdd-security,Scotchester/owning-a-home,cfarm/owning-a-home,cfarm/owning-a-home,CapeSepias/owning-a-home,fna/owning-a-home,OrlandoSoto/owning-a-home,CapeSepias/owning-a-home,amymok/owning-a-home,amymok/owning-a-home,imuchnik/owning-a-home,Scotchester/owning-a-home,amymok/owning-a-home,OrlandoSoto/owning-a-home,contolini/owning-a-home,contolini/owning-a-home,fna/owning-a-home,OrlandoSoto/owning-a-home,contolini/owning-a-home,fna/owning-a-home,Scotchester/owning-a-home,imuchnik/owning-a-home,contolini/owning-a-home,Scotchester/owning-a-home,fassake/bdd-security,OrlandoSoto/owning-a-home | test/browser_testing/features/pages/loan_estimate.py | test/browser_testing/features/pages/loan_estimate.py | from selenium.webdriver.common.action_chains import ActionChains
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from selenium import webdriver
from pages.base import Base
import re
import time
class LoanEstimate(Base):
    """Page object for the Loan Estimate explainer page.

    Wraps the selenium driver held by ``Base`` with helpers for the tab
    bar, the image map, and the expandable form explainers.
    """

    def __init__(self, logger, directory, base_url=r'http://localhost/',
                 driver=None, driver_wait=10, delay_secs=0):
        super(LoanEstimate, self).__init__(logger, directory, base_url,
                                           driver, driver_wait, delay_secs)

    # check that element(s) is on the page
    # @return: the single matching element, a list of matches, or []
    def _element_found(self, css_selector):
        try:
            elements = self.driver.find_elements_by_css_selector(css_selector)
            if len(elements) == 1:
                return elements[0]
            else:
                return elements
        except NoSuchElementException:
            return []

    def tab_is_found(self, tab_name):
        # Tabs are addressed like ".tab-link__loan-terms" (lower-cased).
        tab_css_selector = '.tab-link__%s' % tab_name.lower()
        if self._element_found(tab_css_selector):
            return True
        return False

    def content_image_is_loaded(self):
        """Return True if at least one *visible* image-map image rendered
        larger than a 200x200 placeholder."""
        image_css_selector = '.image-map_image'
        images = self._element_found(image_css_selector)
        for image in images:
            if not image.is_displayed():
                # BUGFIX: was a bare `next` (a no-op expression), which
                # let hidden images fall through to the size check.
                continue
            if image:
                size = image.size
                if size['width'] > 200 and size['height'] > 200:
                    return True
        return False

    def resize_to_mobile_size(self):
        # 360x640 is a common small-phone viewport.
        self.driver.set_window_size(360, 640)

    def expandable_explainers_are_loaded(self, tab_name):
        """Count the visible expandable explainers for *tab_name* whose
        content actually toggles when the explainer is clicked."""
        parent_css_selector = 'div.expandable__form-explainer-%s' % tab_name.lower()
        elements = self._element_found(parent_css_selector)
        good_elements = 0
        for element in elements:
            content_css_selector = '#%s .expandable_content' % element.get_attribute('id')
            if element.is_displayed() and\
               self._expandable_explainer_content_is_loaded(content_css_selector, element):
                good_elements += 1
        return good_elements

    def _expandable_explainer_content_is_loaded(self, css_selector, parent_element):
        # The content is "loaded" if clicking the parent toggles its
        # visibility (collapsed <-> expanded).
        element = self._element_found(css_selector)
        if not element:
            return False
        original_visibility = element.is_displayed()
        ActionChains(self.driver).move_to_element(parent_element).perform()
        parent_element.click()
        new_visibility = element.is_displayed()
        return original_visibility != new_visibility

    def _click_tab(self, tab_name):
        css_selector = '.tab-link__%s' % tab_name.lower()
        self.driver.find_element_by_css_selector(css_selector).click()

    def _element_size(self, css_selector):
        """Return the ``.size`` dict of the (preferably visible) element
        matching *css_selector*, or a zero size if nothing usable matched."""
        element = self._element_found(css_selector)
        item = element
        if type(element) is list:
            for el in element:
                if el.is_displayed():
                    item = el
        if item and item.size:
            return item.size
        # BUGFIX: the fallback used bare names ({width: 0, height: 0}),
        # which raised NameError; use string keys like WebElement.size.
        return {'width': 0, 'height': 0}

    def hover_an_overlay(self):
        """Hover every visible image-map overlay and count those whose
        linked explainer is missing or never gains the has-attention
        class."""
        bad_elements = 0
        elements = self._element_found('a.image-map_overlay')
        for element in elements:
            if element.is_displayed():
                ActionChains(self.driver).move_to_element(element).perform()
                # Reduce the href to its "#fragment", which doubles as
                # the explainer's CSS id selector.
                anchor = element.get_attribute('href')
                anchor = re.sub('^[^#]*', '', anchor)
                explainer_element = self._element_found(anchor)
                if not explainer_element:
                    bad_elements += 1
                else:
                    classes = filter(lambda x: x,
                                     explainer_element.get_attribute('class').split(' '))
                    if 'has-attention' not in classes:
                        bad_elements += 1
        return bad_elements
| cc0-1.0 | Python | |
27eb90e40f1f3fe4a17e0228e4de9d427382ac44 | Create calendar_api.py | julianaklulo/clockwise | calendar_api.py | calendar_api.py | import httplib2
import os
import apiclient
import oauth2client
from oauth2client import client, tools
from datetime import datetime, timedelta
# Default alarm: one minute from module import time; replaced whenever a
# calendar event titled "Alarm" is seen (see set_alarm_time).
alarm_time = datetime.utcnow() + timedelta(minutes = 1)

# argparse is optional: oauth2client's tools.run_flow wants the parsed
# flags when they are available, and tools.run is used otherwise.
try:
    import argparse
    flags = argparse.ArgumentParser(parents=[tools.argparser]).parse_args()
except ImportError:
    flags = None

# Read-only calendar access is enough for listing events.
SCOPES = 'https://www.googleapis.com/auth/calendar.readonly'
CLIENT_SECRET_FILE = 'client_secret.json'
APPLICATION_NAME = 'Clockwise'
def get_credentials():
    """Gets valid user credentials from storage.

    If nothing has been stored, or if the stored credentials are
    invalid, the OAuth2 flow is completed to obtain new credentials.

    Returns:
        Credentials, the obtained credential.
    """
    # Cached credentials live in ~/.credentials/clockwise.json.
    credential_dir = os.path.join(os.path.expanduser('~'), '.credentials')
    if not os.path.exists(credential_dir):
        os.makedirs(credential_dir)
    credential_path = os.path.join(credential_dir, 'clockwise.json')

    store = oauth2client.file.Storage(credential_path)
    credentials = store.get()
    if not credentials or credentials.invalid:
        # No usable cached credentials: run the OAuth2 flow and store
        # the result for next time.
        flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)
        flow.user_agent = APPLICATION_NAME
        if flags:
            credentials = tools.run_flow(flow, store, flags)
        else:  # Needed only for compatibility with Python 2.6
            credentials = tools.run(flow, store)
        print('Storing credentials to ' + credential_path)
    return credentials
def getEvents():
    """Fetch the next 24 hours of Google Calendar events.

    Returns one string of "HH:MM summary " pairs, or the Portuguese
    "Nada agendado!" ("nothing scheduled") message when the day is
    empty.  Events titled "Alarm" are not reported; their start time is
    forwarded to set_alarm_time() instead.
    """
    credentials = get_credentials()
    http = credentials.authorize(httplib2.Http())
    service = apiclient.discovery.build('calendar', 'v3', http=http)

    # RFC3339 window: now .. now+24h ('Z' marks UTC).
    now = datetime.utcnow().isoformat() + 'Z'
    tomorrow = (datetime.utcnow() + timedelta(1)).isoformat() + 'Z'
    eventsResult = service.events().list(
        calendarId='primary', timeMin=now, timeMax=tomorrow,
        maxResults=10, singleEvents=True, orderBy='startTime').execute()
    events = eventsResult.get('items', [])

    if not events:
        return ("Nada agendado!")
    events_data = ""
    events_qty = 0
    for event in events:
        event_time = event['start'].get('dateTime')
        if event['summary'] == "Alarm":
            # chars 11:19 of the RFC3339 timestamp are HH:MM:SS
            set_alarm_time(event_time[11:19])
        else:
            # chars 11:16 are HH:MM
            events_data += "%s %s " % (event_time[11:16], event['summary'])
            events_qty += 1
    # All events may have been "Alarm" entries.
    if events_qty == 0:
        return ("Nada agendado!")
    return events_data
def set_alarm_time(time):
    """Replace the module-level ``alarm_time`` with the parsed
    ``HH:MM:SS`` value (dated 1900-01-01, the strptime default)."""
    global alarm_time
    # NB: the parameter name shadows the stdlib ``time`` module here.
    parsed = datetime.strptime(time, "%H:%M:%S")
    alarm_time = parsed
    print("alarm set")
| mit | Python | |
fc97a838d54417cb063a7757040ff279f298d0bb | Add snip code for http.cookies | JohnTroony/Scriptology,JohnTroony/Scriptology,JohnTroony/Scriptology,JohnTroony/Scriptology,JohnTroony/Scriptology,JohnTroony/Scriptology | cookie_skel.py | cookie_skel.py | # -*- coding: utf-8 -*-
"""
Created on Wed Sep 14 20:49:34 2016
@author: troon
"""
import BaseHTTPServer, SimpleHTTPServer
from http.cookies import SimpleCookie as cookie
class ApplicationRequestHandler(SimpleHTTPServer.BaseHTTPRequestHandler):
    """Request-handler sketch that issues/refreshes an HttpOnly session
    cookie on each request.

    NOTE(review): this snippet mixes Python 2 modules (BaseHTTPServer,
    SimpleHTTPServer) with the Python 3 ``http.cookies`` import and a
    zero-argument ``super()`` call, so it cannot run unmodified on
    either version.  ``time()`` and ``uuid()`` are also used without
    imports (presumably ``time.time`` and ``uuid.uuid4``) -- confirm
    before reuse.
    """
    # session_id -> creation timestamp; class attribute, so it is shared
    # by every handler instance of this server.
    sessioncookies = {}

    def __init__(self, *args, **kwargs):
        # Morsel for the session cookie set during this request, if any.
        self.sessionidmorsel = None
        super().__init__(*args, **kwargs)

    def _session_cookie(self, forcenew=False):
        # Join all Cookie headers so SimpleCookie can parse them at once.
        cookiestring = "\n".join(self.headers.get_all('Cookie', failobj=[]))
        c = cookie()
        c.load(cookiestring)
        try:
            # Reissue when forced, when no session_id was sent, or when
            # the stored session is considered stale.
            # NOTE(review): ``stored_time - now > 3600`` can never be
            # true for past timestamps; the comparison looks inverted
            # (should likely be ``time() - stored > 3600``) -- confirm.
            if forcenew or self.sessioncookies[c['session_id'].value] - time() > 3600:
                raise ValueError('new cookie needed')
        except:
            # Missing/unknown/stale session: mint a fresh random id.
            c['session_id'] = uuid().hex
        for m in c:
            if m == 'session_id':
                # Record creation time and mark the cookie HttpOnly with
                # a one-hour lifetime.
                self.sessioncookies[c[m].value] = time()
                c[m]["httponly"] = True
                c[m]["max-age"] = 3600
                c[m]["expires"] = self.date_time_string(time() + 3600)
                self.sessionidmorsel = c[m]
                break
| cc0-1.0 | Python | |
9dadbe8197ba479d23111eace2f3eda8e471df89 | Add very WIP multiplayer server | darkf/darkfo,darkf/darkfo,darkf/darkfo,darkf/darkfo,darkf/darkfo | mpserv.py | mpserv.py | from eventlet import wsgi, websocket
import eventlet
import json, time, string, os
def is_valid_name_char(c):
return c in string.ascii_letters + string.digits + "-_"
"""
sessions = {}
def broadcast_to_user_sessions(username, t, msg):
global sessions
for session in user_sessions(username):
try:
session.send(t, msg)
except OSError:
session.disconnected("Broken pipe")
"""
class GameContext:
    """Mutable state for the single hosted game: who is hosting, who
    joined, and the map data the host uploaded."""

    def __init__(self):
        # Nobody connected yet and no map uploaded.
        self.host = self.guest = None
        self.serializedMap = self.elevation = None


# The one shared game that every Connection reads and writes.
context = GameContext()
# Connection handler
class Connection:
    """One websocket client: identification, host/join handshake and
    message dispatch against the module-level GameContext.

    Wire format: JSON text frames with a "t" key naming the message type.
    """

    def __init__(self, ws):
        self.sock = ws
        self.uid = None
        self.name = None   # set by the "ident" message
        self.pos = None    # map position, set when hosting/joining

    def _send(self, msg):
        # All traffic is JSON-encoded text frames.
        self.sock.send(json.dumps(msg))

    def send(self, t, msg):
        # Tag the payload with its message type before sending.
        msg.update({"t": t})
        self._send(msg)

    def _recv(self):
        return json.loads(self.sock.wait())

    def recv(self):
        # Returns (type, payload); raises EOFError once the peer closes.
        data = self.sock.wait()
        if data is None:
            raise EOFError()
        msg = json.loads(data)
        return msg["t"], msg

    def disconnected(self, reason=""):
        print("client", self.name, "disconnected:", reason)
        # TODO: Broadcast drop out

    def error(self, request, msg):
        self.send("error", {"request": request, "message": msg})

    def serve(self):
        """Per-client loop: greet, then dispatch messages until the
        socket closes or a "close" message arrives."""
        global context
        self.send("hello", {"network": {"name": "test server"}})
        try:
            while True:
                t, msg = self.recv()
                print("Received %s message from %r" % (t, self.name))
                if t == "ident":
                    self.name = msg["name"]
                    print("Client identified as", msg["name"])
                elif t == "host":
                    # This player becomes the host and supplies the map.
                    context.host = self
                    context.serializedMap = msg["map"]
                    context.elevation = msg["player"]["elevation"]
                    self.pos = msg["player"]["position"]
                    print("Got a host:", self.name)
                elif t == "join":
                    context.guest = self
                    print("Got a guest:", self.name)
                    # Spawn the guest two tiles right of the host.
                    self.pos = context.host.pos.copy()
                    self.pos["x"] += 2
                    print("Sending map")
                    self.send("map", {"map": context.serializedMap, "player": {"position": self.pos, "elevation": context.elevation}})
                elif t == "close":
                    self.disconnected("close message received")
                    break
        except (EOFError, OSError):
            self.disconnected("socket closed")
@websocket.WebSocketWSGI
def connection(ws):
    """Websocket WSGI entry point: run one client's message loop."""
    Connection(ws).serve()


if __name__ == "__main__":
    # Serve websocket connections on all interfaces, port 8090.
    listener = eventlet.listen(('', 8090))
    wsgi.server(listener, connection)
63586b03111d2c2aac3b4ffe5266468d3bd666fd | solve 1 problem | Shuailong/Leetcode | solutions/add-two-numbers.py | solutions/add-two-numbers.py | #!/usr/bin/env python
# encoding: utf-8
"""
add-two-numbers.py
Created by Shuailong on 2016-04-23.
https://leetcode.com/problems/add-two-numbers/.
"""
# Definition for singly-linked list.
class ListNode(object):
    """A singly-linked-list node holding a value and a next pointer."""

    def __init__(self, x):
        self.val = x      # payload digit
        self.next = None  # successor node; None marks the list end
class Solution(object):
    def addTwoNumbers(self, l1, l2):
        """Add two non-negative numbers stored as reversed-digit
        linked lists and return the sum in the same representation.

        :type l1: ListNode
        :type l2: ListNode
        :rtype: ListNode

        The original implementation repeated the digit-add loop three
        times (both lists, leftover of l1, leftover of l2); this folds
        them into one loop that runs while either list or a carry
        remains, which also covers the final-carry case uniformly.
        """
        head = ListNode(0)  # dummy head; the result starts at head.next
        tail = head
        carry = 0
        while l1 or l2 or carry:
            total = carry
            if l1:
                total += l1.val
                l1 = l1.next
            if l2:
                total += l2.val
                l2 = l2.next
            carry, digit = divmod(total, 10)
            tail.next = ListNode(digit)
            tail = tail.next
        return head.next
def main():
    # Smoke test: 342 + 465 stored as reversed-digit lists (2->4->3 and
    # 5->6->4); expected output is "7 0 8" (807 reversed).
    solution = Solution()

    l1 = ListNode(2)
    l1.next = ListNode(4)
    l1.next.next = ListNode(3)

    l2 = ListNode(5)
    l2.next = ListNode(6)
    l2.next.next = ListNode(4)

    s = solution.addTwoNumbers(l1, l2)
    while s:
        print s.val,
        s = s.next


if __name__ == '__main__':
    main()
| mit | Python | |
61750d22d44e4d2caa99703c1fd8d61e1e829e07 | Create test.py | shivarajnesargi/BotOrNot | ProjectMidway/Data/test.py | ProjectMidway/Data/test.py | mit | Python | ||
0cde44fb3fade24bc0e1aed5ee4820aa2172806a | Add 2nd example. | cournape/talkbox,cournape/talkbox | doc/src/examples/periodogram_2.py | doc/src/examples/periodogram_2.py | import numpy as np
import matplotlib.pyplot as plt
from scikits.talkbox.spectral.basic import periodogram
from scipy.signal import hamming, hanning
fs = 1000
x = np.sin(2 * np.pi * 0.15 * fs * np.linspace(0., 0.3, 0.3 * fs))
x += 0.1 * np.random.randn(x.size)
px1, fx1 = periodogram(x, nfft=16384, fs=fs)
px2, fx2 = periodogram(x * hamming(x.size), nfft=16384, fs=fs)
plt.subplot(2, 1, 1)
plt.plot(fx1, 10 * np.log10(px1))
plt.subplot(2, 1, 2)
plt.plot(fx2, 10 * np.log10(px2))
plt.xlabel('Frequency (Hz)')
plt.ylabel('Amplitude (dB)')
plt.savefig('periodogram_2.png')
| mit | Python | |
08636c9740b3103fd05c81791f43faeb29920305 | Add tests for some util functions. | aleGpereira/libcloud,sahildua2305/libcloud,pquentin/libcloud,Verizon/libcloud,wuyuewen/libcloud,aleGpereira/libcloud,iPlantCollaborativeOpenSource/libcloud,Verizon/libcloud,Jc2k/libcloud,Cloud-Elasticity-Services/as-libcloud,aviweit/libcloud,vongazman/libcloud,techhat/libcloud,ZuluPro/libcloud,pquentin/libcloud,illfelder/libcloud,lochiiconnectivity/libcloud,thesquelched/libcloud,mathspace/libcloud,mgogoulos/libcloud,t-tran/libcloud,mistio/libcloud,niteoweb/libcloud,erjohnso/libcloud,schaubl/libcloud,thesquelched/libcloud,Kami/libcloud,wrigri/libcloud,pantheon-systems/libcloud,curoverse/libcloud,cryptickp/libcloud,cloudControl/libcloud,carletes/libcloud,Cloud-Elasticity-Services/as-libcloud,wuyuewen/libcloud,DimensionDataCBUSydney/libcloud,StackPointCloud/libcloud,munkiat/libcloud,SecurityCompass/libcloud,ZuluPro/libcloud,wido/libcloud,marcinzaremba/libcloud,sgammon/libcloud,munkiat/libcloud,mathspace/libcloud,mbrukman/libcloud,wrigri/libcloud,watermelo/libcloud,pantheon-systems/libcloud,wido/libcloud,kater169/libcloud,SecurityCompass/libcloud,apache/libcloud,jimbobhickville/libcloud,JamesGuthrie/libcloud,mistio/libcloud,curoverse/libcloud,samuelchong/libcloud,NexusIS/libcloud,MrBasset/libcloud,illfelder/libcloud,t-tran/libcloud,mgogoulos/libcloud,Keisuke69/libcloud,watermelo/libcloud,pantheon-systems/libcloud,Jc2k/libcloud,techhat/libcloud,aleGpereira/libcloud,schaubl/libcloud,jimbobhickville/libcloud,SecurityCompass/libcloud,kater169/libcloud,ZuluPro/libcloud,ninefold/libcloud,lochiiconnectivity/libcloud,mtekel/libcloud,vongazman/libcloud,MrBasset/libcloud,jerryblakley/libcloud,MrBasset/libcloud,ByteInternet/libcloud,ClusterHQ/libcloud,cloudControl/libcloud,Kami/libcloud,Cloud-Elasticity-Services/as-libcloud,samuelchong/libcloud,supertom/libcloud,watermelo/libcloud,Itxaka/libcloud,Kami/libcloud,marcinzaremba/libcloud,Itxaka/libcloud,NexusIS/libcloud,dcorbacho/libcloud,sahildua2305/libcl
oud,cryptickp/libcloud,atsaki/libcloud,briancurtin/libcloud,niteoweb/libcloud,supertom/libcloud,marcinzaremba/libcloud,mtekel/libcloud,andrewsomething/libcloud,sfriesel/libcloud,cloudControl/libcloud,jimbobhickville/libcloud,wrigri/libcloud,sfriesel/libcloud,ByteInternet/libcloud,apache/libcloud,supertom/libcloud,wido/libcloud,briancurtin/libcloud,dcorbacho/libcloud,aviweit/libcloud,sergiorua/libcloud,ByteInternet/libcloud,ninefold/libcloud,samuelchong/libcloud,Scalr/libcloud,apache/libcloud,JamesGuthrie/libcloud,Scalr/libcloud,iPlantCollaborativeOpenSource/libcloud,t-tran/libcloud,smaffulli/libcloud,techhat/libcloud,ClusterHQ/libcloud,schaubl/libcloud,andrewsomething/libcloud,mtekel/libcloud,pquentin/libcloud,Keisuke69/libcloud,DimensionDataCBUSydney/libcloud,niteoweb/libcloud,smaffulli/libcloud,cryptickp/libcloud,vongazman/libcloud,mbrukman/libcloud,NexusIS/libcloud,Verizon/libcloud,kater169/libcloud,aviweit/libcloud,carletes/libcloud,iPlantCollaborativeOpenSource/libcloud,jerryblakley/libcloud,erjohnso/libcloud,erjohnso/libcloud,jerryblakley/libcloud,mathspace/libcloud,dcorbacho/libcloud,briancurtin/libcloud,lochiiconnectivity/libcloud,sergiorua/libcloud,sfriesel/libcloud,StackPointCloud/libcloud,StackPointCloud/libcloud,DimensionDataCBUSydney/libcloud,curoverse/libcloud,thesquelched/libcloud,Itxaka/libcloud,mistio/libcloud,carletes/libcloud,sergiorua/libcloud,smaffulli/libcloud,wuyuewen/libcloud,illfelder/libcloud,JamesGuthrie/libcloud,sahildua2305/libcloud,sgammon/libcloud,mgogoulos/libcloud,atsaki/libcloud,mbrukman/libcloud,atsaki/libcloud,Scalr/libcloud,andrewsomething/libcloud,munkiat/libcloud | test/test_utils.py | test/test_utils.py | # -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one or more§
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest
import warnings
import os.path
import libcloud.utils
# Warnings captured by the replacement hook below, one tuple per warning.
WARNINGS_BUFFER = []


def show_warning(msg, cat, fname, lno):
    """``warnings.showwarning`` replacement that records each warning as
    a (message, category, filename, lineno) tuple for later inspection."""
    record = (msg, cat, fname, lno)
    WARNINGS_BUFFER.append(record)


# The genuine hook, kept so tearDown can restore it after tests swap in
# show_warning above.
original_func = warnings.showwarning
class TestUtils(unittest.TestCase):
    """Tests for libcloud.utils helpers: MIME-type guessing and the
    deprecation / in-development warning emitters."""

    def setUp(self):
        # Start every test with an empty capture buffer.
        global WARNINGS_BUFFER
        WARNINGS_BUFFER = []

    def tearDown(self):
        global WARNINGS_BUFFER
        WARNINGS_BUFFER = []
        # Restore the real warnings hook replaced inside the tests.
        warnings.showwarning = original_func

    def test_guess_file_mime_type(self):
        # This test file itself is a convenient known-Python fixture.
        file_path = os.path.abspath(__file__)
        mimetype, encoding = libcloud.utils.guess_file_mime_type(file_path=file_path)
        self.assertEqual(mimetype, 'text/x-python')

    def test_deprecated_warning(self):
        warnings.showwarning = show_warning
        libcloud.utils.SHOW_DEPRECATION_WARNING = False

        # With the flag off nothing must be recorded...
        self.assertEqual(len(WARNINGS_BUFFER), 0)
        libcloud.utils.deprecated_warning('test_module')
        self.assertEqual(len(WARNINGS_BUFFER), 0)

        # ...and with it on exactly one warning is emitted.
        libcloud.utils.SHOW_DEPRECATION_WARNING = True
        self.assertEqual(len(WARNINGS_BUFFER), 0)
        libcloud.utils.deprecated_warning('test_module')
        self.assertEqual(len(WARNINGS_BUFFER), 1)

    def test_in_development_warning(self):
        # Same on/off behaviour for the in-development warning.
        warnings.showwarning = show_warning
        libcloud.utils.SHOW_IN_DEVELOPMENT_WARNING = False

        self.assertEqual(len(WARNINGS_BUFFER), 0)
        libcloud.utils.in_development_warning('test_module')
        self.assertEqual(len(WARNINGS_BUFFER), 0)

        libcloud.utils.SHOW_IN_DEVELOPMENT_WARNING = True
        self.assertEqual(len(WARNINGS_BUFFER), 0)
        libcloud.utils.in_development_warning('test_module')
        self.assertEqual(len(WARNINGS_BUFFER), 1)


if __name__ == '__main__':
    sys.exit(unittest.main())
| apache-2.0 | Python | |
0574900ba42deda7ee61a809ffc64abc643b58b1 | add hibernation test | OpenAcademy-OpenStack/vm-hibernation | testHibernation.py | testHibernation.py | from novaclient.v1_1 import Client as NovaClient
import unittest
def hibernate(username, password, tenant_name, auth_url, serverID):
    """Shelve ("hibernate") the server with id *serverID* through the
    OpenStack Nova v1.1 API."""
    api = NovaClient(username=username,
                     api_key=password,
                     project_id=tenant_name,
                     auth_url=auth_url)
    api.servers.get(serverID).shelve()
class VMHibernationTest(unittest.TestCase):
    # to run the test, a VM must be created on openstack first
    # after shelve() is invoked on the server, its state changes from
    # "ACTIVE" to "SHELVED" and then to "SHELVED_OFFLOADED"

    def test(self):
        # NOTE(review): credentials and the endpoint are hard-coded to a
        # local devstack; parameterize before reuse.
        nova = NovaClient(username = "admin",
                          api_key = "password",
                          project_id = "demo",
                          auth_url = "http://192.168.50.4:5000/v2.0")
        server = nova.servers.list()[0]
        self.failUnless(server.status == "ACTIVE")
        hibernate("admin",
                  "password",
                  "demo",
                  "http://192.168.50.4:5000/v2.0",
                  server.id)
        # Poll until the shelve completes.
        # NOTE(review): busy-wait with no sleep -- this hammers the API;
        # consider time.sleep between polls.
        while server.status == "ACTIVE" or server.status == "SHELVED":
            server = nova.servers.list()[0]
        self.failUnless(server.status == "SHELVED_OFFLOADED")
def main():
    # Entry point: discover and run all unittest cases in this module.
    unittest.main()


if __name__ == "__main__":
    main()
| apache-2.0 | Python | |
7771baf7b4806c15cb85df6c34e344345538df4a | Add background thread example | voidabhi/python-scripts,voidabhi/python-scripts,voidabhi/python-scripts,voidabhi/python-scripts,voidabhi/python-scripts | background-thread.py | background-thread.py | import time
import requests
from tomorrow import threads
@threads(5)
def download(url):
    # Runs on the `tomorrow` thread pool (at most 5 concurrent fetches);
    # the decorator makes the call return a future-like proxy that
    # resolves to the HTTP response when accessed.
    return requests.get(url)
if __name__ == "__main__":
    # NOTE(review): `urls` is never defined in this script -- it must be
    # supplied before this demo can run (otherwise NameError).
    start = time.time()
    # Kick off all downloads (non-blocking thanks to @threads) ...
    responses = [download(url) for url in urls]
    # ... then block here as each proxy's .text is resolved.
    html = [response.text for response in responses]
    end = time.time()
    print "Time: %f seconds" % (end - start)
| mit | Python | |
3bb75969a9fc068a05bf81f096d0e58e3440a09f | Create nltk10.py | PythonProgramming/Natural-Language-Processing-NLTK-Python-2.7 | nltk10.py | nltk10.py | import time
import urllib2
from urllib2 import urlopen
import re
import cookielib
from cookielib import CookieJar
import datetime
import sqlite3
import nltk
cj = CookieJar()
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
opener.addheaders = [('User-agent', 'Mozilla/5.0')]
conn = sqlite3.connect('knowledgeBase.db')
c = conn.cursor()
visitedLinks = []
def processor(data):
    """Extract one named entity and its adjectives from the paragraph
    *data* and insert (timestamp, entity, adjective) rows into the
    knowledge base.

    Paragraphs with zero or more than one named entity are skipped.
    Python 2 code (print statements, ``except Exception, e``).
    """
    namedEntArray = []  # unused -- kept as-is; candidate for removal
    try:
        tokenized = nltk.word_tokenize(data)
        tagged = nltk.pos_tag(tokenized)
        # Binary chunking: chunks are labelled just "NE" or not.
        namedEnt = nltk.ne_chunk(tagged, binary=True)
        # Scrape entities and adjectives (JJ/JJR/JJS) out of the trees'
        # string representations.
        entities = re.findall(r'NE\s(.*?)/', str(namedEnt))
        descriptives = re.findall(r'\(\'(\w*)\',\s\'JJ\w?\'', str(tagged))

        if len(entities) > 1:
            pass
        elif len(entities) == 0:
            pass
        else:
            print '_________________________'
            print 'Named:', entities[0]
            print 'Descriptions:'
            for eachDesc in descriptives:
                print eachDesc
                # One row (and one commit) per adjective.
                currentTime = time.time()
                dateStamp = datetime.datetime.fromtimestamp(currentTime).strftime('%Y-%m-%d %H:%M:%S')
                namedEntity = entities[0]
                relatedWord = eachDesc
                c.execute("INSERT INTO knowledgeBase (unix, datestamp, namedEntity, relatedWord) VALUES (?,?,?,?)",
                          (currentTime, dateStamp, namedEntity, relatedWord))
                conn.commit()
    except Exception, e:
        print 'failed in the main try of processor'
        print str(e)
        # Back off hard on any failure (e.g. rate limiting / bad parse).
        time.sleep(55)
def huffingtonRSSvisit():
    """Poll the Huffington Post raw RSS feed, visit each not-yet-seen
    article link, and feed its <p> paragraphs to processor()."""
    try:
        page = 'http://feeds.huffingtonpost.com/huffingtonpost/raw_feed'
        sourceCode = opener.open(page).read()
        try:
            links = re.findall(r'<link.*href=\"(.*?)\"', sourceCode)
            for link in links:
                if '.rdf' in link:
                    # Feed metadata, not an article.
                    pass
                elif link in visitedLinks:
                    print ' link already visited, moving on.'
                else:
                    visitedLinks.append(link)
                    print 'visiting the link'
                    print '###################'
                    linkSource = opener.open(link).read()
                    linesOfInterest = re.findall(r'<p>(.*?)</p>', str(linkSource))
                    for eachLine in linesOfInterest:
                        # Skip paragraphs that are mostly markup.
                        if '<img width' in eachLine:
                            pass
                        elif '<a href=' in eachLine:
                            pass
                        else:
                            processor(eachLine)
                    # Be polite between article fetches.
                    time.sleep(5)
        except Exception, e:
            print 'failed 2nd loop of huffingtonRSS'
            print str(e)
    except Exception, e:
        print 'failed main loop of huffingtonRSS'
        print str(e)
# Poll the feed every 30 minutes, forever.
while True:
    currentTime = time.time()
    dateStamp = datetime.datetime.fromtimestamp(currentTime).strftime('%Y-%m-%d %H:%M:%S')
    huffingtonRSSvisit()
    time.sleep(1800)
    # NOTE(review): dateStamp was taken before the 30-minute sleep, so
    # this prints a stale time -- confirm that is intended.
    print 'sleeping'
    print dateStamp
ea4acac26fbedef3a9a5395860334c9bb95bcacb | add a new class GenericKeyComposerAddressAccess | alphatwirl/alphatwirl,alphatwirl/alphatwirl,alphatwirl/alphatwirl,TaiSakuma/AlphaTwirl,alphatwirl/alphatwirl,TaiSakuma/AlphaTwirl | AlphaTwirl/Counter/GenericKeyComposerAddressAccess.py | AlphaTwirl/Counter/GenericKeyComposerAddressAccess.py | # Tai Sakuma <tai.sakuma@cern.ch>
##____________________________________________________________________________||
class GenericKeyComposerAddressAccess(object):
    """Compose histogram keys from event arrays accessed by address.

    Each variable name maps to ``event.arrays[name]``, a dict with an
    'array' of values and a 'countarray' holding the element count.  An
    entry in *indices* selects which element to read (None = element 0).
    Each raw value is passed through the matching binning callable;
    ``None`` from a binning, or an out-of-range index, yields no key.
    """

    def __init__(self, varNames, binnings, indices=None):
        self._varNames = varNames
        self._binnings = binnings
        if indices is None:
            indices = [None] * len(self._varNames)
        self._indices = indices
        self._first = True  # array objects not looked up yet

    def __call__(self, event):
        # Cache the array objects on the first event; later events are
        # assumed to expose the same array dicts.
        if self._first:
            self._arrays = self._findArrays(event)
            self._first = False

        key = []
        for arr, binning, idx in zip(self._arrays, self._binnings, self._indices):
            if idx is None:
                raw = arr['array'][0]
            else:
                # Out-of-range index -> no key for this event.
                if arr['countarray'][0] <= idx:
                    return None
                raw = arr['array'][idx]
            binned = binning(raw)
            if binned is None:
                return None
            key.append(binned)
        return tuple(key)

    def next(self, key):
        # For each dimension, the key with that component bumped to the
        # binning's next bin; returns one successor key per dimension.
        successors = []
        for i, binning in enumerate(self._binnings):
            bumped = list(key)
            bumped[i] = binning.next(bumped[i])
            successors.append(tuple(bumped))
        return tuple(successors)

    def binnings(self):
        return self._binnings

    def _findArrays(self, event):
        return [event.arrays[name] for name in self._varNames]
##____________________________________________________________________________||
class GenericKeyComposerAddressAccessBuilder(object):
    """Factory that defers construction of a
    GenericKeyComposerAddressAccess until it is called."""

    def __init__(self, varNames, binnings, indices=None):
        self.varNames = varNames
        self.binnings = binnings
        self.indices = indices

    def __call__(self):
        # A fresh composer per call, so each gets its own array cache.
        return GenericKeyComposerAddressAccess(
            varNames=self.varNames,
            binnings=self.binnings,
            indices=self.indices,
        )
##____________________________________________________________________________||
| bsd-3-clause | Python | |
57d731d6fb958c165cdeb5a9194669b07f8d54c1 | Create central_tendency.py | jamaps/open_geo_scripts,jamaps/fun_with_gdal,jamaps/fun_with_gdal,jamaps/gdal_and_ogr_scripts,jamaps/open_geo_scripts,jamaps/shell_scripts,jamaps/gdal_and_ogr_scripts,jamaps/shell_scripts,jamaps/open_geo_scripts | central_tendency.py | central_tendency.py |
# computes measures of geo central tendency from a csv table
# table must be set up as 3 columns: x, y, weight
# (Python 2 script: csv files are opened 'rb' and print is a statement)

import csv

csv_path = 'x_y_w.csv'

# calculating the mean centre (unweighted average of x and y)
with open(csv_path, 'rb') as f:
    reader = csv.reader(f)
    x_sum = 0
    y_sum = 0
    n = 0
    for row in reader:
        n = n + 1
        x_sum = x_sum + float(row[0])
        y_sum = y_sum + float(row[1])
mean_x = x_sum / n
mean_y = y_sum / n

# calculating the standard distance (spread around the mean centre)
with open(csv_path, 'rb') as f:
    reader = csv.reader(f)
    x_sq_dist = 0
    y_sq_dist = 0
    for row in reader:
        x_sq_dist = (float(row[0]) - mean_x)**2 + float(x_sq_dist)
        y_sq_dist = (float(row[1]) - mean_y)**2 + float(y_sq_dist)
SD = float(((x_sq_dist + y_sq_dist) / n)**0.5)

# calculating the weighted mean centre (weights from column 3)
with open(csv_path, 'rb') as f:
    reader = csv.reader(f)
    x_w_sum = 0
    y_w_sum = 0
    w_sum = 0
    for row in reader:
        w_sum = float(row[2]) + float(w_sum)
        x_w_sum = (float(row[0]) * float(row[2])) + float(x_w_sum)
        y_w_sum = (float(row[1]) * float(row[2])) + float(y_w_sum)
w_mean_x = x_w_sum / w_sum
w_mean_y = y_w_sum / w_sum

# calculating the weighted standard distance
with open(csv_path, 'rb') as f:
    reader = csv.reader(f)
    # NOTE(review): x_sq_dist / y_sq_dist are reset here but never
    # used again below.
    x_sq_dist = 0
    y_sq_dist = 0
    w_x_sq_dist = 0
    w_y_sq_dist = 0
    for row in reader:
        w_x_sq_dist = float((float(row[0]) - float(w_mean_x))**2)*float(row[2]) + float(w_x_sq_dist)
        w_y_sq_dist = float((float(row[1]) - float(w_mean_y))**2)*float(row[2]) + float(w_y_sq_dist)
WSD = ( ((w_x_sq_dist) + (w_y_sq_dist)) / w_sum ) ** 0.5

# print results
print "Count = %i" % (n)
print "Mean Centre = (%f, %f)" % (mean_x, mean_y)
print "Standard Distance = %f" % (SD)
print "Weighted Mean Centre = (%f, %f)" % (w_mean_x, w_mean_y)
print "Weighted Standard Distance = %f" % (WSD)
624eff1e010c7b36f43e2580346b5ae74cc20128 | Create does_my_number_look_big_in_this.py | Kunalpod/codewars,Kunalpod/codewars | does_my_number_look_big_in_this.py | does_my_number_look_big_in_this.py | #Kunal Gautam
#Codewars : @Kunalpod
#Problem name: Does my number look big in this?
#Problem level: 6 kyu
def narcissistic( value ):
    """Return True if *value* is a narcissistic (Armstrong) number.

    A narcissistic number equals the sum of its own digits, each raised
    to the power of the number of digits (e.g. 153 = 1^3 + 5^3 + 3^3).
    """
    digits = str(value)
    power = len(digits)
    return value == sum(int(d) ** power for d in digits)
| mit | Python | |
ebde18f5958463d622805b1a09244d07c81ec8ae | Bump development version | benzkji/django-cms,divio/django-cms,datakortet/django-cms,netzkolchose/django-cms,keimlink/django-cms,czpython/django-cms,netzkolchose/django-cms,czpython/django-cms,yakky/django-cms,FinalAngel/django-cms,keimlink/django-cms,jsma/django-cms,benzkji/django-cms,jproffitt/django-cms,mkoistinen/django-cms,mkoistinen/django-cms,divio/django-cms,evildmp/django-cms,yakky/django-cms,divio/django-cms,timgraham/django-cms,nimbis/django-cms,jsma/django-cms,vxsx/django-cms,bittner/django-cms,timgraham/django-cms,jproffitt/django-cms,czpython/django-cms,datakortet/django-cms,benzkji/django-cms,rsalmaso/django-cms,benzkji/django-cms,FinalAngel/django-cms,yakky/django-cms,nimbis/django-cms,timgraham/django-cms,evildmp/django-cms,netzkolchose/django-cms,evildmp/django-cms,vxsx/django-cms,jproffitt/django-cms,jproffitt/django-cms,yakky/django-cms,rsalmaso/django-cms,divio/django-cms,nimbis/django-cms,mkoistinen/django-cms,czpython/django-cms,keimlink/django-cms,evildmp/django-cms,bittner/django-cms,bittner/django-cms,jsma/django-cms,netzkolchose/django-cms,bittner/django-cms,rsalmaso/django-cms,rsalmaso/django-cms,mkoistinen/django-cms,jsma/django-cms,datakortet/django-cms,vxsx/django-cms,FinalAngel/django-cms,vxsx/django-cms,FinalAngel/django-cms,datakortet/django-cms,nimbis/django-cms | cms/__init__.py | cms/__init__.py | # -*- coding: utf-8 -*-
__version__ = '3.0.17.dev1'
default_app_config = 'cms.apps.CMSConfig'
| # -*- coding: utf-8 -*-
__version__ = '3.0.16'
default_app_config = 'cms.apps.CMSConfig'
| bsd-3-clause | Python |
9831d82463b556c70d64dc17ceeadb2d54c6141d | add unittest for Kutil. | PeerAssets/pypeerassets,backpacker69/pypeerassets | tests/kutiltest.py | tests/kutiltest.py | import unittest
from pypeerassets.kutil import Kutil
class KutilTestCase(unittest.TestCase):
    """Unit tests for the Kutil key/address helper on the Peercoin network."""

    def test_network_parameter_load(self):
        '''tests if loading of network parameters is accurate'''
        mykey = Kutil(network="ppc")
        self.assertEqual(mykey.denomination, 1000000)
        self.assertEqual(mykey.wif_prefix, b'b7')
        self.assertEqual(mykey.pubkeyhash, b'37')
    def test_key_generation(self):
        '''test privkey/pubkey generation'''
        mykey = Kutil(network="ppc")
        # check if keys are in proper format
        self.assertTrue(isinstance(mykey.keypair.private_key, bytes))
        self.assertTrue(isinstance(mykey.keypair.pubkey.serialize(), bytes))
        # check if key generation is what is expected from seed
        '''
        self.assertEqual(mykey.privkey, '416b2b925a4b004a3ccb92295e5a835cfd854ef7c4afde0b0eabd5d2492594e2')
        self.assertEqual(mykey.pubkey, '03d612848fca55fd57760ff204434d41091927eeda4dfec39e78956b2cc6dbd62b')
        '''
    def test_address_generation(self):
        '''test if addresses are properly made'''
        mykey = Kutil(network="ppc")
        self.assertTrue(mykey.address.startswith("P"))
        self.assertTrue(isinstance(mykey.address, str))
        # BUG FIX: this previously read ``assertTrue(len(mykey.address), 34)``,
        # which always passes -- a non-zero length is truthy and 34 was silently
        # treated as the failure message.  assertEqual actually checks the length.
        self.assertEqual(len(mykey.address), 34)
    def test_wif_import(self):
        '''test importing WIF privkey'''
        mykey = Kutil(wif="7A6cFXZSZnNUzutCMcuE1hyqDPtysH2LrSA9i5sqP2BPCLrAvZM")
        self.assertEqual(mykey.address, 'PJxwxuBqjpHhhdpV6KY1pXxUSUNb6omyNW')
        self.assertEqual(mykey.pubkey, '02a119079ef5be1032bed61cc295cdccde58bf70e0dd982399c024d1263740f398')
        self.assertEqual(mykey.privkey, 'b43d38cdfa04ecea88f7d9d7e95b15b476e4a6c3f551ae7b45344831c3098da2')
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | Python | |
6958b09e08662e2a13b1f161cdd22f4f929d56c6 | add sample program | takahasi/utility,takahasi/utility,takahasi/utility,takahasi/utility | codeiq/yatta.py | codeiq/yatta.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
""" script for generate process template directories
This is xxx
"""
from __future__ import print_function
def yatta(n):
    """Print an n-by-n ASCII-art letter 'Y' built from 'y' and '.' characters.

    n must be odd; for even n the function prints "invalid" and returns.
    The upper rows draw the two diverging arms, the lower rows the stem.
    """
    if n % 2 == 0:
        print("invalid")
        return
    mid = n // 2                      # column index of the vertical stem
    arm_rows = n - (mid + 1)          # number of rows occupied by the arms
    for r in range(n):
        cells = ['.'] * n
        if r < arm_rows:
            # Arms: one 'y' on each diagonal, mirrored around the centre.
            cells[r] = 'y'
            cells[n - r - 1] = 'y'
        else:
            # Stem: a single 'y' in the middle column.
            cells[mid] = 'y'
        print(''.join(cells))
    return
if __name__ == '__main__':
yatta(int(raw_input()))
| mit | Python | |
04740a33ab8b4d43cda71668ff7027ac7e5982d5 | Add test. This continues to need pytz and tzlocal. | python-caldav/caldav | tests/test_cdav.py | tests/test_cdav.py | import datetime
import pytz
import tzlocal
from caldav.elements.cdav import _to_utc_date_string
SOMEWHERE_REMOTE = pytz.timezone('Brazil/DeNoronha') # UTC-2 and no DST
def test_to_utc_date_string_date():
    """A plain date is rendered as midnight (00:00:00) UTC of that day."""
    input = datetime.date(2019, 5, 14)
    res = _to_utc_date_string(input)
    assert res == '20190514T000000Z'
def test_to_utc_date_string_utc():
    """A UTC datetime keeps its wall-clock time; astimezone() only changes
    representation, not the instant, so the output is unchanged."""
    input = datetime.datetime(2019, 5, 14, 21, 10, 23, 23, tzinfo=datetime.timezone.utc)
    res = _to_utc_date_string(input.astimezone())
    assert res == '20190514T211023Z'
def test_to_utc_date_string_dt_with_pytz_tzinfo():
    """A pytz-localized datetime in UTC-2 (Brazil/DeNoronha, no DST) is
    shifted forward two hours when converted to UTC: 21:10 -> 23:10."""
    input = datetime.datetime(2019, 5, 14, 21, 10, 23, 23)
    res = _to_utc_date_string(SOMEWHERE_REMOTE.localize(input))
    assert res == '20190514T231023Z'
def test_to_utc_date_string_dt_with_local_tz():
    """A datetime carrying the machine's local zone converts to UTC; the
    expected value is computed independently via tzlocal so the test works
    in any host timezone."""
    input = datetime.datetime(2019, 5, 14, 21, 10, 23, 23)
    res = _to_utc_date_string(input.astimezone())
    exp_dt = tzlocal.get_localzone().localize(input).astimezone(datetime.timezone.utc)
    exp = exp_dt.strftime("%Y%m%dT%H%M%SZ")
    assert res == exp
def test_to_utc_date_string_naive_dt():
    """A naive datetime is treated as local time: the expected value is the
    same input localized to the host zone and converted to UTC."""
    input = datetime.datetime(2019, 5, 14, 21, 10, 23, 23)
    res = _to_utc_date_string(input)
    exp_dt = tzlocal.get_localzone().localize(input).astimezone(datetime.timezone.utc)
    exp = exp_dt.strftime("%Y%m%dT%H%M%SZ")
    assert res == exp
| apache-2.0 | Python | |
7a70d230d3ceb3c37d718f138e80b132b9a05fae | Add migration for multiple repos per team. | mythmon/edwin,mythmon/edwin,mythmon/edwin,mythmon/edwin | edwin/teams/migrations/0005_auto_20150811_2236.py | edwin/teams/migrations/0005_auto_20150811_2236.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Split single free-text location fields into country + city.

    Renames ``Event.location`` -> ``Event.city`` and
    ``Fellow.home_location`` -> ``Fellow.home_city``, then adds a
    django_countries ``country`` column to both models.
    NOTE(review): the default ``'UK'`` is not an ISO 3166-1 alpha-2 code
    (that would be ``'GB'``) -- verify the project overrides the country
    list, otherwise the default will not resolve to a country.
    """
    dependencies = [
        ('teams', '0004_auto_20150516_0009'),
    ]
    operations = [
        migrations.AlterField(
            model_name='team',
            name='github_repo',
            field=models.CharField(blank=True, help_text='Comma-separated list of repos, like "mozilla/edwin,mozilla/edwin2"', max_length=1024),
        ),
    ]
| mpl-2.0 | Python | |
432ba7d3f923f6a22aef7bb45c7f658422968aa0 | add python script to plot p/r curve | patverga/torch-relation-extraction,patverga/torch-relation-extraction,patverga/torch-relation-extraction | bin/plot-pr-curve.py | bin/plot-pr-curve.py | import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import matplotlib.colors
import sys
# change font family to match math
#plt.rc('font', family='serif')
fontsize = 18
font = {'family' : 'sans-serif',
# 'serif' : 'Times Regular',
'size' : fontsize}
matplotlib.rc('font', **font)
output_dir = "plots"
# load in data
data_fname = sys.argv[1]
labels = np.unique(np.loadtxt(data_fname, usecols=[2], dtype='str'))
print labels
data = np.loadtxt(data_fname, converters = {2: lambda y: np.where(labels==y)[0]})
print data
recall_idx = 0
precision_idx = 1
model_idx = 2
# initialize figures
fig1 = plt.figure()
ax1 = fig1.add_subplot(111)
ax1.set_title("Recall vs. Precision", fontsize=fontsize)
ax1.set_xlabel("Recall")
ax1.set_ylabel("Precision")
for i in range(len(labels)):
indices = np.where(data[:,model_idx] == i)
ax1.plot(data[indices,recall_idx][0], data[indices,precision_idx][0], label=labels[i])
ax1.yaxis.set_major_formatter(ticker.FuncFormatter(lambda y, pos: ('%.2f')%(y)))
# add legend
ax1.legend()
#fig1.savefig("%s/compare-accuracy-speed-margin.pdf" % (output_dir), bbox_inches='tight')
plt.show()
| mit | Python | |
2d7782508e6bfe1be88da3409ae67371dab7379d | Add files via upload | gem/oq-engine,gem/oq-engine,gem/oq-engine,gem/oq-engine,gem/oq-engine | openquake/hazardlib/tests/gsim/skarlatoudis_2013_test.py | openquake/hazardlib/tests/gsim/skarlatoudis_2013_test.py | # -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (C) 2015-2019 GEM Foundation
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
from openquake.hazardlib.gsim.skarlatoudis_2013 import (
SkarlatoudisetalSlab2013)
from openquake.hazardlib.tests.gsim.utils import BaseGSIMTestCase
class SkarlatoudisetalSlab2013TestCase(BaseGSIMTestCase):
    """
    Tests the Skarlatoudis et al. (2013) model for subduction
    intraslab earthquakes.

    Each test compares the GSIM's mean / standard-deviation output against
    pre-computed expected values stored in CSV fixture files, within a
    0.1 % discrepancy tolerance.
    """
    GSIM_CLASS = SkarlatoudisetalSlab2013
    # Expected-value fixtures (mean, total/inter/intra-event stddevs).
    MEAN_FILE = "SKARL13/SKARL13_SSLAB_CENTRAL_MEAN_1.csv"
    TOTAL_FILE = "SKARL13/SKARL13_SSLAB_CENTRAL_STDDEV_TOTAL.csv"
    INTER_FILE = "SKARL13/SKARL13_SSLAB_CENTRAL_STDDEV_INTER.csv"
    INTRA_FILE = "SKARL13/SKARL13_SSLAB_CENTRAL_STDDEV_INTRA.csv"
    def test_mean(self):
        """Mean ground motion matches the fixture."""
        self.check(self.MEAN_FILE,
                   max_discrep_percentage=0.1)
    def test_std_total(self):
        """Total standard deviation matches the fixture."""
        self.check(self.TOTAL_FILE,
                   max_discrep_percentage=0.1)
    def test_std_inter(self):
        """Inter-event standard deviation matches the fixture."""
        self.check(self.INTER_FILE,
                   max_discrep_percentage=0.1)
    def test_std_intra(self):
        """Intra-event standard deviation matches the fixture."""
        self.check(self.INTRA_FILE,
                   max_discrep_percentage=0.1)
| agpl-3.0 | Python | |
f5237b61a50b66d6ac3123318af94db8ec95173b | Test tasks.get_email_addresses handles subentites. | wesleykendall/django-entity-emailer,wesleykendall/django-entity-emailer,ambitioninc/django-entity-emailer,ambitioninc/django-entity-emailer | entity_emailer/tests/test_tasks.py | entity_emailer/tests/test_tasks.py | from entity.models import Entity, EntityRelationship
from django.contrib.contenttypes.models import ContentType
from django.test import TestCase
from django_dynamic_fixture import G, N
from entity_emailer import tasks
from entity_emailer.models import Email
class Test_get_email_addresses(TestCase):
    """Tests for tasks.get_email_addresses address resolution."""

    def setUp(self):
        # Build one super-entity with two sub-entities, each carrying an
        # email address in its entity_meta (G = django_dynamic_fixture).
        self.ct = ContentType.objects.get_for_model(Email)
        self.super_entity = G(
            Entity,
            entity_meta={'email': 'test_super@example.com'},
            entity_type=self.ct
        )
        self.sub_entity_1 = G(
            Entity,
            entity_meta={'email': 'test_sub1@example.com'},
            entity_type=self.ct
        )
        self.sub_entity_2 = G(
            Entity,
            entity_meta={'email': 'test_sub2@example.com'},
            entity_type=self.ct
        )
        G(EntityRelationship, sub_entity=self.sub_entity_1, super_entity=self.super_entity)
        G(EntityRelationship, sub_entity=self.sub_entity_2, super_entity=self.super_entity)
    def test_returns_sub_entities_emails(self):
        """An Email with a subentity_type expands to the sub-entities' addresses."""
        # N builds an unsaved instance (django_dynamic_fixture).
        email = N(Email, send_to=self.super_entity, subentity_type=self.ct, context={})
        addresses = tasks.get_email_addresses(email)
        expected_addresses = {u'test_sub1@example.com', u'test_sub2@example.com'}
        self.assertEqual(set(addresses), expected_addresses)
    def test_returns_own_email(self):
        # TODO: implement -- an Email without subentity_type should resolve
        # to the target entity's own address.
        pass
    def test_unsubscription_works(self):
        # TODO: implement -- unsubscribed entities should be excluded.
        pass
| mit | Python | |
4ec9406a19b5f42b0b05f37c12b99dd91853514f | Add all the data necessary for generations | samedhi/gaend,talkiq/gaend,talkiq/gaend,samedhi/gaend | tests/generator_test.py | tests/generator_test.py | from datetime import date, time, datetime
from google.appengine.ext import testbed, ndb
from gaend.main import app
import unittest
import webtest
# References
# cloud.google.com/appengine/docs/python/ndb/db_to_ndb
# cloud.google.com/appengine/docs/python/ndb/entity-property-reference
# cloud.google.com/appengine/docs/python/ndb/creating-entity-models#expando
SERIALIZE = [{'key1': True, 'key2': []}, [1, 2.0, {}, 'json']]
COMPUTED = lambda x: "COMPUTED_PROPERTY"
# Maps each ndb property class to the python value kind(s) it accepts.
# A tuple entry pairs a type with an extra validation predicate.
PROPERTIES = {
    ndb.IntegerProperty: [int],
    ndb.FloatProperty: [float],
    ndb.BooleanProperty: [bool],
    # BUG FIX: the predicate previously read ``lambda x: len(s) < 1500`` --
    # ``s`` was undefined, so calling it raised NameError.  It now checks
    # the argument it receives (short strings fit a StringProperty).
    ndb.StringProperty: [(basestring, lambda x: len(x) < 1500)],
    ndb.TextProperty: [basestring],
    ndb.BlobProperty: [basestring],
    ndb.DateProperty: [date],
    ndb.TimeProperty: [time],
    ndb.DateTimeProperty: [datetime],
    ndb.GeoPtProperty: [ndb.GeoPt],
    ndb.KeyProperty: [ndb.Model],
    ndb.StructuredProperty: [ndb.Model],
    ndb.LocalStructuredProperty: [ndb.Model],
    ndb.JsonProperty: SERIALIZE,
    ndb.PickleProperty: SERIALIZE,
    ndb.ComputedProperty: COMPUTED,
}
# Untested Property Types:
# ndb.BlobKeyProperty - Holdover from `db` days?
# ndb.UserProperty - Google recomends not using this
# ndb.GenericProperty - Why not just use ndb.Expando class?
class DefaultModel(ndb.Model):
pass
DEFAULTS = {
bool: False,
int: 0,
float: 0.0,
basestring: "",
ndb.GeoPt: ndb.GeoPt(0,0),
ndb.Model: DefaultModel,
}
CHOICES = {
bool: [True, False],
int: [-1, 0, 1],
float: [-1.0, 0.0, 1.0],
basestring: "",
ndb.GeoPt: ndb.GeoPt(0,0),
ndb.Model: DefaultModel,
}
PROPERTY_OPTIONS = {
'indexed': bool,
'repeated': bool,
'required': bool,
'default': DEFAULTS,
'choices': CHOICES,
}
class GeneratorTest(unittest.TestCase):
    """Skeleton test case wiring up the App Engine testbed stubs."""
    def setUp(self):
        # WSGI test client plus in-memory datastore/memcache stubs.
        self.testapp = webtest.TestApp(app)
        self.testbed = testbed.Testbed()
        self.testbed.activate()
        self.testbed.init_datastore_v3_stub()
        self.testbed.init_memcache_stub()
    def tearDown(self):
        self.testbed.deactivate()
    def testTruthItself(self):
        # Placeholder asserting the harness itself runs.
        assert True
assert True
| from datetime import date, time, datetime
from google.appengine.ext import testbed, ndb
from gaend.main import app
import unittest
import webtest
# References
# cloud.google.com/appengine/docs/python/ndb/db_to_ndb
# cloud.google.com/appengine/docs/python/ndb/entity-property-reference
# cloud.google.com/appengine/docs/python/ndb/creating-entity-models#expando
PROPERTIES = [
ndb.IntegerProperty, # [int],
ndb.FloatProperty, # [float],
ndb.BooleanProperty, # [bool],
ndb.StringProperty, # [(basestring, lambda x, # len(s) < 1500)],
ndb.TextProperty, # [basetring]
ndb.BlobProperty, # [basestring],
ndb.DateProperty, # [date],
ndb.TimeProperty, # [time],
ndb.DateTimeProperty, # [datetime],
ndb.GeoPtProperty, # [ndb.GeoPt],
ndb.KeyProperty, # [ndb.Model],
ndb.StructuredProperty, # [ndb.Model]
ndb.LocalStructuredProperty, # [ndb.Model],
ndb.JsonProperty, # python list or dict
ndb.PickleProperty # python list or dict
]
# Untested Property Types
# 1. ndb.BlobKeyProperty - Holdover from `db` days?
# 2. ndb.UserProperty - Google recomends not using this
# 3. ndb.ExpandoProperty - Why not just use ndb.Expando class?
# 4. ndb.GenericProperty - Why not just use ndb.Expando class?
class GeneratorTest(unittest.TestCase):
def setUp(self):
self.testapp = webtest.TestApp(app)
self.testbed = testbed.Testbed()
self.testbed.activate()
self.testbed.init_datastore_v3_stub()
self.testbed.init_memcache_stub()
def tearDown(self):
self.testbed.deactivate()
def testTruthItself(self):
assert True
| mit | Python |
4c4891f24c0e5b093d3a9fcb0de86609b01a69c3 | Add migration for replace location with country and city | softwaresaved/fat,softwaresaved/fat,softwaresaved/fat,softwaresaved/fat | fellowms/migrations/0053_auto_20160804_1447.py | fellowms/migrations/0053_auto_20160804_1447.py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-08-04 14:47
from __future__ import unicode_literals
from django.db import migrations
import django_countries.fields
class Migration(migrations.Migration):
    """Replace single location fields with separate city + country.

    Renames ``Event.location`` -> ``Event.city`` and
    ``Fellow.home_location`` -> ``Fellow.home_city``, then adds a
    django_countries ``country`` column to both models.
    NOTE(review): the default ``'UK'`` is not an ISO 3166-1 alpha-2 code
    (that would be ``'GB'``) -- confirm django_countries is configured to
    accept it.
    """
    dependencies = [
        ('fellowms', '0052_merge'),
    ]
    operations = [
        migrations.RenameField(
            model_name='event',
            old_name='location',
            new_name='city',
        ),
        migrations.RenameField(
            model_name='fellow',
            old_name='home_location',
            new_name='home_city',
        ),
        migrations.AddField(
            model_name='event',
            name='country',
            field=django_countries.fields.CountryField(default='UK', max_length=2),
        ),
        migrations.AddField(
            model_name='fellow',
            name='country',
            field=django_countries.fields.CountryField(default='UK', max_length=2),
        ),
    ]
| bsd-3-clause | Python | |
d6c5e3c40e2106b7ef4ddc800a1c00493d4f469f | Add customized Pool class. | airekans/Monsit,airekans/Monsit | tests/test_pool.py | tests/test_pool.py | import gevent.pool
import gevent.queue
import gevent.event
import time
import traceback
def timeit(func):
    # Decorator: print the wall-clock duration of every call to ``func``.
    # NOTE(review): the bare ``except`` swallows *all* exceptions -- the
    # traceback is printed and the wrapper returns None.  Fine for this
    # benchmark script, but consider ``except Exception`` plus
    # ``functools.wraps`` if reused elsewhere.
    def wrap(*args, **kwargs):
        begin_time = time.time()
        try:
            return func(*args, **kwargs)
        except:
            traceback.print_exc()
        finally:
            # Always report the elapsed time, even on failure.
            end_time = time.time()
            print 'function %s:' % func.__name__, end_time - begin_time
    return wrap
global_counter = 0
def test_func():
    # Trivial benchmark workload: bump the module-level counter and
    # return its new value.
    global global_counter
    global_counter += 1
    return global_counter
class Pool(object):
    """Greenlet pool fed by a shared JoinableQueue of callables.

    Unlike gevent.pool.Pool, worker greenlets are spawned once up front
    and pull tasks from a queue, so spawning a task never blocks on pool
    capacity.  ``None`` on the queue is the shutdown sentinel: each worker
    exits after consuming one.
    """
    def __init__(self, pool_size=None):
        self._task_queue = gevent.queue.JoinableQueue()
        self._pool = gevent.pool.Pool(pool_size)
        # Default worker count when no size is given.
        if pool_size is None:
            pool_size = 100
        for _ in xrange(pool_size):
            self._pool.spawn(self.worker_func)
    def worker_func(self):
        # Loop forever pulling tasks; a ``None`` sentinel terminates the worker.
        while True:
            task = self._task_queue.get()
            if task is None:
                self._task_queue.task_done()
                break
            task()
            self._task_queue.task_done()
    def spawn(self, func, *args, **kwargs):
        # Enqueue the call; arguments are bound now, executed later by a worker.
        task = lambda: func(*args, **kwargs)
        self._task_queue.put_nowait(task)
    def join(self):
        # One sentinel per live worker greenlet, then wait for the queue
        # to drain and all workers to exit.  The pool cannot be reused after.
        for _ in xrange(len(self._pool)):
            self._task_queue.put_nowait(None)
        self._task_queue.join()
        self._pool.join()
    def kill(self):
        # Forcibly kill all worker greenlets (pending tasks are dropped).
        self._pool.kill()
@timeit
def test_my_pool():
    # Benchmark: 100k trivial tasks through the queue-based Pool above.
    pool = Pool(1000)
    for _ in xrange(100000):
        pool.spawn(test_func)
    pool.join()
@timeit
def test_gevent_pool():
    # Benchmark baseline: the same 100k tasks through gevent's own Pool,
    # which spawns one greenlet per task.
    pool = gevent.pool.Pool(1000)
    for _ in xrange(100000):
        pool.spawn(test_func)
    pool.join()
if __name__ == '__main__':
global_counter = 0
test_gevent_pool()
print 'global_counter', global_counter
global_counter = 0
test_my_pool()
print 'global_counter', global_counter | mit | Python | |
077e581326e0791e1bf5816baba6c6a6cba17d9d | Test for setting CHOICES_SEPARATOR | Pierre-Sassoulas/django-survey,Pierre-Sassoulas/django-survey,Pierre-Sassoulas/django-survey | survey/tests/test_default_settings.py | survey/tests/test_default_settings.py | from survey.tests import BaseTest
from django.test import override_settings
from django.conf import settings
from django.test import tag
from survey import set_default_settings
@tag("set")
@override_settings()
class TestDefaultSettings(BaseTest):
    """Checks that set_default_settings() restores missing survey settings."""
    def test_set_choices_separator(self):
        """Deleting CHOICES_SEPARATOR breaks the admin page; calling
        set_default_settings() must repair it."""
        url = "/admin/survey/survey/1/change/"
        # override_settings() makes this deletion test-local.
        del settings.CHOICES_SEPARATOR
        self.login()
        with self.assertRaises(AttributeError):
            self.client.get(url)
        set_default_settings()
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
| agpl-3.0 | Python | |
813f42aee6b38031c4993d322e12053c91d10c8a | add failing test to recreate logrotate bug [#461] | Shopify/dd-agent,jshum/dd-agent,tebriel/dd-agent,remh/dd-agent,jvassev/dd-agent,jraede/dd-agent,yuecong/dd-agent,PagerDuty/dd-agent,pfmooney/dd-agent,PagerDuty/dd-agent,pfmooney/dd-agent,AntoCard/powerdns-recursor_check,gphat/dd-agent,ess/dd-agent,zendesk/dd-agent,guruxu/dd-agent,gphat/dd-agent,c960657/dd-agent,manolama/dd-agent,lookout/dd-agent,manolama/dd-agent,polynomial/dd-agent,mderomph-coolblue/dd-agent,jraede/dd-agent,packetloop/dd-agent,pmav99/praktoras,jshum/dd-agent,jvassev/dd-agent,AniruddhaSAtre/dd-agent,jyogi/purvar-agent,PagerDuty/dd-agent,jyogi/purvar-agent,a20012251/dd-agent,jshum/dd-agent,huhongbo/dd-agent,joelvanvelden/dd-agent,jraede/dd-agent,eeroniemi/dd-agent,Wattpad/dd-agent,remh/dd-agent,Mashape/dd-agent,darron/dd-agent,takus/dd-agent,brettlangdon/dd-agent,Mashape/dd-agent,AniruddhaSAtre/dd-agent,indeedops/dd-agent,tebriel/dd-agent,JohnLZeller/dd-agent,brettlangdon/dd-agent,relateiq/dd-agent,Mashape/dd-agent,pmav99/praktoras,indeedops/dd-agent,JohnLZeller/dd-agent,Wattpad/dd-agent,GabrielNicolasAvellaneda/dd-agent,pmav99/praktoras,takus/dd-agent,AntoCard/powerdns-recursor_check,AntoCard/powerdns-recursor_check,c960657/dd-agent,relateiq/dd-agent,jraede/dd-agent,citrusleaf/dd-agent,mderomph-coolblue/dd-agent,jamesandariese/dd-agent,tebriel/dd-agent,joelvanvelden/dd-agent,guruxu/dd-agent,jamesandariese/dd-agent,relateiq/dd-agent,AntoCard/powerdns-recursor_check,huhongbo/dd-agent,zendesk/dd-agent,darron/dd-agent,jyogi/purvar-agent,jraede/dd-agent,jvassev/dd-agent,oneandoneis2/dd-agent,huhongbo/dd-agent,eeroniemi/dd-agent,pmav99/praktoras,oneandoneis2/dd-agent,benmccann/dd-agent,jamesandariese/dd-agent,ess/dd-agent,lookout/dd-agent,Shopify/dd-agent,zendesk/dd-agent,mderomph-coolblue/dd-agent,guruxu/dd-agent,yuecong/dd-agent,pfmooney/dd-agent,c960657/dd-agent,indeedops/dd-agent,Mashape/dd-agent,ess/dd-agent,Shopify/dd-agent,darron/dd-agent,urosgruber/dd-ag
ent,mderomph-coolblue/dd-agent,relateiq/dd-agent,c960657/dd-agent,ess/dd-agent,benmccann/dd-agent,packetloop/dd-agent,indeedops/dd-agent,a20012251/dd-agent,jyogi/purvar-agent,GabrielNicolasAvellaneda/dd-agent,citrusleaf/dd-agent,amalakar/dd-agent,jamesandariese/dd-agent,a20012251/dd-agent,citrusleaf/dd-agent,remh/dd-agent,GabrielNicolasAvellaneda/dd-agent,Wattpad/dd-agent,yuecong/dd-agent,Shopify/dd-agent,ess/dd-agent,cberry777/dd-agent,a20012251/dd-agent,GabrielNicolasAvellaneda/dd-agent,truthbk/dd-agent,darron/dd-agent,a20012251/dd-agent,pfmooney/dd-agent,tebriel/dd-agent,truthbk/dd-agent,packetloop/dd-agent,packetloop/dd-agent,AniruddhaSAtre/dd-agent,citrusleaf/dd-agent,remh/dd-agent,amalakar/dd-agent,urosgruber/dd-agent,PagerDuty/dd-agent,lookout/dd-agent,huhongbo/dd-agent,manolama/dd-agent,jamesandariese/dd-agent,takus/dd-agent,eeroniemi/dd-agent,polynomial/dd-agent,urosgruber/dd-agent,lookout/dd-agent,Shopify/dd-agent,manolama/dd-agent,amalakar/dd-agent,cberry777/dd-agent,c960657/dd-agent,amalakar/dd-agent,brettlangdon/dd-agent,AniruddhaSAtre/dd-agent,zendesk/dd-agent,Wattpad/dd-agent,pmav99/praktoras,packetloop/dd-agent,yuecong/dd-agent,lookout/dd-agent,JohnLZeller/dd-agent,guruxu/dd-agent,huhongbo/dd-agent,gphat/dd-agent,zendesk/dd-agent,AniruddhaSAtre/dd-agent,JohnLZeller/dd-agent,takus/dd-agent,AntoCard/powerdns-recursor_check,urosgruber/dd-agent,benmccann/dd-agent,darron/dd-agent,eeroniemi/dd-agent,Mashape/dd-agent,eeroniemi/dd-agent,oneandoneis2/dd-agent,relateiq/dd-agent,takus/dd-agent,tebriel/dd-agent,jvassev/dd-agent,remh/dd-agent,Wattpad/dd-agent,joelvanvelden/dd-agent,brettlangdon/dd-agent,urosgruber/dd-agent,jshum/dd-agent,jyogi/purvar-agent,brettlangdon/dd-agent,benmccann/dd-agent,oneandoneis2/dd-agent,manolama/dd-agent,polynomial/dd-agent,JohnLZeller/dd-agent,yuecong/dd-agent,truthbk/dd-agent,mderomph-coolblue/dd-agent,citrusleaf/dd-agent,oneandoneis2/dd-agent,guruxu/dd-agent,GabrielNicolasAvellaneda/dd-agent,PagerDuty/dd-agent,jvassev/dd-agent,g
phat/dd-agent,gphat/dd-agent,pfmooney/dd-agent,amalakar/dd-agent,benmccann/dd-agent,cberry777/dd-agent,cberry777/dd-agent,cberry777/dd-agent,polynomial/dd-agent,indeedops/dd-agent,joelvanvelden/dd-agent,joelvanvelden/dd-agent,jshum/dd-agent,truthbk/dd-agent,polynomial/dd-agent,truthbk/dd-agent | tests/test_tail.py | tests/test_tail.py | import logging
import subprocess
import tempfile
import unittest
class TestTail(unittest.TestCase):
    """Reproduces the logrotate-copytruncate bug in checks.utils.TailFile.

    Requires the ``logrotate`` binary on PATH.
    """
    def setUp(self):
        # Temporary log file plus a logrotate config that copytruncates it.
        self.log_file = tempfile.NamedTemporaryFile()
        self.logrotate_config = tempfile.NamedTemporaryFile()
        self.logrotate_config.write("""%s {
                copytruncate
                notifempty
                missingok
                rotate 1
                weekly
}""" % self.log_file.name)
        self.logrotate_config.flush()
        # Private state file so the test never needs root permissions.
        self.logrotate_state_file = tempfile.NamedTemporaryFile()
    def _trigger_logrotate(self):
        # Force an immediate rotation regardless of the file's age.
        subprocess.check_call([
            'logrotate',
            '-v',  # Verbose logging
            '-f',  # Force the rotation even though the file isn't old
            # Create a state file that you have file permissions for
            '-s', self.logrotate_state_file.name,
            self.logrotate_config.name
        ])
    def test_logrotate_copytruncate(self):
        """After copytruncate, the tailer must reset to the new (smaller) size
        instead of waiting for the file to regrow past its old offset."""
        from checks.utils import TailFile
        line_parser = lambda line: line
        tail = TailFile(logging.getLogger(), self.log_file.name, line_parser)
        self.assertEquals(tail._size, 0)
        # Write some data to the log file
        init_string = "hey there, I am a log\n"
        self.log_file.write(init_string)
        self.log_file.flush()
        # Consume from the tail
        gen = tail.tail(line_by_line=False, move_end=True)
        gen.next()
        # Verify that the tail consumed the data I wrote
        self.assertEquals(tail._size, len(init_string))
        # Trigger a copytruncate logrotation on the log file
        self._trigger_logrotate()
        # Write a new line to the log file
        new_string = "I am shorter\n"
        self.log_file.write(new_string)
        self.log_file.flush()
        # Verify that the tail recognized the logrotation
        self.assertEquals(tail._size, len(new_string))
| bsd-3-clause | Python | |
60774ca1a336fac5c0d9fb92ab6de509cd92dd43 | Add tests for topsort. | fhirschmann/penchy,fhirschmann/penchy | tests/test_util.py | tests/test_util.py | import unittest2
import util
class TopSortTest(unittest2.TestCase):
    """Tests for util.topological_sort(start_nodes, dependencies)."""
    def test_error(self):
        """A dependency cycle must raise ValueError."""
        a,b = range(2)
        deps = [(a,b), (b,a)]
        with self.assertRaises(ValueError):
            util.topological_sort([], deps)
    def test_multi_deps(self):
        """A node depending on two predecessors may see them in either order."""
        a,b,c,d = range(4)
        start = [a,b]
        deps = [([a,b], c),
                (c, d)]
        self.assertTrue(util.topological_sort(start, deps) in ([a,b,c,d],
                                                               [b,a,c,d]))
    def test_linear_deps(self):
        """A simple chain a->b->c->d has exactly one valid ordering."""
        a,b,c,d = range(4)
        start = [a]
        deps = [(a,b),(b,c),(c,d)]
        self.assertListEqual(util.topological_sort(start, deps), range(4))
| mit | Python | |
13917ab0aba2951bd10b64b53cdd358b169a432f | Create Bender_un_robot_dépressif.py | Alumet/Codingame | Medium/Bender_un_robot_dépressif.py | Medium/Bender_un_robot_dépressif.py | import sys
import math
# Auto-generated code below aims at helping you parse
# the standard input according to the problem statement.
# Read the maze dimensions: l rows, c columns.
# NOTE(review): ``c`` is reused further down as the loop-detection counter.
l, c = [int(i) for i in input().split()]
Map=[]
T=[]
# Read the maze rows; record Bender's start cell '@' and teleporter cells 'T'.
# NOTE(review): ``index('T')`` finds only the first 'T' in a row -- correct
# only while the two teleporters sit on different rows.
for i in range(l):
    Map.append(input())
    if "@" in Map[i]:
        X=Map[i].index('@')
        Y=i
    if 'T' in Map[i]:
        T.append([Map[i].index('T'),i])
# Direction priority used when blocked (reversed by 'I') and unit vectors
# as (letter, dx, dy).  Bender starts heading South.
direction_loop=['S','E','N','W']
direction_values=[('S',0,1),('N',0,-1),('E',1,0),('W',-1,0)]
direction=('S',0,1)
def heading():
    # Return the first direction, in priority order, whose target cell is
    # walkable (anything except a wall '#' or an intact obstacle 'X').
    for i in direction_loop:
        direction=heading_value(i)
        if Map[Y+direction[2]][X+direction[1]] in [' ','S','E','N','W','T','I','B','$']:
            return direction
            break  # unreachable: the return above already exits
def heading_value(x):
    # Translate a direction letter into its (letter, dx, dy) tuple.
    for i in direction_values:
        if x==i[0]:
            return i
mv_liste=[]  # history of [direction letter, (X, Y)] after each move
going=True
beer=False   # breaker mode: when True, 'X' obstacles can be smashed
loop=False
c=0          # number of repeated states seen (loop-detection heuristic)
while going:
    if Map[Y][X]=="B":
        # Beer toggles breaker mode on/off.
        if beer:
            beer=False
        else:
            beer=True
    if Map[Y][X]=="I":
        # Inverter flips the direction priority order.
        direction_loop.reverse()
    if Map[Y][X]=="T":
        # Teleport to the *other* 'T' cell.
        for el in T:
            if el!=[X,Y]:
                X=el[0]
                Y=el[1]
                break
    if Map[Y][X]=="$":
        # Suicide booth reached: stop walking.
        going=False
    else:
        # Coordinates of the cell one step ahead.
        X_t1=X+direction[1]
        Y_t1=Y+direction[2]
        if Map[Y][X] in ['S','N','E','W']:
            # Path modifier: forced direction change.
            direction=heading_value(Map[Y][X])
        elif Map[Y_t1][X_t1]=='#':
            direction=heading()
        elif Map[Y_t1][X_t1]=='X':
            if beer:
                # Breaker mode: smash the obstacle and walk through.
                Map[Y_t1]=Map[Y_t1][0:X_t1]+' '+Map[Y_t1][X_t1+1::]
            else:
                direction=heading()
        X+=direction[1]
        Y+=direction[2]
        # Heuristic loop detection: if repeated (direction, position)
        # states dominate the history, assume Bender is stuck forever.
        if [direction[0],(X,Y)] in mv_liste:
            c+=1
            if c>len(mv_liste)/1.2:
                loop=True
                going=False
        mv_liste.append([direction[0],(X,Y)])
# Translate recorded direction letters to the expected output words.
trad=['SOUTH','EAST','NORTH','WEST']
direction_loop=['S','E','N','W']  # restore canonical order for the lookup
if loop:
    print("LOOP")
else:
    for el in mv_liste:
        print(trad[direction_loop.index(el[0])])
| mit | Python | |
3b6e905bee79286c3998bef56d3c1ad811287078 | add cleanup script for cleaning veth netns and bridges | John-Lin/tinynet,John-Lin/tinynet | clean.py | clean.py | """
Mininet 2.3.0d1 License
Modifications copyright (c) 2017 Che Wei, Lin
Copyright (c) 2013-2016 Open Networking Laboratory
Copyright (c) 2009-2012 Bob Lantz and The Board of Trustees of
The Leland Stanford Junior University
Original authors: Bob Lantz and Brandon Heller
We are making Mininet available for public use and benefit with the
expectation that others will use, modify and enhance the Software and
contribute those enhancements back to the community. However, since we
would like to make the Software available for broadest use, with as few
restrictions as possible permission is hereby granted, free of charge, to
any person obtaining a copy of this Software to deal in the Software
under the copyrights without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
The name and trademarks of copyright holder(s) may NOT be used in
advertising or publicity pertaining to the Software or any derivatives
without specific, written prior permission.
"""
from subprocess import ( Popen, PIPE, check_output as co,
CalledProcessError )
def sh( cmd ):
    """Echo *cmd* (followed by a blank line) and run it through /bin/sh,
    returning the command's captured stdout."""
    print( cmd + '\n' )
    proc = Popen( [ '/bin/sh', '-c', cmd ], stdout=PIPE )
    stdout, _ = proc.communicate()
    return stdout
def main():
    """Best-effort cleanup of leftover OVS bridges, veth/tap links and
    cni-* network namespaces.  Destructive; requires root privileges.

    NOTE(review): ``sh`` returns bytes under Python 3, so the string
    concatenations below (e.g. ``'ovs-vsctl del-br ' + dp``) assume
    Python 2 -- verify before running on Python 3.
    """
    print( "*** Removing OVS datapaths\n" )
    dps = sh("ovs-vsctl --timeout=1 list-br").strip().splitlines()
    if dps:
        # Single batched ovs-vsctl call for all bridges.
        sh( "ovs-vsctl " + " -- ".join( "--if-exists del-br " + dp
                                        for dp in dps if dp ) )
    # And in case the above didn't work...
    dps = sh( "ovs-vsctl --timeout=1 list-br" ).strip().splitlines()
    for dp in dps:
        sh( 'ovs-vsctl del-br ' + dp )
    print( "*** Removing all links of the pattern vethX\n" )
    links = sh( "ip link show | "
                "egrep -o '(veth+[[:alnum:]]+)'"
                ).splitlines()
    # Delete blocks of links
    n = 1000  # chunk size
    for i in range( 0, len( links ), n ):
        # Batch deletions into one shell invocation; errors are discarded.
        cmd = ';'.join( 'ip link del %s' % link
                        for link in links[ i : i + n ] )
        sh( '( %s ) 2> /dev/null' % cmd )
    print( "*** Removing all links of the pattern tapX\n" )
    taps = sh( "ip link show | "
               "egrep -o '(tap+[[:digit:]]+[[:alnum:]]+)'"
               ).splitlines()
    # Delete blocks of links
    n = 1000  # chunk size
    for i in range( 0, len( taps ), n ):
        cmd = ';'.join( 'ip link del %s' % tap
                        for tap in taps[ i : i + n ] )
        sh( '( %s ) 2> /dev/null' % cmd )
    print( "*** Removing all network namespaces of the pattern cni-X-X-X-X-X\n" )
    nses = sh( "ip netns | "
               "egrep -o '(cni-+[[:alnum:]]+-[[:alnum:]]+-[[:alnum:]]+-[[:alnum:]]+-[[:alnum:]]+)'"
               ).splitlines()
    # Delete blocks of links
    n = 1000  # chunk size
    for i in range( 0, len( nses ), n ):
        cmd = ';'.join( 'ip netns del %s' % ns
                        for ns in nses[ i : i + n ] )
        sh( '( %s ) 2> /dev/null' % cmd )
    print( "*** Cleanup complete.\n" )
if __name__ == "__main__":
main()
| apache-2.0 | Python | |
7ecf3eb5ac261ae3846d23e1ca8c90e2ba65353a | Create main.py | JOSUEXLION/prog3-uip,JOSUEXLION/prog3-uip | parciales/parcial2/main.py | parciales/parcial2/main.py | import json
from kivy.app import App
from kivy.uix.screenmanager import ScreenManager, Screen, FadeTransition
from kivy.uix.dropdown import DropDown
from kivy.uix.button import Button
from kivy.uix.popup import Popup
from kivy.uix.label import Label
from kivy.properties import NumericProperty, StringProperty, ReferenceListProperty,\
ObjectProperty
from kivy.core.window import Window
Window.clearcolor = (1, 1, 1, 1)
class TaxiCosto(Screen):
    """Kivy screen that estimates a taxi fare between two locations.

    ``ubications.json`` supplies the selectable places (name -> type key);
    ``prices.json`` maps pairs of type keys to a fare string.
    """
    # Widget references bound from the kv file; ``price`` is the displayed fare.
    _layaout = ObjectProperty(None)
    _drop_a = ObjectProperty(None)
    _drop_b = ObjectProperty(None)
    price = StringProperty('0.00')
    def alertNotification(self, title, text):
        # Show a simple modal popup with the given title and message.
        popup = Popup(title=title, content=Label(text=text), size_hint=(None, None), size=(400, 300))
        popup.open()
    def getCosto(self):
        # Look up the fare for the selected origin/destination pair; warn
        # if either dropdown still shows its placeholder text.
        if(self._drop_a.text != 'Origen' and self._drop_b.text != 'Destino'):
            opt_a = self.ubicationsData[self._drop_a.text]
            opt_b = self.ubicationsData[self._drop_b.text]
            if opt_a in self.prices[opt_b]:
                self.price = 'B/. ' + self.prices[opt_b][opt_a]
            else:
                self.price = 'Incalculable! :('
        else:
            self.alertNotification('Campos incompletos', 'Por favor. \nllene las opciones!.')
    def onSelectDropDown(self, drop):
        # Remember which dropdown ('a' or 'b') is being edited; presumably
        # called from the kv file -- TODO confirm the argument values.
        self.selectDrop = drop
    """docstring for TaxiCosto"""
    def __init__(self, *args, **kwargs):
        super(TaxiCosto, self).__init__(*args, **kwargs)
        # read json config ubications
        with open('ubications.json') as ubi_file:
            self.ubications = json.load(ubi_file)
        with open('prices.json') as pri_file:
            self.prices = json.load(pri_file)
        self.ubicationsData = {}
        # create dropdownList
        dropdown = DropDown()
        for ubication in self.ubications:
            self.ubicationsData[ubication['name']] = ubication['type']
            # when adding widgets, we need to specify the height manually (disabling
            # the size_hint_y) so the dropdown can calculate the area it needs.
            btn = Button(text=ubication['name'], size_hint_y=None, height=60)
            # for each button, attach a callback that will call the select() method
            # on the dropdown. We'll pass the text of the button as the data of the
            # selection.
            btn.bind(on_release=lambda btn: dropdown.select(btn))
            # then add the button inside the dropdown
            dropdown.add_widget(btn)
        # Both dropdown buttons open the same shared list.
        self._drop_a.bind(on_release=dropdown.open)
        self._drop_b.bind(on_release=dropdown.open)
        # one last thing, listen for the selection in the dropdown list and
        # assign the data to the button text.
        # NOTE: ``instanceDrop`` is defined below this bind -- late binding
        # inside the lambda makes that safe.
        dropdown.bind(on_select=lambda instance, x: instanceDrop(x))
        # self._layaout.add_widget(self._drop_a)
        def instanceDrop(a):
            # Reset the shown fare and copy the chosen name onto whichever
            # dropdown button is currently active.
            self.price = '0.00'
            if(self.selectDrop == 'a'):
                self._drop_a.text = a.text
            else:
                self._drop_b.text = a.text
class MainApp(App):
    """Application entry point; the root widget is the TaxiCosto screen."""
    def build(self):
        # Build and return the root widget for the application window.
        taxi = TaxiCosto()
        return taxi
if __name__ == '__main__':
MainApp().run()
__version__ = "0.0.1"
| mit | Python | |
b4eebd858e07d33a3e7de8f9fda3ae009a0036c2 | Add a node | HiroyukiAbe/pimouse_ros,HiroyukiAbe/pimouse_ros | scripts/buzzer1.py | scripts/buzzer1.py | #!/usr/bin/env python
import rospy

# Minimal ROS node stub: register under the name 'buzzer' and block
# until the node is shut down (no publishers or subscribers yet).
rospy.init_node('buzzer')
rospy.spin()
| bsd-3-clause | Python | |
fe226ce33f116480bfea8f258fdffa1fd96e379c | read temp from gotemp | randomstring/raspberrypi | gotemp.py | gotemp.py | #!/usr/bin/python
"""Read one 8-byte HID report from a Go!Temp USB probe and print each
sample's temperature in degrees Fahrenheit."""
import time
import struct

# Open the raw HID node in binary mode; the context manager guarantees
# the descriptor is closed even if the read fails (the original used the
# Python-2-only ``file()`` builtin and never closed the handle).
with open("/dev/hidraw1", "rb") as ldusb:
    # Give the probe a moment to queue a fresh report before reading.
    time.sleep(0.5)
    pkt = ldusb.read(8)

# Report layout (little-endian): sample count (B), sequence number (B),
# then up to three raw 16-bit readings (H).
# NOTE(review): layout inferred from the unpack format string -- confirm
# against the Go!Temp HID report descriptor.
num_samples, seqno, r0, r1, r2 = struct.unpack("<BBHHH", pkt)
readings = (r0, r1, r2)
for sample in range(num_samples):
    # Raw counts are 1/128ths of a degree (Celsius), then converted to F.
    cel = readings[sample] / 128.0
    fahr = (9.0 / 5.0 * cel) + 32.0
    print(fahr)
| mit | Python | |
9abecda7f3c981b1cde193be038171c63bf69020 | Add root init | ibanner56/OtherDave | otherdave/__init__.py | otherdave/__init__.py | # OtherDave/otherdave/__init__.py | mit | Python | |
79d550cd96862bc4a4c0db60a2db60d3efa5cd6d | add vcrpy test helper module | rackerlabs/fastfood,samstav/fastfood,samstav/fastfood,martinb3/fastfood,samstav/fastfood,martinb3/fastfood,rackerlabs/fastfood,samstav/fastfood,brint/fastfood,brint/fastfood,brint/fastfood,martinb3/fastfood,rackerlabs/fastfood,martinb3/fastfood,brint/fastfood | tests/vcrhelper.py | tests/vcrhelper.py | """vcrpy integration helpers."""
import os
import unittest
import vcr
CASSETTE_LIB = os.path.join(
os.path.dirname(os.path.realpath(__file__)), 'cassettes')
assert os.path.isdir(CASSETTE_LIB), "Cassette library not found."
RECORD_MODE = 'none'
class VCRHelper(unittest.TestCase):
    """Base TestCase that configures a vcrpy recorder against the shared
    cassette library, with hooks that scrub sensitive header values."""
    # Header names (lowercase) whose values are redacted before a
    # request/response is written to a cassette.
    filter_headers = [
        'user-agent',
        'date',
        'public-key-pins',
    ]
    def do_filter_headers(self, thing):
        # Replace the value of every filtered header with a
        # '<NAME-FILTERED>' placeholder; *thing* is a request or response
        # dict carrying a 'headers' mapping.
        for key, value in thing['headers'].items():
            if key.lower() in self.filter_headers:
                redact = '<%s-FILTERED>' % key.upper()
                thing['headers'][key] = redact
        return thing
    def before_record_request(self, request):
        # scrub any request data here
        return request
    def before_record_response(self, response):
        # scrub sensitive response data here
        response = self.do_filter_headers(response)
        return response
    def setUp(self, **vcrkwargs):
        # Build the VCR instance; subclasses may override any default by
        # passing keyword arguments through to this setUp.
        defaults = {
            'filter_headers': self.filter_headers,
            'record_mode': RECORD_MODE,
            'cassette_library_dir': CASSETTE_LIB,
            'before_record_request': self.before_record_request,
            'before_record_response': self.before_record_response,
        }
        defaults.update(vcrkwargs)
        self.vcr = vcr.VCR(
            **defaults
        )
| apache-2.0 | Python | |
30d9dff89a56229ff1f7cf73181cf8fe10f31b6d | Test scanner | nikhilm/muzicast,nikhilm/muzicast | tests/test_full_scan.py | tests/test_full_scan.py | import sys
print sys.path
# Smoke test (Python 2 script): scan a test music directory and check
# that the scanner exposes its configured directories as a list.
from muzicast.collection import CollectionScanner
scanner = CollectionScanner(['/shared/music-test'])
assert type(scanner.directories) is list
scanner.full_scan()
| mit | Python | |
d1b7ed5f705c8e0935778636ade00a7452e2ea7f | Add management command for importing Holvi Invoices and Orders | rambo/asylum,rambo/asylum,hacklab-fi/asylum,jautero/asylum,HelsinkiHacklab/asylum,jautero/asylum,rambo/asylum,rambo/asylum,jautero/asylum,HelsinkiHacklab/asylum,hacklab-fi/asylum,hacklab-fi/asylum,HelsinkiHacklab/asylum,HelsinkiHacklab/asylum,jautero/asylum,hacklab-fi/asylum | project/holviapp/management/commands/import_holvidata.py | project/holviapp/management/commands/import_holvidata.py | # -*- coding: utf-8 -*-
import datetime
import itertools
import dateutil.parser
from django.core.management.base import BaseCommand, CommandError
from holviapp.importer import HolviImporter
from holviapp.utils import list_invoices, list_orders
def yesterday_proxy():
    """Return yesterday's midnight (local time) as an ISO-8601 string."""
    one_day = datetime.timedelta(days=1)
    yesterday = (datetime.datetime.now() - one_day).date()
    midnight = datetime.datetime.combine(yesterday, datetime.datetime.min.time())
    return midnight.isoformat()
class Command(BaseCommand):
    """Management command: import Holvi invoices and orders as
    transactions, optionally restricted to items updated since a date."""
    help = 'Import transaction data from Holvi API'
    def add_arguments(self, parser):
        # NOTE(review): the 'since' default is evaluated once, at module
        # import time, not per invocation -- confirm this is intended.
        parser.add_argument('--all', action='store_true', help='Import all Holvi transactions (WARNING: this may take forever)')
        parser.add_argument('since', type=str, nargs='?', default=yesterday_proxy(), help='Import transactions updated since datetime, defaults to yesterday midnight')
    def handle(self, *args, **options):
        # Refuse to run with neither a start datetime nor --all.
        if (not options['since']
            and not options['all']):
            raise CommandError('Either since or all must be specified')
        invoice_filters = {}
        order_filters = {}
        if not options.get('all', False):
            # Restrict both endpoints to items changed/paid since the cutoff.
            since_parsed = dateutil.parser.parse(options['since'])
            print("Importing since %s" % since_parsed.isoformat())
            invoice_filters['update_time_from'] = since_parsed.isoformat()
            order_filters['filter_paid_time_from'] = since_parsed.isoformat()
        # Feed invoices and orders through the importer as one stream.
        h = HolviImporter(itertools.chain(list_invoices(**invoice_filters), list_orders(**order_filters)))
        transactions = h.import_transactions()
        for t in transactions:
            print("Imported transaction %s" % t)
| mit | Python | |
cc25e521a99049bb1333c4a36df776c3303dda7b | add test for new functionality | secnot/rectpack | tests/test_generator.py | tests/test_generator.py | from unittest import TestCase
import rectpack.packer
import random
class TestGenerator(TestCase):
    """Exercise the packer's bin factory: every rectangle must be placed."""
    def setUp(self):
        # 25 rectangles covering every width/height combination from 8 to
        # 48 in steps of 8.
        self.rectangles = [(w, h) for w in range(8,50, 8) for h in range(8,50, 8)]
    def test_factory(self):
        p = rectpack.packer.newPacker()
        for r in self.rectangles:
            p.add_rect(*r)
        # Register a 50x50 bin factory so bins are created on demand.
        p.add_factory(50, 50)
        p.pack()
        # check that bins were generated
        self.assertGreater(len(p.bin_list()), 0)
        # check that all of the rectangles made it in
        self.assertEqual(len(p.rect_list()), len(self.rectangles))
| apache-2.0 | Python | |
8ca6dd9d1089b5976d54e06f452a45306dbfb55e | Add generator test | eiri/echolalia-prototype | tests/test_generator.py | tests/test_generator.py | import unittest
from echolalia.generator import Generator
class GeneratorTestCase(unittest.TestCase):
    """Smoke-test Generator.generate(): document count and field types."""
    def setUp(self):
        # Field specifications handed to the generator.
        self.items = ['pystr', 'pyint']
    def test_generate(self):
        generator = Generator(items=self.items)
        docs = generator.generate(3)
        # Exactly the requested number of documents...
        self.assertEqual(len(docs), 3)
        # ...each a dict with a str 'pystr' field and an int 'pyint' field.
        for doc in docs:
            self.assertIn('pystr', doc)
            self.assertIn('pyint', doc)
            self.assertIsInstance(doc, dict)
            self.assertIsInstance(doc['pystr'], str)
            self.assertIsInstance(doc['pyint'], int)
| mit | Python | |
ad0ed0b60db6b527b0c210d2e1a23d529d36889d | Create test_gutenberg.py | fnielsen/dasem,fnielsen/dasem | tests/test_gutenberg.py | tests/test_gutenberg.py |
import pytest
from dasem.gutenberg import Word2Vec
@pytest.fixture
def w2v():
    # Shared Word2Vec model instance for the tests below.
    return Word2Vec()
def test_w2v(w2v):
    # most_similar should return the default top-10 nearest words.
    word_and_similarities = w2v.most_similar('dreng')
    assert len(word_and_similarities) == 10
| apache-2.0 | Python | |
6cad13197f7d2e399ef3e91a63a34637814c2ad1 | fix import command | fangohr/oommf-python,fangohr/oommf-python,ryanpepper/oommf-python,ryanpepper/oommf-python,fangohr/oommf-python,ryanpepper/oommf-python,ryanpepper/oommf-python | utils/test_mesh.py | utils/test_mesh.py | import unittest
import numpy as np
import mesh
class TestIterCoordsInt(unittest.TestCase):
    """Integer-index iteration must follow the mesh's array ordering."""
    def test_zyx_ordering(self):
        # ZYX (the default): the expected sequences show x varying
        # fastest, then y, then z.
        m = mesh.Mesh((3, 1, 1), cellsize=(1, 1, 1))
        indices = [r for r in m.iter_coords_int()]
        expected = [[0, 0, 0], [1, 0, 0], [2, 0, 0]]
        assert np.array_equal(m.mesh_size, [3, 1, 1])
        assert m.array_order == mesh.Mesh.ZYX
        assert np.array_equal(expected, indices)
        m = mesh.Mesh((2, 2, 2), cellsize=(1, 1, 1))
        indices = [r for r in m.iter_coords_int()]
        expected = [[0, 0, 0], [1, 0, 0], [0, 1, 0], [1, 1, 0],
                    [0, 0, 1], [1, 0, 1], [0, 1, 1], [1, 1, 1]]
        assert np.array_equal(m.mesh_size, [2, 2, 2])
        assert m.array_order == mesh.Mesh.ZYX
        assert np.array_equal(expected, indices)
    def test_xyz_ordering(self):
        # XYZ: the expected sequences show z varying fastest, then y,
        # then x.
        m = mesh.Mesh((3, 1, 1), cellsize=(1, 1, 1), array_order=mesh.Mesh.XYZ)
        indices = [r for r in m.iter_coords_int()]
        expected = [[0, 0, 0], [1, 0, 0], [2, 0, 0]]
        assert np.array_equal(m.mesh_size, [3, 1, 1])
        assert m.array_order == mesh.Mesh.XYZ
        assert np.array_equal(expected, indices)
        m = mesh.Mesh((2, 2, 2), cellsize=(1, 1, 1), array_order=mesh.Mesh.XYZ)
        indices = [r for r in m.iter_coords_int()]
        expected = [[0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1],
                    [1, 0, 0], [1, 0, 1], [1, 1, 0], [1, 1, 1]]
        assert np.array_equal(m.mesh_size, [2, 2, 2])
        assert m.array_order == mesh.Mesh.XYZ
        assert np.array_equal(expected, indices)
class TestIterCoords(unittest.TestCase):
    """Coordinate iteration must yield cell centres in mesh order."""
    def test_zyx_ordering(self):
        m = mesh.Mesh((3, 1, 1), cellsize=(1, 1, 1))
        coords = [r for r in m.iter_coords()]
        # Unit cells, so every centre is offset by 0.5 in each direction.
        expected = [[0.5, 0.5, 0.5], [1.5, 0.5, 0.5], [2.5, 0.5, 0.5]]
        assert np.array_equal(expected, coords)
| import unittest
import numpy as np
from finmag.util.oommf import mesh
class TestIterCoordsInt(unittest.TestCase):
def test_zyx_ordering(self):
m = mesh.Mesh((3, 1, 1), cellsize=(1, 1, 1))
indices = [r for r in m.iter_coords_int()]
expected = [[0, 0, 0], [1, 0, 0], [2, 0, 0]]
assert np.array_equal(m.mesh_size, [3, 1, 1])
assert m.array_order == mesh.Mesh.ZYX
assert np.array_equal(expected, indices)
m = mesh.Mesh((2, 2, 2), cellsize=(1, 1, 1))
indices = [r for r in m.iter_coords_int()]
expected = [[0, 0, 0], [1, 0, 0], [0, 1, 0], [1, 1, 0],
[0, 0, 1], [1, 0, 1], [0, 1, 1], [1, 1, 1]]
assert np.array_equal(m.mesh_size, [2, 2, 2])
assert m.array_order == mesh.Mesh.ZYX
assert np.array_equal(expected, indices)
def test_xyz_ordering(self):
m = mesh.Mesh((3, 1, 1), cellsize=(1, 1, 1), array_order=mesh.Mesh.XYZ)
indices = [r for r in m.iter_coords_int()]
expected = [[0, 0, 0], [1, 0, 0], [2, 0, 0]]
assert np.array_equal(m.mesh_size, [3, 1, 1])
assert m.array_order == mesh.Mesh.XYZ
assert np.array_equal(expected, indices)
m = mesh.Mesh((2, 2, 2), cellsize=(1, 1, 1), array_order=mesh.Mesh.XYZ)
indices = [r for r in m.iter_coords_int()]
expected = [[0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1],
[1, 0, 0], [1, 0, 1], [1, 1, 0], [1, 1, 1]]
assert np.array_equal(m.mesh_size, [2, 2, 2])
assert m.array_order == mesh.Mesh.XYZ
assert np.array_equal(expected, indices)
class TestIterCoords(unittest.TestCase):
def test_zyx_ordering(self):
m = mesh.Mesh((3, 1, 1), cellsize=(1, 1, 1))
coords = [r for r in m.iter_coords()]
expected = [[0.5, 0.5, 0.5], [1.5, 0.5, 0.5], [2.5, 0.5, 0.5]]
assert np.array_equal(expected, coords)
| bsd-2-clause | Python |
2427afc967169f1e9d942bb7d955454b7ad0a44e | add open/close/cancel position example | ccxt/ccxt,ccxt/ccxt,ccxt/ccxt,ccxt/ccxt,ccxt/ccxt | examples/py/phemex-open-cancel-close-positions.py | examples/py/phemex-open-cancel-close-positions.py | # -*- coding: utf-8 -*-
import os
import sys
from pprint import pprint
root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(root + '/python')
import ccxt # noqa: E402
print('CCXT Version:', ccxt.__version__)
# Exchange handle: rate limiting enabled, perpetual swaps by default.
exchange = ccxt.phemex({
    'enableRateLimit': True, # https://github.com/ccxt/ccxt/wiki/Manual#rate-limit
    'apiKey': 'YOUR_API_KEY', # testnet keys if using the testnet sandbox
    'secret': 'YOUR_SECRET', # testnet keys if using the testnet sandbox
    'options': {
        'defaultType': 'swap',
    },
})
# exchange.set_sandbox_mode(True)  # uncomment to use the testnet sandbox
markets = exchange.load_markets()
# Order size -- presumably in contracts for this swap market; confirm.
amount = 10
symbol = 'BTC/USD:USD'
# Opening and Canceling a pending contract order (unrealistic price)
order = exchange.create_order(symbol, 'limit', 'buy', amount, '20000')
exchange.cancel_order(order['id'], symbol)
# Opening and exiting a filled contract position by issuing the exact same order but in the opposite direction
# Opening a long position
order = exchange.create_order(symbol, 'market', 'buy', amount)
# closing the previous position by issuing the exact same order but in the opposite direction
# with reduceOnly option to prevent an unwanted exposure increase
orderClose = exchange.create_order(symbol, 'market', 'sell', amount, None, {'reduceOnly': True})
999dda7b26585cdfb165752fcca95894e275968d | Add folder to save the scripts from the book learning sklearn, and add the first chapter script | qingkaikong/useful_script,qingkaikong/useful_script,qingkaikong/useful_script,qingkaikong/useful_script,qingkaikong/useful_script | python/sklearn/learning_sklearn/simple_classification.py | python/sklearn/learning_sklearn/simple_classification.py | from sklearn import datasets
from sklearn.cross_validation import train_test_split
from sklearn import metrics
from sklearn import preprocessing
import numpy as np
#get the dataset
iris = datasets.load_iris()
X_iris, y_iris = iris.data, iris.target
# Get dataset with only the first two attributes
X, y = X_iris[:, :2], y_iris
# Split the dataset into a training and a testing set
# Test set will be the 25% taken randomly
X_train, X_test, y_train, y_test = train_test_split(X, y,
test_size=0.25, random_state=33)
print X_train.shape, y_train.shape
# Standardize the features
scaler = preprocessing.StandardScaler().fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
#plot the data
import matplotlib.pyplot as plt
colors = ['red', 'greenyellow', 'blue']
for i in xrange(len(colors)):
xs = X_train[:, 0][y_train == i]
ys = X_train[:, 1][y_train == i]
plt.scatter(xs, ys, c=colors[i])
plt.legend(iris.target_names, scatterpoints = 1)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.show()
#Using SGD
from sklearn.linear_model import SGDClassifier
clf = SGDClassifier()
clf.fit(X_train, y_train)
#plot decision
x_min, x_max = X_train[:, 0].min() - .5, X_train[:, 0].max() +.5
y_min, y_max = X_train[:, 1].min() - .5, X_train[:, 1].max() +.5
xs = np.arange(x_min, x_max, 0.5)
fig, axes = plt.subplots(1, 3)
fig.set_size_inches(10, 6)
for i in [0, 1, 2]:
axes[i].set_aspect('equal')
axes[i].set_title('Class '+ str(i) + ' versus the rest')
axes[i].set_xlabel('Sepal length')
axes[i].set_ylabel('Sepal width')
axes[i].set_xlim(x_min, x_max)
axes[i].set_ylim(y_min, y_max)
plt.sca(axes[i])
plt.scatter(X_train[:, 0], X_train[:, 1], c=y_train,cmap=plt.cm.prism)
ys = (-clf.intercept_[i] - xs * clf.coef_[i, 0]) / clf.coef_[i, 1]
plt.plot(xs, ys, hold=True)
plt.show()
#print out precision, recall, and F1-score
y_pred = clf.predict(X_test)
print metrics.classification_report(y_test, y_pred,target_names=iris.target_names)
#print out confusion matrix, the true classes are in rows, and predicted class
#in columns
print metrics.confusion_matrix(y_test, y_pred)
#Using cross-validation
from sklearn.cross_validation import cross_val_score, KFold
from sklearn.pipeline import Pipeline
# create a composite estimator made by a pipeline of the
#standarization and the linear model
clf = Pipeline([
('scaler', preprocessing.StandardScaler()),
('linear_model', SGDClassifier())
])
# create a k-fold cross validation iterator of k=5 folds
cv = KFold(X.shape[0], 5, shuffle=True, random_state=33)
# by default the score used is the one returned by score
#method of the estimator (accuracy)
scores = cross_val_score(clf, X, y, cv=cv)
print scores
from scipy.stats import sem
def mean_score(scores):
    """Format the mean of *scores* with its standard error of the mean."""
    average = np.mean(scores)
    error = sem(scores)
    return "Mean score: {0:.3f} (+/-{1:.3f})".format(average, error)
print mean_score(scores)
| bsd-3-clause | Python | |
ad97be23fe9e267ed9d64b08fdf64631e234d43a | Add wierd migration | UrLab/incubator,UrLab/incubator,UrLab/incubator,UrLab/incubator | projects/migrations/0002_auto_20151208_1553.py | projects/migrations/0002_auto_20151208_1553.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('projects', '0001_squashed_0010_task'),
]
operations = [
migrations.AlterField(
model_name='project',
name='created',
field=models.DateTimeField(auto_now_add=True),
),
migrations.AlterField(
model_name='project',
name='modified',
field=models.DateTimeField(auto_now=True),
),
]
| agpl-3.0 | Python | |
b5da9dcf973a9a780c729f2855fb3784bfe9328a | Create heatdb.py | HeatIsland/HeatDB,HeatIsland/HeatDB | heatdb.py | heatdb.py | import webapp2
import logging
import re
import cgi
import jinja2
import os
import random
import string
import hashlib
import hmac
import Cookie
import urllib2
import time
from datetime import datetime, timedelta
from google.appengine.api import memcache
from google.appengine.ext import db
from xml.dom import minidom
## see http://jinja.pocoo.org/docs/api/#autoescaping
def guess_autoescape(template_name):
if template_name is None or '.' not in template_name:
return False
ext = template_name.rsplit('.', 1)[1]
return ext in ('html', 'htm', 'xml')
JINJA_ENVIRONMENT = jinja2.Environment(
autoescape=guess_autoescape, ## see http://jinja.pocoo.org/docs/api/#autoescaping
loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
extensions=['jinja2.ext.autoescape'])
class MyHandler(webapp2.RequestHandler):
    """Base request handler with small rendering helpers shared by all
    pages: plain text, Jinja2 templates, and JSON."""

    def write(self, *items):
        # Join all pieces with ' : ' and write them to the response body.
        self.response.write(" : ".join(items))

    def render_str(self, template, **params):
        # Render 'templates/<template>' with the given parameters.
        tplt = JINJA_ENVIRONMENT.get_template('templates/'+template)
        return tplt.render(params)

    def render(self, template, **kw):
        # Render a template straight into the response.
        self.write(self.render_str(template, **kw))

    def render_json(self, d):
        # Serialize *d* as JSON and emit it with the right content type.
        # Fixed: ``json`` was never imported at module level, so this
        # method raised NameError on every call; import it locally here.
        import json
        json_txt = json.dumps(d)
        self.response.headers['Content-Type'] = 'application/json; charset=UTF-8'
        self.write(json_txt)
class MainPage(MyHandler):
    """Handler for '/' (currently a stub)."""

    def get(self):
        # Fixed: the original ``get`` had an empty body, which is a
        # SyntaxError in Python; serve an empty 200 response for now.
        pass
application = webapp2.WSGIApplication([
('/', MainPage),],debug=True)
| mit | Python | |
3ca7eaca8026088dba1719a5dd2e3da1a6ffe404 | add householder qr algorithm - not working yet. | ddrake/mth653 | qr/hqr.py | qr/hqr.py | from numpy import *
def inner(v, w):
    """Return the (conjugate-linear) inner product of vectors v and w."""
    product = v.conj() * w
    return sum(product)
def qr(a):
    """Householder QR factorization (Trefethen & Bau, Algorithm 10.1).

    Overwrites *a* in place with the upper-triangular factor R and
    returns the matrix ``v`` whose k-th column holds the normalized
    Householder reflector used at step k.
    """
    (m, n) = shape(a)
    # Match the input dtype so complex matrices are not truncated.
    v = zeros((m, n), dtype=a.dtype)
    for k in range(n):
        print("k=%d" % k)
        # Work on a copy: the original took a *view* into ``a`` and then
        # modified x[0] in place, corrupting the matrix before the
        # reflection below was applied.
        x = a[k:m, k].copy()
        # numpy's sign(0) is 0, which would drop the shift entirely;
        # fall back to +1 in that case.
        s = sign(x[0]) if x[0] != 0 else 1.0
        x[0] += s * linalg.norm(x)
        vk = x / linalg.norm(x)
        v[k:m, k] = vk
        print(vk)
        # Apply the reflector to the trailing submatrix:
        #   A[k:m, k:n] -= 2 v (v* A[k:m, k:n])
        a[k:m, k:n] -= 2 * vk[:, newaxis].dot(vk.conj().dot(a[k:m, k:n])[newaxis, :])
        print(a)
    return v
| mit | Python | |
c111bc4dd1c040b2ddf1a83c4d93692a77eb269f | Create __init__.py | rockwolf/python,rockwolf/python,rockwolf/python,rockwolf/python,rockwolf/python,rockwolf/python | fade/database/versions/__init__.py | fade/database/versions/__init__.py | bsd-3-clause | Python | ||
3067c29b47974d75e8f9a6f01596e9be10411b81 | Add admin.py file | scottferg/django-c2dm | admin.py | admin.py | # Copyright (c) 2010, Scott Ferguson
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the software nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY SCOTT FERGUSON ''AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL SCOTT FERGUSON BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from django.contrib import admin
from models import AndroidDevice
def registration_id(object):
    """Admin list column: the first 24 characters of the device's
    registration id, truncated with an ellipsis."""
    truncated = object.registration_id[:24]
    return '%s...' % truncated
registration_id.short_description = "Registration ID"
class AndroidDeviceAdmin(admin.ModelAdmin):
    """Admin change-list configuration for registered Android devices."""
    # Columns shown in the change list; ``registration_id`` is the
    # truncating callable defined at module level.
    list_display = (
        'device_id',
        registration_id,
        'collapse_key',
        'last_messaged',
        'failed_push'
    )
admin.site.register(AndroidDevice, AndroidDeviceAdmin)
| bsd-3-clause | Python | |
d942340fb5cfe8aa9aade11b3117b9848097c8a1 | Write an abstraction for storing locality state in ES | jeffbryner/MozDef,mozilla/MozDef,mpurzynski/MozDef,jeffbryner/MozDef,mozilla/MozDef,mpurzynski/MozDef,mpurzynski/MozDef,mpurzynski/MozDef,mozilla/MozDef,mozilla/MozDef,jeffbryner/MozDef,jeffbryner/MozDef | alerts/geomodel/journal.py | alerts/geomodel/journal.py | '''To make GeoModel code more testable, we abstract interaction with
ElasticSearch away via a "journal interface". This is just a function that,
called with an ES index and a list of `Entry`, stores the contained locality
state data in ElasticSearch.
'''
from typing import Callable, List, NamedTuple
from mozdef_util.elasticsearch_client import ElasticsearchClient as ESClient
from alerts.geomodel.locality import State
# TODO: Switch to dataclasses when we upgrade to Python 3.7+
class Entry(NamedTuple):
    '''A single journal record: the document id to store under, plus the
    locality ``State`` to persist for it.
    '''
    # Document id used when saving to ElasticSearch.
    identifier: str
    # Locality state payload; serialized via ``_asdict()`` when stored.
    state: State
# Type of a function that persists a batch of entries to a named index.
# Fixed: ``Callable`` subscription requires an explicit return type; the
# original ``Callable[[List[Entry], str]]`` raises a TypeError at import.
JournalInterface = Callable[[List[Entry], str], None]


def wrap(client: ESClient) -> JournalInterface:
    '''Wrap an `ElasticsearchClient` in a closure of type `JournalInterface`.

    The returned function stores each entry's locality state as a
    document in the given ElasticSearch index, keyed by the entry's
    identifier.
    '''
    def wrapper(entries: List[Entry], esindex: str):
        for entry in entries:
            document = dict(entry.state._asdict())
            client.save_object(
                index=esindex,
                body=document,
                # Fixed: the original read ``entry.identifer`` (typo),
                # which raised AttributeError on every call.
                doc_id=entry.identifier)

    return wrapper
| mpl-2.0 | Python | |
b1f964e9725a18014de17d454bb733b7ad43cd38 | Write Pytac script to write all readback pvs to file | razvanvasile/Work-Mini-Projects,razvanvasile/Work-Mini-Projects,razvanvasile/Work-Mini-Projects | pytac/write_to_file_readback_pvs.py | pytac/write_to_file_readback_pvs.py | import pytac.load_csv
import pytac.epics
def write_data_to_file(file_name, data):
    """Write each item of *data* to *file_name*, one per line.

    Uses a context manager so the file is closed even if a write fails
    (the original left the handle open on error).
    """
    with open(file_name, 'w') as fout:
        for row in data:
            fout.write('{0}\n'.format(row))
def get_readback_pvs(mode):
    """Return the readback PV name of every field of every element in
    the lattice loaded for machine *mode* (e.g. 'VMX')."""
    lattice = pytac.load_csv.load(mode, pytac.epics.EpicsControlSystem())
    elements = lattice.get_elements()
    readback_pvs = list()
    # Get the readback pvs of all elements
    for element in elements:
        fields = element.get_fields()
        for field in fields:
            readback_pvs.append(element.get_pv_name(field, 'readback'))
    return readback_pvs
def main():
    """Dump all VMX readback PV names, sorted, to readback_pvs_py.txt."""
    readback_pvs = get_readback_pvs('VMX')
    # Sort the result. It is required for comparison with the Matlab result.
    readback_pvs = sorted(readback_pvs)
    write_data_to_file('readback_pvs_py.txt', readback_pvs)
if __name__=='__main__':
main()
| apache-2.0 | Python | |
c12a589b92a7336b68626aa742b0611584a8b943 | add wunderground driver | jf87/smap,immesys/smap,jf87/smap,immesys/smap,SoftwareDefinedBuildings/smap,immesys/smap,jf87/smap,jf87/smap,SoftwareDefinedBuildings/smap,SoftwareDefinedBuildings/smap,jf87/smap,SoftwareDefinedBuildings/smap,immesys/smap,SoftwareDefinedBuildings/smap,SoftwareDefinedBuildings/smap,jf87/smap,immesys/smap | python/smap/drivers/wunderground.py | python/smap/drivers/wunderground.py |
import urllib2
import rfc822
from xml.dom.minidom import parse, parseString
from xml.parsers.expat import ExpatError
from twisted.internet import reactor
from twisted.python import log
from smap import driver
def get_val(dom, key):
v = dom.getElementsByTagName(key)[0].firstChild.nodeValue
return v
class WunderGround(driver.SmapDriver):
def setup(self, opts):
self.url = opts.get("Address",
"http://api.wunderground.com/weatherstation/WXCurrentObXML.asp")
self.id = opts.get("ID", "KCABERKE7")
self.last_time = 0
self.metadata_done = False
self.add_timeseries("wind_dir", "deg")
self.add_timeseries("wind_speed", "m/s", data_type="double")
self.add_timeseries("wind_gust", "m/s", data_type="double")
self.add_timeseries("humidity", "rh")
self.add_timeseries("temperature", "C", data_type="double")
self.add_timeseries("pressure", "mb", data_type="double")
self.add_timeseries("dew_point", "C", data_type="double")
def start(self):
reactor.callInThread(self.update)
def update(self):
try:
url = self.url + "?ID=" + self.id
fh = urllib2.urlopen(url, timeout=10)
except urllib2.URLError, e:
log.err("URLError getting reading: [%s]: %s" % (url, str(e)))
return
except urllib2.HTTPError, e:
log.err("HTTP Error: [%s]: %s" % (url, str(e)))
return
try:
dom = parse(fh)
except ExpatError, e:
log.err("Exception parsing DOM [%s]: %s" % (url, str(e)))
return
try:
reading_time = rfc822.parsedate_tz(get_val(dom, "observation_time_rfc822"))
reading_time = int(rfc822.mktime_tz(reading_time))
except Exception, e:
log.err("Exception finding time [%s]: %s" % (url, str(e)))
return
if reading_time > self.last_time:
self.add('/wind_dir', reading_time, int(get_val(dom, "wind_degrees")))
self.add('/wind_speed', reading_time, float(get_val(dom, "wind_mph")))
self.add("/wind_gust", reading_time, float(get_val(dom, "wind_gust_mph")))
self.add("/humidity", reading_time, int(get_val(dom, "relative_humidity")))
self.add("/temperature", reading_time, float(get_val(dom, "temp_c")))
self.add("/pressure", reading_time, float(get_val(dom, "pressure_mb")))
self.add("/dew_point", reading_time, float(get_val(dom, "dewpoint_c")))
last_time = reading_time
if not self.metadata_done:
self.metadata_done = True
self.set_metadata('/', {
'Extra/StationType' : get_val(dom, "station_type"),
'Extra/StationID' : get_val(dom, "station_id"),
'Location/Latitude' : get_val(dom, "latitude"),
'Location/Longitude': get_val(dom, "longitude"),
'Location/Altitude': get_val(dom, "elevation"),
'Location/Uri' : get_val(dom, "link"),
'Location/City' : get_val(dom, "city"),
'Location/State' : get_val(dom, "state"),
})
dom.unlink()
| bsd-2-clause | Python | |
a88aed479937b09b560c8820d3d5c1003a94b9f1 | add google hangout parser | mjperrone/personal-lexicon,mjperrone/personal-lexicon | parse_hangouts.py | parse_hangouts.py | #!/usr/bin/env python
# usage: `python parse_hangouts.py path/to/takeout/`
import json
import sys
# holy wow this format is a mess without any docs
def parse_hangouts(path):
data = json.loads(open(path, 'r').read())
ids = {}
for conversation in data['conversation_state']:
for x in conversation['conversation_state']['conversation']['participant_data']:
if 'fallback_name' in x:
ids[x['id']['gaia_id']] = x['fallback_name']
for message in conversation['conversation_state']['event']:
sender = ids.get(message['sender_id']['gaia_id'], "notfound")
if sender in ('Mike Perrone', 'Michael Perrone')\
and 'chat_message' in message\
and 'segment' in message['chat_message']['message_content']:
for segment in message['chat_message']['message_content']['segment']:
if 'text' in segment:
print segment['text']
parse_hangouts(sys.argv[-1] + "Hangouts/Hangouts.json")
| mit | Python | |
514a04b5ffa7c9e3ede068c860933e9a404e6063 | add missing file. | biokit/biokit,biokit/biokit | biokit/stats/criteria.py | biokit/stats/criteria.py | import math
def AIC(L, k):
return 2*k - 2 * math.log(L)
def AICc(L, k, n):
return AIC(L, k) + 2*k*(k+1.)/(n-k-1.)
def BIC(L, k, n):
return -2 * math.log(L) + k * (math.log(n) - math.log(2*math.pi))
| bsd-2-clause | Python | |
ee8b3fd94bac16390b367dc5030489738ff67958 | add example to get UETable from data base | mverwe/JetRecoValidation | tools/getUETable_cfg.py | tools/getUETable_cfg.py | import FWCore.ParameterSet.Config as cms
process = cms.Process("jectxt")
process.load('Configuration.StandardSequences.Services_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
# define your favorite global tag
process.GlobalTag.globaltag = '74X_dataRun2_HLT_ppAt5TeV_v0'#Prompt_v4'#auto:run2_data'
process.GlobalTag.toGet.extend([
cms.PSet(record = cms.string("JetCorrectionsRecord"),
tag = cms.string("UETableCompatibilityFormat_Calo_v02_offline"),
label = cms.untracked.string("UETable_Calo")
)
])
process.maxEvents = cms.untracked.PSet(input = cms.untracked.int32(1))
process.source = cms.Source("EmptySource")
process.readAK4PF = cms.EDAnalyzer('JetCorrectorDBReader',
# below is the communication to the database
payloadName = cms.untracked.string('UETable_Calo'),
# this is used ONLY for the name of the printed txt files. You can use any name that you like,
# but it is recommended to use the GT name that you retrieved the files from.
globalTag = cms.untracked.string('74X_dataRun2_HLT_ppAt5TeV_v0'),
printScreen = cms.untracked.bool(False),
createTextFile = cms.untracked.bool(True)
)
process.readAK4PFoff = process.readAK4PF.clone(payloadName = 'UETable_Calo')
process.p = cms.Path(process.readAK4PFoff)
| cc0-1.0 | Python | |
5e1440874bc4e3f5ab2de23f72ad7f950ccce12e | add missing migration for the `fake` backend | terceiro/squad,terceiro/squad,terceiro/squad,terceiro/squad | squad/ci/migrations/0019_add_fake_backend.py | squad/ci/migrations/0019_add_fake_backend.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.10 on 2018-02-26 21:03
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ci', '0018_testjob_dates'),
]
operations = [
migrations.AlterField(
model_name='backend',
name='implementation_type',
field=models.CharField(choices=[('fake', 'fake'), ('lava', 'lava'), ('null', 'null')], default='null', max_length=64),
),
]
| agpl-3.0 | Python | |
0083a6fadad8bb0f202bab2af183a10f09e19459 | Add simple demo of piglow - lighting arms | claremacrae/raspi_code,claremacrae/raspi_code,claremacrae/raspi_code | piglow/demo_piglow.py | piglow/demo_piglow.py | from piglow import PiGlow
import time
def brighten_arm( arm ):
for i in range( 1, 10 ):
piglow.arm( arm, i )
time.sleep( 0.11 )
time.sleep( 0.5 )
piglow.arm( arm, 0 )
piglow = PiGlow()
piglow.all(0)
brighten_arm( 1 )
brighten_arm( 2 )
brighten_arm( 3 )
| mit | Python | |
b3f8be5b6ab7e4e713004447a3cfbda743d80394 | Add management command to update corpus logic hashes | PUNCH-Cyber/YaraGuardian,PUNCH-Cyber/YaraGuardian,PUNCH-Cyber/YaraGuardian,PUNCH-Cyber/YaraGuardian | rules/management/commands/CorpusLogicUpdate.py | rules/management/commands/CorpusLogicUpdate.py | import logging
from django.core.management.base import BaseCommand, CommandError
from plyara import YaraParser
from rules.models import YaraRule
# Configure Logging
logging.basicConfig(level=logging.INFO)
class Command(BaseCommand):
help = 'Recalculate the logic hashes of the entire rule corpus'
def handle(self, *args, **options):
corpus = YaraRule.objects.all()
rule_count = corpus.count()
message = 'Updating logic hashes for {} rules'.format(rule_count)
logging.info(message)
rule_index = 0
for rule in corpus.iterator():
rule_index += 1
logic_data = {'strings': rule.strings, 'condition_terms': rule.condition}
logic_hash = YaraParser.parserInterpreter.generateLogicHash(logic_data)
rule.logic_hash = logic_hash
rule.save()
logging.info('Rule Logic Update: {} of {}'.format(rule_index, rule_count))
| apache-2.0 | Python | |
aefb6fbf38f8756458e487328139caf41afb6cee | Create MD5-DICT.py | thezakman/CTF-Scripts,thezakman/CTF-Scripts | MD5-DICT.py | MD5-DICT.py | from hashlib import md5
# by TheZakMan
# Exemplo de md5: 21232f297a57a5a743894a0e4a801fc3 (admin)
# dict: /usr/share/wordlists/rockyou.txt
print "[Md5 Dict-Cracker]"
print "| wordlist.txt |\n"
crackme = raw_input("MD5:")
#f = open('wordlist.txt', 'r')
f = open('/usr/share/wordlists/rockyou.txt', 'r')
words = [line[0:-1] for line in f.readlines()]
#words = 'test', 'alex', 'steve', 'admin'
#print words
for word in words:
if md5(word).hexdigest() == crackme:
print "\nCracked:", word
break
if md5(word).hexdigest() != crackme:
print "\n[*] Not Found!"
| artistic-2.0 | Python | |
96e17fbac42354be33b90e23759220ccb81d3223 | add sample + MM/DD/YY | 98pm/youtube_upload_timelapse,98pm/youtube_upload_timelapse | sample.py | sample.py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command-line skeleton application for YouTube Data API.
Usage:
$ python sample.py
You can also get help on all the command-line flags the program understands
by running:
$ python sample.py --help
"""
import argparse
import httplib2
import os
import sys
from apiclient import discovery
from oauth2client import file
from oauth2client import client
from oauth2client import tools
# Parser for command-line arguments.
# parents=[tools.argparser] inherits the standard oauth2client flags
# (--auth_host_name, --noauth_local_webserver, ...) used by tools.run_flow.
parser = argparse.ArgumentParser(
    description=__doc__,
    formatter_class=argparse.RawDescriptionHelpFormatter,
    parents=[tools.argparser])
# CLIENT_SECRETS is name of a file containing the OAuth 2.0 information for this
# application, including client_id and client_secret. You can see the Client ID
# and Client secret on the APIs page in the Cloud Console:
# <https://cloud.google.com/console#/project/386891151732/apiui>
CLIENT_SECRETS = os.path.join(os.path.dirname(__file__), 'client_secrets.json')
# Set up a Flow object to be used for authentication.
# Add one or more of the following scopes. PLEASE ONLY ADD THE SCOPES YOU
# NEED. For more information on using scopes please see
# <https://developers.google.com/+/best-practices>.
# message_if_missing() produces a helpful error if client_secrets.json is absent.
FLOW = client.flow_from_clientsecrets(CLIENT_SECRETS,
    scope=[
        'https://www.googleapis.com/auth/youtube',
        'https://www.googleapis.com/auth/youtube.readonly',
        'https://www.googleapis.com/auth/youtube.upload',
        'https://www.googleapis.com/auth/youtubepartner',
        'https://www.googleapis.com/auth/youtubepartner-channel-audit',
    ],
    message=tools.message_if_missing(CLIENT_SECRETS))
def main(argv):
# Parse the command-line flags.
flags = parser.parse_args(argv[1:])
# If the credentials don't exist or are invalid run through the native client
# flow. The Storage object will ensure that if successful the good
# credentials will get written back to the file.
storage = file.Storage('sample.dat')
credentials = storage.get()
if credentials is None or credentials.invalid:
credentials = tools.run_flow(FLOW, storage, flags)
# Create an httplib2.Http object to handle our HTTP requests and authorize it
# with our good Credentials.
http = httplib2.Http()
http = credentials.authorize(http)
# Construct the service object for the interacting with the YouTube Data API.
service = discovery.build('youtube', 'v3', http=http)
try:
print "Success! Now add code here."
except client.AccessTokenRefreshError:
print ("The credentials have been revoked or expired, please re-run"
"the application to re-authorize")
# For more information on the YouTube Data API you can visit:
#
# https://developers.google.com/youtube/v3
#
# For more information on the YouTube Data API Python library surface you
# can visit:
#
# https://developers.google.com/resources/api-libraries/documentation/youtube/v3/python/latest/
#
# For information on the Python Client Library visit:
#
# https://developers.google.com/api-client-library/python/start/get_started
if __name__ == '__main__':
    # Pass the full argv; main() itself strips the program name.
    main(sys.argv)
| apache-2.0 | Python | |
14e32e60181083c8d0271fc974f3f1161ea81c74 | Add first pass on create_tab script | tock/libtock-c,tock/libtock-c,tock/libtock-c | tools/tab/create_tab.py | tools/tab/create_tab.py | #!/usr/bin/env python
import datetime
import io
import os
import sys
import tarfile
TAB_VERSION = 1

# CLI: create_tab.py <output.tab> <app-name> <input files...>
output_filename = sys.argv[1]
name = sys.argv[2]
inputs = sys.argv[3:]

# TOML metadata describing this TAB (Tock Application Bundle).
metadata = []
metadata.append('tab-version = {}'.format(TAB_VERSION))
metadata.append('name = "{}"'.format(name))
metadata.append('only-for-boards = ""')
# NOTE(review): datetime.now() is local time but the value is suffixed 'Z'
# (i.e. labelled UTC) -- confirm whether utcnow() was intended.
metadata.append('build-date = {}'.format(datetime.datetime.now().isoformat()[:19]+'Z'))

with tarfile.open(output_filename, 'w') as tar:
    # Use a loop variable distinct from the app-level `name` read above
    # (the original loop clobbered it).
    for input_path in inputs:
        tar.add(input_path, arcname=os.path.basename(input_path))

    # Append the generated metadata as an in-memory member of the archive.
    data = '\n'.join(metadata).encode('utf-8')
    metadata_file = io.BytesIO(data)  # renamed: don't shadow the `file` builtin
    info = tarfile.TarInfo(name='metadata.toml')
    info.size = len(data)
    tar.addfile(tarinfo=info, fileobj=metadata_file)
| apache-2.0 | Python | |
916250bc9509986f1dfce3b09ddbc7a49aa79d42 | Add admin for payments | ygrass/handsome,ygrass/handsome | payments/admin.py | payments/admin.py | # -*- coding: utf-8 -*-
from django.contrib import admin
from .models import Payment, Refund
# Expose payment records in the Django admin with the default ModelAdmin options.
admin.site.register(Payment)
admin.site.register(Refund)
| unlicense | Python | |
25f0615f4fb35779ce8688c5d29f92288ac2c30d | Add filesystem checks class | rzeka/QLDS-Manager | util/filesystem.py | util/filesystem.py | import os
class FSCheck:
    """Validate a filesystem path before it is used.

    Failed checks either return False or, when ``error`` is true, print a
    message and terminate the process with a distinct exit code
    (32: missing path, 31: missing permission).
    """

    # Accepted spellings of each access type -> (os access flag, human name).
    _ACCESS_MODES = {
        'read': (os.R_OK, 'read'), 'r': (os.R_OK, 'read'),
        'write': (os.W_OK, 'write'), 'w': (os.W_OK, 'write'),
        'exec': (os.X_OK, 'exec'), 'x': (os.X_OK, 'exec'),
        'execute': (os.X_OK, 'exec'),
    }

    def __init__(self, filepath, name=None):
        # Bug fix: the original declared ``name: None`` -- an *annotation*,
        # not a default -- so the argument was in fact mandatory.  ``name``
        # is only used for human-readable messages; fall back to the path.
        if name is None:
            name = filepath
        self.filepath = filepath
        self.name = name

    def exists(self, error=True):
        """Return True if the path exists; optionally exit(32) when missing.

        Bug fix: ``error: True`` was likewise an annotation, not a default.
        """
        if not os.path.exists(self.filepath):
            if error:
                print('%s executable doesn\'t exist. Install it first' % self.name)
                exit(32)
            return False
        return True

    def access(self, type_, error=True):
        """Check read/write/exec permission; optionally exit(31) on failure.

        Raises AttributeError for an unrecognised ``type_`` (interface kept
        from the original).
        """
        try:
            access, access_human = self._ACCESS_MODES[type_]
        except KeyError:
            raise AttributeError('Unknown access type')
        if not os.access(self.filepath, access):
            if error:
                print('No %s access to %s' % (access_human, self.name))
                exit(31)
            return False
        return True
| mit | Python | |
c0621b765234d33481318828dd3dc4bbc8481131 | Add edges script, which generates a set of outline tiles for a double-height tileset | tbentropy/pytile | edges.py | edges.py | from PIL import Image, ImageDraw, ImageFont
# compare a to b, b to c, c to d and d to a
max_edge_difference = 2
output = []
for a in range(5):
for b in range(5):
for c in range(5):
for d in range(5):
if 0 in [a,b,c,d]:
# Must be one vertex at 0
if not (abs(a-b) > 2 or abs(b-c) > 2 or abs(c-d) > 2 or abs(d-a) > 2):
# All sides must have at most 2 units of up/down movement
output.append([a,b,c,d])
g0 = []
g1 = []
g2 = []
g3 = []
g4 = []
print "with 0, not 1, 2, 3 or 4"
for o in output:
if 0 in o and not 1 in o and not 2 in o and not 3 in o and not 4 in o:
g0.append(o)
print o
print "with 1, not 2, 3 or 4"
for o in output:
if 1 in o and not 2 in o and not 3 in o and not 4 in o:
g1.append(o)
print o
print "with 2, not 3 or 4"
for o in output:
if 2 in o and not 3 in o and not 4 in o:
g2.append(o)
print o
print "with 3"
for o in output:
if 3 in o:
g3.append(o)
print o
print "with 4"
for o in output:
if 4 in o:
g4.append(o)
print o
print len(output)
def draw_polygon(x, y):
    # Draw the flat "ground level" diamond for the tile at grid cell (x, y):
    # four green triangles (one per quadrant of the diamond) on a 64px grid,
    # with the diamond occupying rows 33-64 of the cell.
    draw.polygon([(-1+x*64, 49+y*64), (31+x*64, 33+y*64), (31+x*64, 49+y*64)], fill=(0,190,0))
    draw.polygon([(32+x*64, 33+y*64), (64+x*64, 49+y*64), (32+x*64, 48+y*64)], fill=(0,190,0))
    draw.polygon([(64+x*64, 48+y*64), (32+x*64, 64+y*64), (32+x*64, 48+y*64)], fill=(0,190,0))
    draw.polygon([(31+x*64, 64+y*64), (-1+x*64, 48+y*64), (31+x*64, 48+y*64)], fill=(0,190,0))
def draw_heights(x, y, h):
    # Overlay the tile's four corner heights h = [left, top, right, bottom]
    # on grid cell (x, y).  Red verticals: each corner raised 8px per height
    # unit.  Blue segments: connect the raised corners, each edge drawn in a
    # progressively darker blue so the four edges can be told apart.
    draw.line([(0+x*64, 48+y*64), (0+x*64, 48+y*64-h[0]*8)], fill=(255,0,0))
    draw.line([(32+x*64, 33+y*64), (32+x*64, 33+y*64-h[1]*8)], fill=(255,0,0))
    draw.line([(63+x*64, 48+y*64), (63+x*64, 48+y*64-h[2]*8)], fill=(255,0,0))
    draw.line([(31+x*64, 63+y*64), (31+x*64, 63+y*64-h[3]*8)], fill=(255,0,0))
    draw.line([(0+x*64, 48+y*64-h[0]*8), (31+x*64, 33+y*64-h[1]*8)], fill=(0,0,255))
    draw.line([(32+x*64, 33+y*64-h[1]*8), (63+x*64, 48+y*64-h[2]*8)], fill=(0,0,200))
    draw.line([(63+x*64, 48+y*64-h[2]*8), (32+x*64, 63+y*64-h[3]*8)], fill=(0,0,160))
    draw.line([(31+x*64, 63+y*64-h[3]*8), (0+x*64, 48+y*64-h[0]*8)], fill=(0,0,120))
# Render a 10x10 grid of 64px tiles on a 640x640 canvas: the flat diamond
# first, then the corner-height overlay for each generated tile in turn.
out = Image.new("RGB", (640, 640), (231,255,255))
draw = ImageDraw.Draw(out)
oi = 0
for y in range(10):
    for x in range(10):
        draw_polygon(x, y)
        try:
            # If there are fewer generated tiles than grid cells, the
            # remaining cells stay as plain flat diamonds.
            draw_heights(x, y, output[oi])
        except IndexError:
            pass
        oi += 1
##draw.text((2,52), "0", fill=(0,0,0))
##draw.text((32,25), "0", fill=(0,0,0))
##draw.text((60,52), "0", fill=(0,0,0))
##draw.text((38,58), "0", fill=(0,0,0))
del draw
out.save("testout.png", "PNG")
| bsd-3-clause | Python | |
edabec29ebb99e938fd3523951597e336ddd3adc | Add text vectorizers benchmarks (#9086) | scikit-learn/scikit-learn,bnaul/scikit-learn,aflaxman/scikit-learn,chrsrds/scikit-learn,vinayak-mehta/scikit-learn,zorroblue/scikit-learn,scikit-learn/scikit-learn,glemaitre/scikit-learn,betatim/scikit-learn,huzq/scikit-learn,jakirkham/scikit-learn,nhejazi/scikit-learn,wazeerzulfikar/scikit-learn,nhejazi/scikit-learn,amueller/scikit-learn,amueller/scikit-learn,jakirkham/scikit-learn,chrsrds/scikit-learn,TomDLT/scikit-learn,lesteve/scikit-learn,espg/scikit-learn,vinayak-mehta/scikit-learn,anntzer/scikit-learn,ogrisel/scikit-learn,xuewei4d/scikit-learn,clemkoa/scikit-learn,TomDLT/scikit-learn,saiwing-yeung/scikit-learn,chrsrds/scikit-learn,aflaxman/scikit-learn,sergeyf/scikit-learn,wazeerzulfikar/scikit-learn,vortex-ape/scikit-learn,zorroblue/scikit-learn,BiaDarkia/scikit-learn,AlexandreAbraham/scikit-learn,ivannz/scikit-learn,chrsrds/scikit-learn,betatim/scikit-learn,vinayak-mehta/scikit-learn,saiwing-yeung/scikit-learn,kevin-intel/scikit-learn,betatim/scikit-learn,glemaitre/scikit-learn,sergeyf/scikit-learn,ndingwall/scikit-learn,xuewei4d/scikit-learn,sergeyf/scikit-learn,herilalaina/scikit-learn,jakirkham/scikit-learn,xuewei4d/scikit-learn,clemkoa/scikit-learn,AlexandreAbraham/scikit-learn,TomDLT/scikit-learn,AlexandreAbraham/scikit-learn,vortex-ape/scikit-learn,TomDLT/scikit-learn,xuewei4d/scikit-learn,vortex-ape/scikit-learn,ivannz/scikit-learn,sergeyf/scikit-learn,aflaxman/scikit-learn,zorroblue/scikit-learn,nhejazi/scikit-learn,ndingwall/scikit-learn,amueller/scikit-learn,espg/scikit-learn,huzq/scikit-learn,bnaul/scikit-learn,ivannz/scikit-learn,BiaDarkia/scikit-learn,glemaitre/scikit-learn,BiaDarkia/scikit-learn,manhhomienbienthuy/scikit-learn,ivannz/scikit-learn,huzq/scikit-learn,kevin-intel/scikit-learn,lesteve/scikit-learn,aflaxman/scikit-learn,saiwing-yeung/scikit-learn,ndingwall/scikit-learn,shyamalschandra/scikit-learn,manhhomienbienthuy/scikit-learn,clemkoa/scikit-learn
,ogrisel/scikit-learn,lesteve/scikit-learn,espg/scikit-learn,herilalaina/scikit-learn,herilalaina/scikit-learn,clemkoa/scikit-learn,anntzer/scikit-learn,anntzer/scikit-learn,nhejazi/scikit-learn,jakirkham/scikit-learn,kevin-intel/scikit-learn,zorroblue/scikit-learn,huzq/scikit-learn,saiwing-yeung/scikit-learn,bnaul/scikit-learn,shyamalschandra/scikit-learn,vinayak-mehta/scikit-learn,glemaitre/scikit-learn,manhhomienbienthuy/scikit-learn,lesteve/scikit-learn,scikit-learn/scikit-learn,betatim/scikit-learn,ogrisel/scikit-learn,BiaDarkia/scikit-learn,wazeerzulfikar/scikit-learn,ogrisel/scikit-learn,wazeerzulfikar/scikit-learn,AlexandreAbraham/scikit-learn,espg/scikit-learn,ndingwall/scikit-learn,shyamalschandra/scikit-learn,bnaul/scikit-learn,vortex-ape/scikit-learn,scikit-learn/scikit-learn,kevin-intel/scikit-learn,shyamalschandra/scikit-learn,anntzer/scikit-learn,herilalaina/scikit-learn,manhhomienbienthuy/scikit-learn,amueller/scikit-learn | benchmarks/bench_text_vectorizers.py | benchmarks/bench_text_vectorizers.py | """
To run this benchmark, you will need,
* scikit-learn
* pandas
* memory_profiler
* psutil (optional, but recommended)
"""
from __future__ import print_function
import timeit
import itertools
import numpy as np
import pandas as pd
from memory_profiler import memory_usage
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import (CountVectorizer, TfidfVectorizer,
HashingVectorizer)
# Number of timing repetitions per configuration.
n_repeat = 3


def run_vectorizer(Vectorizer, X, **params):
    """Return a zero-argument callable that builds Vectorizer(**params) and
    fits it on the corpus X (consumed by timeit.repeat and memory_usage)."""
    def _benchmark_target():
        Vectorizer(**params).fit_transform(X)
    return _benchmark_target
# Corpus: the 20-newsgroups training split (downloaded on first use).
text = fetch_20newsgroups(subset='train').data

print("="*80 + '\n#' + " Text vectorizers benchmark" + '\n' + '='*80 + '\n')
print("Using a subset of the 20 newsrgoups dataset ({} documents)."
      .format(len(text)))
print("This benchmarks runs in ~20 min ...")

res = []

# One measurement per (vectorizer class, analyzer, n-gram range) combination.
for Vectorizer, (analyzer, ngram_range) in itertools.product(
        [CountVectorizer, TfidfVectorizer, HashingVectorizer],
        [('word', (1, 1)),
         ('word', (1, 2)),
         ('word', (1, 4)),
         ('char', (4, 4)),
         ('char_wb', (4, 4))
         ]):

    bench = {'vectorizer': Vectorizer.__name__}
    params = {'analyzer': analyzer, 'ngram_range': ngram_range}
    bench.update(params)

    # Wall-clock time over n_repeat independent fits.
    dt = timeit.repeat(run_vectorizer(Vectorizer, text, **params),
                       number=1,
                       repeat=n_repeat)
    bench['time'] = "{:.2f} (+-{:.2f})".format(np.mean(dt), np.std(dt))

    # Peak memory (MB) sampled by memory_profiler while the fit runs.
    mem_usage = memory_usage(run_vectorizer(Vectorizer, text, **params))
    bench['memory'] = "{:.1f}".format(np.max(mem_usage))

    res.append(bench)

# Pivot so each vectorizer becomes a column, indexed by (analyzer, ngram_range).
df = pd.DataFrame(res).set_index(['analyzer', 'ngram_range', 'vectorizer'])

print('\n========== Run time performance (sec) ===========\n')
print('Computing the mean and the standard deviation '
      'of the run time over {} runs...\n'.format(n_repeat))
print(df['time'].unstack(level=-1))

print('\n=============== Memory usage (MB) ===============\n')
print(df['memory'].unstack(level=-1))
| bsd-3-clause | Python | |
9bee248bce5edbf073f66e5d7a621f22bbba314f | Fix a failing test | ArvinPan/pyzmq,swn1/pyzmq,ArvinPan/pyzmq,Mustard-Systems-Ltd/pyzmq,yyt030/pyzmq,yyt030/pyzmq,caidongyun/pyzmq,dash-dash/pyzmq,yyt030/pyzmq,dash-dash/pyzmq,swn1/pyzmq,caidongyun/pyzmq,dash-dash/pyzmq,Mustard-Systems-Ltd/pyzmq,Mustard-Systems-Ltd/pyzmq,swn1/pyzmq,ArvinPan/pyzmq,caidongyun/pyzmq | zmq/devices/__init__.py | zmq/devices/__init__.py | """0MQ Device classes for running in background threads or processes."""
#
# Copyright (c) 2010 Brian E. Granger
#
# This file is part of pyzmq.
#
# pyzmq is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# pyzmq is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from zmq.core.device import device
from zmq.devices import basedevice, monitoredqueue, monitoredqueuedevice
from zmq.devices.basedevice import *
from zmq.devices.monitoredqueue import *
from zmq.devices.monitoredqueuedevice import *
__all__ = ['device']
# Aggregate each submodule's public names so `from zmq.devices import *`
# re-exports every device class alongside the core `device` function.
for submod in (basedevice, monitoredqueue, monitoredqueuedevice):
    __all__.extend(submod.__all__)
| """0MQ Device classes for running in background threads or processes."""
#
# Copyright (c) 2010 Brian E. Granger
#
# This file is part of pyzmq.
#
# pyzmq is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# pyzmq is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from zmq.core.device import device
from zmq.devices import basedevice, monitoredqueue
from zmq.devices.basedevice import *
from zmq.devices.monitoredqueue import *
from zmq.devices.monitoredqueuedevice import *
__all__ = ['device']
for submod in (basedevice, monitoredqueue, monitoredqueuedevice):
__all__.extend(submod.__all__)
| bsd-3-clause | Python |
8c168933c85f828ec85d6c069143e3c4174657b7 | Create 10.CubeProperties.py | stoyanov7/SoftwareUniversity,stoyanov7/SoftwareUniversity,stoyanov7/SoftwareUniversity,stoyanov7/SoftwareUniversity | TechnologiesFundamentals/ProgrammingFundamentals/MethodsAndDebugging-Excercises/10.CubeProperties.py | TechnologiesFundamentals/ProgrammingFundamentals/MethodsAndDebugging-Excercises/10.CubeProperties.py | import math
cubeSide = float(input())
parameter = input()

# Map each requested property to the formula computing it from the side
# length; an unrecognised property prints nothing, exactly like the original
# if/elif chain falling through.
properties = {
    "face": lambda side: math.sqrt(math.pow(side, 2) * 2),
    "space": lambda side: math.sqrt(math.pow(side, 2) * 3),
    "volume": lambda side: math.pow(side, 3),
    "area": lambda side: math.pow(side, 2) * 6,
}

if parameter in properties:
    print("%.2f" % properties[parameter](cubeSide))
| mit | Python | |
9df4f11d878ee8d13dcbcee49745bdcc8ab3e507 | Remove logging from test config | dimagi/commcare-hq,puttarajubr/commcare-hq,puttarajubr/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,gmimano/commcaretest,SEL-Columbia/commcare-hq,dimagi/commcare-hq,puttarajubr/commcare-hq,dimagi/commcare-hq,gmimano/commcaretest,SEL-Columbia/commcare-hq,qedsoftware/commcare-hq,gmimano/commcaretest,puttarajubr/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,SEL-Columbia/commcare-hq,qedsoftware/commcare-hq | .travis/localsettings.py | .travis/localsettings.py | import os
####### Configuration for CommCareHQ Running on Travis-CI #####
# NOTE: every credential below is a dummy/placeholder for CI only.
####### Database config. This assumes Postgres #######
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': 'commcarehq',
        'USER': 'postgres',
        'PASSWORD': '',
        'HOST': 'localhost',
        'PORT': '5432'
    }
}
SQL_REPORTING_DATABASE_URL = "sqlite:////tmp/commcare_reporting_test.db"
####### Couch Config ######
COUCH_HTTPS = False
COUCH_SERVER_ROOT = '127.0.0.1:5984'
COUCH_USERNAME = ''
COUCH_PASSWORD = ''
COUCH_DATABASE_NAME = 'commcarehq'
######## Email setup ########
# email settings: these ones are the custom hq ones
EMAIL_LOGIN = "notifications@dimagi.com"
EMAIL_PASSWORD = "******"
EMAIL_SMTP_HOST = "smtp.gmail.com"
EMAIL_SMTP_PORT = 587
# Console backend: mail is printed, never sent, during test runs.
EMAIL_BACKEND='django.core.mail.backends.console.EmailBackend'
####### Bitly ########
BITLY_LOGIN = 'dimagi'
BITLY_APIKEY = '*******'
####### Jar signing config ########
_ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
JAR_SIGN = dict(
    jad_tool = os.path.join(_ROOT_DIR, "corehq", "apps", "app_manager", "JadTool.jar"),
    key_store = os.path.join(_ROOT_DIR, "InsecureTestingKeyStore"),
    key_alias = "javarosakey",
    store_pass = "onetwothreefourfive",
    key_pass = "onetwothreefourfive",
)
# prod settings
SOIL_DEFAULT_CACHE = "redis"
SOIL_BACKEND = "soil.CachedDownload"
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
        'LOCATION': 'localhost:11211',
    },
    'redis': {
        'BACKEND': 'redis_cache.cache.RedisCache',
        'LOCATION': 'localhost:6379',
        'OPTIONS': {},
    }
}
ELASTICSEARCH_HOST = 'localhost'
ELASTICSEARCH_PORT = 9200
AUDIT_ADMIN_VIEWS=False
# No logging
# Root logger capped at CRITICAL and routed to a null handler, i.e. silenced.
# NOTE(review): dictConfig expects 'handlers': ['null'] (a list); the singular
# 'handler' key below looks ignored -- confirm against Django's logging docs.
LOGGING = {
    'version': 1,
    'handlers': {
        'null': {
            'level': 'DEBUG',
            'class': 'django.utils.log.NullHandler',
        },
    },
    'loggers': {
        '': {
            'level': 'CRITICAL',
            'handler': 'null',
            'propagate': False,
        }
    }
}
| import os
####### Configuration for CommCareHQ Running on Travis-CI #####
####### Database config. This assumes Postgres #######
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'commcarehq',
'USER': 'postgres',
'PASSWORD': '',
'HOST': 'localhost',
'PORT': '5432'
}
}
SQL_REPORTING_DATABASE_URL = "sqlite:////tmp/commcare_reporting_test.db"
####### Couch Config ######
COUCH_HTTPS = False
COUCH_SERVER_ROOT = '127.0.0.1:5984'
COUCH_USERNAME = ''
COUCH_PASSWORD = ''
COUCH_DATABASE_NAME = 'commcarehq'
######## Email setup ########
# email settings: these ones are the custom hq ones
EMAIL_LOGIN = "notifications@dimagi.com"
EMAIL_PASSWORD = "******"
EMAIL_SMTP_HOST = "smtp.gmail.com"
EMAIL_SMTP_PORT = 587
EMAIL_BACKEND='django.core.mail.backends.console.EmailBackend'
####### Bitly ########
BITLY_LOGIN = 'dimagi'
BITLY_APIKEY = '*******'
####### Jar signing config ########
_ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
JAR_SIGN = dict(
jad_tool = os.path.join(_ROOT_DIR, "corehq", "apps", "app_manager", "JadTool.jar"),
key_store = os.path.join(_ROOT_DIR, "InsecureTestingKeyStore"),
key_alias = "javarosakey",
store_pass = "onetwothreefourfive",
key_pass = "onetwothreefourfive",
)
# prod settings
SOIL_DEFAULT_CACHE = "redis"
SOIL_BACKEND = "soil.CachedDownload"
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
'LOCATION': 'localhost:11211',
},
'redis': {
'BACKEND': 'redis_cache.cache.RedisCache',
'LOCATION': 'localhost:6379',
'OPTIONS': {},
}
}
ELASTICSEARCH_HOST = 'localhost'
ELASTICSEARCH_PORT = 9200
AUDIT_ADMIN_VIEWS=False
| bsd-3-clause | Python |
16f29bfc832a64accd6ef67c2140f70ea07f2f05 | Add PyUnit for deep feature extraction of a LeNet model with mxnet. | michalkurka/h2o-3,mathemage/h2o-3,h2oai/h2o-3,h2oai/h2o-3,spennihana/h2o-3,michalkurka/h2o-3,h2oai/h2o-3,mathemage/h2o-3,michalkurka/h2o-3,mathemage/h2o-3,spennihana/h2o-3,h2oai/h2o-dev,h2oai/h2o-dev,h2oai/h2o-3,spennihana/h2o-3,michalkurka/h2o-3,spennihana/h2o-3,michalkurka/h2o-3,h2oai/h2o-3,spennihana/h2o-3,mathemage/h2o-3,h2oai/h2o-dev,h2oai/h2o-dev,h2oai/h2o-dev,mathemage/h2o-3,h2oai/h2o-3,mathemage/h2o-3,h2oai/h2o-3,spennihana/h2o-3,michalkurka/h2o-3,h2oai/h2o-3,mathemage/h2o-3,spennihana/h2o-3,h2oai/h2o-dev,michalkurka/h2o-3,h2oai/h2o-dev | h2o-py/tests/testdir_algos/deepwater/pyunit_lenet_deepwater_feature_extraction.py | h2o-py/tests/testdir_algos/deepwater/pyunit_lenet_deepwater_feature_extraction.py | from __future__ import print_function
import sys, os
sys.path.insert(1, os.path.join("..","..",".."))
import h2o
from tests import pyunit_utils
from h2o.estimators.deepwater import H2ODeepWaterEstimator
def deepwater_lenet():
    """Train a small LeNet on the cat/dog/mouse images and sanity-check the
    width of the deep features extracted from two hidden layers."""
    # Skip silently on builds without DeepWater support.
    if not H2ODeepWaterEstimator.available():
        return

    frame = h2o.import_file(pyunit_utils.locate("bigdata/laptop/deepwater/imagenet/cat_dog_mouse.csv"))
    print(frame.head(5))

    model = H2ODeepWaterEstimator(epochs=100, learning_rate=1e-3, network='lenet',
                                  score_interval=0, train_samples_per_iteration=1000)
    model.train(x=[0], y=1, training_frame=frame)

    # Each probed layer must expose the expected number of extracted features.
    layer_checks = (
        ("pooling1_output", 800, "extracted frame doesn't have 800 columns"),
        ("activation2_output", 500, "extracted frame doesn't have 500 columns"),
    )
    for layer_name, expected_ncols, failure_message in layer_checks:
        extracted = model.deepfeatures(frame, layer_name)
        #print(extracted.describe())
        print(extracted.ncols)
        assert extracted.ncols == expected_ncols, failure_message

    h2o.remove_all()
# Standard H2O pyunit entry point: run under the standalone harness when
# executed directly, or invoke the test immediately when imported by the runner.
if __name__ == "__main__":
    pyunit_utils.standalone_test(deepwater_lenet)
else:
    deepwater_lenet()
| apache-2.0 | Python | |
9a1635dcdb21548fcb7b1f718624c991602588e6 | Initialize P01_isPhoneNumber | JoseALermaIII/python-tutorials,JoseALermaIII/python-tutorials | books/AutomateTheBoringStuffWithPython/Chapter07/P01_isPhoneNumber.py | books/AutomateTheBoringStuffWithPython/Chapter07/P01_isPhoneNumber.py | # This program returns True if a string is a phone number and False if not
# However, it's not very efficient
def isPhoneNumber(text):
    """Return True if text has the exact form ###-###-#### (decimal digits)."""
    if len(text) != 12:
        return False
    # The two separators must be dashes in their fixed positions.
    if text[3] != '-' or text[7] != '-':
        return False
    # Every remaining position must hold a decimal digit.
    digit_groups = (text[0:3], text[4:7], text[8:12])
    return all(group.isdecimal() for group in digit_groups)
# Quick demonstration of the matcher on one valid and one invalid input.
print('415-555-4242 is a phone number:')
print(isPhoneNumber('415-555-4242'))
print('Moshi moshi is a phone number:')
print(isPhoneNumber('Moshi moshi'))
| mit | Python | |
af6272941a66967c3a64d735223fefc917056562 | add example | jontrulson/upm,MakerCollider/upm,stefan-andritoiu/upm,tripzero/upm,ShawnHymel/upm,Jon-ICS/upm,sasmita/upm,mircea/upm,kissbac/upm,andreivasiliu2211/upm,arfoll/upm,srware/upm,nitirohilla/upm,sasmita/upm,Vaghesh/upm,g-vidal/upm,yoyojacky/upm,0xD34D/upm,tripzero/upm,kissbac/upm,andreivasiliu2211/upm,Vaghesh/upm,tylergibson/upm,Vaghesh/upm,fieldhawker/upm,intel-iot-devkit/upm,afmckinney/upm,pylbert/upm,mircea/upm,intel-iot-devkit/upm,afmckinney/upm,g-vidal/upm,malikabhi05/upm,fieldhawker/upm,skiselev/upm,spitfire88/upm,izard/upm,ShawnHymel/upm,srware/upm,intel-iot-devkit/upm,rafaneri/upm,afmckinney/upm,noahchense/upm,stefan-andritoiu/upm,jontrulson/upm,mircea/upm,0xD34D/upm,jontrulson/upm,sasmita/upm,GSmurf/upm,stefan-andritoiu/upm,malikabhi05/upm,g-vidal/upm,GSmurf/upm,noahchense/upm,noahchense/upm,Jon-ICS/upm,rafaneri/upm,Propanu/upm,tylergibson/upm,stefan-andritoiu/upm,Propanu/upm,andreivasiliu2211/upm,yoyojacky/upm,ShawnHymel/upm,jontrulson/upm,jontrulson/upm,malikabhi05/upm,skiselev/upm,whbruce/upm,Jon-ICS/upm,srware/upm,afmckinney/upm,arfoll/upm,tripzero/upm,stefan-andritoiu/upm,malikabhi05/upm,0xD34D/upm,pylbert/upm,pylbert/upm,Jon-ICS/upm,spitfire88/upm,MakerCollider/upm,fieldhawker/upm,whbruce/upm,tylergibson/upm,yoyojacky/upm,skiselev/upm,g-vidal/upm,skiselev/upm,spitfire88/upm,intel-iot-devkit/upm,whbruce/upm,whbruce/upm,spitfire88/upm,malikabhi05/upm,kissbac/upm,afmckinney/upm,Jon-ICS/upm,pylbert/upm,rafaneri/upm,whbruce/upm,izard/upm,g-vidal/upm,skiselev/upm,tylergibson/upm,intel-iot-devkit/upm,sasmita/upm,Propanu/upm,g-vidal/upm,yoyojacky/upm,malikabhi05/upm,nitirohilla/upm,stefan-andritoiu/upm,GSmurf/upm,yoyojacky/upm,tylergibson/upm,mircea/upm,kissbac/upm,srware/upm,nitirohilla/upm,skiselev/upm,andreivasiliu2211/upm,GSmurf/upm,nitirohilla/upm,tripzero/upm,intel-iot-devkit/upm,nitirohilla/upm,andreivasiliu2211/upm,ShawnHymel/upm,mircea/upm,Propanu/upm,izard/upm,rafaneri/upm,arfoll/upm,Propanu/upm,Mak
erCollider/upm,0xD34D/upm,noahchense/upm,spitfire88/upm,tripzero/upm,kissbac/upm,pylbert/upm,Propanu/upm,rafaneri/upm,pylbert/upm,MakerCollider/upm,srware/upm,ShawnHymel/upm,arfoll/upm,fieldhawker/upm,Vaghesh/upm,MakerCollider/upm,arfoll/upm,sasmita/upm | examples/python/rgb-lcd.py | examples/python/rgb-lcd.py | # Author: Brendan Le Foll <brendan.le.foll@intel.com>
# Copyright (c) 2014 Intel Corporation.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import pyupm_i2clcd as lcd
# JHD1313M1 RGB LCD on I2C bus 0; per the constructor arguments the display
# controller sits at address 0x3E and the backlight controller at 0x62
# (presumably the standard addresses for this module -- confirm in UPM docs).
x = lcd.Jhd1313m1(0, 0x3E, 0x62)
x.write('hello')
| mit | Python | |
771eede117c29af75c1d8d21f0da538bd280b5c1 | Create search.py | Chick3nman/MD5-Record-Search | search.py | search.py | # Requires Parallel to be installed
# Use the below command to start with all available cores used
# seq `nproc` | parallel -u python script.py
__authors__ = ['Chick3nputer', 'Supersam654']
from itertools import islice, product
import string
import hashlib
from random import shuffle
from sys import argv
# Alphanumeric alphabet used to build candidate suffixes.
chars = string.ascii_uppercase + string.digits + string.ascii_lowercase

def generate_strings(size):
    """Yield an endless stream of pseudo-random strings of length `size`.

    Builds a pool holding `size` copies of the alphabet, reshuffles it once
    per pass, and slices it into consecutive `size`-character chunks.
    """
    pool = list(chars * size)
    chunk_starts = range(0, len(pool), size)
    while True:
        shuffle(pool)
        for start in chunk_starts:
            yield ''.join(pool[start: start + size])
def tsum(hexhash):
    """Sum of the digest's byte values, reading the hex string two chars at a time."""
    byte_fields = (hexhash[pos: pos + 2] for pos in range(0, len(hexhash), 2))
    return sum(int(field, 16) for field in byte_fields)
def edit_distance(h1, h2):
    """Hamming distance in bits between two hex strings."""
    differing_bits = int(h1, 16) ^ int(h2, 16)
    return bin(differing_bits)[2:].count('1')
def work():
    """Endlessly hash 'Chick3nman-'-prefixed random suffixes, printing every
    digest that sets a new record: most/fewest 1-bits, all-f/all-0 hex
    prefixes, extreme byte sums, or small bit-distance to a fixed reference."""
    # Start both not at 0 and 128 to avoid a lot of startup noise.
    max_ones = 109
    min_ones = 19
    # Suffix length fills the remainder of a 32-char target string.
    rand_length = 32 - len("Chick3nman-")
    i = 0
    for combo in generate_strings(rand_length):
        i += 1
        # Periodic progress heartbeat (every 100M hashes).
        if i % 100000000 == 0:
            print "Processed %d hashes." % i
        clear = "Chick3nman-" + combo
        hashhex = hashlib.md5(clear).hexdigest()
        # Population count of the 128-bit digest.
        ones_count = bin(int(hashhex, 16))[2:].count('1')
        if ones_count > max_ones:
            plain = hashhex + ':' + clear
            max_ones = ones_count
            print "New BITMAX Hash Found %s = %s" % (plain, max_ones)
        elif ones_count < min_ones:
            plain = hashhex + ':' + clear
            min_ones = ones_count
            print "New BITMIN Hash Found %s = %s" % (plain, min_ones)
        # Digests starting with 14 identical hex chars.
        if hashhex.startswith('ffffffffffffff'):
            print "New MAX Hash Found %s:%s" % (hashhex, clear)
        elif hashhex.startswith('00000000000000'):
            print "New MIN Hash Found %s:%s" % (hashhex, clear)
        # Extreme byte sums (fixed thresholds, unlike the running bit records).
        tsumhex = tsum(hashhex)
        if tsumhex < 190:
            print "New TMIN Hash Found %s:%s" % (hashhex, clear)
        elif tsumhex > 3909:
            print "New TMAX Hash Found %s:%s" % (hashhex, clear)
        # Bit-distance to a fixed palindromic reference digest.
        base_distance = edit_distance(hashhex, '0123456789abcdeffedcba9876543210')
        if base_distance < 20:
            print "New BASE Hash Found %s:%s" % (hashhex, clear)
        # Can't prefix with Chick3nman and do this one.
        # fp_distance = edit_distance(clear, hashhex)
        # if fp_distance < 28:
        # print "New FP Hash Found %s:%s" %s:%s" % (hashhex, clear)
if __name__ == '__main__':
    # argv[1] is the worker id supplied by GNU parallel (see header comment).
    print "Starting worker %s" % argv[1]
    work()
| mit | Python | |
5ee78767ebaa5c1bbceb7ce2c82fa6687169b0c2 | Add exercice The Paranoid Android | AntoineAugusti/katas,AntoineAugusti/katas,AntoineAugusti/katas | codingame/medium/paranoid_android.py | codingame/medium/paranoid_android.py | class Elevator(object):
    def __init__(self, floor, pos):
        super(Elevator, self).__init__()
        self.floor = floor
        self.pos = pos
        # 'LEFT'/'RIGHT'; assigned later by Game.setElevatorsDirections (or,
        # for the ground floor, by the first game-loop round).
        self.direction = None
    def __str__(self):
        # Debug representation, e.g. 'Elevator on floor 2 (pos 5) with dir LEFT'.
        return 'Elevator on floor %i (pos %i) with dir %s' % (self.floor, self.pos, self.direction)
class Game(object):
    """Static puzzle state: area dimensions, the exit, and one Elevator per
    floor (the exit itself is registered as an elevator on its floor)."""
    def __init__(self, nbFloors, width, exitFloor, exitPos, nbElevators):
        super(Game, self).__init__()
        self.nbFloors = nbFloors
        self.width = width
        self.exitFloor = exitFloor
        self.exitPos = exitPos
        self.nbElevators = nbElevators
        # Placeholder 0 per floor; replaced in addElevators().  The rest of
        # the code assumes every floor up to the exit ends up with one
        # Elevator -- a floor left at 0 would fail on attribute access.
        self.elevators = [0] * nbFloors
    def addElevators(self):
        """Read one 'floor pos' line per elevator from stdin, then register
        the exit as the elevator of its own floor."""
        for _ in xrange(self.nbElevators):
            # elevatorFloor: floor on which this elevator is found
            # elevatorPos: position of the elevator on its floor
            elevatorFloor, elevatorPos = [int(j) for j in raw_input().split()]
            self.elevators[elevatorFloor] = Elevator(elevatorFloor, elevatorPos)
        # Don't forget to add the elevator leading to the exit
        self.elevators[self.exitFloor] = Elevator(self.exitFloor, self.exitPos)
    def setElevatorsDirections(self):
        """For each floor above the ground, record the direction from the
        elevator on floor i toward the elevator on floor i+1 -- i.e. the way
        a clone must walk after riding up from floor i."""
        for i in range(self.nbFloors - 1):
            if (self.elevators[i].pos > self.elevators[i+1].pos):
                self.elevators[i+1].direction = 'LEFT'
            else:
                self.elevators[i+1].direction = 'RIGHT'
# First input line describes the level (all values on a single line).
# nbFloors: number of floors
# width: width of the area
# nbRounds: maximum number of rounds
# exitFloor: floor on which the exit is found
# exitPos: position of the exit on its floor
# nbTotalClones: number of generated clones
# nbAdditionalElevators: ignore (always zero)
# nbElevators: number of elevators
nbFloors, width, nbRounds, exitFloor, exitPos, nbTotalClones, nbAdditionalElevators, nbElevators = [int(i) for i in raw_input().split()]
game = Game(nbFloors, width, exitFloor, exitPos, nbElevators)
game.addElevators()
game.setElevatorsDirections()
firstRound = True
# Game loop
while True:
    # cloneFloor: floor of the leading clone
    # clonePos: position of the leading clone on its floor
    # direction: direction of the leading clone: LEFT or RIGHT
    cloneFloor, clonePos, direction = raw_input().split()
    cloneFloor = int(cloneFloor)
    clonePos = int(clonePos)
    if firstRound:
        firstRound = False
        # Ground-floor elevator direction depends on where the first clone
        # spawns relative to it (only decidable once the clone is visible).
        if (clonePos < game.elevators[0].pos):
            game.elevators[0].direction = 'RIGHT'
        else:
            game.elevators[0].direction = 'LEFT'
    # cloneFloor == -1 means no leading clone this round: nothing to do.
    if cloneFloor == -1:
        print 'WAIT'
    else:
        # Block any clone walking away from its floor's elevator so the
        # followers bounce back towards it.
        if direction == game.elevators[cloneFloor].direction:
            print 'WAIT'
        else:
            print 'BLOCK'
| mit | Python | |
f7132b86ca5f4dafeb88ca65b3d7fe71c6886cc5 | Add packageinfo command | ylatuya/cerbero,jackjansen/cerbero-2013,brion/cerbero,flexVDI/cerbero,sdroege/cerbero,BigBrother-International/gst-cerbero,OptoFidelity/cerbero,jackjansen/cerbero-2013,nzjrs/cerbero,multipath-rtp/cerbero,AlertMe/cerbero,cee1/cerbero-mac,sdroege/cerbero,jackjansen/cerbero-2013,brion/cerbero,ramaxlo/cerbero,shoreflyer/cerbero,freedesktop-unofficial-mirror/gstreamer__cerbero,lubosz/cerbero,freedesktop-unofficial-mirror/gstreamer__sdk__cerbero,nicolewu/cerbero,fluendo/cerbero,AlertMe/cerbero,freedesktop-unofficial-mirror/gstreamer__cerbero,freedesktop-unofficial-mirror/gstreamer-sdk__cerbero,jackjansen/cerbero,nirbheek/cerbero,superdump/cerbero,davibe/cerbero,ford-prefect/cerbero,lubosz/cerbero,ylatuya/cerbero,multipath-rtp/cerbero,davibe/cerbero,flexVDI/cerbero,freedesktop-unofficial-mirror/gstreamer__sdk__cerbero,fluendo/cerbero,cee1/cerbero-mac,fluendo/cerbero,davibe/cerbero,ford-prefect/cerbero,fluendo/cerbero,multipath-rtp/cerbero,OptoFidelity/cerbero,GStreamer/cerbero,nirbheek/cerbero,nirbheek/cerbero-old,BigBrother-International/gst-cerbero,nirbheek/cerbero,sdroege/cerbero,atsushieno/cerbero,OptoFidelity/cerbero,nzjrs/cerbero,justinjoy/cerbero,lubosz/cerbero,sdroege/cerbero,nirbheek/cerbero-old,EricssonResearch/cerbero,EricssonResearch/cerbero,freedesktop-unofficial-mirror/gstreamer-sdk__cerbero,jackjansen/cerbero,shoreflyer/cerbero,nirbheek/cerbero,atsushieno/cerbero,brion/cerbero,atsushieno/cerbero,ikonst/cerbero,ford-prefect/cerbero,nzjrs/cerbero,EricssonResearch/cerbero,freedesktop-unofficial-mirror/gstreamer-sdk__cerbero,superdump/cerbero,OptoFidelity/cerbero,brion/cerbero,ramaxlo/cerbero,BigBrother-International/gst-cerbero,flexVDI/cerbero,shoreflyer/cerbero,ramaxlo/cerbero,justinjoy/cerbero,flexVDI/cerbero,freedesktop-unofficial-mirror/gstreamer-sdk__cerbero,jackjansen/cerbero,AlertMe/cerbero,multipath-rtp/cerbero,centricular/cerbero,cee1/cerbero-mac,freedesktop-unofficial-mirror/gstreame
r__cerbero,nzjrs/cerbero,nzjrs/cerbero,atsushieno/cerbero,ikonst/cerbero,ylatuya/cerbero,AlertMe/cerbero,freedesktop-unofficial-mirror/gstreamer-sdk__cerbero,jackjansen/cerbero,EricssonResearch/cerbero,nicolewu/cerbero,justinjoy/cerbero,superdump/cerbero,fluendo/cerbero,lubosz/cerbero,multipath-rtp/cerbero,GStreamer/cerbero,nicolewu/cerbero,GStreamer/cerbero,sdroege/cerbero,ikonst/cerbero,GStreamer/cerbero,superdump/cerbero,freedesktop-unofficial-mirror/gstreamer__sdk__cerbero,jackjansen/cerbero-2013,centricular/cerbero,freedesktop-unofficial-mirror/gstreamer__cerbero,nirbheek/cerbero-old,ramaxlo/cerbero,BigBrother-International/gst-cerbero,davibe/cerbero,ramaxlo/cerbero,shoreflyer/cerbero,ikonst/cerbero,centricular/cerbero,flexVDI/cerbero,cee1/cerbero-mac,nirbheek/cerbero-old,jackjansen/cerbero-2013,freedesktop-unofficial-mirror/gstreamer__sdk__cerbero,atsushieno/cerbero,centricular/cerbero,ylatuya/cerbero,ford-prefect/cerbero,centricular/cerbero,justinjoy/cerbero,shoreflyer/cerbero,EricssonResearch/cerbero,BigBrother-International/gst-cerbero,brion/cerbero,ikonst/cerbero,GStreamer/cerbero,AlertMe/cerbero | cerbero/commands/info.py | cerbero/commands/info.py | # cerbero - a multi-platform build system for Open Source software
# Copyright (C) 2012 Andoni Morales Alastruey <ylatuya@gmail.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
from cerbero.commands import Command, register_command
from cerbero.utils import _, N_, ArgparseArgument
from cerbero.utils import messages as m
from cerbero.packages.packagesstore import PackagesStore
# %-style template rendered by PackageInfo.run() with the package metadata.
INFO_TPL='''
Name: %(name)s
Version: %(version)s
Homepage: %(url)s
Dependencies: %(deps)s
Licences: %(licenses)s
Description: %(desc)s
'''
class PackageInfo(Command):
    """Command that prints a package's metadata, or its installed file list."""
    doc = N_('Print information about this package')
    name = 'packageinfo'

    def __init__(self):
        arguments = [
            ArgparseArgument('package', nargs=1,
                             help=_('name of the package')),
            ArgparseArgument('-l', '--list-files', action='store_true',
                             default=False,
                             help=_('List all files installed by this package')),
        ]
        Command.__init__(self, arguments)

    def run(self, config, args):
        store = PackagesStore(config)
        package_name = args.package[0]
        if args.list_files:
            # Only the installed-files listing was requested.
            m.message('\n'.join(store.get_package_files_list(package_name)))
            return
        package = store.get_package(package_name)
        details = {'name': package.name,
                   'version': package.version,
                   'url': package.url,
                   'licenses': ' '.join(package.licenses),
                   'desc': package.shortdesc,
                   'deps': ', '.join(store.get_package_deps(package_name))}
        m.message(INFO_TPL % details)


register_command(PackageInfo)
| lgpl-2.1 | Python | |
cb79c9bf74cb18f3ee86c7c3d5415ce1b088dde2 | Add missing markdown file. | AllMyChanges/allmychanges.com,AllMyChanges/allmychanges.com,AllMyChanges/allmychanges.com,AllMyChanges/allmychanges.com | allmychanges/markdown.py | allmychanges/markdown.py | import CommonMark
def render_markdown(text):
    """Render CommonMark-formatted *text* to an HTML string."""
    ast = CommonMark.DocParser().parse(text)
    return CommonMark.HTMLRenderer().render(ast)
| bsd-2-clause | Python | |
114ea6c10658d2c199c68637d04bdd968fcc4452 | Test case for task.info.json files | voyagersearch/voyager-py,voyagersearch/voyager-py | voyager_tasks/test/test_info_files.py | voyager_tasks/test/test_info_files.py | import os
import sys
import glob
import json
import unittest
sys.path.append(os.path.dirname(os.path.dirname(__file__)))
import voyager_tasks
class TestInfoFiles(unittest.TestCase):
    """Test case for checking info files exist
    for each task and have a valid structure.
    """
    @classmethod
    def setUpClass(self):
        # NOTE(review): a classmethod's first argument is conventionally
        # `cls`; the attributes assigned below are class-level attributes.
        self.tasks = set(voyager_tasks.__tasks__)
        # Info files are looked up relative to the current working directory
        # (<parent of cwd>/../info) -- assumes tests run from the test dir;
        # TODO confirm.
        self.info_dir = os.path.abspath(os.path.join(os.path.dirname(os.getcwd()), '..', 'info'))
        # Task name = everything before the first '.' of "<name>.info.json".
        self.json_files = set([os.path.basename(f).split('.')[0] for f in glob.glob(os.path.join(self.info_dir, '*.info.json'))])
        self.names = []
        self.runner = set()
        self.display = set()
        # Only parse info files that correspond to a registered task.
        files_to_test = self.json_files.intersection(self.tasks)
        for name in files_to_test:
            test_file = os.path.join(self.info_dir, '{0}.info.json'.format(name))
            with open(test_file) as f:
                d = json.load(f)
                self.names.append(d['name'])
                self.runner.add(d['runner'])
                # NOTE(review): dict.keys()[0] works on Python 2 only; under
                # Python 3 this would need list(d['display'])[0].
                self.display.add(d['display'].keys()[0])
    def test_json_exists(self):
        """Ensure an info.json file exists for each task"""
        self.assertEqual(self.tasks.issubset(self.json_files), True)
    def test_json_names(self):
        """Verify each info.json has a valid name field and value"""
        self.assertEqual(sorted(list(self.tasks)), sorted(self.names))
    def test_json_runner(self):
        # Every inspected info file must declare exactly the 'python' runner.
        self.assertEqual(len(list(self.runner)) == 1 and list(self.runner)[0] == 'python', True)
    def test_json_display(self):
        """Default display should be set to 'en' for all info.json files"""
        self.assertEqual(len(list(self.display)) == 1 and list(self.display)[0] == 'en', True)
# Allow running this module directly as well as via a test runner.
if __name__ == '__main__':
    unittest.main()
| apache-2.0 | Python | |
24f536a72b0467ff3ee1615f515ecff9fbf36bb3 | Add pair sum | sitdh/com-prog | ch07_04.py | ch07_04.py | number_of_data = int(input().strip())
# Moving average over the numbers read from stdin: window of 3 for interior
# points, window of 2 at both edges.  `number_of_data` was read just above.
ma = [0] * number_of_data
numbers = [int(c) for c in input().strip().split()]
for i in range(1, number_of_data - 1):
    # Interior point: mean of the value and its two neighbours.
    ma[i] = (sum(numbers[i-1:i+2])/3)
# Edge points have only one neighbour each, so average two values.
ma[0] = sum(numbers[0:2])/2
ma[-1] = sum(numbers[number_of_data - 2:])/2
# Emit one average per line.
ma = [str(d) for d in ma]
print("\n".join(ma))
| mit | Python | |
afa0efbdfc6bc4d19eaba919bc82c907fce37fa7 | add base for API endpoint | devinit/extractormeter | datasets/api.py | datasets/api.py | import json
from flask import request, Response, url_for
from jsonschema import validate, ValidationError
import models
import decorators
from datasets import app
from database import session
| mit | Python | |
ce9ac96a6f1e57ebbce162b7e097675c23f1f2f4 | Implement simple gaussian process regression. | alasdairtran/mclearn,chengsoonong/mclass-sky,alasdairtran/mclearn,alasdairtran/mclearn,chengsoonong/mclass-sky,chengsoonong/mclass-sky,alasdairtran/mclearn,chengsoonong/mclass-sky | projects/jakub/gaussian_processes/gaussian_process_regression.py | projects/jakub/gaussian_processes/gaussian_process_regression.py | import csv
import sys
import matplotlib.pyplot as plt
import numpy as np
import sklearn
import sklearn.gaussian_process.kernels
# GP prior: constant offset + Matern(nu=3/2) covariance + white noise term.
kernel = (sklearn.gaussian_process.kernels.ConstantKernel()
          + sklearn.gaussian_process.kernels.Matern(length_scale=2, nu=3/2)
          + sklearn.gaussian_process.kernels.WhiteKernel(noise_level=1))
# CSV layout: column 4 holds the regression target, the columns below the
# input features; rows are used only when column 2 equals "Galaxy".
LABEL_COL = 4
INPUT_COLS = 7, 9, 11, 13, 15
INPUT_DIM = len(INPUT_COLS)
INPUT_ROW_VALID = lambda row: row[2] == "Galaxy"
# Sample counts for training, testing, and plotting.
INPUT_SAMPLES_NUM = 1000
TESTING_SAMPLES_NUM = 1000
PLOT_SAMPLES = 1000
def take_samples(reader, num):
    """Collect *num* valid samples from the CSV *reader*.

    Rows are filtered with INPUT_ROW_VALID; for each accepted row the label
    column (LABEL_COL) goes into ``y`` and the feature columns (INPUT_COLS)
    into ``X``.

    Returns:
        (X, y): arrays of shape (num, INPUT_DIM) and (num,).

    Raises:
        ValueError: if the reader is exhausted before *num* valid rows were
            seen (previously a bare ``Exception``; ValueError is more precise
            and still caught by existing ``except Exception`` handlers).
    """
    X = np.empty((num, INPUT_DIM))
    y = np.empty((num,))
    i = 0
    for row in reader:
        if INPUT_ROW_VALID(row):
            y[i] = float(row[LABEL_COL])
            for j, col in enumerate(INPUT_COLS):
                X[i, j] = float(row[col])
            i += 1
            if i == num:
                break
    else:
        # for/else: this branch runs only when the loop was NOT broken out
        # of, i.e. the input ran out before `num` samples were collected.
        raise ValueError("Not enough samples in file.")
    return X, y
def main(path):
    """Fit a Gaussian process to galaxy samples from the CSV at *path*.

    The first INPUT_SAMPLES_NUM valid rows are used for training and the
    following TESTING_SAMPLES_NUM rows for scoring; the score is printed.
    """
    with open(path) as f:
        reader = csv.reader(f)
        next(reader)  # Skip headers
        X, y = take_samples(reader, INPUT_SAMPLES_NUM)
        test_X, test_y = take_samples(reader, TESTING_SAMPLES_NUM)

    gp = sklearn.gaussian_process.GaussianProcessRegressor(kernel=kernel)
    gp.fit(X, y)

    # A disabled (`if False:`) plotting branch used to live here; it
    # referenced the undefined constant PRED_DATA and could never run, so
    # the dead code was removed.
    print("Score: {}".format(gp.score(test_X, test_y)))
if __name__ == '__main__':
    # Exactly one argument is required: the path of the input CSV file.
    if len(sys.argv) != 2:
        # Previously a bare ValueError() -- now gives the user a usage hint.
        raise ValueError("usage: gaussian_process_regression.py <csv-path>")
    main(sys.argv[1])
| bsd-3-clause | Python | |
c4aca4fe1bf02286f218ca855a41e380987818f7 | Add test example | cloudcomputinghust/CAL_Appliances,cloudcomputinghust/CAL_Appliances,cloudcomputinghust/CAL_Appliances | fcap/tests/test_example.py | fcap/tests/test_example.py | import unittest
class BasicTestSuite(unittest.TestCase):
    """Basic test cases."""
    def test_absolute_truth_and_meaning(self):
        # Smoke test: passes unconditionally, proving the runner is wired up.
        assert True
| mit | Python | |
fa27978c50364c903e2c343560f66db6ddc76bdb | add setup.py | ccwang002/2014-Taipeipy-venv | play_tox/setup.py | play_tox/setup.py | from setuptools import setup
# Minimal setuptools metadata: a deliberately tiny placeholder package "x".
setup(name="x")
| mit | Python | |
6c4edaefe30905f62b885b931a1c5ca6d65cd220 | Add tests for project model | ganemone/ontheside,ganemone/ontheside,ganemone/ontheside | server/tests/models/test_project.py | server/tests/models/test_project.py | from server.models import Project
from server.tests.helpers import fixtures, FlaskTestCase
class TestProject(FlaskTestCase):
    """Tests for Project's owner/contributer/designer accessors.

    The count assertions in this block previously compared ints with ``is``,
    which tests object identity and only worked because CPython caches small
    integers (Python 3.8+ emits a SyntaxWarning for it); they now use ``==``.
    """
    @fixtures('single_project.json')
    def test_get_single_owner(self):
        """Test getting single project owner
        """
        with self.flaskapp.test_request_context():
            project = Project.query.filter_by(id=1).first()
            owners = project.get_owners()
            assert len(owners) == 1
    @fixtures('many_owners.json')
    def test_get_many_owners(self):
        """Test getting multiple project owners
        """
        with self.flaskapp.test_request_context():
            project = Project.query.filter_by(id=1).first()
            owners = project.get_owners()
            assert len(owners) == 3
    @fixtures('single_contributer.json')
    def test_get_single_contributer(self):
        """Test getting single contributer
        """
        with self.flaskapp.test_request_context():
            project = Project.query.filter_by(id=1).first()
            contributers = project.get_contributers()
            assert len(contributers) == 1
    @fixtures('many_contributers.json')
    def test_get_many_contributers(self):
        """Test getting many contributers
        """
        with self.flaskapp.test_request_context():
            project = Project.query.filter_by(id=1).first()
            contributers = project.get_contributers()
            assert len(contributers) == 3
    @fixtures('single_designer.json')
    def test_get_single_designer(self):
        """Test getting single designer
        """
        with self.flaskapp.test_request_context():
            project = Project.query.filter_by(id=1).first()
            designers = project.get_designers()
            assert len(designers) == 1
@fixtures('many_designers.json')
def test_get_many_designers(self):
"""Test getting many designers
"""
with self.flaskapp.test_request_context():
project = Project.query.filter_by(id=1).first()
designers = project.get_designers()
assert len(designers) is 2 | mit | Python | |
ea7f32243c70de2737b1759db2c0e12337ecf840 | add missing file | simphony/simphony-common | simphony/testing/abc_check_lattice.py | simphony/testing/abc_check_lattice.py | import abc
from functools import partial
import numpy
from numpy.testing import assert_array_equal
from simphony.testing.utils import (
create_data_container, compare_data_containers, compare_lattice_nodes)
from simphony.cuds.lattice import LatticeNode
from simphony.core.data_container import DataContainer
class ABCCheckLattice(object):
    """Reusable test-case mixin exercising the lattice container API;
    concrete subclasses supply the container via ``container_factory``."""
    # NOTE(review): __metaclass__ only has effect on Python 2.
    __metaclass__ = abc.ABCMeta
    def setUp(self):
        # Make assertEqual delegate to the domain-specific comparators.
        self.addTypeEqualityFunc(
            DataContainer, partial(compare_data_containers, testcase=self))
        self.addTypeEqualityFunc(
            LatticeNode, partial(compare_lattice_nodes, testcase=self))
        self.size = (5, 10, 15)
        self.base_vect = (0.1, 0.2, 0.3)
        self.origin = (-2.0, 0.0, 1.0)
        self.container = self.container_factory(
            'foo', 'Cubic', self.base_vect, self.size, self.origin)
    @abc.abstractmethod
    def container_factory(self, name, type_, base_vect, size, origin):
        """ Create and return a lattice.
        """
    def test_iter_nodes(self):
        container = self.container
        # number of nodes
        number_of_nodes = sum(1 for node in container.iter_nodes())
        self.assertEqual(number_of_nodes, numpy.prod(self.size))
        # data
        for node in container.iter_nodes():
            self.assertEqual(node.data, DataContainer())
        # indexes
        x, y, z = numpy.meshgrid(
            range(self.size[0]), range(self.size[1]), range(self.size[2]))
        expected = set(zip(x.flat, y.flat, z.flat))
        indexes = {node.index for node in container.iter_nodes()}
        self.assertEqual(indexes, expected)
    def test_get_node(self):
        container = self.container
        index = 2, 3, 4
        node = container.get_node(index)
        expected = LatticeNode(index)
        self.assertEqual(node, expected)
        # check that mutating the node does not change internal info
        node.data = create_data_container()
        self.assertNotEqual(container.get_node(index), node)
    def test_get_node_with_invalid_index(self):
        container = self.container
        # One component beyond the lattice size must be rejected...
        index = 2, 300, 4
        with self.assertRaises(IndexError):
            container.get_node(index)
        # ...and so must a negative component.
        index = 2, 3, -4
        with self.assertRaises(IndexError):
            container.get_node(index)
    def test_update_node(self):
        container = self.container
        index = 2, 3, 4
        node = container.get_node(index)
        node.data = create_data_container()
        container.update_node(node)
        new_node = container.get_node(index)
        self.assertEqual(new_node, node)
        # Check that `new_node` is not the same instance as `node`
        self.assertIsNot(new_node, node)
    def test_get_coordinate(self):
        container = self.container
        xspace, yspace, zspace = self.base_vect
        x, y, z = numpy.meshgrid(
            range(self.size[0]), range(self.size[1]), range(self.size[2]))
        indexes = zip(x.flat, y.flat, z.flat)
        # Expected coordinate per axis: origin + index * spacing.
        # NOTE(review): `expected[i]` below needs zip() to return a list,
        # i.e. Python 2 -- confirm before porting to Python 3.
        expected = zip(
            x.ravel() * xspace + self.origin[0],
            y.ravel() * yspace + self.origin[1],
            z.ravel() * zspace + self.origin[2])
        for i, index in enumerate(indexes):
            assert_array_equal(container.get_coordinate(index), expected[i])
    def test_lattive_properties(self):
        # NOTE(review): "lattive" looks like a typo for "lattice"; renaming
        # would change the public test name, so it is only flagged here.
        container = self.container
        # check values
        self.assertEqual(container.type, 'Cubic')
        self.assertEqual(container.name, 'foo')
        assert_array_equal(container.size, self.size)
        assert_array_equal(container.origin, self.origin)
        assert_array_equal(container.base_vect, self.base_vect)
        # check read-only
        with self.assertRaises(AttributeError):
            container.type = 'Cubic'
        with self.assertRaises(AttributeError):
            container.size = self.size
        with self.assertRaises(AttributeError):
            container.origin = self.origin
        with self.assertRaises(AttributeError):
            container.base_vect = self.base_vect
        # check read-write
        container.name = 'boo'
        self.assertEqual(container.name, 'boo')
| bsd-2-clause | Python | |
ee62d6a972e5af72fc9a5e2e36d1a7822a1703af | Add sample on veh handle setup in remote process | hakril/PythonForWindows | samples/remote_veh_segv.py | samples/remote_veh_segv.py | import windows
import windows.test
from windows.generated_def.winstructs import *
#c = windows.test.pop_calc_64()
c = windows.test.pop_calc_64(dwCreationFlags=CREATE_SUSPENDED)
python_code = """
import windows
import ctypes
import windows
from windows.vectored_exception import VectoredException
import windows.generated_def.windef as windef
from windows.generated_def.winstructs import *
windows.utils.create_console()
@VectoredException
def handler(exc):
print("POUET")
if exc[0].ExceptionRecord[0].ExceptionCode == EXCEPTION_ACCESS_VIOLATION:
target_addr = ctypes.cast(exc[0].ExceptionRecord[0].ExceptionInformation[1], ctypes.c_void_p).value
print("Instr at {0} accessed to addr {1}".format(hex(exc[0].ExceptionRecord[0].ExceptionAddress), hex(target_addr)))
#return windef.EXCEPTION_CONTINUE_EXECUTION
return windef.EXCEPTION_CONTINUE_SEARCH
windows.winproxy.AddVectoredExceptionHandler(0, handler)
print("YOLO<3")
print(ctypes.c_uint.from_address(0x42424242).value)
"""
x = c.execute_python(python_code) | bsd-3-clause | Python | |
0e5ba1d9ae7ca7d5439d886abe732f0fcebed49b | Create classes.py | V1Soft/Essential | classes.py | classes.py | class String(object):
| bsd-3-clause | Python | |
e29bdc567c3d1f04f9e9ec17792052b0f66f918e | reorder users migration | monty5811/apostello,monty5811/apostello,monty5811/apostello,monty5811/apostello | apostello/migrations/0010_auto_20160421_1411.py | apostello/migrations/0010_auto_20160421_1411.py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-04-21 13:11
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Order UserProfile query results by the related user's email address."""
    dependencies = [
        ('apostello', '0009_userprofile_message_cost_limit'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='userprofile',
            options={'ordering': ['user__email']},
        ),
    ]
| mit | Python | |
a519b7c91a8ea84549efcdf145aed56cf89b9d59 | Create users.py | chidaobanjiu/Loocat.cc,chidaobanjiu/Flask_Web,chidaobanjiu/MANA2077,chidaobanjiu/MANA2077,chidaobanjiu/Flask_Web,chidaobanjiu/MANA2077,chidaobanjiu/Loocat.cc,chidaobanjiu/Loocat.cc,chidaobanjiu/Flask_Web,chidaobanjiu/MANA2077,chidaobanjiu/MANA2077,chidaobanjiu/Flask_Web,chidaobanjiu/MANA2077 | app/api_1_0/users.py | app/api_1_0/users.py | from flask import jsonify, request, current_app, url_for
from . import api
from ..models import User, Post
@api.route('/users/<int:id>')
def get_user(id):
    """Return the JSON representation of a single user (404 if unknown)."""
    return jsonify(User.query.get_or_404(id).to_json())
@api.route('/users/<int:id>/posts/')
def get_user_posts(id):
    """Return one page of the user's posts plus prev/next pagination links.

    The page number comes from the ``page`` query argument (default 1).
    """
    user = User.query.get_or_404(id)
    page = request.args.get('page', 1, type=int)
    pagination = user.posts.order_by(Post.timestamp.desc()).paginate(
        page, per_page=current_app.config['FLASKY_POSTS_PER_PAGE'],
        error_out=False)
    posts = pagination.items
    prev = None
    if pagination.has_prev:
        # BUG FIX: url_for() must receive the route's `id` parameter;
        # without it building the URL raises werkzeug BuildError.
        prev = url_for('api.get_user_posts', id=id, page=page-1,
                       _external=True)
    next = None
    if pagination.has_next:
        next = url_for('api.get_user_posts', id=id, page=page+1,
                       _external=True)
    return jsonify({
        'posts': [post.to_json() for post in posts],
        'prev': prev,
        'next': next,
        'count': pagination.total
    })
@api.route('/users/<int:id>/timeline/')
def get_user_followed_posts(id):
    """Return one page of posts by users that this user follows.

    The page number comes from the ``page`` query argument (default 1).
    """
    user = User.query.get_or_404(id)
    page = request.args.get('page', 1, type=int)
    pagination = user.followed_posts.order_by(Post.timestamp.desc()).paginate(
        page, per_page=current_app.config['FLASKY_POSTS_PER_PAGE'],
        error_out=False)
    posts = pagination.items
    prev = None
    if pagination.has_prev:
        # BUG FIX: url_for() must receive the route's `id` parameter;
        # without it building the URL raises werkzeug BuildError.
        prev = url_for('api.get_user_followed_posts', id=id, page=page-1,
                       _external=True)
    next = None
    if pagination.has_next:
        next = url_for('api.get_user_followed_posts', id=id, page=page+1,
                       _external=True)
    return jsonify({
        'posts': [post.to_json() for post in posts],
        'prev': prev,
        'next': next,
        'count': pagination.total
    })
| mit | Python | |
7445d91a68753052c837d5e0c919585d1f09d3d6 | Add deterministic annealing demo | SalemAmeen/bayespy,bayespy/bayespy,jluttine/bayespy,fivejjs/bayespy | bayespy/demos/annealing.py | bayespy/demos/annealing.py | ######################################################################
# Copyright (C) 2015 Jaakko Luttinen
#
# This file is licensed under Version 3.0 of the GNU General Public
# License. See LICENSE for a text of the license.
######################################################################
######################################################################
# This file is part of BayesPy.
#
# BayesPy is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# BayesPy is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with BayesPy. If not, see <http://www.gnu.org/licenses/>.
######################################################################
"""
Demonstration of deterministic annealing.
Deterministic annealing aims at avoiding convergence to local optima and
finding the global optimum :cite:`Katahira:2008`.
"""
import numpy as np
import scipy
import matplotlib.pyplot as plt
import bayespy.plot as myplt
from bayespy.utils import misc
from bayespy.utils import random
from bayespy.nodes import GaussianARD, Categorical, Mixture
from bayespy.inference.vmp.vmp import VB
from bayespy.inference.vmp import transformations
import bayespy.plot as bpplt
from bayespy.demos import pca
def run(N=500, seed=42, maxiter=100, plot=True):
    """
    Run deterministic annealing demo for 1-D Gaussian mixture.

    N is the number of generated observations, seed initializes the RNG and
    maxiter bounds the VB iterations of each phase.  The same model is fitted
    twice -- plain VB-EM and annealed VB-EM -- and the resulting component
    means and lower bounds are printed for comparison.
    """
    if seed is not None:
        np.random.seed(seed)
    # Two-component mixture: unknown means, unit precision, fixed weights.
    mu = GaussianARD(0, 1,
                     plates=(2,),
                     name='means')
    Z = Categorical([0.3, 0.7],
                    plates=(N,),
                    name='classes')
    Y = Mixture(Z, GaussianARD, mu, 1,
                name='observations')
    # Generate data
    z = Z.random()
    data = np.empty(N)
    for n in range(N):
        # True component means are +4 and -4.
        data[n] = [4, -4][z[n]]
    Y.observe(data)
    # Initialize means closer to the inferior local optimum
    mu.initialize_from_value([0, 6])
    #mu.initialize_from_value([6, 0])
    Q = VB(Y, Z, mu)
    # Remember the initial state so both runs start identically.
    Q.save()
    #
    # Standard VB-EM algorithm
    #
    Q.update(repeat=maxiter)
    ## if plot:
    ##     bpplt.pyplot.plot(Q.L, 'k-')
    mu_vbem = mu.u[0].copy()
    L_vbem = Q.compute_lowerbound()
    #
    # VB-EM with deterministic annealing
    #
    Q.load()
    beta = 0.03
    while True:
        # Run VB at the current annealing temperature, then double beta
        # (capped at 1.0) until the exact objective is reached.
        Q.set_annealing(beta)
        print("Set annealing to %.2f" % beta)
        Q.update(repeat=maxiter, tol=1e-8)
        if beta == 1:
            break
        else:
            beta = min(beta*2, 1.0)
    ## if plot:
    ##     bpplt.pyplot.plot(Q.L, 'r:')
    ##     bpplt.pyplot.xlabel('Iterations')
    ##     bpplt.pyplot.ylabel('VB lower bound')
    ##     bpplt.pyplot.legend(['VB-EM', 'Deterministic annealing'],
    ##                         loc='lower right')
    mu_anneal = mu.u[0].copy()
    L_anneal = Q.compute_lowerbound()
    print("True component probabilities:", np.array([0.3, 0.7]))
    print("True component means:", np.array([4, -4]))
    print("VB-EM component means:", mu_vbem)
    print("VB-EM lower bound:", L_vbem)
    print("Annealed VB-EM component means:", mu_anneal)
    print("Annealed VB-EM lower bound:", L_anneal)
    return
if __name__ == '__main__':
    import sys, getopt, os
    # Long options only; unknown options print usage and exit with code 2.
    try:
        opts, args = getopt.getopt(sys.argv[1:],
                                   "",
                                   ["n=",
                                    "seed=",
                                    "maxiter="])
    except getopt.GetoptError:
        print('python annealing.py <options>')
        print('--n=<INT> Number of data points')
        print('--maxiter=<INT> Maximum number of VB iterations')
        print('--seed=<INT> Seed (integer) for the random number generator')
        sys.exit(2)
    # Translate recognized options into run() keyword arguments.
    kwargs = {}
    for opt, arg in opts:
        if opt == "--maxiter":
            kwargs["maxiter"] = int(arg)
        elif opt == "--seed":
            kwargs["seed"] = int(arg)
        elif opt in ("--n",):
            kwargs["N"] = int(arg)
    run(**kwargs)
    plt.show()
| mit | Python | |
52a8a1cd093f8bdbaf0abfc85eff2d3682e24b12 | Add Python script for questions linting | PavloKapyshin/rusk,PavloKapyshin/rusk,PavloKapyshin/rusk | scripts/check-questions.py | scripts/check-questions.py | #!/usr/bin/env python3
import os
import sys
import json
import collections
import unicodedata
# Single-letter keys used by the question records in questions.json.
TEXT_FIELD = "t"
OPTIONS_FIELD = "o"
KIND_FIELD = "k"
CORRECT_FIELD = "c"
# KIND_FIELD is optional; all the others must be present in every question.
MANDATORY_FIELDS = {TEXT_FIELD, OPTIONS_FIELD, CORRECT_FIELD}
def norm(s):
    """Return *s* in Unicode NFD form so visually equal strings compare equal."""
    return unicodedata.normalize("NFD", s)
def error(message, *, n):
    """Abort validation: raise ValueError tagged with question number *n*."""
    suffix = "({})".format(n)
    raise ValueError(" ".join((message, suffix)))
def check(questions):
    """Validate the question dicts; raises ValueError (via error()) on the
    first flaw found.  Duplicate-text detection runs after the main loop."""
    text_occurences = collections.defaultdict(list)
    for n, question in enumerate(questions, start=1):
        # Contains mandatory fields.
        missing = MANDATORY_FIELDS - set(question.keys())
        if missing:
            error("missing {}".format(", ".join(missing)), n=n)
        # Record where each (normalized) question text occurs.
        text_occurences[norm(question[TEXT_FIELD])].append(n)
        # Kind, if present, is "tr".
        if KIND_FIELD in question and question[KIND_FIELD] != "tr":
            error("{} != tr".format(KIND_FIELD), n=n)
        # There are at least four options & they are unique.
        options = tuple(map(norm, question[OPTIONS_FIELD]))
        options_count = len(options)
        if len(set(options)) != options_count or options_count < 4:
            error(">= 4 unique options are required", n=n)
        # There is at least one correct index.
        correct = question[CORRECT_FIELD]
        if len(correct) < 1:
            error(">= 1 correct index is required", n=n)
        for index in correct:
            # NOTE(review): negative indices pass this check via Python's
            # negative indexing -- confirm that is intended.  Also note
            # "adressable" is a typo in the emitted message.
            try:
                options[index]
            except IndexError:
                error("index {} is not adressable".format(index), n=n)
    # Text is not repeated.
    for text, ns in text_occurences.items():
        if len(ns) > 1:
            error(
                "t {} is repeated at {}".format(
                    text, ", ".join(map(str, ns[1:]))), n=ns[0])
def main():
    """Load src/questions.json (relative to this script) and lint it."""
    questions_path = os.path.normpath(
        os.path.join(
            os.path.dirname(__file__), "..", "src", "questions.json"))
    with open(questions_path, "r", encoding="utf-8") as file:
        questions = json.load(file)
    try:
        check(questions)
    except ValueError as e:
        # Print only the first validation failure and exit non-zero (CI use).
        # NOTE(review): this is the site builtin exit(); sys.exit(1) would be
        # the conventional choice.
        print(e, file=sys.stderr)
        exit(1)
if __name__ == "__main__":
    main()
| bsd-3-clause | Python | |
3a662b5820ea90c0cd63116a610ede25558c5562 | Add tests directory to sourceterm package and start test module. | ihuston/pyflation,ihuston/pyflation | sourceterm/tests/test_srcequations.py | sourceterm/tests/test_srcequations.py | '''
Created on 25 Aug 2010
@author: ith
'''
import unittest
class Test(unittest.TestCase):
    # Placeholder suite: passes trivially until real test cases are added.
    def testName(self):
        pass
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main() | bsd-3-clause | Python | |
3d11921f67c1928bb79869c3af1f8836360219fd | Add SIF assembler for Boolean network generation | sorgerlab/indra,johnbachman/indra,bgyori/indra,sorgerlab/belpy,sorgerlab/belpy,pvtodorov/indra,johnbachman/belpy,pvtodorov/indra,sorgerlab/indra,jmuhlich/indra,jmuhlich/indra,pvtodorov/indra,jmuhlich/indra,johnbachman/indra,sorgerlab/belpy,johnbachman/indra,pvtodorov/indra,bgyori/indra,sorgerlab/indra,bgyori/indra,johnbachman/belpy,johnbachman/belpy | indra/assemblers/sif_assembler.py | indra/assemblers/sif_assembler.py | import networkx as nx
from indra.statements import *
class SifAssembler(object):
    """Builds a signed directed graph from INDRA Activation statements and
    can serialize it as a set of Boolean update rules."""
    def __init__(self, stmts=None):
        # Statements to assemble; defaults to an empty list.
        if stmts is None:
            self.stmts = []
        else:
            self.stmts = stmts
        self.graph = nx.DiGraph()
        self.nodes = {}
    def make_model(self):
        """Add one subj->obj edge per Activation statement, tagged with
        'positive' or 'negative' polarity."""
        for st in self.stmts:
            if isinstance(st, Activation):
                s = self.add_node(st.subj)
                t = self.add_node(st.obj)
                if st.is_activation:
                    self.add_edge(s, t, {'polarity': 'positive'})
                else:
                    self.add_edge(s, t, {'polarity': 'negative'})
    def print_boolean_net(self, out_file=None):
        """Serialize the graph as Boolean rules.

        Every node is initialized to False; each node with incoming edges
        gets a rule of the form "name* = (positives) and not (negatives)".
        The full text is returned and, if *out_file* is given, also written
        to that path.
        """
        init_str = ''
        for node_key in self.graph.nodes():
            node_name = self.graph.node[node_key]['name']
            init_str += '%s = False\n' % node_name
        rule_str = ''
        for node_key in self.graph.nodes():
            node_name = self.graph.node[node_key]['name']
            in_edges = self.graph.in_edges(node_key)
            if not in_edges:
                # Nodes without regulators keep their initial value.
                continue
            # Split the incoming edges by polarity.
            parents = [e[0] for e in in_edges]
            polarities = [self.graph.edge[e[0]][node_key]['polarity']
                          for e in in_edges]
            pos_parents = [par for par, pol in zip(parents, polarities) if
                           pol == 'positive']
            neg_parents = [par for par, pol in zip(parents, polarities) if
                           pol == 'negative']
            rhs_pos_parts = []
            for par in pos_parents:
                rhs_pos_parts.append(self.graph.node[par]['name'])
            rhs_pos_str = ' or '.join(rhs_pos_parts)
            rhs_neg_parts = []
            for par in neg_parents:
                rhs_neg_parts.append(self.graph.node[par]['name'])
            rhs_neg_str = ' or '.join(rhs_neg_parts)
            # Combine activators (OR'd) with negated inhibitors.
            if rhs_pos_str:
                if rhs_neg_str:
                    rhs_str = '(' + rhs_pos_str + \
                              ') and not (' + rhs_neg_str + ')'
                else:
                    rhs_str = rhs_pos_str
            else:
                rhs_str = 'not (' + rhs_neg_str + ')'
            node_eq = '%s* = %s\n' % (node_name, rhs_str)
            rule_str += node_eq
        full_str = init_str + '\n' + rule_str
        if out_file is not None:
            with open(out_file, 'wt') as fh:
                fh.write(full_str)
        return full_str
    def add_node(self, agent):
        """Add (or refresh) the node for *agent*, keyed by its matches_key."""
        node_key = agent.matches_key()
        self.graph.add_node(node_key, name=agent.name)
        return node_key
    def add_edge(self, s, t, edge_attributes=None):
        # NOTE(review): graph.node / graph.edge and passing the attribute
        # dict positionally are the networkx 1.x API -- confirm the pinned
        # networkx version before upgrading.
        if edge_attributes is None:
            self.graph.add_edge(s, t)
        else:
            self.graph.add_edge(s, t, edge_attributes)
| bsd-2-clause | Python | |
1b10427e309861bb8afb00c45446f5a9cce5ba96 | Create whenido-renamer.py | tingmakpuk/Renamer | whenido-renamer.py | whenido-renamer.py | """Renamer, Copyright 2014, Whenido, /u/tingmakpuk. Open source license available.
This program was the first project of a complete novice, and the use of the program is at your own risk.
Current version 3.0: More eligantly avoids renaming this program file. However, it can still rename other folders in the directory.
WIP v 4.0: Use __file__ to most eligantly skip? Need to research identifying folders.
"""
# *Config and warnings*
# Interactive Python 2 renamer: previews and applies Plex-style names
# ("Show - S01E01.ext") to every non-"Whenido" file in the current directory.
import os
import sys
#Better way to format?
print "This program is designed to rename tv shows in Plex format of 'Name - s01e01'.ext"
print "Please read directions and use only as instructed, or you may end up renaming files you do not want to rename."
print
print "Plex prefers your files separated into seasons('Showname' folder -'Season #' folder -'Showname - s01e01.ext')."
print "This program is designed to work with one season at a time; please separate your episodes into seasons per plex protocol."
print "Put this program into a season folder and restart. Files will be taken alphabetically, and rename them, per your naming instructions."
print
print
# *Get name, season and verification*
name_loop = True
while name_loop == True:
    # Prompt for the show name; a blank answer gets one retry, and a second
    # blank falls back to the "Herp Derp" placeholder.
    name = str(raw_input("Enter the show name the way you want it listed: ")) #Retry once if left blank, but defaults to generic if left blank twice.
    if name == "":
        print "That wouldn't be too smart. Please try again."
        name = str(raw_input("Enter the show name the way you want it listed: ") or "Herp Derp")
        if name == "Herp Derp":
            print "Well, can't say I didn't try."
    season = int(raw_input("Which season: ") or 1) #Defaults to season 1 if left blank. Crashes if str.
    episode = 0
    print
    print "All the files in this directory will be relabeled as follows:"
    # Preview pass over the directory contents.
    # NOTE(review): this "preview" loop already calls os.rename below, so the
    # files are renamed BEFORE the yes/no confirmation is asked -- looks
    # unintended; confirm whether the rename belongs only in the second loop.
    for filename in os.listdir("."):
        #v3.0 has been fixed; previous versions will break at Whenido;
        #Still room for formal fix of checking for folders (?) and using __file__ (?) instead of Whenido
        if filename.startswith("Whenido") == False:
            print
            print filename
            # Assumes a 3-character extension (avi/mkv/...); longer
            # extensions would be truncated -- TODO confirm.
            filename_ext = filename[-3:]
            episode += 1
            newfile = str("{} - S{:02d}E{:02d}.{}".format(name, season, episode, filename_ext))
            print "Will be renamed as %s " % (newfile)
            print
            os.rename(filename, newfile)
        elif filename.startswith("Whenido"):
            # Exact complement of the branch above, so this always matches when
            # the first does not (equivalent to a plain else): skip this script.
            print
            print "Skipping %s." % (filename)
            continue
    # Accepted confirmation answers mapped to a boolean decision.
    valid = {"yes":True, "y":True, "ye":True,
             "no":False, "n":False}
    yesno_loop = True
    while yesno_loop:
        print
        sys.stdout.write("Type no to cancel; yes to rename all files: ")
        choice = raw_input().lower()
        if choice in valid:
            if valid[choice] == True:
                # Confirmed: exit both loops and fall through to the rename pass.
                name_loop = False
                yesno_loop = False
            elif valid[choice] == False:
                # Declined: restart the outer name/season prompt.
                name_loop = True
                print
                print "Canceled. Restarting..."
                break
        else:
            sys.stdout.write("Please respond with 'yes' or 'no' "\
                "(or 'y' or 'n').\n")
# *Process files*
#Rerunning the newfile is inefficient, but it has to be recalculated for each file, so no way around?
episode = 0
for filename in os.listdir("."):
    #v3.0 has been fixed; previous versions will break at Whenido;
    #Still room for formal fix of checking for folders (?) and using __file__ (?) instead of Whenido
    if filename.startswith("Whenido") == False:
        print
        print filename
        filename_ext = filename[-3:]
        episode += 1
        newfile = str("{} - S{:02d}E{:02d}.{}".format(name, season, episode, filename_ext))
        print "Renamed as %s " % (newfile)
        print
        os.rename(filename, newfile)
    elif filename.startswith("Whenido"): #for lack of a better way, keep program named Whenido to skip
        print
        print "Skipped %s." % (filename)
        continue
| apache-2.0 | Python | |
f552979125531fade029bc8baa51e2d0bb9dd320 | Simplify retrieving of config home in core init method | prvnkumar/powerline,Luffin/powerline,IvanAli/powerline,s0undt3ch/powerline,kenrachynski/powerline,DoctorJellyface/powerline,firebitsbr/powerline,wfscheper/powerline,xfumihiro/powerline,magus424/powerline,russellb/powerline,seanfisk/powerline,xxxhycl2010/powerline,darac/powerline,junix/powerline,s0undt3ch/powerline,wfscheper/powerline,firebitsbr/powerline,darac/powerline,seanfisk/powerline,S0lll0s/powerline,cyrixhero/powerline,blindFS/powerline,magus424/powerline,lukw00/powerline,cyrixhero/powerline,seanfisk/powerline,darac/powerline,S0lll0s/powerline,keelerm84/powerline,Liangjianghao/powerline,bartvm/powerline,IvanAli/powerline,QuLogic/powerline,QuLogic/powerline,areteix/powerline,keelerm84/powerline,xfumihiro/powerline,EricSB/powerline,cyrixhero/powerline,dragon788/powerline,bartvm/powerline,bezhermoso/powerline,s0undt3ch/powerline,IvanAli/powerline,kenrachynski/powerline,areteix/powerline,Liangjianghao/powerline,prvnkumar/powerline,DoctorJellyface/powerline,Luffin/powerline,junix/powerline,blindFS/powerline,DoctorJellyface/powerline,S0lll0s/powerline,kenrachynski/powerline,xxxhycl2010/powerline,Luffin/powerline,magus424/powerline,firebitsbr/powerline,lukw00/powerline,blindFS/powerline,bezhermoso/powerline,xxxhycl2010/powerline,prvnkumar/powerline,EricSB/powerline,Liangjianghao/powerline,wfscheper/powerline,lukw00/powerline,russellb/powerline,junix/powerline,dragon788/powerline,bezhermoso/powerline,EricSB/powerline,xfumihiro/powerline,areteix/powerline,bartvm/powerline,dragon788/powerline,QuLogic/powerline,russellb/powerline | powerline/core.py | powerline/core.py | # -*- coding: utf-8 -*-
import importlib
import json
import os
import sys
from colorscheme import Colorscheme
from theme import Theme
class Powerline(object):
    """Top-level powerline object for a single extension.

    Construction loads the shared and per-extension configuration, the
    colorscheme and theme referenced by that configuration, and instantiates
    the matching renderer class for the extension.
    """

    def __init__(self, ext):
        config_home = os.environ.get('XDG_CONFIG_HOME', os.path.expanduser('~/.config'))
        self.search_paths = [
            os.path.join(config_home, 'powerline'),
            os.path.realpath(os.path.dirname(__file__)),
        ]
        sys.path[:0] = self.search_paths

        # Main config file: 'common' is shared, 'ext' holds per-extension keys.
        main_config = self._load_json_config('config')
        self.config = main_config['common']
        self.config_ext = main_config['ext'][ext]

        # Colorscheme named by the extension configuration.
        scheme = Colorscheme(self._load_json_config(
            os.path.join('colorschemes', self.config_ext['colorscheme'])))

        # Extension theme built on top of that colorscheme.
        self.theme = Theme(
            ext,
            scheme,
            self._load_json_config(os.path.join('themes', ext, self.config_ext['theme'])),
            self.config)

        # Renderer class lives in powerline.ext.<ext>.renderer as <Ext>Renderer.
        module = importlib.import_module('powerline.ext.{0}.renderer'.format(ext))
        self.renderer = getattr(module, '{0}Renderer'.format(ext.capitalize()))(self.theme)

    def _load_json_config(self, config_file):
        """Return the parsed contents of ``config_file`` + '.json'.

        Each directory in ``self.search_paths`` is tried in order; IOError is
        raised when no directory on the search path contains the file.
        """
        config_file += '.json'
        candidates = (os.path.join(path, config_file) for path in self.search_paths)
        for config_file_path in candidates:
            if not os.path.isfile(config_file_path):
                continue
            with open(config_file_path, 'rb') as config_file_fp:
                return json.load(config_file_fp)
        raise IOError('Config file not found in search path: {0}'.format(config_file))
| # -*- coding: utf-8 -*-
import importlib
import json
import os
import sys
from colorscheme import Colorscheme
from theme import Theme
class Powerline(object):
    """Main powerline object for one extension (e.g. vim, shell).

    On construction this loads the shared and per-extension configuration,
    the colorscheme and theme named by that configuration, and the
    extension's renderer class.
    """

    def __init__(self, ext):
        # os.environ.get() expresses the previous try/except KeyError lookup
        # as a single call with identical semantics: the expanded '~/.config'
        # default is used only when XDG_CONFIG_HOME is absent.
        config_home = os.environ.get('XDG_CONFIG_HOME', os.path.expanduser('~/.config'))
        config_path = os.path.join(config_home, 'powerline')
        plugin_path = os.path.realpath(os.path.dirname(__file__))
        # User configuration directory shadows the bundled defaults.
        self.search_paths = [config_path, plugin_path]
        sys.path[:0] = self.search_paths

        # Load main config file
        config = self._load_json_config('config')
        self.config = config['common']
        self.config_ext = config['ext'][ext]

        # Load and initialize colorscheme
        colorscheme_config = self._load_json_config(os.path.join('colorschemes', self.config_ext['colorscheme']))
        colorscheme = Colorscheme(colorscheme_config)

        # Load and initialize extension theme
        theme_config = self._load_json_config(os.path.join('themes', ext, self.config_ext['theme']))
        self.theme = Theme(ext, colorscheme, theme_config, self.config)

        # Load and initialize extension renderer: the class is looked up as
        # powerline.ext.<ext>.renderer.<Ext>Renderer.
        renderer_module_name = 'powerline.ext.{0}.renderer'.format(ext)
        renderer_class_name = '{0}Renderer'.format(ext.capitalize())
        renderer_class = getattr(importlib.import_module(renderer_module_name), renderer_class_name)
        self.renderer = renderer_class(self.theme)

    def _load_json_config(self, config_file):
        """Return the parsed contents of ``config_file`` + '.json'.

        Each directory in ``self.search_paths`` is tried in order; raises
        IOError when no search-path entry contains the file.
        """
        config_file += '.json'
        for path in self.search_paths:
            config_file_path = os.path.join(path, config_file)
            if os.path.isfile(config_file_path):
                with open(config_file_path, 'rb') as config_file_fp:
                    return json.load(config_file_fp)
        raise IOError('Config file not found in search path: {0}'.format(config_file))
| mit | Python |
5999d8b572d8f28fc4fee0826660a40ec108d15b | Create trimfile.py | suzannerohrback/somaticCNVpipeline,suzannerohrback/somaticCNVpipeline | bin/preprocess/trimfile.py | bin/preprocess/trimfile.py | #!/usr/bin/python
def trimOne():
    """Stub for the single-file trim step.

    Added empty in the initial commit; currently a no-op that always
    returns 0. Presumably filled in by a later revision -- confirm.
    """
    return 0
| mit | Python | |
5bcc8e8fb427322e98bb8cd27f3b15270a6e75a7 | Add script to automatically populate struct fields | bazad/ida_kernelcache | scripts/populate_struct.py | scripts/populate_struct.py | #
# scripts/populate_struct.py
# Brandon Azad
#
# Populate a struct using data flow analysis.
#
def kernelcache_populate_struct(struct=None, address=None, register=None, delta=None):
    """Populate an IDA structure with fields discovered by data-flow analysis.

    Starting from the instruction at ``address``, where ``register`` holds a
    pointer ``delta`` bytes past the start of ``struct``, run
    ida_kernelcache's pointer-access data flow, add a field for each access,
    and convert the accessing operands to struct offsets.

    Any argument left as None is requested interactively via an IDA form.
    Returns True on success, False if the user cancels or validation fails.
    """
    # Imports are local to the function; the script is meant to be loaded
    # inside IDA, where these modules are available.
    import idc
    import idautils
    import idaapi
    import ida_kernelcache as kc
    import ida_kernelcache.ida_utilities as idau
    # Define the form to ask for the arguments.
    class MyForm(idaapi.Form):
        def __init__(self):
            swidth = 40
            idaapi.Form.__init__(self, r"""STARTITEM 0
Automatically populate struct fields

<#The name of the structure#Structure:{structure}>
<#The address of the instruction at which the register points to the structure#Address :{address}>
<#The register containing the pointer to the structure#Register :{register}>
<#The offset of the pointer from the start of the structure#Delta :{delta}>""", {
                'structure': idaapi.Form.StringInput( tp=idaapi.Form.FT_IDENT, swidth=swidth),
                'address': idaapi.Form.NumericInput(tp=idaapi.Form.FT_ADDR, swidth=swidth, width=1000),
                'register': idaapi.Form.StringInput( tp=idaapi.Form.FT_IDENT, swidth=swidth),
                'delta': idaapi.Form.NumericInput(tp=idaapi.Form.FT_INT64, swidth=swidth),
            })
        def OnFormChange(self, fid):
            # No dynamic form behavior; returning 1 reports the event handled.
            return 1
    # If any argument is unspecified, get it using the form.
    if any(arg is None for arg in (struct, address, register, delta)):
        f = MyForm()
        f.Compile()
        # Seed the form with supplied values or defaults (current screen
        # address, register X0, delta 0).
        f.structure.value = struct or 'struc'
        f.address.value = address or idc.ScreenEA()
        f.register.value = register or 'X0'
        f.delta.value = delta or 0
        ok = f.Execute()
        if ok != 1:
            print 'Cancelled'
            return False
        struct = f.structure.value
        address = f.address.value
        register = f.register.value
        delta = f.delta.value
        f.Free()
    # Open the structure.
    sid = idau.struct_open(struct, create=True)
    if sid is None:
        print 'Could not open struct {}'.format(struct)
        return False
    # Check that the address is in a function.
    if not idaapi.get_func(address):
        print 'Address {:#x} is not a function'.format(address)
        return False
    # Get the register id.
    register_id = None
    if type(register) is str:
        register_id = idaapi.str2reg(register)
    elif type(register) is int:
        # Accept a raw register number; normalize the name for messages.
        register_id = register
        register = idaapi.get_reg_name(register_id, 8)
    if register_id is None or register_id < 0:
        print 'Invalid register {}'.format(register)
        return False
    # Validate delta.
    if delta < 0 or delta > 0x1000000:
        print 'Invalid delta {}'.format(delta)
        return False
    print 'struct = {}, address = {:#x}, register = {}, delta = {:#x}'.format(struct, address,
            register, delta)
    # Run the data flow to collect the accesses and then add those fields to the struct.
    accesses = kc.data_flow.pointer_accesses(function=address,
            initialization={ address: { register_id: delta } })
    kc.build_struct.create_struct_fields(sid, accesses=accesses)
    # Set the offsets to stroff.
    # NOTE: the loop variable delta below rebinds the validated delta above;
    # harmless here because delta is not used again afterwards.
    for addresses_and_deltas in accesses.values():
        for ea, delta in addresses_and_deltas:
            insn = idautils.DecodeInstruction(ea)
            if insn:
                for op in insn.Operands:
                    if op.type == idaapi.o_displ:
                        idc.OpStroffEx(ea, op.n, sid, delta)
    # All done! :)
    print 'Done'
    return True
# Run the workflow immediately: the script is intended to be executed from
# within IDA (e.g. File > Script file...), so loading it pops the form.
kernelcache_populate_struct()
| mit | Python | |
d268c5870623b1c5f6da202264cb1b399f037ec8 | Create rename.py | nrikee/Unix-Like_tools_in_Python | rename.py | rename.py | import sys
import os
# Batch renamer: reads two newline-separated name lists (current names and
# replacement names) and renames matching files in the current directory.
# NOTE(review): the usage string advertises "[path]" but this check requires
# exactly three arguments, and the third argument is never used (path is
# hard-coded to '.') -- confirm the intended CLI.
if len ( sys.argv ) == 4:
    args = sys.argv [ 1: ]
else:
    print 'Usage: python rename.py [path]'
    sys.exit ( 0 )

# Snapshot of the working directory; only files present here get renamed.
path = '.'
filenames = os.listdir ( str ( path ) )

# Check some things
# args[0]: file listing the current names, one per line.
text = open ( args [ 0 ], 'r' ).read ( )
original_names = text.split ( '\n' )
# args[1]: file listing the replacement names, one per line.
text = open ( args [ 1 ], 'r' ).read ( )
new_names = text.split ( '\n' )

# Refuse to run when any name (old or new) appears more than once, since a
# duplicate could make two renames collide.
all_names = [ ]
all_names.extend ( original_names )
all_names.extend ( new_names )
if len ( all_names ) != len ( set ( all_names ) ):
    print 'Something is incorrect. Maybe duplicated names.'
    sys.exit ( 0 )

# Pair old/new names positionally and rename whichever old names exist here.
for pair in zip ( original_names, new_names ):
    if pair [ 0 ] in filenames:
        os.rename ( pair [ 0 ], pair [ 1 ] )
| mit | Python | |
8e61c18d23812a70d65ec42d7c36c5f1b7ed829d | add script for 50bp window gff summary. | brentp/methylcode,brentp/methylcode,brentp/methylcode,brentp/methylcode | scripts/summarize_gff50.py | scripts/summarize_gff50.py | import sys
import os.path as op
sys.path.insert(0, "/home/brentp/src/methylcode/code/")
from methyl import MethylGroup
# Command-line arguments: a MethylGroup output prefix and a methylation
# context to summarize.
prefix = sys.argv[1] # something like: out1234n/thaliana_v9
acontext = sys.argv[2] # CHH or CHG or CG
window = 50  # summary window size in basepairs

mg = MethylGroup(prefix)
# Output GFF file named after the prefix, window size and context.
fh = open(mg.dir + mg.prefix + ".test.%ibp.%s.gff" % (window, acontext), "w")
print >>sys.stderr, "writing to %s" % (fh.name, )
print >>fh, "##gff-version 3"
# Record the exact command line in the header (with an absolute prefix path).
sys.argv[1] = op.abspath(sys.argv[1])
print >>fh, "#%s" % " ".join(sys.argv)

# One GFF feature per window per chromosome; score = c / (c + t), i.e. the
# fraction of reads supporting methylation in that window.
for chr, m in mg.iteritems():
    cs, ts, mask = m.as_context(acontext)
    bp_max = len(ts)
    for start in range(0, bp_max + 1, window):
        end = min(start + window, bp_max)
        t_count = ts[start:end].sum()
        c_count = cs[start:end].sum()
        n = mask[start:end].sum()
        if c_count + t_count == 0:
            # No informative reads in this window.
            plot = methyl = 0.0
        else:
            plot = methyl = c_count / float(c_count + t_count)
        strand = "."
        plot = "%.3g" % plot
        # Attributes keep the raw counts so the ratio can be recomputed.
        attrs="c=%i;t=%i;n=%i" % (c_count, t_count, n)
        # GFF coordinates are 1-based inclusive, hence start + 1.
        print >>fh, "\t".join(map(str, [chr, sys.argv[0], "dmc", start + 1, end, plot, strand, ".", attrs]))
| bsd-3-clause | Python | |
3dc9204c80f2f7be5f82200c059a6a62f02bf6c1 | Update blogroll and social links. | enthought/distarray,enthought/distarray,RaoUmer/distarray,RaoUmer/distarray | www/pelicanconf.py | www/pelicanconf.py | #!/usr/bin/env python
# -*- coding: utf-8 -*- #
# Pelican static-site settings for the DistArray website.
from __future__ import unicode_literals

AUTHOR = u'IPython development team and Enthought, Inc.'
SITENAME = u'DistArray'
SITEURL = ''

# Directory holding the site content (articles/pages).
PATH = 'content'

TIMEZONE = 'America/Chicago'

DEFAULT_LANG = u'en'

# Feed generation is usually not desired when developing
FEED_ALL_ATOM = None
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None

# Blogroll
LINKS = (('NumPy', 'http://www.numpy.org/'),
         ('SciPy', 'http://www.scipy.org'),
         ('IPython', 'http://ipython.org/'),
         ('Enthought', 'http://www.enthought.com/'),
         )

# Social widget
SOCIAL = (('github', 'https://github.com/enthought/distarray'),)

DEFAULT_PAGINATION = False

# Uncomment following line if you want document-relative URLs when developing
#RELATIVE_URLS = True
| #!/usr/bin/env python
# -*- coding: utf-8 -*- #
# Pelican static-site settings for the DistArray website.
# NOTE(review): LINKS and SOCIAL still hold the pelican-quickstart
# placeholder entries; replace them with real project links.
from __future__ import unicode_literals

AUTHOR = u'IPython development team and Enthought, Inc.'
SITENAME = u'DistArray'
SITEURL = ''

# Directory holding the site content (articles/pages).
PATH = 'content'

TIMEZONE = 'America/Chicago'

DEFAULT_LANG = u'en'

# Feed generation is usually not desired when developing
FEED_ALL_ATOM = None
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None

# Blogroll
LINKS = (('Pelican', 'http://getpelican.com/'),
         ('Python.org', 'http://python.org/'),
         ('Jinja2', 'http://jinja.pocoo.org/'),
         ('You can modify those links in your config file', '#'),)

# Social widget
SOCIAL = (('You can add links in your config file', '#'),
          ('Another social link', '#'),)

DEFAULT_PAGINATION = False

# Uncomment following line if you want document-relative URLs when developing
#RELATIVE_URLS = True
| bsd-3-clause | Python |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.