| commit (stringlengths 40–40) | subject (stringlengths 1–3.25k) | old_file (stringlengths 4–311) | new_file (stringlengths 4–311) | old_contents (stringlengths 0–26.3k) | lang (stringclasses 3 values) | proba (float64 0–1) | diff (stringlengths 0–7.82k) |
|---|---|---|---|---|---|---|---|
8117b3bee367afea107f7ef4b2003006e0ea857e
|
Create anteater.py
|
anteater.py
|
anteater.py
|
Python
| 0.000005
|
@@ -0,0 +1,1854 @@
+#!/usr/bin/python
# -*- coding: utf-8 -*-

###########################################
# 27.11.2012 | word scraper bot wsb.py #
# by PirateSecurity # http://piratesec.de #
###########################################

import mechanize
import cookielib
import urllib2
from bs4 import BeautifulSoup
import re
import sys
import unicodedata
import os
import urlparse

# I am a browser, please don't block me :_)
browser = mechanize.Browser()
cookies = cookielib.MozillaCookieJar('cookie_jar')
browser.set_cookiejar(cookies)
browser.set_handle_redirect(True)
browser.set_handle_robots(False)
browser.set_handle_equiv(True)
browser.set_handle_gzip(False)
browser.set_handle_referer(True)
browser.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)
browser.addheaders = [('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:14.0) Gecko/20120405 Firefox/14.0a1')]

# Lists
linklist = []
wordlist = []

# Containers
wordfile = open('wordlist.txt', 'w')
linkfile = open('links.txt', 'w')

# Cook the soup and filter it for me
initiallink = raw_input("enter target to scrape for words_> ")
linklist.append(initiallink)
print "scraping... Press Ctrl+C to exit."

# Main Loop
###########

for link in linklist:
    try:
        soup = BeautifulSoup((browser.open(link)).read())
        filteredsoup = re.findall(r"(?:\s|^)(\w+)(?=\s|$)", ((soup.get_text()).encode('utf-8', 'ignore')))

        for word in filteredsoup:
            if (word not in wordlist) and (len(word) > 2 and len(word) < 12):
                wordlist.append(word)
                wordfile.write(str(word) + '\n')

        for eachnewlink in soup.findAll('a', href=True):
            eachnewlink['href'] = urlparse.urljoin(link, eachnewlink['href'])
            if eachnewlink not in linklist:
                linklist.append(eachnewlink['href'])
                linkfile.write(str(eachnewlink['href'].encode('utf-8', 'ignore')) + '\n')
    except:
        continue
|
|
f2c47ccf852e1a2b2a68f4d7ac1e72409ddfad3e
|
Create scratch.py
|
scratch.py
|
scratch.py
|
Python
| 0.000002
|
@@ -0,0 +1,513 @@
+import os
import urllib

DOWNLOADS_DIR = '/python-downloader/downloaded'

# For every line in the file
for url in open('urls.txt'):
    # Split on the rightmost / and take everything on the right side of that
    name = url.rsplit('/', 1)[-1]

    # Combine the name and the downloads directory to get the local filename
    filename = os.path.join(DOWNLOADS_DIR, name)

    # Download the file if it does not exist
    if not os.path.isfile(filename):
        urllib.urlretrieve(url, filename)
|
|
8e3b2b6103a591dae2b99d7e219722e0992dae65
|
Add CatagoryMgr
|
CatagoryManager.py
|
CatagoryManager.py
|
Python
| 0.000003
|
@@ -0,0 +1,854 @@
+from DbHelper import DbHelper


class CatagoryManager(object):
    TABLE_NAME = "cactagory"
    FIELD_TITLE = "title"
    FIELD_COUNT = "count"

    def __init__(self):
        self.db = DbHelper()
        sql = "CREATE TABLE IF NOT EXISTS `" + self.TABLE_NAME + "` (" \
            + self.FIELD_TITLE + " TEXT NOT NULL," \
            + self.FIELD_COUNT + " INT NOT NULL DEFAULT '1'," \
            + "PRIMARY KEY (`" + self.FIELD_TITLE + "`(100)));"

        self.db.execute(sql)

    def add(self, entry):
        sql = "INSERT INTO `" + self.TABLE_NAME + "` VALUES ('" \
            + entry.title + "', 1) " \
            + "ON DUPLICATE KEY UPDATE " + self.FIELD_COUNT + " = " + self.FIELD_COUNT + " + 1;"
        self.db.execute(sql)

    def clear(self):
        sql = "DELETE FROM " + self.TABLE_NAME + ";"
        self.db.execute(sql)
|
|
dcc64e9fd8bb3cb407959a30a2054fc180596bae
|
Add Pandas integration unit tests
|
tests/test_pandas_integration.py
|
tests/test_pandas_integration.py
|
Python
| 0
|
@@ -0,0 +1,1321 @@
+from unittest import TestCase

import numpy as np
import pandas as pd
import numpy.testing as npt

from nimble import Events


class TestAsPandasCondition(TestCase):
    def setUp(self):
        conditional_series = pd.Series([0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1])
        condition = (conditional_series > 0)
        self.events = Events(condition)

    def test_as_series(self):
        validation_series = pd.Series([0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1])
        test_series = self.events.as_series()
        test_series.equals(validation_series)

    def test_as_array(self):
        validation_array = np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1])
        npt.assert_array_equal(validation_array, self.events.as_array())


class TestAsNpArrCondition(TestCase):
    def setUp(self):
        conditional_array = np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1])
        condition = (conditional_array > 0)
        self.events = Events(condition)

    def test_as_series(self):
        validation_series = pd.Series([0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1])
        test_series = self.events.as_series()
        test_series.equals(validation_series)

    def test_as_array(self):
        validation_array = np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1])
        npt.assert_array_equal(validation_array, self.events.as_array())
|
|
1696ca33e644d3cb1138d7ee4c48239b7a757cfd
|
Add the first script to try a led light
|
python_scripts/gpio_test.py
|
python_scripts/gpio_test.py
|
Python
| 0.000001
|
@@ -0,0 +1,198 @@
+import RPi.GPIO as GPIO
import time
GPIO.setmode(GPIO.BOARD)
GPIO.setup(7,GPIO.OUT)
for x in range(0,10):
    GPIO.output(7,True)
    time.sleep(1)
    GPIO.output(7,False)
    time.sleep(1)
GPIO.cleanup()
|
|
5c091ea20a531692c676f9d4e0f450c0e642f740
|
Create module for uptime retrieval
|
Modules/Uptime.py
|
Modules/Uptime.py
|
Python
| 0
|
@@ -0,0 +1,1229 @@
+from collections import OrderedDict
from ModuleInterface import ModuleInterface
from IRCResponse import IRCResponse, ResponseType
import datetime


class Module(ModuleInterface):
    triggers = ["uptime"]
    help = "uptime -- returns the uptime for the bot"

    def onTrigger(self, Hubbot, message):
        now = datetime.datetime.now()
        timeDelta = now - Hubbot.startTime
        return IRCResponse(ResponseType.Say, "I have been running for {}".format(self.deltaTimeToString(timeDelta)), message.ReplyTo)

    def deltaTimeToString(self, timeDelta):
        """
        @type timeDelta: timedelta
        """
        d = OrderedDict()
        d['days'] = timeDelta.days
        d['hours'], rem = divmod(timeDelta.seconds, 3600)
        d['minutes'], d['seconds'] = divmod(rem, 60)  # replace _ with d['seconds'] to get seconds

        def lex(durationWord, duration):
            if duration == 1:
                return '{0} {1}'.format(duration, durationWord[:-1])
            else:
                return '{0} {1}'.format(duration, durationWord)

        deltaString = ' '.join([lex(word, number) for word, number in d.iteritems() if number > 0])
        return deltaString if len(deltaString) > 0 else 'seconds'
|
|
e39270b69e1e8831c177bf4e5051726e6a678407
|
Add wsgi python script [ci skip]
|
app/wsgi.py
|
app/wsgi.py
|
Python
| 0
|
@@ -0,0 +1,384 @@
+import os
import sys

root = os.path.dirname(os.path.realpath(__file__ + '/..'))

# activate the virtual env
activate_this = root + '/venv/bin/activate_this.py'
execfile(activate_this, dict(__file__=activate_this))

sys.path.insert(0, root)

# set the environment variable to production
os.environ['FLASK_ENV'] = "PRODUCTION"

# run the application
from app import app as application
|
|
655bf4b4159e70b4a99185a1735ac63c3ee951dc
|
Add script to filter result by result type.
|
analysis/filter-by-result-type.py
|
analysis/filter-by-result-type.py
|
Python
| 0
|
@@ -0,0 +1,1142 @@
+#!/usr/bin/env python
import argparse
import os
import logging
import pprint
import sys
import yaml

# HACK
_file = os.path.abspath(__file__)
_dir = os.path.dirname(os.path.dirname(_file))
sys.path.insert(0, _dir)
from BoogieRunner.ResultType import ResultType

def main(args):
  resultTypes = [r.name for r in list(ResultType)]  # Get list of ResultTypes as strings
  logging.basicConfig(level=logging.DEBUG)
  parser = argparse.ArgumentParser()
  parser.add_argument('result_yml', type=argparse.FileType('r'), default='-')
  parser.add_argument('result_type', choices=resultTypes)
  pargs = parser.parse_args(args)

  results = yaml.load(pargs.result_yml)

  assert isinstance(results, list)

  # Get out of requested type
  resultCode = ResultType[pargs.result_type].value

  count = 0
  collected = []
  for r in results:
    if r['result'] == resultCode:
      count += 1
      collected.append(r)

  logging.info('Count of type {} : {}'.format(pargs.result_type, count))

  print(yaml.dump(collected, default_flow_style=False))

if __name__ == '__main__':
  sys.exit(main(sys.argv[1:]))
|
|
983a89d7b400ec71806311cfe6a576d85dd59958
|
Create distance.py
|
abstractions/sensor/distance/distance.py
|
abstractions/sensor/distance/distance.py
|
Python
| 0.001409
|
@@ -0,0 +1,1646 @@
+# This code has to be added to __init__.py in folder .../devices/sensor

class Distance():
    def __family__(self):
        return "Distance"

    def __getMillimeter__(self):
        raise NotImplementedError

    @api("Distance", 0)
    @request("GET", "sensor/distance/*")
    @response(contentType=M_JSON)
    def distanceWildcard(self):
        values = {}
        distance = self.__getMillimeter__()
        values["mm"] = "%.02f" % distance
        values["cm"] = "%.02f" % (distance / 10)
        values["m"] = "%.02f" % (distance / 1000)
        values["in"] = "%.02f" % (distance / 25.4)
        values["ft"] = "%.02f" % (distance / 25.4 / 12)
        values["yd"] = "%.02f" % (distance / 25.4 / 36)
        return values

    @api("Distance")
    @request("GET", "sensor/distance/mm")
    @response("%.02f")
    def getMillimeter(self):
        return self.__getMillimeter__()

    @api("Distance")
    @request("GET", "sensor/distance/cm")
    @response("%.02f")
    def getCentimeter(self):
        return self.getMillimeter() / 10

    @api("Distance")
    @request("GET", "sensor/distance/m")
    @response("%.02f")
    def getMeter(self):
        return self.getMillimeter() / 1000

    @api("Distance")
    @request("GET", "sensor/distance/in")
    @response("%.02f")
    def getInch(self):
        return self.getMillimeter() / 25.4

    @api("Distance")
    @request("GET", "sensor/distance/ft")
    @response("%.02f")
    def getFoot(self):
        return self.getInch() / 12

    @api("Distance")
    @request("GET", "sensor/distance/yd")
    @response("%.02f")
    def getYard(self):
        return self.getInch() / 36
|
|
bf93b3b4c8965e31e5b9b8ebdbf3f1b1d258e15e
|
Add a new script to simplify profiling of cvs2svn.py. Document in the script how to use kcachegrind to view the results.
|
tools/cvs2svn/profile-cvs2svn.py
|
tools/cvs2svn/profile-cvs2svn.py
|
Python
| 0.000009
|
@@ -0,0 +1,1068 @@
+#!/usr/bin/env python
#
# Use this script to profile cvs2svn.py using Python's hotshot profiler.
#
# The profile data is stored in cvs2svn.hotshot. To view the data using
# hotshot, run the following in python:
#
#   import hotshot.stats
#   stats = hotshot.stats.load('cvs2svn.hotshot')
#   stats.strip_dirs()
#   stats.sort_stats('time', 'calls')
#   stats.print_stats(20)
#
# It is also possible (and a lot better) to use kcachegrind to view the data.
# To do so, you must first convert the data to the cachegrind format using
# hotshot2cachegrind, which you can download from the following URL:
#
#   http://kcachegrind.sourceforge.net/cgi-bin/show.cgi/KcacheGrindContribPython
#
# Convert the data using the following command:
#
#   hotshot2cachegrind -o cachegrind.out cvs2svn.hotshot
#
# Depending on the size of the repository, this can take a long time. When
# the conversion is done, simply open cachegrind.out in kcachegrind.

import cvs2svn, hotshot

prof = hotshot.Profile('cvs2svn.hotshot')
prof.runcall(cvs2svn.main)
prof.close()
|
|
61f06365254c57ced68beb83714164186164d939
|
add solutin for LRU Cache
|
src/LRUCache.py
|
src/LRUCache.py
|
Python
| 0
|
@@ -0,0 +1,1519 @@
+class LRUCache:

    class ListNode:

        def __init__(self, val):
            self.val = val
            self.next = None
            self.prev = None

    # @param capacity, an integer
    def __init__(self, capacity):
        self.capacity = capacity
        self.size = 0
        self.cache = {}
        self.head = self.ListNode(-1)
        self.tail = self.ListNode(-1)
        self.head.next = self.tail
        self.tail.prev = self.head

    # @return an integer
    def get(self, key):
        if key in self.cache:
            self._refresh(self.cache[key][1])
            return self.cache[key][0]
        return -1

    # @param key, an integer
    # @param value, an integer
    # @return nothing
    def set(self, key, value):
        if key not in self.cache:
            self._refresh(self.ListNode(key))
            self.size += 1
        else:
            self._refresh(self.cache[key][1])
        self.cache[key] = [value, self.head.next]
        if self.size > self.capacity:
            node = self.tail.prev
            del self.cache[node.val]
            self._delete_node(node)
            self.size = self.capacity

    def _refresh(self, node):
        if self.head.next == node:
            return
        if node.next:
            self._delete_node(node)
        node.prev = self.head
        node.next = self.head.next
        self.head.next = node
        node.next.prev = node

    def _delete_node(self, node):
        node.prev.next = node.next
        node.next.prev = node.prev
|
|
3a0fdcf51e1db8abab900a6cc1b4596d0dc4b054
|
automate fab process
|
automata.py
|
automata.py
|
Python
| 0.000818
|
@@ -0,0 +1,529 @@
+import pexpect
import getpass

version = raw_input('Version: ')
secret = getpass.getpass('Enter Passphrase: ')
github_username = 'ianjuma'

clean = pexpect.spawn('fab clean')
clean.expect('Passphrase for private key:')
clean.send(secret)

deploy = pexpect.spawn('fab deploy:%s' % (version,))
deploy.expect('Passphrase for private key:')
deploy.sendline(secret)
deploy.expect("Username for 'https://github.com':")
deploy.sendline(github_username)
deploy.expect("Password for 'https://ianjuma@github.com':")
deploy.sendline(secret)
|
|
5ea95763c541b30a4b3f9ef5dbfa201b24ae5293
|
Create get_gg_list_result.py
|
get_gg_list_result.py
|
get_gg_list_result.py
|
Python
| 0.000003
|
@@ -0,0 +1,1681 @@
+import time
from splinter import Browser

def splinter(url, browser):
    # login to the 126 email website
    browser.visit(url)
    # wait for web elements to load
    time.sleep(5)
    # fill in account and password
    browser.find_by_id('idInput').fill('xxxxxx')
    browser.find_by_id('pwdInput').fill('xxxxx')
    # click the login button
    browser.find_by_id('loginBtn').click()
    time.sleep(8)
    # close the browser window

def getAll(browser):
    element_list = browser.find_by_id("ires")
    rel = element_list.find_by_tag('h3')
    for i in rel:
        print i.find_by_tag("a").text
        e = i.find_by_tag("a")
        print e['href']  # ok!!
        data.append(e["href"])

def visitGG(url, text):
    browser.visit(url)
    time.sleep(2)
    browser.fill('q', text)
    browser.find_by_name('btnK').click()
    if browser.is_text_present(text):
        print 'yes, found it'
    else:
        print 'no. didn t find it'

    getAll(browser)
    print data
    print "next page ========="
    # next page
    browser.find_by_id('pnnext').click()
    time.sleep(2)
    if browser.is_text_present(text):
        print 'yes, found it'
    else:
        print 'no. didn t find it'
    getAll(browser)
    print data

def visitFB(url):
    # 1. visit
    browser.visit(url)
    time.sleep(2)
    # 2. common
    pass


if __name__ == '__main__':
    data = []

    browser = Browser('chrome')
    text = "pet site:facebook.com"
    visitGG("http://www.google.com", text)

    time.sleep(2)

    for i in data:
        print "start action"
        visitFB(i)

    # websize3 = 'http://www.126.com'
    # splinter(websize3)

    # browser.quit()
|
|
3095142aa814e51e8fcde4d53633a93a54a7574f
|
Index main label reference
|
migrations/versions/e679554261b2_main_label_index.py
|
migrations/versions/e679554261b2_main_label_index.py
|
Python
| 0
|
@@ -0,0 +1,469 @@
+"""Main label index

Revision ID: e679554261b2
Revises: e2be4ab896d3
Create Date: 2019-05-09 18:55:24.472216

"""

# revision identifiers, used by Alembic.
revision = 'e679554261b2'
down_revision = 'e2be4ab896d3'

from alembic import op
import sqlalchemy as sa  # NOQA


def upgrade():
    op.create_index(op.f('ix_label_main_label_id'), 'label', ['main_label_id'], unique=False)


def downgrade():
    op.drop_index(op.f('ix_label_main_label_id'), table_name='label')
|
|
ff51c695b516ea7e16518779c66ebd827b4f6230
|
Clean up Encode
|
python/pyphantomjs/encoding.py
|
python/pyphantomjs/encoding.py
|
'''
This file is part of the PyPhantomJS project.
Copyright (C) 2011 James Roe <roejames12@hotmail.com>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import codecs
class Encode(object):
def __init__(self, encoding, default):
# check that encoding is valid
try:
codecs.lookup(encoding)
self.encoding = encoding
self._encoding = encoding.lower()
except LookupError:
# fall back to default encoding
self.encoding = default
self._encoding = default.lower()
@property
def name(self):
return codecs.lookup(self.encoding).name
|
Python
| 0.951529
|
@@ -950,54 +950,8 @@
ing
-        self._encoding = encoding.lower()
@@ -1057,53 +1057,8 @@
ault
-
        self._encoding = default.lower()
|
815d758f74e01bc7a460e211ffb9cb81fedb9726
|
add 0002
|
Jaccorot/0002/0002.py
|
Jaccorot/0002/0002.py
|
Python
| 0.999998
|
@@ -0,0 +1,1033 @@
+#!/usr/local/bin/python
#coding=utf-8

# Problem 0002: save the 200 activation codes (or coupons) generated in
# problem 0001 into a MySQL relational database.

import uuid
import MySQLdb

def create_code(num, length):
    # Generate "num" activation codes, each "length" characters long
    result = []
    while True:
        uuid_id = uuid.uuid1()
        temp = str(uuid_id).replace('-', '')[:length]
        if temp not in result:
            result.append(temp)
        if len(result) == num:
            break
    return result


def save_to_mysql(num_list):
    conn = MySQLdb.connect(host='localhost', user='root', passwd='aaaa', port=3306)
    cur = conn.cursor()

    sql_create_database = 'create database if not exists activecode_db'
    cur.execute(sql_create_database)

    conn.select_db("activecode_db")
    sql_create_table = 'create table if not exists active_codes(active_code char(32))'
    cur.execute(sql_create_table)

    cur.executemany('insert into active_codes values(%s)', num_list)

    conn.commit()
    cur.close()
    conn.close()

code_num = create_code(200, 20)
#print code_num
save_to_mysql(code_num)
|
|
715be73c6fc5f3e5d151a0534309d097c73f1963
|
Fix listsinceblock.py
|
qa/rpc-tests/listsinceblock.py
|
qa/rpc-tests/listsinceblock.py
|
#!/usr/bin/env python3
# Copyright (c) 2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal
class ListSinceBlockTest (BitcoinTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 4
def run_test (self):
'''
`listsinceblock` did not behave correctly when handed a block that was
no longer in the main chain:
            ab0
           /   \
    aa1 [tx0]   bb1
        |        |
       aa2      bb2
        |        |
       aa3      bb3
                 |
                bb4
Consider a client that has only seen block `aa3` above. It asks the node
to `listsinceblock aa3`. But at some point prior the main chain switched
to the bb chain.
Previously: listsinceblock would find height=4 for block aa3 and compare
this to height=5 for the tip of the chain (bb4). It would then return
results restricted to bb3-bb4.
Now: listsinceblock finds the fork at ab0 and returns results in the
range bb1-bb4.
This test only checks that [tx0] is present.
'''
assert_equal(self.is_network_split, False)
self.nodes[2].generate(101)
self.sync_all()
assert_equal(self.nodes[0].getbalance(), 0)
assert_equal(self.nodes[1].getbalance(), 0)
assert_equal(self.nodes[2].getbalance(), 50)
assert_equal(self.nodes[3].getbalance(), 0)
# Split network into two
self.split_network()
assert_equal(self.is_network_split, True)
# send to nodes[0] from nodes[2]
senttx = self.nodes[2].sendtoaddress(self.nodes[0].getnewaddress(), 1)
# generate on both sides
lastblockhash = self.nodes[1].generate(6)[5]
self.nodes[2].generate(7)
print('lastblockhash=%s' % (lastblockhash))
self.sync_all()
self.join_network()
# listsinceblock(lastblockhash) should now include tx, as seen from nodes[0]
lsbres = self.nodes[0].listsinceblock(lastblockhash)
found = False
for tx in lsbres['transactions']:
if tx['txid'] == senttx:
found = True
break
assert_equal(found, True)
if __name__ == '__main__':
ListSinceBlockTest().main()
|
Python
| 0.000014
|
@@ -1657,16 +1657,17 @@
ce(), 50
+0
)
|
f015f18d1d4c495cb0b909b93e51d97f00301d59
|
Set the id of the person, not the roll call item
|
pupa/importers/votes.py
|
pupa/importers/votes.py
|
from pupa.models import Vote
from .utils import (people_by_jurisdiction_and_name,
orgs_by_jurisdiction_and_name,
bills_by_jurisdiction_and_name)
from .base import BaseImporter
class VoteImporter(BaseImporter):
_type = 'vote'
_model_class = Vote
def __init__(self, jurisdiction_id,
person_importer, org_importer, bill_importer):
super(VoteImporter, self).__init__(jurisdiction_id)
self.person_importer = person_importer
self.bill_importer = bill_importer
self.org_importer = org_importer
def get_db_spec(self, vote):
spec = {
"motion": vote.motion,
"chamber": vote.chamber,
"date": vote.date,
"jurisdiction_id": vote.jurisdiction_id,
}
return spec
def prepare_object_from_json(self, obj):
bill = obj.get('bill', None)
if bill:
if bill.get('id'):
# We've been given a hard ID. Let's use it to match against
# the real bill. (since the scraper knew what they were doing)
bill['id'] = self.bill_importer.resolve_json_id(bill['id'])
else:
# OK. Right. We weren't given an ID. Let's try to do a
# match by name.
bills = bills_by_jurisdiction_and_name(
obj['jurisdiction_id'],
bill['name'],
)
if bills.count() != 1:
self.warning("Can't resolve bill `%s'" % (bill['name']))
else:
bill_obj = bills[0]
bill['id'] = bill_obj['_id']
for vote in obj['roll_call']:
who = vote['person']
people = people_by_jurisdiction_and_name(
obj['jurisdiction_id'],
who['name'],
chamber=who.get('chamber')
)
if people.count() != 1:
self.warning("can't match `%s'" % (who['name']))
continue # can't match
person_obj = people[0]
vote['id'] = person_obj['_id']
org = obj.get('organization')
org_id = obj.get('organization_id')
if org and not org_id: # OK. We have an org that needs matching.
orgs = orgs_by_jurisdiction_and_name(
obj['jurisdiction_id'],
org,
) # get all matching orgs.
if orgs.count() == 1:
org_obj = orgs[0] # Let's get the only result.
obj['organization_id'] = org_obj['_id']
else:
self.warning("can't match `%s'" % (org))
elif org_id: # We have a sort of org ID
if org is None: # If we have the ID but no the name (odd...)
raise ValueError("Someone set an org_id without an org name.")
org_json_id = obj['organization_id'] # scrape-time match?
if org_json_id and not org_json_id.startswith("ocd-organization"):
obj['organization_id'] = self.org_importer.resolve_json_id(
org_json_id) # resolve it.
return obj
|
Python
| 0.000012
|
@@ -2133,16 +2133,26 @@
vote['
+person']['
id'] = p
|
c3e8a9a60410ca4494038ba9f3a774b960a8a29e
|
Create quiz3.py
|
Laboratorios/quiz3.py
|
Laboratorios/quiz3.py
|
Python
| 0.000002
|
@@ -0,0 +1,160 @@
+
segundos = 0
chance = 0
while chance < 6:
    mints_seg = int(input("ingrese el tiempo en segundos:"))
    chance += 1
    if mints_seg // 60:
        segundos = mints_seg % 60
        print(segundos)
|
|
71a6d0a032896f4ef2e9a4cda541d142f2c48171
|
Add unittests for environment handler.
|
typhon/tests/test_environment.py
|
typhon/tests/test_environment.py
|
Python
| 0
|
@@ -0,0 +1,1117 @@
+# -*- coding: utf-8 -*-
"""Testing the environment/configuration handler.
"""
import os
from copy import copy

import pytest

from typhon import environment


class TestEnvironment:
    """Testing the environment handler."""
    def setup_method(self):
        """Run all test methods with an empty environment."""
        self.env = copy(os.environ)
        os.environ = {}

    def teardown_method(self):
        """Restore old environment."""
        os.environ = self.env

    def test_get_environment_variables(self):
        """Test if environment variables are considered."""
        os.environ['TYPHON_ENV_TEST'] = 'TEST_VALUE'

        assert environment.environ['TYPHON_ENV_TEST'] == 'TEST_VALUE'

    def test_set_environment_variables(self):
        """Test if environment variables are updated."""
        environment.environ['TYPHON_ENV_TEST'] = 'TEST_VALUE'

        assert os.environ['TYPHON_ENV_TEST'] == 'TEST_VALUE'

    def test_undefined_variable(self):
        """Test behavior for undefined variables."""
        with pytest.raises(KeyError):
            environment.environ['TYPHON_ENV_TEST']
|
|
d84a0b0d50fb4d01b2a2354d5317afd181f1053c
|
Add Random Forest Regression in Python
|
Regression/RandomForestRegression/regularRandomForestRegression.py
|
Regression/RandomForestRegression/regularRandomForestRegression.py
|
Python
| 0.000002
|
@@ -0,0 +1,2237 @@
+# -*- coding: utf-8 -*-
"""Random Forest Regression for machine learning.

Random forest is a supervised learning algorithm. As the name suggests, it
builds a forest from a number of decision trees.

In general, the more trees in the forest, the more robust the model; likewise,
in the random forest classifier, a higher number of trees in the forest gives
higher accuracy results.


Example:

    $ python regularRandomForestRegression.py

Todo:
    *
"""
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.ensemble import RandomForestRegressor
# from sklearn.preprocessing import StandardScaler
# from sklearn.model_selection import train_test_split


# Importing the dataset
dataset = pd.read_csv('Position_Salaries.csv')
features = dataset.iloc[:, 1:2].values
labels = dataset.iloc[:, 2].values

# Splitting the Dataset into a Training set and a Test set
"""feature_train, feature_test, label_train, label_test = train_test_split(
    features, labels, test_size=0.2)
"""
# Feature scaling, normalize scale is important. Especially on algorithms
# involving euclidian distance. Two main feature scaling formulas are:
# Standardisation: x_stand = (x-mean(x))/(standard_deviation(x))
# Normalisation: x_norm = (x-min(x))/(max(x)-min(x))
"""sc_feature = StandardScaler()
feature_train = sc_feature.fit_transform(feature_train)
feature_test = sc_feature.transform(feature_test)
sc_labels = StandardScaler()
labels_train = sc_labels.fit_transform(labels_train)
labels_test = sc_labels.transform(labels_test)
"""

# Fit the Random Forest Regression to the dataset
regressor = RandomForestRegressor(n_estimators=310, random_state=0)
regressor.fit(features, labels)


# Predict new result with the Random Forest Regression
y_pred = regressor.predict(6.5)


# Visualising the regression results with smoother curve
x_grid = np.arange(min(features), max(features), 0.01)
x_grid = x_grid.reshape((len(x_grid), 1))
plt.scatter(features, labels, color='r')
plt.plot(x_grid, regressor.predict(x_grid), color='b')
plt.title('Truth or Bluff (Random Forest Regression)')
plt.xlabel('Position level')
plt.ylabel('Salary')
plt.show()
|
|
8bdab0460cf280a63538e8c56650a90109cda283
|
add PermMissingElem.py - working
|
PermMissinElem.py
|
PermMissinElem.py
|
Python
| 0
|
@@ -0,0 +1,88 @@
+def solution(A):
    euler = (len(A) + 1) * (len(A) + 2) / 2
    return euler - sum(A)
|
|
8f60b540e44fd13787c11303d81f570861c74bcf
|
make M5_PATH a real search path
|
configs/common/SysPaths.py
|
configs/common/SysPaths.py
|
# Copyright (c) 2006 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Ali Saidi
import os, sys
from os.path import isdir, join as joinpath
from os import environ as env
config_path = os.path.dirname(os.path.abspath(__file__))
config_root = os.path.dirname(config_path)
def disk(file):
system()
return joinpath(disk.dir, file)
def binary(file):
system()
return joinpath(binary.dir, file)
def script(file):
system()
return joinpath(script.dir, file)
def system():
if not system.dir:
try:
path = env['M5_PATH'].split(':')
except KeyError:
path = [ '/dist/m5/system', '/n/poolfs/z/dist/m5/system' ]
for system.dir in path:
if os.path.isdir(system.dir):
break
else:
raise ImportError, "Can't find a path to system files."
if not binary.dir:
binary.dir = joinpath(system.dir, 'binaries')
if not disk.dir:
disk.dir = joinpath(system.dir, 'disks')
if not script.dir:
script.dir = joinpath(config_root, 'boot')
system.dir = None
binary.dir = None
disk.dir = None
script.dir = None
|
Python
| 0.000858
|
@@ -1755,16 +1755,203 @@
_path)

+def searchpath(path, file):
    for p in path:
        f = joinpath(p, file)
        if os.path.exists(f):
            return f
    raise IOError, "Can't find file '%s' on path." % file

def disk
@@ -1978,28 +1978,30 @@
    return 
-join
+search
path(disk.di
@@ -1998,19 +1998,20 @@
th(disk.
-dir
+path
, file)
@@ -2049,28 +2049,30 @@
    return 
-join
+search
path(binary.
@@ -2071,19 +2071,20 @@
(binary.
-dir
+path
, file)
@@ -2126,20 +2126,22 @@
 return 
-join
+search
path(scr
@@ -2144,19 +2144,20 @@
(script.
-dir
+path
, file)
@@ -2189,19 +2189,20 @@
 system.
-dir
+path
:
@@ -2208,20 +2208,16 @@
 try:
-    
@@ -2286,28 +2286,24 @@
-    
path = [ '/d
@@ -2362,108 +2362,123 @@
-for system.dir in path:
            if os.path.isdir(system.dir):
                break
        else
+# filter out non-existent directories
        system.path = filter(os.path.isdir, path)

        if not system.path
:
@@ -2494,21 +2494,17 @@
raise I
-mport
+O
Error, "
@@ -2558,19 +2558,20 @@
binary.
-dir
+path
:
@@ -2579,22 +2579,24 @@
binary.
-dir
+path
 = 
+[
joinpath
@@ -2596,26 +2596,17 @@
oinpath(
-system.dir
+p
, 'binar
@@ -2610,16 +2610,38 @@
naries')
+ for p in system.path]
    if
@@ -2649,19 +2649,20 @@
ot disk.
-dir
+path
:
@@ -2668,22 +2668,24 @@
disk.
-dir
+path
 = 
+[
joinpath
@@ -2689,28 +2689,41 @@
ath(
-system.dir, 'disks')
+p, 'disks') for p in system.path]
@@ -2737,19 +2737,20 @@
script.
-dir
+path
:
@@ -2758,22 +2758,24 @@
script.
-dir
+path
 = 
+[
joinpath
@@ -2795,16 +2795,17 @@
'boot')
+]
system
@@ -2805,19 +2805,20 @@
system.
-dir
+path
 = None
@@ -2824,19 +2824,20 @@
binary.
-dir
+path
 = None
@@ -2841,19 +2841,20 @@
ne
disk.
-dir
+path
 = None
@@ -2860,19 +2860,20 @@
script.
-dir
+path
 = None
|
32d12ae035d1c8cebd3a163f9e35c538628e5bc7
|
Add test_message.py
|
tests/test_message.py
|
tests/test_message.py
|
Python
| 0
|
@@ -0,0 +1,1181 @@
+#!/usr/bin/env python3
#
# Copyright 2016 Red Hat, Inc.
#
# Authors:
#     Fam Zheng <famz@redhat.com>
#
# This work is licensed under the MIT License. Please see the LICENSE file or
# http://opensource.org/licenses/MIT.

import sys
import os
import time
import datetime
from patchewtest import PatchewTestCase, main

class ProjectTest(PatchewTestCase):

    def setUp(self):
        self.create_superuser()

    def test_0_second(self):
        from api.models import Message
        message = Message()
        message.date = datetime.datetime.utcnow()
        age = message.get_age()
        self.assertEqual(age, "0 second")

    def test_now(self):
        from api.models import Message
        message = Message()
        dt = datetime.datetime.fromtimestamp(time.time() + 100)
        message.date = dt
        age = message.get_age()
        self.assertEqual(age, "now")

    def test_1_day(self):
        from api.models import Message
        message = Message()
        dt = datetime.datetime.fromtimestamp(time.time() - 3600 * 25)
        message.date = dt
        age = message.get_age()
        self.assertEqual(age, "1 day")

if __name__ == '__main__':
    main()
|
|
a965c542e8a2ea4bb74e522eae34161d8a6c3efa
|
Add minimal product test
|
tests/test_product.py
|
tests/test_product.py
|
Python
| 0.000069
|
@@ -0,0 +1,1659 @@
+# -*- coding: utf-8 -*-

import unittest
import os

from context import epages

class TestProduct(unittest.TestCase):

    client = None
    product_service = None
    product_id = None

    @classmethod
    def setUpClass(cls):
        host = os.environ['EPAGES_HOST']
        shop = os.environ['EPAGES_SHOP']
        token = os.environ['EPAGES_TOKEN']
        TestProduct.client = epages.HTTPClient(host, shop, token)
        TestProduct.product_service = epages.ProductService(TestProduct.client)

    def setUp(self):
        payload = {
            "productNumber": "1337",
            "name": "epages rest API test product",
            "shortDescription": "Awesome product",
            "description": "This is a brand new product",
            "manufacturer": "Awesome Products Company",
            "price": 13.37,
        }

        params = {
            "locale": "en_GB",
            "currency": "EUR",
        }

        try:
            response = TestProduct.client.post(u"/products", params=params, json=payload)
            TestProduct.product_id = response["productId"]
        except epages.RESTError, error:
            print(unicode(error))

    def test_shop(self):
        pass

    def tearDown(self):
        try:
            if TestProduct.product_id is not None:
                status_code = TestProduct.client.delete(u"/products/" + TestProduct.product_id)
                self.assertEquals(status_code, 204, "DELETE on product should yield 204!")
        except epages.RESTError, error:
            print(unicode(error))

    @classmethod
    def tearDownClass(cls):
        pass

if __name__ == '__main__':
    unittest.main()
|
|
cf8ff340597d29431eaed8265a67205a1b021ee7
|
add host_evacuate task
|
rock/tasks/host_evacuate.py
|
rock/tasks/host_evacuate.py
|
Python
| 0.000002
|
@@ -0,0 +1,1214 @@
+from flow_utils import BaseTask
from actions import NovaAction
from server_evacuate import ServerEvacuateTask
import logging
import time

class HostEvacuateTask(BaseTask, NovaAction):

    def execute(self, host):
        n_client = self._get_client()

        evacuated_host = host
        evacuable_servers = n_client.servers.list(
            search_opts={'host': evacuated_host,
                         'all_tenants': 1})

        evacuated_servers = list()
        for server in evacuable_servers:
            logging.debug("Processing %s" % server)
            if hasattr(server, 'id'):
                response = ServerEvacuateTask().execute(server.id, True)
                if response['accepted']:
                    logging.info("Evacuated %s from %s: %s" %
                                 (response["uuid"], evacuated_host, response["reason"]))
                    evacuated_servers.append(server)
                else:
                    logging.error("Evacuation of %s on %s failed: %s" %
                                  (response["uuid"], evacuated_host, response["reason"]))
                time.sleep(2)
            else:
                logging.error("Could not evacuate instance: %s" % server.to_dict())
|
|
ceb3c0535f2701d595d440552d60da876d7cd0b8
|
Move some functions from 'model.utils' to 'core.xrf_utils'
|
pyxrf/core/xrf_utils.py
|
pyxrf/core/xrf_utils.py
|
Python
| 0
|
@@ -0,0 +1,2570 @@
+import xraylib


def parse_compound_formula(compound_formula):
    r"""
    Parses the chemical formula of a compound and returns the dictionary,
    which contains element name, atomic number, number of atoms and mass fraction
    in the compound.

    Parameters
    ----------

    compound_formula: str
        chemical formula of the compound in the form ``FeO2``, ``CO2`` or ``Fe``.
        Element names must start with capital letter.

    Returns
    -------

    dictionary of dictionaries, data on each element in the compound: key -
    symbolic element name, value - a dictionary that contains ``AtomicNumber``,
    ``nAtoms`` and ``massFraction`` of the element. The elements are sorted
    in the order of growing atomic number.

    Raises
    ------

    RuntimeError is raised if compound formula cannot be parsed
    """

    xraylib.SetErrorMessages(0)  # This is supposed to stop XRayLib from printing
                                 # internal error messages, but it doesn't work

    try:
        compound_data = xraylib.CompoundParser(compound_formula)
    except SystemError:
        msg = f"Invalid chemical formula '{compound_formula}' is passed, parsing failed"
        raise RuntimeError(msg)

    # Now create more manageable structure
    compound_dict = {}
    for e_an, e_mf, e_na in zip(compound_data["Elements"],
                                compound_data["massFractions"],
                                compound_data["nAtoms"]):
        e_name = xraylib.AtomicNumberToSymbol(e_an)
        compound_dict[e_name] = {"AtomicNumber": e_an,
                                 "nAtoms": e_na,
                                 "massFraction": e_mf}

    return compound_dict


def split_compound_mass(compound_formula, compound_mass):
    r"""
    Computes mass of each element in the compound given total mass of the compound

    Parameters
    ----------

    compound_formula: str
        chemical formula of the compound in the form ``FeO2``, ``CO2`` or ``Fe``.
        Element names must start with capital letter.

    compound_mass: float
        total mass of the compound

    Returns
    -------

    dictionary: key - symbolic element name, value - mass of the element

    Raises
    ------

    RuntimeError is raised if compound formula cannot be parsed
    """

    compound_dict = parse_compound_formula(compound_formula)

    element_dict = {}
    for el_name, el_info in compound_dict.items():
        element_dict[el_name] = el_info["massFraction"] * compound_mass

    return element_dict
|
|
2a6ec396512c435413f6e3848d1448af839fa9a6
|
Add unittests for FindQuery
|
tests/test_storage.py
|
tests/test_storage.py
|
Python
| 0
|
@@ -0,0 +1,681 @@
+import time

from graphite_api.storage import FindQuery

from . import TestCase


class StorageTestCase(TestCase):
    def test_find_query(self):
        end = int(time.time())
        start = end - 3600

        query = FindQuery('collectd', None, None)
        self.assertEqual(repr(query), '<FindQuery: collectd from * until *>')

        query = FindQuery('collectd', start, None)
        self.assertEqual(repr(query), '<FindQuery: collectd from %s until *>'
                         % time.ctime(start))

        query = FindQuery('collectd', None, end)
        self.assertEqual(repr(query), '<FindQuery: collectd from * until %s>'
                         % time.ctime(end))
|
|
e4396938425bc27fc730d580a6cd4ee6e3fd09e9
|
Remove v1.0 and v1.1 API from version info.
|
quantum/api/versions.py
|
quantum/api/versions.py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Citrix Systems.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import webob.dec
from quantum.api.views import versions as versions_view
from quantum import wsgi
LOG = logging.getLogger(__name__)
class Versions(object):
@classmethod
def factory(cls, global_config, **local_config):
return cls()
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
"""Respond to a request for all Quantum API versions."""
version_objs = [
{
"id": "v1.0",
"status": "DEPRECATED",
},
{
"id": "v1.1",
"status": "CURRENT",
},
{
"id": "v2.0",
"status": "PROPOSED",
},
]
if req.path != '/':
return webob.exc.HTTPNotFound()
builder = versions_view.get_view_builder(req)
versions = [builder.build(version) for version in version_objs]
response = dict(versions=versions)
metadata = {
"application/xml": {
"attributes": {
"version": ["status", "id"],
"link": ["rel", "href"],
}
}
}
content_type = req.best_match_content_type()
body = (wsgi.Serializer(metadata=metadata).
serialize(response, content_type))
response = webob.Response()
response.content_type = content_type
response.body = body
return response
|
Python
| 0.000001
|
@@ -1144,17 +1144,17 @@
"id": "v
-1
+2
.0",
@@ -1181,211 +1181,15 @@
": "
-DEPRECATED",
            },
            {
                "id": "v1.1",
                "status": "CURRENT",
            },
            {
                "id": "v2.0",
                "status": "PROPOSED
+CURRENT
",
|
5db291b8a745f8dc640e7cdc7a274535abcc63af
|
Create rPiEinkQR.py
|
rPiEinkQR.py
|
rPiEinkQR.py
|
Python
| 0.000001
|
@@ -0,0 +1,708 @@
+import os
from PIL import Image
from epyper.displayCOGProcess import Display
from epyper.displayController import DisplayController

# code to create QR code of good size for eink screen
# qrencode -o qrcode.png -s 7 -l L -v 1 -m 1 "TestThree003"

QRname = "qrencode -o qrcode.png -s 7 -l L -v 1 -m 1 \"" + "TestThree04" + "\""
os.system(QRname)

imgName = "qrcode.png"

im = Image.open(imgName)

convertname = "convert " + imgName + " -background white -gravity center -extent 264x176 " + imgName
os.system(convertname)

im = Image.open(imgName)

# create DisplayController instance specifying display type as an argument
display = DisplayController(Display.EPD_TYPE_270)

# display it!
display.displayImg(im)
|
|
dff76b6518b1de1be56def7469180d841a9e6121
|
Create __init__.py
|
Tools/__init__.py
|
Tools/__init__.py
|
Python
| 0.000429
|
@@ -0,0 +1,24 @@
+# -*- coding: utf-8 -*-
|
|
a152c7c48baa0f1c82e7d84bebbee674eb4f2761
|
Add command to queue expired tiles
|
tilequeue/commands.py
|
tilequeue/commands.py
|
Python
| 0.000001
|
@@ -0,0 +1,2634 @@
+from tilequeue.queues import make_sqs_queue
from tilequeue.tile import explode_with_parents
from tilequeue.tile import parse_expired_coord_string
import argparse
import os

def add_aws_cred_options(arg_parser):
    arg_parser.add_argument('--aws_access_key_id')
    arg_parser.add_argument('--aws_secret_access_key')
    return arg_parser

def enqueue_arg_parser():
    parser = argparse.ArgumentParser()
    parser = add_aws_cred_options(parser)
    parser.add_argument('--queue',
                        required=True,
                        help='Name of aws sqs queue, should already exist.',
                        )
    parser.add_argument('--expired-tiles-file',
                        required=True,
                        help='Path to file containing list of expired tiles. Should be one per line, <zoom>/<column>/<row>',
                        )
    return parser

def assert_aws_config(args):
    if (args.aws_access_key_id is not None or
            args.aws_secret_access_key is not None):
        # assert that if either is specified, both are specified
        assert (args.aws_access_key_id is not None and
                args.aws_secret_access_key is not None), 'Must specify both aws key and secret'
    else:
        assert 'AWS_ACCESS_KEY_ID' in os.environ, 'Missing AWS_ACCESS_KEY_ID config'
        assert 'AWS_SECRET_ACCESS_KEY' in os.environ, 'Missing AWS_SECRET_ACCESS_KEY config'


def enqueue_process_main():
    parser = enqueue_arg_parser()
    args = parser.parse_args()
    assert_aws_config(args)

    queue = make_sqs_queue(
        args.queue, args.aws_access_key_id, args.aws_secret_access_key)

    expired_tiles = []
    with open(args.expired_tiles_file) as f:
        for line in f:
            line = line.strip()
            if not line:
                continue
            coord = parse_expired_coord_string(line)
            if coord is None:
                print 'Could not parse coordinate from line: %s' % line
                continue
            expired_tiles.append(coord)

    print 'Number of expired tiles: %d' % len(expired_tiles)

    exploded_coords = explode_with_parents(expired_tiles)
    print 'Number of total expired tiles with all parents: %d' % len(exploded_coords)

    print 'Queuing ... '

    # sort in any way?

    # zoom level strategy?
    # only enqueue work for zooms > 10 if in metro extract area?

    # exploded_coords is a set, but enqueue_batch expects a list for slicing
    exploded_coords = list(exploded_coords)

    queue.enqueue_batch(list(exploded_coords))

    print 'Queuing ... Done'

if __name__ == '__main__':
    enqueue_process_main()
|
|
707781ac58318af002cc1e75d8c31839d4e66e77
|
add module to support search result export
|
arches/app/utils/geos_to_pyshp.py
|
arches/app/utils/geos_to_pyshp.py
|
Python
| 0
|
@@ -0,0 +1,872 @@
+from django.contrib.gis.geos import MultiPoint
from django.contrib.gis.geos import MultiLineString
from django.contrib.gis.geos import MultiPolygon

def convert_geom(geos_geom):
    if geos_geom.geom_type == 'Point':
        multi_geom = MultiPoint(geos_geom)
        shp_geom = [[c for c in multi_geom.coords]]
    if geos_geom.geom_type == 'LineString':
        multi_geom = MultiLineString(geos_geom)
        shp_geom = [c for c in multi_geom.coords]
    if geos_geom.geom_type == 'Polygon':
        multi_geom = MultiPolygon(geos_geom)
        shp_geom = [c[0] for c in multi_geom.coords]
    if geos_geom.geom_type == 'MultiPoint':
        shp_geom = [[c for c in geos_geom.coords]]
    if geos_geom.geom_type == 'MultiLineString':
        shp_geom = [c for c in geos_geom.coords]
    if geos_geom.geom_type == 'MultiPolygon':
        shp_geom = [c[0] for c in geos_geom.coords]

    return shp_geom
|
|
3e0903ba2f74d5f73241d1ffc5056f2a77c709e0
|
Add a simple test for SetupPrometheusEndpointOnPortRange
|
tests/test_exports.py
|
tests/test_exports.py
|
Python
| 0.000001
|
@@ -0,0 +1,367 @@
+#!/usr/bin/env python
from django_prometheus.exports import SetupPrometheusEndpointOnPortRange
import unittest


class ExportTest(unittest.TestCase):
    def testPortRange(self):
        port_range = [8000, 8001]
        SetupPrometheusEndpointOnPortRange(port_range)
        SetupPrometheusEndpointOnPortRange(port_range)

if __name__ == '__main__':
    unittest.main()
|
|
a4ce015943da37335114aa8b384f2ee7371f6446
|
Test in app-factory.
|
tests/test_factory.py
|
tests/test_factory.py
|
Python
| 0
|
@@ -0,0 +1,1658 @@
+from flask import Flask
from flask_sslify import SSLify
from pytest import fixture


class AppFactoryContext(object):

    def __init__(self):
        self.sslify = SSLify()
        self.app = None
        self.appctx = None

    def __enter__(self):
        self.app = self.create_app()
        self.appctx = self.app.app_context()
        self.appctx.push()
        return self.appctx

    def __exit__(self, exc_type, exc_value, exc_tb):
        self.appctx.pop()
        self.app = None
        self.appctx = None

    def create_app(self):
        app = Flask(__name__)
        app.config['DEBUG'] = False
        app.config['TESTING'] = False
        app.config['SERVER_NAME'] = 'example.com'
        app.config['SSLIFY_PERMANENT'] = True
        self.sslify.init_app(app)
        app.add_url_rule('/', 'home', self.view_home)
        return app

    def view_home(self):
        return 'home'


@fixture
def app_factory():
    context = AppFactoryContext()
    with context:
        yield context


def test_config(app_factory):
    assert app_factory.sslify.hsts_include_subdomains is False
    assert app_factory.sslify.permanent is True
    assert app_factory.sslify.skip_list is None


def test_redirection(app_factory):
    client = app_factory.app.test_client()
    r = client.get('/')
    assert r.status_code == 301
    assert r.headers['Location'] == 'https://example.com/'


def test_hsts_header(app_factory):
    client = app_factory.app.test_client()
    r = client.get('/', base_url='https://example.com')
    assert r.status_code == 200
    assert r.data == 'home'
    assert r.headers['Strict-Transport-Security'] == 'max-age=31536000'
|
|
a866b7e2de7e76e8bfb3b1feb22d7692afa5111d
|
Add test exposing stale promise job store cache (connected to #817)
|
src/toil/test/src/promisesTest.py
|
src/toil/test/src/promisesTest.py
|
Python
| 0
|
@@ -0,0 +1,1253 @@
+# Copyright (C) 2015 UCSC Computational Genomics Lab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from toil.job import Job
from toil.test import ToilTest

class CachedUnpicklingJobStoreTest(ToilTest):
    """
    https://github.com/BD2KGenomics/toil/issues/817
    """
    def test(self):
        """
        Runs two identical Toil workflows with different job store paths
        """
        for _ in range(2):
            options = Job.Runner.getDefaultOptions(self._getTestJobStorePath())
            options.logLevel = "INFO"
            root = Job.wrapJobFn(parent)
            value = Job.Runner.startToil(root, options)


def parent(job):
    return job.addChildFn(child).rv()


def child():
    return 1
|
|
e99807f81dea6bac82f373a210af0c4f26b61334
|
test - Test for exception on syntax error
|
tests/test_request.py
|
tests/test_request.py
|
Python
| 0
|
@@ -0,0 +1,318 @@
+import pytest

import overpy


class TestQuery(object):
    def test_syntax_error(self):
        with pytest.raises(overpy.exception.OverpassBadRequest):
            api = overpy.Overpass()
            # Missing ; after way(1)
            api.query((
                "way(1)"
                "out body;"
            ))
|
|
69c5015a1a9dc3233530d691d20befa529f7c880
|
Create lookupAndStoreTweets.py utility.
|
app/utils/insert/lookupAndStoreTweets.py
|
app/utils/insert/lookupAndStoreTweets.py
|
Python
| 0
|
@@ -0,0 +1,1164 @@
+#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Lookup and Store Tweets utility.
"""
import argparse
import os
import sys

# Allow imports to be done when executing this file directly.
appDir = os.path.abspath(os.path.join(os.path.dirname(__file__),
                                      os.path.pardir, os.path.pardir))
sys.path.insert(0, appDir)

from lib import tweets
from lib.twitter import auth


def main():
    """
    Command-line interface to lookup tweet GUIDs and store them in the db.
    """
    parser = argparse.ArgumentParser(description="Lookup and Store Tweets"
                                     " utility.")

    parser.add_argument('tweetGuids',
                        nargs='+',
                        help="List of one or more tweet GUIDS to lookup and"
                             " store in the db, separated by spaces."
                             " Profiles are also stored so that tweets can be"
                             " linked to them.")
    args = parser.parse_args()

    APIConn = auth.getAppOnlyConnection()
    tweets.lookupTweetGuids(APIConn, args.tweetGuids)


if __name__ == '__main__':
    main()
|
|
bb2644fc14dd92cf54c6a22da6fe3a66f89535e6
|
Create VNSautoRotator.py
|
VNSautoRotator.py
|
VNSautoRotator.py
|
Python
| 0
|
@@ -0,0 +1,993 @@
+import numpy as np
import sys, os
import nrrd

if (len(sys.argv) < 2):
    print 'Error: missing arguments!'
    print 'e.g. python VNSautoRotator.py imageIn.nrrd [ImageOut.nrrd]'
    print "rotate RPI to LPS orientation for CMTK (as it doesn't like RPI)"
else:
    print 'Processing %s...' % (str(sys.argv[1]))
    data1, header1 = nrrd.read(str(sys.argv[1]))
    print header1
    header1['space'] = 'left-posterior-superior'
    header1.pop("space dimension", None)
    print header1
    if (np.sum(np.sum(data1,(0,1))[-100:]) < np.sum(np.sum(data1,(0,1))[:100])):
        print 'RPI orientation detected so rotating to LPS...'
        data2 = np.flip(data1, (0, 2))
        data1 = data2
    print 'saving...'
    if (len(sys.argv) == 3):
        nrrd.write(str(sys.argv[2]), data1, header1)
        print 'saved to ' + str(sys.argv[2])
    else:
        nrrd.write(str(sys.argv[1]).replace('.nrrd','_LPS.nrrd'), data1, header1)
        print 'saved to ' + str(sys.argv[1]).replace('.nrrd','_LPS.nrrd')
|
|
16b73476daedaf1b111e900c0db947dcdab1c9a6
|
Add zits crawler
|
comics/crawler/crawlers/zits.py
|
comics/crawler/crawlers/zits.py
|
Python
| 0
|
@@ -0,0 +1,863 @@
+from comics.crawler.utils.lxmlparser import LxmlParser
from comics.crawler.base import BaseComicCrawler
from comics.crawler.meta import BaseComicMeta

class ComicMeta(BaseComicMeta):
    name = 'Zits'
    language = 'en'
    url = 'http://www.arcamax.com/zits'
    start_date = '1997-07-01'
    history_capable_days = 14
    schedule = 'Mo,Tu,We,Tu,Fr,Sa,Su'
    time_zone = -5
    rights = 'Jerry Scott and Jim Borgman'

class ComicCrawler(BaseComicCrawler):
    def _get_url(self):
        self.parse_feed('http://www.arcamax.com/zits/channelfeed')

        for entry in self.feed.entries:
            if entry.title.endswith(self.pub_date.strftime('%-1m/%-1d/%Y')):
                self.web_url = entry.link
                break

        if not self.web_url:
            return

        page = LxmlParser(self.web_url)
        self.url = page.src('p.m0 img')
|
|
9b0d6239bf73dce4cc981f13dd16d3c5f46b40b3
|
Create dominick.py
|
dominick.py
|
dominick.py
|
Python
| 0
|
@@ -0,0 +1 @@
+
|
|
5ffdaf778157d112c26b96020408f80ec3820e02
|
Create __init__.py
|
crispy/actions/__init__.py
|
crispy/actions/__init__.py
|
Python
| 0.000429
|
@@ -0,0 +1,34 @@
+from crispy.actions.core import *
|
|
fb9915a481e3161325eb5200db2232e6e34b2acc
|
Add support for Jawbone
|
services/jawbone.py
|
services/jawbone.py
|
Python
| 0
|
@@ -0,0 +1,1866 @@
+from oauthlib.common import add_params_to_uri%0Aimport foauth.providers%0A%0A%0Aclass Jawbone(foauth.providers.OAuth2):%0A    # General info about the provider%0A    provider_url = 'https://jawbone.com/'%0A    docs_url = 'https://jawbone.com/up/developer/endpoints'%0A    category = 'Fitness'%0A%0A    # URLs to interact with the API%0A    authorize_url = 'https://jawbone.com/auth/oauth2/auth'%0A    access_token_url = 'https://jawbone.com/auth/oauth2/token'%0A    api_domain = 'jawbone.com'%0A%0A    available_permissions = %5B%0A        (None, 'Read your name and profile picture'),%0A        ('extended_read', 'Read your age, gender, weight, and height'),%0A        ('location_read', %22Read the places you've visited%22),%0A        ('friends_read', 'Read your list of friends'),%0A        ('mood_read', 'Read your mood'),%0A        ('mood_write', 'Write to your mood'),%0A        ('move_read', 'Read your moves and workouts'),%0A        ('move_write', 'Write to your moves and create a workout'),%0A        ('sleep_read', 'Read your sleep data'),%0A        ('sleep_write', 'Write to your sleep data'),%0A        ('meal_read', 'Read your meals'),%0A        ('meal_write', 'Write to your meals'),%0A        ('weight_read', 'Read your body metrics'),%0A        ('weight_write', 'Write to your body metrics'),%0A        ('cardiac_read', 'Read your heart data'),%0A        ('cardiac_write', 'Write your heart data'),%0A        ('generic_event_read', 'Read all other types of events'),%0A        ('generic_event_write', 'Write to all other types of events'),%0A    %5D%0A%0A    def get_authorize_params(self, redirect_uri, scopes):%0A        # Always request at least user information%0A        scopes.append('basic_read')%0A        return super(Jawbone, self).get_authorize_params(redirect_uri, scopes)%0A%0A    def get_user_id(self, key):%0A        r = self.api(key, self.api_domain, u'/nudge/api/v.1.1/users/@me')%0A        return r.json()%5Bu'data'%5D%5Bu'xid'%5D%0A
|
|
4f50891c1a7d918010dbcecd640bb4b83f7bd2a3
|
ADD taobao login
|
socialoauth/sites/taobao.py
|
socialoauth/sites/taobao.py
|
Python
| 0
|
@@ -0,0 +1,1066 @@
+# -*- coding: utf-8 -*-%0A%0Afrom socialoauth.sites.base import OAuth2%0A%0A%0Aclass TaoBao(OAuth2):%0A AUTHORIZE_URL = 'https://oauth.taobao.com/authorize'%0A ACCESS_TOKEN_URL = 'https://oauth.taobao.com/token'%0A TAOBAO_API_URL = 'https://eco.taobao.com/router/rest'%0A%0A %0A def build_api_url(self, url):%0A return self.TAOBAO_API_URL%0A %0A def build_api_data(self, **kwargs):%0A data = %7B%0A 'access_token': self.access_token,%0A 'v': 2.0,%0A 'format':'json'%0A %7D%0A data.update(kwargs)%0A return data%0A %0A def parse_token_response(self, res):%0A self.uid = res%5B'taobao_user_id'%5D%0A self.access_token = res%5B'access_token'%5D%0A self.expires_in = res%5B'expires_in'%5D%0A self.refresh_token = res%5B'refresh_token'%5D%0A %0A res = self.api_call_get(method='taobao.user.buyer.get', %0A fields='nick,avatar')%0A%0A user = res%5B'user_buyer_get_response'%5D%5B'user'%5D%0A self.name = user%5B'nick'%5D%0A self.avatar = user%5B'avatar'%5D%0A self.avatar_large = %22%22
|
|
20eb83e4e8e0391c9efaca7f30a80220f9a14e9a
|
Add codelists management tools
|
maediprojects/query/codelists.py
|
maediprojects/query/codelists.py
|
Python
| 0.000001
|
@@ -0,0 +1,816 @@
+from maediprojects import db, models%0Aimport datetime%0A%0Adef create_code(data):%0A    codelistcode = models.CodelistCode()%0A%0A    for attr, val in data.items():%0A        setattr(codelistcode, attr, val)%0A    db.session.add(codelistcode)%0A    db.session.commit()%0A    return codelistcode%0A%0Adef update_attr(data):%0A    codelistcode = models.CodelistCode.query.filter_by(%0A        code = data%5B'code'%5D,%0A        codelist_code = data%5B%22codelist_code%22%5D%0A    ).first()%0A    setattr(codelistcode, data%5B'attr'%5D, data%5B'value'%5D)%0A    db.session.add(codelistcode)%0A    db.session.commit()%0A    return True%0A%0Adef delete_code(data):%0A    codelistcode = models.CodelistCode.query.filter_by(%0A        code = data%5B'code'%5D,%0A        codelist_code = data%5B%22codelist_code%22%5D%0A    ).first()%0A    db.session.delete(codelistcode)%0A    db.session.commit()%0A    return True%0A
|
|
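A hypothetical usage sketch for the three helpers above; the 'sector' codelist and the 'name' attribute are made-up examples, not taken from the models:

    # Hypothetical values; only the dict keys are dictated by the helpers above.
    create_code({'code': '111', 'codelist_code': 'sector', 'name': 'Education'})
    update_attr({'code': '111', 'codelist_code': 'sector',
                 'attr': 'name', 'value': 'Primary education'})
    delete_code({'code': '111', 'codelist_code': 'sector'})
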
c4578461b82dbe0f94439e6823669af536ae3f09
|
add index on _id_document field
|
apps/archive_history/service.py
|
apps/archive_history/service.py
|
# -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
import logging
from eve.utils import config
from copy import deepcopy
from superdesk import get_resource_service
from superdesk.resource import Resource
from superdesk.services import BaseService
from apps.archive.common import ITEM_UPDATE, get_user, ITEM_CREATE
from superdesk.metadata.item import CONTENT_STATE, ITEM_STATE
from superdesk.utc import utcnow
log = logging.getLogger(__name__)
fields_to_remove = ['_id', '_etag', 'versioncreator', 'originalcreator', 'versioncreated',
'_current_version', 'version', '_updated', 'lock_session', 'lock_user', 'lock_time', 'lock_action',
'force_unlock', '_created', 'guid', 'family_id', 'firstcreated', 'original_creator']
class ArchiveHistoryResource(Resource):
endpoint_name = 'archive_history'
resource_methods = ['GET']
item_methods = ['GET']
schema = {
'item_id': {'type': 'string'},
'user_id': Resource.rel('users', True),
'operation': {'type': 'string'},
'update': {'type': 'dict', 'nullable': True},
'version': {'type': 'integer'},
'original_item_id': {'type': 'string'}
}
mongo_indexes = {'item_id': ([('item_id', 1)], {'background': True})}
class ArchiveHistoryService(BaseService):
def on_item_updated(self, updates, original, operation=None):
item = deepcopy(original)
if updates:
item.update(updates)
self._save_history(item, updates, operation or ITEM_UPDATE)
def on_item_deleted(self, doc):
lookup = {'item_id': doc[config.ID_FIELD]}
self.delete(lookup=lookup)
def get_user_id(self, item):
user = get_user()
if user:
return user.get(config.ID_FIELD)
def _save_history(self, item, update, operation):
# in case of auto-routing, if the original_creator exists in our database
# then create item create record in the archive history.
if item.get(ITEM_STATE) == CONTENT_STATE.ROUTED and item.get('original_creator') \
and not item.get('original_id'):
user = get_resource_service('users').find_one(req=None, _id=item.get('original_creator'))
firstcreated = item.get('firstcreated', utcnow())
if user:
history = {
'item_id': item[config.ID_FIELD],
'user_id': user.get(config.ID_FIELD),
'operation': ITEM_CREATE,
'update': self._remove_unwanted_fields(update, item),
'version': item.get(config.VERSION, 1),
'_created': firstcreated,
'_updated': firstcreated
}
self.post([history])
history = {
'item_id': item[config.ID_FIELD],
'user_id': self.get_user_id(item),
'operation': operation,
'update': self._remove_unwanted_fields(update, item),
'version': item.get(config.VERSION, 1)
}
self.post([history])
def _remove_unwanted_fields(self, update, original):
if update:
update_copy = deepcopy(update)
for field in fields_to_remove:
update_copy.pop(field, None)
if original.get('sms_message') == update_copy.get('sms_message'):
update_copy.pop('sms_message', None)
return update_copy
|
Python
| 0.000002
|
@@ -1458,24 +1458,33 @@
_indexes = %7B
+%0A
'item_id': (
@@ -1522,16 +1522,93 @@
: True%7D)
+,%0A '_id_document': (%5B('_id_document', 1)%5D, %7B'background': True%7D),%0A
%7D%0A%0A%0Aclas
|
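Reconstructed from the hunk, the index declaration on the resource now reads (both indexes build in the background):

    mongo_indexes = {
        'item_id': ([('item_id', 1)], {'background': True}),
        '_id_document': ([('_id_document', 1)], {'background': True}),
    }
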
1c810e9026f2d2c7ce3722d89a0cd7d333904e0f
|
add ipdb.py for easier debugging
|
examples/ipdb.py
|
examples/ipdb.py
|
Python
| 0.000001
|
@@ -0,0 +1,1802 @@
+%22%22%22%0AThis module provides a quick n dirty way to get a debug ipython shell.%0A2 ways to achieve that:%0A%0A1. calling set_trace() will immediately stop your program at that position%0A2. importing ipdb will overwrite sys.excepthook with ipdb.info. This will%0A   provide the ipython shell%0A%22%22%22%0A%0Aimport sys%0Afrom IPython.core.debugger import Pdb%0Afrom IPython.core.shellapp import InteractiveShellApp%0Afrom IPython.core import ipapi%0A%0Ashell = InteractiveShellApp(argv=%5B''%5D)%0A%0Adef_colors = ipapi.get().colors%0A%0Adef set_trace():%0A    frame = sys._getframe().f_back%0A    Pdb(def_colors).set_trace(frame)%0A%0A# Post-Mortem interface, copied from pdb%0Adef post_mortem(t=None):%0A    # handling the default%0A    if t is None:%0A        # sys.exc_info() returns (type, value, traceback) if an exception is%0A        # being handled, otherwise it returns None%0A        t = sys.exc_info()%5B2%5D%0A        if t is None:%0A            raise ValueError(%22A valid traceback must be passed if no %22%0A                             %22exception is being handled%22)%0A    # added def_colors here for ipython colors%0A    p = Pdb(def_colors)%0A    #p.reset()%0A    p.interaction(None, t)%0A%0A# code snippet from http://code.activestate.com/recipes/65287-automatically-start-the-debugger-on-an-exception/%0Adef info(type, value, tb):%0A    if hasattr(sys, 'ps1') or not sys.stderr.isatty():%0A        # we are in interactive mode or we don't have a tty-like%0A        # device, so we call the default hook%0A        sys.__excepthook__(type, value, tb)%0A    else:%0A        import traceback%0A        # we are NOT in interactive mode, print the exception...%0A        traceback.print_exception(type, value, tb)%0A        print%0A        # ...then start the debugger in post-mortem mode%0A        # on the traceback we were handed (like pdb.pm() does)%0A        post_mortem(tb)%0A%0Asys.excepthook = info
|
|
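A sketch of the two entry points the docstring describes (the import path is an assumption; it depends on where the file is installed):

    import ipdb  # importing alone installs ipdb.info as sys.excepthook

    def divide(a, b):
        ipdb.set_trace()  # drops into an IPython-colored Pdb right here
        return a / b

    divide(1, 0)  # any uncaught exception afterwards lands in post_mortem
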
b71fddfab16cd5d3716f15a0d2e2a9abe2e36c8a
|
Fix SSH connection caching.
|
push/ssh.py
|
push/ssh.py
|
import select
import getpass
import paramiko
# hack to add paramiko support for AES encrypted private keys
if "AES-128-CBC" not in paramiko.PKey._CIPHER_TABLE:
from Crypto.Cipher import AES
paramiko.PKey._CIPHER_TABLE["AES-128-CBC"] = dict(cipher=AES, keysize=16, blocksize=16, mode=AES.MODE_CBC)
class SshError(Exception):
def __init__(self, code):
self.code = code
def __str__(self):
return "remote command exited with code %d" % self.code
class SshConnection(object):
def __init__(self, config, log, host):
self.config = config
self.log = log
self.host = host
self.client = paramiko.SSHClient()
if not config.ssh.strict_host_key_checking:
self.client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
self.client.connect(host,
username=config.ssh.user,
timeout=config.ssh.timeout,
pkey=config.ssh.pkey)
def execute_command(self, command, display_output=False):
transport = self.client.get_transport()
channel = transport.open_session()
channel.settimeout(self.config.ssh.timeout)
channel.set_combine_stderr(True)
channel.exec_command(command)
channel.shutdown_write()
output = []
while True:
readable = select.select([channel], [], [])[0]
if not readable:
continue
received = channel.recv(1024)
if not received:
break
received = unicode(received, "utf-8")
output.append(received)
if display_output:
self.log.write(received, newline=False)
status_code = channel.recv_exit_status()
if status_code != 0:
raise SshError(status_code)
return "".join(output)
def close(self):
self.client.close()
class SshDeployer(object):
"""Executes deploy commands on remote systems using SSH. If multiple
commands are run on the same host in succession, the same connection is
reused for each."""
def __init__(self, config, args, log):
self.config = config
self.args = args
self.log = log
self.current_connection = None
config.ssh.pkey = None
if not config.ssh.key_filename:
return
key_classes = (paramiko.RSAKey, paramiko.DSSKey)
for key_class in key_classes:
try:
config.ssh.pkey = key_class.from_private_key_file(
config.ssh.key_filename)
except paramiko.PasswordRequiredException:
need_password = True
break
except paramiko.SSHException:
continue
else:
need_password = False
break
else:
raise SshError("invalid key file %s" % config.ssh.key_filename)
tries_remaining = 3
while need_password and tries_remaining:
password = getpass.getpass("password for %s: " %
config.ssh.key_filename)
try:
config.ssh.pkey = key_class.from_private_key_file(
config.ssh.key_filename,
password=password)
need_password = False
except paramiko.SSHException:
tries_remaining -= 1
if need_password and not tries_remaining:
raise SshError("invalid password.")
def shutdown(self):
if self.current_connection:
self.current_connection.close()
self.current_connection = None
def _get_connection(self, host):
if self.current_connection and self.current_connection.host != host:
self.current_connection.close()
self.current_connection = None
self.current_connection = SshConnection(self.config, self.log, host)
return self.current_connection
def _run_command(self, host, binary, *args, **kwargs):
command = " ".join(("/usr/bin/sudo", binary) + args)
self.log.debug(command)
if not self.args.testing:
conn = self._get_connection(host)
display_output = kwargs.get("display_output", True)
return conn.execute_command(command, display_output=display_output)
else:
return "TESTING"
def run_build_command(self, *args, **kwargs):
return self._run_command(self.config.deploy.build_host,
self.config.deploy.build_binary,
*args, **kwargs)
def run_deploy_command(self, host, *args, **kwargs):
return self._run_command(host,
self.config.deploy.deploy_binary,
*args, **kwargs)
|
Python
| 0
|
@@ -3976,32 +3976,76 @@
nnection = None%0A
+ if not self.current_connection:%0A
self.cur
|
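Reconstructed from the hunk, the fixed method now reuses an open connection instead of replacing it on every call:

    def _get_connection(self, host):
        if self.current_connection and self.current_connection.host != host:
            self.current_connection.close()
            self.current_connection = None
        if not self.current_connection:
            self.current_connection = SshConnection(self.config, self.log, host)
        return self.current_connection
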
da4436ec5ec3c982e42e9f85749ac8c8cf8b8a94
|
add codegen submodule
|
altair/codegen.py
|
altair/codegen.py
|
Python
| 0.000001
|
@@ -0,0 +1,1553 @@
+%22%22%22%0AObject for generating Python code calls%0A%22%22%22%0A%0Aclass CodeGen(object):%0A def __init__(self, name, args=None, kwargs=None, methods=None):%0A self.name = name%0A self.args = (args or %5B%5D)%0A self.kwargs = (kwargs or %7B%7D)%0A self.methods = (methods or %5B%5D)%0A%0A def to_str(self, tablevel=0, tabsize=4):%0A %22%22%22Return a string representation of the code%22%22%22%0A def get_str(obj, tablevel=tablevel, tabsize=tabsize):%0A if isinstance(obj, CodeGen):%0A return obj.to_str(tablevel=tablevel, tabsize=tabsize)%0A else:%0A return str(obj)%0A%0A args = %5Bget_str(arg) for arg in self.args%5D%0A kwargs = %5B((tablevel + tabsize) * ' '%0A + '%7B0%7D=%7B1%7D'.format(k, get_str(v, tablevel + tabsize)))%0A for k, v in sorted(self.kwargs.items())%5D%0A if kwargs:%0A kwargs = kwargs + %5Btablevel * ' '%5D%0A%0A if not kwargs and not args:%0A call = '%7B0%7D()'.format(self.name)%0A elif not kwargs:%0A call = '%7B0%7D(%7B1%7D)'.format(self.name, ', '.join(args))%0A elif not args:%0A call = '%7B0%7D(%5Cn%7B1%7D)'.format(self.name, ',%5Cn'.join(kwargs))%0A else:%0A call = '%7B0%7D(%7B1%7D%7B2%7D)'.format(self.name, ', '.join(args),%0A ',%5Cn'.join(%5B''%5D + kwargs))%0A%0A for method in self.methods:%0A call += '.%7B0%7D'.format(get_str(method))%0A%0A return call%0A%0A def __str__(self):%0A return self.to_str()%0A%0A def rename(self, newname):%0A self.name = newname%0A return self%0A
|
|
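A quick sketch of what CodeGen renders, traced through to_str above; the Chart/encode names are illustrative, not part of the module:

    from altair.codegen import CodeGen

    call = CodeGen('Chart', args=['data'], kwargs={'x': "'foo'"},
                   methods=['encode()'])
    print(call)
    # Chart(data,
    #     x='foo',
    # ).encode()
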
9d20d1f87f509ce51fde5c51460ff0b17c051ca1
|
Create pytest_setup.py
|
utils/pytest_setup.py
|
utils/pytest_setup.py
|
Python
| 0.000007
|
@@ -0,0 +1,113 @@
+pip install -U pytest-xdist%0Apip install -U parameterized%0Apip install -U pytest-flake8%0Apip install -U pytest-html%0A
|
|
7039e4f25d8eecdf2d5d2b4a4a769e05c5075222
|
Fix description of 'api_read_full_member' permission
|
bluebottle/members/migrations/0020_auto_20171031_1048.py
|
bluebottle/members/migrations/0020_auto_20171031_1048.py
|
Python
| 0.000052
|
@@ -0,0 +1,604 @@
+# -*- coding: utf-8 -*-%0A# Generated by Django 1.10.8 on 2017-10-31 09:48%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import migrations%0A%0A%0Adef rename_full_member_permission(apps, schema_editor):%0A Permission = apps.get_model('auth', 'Permission')%0A%0A perm = Permission.objects.get(codename='api_read_full_member')%0A%0A perm.name = 'Can view full members through the API'%0A perm.save()%0A%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('members', '0019_auto_20170824_1812'),%0A %5D%0A%0A operations = %5B%0A migrations.RunPython(rename_full_member_permission)%0A %5D%0A
|
|
4b4e9bc8f9605519b12d4da25dc6822baa629d2e
|
Add test_core
|
unit_tests/test_core.py
|
unit_tests/test_core.py
|
Python
| 0.000001
|
@@ -0,0 +1,374 @@
+from lcapy import *%0Aimport unittest%0Aimport sympy as sym%0A%0As = sym.var('s')%0A%0Aclass LcapyTester(unittest.TestCase):%0A %22%22%22Unit tests for lcapy%0A%0A %22%22%22%0A%0A def test_sExpr(self):%0A %22%22%22Lcapy: check sExpr%0A%0A %22%22%22%0A a = sExpr('(s+2)/(s-2)')%0A self.assertEqual(a.N, sExpr('s+2'), %22N incorrect.%22)%0A self.assertEqual(a.D, sExpr('s-2'), %22D incorrect.%22)%0A%0A
|
|
30be74075e761f932a10ea0806a08991b8fd9cb4
|
Add script to list nodes without an external ID
|
code/python/find-nodes-without-external-id.py
|
code/python/find-nodes-without-external-id.py
|
Python
| 0
|
@@ -0,0 +1,2328 @@
+#!/usr/bin/env python%0A%0Aimport httplib%0Aimport urllib%0Aimport json%0Aimport ssl%0Aimport argparse%0Aimport re%0A%0Aparser = argparse.ArgumentParser(description='Find any node that does not have an external ID set.')%0Aparser.add_argument('--target-url', required=True, help='URL for the UpGuard instance. This should be the hostname only (appliance.upguard.org instead of https://appliance.upguard.org)')%0Aparser.add_argument('--api-key', required=True, help='API key for the UpGuard instance')%0Aparser.add_argument('--secret-key', required=True, help='Secret key for the UpGuard instance')%0Aparser.add_argument('--insecure', action='store_true', help='Ignore SSL certificate check?')%0Aparser.add_argument('--per-page', type=int, default=10, help='Number of nodes to retrieve in each call. (Default: 10)')%0Aargs = parser.parse_args()%0A%0A# Initializations%0Abrowser = None%0A%0Adef getNodes(browser, method, endpoint, page=1, per_page=100):%0A    %22%22%22%0A    Return a JSON-parsed dictionary of nodes%0A    %22%22%22%0A    get_headers = %7B%0A        %22Authorization%22: %22Token token=%5C%22%7B%7D%7B%7D%5C%22%22.format(args.api_key, args.secret_key),%0A        %22Accept%22: %22application/json%22%7D%0A%0A    browser.request(%22GET%22, %22%7B%7D?page=%7B%7D&per_page=%7B%7D%22.format(endpoint, page, per_page), '', get_headers)%0A    response = browser.getresponse()%0A    if response.status %3E= 400:%0A        raise httplib.HTTPException(%22%7B%7D: %7B%7D%22.format(str(response.status), str(response.reason)))%0A%0A    return json.loads(response.read())%0A%0Atry:%0A    # Setup browser object%0A    url = args.target_url%0A    if 'http' in url:%0A        # URL needs to be a hostname, so remove 'https://'%0A        url = re.sub('https?:%5C/%5C/', '', url)%0A    browser = httplib.HTTPConnection(url)%0A    if args.insecure:%0A        context = ssl._create_unverified_context()%0A        browser = httplib.HTTPSConnection(url, context=context)%0A%0A    page = 1%0A    nodes = getNodes(browser, %22GET%22, %22/api/v2/nodes.json%22, page=page, per_page=args.per_page)%0A    print %22Searching for nodes without an external ID...%22%0A    while nodes:%0A        for node in nodes:%0A            if not node%5B'external_id'%5D:%0A                print %22%7B%7D (hostname: %7B%7D)%22.format(node%5B'name'%5D, node.get('hostname'))%0A%0A        page += 1%0A        nodes = getNodes(browser, %22GET%22, %22/api/v2/nodes.json%22, page=page, per_page=args.per_page)%0A%0Aexcept httplib.HTTPException as h:%0A    print h.message%0Afinally:%0A    if browser:%0A        browser.close()%0A
|
|
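Invocation follows the argparse spec above; the hostname and keys below are placeholders:

    python find-nodes-without-external-id.py \
        --target-url appliance.example.org \
        --api-key YOUR_API_KEY \
        --secret-key YOUR_SECRET_KEY \
        --insecure --per-page 50
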
f12af379ec31b8c14bf871768c558c81bad95301
|
Add grains for the cloud metadata server
|
salt/grains/metadata.py
|
salt/grains/metadata.py
|
Python
| 0
|
@@ -0,0 +1,1241 @@
+# -*- coding: utf-8 -*-%0A'''%0AGrains from cloud metadata servers at 169.254.169.254%0A%0A.. versionadded:: Nitrogen%0A%0A:depends: requests%0A'''%0Afrom __future__ import absolute_import%0A%0A# Import python libs%0Aimport os%0Aimport socket%0A%0A# Import salt libs%0Aimport salt.utils.http as http%0A%0A%0A# metadata server information%0AIP = '169.254.169.254'%0AHOST = 'http://%7B0%7D/'.format(IP)%0A%0A%0Adef __virtual__():%0A sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)%0A sock.settimeout(1)%0A result = sock.connect_ex((IP, 80))%0A if result != 0:%0A return False%0A if http.query(os.path.join(HOST, 'latest/'), status=True).get('status') != 200:%0A return False%0A return True%0A%0A%0Adef _search(prefix=%22latest/%22):%0A '''%0A Recursively look up all grains in the metadata server%0A '''%0A ret = %7B%7D%0A for line in http.query(os.path.join(HOST, prefix))%5B'body'%5D.split('%5Cn'):%0A if line.endswith('/'):%0A ret%5Bline%5B:-1%5D%5D = _search(prefix=os.path.join(prefix, line))%0A elif '=' in line:%0A key, value = line.split('=')%0A ret%5Bvalue%5D = _search(prefix=os.path.join(prefix, key))%0A else:%0A ret%5Bline%5D = http.query(os.path.join(HOST, prefix, line))%5B'body'%5D%0A return ret%0A%0A%0Adef metadata():%0A return _search()%0A
|
|
101189c319c2d0fadc97fd1077a87c11ab159a12
|
add a new test folder.
|
Test/new_module.py
|
Test/new_module.py
|
Python
| 0
|
@@ -0,0 +1,394 @@
+#!/usr/bin/env python3%0A#%0A# Copyright (c) 2016 Zhixian MA %3Czxma_sjtu@qq.com%3E%0A# MIT license%0A%0A%22%22%22%0AA test script to learn the grammar and code style of python, and try to make interesting docstrings.%0A%22%22%22%0A%0Aimport os%0Aimport sys%0Aimport argparse%0Aimport logging%0A%0Aimport numpy as np%0Afrom astropy.io import fits%0A%0Aimport fg21sim%0Afrom fg21sim.configs import configs%0Afrom fg21sim.utils import setup_logging%0A%0A%0A
|
|
3d624b5693a753ee8ecdd6f979eaa3d17736dca7
|
Create Syllabifier.py
|
cltk/corpus/middle_english/Syllabifier.py
|
cltk/corpus/middle_english/Syllabifier.py
|
Python
| 0.000001
|
@@ -0,0 +1,379 @@
+%22%22%22%0ASonority hierarchy for Middle English%0A%22%22%22%0A%0ASyllabifier = %7B%0A 'a': 1,%0A '%C3%A6': 1,%0A 'e': 1,%0A 'i': 1,%0A 'o': 1,%0A 'u': 1,%0A 'y': 1,%0A 'm': 2,%0A 'n': 2,%0A 'p': 3,%0A 'b': 3,%0A 'd': 3,%0A 'g': 3,%0A 't': 3,%0A 'k': 3,%0A '%C3%B0': 3,%0A 'c': 4,%0A 'f': 4,%0A 's': 4,%0A 'h': 4,%0A 'v': 4,%0A 'x': 4,%0A '%C3%BE': 4,%0A 'r': 5,%0A '%C6%BF': 5,%0A 'l': 6%0A%7D%0A%0A%0A%0A
|
|
bace8f65e696211db5a6ffa2cefc70d2e061b950
|
Add Support for /r/greentext
|
greentext/app.py
|
greentext/app.py
|
Python
| 0
|
@@ -0,0 +1,1900 @@
+#encoding:utf-8%0A%0Afrom utils import get_url, weighted_random_subreddit%0Afrom utils import SupplyResult%0A%0A%0A# Subreddit that will be a source of content%0Asubreddit = weighted_random_subreddit(%7B%0A    'greentext': 1.0,%0A    # If we want to get content from several subreddits%0A    # please provide here 'subreddit': probability%0A    # 'any_other_subreddit': 0.02%0A%7D)%0A# Telegram channel with @reddit2telegram_bot as an admin%0At_channel = '@r_greentext'%0A%0A%0Adef send_post(submission, r2t):%0A    what, url, ext = get_url(submission)%0A%0A    # If this func returns:%0A    # False %E2%80%93 it means that we will not send%0A    # this submission, let's move to the next.%0A    # True %E2%80%93 everything is ok, we send the submission%0A    # None %E2%80%93 we do not want to send anything this time,%0A    # let's just sleep.%0A%0A    # Get all data from submission that we need%0A    title = submission.title%0A    link = submission.shortlink%0A    text = '%7B%7D%5Cn%7B%7D'.format(title, link)%0A%0A    if what == 'text':%0A        # If it is a text submission, it is not really funny.%0A        # return r2t.send_text(submission.selftext)%0A        return SupplyResult.DO_NOT_WANT_THIS_SUBMISSION%0A    elif what == 'other':%0A        # Also we are not interested in any other content.%0A        return SupplyResult.DO_NOT_WANT_THIS_SUBMISSION%0A    elif what == 'album':%0A        # It is ok if it is an album.%0A        base_url = submission.url%0A        text = '%7B%7D%5Cn%7B%7D%5Cn%5Cn%7B%7D'.format(title, base_url, link)%0A        r2t.send_text(text)%0A        r2t.send_album(url)%0A        return SupplyResult.SUCCESSFULLY%0A    elif what in ('gif', 'img'):%0A        # Also it is ok if it is a gif or any kind of image.%0A%0A        # Check if content has already appeared in%0A        # our telegram channel.%0A        if r2t.dup_check_and_mark(url) is True:%0A            return SupplyResult.DO_NOT_WANT_THIS_SUBMISSION%0A        return r2t.send_gif_img(what, url, ext, text)%0A    else:%0A        return SupplyResult.DO_NOT_WANT_THIS_SUBMISSION%0A
|
|
b1af12dfc111c6550c166d00fdabf7fa707bfc1b
|
Create main.py
|
challenge_0/python/wost/main.py
|
challenge_0/python/wost/main.py
|
Python
| 0.000001
|
@@ -0,0 +1,167 @@
+'''%0AWritten in Python 3.6%0A'''%0A%0Adef main(text):%0A%09print(f%22Hello world, additional text: %7Btext%7D%22)%0A%0Aif __name__ == %22__main__%22:%0A%09main(input(%22What would you like to say?%22))%0A
|
|
14aba0695514866439164f48fe1f66390719431f
|
Add select_gamma.py (authored by Amnon)
|
scripts/select_gamma.py
|
scripts/select_gamma.py
|
Python
| 0
|
@@ -0,0 +1,2394 @@
+#!/usr/bin/python%0A# -*- coding: utf-8 -*-%0A%22%22%22%0ACreated on Fri Oct 18 10:13:48 2013%0A%0A@author: amnon%0A%0A### 80 char max please%0A%0ALook at all the gammaproteobacteria and select candidate contamination sequence%0A OTUs%0Aoutput: a list of sorted gammaproteobacteria (or other) otuids, according to %0A mean frequency%0A%22%22%22%0A%0Aimport sys%0Aimport argparse%0Aimport numpy as np%0A# to load a BIOM table%0Afrom biom.parse import parse_biom_table%0Afrom biom.util import biom_open%0A%0Adef TestAll(biomfile, outputfile, taxonomyclass, taxonomyname,level):%0A %22%22%22doc string here, a one liner %0A%0A ...and then more detail%0A %22%22%22%0A odat=%5B%5D%0A t = parse_biom_table(biom_open(biomfile,'U'))%0A %0A t2 = t.normObservationBySample()%0A%0A # to iterate over the table by observation, doing something based on the%0A # taxonomy:%0A class_idx = taxonomyclass%0A for values, ids, metadata in t2.iterObservations():%0A tname=metadata%5B'taxonomy'%5D%5Bclass_idx%5D.lstrip()%0A if tname == taxonomyname:%0A mv = np.mean(values)%0A odat.append((ids,mv))%0A %0A # odat.sort(key=lambda tup: tup%5B1%5D, reverse=True)%0A odat.sort(key=lambda tup: tup%5B1%5D)%0A%0A csum=%5B(odat%5B0%5D%5B0%5D,odat%5B0%5D%5B1%5D,odat%5B0%5D%5B1%5D)%5D%0A for cval in odat%5B1:%5D:%0A csum.append((cval%5B0%5D,cval%5B1%5D,csum%5B-1%5D%5B2%5D+cval%5B1%5D))%0A %0A # no get it from big to small%0A csum.reverse()%0A %0A # and write everything above the threshold (to filter)%0A snames=open(outputfile,'w')%0A for cval in csum:%0A if cval%5B2%5D%3E=level:%0A snames.write(cval%5B0%5D+%22%5Ct%22+str(cval%5B1%5D)+%22%5Ct%22+str(cval%5B2%5D)+'%5Cn')%0A snames.close()%0A%0Adef main(argv):%0A parser=argparse.ArgumentParser(description='Select Gammaproteobacteria (or other group) contamination candidates')%0A parser.add_argument('-i','--biom',help='biom file of the experiment')%0A parser.add_argument('-o','--output',help='output file name')%0A parser.add_argument('-c','--classpos',help='class of taxonomy name (0-kingdom,1-phylum etc.',default=2)%0A parser.add_argument('-t','--taxonomy',help='taxonomy name (including c__ or equivalent)',default='c__Gammaproteobacteria')%0A parser.add_argument('-l','--level',help='minimal cumulative level for OTUs to filter (use 0 to get all of them)',default='0.03')%0A %0A args=parser.parse_args(argv)%0A TestAll(args.biom,args.output,int(args.classpos),args.taxonomy,float(args.level))%0A%0Aif __name__ == %22__main__%22:%0A main(sys.argv%5B1:%5D) %0A
|
|
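A typical invocation per the argparse setup above (file names are placeholders; -c 2 and -l 0.03 restate the defaults):

    python select_gamma.py -i otu_table.biom -o gamma_candidates.txt \
        -c 2 -t c__Gammaproteobacteria -l 0.03
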
83fe6892c5b061f5fbba64c9f870f30c80b1a12a
|
create word bigram matrix
|
dsl/features/word_level.py
|
dsl/features/word_level.py
|
Python
| 0.999995
|
@@ -0,0 +1,1914 @@
+import logging%0Afrom os import path%0Afrom argparse import ArgumentParser%0Afrom featurize import Tokenizer, BigramModel%0A%0A%0Adef parse_args():%0A p = ArgumentParser()%0A p.add_argument('--train', type=str)%0A p.add_argument('--test', type=str)%0A p.add_argument('--raw-matrix-dir', type=str)%0A p.add_argument('--workdir', type=str)%0A p.add_argument('--topn', type=int, default=100)%0A p.add_argument('--threshold', type=int, default=2)%0A return p.parse_args()%0A%0A%0Adef get_paths(base, workdir):%0A raw_paths = %5B%0A 'train_raw.mtx',%0A 'test_raw.mtx',%0A 'train.labels',%0A 'train.labels.int',%0A 'test.labels',%0A 'test.labels.int',%0A 'frequent_features',%0A 'labeldict',%0A 'featdict',%0A %5D%0A workdir_paths = %5B%0A 'top_corr_features',%0A 'train_top_corr.mtx',%0A 'test_top_corr.mtx',%0A 'train_dense.mtx',%0A 'test_dense.mtx',%0A %5D%0A paths = %7Bk: path.join(base, k) for k in raw_paths%7D%0A paths.update(%7Bk: path.join(workdir, k) for k in workdir_paths%7D)%0A return paths%0A%0A%0Adef main():%0A FORMAT = '%25(asctime)s %25(levelname)s %25(message)s'%0A logging.basicConfig(format=FORMAT)%0A logging.getLogger().setLevel(logging.DEBUG)%0A args = parse_args()%0A paths = get_paths(args.raw_matrix_dir, args.workdir)%0A t = Tokenizer(ws_norm=True, filter_punct=True)%0A b_train = BigramModel(t, padding=True)%0A b_train.paths = paths%0A b_train.load_or_build_train(args.train, args.threshold)%0A b_train.choose_top_pearson(args.topn)%0A b_train.save_top_corr_features()%0A b_train.to_filtered_matrix()%0A b_train.save_matrix(paths%5B'train_top_corr.mtx'%5D)%0A b_train.save_as_dense_matrix(paths%5B'train_dense.mtx'%5D)%0A%0A b_train.load_or_build_test(args.test)%0A b_train.to_filtered_matrix()%0A b_train.save_matrix(paths%5B'test_top_corr.mtx'%5D)%0A b_train.save_as_dense_matrix(paths%5B'test_dense.mtx'%5D)%0A%0Aif __name__ == '__main__':%0A main()%0A
|
|
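An example command line matching parse_args above; all paths are placeholders:

    python word_level.py --train train.txt --test test.txt \
        --raw-matrix-dir raw_matrices/ --workdir work/ \
        --topn 100 --threshold 2
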
fec8de91954230b44b717f4b3d5a3a774c108fdf
|
Create monitor.py
|
assets/monitor.py
|
assets/monitor.py
|
Python
| 0.000001
|
@@ -0,0 +1,2263 @@
+#!/usr/bin/env python%0A%0Aimport sqlite3%0A%0Aimport os%0Aimport time%0Aimport glob%0A%0A# global variables%0Asperiod=(15*60)-1%0Adbname='/var/www/templog.db'%0A%0A%0A%0A# store the temperature in the database%0Adef log_temperature(temp):%0A%0A    conn=sqlite3.connect(dbname)%0A    curs=conn.cursor()%0A%0A    curs.execute(%22INSERT INTO temps values(datetime('now'), (?))%22, (temp,))%0A%0A    # commit the changes%0A    conn.commit()%0A%0A    conn.close()%0A%0A%0A# display the contents of the database%0Adef display_data():%0A%0A    conn=sqlite3.connect(dbname)%0A    curs=conn.cursor()%0A%0A    for row in curs.execute(%22SELECT * FROM temps%22):%0A        print str(row%5B0%5D)+%22%09%22+str(row%5B1%5D)%0A%0A    conn.close()%0A%0A%0A%0A# get temperature%0A# returns None on error, or the temperature as a float%0Adef get_temp(devicefile):%0A%0A    try:%0A        fileobj = open(devicefile,'r')%0A        lines = fileobj.readlines()%0A        fileobj.close()%0A    except:%0A        return None%0A%0A    # get the status from the end of line 1%0A    status = lines%5B0%5D%5B-4:-1%5D%0A%0A    # if the status is ok, get the temperature from line 2%0A    if status==%22YES%22:%0A        print status%0A        tempstr= lines%5B1%5D%5B-6:-1%5D%0A        tempvalue=float(tempstr)/1000%0A        print tempvalue%0A        return tempvalue%0A    else:%0A        print %22There was an error.%22%0A        return None%0A%0A%0A%0A# main function%0A# This is where the program starts%0Adef main():%0A%0A    # enable kernel modules%0A    os.system('sudo modprobe w1-gpio')%0A    os.system('sudo modprobe w1-therm')%0A%0A    # search for a device file that starts with 28%0A    devicelist = glob.glob('/sys/bus/w1/devices/28*')%0A    if not devicelist:%0A        return None%0A    else:%0A        # append /w1_slave to the device file%0A        w1devicefile = devicelist%5B0%5D + '/w1_slave'%0A%0A%0A#    while True:%0A%0A    # get the temperature from the device file%0A    temperature = get_temp(w1devicefile)%0A    if temperature != None:%0A        print %22temperature=%22+str(temperature)%0A    else:%0A        # Sometimes reads fail on the first attempt%0A        # so we need to retry%0A        temperature = get_temp(w1devicefile)%0A        print %22temperature=%22+str(temperature)%0A%0A    # Store the temperature in the database%0A    log_temperature(temperature)%0A%0A    # display the contents of the database%0A#    display_data()%0A%0A#    time.sleep(speriod)%0A%0A%0Aif __name__==%22__main__%22:%0A    main()%0A
|
|
81b178677a3c217f62be85bf16964a1f0717930f
|
fix #1
|
commons_util/os_utils/memory.py
|
commons_util/os_utils/memory.py
|
Python
| 0.000002
|
@@ -0,0 +1,23 @@
+__author__ = 'Danyang'%0A
|
|
cd1ed470e319c6aa5d2ed5206d6fb6fba63876ee
|
add k-fold splitter
|
splitter.py
|
splitter.py
|
Python
| 0.000002
|
@@ -0,0 +1,1647 @@
+%22%22%22%0AStuff which splits dataset into train and test sets.%0A%22%22%22%0A%0Aclass KFoldSplitter(object):%0A    %22%22%22Splitter that splits a table into k groups of (almost) equal size.%0A    Before using this splitter, make sure the table to split has a %60group_id%60 column.%0A%0A    Sample usage:%0A    %3E%3E%3E splitter.split('query')%0A    %3E%3E%3E while splitter.more_rounds():%0A    %3E%3E%3E     splitter.next_round()%0A    %3E%3E%3E     # use query_train and query_test%0A    %22%22%22%0A%0A    def __init__(self, dbm, k):%0A        %22%22%22%0A        @param dbm a DatabaseManager%0A        @param k the number of folds to split into, k should be %3E 1%0A        %22%22%22%0A        self.dbm = dbm%0A        self.k = k%0A        self.current_table = ''%0A        self.current_round = 1%0A%0A    def split(self, table):%0A        %22%22%22After splitting, 1 %3C= table.group_id %3C= k%22%22%22%0A        self.current_table = table%0A        self.dbm.begin()%0A        self.dbm.query('UPDATE %25s SET group_id = FLOOR(1 + RAND()*%25d)' %25 (table, self.k))%0A        self.dbm.commit()%0A%0A    def more_rounds(self):%0A        return self.current_round %3C= self.k%0A%0A    def next_round(self):%0A        %22%22%22Prepare table_train and table_test tables.%0A        They are **actually** views.%22%22%22%0A        self.dbm.begin()%0A        self.dbm.query('CREATE OR REPLACE VIEW %25s_test AS SELECT * FROM %25s WHERE group_id = %25d' %25 (self.current_table, self.current_table, self.current_round))%0A        self.dbm.query('CREATE OR REPLACE VIEW %25s_train AS SELECT * FROM %25s WHERE group_id != %25d' %25 (self.current_table, self.current_table, self.current_round))%0A        self.dbm.commit()%0A%0A        # don't forget to increment round otherwise client might get stuck in infinite loop%0A        self.current_round += 1%0A
|
|
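Putting the documented protocol together in one loop; dbm stands for any DatabaseManager with begin/query/commit, as the class assumes:

    splitter = KFoldSplitter(dbm, k=5)   # dbm: an existing DatabaseManager
    splitter.split('query')              # assigns each row a group_id in [1, 5]
    while splitter.more_rounds():
        splitter.next_round()
        # train on the query_train view, evaluate on the query_test view
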
6119f7998d918d3b38f129b7afd720f9a35e35c1
|
Add script for fetching metadata from audio file
|
audio-metadata.py
|
audio-metadata.py
|
Python
| 0
|
@@ -0,0 +1,1763 @@
+#! /usr/bin/env python%0Aimport os%0Aimport sys%0Aimport re%0Aimport tempfile%0A%0Adef getVideoDetails(filepath):%0A    tmpf = tempfile.NamedTemporaryFile()%0A    os.system(%22ffmpeg -i %5C%22%25s%5C%22 2%3E %25s%22 %25 (filepath, tmpf.name))%0A    lines = tmpf.readlines()%0A    tmpf.close()%0A    metadata = %7B%7D%0A    for l in lines:%0A        l = l.strip()%0A        if l.startswith('Duration'):%0A            metadata%5B'duration'%5D = re.search('Duration: (.*?),', l).group(0).split(':',1)%5B1%5D.strip(' ,')%0A            metadata%5B'bitrate'%5D = re.search(%22bitrate: (%5Cd+ kb/s)%22, l).group(0).split(':')%5B1%5D.strip()%0A        if l.startswith('Stream #0:0'):%0A            metadata%5B'video'%5D = %7B%7D%0A            metadata%5B'video'%5D%5B'codec'%5D, metadata%5B'video'%5D%5B'profile'%5D = %5C%0A                %5Be.strip(' ,()') for e in re.search('Video: (.*? %5C(.*?%5C)),? ', l).group(0).split(':')%5B1%5D.split('(')%5D%0A            metadata%5B'video'%5D%5B'resolution'%5D = re.search('(%5B1-9%5D%5Cd+x%5Cd+)', l).group(1)%0A            metadata%5B'video'%5D%5B'bitrate'%5D = re.search('(%5Cd+ kb/s)', l).group(1)%0A            metadata%5B'video'%5D%5B'fps'%5D = re.search('(%5Cd+ fps)', l).group(1)%0A        if l.startswith('Stream #0:1'):%0A            metadata%5B'audio'%5D = %7B%7D%0A            metadata%5B'audio'%5D%5B'codec'%5D = re.search('Audio: (.*?) ', l).group(1)%0A            metadata%5B'audio'%5D%5B'frequency'%5D = re.search(', (.*? Hz),', l).group(1)%0A            metadata%5B'audio'%5D%5B'bitrate'%5D = re.search(', (%5Cd+ kb/s)', l).group(1)%0A    return metadata%0A%0Aif __name__ == '__main__':%0A    if len(sys.argv) != 2:%0A        print(%22Usage: ./audio-metadata.py %3Cfilepath(absolute or relative)%3E%22)%0A        sys.exit(%22Syntax Error%22)%0A    print( getVideoDetails(sys.argv%5B1%5D) )%0A
|
|
21df69e2b2be4d59b5c8257d7efbf27a75eeb8dd
|
Add priming_output example
|
examples/priming_output.py
|
examples/priming_output.py
|
Python
| 0.00023
|
@@ -0,0 +1,793 @@
+#!/usr/bin/env python3%0A%22%22%22Test priming output buffer.%0A%0ASee http://www.portaudio.com/docs/proposals/020-AllowCallbackToPrimeStream.html%0A%0ANote that this is only supported in some of the host APIs.%0A%0A%22%22%22%0Aimport sounddevice as sd%0A%0A%0Adef callback(indata, outdata, frames, time, status):%0A outdata.fill(0)%0A if status.priming_output:%0A assert status.input_underflow, 'input underflow flag should be set'%0A assert not indata.any(), 'input buffer should be filled with zeros'%0A print('Priming output buffer!')%0A outdata%5B0%5D = 1%0A else:%0A print('Not priming, I quit!')%0A raise sd.CallbackStop%0A%0A%0Awith sd.Stream(channels=2, callback=callback,%0A prime_output_buffers_using_stream_callback=True) as stream:%0A while stream.active:%0A sd.sleep(100)%0A
|
|
98398398f590c3a98733193fc0ea45a1948edd0e
|
Add example to compare layers in a char-rnn task.
|
examples/recurrent-text.py
|
examples/recurrent-text.py
|
Python
| 0
|
@@ -0,0 +1,2017 @@
+#!/usr/bin/env python%0A%0Aimport climate%0Aimport matplotlib.pyplot as plt%0Aimport numpy as np%0Aimport theanets%0A%0Aimport utils%0A%0Aclimate.enable_default_logging()%0A%0ACOLORS = %5B'#d62728', '#1f77b4', '#2ca02c', '#9467bd', '#ff7f0e',%0A '#e377c2', '#8c564b', '#bcbd22', '#7f7f7f', '#17becf'%5D%0A%0AURL = 'http://www.gutenberg.org/cache/epub/2701/pg2701.txt'%0A%0Awith open(utils.find('moby.txt', URL)) as handle:%0A text = theanets.recurrent.Text(handle.read().lower().replace('%5Cn', ' '))%0A%0Aseed = text.encode(text.text%5B200000:200010%5D)%0Afor i, layer in enumerate((%0A dict(form='rnn', activation='sigmoid'),%0A dict(form='gru', activation='sigmoid'),%0A dict(form='scrn', activation='linear'),%0A dict(form='lstm'),%0A dict(form='mrnn', activation='sigmoid', factors=len(text.alpha)),%0A dict(form='clockwork', activation='linear', periods=(1, 2, 4, 8, 16)))):%0A losses = %5B%5D%0A layer.update(size=100)%0A net = theanets.recurrent.Classifier(%5B%0A 1 + len(text.alpha), layer, 1000, 1 + len(text.alpha)%5D)%0A for tm, _ in net.itertrain(text.classifier_batches(30, 16),%0A min_improvement=0.99,%0A validate_every=50,%0A patience=0,%0A algo='rmsprop',%0A learning_rate=0.0001):%0A if np.isnan(tm%5B'loss'%5D):%0A break%0A print('%7B%7D%7C%7B%7D (%7B:.1f%7D%25)'.format(%0A text.decode(seed),%0A text.decode(net.predict_sequence(seed, 30)),%0A 100 * tm%5B'acc'%5D))%0A losses.append(tm%5B'loss'%5D)%0A%0A plt.plot(losses, label=layer%5B'form'%5D, alpha=0.7, color=COLORS%5Bi%5D)%0A%0Aplt.gca().xaxis.tick_bottom()%0Aplt.gca().yaxis.tick_left()%0Aplt.gca().spines%5B'top'%5D.set_color('none')%0Aplt.gca().spines%5B'right'%5D.set_color('none')%0Aplt.gca().spines%5B'bottom'%5D.set_position(('outward', 6))%0Aplt.gca().spines%5B'left'%5D.set_position(('outward', 6))%0A%0Aplt.gca().set_ylabel('Loss')%0Aplt.gca().set_xlabel('Training Epoch')%0Aplt.gca().grid(True)%0A%0Aplt.legend()%0Aplt.show()%0A
|
|
f41dc1eb966da1505d4dedd00034debf79774807
|
add tests
|
smalltalk_like/tests.py
|
smalltalk_like/tests.py
|
Python
| 0
|
@@ -0,0 +1,1917 @@
+from obj_model import Class, Instance, TYPE, OBJECT%0A%0A%0Adef test_creation():%0A test_attribute()%0A test_subclass()%0A test_callmethod()%0A%0A%0Adef test_attribute():%0A%0A # Python Code%0A class A(object):%0A pass%0A%0A obj = A()%0A obj.a = 1%0A assert obj.a == 1%0A%0A obj.b = 2%0A assert obj.b == 2%0A%0A obj.a = 3%0A assert obj.a == 3%0A%0A # Object Model Code%0A A = Class(name='A', base_class=OBJECT, fields=%7B%7D, metaclass=TYPE)%0A obj = Instance(A)%0A%0A obj.write_attribute('a', 1)%0A assert obj.read_attribute('a') == 1%0A%0A obj.write_attribute('b', 2)%0A assert obj.read_attribute('b') == 2%0A%0A obj.write_attribute('a', 3)%0A assert obj.read_attribute('a') == 3%0A%0A%0Adef test_subclass():%0A%0A # Python Code%0A class A(object):%0A pass%0A%0A class B(A):%0A pass%0A%0A obj_b = B()%0A assert isinstance(obj_b, B) and isinstance(obj_b, A) and isinstance(obj_b, object)%0A assert not isinstance(obj_b, type)%0A%0A # Object Model Code%0A A = Class(name='A', base_class=OBJECT, fields=%7B%7D, metaclass=TYPE)%0A B = Class(name='B', base_class=A, fields=%7B%7D, metaclass=TYPE)%0A obj_b = Instance(B)%0A assert obj_b.isinstance(B) and obj_b.isinstance(A) and obj_b.isinstance(OBJECT)%0A assert not obj_b.isinstance(TYPE)%0A%0A%0Adef test_callmethod():%0A%0A # Python Code%0A class A(object):%0A%0A def m1(self):%0A return self.a%0A%0A def m2(self, n):%0A return self.a + n%0A%0A obj = A()%0A obj.a = 1%0A assert obj.m1() == 1%0A assert obj.m2(3) == 4%0A%0A # Object Model Code%0A def m1_A(self):%0A return self.read_attribute('a')%0A%0A def m2_A(self, n):%0A return self.read_attribute('a') + n%0A%0A A = Class(name='A', base_class=OBJECT, fields=%7B'm1_A': m1_A, 'm2_A': m2_A%7D, metaclass=TYPE)%0A%0A obj = Instance(A)%0A obj.write_attribute('a', 1)%0A assert obj.call_method('m1_A') == 1%0A assert obj.call_method('m2_A', 3) == 4%0A%0A%0Aif __name__ == '__main__':%0A test_creation()
|
|
6c9cf71064cf8a0c47147efeb742b2d66caa1c47
|
add stub models file
|
corehq/apps/toggle_ui/models.py
|
corehq/apps/toggle_ui/models.py
|
Python
| 0
|
@@ -0,0 +1,93 @@
+# Stub models file%0Afrom couchdbkit.ext.django.schema import Document%0A%0Aclass _(Document): pass
|
|
01c8d16df94ce558593b29974e30aa96679c6862
|
add stylize.py for feed-forward mode
|
stylize.py
|
stylize.py
|
Python
| 0
|
@@ -0,0 +1,917 @@
+%22%22%22%0ACopyright 2016-present Tony Peng%0A%0ALoad a trained feed-forward model to stylize an image.%0A%22%22%22%0A%0Aimport nets%0Aimport numpy as np%0Aimport tensorflow as tf%0Aimport utils%0Aimport time%0A%0AMODEL_PATH = 'models/trained/Udnie'%0ACONTENT_IMAGE_PATH = 'runs/Udnie/content_small.jpg'%0AOUTPUT_IMAGE_PATH = 'runs/Udnie/styled4.jpg'%0A%0Acontent_image = utils.read_image(CONTENT_IMAGE_PATH)%0A%0Awith tf.Session() as sess:%0A x = tf.placeholder(tf.float32, shape=(1, ) + content_image.shape)%0A stylzr = nets.stylzr(x)%0A%0A # load the model%0A model = tf.train.latest_checkpoint(MODEL_PATH)%0A saver = tf.train.Saver()%0A saver.restore(sess, model)%0A%0A # evaluate!%0A start_time = time.time()%0A styled_image = stylzr.eval(feed_dict=%7Bx: np.array(%5Bcontent_image%5D)%7D)%0A print(%22eval: %22+str(time.time() - start_time)+%22s%22)%0A styled_image = styled_image.reshape(styled_image.shape%5B1:%5D)%0A utils.write_image(styled_image, OUTPUT_IMAGE_PATH)%0A
|
|
544c9cf63f54ca9e77fa37ab5e529791f9e00c3c
|
Create sysinfo.py
|
sysinfo.py
|
sysinfo.py
|
Python
| 0.000002
|
@@ -0,0 +1,61 @@
+#!/usr/bin/env python3%0A%0Aif __name__ == '__main__':%0A    print()%0A
|
|
277eca85f5eaf009c9ae7a38ef28801a747f9efa
|
Add copyright notice
|
nose2/util.py
|
nose2/util.py
|
import os
import re
import sys
try:
from compiler.consts import CO_GENERATOR
except ImportError:
# IronPython doesn't have a complier module
CO_GENERATOR=0x20
try:
from inspect import isgeneratorfunction # new in 2.6
except ImportError:
import inspect
# backported from Python 2.6
def isgeneratorfunction(func):
return bool((inspect.isfunction(func) or inspect.ismethod(func)) and
func.func_code.co_flags & CO_GENERATOR)
import six
IDENT_RE = re.compile(r'^[_a-zA-Z]\w*$', re.UNICODE)
VALID_MODULE_RE = re.compile(r'[_a-zA-Z]\w*\.py$', re.UNICODE)
def ln(label, char='-', width=70):
"""Draw a divider, with label in the middle.
>>> ln('hello there')
'---------------------------- hello there -----------------------------'
Width and divider char may be specified. Defaults are 70 and '-'
respectively.
"""
label_len = len(label) + 2
chunk = (width - label_len) // 2
out = '%s %s %s' % (char * chunk, label, char * chunk)
pad = width - len(out)
if pad > 0:
out = out + (char * pad)
return out
def valid_module_name(path):
return VALID_MODULE_RE.search(path)
def name_from_path(path):
# back up to find module root
parts = []
path = os.path.normpath(path)
base = os.path.splitext(path)[0]
candidate, top = os.path.split(base)
parts.append(top)
while candidate:
if ispackage(candidate):
candidate, top = os.path.split(candidate)
parts.append(top)
else:
break
return '.'.join(reversed(parts))
def module_from_name(name):
__import__(name)
return sys.modules[name]
def ispackage(path):
"""Is this path a package directory?"""
if os.path.isdir(path):
# at least the end of the path must be a legal python identifier
# and __init__.py[co] must exist
end = os.path.basename(path)
if IDENT_RE.match(end):
for init in ('__init__.py', '__init__.pyc', '__init__.pyo'):
if os.path.isfile(os.path.join(path, init)):
return True
if sys.platform.startswith('java') and \
os.path.isfile(os.path.join(path, '__init__$py.class')):
return True
return False
def safe_decode(string):
if string is None:
return string
try:
return string.decode()
except UnicodeDecodeError:
pass
try:
return string.decode('utf-8')
except UnicodeDecodeError:
return six.u('<unable to decode>')
|
Python
| 0
|
@@ -1,12 +1,294 @@
+%22%22%22%0AThis module contains some code copied from unittest2/loader.py and other%0Acode developed in reference to that module and others within unittest2.%0A%0Aunittest2 is Copyright (c) 2001-2010 Python Software Foundation; All%0ARights Reserved. See: http://docs.python.org/license.html%0A%0A%22%22%22%0A
import os%0Aim
|
5dd7f77f97e897591bc1d7615a986dbe0ad4f183
|
Add `caption` and `tbody` tags to whitelist
|
src/ggrc/models/__init__.py
|
src/ggrc/models/__init__.py
|
# Copyright (C) 2013 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: dan@reciprocitylabs.com
# Maintained By: dan@reciprocitylabs.com
from .all_models import *
"""All gGRC model objects and associated utilities."""
def create_db_with_create_all():
from ggrc.app import db
import ggrc.models.all_models
db.create_all()
def create_db_with_migrations(quiet=False):
from ggrc.app import db
from ggrc.migrate import upgradeall
import logging
if quiet:
logging.disable(logging.INFO)
upgradeall()
if quiet:
logging.disable(logging.NOTSET)
def drop_db_with_drop_all():
from ggrc.app import db
import ggrc.models.all_models
db.drop_all()
def drop_db_with_migrations(quiet=False):
from ggrc.app import db
from ggrc.migrate import downgradeall
import ggrc.models.all_models
import logging
from ggrc import settings
if quiet:
logging.disable(logging.INFO)
if 'mysql' in settings.SQLALCHEMY_DATABASE_URI:
db.engine.execute('SET FOREIGN_KEY_CHECKS = 0')
downgradeall(drop_versions_table=True)
if quiet:
logging.disable(logging.NOTSET)
if 'mysql' in settings.SQLALCHEMY_DATABASE_URI:
db.engine.execute('SET FOREIGN_KEY_CHECKS = 1')
def create_db(use_migrations=False, quiet=False):
if use_migrations:
create_db_with_migrations(quiet)
else:
create_db_with_create_all()
def drop_db(use_migrations=False, quiet=False):
if use_migrations:
drop_db_with_migrations(quiet)
else:
drop_db_with_drop_all()
def init_app(app):
from .all_models import all_models
[model._inflector for model in all_models]
from sqlalchemy.orm.session import Session
from sqlalchemy import event
from .cache import Cache
from ggrc.services.common import get_cache
def update_cache_before_flush(session, flush_context, objects):
cache = get_cache(create = True)
if cache:
cache.update_before_flush(session, flush_context)
def update_cache_after_flush(session, flush_context):
cache = get_cache(create = False)
if cache:
cache.update_after_flush(session, flush_context)
def clear_cache(session):
cache = get_cache()
if cache:
cache.clear()
event.listen(Session, 'before_flush', update_cache_before_flush)
event.listen(Session, 'after_flush', update_cache_after_flush)
event.listen(Session, 'after_commit', clear_cache)
event.listen(Session, 'after_rollback', clear_cache)
# Register event listener on all String and Text attributes to sanitize them.
import bleach
import sqlalchemy as sa
from ggrc.models.reflection import SanitizeHtmlInfo
# Set up custom tags/attributes for bleach
bleach_tags = ['strong', 'em', 'b', 'i', 'p', 'code', 'pre', 'tt', 'samp',
'kbd', 'var', 'sub', 'sup', 'dfn', 'cite', 'big', 'small', 'address',
'hr', 'br', 'div', 'span', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'ul',
'ol', 'li', 'dl', 'dt', 'dd', 'abbr', 'acronym', 'a', 'img',
'blockquote', 'del', 'ins', 'table', 'tr', 'td', 'th'] + bleach.ALLOWED_TAGS
bleach_attrs = {}
attrs = ['href', 'src', 'width', 'height', 'alt', 'cite', 'datetime',
'title', 'class', 'name', 'xml:lang', 'abbr']
for tag in bleach_tags:
bleach_attrs[tag] = attrs
def cleaner(target, value, oldvalue, initiator):
ret = bleach.clean(value, bleach_tags, bleach_attrs)
return ret
for model in all_models:
attr_info = SanitizeHtmlInfo(model)
for attr_name in attr_info._sanitize_html:
attr = getattr(model, attr_name)
sa.event.listen(attr, 'set', cleaner, retval=True)
from .inflector import get_model
|
Python
| 0
|
@@ -2734,16 +2734,34 @@
tags = %5B
+%0A 'caption',
'strong'
@@ -2811,17 +2811,16 @@
'samp',
-
%0A '
@@ -2887,17 +2887,16 @@
ddress',
-
%0A '
@@ -2962,17 +2962,16 @@
', 'ul',
-
%0A '
@@ -3029,17 +3029,16 @@
, 'img',
-
%0A '
@@ -3072,16 +3072,25 @@
'table',
+ 'tbody',
'tr', '
@@ -3098,16 +3098,24 @@
d', 'th'
+,%0A
%5D + blea
@@ -3161,16 +3161,23 @@
ttrs = %5B
+%0A
'href',
@@ -3228,17 +3228,16 @@
tetime',
-
%0A '
@@ -3279,16 +3279,23 @@
, 'abbr'
+%0A
%5D%0A for
|
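Reconstructed from the hunks, the tag whitelist after this change reads:

    bleach_tags = [
        'caption', 'strong', 'em', 'b', 'i', 'p', 'code', 'pre', 'tt', 'samp',
        'kbd', 'var', 'sub', 'sup', 'dfn', 'cite', 'big', 'small', 'address',
        'hr', 'br', 'div', 'span', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'ul',
        'ol', 'li', 'dl', 'dt', 'dd', 'abbr', 'acronym', 'a', 'img',
        'blockquote', 'del', 'ins', 'table', 'tbody', 'tr', 'td', 'th',
        ] + bleach.ALLOWED_TAGS
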
475ea65cce34b7af03a7355e16d95104292aa7fb
|
Create suntimes.py
|
suntimes.py
|
suntimes.py
|
Python
| 0.000055
|
@@ -0,0 +1,1345 @@
+#! /bin/python%0A# -*- coding: UTF-8 -*-%0Aimport urllib2, json, datetime, time%0Aimport dateutil.parser%0A%0Aglobal latitude%0Aglobal longitude%0A%0Aapi=json.loads(urllib2.urlopen(%22http://freegeoip.net/json/%22).read().decode(%22UTF-8%22))%0Alatitude=str(api%5B'latitude'%5D)%0Alongitude=str(api%5B%22longitude%22%5D)%0A%0Adef getsunrise(lat=%22%22, lng=%22%22, formatted=1):%0A if lat==%22%22 or lng == %22%22:%0A lat=latitude%0A lng=longitude%0A url=%22http://api.sunrise-sunset.org/json?lat=%22 + lat + %22&lng=%22 + lng + %22&formatted=%22 + str(formatted)%0A print url%0A sunapi=urllib2.urlopen(url)%0A return json.loads(sunapi.read().decode(%22UTF-8%22))%5B'results'%5D%5B'sunrise'%5D%0A%0Adef getsunset(lat=%22%22, lng=%22%22, formatted=%221%22):%0A if lat==%22%22 or lng == %22%22: %0A lat=latitude%0A lng=longitude%0A sunapi=urllib2.urlopen(%22http://api.sunrise-sunset.org/json?lat=%22 + lat + %22&lng=%22 + lng + %22&formatted=%22 + str(formatted))%0A return json.loads(sunapi.read().decode(%22UTF-8%22))%5B'results'%5D%5B'sunset'%5D%0A%0Adef nighttrue(lat=%22%22, lng=%22%22):%0A sunrise = dateutil.parser.parse(getsunrise(lat, lng, 0).replace(%22+00:00%22,%22%22))%0A sunset = dateutil.parser.parse(getsunset(lat, lng, 0).replace(%22+00:00%22,%22%22))%0A timenow = datetime.datetime.now()%0A if sunrise %3E= timenow %3E= sunset ==False:%0A return False%0A else:%0A return True%0A%0Aif __name__ == '__main__':%0A bools=nighttrue()%0A if bools == True:%0A print %22night time%22%0A elif bools == False:%0A print %22day%22%0A else:%0A print bools%0A %0A
|
|
561b1b0bf1950bac54bc9c079daf6c09b3f87158
|
Create pd.py
|
src/ml/pd.py
|
src/ml/pd.py
|
Python
| 0.000002
|
@@ -0,0 +1,932 @@
+#encoding=utf8%0Aimport pandas as pd%0Aimport numpy as np%0Aimport matplotlib.pyplot as plt%0A%0As = pd.Series(%5B1,3,5,np.nan,6,8%5D)%0Aprint(s)%0Adates = pd.date_range('20130101', periods=6)%0Aprint(dates)%0A# Create a DataFrame%0Adf = pd.DataFrame(np.random.randn(6,4), index=dates, columns=list('ABCD'))%0Aprint(df)%0A# Create a DataFrame from a dict%0Af2 = pd.DataFrame(%7B 'A' : 1.,%0A                    'B' : pd.Timestamp('20130102'),%0A                    'C' : pd.Series(1,index=list(range(4)),dtype='float32'),%0A                    'D' : np.array(%5B3%5D * 4,dtype='int32'),%0A                    'E' : pd.Categorical(%5B%22test%22,%22train%22,%22test%22,%22train%22%5D),%0A                    'F' : 'foo' %7D)%0Aprint(f2)%0A%0A# Explore the data%0A%0Aprint(%22first five rows: %22,df.head())%0Aprint(%22last three rows: %22,df.tail(3))%0Aprint(%22index: %22,df.index)%0Aprint(%22columns: %22,df.columns)%0Aprint(%22values: %22,df.values)%0Aprint(%22describe: %22,df.describe())%0Aprint(%22transpose: %22,df.T)%0Aprint(%22sort by axis: %22,df.sort_index(axis=0, ascending=False))%0Aprint(%22sort by column: %22,df.sort_values(by='B'))%0A
|
|
f9317419417ec348b6520ce6aecf852a391d4b01
|
Add importers module init
|
po2strings/importers/__init__.py
|
po2strings/importers/__init__.py
|
Python
| 0
|
@@ -0,0 +1,23 @@
+# -*- coding: utf-8 -*-
|
|
124190aae0f39885011a5f12667d2348ffa32d09
|
add invoke task to remove trailing ws
|
tasks/ws.py
|
tasks/ws.py
|
Python
| 0.000002
|
@@ -0,0 +1,919 @@
+import os%0A%0Afrom invoke import task%0A%0Afrom ._config import ROOT_DIR, NAME%0A%0A%0Adef trim_py_files(directory):%0A for root, dirs, files in os.walk(directory):%0A for fname in files:%0A filename = os.path.join(root, fname)%0A if fname.endswith('.py'):%0A with open(filename, 'rb') as f:%0A code = f.read().decode()%0A lines = %5Bline.rstrip() for line in code.splitlines()%5D%0A while lines and not lines%5B-1%5D:%0A lines.pop(-1)%0A lines.append('') # always end with a newline%0A with open(filename, 'wb') as f:%0A f.write('%5Cn'.join(lines).encode())%0A%0A%0A@task%0Adef ws(ctx):%0A %22%22%22 Remove trailing whitespace from all py files.%0A %22%22%22%0A trim_py_files(os.path.join(ROOT_DIR, 'flexx'))%0A trim_py_files(os.path.join(ROOT_DIR, 'flexxamples'))%0A trim_py_files(os.path.join(ROOT_DIR, 'tasks'))%0A
|
|
9f6952e0c46795bb704c9169cd71fdf18d952ebf
|
Add ChEBI client
|
indra/databases/chebi_client.py
|
indra/databases/chebi_client.py
|
Python
| 0
|
@@ -0,0 +1,466 @@
+import os%0Aimport csv%0Afrom functools32 import lru_cache%0A%0Achebi_to_pubchem_file = os.path.dirname(os.path.abspath(__file__)) + %5C%0A '/../resources/chebi_to_pubchem.tsv'%0Atry:%0A fh = open(chebi_to_pubchem_file, 'rt')%0A rd = csv.reader(fh, delimiter='%5Ct')%0A chebi_pubchem = %7B%7D%0A for row in rd:%0A chebi_pubchem%5Brow%5B0%5D%5D = row%5B1%5D%0Aexcept IOError:%0A chebi_pubchem = %7B%7D%0A%0Adef get_pubchem_id(chebi_id):%0A return chebi_pubchem.get(chebi_id)%0A
|
|
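Usage sketch; the key format depends on the bundled TSV file, so the identifier below is an assumption:

    from indra.databases import chebi_client

    pubchem_id = chebi_client.get_pubchem_id('15365')  # '15365' is a made-up key
    print(pubchem_id)  # the mapped PubChem CID, or None if the key is unknown
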
3826858481c4f9bbf8d887fa390322f8190c96e2
|
Add module to list ip addresses
|
py3status/modules/net_iplist.py
|
py3status/modules/net_iplist.py
|
Python
| 0
|
@@ -0,0 +1,2768 @@
+# -*- coding: utf-8 -*-%0A%22%22%22%0ADisplay the list of current IPs. This excludes loopback IPs and displays%0A%22no connection%22 if there is no connection.%0A%0AConfiguration parameters%0A ignore: list of IPs to ignore. Can use shell style wildcards.%0A (default: %5B'127.*'%5D)%0A no_connection: string to display if there are no non-ignored IPs%0A (default: 'no connection')%0A separator: string to use between IPs.%0A (default: ' ')%0A%22%22%22%0A%0A# import your useful libs here%0Aimport socket%0Aimport struct%0Aimport fcntl%0Aimport array%0Afrom fnmatch import fnmatch%0A%0A%0Aclass Py3status:%0A cache_timeout = 30%0A separator = ' '%0A no_connection = 'no connection'%0A ignore = %5B'127.*'%5D%0A%0A def __init__(self):%0A pass%0A%0A def ip_list(self):%0A response = %7B%0A 'cached_until': self.py3.time_in(seconds=self.cache_timeout),%0A 'full_text': ''%0A %7D%0A%0A ip = %5B%5D%0A ifaces = self._list_ifaces()%0A for iface in ifaces:%0A addr = self._get_ip(iface)%0A add = True%0A for ignore in self.ignore:%0A if fnmatch(addr, ignore):%0A add = False%0A break%0A if add:%0A ip.append(addr)%0A if len(ip) == 0:%0A response%5B'full_text'%5D = self.no_connection%0A response%5B'color'%5D = self.py3.COLOR_BAD%0A else:%0A response%5B'full_text'%5D = self.separator.join(ip)%0A response%5B'color'%5D = self.py3.COLOR_GOOD%0A%0A return response%0A%0A def _list_ifaces(self):%0A SIOCGIFCONF = 0x8912%0A sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)%0A sockfd = sock.fileno()%0A max_possible = 128 # arbitrary. raise if needed.%0A data = max_possible * 32%0A names = array.array('B', %5B0%5D) * data%0A outbytes = struct.unpack('iL', fcntl.ioctl(sockfd, SIOCGIFCONF,%0A struct.pack('iL', data,%0A names.buffer_info()%5B0%5D)))%5B0%5D%0A namestr = names.tostring()%0A lst = %5B%5D%0A for i in range(0, outbytes, 40):%0A name = namestr%5Bi:i+16%5D.split(b'%5Cx00', 1)%5B0%5D%0A lst.append(name)%0A return lst%0A%0A def _get_ip(self, iface):%0A SIOCGIFADDR = 0x8915%0A sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)%0A sockfd = sock.fileno()%0A ifreq = struct.pack('16sH14s', iface, socket.AF_INET, b'%5Cx00'*14)%0A try:%0A res = fcntl.ioctl(sockfd, SIOCGIFADDR, ifreq)%0A except:%0A return None%0A ip = struct.unpack('16sH2x4s8x', res)%5B2%5D%0A return socket.inet_ntoa(ip)%0A%0Aif __name__ == %22__main__%22:%0A %22%22%22%0A Test this module by calling it directly.%0A %22%22%22%0A from py3status.module_test import module_test%0A module_test(Py3status)%0A
|
|
66ad00861f7143e35ab80674295fa5bf7998cfa5
|
Create pytabcomplete.py
|
HexChat/pytabcomplete.py
|
HexChat/pytabcomplete.py
|
Python
| 0.000002
|
@@ -0,0 +1,1519 @@
+from __future__ import print_function%0Aimport hexchat%0A%0A__module_name__ = %22PythonTabComplete%22%0A__module_author__ = %22TingPing%22%0A__module_version__ = %220%22%0A__module_description__ = %22Tab completes modules in Interactive Console%22%0A%0Alastmodule = ''%0Alastcomplete = 0%0Alasttext = ''%0A%0Adef keypress_cb(word, word_eol, userdata):%0A%09global lastmodule%0A%09global lastcomplete%0A%09global lasttext%0A%0A%09if not word%5B0%5D == '65289': # Tab%0A%09%09return%0A%09if not hexchat.get_info('channel') == '%3E%3Epython%3C%3C':%0A%09%09return%0A%0A%09text = hexchat.get_info('inputbox')%0A%09#pos = hexchat.get_prefs('state_cursor') # TODO: allow completing mid line%0A%09if not text:# or not pos:%0A%09%09return%0A%0A%09try:%0A%09%09module = text.split(' ')%5B-1%5D.split('.')%5B0%5D%0A%09except IndexError:%0A%09%09return%0A%0A%09if lastmodule != module:%0A%09%09lastcomplete = 0%0A%09%09lasttext = text%0A%09lastmodule = module%0A%0A%09try:%0A%09%09exec('import %7B%7D'.format(module)) # Has to be imported to dir() it%0A%09%09completes = eval('dir(%7B%7D)'.format(module))%0A%09%09if lastcomplete + 1 %3C len(completes):%0A%09%09%09lastcomplete = lastcomplete + 1%0A%09%09else:%0A%09%09%09lastcomplete = 0%0A%09except (NameError, SyntaxError, ImportError):%0A%09%09return%0A%0A%09if lasttext%5B-1%5D != '.':%0A%09%09sep = '.'%0A%09else:%0A%09%09sep = ''%0A%0A%09newtext = lasttext + sep + completes%5Blastcomplete%5D%0A%0A%09hexchat.command('settext %7B%7D'.format(newtext))%0A%09hexchat.command('setcursor %7B%7D'.format(len(newtext)))%0A%0Adef unload_cb(userdata):%0A%09print(__module_name__, 'version', __module_version__, 'unloaded.')%0A%0Ahexchat.hook_print('Key Press', keypress_cb)%0Ahexchat.hook_unload(unload_cb)%0Aprint(__module_name__, 'version', __module_version__, 'loaded.')%0A
|
|
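Stripped of the HexChat input-box plumbing, the completion lookup this plugin performs is just "import the module by name, then filter dir()". A standalone sketch, using importlib.import_module in place of the plugin's exec/eval pair:

```python
import importlib

def completions(module_name, prefix=''):
    """Attribute names of module_name that start with prefix."""
    try:
        module = importlib.import_module(module_name)
    except ImportError:
        return []
    return [name for name in dir(module) if name.startswith(prefix)]

print(completions('os', 'get')[:5])  # e.g. ['get_blocking', 'getcwd', ...]
```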
248023106d4e881110a646e9d078ecad4f58e24d
|
Add a Python program which reads from a pipe and writes the data it gets to syslog.
|
pipelogger.py
|
pipelogger.py
|
Python
| 0
|
@@ -0,0 +1,565 @@
+#!/usr/bin/env python%0A#%0A%0Aimport argparse%0Aimport os%0Aimport syslog%0A%0Aparser = argparse.ArgumentParser(%0A%09description='Syslog messages as read from a pipe')%0A%0Aparser.add_argument('-i', '--ident',%0A%09help='Use the given identifier for syslogging',%0A%09required=True)%0Aparser.add_argument('pipe', help='Pipe file to read log records from')%0Aargs = parser.parse_args()%0A%0Asyslog.openlog(args.ident, 0)%0A%0Aif not os.path.exists(args.pipe):%0A%09os.mkfifo(args.pipe)%0A%0Awhile os.path.exists(args.pipe):%0A%09f = open(args.pipe, 'r')%0A%0A%09for l in f:%0A%09%09syslog.syslog(l)%0A%0A%09f.close()%0A%0Asyslog.closelog()%0A
|
|
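The reader above owns the FIFO; the writer side is any process that opens the same path for writing. A minimal sketch of that side (the path is hypothetical and must match the pipe argument given to pipelogger.py):

```python
import os

FIFO = '/tmp/pipelogger.pipe'  # hypothetical; pass the same path to pipelogger.py

if not os.path.exists(FIFO):
    os.mkfifo(FIFO)

# Opening a FIFO for writing blocks until a reader has it open.
with open(FIFO, 'w') as pipe:
    pipe.write('hello from the writer\n')
```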
85d29ef779687a3b9db5333ce9921fc20e66b985
|
Create test_get.py
|
test_get.py
|
test_get.py
|
Python
| 0.000003
|
@@ -0,0 +1,371 @@
+#!/usr/bin/env python%0A# -*- coding=utf-8 -*-%0A#%E4%BB%A5get%E6%98%8E%E6%96%87%E7%9A%84%E6%96%B9%E5%BC%8F%E4%BC%A0%E9%80%92%E6%95%B0%E6%8D%AE%0Aimport urllib%0Aimport urllib2%0A %0Avalues=%7B%7D%0Avalues%5B'username'%5D = %221016903103@qq.com%22%0Avalues%5B'password'%5D=%22XXXX%22%0Adata = urllib.urlencode(values) %0Aurl = %22http://passport.csdn.net/account/login%22%0Ageturl = url + %22?%22+data #%E5%AD%97%E7%AC%A6%E4%B8%B2%E5%90%88%E5%B9%B6%0Arequest = urllib2.Request(geturl)%0Aresponse = urllib2.urlopen(request)%0Aprint response.read()%0A
|
|
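The record is Python 2; the same pass-data-in-the-query-string pattern in Python 3 moves to urllib.parse and urllib.request. A sketch, with httpbin.org standing in for the CSDN endpoint, and credentials shown in the URL only to mirror the record (a real login should POST them):

```python
from urllib.parse import urlencode
from urllib.request import urlopen

values = {'username': 'user@example.com', 'password': 'XXXX'}
geturl = 'https://httpbin.org/get?' + urlencode(values)

with urlopen(geturl) as response:
    print(response.read()[:200])
```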
5d54d3f9ead119671affa9bb04ec64efc7c3eea4
|
Fix check.py a+x perms
|
check.py
|
check.py
|
#!/usr/bin/env python3
# coding: utf-8
"""
EXPERIMENTAL
Regular expression rule checker for Khan Academy translations.
Instructions:
- Download https://crowdin.com/download/project/khanacademy.zip
- Unzip the 'de' folder.
- From the directory where the 'de' folder is located, run this script.
"""
import polib
import re
import os
import os.path
from multiprocessing import Pool
from ansicolor import red, black
from jinja2 import Environment, FileSystemLoader
def readPOFiles(directory):
"""
Read all PO files from a given directory and return
a dictionary path -> PO object.
Also supports using a single file as argument.
"""
if os.path.isfile(directory): #Single file
poFilenames = [directory]
else:
poFilenames = []
        #Recursively iterate directory, ignore everything except *.po
for (curdir, _, files) in os.walk(directory):
for f in files:
#Ignore non-PO files
if not f.endswith(".po"): continue
#Add to list of files to process
poFilenames.append(os.path.join(curdir, f))
# Parsing is computationally expensive.
    # Distribute parsing amongst distinct processes
# if there is a significant number of files
if len(poFilenames) > 3:
pool = Pool(None) #As many as CPUs
parsedFiles = pool.map(polib.pofile, poFilenames)
return {path: parsedFile
for path, parsedFile
in zip(poFilenames, parsedFiles)}
else: #Only a small number of files, process directly
return {path: polib.pofile(path) for path in poFilenames}
def findByRule(poFiles, msgstrRegexStr):
"""
In a dictionary of PO objects, find msgstrs that satisfy a given regex.
"""
#Precompile expressions
msgstrRegex = re.compile(msgstrRegexStr, re.UNICODE)
#Iterate over files
for filename, po in poFiles.items():
for entry in po:
searchResult = msgstrRegex.search(entry.msgstr)
if searchResult:
yield (entry, searchResult.group(0), filename)
def download():
import subprocess
url = "https://crowdin.com/download/project/khanacademy.zip"
subprocess.check_output(["wget", url])
subprocess.check_output(["unzip", "khanacademy.zip", "de/*"], shell=False)
#Coordinate separated by comma instead of |
commaSeparatedCoordinate = r"\$\(\d+\s*\,\s*\d+\)\$"
assert(re.match(commaSeparatedCoordinate, "$(12,3)$"))
#Simple currency value in dollar (matches both comma sep)
simpleDollarCurrency = r"\$\s*\\\\\$\s*\d+([.,]\d+)?\s*\$"
assert(re.match(simpleDollarCurrency, "$\\\\$12$"))
assert(re.match(simpleDollarCurrency, "$\\\\$12.5$"))
assert(re.match(simpleDollarCurrency, "$\\\\$12,5$"))
def hitsToHTML(poFiles, outfile, rule):
hits = list(findByRule(poFiles, rule))
#Initialize template engine
env = Environment(loader=FileSystemLoader('templates'))
template = env.get_template("template.html")
with open(outfile, "w") as outfile:
outfile.write(template.render(hits=hits))
return len(hits)
if __name__ == "__main__":
download()
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('directory', help='The directory to look for translation files')
parser.add_argument('outfile', help='The HTML output file')
args = parser.parse_args()
poFiles = readPOFiles(args.directory)
print(black("Read %d files" % len(poFiles), bold=True))
ctr = hitsToHTML(poFiles, args.outfile, commaSeparatedCoordinate)
    print("Found %d rule violations" % ctr)
|
Python
| 0.000002
| |
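check.py pairs every rule with an assert that pins down what the expression matches. A hypothetical extra rule in the same style, flagging decimal commas inside formulas; the rule name and pattern are illustrations, not part of the record:

```python
import re

# Decimal comma inside a formula, e.g. "$1,5$" where English would use "$1.5$"
decimalComma = r"\$\s*\d+,\d+\s*\$"
assert re.match(decimalComma, "$1,5$")
assert not re.match(decimalComma, "$1.5$")
```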
66201e6d73a909bc0ad932ad4b5de9d2ce30d4fe
|
add Blob class
|
PhloxAR/features/blob.py
|
PhloxAR/features/blob.py
|
Python
| 0.000001
|
@@ -0,0 +1,356 @@
+# -*- coding:utf-8 -*-%0Afrom __future__ import division, print_function%0Afrom __future__ import absolute_import, unicode_literals%0A%0Afrom PhloxAR.base import math%0Afrom PhloxAR.base import sss%0Afrom PhloxAR.base import *%0Afrom PhloxAR.features.feature import Feature%0Afrom PhloxAR.color import Color%0Afrom PhloxAR.image import Image%0A%0A%0Aclass Blob(Feature):%0A pass
|
|
0b06fb26fa5393e4ba80e2942ebba34d9f9fa4de
|
Create 1st Python script
|
Python/spatial-basics.py
|
Python/spatial-basics.py
|
Python
| 0.000001
|
@@ -0,0 +1,60 @@
+from shapely.wkt import loads%0Ag = loads('POINT (0.0 0.0)')%0A%0A
|
|
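A few lines more of the same shapely usage, as a sketch: the WKT round-trip plus the basic geometry accessors that follow naturally from loads():

```python
from shapely.wkt import loads

g = loads('POINT (0.0 0.0)')
print(g.x, g.y)            # 0.0 0.0
print(g.wkt)               # POINT (0 0)
print(g.buffer(1.0).area)  # ~3.14, a polygonal approximation of the unit disc
```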
a61d37449f8000a83942513f2ad71151ef26822d
|
Add unit tests for synapse.cells
|
synapse/tests/test_cells.py
|
synapse/tests/test_cells.py
|
Python
| 0.000001
|
@@ -0,0 +1,508 @@
+import synapse.axon as s_axon%0Aimport synapse.cells as s_cells%0Aimport synapse.cryotank as s_cryotank%0A%0Afrom synapse.tests.common import *%0A%0Aclass CellTest(SynTest):%0A%0A def test_cell_cryo(self):%0A with self.getTestDir() as dirn:%0A with s_cells.cryo(dirn) as cryo:%0A self.isinstance(cryo, s_cryotank.CryoCell)%0A%0A def test_cell_axon(self):%0A with self.getTestDir() as dirn:%0A with s_cells.axon(dirn) as axon:%0A self.isinstance(axon, s_axon.AxonCell)%0A
|
|
7454abdfba5d37d81dc3ad4bf7fb2f63bc552f38
|
Add wsgi file
|
toolkit.wsgi
|
toolkit.wsgi
|
Python
| 0.000001
|
@@ -0,0 +1,200 @@
+import os%0Aimport sys%0A%0Aos.environ%5B'DJANGO_SETTINGS_MODULE'%5D = 'settings'%0Asys.path.append(os.path.abspath(%22.%22))%0A%0Aimport django.core.handlers.wsgi%0Aapplication = django.core.handlers.wsgi.WSGIHandler()%0A%0A%0A
|
|
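The record uses the pre-Django-1.4 WSGIHandler spelling. On any modern Django the equivalent bootstrap, as a sketch with 'settings' kept as the module name from the record, is:

```python
import os
import sys

os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'settings')
sys.path.append(os.path.abspath('.'))

from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
```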
324161f37b54aee71de801b4206f925c967d11d4
|
Add a couple of simple tests and fix typo
|
tasklib/tests.py
|
tasklib/tests.py
|
Python
| 0.00002
|
@@ -0,0 +1,712 @@
+import shutil%0Aimport tempfile%0Aimport unittest%0Aimport uuid%0A%0Afrom .task import TaskWarrior%0A%0A%0Aclass TasklibTest(unittest.TestCase):%0A%0A def setUp(self):%0A self.tmp = tempfile.mkdtemp()%0A self.tw = TaskWarrior(data_location=self.tmp)%0A%0A def tearDown(self):%0A shutil.rmtree(self.tmp)%0A%0A%0Aclass TaskFilterTest(TasklibTest):%0A%0A def test_all_empty(self):%0A self.assertEqual(len(self.tw.tasks.all()), 0)%0A%0A def test_all_non_empty(self):%0A self.tw.execute_command(%5B'add', 'test task'%5D)%0A self.assertEqual(len(self.tw.tasks.all()), 1)%0A self.assertEqual(self.tw.tasks.all()%5B0%5D%5B'description'%5D, 'test task')%0A self.assertEqual(self.tw.tasks.all()%5B0%5D%5B'status'%5D, 'pending')%0A
|
|
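The setUp/tearDown pair is the classic unittest way to get an isolated data directory; under pytest the same isolation collapses into a fixture. A sketch, assuming the package-level import path tasklib.task:

```python
import pytest
from tasklib.task import TaskWarrior  # import path assumed from the record

@pytest.fixture
def tw(tmp_path):
    return TaskWarrior(data_location=str(tmp_path))

def test_all_empty(tw):
    assert len(tw.tasks.all()) == 0
```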
86d51e36ca0f5772717d72d4729fb331a0066636
|
Fix smoke tests to delete resources synchronously.
|
tempest/smoke.py
|
tempest/smoke.py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack, LLC
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from tempest import test
LOG = logging.getLogger(__name__)
class SmokeTest(object):
"""
Base test case class mixin for "smoke tests"
Smoke tests are tests that have the following characteristics:
* Test basic operations of an API, typically in an order that
a regular user would perform those operations
* Test only the correct inputs and action paths -- no fuzz or
random input data is sent, only valid inputs.
* Use only the default client tool for calling an API
"""
pass
class DefaultClientSmokeTest(test.DefaultClientTest, SmokeTest):
"""
Base smoke test case class that provides the default clients to
access the various OpenStack APIs.
"""
@classmethod
def tearDownClass(cls):
# NOTE(jaypipes): Because smoke tests are typically run in a specific
# order, and because test methods in smoke tests generally create
# resources in a particular order, we destroy resources in the reverse
# order in which resources are added to the smoke test class object
if not cls.resources:
return
thing = cls.resources.pop()
while True:
LOG.debug("Deleting %r from shared resources of %s" %
(thing, cls.__name__))
# Resources in novaclient all have a delete() method
# which destroys the resource...
thing.delete()
if not cls.resources:
return
thing = cls.resources.pop()
|
Python
| 0
|
@@ -1758,38 +1758,37 @@
object%0A
-if not
+while
cls.resources:%0A
@@ -1787,39 +1787,24 @@
ources:%0A
- return%0A
thin
@@ -1831,28 +1831,8 @@
p()%0A
- while True:%0A
@@ -1938,16 +1938,17 @@
ame__))%0A
+%0A
@@ -1957,35 +1957,42 @@
#
-Resources in novaclient all
+OpenStack resources are assumed to
hav
@@ -2003,23 +2003,16 @@
delete()
- method
%0A
@@ -2017,16 +2017,23 @@
#
+ method
which d
@@ -2075,32 +2075,33 @@
thing.delete()%0A
+%0A
if n
@@ -2100,89 +2100,741 @@
-if not cls.resources:%0A return%0A thing = cls.resources.pop(
+def is_deletion_complete():%0A # Deletion testing is only required for objects whose%0A # existence cannot be checked via retrieval.%0A if isinstance(thing, dict):%0A return True%0A try:%0A thing.get()%0A except Exception as e:%0A # Clients are expected to return an exception%0A # called 'NotFound' if retrieval fails.%0A if e.__class__.__name__ == 'NotFound':%0A return True%0A raise%0A return False%0A%0A # Block until resource deletion has completed or timed-out%0A test.call_until_true(is_deletion_complete, 10, 1
)%0A
|
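test.call_until_true, which the diff uses to block until deletion completes, amounts to a timed poll loop. A generic sketch; tempest's real implementation may differ in detail:

```python
import time

def call_until_true(func, duration, sleep_for):
    """Call func every sleep_for seconds until it returns True or
    duration seconds elapse; report whether it ever returned True."""
    deadline = time.time() + duration
    while time.time() < deadline:
        if func():
            return True
        time.sleep(sleep_for)
    return False
```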
ce8f335b8b52d682cd233a96529201a4c537e88d
|
Add Python 3.5
|
setup.py
|
setup.py
|
#!/usr/bin/env python
from setuptools import setup
from codecs import open
from scot import __version__ as ver
with open('README.md', encoding='utf-8') as readme:
long_description = readme.read()
setup(
name='scot',
version=ver,
description='EEG/MEG Source Connectivity Toolbox',
long_description=long_description,
url='https://github.com/scot-dev/scot',
author='SCoT Development Team',
author_email='scotdev@googlegroups.com',
license='MIT',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
keywords='source connectivity EEG MEG ICA',
packages=['scot', 'scot.eegtopo', 'scot.external'],
package_data={'scot': ['scot.ini']}
)
|
Python
| 0.999999
|
@@ -968,16 +968,64 @@
: 3.4',%0A
+ 'Programming Language :: Python :: 3.5'%0A
%5D,%0A
|
fc70bf43639f34d92b21c66269ee2e15da9f0e5c
|
Fix missing dev dependency
|
setup.py
|
setup.py
|
import setuptools
import sys
# Thwart installation for unsupported versions of Python. `pip` didn't start
# enforcing `python_requires` until 9.0.
if sys.version_info < (3, 4):
raise RuntimeError('Unsupported Python version: ' + sys.version)
setuptools.setup(
author='Diego Argueta',
author_email='dargueta@users.noreply.github.com',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
],
description='A Python library for reading and writing structured binary data.',
extras_require={
'dev': [
'bumpversion==0.5.*',
'detox>=0.10',
'Sphinx>=1.6',
'tox>=2.8, <3.0', # 3.x breaks some plugins
'tox-pyenv>=1.0',
],
'test': [
'pytest>=3.0, !=3.2.0, !=3.3.0',
'pytest-cov>=2.0',
'pytest-mock>=1.3.0',
'pytest-randomly>=1.0',
],
},
license='BSD 3-Clause License',
name='binobj',
python_requires='>=3.4',
packages=setuptools.find_packages(
exclude=['docs', 'docs.*', 'tests', 'tests.*']),
url='https://www.github.com/dargueta/binobj',
version='0.4.4'
)
|
Python
| 0.000112
|
@@ -1117,24 +1117,51 @@
tox%3E=0.10',%0A
+ 'pylint%3E=2.0',%0A
|
a17efdceeeec0932ff403ebeb6f787ea8b08a3a4
|
Add print lists function practice problem
|
Problems/printLists.py
|
Problems/printLists.py
|
Python
| 0.000003
|
@@ -0,0 +1,693 @@
+#!/Applications/anaconda/envs/Python3/bin%0A%0A%0Adef main():%0A # Test suite%0A test_list_1 = %5B%22puppy%22, %22kitten%22, %22lion cub%22%5D%0A test_list_2 = %5B%22lettuce%22,%0A %22bacon%22,%0A %22turkey%22,%0A %22mayonnaise%22,%0A %22tomato%22,%0A %22white bread%22%5D%0A%0A pretty_print_lists(test_list_1)%0A pretty_print_lists(test_list_2)%0A%0A%0Adef pretty_print_lists(l):%0A output = %22%22%0A last_index = len(l) - 1%0A for i, item in enumerate(l):%0A if i == last_index:%0A output += %22and %7B%7D%22.format(item)%0A else:%0A output += %22%7B%7D, %22.format(item)%0A%0A print(output)%0A return None%0A%0A%0Aif __name__ == '__main__':%0A main()%0A
|
|
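The enumerate/last-index bookkeeping can be folded into a single join. A sketch that reproduces the record's output, including the Oxford comma and the slightly odd "and item" result for one-element lists:

```python
def pretty_print_list(items):
    *head, last = items  # assumes a non-empty list
    print(', '.join(head + ['and ' + last]))

pretty_print_list(["puppy", "kitten", "lion cub"])  # puppy, kitten, and lion cub
pretty_print_list(["lettuce", "bacon"])             # lettuce, and bacon
```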
9bbea15cd6832f9a0a75a05775fcf2a12297f8c8
|
Update setup.py
|
setup.py
|
setup.py
|
"""Chassis: Opinionated REST Framework."""
from distutils.core import setup
setup(
name='chassis',
version='0.0.5',
packages=['chassis'],
description="Tornado framework for self-documenting JSON RESTful APIs.",
author="Refinery 29",
author_email="chassis-project@refinery29.com",
url="https://github.com/refinery29/chassis",
download_url="https://github.com/refinery29/chassis/archive/v0.0.5.tar.gz",
keywords=['Tornado', 'RESTful', 'REST', 'API', 'JSON', 'framework'],
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Environment :: Web Environment',
'License :: OSI Approved :: MIT License',
'Intended Audience :: Developers',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Internet :: WWW/HTTP :: HTTP Servers',
'Topic :: Software Development :: Libraries :: Application Frameworks'
],
long_description="""\
Chassis is Refinery29's framework layer on top of Tornado for rapidly
building performant, self-documenting JSON-based REST APIs.
"""
)
|
Python
| 0
|
@@ -839,9 +839,252 @@
: 2.
-7
+6',%0A 'Programming Language :: Python :: 2.7',%0A 'Programming Language :: Python :: 3',%0A 'Programming Language :: Python :: 3.2',%0A 'Programming Language :: Python :: 3.3',%0A 'Programming Language :: Python :: 3.4
',%0A
|
14ff724cd05f51973af9ede47d9f8cfe2a1ce908
|
Add optional flag to setuptools extension (#78)
|
setup.py
|
setup.py
|
import sys
import platform
from pkg_resources import parse_version
from setuptools import setup, Extension
cpython = platform.python_implementation() == 'CPython'
is_glibc = platform.libc_ver()[0] == 'glibc'
windows = sys.platform.startswith('win')
if is_glibc:
glibc_ver = platform.libc_ver()[1]
libc_ok = parse_version(glibc_ver) >= parse_version('2.9')
else:
libc_ok = not windows
min_win_version = windows and sys.version_info >= (3, 5)
min_unix_version = not windows and sys.version_info >= (3, 3)
# Enable GNU features for libc's like musl, should have no effect
# on Apple/BSDs
if libc_ok:
gnu_flag = ['-D_GNU_SOURCE']
else:
gnu_flag = []
if cpython and ((min_unix_version and libc_ok) or min_win_version):
_cbor2 = Extension(
'_cbor2',
# math.h routines are built-in to MSVCRT
libraries=['m'] if not windows else [],
extra_compile_args=['-std=c99'] + gnu_flag,
sources=[
'source/module.c',
'source/encoder.c',
'source/decoder.c',
'source/tags.c',
'source/halffloat.c',
]
)
kwargs = {'ext_modules': [_cbor2]}
else:
kwargs = {}
setup(
use_scm_version={
'version_scheme': 'post-release',
'local_scheme': 'dirty-tag'
},
setup_requires=[
'setuptools >= 40.7.0',
'setuptools_scm >= 1.7.0'
],
**kwargs
)
|
Python
| 0
|
@@ -1106,16 +1106,39 @@
%5D
+,%0A optional=True
%0A )%0A
|
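optional=True, the flag the diff adds, tells setuptools to treat a failed build of that extension as a warning rather than an install-time error, so environments that cannot compile the C accelerator fall back to the pure-Python module. A minimal sketch; the names are placeholders:

```python
from setuptools import Extension, setup

speedups = Extension(
    '_speedups',
    sources=['speedups.c'],
    optional=True,  # build failure warns instead of aborting the install
)

setup(name='example', ext_modules=[speedups])
```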
34643864e52f3231aa40256bc160569af234e8e7
|
Add setup.py
|
setup.py
|
setup.py
|
Python
| 0.000001
|
@@ -0,0 +1,437 @@
+from setuptools import find_packages, setup%0A%0A%0Aversion = __import__('vdlkino').__version__%0A%0A%0Asetup(%0A name='vdlkino',%0A version=version,%0A description='Library in Python for comunicate computer with Arduino running VDLKino',%0A author='Eduardo Klosowski',%0A author_email='eduardo_klosowski@yahoo.com',%0A license='MIT',%0A packages=%5B'vdlkino'%5D,%0A zip_safe=False,%0A extras_require=%7B%0A 'serial': %5B'pyserial'%5D,%0A %7D,%0A)%0A
|
|
8ec65137efcf1f8cf37923b916e7496e10027edc
|
Bump version.
|
setup.py
|
setup.py
|
import os
import sys
from setuptools import setup
INSTALL_REQUIRES = ['python_cjson', 'requests >=1.0.3', 'boto >=2.1.1']
if sys.version_info < (2, 7, 0):
INSTALL_REQUIRES.append('argparse>=1.1')
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name = "qds_sdk",
version = "1.0.15b1",
author = "Qubole",
author_email = "dev@qubole.com",
description = ("Python SDK for coding to the Qubole Data Service API"),
keywords = "qubole sdk api",
url = "http://packages.python.org/qds_sdk",
packages=['qds_sdk'],
scripts=['bin/qds.py'],
install_requires=INSTALL_REQUIRES,
long_description="[Please visit the project page at https://github.com/qubole/qds-sdk-py]\n\n" + read('README.rst')
)
|
Python
| 0
|
@@ -337,11 +337,9 @@
.0.1
-5b1
+6
%22,%0A
|
42a287d23a1153df636c193695615d99b7c75e4d
|
Test stopping all running file backups
|
test/stop_all.py
|
test/stop_all.py
|
Python
| 0
|
@@ -0,0 +1,382 @@
+import urbackup_api%0D%0A%0D%0A%0D%0Aserver = urbackup_api.urbackup_server(%22http://127.0.0.1:55414/x%22, %22admin%22, %22foo%22)%0D%0A%0D%0Afor action in server.get_actions():%0D%0A a = action%5B%22action%22%5D%0D%0A if a ==server.action_full_file or a==server.action_resumed_full_file:%0D%0A print(%22Running full file backup: %22+action%5B%22name%22%5D)%0D%0A %0D%0A print(%22Stopping...%22)%0D%0A server.stop_action(action)
|
|
a9cc03c02b6d8571efd563e04f2cb774f4c3e7bf
|
add original walk.py
|
lib/walk.py
|
lib/walk.py
|
Python
| 0.000004
|
@@ -0,0 +1,688 @@
+# File: os-path-walk-example-2.py%0A#http://effbot.org/librarybook/os-path/os-path-walk-example-2.py%0A%0Aimport os%0A%0Adef index(directory):%0A # like os.listdir, but traverses directory trees%0A stack = %5Bdirectory%5D%0A files = %5B%5D%0A while stack:%0A directory = stack.pop()%0A for file in os.listdir(directory):%0A print(directory,file)%0A fullname = os.path.join(directory, file)%0A files.append(fullname)%0A if os.path.isdir(fullname) and not os.path.islink(fullname):%0A stack.append(fullname)%0A return files%0A%0Afor file in index(%22.%22):%0A print file%0A%0A## .%5Caifc-example-1.py%0A## .%5Canydbm-example-1.py%0A## .%5Carray-example-1.py%0A## ...%0A
|
|
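The record's hand-rolled stack is what os.walk does internally, and os.walk likewise skips symlinked directories by default. Note that the record's closing `print file` is Python 2 syntax while the inner call uses parentheses, so the sketch below is Python 3 throughout:

```python
import os

def index(directory):
    """Recursive listing of files and directories, like the record."""
    found = []
    for curdir, dirs, files in os.walk(directory):  # followlinks=False by default
        for name in files + dirs:
            found.append(os.path.join(curdir, name))
    return found

for path in index('.'):
    print(path)
```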
3a9445c6b3053d492c12bbf808d251c6da55632a
|
Add a test for the builtin __import__ function.
|
tests/import/builtin_import.py
|
tests/import/builtin_import.py
|
Python
| 0.000003
|
@@ -0,0 +1,305 @@
+# test calling builtin import function%0A%0A# basic test%0A__import__('builtins')%0A%0A# first arg should be a string%0Atry:%0A __import__(1)%0Aexcept TypeError:%0A print('TypeError')%0A%0A# level argument should be non-negative%0Atry:%0A __import__('xyz', None, None, None, -1)%0Aexcept ValueError:%0A print('ValueError')%0A
|
|
6dcd913e794edbac28d98988d0936262d4663b9f
|
create input function
|
core/get_input.py
|
core/get_input.py
|
Python
| 0.999998
|
@@ -0,0 +1,514 @@
+#!/usr/bin/env python%0A# -*- coding: utf-8 -*-%0A%0Afrom core.compatible import version%0Afrom core.alert import __input_msg%0A%0A%0Adef __input(msg, default):%0A if version() is 2:%0A try:%0A data = raw_input(__input_msg(msg))%0A if data == '':%0A data = default%0A except:%0A data = default%0A else:%0A try:%0A data = input(__input_msg(msg))%0A if data == '':%0A data = default%0A except:%0A data = default%0A return data%0A
|
|
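One wrinkle worth noting: version() is 2 compares an int with `is`, which only happens to work because CPython interns small integers; `==` is the safe spelling. The branch itself can also be avoided by binding whichever input function exists, as a sketch:

```python
try:
    _input = raw_input  # Python 2
except NameError:
    _input = input      # Python 3

def prompt(msg, default):
    try:
        data = _input(msg)
    except EOFError:
        return default
    return data if data else default
```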
a72a0674a6db3880ed699101be3c9c46671989f0
|
Add a primitive pythonic wrapper.
|
xxdata_11.py
|
xxdata_11.py
|
Python
| 0
|
@@ -0,0 +1,563 @@
+import os%0Aimport _xxdata_11%0A%0Aparameters = %7B%0A 'isdimd' : 200,%0A 'iddimd' : 40,%0A 'itdimd' : 50,%0A 'ndptnl' : 4,%0A 'ndptn' : 128,%0A 'ndptnc' : 256,%0A 'ndcnct' : 100%0A%7D%0A%0Adef read_scd(filename):%0A fd = open(filename, 'r')%0A%0A fortran_filename = 'fort.%25d' %25 fd.fileno()%0A os.symlink(filename, fortran_filename)%0A%0A iclass = 2 # class number for scd files%0A ret = _xxdata_11.xxdata_11(fd.fileno(), iclass, **parameters)%0A os.unlink(fortran_filename)%0A return ret%0A%0Aif __name__ == '__main__':%0A out = read_scd('scd96_c.dat')%0A print out%5B0%5D%0A
|
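The wrapper relies on the Fortran convention that unit N reads from a file named fort.N, hence the symlink to fort.<fd>. The record removes the link only on success; a sketch that also cleans up when the call raises, assumed rather than taken from the library:

```python
import os

def with_fortran_unit(filename, call):
    """Expose filename as fort.N for the duration of call(fd)."""
    f = open(filename, 'r')
    link = 'fort.%d' % f.fileno()
    os.symlink(filename, link)
    try:
        return call(f.fileno())
    finally:
        os.unlink(link)
        f.close()
```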