commit
stringlengths 40
40
| subject
stringlengths 1
3.25k
| old_file
stringlengths 4
311
| new_file
stringlengths 4
311
| old_contents
stringlengths 0
26.3k
| lang
stringclasses 3
values | proba
float64 0
1
| diff
stringlengths 0
7.82k
|
|---|---|---|---|---|---|---|---|
8c4833dbf9f4ae32afbfbe6a3cb8e4630abc3d25
|
Add test for local login
|
test/requests/test_login_local.py
|
test/requests/test_login_local.py
|
Python
| 0
|
@@ -0,0 +1,1936 @@
+import requests%0Afrom wqflask import user_manager%0Afrom parametrized_test import ParametrizedTest%0A%0Aclass TestLoginLocal(ParametrizedTest):%0A%0A def setUp(self):%0A super(TestLoginLocal, self).setUp()%0A self.login_url = self.gn2_url +%22/n/login%22%0A data = %7B%0A %22es_connection%22: self.es,%0A %22email_address%22: %22test@user.com%22,%0A %22full_name%22: %22Test User%22,%0A %22organization%22: %22Test Organisation%22,%0A %22password%22: %22test_password%22,%0A %22password_confirm%22: %22test_password%22%0A %7D%0A user_manager.basic_info = lambda : %7B %22basic_info%22: %22basic%22 %7D%0A user_manager.RegisterUser(data)%0A%0A def testLoginNonRegisteredUser(self):%0A data = %7B%0A %22email_address%22: %22non@existent.email%22,%0A %22password%22: %22doesitmatter?%22%0A %7D%0A result = requests.post(self.login_url, data=data)%0A self.assertEqual(result.url, self.login_url, %22%22)%0A%0A def testLoginWithRegisteredUserBothRememberMeAndImportCollectionsFalse(self):%0A data = %7B%0A %22email_address%22: %22test@user.com%22,%0A %22password%22: %22test_password%22%0A %7D%0A result = requests.post(self.login_url, data=data)%0A print(%22THE COOKIES? %22, result.cookies)%0A self.assertEqual(%0A result.url%0A , self.gn2_url+%22/?import_collections=false%22%0A , %22Login should have been successful%22)%0A %0A%0A%0Adef main(gn2, es):%0A import unittest%0A suite = unittest.TestSuite()%0A suite.addTest(TestLoginLocal(methodName=%22testLoginNonRegisteredUser%22, gn2_url=gn2, es_url=es))%0A suite.addTest(TestLoginLocal(methodName=%22testLoginWithRegisteredUserBothRememberMeAndImportCollectionsFalse%22, gn2_url=gn2, es_url=es))%0A runner = unittest.TextTestRunner()%0A runner.run(suite)%0A%0Aif __name__ == %22__main__%22:%0A import sys%0A if len(sys.argv) %3C 3:%0A raise Exception(%22Required arguments missing%22)%0A else:%0A main(sys.argv%5B1%5D, sys.argv%5B2%5D)%0A
|
|
008711b6d5506aed60a693c296a7a01180c2ea86
|
Create dss.py
|
dss.py
|
dss.py
|
Python
| 0.000001
|
@@ -0,0 +1,1959 @@
+from functions import *%0Aimport multiprocessing%0Aimport time%0A%0Awith open(%22config.txt%22) as f:%0A lines = f.readlines()%0Amax_instances = int(lines%5B0%5D.split(' ')%5B1%5D)%0A%0A%0Aclass machine():%0A 'Class for the instance of a machine'%0A %0A q = %5Bmultiprocessing.Queue() for i in range(max_instances + 1)%5D%0A # q%5B0%5D is unused%0A count = 1%0A%0A def __init__(self):%0A self.mac_id = machine.count%0A machine.count += 1%0A%0A def execute_func(self, func_name, *args):%0A comm_str = str(func_name) + ' = multiprocessing.Process(name = %22' + str(func_name) + '%22, target = ' + str(func_name) + ', args = ('%0A comm_str += 'self,'%0A for arg in args:%0A if(type(arg) is str):%0A comm_str += '%22' + str(arg) + '%22,'%0A else:%0A comm_str += str(arg) + ','%0A comm_str += '))'%0A%0A # create the new process%0A exec(comm_str)%0A%0A # start the new process%0A comm_str = str(func_name) + '.start()'%0A exec(comm_str)%0A%0A def send(self, destination_id, message):%0A # send message to the machine with machine_id destination_id%0A%0A mac_id = int(destination_id%5B8:%5D)%0A if(mac_id %3E= machine.count or mac_id %3C= 0):%0A return -1%0A%0A # message is of the format %22hello%7C2%22. Meaning message is %22hello%22 from machine with id 2%0A # However, the message received is processed and then returned back to the user%0A message += '%7C' + str(self.get_id())%0A%0A machine.q%5Bmac_id%5D.put(message)%0A return 1%0A%0A def recv(self):%0A mac_id = self.get_id()%0A if(mac_id %3E= machine.count or mac_id %3C= 0):%0A return -1, -1%0A%0A message = machine.q%5Bmac_id%5D.get().split('%7C')%0A%0A # message received is returned with the format %22hello%22 message from %22machine_2%22%0A return message%5B0%5D, 'machine_' + message%5B1%5D%0A%0A def get_id(self):%0A return self.mac_id%0A%0A def get_machine_id(self):%0A return %22machine_%22 + str(self.get_id()) %0A
|
|
c0e7393c5cc3f1095891a35b552e4a69733c83b6
|
add a simple example
|
demos/helloworld.py
|
demos/helloworld.py
|
Python
| 0.999997
|
@@ -0,0 +1,248 @@
+#!/usr/bin/env python%0D%0Afrom __future__ import with_statement%0D%0A%0D%0Aimport PyV8%0D%0A%0D%0Aclass Global(PyV8.JSClass):%0D%0A def writeln(self, arg):%0D%0A print arg%0D%0A %0D%0Awith PyV8.JSContext(Global()) as ctxt:%0D%0A ctxt.eval(%22writeln('Hello World');%22)%0D%0A
|
|
5669960952104b811df34fa9229d7e597407c753
|
add basic unit testing for appliance instances (incomplete)
|
tests/test_appliance_instance.py
|
tests/test_appliance_instance.py
|
Python
| 0.000017
|
@@ -0,0 +1,769 @@
+import sys%0Asys.path.append('..')%0Aimport disaggregator as da%0Aimport unittest%0Aimport pandas as pd%0Aimport numpy as np%0A%0Aclass ApplianceInstanceTestCase(unittest.TestCase):%0A%0A def setUp(self):%0A indices = %5Bpd.date_range('1/1/2013', periods=96, freq='15T'),%0A pd.date_range('1/2/2013', periods=96, freq='15T')%5D%0A data = %5Bnp.zeros(96),np.zeros(96)%5D%0A series = %5Bpd.Series(d, index=i) for d,i in zip(data,indices)%5D%0A self.traces = %5Bda.ApplianceTrace(s,%7B%7D) for s in series%5D%0A self.normal_instance = da.ApplianceInstance(self.traces)%0A%0A def test_get_traces(self):%0A self.assertIsNotNone(self.normal_instance.get_traces(),%0A 'instance should have traces')%0A%0Aif __name__ == %22__main__%22:%0A unittest.main()%0A
|
|
18103afcc20dcf9ada357b36d58a62756ddb3e3e
|
Fix tests
|
tests/storage/dav/test_main.py
|
tests/storage/dav/test_main.py
|
# -*- coding: utf-8 -*-
import datetime
import os
from textwrap import dedent
import pytest
import requests
import requests.exceptions
from tests import EVENT_TEMPLATE, TASK_TEMPLATE, VCARD_TEMPLATE
import vdirsyncer.exceptions as exceptions
from vdirsyncer.storage.base import Item
from vdirsyncer.storage.dav import CaldavStorage, CarddavStorage
from .. import StorageTests, format_item
dav_server = os.environ.get('DAV_SERVER', '').strip() or 'radicale'
def _get_server_mixin(server_name):
from . import __name__ as base
x = __import__('{}.servers.{}'.format(base, server_name), fromlist=[''])
return x.ServerMixin
ServerMixin = _get_server_mixin(dav_server)
class DavStorageTests(ServerMixin, StorageTests):
dav_server = dav_server
def test_dav_broken_item(self, s):
item = Item(u'HAHA:YES')
try:
s.upload(item)
except (exceptions.Error, requests.exceptions.HTTPError):
pass
assert not list(s.list())
def test_dav_empty_get_multi_performance(self, s, monkeypatch):
def breakdown(*a, **kw):
raise AssertionError('Expected not to be called.')
monkeypatch.setattr('requests.sessions.Session.request', breakdown)
assert list(s.get_multi([])) == []
class TestCaldavStorage(DavStorageTests):
storage_class = CaldavStorage
@pytest.fixture(params=[EVENT_TEMPLATE, TASK_TEMPLATE])
def item_template(self, request):
return request.param
@pytest.mark.parametrize('item_type', ['VTODO', 'VEVENT'])
def test_doesnt_accept_vcard(self, item_type, get_storage_args):
s = self.storage_class(item_types=(item_type,), **get_storage_args())
try:
s.upload(format_item(VCARD_TEMPLATE))
except (exceptions.Error, requests.exceptions.HTTPError):
pass
assert not list(s.list())
@pytest.mark.parametrize('item_types,calls_num', [
(('VTODO',), 1),
(('VEVENT',), 1),
(('VTODO', 'VEVENT'), 2),
(('VTODO', 'VEVENT', 'VJOURNAL'), 3),
((), 1)
])
def test_item_types_performance(self, get_storage_args, item_types,
calls_num, monkeypatch, get_item):
s = self.storage_class(item_types=item_types, **get_storage_args())
old_parse = s._parse_prop_responses
calls = []
def new_parse(*a, **kw):
calls.append(None)
return old_parse(*a, **kw)
monkeypatch.setattr(s, '_parse_prop_responses', new_parse)
list(s.list())
assert len(calls) == calls_num
@pytest.mark.xfail(dav_server == 'radicale',
reason='Radicale doesn\'t support timeranges.')
def test_timerange_correctness(self, get_storage_args):
start_date = datetime.datetime(2013, 9, 10)
end_date = datetime.datetime(2013, 9, 13)
s = self.storage_class(start_date=start_date, end_date=end_date,
**get_storage_args())
too_old_item = format_item(dedent(u'''
BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//hacksw/handcal//NONSGML v1.0//EN
BEGIN:VEVENT
DTSTART:19970714T170000Z
DTEND:19970715T035959Z
SUMMARY:Bastille Day Party
X-SOMETHING:{r}
UID:{r}
END:VEVENT
END:VCALENDAR
''').strip())
too_new_item = format_item(dedent(u'''
BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//hacksw/handcal//NONSGML v1.0//EN
BEGIN:VEVENT
DTSTART:20150714T170000Z
DTEND:20150715T035959Z
SUMMARY:Another Bastille Day Party
X-SOMETHING:{r}
UID:{r}
END:VEVENT
END:VCALENDAR
''').strip())
good_item = format_item(dedent(u'''
BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//hacksw/handcal//NONSGML v1.0//EN
BEGIN:VEVENT
DTSTART:20130911T170000Z
DTEND:20130912T035959Z
SUMMARY:What's with all these Bastille Day Partys
X-SOMETHING:{r}
UID:{r}
END:VEVENT
END:VCALENDAR
''').strip())
s.upload(too_old_item)
s.upload(too_new_item)
href, etag = s.upload(good_item)
assert list(s.list()) == [(href, etag)]
def test_invalid_resource(self, monkeypatch, get_storage_args):
calls = []
args = get_storage_args(collection=None)
def request(session, method, url, data=None, headers=None, auth=None,
verify=None):
assert url == args['url']
calls.append(None)
r = requests.Response()
r.status_code = 200
r._content = 'Hello World.'
return r
monkeypatch.setattr('requests.sessions.Session.request', request)
with pytest.raises(ValueError):
s = self.storage_class(**args)
list(s.list())
assert len(calls) == 1
def test_item_types(self, s):
event = s.upload(format_item(EVENT_TEMPLATE))
task = s.upload(format_item(TASK_TEMPLATE))
s.item_types = ('VTODO', 'VEVENT')
assert set(s.list()) == set([event, task])
s.item_types = ('VTODO',)
assert set(s.list()) == set([task])
s.item_types = ('VEVENT',)
assert set(s.list()) == set([event])
s.item_types = ()
assert set(s.list()) == set([event, task])
class TestCarddavStorage(DavStorageTests):
storage_class = CarddavStorage
@pytest.fixture
def item_template(self):
return VCARD_TEMPLATE
|
Python
| 0.000003
|
@@ -4614,75 +4614,16 @@
rl,
-data=None, headers=None, auth=None,%0A verify=None
+**kwargs
):%0A
|
54f7cdf15d3fdbd70a5f06ec38aa84dfd828c7e7
|
Add simple gui
|
gui.py
|
gui.py
|
Python
| 0.000001
|
@@ -0,0 +1,1047 @@
+from tkinter import Tk, LEFT, SUNKEN, X%0Afrom tkinter.ttk import Frame, Button, Style%0Afrom PIL import Image, ImageTk%0A%0A%0Adef main():%0A root = Tk()%0A root.geometry(%22300x300%22)%0A%0A separator = Frame(root, height=200, relief=SUNKEN)%0A separator.pack(fill=X, padx=10)%0A%0A s = Style()%0A s.configure(%22Visible.TButton%22, foreground=%22red%22, background=%22pink%22)%0A%0A frame = Frame(root)%0A frame.pack_propagate(0)%0A image = Image.open(%22faces.jpeg%22)%0A photo = ImageTk.PhotoImage(image)%0A b = Button(root, image=photo, style=%22Visible.TButton%22, cursor=%22dot%22)%0A b.pack(side=LEFT, expand=1)%0A%0A image1 = Image.open(%22m_bg.png%22)%0A photo1 = ImageTk.PhotoImage(image1)%0A b1 = Button(root, image=photo1, style=%22Visible.TButton%22, cursor=%22dot%22)%0A b1.pack(side=LEFT, expand=1)%0A%0A image2 = Image.open(%22mermaid_1.jpg%22)%0A photo2 = ImageTk.PhotoImage(image2)%0A b2 = Button(root, image=photo2, style=%22Visible.TButton%22, cursor=%22dot%22)%0A b2.pack(side=LEFT, expand=1)%0A frame.pack(fill=X)%0A root.mainloop()%0A%0A%0Aif __name__ == '__main__':%0A main()%0A
|
|
5af36bbe29a8a7a7418fc535c5647c9be511f0b4
|
Add script to write user counts to csv.
|
scripts/userCounts.py
|
scripts/userCounts.py
|
Python
| 0
|
@@ -0,0 +1,782 @@
+%22%22%22%0AScript to write user counts for each region to CSV.%0A%0A%22%22%22%0A%0Aimport twitterproj%0A%0Adef main():%0A%0A db = twitterproj.connect()%0A filenames = %5B'grids/counties.user_counts.bot_filtered.csv',%0A 'grids/states.user_counts.bot_filtered.csv',%0A 'grids/squares.user_counts.bot_filtered.csv'%5D%0A%0A funcs = %5B'counties', 'states', 'squares'%5D%0A%0A for func, filename in zip(funcs, filenames):%0A # The attribute we want is twitterproj.hashtag_counts__%7Bgridtype%7D%0A regions = getattr(twitterproj, 'hashtag_counts__' + func)()%0A lines = %5B%22# count%22%5D%0A for region in regions:%0A lines.append(str(region%5B'user_count'%5D))%0A%0A with open(filename, 'w') as f:%0A f.write('%5Cn'.join(lines))%0A%0Aif __name__ == '__main__':%0A main()%0A
|
|
fb15c992a286abe066333abfdabbb13646d383d6
|
Create final_P7_Frob.py
|
final_P7_Frob.py
|
final_P7_Frob.py
|
Python
| 0.000866
|
@@ -0,0 +1,1208 @@
+class Frob(object):%0A def __init__(self, name):%0A self.name = name%0A self.before = None%0A self.after = None%0A def setBefore(self, before):%0A # example: a.setBefore(b) sets b before a%0A self.before = before%0A def setAfter(self, after):%0A # example: a.setAfter(b) sets b after a%0A self.after = after%0A def getBefore(self):%0A return self.before%0A def getAfter(self):%0A return self.after%0A def myName(self):%0A return self.name%0A %0Adef insert(atMe, newFrob):%0A if atMe.myName()%3CnewFrob.myName():%0A pre = atMe%0A while pre.getAfter()!=None and pre.getAfter().myName()%3CnewFrob.myName():%0A pre=pre.getAfter()%0A newFrob.setAfter(pre.getAfter())%0A newFrob.setBefore(pre)%0A if pre.getAfter()!=None:%0A pre.getAfter().setBefore(newFrob)%0A pre.setAfter(newFrob)%0A else:%0A aft=atMe%0A while aft.getBefore()!=None and aft.getBefore().myName()%3EnewFrob.myName():%0A aft=aft.getBefore()%0A newFrob.setAfter(aft)%0A newFrob.setBefore(aft.getBefore())%0A if aft.getBefore()!=None:%0A aft.getBefore().setAfter(newFrob)%0A aft.setBefore(newFrob)%0A
|
|
0c18bb0993be77059aa75015cc5433eaacbe8999
|
Add barebones RFC downloader and renderer.
|
rfc.py
|
rfc.py
|
Python
| 0
|
@@ -0,0 +1,436 @@
+import pydoc%0Aimport sys%0A%0Atry:%0A from urllib.request import urlopen%0Aexcept ImportError:%0A from urllib2 import urlopen%0A%0A%0Adef get_rfc(rfc):%0A url = %22http://www.ietf.org/rfc/rfc%7B0%7D.txt%22.format(rfc)%0A f = urlopen(url)%0A data = f.read()%0A if isinstance(data, bytes):%0A data = data.decode('utf-8')%0A%0A return data%0A%0A%0Adef render_rfc(rfc):%0A pydoc.pager(get_rfc(rfc))%0A%0A%0Aif __name__ == %22__main__%22:%0A render_rfc(sys.argv%5B1%5D)%0A
|
|
681c21a5fbf3bc713468e33bb10dfa9bf6d62850
|
Add migration to fix admin users with roles
|
corehq/apps/users/migrations/0004_rm_role_id_from_admins.py
|
corehq/apps/users/migrations/0004_rm_role_id_from_admins.py
|
Python
| 0
|
@@ -0,0 +1,1377 @@
+from django.db import migrations%0A%0Afrom corehq.apps.es import UserES%0Afrom corehq.apps.users.models import WebUser%0Afrom corehq.util.couch import DocUpdate, iter_update%0Afrom corehq.util.django_migrations import skip_on_fresh_install%0Afrom corehq.util.log import with_progress_bar%0A%0A%0A@skip_on_fresh_install%0Adef fix_users(apps, schema_editor):%0A user_ids = with_progress_bar(_get_admins_with_roles())%0A iter_update(WebUser.get_db(), _remove_role, user_ids, verbose=True)%0A%0A%0Adef _get_admins_with_roles():%0A # domain_memberships isn't a nested mapping in ES, so this only checks that%0A # they have a domain membership that's an admin, and one with a role_id,%0A # not that it's both on the same membership%0A return (UserES()%0A .web_users()%0A .term('domain_memberships.is_admin', True)%0A .non_null('domain_memberships.role_id')%0A .get_ids())%0A%0A%0Adef _remove_role(user_doc):%0A changed = False%0A for dm in user_doc%5B'domain_memberships'%5D:%0A if dm%5B'is_admin'%5D and dm%5B'role_id'%5D:%0A dm%5B'role_id'%5D = None%0A changed = True%0A%0A if changed:%0A return DocUpdate(user_doc)%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('users', '0003_roles_permissions_update'),%0A %5D%0A%0A operations = %5B%0A migrations.RunPython(fix_users, reverse_code=migrations.RunPython.noop, elidable=True)%0A %5D%0A
|
|
3134e22eb5da9bd7104c199f788288e0fc823db2
|
Add basic endopoints powered by bottle
|
app.py
|
app.py
|
Python
| 0
|
@@ -0,0 +1,1456 @@
+#!/usr/bin/env python%0A# -*- coding: utf-8 -*-%0A%0Aimport bottle%0Afrom bottle import route, run, template, request, response%0A%0Aimport os%0Aimport uuid%0A%0A@route('/')%0Adef get_simple_form():%0A %22%22%22%0A Returns simple images upload form%0A :return:%0A %22%22%22%0A return ('%3Cform action=%22/imgs%22 method=%22post%22 enctype=%22multipart/form-data%22%3E%5Cn'%0A ' Input: %3Cinput name=%22input%22 type=%22file%22%3E%5Cn'%0A ' Style: %3Cinput name=%22style%22 type=%22file%22%3E%5Cn'%0A ' %3Cinput value=%22Upload%22 type=%22submit%22%3E%5Cn'%0A '%3C/form%3E')%0A%0A%0A@route('/imgs', method='POST')%0Adef upload_imgs():%0A %22%22%22%0A Upload input & style images and return id%0A :return:%0A %22%22%22%0A%0A # get files%0A input_img = request.files.get('input')%0A style_img = request.files.get('style')%0A if not check_img_png(input_img) or not check_img_png(style_img):%0A return 'File extension not allowed.'%0A%0A # assign uuid%0A id = uuid.uuid4()%0A input_up_path = id.get_hex() + input_img.filename%0A style_up_path = id.get_hex() + style_img.filename%0A input_img.save(input_up_path)%0A style_img.save(style_up_path)%0A%0A return template('Uploaded images. ID is %22%7B%7Bid%7D%7D%22.', id=id.get_hex())%0A%0Adef check_img_png(image):%0A %22%22%22%0A Check whether%0A :param image:%0A :return:%0A %22%22%22%0A name, ext = os.path.split(image.filename)%0A return ext not in ('png', 'jpeg', 'jpg')%0A%0A@route('/statuses/%3Cid%3E')%0Adef show_status(id=''):%0A return%0A%0A%0Aif __name__ == '__main__':%0A run(debug=True)%0A
|
|
07442bd7ddd07635002493bafb6ac16a24fd5d82
|
Add script for http live streaming
|
hls.py
|
hls.py
|
Python
| 0
|
@@ -0,0 +1,2715 @@
+var http = require('http');%0Avar fs = require('fs');%0Avar url = require('url');%0Avar path = require('path');%0Avar zlib = require('zlib');%0A%0APORT = 8000;%0A%0Ahttp.createServer(function (req, res) %7B%0A var uri = url.parse(req.url).pathname;%0A%0A if (uri == '/player.html') %7B%0A res.writeHead(200, %7B 'Content-Type': 'text/html' %7D);%0A res.write('%3Chtml%3E%3Chead%3E%3Ctitle%3EHLS Player fed by node.js' +%0A '%3C/title%3E%3C/head%3E%3Cbody%3E');%0A res.write('%3Cvideo src=%22http://' + req.socket.localAddress +%0A ':' + PORT + '/out.M3U8%22 controls autoplay%3E%3C/body%3E%3C/html%3E');%0A res.end();%0A return;%0A %7D%0A%0A var filename = path.join(%22./%22, uri);%0A fs.exists(filename, function (exists) %7B%0A if (!exists) %7B%0A console.log('file not found: ' + filename);%0A res.writeHead(404, %7B 'Content-Type': 'text/plain' %7D);%0A res.write('file not found: %25s%5Cn', filename);%0A res.end();%0A %7D else %7B%0A console.log('sending file: ' + filename);%0A switch (path.extname(uri)) %7B%0A case '.M3U8':%0A fs.readFile(filename, function (err, contents) %7B%0A if (err) %7B%0A res.writeHead(500);%0A res.end();%0A %7D else if (contents) %7B%0A res.writeHead(200,%0A %7B'Content-Type':%0A 'application/vnd.apple.mpegurl'%7D);%0A var ae = req.headers%5B'accept-encoding'%5D;%0A if (ae.match(/%5Cbgzip%5Cb/)) %7B%0A zlib.gzip(contents, function (err, zip) %7B%0A if (err) throw err;%0A%0A res.writeHead(200,%0A %7B'content-encoding': 'gzip'%7D);%0A res.end(zip);%0A %7D);%0A %7D else %7B%0A res.end(contents, 'utf-8');%0A %7D%0A %7D else %7B%0A console.log('emptly playlist');%0A res.writeHead(500);%0A res.end();%0A %7D%0A %7D);%0A break;%0A case '.ts':%0A res.writeHead(200, %7B 'Content-Type':%0A 'video/MP2T' %7D);%0A var stream = fs.createReadStream(filename,%0A %7B bufferSize: 64 * 1024 %7D);%0A stream.pipe(res);%0A break;%0A default:%0A console.log('unknown file type: ' +%0A path.extname(uri));%0A res.writeHead(500);%0A res.end();%0A %7D%0A %7D%0A 
%7D);%0A%7D).listen(PORT);%0A
|
|
4596c0a54457ee515d164bafc399010af190eaa9
|
Add basic http service to turn a led ON-OFF
|
led.py
|
led.py
|
Python
| 0
|
@@ -0,0 +1,369 @@
+from flask import Flask%0Aimport RPi.GPIO as GPIO%0A%0A%0Aapp = Flask(__name__)%0A%0A%0A@app.route(%22/led/on/%22)%0Adef led_on():%0A GPIO.output(11, GPIO.HIGH)%0A return %22Led ON%22%0A%0A%0A@app.route(%22/led/off/%22)%0Adef led_off():%0A GPIO.output(11, GPIO.LOW)%0A return %22Led OFF%22%0A%0A%0Aif __name__ == %22__main__%22:%0A GPIO.setmode(GPIO.BOARD)%0A GPIO.setup(11, GPIO.OUT)%0A app.run(host='0.0.0.0')%0A
|
|
3492ffd5ffa0c7d1dfb5a9f4a587777245044685
|
add test cases of ruamel.yaml backend
|
tests/backend/yaml/ruamel_yaml.py
|
tests/backend/yaml/ruamel_yaml.py
|
Python
| 0
|
@@ -0,0 +1,1414 @@
+#%0A# Copyright (C) - 2018 Satoru SATOH %3Cssato @ redhat.com%3E%0A# License: MIT%0A#%0A# pylint: disable=missing-docstring,invalid-name,too-few-public-methods%0A# pylint: disable=ungrouped-imports%0Afrom __future__ import absolute_import%0A%0Aimport os%0Aimport anyconfig.backend.yaml.pyyaml as TT%0Aimport tests.backend.common as TBC%0A%0Afrom anyconfig.compat import OrderedDict%0A%0A%0ACNF_S = %22%22%22%0Aa: 0%0Ab: bbb%0Ac:%0A - 1%0A - 2%0A - 3%0A%0Asect0: §0%0A d: %5B%22x%22, %22y%22, %22z%22%5D%0Asect1:%0A %3C%3C: *sect0%0A e: true%0A%22%22%22%0A%0ACNF = OrderedDict(((%22a%22, 0), (%22b%22, %22bbb%22), (%22c%22, %5B1, 2, 3%5D),%0A (%22sect0%22, OrderedDict(((%22d%22, %22x y z%22.split()), ))),%0A (%22sect1%22, OrderedDict(((%22d%22, %22x y z%22.split()),%0A (%22e%22, True))))))%0A%0A%0Aclass HasParserTrait(TBC.HasParserTrait):%0A%0A psr = TT.Parser()%0A cnf = CNF%0A cnf_s = CNF_S%0A%0A opts = dict(typ=%22rt%22, pure=True,%0A preserve_quotes=True,%0A indent=dict(mapping=4, sequence=4, offset=2))%0A%0A setattr(psr, %22dict_options%22, opts)%0A%0A%0Aclass Test_10(TBC.Test_10_dumps_and_loads, HasParserTrait): # noqa: N801%0A%0A load_options = dict(ac_safe=True, Loader=TT.yaml.loader.Loader)%0A dump_options = dict(ac_safe=True)%0A empty_patterns = %5B('', %7B%7D), (' ', %7B%7D), ('%5B%5D', %5B%5D),%0A (%22#%25s#%25s%22 %25 (os.linesep, os.linesep), %7B%7D)%5D%0A%0A%0Aclass Test_20(TBC.Test_20_dump_and_load, HasParserTrait): # noqa: N801%0A%0A pass%0A%0A# vim:sw=4:ts=4:et:%0A
|
|
3f9a8ee16e47f4ce0d75a1b856341c05436c2aff
|
Create sending_email.py
|
sending_email.py
|
sending_email.py
|
Python
| 0.000005
|
@@ -0,0 +1,2415 @@
+# -*- coding: utf-8 -*-%0A%22%22%22%0ACreated on Wed May 10 16:32:22 2017%0AThis scripts are used to send out data from Pasture to Wenlong from field.%0A%0AKey features:%0A- Parse and send out all .par files%0A- Send out email at certain intervals: such as one day.%0A@author: wliu14%0A%22%22%22%0A%0Afrom email import encoders%0Afrom email.mime.text import MIMEText%0Afrom email.mime.multipart import MIMEMultipart%0Afrom email.mime.base import MIMEBase%0A%0Aimport smtplib%0Aimport time%0Aimport glob%0Aimport os%0Aimport logging%0A%0Adef _get_files(path):%0A # parse and find out all the files endwith .par.%0A files = list()%0A%0A for filename in glob.glob(os.path.join(path, '*.par')):%0A files.append(filename)%0A%0A return files%0A%0Adef send_email(path):%0A%0A #information of the email address.%0A from_addr = 'wenlongliu853@gmail.com'%0A email_password = 'XXXX'%0A to_addr = 'wenlongliu853@gmail.com'%0A%0A #send attachment via email.%0A msg = MIMEMultipart()%0A msg%5B'From'%5D = from_addr%0A msg%5B'To'%5D = to_addr%0A msg%5B'Subject'%5D = 'This is a testing email.'%0A%0A #Attachement information.%0A files = _get_files(path)%0A msg.attach(MIMEText('This is a testing email to send out the Plymouth data', 'plain', 'utf-8'))%0A%0A for filename in files:%0A #Adding attachments.%0A with open(filename, 'rb') as f:%0A # Set the name and format of the attachment:%0A mime = MIMEBase('text', 'plain', filename=filename)%0A # Header information:%0A mime.add_header('Content-Disposition', 'attachment', filename=filename)%0A mime.add_header('Content-ID', '%3C0%3E')%0A mime.add_header('X-Attachment-Id', '0')%0A # Read in attachment:%0A mime.set_payload(f.read())%0A # Decode the information:%0A encoders.encode_base64(mime)%0A # Add files into attachment.%0A msg.attach(mime)%0A%0A smtp_server = 'smtp.gmail.com'%0A smtp_port = 587%0A server = smtplib.SMTP(smtp_server, smtp_port)%0A server.starttls()%0A server.set_debuglevel(1)%0A server.login(from_addr, email_password)%0A 
server.sendmail(from_addr, to_addr, msg.as_string())%0A server.quit()%0A%0Aif __name__ == '__main__':%0A path = 'C:%5C%5Cs-canV5.0%5C%5CResults%5C%5CORIGINAL'%0A interval = 600 #Unit: second%0A while True:%0A try:%0A send_email(path)%0A print('%5Cn Sending one email!%5Cn')%0A except:%0A print('%5Cn error, try again later.%5Cn ')%0A finally:%0A time.sleep(interval)%0A
|
|
03f46b0d6867bcb8a88e53b26089705cb1667bbd
|
Add script to generate images from all samples
|
tools/create_from_sample_texts.py
|
tools/create_from_sample_texts.py
|
Python
| 0
|
@@ -0,0 +1,356 @@
+#!/usr/bin/env python%0A%0Aimport teetime%0Aimport os%0A%0A%0Adef main():%0A with open('samples/sample-texts.txt') as fh:%0A for line in fh:%0A print line.strip()%0A path = teetime.create_typography(line.strip(), colors=False)%0A os.rename(path, os.path.join('samples', os.path.basename(path)))%0A%0A%0Aif __name__ == '__main__':%0A main()%0A
|
|
08c0c68ed52e9644cc92ad8afdc423b43b4c1326
|
Add Fractal_Tree.py.
|
Fractal_Tree.py
|
Fractal_Tree.py
|
Python
| 0
|
@@ -0,0 +1,454 @@
+__author__ = %22ClaytonBat%22%0Aimport turtle%0A%0Adef tree(branchLen,t):%0A if branchLen %3E 5:%0A t.forward(branchLen)%0A t.right(20)%0A tree(branchLen-15,t)%0A t.left(40)%0A tree(branchLen-15,t)%0A t.right(20)%0A t.backward(branchLen)%0A%0Adef main():%0A t = turtle.Turtle()%0A myWin = turtle.Screen()%0A t.left(90)%0A t.up()%0A t.backward(100)%0A t.down()%0A t.color(%22green%22)%0A tree(75,t)%0A myWin.exitonclick()%0A%0Amain()%0A
|
|
b575394351209239b754b99b3839ba4c799fc831
|
Use _pb2_grpc module to access gRPC entities
|
src/python/grpcio_reflection/grpc_reflection/v1alpha/reflection.py
|
src/python/grpcio_reflection/grpc_reflection/v1alpha/reflection.py
|
# Copyright 2016, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Reference implementation for reflection in gRPC Python."""
import threading
import grpc
from google.protobuf import descriptor_pb2
from google.protobuf import descriptor_pool
from grpc_reflection.v1alpha import reflection_pb2
from grpc_reflection.v1alpha import reflection_pb2_grpc
_POOL = descriptor_pool.Default()
def _not_found_error():
return reflection_pb2.ServerReflectionResponse(
error_response=reflection_pb2.ErrorResponse(
error_code=grpc.StatusCode.NOT_FOUND.value[0],
error_message=grpc.StatusCode.NOT_FOUND.value[1].encode(),))
def _file_descriptor_response(descriptor):
proto = descriptor_pb2.FileDescriptorProto()
descriptor.CopyToProto(proto)
serialized_proto = proto.SerializeToString()
return reflection_pb2.ServerReflectionResponse(
file_descriptor_response=reflection_pb2.FileDescriptorResponse(
file_descriptor_proto=(serialized_proto,)),)
class ReflectionServicer(reflection_pb2.ServerReflectionServicer):
"""Servicer handling RPCs for service statuses."""
def __init__(self, service_names, pool=None):
"""Constructor.
Args:
service_names: Iterable of fully-qualified service names available.
"""
self._service_names = tuple(sorted(service_names))
self._pool = _POOL if pool is None else pool
def _file_by_filename(self, filename):
try:
descriptor = self._pool.FindFileByName(filename)
except KeyError:
return _not_found_error()
else:
return _file_descriptor_response(descriptor)
def _file_containing_symbol(self, fully_qualified_name):
try:
descriptor = self._pool.FindFileContainingSymbol(
fully_qualified_name)
except KeyError:
return _not_found_error()
else:
return _file_descriptor_response(descriptor)
def _file_containing_extension(self, containing_type, extension_number):
try:
message_descriptor = self._pool.FindMessageTypeByName(containing_type)
extension_descriptor = self._pool.FindExtensionByNumber(
message_descriptor, extension_number)
descriptor = self._pool.FindFileContainingSymbol(
extension_descriptor.full_name)
except KeyError:
return _not_found_error()
else:
return _file_descriptor_response(descriptor)
def _all_extension_numbers_of_type(self, containing_type):
try:
message_descriptor = self._pool.FindMessageTypeByName(containing_type)
extension_numbers = tuple(sorted(
extension.number
for extension in self._pool.FindAllExtensions(message_descriptor)))
except KeyError:
return _not_found_error()
else:
return reflection_pb2.ServerReflectionResponse(
all_extension_numbers_response=reflection_pb2.
ExtensionNumberResponse(
base_type_name=message_descriptor.full_name,
extension_number=extension_numbers))
def _list_services(self):
return reflection_pb2.ServerReflectionResponse(
list_services_response=reflection_pb2.ListServiceResponse(service=[
reflection_pb2.ServiceResponse(name=service_name)
for service_name in self._service_names
]))
def ServerReflectionInfo(self, request_iterator, context):
for request in request_iterator:
if request.HasField('file_by_filename'):
yield self._file_by_filename(request.file_by_filename)
elif request.HasField('file_containing_symbol'):
yield self._file_containing_symbol(
request.file_containing_symbol)
elif request.HasField('file_containing_extension'):
yield self._file_containing_extension(
request.file_containing_extension.containing_type,
request.file_containing_extension.extension_number)
elif request.HasField('all_extension_numbers_of_type'):
yield self._all_extension_numbers_of_type(
request.all_extension_numbers_of_type)
elif request.HasField('list_services'):
yield self._list_services()
else:
yield reflection_pb2.ServerReflectionResponse(
error_response=reflection_pb2.ErrorResponse(
error_code=grpc.StatusCode.INVALID_ARGUMENT.value[0],
error_message=grpc.StatusCode.INVALID_ARGUMENT.value[1]
.encode(),))
def enable_server_reflection(service_names, server, pool=None):
"""Enables server reflection on a server.
Args:
service_names: Iterable of fully-qualified service names available.
server: grpc.Server to which reflection service will be added.
pool: DescriptorPool object to use (descriptor_pool.Default() if None).
"""
reflection_pb2_grpc.add_ServerReflectionServicer_to_server(
ReflectionServicer(service_names), server, pool)
|
Python
| 0
|
@@ -2498,32 +2498,37 @@
r(reflection_pb2
+_grpc
.ServerReflectio
|
1893473729acd938a0657127b82892af1bdb987b
|
Create AbortMultipartUploads.py
|
obs/cleanUnmergedFragments/AbortMultipartUploads.py
|
obs/cleanUnmergedFragments/AbortMultipartUploads.py
|
Python
| 0
|
@@ -0,0 +1,1088 @@
+#!/usr/bin/python%0A# -*- coding: UTF-8 -*-%0Aimport sys%0Aimport commands%0A%0Aif __name__ == '__main__':%0A if len(sys.argv%5B1:%5D) %3E 0:%0A bucket_nameurl = str(sys.argv%5B1:%5D%5B0%5D)%0A else:%0A bucket_nameurl = %22%22%0A print(%22bucket name should be specified%5CnAbortMultipartUploads.py %5Bs3://BucketName%5D%22)%0A sys.exit()%0A%0A while True:%0A ls_cmd = %22s3cmd multipart %25s%22 %25 bucket_nameurl%0A out = commands.getoutput(ls_cmd)%0A if len(out.splitlines()) %3C 3:%0A print(%22All multiuploads have been aborted.%22)%0A sys.exit()%0A%0A if len(sys.argv%5B1:%5D) %3E 1 and sys.argv%5B2:%5D%5B0%5D.lower() == %22list%22:%0A print(%22only up to 1000 multiuploads can be displayed !%5Cn%22)%0A print(out)%0A sys.exit()%0A%0A for line in out.splitlines()%5B2:%5D:%0A obj_uid = line.split(%22%5Ct%22)%0A url = obj_uid%5B1%5D%0A uploadId = obj_uid%5B2%5D%0A print(%22url:%5B%25s%5D, uploadId:%5B%25s%5D%22 %25 (url, uploadId))%0A abort_cmd = %22s3cmd abortmp %25s %25s%22 %25 (url, uploadId)%0A out = commands.getoutput(abort_cmd)%0A print(out)%0A%0A
|
|
6dca2d95144ebe22f58cb4dafb00a3f8a402316e
|
add answer for question 4
|
question_4/heguilong.py
|
question_4/heguilong.py
|
Python
| 0.999999
|
@@ -0,0 +1,1646 @@
+#!/usr/bin/env python3%0A%22%22%22%0AFile: heguilong.py%0AAuthor: heguilong%0AEmail: hgleagle@gmail.com%0AGithub: https://github.com/hgleagle%0ADescription:%0A %E6%96%90%E6%B3%A2%E9%82%A3%E5%A5%91%E6%95%B0%E5%88%97%E7%94%B10%E5%92%8C1%E5%BC%80%E5%A7%8B%EF%BC%8C%E4%B9%8B%E5%90%8E%E7%9A%84%E6%96%90%E6%B3%A2%E9%82%A3%E5%A5%91%E7%B3%BB%E6%95%B0%E5%B0%B1%E6%98%AF%E7%94%B1%E4%B9%8B%E5%89%8D%E7%9A%84%E4%B8%A4%E6%95%B0%E7%9B%B8%E5%8A%A0%E8%80%8C%E5%BE%97%E5%87%BA%EF%BC%8C%E4%BE%8B%E5%A6%82%0A%E6%96%90%E6%B3%A2%E9%82%A3%E5%A5%91%E6%95%B0%E5%88%97%E7%9A%84%E5%89%8D10%E4%B8%AA%E6%95%B0%E6%98%AF 0, 1, 1, 2, 3, 5, 8, 13, 21, 34%E3%80%82%0A%22%22%22%0Aimport sys%0Aimport logging%0A%0A%0Alogging.basicConfig(level=logging.DEBUG, format='%25(asctime)s - %25(levelname)s %5C%0A - %25(message)s')%0A%0A%0A# solution 1%0Aclass Fibonaci():%0A def __init__(self):%0A %22%22%22TODO: Docstring for __init__.%0A%0A :arg1: TODO%0A :returns: TODO%0A%0A %22%22%22%0A self.fib_dict = %7B%7D%0A%0A def calculate(self, number):%0A %22%22%22TODO: Docstring for calculate.%0A%0A :f: TODO%0A :number: TODO%0A :returns: TODO%0A%0A %22%22%22%0A # logging.debug(%22number: %7B%25d%7D%22 %25 number)%0A if number %3C= 1:%0A result = number%0A else:%0A result = self.calculate(number - 1) + self.calculate(number - 2)%0A if number %3E 0 and number not in self.fib_dict:%0A self.fib_dict%5Bnumber%5D = result%0A return result%0A%0A def show_fib_values(self):%0A print(self.fib_dict.values())%0A%0A%0A# solution 2%0Adef fib(n):%0A a, b = 0, 1%0A while n %3E 0:%0A yield b%0A a, b = b, a + b%0A n -= 1%0A%0A%0Aif __name__ == %22__main__%22:%0A if len(sys.argv) != 2:%0A print(%22Usage: python3 heguilong.py number%22)%0A sys.exit()%0A number = int(sys.argv%5B1%5D)%0A if number %3C= 0:%0A print(%22number should be larger than 0%22)%0A%0A # solution 1%0A print(%22Solution 1:%22)%0A fib_obj = Fibonaci()%0A fib_obj.calculate(number)%0A fib_obj.show_fib_values()%0A%0A # solution 2%0A print(%22Solution 2:%22)%0A for i in 
fib(number):%0A print(i)%0A
|
|
a50ce7117e4c1300410b74b5511722e4d7d57be4
|
Implement script to automate setting scope location and time
|
set_when_and_where.py
|
set_when_and_where.py
|
Python
| 0
|
@@ -0,0 +1,1098 @@
+#!/usr/bin/env python%0A%0Aimport config%0Aimport configargparse%0Aimport math%0Aimport time%0Aimport nexstar%0Aimport ephem%0A%0Aparser = configargparse.ArgParser(default_config_files=config.DEFAULT_FILES)%0Aparser.add_argument('--scope', help='serial device for connection to telescope', default='/dev/ttyUSB0')%0Aparser.add_argument('--lat', required=True, help='latitude of observer (+N)')%0Aparser.add_argument('--lon', required=True, help='longitude of observer (+E)')%0Aargs = parser.parse_args()%0A%0A# We want to parse the latitude and longitude exactly the same way as our other%0A# scripts do: by letting ephem.Angle do the parsing itself. But they explicitly%0A# disallow us from creating our own Angle objects directly, so we're forced to%0A# work around that by using an object (Observer) that has Angles in it already.%0Aobserver = ephem.Observer()%0Aobserver.lat = args.lat%0Aobserver.lon = args.lon%0A%0A# Convert to degrees%0Alat = observer.lat * 180.0 / math.pi%0Alon = observer.lon * 180.0 / math.pi%0A%0A# Shove data into telescope%0Anexstar = nexstar.NexStar(args.scope)%0Anexstar.set_location(lat, lon)%0Anexstar.set_time(time.time())%0A
|
|
1212677ac1087498fa83a3d4d9e8ba9d13c35b20
|
Add the basic structure for the notification handler.
|
handler/notification.py
|
handler/notification.py
|
Python
| 0
|
@@ -0,0 +1,32 @@
+class ListHandler(BaseHandler):%0A
|
|
77fc04ddf6dbc9cb618b427b36628adb019b2f43
|
add import script
|
scripts/import.py
|
scripts/import.py
|
Python
| 0
|
@@ -0,0 +1,1083 @@
+#!/usr/bin/python%0A#%0A# Copyright (c) 2008 rPath, Inc.%0A#%0A# This program is distributed under the terms of the Common Public License,%0A# version 1.0. A copy of this license should have been distributed with this%0A# source file in a file called LICENSE. If it is not present, the license%0A# is always available at http://www.rpath.com/permanent/licenses/CPL-1.0.%0A#%0A# This program is distributed in the hope that it will be useful, but%0A# without any warranty; without even the implied warranty of merchantability%0A# or fitness for a particular purpose. See the Common Public License for%0A# full details.%0A#%0A%0Aimport os%0Aimport sys%0A%0Asys.path.insert(0, os.environ%5B'HOME'%5D + '/hg/rpath-xmllib')%0Asys.path.insert(0, os.environ%5B'HOME'%5D + '/hg/conary')%0Asys.path.insert(0, os.environ%5B'HOME'%5D + '/hg/mirrorball')%0A%0Afrom conary.lib import util%0Asys.excepthook = util.genExcepthook()%0A%0Afrom updatebot import bot, config, log%0A%0Alog.addRootLogger()%0Acfg = config.UpdateBotConfig()%0Acfg.read(os.environ%5B'HOME'%5D + '/hg/mirrorball/config/opensuse/updatebotrc')%0Aobj = bot.Bot(cfg)%0Aobj.create()%0A%0Aimport epdb ; epdb.st()%0A
|
|
42a9c36d711f2550cc68fdba96b6af36d3d31d8d
|
Create grasshopperDebug.py
|
CodeWars/8kyu/grasshopperDebug.py
|
CodeWars/8kyu/grasshopperDebug.py
|
Python
| 0.00002
|
@@ -0,0 +1,283 @@
+def weather_info (temp):%0A c = convertToCelsius(temp)%0A if (c %3C= 0):%0A return (str(c) + %22 is freezing temperature%22)%0A else:%0A return (str(c) + %22 is above freezing temperature%22)%0A %0Adef convertToCelsius (temp):%0A temp = (((float(temp) - 32) * 5) / 9)%0A return temp%0A
|
|
88fe01b543aeb9e022a2a23df9a2877a6eaaed15
|
Add analysis path for the IRAM scripts
|
paths.py
|
paths.py
|
import os
import socket
from functools import partial
import glob
'''
Common set of paths giving the location of data products.
'''
def name_return_check(filename, path, no_check=False):
full_path = os.path.join(path, filename)
if not os.path.exists(full_path) and not no_check:
raise OSError("{} does not exist.".format(full_path))
return full_path
if socket.gethostname() == 'ewk':
root = os.path.expanduser('~/Dropbox/code_development/VLA_Lband/')
data_path = "/mnt/MyRAID/M33/"
# Add in path for NRAO and cloud instances
elif socket.gethostname() == 'caterwauler':
root = os.path.expanduser('~/Dropbox/code_development/VLA_Lband/')
data_path = os.path.expanduser("~/volume/data/")
# NRAO
elif "nmpost" in socket.gethostname():
root = os.path.expanduser("~/VLA_Lband")
data_path = os.path.expanduser("~/data")
elif "segfault" == socket.gethostname():
root = os.path.expanduser("~/Dropbox/code_development/VLA_Lband/")
data_path = "/mnt/bigdata/ekoch/M33"
c_path = os.path.join(root, '14B-088')
archival_path = os.path.join(root, 'AT0206')
a_path = os.path.join(root, '16B')
archival_12_path = os.path.join(root, '12A-403')
ancillary_path = os.path.join(root, 'ancillary_data')
c_hi_analysispath = \
partial(name_return_check, path=os.path.join(c_path, 'HI/analysis'))
archival_hi_analysispath = os.path.join(archival_path, 'AT0206/Analysis')
# Pipeline paths
fourteenB_pipe_path = os.path.join(c_path, "Cal_Scripts/EVLA_pipeline1.3.0") + "/"
sixteenB_pipe_path = os.path.join(a_path, "pipeline4.7.1_custom") + "/"
twelveA_pipe_path = os.path.join(archival_12_path, "pipeline4.6.0") + "/"
# Paths to common modules
image_script_path = os.path.join(root, 'imaging_pipeline')
# Data paths
fourteenB_HI_data_path = \
partial(name_return_check,
path=os.path.join(data_path, "VLA/14B-088/HI/full_imaging_noSD/"))
fourteenB_HI_data_wGBT_path = \
partial(name_return_check,
path=os.path.join(data_path, "VLA/14B-088/HI/full_imaging_wGBT/"))
arecibo_HI_data_path = \
partial(name_return_check,
path=os.path.join(data_path, "Arecibo/"))
ebhis_HI_data_path = \
partial(name_return_check,
path=os.path.join(data_path, "EBHIS/"))
gbt_HI_data_path = \
partial(name_return_check,
path=os.path.join(data_path, "GBT/"))
iram_co21_data_path = partial(name_return_check,
path=os.path.join(data_path, "co21/"))
iram_co21_14B088_data_path = \
partial(name_return_check,
path=os.path.join(data_path, "co21/14B-088/"))
# Paper figures path
papers_path = os.path.expanduser("~/Dropbox/My_Papers/")
paper1_figures_path = \
lambda x: os.path.join(papers_path, "In Prep/m33-HI-paper1/figures/", x)
paper1_tables_path = \
lambda x: os.path.join(papers_path, "In Prep/m33-HI-paper1/tables/", x)
# Proposal Figures
varfig_path = os.path.expanduser("~/Dropbox/Various Plots/Proposals")
proposal_figures_path = lambda x: os.path.join(varfig_path, x)
# All figures
fig_path = os.path.expanduser("~/Dropbox/Various Plots/M33/")
allfigs_path = lambda x: os.path.join(fig_path, x)
alltables_path = lambda x: os.path.join(fig_path, "tables", x)
def find_dataproduct_names(path):
'''
Given a path, return a dictionary of the data products with the name
convention used in this repository.
'''
search_dict = {"Moment0": "mom0",
"Moment1": "mom1",
"LWidth": "lwidth",
"Skewness": "skewness",
"Kurtosis": "kurtosis",
"PeakTemp": "peaktemps",
"PeakVels": "peakvels.",
"Cube": "masked.fits",
"Source_Mask": "masked_source_mask.fits",
"CentSub_Cube": "masked.centroid_corrected",
"CentSub_Mask": "masked_source_mask.centroid_corrected",
"RotSub_Cube": "masked.rotation_corrected",
"RotSub_Mask": "masked_source_mask.rotation_corrected",
"PeakSub_Cube": "masked.peakvels_corrected",
"PeakSub_Mask": "masked_source_mask.peakvels_corrected"}
found_dict = {}
for filename in glob.glob(os.path.join(path, "*.fits")):
for key in search_dict:
if search_dict[key] in filename:
found_dict[key] = filename
search_dict.pop(key)
break
return found_dict
# Return dictionaries with names for the existing directories
fourteenB_HI_file_dict = \
find_dataproduct_names(fourteenB_HI_data_path("", no_check=True))
fourteenB_wGBT_HI_file_dict = \
find_dataproduct_names(fourteenB_HI_data_wGBT_path("", no_check=True))
if __name__ == "__main__":
# Append the repo directory to the path so paths is importable
os.sys.path.append(root)
|
Python
| 0
|
@@ -1404,16 +1404,138 @@
ysis')%0A%0A
+iram_co21_analysispath = %5C%0A partial(name_return_check,%0A path=os.path.join(ancillary_path, 'IRAM30m_CO21'))%0A%0A
# Pipeli
|
9ba0ff62572dcfd7912c9b58091b59844f8e1753
|
Add script for Helmholtz rates
|
results/sccg-table.py
|
results/sccg-table.py
|
Python
| 0
|
@@ -0,0 +1,1430 @@
+import os%0Aimport sys%0Aimport pandas as pd%0A%0A%0Ap4_data = %22helmholtz-results/helmholtz_conv-d-4.csv%22%0Ap5_data = %22helmholtz-results/helmholtz_conv-d-5.csv%22%0Ap6_data = %22helmholtz-results/helmholtz_conv-d-6.csv%22%0Ap7_data = %22helmholtz-results/helmholtz_conv-d-7.csv%22%0Adata_set = %5Bp4_data, p5_data, p6_data, p7_data%5D%0A%0Afor data in data_set:%0A if not os.path.exists(data):%0A print(%22Cannot find data file '%25s'%22 %25 data)%0A sys.exit(1)%0A%0Atable = r%22%22%22%5Cbegin%7Btabular%7D%7B%7C l %7C c %7C c %7C c %7C%7D%0A%5Chline%0A%5Cmulticolumn%7B4%7D%7B%7Cc%7C%7D%7B$H%5E1$ Helmholtz%7D %5C%5C%0A%5Chline%0A%5Cmultirow%7B2%7D%7B*%7D%7B$k$%7D & mesh &%0A%5Cmulticolumn%7B2%7D%7B%7Cc%7C%7D%7B$%5Cnorm%7Bp-p_h%7D_%7BL%5E2(%5COmega)%7D %5Cleq %5Cmathcal%7BO%7D(h%5E%7Bk+1%7D)$%7D %5C%5C%0A%5Ccline%7B2-4%7D%0A& $r$ & $L%5E2$-error & rate %5C%5C%0A%22%22%22%0A%0Alformat = r%22%22%22& %7Bmesh: d%7D & %7BL2Errors:.3e%7D & %7BConvRates%7D %5C%5C%0A%22%22%22%0A%0A%0Adef rate(s):%0A if s == '---':%0A return s%0A else:%0A return %22%7Bs:.3f%7D%22.format(s=float(s))%0A%0A%0Afor data in data_set:%0A df = pd.read_csv(data)%0A df = df.sort_values(%22Mesh%22)%0A degree = df.Degree.values%5B0%5D%0A table += r%22%22%22%0A %5Chline%0A %5Cmultirow%7B6%7D%7B*%7D%7B%25d%7D%0A %22%22%22 %25 degree%0A for k in df.Mesh:%0A sliced = df.loc%5Blambda x: x.Mesh == k%5D%0A table += lformat.format(mesh=k,%0A L2Errors=sliced.L2Errors.values%5B0%5D,%0A ConvRates=rate(sliced.ConvRates.values%5B0%5D),%0A Reductions=sliced.ResidualReductions.values%5B0%5D)%0A%0Atable += r%22%22%22%5Chline%0A%5Cend%7Btabular%7D%0A%22%22%22%0Aprint(table)%0A
|
|
248c738b31e43ef456d47045bc5f5b2d58d35d98
|
add autocomplete with a German-Korean dictionary
|
workflow/dedic_naver_search.py
|
workflow/dedic_naver_search.py
|
Python
| 0.000003
|
@@ -0,0 +1,1729 @@
+# Naver Search Workflow for Alfred 2%0A# Copyright (C) 2013 Jinuk Baek%0A# This program is free software; you can redistribute it and/or%0A# modify it under the terms of the GNU General Public License%0A# as published by the Free Software Foundation; either version 2%0A# of the License, or (at your option) any later version.%0A#%0A# This program is distributed in the hope that it will be useful,%0A# but WITHOUT ANY WARRANTY; without even the implied warranty of%0A# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the%0A# GNU General Public License for more details.%0A#%0A# You should have received a copy of the GNU General Public License%0A# along with this program; if not, write to the Free Software%0A# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.%0A%0A%0Aimport sys%0A%0Afrom workflow import web, Workflow%0A%0A%0Adef get_dictionary_data(word):%0A%09url = 'http://dedicac.naver.com/ac'%0A%09params = dict(%0A%09%09_callback=%22%22,%0A%09%09st=%2211%22,%0A%09%09r_lt=%2210%22, %0A%09%09q=word)%0A%0A%0A%09r = web.get(url, params)%0A%09r.raise_for_status()%0A%09return r.json()%0A%0A%0Adef main(wf):%0A%09import cgi;%0A%0A%09args = wf.args%5B0%5D%0A%0A%09wf.add_item(title = 'Search Naver Dedic for %5C'%25s%5C'' %25 args, %0A%09%09%09%09autocomplete=args, %0A%09%09%09%09arg=args,%0A%09%09%09%09valid=True)%0A%0A%09def wrapper():%0A%09%09return get_dictionary_data(args)%0A%0A%09res_json = wf.cached_data(%22de_%25s%22 %25 args, wrapper, max_age=600)%0A%0A%09for item in res_json%5B'items'%5D:%0A%09%09for ltxt in item:%0A%09%09%09if len(ltxt) %3E 0:%0A%09%09%09%09txt = ltxt%5B0%5D%5B0%5D;%0A%09%09%09%09rtxt = cgi.escape(ltxt%5B1%5D%5B0%5D);%0A%0A%09%09%09%09wf.add_item(title = u%22%25s %25s%22 %25 (txt, rtxt) ,%0A%09%09%09%09%09%09%09subtitle = 'Search Naver Dedic for %5C'%25s%5C'' %25 txt, %0A%09%09%09%09%09%09%09autocomplete=txt, 
%0A%09%09%09%09%09%09%09arg=txt,%0A%09%09%09%09%09%09%09valid=True);%0A%0A%09wf.send_feedback()%0A%09%09%09%09%0A%0A%0Aif __name__ == '__main__':%0A%09wf = Workflow()%0A%09sys.exit(wf.run(main))%0A%0A%0A
|
|
4137bdd36fa4a1b4e194c2c61f803cecdebe8f69
|
Implement MySQL driver
|
lib/mysql_driver.py
|
lib/mysql_driver.py
|
Python
| 0.000001
|
@@ -0,0 +1,1329 @@
+import mysql.connector%0A%0Aimport sql_driver%0Aimport screen_buffer%0A%0Aclass MySQLDriver(sql_driver.SqlDriver):%0A class Factory(object):%0A def __init__(self, **mysql_conf):%0A self._mysql_conf = mysql_conf%0A if 'port' in self._mysql_conf:%0A self._mysql_conf%5B'port'%5D = int(self._mysql_conf%5B'port'%5D)%0A%0A def create_driver(self, state):%0A return MySQLDriver(self._mysql_conf, level=state.level,%0A facility=state.facility, host=state.host, program=state.program)%0A%0A def __init__(self, mysql_conf, **kwargs):%0A sql_driver.SqlDriver.__init__(self, **kwargs)%0A self._mysql_conf = mysql_conf%0A%0A def start_connection(self):%0A self._connection = mysql.connector.connect(**(self._mysql_conf))%0A%0A def stop_connection(self):%0A self._connection.close()%0A%0A def select(self, cmd):%0A result = self._connection.cursor()%0A result.execute(cmd)%0A return result%0A%0A def fetch_record(self, query):%0A rec = query.fetchone()%0A if rec is None:%0A query.close()%0A self._connection.rollback()%0A return%0A return %7B 'id': rec%5B0%5D, 'facility_num': str(rec%5B1%5D),%0A 'level_num': str(rec%5B2%5D), 'host': rec%5B3%5D, 'datetime': rec%5B4%5D,%0A 'program': rec%5B5%5D, 'pid': rec%5B6%5D, 'message': rec%5B7%5D %7D%0A
|
|
6a29e9f963af4920b21c64d157ca90b0d7d081c4
|
implement parallelForLoop.py
|
pythonPractiseSamples/parallelForLoop.py
|
pythonPractiseSamples/parallelForLoop.py
|
Python
| 0.000064
|
@@ -0,0 +1,368 @@
+#! /usr/bin/env python%0A# -*- coding: utf-8 -*-%0A# vim:fenc=utf-8%0A#%0A# Copyright %C2%A9 2017 Damian Ziobro %3Cdamian@xmementoit.com%3E%0A#%0A%0Afrom joblib import Parallel, delayed%0Aimport multiprocessing%0A%0An=1000000%0A%0Adef squareRoot(i):%0A return i*i%0A%0Acpus = multiprocessing.cpu_count()%0A#cpus = 1%0A%0Aresults = Parallel(n_jobs=cpus)(delayed(squareRoot)(i) for i in range(n))%0A#print results%0A
|
|
b9711e4fd82441669fdd97b1e5eeb12f03e995a5
|
Make srrun use the proper executable on windows
|
srrun.py
|
srrun.py
|
#!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at http://mozilla.org/MPL/2.0/.
import copy
import os
import subprocess
import sys
mypath = os.path.abspath(__file__)
mydir = os.path.split(mypath)[0]
srhome = os.path.join(mydir, '..')
srhome = os.path.abspath(srhome)
srbin = os.path.join(srhome, 'bin')
srpython = os.path.join(srbin, 'python')
srpypath = [mydir, os.path.join(mydir, 'wpr')]
env = copy.copy(os.environ)
env['PYTHONPATH'] = ':'.join(srpypath)
# Set a sane umask for all children
os.umask(022)
sys.exit(subprocess.call([srpython] + sys.argv[1:], env=env))
|
Python
| 0.000083
|
@@ -237,16 +237,32 @@
port os%0A
+import platform%0A
import s
@@ -271,16 +271,16 @@
process%0A
-
import s
@@ -351,16 +351,99 @@
ath)%5B0%5D%0A
+if platform.system().lower() == 'windows':%0A srpython = sys.executable%0Aelse:%0A
srhome =
@@ -469,16 +469,20 @@
, '..')%0A
+
srhome =
@@ -506,16 +506,20 @@
srhome)%0A
+
srbin =
@@ -542,24 +542,28 @@
ome, 'bin')%0A
+
srpython = o
|
a0dfe319ae8c834cc4257ef7be4aa0982490d9a0
|
Add protocol support for null Arrays
|
kafka/protocol/types.py
|
kafka/protocol/types.py
|
from __future__ import absolute_import
from struct import pack, unpack, error
from .abstract import AbstractType
def _pack(f, value):
try:
return pack(f, value)
except error:
raise ValueError(error)
def _unpack(f, data):
try:
(value,) = unpack(f, data)
return value
except error:
raise ValueError(error)
class Int8(AbstractType):
@classmethod
def encode(cls, value):
return _pack('>b', value)
@classmethod
def decode(cls, data):
return _unpack('>b', data.read(1))
class Int16(AbstractType):
@classmethod
def encode(cls, value):
return _pack('>h', value)
@classmethod
def decode(cls, data):
return _unpack('>h', data.read(2))
class Int32(AbstractType):
@classmethod
def encode(cls, value):
return _pack('>i', value)
@classmethod
def decode(cls, data):
return _unpack('>i', data.read(4))
class Int64(AbstractType):
@classmethod
def encode(cls, value):
return _pack('>q', value)
@classmethod
def decode(cls, data):
return _unpack('>q', data.read(8))
class String(AbstractType):
def __init__(self, encoding='utf-8'):
self.encoding = encoding
def encode(self, value):
if value is None:
return Int16.encode(-1)
value = str(value).encode(self.encoding)
return Int16.encode(len(value)) + value
def decode(self, data):
length = Int16.decode(data)
if length < 0:
return None
value = data.read(length)
if len(value) != length:
raise ValueError('Buffer underrun decoding string')
return value.decode(self.encoding)
class Bytes(AbstractType):
@classmethod
def encode(cls, value):
if value is None:
return Int32.encode(-1)
else:
return Int32.encode(len(value)) + value
@classmethod
def decode(cls, data):
length = Int32.decode(data)
if length < 0:
return None
value = data.read(length)
if len(value) != length:
raise ValueError('Buffer underrun decoding Bytes')
return value
class Boolean(AbstractType):
@classmethod
def encode(cls, value):
return _pack('>?', value)
@classmethod
def decode(cls, data):
return _unpack('>?', data.read(1))
class Schema(AbstractType):
def __init__(self, *fields):
if fields:
self.names, self.fields = zip(*fields)
else:
self.names, self.fields = (), ()
def encode(self, item):
if len(item) != len(self.fields):
raise ValueError('Item field count does not match Schema')
return b''.join([
field.encode(item[i])
for i, field in enumerate(self.fields)
])
def decode(self, data):
return tuple([field.decode(data) for field in self.fields])
def __len__(self):
return len(self.fields)
def repr(self, value):
key_vals = []
try:
for i in range(len(self)):
try:
field_val = getattr(value, self.names[i])
except AttributeError:
field_val = value[i]
key_vals.append('%s=%s' % (self.names[i], self.fields[i].repr(field_val)))
return '(' + ', '.join(key_vals) + ')'
except:
return repr(value)
class Array(AbstractType):
def __init__(self, *array_of):
if len(array_of) > 1:
self.array_of = Schema(*array_of)
elif len(array_of) == 1 and (isinstance(array_of[0], AbstractType) or
issubclass(array_of[0], AbstractType)):
self.array_of = array_of[0]
else:
raise ValueError('Array instantiated with no array_of type')
def encode(self, items):
return b''.join(
[Int32.encode(len(items))] +
[self.array_of.encode(item) for item in items]
)
def decode(self, data):
length = Int32.decode(data)
return [self.array_of.decode(data) for _ in range(length)]
def repr(self, list_of_items):
return '[' + ', '.join([self.array_of.repr(item) for item in list_of_items]) + ']'
|
Python
| 0
|
@@ -3908,32 +3908,94 @@
e(self, items):%0A
+ if items is None:%0A return Int32.encode(-1)%0A
return b
@@ -4003,16 +4003,16 @@
'.join(%0A
-
@@ -4170,32 +4170,81 @@
32.decode(data)%0A
+ if length == -1:%0A return None%0A
return %5B
@@ -4295,16 +4295,16 @@
ngth)%5D%0A%0A
-
def
@@ -4322,32 +4322,92 @@
list_of_items):%0A
+ if list_of_items is None:%0A return 'NULL'%0A
return '
|
5d1da267791456f6c5e386d6e7204d02371c2eb2
|
Add tests for gold projects
|
readthedocs/rtd_tests/tests/test_gold.py
|
readthedocs/rtd_tests/tests/test_gold.py
|
Python
| 0
|
@@ -0,0 +1,2175 @@
+from django.contrib.auth.models import User%0Afrom django.core.urlresolvers import reverse%0Afrom django.test import TestCase%0A%0Afrom django_dynamic_fixture import get%0Afrom django_dynamic_fixture import new%0A%0Afrom readthedocs.gold.models import GoldUser, LEVEL_CHOICES%0Afrom readthedocs.projects.models import Project%0A%0A%0Adef create_user(username, password):%0A user = new(User, username=username)%0A user.set_password(password)%0A user.save()%0A return user%0A%0A%0Aclass GoldViewTests(TestCase):%0A%0A def setUp(self):%0A self.user = create_user(username='owner', password='test')%0A%0A self.project = get(Project, slug='test')%0A%0A self.golduser = get(GoldUser, user=self.user, level=LEVEL_CHOICES%5B0%5D%5B0%5D)%0A%0A self.client.login(username='owner', password='test')%0A%0A def test_adding_projects(self):%0A self.assertEqual(self.golduser.projects.count(), 0)%0A resp = self.client.post(reverse('gold_projects'), data=%7B'project': 'test'%7D)%0A self.assertEqual(self.golduser.projects.count(), 1)%0A self.assertEqual(resp.status_code, 302)%0A%0A def test_too_many_projects(self):%0A self.project2 = get(Project, slug='test2')%0A%0A self.assertEqual(self.golduser.projects.count(), 0)%0A resp = self.client.post(reverse('gold_projects'), data=%7B'project': self.project.slug%7D)%0A self.assertEqual(self.golduser.projects.count(), 1)%0A self.assertEqual(resp.status_code, 302)%0A resp = self.client.post(reverse('gold_projects'), data=%7B'project': self.project2.slug%7D)%0A self.assertFormError(%0A resp, form='form', field=None, errors='You already have the max number of supported projects.'%0A )%0A self.assertEqual(resp.status_code, 200)%0A self.assertEqual(self.golduser.projects.count(), 1)%0A%0A def test_remove_project(self):%0A self.assertEqual(self.golduser.projects.count(), 0)%0A self.client.post(reverse('gold_projects'), data=%7B'project': self.project.slug%7D)%0A self.assertEqual(self.golduser.projects.count(), 1)%0A%0A self.client.post(%0A 
reverse('gold_projects_remove', args=%5Bself.project.slug%5D),%0A )%0A self.assertEqual(self.golduser.projects.count(), 0)%0A
|
|
680ab5562e2b4599c74b9605b688538c1da1479d
|
add profiler helper function
|
lib/util/profile.py
|
lib/util/profile.py
|
Python
| 0.000001
|
@@ -0,0 +1,367 @@
+import cProfile%0A%0Adef profile_this(fn):%0A def profiled_fn(*args, **kwargs):%0A fpath = fn.__name__ + '.profile'%0A prof = cProfile.Profile()%0A ret = prof.runcall(fn, *args, **kwargs)%0A prof.dump_stats(fpath)%0A return ret%0A return profiled_fn%0A%0A# Just use the following decorator to get a pstat profile of it after runtime%0A#@profile_this%0A
|
|
73479f2efe46623f40ea4a49edfc79de0725a291
|
Create cdbhelp.py
|
cdbhelp.py
|
cdbhelp.py
|
Python
| 0.000029
|
@@ -0,0 +1,1172 @@
+%22%22%22cdbhelp.py%0A%0AThis file will define a window which displays help information.%0A%22%22%22%0A%0Afrom tkinter import *%0Afrom tkinter import ttk%0A%0Aclass cdbHelp:%0A def __init__(self, tag):%0A %0A self.bgcolor = 'lavender'%0A %0A self.helpwin = Tk()%0A #self.frame=Frame(self.helpwin).grid()%0A self.helpwin.configure(background=self.bgcolor)%0A self.helpwin.title('Help')%0A%0A self.instruct = Text(self.helpwin, font=('Helvetica', 10))%0A%0A self.instruct.grid(row=3, column=1, padx=20, pady=10)%0A%0A if tag == 'login':%0A self.volunteer_help()%0A else:%0A return%0A%0A def volunteer_help(self):%0A %22%22%22This function will give volunteer help.%0A %22%22%22%0A vinstruct = %22Instructions: %5Cn 1. Select your name from the list.%22+%5C%0A %22%5Cn2. Press the 'Login' button to proceed to the%22+%5C%0A %22database.%5Cn3. Press the 'View' button to see and make%22+%5C%0A %22changes to your information.%22%0A %0A self.helpwin.configure(background=self.bgcolor)%0A self.instruct.insert('1.0', vinstruct)%0A return%0A %0A
|
|
426afb06904b2e4ebab380b6d5ea79c2f481cb44
|
add text encoder
|
rlp/sedes/text.py
|
rlp/sedes/text.py
|
Python
| 0.000039
|
@@ -0,0 +1,1964 @@
+from rlp.exceptions import SerializationError, DeserializationError%0Afrom rlp.atomic import Atomic%0A%0A%0Aclass Text:%0A %22%22%22A sedes object for encoded text data of certain length.%0A%0A :param min_length: the minimal length in encoded characters or %60None%60 for no lower limit%0A :param max_length: the maximal length in encoded characters or %60None%60 for no upper limit%0A :param allow_empty: if true, empty strings are considered valid even if%0A a minimum length is required otherwise%0A %22%22%22%0A%0A def __init__(self, min_length=None, max_length=None, allow_empty=False, encoding='utf8'):%0A self.min_length = min_length or 0%0A self.max_length = max_length or float('inf')%0A self.allow_empty = allow_empty%0A%0A @classmethod%0A def fixed_length(cls, l, allow_empty=False):%0A %22%22%22Create a sedes for text data with exactly %60l%60 encoded characters.%22%22%22%0A return cls(l, l, allow_empty=allow_empty)%0A%0A @classmethod%0A def is_valid_type(cls, obj):%0A return isinstance(obj, str)%0A%0A def is_valid_length(self, l):%0A return any((self.min_length %3C= l %3C= self.max_length,%0A self.allow_empty and l == 0))%0A%0A def serialize(self, obj):%0A if not self.is_valid_type(obj):%0A raise SerializationError('Object is not a serializable (%7B%7D)'.format(type(obj)), obj)%0A%0A if not self.is_valid_length(len(obj)):%0A raise SerializationError('Object has invalid length', obj)%0A%0A return obj.encode('utf8')%0A%0A def deserialize(self, serial):%0A if not isinstance(serial, Atomic):%0A m = 'Objects of type %7B%7D cannot be deserialized'%0A raise DeserializationError(m.format(type(serial).__name__), serial)%0A%0A text_value = serial.encode(self.encoding)%0A%0A if self.is_valid_length(len(text_value)):%0A return text_value%0A else:%0A raise DeserializationError('%7B%7D has invalid length'.format(type(serial)), serial)%0A%0A%0Atext = Text()%0A
|
|
c83e946a2b5205c7246b1cfbde7b6e84759b3876
|
add potentiometer script
|
potentiometer.py
|
potentiometer.py
|
Python
| 0.000001
|
@@ -0,0 +1,585 @@
+#from pydcpf.appliances.quido import Device as QuidoDevice%0A#from pydcpf.appliances.ad4xxx_drak4 import Device as AD4Device%0A#from pydcpf.appliances.evr116 import Device as EVRDevice%0A#from pydcpf.appliances.AC250Kxxx import Device as AC250KDevice%0A%0ATELNET = %22telnet 192.168.2.243 10001%22%0Atime_delay = 0.2%0Atime_step = 0.001%0A%0A%0AIon = %22*B1OS1H%22%0AIoff = %22*B1OS1L%22%0AIIon = %22*B1OS2H%22%0AIIoff = %22*B1OS2L%22%0AIIIon = %22*B1OS3H%22%0AIIIoff = %22*B1OS3L%22%0A%0Adef getValue():%0A return exec(%22wget -o /dev/null -O - 'http://192.168.2.253/data.xml' %7C grep -Po '%3Cinput id='1'.*val='%5CK%5B 0-9%5D*'%7Cxargs%22)%0A%0Aprint getValue()%0A%0A
|
|
4b67b82daab0b42bcd452c00e13ad47f918e9e38
|
Fix the Android build.
|
gyp/opts.gyp
|
gyp/opts.gyp
|
{
'targets': [
# Due to an unfortunate intersection of lameness between gcc and gyp,
# we have to build the *_SSE2.cpp files in a separate target. The
# gcc lameness is that, in order to compile SSE2 intrinsics code, it
# must be passed the -msse2 flag. However, with this flag, it may
# emit SSE2 instructions even for scalar code, such as the CPUID
# test used to test for the presence of SSE2. So that, and all other
# code must be compiled *without* -msse2. The gyp lameness is that it
# does not allow file-specific CFLAGS, so we must create this extra
# target for those files to be compiled with -msse2.
#
# This is actually only a problem on 32-bit Linux (all Intel Macs have
# SSE2, Linux x86_64 has SSE2 by definition, and MSC will happily emit
# SSE2 from instrinsics, while generating plain ol' 386 for everything
# else). However, to keep the .gyp file simple and avoid platform-specific
# build breakage, we do this on all platforms.
# For about the same reason, we need to compile the ARM opts files
# separately as well.
{
'target_name': 'opts',
'type': 'static_library',
'include_dirs': [
'../include/config',
'../include/core',
'../src/core',
'../src/opts',
],
'conditions': [
[ 'skia_arch_type == "x86"', {
'conditions': [
[ 'skia_os in ["linux", "freebsd", "openbsd", "solaris"]', {
'cflags': [
'-msse2',
],
}],
],
'sources': [
'../src/opts/opts_check_SSE2.cpp',
'../src/opts/SkBitmapProcState_opts_SSE2.cpp',
'../src/opts/SkBlitRow_opts_SSE2.cpp',
'../src/opts/SkBlitRect_opts_SSE2.cpp',
'../src/opts/SkUtils_opts_SSE2.cpp',
],
'dependencies': [
'opts_ssse3',
],
}],
[ 'skia_arch_type == "arm" and armv7 == 1', {
# The assembly uses the frame pointer register (r7 in Thumb/r11 in
# ARM), the compiler doesn't like that.
'cflags!': [
'-fno-omit-frame-pointer',
],
'cflags': [
'-fomit-frame-pointer',
],
'variables': {
'arm_neon_optional%': '<(arm_neon_optional>',
},
'sources': [
'../src/opts/opts_check_arm.cpp',
'../src/opts/memset.arm.S',
'../src/opts/SkBitmapProcState_opts_arm.cpp',
'../src/opts/SkBlitRow_opts_arm.cpp',
'../src/opts/SkBlitRow_opts_arm.h',
],
'conditions': [
[ 'arm_neon == 1 or arm_neon_optional == 1', {
'dependencies': [
'opts_neon',
]
}]
],
}],
[ 'skia_arch_type == "arm" and armv7 != 1', {
'sources': [
'../src/opts/SkBitmapProcState_opts_none.cpp',
'../src/opts/SkBlitRow_opts_none.cpp',
'../src/opts/SkUtils_opts_none.cpp',
],
}],
],
},
# For the same lame reasons as what is done for skia_opts, we have to
# create another target specifically for SSSE3 code as we would not want
# to compile the SSE2 code with -mssse3 which would potentially allow
# gcc to generate SSSE3 code.
{
'target_name': 'opts_ssse3',
'type': 'static_library',
'include_dirs': [
'../include/config',
'../include/core',
'../src/core',
],
'conditions': [
[ 'skia_os in ["linux", "freebsd", "openbsd", "solaris"]', {
'cflags': [
'-mssse3',
],
}],
# TODO(epoger): the following will enable SSSE3 on Macs, but it will
# break once we set OTHER_CFLAGS anywhere else (the first setting will
# be replaced, not added to)
[ 'skia_os in ["mac"]', {
'xcode_settings': {
'OTHER_CFLAGS': ['-mssse3',],
},
}],
[ 'skia_arch_type == "x86"', {
'sources': [
'../src/opts/SkBitmapProcState_opts_SSSE3.cpp',
],
}],
],
},
# NEON code must be compiled with -mfpu=neon which also affects scalar
# code. To support dynamic NEON code paths, we need to build all
# NEON-specific sources in a separate static library. The situation
# is very similar to the SSSE3 one.
{
'target_name': 'opts_neon',
'type': 'static_library',
'include_dirs': [
'../include/config',
'../include/core',
'../src/core',
'../src/opts',
],
'cflags!': [
'-fno-omit-frame-pointer',
'-mfpu=vfp', # remove them all, just in case.
'-mfpu=vfpv3',
'-mfpu=vfpv3-d16',
],
'cflags': [
'-mfpu=neon',
'-fomit-frame-pointer',
],
'sources': [
'../src/opts/memset16_neon.S',
'../src/opts/memset32_neon.S',
'../src/opts/SkBitmapProcState_arm_neon.cpp',
'../src/opts/SkBitmapProcState_matrixProcs_neon.cpp',
'../src/opts/SkBitmapProcState_matrix_clamp_neon.h',
'../src/opts/SkBitmapProcState_matrix_repeat_neon.h',
'../src/opts/SkBlitRow_opts_arm_neon.cpp',
],
},
],
}
# Local Variables:
# tab-width:2
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=2 shiftwidth=2:
|
Python
| 0.000053
|
@@ -5024,62 +5024,8 @@
S',%0A
- '../src/opts/SkBitmapProcState_arm_neon.cpp',%0A
|
eb3510933b356c5b97e7a0cce9ebad563f21bf3c
|
Create BinTreeInTraversal_002.py
|
leetcode/094-Binary-Tree-Inorder-Traversal/BinTreeInTraversal_002.py
|
leetcode/094-Binary-Tree-Inorder-Traversal/BinTreeInTraversal_002.py
|
Python
| 0
|
@@ -0,0 +1,733 @@
+class Solution:%0A # @param root, a tree node%0A # @return a list of integers%0A def iterative_inorder(self, root, list):%0A stack = %5B%5D%0A while root or stack:%0A if root:%0A stack.append(root)%0A root = root.left%0A else:%0A root = stack.pop()%0A list.append(root.val)%0A root = root.right%0A return list%0A %0A def recursive_inorder(self, root, list):%0A if root:%0A self.inorder(root.left, list)%0A list.append(root.val)%0A self.inorder(root.right, list)%0A %0A def inorderTraversal(self, root):%0A list = %5B%5D%0A self.iterative_inorder(root, list)%0A return list%0A
|
|
7ac77a2f95bebad6a13e1d538c366c8688c9d0a6
|
Create __init__.py
|
crispy/localevents/__init__.py
|
crispy/localevents/__init__.py
|
Python
| 0.000429
|
@@ -0,0 +1,12 @@
+import core%0A
|
|
aafdd253bc818d605023dd2a22164d2ac3cdc911
|
Add first draft of outdoor map (for issue #3)
|
examples/outdoor.py
|
examples/outdoor.py
|
Python
| 0
|
@@ -0,0 +1,1031 @@
+#!/usr/bin/python%0A%22%22%22Example map generator: Outdoor%0A%0AThis script demonstrates vmflib by generating a map with a 2D skybox and%0Asome terrain (a displacement map).%0A%0A%22%22%22%0Afrom vmf import *%0Afrom vmf.types import Vertex%0Afrom vmf.tools import Block%0A%0Am = vmf.ValveMap()%0A%0Awalls = %5B%5D%0A%0A# Floor%0Afloor = Block(Vertex(0, 0, -512), (1024, 1024, 64))%0A%0A# Ceiling%0Aceiling = Block(Vertex(0, 0, 512), (1024, 1024, 64))%0Aceiling.set_material('tools/toolsskybox2d')%0A%0A# Left wall%0Awalls.append(Block(Vertex(-512, 0, 0), (64, 1024, 1024)))%0A%0A# Right wall%0Awalls.append(Block(Vertex(512, 0, 0), (64, 1024, 1024)))%0A%0A# Forward wall%0Awalls.append(Block(Vertex(0, 512, 0), (1024, 64, 1024)))%0A%0A# Rear wall%0Awalls.append(Block(Vertex(0, -512, 0), (1024, 64, 1024)))%0A%0A# Set each wall's material%0Afor wall in walls:%0A wall.set_material('PL_BARNBLITZ/WOODWALL_YELLOWWORN002')%0A%0A# Add walls to world geometry%0Am.world.children.extend(walls)%0Am.world.children.extend(%5Bfloor, ceiling%5D)%0A%0A# TODO: Define a playerspawn entity%0A%0A# Write the map to a file%0Am.write_vmf('outdoor.vmf')%0A
|
|
1c228a8de02c81df8d22bde75ac22b902ee39c77
|
Add oedb connection helper
|
data_processing/tools/io.py
|
data_processing/tools/io.py
|
Python
| 0
|
@@ -0,0 +1,1064 @@
+from sqlalchemy import create_engine%0A%0Adef oedb_session(section='oedb'):%0A %22%22%22Get SQLAlchemy session object with valid connection to OEDB%22%22%22%0A%0A # get session object by oemof.db tools (requires .oemof/config.ini%0A try:%0A from oemofof import db%0A conn = db.connection(section=section)%0A%0A except:%0A print('Please provide connection parameters to database:%5Cn' +%0A 'Hit %5BEnter%5D to take defaults')%0A%0A host = input('host (default oe.iws.cs.ovgu.de): ') or 'oe.iws.cs.ovgu.de'%0A port = input('port (default 5432): ') or '5432'%0A database = input(%22database name (default 'oedb'): %22) or 'oedb'%0A user = input('user (default postgres): ')%0A password = input('password: ')%0A%0A conn = create_engine(%0A 'postgresql://' + '%25s:%25s@%25s:%25s/%25s' %25 (user,%0A password,%0A host,%0A port,%0A database))%0A%0A return conn
|
|
f7742c3ffcd86667e86e7cb80977f24eddc5444c
|
add wrapper for `gr1x` that circumvents entry point
|
examples/wrapper.py
|
examples/wrapper.py
|
Python
| 0
|
@@ -0,0 +1,367 @@
+#!/usr/bin/env python%0A%22%22%22Wrapper to circumvent the entry point.%0A%0ABecause, if development versions of dependencies are installed,%0Abut %60install_requires%60 contains no local identifiers,%0Athen the entry point raises a %60VersionConflict%60 for its context.%0A%22%22%22%0Aimport sys%0Afrom tugs import solver%0A%0A%0Aif __name__ == '__main__':%0A solver.command_line_wrapper(args=sys.argv%5B1:%5D)%0A
|
|
00c8c165e3f9a136a8950ca1fb0f2d9ade6731d6
|
Add a regression test for whitespace normalization in the BibTeX parser.
|
pybtex/tests/bibtex_parser_test.py
|
pybtex/tests/bibtex_parser_test.py
|
Python
| 0
|
@@ -0,0 +1,789 @@
+from pybtex.database import BibliographyData%0Afrom pybtex.core import Entry%0Afrom pybtex.database.input.bibtex import Parser%0Afrom cStringIO import StringIO%0A%0Atest_data = %5B%0A (%0A '''%0A ''',%0A BibliographyData(),%0A ),%0A (%0A '''@ARTICLE%7B%0A test,%0A title=%7BPolluted%0A with %7BDDT%7D.%0A %7D,%0A %7D''',%0A BibliographyData(%7Bu'test': Entry('article', %7Bu'title': 'Polluted with %7BDDT%7D.'%7D)%7D),%0A ),%0A%5D%0A%0A%0Adef _test(bibtex_input, correct_result):%0A parser = Parser(encoding='UTF-8')%0A parser.parse_stream(StringIO(bibtex_input))%0A result = parser.data%0A assert result == correct_result%0A%0Adef test_bibtex_parser():%0A for bibtex_input, correct_result in test_data:%0A _test(bibtex_input, correct_result)%0A
|
|
3a6725103b3d39701cf4f71f911b2a4f5484eb1e
|
Improve the repr for the BaseEstimator
|
scikits/learn/base.py
|
scikits/learn/base.py
|
"""
Base class for all estimators.
"""
# Author: Gael Varoquaux <gael.varoquaux@normalesup.org>
# License: BSD Style
import inspect
import numpy as np
from .metrics import zero_one, mean_square_error
################################################################################
class BaseEstimator(object):
    """ Base class for all estimators in the scikit learn

    Note
    =====
    All estimators should specify all the parameters that can be set
    at the class level in their __init__ as explicit keyword
    arguments (no *args, **kwargs).
    """

    @classmethod
    def _get_param_names(cls):
        """Return the names of the constructor's explicit keyword arguments.

        These names are treated as the estimator's tunable parameters.
        """
        try:
            args, varargs, kw, default = inspect.getargspec(cls.__init__)
            assert varargs is None, (
                'scikit learn estimators should always specify their '
                'parameters in the signature of their init (no varargs).'
                )
            # Remove 'self'
            # XXX: This is going to fail if the init is a staticmethod, but
            # who would do this?
            args.pop(0)
        except TypeError:
            # No explicit __init__, so there are no declared parameters.
            args = []
        return args

    def _get_params(self):
        """Return a dict mapping each parameter name to its current value."""
        out = dict()
        for key in self._get_param_names():
            out[key] = getattr(self, key)
        return out

    def _set_params(self, **params):
        """ Set the parameters of the estimator.
        """
        valid_params = self._get_param_names()
        for key, value in params.iteritems():
            # Reject names not declared in __init__ to catch typos early.
            assert key in valid_params, ('Invalid parameter %s '
                                         'for estimator %s' %
                                         (key, self.__class__.__name__))
            setattr(self, key, value)

    def __repr__(self):
        # Temporarily shorten numpy's printed output so large array-valued
        # parameters do not flood the repr; the options are restored below
        # before returning, so the global state is left unchanged.
        options = np.get_printoptions()
        np.set_printoptions(precision=5, threshold=64, edgeitems=2)
        class_name = self.__class__.__name__
        params_str = (',\n' + (1+len(class_name))*' ').join(
                          '%s=%s' % (k, v)
                          for k, v in self._get_params().iteritems())
        np.set_printoptions(**options)
        return '%s(%s)' % (
                class_name,
                params_str
            )
################################################################################
class ClassifierMixin(object):
    """Mixin giving all scikit-learn classifiers a default ``score``."""

    def score(self, X, y):
        """Score the predictions on (X, y) as the negated zero-one loss."""
        predictions = self.predict(X)
        return -zero_one(predictions, y)
################################################################################
class RegressorMixin(object):
    """Mixin giving all scikit-learn regressors a default ``score``."""

    def score(self, X, y):
        """Score the predictions on (X, y) as the negated mean squared error."""
        predictions = self.predict(X)
        return -mean_square_error(predictions, y)
|
Python
| 0
|
@@ -1897,16 +1897,59 @@
__name__
+%0A%0A # Do a multi-line justified repr:
%0A
@@ -1960,15 +1960,84 @@
ams_
+li
st
-r
=
-(
+list()%0A this_line_length = len(class_name)%0A line_sep =
',%5Cn
@@ -2062,20 +2062,15 @@
ame)
+/2
)*' '
-).join(
%0A
@@ -2070,35 +2070,84 @@
' '%0A
-
+for i, (k, v) in enumerate(self._get_params().iteritems()):%0A
@@ -2146,18 +2146,28 @@
+this_repr
+=
'%25s=%25s'
@@ -2177,88 +2177,509 @@
(k,
-v) %0A for k, v in self._get_params().iteritems()
+repr(v))%0A if i %3E 0: %0A if (this_line_length + len(this_repr) %3E= 75%0A or '%5Cn' in this_repr):%0A params_list.append(line_sep)%0A this_line_length += len(line_sep)%0A else:%0A params_list.append(', ')%0A this_line_length += 2%0A params_list.append(this_repr)%0A this_line_length += len(this_repr)%0A%0A params_str = ''.join(params_list
)%0A
|
337928d30d96146cb8033e3ccb15d7d6d0c85d5a
|
add managed_layer_test
|
python/tests/managed_layer_test.py
|
python/tests/managed_layer_test.py
|
Python
| 0.000091
|
@@ -0,0 +1,370 @@
+import neuroglancer%0A%0Adef test_visible():%0A layer = neuroglancer.ManagedLayer('a', %7B'type': 'segmentation', 'visible': False%7D)%0A assert layer.name == 'a'%0A assert layer.visible == False%0A assert layer.to_json() == %7B'name': 'a', 'type': 'segmentation', 'visible': False%7D%0A layer.visible = True%0A assert layer.to_json() == %7B'name': 'a', 'type': 'segmentation'%7D%0A
|
|
38903c6b6e4dec9fd2fe73b0c468a8b3f2ab870a
|
Add multi np array
|
python_practice/numpy_exercise2.py
|
python_practice/numpy_exercise2.py
|
Python
| 0.999987
|
@@ -0,0 +1,188 @@
+import numpy as np%0A%0AMatrix_A = np.array( %5B%5B1,1%5D,%5B0,1%5D%5D )%0AMatrix_B = np.array( %5B%5B2,0%5D,%5B3,4%5D%5D )%0A%0Aprint Matrix_A*Matrix_B%0A%0Aprint Matrix_A.dot(Matrix_B) %0A%0Aprint np.dot(Matrix_A, Matrix_B) %0A
|
|
46393dca28abe0df421066e76b26f198ad790690
|
Create NimGame_001.py
|
leetcode/NimGame_001.py
|
leetcode/NimGame_001.py
|
Python
| 0.000001
|
@@ -0,0 +1,150 @@
+class Solution(object):%0A def canWinNim(self, n):%0A %22%22%22%0A :type n: int%0A :rtype: bool%0A %22%22%22%0A return not (n %25 4 == 0)%0A
|
|
648df0127879804041e795cb9479e08ddfd6459b
|
fix syntax errors
|
family.py
|
family.py
|
#!/usr/bin/env python
"""This module defines the family class.
"""
import person
class Family:
    """A household: the adults and children that belong together.

    Families are built either from roster rows (one child per row, with a
    combined parent-name field) or from directory rows, and two families
    describing the same household can be detected and merged.
    """

    def __init__(self):
        self.adults = []    # Person objects for the parents/guardians
        self.children = []  # Person objects for the children

    def AddAdultsFromCombinedField(self, teacher, name_field, grade):
        """Parse a combined parent-name field and append one adult per name.

        The field lists parents joined by " and ".  If the parents share a
        last name, only the final name carries it (e.g. "Ann and Bob Smith");
        otherwise each name is a complete "First Last".
        """
        parent_count = 1
        parent_num = ""
        parents = name_field.split(" and ")
        # if parents have same last name, then there is only one name
        # (a single token) before the first "and"
        if len(parents[0].split(" ")) == 1:
            last_name = parents[-1].split(" ")[-1]
            for name in parents:
                parent = name.split(' ')
                new_adult = person.Person()
                self.adults += [new_adult.SetFromRoster(last_name = last_name,
                                                        first_name = parent[0],
                                                        grade = grade,
                                                        teacher = teacher,
                                                        name_field = name_field,
                                                        family_relation = "Adult"+parent_num)]
                # prepare the parent_tag for the next parent
                parent_count += 1
                parent_num = str(parent_count)
        # each parent has a unique first and last name
        else:
            for name in parents:
                parent = name.split(' ')
                new_adult = person.Person()
                self.adults += [new_adult.SetFromRoster(last_name = parent[-1],
                                                        first_name = " ".join(parent[0:-1]),
                                                        grade = grade,
                                                        teacher = teacher,
                                                        name_field = name_field,
                                                        family_relation = "Adult"+parent_num)]
                # prepare the parent_tag for the next parent
                parent_count += 1
                parent_num = str(parent_count)

    def AddAdultFromDirectory(self, fields):
        """Append one adult parsed from a directory row."""
        new_adult = person.Person()
        self.adults += [new_adult.SetFromDirectory(fields)]

    def AddChildFromDirectory(self, fields):
        """Append one child parsed from a directory row."""
        new_child = person.Person()
        self.children += [new_child.SetFromDirectory(fields)]

    def CreateFromRoster(self, fields):
        """Populate this family from one roster row (the adults + one child)."""
        # for elementary school (< 6th grade) teacher name is retained
        # for middle school, teacher name is replaced with grade level
        if int(fields[2]) < 6:
            teacher = fields[4]
        else:
            teacher = fields[2]
        # add adults to the family
        self.AddAdultsFromCombinedField(teacher = teacher,
                                        name_field = fields[3],
                                        grade = fields[2])
        # add the child to the family
        # BUG FIX: the original referenced the undefined names "new_adult"
        # and "name_field" here; use the child object and the roster field.
        new_child = person.Person()
        self.children = [new_child.SetFromRoster(last_name = fields[0],
                                                 first_name = fields[1],
                                                 grade = fields[2],
                                                 teacher = teacher,
                                                 name_field = fields[3],
                                                 family_relation = "Child1")]

    def IsSameFamily(self, other):
        """Return True if both families have the same set of adults."""
        if not len(self.adults) == len(other.adults):
            return False
        num_found = 0
        for adult in self.adults:
            for other_adult in other.adults:
                if adult.IsSame(other_adult):
                    num_found += 1
                    break
        # BUG FIX: this line was "return num_found = len(self.adults)",
        # a syntax error; the intent is an equality test.
        return num_found == len(self.adults)

    def CombineWith(self, other):
        """Merge other's children into this family, skipping duplicates."""
        to_add = []
        for possible_child in other.children:
            for existing_child in self.children:
                if existing_child.IsSame(possible_child):
                    break
            else:
                to_add += [possible_child]
        self.children += to_add

    def IsChildless(self):
        """Return True if no children are recorded."""
        return len(self.children) == 0

    def IsOrphan(self):
        """Return True if no adults are recorded."""
        return len(self.adults) == 0
|
Python
| 0.000009
|
@@ -2337,17 +2337,16 @@
%0A
-
def AddC
@@ -3884,16 +3884,17 @@
_found =
+=
len(sel
|
0a1ef74350a1d06a6f7fb10f6bf7f0621968766b
|
load 'jinja2.ext.do' extension by default
|
jinja2cli/cli.py
|
jinja2cli/cli.py
|
"""
jinja2-cli
==========
License: BSD, see LICENSE for more details.
"""
from jinja2cli import __version__
class InvalidDataFormat(Exception):
    """Raised when an unknown data format is requested."""


class InvalidInputData(Exception):
    """Base class for errors while parsing the input data."""


class MalformedJSON(InvalidInputData):
    """The input could not be parsed as JSON."""


class MalformedINI(InvalidInputData):
    """The input could not be parsed as INI."""


class MalformedYAML(InvalidInputData):
    """The input could not be parsed as YAML."""


class MalformedQuerystring(InvalidInputData):
    """The input could not be parsed as a querystring."""
# Global list of available format parsers on your system
# mapped to the callable/Exception to parse a string into a dict
formats = {}
# json - simplejson or packaged json as a fallback
try:
import simplejson
formats['json'] = (simplejson.loads, simplejson.decoder.JSONDecodeError, MalformedJSON)
except ImportError:
try:
import json
formats['json'] = (json.loads, ValueError, MalformedJSON)
except ImportError:
pass
# ini - Nobody likes you.
try:
# Python 2
import ConfigParser
except ImportError:
# Python 3
import configparser as ConfigParser
def _parse_ini(data):
import StringIO
class MyConfigParser(ConfigParser.ConfigParser):
def as_dict(self):
d = dict(self._sections)
for k in d:
d[k] = dict(self._defaults, **d[k])
d[k].pop('__name__', None)
return d
p = MyConfigParser()
p.readfp(StringIO.StringIO(data))
return p.as_dict()
formats['ini'] = (_parse_ini, ConfigParser.Error, MalformedINI)
# yaml - with PyYAML
try:
    import yaml
    # NOTE(review): yaml.load without an explicit Loader can construct
    # arbitrary Python objects from untrusted input; consider
    # yaml.safe_load here.
    formats['yaml'] = (yaml.load, yaml.YAMLError, MalformedYAML)
except ImportError:
    pass
# querystring - querystring parsing
def _parse_qs(data):
""" Extend urlparse to allow objects in dot syntax.
>>> _parse_qs('user.first_name=Matt&user.last_name=Robenolt')
{'user': {'first_name': 'Matt', 'last_name': 'Robenolt'}}
"""
try:
import urlparse
except ImportError:
import urllib.parse as urlparse
dict_ = {}
for k, v in urlparse.parse_qs(data).items():
v = map(lambda x: x.strip(), v)
v = v[0] if len(v) == 1 else v
if '.' in k:
pieces = k.split('.')
cur = dict_
for idx, piece in enumerate(pieces):
if piece not in cur:
cur[piece] = {}
if idx == len(pieces) - 1:
cur[piece] = v
cur = cur[piece]
else:
dict_[k] = v
return dict_
formats['querystring'] = (_parse_qs, Exception, MalformedQuerystring)
import os
import sys
from optparse import OptionParser
import jinja2
from jinja2 import Environment, FileSystemLoader
def format_data(format_, data):
    """Parse *data* with the parser registered for *format_* in ``formats``."""
    parser = formats[format_][0]
    return parser(data)
def render(template_path, data):
    """Render the template at *template_path* with *data*; return UTF-8 bytes."""
    search_dir = os.path.dirname(template_path)
    environment = Environment(loader=FileSystemLoader(search_dir))
    template = environment.get_template(os.path.basename(template_path))
    output = template.render(data).encode('utf-8')
    return output
def cli(opts, args):
    """Read data from args[1] ('-' means stdin), render args[0] with it,
    and write the result to stdout.  Exits the process when done.
    """
    if args[1] == '-':
        data = sys.stdin.read()
    else:
        # BUG FIX: the original leaked the file handle; close it
        # deterministically with a context manager.
        data_path = os.path.join(os.getcwd(), os.path.expanduser(args[1]))
        with open(data_path) as f:
            data = f.read()
    template_path = os.path.abspath(args[0])
    try:
        data = format_data(opts.format, data)
    except formats[opts.format][1]:
        # Re-raise as the format's domain error, truncating the (possibly
        # huge) input in the message.  NOTE: the original had an
        # unreachable sys.exit(1) after this raise; it has been removed.
        raise formats[opts.format][2](u'%s ...' % data[:60])
    output = render(template_path, data)
    sys.stdout.write(output)
    sys.exit(0)
def main():
    """Entry point: parse command-line options and dispatch to cli()."""
    default_format = 'json'
    if default_format not in formats:
        default_format = sorted(formats.keys())[0]

    usage = "usage: %prog [options] <input template> <input data>"
    version = "jinja2-cli v%s\n - Jinja2 v%s" % (__version__, jinja2.__version__)
    parser = OptionParser(usage=usage, version=version)
    parser.add_option('--format',
                      help='Format of input variables: %s' % ', '.join(formats.keys()),
                      dest='format', action='store', default=default_format)
    opts, args = parser.parse_args()

    # No arguments at all, or an explicit "help", prints usage and exits.
    if not args or args[0] == 'help':
        parser.print_help()
        sys.exit(1)

    # Without the second argv, assume they want to read from stdin
    if len(args) == 1:
        args.append('-')

    if opts.format not in formats:
        raise InvalidDataFormat(opts.format)

    cli(opts, args)
    sys.exit(0)
if __name__ == '__main__':
main()
|
Python
| 0
|
@@ -2761,16 +2761,25 @@
ronment(
+%0A
loader=F
@@ -2825,16 +2825,60 @@
e_path))
+,%0A extensions=%5B'jinja2.ext.do'%5D,%0A
)%0A ou
|
0aa078c8beb6bad3dd0f30463f2925f01282e353
|
add merge script
|
scripts/merge.py
|
scripts/merge.py
|
Python
| 0.000001
|
@@ -0,0 +1,333 @@
+#! /usr/bin/env python%0A%0Aimport gzip%0Aimport json%0Aimport sys%0A%0Aret = %7B%0A %22samples%22: %5B%5D%0A%7D%0A%0Adef opn(fn):%0A if fn.endswith('.gz'):%0A return gzip.open(fn)%0A return open(fn)%0A%0Afor file_name in sys.argv%5B1:%5D:%0A with opn(file_name) as f:%0A file_content = json.load(f)%0A ret%5B%22samples%22%5D.extend(file_content%5B%22samples%22%5D)%0A%0Aprint(json.dumps(ret))%0A
|
|
9a045ac0c5cfe39689d8e1446674193e8862d269
|
add IPLookup
|
cogs/ip.py
|
cogs/ip.py
|
Python
| 0
|
@@ -0,0 +1,1039 @@
+#!/bin/env python%0A%0Aimport discord%0Afrom discord.ext import commands%0Afrom utils import aiohttp_wrap as aw%0A%0Aclass IPLookup:%0A def __init__(self, bot):%0A self.bot = bot%0A self.aio_session = bot.aio_session%0A self.api_uri = 'http://ip-api.com/json/%7B%7D'%0A%0A @commands.command(aliases=%5B'ip'%5D)%0A async def iplookup(self, ctx, *, query: str):%0A %22%22%22 Get information about an IP or website %22%22%22%0A res = await aw.aio_get_json(self.aio_session, query)%0A%0A # Check whether successful%0A if not res or res%5B'status'%5D == 'fail':%0A return await ctx.send(f%22Sorry, I couldn't find any data on %60%7Bquery%7D%60.%22)%0A%0A em = discord.Embed(title=res%5B'org'%5D, color=discord.Color.dark_magenta())%0A em.add_field(name='Location', value=f%22%7Bres%5B'city'%5D, res%5B'regionName'%5D, res%5B'country'%5D%7D%22)%0A em.add_field(name='Coordinates', value=f%22(%7Bres%5B'lat'%5D:.3f%7D, %7Bres%5B'lon'%5D:.3f%7D)%22)%0A em.add_field(name='ISP', value=res%5B'isp'%5D)%0A%0A await ctx.send(embed=em)%0A%0A%0Adef setup(bot):%0A bot.add_cog(IPLookup(bot))
|
|
314fcab1904cd0c5e434789bef09766d33e2d6ef
|
add synth.py for generation
|
packages/google-cloud-asset/synth.py
|
packages/google-cloud-asset/synth.py
|
Python
| 0
|
@@ -0,0 +1,1288 @@
+# copyright 2018 google LLC%0A#%0A# licensed under the apache license, version 2.0 (the %22license%22);%0A# you may not use this file except in compliance with the license.%0A# you may obtain a copy of the license at%0A#%0A# http://www.apache.org/licenses/license-2.0%0A#%0A# unless required by applicable law or agreed to in writing, software%0A# distributed under the license is distributed on an %22as is%22 basis,%0A# without warranties or conditions of any kind, either express or implied.%0A# see the license for the specific language governing permissions and%0A# limitations under the license.%0A%0A%22%22%22this script is used to synthesize generated parts of this library.%22%22%22%0A%0Aimport synthtool as s%0Aimport synthtool.gcp as gcp%0Aimport subprocess%0Aimport logging%0A%0Alogging.basicconfig(level=logging.debug)%0A%0Agapic = gcp.gapicgenerator()%0Acommon_templates = gcp.commontemplates()%0A%0Aversion = %22v1%22%0A%0Alibrary = gapic.node_library('asset', version, private=True)%0As.copy(library, excludes=%5B'src/index.js', 'readme.md', 'package.json'%5D)%0A%0Atemplates = common_templates.node_library(%0A package_name=%22@google-cloud/asset%22,%0A repo_name=%22googleapis/nodejs-asset%22,%0A)%0As.copy(templates)%0A%0A%0A'''%0Anode.js specific cleanup%0A'''%0Asubprocess.run(%5B'npm', 'ci'%5D)%0Asubprocess.run(%5B'npm', 'run', 'prettier'%5D)%0Asubprocess.run(%5B'npm', 'run', 'lint'%5D)%0A%0A
|
|
37f7ab9435939a144b08fdbb52e1e519ad139318
|
add some field definitions
|
ldapdb/models/fields.py
|
ldapdb/models/fields.py
|
Python
| 0.000003
|
@@ -0,0 +1,1250 @@
+# -*- coding: utf-8 -*-%0A# %0A# django-ldapdb%0A# Copyright (C) 2009 Bollor%C3%A9 telecom%0A# See AUTHORS file for a full list of contributors.%0A# %0A# This program is free software: you can redistribute it and/or modify%0A# it under the terms of the GNU General Public License as published by%0A# the Free Software Foundation, either version 3 of the License, or%0A# (at your option) any later version.%0A# %0A# This program is distributed in the hope that it will be useful,%0A# but WITHOUT ANY WARRANTY; without even the implied warranty of%0A# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the%0A# GNU General Public License for more details.%0A# %0A# You should have received a copy of the GNU General Public License%0A# along with this program. If not, see %3Chttp://www.gnu.org/licenses/%3E.%0A#%0A%0Afrom django.db.models import fields, SubfieldBase%0A%0Aclass CharField(fields.CharField):%0A def __init__(self, *args, **kwargs):%0A kwargs%5B'max_length'%5D = 200%0A super(CharField, self).__init__(*args, **kwargs)%0A%0Aclass ImageField(fields.Field):%0A pass%0A%0Aclass IntegerField(fields.IntegerField):%0A pass%0A%0Aclass ListField(fields.Field):%0A __metaclass__ = SubfieldBase%0A%0A def to_python(self, value):%0A if not value:%0A return %5B%5D%0A return value%0A%0A
|
|
c5fd2a7bccb45325acdf7f0800843ddb9ad82b64
|
split Natura2000 pdf files
|
migrations/split_pdf.py
|
migrations/split_pdf.py
|
Python
| 0
|
@@ -0,0 +1,887 @@
+#use http://pybrary.net/pyPdf/%0A%0Afrom pyPdf import PdfFileWriter, PdfFileReader%0Aimport re%0A%0Apattern = re.compile(r%22RO(SCI%7CSPA)%5Cd%7B4%7D%22) %0A%0Asource_path = %22/Users/cornel/Downloads/2011-10-20_protectia_naturii_RO_SPA_SDF_2011.pdf%22%0Apdf = PdfFileReader(file(source_path, %22rb%22))%0A%0Adef save_pdf(output, name):%0A outputStream = file(%22%25s.pdf%22 %25 name, %22wb%22)%0A output.write(outputStream)%0A outputStream.close()%0A%0Aoutput = PdfFileWriter()%0Afor i in range(0, pdf.getNumPages()):%0A page = pdf.getPage(i)%0A #skip empty pages%0A if page.has_key('/Contents'):%0A text = page.extractText()%0A if text.find('1. IDENTIFICAREA SITULUI') %3E 0:%0A if i:%0A save_pdf(output, name)%0A output = PdfFileWriter()%0A match = pattern.search(text)%0A name = match.group(0)%0A output.addPage(pdf.getPage(i))%0A%0A#save last pages%0Asave_pdf(output, name)%0A
|
|
8ffcf96b5b270fa77026c8c62ee267363ae2e7b1
|
Add virtualenv plugin: build python environment.
|
fapistrano/plugins/virtualenv.py
|
fapistrano/plugins/virtualenv.py
|
Python
| 0
|
@@ -0,0 +1,1878 @@
+# -*- coding: utf-8 -*-%0A%22%22%22%0Avirtualenv plugin provide%0A%0A2 virtualenv environment:%0A%0A1. virtualenvwrapper in /home/depploy/.virtulaenvs/%25(project_name)s%0A2. virtualenv in each release directory%0A%0A2 pip install:%0A%0A1. pip%0A2. pip wheel%0A%0A%22%22%22%0Afrom fabric.api import run, env, prefix, cd%0Afrom fabric.contrib.files import exists%0Afrom .. import signal%0A%0Adef init():%0A if not hasattr(env, 'virtualenv_type'):%0A env.virtualenv_type = 'virtualenvwrapper'%0A%0A if not hasattr(env, 'virtualenv_upgrade_pip'):%0A env.virtualenv_upgrade_pip = True%0A%0A signal.register('deploy.updated', build_python_env)%0A%0Adef build_python_env():%0A if env.virtualenv_type == 'virtualenvwrapper':%0A _check_virtualenvwrapper_env()%0A _check_virtualenvwrapper_activate()%0A elif env.virtualenv_type == 'virtualenv':%0A _check_virtualenv_env()%0A _check_virtualenv_activate()%0A%0A if env.virtualenv_upgrade_pip:%0A _upgrade_pip()%0A%0A _install_requirements()%0A%0Adef _check_virtualenvwrapper_env():%0A if not exists('~/.virtualenvs/%25(project_name)s' %25 env):%0A run('source %25(virtualenvwrapper_source)s && mkvirtualenv %25(project_name)s' %25 env)%0A%0Adef _check_virtualenv_env():%0A if not exists('%25(releases_path)s/%25(new_release)s/venv' %25 env):%0A run('virtualenv %25(releases_path)s/%25(new_release)s/venv' %25 env)%0A%0Adef _check_virtualenvwrapper_activate():%0A env.activate = 'source ~/.virtualenvs/%25(project_name)s/bin/activate' %25 env%0A%0Adef _check_virtualenv_activate():%0A env.activate = 'source %25(releases_path)s/%25(new_release)s/venv/bin/activate' %25 env%0A%0Adef _upgrade_pip():%0A with prefix(env.activate):%0A run('pip install -q -U pip setuptools wheel %7C%7C pip install -U pip setuptools wheel')%0A%0Adef _install_requirements():%0A with prefix(env.activate):%0A with cd('%25(releases_path)s/%25(new_release)s' %25 env):%0A run('pip install -r requirements.txt' %25 env)%0A
|
|
4987e1722f8b55e99fbc9455eafe0210b5973060
|
create server bootstrap script
|
server/server.py
|
server/server.py
|
Python
| 0.000001
|
@@ -0,0 +1,438 @@
+#! /usr/bin/python%0A# encoding=utf-8%0A%0Aimport os%0Aimport cherrypy%0Afrom http.router import Router%0A%0Adef bootstrap():%0A api_config = os.path.abspath(os.path.join(os.getcwd(), 'config/api.conf'))%0A router = Router()%0A%0A cherrypy.tools.CORS = cherrypy.Tool('before_handler', router.cors)%0A cherrypy.tree.mount(router, '/api', api_config)%0A%0A cherrypy.engine.start()%0A cherrypy.engine.block()%0A%0Aif __name__ == '__main__':%0A bootstrap()%0A
|
|
45c81e268df7f01fdeb64b053583b2946739eebe
|
add pactest with pacparser lib
|
pactest.py
|
pactest.py
|
Python
| 0.000126
|
@@ -0,0 +1,986 @@
+#!/usr/bin/python%0A#-*- coding: utf-8 -*-%0A%0A'''%0AYou have to install pacparser before runing this script.%0AYou can get pacparser from https://code.google.com/p/pacparser.%0A'''%0A%0Aimport pacparser%0Aimport time%0A%0Adef get_pac_result(filename, url, host):%0A%09pacparser.init()%0A%09pacparser.parse_pac(filename)%0A%09ret_str = pacparser.find_proxy(url, host)%0A%09pacparser.cleanup()%0A%09return ret_str%0A%0Adef main_test(filename, test_times):%0A%09pacparser.init()%0A%09pacparser.parse_pac(filename)%0A%09beg_time = time.time()%0A%09for i in xrange(test_times):%0A%09%09ret_str = pacparser.find_proxy('http://www.coding.com', 'www.coding.com') # using the worst case%0A%09end_time = time.time()%0A%09print %22%25s:%5CnTotal Time: %25s s%5CnAvg. Time: %25s ms%5Cn%5Cn%22 %25 (filename, end_time - beg_time, (end_time - beg_time) * 1000.0 / test_times),%0A%09pacparser.cleanup()%0A%0Adef time_test():%0A%09main_test(%22whitelist.pac%22, 10000)%0A%09main_test(%22whiteiplist.pac%22, 100)%0A%09#main_test(%22flora_pac.pac%22, 100)%0A%09#main_test(%22usufu_flora_pac.pac%22, 100)%0A%0Adef main():%0A%09time_test()%0A%0Amain()%0A
|
|
a68d89a4f351f8df2bfceeac77540b23e29827be
|
Add failing test for bug #1375 -- no out-of-bounds error for token.nbor()
|
spacy/tests/regression/test_issue1375.py
|
spacy/tests/regression/test_issue1375.py
|
Python
| 0
|
@@ -0,0 +1,488 @@
+from __future__ import unicode_literals%0Aimport pytest%0Afrom ...vocab import Vocab%0Afrom ...tokens.doc import Doc%0A%0A@pytest.mark.xfail%0Adef test_issue1375():%0A '''Test that token.nbor() raises IndexError for out-of-bounds access.'''%0A doc = Doc(Vocab(), words=%5B'0', '1', '2'%5D)%0A with pytest.raises(IndexError):%0A assert doc%5B0%5D.nbor(-1)%0A assert doc%5B1%5D.nbor(-1).text == '0'%0A with pytest.raises(IndexError):%0A assert doc%5B2%5D.nbor(1)%0A assert doc%5B1%5D.nbor(1).text == '2'%0A %0A
|
|
6418326667f0819a028606dee1683965a0092e0a
|
add functions to find the data dir
|
cs251tk/specs/dirs.py
|
cs251tk/specs/dirs.py
|
Python
| 0.000001
|
@@ -0,0 +1,205 @@
+import os%0A%0A%0Adef get_specs_dir():%0A return os.path.join(get_data_dir(), 'cs251tk', 'specs')%0A%0A%0Adef get_data_dir():%0A return os.getenv('XDG_DATA_HOME', os.path.join(os.getenv('HOME'), '.local', 'share'))%0A
|
|
4af8e31da47b321cfbd84223619379167c9c7d3b
|
Add config file with list of programs
|
app/config.py
|
app/config.py
|
Python
| 0.000001
|
@@ -0,0 +1,1394 @@
+programs = %7B%0A %22ascii_text%22: %7B%0A %22title%22: %22ASCII Text%22,%0A %22path%22: %22programs/ascii_text.py%22,%0A %7D,%0A %22blink_sun%22: %7B%0A %22title%22: %22Blink Sun%22,%0A %22path%22: %22programs/blink_sun.py%22%0A %7D,%0A %22cheertree%22: %7B%0A %22title%22: %22Cheertree%22,%0A %22path%22: %22programs/cheertree.py%22%0A %7D,%0A %22cross%22: %7B%0A %22title%22: %22Cross%22,%0A %22path%22: %22programs/cross.py%22%0A %7D,%0A %22demo%22: %7B%0A %22title%22: %22Demo%22,%0A %22path%22: %22programs/demo.py%22%0A %7D,%0A %22dna%22: %7B%0A %22title%22: %22DNA%22,%0A %22path%22: %22programs/dna.py%22%0A %7D,%0A %22game_of_life%22: %7B%0A %22title%22: %22Game of Life%22,%0A %22path%22: %22programs/game_of_life.py%22%0A %7D,%0A %22matrix%22: %7B%0A %22title%22: %22Matrix%22,%0A %22path%22: %22programs/matrix.py%22%0A %7D,%0A %22psychedelia%22: %7B%0A %22title%22: %22Psychedelia%22,%0A %22path%22: %22programs/psychedelia.py%22%0A %7D,%0A %22rain%22: %7B%0A %22title%22: %22Rain%22,%0A %22path%22: %22programs/rain.py%22%0A %7D,%0A %22random_blinky%22: %7B%0A %22title%22: %22Random Blinky%22,%0A %22path%22: %22programs/random_blinky.py%22%0A %7D,%0A %22random_sparkles%22: %7B%0A %22title%22: %22Random Sparkles%22,%0A %22path%22: %22programs/random_sparkles.py%22%0A %7D,%0A %22simple%22: %7B%0A %22title%22: %22Simple%22,%0A %22path%22: %22programs/simple.py%22%0A %7D,%0A %22snow%22: %7B%0A %22title%22: %22Snow%22,%0A %22path%22: %22programs/snow.py%22%0A %7D,%0A %22trig%22: %7B%0A %22title%22: %22Trig%22,%0A %22path%22: %22programs/trig.py%22%0A %7D%0A%7D
|
|
7749a3531cc2985112b7ef60421dd9c07e742bcb
|
Add fabfile.py file to project
|
fabfile.py
|
fabfile.py
|
Python
| 0
|
@@ -0,0 +1,373 @@
+#! /usr/bin/env python2%0A# -*- coding: utf-8 -*-%0A%0A%22%22%22The jupyterhub stack fabric file%22%22%22%0A%0Afrom fabric.api import local%0A%0Adef git():%0A %22%22%22Setup Git%22%22%22%0A%0A local(%22git remote rm origin%22)%0A local(%22git remote add origin https://korniichuk@github.com/korniichuk/jupyterhub.git%22)%0A local(%22git remote add bitbucket https://korniichuk@bitbucket.org/korniichuk/jupyterhub.git%22)%0A
|
|
c6d47e22825da4516f313f4a8bef6237c17b48b9
|
Clear user's profile id cache on organization change.
|
judge/signals.py
|
judge/signals.py
|
from django.core.cache.utils import make_template_fragment_key
from django.db.models.signals import post_save, post_delete
from django.dispatch import receiver
from django.core.cache import cache
from .models import Problem, Contest, Submission, Organization, Profile, NavigationBar, MiscConfig, Language, Judge, \
BlogPost, ContestSubmission
from .caching import update_submission, finished_submission
@receiver(post_save, sender=Problem)
def problem_update(sender, instance, **kwargs):
    """Invalidate cached problem fragments when a Problem is saved."""
    for fragment in ('problem_html', 'submission_problem'):
        cache.delete(make_template_fragment_key(fragment, (instance.id,)))
@receiver(post_save, sender=Profile)
def profile_update(sender, instance, **kwargs):
    """Invalidate cached fragments tied to a Profile when it is saved."""
    for fragment in ('user_on_rank', 'user_org_on_rank', 'submission_user'):
        cache.delete(make_template_fragment_key(fragment, (instance.id,)))
    # The member count is keyed on the organization, not the profile.
    cache.delete(make_template_fragment_key('org_member_count',
                                            (instance.organization_id,)))
@receiver(post_save, sender=Contest)
def contest_update(sender, instance, **kwargs):
    """Invalidate the cached contest description on save."""
    key = make_template_fragment_key('contest_html', (instance.id,))
    cache.delete(key)
@receiver(post_save, sender=Language)
def language_update(sender, instance, **kwargs):
    """Invalidate the cached language description on save."""
    key = make_template_fragment_key('language_html', (instance.id,))
    cache.delete(key)
@receiver(post_save, sender=Judge)
# NOTE(review): this function reuses the name "language_update" defined just
# above, shadowing it at module level.  @receiver registers the handler at
# definition time, so both signals still fire, but the duplicate name is a
# latent bug -- consider renaming this one to judge_update.
def language_update(sender, instance, **kwargs):
    # Invalidate the cached judge description on save.
    cache.delete(make_template_fragment_key('judge_html', (instance.id,)))
@receiver(post_save, sender=BlogPost)
def post_update(sender, instance, **kwargs):
    """Invalidate cached blog-post fragments on save."""
    for fragment in ('post_summary', 'post_content'):
        cache.delete(make_template_fragment_key(fragment, (instance.id,)))
@receiver(post_save, sender=Submission)
def submission_update(sender, instance, **kwargs):
    # Push the saved submission's id into the live-update cache layer.
    update_submission(instance.id)
@receiver(post_delete, sender=Submission)
def submission_delete(sender, instance, **kwargs):
    # Finalize cache bookkeeping for the removed submission, then rescore
    # its author, since deleting a submission can change their points.
    finished_submission(instance)
    instance.user.calculate_points()
@receiver(post_delete, sender=ContestSubmission)
def contest_submission_delete(sender, instance, **kwargs):
    """Rescore the participation and drop its cached ranking fragment."""
    participation = instance.participation
    participation.recalculate_score()
    key = make_template_fragment_key('conrank_user_prob',
                                     (participation.profile.user_id,
                                      participation.contest_id))
    cache.delete(key)
@receiver(post_save, sender=Organization)
def organization_update(sender, instance, **kwargs):
cache.delete(make_template_fragment_key('organization_html', (instance.id,)))
@receiver(post_save, sender=NavigationBar)
def navigation_update(sender, instance, **kwargs):
cache.delete('navbar')
cache.delete('navbar_dict')
cache.delete(make_template_fragment_key('navbar'))
@receiver(post_save, sender=MiscConfig)
def misc_config_update(sender, instance, **kwargs):
cache.delete('misc_config:%s' % instance.key)
|
Python
| 0
|
@@ -2667,32 +2667,173 @@
instance.id,)))%0A
+ for user in instance.members.values_list('id', flat=True):%0A cache.delete(make_template_fragment_key('user_org_on_rank', (user,)))%0A
%0A%0A@receiver(post
|
fcea7c42c7b793a84febd29112b50fc89b5fd6f4
|
Add fabfile to generate documentation
|
fabfile.py
|
fabfile.py
|
Python
| 0.000001
|
@@ -0,0 +1,1163 @@
+# -*- coding: utf-8 -*-%0A%22%22%22%0A fabfile%0A%0A Fab file to build and push documentation to github%0A%0A :copyright: %C2%A9 2013 by Openlabs Technologies & Consulting (P) Limited%0A :license: BSD, see LICENSE for more details.%0A%22%22%22%0Aimport time%0A%0Afrom fabric.api import local, lcd%0A%0A%0Adef upload_documentation():%0A %22%22%22%0A Build and upload the documentation HTML to github%0A %22%22%22%0A temp_folder = '/tmp/%25s' %25 time.time()%0A local('mkdir -p %25s' %25 temp_folder)%0A%0A # Build the documentation%0A with lcd('docs'):%0A local('make html')%0A local('mv build/html/* %25s' %25 temp_folder)%0A%0A # Checkout to gh-pages branch%0A local('git checkout gh-pages')%0A%0A # Copy back the files from temp folder%0A local('rm -rf *')%0A local('mv %25s/* .' %25 temp_folder)%0A%0A # Add the relevant files%0A local('git add *.html')%0A local('git add *.js')%0A local('git add *.js')%0A local('git add *.inv')%0A local('git add _images')%0A local('git add _sources')%0A local('git add _static')%0A local('git commit -m %22Build documentation%22')%0A local('git push')%0A%0A print %22Documentation uploaded to Github.%22%0A print %22View at: http://openlabs.github.io/trytond-prestashop%22%0A
|
|
1e82283cc85b2eb449969849d23c4ffa2c090426
|
Add script to batch convert a directory recursively
|
scripts/directory_batch_convert.py
|
scripts/directory_batch_convert.py
|
Python
| 0.000001
|
@@ -0,0 +1,1503 @@
+import os%0Aimport sys%0Aimport re%0Afrom pathlib import Path%0A%0Aimport argparse%0A%0Afrom convert2netcdf4 import parseandconvert%0A%0Aparser = argparse.ArgumentParser(description='Recursively batch convert Vaisala old-binary format to NetCDF files. Keeps directory structure.')%0Aparser.add_argument('--from', dest='fromdir', help='Input directory', required=True)%0Aparser.add_argument('--to', dest='todir', help='Output directory. Created if not exists. Files will be overwritten.', required=True)%0A%0AEXTENSION_REGEX = r'.*%5C.edt$%7C.*%5C.%5B0-9%5D%7B2%7De$'%0A%0Adef main():%0A args = parser.parse_args()%0A%0A from_dir = Path(args.fromdir)%0A to_dir = Path(args.todir)%0A%0A for dirpath, dirnames, files in os.walk(from_dir.as_posix()):%0A for name in files:%0A #if name.lower().endswith(extension):%0A if re.match(EXTENSION_REGEX, name.lower(), re.M%7Cre.I):%0A input_file = os.path.join(dirpath, name)%0A input_path = Path(input_file)%0A%0A diff = input_path.relative_to(from_dir)%0A output_path = to_dir.joinpath(diff)%0A extension = output_path.suffix%0A output_file = output_path.as_posix()%0A output_file = output_file.replace(extension, '.nc')%0A%0A if not output_path.parent.exists():%0A output_path.parent.mkdir(parents=True, exist_ok=True)%0A%0A print(output_file)%0A parseandconvert(input_file, output_file)%0A%0A%0Aif __name__ == '__main__':%0A main()%0A%0A sys.exit(0)
|
|
8a3caf06f146ff9d3cf20d0d739c78cb93c16325
|
Add migrations
|
ureport/polls/migrations/0049_auto_20160810_1823.py
|
ureport/polls/migrations/0049_auto_20160810_1823.py
|
Python
| 0.000001
|
@@ -0,0 +1,498 @@
+# -*- coding: utf-8 -*-%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import models, migrations%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('polls', '0048_populate_age_and_gender_on_poll_results'),%0A %5D%0A%0A operations = %5B%0A migrations.AlterField(%0A model_name='poll',%0A name='poll_date',%0A field=models.DateTimeField(help_text='The date to display for this poll. Leave empty to use flow creation date.'),%0A ),%0A %5D%0A
|
|
b69643de7f9ec207949e0054d2b1e98dbb81d898
|
Add new package: librelp (#18779)
|
var/spack/repos/builtin/packages/librelp/package.py
|
var/spack/repos/builtin/packages/librelp/package.py
|
Python
| 0
|
@@ -0,0 +1,1065 @@
+# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other%0A# Spack Project Developers. See the top-level COPYRIGHT file for details.%0A#%0A# SPDX-License-Identifier: (Apache-2.0 OR MIT)%0A%0Afrom spack import *%0A%0A%0Aclass Librelp(AutotoolsPackage):%0A %22%22%22Librelp is an easy to use library for the RELP protocol. RELP%0A (stands for Reliable Event Logging Protocol) is a general-purpose,%0A extensible logging protocol.%22%22%22%0A%0A homepage = %22http://www.rsyslog.com/librelp/%22%0A url = %22https://github.com/rsyslog/librelp/archive/v1.7.0.tar.gz%22%0A%0A version('1.7.0', sha256='ff46bdd74798934663d1388d010270325dc6a6ed6d44358ca69b280a8304b1e9')%0A version('1.6.0', sha256='acaaa6b8e295ecd8e9d9b70c1c3c8fb3cc3c95a9ed5ce1689688510d0eecb37e')%0A version('1.5.0', sha256='ce7f463944417ba77d7b586590e41e276f7b107d3e35a77ce768cf3889b5e1a6')%0A%0A depends_on('autoconf', type='build')%0A depends_on('automake', type='build')%0A depends_on('libtool', type='build')%0A depends_on('m4', type='build')%0A depends_on('openssl')%0A depends_on('gnutls@2.0.0:')%0A
|
|
860f8224bf8ef2f1553a17842d1389491f43bfa5
|
Add missing migration for wagtail.tests
|
wagtail/tests/migrations/0008_auto_20141113_2125.py
|
wagtail/tests/migrations/0008_auto_20141113_2125.py
|
Python
| 0
|
@@ -0,0 +1,703 @@
+# -*- coding: utf-8 -*-%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import models, migrations%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('tests', '0007_registerdecorator'),%0A %5D%0A%0A operations = %5B%0A migrations.AlterField(%0A model_name='pagechoosermodel',%0A name='page',%0A field=models.ForeignKey(help_text='help text', to='wagtailcore.Page'),%0A preserve_default=True,%0A ),%0A migrations.AlterField(%0A model_name='snippetchoosermodel',%0A name='advert',%0A field=models.ForeignKey(help_text='help text', to='tests.Advert'),%0A preserve_default=True,%0A ),%0A %5D%0A
|
|
78420caaac5c5055d9264e9905c5e14e9756a064
|
Add Cli_server_tcp, just like Cli_server_local, but using TCP socket.
|
sippy/Cli_server_tcp.py
|
sippy/Cli_server_tcp.py
|
Python
| 0
|
@@ -0,0 +1,2115 @@
+# Copyright (c) 2003-2005 Maxim Sobolev. All rights reserved.%0A# Copyright (c) 2006-2014 Sippy Software, Inc. All rights reserved.%0A#%0A# All rights reserved.%0A#%0A# Redistribution and use in source and binary forms, with or without modification,%0A# are permitted provided that the following conditions are met:%0A#%0A# 1. Redistributions of source code must retain the above copyright notice, this%0A# list of conditions and the following disclaimer.%0A#%0A# 2. Redistributions in binary form must reproduce the above copyright notice,%0A# this list of conditions and the following disclaimer in the documentation and/or%0A# other materials provided with the distribution.%0A#%0A# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS %22AS IS%22 AND%0A# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED%0A# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE%0A# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR%0A# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES%0A# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;%0A# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON%0A# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT%0A# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS%0A# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.%0A%0Afrom twisted.internet.protocol import Factory%0Afrom twisted.internet import reactor%0Afrom Cli_session import Cli_session%0A%0Aclass Cli_server_tcp(Factory):%0A command_cb = None%0A%0A def __init__(self, command_cb, address):%0A self.command_cb = command_cb%0A self.protocol = Cli_session%0A reactor.listenTCP(address%5B1%5D, self, interface = address%5B0%5D)%0A%0A def buildProtocol(self, addr):%0A p = Factory.buildProtocol(self, addr)%0A p.command_cb = self.command_cb%0A return p%0A%0Aif __name__ == 
'__main__':%0A def callback(clm, cmd):%0A print cmd%0A return False%0A laddr = ('127.0.0.1', 12345)%0A f = Cli_server_tcp(callback, laddr)%0A reactor.run()%0A
|
|
057173c48a56c0c858212233ee60dcbb88e22838
|
Add tests.py, including one failure
|
mysite/profile/tests.py
|
mysite/profile/tests.py
|
Python
| 0.000002
|
@@ -0,0 +1,1498 @@
+import django.test%0Afrom search.models import Project%0A%0Aimport twill%0Afrom twill import commands as tc%0Afrom twill.shell import TwillCommandLoop%0Afrom django.test import TestCase%0Afrom django.core.servers.basehttp import AdminMediaHandler%0Afrom django.core.handlers.wsgi import WSGIHandler%0Afrom StringIO import StringIO%0A%0A# FIXME: Later look into http://stackoverflow.com/questions/343622/how-do-i-submit-a-form-given-only-the-html-source%0A%0A# Functions you'll need:%0A%0Adef twill_setup():%0A app = AdminMediaHandler(WSGIHandler())%0A twill.add_wsgi_intercept(%22127.0.0.1%22, 8080, lambda: app)%0A %0Adef twill_teardown():%0A twill.remove_wsgi_intercept('127.0.0.1', 8080)%0A%0Adef make_twill_url(url):%0A # modify this%0A return url.replace(%22http://openhatch.org/%22,%0A %22http://127.0.0.1:8080/%22)%0A%0Adef twill_quiet():%0A # suppress normal output of twill.. You don't want to%0A # call this if you want an interactive session%0A twill.set_output(StringIO())%0A%0Aclass ProfileTests(django.test.TestCase):%0A def setUp(self):%0A twill_setup()%0A%0A def tearDown(self):%0A twill_teardown()%0A%0A def testSlash(self):%0A response = self.client.get('/profile/')%0A%0A def testAddContribution(self):%0A url = 'http://openhatch.org/profile/'%0A tc.go(make_twill_url(url))%0A tc.fv('add_contrib', 'project', 'Babel')%0A tc.fv('add_contrib', 'contrib_text', 'msgctxt support')%0A tc.fv('add_contrib', 'url', 'http://babel.edgewall.org/ticket/54')%0A tc.submit()%0A%0A
|
|
bfc386c1a894811532ccfc65ce45339a964c5ac0
|
Create batch_clip.py
|
batch_clip.py
|
batch_clip.py
|
Python
| 0.000003
|
@@ -0,0 +1,464 @@
+# clips all .shps in folder to a boundary polygon%0A%0Afrom subprocess import call%0Aimport os%0A%0A# in dir of to be clipped shps and boundary file%0Ashp_folder = %22nrn_rrn_on_shp_en%22%0Aclip_poly = %22clip_bound.shp%22%0A%0A#output dir name%0A# os.mkdir(%22clipped%22)%0A%0Ac = 0%0Afor subdir, dirs, files in os.walk(shp_folder):%0A for file in files:%0A if file.endswith(('.shp')):%0A%09%09print file%0A%09%09call(%5B%22ogr2ogr%22, %22-clipsrc%22, clip_poly, %22clipped/%22 + file, shp_folder + '/' + file%5D)%0A%09%09c += 1%0Aprint c%0A
|
|
d1e403fce1affd0c7da1753fda441dd9a9c1d9ff
|
copy ISON settings for BBC
|
net/settings_bbc.py
|
net/settings_bbc.py
|
Python
| 0
|
@@ -0,0 +1,267 @@
+# settings_ison.py%0Afrom settings_common import *%0A%0ATEMPDIR = '/data2/tmp'%0ADATABASES%5B'default'%5D%5B'NAME'%5D = 'an-ison'%0A%0ALOGGING%5B'loggers'%5D%5B'django.request'%5D%5B'level'%5D = 'WARN'%0A%0ASESSION_COOKIE_NAME = 'IsonAstrometrySession'%0A%0Assh_solver_config = 'an-ison'%0Asitename = 'ison'%0A%0A
|
|
f20ed3b4941fef84e95afce4db0349ed8da3070e
|
Create Hour_Rain_Map.py
|
Hour_Rain_Map.py
|
Hour_Rain_Map.py
|
Python
| 0.000001
|
@@ -0,0 +1,2077 @@
+#############################################################################%0A# Name: Elizabeth Rentschlar #%0A# Assistantce from: #%0A# Purpose: Use Hourly rain totals condensed in Hours.py to create a series #%0A# of maps that show the #%0A# Created: 12/21/15 #%0A# Copyright: (c) City of Bryan #%0A# ArcGIS Version: 10.2.2 #%0A# Python Version: 2.7 #%0A#############################################################################%0A#Import arcpy module%0Aimport arcpy%0Aimport datetime%0A%0A# set workspace%0Aarcpy.env.overwriteOutput = True%0Awork_space = 'G:%5CGIS_PROJECTS%5CWATER_SERVICES%5CRain_Gauges'%0Amap_doc = '.%5CRain_Gauge_Map.mxd'%0Amap_output_folder = '.%5CRain_Maps%5C'%0Ahour_xy = '.%5CHour_xy.py'%0A%0Amapdoc = arcpy.mapping.MapDocument(map_doc)%0Adata_frame = arcpy.mapping.ListDataFrames(mapdoc)%5B0%5D%0Aprint data_frame.name%0Aprint data_frame.scale%0Alegend = arcpy.mapping.ListLayoutElements(mapdoc, %22LEGEND_ELEMENT%22, %0A %22Legend%22)%5B0%5D %0A%0Ahours = %5B1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24%5D%0Adates = %5B%5D%0Atoday_date = date.today() %09%09%0A# could not get datetime.datetime(2014,09,24%5B,00%5B,00%5B,00%5B,00000%5B,None%5D%5D%5D%5D%5D) to work%0Ad = datetime.date(2014,9,24)%0At = datetime.time(12,0,0)%0Afirst_date = datetime.datetime.combine(d, t)%0A#print first_date%0Aone_day = datetime.timedelta(days = 1)%0A#i = today_date%0A#while i != first_date:%0Ai = first_date%0Awhile i %3C= today_date :%0A dates.append(i)%0A new_date = i + one_day%0A #print new_date%0A i = new_date%0A#print dates%0A%0As_cur = arcpy.da.SearchCursor(hour_xy, %0A %5B'Day', 'Hour', 'Rain_Total'%5D)%0A%0Afor text in arcpy.mapping.ListLayoutElements(mapdoc, %22TEXT_ELEMENT%22): %0A if text.text == %22Date: Text%22: %0A text.text = district + %22%5Cn%22 + date %0A %0Aarcpy.RefreshActiveView() %0Aarcpy.RefreshTOC() %0A
|
|
d8ad74b80f214ca313d01533ea6f15082cbb3af2
|
Add tests for calibration procedure (draft)
|
srttools/core/tests/test_calibration.py
|
srttools/core/tests/test_calibration.py
|
Python
| 0
|
@@ -0,0 +1,1596 @@
+from ..calibration import CalibratorTable%0Afrom ..read_config import read_config%0Afrom ..scan import list_scans%0Aimport numpy as np%0Aimport matplotlib.pyplot as plt%0Aimport unittest%0Afrom astropy.table import Table%0Afrom ..imager import ScanSet%0Aimport os%0Aimport glob%0A%0A%0Aclass Test2_Calibration(unittest.TestCase):%0A @classmethod%0A def setup_class(klass):%0A import os%0A global DEBUG_MODE%0A DEBUG_MODE = True%0A%0A klass.curdir = os.path.dirname(__file__)%0A klass.datadir = os.path.join(klass.curdir, 'data')%0A%0A klass.config_file = %5C%0A os.path.abspath(os.path.join(klass.datadir, %22calibrators.ini%22))%0A%0A klass.config = read_config(klass.config_file)%0A%0A def step_1_calibrate(self):%0A %22%22%22Simple calibration from scans.%22%22%22%0A scan_list = %5C%0A list_scans(self.config%5B'datadir'%5D,%0A self.config%5B'calibrator_directories'%5D)%0A%0A scan_list.sort()%0A%0A caltable = CalibratorTable()%0A caltable.from_scans(scan_list)%0A caltable.update()%0A caltable.write(os.path.join(self.datadir, 'calibrators.hdf5'),%0A path=%22config%22, overwrite=True)%0A%0A def step_999_cleanup(self):%0A %22%22%22Clean up the mess.%22%22%22%0A os.unlink(os.path.join(self.datadir, 'calibrators.hdf5'))%0A for d in self.config%5B'list_of_directories'%5D:%0A hfiles = glob.glob(os.path.join(self.config%5B'datadir'%5D, d, '*.hdf5'))%0A print(hfiles)%0A for h in hfiles:%0A os.unlink(h)%0A%0A def test_all(self):%0A self.step_1_calibrate()%0A%0A self.step_999_cleanup()%0A
|
|
427f4562dc2a4fa7b805154e3ffd732381ed6e8d
|
Add dbwriter.py
|
dbwriter.py
|
dbwriter.py
|
Python
| 0.000001
|
@@ -0,0 +1,1116 @@
+from pymongo import MongoClient%0Aimport os%0Afrom pathlib import Path%0Adef main():%0A client = MongoClient('localhost', 27017)%0A db = client%5B'thesis-database'%5D%0A #tfidf-cv-category%0A collection = db%5B'tfidf-cv-category'%5D%0A path = 'Calculated/tfidf/cv-category'%0A for filename in os.listdir(path):%0A if filename%5B-3:%5D == %22csv%22:%0A fullpath = path + '/' + filename%0A q = Path(fullpath)%0A with q.open() as f:%0A for line in f:%0A # %22cvid%22, %22catid%22, %22skillName%22, %22similarity%22%0A cvid, catid, skillName, similarity = line.strip().split(',')%0A obj = %7B%22cvid%22: cvid, %22catid%22: catid, %22skillName%22: skillName, %5C%0A %22similarity%22: similarity%7D%0A collection.insert_one(obj)%0A #tfidf-job-category%0A #tfidf-job-cv%0A #tfidf2-cv-category%0A #tfidf2-job-category%0A #tfidf2-job-cv%0A #countvectorizer-cv-category%0A #countvectorizer-job-category%0A #countvectorizer-job-cv%0A #word2vec-cv-category%0A #word2vec-cv-category%0A #word2vec-job-cv%0A%0Aif __name__ == '__main__':%0A main()%0A
|
|
afcd636772952c8b6d2cca51e1851af29f5b6707
|
Create PostForm.
|
blog/forms.py
|
blog/forms.py
|
Python
| 0
|
@@ -0,0 +1,226 @@
+from django import forms%0A%0Afrom .models import Post%0A%0A%0Aclass PostForm(forms.ModelForm):%0A class Meta:%0A model = Post%0A fields = '__all__'%0A%0A def clean_slug(self):%0A return self.cleaned_data%5B'slug'%5D.lower()%0A
|
|
e39290b71299843eff858fb51543b99a06178a1d
|
Add a simple 8x benchmark script
|
ice40/picorv32_benchmark.py
|
ice40/picorv32_benchmark.py
|
Python
| 0
|
@@ -0,0 +1,1566 @@
+#!/usr/bin/env python3%0Aimport os, sys, threading%0Afrom os import path%0Aimport subprocess%0Aimport re%0A%0Anum_runs = 8%0A%0Aif not path.exists(%22picorv32.json%22):%0A os.remove(%22picorv32.json%22)%0A subprocess.run(%5B%22wget%22, %22https://raw.githubusercontent.com/cliffordwolf/picorv32/master/picorv32.v%22%5D, check=True)%0A subprocess.run(%5B%22yosys%22, %22-q%22, %22-p%22, %22synth_ice40 -json picorv32.json -top top%22, %22picorv32.v%22, %22picorv32_top.v%22%5D, check=True)%0A%0Afmax = %7B%7D%0A%0Aif not path.exists(%22picorv32_work%22):%0A os.mkdir(%22picorv32_work%22)%0A%0Athreads = %5B%5D%0A%0Afor i in range(num_runs):%0A def runner(run):%0A ascfile = %22picorv32_work/picorv32_s%7B%7D.asc%22.format(run)%0A if path.exists(ascfile):%0A os.remove(ascfile)%0A result = subprocess.run(%5B%22../nextpnr-ice40%22, %22--hx8k%22, %22--seed%22, str(run), %22--json%22, %22picorv32.json%22, %22--asc%22, ascfile%5D, stderr=subprocess.DEVNULL, stdout=subprocess.DEVNULL)%0A if result.returncode != 0:%0A print(%22Run %7B%7D failed!%22.format(run))%0A else:%0A icetime_res = subprocess.check_output(%5B%22icetime%22, %22-d%22, %22hx8k%22, ascfile%5D)%0A fmax_m = re.search(r'%5C((%5B0-9.%5D+) MHz%5C)', icetime_res.decode('utf-8'))%0A fmax%5Brun%5D = float(fmax_m.group(1))%0A threads.append(threading.Thread(target=runner, args=%5Bi+1%5D))%0A%0Afor t in threads: t.start()%0Afor t in threads: t.join()%0A%0Afmax_min = min(fmax.values())%0Afmax_max = max(fmax.values())%0Afmax_avg = sum(fmax.values()) / len(fmax)%0A%0Aprint(%22%7B%7D/%7B%7D runs passed%22.format(len(fmax), num_runs))%0Aprint(%22icetime: min = %7B%7D MHz, avg = %7B%7D MHz, max = %7B%7D MHz%22.format(fmax_min, fmax_avg, fmax_max))%0A
|
|
30081b470fc3522afc4af3a4fc33eb28bc85d6d6
|
Add project config manager
|
polyaxon_cli/managers/project.py
|
polyaxon_cli/managers/project.py
|
Python
| 0.000001
|
@@ -0,0 +1,434 @@
+# -*- coding: utf-8 -*-%0Afrom __future__ import absolute_import, division, print_function%0A%0Afrom polyaxon_schemas.project import ProjectConfig%0A%0Afrom polyaxon_cli.managers.base import BaseConfigManager%0A%0A%0Aclass ProjectConfigManager(BaseConfigManager):%0A %22%22%22Manages access token configuration .plxprojectconfig file.%22%22%22%0A%0A IS_GLOBAL = False%0A CONFIG_FILE_NAME = '.plxprojectconfig'%0A CONFIG = ProjectConfig%0A INIT_COMMAND = True%0A
|
|
8e1a117c0d0bf3614beed0410862d1dc1a91b306
|
Create shp2zip.py
|
upload-geospatial-data/python/shp2zip.py
|
upload-geospatial-data/python/shp2zip.py
|
Python
| 0.000007
|
@@ -0,0 +1,1480 @@
+import os%0Aimport shutil%0Aimport zipfile%0A%0A# Creates a zip file containing the input shapefile%0A# inShp: Full path to shapefile to be zipped%0A# Delete: Set to True to delete shapefile files after zip%0Adef ZipShp (inShp, Delete = True):%0A #List of shapefile file extensions%0A extensions = %5B%22.shp%22,%22.shx%22,%22.dbf%22,%22.sbn%22,%22.sbx%22,%22.fbn%22,%22.fbx%22,%22.ain%22,%22.aih%22,%22.atx%22,%22.ixs%22,%22.mxs%22,%22.prj%22,%22.xml%22,%22.cpg%22,%22.shp.xml%22%5D%0A #Directory of shapefile%0A inLocation = os.path.dirname (inShp)%0A #Base name of shapefile%0A inName = os.path.basename (os.path.splitext (inShp)%5B0%5D)%0A #Create zipfile name%0A zipfl = os.path.join (inLocation, inName + %22.zip%22)%0A #Create zipfile object%0A ZIP = zipfile.ZipFile (zipfl, %22w%22)%0A #Empty list to store files to delete%0A delFiles = %5B%5D%0A #Iterate files in shapefile directory%0A for fl in os.listdir (inLocation):%0A #Iterate extensions%0A for extension in extensions:%0A #Check if file is shapefile file%0A if fl == inName + extension:%0A #Get full path of file%0A inFile = os.path.join (inLocation, fl)%0A #Add file to delete files list%0A delFiles += %5BinFile%5D%0A #Add file to zipfile%0A ZIP.write (inFile, fl)%0A break%0A #Delete shapefile if indicated%0A if Delete == True:%0A for fl in delFiles:%0A os.remove (fl)%0A #Close zipfile object%0A ZIP.close()%0A #Return zipfile full path%0A return zipfl%0A
|
|
06804460f2b0e70b7b08d6657353c1c172a0df4c
|
add examples/tube-stream-private.py
|
examples/tube-stream-private.py
|
examples/tube-stream-private.py
|
Python
| 0
|
@@ -0,0 +1,1390 @@
+import sys%0A%0Afrom stream_tube_client import StreamTubeJoinerPrivateClient, %5C%0A StreamTubeInitiatorPrivateClient%0A%0Adef usage():%0A print %22Usage:%5Cn%22 %5C%0A %22Offer a stream tube to %5Bcontact%5D using the trivial stream server:%5Cn%22 %5C%0A %22%5Ctpython %25s %5Baccount-file%5D %5Bcontact%5D%5Cn%22 %5C%0A %22Accept a stream tube from a contact and connect it to the trivial stream client:%5Cn%22 %5C%0A %22%5Ctpython %25s %5Baccount-file%5D%5Cn%22 %5C%0A %22Offer a stream tube to %5Bcontact%5D using the UNIX socket %5Bsocket%5D:%5Cn%22 %5C%0A %22%5Ctpython %25s %5Baccount-file%5D %5Bcontact%5D %5Bsocket%5D%5Cn%22 %5C%0A %22Accept a stream tube from a contact and wait for connections from an external client:%5Cn%22 %5C%0A %22%5Ctpython %25s %5Baccount-file%5D --no-trivial-client%5Cn%22 %5C%0A %25 (sys.argv%5B0%5D, sys.argv%5B0%5D, sys.argv%5B0%5D, sys.argv%5B0%5D)%0A%0Aif __name__ == '__main__':%0A args = sys.argv%5B1:%5D%0A%0A if len(args) == 2 and args%5B1%5D != '--no-trivial-client':%0A client = StreamTubeInitiatorPrivateClient(args%5B0%5D, contact_id=args%5B1%5D)%0A elif len(args) == 1:%0A client = StreamTubeJoinerPrivateClient(args%5B0%5D, True)%0A elif len(args) == 3:%0A client = StreamTubeInitiatorPrivateClient(args%5B0%5D, args%5B1%5D, args%5B2%5D)%0A elif len(args) == 2 and args%5B1%5D == '--no-trivial-client':%0A client = StreamTubeJoinerPrivateClient(args%5B0%5D, False)%0A else:%0A usage()%0A sys.exit(0)%0A%0A client.run()%0A
|
|
d304c1cd9b35f92a6f5bb1c739402b8f3a6c22c8
|
Create cigar_party.py
|
Python/CodingBat/cigar_party.py
|
Python/CodingBat/cigar_party.py
|
Python
| 0.000357
|
@@ -0,0 +1,224 @@
+# http://codingbat.com/prob/p195669%0A%0Adef cigar_party(cigars, is_weekend):%0A if is_weekend and cigars %3E= 40:%0A return True%0A elif not is_weekend and (cigars %3E= 40 and cigars %3C= 60):%0A return True%0A else:%0A return False%0A
|
|
dbf1d68fd3c3681b1aa7673b1cfd8a2cc3417edf
|
Create problem5.py
|
Project-Euler/Problem5/problem5.py
|
Project-Euler/Problem5/problem5.py
|
Python
| 0.000048
|
@@ -0,0 +1,1001 @@
+%22%22%22%0A%5Bref.href%5D https://projecteuler.net/problem=5%0A%0ASmallest multiple.%0A%0A2520 is the smallest number that can be divided by each%0Aof the numbers from 1 to 10 without any remainder.%0A%0AWhat is the smallest positive number that is evenly%0Adivisible by all of the numbers from 1 to 20?%0A%22%22%22%0A%0AEPS = 1e-6%0A%0Adef IsDivisibleByAll(n, divs):%0A for div in divs:%0A if n %25 div != 0:%0A return False%0A return True%0A%0Adef GCD(n, m, eps = EPS):%0A if ((abs(n) %3E= 0 and abs(n) %3C= eps) or%0A (abs(m) %3E= 0 and abs(m) %3C= eps)):%0A return 0%0A while n %25 m %3E eps:%0A temp = m%0A m = n %25 m%0A n = temp%0A return m%0A%0Adef LCD(n, m, eps = EPS):%0A gcd = GCD(n, m, eps)%0A if abs(gcd) %3E= 0 and abs(gcd) %3C= eps:%0A return 1%0A return n * m / gcd%0A%0Alowestdiv = 1%0Ahighestdiv = 20%0Adivs = range(lowestdiv, highestdiv + 1)%0Alcd = reduce(LCD, divs)%0Aprint %22The smallest positive number that is evenly divisible by %22 +%5C%0A %22, %22.join(%5Bstr(div) for div in divs%5D) + %22 is %22 + str(lcd) + %22.%22%0A %0A%0A
|
|
b76b64ae3914d66bcf0e3eef60390c96c2cd1a43
|
Clean up W601 error
|
wagtail/contrib/wagtailsitemaps/tests.py
|
wagtail/contrib/wagtailsitemaps/tests.py
|
from django.test import TestCase
from django.core.cache import cache
from wagtail.wagtailcore.models import Page, PageViewRestriction, Site
from wagtail.tests.testapp.models import SimplePage, EventIndex
from .sitemap_generator import Sitemap
class TestSitemapGenerator(TestCase):
def setUp(self):
self.home_page = Page.objects.get(id=2)
self.child_page = self.home_page.add_child(instance=SimplePage(
title="Hello world!",
slug='hello-world',
live=True,
))
self.unpublished_child_page = self.home_page.add_child(instance=SimplePage(
title="Unpublished",
slug='unpublished',
live=False,
))
self.protected_child_page = self.home_page.add_child(instance=SimplePage(
title="Protected",
slug='protected',
live=True,
))
PageViewRestriction.objects.create(page=self.protected_child_page, password='hello')
self.site = Site.objects.get(is_default_site=True)
def test_get_pages(self):
sitemap = Sitemap(self.site)
pages = sitemap.get_pages()
self.assertIn(self.child_page.page_ptr, pages)
self.assertNotIn(self.unpublished_child_page.page_ptr, pages)
self.assertNotIn(self.protected_child_page.page_ptr, pages)
def test_get_urls(self):
sitemap = Sitemap(self.site)
urls = [url['location'] for url in sitemap.get_urls()]
self.assertIn('http://localhost/', urls) # Homepage
self.assertIn('http://localhost/hello-world/', urls) # Child page
def test_get_urls_uses_specific(self):
# Add an event page which has an extra url in the sitemap
self.home_page.add_child(instance=EventIndex(
title="Events",
slug='events',
live=True,
))
sitemap = Sitemap(self.site)
urls = [url['location'] for url in sitemap.get_urls()]
self.assertIn('http://localhost/events/', urls) # Main view
self.assertIn('http://localhost/events/past/', urls) # Sub view
def test_render(self):
sitemap = Sitemap(self.site)
xml = sitemap.render()
# Check that a URL has made it into the xml
self.assertIn('http://localhost/hello-world/', xml)
# Make sure the unpublished page didn't make it into the xml
self.assertNotIn('http://localhost/unpublished/', xml)
# Make sure the protected page didn't make it into the xml
self.assertNotIn('http://localhost/protected/', xml)
class TestSitemapView(TestCase):
def test_sitemap_view(self):
response = self.client.get('/sitemap.xml')
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailsitemaps/sitemap.xml')
self.assertEqual(response['Content-Type'], 'text/xml; charset=utf-8')
def test_sitemap_view_cache(self):
cache_key = 'wagtail-sitemap:%d' % Site.objects.get(is_default_site=True).id
# Check that the key is not in the cache
self.assertFalse(cache.has_key(cache_key))
# Hit the view
first_response = self.client.get('/sitemap.xml')
self.assertEqual(first_response.status_code, 200)
self.assertTemplateUsed(first_response, 'wagtailsitemaps/sitemap.xml')
# Check that the key is in the cache
self.assertTrue(cache.has_key(cache_key))
# Hit the view again. Should come from the cache this time
second_response = self.client.get('/sitemap.xml')
self.assertEqual(second_response.status_code, 200)
self.assertTemplateNotUsed(second_response, 'wagtailsitemaps/sitemap.xml') # Sitemap should not be re rendered
# Check that the content is the same
self.assertEqual(first_response.content, second_response.content)
|
Python
| 0.000006
|
@@ -3086,38 +3086,30 @@
sert
-False
+NotIn
(cache
-.has
_key
-(
+,
cache
-_key)
)%0A%0A
@@ -3394,37 +3394,27 @@
sert
-True
+In
(cache
-.has
_key
-(
+,
cache
-_key)
)%0A%0A
|
d441cfd92cd8d843f22f181d485786fe1ed8948f
|
Add herokustatus plugin
|
plugins/herokustatus.py
|
plugins/herokustatus.py
|
Python
| 0
|
@@ -0,0 +1,598 @@
+import urllib.parse%0Aimport requests%0A%0Aclass Plugin:%0A def __call__(self, bot):%0A bot.on_respond(r%22heroku st(atus)?$%22, self.on_respond)%0A bot.on_help(%22herokustatus%22, self.on_help)%0A%0A def on_respond(self, bot, msg, reply):%0A url = %22https://status.heroku.com/api/v3/current-status%22%0A headers = %7B %22User-Agent%22: %22SmartBot%22 %7D%0A%0A res = requests.get(url, headers=headers).json()%0A reply(%22Production: %7B0%7D%5CnDevelopment: %7B1%7D%22.format(res%5B%22status%22%5D%5B%22Production%22%5D, res%5B%22status%22%5D%5B%22Development%22%5D))%0A%0A def on_help(self, bot, reply):%0A reply(%22Syntax: heroku st%5Batus%5D%22)%0A
|
|
0af447c0371bd157c03fc5097ac8c0e0a5873ff7
|
Add temporary satellites analysis example.
|
examples/satellites_analyze.py
|
examples/satellites_analyze.py
|
Python
| 0
|
@@ -0,0 +1,1733 @@
+assert __name__ == '__main__'%0A%0Aimport bayeslite.bql as bql%0Aimport bayeslite.core as core%0Aimport bayeslite.parse as parse%0Aimport crosscat.LocalEngine as localengine%0Aimport getopt%0Aimport sys%0A%0A# XXX This is wrong -- should be part of bayesdb proper. But it, and%0A# copypasta of it, will do for now until internals are restructured%0A# well enough for bdb.execute to work.%0Adef bql_exec(bdb, string):%0A import sys%0A print %3E%3Esys.stderr, '--%3E %25s' %25 (string.strip(),)%0A phrases = parse.parse_bql_string(string)%0A phrase = phrases.next()%0A done = None%0A try:%0A phrases.next()%0A done = False%0A except StopIteration:%0A done = True%0A if done is not True:%0A raise ValueError('%3E1 phrase: %25s' %25 (string,))%0A return bql.execute_phrase(bdb, phrase)%0A%0Adef usage():%0A print %3E%3Esys.stderr, 'Usage: %25s %5B-hv%5D %5B-i %3Citer%3E%5D %5B-m %3Cmodels%3E%5D' %25 %5C%0A (sys.argv%5B0%5D)%0A%0Aiterations = None%0Amodelnos = None%0Atry:%0A opts, args = getopt.getopt(sys.argv%5B1:%5D, '?hi:m:', %5B%5D)%0Aexcept getopt.GetoptError as e:%0A print str(e)%0A usage()%0Aif 0 %3C len(args):%0A usage()%0Afor o, a in opts:%0A if o in ('-h', '-?'):%0A usage()%0A sys.exit()%0A elif o == '-i':%0A iterations = int(a)%0A elif o == '-m':%0A modelnos = a%0A else:%0A assert False, 'bad option %25s' %25 (o,)%0A%0Abdb = core.BayesDB(localengine.LocalEngine(seed=0), pathname='satellites.bdb')%0Abql_exec(bdb, %22create btable if not exists satellites%22 +%0A %22 from 'satellites.utf8.csv'%22)%0Abql_exec(bdb, 'initialize 10 models if not exists for satellites')%0Aif iterations is not None:%0A modelspec = 'models %25s' %25 (modelnos,) if modelnos is not None else ''%0A bql_exec(bdb, 'analyze satellites %25s for %25d iterations wait' %25%0A (modelspec, iterations))%0A
|
|
034cb3a0fe2a2c0c8b47fd631ca28bbfa7091902
|
add recursive preOrder BST
|
BST/bst.py
|
BST/bst.py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
from __future__ import division
from __future__ import unicode_literals
from __future__ import print_function
class Node(object):
def __init__(self, value):
self.left = None
self.right = None
self.value = value
# iterative
def preOrder(root):
if root is None:
return []
stack = [root]
preorder = []
while stack:
node = stack.pop()
preorder.append(node.val)
if node.right:
stack.append(node.right)
if node.left:
stack.append(node.left)
return preorder
|
Python
| 0.000046
|
@@ -281,16 +281,213 @@
value%0A%0A%0A
+# recursive%0Adef preOrderRecur(root):%0A if not root:%0A return%0A print(root.value)%0A if root.right:%0A preOrderRecur(root.right)%0A if root.left:%0A preOrderRecur(root.left)%0A%0A%0A
# iterat
|
fdb768c9dbfca33a013b9588e0dadc8c68abc992
|
Simplify smartplaylist flow
|
beetsplug/smartplaylist.py
|
beetsplug/smartplaylist.py
|
# This file is part of beets.
# Copyright 2015, Dang Mai <contact@dangmai.net>.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Generates smart playlists based on beets queries.
"""
from __future__ import print_function
from beets.plugins import BeetsPlugin
from beets import config, ui, library
from beets.util import normpath, syspath
import os
# Global variable so that smartplaylist can detect database changes and run
# only once before beets exits.
database_changed = False
def _items_for_query(lib, playlist, album=False):
    """Get the matching items for a playlist's configured queries.

    `album` indicates whether to process the item-level query or the
    album-level query (if any).
    """
    if album:
        key, model, fetch = 'album_query', library.Album, lib.albums
    else:
        key, model, fetch = 'query', library.Item, lib.items
    if key not in playlist:
        return []

    # A playlist may configure one query string or a list of them;
    # normalize to a list and concatenate the results of each query.
    query_strings = playlist[key]
    if not isinstance(query_strings, (list, tuple)):
        query_strings = [query_strings]

    results = []
    for query_string in query_strings:
        query, sort = library.parse_query_string(query_string, model)
        results.extend(fetch(query, sort))
    return results
def update_playlists(lib):
    """Write an m3u file for every configured smart playlist.

    Reads the plugin configuration for the playlist definitions, the
    output directory, and the optional base path that written item paths
    should be made relative to.
    """
    ui.print_("Updating smart playlists...")
    playlists = config['smartplaylist']['playlists'].get(list)
    playlist_dir = config['smartplaylist']['playlist_dir'].as_filename()
    relative_to = config['smartplaylist']['relative_to'].get()
    if relative_to:
        relative_to = normpath(relative_to)

    for playlist in playlists:
        # Gather matches from both the album-level and item-level queries.
        items = _items_for_query(lib, playlist, True)
        items += _items_for_query(lib, playlist, False)

        # Playlist names may contain item templates, so a single configured
        # playlist can expand into several m3u files; map each resolved
        # name to its ordered, de-duplicated list of item paths.
        basename = playlist['name'].encode('utf8')
        m3us = {}
        for item in items:
            m3u_name = item.evaluate_template(basename, True)
            paths = m3us.setdefault(m3u_name, [])
            item_path = item.path
            if relative_to:
                item_path = os.path.relpath(item.path, relative_to)
            if item_path not in paths:
                paths.append(item_path)

        # Emit one playlist file per resolved name.
        for m3u_name in m3us:
            m3u_path = normpath(os.path.join(playlist_dir, m3u_name))
            with open(syspath(m3u_path), 'w') as f:
                for path in m3us[m3u_name]:
                    f.write(path + '\n')

    ui.print_("... Done")
class SmartPlaylistPlugin(BeetsPlugin):
    # Plugin entry point: registers configuration defaults and the
    # `splupdate` CLI command for regenerating smart playlists.
    def __init__(self):
        super(SmartPlaylistPlugin, self).__init__()
        # Default configuration values; consumed by update_playlists().
        self.config.add({
            'relative_to': None,
            'playlist_dir': u'.',
            'auto': True,
            'playlists': []
        })

    def commands(self):
        def update(lib, opts, args):
            # Command options/arguments are accepted but unused.
            update_playlists(lib)

        spl_update = ui.Subcommand('splupdate',
                help='update the smart playlists')
        spl_update.func = update
        return [spl_update]
# Event listener: flag that the library database changed so the playlists
# are regenerated (at most once) when beets exits.
@SmartPlaylistPlugin.listen('database_change')
def handle_change(lib):
    global database_changed
    database_changed = True
# Event listener: on CLI exit, rebuild the playlists, but only when the
# database actually changed and automatic updates are enabled in config.
@SmartPlaylistPlugin.listen('cli_exit')
def update(lib):
    auto = config['smartplaylist']['auto']
    if database_changed and auto:
        update_playlists(lib)
|
Python
| 0
|
@@ -884,142 +884,8 @@
os%0A%0A
-# Global variable so that smartplaylist can detect database changes and run%0A# only once before beets exits.%0Adatabase_changed = False%0A%0A
%0Adef
@@ -3443,16 +3443,119 @@
%7D)%0A%0A
+ if self.config%5B'auto'%5D:%0A self.register_listener('database_change', self.db_change)%0A%0A
def
@@ -3825,165 +3825,68 @@
e%5D%0A%0A
-%0A@SmartPlaylistPlugin.listen('database_change')%0Adef handle_change(lib):%0A global database_changed%0A database_changed = True%0A%0A%0A@SmartPlaylistPlugin.
+ def db_change(self, lib):%0A self.register_
listen
+er
('cl
@@ -3896,102 +3896,49 @@
xit'
-)%0Adef update(lib):%0A auto = config%5B'smartplaylist'%5D%5B'auto'%5D%0A if database_changed and auto
+, self.update)%0A%0A def update(self, lib)
:%0A
|
bdfbea2e5896a6a20ae15f053b80b664e946eb88
|
Solved task 2 on bio algorithms
|
Stepic/BioinformaticsAlgorithms/ReverseComplement.py
|
Stepic/BioinformaticsAlgorithms/ReverseComplement.py
|
Python
| 0.998269
|
@@ -0,0 +1,524 @@
+# Reverse Complement Problem: Reverse complement a nucleotide pattern%0A%0Adef main():%0A dna = ''%0A with open('dna.txt', 'r') as f:%0A dna = f.read()%0A%0A comp_dna = ''%0A%0A for i in dna%5B::-1%5D:%0A if i == 'A':%0A comp_dna += 'T'%0A elif i == 'T':%0A comp_dna += 'A'%0A elif i == 'C':%0A comp_dna += 'G'%0A elif i == 'G':%0A comp_dna += 'C'%0A%0A with open('res.txt', 'w') as f:%0A f.write(comp_dna)%0A%0A print(comp_dna)%0A%0Aif __name__ == '__main__':%0A main()%0A
|
|
aa52332d622e16b3d1524eb2dc12047cec02fb33
|
Create calculator.py
|
calculator.py
|
calculator.py
|
Python
| 0.000001
|
@@ -0,0 +1,1673 @@
+# my-first-calculator%0A#Description is in the name%0A%0A%0A#calculator%0A%0Aprint %22For addition press 1%22%0Aprint %22For subtraction press 2%22%0Aprint %22For multiplication press 3%22%0Aprint %22For division press 4%22%0Aprint %22If you're all done press 5%22%0A%0Acmd=float(int(raw_input(%22Enter operation number:%22)))%0A%0A%0A#Addition%0Aif cmd==1:%0A print %22Ok begin addition%22%0A first=float(int(raw_input(%22Enter first number:%22)))%0A second=float(int(raw_input(%22Enter second number:%22)))%0A result=first+second%0A print first,%22+%22,second,%22=%22,result,%22(sum)%22%0A%0A#Subtraction%0Aelif cmd==2:%0A print %22Ok begin Subtraction:%22%0A first=float(int(raw_input(%22Enter first number:%22)))%0A second=float(int(raw_input(%22Enter second number:%22)))%0A result=first-second%0A print first,%22-%22,second,%22=%22,result,%22(difference)%22%0A%0A#Multiplication%0Aelif cmd==3:%0A print %22Ok begin multiplication%22%0A first=float(int(raw_input(%22Enter multiplicand:%22)))%0A second=float(int(raw_input(%22Enter multiplier:%22)))%0A product=first*second%0A print first,%22X%22,second,%22=%22,product,%22(product)%22%0A%0A#Division%0Aelif cmd==4:%0A print %22Ok begin division%22%0A print %22For decimal division press 1%22%0A print %22For normal division press 2%22%0A command=float(int(raw_input(%22Enter Division type:%22)))%0A if command==1:%0A first=float(int(raw_input(%22Enter Dividend:%22)))%0A second=float(int(raw_input(%22Enter Divisor:%22)))%0A result=first/second%0A print first,%22/%22,second,%22=%22,result,%22(quotient)%22%0A elif command==2:%0A first=int(raw_input(%22Enter Dividend:%22))%0A second=int(raw_input(%22Enter Divisor:%22))%0A result1=first/second%0A result2=first%25second%0A print first,%22/%22,second,%22=%22,result1,%22, remainder =%22,result2%0A%0A %0A %0A
|
|
069a5758b16624ac2b547ede44123b64c89baf96
|
Add simple script mapping YTID to KA URLs.
|
map_ytids_to_ka_urls.py
|
map_ytids_to_ka_urls.py
|
Python
| 0
|
@@ -0,0 +1,1782 @@
+#!/usr/bin/env python3%0Afrom kapi import *%0Afrom utils import *%0Aimport argparse, sys%0Aimport time%0A%0A%0Adef read_cmd():%0A %22%22%22Function for reading command line options.%22%22%22%0A desc = %22Program for mapping YouTube IDs to KA URLs to Crowdin WYSIWYG editor.%22 %0A parser = argparse.ArgumentParser(description=desc)%0A parser.add_argument('input_file',metavar='INPUT_FILE', help='Text file containing YouTube IDs and possibly filenames.')%0A parser.add_argument('-s','--subject', dest='subject', default='root', help='Print full tree for a given domain/subject.')%0A return parser.parse_args()%0A%0Adef print_children_titles(content_tree):%0A for child in content_tree%5B%22children%22%5D:%0A pprint(child%5B'title'%5D)%0A%0Adef print_dict_without_children(dictionary):%0A for k in dictionary.keys():%0A if k != 'children':%0A print(k, dictionary%5Bk%5D)%0A%0Aif __name__ == %22__main__%22:%0A%0A opts = read_cmd()%0A infile = opts.input_file%0A subject_title = opts.subject%0A%0A # List ytids may also contain filenames%0A ytids = %5B%5D%0A # Reading file with YT id's%0A with open(infile, %22r%22) as f:%0A for line in f:%0A y = line.split()%0A if len(y) != 0:%0A ytids.append(y%5B0%5D)%0A else:%0A ytids.append(None)%0A%0A tree = load_ka_tree('video')%0A%0A if subject_title == 'root':%0A subtree = tree%0A else:%0A subtree = find_ka_topic(tree, subject_title)%0A%0A videos = %5B%5D%0A for ytid in ytids:%0A if ytid is not None:%0A v = find_video_by_youtube_id(subtree, ytid)%0A if v:%0A videos.append(find_video_by_youtube_id(subtree, ytid) )%0A else:%0A videos.append(ytid)%0A%0A for v in videos:%0A try:%0A print(v%5B'ka_url'%5D.replace('www', 'translate'))%0A except:%0A print(v)%0A%0A
|
|
41265e02f47a55d11dcc921aeeebebba290ed61f
|
Fix Dee.
|
game/migrations/0008_fix_dee.py
|
game/migrations/0008_fix_dee.py
|
Python
| 0
|
@@ -0,0 +1,484 @@
+# -*- coding: utf-8 -*-%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import models, migrations%0A%0A%0Adef fix_dee(apps, schema_editor):%0A%0A Character = apps.get_model('game', 'Character')%0A%0A dee = Character.objects.get(name='Dee')%0A dee.en_face = 'characters/front_view/Dee.svg'%0A dee.save()%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('game', '0007_added_block__limits'),%0A %5D%0A%0A operations = %5B%0A migrations.RunPython(fix_dee)%0A %5D%0A
|
|
6d0f78ccdb8587d5a35ee297198a664274598747
|
Create run_test.py
|
recipes/pytest-sugar/run_test.py
|
recipes/pytest-sugar/run_test.py
|
Python
| 0.000004
|
@@ -0,0 +1,199 @@
+import django%0Afrom django.conf import settings%0Asettings.configure(INSTALLED_APPS=%5B'pytest_sugar', 'django.contrib.contenttypes', 'django.contrib.auth'%5D) %0Adjango.setup() %0A %0Aimport pytest_sugar%0A
|
|
e50dc18525e0e4cbbef56cd16ba4e2d9690464f1
|
Add solution for problem 34
|
euler034.py
|
euler034.py
|
Python
| 0
|
@@ -0,0 +1,344 @@
+#!/usr/bin/python%0A%0Afrom math import factorial, log%0Avalues = %5B0%5D*10%0A%0Afor i in range(10):%0A values%5Bi%5D = factorial(i)%0A%0Atotal = 0%0Afor i in range(10, factorial(9) * 7):%0A target = 0%0A test = i%0A while test != 0:%0A x = test %25 10%0A target += values%5Bx%5D%0A test = test // 10%0A if i == target:%0A total += i%0A%0Aprint(total)%0A
|
|
427752b4ee3d63d1bf29a9a2a9be011662df8556
|
add login handler
|
jupyterhub_login.py
|
jupyterhub_login.py
|
Python
| 0.000001
|
@@ -0,0 +1,787 @@
+from sandstone.lib.handlers.base import BaseHandler%0Aimport requests%0Aimport os%0A%0Aclass JupyterHubLoginHandler(BaseHandler):%0A def get(self):%0A api_token = os.environ%5B'JUPYTERHUB_API_TOKEN'%5D%0A%0A url = '%7Bprotocol%7D://%7Bhost%7D/hub/api/authorizations/token/%7Btoken%7D'.format(%0A protocol=self.request.protocol,%0A host=self.request.host,%0A token=api_token%0A )%0A%0A res = requests.get(%0A url,%0A headers=%7B%0A 'Authorization': 'token %25s' %25 api_token%0A %7D%0A )%0A%0A%0A username = res.json()%5B'name'%5D%0A%0A if username:%0A self.set_secure_cookie('user', username)%0A self.redirect('/user/%7B%7D'.format(username))%0A else:%0A self.set_status(403)%0A self.finish()%0A
|
|
8bd8ae1daa432bce9881214c4d326ac8a38e2046
|
Correct MAPE loss
|
keras/objectives.py
|
keras/objectives.py
|
from __future__ import absolute_import
import theano
import theano.tensor as T
import numpy as np
from six.moves import range
epsilon = 1.0e-9
def mean_squared_error(y_true, y_pred):
    """Mean of the squared element-wise differences, taken over the last axis."""
    squared_diff = T.sqr(y_pred - y_true)
    return squared_diff.mean(axis=-1)
def mean_absolute_error(y_true, y_pred):
    """Mean of the absolute element-wise differences, taken over the last axis."""
    abs_diff = T.abs_(y_pred - y_true)
    return abs_diff.mean(axis=-1)
def mean_absolute_percentage_error(y_true, y_pred):
    """Mean absolute percentage error, taken over the last axis.

    The denominator is clipped away from zero (to at least `epsilon`) so
    that targets equal or very close to zero do not cause division by
    zero or exploding loss values.
    """
    diff = T.abs_((y_true - y_pred) / T.clip(T.abs_(y_true), epsilon, np.inf))
    return diff.mean(axis=-1) * 100.
def mean_squared_logarithmic_error(y_true, y_pred):
    """Mean squared difference of log(1 + x), taken over the last axis.

    Both tensors are clipped below at `epsilon` before taking the log.
    """
    log_pred = T.log(T.clip(y_pred, epsilon, np.inf) + 1.)
    log_true = T.log(T.clip(y_true, epsilon, np.inf) + 1.)
    return T.sqr(log_pred - log_true).mean(axis=-1)
def squared_hinge(y_true, y_pred):
    """Squared hinge loss for +/-1 encoded targets, over the last axis."""
    margin = T.maximum(1. - y_true * y_pred, 0.)
    return T.sqr(margin).mean(axis=-1)
def hinge(y_true, y_pred):
    """Hinge loss for +/-1 encoded targets, over the last axis."""
    margin = 1. - y_true * y_pred
    return T.maximum(margin, 0.).mean(axis=-1)
def categorical_crossentropy(y_true, y_pred):
    """Categorical cross-entropy.

    Expects a binary class matrix for `y_true` instead of a vector of
    scalar class indices.
    """
    # Clip away exact 0/1 to keep log() finite, then renormalize so the
    # class probabilities of each sample sum to 1.
    y_pred = T.clip(y_pred, epsilon, 1.0 - epsilon)
    y_pred /= y_pred.sum(axis=-1, keepdims=True)
    return T.nnet.categorical_crossentropy(y_pred, y_true)
def binary_crossentropy(y_true, y_pred):
    """Binary cross-entropy with predictions clipped into (0, 1) to keep log() finite."""
    clipped = T.clip(y_pred, epsilon, 1.0 - epsilon)
    return T.nnet.binary_crossentropy(clipped, y_true)
# Aliases: allow each loss to be requested through get() by its common
# abbreviation, in both lower- and upper-case form.
mse = MSE = mean_squared_error
mae = MAE = mean_absolute_error
mape = MAPE = mean_absolute_percentage_error
msle = MSLE = mean_squared_logarithmic_error
from .utils.generic_utils import get_from_module
def get(identifier):
    # Resolve `identifier` against this module's globals (e.g. 'mse',
    # 'categorical_crossentropy') via the shared lookup helper.
    return get_from_module(identifier, globals(), 'objective')
|
Python
| 0.000002
|
@@ -408,22 +408,55 @@
pred) /
-y_true
+T.clip(T.abs_(y_true), epsilon, np.inf)
).mean(a
@@ -468,16 +468,17 @@
1) * 100
+.
%0A%0Adef me
|
29aa26f553633fbe4a5ae37721e1da0ecca4139c
|
Create MyaiBot-Pictures.py
|
home/moz4r/MyaiBot-Pictures.py
|
home/moz4r/MyaiBot-Pictures.py
|
Python
| 0
|
@@ -0,0 +1,1293 @@
+#PICTURE FIND AND DISPLAY BOT%0A#LONK AT http://www.myai.cloud/%0A#FOR SERVER NAME AND BOT STATUS%0A#IT S A SMALL COMPUTER FOR NOW SORRY IF PROBLEMS%0Afrom java.lang import String%0Aimport random%0Aimport threading%0Aimport itertools%0A%0Ahttp = Runtime.createAndStart(%22http%22,%22HttpClient%22)%0ARuntime.createAndStart(%22chatBot%22, %22ProgramAB%22)%0ARuntime.createAndStart(%22ear%22, %22WebkitSpeechRecognition%22) %0ARuntime.createAndStart(%22webGui%22, %22WebGui%22)%0ARuntime.createAndStart(%22htmlFilter%22, %22HtmlFilter%22)%0ARuntime.createAndStart(%22mouth%22, %22AcapelaSpeech%22) %0ARuntime.createAndStart(%22Image%22, %22ImageDisplay%22) %0A%0Amouth.setVoice(%22Ryan%22)%0Amouth.setLanguage(%22EN%22)%0A#mouth.setVoice(%22Antoine%22)%0A#mouth.setLanguage(%22FR%22)%0A%0AchatBot.startSession( %22default%22, %22rachel%22) %0AchatBot.addTextListener(htmlFilter) %0AhtmlFilter.addListener(%22publishText%22, python.name, %22talk%22) %0A%0Adef talk(data):%0A%09mouth.speak(data)%0A %09print %22chatbot dit :%22, data%0Adef FindImage(image):%0A%09mouth.speak(%22I show you %22+image)%0A #mouth.speak(%22Voici %22+image)%0A%09#PLEASE USE REAL LANGUAGE PARAMETER :%0A%09#lang=XX ( FR/EN/RU/IT etc...)%0A%09#A FAKE LANGUAGE WORKS BUT DATABASE WILL BROKE%0A%09a = String(http.get(%22http://myai.cloud/bot1.php?pic=%22+image.replace(%22 %22, %22%2520%22)+%22&lang=US%22))%0A%09#a = String(http.get(%22http://myai.cloud/bot1.php?pic=%22+image.replace(%22 %22, %22%2520%22)+%22&lang=FR%22))%0A%09Image.display(a)%0A
|
|
147374971ad21406d61beb1512b5a702298fc3dc
|
add a generic seach module (relies on sqlalchemy)
|
cartoweb/plugins/search.py
|
cartoweb/plugins/search.py
|
Python
| 0.000001
|
@@ -0,0 +1,3037 @@
+from sqlalchemy.sql import select%0Afrom sqlalchemy.sql import and_%0Afrom sqlalchemy.sql import func%0A%0Afrom shapely.geometry.point import Point%0Afrom shapely.geometry.polygon import Polygon%0A%0Aclass Search:%0A EPSG = 4326%0A UNITS = 'degrees'%0A%0A def __init__(self, idColumn, geomColumn, epsg=EPSG, units=UNITS):%0A self.idColumn = idColumn%0A self.geomColumn = geomColumn%0A self.epsg = epsg%0A self.units = units%0A self.limit = None%0A %0A def buildExpression(self, request):%0A id = None%0A path = request.path_info.split(%22/%22)%0A if len(path) %3E 1:%0A path_pieces = path%5B-1%5D.split(%22.%22)%0A if len(path_pieces) %3E 1 and path_pieces%5B0%5D.isdigit():%0A id = int(path_pieces%5B0%5D)%0A%0A expr = None%0A if id is not None:%0A expr = self.idColumn == id;%0A %0A if 'maxfeatures' in request.params:%0A self.limit = int(request.params%5B'maxfeatures'%5D)%0A %0A epsg = self.EPSG%0A if 'epsg' in request.params:%0A epsg = request.params%5B'epsg'%5D%0A%0A # deal with lonlat query%0A if 'lon' in request.params and 'lat' in request.params and 'radius' in request.params:%0A # define point from lonlat%0A lon = float(request.params%5B'lon'%5D)%0A lat = float(request.params%5B'lat'%5D)%0A point = Point(lon, lat)%0A pgPoint = func.pointfromtext(point.wkt, epsg)%0A%0A if epsg != self.epsg:%0A pgPoint = func.transform(pgPoint, self.epsg)%0A%0A # build query expression%0A if self.units == 'degrees':%0A dist = func.distance_sphere(self.geomColumn, pgPoint)%0A else:%0A dist = func.distance(self.geomColumn, pgPoint)%0A e = dist %3C float(request.params%5B'radius'%5D)%0A%0A # update query expression%0A if expr is not None:%0A expr = and_(expr, e)%0A else:%0A expr = e%0A%0A # deal with box query%0A elif 'box' in request.params:%0A coords = request.params%5B'box'%5D.split(',')%0A # define polygon from box%0A pointA = (float(coords%5B0%5D), float(coords%5B1%5D))%0A pointB = (float(coords%5B0%5D), float(coords%5B3%5D))%0A pointC = (float(coords%5B2%5D), 
float(coords%5B3%5D))%0A pointD = (float(coords%5B2%5D), float(coords%5B1%5D))%0A pointE = pointA%0A coords = (pointA, pointB, pointC, pointD, pointE)%0A poly = Polygon(coords)%0A pgPoly = func.geomfromtext(poly.wkt, epsg)%0A%0A if epsg != self.epsg:%0A pgPoly = func.transform(pgPoly, self.epsg)%0A%0A # build query expression%0A e = self.geomColumn.op('&&')(pgPoly)%0A%0A # update query expression%0A if expr is not None:%0A expr = and_(expr, e)%0A else:%0A expr = e%0A%0A return expr%0A%0A def query(self, session, obj, tableObj, expr):%0A return session.query(obj).from_statement(%0A select(%5BtableObj%5D, expr).limit(self.limit)).all()%0A
|
|
6078617684edbc7f264cfe08d60f7c3d24d2898f
|
add test for handle_conversation_before_save
|
plugin/test/test_handle_conversation_before_save.py
|
plugin/test/test_handle_conversation_before_save.py
|
Python
| 0.000001
|
@@ -0,0 +1,2144 @@
+import unittest%0Aimport copy%0Afrom unittest.mock import Mock%0A%0Aimport chat_plugin%0Afrom chat_plugin import handle_conversation_before_save%0A%0A%0Aclass TestHandleConversationBeforeSave(unittest.TestCase):%0A%0A def setUp(self):%0A self.conn = None%0A chat_plugin.current_user_id = Mock(return_value=%22user1%22)%0A%0A def record(self):%0A return %7B%0A 'participant_ids': %5B'user1', 'user2'%5D,%0A 'admin_ids': %5B'user1'%5D%0A %7D%0A%0A def original_record(self):%0A return %7B%0A 'participant_ids': %5B'user1', 'user2'%5D,%0A 'admin_ids': %5B'user1'%5D%0A %7D%0A%0A def test_with_valid_record(self):%0A handle_conversation_before_save(%0A self.record(), self.original_record(), self.conn)%0A%0A def test_no_participants(self):%0A record = self.record()%0A record%5B'participant_ids'%5D = %5B%5D%0A with self.assertRaises(Exception) as cm:%0A handle_conversation_before_save(%0A record, self.original_record(), self.conn)%0A%0A def test_no_admins(self):%0A record = self.record()%0A record%5B'admin_ids'%5D = %5B%5D%0A with self.assertRaises(Exception) as cm:%0A handle_conversation_before_save(%0A record, self.original_record(), self.conn)%0A%0A def test_create_direct_message_for_others(self):%0A record = self.record()%0A record%5B'participant_ids'%5D = %5B'user2', 'user3'%5D%0A record%5B'is_direct_message'%5D = True%0A with self.assertRaises(Exception) as cm:%0A handle_conversation_before_save(%0A record, None, self.conn)%0A%0A def test_create_direct_message_with_three_participants(self):%0A record = self.record()%0A record%5B'participant_ids'%5D = %5B'user1', 'user2', 'user3'%5D%0A record%5B'is_direct_message'%5D = True%0A with self.assertRaises(Exception) as cm:%0A handle_conversation_before_save(%0A record, None, self.conn)%0A%0A def test_direct_message_should_have_no_admin(self):%0A record = self.record()%0A record%5B'is_direct_message'%5D = True%0A handle_conversation_before_save(record, None, self.conn)%0A self.assertTrue(record%5B'admin_ids'%5D == %5B%5D)%0A
|
|
943e162eee203f05b5a2d5b19bcb4a9c371cc93b
|
Add new script to get comet velocity from kymograph
|
plugins/Scripts/Plugins/Kymograph_Comet_Velocity.py
|
plugins/Scripts/Plugins/Kymograph_Comet_Velocity.py
|
Python
| 0
|
@@ -0,0 +1,1590 @@
+# @Float(label=%22Time Interval (s)%22, value=1) dt%0A# @Float(label=%22Pixel Length (um)%22, value=1) pixel_length%0A# @Boolean(label=%22Do you want to save results files ?%22, required=False) save_results%0A# @Boolean(label=%22Do you want to save ROI files ?%22, required=False) save_roi%0A# @ImageJ ij%0A# @ImagePlus img%0A# @Dataset data%0A# @StatusService status%0A%0Aimport os%0Aimport math%0A%0Afrom ij.plugin.frame import RoiManager%0Afrom ij.measure import ResultsTable%0A%0Adef main():%0A%09# Get image processor and imgplus%0A%09imp = img.getProcessor()%0A%09imgp = data.getImgPlus()%0A%09fname = data.getSource()%0A%09name = os.path.basename(fname)%0A%09%0A%09# Get ROIManager%0A%09rm = RoiManager.getInstance()%0A%09if not rm:%0A%09%09status.warn(%22Use ROI Manager tool (Analyze%3ETools%3EROI Manager...).%22)%0A%09%09return False%0A%0A%09if len(rm.getRoisAsArray()) == 0:%0A%09%09status.warn(%22ROI Manager does not have any ROI.%22)%0A%09%09return False%0A%09%09%0A%09if save_roi:%0A%09%09roi_path = os.path.splitext(fname)%5B0%5D + %22.ROI.zip%22%0A%09%09rm.runCommand(%22Save%22, roi_path);%0A%0A%09rt = ResultsTable()%0A%0A%09for i, roi in enumerate(rm.getRoisAsArray()):%0A%09%09x1 = roi.x1%0A%09%09y1 = roi.y1%0A%09%09x2 = roi.x2%0A%09%09y2 = roi.y2%0A%0A%09%09if x1 %3E x2:%0A%09%09%09x1, x2 = x2, x1%0A%09%09%09y1, y2 = y2, y1%0A%0A%09%09run_length = roi.y1 - roi.y2%0A%09%09run_duration = roi.x2 - roi.x1%0A%09%09run_speed = run_length / run_duration%0A%0A%09%09rt.incrementCounter()%0A%09%09rt.addValue(%22Track ID%22, i+1)%0A%09%09rt.addValue(%22Track Length (um)%22, run_length)%0A%09%09rt.addValue(%22Track duration (s)%22, run_duration)%0A%09%09rt.addValue(%22Track speed (um/s)%22, run_speed)%0A%09%0A%09results_path = roi_path = os.path.splitext(fname)%5B0%5D + %22.Results.csv%22%0A%09rt.save(results_path)%0A%09rt.show('Comet Analysis Results for %22%25s%22' %25 name)%0A%0Amain()
|
|
c426c773ee36d2872f79ff01d3bed615245e61b3
|
add nbconvert.utils.pandoc
|
IPython/nbconvert/utils/pandoc.py
|
IPython/nbconvert/utils/pandoc.py
|
Python
| 0
|
@@ -0,0 +1,1726 @@
+%22%22%22Utility for calling pandoc%22%22%22%0A#-----------------------------------------------------------------------------%0A# Copyright (c) 2013 the IPython Development Team.%0A#%0A# Distributed under the terms of the Modified BSD License.%0A#%0A# The full license is in the file COPYING.txt, distributed with this software.%0A#-----------------------------------------------------------------------------%0A%0A#-----------------------------------------------------------------------------%0A# Imports%0A#-----------------------------------------------------------------------------%0A%0Afrom __future__ import print_function%0A%0A# Stdlib imports%0Aimport sys%0Aimport subprocess%0A%0A# IPython imports%0Afrom IPython.utils.py3compat import cast_bytes%0A%0A#-----------------------------------------------------------------------------%0A# Classes and functions%0A#-----------------------------------------------------------------------------%0A%0Adef pandoc(source, fmt, to, encoding='utf-8'):%0A %22%22%22Convert an input string in format %60from%60 to format %60to%60 via pandoc.%0A%0A This function will raise an error if pandoc is not installed.%0A Any error messages generated by pandoc are printed to stderr.%0A%0A Parameters%0A ----------%0A source : string%0A Input string, assumed to be valid format %60from%60.%0A fmt : string%0A The name of the input format (markdown, etc.)%0A to : string%0A The name of the output format (html, etc.)%0A%0A Returns%0A -------%0A out : unicode%0A Output as returned by pandoc.%0A %22%22%22%0A p = subprocess.Popen(%5B'pandoc', '-f', fmt, '-t', to%5D,%0A stdin=subprocess.PIPE, stdout=subprocess.PIPE%0A )%0A out, _ = p.communicate(cast_bytes(source, encoding))%0A out = out.decode(encoding, 'replace')%0A return out%5B:-1%5D%0A%0A
|
|
5b951c91a7d054958d819bf19f97a5b33e21ff2d
|
add in some forms
|
lib/buyers/forms.py
|
lib/buyers/forms.py
|
Python
| 0.000001
|
@@ -0,0 +1,301 @@
+from django import forms%0Afrom .models import Buyer%0A%0A%0Aclass BuyerValidation(forms.ModelForm):%0A%0A class Meta:%0A model = Buyer%0A%0A%0Aclass PreapprovalValidation(forms.Form):%0A start = forms.DateField()%0A end = forms.DateField()%0A return_url = forms.URLField()%0A cancel_url = forms.URLField()%0A
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.