commit stringlengths 40 40 | subject stringlengths 4 1.73k | repos stringlengths 5 127k | old_file stringlengths 2 751 | new_file stringlengths 2 751 | new_contents stringlengths 1 8.98k | old_contents stringlengths 0 6.59k | license stringclasses 13 values | lang stringclasses 23 values |
|---|---|---|---|---|---|---|---|---|
803201baa32fb847f363b6807f92f2d0b6a51c51 | Test that an error in pre_gen_project aborts generation | audreyr/cookiecutter,stevepiercy/cookiecutter,stevepiercy/cookiecutter,michaeljoseph/cookiecutter,willingc/cookiecutter,Springerle/cookiecutter,hackebrot/cookiecutter,hackebrot/cookiecutter,pjbull/cookiecutter,michaeljoseph/cookiecutter,audreyr/cookiecutter,Springerle/cookiecutter,terryjbates/cookiecutter,terryjbates/cookiecutter,dajose/cookiecutter,dajose/cookiecutter,luzfcb/cookiecutter,luzfcb/cookiecutter,pjbull/cookiecutter,willingc/cookiecutter | tests/test_abort_generate_on_hook_error.py | tests/test_abort_generate_on_hook_error.py | # -*- coding: utf-8 -*-
import pytest
from cookiecutter import generate
from cookiecutter import exceptions
@pytest.mark.usefixtures('clean_system')
def test_pre_gen_hook(tmpdir):
context = {
'cookiecutter': {
"repo_dir": "foobar",
"abort_pre_gen": "yes",
"abort_post_gen": "no"
}
}
with pytest.raises(exceptions.FailedHookException):
generate.generate_files(
repo_dir='tests/hooks-abort-render',
context=context,
output_dir=str(tmpdir)
)
assert not tmpdir.join('foobar').isdir()
| bsd-3-clause | Python | |
b079edc37cd8abb68194637ee90b9fecc51b9b98 | Add basic test for document quickcaching | qedsoftware/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq | corehq/apps/cachehq/tests.py | corehq/apps/cachehq/tests.py | from copy import deepcopy
from mock import patch, MagicMock
from django.test import SimpleTestCase
from dimagi.ext import couchdbkit as couch
from corehq.apps.cachehq.mixins import CachedCouchDocumentMixin
class BlogPost(CachedCouchDocumentMixin, couch.Document):
title = couch.StringProperty()
body = couch.StringProperty()
class TestCachedCouchDocumentMixin(SimpleTestCase):
@patch('dimagi.ext.couchdbkit.Document.save', MagicMock())
@patch('dimagi.ext.couchdbkit.Document.get')
def test_get(self, doc_get):
blog_post = BlogPost(title="My favorite colors", body="blue")
blog_post._id = 'idssrgglcfoyxdtrunbcae'
doc_get.return_value = deepcopy(blog_post)
blog_post.save()
blog_post.clear_caches()
# Make two `get`s and assert that only one made it to Document.get
BlogPost.get(blog_post._id)
BlogPost.get(blog_post._id)
doc_get.assert_called_once_with(blog_post._id)
# Update the doc, save, and assert that Document.get was hit again
blog_post.body = "Actually, it's purple"
blog_post.save()
BlogPost.get(blog_post._id)
self.assertEqual(doc_get.call_count, 2)
| bsd-3-clause | Python | |
fa9421ef98d2dee2b9428d4165f5242aebe51a48 | create cliconf plugin for enos - enos.py (#31509) | thaim/ansible,thaim/ansible | lib/ansible/plugins/cliconf/enos.py | lib/ansible/plugins/cliconf/enos.py | # This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by
# Ansible still belong to the author of the module, and may assign their own
# license to the complete work.
#
# Copyright (C) 2018 Lenovo, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Contains CLI Configuration Plugin methods for ENOS Modules
# Lenovo Networking
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
import json
from itertools import chain
from ansible.module_utils._text import to_bytes, to_text
from ansible.module_utils.network_common import to_list
from ansible.plugins.cliconf import CliconfBase, enable_mode
class Cliconf(CliconfBase):
def get_device_info(self):
device_info = {}
device_info['network_os'] = 'enos'
reply = self.get(b'show version')
data = to_text(reply, errors='surrogate_or_strict').strip()
match = re.search(r'^Software Version (.*?) ', data, re.M | re.I)
if match:
device_info['network_os_version'] = match.group(1)
match = re.search(r'^Lenovo RackSwitch (\S+)', data, re.M | re.I)
if match:
device_info['network_os_model'] = match.group(1)
match = re.search(r'^(.+) uptime', data, re.M)
if match:
device_info['network_os_hostname'] = match.group(1)
else:
device_info['network_os_hostname'] = "NA"
return device_info
@enable_mode
def get_config(self, source='running'):
if source not in ('running', 'startup'):
msg = "fetching configuration from %s is not supported"
return self.invalid_params(msg % source)
if source == 'running':
cmd = b'show running-config'
else:
cmd = b'show startup-config'
return self.send_command(cmd)
@enable_mode
def edit_config(self, command):
for cmd in chain([b'configure terminal'], to_list(command), [b'end']):
self.send_command(cmd)
def get(self, *args, **kwargs):
return self.send_command(*args, **kwargs)
def get_capabilities(self):
result = {}
result['rpc'] = self.get_base_rpc()
result['network_api'] = 'cliconf'
result['device_info'] = self.get_device_info()
return json.dumps(result)
| mit | Python | |
ce5ba72605e93e4fd83f36cced28d7c813c95e54 | Create myfile.py | iROCKBUNNY/myfile | myfile.py | myfile.py | import os
import re
def searchByExt(rootpath, ext):
print '----- File List -----'
results = []
for root, dirs, files in os.walk(rootpath):
for filename in files:
if re.search(r'.*\.%s' % ext, filename):
result = os.path.join(root, filename)
results.append(result)
print 'Find: %s' % result
print '-- End of File List--'
return results
def modifyPrefix(filelist, oldPrefix='', newPrefix=''):
# add prefix
if oldPrefix == '' and newPrefix != '':
action = 'Add'
# remove prefix
if oldPrefix != '' and newPrefix == '':
action = 'Remove'
# change prefix
if oldPrefix != '' and newPrefix != '':
action = 'Change'
# stay unchanged
if oldPrefix == newPrefix:
print 'The prefix stay unchanged.'
return
for oldFile in filelist:
if os.path.exists(oldFile):
if os.path.isfile(oldFile):
dirname, filename = os.path.split(oldFile)
if filename[:len(oldPrefix)] == oldPrefix:
newFilename = newPrefix + filename[len(oldPrefix):]
newFile = os.path.join(dirname, newFilename)
os.rename(oldFile, newFile)
if os.path.exists(newFile):
print '%s prefix: %s -> %s. Succeed' % (action, filename, newFilename)
else:
print '%s prefix: %s -> %s. Fail' % (action, filename, newFilename)
else:
print 'Warning: Invalid old prefix for file: %s (The requested prefix to be %sd is "%s"). Skip' % (filename, action.lower(), oldPrefix)
continue
else:
print 'Warning: %s is not a valid file. Skip' % oldFile
else:
print 'Warning: %s does not exist. Skip' % oldFile
def modifySuffix(filelist, oldSuffix='', newSuffix=''):
# add suffix
if oldSuffix == '' and newSuffix != '':
action = 'Add'
# remove suffix
if oldSuffix != '' and newSuffix == '':
action = 'Remove'
# change suffix
if oldSuffix != '' and newSuffix != '':
action = 'Change'
# stay unchanged
if oldSuffix == newSuffix:
print 'The suffix stay unchanged.'
return
for oldFile in filelist:
if os.path.exists(oldFile):
if os.path.isfile(oldFile):
dirname, fullfilename = os.path.split(oldFile)
filename, ext = os.path.splitext(fullfilename)
if filename[len(filename)-len(oldSuffix):] == oldSuffix:
newFilename = filename[:len(filename)-len(oldSuffix)] + newSuffix + ext
newFile = os.path.join(dirname, newFilename)
os.rename(oldFile, newFile)
if os.path.exists(newFile):
print '%s suffix: %s -> %s. Succeed' % (action, fullfilename, newFilename)
else:
print '%s suffix: %s -> %s. Fail' % (action, fullfilename, newFilename)
else:
print 'Warning: Invalid old suffix for file: %s (The requested suffix to be %sd is "%s"). Skip' % (fullfilename, action.lower(), oldSuffix)
continue
else:
print 'Warning: %s is not a valid file. Skip' % oldFile
else:
print 'Warning: %s does not exist. Skip' % oldFile
| mit | Python | |
b159433375714c67ac36e58d4323196222759f30 | Add missing migration from 096092b. | cdubz/babybuddy,cdubz/babybuddy,cdubz/babybuddy | babybuddy/migrations/0003_add_refresh_help_text.py | babybuddy/migrations/0003_add_refresh_help_text.py | # Generated by Django 2.0.5 on 2018-07-15 14:16
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('babybuddy', '0002_add_settings'),
]
operations = [
migrations.AlterField(
model_name='settings',
name='dashboard_refresh_rate',
field=models.DurationField(blank=True, choices=[(None, 'disabled'), (datetime.timedelta(0, 60), '1 min.'), (datetime.timedelta(0, 120), '2 min.'), (datetime.timedelta(0, 180), '3 min.'), (datetime.timedelta(0, 240), '4 min.'), (datetime.timedelta(0, 300), '5 min.'), (datetime.timedelta(0, 600), '10 min.'), (datetime.timedelta(0, 900), '15 min.'), (datetime.timedelta(0, 1800), '30 min.')], default=datetime.timedelta(0, 60), help_text='This setting will only be used when a browser does not support refresh on focus.', null=True, verbose_name='Refresh rate'),
),
]
| bsd-2-clause | Python | |
0104f898a4a54027688411dd20d39aeecfc31f6d | Create player.py | Lincoln-Cybernetics/Explore- | player.py | player.py | import pygame
import ss
class Player(pygame.sprite.Sprite):
def __init__(self, level, *groups):
super(Player, self).__init__(*groups)
self.Rimg = pygame.image.load('RangerDanR.png')
self.Limg = pygame.image.load('RangerDanL.png')
self.image = self.Rimg
self.rect = pygame.rect.Rect((100,100), self.image.get_size())#320,240
self.level = level
def command(self, cmd):
prev = self.rect.copy()
if cmd == "U":
if self.rect.y <= 100:
self.level.move_BG("D")
else:
self.rect.y -= 100
if cmd == "D":
if self.rect.y >= self.level.mastery - 200:
self.level.move_BG("U")
else:
self.rect.y += 100
if cmd == "L":
self.image = self.Limg
if self.rect.x <= 100:
self.level.move_BG("R")
else:
self.rect.x -= 100
if cmd == "R":
self.image = self.Rimg
if self.rect.x >= self.level.masterx - 200:
self.level.move_BG("L")
else:
self.rect.x += 100
new = self.rect
for loc in pygame.sprite.spritecollide(self, self.level.unpassable, False):
loc = loc.rect
if prev.right <= loc.left and new.right > loc.left:
new.right = loc.left
if prev.left >= loc.right and new.left < loc.right:
new.left = loc.right
if prev.bottom <= loc.top and new.bottom > loc.top:
new.bottom = loc.top
if prev.top >= loc.bottom and new.top < loc.bottom:
new.top = loc.bottom
for thing in pygame.sprite.spritecollide(self, self.level.items, True):
if thing.flavor == 'gem':
pygame.mixer.Sound('tadaa.wav').play()
def update(self):
pass
| unlicense | Python | |
59fa328c62cc7808bce365ddb1e0e1c0d744913b | add a basic reader | towerjoo/RssRolls | reader.py | reader.py | import feedparser
rss_url = "http://towerjoo.github.io/feed.xml"
feed= feedparser.parse(rss_url)
import pdb;pdb.set_trace()
| mit | Python | |
49f8a3de02d2e479232c327a3f78409a2297e173 | Add a basic implementation of screen recording + audio | nirbheek/shell-recorder-enhanced | record.py | record.py | #!/usr/bin/env python3
# vim: set sts=4 sw=4 et tw=0 :
#
# Author: Nirbheek Chauhan <nirbheek.chauhan@gmail.com>
# License: MIT
#
import sys, time
from gi.repository import Gio, GLib
def get_displays():
display_p = Gio.DBusProxy.new_for_bus_sync (Gio.BusType.SESSION,
Gio.DBusProxyFlags.NONE, None,
# Let's make the owner the shell itself instead of owning a bus name
# of our own. It doesn't really matter.
"org.gnome.Shell",
"/org/gnome/Mutter/DisplayConfig",
"org.gnome.Mutter.DisplayConfig",
# We want to do this synchronously
None)
displays = display_p.call_sync ("GetResources", None,
Gio.DBusCallFlags.NONE, -1, None).unpack()
# displays is a weird structure filled with numbers; extract what we need out of it
displays_placement = displays[1]
displays_metadata = [key[-1] for key in displays[2]]
displays_info = []
for (metadata, placement) in zip(displays_metadata, displays_placement):
# Extract the precise area on the entire canvas that this display is shown
area = placement[2:6]
if area[0] < 0 or area[1] < 0 or area[2] <= 0 or area[3] <= 0:
# Remove invalid or useless displays
continue
# Extract some identifying details about this display
details = {
"name": "{0} {1}".format(metadata["display-name"], metadata["product"]),
"connector-type": metadata["connector-type"],
"presentation": metadata["presentation"], # No idea what this is
"primary": metadata["primary"],
}
# Append a tuple of (display_details, (x, y, width, height))
displays_info.append((details, area))
return displays_info
def select_display(displays):
ii = 0
print ("Select a display to screencast:")
for (display, area) in displays:
print ("[{0}] {1}, connected via {2}".format(ii, display['name'],
display['connector-type']),
end="", flush=True)
if display['presentation']:
print (" (presentation)", end="", flush=True)
if display['primary']:
print (" (primary)")
else:
print ("")
ii += 1
print ("> ", end="")
while True:
try:
index = int(input())
except ValueError:
print ("Invalid index, try again\n> ", end="", flush=True)
continue
else:
break
return displays[index]
def screencast_area(filename, area):
cast_p = Gio.DBusProxy.new_for_bus_sync (Gio.BusType.SESSION,
Gio.DBusProxyFlags.NONE, None,
# Let's make the owner the shell itself instead of owning a bus name
# of our own. It doesn't really matter.
"org.gnome.Shell",
"/org/gnome/Shell/Screencast",
"org.gnome.Shell.Screencast",
# We want to do this synchronously
None)
# In theory, we can extend this to add a v4l2src source and record the
# webcam too, but that was buggy in my testing. The audio went out of whack
# and the timestamps were all messed up. This was when using compositor to
# overlay one video on top of the other.
pipeline_str = """
matroskamux streamable=true name=m
pulsesrc ! audioconvert ! opusenc ! queue name="audioq" ! m.
vp8enc min_quantizer=13 max_quantizer=13 cpu-used=5 deadline=1000000 threads=%T ! queue name="videoq" ! m.
"""
pipeline = GLib.Variant.new_string (pipeline_str)
params = area + (filename, {'pipeline': pipeline})
ret = cast_p.call_sync ("ScreencastArea",
# Write to test.webm, with no options
GLib.Variant("(iiiisa{sv})", params),
Gio.DBusCallFlags.NONE, -1, None).unpack()
return (cast_p,) + ret
filename = "test.mkv"
if len(sys.argv) > 1:
filename = sys.argv[1]
(display, area) = select_display(get_displays())
(stop_p, ret, f) = screencast_area (filename, area)
if not ret:
exit(1)
print ("Casting screen '{0}' to '{1}'".format(display['name'], f))
# Record for 10 seconds. Ideally we want a mainloop here or something.
time.sleep(10)
stop_p.call_sync("StopScreencast", None, Gio.DBusCallFlags.NONE, -1, None)
| mit | Python | |
021bf311598350e6fa976f72456e218c74bddbc6 | Create mush.py | keithporcaro/mush | mush.py | mush.py | import base64
import os
import pygsm
import gzip #replace with 7zip (LZMA)
import uuid
import random
import string
import fnmatch
import time
import multiprocessing
#Python 2.x
#need modem info
#need to auto-detect modem
#need some sort of interface
#replace x separator with semicolon
#replace with port of GSM Modem
modem = pygsm.GsmModem(port="/dev/ttyUSB0")
def random_char(y):
return ''.join(random.choice(string.ascii_letters) for x in range(y))
#this encodes a file into base64 (or whatever encoding), and splits that encoding into formatted SMS messages
def mush_file():
f = raw_input("Filepath?")
r = raw_input("Recipient?")
f_in = open(f, 'rt')
f_out = gzip.open(f+'.temp.gz','wb')
f_out.writelines(f_in)
f_in.close()
f_out.close()
f_read=open(f+'.temp.gz','rb')
initial_data = f_read.read()
f_read.close()
encoded_data = base64.b64encode(initial_data)
final_encoded = []
#with this plus a phone number, chance of a collision is 1/52^3. Probably lower since this isn't a high volume tool.
ident = random_char(3)
for i in xrange((len(encoded_data)/120)):
final_encoded.append("f;"+ident+";"+str(i+1)+";"+str(len(encoded_data)/120)+" "+encoded_data[i*120:(i+1)*120])
final_encoded.insert(0, 'f;'+ident+';0;'+str(len(final_encoded))+' '+os.path.basename(f))
for i in xrange(len(final_encoded)):
f_out = open("outgoing/"+str(uuid.uuid4())+".smsout","wb")
f_out.write(str(r)+"\n"+final_encoded[i])
f_out.close()
os.remove(f+'.temp.gz')
print "done"
mush_file()
#handle and store incoming messages
def process(msg):
sender = msg.sender
text = msg.text.split(" ")
file_id = text[0]
content = text[1]
f = open("parts/"+sender+" "+file_id+".part", "wb")
f.write(content)
f.close()
#delete from SIM card
check(sender, file_id, file_id.split(";")[3])
#check to see if the entire file has arrived yet
def check(sender, file_id, size):
file_base=";".join(file_id.split(";")[:2])
for i in xrange(size+1):
try:
if os.path.isfile("parts/"+sender + " " + file_base + ";" + str(i) + '.part'):
pass
else:
raise Exception()
except:
return
builder(sender, file_base, size)
#if the entire file has arrived, assemble it.
def builder(sender, file_base, size):
content=[]
if os.path.isfile(sender + " " + file_base + ";" + str(0) + '.part'):
f = open("parts/"+sender + " " + file_base + ";" + str(0) + '.part', 'rt')
file_name = f.read()
f.close()
for i in xrange(1, size+1):
try:
if os.path.isfile(sender + " " + file_base + ";" + str(i) + '.part'):
f = open("parts/"+sender + " " + file_base + ";" + str(i) + '.part')
content.append(f.read())
f.close()
else:
raise Exception()
except:
return
fh = open("files/"+file_name,"wb")
fh.write(base64.b64decode("".join(content)))
fh.close()
cleanup(sender, file_base)
def cleanup(sender, file_base):
#To-Do clear stuff off the SIM card
pass
#fake modem for testing throughput
def fake_modem(recipient, text):
time.sleep(1)
#print "Sent " + text + " to " + str(recipient)
return True
#send outgoing messages
def dispatch(outdir, awaiting):
#try:
if awaiting:
f = open(outdir+awaiting[0],'rt')
m = f.read().split("\n")
#send sms
if modem.send_sms(m[0], m[1]):
#dummy function
#if fake_modem(m[0],m[1]):
os.remove(outdir+awaiting[0])
#print "Deleted outgoing file"
else:
pass
#handle this error. Break up the chain? Add Wait Time Back?
# except:
# break
def modem_loop():
#check for forever
while True: #make this a try statement (check for signal)
#need wait_for_network? Maybe here? Not sure. Definitely on the send
try:
#is there a message on the modem?
msg = modem.next_message()
#msg = none
#is there anything in outgoing?
outbox = fnmatch.filter(os.listdir('outgoing/'),'*.smsout')
if msg is not None:
#process anything that came in
process(msg)
elif outbox:
#send anything that goes out
dispatch('outgoing/', outbox)
else:
raise Exception()
except:
time.sleep(2)
# Main Thread: Listener and encoder for files
# Thread 2: Modem Loop (intake, dispatch, check, decode)
# Do I need a third thread for check and decode?
thread2 = multiprocessing.Process(target=modem_loop, args=[])
thread2.start()
print "Processing in the background..."
mush_file()
| mit | Python | |
374f516be38e9630ff1ff6cda4146d0ebd2a9537 | remove model | dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq | corehq/apps/sms/migrations/0048_delete_sqlicdsbackend.py | corehq/apps/sms/migrations/0048_delete_sqlicdsbackend.py | # Generated by Django 2.2.13 on 2020-10-28 09:55
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('sms', '0047_merge_20200918_1641'),
]
operations = [
migrations.DeleteModel(
name='SQLICDSBackend',
),
]
| bsd-3-clause | Python | |
536e15458090b88962a0fd906b8f6dabfbad73d4 | Add files via upload | gnublet/py_explorations,gnublet/py_explorations | sklearn/exercise_02_sentiment.py | sklearn/exercise_02_sentiment.py | """Build a sentiment analysis / polarity model
Sentiment analysis can be casted as a binary text classification problem,
that is fitting a linear classifier on features extracted from the text
of the user messages so as to guess wether the opinion of the author is
positive or negative.
In this examples we will use a movie review dataset.
"""
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import LinearSVC
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
from sklearn.datasets import load_files
from sklearn.model_selection import train_test_split
from sklearn import metrics
if __name__ == "__main__":
# NOTE: we put the following in a 'if __name__ == "__main__"' protected
# block to be able to use a multi-core grid search that also works under
# Windows, see: http://docs.python.org/library/multiprocessing.html#windows
# The multiprocessing module is used as the backend of joblib.Parallel
# that is used when n_jobs != 1 in GridSearchCV
# the training data folder must be passed as first argument
movie_reviews_data_folder = sys.argv[1]
dataset = load_files(movie_reviews_data_folder, shuffle=False)
print("n_samples: %d" % len(dataset.data))
# split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.25, random_state=None)
# TASK: Build a vectorizer / classifier pipeline that filters out tokens
# that are too rare or too frequent
pipeline = Pipeline([
('vect', TfidfVectorizer(min_df=3, max_df=0.95)),
('clf', LinearSVC(C=1000)),
])
##TfidfVectorizer params:
#min_df: ignore terms that have doc frequency < threshold (cut-off).
#float in [0,1] represents proportion of docs, integer represents absolute counts
#max_df: similar
# TASK: Build a grid search to find out whether unigrams or bigrams are
# more useful.
# Fit the pipeline on the training set using grid search for the parameters
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
}
grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1)
grid_search.fit(docs_train, y_train)
# TASK: print the cross-validated scores for the each parameters set
# explored by the grid search
n_candidates = len(grid_search.cv_results_['params'])
print("n_candidates = {}".format(n_candidates))
for i in range(n_candidates):
print(i, 'params - %s; mean - %0.2f; std - %0.2f'
% (grid_search.cv_results_['params'][i],
grid_search.cv_results_['mean_test_score'][i],
grid_search.cv_results_['std_test_score'][i]))
# TASK: Predict the outcome on the testing set and store it in a variable
# named y_predicted
y_predicted = grid_search.predict(docs_test)
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Print and plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
# import matplotlib.pyplot as plt
# plt.matshow(cm)
# plt.show()
#new--
# Predict the result on some short new sentences:
sentences = [
u'Wow! this movie was amazing. I was impressed by the in-depth action, humor, and special effects! I loved the scene where the main character saved the day! Awesome and would see again! I definitely recommend this to anyone!',
u'No offense, but this was one of the worst movies I\'ve seen in many years. It had little emotion and no character development. Waste of time. Ugh! ',
]
predicted2 = grid_search.predict(sentences)
for s, p in zip(sentences, predicted2):
print(u'The language of "%s" is "%s"' % (s, dataset.target_names[p]))
from sklearn.externals import joblib
joblib.dump(grid_search, 'moviesentiments.pkl')
#load with using:
#grid_search = joblib.load('moviesentiments.pkl') | mit | Python | |
0d7e702a5f04f1e9e544b4e99eb57f6a9ddeabbe | cover new utility function for revoked token cleanup | conorsch/securedrop,conorsch/securedrop,ehartsuyker/securedrop,heartsucker/securedrop,conorsch/securedrop,heartsucker/securedrop,ehartsuyker/securedrop,conorsch/securedrop,conorsch/securedrop,ehartsuyker/securedrop,ehartsuyker/securedrop,heartsucker/securedrop,ehartsuyker/securedrop,ehartsuyker/securedrop,heartsucker/securedrop,heartsucker/securedrop | securedrop/tests/test_journalist_utils.py | securedrop/tests/test_journalist_utils.py | # -*- coding: utf-8 -*-
from flask import url_for
import os
import pytest
import random
from models import RevokedToken
from sqlalchemy.orm.exc import NoResultFound
from journalist_app.utils import cleanup_expired_revoked_tokens
os.environ['SECUREDROP_ENV'] = 'test' # noqa
from .utils.api_helper import get_api_headers
random.seed('◔ ⌣ ◔')
def test_revoke_token_cleanup_does_not_delete_tokens_if_not_expired(journalist_app, test_journo,
journalist_api_token):
with journalist_app.test_client() as app:
resp = app.post(url_for('api.logout'), headers=get_api_headers(journalist_api_token))
assert resp.status_code == 200
cleanup_expired_revoked_tokens()
revoked_token = RevokedToken.query.filter_by(token=journalist_api_token).one()
assert revoked_token.journalist_id == test_journo['id']
def test_revoke_token_cleanup_does_deletes_tokens_that_are_expired(journalist_app, test_journo,
journalist_api_token, mocker):
with journalist_app.test_client() as app:
resp = app.post(url_for('api.logout'), headers=get_api_headers(journalist_api_token))
assert resp.status_code == 200
# Mock response from expired token method when token is expired
mocker.patch('journalist_app.admin.Journalist.validate_token_is_not_expired_or_invalid',
return_value=None)
cleanup_expired_revoked_tokens()
with pytest.raises(NoResultFound):
RevokedToken.query.filter_by(token=journalist_api_token).one()
| agpl-3.0 | Python | |
e41b79855e966977c4484efd4ad6a02475833b3e | Add ex4.4: tornado multiple requests with asyncio integration | MA3STR0/PythonAsyncWorkshop | code/ex4.4-tornado_with_asyncio.py | code/ex4.4-tornado_with_asyncio.py | from tornado.platform.asyncio import AsyncIOMainLoop, to_asyncio_future
from tornado.httpclient import AsyncHTTPClient
import asyncio
import time
URL = 'http://127.0.0.1:8000'
@asyncio.coroutine
def get_greetings():
http_client = AsyncHTTPClient()
response = yield from to_asyncio_future(http_client.fetch(URL))
return response.body.decode('utf-8')
if __name__ == "__main__":
AsyncIOMainLoop().install()
loop = asyncio.get_event_loop()
t1 = time.time()
texts = loop.run_until_complete(get_greetings())
print(time.time() - t1, "seconds passed")
print(texts)
loop.close()
| mit | Python | |
a8e66380cb63e52ad57f66cb9e1a652dca5b32b9 | Create __init__.py | Raytone-D/puppet | puppet/__init__.py | puppet/__init__.py | mit | Python | ||
e81426b1f7890c056f926281c5a445bc6e74c80b | Create py-参数传递.py | ganmk/python-prctice | py-参数传递.py | py-参数传递.py | # 包裹关键字传递 dic是一个字典 收集所有的关键字传递给函数func_t
def func_t(**dic):
print type(dic)
print dic
print func_t(a=1, b=2)
print func_t(a=3, b=4, c=5)
| mit | Python | |
4f265b626c9ff5c333ea6c27cb08b45c2cecc7f3 | Add plugin code | aristidesfl/sublime-git-commit-message-auto-save | gitcommitautosave.py | gitcommitautosave.py | """Git Commit Auto Save.
Sublime Text 3 package to auto save commit messages when the window is closed.
This allows the user to close the window without having to save before,
or having to deal with the "Save File" popup.
"""
import sublime_plugin
class GitCommitAutoSave(sublime_plugin.EventListener):
def on_load(self, view):
if view.file_name().endswith('COMMIT_EDITMSG'):
view.set_scratch(True) # disable save file dialog on exit
def on_pre_close(self, view):
if view.file_name().endswith('COMMIT_EDITMSG'):
view.run_command("save")
| mit | Python | |
ac482caafe8c63de2606bb4894462f7b2e2bcb70 | Add initial script to print rosbag files | oliverlee/antlia | python/printbag.py | python/printbag.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Convert a rosbag file to legacy lidar binary format.
"""
"""LIDAR datatype format is:
(
timestamp (long),
flag (bool saved as int),
accelerometer[3] (double),
gps[3] (double),
distance[LIDAR_NUM_ANGLES] (long),
)
'int' and 'long' are the same size on the raspberry pi (32 bits).
"""
import sys
import rosbag
def decode_bag(bag):
topics = ['/scan', '/flagbutton_pressed']
return [message for message in bag.read_messages(topics=topics)]
if __name__ == '__main__':
if len(sys.argv) < 2:
print(('Usage: {} <rosbag> [<outfile>] \n\n'
'Print contents of rosbag file. If <outfile> is provided, \n'
'write contents of rosbag file to <outfile> in the legacy \n'
'lidar binary format.').format(__file__))
sys.exit(1)
outfile = None
filename = sys.argv[1]
if len(sys.argv) == 3:
outfile = sys.argv[2]
with rosbag.Bag(filename) as bag:
print(decode_bag(bag))
sys.exit()
| bsd-2-clause | Python | |
a74cc0f4c06db4dad3007f52ec4eb062773700be | Create quora_duplicate.py | py-in-the-sky/challenges,py-in-the-sky/challenges,py-in-the-sky/challenges | quora_duplicate.py | quora_duplicate.py | """
see: https://www.hackerrank.com/contests/quora-haqathon/challenges/duplicate
"""
import re
from json import loads
from sklearn.ensemble import RandomForestClassifier
from sklearn.pipeline import Pipeline
from sklearn.ensemble import BaggingClassifier
from nltk.stem.lancaster import LancasterStemmer
WORD_RE = re.compile(r'\w+')
STEMMER = LancasterStemmer()
STOP_WORDS = [
'i', 'me', 'my', 'myself', 'we', 'our', 'ours', 'ourselves', 'you', 'your', 'yours',
'yourself', 'yourselves', 'he', 'him', 'his', 'himself', 'she', 'her', 'hers',
'herself', 'it', 'its', 'itself', 'they', 'them', 'their', 'theirs', 'themselves',
'what', 'which', 'who', 'whom', 'this', 'that', 'these', 'those', 'am', 'is', 'are',
'was', 'were', 'be', 'been', 'being', 'have', 'has', 'had', 'having', 'do', 'does',
'did', 'doing', 'a', 'an', 'the', 'and', 'but', 'if', 'or', 'because', 'as', 'until',
'while', 'of', 'at', 'by', 'for', 'with', 'about', 'against', 'between', 'into',
'through', 'during', 'before', 'after', 'above', 'below', 'to', 'from', 'up', 'down',
'in', 'out', 'on', 'off', 'over', 'under', 'again', 'further', 'then', 'once', 'here',
'there', 'when', 'where', 'why', 'how', 'all', 'any', 'both', 'each', 'few', 'more',
'most', 'other', 'some', 'such', 'no', 'nor', 'not', 'only', 'own', 'same', 'so',
'than', 'too', 'very', 's', 't', 'can', 'will', 'just', 'don', 'should', 'now',
'll', 're', 'm', 've'
]
def parse_t(test_string):
return test_string.split()
def parse_d(training_string):
key1, key2, score = training_string.split()
return [key1, key2, -1 if score == '0' else 1]
def parse_topic(question_topic):
return question_topic['name'] if question_topic else ''
def stem(word):
return STEMMER.stem(word)
def parse_text(question_text):
return set(stem(w) for w in WORD_RE.findall(question_text.lower())
if w not in STOP_WORDS)
def parse_q(question_json):
q = loads(question_json)
q_body = {
'question_text': parse_text(q['question_text']),
'context_topic': parse_topic(q['context_topic']),
'topics': [parse_topic(t) for t in q['topics']],
'view_count': q['view_count'],
'follow_count': q['follow_count'],
'age': int(q['age'])
}
return (q['question_key'], q_body)
def score_context(q1, q2):
    """Context-topic features: (exact-match flag, cross-membership count 0-2)."""
    ctx_a, ctx_b = q1['context_topic'], q2['context_topic']
    # Does each question's context topic appear among the other's topics?
    a_in_b = bool(ctx_a) and ctx_a in q2['topics']
    b_in_a = bool(ctx_b) and ctx_b in q1['topics']
    # Exact context match only counts when both contexts are non-empty.
    same_ctx = bool(ctx_a) and bool(ctx_b) and ctx_a == ctx_b
    return int(same_ctx), a_in_b + b_in_a
def score_topics(q1, q2):
    """Number of topics the two questions have in common."""
    shared = set(q1['topics']).intersection(q2['topics'])
    return len(shared)
def generic_score(q1, q2, field_name):
    """Relative difference of a numeric field as an integer percentage.

    0 means identical values; 100 means one value is zero.  Returns 0 when
    both values are zero (avoids division by zero).
    """
    a, b = q1[field_name], q2[field_name]
    larger = max(a, b)
    if not larger:
        return 0
    return int(100 * abs(a - b) / float(larger))
def reduce_pair(key1, key2):
    """Build the 8-element feature vector for a pair of question keys.

    Looks the questions up in the module-level `questions` dict, so both
    keys must already have been parsed by parse_q.
    """
    q1, q2 = questions[key1], questions[key2]
    # Stemmed word sets of both question texts.
    t1, t2 = q1['question_text'], q2['question_text']
    n_words_matched = len(t1 & t2)
    n_words = len(t1 | t2)
    # Jaccard-style overlap as an integer percentage; 0 if both texts empty.
    percent_words_matched = int(100* float(n_words_matched) / n_words) if n_words else 0
    context_score1, context_score2 = score_context(q1, q2)
    topic_score = score_topics(q1, q2)
    # Relative-difference scores for the numeric metadata fields.
    view_score = generic_score(q1, q2, 'view_count')
    follow_score = generic_score(q1, q2, 'follow_count')
    age_score = generic_score(q1, q2, 'age')
    return [n_words_matched, percent_words_matched, context_score1,
            context_score2, topic_score, view_score, follow_score, age_score]
def predict(key1, key2, clf):
    """Classify a question pair: 1 for duplicate, 0 otherwise."""
    # Identical keys are trivially duplicates; skip the classifier.
    if key1 == key2:
        return 1
    features = reduce_pair(key1, key2)
    score = clf.predict([features])[0]
    # Training labels are {-1, +1}; map anything below 1 to 0.
    return 1 if score >= 1 else 0
## get data
# NOTE: this is a Python 2 script (raw_input/xrange/print statement).
# First Q lines: question JSON records, keyed by question_key.
Q = int(raw_input())
questions = dict(parse_q(raw_input()) for _ in xrange(Q))
## train here
# Next D lines: "key1 key2 score" training pairs; scores are mapped to
# the {-1, +1} labels the classifier is trained on.
D = int(raw_input())
training = (parse_d(raw_input()) for _ in xrange(D))
X, Y = zip(*((reduce_pair(k1, k2), -1 if score < 1 else 1) for k1,k2,score in training))
base_clf = RandomForestClassifier(n_estimators=7, min_samples_leaf=4)
# Bagging over small random forests; fixed random_state for reproducibility.
clf = Pipeline([
    ('classification', BaggingClassifier(base_estimator=base_clf, random_state=1, n_estimators=16))
]).fit(X, Y)
## predict here
# Final N lines: "key1 key2" pairs to classify; echo keys with the label.
N = int(raw_input())
testing = (parse_t(raw_input()) for _ in xrange(N))
for key1, key2 in testing:
    print key1, key2, predict(key1, key2, clf)
| mit | Python | |
5cf2c2c4dcbc9e0cca57a7634e5118c2dc278c75 | Add media compatibility | tysonholub/twilio-python,twilio/twilio-python | twilio/rest/resources/compatibility/media.py | twilio/rest/resources/compatibility/media.py | from twilio.rest.resources import InstanceResource, ListResource
class Media(InstanceResource):
    """A single media item attached to a Message; behaviour is inherited."""
    pass
class MediaList(ListResource):
    """List resource for media; call with a message sid to scope the listing."""
    def __call__(self, message_sid):
        # Rebase the resource URI onto the given message before listing.
        base_uri = "%s/Messages/%s" % (self.base_uri, message_sid)
        return MediaList(base_uri, self.auth, self.timeout)
a2e566cc0b925f80c30602141e890cdf9b13306b | Migrate to latest version of db. | PythonClutch/python-clutch,PythonClutch/python-clutch,PythonClutch/python-clutch | migrations/versions/1003fd6fc47_.py | migrations/versions/1003fd6fc47_.py | """empty message
Revision ID: 1003fd6fc47
Revises: 1a54c4cacbe
Create Date: 2015-03-24 13:33:50.898511
"""
# revision identifiers, used by Alembic.
revision = '1003fd6fc47'
down_revision = '1a54c4cacbe'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
    ### commands auto generated by Alembic - please adjust! ###
    # Replace the GitHub-specific URL column with a generic git URL and
    # drop the unused 'age' timestamp column.
    op.add_column('project', sa.Column('git_url', sa.String(length=400), nullable=True))
    op.drop_column('project', 'github_url')
    op.drop_column('project', 'age')
    ### end Alembic commands ###
def downgrade():
    ### commands auto generated by Alembic - please adjust! ###
    # Restore the previous schema; data in the dropped columns is not
    # recoverable on downgrade.
    op.add_column('project', sa.Column('age', postgresql.TIMESTAMP(), autoincrement=False, nullable=True))
    op.add_column('project', sa.Column('github_url', sa.VARCHAR(length=400), autoincrement=False, nullable=True))
    op.drop_column('project', 'git_url')
    ### end Alembic commands ###
| mit | Python | |
38cf6ee407468e192101cbd456411c56cbf09e68 | Add example of distribution fit on any selected feature | lidakanari/NeuroM,BlueBrain/NeuroM,juanchopanza/NeuroM,wizmer/NeuroM,liesbethvanherpe/NeuroM,mgeplf/NeuroM,eleftherioszisis/NeuroM | examples/extract_distribution.py | examples/extract_distribution.py | #!/usr/bin/env python
# Copyright (c) 2015, Ecole Polytechnique Federale de Lausanne, Blue Brain Project
# All rights reserved.
#
# This file is part of NeuroM <https://github.com/BlueBrain/NeuroM>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of
# its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''Extract a distribution for the selected feature of the population of neurons among
the exponential, normal and uniform distribution, according to the minimum ks distance.
'''
from neurom import ezy
from scipy import stats
import numpy as np
import argparse
def parse_args():
    '''Parse command line arguments: a data path and a neuron feature name.'''
    parser = argparse.ArgumentParser(
        description='Morphology fit distribution extractor',
        epilog='Note: Prints the optimal distribution and corresponding parameters.')
    parser.add_argument('datapath',
                        help='Path to morphology data file or directory')
    parser.add_argument('feature',
                        help='Feature available for the ezy.neuron')
    return parser.parse_args()
def distribution_fit(data, distribution='norm'):
'''Calculates and returns the parameters of a distribution'''
return getattr(stats, distribution).fit(data)
def distribution_error(data, distribution='norm'):
'''Calculates and returns the distance of a fitted distribution
from the initial data.
'''
params = distribution_fit(data, distribution=distribution)
return stats.kstest(data, distribution, params)[0]
def test_multiple_distr(data):
'''Runs the distribution fit for multiple distributions and returns
the optimal distribution along with the corresponding parameters.
'''
# Create a list of basic distributions
distr_to_check = ['norm', 'expon', 'uniform']
# Fit the section lengths of the neuron with a distribution.
fit_data = {d: distribution_fit(data, d) for d in distr_to_check}
# Get the error for the fitted data with each distribution.
fit_error = {distribution_error(data, d): d for d in distr_to_check}
# Select the distribution with the minimum ks distance from data
optimal = fit_error.values()[np.argmax(fit_error.iterkeys())]
return optimal, fit_data[optimal]
if __name__ == '__main__':
    # NOTE: Python 2 script (print statement below).
    args = parse_args()
    data_path = args.datapath
    feature = args.feature
    # Collect the requested feature from every neuron in the population.
    population = ezy.load_neurons(data_path)
    feature_data = [getattr(n, 'get_' + feature)() for n in population]
    try:
        result = test_multiple_distr(feature_data)
    except ValueError:
        # Per-neuron features may be sequences; flatten once and retry.
        from itertools import chain
        feature_data = list(chain(*feature_data))
        result = test_multiple_distr(feature_data)
    print "Optimal distribution fit for %s is: %s with parameters %s"\
        % (feature, result[0], result[1])
4aced6fea8ff8ccd087362cb237a9f00d111d0d8 | Add command to turn on locations flag | dimagi/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,puttarajubr/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,puttarajubr/commcare-hq,qedsoftware/commcare-hq,puttarajubr/commcare-hq,qedsoftware/commcare-hq,puttarajubr/commcare-hq | corehq/apps/commtrack/management/commands/toggle_locations.py | corehq/apps/commtrack/management/commands/toggle_locations.py | from django.core.management.base import BaseCommand
from corehq.apps.domain.models import Domain
from corehq.feature_previews import LOCATIONS
from corehq.toggles import NAMESPACE_DOMAIN
from toggle.shortcuts import update_toggle_cache, namespaced_item
from toggle.models import Toggle
class Command(BaseCommand):
    """One-off management command: turn on the LOCATIONS feature flag (and
    the locations_enabled setting) for every commtrack-enabled domain."""
    def handle(self, *args, **options):
        domains = Domain.get_all()
        for domain in domains:
            if domain.commtrack_enabled:
                toggle = Toggle.get(LOCATIONS.slug)
                toggle_user_key = namespaced_item(domain.name, NAMESPACE_DOMAIN)
                # Only save/update the cache when the flag actually changes.
                if toggle_user_key not in toggle.enabled_users:
                    toggle.enabled_users.append(toggle_user_key)
                    toggle.save()
                    update_toggle_cache(LOCATIONS.slug, toggle_user_key, True)
                if not domain.locations_enabled:
                    domain.locations_enabled = True
                    domain.save()
| bsd-3-clause | Python | |
1eaab9f929dc748e57865fb4c8717158e6c47fa5 | Add more index on contact activities | rapidpro/ureport,Ilhasoft/ureport,Ilhasoft/ureport,rapidpro/ureport,rapidpro/ureport,Ilhasoft/ureport,Ilhasoft/ureport,rapidpro/ureport | ureport/stats/migrations/0018_better_indexes.py | ureport/stats/migrations/0018_better_indexes.py | # Generated by Django 3.2.6 on 2021-10-13 12:37
from django.db import migrations
# language=SQL
INDEX_SQL_CONTACTACTIVITY_ORG_DATE_SCHEME_NOT_NULL = """
CREATE INDEX IF NOT EXISTS stats_contactactivity_org_id_date_scheme_not_null on stats_contactactivity (org_id, date, scheme) WHERE scheme IS NOT NULL;
"""
class Migration(migrations.Migration):
    dependencies = [
        ("stats", "0017_better_indexes"),
    ]
    operations = [
        # Partial index for scheme-filtered activity queries; reverse SQL is
        # empty, so the index is left in place on downgrade.
        migrations.RunSQL(INDEX_SQL_CONTACTACTIVITY_ORG_DATE_SCHEME_NOT_NULL, ""),
    ]
| agpl-3.0 | Python | |
9becada645e9680974dbb18fee10983d204dfd3d | Create low-res cubes, masks, and moment arrays | e-koch/VLA_Lband,e-koch/VLA_Lband | 14B-088/HI/analysis/cube_pipeline_lowres.py | 14B-088/HI/analysis/cube_pipeline_lowres.py |
'''
Convolve the VLA + GBT data to 2 * beam and 5 * beam, then run the
masking and moments pipeline.
Make signal masks and compute the moments.
'''
from astropy import log
import os
from radio_beam import Beam
from spectral_cube import SpectralCube
from cube_analysis import run_pipeline
from paths import (fourteenB_wGBT_HI_file_dict, fourteenB_HI_data_wGBT_path)
file_path = fourteenB_HI_data_wGBT_path("smooth_2beam", no_check=True)
if not os.path.exists(file_path):
os.mkdir(file_path)
cube = SpectralCube.read(fourteenB_wGBT_HI_file_dict["Cube"])
# Convolve to 2 * beam. May as well make it circular.
beam2 = Beam(2 * cube.beams.largest_beam().major)
conv_cube = cube.convolve_to(beam2)
file_name = os.path.join(file_path, "M33_14B-088_HI.clean.image.GBT_feathered.38arcsec.fits")
conv_cube.write(file_name)
del conv_cube
del cube
log.info("Masking and moments for the VLA+GBT 2 * beam cube")
run_pipeline(file_name,
file_path,
masking_kwargs={"method": "ppv_connectivity",
"save_cube": True,
"is_huge": True,
"noise_map": None,
"smooth_chans": 31,
"min_chan": 10,
"peak_snr": 5.,
"min_snr": 2,
"edge_thresh": 1,
"verbose": False,
"show_plots": False,
},
moment_kwargs={"num_cores": 6,
"verbose": True})
print("Running 5 beam convolution and masking.")
file_path = fourteenB_HI_data_wGBT_path("smooth_5beam", no_check=True)
if not os.path.exists(file_path):
os.mkdir(file_path)
cube = SpectralCube.read(fourteenB_wGBT_HI_file_dict["Cube"])
beam5 = Beam(5 * cube.beams.largest_beam().major)
conv_cube = cube.convolve_to(beam5)
file_name = os.path.join(file_path, "M33_14B-088_HI.clean.image.GBT_feathered.95arcsec.fits")
conv_cube.write(file_name)
del conv_cube
del cube
# VLA+GBT cube
log.info("Masking and moments for the VLA+GBT 5 * beam cube")
run_pipeline(file_name,
file_path,
masking_kwargs={"method": "ppv_connectivity",
"save_cube": True,
"is_huge": True,
"noise_map": None,
"smooth_chans": 31,
"min_chan": 10,
"peak_snr": 5.,
"min_snr": 2,
"edge_thresh": 1,
},
moment_kwargs={"num_cores": 1,
"verbose": True,
"chunk_size": 2e5})
| mit | Python | |
7651b436e9d817ffae7f8c64f6ee8088dd1ae889 | Add hall.py | Stefal/V4MPod,Stefal/V4MPod,Stefal/V4MPod,Stefal/V4MPod | raspberry/hall.py | raspberry/hall.py | import os
import sys
import smbus
import time
import datetime
import RPi.GPIO as GPIO
import PyCmdMessenger
import subprocess
import gpsd
import threading
import Adafruit_Nokia_LCD as LCD
import Adafruit_GPIO.SPI as SPI
import lcd_menu as menu
from queue import Queue
from PIL import Image
from PIL import ImageDraw
from PIL import ImageFont
cam_range=0b00001111
# Set Rpi.GPIO to BCM mode
GPIO.setmode(GPIO.BCM)
# Channel used to receive MCP interrupts
mcp1_inta_pin = 19
mcp1_intb_pin = 16
mcp2_inta_pin = 26
mcp2_intb_pin = 20
# Set these channels as input
GPIO.setup(mcp1_inta_pin, GPIO.IN)
GPIO.setup(mcp1_intb_pin, GPIO.IN)
GPIO.setup(mcp2_inta_pin, GPIO.IN)
GPIO.setup(mcp2_intb_pin, GPIO.IN)
#bus = smbus.SMBus(0) # Rev 1 Pi uses 0
bus = smbus.SMBus(1) # Rev 2 Pi uses 1
Keypressed = False
MCP1 = 0x21
MCP2 = 0x20 # Device address (A0-A2)
IODIRA = 0x00 # IO direction (0 = output, 1 = input (Default))
IODIRB = 0x01
IOPOLA = 0x02 # IO polarity (0 = normal, 1 = inverse)
IOPOLB = 0x03
GPINTENA =0x04 # Interrupt on change (0 = disable, 1 = enable)
GPINTENB =0x05
DEFVALA = 0x06 # Default comparison for interrupt on change (interrupts on opposite)
DEFVALB = 0x07
INTCONA = 0x08 # Interrupt control (0 = interrupt on change from previous, 1 = interrupt on change from DEFVAL)
INTCONB = 0x09
IOCON = 0x0A # IO Configuration: bank/mirror/seqop/disslw/haen/odr/intpol/notimp
#IOCON 0x0B // same as 0x0A
GPPUA = 0x0C # Pull-up resistor (0 = disabled, 1 = enabled)
GPPUB = 0x0D
INFTFA = 0x0E # Interrupt flag (read only) : (0 = no interrupt, 1 = pin caused interrupt)
INFTFB = 0x0F
INTCAPA = 0x10 # Interrupt capture (read only) : value of GPIO at time of last interrupt
INTCAPB = 0x11
GPIOA = 0x12 # Port value. Write to change, read to obtain value
GPIOB = 0x13
OLLATA = 0x14 # Output latch. Write to latch output.
OLLATB = 0x15
# For the MCP1 :
# On Bank A, there is only the hall sensor output on pin 1
# Set GPA pins as inputs by setting
# bits of IODIRA register to 1
# Set pins 1 as input
bus.write_byte_data(MCP1,IODIRA,0x01)
# Set GPIOA polarity to normal
bus.write_byte_data(MCP1, IOPOLA, 0x00)
# Enable pull up resistor on GPIOA
#bus.write_byte_data(MCP1, GPPUA, 0xFF)
# no mirror interrupts, disable sequential mode, active HIGH
bus.write_byte_data(MCP1, IOCON, 0b00100010)
#Configure interrupt on port A as "interrupt-on-pin change"
bus.write_byte_data(MCP1, INTCONA, 0x00)
#bus.write_byte_data(MCP1, DEFVALA, 0xFF)
#bus.write_byte_data(MCP1, INTCONA, 0xFF)
#Enable interrupt on port A
bus.write_byte_data(MCP1, GPINTENA, 0x01)
#set all outputs to HIGH
#bus.write_byte_data(MCP1, GPIOA, 0xFF)
# Read the INTCAPA to reset the int
bus.read_byte_data(MCP1, INTCAPA)
"""
for MyData in range(1,16):
# Count from 1 to 8 which in binary will count
# from 001 to 111
bus.write_byte_data(DEVICE,OLLATA,0x1)
time.sleep(1)
bus.write_byte_data(DEVICE,OLLATA,0x0)
time.sleep(1)
"""
#Hall pulse queue
hall_pulse_queue = Queue()
def hall_callback(hall_pin):
    """Interrupt handler for the MCP1 INTA line (hall sensor on GPA0).

    Reads INTCAPA (which also clears the interrupt on the MCP23017),
    queues a timestamp when the hall-sensor pin is low, then reads
    INTCAPA again so no interrupt is left latched.
    """
    print('Edge detected on MCP1 Hall sensor pin %s' % hall_pin)
    MCP1_status = bus.read_byte_data(MCP1, INTCAPA)
    # BUG FIX: the original tested `MCP1_status & 0b0 == 0`, which is always
    # true (masking with 0 always yields 0), so a pulse was queued on every
    # interrupt regardless of pin state.  The hall sensor sits on GPA0, so
    # test bit 0.  NOTE(review): assumes the sensor is active-low — confirm.
    if MCP1_status & 0b00000001 == 0:
        hall_pulse_queue.put(time.time())
    print("MCP1 pins status: ", bin(MCP1_status))
    # Second read clears any interrupt that latched while handling this one.
    bus.read_byte_data(MCP1, INTCAPA)
# add rising edge detection on a channel
GPIO.add_event_detect(mcp1_inta_pin, GPIO.RISING, callback=hall_callback)
#reset interrupt on mcp, or an already active interrupt
#would disable a new one, rendering the mcp unusable.
#bus.read_byte_data(MCP1, INTCAPA)
| mit | Python | |
8fbd7421e9517ead4293c62086f3305810c93b1b | Add initial manage/fabfile/ci.py (sketch) | rinfo/rdl,rinfo/rdl,rinfo/rdl,rinfo/rdl,rinfo/rdl,rinfo/rdl | manage/fabfile/ci.py | manage/fabfile/ci.py | from fabric.api import *
@task
@role('ci')
def install():
sudo("apt-get install git")
sudo("apt-get install maven2") # TODO: maven3
sudo("apt-get install groovy") # TODO: groovy-1.8, or gradle...
configure_groovy_grapes()
sudo("apt-get install python-dev")
sudo("apt-get install python-pip")
sudo("pip install fabric")
def configure_groovy_grapes():
run("mkdir -p ~/.groovy/")
# TODO:
#put("grapeConfig.xml", "~/.groovy/grapeConfig.xml")
| bsd-2-clause | Python | |
95d87c541ebf82109b882daebcb5b387f0f1cdb8 | Read the american physics society graph | charanpald/APGL | exp/influence2/ReputationExp2.py | exp/influence2/ReputationExp2.py | import numpy
try:
ctypes.cdll.LoadLibrary("/usr/local/lib/libigraph.so")
except:
pass
import igraph
from apgl.util.PathDefaults import PathDefaults
from exp.util.IdIndexer import IdIndexer
import xml.etree.ElementTree as ET
import array
metadataDir = PathDefaults.getDataDir() + "aps/aps-dataset-metadata-2010/"
metadataFilename = metadataDir + "PRSTAB.xml"
citationsDir = PathDefaults.getDataDir() + "aps/aps-dataset-citations-2010/"
citatonsFilename = citationsDir + "citing_cited.csv"
tree = ET.parse(metadataFilename)
root = tree.getroot()
# Index authors and articles separately; each indexer assigns dense ids.
authorIndexer = IdIndexer("i")
articleIndexer = IdIndexer("i")
for child in root:
    authorGroups = child.findall('authgrp')
    for authorGroup in authorGroups:
        authors = authorGroup.findall("author")
        for author in authors:
            # Authors are keyed by "given middle... surname" full name.
            if author.find("givenname") != None:
                fullname = author.find("givenname").text
            else:
                fullname = ""
            for middlename in author.findall("middlename"):
                fullname += " " + middlename.text
            fullname += " " + author.find("surname").text
            authorId = fullname
            articleId = child.attrib["doi"]
            # One (author, article) pair per authorship.
            authorIndexer.append(authorId)
            articleIndexer.append(articleId)
authorInds = authorIndexer.getArray()
articleInds = articleIndexer.getArray()
#We now need to read the citations file and add those edges
article1Inds = array.array("i")
article2Inds = array.array("i")
citationsFile = open(citatonsFilename)
# Skip the header line of the citing_cited CSV.
citationsFile.readline()
for line in citationsFile:
    vals = line.split(",")
    articleId1 = vals[0].strip()
    articleId2 = vals[1].strip()
    #print(articleId1, articleId2)
    # Keep only citations where both DOIs belong to this journal's metadata.
    articleIdDict = articleIndexer.getIdDict()
    if articleId1 in articleIdDict and articleId2 in articleIdDict:
        article1Inds.append(articleIdDict[articleId1])
        article2Inds.append(articleIdDict[articleId2])
article1Inds = numpy.array(article1Inds)
article2Inds = numpy.array(article2Inds)
authorArticleEdges = numpy.c_[authorInds, articleInds]
print(authorArticleEdges)
articleArticleEdges = numpy.c_[article1Inds, article2Inds]
print(articleArticleEdges)
print(articleArticleEdges.shape)
graph = igraph.Graph()
# NOTE(review): author ids and article ids share one vertex-id space here,
# so distinct authors/articles with the same index collide, and
# max(a)+max(b) vertices looks off by one — verify the intended layout.
graph.add_vertices(numpy.max(authorInds) + numpy.max(articleInds))
graph.add_edges(authorArticleEdges)
print(graph.summary())
| bsd-3-clause | Python | |
8f391cfd541f68a3c4bfc20be68c32d4e2d6798f | Add server script | developius/piometer | server.py | server.py | #!/usr/bin/python3
# import the necessary components
from flask import Flask, request, jsonify
app = Flask(__name__)
# define a dictionary to store our information in
info = {}
# listen for data at /data
@app.route("/data", methods=["GET", "POST"])
def api():
    """Accept a JSON stats payload from a device and log fleet-wide means."""
    # Parse the JSON body into a dict (None on malformed input).
    payload = request.get_json(silent=True)
    # pretty debug
    print("%s sent: %s" % (payload['uuid'], payload['stats']))
    # Store the latest reading for this device, keyed by its uuid.
    info[payload['uuid']] = {
        'cpu': payload['stats']['cpu'],
        'mem': payload['stats']['memory'],
    }
    # Average CPU and memory across every device seen so far; info is
    # never empty here because this device was just inserted.
    meanCpu = sum(entry['cpu'] for entry in info.values()) / len(info)
    meanMem = sum(entry['mem'] for entry in info.values()) / len(info)
    # more nice debugging
    print("Mean CPU: %s" % meanCpu)
    print("Mean memory: %s" % meanMem)
    # tell the client that all is well
    return(jsonify({"message": "ok"}))
# if file is called directly...
if __name__ == "__main__":
# ...start the server on port 8742
app.run(port=8742, host="0.0.0.0")
| mit | Python | |
a49d1d96b49eb6006e864bbaf2757cd5358b0110 | Create func.py | chapman3/proj5-map,chapman3/proj5-map | func.py | func.py | #it's fun, c?
def read_poi(file):
poi_dict = {}
#taken from project2
for line in file:
line = line.rstrip()
if len(line) == 0:
continue
parts = line.split(' ', 2)
print(parts[0],parts[1],parts[2])
poi_dict[parts[2]] = parts[0],parts[1]
return poi_dict
if __name__ == "__main__":
read_poi(open("static/data/poi.txt"))
| artistic-2.0 | Python | |
4810c88d484bc02fe5f7983dbf9cac0be5a440cd | Create reverse_word_order.py | lcnodc/codes,lcnodc/codes | 09-revisao/practice_python/reverse_word_order.py | 09-revisao/practice_python/reverse_word_order.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Exercise 15: Reverse Word Order
Write a program (using functions!) that asks the user for a long string
containing multiple words. Print back to the user the same string,
except with the words in backwards order. For example, say I type the
string:
My name is Michele
Then I would see the string:
Michele is name My
shown back to me.
"""
def reverse_word_order(word: str):
    """Return *word* with its space-separated words in reverse order."""
    pieces = word.split(" ")
    pieces.reverse()
    return " ".join(pieces)
long_string = input("Write a long string: ")
print("The string in backwards order: %s" % reverse_word_order(long_string))
| mit | Python | |
c460874436ee087a50f9f7ec06c15ae9a110a656 | Initialize web spider class definition & imports | MaxLikelihood/CODE | spider.py | spider.py | from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.selector import Selector
| mit | Python | |
3f6e3d60588dec447fffbfc7e5fc65f34cbd3aa7 | fix bug in version.py subprocess call | jerkos/cobrapy,zakandrewking/cobrapy,aebrahim/cobrapy,JuBra/cobrapy,JuBra/cobrapy,jeicher/cobrapy,jeicher/cobrapy,zakandrewking/cobrapy,jerkos/cobrapy,aebrahim/cobrapy | cobra/version.py | cobra/version.py | #!/usr/bin/env python
"""
Tracks the version number. If git is installed and file script
is located within a git repository, git describe is used to get
the version information. This version string is sanitized to
comply with PEP 386 and stored in the RELEASE-VERSION file.
If git describe can not be run, the RELEASE-VERSION file is used
for version information instead.
"""
__all__ = ("get_git_version")
from subprocess import check_output
from os import path
current_dir = path.dirname(path.abspath(__file__))
version_file = path.join(current_dir, "RELEASE-VERSION")
def call_git_describe(abbrev=4):
    # Run `git describe --tags` from this package's directory; any failure
    # (git missing, not a repo, no tags...) falls back to None so the cached
    # RELEASE-VERSION file is used instead.
    try:
        return check_output(["git", "describe", "--tags",
                             "--abbrev=%d" % abbrev], cwd=current_dir).strip()
    except:
        return None
def read_release_version():
    """Return the version cached in RELEASE-VERSION, or None when the file
    is missing, unreadable or empty."""
    try:
        with open(version_file, "r") as infile:
            version = infile.read().strip()
        if len(version) == 0:
            version = None
        return version
    except (IOError, OSError):
        # A missing/unreadable cache file simply means "no cached version";
        # the bare `except:` this replaces also hid genuine bugs.
        return None
def write_release_version(version):
    # Cache the current version so builds without git still know it.
    with open(version_file, "w") as outfile:
        outfile.write("%s\n" % version)
def get_git_version(abbrev=4):
    """Return the package version, preferring live `git describe` output and
    keeping the RELEASE-VERSION cache file in sync with it."""
    # Read in the version that's currently in RELEASE-VERSION.
    release_version = read_release_version()
    # First try to get the current version using "git describe".
    version = call_git_describe(abbrev)
    #adapt to PEP 386 compatible versioning scheme
    version = pep386adapt(version)
    # If that doesn't work, fall back on the value that's in
    # RELEASE-VERSION.
    if version is None:
        version = release_version
    # If we still don't have anything, that's an error.
    if version is None:
        raise ValueError("Cannot find the version number!")
    # If the current version is different from what's in the
    # RELEASE-VERSION file, update the file to be current.
    if version != release_version:
        write_release_version(version)
    # Finally, return the current version.
    return version
def pep386adapt(version):
    """Rewrite a `git describe` string (e.g. '0.5-12-gabcd') into a PEP 386
    style version ('0.5.post12'); anything without a dash passes through."""
    if version is None:
        return None
    if '-' not in version:
        return version
    # tag, commit count, abbreviated hash
    pieces = version.split('-')
    pieces[-2] = 'post' + pieces[-2]
    # Drop the trailing hash and rejoin with dots.
    return '.'.join(pieces[:-1])
if __name__ == "__main__":
print get_git_version()
| #!/usr/bin/env python
"""
Tracks the version number. If git is installed and file script
is located within a git repository, git describe is used to get
the version information. This version string is sanitized to
comply with PEP 386 and stored in the RELEASE-VERSION file.
If git describe can not be run, the RELEASE-VERSION file is used
for version information instead.
"""
__all__ = ("get_git_version")
from subprocess import check_output
from os import path
current_dir = path.dirname(path.abspath(__file__))
version_file = path.join(current_dir, "RELEASE-VERSION")
def call_git_describe(abbrev=4):
    """Run `git describe --tags` in this package's directory and return the
    stripped output, or None when the call fails for any reason."""
    try:
        # BUG FIX: check_output takes `cwd=`, not `dir=`; the bogus keyword
        # raised TypeError, which the except swallowed, so this function
        # always returned None and the git version was never picked up.
        return check_output(["git", "describe", "--tags",
                             "--abbrev=%d" % abbrev], cwd=current_dir).strip()
    except Exception:
        return None
def read_release_version():
try:
with open(version_file, "r") as infile:
version = infile.read().strip()
if len(version) == 0:
version = None
return version
except:
return None
def write_release_version(version):
with open(version_file, "w") as outfile:
outfile.write("%s\n" % version)
def get_git_version(abbrev=4):
# Read in the version that's currently in RELEASE-VERSION.
release_version = read_release_version()
# First try to get the current version using "git describe".
version = call_git_describe(abbrev)
#adapt to PEP 386 compatible versioning scheme
version = pep386adapt(version)
# If that doesn't work, fall back on the value that's in
# RELEASE-VERSION.
if version is None:
version = release_version
# If we still don't have anything, that's an error.
if version is None:
raise ValueError("Cannot find the version number!")
# If the current version is different from what's in the
# RELEASE-VERSION file, update the file to be current.
if version != release_version:
write_release_version(version)
# Finally, return the current version.
return version
def pep386adapt(version):
if version is None:
return
if '-' in version:
# adapt git-describe version to be in line with PEP 386
parts = version.split('-')
parts[-2] = 'post'+parts[-2]
version = '.'.join(parts[:-1])
return version
if __name__ == "__main__":
print get_git_version()
| lgpl-2.1 | Python |
10e1866abffadf61f8593159006b8dbf431afd6b | Add convert module tests | orbingol/NURBS-Python,orbingol/NURBS-Python | tests/test_convert.py | tests/test_convert.py | """
Tests for the NURBS-Python package
Released under The MIT License. See LICENSE file for details.
Copyright (c) 2018 Onur Rauf Bingol
Tests B-Spline to NURBS conversions. Requires "pytest" to run.
"""
from geomdl import BSpline
from geomdl import convert
SAMPLE_SIZE = 5
C_DEGREE = 2
C_CTRLPTS = [[1, 1, 0], [2, 1, -1], [2, 2, 0]]
C_KV = [0, 0, 0, 1, 1, 1]
S_DEGREE_U = 2
S_DEGREE_V = 2
S_CTRLPTS = [[0, 0, 0], [0, 1, 0], [0, 2, -3],
[1, 0, 6], [1, 1, 0], [1, 2, 0],
[2, 0, 0], [2, 1, 0], [2, 2, 3]]
S_KV_U = [0, 0, 0, 1, 1, 1]
S_KV_V = [0, 0, 0, 1, 1, 1]
def test_convert_curve():
    """A converted B-Spline curve becomes rational with unit weights and
    evaluates to the same points as the original."""
    curve_bs = BSpline.Curve()
    curve_bs.degree = C_DEGREE
    curve_bs.ctrlpts = C_CTRLPTS
    curve_bs.knotvector = C_KV
    curve_bs.sample_size = SAMPLE_SIZE
    curve_nurbs = convert.bspline_to_nurbs(curve_bs)
    curve_nurbs.sample_size = SAMPLE_SIZE
    # Expected weights vector
    res_weights = [1.0 for _ in range(len(C_CTRLPTS))]
    # Expected evaluation result
    res = [[1.0, 1.0, 0.0], [1.4375, 1.0625, -0.375], [1.75, 1.25, -0.5], [1.9375, 1.5625, -0.375], [2.0, 2.0, 0.0]]
    assert not curve_bs.rational
    assert curve_nurbs.rational
    assert curve_nurbs.evalpts == res
    assert curve_nurbs.weights == tuple(res_weights)
def test_convert_surface():
    """Same conversion contract as the curve case, for a 3x3 surface."""
    surf_bs = BSpline.Surface()
    surf_bs.degree_u = S_DEGREE_U
    surf_bs.degree_v = S_DEGREE_V
    surf_bs.ctrlpts_size_u = 3
    surf_bs.ctrlpts_size_v = 3
    surf_bs.ctrlpts = S_CTRLPTS
    surf_bs.knotvector_u = S_KV_U
    surf_bs.knotvector_v = S_KV_V
    surf_nurbs = convert.bspline_to_nurbs(surf_bs)
    surf_nurbs.sample_size = SAMPLE_SIZE
    # Expected weights vector
    res_weights = [1.0 for _ in range(3*3)]
    # Expected output
    res = [[0.0, 0.0, 0.0], [0.0, 0.5, -0.1875], [0.0, 1.0, -0.75], [0.0, 1.5, -1.6875],
           [0.0, 2.0, -3.0], [0.5, 0.0, 2.25], [0.5, 0.5, 1.171875], [0.5, 1.0, 0.1875],
           [0.5, 1.5, -0.703125], [0.5, 2.0, -1.5], [1.0, 0.0, 3.0], [1.0, 0.5, 1.6875],
           [1.0, 1.0, 0.75], [1.0, 1.5, 0.1875], [1.0, 2.0, 0.0], [1.5, 0.0, 2.25],
           [1.5, 0.5, 1.359375], [1.5, 1.0, 0.9375], [1.5, 1.5, 0.984375], [1.5, 2.0, 1.5],
           [2.0, 0.0, 0.0], [2.0, 0.5, 0.1875], [2.0, 1.0, 0.75], [2.0, 1.5, 1.6875], [2.0, 2.0, 3.0]]
    assert not surf_bs.rational
    assert surf_nurbs.rational
    assert surf_nurbs.evalpts == res
    assert surf_nurbs.weights == tuple(res_weights)
| mit | Python | |
957490251e5038d9fb963f0c43ea3973e763c134 | Add test | saketkc/moca,saketkc/moca,saketkc/moca | tests/test_plotter.py | tests/test_plotter.py | from moca.plotter import create_plot
import os
import pytest
def join_path(head, leaf):
    """Join two path components (thin wrapper over os.path.join)."""
    return os.path.join(head, leaf)


# BUG FIX: the image-comparison marker was attached to join_path, so the
# figure returned by test_image was never compared against the baseline.
@pytest.mark.mpl_image_compare(baseline_dir='data/images',
                               filename='ENCSR000AKB_PhyloP_1.png')
def test_image():
    # Inputs for a single motif from the ENCSR000AKB sample run.
    base_path = 'tests/data/ENCSR000AKB/'
    meme_file = base_path+'moca_output/meme_out/meme.txt'
    plot_title = 'ENCSR000AKB.sorted'
    oc = 'tests/data/generated_out'
    motif_number = 1
    flank_motif = 5
    sample_score_files = [base_path+'moca_output/fimo_out_1/phylop.mean.txt']
    control_score_files = [base_path+'moca_output/fimo_random_1/phylop.mean.txt']
    plot_titles = ['PhyloP']
    centrimo_dir = base_path + 'moca_output/centrimo_out'
    figs = create_plot(meme_file,
                       plot_title,
                       output_dir=oc,
                       centrimo_dir=centrimo_dir,
                       motif_number=motif_number,
                       flank_length=flank_motif,
                       sample_score_files=sample_score_files,
                       control_score_files=control_score_files,
                       reg_plot_titles=plot_titles,
                       annotate=None,
                       save=False)
    # pytest-mpl compares the returned figure to the stored baseline image.
    return figs[0]
| isc | Python | |
08047dddf65f44bf4312e639ad0009bd1ab6f837 | Add routing tests (incl. one xfail for nesting) | andrewgodwin/django-channels,django/channels,andrewgodwin/channels | tests/test_routing.py | tests/test_routing.py | from unittest.mock import MagicMock
import django
import pytest
from django.conf.urls import url
from channels.http import AsgiHandler
from channels.routing import ChannelNameRouter, ProtocolTypeRouter, URLRouter
def test_protocol_type_router():
"""
Tests the ProtocolTypeRouter
"""
# Test basic operation
router = ProtocolTypeRouter({
"websocket": MagicMock(return_value="ws"),
"http": MagicMock(return_value="http"),
})
assert router({"type": "websocket"}) == "ws"
assert router({"type": "http"}) == "http"
# Test defaulting to AsgiHandler
router = ProtocolTypeRouter({
"websocket": MagicMock(return_value="ws"),
})
assert isinstance(router({"type": "http"}), AsgiHandler)
# Test an unmatched type
with pytest.raises(ValueError):
router({"type": "aprs"})
# Test a scope with no type
with pytest.raises(KeyError):
router({"tyyyype": "http"})
def test_channel_name_router():
"""
Tests the ChannelNameRouter
"""
# Test basic operation
router = ChannelNameRouter({
"test": MagicMock(return_value=1),
"other_test": MagicMock(return_value=2),
})
assert router({"channel": "test"}) == 1
assert router({"channel": "other_test"}) == 2
# Test an unmatched channel
with pytest.raises(ValueError):
router({"channel": "chat"})
# Test a scope with no channel
with pytest.raises(ValueError):
router({"type": "http"})
def test_url_router():
"""
Tests the URLRouter
"""
posarg_app = MagicMock(return_value=4)
kwarg_app = MagicMock(return_value=5)
router = URLRouter([
url(r"^$", MagicMock(return_value=1)),
url(r"^foo/$", MagicMock(return_value=2)),
url(r"^bar", MagicMock(return_value=3)),
url(r"^posarg/(\d+)/$", posarg_app),
url(r"^kwarg/(?P<name>\w+)/$", kwarg_app),
])
# Valid basic matches
assert router({"type": "http", "path": "/"}) == 1
assert router({"type": "http", "path": "/foo/"}) == 2
assert router({"type": "http", "path": "/bar/"}) == 3
assert router({"type": "http", "path": "/bar/baz/"}) == 3
# Valid positional matches
assert router({"type": "http", "path": "/posarg/123/"}) == 4
assert posarg_app.call_args[0][0]["url_route"] == {"args": ("123",), "kwargs": {}}
assert router({"type": "http", "path": "/posarg/456/"}) == 4
assert posarg_app.call_args[0][0]["url_route"] == {"args": ("456",), "kwargs": {}}
# Valid keyword argument matches
assert router({"type": "http", "path": "/kwarg/hello/"}) == 5
assert kwarg_app.call_args[0][0]["url_route"] == {"args": tuple(), "kwargs": {"name": "hello"}}
assert router({"type": "http", "path": "/kwarg/hellothere/"}) == 5
assert kwarg_app.call_args[0][0]["url_route"] == {"args": tuple(), "kwargs": {"name": "hellothere"}}
# Invalid matches
with pytest.raises(ValueError):
router({"type": "http", "path": "/nonexistent/"})
# NOTE(review): marked xfail — merging of outer and inner captures for
# nested URLRouters appears not to be implemented yet; confirm intent.
@pytest.mark.xfail
def test_url_router_nesting():
    """
    Tests that nested URLRouters add their keyword captures together.
    """
    test_app = MagicMock(return_value=1)
    inner_router = URLRouter([
        url(r"^book/(?P<book>[\w\-]+)/page/(\d+)/$", test_app),
    ])
    outer_router = URLRouter([
        url(r"^universe/(\d+)/author/(?P<author>\w+)/$", inner_router),
    ])
    assert outer_router({"type": "http", "path": "/universe/42/author/andrewgodwin/book/channels-guide/page/10/"}) == 1
    # Expected: positional args concatenate outer-to-inner, kwargs merge.
    assert test_app.call_args[0][0]["url_route"] == {
        "args": ("42", "10"),
        "kwargs": {"book": "channels-guide", "author": "andrewgodwin"},
    }
# path() only exists from Django 2.0 onwards, hence the version guard.
@pytest.mark.skipif(django.VERSION[0] < 2, reason="Needs Django 2.x")
def test_url_router_path():
    """
    Tests that URLRouter also works with path()
    """
    from django.urls import path
    kwarg_app = MagicMock(return_value=3)
    router = URLRouter([
        path("", MagicMock(return_value=1)),
        path("foo/", MagicMock(return_value=2)),
        path("author/<name>/", kwarg_app),
        path("year/<int:year>/", kwarg_app),
    ])
    # Valid basic matches
    assert router({"type": "http", "path": "/"}) == 1
    assert router({"type": "http", "path": "/foo/"}) == 2
    # Named without typecasting: capture stays a string.
    assert router({"type": "http", "path": "/author/andrewgodwin/"}) == 3
    assert kwarg_app.call_args[0][0]["url_route"] == {"args": tuple(), "kwargs": {"name": "andrewgodwin"}}
    # Named with typecasting: <int:...> converts the capture to int.
    assert router({"type": "http", "path": "/year/2012/"}) == 3
    assert kwarg_app.call_args[0][0]["url_route"] == {"args": tuple(), "kwargs": {"year": 2012}}
    # Invalid matches
    with pytest.raises(ValueError):
        router({"type": "http", "path": "/nonexistent/"})
| bsd-3-clause | Python | |
ed0a2a8fc20a44499d9db03d2eb8fcd58c1b0cd3 | Add unit tests | prkumar/uplink | tests/test_session.py | tests/test_session.py | # Local imports
from uplink import session
def test_base_url(uplink_builder_mock):
    """Session.base_url should mirror the underlying builder's base_url."""
    uplink_builder_mock.base_url = "https://api.github.com"
    sess = session.Session(uplink_builder_mock)
    assert sess.base_url == uplink_builder_mock.base_url
def test_headers(uplink_builder_mock):
    """Writing into Session.headers registers a hook on the builder and
    the mapping is readable back from the session."""
    # Setup
    sess = session.Session(uplink_builder_mock)
    # Run
    sess.headers["key"] = "value"
    # Verify
    uplink_builder_mock.add_hook.assert_called()
    assert sess.headers == {"key": "value"}
def test_params(uplink_builder_mock):
    """Writing into Session.params registers a hook on the builder and
    the mapping is readable back from the session."""
    # Setup
    sess = session.Session(uplink_builder_mock)
    # Run
    sess.params["key"] = "value"
    # Verify
    uplink_builder_mock.add_hook.assert_called()
    assert sess.params == {"key": "value"}
def test_auth(uplink_builder_mock):
    """Session.auth should read through to the builder's auth tuple."""
    # Setup
    uplink_builder_mock.auth = ("username", "password")
    sess = session.Session(uplink_builder_mock)
    # Run & Verify
    assert uplink_builder_mock.auth == sess.auth
def test_auth_set(uplink_builder_mock):
    """Assigning Session.auth should write through to the builder."""
    # Setup
    sess = session.Session(uplink_builder_mock)
    # Run
    sess.auth = ("username", "password")
    # Verify
    assert ("username", "password") == uplink_builder_mock.auth
| mit | Python | |
f1b22c952dabb3b66638000078e1ab2d0b7acea2 | Add missing utils file | ojarva/home-info-display,ojarva/home-info-display,ojarva/home-info-display,ojarva/home-info-display | homedisplay/homedisplay/utils.py | homedisplay/homedisplay/utils.py | import redis
import json
redis_instance = redis.StrictRedis()
def publish_ws(key, content):
    """Broadcast a key/content message to websocket listeners over the
    shared redis pub/sub channel."""
    message = json.dumps({"key": key, "content": content})
    redis_instance.publish("home:broadcast:generic", message)
| bsd-3-clause | Python | |
f338d34e750fd4d06cd0992c7f457c403b1cff3b | add a simple tool to dump the GETLBASTATUS provisioning status | AHelper/python-scsi,AHelper/python-scsi,rosjat/python-scsi | tools/getlbastatus.py | tools/getlbastatus.py | #!/usr/bin/env python
# coding: utf-8
import sys
from pyscsi.pyscsi.scsi import SCSI
from pyscsi.pyscsi.scsi_device import SCSIDevice
from pyscsi.pyscsi.scsi_enum_getlbastatus import P_STATUS
def usage():
    # Print the command-line synopsis (Python 2 print statement).
    print 'Usage: getlbastatus.py [--help] [-l <lba>] <device>'
def main():
    """Dump the GET LBA STATUS provisioning map of a SCSI device.

    Python 2 script; parses argv by hand, mutating sys.argv in place to
    strip out the optional '-l <lba>' starting-LBA argument.
    """
    i = 1
    lba = 0
    while i < len(sys.argv):
        if sys.argv[i] == '--help':
            return usage()
        if sys.argv[i] == '-l':
            # Remove the flag and its value from argv so the device name
            # ends up at sys.argv[1].
            del sys.argv[i]
            lba = int(sys.argv[i], 10)
            del sys.argv[i]
            continue
        i += 1
    if len(sys.argv) < 2:
        return usage()
    device = sys.argv[1]
    sd = SCSIDevice(device)
    s = SCSI(sd)
    # READ CAPACITY(16): the lbpme bit says whether logical block
    # provisioning management is enabled on the LUN at all.
    r = s.readcapacity16().result
    if not r['lbpme']:
        print 'LUN is fully provisioned.'
        return
    # GET LBA STATUS starting at the requested LBA; print each extent
    # with its provisioning status.
    r = s.getlbastatus(lba).result
    for i in range(len(r['lbas'])):
        print 'LBA:%d-%d %s' % (
            r['lbas'][i]['lba'],
            r['lbas'][i]['lba'] + r['lbas'][i]['num_blocks'] - 1,
            P_STATUS[r['lbas'][i]['p_status']]
        )

if __name__ == "__main__":
    main()
| lgpl-2.1 | Python | |
46c816f169b29a8fe91f14ab477222873d9bed88 | Add DocTestParser | orenault/TestLink-API-Python-client | robot/docparser.py | robot/docparser.py | import re
class DocTestParser(object):
    """Find all externaltestcaseid's in a test's docstring.

    If your externaltestcaseid prefix is abc and the test has 'abc-123' in
    its docstring, ``DocTestParser('abc').get_testcases(test)`` would
    return ``{'abc-123'}``.
    """

    def __init__(self, doc_matcher=None, doc_matchers=None):
        """
        :param doc_matcher: single regex prefix (convenience shorthand)
        :param doc_matchers: list of regex prefixes to find in docstrings
        """
        # Copy the caller's list: the original aliased it, so appending
        # doc_matcher mutated the caller's object.
        self.doc_matchers = list(doc_matchers) if doc_matchers is not None else []
        if doc_matcher:
            self.doc_matchers.append(doc_matcher)

    def get_testcases(self, test):
        """Return the set of '<prefix>-<number>' ids found in test.doc."""
        testcases = set()
        for matcher in self.doc_matchers:
            # Raw string so '\d' is a regex digit class rather than an
            # invalid string escape (a SyntaxWarning on modern CPython).
            testcases |= set(re.findall(r'{}-\d+'.format(matcher), test.doc))
        return testcases
| apache-2.0 | Python | |
4f2c91c06ab13eec02ef0199ef45d0eeaf555ea7 | Add dunder init for lowlevel. | python-astrodynamics/astrodynamics,python-astrodynamics/astrodynamics | astrodynamics/lowlevel/__init__.py | astrodynamics/lowlevel/__init__.py | # coding: utf-8
from __future__ import absolute_import, division, print_function
| mit | Python | |
de10593be3c513d41423dffcedd220c02dd37d6c | Add config_default.py | tranhuucuong91/simple-notebooks,tranhuucuong91/simple-notebooks,tranhuucuong91/simple-notebooks | config_default.py | config_default.py | # -*- coding: utf-8 -*-
"""
Created on 2015-10-23 08:06:00
@author: Tran Huu Cuong <tranhuucuong91@gmail.com>
"""
import os
# Blog configuration values.
# You may consider using a one-way hash to generate the password, and then
# use the hash again in the login view to perform the comparison. This is just
# for simplicity.
ADMIN_PASSWORD = 'admin@secret'
APP_DIR = os.path.dirname(os.path.realpath(__file__))
# The playhouse.flask_utils.FlaskDB object accepts database URL configuration.
DATABASE = 'sqliteext:///%s' % os.path.join(APP_DIR, 'blog.db')
DEBUG = False
# The secret key is used internally by Flask to encrypt session data stored
# in cookies. Make this unique for your app.
SECRET_KEY = 'shhh, secret!'
# This is used by micawber, which will attempt to generate rich media
# embedded objects with maxwidth=800.
SITE_WIDTH = 800
| mit | Python | |
95bb2d362e6a41b4e6421b5e8752b5040ea23d3f | Test file | nikhilkalige/flir | main.py | main.py | from flir.stream import Stream
from flir.flir import FLIR
import time
# Ad-hoc hardware smoke test for a FLIR pan/tilt unit at a fixed lab IP.
# Raw-Stream variant kept for reference:
#h = Stream("129.219.136.149", 4000)
#h.connect()
#time.sleep(5)
#h.write("PP-500".encode("ascii"))

x = FLIR("129.219.136.149", 4000)
x.connect()
x.pan(30)  # presumably sets absolute pan position -- confirm FLIR API
print(x.pan())  # calling with no argument appears to read back the pan value
x.pan_offset(10)  # relative pan adjustment
print(x.pan())
x.stream.close()
| bsd-3-clause | Python | |
e2020af5ccd41f8571a2d0db4f5345ca9a8b561e | Add migration for db changes | DataONEorg/d1_python,DataONEorg/d1_python,DataONEorg/d1_python,DataONEorg/d1_python | gmn/src/d1_gmn/app/migrations/0010_auto_20170805_0107.py | gmn/src/d1_gmn/app/migrations/0010_auto_20170805_0107.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-08-05 01:07
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated Django migration (2017-08-05).

    Replaces the ChainIdToSeriesID / PersistentIdToChainID pair with the
    new Chain / ChainMember models for tracking obsolescence chains.
    """

    dependencies = [
        ('app', '0009_auto_20170603_0546'),
    ]

    operations = [
        # New chain model: one row per revision chain, pointing at the
        # chain's head PID and (optionally) its SID.
        migrations.CreateModel(
            name='Chain',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('head_pid', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='chain_head_pid', to='app.IdNamespace')),
                ('sid', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='chain_sid', to='app.IdNamespace')),
            ],
        ),
        # One row per PID that belongs to a chain.
        migrations.CreateModel(
            name='ChainMember',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('chain', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app.Chain')),
                ('pid', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='chainmember_pid', to='app.IdNamespace')),
            ],
        ),
        # Drop the superseded mapping models (fields first, then models).
        migrations.RemoveField(
            model_name='chainidtoseriesid',
            name='head_pid',
        ),
        migrations.RemoveField(
            model_name='chainidtoseriesid',
            name='sid',
        ),
        migrations.RemoveField(
            model_name='persistentidtochainid',
            name='chain',
        ),
        migrations.RemoveField(
            model_name='persistentidtochainid',
            name='pid',
        ),
        migrations.DeleteModel(
            name='ChainIdToSeriesID',
        ),
        migrations.DeleteModel(
            name='PersistentIdToChainID',
        ),
    ]
| apache-2.0 | Python | |
a0cd167b9f19e2a4a9d1f2a80bc3586cce15c6ab | Add GMN DB migration to current | DataONEorg/d1_python,DataONEorg/d1_python,DataONEorg/d1_python,DataONEorg/d1_python | gmn/src/d1_gmn/app/migrations/0019_auto_20190418_1512.py | gmn/src/d1_gmn/app/migrations/0019_auto_20190418_1512.py | # Generated by Django 2.2 on 2019-04-18 20:12
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated Django migration (2019-04-18).

    Clears the model Meta options on EventLog and ScienceObject
    (AlterModelOptions to empty dicts); no schema change.
    """

    dependencies = [
        ('app', '0018_auto_20180901_0115'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='eventlog',
            options={},
        ),
        migrations.AlterModelOptions(
            name='scienceobject',
            options={},
        ),
    ]
| apache-2.0 | Python | |
9ffc52e4cfabff9ee1bd669d76e25c54a3cafffc | Revert "shit" | inhumanundead/getjams | main.py | main.py | # GetJams - get some new jams for ya mental
import musicbrainzngs as jams
import random,sys,os,wx
jams.set_useragent("GetJams","1.0","inhumanundead@gmail.com")
def GetArtist(genre, count=5):
    """Return `count` randomly picked artist names tagged with `genre`.

    Queries MusicBrainz for artists matching the tag and draws `count`
    independent random picks (repeats possible), matching the original
    five copy-pasted append lines.

    :param genre: MusicBrainz tag to search for.
    :param count: number of names to return (default 5, as before).
    """
    artists = jams.search_artists(tag=genre)['artist-list']
    # random.randint is inclusive on both ends, hence len - 1.
    return [artists[random.randint(0, len(artists) - 1)]['sort-name']
            for _ in range(count)]
def GetSimilar(artist):
    """Return 5 artist names that share the first tag of `artist`.

    Bug fix: the random index used to be bounded by len(x), the length of
    the tag *string*, instead of the length of the artist result list --
    so it could index out of range or never reach most results.
    """
    tag = jams.search_artists(sortname=artist)['artist-list'][0]['tag-list'][0]['name']
    similar = jams.search_artists(tag=tag)['artist-list']
    return [similar[random.randint(0, len(similar) - 1)]['sort-name']
            for _ in range(5)]
class GetJamsWindow(wx.Frame):
    """Main window: a text box plus 'Genre' and 'Artist' buttons that fill
    the results label with five artist names from MusicBrainz."""

    def __init__(self,parent,title):
        wx.Frame.__init__(self,parent,title=title,size=(640,300))
        # Input box and the two search buttons, positioned absolutely.
        self.textbox = wx.TextCtrl(self, size=(126,30), pos=(250,20))
        self.button = wx.Button(self, wx.ID_OK, "  Genre  ",pos=(250+36,55),size=(48+4,30))
        self.button2 = wx.Button(self,wx.ID_OK, "Artist",pos=(250+42,55+30),size=(42,30))
        # Results are rendered into this static label.
        self.artist_text = wx.StaticText(self,label="Artists:\n\n",pos=(275,125),size=(640,410),style=wx.ALIGN_CENTER)
        self.CreateStatusBar()
        self.button.Bind(wx.EVT_BUTTON,self.GenreSearch)
        self.button2.Bind(wx.EVT_BUTTON,self.ArtistSearch)
        self.Show(True)

    def ArtistSearch(self,event):
        # Look up artists similar to the typed artist name.
        z = GetSimilar(self.textbox.GetValue())
        self.artist_text.SetLabel("Artists:\n\n"+z[0]+"\n"+z[1]+"\n"+z[2]+"\n"+z[3]+"\n"+z[4])

    def GenreSearch(self,event):
        # Look up artists for the typed genre tag.
        z = GetArtist(self.textbox.GetValue())
        self.artist_text.SetLabel("Artists:\n\n"+z[0]+"\n"+z[1]+"\n"+z[2]+"\n"+z[3]+"\n"+z[4])
# Application entry point: create the wx app and run its event loop.
if __name__ == "__main__":
    app = wx.App(False)
    frame = GetJamsWindow(None, "GetJams")
    app.MainLoop()
| bsd-3-clause | Python | |
a6ef1e2456f84b50102c4192984b0c18b9c81a27 | Create scriptGenerator.py | AlexEnriquez/Script-Generator | scriptGenerator.py | scriptGenerator.py | #!/usr/bin/env python
#GENERATE A NEW SCRIPT
def Creation():
    """Create (or truncate) the output script file generator.py."""
    with open('generator.py', 'w'):
        pass
def Save():
    """Append the generated PyQt4 demo program to generator.py, line by
    line.  The emitted script shows a window with a line edit and a
    button that pops the typed text up in a message box.

    The exact bytes written (tab indentation, spacing) are the generated
    program's source, so they must not be reformatted.
    """
    fichero=open('generator.py','a')
    fichero.write('from PyQt4.QtGui import *\n')
    fichero.write('import sys\n')
    fichero.write('class Window(QWidget):\n')
    fichero.write('\tdef __init__(self,parent=None):\n')
    fichero.write('\t\tQWidget.__init__(self)\n')
    fichero.write('\t\tself.label=QLabel("Show a message")\n')
    fichero.write('\t\tself.lineEdit=QLineEdit()\n')
    fichero.write('\t\tself.lineEdit.setPlaceholderText("Write Here")\n')
    fichero.write('\t\tself.button=QPushButton("Show message")\n')
    fichero.write('\t\tself.layout=QVBoxLayout()\n')
    fichero.write('\t\tself.layout.addWidget(self.label)\n')
    fichero.write('\t\tself.layout.addWidget(self.lineEdit)\n')
    fichero.write('\t\tself.layout.addWidget(self.button)\n')
    fichero.write('\t\tself.setLayout(self.layout)\n')
    fichero.write('\t\tself.button.clicked.connect(self.Mensaje)\n')
    fichero.write('\tdef Mensaje(self):\n')
    fichero.write('\t\tmensaje=QMessageBox.information(self,"mensaje",self.lineEdit.text())\n')
    fichero.write('if __name__=="__main__":\n')
    fichero.write('\ttry:\n')
    fichero.write('\t\tapp=QApplication(sys.argv)\n')
    fichero.write('\t\tventana=Window()\n')
    fichero.write('\t\tventana.show()\n')
    fichero.write('\t\tsys.exit(app.exec_())\n')
    fichero.write('\texcept SystemExit:\n')
    fichero.write('\t\tpass\n')
    fichero.close()
# Interactive entry point (Python 2: raw_input).  The trailing
# raw_input() calls presumably keep the console window open when the
# script is double-clicked on Windows -- confirm.
n=raw_input("Do you wish create a new script [Y/N]?: ")
if n=='Y' or n=='y':
    Creation()
    Save()
    print("the file has been created")
    raw_input()
else:
    print("file doesn't created")
    raw_input()
| bsd-3-clause | Python | |
f3f07d6e8218d523227c63eea4b088573ca632ef | Add release script | bryanforbes/Erasmus | scripts/release.py | scripts/release.py | #!/usr/bin/env python
from __future__ import annotations
import subprocess
from dataclasses import dataclass
from datetime import datetime
from pathlib import Path
from typing import TYPE_CHECKING, Literal, overload
from zoneinfo import ZoneInfo
import click
if TYPE_CHECKING:
from typing_extensions import Self
@dataclass(slots=True)
class CalVer:
year: int
month: int
patch: int
dev: int | None
def next_version(self, *, dev_release: bool = False) -> Self:
today = datetime.now(tz=ZoneInfo('America/Chicago')).date()
today_tuple = (today.year - 2000, today.month)
if today_tuple <= (self.year, self.month):
year = self.year
month = self.month
if self.dev is not None:
patch = self.patch
if dev_release:
dev = self.dev + 1
else:
dev = None
else:
patch = self.patch + 1
dev = 0 if dev_release else None
else:
year = today_tuple[0]
month = today_tuple[1]
patch = 0
dev = 0 if dev_release else None
return self.__class__(year, month, patch, dev)
@classmethod
def parse(cls, version: str, /) -> Self:
split_version: list[str] = version.split('.')
return cls(
year=int(split_version[0]),
month=int(split_version[1]),
patch=int(split_version[2]),
dev=int(split_version[3][3:]) if len(split_version) > 3 else None,
)
def __str__(self) -> str:
version_string = f'{self.year}.{self.month}.{self.patch}'
if self.dev is not None:
version_string += f'.dev{self.dev}'
return version_string
@overload
def run(*args: str, dry_run: Literal[False] = False) -> str:
    ...


@overload
def run(*args: str, dry_run: bool) -> str | None:
    ...


def run(*args: str, dry_run: bool = False) -> str | None:
    """Echo the command, then (unless ``dry_run``) execute it and return
    its stdout without the trailing newline.

    Fix: the first overload previously declared ``dry_run: Literal[True]
    -> str``, the inverse of the implementation, which returns ``None``
    for dry runs and a string otherwise (and defaults dry_run to False).
    """
    print(f'> {" ".join(args)}')
    if not dry_run:
        process = subprocess.run(args, capture_output=True, encoding='utf-8')
        # Strip the single trailing newline most commands emit.
        return process.stdout[:-1]
    return None
@click.command()
@click.option('--dry-run', is_flag=True)
@click.option('--force', is_flag=True)
def release(dry_run: bool, force: bool) -> None:
    """Cut a release: stamp NEWS.md and the poetry version, commit and
    tag, then immediately bump to the next dev version.

    --dry-run prints each command without executing it (file writes are
    skipped too); --force skips the check that NEWS.md has entries.
    """
    root = Path(__file__).resolve().parent.parent

    with open(root / 'NEWS.md', 'r') as f:
        news_lines = f.readlines()

    # The file must start with the UNRELEASED placeholder heading.
    if news_lines[0] != '# Version UNRELEASED\n':
        raise RuntimeError('First line must be for unreleased version')

    if not force:
        # Scan until a bullet entry (OK) or the previous release heading
        # (means no new entries -> abort).
        for line in news_lines[1:]:
            if line.startswith('* '):
                break
            elif line.startswith('# Version '):
                raise RuntimeError('No news entries')

    # Derive the release version from the current pyproject version.
    next_version = CalVer.parse(run('poetry', 'version', '-s')).next_version()

    run('poetry', 'version', str(next_version), dry_run=dry_run)

    print('> Updating NEWS.md')
    news_lines[0] = f'# Version {next_version}\n'
    print(f'>> {news_lines[0][:-1]}')

    if not dry_run:
        with open(root / 'NEWS.md', 'w') as f:
            f.writelines(news_lines)

    run('git', 'add', '--update', '.', dry_run=dry_run)
    run('git', 'commit', '-m', f'Release {next_version}', dry_run=dry_run)
    run('git', 'tag', f'v{next_version}', dry_run=dry_run)

    # Post-release: bump to the following dev version so the repo never
    # carries a released version string between releases.
    run(
        'poetry',
        'version',
        str(next_version.next_version(dev_release=True)),
        dry_run=dry_run,
    )

    print('> Updating NEWS.md')
    # Re-insert the UNRELEASED placeholder above the fresh release notes.
    news_lines = ['# Version UNRELEASED\n', '\n', '\n'] + news_lines
    print(f'>> {news_lines[0][:-1]}')

    if not dry_run:
        with open(root / 'NEWS.md', 'w') as f:
            f.writelines(news_lines)

    run('git', 'add', '--update', '.', dry_run=dry_run)
    run('git', 'commit', '-m', 'Post release version bump', dry_run=dry_run)


if __name__ == '__main__':
    release()
| bsd-3-clause | Python | |
844810f393724684d855e6e12fd20c392b6f06a0 | check if key even exists before going into os.environ | alex/pyechonest,yuanmeibin/pyechonest,wisperwinter/pyechonest,ruohoruotsi/pyechonest,diegobill/pyechonest,diCaminha/pyechonest,KMikhaylovCTG/pyechonest,wisperwinter/pyechonest,MathieuDuponchelle/pyechonest3,shyamalschandra/pyechonest,EliteScientist/pyechonest,KMikhaylovCTG/pyechonest,DaisukeMiyamoto/pyechonest,andreylh/pyechonest,taytorious/pyechonest,Victorgichohi/pyechonest,diCaminha/pyechonest,salah-ghanim/pyechonest,alex/pyechonest,echonest/pyechonest,mhcrnl/pyechonest,diegobill/pyechonest,Victorgichohi/pyechonest,MathieuDuponchelle/pyechonest3,EliteScientist/pyechonest,beni55/pyechonest,abhisheknshah/dynamicplaylistgenerator,beni55/pyechonest,shyamalschandra/pyechonest,mmiquiabas/pyechonest,krishofmans/pyechonest,salah-ghanim/pyechonest,mmiquiabas/pyechonest,esternocleidomastoideo/pyechonest,yuanmeibin/pyechonest,esternocleidomastoideo/pyechonest,DaisukeMiyamoto/pyechonest,taytorious/pyechonest,abhisheknshah/dynamicplaylistgenerator,mhcrnl/pyechonest,echonest/pyechonest,andreylh/pyechonest,krishofmans/pyechonest,ruohoruotsi/pyechonest | src/pyechonest/config.py | src/pyechonest/config.py | """
Global configuration variables for accessing the Echo Nest web API.
"""
ECHO_NEST_API_KEY = None
__version__ = "$Revision: 0 $"
# $Source$
import os
if('ECHO_NEST_API_KEY' in os.environ):
ECHO_NEST_API_KEY = os.environ['ECHO_NEST_API_KEY']
else:
ECHO_NEST_API_KEY = None
API_HOST = 'developer.echonest.com'
API_SELECTOR = '/api/'
"Locations for the Analyze API calls."
HTTP_USER_AGENT = 'PyENAPI'
"""
You may change this to be a user agent string of your
own choosing.
"""
MP3_BITRATE = 192
"""
Default bitrate for MP3 output. Conventionally an
integer divisible by 32kbits/sec.
"""
CACHE = True
"""
You may change this to False to prevent local caching
of API results.
"""
OBEY_RATE_LIMIT = True
"""
The Echo Nest limits users to 120 api calls per minute.
By default, pyechonest enforces this limit locally. Set this
variable to False to turn of local enforcement. The Echo Nest
api will still throttle you.
"""
| """
Global configuration variables for accessing the Echo Nest web API.
"""
ECHO_NEST_API_KEY = None
__version__ = "$Revision: 0 $"
# $Source$
import os
if(os.environ['ECHO_NEST_API_KEY']):
ECHO_NEST_API_KEY = os.environ['ECHO_NEST_API_KEY']
else:
ECHO_NEST_API_KEY = None
API_HOST = 'developer.echonest.com'
API_SELECTOR = '/api/'
"Locations for the Analyze API calls."
HTTP_USER_AGENT = 'PyENAPI'
"""
You may change this to be a user agent string of your
own choosing.
"""
MP3_BITRATE = 192
"""
Default bitrate for MP3 output. Conventionally an
integer divisible by 32kbits/sec.
"""
CACHE = True
"""
You may change this to False to prevent local caching
of API results.
"""
OBEY_RATE_LIMIT = True
"""
The Echo Nest limits users to 120 api calls per minute.
By default, pyechonest enforces this limit locally. Set this
variable to False to turn of local enforcement. The Echo Nest
api will still throttle you.
""" | bsd-3-clause | Python |
1c2ba73eb0405dcfd427574c197e6a0588390f67 | Simplify shipping template tags | WadeYuChen/django-oscar,django-oscar/django-oscar,kapari/django-oscar,bnprk/django-oscar,monikasulik/django-oscar,saadatqadri/django-oscar,Bogh/django-oscar,mexeniz/django-oscar,ka7eh/django-oscar,saadatqadri/django-oscar,solarissmoke/django-oscar,taedori81/django-oscar,bschuon/django-oscar,taedori81/django-oscar,binarydud/django-oscar,vovanbo/django-oscar,john-parton/django-oscar,pdonadeo/django-oscar,spartonia/django-oscar,itbabu/django-oscar,michaelkuty/django-oscar,kapt/django-oscar,michaelkuty/django-oscar,pasqualguerrero/django-oscar,pasqualguerrero/django-oscar,ahmetdaglarbas/e-commerce,itbabu/django-oscar,rocopartners/django-oscar,rocopartners/django-oscar,solarissmoke/django-oscar,jinnykoo/wuyisj.com,bschuon/django-oscar,WillisXChen/django-oscar,thechampanurag/django-oscar,john-parton/django-oscar,jmt4/django-oscar,jinnykoo/christmas,faratro/django-oscar,amirrpp/django-oscar,MatthewWilkes/django-oscar,jlmadurga/django-oscar,manevant/django-oscar,ka7eh/django-oscar,ahmetdaglarbas/e-commerce,dongguangming/django-oscar,WillisXChen/django-oscar,adamend/django-oscar,manevant/django-oscar,amirrpp/django-oscar,josesanch/django-oscar,binarydud/django-oscar,QLGu/django-oscar,ka7eh/django-oscar,jmt4/django-oscar,spartonia/django-oscar,monikasulik/django-oscar,adamend/django-oscar,lijoantony/django-oscar,thechampanurag/django-oscar,josesanch/django-oscar,jinnykoo/wuyisj,taedori81/django-oscar,eddiep1101/django-oscar,ahmetdaglarbas/e-commerce,WillisXChen/django-oscar,marcoantoniooliveira/labweb,MatthewWilkes/django-oscar,Jannes123/django-oscar,itbabu/django-oscar,jinnykoo/wuyisj.com,faratro/django-oscar,ademuk/django-oscar,jmt4/django-oscar,ademuk/django-oscar,saadatqadri/django-oscar,machtfit/django-oscar,mexeniz/django-oscar,Bogh/django-oscar,WillisXChen/django-oscar,django-oscar/django-oscar,lijoantony/django-oscar,nickpack/django-oscar,pdonadeo/django-oscar,kapari/django-oscar,manevant/dja
ngo-oscar,eddiep1101/django-oscar,lijoantony/django-oscar,WadeYuChen/django-oscar,jinnykoo/christmas,sonofatailor/django-oscar,ahmetdaglarbas/e-commerce,nfletton/django-oscar,binarydud/django-oscar,django-oscar/django-oscar,pdonadeo/django-oscar,anentropic/django-oscar,Jannes123/django-oscar,Jannes123/django-oscar,solarissmoke/django-oscar,jlmadurga/django-oscar,jinnykoo/wuyisj.com,bschuon/django-oscar,mexeniz/django-oscar,lijoantony/django-oscar,pdonadeo/django-oscar,QLGu/django-oscar,jlmadurga/django-oscar,binarydud/django-oscar,kapt/django-oscar,sonofatailor/django-oscar,kapt/django-oscar,marcoantoniooliveira/labweb,dongguangming/django-oscar,kapari/django-oscar,WadeYuChen/django-oscar,ademuk/django-oscar,adamend/django-oscar,sasha0/django-oscar,jinnykoo/wuyisj,jinnykoo/wuyisj.com,bnprk/django-oscar,machtfit/django-oscar,saadatqadri/django-oscar,michaelkuty/django-oscar,Bogh/django-oscar,django-oscar/django-oscar,pasqualguerrero/django-oscar,nickpack/django-oscar,marcoantoniooliveira/labweb,thechampanurag/django-oscar,sasha0/django-oscar,nfletton/django-oscar,faratro/django-oscar,monikasulik/django-oscar,MatthewWilkes/django-oscar,taedori81/django-oscar,pasqualguerrero/django-oscar,sasha0/django-oscar,sonofatailor/django-oscar,anentropic/django-oscar,WillisXChen/django-oscar,amirrpp/django-oscar,jinnykoo/christmas,monikasulik/django-oscar,MatthewWilkes/django-oscar,nfletton/django-oscar,thechampanurag/django-oscar,ademuk/django-oscar,okfish/django-oscar,Jannes123/django-oscar,vovanbo/django-oscar,michaelkuty/django-oscar,anentropic/django-oscar,rocopartners/django-oscar,eddiep1101/django-oscar,josesanch/django-oscar,Bogh/django-oscar,jinnykoo/wuyisj,anentropic/django-oscar,nickpack/django-oscar,jmt4/django-oscar,itbabu/django-oscar,machtfit/django-oscar,dongguangming/django-oscar,manevant/django-oscar,WadeYuChen/django-oscar,WillisXChen/django-oscar,bnprk/django-oscar,nfletton/django-oscar,bnprk/django-oscar,okfish/django-oscar,marcoantoniooliveira/labweb,john-pa
rton/django-oscar,adamend/django-oscar,vovanbo/django-oscar,vovanbo/django-oscar,spartonia/django-oscar,mexeniz/django-oscar,sonofatailor/django-oscar,faratro/django-oscar,rocopartners/django-oscar,okfish/django-oscar,jlmadurga/django-oscar,amirrpp/django-oscar,eddiep1101/django-oscar,kapari/django-oscar,spartonia/django-oscar,nickpack/django-oscar,QLGu/django-oscar,dongguangming/django-oscar,solarissmoke/django-oscar,jinnykoo/wuyisj,ka7eh/django-oscar,john-parton/django-oscar,QLGu/django-oscar,sasha0/django-oscar,bschuon/django-oscar,okfish/django-oscar | oscar/templatetags/shipping_tags.py | oscar/templatetags/shipping_tags.py | from django import template
register = template.Library()
@register.assignment_tag
def shipping_charge(method, basket):
"""
Template tag for calculating the shipping charge for a given shipping
method and basket, and injecting it into the template context.
"""
return method.calculate(basket)
@register.assignment_tag
def shipping_charge_discount(method, basket):
    """
    Template tag for calculating the shipping discount for a given shipping
    method and basket, and injecting it into the template context.

    Usage: {% shipping_charge_discount method basket as var %}
    """
    return method.discount(basket)
@register.assignment_tag
def shipping_charge_excl_discount(method, basket):
    """
    Template tag for calculating the shipping charge (excluding discounts)
    for a given shipping method and basket, and injecting it into the
    template context.

    Usage: {% shipping_charge_excl_discount method basket as var %}
    """
    return method.calculate_excl_discount(basket)
| from django import template
register = template.Library()
@register.tag
def shipping_charge(parse, token):
    """
    Template tag for calculating the shipping charge for a given shipping
    method and basket, and injecting it into the template context.

    Usage: {% shipping_charge method basket as name %}
    """
    return build_node(ShippingChargeNode, token)
@register.tag
def shipping_charge_discount(parse, token):
    """
    Template tag for calculating the shipping discount for a given shipping
    method and basket, and injecting it into the template context.

    Usage: {% shipping_charge_discount method basket as name %}
    """
    return build_node(ShippingChargeDiscountNode, token)
@register.tag
def shipping_charge_excl_discount(parse, token):
    """
    Template tag for calculating the shipping charge (excluding discounts)
    for a given shipping method and basket, and injecting it into the
    template context.

    Usage: {% shipping_charge_excl_discount method basket as name %}
    """
    return build_node(ShippingChargeExclDiscountNode, token)
def build_node(node_class, token):
    """Parse '{% <tag> method basket as name %}' and return node_class
    constructed with the three variable names.

    :raises template.TemplateSyntaxError: if the tag does not have exactly
        five chunks with 'as' in the fourth slot.
    """
    tokens = token.split_contents()
    if len(tokens) != 5 or tokens[3] != 'as':
        raise template.TemplateSyntaxError(
            "%(tag)r tag uses the following syntax: "
            "{%% %(tag)r method basket as "
            "name %%}" % {'tag': tokens[0]})
    method_var, basket_var, name_var = tokens[1], tokens[2], tokens[4]
    return node_class(method_var, basket_var, name_var)
class ShippingNode(template.Node):
    """Base node: resolves the method and basket template variables, calls
    ``method.<method_name>(basket)`` and stores the result in the context
    under the requested name.  Subclasses set ``method_name``.
    """
    method_name = None

    def __init__(self, method_var, basket_var, name_var):
        self.method_var = template.Variable(method_var)
        self.basket_var = template.Variable(basket_var)
        self.name_var = name_var

    def render(self, context):
        try:
            method = self.method_var.resolve(context)
            basket = self.basket_var.resolve(context)
        except template.VariableDoesNotExist:
            # Missing variables render as nothing rather than erroring out.
            return ''
        context[self.name_var] = getattr(
            method, self.method_name)(basket)
        return ''
class ShippingChargeNode(ShippingNode):
    # Injects method.calculate(basket) into the context.
    method_name = 'calculate'
class ShippingChargeDiscountNode(ShippingNode):
    # Injects method.discount(basket) into the context.
    method_name = 'discount'
class ShippingChargeExclDiscountNode(ShippingNode):
    # Injects method.calculate_excl_discount(basket) into the context.
    method_name = 'calculate_excl_discount'
| bsd-3-clause | Python |
3ef6a9dbe2916d669d3e7e7cfab86a365237bc19 | Make octane result format match the old v8_benchmark output. | crosswalk-project/chromium-crosswalk-efl,hgl888/chromium-crosswalk-efl,krieger-od/nwjs_chromium.src,bright-sparks/chromium-spacewalk,bright-sparks/chromium-spacewalk,timopulkkinen/BubbleFish,M4sse/chromium.src,patrickm/chromium.src,ondra-novak/chromium.src,mohamed--abdel-maksoud/chromium.src,Just-D/chromium-1,M4sse/chromium.src,bright-sparks/chromium-spacewalk,Pluto-tv/chromium-crosswalk,hujiajie/pa-chromium,bright-sparks/chromium-spacewalk,M4sse/chromium.src,crosswalk-project/chromium-crosswalk-efl,fujunwei/chromium-crosswalk,ChromiumWebApps/chromium,hgl888/chromium-crosswalk,Chilledheart/chromium,fujunwei/chromium-crosswalk,bright-sparks/chromium-spacewalk,Just-D/chromium-1,timopulkkinen/BubbleFish,nacl-webkit/chrome_deps,axinging/chromium-crosswalk,ondra-novak/chromium.src,pozdnyakov/chromium-crosswalk,axinging/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,M4sse/chromium.src,bright-sparks/chromium-spacewalk,mogoweb/chromium-crosswalk,Chilledheart/chromium,M4sse/chromium.src,Jonekee/chromium.src,mogoweb/chromium-crosswalk,hujiajie/pa-chromium,dednal/chromium.src,ondra-novak/chromium.src,jaruba/chromium.src,Fireblend/chromium-crosswalk,fujunwei/chromium-crosswalk,anirudhSK/chromium,Pluto-tv/chromium-crosswalk,timopulkkinen/BubbleFish,Jonekee/chromium.src,chuan9/chromium-crosswalk,ltilve/chromium,hgl888/chromium-crosswalk,markYoungH/chromium.src,mogoweb/chromium-crosswalk,pozdnyakov/chromium-crosswalk,littlstar/chromium.src,hgl888/chromium-crosswalk,Chilledheart/chromium,hgl888/chromium-crosswalk,Jonekee/chromium.src,Just-D/chromium-1,TheTypoMaster/chromium-crosswalk,ChromiumWebApps/chromium,pozdnyakov/chromium-crosswalk,markYoungH/chromium.src,markYoungH/chromium.src,Chilledheart/chromium,fujunwei/chromium-crosswalk,zcbenz/cefode-chromium,mogoweb/chromium-crosswalk,Pluto-tv/chromium-crosswalk,Jonekee/chromium.src,M4sse/chromium.src,krieger-od/nwjs_chro
mium.src,ChromiumWebApps/chromium,Fireblend/chromium-crosswalk,hgl888/chromium-crosswalk,ltilve/chromium,jaruba/chromium.src,hujiajie/pa-chromium,dushu1203/chromium.src,markYoungH/chromium.src,Just-D/chromium-1,TheTypoMaster/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,M4sse/chromium.src,nacl-webkit/chrome_deps,ltilve/chromium,patrickm/chromium.src,crosswalk-project/chromium-crosswalk-efl,mohamed--abdel-maksoud/chromium.src,timopulkkinen/BubbleFish,chuan9/chromium-crosswalk,jaruba/chromium.src,anirudhSK/chromium,nacl-webkit/chrome_deps,axinging/chromium-crosswalk,axinging/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,mogoweb/chromium-crosswalk,anirudhSK/chromium,TheTypoMaster/chromium-crosswalk,ondra-novak/chromium.src,axinging/chromium-crosswalk,Pluto-tv/chromium-crosswalk,ondra-novak/chromium.src,ltilve/chromium,pozdnyakov/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,chuan9/chromium-crosswalk,hgl888/chromium-crosswalk-efl,timopulkkinen/BubbleFish,mogoweb/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,mohamed--abdel-maksoud/chromium.src,dednal/chromium.src,zcbenz/cefode-chromium,hujiajie/pa-chromium,M4sse/chromium.src,dushu1203/chromium.src,mogoweb/chromium-crosswalk,dushu1203/chromium.src,ChromiumWebApps/chromium,nacl-webkit/chrome_deps,krieger-od/nwjs_chromium.src,ltilve/chromium,jaruba/chromium.src,dednal/chromium.src,hgl888/chromium-crosswalk-efl,crosswalk-project/chromium-crosswalk-efl,zcbenz/cefode-chromium,PeterWangIntel/chromium-crosswalk,anirudhSK/chromium,patrickm/chromium.src,zcbenz/cefode-chromium,bright-sparks/chromium-spacewalk,hgl888/chromium-crosswalk,Pluto-tv/chromium-crosswalk,markYoungH/chromium.src,PeterWangIntel/chromium-crosswalk,dushu1203/chromium.src,ChromiumWebApps/chromium,Jonekee/chromium.src,anirudhSK/chromium,jaruba/chromium.src,hgl888/chromium-crosswalk-efl,mohamed--abdel-maksoud/chromium.src,Chilledheart/chromium,fujunwei/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,fujunwei/chr
omium-crosswalk,markYoungH/chromium.src,ltilve/chromium,hujiajie/pa-chromium,hgl888/chromium-crosswalk-efl,Pluto-tv/chromium-crosswalk,Pluto-tv/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,hgl888/chromium-crosswalk-efl,patrickm/chromium.src,Chilledheart/chromium,pozdnyakov/chromium-crosswalk,patrickm/chromium.src,timopulkkinen/BubbleFish,ondra-novak/chromium.src,axinging/chromium-crosswalk,Jonekee/chromium.src,dednal/chromium.src,Jonekee/chromium.src,zcbenz/cefode-chromium,markYoungH/chromium.src,hujiajie/pa-chromium,Jonekee/chromium.src,zcbenz/cefode-chromium,axinging/chromium-crosswalk,Fireblend/chromium-crosswalk,zcbenz/cefode-chromium,TheTypoMaster/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,nacl-webkit/chrome_deps,nacl-webkit/chrome_deps,chuan9/chromium-crosswalk,jaruba/chromium.src,mohamed--abdel-maksoud/chromium.src,anirudhSK/chromium,Jonekee/chromium.src,Just-D/chromium-1,nacl-webkit/chrome_deps,timopulkkinen/BubbleFish,dushu1203/chromium.src,mohamed--abdel-maksoud/chromium.src,fujunwei/chromium-crosswalk,nacl-webkit/chrome_deps,anirudhSK/chromium,timopulkkinen/BubbleFish,littlstar/chromium.src,patrickm/chromium.src,jaruba/chromium.src,dednal/chromium.src,jaruba/chromium.src,hgl888/chromium-crosswalk-efl,dushu1203/chromium.src,chuan9/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,patrickm/chromium.src,krieger-od/nwjs_chromium.src,ondra-novak/chromium.src,Chilledheart/chromium,Just-D/chromium-1,ChromiumWebApps/chromium,ChromiumWebApps/chromium,Chilledheart/chromium,PeterWangIntel/chromium-crosswalk,pozdnyakov/chromium-crosswalk,hujiajie/pa-chromium,PeterWangIntel/chromium-crosswalk,Pluto-tv/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,dednal/chromium.src,ltilve/chromium,ChromiumWebApps/chromium,krieger-od/nwjs_chromium.src,pozdnyakov/chromium-crosswalk,littlstar/chromium.src,zcbenz/cefode-chromium,krieger-od/nwjs_chromium.src,bright-sparks/chromium-spacewalk,dushu1203/chromium.src,pozdnyakov/chromium-crosswalk,Chrom
iumWebApps/chromium,axinging/chromium-crosswalk,mogoweb/chromium-crosswalk,timopulkkinen/BubbleFish,nacl-webkit/chrome_deps,Fireblend/chromium-crosswalk,anirudhSK/chromium,markYoungH/chromium.src,TheTypoMaster/chromium-crosswalk,hgl888/chromium-crosswalk,littlstar/chromium.src,mohamed--abdel-maksoud/chromium.src,M4sse/chromium.src,PeterWangIntel/chromium-crosswalk,anirudhSK/chromium,markYoungH/chromium.src,M4sse/chromium.src,axinging/chromium-crosswalk,Fireblend/chromium-crosswalk,jaruba/chromium.src,PeterWangIntel/chromium-crosswalk,littlstar/chromium.src,mohamed--abdel-maksoud/chromium.src,anirudhSK/chromium,littlstar/chromium.src,ondra-novak/chromium.src,PeterWangIntel/chromium-crosswalk,Fireblend/chromium-crosswalk,chuan9/chromium-crosswalk,zcbenz/cefode-chromium,axinging/chromium-crosswalk,littlstar/chromium.src,krieger-od/nwjs_chromium.src,ltilve/chromium,Fireblend/chromium-crosswalk,chuan9/chromium-crosswalk,dushu1203/chromium.src,hujiajie/pa-chromium,pozdnyakov/chromium-crosswalk,Jonekee/chromium.src,timopulkkinen/BubbleFish,hgl888/chromium-crosswalk,ChromiumWebApps/chromium,jaruba/chromium.src,patrickm/chromium.src,hgl888/chromium-crosswalk-efl,Fireblend/chromium-crosswalk,fujunwei/chromium-crosswalk,ondra-novak/chromium.src,dednal/chromium.src,krieger-od/nwjs_chromium.src,anirudhSK/chromium,ChromiumWebApps/chromium,hujiajie/pa-chromium,mogoweb/chromium-crosswalk,dednal/chromium.src,M4sse/chromium.src,Just-D/chromium-1,Just-D/chromium-1,timopulkkinen/BubbleFish,mohamed--abdel-maksoud/chromium.src,hujiajie/pa-chromium,hgl888/chromium-crosswalk-efl,anirudhSK/chromium,zcbenz/cefode-chromium,patrickm/chromium.src,mogoweb/chromium-crosswalk,dednal/chromium.src,TheTypoMaster/chromium-crosswalk,markYoungH/chromium.src,Chilledheart/chromium,chuan9/chromium-crosswalk,dushu1203/chromium.src,ltilve/chromium,dushu1203/chromium.src,hgl888/chromium-crosswalk,dednal/chromium.src,dushu1203/chromium.src,TheTypoMaster/chromium-crosswalk,krieger-od/nwjs_chromium.src,mohamed--
abdel-maksoud/chromium.src,nacl-webkit/chrome_deps,Just-D/chromium-1,pozdnyakov/chromium-crosswalk,dednal/chromium.src,fujunwei/chromium-crosswalk,Jonekee/chromium.src,jaruba/chromium.src,zcbenz/cefode-chromium,hujiajie/pa-chromium,krieger-od/nwjs_chromium.src,axinging/chromium-crosswalk,Fireblend/chromium-crosswalk,chuan9/chromium-crosswalk,littlstar/chromium.src,krieger-od/nwjs_chromium.src,ChromiumWebApps/chromium,Pluto-tv/chromium-crosswalk,pozdnyakov/chromium-crosswalk,bright-sparks/chromium-spacewalk,markYoungH/chromium.src,nacl-webkit/chrome_deps,hgl888/chromium-crosswalk-efl | tools/perf/perf_tools/octane.py | tools/perf/perf_tools/octane.py | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry import multi_page_benchmark
from telemetry import util
class Octane(multi_page_benchmark.MultiPageBenchmark):
  """Runs the Octane JavaScript benchmark page and reports per-test scores."""
  def MeasurePage(self, _, tab, results):
    # The Octane harness sets `completed` and removes its progress bar
    # element once every sub-benchmark has finished.
    js_is_done = """
        completed && !document.getElementById("progress-bar-container")"""
    def _IsDone():
      return bool(tab.runtime.Evaluate(js_is_done))
    util.WaitFor(_IsDone, 300, poll_interval=5)
    # Scrape each per-benchmark score (divs with ids like "Result-<name>")
    # plus the overall score from the main banner, returned as JSON text.
    js_get_results = """
        var results = {}
        var result_divs = document.querySelectorAll('.p-result');
        for (var r in result_divs) {
          if (result_divs[r].id && result_divs[r].id.indexOf('Result-') == 0)
            var key = result_divs[r].id.replace('Result-', '');
            results[key] = result_divs[r].innerHTML;
        }
        var main_banner = document.getElementById("main-banner").innerHTML;
        var octane_score = main_banner.substr(main_banner.lastIndexOf(':') + 2);
        results['score'] = octane_score;
        JSON.stringify(results);
        """
    # NOTE(review): eval() of the page-provided JSON string -- json.loads
    # would be safer; verify keys/values stay plain before changing.
    result_dict = eval(tab.runtime.Evaluate(js_get_results))
    for key, value in result_dict.iteritems():
      if value == '...':  # sub-benchmark that produced no score
        continue
      data_type = 'unimportant'
      if key == 'score':
        data_type = 'default'
      results.Add(key, 'score (bigger is better)', value, data_type=data_type)
| # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry import multi_page_benchmark
from telemetry import util
class Octane(multi_page_benchmark.MultiPageBenchmark):
  """Runs the Octane JavaScript benchmark page and reports raw result divs."""
  def MeasurePage(self, _, tab, results):
    # The Octane harness sets `completed` and removes its progress bar
    # element once every sub-benchmark has finished.
    js_is_done = """
        completed && !document.getElementById("progress-bar-container")"""
    def _IsDone():
      return bool(tab.runtime.Evaluate(js_is_done))
    util.WaitFor(_IsDone, 300, poll_interval=5)
    # Scrape every "Result-*" div plus the overall banner score as JSON.
    js_get_results = """
        var results = {}
        var result_divs = document.querySelectorAll('.p-result');
        for (var r in result_divs) {
          if (result_divs[r].id && result_divs[r].id.indexOf('Result-') == 0)
            results[result_divs[r].id] = result_divs[r].innerHTML;
        }
        var main_banner = document.getElementById("main-banner").innerHTML;
        var octane_score = main_banner.substr(main_banner.lastIndexOf(':') + 2);
        results['Result-Octane'] = octane_score;
        JSON.stringify(results);
        """
    # NOTE(review): eval() of page-provided JSON -- json.loads would be safer.
    result_dict = eval(tab.runtime.Evaluate(js_get_results))
    for key, value in result_dict.iteritems():
      if value == '...':  # sub-benchmark that produced no score
        continue
      results.Add(key, '', value)
| bsd-3-clause | Python |
238f5788211ed117ceedbb234e7404bc02716d60 | add serializer.py | zhengze/zblog,zhengze/zblog,zhengze/zblog,zhengze/zblog | apps/zblog/serializers.py | apps/zblog/serializers.py |
from rest_framework import serializers

from .models import Article
# Serializers define the API representation.
class ArticleSerializer(serializers.HyperlinkedModelSerializer):
    """Serializer exposing the public fields of an Article.

    Fix: ``Article`` was referenced without ever being imported, which
    raised a NameError the moment this module was loaded (import added at
    the top of the file).
    """

    class Meta:
        model = Article
        # 'category' and 'tags' are deliberately not exposed yet.
        fields = ('title', 'content', 'hits', 'created_time', 'updated_time')
| mit | Python | |
9bf9e9ace12fe43c18ef1676681b3b6f5df65d4c | Add example for comparison of two sets of trees based on extracted morphometrics | eleftherioszisis/NeuroM,lidakanari/NeuroM,wizmer/NeuroM,juanchopanza/NeuroM,liesbethvanherpe/NeuroM,BlueBrain/NeuroM,mgeplf/NeuroM | examples/comparison.py | examples/comparison.py | # Copyright (c) 2015, Ecole Polytechnique Federale de Lausanne, Blue Brain Project
# All rights reserved.
#
# This file is part of NeuroM <https://github.com/BlueBrain/NeuroM>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of
# its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''Module for the comparison of the morphometrics between two sets of trees.'''
import numpy as np
from neurom.core import iter_neurites
def get_mod_features(object1, object2, flist, collect_all, mod):
    '''Extract and standardize the features named in flist from both objects.

    For each feature name the values are collected from object1 (used as
    the reference population) and object2, then both are standardized with
    the population's mean and standard deviation.  Each standardized pair
    is appended to collect_all, which is returned for chaining.
    '''
    for feat in flist:
        # list(...) instead of an identity comprehension.
        feature_pop = np.array(list(iter_neurites(object1, getattr(mod, feat))))
        feature_neu = np.array(list(iter_neurites(object2, getattr(mod, feat))))
        # Standardization of data: (data - mean(data)) / std(data)
        # NOTE(review): a constant population feature gives std == 0 and
        # yields inf/nan here -- verify whether that can occur upstream.
        m = np.mean(feature_pop)
        st = np.std(feature_pop)
        collect_all.append([(feature_pop - m) / st, (feature_neu - m) / st])
    return collect_all
def get_features(object1, object2, tri_list=('meander_angle',),
                 seg_list=('length',), bif_list=(),  # TODO: 'remote_angle', 'local_angle'
                 sec_list=('end_point_path_length', 'area', 'length')):
    '''Collect standardized morphometric features from two sets of trees.

    Parameters:
        object1 : list\
            List of neurons used as the reference population.
        object2 : list\
            List of neurons compared against the population.
        tri_list, seg_list, bif_list, sec_list : tuple\
            Feature names taken from the triplets, segments, bifurcations
            and sections modules respectively.

    Returns:
        collect_all:\
            A list of pairs of flattened, standardized data per feature.

    Fix: the single-element defaults were written as ('meander_angle') and
    ('length'), which are plain strings, not tuples -- iterating them
    yielded single characters, making getattr() fail.  Trailing commas
    make them real one-element tuples.
    '''
    from neurom import triplets as tri
    from neurom import segments as seg
    from neurom import bifurcations as bif
    from neurom import sections as sec

    collect_all = []
    collect_all = get_mod_features(object1, object2, tri_list, collect_all, tri)
    collect_all = get_mod_features(object1, object2, seg_list, collect_all, seg)
    collect_all = get_mod_features(object1, object2, bif_list, collect_all, bif)
    collect_all = get_mod_features(object1, object2, sec_list, collect_all, sec)
    return collect_all
def boxplots(collect_all, new_fig=True, subplot=False,
             feature_titles=('Section length',
                             'Section area',
                             'Section path length',
                             'Segment lengths',
                             'Segment meander angles')):
    '''Draw one horizontal boxplot per feature for the reference population
    and overlay the second object's median as a red square marker.

    Parameters:
        collect_all:\
            A list of [population_data, object_data] pairs, one per
            feature, as produced by get_features.
        new_fig (Optional[bool]):\
            Default is False, which returns the default matplotlib axes 111\
            If a subplot needs to be specified, it should be provided in xxx format.
        subplot (Optional[bool]):\
            Default is False, which returns a matplotlib figure object. If True,\
            returns a matplotlib axis object, for use as a subplot.
        feature_titles (Optional[tuple]):\
            Y-axis labels; must match collect_all's length and order.

    Returns:
        fig, ax:\
            A figure which contains the list of boxplots.
    '''
    from neurom.view import common
    fig, ax = common.get_figure(new_fig=new_fig, subplot=subplot)
    # Column 0 of each pair is the population data: one boxplot per feature.
    ax.boxplot(list(np.transpose(np.array(collect_all))[0]), vert=False)
    # Boxplot rows are numbered bottom-to-top, hence the reversed index
    # when placing each object's median marker.
    for idata, data in enumerate(collect_all):
        ax.scatter(np.median(data[1]), len(collect_all) - idata, s=100, color='r', marker='s')
    ax.set_yticklabels(feature_titles)
    fig, ax = common.plot_style(fig, ax, xlabel='Normalized units (dimensionless)',
                                title='Summarizing features')
    return fig, ax
| bsd-3-clause | Python | |
4e962d97b6a9d97db915c92c5a388a3a36573d63 | add 32 | ericdahl/project-euler,ericdahl/project-euler,ericdahl/project-euler,ericdahl/project-euler,ericdahl/project-euler,ericdahl/project-euler | p032.py | p032.py | import itertools
# Project Euler 32: sum all products whose multiplicand/multiplier/product
# identity uses each of the digits 1..9 exactly once (pandigital products).
matches = set()
for i in itertools.permutations('123456789', 9):
    # s marks the end of the multiplicand, s2 the end of the multiplier;
    # the product takes the remaining digits of the permutation.
    for s in xrange(1, 4):
        for s2 in xrange(s + 1, (14 - s) / 2):
            a = int(''.join(i[:s]))
            b = int(''.join(i[s:s2]))
            c = int(''.join(i[s2:]))
            if a * b == c:
                # A set, because some products arise from several splits.
                matches.add(c)
print sum(matches) | bsd-3-clause | Python | |
691a5873596c487eece704bad991270c6b275dde | Create Game_of_Master_Mind.py | UmassJin/Leetcode | Cracking_Coding_Interview/Game_of_Master_Mind.py | Cracking_Coding_Interview/Game_of_Master_Mind.py | class Result:
    def __init__(self):
        '''Initialize the score counters for one Mastermind guess.'''
        self.hit = 0        # right colour in the right position
        self.pseudohit = 0  # right colour in the wrong position
def estimate(guess, solution):
if len(guess) != len(solution): return None
result = Result()
idict = {}
for i, char in enumerate(guess):
if char == solution[i]:
result.hit += 1
else:
idict[solution[i]] = idict.get(solution[i], 0) + 1 # Note: here we use the idict to record the solution frequency!
for i, char in enumerate(guess):
print "char: %s", char
print "solution: ", solution
if (char != solution[i]) and (idict.get(char, 0) > 0):
result.pseudohit += 1
idict[char] -= 1
print "hit: %d, pseudohit: %d" %(result.hit, result.pseudohit)
return result
# Example: expected score is 1 hit (the 'G' at index 1) and 2 pseudohits.
guess = 'GGRR'
solution = 'RGGY'
estimate(guess, solution)
| mit | Python | |
b5906545121d4d5229552d3e2243a290810d1c1c | add self-connect.py | chenshuo/recipes,chenshuo/recipes,chenshuo/recipes,chenshuo/recipes,chenshuo/recipes,chenshuo/recipes | python/self-connect.py | python/self-connect.py | #!/usr/bin/python
import errno
import socket
import sys
import time
if len(sys.argv) < 2:
    print "Usage: %s port" % sys.argv[0]
    print "port should in net.ipv4.ip_local_port_range"
else:
    port = int(sys.argv[1])
    # Repeatedly connect to a local port with no listener.  Normally this
    # fails with ECONNREFUSED, but when the kernel picks an ephemeral
    # source port equal to the destination port, TCP simultaneous-open
    # lets the socket connect to itself -- then we stop and hold it open.
    for i in range(65536):
        try:
            sock = socket.create_connection(('localhost', port))
            print "connected", sock.getsockname(), sock.getpeername()
            time.sleep(60*60)  # keep the self-connected socket alive
        except socket.error, e:
            # Anything other than "connection refused" is unexpected.
            if e.errno != errno.ECONNREFUSED:
                break
| bsd-3-clause | Python | |
6c16f074f273c2f040c3eadcf34307b2fbef4cda | Add transformers.py | KamiyamaKiriko/weechat-scripts | python/transformers.py | python/transformers.py | # -*- encoding: utf-8 -*-
import weechat
import re
# Pattern for IRC colour/formatting codes stripped before sending to
# bitlbee buffers.  NOTE(review): the pattern begins with an empty
# alternative; control characters may have been lost in transit -- verify.
stripFormatting = re.compile(r"|\d{0,2}(,\d{0,2})?")
# Script registration metadata passed to weechat.register() below.
script = {
    "name": "transformers",
    "author": "KamiyamaKiriko",
    "version": "0.2",
    "license": "MIT",
    "description": "Fancily fancify your fancy messages",
}
# Literal substitutions applied to every outgoing message.
# NOTE(review): the ";kyubey:" value appears to end with an unescaped
# backslash -- confirm it is escaped (\\) in the actual file.
replacements = {
    ":V": u"<̈",
    "<3": u"4<3 ",
    "qUcyy": u"┐( ̄ー ̄)┌",
    "miyabi.png": u"( ¬‿¬)",
    ";we:": u"┐('~`;)┌",
    ";dohoho:": u"(  ̄ ▽  ̄ )ノ Dohohohoho~",
    ";uh:": u"( ゚‿ ゚)",
    ";rage:": u"(╬ ಠ益ಠ)",
    ";obto:": u"ヾ(๑╹◡╹๑)ノ",
    ";kyubey:": u"/人◕ ‿‿ ◕人\",
    ";cryingdisapprove:": u"ಥ_ಥ",
    ";disapprove:": u"ಠ_ಠ",
    ";cries:": u"11。・゚・(ノД`)11・゚・。 ",
    ";donger:": u"ヽ༼ຈلຈ༽ノ",
}
def hook_message_send(data, buffer, command):
    """Expand emoticon shortcuts in the input line before it is sent."""
    text = weechat.buffer_get_string(buffer, 'input').decode("utf-8")
    # Leave commands alone; "//" escapes a literal leading slash.
    if text.startswith('/') and not text.startswith('//'):
        return weechat.WEECHAT_RC_OK
    for shortcut, expansion in replacements.items():
        text = text.replace(shortcut, expansion)
    # Strip formatting codes when the target buffer is a bitlbee gateway.
    if weechat.buffer_get_string(buffer, "name").startswith("bitlbee"):
        text = stripFormatting.sub("", text)
    weechat.buffer_set(buffer, 'input', text.encode("utf-8"))
    return weechat.WEECHAT_RC_OK
# Register the script and rewrite the input line on every <Enter> press.
weechat.register(script['name'], script['author'], script['version'], script['license'], script['description'], "", "")
weechat.hook_command_run("/input return", "hook_message_send", "")
| mit | Python | |
d4151bf2a30fc8a497f7d4cb3f6eba4b6913447e | Create tester.py | nckswt/LEDframe,nckswt/LEDframe | tester.py | tester.py | print("hey!")
| apache-2.0 | Python | |
e1ceaa62c7e6f0974b21a23105280da49e9657bf | Send push notifications | kfdm/gntp-regrowl | regrowl/bridge/push.py | regrowl/bridge/push.py | """
Send push notifications
Uses pushnotify to send notifications to iOS and Android devices
Requires https://pypi.python.org/pypi/pushnotify
Sample config
[regrowl.bridge.push]
label = prowl,<apikey>
other = nma,<apikey>
example = pushover,<apikey>
"""
from __future__ import absolute_import
try:
import pushnotify
except ImportError:
raise ImportError('Requires https://pypi.python.org/pypi/pushnotify Please install from PyPi')
import logging
from regrowl.regrowler import ReGrowler
logger = logging.getLogger(__name__)
logging.getLogger('requests').setLevel(logging.WARNING)
__all__ = ['PushNotifier']
class PushNotifier(ReGrowler):
    """Forward GNTP NOTIFY packets to mobile push providers via pushnotify."""

    # Only react to notification packets.
    valid = ['NOTIFY']

    # Maps GNTP header -> pushnotify keyword argument.
    # NOTE(review): 'pritory' looks like a typo for 'priority' -- verify
    # against the pushnotify provider API before changing.
    _kwargs = {
        'Notification-Callback-Target': 'url',
        'Notification-Priority': 'pritory',
    }

    def notify(self, packet):
        """Send `packet` to every provider configured in this bridge's section."""
        for label, settings in self.config.items(__name__):
            notifier, apikey = settings.split(',')
            client = pushnotify.get_client(
                notifier, packet.headers['Application-Name'])
            if not client:
                # Fix: was `return`, which silently skipped every remaining
                # configured provider after one bad entry.
                logger.error('Error loading push provider %s', notifier)
                continue
            logger.info('Sending push to %s with %s', label, notifier)
            kwargs = {}
            for key, target in self._kwargs.items():
                if key in packet.headers:
                    kwargs[target] = packet.headers[key]
            client.add_key(apikey)
            client.notify(
                packet.headers['Notification-Text'],
                packet.headers['Notification-Title'],
                kwargs=kwargs
            )
| mit | Python | |
f423a32dac3b3232a03e6eebdb0664d2b5cdf87e | Add test for ordinal | NewAcropolis/api,NewAcropolis/api,NewAcropolis/api | tests/app/utils/test_time.py | tests/app/utils/test_time.py | from app.utils.time import make_ordinal
class WhenMakingOrdinal:
    """Covers regular ordinal suffixes plus the 11-13 'th' exceptions."""

    def it_returns_an_ordinal_correctly(self):
        # Regular suffixes.
        assert make_ordinal(1) == '1st'
        assert make_ordinal(2) == '2nd'
        assert make_ordinal(3) == '3rd'
        assert make_ordinal(4) == '4th'
        # 11-13 are exceptions and always take 'th'.
        assert make_ordinal(11) == '11th'
        assert make_ordinal(12) == '12th'
        assert make_ordinal(13) == '13th'
        # The regular pattern resumes in later decades.
        assert make_ordinal(21) == '21st'
| mit | Python | |
5647dfbf3aa2b2c5cb7f32b60b21a47ad2ee6f20 | add google foobar exercise | congminghaoxue/learn_python | solution_level1.py | solution_level1.py | #!/usr/bin/env python
# encoding: utf-8
def answer(s):
    """Atbash-encode s: each lowercase letter maps to its mirror in the
    alphabet (a<->z, b<->y, ...); every other character is unchanged.
    """
    a, z = ord('a'), ord('z')
    # join over a generator instead of the original `re = re + c`
    # accumulation, which was quadratic and shadowed a common module name.
    return ''.join(
        chr(a + z - ord(c)) if a <= ord(c) <= z else c
        for c in s
    )
if __name__ == '__main__':
    # NOTE: raw_input is Python 2 only (input() under Python 3); the name
    # `str` shadows the builtin but is harmless in this short scope.
    str = raw_input("Inputs:")
    print(answer(str))
| apache-2.0 | Python | |
34dca8c90ba650356c19ff7c42d19f09a050bd64 | Add file from previous commit | spMohanty/TranslatorsDesk,ltrc/TranslatorsDesk,spMohanty/TranslatorsDesk,ltrc/TranslatorsDesk,spMohanty/TranslatorsDesk,spMohanty/TranslatorsDesk,ltrc/TranslatorsDesk,ltrc/TranslatorsDesk | translatorsdesk/worker_functions.py | translatorsdesk/worker_functions.py | import subprocess
from rq import Queue
from redis import Redis
redis_conn = Redis()
q = Queue(connection=redis_conn)
#=================================================================
# Process Input File
def extract_xliff(file):
cmd = ["lib/okapi/tikal.sh", "-x", file]
p = subprocess.Popen(cmd, stdout = subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE)
out, err = p.communicate()
def extract_po(file):
cmd = ["xliff2po", "-i", file+".xlf", "-o", file+".po"]
p = subprocess.Popen(cmd, stdout = subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE)
out, err = p.communicate()
def process_input_file(file):
# redis_conn.set(file, "start")
extract_xliff(file)
extract_po(file)
# redis_conn.set(file, "done")
#=================================================================
#Generate Output file
def newFilePath(fileName):
newFileName = fileName.split(".")
extension = newFileName.pop(-1)
newFileName.append("out")
newFileName.append(extension)
newFileName = ".".join(newFileName)
newPath = fileName
newPath = newPath.split("/")[:-1]
newPath.append(newFileName)
newPath = "/".join(newPath)
return newPath
def mergePOFileWithXLF(file):
#Merge PO file onto XLIFF File
cmd = ["pomerge", "-i", file+".po", "-t", file+".xlf", "-o", file+".xlf.new"]
p = subprocess.Popen(cmd, stdout = subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE)
out, err = p.communicate()
print cmd, out, err
def takeBackupOfOldXLFFile(file):
#Move old xlf file to a new location and mark it as .old
cmd = ["mv", file+".xlf", file+".xlf.old"]
p = subprocess.Popen(cmd, stdout = subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE)
out, err = p.communicate()
print cmd, out, err
def moveNewXLFToCorrectLocation(file):
#Move the newly generated xlf file to the expected location of xlf file
cmd = ["mv", file+".xlf.new", file+".xlf"]
p = subprocess.Popen(cmd, stdout = subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE)
out, err = p.communicate()
print cmd, out, err
def removeOldOutputFile(file):
newPath = newFilePath(file)
#Delete any old generated files, if present
cmd = ["rm", newPath]
p = subprocess.Popen(cmd, stdout = subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE)
out, err = p.communicate()
print cmd, out, err
def mergeTranslatedXLFFileWithDocument(file):
#Merge translated xlf file with the doc
cmd = ["lib/okapi/tikal.sh", "-m", file+".xlf"]
p = subprocess.Popen(cmd, stdout = subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE)
out, err = p.communicate()
print cmd, out, err
def generateOutputFile(file):
# file : fullpath of the file
mergePOFileWithXLF(file)
takeBackupOfOldXLFFile(file)
moveNewXLFToCorrectLocation(file)
removeOldOutputFile(file)
mergeTranslatedXLFFileWithDocument(file)
newPath = newFilePath(file)
publicly_accessible_path = "/" + "/".join(newPath.split("/")[1:])
def merge(xliff_file):
cmd = ["lib/okapi/tikal.sh", "-m", xliff_file]
p = subprocess.Popen(cmd, stdout = subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE)
out, err = p.communicate()
| bsd-3-clause | Python | |
e1b23ecf168d397da373c4441c67e655da58e3e9 | Add basic Log class to represent a log record. | 4degrees/mill,4degrees/sawmill | source/bark/log.py | source/bark/log.py | # :coding: utf-8
# :copyright: Copyright (c) 2013 Martin Pengelly-Phillips
# :license: See LICENSE.txt.
from collections import MutableMapping
class Log(MutableMapping):
    '''Hold individual log data.

    Fixes: ``__len__`` had no return statement (it returned None, making
    ``len()`` raise TypeError), and ``__delitem`` was missing its trailing
    underscores, so the abstract ``MutableMapping.__delitem__`` was never
    implemented and the class could not be instantiated.
    '''

    def __init__(self, *args, **kw):
        '''Initialise log.'''
        super(Log, self).__init__()
        self._mapping = dict(*args, **kw)

    def __str__(self):
        '''Return string representation.'''
        return str(self._mapping)

    def __len__(self):
        '''Return number of keys.'''
        return len(self._mapping)

    def __iter__(self):
        '''Return iterator over object.'''
        return iter(self._mapping)

    def __getitem__(self, key):
        '''Return value referenced by *key*.'''
        return self._mapping[key]

    def __setitem__(self, key, value):
        '''Set *key* to reference *value*.'''
        self._mapping[key] = value

    def __delitem__(self, key):
        '''Remove *key* reference.'''
        del self._mapping[key]
| apache-2.0 | Python | |
6c6214681ee89f2f67b09542fcf7690aa61954b9 | Add maybe_make_dir() | ronrest/convenience_py,ronrest/convenience_py | file/maybe_make_dir.py | file/maybe_make_dir.py | import os
# ==============================================================================
# MAYBE_MAKE_DIR
# ==============================================================================
def maybe_make_dir(path):
    """ Checks if a directory path exists on the system, if it does not, then
        it creates that directory (and any parent directories needed to
        create that directory)
    """
    if not os.path.exists(path):
        try:
            os.makedirs(path)
        except OSError:
            # The directory may have been created concurrently between the
            # exists() check and makedirs(); only re-raise real failures.
            if not os.path.isdir(path):
                raise
| apache-2.0 | Python | |
c57bfdfae235e7ed7b5f13922a7fbc64dbd112f1 | Add a missing migration | pythonindia/junction,pythonindia/junction,pythonindia/junction,pythonindia/junction | junction/proposals/migrations/0025_auto_20200321_0049.py | junction/proposals/migrations/0025_auto_20200321_0049.py | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2020-03-20 19:19
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated (Django 1.9) schema migration: adjusts the composite
    # indexes on ProposalComment.  Applied migrations should only be
    # changed by adding new migrations, not by editing this one.
    dependencies = [
        ('proposals', '0024_auto_20170610_1857'),
    ]
    operations = [
        migrations.AlterIndexTogether(
            name='proposalcomment',
            index_together=set([('is_spam', 'marked_as_spam_by'), ('commenter', 'is_spam')]),
        ),
    ]
| mit | Python | |
0779277486a6812f5b58e1fc1ab6fe1e5dc35559 | add dummy api tests | OpenDataPolicingNC/Traffic-Stops,OpenDataPolicingNC/Traffic-Stops,OpenDataPolicingNC/Traffic-Stops,OpenDataPolicingNC/Traffic-Stops | nc/tests/test_api.py | nc/tests/test_api.py | from django.core.urlresolvers import reverse
from rest_framework import status
from rest_framework.test import APITestCase
from nc.models import Agency
class AgencyTests(APITestCase):
    """Smoke tests for the NC Agency REST endpoints."""

    def test_list_agencies(self):
        """Test Agency list"""
        agency = Agency.objects.create(name="Durham")
        url = reverse('agency-api-list')
        # Fix: use the generated pk instead of hard-coding id 1, which
        # breaks when the database sequence does not start at 1.
        data = [{'id': agency.pk, 'name': 'Durham'}]
        response = self.client.get(url, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data, data)

    def test_stops_api(self):
        """Test Agency stops API endpoint"""
        agency = Agency.objects.create(name="Durham")
        url = reverse('agency-api-stops', args=[agency.pk])
        response = self.client.get(url, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)

    def test_stops_by_reason(self):
        """Test Agency stops_by_reason API endpoint"""
        agency = Agency.objects.create(name="Durham")
        url = reverse('agency-api-stops-by-reason', args=[agency.pk])
        response = self.client.get(url, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
| mit | Python | |
9a983fc4223cedc3c34c53b1241ffc71ac063a5c | Add benchmark | spotify/sparkey-python,pombredanne/sparkey-python | test/bench.py | test/bench.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2013 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sparkey
import tempfile
import os
import unittest
import time
from random import randint
class TestBench(unittest.TestCase):
def setUp(self):
self.log_fd, self.logfile = tempfile.mkstemp()
self.hash_fd, self.hashfile = tempfile.mkstemp()
def tearDown(self):
os.remove(self.logfile)
os.remove(self.hashfile)
def _create(self, compression_type, num_entries):
writer = sparkey.LogWriter(self.logfile, compression_type=compression_type, compression_block_size=1024)
for i in xrange(0, num_entries):
writer.put("key_" + str(i), "value_" + str(i))
writer.close()
sparkey.writehash(self.hashfile, self.logfile)
def _random_access(self, num_entries, num_lookups):
reader = sparkey.HashReader(self.hashfile, self.logfile)
for i in xrange(0, num_lookups):
r = str(randint(0, num_entries - 1))
self.assertEquals("value_" + r, reader['key_' + r])
reader.close()
def _test(self, compression_type, num_entries, num_lookups):
print "Testing bulk insert of %d elements and %d random lookups" % (num_entries, num_lookups)
print " Candidate: Sparkey %s" % ("None" if compression_type == 0 else "Snappy")
t1 = time.clock()
self._create(compression_type, num_entries)
t2 = time.clock()
print " creation time (wall): %2.2f" % (t2 - t1)
print " throughput (puts/wallsec): %2.2f" % (num_entries / (t2 - t1))
print " file size: %d" % (os.stat(self.logfile).st_size + os.stat(self.hashfile).st_size)
self._random_access(num_entries, num_lookups)
t3 = time.clock()
print " lookup time (wall): %2.2f" % (t3 - t2)
print " throughput (lookups/wallsec): %2.2f" % (num_lookups / (t3 - t2))
def testBench(self):
self._test(sparkey.Compression.NONE, 1000, 1000*1000)
self._test(sparkey.Compression.NONE, 1000*1000, 1000*1000)
self._test(sparkey.Compression.NONE, 10*1000*1000, 1000*1000)
self._test(sparkey.Compression.NONE, 100*1000*1000, 1000*1000)
self._test(sparkey.Compression.SNAPPY, 1000, 1000*1000)
self._test(sparkey.Compression.SNAPPY, 1000*1000, 1000*1000)
self._test(sparkey.Compression.SNAPPY, 10*1000*1000, 1000*1000)
self._test(sparkey.Compression.SNAPPY, 100*1000*1000, 1000*1000)
# Running this file directly executes the benchmark suite via unittest.
if __name__ == '__main__': unittest.main()
| apache-2.0 | Python | |
9b68ee1e0ffb60ebffe0bb90da2512da4bbbeb99 | add split_from_listobj_and_tuple_it func | eufat/gender-ml,eufat/gender-ml | utils/split_listobj_and_tuple_it.py | utils/split_listobj_and_tuple_it.py |
def split_from_listobj_and_tuple_it():
    # TODO: placeholder -- per the commit message, intended to split items
    # out of a list object and return them as tuples; currently a stub.
    return 0
c6d70585e1266e4008e38548404a0bcffccfcecf | Create websocket server | cphyc/andremote,cphyc/andremote,cphyc/andremote | web_ws.py | web_ws.py | #!/usr/bin/env python3
"""Example for aiohttp.web websocket server
"""
import asyncio
import os
from aiohttp.web import Application, Response, MsgType, WebSocketResponse
import argparse
import sys
sys.path.append('../')
from pyxdotool.instruction import Instruction
import json
# Fix: the description was copy-pasted from an unrelated plotting script
# ("black hole simulation"); this program is the remote-control server.
parser = argparse.ArgumentParser(
    description='Websocket server that executes remote-control (xdotool) '
                'instructions received as JSON.')
parser.add_argument('--host', type=str, default='localhost')
parser.add_argument('--port', '-p', type=int, default=8000)

# Static control page served to plain HTTP requests, and the X display.
WS_FILE = os.path.join(os.path.dirname(__file__), 'websocket.html')
DISPLAY = ':0'
def parseRequest(data):
    """Translate JSON instruction(s) into pyxdotool calls and execute them.

    `data` is one instruction dict or a list of them.  Each dict maps an
    action name to its parameters; optional extra keyword arguments may
    be supplied under the 'args' key.  Returns the result of executing
    the accumulated instruction batch.

    Fix: the extra arguments are a dict, but several branches unpacked
    them positionally with `*args` (which passes only the dict's keys);
    every branch now uses `**args`.  Also uses the shared DISPLAY constant
    instead of a duplicated ':0' literal.
    """
    def parseExtraArgs(dic):
        # Optional per-instruction keyword arguments.
        return dic.get('args', {})

    i = Instruction(display=DISPLAY)
    if type(data) is dict:
        data = [data]
    for instr in data:
        if 'mouseMoveRelative' in instr:
            dx = instr['mouseMoveRelative']['dx']
            dy = instr['mouseMoveRelative']['dy']
            args = parseExtraArgs(instr['mouseMoveRelative'])
            i.mouseMoveRelative(dx, dy, **args)
        elif 'mouseMove' in instr:
            x = instr['mouseMove']['x']
            y = instr['mouseMove']['y']
            args = parseExtraArgs(instr['mouseMove'])
            i.mouseMove(x, y, **args)
        elif 'key' in instr:
            keys = instr['key']['keys']
            args = parseExtraArgs(instr['key'])
            i.key(keys, **args)
        elif 'search' in instr:
            regexp = instr['search']['regexp']
            args = parseExtraArgs(instr['search'])
            i.search(regexp, **args)
        elif 'windowActivate' in instr:
            args = parseExtraArgs(instr['windowActivate'])
            i.windowActivate(**args)
        elif 'windowFocus' in instr:
            args = parseExtraArgs(instr['windowFocus'])
            i.windowFocus(**args)
        elif 'sleep' in instr:
            time = instr['sleep']['time']
            i.sleep(time)
        elif 'click' in instr:
            button = instr['click']['button']
            args = parseExtraArgs(instr['click'])
            i.click(button, **args)
        else:
            print('Unsupported instruction', instr)
    print(i.instructions)
    return i.exec()
@asyncio.coroutine
def wsHandler(request):
    """Serve websocket.html to plain HTTP requests; otherwise upgrade to a
    websocket, execute each received JSON message via parseRequest() and
    echo the result back to the sending client.
    """
    resp = WebSocketResponse()
    ok, protocol = resp.can_start(request)
    if not ok:
        # Not a websocket handshake: serve the static control page instead.
        with open(WS_FILE, 'rb') as fp:
            return Response(body=fp.read(), content_type='text/html')
    yield from resp.prepare(request)
    print('Someone joined.')
    for ws in request.app['sockets']:
        ws.send_str('Someone joined')
    request.app['sockets'].append(resp)
    while True:
        msg = yield from resp.receive()
        if msg.tp == MsgType.text:
            obj = json.loads(msg.data)
            retVal = parseRequest(obj)
            # Only the originating client receives the execution result.
            for ws in request.app['sockets']:
                if ws is resp:
                    ws.send_str(json.dumps(retVal))
        else:
            # Close/error message: leave the receive loop and clean up.
            break
    request.app['sockets'].remove(resp)
    print('Someone disconnected.')
    for ws in request.app['sockets']:
        ws.send_str('Someone disconnected.')
    return resp
@asyncio.coroutine
def init(loop, args):
    """Create the aiohttp application and start listening on args.host:port.

    Returns (app, srv, handler) so the caller can shut them down cleanly.
    """
    app = Application(loop=loop)
    app['sockets'] = []
    app.router.add_route('GET', '/', wsHandler)
    handler = app.make_handler()
    srv = yield from loop.create_server(handler, args.host, args.port)
    print("Server started at http://{}:{}".format(args.host, args.port))
    return app, srv, handler
@asyncio.coroutine
def finish(app, srv, handler):
    """Close every client websocket, then stop the server gracefully."""
    for ws in app['sockets']:
        ws.close()
    app['sockets'].clear()
    # Give close frames a moment to be flushed before tearing down.
    yield from asyncio.sleep(0.1)
    srv.close()
    yield from handler.finish_connections()
    yield from srv.wait_closed()
if __name__ == '__main__':
    args = parser.parse_args()
    loop = asyncio.get_event_loop()
    app, srv, handler = loop.run_until_complete(init(loop, args))
    try:
        loop.run_forever()
    except KeyboardInterrupt:
        # Ctrl-C: shut the server down cleanly before exiting.
        loop.run_until_complete(finish(app, srv, handler))
| mit | Python | |
ece4598e6297ef071b1c928435efb3bee73e3ddb | Backup script. | liuche/prox-server | scripts/backup.py | scripts/backup.py | import json
from app.firebase import db
"""
Backup script for saving contents at the path in Firebase.
"""
def backup(path, out_path="out.json"):
    """Dump the Firebase subtree at `path` into a local JSON file.

    path -- Firebase path to back up (see app/constants for table names).
    out_path -- destination file; the default keeps the old behaviour.
    """
    # Renamed the local from `backup`, which shadowed this function itself.
    snapshot = db().child(path).get().val()
    with open(out_path, "w") as f:
        json.dump(snapshot, f)
if __name__ == '__main__':
    # See app/constants for table prefixes and suffixes
    # Default target: the Chicago venues subtree.
    path = "branches/02-chicago/venues"
    backup(path)
| mpl-2.0 | Python | |
2ad0a7f50b6b120c6e769033037c0e1661d2480d | Add spacer/exp_name.py to generate names for experiments | agurfinkel/brunch,agurfinkel/brunch | spacer/exp_name.py | spacer/exp_name.py | #! /usr/bin/env python3
# Name for experiments directory
import sys
import words
import argparse
import os.path
from datetime import datetime
import platform
class ExpNamer(object):
    """Command that prints a unique, human-readable experiment name."""

    def __init__(self):
        self._name = 'exp_name'
        self._help = 'Name experiment'

    def mk_arg_parser(self, ap):
        """Attach this command's arguments to `ap` and return it."""
        ap.add_argument('idx',
                        metavar='FILE',
                        help='Index of benchmarks for this experiment')
        return ap

    def run(self, args=None):
        """Print `<index>.<host>.<noun>.<timestamp>` for the given index file."""
        base_name = os.path.basename(args.idx)
        idx = os.path.splitext(base_name)[0]
        date = datetime.now().strftime('%d_%m_%Y-t%H-%M-%S')
        noun = words.get_a_noun(length=7, bound='atmost', seed=date).lower()
        node = platform.node().split('.')[0]
        print('.'.join([idx, node, noun, date]))

    def main(self, argv):
        """Parse `argv` and run the command."""
        parser = argparse.ArgumentParser(prog=self._name, description=self._help)
        parser = self.mk_arg_parser(parser)
        return self.run(parser.parse_args(argv))
def main():
    """Entry point: run the ExpNamer command on the CLI arguments."""
    return ExpNamer().main(sys.argv[1:])
if __name__ == '__main__':
    # Exit with the command's return code.
    sys.exit(main())
| mit | Python | |
8b467efd1f998d05da0272a284773501f0b330ff | Add a test file which was missing from a recent branch | grzes/djangae,grzes/djangae,potatolondon/djangae,grzes/djangae,potatolondon/djangae | djangae/tests/test_meta_queries.py | djangae/tests/test_meta_queries.py | from django.db import models
from djangae.test import TestCase
from djangae.contrib import sleuth
class MetaQueryTestModel(models.Model):
    # Minimal model used only by the query tests below.
    field1 = models.CharField(max_length=32)
class PrimaryKeyFilterTests(TestCase):
    """Datastore-level behaviour of pk__in / __in filters under Djangae."""
    def test_pk_in_with_slicing(self):
        """Slicing entirely past the available results must return nothing."""
        i1 = MetaQueryTestModel.objects.create();
        self.assertFalse(
            MetaQueryTestModel.objects.filter(pk__in=[i1.pk])[9999:]
        )
        self.assertFalse(
            MetaQueryTestModel.objects.filter(pk__in=[i1.pk])[9999:10000]
        )
    def test_limit_correctly_applied_per_branch(self):
        """Each datastore query branch of an __in filter gets the slice limit."""
        MetaQueryTestModel.objects.create(field1="test")
        MetaQueryTestModel.objects.create(field1="test2")
        with sleuth.watch('google.appengine.api.datastore.Query.Run') as run_calls:
            list(MetaQueryTestModel.objects.filter(field1__in=["test", "test2"])[:1])
            # [:1] -> limit=1 on both datastore branches.
            self.assertEqual(1, run_calls.calls[0].kwargs['limit'])
            self.assertEqual(1, run_calls.calls[1].kwargs['limit'])
        with sleuth.watch('google.appengine.api.datastore.Query.Run') as run_calls:
            list(MetaQueryTestModel.objects.filter(field1__in=["test", "test2"])[1:2])
            # [1:2] requires two results per branch before offsetting.
            self.assertEqual(2, run_calls.calls[0].kwargs['limit'])
            self.assertEqual(2, run_calls.calls[1].kwargs['limit'])
| bsd-3-clause | Python | |
62c04b70178f3df8a8c7cbf01de0896d3e808698 | Create __init__.py | OdooCommunityWidgets/mass_mailing_themes_boilerplate,OdooCommunityWidgets/mass_mailing_themes_community | mass_mailing_themes_boilerplate/__init__.py | mass_mailing_themes_boilerplate/__init__.py | mit | Python | ||
615247c28d58fbbff40f5e4122441d77acb19003 | Integrate notification app in settings and add basic structure of files | Fleeg/fleeg-platform,Fleeg/fleeg-platform | notification/urls.py | notification/urls.py | from django.conf.urls import url
from link.views import LinkView, LinkReactionView, LinkCommentView
# Routes for creating, reacting to and commenting on posts.
# NOTE(review): this lives in the notification app but imports from
# link.views and names every route link_* -- confirm this is intentional.
urlpatterns = [
    url(r'^$', LinkView.new, name='link_new'),
    url(r'^(?P<post_id>[0-9]+)/add/$', LinkView.add, name='link_add'),
    url(r'^(?P<post_id>[0-9]+)/react/$', LinkReactionView.react, name='link_react'),
    url(r'^(?P<post_id>[0-9]+)/unreact/$', LinkReactionView.unreact, name='link_unreact'),
    url(r'^(?P<post_id>[0-9]+)/comment/$', LinkCommentView.comment, name='link_comment'),
]
| agpl-3.0 | Python | |
09c9f6ba56890a2a56eaa77eae47cda92a39965e | Add unit tests for test_url | moreati/pylons,Pylons/pylons,Pylons/pylons,moreati/pylons,Pylons/pylons,moreati/pylons | tests/test_units/test_url.py | tests/test_units/test_url.py | import unittest
from repoze.bfg.testing import cleanUp
class TestRouteUrl(unittest.TestCase):
    """Exercise pylons.url.route_url against a stubbed routes mapper."""

    def setUp(self):
        cleanUp()

    def tearDown(self):
        cleanUp()

    def _callFUT(self, *arg, **kw):
        # FUT = "function under test"; imported lazily so setUp/tearDown
        # fully control the component registry first.
        from pylons.url import route_url
        return route_url(*arg, **kw)

    def test_with_elements(self):
        """Positional elements are appended to the generated path."""
        from repoze.bfg.interfaces import IRoutesMapper
        req = _makeRequest()
        stub_mapper = DummyRoutesMapper(result='/1/2/3')
        req.registry.registerUtility(stub_mapper, IRoutesMapper)
        generated = self._callFUT('flub', req, 'extra1', 'extra2',
                                  a=1, b=2, c=3, _query={'a':1},
                                  _anchor=u"foo")
        self.assertEqual(generated,
                         'http://example.com:5432/1/2/3/extra1/extra2?a=1#foo')

    def test_no_elements(self):
        """Without elements the mapper's path is used verbatim."""
        from repoze.bfg.interfaces import IRoutesMapper
        req = _makeRequest()
        stub_mapper = DummyRoutesMapper(result='/1/2/3')
        req.registry.registerUtility(stub_mapper, IRoutesMapper)
        generated = self._callFUT('flub', req, a=1, b=2, c=3, _query={'a':1},
                                  _anchor=u"foo")
        self.assertEqual(generated,
                         'http://example.com:5432/1/2/3?a=1#foo')

    def test_it_generation_error(self):
        """Exceptions raised by the mapper's generate() must propagate."""
        from repoze.bfg.interfaces import IRoutesMapper
        req = _makeRequest()
        stub_mapper = DummyRoutesMapper(raise_exc=KeyError)
        req.registry.registerUtility(stub_mapper, IRoutesMapper)
        stub_mapper.raise_exc = KeyError
        self.assertRaises(KeyError, self._callFUT, 'flub', req, a=1)

    def test_generate_doesnt_receive_query_or_anchor(self):
        """_query/_anchor are consumed by route_url, never forwarded."""
        from repoze.bfg.interfaces import IRoutesMapper
        stub_mapper = DummyRoutesMapper(result='')
        from zope.component import getSiteManager
        site_manager = getSiteManager()
        site_manager.registerUtility(stub_mapper, IRoutesMapper)
        req = DummyRequest()
        generated = self._callFUT('flub', req, _query=dict(name='some_name'))
        self.assertEqual(stub_mapper.kw, {})  # shouldnt have anchor/query
        self.assertEqual(generated, 'http://example.com:5432?name=some_name')

    def test_with_app_url(self):
        """_app_url overrides the request's application_url prefix."""
        from repoze.bfg.interfaces import IRoutesMapper
        req = _makeRequest()
        stub_mapper = DummyRoutesMapper(result='/1/2/3')
        req.registry.registerUtility(stub_mapper, IRoutesMapper)
        generated = self._callFUT('flub', req, _app_url='http://example2.com')
        self.assertEqual(generated, 'http://example2.com/1/2/3')

    def test_custom_url_gen(self):
        """A route carrying custom_url_generator still yields the mapper path."""
        from repoze.bfg.interfaces import IRoutesMapper
        req = _makeRequest()
        stub_mapper = DummyRoutesMapper(result='/smith', routes={'flub': DummyRoute})
        req.registry.registerUtility(stub_mapper, IRoutesMapper)
        generated = self._callFUT('flub', req, a=1, b=2, c=3, _query={'a':1})
        self.assertEqual(generated, 'http://example.com:5432/smith')
class DummyRequest:
    """Minimal request stand-in exposing only what route_url touches."""

    # route_url concatenates paths directly, so no trailing slash here.
    application_url = 'http://example.com:5432'

    def __init__(self, environ=None):
        self.environ = {} if environ is None else environ
class DummyRoutesMapper:
    """Routes-mapper stub: records generate() kwargs, returns a canned path.

    Pass ``raise_exc`` (or assign the attribute) to make generate() raise.
    """
    raise_exc = None

    def __init__(self, result='/1/2/3', raise_exc=False, routes=None):
        self.result = result
        # Bug fix: the constructor used to silently drop this argument,
        # forcing callers to assign ``mapper.raise_exc`` after construction.
        if raise_exc:
            self.raise_exc = raise_exc
        # Avoid the shared mutable-default pitfall of ``routes={}``.
        self.routes = {} if routes is None else routes

    def generate(self, *route_args, **kw):
        # Remember the kwargs so tests can assert what was forwarded.
        self.kw = kw
        if self.raise_exc:
            raise self.raise_exc
        return self.result
class DummyRoute:
    """Route stub whose custom generator echoes its route/request back."""

    @staticmethod
    def custom_url_generator(route_name, request, *elements, **kw):
        # Mimic a generator that consumed all elements/kwargs.
        result = (route_name, request, [], {})
        return result
def _makeRequest(environ=None):
    """Build a DummyRequest carrying a fresh repoze.bfg component registry."""
    from repoze.bfg.registry import Registry
    req = DummyRequest(environ)
    req.registry = Registry()
    return req
| bsd-3-clause | Python | |
ca1cfd2514d382b1187eab880014b6a611d3568d | add some testing for Resources and ResourceAttributesMixin | infoxchange/slumber,ministryofjustice/slumber,CloudNcodeInc/slumber,IAlwaysBeCoding/slumber,zongxiao/slumber,IAlwaysBeCoding/More,futurice/slumber,s-block/slumber,samgiles/slumber,jannon/slumber | tests/resource.py | tests/resource.py | import mock
import unittest
import httplib2
import slumber
class ResourceAttributesMixinTestCase(unittest.TestCase):
    """Verify that unknown attributes fall through to child Resources."""
    def test_attribute_fallback_to_resource(self):
        # Minimal concrete user of the mixins: ResourceAttributesMixin
        # supplies the attribute fallback, MetaMixin consumes inner Meta.
        class ResourceMixinTest(slumber.ResourceAttributesMixin, slumber.MetaMixin, object):
            class Meta:
                authentication = None
                base_url = None
                format = "json"
        rmt = ResourceMixinTest(base_url="http://example.com/")
        # Any undeclared attribute should come back as a slumber.Resource.
        self.assertTrue(isinstance(rmt.example, slumber.Resource))
class ResourceTestCase(unittest.TestCase):
    """Behavioral tests for slumber.Resource with a mocked HTTP transport."""
    def setUp(self):
        self.base_resource = slumber.Resource(base_url="http://example/api/v1/test")
    def test_get_serializer(self):
        self.assertTrue(isinstance(self.base_resource.get_serializer(), slumber.Serializer))
    def test_request_200(self):
        """_request should return the response/content pair untouched on 200."""
        # Mock a Response Object
        r = mock.Mock(spec=httplib2.Response)
        r.status = 200
        # Mock The httplib2.Http class
        self.base_resource._http = mock.Mock(spec=httplib2.Http)
        self.base_resource._http.request.return_value = (r, "Mocked Content")
        resp, content = self.base_resource._request("GET")
        self.assertTrue(resp is r)
        self.assertEqual(content, "Mocked Content")
        # The transport must be called once, with the serializer's content type.
        self.base_resource._http.request.assert_called_once_with(
            "http://example/api/v1/test",
            "GET",
            body=None,
            headers={"content-type": self.base_resource.get_serializer().get_content_type()}
        )
| bsd-2-clause | Python | |
7f4cfe09a29202475b0941558f8ab722e63cee7e | Add MPL 2.0 to license trove | apache/incubator-allura,Bitergia/allura,apache/allura,lym/allura-git,lym/allura-git,apache/allura,Bitergia/allura,apache/incubator-allura,lym/allura-git,heiths/allura,Bitergia/allura,apache/incubator-allura,Bitergia/allura,heiths/allura,Bitergia/allura,heiths/allura,apache/incubator-allura,heiths/allura,leotrubach/sourceforge-allura,leotrubach/sourceforge-allura,leotrubach/sourceforge-allura,apache/allura,lym/allura-git,leotrubach/sourceforge-allura,apache/allura,lym/allura-git,apache/allura,heiths/allura | scripts/migrations/023-add-new-trove-license-category.py | scripts/migrations/023-add-new-trove-license-category.py | import sys
import logging
from ming.orm.ormsession import ThreadLocalORMSession
from allura import model as M
log = logging.getLogger(__name__)
def main():
    """Insert the 'MPL 2.0' trove license category and persist it."""
    category_fields = dict(
        trove_cat_id=905,
        trove_parent_id=14,
        shortname='mpl20',
        fullname='Mozilla Public License 2.0 (MPL 2.0)',
        fullpath='License :: OSI-Approved Open Source :: Mozilla Public License 2.0 (MPL 2.0)',
    )
    M.TroveCategory(**category_fields)
    # Flush the new row to the database and discard session state.
    ThreadLocalORMSession.flush_all()
    ThreadLocalORMSession.close_all()

if __name__ == '__main__':
    main()
| apache-2.0 | Python | |
07cccdbb7fdc6919503c5b11bca8604e1f7a0d59 | Create roman_numeral_convert.py | SamGriffith3/PyLearning-Projects,SamGriffith3/PyLearning-Projects | projects/roman_numeral_convert.py | projects/roman_numeral_convert.py |
# Roman numerals use the symbols I, V, X, L, C, D and M.
# Lookup tables indexed by a decimal digit (index 0 = empty string).
ONES = ['', 'I', 'II', 'III', 'IV', 'V', 'VI', 'VII', 'VIII', 'IX']
TENS = ['', 'X', 'XX', 'XXX', 'XL', 'L', 'LX', 'LXX', 'LXXX', 'XC']
HUNDREDS = ['', 'C', 'CC', 'CCC', 'CD', 'D', 'DC', 'DCC', 'DCCC', 'CM']
THOUSANDS = ['', 'M', 'MM', 'MMM']


def stringer(x):
    """Return the Roman numeral for an integer ``x`` in 1..3999.

    Fixes the original version, which indexed with string characters,
    referenced undefined names (I, V, X, ...) and assumed 4-digit input.

    Raises:
        ValueError: if ``x`` is outside the representable range.
    """
    if not 1 <= x <= 3999:
        raise ValueError("x must be between 1 and 3999")
    thousands, rem = divmod(x, 1000)
    hundreds, rem = divmod(rem, 100)
    tens, ones = divmod(rem, 10)
    return THOUSANDS[thousands] + HUNDREDS[hundreds] + TENS[tens] + ONES[ones]


if __name__ == "__main__":
    print(stringer(int(input("Your Number(up to 3999): "))))
| mit | Python | |
4e02394f87bec9f73364738550c0b441beb80696 | Build Tower | SelvorWhim/competitive,SelvorWhim/competitive,SelvorWhim/competitive,SelvorWhim/competitive | Codewars/BuildTower.py | Codewars/BuildTower.py | def tower_builder(n_floors):
    # Floor i (1-based) has 2*i - 1 stars, padded with n_floors - i spaces
    # on each side, so every row is exactly 2*n_floors - 1 characters wide.
    return [((n_floors - i)*' ' + (2*i - 1)*'*' + (n_floors - i)*' ') for i in range(1,n_floors+1)]
| unlicense | Python | |
b17c1bf616ad3bc1d56b106bc8a606866b8f3f1a | Create Knapsack01.py | Chasego/codi,cc13ny/Allin,Chasego/cod,Chasego/codirit,cc13ny/Allin,cc13ny/algo,Chasego/codi,Chasego/cod,Chasego/cod,cc13ny/algo,cc13ny/algo,cc13ny/Allin,cc13ny/Allin,Chasego/cod,Chasego/cod,Chasego/codi,Chasego/codirit,Chasego/codirit,Chasego/codirit,cc13ny/Allin,cc13ny/algo,Chasego/codirit,Chasego/codi,Chasego/codi,cc13ny/algo | ap/py/Knapsack01.py | ap/py/Knapsack01.py | '''
You can run it directly to see results.
'''
def knapsack01(sizes, vals, S):
    """Build the 0/1-knapsack dynamic-programming table.

    aux[i][s] is the best total value achievable using only items 0..i
    with capacity s.  The whole table is returned so find_path() can
    reconstruct which items were chosen.
    """
    n = len(vals)
    aux = [[-1] * (S + 1) for _ in range(n)]
    if n == 0:
        return aux
    # Base row: only item 0 is available.
    for s in range(S + 1):
        aux[0][s] = vals[0] if sizes[0] <= s else 0
    for i in range(1, n):
        for s in range(S + 1):
            # Either skip item i, or -- if it fits -- take it on top of
            # the optimum for the remaining capacity.
            take = aux[i - 1][s - sizes[i]] + vals[i] if sizes[i] <= s else 0
            aux[i][s] = max(aux[i - 1][s], take)
    return aux
def find_path(aux, sizes):
    """Recover the ascending indices of the items chosen by knapsack01."""
    capacity = len(aux[0]) - 1
    chosen = []
    for i in range(len(aux) - 1, 0, -1):
        # A value jump between rows i-1 and i means item i was taken,
        # so deduct its size from the remaining capacity.
        if aux[i][capacity] > aux[i - 1][capacity]:
            chosen.append(i)
            capacity -= sizes[i]
    # Item 0 is included whenever it still fits the leftover capacity.
    if capacity >= sizes[0]:
        chosen.append(0)
    chosen.reverse()
    return chosen
def testcase():
    """Return the demo (sizes, vals, capacity) tuples used by the driver."""
    return [
        ([1, 2, 3, 2, 2], [8, 4, 0, 5, 3], 5),
        ([1, 2, 3, 2, 2], [8, 4, 0, 5, 3], 6),
        ([1, 2, 3, 2, 2], [8, 4, 0, 5, 3], 7),
        ([1], [7], 8),
    ]
def print_info(tests):
    """Run every test case and pretty-print the DP table and chosen subset.

    Python 2 module: uses print statements throughout.
    """
    n = 60
    print '#' * n
    print
    print 'Problem: KnapSack01'
    print
    print '=' * n
    for j, t in enumerate(tests):
        sizes = t[0]
        vals = t[1]
        S = t[2]
        # Solve, then reconstruct the chosen item subset from the table.
        aux = knapsack01(sizes, vals, S)
        idx = find_path(aux, sizes)
        sub_sizes = [sizes[i] for i in idx]
        sub_vals = [vals[i] for i in idx]
        print
        print 'test case #' + str(j) + ':'
        print ' sizes: ' + str(sizes)
        print ' vals : ' + str(vals)
        print ' S : ' + str(S)
        print
        print ' Auxiliary Matrx :'
        for a in aux:
            print ' ' + str(a)
        print
        print ' Subset Indices : ' + str(idx)
        print ' Subset Sizes : ' + str(sub_sizes)
        print ' Subset Vals : ' + str(sub_vals)
        print
        # Sums let the reader verify the capacity bound and optimum value.
        print ' Subset Size Sum: ' + str(sum(sub_sizes))
        print ' Subset Val Sum: ' + str(sum(sub_vals))
        print
        print '=' * n
def main():
    # Demo driver: run all bundled test cases and print their solutions.
    print_info(testcase())

if __name__ == "__main__":
    main()
| mit | Python | |
acedeb97935c53d0e7f1e39b2282f8a90bf379ee | add test case | VeryCB/flask-slack | test_flask.py | test_flask.py | from pytest import fixture
from flask import Flask
from flask_slack import Slack
class App(object):
    """Test harness: a debug Flask app wired to flask_slack plus a client."""
    def __init__(self):
        self.app = Flask(__name__)
        self.app.debug = True
        self.slack = Slack(self.app)
        # All slash-command traffic is routed through the extension's dispatch.
        self.app.add_url_rule('/', view_func=self.slack.dispatch)
        self.client = self.app.test_client()
@fixture
def app():
    # Fresh, isolated harness per test.
    return App()
def test_register_command(app):
    """With no commands registered, dispatch reports the unknown command."""
    res = app.client.get('/')
    assert res.status_code == 200
    assert res.data == b'Command None is not found in team None'
| bsd-3-clause | Python | |
6beccf0c0b4e7788403415c05ae9f31e6c0a89eb | Add tests for Generalized Procrustes Analysis (GPA) | MaxHalford/Prince | tests/test_gpa.py | tests/test_gpa.py | import unittest
import numpy as np
from sklearn import datasets
from sklearn import decomposition
from sklearn.utils import estimator_checks
import prince
class TestGPA(unittest.TestCase):
# def setUp(self):
def __init__(self):
# Create a list of 2-D circles with different locations and rotations
n_shapes = 4
n_points = 12
n_dims = 2
shape_sizes = np.arange(1, n_shapes + 1)
shape_angle_offsets = 10 * np.arange(n_shapes)
shape_center_offsets = np.tile(np.arange(n_shapes), (n_dims, 1))
base_angles = np.linspace(0, 2 * np.pi, num=n_points, endpoint=False)
# Size (n_shapes, n_points)
angles = base_angles[np.newaxis, :] + shape_angle_offsets[:, np.newaxis]
# Calculate along dimensions
x = (
np.cos(angles) * shape_sizes[:, np.newaxis]
+ shape_center_offsets[0][:, np.newaxis]
)
y = (
np.sin(angles) * shape_sizes[:, np.newaxis]
+ shape_center_offsets[1][:, np.newaxis]
)
self.shapes = np.stack([x, y], axis=-1)
def test_fit(self):
gpa = prince.GPA()
self.assertIsInstance(gpa.fit(self.shapes), prince.GPA)
def test_transform(self):
gpa = prince.GPA(copy=True)
aligned_shapes = gpa.fit(self.shapes).transform(self.shapes)
self.assertIsInstance(aligned_shapes, np.ndarray)
self.assertEqual(self.shapes.shape, aligned_shapes.shape)
def test_fit_transform(self):
gpa = prince.GPA()
aligned_shapes = gpa.fit_transform(self.shapes)
self.assertIsInstance(aligned_shapes, np.ndarray)
def test_fit_transform_single(self):
"""Aligning a single shape should return the same shape."""
gpa = prince.GPA()
shapes = self.shapes.shape[0:1]
aligned_shapes = gpa.fit_transform(shapes)
np.testing.assert_array_equal(shapes, aligned_shapes)
def test_copy(self):
shapes_copy = np.copy(self.shapes)
gpa = prince.GPA(copy=True)
gpa.fit(shapes_copy)
np.testing.assert_array_equal(self.shapes, shapes_copy)
gpa = prince.GPA(copy=False)
gpa.fit(shapes_copy)
self.assertRaises(
AssertionError, np.testing.assert_array_equal, self.shapes, shapes_copy
)
def test_check_estimator(self):
estimator_checks.check_estimator(prince.GPA(as_array=True))
| mit | Python | |
7ab615aa37263a38ca33fd0d9d8b7f7ec37442ca | add tests for low-level 'nacl.c' API | JackWink/pynacl,reaperhulk/pynacl,pyca/pynacl,hoffmabc/pynacl,lmctv/pynacl,reaperhulk/pynacl,ucoin-io/cutecoin,reaperhulk/pynacl,scholarly/pynacl,reaperhulk/pynacl,lmctv/pynacl,alex/pynacl,pyca/pynacl,xueyumusic/pynacl,pyca/pynacl,JackWink/pynacl,ucoin-io/cutecoin,lmctv/pynacl,JackWink/pynacl,xueyumusic/pynacl,hoffmabc/pynacl,scholarly/pynacl,alex/pynacl,alex/pynacl,hoffmabc/pynacl,xueyumusic/pynacl,lmctv/pynacl,pyca/pynacl,pyca/pynacl,scholarly/pynacl,ucoin-io/cutecoin,JackWink/pynacl,scholarly/pynacl,xueyumusic/pynacl,reaperhulk/pynacl,alex/pynacl,Insoleet/cutecoin,lmctv/pynacl,ucoin-bot/cutecoin | tests/test_raw.py | tests/test_raw.py | # Copyright 2013 Donald Stufft and individual contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from binascii import hexlify
from nacl import c
import hashlib
def test_hash():
    """crypto_hash and its sha512/sha256 variants must agree with hashlib."""
    msg = "message"
    digest = c.crypto_hash(msg)
    assert len(digest) == c.crypto_hash_BYTES
    assert hexlify(digest) == "f8daf57a3347cc4d6b9d575b31fe6077e2cb487f60a96233c08cb479dbf31538cc915ec6d48bdbaa96ddc1a16db4f4f96f37276cfcb3510b8246241770d5952c"
    assert hexlify(digest) == hashlib.sha512(msg).hexdigest()
    sha512_digest = c.crypto_hash_sha512(msg)
    assert len(sha512_digest) == c.crypto_hash_sha512_BYTES
    assert hexlify(sha512_digest) == hexlify(digest)
    sha256_digest = c.crypto_hash_sha256(msg)
    assert len(sha256_digest) == c.crypto_hash_sha256_BYTES
    assert hexlify(sha256_digest) == "ab530a13e45914982b79f9b7e3fba994cfd1f3fb22f71cea1afbf02b460c6d1d"
    assert hexlify(sha256_digest) == hashlib.sha256(msg).hexdigest()
def test_secretbox():
    """Round-trip a message through crypto_secretbox / crypto_secretbox_open."""
    key = "\x00" * c.crypto_secretbox_KEYBYTES
    plaintext = "message"
    nonce = "\x01" * c.crypto_secretbox_NONCEBYTES
    # NOTE: argument order differs from the NaCl C API (msg, nonce, key).
    ciphertext = c.crypto_secretbox(key, plaintext, nonce)
    assert len(ciphertext) == len(plaintext) + c.crypto_secretbox_BOXZEROBYTES
    assert hexlify(ciphertext) == "3ae84dfb89728737bd6e2c8cacbaf8af3d34cc1666533a"
    decrypted = c.crypto_secretbox_open(key, ciphertext, nonce)
    assert decrypted == plaintext
def test_box():
    """Box round-trip: precomputed shared keys and direct box/open agree."""
    # NOTE: argument order differs from the NaCl C/C++ APIs throughout.
    alice_sk, alice_pk = c.crypto_box_keypair()
    assert len(alice_sk) == c.crypto_box_SECRETKEYBYTES
    assert len(alice_pk) == c.crypto_box_PUBLICKEYBYTES
    bob_sk, bob_pk = c.crypto_box_keypair()
    # Both directions of beforenm must derive the same shared key.
    shared_ab = c.crypto_box_beforenm(alice_sk, bob_pk)
    assert len(shared_ab) == c.crypto_box_BEFORENMBYTES
    shared_ba = c.crypto_box_beforenm(bob_sk, alice_pk)
    assert hexlify(shared_ab) == hexlify(shared_ba)
    plaintext = "message"
    nonce = "\x01" * c.crypto_box_NONCEBYTES
    boxed_afternm = c.crypto_box_afternm(shared_ab, plaintext, nonce)
    assert len(boxed_afternm) == len(plaintext) + c.crypto_box_BOXZEROBYTES
    # Direct box() must yield the same ciphertext as the two-step path.
    boxed = c.crypto_box(alice_sk, bob_pk, plaintext, nonce)
    assert hexlify(boxed) == hexlify(boxed_afternm)
    opened = c.crypto_box_open(bob_sk, alice_pk, boxed_afternm, nonce)
    assert opened == plaintext
    opened_afternm = c.crypto_box_open_afternm(shared_ab, boxed_afternm, nonce)
    assert opened_afternm == plaintext
def test_sign():
    """Signing round-trip for both seeded and random keypairs."""
    seed = "\x00" * c.crypto_sign_SEEDBYTES
    sk, pk = c.crypto_sign_seed_keypair(seed)
    assert len(pk) == c.crypto_sign_PUBLICKEYBYTES
    assert len(sk) == c.crypto_sign_SECRETKEYBYTES
    sk, pk = c.crypto_sign_keypair()
    assert len(pk) == c.crypto_sign_PUBLICKEYBYTES
    assert len(sk) == c.crypto_sign_SECRETKEYBYTES
    plaintext = "message"
    signed = c.crypto_sign(sk, plaintext)
    # The signed message is the signature prepended to the message.
    assert len(signed) == len(plaintext) + c.crypto_sign_BYTES
    recovered = c.crypto_sign_open(pk, signed)
    assert recovered == plaintext
def secret_scalar():
    """Helper: return a (secretkey, pubkey) pair usable as a scalar."""
    sk, pk = c.crypto_box_keypair()
    assert len(sk) == c.crypto_box_SECRETKEYBYTES
    # A box secret key has exactly the size scalarmult expects.
    assert c.crypto_box_SECRETKEYBYTES == c.crypto_scalarmult_BYTES
    return sk, pk
def test_scalarmult():
    """scalarmult_base(secret) must reproduce the matching public key."""
    x_scalar, x_public = secret_scalar()
    assert len(x_scalar) == 32
    y_scalar, y_public = secret_scalar()
    base_point_product = c.crypto_scalarmult_base(x_scalar)
    assert hexlify(base_point_product) == hexlify(x_public)
| apache-2.0 | Python | |
f86da5eddec2dd37f4797ea1caf404e8fec82701 | add unit tests for query parsing | vimeo/graph-explorer,dbirchak/graph-explorer,dbirchak/graph-explorer,dbirchak/graph-explorer,vimeo/graph-explorer,dbirchak/graph-explorer,vimeo/graph-explorer,vimeo/graph-explorer | test_query.py | test_query.py | from query import parse_query
import copy
# Expected parse result for an empty query string; individual tests
# deepcopy this and tweak only the keys a given query should change.
default_parsed_query = {
    'from': '-24hours',
    'to': 'now',
    'min': None,
    'max': None,
    'avg_by': {},
    'limit_targets': 500,
    'avg_over': None,
    'patterns': ['target_type=', 'unit='],
    'group_by': ['target_type=', 'unit=', 'server'],
    'sum_by': {},
    'statement': 'graph'
}
def test_query_basic():
    """An empty query yields the defaults; bare words become extra patterns."""
    query = parse_query("")
    assert query == default_parsed_query
    query = parse_query("foo bar")
    new = copy.deepcopy(default_parsed_query)
    new['patterns'].extend(['foo', 'bar'])
    assert query == new
def test_query_advanced():
    """Clause parsing: group/avg/sum by, from/to range, avg over, min/max."""
    query = parse_query("octo -20hours unit=b/s memory group by foo avg by barsum by baz")
    new = copy.deepcopy(default_parsed_query)
    # Expected: "avg by" consumes "barsum" while the trailing "by baz"
    # falls through into plain patterns.
    new['patterns'].extend(['octo', '-20hours', 'unit=b/s', 'memory', 'by', 'baz'])
    new['avg_by'] = {'barsum': ['']}
    new['group_by'] = ['target_type=', 'unit=', 'foo']
    assert query == new
    query = parse_query("stack from -20hours to -10hours avg over 10M sum by foo:bucket1|bucket2,bar min 100 max 200")
    new = copy.deepcopy(default_parsed_query)
    new['statement'] = 'stack'
    new['avg_over'] = (10, 'M')
    new['from'] = '-20hours'
    new['to'] = '-10hours'
    new['min'] = '100'
    new['max'] = '200'
    # "foo" is bucketed into bucket1/bucket2; "bar" gets the default bucket.
    new['sum_by'] = {'foo': ['bucket1', 'bucket2'], 'bar': ['']}
    assert query == new
| apache-2.0 | Python | |
0baca9564c9df7b06645f71abdda0fe3090f46a6 | Add a test-case for lit xunit output | GPUOpen-Drivers/llvm,dslab-epfl/asap,llvm-mirror/llvm,llvm-mirror/llvm,llvm-mirror/llvm,apple/swift-llvm,apple/swift-llvm,llvm-mirror/llvm,apple/swift-llvm,GPUOpen-Drivers/llvm,dslab-epfl/asap,dslab-epfl/asap,apple/swift-llvm,llvm-mirror/llvm,llvm-mirror/llvm,dslab-epfl/asap,dslab-epfl/asap,llvm-mirror/llvm,apple/swift-llvm,GPUOpen-Drivers/llvm,GPUOpen-Drivers/llvm,GPUOpen-Drivers/llvm,GPUOpen-Drivers/llvm,llvm-mirror/llvm,GPUOpen-Drivers/llvm,dslab-epfl/asap,llvm-mirror/llvm,GPUOpen-Drivers/llvm,apple/swift-llvm,apple/swift-llvm,apple/swift-llvm,dslab-epfl/asap | utils/lit/tests/xunit-output.py | utils/lit/tests/xunit-output.py | # Check xunit output
# RUN: %{lit} --xunit-xml-output %t.xunit.xml %{inputs}/test-data
# RUN: FileCheck < %t.xunit.xml %s
# CHECK: <?xml version="1.0" encoding="UTF-8" ?>
# CHECK: <testsuites>
# CHECK: <testsuite name='test-data' tests='1' failures='0'>
# CHECK: <testcase classname='test-data.' name='metrics.ini' time='0.00'/>
# CHECK: </testsuite>
# CHECK: </testsuites> | apache-2.0 | Python | |
aff4fbae6933f33898f1a32511d9e4cc0b44fef5 | Add permissions class (made via builder). | SunDwarf/curious | curious/dataclasses/permissions.py | curious/dataclasses/permissions.py | # I'm far too lazy to type out each permission bit manually.
# So here's a helper method.
def build_permissions_class(name: str="Permissions"):
    """Dynamically build a class exposing one boolean property per Discord
    permission bit, backed by an integer ``bitfield`` attribute."""

    # Discord's permission bit layout; some bit positions are intentionally
    # skipped, which is why this is a dict rather than an enumeration.
    bit_positions = {
        "create_instant_invite": 0,
        "kick_members": 1,
        "ban_members": 2,
        "administrator": 3,
        "manage_channels": 4,
        "manage_server": 5,
        "add_reactions": 6,
        "read_messages": 10,
        "send_messages": 11,
        "send_tts_messages": 12,
        "manage_messages": 13,
        "embed_links": 14,
        "attach_files": 15,
        "read_message_history": 16,
        "mention_everyone": 17,
        "use_external_emojis": 18,
        "voice_connect": 20,
        "voice_speak": 21,
        "voice_mute_members": 22,
        "voice_deafen_members": 23,
        "voice_move_members": 24,
        "voice_use_voice_activation": 25,
        "change_nickname": 26,
        "manage_nicknames": 27,
        "manage_roles": 28,
        "manage_webhooks": 29,
        "manage_emojis": 30,
        # remaining bits are unused
    }

    def __init__(self, value: int = 0):
        """
        Creates a new Permissions object.

        :param value: The bitfield value of the permissions object.
        """
        self.bitfield = value

    def _get_bit(self, bit: int) -> bool:
        """Return True if ``bit`` is set in the internal bitfield."""
        return self.bitfield & (1 << bit) != 0

    def _set_bit(self, bit: int, value: bool):
        """Set or clear ``bit`` in the internal bitfield."""
        mask = 1 << bit
        self.bitfield = (self.bitfield | mask) if value else (self.bitfield & ~mask)

    def __eq__(self, other):
        return self.bitfield == other.bitfield

    def _make_property(perm_name: str, bit: int) -> property:
        # Build a named getter/setter pair bound to one permission bit.
        def getter(self) -> bool:
            return self._get_bit(bit)

        def setter(self, value: bool):
            return self._set_bit(bit, value)

        getter.__name__ = perm_name
        setter.__name__ = perm_name
        doc = ":return: If this member has the {} permission (bit {}).".format(perm_name, bit)
        return property(fget=getter, fset=setter, doc=doc)

    namespace = {
        "__init__": __init__,
        "_set_bit": _set_bit,
        "_get_bit": _get_bit,
        "__eq__": __eq__,
    }
    namespace.update({n: _make_property(n, b) for n, b in bit_positions.items()})
    return type(name, (object,), namespace)

Permissions = build_permissions_class("Permissions")
| mit | Python | |
e818989604ddaf34dd5730cc3b73093744b59a29 | Create themes.py | ollien/Timpani,ollien/Timpani,ollien/Timpani | timpani/themes.py | timpani/themes.py | from . import database
import os

# Bug fix: ``os`` was used without being imported, so importing this module
# raised NameError.  Themes live two directories above this package.
THEME_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), "../../themes"))
| mit | Python | |
2e90787a245d6a30c733699a819a1ff888c308b7 | add simple boot script | squeaky-pl/japronto,squeaky-pl/japronto,squeaky-pl/japronto,squeaky-pl/japronto,squeaky-pl/japronto | src/japronto/__main__.py | src/japronto/__main__.py | from argparse import ArgumentParser
from importlib import import_module
import sys
from .app import Application
def main():
    """Resolve the ``module.attribute`` application spec from argv and run it.

    Returns a non-zero exit code after printing a diagnostic on bad input.
    """
    parser = ArgumentParser(prog='python -m japronto')
    # NOTE(review): host/port/worker_num are parsed but never forwarded to
    # Application.run() below -- confirm whether run() should receive them.
    parser.add_argument('--host', dest='host', type=str, default='0.0.0.0')
    parser.add_argument('--port', dest='port', type=int, default=8080)
    parser.add_argument('--worker-num', dest='worker_num', type=int, default=1)
    parser.add_argument('application')

    args = parser.parse_args()

    try:
        module, attribute = args.application.rsplit('.', 1)
    except ValueError:
        print(
            "Application specificer must contain at least one '.', got '{}'."
            .format(args.application))
        return 1

    try:
        module = import_module(module)
    except ModuleNotFoundError as e:
        print(e.args[0] + ' on Python search path.')
        return 1

    try:
        attribute = getattr(module, attribute)
    except AttributeError:
        print("Module '{}' does not have an attribute '{}'."
              .format(module.__name__, attribute))
        return 1

    if not isinstance(attribute, Application):
        # Bug fix: the '{}' placeholder was never substituted into the message.
        print("{} is not an instance of 'japronto.Application'."
              .format(args.application))
        return 1

    attribute.run()


# Guard so importing japronto.__main__ programmatically does not exit the
# interpreter; ``python -m japronto`` still invokes main() as before.
if __name__ == '__main__':
    sys.exit(main())
| mit | Python | |
bfa84c54166a606e4c7b587aeb10e5e79a2d0e50 | Add __init__ | naveenvhegde/pytmdb3,wagnerrp/pytmdb3 | tmdb3/__init__.py | tmdb3/__init__.py | #!/usr/bin/env python
from tmdb_api import Configuration, searchMovie, searchPerson, Person, \
Movie, Collection, __version__
from request import set_key
from tmdb_exceptions import *
| bsd-3-clause | Python | |
8830c0ae6a35a68cdeebdf8a7411e63f60b22c09 | Add script to host release management tools. Currently performs a single task: makes regexes for all JIRAs included in a release by parsing the CHANGES.txt files | apache/solr,apache/solr,apache/solr,apache/solr,apache/solr | dev-tools/scripts/manageRelease.py | dev-tools/scripts/manageRelease.py | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
sys.path.append(os.path.dirname(__file__))
from scriptutil import *
import argparse
import re
# Pulls out all JIRAs mentioned in the given CHANGES.txt filename under the given version
# and outputs a regular expression that will match all of them
def print_changes_jira_regex(filename, version):
  """Scan CHANGES.txt ``filename`` for the section of ``version`` and print a
  regex matching every LUCENE-/SOLR- JIRA mentioned in that section.

  Raises an Exception if the requested version's section is not found.
  """
  release_section_re = re.compile(r'\s*====*\s+(.*)\s+===')
  version_re = re.compile(r'%s(?:$|[^-])' % version)
  bullet_re = re.compile(r'\s*[-*]\s*(.*)')
  issue_list_re = re.compile(r'[:,/()\s]*((?:LUCENE|SOLR)-\d+)')
  more_issues_on_next_line_re = re.compile(r'(?:[:,/()\s]*(?:LUCENE|SOLR)-\d+)+\s*,\s*$') # JIRA list with trailing comma
  under_requested_version = False
  requested_version_found = False
  more_issues_on_next_line = False
  lucene_issues = []
  solr_issues = []
  with open(filename, 'r') as changes:
    for line in changes:
      version_boundary = release_section_re.match(line)
      if version_boundary is not None:
        if under_requested_version:
          break # No longer under the requested version - stop looking for JIRAs
        else:
          if version_re.search(version_boundary.group(1)):
            under_requested_version = True # Start looking for JIRAs
            requested_version_found = True
      else:
        if under_requested_version:
          bullet_match = bullet_re.match(line)
          # A continuation line (after a trailing comma) also carries JIRAs.
          if more_issues_on_next_line or bullet_match is not None:
            content = bullet_match.group(1) if bullet_match is not None else line
            for issue in issue_list_re.findall(content):
              # Keep only the numeric part, bucketed by project prefix.
              (lucene_issues if issue.startswith('LUCENE-') else solr_issues).append(issue.rsplit('-', 1)[-1])
            more_issues_on_next_line = more_issues_on_next_line_re.match(content)
  if not requested_version_found:
    raise Exception('Could not find %s in %s' % (version, filename))
  print('\nRegex to match JIRAs in the %s release section in %s:' % (version, filename))
  # Emit LUCENE and SOLR alternations joined by '|' when both are present.
  if len(lucene_issues) > 0:
    print(r'LUCENE-(?:%s)\b' % '|'.join(lucene_issues), end='')
    if len(solr_issues) > 0:
      print('|', end='')
  if len(solr_issues) > 0:
    print(r'SOLR-(?:%s)\b' % '|'.join(solr_issues), end='')
  print()
def read_config():
  """Parse command-line arguments and derive branch-related settings.

  Returns the argparse namespace augmented with ``branch_type`` and
  ``matching_branch`` (``Version``/``BranchType``/``find_branch_type``
  come from the ``scriptutil`` star-import).
  """
  parser = argparse.ArgumentParser(description='Tools to help manage a Lucene/Solr release')
  parser.add_argument('version', type=Version.parse, help='Version of the form X.Y.Z')
  c = parser.parse_args()
  c.branch_type = find_branch_type()
  # True when the current branch matches the kind of release being made
  # (relies on ``and`` binding tighter than ``or``).
  c.matching_branch = c.version.is_bugfix_release() and c.branch_type == BranchType.release or \
                      c.version.is_minor_release() and c.branch_type == BranchType.stable or \
                      c.version.is_major_release() and c.branch_type == BranchType.unstable
  print ("branch_type is %s " % c.branch_type)
  return c
def main():
  """Entry point: print JIRA-matching regexes for the requested release."""
  c = read_config()
  # TODO: add other commands to perform, specifiable via cmdline param
  # Right now, only one operation is performed: generate regex matching JIRAs for the given version from CHANGES.txt
  print_changes_jira_regex('lucene/CHANGES.txt', c.version)
  print_changes_jira_regex('solr/CHANGES.txt', c.version)

if __name__ == '__main__':
  try:
    main()
  except KeyboardInterrupt:
    print('\nReceived Ctrl-C, exiting early')
| apache-2.0 | Python | |
772fdb2251b5b8b374f15f43195a5e5f1fe9671e | Create deconvolution.py | google/trax,google/trax | trax/layers/deconvolution.py | trax/layers/deconvolution.py | # coding=utf-8
# Copyright 2020 The Trax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Trax Transpose Convolution Layers"""
import functools
import itertools
import operator
from trax import fastmath
from trax.fastmath import numpy as jnp
from trax.layers import base
from trax.layers import initializers as init
from jax import lax
class ConvTranspose(base.Layer):
  """Trax layer wrapping ``jax.lax.conv_transpose``.

  Supports inputs with more than 4 dimensions by flattening the leading
  dimensions into one batch dimension, but only for NHWC layouts.
  """

  def __init__(self, filters, kernel_size, strides = None, padding = 'VALID', rhs_dilation = None, dimension_numbers=('NHWC', 'HWIO', 'NHWC'), kernel_initialzer = None, bias_initializer = init.RandomNormalInitializer(1e-6)):
    # NOTE(review): parameter name 'kernel_initialzer' is misspelled, but is
    # part of the public signature so renaming would break keyword callers.
    super(ConvTranspose, self).__init__()
    self._filters = filters
    self._kernel_size = kernel_size
    self._padding = padding
    self._rhs_dilation = rhs_dilation
    self._dimension_numbers = dimension_numbers
    # Unpack the (input, kernel, output) layout specs, e.g. NHWC/HWIO/NHWC.
    self._lhs_spec, self._rhs_spec, self._out_spec = dimension_numbers
    self._one = (1, ) * len(kernel_size)
    # Default stride is 1 along every spatial dimension.
    self._strides = strides or self._one
    self._bias_initializer = bias_initializer
    rhs_spec = self._rhs_spec
    self._kernel_initializer = kernel_initialzer
    if kernel_initialzer is None:
      # Glorot fan-in/fan-out axes are the O and I positions of the kernel.
      self._kernel_initializer = init.GlorotNormalInitializer(
          rhs_spec.index('O'), rhs_spec.index('I')
      )

  def _check_nhwc(self):
    msg = 'Deconvolutions on more than 4 dimensions only supported in NHWC.'
    assert self._lhs_spec == self._out_spec == 'NHWC', msg

  def forward(self, x):
    w, b = self.weights
    x_shape = list(x.shape)
    if len(x_shape) > 4:
      # Collapse all leading dims into a single batch dim for lax.
      self._check_nhwc()
      new_batch_dim = functools.reduce(operator.mul, x.shape[:-3])
      x = jnp.reshape(x, [new_batch_dim] + list(x.shape[-3:]))
    res = lax.conv_transpose(
        x, w, self._strides, self._padding, self._rhs_dilation, self._dimension_numbers) + b
    if len(x_shape) > 4:
      # Restore the original leading dimensions.
      res = jnp.reshape(res, x_shape[:-3] + list(res.shape[-3:]))
    return res

  def _kernel_shape(self, input_shape):
    """Helper to calculate the kernel shape."""
    kernel_size_iter = iter(self._kernel_size)
    # Walk the kernel spec: O -> filters, I -> input channels, spatial
    # positions consume successive kernel_size entries.
    return [self._filters if c == 'O' else
            input_shape[self._lhs_spec.index('C')] if c == 'I' else
            next(kernel_size_iter) for c in self._rhs_spec]

  def init_weights_and_state(self, input_signature):
    input_shape = input_signature.shape
    if len(input_shape) > 4:
      # Mirror forward(): treat leading dims as one flattened batch.
      self._check_nhwc()
      new_batch_dim = functools.reduce(operator.mul, input_shape[:-3])
      input_shape = [new_batch_dim] + list(input_shape[-3:])
    kernel_shape = self._kernel_shape(input_shape)
    # Bias broadcasts over everything except the channel dimension;
    # leading singleton dims are stripped.
    bias_shape = [self._filters if c == 'C' else 1 for c in self._out_spec]
    bias_shape = tuple(itertools.dropwhile(lambda x: x == 1, bias_shape))
    rng1, rng2 = fastmath.random.split(self.rng, 2)
    w = self._kernel_initializer(kernel_shape, rng1)
    b = self._bias_initializer(bias_shape, rng2)
    self.weights = (w, b)
| apache-2.0 | Python | |
60f5dbe34884683626ca7f045fa79d2c247197fe | add test for class checks | smspillaz/pychecker,smspillaz/pychecker,smspillaz/pychecker | pychecker/pychecker2/tests/class.py | pychecker/pychecker2/tests/class.py | import compiler.ast
# Fixture base class for pychecker's attribute checks: defines an
# attribute only inside a method (Python 2 test input -- do not "fix").
class B(compiler.ast.Const):
    def x(self):
        self.inherited = 1
# Fixture exercising pychecker's instance-attribute inference; every odd
# construct below is deliberate checker fodder (Python 2 syntax).
class A(B):
    def __init__(self):
        self.x = 1 # define x on A
        self.w.q = 1
    def f(s, self): # unusual self
        print self
        s.self = 1
        s = 7
    def x(): # no self, redefine x on object
        pass
    def y(self):
        self.a, self.b = (1, 2) # define a, b
    def z(self):
        print self.z # method
        print self.x # assigned
        print self.a # unpacked
        print self.w # unknown
        print self.known # known is known from B
        print self.value # from compiler.ast.Const
        print self.goofy # defined in class scope
    goofy = x
| bsd-3-clause | Python | |
a5e599f4a7c2f20c4f0ed79366db985cba7ae85e | Add template context debugging templatetag | python-dirbtuves/website,python-dirbtuves/website,python-dirbtuves/website | pylab/website/templatetags/debug.py | pylab/website/templatetags/debug.py | from django import template
register = template.Library()


@register.simple_tag(name='pdb', takes_context=True)
def pdb(context, *args, **kwargs):
    """Drop into an interactive debugger while rendering a template.

    Usage: ``{% pdb %}``.  The template rendering context is available
    as ``context`` in the debugger session.  Prefers ipdb when it is
    installed, but falls back to the stdlib debugger so the tag still
    works in environments without ipdb (robustness fix: the original
    raised ImportError if ipdb was missing).
    """
    try:
        import ipdb as debugger
    except ImportError:
        # ipdb is a development convenience, not a hard dependency.
        import pdb as debugger
    debugger.set_trace()
| agpl-3.0 | Python | |
e7852c457da3cea0f8a20773cc3a355f559b845e | Update version to 1.7 | artefactual/archivematica,artefactual/archivematica,artefactual/archivematica,artefactual/archivematica | src/dashboard/src/main/migrations/0047_version_number.py | src/dashboard/src/main/migrations/0047_version_number.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
def data_migration(apps, schema_editor):
    """Stamp the Archivematica preservation-system agent with version 1.7.

    Uses the historical model state supplied by the migration framework
    (``apps.get_model``) rather than importing ``main.models`` directly.
    """
    agent_model = apps.get_model('main', 'Agent')
    archivematica_agents = agent_model.objects.filter(
        identifiertype='preservation system',
        name='Archivematica',
    )
    archivematica_agents.update(identifiervalue='Archivematica-1.7')
class Migration(migrations.Migration):
    # Data-only migration: updates the Archivematica agent's version string.
    # No reverse callable is supplied to RunPython, so this migration cannot
    # be unapplied cleanly.
    dependencies = [
        ('main', '0046_optional_normative_structmap'),
    ]
    operations = [
        migrations.RunPython(data_migration)
    ]
| agpl-3.0 | Python | |
a4a73ac2e5a15e53a0935987911c5905890bfab8 | Add Overwatch command. | sk89q/Plumeria,sk89q/Plumeria,sk89q/Plumeria | orchard/overwatch.py | orchard/overwatch.py | """Get stats for Overwatch."""
from plumeria.command import commands, CommandError
from plumeria.command.parse import Word
from plumeria.message.lists import build_list
from plumeria.util import http
from plumeria.util.http import BadStatusCodeError
from plumeria.util.ratelimit import rate_limit
# (display label, owapi `game_stats` key, str.format spec) triples rendered
# for each play mode; stats whose key is missing from a player's blob are
# skipped by generate_stats_from_keys below.
GENERAL_STATS = (
    ('Played', 'time_played', '{:.0f}'),
    ('K/D', 'kpd', '{:.2f}'),
    ('Dmg/t', 'all_damage_done_avg_per_10_min', '{:.0f}'),
    ('Best Streak', 'kill_streak_best', '{:.0f}'),
    ('Obj Time/g', 'objective_time_most_in_game', '{:.5f}'),
    ('Most Dmg/g', 'all_damage_done_most_in_game', '{:.0f}'),
    ('Most Heal/g', 'healing_done_most_in_game', '{:.0f}'),
    ('Medals', 'medals', '{:.0f}'),
    ('Gold Med', 'medals_gold', '{:.0f}'),
    ('Won', 'games_won', '{:.0f}'),
)
def generate_stats_from_keys(data, stats):
    """Render the stats present in ``data`` as a ' | '-separated string.

    Each entry of ``stats`` is a (label, key, format_spec) triple; keys
    absent from ``data`` are silently skipped, and present values are
    coerced to float before formatting.
    """
    rendered = [
        ('{}: **' + fmt + '**').format(label, float(data[key]))
        for label, key, fmt in stats
        if key in data
    ]
    return ' | '.join(rendered)
@commands.create('overwatch', 'ow', cost=2, category='Games', params=[Word('battletag'), Word('region', fallback=None)])
@rate_limit()
async def overwatch(message, battletag, region=None):
    """
    Get someone's Overwatch stats.

    The name is case-sensitive.

    Example::

        /overwatch booo#0000

    """
    try:
        r = await http.get("https://owapi.net/api/v3/u/{name}/blob".format(name=battletag.replace("#", "-")))
        data = r.json()
    except BadStatusCodeError as e:
        if e.http_code == 404:
            raise CommandError("Battletag '{}' not found. The name is CASE-SENSITIVE.".format(battletag))
        raise
    # Region keys are the non-underscore top-level entries with a non-empty
    # stats blob (owapi uses keys like "us", "eu", "kr" plus "_request" meta).
    regions = []
    for key, value in data.items():
        if key[0] != "_" and value:
            regions.append(key)
    if not regions:
        raise CommandError("Battle tag found but there are no stats for '{}'.".format(battletag))
    # Bug fix: normalize the user-supplied region once.  The old code
    # validated `region.lower()` but then indexed `data[region]` with the
    # raw string, so e.g. "EU" passed validation and then raised KeyError.
    if region:
        region = region.lower()
    if not region and len(regions) > 1:
        raise CommandError("Please specify a region (one of {}) for '{}'.".format(', '.join(regions), battletag))
    if region and region not in regions:
        raise CommandError("Please specify a region in one of {} for '{}'.".format(', '.join(regions), battletag))
    if not region:
        region = regions[0]
    stats = data[region]
    lines = []
    for mode, mode_stats in stats['stats'].items():
        lines.append(
            "**{}**: {}".format(mode.capitalize(), generate_stats_from_keys(mode_stats['game_stats'], GENERAL_STATS)))
    return build_list(lines)
def setup():
    """Plumeria plugin entry point: register this module's commands."""
    commands.add(overwatch)
| mit | Python | |
010dd0366ddb62e52f295ec1648c1bab38f9e437 | move python wrappers to their own file | ryansb/tremendous,ryansb/tremendous | tremendous/api.py | tremendous/api.py | from tremendous.bindings import lib
from tremendous.bindings import ffi
def apply_format(color, body):
    """Apply the C-level color formatting to ``body``.

    Thin wrapper over the cffi binding: calls ``lib.apply_format`` and
    converts the returned C string with ``ffi.string``.
    """
    return ffi.string(lib.apply_format(color, body))
| mit | Python | |
19c5600486ea7bee68eb1098636b21757938d799 | Add is_prime python | felipecustodio/algorithms,felipecustodio/algorithms,felipecustodio/algorithms,felipecustodio/algorithms,felipecustodio/algorithms,felipecustodio/algorithms,felipecustodio/algorithms,felipecustodio/algorithms,felipecustodio/algorithms,felipecustodio/algorithms,felipecustodio/algorithms,felipecustodio/algorithms,felipecustodio/algorithms,felipecustodio/algorithms,felipecustodio/algorithms,felipecustodio/algorithms | math/is_prime/python/is_prime.py | math/is_prime/python/is_prime.py | import math
def is_prime(number):
    """Return True iff ``number`` is prime.

    Uses trial division by odd numbers up to sqrt(number); values <= 1
    are not prime.  (Bug fix: the original tested ``number % 1 == 0``,
    which is always true for integers, so every odd number >= 9 --
    including primes such as 11 and 13 -- was reported composite.)
    """
    if number <= 1: return False
    if number == 2: return True
    if (number % 2) == 0: return False
    for i in range(3, int(math.sqrt(number)) + 1, 2):
        if number % i == 0: return False
    return True
if __name__ == "__main__":
    # int() is required on Python 3, where input() returns a string
    # (comparing a str against ints inside is_prime would raise TypeError).
    number = int(input("Enter number :"))
    if is_prime(number):
        print("It is prime")
    else:
        print("It is not prime")
| mit | Python | |
582128f1061ab74da76d26a366bfd3c8fee8f007 | Add scripts/fill_events.py to generate mock data | thobbs/logsandra | scripts/fill_events.py | scripts/fill_events.py | #!/usr/bin/env python
import sys
import os

# Make the project sources importable when running this script directly.
# (Bug fix: the original passed the string literal '__file__', whose dirname
# is '', so the path was resolved relative to the CWD, not the script.)
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'src'))

from random import randint
from datetime import datetime, timedelta

from logsandra.model.client import CassandraClient

client = CassandraClient('test', 'localhost', 9160, 3)

today = datetime.now()
keywords = ['foo', 'bar', 'baz']

# Insert 1000 mock log entries spread over the past week, cycling keywords.
for i in range(1000):
    d = today + timedelta(randint(-7, -1), randint(-3600*24, 3600*24))
    client.add_log(d, 'test entry', 'here', [keywords[i % 3]])
| mit | Python | |
92ed053619e27a538b93e87905c0ccf4599808ae | add a ann investigation script | NeuromorphicProcessorProject/snn_toolbox | tests/investigate_ann.py | tests/investigate_ann.py | """Polting everything for investigating ANN.
Author: Yuhuang Hu
Email : duguyue100@gmail.com
"""
from keras.models import model_from_json
from keras import backend as K
import os
from os.path import join
import matplotlib.pyplot as plt
import numpy as np
from snntoolbox.io_utils.plotting import plot_layer_activity
np.set_printoptions(threshold=np.inf)  # print arrays in full, never truncated
# CIFAR-10 class index -> human-readable name; used to title the sample plot.
cifar10_label = {
    0: "airplane",
    1: "automobile",
    2: "bird",
    3: "cat",
    4: "deer",
    5: "dog",
    6: "frog",
    7: "horse",
    8: "ship",
    9: "truck"
}
def plot_weights(model, filename, num_cols=8, num_rows=4):
    """Plot the first layer's filters in a grid and save them to `filename`.

    Args:
        model: Keras model whose first layer exposes a shared weight tensor
            via ``layers[0].W`` (filters-first layout; each filter is
            transposed channels-last for display).
        filename: output image path.
        num_cols, num_rows: grid layout forwarded to ``plt.subplot``.
            NOTE(review): they are passed in (num_cols, num_rows) order,
            i.e. num_cols is used as the subplot *row* count -- kept as-is
            for backward compatibility.
    """
    W = model.layers[0].W.get_value(borrow=True)
    plt.figure()
    for i in xrange(W.shape[0]):
        # Normalize each filter to [0, 1] for display.
        # NOTE(review): divides by the max; a constant filter would divide
        # by zero -- assumed not to occur in trained models.
        W_t = W[i]-np.min(W[i])
        W_t /= np.max(W_t)
        plt.subplot(num_cols, num_rows, i+1)
        plt.imshow(W_t.transpose(1, 2, 0), interpolation='nearest')
        plt.axis('off')
    plt.savefig(filename, bbox_inches='tight')
    # Bug fix: close the figure so repeated calls do not leak figures.
    plt.close()
    print ("[MESSAGE] The filters are saved at %s" % (filename))
def plot_out(model, image, path, filename, layer=0):
    """Plot the activations of layer ``layer`` of ``model`` for ``image``.

    Args:
        model: Keras model.
        image: batch containing a single input image.
        path: directory the plot is written into.
        filename: output file name (without the directory part).
        layer: index of the layer whose output is plotted.
    """
    # Function mapping (input, learning_phase) -> layer output; 0 = test phase.
    get_act = K.function([model.layers[0].input, K.learning_phase()],
                         [model.layers[layer].output])
    layer_output = get_act([image, 0])[0]
    plot_layer_activity((layer_output[0], "1st convolution after relu"),
                        filename, path=path, limits=None)
    # Bug fix: report the location actually written (derived from this
    # function's arguments) instead of the module-level globals
    # record_path/image_id, which made the function unusable standalone.
    print ("[MESSAGE] The feature maps saved at %s"
           % (join(path, filename)))
# data path: datasets live under ~/.snntoolbox/datasets/cifar10
home_path = os.environ["HOME"]
config_path = join(home_path, ".snntoolbox")
data_path = join(config_path, "datasets")
cifar10_path = join(data_path, "cifar10")
# model path: architecture as JSON, weights as HDF5
model_name = "82.65.bodo"
model_json = os.path.join(config_path, model_name+".json")
model_data = os.path.join(config_path, model_name+".h5")
# output paths: one sub-folder per investigated sample image
out_path = join(config_path, "ann_investigate", model_name)
image_id = 55
record_path = join(out_path, "sample_image_"+str(image_id))
if not os.path.isdir(record_path):
    os.makedirs(record_path)
image_path = join(record_path, "sample_image_"+str(image_id)+".png")
filter_path = join(out_path, "filters.png")
fms_path = join(record_path, "fms_for_image_"+str(image_id)+".png")
# load data (assumes channels-first CIFAR-10 arrays -- TODO confirm)
data = np.load(os.path.join(cifar10_path, "X_test.npz"))["arr_0"]
label = np.load(os.path.join(cifar10_path, "Y_test.npz"))["arr_0"]
# plot the sample image, titled with its one-hot label's class name
image = np.array([data[image_id]])
plt.figure()
plt.imshow(image[0].transpose(1, 2, 0))
plt.title(cifar10_label[np.argmax(label[image_id])])
plt.savefig(image_path, bbox_inches='tight')
print ("[MESSAGE] sample image is saved at %s" % (image_path))
# load the model (bug fix: close the JSON file instead of leaking the handle)
with open(model_json, 'r') as json_file:
    model = model_from_json(json_file.read())
model.load_weights(model_data)
# plot filters, but only regenerate them when the file is missing
if not os.path.exists(filter_path):
    plot_weights(model, filter_path, num_cols=4, num_rows=8)
plot_out(model, image, record_path,
         "fms_for_image_"+str(image_id)+".png", layer=1)
| mit | Python | |
a102fb888b60454d7efbe26e4afb38a59c212769 | Add script to delete spam users. | EuroPython/epcon,EuroPython/epcon,EuroPython/epcon,EuroPython/epcon | p3/management/commands/delete_spam_users.py | p3/management/commands/delete_spam_users.py | # -*- coding: utf-8 -*-
""" Delete users creating by spambots.
"""
import logging as log
from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from django.db import transaction
from assopy import models as amodels
###
class Command(BaseCommand):
    # One-off management command: delete the fake accounts created by a
    # spambot (identified by a fixed spam first name), skipping any account
    # that owns tickets.  The whole run is wrapped in a single transaction,
    # so a crash rolls everything back and --dry-run makes no changes.
    # Options
    option_list = BaseCommand.option_list + (
        make_option('--dry-run',
            action='store_true',
            dest='dry_run',
            help='Do everything except delete users',
        ),
    )
    args = '<conference>'  # NOTE(review): declared but never read in handle()
    # Dry run ?
    dry_run = False
    @transaction.atomic
    def handle(self, *args, **options):
        # Handle options
        self.dry_run = options.get('dry_run', False)
        # Between June 1-4 2018, a Chinese spammer create 30k fake user
        # accounts
        spam_users = amodels.User.objects.filter(
            user__first_name = '金诚送38元',
        )
        print ('Found %i (potential) spam users.' % len(spam_users))
        count = 0
        for user in spam_users:
            # Filter out users with tickets
            tickets = user.tickets()
            if tickets:
                print ('Spam user %r has %i tickets: skipping.' % (
                    user.user.get_username(), len(tickets)))
                continue
            # Delete user and all related objects
            if not self.dry_run:
                user.delete()
            # count also increments in dry-run mode, so the summary below
            # reports how many users *would have been* deleted.
            count += 1
            if count % 1000 == 0:
                print ('Deleted %i spam users.' % count)
        if self.dry_run:
            print ('Would have deleted %i spam users.' % count)
        else:
            print ('Deleted %i spam users.' % count)
| bsd-2-clause | Python | |
3ebbdf64ba244097e0c78e229d0c81d393bb4460 | add msct_report file | 3324fr/spinalcordtoolbox,3324fr/spinalcordtoolbox,3324fr/spinalcordtoolbox,3324fr/spinalcordtoolbox,3324fr/spinalcordtoolbox,3324fr/spinalcordtoolbox,3324fr/spinalcordtoolbox | scripts/msct_report.py | scripts/msct_report.py | import os
import shutil
import glob
from collections import OrderedDict
import msct_report_config
import msct_report_util
import msct_report_image
class Report:
def __init__(self, exists, reportDir):
self.dir = os.path.dirname(os.path.realpath(__file__));
self.reportFolder = reportDir
# TODO:the template link could change in production
self.templatesDirLink = os.path.join(self.dir,'..', msct_report_config.templatesDirName)
# copy all the assets file inside the new folder
if not exists:
self.__createNew()
def __createMenuLink(self, contraste, tool, id=None):
item = {
'name': tool,
'link': '{}-{}'.format(contraste, tool)
}
return item
def __getMenuLinks(self):
"""
this function parse the current report folder and return the correspondind links by parsing html file names
:return:
"""
htmls = glob.glob1(self.reportFolder, "*.html")
links =OrderedDict()
if htmls:
for item in htmls:
rmvHtml = item.split('.')
tmp = rmvHtml[0].split('-')
if tmp.__len__() > 1:
if not tmp[0] in links:
links[tmp[0]] = [self.__createMenuLink(tmp[0], tmp[1])]
else:
links[tmp[0]].append(self.__createMenuLink(tmp[0], tmp[1]))
return links
def __createNew(self):
"""
create a new report folder in the given directory
:return:e
"""
# copy assets into sct_report dir
shutil.copytree(os.path.join(self.templatesDirLink, msct_report_config.assetsDirName),
os.path.join(self.reportFolder, msct_report_config.assetsDirName))
# copy the .config.json (TODO:its config really necessary)
msct_report_util.copy(os.path.join(self.templatesDirLink, msct_report_config.reportConfigFileName), self.reportFolder,
msct_report_config.reportConfigFileName)
def appendItem(self, item):
"""
:param item:
:return:
"""
# get images link from qc images
qcImagesItemLink = os.path.join(self.reportFolder,'img', item.contrastName, item.toolName)
print "qcImagesItem",qcImagesItemLink
if os.path.exists(qcImagesItemLink):
#TODO:Marche pas bien =>take all png or jpeg
imagesLink = glob.glob1(qcImagesItemLink, msct_report_config.imagesExt)
if imagesLink:
for img in imagesLink:
item.addImageLink(msct_report_image.Image(img, os.path.join(item.imagesDir, img)))
else:
print "no qc images in the current directory"
else:
raise Exception("qc images not founded")
# generate html file for the item
item.generateHtmlFromTemplate(self.templatesDirLink, msct_report_config.constrasteToolTemplate)
return
def refreshIndexFile(self):
"""
:return:
"""
fileLink = os.path.join(self.reportFolder, msct_report_config.indexTemplate)
tags = {
'links': self.__getMenuLinks()
}
msct_report_util.createHtmlFile(self.templatesDirLink, msct_report_config.indexTemplate, fileLink, tags)
return
| mit | Python |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.