commit stringlengths 40 40 | subject stringlengths 4 1.73k | repos stringlengths 5 127k | old_file stringlengths 2 751 | new_file stringlengths 2 751 | new_contents stringlengths 1 8.98k | old_contents stringlengths 0 6.59k | license stringclasses 13 values | lang stringclasses 23 values |
|---|---|---|---|---|---|---|---|---|
e7e51333133dd561e8a746144c29c6635d8a982a | Add migration to add column for proposal image filename | fairdemocracy/vilfredo-core | migrations/versions/320f4eb0698b_add_proposal_image.py | migrations/versions/320f4eb0698b_add_proposal_image.py | """add proposal image
Revision ID: 320f4eb0698b
Revises: 26ef95fc6f2c
Create Date: 2015-03-31 15:55:20.062624
"""
# revision identifiers, used by Alembic.
revision = '320f4eb0698b'
down_revision = '26ef95fc6f2c'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('proposal', sa.Column('image', sa.String(length=150), nullable=False))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('proposal', 'image')
### end Alembic commands ###
| agpl-3.0 | Python | |
2233b8cb2e59e4304492b60eb9842962130e14c2 | Create NoisyNeighborsClosedForm.py | laichunpongben/CodeJam | NoisyNeighborsClosedForm.py | NoisyNeighborsClosedForm.py | # Google Code Jam
# Google Code Jam 2015
# Round 1B
# Problem B. Noisy Neighbors
# Closed form solution O(1)
from math import ceil
testCaseFile = open("NoisyNeighbors_B-large-practice.in", "r")
lines = testCaseFile.read().split('\n')
n = int(lines[0])
testCases = [0 for x in range(n)]
class TestCase:
def __init__(self, r, c, n):
self.row = r
self.column = c
self.tenant = n
def compute_min_unhappiness(self):
r = self.row
c = self.column
n = self.tenant
width = int(min(r,c))
unhappinessAtFull = (r-1)*c+(c-1)*r
maxNAtZero = ceil(r*c/2)
if (width == 1):
if ((r*c)%2==0):
if (n<=maxNAtZero): return 0
else: return 2*(n-maxNAtZero)-1
else:
if (n<=maxNAtZero): return 0
else: return 2*(n-maxNAtZero)
else:
minNAtMinusFour = r*c - ceil((r-2)*(c-2)/2)
if ((r*c)%2==0):
if (n<=maxNAtZero): return 0
elif (n<=maxNAtZero+2): return 2*(n-maxNAtZero)
elif (n<minNAtMinusFour): return 3*(n-maxNAtZero)-2
else: return unhappinessAtFull - 4*(r*c-n)
else:
minNAtMinusThree = minNAtMinusFour - (r+c-6)
if (n<=maxNAtZero): return 0
elif (n<minNAtMinusThree): return 3*(n-maxNAtZero)
elif (n<minNAtMinusFour): return unhappinessAtFull - 4*(r*c-minNAtMinusFour) - 3*(minNAtMinusFour-n)
else: return unhappinessAtFull - 4*(r*c-n)
def initialize_test_cases(lines):
global testCases
for index, item in enumerate(lines):
if index > 0:
items = item.split(' ')
if (len(items) > 1):
r = int(items[0])
c = int(items[1])
n = int(items[2])
testCases[index - 1] = TestCase(r, c, n)
def print_all_results():
for x in range(len(testCases)):
print('Case #' + str(x+1) + ': ' + str(testCases[x].compute_min_unhappiness()))
initialize_test_cases(lines)
print_all_results()
| apache-2.0 | Python | |
f39947677bc2eaf15a0a9d5ef976a29905b23339 | Add AirQuality notification | irrrze/Scripts | PushAirQuality.py | PushAirQuality.py | from twitter import *
from pushbullet import PushBullet
import config
CONSUMER_KEY = config.twitter_consumer_key
CONSUMER_SECRET = config.twitter_consumer_secret
OAUTH_TOKEN = config.twitter_oauth_token
OAUTH_SECRET = config.twitter_oauth_secret
pb_api_key = config.pb_api_key
twitter = Twitter(auth=OAuth(
OAUTH_TOKEN, OAUTH_SECRET, CONSUMER_KEY, CONSUMER_SECRET))
tweets = twitter.statuses.user_timeline(screen_name="CGShanghaiAir", count=1)
text = tweets[0]['text']
pm25 = text.split(";")[3]
if pm25 > 90:
pb = PushBullet(pb_api_key)
nexus6p = pb.get_device('Huawei Nexus 6P')
nexus6p.push_note('Shanghai Air Quality', text)
| apache-2.0 | Python | |
d4b86bc3b4440d665eb8119828a9ffe241b321a6 | Update 24-game.py | yiwen-luo/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,yiwen-luo/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,tudennis/LeetCode---kamyu104-11-24-2015,tudennis/LeetCode---kamyu104-11-24-2015,yiwen-luo/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,kamyu104/LeetCode,kamyu104/LeetCode,yiwen-luo/LeetCode,yiwen-luo/LeetCode,kamyu104/LeetCode,kamyu104/LeetCode,kamyu104/LeetCode | Python/24-game.py | Python/24-game.py | # Time: O(n^3 * 4^n), n = 4
# Space: O(n^2)
from fractions import Fraction
from operator import *
class Solution(object):
def judgePoint24(self, nums):
"""
:type nums: List[int]
:rtype: bool
"""
def dfs(nums):
if len(nums) == 1:
return nums[0] == 24
ops = [add, sub, mul, div]
for i in xrange(len(nums)):
for j in xrange(len(nums)):
if i == j:
continue
for op in ops:
if op == div and nums[j] == 0:
continue
next_nums = [nums[k] for k in xrange(len(nums)) if k not in [i, j]]
next_nums.append(op(nums[i], nums[j]))
if dfs(next_nums):
return True
return False
return dfs(map(lambda x: Fraction(x, 1), nums))
| # Time: O(n^3 * 4^n)
# Space: O(n^2)
from fractions import Fraction
from operator import *
class Solution(object):
def judgePoint24(self, nums):
"""
:type nums: List[int]
:rtype: bool
"""
def dfs(nums):
if len(nums) == 1:
return nums[0] == 24
ops = [add, sub, mul, div]
for i in xrange(len(nums)):
for j in xrange(len(nums)):
if i == j:
continue
for op in ops:
if op == div and nums[j] == 0:
continue
next_nums = [nums[k] for k in xrange(len(nums)) if k not in [i, j]]
next_nums.append(op(nums[i], nums[j]))
if dfs(next_nums):
return True
return False
return dfs(map(lambda x: Fraction(x, 1), nums))
| mit | Python |
aa212ffb28d48835c788199ec9f5a09bf83fb443 | Add a utility to reduce GlobalISel tests | apple/swift-llvm,GPUOpen-Drivers/llvm,apple/swift-llvm,GPUOpen-Drivers/llvm,llvm-mirror/llvm,GPUOpen-Drivers/llvm,apple/swift-llvm,llvm-mirror/llvm,llvm-mirror/llvm,GPUOpen-Drivers/llvm,apple/swift-llvm,llvm-mirror/llvm,llvm-mirror/llvm,apple/swift-llvm,GPUOpen-Drivers/llvm,GPUOpen-Drivers/llvm,apple/swift-llvm,GPUOpen-Drivers/llvm,llvm-mirror/llvm,apple/swift-llvm,llvm-mirror/llvm,llvm-mirror/llvm,llvm-mirror/llvm,GPUOpen-Drivers/llvm,apple/swift-llvm | utils/bugpoint_gisel_reducer.py | utils/bugpoint_gisel_reducer.py | #!/usr/bin/env python
"""Reduces GlobalISel failures.
This script is a utility to reduce tests that GlobalISel
fails to compile.
It runs llc to get the error message using a regex and creates
a custom command to check that specific error. Then, it runs bugpoint
with the custom command.
"""
from __future__ import print_function
import argparse
import re
import subprocess
import sys
import tempfile
import os
def log(msg):
print(msg)
def hr():
log('-' * 50)
def log_err(msg):
print('ERROR: {}'.format(msg), file=sys.stderr)
def check_path(path):
if not os.path.exists(path):
log_err('{} does not exist.'.format(path))
raise
return path
def check_bin(build_dir, bin_name):
file_name = '{}/bin/{}'.format(build_dir, bin_name)
return check_path(file_name)
def run_llc(llc, irfile):
pr = subprocess.Popen([llc,
'-o',
'-',
'-global-isel',
'-pass-remarks-missed=gisel',
irfile],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = pr.communicate()
res = pr.wait()
if res == 0:
return 0
re_err = re.compile(
r'LLVM ERROR: ([a-z\s]+):.*(G_INTRINSIC[_A-Z]* <intrinsic:@[a-zA-Z0-9\.]+>|G_[A-Z_]+)')
match = re_err.match(err)
if not match:
return 0
else:
return [match.group(1), match.group(2)]
def run_bugpoint(bugpoint_bin, llc_bin, opt_bin, tmp, ir_file):
compileCmd = '-compile-command={} -c {} {}'.format(
os.path.realpath(__file__), llc_bin, tmp)
pr = subprocess.Popen([bugpoint_bin,
'-compile-custom',
compileCmd,
'-opt-command={}'.format(opt_bin),
ir_file])
res = pr.wait()
if res != 0:
log_err("Unable to reduce the test.")
raise
def run_bugpoint_check():
path_to_llc = sys.argv[2]
path_to_err = sys.argv[3]
path_to_ir = sys.argv[4]
with open(path_to_err, 'r') as f:
err = f.read()
res = run_llc(path_to_llc, path_to_ir)
if res == 0:
return 0
log('GlobalISed failed, {}: {}'.format(res[0], res[1]))
if res != err.split(';'):
return 0
else:
return 1
def main():
# Check if this is called by bugpoint.
if len(sys.argv) == 5 and sys.argv[1] == '-c':
sys.exit(run_bugpoint_check())
# Parse arguments.
parser = argparse.ArgumentParser(
description=__doc__, formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('BuildDir', help="Path to LLVM build directory")
parser.add_argument('IRFile', help="Path to the input IR file")
args = parser.parse_args()
# Check if the binaries exist.
build_dir = check_path(args.BuildDir)
ir_file = check_path(args.IRFile)
llc_bin = check_bin(build_dir, 'llc')
opt_bin = check_bin(build_dir, 'opt')
bugpoint_bin = check_bin(build_dir, 'bugpoint')
# Run llc to see if GlobalISel fails.
log('Running llc...')
res = run_llc(llc_bin, ir_file)
if res == 0:
log_err("Expected failure")
raise
hr()
log('GlobalISel failed, {}: {}.'.format(res[0], res[1]))
tmp = tempfile.NamedTemporaryFile()
log('Writing error to {} for bugpoint.'.format(tmp.name))
tmp.write(';'.join(res))
tmp.flush()
hr()
# Run bugpoint.
log('Running bugpoint...')
run_bugpoint(bugpoint_bin, llc_bin, opt_bin, tmp.name, ir_file)
hr()
log('Done!')
hr()
output_file = 'bugpoint-reduced-simplified.bc'
log('Run llvm-dis to disassemble the output:')
log('$ {}/bin/llvm-dis -o - {}'.format(build_dir, output_file))
log('Run llc to reproduce the problem:')
log('$ {}/bin/llc -o - -global-isel '
'-pass-remarks-missed=gisel {}'.format(build_dir, output_file))
if __name__ == '__main__':
main()
| apache-2.0 | Python | |
fcf0ed3c4e2deb9ce1d6a758dc18e6a03542eb59 | Add a script to find parties with multiple emblems (logos) from the EC | mysociety/yournextmp-popit,mysociety/yournextmp-popit,DemocracyClub/yournextrepresentative,mysociety/yournextrepresentative,datamade/yournextmp-popit,neavouli/yournextrepresentative,YoQuieroSaber/yournextrepresentative,neavouli/yournextrepresentative,mysociety/yournextmp-popit,DemocracyClub/yournextrepresentative,YoQuieroSaber/yournextrepresentative,YoQuieroSaber/yournextrepresentative,YoQuieroSaber/yournextrepresentative,neavouli/yournextrepresentative,mysociety/yournextmp-popit,openstate/yournextrepresentative,datamade/yournextmp-popit,datamade/yournextmp-popit,datamade/yournextmp-popit,openstate/yournextrepresentative,mysociety/yournextrepresentative,datamade/yournextmp-popit,openstate/yournextrepresentative,DemocracyClub/yournextrepresentative,openstate/yournextrepresentative,neavouli/yournextrepresentative,neavouli/yournextrepresentative,mysociety/yournextrepresentative,mysociety/yournextrepresentative,openstate/yournextrepresentative,YoQuieroSaber/yournextrepresentative,mysociety/yournextrepresentative,mysociety/yournextmp-popit | candidates/management/commands/candidates_parties_with_multiple_emblems.py | candidates/management/commands/candidates_parties_with_multiple_emblems.py | from django.core.management.base import BaseCommand
from candidates.popit import create_popit_api_object, popit_unwrap_pagination
class Command(BaseCommand):
def handle(self, *args, **options):
api = create_popit_api_object()
for org in popit_unwrap_pagination(
api.organizations,
per_page=100
):
org.pop('versions', None)
org.pop('memberships', None)
images = org.get('images', [])
if len(images) < 2:
continue
print "====================================================="
print len(images), org['id'], org['name'].encode('utf-8')
for image in images:
print ' --'
print ' ' + image['notes'].encode('utf-8')
print ' ' + image['url']
| agpl-3.0 | Python | |
c06a72515cf2fddc604641b70b497f74d9ef5d78 | use vlc to set clips | Dennovin/videoscripts,Dennovin/videoscripts,Dennovin/videoscripts | vlc.py | vlc.py | #!/usr/bin/env python
import dbus
import Tkinter
import os
import pprint
import yaml
videofiles = {}
def format_time(s, fmt="{m:02d}:{s:02.0f}"):
return fmt.format(m=int(s/60), s=float(s)%60)
def current_filename():
current_video_file = props.Get("org.mpris.MediaPlayer2.Player", "Metadata")["xesam:url"].replace("file://", "")
current_video_file = os.path.basename(current_video_file)
if current_video_file not in videofiles:
videofiles[current_video_file] = {"goals": [], "timer_events": [], "clips": []}
current_filename_label.config(text=current_video_file)
return current_video_file
def video_time():
return float(props.Get("org.mpris.MediaPlayer2.Player", "Position") / 1000000)
def start_clip():
videofiles[current_filename()]["clip_start"] = video_time()
def end_clip():
if videofiles[current_filename()].get("clip_start", None) is not None:
print "ending"
clip = {"start": videofiles[current_filename()]["clip_start"], "end": video_time()}
videofiles[current_filename()]["clips"].append(clip)
clips_listbox.insert(Tkinter.END, "{} - {}".format(format_time(clip["start"]), format_time(clip["end"])))
def add_timer_event():
videofiles[current_filename()]["timer_events"].append({"time": video_time()})
game_events_listbox.insert(Tkinter.END, "{}: Timer".format(format_time(video_time())))
def add_goal():
videofiles[current_filename()]["goals"].append({"time": video_time()})
game_events_listbox.insert(Tkinter.END, "{}: Goal".format(format_time(video_time())))
def save_config():
print yaml.dump(videofiles)
# http://specifications.freedesktop.org/mpris-spec/latest/Player_Interface.html
bus = dbus.SessionBus()
player = bus.get_object("org.mpris.MediaPlayer2.vlc", "/org/mpris/MediaPlayer2")
iface = dbus.Interface(player, "org.mpris.MediaPlayer2.Player")
props = dbus.Interface(player, "org.freedesktop.DBus.Properties")
key_actions = {
31: start_clip,
32: end_clip,
39: save_config,
42: add_goal,
28: add_timer_event,
41: lambda: props.Set("org.mpris.MediaPlayer2.Player", "Rate", 3 - props.Get("org.mpris.MediaPlayer2.Player", "Rate")),
65: iface.PlayPause,
113: lambda: iface.Seek(-5000000),
114: lambda: iface.Seek(5000000),
}
def key(event):
print "pressed {} ({})".format(event.keysym, event.keycode)
if event.keycode in key_actions:
key_actions[event.keycode]()
window = Tkinter.Tk()
filename_frame = Tkinter.Frame(window)
filename_frame.pack()
Tkinter.Label(filename_frame, text="Current File:").pack(side=Tkinter.LEFT)
current_filename_label = Tkinter.Label(filename_frame, text="None")
current_filename_label.pack(side=Tkinter.LEFT)
Tkinter.Label(window, text="Game Events:").pack()
game_events_listbox = Tkinter.Listbox(window)
game_events_listbox.pack()
Tkinter.Label(window, text="Clips:").pack()
clips_listbox = Tkinter.Listbox(window)
clips_listbox.pack()
window.bind("<Key>", key)
Tkinter.mainloop()
| mit | Python | |
0f6e065a70bcd1f9dd64dfa04c13cb0065e33c13 | Add basic test for navigator | atkvo/masters-bot,atkvo/masters-bot,atkvo/masters-bot,atkvo/masters-bot,atkvo/masters-bot | src/autobot/src/navigator_test.py | src/autobot/src/navigator_test.py | #!/usr/bin/env python
import unittest
import mock
from autobot.msg import detected_object
from navigator import *
def fake_stopCar():
return True
def fake_srvTogglePathFinder(state):
return
def fake_setWallDist(dist, wall):
return
class NavigatorTest(unittest.TestCase):
@mock.patch('navigator.setWallDist',
side_effect=fake_setWallDist)
@mock.patch('navigator.srvTogglePathFinder',
side_effect=fake_srvTogglePathFinder)
@mock.patch('navigator.stopCar', side_effect=fake_stopCar)
def testPersonInFront(self, fake_stopCar,
fake_srvTogglePathFinder,
fake_setWallDist):
global OBJECT_MAP
global PATH_STATE
OBJECT_MAP.addToMap('person', 10, 50, 1.2)
OBJECT_MAP.addToMap('cat', 10, 50, 60)
OBJECT_MAP.addToMap('bat', 10, 50, 65)
PATH_STATE.enabled = True
onDecisionInterval(None)
fake_setWallDist.assert_not_called()
fake_stopCar.assert_called()
| mit | Python | |
d11d7c38edef63e50dbd1da78a8829905a86c2a5 | Add forgotten file | onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle | bluebottle/assignments/states.py | bluebottle/assignments/states.py | from bluebottle.activities.states import ActivityStateMachine, ContributionStateMachine
from bluebottle.assignments.models import Assignment, Applicant
class AssignmentStateMachine(ActivityStateMachine):
model = Assignment
class ApplicantStateMachine(ContributionStateMachine):
model = Applicant
| bsd-3-clause | Python | |
98f26afc012b1ab25360738776c36b58229d0b3a | Add CLI interface. | btimby/fulltext,btimby/fulltext | fulltext/__main__.py | fulltext/__main__.py | """
Fulltext CLI interface.
"""
from __future__ import absolute_import
import sys
import logging
from docopt import docopt
import fulltext
def _handle_open(path):
with open(path, 'rb') as f:
return fulltext.get(f)
def main(args=sys.argv[1:]):
"""
Extract text from a file.
Usage:
fulltext [-f] <path>...
Options:
-f Open file first.
"""
opt = docopt(main.__doc__.strip(), args, options_first=True)
logger = logging.getLogger()
logger.addHandler(logging.StreamHandler())
handler = fulltext.get
if opt['-f']:
handler = _handle_open
for path in opt['<path>']:
print(handler(path))
if __name__ == '__main__':
main()
| mit | Python | |
5666161f59a8c3efa5b3f884912f9777c9a12edd | Add the ability to get template variables from the CLI | rgreinho/saliere,TeamLovely/Saliere,rgreinho/saliere,rgreinho/saliere,rgreinho/saliere,TeamLovely/Saliere | saliere/main.py | saliere/main.py | #!/usr/bin/python3
"""Creates a skeleton for various projects based on Jinja2 templates.
Example:
$ main.py mysql -t salt-formula
$ main.py mysql-django -t django
$ main.py mysql -t salt-formula -o my-formula-directory
$ main.py mysql -t ~/my/custom/template -o my-template-directory
"""
import argparse
import os
from saliere.config import Config
from saliere.templatizer import Templatizer
# Define a list of valid paths to look for the templates
template_path_list = ['templates', '../templates', '/usr/local/share/saliere/templates']
def main():
# Create the parser.
parser = argparse.ArgumentParser(description="Create a skeleton for your formula.")
# Create the options.
parser.add_argument("-n", "--name", help="the name of your project", type=str)
parser.add_argument("-t", "--type", help="the type of your template or the path of a jinja template", type=str)
parser.add_argument("-o", "--output", default=os.getcwd(),
help="output directory (default is the current directory)", type=str)
parser.add_argument("-l", "--list", action="store_true", help="list the available templates")
parser.add_argument("-c", "--configfile", default='config.yml',
help="file containing the template information (default: config.yml)", type=str)
parser.add_argument("--var", default=None, help="template values", type=str)
# Parse the arguments.
args = parser.parse_args()
# Create the templatizer object.
t = Templatizer(template_path_list)
# List the templates if asked to.
if args.list:
print("Available templates: \n\t" + "\n\t".join(t.list_templates()))
exit(0)
# Ensure the project name and project type are specified.
if not args.name or not args.type:
print("The template type and project name are required: -t type -n name.")
exit(1)
# Retrieve the template path.
template_path = t.locate_template(args.type)
if not template_path:
print("The template name you specified does not exist.")
exit(1)
# Get the project type
t.template_type = args.type
# Load the template variables, if any, from the command line.
if args.var:
vars_split = args.var.split('|')
vars_list = [v.split('=', 1) for v in vars_split if '=' in v]
template_vars = dict(vars_list)
# Load the template variables, if any, from the configuration file.
else:
config = Config()
config.load_from_file(args.configfile)
template_vars = config.get_value(args.type)
# Call the copy function.
t.copy(args.name, args.output, template_vars)
if __name__ == '__main__':
main()
| #!/usr/bin/python3
"""Creates a skeleton for various projects based on Jinja2 templates.
Example:
$ main.py mysql -t salt-formula
$ main.py mysql-django -t django
$ main.py mysql -t salt-formula -o my-formula-directory
$ main.py mysql -t ~/my/custom/template -o my-template-directory
"""
import argparse
import os
from saliere.config import Config
from saliere.templatizer import Templatizer
# Define a list of valid paths to look for the templates
template_path_list = ['templates', '../templates', '/usr/local/share/saliere/templates']
def main():
# Create the parser.
parser = argparse.ArgumentParser(description="Create a skeleton for your formula.")
# Create the options.
parser.add_argument("-n", "--name", help="the name of your project", type=str)
parser.add_argument("-t", "--type", help="the type of your template or the path of a jinja template", type=str)
parser.add_argument("-o", "--output", default=os.getcwd(),
help="output directory (default is the current directory)", type=str)
parser.add_argument("-l", "--list", action="store_true", help="list the available templates")
parser.add_argument("-c", "--configfile", default='config.yml',
help="file containing the template information (default: config.yml)", type=str)
# Parse the arguments.
args = parser.parse_args()
# Create the templatizer object.
t = Templatizer(template_path_list)
# List the templates if asked to.
if args.list:
print("Available templates: \n\t" + "\n\t".join(t.list_templates()))
exit(0)
# Ensure the project name and project type are specified.
if not args.name or not args.type:
print("The template type and project name are required: -t type -n name.")
exit(1)
# Retrieve the template path.
template_path = t.locate_template(args.type)
if not template_path:
print("The template name you specified does not exist.")
exit(1)
# Get the project type
t.template_type = args.type
# Load the template variables, if any, from the configuration file.
config = Config()
config.load_from_file(args.configfile)
template_vars = config.get_value(args.type)
# Call the copy function.
t.copy(args.name, args.output, template_vars)
if __name__ == '__main__':
main()
| mit | Python |
1dc11286b21d8a84e3d1d9a194cc49275be4d97d | Add core models example factories | Candihub/pixel,Candihub/pixel,Candihub/pixel,Candihub/pixel,Candihub/pixel | apps/core/factories.py | apps/core/factories.py | from factory import Faker, Iterator, SubFactory
from factory.django import DjangoModelFactory
from apps.data.factories import EntryFactory, RepositoryFactory
from . import models
class SpeciesFactory(DjangoModelFactory):
name = Faker('word')
reference = SubFactory(EntryFactory)
repository = SubFactory(RepositoryFactory)
description = Faker('text', max_nb_chars=300)
class Meta:
model = 'core.Species'
django_get_or_create = ('name', )
class StrainFactory(DjangoModelFactory):
name = Faker('word')
description = Faker('text', max_nb_chars=300)
species = SubFactory(SpeciesFactory)
reference = SubFactory(EntryFactory)
class Meta:
model = 'core.Strain'
django_get_or_create = ('name', )
class OmicsUnitTypeFactory(DjangoModelFactory):
name = Faker('word')
description = Faker('text', max_nb_chars=300)
class Meta:
model = 'core.OmicsUnitType'
django_get_or_create = ('name', )
class OmicsUnitFactory(DjangoModelFactory):
reference = SubFactory(EntryFactory)
strain = SubFactory(StrainFactory)
type = SubFactory(OmicsUnitTypeFactory)
status = Iterator(s[0] for s in models.OmicsUnit.STATUS_CHOICES)
class Meta:
model = 'core.OmicsUnit'
django_get_or_create = ('reference', 'strain')
| bsd-3-clause | Python | |
6ffeadb02f751e27ab78216ea2932f9b540210b5 | Create subbytes.py | nvandervoort/PyRTL,deekshadangwal/PyRTL,deekshadangwal/PyRTL,nvandervoort/PyRTL,UCSBarchlab/PyRTL,UCSBarchlab/PyRTL | research/aes/subbytes.py | research/aes/subbytes.py | # subbytes.py
import pyrtl
from pyrtl import *
# S-box table.
sbox_data = [0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76, 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0, 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0, 0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15, 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a, 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75, 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0, 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84, 0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b, 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf, 0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8, 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5, 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2, 0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17, 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73, 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb, 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c, 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79, 0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9, 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08, 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6, 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a, 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e, 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e, 0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94, 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf, 0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16]
sbox = RomBlock(bitwidth=128, addrwidth=8, data=sbox_data)
def SubBytes(in_vector):
""" SubBytes round of AES.
Input: A single wirevector of bitwidth 128.
Output: A single wirevector of bitwidth 128.
"""
a00 = in_vector[120:128]
a01 = in_vector[112:120]
a02 = in_vector[104:112]
a03 = in_vector[96:104]
a10 = in_vector[88:96]
a11 = in_vector[80:88]
a12 = in_vector[72:80]
a13 = in_vector[64:72]
a20 = in_vector[56:64]
a21 = in_vector[48:56]
a22 = in_vector[40:48]
a23 = in_vector[32:40]
a30 = in_vector[24:32]
a31 = in_vector[16:24]
a32 = in_vector[8:16]
a33 = in_vector[0:8]
b00 = sbox[a00]
b01 = sbox[a01]
b02 = sbox[a02]
b03 = sbox[a03]
b10 = sbox[a10]
b11 = sbox[a11]
b12 = sbox[a12]
b13 = sbox[a13]
b20 = sbox[a20]
b21 = sbox[a21]
b22 = sbox[a22]
b23 = sbox[a23]
b30 = sbox[a30]
b31 = sbox[a31]
b32 = sbox[a32]
b33 = sbox[a33]
out_vector = pyrtl.concat(b00, b01, b02, b03,
b10, b11, b12, b13,
b20, b21, b22, b23,
b30, b31, b32, b33)
return out_vector
# Hardware build.
aes_input = pyrtl.Input(bitwidth=128, name='aes_input')
aes_output = pyrtl.Output(bitwidth=128, name='aes_output')
aes_output <<= SubBytes(aes_input)
print pyrtl.working_block()
print
sim_trace = pyrtl.SimulationTrace()
sim = pyrtl.Simulation(tracer=sim_trace)
for cycle in range(1):
sim.step({aes_input: 0x53})
sim_trace.render_trace(symbol_len=12, segment_size=5)
| bsd-3-clause | Python | |
b27a5127c8a98df0cdbe8715587b3d40415e32c7 | add unit tests | mhcomm/pypeman,mhcomm/pypeman,mhcomm/pypeman | pypeman/tests/test_ctx_nodes.py | pypeman/tests/test_ctx_nodes.py | """
tests for pypeman.contrib.ctx
"""
import asyncio
import pytest
from pypeman.contrib.ctx import CombineCtx
from pypeman.nodes import NodeException
from pypeman.tests.pytest_helpers import clear_graph # noqa: F401
from pypeman.tests.common import generate_msg
# TODO: might refactor to another file?
from pypeman.tests.test_nodes import FakeChannel
def mk_msgs_w_ctx(*ctx_ids):
"""
helper to create a msg with a few contexts
"""
msg = generate_msg(
message_content="test",
message_meta=dict(entry1="meta1"),
)
ctx_msgs = []
for ctx_id in ctx_ids:
meta = dict(entry1="meta_%s" % ctx_id)
ctx_msg = generate_msg(
message_content={"val_%s" % ctx_id: "data_%s" % ctx_id},
message_meta=meta,
)
msg.add_context(ctx_id, ctx_msg)
ctx_msgs.append(ctx_msg)
return msg, ctx_msgs
# def __init__(self, ctx_names, meta_from=None, flatten=False, *args, **kwargs):
@pytest.mark.usefixtures("clear_graph")
def test_combine_ctx_not_two_names(event_loop):
with pytest.raises(NodeException):
CombineCtx([], name="ctx1")
with pytest.raises(NodeException):
CombineCtx(["a"], name="ctx2")
@pytest.mark.usefixtures("clear_graph")
def test_combine_ctx_2_names(event_loop):
loop = event_loop
asyncio.set_event_loop(loop)
# nut == Node Under Test
nut = CombineCtx(["a", "b"], name="ctx1")
nut.channel = FakeChannel(loop)
msg, ctx_msgs = mk_msgs_w_ctx("a", "b")
rslt = loop.run_until_complete(nut.handle(msg))
assert rslt.payload["a"] == ctx_msgs[0].payload
assert rslt.payload["b"] == ctx_msgs[1].payload
assert rslt.meta == ctx_msgs[0].meta
@pytest.mark.usefixtures("clear_graph")
def test_combine_ctx_2_names_w_meta(event_loop):
loop = event_loop
asyncio.set_event_loop(loop)
# nut == Node Under Test
nut = CombineCtx(["a", "b"], meta_from="b", name="ctx1")
nut.channel = FakeChannel(loop)
msg, ctx_msgs = mk_msgs_w_ctx("a", "b")
rslt = loop.run_until_complete(nut.handle(msg))
assert rslt.payload["a"] == ctx_msgs[0].payload
assert rslt.payload["b"] == ctx_msgs[1].payload
assert rslt.meta == ctx_msgs[1].meta
@pytest.mark.usefixtures("clear_graph")
def test_combine_ctx_2_names_flat(event_loop):
loop = event_loop
asyncio.set_event_loop(loop)
# nut == Node Under Test
nut = CombineCtx(["a", "b"], name="ctx1", flatten=True)
nut.channel = FakeChannel(loop)
msg, ctx_msgs = mk_msgs_w_ctx("a", "b")
rslt = loop.run_until_complete(nut.handle(msg))
exp_payload = dict(ctx_msgs[0].payload)
exp_payload.update(ctx_msgs[1].payload)
assert rslt.payload == exp_payload
assert rslt.meta == ctx_msgs[0].meta
| apache-2.0 | Python | |
38e231076209f0d71ee64bd4d60e1769aac8ce93 | add raspberry pi receiver script | zerog2k/power_meter_cs5460a,zerog2k/power_meter_cs5460a | power_monitor_rf24.py | power_monitor_rf24.py | #!/usr/bin/env python
# receive values from CS5460A power monitor via NRF24L01
# may need to run as sudo
# see https://github.com/zerog2k/power_meter_cs5460a for arduino transmitter code
import time as time
from RF24 import *
import RPi.GPIO as GPIO
import binascii
import struct
from datetime import datetime, date
MSGTYPES = [ "MSG_POWER_METER" ]
irq_gpio_pin = None
########### USER CONFIGURATION ###########
# See https://github.com/TMRh20/RF24/blob/master/RPi/pyRF24/readme.md
# CE Pin, CSN Pin, SPI Speed
#RPi B+
# Setup for GPIO 22 CE and CE0 CSN for RPi B+ with SPI Speed @ 8Mhz
radio = RF24(RPI_BPLUS_GPIO_J8_15, RPI_BPLUS_GPIO_J8_24, BCM2835_SPI_SPEED_1MHZ)
# Setup for connected IRQ pin, GPIO 24 on RPi B+; uncomment to activate
#irq_gpio_pin = RPI_BPLUS_GPIO_J8_18
#irq_gpio_pin = 24
pipes = [0x4A454E5300]
radio.begin()
radio.setChannel( 1 )
# set datarate
radio.setDataRate( RF24_250KBPS )
#radio.setPALevel(RF24_PA_MAX)
radio.enableDynamicPayloads()
radio.printDetails()
radio.openReadingPipe(0, pipes[0])
radio.startListening()
dt = datetime
pipenum = -1
# forever loop
while True:
try:
have_data, pipenum = radio.available_pipe()
if have_data:
len = radio.getDynamicPayloadSize()
if len > 0:
msgtype = radio.read(1);
receive_payload = radio.read(len)
if msgtype[0] == MSGTYPES.index("MSG_POWER_METER"):
(voltage, current, true_power, power_factor) = struct.unpack_from("ffff", receive_payload, 1)
print "%s pipe: %d, msgtype: %s, voltage: %0.1f, current: %0.2f, true_power: %0.1f, PF: %0.2f" \
% (dt.now(), pipenum, MSGTYPES[msgtype[0]], voltage, current, true_power, power_factor)
else:
print "%s got: pipe=%d size=%s raw=%s" % (dt.now(), pipenum, len, binascii.hexlify(receive_payload))
time.sleep(1)
except Exception as e:
print e.strerror
| mit | Python | |
3b33a9410bac5b710a52e603fd40ed88765b7414 | Create colecoes.py | FelipeGomesSan/poo-python,gomesfelipe/poo-python | colecoes/colecoes.py | colecoes/colecoes.py | from aula5.pessoa import import Pessoa
from aula6.pessoas_tipos import Homem, Mulher
if __name__=='__main__':
gomes = Homem('Gomes')
gomes_igual = Homem('Gomes')
gomes_identico=gomes
selina=Mulher('Selina')
print(gomes is gomes_igual)
print(gomes is gomes_identico)
print(gomes == gomes_igual)
print(gomes == gomes_identico)
| mit | Python | |
0f06b139ecfbdb05dee86b4cbda5b23c9af4379a | test private name | xupeixiang/dive_into_python | chap5/test_private_name_coven.py | chap5/test_private_name_coven.py | #!/usr/bin/python
# -*- indent-tabs-mode: nil; tab-width: 4 -*-
# vi: et ts=4 sts=4 sw=4
class Foo:
def __priv(self):
print "I'm private"
def main():
foo = Foo()
getattr(Foo, '_Foo__priv')(foo)
if __name__ == '__main__':
main()
| apache-2.0 | Python | |
c48bf268ec7e077443ad347f007d7477d841cc04 | Add ds_binary_heap.py | bowen0701/algorithms_data_structures | ds_binary_heap.py | ds_binary_heap.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
class BinaryHeap(object):
    """Skeleton for a binary heap; no operations are implemented yet."""
    def __init__(self):
        # TODO: initialise the backing array once the heap is implemented.
        pass
def main():
    # Placeholder entry point for manual testing of BinaryHeap.
    pass

if __name__ == '__main__':
    main()
| bsd-2-clause | Python | |
7b9023dc5dcdd4ad8e1ea9c7803e81a13da42a5b | Add MNIST dataset | niboshi/chainer,delta2323/chainer,cupy/cupy,chainer/chainer,keisuke-umezawa/chainer,wkentaro/chainer,jnishi/chainer,niboshi/chainer,wkentaro/chainer,cupy/cupy,chainer/chainer,keisuke-umezawa/chainer,ronekko/chainer,keisuke-umezawa/chainer,hvy/chainer,cupy/cupy,pfnet/chainer,okuta/chainer,kiyukuta/chainer,ktnyt/chainer,ktnyt/chainer,hvy/chainer,keisuke-umezawa/chainer,chainer/chainer,ktnyt/chainer,ktnyt/chainer,rezoo/chainer,okuta/chainer,hvy/chainer,jnishi/chainer,wkentaro/chainer,wkentaro/chainer,jnishi/chainer,okuta/chainer,kikusu/chainer,anaruse/chainer,tkerola/chainer,kashif/chainer,kikusu/chainer,ysekky/chainer,cupy/cupy,jnishi/chainer,aonotas/chainer,niboshi/chainer,chainer/chainer,okuta/chainer,hvy/chainer,niboshi/chainer | chainer/dataset/datasets/mnist.py | chainer/dataset/datasets/mnist.py | import gzip
import os
import struct
import numpy
import six
from six.moves.urllib import request
from chainer.dataset.datasets import tuple_dataset
from chainer.dataset import download
def get_mnist_training(withlabel=True, ndim=1, dtype=numpy.float32, scale=1.):
    """Gets the MNIST training set.

    `MNIST <http://yann.lecun.com/exdb/mnist/>`_ is a collection of
    grey-scale 28x28 images of hand-written digits.  Every pixel is
    scaled into the interval ``[0, scale]``.

    This function returns the training set of the official MNIST dataset.

    Args:
        withlabel (bool): If True, each example is an (image, label)
            tuple; otherwise the dataset holds images only.
        ndim (int): Number of dimensions of each image.  The image shape
            is chosen accordingly:

            - ``ndim == 1``: shape ``(784,)``
            - ``ndim == 2``: shape ``(28, 28)``
            - ``ndim == 3``: shape ``(1, 28, 28)``

        dtype: Data type of images.
        scale (float): Pixel value scale.  With the default of 1, pixel
            values lie in ``[0, 1]``.

    Returns:
        Dataset of tuples if ``withlabel`` is True, otherwise an array
        whose rows are images.

    .. seealso::
        Use :func:`get_mnist_test` to retrieve the MNIST test set.

    """
    return _preprocess_mnist(
        _retrieve_mnist_training(), withlabel, ndim, dtype, scale)
def get_mnist_test(withlabel=True, ndim=1, dtype=numpy.float32, scale=1.):
    """Gets the MNIST test set.

    `MNIST <http://yann.lecun.com/exdb/mnist/>`_ is a collection of
    grey-scale 28x28 images of hand-written digits.  Every pixel is
    scaled into the interval ``[0, scale]``.

    This function returns the test set of the official MNIST dataset.

    Args:
        withlabel (bool): If True, each example is an (image, label)
            tuple; otherwise the dataset holds images only.
        ndim (int): Number of dimensions of each image.  See
            :func:`get_mnist_training` for details.
        dtype: Data type of images.
        scale (float): Pixel value scale.  With the default of 1, pixel
            values lie in ``[0, 1]``.

    Returns:
        Dataset of tuples if ``withlabel`` is True, otherwise an array
        whose rows are images.

    """
    return _preprocess_mnist(
        _retrieve_mnist_test(), withlabel, ndim, dtype, scale)
def _preprocess_mnist(raw, withlabel, ndim, dtype, scale):
    """Reshape, cast and rescale raw MNIST arrays into a dataset."""
    target_shapes = {1: None, 2: (-1, 28, 28), 3: (-1, 1, 28, 28)}
    if ndim not in target_shapes:
        raise ValueError('invalid ndim for MNIST dataset')
    images = raw['x']
    shape = target_shapes[ndim]
    if shape is not None:
        images = images.reshape(shape)
    # astype copies, so the in-place rescale never touches the raw array.
    images = images.astype(dtype)
    images *= scale / 255.

    if not withlabel:
        return images
    labels = raw['y'].astype(numpy.int32)
    return tuple_dataset.TupleDataset(images, labels)
def _retrieve_mnist_training():
    """Fetch (and cache) the raw MNIST training images and labels."""
    return _retrieve_mnist('train.npz', [
        'http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz',
        'http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz',
    ])
def _retrieve_mnist_test():
    """Fetch (and cache) the raw MNIST test images and labels."""
    return _retrieve_mnist('test.npz', [
        'http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz',
        'http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz',
    ])
def _retrieve_mnist(name, urls):
    """Return the cached npz content for ``name``, creating it from
    ``urls`` on first use."""
    root = download.get_dataset_directory('pfnet/chainer/mnist')

    def creator(path):
        return _make_npz(path, urls)

    return download.cached_create_file(
        os.path.join(root, name), creator, numpy.load)
def _make_npz(path, urls):
    """Download one MNIST image/label file pair, convert it to numpy
    arrays and save them as a compressed npz file at ``path``.

    Args:
        path (str): Destination of the npz file.
        urls (list): ``[image_url, label_url]`` for the gzipped idx files.

    Returns:
        dict: images under ``'x'`` (uint8, shape ``(N, 784)``) and labels
        under ``'y'`` (uint8, shape ``(N,)``).

    Raises:
        RuntimeError: If the two files disagree on the item count.
    """
    x_url, y_url = urls
    x_path = download.cached_download(x_url)
    y_path = download.cached_download(y_url)

    with gzip.open(x_path, 'rb') as fx, gzip.open(y_path, 'rb') as fy:
        fx.read(4)  # skip the idx magic numbers
        fy.read(4)
        # BUG FIX: struct.unpack takes (format, buffer) -- the arguments
        # were reversed -- and it returns a 1-tuple that must be unpacked.
        N, = struct.unpack('>i', fx.read(4))
        if (N,) != struct.unpack('>i', fy.read(4)):
            raise RuntimeError('wrong pair of MNIST images and labels')
        fx.read(8)  # skip the row/column counts (28 x 28)
        # Bulk-read the payloads; the old one-ord()-per-byte loops were
        # O(N*784) Python-level reads.  copy() keeps the arrays writable.
        x = numpy.frombuffer(
            fx.read(N * 784), dtype=numpy.uint8).reshape(N, 784).copy()
        y = numpy.frombuffer(fy.read(N), dtype=numpy.uint8).copy()

    numpy.savez_compressed(path, x=x, y=y)
    return {'x': x, 'y': y}
| mit | Python | |
d0cfb59819cdb1f55115616e3600c8483f54d43f | add viz.py file | ijstokes/bokeh-blaze-tutorial,chdoig/scipy2015-blaze-bokeh,kcompher/scipy2015-blaze-bokeh,kunalj101/scipy2015-blaze-bokeh,kunalj101/scipy2015-blaze-bokeh,zhenxu66/scipy2015-blaze-bokeh,jnovinger/scipy2015-blaze-bokeh,chdoig/scipy2015-blaze-bokeh,jnovinger/scipy2015-blaze-bokeh,kcompher/scipy2015-blaze-bokeh,ijstokes/bokeh-blaze-tutorial,zhenxu66/scipy2015-blaze-bokeh | viz.py | viz.py | # -*- coding: utf-8 -*-
import math
from collections import OrderedDict
import pandas as pd
import netCDF4
from bokeh.plotting import figure, show, output_notebook
from bokeh.models import DatetimeTickFormatter, ColumnDataSource, HoverTool, Plot, Range1d
from bokeh.palettes import RdBu11
from bokeh.models.glyphs import Text, Rect
import utils.world_countries as wc
from utils.colormap import RGBAColorMapper
# Shared colour mapper: values in [-6, 6] mapped onto the RdBu11 palette.
colormap = RGBAColorMapper(-6, 6, RdBu11)

def get_slice(t, year, month):
    """Return the RGBA-coloured temperature grid for one (year, month).

    Assumes ``t``'s first axis counts months elapsed since January 1850,
    followed by the two spatial axes -- TODO confirm against the netCDF
    file layout.
    """
    i = (year - 1850)*12 + month - 1
    return colormap.color(t[i, :, :])
def climate_map():
    """Build the world-map figure with one month's temperature raster.

    Reads the Berkeley Earth netCDF file from ``data/`` and overlays
    country outlines from the bundled world-countries data.
    """
    data = netCDF4.Dataset('data/Land_and_Ocean_LatLong1.nc')
    t = data.variables['temperature']
    # Hard-coded initial frame: January 1950.
    image = get_slice(t, 1950, 1)
    world_countries = wc.data.copy()
    worldmap = pd.DataFrame.from_dict(world_countries, orient='index')
    # Lat/lon axes spanning the whole globe; no default axis decorations.
    p = figure(width=900, height=500, x_axis_type=None, y_axis_type=None,
        x_range=[-180,180], y_range=[-90,90], toolbar_location="left")

    # Named 'image' so callers can look the renderer up and swap frames.
    p.image_rgba(
        image=[image],
        x=[-180], y=[-90],
        dw=[360], dh=[180], name='image'
    )

    # Transparent country polygons drawn as outlines over the raster.
    p.patches(xs=worldmap['lons'], ys=worldmap['lats'], fill_color="white", fill_alpha=0,
        line_color="black", line_width=0.5)

    return p
def legend():
    """Build the vertical colour-bar legend for the climate map.

    Stacks one 40x40 swatch per RdBu11 palette entry inside a 100x500
    plot and labels the two extremes of the scale.
    """
    fig = Plot(
        x_range=Range1d(0, 100),
        y_range=Range1d(0, 500),
        title="",
        plot_width=100,
        plot_height=500,
        min_border=0,
        toolbar_location=None,
        outline_line_color="#FFFFFF",
    )

    # One rectangle per palette colour, stacked bottom-to-top.
    swatch = 40
    for position, color in enumerate(RdBu11, start=1):
        fig.add_glyph(Rect(
            x=40, y=swatch * position,
            width=swatch, height=40,
            fill_color=color, line_color='black'))

    # Label the two ends of the scale.
    fig.add_glyph(Text(x=50, y=0, text=['-6 ºC']))
    fig.add_glyph(Text(x=50, y=460, text=['6 ºC']))
    return fig
def timeseries():
    # Load the monthly anomaly CSV and add a 12-month moving average.
    # NOTE(review): pd.rolling_mean() was removed in pandas 0.18+; this
    # code targets the older API -- confirm the pinned pandas version.
    df = pd.read_csv('data/Land_Ocean_Monthly_Anomaly_Average.csv')
    df['datetime'] = pd.to_datetime(df['datetime'])
    df = df[['anomaly','datetime']]
    df['moving_average'] = pd.rolling_mean(df['anomaly'], 12)
    df = df.fillna(0)

    # All the tools wanted in the plot, comma-separated in one string.
    TOOLS="crosshair,pan,wheel_zoom,box_zoom,reset,hover,previewsave"

    # New figure with a datetime x-axis.
    t = figure(x_axis_type = "datetime", width=1000, height=200,tools=TOOLS)

    # The hover tool doesn't render datetimes appropriately, so build a
    # "YYYY-MM" string column for it (date only, time stripped).
    f = lambda x: str(x)[:7]
    df["datetime_s"]=df[["datetime"]].applymap(f)
    source = ColumnDataSource(df)

    # Raw anomaly in grey; the moving average (named "mva") in red.
    t.line('datetime', 'anomaly', color='lightgrey', legend='anom', source=source)
    t.line('datetime', 'moving_average', color='red', legend='avg', source=source, name="mva")

    # Axis, legend and grid styling.
    xformatter = DatetimeTickFormatter(formats=dict(months=["%b %Y"], years=["%Y"]))
    t.xaxis[0].formatter = xformatter
    t.xaxis.major_label_orientation = math.pi/4
    t.yaxis.axis_label = 'Anomaly(ºC)'
    t.legend.orientation = "bottom_right"
    t.grid.grid_line_alpha=0.2
    t.toolbar_location=None

    # Custom HTML tooltip, attached only to the "mva" renderer.
    hover = t.select(dict(type=HoverTool))
    hover.tooltips = """
        <div>
            <span style="font-size: 15px;">Anomaly</span>
            <span style="font-size: 17px; color: red;">@anomaly</span>
        </div>
        <div>
            <span style="font-size: 15px;">Month</span>
            <span style="font-size: 10px; color: grey;">@datetime_s</span>
        </div>
        """
    hover.renderers = t.select("mva")

    # Show plot
    #show(t)
    return t
cd1c67c34768bdef0cc4649573e2541558e648ad | Add : Basic client implementation | oleiade/Elevator | elevator/client.py | elevator/client.py | #!/usr/bin/env python
#Copyright (c) 2011 Fabula Solutions. All rights reserved.
#Use of this source code is governed by a BSD-style license that can be
#found in the license.txt file.
# leveldb client
import zmq
import threading
import time
import ujson as json
class Elevator(object):
    """Minimal ZeroMQ client for an Elevator (leveldb) server.

    Every public call sends ``[COMMAND, json-args]`` as a multipart
    message and returns the first frame of the reply.
    """
    def __init__(self, host="tcp://127.0.0.1:4141", timeout=10*1000):
        self.host = host
        self.timeout = timeout
        self.connect()

    def __del__(self):
        self.close()

    def connect(self):
        """Open the XREQ socket to the configured host."""
        self.context = zmq.Context()
        self.socket = self.context.socket(zmq.XREQ)
        self.socket.connect(self.host)

    def _request(self, command, args):
        # Shared send/receive round-trip for all commands.
        self.socket.send_multipart([command, json.dumps(args)])
        return self.socket.recv_multipart()[0]

    def Get(self, key):
        return self._request('GET', [key])

    def Put(self, key, value):
        return self._request('PUT', [key, value])

    def Delete(self, key):
        return self._request('DELETE', [key])

    def Range(self, start=None, end=None):
        return self._request('RANGE', [start, end])

    def close(self):
        """Release the socket and terminate the ZeroMQ context."""
        self.socket.close()
        self.context.term()
| mit | Python | |
fc40c3f740f9f5dedbcddd4dcbd274c76aaba529 | Add ToS script | sebastienvercammen/ptc-acc-gen,sebastienvercammen/ptc-acc-gen,FrostTheFox/ptc-acc-gen,FrostTheFox/ptc-acc-gen,sebastienvercammen/ptc-acc-gen,FrostTheFox/ptc-acc-gen | output/tos.py | output/tos.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""tos.py - Accept PokemonGo ToS for multiple accounts using file."""
from pgoapi import PGoApi
from pgoapi.utilities import f2i
from pgoapi import utilities as util
from pgoapi.exceptions import AuthException
import pprint
import time
import threading
import sys, getopt
def accept_tos(username, password):
    """Log one PTC account in and mark the tutorial complete,
    which accepts the Terms of Service for that account."""
    api = PGoApi()
    # Fixed position (roughly New York City) -- presumably required
    # before login; TODO confirm against the pgoapi docs.
    api.set_position(40.7127837, -74.005941, 0.0)
    api.login('ptc', username, password)
    time.sleep(2)
    req = api.create_request()
    req.mark_tutorial_complete(tutorials_completed = 0, send_marketing_emails = False, send_push_notifications = False)
    # NOTE(review): the response is captured but never inspected.
    response = req.call()
    print('Accepted Terms of Service for {}'.format(username))

# Read "username password" pairs, one per line, from the file in argv[1].
with open(str(sys.argv[1])) as f:
    credentials = [x.strip().split(' ') for x in f.readlines()]
for username,password in credentials:
accept_tos(username, password) | mit | Python | |
db4f449be99d7b66bd7c46a1a3af8b46424421c6 | Add tests for DummyCurrentPlaylistController.get_by_{id,uri} | rawdlite/mopidy,kingosticks/mopidy,bacontext/mopidy,glogiotatidis/mopidy,jodal/mopidy,bacontext/mopidy,SuperStarPL/mopidy,jmarsik/mopidy,mopidy/mopidy,priestd09/mopidy,woutervanwijk/mopidy,pacificIT/mopidy,diandiankan/mopidy,glogiotatidis/mopidy,dbrgn/mopidy,adamcik/mopidy,SuperStarPL/mopidy,hkariti/mopidy,bacontext/mopidy,kingosticks/mopidy,tkem/mopidy,swak/mopidy,priestd09/mopidy,bencevans/mopidy,abarisain/mopidy,jcass77/mopidy,quartz55/mopidy,bencevans/mopidy,dbrgn/mopidy,diandiankan/mopidy,rawdlite/mopidy,woutervanwijk/mopidy,liamw9534/mopidy,dbrgn/mopidy,quartz55/mopidy,quartz55/mopidy,tkem/mopidy,rawdlite/mopidy,quartz55/mopidy,abarisain/mopidy,mokieyue/mopidy,diandiankan/mopidy,jodal/mopidy,mopidy/mopidy,SuperStarPL/mopidy,swak/mopidy,hkariti/mopidy,jodal/mopidy,pacificIT/mopidy,priestd09/mopidy,mokieyue/mopidy,jcass77/mopidy,ZenithDK/mopidy,vrs01/mopidy,jmarsik/mopidy,kingosticks/mopidy,mokieyue/mopidy,pacificIT/mopidy,bencevans/mopidy,jmarsik/mopidy,bencevans/mopidy,hkariti/mopidy,ali/mopidy,bacontext/mopidy,liamw9534/mopidy,ali/mopidy,rawdlite/mopidy,ZenithDK/mopidy,vrs01/mopidy,jmarsik/mopidy,tkem/mopidy,vrs01/mopidy,ali/mopidy,hkariti/mopidy,ZenithDK/mopidy,swak/mopidy,ali/mopidy,vrs01/mopidy,adamcik/mopidy,mokieyue/mopidy,pacificIT/mopidy,diandiankan/mopidy,swak/mopidy,dbrgn/mopidy,glogiotatidis/mopidy,mopidy/mopidy,jcass77/mopidy,adamcik/mopidy,ZenithDK/mopidy,SuperStarPL/mopidy,tkem/mopidy,glogiotatidis/mopidy | tests/backends/get_test.py | tests/backends/get_test.py | import unittest
from mopidy.backends.dummy import DummyBackend, DummyCurrentPlaylistController
from mopidy.models import Playlist, Track
class CurrentPlaylistGetTest(unittest.TestCase):
    """Tests for DummyCurrentPlaylistController.get_by_id/get_by_uri."""
    def setUp(self):
        self.b = DummyBackend()
        self.c = self.b.current_playlist
    def test_get_by_id_returns_unique_match(self):
        track = Track(id=1)
        self.c.playlist = Playlist(tracks=[Track(id=13), track, Track(id=17)])
        self.assertEqual(track, self.c.get_by_id(1))
    def test_get_by_id_returns_first_of_multiple_matches(self):
        track = Track(id=1)
        self.c.playlist = Playlist(tracks=[Track(id=13), track, track])
        self.assertEqual(track, self.c.get_by_id(1))
    def test_get_by_id_raises_keyerror_if_no_match(self):
        self.c.playlist = Playlist(tracks=[Track(id=13), Track(id=17)])
        # assertRaises replaces the manual try/self.fail/except dance.
        self.assertRaises(KeyError, self.c.get_by_id, 1)
    def test_get_by_uri_returns_unique_match(self):
        track = Track(uri='a')
        self.c.playlist = Playlist(
            tracks=[Track(uri='z'), track, Track(uri='y')])
        self.assertEqual(track, self.c.get_by_uri('a'))
    def test_get_by_uri_returns_first_of_multiple_matches(self):
        track = Track(uri='a')
        self.c.playlist = Playlist(tracks=[Track(uri='z'), track, track])
        self.assertEqual(track, self.c.get_by_uri('a'))
    def test_get_by_uri_raises_keyerror_if_no_match(self):
        self.c.playlist = Playlist(tracks=[Track(uri='z'), Track(uri='y')])
        self.assertRaises(KeyError, self.c.get_by_uri, 'a')
| apache-2.0 | Python | |
069a031ce871125fb727a5ec43f406539be0150f | add .mdown ext in check_ext | tankywoo/simiki,tankywoo/simiki,tankywoo/simiki,zhaochunqi/simiki,9p0le/simiki,9p0le/simiki,zhaochunqi/simiki,9p0le/simiki,zhaochunqi/simiki | simiki/utils.py | simiki/utils.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
from os import path as osp
RESET_COLOR = "\033[0m"

COLOR_CODES = {
    "debug"    : "\033[1;34m", # blue
    "info"     : "\033[1;32m", # green
    "warning"  : "\033[1;33m", # yellow
    "error"    : "\033[1;31m", # red
    "critical" : "\033[1;41m", # background red
}

def color_msg(level, msg):
    """Wrap ``msg`` in the ANSI colour for ``level``, then reset."""
    return "{0}{1}{2}".format(COLOR_CODES[level], msg, RESET_COLOR)
def check_path_exists(path):
    """Return True if ``path`` (file or directory) exists."""
    # os.path.exists already returns a bool; the if/return True/return
    # False dance was redundant.
    return osp.exists(path)
def check_extension(filename):
    """Return True when ``filename`` carries a recognised Markdown suffix.

    Files whose suffix is not in the allowed set get filtered out by the
    caller.  The same filtering could be done with ``fnmatch``::

        patterns = ["*.md", "*.mkd", "*.markdown"]
        fnmatch.filter(files, pattern)
    """
    allowed_suffixes = {".md", ".mkd", ".mdown", ".markdown"}
    return osp.splitext(filename)[1] in allowed_suffixes
if __name__ == "__main__":
print(color_msg("debug", "DEBUG"))
print(color_msg("info", "DEBUG"))
print(color_msg("warning", "WARNING"))
print(color_msg("error", "ERROR"))
print(color_msg("critical", "CRITICAL"))
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
from os import path as osp
RESET_COLOR = "\033[0m"
COLOR_CODES = {
"debug" : "\033[1;34m", # blue
"info" : "\033[1;32m", # green
"warning" : "\033[1;33m", # yellow
"error" : "\033[1;31m", # red
"critical" : "\033[1;41m", # background red
}
def color_msg(level, msg):
    """Return ``msg`` wrapped in the ANSI escape for ``level``."""
    return COLOR_CODES[level] + msg + RESET_COLOR
def check_path_exists(path):
    """Return True if ``path`` (file or directory) exists."""
    # os.path.exists already returns a bool; no branching needed.
    return osp.exists(path)
def check_extension(filename):
    """Return True when ``filename`` has an allowed Markdown suffix.

    Equivalent filtering via ``fnmatch``::

        patterns = ["*.md", "*.mkd", "*.markdown"]
        fnmatch.filter(files, pattern)
    """
    allowed = {".md", ".mkd", ".markdown"}
    return osp.splitext(filename)[1] in allowed
if __name__ == "__main__":
print(color_msg("debug", "DEBUG"))
print(color_msg("info", "DEBUG"))
print(color_msg("warning", "WARNING"))
print(color_msg("error", "ERROR"))
print(color_msg("critical", "CRITICAL"))
| mit | Python |
f68c673273acbc62259213ceb47bb34e7d3f87fd | Create combination_test.py | BrahmsPotato/PlayWithPrettyFat | test/combination_test.py | test/combination_test.py |
def loop(array_input, com_len, head, array_output):
    """Append to ``array_output`` every ``com_len``-element combination of
    ``array_input`` whose first (lowest-index) element is
    ``array_input[head]``.
    """
    import itertools
    n = com_len - 1  # elements still to pick after the head element
    if n == 0:
        # Combinations of length 1 are just the head element itself.
        array_output.append([array_input[head]])
        return
    # Pick the middle n-1 indices; ``core`` then sweeps the final index
    # over every position to the right of them.
    for middle in itertools.combinations(range(head + 1, len(array_input)), n - 1):
        sign = list(middle) + [(middle[-1] + 1) if middle else (head + 1)]
        core(head, sign, n, array_input, array_output)

def core(head, sign, n, array_input, array_output):
    """Emit one combination for each possible final element.

    The prefix is the head element plus the ``n - 1`` elements selected
    by ``sign[:n - 1]``; the final element sweeps from index
    ``sign[n - 1]`` to the end of the input.

    Fixes vs. the original: ``list.extend`` returns None (which the old
    code assigned to ``array_child``), every emitted row aliased one
    shared list object, and the index enumeration missed combinations.
    """
    fetch = sign[n - 1]
    prefix = [array_input[head]] + [array_input[k] for k in sign[:n - 1]]
    while fetch < len(array_input):
        # A fresh list per combination -- no shared-mutation aliasing.
        array_output.append(prefix + [array_input[fetch]])
        fetch += 1
if __name__ == "__main__":
array_input=[1,2,3,4,5]; com_len=3; array_output=[]
for head in range(0,len(array_input)-com_len+2):
loop(array_input, com_len, head,array_output)
print array_output
| mit | Python | |
d4d5ef52cf7ac9f40bb8ada199b6c035690eacfa | Add tests for transmission | Gr1N/rpihelper,Gr1N/rpihelper | rpihelper/transmission/tests.py | rpihelper/transmission/tests.py | # -*- coding: utf-8 -*-
import transmissionrpc
from unittest import TestCase
from unittest.mock import patch, MagicMock
from rpihelper.transmission.logic import (
transmissionrpc_client, transmissionrpc_add_torrent,
)
__all__ = (
'TransmissionrpcClientLogicTests',
'TransmissionrpcAddTorrentLogicTests',
)
def raise_exception(*args, **kwargs):
    """Stub used with mock.patch to simulate a TransmissionError."""
    raise transmissionrpc.error.TransmissionError
class TransmissionrpcClientLogicTests(TestCase):
    """transmissionrpc_client() should wrap transmissionrpc.Client errors."""
    @patch('rpihelper.transmission.logic.transmissionrpc.Client')
    def test_ok(self, mock_client):
        tc = transmissionrpc_client()
        # BUG FIX: Mock.assert_called_once() is a silent no-op on old mock
        # releases (and an AttributeError on mock 2.0 / Python 3.5); assert
        # the call count explicitly instead.
        self.assertEqual(1, mock_client.call_count)
        self.assertTrue(isinstance(tc, MagicMock))
    @patch('rpihelper.transmission.logic.transmissionrpc.Client', new=raise_exception)
    def test_transmission_error(self):
        # Client raising TransmissionError -> helper returns None.
        tc = transmissionrpc_client()
        self.assertIsNone(tc)
class TransmissionrpcAddTorrentLogicTests(TestCase):
    """Behaviour of transmissionrpc_add_torrent on success and on error."""
    def test_ok(self):
        # add_torrent succeeds -> helper reports success.
        tc = MagicMock()
        tc_add_torrent = MagicMock()
        tc.add_torrent = tc_add_torrent
        success = transmissionrpc_add_torrent(tc, 'fake_file')
        # BUG FIX: Mock.assert_called_once() silently passed on old mock
        # releases; assert the call count explicitly.
        self.assertEqual(1, tc_add_torrent.call_count)
        self.assertTrue(success)
    def test_transmission_error(self):
        # add_torrent raising TransmissionError -> helper reports failure.
        tc = MagicMock()
        tc_add_torrent = raise_exception
        tc.add_torrent = tc_add_torrent
        success = transmissionrpc_add_torrent(tc, 'fake_file')
        self.assertFalse(success)
| mit | Python | |
cb1e797c6039a1677024a563852b117b581faaf2 | Add solution of problem 1 in Python | tborisova/euler,nerd-life/euler,nerd-life/euler,tborisova/euler,tborisova/euler,nerd-life/euler,tborisova/euler,nerd-life/euler,tborisova/euler,nerd-life/euler,nerd-life/euler,tborisova/euler,nerd-life/euler,tborisova/euler | problem1/rumen.py | problem1/rumen.py | sum(filter(lambda x: x % 3 == 0 or x % 5 == 0, range(1, 1000)))
| mit | Python | |
cb82fd05c02b97bfc82668164fe3f3bb22faaade | Add fair and square | laichunpongben/CodeJam | 2013/qualification_round/fair_and_square.py | 2013/qualification_round/fair_and_square.py | #!/usr/bin/env python
# Need solve time complexity
from __future__ import print_function
from collections import deque
def count_fair_and_square_numbers(a, b):
    """Count how many integers in the closed interval [a, b] are fair
    and square (lazy while-loop, so huge ranges need no list)."""
    total = 0
    value = a
    while value <= b:
        if is_fair_and_square(value):
            total += 1
        value += 1
    return total
def is_fair_and_square(n):
    """Return True when ``n`` is a palindrome whose exact integer square
    root is also a palindrome (a "fair and square" number)."""
    assert isinstance(n, int) or isinstance(n, long)
    if not is_palindrome(n):
        return False
    # BUG FIX: the original used int(n ** 0.5), which loses precision for
    # large n and raises OverflowError once n no longer fits in a float
    # (the large practice inputs exceed that).  Use an exact integer root.
    root = _integer_sqrt(n)
    if root * root != n:
        return False
    return is_palindrome(root)

def _integer_sqrt(n):
    """Floor of sqrt(n) via integer-only Newton iteration (no floats)."""
    if n <= 1:
        return n
    x = n
    y = (x + 1) // 2
    while y < x:
        x = y
        y = (x + n // x) // 2
    return x
def is_square(n):
    """Return True iff ``n`` is a perfect square.

    Uses integer Newton iteration with cycle detection, so it is exact
    for arbitrarily large integers (no float square roots).
    """
    assert isinstance(n, int) or isinstance(n, long)
    if 0 <= n <= 1:
        return True
    guess = n // 2
    visited = {guess}
    while guess ** 2 != n:
        guess = (guess + (n // guess)) // 2
        if guess in visited:
            # The iteration started cycling without hitting an exact root.
            return False
        visited.add(guess)
    return True
def is_palindrome(n):
    """Return True iff the decimal digits of ``n`` read the same reversed.

    Numbers with trailing zeros (e.g. 10) are rejected because the
    reversed digit string parses to a different integer.
    """
    assert isinstance(n, int) or isinstance(n, long)
    digits = deque(str(n))
    digits.reverse()
    return n == int(''.join(digits))
if __name__ == '__main__':
    import os
    # Sanity check: a 45-digit perfect square.
    print(is_square(152415789666209426002111556165263283035677489))
    # Tiny sample intervals before running the real input files.
    samples = [
        (1, 4),
        (10, 120),
        (100, 1000)
    ]
    for sample in samples:
        print(count_fair_and_square_numbers(*sample))
    # Solve each Code Jam input file located next to this script and
    # write the matching "Case #i: count" output file.
    data_files = ['C-small-practice', 'C-large-practice-1', 'C-large-practice-2']
    for f in data_files:
        with open(os.path.join(os.path.dirname(os.path.realpath(__file__)),
            '{0}.in'.format(f)), 'r') as input_file:
            lines = input_file.readlines()
        input_count = int(lines[0].replace('\n' ,''))
        inputs = [line.replace('\n', '') for line in lines[1:]]
        i = 1
        with open(os.path.join(os.path.dirname(os.path.realpath(__file__)),
            '{0}.out'.format(f)), 'w') as output_file:
            for in_ in inputs:
                a, b = tuple([int(_) for _ in in_.split(' ')])
                print(a, b)
                output_file.write('Case #{0}: {1}\n'.format(i, count_fair_and_square_numbers(a, b)))
                i += 1
| apache-2.0 | Python | |
463b20a1fa6740e6db2c8abac3861fa9a30f9a2e | Add Django 1.4.1 as a support version to suppress warning. | MikeAmy/django-reversion,cbrepo/django-reversion,fladi/django-reversion,fladi/django-reversion,Beauhurst/django-reversion,pydanny/django-reversion,ixc/django-reversion,blag/django-reversion,adonm/django-reversion,lutoma/django-reversion,Beauhurst/django-reversion,matllubos/django-reversion,MikeAmy/django-reversion,etianen/django-reversion,talpor/django-reversion,ixc/django-reversion,IanLee1521/django-reversion,unicefuganda/edtrac,Govexec/django-reversion,unicefuganda/edtrac,IanLee1521/django-reversion,pydanny/django-reversion,blag/django-reversion,unicefuganda/edtrac,mkebri/django-reversion,etianen/django-reversion,mkebri/django-reversion,Govexec/django-reversion,lutoma/django-reversion,adonm/django-reversion,talpor/django-reversion,matllubos/django-reversion | src/reversion/__init__.py | src/reversion/__init__.py | """
Transactional version control for Django models.
Developed by Dave Hall.
<http://www.etianen.com/>
"""
import django, warnings
from reversion.revisions import default_revision_manager, revision_context_manager, VersionAdapter
from reversion.admin import VersionAdmin
from reversion.models import pre_revision_commit, post_revision_commit
from reversion.version import __version__
VERSION = __version__

SUPPORTED_DJANGO_VERSIONS = (
    (1, 4, 0),
    (1, 4, 1),
)

def check_django_version():
    """Warn when the running django is not a version reversion supports."""
    if django.VERSION[:3] in SUPPORTED_DJANGO_VERSIONS:
        return
    def format_version(v):
        return u".".join(unicode(n) for n in v)
    warnings.warn(
        (
            u"django-reversion %(reversion_version)s is intended for use with django %(supported_django_version)s. "
            u"You are running django %(django_version)s, so some features, such as admin integration, may not work. "
            u"Please see https://github.com/etianen/django-reversion/wiki/Compatible-Django-Versions"
        ) % {
            "reversion_version": format_version(VERSION),
            "supported_django_version": ' or '.join(format_version(v) for v in SUPPORTED_DJANGO_VERSIONS),
            "django_version": format_version(django.VERSION[:3]),
        }
    )

check_django_version()
# Legacy revision reference.
revision = default_revision_manager # TODO: Deprecate eventually.
# Easy registration methods: module-level aliases of the default manager.
register = default_revision_manager.register
is_registered = default_revision_manager.is_registered
unregister = default_revision_manager.unregister
get_adapter = default_revision_manager.get_adapter
get_registered_models = default_revision_manager.get_registered_models
# Context management: decorator/context-manager for revision blocks.
create_revision = revision_context_manager.create_revision
# Revision meta data accessors for the active revision context.
get_db = revision_context_manager.get_db
set_db = revision_context_manager.set_db
get_user = revision_context_manager.get_user
set_user = revision_context_manager.set_user
get_comment = revision_context_manager.get_comment
set_comment = revision_context_manager.set_comment
add_meta = revision_context_manager.add_meta
get_ignore_duplicates = revision_context_manager.get_ignore_duplicates
set_ignore_duplicates = revision_context_manager.set_ignore_duplicates
# Low level API: version lookup helpers.
get_for_object_reference = default_revision_manager.get_for_object_reference
get_for_object = default_revision_manager.get_for_object
get_unique_for_object = default_revision_manager.get_unique_for_object
get_for_date = default_revision_manager.get_for_date
get_deleted = default_revision_manager.get_deleted
| """
Transactional version control for Django models.
Developed by Dave Hall.
<http://www.etianen.com/>
"""
import django, warnings
from reversion.revisions import default_revision_manager, revision_context_manager, VersionAdapter
from reversion.admin import VersionAdmin
from reversion.models import pre_revision_commit, post_revision_commit
from reversion.version import __version__
VERSION = __version__
SUPPORTED_DJANGO_VERSIONS = (
(1, 4, 0),
)
def check_django_version():
    """Checks the version of django being used, and issues a warning if incorrect."""
    if django.VERSION[:3] not in SUPPORTED_DJANGO_VERSIONS:
        # Render a version tuple as "x.y.z" for the warning text.
        format_version = lambda v: u".".join(unicode(n) for n in v)
        warnings.warn(
            (
                u"django-reversion %(reversion_version)s is intended for use with django %(supported_django_version)s. "
                u"You are running django %(django_version)s, so some features, such as admin integration, may not work. "
                u"Please see https://github.com/etianen/django-reversion/wiki/Compatible-Django-Versions"
            ) % {
                "reversion_version": format_version(VERSION),
                "supported_django_version": ' or '.join(format_version(v) for v in SUPPORTED_DJANGO_VERSIONS),
                "django_version": format_version(django.VERSION[:3]),
            }
        )
check_django_version()
# Legacy revision reference.
revision = default_revision_manager # TODO: Deprecate eventually.
# Easy registration methods.
register = default_revision_manager.register
is_registered = default_revision_manager.is_registered
unregister = default_revision_manager.unregister
get_adapter = default_revision_manager.get_adapter
get_registered_models = default_revision_manager.get_registered_models
# Context management.
create_revision = revision_context_manager.create_revision
# Revision meta data.
get_db = revision_context_manager.get_db
set_db = revision_context_manager.set_db
get_user = revision_context_manager.get_user
set_user = revision_context_manager.set_user
get_comment = revision_context_manager.get_comment
set_comment = revision_context_manager.set_comment
add_meta = revision_context_manager.add_meta
get_ignore_duplicates = revision_context_manager.get_ignore_duplicates
set_ignore_duplicates = revision_context_manager.set_ignore_duplicates
# Low level API.
get_for_object_reference = default_revision_manager.get_for_object_reference
get_for_object = default_revision_manager.get_for_object
get_unique_for_object = default_revision_manager.get_unique_for_object
get_for_date = default_revision_manager.get_for_date
get_deleted = default_revision_manager.get_deleted
| bsd-3-clause | Python |
0b445c9606d30f31a6df1d99ef4d564f931014f2 | use unittest | vottie/lang | python/calc/calc_test.py | python/calc/calc_test.py | import unittest
from calc import Calc
class CalcTest(unittest.TestCase):
    """Unit tests for the Calc class (Python 2 print statements)."""
    def setUp(self):
        print "Calc Test"
    def test_add(self):
        # add() must return the arithmetic sum of its two operands.
        c = Calc()
        x = 100
        y = 200
        result = 0
        result = c.add(x,y)
        print '{0} + {1} = {2}'.format(x, y, result)
        self.assertEqual(x + y, result)
if __name__ == '__main__':
    unittest.main()
| mit | Python | |
4972930bb42ed6d7ebc1bad2909ede1a3c213cec | Add preprocessing functions. | prasanna08/MachineLearning | preprocess.py | preprocess.py | import numpy as np
"""This file contains some functions related to preprocessing."""
def get_output_array_from_labels(output_labels, labels_encoding=None):
    """Expand a 1-D array of class labels into a per-sample code matrix.

    Args:
        output_labels: numpy array of shape (n_samples,) with one label
            per sample.
        labels_encoding: optional array-like whose rows are
            ``[label, code...]``; every sample carrying ``label`` is
            assigned ``code``.  When omitted, a one-hot encoding over
            the sorted unique labels is generated.
            NOTE(review): custom codes are assumed to have exactly
            n_unique_labels columns -- confirm with callers.

    Returns:
        numpy.ndarray of shape (n_samples, n_unique_labels).
    """
    labels = np.unique(output_labels)
    labels = labels.reshape(len(labels), 1)
    outputs = np.zeros((output_labels.shape[0], labels.shape[0]))
    # BUG FIX: `if not labels_encoding:` raises "truth value of an array
    # is ambiguous" whenever a caller passes a numpy array; test identity
    # against None instead.
    if labels_encoding is None:
        labels_encoding = np.concatenate(
            [labels, np.eye(labels.shape[0])], axis=1)
    for enc in labels_encoding:
        indices = np.where(output_labels == enc[0])
        outputs[indices[0]] = enc[1:]
    return outputs
| mit | Python | |
c1d66909a6ce9903aa0a856d80721c756bc54806 | test for neo4j | clemsos/mitras,clemsos/mitras,clemsos/mitras | test/test_neo4j_graph.py | test/test_neo4j_graph.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# from py2neo import neo4j, node, rel
from bulbs.config import DEBUG
from bulbs.neo4jserver import Graph, Config, NEO4J_URI
from message import Message, IsRetweet # models
from datetime import datetime
# Setup: connect to a local Neo4j server via bulbs with basic credentials.
config = Config(NEO4J_URI, "james", "secret")
g = Graph(config)
# g.config.set_logger(DEBUG)
# g.add_proxy("message", Message)
g.add_proxy("tweet", Message)
g.add_proxy("isRT", IsRetweet)
# Create two tweet nodes...
m1= g.tweet.create(text="salut",created_at=datetime.now())
m2= g.tweet.create(text="re-salut",created_at=datetime.now())
# nodes = g.tweet.index.lookup(text="salut")
# ...and a retweet edge from m2 back to m1.
rt=g.isRT.create(m2,m1)
# Alternative connection via py2neo, kept for reference:
# graph_db = neo4j.GraphDatabaseService("http://localhost:7474/db/data/"
| mit | Python | |
09f1cf984a456a4a452f1a1c0a0ff6fd09b7b415 | add code.py | onjs/Album | code.py | code.py | print 'Hello GitHub' | artistic-2.0 | Python | |
a5012c9fb81768e85b555b52264baa11efc17ba1 | Add unittest for select_taxa that runs main and selects a single genome | ODoSE/odose.nl | test/test_select_taxa.py | test/test_select_taxa.py | import logging
import os
import tempfile
import unittest
import select_taxa
class Test(unittest.TestCase):
    """Integration test for select_taxa.main."""
    def setUp(self):
        self.longMessage = True
        logging.root.setLevel(logging.DEBUG)
    def test_main(self):
        '''
        Select a single genome and assert the genomes file contains the correct output for it.
        '''
        # BUG FIX: tempfile.mktemp() returns a path *string*, so the old
        # mktemp()[1] took the second character of that string as the
        # target path.  mkstemp() returns (fd, path); close the fd so
        # select_taxa.main can (re)write the file itself.
        handle, target = tempfile.mkstemp()
        os.close(handle)
        try:
            args = ('--genomes=13960 --genomes-file=' + target).split()
            # Run the module under test; it writes the genomes file.
            select_taxa.main(args)
            # Assert contents
            with open(target) as reader:
                contents = reader.read()
            self.assertIn('17745.1\tEscherichia coli E24377A', contents)
        finally:
            os.remove(target)
| mit | Python | |
b5207cfcee8bd3f1a41fc87f3e9afcfe94646314 | Add example of how to list of codecs. | gmarco/mlt-orig,wideioltd/mlt,ttill/MLT-roto-tracking,zzhhui/mlt,zzhhui/mlt,ttill/MLT,wideioltd/mlt,zzhhui/mlt,xzhavilla/mlt,zzhhui/mlt,j-b-m/mlt,j-b-m/mlt,ttill/MLT-roto,zzhhui/mlt,anba8005/mlt,siddharudh/mlt,ttill/MLT-roto-tracking,zzhhui/mlt,zzhhui/mlt,mltframework/mlt,ttill/MLT,siddharudh/mlt,anba8005/mlt,siddharudh/mlt,siddharudh/mlt,ttill/MLT-roto,mltframework/mlt,wideioltd/mlt,mltframework/mlt,ttill/MLT-roto-tracking,ttill/MLT,siddharudh/mlt,anba8005/mlt,ttill/MLT-roto-tracking,ttill/MLT-roto,ttill/MLT-roto,gmarco/mlt-orig,j-b-m/mlt,gmarco/mlt-orig,anba8005/mlt,siddharudh/mlt,j-b-m/mlt,zzhhui/mlt,ttill/MLT,gmarco/mlt-orig,wideioltd/mlt,j-b-m/mlt,j-b-m/mlt,anba8005/mlt,j-b-m/mlt,wideioltd/mlt,mltframework/mlt,j-b-m/mlt,ttill/MLT-roto-tracking,gmarco/mlt-orig,ttill/MLT,xzhavilla/mlt,wideioltd/mlt,ttill/MLT-roto,anba8005/mlt,ttill/MLT-roto,j-b-m/mlt,anba8005/mlt,mltframework/mlt,wideioltd/mlt,siddharudh/mlt,mltframework/mlt,ttill/MLT,ttill/MLT-roto-tracking,ttill/MLT-roto-tracking,gmarco/mlt-orig,ttill/MLT,wideioltd/mlt,anba8005/mlt,xzhavilla/mlt,ttill/MLT,gmarco/mlt-orig,gmarco/mlt-orig,mltframework/mlt,siddharudh/mlt,anba8005/mlt,mltframework/mlt,xzhavilla/mlt,xzhavilla/mlt,ttill/MLT-roto,zzhhui/mlt,ttill/MLT-roto,siddharudh/mlt,xzhavilla/mlt,mltframework/mlt,gmarco/mlt-orig,ttill/MLT-roto,xzhavilla/mlt,ttill/MLT-roto-tracking,xzhavilla/mlt,xzhavilla/mlt,ttill/MLT,ttill/MLT-roto-tracking,mltframework/mlt,j-b-m/mlt,wideioltd/mlt | src/swig/python/codecs.py | src/swig/python/codecs.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Import required modules
import mlt

# Start the mlt system (required before creating any factory objects)
mlt.Factory().init( )

# Create the avformat consumer against a default profile
c = mlt.Consumer( mlt.Profile(), "avformat" )

# Ask for video codecs supports: setting 'vcodec' to 'list' makes the
# consumer publish the supported codecs instead of encoding
c.set( 'vcodec', 'list' )

# Start the consumer to generate the list
c.start()

# Get the vcodec property (an mlt.Properties collection of codec names)
codecs = mlt.Properties( c.get_data( 'vcodec' ) )

# Print the list of codecs (NOTE: Python 2 print statement)
for i in range( 0, codecs.count()):
    print codecs.get( i )
| lgpl-2.1 | Python | |
872e2a38845d8a9d321435092f808e2eb79a26e3 | test case for issue #9 | chfw/pyexcel-ods,chfw/pyexcel-ods | tests/test_formatters.py | tests/test_formatters.py | import os
from unittest import TestCase
from textwrap import dedent
import pyexcel as pe
class TestAutoDetectInt(TestCase):
def setUp(self):
self.content = [[1,2,3.1]]
self.test_file = "test_auto_detect_init.ods"
pe.save_as(array=self.content, dest_file_name=self.test_file)
def test_auto_detect_int(self):
sheet = pe.get_sheet(file_name=self.test_file)
expected = dedent("""
pyexcel_sheet1:
+---+---+-----+
| 1 | 2 | 3.1 |
+---+---+-----+""").strip()
self.assertEqual(str(sheet), expected)
def test_get_book_auto_detect_int(self):
book = pe.get_book(file_name=self.test_file)
expected = dedent("""
pyexcel_sheet1:
+---+---+-----+
| 1 | 2 | 3.1 |
+---+---+-----+""").strip()
self.assertEqual(str(book), expected)
def test_auto_detect_int_false(self):
sheet = pe.get_sheet(file_name=self.test_file, auto_detect_int=False)
expected = dedent("""
pyexcel_sheet1:
+-----+-----+-----+
| 1.0 | 2.0 | 3.1 |
+-----+-----+-----+""").strip()
self.assertEqual(str(sheet), expected)
def test_get_book_auto_detect_int_false(self):
book = pe.get_book(file_name=self.test_file, auto_detect_int=False)
expected = dedent("""
pyexcel_sheet1:
+-----+-----+-----+
| 1.0 | 2.0 | 3.1 |
+-----+-----+-----+""").strip()
self.assertEqual(str(book), expected)
def tearDown(self):
os.unlink(self.test_file)
| bsd-3-clause | Python | |
5692f64619bf009cf92bf0a8c6f77bf82f0e3d02 | Add a new regression testing module | FactoryBoy/factory_boy | tests/test_regression.py | tests/test_regression.py | # Copyright: See the LICENSE file.
"""Regression tests related to issues found with the project"""
import datetime
import typing as T
import unittest
import factory
# Example objects
# ===============


class Author(T.NamedTuple):
    """An author record used as a factory target in the tests below."""
    fullname: str
    pseudonym: T.Optional[str] = None  # None when the author has no pen name


class Book(T.NamedTuple):
    """A book written by a single Author."""
    title: str
    author: Author


class PublishedBook(T.NamedTuple):
    """A Book plus its publication metadata."""
    book: Book
    published_on: datetime.date
    countries: T.List[str]  # country codes/names where the book is available
class FakerRegressionTests(unittest.TestCase):
    def test_locale_issue(self):
        """Regression test for `KeyError: 'locale'`
        See #785 #786 #787 #788 #790 #796.
        """
        # Combining a Faker declaration with a Trait used to break locale
        # resolution inside factory_boy; both instantiations below must work.
        class AuthorFactory(factory.Factory):
            class Meta:
                model = Author

            class Params:
                # Trait: an "unknown" author has an empty fullname.
                unknown = factory.Trait(
                    fullname="",
                )

            fullname = factory.Faker("name")

        public_author = AuthorFactory(unknown=False)
        self.assertIsNone(public_author.pseudonym)

        unknown_author = AuthorFactory(unknown=True)
        self.assertEqual("", unknown_author.fullname)
| mit | Python | |
58354f477decff942a3063a12fb72684beca8233 | Add singleton tests | 3ptscience/properties,aranzgeo/properties | tests/test_singletons.py | tests/test_singletons.py | # coding=utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
import properties
from properties.extras import Singleton
class TestSingleton(unittest.TestCase):

    def test_singleton(self):
        # Same name -> same instance; different name -> different instance.
        a = Singleton('a')
        b = Singleton('a')
        c = Singleton('c')
        assert a is b
        assert a is not c
        # copy/serialize round-trips must resolve back to the existing
        # registered instance rather than creating a new one.
        d = properties.copy(c)
        assert d is c
        e = Singleton.deserialize(d.serialize())
        assert e is d

        class AnotherSingleton(Singleton):
            pass

        # NOTE(review): reusing name 'a' from a subclass raises ValueError,
        # which suggests the singleton registry is shared across subclasses
        # -- confirm against the properties.extras implementation.
        with self.assertRaises(ValueError):
            AnotherSingleton('a')

    def test_hassingleton(self):

        class HasSingleton(properties.HasProperties):
            s = properties.Instance('', Singleton)

        hs1 = HasSingleton()
        hs2 = HasSingleton()
        hs3 = HasSingleton()
        # All three assignment forms (name string, instance, dict) must
        # resolve to the same singleton instance.
        hs1.s = 'a'
        hs2.s = Singleton('a')
        hs3.s = {'name': 'a'}
        assert hs1.s is hs2.s
        assert hs1.s is hs3.s


if __name__ == '__main__':
    unittest.main()
| mit | Python | |
b93b8d96114338809e6a082f819291144eedd4af | add an utils to reduce the original dataset to a choosen class samples size | plabadille/image_classifier | reduce_dataset.py | reduce_dataset.py | import sys, os
from shutil import copyfile
supplied_args = sys.argv[1:]
DATA_DIRECTORY = "data_dir"
NEW_DATA_DIRECTORY = supplied_args[0] if supplied_args else sys.exit("You need to supplied a new data directory name : $python reduce_dataset.py <new data directory name> <max sample by class>")
MAX_SAMPLE_BY_CLASS = int(supplied_args[1]) if len(supplied_args) > 1 else sys.exit("You need to supplied the max sample number by class you want : $python reduce_dataset.py <new data directory name> <max sample by class>")
if not os.path.exists(DATA_DIRECTORY):
sys.exist("The default data directory %s doesn't exist, please create it and store in it your default dataset." % DATA_DIRECTORY)
if not os.path.exists(NEW_DATA_DIRECTORY):
os.makedirs(NEW_DATA_DIRECTORY)
classes = {}
directories_it = os.scandir(DATA_DIRECTORY)
for entry in directories_it:
if entry.is_file():
continue
subdir_new_path = os.path.join(NEW_DATA_DIRECTORY, entry.name)
subdir_path = os.path.join(DATA_DIRECTORY, entry.name)
if not os.path.exists(subdir_new_path):
os.makedirs(subdir_new_path)
classes[entry.name] = 0
files_it = os.scandir(subdir_path)
for file in files_it:
if MAX_SAMPLE_BY_CLASS > classes[entry.name]:
file_src = os.path.join(subdir_path, file.name)
file_dest = os.path.join(subdir_new_path, file.name)
copyfile(file_src, file_dest)
classes[entry.name] += 1
else:
break
classes_count = len(classes)
print( "%d classes with a maximum of %d samples were successfuly copied from %s directory to %s directory." % (classes_count, MAX_SAMPLE_BY_CLASS, DATA_DIRECTORY, NEW_DATA_DIRECTORY))
| mit | Python | |
ab164307310474625926bbc9ea7fae03b99c99cf | Create architecture core models | YACOWS/opps,williamroot/opps,opps/opps,YACOWS/opps,williamroot/opps,YACOWS/opps,jeanmask/opps,williamroot/opps,opps/opps,jeanmask/opps,opps/opps,jeanmask/opps,opps/opps,williamroot/opps,YACOWS/opps,jeanmask/opps | opps/core/models/__init__.py | opps/core/models/__init__.py | # -*- coding: utf-8 -*-
from opps.core.models.channel import *
from opps.core.models.publisher import *
| mit | Python | |
c488befd6f27a6576a2f6f34c46f29f63d5505dc | add BAM module report | sequana/sequana,sequana/sequana,sequana/sequana,sequana/sequana,sequana/sequana | sequana/modules_report/bamqc.py | sequana/modules_report/bamqc.py | # -*- coding: utf-8 -*-
#
# This file is part of Sequana software
#
# Copyright (c) 2016 - Sequana Development Team
#
# File author(s):
# Thomas Cokelaer <thomas.cokelaer@pasteur.fr>
# Dimitri Desvillechabrol <dimitri.desvillechabrol@pasteur.fr>,
# <d.desvillechabrol@gmail.com>
# Rachel Legendre <rachel.legendre@pasteur.fr>
#
# Distributed under the terms of the 3-clause BSD license.
# The full license is in the LICENSE file, distributed with this software.
#
# website: https://github.com/sequana/sequana
# documentation: http://sequana.readthedocs.io
#
##############################################################################
"""Report dedicated to BAM file
.. autosummary::
BAMQCModule
"""
import os
from sequana.lazy import pandas as pd
from sequana.modules_report.base_module import SequanaBaseModule
from sequana.bamtools import SAMFlags
from sequana import BAM
from sequana.lazy import pylab
from sequana.lazy import reports
from sequana.utils.datatables_js import DataTable
__all__ = ['BAMQCModule']
class BAMQCModule(SequanaBaseModule):
    """Report dedicated to BAM file

    ::

        from sequana import sequana_data
        from sequana.modules_report.bamqc import BAMQCModule
        filename = sequana_data("test.bam")

        r = BAMQCModule(filename)
        r.create_html("test.html")
        # report/bam.html is now available

    .. todo:: right now, the computation is performed in the class. Ideally,
        we would like the computation to happen elsewhere, where a json is stored.
        The json would be the input to this class.
    """
    def __init__(self, bam_input, output_filename=None):
        """
        :param bam_input: path to the BAM file to analyse
        :param output_filename: destination of the HTML report (None keeps
            the base class default)
        """
        super().__init__()
        self.bam_input = bam_input
        self.title = "Bam Report"
        self.create_report_content()
        self.create_html(output_filename)

    def create_report_content(self):
        # Build the report sections in display order.
        self.sections = list()
        self.add_flag_section()
        self.add_images_section()

    def _computation(self):
        """Parse the BAM file and summarise the SAM flag counters.

        Returns a dict with 'alignment_count' and a 'flags' DataFrame
        (meaning/counter columns, indexed by flag).
        """
        self.bam = BAM(self.bam_input)

        results = {}
        results['alignment_count'] = len(self.bam)

        # Sum each flag column over all reads and attach the human-readable
        # meaning of each flag from SAMFlags.
        df = self.bam.get_flags_as_df().sum()
        df = df.to_frame()
        df.columns = ['counter']
        sf = SAMFlags()
        df['meaning'] = sf.get_meaning()
        df = df[['meaning', 'counter']]
        results['flags'] = df

        # BUG FIX: two plot_bar_* calls that previously followed this return
        # statement were unreachable dead code and have been removed.
        return results

    def add_flag_section(self):
        """Add an HTML datatable summarising the SAM flag counts."""
        data = self._computation()
        df = data['flags']

        datatable = DataTable(df, "flags", index=True)
        datatable.datatable.datatable_options = {
            'scrollX': '300px',
            'pageLength': 15,
            'scrollCollapse': 'true',
            'dom': 'tB',
            "paging": "false",
            'buttons': ['copy', 'csv']}
        js = datatable.create_javascript_function()
        html_tab = datatable.create_datatable(float_format='%.3g')
        html = ""
        html += "{} {}".format(html_tab, js)

        self.sections.append({
            "name": "Flags information",
            "anchor": "flags",
            "content": html
        })

    def add_images_section(self):
        """Add bar plots (flags log/linear scale, MAPQ) as embedded PNGs."""
        style = "width:65%"
        # Local import intentionally shadows sequana.lazy's pylab here;
        # ioff() prevents figures from popping up while building the report.
        import pylab
        pylab.ioff()

        def plotter1(filename):
            self.bam.plot_bar_flags(logy=True, filename=filename)
        html1 = self.create_embedded_png(plotter1, "filename", style=style)

        def plotter2(filename):
            self.bam.plot_bar_flags(logy=False, filename=filename)
        html2 = self.create_embedded_png(plotter2, "filename", style=style)

        def plotter3(filename):
            self.bam.plot_bar_mapq(filename=filename)
        html3 = self.create_embedded_png(plotter3, "filename", style=style)

        self.sections.append({
            "name": "Image",
            "anchor": "table",
            "content": html1 + html2 + html3
        })
| bsd-3-clause | Python | |
9fde684095ba34300fcade827dfb17eae99f4daa | add advanced.py | ianzhengnan/learnpy,ianzhengnan/learnpy | renew/advanced.py | renew/advanced.py |
def fib(max):
    """Yield the first *max* Fibonacci numbers (1, 1, 2, 3, ...), then
    print 'done' once the sequence is exhausted."""
    previous, current = 0, 1
    for _ in range(max):
        yield current
        previous, current = current, previous + current
    print('done')
# Demo: print the first 20 Fibonacci numbers.
for value in fib(20):
    print(value)
| apache-2.0 | Python | |
fab91baa976693f89c6001a0e09e0f351d30ccfe | add decorator timeout test | ResolveWang/WeiboSpider,ResolveWang/WeiboSpider,yzsz/weibospider,yzsz/weibospider | test/test_decorator.py | test/test_decorator.py | # coding=utf-8
import unittest
from decorators.decorator import *
import time
class TestDecorator(unittest.TestCase):
    def test_timeout(self):
        # A 1s timeout wrapping a 2s sleep: the decorator is expected to
        # abort the call before the body completes.
        @timeout(1)
        def test_timeout_no_params():
            time.sleep(2)
            # NOTE(review): assertTrue() is called without an argument, which
            # would raise TypeError -- presumably unreachable because the
            # timeout fires during the sleep. Confirm intent.
            self.assertTrue()

        test_timeout_no_params()

        # The decorator must forward positional and keyword args unchanged.
        @timeout(1)
        def test_timeout_with_params(*args, **kwargs):
            self.assertEqual(args, (1, 2, 3))
            self.assertEqual(kwargs, {'a': 1, 'b': 2})

        test_timeout_with_params(1, 2, 3, a=1, b=2)


if __name__ == '__main__':
    unittest.main()
| mit | Python | |
db85c1a9aca124ef4cf45c61244c6cf556138d77 | Add cmd.py script | ruslo/configs | python/cmd.py | python/cmd.py | #!/usr/bin/env python3
# Copyright (c) 2014, Ruslan Baratov
# All rights reserved.
import argparse
import os
import stat
import subprocess
import sys
import detail.os
import detail.command
# This helper targets Python 3 only.
assert(sys.version_info.major == 3)

# Parse CLI arguments (none beyond --help; kept for usage output).
parser = argparse.ArgumentParser(description='Start windows cmd')

args = parser.parse_args()

# Locate Windows explorer.exe and translate paths between cygwin and
# Windows conventions.
explorer_cygpath = detail.command.get_absolute_path('explorer')
explorer_winpath = detail.os.cygwin_to_win(explorer_cygpath)
cwd_winpath = detail.os.cygwin_to_win(os.getcwd())

"""Temporary `.bat` script"""
# NOTE(review): a fixed, predictable path under /tmp is race-prone;
# tempfile.NamedTemporaryFile would be safer -- confirm before changing.
tmp_path = '/tmp/configs.python.cmd.py-temp.bat'
tmp_winpath = detail.os.cygwin_to_win(tmp_path)

# The .bat switches to the current drive and directory, then opens cmd there.
# BUG FIX: the file handle was previously never closed, so the writes could
# still be buffered when the spawned shell read the script; the with-block
# guarantees the data is flushed to disk first.
with open(tmp_path, 'w') as temp:
    temp.write('{}:\n'.format(cwd_winpath[0]))
    temp.write('cd "{}"\n'.format(cwd_winpath))
    temp.write('cmd\n')

# Mark the script readable and executable for user, group and others.
os.chmod(
    tmp_path,
    stat.S_IXOTH | stat.S_IXGRP | stat.S_IXUSR |
    stat.S_IROTH | stat.S_IRGRP | stat.S_IRUSR
)

# Launch a new window titled 'clean shell' running the generated script.
subprocess.Popen([
    'cmd',
    '/C',
    'start',
    'clean shell',
    '/I',
    explorer_winpath,
    tmp_winpath
])
| bsd-2-clause | Python | |
48faf04cfcd40739e2a0ddfc593f2320f1aeef65 | Create re_install.py | mic100/RPi_recovery | re_install.py | re_install.py | # -*- coding: utf-8 -*-
#-----------------------------------------------------------------------------#
# #
# import libs #
# #
#-----------------------------------------------------------------------------#
import os
#-----------------------------------------------------------------------------#
#-----------------------------------------------------------------------------#
# #
# main dev #
# #
#-----------------------------------------------------------------------------#
#when i need to re install all the program in case of loss
#we can get them back from the run of that python program
#os.system("sudo modprobe w1-gpio")
#os.system("sudo modprobe w1-therm")
#os.system("cd /sys/bus/w1/devices/28*")

# NOTE: Python 2 syntax (print statement below) -- run with python2 only.
# Refresh the package index and upgrade the system first.
os.system("sudo apt-get update")
os.system("sudo apt-get upgrade")
# Python toolchain and Raspberry Pi GPIO support.
os.system("sudo apt-get install python-pip")
os.system("sudo apt-get install python-dev")
os.system("sudo apt-get install python-rpi.gpio")
#we install the lib to get the temperature of the temp sensors DS18B20
#see more here : https://github.com/timofurrer/w1thermsensor
os.system("sudo pip install w1thermsensor")
#install mysqldb lib for python used in code
os.system("sudo apt-get install python-mysqldb")
#os.system("sudo apt-get install tightvncserver")
# Reboot so kernel modules and group changes take effect.
os.system("sudo reboot")
print "the end"
| mit | Python | |
4411c676426fb580d33ae09682444c093ab2c204 | Add multi-processing tests | vbkaisetsu/clopure | test/test_mp.py | test/test_mp.py | import unittest
import time
from clopure.core import ClopureRunner
from clopure.parser import ClopureParser
class TestMultiprocessing(unittest.TestCase):
    """Timing tests for clopure's parallel map primitives.

    Each test sleeps for [1.0, 0.8, 0.5, 0.1, 0.1, 0.3] seconds (2.7s of
    work) across 4 worker processes, so the wall clock should be ~1s.
    NOTE(review): the 0.95-1.05s window may be flaky on loaded machines.
    """

    def setUp(self):
        self.parser = ClopureParser()
        self.runner = ClopureRunner(procs=4)

    def test_pmap(self):
        # pmap preserves input order regardless of task completion order.
        code = "(defimport time sleep) (list (pmap #(do (sleep %) %) [1.0 0.8 0.5 0.1 0.1 0.3]))"
        tree = self.parser.parse_line(code)
        result = self.runner.evaluate(tree[0])
        start_time = time.time()
        result = self.runner.evaluate(tree[1])
        end_time = time.time()
        self.assertEqual(result, [1.0, 0.8, 0.5, 0.1, 0.1, 0.3])
        self.assertTrue(0.95 < end_time - start_time < 1.05)

    def test_pmap_unord(self):
        # Unordered variant yields results in completion (sleep-length) order.
        code = "(defimport time sleep) (list (pmap-unord #(do (sleep %) %) [1.0 0.8 0.5 0.1 0.1 0.3]))"
        tree = self.parser.parse_line(code)
        result = self.runner.evaluate(tree[0])
        start_time = time.time()
        result = self.runner.evaluate(tree[1])
        end_time = time.time()
        self.assertEqual(result, [0.1, 0.1, 0.5, 0.3, 0.8, 1.0])
        self.assertTrue(0.95 < end_time - start_time < 1.05)

    def test_iter_mp_split(self):
        # iter-mp-split wraps a sequence function; ordered results expected.
        code = "(defimport time sleep) (list ((iter-mp-split #(map #(do (sleep %) %) %)) [1.0 0.8 0.5 0.1 0.1 0.3]))"
        tree = self.parser.parse_line(code)
        result = self.runner.evaluate(tree[0])
        start_time = time.time()
        result = self.runner.evaluate(tree[1])
        end_time = time.time()
        self.assertEqual(result, [1.0, 0.8, 0.5, 0.1, 0.1, 0.3])
        self.assertTrue(0.95 < end_time - start_time < 1.05)

    def test_iter_mp_split_unord(self):
        # Unordered split variant: completion order again.
        code = "(defimport time sleep) (list ((iter-mp-split-unord #(map #(do (sleep %) %) %)) [1.0 0.8 0.5 0.1 0.1 0.3]))"
        tree = self.parser.parse_line(code)
        result = self.runner.evaluate(tree[0])
        start_time = time.time()
        result = self.runner.evaluate(tree[1])
        end_time = time.time()
        self.assertEqual(result, [0.1, 0.1, 0.5, 0.3, 0.8, 1.0])
        self.assertTrue(0.95 < end_time - start_time < 1.05)
| mit | Python | |
122eb3c6eb9f8467fc5d3325f0e5c58cc285cb50 | Add a script to convert hex formatted key to token using random partitioner | bharatendra/ctools | token-hexkey.py | token-hexkey.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# a script to convert a given key in hex format to token using RandomPartitioner
import hashlib
import binascii
import sys


def key_to_token(hex_key):
    """Return the RandomPartitioner token for *hex_key* (hex-encoded bytes).

    The token is the key's MD5 digest read as a 128-bit two's-complement
    integer; as in the original script, the unsigned magnitude is returned.
    """
    key = binascii.unhexlify(hex_key)
    # Calculate MD5 digest and convert it to hex format
    digest = hashlib.md5(key).hexdigest()
    # Convert the hash digest to 2's complement form
    token = int(digest, 16)
    bits = 128
    if (token & (1 << (bits - 1))) != 0:
        token = token - (1 << bits)
    # Convert the resulting number to unsigned form
    return abs(token)


if __name__ == '__main__':
    # Ported to Python 3: print() and int() replace the py2-only print
    # statement and long(); behaviour on the command line is unchanged.
    if len(sys.argv) < 2:
        print("usage: python token.py <key in hex format>")
        sys.exit(1)
    print(key_to_token(sys.argv[1]))
| apache-2.0 | Python | |
b35a0d2415cfc8d8d5d4060f1cf411a42c90a9a0 | add leetcode Pascal's Triangle. | Fity/2code,Fity/2code,Fity/2code,Fity/2code,Fity/2code,Fity/2code | leetcode/PascalTriangle/solution.py | leetcode/PascalTriangle/solution.py | # -*- coding:utf-8 -*-
class Solution:
    # @return a list of lists of integers
    def generate(self, numRows):
        """Return the first *numRows* rows of Pascal's triangle.

        FIX: the original used the Python-2-only xrange (NameError on
        py3) and special-cased numRows 0/1/2 redundantly; this single
        loop handles every numRows >= 0 uniformly.
        """
        triangle = []
        for i in range(numRows):
            # Row i has i+1 entries; interior entries are the sum of the
            # two entries directly above them in the previous row.
            row = [1] * (i + 1)
            for j in range(1, i):
                row[j] = triangle[i - 1][j - 1] + triangle[i - 1][j]
            triangle.append(row)
        return triangle
| mit | Python | |
8b5f09708eb79abdcde730727f6788881a3a68a3 | Initialize P4_textToExcel | JoseALermaIII/python-tutorials,JoseALermaIII/python-tutorials | books/AutomateTheBoringStuffWithPython/Chapter12/PracticeProjects/P4_textToExcel.py | books/AutomateTheBoringStuffWithPython/Chapter12/PracticeProjects/P4_textToExcel.py | # Write a program to read in the contents of several text files (you can make
# the text files yourself) and insert those contents into a spreadsheet, with
# one line of text per row. The lines of the first text file will be in the
# cells of column A, the lines of the second text file will be in the cells of
# column B, and so on.
| mit | Python | |
ed17414ed09e117b33f8407517e2a69fa839452e | add edit-keps.py | kubernetes/enhancements,kubernetes/enhancements,kubernetes/enhancements | hack/edit-keps.py | hack/edit-keps.py | #!/usr/bin/env python3
# Copyright 2021 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Edit KEPs en-masse by round-tripping them through ruamel.yaml
This is not intended for general usage, because:
- many keps have different formatting, and we're not at a point where
we can enforce formatting standards, so this is almost guaranteed
to introduce formatting change noise
- the idea is to manually edit this file with the specific edit to be
done, rather that developing a general purpose language to do this
"""
import argparse
import glob
from os import path
import ruamel.yaml
# Files that will be ignored
EXCLUDED_FILES = []
# A hilariously large line length to ensure we never line-wrap
MAX_WIDTH = 2000000000
def setup_yaml():
    """Build a round-trip ruamel.yaml parser configured for KEP files."""
    parser = ruamel.yaml.YAML(typ='rt')
    parser.preserve_quotes = True
    parser.width = MAX_WIDTH
    # Indentation used by the KEP template; currently ~36 KEPs have drifted.
    parser.indent(mapping=2, sequence=4, offset=2)
    return parser
def edit_kep(yaml, file_name, force_rewrite=False):
    """Load one kep.yaml, patch missing metadata, and rewrite it if changed.

    Fills in 'latest-milestone' (sentinel "0.0") for implemented KEPs that
    lack it, and derives 'stage' from the milestone map when absent.
    """
    with open(file_name, "r") as fp:
        kep = yaml.load(fp)

    rewrite = force_rewrite
    # Every field defaults to the sentinel "unknown" when missing.
    stage = kep.get("stage", "unknown")
    status = kep.get("status", "unknown")
    latest_milestone = kep.get("latest-milestone", "unknown")
    last_updated = kep.get("last-updated", "unknown")
    milestone = kep.get("milestone", {})

    if status == "implemented":
        if latest_milestone == "unknown":
            print(f'status: {status} stage: {stage} last-updated: {last_updated} file: {file_name}')
            # Implemented KEPs predating the field get a sentinel milestone.
            kep["latest-milestone"] = "0.0"
            rewrite = True

    if stage == "unknown":
        if latest_milestone == "unknown":
            kep["stage"] = "stable"
        else:
            # Reverse lookup: the stage whose milestone equals latest-milestone.
            # NOTE(review): raises IndexError if no stage matches -- presumably
            # acceptable since the caller wraps edit_kep in a broad except.
            kep["stage"] = [s for s, v in milestone.items() if v == latest_milestone][0]
        rewrite = True

    # Dump KEP to file_name
    if rewrite:
        print(f'  writing {file_name}')
        with open(file_name, "w") as fp:
            yaml.dump(kep, fp)
            fp.truncate()
def main(keps_dir, force_rewrite):
    """Run edit_kep over every kep.yaml found beneath *keps_dir*."""
    yaml = setup_yaml()
    for f in glob.glob(f'{keps_dir}/**/kep.yaml', recursive=True):
        if path.basename(f) in EXCLUDED_FILES:
            continue
        try:
            print(f'processing file: {f}')
            edit_kep(yaml, f, force_rewrite)
        except Exception as e:  # pylint: disable=broad-except
            print(f'ERROR: could not edit {f}: {e}')
if __name__ == '__main__':
    PARSER = argparse.ArgumentParser(
        description='Does things to KEPs')
    PARSER.add_argument(
        '--keps-dir',
        default='../keps',
        # FIX: help text was garbled ('Path to KEPs directoryProw Job Directory').
        help='Path to KEPs directory')
    PARSER.add_argument(
        '--force',
        # FIX: without action='store_true' any supplied value -- even the
        # string 'False' -- was truthy and forced a rewrite.
        action='store_true',
        default=False,
        help='Force rewrite of all KEPs')
    ARGS = PARSER.parse_args()
    main(ARGS.keps_dir, ARGS.force)
| apache-2.0 | Python | |
c50e072c5e79083ec3ec4104789a64223c2f63f8 | Create tao.py | taofengno1/wechatcron | tao.py | tao.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from apscheduler.schedulers.blocking import BlockingScheduler
import itchat, time
itchat.auto_login()  # interactive WeChat login (QR code)

def task():
    """Send a broadcast test message to every group chat named '测试'."""
    # NOTE(review): u'' literals plus .encode suggest this targets Python 2
    # -- confirm the runtime before modernising.
    # False -> use the cached chatroom list rather than refreshing it.
    chatroomList = itchat.get_chatrooms(False);
    for m in chatroomList:
        NickName = m['NickName'].encode('utf-8')
        if NickName == u'测试'.encode('utf-8'):
            text = u'中文群发测试'.encode('utf-8')
            itchat.send(text, m['UserName'])

# Run task() daily at 14:32 (cron-style trigger); start() blocks forever.
sched = BlockingScheduler()
sched.add_job(task, 'cron', month='1-12', day='1-31', hour=14, minute=32)
sched.start()
| mit | Python | |
7e757d24bff5758350dd2bc92b9e2b1e2f919c12 | Add compute synth (#3830) | googleapis/google-cloud-java,googleapis/google-cloud-java,googleapis/google-cloud-java | java-compute/google-cloud-compute/synth.py | java-compute/google-cloud-compute/synth.py | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script is used to synthesize generated parts of this library."""
import synthtool as s
import synthtool.gcp as gcp

# Generate the Compute v1 client from its Discovery document via artman.
gapic = gcp.DiscoGAPICGenerator()

library = gapic.java_library(
    service='compute',
    version='v1',
    config_path='artman_compute.yaml',
    artman_output_name='')

# Copy only the generated sources into this repository's src/ tree.
s.copy(library / 'gapic-google-cloud-compute-v1/src', 'src')
| apache-2.0 | Python | |
2d88daf10d11033bfd597112fb6484783c5a852a | Create xyz.py | bskinn/opan,bskinn/opan | xyz.py | xyz.py | #...
| mit | Python | |
e61840020820af4e7a625e472c060e8396b24055 | add migrations | praekelt/molo-gem,praekelt/molo-gem,praekelt/molo-gem | gem/migrations/0013_gemsettings_moderator_name.py | gem/migrations/0013_gemsettings_moderator_name.py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.12 on 2017-03-09 13:45
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds the optional free-text GemSettings.moderator_name field.

    dependencies = [
        ('gem', '0012_partner_credit'),
    ]

    operations = [
        migrations.AddField(
            model_name='gemsettings',
            name='moderator_name',
            field=models.TextField(blank=True, help_text=b'Moderator name', null=True, verbose_name=b'Moderator Name'),
        ),
    ]
| bsd-2-clause | Python | |
955ae619a6502a68f9a8d34022a4a8b1ebeb5ce2 | Create 20.py | Pouf/CodingCompetition,Pouf/CodingCompetition | E/20.py | E/20.py | # Problem 20 - Factorial digit sum
# n! means n × (n − 1) × ... × 3 × 2 × 1
# For example, 10! = 10 × 9 × ... × 3 × 2 × 1 = 3628800,
# and the sum of the digits in the number 10! is 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27
# Find the sum of the digits in the number 100!
from math import factorial as f
print(sum(int(c) for c in str(f(100))))
| mit | Python | |
81f983c833d9858ad23f589367bf601babddf858 | Add some useful activation functions. | mmohaveri/DeepNetTookKit | elements/activation_functions.py | elements/activation_functions.py | import theano
import theano.tensor as T
"""
A set of activation functions for Neural Network layers.
They're in the form of class so we can take advantage of constructor
to set initial value for some parameters.
"""
def tanh(x):
"""
tanh function (-1 to 1)
@input: x, theano shared variable.
@output: element-wise tanh of x
"""
return T.tanh(x)
def sigmoid(x):
"""
sigmoid function (0 to 1, (tanh(x)+1)/2).
@input: x, theano shared variable.
@output: element-wise sigmoid of x
"""
return (T.tanh(x)+1)/2
def linier(x):
"""
linier function.
@input: x, theano shared variable.
@output: x
"""
return x
def relu_generator(alpha=0):
"""
this function returns a relu function with proper alpha value.
@input: alpha, slope of negative side of ReLU.
@output: ReLU function
"""
def relu(x):
"""
rectified linier function (-alpha*x if x<0, x if x>0).
@input: x, theano shared variable.
@output: x<0?-alpha*x:x
"""
return T.nnet.relu(x, alpha)
return relu
# TODO:
# add RBF activation function
#
# def RBF(x):
# """
# radial basis function.
# @input: x, theano shared variable.
# @output: Not Implimented
# """
| mit | Python | |
b8e7f5381abcf15d07cac07c20c671ec7cc64c90 | Add missing migration. | ideascube/ideascube,ideascube/ideascube,ideascube/ideascube,ideascube/ideascube | ideascube/mediacenter/migrations/0013_auto_20170323_1525.py | ideascube/mediacenter/migrations/0013_auto_20170323_1525.py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-23 15:25
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Redefines Document.kind as a CharField with a fixed choice list
    # (default 'other'); see migration 0012 for the prior definition.

    dependencies = [
        ('mediacenter', '0012_auto_20170210_0940'),
    ]

    operations = [
        migrations.AlterField(
            model_name='document',
            name='kind',
            field=models.CharField(choices=[('image', 'image'), ('audio', 'sound'), ('video', 'video'), ('pdf', 'pdf'), ('text', 'text'), ('epub', 'epub'), ('mobi', 'mobi'), ('app', 'app'), ('other', 'other')], default='other', max_length=5, verbose_name='type'),
        ),
    ]
| agpl-3.0 | Python | |
6585ca91a399a06094636a505fe813a0425c1a35 | add auth module (split from server mod.) | word-killers/mark2down,word-killers/mark2down,word-killers/mark2down | auth.py | auth.py | from urllib import urlencode
from requests import post
auth_url = 'https://github.com/login/oauth/authorize'
access_token_url = 'https://github.com/login/oauth/access_token'
def generate_auth_link(client_id, scopes):
    """Return the GitHub authorization URL for *client_id* and *scopes*."""
    # append the client_id and the comma-joined scope list as a query string
    query = urlencode({
        'client_id': client_id,
        'scope': ','.join(scopes)
    })
    return '{0}?{1}'.format(auth_url, query)
def get_auth_token(client_id, client_secret, code):
    """Exchange an OAuth *code* for an access token; None when it failed."""
    payload = {
        'client_id': client_id,
        'client_secret': client_secret,
        'code': code
    }
    # request a token, asking GitHub for a JSON response body
    response = post(
        access_token_url,
        data=payload,
        headers={'Accept': 'application/json'}
    )
    # absent key means the token request failed -> None
    return response.json().get('access_token')
| mit | Python | |
dbc20f37c7fb1dd00c90ac54d2021fb1ba3b5eda | Add some end-to-end functional tests | rhgrant10/Groupy | exam.py | exam.py | import time
import sys
from groupy.client import Client
def read_token_from_file(filename):
    """Return the API token stored in *filename*, stripped of whitespace."""
    with open(filename) as token_file:
        return token_file.read().strip()
def test_groups(groups):
    """Smoke-test read-only group endpoints (members, messages, leaderboard,
    gallery); output is printed for manual inspection, 5 items each."""
    for group in groups:
        print(group)
        print('Members:')
        for member in group.members[:5]:
            print(member)
        print('Recent messages:')
        for message in group.messages.list()[:5]:
            print(message)
        print('Leaderboard (day):')
        for message in group.leaderboard.list_day()[:5]:
            print(message.favorited_by)
        print('Gallery:')
        for message in group.gallery.list()[:5]:
            print(message.attachments)
        print()
def test_messages(messages):
    """Smoke-test like/unlike on each message, pausing between the calls."""
    for message in messages:
        print(message)
        print(message.attachments)
        print('Liking...', message.like())
        time.sleep(1)  # you get rate limited by liking/unliking too fast
        print('Unliking...', message.unlike())
def test_chats(chats):
    """Print each chat and its recent messages for manual inspection."""
    for conversation in chats:
        print(conversation)
        print('Recent messages:')
        for msg in conversation.messages.list():
            print(msg)
def main(*args):
    """Run the end-to-end smoke tests against fixed test groups and chats.

    args[0] must be the path of a file holding the GroupMe API token.
    Raises Exception when an expected test group or chat is missing.
    """
    token_file = args[0]
    token = read_token_from_file(token_file)
    client = Client.from_token(token)

    groups = list(client.groups.list().autopage())
    test_group_ids = ('12268264', '27205597', '27205784', '35799100')
    target_groups = []
    for group in groups:
        if group.id in test_group_ids:
            print('Found {0} (id={0.group_id})'.format(group))
            target_groups.append(group)

    if len(target_groups) < len(test_group_ids):
        raise Exception('could not find group test groups')

    chats = list(client.chats.list())
    test_chat_ids = ('14529712+14612048',)
    target_chats = []
    for chat in chats:
        if chat.last_message['conversation_id'] in test_chat_ids:
            print('Found {}'.format(chat))
            # BUG FIX: previously appended the leftover loop variable
            # 'group' here instead of the matching 'chat'.
            target_chats.append(chat)

    if len(target_chats) < len(test_chat_ids):
        raise Exception('could not find group test chats')

    # One recent message per group/chat feeds the like/unlike test.
    target_messages = []
    for group in target_groups:
        target_messages.append(group.messages.list()[0])
    for chat in target_chats:
        target_messages.append(chat.messages.list()[0])

    print_header('test groups')
    test_groups(target_groups)

    print_header('test chats')
    test_chats(target_chats)

    print_header('test messages')
    test_messages(target_messages)
def print_header(header):
    """Print *header* between two '=' rules to separate report sections."""
    rule = '=' * 50
    print('\n')
    print(rule)
    print('| {}'.format(header))
    print(rule)
    print()
if __name__ == '__main__':
    # Usage: exam.py <token-file>
    main(*sys.argv[1:])
| apache-2.0 | Python | |
64139e0a41c1b1da81e9b5e244b2d7095c4a7a2b | Add delete old sessions command | nanuxbe/djangopackages,QLGu/djangopackages,nanuxbe/djangopackages,QLGu/djangopackages,pydanny/djangopackages,QLGu/djangopackages,pydanny/djangopackages,pydanny/djangopackages,nanuxbe/djangopackages | core/management/commands/delete_old_sessions.py | core/management/commands/delete_old_sessions.py | from datetime import datetime
from django.core.management.base import BaseCommand
from django.contrib.sessions.models import Session
"""
>>> def clean(count):
... for idx, s in enumerate(Session.objects.filter(expire_date__lt=now)[:count+1]):
... s.delete()
... if str(idx).endswith('000'): print idx
... print "{0} records left".format(Session.objects.filter(expire_date__lt=now).count())
...
"""
class Command(NoArgsCommand):
args = '<count count ...>'
help = "Delete old sessions"
def handle(self, *args, **options):
old_sessions = Session.objects.filter(expire_date__lt=datetime.now())
self.stdout.write("Deleting {0} expired sessions".format(
old_sessions.count()
)
for index, session in enumerate(old_sessions):
session.delete()
if str(idx).endswith('000'):
self.stdout.write("{0} records deleted".format(index)
self.stdout.write("{0} expired sessions remaining".format(
Session.objects.filter(expire_date__lt=datetime.now())
) | mit | Python | |
bb0cff292f1931b52bf05a3a0630dda9a508023f | Add basic wrapper for gym env | OpenMined/PySyft,OpenMined/PySyft,OpenMined/PySyft,OpenMined/PySyft | packages/syft/src/syft/lib/gym/env.py | packages/syft/src/syft/lib/gym/env.py | # third party
import gym
# syft relative
from ...generate_wrapper import GenerateWrapper
from ...proto.lib.gym.env_pb2 import Env as Env_PB
gym_env_type = type(gym.Env())
def object2proto(obj: gym.Env) -> Env_PB:
return Env_PB(id=obj.unwrapped.spec.id)
def proto2object(proto: Env_PB) -> gym.Env:
    # Rebuild the environment from its registered id (inverse of object2proto).
    return gym.make(proto.id)
GenerateWrapper(
wrapped_type=gym_env_type,
import_path="gym.Env",
protobuf_scheme=Env_PB,
type_object2proto=object2proto,
type_proto2object=proto2object,
)
| apache-2.0 | Python | |
05bf0cd188d4666c9c0aeb56a95d7867f25952c2 | Add a script for dqn continuous task demo | toslunar/chainerrl,toslunar/chainerrl | demo_dqn_continuous.py | demo_dqn_continuous.py | import argparse
import chainer
from chainer import serializers
import gym
import numpy as np
import random_seed
import env_modifiers
import q_function
def eval_single_run(env, model, phi):
    """Run one greedy episode in *env* and return the total reward.

    Args:
        env: gym-style environment exposing reset()/step().
        model: Q-function; called on a batched observation, its output must
            expose ``greedy_actions``.
        phi: feature extractor applied to raw observations.
    """
    test_r = 0
    obs = env.reset()
    done = False
    while not done:
        # Add a batch dimension of 1 for the network input.
        s = chainer.Variable(np.expand_dims(phi(obs), 0))
        qout = model(s)
        # Greedy (argmax-Q) action; no exploration during evaluation.
        a = qout.greedy_actions.data[0]
        obs, r, done, info = env.step(a)
        test_r += r
    return test_r
def main():
    """Load a saved Q-function and report its average return over
    ``--n-runs`` greedy evaluation episodes."""
    import logging
    logging.basicConfig(level=logging.DEBUG)
    parser = argparse.ArgumentParser()
    parser.add_argument('model', type=str)
    parser.add_argument('--env', type=str, default='Pendulum-v0')
    parser.add_argument('--seed', type=int, default=0)
    parser.add_argument('--n-runs', type=int, default=10)
    parser.add_argument('--window-visible', action='store_true')
    parser.add_argument('--render', action='store_true')
    parser.set_defaults(render=False)
    args = parser.parse_args()
    random_seed.set_random_seed(args.seed)
    env = gym.make(args.env)
    # Cap episode length at the env's declared timestep limit.
    timestep_limit = env.spec.timestep_limit
    env_modifiers.make_timestep_limited(env, timestep_limit)
    if args.render:
        env_modifiers.make_rendered(env)
    # Flattened observation/action sizes for the fully-connected Q-network.
    obs_size = np.asarray(env.observation_space.shape).prod()
    action_size = np.asarray(env.action_space.shape).prod()
    q_func = q_function.FCSIContinuousQFunction(
        obs_size, action_size, 100, 2, env.action_space)
    serializers.load_hdf5(args.model, q_func)
    scores = []
    def phi(obs):
        # Feature extractor: cast observations to float32 for the network.
        return obs.astype(np.float32)
    for i in range(args.n_runs):
        score = eval_single_run(env, q_func, phi)
        print('Run {}: {}'.format(i, score))
        scores.append(score)
    print('Average: {}'.format(sum(scores) / args.n_runs))
if __name__ == '__main__':
main()
| mit | Python | |
a2cb69b40daa7ab7b222e7d670dd1022571395a1 | add aiohttp demon | snower/torpeewee,snower/torpeewee | demos/aiohttp_demon.py | demos/aiohttp_demon.py | # -*- coding: utf-8 -*-
# 18/5/22
# create by: snower
import datetime
from torpeewee import *
from aiohttp import web
db = MySQLDatabase("test", host="127.0.0.1", user="root", passwd="123456")
class BaseModel(Model):
    # Base class binding every model to the MySQL database configured above.
    class Meta:
        database = db
class Test(BaseModel):
    # Demo table: one short text row per form submission.
    id = IntegerField(primary_key= True)
    data = CharField(max_length=64, null=False)
    created_at = DateTimeField()
async def show_handle(request):
    # GET /: list every stored row's `data`, joined with <br /> as raw HTML.
    datas = [t.data for t in await Test.select()]
    return web.Response(text = u"<br />".join(datas))
async def create_handle(request):
    # POST /: store the submitted `data` field, then redirect back to the list.
    data = await request.post()
    data = data["data"]
    await Test.create(data=data, created_at=datetime.datetime.now())
    return web.HTTPFound('/')
# Wire the two handlers onto "/" (run_app below starts the server).
app = web.Application()
app.add_routes([
    web.get('/', show_handle),
    web.post('/', create_handle)
])
web.run_app(app) | mit | Python | |
6d43946db5b672ca875c793417f1fb7894387f73 | Add Ycm Config | guoxiao/skiplist | tests/.ycm_extra_conf.py | tests/.ycm_extra_conf.py | import os
import ycm_core
# These are the compilation flags that will be used in case there's no
# compilation database set (by default, one is not set).
# CHANGE THIS LIST OF FLAGS. YES, THIS IS THE DROID YOU HAVE BEEN LOOKING FOR.
flags = [
'-Wall',
'-Wextra',
'-Werror',
'-Weffc++',
'-pedantic',
#'-Wc++98-compat',
'-Wno-long-long',
'-Wno-variadic-macros',
'-fexceptions',
'-DNDEBUG',
# You 100% do NOT need -DUSE_CLANG_COMPLETER in your flags; only the YCM
# source code needs it.
'-DUSE_CLANG_COMPLETER',
# THIS IS IMPORTANT! Without a "-std=<something>" flag, clang won't know which
# language to use when compiling headers. So it will guess. Badly. So C++
# headers will be compiled as C headers. You don't want that so ALWAYS specify
# a "-std=<something>".
# For a C project, you would set this to something like 'c99' instead of
# 'c++11'.
'-std=c++14',
# ...and the same thing goes for the magic -x option which specifies the
# language that the files to be compiled are written in. This is mostly
# relevant for c++ headers.
# For a C project, you would set this to 'c' instead of 'c++'.
'-x',
'c++',
'-isystem',
'../BoostParts',
'-isystem',
# This path will only work on OS X, but extra paths that don't exist are not
# harmful
'/System/Library/Frameworks/Python.framework/Headers',
'-isystem',
'../llvm/include',
'-isystem',
'../llvm/tools/clang/include',
'-I',
'.',
'-I',
'./ClangCompleter',
'-isystem', './tests/gmock/gtest',
'-isystem', './tests/gmock/gtest/include',
'-isystem', './tests/gmock',
'-isystem', './tests/gmock/include',
'-isystem', '/usr/include',
'-isystem', '/usr/local/include',
'-isystem', '/Library/Developer/CommandLineTools/usr/bin/../include/c++/v1',
'-isystem', '/Library/Developer/CommandLineTools/usr/bin/../lib/clang/7.0.0/include',
'-isystem', '/Library/Developer/CommandLineTools/usr/include',
'-isystem', '/usr/include/c++/5.1.1',
'-isystem', '/usr/include/c++/5.1.1/x86_64-redhat-linux',
'-isystem', '/usr/include/c++/5.1.1/backward',
'-isystem', '/usr/lib/clang/3.7.0/include',
'-isystem', 'googletest/googletest',
'-isystem', 'googletest/googletest/include',
'-I', '../',
]
# Set this to the absolute path to the folder (NOT the file!) containing the
# compile_commands.json file to use that instead of 'flags'. See here for
# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html
#
# You can get CMake to generate this file for you by adding:
# set( CMAKE_EXPORT_COMPILE_COMMANDS 1 )
# to your CMakeLists.txt file.
#
# Most projects will NOT need to set this to anything; you can just change the
# 'flags' list of compilation flags. Notice that YCM itself uses that approach.
compilation_database_folder = ''
if os.path.exists( compilation_database_folder ):
database = ycm_core.CompilationDatabase( compilation_database_folder )
else:
database = None
SOURCE_EXTENSIONS = [ '.cpp', '.cxx', '.cc', '.c', '.m', '.mm' ]
def DirectoryOfThisScript():
  """Return the absolute directory containing this configuration file."""
  here = os.path.abspath( __file__ )
  return os.path.dirname( here )
def MakeRelativePathsInFlagsAbsolute( flags, working_directory ):
  """Return a copy of *flags* with relative include paths rooted at
  *working_directory*.  Handles both the two-token form ('-I', 'path') and
  the fused form ('-Ipath') for every flag in path_flags."""
  if not working_directory:
    return list( flags )
  path_flags = [ '-isystem', '-I', '-iquote', '--sysroot=' ]
  absolute_flags = []
  pending_path = False  # previous token was a bare path flag
  for token in flags:
    rewritten = token
    if pending_path:
      pending_path = False
      if not token.startswith( '/' ):
        rewritten = os.path.join( working_directory, token )
    for prefix in path_flags:
      if token == prefix:
        pending_path = True
        break
      if token.startswith( prefix ):
        rewritten = prefix + os.path.join( working_directory,
                                           token[ len( prefix ): ] )
        break
    if rewritten:
      absolute_flags.append( rewritten )
  return absolute_flags
def IsHeaderFile( filename ):
  """True if *filename* carries a C/C++ header extension."""
  return os.path.splitext( filename )[ 1 ] in ( '.h', '.hxx', '.hpp', '.hh' )
def GetCompilationInfoForFile( filename ):
  """Look up compile flags for *filename* in the compilation database,
  falling back to a sibling source file's flags for headers."""
  # The compilation_commands.json file generated by CMake does not have entries
  # for header files. So we do our best by asking the db for flags for a
  # corresponding source file, if any. If one exists, the flags for that file
  # should be good enough.
  if IsHeaderFile( filename ):
    basename = os.path.splitext( filename )[ 0 ]
    for extension in SOURCE_EXTENSIONS:
      replacement_file = basename + extension
      if os.path.exists( replacement_file ):
        compilation_info = database.GetCompilationInfoForFile(
          replacement_file )
        if compilation_info.compiler_flags_:
          return compilation_info
    # No usable sibling source file was found for this header.
    return None
  return database.GetCompilationInfoForFile( filename )
def FlagsForFile( filename, **kwargs ):
  """YCM entry point: return the compile flags dict for *filename*,
  from the compilation database when available, else the static list."""
  if database:
    # Bear in mind that compilation_info.compiler_flags_ does NOT return a
    # python list, but a "list-like" StringVec object
    compilation_info = GetCompilationInfoForFile( filename )
    if not compilation_info:
      return None
    final_flags = MakeRelativePathsInFlagsAbsolute(
      compilation_info.compiler_flags_,
      compilation_info.compiler_working_dir_ )
    # NOTE: This is just for YouCompleteMe; it's highly likely that your project
    # does NOT need to remove the stdlib flag. DO NOT USE THIS IN YOUR
    # ycm_extra_conf IF YOU'RE NOT 100% SURE YOU NEED IT.
    try:
      final_flags.remove( '-stdlib=libc++' )
    except ValueError:
      pass
  else:
    relative_to = DirectoryOfThisScript()
    final_flags = MakeRelativePathsInFlagsAbsolute( flags, relative_to )
  return {
    'flags': final_flags,
    'do_cache': True
  }
| mit | Python | |
3b88f0a96b60734374656e290845fc826f988850 | add dumpprices | joequant/bitcoin-price-api | scripts/dumpprices.py | scripts/dumpprices.py | #!/bin/python3
import requests
import grequests
assets = ['USD', 'USDT', 'EUR', 'BTC', 'XRP', 'ETH', 'HKD', 'LTC', 'RUR']
#btce
def btc_e(assets):
    """Fetch BTC-e tickers for every pair whose both legs are in *assets*.

    Returns a list of {'from', 'to', 'bid', 'ask'} dicts; tickers for the
    selected pairs are fetched concurrently with grequests.
    """
    retval = []
    r = requests.get('https://btc-e.com/api/3/info').json()
    urls=[]
    pairs = []
    for k, v in r['pairs'].items():
        # Pair keys look like 'btc_usd'.
        k1, k2 = k.upper().split("_")
        if k1 in assets and k2 in assets:
            pairs.append(k)
            urls.append('https://btc-e.com/api/3/ticker/' + k)
    rs = [grequests.get(u) for u in urls]
    for i in zip(pairs, grequests.map(rs)):
        r = i[1].json()
        k = i[0]
        k1, k2 = k.upper().split("_")
        # NOTE(review): 'buy' is mapped to bid and 'sell' to ask here --
        # confirm this matches the BTC-e ticker field semantics.
        retval.append({'from': k1,
                       'to': k2,
                       'bid': r[k]['buy'],
                       'ask': r[k]['sell']})
    return retval
def gatecoin(assets):
    """Fetch Gatecoin live tickers filtered to pairs fully within *assets*.

    Returns a list of {'from', 'to', 'bid', 'ask'} dicts.
    """
    retval = []
    r = requests.get('https://api.gatecoin.com/Public/LiveTickers').json()
    for k in r['tickers']:
        # currencyPair is a fused 6-letter code, e.g. 'BTCUSD'.
        s = k['currencyPair']
        k1 = s[0:3].upper()
        k2 = s[3:].upper()
        if k1 in assets and k2 in assets:
            retval.append({'from': k1,
                           'to': k2,
                           'bid': k['bid'],
                           'ask': k['ask']})
    return retval
def poloniex(assets):
    """Fetch Poloniex tickers for pairs whose both legs are in *assets*.

    Returns a list of {'from', 'to', 'bid', 'ask'} dicts.
    """
    retval = []
    r = requests.get('https://poloniex.com/public?command=returnTicker')
    d = r.json()
    for k, v in d.items():
        # Pair keys look like 'BTC_ETH'.
        k1, k2 = k.split("_")
        if k1 in assets and k2 in assets:
            retval.append({'from': k1,
                           'to': k2,
                           'bid': v['highestBid'],
                           'ask': v['lowestAsk']})
    return retval
def bitfinex(assets):
    """Fetch Bitfinex tickers for pairs touching *assets*.

    Returns a list of {'from', 'to', 'bid', 'ask'} dicts; tickers for the
    selected pairs are fetched concurrently with grequests.
    """
    retval = []
    urls = []
    pairs = []
    bitfinex_url = 'https://api.bitfinex.com/v1'
    symbols = requests.get(bitfinex_url + '/symbols').json()
    for s in symbols:
        k1 = s[0:3].upper()
        k2 = s[3:].upper()
        # Prefilter: keep a pair if either leg is interesting; the caller
        # applies the stricter both-legs filter afterwards.
        if k1 in assets or k2 in assets:
            pairs.append(s)
            urls.append(bitfinex_url + '/pubticker/' + s)
    rs = [grequests.get(u) for u in urls]
    # BUG FIX: zip the *filtered* pair list with its responses.  Zipping the
    # full symbol list (as before) misaligned symbol names and tickers
    # whenever any symbol was filtered out above.
    for i in zip(pairs, grequests.map(rs)):
        r = i[1].json()
        k = i[0]
        k1 = k[0:3].upper()
        k2 = k[3:].upper()
        retval.append({'from': k1,
                       'to': k2,
                       'bid': r['bid'],
                       'ask': r['ask']})
    return retval
def bitstamp(assets):
    """Fetch Bitstamp tickers for the fixed pair list, filtered to *assets*.

    Returns a list of {'from', 'to', 'bid', 'ask'} dicts.
    """
    retval = []
    bitstamp_url = 'https://www.bitstamp.net/api/v2/ticker/'
    for s in ['btcusd', 'btceur',
              'eurusd', 'xrpusd', 'xrpeur',
              'xrpbtc']:
        d = requests.get(bitstamp_url + s + "/").json()
        k1 = s[0:3].upper()
        k2 = s[3:].upper()
        # BUG FIX: the original condition was inverted ('not in ... or not
        # in ...'), which kept only pairs *outside* the asset list and so
        # returned nothing for the default assets.  Keep a pair only when
        # both legs are requested, consistent with the other exchanges.
        if k1 in assets and k2 in assets:
            retval.append({'from': k1,
                           'to': k2,
                           'bid': d['bid'],
                           'ask': d['ask']})
    return retval
#add tag
def add_tag(d, tag):
    """Suffix ':<tag>' onto the 'from' and 'to' legs of *d*, in place,
    and return the same dict."""
    for leg in ('from', 'to'):
        d[leg] = "{0}:{1}".format(d[leg], tag)
    return d
# Query every exchange, keep only pairs fully inside `assets`, tag each leg
# with the exchange name, and print CSV lines: from,to,bid,ask.
for k,v in [
    ['bitfinex', bitfinex],
    ['btce', btc_e],
    ['gatecoin', gatecoin],
    ['poloniex', poloniex],
    ['bitstamp', bitstamp]
    ]:
    for j in v(assets):
        if j['from'] not in assets or j['to'] not in assets:
            continue
        j = add_tag(j,k)
        print(','.join([j['from'], j['to'], str(j['bid']), str(j['ask'])]))
#bitfinex
#bitstamp
#anx
| mit | Python | |
f61570297ef56e94b104aff42c822ea82a66030b | Add tests for database | sanchopanca/rcblog,sanchopanca/rcblog,sanchopanca/rcblog | tests/test_database.py | tests/test_database.py | import unittest
from rcblog import db
class TestDataBase(unittest.TestCase):
    """Integration tests for rcblog.db against a local RethinkDB instance."""
    @classmethod
    def setUpClass(cls):
        # Point the module at a throwaway 'test' database and (re)create its
        # tables.  Each step is best-effort: the broad excepts deliberately
        # swallow "table already exists / missing" errors and just log them.
        db.DB_NAME = 'test'
        date_base = db.DataBase()
        try:
            db.r.table_drop('languages').run(date_base.connection)
        except Exception as e:
            print(e)
        try:
            db.r.table_drop('posts').run(date_base.connection)
        except Exception as e:
            print(e)
        try:
            date_base.init()
        except Exception as e:
            print(e)
        try:
            db.r.table('languages').delete().run(date_base.connection)
        except Exception as e:
            print(e)
        try:
            db.r.table('posts').delete().run(date_base.connection)
        except Exception as e:
            print(e)
    def setUp(self):
        # Fresh connection per test.
        self.date_base = db.DataBase()
    def tearDown(self):
        # Empty both tables so tests stay independent.
        db.r.table('languages').delete().run(self.date_base.connection)
        db.r.table('posts').delete().run(self.date_base.connection)
    def test_add_translation(self):
        # Adding a translation must extend, not replace, the existing map.
        self.date_base.add_post({'eng': 'post1_eng.md', 'rus': 'post1_rus.md'}, ['tag1', 'tag2'])
        posts = self.date_base.get_all_posts()
        self.assertEqual(len(posts), 1)
        post = posts[0]
        id_ = post['id']
        self.date_base.add_translation(id_, {'jbo': 'post1_jbo.md'})
        posts = self.date_base.get_all_posts()
        self.assertEqual(len(posts), 1)
        post = posts[0]
        self.assertEqual(post['translations']['eng'], 'post1_eng.md')
        self.assertEqual(post['translations']['rus'], 'post1_rus.md')
        self.assertEqual(post['translations']['jbo'], 'post1_jbo.md')
    def test_add_tag(self):
        # New tags are appended after the post's existing tags.
        self.date_base.add_post({'eng': 'post1_eng.md', 'rus': 'post1_rus.md'}, ['tag1', 'tag2'])
        posts = self.date_base.get_all_posts()
        self.assertEqual(len(posts), 1)
        post = posts[0]
        id_ = post['id']
        self.date_base.add_tags(id_, ['tag3', 'tag4'])
        posts = self.date_base.get_all_posts()
        self.assertEqual(len(posts), 1)
        post = posts[0]
        self.assertEqual(post['tags'], ['tag1', 'tag2', 'tag3', 'tag4'])
| mit | Python | |
054c71e88a5fb278ffcdac2ce85a59843f5e3ac0 | add new tests for oop | ratnania/pyccel,ratnania/pyccel | tests/scripts/oop/ex1.py | tests/scripts/oop/ex1.py | # coding: utf-8
#$ header class Point(public)
#$ header method __init__(Point, double, double)
#$ header method __del__(Point)
#$ header method translate(Point, double, double)
class Point(object):
    # Mutable 2-D point.  Argument/attribute types are declared by the
    # '#$ header' pyccel directives above the class, so comments only here.
    def __init__(self, x, y):
        self.x = x
        self.y = y
    def __del__(self):
        pass
    def translate(self, a, b):
        # Shift the point by the vector (a, b) in place.
        self.x = self.x + a
        self.y = self.y + b
p = Point (0.0, 0.0)
p.translate(1.0, 2.0)
#print(p.x, p.y)
#a = p.x
#print(a)
del p
| mit | Python | |
8d280e5a464a9ca75ac7c35e02d8de6bddbaaa7e | Add reaction tests | iwi/linkatos,iwi/linkatos | tests/test_reaction.py | tests/test_reaction.py | import pytest
import linkatos.reaction as react
# Each test pins down one branch of the linkatos.reaction predicates.
def test_positive_reaction():
    # '+1' is the positive (confirm) reaction.
    reaction = '+1'
    assert react.positive_reaction(reaction) is True
def test_not_positive_reaction():
    # '-1' is known but not positive.
    reaction = '-1'
    assert react.positive_reaction(reaction) is False
def test_known_reaction_neg():
    reaction = '-1'
    assert react.known_reaction(reaction) is True
def test_known_reaction_pos():
    reaction = '+1'
    assert react.known_reaction(reaction) is True
def test_unknown_reaction():
    # Anything outside {+1, -1} is unknown.
    reaction = 'worried'
    assert react.known_reaction(reaction) is False
def test_equal_ids():
    # A reaction targets the URL message only when the ids match.
    id_one = 'id'
    id_two = 'id'
    assert react.reacting_to_url(id_one, id_two) is True
def test_different_ids():
    id_one = 'id1'
    id_two = 'id2'
    assert react.reacting_to_url(id_one, id_two) is False
def test_confirmation():
    # Confirmation = positive reaction on the matching URL message.
    reaction = '+1'
    url_message_id = 'id'
    reaction_to_id = 'id'
    assert react.is_confirmation(reaction, url_message_id, reaction_to_id) is True
| mit | Python | |
c0f7be02fb1dc294a9bac2867fc695e353ea3445 | Test Resource. | soasme/electro | tests/test_resource.py | tests/test_resource.py | # -*- coding: utf-8 -*-
from unittest import TestCase
from electro.resource import Resource
class TestResource(TestCase):
    """Unit tests for Resource._parse_response status-code mapping."""
    def assert_parser(self, assert_value, values):
        # Helper: parse `values` and compare the resulting
        # (body, status, headers) triple against `assert_value`.
        resource = Resource()
        value = resource._parse_response(values)
        self.assertEqual(value, assert_value)
    def test_empty_content_will_return_204(self):
        self.assert_parser(('', 204, {}), '')
    def test_dict_will_return_200(self):
        self.assert_parser(('{}', 200, {}), {})
    def test_list_will_return_200(self):
        self.assert_parser(('[]', 200, {}), [])
    def test_str_will_return_200(self):
        self.assert_parser(('"test"', 200, {}), 'test')
    def test_data_with_code(self):
        # A (data, code) tuple overrides the default status.
        self.assert_parser(('{}', 201, {}), ({}, 201))
    def test_data_with_code_and_headers(self):
        # A (data, code, headers) tuple passes headers through as well.
        self.assert_parser(('{}', 201, {'k':'v'}), ({}, 201, {'k':'v'}))
| mit | Python | |
5adf35b9131ea6c0a16f6765cf44c50767ddc3f3 | add testanalyzing | Parisson/TimeSide,Parisson/TimeSide,Parisson/TimeSide,Parisson/TimeSide,Parisson/TimeSide | tests/testanalyzing.py | tests/testanalyzing.py | from timeside.decoder import *
from timeside.analyzer import *
from unit_timeside import *
import os.path
__all__ = ['TestAnalyzing']
class TestAnalyzing(TestCase):
    "Test all analyzers"
    # NOTE(review): each test only selects an analyzer and its expected
    # value; the decode/analyze pipeline is actually run -- and asserted --
    # in tearDown.  Unconventional, but it keeps each analyzer test to two
    # lines.
    def setUp(self):
        self.source = os.path.join (os.path.dirname(__file__), "samples/sweep.wav")
    def testDC(self):
        "Test mean DC shift"
        self.analyzer = MeanDCShift()
        self.value = -0
    def testMeanLevel(self):
        "Test mean level"
        self.analyzer = MeanLevel()
        self.value = -9.856
    def testMaxLevel(self):
        "Test max level"
        self.analyzer = MaxLevel()
        self.value = -6.0209999999999999
    def tearDown(self):
        # Build the decoder|analyzer pipeline on the sweep sample and check
        # the result chosen by the test method.
        decoder = FileDecoder(self.source)
        (decoder | self.analyzer).run()
        self.assertEquals(self.analyzer.result(), self.value)
if __name__ == '__main__':
unittest.main(testRunner=TestRunner())
| agpl-3.0 | Python | |
b2a083e1531134ec82a70ca581fca31db7867566 | add test for data with no coincidences | simomarsili/ndd | tests/test_singletons.py | tests/test_singletons.py | # -*- coding: utf-8 -*-
# pylint: disable=missing-docstring
# pylint: disable=redefined-outer-name
"""Test ref results for data with no coincidences."""
import numpy
import pytest
from pytest import approx
from ndd.estimators import NSB, AsymptoticNSB, Plugin
from ndd.exceptions import NddError
N = (10, 10)
K = (10, 1000)
@pytest.fixture(params=zip(N, K))
def data(request):
    # One (n, k) scenario per param: n singleton counts over cardinality k.
    # NOTE(review): params is a zip iterator -- this relies on pytest
    # materializing it once at collection time; confirm on pytest upgrades.
    n, k = request.param
    return {'nk': numpy.array([1] * n), 'k': k}
def test_NSB(data):
    """The NSB estimate should be somewhat close to log(k)"""
    estimator = NSB()
    relative_error = 1 - estimator(**data) / numpy.log(data['k'])
    assert 0 < relative_error < 0.2
def test_Asymptotic(data):
    """Should raise an exception"""
    # The asymptotic estimator needs coincidences; all-singleton data has none.
    estimator = AsymptoticNSB()
    with pytest.raises(NddError):
        estimator(**data)
def test_Plugin(data):
    """Should be close to the log of #visited bins with frequency > 0"""
    estimator = Plugin(alpha=None)
    k = sum(data['nk'] > 0)
    assert estimator(**data) == approx(numpy.log(k))
def test_Plugin_pseudo(data):
    """Should be close to log(cardinality)"""
    # With alpha=1 pseudocounts the estimate approaches log(k).
    estimator = Plugin(alpha=1)
    assert estimator(**data) == approx(numpy.log(data['k']), rel=1.e-3)
| bsd-3-clause | Python | |
d8470858316f260a1801d7113f2eee6a0595b9d1 | add tool.py | cleverhans-lab/cleverhans,cleverhans-lab/cleverhans,cleverhans-lab/cleverhans,openai/cleverhans | examples/adversarial_asr/tool.py | examples/adversarial_asr/tool.py | from tensorflow.python import pywrap_tensorflow
import numpy as np
import tensorflow as tf
from lingvo.core import asr_frontend
from lingvo.core import py_utils
def _MakeLogMel(audio, sample_rate):
  # Compute log-mel features for a single utterance.  The frontend is built
  # for 16 kHz audio, so the incoming sample rate is asserted to match.
  audio = tf.expand_dims(audio, axis=0)  # add a batch dimension of 1
  static_sample_rate = 16000
  mel_frontend = _CreateAsrFrontend()
  with tf.control_dependencies(
      [tf.assert_equal(sample_rate, static_sample_rate)]):
    log_mel, _ = mel_frontend.FPropDefaultTheta(audio)
  return log_mel
def _CreateAsrFrontend():
  # Build the lingvo mel frontend used for 16 kHz ASR input:
  # 25 ms frames every 10 ms, 80 mel bins spanning 125-7600 Hz,
  # pre-emphasis 0.97, no noise, no end padding.
  p = asr_frontend.MelFrontend.Params()
  p.sample_rate = 16000.
  p.frame_size_ms = 25.
  p.frame_step_ms = 10.
  p.num_bins = 80
  p.lower_edge_hertz = 125.
  p.upper_edge_hertz = 7600.
  p.preemph = 0.97
  p.noise_scale = 0.
  p.pad_end = False
  # Stack 3 frames and sub-sample by a factor of 3.
  p.left_context = 2
  p.output_stride = 3
  return p.cls(p)
def create_features(input_tf, sample_rate_tf, mask_freq):
    """
    Compute masked log-mel features for a batch of waveforms.

    Return:
        A tensor of features with size (batch_size, max_time_steps, 80)
    """
    features_list = []
    # Unstack each utterance from the batched placeholder.
    input_unpack = tf.unstack(input_tf, axis=0)
    for i in range(len(input_unpack)):
        features = _MakeLogMel(input_unpack[i], sample_rate_tf)
        features = tf.reshape(features, shape=[-1, 80])
        features = tf.expand_dims(features, dim=0)
        features_list.append(features)
    features_tf = tf.concat(features_list, axis=0)
    # Zero out masked frequency bins / padded frames.
    features_tf = features_tf * mask_freq
    return features_tf
def create_inputs(model, features, tgt, batch_size, mask_freq):
    """Pack features and target transcripts into the NestedMap that the
    lingvo ASR task expects as its input batch."""
    tgt_ids, tgt_labels, tgt_paddings = model.GetTask().input_generator.StringsToIds(tgt)
    # we expect src_inputs to be of shape [batch_size, num_frames, feature_dim, channels]
    # NOTE(review): this zeros tensor is immediately overwritten below.
    src_paddings = tf.zeros([tf.shape(features)[0], tf.shape(features)[1]], dtype=tf.float32)
    # Padding is the complement of the mask's frame dimension.
    src_paddings = 1. - mask_freq[:,:,0]
    src_frames = tf.expand_dims(features, dim=-1)
    inputs = py_utils.NestedMap()
    inputs.tgt = py_utils.NestedMap(
        ids=tgt_ids,
        labels=tgt_labels,
        paddings=tgt_paddings,
        weights=1.0 - tgt_paddings)
    inputs.src = py_utils.NestedMap(src_inputs=src_frames, paddings=src_paddings)
    inputs.sample_ids = tf.zeros([batch_size])
    return inputs
class Transform(object):
    '''
    Compute the scaled power spectral density of a waveform.

    Return: PSD
    '''
    # NOTE(review): FLAGS is not imported in this module; it is presumably
    # tf.flags.FLAGS defined by the importing script -- confirm window_size
    # is registered before this class is instantiated.
    def __init__(self):
        self.scale = 8. / 3.
        self.frame_length = int(FLAGS.window_size)
        self.frame_step = int(FLAGS.window_size//4)
    def __call__(self, x, psd_max_ori):
        win = tf.contrib.signal.stft(x, self.frame_length, self.frame_step)
        z = self.scale *tf.abs(win / FLAGS.window_size)
        psd = tf.square(z)
        # Normalize by each utterance's original PSD maximum.
        PSD = tf.pow(10., 9.6) / tf.reshape(psd_max_ori, [-1, 1, 1]) * psd
return PSD | mit | Python | |
7ac29357f9bd022a5d1bc68a0a7aa589a7ff5790 | add server crypto example | aliyun/aliyun-oss-python-sdk | examples/object_server_crypto.py | examples/object_server_crypto.py | # -*- coding: utf-8 -*-
import os
import shutil
import oss2
from oss2.headers import requestHeader
# The following code demonstrates the server-side encryption operations of OSS.
# First initialize AccessKeyId, AccessKeySecret, Endpoint and related values.
# They are read from environment variables, or you can replace placeholders
# such as "<你的AccessKeyId>" with a real AccessKeyId, etc.
#
# Using the Hangzhou region as an example, the endpoint can be
#   http://oss-cn-hangzhou.aliyuncs.com
#   https://oss-cn-hangzhou.aliyuncs.com
# for access over HTTP or HTTPS respectively.
access_key_id = os.getenv('OSS_TEST_ACCESS_KEY_ID', '<你的AccessKeyId>')
access_key_secret = os.getenv('OSS_TEST_ACCESS_KEY_SECRET', '<你的AccessKeySecret>')
bucket_name = os.getenv('OSS_TEST_BUCKET', '<你的Bucket>')
endpoint = os.getenv('OSS_TEST_ENDPOINT', '<你的访问域名>')
# Make sure all of the parameters above were filled in correctly.
for param in (access_key_id, access_key_secret, bucket_name, endpoint):
    assert '<' not in param, '请设置参数:' + param
key = 'server-crypto.txt'
content = b'a' * 1024 * 1024
# Create a Bucket object; all object-related operations go through it.
bucket = oss2.Bucket(oss2.Auth(access_key_id, access_key_secret), endpoint, bucket_name)
# Upload the object with server-side AES256 encryption.
myHeader = requestHeader()
myHeader.setServerSideEncryption("AES256")
bucket.put_object(key, content, headers = myHeader)
# Download the object and verify the content round-trips.
result = bucket.get_object(key)
content_got = b''
for chunk in result:
    content_got += chunk
assert content_got == content
# Upload the object with server-side KMS encryption.
myHeader = requestHeader()
myHeader.setServerSideEncryption("KMS", cmk_id = "11111")
bucket.put_object(key, content, headers = myHeader)
# Download the object and verify the content round-trips.
result = bucket.get_object(key)
content_got = b''
for chunk in result:
    content_got += chunk
assert content_got == content
| mit | Python | |
ad54db707004dd2b6e445c72462c1e937417d046 | test viz lib on fibonacci numbers | egorhm/algo | algopy/fib_gcd.py | algopy/fib_gcd.py | from rcviz import viz, callgraph
@viz
def fib1(num):
    # Naive doubly-recursive Fibonacci; @viz instruments each call so rcviz
    # can draw the recursion tree.
    assert num >= 0
    if num <= 1:
        return num
    prev, prev2 = fib1(num - 1), fib1(num - 2)
    total = prev + prev2
    return total
@viz
def fib2(num):
    # Same naive Fibonacci as fib1, written with explicit branches.
    assert num >= 0
    if num <= 1:
        return num
    return fib2(num - 1) + fib2(num - 2)
def gcd(a, b):
    """Euclid's algorithm: greatest common divisor of two non-negative
    integers, with a debug trace printed per call."""
    # Parenthesized call form so the trace works under Python 2 and 3
    # (the original used a Python-2-only print statement).
    print("a = %d, b = %d" % (a, b))
    if a == 0 or b == 0:
        return max(a, b)
    return gcd(b % a, a)
def main():
    # Demo: compute and display gcd(24, 9).
    result = gcd(24, 9)
    print(result)
if __name__ == "__main__":
# main()
print fib1(6)
# callgraph.reset()
callgraph.render("test.png")
| mit | Python | |
8510352580ac6f39d706b6a4ace8426f9b45ca6c | Add unit tests for security_group_rules_client | bigswitch/tempest,cisco-openstack/tempest,Tesora/tesora-tempest,Tesora/tesora-tempest,rakeshmi/tempest,openstack/tempest,zsoltdudas/lis-tempest,vedujoshi/tempest,Juniper/tempest,sebrandon1/tempest,izadorozhna/tempest,zsoltdudas/lis-tempest,LIS/lis-tempest,masayukig/tempest,xbezdick/tempest,vedujoshi/tempest,openstack/tempest,rakeshmi/tempest,masayukig/tempest,izadorozhna/tempest,xbezdick/tempest,Juniper/tempest,bigswitch/tempest,sebrandon1/tempest,cisco-openstack/tempest,LIS/lis-tempest | tempest/tests/services/compute/test_security_group_rules_client.py | tempest/tests/services/compute/test_security_group_rules_client.py | # Copyright 2015 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.services.compute.json import security_group_rules_client
from tempest.tests import fake_auth_provider
from tempest.tests.services.compute import base
class TestSecurityGroupRulesClient(base.BaseComputeServiceTest):
    """Unit tests for SecurityGroupRulesClient using a fake auth provider."""
    # Canned response body shared by the create tests.
    FAKE_SECURITY_GROUP_RULE = {
        "security_group_rule": {
            "id": "2d021cf1-ce4b-4292-994f-7a785d62a144",
            "ip_range": {
                "cidr": "0.0.0.0/0"
            },
            "parent_group_id": "48700ff3-30b8-4e63-845f-a79c9633e9fb",
            "to_port": 443,
            "ip_protocol": "tcp",
            "group": {},
            "from_port": 443
        }
    }
    def setUp(self):
        super(TestSecurityGroupRulesClient, self).setUp()
        fake_auth = fake_auth_provider.FakeAuthProvider()
        self.client = security_group_rules_client.SecurityGroupRulesClient(
            fake_auth, 'compute', 'regionOne')
    def _test_create_security_group_rule(self, bytes_body=False):
        # Exercise create() with the POST mocked out, for str or bytes bodies.
        req_body = {
            "from_port": "443",
            "ip_protocol": "tcp",
            "to_port": "443",
            "cidr": "0.0.0.0/0",
            "parent_group_id": "48700ff3-30b8-4e63-845f-a79c9633e9fb"
        }
        self.check_service_client_function(
            self.client.create_security_group_rule,
            'tempest.common.service_client.ServiceClient.post',
            self.FAKE_SECURITY_GROUP_RULE,
            to_utf=bytes_body, **req_body)
    def test_create_security_group_rule_with_str_body(self):
        self._test_create_security_group_rule()
    def test_create_security_group_rule_with_bytes_body(self):
        self._test_create_security_group_rule(bytes_body=True)
    def test_delete_security_group_rule(self):
        # delete() issues a DELETE and expects an empty 202 response.
        self.check_service_client_function(
            self.client.delete_security_group_rule,
            'tempest.common.service_client.ServiceClient.delete',
            {}, status=202, group_rule_id='group-id')
| apache-2.0 | Python | |
0d32be58f5145c067e012a9d314be3f688bcbc2a | Add tests for view | praekelt/vumi-go,praekelt/vumi-go,praekelt/vumi-go,praekelt/vumi-go | go/scheduler/tests/test_views.py | go/scheduler/tests/test_views.py | import datetime
from go.vumitools.tests.helpers import djangotest_imports
with djangotest_imports(globals()):
from django.core.urlresolvers import reverse
from django.template import defaultfilters
from django.conf import settings
from go.base.tests.helpers import GoDjangoTestCase, DjangoVumiApiHelper
from go.scheduler.models import Task
from go.scheduler.views import SchedulerListView
class TestSchedulerListView(GoDjangoTestCase):
    """View tests for the scheduler task list (url name scheduler:tasks)."""
    def setUp(self):
        self.vumi_helper = self.add_helper(
            DjangoVumiApiHelper())
        self.user_helper = self.vumi_helper.make_django_user()
        self.client = self.vumi_helper.get_client()
    def create_task(self, label, account_id=None, delta=7):
        # Create a Task scheduled `delta` days from now (negative = in the
        # past), defaulting to the logged-in user's account.
        now = datetime.datetime.now()
        scheduled_time = now + datetime.timedelta(days=delta)
        if account_id is None:
            account_id = self.user_helper.account_key
        return Task.objects.create(
            account_id=account_id, label=label, scheduled_for=scheduled_time)
    def test_no_tasks(self):
        r = self.client.get(reverse('scheduler:tasks'))
        self.assertContains(r, '>Scheduled Tasks</a>')
        self.assertContains(r, '>No scheduled tasks<')
    def assert_contains_task(self, response, task):
        # A rendered row must show label, type, status, localized
        # timestamp, timezone and the remaining time.
        self.assertContains(response, task.label)
        self.assertContains(response, task.get_task_type_display())
        self.assertContains(response, task.get_status_display())
        formatted_date = defaultfilters.date(
            task.scheduled_for, settings.DATETIME_FORMAT)
        self.assertContains(response, formatted_date)
        timezone = defaultfilters.date(
            task.scheduled_for, 'T')
        self.assertContains(response, timezone)
        time_remaining = defaultfilters.timeuntil(task.scheduled_for)
        self.assertContains(response, time_remaining)
    def test_single_task(self):
        task = self.create_task('Test task')
        r = self.client.get(reverse('scheduler:tasks'))
        self.assert_contains_task(r, task)
    def test_multiple_pages(self):
        # One task more than a page: the overflow task must not appear on
        # page 1 and pagination arrows must be rendered.
        tasks = []
        for i in range(SchedulerListView.paginate_by + 1):
            task = self.create_task('Test task %d' % i)
            tasks.append(task)
        r = self.client.get(reverse('scheduler:tasks'))
        excluded_task = tasks.pop()
        for task in tasks:
            self.assert_contains_task(r, task)
        self.assertNotContains(r, excluded_task.label)
        self.assertContains(r, '←</a>')
        self.assertContains(r, '→</a>')
    def test_task_past(self):
        # Tasks scheduled in the past must not be listed.
        task = self.create_task('Test task', delta=-7)
        r = self.client.get(reverse('scheduler:tasks'))
        self.assertNotContains(r, task.label)
    def test_task_different_user(self):
        # Tasks belonging to another account must not be listed.
        user2 = self.vumi_helper.make_django_user(email='user2@domain.com')
        task = self.create_task('Test task', account_id=user2.account_key)
        r = self.client.get(reverse('scheduler:tasks'))
        self.assertNotContains(r, task.label)
    def test_scheduled_tasks_in_header(self):
        r = self.client.get(reverse('scheduler:tasks'))
        self.assertContains(r, '>Scheduled Tasks</a></li>')
| bsd-3-clause | Python | |
c39b95eebb402d1d0137448b3f0efd9b6d7ec169 | Test if repository manager if retrieving a repository when we lookup after one | shawkinsl/pyolite,PressLabs/pyolite | tests/managers/test_repository.py | tests/managers/test_repository.py | from unittest import TestCase
from mock import MagicMock, patch
from nose.tools import eq_
from pyolite.managers.repository import RepositoryManager
class TestRepositoryManager(TestCase):
    """Tests for RepositoryManager lookup behaviour."""
    def test_get_repository(self):
        # get() must delegate to Repository.get_by_name, passing the Path
        # and Git objects the manager built from the admin repo path.
        mocked_repository = MagicMock()
        mocked_repository.get_by_name.return_value = 'my_repo'
        mocked_path = MagicMock()
        mocked_git = MagicMock()
        with patch.multiple('pyolite.managers.manager',
                            Path=MagicMock(return_value=mocked_path),
                            Git=MagicMock(return_value=mocked_git)):
            with patch.multiple('pyolite.managers.repository',
                                Repository=mocked_repository):
                repos = RepositoryManager('/path/to/admin/repo/')
                eq_(repos.get('my_repo'), 'my_repo')
                mocked_repository.get_by_name.assert_called_once_with('my_repo',
                                                                      mocked_path,
                                                                      mocked_git)
| bsd-2-clause | Python | |
9ad755263fe12fa16c0b27381893c380626c85d8 | Add unittest for string_view conversion | olifre/root,olifre/root,karies/root,olifre/root,zzxuanyuan/root,karies/root,olifre/root,root-mirror/root,karies/root,zzxuanyuan/root,olifre/root,root-mirror/root,zzxuanyuan/root,karies/root,karies/root,root-mirror/root,root-mirror/root,zzxuanyuan/root,karies/root,olifre/root,root-mirror/root,zzxuanyuan/root,root-mirror/root,zzxuanyuan/root,olifre/root,olifre/root,zzxuanyuan/root,root-mirror/root,zzxuanyuan/root,olifre/root,karies/root,zzxuanyuan/root,root-mirror/root,root-mirror/root,karies/root,zzxuanyuan/root,karies/root,zzxuanyuan/root,olifre/root,zzxuanyuan/root,karies/root,root-mirror/root,karies/root,root-mirror/root,olifre/root | bindings/pyroot/test/conversions.py | bindings/pyroot/test/conversions.py | import unittest
import ROOT
cppcode = """
void stringViewConv(std::string_view) {};
"""
class ListInitialization(unittest.TestCase):
    # NOTE(review): the class name looks copied from another test module;
    # these tests actually cover std::string_view conversion.
    @classmethod
    def setUpClass(cls):
        # JIT-compile the C++ helper once for the whole test case.
        ROOT.gInterpreter.Declare(cppcode)
    def test_string_view_conv(self):
        # A Python str must convert implicitly to std::string_view.
        ROOT.stringViewConv("pyString")
if __name__ == '__main__':
unittest.main()
| lgpl-2.1 | Python | |
8f02faec76c9b8cb7468934a4981fe1fe3ed30b5 | add client | fivejjs/crosscat,poppingtonic/BayesDB,probcomp/crosscat,fivejjs/crosscat,probcomp/crosscat,mit-probabilistic-computing-project/crosscat,mit-probabilistic-computing-project/crosscat,JDReutt/BayesDB,probcomp/crosscat,fivejjs/crosscat,JDReutt/BayesDB,probcomp/crosscat,mit-probabilistic-computing-project/crosscat,mit-probabilistic-computing-project/crosscat,poppingtonic/BayesDB,fivejjs/crosscat,fivejjs/crosscat,mit-probabilistic-computing-project/crosscat,JDReutt/BayesDB,fivejjs/crosscat,probcomp/crosscat,mit-probabilistic-computing-project/crosscat,JDReutt/BayesDB,poppingtonic/BayesDB,probcomp/crosscat,probcomp/crosscat,probcomp/crosscat,poppingtonic/BayesDB,mit-probabilistic-computing-project/crosscat,JDReutt/BayesDB,fivejjs/crosscat,poppingtonic/BayesDB | jsonrpc_http/Client.py | jsonrpc_http/Client.py | import tabular_predDB.python_utils.api_utils as au
from tabular_predDB.jsonrpc_http.MiddlewareEngine import MiddlewareEngine
middleware_engine = MiddlewareEngine()
class Client(object):
    """JSON-RPC client for the tabular predictive database.

    With a hostname the client POSTs each call to the remote HTTP endpoint;
    with hostname=None it dispatches calls directly to the in-process
    MiddlewareEngine instance (useful for testing without a server).
    """
    def __init__(self, hostname='localhost', port=8008):
        if hostname is None:
            self.online = False
        else:
            # Fix: self.online was never set on this branch, so every
            # remote client crashed with AttributeError inside call().
            self.online = True
            self.URI = 'http://' + hostname + ':%d' % port
    def call(self, method_name, args_dict):
        """Invoke method_name with args_dict, remotely or in-process."""
        import inspect  # Fix: inspect was used below but never imported.
        if self.online:
            out, id = au.call(method_name, args_dict, self.URI)
        else:
            # Map the args dict onto the engine method's positional args.
            method = getattr(middleware_engine, method_name)
            argnames = inspect.getargspec(method)[0]
            args = [args_dict[argname] for argname in argnames if argname in args_dict]
            out = method(*args)
        return out
    def ping(self):
        return self.call('ping', {})
    def runsql(self, sql_command):
        return self.call('runsql', {'sql_command': sql_command})
    def start_from_scratch(self):
        return self.call('start_from_scratch', {})
    def drop_and_load_db(self, filename):
        return self.call('drop_and_load_db', {'filename': filename})
    def drop_tablename(self, tablename):
        return self.call('drop_tablename', {'tablename': tablename})
    def delete_chain(self, tablename, chain_index):
        # Fix: chain_index was accepted but silently dropped from the call.
        return self.call('delete_chain',
                         {'tablename': tablename, 'chain_index': chain_index})
    def upload_data_table(self, tablename, csv, crosscat_column_types):
        args_dict = dict()
        args_dict['tablename'] = tablename
        # Fix: previously referenced the undefined name 'table_csv'
        # (NameError at runtime).
        args_dict['csv'] = csv
        args_dict['crosscat_column_types'] = crosscat_column_types
        return self.call('upload_data_table', args_dict)
    def create_model(self, tablename, n_chains):
        args_dict = dict()
        args_dict['tablename'] = tablename
        args_dict['n_chains'] = n_chains
        return self.call('create_model', args_dict)
    def analyze(self, tablename, chain_index=1, iterations=2, wait=False):
        args_dict = dict()
        args_dict['tablename'] = tablename
        args_dict['chain_index'] = chain_index
        args_dict['wait'] = wait
        args_dict['iterations'] = iterations
        return self.call('analyze', args_dict)
    def infer(self, tablename, columnstring, newtablename, confidence, whereclause, limit, numsamples):
        args_dict = dict()
        args_dict['tablename'] = tablename
        args_dict['columnstring'] = columnstring
        args_dict['newtablename'] = newtablename
        args_dict['whereclause'] = whereclause
        args_dict['confidence'] = confidence
        args_dict['limit'] = limit
        args_dict['numsamples'] = numsamples
        return self.call('infer', args_dict)
    def predict(self, tablename, columnstring, newtablename, whereclause, numpredictions):
        args_dict = dict()
        args_dict['tablename'] = tablename
        args_dict['columnstring'] = columnstring
        args_dict['newtablename'] = newtablename
        args_dict['whereclause'] = whereclause
        args_dict['numpredictions'] = numpredictions
        return self.call('predict', args_dict)
    def write_json_for_table(self, tablename):
        return self.call('write_json_for_table', {'tablename': tablename})
    def create_histogram(self, M_c, data, columns, mc_col_indices, filename):
        args_dict = dict()
        args_dict['M_c'] = M_c
        args_dict['data'] = data
        args_dict['columns'] = columns
        args_dict['mc_col_indices'] = mc_col_indices
        args_dict['filename'] = filename
        return self.call('create_histogram', args_dict)
    def jsonify_and_dump(self, to_dump, filename):
        return self.call('jsonify_and_dump', {'to_dump': to_dump, 'filename': filename})
    def get_metadata_and_table(self, tablename):
        return self.call('get_metadata_and_table', {'tablename': tablename})
    def get_latent_states(self, tablename):
        return self.call('get_latent_states', {'tablename': tablename})
    def gen_feature_z(self, tablename, filename=None, dir=None):
        return self.call('gen_feature_z', {'tablename': tablename, 'filename': filename, 'dir': dir})
    def dump_db(self, filename, dir=None):
        return self.call('dump_db', {'filename': filename, 'dir': dir})
    def guessschema(self, tablename, csv):
        return self.call('guessschema', {'tablename': tablename, 'csv': csv})
| apache-2.0 | Python | |
6cfca819bbefab1f38904fc73b46dae80e03b32e | Create __init__.py | alex-kooper/knockoutpy | knockoutpy/__init__.py | knockoutpy/__init__.py | mit | Python | ||
9eb5f67a954888c4e14789b5b8acc785c789a77c | Add a command for creating rsa key. | juanifioren/django-oidc-provider,wojtek-fliposports/django-oidc-provider,nmohoric/django-oidc-provider,nmohoric/django-oidc-provider,ByteInternet/django-oidc-provider,juanifioren/django-oidc-provider,wojtek-fliposports/django-oidc-provider,bunnyinc/django-oidc-provider,bunnyinc/django-oidc-provider,wayward710/django-oidc-provider,torreco/django-oidc-provider,wayward710/django-oidc-provider,torreco/django-oidc-provider,ByteInternet/django-oidc-provider | oidc_provider/management/commands/creatersakey.py | oidc_provider/management/commands/creatersakey.py | from Crypto.PublicKey import RSA
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
class Command(BaseCommand):
    """Management command that generates the OpenID server's RSA key."""

    help = 'Randomly generate a new RSA key for the OpenID server'

    def handle(self, *args, **options):
        try:
            # NOTE(review): 1024-bit RSA is weak by modern standards;
            # consider generating a 2048-bit key instead.
            key = RSA.generate(1024)
            file_path = settings.BASE_DIR + '/OIDC_RSA_KEY.pem'
            with open(file_path, 'w') as f:
                f.write(key.exportKey('PEM'))
            self.stdout.write('RSA key successfully created at: ' + file_path)
        except Exception as e:
            # Fix: Exception.message does not exist on Python 3;
            # str(e) works on both Python 2 and 3.
            self.stdout.write('Something goes wrong: ' + str(e))
| mit | Python | |
aaddd474b8e17164c59f445d14b75b9f20a95948 | add post install | gsmafra/py-aasp-casa | setup_post_install.py | setup_post_install.py | import urllib2
import zipfile
import re
import sys
from glob import glob
from os import chdir, mkdir, rename, getcwd
from os.path import exists
from resample_all import resample_all
def run_post_install():
    """Post-install hook: verify dependencies, download the QMUL
    scenes_stereo dataset, sort files into per-class folders, and
    resample everything to 8 kHz.
    """
    # Double check that the required third-party modules are importable.
    modules = set(['numpy', 'scipy', 'librosa', 'sklearn'])
    for module in modules:
        try:
            __import__(module)
        except ImportError:
            print('module \'' + str(module) + '\' is not installed')
            sys.exit()
    # Download the dataset zip, printing a simple progress meter.
    url = 'http://c4dm.eecs.qmul.ac.uk/rdr/bitstream/handle/123456789/29/scenes_stereo.zip'
    file_name = url.split('/')[-1]
    u = urllib2.urlopen(url)
    f = open(file_name, 'wb')
    meta = u.info()
    file_size = int(meta.getheaders("Content-Length")[0])
    print 'Downloading: %s Bytes: %s' % (file_name, file_size)
    file_size_dl = 0
    block_sz = 8192
    while True:
        buffer = u.read(block_sz)
        if not buffer:
            break
        file_size_dl += len(buffer)
        f.write(buffer)
        # Progress line rewritten in place using backspaces (chr(8)).
        status = r'%10d [%3.2f%%]' % (file_size_dl, file_size_dl * 100. / file_size)
        status = status + chr(8)*(len(status)+1)
        print status,
    f.close()
    # Extract the downloaded zip into the current directory.
    print('\nExtracting zip file')
    with zipfile.ZipFile('scenes_stereo.zip', "r") as z:
        z.extractall('./')
    # Move each file into a folder named after its class prefix (the part
    # of the filename before the first '0' or '1' digit).
    print('Moving files to class folders')
    audio_folder = 'scenes_stereo/'
    home_folder = getcwd()
    chdir(audio_folder)
    for filename in glob('*'):
        y = re.split('0|1', filename)[0]
        if not exists(y):
            mkdir(y)
        rename(filename, y + '/' + filename)
    # Resample all audio to 8 kHz (see resample_all.py).
    print('Resampling all files to 8kHz')
    chdir(home_folder)
    resample_all()
    print('Setup finished with no errors')
| mit | Python | |
11c4fe68be160caba706fab05767238396e8d25b | Add files via upload | moranzcw/Computer-Networking-A-Top-Down-Approach-NOTES | SocketProgrammingAssignment/作业3-邮件客户端/TSL和发送混合类型email.py | SocketProgrammingAssignment/作业3-邮件客户端/TSL和发送混合类型email.py |
from socket import *
import base64
endmsg = ".\r\n"
mail_t='1254516725@qq.com'
#chose qq mail smtp server
mailserver = 'smtp.qq.com'
fromaddr='2634081011@qq.com'
toaddr='galliumwang@163.com'
user='MjYzNDA4MTAxMUBxcS5jb20='
passw='aXFvcm1ncGd2aHp2ZWNnaQ=='
serverPort=25
serverPort_TLS=587
clientSocket = socket(AF_INET, SOCK_STREAM)
clientSocket.connect((mailserver, serverPort))#根据需求选择是否需要TLS加密的端口
recv = clientSocket.recv(1024).decode()
print(recv)
# Send HELO command and print server response.
heloCommand = 'HELO 169.254.186.23\r\n'
print(heloCommand)
clientSocket.send(heloCommand.encode())
recv = clientSocket.recv(1024).decode()
print(recv)
'''
temp='STARTTLS\r\n'
print(temp)
clientSocket.send(temp.encode())
recv = clientSocket.recv(1024).decode()
print(recv)
'''#如果选择TLS加密则去除该段注释符
# Send MAIL FROM command and print server response.
# Fill in start
temp='AUTH login\r\n'
print(temp)
clientSocket.send(temp.encode())
recv = clientSocket.recv(1024).decode()
print(recv)
temp=user+'\r\n'
print(temp)
clientSocket.send(temp.encode())
recv = clientSocket.recv(1024).decode()
print(recv)
temp=passw+'\r\n'
print(temp)
clientSocket.send(temp.encode())
recv = clientSocket.recv(1024).decode()
print(recv)
temp='mail from:<'+fromaddr+'>\r\n'#########
print(temp)
clientSocket.send(temp.encode())
recv = clientSocket.recv(1024).decode()
print(recv)
# Fill in end
# Send RCPT TO command and print server response.
# Fill in start
temp='rcpt to:<'+toaddr+'>\r\n'
print(temp)
clientSocket.send(temp.encode())
recv = clientSocket.recv(1024).decode()
print(recv)
# Fill in end
# Send DATA command and print server response.
# Fill in start
temp='data\r\n'
print(temp)
clientSocket.send(temp.encode())
recv = clientSocket.recv(1024).decode()
print(recv)
# Fill in end
#此部分可伪造收件人和发件人/除最后一处外,其余地方 \r\n or \n 皆可
send_text='from:%s\nto:%s\nsubject:hello,you!\
\nContent-Type:text/plain\n'%(fromaddr,toaddr)+'\n'+'hello'+'\r\n'
send_text=send_text.encode()
send_html='from:%s\nto:%s\nsubject:hello,you!\
\nContent-Type:text/html\n'%(fromaddr,toaddr)+'\n'+'<h1>hello</h1><img src="https://pic3.zhimg.com/50/v2-29a01fdecc80b16e73160c40637a5e8c_hd.jpg">'+'\r\n'
send_html=send_html.encode()
f=open('gfriend.jpg','rb').read()
f=base64.b64encode(f)
send_image=('from:%s\nto:%s\nsubject:hello,you!\
\nContent-Type:image/JPEG\nContent-transfer-encoding:base64\n'%(fromaddr,toaddr)+'\n').encode()+f+'\r\n'.encode()
#需要指定图片的编码类型
send_text_with_image='from:%s\nto:%s\nsubject:hello,you!\
\nContent-Type:multipart/mixed;boundary="simple"\n\n--simple\n'%(fromaddr,toaddr)+'Content-Type:text/html\n\n<h1>hello</h1><img src="https://pic3.zhimg.com/50/v2-29a01fdecc80b16e73160c40637a5e8c_hd.jpg">\n\n'
send_text_with_image=send_text_with_image.encode()+'--simple\n'.encode()+'Content-Type:image/JPEG\nContent-transfer-encoding:base64\n\n'.encode()
f=open('gfriend.jpg','rb').read()
f=base64.b64encode(f)
send_text_with_image+=f
send_text_with_image+='\n--simple\r\n'.encode()
temp=send_text_with_image
print(temp)
clientSocket.send(temp)
temp=endmsg
print(temp)
clientSocket.send(temp.encode())
recv = clientSocket.recv(1024).decode()
print(recv)
# Fill in end
# Send QUIT command and get server response.
# Fill in start
temp='quit\r\n'
print(temp)
clientSocket.send(temp.encode())
recv = clientSocket.recv(1024).decode()
print(recv)
# Fill in end
| mit | Python | |
45140f281ac8df0a8f325e99d2cc17385eabbcf4 | Create fizzbuzz.py | Souloist/Projects,Souloist/Projects,Souloist/Projects,Souloist/Projects,Souloist/Projects | solutions/fizzbuzz.py | solutions/fizzbuzz.py | def fizzbuzz(number):
for i in range(number):
if i%15 == 0:
print "FizzBuzz"
elif i%5 == 0:
print "Buzz"
elif i%3 == 0:
print "Fizz"
else:
print i
def main():
    """Entry point: print the FizzBuzz sequence for 0..100 inclusive."""
    fizzbuzz(101)


if __name__ == '__main__':
    main()
| mit | Python | |
2ea891fd99eb50f58abb6cf1dba55950916742ab | Clear solution for roman-numerals. | mknecht/checkio-attempts | roman-numerals.py | roman-numerals.py | # I 1 (unus)
# V 5 (quinque)
# X 10 (decem)
# L 50 (quinquaginta)
# C 100 (centum)
# D 500 (quingenti)
# M 1,000 (mille)
# Symbol for a single unit of each decimal place (units, tens, hundreds,
# thousands).
place2symbol = {
    0: "I",
    1: "X",
    2: "C",
    3: "M",
}

# Longhand symbol runs and their compact quinary/subtractive equivalents.
# The 9-run must be collapsed before the 5-run so "IIIIIIIII" becomes "IX"
# rather than "VIIII".
replacements = [
    ("I" * 9, "IX"),
    ("I" * 5, "V"),
    ("I" * 4, "IV"),
    ("X" * 9, "XC"),
    ("X" * 5, "L"),
    ("X" * 4, "XL"),
    ("C" * 9, "CM"),
    ("C" * 5, "D"),
    ("C" * 4, "CD"),
]


def checkio(number):
    """Convert a positive integer (up to 3999) to a Roman numeral string."""
    digits = str(number)
    highest_place = len(digits) - 1
    pieces = []
    for offset, digit in enumerate(digits):
        if digit == "0":
            continue  # There is no zero in the Roman number system.
        pieces.append(place2symbol[highest_place - offset] * int(digit))
    roman = "".join(pieces)
    for longhand, compact in replacements:
        roman = roman.replace(longhand, compact)
    return roman
if __name__ == '__main__':
    # Smoke tests: plain ('VI'), quinary ('LXXVI'), subtractive ('CDXCIX'),
    # and maximal-run ('MMMDCCCLXXXVIII') cases.
    assert checkio(6) == 'VI', '6'
    assert checkio(76) == 'LXXVI', '76'
    assert checkio(499) == 'CDXCIX', '499'
    assert checkio(3888) == 'MMMDCCCLXXXVIII', '3888'
| mit | Python | |
d0c4ff9461144e9608c30c8d5a43381282912cc0 | Add builtin/github/writer.py | samjabrahams/anchorhub | anchorhub/builtin/github/writer.py | anchorhub/builtin/github/writer.py | """
File that initializes a Writer object designed for GitHub style markdown files.
"""
from anchorhub.writer import Writer
from anchorhub.builtin.github.wstrategies import MarkdownATXWriterStrategy, \
MarkdownSetextWriterStrategy, MarkdownInlineLinkWriterStrategy
import anchorhub.builtin.github.switches as ghswitches
def make_github_markdown_writer(opts):
    """Build a Writer that converts AnchorHub tags to GitHub-style anchors
    in Markdown files.

    :param opts: parsed AnchorHub options; must provide a 'wrapper_regex'
        attribute, which the writer strategies rely on
    :return: a Writer wired with ATX-heading and inline-link strategies,
        plus a switch that skips GitHub fenced code blocks
    """
    assert hasattr(opts, 'wrapper_regex')
    return Writer(
        [
            MarkdownATXWriterStrategy(opts),
            MarkdownInlineLinkWriterStrategy(opts),
        ],
        switches=[ghswitches.code_block_switch],
    )
ed19693800bbe50121fead603a3c645fdc1ed81a | Add migration | City-of-Helsinki/smbackend,City-of-Helsinki/smbackend | services/migrations/0059_add_unit_count_related_name.py | services/migrations/0059_add_unit_count_related_name.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-05-17 11:34
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add related_name='unit_counts' to ServiceNodeUnitCount.service_node
    so counts are reachable as service_node.unit_counts."""

    dependencies = [
        ('services', '0058_add_servicenodeunitcount'),
    ]

    operations = [
        migrations.AlterField(
            model_name='servicenodeunitcount',
            name='service_node',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='unit_counts', to='services.ServiceNode'),
        ),
    ]
| agpl-3.0 | Python | |
d46374388596fee83be8aa850afc961579b71a22 | add basic settings.py | openatx/uiautomator2,openatx/uiautomator2,openatx/uiautomator2 | uiautomator2/settings.py | uiautomator2/settings.py | # coding: utf-8
#
from typing import Any
import uiautomator2 as u2
class Settings(object):
    """Typed key/value store for device options.

    Known keys and their defaults:
      post_delay      -- 0 (int or float accepted)
      implicitly_wait -- 20.0 (int or float accepted)
    """

    def __init__(self, d: "u2.Device" = None):
        # The annotation is a string so the circular uiautomator2 import
        # is not evaluated at definition time.
        self._d = d
        self._defaults = {
            "post_delay": 0,
            "implicitly_wait": 20.0,
        }
        # Accepted value types per key.  Fix: these must be tuples (or a
        # single type) -- isinstance() raises TypeError when handed a
        # list, which made every Settings.set() call fail.
        self._props = {
            "post_delay": (float, int),
            "implicitly_wait": (float, int),
        }
        for k, v in self._defaults.items():
            if k not in self._props:
                self._props[k] = type(v)

    def get(self, key: str) -> Any:
        """Return the current value for key (None if the key is unknown)."""
        return self._defaults.get(key)

    def set(self, key: str, val: Any):
        """Set key to val.

        Raises AttributeError for unknown keys and TypeError for values of
        an unaccepted type.
        """
        if key not in self._props:
            raise AttributeError("invalid attribute", key)
        if not isinstance(val, self._props[key]):
            # Fix: wrap the tuple so %-formatting does not unpack it.
            raise TypeError("invalid type, only accept: %s" % (self._props[key],))
        self._defaults[key] = val

    def __setitem__(self, key: str, val: Any):
        self.set(key, val)

    def __getitem__(self, key: str) -> Any:
        return self.get(key)
if __name__ == "__main__":
    # Demo of the Settings API.  Fix: the original used "pre_delay",
    # which is not a registered key and raises AttributeError.
    settings = Settings()
    settings.set("post_delay", 10)
    print(settings['post_delay'])
    settings["implicitly_wait"] = 10
| mit | Python | |
4dac5069084e90a0c4b0fd12e763e92df79f31c5 | rename ds_justification_reason to justification_reason - add migration | unicef/un-partner-portal,unicef/un-partner-portal,unicef/un-partner-portal,unicef/un-partner-portal | backend/unpp_api/apps/project/migrations/0017_auto_20170915_0734.py | backend/unpp_api/apps/project/migrations/0017_auto_20170915_0734.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-09-15 07:34
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Rename Application.ds_justification_reason to justification_reason."""

    dependencies = [
        ('project', '0016_remove_application_agency'),
    ]

    operations = [
        migrations.RenameField(
            model_name='application',
            old_name='ds_justification_reason',
            new_name='justification_reason',
        ),
    ]
| apache-2.0 | Python | |
d7f024bc47c362afc6930510dea3bc425d5b554a | create example_fabfile | DNX/pg_fabrep | pg_fabrep/example_fabfile.py | pg_fabrep/example_fabfile.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from fabric.api import env, task
from pg_fabrep.tasks import *
@task
def example_cluster():
    """Fabric task defining the settings for the 'example_cluster' cluster."""
    # Name of your cluster - no spaces, no special characters.
    env.cluster_name = 'example_cluster'
    # Always ask the user for confirmation when running any task.
    # Default: True
    #env.ask_confirmation = True
| bsd-3-clause | Python | |
c4243483052ec7eec2f1f88ea72fafc953d35648 | Add ptxgen sample | nvidia-compiler-sdk/pynvvm | samples/ptxgen.py | samples/ptxgen.py | # Copyright (c) 2013 NVIDIA Corporation
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
This sample illustrates a simple LLVM IR -> PTX compiler implemented using
libNVVM. All command-line options are passed along to libNVVM. Arguments that
start with '-' are assumed to be options and are passed along accordingly.
Otherwise, options are treated as file names and are read as IR input(s).
"""
import sys
from pynvvm.compiler import Program, ProgramException
# At least one argument (an IR file or libNVVM option) is required.
if len(sys.argv) < 2:
    print('Usage: %s ir-file-1 [ir-file-2 [ir-file-3 ...]]' % sys.argv[0])
    sys.exit(1)

try:
    p = Program()
    options = []
    # Arguments starting with '-' are passed through to libNVVM as
    # options; everything else is read as an LLVM IR input module.
    for a in sys.argv[1:]:
        if a.startswith('-'):
            options.append(a)
        else:
            with open(a, 'rb') as f:
                p.add_module(f.read())
    ptx = p.compile(options)
    print(ptx)
except ProgramException as e:
    print('ERROR:\n%s\n' % repr(e))
| mit | Python | |
ebc417be95bcec7b7a25dc1ad587f17b1bfa521d | Add download_student_forms | rhyolight/nupic.son,rhyolight/nupic.son,rhyolight/nupic.son | scripts/download_student_forms.py | scripts/download_student_forms.py | #!/usr/bin/env python2.5
#
# Copyright 2011 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Downloads student forms.
"""
__authors__ = [
'"Sverre Rabbelier" <sverre@rabbelier.nl>',
]
import optparse
import os
import shutil
import interactive
# Command-line interface: a single positional app_id plus -o/--output dir.
parser = optparse.OptionParser(usage="usage: %prog [options] app_id")
parser.add_option("-o", "--output", dest="outputdir", default="forms",
                  help="write files to target DIR", metavar="DIR")
def downloadStudentForms(options):
  """Download the tax form of every US student with exactly one project.

  Files are written into options.outputdir as <link_id><original ext>.
  """
  from google.appengine.ext import db
  from soc.modules.gsoc.models.profile import GSoCStudentInfo
  from soc.modules.gsoc.views.helper import lists as list_helper
  # Query for students with exactly one project; only those that actually
  # uploaded a tax form are kept below.
  q = lambda: GSoCStudentInfo.all().filter('number_of_projects', 1)
  outputdir = os.path.abspath(options.outputdir)
  if not os.path.exists(outputdir):
    os.mkdir(outputdir)
  if not os.path.isdir(outputdir):
    # NOTE(review): execution continues after this message, so the writes
    # below will fail anyway -- consider returning here.
    print "Could not create output dir: %s" % outputdir
  print "Fetching StudentInfo..."
  students = list(i for i in interactive.deepFetch(q) if i.tax_form)
  keys = list_helper.collectParentKeys(students)
  keys = list(set(keys))
  prefetched = {}
  # Fetch the parent Profile entities in batches of 100 datastore keys.
  print "Fetching Profile..."
  for i in xrange(0, len(keys), 100):
    chunk = keys[i:i+100]
    entities = db.get(chunk)
    prefetched.update(dict((i.key(), i) for i in entities if i))
  list_helper.distributeParentKeys(students, prefetched)
  countries = ['United States']
  us_students = [i for i in students if i.parent().res_country in countries]
  for student in us_students:
    form = student.tax_form
    _, ext = os.path.splitext(form.filename)
    path = os.path.join(outputdir, student.parent().link_id + ext)
    dst = open(path, "w")
    src = form.open()
    shutil.copyfileobj(src, dst)
    print "Downloading form to '%s'..." % path
  print "Done."
def main():
  """Parse arguments, connect to the given app, and download the forms."""
  options, args = parser.parse_args()
  if len(args) < 1:
    parser.error("Missing app_id")
  if len(args) > 1:
    parser.error("Too many arguments")
  # Presumably configures the environment and the remote API connection
  # for app_id -- see interactive.py; TODO confirm.
  interactive.setup()
  interactive.setupRemote(args[0])
  downloadStudentForms(options)

if __name__ == '__main__':
  main()
| apache-2.0 | Python | |
3b67d7919affb47e79a8b7cd5dab5f226e96eb86 | Update IDTools (#1547) | artefactual/archivematica,artefactual/archivematica,artefactual/archivematica,artefactual/archivematica | src/dashboard/src/fpr/migrations/0033_update_idtools.py | src/dashboard/src/fpr/migrations/0033_update_idtools.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
def data_migration_up(apps, schema_editor):
    """Update identification tools FIDO and Siegfried to current
    versions, allowing for integration of PRONOM 96.
    """
    idtool = apps.get_model("fpr", "IDTool")
    idcommand = apps.get_model("fpr", "IDCommand")

    # Update FIDO tool
    idtool.objects.filter(uuid="c33c9d4d-121f-4db1-aa31-3d248c705e44").update(
        version="1.4.1", slug="fido-141"
    )

    # Find old FIDO command.
    old_fido_command = idcommand.objects.get(
        uuid="213d1589-c255-474f-81ac-f0a618181e40"
    )

    # Create new FIDO, but do not enable.  The old FIDO command stays
    # enabled; only Siegfried is switched over below.
    idcommand.objects.create(
        replaces=old_fido_command,
        uuid="ff2c0b52-741d-4f7a-9b52-ba3529051af3",
        description="Identify using Fido 1.4.1",
        config=old_fido_command.config,
        script=old_fido_command.script,
        script_type=old_fido_command.script_type,
        tool=idtool.objects.get(uuid="c33c9d4d-121f-4db1-aa31-3d248c705e44"),
        enabled=False,
    )

    # Update Siegfried tool.
    idtool.objects.filter(uuid="454df69d-5cc0-49fc-93e4-6fbb6ac659e7").update(
        version="1.8.0", slug="siegfried-180"
    )

    # Find old Siegfried command and disable it.
    old_siegfried_command = idcommand.objects.get(
        uuid="75290b14-2931-455f-bdde-3b4b3f8b7f15"
    )
    old_siegfried_command.enabled = False
    old_siegfried_command.save()

    # Create new command using the new version of Siegfried
    idcommand.objects.create(
        replaces=old_siegfried_command,
        uuid="9402ad69-f045-4d0a-8042-9c990645910a",
        description="Identify using Siegfried 1.8.0",
        config=old_siegfried_command.config,
        script=old_siegfried_command.script,
        script_type=old_siegfried_command.script_type,
        tool=idtool.objects.get(uuid="454df69d-5cc0-49fc-93e4-6fbb6ac659e7"),
        enabled=True,
    )
def data_migration_down(apps, schema_editor):
    """Revert FIDO and Siegfried to previous versions."""
    idtool = apps.get_model("fpr", "IDTool")
    idcommand = apps.get_model("fpr", "IDCommand")

    # Remove the ID commands created by the upgrade.
    idcommand.objects.filter(uuid="ff2c0b52-741d-4f7a-9b52-ba3529051af3").delete()
    idcommand.objects.filter(uuid="9402ad69-f045-4d0a-8042-9c990645910a").delete()

    # Revert Fido tool
    idtool.objects.filter(uuid="c33c9d4d-121f-4db1-aa31-3d248c705e44").update(
        version="1.3.12", slug="fido-1312"
    )

    # Revert Siegfried tool
    idtool.objects.filter(uuid="454df69d-5cc0-49fc-93e4-6fbb6ac659e7").update(
        version="1.7.6", slug="siegfried-176"
    )

    # Re-enable the Siegfried command that the upgrade disabled.
    # Fix: this previously targeted uuid df074736-..., but the upgrade
    # disables 75290b14-..., leaving the original command switched off.
    idcommand.objects.filter(uuid="75290b14-2931-455f-bdde-3b4b3f8b7f15").update(
        enabled=True
    )
class Migration(migrations.Migration):
    """Run the FIDO/Siegfried tool updates on top of the PRONOM 96 data."""

    dependencies = [("fpr", "0032_pronom_96")]

    operations = [migrations.RunPython(data_migration_up, data_migration_down)]
| agpl-3.0 | Python | |
013d793c6ebe7a4d426d6c2d823510f90b84d19e | Add a landmine to get rid of obselete test netscape plugins | bright-sparks/chromium-spacewalk,chuan9/chromium-crosswalk,bright-sparks/chromium-spacewalk,mohamed--abdel-maksoud/chromium.src,krieger-od/nwjs_chromium.src,hgl888/chromium-crosswalk-efl,axinging/chromium-crosswalk,jaruba/chromium.src,Jonekee/chromium.src,Pluto-tv/chromium-crosswalk,anirudhSK/chromium,ltilve/chromium,ltilve/chromium,chuan9/chromium-crosswalk,dushu1203/chromium.src,dednal/chromium.src,fujunwei/chromium-crosswalk,M4sse/chromium.src,TheTypoMaster/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,Chilledheart/chromium,littlstar/chromium.src,Jonekee/chromium.src,anirudhSK/chromium,anirudhSK/chromium,Just-D/chromium-1,crosswalk-project/chromium-crosswalk-efl,Jonekee/chromium.src,ChromiumWebApps/chromium,krieger-od/nwjs_chromium.src,ondra-novak/chromium.src,littlstar/chromium.src,ondra-novak/chromium.src,anirudhSK/chromium,ChromiumWebApps/chromium,M4sse/chromium.src,ondra-novak/chromium.src,hgl888/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,PeterWangIntel/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,Just-D/chromium-1,crosswalk-project/chromium-crosswalk-efl,ltilve/chromium,axinging/chromium-crosswalk,ChromiumWebApps/chromium,fujunwei/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,markYoungH/chromium.src,littlstar/chromium.src,anirudhSK/chromium,ChromiumWebApps/chromium,ChromiumWebApps/chromium,ChromiumWebApps/chromium,crosswalk-project/chromium-crosswalk-efl,Jonekee/chromium.src,fujunwei/chromium-crosswalk,Pluto-tv/chromium-crosswalk,krieger-od/nwjs_chromium.src,fujunwei/chromium-crosswalk,Fireblend/chromium-crosswalk,dushu1203/chromium.src,krieger-od/nwjs_chromium.src,bright-sparks/chromium-spacewalk,dednal/chromium.src,patrickm/chromium.src,bright-sparks/chromium-spacewalk,mohamed--abdel-maksoud/chromium.src,PeterWangIntel/chromium-crosswalk,hgl888/chromium-crosswalk-efl,TheTypoMaster/chromium-crosswalk,Just-D/chro
mium-1,jaruba/chromium.src,bright-sparks/chromium-spacewalk,jaruba/chromium.src,Pluto-tv/chromium-crosswalk,anirudhSK/chromium,TheTypoMaster/chromium-crosswalk,axinging/chromium-crosswalk,patrickm/chromium.src,fujunwei/chromium-crosswalk,Jonekee/chromium.src,Jonekee/chromium.src,Chilledheart/chromium,chuan9/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,Just-D/chromium-1,hgl888/chromium-crosswalk,hgl888/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,krieger-od/nwjs_chromium.src,M4sse/chromium.src,Jonekee/chromium.src,fujunwei/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,dednal/chromium.src,littlstar/chromium.src,crosswalk-project/chromium-crosswalk-efl,dushu1203/chromium.src,Fireblend/chromium-crosswalk,M4sse/chromium.src,Fireblend/chromium-crosswalk,hgl888/chromium-crosswalk-efl,ltilve/chromium,markYoungH/chromium.src,ltilve/chromium,crosswalk-project/chromium-crosswalk-efl,dednal/chromium.src,Just-D/chromium-1,fujunwei/chromium-crosswalk,ondra-novak/chromium.src,PeterWangIntel/chromium-crosswalk,Just-D/chromium-1,markYoungH/chromium.src,ltilve/chromium,chuan9/chromium-crosswalk,anirudhSK/chromium,M4sse/chromium.src,hgl888/chromium-crosswalk-efl,hgl888/chromium-crosswalk-efl,fujunwei/chromium-crosswalk,Pluto-tv/chromium-crosswalk,markYoungH/chromium.src,littlstar/chromium.src,jaruba/chromium.src,jaruba/chromium.src,axinging/chromium-crosswalk,markYoungH/chromium.src,crosswalk-project/chromium-crosswalk-efl,jaruba/chromium.src,Chilledheart/chromium,markYoungH/chromium.src,Pluto-tv/chromium-crosswalk,hgl888/chromium-crosswalk,patrickm/chromium.src,Chilledheart/chromium,ChromiumWebApps/chromium,PeterWangIntel/chromium-crosswalk,markYoungH/chromium.src,axinging/chromium-crosswalk,markYoungH/chromium.src,dednal/chromium.src,markYoungH/chromium.src,Just-D/chromium-1,littlstar/chromium.src,ondra-novak/chromium.src,bright-sparks/chromium-spacewalk,bright-sparks/chromium-spacewalk,axinging/chromium-crosswalk,p
atrickm/chromium.src,dushu1203/chromium.src,axinging/chromium-crosswalk,Fireblend/chromium-crosswalk,bright-sparks/chromium-spacewalk,ChromiumWebApps/chromium,mohamed--abdel-maksoud/chromium.src,dednal/chromium.src,jaruba/chromium.src,dushu1203/chromium.src,patrickm/chromium.src,hgl888/chromium-crosswalk-efl,jaruba/chromium.src,M4sse/chromium.src,krieger-od/nwjs_chromium.src,jaruba/chromium.src,jaruba/chromium.src,TheTypoMaster/chromium-crosswalk,hgl888/chromium-crosswalk-efl,TheTypoMaster/chromium-crosswalk,hgl888/chromium-crosswalk-efl,TheTypoMaster/chromium-crosswalk,axinging/chromium-crosswalk,hgl888/chromium-crosswalk,ltilve/chromium,ondra-novak/chromium.src,hgl888/chromium-crosswalk,fujunwei/chromium-crosswalk,Pluto-tv/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,Fireblend/chromium-crosswalk,hgl888/chromium-crosswalk,patrickm/chromium.src,krieger-od/nwjs_chromium.src,Fireblend/chromium-crosswalk,Chilledheart/chromium,krieger-od/nwjs_chromium.src,M4sse/chromium.src,M4sse/chromium.src,dushu1203/chromium.src,Fireblend/chromium-crosswalk,Pluto-tv/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,axinging/chromium-crosswalk,Chilledheart/chromium,ondra-novak/chromium.src,Chilledheart/chromium,markYoungH/chromium.src,chuan9/chromium-crosswalk,krieger-od/nwjs_chromium.src,patrickm/chromium.src,dushu1203/chromium.src,markYoungH/chromium.src,anirudhSK/chromium,anirudhSK/chromium,mohamed--abdel-maksoud/chromium.src,jaruba/chromium.src,hgl888/chromium-crosswalk-efl,Fireblend/chromium-crosswalk,chuan9/chromium-crosswalk,ltilve/chromium,mohamed--abdel-maksoud/chromium.src,M4sse/chromium.src,M4sse/chromium.src,krieger-od/nwjs_chromium.src,PeterWangIntel/chromium-crosswalk,dushu1203/chromium.src,patrickm/chromium.src,dednal/chromium.src,M4sse/chromium.src,TheTypoMaster/chromium-crosswalk,Just-D/chromium-1,ChromiumWebApps/chromium,Pluto-tv/chromium-crosswalk,axinging/chromium-crosswalk,anirudhSK/chromium,dednal/chromium.src,Fireblend/chromium-crosswalk,hgl888/chromi
um-crosswalk,anirudhSK/chromium,littlstar/chromium.src,TheTypoMaster/chromium-crosswalk,littlstar/chromium.src,axinging/chromium-crosswalk,dednal/chromium.src,Just-D/chromium-1,dushu1203/chromium.src,ondra-novak/chromium.src,crosswalk-project/chromium-crosswalk-efl,ltilve/chromium,bright-sparks/chromium-spacewalk,Pluto-tv/chromium-crosswalk,dednal/chromium.src,ondra-novak/chromium.src,chuan9/chromium-crosswalk,hgl888/chromium-crosswalk,dushu1203/chromium.src,anirudhSK/chromium,dednal/chromium.src,ChromiumWebApps/chromium,Jonekee/chromium.src,TheTypoMaster/chromium-crosswalk,Jonekee/chromium.src,Jonekee/chromium.src,Chilledheart/chromium,mohamed--abdel-maksoud/chromium.src,Chilledheart/chromium,ChromiumWebApps/chromium,chuan9/chromium-crosswalk,chuan9/chromium-crosswalk,hgl888/chromium-crosswalk-efl,mohamed--abdel-maksoud/chromium.src,ChromiumWebApps/chromium,krieger-od/nwjs_chromium.src,Jonekee/chromium.src,dushu1203/chromium.src,patrickm/chromium.src | build/get_landmines.py | build/get_landmines.py | #!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This file emits the list of reasons why a particular build needs to be clobbered
(or a list of 'landmines').
"""
import optparse
import sys
import landmine_utils
# Short aliases for the landmine_utils accessors used below.
builder = landmine_utils.builder
distributor = landmine_utils.distributor
gyp_defines = landmine_utils.gyp_defines
gyp_msvs_version = landmine_utils.gyp_msvs_version
platform = landmine_utils.platform
def print_landmines(target):
  """
  ALL LANDMINES ARE EMITTED FROM HERE.
  target can be one of {'Release', 'Debug', 'Debug_x64', 'Release_x64'}.

  NOTE(review): 'target' is currently unused by all of the checks below.
  """
  if (distributor() == 'goma' and platform() == 'win32' and
      builder() == 'ninja'):
    print 'Need to clobber winja goma due to backend cwd cache fix.'
  if platform() == 'android':
    print 'Clobber: Resources removed in r195014 require clobber.'
  if platform() == 'win' and builder() == 'ninja':
    print 'Compile on cc_unittests fails due to symbols removed in r185063.'
  if platform() == 'linux' and builder() == 'ninja':
    print 'Builders switching from make to ninja will clobber on this.'
  if platform() == 'mac':
    print 'Switching from bundle to unbundled dylib (issue 14743002).'
  if platform() in ('win', 'mac'):
    print ('Improper dependency for create_nmf.py broke in r240802, '
           'fixed in r240860.')
  if (platform() == 'win' and builder() == 'ninja' and
      gyp_msvs_version() == '2012' and
      gyp_defines().get('target_arch') == 'x64' and
      gyp_defines().get('dcheck_always_on') == '1'):
    print "Switched win x64 trybots from VS2010 to VS2012."
  if (platform() == 'win' and builder() == 'ninja' and
      gyp_msvs_version().startswith('2013')):
    print "Switched win from VS2010 to VS2013."
  # Unconditional landmine: emitted for every platform/builder combination.
  print 'Need to clobber everything due to an IDL change in r154579 (blink)'
  if (platform() != 'ios'):
    print 'Clobber to get rid of obselete test plugin after r248358'
def main():
parser = optparse.OptionParser()
parser.add_option('-t', '--target',
help=='Target for which the landmines have to be emitted')
options, args = parser.parse_args()
if args:
parser.error('Unknown arguments %s' % args)
print_landmines(options.target)
return 0
if __name__ == '__main__':
sys.exit(main())
| #!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This file emits the list of reasons why a particular build needs to be clobbered
(or a list of 'landmines').
"""
import optparse
import sys
import landmine_utils
# Convenience aliases for the landmine_utils accessors that describe the
# current build configuration (builder type, compile distributor, GYP flags).
builder = landmine_utils.builder
distributor = landmine_utils.distributor
gyp_defines = landmine_utils.gyp_defines
gyp_msvs_version = landmine_utils.gyp_msvs_version
platform = landmine_utils.platform
def print_landmines(target):
"""
ALL LANDMINES ARE EMITTED FROM HERE.
target can be one of {'Release', 'Debug', 'Debug_x64', 'Release_x64'}.
"""
if (distributor() == 'goma' and platform() == 'win32' and
builder() == 'ninja'):
print 'Need to clobber winja goma due to backend cwd cache fix.'
if platform() == 'android':
print 'Clobber: Resources removed in r195014 require clobber.'
if platform() == 'win' and builder() == 'ninja':
print 'Compile on cc_unittests fails due to symbols removed in r185063.'
if platform() == 'linux' and builder() == 'ninja':
print 'Builders switching from make to ninja will clobber on this.'
if platform() == 'mac':
print 'Switching from bundle to unbundled dylib (issue 14743002).'
if platform() in ('win', 'mac'):
print ('Improper dependency for create_nmf.py broke in r240802, '
'fixed in r240860.')
if (platform() == 'win' and builder() == 'ninja' and
gyp_msvs_version() == '2012' and
gyp_defines().get('target_arch') == 'x64' and
gyp_defines().get('dcheck_always_on') == '1'):
print "Switched win x64 trybots from VS2010 to VS2012."
if (platform() == 'win' and builder() == 'ninja' and
gyp_msvs_version().startswith('2013')):
print "Switched win from VS2010 to VS2013."
print 'Need to clobber everything due to an IDL change in r154579 (blink)'
def main():
parser = optparse.OptionParser()
parser.add_option('-t', '--target',
help=='Target for which the landmines have to be emitted')
options, args = parser.parse_args()
if args:
parser.error('Unknown arguments %s' % args)
print_landmines(options.target)
return 0
if __name__ == '__main__':
sys.exit(main())
| bsd-3-clause | Python |
5401eb7b463dfd9a807b86b7bdfa4079fc0cb2ac | Define basic regular expressions | Spirotot/taskwiki,phha/taskwiki | autoload/vimwiki_pytasks.py | autoload/vimwiki_pytasks.py | import vim
import re
from tasklib.task import TaskWarrior, Task
# Building blocks
BRACKET_OPENING = re.escape('* [')
BRACKET_CLOSING = re.escape('] ')
EMPTY_SPACE = r'(?P<space>\s*)'
TEXT = r'(?P<text>.+)'
UUID = r'(?P<uuid>[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12})'
DUE = r'(?P<due>\(\d{4}-\d\d-\d\d( \d\d:\d\d)?\))'
COMPLETION_MARK = r'(?P<completed>.)'
UUID_COMMENT = ' #{0}'.format(UUID)
# Middle building blocks
INCOMPLETE_TASK_PREFIX = EMPTY_SPACE + BRACKET_OPENING + '[^X]' + BRACKET_CLOSING + TEXT
# Final regexps
TASKS_TO_SAVE_TO_TW = ''.join([
INCOMPLETE_TASK_PREFIX, # any amount of whitespace followed by uncompleted square
# Any of the following:
'(',
UUID_COMMENT, # Task UUID
')?'
])
GENERIC_TASK = ''.join([
EMPTY_SPACE,
BRACKET_OPENING,
COMPLETION_MARK,
BRACKET_CLOSING,
TEXT,
'(', DUE, ')?' # Due is optional
'(', UUID_COMMENT, ')?' # UUID is optional, it can't be there for new tasks
])
# Debug leftover: dump the composed pattern to a file for manual inspection.
with open("vystup", 'w') as f:
    f.write(TASKS_TO_SAVE_TO_TW)
"""
How this plugin works:
1.) On startup, it reads all the tasks and syncs info TW -> Vimwiki file. Task is identified by their
uuid.
2.) When saving, the opposite sync is performed (Vimwiki -> TW direction).
a) if task is marked as subtask by indentation, the dependency is created between
"""
# Legacy Vim-flavoured pattern for an uncompleted task line.
# NOTE(review): in a non-raw Python string "\v" is the vertical-tab escape,
# not a literal backslash-v ('very magic' prefix); if this pattern is handed
# to Vim as text it probably needs to be a raw string -- confirm before use.
INCOMPLETE_TASK_REGEXP = (
"\v\* \[[^X]\].*" # any amount of whitespace followed by uncompleted square
# Any of the following:
"(\(\d{4}-\d\d-\d\d( \d\d:\d\d)?\)" # Timestamp
"|#TW\s*$" # Task indicator (insert this to have the task added)
"|#[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12})" # Task UUID
)
# Marker that flags a Vimwiki line as a task to be picked up by load_tasks().
TASK_REGEXP = '#TW'
# BUG FIX: this module imports the class directly ('from tasklib.task import
# TaskWarrior, Task'), so the previous 'task.TaskWarrior()' raised NameError.
tw = TaskWarrior()
class Random(object):
    """Scratch object demonstrating state shared across plugin calls."""
    # attr: mutable class-level string poked by the example functions below.
    attr = 'Ta dpc'


r = Random()


def get_task(uuid):
    """Return the TaskWarrior task identified by the given UUID."""
    return tw.tasks.get(uuid=uuid)
def load_tasks():
    """Echo every buffer line carrying the task marker, then poke r.attr."""
    marked = [line for line in vim.current.buffer
              if re.search(TASK_REGEXP, line)]
    for line in marked:
        vim.command('echom "%s"' % line)
    r.attr = 'Whoohoooo'


def RandomExample():
    """Echo a fixed message followed by the shared attribute's value."""
    vim.command('echom "volame daco"')
    vim.command('echom "%s"' % r.attr)


def RandomExample3():
    """Append a marker to the shared attribute, then report the call."""
    r.attr = r.attr + 'XO'
    vim.command('echom "Random example 3"')


if __name__ == '__main__':
    load_tasks()
| import vim
import re
from tasklib import task
"""
How this plugin works:
1.) On startup, it reads all the tasks and syncs info TW -> Vimwiki file. Task is identified by their
uuid.
2.) When saving, the opposite sync is performed (Vimwiki -> TW direction).
a) if task is marked as subtask by indentation, the dependency is created between
"""
# Vim-flavoured pattern for an uncompleted task line.
# NOTE(review): in a non-raw Python string "\v" is the vertical-tab escape,
# not a literal backslash-v ('very magic' prefix); if this pattern is handed
# to Vim as text it probably needs to be a raw string -- confirm before use.
INCOMPLETE_TASK_REGEXP = (
"\v\* \[[^X]\].*" # any amount of whitespace followed by uncompleted square
# Any of the following:
"(\(\d{4}-\d\d-\d\d( \d\d:\d\d)?\)" # Timestamp
"|#TW\s*$" # Task indicator (insert this to have the task added)
"|#[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12})" # Task UUID
)
# Marker that flags a Vimwiki line as a task to be picked up by load_tasks().
TASK_REGEXP = '#TW'
# Shared TaskWarrior connection (this copy imports the module as 'task').
tw = task.TaskWarrior()


class Random(object):
    """Scratch object demonstrating state shared across plugin calls."""
    attr = 'Ta dpc'


r = Random()


def get_task(uuid):
    """Return the TaskWarrior task identified by the given UUID."""
    return tw.tasks.get(uuid=uuid)


def load_tasks():
    """Echo every buffer line carrying the task marker, then poke r.attr."""
    marked = [line for line in vim.current.buffer
              if re.search(TASK_REGEXP, line)]
    for line in marked:
        vim.command('echom "%s"' % line)
    r.attr = 'Whoohoooo'


def RandomExample():
    """Echo a fixed message followed by the shared attribute's value."""
    vim.command('echom "volame daco"')
    vim.command('echom "%s"' % r.attr)


def RandomExample3():
    """Append a marker to the shared attribute, then report the call."""
    r.attr = r.attr + 'XO'
    vim.command('echom "Random example 3"')


if __name__ == '__main__':
    load_tasks()
| mit | Python |
08402e98f9eb56ab3b103e5bf36004638461f903 | Add koi7-to-utf8 script. | sergev/vak-opensource,sergev/vak-opensource,sergev/vak-opensource,sergev/vak-opensource,sergev/vak-opensource,sergev/vak-opensource,sergev/vak-opensource,sergev/vak-opensource,sergev/vak-opensource,sergev/vak-opensource,sergev/vak-opensource,sergev/vak-opensource,sergev/vak-opensource,sergev/vak-opensource,sergev/vak-opensource,sergev/vak-opensource,sergev/vak-opensource,sergev/vak-opensource,sergev/vak-opensource | languages/python/koi7-to-utf8.py | languages/python/koi7-to-utf8.py | #!/usr/bin/python
# -*- encoding: utf-8 -*-
#
# Перекодировка из семибитного кода КОИ-7 Н2
# (коды дисплея Videoton-340) в кодировку UTF-8.
# Copyright (C) 2016 Serge Vakulenko <vak@cronyx.ru>
#
import sys
if len(sys.argv) != 2:
print "Usage: koi7-to-utf8 file"
sys.exit (1)
translate = {
'`':'Ю', 'a':'А', 'b':'Б', 'c':'Ц', 'd':'Д', 'e':'Е', 'f':'Ф', 'g':'Г',
'h':'Х', 'i':'И', 'j':'Й', 'k':'К', 'l':'Л', 'm':'М', 'n':'Н', 'o':'О',
'p':'П', 'q':'Я', 'r':'Р', 's':'С', 't':'Т', 'u':'У', 'v':'Ж', 'w':'В',
'x':'Ь', 'y':'Ы', 'z':'З', '{':'Ш', '|':'Э', '}':'Щ', '~':'Ч',
}
# A Cyrillic letter was found -- substitute it in place.
def decode_index(i):
    """Replace the KOI-7 byte at position i of the global buffer with the
    corresponding Cyrillic letter from the translate table."""
    global body
    c = body[i]
    body = body[:i] + translate[c] + body[i+1:]
# Process the whole input file.
f = open(sys.argv[1])
body = f.read().encode("utf-8")

# Scan byte by byte; codes '`'..'~' are the KOI-7 Cyrillic positions.
# decode_index() may grow the buffer, but the replacement bytes fall outside
# that range, so a simple increment never re-processes them.
i = 0
while i < len(body):
    c = body[i]
    if '`' <= c <= '~':
        decode_index(i)
    i += 1
sys.stdout.write(body)
| apache-2.0 | Python | |
815845fd98627fe9df0b0444ee31fe337d1c63da | Add celery worker module | jmlong1027/multiscanner,mitre/multiscanner,jmlong1027/multiscanner,awest1339/multiscanner,jmlong1027/multiscanner,jmlong1027/multiscanner,mitre/multiscanner,mitre/multiscanner,MITRECND/multiscanner,awest1339/multiscanner,awest1339/multiscanner,awest1339/multiscanner,MITRECND/multiscanner | utils/celery_worker.py | utils/celery_worker.py | import os
import sys
# Append .. to sys path so that the multiscanner package (one directory up)
# is importable when this module runs as a standalone Celery worker.
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import multiscanner
from celery import Celery
from celery.contrib.batches import Batches
# Celery application; tasks are queued through the local RabbitMQ broker.
app = Celery('celery_worker', broker='pyamqp://guest@localhost//')
@app.task(base=Batches, flush_every=100, flush_interval=10)
def multiscanner_celery(filelist, config=multiscanner.CONFIG):
    '''
    Scan a list of files with multiscanner and persist the parsed reports
    to the storage backend configured in storage.ini.

    TODO: Add other ars + config options...

    Usage:
    from celery_worker import multiscanner_celery
    multiscanner_celery.delay([list, of, files, to, scan])
    '''
    storage_conf = multiscanner.common.get_storage_config_path(config)
    handler = multiscanner.storage.StorageHandler(configfile=storage_conf)
    # Run the scan, normalise the reports to plain Python objects, store them.
    raw_reports = multiscanner.multiscan(filelist, configfile=config)
    results = multiscanner.parse_reports(raw_reports, python=True)
    handler.store(results, wait=False)
    handler.close()
    return results
| mpl-2.0 | Python | |
60e37ece40e96ecd9bba16b72cdb64e1eb6f8f77 | Fix purge_cluster script | enthought/distarray,enthought/distarray,RaoUmer/distarray,RaoUmer/distarray | utils/purge_cluster.py | utils/purge_cluster.py | # encoding: utf-8
# ---------------------------------------------------------------------------
# Copyright (C) 2008-2014, IPython Development Team and Enthought, Inc.
# Distributed under the terms of the BSD License. See COPYING.rst.
# ---------------------------------------------------------------------------
""" Simple utility to clean out existing namespaces on engines. """
from __future__ import print_function
import sys
from distarray.context import Context
def dump():
    """ Print out key names that exist on the engines. """
    keylist = Context().dump_keys(all_other_contexts=True)
    print('*** %d ENGINE KEYS ***' % (len(keylist)))
    for key, targets in keylist:
        print('%s : %r' % (key, targets))
def purge():
    """ Remove keys from the engine namespaces. """
    print('Purging keys from engines...')
    Context().purge_keys(all_other_contexts=True)
if __name__ == '__main__':
    cmd = sys.argv[1]
    # Dispatch table keeps the valid sub-commands in one place.
    commands = {'dump': dump, 'purge': purge}
    if cmd not in commands:
        raise ValueError("%s command not found" % (cmd,))
    commands[cmd]()
| # encoding: utf-8
# ---------------------------------------------------------------------------
# Copyright (C) 2008-2014, IPython Development Team and Enthought, Inc.
# Distributed under the terms of the BSD License. See COPYING.rst.
# ---------------------------------------------------------------------------
""" Simple utility to clean out existing namespaces on engines. """
from __future__ import print_function
import sys
from distarray.context import Context
def dump():
    """ Print out key names that exist on the engines. """
    context = Context()
    keylist = context.dump_keys(all_other_contexts=True)
    header = '*** %d ENGINE KEYS ***' % (len(keylist))
    print(header)
    for key, targets in keylist:
        print('%s : %r' % (key, targets))
def purge():
    """ Remove keys from the engine namespaces. """
    print('Purging keys from engines...')
    context = Context()
    # BUG FIX: this revision called context.cleanup(); the corrected
    # revision of this script uses Context.purge_keys(), which is the
    # API that actually removes the engine keys.
    context.purge_keys(all_other_contexts=True)
if __name__ == '__main__':
    # First positional argument selects the sub-command.
    cmd = sys.argv[1]
    if cmd == 'dump':
        dump()
    elif cmd == 'purge':
        purge()
    else:
        raise ValueError("%s command not found" % (cmd,))
| bsd-3-clause | Python |
43629166927a0e6e7f4648a165ce12e22b32508d | Add missing migration for DiscoveryItem (#15913) | bqbn/addons-server,bqbn/addons-server,wagnerand/addons-server,bqbn/addons-server,mozilla/olympia,mozilla/olympia,mozilla/olympia,mozilla/addons-server,diox/olympia,wagnerand/addons-server,mozilla/addons-server,mozilla/addons-server,mozilla/olympia,diox/olympia,wagnerand/addons-server,diox/olympia,mozilla/addons-server,wagnerand/addons-server,bqbn/addons-server,diox/olympia | src/olympia/discovery/migrations/0010_auto_20201104_1424.py | src/olympia/discovery/migrations/0010_auto_20201104_1424.py | # Generated by Django 2.2.16 on 2020-11-04 14:24
from django.db import migrations
class Migration(migrations.Migration):
    """Drop the obsolete custom_addon_name / custom_heading columns."""

    dependencies = [
        ('discovery', '0009_auto_20201027_1903'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='discoveryitem',
            name='custom_addon_name',
        ),
        migrations.RemoveField(
            model_name='discoveryitem',
            name='custom_heading',
        ),
    ]
| bsd-3-clause | Python | |
c5f91aa604ccca0966be3076c46385d6019b65f2 | Add utils refresh_db | odtvince/APITaxi,openmaraude/APITaxi,odtvince/APITaxi,odtvince/APITaxi,l-vincent-l/APITaxi,odtvince/APITaxi,l-vincent-l/APITaxi,openmaraude/APITaxi | APITaxi/utils/refresh_db.py | APITaxi/utils/refresh_db.py | # -*- coding: utf-8 -*-
#Source: http://dogpilecache.readthedocs.org/en/latest/usage.html
from threading import Thread

from sqlalchemy import event
from sqlalchemy.orm import Session
def cache_refresh(session, refresher, *args, **kwargs):
    """
    Refresh the functions cache data in a new thread. Starts refreshing only
    after the session was committed so all database data is available.
    """
    assert isinstance(session, Session), \
        "Need a session, not a sessionmaker or scoped_session"

    @event.listens_for(session, "after_commit")
    def do_refresh(session):
        # BUG FIX: Thread was used here without ever being imported, so this
        # listener raised NameError on the first commit; the import is now
        # added at the top of the module.
        t = Thread(target=refresher, args=args, kwargs=kwargs)
        t.daemon = True  # never block interpreter shutdown on a refresh
        t.start()
| agpl-3.0 | Python |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.