commit stringlengths 40 40 | subject stringlengths 1 1.49k | old_file stringlengths 4 311 | new_file stringlengths 4 311 | new_contents stringlengths 1 29.8k | old_contents stringlengths 0 9.9k | lang stringclasses 3 values | proba float64 0 1 |
|---|---|---|---|---|---|---|---|
f6d3c63a0131a7532a091c1cc492ef7d7c84263e | Access realm alias objects in lower-case. | zerver/management/commands/realm_alias.py | zerver/management/commands/realm_alias.py | from __future__ import absolute_import
from __future__ import print_function
from typing import Any
from argparse import ArgumentParser
from django.core.management.base import BaseCommand
from zerver.models import Realm, RealmAlias, get_realm, can_add_alias
from zerver.lib.actions import realm_aliases
import sys
class Command(BaseCommand):
help = """Manage aliases for the specified realm"""
def add_arguments(self, parser):
# type: (ArgumentParser) -> None
parser.add_argument('-r', '--realm',
dest='domain',
type=str,
required=True,
help='The name of the realm.')
parser.add_argument('--op',
dest='op',
type=str,
default="show",
help='What operation to do (add, show, remove).')
parser.add_argument('alias', metavar='<alias>', type=str, nargs='?',
help="alias to add or remove")
def handle(self, *args, **options):
# type: (*Any, **str) -> None
realm = get_realm(options["domain"])
if options["op"] == "show":
print("Aliases for %s:" % (realm.domain,))
for alias in realm_aliases(realm):
print(alias)
sys.exit(0)
alias = options['alias'].lower()
if options["op"] == "add":
if not can_add_alias(alias):
print("A Realm already exists for this domain, cannot add it as an alias for another realm!")
sys.exit(1)
RealmAlias.objects.create(realm=realm, domain=alias)
sys.exit(0)
elif options["op"] == "remove":
RealmAlias.objects.get(realm=realm, domain=alias).delete()
sys.exit(0)
else:
self.print_help("python manage.py", "realm_alias")
sys.exit(1)
| from __future__ import absolute_import
from __future__ import print_function
from typing import Any
from argparse import ArgumentParser
from django.core.management.base import BaseCommand
from zerver.models import Realm, RealmAlias, get_realm, can_add_alias
from zerver.lib.actions import realm_aliases
import sys
class Command(BaseCommand):
help = """Manage aliases for the specified realm"""
def add_arguments(self, parser):
# type: (ArgumentParser) -> None
parser.add_argument('-r', '--realm',
dest='domain',
type=str,
required=True,
help='The name of the realm.')
parser.add_argument('--op',
dest='op',
type=str,
default="show",
help='What operation to do (add, show, remove).')
parser.add_argument('alias', metavar='<alias>', type=str, nargs='?',
help="alias to add or remove")
def handle(self, *args, **options):
# type: (*Any, **str) -> None
realm = get_realm(options["domain"])
if options["op"] == "show":
print("Aliases for %s:" % (realm.domain,))
for alias in realm_aliases(realm):
print(alias)
sys.exit(0)
alias = options['alias']
if options["op"] == "add":
if not can_add_alias(alias):
print("A Realm already exists for this domain, cannot add it as an alias for another realm!")
sys.exit(1)
RealmAlias.objects.create(realm=realm, domain=alias)
sys.exit(0)
elif options["op"] == "remove":
RealmAlias.objects.get(realm=realm, domain=alias).delete()
sys.exit(0)
else:
self.print_help("python manage.py", "realm_alias")
sys.exit(1)
| Python | 0 |
a6d958b7c29f11014ed322b9f153e8ad0c1a2cda | Add local server. | runserver.py | runserver.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from flask_rest_service import app
app.run(debug=True)
| Python | 0 | |
40b0f0cb42b14d79fc0cd4451b592a6933b436e4 | Add Python script to generate AOM CTC-formatted CSV files. | csv_export.py | csv_export.py | #!/usr/bin/env python
import argparse
import json
import os
import csv
import sys
from numpy import *
#offset by 3
met_index = {'PSNR': 0, 'PSNRHVS': 1, 'SSIM': 2, 'FASTSSIM': 3, 'CIEDE2000': 4,
'PSNR Cb': 5, 'PSNR Cr': 6, 'APSNR': 7, 'APSNR Cb': 8, 'APSNR Cr':9,
'MSSSIM':10, 'Encoding Time':11, 'VMAF_old':12, 'Decoding Time': 13,
"PSNR Y (libvmaf)": 14, "PSNR Cb (libvmaf)": 15, "PSNR Cr (libvmaf)": 16,
"CIEDE2000 (libvmaf)": 17, "SSIM (libvmaf)": 18, "MS-SSIM (libvmaf)": 19,
"PSNR-HVS Y (libvmaf)": 20, "PSNR-HVS Cb (libvmaf)": 21, "PSNR-HVS Cr (libvmaf)": 22,
"PSNR-HVS (libvmaf)": 23, "VMAF": 24, "VMAF-NEG": 25,
"APSNR Y (libvmaf)": 26, "APSNR Cb (libvmaf)": 27, "APSNR Cr (libvmaf)": 28}
parser = argparse.ArgumentParser(description='Generate CTC CSV version of .out files')
parser.add_argument('run',nargs=1,help='Run folder')
args = parser.parse_args()
info_data = json.load(open(args.run[0]+'/info.json'))
task = info_data['task']
sets = json.load(open(os.path.join(os.getenv("CONFIG_DIR", "rd_tool"), "sets.json")))
videos = sets[task]["sources"]
w = csv.writer(sys.stdout, dialect='excel')
w.writerow(['Video', 'QP', 'Filesize', 'PSNR Y', 'PSNR U', 'PSNR V',
'SSIM', 'MS-SSIM', 'VMAF', 'nVMAF', 'PSNR-HVS Y', 'DE2K',
'APSNR Y', 'APSNR U', 'APSNR V', 'Enc T [s]', 'Dec T [s]'])
for video in videos:
a = loadtxt(os.path.join(args.run[0],task,video+'-daala.out'))
for row in a:
w.writerow([video,
row[0], #qp
row[2],# bitrate
row[met_index['PSNR Y (libvmaf)']+3],
row[met_index['PSNR Cb (libvmaf)']+3],
row[met_index['PSNR Cr (libvmaf)']+3],
row[met_index['SSIM (libvmaf)']+3],
row[met_index['MS-SSIM (libvmaf)']+3],
row[met_index['VMAF']+3],
row[met_index['VMAF-NEG']+3],
row[met_index['PSNR-HVS Y (libvmaf)']+3],
row[met_index['CIEDE2000 (libvmaf)']+3],
row[met_index['APSNR Y (libvmaf)']+3],
row[met_index['APSNR Cb (libvmaf)']+3],
row[met_index['APSNR Cr (libvmaf)']+3],
row[met_index['Encoding Time']+3],
row[met_index['Decoding Time']+3],
])
| Python | 0 | |
a00dc9b0b1779ee8218917bca4c75823081b7854 | Add migration file for new database model | InvenTree/part/migrations/0072_bomitemsubstitute.py | InvenTree/part/migrations/0072_bomitemsubstitute.py | # Generated by Django 3.2.5 on 2021-10-12 23:24
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('part', '0071_alter_partparametertemplate_name'),
]
operations = [
migrations.CreateModel(
name='BomItemSubstitute',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('bom_item', models.ForeignKey(help_text='Parent BOM item', on_delete=django.db.models.deletion.CASCADE, related_name='substitutes', to='part.bomitem', verbose_name='BOM Item')),
('part', models.ForeignKey(help_text='Substitute part', limit_choices_to={'component': True}, on_delete=django.db.models.deletion.CASCADE, related_name='substitute_items', to='part.part', verbose_name='Part')),
],
),
]
| Python | 0 | |
88087c9416103ae7f56749f59cdfabcd19fb14ab | Add a snippet. | python/notion_api/update_a_page_and_its_icon.py | python/notion_api/update_a_page_and_its_icon.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#################################################################
# Install the Python requests library: pip install requests
# http://docs.python-requests.org/en/master/user/quickstart/
#################################################################
# Src: https://developers.notion.com/reference/patch-page
import requests
import json
with open("NOTION_SECRET_TOKEN", "r") as fd:
NOTION_TOKEN = fd.read().strip()
with open("NOTION_DB_ID", "r") as fd:
NOTION_DB_ID = fd.read().strip()
with open("NOTION_PAGE_ID", "r") as fd:
NOTION_PAGE_ID = fd.read().strip()
REQUEST_URL = f"https://api.notion.com/v1/pages/{NOTION_PAGE_ID}"
HEADER_DICT = {
"Authorization": f"Bearer {NOTION_TOKEN}",
"Content-Type": "application/json",
"Notion-Version": "2021-08-16"
}
DATA_DICT = {
"icon": {
"type": "emoji",
"emoji": "\ud83d\udfe0"
},
"properties": {
"Score": {
"rich_text": [
{
"text": {
"content": "Top!"
}
}
]
}
}
}
resp = requests.patch(REQUEST_URL, headers=HEADER_DICT, data=json.dumps(DATA_DICT))
print(json.dumps(resp.json(), sort_keys=False, indent=4))
#with open("db.json", "w") as fd:
# #json.dump(data, fd) # no pretty print
# json.dump(issue_list, fd, sort_keys=False, indent=4) # pretty print format | Python | 0.000002 | |
a962de79938c73b5c0e0459be7b82265bde76b40 | Test case for LSPI on gridworld. | cases/gridworld/lspi.py | cases/gridworld/lspi.py | #!/usr/bin/env python
__author__ = "William Dabney"
from Domains import GridWorld
from Tools import Logger
from Agents import LSPI
from Representations import Tabular
from Policies import eGreedy
from Experiments import Experiment
def make_experiment(id=1, path="./Results/Temp"):
"""
Each file specifying an experimental setup should contain a
make_experiment function which returns an instance of the Experiment
class with everything set up.
@param id: number used to seed the random number generators
@param path: output directory where logs and results are stored
"""
# Experiment variables
max_steps = 10000
num_policy_checks = 10
## Logging
logger = Logger()
## Domain:
# MAZE = '/Domains/GridWorldMaps/1x3.txt'
maze = './Domains/GridWorldMaps/4x5.txt'
domain = GridWorld(maze, noise=0.3, logger=logger)
## Representation
representation = Tabular(domain, logger, discretization=20)
## Policy
policy = eGreedy(representation, logger, epsilon=0.1)
## Agent
agent = LSPI(representation, policy, domain,
logger, max_steps, max_steps/num_policy_checks)
experiment = Experiment(**locals())
return experiment
if __name__ == '__main__':
path = "./Results/Temp/{domain}/{agent}/{representation}/"
experiment = make_experiment(1, path=path)
experiment.run(visualize_steps=False, # should each learning step be shown?
visualize_learning=False, # show performance runs?
visualize_performance=True) # show value function?
experiment.plot()
experiment.save()
| Python | 0 | |
72559b02424b933322b2e5c6c9873a8a6b63ef78 | Add eclipse update script | environments/auto/macos/bin/eclipse-update.py | environments/auto/macos/bin/eclipse-update.py | #!/usr/bin/python
import os
import subprocess
import sys
p2repositoryLocations = [
"http://download.eclipse.org/eclipse/updates/4.7",
"http://download.eclipse.org/releases/oxygen",
"http://dist.springsource.com/release/TOOLS/update/e4.7/",
"http://jeeeyul.github.io/update/",
"http://andrei.gmxhome.de/eclipse/",
"http://www.nodeclipse.org/updates/markdown/",
"http://plantuml.sourceforge.net/updatesitejuno/",
"http://winterwell.com/software/updatesite/",
"https://raw.githubusercontent.com/satyagraha/gfm_viewer/master/p2-composite/"
]
p2features = [
# Feature groups
"org.eclipse.egit" + ".feature.group",
"org.eclipse.jgit" + ".feature.group",
"org.eclipse.epp.mpc" + ".feature.group",
"org.eclipse.wst.common.fproj" + ".feature.group",
"org.eclipse.jst.common.fproj.enablement.jdt" + ".feature.group",
"org.eclipse.jst.enterprise_ui.feature" + ".feature.group",
"org.eclipse.wst.jsdt.feature" + ".feature.group",
"org.eclipse.wst.json_ui.feature" + ".feature.group",
"org.eclipse.wst.web_ui.feature" + ".feature.group",
"org.eclipse.wst.xml_ui.feature" + ".feature.group",
"org.eclipse.wst.xml.xpath2.processor.feature" + ".feature.group",
"org.eclipse.wst.xsl.feature" + ".feature.group",
"org.eclipse.fx.ide.css.feature" + ".feature.group",
"org.eclipse.m2e.logback.feature" + ".feature.group",
"org.eclipse.m2e.feature" + ".feature.group",
"org.eclipse.m2e.wtp.feature" + ".feature.group",
"org.eclipse.m2e.wtp.jaxrs.feature" + ".feature.group",
"org.eclipse.m2e.wtp.jpa.feature" + ".feature.group",
"org.sonatype.m2e.mavenarchiver.feature" + ".feature.group",
"org.springframework.ide.eclipse.feature" + ".feature.group",
"org.springframework.ide.eclipse.autowire.feature" + ".feature.group",
"org.springframework.ide.eclipse.boot.dash.feature" + ".feature.group",
"org.springframework.ide.eclipse.maven.feature" + ".feature.group",
"org.springframework.ide.eclipse.boot.feature" + ".feature.group",
"AnyEditTools" + ".feature.group",
# Direct plugins
"net.jeeeyul.eclipse.themes.ui",
"net.jeeeyul.eclipse.themes",
"net.sourceforge.plantuml.eclipse",
"winterwell.markdown",
"code.satyagraha.gfm.viewer.plugin"
]
if len(sys.argv) < 2:
sys.exit("Location of eclipse installation must be passed as first parameter!")
else:
eclipseApplicationDirectory = os.path.abspath(sys.argv[1])
eclipseBinaryCommandFile = os.path.join(eclipseApplicationDirectory, "Contents/MacOS/eclipse");
subprocess.call([
eclipseBinaryCommandFile,
"-profile", "SDKProfile",
"-noSplash", "-consolelog",
"-application", "org.eclipse.equinox.p2.director",
"-repository", ",".join(p2repositoryLocations),
"-installIU", ",".join(p2features),
"-destination", eclipseApplicationDirectory
])
| Python | 0 | |
b514cf783d53a5c713911729422239c9b0f0ff99 | Add automatic leak detection python script in examples | client/python/examples/edleak_autodetect.py | client/python/examples/edleak_autodetect.py | import sys
import rpc.ws
import edleak.api
import edleak.slice_runner
def usage():
print('autodetect [period] [duration]')
def print_leaker(leaker):
print('-------------------------------')
print('class : ' + leaker['leak_factor']['class'])
print('leak size : ' + str(leaker['leak_factor']['leak']))
print('call-stack: ')
for caller in leaker['stack']:
print(' ' + caller)
if __name__ == '__main__':
if len(sys.argv) != 3:
usage()
sys.exit(-1)
period = int(sys.argv[1])
duration = int(sys.argv[2])
ws_rpc = rpc.ws.WebService("localhost", 8080)
el = edleak.api.EdLeak(ws_rpc)
runner = edleak.slice_runner.SliceRunner(el)
# First run, to find the leakers
print('Starting 1st run...')
asset = runner.run(period, duration)
allocers = asset.getAllocerList()
leakers = [l for l in allocers if l['leak_factor']['leak'] > 0 and
(l['leak_factor']['class'] == 'linear' or
l['leak_factor']['class'] == 'exp')]
if len(leakers) == 0:
print('No leaks found.')
sys.exit(0)
print(str(len(leakers)) + ' leaks found. Starting 2nd run to retrieve callstacks...')
for leaker in leakers:
el.addStackWatch(leaker['id'])
asset = runner.run(period, duration)
allocers = asset.getAllocerList()
leakers = [l for l in allocers if l['leak_factor']['leak'] > 0 and
(l['leak_factor']['class'] == 'linear' or
l['leak_factor']['class'] == 'exp')]
for leaker in leakers:
if len(leaker['stack']) > 1:
print_leaker(leaker)
| Python | 0 | |
c7b756c69f3fce63208d1378ccee8d76e8574f3f | Add basic_bond_seed_file for 5 bonds. | bond_analytics_project/basic_bond_seed_file.py | bond_analytics_project/basic_bond_seed_file.py | import datetime
import os
import django
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'bond_analytics_project.settings')
django.setup()
from bondapi.models import Bond
test_bond_list = [
dict(name='test_bond_1', face_value=1000, annual_interest=27.5 * 2, annual_coupon_rate=5.50,
market_interest_rate=5.75, issue_date=datetime.date(1978, 1, 26), settlement_date=datetime.date(2007, 1, 26),
maturity_date=datetime.date(2008, 1, 26), term_to_maturity=1.0, bond_price=99.76, bond_valuation=997.6),
dict(name='test_bond_2', face_value=1000, annual_interest=37.5 * 2, annual_coupon_rate=7.50,
market_interest_rate=6.50, issue_date=datetime.date(1991, 2, 16), settlement_date=datetime.date(2007, 2, 16),
maturity_date=datetime.date(2012, 2, 16), term_to_maturity=5.0, bond_price=104.21, bond_valuation=1042.13),
dict(name='test_bond_3', face_value=1000, annual_interest=25.00 * 2, annual_coupon_rate=5.00,
market_interest_rate=7.00, issue_date=datetime.date(1999, 4, 9), settlement_date=datetime.date(2007, 1, 14),
maturity_date=datetime.date(2020, 4, 9), term_to_maturity=13.2, bond_price=82.92, bond_valuation=829.15),
dict(name='test_bond_4', face_value=1000, annual_interest=45.00 * 2, annual_coupon_rate=9.00,
market_interest_rate=7.25, issue_date=datetime.date(1987, 11, 6), settlement_date=datetime.date(2006, 7, 16),
maturity_date=datetime.date(2028, 11, 6), term_to_maturity=22.3, bond_price=119.22, bond_valuation=1192.16),
dict(name='test_bond_5', face_value=1000, annual_interest=35.00 * 2, annual_coupon_rate=7.00,
market_interest_rate=7.25, issue_date=datetime.date(2006, 10, 12), settlement_date=datetime.date(2006, 10, 13),
maturity_date=datetime.date(2036, 10, 13), term_to_maturity=30, bond_price=96.96, bond_valuation=969.58)
]
if __name__ == '__main__':
print('Resetting database.')
Bond.objects.all().delete()
print('Starting seeding.')
for bond in test_bond_list:
new_bond = Bond(
name=bond['name'],
face_value=bond['face_value'],
annual_payment_frequency=2,
annual_coupon_rate=bond['annual_coupon_rate'],
issue_date=bond['issue_date'],
settlement_date=bond['settlement_date'],
maturity_date=bond['maturity_date'],
)
new_bond.save()
print('Bond {} has been created and saved.'.format(new_bond.name))
print('Seeding complete.')
| Python | 0 | |
04da8d531267972554c6300c24a5a7b2c7def59d | add basic unit testing for appliance instances (incomplete) | tests/test_appliance_instance.py | tests/test_appliance_instance.py | import sys
sys.path.append('..')
import disaggregator as da
import unittest
import pandas as pd
import numpy as np
class ApplianceInstanceTestCase(unittest.TestCase):
def setUp(self):
indices = [pd.date_range('1/1/2013', periods=96, freq='15T'),
pd.date_range('1/2/2013', periods=96, freq='15T')]
data = [np.zeros(96),np.zeros(96)]
series = [pd.Series(d, index=i) for d,i in zip(data,indices)]
self.traces = [da.ApplianceTrace(s,{}) for s in series]
self.normal_instance = da.ApplianceInstance(self.traces)
def test_get_traces(self):
self.assertIsNotNone(self.normal_instance.get_traces(),
'instance should have traces')
if __name__ == "__main__":
unittest.main()
| Python | 0.000012 | |
879744e19cab5cc7357912ba670d200adfd58be6 | add aur-update | bumblebee_status/modules/contrib/aur-update.py | bumblebee_status/modules/contrib/aur-update.py | """Check updates for AUR.
Requires the following packages:
* yay (used as default)
Note - You can replace yay by changing the "yay -Qum"
command for your preferred AUR helper. Few examples:
paru -Qum
pikaur -Qua
rua upgrade --printonly
trizen -Su --aur --quiet
yay -Qum
contributed by `ishaanbhimwal <https://github.com/ishaanbhimwal>`_ - many thanks!
"""
import logging
import core.module
import core.widget
import core.decorators
import util.cli
class Module(core.module.Module):
@core.decorators.every(minutes=60)
def __init__(self, config, theme):
super().__init__(config, theme, core.widget.Widget(self.utilization))
self.background = True
self.__packages = 0
self.__error = False
@property
def __format(self):
return self.parameter("format", "Update AUR: {}")
def utilization(self, widget):
return self.__format.format(self.__packages)
def hidden(self):
return self.__packages == 0 and not self.__error
def update(self):
self.__error = False
code, result = util.cli.execute(
"yay -Qum", ignore_errors=True, return_exitcode=True
)
if code == 0:
self.__packages = len(result.strip().split("\n"))
elif code == 2:
self.__packages = 0
else:
self.__error = True
logging.error("yay -Qum exited with {}: {}".format(code, result))
def state(self, widget):
if self.__error:
return "warning"
return self.threshold_state(self.__packages, 1, 100)
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
| Python | 0.000001 | |
c6a0abe4f5cf3d1f54a10636b4c8882a2a1c9663 | hamster tracks, the beginnings. not usable without a running hamster applet | hamster_tracks.py | hamster_tracks.py | #!/usr/bin/env python
# - coding: utf-8 -
# Copyright (C) 2010 Toms Bauģis <toms.baugis at gmail.com>
"""An attempt to make an overview visualization. Consumes hamster d-bus API"""
import gtk
from lib import graphics
import dbus
import time, datetime as dt
from collections import defaultdict
HAMSTER_DBUS_PATH = "/org/gnome/Hamster"
HAMSTER_DBUS_IFACE = "org.gnome.Hamster"
class Scene(graphics.Scene):
def __init__(self):
graphics.Scene.__init__(self)
bus = dbus.SessionBus()
obj = bus.get_object(HAMSTER_DBUS_IFACE, HAMSTER_DBUS_PATH)
self.hamster = dbus.Interface(obj, "org.gnome.Hamster")
self.facts = self.get_facts()
self.day_counts = defaultdict(list)
activities, categories = defaultdict(int), defaultdict(int)
for fact in self.facts:
self.day_counts[fact['start_time'].date()].append(fact)
activities[fact['name']] += 1
categories[fact['category']] += 1
if fact['end_time'] and fact['start_time'].date() != fact['end_time'].date():
self.day_counts[fact['end_time'].date()].append(fact)
self.activities = [activity[0] for activity in sorted(activities.items(), key=lambda item:item[1], reverse=True)]
self.categories = categories.keys()
self.connect("on-enter-frame", self.on_enter_frame)
def get_facts(self):
facts = []
start_time = time.mktime(dt.datetime(2009, 1, 1).timetuple())
for fact in self.hamster.GetFacts(start_time, 0):
# run through the facts and convert them to python types so it's easier to operate with
res = {}
for key in ['name', 'category', 'description']:
res[key] = str(fact[key])
for key in ['start_time', 'end_time', 'date']:
res[key] = dt.datetime.utcfromtimestamp(fact[key]) if fact[key] else None
res['delta'] = dt.timedelta(days = fact['delta'] / (60 * 60 * 24),
seconds = fact['delta'] % (60 * 60 * 24))
res['tags'] = [str(tag) for tag in fact['tags']] if fact['tags'] else []
facts.append(res)
return facts
def on_enter_frame(self, scene, context):
if not self.facts:
return
g = graphics.Graphics(context)
g.set_line_style(width=1)
start_date = self.facts[0]['start_time'].date()
end_date = self.facts[-1]['end_time'].date()
days = (end_date - start_date).days
day_pixel = self.width / float(days)
for i in range(0, self.height, 2):
g.rectangle(0, i * 3, self.width, 3)
g.fill("#fafafa")
for day in range(days):
current_date = start_date + dt.timedelta(days=day)
cur_x = round(day * day_pixel)
pixel_width = max(round(day_pixel), 1)
if not self.day_counts[current_date]:
g.rectangle(cur_x, 0, day_pixel, self.height)
g.fill("#fff", 0.5)
for j, fact in enumerate(self.day_counts[current_date]):
#bar per category
g.rectangle(cur_x, 27 + self.categories.index(fact['category']) * 3, pixel_width, 3)
#bar per activity
g.rectangle(cur_x, 102 + self.activities.index(fact['name']) * 9, pixel_width, 6)
#number of activities
g.rectangle(cur_x, self.height - 3 * j, pixel_width, 3)
g.fill("#aaa")
class BasicWindow:
def __init__(self):
window = gtk.Window(gtk.WINDOW_TOPLEVEL)
window.set_size_request(600, 300)
window.connect("delete_event", lambda *args: gtk.main_quit())
window.add(Scene())
window.show_all()
example = BasicWindow()
gtk.main()
| Python | 0.998979 | |
4175f27a03be52baa8b4245df96a03e6bbd22310 | Add test for pygame sound play hook | modulation_test.py | modulation_test.py | import pygame
import random
from demodulate.cfg import *
from gen_tone import *
if __name__ == "__main__":
pygame.mixer.pre_init(frequency = int(SAMPLE_FREQ), channels = 1)
pygame.mixer.init()
WPM = random.uniform(2,20)
pattern = [1,0,1,1,1,0,0,0,0,0,0,0] # morse code 'A'
#gen_test_data()
data = gen_tone(pattern, WPM)
snd = pygame.sndarray.make_sound(data)
snd.play()
| Python | 0 | |
af1f087affef7a30729a85fbde9c8157ce3bfbed | Add to be completed to notes in rule note load | show_term.py | show_term.py | import argparse
from app.core import create_app
from app.config import config
application = create_app('production').app_context().push()
from app.main.models import Term, Rule, Note
from openpyxl import Workbook
#########################################################################################
## ##
## Simply pipe delimited lists ##
## ##
#########################################################################################
def list_terms():
for term in Term.query.all():
print(term.name, end="| ")
def list_rules():
for rule in Rule.query.all():
print(rule.name, end="|")
for term in rule.terms:
print(term.name, end="|")
#print("Term: %s" % term.name )
print()
#########################################################################################
## ##
## Term, rule, rule note dump with indent ##
## ##
#########################################################################################
def list_terms_rules_indent():
'''
Produce a list of terms, rules and rule notes in this format:
Term: Application Number
Rule: Balance and Account Balance Correction
Rule Note: Source: `acct\2.0_account_history\acct_hist_ext.sas`
'''
for term in Term.query.all():
# print(term.name, end="|")
print("Term: %s" % term.name )
for rule in term.rules:
# print(rule.name, end="|")
print(" Rule: %s" % rule.name )
for rn in rule.comments:
print(" Rule Note: %s" % rn.note.split('\n', 1)[0])
#########################################################################################
## ##
## Functions for the term dump to Excel ##
## ##
#########################################################################################
def output_comments(ws, rule, start_row):
if rule.comments.count() > 0:
print(" >> There are %s rule notes" % rule.comments.count())
comment_row = start_row
for rn in rule.comments:
ws.cell(row=comment_row, column=3).value = rn.note.split('\n', 1)[0]
print(" Rule Note: %s (%s)" % (rn.note.split('\n', 1)[0], comment_row))
comment_row += 1
return comment_row - 1
else:
print(" >> No rule comments, not adding one to row")
return start_row
def output_rules(ws, term, start_row):
if term.rules:
print(" >> There are rules")
rule_row = start_row
for rule in term.rules:
ws.cell(row=rule_row, column=2).value = rule.name
print(" Rule: %s (%s)" % (rule.name, rule_row))
# Do rule comments here
rule_row = output_comments(ws, rule, rule_row)
# Add one?
print(" >> Add one to rule row %s" % rule_row)
rule_row += 1
# Is the final + 1 needed?
return rule_row
else:
return start_row + 1
def output_terms(ws, start_row):
term_row = start_row
for term in Term.query.order_by(Term.name).all():
ws.cell(row=term_row, column=1).value = term.name
print("Term: %s (%s)" % (term.name, term_row))
term_row = output_rules(ws, term, term_row)
def excel_term_dump():
wb = Workbook()
ws = wb.active
output_terms(ws, 1)
wb.save("H:\glossary.xlsx")
#########################################################################################
## ##
## Argparse custom usage function ##
## ##
#########################################################################################
def msg(name=None):
return '''show_term.py command
Commands are:
rules Pipe delimited dump of rules
term Pipe delimited dump of terms
term_rules_indent Dump terms, rules and rule notes in indented form
excel_term_dump Dump terms, rules and rule notes to Excel
'''
#########################################################################################
## ##
## Main ##
## ##
#########################################################################################
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Utility to dump data from the Glossary',
usage=msg())
parser.add_argument("command", help="Enter the command to run")
args = parser.parse_args()
if args.command == "rules":
list_rules()
elif args.command == "terms":
list_terms()
elif args.command == "excel_term_dump":
excel_term_dump()
elif args.command == "terms_rules_indent":
list_terms_rules_indent()
else:
parser.print_help()
| Python | 0 | |
cfeab0e8f704a4681e1ec887b3ce116839557af9 | update tests to changes in graph_lasso | sklearn/covariance/tests/test_graph_lasso.py | sklearn/covariance/tests/test_graph_lasso.py | """ Test the graph_lasso module.
"""
import sys
from StringIO import StringIO
import numpy as np
from scipy import linalg
from sklearn.covariance import graph_lasso, GraphLasso, GraphLassoCV, \
empirical_covariance
from sklearn.datasets.samples_generator import make_sparse_spd_matrix
from sklearn.utils import check_random_state
def test_graph_lasso(random_state=0):
# Sample data from a sparse multivariate normal
dim = 20
n_samples = 100
random_state = check_random_state(random_state)
prec = make_sparse_spd_matrix(dim, alpha=.95,
random_state=random_state)
cov = linalg.inv(prec)
X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
emp_cov = empirical_covariance(X)
for alpha in (.1, .01):
covs = dict()
for method in ('cd', 'lars'):
cov_, _, costs = graph_lasso(emp_cov, alpha=.1, return_costs=True)
covs[method] = cov_
costs, dual_gap = np.array(costs).T
# Check that the costs always decrease
np.testing.assert_array_less(np.diff(costs), 0)
# Check that the 2 approaches give similar results
np.testing.assert_allclose(covs['cd'], covs['lars'])
# Smoke test the estimator
model = GraphLasso(alpha=.1).fit(X)
np.testing.assert_allclose(model.covariance_, covs['cd'])
def test_graph_lasso_cv(random_state=1):
# Sample data from a sparse multivariate normal
dim = 5
n_samples = 6
random_state = check_random_state(random_state)
prec = make_sparse_spd_matrix(dim, alpha=.96,
random_state=random_state)
cov = linalg.inv(prec)
X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
# Capture stdout, to smoke test the verbose mode
orig_stdout = sys.stdout
try:
sys.stdout = StringIO()
GraphLassoCV(verbose=10, alphas=3).fit(X)
finally:
sys.stdout = orig_stdout
| """ Test the graph_lasso module.
"""
import sys
from StringIO import StringIO
import numpy as np
from scipy import linalg
from sklearn.covariance import graph_lasso, GraphLasso, GraphLassoCV
from sklearn.datasets.samples_generator import make_sparse_spd_matrix
from sklearn.utils import check_random_state
def test_graph_lasso(random_state=0):
# Sample data from a sparse multivariate normal
dim = 20
n_samples = 100
random_state = check_random_state(random_state)
prec = make_sparse_spd_matrix(dim, alpha=.95,
random_state=random_state)
cov = linalg.inv(prec)
X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
for alpha in (.1, .01):
covs = dict()
for method in ('cd', 'lars'):
cov_, _, costs = graph_lasso(X, alpha=.1, return_costs=True)
covs[method] = cov_
costs, dual_gap = np.array(costs).T
# Check that the costs always decrease
np.testing.assert_array_less(np.diff(costs), 0)
# Check that the 2 approaches give similar results
np.testing.assert_allclose(covs['cd'], covs['lars'])
# Smoke test the estimator
model = GraphLasso(alpha=.1).fit(X)
np.testing.assert_allclose(model.covariance_, covs['cd'])
def test_graph_lasso_cv(random_state=1):
# Sample data from a sparse multivariate normal
dim = 5
n_samples = 6
random_state = check_random_state(random_state)
prec = make_sparse_spd_matrix(dim, alpha=.96,
random_state=random_state)
cov = linalg.inv(prec)
X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
# Capture stdout, to smoke test the verbose mode
orig_stdout = sys.stdout
try:
sys.stdout = StringIO()
GraphLassoCV(verbose=10, alphas=3).fit(X)
finally:
sys.stdout = orig_stdout
| Python | 0 |
dd9893eec00c16f55b77944509bafe4864319b72 | create main function | JobManager.py | JobManager.py |
import filelib.parser.ma
import filelib.parser.mb
import os.path
import sys
if __name__ == "__main__":
addFilePath = "/root/test_maya_2015.mb"
if(len(sys.argv) > 1):
addFilePath = sys.argv[1]
(dir,jobExt) = os.path.splitext(addFilePath)
jobExt = jobExt.lower()
if jobExt == ".ma":
fileParser = filelib.parser.ma.FileParserMayaMA(addFilePath, SudioPlugin())
elif jobExt == ".mb":
fileParser = filelib.parser.mb.FileParserMayaMB(addFilePath)
fileParser.parse()
print fileParser.getparam()
# job2 = fileParser.getJob()
#jobfactory = JobFactory();
#job2 = jobfactory.getJob(fileParser.getparam(), SudioPlugin())
| Python | 0.004115 | |
c65731de77f88380f2c816fa9667d153140bfbe1 | Add LDA script | lda/lda_analysis.py | lda/lda_analysis.py | import sys
from sklearn.lda import LDA
import matplotlib.pyplot as plt
import numpy as np
def read_variants(flname):
fl = open(flname)
markers = []
individuals = []
population_ids = []
population = -1
for ln in fl:
if "Marker" in ln:
if len(individuals) == 0:
continue
marker = dict()
marker["individuals"] = np.array(individuals)
marker["population_labels"] = np.array(population_ids)
markers.append(marker)
population = -1
population_ids = []
individuals = []
elif "Population" in ln:
population += 1
else:
individual = map(float, ln.strip().split())
individuals.append(individual)
population_ids.append(population)
if len(individuals) != 0:
marker = dict()
marker["individuals"] = np.array(individuals)
marker["population_labels"] = np.array(population_ids)
markers.append(marker)
fl.close()
return markers
def plot_scores(markers, flname):
    """Histogram the LDA training-set accuracy across all markers.

    For each marker, fit an LDA classifier predicting population labels from
    the individuals' feature rows and record its score on the same data;
    markers whose fit fails (e.g. degenerate data) are scored 0.0.  The
    histogram of scores is written to *flname*.
    """
    plt.clf()
    scores = []
    for marker in markers:
        try:
            lda = LDA()
            lda.fit(marker["individuals"], marker["population_labels"])
            scores.append(lda.score(marker["individuals"],
                                    marker["population_labels"]))
        except Exception:
            # Keep the original best-effort behaviour, but don't swallow
            # KeyboardInterrupt/SystemExit the way the bare `except:` did.
            scores.append(0.0)
    plt.hist(scores, bins=np.arange(0.0, 1.0, 0.01))
    plt.xlabel("Score", fontsize=18)
    plt.ylabel("Occurrences", fontsize=18)
    plt.savefig(flname, DPI=200)
def plot_lda_projection(marker, flname):
    # Fit LDA on a single marker and scatter-plot the 1-D projection of each
    # individual against its population label, saving the figure to flname.
    lda = LDA()
    lda.fit(marker["individuals"], marker["population_labels"])
    # Training-set accuracy, printed for quick inspection (Python 2 print).
    print lda.score(marker["individuals"], marker["population_labels"])
    proj = lda.transform(marker["individuals"])
    # NOTE(review): n_samples/n_components are unpacked but never used.
    n_samples, n_components = proj.shape
    plt.scatter(proj, marker["population_labels"])
    plt.xlabel("Component 0", fontsize=18)
    plt.ylabel("Population Labels", fontsize=18)
    plt.savefig(flname, DPI=200)
if __name__ == "__main__":
    # CLI: <variants_file> <output_plot_filename>
    variants_fl = sys.argv[1]
    #variant_id = int(sys.argv[2])
    plot_flname = sys.argv[2]
    variants = read_variants(variants_fl)
    # Python 2 print statement: number of markers parsed.
    print len(variants)
    #plot_lda_projection(variants[variant_id], plot_flname)
    plot_scores(variants, plot_flname)
cf97c95ab9dcb3b1dba6608639471375a1cbef42 | Create afUdimLayout.py | scripts/afUdimLayout.py | scripts/afUdimLayout.py | import pymel.core as pm
import maya.mel as mel
allSets = pm.ls(sl=1,type="objectSet")
for i in range(0,len(allSets)):
if i<10:
pm.select(allSets[i],r=1,ne=1)
pm.select(hierarchy=1)
mel.eval("ConvertSelectionToUVs;")
pm.polyEditUV(u=i,v=0)
elif i>=10<20:
pm.select(allSets[i],r=1,ne=1)
pm.select(hierarchy=1)
mel.eval("ConvertSelectionToUVs;")
pm.polyEditUV(u=i-10,v=1)
elif i>=20<30:
pm.select(allSets[i],r=1,ne=1)
pm.select(hierarchy=1)
mel.eval("ConvertSelectionToUVs;")
pm.polyEditUV(u=i-20,v=2)
elif i>=30<40:
pm.select(allSets[i],r=1,ne=1)
pm.select(hierarchy=1)
mel.eval("ConvertSelectionToUVs;")
pm.polyEditUV(u=i-30,v=3)
| Python | 0.000001 | |
49f557228a6c826598c48a08f6a0de4ee176d888 | add python script to send ogg audio stream over LCM messages | software/tools/tools/scripts/oggStreamLCM.py | software/tools/tools/scripts/oggStreamLCM.py | import bot_core
import lcm
import urllib2
import time
import sys
import os
import select
import subprocess
import threading
# VLC command:
# cvlc <input> --sout '#transcode{acodec=vorb,ab=10,channels=1,samplerate=8000}:std{access=http,mux=ogg,url=localhost:8080}'
# where <input> is a file or a url
serverChannel = 'OGG_SERVER'
clientChannel = 'OGG_CLIENT'
oggUrl = 'http://localhost:8080'
messageSize = 4096
serverThreadRunning = False
serverThread = None
def serverStreamLoop():
    # Pull the Ogg/Vorbis byte stream from the local VLC HTTP server and
    # republish it over LCM in fixed-size raw_t chunks until the stream ends
    # or serverThreadRunning is cleared by handleMessageFromClient.
    stream = urllib2.urlopen(oggUrl)
    lcmHandle = lcm.LCM()
    m = bot_core.raw_t()
    m.utime = 0                 # reused as a simple chunk sequence number
    totalBytes = 0
    global serverThreadRunning
    while serverThreadRunning:
        m.data = stream.read(messageSize)
        if not m.data:
            break               # HTTP stream ended
        m.utime = m.utime + 1
        m.length = len(m.data)
        totalBytes += m.length
        #print 'publishing message %d. %d bytes. total so far: %f kB' % (m.utime, m.length, totalBytes/1024.0)
        lcmHandle.publish(serverChannel, m.encode())
    print 'stream publisher loop returning'
def handleMessageFromClient(channel, data):
    # Any message from a client is treated as a request to (re)start the
    # publisher thread from the beginning of the HTTP stream.
    m = bot_core.raw_t.decode(data)
    print 'message from client:', m.data
    global serverThread, serverThreadRunning
    if serverThread:
        # Stop the previous publisher cleanly before starting a new one.
        serverThreadRunning = False
        serverThread.join()
        serverThread = None
    serverThreadRunning = True
    serverThread = threading.Thread(target=serverStreamLoop)
    serverThread.daemon = True
    serverThread.start()
def server():
    # Server mode: block forever dispatching client requests received on
    # clientChannel to handleMessageFromClient.
    lcmHandle = lcm.LCM()
    subscription = lcmHandle.subscribe(clientChannel, handleMessageFromClient)
    while True:
        lcmHandle.handle()
oggProc = None
def handleMessageFromServer(channel, data):
    # Feed each received audio chunk straight into the ogg123 player's stdin.
    m = bot_core.raw_t.decode(data)
    oggProc.stdin.write(m.data)
def client():
    # Client mode: spawn an ogg123 player reading from stdin, ask the server
    # to restart its stream, then forward every received chunk to the player.
    global oggProc
    oggProc = subprocess.Popen(['ogg123', '-'], stdin=subprocess.PIPE, stderr=subprocess.STDOUT)
    lcmHandle = lcm.LCM()
    m = bot_core.raw_t()
    m.utime = 0
    m.data = 'restart_stream'
    m.length = len(m.data)
    lcmHandle.publish(clientChannel, m.encode())
    subscription = lcmHandle.subscribe(serverChannel, handleMessageFromServer)
    while True:
        lcmHandle.handle()
def main():
    """Entry point: run as `--server` (publisher) or `--client` (player)."""
    # The original validated argv with `assert`, which is silently stripped
    # under `python -O` (and raised a bare IndexError with no argument at
    # all); fail loudly with a usage message instead.
    if len(sys.argv) < 2 or sys.argv[1] not in ('--client', '--server'):
        raise SystemExit('usage: %s --client|--server' % sys.argv[0])
    if sys.argv[1] == '--server':
        server()
    else:
        client()
if __name__ == '__main__':
main()
| Python | 0.000001 | |
76be22f3d1aa86616ecd06a326344f24ff03adbe | Add function to generate uniform addresses | DataGeneration/GenerateUniformAddresses.py | DataGeneration/GenerateUniformAddresses.py | # The purpose of this script is to generate a uniformly distributed series of
# lat/long coordinates given max/min latitude, max/min longitude, latitude
# resolution, and longitude resolution, where resolution is the desired number
# of degrees between output coordinates
# Outputs a pandas dataframe of lat/long coordinate pairs
import pandas as pd # For the dataframe
import numpy as np # To calculate ranges with float step-values
import math # For math
def GenerateUniformCoordinates(lat_min, lat_max,
                               lng_min, lng_max,
                               lat_res, lng_res):
    """Build a uniformly spaced grid of lat/lng coordinate pairs.

    Both endpoints are included in each axis (the ranges extend one
    resolution step past the max so np.arange covers the final value).
    Returns a pandas DataFrame with columns 'lat' and 'lng', one row per
    grid point, latitude varying slowest.
    """
    # Pre-compute the row count so the DataFrame's memory is allocated once.
    lat_count = math.ceil((lat_max - lat_min) / lat_res + 1)
    lng_count = math.ceil((lng_max - lng_min) / lng_res + 1)
    total_rows = lat_count * lng_count

    # Progress/debug output.
    print('Latitude Quantity: ' + str(lat_count))
    print('Longitude Quantity: ' + str(lng_count))
    print('Total Number of Rows to Output: ' + str(total_rows))

    grid = pd.DataFrame(columns=['lat', 'lng'], index=np.arange(0, total_rows))

    # Walk every (lat, lng) combination, latitude in the outer position,
    # filling the pre-allocated frame row by row.
    lats = np.arange(lat_min, lat_max + lat_res, lat_res)
    lngs = np.arange(lng_min, lng_max + lng_res, lng_res)
    pairs = ((lat, lng) for lat in lats for lng in lngs)
    for row_idx, (lat, lng) in enumerate(pairs):
        grid.loc[row_idx] = [lat, lng]
    return grid
# These values are the degrees walked per minute at a speed of 3.1 miles per
# hour at 41.4822 deg N and 81.6697 deg W, which is the center of Cleveland
lat_res = 0.000724516
lng_res = 0.000963461
# Bounding box (degrees) covering the greater Cleveland area.
lat_min = 41.227883
lat_max = 41.637051
lng_min = -81.96753
lng_max = -81.438542
output_df = GenerateUniformCoordinates(lat_min, lat_max,
                                       lng_min, lng_max,
                                       lat_res, lng_res)
# NOTE(review): at this resolution the grid is on the order of hundreds of
# thousands of rows; the per-row .loc fill above will be slow -- confirm
# acceptable for a one-off generation script.
output_df.to_csv('uniform_addresses.csv')
05f87be4c85036c69abc9404acb824c58d71f101 | Add border operation... Damn that was easy | slice_ops.py | slice_ops.py | import slicer
import shapely.ops
import shapely.geometry
def border(sli, amount):
    # Thicken the slice's cut lines and outer boundary into a border region
    # of width `amount`, and replace the slice's polygon with that region.
    # (Shapely geometry throughout; `sli` is presumably a slicer slice
    # object exposing `.cuts` and `.poly` -- confirm against the slicer
    # module.)
    cuts = [cut.polygon(True) for cut in sli.cuts]
    # Buffer by amount/2 so the border straddles each cut line evenly.
    # NOTE(review): an integer `amount` floor-divides under Python 2 --
    # confirm callers pass floats.
    cut_outline = shapely.ops.cascaded_union(cuts) \
                             .buffer(amount / 2)
    # Buffering the boundary line produces a band centred on the outline;
    # the intersection below trims it to the inside of the shape.
    shape_outline = sli.poly.boundary.buffer(amount)
    outlines = cut_outline.union(shape_outline)
    # Keep only the parts of the border that lie inside the original shape.
    newpoly = outlines.intersection(sli.poly)
    sli.poly = newpoly
a3089dd3d9c31d0d705fe54858fdc0ebee76f488 | write a Python client for Sift Science's REST API | server/sift_client.py | server/sift_client.py | """Python client for Sift Science's REST API
(https://siftscience.com/docs/rest-api).
"""
import json
import logging
import traceback
import requests
API_URL = 'https://api.siftscience.com/v202/events'
sift_logger = logging.getLogger('sift_client')
class Client(object):
    def __init__(self, api_key, api_url=API_URL, timeout=2.0):
        """Initialize the client.

        Args:
            api_key: Your Sift Science API key associated with your customer
                account. You can obtain this from
                https://siftscience.com/quickstart
            api_url: The URL to send events to.
            timeout: Number of seconds to wait before failing request. Defaults
                to 2 seconds.
        """
        self.api_key = api_key
        self.url = api_url
        self.timeout = timeout

    def track(self, event, properties):
        """Track an event and associated properties to the Sift Science client.
        This call is blocking.

        Args:
            event: The name of the event to send. This can either be a reserved
                event name such as "$transaction" or "$label" or a custom event
                name (that does not start with a $).
            properties: A dict of additional event-specific attributes to track.
                The caller's dict is not modified.

        Returns:
            A requests.Response object if the track call succeeded, otherwise
            a subclass of requests.exceptions.RequestException indicating the
            exception that occurred.
        """
        headers = { 'Content-type' : 'application/json', 'Accept' : '*/*' }
        # Work on a copy: the previous implementation mutated the caller's
        # dict by injecting '$api_key' and '$type' into it.
        payload = dict(properties)
        payload.update({ '$api_key': self.api_key, '$type': event })
        try:
            response = requests.post(self.url, data=json.dumps(payload),
                                     headers=headers, timeout=self.timeout)
            # TODO(david): Wrap the response object in a class
            return response
        except requests.exceptions.RequestException as e:
            # NOTE(review): the payload includes the API key; consider
            # redacting it before logging.
            sift_logger.warn('Failed to track event: %s' % payload)
            sift_logger.warn(traceback.format_exception_only(type(e), e))
            return e
| Python | 0 | |
f3f5249c0ac7d41ebf2115fb0b5c7576012bcb38 | Add production settings | src/biocloud/settings/production.py | src/biocloud/settings/production.py | # In production set the environment variable like this:
# DJANGO_SETTINGS_MODULE=my_proj.settings.production
from .base import * # NOQA
import logging.config
# For security and performance reasons, DEBUG is turned off
DEBUG = False
# Must mention ALLOWED_HOSTS in production!
# ALLOWED_HOSTS = []
# Cache the templates in memory for speed-up
loaders = [
(
'django.template.loaders.cached.Loader',
[
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
]
),
]
TEMPLATES[0]['OPTIONS'].update({"loaders": loaders})
TEMPLATES[0]['OPTIONS'].update({"debug": False})
del TEMPLATES[0]['APP_DIRS']
# Email settings
EMAIL_BACKEND = env.email_url()['EMAIL_BACKEND']
EMAIL_HOST = env.email_url()['EMAIL_HOST']
EMAIL_HOST_PASSWORD = env.email_url()['EMAIL_HOST_PASSWORD']
EMAIL_HOST_USER = env.email_url()['EMAIL_HOST_USER']
EMAIL_PORT = env.email_url()['EMAIL_PORT']
EMAIL_USE_TLS = env.email_url()['EMAIL_USE_TLS']
DEFAULT_FROM_EMAIL = SERVER_EMAIL = '{name} <{addr}>'.format(
name='BioCloud Dev',
addr='biocloud@liang2.io',
)
# Security-related settings
# SECURE_HSTS_SECONDS = 2592000
# SECURE_BROWSER_XSS_FILTER = True
# SECURE_CONTENT_TYPE_NOSNIFF=True
# SESSION_COOKIE_SECURE = True
# CSRF_COOKIE_SECURE = True
# CSRF_COOKIE_HTTPONLY = True
# SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# X_FRAME_OPTIONS = 'DENY'
# Log everything to the logs directory at the top
LOGFILE_ROOT = join(BASE_DIR, 'logs')
# Reset logging
LOGGING_CONFIG = None
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format': (
'[%(asctime)s] %(levelname)s '
'[%(pathname)s:%(lineno)s] %(message)s'
),
'datefmt': "%d/%b/%Y %H:%M:%S"
},
'simple': {
'format': '%(levelname)s %(message)s'
},
},
'handlers': {
'django_log_file': {
'level': 'DEBUG',
'class': 'logging.FileHandler',
'filename': join(LOGFILE_ROOT, 'django.log'),
'formatter': 'verbose'
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'simple'
},
},
'loggers': {
'django': {
'handlers': ['django_log_file', ],
'propagate': True,
'level': 'DEBUG',
},
}
}
# Give every local app its own dedicated log file (plus console output),
# registering both a handler and a logger entry in the LOGGING dict.
for app in LOCAL_APPS:
    app_handler = '%s_log_file' % app
    app_log_filepath = '%s.log' % app
    LOGGING['loggers'][app] = {
        'handlers': [app_handler, 'console', ],
        'level': 'DEBUG',
    }
    # NOTE(review): per-app files use plain FileHandler, unlike the
    # TimedRotatingFileHandler used for django.log above -- confirm the
    # lack of rotation is intentional.
    LOGGING['handlers'][app_handler] = {
        'level': 'DEBUG',
        'class': 'logging.FileHandler',
        'filename': join(LOGFILE_ROOT, app_log_filepath),
        'formatter': 'verbose',
    }
# Apply the assembled config (LOGGING_CONFIG was set to None earlier so
# Django does not configure logging itself first).
logging.config.dictConfig(LOGGING)
| Python | 0.000001 | |
cda1efa55242641accf78162493c3ebb3582399e | Create AM_example.py | Effects/Amplitude_Modulation/AM_example.py | Effects/Amplitude_Modulation/AM_example.py | # Play a wave file with amplitude modulation.
# Assumes wave file is mono.
# This implementation reads and plays a one frame (sample) at a time (no blocking)
"""
Read a signal from a wave file, do amplitude modulation, play to output
Original: pyrecplay_modulation.py by Gerald Schuller, October 2013
Modified to read a wave file - Ivan Selesnick, September 2015
"""
# f0 = 0 # Normal audio
f0 = 400 # 'Duck' audio
import pyaudio
import struct
import wave
import math
# Open wave file (mono)
input_wavefile = 'author.wav'
# input_wavefile = 'sin01_mono.wav'
# input_wavefile = 'sin01_stereo.wav'
wf = wave.open( input_wavefile, 'rb')
RATE = wf.getframerate()
WIDTH = wf.getsampwidth()
LEN = wf.getnframes()
CHANNELS = wf.getnchannels()
# Python 2 print statements describing the input signal.
print 'The sampling rate is {0:d} samples per second'.format(RATE)
print 'Each sample is {0:d} bytes'.format(WIDTH)
print 'The signal is {0:d} samples long'.format(LEN)
print 'The signal has {0:d} channel(s)'.format(CHANNELS)

# Open audio stream (mono output at the file's sample rate/width)
p = pyaudio.PyAudio()
stream = p.open(format = p.get_format_from_width(WIDTH),
                channels = 1,
                rate = RATE,
                input = False,
                output = True)

print '* Playing...'

# Loop through wave file, one frame at a time (simple but slow; no blocking
# beyond the audio stream's own buffering).
for n in range(0, LEN):
    # Get sample from wave file
    input_string = wf.readframes(1)
    # Convert binary string to tuple of numbers
    # NOTE(review): unpacking a single 'h' assumes a mono 16-bit file, per
    # the header comment; a stereo frame (4 bytes) would raise here.
    input_tuple = struct.unpack('h', input_string)
    # (h: two bytes per sample (WIDTH = 2))
    # Use first value (of two if stereo)
    input_value = input_tuple[0]
    # Amplitude modulation (f0 Hz cosine)
    output_value = input_value * math.cos(2*math.pi*f0*n/RATE)
    # Convert value to binary string
    # NOTE(review): output_value is a float; struct.pack('h', ...) requires
    # an int on Python 3 (and deprecates floats on 2.7) -- confirm target
    # interpreter or wrap with int().
    output_string = struct.pack('h', output_value)
    # Write binary string to audio output stream
    stream.write(output_string)

print '* Done'

stream.stop_stream()
stream.close()
p.terminate()
| Python | 0.000005 | |
2387d8f269cbe1943db1b1e6304603ccb6901e43 | Add flashcards for powers of two estimation | flashcards.py | flashcards.py | import random
import time
DELAY = 10
while 1:
time.sleep(DELAY)
useful_powers_of_2 = {7, 8, 10, 16, 20, 30, 32, 40}
random_power_of_2 = random.sample(useful_powers_of_2, 1)[0]
print '\nWhat\'s the largest %s bit integer?' % random_power_of_2
time.sleep(DELAY)
print 'Answer: %s' % '{:,}'.format(2 ** random_power_of_2) | Python | 0 | |
7a79c163144b242be57ed8cf45ae4fb5097f11fa | Create defaultlog.py | defaultlog.py | defaultlog.py | # -*- coding: utf-8 -*-
"""Console and file logging configuration.
This module automatically configures the logging to use a colored console
format, and a timed rotating log file that rolls over at midnight.
The log formats results in the following outputs:
Console:
[INFO ] This is some info (root)
[DEBUG ] Now this is some debug (parser.ekpo)
File:
[2015-12-29T16:16:55]13748 root INFO This is some info
[2015-12-29T16:16:55]13748 parser.ekpo DEBUG This is some module debug
[2015-12-29T16:16:55]13748 root INFO Some more info
Just put this file into your project structure to use it.
Usage:
from defaultlog import logging
Examples:
Package-level using the root logger
-----------------------------------
# Import the logging module through this customized module
from defaultlog import logging
def main():
# Log using the root logger
logging.info('Program started')
if __name__ == '__main__':
main()
Library code that log to a private logger
-----------------------------------------
# Imports the bult-in logging package
import logging
# Creates a private logger instance
logger = logging.getLogger(__name__)
def do_it():
# Log using the private logger
# It keeps the configuration done in the root logger
logger.info('Doing it')
Dependencies:
coloredlogs
colorama
"""
import logging
import logging.config
import yaml
default_config = \
"""
version: 1
formatters:
console:
format : "[%(levelname)-7s] %(message)s"
datefmt: "%H:%M:%S"
file:
format : "[%(asctime)s]%(thread)-5d %(name)-40s %(levelname)-8s %(message)s"
datefmt: "%Y-%m-%dT%H:%M:%S"
colored:
format : "[%(log_color)s%(levelname)-8s%(reset)s] %(message_log_color)s%(message)s%(reset)s %(name_log_color)s(%(name)s)"
datefmt: "%H:%M:%S"
() : colorlog.ColoredFormatter
log_colors:
DEBUG : white
INFO : bold_green
WARNING : bold_yellow
ERROR : bold_red
CRITICAL: bold_white,bg_red
secondary_log_colors:
message:
INFO : bold_white
WARNING : bold_yellow
ERROR : bold_red
CRITICAL: bold_red
name:
DEBUG : purple
INFO : purple
WARNING : purple
ERROR : purple
CRITICAL: purple
handlers:
console:
level : DEBUG
class : logging.StreamHandler
formatter: colored
stream : ext://sys.stdout
file:
level : DEBUG
class : logging.handlers.TimedRotatingFileHandler
formatter: file
when : midnight
filename : logs/log.log
encoding : utf8
loggers:
custom_module:
handlers: [file]
level: WARNING
root:
handlers: [console, file]
level: DEBUG
disable_existing_loggers: False
"""
# Parse the embedded YAML config and install it; on failure fall back to
# whatever logging config is already in place and record the traceback.
try:
    # safe_load avoids arbitrary-object construction from the YAML source;
    # yaml.load without an explicit Loader is deprecated/unsafe in PyYAML 5+.
    config = yaml.safe_load(default_config)
    logging.config.dictConfig(config)
except Exception:
    logging.exception("Couldn't import logging settings from yaml")
| Python | 0.000001 | |
2acf089d00426d8b61317c6d031aee7696d42b03 | Create script to import Wicklow data | import_wicklow.py | import_wicklow.py | import psycopg2
import re
import sys
ORG_ID = 10
conn = psycopg2.connect("dbname=school_crm user=postgres host=localhost port=5432")
cur = conn.cursor()
# cur.execute("set client_encoding to 'latin1'")
def import_names():
    # Rebuild the person roster for ORG_ID from names.txt: delete existing
    # people (and their tag rows) for the organization, then re-insert,
    # tagging staff vs. students.  File format: staff names first, then a
    # blank separator line, then student names; one "First Last" per line.
    cur.execute("DELETE FROM person_tag WHERE person_id in (SELECT person_id from person where organization_id=%d)" % ORG_ID)
    cur.execute("DELETE FROM person_tag_change WHERE person_id in (SELECT person_id from person where organization_id=%d)" % ORG_ID)
    cur.execute("DELETE FROM person WHERE organization_id=%d" % ORG_ID)
    # Look up the tag ids used to label students and staff.
    cur.execute("SELECT id from tag where title='Current Student' and organization_id=%d" % ORG_ID)
    student_tag_id = int(cur.fetchone()[0])
    cur.execute("SELECT id from tag where title='Staff' and organization_id=%d" % ORG_ID)
    staff_tag_id = int(cur.fetchone()[0])
    is_student = False
    for name in open('names.txt'):
        if not name.strip():
            # Blank separator line: every following name is a student.
            is_student = True
            continue
        # NOTE(review): assumes exactly "First Last" split on one space;
        # middle names or extra whitespace would mis-split -- confirm
        # against the input file.
        splits = name.split(' ')
        first_name = splits[0].title()
        last_name = splits[1].title()
        cur.execute("""INSERT into person(
            first_name, last_name, organization_id) VALUES
            (%s, %s, %s) returning person_id""", (
            first_name, last_name, ORG_ID))
        person_id = int(cur.fetchone()[0])
        cur.execute("INSERT into person_tag (tag_id, person_id) VALUES (%s, %s)",
                    (student_tag_id if is_student else staff_tag_id, person_id))
        # creator_id=1 attributes the change to the bootstrap/admin user.
        cur.execute("INSERT into person_tag_change (tag_id, person_id, creator_id, was_add) VALUES (%s, %s, 1, true)",
                    (student_tag_id if is_student else staff_tag_id, person_id))
def import_lawbook():
    # Re-import the law book (chapters -> sections -> rule entries) from
    # lawbook.txt, replacing this organization's existing rows.
    cur.execute("DELETE from entry WHERE section_id in "
                "(SELECT s.id from section s join chapter c on s.chapter_id=c.id where organization_id=%d)" % ORG_ID)
    cur.execute("DELETE from section WHERE chapter_id in "
                "(SELECT id from chapter where organization_id=%d)" % ORG_ID)
    cur.execute("DELETE from chapter WHERE organization_id=%d" % ORG_ID)
    chapter_id = None
    section_id = None
    rule_content = ''
    rule_num = ''
    rule_title = ''
    # Line-oriented state machine: any header line (chapter, section, or
    # rule) first flushes the rule accumulated so far; non-header lines
    # extend the current rule's body.
    for line in open('lawbook.txt'):
        chapter_match = re.match(r'Section (.*) - (.*)', line)
        section_match = re.match(r'(\d+) *- *(.*)', line)
        rule_match = re.match(r'(\d+\.\d+.*)\t(.*)', line)
        if chapter_match or section_match or rule_match:
            if rule_num and rule_title and rule_content:
                cur.execute("""INSERT into entry(num, title, section_id, content)
                    VALUES (%s, %s, %s, %s)""", (
                    rule_num, rule_title, section_id, rule_content))
                # print('RULE', rule_num, rule_title, rule_content)
                rule_content = ''
        if section_match:
            cur.execute("""INSERT into section(num, title, chapter_id) VALUES
                (%s, %s, %s) returning id""", (
                section_match.group(1), section_match.group(2).strip(), chapter_id))
            section_id = int(cur.fetchone()[0])
            # print('SECTION', section_match.group(1), section_match.group(2))
            rule_content = ''
        elif rule_match:
            rule_content = ''
            # Rule numbers look like "<section>.<rule> ..."; keep the part
            # after the first dot as the rule number within its section.
            splits = rule_match.group(1).split('.')
            rule_num = splits[1].strip()
            rule_title = rule_match.group(2).strip()
        elif chapter_match:
            cur.execute("""INSERT into chapter (num, title, organization_id)
                VALUES (%s, %s, %s) returning id""", (
                chapter_match.group(1).strip(), chapter_match.group(2).strip(), ORG_ID))
            chapter_id = int(cur.fetchone()[0])
            # print(chapter_match.group(1), chapter_match.group(2), chapter_id)
            rule_content = ''
        else:
            rule_content += line
    # NOTE(review): the final rule is never flushed (flushing only happens
    # when the next header arrives) -- hence this reminder.
    print('Now manually copy over the last chapter')
# Run both imports in one transaction, then commit and release the
# connection.  An exception before commit leaves the database unchanged.
import_names()
import_lawbook()
conn.commit()
cur.close()
conn.close()
| Python | 0 | |
f8b9e697f4d49f35dda322817ac8ac63d96b6732 | Add failing wait tests | nclxd/tests/test_container_utils.py | nclxd/tests/test_container_utils.py | # Copyright 2015 Canonical Ltd
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova import exception
from nova import test
from nclxd.nova.virt.lxd import container_utils
from nclxd import tests
class LXDTestContainerUtils(test.NoDBTestCase):
    """Unit tests for LXDContainerUtils with pylxd's API mocked out."""

    def setUp(self):
        super(LXDTestContainerUtils, self).setUp()
        # Replace pylxd.api.API with a shared mock so no LXD daemon is
        # required; self.ml is the mock every pylxd call lands on.
        self.ml = tests.lxd_mock()
        lxd_patcher = mock.patch('pylxd.api.API',
                                 mock.Mock(return_value=self.ml))
        lxd_patcher.start()
        self.addCleanup(lxd_patcher.stop)
        self.container_utils = container_utils.LXDContainerUtils()

    def test_wait_undefined(self):
        # Waiting on an undefined (None) operation must raise NovaException.
        self.assertRaises(exception.NovaException,
                          self.container_utils.wait_for_container,
                          None)

    def test_wait_timedout(self):
        # A False result from pylxd's wait means the operation timed out,
        # which must also surface as NovaException.
        self.ml.wait_container_operation.return_value = False
        self.assertRaises(exception.NovaException,
                          self.container_utils.wait_for_container,
                          'fake')
| # Copyright 2015 Canonical Ltd
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
| Python | 0.000005 |
04979d5536a9787cac0024dd6e767f0baec280fd | Update __init__.py | tendrl/node_agent/objects/definition/__init__.py | tendrl/node_agent/objects/definition/__init__.py | import importlib
import namespaces as ns
import yaml
from tendrl.commons import objects
from tendrl.commons import etcdobj
from tendrl.node_agent.objects.definition import master
# Definitions need there own special init and have to be present in the NS
# before anything else, Hence subclassing BaseObject
class Definition(objects.BaseObject):
    """Tendrl definition object backed by the bundled master YAML.

    Lazily resolves object/flow definitions from the parsed YAML and
    registers the corresponding atom/flow classes in the global namespace.
    """

    def __init__(self, *args, **kwargs):
        super(Definition, self).__init__(*args, **kwargs)
        # etcd key under which definitions are stored.
        self.value = '_tendrl/definitions'
        # NOTE(review): `master.read` is an attribute of the master module;
        # yaml.safe_load below expects it to be the raw YAML text -- confirm.
        self.master = master.read
        self._parsed_defs = yaml.safe_load(self.master)
        self._etcd_cls = _DefinitionEtcd

    def get_obj_definition(self, namespace, obj_name):
        # Resolve an object's definition: import and register its atom and
        # flow classes into `tendrl_ns`, then return a Namespace describing
        # the object.  (`tendrl_ns` is an injected global -- presumably
        # installed elsewhere at startup; confirm.)
        raw_ns = "namespace.%s" % namespace
        raw_obj = self._get_parsed_defs()[raw_ns]['objects'][obj_name]
        for atom_name, atom in raw_obj.get('atoms', {}).iteritems():
            # Module name is the segment right after ".atoms." in the path.
            atom_mod = atom['run'].split(".atoms.")[-1].split(".")[0]
            atom_fqdn = "%s.objects.%s.atoms.%s" % (namespace,
                                                    obj_name.lower(),
                                                    atom_mod)
            atom_cls = getattr(importlib.import_module(atom_fqdn), atom_name)
            tendrl_ns.add_atom(obj_name, atom_name, atom_cls)
        for flow_name, flow in raw_obj.get('flows', {}).iteritems():
            flow_mod = flow['run'].split(".flows.")[-1].split(".")[0]
            flow_fqdn = "%s.objects.%s.flows.%s" % (namespace,
                                                    obj_name.lower(),
                                                    flow_mod)
            flow_cls = getattr(importlib.import_module(flow_fqdn), flow_name)
            tendrl_ns.add_obj_flow(obj_name, flow_name, flow_cls)
        return ns.Namespace(attrs=raw_obj['attrs'],
                            enabled=raw_obj['enabled'],
                            obj_list=raw_obj.get('list', ""),
                            obj_value=raw_obj['value'],
                            atoms=raw_obj.get('atoms', {}),
                            flows=raw_obj.get('flows', {}),
                            help=raw_obj['help'])

    def get_flow_definition(self, namespace, flow_name):
        # Same idea as get_obj_definition, but for a namespace-level flow.
        raw_ns = "namespace.%s" % namespace
        raw_flow = self._get_parsed_defs()[raw_ns]['flows'][flow_name]
        flow_mod = raw_flow['run'].split(".flows.")[-1].split(".")[0]
        flow_fqdn = "%s.flows.%s" % (namespace, flow_mod)
        flow_cls = getattr(importlib.import_module(flow_fqdn), flow_name)
        tendrl_ns.add_flow(flow_name, flow_cls)
        return ns.Namespace(atoms=raw_flow['atoms'],
                            help=raw_flow['help'],
                            enabled=raw_flow['enabled'],
                            inputs=raw_flow['inputs'],
                            pre_run=raw_flow.get('pre_run', []),
                            post_run=raw_flow.get('post_run', []),
                            type=raw_flow['type'],
                            uuid=raw_flow['uuid']
                            )

    def _get_parsed_defs(self):
        # Re-parse the master YAML on every call so edits are picked up.
        self._parsed_defs = yaml.safe_load(self.master)
        return self._parsed_defs
class _DefinitionEtcd(etcdobj.EtcdObj):
    """A table of the Definitions, lazily updated
    """
    # etcd key prefix backing this table.
    __name__ = '_tendrl/definitions'
    # Domain class this etcd object serializes to/from.
    _tendrl_cls = Definition
| import importlib
import namespaces as ns
import yaml
from tendrl.commons import objects
from tendrl.commons import etcdobj
from tendrl.node_agent.objects.definition import master
# Definitions need there own special init and have to be present in the NS
# before anything else, Hence subclassing BaseObject
class Definition(objects.BaseObject):
def __init__(self, *args, **kwargs):
super(Definition, self).__init__(*args, **kwargs)
self.value = '_tendrl/definitions'
self.master = master
self._parsed_defs = yaml.safe_load(self.master)
self._etcd_cls = _DefinitionEtcd
def get_obj_definition(self, namespace, obj_name):
raw_ns = "namespace.%s" % namespace
raw_obj = self._get_parsed_defs()[raw_ns]['objects'][obj_name]
for atom_name, atom in raw_obj.get('atoms', {}).iteritems():
atom_mod = atom['run'].split(".atoms.")[-1].split(".")[0]
atom_fqdn = "%s.objects.%s.atoms.%s" % (namespace,
obj_name.lower(),
atom_mod)
atom_cls = getattr(importlib.import_module(atom_fqdn), atom_name)
tendrl_ns.add_atom(obj_name, atom_name, atom_cls)
for flow_name, flow in raw_obj.get('flows', {}).iteritems():
flow_mod = flow['run'].split(".flows.")[-1].split(".")[0]
flow_fqdn = "%s.objects.%s.flows.%s" % (namespace,
obj_name.lower(),
flow_mod)
flow_cls = getattr(importlib.import_module(flow_fqdn), flow_name)
tendrl_ns.add_obj_flow(obj_name, flow_name, flow_cls)
return ns.Namespace(attrs=raw_obj['attrs'],
enabled=raw_obj['enabled'],
obj_list=raw_obj.get('list', ""),
obj_value=raw_obj['value'],
atoms=raw_obj.get('atoms', {}),
flows=raw_obj.get('flows', {}),
help=raw_obj['help'])
def get_flow_definition(self, namespace, flow_name):
raw_ns = "namespace.%s" % namespace
raw_flow = self._get_parsed_defs()[raw_ns]['flows'][flow_name]
flow_mod = raw_flow['run'].split(".flows.")[-1].split(".")[0]
flow_fqdn = "%s.flows.%s" % (namespace, flow_mod)
flow_cls = getattr(importlib.import_module(flow_fqdn), flow_name)
tendrl_ns.add_flow(flow_name, flow_cls)
return ns.Namespace(atoms=raw_flow['atoms'],
help=raw_flow['help'],
enabled=raw_flow['enabled'],
inputs=raw_flow['inputs'],
pre_run=raw_flow.get('pre_run', []),
post_run=raw_flow.get('post_run', []),
type=raw_flow['type'],
uuid=raw_flow['uuid']
)
def _get_parsed_defs(self):
self._parsed_defs = yaml.safe_load(self.master)
return self._parsed_defs
class _DefinitionEtcd(etcdobj.EtcdObj):
"""A table of the Definitions, lazily updated
"""
__name__ = '_tendrl/definitions'
_tendrl_cls = Definition | Python | 0.000072 |
51ee19f41e6fc48d4791bde97c5d28d55d76cdf4 | Add brute force inplementation | solvers/BruteForce.py | solvers/BruteForce.py | #!/usr/bin/env python
# encoding: utf-8
from itertools import permutations
from base_solver import BaseSolver
class BruteForceSolver(BaseSolver):
    """Exhaustive solver: evaluates every ordering of the middle nodes."""

    def run_search(self):
        """Return (best_path, best_distance, orderings_examined).

        The path always starts at the task's start node and ends at its
        finish node; only the middle nodes are permuted.  Ties keep the
        first ordering found.
        """
        waypoint_names = [node.name for node in self.task.mid_nodes]

        best_path = None
        shortest = float('inf')
        examined = 0
        for ordering in permutations(waypoint_names):
            # Assemble the full candidate route and measure it.
            candidate = [self.task.start.name]
            candidate.extend(ordering)
            candidate.append(self.task.finish.name)
            length = self.task.get_path_distance(candidate)
            if length < shortest:
                shortest = length
                best_path = candidate
            examined += 1
        return best_path, shortest, examined
| Python | 0.999725 | |
46496d8761ae94a349ed3b592ec7ee7e0c7e1a15 | Remove unused import; add missing import | gitc_utils.py | gitc_utils.py | #
# Copyright (C) 2015 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import sys
import git_command
import git_config
# TODO (sbasi) - Remove this constant and fetch manifest dir from /gitc/.config
GITC_MANIFEST_DIR = '/usr/local/google/gitc/'
GITC_FS_ROOT_DIR = '/gitc/manifest-rw/'
NUM_BATCH_RETRIEVE_REVISIONID = 300
def _set_project_revisions(projects):
  """Sets the revisionExpr for a list of projects.

  Because of the limit of open file descriptors allowed, length of projects
  should not be overly large. Recommend calling this function multiple times
  with each call not exceeding NUM_BATCH_RETRIEVE_REVISIONID projects.

  @param projects: List of project objects to set the revisionExpr for.
  """
  # Retrieve the commit id for each project based off of its current
  # revisionExpr, skipping projects whose revisionExpr is already an id.
  project_gitcmds = [(
      project, git_command.GitCommand(None,
                                      ['ls-remote',
                                       project.remote.url,
                                       project.revisionExpr],
                                      capture_stdout=True, cwd='/tmp'))
      for project in projects if not git_config.IsId(project.revisionExpr)]
  for proj, gitcmd in project_gitcmds:
    if gitcmd.Wait():
      # Bug fix: this previously printed `project` (the list-comprehension
      # variable, which leaks only on Python 2 and would be a NameError on
      # Python 3) instead of the failing `proj`.
      print('FATAL: Failed to retrieve revisionExpr for %s' % proj)
      sys.exit(1)
    # ls-remote output is "<sha>\t<ref>"; keep the sha.
    proj.revisionExpr = gitcmd.stdout.split('\t')[0]
def generate_gitc_manifest(client_dir, manifest):
  """Generate a manifest for shafsd to use for this GITC client.

  Fetches a concrete revision SHA for every project (in batches of
  NUM_BATCH_RETRIEVE_REVISIONID, to stay under the open-file-descriptor
  limit) and writes the result to <client_dir>/.manifest.

  @param client_dir: GITC client directory to install the .manifest file in.
  @param manifest: XmlManifest object representing the repo manifest.
  """
  print('Generating GITC Manifest by fetching revision SHAs for each '
        'project.')
  # (Removed unused local `project_gitcmd_dict` from the original.)
  index = 0
  while index < len(manifest.projects):
    _set_project_revisions(
        manifest.projects[index:(index + NUM_BATCH_RETRIEVE_REVISIONID)])
    index += NUM_BATCH_RETRIEVE_REVISIONID
  # Save the manifest.
  with open(os.path.join(client_dir, '.manifest'), 'w') as f:
    manifest.Save(f)
| #
# Copyright (C) 2015 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import shutil
import git_command
import git_config
# TODO (sbasi) - Remove this constant and fetch manifest dir from /gitc/.config
GITC_MANIFEST_DIR = '/usr/local/google/gitc/'
GITC_FS_ROOT_DIR = '/gitc/manifest-rw/'
NUM_BATCH_RETRIEVE_REVISIONID = 300
def _set_project_revisions(projects):
"""Sets the revisionExpr for a list of projects.
Because of the limit of open file descriptors allowed, length of projects
should not be overly large. Recommend calling this function multiple times
with each call not exceeding NUM_BATCH_RETRIEVE_REVISIONID projects.
@param projects: List of project objects to set the revionExpr for.
"""
# Retrieve the commit id for each project based off of it's current
# revisionExpr and it is not already a commit id.
project_gitcmds = [(
project, git_command.GitCommand(None,
['ls-remote',
project.remote.url,
project.revisionExpr],
capture_stdout=True, cwd='/tmp'))
for project in projects if not git_config.IsId(project.revisionExpr)]
for proj, gitcmd in project_gitcmds:
if gitcmd.Wait():
print('FATAL: Failed to retrieve revisionExpr for %s' % project)
sys.exit(1)
proj.revisionExpr = gitcmd.stdout.split('\t')[0]
def generate_gitc_manifest(client_dir, manifest):
"""Generate a manifest for shafsd to use for this GITC client.
@param client_dir: GITC client directory to install the .manifest file in.
@param manifest: XmlManifest object representing the repo manifest.
"""
print('Generating GITC Manifest by fetching revision SHAs for each '
'project.')
project_gitcmd_dict = {}
index = 0
while index < len(manifest.projects):
_set_project_revisions(
manifest.projects[index:(index+NUM_BATCH_RETRIEVE_REVISIONID)])
index += NUM_BATCH_RETRIEVE_REVISIONID
# Save the manifest.
with open(os.path.join(client_dir, '.manifest'), 'w') as f:
manifest.Save(f)
| Python | 0.000007 |
4dfc0c49cec86f3c03b90fa66e1fc9de2ac665e6 | Add migration file (fix fields) | samples/migrations/0012_auto_20170512_1138.py | samples/migrations/0012_auto_20170512_1138.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-05-12 14:38
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('samples', '0011_fluvaccine_date_applied'),
]
operations = [
migrations.AlterField(
model_name='collectedsample',
name='collection_date',
field=models.DateField(blank=True, null=True, verbose_name='Data de coleta'),
),
migrations.AlterField(
model_name='fluvaccine',
name='date_applied',
field=models.DateField(blank=True, null=True, verbose_name='Data de aplicação'),
),
]
| Python | 0 | |
947551083798e3125cf0782df44cc18728c6bca4 | test messages | src/eduid_webapp/security/tests/test_msgs.py | src/eduid_webapp/security/tests/test_msgs.py | # -*- coding: utf-8 -*-
import unittest
from eduid_webapp.security.helpers import SecurityMsg
class MessagesTests(unittest.TestCase):
def test_messages(self):
""""""
self.assertEqual(str(SecurityMsg.out_of_sync.value), 'user-out-of-sync')
self.assertEqual(str(SecurityMsg.stale_reauthn.value), 'security.stale_authn_info')
self.assertEqual(str(SecurityMsg.rm_verified.value), 'nins.verified_no_rm')
self.assertEqual(str(SecurityMsg.rm_success.value), 'nins.success_removal')
self.assertEqual(str(SecurityMsg.temp_problem.value), 'Temporary technical problems')
self.assertEqual(str(SecurityMsg.already_exists.value), 'nins.already_exists')
self.assertEqual(str(SecurityMsg.add_success.value), 'nins.successfully_added')
self.assertEqual(str(SecurityMsg.max_tokens.value), 'security.u2f.max_allowed_tokens')
self.assertEqual(str(SecurityMsg.max_webauthn.value), 'security.webauthn.max_allowed_tokens')
self.assertEqual(str(SecurityMsg.missing_data.value), 'security.u2f.missing_enrollment_data')
self.assertEqual(str(SecurityMsg.u2f_registered.value), 'security.u2f_register_success')
self.assertEqual(str(SecurityMsg.no_u2f.value), 'security.u2f.no_token_found')
self.assertEqual(str(SecurityMsg.no_challenge.value), 'security.u2f.missing_challenge_data')
self.assertEqual(str(SecurityMsg.no_token.value), 'security.u2f.missing_token')
self.assertEqual(str(SecurityMsg.long_desc.value), 'security.u2f.description_to_long')
self.assertEqual(str(SecurityMsg.rm_u2f_success.value), 'security.u2f-token-removed')
self.assertEqual(str(SecurityMsg.no_pdata.value), 'security.webauthn-missing-pdata')
self.assertEqual(str(SecurityMsg.webauthn_success.value), 'security.webauthn_register_success')
self.assertEqual(str(SecurityMsg.no_last.value), 'security.webauthn-noremove-last')
self.assertEqual(str(SecurityMsg.rm_webauthn.value), 'security.webauthn-token-removed')
self.assertEqual(str(SecurityMsg.no_webauthn.value), 'security.webauthn-token-notfound')
| Python | 0.000004 | |
fcfb84838c7bb111fb9710f4984767b2233caed3 | test commit | test.py | test.py | print("Content-Type: text/plain")
print("")
print("Fuck you")
| Python | 0.000002 | |
f4f3429d157988d4823f20d5155b951f8471fb1b | Fix test app | test.py | test.py |
def app(environ, start_response):
"""Simplest possible application object"""
data = 'Hello, World!\n'
status = '200 OK'
response_headers = [
('Content-type','text/plain'),
('Content-Length', len(data))
]
start_response(status, response_headers)
return [data]
|
from gunicorn.httpserver import WSGIServer
def app(environ, start_response):
"""Simplest possible application object"""
data = 'Hello, World!\n'
status = '200 OK'
response_headers = [
('Content-type','text/plain'),
('Content-Length', len(data))
]
start_response(status, response_headers)
return [data]
if __name__ == '__main__':
server = WSGIServer(("127.0.0.1", 8000), 1, simple_app)
server.run() | Python | 0.000013 |
bc0aa69adc5b1e290941c221ddd498d3fb92244e | Add simple recipe tagger experiment | test.py | test.py | import nltk
from nltk.classify import MaxentClassifier
# Set up our training material in a nice dictionary.
training = {
'ingredients': [
'Pastry for 9-inch tart pan',
'Apple cider vinegar',
'3 eggs',
'1/4 cup sugar',
],
'steps': [
'Sift the powdered sugar and cocoa powder together.',
'Coarsely crush the peppercorns using a mortar and pestle.',
'While the vegetables are cooking, scrub the pig ears clean and cut away any knobby bits of cartilage so they will lie flat.',
'Heat the oven to 375 degrees.',
],
}
# Set up a list that will contain all of our tagged examples,
# which we will pass into the classifier at the end.
training_set = []
for key, val in training.items():
for i in val:
# Set up a list we can use for all of our features,
# which are just individual words in this case.
features = []
# Before we can tokenize words, we need to break the
# text out into sentences.
sentences = nltk.sent_tokenize(i)
for sentence in sentences:
features = features + nltk.word_tokenize(sentence)
# For this example, it's a good idea to normalize for case.
# You may or may not need to do this.
features = [i.lower() for i in features]
# Each feature needs a value. A typical use for a case like this
# is to use True or 1, though you can use almost any value for
# a more complicated application or analysis.
features = dict([(i, True) for i in features])
# NLTK expects you to feed a classifier a list of tuples
# where each tuple is (features, tag).
training_set.append((features, key))
def classify(s):
p = classifier.prob_classify(s)
import json
print("%s\n >>> %s, %s\n" % (json.dumps(s), p.max(), p.prob(p.max())))
return (p.max(), p.prob(p.max()))
# Train up our classifier
# TODO: get http://www.umiacs.umd.edu/~hal/megam/version0_91/ working
classifier = MaxentClassifier.train(training_set)
print()
print()
# Test it out!
# You need to feed the classifier your data in the same format you used
# to train it, in this case individual lowercase words.
classify({'apple': True, 'cider': True, 'vinegar': True, 'cocoa': True})
classify({'heat': True, 'oven': True})
classify({'prepare': True, 'oven': True})
classify({'nothing': True})
| Python | 0.000039 | |
5d9200298ab660bee79d7958f8e155023893be08 | Change author | l10n_cr_account_banking_cr_bcr/__openerp__.py | l10n_cr_account_banking_cr_bcr/__openerp__.py | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Addons modules by CLEARCORP S.A.
# Copyright (C) 2009-TODAY CLEARCORP S.A. (<http://clearcorp.co.cr>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'BCR Account Banking',
'version': '0.1',
'license': 'AGPL-3',
'author': 'ClearCorp',
'website': 'http://www.clearcorp.co.cr',
'category': 'Accounting & Finance',
'depends': [
'account_banking_ccorp_dg',
],
'init_xml': [],
'update_xml': [],
'demo_xml': [],
'description': '',
'active': False,
'installable': True,
}
| # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Addons modules by CLEARCORP S.A.
# Copyright (C) 2009-TODAY CLEARCORP S.A. (<http://clearcorp.co.cr>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'BCR Account Banking',
'version': '0.1',
'license': 'AGPL-3',
'author': 'CLEARCORP S.A.',
'website': 'http://www.clearcorp.co.cr',
'category': 'Accounting & Finance',
'depends': [
'account_banking_ccorp',
],
'init_xml': [],
'update_xml': [],
'demo_xml': [],
'description': '',
'active': False,
'installable': True,
}
| Python | 0.000003 |
ab458e10742897c692e3d4e4066ed193e141e258 | add filterfuncs module | filterfuncs.py | filterfuncs.py | from tools import pipeline_helpers
import pandas as pd
def run1(infile, features_label, output_label):
"""
Handle variant data by only keeping rows where 10-90% of samples have
variants.
For CNV data, don't do any filtering.
Otherwise, simply remove rows with zero variance.
"""
if (features_label == 'exome_variants' or 'variants' in output_label):
d = pipeline_helpers.remove_nfrac_variants(infile, nfrac=0.1)
elif features_label == 'cnv':
return pd.read_table(infile, index_col=0)
else:
d = pipeline_helpers.remove_zero_variance(infile)
return d
| Python | 0.000001 | |
b9d5e015b291f27becc682f05a12ec5c6a0cf467 | Implement module to create new pads on collabedit.com. | gygax/modules/pad.py | gygax/modules/pad.py | # -*- coding: utf-8 -*-
"""
:mod:`gygax.modules.pad` --- Module for creating pads on collabedit.com
=======================================================================
"""
from http import client
from gygax.modules import admin
def pad(bot, sender, text):
if not admin.is_admin(sender):
bot.reply("unauthorized")
return
# We can't use urllib, because collabedit uses weird redirects which make
# urllib think we are redirected in an endless loop.
conn = client.HTTPConnection("collabedit.com")
conn.request("GET", "/new")
r1 = conn.getresponse()
if r1.status != 302:
raise Exception("GET /new returned {} {}".format(r1.status, r1.reason))
headers = {"Cookie": r1.getheader("Set-Cookie").split(";")[0]}
r1.read() # Read the response body so we can make a new request.
conn.request("GET", r1.getheader("Location"), headers=headers)
r2 = conn.getresponse()
if r2.status != 302:
raise Exception("GET {} returned {} {}".format(
r1.getheader("Location"), r2.status, r2.reason))
bot.reply("http://collabedit.com{}".format(r2.getheader("Location")))
conn.close()
pad.command = ".pad"
| Python | 0 | |
7cc86a96427cc35824960c01d84fbe8d45364670 | Add admin page for User | helios_auth/admin.py | helios_auth/admin.py | from django.contrib import admin
from helios.models import User
class UserAdmin(admin.ModelAdmin):
exclude = ('info', 'token')
admin.site.register(User, UserAdmin) | Python | 0.000143 | |
e99700ff985e9821faf390ca6070a0c879eafc20 | Add perkeyavg python example | src/python/PerKeyAvg.py | src/python/PerKeyAvg.py | """
>>> from pyspark.context import SparkContext
>>> sc = SparkContext('local', 'test')
>>> b = sc.parallelize([("coffee", 1), ("pandas", 2), ("coffee", 3), ("very", 4)])
>>> perKeyAvg(b)
"""
import sys
from pyspark import SparkContext
def perKeyAvg(nums):
"""Compute the avg"""
sumCount = nums.combineByKey((lambda x: (x,1)),
(lambda x, y: (x[0] + y, x[1] + 1)),
(lambda x, y: (x[0] + y[0], x[1] + y[1])))
return sumCount.collectAsMap()
if __name__ == "__main__":
master = "local"
if len(sys.argv) == 2:
master = sys.argv[1]
sc = SparkContext(master, "Sum")
nums = sc.parallelize([("coffee", 1), ("pandas", 2), ("coffee", 3), ("very", 4)])
avg = perKeyAvg(nums)
print avg
| Python | 0.000001 | |
a640bf45c4fb8829888f664e48058d6647473449 | Fix migrations | lowfat/migrations/0113_merge_20171103_0948.py | lowfat/migrations/0113_merge_20171103_0948.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-11-03 09:48
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('lowfat', '0112_auto_20171031_1133'),
('lowfat', '0111_auto_20171009_0933'),
]
operations = [
]
| Python | 0.000006 | |
8d2c141ac6c2d1772561d36c38cbbf8140abd9db | Add day 12. | day_12.py | day_12.py | """
http://adventofcode.com/day/11
--- Day 12: JSAbacusFramework.io ---
Santa's Accounting-Elves need help balancing the books after a recent order.
Unfortunately, their accounting software uses a peculiar storage format. That's
where you come in.
They have a JSON document which contains a variety of things: arrays ([1,2,3]),
objects ({"a":1, "b":2}), numbers, and strings. Your first job is to simply find
all of the numbers throughout the document and add them together.
For example:
- [1,2,3] and {"a":2,"b":4} both have a sum of 6.
- [[[3]]] and {"a":{"b":4},"c":-1} both have a sum of 3.
- {"a":[-1,1]} and [-1,{"a":1}] both have a sum of 0.
- [] and {} both have a sum of 0.
You will not encounter any strings containing numbers.
What is the sum of all numbers in the document?
--- Part Two ---
Uh oh - the Accounting-Elves have realized that they double-counted everything
red.
Ignore any object (and all of its children) which has any property with the
value "red". Do this only for objects {...}), not arrays ([...]).
- [1,2,3] still has a sum of 6.
- [1,{"c":"red","b":2},3] now has a sum of 4, because the middle object is
ignored.
- {"d":"red","e":[1,2,3,4],"f":5} now has a sum of 0, because the entire
structure is ignored.
- [1,"red",5] has a sum of 6, because "red" in an array has no effect.
"""
import json
import re
def test_sum_numbers():
assert sum_numbers([1, 2, 3]) == 6
assert sum_numbers({"a": 2, "b": 4}) == 6
assert sum_numbers([[[3]]]) == 3
assert sum_numbers({"a": {"b": 4}, "c": -1}) == 3
assert sum_numbers({"a": [-1, 1]}) == 0
assert sum_numbers([-1, {"a": 1}]) == 0
assert sum_numbers([]) == 0
assert sum_numbers({}) == 0
def test_sum_non_reds():
assert sum_non_reds([1, 2, 3]) == 6
assert sum_non_reds([1, {"c": "red", "b": 2}, 3]) == 4
assert sum_non_reds({"d": "red", "e": [1, 2, 3, 4], "f": 5}) == 0
assert sum_non_reds({"d": "red", "e": [1, 2, {}, 3, 4], "f": 5}) == 0
assert sum_non_reds([1, "red", 5]) == 6
def sum_numbers(s):
return sum(int(i) for i in re.findall(r"(-?\d+)", str(s)))
def sum_non_reds(s):
if isinstance(s, int):
return s
if isinstance(s, list):
return sum(sum_non_reds(i) for i in s)
elif isinstance(s, dict):
if "red" in s.values():
return 0
else:
return sum(sum_non_reds(i) for i in s.values())
return 0
def part_one():
with open("inputs/day_12_input.txt") as fin:
print(sum_numbers(fin.read()))
def part_two():
with open("inputs/day_12_input.txt") as fin:
print(sum_non_reds(json.load(fin)))
if __name__ == "__main__":
part_one()
part_two()
| Python | 0.000572 | |
946c5b14ec95af2e4dde406e94a50e7d5cdc1502 | Create BalanceData.py | BalanceData.py | BalanceData.py | #Copyright (c) 2016 Vidhya, Nandini
import os
import numpy as np
import operator
from constants import *
FIX_DEV = 0.00000001
rootdir = os.getcwd()
newdir = os.path.join(rootdir,'featurefiles')
def LoadData():
data_file = open(os.path.join(newdir,'out_2.txt'),'r')
unprocessed_data = data_file.readlines()
labels ={}
features ={}
for line in unprocessed_data:
feature_vector = []
split_line = line.split(' ')
for element in split_line[1:-1]:
feature_vector.append(float(element))
track_id = split_line[0]
features[track_id] = feature_vector
data_file.close()
label_file = open(os.path.join(newdir,'labelout.txt'),'r')
label_data = label_file.readlines()
for line in label_data:
split_line = line.split('\t')
track_id = split_line[0]
#print (track_id)
if track_id in features:
labels[split_line[0]] = split_line[1].split('\n')[0]
label_file.close()
for key in features:
feature = features[key]
label = labels[key]
# print feature, label
return features, labels
def writeToFile(key,feature,fp):
fp1 = open(fp,'a')
line = key
for s in feature:
line+= " %f" %float(s)
line+="\n"
fp1.write(line)
def BalanceData(features, labels):
if not os.path.exists('train'):
os.makedirs('train')
traindir = os.path.join(rootdir,'train')
if not os.path.exists('test'):
os.makedirs('test')
testdir = os.path.join(rootdir,'test')
count =0
testFile = open(os.path.join(testdir,'testFile'),'w+')
genreFeat={}
numList ={}
testFeat = {}
trainFeat ={}
genreTestFeat ={}
for genre in genres:
str1 = genre+'.txt'
fout = open(os.path.join(traindir,str1),'w+')
#print fout
delKey =[]
feature_list =[]
test_list =[]
subcount=0
for key in features:
if labels[key] == genre:
delKey.append(key)
subcount=subcount+1
fout.close()
count = count+ subcount
numList[genre] = subcount/2
if subcount != 0:
for key in delKey[:subcount/2]:
trainFeat[key] = features[key]
trainFeat[key].append(key)
feature_list.append(trainFeat[key])
#writeToFile(key, features[key], os.path.join(traindir,str1))
genreFeat[genre] = feature_list
for key in delKey[subcount/2:]:
testFeat[key] = features[key]
testFeat[key].append(key)
test_list.append(testFeat[key])
#writeToFile(key,features[key], os.path.join(testdir,'testFile'))
genreTestFeat[genre] = test_list
for key in delKey:
del features[key]
return genreFeat, numList, count, genreTestFeat
def ConvertToArrays(feats):
features =[]
labels = []
keys = []
for genre in feats:
#print genre
for f in feats[genre]:
features.append(f[:-1])
keys.append(f[-1])
#print features
#input("press enter")
labels.append(genre)
return np.asarray(features), np.asarray(labels), np.asarray(keys)
def GetData():
features, labels =LoadData()
genreFeat,countGenre, count, genreTestFeat = BalanceData(features, labels)
train_features, train_labels, train_keys = ConvertToArrays(genreFeat)
test_features, test_labels, test_keys = ConvertToArrays(genreTestFeat)
return train_features, train_labels, test_features, test_labels, test_keys
| Python | 0.000001 | |
3811bf52733bfbac7e5720f860cced216b530963 | Add a Theme object | src/theme.py | src/theme.py | # This module is part of the GeoTag-X project builder.
# Copyright (C) 2015 UNITAR.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
class Theme:
path = None
assets = {
"core":{
"js":None,
"css":None
},
"geolocation":{
"js":None,
"css":None
},
"datetime":{
"js":None,
"css":None
},
}
template = None
def __init__(self, path):
"""__init__(path:string)
Instantiates a Theme object from the content of the directory located at
the specified path.
"""
import os
from jinja2 import Environment, FileSystemLoader
valid, message = Theme.isvalidpath(path)
if not valid:
raise Exception(message)
self.path = path
self.template = Environment(loader=FileSystemLoader(searchpath=os.path.join(self.path, "templates"))).get_template("base.html")
def getasset(self, name):
"""getasset(name:string)
Returns the set of assets contained in the bundle with the specified name.
"""
css, js = "", ""
name = name.strip()
bundle = self.assets.get(name, None)
if bundle is None:
print "[Theme::getasset] Warning! Unknown asset bundle '%s'." % name
else:
# If any assets have not been loaded into memory, do so.
empties = filter(lambda item: item[1] is None, bundle.iteritems())
if len(empties) > 0:
import os
basefile = os.path.join(self.path, *["assets", "bundles", "asset.bundle.%s" % name])
for key, _ in empties:
filepath = "%s.%s" % (basefile, key)
try:
with open(filepath, "r") as file:
bundle[key] = file.read()
except IOError:
# If a theme does not contain a specified asset, set its
# value to an empty string. Leaving it as 'None' means
# the script will keep searching for the missing file.
bundle[key] = ""
css = bundle["css"]
js = bundle["js"]
return css, js
def getassets(self, bundles=set()):
"""getassets(bundles:set)
Returns the themes JS and CSS assets, based on the specified bundles.
"""
try: assert type(bundles) is set, "[Theme::getassets] Error! 'bundles' parameter is not a set!"
except AssertionError as error: raise Exception(error)
# The core bundle is always returned, however it is explicitly added to
# the bundle set, it needs to be removed or it will be concatenated to
# the result twice.
if "core" in bundles:
bundles.remove("core")
css, js = self.getasset("core")
for bundle in bundles:
css_, js_ = self.getasset(bundle)
if len(css_) > 0: css += css_
if len(js_) > 0: js += ";" + js_ # ';' makes sure statements between concatenated scripts are separated.
return css, js
@staticmethod
def isvalidpath(path):
"""isvalidpath(path:string)
Returns true if the specified path contains a valid theme, false otherwise.
"""
return (True, None)
| Python | 0.000001 | |
7bace5ca301124f03d7ff98669ac08c0c32da55f | Add example OOP python script | labs/lab-5/oop.py | labs/lab-5/oop.py | #!/usr/bin/python
#
# Copyright 2016 BMC Software, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
class Animal(object):
def __init__(self):
self.voice = "???"
def speak(self):
print('A {0} says "{1}"'.format(self.__class__.__name__, self.voice))
class Cat(Animal):
def __init__(self):
super(Cat, self).__init__()
self.voice = 'Meow!'
class Dog(Animal):
def __init__(self):
super(Dog, self).__init__()
self.voice = 'Woof!'
if __name__ == '__main__':
animal = Animal()
animal.speak()
cat = Cat()
cat.speak()
dog = Dog()
dog.speak()
| Python | 0.000005 | |
5f2533e090e181d84c3e5567131447aa4326773a | Add libx11 package | libx11/conanfile.py | libx11/conanfile.py | from conans import ConanFile, AutoToolsBuildEnvironment, tools
import os
class Libx11Conan(ConanFile):
name = "libx11"
version = "1.6.5"
license = "Custom https://cgit.freedesktop.org/xorg/lib/libX11/tree/COPYING"
url = "https://github.com/trigger-happy/conan-packages"
description = "X11 client-side library"
settings = "os", "compiler", "build_type", "arch"
options = {"shared": [True, False]}
default_options = "shared=False"
generators = "cmake"
def source(self):
pkgLink = 'https://xorg.freedesktop.org/releases/individual/lib/libX11-{version}.tar.bz2'.format(version=self.version)
self.run("curl -JOL " + pkgLink)
self.run("tar xf libX11-{version}.tar.bz2".format(version=self.version))
def build(self):
envBuild = AutoToolsBuildEnvironment(self)
installPrefix=os.getcwd()
with tools.chdir("libX11-{version}".format(version=self.version)):
with tools.environment_append(envBuild.vars):
self.run("./configure --prefix={0} --disable-xf86bigfont".format(installPrefix))
self.run("make install")
def package(self):
self.copy("lib/*", dst=".", keep_path=True)
self.copy("include/*", dst=".", keep_path=True)
def package_info(self):
self.cpp_info.libs = ["X11", "X11-xcb"]
| Python | 0.000001 | |
e972bb5127b231bfbdf021597f5c9a32bb6e21c8 | Create gametesting.py | gametesting.py | gametesting.py | Python | 0 | ||
a83a48f6c9276b86c3cc13aeb000611036a6e3c4 | Make all end-points accepting post | jedihttp/handlers.py | jedihttp/handlers.py | import bottle
from bottle import response, request
import json
import jedi
import logging
app = bottle.Bottle( __name__ )
logger = logging.getLogger( __name__ )
@app.post( '/healthy' )
def healthy():
return _Json({})
@app.post( '/ready' )
def ready():
return _Json({})
@app.post( '/completions' )
def completion():
logger.info( 'received /completions request' )
script = _GetJediScript( request.json )
return _Json(
{
'completions': [ {
'name': completion.name,
'description': completion.description,
'docstring': completion.docstring(),
'module_path': completion.module_path,
'line': completion.line,
'column': completion.column
} for completion in script.completions() ]
} )
def _GetJediScript( request_data ):
source = request_data[ 'source' ]
line = request_data[ 'line' ]
col = request_data[ 'col' ]
path = request_data[ 'path' ]
return jedi.Script( source, line, col, path )
def _Json( data ):
response.content_type = 'application/json'
return json.dumps( data )
| import bottle
from bottle import response, request
import json
import jedi
import logging
app = bottle.Bottle( __name__ )
logger = logging.getLogger( __name__ )
@app.get( '/healthy' )
def healthy():
return _Json({})
@app.get( '/ready' )
def ready():
return _Json({})
@app.post( '/completions' )
def completion():
logger.info( 'received /completions request' )
script = _GetJediScript( request.json )
return _Json(
{
'completions': [ {
'name': completion.name,
'description': completion.description,
'docstring': completion.docstring(),
'module_path': completion.module_path,
'line': completion.line,
'column': completion.column
} for completion in script.completions() ]
} )
def _GetJediScript( request_data ):
source = request_data[ 'source' ]
line = request_data[ 'line' ]
col = request_data[ 'col' ]
path = request_data[ 'path' ]
return jedi.Script( source, line, col, path )
def _Json( data ):
response.content_type = 'application/json'
return json.dumps( data )
| Python | 0.000318 |
64e91bf4a7fb8dae8ae49db64396bdfed12bec63 | Add deploy script for pypi. | deploy.py | deploy.py | """
Deploy this package to PyPi.
If the package is already uploaded (by --version) then this will do nothing.
Reqires Python3.
"""
import http.client
import json
import subprocess
def setup(*args):
o = subprocess.check_output('python3 ./setup.py %s' % ' '.join(args),
shell=True)
return o.decode().rstrip()
name = setup('--name')
version = setup('--version')
print("Package:", name)
print("Version:", version)
print("Checking PyPi...")
piconn = http.client.HTTPSConnection('pypi.python.org')
piconn.request("GET", '/pypi/%s/json' % name)
piresp = piconn.getresponse()
if piresp.status != 200:
exit('PyPi Service Error: %s' % piresp.reason)
piinfo = json.loads(piresp.read().decode())
deployed_versions = list(piinfo['releases'].keys())
if version in deployed_versions:
print("PyPi is already up-to-date for:", version)
exit()
print(setup('sdist', 'upload'))
| Python | 0 | |
2dfa0dd815061497f89f9ca5e09fa62ea4dc23bf | fix issue #956 | launcher/start.py | launcher/start.py | #!/usr/bin/env python
# coding:utf-8
import os, sys
import time
import atexit
import webbrowser
import launcher_log
current_path = os.path.dirname(os.path.abspath(__file__))
python_path = os.path.abspath( os.path.join(current_path, os.pardir, 'python27', '1.0'))
noarch_lib = os.path.abspath( os.path.join(python_path, 'lib', 'noarch'))
sys.path.append(noarch_lib)
has_desktop = True
if sys.platform.startswith("linux"):
def X_is_running():
try:
from subprocess import Popen, PIPE
p = Popen(["xset", "-q"], stdout=PIPE, stderr=PIPE, shell=True)
p.communicate()
return p.returncode == 0
except:
return False
if X_is_running():
from gtk_tray import sys_tray
else:
from non_tray import sys_tray
has_desktop = False
elif sys.platform == "win32":
win32_lib = os.path.join(python_path, 'lib', 'win32')
sys.path.append(win32_lib)
from win_tray import sys_tray
elif sys.platform == "darwin":
darwin_lib = os.path.abspath( os.path.join(python_path, 'lib', 'darwin'))
sys.path.append(darwin_lib)
extra_lib = "/System/Library/Frameworks/Python.framework/Versions/2.7/Extras/lib/python/PyObjc"
sys.path.append(extra_lib)
try:
import mac_tray as sys_tray
except:
from non_tray import sys_tray
else:
print("detect platform fail:%s" % sys.platform)
from non_tray import sys_tray
has_desktop = False
import config
import web_control
import module_init
import update
import setup_win_python
def exit_handler():
print 'Stopping all modules before exit!'
module_init.stop_all()
web_control.stop()
atexit.register(exit_handler)
def main():
# change path to launcher
global __file__
__file__ = os.path.abspath(__file__)
if os.path.islink(__file__):
__file__ = getattr(os, 'readlink', lambda x: x)(__file__)
os.chdir(os.path.dirname(os.path.abspath(__file__)))
web_control.confirm_xxnet_exit()
setup_win_python.check_setup()
module_init.start_all_auto()
web_control.start()
if has_desktop and config.get(["modules", "launcher", "popup_webui"], 1) == 1:
webbrowser.open("http://127.0.0.1:8085/")
update.start()
if config.get(["modules", "launcher", "show_systray"], 1):
sys_tray.serve_forever()
else:
while True:
time.sleep(100)
module_init.stop_all()
sys.exit()
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt: # Ctrl + C on console
module_init.stop_all()
sys.exit
| #!/usr/bin/env python
# coding:utf-8
import os, sys
import time
import atexit
import webbrowser
import launcher_log
current_path = os.path.dirname(os.path.abspath(__file__))
python_path = os.path.abspath( os.path.join(current_path, os.pardir, 'python27', '1.0'))
noarch_lib = os.path.abspath( os.path.join(python_path, 'lib', 'noarch'))
sys.path.append(noarch_lib)
has_desktop = True
if sys.platform.startswith("linux"):
def X_is_running():
try:
from subprocess import Popen, PIPE
p = Popen(["xset", "-q"], stdout=PIPE, stderr=PIPE)
p.communicate()
return p.returncode == 0
except:
return False
if X_is_running():
from gtk_tray import sys_tray
else:
from non_tray import sys_tray
has_desktop = False
elif sys.platform == "win32":
win32_lib = os.path.join(python_path, 'lib', 'win32')
sys.path.append(win32_lib)
from win_tray import sys_tray
elif sys.platform == "darwin":
darwin_lib = os.path.abspath( os.path.join(python_path, 'lib', 'darwin'))
sys.path.append(darwin_lib)
extra_lib = "/System/Library/Frameworks/Python.framework/Versions/2.7/Extras/lib/python/PyObjc"
sys.path.append(extra_lib)
try:
import mac_tray as sys_tray
except:
from non_tray import sys_tray
else:
print("detect platform fail:%s" % sys.platform)
from non_tray import sys_tray
has_desktop = False
import config
import web_control
import module_init
import update
import setup_win_python
def exit_handler():
    # atexit hook: shut down all modules and the web control UI when the
    # interpreter exits, however it exits.
    print 'Stopping all modules before exit!'
    module_init.stop_all()
    web_control.stop()
atexit.register(exit_handler)
def main():
    # change path to launcher
    global __file__
    __file__ = os.path.abspath(__file__)
    if os.path.islink(__file__):
        # Resolve a symlinked launcher; getattr guards platforms
        # without os.readlink (e.g. old Windows Pythons).
        __file__ = getattr(os, 'readlink', lambda x: x)(__file__)
    os.chdir(os.path.dirname(os.path.abspath(__file__)))
    # Ask any already-running instance to exit before taking over.
    web_control.confirm_xxnet_exit()
    setup_win_python.check_setup()
    module_init.start_all_auto()
    web_control.start()
    # Pop the local web UI in a browser when a desktop session exists.
    if has_desktop and config.get(["modules", "launcher", "popup_webui"], 1) == 1:
        webbrowser.open("http://127.0.0.1:8085/")
    update.start()
    # Either block in the systray event loop or just sleep forever.
    if config.get(["modules", "launcher", "show_systray"], 1):
        sys_tray.serve_forever()
    else:
        while True:
            time.sleep(100)
    module_init.stop_all()
    sys.exit()
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt: # Ctrl + C on console
module_init.stop_all()
sys.exit
| Python | 0 |
03137f65202f5423ea705db601aaea7f18c590f9 | add the main file | lexos/__main__.py | lexos/__main__.py | """Module allowing for ``python -m lexos ...``."""
from lexos import application
application.run()
| Python | 0.000001 | |
370f5a87ac8d26245b5919fc98b24019861f4dde | Add missing test | tests/test_fetch_site_logs_from_ftp_sites.py | tests/test_fetch_site_logs_from_ftp_sites.py | import pytest
import os
from fetch_site_logs_from_ftp_sites import gws_list_site_logs
def test_get_gws_site_logs():
    # Integration test: hits the real test web service (needs network
    # access) and asserts it reports a reasonably large site-log count.
    os.environ['gws_url'] = 'https://testgeodesy-webservices.geodesy.ga.gov.au'
    assert len(gws_list_site_logs()) > 1000
| Python | 0.000383 | |
0774413ae3623c28a8aaf77727d0c355f6a5bd7c | Add deezer_complete core plugin #146 | timeside/plugins/provider/deezer_complete.py | timeside/plugins/provider/deezer_complete.py | from timeside.core import implements, interfacedoc
from timeside.core.provider import Provider
from timeside.core.api import IProvider
from timeside.core.tools.utils import slugify
import os
from requests import get
class DeezerComplete(Provider):
    """
    Represents Deezer Provider while loading results
    computed on complete tracks on Deezer's infrastructure
    """
    implements(IProvider)
    @staticmethod
    @interfacedoc
    def id():
        # Unique identifier used by timeside's provider registry.
        return 'deezer_complete'
    @staticmethod
    @interfacedoc
    def name():
        # Human-readable provider name.
        return "Deezer Complete"
    @staticmethod
    @interfacedoc
    def ressource_access():
        # False: no local audio access; results come from Deezer's side.
        # NOTE(review): "ressource" spelling is part of the IProvider API.
        return False
    def get_source_from_id(self, external_id, path, download=False):
        # Complete tracks are never available locally, so no source path.
        return ''
    def get_source_from_url(self, url, path, download=False):
        # Complete tracks are never available locally, so no source path.
        return ''
    def get_id_from_url(self, url):
        # The track id is the last path component of the Deezer URL.
        return url.split("/")[-1:][0]
| Python | 0 | |
fa8032792f208e2693f8f6a4693ba9af084de935 | use path | src/robotide/spec/librarymanager.py | src/robotide/spec/librarymanager.py | # Copyright 2008-2012 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from Queue import Queue
import os
from threading import Thread
from robotide.spec.librarydatabase import LibraryDatabase
from robotide.spec.libraryfetcher import _get_import_result_from_process
from robotide.spec.xmlreaders import _init_from_spec, _get_path
class LibraryManager(Thread):
def __init__(self, database_name):
self._database_name = database_name
self._messages = Queue()
Thread.__init__(self)
self.setDaemon(True)
def run(self):
self._initiate_database_connection()
while True:
if not self._handle_message():
break
self._database.close()
def _initiate_database_connection(self):
self._database = LibraryDatabase(self._database_name)
def _handle_message(self):
message = self._messages.get()
if not message:
return False
type = message[0]
if type == 'fetch':
self._handle_fetch_keywords_message(message)
return True
def _handle_fetch_keywords_message(self, message):
_, library_name, library_args, callback = message
try:
path =_get_path(library_name.replace('/', os.sep), os.path.abspath('.'))
keywords = _get_import_result_from_process(path, library_args)
except ImportError:
keywords = _init_from_spec(library_name)
self._update_database_and_call_callback_if_needed((library_name, library_args), keywords, callback)
def _update_database_and_call_callback_if_needed(self, library_key, keywords, callback):
db_keywords = self._database.fetch_library_keywords(*library_key)
if not db_keywords or self._keywords_differ(keywords, db_keywords):
self._database.insert_library_keywords(library_key[0], library_key[1], keywords)
self._call(callback, keywords)
else:
self._database.update_library_timestamp(*library_key)
def _call(self, callback, *args):
try:
callback(*args)
except Exception:
pass
def fetch_keywords(self, library_name, library_args, callback):
self._messages.put(('fetch', library_name, library_args, callback))
def stop(self):
self._messages.put(False)
def _keywords_differ(self, keywords1, keywords2):
if keywords1 != keywords2 and None in (keywords1, keywords2):
return True
if len(keywords1) != len(keywords2):
return True
for k1, k2 in zip(keywords1, keywords2):
if k1.name != k2.name:
return True
if k1.doc != k2.doc:
return True
if k1.arguments != k2.arguments:
return True
if k1.source != k2.source:
return True
return False
| # Copyright 2008-2012 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from Queue import Queue
from threading import Thread
from robotide.spec.librarydatabase import LibraryDatabase
from robotide.spec.libraryfetcher import _get_import_result_from_process
from robotide.spec.xmlreaders import _init_from_spec
class LibraryManager(Thread):
def __init__(self, database_name):
self._database_name = database_name
self._messages = Queue()
Thread.__init__(self)
self.setDaemon(True)
def run(self):
self._initiate_database_connection()
while True:
if not self._handle_message():
break
self._database.close()
def _initiate_database_connection(self):
self._database = LibraryDatabase(self._database_name)
def _handle_message(self):
message = self._messages.get()
if not message:
return False
type = message[0]
if type == 'fetch':
self._handle_fetch_keywords_message(message)
return True
def _handle_fetch_keywords_message(self, message):
_, library_name, library_args, callback = message
try:
keywords = _get_import_result_from_process(library_name, library_args)
except ImportError:
keywords = _init_from_spec(library_name)
self._update_database_and_call_callback_if_needed((library_name, library_args), keywords, callback)
def _update_database_and_call_callback_if_needed(self, library_key, keywords, callback):
db_keywords = self._database.fetch_library_keywords(*library_key)
if not db_keywords or self._keywords_differ(keywords, db_keywords):
self._database.insert_library_keywords(library_key[0], library_key[1], keywords)
self._call(callback, keywords)
else:
self._database.update_library_timestamp(*library_key)
def _call(self, callback, *args):
try:
callback(*args)
except Exception:
pass
def fetch_keywords(self, library_name, library_args, callback):
self._messages.put(('fetch', library_name, library_args, callback))
def stop(self):
self._messages.put(False)
def _keywords_differ(self, keywords1, keywords2):
if keywords1 != keywords2 and None in (keywords1, keywords2):
return True
if len(keywords1) != len(keywords2):
return True
for k1, k2 in zip(keywords1, keywords2):
if k1.name != k2.name:
return True
if k1.doc != k2.doc:
return True
if k1.arguments != k2.arguments:
return True
if k1.source != k2.source:
return True
return False
| Python | 0.000005 |
02c06a544b1b6e4230a9b658540b360cc60c0bb5 | add cmstat.py | gist/cmstat.py | gist/cmstat.py | from __future__ import print_function
import sh
from collections import namedtuple
import os
import itertools
git = sh.git.bake()
NumStat = namedtuple('NumStat', ['insert', 'delete', 'filename'])
def getCommit(commit):
    """Return ``git log`` output with --numstat for one single commit.

    --no-pager      keep git from launching a pager that would block stdout
    -n 1            only show one commit (the ``commit^..`` form fails when
                    the commit has no parent)
    --color=never   plain output, safe to parse
    """
    opt = ('--numstat', '-n 1', '--color=never', '--pretty=%H')
    return git('--no-pager', 'log', commit, *opt)
def parseNumStat(cm):
    """Parse ``git log --numstat`` output into a list of NumStat tuples.

    Malformed lines and binary-file entries ('-' counts) are skipped.
    Returns [] when the output carries no numstat section at all.
    """
    sections = cm.split('\n\n')
    if len(sections) < 2:
        return []
    stats = []
    for raw in sections[1].split('\n'):
        fields = raw.split()
        if len(fields) < 3 or '-' in fields[:2]:
            continue
        stats.append(NumStat(int(fields[0]), int(fields[1]), fields[2]))
    return stats
def getStatLst():
    """Collect per-file numstat entries across all matching commits."""
    # rev-list yields one SHA per line for this author since 2014-01-01.
    cmlst = git('rev-list', 'HEAD', '--after=2014-01-01', '--author=liuyang')
    shalst = cmlst.split()
    stlst = []
    for sha in shalst:
        cm = getCommit(sha)
        ret = parseNumStat(cm)
        stlst.extend(ret)
    return stlst
def groupFileExt(numst):
    """Grouping key for a NumStat entry: the file extension, or the bare
    path when the file has no extension (e.g. 'Makefile')."""
    root, ext = os.path.splitext(numst.filename)
    return ext if ext else root
def main():
    """Print insert/delete/total line counts, overall and per extension."""
    stlst = getStatLst()
    a = sum([st.insert for st in stlst])
    b = sum([st.delete for st in stlst])
    print(a, b, a + b)
    # groupby only groups adjacent items, so pre-sort by the same key.
    stlst = sorted(stlst, key=groupFileExt)
    for ext, g in itertools.groupby(stlst, groupFileExt):
        g = list(g)
        aa = sum([st.insert for st in g])
        bb = sum([st.delete for st in g])
        print(ext, aa, bb, aa + bb)
if __name__ == "__main__":
main()
| Python | 0.000101 | |
e29a2107cd08e6b40b99e3682d783887107a5e77 | Add a loader to load several yaml files | populous/loader.py | populous/loader.py | import collections
import yaml
def load_yaml(*filenames):
    """
    Parse the given files as if they were a single YAML file.
    """
    # NOTE(review): yaml.load without an explicit Loader can execute
    # arbitrary tags; fine for trusted fixtures, unsafe on untrusted input.
    with ChainedFileObject(*filenames) as f:
        return yaml.load(f)
class ChainedFileObject(object):
    """
    A file-like object behaving as if all the given filenames were a single
    file.

    Note that you never get content from several files during a single call
    to ``read``, even if the requested buffer is longer than the remaining
    bytes in the current file. You have to call ``read`` again in order to
    get content from the next file.

    Can be used as a context manager (in a ``with`` statement).

    Example::

        >>> f = ChainedFileObject('foo.txt', 'bar.txt')
        >>> f.read()
        "I'm the content of foo.txt"
        >>> f.read(1024)
        "I'm the content of bar.txt"
        >>> f.read()
        ''
        >>> f.close()
    """

    def __init__(self, *filenames):
        # Files still to be opened, in order; ``current`` is the open handle.
        self.filenames = collections.deque(filenames)
        self.current = None
        self.nextfile()

    def __enter__(self):
        return self

    def __exit__(self, *args):
        return self.close()

    def nextfile(self):
        """Close the current file and open the next one (or set None)."""
        current = self.current
        self.current = None
        try:
            if current:
                current.close()
        finally:
            try:
                self.current = open(self.filenames.popleft())
            except IndexError:
                # No more files: subsequent read() calls return ''.
                self.current = None

    def read(self, n=None):
        """Read up to *n* bytes (or all remaining data when *n* is None)
        from the current file, advancing to the next file when exhausted.

        Returns '' only once every file has been consumed.
        """
        if not self.current:
            return ''
        # Bug fix: *n* used to be ignored, so read(n) returned the whole
        # remaining content of the current file instead of at most n bytes.
        if n is None:
            output = self.current.read()
        else:
            output = self.current.read(n)
        if not output:
            self.nextfile()
            return self.read(n)
        return output

    def close(self):
        """Close the current file handle, if any."""
        current = self.current
        self.current = None
        if current:
            current.close()
| Python | 0 | |
555cedf76d5f569b8a99691ed7dba672e578bb42 | Add admin integration for positions | positions/admin.py | positions/admin.py | from django.contrib import admin
from .models import Position
class PositionAdminIndex(admin.ModelAdmin):
    # Django admin config for Position: date-filtered, searchable list view.
    list_display = ['title', 'date']
    list_filter = ['date']
    search_fields = ['title', 'content']
admin.site.register(Position, PositionAdminIndex)
9dc39f6492d9ece3964d5cb733cc146acee7cf66 | Create w3_1.py | w3_1.py | w3_1.py | print("hello")
| Python | 0.000482 | |
31fcd83585905ca28245e42163c77af38f0c83cf | Create w3_1.py | w3_1.py | w3_1.py | print("test")
| Python | 0.000482 | |
d35f2d7310c277625ea6e2e15b887ac9620696a7 | Add unit test for glacier vault | tests/unit/glacier/test_vault.py | tests/unit/glacier/test_vault.py | #!/usr/bin/env python
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import unittest
from cStringIO import StringIO
import mock
from mock import ANY
from boto.glacier import vault
class TestVault(unittest.TestCase):
    def setUp(self):
        # Patch os.path.getsize so no real file is needed for the upload.
        self.size_patch = mock.patch('os.path.getsize')
        self.getsize = self.size_patch.start()
    def tearDown(self):
        self.size_patch.stop()
    def test_upload_archive_small_file(self):
        api = mock.Mock()
        v = vault.Vault(api, None)
        v.name = 'myvault'
        # Report a 1-byte file so the single-request upload path is taken.
        self.getsize.return_value = 1
        stringio = StringIO('content')
        m = mock.mock_open()
        m.return_value.read = stringio.read
        api.upload_archive.return_value = {'ArchiveId': 'archive_id'}
        # Patch the builtin open as seen from inside boto.glacier.vault.
        with mock.patch('boto.glacier.vault.open', m, create=True):
            archive_id = v.upload_archive('filename', 'my description')
        self.assertEqual(archive_id, 'archive_id')
        # Hash arguments are not pinned (ANY); only vault, file handle and
        # description are asserted exactly.
        api.upload_archive.assert_called_with('myvault', m.return_value, ANY,
                                              ANY, 'my description')
if __name__ == '__main__':
unittest.main()
| Python | 0 | |
d44f12ca4395c0001bbaf0cf0d5436a84484569c | Create fasta2nexus.py | biokit/converters/fasta2nexus.py | biokit/converters/fasta2nexus.py | from Bio import AlignIO
class Fasta2Nexus(object):
    """Convert a FASTA alignment file into NEXUS format with Biopython.

    Construct with the input FASTA path and output NEXUS path; the
    conversion runs when the instance is called.
    """
    def __init__(self, infile, outfile, *args, **kwargs):
        """Store input/output paths; extra arguments are ignored."""
        self.infile = infile
        self.outfile = outfile
    def __call__(self):
        """Perform the FASTA -> NEXUS conversion."""
        # Context managers guarantee both handles are closed even when
        # AlignIO raises (the original leaked them on error).
        # NOTE(review): "rU" mode is Python-2 era; removed in Python 3.11.
        with open(self.infile, "rU") as input_handle:
            with open(self.outfile, "w") as output_handle:
                alignments = AlignIO.parse(input_handle, "fasta")
                AlignIO.write(alignments, output_handle, "nexus")
| Python | 0.000001 | |
e8cd41a2151e5907aeaac685f5c78300a010ce7e | add sensu plugin to check eventanomaly | plugins/bongo/check-eventanomaly.py | plugins/bongo/check-eventanomaly.py | #!/usr/bin/env python
from optparse import OptionParser
import socket
import sys
import httplib
import json
PASS = 0
FAIL = 1
def get_bongo_host(server, app):
    """Resolve host:port of the first running task of *app* via Marathon.

    Exits the whole process with FAIL on any HTTP or parsing error
    (sensu plugin semantics).  Python 2 code (httplib, print statements).
    """
    try:
        con = httplib.HTTPConnection(server, timeout=45)
        con.request("GET","/v2/apps/" + app)
        data = con.getresponse()
        if data.status >= 300:
            print "get_bongo_host: Recieved non-2xx response= %s" % (data.status)
            sys.exit(FAIL)
        json_data = json.loads(data.read())
        # First task only; assumes the app exposes at least one task/port.
        host = "%s:%s" % (json_data['app']['tasks'][0]['host'],json_data['app']['tasks'][0]['ports'][0])
        con.close()
        return host
    except Exception, e:
        print "get_bongo_host: %s :exception caught" % (e)
        sys.exit(FAIL)
def get_status(host, group, time):
    """Fetch the event-drop status for *group* over *time* from Bongo and
    exit with the status code the service reports (sensu semantics)."""
    try:
        con = httplib.HTTPConnection(host,timeout=45)
        con.request("GET","/v1/eventdrop/" + group + "/" + time)
        data = con.getresponse()
        if data.status >= 300:
            print "get_status: Recieved non-2xx response= %s" % (data.status)
            sys.exit(FAIL)
        json_data = json.loads(data.read())
        con.close()
        print "get_status: %s" % (json_data['msg'])
        # The service's own status field is used directly as the exit code.
        sys.exit(json_data['status'])
        # NOTE(review): dead commented-out branch from an earlier version;
        # candidate for deletion.
        #if json_data['status'] == 1:
        #    print "get_status: %s" % (json_data['msg'])
        #    sys.exit(FAIL)
        #else:
        #    print "%s is fine" %group
        #    sys.exit(PASS)
    except Exception, e:
        print "get_status: %s :exception caught" % (e)
        sys.exit(FAIL)
if __name__=="__main__":
parser = OptionParser()
parser.add_option("-s", dest="server", action="store", default="localhost:8080", help="Marathon Cluster address with port no")
parser.add_option("-a", dest="app", action="store", default="bongo.useast.prod", help="App Id to retrieve the slave address")
parser.add_option("-g", dest="group", action="store", default="pmi", help="The group of event pmi or adevents")
parser.add_option("-t", dest="time", action="store", default="10min", help="The time gap for which the difference is to be calculated")
(options, args) = parser.parse_args()
host = get_bongo_host(options.server, options.app)
get_status(host, options.group, options.time)
| Python | 0 | |
6a95f7aa987994cdd173dc52d5de2754e449ebbb | Add a Python script that controls the user list in my own Twitter lists. | listmanager.py | listmanager.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""This script manages your own Twitter lists.
Examples:
You can add users to a list by the "add" command::
$ python listmanager.py add your_screen_name your_list_name user1 [user2 ...]
Likewise, you can also remove users by the "remove" command.
"""
from secret import twitter_instance
from argparse import ArgumentParser
__version__ = '1.0.0'
def configure():
    """Build the command-line parser for the Twitter list manager.

    Returns:
        An argparse.ArgumentParser configured with the --version option
        and the manager's positional arguments.
    """
    parser = ArgumentParser(description='Twitter List Manager')
    parser.add_argument('--version', action='version', version=__version__)
    # Positional arguments, declared as (name, options) pairs.
    positionals = (
        ('command', dict(
            choices=['add', 'remove'],
            help='Either "add" or "remove".')),
        ('owner_screen_name', dict(
            help='The screen name of the user who owns the list being requested by a slug.')),
        ('slug', dict(
            help='The slug of the list.')),
        ('screen_names', dict(
            nargs='+',
            help='A comma separated list of screen names, up to 100 are allowed in a single request.')),
    )
    for name, options in positionals:
        parser.add_argument(name, **options)
    return parser
def main(args):
    """The main function.

    Args:
        args: An instance of argparse.ArgumentParser parsed in the configure
        function.

    Returns:
        None.
    """
    tw = twitter_instance()
    # Few commands are available so far.
    if args.command == 'add':
        # Bulk-add up to 100 members to the owner's list.
        tw.lists.members.create_all(
            owner_screen_name=args.owner_screen_name,
            slug=args.slug,
            screen_name=','.join(args.screen_names))
    elif args.command == 'remove':
        # Bulk-remove the given members from the owner's list.
        tw.lists.members.destroy_all(
            owner_screen_name=args.owner_screen_name,
            slug=args.slug,
            screen_name=','.join(args.screen_names))
if __name__ == '__main__':
parser = configure()
main(parser.parse_args())
| Python | 0 | |
489c77d3bbd3a9e0e14578f4371870042e2d04d1 | Add another debug script | debug1.py | debug1.py | import logging
import threading
from cornbread.xorg import *
if __name__ == '__main__':
    # Debug harness: watch the focused X11 window in a worker thread.
    logging.warning('Creating FW')
    w = FocusedWindow()
    logging.warning('Creating FW thread')
    t = threading.Thread(target=FocusedWindowWatcher, args=(w,))
    logging.warning('Starting thread')
    t.start()
    try:
        # join() with a timeout keeps the main thread interruptible.
        logging.warning('Joining FW thread')
        t.join(4)
    except KeyboardInterrupt as e:
        logging.warning('Keyboard interrupt')
        # Ask the watcher loop to stop, then give it a moment to exit.
        w._exit_watch = True
        t.join(4)
| Python | 0.000001 | |
28d409eea4fbcd3846d0146f878529ed3b1c2145 | Create update.py | app/update.py | app/update.py | '''
Update functions for Classes in Models
- Update SCTR - updatingSCTR()
in: array (Adj Close)
out: float (Average SCTR over SCTR_AVERAGE days, EMA50)
- Update Money wave - updatingMoneyWave()
in: array (High, Low, Adj Close, nextMWPrice = False, MW)
out: float (Money Wave)
- Sub func
Update next stock price for a fixed MW - if nextMWPrice = True
out: float (Price)
- Update weekly EMA Long Term (50) vs Sort Term (10) - updatingEMALTvsST()
in: array (Adj Close)
out: Boolean (or array for plot)
- Update CoppockCurve - updatingCoppock()
Not yet implemented!
in: ?
out: Boolean (or array for plot)
- Update plot - updatingPlot()
Not yet implemented!
in:
out:
'''
#import pandas as pd
import numpy as np
import talib as tb
from config import SCTR_AVERAGE
def updatingSCTR(adjClose):
    """Return the latest StockCharts Technical Rank (SCTR) score.

    adjClose: array of adjusted closes, newest last; needs more than 250
    points so the 200-day EMA / 125-day ROC are defined.
    Returns None when there is not enough history.
    """
    if len(adjClose) > 250:
        # -- Long term SCTR --------------------
        # 30% distance above EMA(200) + 30% 125-day rate of change.
        ema200 = tb.EMA(adjClose, timeperiod=200)
        sctrEMA200 = ((adjClose/ema200)-1)
        sctrROC125 = tb.ROC(adjClose, timeperiod=125)
        longTerm = ((sctrEMA200*0.3) + (sctrROC125*0.3))
        # -- Medium term SCTR ------------------
        # 15% distance above EMA(50) + 15% 20-day rate of change.
        ema50 = tb.EMA(adjClose, timeperiod=50)
        sctrEMA50 = ((adjClose/ema50)-1)
        sctrROC20 = tb.ROC(adjClose, timeperiod=20)
        mediumTerm = ((sctrEMA50*0.15) + (sctrROC20*0.15))
        # -- Short term SCTR -------------------
        # 5% PPO-histogram slope (clamped to [-1, 1], rescaled) + 5% RSI(14).
        ppo = tb.PPO(adjClose, fastperiod=12, slowperiod=26, matype=1)
        ppoEMA = tb.EMA(ppo, timeperiod=9)
        ppoHist = ppo - ppoEMA
        ppoHistSlope = (ppoHist - np.roll(ppoHist,3))/3
        ppoHistSlope[ppoHistSlope > 1] = 1
        ppoHistSlope[ppoHistSlope < -1] = -1
        rsi14 = tb.RSI(adjClose, timeperiod=14)
        shortTerm = (((ppoHistSlope+1)*50)*0.05) + (rsi14*0.05)
        sctr = (longTerm + mediumTerm + shortTerm)
        # Only the most recent value is returned; the SCTR_AVERAGE-day
        # mean promised in the module docstring is currently disabled.
        return sctr[-1] #*SCTR_AVERAGE):].mean()
    # Throw exception?
    return None
def updatingMoneyWave(highp, lowp, closep, MW = 20, nextMWPrice = False):
    """Latest slow stochastic %D ("money wave").

    With nextMWPrice=True, also returns the close the next bar would need
    for %D to reach the target level MW.  Needs more than 10 bars.
    NOTE(review): return shape is inconsistent -- a bare float (or a
    2-tuple with nextMWPrice) on success, but (None, None) on failure.
    """
    if len(closep) > 10:
        slowk, slowd = tb.STOCH(highp, lowp, closep, fastk_period=5, slowk_period=3, slowk_matype=0, slowd_period=1, slowd_matype=0)
        if nextMWPrice:
            # Raw %K fraction the next bar must reach so the 3-bar %D
            # average equals MW, mapped back to a price through the
            # 4-bar high/low range.
            preStoch = ((MW*3) - slowd[-1] - slowd[-2])/100
            newPrice = ((max(highp[-4:]) - min(lowp[-4:]))*preStoch)+min(lowp[-4:])
            return (slowd[-1], newPrice)
        return (slowd[-1])
    # Throw exception?
    return (None, None)
def updatingEMA50(adjClose):
    """True when the latest close is above its 50-day EMA.

    NOTE(review): implicitly returns None with 60 or fewer data points
    (no explicit failure branch, unlike the other updaters).
    """
    if len(adjClose) > 60:
        ema50 = tb.EMA(adjClose, timeperiod=50)
        return adjClose[-1] > ema50[-1]
def updatingEMALTvsST(daily):
    """True when the weekly short-term EMA(10) is above the long-term EMA(50).

    daily: presumably a pandas DataFrame with an 'Adj Close' column on a
    daily index (it is resampled to Friday-aligned weekly bars) -- TODO
    confirm against callers.  Returns None with ~300 rows or fewer.
    """
    if len(daily['Adj Close']) > 300:
        weekly = daily.asfreq('W-FRI', method='pad', how='end')
        shortTerm = tb.EMA(weekly['Adj Close'].values, timeperiod=10)
        longTerm = tb.EMA(weekly['Adj Close'].values, timeperiod=50)
        return shortTerm[-1] > longTerm[-1]
    # Throw exception
    return None
def updatingCoppock():
    # Placeholder -- Coppock Curve updating is not implemented yet.
    return True
def updatingPlot():
    # Placeholder -- plot updating is not implemented yet.
    return True
| Python | 0.000001 | |
b6b65f0ca7253af5325eafc6b19e7cfecda231b3 | Add solution for exercise 2b of hw3 | hw3/hw3_2b.py | hw3/hw3_2b.py | import sympy
# Classify the stationary point of f(x1, x2) = 8*x1 + 12*x2 + x1**2 - 2*x2**2
# via the eigenvalues of its Hessian (homework 3, exercise 2b).
x1, x2 = sympy.symbols('x1 x2')
f = 8*x1 + 12*x2 + x1**2 -2*x2**2
# Gradient components; setting both to zero locates the stationary point.
df_dx1 = sympy.diff(f,x1)
df_dx2 = sympy.diff(f,x2)
H = sympy.hessian(f, (x1, x2))
xs = sympy.solve([df_dx1, df_dx2], [x1, x2])
# Hessian evaluated at the stationary point (constant here, but general).
H_xs = H.subs([(x1,xs[x1]), (x2,xs[x2])])
lambda_xs = H_xs.eigenvals()
# Count non-positive eigenvalues: none -> minimum, all -> maximum, else saddle.
count = 0
for i in lambda_xs.keys():
    if i.evalf() <= 0:
        count += 1
if count == 0:
    print 'Local minima'
elif count == len(lambda_xs.keys()):
    # NOTE(review): 'Lacal' typo lives in the printed output string.
    print 'Lacal maxima'
else:
    print 'Saddle point'
| Python | 0.000071 | |
6daa90f89ec563f2d5b6eaa57c46bc4b06ad1cc0 | Generate shapes using a pca model | detect.py | detect.py | from functools import partial
import numpy as np
import menpo.io as mio
def bbox_overlap_area(a, b):
    """Area of the intersection of the axis-aligned bounding boxes of two
    point sets (0 when the boxes do not overlap)."""
    lo = np.maximum(a.min(axis=0), b.min(axis=0))
    hi = np.minimum(a.max(axis=0), b.max(axis=0))
    extent = hi - lo
    return 0 if (extent < 0).any() else extent.prod()
def bbox_proportion_overlap(a, b):
    """Fraction of *a*'s bounding-box area covered by its overlap with *b*."""
    return bbox_overlap_area(a, b) / bbox_area(a)
def bbox_area(b):
    """Area of the axis-aligned bounding box of point set *b*."""
    extent = b.max(axis=0) - b.min(axis=0)
    return extent.prod()
def bbox_area_ratio(a, b):
    """Ratio of *a*'s bounding-box area to *b*'s."""
    area_a = bbox_area(a)
    area_b = bbox_area(b)
    return area_a / area_b
def bbox_overlap_acceptable(gt, d):
    """A detection *d* matches ground truth *gt* when it covers more than
    half of gt's box and its box is more than half of gt's area ratio."""
    if not bbox_proportion_overlap(gt, d) > 0.5:
        return False
    return bbox_area_ratio(gt, d) > 0.5
def load_dlib_detector():
    """Return dlib's frontal face detector, wrapped to skip greyscaling."""
    from menpodetect import load_dlib_frontal_face_detector
    detector = load_dlib_frontal_face_detector()
    return partial(detector, greyscale=False)
detector = load_dlib_detector()
def load_opencv_detector():
    """Return OpenCV's frontal face detector, wrapped to skip greyscaling."""
    from menpodetect import load_opencv_frontal_face_detector
    detector = load_opencv_frontal_face_detector()
    return partial(detector, greyscale=False)
def load_pico_detector():
    """Return the Pico frontal face detector, wrapped to skip greyscaling."""
    from menpodetect import load_pico_frontal_face_detector
    detector = load_pico_frontal_face_detector()
    return partial(detector, greyscale=False)
def detect_and_check(img, det=None, group=None):
    """Run face detector *det* on *img* and return the first detection that
    sufficiently overlaps the ground-truth bounding box, or None.

    Args:
        img: menpo image carrying ground-truth landmarks.
        det: detector callable; defaults to the module-level dlib detector.
        group: name of the landmark group holding the ground truth.
    """
    if det is None:
        det = detector
    gt = img.landmarks[group].lms.bounding_box()
    # Bug fix: the loop previously called the module-level ``detector``
    # unconditionally, silently ignoring the *det* argument.  The unused
    # ``bad_fits`` accumulator was also dropped.
    for detection in det(img):
        if bbox_overlap_acceptable(gt.points, detection.points):
            return detection
    return None
def normalize(gt):
    """Transform that maps shape *gt* to be centred on the origin with
    unit range in each dimension."""
    from menpo.transform import Translation, NonUniformScale
    t = Translation(gt.centre()).pseudoinverse()
    s = NonUniformScale(gt.range()).pseudoinverse()
    return t.compose_before(s)
def random_instance(pca):
    """Draw a random shape from *pca* by sampling component weights from
    a zero-mean Gaussian with the model's eigenvalues as variances."""
    weights = np.random.multivariate_normal(np.zeros_like(pca.eigenvalues),
                                            np.diag(pca.eigenvalues))
    return pca.instance(weights)
# Registry mapping CLI detector names to their loader factories.
_DETECTORS = {
    'dlib': load_dlib_detector,
    'pico': load_pico_detector,
    'opencv': load_opencv_detector
}
def synthesize_detection(pca_model, lms):
    """Synthesizes a bounding box for a particular detector.

    Args:
        pca_model: A menpo PCAModel instance built from normalized detections.
        lms: The ground-truth landmark shape to anchor the synthetic box to.

    Returns:
        A random sample from *pca_model*, mapped back into the coordinate
        frame of ``lms``'s bounding box.
    """
    gt_bb = lms.bounding_box()
    instance = random_instance(pca_model)
    return normalize(gt_bb).pseudoinverse().apply(instance)
def create_generator(shapes, detections):
    """Build a PCA model of detections expressed relative to their shapes.

    Args:
        shapes: ground-truth landmark shapes.
        detections: matching detected bounding boxes.

    Returns:
        A menpo PCAModel over the normalized detections.
    """
    # Only PCAModel is needed here; the unused menpo.io / LandmarkGroup
    # function-scope imports were removed.
    from menpo.model import PCAModel

    # normalize these to size [1, 1], centred on origin
    normed_detections = [
        normalize(lms.bounding_box()).apply(det)
        for lms, det in zip(shapes, detections)
    ]
    # build a PCA model from good detections
    return PCAModel(normed_detections)
def load_n_create_generator(pattern, detector_name,
                            group=None, overwrite=False):
    """Detect faces on every image matching *pattern* and pickle a PCA
    model of the successful detections as '<detector_name>_gen.pkl'.

    Args:
        pattern: glob pattern of images to import.
        detector_name: one of the keys of _DETECTORS.
        group: landmark group holding the ground truth.
        overwrite: overwrite an existing pickle file.

    Raises:
        ValueError: when *detector_name* is not a known detector.
    """
    import menpo.io as mio
    from menpo.model import PCAModel
    try:
        detector = _DETECTORS[detector_name]()
    except KeyError:
        detector_list = ', '.join(list(_DETECTORS.keys()))
        raise ValueError('Valid detector types are: {}'.format(detector_list))

    print('Running {} detector on {}'.format(detector_name, pattern))
    bboxes = [(img, detect_and_check(img, detector, group=group))
              for img in mio.import_images(pattern, normalise=False,
                                           verbose=True)]

    # find all the detections that did not fail
    # Bug fix: filter() is a lazy iterator on Python 3 and has no len();
    # materialize the good detections as a list.
    detections = [x for x in bboxes if x[1] is not None]

    print('Creating a model out of {} detections.'.format(len(detections)))

    # normalize these to size [1, 1], centred on origin
    normed_detections = [
        normalize(im.landmarks[group].lms.bounding_box()).apply(det)
        for im, det in detections
    ]

    # build a PCA model from good detections
    pca = PCAModel(normed_detections)

    mio.export_pickle(pca, '{}_gen.pkl'.format(detector_name), overwrite=overwrite)
if __name__ == '__main__':
    path = '/Users/gtrigeo/db/lfpw/trainset/*.png'
    # Bug fix: create_generator(shapes, detections) accepts no ``group``
    # keyword; the (glob pattern, detector name, group) signature belongs
    # to load_n_create_generator, so the old call raised TypeError.
    load_n_create_generator(path, 'dlib', group='PTS')
| Python | 0.999996 | |
8649fef1ddea18525fd0f6c5f8aa42e18b0726f8 | rename plot to visualizer | lib/visualizer.py | lib/visualizer.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
import datetime
import time
from utils import slugify
from scipy.cluster.hierarchy import dendrogram
def create_bar_graph(_x,_y,_title,_disp):
    """Render a bar chart of _y over _x, save it as PNG and PDF under
    images_path, and show it interactively when _disp is True."""
    print "Creating bar graph..."
    # VARIABLES
    bar_color='#CCCCCC'
    # NOTE(review): bar_width is never used below.
    bar_width=.35
    images_path="/home/clemsos/Dev/mitras/out/"
    w=len(_x) # width of the canvas
    h=len(_y) # height of the canvas
    # Create a figure with size 6 _x 6 inches.
    fig = plt.figure(figsize=(w,h))
    # Create a canvas and add the figure to it.
    canvas = FigureCanvas(fig)
    # bar plot for volume of
    bars = fig.add_subplot(111)
    # Display Grid
    bars.grid(True,linestyle='-',color='0.75')
    # Display Bars
    bars.bar(_x, _y, facecolor=bar_color,
           align='center', ecolor='black')
    # This sets the ticks on the x axis to be exactly where we put the center of the bars.
    # bars.set_xticks(_x)
    # Create a y label
    bars.set_ylabel('Counts')
    # Create a title, in italics
    bars.set_title(_title,fontstyle='italic')
    # Generate the Scatter Plot.
    # bars.scatter(_x,_y,s=20,color='tomato');
    # Auto-adjust and beautify the labels
    fig.autofmt_xdate()
    # Save the generated Scatter Plot to a PNG file.
    fn=images_path+slugify(_title)
    canvas.print_figure(fn,dpi=200)
    fig.savefig(fn+".pdf")
    print " graph file has been at %s.png"%fn
    print " graph file has been at %s.pdf"%fn
    # Show us everything
    if _disp is True :
        plt.show()
def plot_sparcity():
    # Placeholder: sparsity plotting is not implemented yet.
    # should use matplotlib spy : http://matplotlib.org/examples/pylab_examples/spy_demos.html
    pass
def augmented_dendrogram(*args, **kwargs):
    """Draw a scipy dendrogram with each merge annotated by its height."""
    ddata = dendrogram(*args, **kwargs)
    if not kwargs.get('no_plot', False):
        # icoord/dcoord hold the x/y coordinates of every U-shaped link;
        # the top of a link sits at (mean of the two middle x's, d[1]).
        for i, d in zip(ddata['icoord'], ddata['dcoord']):
            x = 0.5 * sum(i[1:3])
            y = d[1]
            plt.plot(x, y, 'ro')
            plt.annotate("%.3g" % y, (x, y), xytext=(0, -8),
                         textcoords='offset points',
                         va='top', ha='center')
# VIZ lib
# http://bokeh.pydata.org/ | Python | 0.000874 | |
813e1c44340fae6cb41144878d7afabfcd564f2b | Create matrix.py | src/mathematics/matrix.py | src/mathematics/matrix.py | import random
import time
from functools import reduce
class Matrix(object):
    """A small dense matrix type backed by a list of row lists.

    Supports element-wise addition/subtraction, scalar and matrix
    multiplication, transposition, determinants, and zero / identity /
    random constructors.  ``shape`` is a (rows, cols) tuple, or None for
    an empty matrix.
    """

    def __init__(self, mat=None):
        """mat: list of equal-length row lists (defaults to empty)."""
        self.matrix = [] if mat is None else mat
        self.shape = self.get_shape()

    def __add__(self, other):
        """Element-wise sum; raises IndexError on shape mismatch."""
        if self.get_shape() != other.get_shape():
            raise IndexError
        result = [
            [x + y for x, y in zip(row_a, row_b)]
            for row_a, row_b in zip(self.matrix, other.matrix)
        ]
        return Matrix(result)

    def __mul__(self, other):
        """Scalar multiplication for int/float, matrix product otherwise."""
        if isinstance(other, (int, float)):
            scaled = [[other * x for x in row] for row in self.matrix]
            return Matrix(scaled)
        return Matrix(self._mat_mul(other))

    def _mat_mul(self, other):
        # Inner product of every row of self with every column of other.
        if self.shape[1] != other.shape[0]:
            raise IndexError
        # Bug fix: the result has other.shape[1] columns; the old code
        # iterated self.shape[1] columns, producing wrong shapes (and
        # index errors) for non-square products.
        cols = other.t().matrix
        return [
            [sum(a * b for a, b in zip(row, col)) for col in cols]
            for row in self.matrix
        ]

    def __sub__(self, other):
        """Element-wise difference, implemented as self + (-1 * other)."""
        return self + (other * -1)

    def t(self):
        """Return the transpose as a new Matrix."""
        if not self.matrix:
            # NOTE: plain list kept for backward compatibility with the
            # original implementation's empty-matrix behaviour.
            return []
        rows, cols = self.shape
        return Matrix([[self.matrix[i][j] for i in range(rows)]
                       for j in range(cols)])

    def inv(self):  # TODO: finish the inv function
        """Matrix inverse -- not implemented yet."""
        pass

    @staticmethod
    def det(mat):
        """Determinant via Gaussian elimination.

        Bug fix: elimination now runs on a copy, so *mat* is no longer
        destroyed as a side effect.  Raises IndexError for non-square
        input; a zero pivot still raises ZeroDivisionError (as before).
        """
        if mat.shape[0] != mat.shape[1]:
            raise IndexError
        n = mat.shape[0]
        work = [list(row) for row in mat.matrix]
        for j in range(n):
            for i in range(j + 1, n):
                k = work[i][j] / work[j][j]
                work[i] = [y - k * x for x, y in zip(work[j], work[i])]
        # Determinant of a triangular matrix = product of its diagonal.
        return reduce(lambda x, y: x * y, (work[d][d] for d in range(n)))

    def get_shape(self):
        """(rows, cols) of the matrix, or None when it is empty."""
        if self.matrix:
            return len(self.matrix), len(self.matrix[0])
        return None

    def zero(self, row=3, col=3):
        """Append *row* rows of *col* zeros (fills a fresh matrix)."""
        for _ in range(row):
            self.matrix.append([0] * col)
        self.shape = (row, col)

    def eye(self, n=2):
        """Fill a fresh matrix as the n-by-n identity."""
        self.zero(row=n, col=n)
        for i in range(n):
            self.matrix[i][i] = 1
        self.shape = self.get_shape()

    def rand(self, row, col):
        """Fill a fresh matrix with uniform random floats in [0, 1)."""
        # Re-seeding with the wall clock on every call weakened the RNG
        # and was removed; the global random state is used instead.
        self.zero(row, col)
        for i in range(self.get_shape()[0]):
            self.matrix[i] = [x + random.random() for x in self.matrix[i]]
        self.shape = self.get_shape()

    def get(self, row, col):
        """Bounds-checked element access; raises IndexError when out of
        range (negative indices are not validated, as before)."""
        if row >= self.get_shape()[0] or col >= self.get_shape()[1]:
            raise IndexError
        return self.matrix[row][col]

    def mprint(self):
        """Pretty-print the matrix between '[' and ']' marker lines.

        Bug fix: a single-row matrix now also gets its closing ']'.
        """
        last = self.get_shape()[0] - 1
        for i, row in enumerate(self.matrix):
            if i == 0:
                print('[')
            print(row)
            if i == last:
                print(']')
| Python | 0.000006 | |
71b0af732e6d151a22cc0d0b28b55020780af8b6 | Add memoize function for python 2.x | ftools.py | ftools.py | from functools import wraps
def memoize(obj):
    """Cache *obj*'s results, keyed on the string form of its arguments.

    Adapted from the Python Decorator Library on the official Python wiki
    (https://wiki.python.org/moin/PythonDecoratorLibrary#Memoize); used
    here because functools.lru_cache is unavailable on Python 2.x.
    """
    cache = obj.cache = {}

    @wraps(obj)
    def memoizer(*args, **kwargs):
        key = str(args) + str(kwargs)
        try:
            return cache[key]
        except KeyError:
            result = cache[key] = obj(*args, **kwargs)
            return result

    return memoizer
| Python | 0.000018 | |
1967db2a9b6e3b4420a1ebc5fe5fe157d61c6314 | Initialise entry and do a proper 404 if it could not be found. | kindlefeed.py | kindlefeed.py | # KindleFeed Controller
# =====================
#
# This file is part of KindleFeed.
#
# KindleFeed is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# KindleFeed is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with KindleFeed. If not, see <http://www.gnu.org/licenses/>.
import feedparser, flask, urllib
app = flask.Flask(__name__)
@app.template_filter('quote_plus')
def urlencode(s):
    """Jinja filter: percent-encode *s* for query strings (Python 2 urllib)."""
    quoted = urllib.quote_plus(s)
    return quoted
@app.route('/')
def index():
    """Home page: render the hard-coded list of feeds."""
    feeds = (
        ('Mashable', 'http://feeds.mashable.com/Mashable'),
        ('TechCrunch', 'http://feeds.feedburner.com/techcrunch'),
    )
    return flask.render_template('index.html', feeds=feeds)
@app.route('/feed')
def feed():
    """Render the entry list of the feed given by the ?url= parameter."""
    url = flask.request.args.get('url')
    parsed = feedparser.parse(url)
    return flask.render_template('feed.html', url=url, feed=parsed)
@app.route('/entry')
def entry():
    """Render one feed entry; respond 404 when the id is not in the feed."""
    feed_url = flask.request.args.get('feed')
    entry_id = flask.request.args.get('entry')
    parsed = feedparser.parse(feed_url)
    # Keep the last entry whose id matches (ids are normally unique).
    match = None
    for candidate in parsed.entries:
        if candidate.id == entry_id:
            match = candidate
    if match is None:
        flask.abort(404)
    return flask.render_template('entry.html', feed_url=feed_url, feed=parsed,
                                 entry=match)
def main():
    """Run the development server on all interfaces, port 80."""
    # NOTE(review): debug=True enables the Werkzeug debugger (arbitrary
    # code execution) -- never expose this configuration in production;
    # port 80 also requires elevated privileges.
    app.debug = True
    app.run(host='0.0.0.0', port=80)


if __name__ == '__main__':
    main()
| # KindleFeed Controller
# =====================
#
# This file is part of KindleFeed.
#
# KindleFeed is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# KindleFeed is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with KindleFeed. If not, see <http://www.gnu.org/licenses/>.
# --- Previous revision of this module (pre-fix), kept in this dump ----------
# Identical to the current version except entry(): see the BUG note below.
import feedparser, flask, urllib
app = flask.Flask(__name__)


@app.template_filter('quote_plus')
def urlencode(s):
    # Jinja filter: percent-encode a string for query strings.
    return urllib.quote_plus(s)


@app.route('/')
def index():
    # Home page with the hard-coded feed list.
    feeds = (('Mashable', 'http://feeds.mashable.com/Mashable'), ('TechCrunch', 'http://feeds.feedburner.com/techcrunch'))
    return flask.render_template('index.html', feeds=feeds)


@app.route('/feed')
def feed():
    # Entry list for the feed named by ?url=.
    url = flask.request.args.get('url')
    feed = feedparser.parse(url)
    return flask.render_template('feed.html', url=url, feed=feed)


@app.route('/entry')
def entry():
    feed_url = flask.request.args.get('feed')
    entry_id = flask.request.args.get('entry')
    feed = feedparser.parse(feed_url)
    for i in feed.entries:
        if i.id == entry_id:
            entry = i
    # BUG: `entry` is never initialised -- when no entry matches entry_id
    # this raises NameError instead of a 404 (fixed in the later revision
    # by initialising entry = None and calling flask.abort(404)).
    return flask.render_template('entry.html', feed_url=feed_url, feed=feed, entry=entry)


def main():
    # Development server; debug mode must not be used in production.
    app.debug = True
    app.run(host='0.0.0.0', port=80)
| Python | 0 |
4bc2c46e605b7bffb6e7e8206fdb6bb168864c45 | test random user fulfilling the specifications | listRandomUser.py | listRandomUser.py | import random
class list_random:
    """Random perfect matching over n items.

    All ordered pairs (i, j) with i < j are generated; ``list1`` draws
    n // 2 disjoint pairs at random so that every item is used once.
    """

    def __init__(self, n):
        self.n = n
        self.count = n // 2  # fixed: n / 2 is a float under Python 3
        self.l_tuple = []
        for i in range(n):
            for j in range(i + 1, n):
                self.l_tuple.append([i, j, 0])
        # Third slot is the pair's state:
        #   0 - unused ("no usado")
        #   1 - invalid against the current selection ("invalido")
        #   2 - already selected ("usado")

    def _valido(self, i, lista):
        """Try to add pair *i* to *lista*; return True when it was added."""
        pair = self.l_tuple[i]
        if pair[2] == 0:
            for chosen in lista:
                if (chosen[0] == pair[0] or
                        chosen[0] == pair[1] or
                        chosen[1] == pair[0] or
                        chosen[1] == pair[1]):
                    # Fixed: was `pair[2] == 1`, a no-op comparison, so the
                    # invalid mark was never actually recorded.
                    pair[2] = 1
                    return False
            # Fixed: was `pair[2] == 2`, also a no-op comparison.
            pair[2] = 2
            lista.append((pair[0], pair[1]))
            return True
        return False

    def list1(self):
        """Return self.count disjoint pairs chosen at random."""
        # Reset the per-pair state so repeated calls start from scratch
        # (required now that _valido really records states 1 and 2).
        for entry in self.l_tuple:
            entry[2] = 0
        lista = []
        k = self.count
        while k > 0:
            i = random.randrange(len(self.l_tuple))
            if not self._valido(i, lista):
                # Fallback: scan every other pair starting after i.
                for j in range(len(self.l_tuple)):
                    if self._valido((i + j + 1) % len(self.l_tuple), lista):
                        break
                else:
                    # no encontrada solucion -- no compatible pair exists.
                    # Fixed: raising a plain string is a TypeError; also the
                    # old `if j == last` check fired even when the *last*
                    # candidate succeeded.
                    raise RuntimeError("solucion no encontrada")
            k = k - 1
            print("UNO MENOS", k)
        return lista
| Python | 0 | |
c2f1717c53042f8ff3a7ba169a2db365aa8bc8ba | ADd gff2togff3.py | gff2togff3.py | gff2togff3.py | """Change attribute string from GFF2 format GGF3 format."""
import csv
import sys

# Stream the GFF file named in argv[1]; rewrite column 9 (attributes) from
# GFF2 "key value ; key value" form into GFF3 "key=value;key=value" form.
# Comment lines (starting with '#') pass through untouched.
# Fixed: the input file is now closed deterministically via `with`.
with open(sys.argv[1]) as gff_file:
    for row in csv.reader(gff_file, delimiter="\t"):
        if not row[0].startswith("#"):
            row[8] = ";".join(
                "%s=%s" % (attribute.split()[0], " ".join(attribute.split()[1:]))
                for attribute in row[8].split(" ; "))
        print("\t".join(row))
| Python | 0.001224 | |
ac89ec64ab619bfa778d0961aeaefc8967d971a3 | Add errors.py to move away from Python errors | errors.py | errors.py | # Kimi language interpreter in Python 3
# Anjana Vakil
# http://www.github.com/vakila/kimi
def complain_and_die(message):
    """Print *message* to stdout, then terminate the interpreter."""
    import sys  # local import keeps this module's (empty) import list as-is
    print(message)
    # Fixed: quit() is injected by the `site` module and is documented as
    # interactive-only; sys.exit() is the supported way for a program to
    # stop. Both raise SystemExit with exit status 0.
    sys.exit()
def assert_or_complain(assertion, message):
    """Check *assertion*; when falsy, report *message* and exit."""
    # Fixed: the old body used a bare `assert`, which the interpreter strips
    # under `python -O`, silently disabling every interpreter error check.
    # An explicit truthiness test behaves identically but survives -O.
    if not assertion:
        complain_and_die(message)
| Python | 0.000001 | |
5cb726d5139537cbe7c03bc5ed540b9cdb7c7e21 | Add bzero simprocedure I have had lying around forever | angr/procedures/posix/bzero.py | angr/procedures/posix/bzero.py | from ..libc import memset
class bzero(memset.memset):
    # angr SimProcedure for bzero(3): zero `size` bytes starting at `addr`.
    # Implemented by delegating to the memset SimProcedure with a constant
    # zero value, built as a solver bitvector one architecture-byte wide.
    def run(self, addr, size):
        return super().run(addr, self.state.solver.BVV(0, self.arch.byte_width), size)
| Python | 0 | |
551f78f32665b1397120ada10036c1d9c09daddc | Create flip-bits.py | lulu/flip-bits.py | lulu/flip-bits.py | class Solution:
"""
@param a, b: Two integer
return: An integer
"""
def bitSwapRequired(self, a, b):
# write your code here
return self.countOnes(a^b)
def countOnes(self, num):
# write your code here
counter = 0
a = 1
for i in range(0, 32):
digit = num & a
if digit != 0:
counter += 1
a *= 2
return counter
| Python | 0.000004 | |
75437fc5607b41763f8c81813ba12dbe1c414c5f | combine the sequence names from various headers and then concatonate the sam entries | iron/utilities/combine_sam.py | iron/utilities/combine_sam.py | #!/usr/bin/python
import sys, argparse, re
def main():
    """Merge SAM files: one combined header, then every alignment record.

    Fixes relative to the previous version:
      * pass 2 read ``args.samfiles`` (AttributeError) -- the argparse
        destination is ``sam_files``;
      * ``main()`` ran unconditionally on import; now guarded;
      * alignment lines were printed with their original newline plus
        print's own, double-spacing the output;
      * print statements converted to print() calls;
      * removed the unused ``header`` flag.
    """
    parser = argparse.ArgumentParser(description='Combine sam files')
    parser.add_argument('sam_files', nargs='+', help='FILENAME for sam files')
    args = parser.parse_args()

    seqs = set()      # distinct @SQ header lines across all inputs
    tagorder = []     # header tags in first-seen order
    tagseen = {}      # tag -> most recent header line for that tag

    # Pass 1: collect headers from every input file.
    for path in args.sam_files:
        with open(path) as inf:
            for line in inf:
                line = line.rstrip()
                fields = line.split("\t")
                m = re.match(r'^(@\S\S)\s', line)
                # The header section ends at the first alignment record.
                if not m or len(fields) > 10:
                    break
                if m.group(1) == '@SQ':
                    seqs.add(line)
                if m.group(1) not in tagseen:
                    tagorder.append(m.group(1))
                tagseen[m.group(1)] = line

    # Emit the combined header; @SQ lines come out sorted.
    for tag in tagorder:
        if tag != '@SQ':
            print(tagseen[tag])
        else:
            for seq in sorted(seqs):
                print(seq)

    # Pass 2: emit every alignment record (lines with more than 10 fields).
    for path in args.sam_files:
        with open(path) as inf:
            for line in inf:
                record = line.rstrip('\n')
                if len(record.split("\t")) > 10:
                    print(record)


if __name__ == '__main__':
    main()
| Python | 0.000001 | |
e3c493847ead7352ecad1e92a739a1b79549a70c | Add dodo tape command | dodo_commands/extra/webdev_commands/tape.py | dodo_commands/extra/webdev_commands/tape.py | # noqa
import argparse
from dodo_commands.extra.standard_commands import DodoCommand
class Command(DodoCommand):  # noqa
    """Run `tape` on the configured glob, inside the docker decorator."""
    help = ""
    decorators = ["docker"]
    docker_options = [
        '--name=tape',
    ]

    def add_arguments_imp(self, parser):  # noqa
        # Everything after the command name is forwarded verbatim to tape.
        parser.add_argument('tape_args', nargs=argparse.REMAINDER)

    def handle_imp(self, tape_args, **kwargs):  # noqa
        # A single leading "-" only separates dodo options from tape
        # arguments; drop it before forwarding.
        if tape_args[:1] == ['-']:
            tape_args = tape_args[1:]
        command = [
            self.get_config("/TAPE/tape", "tape"),
            self.get_config("/TAPE/glob"),
        ]
        self.runcmd(command + tape_args, cwd=self.get_config("/TAPE/src_dir"))
| Python | 0.000004 | |
0f13cc95eeeed58c770e60b74a37f99ca24a28f0 | add tests for views | api/tests/test_views.py | api/tests/test_views.py | from django.test import TestCase
from rest_framework.test import APIClient
from rest_framework import status
from django.core.urlresolvers import reverse
class ViewsTestCase(TestCase):
    """Test suite for views."""

    def setUp(self):
        """Create the API client used by every test."""
        self.client = APIClient()

    def create_file(self, filepath):
        """Write a small text file and return it as multipart POST data."""
        with open(filepath, 'w') as f:  # write handle closed deterministically
            f.write('this is a good file\n')
        f = open(filepath, 'rb')  # intentionally left open: read on upload
        return {'_file': f}

    def test_file_upload(self):
        data = self.create_file('/tmp/file')
        response = self.client.post(
            reverse('api.upload'), data, format='multipart')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)

    def test_getting_all_files(self):
        # TODO(review): no assertion -- this only checks the view does not
        # raise. Assert on response.status_code once the expected behaviour
        # of 'file_get' is pinned down.
        response = self.client.get(reverse('file_get'))

    def test_getting_specific_file(self):
        pass

    def test_deleting_a_file(self):
        """Ensure an existing file can be deleted."""
        data = self.create_file('/tmp/file')
        response = self.client.post(
            reverse('api.upload'), data, format='multipart')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        # get the file that's just been uploaded
        # NOTE(review): `File` is not imported in this module -- add the
        # model import (e.g. `from api.models import File`) or this fails
        # with NameError before issuing the delete request.
        new_file = File.objects.get()
        # Fixed: `kwargs` belongs to reverse() (it builds the URL); passing
        # it to client.delete() sent it as request data and hit the wrong
        # endpoint.
        res = self.client.delete(
            reverse('api.delete', kwargs={'pk': new_file.id}), follow=True)
        self.assertEqual(res.status_code, status.HTTP_204_NO_CONTENT)
aba613ddef5e25e057ca515bb017c4a21095936f | Add example to use CRF1d with automatically sorting sequences | examples/pos/postagging_with_auto_transpose.py | examples/pos/postagging_with_auto_transpose.py | import argparse
import collections
import nltk
import numpy
import six
import chainer
from chainer import datasets
import chainer.links as L
from chainer import reporter
from chainer import training
from chainer.training import extensions
class CRF(chainer.Chain):
    """EmbedID feature extractor followed by a CRF1d tagger.

    CRF1d is created with transpose=True, so it sorts/transposes the
    variable-length sequences itself and callers can pass plain lists.
    """

    def __init__(self, n_vocab, n_pos):
        super(CRF, self).__init__()
        with self.init_scope():
            # One n_pos-dimensional feature vector per vocabulary id.
            self.feature = L.EmbedID(n_vocab, n_pos)
            self.crf = L.CRF1d(n_pos, transpose=True)

    def forward(self, xs, ys):
        """Return the CRF loss for a batch; also report accuracy counters."""
        # h[i] is feature vector for each batch of words.
        hs = [self.feature(x) for x in xs]
        loss = self.crf(hs, ys)
        reporter.report({'loss': loss}, self)
        # To predict labels, call argmax method.
        _, predict = self.crf.argmax(hs)
        correct = 0
        total = 0
        for y, p in six.moves.zip(ys, predict):
            # NOTE y is ndarray because
            # it does not pass to transpose_sequence
            correct += self.xp.sum(y == p)
            total += len(y)
        # correct/total are combined into an accuracy by the MicroAverage
        # extensions configured in main().
        reporter.report({'correct': correct}, self)
        reporter.report({'total': total}, self)
        return loss

    def argmax(self, xs):
        """Run Viterbi decoding; returns CRF1d.argmax's (score, paths)."""
        hs = [self.feature(x) for x in xs]
        return self.crf.argmax(hs)
def convert(batch, device):
    """Split (sentence, pos) pairs and move both sequences to *device*."""
    sentences = []
    poses = []
    for sentence, pos in batch:
        sentences.append(chainer.dataset.to_device(device, sentence))
        poses.append(chainer.dataset.to_device(device, pos))
    return {'xs': sentences, 'ys': poses}
def main():
    """Train the CRF POS tagger on the NLTK Brown corpus."""
    parser = argparse.ArgumentParser(
        description='Chainer example: POS-tagging')
    parser.add_argument('--batchsize', '-b', type=int, default=30,
                        help='Number of images in each mini batch')
    parser.add_argument('--epoch', '-e', type=int, default=20,
                        help='Number of sweeps over the dataset to train')
    parser.add_argument('--gpu', '-g', type=int, default=-1,
                        help='GPU ID (negative value indicates CPU)')
    parser.add_argument('--out', '-o', default='result',
                        help='Directory to output the result')
    parser.add_argument('--resume', '-r', default='',
                        help='Resume the training from snapshot')
    args = parser.parse_args()

    # Auto-growing vocabularies: unseen keys get the next integer id.
    vocab = collections.defaultdict(lambda: len(vocab))
    pos_vocab = collections.defaultdict(lambda: len(pos_vocab))

    # Convert word sequences and pos sequences to integer sequences.
    nltk.download('brown')
    data = []
    for sentence in nltk.corpus.brown.tagged_sents():
        xs = numpy.array([vocab[lex] for lex, _ in sentence], numpy.int32)
        ys = numpy.array([pos_vocab[pos] for _, pos in sentence], numpy.int32)
        data.append((xs, ys))

    print('# of sentences: {}'.format(len(data)))
    print('# of words: {}'.format(len(vocab)))
    print('# of pos: {}'.format(len(pos_vocab)))

    model = CRF(len(vocab), len(pos_vocab))
    if args.gpu >= 0:
        chainer.backends.cuda.get_device_from_id(args.gpu).use()
        model.to_gpu(args.gpu)
    optimizer = chainer.optimizers.Adam()
    optimizer.setup(model)
    optimizer.add_hook(chainer.optimizer.WeightDecay(0.0001))

    # 10% of the data (seed fixed for reproducibility) is held out for test.
    test_data, train_data = datasets.split_dataset_random(
        data, len(data) // 10, seed=0)
    train_iter = chainer.iterators.SerialIterator(train_data, args.batchsize)
    test_iter = chainer.iterators.SerialIterator(test_data, args.batchsize,
                                                 repeat=False, shuffle=False)
    updater = training.updaters.StandardUpdater(
        train_iter, optimizer, converter=convert, device=args.gpu)
    trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=args.out)

    evaluator = extensions.Evaluator(
        test_iter, model, device=args.gpu, converter=convert)
    # Only validate in each 1000 iteration
    trainer.extend(evaluator, trigger=(1000, 'iteration'))
    trainer.extend(extensions.LogReport(trigger=(100, 'iteration')),
                   trigger=(100, 'iteration'))
    # Fold the reported correct/total counters into accuracy values.
    trainer.extend(
        extensions.MicroAverage(
            'main/correct', 'main/total', 'main/accuracy'))
    trainer.extend(
        extensions.MicroAverage(
            'validation/main/correct', 'validation/main/total',
            'validation/main/accuracy'))
    trainer.extend(
        extensions.PrintReport(
            ['epoch', 'main/loss', 'validation/main/loss',
             'main/accuracy', 'validation/main/accuracy', 'elapsed_time']),
        trigger=(100, 'iteration'))
    trainer.extend(extensions.ProgressBar(update_interval=10))

    if args.resume:
        # Resume from a trainer snapshot produced by a previous run.
        chainer.serializers.load_npz(args.resume, trainer)

    trainer.run()


if __name__ == '__main__':
    main()
| Python | 0 | |
27788308891d9cd82da7782d62b5920ea7a54f80 | Add custom command to daily check scores | employees/management/commands/dailycheck.py | employees/management/commands/dailycheck.py | from constance import config
from datetime import datetime
from django.core.management.base import BaseCommand
from django.core.mail import EmailMessage
from django.shortcuts import get_list_or_404
from employees.models import Employee
class Command(BaseCommand):
    """Daily housekeeping: roll score counters over, block abusers, notify."""
    help = "Update scores daily."

    def change_day(self):
        """Move today's counters into yesterday's slots and reset them."""
        employees = get_list_or_404(Employee)
        for employee in employees:
            employee.yesterday_given = employee.today_given
            employee.yesterday_received = employee.today_received
            employee.today_given = 0
            employee.today_received = 0
            employee.save()

    def change_month(self):
        """Move this month's counters into last month's slots and reset."""
        employees = get_list_or_404(Employee)
        for employee in employees:
            employee.last_month_given = employee.current_month_given
            employee.last_month_score = employee.current_month_score
            employee.current_month_given = 0
            employee.current_month_score = 0
            employee.save()

    def change_year(self):
        """Move this year's counters into last year's slots and reset."""
        employees = get_list_or_404(Employee)
        for employee in employees:
            employee.last_year_given = employee.current_year_given
            employee.last_year_score = employee.current_year_score
            employee.current_year_given = 0
            employee.current_year_score = 0
            employee.save()

    def send_daily_email(self):
        """Confirm to the configured address that the daily run executed."""
        subject = config.DAILY_EXECUTION_CONFIRMATION_SUBJECT
        message = config.DAILY_EXECUTION_CONFIRMATION_MESSAGE
        email = EmailMessage(
            subject, message, to=[config.DAILY_EXECUTION_CONFIRMATION_EMAIL])
        email.send()

    def send_blocked_notification_email(self, employee):
        """Tell *employee* that their account has been blocked."""
        subject = config.USER_BLOCKED_NOTIFICATION_SUBJECT
        message = config.USER_BLOCKED_NOTIFICATION_MESSAGE % employee.username
        email = EmailMessage(subject, message, to=[employee.email])
        email.send()

    def evaluate_block_users(self):
        """Block any employee who exceeded a star give/receive limit."""
        employees = get_list_or_404(Employee)
        for employee in employees:
            if employee.yesterday_given > config.MAX_STARS_GIVEN_DAY:
                employee.is_blocked = True
            if employee.yesterday_received > config.MAX_STARS_RECEIVED_DAY:
                employee.is_blocked = True
            if employee.current_month_given > config.MAX_STARS_GIVEN_MONTHLY:
                employee.is_blocked = True
            if employee.current_month_score > config.MAX_STARS_RECEIVED_MONTHLY:
                employee.is_blocked = True
            employee.save()
            try:
                if employee.is_blocked:
                    # Fixed: the employee argument was missing, so every
                    # blocked user raised TypeError and no mail was sent.
                    self.send_blocked_notification_email(employee)
            except Exception as e:
                # Best-effort: a mail failure must not abort the daily run.
                print(e)  # fixed: was a Python 2 print statement

    def handle(self, *args, **options):
        """Entry point: run daily, monthly and yearly rollovers as due."""
        today = datetime.now()
        self.change_day()
        self.evaluate_block_users()
        self.send_daily_email()
        if today.day == 1:
            self.change_month()
        if (today.day == 1 and today.month == 1):
            self.change_year()
| Python | 0 | |
8aac73fdc26fd838c3f91ffa9bc58e25777a5179 | Add tests for mach angle | properties/tests/test_mach_angle.py | properties/tests/test_mach_angle.py | #!/usr/bin/env python
"""Test Mach angle functions.
Test data is obtained from http://www.grc.nasa.gov/WWW/k-12/airplane/machang.html.
"""
import nose
import nose.tools as nt
from properties.prandtl_meyer_function import mu_in_deg
@nt.raises(ValueError)
def test_mach_lesser_than_one():
    """Subsonic Mach numbers have no Mach angle and must raise."""
    mu_in_deg(0.1)
def test_normal_mach():
    """Known Mach-angle values from the NASA GRC reference page."""
    for mach, expected in ((1.5, 41.762), (2.6, 22.594)):
        nt.assert_almost_equal(mu_in_deg(mach), expected, places=3)
if __name__ == '__main__':
nose.main() | Python | 0 | |
49dfd690abe794e3b393b8bcac3e0ab1427c41b3 | Define riot_open. | riot/app.py | riot/app.py | # -*- coding: utf-8 -*-
import urwid
def run_tag(tag, *args, **kwargs):
    """Mount *tag* as the root widget and run the urwid main loop.

    Extra arguments are forwarded to urwid.MainLoop unchanged.
    """
    urwid.MainLoop(tag, *args, **kwargs).run()
def quit_app():
    # Raising ExitMainLoop unwinds urwid's MainLoop.run() cleanly.
    raise urwid.ExitMainLoop()
| Python | 0.000913 | |
04021db907109a5291833eb5ae96c45fb8d1802c | Add flask app mocking the EC API | ckanext/glasgow/tests/mock_ec.py | ckanext/glasgow/tests/mock_ec.py | import uuid
import flask
from werkzeug.exceptions import default_exceptions
from werkzeug.exceptions import HTTPException
def make_json_app(import_name, **kwargs):
    """
    Creates a JSON-oriented Flask app.

    All error responses that you don't specifically
    manage yourself will have application/json content
    type, and will contain JSON like this (just an example):

        { "message": "405: Method Not Allowed" }
    """
    def make_json_error(ex):
        status_code = ex.code if isinstance(ex, HTTPException) else 500
        response = flask.jsonify(Message=str(ex))
        response.status_code = status_code
        return response

    app = flask.Flask(import_name, **kwargs)
    # Register the JSON handler for every known HTTP error code.
    # NOTE(review): writing app.error_handler_spec directly is a legacy
    # (pre-1.0) Flask pattern -- confirm against the pinned Flask version.
    for code in default_exceptions.iterkeys():
        app.error_handler_spec[None][code] = make_json_error
    return app
# The mock application itself: all error responses are JSON.
app = make_json_app(__name__)

# Field lists used to validate incoming dataset requests in
# handle_dataset_request(), mirroring the EC dataset schema.

# Every field the schema knows about.
dataset_all_fields = [
    'Category',
    'Description',
    'License',
    'MaintainerContact',
    'MaintainerName',
    'OpennessRating',
    'PublishedOnBehalfOf',
    'Quality',
    'StandardName',
    'StandardRating',
    'StandardVersion',
    'Tags',
    'Theme',
    'Title',
    'UsageGuidance',
]

# Fields that must be present and non-empty.
dataset_mandatory_fields = [
    'Title',
    'Description',
    'MaintainerName',
    'MaintainerContact',
    'License',
    'OpennessRating',
    'Quality',
]

# Fields limited to 255 characters.
dataset_fields_under_255_characters = [
    'Title',
    'MaintainerName',
    'MaintainerContact',
    'License',
    'Category',
    'PublishedOnBehalfOf',
    'StandardName',
    'StandardVersion',
    'Theme',
    'UsageGuidance',
]
@app.route('/datasets', methods=['POST'])
def request_dataset_create():
    # POST /datasets -> request creation of a dataset.
    return handle_dataset_request()


@app.route('/datasets', methods=['PUT'])
def request_dataset_update():
    # PUT /datasets -> request an update of a dataset.
    return handle_dataset_request()
def _error_response(status_code, message, model_state=None):
    """Build an EC-style JSON error response (Message [+ ModelState])."""
    payload = {'Message': message}
    if model_state is not None:
        payload['ModelState'] = model_state
    response = flask.jsonify(**payload)
    response.status_code = status_code
    return response


def handle_dataset_request():
    """Validate a dataset create/update request like the real EC API.

    Returns 400 for a missing body or invalid fields, 401 for a missing
    or known-bad token, otherwise a JSON body with a fresh RequestId.
    """
    data = flask.request.json
    if not data:
        return _error_response(400, 'No data received')

    # Authorization
    if (not 'Authorization' in flask.request.headers or
            flask.request.headers['Authorization'] == 'unknown_token'):
        # 'Not Auhtorized' [sic] kept verbatim -- it may mirror the real
        # EC API response and tests may assert on it.
        return _error_response(401, 'Not Auhtorized')

    # Basic Validation
    for field in dataset_mandatory_fields:
        if not data.get(field):
            return _error_response(
                400, 'Missing fields',
                {'model.' + field:
                 ["The {0} field is required.".format(field)]})

    for field in dataset_fields_under_255_characters:
        if len(data.get(field, '')) > 255:
            return _error_response(
                400, 'Field too long',
                {'model.' + field:
                 ["{0} field must be shorter than 255 characters."
                  .format(field)]})

    # All good, return a request id
    return flask.jsonify(RequestId=unicode(uuid.uuid4()))
@app.route('/')
def api_description():
    """Self-describing index of the mock API's endpoints."""
    endpoints = {
        'Request dataset creation': 'POST /datasets',
        'Request dataset update': 'PUT /datasets',
    }
    return flask.jsonify(**endpoints)
def run(**kwargs):
    # Thin wrapper so test suites can launch the mock with custom options.
    app.run(**kwargs)


if __name__ == '__main__':
    # Standalone mode: serve the mock EC API on port 7070 with the debugger.
    run(port=7070, debug=True)
| Python | 0 | |
f1cc40c716f1e4f598e0a9230cd188fc897ac117 | add config | moon/config.py | moon/config.py | # -*- coding: utf-8 -*-
""" 这里是一些工具, 用来实现简单的项目配置系统 """
import logging
_confdata = {}  # project name -> (config file path, override dict)


def setconf(prjname, confile, confdict=None):
    """Register configuration for project *prjname*.

    *confile* is a Python file exec'd later by exportconf(); *confdict*
    holds overrides merged on top of it.

    Fixed: the old signature used a mutable default (``confdict={}``);
    every caller omitting the argument would have shared -- and been able
    to corrupt -- one single dict object.
    """
    _confdata[prjname] = (confile, {} if confdict is None else confdict)


def exportconf(prjname, globals):
    """Export the project's configuration into the *globals* mapping.

    The registered config file is exec'd into *globals*, then the override
    dict is merged on top. (The parameter keeps its historic name
    ``globals`` -- shadowing the builtin -- to stay keyword-compatible.)

    >>> open("/tmp/testmoonconf.py", "w").write("OSOS = 10")
    >>> setconf("hongbo", "/tmp/testmoonconf.py", {"OSOSOS": 321})
    >>> d = {}
    >>> exportconf("hongbo", d)
    >>> print d["OSOS"]
    10
    >>> print d["OSOSOS"]
    321
    """
    try:
        filename, confdict = _confdata[prjname]
    except KeyError as e:
        e.strerror = "Unable to find confdata for '%s', " \
                     "you must `setconf` first" % prjname
        raise
    try:
        with open(filename) as config_file:
            exec(compile(config_file.read(), filename, "exec"), globals)
        logging.info("Load config from %s", filename)
    except IOError as e:
        e.strerror = 'Unable to load configuration file (%s)' % e.strerror
        raise
    if confdict:
        globals.update(confdict)
if __name__ == "__main__":
import sys, os
sys.path.remove(os.path.abspath(os.path.dirname(__file__)))
import doctest
doctest.testmod()
| Python | 0.000002 | |
106a339561f5b79e0cd9508246d2f8da227c4fdc | move file to folder | move_hmdb51.py | move_hmdb51.py | import argparse
import os
import sys
import math
import cv2
import numpy as np
import multiprocessing
import re
import shutil
def main():
    """Sort per-video result files into per-class folders under data_dir.

    The class layout is mirrored from origin_file_dir; each file found
    under data_dir is moved into the class folder whose video listing
    contains the file's base name.

    Fixed relative to the previous version: the script body ran
    unconditionally at import time; the debug print crashed with
    IndexError on an empty data_dir; Python 2 print statements converted.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--data_dir', type=str, help="video image list",
                        default='/media/llj/storage/tvcj/hmdbcnn3_test')
    parser.add_argument('--origin_file_dir', type=str,
                        default='/media/llj/storage/hmdb51')
    args = parser.parse_args()

    # Collect every file name found anywhere under data_dir.
    txt_files = []
    for root, folders, filenames in os.walk(args.data_dir):
        for filename in filenames:
            txt_files.append(str(filename))
    if txt_files:  # guard: the old unconditional print died on empty dirs
        print(' 1 ', txt_files[0])

    # Mirror every class folder of the original dataset inside data_dir.
    for name in os.listdir(args.origin_file_dir):
        target = os.path.join(args.data_dir, name)
        if not os.path.exists(target):
            os.makedirs(target)

    # Move each file into the folder of the class whose video listing
    # contains its base name.
    for root, folders, _ in os.walk(args.origin_file_dir):
        for folder in folders:
            avi_files = os.listdir(os.path.join(root, folder))
            for txt in txt_files:
                # NOTE(review): substring test against str(list) -- works
                # for the original clip-naming scheme but is fragile if one
                # clip name is a prefix of another; confirm before reuse.
                if txt[:-4] in str(avi_files):
                    shutil.move(os.path.join(args.data_dir, txt),
                                os.path.join(args.data_dir, folder, txt))


if __name__ == '__main__':
    main()
| Python | 0.000003 | |
6349d8acfd76fc893dfdb6a7c12aebfe9ec1bac9 | add plexpy/Plex.tv | Contents/Libraries/Shared/subzero/lib/auth.py | Contents/Libraries/Shared/subzero/lib/auth.py | # coding=utf-8
# thanks, https://github.com/drzoidberg33/plexpy/blob/master/plexpy/plextv.py
class PlexTV(object):
    """
    Plex.tv authentication
    """
    # NOTE(review): this snippet references base64, plexpy, http_handler
    # and logger without importing them here -- confirm the enclosing
    # module provides them, or add the imports.

    def __init__(self, username=None, password=None):
        self.protocol = 'HTTPS'
        self.username = username
        self.password = password
        self.ssl_verify = plexpy.CONFIG.VERIFY_SSL_CERT
        # Shared handler for all requests against plex.tv:443.
        self.request_handler = http_handler.HTTPHandler(host='plex.tv',
                                                        port=443,
                                                        token=plexpy.CONFIG.PMS_TOKEN,
                                                        ssl_verify=self.ssl_verify)

    def get_plex_auth(self, output_format='raw'):
        """POST the sign-in request; returns the response in *output_format*."""
        uri = '/users/sign_in.xml'
        # HTTP basic credentials; the .replace strips the trailing newline
        # that base64.encodestring appends (encodestring is the legacy
        # Python 2 API).
        base64string = base64.encodestring('%s:%s' % (self.username, self.password)).replace('\n', '')
        headers = {'Content-Type': 'application/xml; charset=utf-8',
                   'Content-Length': '0',
                   'X-Plex-Device-Name': 'PlexPy',
                   'X-Plex-Product': 'PlexPy',
                   'X-Plex-Version': 'v0.1 dev',
                   'X-Plex-Client-Identifier': plexpy.CONFIG.PMS_UUID,
                   # NOTE(review): a ':' is appended *after* the encoded
                   # credentials -- looks odd for HTTP Basic; confirm the
                   # plex.tv endpoint actually expects this.
                   'Authorization': 'Basic %s' % base64string + ":"
                   }
        request = self.request_handler.make_request(uri=uri,
                                                    proto=self.protocol,
                                                    request_type='POST',
                                                    headers=headers,
                                                    output_format=output_format)
        return request

    def get_token(self):
        """Sign in and extract authenticationToken from the XML response.

        Returns the token string, or [] when sign-in or parsing failed
        (callers appear to rely only on truthiness of the result).
        """
        plextv_response = self.get_plex_auth(output_format='xml')
        if plextv_response:
            xml_head = plextv_response.getElementsByTagName('user')
            if not xml_head:
                logger.warn("Error parsing XML for Plex.tv token")
                return []
            auth_token = xml_head[0].getAttribute('authenticationToken')
            return auth_token
        else:
            return []
d0c2ee2e0d848a586cc03ba5ac5da697b333ef32 | Create list of random num | Misc/listOfRandomNum.py | Misc/listOfRandomNum.py | #List of randoms
import random
import math
# Draw ten random integers in [1, 19] and print each one.
numList = [random.randrange(1, 20) for _ in range(10)]
for value in numList:
    print("Rand num = " + str(value))
| Python | 0.00002 | |
9f508a429949d59f9969cc1e17a9094fa7c2441d | Create routines.py | routines.py | routines.py | Python | 0.000003 | ||
85abbe29c7c764deac75b6e7b95e1ccec645d84b | Add icmp_ping ansible module | ansible-tests/validations/library/icmp_ping.py | ansible-tests/validations/library/icmp_ping.py | #!/usr/bin/env python
DOCUMENTATION = '''
---
module: icmp_ping
short_description: ICMP ping remote hosts
requirements: [ ping ]
description:
- Check host connectivity with ICMP ping.
options:
host:
required: true
description:
- IP address or hostname of host to ping
type: str
author: "Martin Andre (@mandre)"
'''
# Usage examples shown by ansible-doc.
# Fixed: the old examples documented the core `ping` module plus an
# unrelated group task; they now exercise this module (icmp_ping).
EXAMPLES = '''
# Check host connectivity with ICMP ping:
- hosts: webservers
  tasks:
    - name: Check Internet connectivity
      icmp_ping: host="www.ansible.com"
'''
def main():
    """Entry point: ping the given host once and report reachability."""
    module = AnsibleModule(
        argument_spec=dict(
            host=dict(required=True, type='str'),
        )
    )

    host = module.params.pop('host')
    # Fixed: build the command as an argv list instead of interpolating the
    # host into a command string, so hostnames containing spaces or shell
    # metacharacters can neither break nor inject into the command line.
    rc = module.run_command(['ping', '-c', '1', host])[0]
    failed = rc != 0

    module.exit_json(failed=failed, changed=False)
# Ansible convention: module_utils is star-imported at the bottom of the
# file; it provides AnsibleModule used by main().
from ansible.module_utils.basic import *
if __name__ == '__main__':
    main()
| Python | 0 | |
24cf3ca775e8f42fa73217e29d3662a32627f9ea | Use a more reliable method to get the commit SHA for a tag. | buedafab/notify.py | buedafab/notify.py | """Deploy notification hooks for third party services like Campfire and Hoptoad.
"""
from fabric.api import env, require, local
from fabric.decorators import runs_once
import os
from buedafab import utils
@runs_once
def hoptoad_deploy(deployed=False):
    """Notify Hoptoad of the time and commit SHA of an app deploy.

    Requires the hoppy Python package and the env keys:

        hoptoad_api_key - as it sounds.
        deployment_type - app environment
        release - the commit SHA or git tag of the deployed version
        scm - path to the remote git repository
    """
    require('hoptoad_api_key')
    require('deployment_type')
    require('release')
    require('scm')
    if deployed and env.hoptoad_api_key:
        # Resolve the release ref to an abbreviated SHA for the report.
        commit = local('git rev-parse --short %(release)s' % env,
                capture=True)
        # Imported lazily: hoppy is only needed when actually notifying.
        import hoppy.deploy
        hoppy.api_key = env.hoptoad_api_key
        try:
            hoppy.deploy.Deploy().deploy(
                    env=env.deployment_type,
                    scm_revision=commit,
                    scm_repository=env.scm,
                    local_username=os.getlogin())
        except Exception, e:
            # Best-effort: a notification failure must not fail the deploy.
            print ("Couldn't notify Hoptoad of the deploy, but continuing "
                    "anyway: %s" % e)
        else:
            print ('Hoptoad notified of deploy of %s@%s to %s environment by %s'
                    % (env.scm, commit, env.deployment_type, os.getlogin()))
@runs_once
def campfire_notify(deployed=False):
    """Hop in Campfire and notify your developers of the time and commit SHA of
    an app deploy.

    Requires the pinder Python package and the env keys:

        deployment_type - app environment
        release - the commit SHA or git tag of the deployed version
        scm_http_url - path to an HTTP view of the remote git repository
        campfire_subdomain - subdomain of your Campfire account
        campfire_token - API token for Campfire
        campfire_room - the room to join and notify (the string name, e.g.
            "Developers")
    """
    require('deployment_type')
    require('release')
    if (deployed and env.campfire_subdomain and env.campfire_token
            and env.campfire_room):
        from pinder import Campfire
        # rev-list | head -1 resolves env.release to its underlying commit
        # SHA even for an annotated tag (rev-parse on a tag would return
        # the tag object's id instead).
        deploying = local('git rev-list --abbrev-commit %s | head -n 1' %
                env.release, capture=True)
        branch = utils.branch(env.release)
        if env.tagged:
            # For tagged releases, report the tag name as the "branch".
            require('release')
            branch = env.release
        name = env.unit
        deployer = os.getlogin()
        # NOTE(review): the boolean `deployed` parameter is reassigned here
        # to the previously-deployed version string; the later
        # `if deployed:` therefore means "was something deployed before".
        deployed = env.deployed_version
        target = env.deployment_type.lower()
        source_repo_url = env.scm_http_url
        compare_url = ('%s/compare/%s...%s' % (source_repo_url, deployed,
            deploying))
        campfire = Campfire(env.campfire_subdomain, env.campfire_token,
                ssl=True)
        room = campfire.find_room_by_name(env.campfire_room)
        room.join()
        if deployed:
            message = ('%s is deploying %s %s (%s..%s) to %s %s'
                    % (deployer, name, branch, deployed, deploying, target,
                        compare_url))
        else:
            message = ('%s is deploying %s %s to %s' % (deployer, name,
                    branch, target))
        room.speak(message)
        print 'Campfire notified that %s' % message
| """Deploy notification hooks for third party services like Campfire and Hoptoad.
"""
from fabric.api import env, require, local
from fabric.decorators import runs_once
import os
from buedafab import utils
# --- Previous revision (identical to the current hoptoad_deploy) ------------
@runs_once
def hoptoad_deploy(deployed=False):
    """Notify Hoptoad of the time and commit SHA of an app deploy.

    Requires the hoppy Python package and the env keys:

        hoptoad_api_key - as it sounds.
        deployment_type - app environment
        release - the commit SHA or git tag of the deployed version
        scm - path to the remote git repository
    """
    require('hoptoad_api_key')
    require('deployment_type')
    require('release')
    require('scm')
    if deployed and env.hoptoad_api_key:
        commit = local('git rev-parse --short %(release)s' % env,
                capture=True)
        import hoppy.deploy
        hoppy.api_key = env.hoptoad_api_key
        try:
            hoppy.deploy.Deploy().deploy(
                    env=env.deployment_type,
                    scm_revision=commit,
                    scm_repository=env.scm,
                    local_username=os.getlogin())
        except Exception, e:
            # Best-effort: never fail the deploy over a notification.
            print ("Couldn't notify Hoptoad of the deploy, but continuing "
                    "anyway: %s" % e)
        else:
            print ('Hoptoad notified of deploy of %s@%s to %s environment by %s'
                    % (env.scm, commit, env.deployment_type, os.getlogin()))
# --- Previous revision of campfire_notify (superseded) ----------------------
# Differs from the current one only in how `deploying` is computed:
# `git rev-parse --short <release>` -- which, for an annotated tag, yields
# the tag object's id rather than the commit SHA (hence the later switch
# to rev-list | head).
@runs_once
def campfire_notify(deployed=False):
    """Hop in Campfire and notify your developers of the time and commit SHA of
    an app deploy.

    Requires the pinder Python package and the env keys:

        deployment_type - app environment
        release - the commit SHA or git tag of the deployed version
        scm_http_url - path to an HTTP view of the remote git repository
        campfire_subdomain - subdomain of your Campfire account
        campfire_token - API token for Campfire
        campfire_room - the room to join and notify (the string name, e.g.
            "Developers")
    """
    require('deployment_type')
    require('release')
    if (deployed and env.campfire_subdomain and env.campfire_token
            and env.campfire_room):
        from pinder import Campfire
        deploying = local('git rev-parse --short %(release)s' % env,
                capture=True)
        branch = utils.branch(env.release)
        if env.tagged:
            require('release')
            branch = env.release
        name = env.unit
        deployer = os.getlogin()
        # NOTE: `deployed` is reassigned from boolean flag to the
        # previously-deployed version string.
        deployed = env.deployed_version
        target = env.deployment_type.lower()
        source_repo_url = env.scm_http_url
        compare_url = ('%s/compare/%s...%s' % (source_repo_url, deployed,
            deploying))
        campfire = Campfire(env.campfire_subdomain, env.campfire_token,
                ssl=True)
        room = campfire.find_room_by_name(env.campfire_room)
        room.join()
        if deployed:
            message = ('%s is deploying %s %s (%s..%s) to %s %s'
                    % (deployer, name, branch, deployed, deploying, target,
                        compare_url))
        else:
            message = ('%s is deploying %s %s to %s' % (deployer, name,
                    branch, target))
        room.speak(message)
        print 'Campfire notified that %s' % message
| Python | 0 |
d3937b803baf036d5bd96dfcb1e10e51b29bab1e | Create migration | fellowms/migrations/0023_event_ad_status.py | fellowms/migrations/0023_event_ad_status.py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-06-06 13:00
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: add Event.ad_status, a single-char advert state.

    Do not hand-edit generated migrations once applied; create a follow-up
    migration instead.
    """

    dependencies = [
        ('fellowms', '0022_fellow_user'),
    ]

    operations = [
        migrations.AddField(
            model_name='event',
            name='ad_status',
            # U=Unprocessed (default), V=Visible, H=Hide, A=Archived
            field=models.CharField(choices=[('U', 'Unprocessed'), ('V', 'Visible'), ('H', 'Hide'), ('A', 'Archived')], default='U', max_length=1),
        ),
    ]
| Python | 0.000001 | |
db465ffe58a425b651930eaf1778ef179ed42d2a | Rename res_dict to result and add comment. | redash/query_runner/dynamodb_sql.py | redash/query_runner/dynamodb_sql.py | import json
import logging
import sys
from redash.query_runner import *
from redash.utils import JSONEncoder
logger = logging.getLogger(__name__)
try:
from dql import Engine, FragmentEngine
from pyparsing import ParseException
enabled = True
except ImportError, e:
enabled = False
# Maps upper-cased type names to redash column type constants for the
# columns returned by run_query().
# NOTE(review): the keys mix SQL/Hive-style names (TINYINT, VARCHAR) with
# Python type names (UNICODE); run_query derives its lookup key from the
# Python value's type, so several entries may be unreachable -- confirm
# against the types DQL actually returns.
# NOTE(review): 'UNICODE' -> TYPE_INTEGER looks suspicious; presumably it
# was meant to be TYPE_STRING -- verify before relying on it.
types_map = {
    'UNICODE': TYPE_INTEGER,
    'TINYINT': TYPE_INTEGER,
    'SMALLINT': TYPE_INTEGER,
    'INT': TYPE_INTEGER,
    'DOUBLE': TYPE_FLOAT,
    'DECIMAL': TYPE_FLOAT,
    'FLOAT': TYPE_FLOAT,
    'REAL': TYPE_FLOAT,
    'BOOLEAN': TYPE_BOOLEAN,
    'TIMESTAMP': TYPE_DATETIME,
    'DATE': TYPE_DATETIME,
    'CHAR': TYPE_STRING,
    'STRING': TYPE_STRING,
    'VARCHAR': TYPE_STRING
}
class DynamoDBSQL(BaseSQLQueryRunner):
@classmethod
def configuration_schema(cls):
return {
"type": "object",
"properties": {
"region": {
"type": "string",
"default": "us-east-1"
},
"access_key": {
"type": "string",
},
"secret_key": {
"type": "string",
}
},
"required": ["access_key", "secret_key"],
"secret": ["secret_key"]
}
def test_connection(self):
engine = self._connect()
list(engine.connection.list_tables())
@classmethod
def annotate_query(cls):
return False
@classmethod
def type(cls):
return "dynamodb_sql"
@classmethod
def name(cls):
return "DynamoDB (with DQL)"
def __init__(self, configuration):
super(DynamoDBSQL, self).__init__(configuration)
def _connect(self):
engine = FragmentEngine()
config = self.configuration.to_dict()
if not config.get('region'):
config['region'] = 'us-east-1'
if config.get('host') == '':
config['host'] = None
engine.connect(**config)
return engine
def _get_tables(self, schema):
engine = self._connect()
for table in engine.describe_all():
schema[table.name] = {'name': table.name, 'columns': table.attrs.keys()}
def run_query(self, query, user):
engine = None
try:
engine = self._connect()
result = engine.execute(query if str(query).endswith(';') else str(query)+';')
columns = []
rows = []
# When running a count query it returns the value as a string, in which case
# we transform it into a dictionary to be the same as regular queries.
if isinstance(result, basestring):
result = [{"value": result}]
for item in result:
if not columns:
for k, v in item.iteritems():
columns.append({
'name': k,
'friendly_name': k,
'type': types_map.get(str(type(v)).upper(), None)
})
rows.append(item)
data = {'columns': columns, 'rows': rows}
json_data = json.dumps(data, cls=JSONEncoder)
error = None
except ParseException as e:
error = u"Error parsing query at line {} (column {}):\n{}".format(e.lineno, e.column, e.line)
json_data = None
except (SyntaxError, RuntimeError) as e:
error = e.message
json_data = None
except KeyboardInterrupt:
if engine and engine.connection:
engine.connection.cancel()
error = "Query cancelled by user."
json_data = None
except Exception as e:
raise sys.exc_info()[1], None, sys.exc_info()[2]
return json_data, error
register(DynamoDBSQL)
| import json
import logging
import sys
from redash.query_runner import *
from redash.utils import JSONEncoder
logger = logging.getLogger(__name__)
try:
from dql import Engine, FragmentEngine
from pyparsing import ParseException
enabled = True
except ImportError, e:
enabled = False
# Maps upper-cased type names to redash column type constants for the
# columns returned by run_query().
# NOTE(review): the keys mix SQL/Hive-style names (TINYINT, VARCHAR) with
# Python type names (UNICODE); run_query derives its lookup key from the
# Python value's type, so several entries may be unreachable -- confirm
# against the types DQL actually returns.
# NOTE(review): 'UNICODE' -> TYPE_INTEGER looks suspicious; presumably it
# was meant to be TYPE_STRING -- verify before relying on it.
types_map = {
    'UNICODE': TYPE_INTEGER,
    'TINYINT': TYPE_INTEGER,
    'SMALLINT': TYPE_INTEGER,
    'INT': TYPE_INTEGER,
    'DOUBLE': TYPE_FLOAT,
    'DECIMAL': TYPE_FLOAT,
    'FLOAT': TYPE_FLOAT,
    'REAL': TYPE_FLOAT,
    'BOOLEAN': TYPE_BOOLEAN,
    'TIMESTAMP': TYPE_DATETIME,
    'DATE': TYPE_DATETIME,
    'CHAR': TYPE_STRING,
    'STRING': TYPE_STRING,
    'VARCHAR': TYPE_STRING
}
class DynamoDBSQL(BaseSQLQueryRunner):
@classmethod
def configuration_schema(cls):
return {
"type": "object",
"properties": {
"region": {
"type": "string",
"default": "us-east-1"
},
"access_key": {
"type": "string",
},
"secret_key": {
"type": "string",
}
},
"required": ["access_key", "secret_key"],
"secret": ["secret_key"]
}
def test_connection(self):
engine = self._connect()
list(engine.connection.list_tables())
@classmethod
def annotate_query(cls):
return False
@classmethod
def type(cls):
return "dynamodb_sql"
@classmethod
def name(cls):
return "DynamoDB (with DQL)"
def __init__(self, configuration):
super(DynamoDBSQL, self).__init__(configuration)
def _connect(self):
engine = FragmentEngine()
config = self.configuration.to_dict()
if not config.get('region'):
config['region'] = 'us-east-1'
if config.get('host') == '':
config['host'] = None
engine.connect(**config)
return engine
def _get_tables(self, schema):
engine = self._connect()
for table in engine.describe_all():
schema[table.name] = {'name': table.name, 'columns': table.attrs.keys()}
def run_query(self, query, user):
engine = None
try:
engine = self._connect()
res_dict = engine.execute(query if str(query).endswith(';') else str(query)+';')
columns = []
rows = []
if isinstance(result, basestring):
result = [{"value": result}]
for item in res_dict:
if not columns:
for k, v in item.iteritems():
columns.append({
'name': k,
'friendly_name': k,
'type': types_map.get(str(type(v)).upper(), None)
})
rows.append(item)
data = {'columns': columns, 'rows': rows}
json_data = json.dumps(data, cls=JSONEncoder)
error = None
except ParseException as e:
error = u"Error parsing query at line {} (column {}):\n{}".format(e.lineno, e.column, e.line)
json_data = None
except (SyntaxError, RuntimeError) as e:
error = e.message
json_data = None
except KeyboardInterrupt:
if engine and engine.connection:
engine.connection.cancel()
error = "Query cancelled by user."
json_data = None
except Exception as e:
raise sys.exc_info()[1], None, sys.exc_info()[2]
return json_data, error
register(DynamoDBSQL)
| Python | 0 |
101d6bc6d62e7e08d7c6867a77e58b0b168afc17 | Add migrations to remove all the models | pinax/stripe/migrations/0015_auto_20190120_1239.py | pinax/stripe/migrations/0015_auto_20190120_1239.py | # Generated by Django 2.1.5 on 2019-01-20 18:39
from django.db import migrations, models
class Migration(migrations.Migration):
    """Drop all pinax-stripe models, keeping only a slimmed-down Event.

    Relations between the models are removed first so that the models
    themselves can then be deleted; Event's ``customer``/``stripe_account``
    foreign keys are replaced with plain ``customer_id``/``account_id``
    char fields.
    """
    dependencies = [
        ('pinax_stripe', '0014_auto_20180413_1959'),
    ]
    operations = [
        # First remove every inter-model field/constraint so the models can
        # be deleted without dangling references.
        migrations.RemoveField(
            model_name='account',
            name='user',
        ),
        migrations.RemoveField(
            model_name='bankaccount',
            name='account',
        ),
        migrations.RemoveField(
            model_name='bitcoinreceiver',
            name='customer',
        ),
        migrations.RemoveField(
            model_name='card',
            name='customer',
        ),
        migrations.RemoveField(
            model_name='charge',
            name='customer',
        ),
        migrations.RemoveField(
            model_name='charge',
            name='invoice',
        ),
        migrations.DeleteModel(
            name='Coupon',
        ),
        migrations.RemoveField(
            model_name='customer',
            name='stripe_account',
        ),
        migrations.RemoveField(
            model_name='customer',
            name='user',
        ),
        migrations.RemoveField(
            model_name='customer',
            name='users',
        ),
        migrations.RemoveField(
            model_name='invoice',
            name='charge',
        ),
        migrations.RemoveField(
            model_name='invoice',
            name='customer',
        ),
        migrations.RemoveField(
            model_name='invoice',
            name='subscription',
        ),
        migrations.RemoveField(
            model_name='invoiceitem',
            name='invoice',
        ),
        migrations.RemoveField(
            model_name='invoiceitem',
            name='plan',
        ),
        migrations.RemoveField(
            model_name='invoiceitem',
            name='subscription',
        ),
        migrations.AlterUniqueTogether(
            name='plan',
            unique_together=set(),
        ),
        migrations.RemoveField(
            model_name='plan',
            name='stripe_account',
        ),
        migrations.RemoveField(
            model_name='subscription',
            name='customer',
        ),
        migrations.RemoveField(
            model_name='subscription',
            name='plan',
        ),
        migrations.RemoveField(
            model_name='transfer',
            name='event',
        ),
        migrations.RemoveField(
            model_name='transfer',
            name='stripe_account',
        ),
        migrations.RemoveField(
            model_name='transferchargefee',
            name='transfer',
        ),
        migrations.AlterUniqueTogether(
            name='useraccount',
            unique_together=set(),
        ),
        migrations.RemoveField(
            model_name='useraccount',
            name='account',
        ),
        migrations.RemoveField(
            model_name='useraccount',
            name='customer',
        ),
        migrations.RemoveField(
            model_name='useraccount',
            name='user',
        ),
        migrations.RemoveField(
            model_name='event',
            name='customer',
        ),
        migrations.RemoveField(
            model_name='event',
            name='stripe_account',
        ),
        # Event survives: its FK relations above are replaced by plain
        # string identifier fields.
        migrations.AddField(
            model_name='event',
            name='account_id',
            field=models.CharField(blank=True, max_length=200),
        ),
        migrations.AddField(
            model_name='event',
            name='customer_id',
            field=models.CharField(blank=True, max_length=200),
        ),
        # Finally delete the now-unreferenced models.
        migrations.DeleteModel(
            name='Account',
        ),
        migrations.DeleteModel(
            name='BankAccount',
        ),
        migrations.DeleteModel(
            name='BitcoinReceiver',
        ),
        migrations.DeleteModel(
            name='Card',
        ),
        migrations.DeleteModel(
            name='Charge',
        ),
        migrations.DeleteModel(
            name='Customer',
        ),
        migrations.DeleteModel(
            name='Invoice',
        ),
        migrations.DeleteModel(
            name='InvoiceItem',
        ),
        migrations.DeleteModel(
            name='Plan',
        ),
        migrations.DeleteModel(
            name='Subscription',
        ),
        migrations.DeleteModel(
            name='Transfer',
        ),
        migrations.DeleteModel(
            name='TransferChargeFee',
        ),
        migrations.DeleteModel(
            name='UserAccount',
        ),
    ]
| Python | 0 | |
0781070ee0c17a34a3cc9521e8a6b67c401aa692 | Add WGAN Tests | models/wgan_test.py | models/wgan_test.py | # Lint as: python3
"""Tests for WGAN model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import numpy as np
import os
import wgan
class SpectralTest(tf.test.TestCase):
  """Shape checks for wgan._get_interpolation."""

  def _check_shape_preserved(self, shape):
    # Interpolating two random tensors must yield the same shape back.
    left = np.random.normal(size=shape)
    right = np.random.normal(size=shape)
    mixed = wgan._get_interpolation(left, right)
    self.assertShapeEqual(left, mixed)

  def test_interpolation_2d(self):
    self._check_shape_preserved((10, 256))

  def test_interpolation_3d(self):
    self._check_shape_preserved((10, 256, 32))
if __name__ == '__main__':
    # Hide all CUDA devices so the shape checks run on CPU only.
    os.environ["CUDA_VISIBLE_DEVICES"] = ''
    tf.test.main()
| Python | 0 | |
e90d12802ff62738cbe4094e8db079f6519f47a5 | Create BDayGift.py | Probability/BDayGift.py | Probability/BDayGift.py | import sys;
# Read the number of gifts, then each gift value, one per line.
n = int(sys.stdin.readline())
# Expected winnings: half of the total of all n values.
total = sum(int(sys.stdin.readline()) for _ in range(n))
print(total / 2.0)
| Python | 0 | |
45f91a92fd3ae08dd7403707f3981f306122eb6c | test task creation | freelancefinder/remotes/tests/test_tasks.py | freelancefinder/remotes/tests/test_tasks.py | """Tests related to the remotes.tasks functions."""
from django_celery_beat.models import IntervalSchedule, PeriodicTask
from ..tasks import setup_periodic_tasks
def test_make_tasks():
    """Ensure that setup makes some tasks/schedules."""
    setup_periodic_tasks(None)

    # Setup must have created at least one schedule and one periodic task.
    assert IntervalSchedule.objects.all().count() > 0
    assert PeriodicTask.objects.all().count() > 0
| Python | 0.000322 | |
9edce7cb1704aa1d06b74b661725d54b465e54da | Add SQLALCHEMY_ECHO support in heroku.py (when debugging) | heroku.py | heroku.py | #!/usr/bin/env python
from evesrp import create_app
from evesrp.killmail import CRESTMail, ZKillmail
from evesrp.transformers import ShipTransformer, PilotTransformer
from evesrp.auth.testauth import TestAuth
from evesrp.auth.bravecore import BraveCore
from os import environ as env
from binascii import unhexlify
from ecdsa import SigningKey, VerifyingKey, NIST256p
from hashlib import sha256
class TestZKillboard(ZKillmail):
    """ZKillmail restricted to the pleaseignore.com killboards, valued at 0."""
    def __init__(self, *args, **kwargs):
        super(TestZKillboard, self).__init__(*args, **kwargs)
        # Reject killmails that did not come from one of the known boards.
        if self.domain not in ('zkb.pleaseignore.com', 'kb.pleaseignore.com'):
            raise ValueError("This killmail is from the wrong killboard")
    @property
    def value(self):
        # These killmails never carry a payout value.
        return 0
def hex2key(hex_key):
    """Build an ECDSA key object from its hex encoding.

    A 64-hex-digit string decodes to a NIST256p signing (private) key and a
    128-digit string to a verifying (public) key; any other length is an
    error.
    """
    raw = unhexlify(hex_key)
    digits = len(hex_key)
    if digits == 64:
        return SigningKey.from_string(raw, curve=NIST256p, hashfunc=sha256)
    if digits == 128:
        return VerifyingKey.from_string(raw, curve=NIST256p, hashfunc=sha256)
    raise ValueError("Key in hex form is of the wrong length.")
def configure_app(app, config):
    """Configure the app from a mapping of settings (normally ``os.environ``).

    Sets the database URI, auth methods and killmail sources, and optionally
    enables debug mode, SQL echoing, Brave Core auth and a secret key when
    the corresponding entries are present in ``config``.
    """
    app.config['USER_AGENT_EMAIL'] = 'paxswill@paxswill.com'
    # Default to an in-memory SQLite database when DATABASE_URL is unset.
    app.config['SQLALCHEMY_DATABASE_URI'] = config.get('DATABASE_URL',
            'sqlite:///')
    app.config['AUTH_METHODS'] = [TestAuth(admins=['paxswill',]), ]
    # Bugfix: EveWikiZKillmail/EveWikiCRESTMail are not defined anywhere in
    # this module and raised NameError; use the killmail sources that
    # actually exist here.
    app.config['KILLMAIL_SOURCES'] = [
        TestZKillboard,
        CRESTMail
    ]
    # Configure Brave Core only if all the needed settings are present.
    try:
        core_private_key = hex2key(config['CORE_AUTH_PRIVATE_KEY'])
        core_public_key = hex2key(config['CORE_AUTH_PUBLIC_KEY'])
        core_identifier = config['CORE_AUTH_IDENTIFIER']
    except KeyError:
        pass
    else:
        app.config['AUTH_METHODS'].append(BraveCore(core_private_key,
            core_public_key, core_identifier))
    if config.get('DEBUG') is not None:
        app.debug = True
    if config.get('SQLALCHEMY_ECHO') is not None:
        # Echo all SQL statements (useful together with DEBUG).
        app.config['SQLALCHEMY_ECHO'] = True
    secret_key = config.get('SECRET_KEY')
    if secret_key is not None:
        # SECRET_KEY is provided hex-encoded in the environment.
        app.config['SECRET_KEY'] = unhexlify(secret_key)
# Build and configure the application at import time so a WSGI server can
# use this module's ``app`` object directly.
app = create_app()
configure_app(app, env)
if __name__ == '__main__':
    # Importing these models registers them with SQLAlchemy so that
    # create_all() below also creates their tables.
    from evesrp.auth.testauth import TestUser, TestGroup
    from evesrp.auth.bravecore import CoreUser, CoreGroup
    print("Creating databases...")
    app.extensions['sqlalchemy'].db.create_all(app=app)
| #!/usr/bin/env python
from evesrp import create_app
from evesrp.killmail import CRESTMail, ZKillmail
from evesrp.transformers import ShipTransformer, PilotTransformer
from evesrp.auth.testauth import TestAuth
from evesrp.auth.bravecore import BraveCore
from os import environ as env
from binascii import unhexlify
from ecdsa import SigningKey, VerifyingKey, NIST256p
from hashlib import sha256
class TestZKillboard(ZKillmail):
    """ZKillmail restricted to the pleaseignore.com killboards, valued at 0."""

    # Killboard hosts this killmail source will accept.
    _ALLOWED_DOMAINS = ('zkb.pleaseignore.com', 'kb.pleaseignore.com')

    def __init__(self, *args, **kwargs):
        super(TestZKillboard, self).__init__(*args, **kwargs)
        # Reject killmails that did not come from one of the known boards.
        if self.domain not in self._ALLOWED_DOMAINS:
            raise ValueError("This killmail is from the wrong killboard")

    @property
    def value(self):
        # These killmails never carry a payout value.
        return 0
def hex2key(hex_key):
    """Load an ECDSA key from a hex-encoded string.

    64 hex digits (32 bytes) are treated as a NIST256p signing (private)
    key and 128 digits (64 bytes) as a verifying (public) key; any other
    length raises ValueError.
    """
    key_bytes = unhexlify(hex_key)
    if len(hex_key) == 64:
        return SigningKey.from_string(key_bytes, curve=NIST256p,
                hashfunc=sha256)
    elif len(hex_key) == 128:
        return VerifyingKey.from_string(key_bytes, curve=NIST256p,
                hashfunc=sha256)
    else:
        raise ValueError("Key in hex form is of the wrong length.")
def configure_app(app, config):
    """Configure the app from a mapping of settings (normally ``os.environ``).

    Sets the database URI, auth methods and killmail sources, and optionally
    enables debug mode, Brave Core auth and a secret key when the
    corresponding entries are present in ``config``.
    """
    app.config['USER_AGENT_EMAIL'] = 'paxswill@paxswill.com'
    # Default to an in-memory SQLite database when DATABASE_URL is unset.
    app.config['SQLALCHEMY_DATABASE_URI'] = config.get('DATABASE_URL',
            'sqlite:///')
    app.config['AUTH_METHODS'] = [TestAuth(admins=['paxswill',]), ]
    # Bugfix: EveWikiZKillmail/EveWikiCRESTMail are not defined anywhere in
    # this module and raised NameError; use the killmail sources that
    # actually exist here.
    app.config['KILLMAIL_SOURCES'] = [
        TestZKillboard,
        CRESTMail
    ]
    # Configure Brave Core only if all the needed settings are present.
    try:
        core_private_key = hex2key(config['CORE_AUTH_PRIVATE_KEY'])
        core_public_key = hex2key(config['CORE_AUTH_PUBLIC_KEY'])
        core_identifier = config['CORE_AUTH_IDENTIFIER']
    except KeyError:
        pass
    else:
        app.config['AUTH_METHODS'].append(BraveCore(core_private_key,
            core_public_key, core_identifier))
    if config.get('DEBUG') is not None:
        app.debug = True
    secret_key = config.get('SECRET_KEY')
    if secret_key is not None:
        # SECRET_KEY is provided hex-encoded in the environment.
        app.config['SECRET_KEY'] = unhexlify(secret_key)
# Build and configure the application at import time so a WSGI server can
# use this module's ``app`` object directly.
app = create_app()
configure_app(app, env)
if __name__ == '__main__':
    # Importing these models registers them with SQLAlchemy so that
    # create_all() below also creates their tables.
    from evesrp.auth.testauth import TestUser, TestGroup
    from evesrp.auth.bravecore import CoreUser, CoreGroup
    print("Creating databases...")
    app.extensions['sqlalchemy'].db.create_all(app=app)
| Python | 0 |
cd3f59026b9026d62537b38d4e9d70a740e88018 | Add tests for java mode | tests/test_java_mode.py | tests/test_java_mode.py | import editor_manager
import editor_common
import curses
import curses.ascii
import keytab
from ped_test_util import read_str,validate_screen,editor_test_suite,play_macro,screen_size,match_attr
def test_java_mode(testdir,capsys):
    """Open a .java file in the editor and check Java syntax highlighting.

    Writes a small HelloWorld.java fixture, opens it under curses, verifies
    the editor picks java_mode, checks highlight colors at known positions,
    then types an if-block and checks auto-indent and highlighting of the
    new lines.
    """
    with capsys.disabled():
        def main(stdscr):
            # Fixture source written to the temporary .java file.
            lines_to_test = [
                '// This is a simple Java program.',
                '// FileName : "HelloWorld.java"',
                'class HelloWorld',
                '{',
                '    // Your program begins with a call to main()',
                '    // Prints "Hello, World" to the terminal window',
                '    public static void main(String args[])',
                '    {',
                '        System.out.println("Hello, World");',
                '    }',
                '}'
            ]
            args = { "java_test":"\n".join(lines_to_test)}
            testfile = testdir.makefile(".java", **args)
            # Named handles for the editor's color pairs.
            # NOTE(review): assumes pairs 1-4 are green/red/cyan/white as
            # initialized by the editor -- confirm against editor_common.
            green = curses.color_pair(1)
            red = curses.color_pair(2)
            cyan = curses.color_pair(3)
            # NOTE(review): ``white`` is unused below.
            white = curses.color_pair(4)
            ed = editor_common.Editor(stdscr,None,str(testfile))
            ed.setWin(stdscr.subwin(ed.max_y,ed.max_x,0,0))
            # Two redraw passes before validating the rendered screen.
            ed.main(False)
            ed.main(False)
            validate_screen(ed)
            # The editor must have detected the Java mode from the extension.
            assert(ed.mode and ed.mode.name() == "java_mode")
            # (file line, column, width, expected attr) for known tokens:
            # comments red, keywords cyan, string literals green.
            match_list = [(0,0,32,red),(2,0,5,cyan),(4,4,44,red),(8,27,14,green)]
            for line,pos,width,attr in match_list:
                # Screen row is offset by one from the file line.
                assert(match_attr(ed.scr,line+1,pos,1,width,attr))
            # Move to end of the opening brace line and press Enter (key 10).
            ed.goto(7,5)
            ed.endln()
            ed.main(False,10)
            # Cursor should land on a new line, auto-indented to column 4.
            assert(ed.getLine() == 8 and ed.getPos() == 4)
            # Type an if-block, one line per Enter.
            ed.insert('if (20 > 18) {')
            ed.main(False,10)
            ed.insert('System.out.println("20 greater than 18");')
            ed.main(False,10)
            ed.insert('}')
            ed.main(False,10)
            ed.main(False)
            ed.main(False)
            # New lines must be highlighted: 'if' keyword and string literal.
            assert(match_attr(ed.scr,9,4,1,2,cyan))
            assert(match_attr(ed.scr,10,27,1,20,green))
            assert(ed.getLine() == 11 and ed.getPos() == 4)
        curses.wrapper(main)
| Python | 0.000001 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.