commit stringlengths 40 40 | subject stringlengths 4 1.73k | repos stringlengths 5 127k | old_file stringlengths 2 751 | new_file stringlengths 2 751 | new_contents stringlengths 1 8.98k | old_contents stringlengths 0 6.59k | license stringclasses 13 values | lang stringclasses 23 values |
|---|---|---|---|---|---|---|---|---|
497f1c70d0ecedb904f5b71be494e01246d874f6 | Add weight test | Net-ng/kansha,bcroq/kansha,bcroq/kansha,Net-ng/kansha,bcroq/kansha,bcroq/kansha,Net-ng/kansha,Net-ng/kansha | kansha/card_addons/weight/tests.py | kansha/card_addons/weight/tests.py | # -*- coding:utf-8 -*-
#--
# Copyright (c) 2012-2014 Net-ng.
# All rights reserved.
#
# This software is licensed under the BSD License, as described in
# the file LICENSE.txt, which you should have received as part of
# this distribution.
#--
from kansha.cardextension.tests import CardExtensionTestCase
from .comp import CardWeightEditor
class CardWeightTest(CardExtensionTestCase):
    """Card-extension test for the weight addon.

    Relies on CardExtensionTestCase to build ``self.extension`` via
    create_instance() and to provide ``self.card_copy``.
    """

    def create_instance(self, card, action_log):
        # Factory hook used by the base class to instantiate the extension.
        return CardWeightEditor(card, action_log)

    def test_copy(self):
        # Set and persist a weight, then copy the extension onto another
        # card and check the weight travels with it.
        self.extension.weight(u'25')
        self.extension.commit()
        self.assertEqual(self.extension.get_data().weight, u'25')
        cpy = self.extension.copy(self.card_copy, {})
        self.assertEqual(self.extension.weight(), cpy.weight())
| bsd-3-clause | Python | |
6aed81e89e321f45ba2ff95bfb0c78504c0bf79e | add setup_database script (tests) using scripts/import_osm.sh (in progress) | iBis-project/server-python | tests/setup_database.py | tests/setup_database.py | #!/usr/bin/env python
import argparse
import subprocess
def main():
    """Download OSM data via scripts/import_osm.sh and create the iBis tables.

    Returns 0 on success and 1 when the OSM import script fails.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-l", "--osm_url", required=True,
                        help="OSM download URL (*.osm.bz2)", type=str)
    # BUG FIX: the short flags, long names and help texts were mismatched
    # ("-p" was paired with --user but described as the password, and
    # "-u"/--password carried the username choice list).
    parser.add_argument("-u", "--user", required=True,
                        help="PostgreSQL database user", type=str,
                        choices=["postgres"])
    parser.add_argument("-p", "--password", required=True,
                        help="PostgreSQL database password", type=str)
    args = parser.parse_args()

    print('Importing OSM data ...')
    if subprocess.call(['scripts/import_osm.sh', args.osm_url,
                        args.user, args.password]) == 0:
        print('... Success!')
    else:
        print('... Failed.')
        return 1

    print('Creating iBis tables ...')
    # TODO: actual table creation is not implemented yet.
    print('... Success!')
    return 0
if __name__ == '__main__':
    # Script entry point. NOTE(review): main()'s return value is not
    # propagated to the shell exit status (sys.exit(main()) would do that).
    main()
| agpl-3.0 | Python | |
783b04ad8da2b65d9a07a0bdd4f236273f9ad39d | Create test.py | shivarajnesargi/BotOrNot | ProjectMidway/test.py | ProjectMidway/test.py | mit | Python | ||
7383343f7fb77c74455a50490ad2886fcf36bbd5 | Comment test for the moment | Widukind/dlstats,mmalter/dlstats,mmalter/dlstats,MichelJuillard/dlstats,MichelJuillard/dlstats,MichelJuillard/dlstats,mmalter/dlstats,Widukind/dlstats | dlstats/fetchers/test_ecb.py | dlstats/fetchers/test_ecb.py | import unittest
import mongomock
import ulstats
from dlstats.fetchers._skeleton import (Skeleton, Category, Series, BulkSeries,
Dataset, Provider)
import datetime
from bson import ObjectId
#class CategoriesTestCase(unittest.TestCase):
#if __name__ == '__main__':
# unittest.main()
| agpl-3.0 | Python | |
e446ab24ba981b22bf84ae2e09a8ba62cf17528e | Create batch_download.py | kevinkid135/Horriblesubs-Batch-Download | batch_download.py | batch_download.py | import time #used to pause script
import os #library used to open magnet link
from selenium import webdriver #use selenium
#global variables
driverLocation = "C:/Users/Kevin/Downloads/Browsers/chromedriver.exe"
url = "http://horriblesubs.info/shows/shigatsu-wa-kimi-no-uso/"
quality = "1080p"
download_format = "Magnet"
browser = webdriver.Chrome(driverLocation)
browser.get(url)
#time.sleep(1) # Let the user actually see something!
links = browser.find_elements_by_link_text(quality)
length = len(links)
print (str(length) + " episodes found")
counter = 1
for link in links:
print("Clicking '{}': {}/{}".format(quality, counter, length))
link.click()
counter+=1
time.sleep(0.1) #delay of animation
#break # temp
print
links = browser.find_elements_by_link_text(download_format)
counter = 1
for link in links:
print("Clicking '{}': {}/{}".format(download_format, counter, length))
os.startfile(link.get_attribute("href"))
counter+=1
time.sleep(0.1) #not too fast...
#break # temporary, only do first iteration
#time.sleep(5) # Let the user actually see something!
browser.quit()
| mit | Python | |
fa0886bdeab19cb326a3e751dff1c46fb7911228 | Apply migration 1160 again | alphagov/digitalmarketplace-api,alphagov/digitalmarketplace-api,alphagov/digitalmarketplace-api | migrations/versions/1180_set_framework_datetimes_not_nullable_again.py | migrations/versions/1180_set_framework_datetimes_not_nullable_again.py | """Remove deprecated application_close_date field and set the remaining date fields to non-nullable.
Revision ID: 1180
Revises: 1170
Create Date: 2018-05-08 09:53:43.699711
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
from sqlalchemy.sql import table, column, and_
# revision identifiers, used by Alembic.
revision = '1180'
down_revision = '1170'
def upgrade():
    """Make every framework lifecycle timestamp mandatory, then drop the
    deprecated application_close_date column."""
    timestamp_columns = (
        'applications_close_at_utc',
        'clarifications_close_at_utc',
        'clarifications_publish_at_utc',
        'framework_expires_at_utc',
        'framework_live_at_utc',
        'intention_to_award_at_utc',
    )
    for column_name in timestamp_columns:
        op.alter_column('frameworks', column_name,
                        existing_type=postgresql.TIMESTAMP(),
                        nullable=False)
    op.drop_column('frameworks', 'application_close_date')
def downgrade():
    """Re-add application_close_date and relax the timestamp columns back to
    nullable (reverse order of upgrade())."""
    op.add_column('frameworks',
                  sa.Column('application_close_date', postgresql.TIMESTAMP(),
                            autoincrement=False, nullable=True))
    timestamp_columns = (
        'intention_to_award_at_utc',
        'framework_live_at_utc',
        'framework_expires_at_utc',
        'clarifications_publish_at_utc',
        'clarifications_close_at_utc',
        'applications_close_at_utc',
    )
    for column_name in timestamp_columns:
        op.alter_column('frameworks', column_name,
                        existing_type=postgresql.TIMESTAMP(),
                        nullable=True)
| mit | Python | |
9167643047c61bae50a7c73775631c7bfe434cc9 | Add a new wrapper class for managing ansible static inventory. | bdastur/spam,bdastur/spam | spam/ansiInventory.py | spam/ansiInventory.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
AnsibleInventory:
INTRO:
USAGE:
"""
import os
import ansible.inventory
class AnsibleInventory(object):
    '''
    Thin wrapper around Ansible's static inventory parser.
    '''
    def __init__(self, inventory_filename):
        '''
        Parse the static inventory file at ``inventory_filename``.

        Raises IOError when the file does not exist. (Previously a missing
        file only printed a message and returned, leaving the object
        without a ``self.inventory`` attribute — every later call then
        failed with AttributeError.)
        '''
        if not os.path.exists(inventory_filename):
            raise IOError(
                "Provide a valid inventory filename: %r" % inventory_filename)
        self.inventory = ansible.inventory.InventoryParser(inventory_filename)

    def get_hosts(self, group=None):
        '''
        Return a list of host names.

        With no ``group`` (or an empty one), all hosts are returned.
        For a known group its member host names are returned; an unknown
        group yields None.
        '''
        if not group:
            return list(self.inventory.hosts.keys())
        groupobj = self.inventory.groups.get(group, None)
        if not groupobj:
            return None
        return [host.name for host in groupobj.get_hosts()]
| apache-2.0 | Python | |
77eecb7a809a7b4f56d70e6d7e09deb2c7e0188b | add template engine | zenanhu/pluto,zenanhu/pluto,zenanhu/pluto,zenanhu/pluto | template-engine/code/templite.py | template-engine/code/templite.py | #!/usr/bin/env python
# coding: utf-8
class CodeBuilder(object):
    """Accumulates generated Python source with indentation tracking.

    Fragments are stored in a flat list; nested CodeBuilder sections can
    be spliced in with add_section() and are rendered recursively.
    """

    INDENT_STEP = 4  # standard 4-space indent per level

    def __init__(self, indent=0):
        self.code = []
        self.indent_level = indent

    def add_line(self, line):
        """Append *line* at the current indentation, newline-terminated."""
        self.code.append(" " * self.indent_level + line + "\n")

    def indent(self):
        """Indent subsequently added lines one more level."""
        self.indent_level += self.INDENT_STEP

    def dedent(self):
        """Undo one level of indent()."""
        self.indent_level -= self.INDENT_STEP

    def add_section(self):
        """Reserve a nested builder at the current position and return it."""
        section = CodeBuilder(self.indent_level)
        self.code.append(section)
        return section

    def __str__(self):
        return "".join(str(fragment) for fragment in self.code)

    def get_globals(self):
        """Execute the accumulated source and return its global namespace."""
        assert self.indent_level == 0
        namespace = {}
        exec(str(self), namespace)
        return namespace
class Templite(object):
    """Compile-to-Python template engine (construction appears unfinished).

    NOTE(review): ``Codebuilder()`` below is undefined in this module —
    presumably it should be ``CodeBuilder``; as written __init__ raises
    NameError. The method also stops right after emitting the render
    preamble, so this class looks truncated / work-in-progress.
    """
    def __init__(self, text, *contexts):
        # Merge contexts; later contexts override earlier ones.
        self.context = {}
        for context in contexts:
            self.context.update(context)
        self.all_vars = set()
        self.loop_vars = set()
        code = Codebuilder()
        code.add_line("def render_function(context, do_dots):")
        code.indent()
        # Placeholder section, presumably to be filled with variable
        # extraction lines later — TODO confirm against the full source.
        vars_code = code.add_section()
        code.add_line("result = []")
        code.add_line("append_result = result.append")
        code.add_line("extend_result = result.extend")
        code.add_line("to_str = str")
c8f504c52f9e981b3974f4be1581da890021473a | add new collector for cassandra cfstats | MediaMath/Diamond,MediaMath/Diamond,MediaMath/Diamond,MediaMath/Diamond | src/collectors/mmcassandra/mmcassandra.py | src/collectors/mmcassandra/mmcassandra.py | import subprocess, socket, math
import diamond.collector
def parse_line(line):
    """Parse one ``key: value`` line of nodetool cfstats output.

    Returns (metric_name, value) where value is a float when the
    right-hand side is numeric and the raw string otherwise. A value with
    a unit suffix other than milliseconds raises ValueError.
    """
    metric_name, rhs = line.strip().split(':', 1)
    rhs = rhs.strip()
    if ' ' in rhs:
        str_value, units = rhs.split(' ', 1)
        if units not in ('ms', 'ms.'):
            raise ValueError("Cannot parse " + repr(line))
    else:
        str_value = rhs
    try:
        value = float(str_value)
    # BUG FIX: narrowed a bare ``except:`` — float() on a str can only
    # raise ValueError here, and the bare clause also swallowed
    # KeyboardInterrupt/SystemExit.
    except ValueError:
        value = str_value
    return metric_name, value
class Keyspace(object):
    """One keyspace parsed from cfstats: its name, its own stat pairs,
    and its Table objects."""

    def __init__(self, name, stats, tables):
        self.name, self.stats, self.tables = name, stats, tables
class Table(object):
    """One table (column family) parsed from cfstats: name plus its
    (metric, value) stat pairs."""

    def __init__(self, name, stats):
        self.name, self.stats = name, stats
def cfstats():
    """Run ``nodetool cfstats`` and parse the output into Keyspace objects.

    Relies on the fixed indentation scheme of cfstats output: keyspace
    headers at zero tabs, keyspace-level stats at one tab, and table
    headers / table stats at two tabs.
    """
    output = subprocess.check_output(['nodetool', 'cfstats'])
    lines = [line for line in output.splitlines()
             if line and (line != '----------------')]
    # cfstats output is structured in a very specific way: all lines are
    # key: value pairs prefixed by tabs. everything indented belongs to the
    keyspaces = []
    for line in lines:
        # Tab depth decides what the line describes.
        tab_count = len(line) - len(line.lstrip('\t'))
        if tab_count == 0:
            key, value = parse_line(line)
            assert key == 'Keyspace'
            keyspaces.append(Keyspace(value, [], []))
        elif tab_count == 1:
            key, value = parse_line(line)
            # NOTE(review): math.isnan raises TypeError when parse_line
            # returned a string — assumes keyspace-level stats are always
            # numeric. TODO confirm against real cfstats output.
            if not math.isnan(value):
                keyspaces[-1].stats.append(parse_line(line))
        elif tab_count == 2:
            key, value = parse_line(line)
            if key == 'Table':
                keyspaces[-1].tables.append(Table(value, []))
            else:
                # Same numeric-value assumption as above for table stats.
                if not math.isnan(value):
                    keyspaces[-1].tables[-1].stats.append((key, value))
        else:
            raise ValueError
    return keyspaces
bad_keyspaces = ('system', 'system_traces')
class ColumnFamilyStatsCollector(diamond.collector.Collector):
    """Publish per-keyspace and per-table Cassandra cfstats metrics,
    skipping the internal system keyspaces."""

    def collect(self):
        interesting = (ks for ks in cfstats() if ks.name not in bad_keyspaces)
        for keyspace in interesting:
            for (key, value) in keyspace.stats:
                self.publish(
                    'cassandra.cfstats.{}.{}'.format(keyspace.name, key),
                    value)
            for table in keyspace.tables:
                for (key, value) in table.stats:
                    self.publish(
                        'cassandra.cfstats.{}.{}.{}'.format(
                            keyspace.name, table.name, key),
                        value)
| mit | Python | |
ebd15d9bcf5a46417af7f3d46769716c4d12b793 | rename pre_push hook | kriskavalieri/nodejs-docker-boilerplate,kriskavalieri/nodejs-docker-boilerplate | pre_push.py | pre_push.py | #!/usr/bin/env python
import atexit
import glob
import os
import re
import subprocess
import sys
stable_branch_re = re.compile(r'master|stable|prod|production')
def chain_cmds(cmds, stdin=None):
    """Run *cmds* as a shell-style pipeline and return the final output.

    Each command's stdout feeds the next command's stdin; the last
    process's stdout is read, stripped, and decoded to text.
    """
    for argv in cmds:
        proc = subprocess.Popen(argv, stdin=stdin, stdout=subprocess.PIPE)
        stdin = proc.stdout
    # Read from the last process in the chain.
    raw = proc.stdout.read()
    return raw.strip().decode()
def get_current_branch():
    """Return the remote branch containing HEAD, preferring $GIT_BRANCH."""
    branch_cmd = "git rev-parse HEAD | git branch -a --contains | grep remotes | sed s/.*remotes.origin.//"
    pipeline = [segment.split(" ") for segment in branch_cmd.split(" | ")]
    # The git pipeline always runs, even when GIT_BRANCH is set — this
    # mirrors the eager evaluation of the original default argument.
    detected = chain_cmds(pipeline)
    return os.environ.get("GIT_BRANCH", detected)
def set_mode_prefixes():
    """Announce which check mode applies and return the istanbul config
    suffix: '' for stable/production branches, '_light' otherwise."""
    current_branch = get_current_branch()
    if stable_branch_re.search(current_branch):
        cov_34_mode = ''
        prefix = "* Push to production branch detected, executing full-scale checks..."
    else:
        cov_34_mode = "_light"
        prefix = "* On branch {} which is likely not production-bound, so executing low-threshold checks...".format(current_branch)
    print(prefix + "\n* Running pre-flight checks, please hold...")
    return cov_34_mode
def run_checks(GIT_ROOT):
    """Run the dockerised lint + coverage checks and sys.exit(1) on failure.

    Expects a ``test*`` directory under GIT_ROOT; results are read back
    from .cov_temp/coverage-summary and .lint_out after the docker script
    finishes. Prints a summary and exits non-zero when either lint output
    is non-empty or istanbul reported coverage problems.
    """
    test_dir = glob.glob(GIT_ROOT + "/test*")
    assert test_dir and os.path.isdir(test_dir[0]), \
        "Package's test directory not found"
    COV_SUMMARY = os.path.join(GIT_ROOT, ".cov_temp/coverage-summary")
    LINT_OUT = os.path.join(GIT_ROOT, ".lint_out")
    os.chdir(GIT_ROOT)
    os.system("bash {}/run_checks_in_docker.sh".format(GIT_ROOT))
    out = ""
    with open(COV_SUMMARY) as cov_summary, open(LINT_OUT) as lint:
        # on account of insignificantly low no of lines of .lint_out,
        # let's just read the whole thing at once
        if os.path.getsize(LINT_OUT):
            # file not empty, errors/warnings present, interrupt and print
            out += "\n* Lint returned some errors/warnings: \n{}\n\n".format(lint.read().strip())
        total_covs = cov_summary.read()
        if total_covs:
            out += "* Istanbul complained about too low coverage rates: \n{}".format(total_covs)
    if out:
        print(out)
        sys.exit(1)
    print("* All checked out good")
# Hook entry point: pick the branch-appropriate istanbul config, install
# it next to the repo, run the checks, and restore the strict config on
# exit (via atexit) regardless of outcome.
if __name__ == "__main__":
    cov_34_mode = set_mode_prefixes()
    HOOKS_PATH = os.path.dirname(os.path.abspath(__file__))
    GIT_ROOT = os.path.join(HOOKS_PATH, "../..")
    COV_34_CONFIG = os.path.join(GIT_ROOT, ".istanbul", ".istanbul-config.yml" + cov_34_mode)
    os.system("cp {} ../.istanbul-config.yml".format(COV_34_CONFIG))
    @atexit.register
    def restore_default_34_config():
        # Strip the "_light" suffix to point back at the full-threshold
        # config when the hook process exits.
        os.system("cp {} ../.istanbul-config.yml".format(
            COV_34_CONFIG.replace(cov_34_mode, "")))
    run_checks(GIT_ROOT)
| mit | Python | |
a123b42eb3aed078aea26109056cf786aec2664a | add link_flair.py for interacting with link flair on submissions | rhots/automation | bin/link_flair.py | bin/link_flair.py | import argparse
import praw
def main():
    """CLI tool: get or set the link flair of a single reddit submission."""
    parser = argparse.ArgumentParser(description='Get or set link flair')
    parser.add_argument('action', choices=['get', 'set'], help='get or set')
    parser.add_argument('id', help='id of the submission')
    parser.add_argument('--text', help='link flair text to set')
    parser.add_argument('--class', help='link flair CSS class to set')
    # vars() is needed because "class" is a keyword and can't be an
    # attribute name on the argparse namespace.
    args = vars(parser.parse_args())
    # Uses the 'moderation' site section from the local praw.ini config.
    reddit = praw.Reddit('moderation')
    submission = reddit.submission(args['id'])
    if args['action'] == 'get':
        print('Flair text: {0}'.format(submission.link_flair_text))
        print('Flair class: {0}'.format(submission.link_flair_css_class))
    elif args['action'] == 'set':
        submission.mod.flair(args['text'], args['class'])
        print('Link flair set')
# NOTE(review): main() runs unconditionally on import — there is no
# ``if __name__ == '__main__'`` guard.
main()
| isc | Python | |
495e9680ae7c1b9c1071c9f840df7881f5d4934b | add a Spider to KFC#15 | iandees/all-the-places,iandees/all-the-places,iandees/all-the-places | locations/spiders/kfc.py | locations/spiders/kfc.py | import json
import re
import scrapy
from locations.items import GeojsonPointItem
class KFCSpider(scrapy.Spider):
    """Scrape KFC store locations from the kfc.com store-locator service."""
    name = "kfc"
    allowed_domains = ["www.kfc.com"]

    def start_requests(self):
        """POST a single locator query (100-mile radius around ZIP 90210)."""
        url = 'https://services.kfc.com/services/query/locations'
        headers = {
            'Accept-Language': 'en-US,en;q=0.9',
            'Origin': 'https://www.kfc.com',
            'Accept-Encoding': 'gzip, deflate, br',
            'Accept': 'application/json, text/javascript, */*; q=0.01',
            'Referer': 'https://www.kfc.com/store-locator?query=90210',
            'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
            'X-Requested-With': 'XMLHttpRequest',
        }
        form_data = {
            'address': '90210',
            'distance': '100'
        }
        yield scrapy.http.FormRequest(
            url=url, method='POST', formdata=form_data,
            headers=headers, callback=self.parse
        )

    def parse(self, response):
        """Yield one GeojsonPointItem per store in the JSON response.

        BUG FIX: removed a leftover ``print(stores)`` debug statement that
        dumped the whole result list to stdout on every response.
        """
        data = json.loads(response.body_as_unicode())
        stores = data['results']
        for store in stores:
            properties = {
                'ref': store['entityID'],
                'name': store['storeNumber'],
                'addr_full': store['addressLine'],
                'city': store['city'],
                'state': store['state'],
                'postcode': store['postalCode'],
                'lat': store['latitude'],
                'lon': store['longitude'],
                'phone': store['businessPhone']
            }
            yield GeojsonPointItem(**properties)
| mit | Python | |
a67a4e15ce25e9e9a795534b4e629d6680fb491b | Implement player choosing a random pawn to move | risteon/ludo_python | ludo/playermoverandom.py | ludo/playermoverandom.py | # Player
from random import choice, randint

from playerbase import PlayerBase, Players
class PlayerMoveRandom(PlayerBase):
    """Player strategy that moves a uniformly random pawn."""

    def get_desc(self):
        """Return a short description of this strategy.

        BUG FIX: the original docstring used four/five quote characters,
        producing a stray leading quote via implicit string concatenation.
        """
        return "Chooses a random pawn to move"

    def _choose_move_impl(self, moves):
        """Return a random entry of *moves*, or None when no move exists."""
        if not moves:
            return None
        # random.choice is the idiomatic equivalent of indexing with
        # randint(0, len(moves) - 1).
        return choice(moves)
| mit | Python | |
298d4e6eaca54defe914530ebdee9ded255cfd79 | add lxc integration tests | saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt | tests/integration/modules/lxc.py | tests/integration/modules/lxc.py | # -*- coding: utf-8 -*-
'''
Test the lxc module
'''
# Import Salt Testing libs
from salttesting.helpers import ensure_in_syspath, requires_salt_modules
ensure_in_syspath('../../')
# Import salt libs
import integration
@requires_salt_modules('lxc.list')
class LXCModuleTest(integration.ModuleCase):
    '''
    Test the lxc module

    Integration test run against a live salt minion: every remote call is
    an exact execution-module invocation, and containers created here are
    all named with ``prefix`` so tearDown can find and destroy them.
    '''
    prefix = '_salttesting'

    def setUp(self):
        # Build a "download" template profile matching the minion's own
        # distribution/arch/release grains, and store it in the
        # lxc.profile grain for the tests to use.
        # NOTE: the local name ``os`` shadows the os module here.
        os = self.run_function('grains.item',
                               ['os', 'oscodename', 'osarch'])
        p = {'download':
             {'dist': os['os'].lower(),
              'arch': os['osarch'].lower(),
              'template': 'download',
              'release': os['oscodename'].lower()}}
        self.run_function('grains.setval', ['lxc.profile', p])

    def tearDown(self):
        '''
        Clean up any LXCs created.
        '''
        r = self.run_function('lxc.list')
        for k, v in r.items():
            for x in v:
                if x.startswith(self.prefix):
                    self.run_function('lxc.destroy', [x])

    def test_create_destroy(self):
        '''
        Test basic create/destroy of an LXC.
        '''
        opts = {'arch': 'amd64',
                'dist': 'ubuntu',
                'release': 'trusty'}
        r = self.run_function('lxc.create', [self.prefix],
                              template='download', options=opts)
        self.assertEqual(r, {'created': True})
        self.assertTrue(self.run_function('lxc.exists', [self.prefix]))
        r = self.run_function('lxc.destroy', [self.prefix])
        self.assertEqual(r, {'state': None, 'change': True})
        self.assertFalse(self.run_function('lxc.exists', [self.prefix]))

    def test_init(self):
        '''
        Test basic init functionality.

        Uses the profile written by setUp; seed=False skips seeding the
        container with a minion config.
        '''
        r = self.run_function('lxc.init', [self.prefix],
                              profile='download', seed=False)
        self.assertTrue(r.get('created', False))
        self.assertTrue(self.run_function('lxc.exists', [self.prefix]))

    def test_macvlan(self):
        '''
        Regression test for macvlan nic profile.

        Initializes (without starting) a container using an lxc.nic grain
        that requests a macvlan bridge on eth0, then checks the rendered
        container config picked up the macvlan network type.
        '''
        p = {"macvlan": {"eth0": {
            "macvlan.mode": "bridge",
            "link": "eth0",
            "type": "macvlan"}}}
        self.run_function('grains.setval', ['lxc.nic', p])
        self.run_function('lxc.init', [self.prefix],
                          profile='download', nic='macvlan',
                          seed=False, start=False)
        self.run_function('grains.delval', ['lxc.nic'])
        f = '/var/lib/lxc/{0}/config'.format(self.prefix)
        conf = self.run_function('lxc.read_conf', [f])
        # Due to a segfault in lxc-destroy caused by invalid configs,
        # truncate the config.
        self.run_function('cmd.run', ['truncate -s 0 {0}'.format(f)])
        self.assertEqual(conf.get('lxc.network.type'), 'macvlan')
if __name__ == '__main__':
    # Run this module's tests directly via the integration test runner.
    from integration import run_tests
    run_tests(LXCModuleTest)
| apache-2.0 | Python | |
c7e7430d76337ef5cfd6779d9a32c2c9d948eb86 | Add guess phred encoding script | jason-weirather/Au-public,jason-weirather/Au-public,jason-weirather/Au-public,jason-weirather/Au-public | carbon/guess-encoding.py | carbon/guess-encoding.py | """
awk 'NR % 4 == 0' your.fastq | python %prog [options]
guess the encoding of a stream of qual lines.
"""
import sys
import optparse
RANGES = {
'Sanger': (33, 93),
'Solexa': (59, 104),
'Illumina-1.3': (64, 104),
'Illumina-1.5': (67, 104)
}
def get_qual_range(qual_str):
    """
    Return the (min, max) ordinal values of the characters in *qual_str*.

    >>> get_qual_range("DLXYXXRXWYYTPMLUUQWTXTRSXSWMDMTRNDNSMJFJFFRMV")
    (68, 89)
    """
    codes = [ord(ch) for ch in qual_str]
    return min(codes), max(codes)
def get_encodings_in_range(rmin, rmax, ranges=RANGES):
    """Return the names of encodings whose span contains [rmin, rmax]."""
    return [encoding for encoding, (emin, emax) in ranges.items()
            if rmin >= emin and rmax <= emax]
def main():
    """Read quality lines from stdin and report the compatible encodings.

    Tracks the global min/max ordinal seen so far; recomputes the set of
    candidate encodings only when that range widens. Exits early when the
    range matches no encoding, when exactly one remains (default -n -1),
    or after -n lines. Python 2 print syntax throughout.
    """
    p = optparse.OptionParser(__doc__)
    p.add_option("-n", dest="n", help="number of qual lines to test default:-1"
                 " means test until end of file or until it it possible to "
                 " determine a single file-type",
                 type='int', default=-1)
    opts, args = p.parse_args()
    print >>sys.stderr, "# reading qualities from stdin"
    gmin, gmax = 99, 0
    valid = []
    for i, line in enumerate(sys.stdin):
        lmin, lmax = get_qual_range(line.rstrip())
        if lmin < gmin or lmax > gmax:
            # Range widened: refresh the candidate encoding list.
            gmin, gmax = min(lmin, gmin), max(lmax, gmax)
            valid = get_encodings_in_range(gmin, gmax)
            if len(valid) == 0:
                print >>sys.stderr, "no encodings for range: %s" % str((gmin, gmax))
                sys.exit()
            if len(valid) == 1 and opts.n == -1:
                # Unambiguous answer and no fixed sample size requested.
                print "\t".join(valid) + "\t" + str((gmin, gmax))
                sys.exit()
        if opts.n > 0 and i > opts.n:
            # Fixed sample size reached; report whatever is still valid.
            print "\t".join(valid) + "\t" + str((gmin, gmax))
            sys.exit()
    print "\t".join(valid) + "\t" + str((gmin, gmax))
if __name__ == "__main__":
    import doctest
    # Run the module doctests first; only fall through to main() when
    # they all pass.
    if doctest.testmod(optionflags=doctest.ELLIPSIS |\
                       doctest.NORMALIZE_WHITESPACE).failed == 0:
        main()
| apache-2.0 | Python | |
7dbec704e0e9011b87940b48d21ab343f4003a8b | Add performance testing script. | Marketcircle/graypy,imankulov/graypy,severb/graypy,severb/graypy,stratosgear/graypy | perftest.py | perftest.py | #! /usr/bin/env python
import argparse
import logging
import logging.config
import sys
import time
def main(argv=sys.argv):
    """Configure logging per the CLI args and send test records to each
    requested handler.

    With --stress, additionally logs warnings flat-out for ~10 seconds,
    printing a messages/second rate once per second, and finishes with an
    overall throughput summary.
    """
    parser = argparse.ArgumentParser(prog="perftest.py")
    parser.add_argument('--graylog-host',
        help='Graylog2 host. Do not test GELFHandler if not specified.')
    parser.add_argument('--graylog-port', type=int, default=12201,
        help='Graylog2 GELF UDP port. Default: 12201')
    parser.add_argument('--rabbit-url',
        help='RabbitMQ url (ex: amqp://guest:guest@localhost/). '
             'Do not test GELFRabbitHandler if not specified.')
    parser.add_argument('--rabbit-exchange', default='logging.gelf',
        help='RabbitMQ exchange. Default: logging.gelf')
    parser.add_argument('--console-logger', action='store_true', default=None)
    parser.add_argument('--stress', action='store_true',
        help='Enable performance/stress test. WARNING this logs MANY warnings.')
    args = parser.parse_args(argv[1:])
    # Show usage when no handler at all was requested (run continues).
    if all(v is None for v in
           [args.graylog_host, args.rabbit_url, args.console_logger]):
        parser.print_help()
    # Base dictConfig skeleton; handlers are appended per CLI flag below.
    config = {
        'version': 1,
        'formatters': {
            'brief': {'format': "%(levelname)-7s %(name)s - %(message)s"},
            'message': {'format': "%(message)s"},
        },
        'handlers': {},
        'root': {'handlers': [], 'level': 'DEBUG'},
        'disable_existing_loggers': False,
    }
    if args.graylog_host is not None:
        config['handlers']['graylog_udp'] = {
            'class': 'graypy.GELFHandler',
            'host': args.graylog_host,
            'port': args.graylog_port,
            'debugging_fields': 0,
            'formatter': 'message',
        }
        config['root']['handlers'].append('graylog_udp')
    if args.rabbit_url is not None:
        config['handlers']['graylog_rabbit'] = {
            'class': 'graypy.GELFRabbitHandler',
            'url': args.rabbit_url,
            'exchange': args.rabbit_exchange,
            'debugging_fields': 0,
            'formatter': 'message',
        }
        config['root']['handlers'].append('graylog_rabbit')
    if args.console_logger:
        config['handlers']['console'] = {
            'class': 'logging.StreamHandler',
            'formatter': 'brief',
        }
        config['root']['handlers'].append('console')
    logging.config.dictConfig(config)
    log = logging.getLogger()
    t_start = time.time()
    total = 0
    # One record per level so every handler/formatter fires at least once.
    log.debug('debug')
    log.info('info')
    log.warn('warning')
    log.error('error')
    log.critical('critical')
    total += 5
    if args.stress:
        # Log as fast as possible for 10 seconds; tx marks the next
        # once-per-second reporting boundary.
        t_end = time.time() + 10
        tx = t_end - 9
        cx = 0
        while True:
            log.warn('warning')
            cx += 1
            total += 1
            if time.time() > tx:
                elapsed = time.time() - (tx - 1)
                tx += 1
                print('%s messages in %.3f seconds (%.3f msg/s)'
                      % (cx, elapsed, cx / elapsed))
                cx = 0
                if tx > t_end:
                    break
    elapsed = time.time() - t_start
    print('%s messages in %.3f seconds (%.3f msg/s)'
          % (total, elapsed, total / elapsed))
if __name__ == '__main__':
    # Script entry point.
    main()
| bsd-3-clause | Python | |
06092ce552c78de4efdc5845d94146fd5cf6fd38 | add plot tool | ktarrant/options_csv | plot_csv.py | plot_csv.py | import pandas as pd
import numpy as np
import plotly.plotly as py
import plotly.graph_objs as go
import argparse
def clean_text(s):
    """Keep only letters, digits and spaces from *s*, dropping trailing
    whitespace.

    Used to build plotly filenames that are safe and readable. (Was a
    lambda assigned to a name — PEP 8 E731; a def also gives the function
    a proper __name__ for tracebacks.)
    """
    return "".join([c for c in s if c.isalpha() or c.isdigit() or c == ' ']).rstrip()
def make_hbar_plot(options_table, symbol, parameter):
    """Upload a stacked horizontal-bar chart of call/put *parameter* per
    strike and return the plot URL."""
    traces = []
    for option_type, bar_color in (("call", "green"), ("put", "red")):
        traces.append(go.Bar(
            name=option_type,
            x=options_table['{}_{}'.format(option_type, parameter)],
            y=options_table['Strike'],
            orientation='h',
            marker={
                "color": bar_color,
            },
        ))
    layout = go.Layout(
        title="{} - {}".format(symbol, parameter),
        barmode='stack'
    )
    figure = go.Figure(data=traces, layout=layout)
    return py.plot(figure, filename=clean_text("{}_{}".format(symbol, parameter)))
if __name__ == "__main__":
    # Listed in the --param help text as examples of usable columns.
    typical_params = ["Ask", "Bid", "Change", "Last", "Open Int.", "Symbol", "Vol"]
    parser = argparse.ArgumentParser(description="Plots a parameter from an options CSV")
    parser.add_argument("--csv", default="spx.csv", help="CSV file to pull parameter from")
    parser.add_argument("--param", default="Open Int.",
                        help="Parameter to pull and plot. Typical params are {} ".format(typical_params))
    args = parser.parse_args()
    # NOTE(review): DataFrame.from_csv is deprecated/removed in modern
    # pandas — pd.read_csv(..., index_col=0) is the replacement; confirm
    # the pandas version this targets.
    options_table = pd.DataFrame.from_csv(args.csv)
    # The symbol is the CSV basename without its extension.
    url = make_hbar_plot(options_table, args.csv.split(".")[0], args.param)
| mit | Python | |
dbe71d02a95e65b644a1ac811712a31059975457 | test update | redhat-cip/dci-control-server,redhat-cip/dci-control-server,enovance/dci-control-server,enovance/dci-control-server | tests/api/v1/test_jobs_update.py | tests/api/v1/test_jobs_update.py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2015-2016 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
def test_update_jobs(admin, remoteci_context, job_user_id, topic_user_id):
    """Scheduling an update for a job must attach the topic's latest
    components and record the previous job id."""
    data = {
        'name': 'pname',
        'type': 'type_1',
        'url': 'http://example.com/',
        'topic_id': topic_user_id,
        'export_control': True,
        'state': 'active'}
    # Create three components of distinct types; they become the topic's
    # latest component set.
    latest_components = set()
    for overrides in ({},
                      {'type': 'type_2', 'name': 'pname1'},
                      {'type': 'type_3', 'name': 'pname2'}):
        data.update(overrides)
        created = admin.post('/api/v1/components', data=data)
        latest_components.add(created.data['component']['id'])

    r = remoteci_context.post('/api/v1/jobs/%s/update' % job_user_id)
    assert r.status_code == 201
    update_job = r.data['job']
    assert update_job['update_previous_job_id'] == job_user_id
    assert update_job['topic_id'] == topic_user_id

    resp = admin.get('/api/v1/jobs/%s/components' % update_job['id'])
    update_cmpts = {cmpt['id'] for cmpt in resp.data['components']}
    assert latest_components == update_cmpts
| apache-2.0 | Python | |
fdada5e48a13ef5b1c55710a584d281d36a32375 | Add stub for testing `generic_decorators`. | jakirkham/nanshe,jakirkham/nanshe,nanshe-org/nanshe,nanshe-org/nanshe,DudLab/nanshe,DudLab/nanshe | tests/test_generic_decorators.py | tests/test_generic_decorators.py | __author__ = "John Kirkham <kirkhamj@janelia.hhmi.org>"
__date__ = "$Mar 25, 2015 13:30:52 EDT$"
| bsd-3-clause | Python | |
61e0c6e325a91564250a937c0b1769992f65a7f5 | Add initial unit tests for swarm module | saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt | tests/unit/modules/test_swarm.py | tests/unit/modules/test_swarm.py | # -*- coding: utf-8 -*-
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
# Import Salt Libs
import salt.modules.swarm
# Import Salt Testing Libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.mock import patch
from tests.support.unit import TestCase
class SwarmTestCase(TestCase, LoaderModuleMockMixin):
    """
    Test cases for salt.modules.swarm
    """

    def setup_loader_modules(self):
        # LoaderModuleMockMixin hook: load salt.modules.swarm with an
        # empty loader context.
        return {salt.modules.swarm: {}}

    def test___virtual___valid_docker_module(self):
        """
        Test that __virtual__ requires a proper loaded docker library
        """
        # Shaped like the real docker SDK: has APIClient and from_env,
        # which is what _is_docker_module checks for.
        class ValidDockerModule(object):
            class APIClient:
                pass
            def from_env(self):
                pass
        # Patch HAS_DOCKER with the detection result for the stand-in
        # module; __virtual__ should then report the module name.
        with patch(
            "salt.modules.swarm.HAS_DOCKER",
            salt.modules.swarm._is_docker_module(ValidDockerModule()),
        ):
            self.assertEqual(
                salt.modules.swarm.__virtual__(), salt.modules.swarm.__virtualname__
            )

    def test___virtual___not_valid_docker_module(self):
        """With a module lacking the docker SDK surface, __virtual__ must
        return a (False, reason) pair instead of the module name."""
        class NotValidDockerModule(object):
            pass
        with patch(
            "salt.modules.swarm.HAS_DOCKER",
            salt.modules.swarm._is_docker_module(NotValidDockerModule()),
        ):
            ret = salt.modules.swarm.__virtual__()
            # Failure form is a 2-tuple whose first element is False.
            self.assertEqual(len(ret), 2)
            self.assertFalse(ret[0])
| apache-2.0 | Python | |
2e953f9571d132f2a346351b4593849e5c5bee14 | Add unit tests for download_repo_run_scan.py | spdx/spdx-github | run_scan/unit_test.py | run_scan/unit_test.py | import unittest
import download_repo_run_scan
from os import path, remove
import shutil
#Test that when given a valid zip file url,
#the download_github_zip will result in the creation
#of a local file at the returned location
class downloadFileTestCase(unittest.TestCase):
    """download_github_zip must create a local file at the returned path.

    Performs a live network download of a GitHub zipball.
    """
    # Path returned by download_github_zip; removed again in tearDown.
    file_location = ''
    url = 'https://github.com/OSSHealth/ghdata/archive/master.zip'

    def setUp(self):
        self.file_location = download_repo_run_scan.download_github_zip(self.url)

    def tearDown(self):
        remove(self.file_location)

    def testDownload(self):
        assert path.isfile(self.file_location)
#Test that we can unzip a zip file. This requires a zip file to be present.
#For this I am using the same download method as above to get a zip file
#from GitHub, so both tests will fail if the download method fails
class unzipFileTestCase(unittest.TestCase):
    """unzip_file must extract a downloaded GitHub zip to a directory.

    Depends on download_github_zip (live network), so a download failure
    also fails this case.
    """
    file_location = ''
    url = 'https://github.com/OSSHealth/ghdata/archive/master.zip'
    extracted_directory = ''

    def setUp(self):
        self.file_location = download_repo_run_scan.download_github_zip(self.url)
        self.extracted_directory = download_repo_run_scan.unzip_file(self.file_location)

    def tearDown(self):
        #Remove the zip file
        remove(self.file_location)
        #Remove the unzipped directory
        shutil.rmtree(self.extracted_directory)

    def testUnzip(self):
        assert path.isdir(self.extracted_directory)
#This tests whether a file output is produced from calling the scan method.
#It needs the download zip and extract zip methods to be working.
class scanTestCase(unittest.TestCase):
    """scan() must produce an SPDX results file for an extracted repo.

    Needs both download_github_zip and unzip_file to work (live network
    plus filesystem side effects).
    """
    file_location = ''
    url = 'https://github.com/OSSHealth/ghdata/archive/master.zip'
    extracted_directory = ''
    spdx_file_name = ''

    def setUp(self):
        self.file_location = download_repo_run_scan.download_github_zip(self.url)
        self.extracted_directory = download_repo_run_scan.unzip_file(self.file_location)
        #Set output file name to the directory name .SPDX
        # (the [:-1] slice drops the directory's trailing separator)
        self.spdx_file_name = self.extracted_directory[:-1] + '.SPDX'
        #scan the extracted directory and put results in a named file
        download_repo_run_scan.scan(self.extracted_directory, self.spdx_file_name)

    def tearDown(self):
        #Remove the zip file
        remove(self.file_location)
        #Remove the unzipped directory
        shutil.rmtree(self.extracted_directory)
        #Remove the scan results file
        remove(self.spdx_file_name)

    def testScan(self):
        assert path.isfile(self.spdx_file_name)
#This checks whether the check_valid_url method correctly determines
#whether a url results in an error (400 or 500 code)
class checkURLTestCase(unittest.TestCase):
    """check_valid_url must distinguish reachable URLs from 4xx/5xx ones.

    Relies on live network access to google.com.
    """
    good_url = 'https://www.google.com/'
    bad_url = 'https://www.google.com/fail'

    def testGoodURL(self):
        # assertTrue/assertFalse give useful failure messages, unlike the
        # previous bare ``assert x == True`` (which is also stripped
        # entirely under ``python -O``).
        self.assertTrue(download_repo_run_scan.check_valid_url(self.good_url))

    def testBadURL(self):
        self.assertFalse(download_repo_run_scan.check_valid_url(self.bad_url))
if __name__ == "__main__":
    # Run the suite directly; note most of these tests need network access.
    unittest.main()
| apache-2.0 | Python | |
a88959202e66d47f032797c2c5790461fe458392 | add tests boilerplates | wikedzit/bucketlist | api/v1/tests/test_api_tokens.py | api/v1/tests/test_api_tokens.py | import unittest
import json
class TestAuthentication(unittest.TestCase):
    """Boilerplate suite for the auth endpoints: every case is currently a
    placeholder that always passes and still needs a real implementation."""
    def setUp(self):
        pass
    def tearDown(self):
        pass
    def test_validates_user_inputs(self):
        # TODO: exercise real input validation instead of this placeholder.
        self.assertTrue(True)
    def test_user_can_register(self):
        # TODO: implement the registration test.
        self.assertTrue(True)
    def test_no_ducplicated_usernames(self):
        # TODO: implement the duplicate-username test (note the typo in the
        # method name; renaming would change the reported test id).
        self.assertTrue(True)
    def test_user_can_login(self):
        # TODO: implement the login test.
        self.assertTrue(True)
16850052ced6975ab99c73c2c15497a3f91ccab9 | Add reader back in again.. will use for blender | ndevenish/Blender_ioEDM,ndevenish/Blender_ioEDM | edm/reader.py | edm/reader.py | #!/usr/bin/env python3
import struct
from collections import namedtuple
from .typereader import get_type_reader
import logging
logger = logging.getLogger(__name__)
class Reader(object):
  """Little-endian binary reader over a file (used for .edm data).

  Wraps a stream opened in binary mode and exposes typed primitives
  (uchar/ushort/uint/float), length-prefixed strings and lists, and
  dispatch to named type readers via get_type_reader().
  """
  def __init__(self, filename):
    self.filename = filename
    # Kept open for the lifetime of the Reader; never closed explicitly.
    self.stream = open(filename, "rb")
  def tell(self):
    """Return the current byte offset in the stream."""
    return self.stream.tell()
  def seek(self, offset, from_what=0):
    """Seek to offset; from_what follows file.seek() semantics."""
    self.stream.seek(offset, from_what)
  def read_constant(self, data):
    """Consume len(data) bytes and assert they equal the expected bytes."""
    filedata = self.stream.read(len(data))
    assert data == filedata, "Fixed byte data mismatch"
  def read(self, length):
    """Read length raw bytes from the stream."""
    return self.stream.read(length)
  def read_uchar(self):
    """Read one unsigned byte."""
    return struct.unpack("B", self.stream.read(1))[0]
  def read_ushort(self):
    """Read a little-endian unsigned 16-bit integer."""
    return struct.unpack("<H", self.stream.read(2))[0]
  def read_uint(self):
    """Read an unsigned integer from the data"""
    return struct.unpack("<I", self.stream.read(4))[0]
  def read_float(self):
    """Read a little-endian 32-bit float."""
    return struct.unpack("<f", self.stream.read(4))[0]
  def read_format(self, format):
    """Read a struct format from the data"""
    return struct.unpack(format, self.stream.read(struct.calcsize(format)))
  def read_string(self):
    """Read a length-prefixed string from the file"""
    # Remember where the record started so decode failures can be located.
    prepos = self.stream.tell()
    length = self.read_uint()
    try:
      return self.stream.read(length).decode("UTF-8")
    except UnicodeDecodeError:
      raise RuntimeError("Could not decode string with length {} at position {}".format(length, prepos))
  def read_list(self, reader):
    """Reads a length-prefixed list of something"""
    length = self.read_uint()
    entries = []
    for index in range(length):
      entries.append(reader(self))
    return entries
  def read_single_type(self, source=None):
    """Reads a single instance of a named type"""
    # ``source``, when given, must be this reader -- the parameter only
    # exists so this method can be passed around as a reader(self) callback.
    assert source is self or source is None
    typeName = self.read_string()
    reader = get_type_reader(typeName)
    return reader(self)
  # def read_typed_list(self):
  #   return read_list()
  #   length = self.read_uint()
  #   entries = []
  #   logger.debug("Reading typed list of length {}".format(length))
  #   for index in range(length):
  #     entries.append(self.read_single_type())
  #   return entries
  #
8022d7361affddde110a289bc683201ea70af5fe | add weight conversion script | chainer/chainercv,yuyu2172/chainercv,pfnet/chainercv,yuyu2172/chainercv,chainer/chainercv | examples/yolo/darknet2npz.py | examples/yolo/darknet2npz.py | import argparse
import numpy as np
import chainer
from chainer import serializers
from chainercv.links import Conv2DBNActiv
from chainercv.links import YOLOv3
def load(file, link):
    """Recursively copy serialized float32 weights from ``file`` into a
    ChainerCV link.

    For a Conv2DBNActiv the values are consumed in the Darknet on-disk
    order -- BN beta (bias), BN gamma (scale), running mean, running
    variance, then the conv kernel -- so this read order must not change.
    ChainList containers are walked child by child, in order.
    """
    if isinstance(link, Conv2DBNActiv):
        for param in (
                link.bn.beta.array,
                link.bn.gamma.array,
                link.bn.avg_mean,
                link.bn.avg_var,
                link.conv.W.array):
            # Fill the existing array in place with exactly param.size floats.
            param[:] = np.fromfile(file, dtype=np.float32, count=param.size) \
                .reshape(param.shape)
    elif isinstance(link, chainer.ChainList):
        for l in link:
            load(file, l)
def main():
    """CLI entry point: convert a Darknet YOLOv3 weight file to .npz."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--n_fg_class', type=int, default=80)
    parser.add_argument('darknetmodel')
    parser.add_argument('output')
    args = parser.parse_args()
    model = YOLOv3(args.n_fg_class)
    # One dummy forward pass so every lazily-initialized parameter gets its
    # concrete shape before weights are copied in.
    with chainer.using_config('train', False):
        model(np.empty((1, 3, model.insize, model.insize), dtype=np.float32))
    with open(args.darknetmodel, mode='rb') as f:
        # File header: major/minor/revision version numbers.
        major = np.fromfile(f, dtype=np.int32, count=1)
        minor = np.fromfile(f, dtype=np.int32, count=1)
        np.fromfile(f, dtype=np.int32, count=1)  # revision
        # Only format versions >= 0.2 (with sane version fields) supported.
        assert(major * 10 + minor >= 2 and major < 1000 and minor < 1000)
        # 64-bit "images seen" counter precedes the weights; skipped.
        np.fromfile(f, dtype=np.int64, count=1)  # seen
        load(f, model.extractor)
    serializers.save_npz(args.output, model)
if __name__ == '__main__':
main()
| mit | Python | |
7bd4ecf4f0f16ed58f253ca16045c3dd86f0a28c | Test script. | zsiciarz/django-briefcase,zsiciarz/django-briefcase | runtests.py | runtests.py | # -*- coding: utf-8 -*-
import os
from django.conf import settings
def make_absolute_path(path):
    """Resolve *path* relative to the directory containing this file."""
    base_dir = os.path.dirname(__file__)
    return os.path.join(os.path.realpath(base_dir), path)
# Configure a minimal, in-memory Django environment (only if nothing has
# configured settings already) so the briefcase app's suite can run without
# a real project on disk.
if not settings.configured:
    settings.configure(
        DATABASES = {
            'default': {
                'ENGINE': 'django.db.backends.sqlite3',
                'NAME': ':memory:',
            }
        },
        SITE_ID = 1,
        INSTALLED_APPS = (
            'django.contrib.auth',
            'django.contrib.contenttypes',
            'django.contrib.sessions',
            'django.contrib.sites',
            'django.contrib.messages',
            'django_nose',
            'briefcase',
        ),
        TEMPLATE_DIRS = (
            make_absolute_path('example_project/templates'),
        ),
        ROOT_URLCONF = 'example_project.urls',
        TEST_RUNNER = 'django_nose.NoseTestSuiteRunner',
        NOSE_ARGS = ['--stop'],
    )
# Import deferred until after settings are configured, then run the suite.
from django.core.management import call_command
call_command('test', 'briefcase')
| mit | Python | |
a4d5e88973a25464be26488d17ecc663cce776d7 | Add map example with data generators | jakevdp/altair,altair-viz/altair | altair/examples/world_map.py | altair/examples/world_map.py | """
World Map
---------
This example shows how to create a world map using data generators for
different background layers.
"""
# category: maps
import altair as alt
from vega_datasets import data
# Data generators for the background
sphere = alt.sphere()
graticule = alt.graticule()
# Source of land data
source = alt.topo_feature(data.world_110m.url, 'countries')
# Layering and configuring the components
alt.layer(
alt.Chart(sphere).mark_geoshape(fill='lightblue'),
alt.Chart(graticule).mark_geoshape(stroke='white', strokeWidth=0.5),
alt.Chart(source).mark_geoshape(fill='ForestGreen', stroke='black')
).project(
'naturalEarth1'
).properties(width=600, height=400).configure_view(stroke=None)
| bsd-3-clause | Python | |
8cbe2878f5fdca899ec71bc08e7d2de4a3c3caf2 | add python solution to "project euler - problem3" | mo/project-euler,mo/project-euler,mo/project-euler,mo/project-euler,mo/project-euler,mo/project-euler,mo/project-euler,mo/project-euler,mo/project-euler | problem3.py | problem3.py | number = 600851475143
for divisor in xrange(2,number):
if (number % divisor == 0):
print divisor, " is a divisor"
number = number / divisor
print "new number is", number
| mit | Python | |
f1976ef533d98ac6e423312435bb25692831bfd9 | Create bumper.py | LCAS/teaching,LCAS/teaching,LCAS/teaching,LCAS/teaching,LCAS/teaching | cmp3103m-code-fragments/scripts/bumper.py | cmp3103m-code-fragments/scripts/bumper.py | import rospy
from geometry_msgs.msg import Twist
from kobuki_msgs.msg import BumperEvent
class Chatter:
    """Minimal bump-and-turn controller for a Kobuki/TurtleBot base:
    drive forward until the bumper reports a press, then rotate in place
    until it is released."""
    def __init__(self):
        rospy.init_node('chatter')
        self.publisher = rospy.Publisher('/mobile_base/commands/velocity', Twist, queue_size=1)
        self.scan_sub = rospy.Subscriber('/mobile_base/events/bumper', BumperEvent, self.bumper_cb)
        # Updated by bumper_cb; True while the watched bumper is pressed.
        self.obstacles = False
    def bumper_cb(self, laser_msg):
        # Only react to bumper index 1 -- presumably the centre bumper of
        # kobuki_msgs/BumperEvent (TODO confirm); others are ignored.
        if laser_msg.bumper != 1:
            return
        # state == 1 presumably means PRESSED; anything else clears the flag.
        if laser_msg.state == 1:
            self.obstacles = True
        else:
            self.obstacles = False
    def run(self):
        # Publish a velocity command each iteration until ROS shutdown:
        # spin in place when blocked, otherwise creep forward.
        while not rospy.is_shutdown():
            t = Twist()
            if self.obstacles:
                t.angular.z = 1.0
            else:
                t.linear.x = 0.4
            self.publisher.publish(t)
# Script entry: construct the node and loop until shutdown.
c = Chatter()
c.run()
| mit | Python | |
d571af56293912042846047c88e4a7b2c2f40df9 | add archive command | mralext20/alex-bot | alexBot/cogs/memework.py | alexBot/cogs/memework.py | # -*- coding: utf-8 -*-
from ..tools import Cog
from discord.ext import commands
import discord
from datetime import datetime
class Memework(Cog):
    """Guild-specific moderation cog providing the ``archive`` command,
    which moves a text channel into the archive category and logs the
    action to the dog/rowboat log channels."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Hard-coded channel IDs belonging to the "memework" guild.
        self.archive_cat = self.bot.get_channel(355886867285147648)
        self.rowboat_log = self.bot.get_channel(303658324652589057)
        self.dog_log = self.bot.get_channel(336159533124812800)
    @commands.command()
    @commands.bot_has_role("Rowboat")
    @commands.has_permissions(manage_channels=True)
    async def archive(self, ctx: commands.Context, channel: discord.TextChannel=None):
        """Archive ``channel`` (defaults to the channel the command ran in)."""
        if channel is None:
            channel = ctx.channel
        try:
            # Only usable inside the memework guild.
            assert ctx.guild.id == 295341979800436736
        except AssertionError:
            await ctx.send("this only works in the memework guild."
                           "pls tell Alex from Alaska to unload this.")
            # Bug fix: previously fell through and archived the channel anyway,
            # contradicting the message just sent.
            return
        try:
            assert isinstance(channel, discord.TextChannel)
        except AssertionError:
            await ctx.send("you idiot i don't know what that is")
            return
        await channel.edit(category=self.archive_cat,
                           sync_permissions=True,
                           name=f"archived-{channel.name}",
                           reason=f"archived by {ctx.author.name}")
        await channel.send(f"this channel was archived by {ctx.author} at {datetime.utcnow().strftime('%H:%M')} UTC.")
        await ctx.send(f"archived {channel.mention}")
        # Bug fix in the log timestamps below: %M (minute), not %m (month).
        await self.dog_log.send(f"`[{datetime.utcnow().strftime('%H:%M')}]`"
                                f"\U0001f6e0 {ctx.author} (`{ctx.author.id}`) Archived "
                                f"{channel} (`{channel.id}`)")
        await self.rowboat_log.send(f"`[{datetime.utcnow().strftime('%H:%M:%S')}]`"
                                    f"\U0001f6e0 {ctx.author} (`{ctx.author.id}`) Archived "
                                    f"**{channel}**")
def setup(bot):
    # discord.py extension hook: register the cog when the module is loaded.
    bot.add_cog(Memework(bot))
| mit | Python | |
4b0656a2581df14bee4ae97da95f68360c24ee82 | Create rrd_export.py | ninjawil/weather-station,ninjawil/weather-station,ninjawil/weather-station,ninjawil/weather-station,ninjawil/weather-station,ninjawil/weather-station,ninjawil/weather-station | scripts/rrd_export.py | scripts/rrd_export.py | #-------------------------------------------------------------------------------
#
# The MIT License (MIT)
#
# Copyright (c) 2015 William De Freitas
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
#-------------------------------------------------------------------------------
#!/usr/bin/env python
'''Manages RRD export to XML'''
#===============================================================================
# Import modules
#===============================================================================
# Standard Library
import os
import sys
import subprocess
import shlex
# Third party modules
import rrdtool
# Application modules
import log
import settings as s
#===============================================================================
# Export RRD data to xml
#===============================================================================
def rrdExport(start , step , sortieXML):
    """Launch an ``rrdtool xport`` of all weather data sources from *start*
    to now at *step*-second resolution, writing XML to *sortieXML* under the
    fixed data directory.  Runs asynchronously (the child is never waited on).
    """
    texte = "rrdtool xport -s {0} -e now --step {1} ".format(start, step)
    texte += "DEF:a={0}:inside_temp:AVERAGE ".format(s.RRDTOOL_RRD_FILE)
    texte += "DEF:b={0}:inside_hum:AVERAGE ".format(s.RRDTOOL_RRD_FILE)
    texte += "DEF:c={0}:door_open:AVERAGE ".format(s.RRDTOOL_RRD_FILE)
    texte += "DEF:d={0}:precip_rate:AVERAGE ".format(s.RRDTOOL_RRD_FILE)
    texte += "DEF:e={0}:precip_acc:AVERAGE ".format(s.RRDTOOL_RRD_FILE)
    texte += "DEF:f={0}:outside_temp:AVERAGE ".format(s.RRDTOOL_RRD_FILE)
    # NOTE: the doubled quotes below are adjacent string literals, so e.g.
    # "XPORT:a:""inside_temp"" " is simply 'XPORT:a:inside_temp '.
    texte += "XPORT:a:""inside_temp"" "
    texte += "XPORT:b:""inside_hum"" "
    texte += "XPORT:c:""door_open"" "
    texte += "XPORT:d:""precip_rate"" "
    texte += "XPORT:e:""precip_acc"" "
    texte += "XPORT:f:""outside_temp"" "
    # Output path is hard-coded to the Pi's data directory.
    fileout = open("/home/pi/weather/data/"+sortieXML,"w")
    args = shlex.split(texte)
    # Fire and forget: the child inherits the file descriptor, so closing
    # our handle immediately afterwards is safe.
    subprocess.Popen(args, stdout=fileout)
    fileout.close()
#===============================================================================
# MAIN
#===============================================================================
def main():
    '''Entry point for script'''
    # Export each time window at a step (seconds) coarse enough to keep the
    # XML small; all exports are launched concurrently by rrdExport().
    # ok extact 3 hours data
    rrdExport("now-3h",300, "weather3h.xml")
    #ok 24 hours
    rrdExport("now-24h",900, "weather24h.xml")
    #ok 48 hours
    rrdExport("now-48h",1800, "weather48h.xml")
    #ok 1 week
    rrdExport("now-8d",3600, "weather1w.xml")
    #ok 1 month
    rrdExport("now-1month",14400, "weather1m.xml")
    #ok 3 month
    rrdExport("now-3month",28800, "weather3m.xml")
    #ok 1 year
    rrdExport("now-1y",43200, "weather1y.xml")
#===============================================================================
# BOILER PLATE
#===============================================================================
if __name__=='__main__':
sys.exit(main())
| mit | Python | |
d3166947023283ae6aed9737703c852552cf17f8 | Update app/extensions/allows/allows.py | apipanda/openssl,apipanda/openssl,apipanda/openssl,apipanda/openssl | app/extensions/allows/allows.py | app/extensions/allows/allows.py | from flask import current_app
from flask import request
from functools import wraps
from werkzeug import LocalProxy
from werkzeug.exceptions import Forbidden
class Allows(object):
    """Flask extension that enforces authorization *requirements* on views.

    :param app: optional Flask app to register on immediately
    :param identity_loader: callable returning the current identity
    :param throws: exception type raised when requirements fail
    :param on_fail: value or callable used instead of raising, when set
    """
    def __init__(self, app=None, identity_loader=None,
                 throws=Forbidden, on_fail=None):
        self._identity_loader = identity_loader
        self.throws = throws
        self.on_fail = _make_callable(on_fail)
        if app:
            self.init_app(app)
    def init_app(self, app):
        # Standard Flask extension registration onto app.extensions.
        if not hasattr(app, 'extensions'):
            app.extensions = {}
        app.extensions['allows'] = self
    def requires(self, *requirements, **opts):
        # Decorator factory: run the view only if all requirements pass.
        # ``throws``/``on_fail`` keyword options override the instance-wide
        # defaults for this one view.
        def raiser():
            raise opts.get('throws', self.throws)
        def fail(*args, **kwargs):
            # If the failure handler returns a non-None value it becomes the
            # response; otherwise fall back to raising.
            f = _make_callable(opts.get('on_fail', self.on_fail))
            res = f(*args, **kwargs)
            if res is not None:
                return res
            raiser()
        def decorator(f):
            @wraps(f)
            def allower(*args, **kwargs):
                if self.fulfill(requirements):
                    return f(*args, **kwargs)
                else:
                    return fail(*args, **kwargs)
            return allower
        return decorator
    def identity_loader(self, f):
        "Provides an identity loader for the instance"
        self._identity_loader = f
        return f
    def fulfill(self, requirements, identity=None):
        "Runs each requirement until one is not fulfilled"
        # Each requirement is called as r(identity, request).
        identity = identity or self._identity_loader()
        return all(r(identity, request) for r in requirements)
def __get_allows():
    "Internal helper"
    # Resolve the Allows instance registered on the current app; a
    # RuntimeError signals init_app() was never called.
    try:
        return current_app.extensions['allows']
    except (AttributeError, KeyError):
        raise RuntimeError("Not configured.")
def _make_callable(func_or_value):
if not callable(func_or_value):
return lambda *a, **k: func_or_value
return func_or_value
_allows = LocalProxy(__get_allows, name="allows")
| mit | Python | |
37dc854c8af69c679f91163355b2a4314d66820b | Add a marker interface | usingnamespace/usingnamespace | usingnamespace/api/interfaces.py | usingnamespace/api/interfaces.py | from zope.interface import Interface
class ISerializer(Interface):
    """Marker Interface -- declares no methods; presumably used to register
    and look up serializer components via the zope component architecture."""
| isc | Python | |
b4333af5737b1376452eb0490f4175a1554ba212 | Fix #116 | studio666/gratipay.com,eXcomm/gratipay.com,mccolgst/www.gittip.com,MikeFair/www.gittip.com,mccolgst/www.gittip.com,studio666/gratipay.com,gratipay/gratipay.com,bountysource/www.gittip.com,mccolgst/www.gittip.com,bountysource/www.gittip.com,eXcomm/gratipay.com,bountysource/www.gittip.com,eXcomm/gratipay.com,gratipay/gratipay.com,bountysource/www.gittip.com,studio666/gratipay.com,mccolgst/www.gittip.com,gratipay/gratipay.com,MikeFair/www.gittip.com,eXcomm/gratipay.com,studio666/gratipay.com,MikeFair/www.gittip.com,gratipay/gratipay.com | configure-aspen.py | configure-aspen.py | import os
import gittip
import gittip.wireup
import gittip.authentication
import gittip.csrf
from gittip.networks import github
# Wire up Gittip's global state: canonical-host handling, the database
# connection, and billing.
gittip.wireup.canonical()
gittip.wireup.db()
gittip.wireup.billing()
# GitHub OAuth credentials come from the environment.  NOTE(review):
# ``website`` is assumed to be injected by Aspen into this configure
# script's namespace -- confirm against the Aspen docs.
website.github_client_id = os.environ['GITHUB_CLIENT_ID'].decode('ASCII')
website.github_client_secret = os.environ['GITHUB_CLIENT_SECRET'].decode('ASCII')
website.github_callback = os.environ['GITHUB_CALLBACK'].decode('ASCII')
# Request/response hook pipeline: canonicalize, CSRF-check and authenticate
# inbound requests; mirror auth/CSRF on the way out.
website.hooks.inbound_early.register(gittip.canonize)
website.hooks.inbound_early.register(gittip.csrf.inbound)
website.hooks.inbound_early.register(gittip.authentication.inbound)
website.hooks.outbound_late.register(gittip.authentication.outbound)
website.hooks.outbound_late.register(gittip.csrf.outbound)
def add_stuff(request):
    # Expose common values to every simplate's template context.
    request.context['__version__'] = gittip.__version__
    request.context['username'] = None
    request.context['github'] = github
website.hooks.inbound_early.register(add_stuff)
| import os
import gittip
import gittip.wireup
import gittip.authentication
import gittip.csrf
gittip.wireup.canonical()
gittip.wireup.db()
gittip.wireup.billing()
website.github_client_id = os.environ['GITHUB_CLIENT_ID'].decode('ASCII')
website.github_client_secret = os.environ['GITHUB_CLIENT_SECRET'].decode('ASCII')
website.github_callback = os.environ['GITHUB_CALLBACK'].decode('ASCII')
website.hooks.inbound_early.register(gittip.canonize)
website.hooks.inbound_early.register(gittip.csrf.inbound)
website.hooks.inbound_early.register(gittip.authentication.inbound)
website.hooks.outbound_late.register(gittip.authentication.outbound)
website.hooks.outbound_late.register(gittip.csrf.outbound)
def add_stuff(request):
request.context['__version__'] = gittip.__version__
request.context['username'] = None
website.hooks.inbound_early.register(add_stuff)
| cc0-1.0 | Python |
f42d2264917f109b8cee1d641a475934a456aa61 | Add a BaseFrontend | vrs01/mopidy,priestd09/mopidy,swak/mopidy,ZenithDK/mopidy,jcass77/mopidy,dbrgn/mopidy,diandiankan/mopidy,jodal/mopidy,diandiankan/mopidy,hkariti/mopidy,swak/mopidy,woutervanwijk/mopidy,jmarsik/mopidy,bencevans/mopidy,bacontext/mopidy,pacificIT/mopidy,rawdlite/mopidy,hkariti/mopidy,diandiankan/mopidy,tkem/mopidy,rawdlite/mopidy,vrs01/mopidy,jodal/mopidy,vrs01/mopidy,swak/mopidy,ali/mopidy,rawdlite/mopidy,SuperStarPL/mopidy,mokieyue/mopidy,ZenithDK/mopidy,swak/mopidy,jcass77/mopidy,ali/mopidy,glogiotatidis/mopidy,liamw9534/mopidy,liamw9534/mopidy,tkem/mopidy,ZenithDK/mopidy,priestd09/mopidy,ali/mopidy,priestd09/mopidy,mokieyue/mopidy,quartz55/mopidy,jmarsik/mopidy,mokieyue/mopidy,tkem/mopidy,pacificIT/mopidy,glogiotatidis/mopidy,glogiotatidis/mopidy,mopidy/mopidy,kingosticks/mopidy,kingosticks/mopidy,ali/mopidy,mopidy/mopidy,mopidy/mopidy,bencevans/mopidy,quartz55/mopidy,pacificIT/mopidy,bencevans/mopidy,bacontext/mopidy,dbrgn/mopidy,bacontext/mopidy,tkem/mopidy,dbrgn/mopidy,jmarsik/mopidy,ZenithDK/mopidy,vrs01/mopidy,hkariti/mopidy,woutervanwijk/mopidy,SuperStarPL/mopidy,bencevans/mopidy,quartz55/mopidy,jcass77/mopidy,dbrgn/mopidy,kingosticks/mopidy,SuperStarPL/mopidy,adamcik/mopidy,jmarsik/mopidy,bacontext/mopidy,pacificIT/mopidy,mokieyue/mopidy,abarisain/mopidy,adamcik/mopidy,glogiotatidis/mopidy,quartz55/mopidy,adamcik/mopidy,hkariti/mopidy,abarisain/mopidy,diandiankan/mopidy,SuperStarPL/mopidy,jodal/mopidy,rawdlite/mopidy | mopidy/frontends/base.py | mopidy/frontends/base.py | class BaseFrontend(object):
"""
Base class for frontends.
:param core_queue: queue for messaging the core
:type core_queue: :class:`multiprocessing.Queue`
:param backend: the backend
:type backend: :class:`mopidy.backends.base.BaseBackend`
"""
def __init__(self, core_queue, backend):
self.core_queue = core_queue
self.backend = backend
def start(self):
"""Start the frontend."""
pass
def destroy(self):
"""Destroy the frontend."""
pass
def process_message(self, message):
"""
Process messages for the frontend.
:param message: the message
:type message: dict
"""
raise NotImplementedError
| apache-2.0 | Python | |
2d6ecb3b5b67539c6ad0f211d7b059ac44df2731 | Make gallery of examples for various bending angles up to 90 degrees | willettk/rgz-analysis,willettk/rgz-analysis,afgaron/rgz-analysis,willettk/rgz-analysis,afgaron/rgz-analysis,afgaron/rgz-analysis | python/bending_examples.py | python/bending_examples.py | # Make a gallery of images showing the RGZ consensus double sources, sorted by bending angle.
from astropy.io import ascii
path = '/Users/willettk/Astronomy/Research/GalaxyZoo'
data = ascii.read('{:}/rgz-analysis/csv/static_catalog3.csv'.format(path),delimiter=' ')
import bending_angles as ba
import numpy as np
pathdict = ba.make_pathdict()
def bending_examples():
for a in np.linspace(0,80,9):
bdata = data[(data['bending_angle'] >= a) & (data['bending_angle'] < a+10.)]
count,errcount = 0,0
if len(bdata) > 0:
for b in bdata:
zid = b['zooniverse_id']
try:
if b['angle_type'] == 'multipeaked_singles':
angle_type = 'mps'
else:
angle_type = 'radio'
ba.plot_one_double(zid,pathdict,save_fig=True,anglepath='{0:.0f}_{1:.0f}/'.format(a,a+10),dbltype=angle_type)
count += 1
except ValueError as inst:
print "ValueError,",inst.args,zid
errcount += 1
print '{:d} galaxies with bending angle, {:d} with errors for angles between {:.0f} and {:.0f}'.format(count,errcount,a,a+10)
| mit | Python | |
93ba6ff584d84255f46ef11fb44e9ae863cd7aed | add demo about reversing arguments | ASMlover/study,ASMlover/study,ASMlover/study,ASMlover/study,ASMlover/study,ASMlover/study,ASMlover/study,ASMlover/study,ASMlover/study | python/src/reverse_args.py | python/src/reverse_args.py | # Copyright (c) 2014 ASMlover. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list ofconditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materialsprovided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import sys
if __name__ == '__main__':
    # Echo the command-line arguments back in reverse order, space-separated.
    print ' '.join(reversed(sys.argv[1:]))
| bsd-2-clause | Python | |
10964c0a3a1de2ba8c8d32c7aeed123b5712a759 | Test DbMixin with fake | adi-foundry/nycodex,alanhdu/nycodex,alexander-yu/nycodex,alexander-yu/nycodex,adi-foundry/nycodex | nycodex/tests/test_db.py | nycodex/tests/test_db.py | import pytest
import sqlalchemy
import testing.postgresql
from nycodex import db
class FakeTable(db.Base, db.DbMixin):
    """Minimal mapped table used only to exercise DbMixin behaviour
    (upsert) without depending on the real application models."""
    __tablename__ = "_fake_table"
    # CHAR(9) matches the "xxxx-xxxx" id format used by the test data.
    id = sqlalchemy.Column(sqlalchemy.CHAR(9), primary_key=True)
    name = sqlalchemy.Column(sqlalchemy.TEXT, nullable=False)
    # Nullable so upsert's NULL-vs-existing-value handling can be tested.
    description = sqlalchemy.Column(sqlalchemy.TEXT, nullable=True)
@pytest.fixture
def engine():
    # Spin up a throwaway PostgreSQL instance and create the schema; the
    # server is torn down automatically when the context manager exits.
    with testing.postgresql.Postgresql() as postgresql:
        engine = sqlalchemy.create_engine(postgresql.url())
        db.Base.metadata.create_all(engine)
        yield engine
@pytest.fixture
def conn(engine):
    # Raw connection for the code under test (DbMixin.upsert takes a conn).
    with engine.connect() as conn:
        yield conn
@pytest.fixture
def session(engine):
    # Independent ORM session used to verify what actually reached the DB.
    Session = sqlalchemy.orm.sessionmaker(bind=engine)
    session = Session()
    yield session
    session.close()
def test_upsert(session, conn):
    """upsert() must insert new rows, be idempotent on repeats, write
    through changed values, and never clobber a non-null column with NULL."""
    fake = [
        FakeTable(id="abcd-0000", name="a", description="x"),
        FakeTable(id="abcd-0001", name="b", description="y"),
        FakeTable(id="abcd-0002", name="c", description="z"),
    ]
    FakeTable.upsert(conn, fake)
    assert session.query(FakeTable).order_by(FakeTable.id).all() == fake
    # Upserting identical rows again must not duplicate or change anything.
    FakeTable.upsert(conn, fake)
    assert session.query(FakeTable).order_by(FakeTable.id).all() == fake
    # A changed value on an existing id must be written through.
    fake[0].name = 'b'
    FakeTable.upsert(conn, fake)
    assert session.query(FakeTable).order_by(FakeTable.id).all() == fake
    # Do not overwrite non-null columns w/ NULL
    new = [
        FakeTable(id="abcd-0002", name="d", description=None),
    ]
    FakeTable.upsert(conn, new)
    fake[2].name = 'd'
    assert session.query(FakeTable).order_by(FakeTable.id).all() == fake
| import datetime as dt
import pytest
import pytz
import sqlalchemy
import testing.postgresql
from nycodex import db
@pytest.fixture
def engine():
with testing.postgresql.Postgresql() as postgresql:
engine = sqlalchemy.create_engine(postgresql.url())
db.Base.metadata.create_all(engine)
yield engine
@pytest.fixture
def conn(engine):
with engine.connect() as conn:
yield conn
@pytest.fixture
def session(engine):
Session = sqlalchemy.orm.sessionmaker(bind=engine)
session = Session()
yield session
session.close()
def test_Owner_upsert(session, conn):
owners = [
db.Owner(id="abcd-0000", name="a"),
db.Owner(id="abcd-0001", name="b"),
db.Owner(id="abcd-0002", name="c"),
]
db.Owner.upsert(conn, owners)
assert session.query(db.Owner).order_by(db.Owner.id).all() == owners
# Does not insert extra columns
db.Owner.upsert(conn, owners)
assert session.query(db.Owner).order_by(db.Owner.id).all() == owners
# Handles conflicts correctly
owners[0].name = 'd'
db.Owner.upsert(conn, owners)
assert session.query(db.Owner).order_by(db.Owner.id).all() == owners
def test_Dataset_upsert(session, conn):
owner = db.Owner(id="abcd-0000", name="owner")
session.add(owner)
session.commit()
datasets = [
db.Dataset(
id="abcd-0000",
name="x",
description="test",
is_official=True,
updated_at=pytz.utc.localize(dt.datetime.utcnow()),
scraped_at=pytz.utc.localize(dt.datetime.utcnow()),
owner_id=owner.id,
domain_category=db.DomainCategory.RECREATION,
domain_tags=['2010', 'politics'],
asset_type=db.AssetType.MAP),
db.Dataset(
id="abcd-0001",
name="y",
description="test",
is_official=False,
owner_id=owner.id,
updated_at=pytz.utc.localize(dt.datetime.utcnow()),
domain_category="Recreation",
domain_tags=[],
asset_type="map")
]
db.Dataset.upsert(conn, datasets)
assert session.query(db.Dataset).order_by(db.Dataset.id).count() == 2
# Does not insert extra columns
db.Dataset.upsert(conn, datasets)
assert session.query(db.Dataset).order_by(db.Dataset.id).count() == 2
# Handles conflicts correctly
datasets[1].domain_category = db.DomainCategory.SOCIAL_SERVICES
datasets[1].asset_type = db.AssetType.DATASET
assert session.query(db.Dataset).order_by(db.Dataset.id).all() != datasets
db.Dataset.upsert(conn, datasets)
assert session.query(db.Dataset).order_by(db.Dataset.id).all() == datasets
| apache-2.0 | Python |
058eeefdef485595e48dcd0250bcf9dc9664287a | Add Launchpad (fixes #17) | foauth/foauth.org,foauth/foauth.org,foauth/oauth-proxy,foauth/foauth.org | services/launchpad.py | services/launchpad.py | from oauthlib.oauth1.rfc5849 import SIGNATURE_PLAINTEXT, SIGNATURE_TYPE_BODY, SIGNATURE_TYPE_AUTH_HEADER
import requests
import foauth.providers
class Launchpad(foauth.providers.OAuth1):
    """OAuth 1.0a provider definition for Launchpad.

    Launchpad's OAuth flavour is non-standard: it uses PLAINTEXT
    signatures with an empty consumer secret, and its token endpoints
    expect the signature parameters in the POST body.
    """
    # General info about the provider
    provider_url = 'https://launchpad.net/'
    docs_url = 'https://launchpad.net/+apidoc/1.0.html'
    category = 'Code'
    # URLs to interact with the API
    request_token_url = 'https://launchpad.net/+request-token'
    authorize_url = 'https://launchpad.net/+authorize-token'
    access_token_url = 'https://launchpad.net/+access-token'
    api_domains = ['api.launchpad.net', 'api.staging.launchpad.net']
    signature_method = SIGNATURE_PLAINTEXT
    returns_token = False
    signature_type = SIGNATURE_TYPE_AUTH_HEADER
    available_permissions = [
        (None, 'read non-private data'),  # typo fix: was "non-privade"
        ('WRITE_PUBLIC', 'change non-private data'),
        ('READ_PRIVATE', 'read anything, including private data'),
        ('WRITE_PRIVATE', 'change anything, including private data'),
    ]
    permissions_widget = 'radio'
    def __init__(self, *args, **kwargs):
        super(Launchpad, self).__init__(*args, **kwargs)
        self.client_secret = '' # Must be empty to satisfy Launchpad
    def get_authorize_params(self, redirect_uri, scopes):
        params = super(Launchpad, self).get_authorize_params(redirect_uri, scopes)
        # Launchpad selects the permission level via this extra parameter;
        # a falsy scope means the default read-only level.
        params['allow_permission'] = scopes[0] or 'READ_PUBLIC'
        return params
    def get_request_token_response(self, redirect_uri, scopes):
        # Launchpad expects the signature in the body, but we don't have
        # additional parameters, so oauthlib doesn't help us here.
        return requests.post(self.get_request_token_url(),
                             data={'oauth_consumer_key': self.client_id,
                                   'oauth_signature_method': 'PLAINTEXT',
                                   'oauth_signature': '&'})
    def get_access_token_response(self, token, secret, verifier=None):
        # Launchpad expects the signature in the body, but we don't have
        # additional parameters, so oauthlib doesn't help us here.
        # (A stray, unused requests.Request(...).prepare() pair that built
        # the same payload against authorize_url was removed.)
        return requests.post(self.get_access_token_url(),
                             data={'oauth_consumer_key': self.client_id,
                                   'oauth_token': token,
                                   'oauth_signature_method': 'PLAINTEXT',
                                   'oauth_signature': '&%s' % secret})
    def get_user_id(self, key):
        # The authenticated user's record exposes their username as 'name'.
        r = super(Launchpad, self).api(key, self.api_domains[0], '/1.0/people/+me')
        return r.json()[u'name']
| bsd-3-clause | Python | |
98232fea3f8847aad69ed6af0b37a5624c24488b | Add experiment script to run a dataset through all graph clustering method | studiawan/pygraphc | experiment.py | experiment.py | import fnmatch
import os
from pygraphc.preprocess.PreprocessLog import PreprocessLog
from pygraphc.preprocess.CreateGraph import CreateGraph
from pygraphc.clustering.MajorClust import MajorClust, ImprovedMajorClust
def get_dataset(dataset, dataset_path, file_extension):
    """Collect every log file under ``dataset_path`` matching
    ``file_extension`` and build, per file, the derived paths used by the
    experiment: the labeled ground-truth file plus the per-cluster,
    per-line and anomaly result files under ``./result/``.

    Each key is the dataset name joined with the portion of the file path
    that follows it, e.g. ``Hofstede2014/host1/auth.anon``.
    """
    result_dir = './result/'
    files = {}
    for root, _, filenames in os.walk(dataset_path):
        for filename in fnmatch.filter(filenames, file_extension):
            log_path = os.path.join(root, filename)
            # Everything after the dataset name uniquely identifies the file.
            suffix = log_path.split(dataset)[1]
            key = dataset + suffix
            files[key] = {
                'log_path': log_path,
                'labeled_path': str(log_path) + '.labeled',
                'result_percluster': result_dir + key + '.percluster',
                'result_perline': result_dir + key + '.perline',
                'anomaly_report': result_dir + key + '.anomaly',
            }
    return files
def main(dataset):
    """Run the full pipeline (preprocess -> graph -> MajorClust) over every
    log file of the named dataset.  Dataset locations are hard-coded to this
    machine's checkout of labeled-authlog.  (Python 2: iteritems/print.)"""
    # get dataset files
    files = {}
    if dataset == 'Hofstede2014':
        files = get_dataset(dataset, '/home/hudan/Git/labeled-authlog/dataset/Hofstede2014', '*.anon')
    elif dataset == 'SecRepo':
        files = get_dataset(dataset, '/home/hudan/Git/labeled-authlog/dataset/SecRepo', '*.log')
    # main process
    for file_identifier, properties in files.iteritems():
        # preprocess log file
        preprocess = PreprocessLog(properties['log_path'])
        preprocess.do_preprocess()
        events_unique = preprocess.events_unique
        # create graph
        g = CreateGraph(events_unique)
        g.do_create()
        graph = g.g
        # run MajorClust method
        mc = MajorClust(graph)
        clusters = mc.get_majorclust(graph)
        print clusters
if __name__ == '__main__':
data = 'Hofstede2014'
main(data)
| mit | Python | |
aee93802503a19b873299774e351ef883e81356b | Add functions.py to provide comment functions | kensonman/webframe,kensonman/webframe,kensonman/webframe | functions.py | functions.py | #-*- coding: utf-8 -*-
def getClientIP( req ):
   '''
   Get the client IP address of the given request.

   Prefers the first entry of the X-Forwarded-For header (the original
   client when the app sits behind a proxy) and falls back to
   REMOTE_ADDR.  Note the header is client-supplied and spoofable.
   '''
   xForwardedFor=req.META.get('HTTP_X_FORWARDED_FOR')
   if xForwardedFor:
      # Entries are comma separated, often with a space after the comma;
      # strip so "a, b" yields "a" rather than " a".
      ip=xForwardedFor.split(',')[0].strip()
   else:
      ip=req.META.get('REMOTE_ADDR')
   return ip
def getBool(val, trueOpts=('YES', 'Y', '1', 'TRUE', 'T')):
    """Interpret *val* as a boolean.

    Args:
        val: any value; it is stringified and upper-cased before the check.
        trueOpts: upper-case strings that count as "true".  The default is
            now a tuple rather than a list to avoid the shared
            mutable-default pitfall; callers may pass any container.

    Returns:
        True if ``str(val).upper()`` is one of *trueOpts*; False for falsy
        values (None, '', 0, False) or any unrecognised string.
    """
    if not val:
        return False
    return str(val).upper() in trueOpts
| apache-2.0 | Python | |
f3a43d11f79766c43be6de359762bd37cde49b38 | Complete 3 pointer method | bowen0701/algorithms_data_structures | lc0611_valid_triangle_number.py | lc0611_valid_triangle_number.py | """Leetcode 611. Valid Triangle Number
Medium
URL: https://leetcode.com/problems/valid-triangle-number/
Given an array consists of non-negative integers,
your task is to count the number of triplets chosen from the array that can
make triangles if we take them as side lengths of a triangle.
Example 1:
Input: [2,2,3,4]
Output: 3
Explanation:
Valid combinations are:
2,3,4 (using the first 2)
2,3,4 (using the second 2)
2,2,3
Note:
- The length of the given array won't exceed 1000.
- The integers in the given array are in the range of [0, 1000].
"""
class SolutionThreePointers(object):
    """Count valid triangles via sorting plus a two-pointer sweep."""

    def triangleNumber(self, nums):
        """
        :type nums: List[int]
        :rtype: int

        Sort the side lengths, then for each candidate longest side
        nums[i] sweep two pointers over the shorter sides: whenever
        nums[lo] + nums[hi] > nums[i], every index in [lo, hi) pairs
        with hi to form a valid triangle.  O(n^2) after the sort.
        Note: sorts *nums* in place (as the original implementation did).
        """
        nums.sort()
        count = 0
        for i in range(len(nums) - 1, 1, -1):
            lo, hi = 0, i - 1
            while lo < hi:
                if nums[lo] + nums[hi] > nums[i]:
                    # nums[lo..hi-1] all work when paired with nums[hi]
                    count += hi - lo
                    hi -= 1
                else:
                    lo += 1
        return count
def main():
    """Ad-hoc demo: count the triangles in the example from the problem.

    Fixes: removed the unused ``import time``; parenthesized the print so
    the single-argument call behaves identically on Python 2 and 3.
    """
    nums = [2, 2, 3, 4]
    # Expected output: 3 (see the example in the module docstring).
    print(SolutionThreePointers().triangleNumber(nums))


if __name__ == '__main__':
    main()
| bsd-2-clause | Python | |
45a12a354783845d4f761147f6949797d8f6ffa3 | Add flat type driver unittests | mandeepdhami/neutron,skyddv/neutron,Stavitsky/neutron,aristanetworks/neutron,virtualopensystems/neutron,yamahata/tacker,barnsnake351/neutron,blueboxgroup/neutron,wenhuizhang/neutron,apporc/neutron,SamYaple/neutron,swdream/neutron,jacknjzhou/neutron,dhanunjaya/neutron,gkotton/neutron,blueboxgroup/neutron,watonyweng/neutron,leeseuljeong/leeseulstack_neutron,apporc/neutron,suneeth51/neutron,SmartInfrastructures/neutron,vbannai/neutron,cloudbase/neutron,adelina-t/neutron,jumpojoy/neutron,openstack/neutron,CiscoSystems/neutron,wolverineav/neutron,antonioUnina/neutron,projectcalico/calico-neutron,glove747/liberty-neutron,javaos74/neutron,cisco-openstack/neutron,mattt416/neutron,openstack/neutron,gkotton/neutron,cisco-openstack/neutron,paninetworks/neutron,eonpatapon/neutron,miyakz1192/neutron,CiscoSystems/neutron,eayunstack/neutron,sebrandon1/neutron,NeCTAR-RC/neutron,Metaswitch/calico-neutron,takeshineshiro/neutron,vbannai/neutron,redhat-openstack/neutron,vveerava/Openstack,virtualopensystems/neutron,cloudbase/neutron-virtualbox,yanheven/neutron,shahbazn/neutron,igor-toga/local-snat,dhanunjaya/neutron,JianyuWang/neutron,eayunstack/neutron,jerryz1982/neutron,redhat-openstack/neutron,leeseulstack/openstack,miyakz1192/neutron,paninetworks/neutron,leeseulstack/openstack,alexandrucoman/vbox-neutron-agent,alexandrucoman/vbox-neutron-agent,mattt416/neutron,cloudbase/neutron-virtualbox,igor-toga/local-snat,blueboxgroup/neutron,Metaswitch/calico-neutron,waltBB/neutron_read,klmitch/neutron,magic0704/neutron,SamYaple/neutron,magic0704/neutron,yuewko/neutron,CiscoSystems/neutron,pnavarro/neutron,jacknjzhou/neutron,takeshineshiro/neutron,klmitch/neutron,dims/neutron,yamahata/tacker,gkotton/neutron,noironetworks/neutron,bigswitch/neutron,huntxu/neutron,eonpatapon/neutron,barnsnake351/neutron,skyddv/neutron,vivekanand1101/neutron,aristanetworks/neutron,huntxu/neutron,sebrandon1/neutron,wenhuizhang/neutron,neoar
eslinux/neutron,yamahata/tacker,mandeepdhami/neutron,neoareslinux/neutron,yuewko/neutron,NeCTAR-RC/neutron,wolverineav/neutron,antonioUnina/neutron,vbannai/neutron,bgxavier/neutron,rdo-management/neutron,JianyuWang/neutron,jerryz1982/neutron,mahak/neutron,SmartInfrastructures/neutron,pnavarro/neutron,cernops/neutron,silenci/neutron,vveerava/Openstack,leeseulstack/openstack,asgard-lab/neutron,watonyweng/neutron,bigswitch/neutron,sasukeh/neutron,shahbazn/neutron,suneeth51/neutron,waltBB/neutron_read,mahak/neutron,yanheven/neutron,gopal1cloud/neutron,mahak/neutron,bgxavier/neutron,JioCloud/neutron,MaximNevrov/neutron,leeseuljeong/leeseulstack_neutron,virtualopensystems/neutron,Stavitsky/neutron,rdo-management/neutron,MaximNevrov/neutron,swdream/neutron,infobloxopen/neutron,chitr/neutron,sasukeh/neutron,vveerava/Openstack,gopal1cloud/neutron,jumpojoy/neutron,chitr/neutron,cloudbase/neutron,JioCloud/neutron,projectcalico/calico-neutron,adelina-t/neutron,openstack/neutron,mmnelemane/neutron,cernops/neutron,silenci/neutron,mmnelemane/neutron,noironetworks/neutron,dims/neutron,leeseuljeong/leeseulstack_neutron,glove747/liberty-neutron,infobloxopen/neutron,vivekanand1101/neutron,asgard-lab/neutron,javaos74/neutron | neutron/tests/unit/ml2/test_type_flat.py | neutron/tests/unit/ml2/test_type_flat.py | # Copyright (c) 2014 Thales Services SAS
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.common import exceptions as exc
import neutron.db.api as db
from neutron.plugins.common import constants as p_const
from neutron.plugins.ml2 import driver_api as api
from neutron.plugins.ml2.drivers import type_flat
from neutron.tests import base
FLAT_NETWORKS = 'flat_net1, flat_net2'
class FlatTypeTest(base.BaseTestCase):
    """Unit tests for the ML2 flat-network type driver.

    Each test builds a provider ``segment`` dict (network type, physical
    network, optional segmentation id) and exercises validation,
    reservation and release against a fresh DB session.  The driver is
    configured with the two physical networks from FLAT_NETWORKS.
    """

    def setUp(self):
        super(FlatTypeTest, self).setUp()
        db.configure_db()
        self.driver = type_flat.FlatTypeDriver()
        # Restrict the driver to the physical networks declared above.
        self.driver._parse_networks(FLAT_NETWORKS)
        self.session = db.get_session()
        self.addCleanup(db.clear_db)

    def _get_allocation(self, session, segment):
        # FlatAllocation row for the segment's physical network, or None
        # when nothing has been reserved.
        return session.query(type_flat.FlatAllocation).filter_by(
            physical_network=segment[api.PHYSICAL_NETWORK]).first()

    def test_validate_provider_segment(self):
        # A configured physical network validates without raising.
        segment = {api.NETWORK_TYPE: p_const.TYPE_FLAT,
                   api.PHYSICAL_NETWORK: 'flat_net1'}
        self.driver.validate_provider_segment(segment)

    def test_validate_provider_segment_without_physnet_restriction(self):
        # '*' disables the physnet whitelist, so any name is accepted.
        self.driver._parse_networks('*')
        segment = {api.NETWORK_TYPE: p_const.TYPE_FLAT,
                   api.PHYSICAL_NETWORK: 'other_flat_net'}
        self.driver.validate_provider_segment(segment)

    def test_validate_provider_segment_with_missing_physical_network(self):
        # Flat segments require an explicit physical network.
        segment = {api.NETWORK_TYPE: p_const.TYPE_FLAT}
        self.assertRaises(exc.InvalidInput,
                          self.driver.validate_provider_segment,
                          segment)

    def test_validate_provider_segment_with_unsupported_physical_network(self):
        # A physnet outside the configured whitelist is rejected.
        segment = {api.NETWORK_TYPE: p_const.TYPE_FLAT,
                   api.PHYSICAL_NETWORK: 'other_flat_net'}
        self.assertRaises(exc.InvalidInput,
                          self.driver.validate_provider_segment,
                          segment)

    def test_validate_provider_segment_with_unallowed_segmentation_id(self):
        # Flat networks are unsegmented; a segmentation id is invalid.
        segment = {api.NETWORK_TYPE: p_const.TYPE_FLAT,
                   api.PHYSICAL_NETWORK: 'flat_net1',
                   api.SEGMENTATION_ID: 1234}
        self.assertRaises(exc.InvalidInput,
                          self.driver.validate_provider_segment,
                          segment)

    def test_reserve_provider_segment(self):
        segment = {api.NETWORK_TYPE: p_const.TYPE_FLAT,
                   api.PHYSICAL_NETWORK: 'flat_net1'}
        self.driver.reserve_provider_segment(self.session, segment)
        # Reservation must persist an allocation for the physical network.
        alloc = self._get_allocation(self.session, segment)
        self.assertEqual(segment[api.PHYSICAL_NETWORK], alloc.physical_network)

    def test_release_segment(self):
        segment = {api.NETWORK_TYPE: p_const.TYPE_FLAT,
                   api.PHYSICAL_NETWORK: 'flat_net1'}
        self.driver.reserve_provider_segment(self.session, segment)
        self.driver.release_segment(self.session, segment)
        # Releasing removes the allocation row entirely.
        alloc = self._get_allocation(self.session, segment)
        self.assertIsNone(alloc)

    def test_reserve_provider_segment_already_reserved(self):
        segment = {api.NETWORK_TYPE: p_const.TYPE_FLAT,
                   api.PHYSICAL_NETWORK: 'flat_net1'}
        self.driver.reserve_provider_segment(self.session, segment)
        # A flat physnet can only back one network at a time.
        self.assertRaises(exc.FlatNetworkInUse,
                          self.driver.reserve_provider_segment,
                          self.session, segment)

    def test_allocate_tenant_segment(self):
        # Flat networks cannot be auto-allocated to tenants.
        observed = self.driver.allocate_tenant_segment(self.session)
        self.assertIsNone(observed)
| apache-2.0 | Python | |
a68198514528773d2368e203491121b5c34f956d | Add tests of compile_ptx and compile_ptx_for_current_device | stuartarchibald/numba,stonebig/numba,seibert/numba,stonebig/numba,seibert/numba,seibert/numba,seibert/numba,sklam/numba,sklam/numba,stuartarchibald/numba,IntelLabs/numba,cpcloud/numba,cpcloud/numba,cpcloud/numba,numba/numba,numba/numba,stonebig/numba,gmarkall/numba,stuartarchibald/numba,cpcloud/numba,sklam/numba,gmarkall/numba,gmarkall/numba,IntelLabs/numba,numba/numba,stonebig/numba,IntelLabs/numba,numba/numba,gmarkall/numba,stuartarchibald/numba,stuartarchibald/numba,stonebig/numba,cpcloud/numba,sklam/numba,numba/numba,seibert/numba,IntelLabs/numba,gmarkall/numba,IntelLabs/numba,sklam/numba | numba/cuda/tests/cudapy/test_compiler.py | numba/cuda/tests/cudapy/test_compiler.py | from math import sqrt
from numba import cuda, float32, void
from numba.cuda import compile_ptx, compile_ptx_for_current_device
from numba.cuda.testing import skip_on_cudasim, unittest
@skip_on_cudasim('Compilation unsupported in the simulator')
class TestCompileToPTX(unittest.TestCase):
    """Tests for compile_ptx / compile_ptx_for_current_device.

    Assertions inspect the generated PTX text for markers that
    distinguish kernels from device functions and that show whether
    fastmath code generation took effect.
    """

    def test_global_kernel(self):
        def f(r, x, y):
            i = cuda.grid(1)
            if i < len(r):
                r[i] = x[i] + y[i]

        args = (float32[:], float32[:], float32[:])
        ptx, resty = compile_ptx(f, args)
        # Kernels should not have a func_retval parameter
        self.assertNotIn('func_retval', ptx)
        # .visible .func is used to denote a device function
        self.assertNotIn('.visible .func', ptx)
        # .visible .entry would denote the presence of a global function
        self.assertIn('.visible .entry', ptx)
        # Return type for kernels should always be void
        self.assertEqual(resty, void)

    def test_device_function(self):
        def add(x, y):
            return x + y

        args = (float32, float32)
        ptx, resty = compile_ptx(add, args, device=True)
        # Device functions take a func_retval parameter for storing the
        # returned value in by reference
        self.assertIn('func_retval', ptx)
        # .visible .func is used to denote a device function
        self.assertIn('.visible .func', ptx)
        # .visible .entry would denote the presence of a global function
        self.assertNotIn('.visible .entry', ptx)
        # Inferred return type as expected?
        self.assertEqual(resty, float32)

    def test_fastmath(self):
        def f(x, y, z, d):
            return sqrt((x * y + z) / d)

        args = (float32, float32, float32, float32)
        ptx, resty = compile_ptx(f, args, device=True)
        # Without fastmath, fma contraction is enabled by default, but ftz and
        # approximate div / sqrt is not.
        self.assertIn('fma.rn.f32', ptx)
        self.assertIn('div.rn.f32', ptx)
        self.assertIn('sqrt.rn.f32', ptx)
        ptx, resty = compile_ptx(f, args, device=True, fastmath=True)
        # With fastmath, ftz and approximate div / sqrt are enabled
        self.assertIn('fma.rn.ftz.f32', ptx)
        # "full" refers to a full-range approximate divide
        self.assertIn('div.full.ftz.f32', ptx)
        self.assertIn('sqrt.approx.ftz.f32', ptx)

    def test_compile_ptx_for_current_device(self):
        def add(x, y):
            return x + y

        args = (float32, float32)
        ptx, resty = compile_ptx_for_current_device(add, args, device=True)
        # Check we target the current device's compute capability
        cc = cuda.get_current_device().compute_capability
        target = f'.target sm_{cc[0]}{cc[1]}'
        self.assertIn(target, ptx)


if __name__ == '__main__':
    unittest.main()
| bsd-2-clause | Python | |
e2fa74c533b49cb3479d4c75d5b68f6993e33082 | Add new package:linsys-v (#16823) | LLNL/spack,LLNL/spack,LLNL/spack,LLNL/spack,iulian787/spack,iulian787/spack,iulian787/spack,iulian787/spack,iulian787/spack,LLNL/spack | var/spack/repos/builtin/packages/linsys-v/package.py | var/spack/repos/builtin/packages/linsys-v/package.py | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class LinsysV(MakefilePackage):
    """LINSYS_V: Verified Solution of Linear Systems"""

    homepage = "http://www.math.twcu.ac.jp/ogita/post-k/"
    url = "http://www.math.twcu.ac.jp/ogita/post-k/software/LINSYS_V/LINSYS_V_alpha.tar.gz"

    version(
        "alpha",
        sha256="6666bc837bb6598b7cdefb233d3d0f2c308a24fe3465e4fe9b6c9762810bb320",
    )

    depends_on("mpi")
    depends_on("blas", type="link")
    depends_on("lapack", type="link")
    depends_on("scalapack", type="link")

    def patch(self):
        """Rewrite the bundled Makefile for the active toolchain.

        The upstream Makefile selects toolchains via an ``ENV`` variable
        (K / GCC / ICC) and hard-codes compilers and math libraries; this
        edits it in place to use Spack's MPI wrapper and resolved
        LAPACK/BLAS/ScaLAPACK link flags.
        """
        # Link flags shared by all toolchains, as resolved by Spack.
        math_libs = (
            self.spec["lapack"].libs
            + self.spec["blas"].libs
            + self.spec["scalapack"].libs
        )
        makefile = FileFilter("Makefile")
        if self.spec.satisfies("%gcc"):
            # Switch from the K-computer profile to GCC and disable MKL.
            makefile.filter(r"^ENV\s+=\sK", "#ENV=K")
            makefile.filter(r"^#ENV\s+=\sGCC", "ENV=GCC")
            makefile.filter(r"^MKL\s+=\s1", "MKL=0")
            makefile.filter(r"^CC\s+=\smpicc",
                            "CC={0}".format(self.spec["mpi"].mpicc))
            makefile.filter(
                r"^LIBS\s+=\s-lscalapack\s-lblacs\s-llapack\s-lblas",
                "LIBS={0}".format(math_libs.ld_flags) + " -lm",
            )
        elif self.spec.satisfies("%fj"):
            # Fujitsu compiler: enable the K profile but replace its
            # compiler, flags and library references.
            makefile.filter(r"^#ENV\s+=\sK", "ENV=K")
            makefile.filter(r"^ENV\s+=\sGCC", "#ENV=GCC")
            makefile.filter(r"^MKL\s+=\s1", "MKL=0")
            makefile.filter(
                r"^CC\s+=\smpifccpx",
                "CC={0}".format(self.spec["mpi"].mpicc)
            )
            makefile.filter(
                r"^CFLAGS\s+=\s-Kfast,openmp",
                "CFLAGS=-Ofast {0}".format(self.compiler.openmp_flag),
            )
            makefile.filter(
                r"^LIBS\s+=\s-SCALAPACK\s-SSL2BLAMP",
                "LIBS=-SSL2BLAMP {0}".format(math_libs.ld_flags),
            )
        elif self.spec.satisfies("%intel"):
            # NOTE(review): unlike the other branches this one leaves MKL
            # enabled and LIBS untouched -- presumably intentional for
            # Intel/MKL.  Also the pattern r"^C\s+=\smpiicc" (single "C")
            # looks like a typo for "CC"; confirm against the upstream
            # Makefile before changing.
            makefile.filter(r"^ENV\s+=\sGCC", "#ENV=GCC")
            makefile.filter(r"^ENV\s+=\sICC", "ENV=ICC")
            makefile.filter(r"^C\s+=\smpiicc",
                            "CC={0}".format(self.spec["mpi"].mpicc))

    def install(self, spec, prefix):
        # Upstream has no `make install` target: copy the built binary.
        mkdirp(prefix.bin)
        install("ex_linsys_v", prefix.bin)
| lgpl-2.1 | Python | |
1c0230f7d0add83c36daadcf88bda72500e49015 | add new package (#22648) | LLNL/spack,LLNL/spack,LLNL/spack,LLNL/spack,LLNL/spack | var/spack/repos/builtin/packages/rnaquast/package.py | var/spack/repos/builtin/packages/rnaquast/package.py | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
from spack import *
class Rnaquast(Package):
    """Quality assessment of de novo transcriptome assemblies from RNA-Seq data

    rnaQUAST is a tool for evaluating RNA-Seq assemblies using reference genome
    and gene database. In addition, rnaQUAST is also capable of estimating gene
    database coverage by raw reads and de novo quality assessment
    using third-party software."""

    homepage = "https://github.com/ablab/rnaquast"
    url = "https://github.com/ablab/rnaquast/archive/refs/tags/v2.2.0.tar.gz"

    maintainers = ['dorton21']

    version('2.2.0', sha256='117dff9d9c382ba74b7b0ff24bc7b95b9ca6aa701ebf8afd22943aa54e382334')

    depends_on('python@2.5:', type=('build', 'run'))
    depends_on('py-matplotlib', type=('build', 'run'))
    depends_on('py-joblib', type=('build', 'run'))
    depends_on('py-gffutils', type=('build', 'run'))
    depends_on('gmap-gsnap', type=('build', 'run'))
    depends_on('blast-plus', type=('build', 'run'))

    def install(self, spec, prefix):
        # rnaQUAST ships no build system: copy the tree into bin/ and
        # expose the driver script as an extension-less executable.
        install_tree('.', prefix.bin)
        os.rename('%s/rnaQUAST.py' % prefix.bin, '%s/rnaQUAST' % prefix.bin)

    def setup_run_environment(self, env):
        # Bug fix: the bare `prefix` global is only injected during build
        # phases, so referencing it here raised NameError when the run
        # environment was set up; use the package's own prefix instead.
        env.prepend_path('PATH', self.prefix.bin)
| lgpl-2.1 | Python | |
3c26db43490640e0a98f5c22bb65eecd2c1c5ba9 | Create sudoku_formatter.py | KGHustad/SudokuFormatter | sudoku_formatter.py | sudoku_formatter.py | instructions = """INSTRUCTIONS:
Copy a sudoku puzzle from http://www.menneske.no/sudoku/ to a text file,
and provide this file as the first command line argument and the file for
output as the second command line argument
Remember that box height and width must be added manually (before/after
formatting)"""
import sys
#step #1: fjerne enkle mellomrom bak alle tall
#step #2: bytte ut tall > 9 med bokstaver
#step #3: erstatte doble mellomrom med .
#step #4: erstatte enkle mellomrom med .
#step #5: fjerne tabulering
if len(sys.argv) == 2:
if sys.argv[1] == 'help':
print instructions
sys.exit(1)
#parse command line arguments
try:
in_filename = sys.argv[1]
out_filename = sys.argv[2]
except IndexError:
print "ERROR: Insufficient number of command line arguments."
print "Correct usage:"
print "python %s <input_file> <output_file>\n" % sys.argv[0]
print "For help:"
print "python %s help" % sys.argv[0]
sys.exit(1)
#read from file
try:
infile = open(in_filename, 'r')
data = infile.read()
infile.close()
except IOError:
print "ERROR: Unable to read from %s" % in_filename
sys.exit(1)
#step 1
data = data.replace('1 ', '1')
data = data.replace('2 ', '2')
data = data.replace('3 ', '3')
data = data.replace('4 ', '4')
data = data.replace('5 ', '5')
data = data.replace('6 ', '6')
data = data.replace('7 ', '7')
data = data.replace('8 ', '8')
data = data.replace('9 ', '9')
data = data.replace('0 ', '0')
#step 2
dic = { '10':'A', '11':'B', '12':'C', '13':'D', '14':'E', '15':'F',
'16':'G', '17':'H', '18':'I', '19':'J', '20':'K', '21':'L',
'22':'M', '23':'N', '24':'O', '25':'P', '26':'Q', '27':'R', '28':'S',
'29':'T', '30':'U', '31':'V', '32':'W', '33':'X', '34':'Y', '35':'Z'}
numbers = range(10,36)
for number in numbers:
data = data.replace(str(number), dic[str(number)])
#step 3
data = data.replace(' ', '.')
#step 4
data = data.replace(' ', '.')
#step 5
data = data.replace('\t', '')
#write to file
try:
outfile = open(out_filename, 'w')
outfile.write(data)
outfile.close()
print "The sudoku puzzle was successfully formatted. "
except IOError:
print "ERROR: Unable to write to %s" % out_filename
sys.exit(1)
| mit | Python | |
710fe77b1d03de83cfefa3173b17d2adf5f220fe | Add systemd watchdog | AngryDevelopersLLC/res-core | systemd_watchdog.py | systemd_watchdog.py | # RES Service Package
# Copyright © 2015 InvestGroup, LLC
import asyncio
import logging
def install_watchdog(interval, loop=None):
    """Start periodic systemd watchdog pings on an asyncio event loop.

    Sends WATCHDOG=1 immediately and then re-schedules itself every
    *interval* seconds.  Degrades to a no-op (with a warning) when the
    systemd bindings cannot be imported.
    """
    assert isinstance(interval, (int, float)) and interval > 0
    log = logging.getLogger("systemd-watchdog")
    try:
        from systemd.daemon import notify as sd_notify
    except ImportError:
        log.warning("Failed to import systemd => watchdog is disabled")
        return
    event_loop = asyncio.get_event_loop() if loop is None else loop

    def _ping():
        sd_notify("WATCHDOG=1")
        event_loop.call_later(interval, _ping)

    _ping()
    log.info("Installed watchdog notification once per %s sec" % interval)
| bsd-3-clause | Python | |
5f0feca62055dafd9931bf68abe3286bda9f41e0 | Create __init__.py | ShashaQin/frappe,ShashaQin/frappe,ShashaQin/frappe,ShashaQin/frappe | frappe/desk/doctype/communication_reconciliation/__init__.py | frappe/desk/doctype/communication_reconciliation/__init__.py | import frappe
@frappe.whitelist(allow_guest=True)
def relink(self,name,reference_doctype,reference_name):
dt = reference_doctype
dn = reference_name
if dt=="" or dt==None or dn == "" or dn == None:
return # is blank maybe try flash missing required
frappe.db.sql("""update `tabCommunication`
set reference_doctype = %s ,reference_name = %s ,status = "Linked"
where name = %s """,(dt,dn,name))
return self.fetch()
| mit | Python | |
c07609fa140c54572832f4f8f4787d0991495e60 | check in some migrations | xmaruto/mcord,cboling/xos,cboling/xos,cboling/xos,xmaruto/mcord,jermowery/xos,cboling/xos,xmaruto/mcord,cboling/xos,jermowery/xos,xmaruto/mcord,jermowery/xos,jermowery/xos | planetstack/core/migrations/0002_omf_friendly_default_false.py | planetstack/core/migrations/0002_omf_friendly_default_false.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('core', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='slice',
name='omf_friendly',
field=models.BooleanField(default=False),
),
]
| apache-2.0 | Python | |
18178108ec569ec959c7cac181fa439d99a0170f | convert a patric tab file to a seed directory | linsalrob/PhiSpy,linsalrob/PhiSpy,linsalrob/PhiSpy,linsalrob/PhiSpy | tab2seed.py | tab2seed.py | """
Convert a tab file from PATRIC to a the SEED files that we need for PhiSpy
We need the following files:
1. assigned_functions - a tab separated list of FIG ID and function
2. contigs - the fasta DNA sequence. Note we may download this separately
3. genome - the name of the genome -- may not be required
4. taxonomy - the taxonomy of the genome -- may not be required
5. taxonomy_id - the tax id. -- also may not be required
6. Features/peg/tbl - the tbl that has id,contig_start_stop, [alt ids]
7. Features/rna/tbl - the RNA genes
The files that PhiSpy opens are:
a. dir/contigs
b. dir/Features/peg/tbl
c. dir/assigned_functions
d. dir/Features/rna/tbl
"""
import os
import sys
import argparse
def parse_tab(filename, outputdir):
    """
    Parse a PATRIC tab-separated feature file into minimal SEED files.

    Writes, under *outputdir*:
      - Features/peg/tbl     coding features as "<fig id>\\t<contig>_<start>_<stop>"
      - Features/rna/tbl     RNA features in the same format
      - assigned_functions   "<fig id>\\t<product>" for coding features
      - GENOME               the genome name, taken from the first data row

    :param filename: the PATRIC tab file to parse
    :param outputdir: existing directory to write the SEED files into
    """
    # Create the Features/peg and Features/rna directories as needed.
    for sub in ("Features", "Features/peg", "Features/rna"):
        path = os.path.join(outputdir, sub)
        if not os.path.exists(path):
            os.mkdir(path)

    wrote_genome = False
    # Context managers guarantee the handles are flushed and closed even
    # if a malformed line raises part-way through.
    with open(os.path.join(outputdir, "Features/peg/tbl"), 'w') as peg, \
            open(os.path.join(outputdir, "Features/rna/tbl"), 'w') as rna, \
            open(os.path.join(outputdir, "assigned_functions"), 'w') as asf, \
            open(filename, 'r') as fin:
        for line in fin:
            if line.startswith('genome_id'):
                continue  # column-header row
            p = line.rstrip("\n").split("\t")
            if not wrote_genome:
                # The genome name (column 2) is the same on every row;
                # record it once from the first data row.
                with open(os.path.join(outputdir, "GENOME"), 'w') as gout:
                    gout.write("{}\n".format(p[1]))
                wrote_genome = True
            # Columns: genome_id genome_name accession annotation
            # feature_type patric_id refseq_locus_tag alt_locus_tag
            # uniprotkb_accession start end strand na_length gene product
            # figfam_id plfam_id pgfam_id go ec pathway
            (gid, name, acc, who, ftype, fid, refseq_locus, alt, uni,
             start, stop, strand, length, gene, prod, ffid, plid, pgid,
             go, ec, pw) = p
            # Bug fix: compare coordinates numerically.  The original
            # compared the raw strings, so e.g. "9" > "10" and such
            # coordinate pairs were swapped incorrectly.
            if int(start) > int(stop):
                (start, stop) = (stop, start)
            if "CDS" in ftype:
                peg.write("{}\t{}_{}_{}\n".format(fid, acc, start, stop))
                asf.write("{}\t{}\n".format(fid, prod))
            elif "rna" in ftype.lower():
                rna.write("{}\t{}_{}_{}\n".format(fid, acc, start, stop))
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="Convert a patric tab file to a minimal seed directory")
    parser.add_argument('-f', help='The patric tab file', required=True)
    parser.add_argument('-o', help='output directory', required=True)
    # NOTE(review): -v is parsed but never used; parse_tab takes no
    # verbosity flag.
    parser.add_argument('-v', help='verbose output', action="store_true")
    args = parser.parse_args()

    # Create the output directory if it does not yet exist.
    if not os.path.exists(args.o):
        os.mkdir(args.o)

    parse_tab(args.f, args.o)
45c20be4027ccb7aa6a1c3e643ca164413bd353f | add management command for commenting on outstanding apps about tos changes. | WikipediaLibrary/TWLight,WikipediaLibrary/TWLight,WikipediaLibrary/TWLight,WikipediaLibrary/TWLight,WikipediaLibrary/TWLight | TWLight/applications/management/commands/notify_applicants_tou_changes.py | TWLight/applications/management/commands/notify_applicants_tou_changes.py | import logging
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from django.utils.translation import ugettext as _
from TWLight.applications.models import Application
from TWLight.resources.models import Partner
from django_comments.models import Comment
logger = logging.getLogger(__name__)
class Command(BaseCommand):
    """Comment on outstanding applications after a terms-of-use change.

    Finds applications still PENDING or QUESTION for AVAILABLE or
    WAITLIST partners where the applicant has not (re-)agreed to the
    terms of use, and posts a comment telling them the application is on
    hold until they do.
    """

    def handle(self, *args, **options):
        # Get apps with a status of PENDING or QUESTION for partners with a status of AVAILABLE or WAITLIST
        # where the editor has not agreed to the terms of use.
        pending_apps = (
            Application.objects.filter(
                status__in=[Application.PENDING, Application.QUESTION],
                partner__status__in=[Partner.AVAILABLE, Partner.WAITLIST],
                editor__isnull=False,
                agreement_with_terms_of_use=False
            )
            # Restricted users are excluded from processing.
            .exclude(editor__user__groups__name="restricted")
            .order_by("status", "partner", "date_created")
        )
        # Loop through the apps and add a comment.
        for app in pending_apps:
            # NOTE(review): the message reads "log in agree" -- likely a
            # missing "and"; it is a translated runtime string, so left
            # unchanged here.
            comment = Comment(
                content_object=app,
                site_id=settings.SITE_ID,
                # Translators: This comment is added to pending applications when our terms of use change.
                comment=_("Our terms of use have changed. "
                          "Your applications will not be processed until you log in agree to our updated terms.")
            )
            comment.save()
| mit | Python | |
378e89fd74cfd383c3e1b21fc342194728f66bb7 | Add DogStatsd metrics backend. (#5230) | jean/sentry,JackDanger/sentry,jean/sentry,mvaled/sentry,looker/sentry,mvaled/sentry,ifduyue/sentry,gencer/sentry,ifduyue/sentry,looker/sentry,ifduyue/sentry,beeftornado/sentry,ifduyue/sentry,gencer/sentry,jean/sentry,looker/sentry,looker/sentry,looker/sentry,jean/sentry,mvaled/sentry,mvaled/sentry,beeftornado/sentry,mvaled/sentry,JackDanger/sentry,JackDanger/sentry,gencer/sentry,gencer/sentry,gencer/sentry,beeftornado/sentry,jean/sentry,ifduyue/sentry,mvaled/sentry | src/sentry/metrics/dogstatsd.py | src/sentry/metrics/dogstatsd.py | from __future__ import absolute_import
__all__ = ['DogStatsdMetricsBackend']
from datadog import initialize, statsd
from .base import MetricsBackend
class DogStatsdMetricsBackend(MetricsBackend):
    """Metrics backend that forwards counters and timers to DogStatsd.

    Extra keyword arguments are passed straight to ``datadog.initialize``;
    a ``tags`` kwarg (dict) is applied to every metric emitted.
    """

    def __init__(self, prefix=None, **kwargs):
        # TODO(dcramer): it'd be nice if the initialize call wasn't a global
        self.tags = kwargs.pop('tags', None)
        initialize(**kwargs)
        super(DogStatsdMetricsBackend, self).__init__(prefix=prefix)

    def _format_tags(self, instance, tags):
        """Merge per-call, backend-wide and instance tags into dogstatsd's
        ``["key:value", ...]`` list form.

        Works on a copy of the caller's dict (the previous implementation
        mutated it in place) and returns a falsy value when there is
        nothing to tag.  Backend-wide ``self.tags`` override per-call
        tags, and ``instance`` overrides both, matching the original
        precedence.
        """
        merged = dict(tags) if tags else {}
        if self.tags:
            merged.update(self.tags)
        if instance:
            merged['instance'] = instance
        if merged:
            return ['{}:{}'.format(*i) for i in merged.items()]
        return merged

    def incr(self, key, instance=None, tags=None, amount=1, sample_rate=1):
        """Increment counter *key* by *amount*."""
        statsd.increment(
            self._get_key(key),
            amount,
            sample_rate=sample_rate,
            tags=self._format_tags(instance, tags),
        )

    def timing(self, key, value, instance=None, tags=None, sample_rate=1):
        """Record timing *value* for *key*."""
        statsd.timing(
            self._get_key(key),
            value,
            sample_rate=sample_rate,
            tags=self._format_tags(instance, tags),
        )
| bsd-3-clause | Python | |
56f9ea1ba0026bc21eeb904afaf25606a6186125 | Test per veure que no permetem capçaleres multivaluades | UPC/mailtoticket,UPC/mailtoticket | test/test_regles.py | test/test_regles.py | import unittest
import mock
import settings
from soa.tiquets import GestioTiquets
from soa.identitat import GestioIdentitat
from filtres.nou import FiltreNou
from mailticket import MailTicket
from testhelper import llegir_mail
class TestRegles(unittest.TestCase):
    """Tests for the default-value rules applied by FiltreNou.

    The ticket and identity services are autospec mocks; each test
    installs a ``valors_defecte`` rule matching an address that appears
    in the cc.txt fixture's Cc header and checks that the rule's
    defaults are returned.
    """

    def setUp(self):
        self.tickets = mock.create_autospec(GestioTiquets)
        self.identitat = mock.create_autospec(GestioIdentitat)
        settings.init()

    def test_regla_amb_cc_comprova_primer_valor (self):
        # Rule matches what is presumably the first address of the Cc
        # header in cc.txt (confirm against the fixture).
        settings.set("valors_defecte",
                     [
                         {"order":["Cc"],
                          "match":"mail.qualsevol2@mail.com",
                          "defaults":{"equipResolutor":"666"}
                         }
                     ]
                     )
        msg = llegir_mail("cc.txt")
        f = FiltreNou(msg, self.tickets, self.identitat)
        defaults= f.obtenir_parametres_addicionals()
        self.assertEqual(defaults["equipResolutor"], "666")

    def test_regla_amb_cc_comprova_segon_valor (self):
        # Same as above but matching a different (presumably the second)
        # Cc address from the fixture.
        settings.set("valors_defecte",
                     [
                         {"order":["Cc"],
                          "match":"mail.concret2@mail.com",
                          "defaults":{"equipResolutor":"666"}
                         }
                     ]
                     )
        msg = llegir_mail("cc.txt")
        f = FiltreNou(msg, self.tickets, self.identitat)
        defaults= f.obtenir_parametres_addicionals()
        self.assertEqual(defaults["equipResolutor"], "666")


if __name__ == '__main__':
    unittest.main()
| agpl-3.0 | Python | |
81ceae0740bba58cff0b410a6af6ff803d0a3b36 | Add Greengenes json parser | RNAcentral/rnacentral-import-pipeline,RNAcentral/rnacentral-import-pipeline,RNAcentral/rnacentral-import-pipeline,RNAcentral/rnacentral-import-pipeline | luigi/json_parser_greengenes.py | luigi/json_parser_greengenes.py | """
Copyright [2009-2014] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Usage:
python path/to/this/file.py
--local-scheduler
--destination /path/to/output/files
--input-file /path/to/input/file.json
Optional parameters:
--test # to process just the first few entries (default: False)
"""
import luigi
from rnacentral_entry import RNAcentralEntry
from json_parser import JsonParser
class JsonParserGreengenes(JsonParser): # pylint: disable=W0232
    """
    Luigi Task for converting a Greengenes JSON file into csv files
    that can be loaded into the RNAcentral database.
    """
    database = 'greengenes'

    def create_rnacentral_entries(self):
        """
        Process json file into RNAcentralEntry objects that can be written
        to the output files using standard methods.
        """
        for i, seq in enumerate(self.data): # pylint: disable=E1101
            # In --test mode only the first ~100 entries are processed.
            if self.test and i > 100: # pylint: disable=E1101
                break
            # Wrap assembly_info in a list so the exon loop below works.
            seq['assembly_info'] = [seq['assembly_info']]
            (feature_location_start, feature_location_end) = self.get_feature_start_end(seq['assembly_info']) # pylint: disable=E1101
            rnacentral_entry = RNAcentralEntry(
                database = self.database.upper(),
                division='XXX', # pylint: disable=W0511
                feature_location_end=feature_location_end,
                feature_location_start=feature_location_start,
                feature_type = seq['feature_type'],
                gene = seq['gene'],
                is_composite = 'N',
                lineage = seq['lineage'] + seq['scientific_name'],
                ncbi_tax_id = seq['ncbi_tax_id'],
                note = ' '.join(seq['ontology']),
                # accession "X.Y" splits into parent accession X and
                # sequence version Y (seq_version below)
                parent_accession = seq['primary_accession'].split('.')[0],
                primary_id = seq['xref'][1],
                product = seq['product'],
                project = 'PRJ_GRNGNS',
                sequence = seq['sequence'].upper(),
                seq_version = seq['primary_accession'].split('.')[-1],
                species = seq['scientific_name'],
                # Fixed literature reference: the Greengenes taxonomy paper.
                references=[
                    {
                        'authors': 'McDonald D, Price MN, Goodrich J, Nawrocki EP, DeSantis TZ, Probst A, Andersen GL, Knight R, Hugenholtz P',
                        'location': 'ISME J. 2012 Mar;6(3):610-8',
                        'title': 'An improved Greengenes taxonomy with explicit ranks for ecological and evolutionary analyses of bacteria and archaea',
                        'pmid': 22134646,
                        'doi': '10.1038/ismej.2011.139',
                    }
                ],
            )
            for exon in seq['assembly_info']:
                rnacentral_entry.assembly_info.append(exon)
            # Accession and description depend on the fields set above,
            # so they are filled in last.
            rnacentral_entry.accession = self.get_accession(rnacentral_entry, self.database) # pylint: disable=E1101
            rnacentral_entry.description = self.get_description(rnacentral_entry) # pylint: disable=E1101
            self.entries.append(rnacentral_entry) # pylint: disable=E1101


# main entry point
if __name__ == '__main__':
    luigi.run(main_task_cls=JsonParserGreengenes)
| apache-2.0 | Python | |
035540578674d0c731b2921fcf7081acfd2831b5 | Add tests checking equivalence to optimizers.py | google/jax,tensorflow/probability,google/jax,tensorflow/probability,google/jax,google/jax | tests/optix_test.py | tests/optix_test.py | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the optix module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
from jax import numpy as jnp
from jax.experimental import optimizers
from jax.experimental import optix
from jax.tree_util import tree_leaves
import numpy as onp
STEPS = 50
LR = 1e-2
class OptixTest(absltest.TestCase):
  """Checks optix optimizers against the reference jax.experimental.optimizers.

  Each test runs the same parameters and per-step updates through both the
  reference optimizer triple and the optix optimizer pair for STEPS steps,
  then asserts that the resulting parameters agree.
  """

  def setUp(self):
    super(OptixTest, self).setUp()
    # Shared fixture: a pytree of parameters and one fixed update pytree
    # that is applied at every step.
    self.init_params = (jnp.array([1., 2.]), jnp.array([3., 4.]))
    self.per_step_updates = (jnp.array([500., 5.]), jnp.array([300., 3.]))

  def _assert_equivalent(self, jax_optimizer, optix_optimizer):
    """Runs both optimizers for STEPS steps and checks the params agree.

    Args:
      jax_optimizer: (init, update, get_params) triple from
        jax.experimental.optimizers.
      optix_optimizer: (init, update) pair from jax.experimental.optix.
    """
    # Reference implementation: experimental/optimizers.py
    opt_init, opt_update, get_params = jax_optimizer
    state = opt_init(self.init_params)
    for i in range(STEPS):
      state = opt_update(i, self.per_step_updates, state)
    jax_params = get_params(state)

    # Implementation under test: experimental/optix.py
    opt_init, opt_update = optix_optimizer
    optix_params = self.init_params
    state = opt_init(optix_params)
    for _ in range(STEPS):
      updates, state = opt_update(self.per_step_updates, state)
      optix_params = optix.apply_updates(optix_params, updates)

    # Check equivalence leaf by leaf.
    for x, y in zip(tree_leaves(jax_params), tree_leaves(optix_params)):
      onp.testing.assert_allclose(x, y, rtol=1e-5)

  def test_sgd(self):
    self._assert_equivalent(optimizers.sgd(LR), optix.sgd(LR, 0.0))

  def test_adam(self):
    b1, b2, eps = 0.9, 0.999, 1e-8
    self._assert_equivalent(
        optimizers.adam(LR, b1, b2, eps), optix.adam(LR, b1, b2, eps))

  def test_rmsprop(self):
    decay, eps = .9, 0.1
    self._assert_equivalent(
        optimizers.rmsprop(LR, decay, eps), optix.rmsprop(LR, decay, eps))
if __name__ == '__main__':
absltest.main()
| apache-2.0 | Python | |
2390a9893374166be6e41f99f17318119014e60d | add unit tests | julien6387/supervisors,julien6387/supvisors,julien6387/supervisors,julien6387/supvisors,julien6387/supervisors,julien6387/supvisors,julien6387/supervisors,julien6387/supvisors | supervisors/tests/test_types.py | supervisors/tests/test_types.py | #!/usr/bin/python
#-*- coding: utf-8 -*-
# ======================================================================
# Copyright 2016 Julien LE CLEACH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ======================================================================
import sys
import unittest
class TypesTest(unittest.TestCase):
    """ Test case for the types module. """

    def _check_to_string(self, enumeration, names):
        """ Check that _to_string maps each listed enumeration value back to
        its literal name. """
        for name in names:
            self.assertEqual(name, enumeration._to_string(getattr(enumeration, name)))

    def test_AddressStates(self):
        """ Test the AddressStates enumeration. """
        from supervisors.ttypes import AddressStates
        self._check_to_string(AddressStates,
                              ['UNKNOWN', 'RUNNING', 'SILENT', 'ISOLATING', 'ISOLATED'])

    def test_ApplicationStates(self):
        """ Test the ApplicationStates enumeration. """
        from supervisors.ttypes import ApplicationStates
        self._check_to_string(ApplicationStates,
                              ['UNKNOWN', 'STOPPED', 'STARTING', 'RUNNING', 'STOPPING'])

    def test_DeploymentStrategies(self):
        """ Test the DeploymentStrategies enumeration. """
        from supervisors.ttypes import DeploymentStrategies
        self._check_to_string(DeploymentStrategies,
                              ['CONFIG', 'LESS_LOADED', 'MOST_LOADED'])

    def test_ConciliationStrategies(self):
        """ Test the ConciliationStrategies enumeration. """
        from supervisors.ttypes import ConciliationStrategies
        self._check_to_string(ConciliationStrategies,
                              ['SENICIDE', 'INFANTICIDE', 'USER', 'STOP', 'RESTART'])

    def test_StartingFailureStrategies(self):
        """ Test the StartingFailureStrategies enumeration. """
        from supervisors.ttypes import StartingFailureStrategies
        self._check_to_string(StartingFailureStrategies, ['ABORT', 'CONTINUE'])

    def test_RunningFailureStrategies(self):
        """ Test the RunningFailureStrategies enumeration. """
        from supervisors.ttypes import RunningFailureStrategies
        self._check_to_string(RunningFailureStrategies, ['CONTINUE', 'STOP', 'RESTART'])

    def test_SupervisorsStates(self):
        """ Test the SupervisorsStates enumeration. """
        from supervisors.ttypes import SupervisorsStates
        self._check_to_string(SupervisorsStates,
                              ['INITIALIZATION', 'ELECTION', 'DEPLOYMENT',
                               'OPERATION', 'CONCILIATION'])

    def test_exception(self):
        """ Test the exception InvalidTransition. """
        from supervisors.ttypes import InvalidTransition
        # test with unknown attributes
        with self.assertRaises(InvalidTransition) as exc:
            raise InvalidTransition('invalid transition')
        self.assertEqual('invalid transition', exc.exception.value)
def test_suite():
    """Collect all test cases defined in this module for the test runner."""
    return unittest.findTestCases(sys.modules[__name__])
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
| apache-2.0 | Python | |
ae7f22b5fc606a8415e286ffabd43d3fbb71977c | Add Euler angle conversion tests. | BBN-Q/QGL,BBN-Q/QGL | tests/test_euler.py | tests/test_euler.py | import unittest
from QGL import *
from QGL.Euler import *
from QGL.Cliffords import C1
import QGL.config
try:
from helpers import setup_test_lib
except:
from .helpers import setup_test_lib
class EulerDecompositions(unittest.TestCase):
    """Check that Euler-angle decompositions reproduce the original unitary."""

    # Number of Haar-random unitaries to test per decomposition.
    N_test = 1000

    def test_zyz_decomp(self):
        """ZYZ angles extracted from a random 2x2 unitary rebuild it."""
        for _ in range(self.N_test):
            Uh = haar_unitary(2)
            Ux = zyz_unitary(*zyz_angles(Uh))
            assert is_close(Uh, Ux)

    def test_xyx_decomp(self):
        """XYX angles extracted from a random 2x2 unitary rebuild it."""
        for _ in range(self.N_test):
            Uh = haar_unitary(2)
            Ux = xyx_unitary(*xyx_angles(Uh))
            assert is_close(Uh, Ux)

    def test_xyx_cliffords(self):
        """XYX decomposition is exact on all 24 single-qubit Cliffords."""
        for j in range(24):
            Uxyx = xyx_unitary(*xyx_angles(C1[j]))
            assert is_close(Uxyx, C1[j]), f"{j}"
if __name__ == "__main__":
unittest.main()
| apache-2.0 | Python | |
329c1d9cd515414fe754385ee302197b93eadd20 | implement 15 (15) ツイッターのユーザー名(例えば@xxxxxxx)を,そのユーザーのページへのリンク(<a href="https://twitter.com/#!/xxxxxxx">@xxxxxxx</a>で囲まれたHTML断片)に置換せよ. | mihyaeru21/nlp100 | set02/15.py | set02/15.py | # -*- coding: utf-8 -*-
# (15) ツイッターのユーザー名(例えば@xxxxxxx)を,そのユーザーのページへのリンク(<a href="https://twitter.com/#!/xxxxxxx">@xxxxxxx</a>で囲まれたHTML断片)に置換せよ.
import sys
import csv
import re
# Matches a Twitter @username: '@' followed by word characters.
re_name = re.compile(u'@([a-zA-Z0-9_]+)')
for row in csv.reader(sys.stdin):
    # Column 5 of the CSV holds the tweet text.
    tweet = row[5].decode('utf-8')
    # Wrap each @username in an HTML link to that user's Twitter page.
    replaced = re_name.sub(ur'<a href="https://twitter.com/#!/\1">@\1</a>', tweet)
    # Only emit tweets that actually contained at least one mention.
    if replaced != tweet:
        print replaced.encode('utf-8')
| unlicense | Python | |
89ade49695c8961f23879050dda8aa684603c04b | Fix serializer | CTFd/CTFd,LosFuzzys/CTFd,isislab/CTFd,ajvpot/CTFd,CTFd/CTFd,LosFuzzys/CTFd,isislab/CTFd,isislab/CTFd,CTFd/CTFd,LosFuzzys/CTFd,CTFd/CTFd,LosFuzzys/CTFd,ajvpot/CTFd,isislab/CTFd,ajvpot/CTFd,ajvpot/CTFd | CTFd/utils/exports/serializers.py | CTFd/utils/exports/serializers.py | import json
import six
from collections import defaultdict, OrderedDict
from CTFd.utils.exports.encoders import JSONEncoder
class JSONSerializer(object):
    """Serializes rows from an export query into a JSON file.

    Rows are buffered into buckets keyed by path, then written out on
    close() wrapped in a {"count", "results", "meta"} envelope.
    """
    def __init__(self, query, fileobj):
        # query: iterable of row dicts to export
        # fileobj: binary file-like object the JSON payload is written to
        self.query = query
        self.fileobj = fileobj
        self.buckets = defaultdict(list)
    def serialize(self):
        """Drain the query into buckets, then flush everything to fileobj."""
        for row in self.query:
            self.write(None, row)
        self.close()
    def write(self, path, result):
        """Buffer one row under the given path (None groups all rows together)."""
        self.buckets[path].append(result)
    def wrap(self, result):
        """Wrap a list of rows in the envelope expected by the importer."""
        result = OrderedDict([("count", len(result)), ("results", result)])
        result["meta"] = {}
        return result
    def close(self):
        """Normalize buffered rows and write the UTF-8 encoded JSON payload."""
        for path, result in self.buckets.items():
            result = self.wrap(result)
            # Certain databases (MariaDB) store JSON as LONGTEXT.
            # Before emitting a file we should standardize to valid JSON (i.e. a dict)
            # See Issue #973
            for i, r in enumerate(result["results"]):
                data = r.get("requirements")
                if data:
                    try:
                        # Only re-parse when the value is still a raw string.
                        if isinstance(data, six.string_types):
                            result["results"][i]["requirements"] = json.loads(data)
                    except ValueError:
                        # Not valid JSON; leave the original value untouched.
                        pass
            data = json.dumps(result, cls=JSONEncoder, indent=2)
            self.fileobj.write(data.encode("utf-8"))
| import json
import six
from collections import OrderedDict
from CTFd.utils.exports.encoders import JSONEncoder
class JSONSerializer(object):
    """Serializes rows from an export query into a JSON file.

    Each row is buffered as its own single-element bucket; close() wraps
    every bucket in an envelope and appends it to fileobj.
    """
    def __init__(self, query, fileobj):
        # query: iterable of row dicts to export
        # fileobj: binary file-like object receiving the JSON output
        self.query = query
        self.fileobj = fileobj
        self.buckets = []
    def serialize(self):
        """Drain the query into buckets, then flush them to fileobj."""
        for row in self.query:
            self.write(None, row)
        self.close()
    def write(self, path, result):
        # NOTE(review): each row becomes its own one-element bucket, so the
        # output is one {"count": 1, ...} document per row, concatenated.
        self.buckets.append([result])
    def wrap(self, result):
        """Wrap a list of rows in the {"count", "results", "meta"} envelope."""
        result = OrderedDict([("count", len(result)), ("results", result)])
        result["meta"] = {}
        return result
    def close(self):
        """Normalize buffered rows and write UTF-8 encoded JSON documents."""
        for result in self.buckets:
            result = self.wrap(result)
            # Certain databases (MariaDB) store JSON as LONGTEXT.
            # Before emitting a file we should standardize to valid JSON (i.e. a dict)
            # See Issue #973
            for i, r in enumerate(result["results"]):
                data = r.get("requirements")
                if data:
                    try:
                        # Only re-parse when the value is still a raw string.
                        if isinstance(data, six.string_types):
                            result["results"][i]["requirements"] = json.loads(data)
                    except ValueError:
                        # Not valid JSON; leave the original value untouched.
                        pass
            data = json.dumps(result, cls=JSONEncoder, indent=2)
            self.fileobj.write(data.encode("utf-8"))
| apache-2.0 | Python |
933d364981f2b05cbca3325ee92f0696da5de44e | Create settings.py | InfoAgeTech/django-starter | settings.py | settings.py | # -*- coding: utf-8 -*-
| mit | Python | |
7b6b1426015a83b96395f0c7c112dc53d373647f | Add init file for remediation module. | tensorflow/fairness-indicators,tensorflow/fairness-indicators,tensorflow/fairness-indicators | fairness_indicators/remediation/__init__.py | fairness_indicators/remediation/__init__.py | # Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| apache-2.0 | Python | |
205362c2f068ca22fe40cb6399b071849727ee55 | Test cases for style attribute parsing | brechtm/rinohtype,brechtm/rinohtype,brechtm/rinohtype | tests/test_parse.py | tests/test_parse.py |
import pytest
from rinoh.dimension import DimensionBase, PT, PICA, INCH, MM, CM, PERCENT
from rinoh.style import OptionSet, Bool, Integer
def test_optionset_from_string():
    """OptionSet.from_string matches declared values case-insensitively and
    maps the string 'none' to a literal None only when None is allowed."""
    ONE = 'one'
    TWO = 'two'
    THREE = 'three'
    class TestSet1(OptionSet):
        values = ONE, TWO, THREE
    # Lookup is case-insensitive.
    assert TestSet1.from_string('one') == ONE
    assert TestSet1.from_string('TWO') == TWO
    assert TestSet1.from_string('tHRee') == THREE
    # Strings outside the declared values are rejected.
    with pytest.raises(ValueError):
        TestSet1.from_string('four')
    with pytest.raises(ValueError):
        TestSet1.from_string('none')
    class TestSet2(OptionSet):
        values = None, TWO
    # 'none' (any case) parses to None when None is among the values.
    assert TestSet2.from_string('none') == None
    assert TestSet2.from_string('nONe') == None
    assert TestSet2.from_string('two') == TWO
    with pytest.raises(ValueError):
        TestSet2.from_string('one')
    with pytest.raises(ValueError):
        TestSet2.from_string('False')
def test_bool_from_string():
    """Bool.from_string accepts 'true'/'false' in any letter case and
    rejects everything else, including numeric and single-letter forms."""
    for text in ('true', 'TRUE', 'True'):
        assert Bool.from_string(text) == True
    for text in ('false', 'FALSE', 'FaLSE'):
        assert Bool.from_string(text) == False
    for bad in ('1', '0', 'T', 'f'):
        with pytest.raises(ValueError):
            Bool.from_string(bad)
def test_integer_from_string():
    """Integer.from_string parses signed decimal integers (leading zeros
    allowed) and rejects float and scientific notation."""
    valid = [('1', 1), ('001', 1), ('873654354', 873654354), ('-9', -9)]
    for text, expected in valid:
        assert Integer.from_string(text) == expected
    for bad in ('1e5', '0.5'):
        with pytest.raises(ValueError):
            Integer.from_string(bad)
def test_dimensionbase_from_string():
    """DimensionBase.from_string parses '<number><unit>' strings (optional
    space before the unit, signed decimals, percent) and rejects unknown
    unit names."""
    cases = [
        ('0', 0),
        ('1pt', 1*PT),
        ('10 pt', 10*PT),
        ('25pc', 25*PICA),
        ('1.5 in', 1.5*INCH),
        ('99999mm', 99999*MM),
        ('-2.1 cm', -2.1*CM),
        ('21%', 21.00*PERCENT),
        ('-16.12%', -16.12*PERCENT),
    ]
    for text, expected in cases:
        assert DimensionBase.from_string(text) == expected
    with pytest.raises(ValueError):
        DimensionBase.from_string('20inch')
| agpl-3.0 | Python | |
9112bfe1a4f9253b78c6145f74b12c712cee136b | Create spacewar.py | danielwilson2017/Space-Shooter,HHS-IntroProgramming/Space-Shooter,BautiG/Space-Shooter,eliwoloshin/Space-Shooter,RDanilek/Space-Shooter,voidJeff/Space-Shooter,danielwilson2017/Space-Shooter,WilsonRimberg/Space-Shooter,eliwoloshin/Space-Shooter,VinzentM/Space-Shooter,CriticalD20/Space-Shooter,RDanilek/Space-Shooter,EthanAdner/Space-Shooter,phstearns/Space-Shooter,HaginCodes/Space-Shooter,davidwilson826/Space-Shooter,voidJeff/Space-Shooter,ChubbyPotato/Space-Shooter,anoushkaalavilli/Space-Shooter,averywallis/Space-Shooter,ChubbyPotato/Space-Shooter,CANDYISLIFE/Space-Shooter,SSupattapone/Space-Shooter,marcusjdh/Space-Shooter,nilskingston/Space-Shooter,Funjando/Space-Shooter,Funjando/Space-Shooter,APikielny/Space-Shooter,VinzentM/Space-Shooter,phstearns/Space-Shooter,SSupattapone/Space-Shooter,WilsonRimberg/Space-Shooter,nilskingston/Space-Shooter,CANDYISLIFE/Space-Shooter,CriticalD20/Space-Shooter,HHS-IntroProgramming/Space-Shooter,APikielny/Space-Shooter,ryankynor/Space-Shooter,averywallis/Space-Shooter,ryankynor/Space-Shooter,TheBigBlueBlob/Space-Shooter,HaginCodes/Space-Shooter,marcusjdh/Space-Shooter,davidwilson826/Space-Shooter,TheBigBlueBlob/Space-Shooter,BautiG/Space-Shooter,anoushkaalavilli/Space-Shooter,EthanAdner/Space-Shooter | spacewar.py | spacewar.py | """
spacewar.py
Author: <your name here>
Credit: <list sources used, if any>
Assignment:
Write and submit a program that implements the sandbox platformer game:
https://github.com/HHS-IntroProgramming/Spacewar
"""
| mit | Python | |
ed793b470e55a0fe1c7a0a4b0cc3d8a2774e4628 | verify that the files are actually images | d-grossman/scrapeCars | scripts/verifyImageType.py | scripts/verifyImageType.py | from resnet50 import ResNet50
from keras.preprocessing import image
from imagenet_utils import preprocess_input, decode_predictions
import numpy as np
import json
import time
import sys
from multiprocessing import Pool
import functools
def readTasking(filename):
    """Read a file of newline-delimited JSON objects into a list.

    Args:
        filename: path to a file containing one JSON document per line.

    Returns:
        list of parsed JSON objects; blank lines are skipped.

    Raises:
        ValueError: if a non-blank line is not valid JSON.
    """
    data = []
    # 'with' guarantees the handle is closed even if a line fails to parse.
    with open(filename, 'r') as tasking:
        for task in tasking:
            task = task.strip()
            if not task:
                continue  # tolerate blank/trailing lines
            data.append(json.loads(task))
    return data
def procLine2(l, r):
    """Check that a single tasking entry points at a loadable image.

    Args:
        l: tasking dict; must contain a 'filename' key.
        r: root directory the filenames are relative to.

    Returns:
        (1, filename) if Keras can load the file as an image,
        (0, filename) otherwise.
    """
    img_path = '{0}/{1}'.format(r, l['filename'])
    try:
        # Loading (and resizing) is enough to prove the file is a real image.
        image.load_img(img_path, target_size=(224, 224))
        return (1, l['filename'])
    except Exception:
        # A bare 'except' would also swallow KeyboardInterrupt/SystemExit;
        # Exception is broad enough for any decode or IO failure.
        return (0, l['filename'])
def writeTasking(filename, tasking, bad):
    """Rewrite the tasking file, dropping entries whose image failed to load.

    Args:
        filename: path of the output file (one JSON object per line).
        tasking: list of tasking dicts (each with a 'filename' key).
        bad: iterable of (status, filename) pairs; status 0 marks a bad file.
    """
    # Collect the filenames that failed verification (status == 0).
    badFiles = {name for status, name in bad if status == 0}
    with open(filename, 'w') as outFile:
        for task in tasking:
            if task['filename'] not in badFiles:
                outFile.write(json.dumps(task) + '\n')
def main():
    """Entry point: verify every image listed in the tasking file.

    Usage: verifyImageType.py <tasking_file> <image_root_dir>
    Writes the filtered tasking to '<tasking_file>.new'.
    """
    # Bind the image root directory so pool workers only receive the entry.
    procLine = functools.partial(procLine2, r=sys.argv[2] )
    p = Pool()
    tasking = readTasking(sys.argv[1])
    # Check every image in parallel; each result is a (status, filename) pair.
    files = p.map(procLine,tasking)
    writeTasking(sys.argv[1]+'.new',tasking,files)
if __name__ == '__main__':
main()
| apache-2.0 | Python | |
12c950c09839ad887d76de9a062e36015534c054 | Create activate-devices.py | JeffreyPowell/pi-heating-hub,JeffreyPowell/pi-heating-hub,JeffreyPowell/pi-heating-hub | cron/activate-devices.py | cron/activate-devices.py | apache-2.0 | Python | ||
4a8fe168af19517328187b3ff9ea4f7ae4d48eef | Add DHCPv6 rogue server script. | Vladimir-Ivanov-Git/raw-packet,Vladimir-Ivanov-Git/raw-packet | dhcpv6_rogue_server.py | dhcpv6_rogue_server.py | #!/usr/bin/env python
from base import Base
from network import DHCPv6_raw
from sys import exit
from argparse import ArgumentParser
from ipaddress import IPv6Address
from scapy.all import sniff, ICMPv6, DHCPv6
from socket import socket, AF_PACKET, SOCK_RAW, inet_aton
from base64 import b64encode
from struct import pack
from netaddr import IPAddress
from tm import ThreadManager
from time import sleep
current_network_interface = None
target_mac_address = None
target_ip_address = None
recursive_dns_address = None
Base = Base()
Base.check_user()
Base.check_platform()
tm = ThreadManager(3)
parser = ArgumentParser(description='DHCPv6 Rogue server')
parser.add_argument('-i', '--interface', help='Set interface name for send reply packets')
parser.add_argument('-p', '--prefix', type=str, help='Set network prefix', default='fd00::/64')
parser.add_argument('-f', '--first_suffix_ip', type=str, help='Set first suffix client ip for offering', default='2')
parser.add_argument('-l', '--last_suffix_ip', type=str, help='Set last suffix client ip for offering', default='ff')
parser.add_argument('-t', '--target_mac', type=str, help='Set target MAC address', default=None)
parser.add_argument('-I', '--target_ip', type=str, help='Set client IPv6 address with MAC in --target_mac', default=None)
parser.add_argument('-d', '--dns', type=str, help='Set recursive DNS IPv6 address', default=None)
parser.add_argument('-s', '--dns_search', type=str, help='Set DNS search list', default="test.com")
parser.add_argument('-q', '--quiet', action='store_true', help='Minimal output')
parser.add_argument('--apple', action='store_true', help='Apple devices MiTM')
args = parser.parse_args()
if not args.quiet:
Base.print_banner()
dhcpv6 = DHCPv6_raw()
if args.interface is None:
current_network_interface = Base.netiface_selection()
else:
current_network_interface = args.interface
if args.target_mac is not None:
target_mac_address = str(args.target_mac).lower()
if args.target_ip is not None:
target_ip_address = args.target_ip
your_mac_address = Base.get_netiface_mac_address(current_network_interface)
if your_mac_address is None:
print Base.c_error + "Network interface: " + current_network_interface + " do not have MAC address!"
exit(1)
your_ipv6_link_address = Base.get_netiface_ipv6_link_address(current_network_interface)
if your_ipv6_link_address is None:
print Base.c_error + "Network interface: " + current_network_interface + " do not have IPv6 link local address!"
exit(1)
if args.dns is None:
recursive_dns_address = your_ipv6_link_address
else:
recursive_dns_address = args.dns
if not args.quiet:
print Base.c_info + "Network interface: " + Base.cINFO + current_network_interface + Base.cEND
if args.target_mac is not None:
print Base.c_info + "Target MAC: " + Base.cINFO + args.target_mac + Base.cEND
if args.target_ip is not None:
print Base.c_info + "Target IP: " + Base.cINFO + args.target_ip + Base.cEND
else:
print Base.c_info + "First suffix offer IP: " + Base.cINFO + args.first_suffix_ip + Base.cEND
print Base.c_info + "Last suffix offer IP: " + Base.cINFO + args.last_suffix_ip + Base.cEND
print Base.c_info + "Prefix: " + Base.cINFO + args.prefix + Base.cEND
print Base.c_info + "Router IPv6 address: " + Base.cINFO + your_ipv6_link_address + Base.cEND
print Base.c_info + "DNS IPv6 address: " + Base.cINFO + recursive_dns_address + Base.cEND
def reply(request):
    """Sniffer callback invoked for every captured ICMPv6/DHCPv6 packet.

    NOTE(review): currently only logs the request type; crafting and sending
    the rogue replies appears to be unimplemented still.
    """
    # ICMPv6 REQUESTS
    if request.haslayer(ICMPv6):
        print "ICMPv6 request!"
    # DHCPv6 REQUESTS
    if request.haslayer(DHCPv6):
        print "DHCPv6 request!"
if __name__ == "__main__":
if args.target_ip is not None:
if args.target_mac is None:
print Base.c_error + "Please set target MAC address (--target_mac 00:AA:BB:CC:DD:FF) for target IPv6!"
exit(1)
else:
if args.target_mac is None:
print Base.c_info + "Waiting for a ICMPv6 or DHCPv6 requests ..."
sniff(lfilter=lambda d: d.src != your_mac_address,
filter="icmpv6 or dhcpv6",
prn=reply, iface=current_network_interface)
else:
print Base.c_info + "Waiting for a ICMPv6 or DHCPv6 requests from: " + args.target_mac + " ..."
sniff(lfilter=lambda d: d.src == args.target_mac,
filter="icmpv6 or dhcpv6",
prn=reply, iface=current_network_interface)
| mit | Python | |
4628adc38789f52e8e2ef0cdf600b9fbed7b30ab | Test events (really event __repr__) | python-hyper/wsproto | test/test_events.py | test/test_events.py | import pytest
from h11 import Request
from wsproto.events import (
ConnectionClosed,
ConnectionEstablished,
ConnectionRequested,
)
from wsproto.frame_protocol import CloseReason
def test_connection_requested_repr_no_subprotocol():
    """repr of ConnectionRequested names the event and shows the request
    target when no subprotocol header is offered."""
    method = b'GET'
    target = b'/foo'
    headers = {
        b'host': b'localhost',
        b'sec-websocket-version': b'13',
    }
    http_version = b'1.1'
    req = Request(method=method, target=target, headers=list(headers.items()),
                  http_version=http_version)
    event = ConnectionRequested([], req)
    r = repr(event)
    assert 'ConnectionRequested' in r
    assert target.decode('ascii') in r
def test_connection_requested_repr_with_subprotocol():
    """repr of ConnectionRequested additionally shows the offered
    subprotocol when the request carries a sec-websocket-protocol header."""
    method = b'GET'
    target = b'/foo'
    headers = {
        b'host': b'localhost',
        b'sec-websocket-version': b'13',
        b'sec-websocket-protocol': b'fnord',
    }
    http_version = b'1.1'
    req = Request(method=method, target=target, headers=list(headers.items()),
                  http_version=http_version)
    event = ConnectionRequested([], req)
    r = repr(event)
    assert 'ConnectionRequested' in r
    assert target.decode('ascii') in r
    assert headers[b'sec-websocket-protocol'].decode('ascii') in r
@pytest.mark.parametrize('subprotocol,extensions', [
    ('sproto', None),
    (None, ['fake']),
    ('sprout', ['pretend']),
])
def test_connection_established_repr(subprotocol, extensions):
    """repr of ConnectionEstablished mentions the accepted subprotocol and
    each negotiated extension, whichever of the two are present."""
    event = ConnectionEstablished(subprotocol, extensions)
    r = repr(event)
    if subprotocol:
        assert subprotocol in r
    if extensions:
        for extension in extensions:
            assert extension in r
@pytest.mark.parametrize('code,reason', [
    (CloseReason.NORMAL_CLOSURE, None),
    (CloseReason.NORMAL_CLOSURE, 'because i felt like it'),
    (CloseReason.INVALID_FRAME_PAYLOAD_DATA, 'GOOD GOD WHAT DID YOU DO'),
])
def test_connection_closed_repr(code, reason):
    """repr of ConnectionClosed includes the close code's repr and, when
    one was given, the close reason text."""
    event = ConnectionClosed(code, reason)
    r = repr(event)
    assert repr(code) in r
    if reason:
        assert reason in r
| mit | Python | |
60075ecdc73097c39895193a593688cb3cf103dd | add glucose example | maxalbert/bokeh,aiguofer/bokeh,Karel-van-de-Plassche/bokeh,evidation-health/bokeh,percyfal/bokeh,bsipocz/bokeh,satishgoda/bokeh,daodaoliang/bokeh,caseyclements/bokeh,DuCorey/bokeh,lukebarnard1/bokeh,stuart-knock/bokeh,muku42/bokeh,phobson/bokeh,msarahan/bokeh,timsnyder/bokeh,ahmadia/bokeh,srinathv/bokeh,rs2/bokeh,maxalbert/bokeh,CrazyGuo/bokeh,jplourenco/bokeh,ChristosChristofidis/bokeh,mutirri/bokeh,CrazyGuo/bokeh,sahat/bokeh,philippjfr/bokeh,aiguofer/bokeh,bokeh/bokeh,bsipocz/bokeh,ChristosChristofidis/bokeh,azjps/bokeh,roxyboy/bokeh,saifrahmed/bokeh,tacaswell/bokeh,justacec/bokeh,birdsarah/bokeh,schoolie/bokeh,ericdill/bokeh,phobson/bokeh,jplourenco/bokeh,srinathv/bokeh,bokeh/bokeh,lukebarnard1/bokeh,draperjames/bokeh,phobson/bokeh,xguse/bokeh,mindriot101/bokeh,quasiben/bokeh,rothnic/bokeh,aavanian/bokeh,carlvlewis/bokeh,birdsarah/bokeh,deeplook/bokeh,azjps/bokeh,Karel-van-de-Plassche/bokeh,carlvlewis/bokeh,dennisobrien/bokeh,quasiben/bokeh,canavandl/bokeh,gpfreitas/bokeh,saifrahmed/bokeh,mutirri/bokeh,dennisobrien/bokeh,saifrahmed/bokeh,bokeh/bokeh,tacaswell/bokeh,awanke/bokeh,justacec/bokeh,laurent-george/bokeh,ChinaQuants/bokeh,rs2/bokeh,rothnic/bokeh,KasperPRasmussen/bokeh,roxyboy/bokeh,KasperPRasmussen/bokeh,almarklein/bokeh,ericmjl/bokeh,paultcochrane/bokeh,matbra/bokeh,sahat/bokeh,draperjames/bokeh,stuart-knock/bokeh,DuCorey/bokeh,paultcochrane/bokeh,KasperPRasmussen/bokeh,timsnyder/bokeh,PythonCharmers/bokeh,percyfal/bokeh,clairetang6/bokeh,laurent-george/bokeh,awanke/bokeh,rs2/bokeh,philippjfr/bokeh,ChinaQuants/bokeh,daodaoliang/bokeh,ChinaQuants/bokeh,mutirri/bokeh,ericdill/bokeh,eteq/bokeh,abele/bokeh,tacaswell/bokeh,percyfal/bokeh,timsnyder/bokeh,stonebig/bokeh,matbra/bokeh,gpfreitas/bokeh,matbra/bokeh,ptitjano/bokeh,evidation-health/bokeh,Karel-van-de-Plassche/bokeh,srinathv/bokeh,khkaminska/bokeh,gpfreitas/bokeh,ahmadia/bokeh,lukebarnard1/bokeh,ahmadia/bokeh,birdsarah/bokeh,htygithub/b
okeh,laurent-george/bokeh,rs2/bokeh,jakirkham/bokeh,xguse/bokeh,daodaoliang/bokeh,msarahan/bokeh,ahmadia/bokeh,bokeh/bokeh,xguse/bokeh,justacec/bokeh,msarahan/bokeh,khkaminska/bokeh,ChristosChristofidis/bokeh,htygithub/bokeh,jplourenco/bokeh,ericdill/bokeh,eteq/bokeh,saifrahmed/bokeh,satishgoda/bokeh,alan-unravel/bokeh,deeplook/bokeh,deeplook/bokeh,schoolie/bokeh,schoolie/bokeh,akloster/bokeh,rhiever/bokeh,josherick/bokeh,timsnyder/bokeh,timothydmorton/bokeh,philippjfr/bokeh,maxalbert/bokeh,muku42/bokeh,paultcochrane/bokeh,timsnyder/bokeh,schoolie/bokeh,ptitjano/bokeh,schoolie/bokeh,ericmjl/bokeh,caseyclements/bokeh,almarklein/bokeh,ptitjano/bokeh,maxalbert/bokeh,stonebig/bokeh,josherick/bokeh,Karel-van-de-Plassche/bokeh,canavandl/bokeh,quasiben/bokeh,ptitjano/bokeh,mindriot101/bokeh,rhiever/bokeh,roxyboy/bokeh,DuCorey/bokeh,dennisobrien/bokeh,azjps/bokeh,phobson/bokeh,rhiever/bokeh,akloster/bokeh,khkaminska/bokeh,draperjames/bokeh,tacaswell/bokeh,rhiever/bokeh,lukebarnard1/bokeh,akloster/bokeh,josherick/bokeh,awanke/bokeh,roxyboy/bokeh,caseyclements/bokeh,clairetang6/bokeh,akloster/bokeh,matbra/bokeh,abele/bokeh,xguse/bokeh,KasperPRasmussen/bokeh,caseyclements/bokeh,jakirkham/bokeh,awanke/bokeh,almarklein/bokeh,muku42/bokeh,rothnic/bokeh,draperjames/bokeh,jakirkham/bokeh,rothnic/bokeh,bsipocz/bokeh,eteq/bokeh,evidation-health/bokeh,bsipocz/bokeh,stonebig/bokeh,satishgoda/bokeh,paultcochrane/bokeh,ChristosChristofidis/bokeh,rs2/bokeh,philippjfr/bokeh,jakirkham/bokeh,alan-unravel/bokeh,alan-unravel/bokeh,mindriot101/bokeh,clairetang6/bokeh,daodaoliang/bokeh,msarahan/bokeh,aiguofer/bokeh,canavandl/bokeh,laurent-george/bokeh,aavanian/bokeh,khkaminska/bokeh,abele/bokeh,ptitjano/bokeh,jplourenco/bokeh,DuCorey/bokeh,satishgoda/bokeh,ericmjl/bokeh,ChinaQuants/bokeh,phobson/bokeh,azjps/bokeh,timothydmorton/bokeh,abele/bokeh,gpfreitas/bokeh,htygithub/bokeh,aiguofer/bokeh,muku42/bokeh,birdsarah/bokeh,clairetang6/bokeh,carlvlewis/bokeh,KasperPRasmussen/bokeh,jakirkham/bokeh,Py
thonCharmers/bokeh,timothydmorton/bokeh,josherick/bokeh,mutirri/bokeh,aavanian/bokeh,evidation-health/bokeh,percyfal/bokeh,DuCorey/bokeh,PythonCharmers/bokeh,carlvlewis/bokeh,CrazyGuo/bokeh,Karel-van-de-Plassche/bokeh,alan-unravel/bokeh,stonebig/bokeh,ericmjl/bokeh,aavanian/bokeh,sahat/bokeh,azjps/bokeh,aavanian/bokeh,eteq/bokeh,stuart-knock/bokeh,justacec/bokeh,stuart-knock/bokeh,bokeh/bokeh,deeplook/bokeh,dennisobrien/bokeh,ericdill/bokeh,draperjames/bokeh,htygithub/bokeh,CrazyGuo/bokeh,philippjfr/bokeh,percyfal/bokeh,mindriot101/bokeh,PythonCharmers/bokeh,aiguofer/bokeh,ericmjl/bokeh,timothydmorton/bokeh,srinathv/bokeh,dennisobrien/bokeh,canavandl/bokeh | examples/plotting/file/glucose.py | examples/plotting/file/glucose.py |
from bokeh.sampledata import glucose
from bokeh.plotting import *
# One day of continuous glucose readings (timestamp-indexed DataFrame).
day = glucose.data.ix['2010-10-06']
# Flag out-of-range readings -- presumably mg/dL thresholds; TODO confirm.
highs = day[day['glucose'] > 180]
lows = day[day['glucose'] < 80]
output_file("glucose.html", title="glucose.py example")
# hold() keeps subsequent glyph calls drawing onto the same figure.
hold()
# Index cast to int then /1e6 -- looks like ns-epoch to ms conversion; verify.
line(day.index.astype('int')/1000000, day['glucose'], color='grey', tools="pan,zoom,resize")
scatter(highs.index.astype('int')/1000000, highs['glucose'], color='red', radius=4, legend="high")
scatter(lows.index.astype('int')/1000000, lows['glucose'], color='blue', radius=4, legend="low")
#figure()
# open a browser
show()
| bsd-3-clause | Python | |
6f0fdb4d7b1202c7ab07d01cf34954ee725df6fe | add basic-calculator | EdisonAlgorithms/LeetCode,EdisonAlgorithms/LeetCode,EdisonAlgorithms/LeetCode,zeyuanxy/leet-code,zeyuanxy/leet-code,zeyuanxy/leet-code | vol5/basic-calculator/basic-calculator.py | vol5/basic-calculator/basic-calculator.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: Zeyuan Shang
# @Date: 2015-11-18 17:21:42
# @Last Modified by: Zeyuan Shang
# @Last Modified time: 2015-11-18 17:21:51
class Solution:
    """Evaluates infix arithmetic expressions via the shunting-yard algorithm.

    Supports non-negative integer literals, the binary operators + - * /
    (division truncates toward zero), parentheses, and embedded spaces.
    """

    operators = ['+', '-', '*', '/']

    def getPriority(self, operator):
        """Return the binding strength of an operator.

        '(' and any unrecognized token rank lowest (0) so they never pop
        pending operators off the stack.
        """
        precedence = {'+': 1, '-': 1, '*': 2, '/': 2}
        return precedence.get(operator, 0)

    def toRPN(self, s):
        """Convert an infix expression string to a reverse-Polish token list."""
        output = []        # finished RPN tokens
        pending = []       # operator/paren stack
        digits = []        # characters of the number currently being read
        for ch in s:
            if ch.isdigit():
                digits.append(ch)
                continue
            # Any non-digit terminates the number in progress.
            if digits:
                output.append(''.join(digits))
                digits = []
            if ch in self.operators:
                # Pop operators of equal or higher precedence first
                # (left-associativity).
                while pending and self.getPriority(pending[-1]) >= self.getPriority(ch):
                    output.append(pending.pop())
                pending.append(ch)
            elif ch == '(':
                pending.append(ch)
            elif ch == ')':
                while pending and pending[-1] != '(':
                    output.append(pending.pop())
                pending.pop()  # discard the matching '('
            # Anything else (e.g. whitespace) is ignored.
        if digits:
            output.append(''.join(digits))
        while pending:
            output.append(pending.pop())
        return output

    def calcValue(self, x, y, operator):
        """Apply a binary operator; '/' truncates toward zero.

        Raises KeyError for an unrecognized operator.
        """
        if operator == '+':
            return x + y
        if operator == '-':
            return x - y
        if operator == '*':
            return x * y
        if operator == '/':
            # float division then int() truncates toward zero (not floor).
            return int(float(x) / y)
        raise KeyError(operator)

    def evalRPN(self, tokens):
        """Evaluate a reverse-Polish token list and return the integer result."""
        stack = []
        for token in tokens:
            if token in self.operators:
                right = stack.pop()
                left = stack.pop()
                stack.append(self.calcValue(left, right, token))
            else:
                stack.append(int(token))
        return stack[0]

    def calculate(self, s):
        """Evaluate an infix arithmetic expression string."""
        return self.evalRPN(self.toRPN(s))
54c1028157447233014890419ab869a8c1ca0c3a | Add linux installer | r4mos/youtube-dl-simple-server,r4mos/youtube-dl-simple-server,r4mos/youtube-dl-simple-server | installation/server-linux.py | installation/server-linux.py | #!/usr/bin/env python
#-*- encoding:utf-8 -*-
import os
import sys
import urllib2
import subprocess
# Python 2 installer (print statements, urllib2): fetch the prebuilt
# youtube-dl-simple-server binary into ~/.config/ydlss, make it
# executable, and append an autostart line to ~/.profile.
installationPath = os.path.expanduser('~') + '/.config/ydlss'
serverLocalLocation = installationPath + '/youtube-dl-simple-server'
serverWebLocation = 'https://github.com/r4mos/youtube-dl-simple-server/raw/master/bin/server/linux/youtube-dl-simple-server'
try:
    print 'Cheeking installation folder'
    if not os.path.isdir(installationPath):
        os.makedirs(installationPath)
    print 'Downloading server'
    # Download the binary and write it locally in binary mode.
    latest = urllib2.urlopen(serverWebLocation)
    output = open(serverLocalLocation, 'wb')
    output.write(latest.read())
    output.close()
    print 'Changing permissions'
    subprocess.check_output(['chmod', 'a+x', serverLocalLocation])
    print 'Adding to .profile file to autostart'
    # Wait 5s at login before launching, then background the server.
    profile = open (os.path.expanduser('~') + '/.profile', 'a')
    profile.write('sleep 5 && ' + serverLocalLocation + ' &\n')
    profile.close()
    print '\nCompleted installation but server is stopped'
    print 'Reboot your computer or start the server manually:'
    print serverLocalLocation + ' --verbose'
except:
    # NOTE(review): bare except hides the actual failure reason; only a
    # generic message is printed before exiting non-zero.
    print 'Fail. An error occurred'
sys.exit(1) | mit | Python | |
46d6b7451bb8e295751d771228782cfbbcb8ecc7 | Add dask_client_from_ipclient function | clarkfitzg/dask,esc/dask,wiso/dask,dask/dask,pombredanne/dask,PhE/dask,mraspaud/dask,chrisbarber/dask,dask/dask,blaze/dask,gameduell/dask,freeman-lab/dask,jayhetee/dask,simudream/dask,cowlicks/dask,jcrist/dask,mikegraham/dask,pombredanne/dask,clarkfitzg/dask,mraspaud/dask,ContinuumIO/dask,esc/dask,ssanderson/dask,freeman-lab/dask,jakirkham/dask,mrocklin/dask,ContinuumIO/dask,marianotepper/dask,simudream/dask,cpcloud/dask,jcrist/dask,PhE/dask,marianotepper/dask,hainm/dask,jayhetee/dask,mrocklin/dask,wiso/dask,vikhyat/dask,blaze/dask,jakirkham/dask,vikhyat/dask,ssanderson/dask,hainm/dask | dask/distributed/ipython_utils.py | dask/distributed/ipython_utils.py | from .scheduler import Scheduler
from .worker import Worker
from .client import Client
def dask_client_from_ipclient(client):
    """
    Build a dask ``Client`` from an IPython parallel client.

    A new ``Scheduler`` is created on the IPython client's ZMQ context,
    plus one ``Worker`` per entry of ``len(client)`` (presumably the
    engine count -- confirm).  Note the *return value* is the dask
    Client, not the scheduler; the local ``scheduler``/``workers``
    references are dropped on return, so the objects are assumed to keep
    themselves alive via their sockets -- TODO confirm.
    """
    # Reuse the IPython client's ZMQ context for the scheduler sockets.
    zmq_context = client._context
    scheduler = Scheduler(context=zmq_context)
    workers = [Worker(scheduler.address_to_workers) for i in range(len(client))]
    dask_client = Client(scheduler.address_to_clients)
    return dask_client
| bsd-3-clause | Python | |
9d88c6f1b0a654a9b89350d475d1a6c46116d917 | Add query | tkem/mopidy-internetarchive | mopidy_internetarchive/query.py | mopidy_internetarchive/query.py | from __future__ import unicode_literals
import collections
import logging
logger = logging.getLogger(__name__)
QUERY_FIELDS = {
'uri',
'track_name',
'track_no',
'album',
'artist',
'composer',
'performer',
'albumartist',
'genre',
'date',
'comment',
'any'
}
DEFAULT_FILTERS = dict.fromkeys(QUERY_FIELDS, lambda qv, value: False)
TRACK_FILTERS = dict(
DEFAULT_FILTERS,
uri=lambda qv, track: qv == track.uri,
track_name=lambda qv, track: qv == track.name,
track_no=lambda qv, track: qv.isdigit() and int(qv) == track.track_no,
album=lambda qv, track: track.album and qv == track.album.name,
artist=lambda qv, track: any(
qv == a.name for a in track.artists
),
composer=lambda qv, track: any(
qv == a.name for a in track.composers
),
performer=lambda qv, track: any(
qv == a.name for a in track.performers
),
albumartist=lambda qv, track: track.album and any(
qv == a.name for a in track.album.artists
),
genre=lambda qv, track: qv == track.genre,
date=lambda qv, track: qv == track.date,
comment=lambda qv, track: qv == track.comment
)
ALBUM_FILTERS = dict(
DEFAULT_FILTERS,
uri=lambda qv, album: qv == album.uri,
album=lambda qv, album: qv == album.name,
artist=lambda qv, album: any(
qv == a.name for a in album.artists
),
albumartist=lambda qv, album: any(
qv == a.name for a in album.artists
),
date=lambda qv, album: qv == album.date
)
ARTIST_FILTERS = dict(
DEFAULT_FILTERS,
uri=lambda qv, artist: qv == artist.uri,
artist=lambda qv, artist: qv == artist.name
)
# setup 'any' filters
def _any_filter(filtermap):
filters = [filtermap[key] for key in filtermap.keys() if key != 'any']
def any_filter(qv, value):
return any(f(qv, value) for f in filters)
return any_filter
# Register the combined matcher under the pseudo-field 'any' in each
# filter table, so querying 'any' matches when any real field matches.
TRACK_FILTERS['any'] = _any_filter(TRACK_FILTERS)
ALBUM_FILTERS['any'] = _any_filter(ALBUM_FILTERS)
ARTIST_FILTERS['any'] = _any_filter(ARTIST_FILTERS)
class Query(collections.Mapping):
    """Read-only mapping of query field -> list of search values.

    Python 2 code (``unicode``, ``iteritems``).  Unless ``exact`` is
    given, values are wrapped in ``QV`` so matching is case-insensitive
    substring containment.
    """
    class QV(unicode):
        """Query value with case-insensitive *substring* comparison.

        ``qv == other`` is true when the stripped, lowercased query text
        occurs anywhere inside ``other``.
        """
        def __new__(cls, value):
            return super(Query.QV, cls).__new__(cls, value.strip().lower())
        def __eq__(self, other):
            # Falsy ``other`` (None, '') never matches.
            return other and self in other.lower()
        def __ne__(self, other):
            return not other or self not in other.lower()
        def __repr__(self):
            return 'qv' + super(Query.QV, self).__repr__()
        # Substring equality breaks the eq/hash contract, so QV is
        # deliberately unhashable.
        __hash__ = None
    def __init__(self, query, exact=False):
        """Validate *query* and normalise each field to a list of values.

        Raises LookupError for unknown fields and for missing or empty
        values; a scalar value is wrapped into a one-element list.
        """
        self.__query = {}
        for field, values in query.iteritems():
            if field not in QUERY_FIELDS:
                raise LookupError('Invalid query field "%s"' % field)
            if not values:
                raise LookupError('Missing query value for "%s"' % field)
            if not hasattr(values, '__iter__'):
                values = [values]
            if not all(values):
                raise LookupError('Missing query value for "%s"' % field)
            if exact:
                self.__query[field] = values
            else:
                # Non-exact queries match case-insensitive substrings.
                self.__query[field] = [self.QV(value) for value in values]
    def __getitem__(self, key):
        return self.__query.__getitem__(key)
    def __iter__(self):
        return self.__query.__iter__()
    def __len__(self):
        return self.__query.__len__()
    def filter_tracks(self, tracks):
        """Return the tracks matching every field of this query."""
        return filter(self._get_filter(TRACK_FILTERS), tracks)
    def filter_albums(self, albums):
        """Return the albums matching every field of this query."""
        return filter(self._get_filter(ALBUM_FILTERS), albums)
    def filter_artists(self, artists):
        """Return the artists matching every field of this query."""
        return filter(self._get_filter(ARTIST_FILTERS), artists)
    def _get_filter(self, filtermap):
        """Build an AND predicate: every query value of every field must match."""
        from functools import partial
        filters = []
        for field, values in self.__query.iteritems():
            filters.extend(partial(filtermap[field], qv) for qv in values)
        def filterfunc(model):
            return all(f(model) for f in filters)
        return filterfunc
| apache-2.0 | Python | |
93e19ab50567e045daf0e35d856033303be70192 | Implement first test cases | kfricke/micropython-esp8266uart | test_esp8266uart.py | test_esp8266uart.py | import esp8266uart
esp = esp8266uart.ESP8266(1, 115200)
print('Testing generic methods')
print('=======================')
print('AT startup...')
if esp.test():
print('Success!')
else:
print('Failed!')
#print('Soft-Reset...')
#if esp.reset():
# print('Success!')
#else:
# print('Failed!')
print('Another AT startup...')
if esp.test():
print('Success!')
else:
print('Failed!')
print()
print('Testing WIFI methods')
print('====================')
wifi_mode = 1
print("Testing get_mode/set_mode of value '%s'(%i)..." % (esp8266uart.WIFI_MODES[wifi_mode], wifi_mode))
esp.set_mode(wifi_mode)
if esp.get_mode() == wifi_mode:
print('Success!')
else:
print('Failed!')
print('Disconnecting from WLAN...')
if esp.disconnect():
print('Success!')
else:
print('Failed!')
print('Disconnecting from WLAN again...')
if esp.disconnect():
print('Success!')
else:
print('Failed!')
print('Checking if not connected WLAN...')
if esp.get_accesspoint() == None:
print('Success!')
else:
print('Failed!')
print('Scanning for WLANs...')
wlans = esp.list_all_accesspoints()
for wlan in wlans:
print(wlan)
print("Scanning for WLAN '%s'..." % (wlan['ssid']))
for wlan2 in esp.list_accesspoints(wlan['ssid']):
print(wlan2)
print('Setting access point mode...')
if esp.set_mode(esp8266uart.WIFI_MODES['Access Point + Station']):
print('Failed!')
else:
print('Success!')
| mit | Python | |
8ec7492658bab8d6b0fba5d6b49a58b6408f5fa2 | Add framework for graphing temporal time | PinPinIre/Final-Year-Project,PinPinIre/Final-Year-Project,PinPinIre/Final-Year-Project | src/graph_run_algo.py | src/graph_run_algo.py | import dateutil.parser
import time
import matplotlib.pyplot as plt
from operator import add
from datetime import datetime
from gensim import corpora, models, similarities
minute = 60
hour = 3600
cdict = "%scorpus.dict"
ccorpus = "%scorpus.mm"
corpus_model = "%s.%s"
def gen_graph(figure, x, y, subtitle, xlabel, ylabel, scale):
    """Plot y against x on *figure* as a dashed line with circle markers.

    *scale* sets the y-axis scale (e.g. 'log' or 'linear').  The figure
    is mutated in place; nothing is returned.
    """
    figure.suptitle(subtitle, fontsize=14, fontweight='bold')
    ax = figure.add_subplot(111)
    figure.subplots_adjust(top=0.85)
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    ax.set_yscale(scale)
    ax.plot(x, y, linestyle='--', marker='o')
def load_data(sizes, algorithm):
    """Load gensim dictionaries, corpora and trained models for each size.

    File names come from the module-level templates (cdict, ccorpus,
    corpus_model); *algorithm* selects the model file suffix (e.g. 'lda').
    NOTE(review): every model is loaded with LdaModel.load regardless of
    *algorithm* -- confirm that is intended for non-LDA suffixes.
    """
    dictionaries = [corpora.Dictionary.load(cdict % size) for size in sizes]
    corpi = [corpora.MmCorpus(ccorpus % size) for size in sizes]
    corpus_models = [models.ldamodel.LdaModel.load(corpus_model % (size, algorithm)) for size in sizes]
    dict_sizes = [len(x) for x in dictionaries]
    return dict_sizes, corpus_models, corpi, dictionaries
def read_logfile(path):
    """Parse a run-time log into (total, train, build) lists of seconds.

    Each log line ends with a tab-separated "H:MM:SS[.frac]" duration;
    lines alternate training time (even index) and build time (odd
    index).

    Fixes vs. the original: totals are materialised as a list (under
    Python 3, ``map()`` returned a lazy iterator that could only be
    consumed once), and the time-unit constants are inlined so the
    function no longer depends on module globals.
    """
    train_times = []
    build_times = []
    with open(path) as log_file:
        for i, line in enumerate(log_file):
            bt_time = line.strip().split("\t")[-1]
            hours, mins, seconds = bt_time.split(":")
            # 3600 s/hour, 60 s/minute; fractional seconds are kept.
            total = 3600 * int(hours) + 60 * int(mins) + float(seconds)
            if i % 2 == 0:
                train_times.append(total)
            else:
                build_times.append(total)
    # Pairwise sum; zip truncates if the log ends on an unmatched line.
    total_times = [t + b for t, b in zip(train_times, build_times)]
    return total_times, train_times, build_times
def main():
corp_dict_size = plt.figure()
corp_time = plt.figure()
corp_build = plt.figure()
total_time = plt.figure()
total_times, train_times, build_times = read_logfile("runtimes.log")
sizes = [1000, 5000, 10000, 15000, 20000, 25000, 30000]
dict_sizes, corpus_models, corpi, dictionaries = load_data(sizes, "lda")
gen_graph(corp_dict_size, sizes, dict_sizes, 'Corpus size and dictionary features', "corpus size", "dictionary size", 'log')
gen_graph(corp_time, sizes, train_times, 'Corpus size and train time', "corpus size", "training time", 'log')
gen_graph(corp_build, sizes, build_times, 'Corpus size and build time', "corpus size", "build time", 'log')
gen_graph(total_time, sizes, total_times, 'Corpus size and total time', "corpus size", "total time", 'log')
plt.show()
if __name__ == "__main__":
main()
| mit | Python | |
87fe0b7ff745e4057e4b60a1a9a75fe57581b2d5 | add exp4 script | ManuelMBaumann/opt_tau,ManuelMBaumann/opt_tau | num_exper/exp4.py | num_exper/exp4.py | import os
import numpy as np
# Number of frequency interval endpoints per sweep.  This was named
# ``np``, which shadowed the ``import numpy as np`` above and broke
# every np.arange/np.logspace/np.log10 call below.
n_ticks = 4
fmin = 1.0
fmax = 9.0
df = 0.5
# Solver command shared by every run.  NOTE(review): ``-tau_im`` has a
# single dash while every other flag uses two -- looks like a typo;
# confirm against elast_wedge.py's argument parser before changing it.
str1 = 'python3 elast_wedge.py --ndims=2 --dx=100.0 --dy=100.0 --dz=100.0 --df=0.1 --degree=1 --damping=0.5 --maxit=300 \
        --tol=1e-8 --dg_pp=0 --tau_re=-100 -tau_im=-0.7 --block=True \
        --plots=False --plot_resnrm=True --solver_flag=0 --nprocs=8'
for i in np.arange(fmin + df, fmax, df):
    # Split [i, fmax] into log-spaced ticks and run the solver on each
    # of the four resulting frequency intervals.
    w_tick3 = np.logspace(np.log10(i), np.log10(fmax), num=n_ticks, endpoint=True)
    int1 = ' --freq=[1,' + str(i) + ']'
    int2 = ' --freq=[' + str(i) + ',' + str(w_tick3[1]) + ']'
    int3 = ' --freq=[' + str(w_tick3[1]) + ',' + str(w_tick3[2]) + ']'
    int4 = ' --freq=[' + str(w_tick3[2]) + ',9]'
    os.system(str1 + int1)
    os.system(str1 + int2)
    os.system(str1 + int3)
    os.system(str1 + int4)
2fa721b24891fcd8170d87328334c05faec9cb9a | add hamming char dist function | ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms | cryptography/hamming_distance/python/hamming_char_dist.py | cryptography/hamming_distance/python/hamming_char_dist.py | """
Determine the hamming distance, at the character level, between two equal length strings a and b.
"""
def hamming_char_dist(a, b):
    """Return the number of positions at which *a* and *b* differ.

    Raises ValueError when the strings are not the same length, since
    the Hamming distance is only defined for equal-length sequences.
    """
    if len(a) != len(b):
        raise ValueError("a and b are unequal lengths")
    dist = sum(1 for x, y in zip(a, b) if x != y)
    return dist
c679b614b638123d846940718bb3fd27ed3078ce | Add test for fftpack. | chadnetzer/numpy-gaurdro,efiring/numpy-work,jasonmccampbell/numpy-refactor-sprint,efiring/numpy-work,teoliphant/numpy-refactor,illume/numpy3k,chadnetzer/numpy-gaurdro,teoliphant/numpy-refactor,Ademan/NumPy-GSoC,jasonmccampbell/numpy-refactor-sprint,chadnetzer/numpy-gaurdro,illume/numpy3k,jasonmccampbell/numpy-refactor-sprint,teoliphant/numpy-refactor,Ademan/NumPy-GSoC,efiring/numpy-work,illume/numpy3k,Ademan/NumPy-GSoC,efiring/numpy-work,teoliphant/numpy-refactor,Ademan/NumPy-GSoC,teoliphant/numpy-refactor,jasonmccampbell/numpy-refactor-sprint,chadnetzer/numpy-gaurdro,illume/numpy3k | numpy/fft/tests/test_fftpack.py | numpy/fft/tests/test_fftpack.py | import sys
from numpy.testing import *
set_package_path()
from numpy.fft import *
restore_path()
class test_fftshift(NumpyTestCase):
    # Old-style numpy test class (pre-nose NumpyTestCase API).
    # NOTE(review): the class name says "fftshift" but the check below
    # targets fft's ``n`` argument.
    def check_fft_n(self):
        # fft must reject an explicit transform length of 0.
        self.failUnlessRaises(ValueError,fft,[1,2,3],0)
if __name__ == "__main__":
NumpyTest().run()
| bsd-3-clause | Python | |
9436af51e8b823fa83358b40f94dcd07970aea6b | test test | adriennekarnoski/data-structures,adriennekarnoski/data-structures | src/test_deque.py | src/test_deque.py | """Test functions for deque module."""
import pytest
def test_queue_is_instance_of_queue_object():
    """A newly constructed Deque is an instance of Deque."""
    from deque import Deque
    d = Deque()
    assert isinstance(d, Deque)
def test_queue_is_instance_of_doubly_linked_list():
    """Deque is backed by a DoublyLinkedList held via composition."""
    from deque import Deque
    from doubly_linked_list import DoublyLinkedList
    d = Deque()
    assert isinstance(d._doubly_linked_list, DoublyLinkedList)
def test_enqueue_adds_a_value(deque_fixture):
    """append() stores the value at the tail of the backing linked list."""
    deque_fixture.append(2)
    assert deque_fixture._doubly_linked_list.tail.data == 2
# def test_append_adds_multiple_values_and_checks_front(deque_fixture):
# """Test the append method adds value."""
# deque_fixture.append(2)
# deque_fixture.append(3)
# assert deque_fixture._doubly_linked_list.tail.data == 2
# def test_dequeue_removes_first_node_added(deque_fixture):
# """Test that node dequeued is first node added."""
# deque_fixture.append(2)
# remove = deque_fixture.popleft()
# assert remove == 2
# def test_popleft_removes_first_node_added_even_with_multiple_nodes(deque_fixture):
# """Test that node popleftd is first node added even with multiple nodes."""
# deque_fixture.append(2)
# deque_fixture.append(3)
# remove = deque_fixture.popleft()
# assert remove == 2 | mit | Python | |
7a3a7a8d5a397c886086cc87fff3f058921d06d2 | add IntegerListField for keeping major selections | jittat/ku-eng-direct-admission,jittat/ku-eng-direct-admission,jittat/ku-eng-direct-admission | application/fields.py | application/fields.py | from django.db import models
class IntegerListField(models.Field):
    """
    IntegerListField keeps a list of int as a comma-separated string.

    >>> g = IntegerListField()
    >>> g.get_db_prep_value([1,2,-1,20,30,40,-100])
    '1,2,-1,20,30,40,-100'
    >>> g.to_python('1,2,-10,3,4,-100,7')
    [1, 2, -10, 3, 4, -100, 7]
    """
    # Python 2 / old-Django hook that routes attribute assignment
    # through to_python().
    __metaclass__ = models.SubfieldBase
    def db_type(self):
        # Stored as a TEXT column.
        return 'text'
    def to_python(self, value):
        """Deserialize a comma-separated string into a list of ints.

        Lists pass through unchanged; None and '' become [].
        """
        if isinstance(value, list):
            return value
        if value is None or value == '':
            return []
        return [int(r) for r in value.split(',')]
    def get_db_prep_value(self, value):
        """Serialize an iterable of ints into a comma-separated string."""
        return ','.join(str(r) for r in value)
| agpl-3.0 | Python | |
985129b190cbc26700f4f40e90e0b32984d52ca1 | Add example script for updating useremails. Fixes #1041 | devilry/devilry-django,devilry/devilry-django,devilry/devilry-django,devilry/devilry-django | not_for_deploy/sysadmin_example_scripts/update_all_useremails_example.py | not_for_deploy/sysadmin_example_scripts/update_all_useremails_example.py | #!/usr/bin/env python
import django
import os
from django.contrib.auth import get_user_model
from django.utils import timezone
from django.db import transaction
class ProgressPrintIterator:
"""
Progress print iterator. Useful to print progress of long running scripts.
(Copied from ievv_opensource since we are a bit behind on the versions in master)
Example::
queryset = MyModel.objects
total = queryset.count()
for obj, is_end_of_group in ProgressPrintIterator(
iterator=queryset.iterator(),
total_count=total,
what='Doing something',
items_per_group=500):
# Do something with ``obj``. If you want to do something after 500 items has been processed
# including the last iteration (which may be less than 500 items),
# use ``if is_end_of_group``
"""
def __init__(self, iterator, total_count, what, items_per_group=500, log_function=None):
"""
Args:
iterator: Some iterator, such as a ``queryset.iterator()``.
total_count: Total number of items.
what: A message to print when printing progress
items_per_group: Items per group - we print progress each time we have processed this number of items.
log_function: A log function. For management scripts, you want to set this to ``self.stdout.write``.
"""
self.iterator = iterator
self.total_count = total_count
self.what = what
self.items_per_group = items_per_group
self.log_function = log_function or print
def __iter__(self):
start_time = timezone.now()
for index, item in enumerate(self.iterator, start=1):
progress_percent = index / self.total_count * 100
is_end_of_group = (index % self.items_per_group == 0) or (index == self.total_count)
yield item, is_end_of_group
if is_end_of_group:
now = timezone.now()
time_used = now - start_time
if progress_percent > 0:
estimated_end_delta = time_used / progress_percent * (100 - progress_percent)
estimated_end_minutes = round(estimated_end_delta.total_seconds() / 60, 2)
else:
estimated_end_minutes = 'UNKNOWN'
self.log_function(
f'{round(progress_percent, 1)}% [{index}/{self.total_count}]: {self.what}. '
f'Est. minutes remaining: {estimated_end_minutes}')
def update_notifications_for_user(user):
"""
Swap from one email suffix to another, and set the primary and notification
email to the email address maching the new suffix.
"""
from devilry.devilry_account.models import UserEmail
from_email_suffixes = ['@old.shit.example.com', '@oldstuff.example.com', '@superoldstuff.example.com']
new_primary_email_suffix = '@example.com'
# Convert from old to new primary
for from_email_suffix in from_email_suffixes:
if user.useremail_set.filter(email__endswith=from_email_suffix).exists():
matched_email = user.useremail_set.filter(email__endswith=from_email_suffix).first()
username = matched_email.email.split('@')[0]
matched_email.email = f'{username}{new_primary_email_suffix}'
new_email = matched_email.email
# Prevent generating duplicates (which is an IntegrityError enforced by the unique constraint in the database)
if not UserEmail.objects.filter(email=new_email).exists():
matched_email.email = new_email
matched_email.save()
# Force notifications and "is_primary" to the `new_primary_email_suffix`
new_primary_email = user.useremail_set.filter(email__endswith=new_primary_email_suffix).first()
if new_primary_email is not None:
user.useremail_set.update(use_for_notifications=False, is_primary=None)
new_primary_email.use_for_notifications = True
new_primary_email.is_primary = True
new_primary_email.clean()
new_primary_email.save()
if __name__ == "__main__":
# For development:
os.environ.setdefault('DJANGOENV', 'develop')
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "devilry.project.settingsproxy")
django.setup()
# For production: Specify python path to your settings file here
# os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'devilry_settings')
# django.setup()
user_queryset = get_user_model().objects
for user, is_end_of_group in ProgressPrintIterator(
iterator=user_queryset.iterator(),
total_count=user_queryset.count(),
what='Processing users',
items_per_group=300):
with transaction.atomic():
update_notifications_for_user(user=user)
| bsd-3-clause | Python | |
4cb6b3a3e1b74dd83812469c472accfc22e4d699 | Update consecutive-numbers-sum.py | tudennis/LeetCode---kamyu104-11-24-2015,kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,tudennis/LeetCode---kamyu104-11-24-2015,kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,kamyu104/LeetCode,kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,kamyu104/LeetCode | Python/consecutive-numbers-sum.py | Python/consecutive-numbers-sum.py | # Time: O(sqrt(n))
# Space: O(1)
# Given a positive integer N,
# how many ways can we write it as a sum of
# consecutive positive integers?
#
# Example 1:
#
# Input: 5
# Output: 2
# Explanation: 5 = 5 = 2 + 3
# Example 2:
#
# Input: 9
# Output: 3
# Explanation: 9 = 9 = 4 + 5 = 2 + 3 + 4
# Example 3:
#
# Input: 15
# Output: 4
# Explanation: 15 = 15 = 8 + 7 = 4 + 5 + 6 = 1 + 2 + 3 + 4 + 5
# Note: 1 <= N <= 10 ^ 9.
class Solution(object):
    def consecutiveNumbersSum(self, N):
        """
        :type N: int
        :rtype: int

        x + x+1 + ... + x+l-1 = N = 2^k * M, where M is odd
        => l*x + (l-1)*l/2 = N
        => x = (N - (l-1)*l/2)/l = 2^k * M/l - (l-1)/2 must be an
           integer, which happens exactly once per odd factor of M, so
           the answer is the number of odd factors of M: for
           N = 2^k * p1^a * p2^b * ... that is (a+1) * (b+1) * ...

        Fix vs. the original: use floor division (//=) -- under
        Python 3, ``N /= 2`` turned N into a float, so the factor
        loops operated on floats.
        """
        result = 1
        # Strip all factors of two; they contribute no odd factors.
        while N % 2 == 0:
            N //= 2
        # Count the multiplicity of each odd prime factor of M.
        i = 3
        while i*i <= N:
            count = 0
            while N % i == 0:
                N //= i
                count += 1
            result *= count+1
            i += 2
        # A remaining value > 1 is one more odd prime of multiplicity 1.
        if N > 1:
            result *= 2
        return result
| # Time: O(sqrt(n))
# Space: O(1)
# Given a positive integer N,
# how many ways can we write it as a sum of
# consecutive positive integers?
#
# Example 1:
#
# Input: 5
# Output: 2
# Explanation: 5 = 5 = 2 + 3
# Example 2:
#
# Input: 9
# Output: 3
# Explanation: 9 = 9 = 4 + 5 = 2 + 3 + 4
# Example 3:
#
# Input: 15
# Output: 4
# Explanation: 15 = 15 = 8 + 7 = 4 + 5 + 6 = 1 + 2 + 3 + 4 + 5
# Note: 1 <= N <= 10 ^ 9.
class Solution(object):
def consecutiveNumbersSum(self, N):
"""
:type N: int
:rtype: int
"""
# x + x+1 + x+2 + ... + x+l-1 = N = 2^k * M
# => l*x + (l-1)*l/2 = N
# => x = (N -(l-1)*l/2)/l= 2^k * M/l - (l-1)/2 is integer
# => l could be 2 or any odd factor of M (excluding M),
# set x = 2^k * M/l - (l-1)/2 is integer, and also unique
# => the answer is the number of all odd factors of M
# if prime factorization of N is 2^k * p1^a * p2^b * ..
# => answer is the number of all odd factors = (a+1) * (b+1) * ...
result = 1
while N % 2 == 0:
N /= 2
i = 3
while i*i <= N:
count = 0
while N % i == 0:
N /= i
count += 1
result *= count+1
i += 2
if N > 1:
result *= 2
return result
| mit | Python |
3dc8fde56be2438dae03e5d9d310fa2d19cd1ce2 | Add multi-result testing | klmitch/dtest,klmitch/dtest | tests/test_multi.py | tests/test_multi.py | # Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from dtest import *
from dtest.util import *
@repeat(2)
def test_multi():
# Set up a list to record executions
recorded = []
# Now, define an inner function
def inner(*args, **kwargs):
# Place the arguments into the recorded list
recorded.append((args, kwargs))
# Now, yield the inner function once...
yield ('inner1', inner, (1,), dict(kw=1))
# Yield it again
yield ('inner2', inner, (2,), dict(kw=2))
# Now, check if recorded has what we expect
assert_equal(len(recorded), 4)
assert_tuple_equal(recorded[0][0], (1,))
assert_dict_equal(recorded[0][1], dict(kw=1))
assert_tuple_equal(recorded[1][0], (1,))
assert_dict_equal(recorded[1][1], dict(kw=1))
assert_tuple_equal(recorded[2][0], (2,))
assert_dict_equal(recorded[2][1], dict(kw=2))
assert_tuple_equal(recorded[3][0], (2,))
assert_dict_equal(recorded[3][1], dict(kw=2))
| apache-2.0 | Python | |
362c35a1753c908fa7496c0e050b9325420e405e | add missing migrations | dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq | corehq/apps/linked_domain/migrations/0012_auto_20200929_0809.py | corehq/apps/linked_domain/migrations/0012_auto_20200929_0809.py | # Generated by Django 2.2.16 on 2020-09-29 08:09
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('linked_domain', '0011_auto_20200728_2316'),
]
operations = [
migrations.AlterField(
model_name='domainlinkhistory',
name='model',
field=models.CharField(choices=[('app', 'Application'), ('custom_user_data', 'Custom User Data Fields'), ('custom_product_data', 'Custom Product Data Fields'), ('custom_location_data', 'Custom Location Data Fields'), ('roles', 'User Roles'), ('toggles', 'Feature Flags and Previews'), ('fixture', 'Lookup Table'), ('case_search_data', 'Case Search Settings'), ('report', 'Report'), ('data_dictionary', 'Data Dictionary'), ('keyword', 'Keyword')], max_length=128),
),
]
| bsd-3-clause | Python | |
65b4a1a587c257a1b80d72a6e59d57d5c854f8ed | Add tests | chrisseto/pyjwe | tests/test_pyjwe.py | tests/test_pyjwe.py | import pytest
import jwe
from jwe import validation
from cryptography.exceptions import InvalidTag
class TestValidation:
def test_correct_header(self):
try:
validation.validate_header({
'alg': 'dir',
'enc': 'A256GCM'
})
except Exception as e:
pytest.fail(e)
def test_missing_keys(self):
with pytest.raises(jwe.exceptions.MalformedHeader):
validation.validate_header({})
def test_missing_alg(self):
with pytest.raises(jwe.exceptions.MalformedHeader):
validation.validate_header({'alg': 'dir'})
def test_missing_enc(self):
with pytest.raises(jwe.exceptions.MalformedHeader):
validation.validate_header({'enc': 'A256GCM'})
def test_unsupported_alg(self):
with pytest.raises(jwe.exceptions.UnsupportedAlgorithm):
validation.validate_header({'alg': 'foo', 'enc': 'A256GCM'})
with pytest.raises(jwe.exceptions.UnsupportedAlgorithm):
validation.validate_header({'alg': 'bar', 'enc': 'A256GCM'})
with pytest.raises(jwe.exceptions.UnsupportedAlgorithm):
validation.validate_header({'alg': 'baz', 'enc': 'A256GCM'})
def test_unsupported_enc(self):
with pytest.raises(jwe.exceptions.UnsupportedEncryption):
validation.validate_header({'alg': 'dir', 'enc': 'RSA'})
with pytest.raises(jwe.exceptions.UnsupportedEncryption):
validation.validate_header({'alg': 'dir', 'enc': 'A126'})
with pytest.raises(jwe.exceptions.UnsupportedEncryption):
validation.validate_header({'alg': 'dir', 'enc': 'Base64'})
class TestApi:
def test_encrypt_decrypt(self):
key = jwe.kdf(b'Testing', b'Pepper')
data = b'Just some data'
encrypted = jwe.encrypt(data, key)
assert encrypted != data
assert jwe.decrypt(encrypted, key) == data
def test_improper_key(self):
key = jwe.kdf(b'Testing', b'Pepper')
data = b'Just some data'
encrypted = jwe.encrypt(data, key)
with pytest.raises(InvalidTag):
# TODO make this a custom exception
jwe.decrypt(encrypted, jwe.kdf(b'somekey', b'Salt')) == data
class TestDecryption:
def test_invalid_data(self):
with pytest.raises(jwe.exceptions.MalformedData):
jwe.decrypt(b'junkdata', jwe.kdf(b'key', b'Salt'))
def test_invalid_header_json(self):
with pytest.raises(jwe.exceptions.MalformedData) as e:
jwe.decrypt(
jwe.encrypt(
b'Just Some Data',
jwe.kdf(b'key', b'Salt')
)[3:], # Cut out some of the JSON
jwe.kdf(b'key', b'Salt')
)
assert e.value.args[0] == 'Header is not valid JSON'
def test_no_key_wrapping(self):
data = jwe.encrypt(b'Just Some Data', jwe.kdf(b'key', b'Salt')).split(b'.')
data[1] = b'cmFwcGE='
with pytest.raises(jwe.exceptions.UnsupportedOption) as e:
jwe.decrypt(b'.'.join(data), jwe.kdf(b'key', b'Salt'))
assert e.value.args[0] == 'Key wrapping is currently not supported'
| apache-2.0 | Python | |
b43aef2a3ebd54a72791ae635dca4d7544c0ad23 | Add Flask test server | Oshlack/scRNA-tools,lazappi/single-cell-software,Oshlack/scRNA-tools | test_server.py | test_server.py | from flask import Flask
# Serve the current directory as static files at the web root.
app = Flask(__name__, static_folder=".", static_url_path="")
# NOTE(review): debug=True enables the interactive Werkzeug debugger;
# keep this bound to 127.0.0.1 and never expose it publicly.
if __name__ == "__main__":
    app.run(debug=True, host="127.0.0.1", port=8765)
| mit | Python | |
85bd2d96b97b61d70d610a8b566e70abae14b264 | add example of boolean operations for 2d polygons | mozman/ezdxf,mozman/ezdxf,mozman/ezdxf,mozman/ezdxf,mozman/ezdxf | examples/render/boolean_operations_for_2d_polygons.py | examples/render/boolean_operations_for_2d_polygons.py | # Copyright (c) 2022, Manfred Moitzi
# License: MIT License
from pathlib import Path
import ezdxf
from ezdxf.render.forms import gear, translate
from ezdxf.math.clipping import (
greiner_hormann_union,
greiner_hormann_difference,
greiner_hormann_intersection,
)
from ezdxf import zoom
DIR = Path("~/Desktop/Outbox").expanduser()
if not DIR.exists():
DIR = Path(".")
PATCH = [
(0.3, 1.5),
(8.924927791151, 12.144276424324),
(15.730880789598, 2.627501561855),
(2.887557565461, 0.615276783076),
(-5.236692244506, -14.884612410891),
(-13.715295679108, -8.86188181765),
(-0.775863683945, -2.09940448737),
(-3.015869519575, 2.14587216514),
(-18.188942751518, 0.179187467964),
(-9.008545512094, 15.195189150382),
(0.3, 1.5),
]
SQUARE1 = [(0, 0), (10, 0), (10, 10), (0, 10), (0, 0)]
SQUARE2 = [(10, 5), (20, 5), (20, 15), (10, 15), (10, 5)]
def export(polygons, name):
doc = ezdxf.new()
msp = doc.modelspace()
for color, polygon in enumerate(polygons):
msp.add_lwpolyline(polygon, dxfattribs={"color": color + 1})
zoom.extents(msp, 1.1)
doc.saveas(DIR / name)
print(f"exported: {name}")
def execute_all_operations(p1, p2, prefix: str):
export([p1, p2], prefix + "_source.dxf")
export(greiner_hormann_union(p1, p2), prefix + "_union.dxf")
export(greiner_hormann_intersection(p1, p2), prefix + "_intersection.dxf")
export(greiner_hormann_difference(p1, p2), prefix + "_difference.dxf")
export(
greiner_hormann_difference(p2, p1),
prefix + "_difference_reversed.dxf",
)
def gear_and_patch():
form = list(
gear(
16,
top_width=1,
bottom_width=3,
height=2,
outside_radius=10,
close=True,
)
)
# Important for proper results:
# The polygons have to overlap and intersect each other!
# Polygon points (vertices) on an edge of the other polygon do not count as
# intersection!
execute_all_operations(form, PATCH, "gp")
def this_does_not_work():
# This example shows the boolean operations on non overlapping
# and non-intersecting squares.
# Polygon points (vertices) on an edge of the other polygon do not count as
# intersection!
execute_all_operations(SQUARE1, SQUARE2, "ts1")
def fixed_union_of_two_squares():
# This example fixes the union problem of "this_does_not_work" by shifting
# the second square just a little bit:
execute_all_operations(
SQUARE1, list(translate(SQUARE2, (-0.001, 0))), "ts2"
)
if __name__ == "__main__":
gear_and_patch()
this_does_not_work()
fixed_union_of_two_squares()
| mit | Python | |
3352cb3f38db4f68decf6de60528ad6ff07ce613 | Create mssql_import.py | jmhwang/personal_utils | database/mssql_import.py | database/mssql_import.py | # -*-coding:cp949-*-
# vim: set et:ts=4:sw=4
import pyodbc
def import_table(file, table):
    """Bulk-insert a CSV file's rows (header skipped) into *table*.

    Python 2 script: the format string and each line are decoded from
    cp949 before building the INSERT statement, and the connection uses
    a hard-coded local DSN (credentials are presumably placeholders --
    confirm before use).

    NOTE(review): the SQL is assembled by string concatenation/format,
    so both *table* and the file contents are injectable.  Only safe
    for trusted local files; parameterised queries would require
    splitting each CSV line into individual values.
    """
    connection = pyodbc.connect(
        r'DRIVER={SQL Server};'
        r'SERVER=127.0.0.1\instance;'
        r'DATABASE=database;'
        r'UID=id;'
        r'PWD=passwd')
    with open (file, 'r') as f:
        lines = f.readlines()
    cursor = connection.cursor()
    for line in lines[1:]: # 1st line skip
        line = line.replace('\n', '')
        query = 'insert into ' + table + ' values ({0})'.decode('cp949')
        query = query.format(line.decode('cp949'))
        #print(query)
        cursor.execute(query)
        cursor.commit()
    print "%d 건 완료" % (len(lines)-1,)
import sys
import os.path
if len(sys.argv) < 2 :
print u'파일명을 입력하세요'.encode('cp949')
print u'파일명에 적힌 테이블로 데이터를 입력합니다'.encode('cp949')
print u'사용법 :\n\timport.exe db_name..tablename.csv'.encode('cp949')
sys.exit(0)
file = sys.argv[1]
tbl = os.path.splitext(os.path.basename(file))[0]
# print file # ".\db_name..tablename.csv"
# print tbl # "db_name..tablename"
import_table(file, tbl.decode('cp949'))
| mit | Python | |
d67b685340cf2db7cd31b50a4484c29625b8fea5 | Remove pixel test fail expectation | jaruba/chromium.src,Pluto-tv/chromium-crosswalk,ondra-novak/chromium.src,krieger-od/nwjs_chromium.src,hgl888/chromium-crosswalk-efl,Just-D/chromium-1,TheTypoMaster/chromium-crosswalk,hgl888/chromium-crosswalk,Chilledheart/chromium,Chilledheart/chromium,crosswalk-project/chromium-crosswalk-efl,jaruba/chromium.src,PeterWangIntel/chromium-crosswalk,Jonekee/chromium.src,dednal/chromium.src,Fireblend/chromium-crosswalk,chuan9/chromium-crosswalk,axinging/chromium-crosswalk,hgl888/chromium-crosswalk,markYoungH/chromium.src,Fireblend/chromium-crosswalk,chuan9/chromium-crosswalk,axinging/chromium-crosswalk,fujunwei/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,chuan9/chromium-crosswalk,axinging/chromium-crosswalk,Fireblend/chromium-crosswalk,M4sse/chromium.src,littlstar/chromium.src,crosswalk-project/chromium-crosswalk-efl,hgl888/chromium-crosswalk,axinging/chromium-crosswalk,dednal/chromium.src,chuan9/chromium-crosswalk,ltilve/chromium,M4sse/chromium.src,PeterWangIntel/chromium-crosswalk,krieger-od/nwjs_chromium.src,ondra-novak/chromium.src,Jonekee/chromium.src,hgl888/chromium-crosswalk,dednal/chromium.src,Pluto-tv/chromium-crosswalk,littlstar/chromium.src,dednal/chromium.src,TheTypoMaster/chromium-crosswalk,Jonekee/chromium.src,chuan9/chromium-crosswalk,fujunwei/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,dednal/chromium.src,hgl888/chromium-crosswalk-efl,Just-D/chromium-1,Fireblend/chromium-crosswalk,dushu1203/chromium.src,markYoungH/chromium.src,hgl888/chromium-crosswalk-efl,hgl888/chromium-crosswalk-efl,Fireblend/chromium-crosswalk,axinging/chromium-crosswalk,hgl888/chromium-crosswalk,ltilve/chromium,jaruba/chromium.src,hgl888/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,ltilve/chromium,Pluto-tv/chromium-crosswalk,littlstar/chromium.src,krieger-od/nwjs_chromium.src,ltilve/chromium,PeterWangIntel/chromium-crosswalk,moham
ed--abdel-maksoud/chromium.src,dednal/chromium.src,jaruba/chromium.src,M4sse/chromium.src,markYoungH/chromium.src,dednal/chromium.src,M4sse/chromium.src,dushu1203/chromium.src,ondra-novak/chromium.src,mohamed--abdel-maksoud/chromium.src,TheTypoMaster/chromium-crosswalk,Pluto-tv/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,Chilledheart/chromium,littlstar/chromium.src,ltilve/chromium,hgl888/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,Chilledheart/chromium,jaruba/chromium.src,dednal/chromium.src,dednal/chromium.src,bright-sparks/chromium-spacewalk,Fireblend/chromium-crosswalk,dushu1203/chromium.src,M4sse/chromium.src,axinging/chromium-crosswalk,axinging/chromium-crosswalk,littlstar/chromium.src,jaruba/chromium.src,Chilledheart/chromium,krieger-od/nwjs_chromium.src,dushu1203/chromium.src,mohamed--abdel-maksoud/chromium.src,Jonekee/chromium.src,bright-sparks/chromium-spacewalk,crosswalk-project/chromium-crosswalk-efl,TheTypoMaster/chromium-crosswalk,ltilve/chromium,jaruba/chromium.src,fujunwei/chromium-crosswalk,krieger-od/nwjs_chromium.src,bright-sparks/chromium-spacewalk,mohamed--abdel-maksoud/chromium.src,Pluto-tv/chromium-crosswalk,Jonekee/chromium.src,axinging/chromium-crosswalk,markYoungH/chromium.src,Jonekee/chromium.src,mohamed--abdel-maksoud/chromium.src,Chilledheart/chromium,hgl888/chromium-crosswalk-efl,markYoungH/chromium.src,fujunwei/chromium-crosswalk,Pluto-tv/chromium-crosswalk,dednal/chromium.src,markYoungH/chromium.src,axinging/chromium-crosswalk,markYoungH/chromium.src,TheTypoMaster/chromium-crosswalk,dushu1203/chromium.src,dushu1203/chromium.src,TheTypoMaster/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,hgl888/chromium-crosswalk,Jonekee/chromium.src,M4sse/chromium.src,fujunwei/chromium-crosswalk,M4sse/chromium.src,markYoungH/chromium.src,crosswalk-project/chromium-crosswalk-efl,crosswalk-project/chromium-crosswalk-efl,chuan9/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,dushu1203/chromium.src,hgl888/chromium-crosswalk-e
fl,TheTypoMaster/chromium-crosswalk,krieger-od/nwjs_chromium.src,jaruba/chromium.src,Just-D/chromium-1,M4sse/chromium.src,Just-D/chromium-1,dushu1203/chromium.src,Just-D/chromium-1,ondra-novak/chromium.src,bright-sparks/chromium-spacewalk,hgl888/chromium-crosswalk-efl,ltilve/chromium,Chilledheart/chromium,PeterWangIntel/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,bright-sparks/chromium-spacewalk,krieger-od/nwjs_chromium.src,hgl888/chromium-crosswalk-efl,ondra-novak/chromium.src,chuan9/chromium-crosswalk,Jonekee/chromium.src,mohamed--abdel-maksoud/chromium.src,littlstar/chromium.src,fujunwei/chromium-crosswalk,markYoungH/chromium.src,Just-D/chromium-1,mohamed--abdel-maksoud/chromium.src,PeterWangIntel/chromium-crosswalk,ondra-novak/chromium.src,dushu1203/chromium.src,dushu1203/chromium.src,hgl888/chromium-crosswalk-efl,ondra-novak/chromium.src,Chilledheart/chromium,Fireblend/chromium-crosswalk,hgl888/chromium-crosswalk-efl,dednal/chromium.src,krieger-od/nwjs_chromium.src,littlstar/chromium.src,bright-sparks/chromium-spacewalk,crosswalk-project/chromium-crosswalk-efl,jaruba/chromium.src,PeterWangIntel/chromium-crosswalk,jaruba/chromium.src,axinging/chromium-crosswalk,M4sse/chromium.src,krieger-od/nwjs_chromium.src,fujunwei/chromium-crosswalk,Jonekee/chromium.src,chuan9/chromium-crosswalk,Pluto-tv/chromium-crosswalk,markYoungH/chromium.src,mohamed--abdel-maksoud/chromium.src,Just-D/chromium-1,Fireblend/chromium-crosswalk,bright-sparks/chromium-spacewalk,Jonekee/chromium.src,ltilve/chromium,Chilledheart/chromium,TheTypoMaster/chromium-crosswalk,krieger-od/nwjs_chromium.src,ondra-novak/chromium.src,Pluto-tv/chromium-crosswalk,jaruba/chromium.src,Just-D/chromium-1,Pluto-tv/chromium-crosswalk,markYoungH/chromium.src,chuan9/chromium-crosswalk,hgl888/chromium-crosswalk,ltilve/chromium,fujunwei/chromium-crosswalk,ondra-novak/chromium.src,axinging/chromium-crosswalk,littlstar/chromium.src,krieger-od/nwjs_chromium.src,Just-D/chromium-1,mohamed--abdel-maksoud/chr
omium.src,M4sse/chromium.src,bright-sparks/chromium-spacewalk,Jonekee/chromium.src,M4sse/chromium.src,bright-sparks/chromium-spacewalk,fujunwei/chromium-crosswalk,dushu1203/chromium.src,Fireblend/chromium-crosswalk | content/test/gpu/gpu_tests/pixel_expectations.py | content/test/gpu/gpu_tests/pixel_expectations.py | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import test_expectations
# Valid expectation conditions are:
#
# Operating systems:
# win, xp, vista, win7, mac, leopard, snowleopard, lion, mountainlion,
# linux, chromeos, android
#
# GPU vendors:
# amd, arm, broadcom, hisilicon, intel, imagination, nvidia, qualcomm,
# vivante
#
# Specific GPUs can be listed as a tuple with vendor name and device ID.
# Examples: ('nvidia', 0x1234), ('arm', 'Mali-T604')
# Device IDs must be paired with a GPU vendor.
class PixelExpectations(test_expectations.TestExpectations):
  """Expectations for the GPU pixel tests; currently none are expected to fail."""

  def SetExpectations(self):
    # Example of marking a test as failing:
    #   self.Fail('Pixel.Canvas2DRedBox',
    #             ['mac', 'amd', ('nvidia', 0x1234)], bug=123)
    pass
| # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import test_expectations
# Valid expectation conditions are:
#
# Operating systems:
# win, xp, vista, win7, mac, leopard, snowleopard, lion, mountainlion,
# linux, chromeos, android
#
# GPU vendors:
# amd, arm, broadcom, hisilicon, intel, imagination, nvidia, qualcomm,
# vivante
#
# Specific GPUs can be listed as a tuple with vendor name and device ID.
# Examples: ('nvidia', 0x1234), ('arm', 'Mali-T604')
# Device IDs must be paired with a GPU vendor.
class PixelExpectations(test_expectations.TestExpectations):
  """Expectations for the GPU pixel tests."""

  def SetExpectations(self):
    # Sample Usage:
    # self.Fail('Pixel.Canvas2DRedBox',
    #     ['mac', 'amd', ('nvidia', 0x1234)], bug=123)
    self.Fail('Pixel.Canvas2DRedBox', bug=384551)
    self.Fail('Pixel.CSS3DBlueBox', bug=384551)
    self.Fail('Pixel.WebGLGreenTriangle', bug=384551)
    # Removed the trailing `pass`: it was dead code once real
    # statements were added to this method.
| bsd-3-clause | Python |
b0116fb3b52e9c9f0cc156bc49f1400261ed879c | ADD mechanism table | OpenMined/PySyft,OpenMined/PySyft,OpenMined/PySyft,OpenMined/PySyft | packages/syft/src/syft/core/node/common/node_table/mechanism.py | packages/syft/src/syft/core/node/common/node_table/mechanism.py | # stdlib
from typing import Any
# third party
from sqlalchemy import Column
from sqlalchemy import LargeBinary
from sqlalchemy import String
from sqlalchemy import Integer
# syft absolute
from syft import deserialize
from syft import serialize
# relative
from . import Base
class Mechanism(Base):
    """ORM row that stores one serialized mechanism object as a binary blob."""

    __tablename__ = "mechanism"

    id = Column(Integer(), primary_key=True, autoincrement=True)
    # Serialized payload; column is capped at 3072 bytes.
    mechanism_bin = Column(LargeBinary(3072), default=None)

    @property
    def obj(self) -> Any:
        """Deserialize and return the stored mechanism object."""
        return deserialize(self.mechanism_bin, from_bytes=True)  # TODO: techdebt fix

    @obj.setter
    def obj(self, new_obj: Any) -> None:
        """Serialize ``new_obj`` and store the bytes in ``mechanism_bin``."""
        self.mechanism_bin = serialize(new_obj, to_bytes=True)  # TODO: techdebt fix
| apache-2.0 | Python | |
05e90660ab0072a50dd17a2afa1e08c93cf694e8 | Create reverse_k.py | hs634/algorithms,hs634/algorithms | python/linked_list/reverse_k.py | python/linked_list/reverse_k.py | public ListNode reverseKGroup(ListNode head, int k) {
ListNode curr = head;
int count = 0;
while (curr != null && count != k) { // find the k+1 node
curr = curr.next;
count++;
}
if (count == k) { // if k+1 node is found
curr = reverseKGroup(curr, k); // reverse list with k+1 node as head
// head - head-pointer to direct part,
// curr - head-pointer to reversed part;
while (count-- > 0) { // reverse current k-group:
ListNode tmp = head.next; // tmp - next head in direct part
head.next = curr; // preappending "direct" head to the reversed list
curr = head; // move head of reversed part to a new node
head = tmp; // move "direct" head to the next node in direct part
}
head = curr;
}
return head;
}
| mit | Python | |
9b05195a0474ba39666e15035aeeacc312d5398e | Manage taboo contracts (#28144) | thaim/ansible,thaim/ansible | lib/ansible/modules/network/aci/aci_taboo_contract.py | lib/ansible/modules/network/aci/aci_taboo_contract.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: aci_taboo_contract
short_description: Manage taboo contracts on Cisco ACI fabrics
description:
- Manage taboo contracts on Cisco ACI fabrics.
author:
- Swetha Chunduri (@schunduri)
- Dag Wieers (@dagwieers)
- Jacob McGill (@jmcgill298)
version_added: '2.4'
requirements:
- ACI Fabric 1.0(3f)+
notes:
- The tenant used must exist before using this module in your playbook. The M(aci_tenant) module can be used for this.
options:
taboo_contract:
description:
- Taboo Contract name.
required: yes
aliases: [ name ]
description:
description:
- Description for the filter.
aliases: [ descr ]
tenant:
description:
- The name of the tenant.
required: yes
aliases: [ tenant_name ]
scope:
description:
- The scope of a service contract.
- The APIC defaults new Taboo Contracts to a scope of context (VRF).
choices: [ application-profile, context, global, tenant ]
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
choices: [ absent, present, query ]
default: present
extends_documentation_fragment: aci
'''
# FIXME: Add more, better examples
EXAMPLES = r'''
- aci_taboo_contract:
hostname: '{{ inventory_hostname }}'
username: '{{ username }}'
password: '{{ password }}'
taboo_contract: '{{ taboo_contract }}'
description: '{{ descr }}'
tenant: '{{ tenant }}'
'''
RETURN = r'''
#
'''
from ansible.module_utils.aci import ACIModule, aci_argument_spec
from ansible.module_utils.basic import AnsibleModule
def main():
    """Ansible entry point: create, delete or query an ACI taboo contract."""
    # NOTE(review): aci_argument_spec is shared module-level state; updating it
    # in place mutates the spec for every module in this process. Consider
    # copying it first -- confirm against module_utils.aci.
    argument_spec = aci_argument_spec
    argument_spec.update(
        taboo_contract=dict(type='str', required=False, aliases=['name']),  # Not required for querying all contracts
        tenant=dict(type='str', required=False, aliases=['tenant_name']),  # Not required for querying all contracts
        scope=dict(type='str', choices=['application-profile', 'context', 'global', 'tenant']),
        description=dict(type='str', aliases=['descr']),
        state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
        method=dict(type='str', choices=['delete', 'get', 'post'], aliases=['action'], removed_in_version='2.6'),  # Deprecated starting from v2.6
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )

    taboo_contract = module.params['taboo_contract']
    description = module.params['description']
    scope = module.params['scope']
    state = module.params['state']

    aci = ACIModule(module)

    # A specific taboo contract is addressed by its MO path; querying all
    # objects goes through the class API instead.
    if taboo_contract is not None:
        path = 'api/mo/uni/tn-%(tenant)s/taboo-%(taboo_contract)s.json' % module.params
    elif state == 'query':
        # Query all objects
        path = 'api/node/class/vzTaboo.json'
    else:
        module.fail_json(msg="Parameter 'taboo_contract' is required for state 'absent' or 'present'")

    aci.result['url'] = '%(protocol)s://%(hostname)s/' % aci.params + path

    aci.get_existing()

    if state == 'present':
        # BUGFIX: taboo contracts are vzTaboo objects (matching the taboo-...
        # MO path and the class query above); the original posted the regular
        # contract class vzBrCP here, which does not match the DN being built.
        aci.payload(aci_class='vzTaboo', class_config=dict(name=taboo_contract, descr=description, scope=scope))

        # Generate config diff which will be used as POST request body
        aci.get_diff(aci_class='vzTaboo')

        # Submit changes if module not in check_mode and the proposed is different than existing
        aci.post_config()

    elif state == 'absent':
        aci.delete_config()

    module.exit_json(**aci.result)
| mit | Python | |
8acc41135419e81bfdb02ba544263453d17788dd | Add simple example | jccriado/matchingtools,jccriado/effective | examples/simple_example.py | examples/simple_example.py | """
Simple example to illustrate some of the features of `effective`
The model has a :math:`SU(2)\times U(1)` simmetry and contains
a complex scalar doublet :math:`\phi` (the Higgs) with hypercharge
:math:`1/2` and a real scalar triplet :math:`\Xi` with zero
hypercharge that couple as:
.. math::
\mathcal{L}_{int} = - \kappa\Xi^a\phi^\dagger\sigma^a\phi
- \lambda \Xi^a \Xi^a \phi^\dagger\phi,
where :math:`\kappa` and :math:`\lambda` are coupling constants
and :math:`\sigma^a` are the Pauli matrices. We will then integrate
out the heavy scalar :math:`\Xi` to obtain an effective Lagrangian
which we will finally write in terms of the operators.
.. math::
\mathcal{O}_\phi=(\phi^\dagger\phi)^3,\;
\mathcal{O}_{\phi 4}=(\phi^\dagger\phi)^2
"""
from effective.operators import (
TensorBuilder, FieldBuilder, Op, OpSum,
number_op, tensor_op, boson, fermion, kdelta)
from effective.integration import RealScalar, integrate
from effective.transformations import apply_rules
from effective.output import Writer
# Creation of the model
# Creation of the model: symbolic tensors (couplings, Pauli matrices)
# and the fields of the Lagrangian described in the module docstring.
sigma = TensorBuilder("sigma")  # Pauli matrices sigma^a_{ij}
kappa = TensorBuilder("kappa")  # trilinear coupling constant
lamb = TensorBuilder("lamb")    # quartic coupling constant
phi = FieldBuilder("phi", 1, boson)    # Higgs doublet
phic = FieldBuilder("phic", 1, boson)  # conjugate Higgs doublet
Xi = FieldBuilder("Xi", 1, boson)      # heavy real scalar triplet

interaction_lagrangian = -OpSum(
    Op(kappa(), Xi(0), phic(1), sigma(0, 1, 2), phi(2)),
    Op(lamb(), Xi(0), Xi(0), phic(1), phi(1)))

# Integration: remove the heavy triplet Xi, keeping operators up to
# mass dimension 6 in the resulting effective Lagrangian.
heavy_Xi = RealScalar("Xi", 1)
heavy_fields = [heavy_Xi]
max_dim = 6
effective_lagrangian = integrate(
    heavy_fields, interaction_lagrangian, max_dim)

# Transformations of the effective Lagrangian: the SU(2) Fierz identity
# for contracted Pauli matrices, plus the definitions of the final
# operator basis (Ophi, Ophi4).
fierz_rule = (
    Op(sigma(0, -1, -2), sigma(0, -3, -4)),
    OpSum(number_op(2) * Op(kdelta(-1, -4), kdelta(-3, -2)),
          -Op(kdelta(-1, -2), kdelta(-3, -4))))

Ophi = tensor_op("Ophi")    # (phi^dagger phi)^3
Ophi4 = tensor_op("Ophi4")  # (phi^dagger phi)^2
definition_rules = [
    (Op(phic(0), phi(0), phic(1), phi(1), phic(2), phi(2)),
     OpSum(Ophi)),
    (Op(phic(0), phi(0), phic(1), phi(1)),
     OpSum(Ophi4))]

rules = [fierz_rule] + definition_rules
max_iterations = 2  # passes of rule application over the Lagrangian
transf_eff_lag = apply_rules(
    effective_lagrangian, rules, max_iterations)

# Output: write the result both as plain text and as a LaTeX PDF.
final_op_names = ["Ophi", "Ophi4"]
eff_lag_writer = Writer(transf_eff_lag, final_op_names)
eff_lag_writer.write_text_file("simple_example")

latex_tensor_reps = {"kappa": r"\kappa",
                     "lamb": r"\lambda",
                     "MXi": r"M_{{\Xi}}",
                     "phi": r"\phi_{}",
                     "phic": r"\phi^*_{}"}
latex_op_reps = {"Ophi": r"\mathcal{{O}}_{{\phi}}",
                 "Ophi4": r"\mathcal{{O}}_{{\phi 4}}"}
latex_indices = ["i", "j", "k", "l"]
eff_lag_writer.write_pdf(
    "simple_example", latex_tensor_reps,
    latex_op_reps, latex_indices)
| mit | Python | |
f18d70ce9c9e86ca184da939f9ffb193b32d981d | add 135 | EdisonAlgorithms/ProjectEuler,EdisonAlgorithms/ProjectEuler,zeyuanxy/project-euler,EdisonAlgorithms/ProjectEuler,zeyuanxy/project-euler,zeyuanxy/project-euler,zeyuanxy/project-euler,EdisonAlgorithms/ProjectEuler | vol3/135.py | vol3/135.py | if __name__ == "__main__":
L = 10 ** 6
sol = [0] * (L + 1)
for u in xrange(1, L + 1):
for v in xrange(1, L + 1):
if u * v > L:
break
if 3 * v <= u:
continue
if (u + v) % 4 == 0 and (3 * v - u) % 4 == 0:
sol[u * v] += 1
ans = 0
for i in range(1, L + 1):
if sol[i] == 10:
ans += 1
print ans
| mit | Python | |
64b2435b77044c1a258433f8794d9bd0a431b61a | Add accept header | JoeyEremondi/travis-daily | travis-ping.py | travis-ping.py | #Based on https://github.com/FiloSottile/travis-cron
import urllib2
import json
import sys
def api_call(url, token=None, data=None):
print url
if data:
data = json.dumps(data)
req = urllib2.Request(url, data)
if data:
req.add_header('Content-Type', 'application/json; charset=UTF-8')
if token:
req.add_header('Authorization', 'token ' + token)
req.add_header("Accept" , 'application/vnd.travis-ci.2+json')
p = urllib2.urlopen(req)
return json.loads(p.read())
def travis_ping(travis_token, repository):
last_build_id = api_call('https://api.travis-ci.org/repos/{}/builds'.format(repository))[0]['id']
print "Got build ID", last_build_id
return api_call('https://api.travis-ci.org/builds/{}/restart'.format(last_build_id), travis_token, { 'build_id': last_build_id })['result']
def main():
    """CLI entry point: argv[1] is the Travis token, argv[2] the repo slug."""
    travis_ping(sys.argv[1], sys.argv[2])


if __name__ == "__main__":
    main()
| #Based on https://github.com/FiloSottile/travis-cron
import urllib2
import json
import sys
def api_call(url, token=None, data=None):
print url
if data:
data = json.dumps(data)
req = urllib2.Request(url, data)
if data:
req.add_header('Content-Type', 'application/json; charset=UTF-8')
if token:
req.add_header('Authorization', 'token ' + token)
p = urllib2.urlopen(req)
return json.loads(p.read())
def travis_ping(travis_token, repository):
last_build_id = api_call('https://api.travis-ci.org/repos/{}/builds'.format(repository))[0]['id']
print "Got build ID", last_build_id
return api_call('https://api.travis-ci.org/builds/{}/restart'.format(last_build_id), travis_token, { 'build_id': last_build_id })['result']
def main():
#print sys.argv[1][0]
#print sys.argv[2][0]
travis_ping(sys.argv[1], sys.argv[2])
if __name__ == "__main__":
main()
| mit | Python |
b7d219a5afbf349385dbe9b2712f34f5e756e3e6 | add flask app | chhantyal/scrapd,chhantyal/scrapd | zalando/app.py | zalando/app.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from flask import Flask

# Minimal Flask application exposing a single hello-world endpoint.
app = Flask(__name__)


@app.route('/')
def hello_world():
    # Root endpoint: returns a static greeting.
    return 'Hello World!'


if __name__ == '__main__':
    # Run the built-in development server (not for production use).
    app.run()
90b92a1977c32dd660533567c0d5034b93d5c9c7 | Add smarts to cope with slug clashes with other places with the same names. | patricmutwiri/pombola,hzj123/56th,geoffkilpin/pombola,patricmutwiri/pombola,hzj123/56th,ken-muturi/pombola,hzj123/56th,hzj123/56th,mysociety/pombola,mysociety/pombola,hzj123/56th,geoffkilpin/pombola,mysociety/pombola,ken-muturi/pombola,hzj123/56th,mysociety/pombola,ken-muturi/pombola,mysociety/pombola,patricmutwiri/pombola,geoffkilpin/pombola,patricmutwiri/pombola,patricmutwiri/pombola,ken-muturi/pombola,geoffkilpin/pombola,mysociety/pombola,ken-muturi/pombola,geoffkilpin/pombola,ken-muturi/pombola,geoffkilpin/pombola,patricmutwiri/pombola | pombola/core/management/commands/core_create_places_from_mapit_entries.py | pombola/core/management/commands/core_create_places_from_mapit_entries.py | # This script will copy areas from mapit to core.places, including creating the
# place kind if required.
# import re
# import sys
from django.core.management.base import LabelCommand
from mapit.models import Type
from pombola.core.models import Place, PlaceKind
from django.template.defaultfilters import slugify
class Command(LabelCommand):
help = 'Copy mapit.areas to core.places'
args = '<mapit.type.code>'
def handle_label(self, mapit_type_code, **options):
# load the mapit type
mapit_type = Type.objects.get(code=mapit_type_code)
# if needed create the core placetype
placekind, created = PlaceKind.objects.get_or_create(
name=mapit_type.description,
defaults={
'slug': slugify(mapit_type.description)
}
)
# create all the places as needed for all mapit areas of that type
for area in mapit_type.areas.all():
# There may be a slug clash as several areas have the same name but
# are different placekinds. Create the slug and then check to see
# if the slug is already in use for a placekind other than ours. If
# it is append the placekind to the slug.
slug = slugify(area.name)
if Place.objects.filter(slug=slug).exclude(kind=placekind).exists():
slug = slug + '-' + placekind.slug
print "'%s' (%s)" % (area.name, slug)
place, created = Place.objects.get_or_create(
name=area.name,
kind=placekind,
defaults={
'slug': slug,
}
)
place.mapit_area = area
place.save()
| # This script will copy areas from mapit to core.places, including creating the
# place kind if required.
# import re
# import sys
from django.core.management.base import LabelCommand
from mapit.models import Type
from pombola.core.models import Place, PlaceKind
from django.template.defaultfilters import slugify
class Command(LabelCommand):
help = 'Copy mapit.areas to core.places'
args = '<mapit.type.code>'
def handle_label(self, mapit_type_code, **options):
# load the mapit type
mapit_type = Type.objects.get(code=mapit_type_code)
# if needed create the core placetype
placekind, created = PlaceKind.objects.get_or_create(
name=mapit_type.description,
defaults={
'slug': slugify(mapit_type.description)
}
)
# create all the places as needed for all mapit areas of that type
for area in mapit_type.areas.all():
print area.name
place, created = Place.objects.get_or_create(
name=area.name,
kind=placekind,
defaults={
'slug': slugify(area.name),
}
)
place.mapit_area = area
place.save()
| agpl-3.0 | Python |
0be0d20fc667f0734b85d98f1d359130f7ed5b98 | Add failing specs for current/future class names. | plotly/python-api,plotly/python-api,plotly/plotly.py,plotly/plotly.py,plotly/plotly.py,plotly/python-api | plotly/tests/test_core/test_graph_objs/test_graph_objs.py | plotly/tests/test_core/test_graph_objs/test_graph_objs.py | from unittest import TestCase
import plotly.graph_objs as go
import plotly.graph_reference as gr
# Checkpoint of every class name plotly.graph_objs has historically exported;
# the backwards-compatibility test below fails if any of these disappears.
OLD_CLASS_NAMES = ['AngularAxis', 'Annotation', 'Annotations', 'Area',
                   'Bar', 'Box', 'ColorBar', 'Contour', 'Contours',
                   'Data', 'ErrorX', 'ErrorY', 'ErrorZ', 'Figure',
                   'Font', 'Heatmap', 'Histogram', 'Histogram2d',
                   'Histogram2dContour', 'Layout', 'Legend', 'Line',
                   'Margin', 'Marker', 'RadialAxis', 'Scatter',
                   'Scatter3d', 'Scene', 'Stream', 'Surface', 'Trace',
                   'XAxis', 'XBins', 'YAxis', 'YBins', 'ZAxis']
class TestBackwardsCompat(TestCase):

    def test_old_class_names(self):
        """Every historically-exported class name must still be present.

        These were all defined at one point; this acts as a checkpoint so
        backwards compatibility is maintained.
        """
        exported = go.__dict__.keys()
        for class_name in OLD_CLASS_NAMES:
            self.assertIn(class_name, exported)
class TestGraphObjs(TestCase):

    def test_traces_should_be_defined(self):
        """We *always* want a class to exist for every trace name."""
        exported = go.__dict__.keys()
        for object_name in gr.TRACE_NAMES:
            self.assertIn(gr.string_to_class_name(object_name), exported)

    def test_no_new_classes(self):
        """For maintenance reasons, no class defs beyond traces + legacy names."""
        expected_class_names = set(OLD_CLASS_NAMES)
        expected_class_names.update(
            gr.string_to_class_name(object_name)
            for object_name in gr.TRACE_NAMES
        )

        # assume that CapitalCased keys are the classes we defined
        current_class_names = {key for key in go.__dict__ if key[0].isupper()}
        self.assertEqual(current_class_names, expected_class_names)
| mit | Python | |
6b53d081b78d3ea2073bdc13112b146660595b5f | Add tests for resource_renderer | ouvigna/pyramid_restpike | tests/test_resource_renderer.py | tests/test_resource_renderer.py | from nose.tools import assert_true, assert_equals
from pyramid.testing import DummyRequest
from pyramid.response import Response
class SUTResource(object):
    """Minimal resource under test: a dummy request plus an index view."""
    # NOTE(review): _makeOne() below builds its own local SUTResource, so this
    # module-level copy appears unused -- confirm before removing it.

    def __init__(self):
        self.request = DummyRequest()

    def index(self):
        return {}
class TestResourceRenderer(object):
    """Tests for the pyramid_restpike resource_renderer decorator."""

    def _getTargetClass(self):
        # Imported lazily so collection does not fail when the package is absent.
        from pyramid_restpike import resource_renderer
        return resource_renderer

    def _makeOne(self, *arg, **kw):
        """Build a resource whose index view is wrapped by the decorator under test."""
        renderer_decorator = self._getTargetClass()(*arg, **kw)

        class SUTResource(object):
            @renderer_decorator
            def index(self):
                return {}

        resource = SUTResource()
        resource.request = DummyRequest()
        return resource

    def test_default_renderer(self):
        resource = self._makeOne('json')
        response = resource.index()
        assert_true(isinstance(response, Response))
22b2446546ce59b99980e98e81b3571d81085304 | Test that daily westminster pages load | tdhopper/westminster-daily,olneyhymn/westminster-daily,olneyhymn/westminster-daily,olneyhymn/westminster-daily,olneyhymn/westminster-daily,tdhopper/westminster-daily,tdhopper/westminster-daily | tests/test_westminster_daily.py | tests/test_westminster_daily.py | import datetime as dt
from flask_application import app
def test_daily_westminster_pages_exist():
    """Every month/day page across a full (non-leap) year should return 200."""
    # FIX: dt.date(2015, 01, 01) used Python-2-only zero-prefixed integer
    # literals, which are a SyntaxError on Python 3.
    start_date = dt.date(2015, 1, 1)
    with app.test_client() as c:
        for days in range(365):
            date = start_date + dt.timedelta(days=days)
            month, day = date.month, date.day
            response = c.get('/{month:02d}/{day:02d}/'.format(month=month, day=day))
            assert response.status_code == 200
def test_daily_westminster_bad_days():
    """Impossible calendar dates (Jan 32, Feb 30, Apr 31) should 404."""
    invalid_paths = ['/01/32/', '/02/30/', '/04/31/']
    with app.test_client() as c:
        for path in invalid_paths:
            assert c.get(path).status_code == 404
def test_daily_leap_day():
    """The Feb 29 page must load even though 2015 (the base year) has no leap day."""
    with app.test_client() as c:
        assert c.get('/02/29/').status_code == 200
| bsd-3-clause | Python | |
68451f2df6a1706993d7da6d7b1ff80092bca9af | Add another ui test | Schevo/kiwi,Schevo/kiwi,Schevo/kiwi | tests/ui/personalinformation.py | tests/ui/personalinformation.py | from kiwi.ui.test.player import Player
player = Player(['examples/validation/personalinformation.py'])
app = player.get_app()
player.wait_for_window("Form")
app.Form.name.set_text("")
app.Form.name.set_text("J")
app.Form.name.set_text("Jo")
app.Form.name.set_text("Joh")
app.Form.name.set_text("Joha")
app.Form.name.set_text("Johan")
app.Form.age.set_text("")
app.Form.age.set_text(" ")
app.Form.age.set_text(" ")
app.Form.age.set_text("1 ")
app.Form.age.set_text("1")
app.Form.age.set_text("12")
app.Form.age.set_text("1")
app.Form.age.set_text("1 ")
app.Form.age.set_text(" ")
app.Form.age.set_text(" ")
app.Form.age.set_text(" ")
app.Form.age.set_text("9 ")
app.Form.age.set_text("9")
app.Form.age.set_text("99")
app.Form.GtkToggleButton.clicked()
app.Form.ProxyEntry.set_text("")
app.Form.ProxyEntry.set_text(" / / ")
app.Form.ProxyEntry.set_text(" / / ")
app.Form.ProxyEntry.set_text("/ / ")
app.Form.ProxyEntry.set_text(" / ")
app.Form.ProxyEntry.set_text(" / ")
app.Form.ProxyEntry.set_text("/ ")
app.Form.ProxyEntry.set_text(" ")
app.Form.ProxyEntry.set_text(" ")
app.Form.ProxyEntry.set_text(" ")
app.Form.ProxyEntry.set_text(" ")
app.Form.ProxyEntry.set_text("")
app.Form.ProxyEntry.set_text("02/14/1969")
app.Form.GtkToggleButton.clicked()
app.Form.ProxyEntry.set_text("")
app.Form.ProxyEntry.set_text(" / / ")
app.Form.ProxyEntry.set_text(" / / ")
app.Form.ProxyEntry.set_text("/ / ")
app.Form.ProxyEntry.set_text(" / ")
app.Form.ProxyEntry.set_text(" / ")
app.Form.ProxyEntry.set_text("/ ")
app.Form.ProxyEntry.set_text(" ")
app.Form.ProxyEntry.set_text(" ")
app.Form.ProxyEntry.set_text(" ")
app.Form.ProxyEntry.set_text(" ")
app.Form.ProxyEntry.set_text("")
app.Form.ProxyEntry.set_text("02/13/1969")
app.Form.height.set_text("")
app.Form.height.set_text("1")
app.Form.height.set_text("12")
app.Form.height.set_text("123")
app.Form.height.set_text("1234")
app.Form.height.set_text("12345")
app.Form.weight.set_text("")
app.Form.weight.set_text("87")
app.Form.weight.set_text("")
app.Form.weight.set_text("88")
app.Form.weight.set_text("")
app.Form.weight.set_text("89")
app.Form.weight.set_text("")
app.Form.weight.set_text("90")
app.Form.weight.set_text("")
app.Form.weight.set_text("91")
app.Form.weight.set_text("")
app.Form.weight.set_text("92")
app.Form.weight.set_text("")
app.Form.weight.set_text("93")
app.Form.weight.set_text("")
app.Form.weight.set_text("92")
app.Form.weight.set_text("")
app.Form.weight.set_text("91")
app.Form.weight.set_text("")
app.Form.weight.set_text("90")
app.Form.weight.set_text("")
app.Form.weight.set_text("89")
app.Form.weight.set_text("")
app.Form.weight.set_text("90")
app.Form.height.set_text("")
app.Form.height.set_text("1")
app.Form.height.set_text("12")
app.Form.age.set_text("")
app.Form.age.set_text(" ")
app.Form.age.set_text(" ")
app.Form.age.set_text("1 ")
app.Form.age.set_text("1")
app.Form.age.set_text("12")
app.Form.GtkToggleButton.clicked()
app.Form.ProxyEntry.set_text("")
app.Form.ProxyEntry.set_text("Brazilian")
app.Form.GtkToggleButton.clicked()
app.Form.ProxyEntry.set_text("")
app.Form.ProxyEntry.set_text("Yankee")
app.Form.ProxyEntry.set_text("")
app.Form.ProxyEntry.set_text("Other")
app.Form.ProxyEntry.set_text("")
app.Form.ProxyEntry.set_text("Yankee")
app.Form.ProxyEntry.set_text("")
app.Form.ProxyEntry.set_text("Brazilian")
app.Form.ProxyEntry.set_text("")
app.Form.ProxyEntry.set_text("Yankee")
app.Form.gender.select_item_by_label("Male")
app.Form.gender.select_item_by_label("Female")
app.Form.status_single.clicked()
app.Form.status.clicked()
app.Form.status.clicked()
app.Form.status_single.clicked()
app.Form.ok_btn.clicked()
player.finish()
| lgpl-2.1 | Python |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.