commit stringlengths 40 40 | subject stringlengths 1 1.49k | old_file stringlengths 4 311 | new_file stringlengths 4 311 | new_contents stringlengths 1 29.8k | old_contents stringlengths 0 9.9k | lang stringclasses 3 values | proba float64 0 1 |
|---|---|---|---|---|---|---|---|
13486556a15cdb2dbfe3f390f973942d93338995 | Create TryRecord.py | TryRecord.py | TryRecord.py | """
Example usage of Record class
The MIT License (MIT)
Copyright (c) <2016> <Larry McCaig (aka: Larz60+ aka: Larz60p)>
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import Record
class TryRecord:
def __init__(self, filename=None):
if filename:
self.rec = Record.Record(filename)
def try_record(self):
stkrec = self.rec.record
print('\nrecords:')
for record in stkrec:
print(record)
keys = stkrec._asdict().keys()
print('\nKeys:')
for key in keys:
print('\nkey: {}'.format(key))
thisrec = getattr(stkrec, key)
print('filename: {}'.format(thisrec.filename))
print('number of columns: {}'.format(len(thisrec.columns)))
print('column 0 column name: {}'.format(thisrec.columns[0].db_column_name))
if __name__ == '__main__':
tr = TryRecord('StockData.json')
tr.try_record()
| Python | 0.000001 | |
8d94bbc272b0b39ea3a561671faf696a4851c1a1 | Create app.py | reddit2telegram/channels/MoreTankieChapo/app.py | reddit2telegram/channels/MoreTankieChapo/app.py | #encoding:utf-8
subreddit = 'MoreTankieChapo'
t_channel = '@MoreTankieChapo'
def send_post(submission, r2t):
return r2t.send_simple(submission)
| Python | 0.000003 | |
e5ae14b4438fc7ae15156615206453097b8f759b | add wave test | Python/WaveTest.py | Python/WaveTest.py | import requests
def text2code(text):
'''
convert a string to wave code
'''
ret = None
get_wave_params = {'type' : 'text', 'content' : text}
response = requests.post('http://rest.sinaapp.com/api/post', data=get_wave_params)
if response.status_code == 200:
try:
data = response.json()
ret = data['code']
except: # json() may cause ValueError
pass
return ret
def code2text(code):
'''
convert a wave code to string
'''
ret = None
get_text_params = {'code' : code}
response = requests.get('http://rest.sinaapp.com/api/get', params=get_text_params)
if (response.status_code == 200):
try:
data = response.json()
ret = data['content']
except:
pass
return ret
def main():
text = 'Flame-Team'
code = text2code(text)
if code is not None:
print text + ' to code is ' + code
text_restore = code2text(code)
if text_restore is not None:
print code + ' to text is ' + text_restore
if __name__ == '__main__':
main()
| Python | 0.000007 | |
f6a725b5915575f61fcb7c34ac7b464cd304e7b5 | test mode shows user | dj/scripts/tweet.py | dj/scripts/tweet.py | #!/usr/bin/python
# tweets #client.slug, #video, title and blipurl
# shortens the URL and title if needed
# if over 140 char, url is shortened using bity,
# if still over, title is truncated.
import twitter
import urllib2
import urllib
import time
import pw # see pw_samp.py for sample.
from process import process
# from main.models import Episode, Raw_File, Cut_List
class tweet(process):
ready_state = 5
def shorten(self, url):
return url # hack because auth broke:
## Out[15]: '{\n "errorCode": 203, \n "errorMessage": "You must be authenticated to access shorten", \n "statusCode": "ERROR"\n}'
d=dict(version='2.0.1',login=pw.bitly['user'], apikey=pw.bitly['password'], longurl=url)
q = urllib.urlencode(d)
print q
url = 'http://api.bit.ly/shorten?' + q
data = eval(urllib2.urlopen(url).read())
print data
return data['results'].values()[0]['shorturl']
def mk_tweet(self, prefix, video_name, authors, video_url):
message = ' '.join([prefix, video_name, '-', authors, video_url])
if len(message) > 140:
message = ' '.join([prefix, video_name, video_url])
if len(message) > 140:
short_url = self.shorten(video_url)
message = ' '.join([prefix, video_name, short_url])
if len(message) > 140:
video_name = video_name[:140 - len(message) - 3] + '...'
message = ' '.join([prefix, video_name, short_url])
return message
def process_ep(self, ep):
if self.options.verbose: print ep.id, ep.name
show = ep.show
client = show.client
# use the username for the client, else use the first user in pw.py
user = client.blip_user if client.blip_user else 'nextdayvideo'
blip_url="http://%s.blip.tv/file/%s" % (user,ep.target)
prefix = "#%s #VIDEO" % show.client.slug
tweet = self.mk_tweet(prefix, ep.name, ep.authors, blip_url)
ret=False
if self.options.test:
print 'test mode:'
print 'user:', user
print tweet
else:
print 'tweeting:', tweet
# print user,password
t = pw.twit[user]
api = twitter.Api(consumer_key=t['consumer_key'],
consumer_secret=t['consumer_secret'],
access_token_key=t['access_key'],
access_token_secret=t['access_secret'] )
if self.options.verbose: print api.VerifyCredentials()
status = api.PostUpdate(tweet)
d=status.AsDict()
self.last_tweet = d
self.last_tweet_url = "http://twitter.com/#!/squid/status/%s" % (d["id"], )
print self.last_tweet_url
ret=True
return ret
if __name__ == '__main__':
p=tweet()
p.main()
| #!/usr/bin/python
# tweets #client.slug, #video, title and blipurl
# shortens the URL and title if needed
# if over 140 char, url is shortened using bity,
# if still over, title is truncated.
import twitter
import urllib2
import urllib
import time
import pw # see pw_samp.py for sample.
from process import process
# from main.models import Episode, Raw_File, Cut_List
class tweet(process):
ready_state = 5
def shorten(self, url):
return url # hack because auth broke:
## Out[15]: '{\n "errorCode": 203, \n "errorMessage": "You must be authenticated to access shorten", \n "statusCode": "ERROR"\n}'
d=dict(version='2.0.1',login=pw.bitly['user'], apikey=pw.bitly['password'], longurl=url)
q = urllib.urlencode(d)
print q
url = 'http://api.bit.ly/shorten?' + q
data = eval(urllib2.urlopen(url).read())
print data
return data['results'].values()[0]['shorturl']
def mk_tweet(self, prefix, video_name, authors, video_url):
message = ' '.join([prefix, video_name, '-', authors, video_url])
if len(message) > 140:
message = ' '.join([prefix, video_name, video_url])
if len(message) > 140:
short_url = self.shorten(video_url)
message = ' '.join([prefix, video_name, short_url])
if len(message) > 140:
video_name = video_name[:140 - len(message) - 3] + '...'
message = ' '.join([prefix, video_name, short_url])
return message
def process_ep(self, ep):
if self.options.verbose: print ep.id, ep.name
show = ep.show
client = show.client
# use the username for the client, else use the first user in pw.py
user = client.blip_user if client.blip_user else 'nextdayvideo'
blip_url="http://%s.blip.tv/file/%s" % (user,ep.target)
prefix = "#%s #VIDEO" % show.client.slug
tweet = self.mk_tweet(prefix, ep.name, ep.authors, blip_url)
ret=False
if self.options.test:
print 'test mode:', tweet
else:
print 'tweeting:', tweet
# print user,password
t = pw.twit[user]
api = twitter.Api(consumer_key=t['consumer_key'],
consumer_secret=t['consumer_secret'],
access_token_key=t['access_key'],
access_token_secret=t['access_secret'] )
if self.options.verbose: print api.VerifyCredentials()
status = api.PostUpdate(tweet)
d=status.AsDict()
self.last_tweet = d
self.last_tweet_url = "http://twitter.com/#!/squid/status/%s" % (d["id"], )
print self.last_tweet_url
ret=True
return ret
if __name__ == '__main__':
p=tweet()
p.main()
| Python | 0.000001 |
561f595337106c60c55212dd87d90ed3002de07f | disable pretty json (reduces size by 30%) | runserver.py | runserver.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import logging
from threading import Thread
from pogom import config
from pogom.app import Pogom
from pogom.search import search_loop, set_cover, set_location
from pogom.utils import get_args, insert_mock_data
from pogom.models import create_tables, SearchConfig
from pogom.pgoapi.utilities import get_pos_by_name
log = logging.getLogger(__name__)
def start_locator_thread(args):
search_thread = Thread(target=search_loop, args=(args,))
search_thread.daemon = True
search_thread.name = 'search_thread'
search_thread.start()
if __name__ == '__main__':
args = get_args()
logging.basicConfig(level=logging.INFO, format='%(asctime)s [%(module)11s] [%(levelname)7s] %(message)s')
if not args.debug:
logging.getLogger("peewee").setLevel(logging.INFO)
logging.getLogger("requests").setLevel(logging.WARNING)
logging.getLogger("pogom.pgoapi.pgoapi").setLevel(logging.WARNING)
logging.getLogger("pogom.pgoapi.rpc_api").setLevel(logging.CRITICAL)
logging.getLogger("pogom.models").setLevel(logging.WARNING)
logging.getLogger("werkzeug").setLevel(logging.WARNING)
elif args.debug == "info":
logging.getLogger("pogom.pgoapi.pgoapi").setLevel(logging.INFO)
logging.getLogger("pogom.models").setLevel(logging.INFO)
logging.getLogger("werkzeug").setLevel(logging.INFO)
elif args.debug == "debug":
logging.getLogger("pogom.pgoapi.pgoapi").setLevel(logging.DEBUG)
logging.getLogger("pogom.pgoapi.pgoapi").setLevel(logging.DEBUG)
logging.getLogger("pogom.models").setLevel(logging.DEBUG)
logging.getLogger("werkzeug").setLevel(logging.INFO)
create_tables()
set_location(args.location, args.radius)
set_cover()
if not args.mock:
start_locator_thread(args)
else:
insert_mock_data(config, 6)
app = Pogom(__name__)
config['ROOT_PATH'] = app.root_path
app.config['JSONIFY_PRETTYPRINT_REGULAR'] = False
app.run(threaded=True, debug=args.debug, host=args.host, port=args.port)
| #!/usr/bin/python
# -*- coding: utf-8 -*-
import logging
from threading import Thread
from pogom import config
from pogom.app import Pogom
from pogom.search import search_loop, set_cover, set_location
from pogom.utils import get_args, insert_mock_data
from pogom.models import create_tables, SearchConfig
from pogom.pgoapi.utilities import get_pos_by_name
log = logging.getLogger(__name__)
def start_locator_thread(args):
search_thread = Thread(target=search_loop, args=(args,))
search_thread.daemon = True
search_thread.name = 'search_thread'
search_thread.start()
if __name__ == '__main__':
args = get_args()
logging.basicConfig(level=logging.INFO, format='%(asctime)s [%(module)11s] [%(levelname)7s] %(message)s')
if not args.debug:
logging.getLogger("peewee").setLevel(logging.INFO)
logging.getLogger("requests").setLevel(logging.WARNING)
logging.getLogger("pogom.pgoapi.pgoapi").setLevel(logging.WARNING)
logging.getLogger("pogom.pgoapi.rpc_api").setLevel(logging.CRITICAL)
logging.getLogger("pogom.models").setLevel(logging.WARNING)
logging.getLogger("werkzeug").setLevel(logging.WARNING)
elif args.debug == "info":
logging.getLogger("pogom.pgoapi.pgoapi").setLevel(logging.INFO)
logging.getLogger("pogom.models").setLevel(logging.INFO)
logging.getLogger("werkzeug").setLevel(logging.INFO)
elif args.debug == "debug":
logging.getLogger("pogom.pgoapi.pgoapi").setLevel(logging.DEBUG)
logging.getLogger("pogom.pgoapi.pgoapi").setLevel(logging.DEBUG)
logging.getLogger("pogom.models").setLevel(logging.DEBUG)
logging.getLogger("werkzeug").setLevel(logging.INFO)
create_tables()
set_location(args.location, args.radius)
set_cover()
if not args.mock:
start_locator_thread(args)
else:
insert_mock_data(config, 6)
app = Pogom(__name__)
config['ROOT_PATH'] = app.root_path
app.run(threaded=True, debug=args.debug, host=args.host, port=args.port)
| Python | 0 |
fb1498abaca07e3594d2f24edc1596fb03225dea | Add new package: dnsmasq (#16253) | var/spack/repos/builtin/packages/dnsmasq/package.py | var/spack/repos/builtin/packages/dnsmasq/package.py | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Dnsmasq(MakefilePackage):
"""A lightweight, caching DNS proxy with integrated DHCP server."""
homepage = "http://www.thekelleys.org.uk/dnsmasq/doc.html"
url = "http://www.thekelleys.org.uk/dnsmasq/dnsmasq-2.70.tar.gz"
version('2.81', sha256='3c28c68c6c2967c3a96e9b432c0c046a5df17a426d3a43cffe9e693cf05804d0')
version('2.80', sha256='9e4a58f816ce0033ce383c549b7d4058ad9b823968d352d2b76614f83ea39adc')
version('2.79', sha256='77512dd6f31ffd96718e8dcbbf54f02c083f051d4cca709bd32540aea269f789')
version('2.78', sha256='c92e5d78aa6353354d02aabf74590d08980bb1385d8a00b80ef9bc80430aa1dc')
version('2.77', sha256='ae97a68c4e64f07633f31249eb03190d673bdb444a05796a3a2d3f521bfe9d38')
version('2.76', sha256='777c4762d2fee3738a0380401f2d087b47faa41db2317c60660d69ad10a76c32')
version('2.75', sha256='f8252c0a0ba162c2cd45f81140c7c17cc40a5fca2b869d1a420835b74acad294')
version('2.74', sha256='27b95a8b933d7eb88e93a4c405b808d09268246d4e108606e423ac518aede78f')
version('2.73', sha256='9f350f74ae2c7990b1c7c6c8591d274c37b674aa987f54dfee7ca856fae0d02d')
version('2.72', sha256='635f1b47417d17cf32e45cfcfd0213ac39fd09918479a25373ba9b2ce4adc05d')
version('2.71', sha256='7d8c64f66a396442e01b639df3ea6b4e02ba88cbe206c80be8de68b6841634c4')
version('2.70', sha256='8eb7bf53688d6aaede5c90cfd2afcce04803a4efbddfbeecc6297180749e98af')
def install(self, spec, prefix):
mkdirp(prefix.bin)
install('./src/dnsmasq', prefix.bin)
| Python | 0 | |
7b71bbd87234c8cbe8c7fa189c0617b4ca191989 | Add tweak_billing_log command | silver/management/commands/tweak_billing_log.py | silver/management/commands/tweak_billing_log.py | import datetime as dt
from datetime import datetime
from optparse import make_option
from django.core.management.base import BaseCommand
from django.utils import timezone
from silver.models import Subscription, BillingLog
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('--date',
action='store',
dest='date'),
)
def handle(self, *args, **options):
if options['date']:
date = datetime.strptime(options['date'], '%Y-%m-%d')
else:
now = timezone.now().date()
date = dt.date(now.year, now.month - 1, 1)
for subscription in Subscription.objects.all():
self.stdout.write('Tweaking for subscription %d' % subscription.id)
BillingLog.objects.create(subscription=subscription,
billing_date=date)
| Python | 0.000004 | |
1b023e8471dad22bfb6b8de0d30c0796c30e2a40 | Copy hello.py from add_snippet branch | hello.py | hello.py | import cygroonga as grn
import datetime
with grn.Groonga():
with grn.Context() as ctx:
db = ctx.open_or_create_database("test.db")
table1 = ctx.open_or_create_table("table1",
grn.OBJ_TABLE_HASH_KEY | grn.OBJ_PERSISTENT,
ctx.at(grn.DB_SHORT_TEXT))
print("table1 path: %s" % table1.path())
print("table1 name: %s" % table1.name())
table1.open_or_create_column("column1",
grn.OBJ_PERSISTENT | grn.OBJ_COLUMN_SCALAR,
ctx.at(grn.DB_TEXT))
table1.open_or_create_column("created_at",
grn.OBJ_PERSISTENT | grn.OBJ_COLUMN_SCALAR,
ctx.at(grn.DB_TIME))
id, added = table1.add_record("foo")
print("id=%d, added=%s" % (id, added))
table1.column("column1").set_string(id, "foo1")
table1.column("created_at").set_time(id, datetime.datetime.now())
print("record count=%d" % table1.record_count())
id = table1.get_record("foo")
print("id=%d" % id)
print("column1 value=%s" % table1.column("column1").get_string(id))
print("created_at value=%s" % table1.column("created_at").get_time(id))
index_table1 = ctx.open_or_create_table("table1_index",
grn.OBJ_TABLE_PAT_KEY | grn.OBJ_KEY_NORMALIZE |
grn.OBJ_PERSISTENT,
ctx.at(grn.DB_SHORT_TEXT))
index_table1.set_default_tokenizer("TokenBigram")
index_table1.open_or_create_index_column("table1_index",
grn.OBJ_PERSISTENT | grn.OBJ_COLUMN_INDEX |
grn.OBJ_WITH_POSITION | grn.OBJ_WITH_SECTION,
"table1", ["_key"])
q = table1.create_query()
print("after create_query")
q.parse("_key:@foo", None, grn.OP_MATCH, grn.OP_AND,
grn.EXPR_SYNTAX_QUERY | grn.EXPR_ALLOW_PRAGMA | grn.EXPR_ALLOW_COLUMN)
print("after parse")
records = table1.select(q)
print("matched record count=%d" % records.record_count())
with records.open_table_cursor() as c:
while True:
record_id = c.next()
if not record_id:
break
print("record_id=%d" % record_id)
#db.remove()
| Python | 0 | |
53b6b1f4b7f58b1a7d748f67e220bd4da147df0e | Create hello.py | hello.py | hello.py | def main():
print("Hello!")
| Python | 0.999503 | |
708fe9f6765717e1f1dabce1f9ac9ed56a7cc769 | Add a new pacakage: HiC-Pro. (#7858) | var/spack/repos/builtin/packages/hic-pro/package.py | var/spack/repos/builtin/packages/hic-pro/package.py | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class HicPro(MakefilePackage):
"""HiC-Pro is a package designed to process Hi-C data,
from raw fastq files (paired-end Illumina data)
to the normalized contact maps"""
homepage = "https://github.com/nservant/HiC-Pro"
url = "https://github.com/nservant/HiC-Pro/archive/v2.10.0.tar.gz"
version('2.10.0', '6ae2213dcc984b722d1a1f65fcbb21a2')
depends_on('bowtie2')
depends_on('samtools')
depends_on('python@2.7:2.8')
depends_on('r')
depends_on('py-numpy', type=('build', 'run'))
depends_on('py-scipy', type=('build', 'run'))
depends_on('py-pysam', type=('build', 'run'))
depends_on('py-bx-python', type=('build', 'run'))
depends_on('r-rcolorbrewer', type=('build', 'run'))
depends_on('r-ggplot2', type=('build', 'run'))
def edit(self, spec, prefix):
config = FileFilter('config-install.txt')
config.filter('PREFIX =.*', 'PREFIX = {0}'.format(prefix))
config.filter('BOWTIE2 PATH =.*',
'BOWTIE2_PATH = {0}'.format(spec['bowtie2'].prefix))
config.filter('SAMTOOLS_PATH =.*',
'SAMTOOLS_PATH = {0}'.format(spec['samtools'].prefix))
config.filter('R_PATH =.*',
'R_RPTH ={0}'.format(spec['r'].prefix))
config.filter('PYTHON_PATH =.*',
'PYTHON_RPTH ={0}'.format(spec['python'].prefix))
def build(self, spec, preifx):
make('-f', './scripts/install/Makefile',
'CONFIG_SYS=./config-install.txt')
make('mapbuilder')
make('readstrimming')
make('iced')
def install(sefl, spec, prefix):
# Patch INSTALLPATH in config-system.txt
config = FileFilter('config-system.txt')
config.filter('/HiC-Pro_2.10.0', '')
# Install
install('config-hicpro.txt', prefix)
install('config-install.txt', prefix)
install('config-system.txt', prefix)
install_tree('bin', prefix.bin)
install_tree('annotation', prefix.annotation)
install_tree('doc', prefix.doc)
install_tree('scripts', prefix.scripts)
install_tree('test-op', join_path(prefix, 'test-op'))
| Python | 0 | |
033032260a43a416857b7057bd4fc212422abc51 | Add a simple command line skew-T plotter | notebooks/Command_Line_Tools/skewt.py | notebooks/Command_Line_Tools/skewt.py | # skewt.py - A simple Skew-T plotting tool
import argparse
from datetime import datetime
import matplotlib.pyplot as plt
import metpy.calc as mpcalc
from metpy.io.upperair import get_upper_air_data
from metpy.plots import Hodograph, SkewT
from metpy.units import units
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
import numpy as np
def get_sounding_data(date, station):
ds = get_upper_air_data(date, station)
p = ds.variables['pressure'][:]
T = ds.variables['temperature'][:]
Td = ds.variables['dewpoint'][:]
u = ds.variables['u_wind'][:]
v = ds.variables['v_wind'][:]
windspeed = ds.variables['speed'][:]
return p, T, Td, u, v, windspeed
def plot_sounding(date, station):
p, T, Td, u, v, windspeed = get_sounding_data(date, station)
lcl_pressure, lcl_temperature = mpcalc.lcl(p[0], T[0], Td[0])
lfc_pressure, lfc_temperature = mpcalc.lfc(p, T, Td)
parcel_path = mpcalc.parcel_profile(p, T[0], Td[0]).to('degC')
# Create a new figure. The dimensions here give a good aspect ratio
fig = plt.figure(figsize=(8, 8))
skew = SkewT(fig)
# Plot the data
skew.plot(p, T, color='tab:red')
skew.plot(p, Td, color='tab:green')
# Plot thermodynamic parameters and parcel path
skew.plot(p, parcel_path, color='black')
if lcl_pressure:
skew.ax.axhline(lcl_pressure, color='black')
if lfc_pressure:
skew.ax.axhline(lfc_pressure, color='0.7')
# Add the relevant special lines
skew.ax.axvline(0, color='c', linestyle='--', linewidth=2)
skew.plot_dry_adiabats()
skew.plot_moist_adiabats()
skew.plot_mixing_lines()
# Shade areas representing CAPE and CIN
skew.shade_cin(p, T, parcel_path)
skew.shade_cape(p, T, parcel_path)
# Add wind barbs
skew.plot_barbs(p, u, v)
# Add an axes to the plot
ax_hod = inset_axes(skew.ax, '30%', '30%', loc=1, borderpad=3)
# Plot the hodograph
h = Hodograph(ax_hod, component_range=100.)
# Grid the hodograph
h.add_grid(increment=20)
# Plot the data on the hodograph
mask = (p >= 100 * units.mbar)
h.plot_colormapped(u[mask], v[mask], windspeed[mask]) # Plot a line colored by wind speed
# Set some sensible axis limits
skew.ax.set_ylim(1000, 100)
skew.ax.set_xlim(-40, 60)
return fig, skew
if __name__ == '__main__':
# Parse out the command line arguments
parser = argparse.ArgumentParser(description='''Make an advanced SkewT
plot of upper air observations.''')
parser.add_argument('--date', required=True,
help='Date of the sounding YYYYMMDD')
parser.add_argument('--hour', required=True,
help='Time of the sounding in hours')
parser.add_argument('--station', default='OUN',
help='Station three letter identifier')
parser.add_argument('--savefig', action='store_true',
help='Save out figure instead of displaying it')
parser.add_argument('--imgformat', default='png',
help='Format to save the resulting image as.')
args = parser.parse_args()
# Parse out the date time stamp
date = datetime.strptime('{0}{1}'.format(args.date, args.hour), '%Y%m%d%H')
# Make the sounding figure
fig, skew = plot_sounding(date, args.station)
# Save or show figurexs
if args.savefig:
plt.savefig('{0}_{1}.{2}'.format(args.station,
datetime.strftime(date, '%Y%m%d_%HZ'),
args.imgformat))
else:
plt.show()
| Python | 0.000007 | |
724e86e31b6584012af5afe458e0823b9a2ca7ab | Create a class named "CreateSpark", which is to solove the problem of "Cannot run multiple SparkContexts at once; existing SparkContext(app=spam-msg-classifier, master=local[8]) created by __init__" | myclass/class_create_spark.py | myclass/class_create_spark.py | # -*- coding: utf-8 -*-
# !/usr/bin/python
################################### PART0 DESCRIPTION #################################
# Filename: class_save_word_to_database.py
# Description:
#
# Author: Shuai Yuan
# E-mail: ysh329@sina.com
# Create: 2015-11-17 20:43:09
# Last:
__author__ = 'yuens'
################################### PART1 IMPORT ######################################
import logging
import time
from pyspark import SparkContext, SparkConf
################################### PART2 CLASS && FUNCTION ###########################
class CreateSpark(object):
def __init__(self, pyspark_app_name):
self.start = time.clock()
logging.basicConfig(level = logging.INFO,
format = '%(asctime)s %(levelname)5s %(filename)19s[line:%(lineno)3d] %(funcName)s %(message)s',
datefmt = '%y-%m-%d %H:%M:%S',
filename = './main.log',
filemode = 'a')
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s %(levelname)5s %(filename)19s[line:%(lineno)3d] %(funcName)s %(message)s')
console.setFormatter(formatter)
logging.getLogger('').addHandler(console)
logging.info("START CLASS {class_name}.".format(class_name = CreateSpark.__name__))
# Configure Spark
try:
conf = SparkConf().setAppName(pyspark_app_name).setMaster("local[8]")
self.sc = SparkContext(conf = conf)
logging.info("Start pyspark successfully.")
except Exception as e:
logging.error("Fail in starting pyspark.")
logging.error(e)
def return_spark_context(self):
return self.sc
def __del__(self):
# Close SparkContext
try:
self.sc.stop()
logging.info("close SparkContext successfully.")
except Exception as e:
logging.error(e)
logging.info("END CLASS {class_name}.".format(class_name = CreateSpark.__name__))
self.end = time.clock()
logging.info("The class {class_name} run time is : {delta_time} seconds".format(class_name = CreateSpark.__name__, delta_time = self.end))
################################### PART3 CLASS TEST ##################################
"""
# initialization parameter
pyspark_app_name = "spam-msg-classifier"
SparkCreator = CreateSpark(pyspark_app_name = pyspark_app_name)
pyspark_sc = SparkCreator.return_spark_context()
logging.info("sc.version:{0}".format(pyspark_sc.version))
""" | Python | 0 | |
2bd453c4a7402f24cd43b49e73d0b95e371e6654 | add package Feature/sentieon (#9557) | var/spack/repos/builtin/packages/sentieon-genomics/package.py | var/spack/repos/builtin/packages/sentieon-genomics/package.py | # Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os.path
from spack import *
class SentieonGenomics(Package):
"""Sentieon provides complete solutions for secondary DNA analysis.
Our software improves upon BWA, GATK, Mutect, and Mutect2 based pipelines.
The Sentieon tools are deployable on any CPU-based computing system.
Please set the path to the sentieon license server with:
export SENTIEON_LICENSE=[FQDN]:[PORT]
Note: A manual download is required.
Spack will search your current directory for the download file.
Alternatively, add this file to a mirror so that Spack can find it.
For instructions on how to set up a mirror, see
http://spack.readthedocs.io/en/latest/mirrors.html"""
homepage = "https://www.sentieon.com/"
url = "file://{0}/sentieon-genomics-201808.01.tar.gz".format(os.getcwd())
version('201808.01', sha256='6d77bcd5a35539549b28eccae07b19a3b353d027720536e68f46dcf4b980d5f7')
# Licensing.
license_require = True
license_vars = ['SENTIEON_LICENSE']
def install(self, spec, prefix):
install_tree('bin', prefix.bin)
install_tree('doc', prefix.doc)
install_tree('etc', prefix.etc)
install_tree('lib', prefix.lib)
install_tree('libexec', prefix.libexec)
install_tree('share', prefix.share)
| Python | 0 | |
8c1cd72d11836ad913af5c3614137358ddf3efee | add mgmt cmd to set related user | sources/management/commands/set_related_user.py | sources/management/commands/set_related_user.py | from django.core.management.base import BaseCommand, CommandError
from django.core.mail import send_mail
from django.contrib.auth.models import User
# from sources.models import Person
import random
def set_related_user(email_address, person_id):
obj = Person.objects.get(id=person_id)
try:
user_existing = User.objects.get(email=obj.email_address)
except:
user_existing = False
if user_existing:
obj.related_user = user_existing
else:
username = '{}{}'.format(obj.first_name, obj.last_name).lower().replace('-','')
choices = 'abcdefghijklmnopqrstuvwxyz0123456789'
middle_choices = 'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)'
password = \
''.join([random.SystemRandom().choice(choices) for i in range(1)]) + \
''.join([random.SystemRandom().choice(middle_choices) for i in range(23)]) + \
''.join([random.SystemRandom().choice(choices) for i in range(1)])
user_new = User.objects.create_user(username, password=password)
user_new.email = obj.email_address
user_new.first_name = obj.first_name
user_new.last_name = obj.last_name
user_new.save()
class Command(BaseCommand):
help = 'Set the related user for a Person.'
def add_arguments(self, parser):
## required
parser.add_argument('email',
help='Specify the user emamil.'
)
## optional
# parser.add_argument('-t' '--test',
# action='store_true',
# # type=str,
# dest='test',
# default=False,
# help="Specific whether it's a test or not"
# )
def handle(self, *args, **options):
## unpack args
email_address = options['email']
## call the function
email_add_user(email_address)
| Python | 0 | |
208077afd9b1ba741df6bccafdd5f008e7b75e38 | Add nftables test | meta-iotqa/lib/oeqa/runtime/sanity/nftables.py | meta-iotqa/lib/oeqa/runtime/sanity/nftables.py | import os
import subprocess
from time import sleep
from oeqa.oetest import oeRuntimeTest
class NftablesTest(oeRuntimeTest):
def check_ssh_connection(self):
'''Check SSH connection to DUT port 2222'''
process = subprocess.Popen(("ssh -o UserKnownHostsFile=/dev/null " \
"-o ConnectTimeout=3 " \
"-o StrictHostKeyChecking=no root@" + \
self.target.ip +" -p 2222 ls").split(),
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
output, err = process.communicate()
output = output.decode("utf-8")
returncode = process.returncode
return returncode, output
def add_test_table(self):
self.target.run("nft add table ip test")
self.target.run("nft add chain ip test input {type filter hook input priority 0\;}")
self.target.run("nft add chain ip test donothing")
self.target.run("nft add chain ip test prerouting {type nat hook prerouting priority 0 \;}")
self.target.run("nft add chain ip test postrouting {type nat hook postrouting priority 100 \;}")
def delete_test_table(self):
self.target.run("nft delete table ip test")
def test_reject(self):
'''Test rejecting SSH with nftables'''
self.add_test_table()
self.target.run("nft add rule ip test input tcp dport 2222 reject")
self.target.run("nft add rule ip test input goto donothing")
returncode, output = self.check_ssh_connection()
self.delete_test_table()
self.assertIn("Connection refused", output, msg="Error message: %s" % output)
def test_drop(self):
'''Test dropping SSH with nftables'''
self.add_test_table()
self.target.run("nft add rule ip test input tcp dport 2222 drop")
self.target.run("nft add rule ip test input goto donothing")
returncode, output = self.check_ssh_connection()
self.delete_test_table()
self.assertIn("Connection timed out", output, msg="Error message: %s" % output)
def test_redirect(self):
'''Test redirecting port'''
# Check that SSH can't connect to port 2222
returncode, output = self.check_ssh_connection()
self.assertNotEqual(returncode, 0, msg="Error message: %s" % output)
self.add_test_table()
self.target.run("nft add rule ip test prerouting tcp dport 2222 redirect to 22")
# Check that SSH can connect to port 2222
returncode, output = self.check_ssh_connection()
self.assertEqual(returncode, 0, msg="Error message: %s" % output)
self.delete_test_table()
# Check that SSH can't connect to port 2222
returncode, output = self.check_ssh_connection()
self.assertNotEqual(returncode, 0, msg="Error message: %s" % output)
| Python | 0 | |
3be145af359df5bcf928da1b984af8635ea33c27 | add model for parcels, temp until i figure out psql migrations in flask | farmsList/farmsList/public/models.py | farmsList/farmsList/public/models.py | # -*- coding: utf-8 -*-
from farmsList.database import (
Column,
db,
Model,
ReferenceCol,
relationship,
SurrogatePK,
)
class Parcel(SurrogatePK, Model):
    """A parcel of land that can be listed on the site.

    Only a unique ``name`` is stored for now.
    """

    __tablename__ = 'parcels'
    name = Column(db.String(80), unique=True, nullable=False)

    def __init__(self, name, **kwargs):
        db.Model.__init__(self, name=name, **kwargs)

    def __repr__(self):
        # Bug fix: previously printed '<Role(...)>' — a copy-paste from the
        # Role model; a repr should identify the actual class.
        return '<Parcel({name})>'.format(name=self.name)
| Python | 0 | |
8a3425209090cb9acc6353ab6fccc0ec31cae804 | permutations II | backtracking/47.py | backtracking/47.py | class Solution:
def permuteUnique(self, nums):
"""
:type nums: List[int]
:rtype: List[List[int]]
"""
ret = []
nums.sort()
used = [0]*len(nums)
self.dfs(ret, [], nums, used)
return ret
def dfs(self, ret, temp, nums, used):
if len(temp) == len(nums):
ret.append(temp)
return
for i in range(0, len(nums)):
if used[i]:
continue
# 因为nums可能有重复, 所以对待重复内容需要视作一块整体处理, 也就是当nums[i]与nums[i-1]相同时, 只有nums[i-1]被使用过时, 才可以使用nums[i]
if i > 0 and nums[i] == nums[i-1] and used[i-1]:
continue
used[i] = 1
self.dfs(ret, temp + [nums[i]], nums, used)
used[i] = 0
| Python | 0.999946 | |
a5d63ec0f8f192aaeae8b9a7f1cf423d18de25dc | Add test runner to handle issue with import path | server/test.py | server/test.py | import pytest
pytest.main('-x tests/')
| Python | 0 | |
8632b60718fa353797ffc53281e57a37caf9452f | Add config command for setting the address of rf sensors. | set_address.py | set_address.py | import zmq
import time
import sys
print sys.argv[1:]
# ZeroMQ Context
context = zmq.Context()
sock_live = context.socket(zmq.PUB)
sock_live.connect("tcp://"+sys.argv[1])
time.sleep(1)
# Send multipart only allows send byte arrays, so we convert everything to strings before sending
# [TODO] add .encode('UTF-8') when we switch to python3.
sock_live.send_multipart(["set-address",'pair',sys.argv[2],"0"])
sock_live.close()
| Python | 0 | |
f5f6bc0999d5b6f065adb81982ce3a322e1ab987 | add regression test for fit_spectrum() Python 3.x issue | nmrglue/analysis/tests/test_analysis_linesh.py | nmrglue/analysis/tests/test_analysis_linesh.py | import numpy as np
import nmrglue as ng
from nmrglue.analysis.linesh import fit_spectrum
def test_fit_spectrum():
    """Smoke test: fit_spectrum() on uniform-noise data with fixed
    Gaussian peak guesses should run to completion without raising.

    NOTE(review): no assertions are made on the returned values, and the
    local ``verb`` is unused (the call passes ``verb=False`` literally).
    """
    # Synthetic 1D "spectrum" of uniform noise.
    _bb = np.random.uniform(0, 77, size=65536)
    lineshapes = ['g']
    # One (position, width) initial guess per peak region.
    params = [[(13797.0, 2.2495075273313034)],
              [(38979.0, 5.8705185693227664)],
              [(39066.0, 5.7125954296137103)],
              [(39153.0, 5.7791485451283791)],
              [(41649.0, 4.260242375400459)],
              [(49007.0, 4.2683625950679964)],
              [(54774.0, 3.2907139764685569)]]
    amps = [35083.008667, 32493.824402, 32716.156556, 33310.711914, 82682.928405,
            82876.544313, 85355.658142]
    # Position unbounded, width bounded below by zero, for every peak.
    bounds = [[[(None, None), (0, None)]], [[(None, None), (0, None)]],
              [[(None, None), (0, None)]], [[(None, None), (0, None)]],
              [[(None, None), (0, None)]], [[(None, None), (0, None)]],
              [[(None, None), (0, None)]]]
    ampbounds = [None, None, None, None, None, None, None]
    centers = [(13797.0,), (38979.0,), (39066.0,), (39153.0,), (41649.0,),
               (49007.0,), (54774.0,)]
    rIDs = [1, 2, 3, 4, 5, 6, 7]
    box_width = (5,)
    error_flag = False
    verb = False
    params_best, amp_best, iers = ng.linesh.fit_spectrum(
        _bb, lineshapes, params, amps, bounds, ampbounds, centers,
        rIDs, box_width, error_flag, verb=False)
| Python | 0 | |
c5a2167a63516c23390263408fcd2c9a4f654fc8 | Add tests for the parse method of the spider | webcomix/tests/test_comic_spider.py | webcomix/tests/test_comic_spider.py | from webcomix.comic_spider import ComicSpider
def test_parse_yields_good_page(mocker):
    """parse() on a page containing a comic image yields the image URL
    and a request for the next page."""
    mock_response = mocker.patch('scrapy.http.Response')
    mock_response.urljoin.return_value = "http://xkcd.com/3/"
    mock_response.url = "http://xkcd.com/2/"
    mock_selector = mocker.patch('scrapy.selector.SelectorList')
    mock_response.xpath.return_value = mock_selector
    # First extract_first() call returns the comic image (protocol-relative),
    # second returns the next-page link.
    mock_selector.extract_first.side_effect = [
        '//imgs.xkcd.com/comics/tree_cropped_(1).jpg', 'xkcd.com/3/'
    ]
    spider = ComicSpider()
    result = spider.parse(mock_response)
    results = list(result)
    assert len(results) == 2
    # The protocol-relative image URL gets normalised to http.
    assert results[0].get(
        'url') == "http://imgs.xkcd.com/comics/tree_cropped_(1).jpg"
    assert results[1].url == "http://xkcd.com/3/"
def test_parse_yields_bad_page(mocker):
    """parse() on a page with no comic image still yields the request
    for the next page (and no image item)."""
    mock_response = mocker.patch('scrapy.http.Response')
    mock_response.urljoin.return_value = "http://xkcd.com/3/"
    mock_response.url = "http://xkcd.com/2/"
    mock_selector = mocker.patch('scrapy.selector.SelectorList')
    mock_response.xpath.return_value = mock_selector
    # No image found (None), but the next-page link is present.
    mock_selector.extract_first.side_effect = [None, 'xkcd.com/3/']
    spider = ComicSpider()
    result = spider.parse(mock_response)
    results = list(result)
    assert len(results) == 1
    assert results[0].url == "http://xkcd.com/3/"
| Python | 0 | |
fd5da951feee92c055853c63b698b44397ead6be | Add save function for use across the application | app/db_instance.py | app/db_instance.py | from app import db
def save(data):
    """Persist a model instance by adding it to the session and committing.

    Rolls the session back on failure (a failed flush/commit leaves a
    SQLAlchemy session unusable until rollback) and re-raises the
    original exception for the caller to handle.
    """
    try:
        db.session.add(data)
        db.session.commit()
    except Exception:
        db.session.rollback()
        # Bare raise preserves the original exception and traceback
        # (clearer than the previous ``raise e``); the debug print of the
        # raw instance has been removed.
        raise
| Python | 0 | |
585fec12673ab0207f5b641a9ba0df4a510667ac | Add harvester for mblwhoilibrary | scrapi/harvesters/mblwhoilibrary.py | scrapi/harvesters/mblwhoilibrary.py | '''
Harvester for the WHOAS at MBLWHOI Library for the SHARE project
Example API call: http://darchive.mblwhoilibrary.org/oai/request?verb=ListRecords&metadataPrefix=oai_dc
'''
from __future__ import unicode_literals
from scrapi.base import helpers
from scrapi.base import OAIHarvester
class MblwhoilibraryHarvester(OAIHarvester):
    """OAI-PMH harvester for WHOAS, the MBLWHOI Library repository."""

    short_name = 'mblwhoilibrary'
    long_name = 'WHOAS at MBLWHOI Library'
    url = 'http://darchive.mblwhoilibrary.org/oai/request'

    @property
    def schema(self):
        # Extend the base OAI schema: extract DOIs for the harvested
        # object out of the <dc:relation> elements.
        return helpers.updated_schema(self._schema, {
            "uris": {
                "objectUris": ('//dc:relation/node()', helpers.oai_extract_dois)
            }
        })

    base_url = 'http://darchive.mblwhoilibrary.org/oai/request'
    property_list = ['date', 'relation', 'identifier', 'type', 'format', 'setSpec']
    # The provider's OAI datestamps include a time component.
    timezone_granularity = True
| Python | 0.000001 | |
f5cc9c86b1cfbb2cda1b4c1c4c8656a6ca7a2a7f | Create graphingWIP.py | src/graphingWIP.py | src/graphingWIP.py | # -*- coding: utf-8 -*-
"""Plot temperature readings from temp.log against time.

Each log line is expected to look like ``<timestamp> = <value>C``:
everything before '=' is a timestamp, everything after is a numeric
temperature with a trailing unit character.
"""
import matplotlib.pyplot as plt
import matplotlib.dates as md
import dateutil

x = []
y = []

# Fix: use a context manager so the log file is closed even if a
# malformed line raises during parsing (the old code used open()/close()).
with open("temp.log", "r") as f:
    for line in f:
        timestamp_part, value_part = line.split('=')
        x.append(dateutil.parser.parse(timestamp_part[:-1]))  # drop trailing space
        y.append(float(value_part[1:-2]))  # drop leading space and trailing "C\n"

ax = plt.gca()
xfmt = md.DateFormatter('%d/%m/%Y %H:%M:%S')
ax.xaxis.set_major_formatter(xfmt)

plt.plot(x, y)
# Fix: corrected the user-facing "Temprature" spelling in the labels.
plt.title('Temperature against time')
plt.xlabel('Date and Time (DD/MM/YYYY HH:MM:SS)')
plt.ylabel('Temperature C')
plt.show()
| Python | 0 | |
534fcff9f812df4cef273ca7853df12647b25d06 | Add preliminary metrics file and import some from sklearn | metrics.py | metrics.py | from sklern.metrics import roc_curve as roc, roc_auc_score as auc
def enrichment_factor():
pass
def log_auc():
pass
| Python | 0 | |
3c45de2506d6fd86ad96ee9f2e1b5b773aad82d9 | split out common functionality | fabfile/common.py | fabfile/common.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Common pieces that work on all Nix OS'
.. module:: common
:platform: Linux, MacOS
.. moduleauthor:: John P. Neumann
.. note::
None
"""
# Built In
import os
import sys
# Third Party
from fabric.api import local, cd, task, execute
# Custom
@task
def setup_devdirs():
    """Creates all of our directories."""
    home = os.path.expanduser('~')
    dirpaths = ['go', 'src', 'bin', 'envs', 'repos']
    # Vim's swap/undo dirs are always wanted, so create them first.
    execute(setup_vimdirs)
    for pth in dirpaths:
        _create_dir(os.path.join(home, pth))
@task
def setup_vimdirs():
    """Sets up vim directories."""
    home = os.path.expanduser('~')
    # Swap/undo locations — presumably matching directory settings in the
    # repo's .vimrc; confirm.
    _create_dir(os.path.join(home, '.vim_swap'))
    _create_dir(os.path.join(home, '.vim_undo'))
@task
def powerline_fonts(repo_dir):
    """Download and install the powerline fonts.

    :param repo_dir: The base directory to check the repo out to.
    :type repo_dir: str
    :returns: None
    """
    execute(setup_devdirs)
    with cd(repo_dir):
        # Clone only if the checkout is missing; the installer runs on
        # every invocation.
        if not os.path.exists(os.path.join(repo_dir, 'powerline-fonts')):
            local('git clone git@github.com:powerline/fonts.git powerline-fonts')
        with cd('powerline-fonts'):
            local('./install.sh')
@task
def dotfiles(repo_dir):
    """Download dotfiles and create our symlinks.

    :param repo_dir: The base directory to check the repo out to.
    :type repo_dir: str
    :returns: None
    """
    execute(setup_devdirs)
    dotfiles_dir = os.path.join(
        repo_dir, 'dotfiles'
    )
    # Abort rather than clobber an existing checkout.
    if os.path.exists(dotfiles_dir):
        sys.exit('dotfiles repo already exists')
    with cd(repo_dir):
        local('git clone git@github.com:johnpneumann/dotfiles.git')
@task
def dotfiles_symlinks(repo_dir):
    """Creates all of our dotfile symlinks.

    :param repo_dir: The base directory to check the repo out to.
    :type repo_dir: str
    :returns: None
    """
    # Maps each symlink name (created in $HOME) to the name any
    # pre-existing file is backed up under before linking.
    linkage = {
        '.bash_aliases': 'bash_aliases_prev',
        '.bash_profile': 'bash_profile_prev',
        '.bashrc': 'bashrc_prev', '.profile': 'profile_prev',
        '.vimrc': 'vimrc_prev', '.vim': 'vim_prev',
        'iterm2_prefs': 'iterm2_prefs_prev',
        'public_scripts': 'public_scripts_prev'
    }
    home_dir = os.path.expanduser('~')
    for key, value in linkage.items():
        dest = os.path.join(home_dir, key)
        backup = os.path.join(home_dir, value)
        source = os.path.join(repo_dir, key)
        _create_symlinks(
            source=source, destination=dest, backup=backup
        )
@task
def vundle_install():
    """Install vundle and all of the plugins."""
    local('git clone https://github.com/VundleVim/Vundle.vim.git ~/.vim/bundle/Vundle.vim')
    # Headless vim run: install all configured plugins, then quit.
    local('vim +PluginInstall +qall')
def _create_symlinks(source, destination, backup):
    """Symlink *source* to *destination*, backing up any existing target.

    :param source: The source file.
    :type source: str
    :param destination: The destination for the symlink.
    :type destination: str
    :param backup: The destination to backup the file to if it exists.
    :type backup: str
    :returns: None
    """
    # Nothing to link if the source is absent.
    if not os.path.exists(source):
        return
    if os.path.exists(destination):
        local('mv {dst} {bckp}'.format(dst=destination, bckp=backup))
    local('ln -s {src} {dst}'.format(src=source, dst=destination))
def _create_dir(path):
    """Create *path* (and parents) unless it already exists.

    :param path: The path to the directory to create.
    :type path: str
    :returns: None
    """
    if os.path.exists(path):
        sys.stdout.write('{path} exists\n'.format(path=path))
    else:
        local('mkdir -p {pth}'.format(pth=path))
| Python | 0.999847 | |
40e9825ee0a2ccf7c3e92d4fd6599c1976a240a3 | Add deprecated public `graphql` module | fbchat/graphql.py | fbchat/graphql.py | # -*- coding: UTF-8 -*-
"""This file is here to maintain backwards compatability."""
from __future__ import unicode_literals
from .models import *
from .utils import *
from ._graphql import (
FLAGS,
WHITESPACE,
ConcatJSONDecoder,
graphql_color_to_enum,
get_customization_info,
graphql_to_sticker,
graphql_to_attachment,
graphql_to_extensible_attachment,
graphql_to_subattachment,
graphql_to_live_location,
graphql_to_poll,
graphql_to_poll_option,
graphql_to_plan,
graphql_to_quick_reply,
graphql_to_message,
graphql_to_user,
graphql_to_thread,
graphql_to_group,
graphql_to_page,
graphql_queries_to_json,
graphql_response_to_json,
GraphQL,
)
| Python | 0.000001 | |
f98aa5f336cd81ad55bc46122821df3ad314a4cb | Add py-dockerpy-creds (#19198) | var/spack/repos/builtin/packages/py-dockerpy-creds/package.py | var/spack/repos/builtin/packages/py-dockerpy-creds/package.py | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyDockerpyCreds(PythonPackage):
    """Python bindings for the docker credentials store API """

    homepage = "https://github.com/shin-/dockerpy-creds"
    url = "https://github.com/shin-/dockerpy-creds/archive/0.4.0.tar.gz"

    # sha256 checksums of the GitHub /archive/ release tarballs.
    version('0.4.0', sha256='c76c2863c6e9a31b8f70ee5b8b0e5ac6860bfd422d930c04a387599e4272b4b9')
    version('0.3.0', sha256='3660a5e9fc7c2816ab967e4bdb4802f211e35011357ae612a601d6944721e153')
    version('0.2.3', sha256='7278a7e3c904ccea4bcc777b991a39cac9d4702bfd7d76b95ff6179500d886c4')
    version('0.2.2', sha256='bb26b8a8882b9d115a43169663cd9557d132a68147d9a1c77cb4a3ffc9897398')
    version('0.2.1', sha256='7882efd95f44b5df166b4e34c054b486dc7287932a49cd491edf406763695351')
    version('0.2.0', sha256='f2838348e1175079e3062bf0769b9fa5070c29f4d94435674b9f8a76144f4e5b')
    version('0.1.0', sha256='f7ab290cb536e7ef1c774d4eb5df86237e579a9c7a87805da39ff07bd14e0aff')

    # Build needs setuptools; six is a runtime dependency as well.
    depends_on('python@2.0:2.8,3.4:', type=('build', 'run'))
    depends_on('py-setuptools', type='build')
    depends_on('py-six', type=('build', 'run'))
| Python | 0 | |
07a1612250a9c3b2de1ffe53fb916a8cff153c3f | add count of collisions | findCollisions.py | findCollisions.py | from collections import Counter
def countCollisions(entries):
    """Count the distinct values that occur more than once in *entries*.

    Prints the duplicated values, then returns how many there are.
    """
    counts = Counter(entries)
    duplicates = []
    for value, occurrences in counts.items():
        if occurrences > 1:
            duplicates.append(value)
    total = len(duplicates)
    print(total, 'word collisions:\n', duplicates)
    return total
def countCollisionsInFile(filename):
    """Count duplicate words read from a CSV-style file.

    The word is taken from each line's second comma-separated field and
    the leading " '" (space + quote) is stripped from it.
    NOTE(review): only the opening quote is removed — a trailing quote,
    if present, survives; confirm against the actual file format.
    """
    entries = []
    with open(filename,'r') as f:
        for line in f:
            # get just the words
            entries.append(line.split(',')[1].replace(' \'',''))
    return countCollisions(entries)
def countCollisionsInList(entries):
    """Thin alias for countCollisions(), kept for a symmetric API."""
    return countCollisions(entries)
| Python | 0.000017 | |
4a0fa1028f22944f30e39c65806f0d123e18420f | Create input.py | input.py | input.py | ckey=""
csecret=""
atoken=""
asecret=""
query='' #Add keyword for which you want to start the miner
| Python | 0 | |
98499f07c6dcccba3605e9ab9c8eaef9463b0634 | Add some validators | indra/tools/stmt_validator.py | indra/tools/stmt_validator.py | class StatementValidator:
def __init__(self):
class DbRefsEntryValidator:
    """Interface for validators of a single db_refs entry."""

    @staticmethod
    def validate(entry):
        """Return True if *entry* is acceptable; subclasses must override."""
        raise NotImplementedError()
class ChebiPrefix(DbRefsEntryValidator):
    """Accepts missing/empty entries or ones carrying the CHEBI prefix."""

    @staticmethod
    def validate(entry):
        # An absent entry is fine; a present one must start with 'CHEBI'.
        if not entry:
            return True
        return entry.startswith('CHEBI')
class UniProtIDNotList(DbRefsEntryValidator):
    """Rejects entries that are not plain strings or look like comma lists."""

    @staticmethod
    def validate(entry):
        # A single UniProt ID: a string containing no comma-separated list.
        return isinstance(entry, str) and ',' not in entry
ba1f04337d0653d4808427b5d07ed8673526b315 | add mygpo.wsgi | mygpo.wsgi | mygpo.wsgi | #!/usr/bin/python
# -*- coding: utf-8 -*-
# my.gpodder.org FastCGI handler for lighttpd (default setup)
#
# This file is part of my.gpodder.org.
#
# my.gpodder.org is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
#
# my.gpodder.org is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public
# License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with my.gpodder.org. If not, see <http://www.gnu.org/licenses/>.
import sys
import os
# Imported eagerly — presumably to dodge the CPython issue where the
# first strptime() call made from a thread can fail on lazy import;
# confirm before removing.
import _strptime

# Add this directory as custom Python path
mygpo_root = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, mygpo_root)
sys.path.insert(0, os.path.join(mygpo_root, 'lib'))

# Set the DJANGO_SETTINGS_MODULE environment variable
os.environ['DJANGO_SETTINGS_MODULE'] = 'mygpo.settings'

import django.core.handlers.wsgi
# WSGI entry point looked up by the web server.
application = django.core.handlers.wsgi.WSGIHandler()
| Python | 0.000135 | |
1086259090a396b2a2ed40788d1cb8c8ff7c95f3 | fix the fixme | src/robotide/plugins/connector.py | src/robotide/plugins/connector.py | # Copyright 2008-2009 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from robotide.context import LOG, SETTINGS
from robotide import utils
def PluginFactory(application, plugin_class):
    """Instantiate *plugin_class*, wrapping construction failures.

    Returns a PluginConnector on success, or a disabled BrokenPlugin
    (with the error recorded) if the plugin's constructor raises.
    """
    try:
        plugin = plugin_class(application)
    except Exception, err:
        return BrokenPlugin(str(err), plugin_class)
    else:
        return PluginConnector(application, plugin)
class _PluginConnector(object):
    """Common state shared by working and broken plugin connectors."""

    def __init__(self, name, doc='', error=None):
        self.name = name
        self.doc = doc
        self.error = error
        self.active = False
        self.metadata = {}
        # Default: no configuration panel.
        self.config_panel = lambda self: None
class PluginConnector(_PluginConnector):
    """Connector for a successfully constructed plugin.

    Persists the plugin's activation state in its own settings section
    and activates it if it was previously active (or active by default).
    """

    def __init__(self, application, plugin):
        _PluginConnector.__init__(self, plugin.name, plugin.doc)
        self._plugin = plugin
        # add_section creates the per-plugin section when it is missing.
        self._settings = SETTINGS['Plugins'].add_section(plugin.name)
        self.config_panel = plugin.config_panel
        self.metadata = plugin.metadata
        if self._settings.get('_active', plugin.initially_active):
            self.activate()

    def activate(self):
        """Activate the plugin and remember the state in settings."""
        self._plugin.activate()
        self.active = True
        self._settings.set('_active', True)

    def deactivate(self):
        """Deactivate the plugin and remember the state in settings."""
        self._plugin.deactivate()
        self.active = False
        self._settings.set('_active', False)
class BrokenPlugin(_PluginConnector):
    """Placeholder for a plugin whose construction raised; stays disabled."""

    def __init__(self, error, plugin_class):
        name = utils.name_from_class(plugin_class, 'Plugin')
        doc = 'This plugin is disabled because it failed to load properly.\n' \
              + 'Error: ' + error
        _PluginConnector.__init__(self, name, doc=doc, error=error)
        LOG.error("Taking %s plugin into use failed:\n%s" % (name, error))
| # Copyright 2008-2009 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from robotide.context import LOG, SETTINGS
from robotide import utils
def PluginFactory(application, plugin_class):
try:
plugin = plugin_class(application)
except Exception, err:
return BrokenPlugin(str(err), plugin_class)
else:
return PluginConnector(application, plugin)
class _PluginConnector(object):
def __init__(self, name, doc='', error=None):
self.name = name
self.doc = doc
self.error = error
self.active = False
self.metadata = {}
self.config_panel = lambda self: None
class PluginConnector(_PluginConnector):
    """Connector for a successfully constructed plugin (older revision).

    NOTE(review): this revision reads SETTINGS['Plugins'][plugin.name]
    directly (see FIXME below) and breaks when the per-plugin section
    does not exist yet.
    """

    def __init__(self, application, plugin):
        _PluginConnector.__init__(self, plugin.name, plugin.doc)
        self._plugin = plugin
        # FIXME: breaks in case the section does not exist
        self._settings = SETTINGS['Plugins'][plugin.name]
        self.config_panel = plugin.config_panel
        self.metadata = plugin.metadata
        if self._settings.get('_active', plugin.initially_active):
            self.activate()

    def activate(self):
        """Activate the plugin and remember the state in settings."""
        self._plugin.activate()
        self.active = True
        self._settings.set('_active', True)

    def deactivate(self):
        """Deactivate the plugin and remember the state in settings."""
        self._plugin.deactivate()
        self.active = False
        self._settings.set('_active', False)
class BrokenPlugin(_PluginConnector):
def __init__(self, error, plugin_class):
name = utils.name_from_class(plugin_class, 'Plugin')
doc = 'This plugin is disabled because it failed to load properly.\n' \
+ 'Error: ' + error
_PluginConnector.__init__(self, name, doc=doc, error=error)
LOG.error("Taking %s plugin into use failed:\n%s" % (name, error))
| Python | 0.000023 |
f89fd041b7f263ac944df4a65fb4bbd21d5b2998 | Modify test case name and add methods for improving code coverage | tests/python-test-library/testcases/contextGetPropertiesTCs.py | tests/python-test-library/testcases/contextGetPropertiesTCs.py | #!/usr/bin/env python2.5
##
## @file contextOrientationTCs.py
##
## Copyright (C) 2008 Nokia. All rights reserved.
##
##
##
##
## Requires python2.5-gobject and python2.5-dbus
##
## Implements also some testing API:
##
##
import os
import sys
from time import sleep
import unittest
import traceback
import dbus
import dbus_emulator
import conf
class ContextHelpers(unittest.TestCase):
    """Fixture: drives the MCE emulator over D-Bus and reads context
    properties back through the ContextKit manager."""

    def setUp(self):
        """Connect to the system bus and grab the MCE request interface."""
        #print os.environ['DBUS_SYSTEM_BUS_ADDRESS']
        #print os.environ['DBUS_SESSION_BUS_ADDRESS']
        self.sysBus = dbus.SystemBus()
        try:
            object = self.sysBus.get_object("com.nokia.mce","/com/nokia/mce/request")
            self.ifceMCE = dbus.Interface(object,"com.nokia.mce.request")
        except dbus.DBusException:
            # NOTE(review): sys.exit in setUp aborts the whole test run
            # instead of reporting a single test error.
            traceback.print_exc()
            sys.exit(1)

    def tearDown(self):
        pass
        #try:
        #    self.ifceMCE.Exit()
        #except dbus.DBusException:
        #    traceback.print_exc()
        #    sys.exit(1)

    def setFacing(self,facingState):
        """Tell the emulated MCE the device facing ("face_up"/"face_down")."""
        try:
            self.ifceMCE.req_device_facing_change(facingState)
        except dbus.DBusException:
            traceback.print_exc()
            sys.exit(1)

    def setRotation(self,rotationState):
        """Tell the emulated MCE the rotation ("portrait"/"landscape")."""
        try:
            self.ifceMCE.req_device_rotation_change(rotationState)
        except dbus.DBusException:
            traceback.print_exc()
            sys.exit(1)

    def setCoord(self, x, y, z):
        """Feed raw accelerometer coordinates to the emulated MCE."""
        try:
            self.ifceMCE.req_device_coord_change(x, y, z)
        except dbus.DBusException:
            traceback.print_exc()
            sys.exit(1)

    def getProp(self,properties):
        """Fetch *properties* from the ContextKit manager (session bus)."""
        bus = dbus.SessionBus()
        try:
            object = bus.get_object("org.freedesktop.ContextKit","/org/freedesktop/ContextKit/Manager")
            iface = dbus.Interface(object,"org.freedesktop.ContextKit.Manager")
            ret = iface.Get(properties)
        except dbus.DBusException, ex:
            # Re-raise so callers (e.g. assertRaises) see the failure.
            raise
        return ret
class DeviceOrientation(ContextHelpers):
    """Checks the Orientation context properties against emulated MCE state.

    Bug fix: the original used ``self.assert_(value, expected)``, but the
    second argument of assert_ is a failure *message*, so the expected
    value was never actually compared (and truthiness checks silently
    passed/failed for the wrong reason). All checks now use assertEqual.
    """

    def test_faceup(self):
        self.setFacing("face_up")
        prop = ['Context.Device.Orientation.facingUp']
        struct = self.getProp(prop)
        self.assertEqual(struct['Context.Device.Orientation.facingUp'][1], True)

    def test_facedown(self):
        self.setFacing("face_down")
        prop = ['Context.Device.Orientation.facingUp']
        struct = self.getProp(prop)
        self.assertEqual(struct['Context.Device.Orientation.facingUp'][1], False)

    def test_orientationEdgeLeft(self):
        self.setFacing("face_up")
        self.setRotation("portrait")
        self.setCoord(-1000, 20, -10)
        prop = ['Context.Device.Orientation.facingUp','Context.Device.Orientation.edgeUp']
        struct = self.getProp(prop)
        self.assertEqual(struct['Context.Device.Orientation.facingUp'][1], True)
        # NOTE(review): confirm the D-Bus value type — if edgeUp arrives
        # as an integer, compare against 2 rather than "2".
        self.assertEqual(struct['Context.Device.Orientation.edgeUp'][1], "2")

    def test_orientationEdgeRight(self):
        self.setFacing("face_up")
        self.setRotation("portrait")
        self.setCoord(1000, 20, -10)
        prop = ['Context.Device.Orientation.facingUp','Context.Device.Orientation.edgeUp']
        struct = self.getProp(prop)
        self.assertEqual(struct['Context.Device.Orientation.facingUp'][1], True)
        self.assertEqual(struct['Context.Device.Orientation.edgeUp'][1], "3")

    def test_orientationEdgeUp(self):
        self.setFacing("face_down")
        self.setCoord(20, -1000, 10)
        self.setRotation("landscape")
        prop = ['Context.Device.Orientation.facingUp','Context.Device.Orientation.edgeUp']
        struct = self.getProp(prop)
        self.assertEqual(struct['Context.Device.Orientation.facingUp'][1], False)
        self.assertEqual(struct['Context.Device.Orientation.edgeUp'][1], "1")

    def test_orientationEdgeDown(self):
        self.setFacing("face_down")
        self.setCoord(20, 1000, 10)
        self.setRotation("landscape")
        prop = ['Context.Device.Orientation.facingUp','Context.Device.Orientation.edgeUp']
        struct = self.getProp(prop)
        self.assertEqual(struct['Context.Device.Orientation.facingUp'][1], False)
        self.assertEqual(struct['Context.Device.Orientation.edgeUp'][1], "4")

    def test_inexistentProperties(self):
        # Asking for an unknown property should propagate the D-Bus error.
        prop = ['Context.Device.Something']
        self.assertRaises(dbus.DBusException,self.getProp,prop)
def testRun():
    """Run the DeviceOrientation suite with a verbose text runner."""
    suite = unittest.TestLoader().loadTestsFromTestCase(DeviceOrientation)
    unittest.TextTestRunner(verbosity=2).run(suite)
| Python | 0 | |
0d22f1ab7f4c83af280edb799f863fa0f46ea326 | Create generic views for index/login | app/views.py | app/views.py | from flask import render_template, flash, redirect
from app import app
from .forms.login import LoginForm
@app.route('/')
@app.route('/index')
def index():
    """Render the home page for a hard-coded placeholder user."""
    user = {'nickname': 'Mark'} # fake user
    return render_template("index.html",
                           title='Home',
                           user=user)
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Show the sign-in form; on a valid POST, flash the data and go home.

    NOTE(review): no authentication happens yet — the flash only echoes
    the submitted form for debugging.
    """
    form = LoginForm()
    if form.validate_on_submit():
        # Debug Print
        flash('Login requested for Username="%s", remember_me=%s' %
              (form.username.data, str(form.remember_me.data)))
        return redirect('/index')
    return render_template('login.html',
                           title='Sign In',
                           form=form)
| Python | 0 | |
45939892a21bbf11ddcd1400d26cf2e94fa8ebac | add nox tests. | noxfile.py | noxfile.py | import nox
PYTHON_VERSIONS = ["3.6", "3.7", "3.8"]
PACKAGE = "abilian"
@nox.session(python="python3.6")
def lint(session):
    """Install the project's dependencies and run the CI lint target."""
    # session.env["LC_ALL"] = "en_US.UTF-8"
    session.install("poetry", "psycopg2-binary")
    session.run("poetry", "install", "-q")
    # Fix: ``external`` expects a boolean; the string "True" only worked
    # because any non-empty string is truthy.
    session.run("yarn", external=True)
    # NOTE(review): "make" is also an external binary — confirm whether it
    # needs external=True on this nox version.
    session.run("make", "lint-ci")
@nox.session(python=PYTHON_VERSIONS)
def pytest(session):
    """Install dependencies and run the test suite on each Python version."""
    # session.env["LC_ALL"] = "en_US.UTF-8"
    session.install("psycopg2-binary")
    cmd = "echo ; echo SQLALCHEMY_DATABASE_URI = $SQLALCHEMY_DATABASE_URI ; echo"
    session.run("sh", "-c", cmd, external=True)
    # Fix: pass ``external`` as a boolean, consistent with the call above
    # (the string "True" merely happened to be truthy).
    session.run("poetry", "install", "-q", external=True)
    session.run("yarn", external=True)
    session.run("pip", "check")
    session.run("pytest", "-q")
# TODO later
# @nox.session(python="3.8")
# def typeguard(session):
# # session.env["LC_ALL"] = "en_US.UTF-8"
# session.install("psycopg2-binary")
# session.run("poetry", "install", "-q", external="True")
# session.run("yarn", external="True")
# session.run("pytest", f"--typeguard-packages={PACKAGE}")
| Python | 0 | |
5addf2c2992cfdedf06da58861dae93347e02fb9 | Support for nox test runner (alternative to tox), provides a workaround for #80. | noxfile.py | noxfile.py | # -*- coding: utf-8 -*-
#
# Copyright (c) 2016 - 2020 -- Lars Heuer
# All rights reserved.
#
# License: BSD License
#
"""\
Nox test runner configuration.
"""
import os
from functools import partial
import shutil
import nox
@nox.session(python="3")
def docs(session):
    """\
    Build the documentation.
    """
    session.install('-Ur', 'requirements.rtd')
    # Build into a session-local tmp dir, wiped first so stale output
    # never leaks into a fresh build.
    output_dir = os.path.abspath(os.path.join(session.create_tmp(), 'output'))
    doctrees, html, man = map(partial(os.path.join, output_dir), ['doctrees', 'html', 'man'])
    shutil.rmtree(output_dir, ignore_errors=True)
    session.install('.')
    session.cd('docs')
    # -W turns Sphinx warnings into build failures.
    session.run('sphinx-build', '-W', '-b', 'html', '-d', doctrees, '.', html)
    session.run('sphinx-build', '-W', '-b', 'man', '-d', doctrees, '.', man)
@nox.session(python='3')
def coverage(session):
    """\
    Run coverage.
    """
    session.install('coverage', '-Ur', 'requirements.testing.txt')
    session.install('.')
    session.run('coverage', 'erase')
    session.run('coverage', 'run', './tests/alltests.py')
    # Restrict reports to the package's own modules.
    session.run('coverage', 'report', '--include=segno*')
    session.run('coverage', 'html', '--include=segno*')
@nox.session(python=['2.7', '3.7', 'pypy', 'pypy3'])
def test(session):
    """\
    Run test suite.
    """
    if session.python == 'pypy':
        # See <https://github.com/heuer/segno/issues/80>
        # Pin pip 20.1 on pypy2 — newer pip releases break there,
        # presumably; see the linked issue for details.
        session.run('pip', 'uninstall', '-y', 'pip')
        session.run('easy_install', 'pip==20.1')
    session.install('-Ur', 'requirements.testing.txt')
    session.install('.')
    session.run('py.test')
| Python | 0 | |
510b90d42dbccd0aa1e3ff48ee8dbe7230b65185 | Add script to compute some stats about data from energy consumption measures | get_stats_from.py | get_stats_from.py | import argparse
import csv
from glob import glob
import re
import statistics
import sys
def get_stats_from(files_names, files_content):
    """Print mean/median/mode/stdev/variance for each file's samples.

    :param files_names: labels, parallel to *files_content*
    :param files_content: one list of float samples per file
    """
    for name, samples in zip(files_names, files_content):
        print("FILE : {0}".format(name))
        print("\t*MEAN : {0}".format(statistics.mean(samples)))
        print("\t*MEDIAN : {0}".format(statistics.median(samples)))
        try:
            print("\t*MOST TYPICAL VALUE : {0}".format(statistics.mode(samples)))
        except statistics.StatisticsError:
            # Fix: was a bare ``except`` that also swallowed unrelated
            # errors; mode() raises StatisticsError on multimodal data
            # (on Python < 3.8).
            print("2 most typical values!")
        print("\t*STANDARD DEVIATION : {0}".format(statistics.stdev(samples)))
        print("\t*VARIANCE : {0}".format(statistics.variance(samples)))
def get_global_stats(files_content):
    """Print the same statistics over all files' samples pooled together."""
    # Flatten in one pass instead of repeated list concatenation (O(n**2)).
    data = [sample for sublist in files_content for sample in sublist]
    print("*GLOBAL MEAN : {0}".format(statistics.mean(data)))
    print("*GLOBAL MEDIAN : {0}".format(statistics.median(data)))
    try:
        print("*GLOBAL MOST TYPICAL VALUE : {0}".format(statistics.mode(data)))
    except statistics.StatisticsError:
        # Fix: narrowed the previously bare ``except``.
        print("2 most typical values!")
    print("*GLOBAL STANDARD DEVIATION : {0}".format(statistics.stdev(data)))
    print("*GLOBAL VARIANCE : {0}".format(statistics.variance(data)))
def main():
    """CLI entry: gather per-directory CSV files and print their stats."""
    parser = argparse.ArgumentParser(description='Get stats from Powertool output')
    parser.add_argument('-p', '--path', type=str, default=None, required=True,
                        help="specify path to your directories")
    parser.add_argument('-o', '--output', action="store_true",
                        help="save the output in the analysed directory")
    args = parser.parse_args()
    # NOTE(review): args.output is parsed but never used below.
    directories = glob(args.path+"*")
    if len(directories) == 0:
        sys.exit(1)
    csv_files = []
    for directory in directories:
        current_files = [x for x in glob(directory + "/*") if ".csv" in x]
        csv_files = csv_files + current_files
    files_content = []
    for csv_file in csv_files:
        with open(csv_file, "r") as csv_content:
            csv_reader = csv.reader(csv_content)
            # Keep only values matching a strict decimal pattern
            # (plain integers without a dot are skipped).
            files_content.append([float(row[0]) for row in csv_reader if not (re.match("^\d+?\.\d+?$", row[0]) is None)])
    # NOTE(review): labels are directories but contents are per-CSV-file,
    # so the two lists can diverge in length — confirm intent.
    get_stats_from(directories, files_content)
    get_global_stats(files_content)
| Python | 0.000001 | |
32dcc681a82ef2246d0fad441481d6e68f79ddd6 | Add Python benchmark | lib/node_modules/@stdlib/math/base/special/ln/benchmark/python/benchmark.py | lib/node_modules/@stdlib/math/base/special/ln/benchmark/python/benchmark.py | #!/usr/bin/env python
"""Benchmark ln."""
from __future__ import print_function
import timeit
NAME = "ln"
REPEATS = 3
ITERATIONS = 1000000
def print_version():
    """Print the TAP version."""
    # TAP (Test Anything Protocol) header line.
    print("TAP version 13")
def print_summary(total, passing):
    """Print the benchmark summary.

    # Arguments

    * `total`: total number of tests
    * `passing`: number of passing tests
    """
    print("#")
    # TAP plan line.
    print("1..{0}".format(total))
    print("# total {0}".format(total))
    print("# pass {0}".format(passing))
    print("#")
    print("# ok")
def print_results(elapsed):
    """Print benchmark results.

    # Arguments

    * `elapsed`: elapsed time (in seconds)

    # Examples

    ``` python
    python> print_results(0.131009101868)
    ```
    """
    # Rate is derived from the module-level ITERATIONS constant.
    rate = ITERATIONS / elapsed

    print("  ---")
    print("  iterations: " + str(ITERATIONS))
    print("  elapsed: " + str(elapsed))
    print("  rate: " + str(rate))
    print("  ...")
def benchmark():
    """Run the benchmark and print benchmark results."""
    setup = "from math import log; from random import random;"
    stmt = "y = log(10000.0*random() - 0.0)"

    t = timeit.Timer(stmt, setup=setup)

    print_version()

    # Fix: use range() so the script also runs on Python 3 (xrange does
    # not exist there); the file already imports print_function for 2/3
    # compatibility, and REPEATS is small so range() is equally cheap.
    for i in range(REPEATS):
        print("# python::" + NAME)
        elapsed = t.timeit(number=ITERATIONS)
        print_results(elapsed)
        print("ok " + str(i+1) + " benchmark finished")

    print_summary(REPEATS, REPEATS)
def main():
    """Run the benchmark."""
    benchmark()


# Script entry point.
if __name__ == "__main__":
    main()
| Python | 0.000138 | |
20b450c4cd0ff9c57d894fa263056ff4cd2dbf07 | Add a vim version of merge business hours | vim_turing_machine/machines/merge_business_hours/vim_merge_business_hours.py | vim_turing_machine/machines/merge_business_hours/vim_merge_business_hours.py | from vim_turing_machine.machines.merge_business_hours.merge_business_hours import merge_business_hours_transitions
"""Run the merge-business-hours Turing machine in its Vim encoding.

Usage: python vim_merge_business_hours.py <initial_tape>
"""
# Fix: ``sys`` was used below without being imported.
import sys

from vim_turing_machine.machines.merge_business_hours.merge_business_hours import merge_business_hours_transitions
from vim_turing_machine.vim_machine import VimTuringMachine


if __name__ == '__main__':
    merge_business_hours = VimTuringMachine(merge_business_hours_transitions(), debug=True)
    merge_business_hours.run(initial_tape=sys.argv[1], max_steps=50)
| Python | 0.000001 | |
9ac5bfb17346f364414f17e3e16ba15ab812f5a0 | tidy up | src/lino/tools/mail.py | src/lino/tools/mail.py | ## Copyright Luc Saffre 2003-2004.
## This file is part of the Lino project.
## Lino is free software; you can redistribute it and/or modify it
## under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
## Lino is distributed in the hope that it will be useful, but WITHOUT
## ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
## or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
## License for more details.
## You should have received a copy of the GNU General Public License
## along with Lino; if not, write to the Free Software Foundation,
## Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
used by :
scripts/openmail.py
tests/etc/1.py
"""
import sys,os
import urllib
import email
import webbrowser
def mailto_url(to=None, subject=None, body=None, cc=None):
    """Build a ``mailto:`` URL from the given header fields.

    Encodes the content as a mailto link as described on
    http://www.faqs.org/rfcs/rfc2368.html
    Examples partly taken from
    http://selfhtml.teamone.de/html/verweise/email.htm

    NOTE(review): ``urllib.quote`` is the Python 2 API (``urllib.parse.quote``
    on Python 3), consistent with the rest of this module.
    """
    #url = "mailto:" + urllib.quote(to.strip())
    # Leave "@" and "," unescaped so address lists remain readable.
    url = "mailto:" + urllib.quote(to.strip(), "@,")
    sep = "?"
    if cc:
        url += sep + "cc=" + urllib.quote(cc, "@,")
        sep = "&"
    if subject:
        url += sep + "subject=" + urllib.quote(subject, "")
        sep = "&"
    if body:
        # Also note that line breaks in the body of a message MUST be
        # encoded with "%0D%0A". (RFC 2368)
        body = "\r\n".join(body.splitlines())
        url += sep + "body=" + urllib.quote(body, "")
        sep = "&"
    # if not confirm("okay"): return
    return url
## def readmail2(filename):
## "reads a real RFC2822 file"
## msg = email.message_from_file(open(filename))
## if msg.is_multipart():
## raise "%s contains a multipart message : not supported" % filename
## return msg
def readmail(filename):
    """Parse a "simplified pseudo-RFC2822" file into an email Message.

    The file consists of ``Name: value`` header lines, a single blank
    line, then the body.  Headers become fields of the returned Message,
    the rest becomes its payload.

    NOTE(review): ``text.decode("cp850")`` on the result of ``read()``
    assumes Python 2 byte strings (DOS codepage input re-encoded as
    Latin-1); this breaks on Python 3.
    """
    # Python 2 import path (``email.message.Message`` on Python 3).
    from email.Message import Message
    msg = Message()
    text = open(filename).read()
    text = text.decode("cp850")
    text = text.encode("iso-8859-1", "replace")
    headersDone = False
    subject = None
    to = None
    body = ""
    for line in text.splitlines():
        if headersDone:
            body += line + "\n"
        else:
            if len(line) == 0:
                # First blank line separates the headers from the body.
                headersDone = True
            else:
                # NOTE(review): split(':') raises ValueError on header
                # values that themselves contain a colon (e.g. URLs).
                (name, value) = line.split(':')
                msg[name] = value.strip()
##                if name.lower() == 'subject':
##                    subject = value.strip()
##                elif name.lower() == 'to':
##                    to = value.strip()
##                else:
##                    raise "%s : invalid header field in line %s" % (
##                        name,repr(line))
    msg.set_payload(body)
    return msg
def openmail(msg):
    """Open the user's mail client prefilled from *msg* via a mailto: URL."""
    url = mailto_url(msg.get('to'), msg.get("subject"), msg.get_payload())
    # new=1: request a new browser window where the platform supports it.
    webbrowser.open(url, new=1)
| Python | 0.000001 | |
137a7c6e98e0ba8bd916d4ba696b0f0f4e2cdc56 | Create uptime.py | plot-uptime/uptime.py | plot-uptime/uptime.py | Python | 0.000024 | ||
e90c48ba46d7971386e01b3def9edbb2df5d74e8 | Create mummy.py | management/commands/mummy.py | management/commands/mummy.py | """
1. Install model-mommy
`pip install model-mommy`
2. Use the command
`./manage mummy someotherapp.HilariousModelName:9000 yetanotherapp.OmgTheseModelNamesLawl:1`
"""
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from model_mommy import mommy
class Command(BaseCommand):
    """Management command that generates model instances via model-mommy.

    Each positional argument is either ``app.Model`` (one instance) or
    ``app.Model:count`` for a batch.
    """

    args = '<modelpath modelpath:count ...>'
    help = 'Generate model instances using model-mommy'

    def handle(self, *args, **options):
        for modelpath in args:
            count = 1
            if ":" in modelpath:
                # Bug fix: str.split yields strings, and the original
                # passed the raw string straight to _quantity; coerce to
                # int so "app.Model:9000" actually creates 9000 rows.
                modelpath, count = modelpath.split(":", 1)
                count = int(count)
            self.stdout.write("Processing: {}".format(modelpath))
            mommy.make(modelpath, _quantity=count)
| Python | 0.0034 | |
3b7de4dbe3611863620cb528092779d25efde025 | remove dj 3.2 warnings | data_exports/apps.py | data_exports/apps.py | #!/usr/bin/env python
from django.apps import AppConfig
from django.utils.translation import gettext_lazy as _
class CsvExportConfig(AppConfig):
    """Django application configuration for the ``data_exports`` app."""
    name = 'data_exports'
    # Explicit PK type silences the Django 3.2+ DEFAULT_AUTO_FIELD warning.
    default_auto_field = "django.db.models.AutoField"
| Python | 0.000002 | |
85c732e395e3db4ec63a0d8580d895363d82e4a0 | Add the salt.output module | salt/output.py | salt/output.py | """
A simple way of setting the output format for data from modules
"""
import pprint
# Conditionally import the json and yaml modules
try:
import json
JSON = True
except ImportError:
JSON = False
try:
import yaml
YAML = True
except ImportError:
YAML = False
__all__ = ('get_outputter',)
class Outputter(object):
    """
    Class for outputting data to the screen.

    Subclasses set ``supports`` to the output-format name they handle and
    are discovered at runtime through ``Outputter.__subclasses__()``.
    """
    # Output-format name a subclass handles; None marks the fallback.
    supports = None

    @classmethod
    def check(klass, name):
        # Don't advertise Outputter classes for optional modules
        if hasattr(klass, "enabled") and not klass.enabled:
            return False
        return klass.supports == name

    def __call__(self, data, **kwargs):
        # Fallback renderer: pretty-print whatever we were given.
        print "Calling Outputter.__call__()"
        pprint.pprint(data)
class TxtOutputter(Outputter):
    """
    Plain text output. Primarily for returning output from
    shell commands in the exact same way they would output
    on the shell when ran directly.
    """
    supports = "txt"

    def __call__(self, data, **kwargs):
        if hasattr(data, "keys"):
            # Dict-like: prefix every line of every value with its key.
            for key in data.keys():
                value = data[key]
                for line in value.split('\n'):
                    print "{0}: {1}".format(key, line)
        else:
            # For non-dictionary data, run pprint
            super(TxtOutputter, self).__call__(data)
class JSONOutputter(Outputter):
    """JSON output. Chokes on non-serializable objects."""
    supports = "json"
    # Only advertised when the json module imported successfully above.
    enabled = JSON

    def __call__(self, data, **kwargs):
        try:
            # A good kwarg might be: indent=4
            print json.dumps(data, **kwargs)
        except TypeError:
            # Non-serializable data: fall back to the pprint renderer.
            super(JSONOutputter, self).__call__(data)
class YamlOutputter(Outputter):
    """Yaml output. All of the cool kids are doing it."""
    supports = "yaml"
    # Only advertised when PyYAML imported successfully above.
    enabled = YAML

    def __call__(self, data, **kwargs):
        print yaml.dump(data, **kwargs)
class RawOutputter(Outputter):
    """Raw output. This calls repr() on the returned data."""
    supports = "raw"

    def __call__(self, data, **kwargs):
        # NOTE(review): ``print data`` uses str(), not repr(); the
        # docstring's repr() claim does not match the implementation.
        print data
def get_outputter(name=None):
    """Return an Outputter instance for the given format name.

    Scans the registered Outputter subclasses and instantiates the first
    one whose ``check`` accepts *name*; falls back to the base Outputter.

    Usage:
        printout = get_outputter("txt")
        printout(ret)
    """
    chosen = next(
        (klass for klass in Outputter.__subclasses__() if klass.check(name)),
        Outputter,
    )
    return chosen()
| Python | 0.000142 | |
380acd0e40ad2924f1434d4ae7f7e0a8a163139f | add script for building the cluster catalog | bin/build-cluster-catalog.py | bin/build-cluster-catalog.py | #!/usr/bin/env python
"""Build and write out the NGC-star-clusters.fits catalog.
"""
import os
import numpy as np
import numpy.ma as ma
from astropy.io import ascii
from astropy.table import Table
from astrometry.util.starutil_numpy import hmsstring2ra, dmsstring2dec
from astrometry.libkd.spherematch import match_radec
from pkg_resources import resource_filename
#import desimodel.io
#import desimodel.footprint
#tiles = desimodel.io.load_tiles(onlydesi=True)
if not os.path.isfile('/tmp/NGC.csv'):
os.system('wget -P /tmp https://raw.githubusercontent.com/mattiaverga/OpenNGC/master/NGC.csv')
names = ('name', 'type', 'ra_hms', 'dec_dms', 'const', 'majax', 'minax',
'pa', 'bmag', 'vmag', 'jmag', 'hmag', 'kmag', 'sbrightn', 'hubble',
'cstarumag', 'cstarbmag', 'cstarvmag', 'messier', 'ngc', 'ic',
'cstarnames', 'identifiers', 'commonnames', 'nednotes', 'ongcnotes')
NGC = ascii.read('/tmp/NGC.csv', delimiter=';', names=names)
NGC = NGC[(NGC['ra_hms'] != 'N/A')]
ra, dec = [], []
for _ra, _dec in zip(ma.getdata(NGC['ra_hms']), ma.getdata(NGC['dec_dms'])):
ra.append(hmsstring2ra(_ra.replace('h', ':').replace('m', ':').replace('s','')))
dec.append(dmsstring2dec(_dec.replace('d', ':').replace('m', ':').replace('s','')))
NGC['ra'] = ra
NGC['dec'] = dec
objtype = np.char.strip(ma.getdata(NGC['type']))
# Keep all globular clusters and planetary nebulae
keeptype = ('PN', 'GCl')
keep = np.zeros(len(NGC), dtype=bool)
for otype in keeptype:
ww = [otype == tt for tt in objtype]
keep = np.logical_or(keep, ww)
print(np.sum(keep))
clusters = NGC[keep]
# Fill missing major axes with a nominal 0.4 arcmin (roughly works
# for NGC7009, which is the only missing PN in the footprint).
ma.set_fill_value(clusters['majax'], 0.4)
clusters['majax'] = ma.filled(clusters['majax'].data)
# Increase the radius of IC4593
# https://github.com/legacysurvey/legacypipe/issues/347
clusters[clusters['name'] == 'IC4593']['majax'] = 0.5
#indesi = desimodel.footprint.is_point_in_desi(tiles, ma.getdata(clusters['ra']),
# ma.getdata(clusters['dec']))
#print(np.sum(indesi))
#bb = clusters[indesi]
#bb[np.argsort(bb['majax'])[::-1]]['name', 'ra', 'dec', 'majax', 'type']
# Build the output catalog: select a subset of the columns and rename
# majax-->radius (arcmin-->degree)
out = Table()
out['name'] = clusters['name']
out['alt_name'] = ['' if mm == 0 else 'M{}'.format(str(mm))
for mm in ma.getdata(clusters['messier'])]
out['type'] = clusters['type']
out['ra'] = clusters['ra']
out['dec'] = clusters['dec']
out['radius_orig'] = (clusters['majax'] / 60).astype('f4') # [degrees]
out['radius'] = out['radius_orig']
# Read the ancillary globular cluster catalog and update the radii in the NGC.
#https://heasarc.gsfc.nasa.gov/db-perl/W3Browse/w3table.pl?tablehead=name%3Dglobclust&Action=More+Options
if False:
gcfile = resource_filename('legacypipe', 'data/globular_clusters.fits')
gcs = Table.read(gcfile)
I, J, _ = match_radec(clusters['ra'], clusters['dec'], gcs['RA'], gcs['DEC'], 10./3600., nearest=True)
out['radius'][I] = (gcs['HALF_LIGHT_RADIUS'][J] / 60).astype('f4') # [degrees]
if False: # debugging
bb = out[['M' in nn for nn in out['alt_name']]]
bb[np.argsort(bb['radius'])]
bb['radius'] *= 60
bb['radius_orig'] *= 60
print(bb)
clusterfile = resource_filename('legacypipe', 'data/NGC-star-clusters.fits')
print('Writing {}'.format(clusterfile))
out.write(clusterfile, overwrite=True)
# Code to help visually check all the globular clusters.
if False:
checktype = ('GCl', 'PN')
check = np.zeros(len(NGC), dtype=bool)
for otype in checktype:
ww = [otype == tt for tt in objtype]
check = np.logical_or(check, ww)
check_clusters = NGC[check] # 845 of them
# Write out a catalog, load it into the viewer and look at each of them.
check_clusters[['ra', 'dec', 'name']].write('/tmp/check.fits', overwrite=True) # 25 of them
| Python | 0 | |
9b0c335fc956c2d2156d169e3636d862ebfbadc0 | add a scraping script | hadairopink.py | hadairopink.py | #!/usr/bin/env python
"""
No description.
"""
import sys
from scrapy import cmdline, Request
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
TARGET_DOMAIN = 'hadairopink.com'
XPATH_IMAGE_SRC = '//div[@class="kizi"]//a/img[contains(@src, "/wp-content/uploads/")]/@src'
XPATH_PAGINATION = '/html/body//div[@class="pagination"]/a[@data-wpel-link="internal"]'
XPATH_ENTRY = '/html/body//h3[@class="entry-title-ac"]/a'
class Crawler(CrawlSpider):
    """Crawl hadairopink.com entry pages and collect upload-image URLs."""
    name = TARGET_DOMAIN
    allowed_domains = [TARGET_DOMAIN]
    custom_settings = {
        # Be polite: at most one request per second.
        'DOWNLOAD_DELAY': 1,
    }
    rules = (
        # Entry pages yield items via parse_entry; pagination links are
        # followed without a callback.
        Rule(LinkExtractor(restrict_xpaths=XPATH_ENTRY), callback='parse_entry'),
        Rule(LinkExtractor(restrict_xpaths=XPATH_PAGINATION)),
    )

    def start_requests(self):
        """Start crawling from the URL passed on the command line (-a tag=...)."""
        url = self.tag
        yield Request(url, dont_filter=True)

    def parse_entry(self, response):
        """Yield title/url/images for entries containing uploaded images."""
        if images := response.xpath(XPATH_IMAGE_SRC).getall():
            yield {
                'title': response.xpath('//title/text()').get(),
                'url': response.url,
                'images': images}
if __name__ == '__main__':
#cmdline.execute(f"scrapy runspider {sys.argv[0]} -a tag={sys.argv[1]} -O images.csv".split())
command_line = ["scrapy", "runspider"]
command_line.extend(sys.argv)
cmdline.execute(command_line)
| Python | 0.000001 | |
285cddc3ed75f70e077738a206c50a57671245ea | add hello world script by pyThon | hello_flask.py | hello_flask.py | # -*- coding: utf-8 -*-
"""Minimal Flask "hello world" application."""
from flask import Flask

# Bug fix: the original called ``flask(__name__)`` (lowercase), which is a
# NameError -- the imported factory is the ``Flask`` class.
app = Flask(__name__)


@app.route('/')
def hello_flask():
    """Return a static greeting for the site root."""
    return 'Hello Flask!'


if __name__ == '__main__':
    app.run()
7478a6605b4d722e2eec9031457fb33ee99857f5 | add geo tools from dingo | edisgo/tools/geo.py | edisgo/tools/geo.py | from geopy.distance import vincenty
import os
if not 'READTHEDOCS' in os.environ:
from shapely.geometry import LineString
from shapely.ops import transform
import logging
logger = logging.getLogger('edisgo')
def calc_geo_branches_in_polygon(mv_grid, polygon, mode, proj):
    """Return MV-grid branches that intersect with or lie within *polygon*.

    Args:
        mv_grid: MV grid object providing ``graph_edges()``
        polygon: shapely polygon in the nodes' CRS
        mode: 'intersects' selects branches touching the polygon,
            'contains' selects branches fully inside it
        proj: pyproj projection applied to polygon and branches before
            the geometric test

    Returns:
        list of branch dicts (as yielded by ``graph_edges()``)

    Raises:
        ValueError: if *mode* is neither 'intersects' nor 'contains'.
    """
    branches = []
    polygon_shp = transform(proj, polygon)
    for branch in mv_grid.graph_edges():
        nodes = branch['adj_nodes']
        branch_shp = transform(proj, LineString([nodes[0].geo_data, nodes[1].geo_data]))

        # check if branches intersect with polygon if mode = 'intersects'
        if mode == 'intersects':
            if polygon_shp.intersects(branch_shp):
                branches.append(branch)
        # check if polygon contains branches if mode = 'contains'
        elif mode == 'contains':
            if polygon_shp.contains(branch_shp):
                branches.append(branch)
        # error
        else:
            raise ValueError('Mode is invalid!')
    return branches
def calc_geo_branches_in_buffer(node, mv_grid, radius, radius_inc, proj):
    """ Determines branches in nodes' associated graph that are at least partly within buffer of `radius` from `node`.
    If there are no nodes, the buffer is successively extended by `radius_inc` until nodes are found.

    Args:
        node: origin node (e.g. LVStationDing0 object) with associated shapely object (attribute `geo_data`) in any CRS
            (e.g. WGS84)
        mv_grid: MV grid object whose ``graph_edges()`` are tested
        radius: buffer radius in m
        radius_inc: radius increment in m
        proj: pyproj projection object: nodes' CRS to equidistant CRS (e.g. WGS84 -> ETRS)

    Returns:
        list of branches (NetworkX branch objects)
    """
    branches = []
    # Widen the buffer until at least one branch intersects it.
    # NOTE(review): this loops forever if the grid has no branches at all.
    while not branches:
        node_shp = transform(proj, node.geo_data)
        buffer_zone_shp = node_shp.buffer(radius)
        for branch in mv_grid.graph_edges():
            nodes = branch['adj_nodes']
            branch_shp = transform(proj, LineString([nodes[0].geo_data, nodes[1].geo_data]))
            if buffer_zone_shp.intersects(branch_shp):
                branches.append(branch)
        radius += radius_inc
    return branches
def calc_geo_dist_vincenty(node_source, node_target):
    """ Calculates the geodesic distance between `node_source` and `node_target` incorporating the detour factor in
    config_calc.cfg.

    Args:
        node_source: source node (Ding0 object), member of _graph
        node_target: target node (Ding0 object), member of _graph

    Returns:
        Distance in m
    """
    # NOTE(review): ``cfg_ding0`` is neither imported nor defined in this
    # module (code copied from dingo) -- calling this raises NameError.
    branch_detour_factor = cfg_ding0.get('assumptions', 'branch_detour_factor')

    # notice: vincenty takes (lat,lon)
    branch_length = branch_detour_factor * vincenty((node_source.geo_data.y, node_source.geo_data.x),
                                                    (node_target.geo_data.y, node_target.geo_data.x)).m

    # ========= BUG: LINE LENGTH=0 WHEN CONNECTING GENERATORS ===========
    # When importing generators, the geom_new field is used as position. If it is empty, EnergyMap's geom
    # is used and so there are a couple of generators at the same position => length of interconnecting
    # line is 0. See issue #76
    if branch_length == 0:
        branch_length = 1
        logger.warning('Geo distance is zero, check objects\' positions. '
                       'Distance is set to 1m')
    # ===================================================================

    return branch_length
| Python | 0 | |
49c98929190be5c759f200ec8816f6ab334d0e4b | Create inception5h.py | inception5h.py | inception5h.py | # -*- coding: utf-8 -*-
########################################################################
#
# The Inception Model 5h for TensorFlow.
#
# This variant of the Inception model is easier to use for DeepDream
# and other imaging techniques. This is because it allows the input
# image to be any size, and the optimized images are also prettier.
#
# It is unclear which Inception model this implements because the
# Google developers have (as usual) neglected to document it.
# It is dubbed the 5h-model because that is the name of the zip-file,
# but it is apparently simpler than the v.3 model.
#
# See the Python Notebook for Tutorial #14 for an example usage.
#
# Implemented in Python 3.5 with TensorFlow v0.11.0rc0
#
########################################################################
#
# This file is part of the TensorFlow Tutorials available at:
#
# https://github.com/Hvass-Labs/TensorFlow-Tutorials
#
# Published under the MIT License. See the file LICENSE for details.
#
# Copyright 2016 by Magnus Erik Hvass Pedersen
#
########################################################################
import numpy as np
import tensorflow as tf
import download
import os
########################################################################
# Various directories and file-names.
# Internet URL for the tar-file with the Inception model.
# Note that this might change in the future and will need to be updated.
data_url = "http://storage.googleapis.com/download.tensorflow.org/models/inception5h.zip"
# Directory to store the downloaded data.
data_dir = "inception/5h/"
# File containing the TensorFlow graph definition. (Downloaded)
path_graph_def = "tensorflow_inception_graph.pb"
########################################################################
def maybe_download():
"""
Download the Inception model from the internet if it does not already
exist in the data_dir. The file is about 50 MB.
"""
print("Downloading Inception 5h Model ...")
download.maybe_download_and_extract(url=data_url, download_dir=data_dir)
########################################################################
class Inception5h:
"""
The Inception model is a Deep Neural Network which has already been
trained for classifying images into 1000 different categories.
When you create a new instance of this class, the Inception model
will be loaded and can be used immediately without training.
"""
# Name of the tensor for feeding the input image.
tensor_name_input_image = "input:0"
# Names for some of the commonly used layers in the Inception model.
layer_names = ['conv2d0', 'conv2d1', 'conv2d2',
'mixed3a', 'mixed3b',
'mixed4a', 'mixed4b', 'mixed4c', 'mixed4d', 'mixed4e',
'mixed5a', 'mixed5b']
def __init__(self):
# Now load the Inception model from file. The way TensorFlow
# does this is confusing and requires several steps.
# Create a new TensorFlow computational graph.
self.graph = tf.Graph()
# Set the new graph as the default.
with self.graph.as_default():
# TensorFlow graphs are saved to disk as so-called Protocol Buffers
# aka. proto-bufs which is a file-format that works on multiple
# platforms. In this case it is saved as a binary file.
# Open the graph-def file for binary reading.
path = os.path.join(data_dir, path_graph_def)
with tf.gfile.FastGFile(path, 'rb') as file:
# The graph-def is a saved copy of a TensorFlow graph.
# First we need to create an empty graph-def.
graph_def = tf.GraphDef()
# Then we load the proto-buf file into the graph-def.
graph_def.ParseFromString(file.read())
# Finally we import the graph-def to the default TensorFlow graph.
tf.import_graph_def(graph_def, name='')
# Now self.graph holds the Inception model from the proto-buf file.
# Get a reference to the tensor for inputting images to the graph.
self.input = self.graph.get_tensor_by_name(self.tensor_name_input_image)
# Get references to the tensors for the commonly used layers.
self.layer_tensors = [self.graph.get_tensor_by_name(name + ":0") for name in self.layer_names]
def create_feed_dict(self, image=None):
"""
Create and return a feed-dict with an image.
:param image:
The input image is a 3-dim array which is already decoded.
The pixels MUST be values between 0 and 255 (float or int).
:return:
Dict for feeding to the Inception graph in TensorFlow.
"""
# Expand 3-dim array to 4-dim by prepending an 'empty' dimension.
# This is because we are only feeding a single image, but the
# Inception model was built to take multiple images as input.
image = np.expand_dims(image, axis=0)
# Image is passed in as a 3-dim array of raw pixel-values.
feed_dict = {self.tensor_name_input_image: image}
return feed_dict
def get_gradient(self, tensor):
"""
Get the gradient of the given tensor with respect to
the input image. This allows us to modify the input
image so as to maximize the given tensor.
For use in e.g. DeepDream and Visual Analysis.
:param tensor:
The tensor whose value we want to maximize
by changing the input image.
:return:
Gradient for the tensor with regard to the input image.
"""
# Set the graph as default so we can add operations to it.
with self.graph.as_default():
# Square the tensor-values.
# You can try and remove this to see the effect.
tensor = tf.square(tensor)
# Average the tensor so we get a single scalar value.
tensor_mean = tf.reduce_mean(tensor)
# Use TensorFlow to automatically create a mathematical
# formula for the gradient using the chain-rule of
# differentiation.
gradient = tf.gradients(tensor_mean, self.input)[0]
return gradient
| Python | 0.000001 | |
27ea547fbd7c936bd017b64b31ecf09ed991c6c0 | Add index to fixed_ips.address | nova/db/sqlalchemy/migrate_repo/versions/085_add_index_to_fixed_ips_by_address.py | nova/db/sqlalchemy/migrate_repo/versions/085_add_index_to_fixed_ips_by_address.py | # Copyright 2012 IBM
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Index, MetaData, Table
def upgrade(migrate_engine):
    """Add an index on the fixed_ips.address column."""
    meta = MetaData()
    meta.bind = migrate_engine
    fixed_ips = Table('fixed_ips', meta, autoload=True)
    address_index = Index('address', fixed_ips.c.address)
    address_index.create(migrate_engine)
def downgrade(migrate_engine):
    """Remove the index on the fixed_ips.address column."""
    meta = MetaData()
    meta.bind = migrate_engine
    fixed_ips = Table('fixed_ips', meta, autoload=True)
    address_index = Index('address', fixed_ips.c.address)
    address_index.drop(migrate_engine)
| Python | 0.000001 | |
65258cf8d11e8e5c7cce3e07d9a389e5617948dd | Add boilerplate code | aoc.py | aoc.py | import argparse
import importlib
import sys
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Advent of Code 2016")
parser.add_argument("--day", type=int, dest="days", nargs="+", default=range(1, 25))
parser.add_argument("--stdin", dest='stdin', action='store_true', default=False)
args = parser.parse_args()
print("Advent of Code 2016")
print("===================")
print()
for day in args.days:
try:
problem_module = importlib.import_module("day_{}".format(day))
input_file = open("day_{}.txt".format(day)) if not args.stdin else sys.stdin
problem = problem_module.Problem(input_file)
print("Day", day)
print("------")
if hasattr(problem, 'step1') and callable(getattr(problem, 'step1')):
print("Step 1:", problem.step1())
if hasattr(problem, 'step2') and callable(getattr(problem, 'step2')):
print("Step 2:", problem.step2())
print()
except ImportError as e:
print("Day", day, "is not implemented yet")
| Python | 0.001915 | |
6db7902e5f78d28b9a00eb801c12d15c91949453 | Add gruneisen script for making figure | phonondb/phonopy/gruneisen.py | phonondb/phonopy/gruneisen.py | import numpy as np
from cogue.crystal.utility import klength2mesh
class ModeGruneisen:
    """Mesh-sampled mode Grüneisen parameter calculation.

    NOTE(review): this class appears unfinished -- see the inline notes on
    undefined names; it cannot currently run as written.
    """

    def __init__(self,
                 phonon_orig,
                 phonon_plus,
                 phonon_minus,
                 distance=100):
        # NOTE(review): ``phonopy_gruneisen`` and ``phonon`` are undefined
        # here (the parameters are phonon_orig/plus/minus) -> NameError.
        self._phonopy_gruneisen = phonopy_gruneisen
        self._phonon = phonon
        self._lattice = np.array(phonon.get_unitcell().get_cell().T,
                                 dtype='double')
        self._mesh = None
        self._gruneisen = None

    def run(self):
        # NOTE(review): ``distance`` is an __init__ argument, not in scope
        # here; it is never stored on self.
        self._set_mesh(distance=distance)
        if self._run_mesh_sampling():
            self._run_gruneisen()
            return True
        return False

    def get_lattice(self):
        # Column-wise lattice vectors of the unit cell.
        return self._lattice

    def get_mesh(self):
        # Sampling mesh computed by _set_mesh (None before run()).
        return self._mesh

    def get_mesh_gruneisen(self):
        return self._gruneisen

    def plot(self, plt, max_index=101):
        # NOTE(review): plots thermal properties (set by _run_gruneisen),
        # not Grüneisen parameters -- likely copied from another class.
        temps, fe, entropy, cv = self._thermal_properties
        fig = plt.figure()
        fig.subplots_adjust(left=0.20, right=0.92, bottom=0.18)
        plt.tick_params(axis='both', which='major', labelsize=10.5)
        ax = fig.add_subplot(111)
        plt.plot(temps[:max_index], fe[:max_index], 'r-')
        plt.plot(temps[:max_index], entropy[:max_index], 'b-')
        plt.plot(temps[:max_index], cv[:max_index], 'g-')
        xlim = ax.get_xlim()
        ylim = ax.get_ylim()
        aspect = (xlim[1] - xlim[0]) / (ylim[1] - ylim[0])
        # ax.set_aspect(aspect * 0.7)
        plt.legend(('Free energy [kJ/mol]', 'Entropy [J/K/mol]',
                    r'C$_\mathrm{V}$ [J/K/mol]'),
                   loc='best',
                   prop={'size':8.5},
                   frameon=False)
        plt.xlabel("Temperatures (K)")
        plt.ylabel("Thermal properties (*/unitcell)")

    def save_figure(self, plt):
        plt.savefig("gruneisen.png")

    def _set_mesh(self, distance=100):
        # Convert a k-point length criterion into a sampling mesh.
        self._mesh = klength2mesh(distance, self._lattice)

    def _run_mesh_sampling(self):
        return self._phonopy_gruneisen.set_mesh(self._mesh)

    def _run_gruneisen(self):
        # NOTE(review): stores thermal properties, not Grüneisen data.
        self._thermal_properties = self._phonon.get_thermal_properties()
if __name__ == '__main__':
import sys
import yaml
from phonopy import Phonopy
from phonopy.gruneisen.mesh import Mesh as GruneisenMesh
from phonopy.interface.phonopy_yaml import phonopyYaml
from phonopy.file_IO import parse_FORCE_SETS
from cogue.crystal.utility import get_angles, get_lattice_parameters
import matplotlib
matplotlib.use('Agg')
matplotlib.rcParams.update({'figure.figsize': (4.5, 3)})
import matplotlib.pyplot as plt
phonons = []
for dirname in ('orig', 'plus', 'minus'):
if len(sys.argv) > 1:
cell = phonopyYaml("%s/" % dirname + sys.argv[1]).get_atoms()
else:
cell = phonopyYaml("%s/POSCAR-unitcell.yaml" % dirname).get_atoms()
phonon_info = yaml.load(open("%s/%s.yaml" % (dirname, dirname)))
phonon = Phonopy(cell,
phonon_info['supercell_matrix'],
is_auto_displacements=False)
force_sets = parse_FORCE_SETS()
phonon.set_displacement_dataset(force_sets)
phonon.produce_force_constants()
phonons.append(phonon)
distance = 100
gruneisen = ModeGruneisen(phonons[0], phonons[1], phonons[2], distance=distance)
if gruneisen.run():
gruneisen.plot(plt)
lattice = gruneisen.get_lattice()
print "a, b, c =", get_lattice_parameters(lattice)
print "alpha, beta, gamma =", get_angles(lattice)
print "mesh (x=%f) =" % distance, gruneisen.get_mesh()
gruneisen.save_figure(plt)
else:
print "Mode Gruneisen parameter calculation failed."
| Python | 0 | |
813eb3b6bdc01906e39f11f93b4a326fc2fb1ee5 | Add kitchen-sink base test | test/base.py | test/base.py | import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import os
import uuid
import torch2c
def base_test():
    """Kitchen-sink smoke test for torch2c.

    Builds two tiny models, sums their outputs plus a scalar constant so
    the traced graph exercises Linear, relu, log_softmax/softmax and
    tensor addition, then compiles the result to C.
    """
    fc1 = nn.Linear(10, 20)
    fc1.weight.data.normal_(0.0, 1.0)
    fc1.bias.data.normal_(0.0, 1.0)
    fc2 = nn.Linear(20, 2)
    fc2.weight.data.normal_(0.0, 1.0)
    fc2.bias.data.normal_(0.0, 1.0)
    model_0 = lambda x: F.log_softmax(fc2(F.relu(fc1(x))))

    fc3 = nn.Linear(10, 2)
    fc3.weight.data.normal_(0.0, 1.0)
    fc3.bias.data.normal_(0.0, 1.0)
    model_1 = lambda x: F.softmax(F.relu(fc3(x)))

    data = Variable(torch.rand(10, 10))
    out = model_0(data) + model_1(data) + 1

    out_path = 'out'
    if not os.path.isdir(out_path):
        os.mkdir(out_path)
    # Unique subdirectory per run so generated sources never collide.
    uid = str(uuid.uuid4())
    torch2c.compile(out, 'base', os.path.join(out_path, uid), compile_test=True)


if __name__ == '__main__':
    base_test()
| Python | 0.000001 | |
407f7fcf8f481c57df59789b7f845928428f1bf9 | Add example script. | telegrambot/example.py | telegrambot/example.py | from telegrambot import TelegramBot, main
from telegrambot.commands import GetCommand
class DemoTelegramBot(TelegramBot, GetCommand):
    """Example bot: the base TelegramBot plus the GetCommand mixin."""
    pass


if __name__ == '__main__':
    main(bot_class=DemoTelegramBot)
9fdd671d9c0b91dc789ebf3b24226edb3e6a072a | Add new migration to load metrics fixtures | sleep/migrations/0002_load_metrics.py | sleep/migrations/0002_load_metrics.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.core.management import call_command
def load_metrics(apps, schema_editor):
    """Forward migration step: load the metrics fixture.

    ``RunPython`` calls this with ``apps`` and ``schema_editor``; the
    fixture loader only needs the management command.
    """
    call_command('loaddata', 'metrics.json')


class Migration(migrations.Migration):
    """Load the metrics fixtures.

    Bug fix: the original left ``operations`` empty, so ``load_metrics``
    was defined but never executed when the migration ran.
    """

    dependencies = [
        ('sleep', '0001_initial'),
    ]

    operations = [
        # noop reverse lets the migration be unapplied cleanly.
        migrations.RunPython(load_metrics, migrations.RunPython.noop),
    ]
| Python | 0 | |
7ecc619104177c72b69337a35c7604491f2b06ec | Create amman.py | ideascube/conf/amman.py | ideascube/conf/amman.py | # -*- coding: utf-8 -*-
"""Amman box in Jordan"""
from .base import * # noqa
from django.utils.translation import ugettext_lazy as _
IDEASCUBE_NAME = u"مخيم الأزرق"
COUNTRIES_FIRST = ['SY', 'JO']
TIME_ZONE = 'Asia/Amman'
LANGUAGE_CODE = 'ar'
MONITORING_ENTRY_EXPORT_FIELDS = ['serial', 'refugee_id', 'birth_year',
'gender']
USER_FORM_FIELDS = (
('Ideasbox', ['serial', 'box_awareness']),
(_('Personal informations'), ['refugee_id', 'short_name', 'full_name', 'latin_name', 'birth_year', 'gender']), # noqa
(_('Family'), ['marital_status', 'family_status', 'children_under_12', 'children_under_18', 'children_above_18']), # noqa
(_('In the camp'), ['camp_entry_date', 'camp_activities', 'current_occupation', 'camp_address']), # noqa
(_('Origin'), ['country', 'city', 'country_of_origin_occupation', 'school_level', 'is_sent_to_school']), # noqa
(_('Language skills'), ['ar_level', 'en_level']),
(_('National residents'), ['id_card_number']),
)
ENTRY_ACTIVITY_CHOICES = [
('16 Days of Activism', _('16 Days of Activism')),
("AMANI Campaign", _("AMANI Campaign")),
("Anger Management Training", _("Anger Management Training")),
("Basic Computer Training", _("Basic Computer Training")),
("Beauty Training", _("Beauty Training")),
("Book Club", _("Book Club")),
("Conflict Resolution Training", _("Conflict Resolution Training")),
("Coping Skills and Mechanisms Training", _("Coping Skills and Mechanisms Training")), # noqa
("EDRAAK", _("EDRAAK")),
("Emotional intelligence Training", _("Emotional intelligence Training")),
("Handicrafts", _("Handicrafts")),
("How to be a Psychosocial Counselor Training", _("How to be a Psychosocial Counselor Training")), # noqa
("I am Woman", _("I am Woman")),
("International Children Day", _("International Children Day")),
("International Refugee Day", _("International Refugee Day")),
("Marathon", _("Marathon")),
("Mother's day celebration", _("Mother's day celebration")),
("Parenting Skills Training", _("Parenting Skills Training")),
("Peer Support Group", _("Peer Support Group")),
("Psychosocial ART Interventions Training", _("Psychosocial ART Interventions Training")), # noqa
("Puppets and Theatre", _("Puppets and Theatre")),
("Sewing and stitching", _("Sewing and stitching")),
("SIMSIM Club", _("SIMSIM Club")),
("Social Work Training", _("Social Work Training")),
("Stress Management Training", _("Stress Management Training")),
("Training of Trainers", _("Training of Trainers")),
("World Mental Health Day", _("World Mental Health Day")),
]
| Python | 0.000002 | |
ef4d7e4fb43b5db29576f95625fd612c259731be | Create ServoSync.py | home/Mats/ServoSync.py | home/Mats/ServoSync.py | port = "COM99"
arduino = Runtime.start("arduino","Arduino")
vard = Runtime.start("va","VirtualArduino")
vard.connect(port)
arduino.connect(port)
servo1 = Runtime.start("servo1","Servo")
servo2 = Runtime.start("servo2","Servo")
servo1.attach("arduino",1)
servo2.attach("arduino",2)
servo1.sync(servo2)
| Python | 0.000001 | |
ecfbaded5e03529d1b189b6b5fc8b2f8516c4b31 | Add hoster plugin for ARD mediathek | module/plugins/hoster/ARD.py | module/plugins/hoster/ARD.py |
import subprocess
import re
import os.path
import os
from module.utils import save_join, save_path
from module.plugins.Hoster import Hoster
# Requires rtmpdump
# by Roland Beermann
class RTMP:
    # TODO: Port to some RTMP-library like rtmpy or similar
    # TODO?: Integrate properly into the API of pyLoad
    """Thin wrapper around the external ``rtmpdump`` binary."""
    command = "rtmpdump"

    @classmethod
    def download_rtmp_stream(cls, url, output_file, playpath=None):
        """Download the RTMP stream at *url* into *output_file*.

        ``playpath`` is forwarded as rtmpdump's ``--playpath`` option
        when given.
        """
        opts = [
            "-r", url,
            "-o", output_file,
        ]
        if playpath:
            opts.append("--playpath")
            opts.append(playpath)
        cls.
_invoke_rtmpdump(opts)

    @classmethod
    def _invoke_rtmpdump(cls, opts):
        # Raises subprocess.CalledProcessError on non-zero exit status.
        args = [
            cls.command
        ]
        args.extend(opts)
        return subprocess.check_call(args)
class ARD(Hoster):
    """pyLoad hoster plugin for the ARD Mediathek (Python 2 codebase)."""
    __name__ = "ARD Mediathek"
    __version__ = "0.1"
    __pattern__ = r"http://www\.ardmediathek\.de/.*"
    __config__ = []

    def process(self, pyfile):
        site = self.load(pyfile.url)
        # Each addMediaStream call advertises (quality, url, playpath).
        avail_videos = re.findall(r"""mediaCollection.addMediaStream\(0, ([0-9]*), "([^\"]*)", "([^\"]*)", "[^\"]*"\);""", site)
        avail_videos.sort(key=lambda videodesc: int(videodesc[0]), reverse=True) # The higher the number, the better the quality
        quality, url, playpath = avail_videos[0]

        pyfile.name = re.search(r"<h1>([^<]*)</h1>", site).group(1)

        if url.startswith("http"):
            # Best quality is available over HTTP. Very rare.
            self.download(url)
        else:
            pyfile.setStatus("downloading")

            download_folder = self.config['general']['download_folder']
            location = save_join(download_folder, pyfile.package().folder)
            if not os.path.exists(location):
                os.makedirs(location, int(self.core.config["permission"]["folder"], 8))

                if self.core.config["permission"]["change_dl"] and os.name != "nt":
                    try:
                        # NOTE(review): ``getpwnam``, ``getgrnam`` and
                        # ``chown`` are not imported in this module (they
                        # come from pwd/grp/os) -- this branch raises
                        # NameError as written.
                        uid = getpwnam(self.config["permission"]["user"])[2]
                        gid = getgrnam(self.config["permission"]["group"])[2]
                        chown(location, uid, gid)
                    except Exception, e:
                        self.log.warning(_("Setting User and Group failed: %s") % str(e))

            output_file = save_join(location, save_path(pyfile.name))
            RTMP.download_rtmp_stream(url, playpath=playpath, output_file=output_file)
| Python | 0 | |
206ef4f7aad6c4ce51e4737a7d506a79061f1047 | Add an `import_or_skip` function to testing. | distarray/testing.py | distarray/testing.py | import unittest
import importlib
from functools import wraps
from distarray.error import InvalidCommSizeError
from distarray.mpiutils import MPI, create_comm_of_size
def import_or_skip(name):
"""Try importing `name`, raise SkipTest on failure.
Parameters
----------
name : str
Module name to try to import.
Returns
-------
module : module object
Module object imported by importlib.
Raises
------
unittest.SkipTest
If the attempted import raises an ImportError.
Examples
--------
>>> h5py = import_or_skip('h5py')
>>> h5py.get_config()
<h5py.h5.H5PYConfig at 0x103dd5a78>
"""
try:
return importlib.import_module(name)
except ImportError:
errmsg = '%s not found... skipping.' % name
raise unittest.SkipTest(errmsg)
def comm_null_passes(fn):
"""Decorator. If `self.comm` is COMM_NULL, pass."""
@wraps(fn)
def wrapper(self, *args, **kwargs):
if self.comm == MPI.COMM_NULL:
pass
else:
return fn(self, *args, **kwargs)
return wrapper
class MpiTestCase(unittest.TestCase):
"""Base test class for MPI test cases.
Overload `get_comm_size` to change the default comm size (default is
4). Overload `more_setUp` to add more to the default `setUp`.
"""
def get_comm_size(self):
return 4
def more_setUp(self):
pass
def setUp(self):
try:
self.comm = create_comm_of_size(self.get_comm_size())
except InvalidCommSizeError:
msg = "Must run with comm size >= {}."
raise unittest.SkipTest(msg.format(self.get_comm_size()))
else:
self.more_setUp()
def tearDown(self):
if self.comm != MPI.COMM_NULL:
self.comm.Free()
| import unittest
from functools import wraps
from distarray.error import InvalidCommSizeError
from distarray.mpiutils import MPI, create_comm_of_size
def comm_null_passes(fn):
"""Decorator. If `self.comm` is COMM_NULL, pass."""
@wraps(fn)
def wrapper(self, *args, **kwargs):
if self.comm == MPI.COMM_NULL:
pass
else:
return fn(self, *args, **kwargs)
return wrapper
class MpiTestCase(unittest.TestCase):
"""Base test class for MPI test cases.
Overload `get_comm_size` to change the default comm size (default is
4). Overload `more_setUp` to add more to the default `setUp`.
"""
def get_comm_size(self):
return 4
def more_setUp(self):
pass
def setUp(self):
try:
self.comm = create_comm_of_size(self.get_comm_size())
except InvalidCommSizeError:
msg = "Must run with comm size >= {}."
raise unittest.SkipTest(msg.format(self.get_comm_size()))
else:
self.more_setUp()
def tearDown(self):
if self.comm != MPI.COMM_NULL:
self.comm.Free()
| Python | 0 |
80c1dba49bbdaf4d0d37e8a06549774d2afd019a | Add cosmo_viewer app | pvapp/cosmo_viewer.py | pvapp/cosmo_viewer.py | ################################################################################
#
# Copyright 2013 Kitware, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
# import to process args
import sys
import os
import math
import json
import argparse
# import annotations
from autobahn.wamp import exportRpc
# import paraview modules.
from paraview import simple, web, servermanager, web_helper, paraviewweb_wamp, paraviewweb_protocols
# Setup global variables
timesteps = []
currentTimeIndex = 0
view = None
dataPath = None
authKey = None
def initView(width, height):
global view
view = simple.GetRenderView()
simple.Render()
view.ViewSize = [width, height]
view.Background = [0.0, 0.0, 0.0]
view.OrientationAxesLabelColor = [0, 0, 0]
# This class defines the exposed RPC methods for the midas application
class CosmoApp(paraviewweb_wamp.ServerProtocol):
def initialize(self):
global authKey
# Bring used components
self.registerParaViewWebProtocol(paraviewweb_protocols.ParaViewWebMouseHandler())
self.registerParaViewWebProtocol(paraviewweb_protocols.ParaViewWebViewPort())
self.registerParaViewWebProtocol(paraviewweb_protocols.ParaViewWebViewPortImageDelivery())
self.registerParaViewWebProtocol(paraviewweb_protocols.ParaViewWebViewPortGeometryDelivery())
# Update authentication key to use
#self.updateSecret(authKey)
@exportRpc("openFile")
def openFile(self, filename):
fileid = ""
if self.reader:
try:
simple.Delete(self.reader)
except:
self.reader = None
try:
self.reader = simple.OpenDataFile(filename)
simple.Show()
simple.Render()
simple.ResetCamera()
fileid = self.reader.GetGlobalIDAsString()
except:
self.reader = None
return fileid
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Midas+ParaViewWeb application")
web.add_arguments(parser)
parser.add_argument("--data-dir", default=os.getcwd(),
help="path to data directory", dest="path")
parser.add_argument("--width", default=575,
help="width of the render window", dest="width")
parser.add_argument("--height", default=575,
help="height of the render window", dest="height")
args = parser.parse_args()
dataPath = args.path
authKey = args.authKey
width = args.width
height = args.height
initView(width, height)
web.start_webserver(options=args, protocol=CosmoApp)
| Python | 0 | |
99ed96105fcbaa7b2836d19e1dde17bc49f23327 | Commit the basic skeleton | crawl_ptt.py | crawl_ptt.py | #!/usr/bin/env python
from pprint import pprint
import logging
from bs4 import BeautifulSoup
import requests
logging.basicConfig(
format=(
'%(asctime)s\t%(levelname)s\t'
#'%(processName)s\t%(threadName)s\t'
'%(name)s\t%(funcName)s:%(lineno)d\t'
'%(message)s'
),
level=logging.DEBUG
)
def make_fake_browser():
fake_browser = requests.Session()
fake_browser.headers = {
'user-agent': (
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_1) '
'AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/54.0.2840.98 Safari/537.36'
),
'accept': (
'text/html,application/xhtml+xml,application/xml;q=0.9,'
'image/webp,*/*;q=0.8'
),
'accept-encoding': 'gzip, deflate, sdch, br',
'accept-language': 'en-US,en;q=0.8,zh-TW;q=0.6,zh;q=0.4',
'cookie': 'over18=1',
}
return fake_browser
def save_index_html():
fake_browser = make_fake_browser()
resp = fake_browser.get('https://www.ptt.cc/bbs/Gossiping/index.html')
with open('index.html', 'w') as f:
f.write(resp.text)
logging.info('Saved index.html')
if __name__ == '__main__':
save_index_html()
| Python | 0.999616 | |
8287876963af72756c3ff9102526c56f3e28a8a2 | Test for file resources | tests/functional_tests/test_resources/test_file_resource.py | tests/functional_tests/test_resources/test_file_resource.py | # -*- coding: utf8 -*-
from tuttle.resources import FileResource
import tuttle.resources
from os import path
class TestHttpResource():
def test_real_resource_exists(self):
"""A real resource should exist"""
file_url = "file://{}".format(path.abspath(tuttle.resources.__file__))
res = FileResource(file_url)
assert res.exists()
def test_fictive_resource_exists(self):
"""A real resource should exist"""
res = FileResource("fictive_file")
assert not res.exists()
| Python | 0 | |
14755cda032b5cb44626b2da66d943517427f947 | test for malformed db imports | tests/test_core.py | tests/test_core.py | """unit tests for core.py"""
import pytest
import core
def test_malformed_linkdatabase():
# pytest.set_trace()
with pytest.raises(EOFError):
core.LinkDatabase().load(db='tests/garbage.pickle')
| Python | 0 | |
3a59057f7465d9982e26b92cddafa0ea9ba48806 | Add new package: universal-ctags (#18962) | var/spack/repos/builtin/packages/universal-ctags/package.py | var/spack/repos/builtin/packages/universal-ctags/package.py | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class UniversalCtags(AutotoolsPackage):
"""Universal Ctags generates an index (or tag) file of language
objects found in source files for many popular programming languages.
This index makes it easy for text editors and other tools to locate
the indexed items."""
homepage = "https://ctags.io/"
git = "https://github.com/universal-ctags/ctags.git"
version('master', branch='master')
depends_on('autoconf', type='build')
depends_on('automake', type='build')
depends_on('libtool', type='build')
depends_on('m4', type='build')
| Python | 0 | |
f29e278a1b661224c9580d8275654a8c6fe7d3cf | add test for http.encode_request | tests/test_http.py | tests/test_http.py | """Test bbs2ch.http module."""
from bbs2ch import http
def test_host_path():
"""Return hostname and path from url."""
assert (u'hoge.com', '/') == http.host_path(u'http://hoge.com/')
def test_encode_request_get():
"""Return http request string."""
header = [(u'Key', u'Value'),
(u'Key2', u'Value2')]
assert ('GET / HTTP/1.1\r\n'
'Key: Value\r\n'
'Key2: Value2\r\n'
'\r\n'
'\r\n' ==
http.encode_request('GET', u'/', header))
def test_encode_request_post():
"""Return http request string.
if body is not empty, add header to Content-length and Content-Type.
"""
header = [(u'Key', u'Value'),
(u'Key2', u'Value2')]
body = [(u'key', u'value'),
(u'key2', u'value2')]
assert ('POST / HTTP/1.1\r\n'
'Key: Value\r\n'
'Key2: Value2\r\n'
'Content-Type: application/x-www-form-urlencoded\r\n'
'Content-Length: 21\r\n'
'\r\n'
'key=value&key2=value2\r\n'
==
http.encode_request(u'POST', u'/', header, body))
| Python | 0 | |
f032556bf07b37f9544c71ecad7aed472021bc97 | Add script to update giving and teams receiving | sql/branch.py | sql/branch.py | import sys
from gratipay import wireup
db = wireup.db(wireup.env())
participants = db.all("""
SELECT p.*::participants
FROM participants p
WHERE (
SELECT error
FROM current_exchange_routes er
WHERE er.participant = p.id
AND network = 'braintree-cc'
) <> ''
""")
total = len(participants)
print("%s participants with failing cards" % total)
counter = 1
for p in participants:
sys.stdout.write("\rUpdating (%i/%i)" % (counter, total))
sys.stdout.flush()
counter += 1
p.update_giving_and_teams()
print("Done!")
| Python | 0 | |
da66b2a2a2e2a73ffd986aea6ba5d086d43892fc | Add main smoketest | tests/test_main.py | tests/test_main.py | import unittest
import sys
from chaser import main
class TestMain(unittest.TestCase):
def test_smoke_main(self):
sys.argv = ["chaser"]
main()
| Python | 0.000002 | |
ad1d349d49072b5bda6641db4f070704fde81e5f | Add FCC. | inspectors/fcc.py | inspectors/fcc.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
import logging
import os
from urllib.parse import urljoin
from bs4 import BeautifulSoup
from utils import utils, inspector
# http://transition.fcc.gov/oig/oigreportsaudit.html
# Oldest report: 1994
# options:
# standard since/year options for a year range to fetch from.
#
# Notes for IG's web team:
AUDIT_REPORTS_URL = "http://transition.fcc.gov/oig/oigreportsaudit.html"
SEMIANNUAL_REPORTS_URL = "http://transition.fcc.gov/oig/oigreportssemiannual.html"
OTHER_REPORTS_URL = "http://transition.fcc.gov/oig/oigreportsletters.html"
def run(options):
year_range = inspector.year_range(options)
for url in [AUDIT_REPORTS_URL, SEMIANNUAL_REPORTS_URL, OTHER_REPORTS_URL]:
doc = beautifulsoup_from_url(url)
results = doc.find_all("table", {"border": 2})[0].select("tr")
for index, result in enumerate(results):
if index < 2:
# The first two rows are headers
continue
report = report_from(result, url, year_range)
if report:
inspector.save_report(report)
def report_from(result, page_url, year_range):
if not result.text.strip():
# Nothing in the entire row, just an empty row
return
report_url = urljoin(page_url, result.select("td a")[0].get('href'))
report_filename = report_url.split("/")[-1]
report_id, extension = os.path.splitext(report_filename)
published_on_text = result.select("td")[0].text.split("\r\n")[0].strip()
if len(result.select("td")) == 2:
# Semiannual report
published_on_text = published_on_text.split("to")[-1].split("through")[-1].strip()
published_on = datetime.datetime.strptime(published_on_text, '%B %d, %Y')
title = "Semi-Annual Report - {}".format(published_on_text)
else:
try:
published_on = datetime.datetime.strptime(published_on_text, '%m/%d/%y')
except ValueError:
published_on = datetime.datetime.strptime(published_on_text, '%m/%d/%Y')
title = result.select("td")[1].text.strip()
if published_on.year not in year_range:
logging.debug("[%s] Skipping, not in requested range." % report_url)
return
report = {
'inspector': 'fcc',
'inspector_url': 'http://fcc.gov/oig/',
'agency': 'fcc',
'agency_name': "Federal Communications Commission",
'report_id': report_id,
'url': report_url,
'title': title,
'published_on': datetime.datetime.strftime(published_on, "%Y-%m-%d"),
}
return report
def beautifulsoup_from_url(url):
body = utils.download(url)
return BeautifulSoup(body)
utils.run(run) if (__name__ == "__main__") else None | Python | 0 | |
c510b27dea59eeae229cf30dabc39ae083f286b0 | Add better indexes | ureport/stats/migrations/0017_better_indexes.py | ureport/stats/migrations/0017_better_indexes.py | # Generated by Django 3.2.6 on 2021-09-27 17:49
from django.db import migrations
INDEX_POLLSTATS_ORG_RESULT_SQL = """
CREATE INDEX IF NOT EXISTS stats_pollstats_org_result on stats_pollstats (org_id, flow_result_id) WHERE flow_result_id IS NOT NULL;
"""
INDEX_POLLSTATS_ORG_QST_RST_CAT_SQL = """
CREATE INDEX IF NOT EXISTS stats_pollstats_org_qstn_rslt_cat_age_gndr_schm_date_not_null on stats_pollstats (org_id, question_id, flow_result_id, category_id, flow_result_category_id, age_segment_id, gender_segment_id, scheme_segment_id, location_id, date) WHERE date IS NOT NULL;
"""
class Migration(migrations.Migration):
dependencies = [
("stats", "0016_pollstats_scheme_segment"),
]
operations = [
migrations.RunSQL(INDEX_POLLSTATS_ORG_RESULT_SQL),
migrations.RunSQL(INDEX_POLLSTATS_ORG_QST_RST_CAT_SQL),
]
| Python | 0.001981 | |
b304b1087d69d4142a9df5ad2db339e5aafe3331 | Update category | news/views.py | news/views.py | from django.shortcuts import render, redirect,render_to_response
from django.template import RequestContext
from django.http import HttpResponse
from django.urls import reverse
from django.views.generic import TemplateView,DetailView
# Create your views here
from news.models import Slider,How_it_works,ArticleCategory,Contact_us,ArticleCategory,Article,RelationCategoryArticle,ArticleImages
"""
Just in case test views
"""
def index(request):
return redirect(reverse('main-index'))
class TemplateAllData(TemplateView):
def get_context_data(self, **kwargs):
context = super(TemplateAllData, self).get_context_data(**kwargs)
context['categorys'] = ArticleCategory.objects.all()
context['contact'] = Contact_us.objects.all()
return context
class TestView(TemplateAllData):
template_name = 'index.html'
def get_context_data(self, **kwargs):
context = super(TestView, self).get_context_data(**kwargs)
context['slider'] = Slider.objects.filter(status=True)
context['how_it'] = How_it_works.objects.all().order_by('id')
context['feed'] = Article.objects.filter(status=True,home_page_status=True)
return context
class AboutView(TemplateAllData):
template_name = 'index-1.html'
class GalleryView(TemplateAllData):
template_name = 'index-2.html'
def get_context_data(self, **kwargs):
context = super(GalleryView, self).get_context_data(**kwargs)
context['albom'] = ArticleImages.objects.all()
return context
class ContactsView(TemplateAllData):
template_name = 'index-4.html'
class PrivacyView(TemplateAllData):
template_name = 'index-5.html'
class CategoryDetailView(DetailView):
model = ArticleCategory
template_name = 'index-3.html'
def get_context_data(self, **kwargs):
context = super(CategoryDetailView, self).get_context_data(**kwargs)
context['categorys'] = ArticleCategory.objects.all()
context['contact'] = Contact_us.objects.all()
context['cat_feed'] = RelationCategoryArticle.objects.filter(category_obj__slug=self.kwargs.get('slug'))
return context | from django.shortcuts import render, redirect,render_to_response
from django.template import RequestContext
from django.http import HttpResponse
from django.urls import reverse
from django.views.generic import TemplateView,DetailView
# Create your views here
from news.models import Slider,How_it_works,ArticleCategory,Contact_us,ArticleCategory,Article,RelationCategoryArticle,ArticleImages
"""
Just in case test views
"""
def index(request):
return redirect(reverse('main-index'))
class TemplateAllData(TemplateView):
def get_context_data(self, **kwargs):
context = super(TemplateAllData, self).get_context_data(**kwargs)
context['categorys'] = ArticleCategory.objects.all()
context['contact'] = Contact_us.objects.all()
return context
class TestView(TemplateAllData):
template_name = 'index.html'
def get_context_data(self, **kwargs):
context = super(TestView, self).get_context_data(**kwargs)
context['slider'] = Slider.objects.filter(status=True)
context['how_it'] = How_it_works.objects.all().order_by('id')
context['feed'] = Article.objects.filter(status=True,home_page_status=True)
return context
class AboutView(TemplateAllData):
template_name = 'index-1.html'
class GalleryView(TemplateAllData):
template_name = 'index-2.html'
def get_context_data(self, **kwargs):
context = super(GalleryView, self).get_context_data(**kwargs)
context['albom'] = ArticleImages.objects.all()
return context
class ContactsView(TemplateAllData):
template_name = 'index-4.html'
class PrivacyView(TemplateAllData):
template_name = 'index-5.html'
class CategoryDetailView(DetailView):
model = ArticleCategory
template_name = 'index-3.html'
def get_context_data(self, **kwargs):
context = super(CategoryDetailView, self).get_context_data(**kwargs)
context['categorys'] = ArticleCategory.objects.all()
context['contact'] = Contact_us.objects.all()
context['cat_feed'] = RelationCategoryArticle.objects.filter(category_obj__slug=kwargs.get('slug'))
return context | Python | 0.000001 |
fb86dcdd6046c7d35e932396ba541671727b4d01 | rearrange imports to standards | ngSe/utils.py | ngSe/utils.py | from functools import wraps
from time import time, sleep
from .exceptions import element_exceptions
def retry(f=None, timeout=30, interval=0.1):
"""
When working with a responsive UI, sometimes elements are not ready at the very second you request it
This wrapper will keep on retrying finding or interacting with the element until its ready
"""
# This allows us to use '@retry' or '@retry(timeout=thing, interval=other_thing)' for custom times
if f is None:
def rwrapper(f):
return retry(f, timeout, interval)
return rwrapper
@wraps(f)
def wrapper(*args, **kwargs):
# The wrapped function gets the optional arguments retry_timeout and retry_interval added
retry_timeout = kwargs.pop('retry_timeout', timeout)
retry_interval = kwargs.pop('retry_interval', interval)
prep = kwargs.pop('prep', None)
end_time = time() + retry_timeout
while True:
try:
if prep is not None:
prep()
return f(*args, **kwargs)
except element_exceptions:
if time() > end_time:
# timeout, re-raise the original exception
raise
sleep(retry_interval)
return wrapper
| from time import time, sleep
from functools import wraps
from .exceptions import element_exceptions
def retry(f=None, timeout=30, interval=0.1):
"""
When working with a responsive UI, sometimes elements are not ready at the very second you request it
This wrapper will keep on retrying finding or interacting with the element until its ready
"""
# This allows us to use '@retry' or '@retry(timeout=thing, interval=other_thing)' for custom times
if f is None:
def rwrapper(f):
return retry(f, timeout, interval)
return rwrapper
@wraps(f)
def wrapper(*args, **kwargs):
# The wrapped function gets the optional arguments retry_timeout and retry_interval added
retry_timeout = kwargs.pop('retry_timeout', timeout)
retry_interval = kwargs.pop('retry_interval', interval)
prep = kwargs.pop('prep', None)
end_time = time() + retry_timeout
while True:
try:
if prep is not None:
prep()
return f(*args, **kwargs)
except element_exceptions:
if time() > end_time:
# timeout, re-raise the original exception
raise
sleep(retry_interval)
return wrapper
| Python | 0.021967 |
92aab88f88a4a9e3df82dd3f7a94b491a7cb3bd1 | add interactive script | interactive.py | interactive.py | from collections import defaultdict
import matplotlib.pyplot as plt
import numpy as np
pile = defaultdict(dict)
def draw_sandpile():
dim = raw_input("Enter dimensions of grid (eg, 4x4):\n")
try:
r, c = map(int, dim.strip().split('x'))
pile["r"] = r
pile["c"] = c
except Exception:
print("Enter in the form <int>x<int> (eg, 4x4)")
raise
pad_pile(pile)
for row in range(1, r+1):
for col in range(1, c+1):
pile[(row, col)]["max"] = int(raw_input("Max for row %s, col %s:\n" % (row, col)))
pile[(row, col)]["#"] = int(raw_input("Initial for row %s, col %s:\n" % (row, col)))
count = 0
while pile_unstable(pile):
count += 1
collapse_pile(pile)
unpad_pile(pile)
print("\nRan for %i iterations" % count)
plot(pile)
def plot(pile):
numpy_array = convert_to_numpy_array(pile)
plt.matshow(numpy_array, cmap=plt.get_cmap('gist_rainbow'))
plt.colorbar(orientation='horizontal')
plt.axis('off')
# printing the sand count in the plot
it = np.nditer(numpy_array, flags=['multi_index'])
print(numpy_array)
while not it.finished:
plt.text(it.multi_index[1], it.multi_index[0], int(it[0]), va='center', ha='center')
it.iternext()
plt.show()
def convert_to_numpy_array(pile):
r = pile["r"]
c = pile["c"]
np_array = np.empty(shape=(r, c))
for row in range(r):
for col in range(c):
np_array[row][col] = pile[(row+1, col+1)]["#"]
return np_array
def pad_pile(pile):
r = pile["r"]
c = pile["c"]
for row in range(r+2):
for col in range(c+2):
pile[(row, col)]["max"] = 0
pile[(row, col)]["#"] = 0
def unpad_pile(pile):
r = pile["r"]
c = pile["c"]
for col in range(c+2):
del pile[(0, col)]
del pile[(r+1, col)]
for row in range(1, r+1):
del pile[(row, 0)]
del pile[(row, c+1)]
return pile
def pile_unstable(pile):
r = pile["r"]
c = pile["c"]
for row in range(1, r+1):
for col in range(1, c+1):
if pile[(row, col)]["#"] > pile[(row, col)]["max"]:
return True
return False
def get_toppable_squares(pile):
toppable_squares = []
r = pile["r"]
c = pile["c"]
for row in range(1, r+1):
for col in range(1, c+1):
if pile[(row, col)]["#"] > pile[(row, col)]["max"]:
toppable_squares.append((row, col))
return toppable_squares
def collapse_pile(pile):
toppable_squares = get_toppable_squares(pile)
for square in toppable_squares:
topple(square, pile)
def topple(square, pile):
# toppling order is clockwise - LEFT, TOP, RIGHT, BOTTOM
r, c = square[0], square[1]
if pile[square]["#"] >= 1:
pile[square]["#"] -= 1
pile[(r-1, c)]["#"] += 1
if pile[square]["#"] >= 1:
pile[square]["#"] -= 1
pile[(r, c+1)]["#"] += 1
if pile[square]["#"] >= 1:
pile[square]["#"] -= 1
pile[(r+1, c)]["#"] += 1
if pile[square]["#"] >= 1:
pile[square]["#"] -= 1
pile[(r, c-1)]["#"] += 1
return pile
if __name__ == '__main__':
draw_sandpile()
| Python | 0.000001 | |
d648aeb90158cb104ac6548887a39dc13dfa236f | add management cmd make_emails_lowercase | corehq/apps/users/management/commands/make_emails_lowercase.py | corehq/apps/users/management/commands/make_emails_lowercase.py | from django.core.management import BaseCommand
from corehq.apps.users.models import CouchUser
class Command(BaseCommand):
help = "Makes emails into lowercase"
def handle(self, *args, **options):
for couch_user in CouchUser.all():
if couch_user.email and any(char.isupper() for char in couch_user.email):
print couch_user.email
couch_user.email = couch_user.email.lower()
couch_user.save()
| Python | 0.000212 | |
9d2976200965c4ea6b324d0822f6be786a25f2ea | Add file containing filesystem utilities | refmanage/fs_utils.py | refmanage/fs_utils.py | # -*- coding: utf-8 -*-
| Python | 0.000001 | |
64c62afa2a87d6d87017eeaf3f80f6dc18c05c88 | Create pysugar.py | src/pysugar.py | src/pysugar.py | # -*- coding: utf-8 -*-
'''
Created on 2014年7月23日
@author: fengzishiren
@mail: xiaoyaozi106@163.com
'''
import re
import MySQLdb
import os
import sys
from settings import *
__version__ = '0.1'
__date__ = '2014-07-23'
__updated__ = '2014-07-23'
FORMAT_ARGS = {'author': 'pysugar', 'date': __date__, 'version': __version__}
PATTERN_NAME = re.compile(r'\s(\w+)\(')
PATTERN_ARG = re.compile(r'\((\w+)|(Map<\w+,\w+>)\s(\w+)\)')
class Connection(object):
def connect(self):
host = DATABASE.get('host', '127.0.0.1')
port = DATABASE.get('port', 3306) # oracle default port:10033 mysql:3306
username = DATABASE['username']
password = DATABASE['password']
database = DATABASE['database']
# self.con = MySQLdb.connect(username, password, host + ':' + str(port) + '/' + database)
self.con = MySQLdb.connect(host=host, user=username, passwd=password, db=database, port=port)
self.cursor = self.con.cursor()
return self.cursor
def close(self):
self.con.commit()
self.cursor.close()
self.con.close()
class Token(object):
def __init__(self, name, _type, args):
self.name = name
self.type = _type # insert update delete select
self.args = args # map'or obj'
def say(x):
print x
return x
def col2prop(BIG_ID):
'''
input eg. SEC_PASS
return secPass
'''
bigA = lambda x:''.join((x[0].upper(), x[1:]))
mid = ''.join([bigA(part) for part in BIG_ID.lower().split('_')])
return ''.join((mid[0].lower(), mid[1:]))
def table2bean(table_name, ignore_preffix='tb_'):
'''
input table_name eg. tb_person_relation
return PersonRelation
Note: 'tb_' will be ignored
'''
if table_name.startswith(ignore_preffix):
table_name = table_name[len(ignore_preffix):]
bigA = lambda x:''.join((x[0].upper(), x[1:]))
return ''.join([bigA(c) for c in table_name.split('_')])
def set_format_args(**kwargs):
global FORMAT_ARGS
FORMAT_ARGS.update(kwargs)
def get_tokens(daosrc):
'''
分析格式化后的dao源代码
return Tokens
'''
dao_code = daosrc % FORMAT_ARGS
lines = dao_code.split('\n')
lines = [x.strip() for x in lines if x.strip().startswith('public')]
return map(get_token, lines[1:])
def get_token(line):
line = line[len('public'):].strip()
match = PATTERN_NAME.search(line)
assert match, 'Syntax error!'
name = match.group(1)
_type = OP_TYPE_DICT.get(name[0:3])
if type == None:
return None
match = PATTERN_ARG.search(line)
assert match, 'Illegal Parameters'
return Token(name, _type, match.group(1).lower())
def gen_bean_name(bean):
'''
eg. UserAccount, userAccount
'''
set_format_args(bean_name = bean, var_name = ''.join((bean[0].lower(), bean[1:])))
def gen_sqls(table_name, tokens):
gs = lambda table_name, token: SQL_CODE[token.type] % \
dict({'method_name': token.name, 'arg_type':token.args, 'table_name':table_name}, **FORMAT_ARGS)
sqls = [gs(table_name, tok) for tok in tokens if tok] # Note: Ignore tok if tok == None
set_format_args(sqls = '\n'.join(sqls))
def get_bean_content(fieldtypes):
fields = ['private %s %s;' % (v, k) for k, v in fieldtypes.items()]
GET_AND_SET = \
"""
public %(type)s get%(Name)s() {
return %(name)s;
}
public void set%(Name)s(%(type)s %(name)s) {
this.%(name)s = %(name)s;
}"""
bigA = lambda x:''.join((x[0].upper(), x[1:]))
content = '\n'.join([GET_AND_SET % {'name': f, 'Name':bigA(f), 'type': t} for f, t in fieldtypes.items()])
return '\n'.join(('\n\t'.join(fields), content))
def gen_map_and_fields(table_name, obtain_type=lambda x: x):
'''
obtain_type 必须是一个函数 用来处理从数据库类型到Java类型的转换
这里obtain_type默认什么也不做 用来处理MySql类型
'''
sql = 'select * from %s' % table_name
con = Connection()
try:
cursor = con.connect()
cursor.execute(sql)
descs = cursor.description
finally:
con.close()
fieldtypes = {col2prop(e[0]): DO_TYPE_DICT.get(obtain_type(e[1]), 'Object') for e in descs}
set_format_args(bean_content = get_bean_content(fieldtypes))
procols = {col2prop(key): key for key in [e[0] for e in descs]}
resultmap = ['<result property="%s" column="%s" />' % (k, v) for k, v in procols.items()]
set_format_args(map = '\n\t'.join(resultmap))
cols = '(%s)' % (','.join(procols.values()))
vals = '(%s)' % (','.join(['#{%s}' % k for k in procols.keys()]))
# cache it
set_format_args(insert_suffix = ' values '.join((cols, vals)))
us = [' = '.join((v, '#{%s}' % k)) for k, v in procols.items()]
# cache it
set_format_args(update_set = ',\n\t'.join(us))
def write_file((filename, content), _dir=OUTPUT_DIR):
if not os.path.exists(_dir):
os.mkdir(_dir)
with open(os.path.join(_dir, filename), 'w') as f:
return f.write(content) # Note: return None
def get_tpl(name):
with open(name) as f:
return f.read()
def main(table_name):
bean = table2bean(table_name)
daosrc = get_tpl(os.path.join(TEMPLATES_DIR, 'dao.tpl'))
gen_bean_name(bean)
gen_map_and_fields(table_name)
gen_sqls(table_name, get_tokens(daosrc))
formatted = {outfile % FORMAT_ARGS : (get_tpl(os.path.join(TEMPLATES_DIR, tpl)) if tpl != 'dao.tpl' else daosrc)\
% FORMAT_ARGS for tpl, outfile in IO_FILE_LIST.items()}
map(write_file, formatted.items())
if __name__ == '__main__':
if sys.argv.__len__() == 3:
main(sys.argv[1])
else:
main('auth_user')
say('bye-bye')
| Python | 0.001172 | |
b4784d90d00f5ec1dadcbc56312374a0c838da75 | add createBW | createWorld_BW.py | createWorld_BW.py | ##########################################################################
##file: createWorld_BW.py
##Author: Ken Zyma
##Project: Blocks World and Agency
##
##Dependencies: py2neo.py
## neo4j data stored locally at...(localhost:7474/db/data)
##
##This file contains all methods to create a blocksWorld(n) configuration
## and add them to neo4j data file.
###########################################################################
from py2neo import node, rel
from py2neo import neo4j
import logging
#uncomment for debug logging
#logging.basicConfig(level=logging.DEBUG)
from copy import copy
from copy import deepcopy
import math
import bisect
import time
import random
graph_db = neo4j.GraphDatabaseService("http://localhost:7474/db/data/")
#generateRnadomConfig generates a random configuration
#of the size of n and a set width.
def generateRandomConfig(n):
width = random.randint(1,2)
l=range(1,n+1)
config=[]
for x in range(width):
config.append([])
for x in range(len(l)):
config[random.randint(0,width-1)].append(l.pop())
#protect agains empty list being returned
if [] in config:
config.remove([])
return config
def numbize(nList):
num=0
for n in nList:
num*=100
num+=n
return num
def genCfgId(cfg):
nL=map(numbize,cfg)
nL.sort()
cfId=''
for n in nL:
cfId+=str(n)+'a'
return cfId
def destsOf(cfg):
rr=[x for x in range(len(cfg))]
mList=[]
for indx in range(len(cfg)):
irr = copy(rr)
irr[indx]=-1
mList.append(irr)
return mList
def genMvs(cfg):
src=[x for x in range(len(cfg))]
dests=[]
for indx in src:
idst = copy(src)
if len(cfg[indx])>1:
idst[indx]=-1
else:
idst.remove(indx)
dests.append(idst)
return [(src[a],dests[a][b]) for a in range(len(src)) for b in range(len(dests[a]))]
def mkNwCfg(mv,cfg):
newCfg=deepcopy(cfg)
frm=mv[0]
to=mv[1]
mvBlk=newCfg[frm].pop()
if to>-1:
newCfg[to].append(mvBlk)
else:
newCfg.append([mvBlk])
if newCfg[frm]==[]:
del newCfg[frm]
return newCfg
#cfg1 is configuration, mv is the move from "xpand'd node"
def makNd(cfg1,mv,root):
return {'cfg':cfg1,'nid':genCfgId(cfg1),'mv':mv,'root':root}
def xpdNd(node):
cfg=node['cfg']
mvs = genMvs(cfg)
nodes = []
for x in mvs:
nwCfg = mkNwCfg(x,cfg)
newNode=makNd(nwCfg,x,node["nid"])
nodes.append(newNode)
return nodes
#generates center Node in the form [[0],[1],...[n]]
def generateCenterNode(n):
i=[]
for x in range(1,n+1):
n = []
n.append(x)
i.append(n)
return i
#convert 1 to One, 2 to Two, ect
#*note* this is a workaround, neo4j does not allow any numbers
# as relation names
def convertNtoStr(n):
return {
'1': "one",
'2': "two",
'3': "three",
'4': "four",
'5': "five",
'6': "six",
'7': "seven",
'8': "eight",
'9': "nine",
'0': "zero",
'-1': "table",
}.get(n,"none")
def findIndexById(nid,List):
return List.index(nid)
#Generate the Blocks World
#*note* all checking of which blocks were already created,ect is done
#        in python (client side) to avoid a bunch of unnessesary http reqs.
def generateBW(n):
    """Enumerate the whole n-block blocks-world state graph.

    Starting from n single-block stacks, expand every reachable
    configuration exactly once (deduplicated by canonical id) and record
    one relationship per generated move.  Returns the combined node +
    relationship list suitable for graph.db.create().

    NOTE(review): ``node`` and ``rel`` come from a graph-client import
    outside this chunk (presumably py2neo's abstract node/rel helpers --
    confirm at the top of the file).
    """
    #list of New nodes that need to be expanded
    NeedToXpndList = []
    NeedToXpndListID = []
    #list of nodes that were expanded
    DoneXpndList = []
    #list of all nodes created and parrell indx'd node list for Neo2J
    nodesID = []
    nodesAndRelsN2J = []
    nodesN2J = []
    relsN2J = []
    #generate central node/initial node
    CntCfg = generateCenterNode(n)
    CntNode = makNd(CntCfg,None,None)
    nodesID.append(CntNode['nid'])
    nodesN2J.append(node(id=CntNode["nid"]))
    NeedToXpndList.append(CntNode)
    NeedToXpndListID.append(CntNode['nid'])
    #until all nodes that need to be expanded are expanded, continue...
    while([] != (NeedToXpndListID)):
        DoneXpndList.append(NeedToXpndListID.pop())
        xpndNode = xpdNd(NeedToXpndList.pop())
        # First pass: register configurations never seen before.
        for x in xpndNode:
            if (x['nid'] not in nodesID):
                NeedToXpndList.append(x)
                NeedToXpndListID.append(x['nid'])
                nodesID.append(x['nid'])
                nodesN2J.append(node(id=x["nid"]))
        # Second pass: add one relationship per generated move
        # (runs over ALL children, including previously-seen ones).
        for x in xpndNode:
            #create relationship string for mv
            temp= x["mv"]
            move="MOVE"+convertNtoStr(str(temp[0]))+"TO"+convertNtoStr(str(temp[1]))
            #create the relationship
            indxOfRoot = findIndexById(x["root"],nodesID)
            indxOfCurrent = findIndexById(x["nid"],nodesID)
            #append relation to nodesAndRels
            relsN2J.append(rel(indxOfRoot,move,indxOfCurrent))
    #convert to string and remove first and last element (the " and ")
    #-->>this is nessesary to do graph.db.create(nodesAndRelsN2J)
    #str(nodesAndRelsN2J)[1:-1]
    nodesAndRelsN2J = nodesN2J + relsN2J
    return nodesAndRelsN2J
# *NOTE* args = (arg1, arg2, arg3)
# func(*args)
| Python | 0.000001 | |
b674efd944bf124da60db90d90cc2da35761427d | Conform to pep8 | shinken/bin.py | shinken/bin.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
"""
This file is to be imported by every Shinken service component:
Arbiter, Scheduler, etc. It just checks for the main requirement of
Shinken.
"""
import sys
VERSION = "2.2"  # Shinken release this module ships with
# Make sure people are using Python 2.6 or higher
# This is the canonical python version check
if sys.version_info < (2, 6):
    sys.exit("Shinken requires as a minimum Python 2.6.x, sorry")
elif sys.version_info >= (3,):
    sys.exit("Shinken is not yet compatible with Python 3.x, sorry")
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
"""
This file is to be imported by every Shinken service component:
Arbiter, Scheduler, etc. It just checks for the main requirement of
Shinken.
"""
import sys
VERSION = "2.2"
# Make sure people are using Python 2.6 or higher
# This is the canonical python version check
if sys.version_info < (2, 6):
sys.exit("Shinken requires as a minimum Python 2.6.x, sorry")
elif sys.version_info >= (3,):
sys.exit("Shinken is not yet compatible with Python 3.x, sorry")
| Python | 0.999996 |
5a77df8ebd1fd20ac6de34fe19853adbfeea6e31 | Add arabic tests | revscoring/languages/tests/test_arabic.py | revscoring/languages/tests/test_arabic.py | import pickle
from nose.tools import eq_
from .. import arabic
from ...datasources import revision
from ...dependencies import solve
from .util import compare_extraction
BAD = [
"احا",
"عاهرا",
"زندقتهما",
"حمار",
"لعن",
"يلعن",
"لعنه",
"امك",
"لعنتهما",
"فلعنهما",
"اعزبوا",
"عزبوا",
"لدحي",
"زبي",
"كلب",
"كافر",
"والله",
"الحمار",
"الزنا",
"النيك",
"كلابي",
"الكلب",
"منو",
"نجس",
"والعياذ",
"يتبرز",
"الكافر",
"تتزر",
"منكاحا",
"وينكح",
"منافق",
"الشيطان",
]
INFORMAL = [
"كالامازوه",
"فغانيون",
"ومراف",
"زوه",
"رلا",
"بلوجاتي",
"كتمتمان",
"سراريه",
"اجك",
"الجيدي",
"مناخرهم",
"الجيرل",
"وخلاخيل",
"اكشفي",
"ومحاسنه",
"يبزقن",
"اجهن",
"اطهن",
"ستنفض",
"خطبهن",
"اخدون",
"غمزني",
"فطلقني",
"فحكه",
"خرق",
"وهل",
"اللي",
"تحرموا",
"الزن",
"بالنعلين",
"وغلامك",
"عليلك",
"فتحدثها",
"اتمن",
"الشنبا",
"وروراو",
"والفاج",
"صوردون",
"ورجلاي",
"وضاحا",
"مختار",
"نسب",
"شيخ",
]
OTHER = [
"""يقوم تاريخ علم الأحياء بدراسة الأحياء من الزمن القديم إلى المعاصر.
مع أن مفهوم علم الأحياء كمجال واحد متماسك ظهر في القرن التاسع عشر،
فإن علوم الأحياء ظهرت من تقاليد الطب والتاريخ الطبيعي المأخوذة من
أيورفيدا، الطب المصري القديم وكتابات أرسطو وجالينوس في العصور اليونانية
والرومانية القديمة. تم تطوير هذا العمل القديم خلال القرون الوسطى من قبل
الأطباء والعلماء المسلمين مثل ابن سينا. خلال عصر النهضة الأوروبية وبداية
العصر الحديث، تم تحديث الفكر في علم الأحياء في أوروبا بسبب الاهتمام المتجدد
بالفلسفة التجريبية واكتشاف العديد من الكائنات الحية التي لم تكن معروفة
"""
]
def test_badwords():
compare_extraction(arabic.revision.badwords_list, BAD, OTHER)
def test_informals():
compare_extraction(arabic.revision.informals_list, INFORMAL, OTHER)
def test_revision():
cache = {revision.text: "يقوم تاريخ علم الأحياء بدراسة الأحياء."}
eq_(solve(arabic.revision.words_list, cache=cache),
["يقوم", "تاريخ", "علم", "الأحياء", "بدراسة", "الأحياء"])
def test_pickling():
eq_(arabic, pickle.loads(pickle.dumps(arabic)))
| Python | 0.000681 | |
6979bbf6547d689b1980762349a0e78c9c7c026d | Create fibonacci.py | python/fibonacci/fibonacci.py | python/fibonacci/fibonacci.py | a = 0
b = 1
c = 0
n = int(input("Nombre de termes : "))
for i in range (1, n+1):
c = a+b
b = a
a= c
print(c)
| Python | 0.000838 | |
fcf691454b8607fec9d7f5cba43579dc02c26c8b | Check coverage of pgi, vs gi | tests/pgi_covergage.py | tests/pgi_covergage.py | """
find pgi coverage of all gi.repositorys.
you need to have access to both 'gi' and 'pgi' in the current python
environment.
In a virtualenv this works:
$ pip install pgi
$ pip install vext.gi
$ python pgi_coverage.py
"""
TYPELIB_DIR="/usr/lib/girepository-1.0"
from os.path import basename
from glob import glob
from textwrap import dedent
def test_pgi_coverage(gi_module, pgi_module):
name_width = len(max(dir(gi_module), key=len))
print('%s %s' % (gi_module.__name__.rjust(name_width), pgi_module.__name__))
for name in dir(gi_module):
if name.startswith('_'):
continue
status = 'OK'
try:
getattr(pgi_module, name)
except NotImplementedError as e:
#status = "FAIL: '%s'" % str(e.__class__.__name__)
status = "FAIL"
for line in str(e).splitlines():
if line.startswith('NotImplementedError:'):
status = status + " " + line
print("%s\t%s" % (name.rjust(name_width), status))
print("")
def test_coverage(typelib):
code = dedent("""
from pgi.repository import {0} as PGI_{0}
from gi.repository import {0} as GI_{0}
test_pgi_coverage(GI_{0}, PGI_{0})
""".format(typelib))
try:
print("PGI coverage of %s" % typelib)
exec(code)
except Exception as e:
print("Skipped because of %s during test" % str(e))
def get_typelibs():
typelibs = []
for typelib in glob(TYPELIB_DIR + "/*.typelib"):
fn = basename(typelib).partition("-")[0]
typelibs.append(fn)
return typelibs
if __name__=='__main__':
typelibs = get_typelibs()
for typelib in typelibs:
test_coverage(typelib)
| Python | 0 | |
7c1cbc49e6cdc6ef514382eee9679f4e9719257b | add basic-calculator-ii | vol5/basic-calculator-ii/basic-calculator-ii.py | vol5/basic-calculator-ii/basic-calculator-ii.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: Zeyuan Shang
# @Date: 2015-11-18 17:22:37
# @Last Modified by: Zeyuan Shang
# @Last Modified time: 2015-11-18 17:22:44
class Solution:
operators = ['+', '-', '*', '/']
def getPriority(self, operator):
return {
'+' : 1,
'-' : 1,
'*' : 2,
'/' : 2,
}.get(operator, 0)
def toRPN(self, s):
tokens, stack = [], []
number = ''
for c in s:
if c.isdigit():
number += c
else:
if number:
tokens.append(number)
number = ''
if c in self.operators:
while len(stack) and self.getPriority(stack[-1]) >= self.getPriority(c):
tokens.append(stack.pop())
stack.append(c)
elif c == '(':
stack.append(c)
elif c == ')':
while len(stack) and stack[-1] != '(':
tokens.append(stack.pop())
stack.pop()
if number:
tokens.append(number)
while len(stack):
tokens.append(stack.pop())
return tokens
def calcValue(self, x, y, operator):
return {
'+': lambda x, y: x + y,
'-': lambda x, y: x - y,
'*': lambda x, y: x * y,
'/': lambda x, y: int(float(x) / y),
}[operator](x, y)
def evalRPN(self, tokens):
operands = []
for token in tokens:
if token in self.operators:
y, x = operands.pop(), operands.pop()
operands.append(self.calcValue(x, y, token))
else:
operands.append(int(token))
return operands[0]
def calculate(self, s):
tokens = self.toRPN(s)
return self.evalRPN(tokens) | Python | 0.999311 | |
943383a60f76a13290e540ac35c1ea3a8fc21a3e | Add a utility for downsampling a pair FASTQ files. | micall/utils/sample_fastq.py | micall/utils/sample_fastq.py | #!/usr/bin/env python
import argparse
import random
def parse_args():
    """Declare and parse this tool's command-line interface.

    Four positional file arguments (two inputs, two outputs) plus an
    optional approximate pair count.
    """
    parser = argparse.ArgumentParser(
        description="Randomly sample reads from FASTQ files for quick processing.")
    # Positional order matters: declare the files in a fixed table.
    file_arguments = (
        ('fastq1', 'rU', 'original FASTQ file of forward reads'),
        ('fastq2', 'rU', 'original FASTQ file of reverse reads'),
        ('short_fastq1', 'w', 'FASTQ file to write forward reads in'),
        ('short_fastq2', 'w', 'FASTQ file to write reverse reads in'),
    )
    for arg_name, mode, description in file_arguments:
        parser.add_argument(arg_name,
                            type=argparse.FileType(mode),
                            help=description)
    parser.add_argument('--count',
                        '-n',
                        type=float,
                        default=10000.0,
                        help='approximate number of read pairs to write')
    return parser.parse_args()
def get_reads(fastq_file):
    """ Yield each FASTQ record as a 4-tuple: header, sequence, '+', quality.

    A trailing partial record (fewer than four lines) is dropped.
    """
    stream = iter(fastq_file)
    for record in zip(stream, stream, stream, stream):
        yield record
def get_named_reads(fastq_file):
    """ Yield (name, read) pairs.

    The name is the first whitespace-delimited token of the header line,
    shared by a forward/reverse read pair.
    """
    for record in get_reads(fastq_file):
        name = record[0].split(' ')[0]
        yield name, record
def process_read(name, read, out_file, odds, skipped_names, chosen_names):
    """ Write a read to out_file if its pair is chosen for the sample.

    The first read seen for a pair decides randomly (with probability
    ``odds``) whether the pair is kept; the second read of the pair
    follows the same decision, and the bookkeeping entry is dropped.

    @param name: the name shared by a forward/reverse read pair
    @param read: a tuple of four lines that makes up a read
    @param out_file: an open file to write the chosen reads to
    @param odds: a float between zero and one that sets the odds of choosing
    a read
    @param skipped_names: names already skipped whose partner is pending
    @param chosen_names: names already written whose partner is pending
    """
    if name in skipped_names:
        # Partner was already skipped: drop this read, forget the name.
        skipped_names.discard(name)
        return
    if name in chosen_names:
        # Partner was already written: write this one too, forget the name.
        chosen_names.discard(name)
        is_chosen = True
    else:
        # First read of the pair: roll the dice and remember the outcome.
        is_chosen = random.uniform(0, 1) < odds
        (chosen_names if is_chosen else skipped_names).add(name)
    if is_chosen:
        for line in read:
            out_file.write(line)
def main():
    """Down-sample a pair of FASTQ files to roughly ``--count`` read pairs.

    The forward file is scanned once to count its lines, which turns the
    requested pair count into per-pair odds; both files are then streamed
    in lockstep and each pair is kept or dropped with those odds.
    """
    args = parse_args()
    # First pass: count lines in the forward file.
    line_count = 0  # survives an empty file (previously an unbound name)
    for line_count, _ in enumerate(args.fastq1, 1):
        pass
    args.fastq1.seek(0)
    read_count = line_count/4
    # Guard against an empty input instead of dividing by zero.
    odds = args.count/read_count if read_count else 0.0
    rev_reads = get_named_reads(args.fastq2)
    skipped_names = set()
    chosen_names = set()
    for fwd_name, fwd_read in get_named_reads(args.fastq1):
        # next() works on Python 2.6+ and 3; .next() was Python-2-only.
        rev_name, rev_read = next(rev_reads)
        process_read(fwd_name,
                     fwd_read,
                     args.short_fastq1,
                     odds,
                     skipped_names,
                     chosen_names)
        process_read(rev_name,
                     rev_read,
                     args.short_fastq2,
                     odds,
                     skipped_names,
                     chosen_names)

main()
| Python | 0 | |
408be8a0d49b7542c74e016a572499a8c4d85351 | Add tests to verify team index and add pages render without errors | app/teams/tests.py | app/teams/tests.py | from app.test_base import BaseTestCase
class TestTeamBehavior(BaseTestCase):
    """Smoke tests: the team index and creation pages render (HTTP 200)
    for a logged-in user."""

    def test_index_page_200(self):
        self.login()
        self.assert200(self.client.get('/teams/'))

    def test_add_page_200(self):
        self.login()
        self.assert200(self.client.get('/teams/new'))
5ce6283cff4a3a97911a663d777869a7c7377341 | add http_codes | catalogService/http_codes.py | catalogService/http_codes.py | #
# Copyright (c) 2008 rPath, Inc.
#
# Symbolic names for the standard HTTP status codes (includes the
# WebDAV extensions such as 102, 207, 422-424 and 507).
# 1xx: informational
HTTP_CONTINUE = 100
HTTP_SWITCHING_PROTOCOLS = 101
HTTP_PROCESSING = 102
# 2xx: success
HTTP_OK = 200
HTTP_CREATED = 201
HTTP_ACCEPTED = 202
HTTP_NON_AUTHORITATIVE = 203
HTTP_NO_CONTENT = 204
HTTP_RESET_CONTENT = 205
HTTP_PARTIAL_CONTENT = 206
HTTP_MULTI_STATUS = 207
# 3xx: redirection
HTTP_MULTIPLE_CHOICES = 300
HTTP_MOVED_PERMANENTLY = 301
HTTP_MOVED_TEMPORARILY = 302
HTTP_SEE_OTHER = 303
HTTP_NOT_MODIFIED = 304
HTTP_USE_PROXY = 305
HTTP_TEMPORARY_REDIRECT = 307
# 4xx: client error
HTTP_BAD_REQUEST = 400
HTTP_UNAUTHORIZED = 401
HTTP_PAYMENT_REQUIRED = 402
HTTP_FORBIDDEN = 403
HTTP_NOT_FOUND = 404
HTTP_METHOD_NOT_ALLOWED = 405
HTTP_NOT_ACCEPTABLE = 406
HTTP_PROXY_AUTHENTICATION_REQUIRED = 407
HTTP_REQUEST_TIME_OUT = 408
HTTP_CONFLICT = 409
HTTP_GONE = 410
HTTP_LENGTH_REQUIRED = 411
HTTP_PRECONDITION_FAILED = 412
HTTP_REQUEST_ENTITY_TOO_LARGE = 413
HTTP_REQUEST_URI_TOO_LARGE = 414
HTTP_UNSUPPORTED_MEDIA_TYPE = 415
HTTP_RANGE_NOT_SATISFIABLE = 416
HTTP_EXPECTATION_FAILED = 417
HTTP_UNPROCESSABLE_ENTITY = 422
HTTP_LOCKED = 423
HTTP_FAILED_DEPENDENCY = 424
HTTP_UPGRADE_REQUIRED = 426
# 5xx: server error
HTTP_INTERNAL_SERVER_ERROR = 500
HTTP_NOT_IMPLEMENTED = 501
HTTP_BAD_GATEWAY = 502
HTTP_SERVICE_UNAVAILABLE = 503
HTTP_GATEWAY_TIME_OUT = 504
HTTP_VERSION_NOT_SUPPORTED = 505
HTTP_VARIANT_ALSO_VARIES = 506
HTTP_INSUFFICIENT_STORAGE = 507
HTTP_NOT_EXTENDED = 510
| Python | 0.000004 | |
5c602a98098bdedeffc2b7359a4b3d8407cb1449 | Add migration to ensure consistency on file keys. | scripts/migrate_inconsistent_file_keys.py | scripts/migrate_inconsistent_file_keys.py | #!/usr/bin/env python
# encoding: utf-8
"""Find all nodes with different sets of keys for `files_current` and
`files_versions`, and ensure that all keys present in the former are also
present in the latter.
"""
from website.models import Node
from website.app import init_app
def find_file_mismatch_nodes():
    """Return nodes whose `files_current` and `files_versions` dicts do
    not cover the same set of keys.
    """
    mismatched = []
    for candidate in Node.find():
        current_keys = set(candidate.files_current.keys())
        version_keys = set(candidate.files_versions.keys())
        if version_keys != current_keys:
            mismatched.append(candidate)
    return mismatched
def migrate_node(node):
    """Ensure every key/id present in `files_current` also appears in
    `files_versions`, then persist the node.
    """
    for key, file_id in node.files_current.items():
        if key in node.files_versions:
            if file_id not in node.files_versions[key]:
                node.files_versions[key].append(file_id)
        else:
            node.files_versions[key] = [file_id]
    node.save()
def main(dry_run=True):
    """Report how many nodes mismatch; repair them unless this is a dry run."""
    init_app()
    targets = find_file_mismatch_nodes()
    print('Migrating {0} nodes'.format(len(targets)))
    if not dry_run:
        for target in targets:
            migrate_node(target)
if __name__ == '__main__':
    import sys
    # Pass 'dry' anywhere on the command line for a report-only run.
    dry_run = 'dry' in sys.argv
    main(dry_run=dry_run)
from nose.tools import * # noqa
from tests.base import OsfTestCase
from tests.factories import ProjectFactory
from framework.auth import Auth
class TestMigrateFiles(OsfTestCase):
    """Integration tests for the migration: three projects each get one
    file; the last project's files_versions is wiped so exactly one node
    is inconsistent."""
    def clear(self):
        # Drop every node so each test starts from an empty collection.
        Node.remove()
    def setUp(self):
        super(TestMigrateFiles, self).setUp()
        self.clear()
        self.nodes = []
        for idx in range(3):
            node = ProjectFactory()
            node.add_file(
                Auth(user=node.creator),
                'name',
                'contents',
                len('contents'),
                'text/plain',
            )
            self.nodes.append(node)
        # Make the last node inconsistent: its key stays in files_current
        # but disappears from files_versions.
        self.nodes[-1].files_versions = {}
        self.nodes[-1].save()
        # Sanity check
        assert_in('name', self.nodes[-1].files_current)
        assert_not_in('name', self.nodes[-1].files_versions)
    def tearDown(self):
        super(TestMigrateFiles, self).tearDown()
        self.clear()
    def test_get_targets(self):
        # Only the node with the wiped files_versions should be reported.
        targets = find_file_mismatch_nodes()
        assert_equal(len(targets), 1)
        assert_equal(targets[0], self.nodes[-1])
    def test_migrate(self):
        # A real (non-dry) run repairs every mismatch...
        main(dry_run=False)
        assert_equal(len(find_file_mismatch_nodes()), 0)
        assert_in('name', self.nodes[-1].files_versions)
        # ...and the current file id becomes the first recorded version.
        assert_equal(
            self.nodes[-1].files_current['name'],
            self.nodes[-1].files_versions['name'][0],
        )
| Python | 0 | |
e5e6506ab6b5191e309aa75e56e25253c0ba7763 | Create drivers.py | chips/memory/file/drivers.py | chips/memory/file/drivers.py | # This code has to be added to the corresponding __init__.py
# Registers the file-backed backends under the "filememory" driver key;
# DRIVERS is the registry dict defined in the target __init__.py.
DRIVERS["filememory"] = ["PICKLEFILE", "JSONFILE"]
| Python | 0.000001 | |
386baa36355b0e9378fff59fe768d1baa7e73fec | Add Himax motion detection example. | scripts/examples/Arduino/Portenta-H7/21-Sensor-Control/himax_motion_detection.py | scripts/examples/Arduino/Portenta-H7/21-Sensor-Control/himax_motion_detection.py | # Himax motion detection example.
import sensor, image, time, pyb
from pyb import Pin, ExtInt
# --- Camera and motion-detection setup ---
sensor.reset()
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.set_framesize(sensor.QVGA)
sensor.set_framerate(15)
sensor.ioctl(sensor.IOCTL_HIMAX_MD_THRESHOLD, 0x01)
# Watch the whole QVGA frame: (x, y, w, h).
sensor.ioctl(sensor.IOCTL_HIMAX_MD_WINDOW, (0, 0, 320, 240))
sensor.ioctl(sensor.IOCTL_HIMAX_MD_CLEAR)
sensor.ioctl(sensor.IOCTL_HIMAX_MD_ENABLE, True)
# Flag set from the interrupt handler, consumed by the main loop.
motion_detected = False
def on_motion(line):
    # IRQ callback -- runs in interrupt context, so only set a flag here.
    global motion_detected
    motion_detected = True
led = pyb.LED(3)
# NOTE(review): assumes PC15 carries the sensor's motion-interrupt line
# (rising edge) on this board -- confirm against the schematic.
ext = ExtInt(Pin("PC15"), ExtInt.IRQ_RISING, Pin.PULL_DOWN, on_motion)
clock = time.clock()
while(True):
    clock.tick()
    img = sensor.snapshot()
    if (motion_detected):
        led.on()
        time.sleep_ms(500)
        # Clear motion detection flag
        sensor.ioctl(sensor.IOCTL_HIMAX_MD_CLEAR)
        motion_detected = False
        led.off()
    print(clock.fps())
| Python | 0 | |
b1ab4ef6fbac0ce02d05464e03599d44721fb239 | Add an example using partial. | examples/partial.py | examples/partial.py | #!/usr/bin/env python
from functools import partial
import matplotlib.pyplot as plt
import numpy as np
from latexipy import latexipy as lp
if __name__ == '__main__':
    lp.latexify()
    # partial() pre-binds the output folder and extensions, so every
    # figure below only needs a name.
    figure = partial(lp.figure, folder='some_images', exts=['png'])
    x = np.linspace(-np.pi, np.pi)
    y1 = np.sin(x)
    y2 = np.cos(x)
    with figure('sin'):
        plt.plot(x, y1, label='sine')
        plt.title('Sine')
        plt.xlabel(r'$\theta$')
        plt.ylabel('Value')
        plt.legend()
    with figure('cos'):
        # 'C1' picks the second colour of the active matplotlib cycle.
        plt.plot(x, y2, label='cosine', c='C1')
        plt.title('Cosine')
        plt.xlabel(r'$\theta$')
        plt.ylabel('Value')
        plt.legend()
    with figure('both'):
        plt.plot(x, y1, label='sine')
        plt.plot(x, y2, label='cosine')
        plt.title('Sine and cosine')
        plt.xlabel(r'$\theta$')
        plt.ylabel('Value')
        plt.legend()
| Python | 0.000001 | |
e781a1e89b945dad1585f82dfdb77cbffbe8fdeb | add unit tests | lib/svtplay_dl/tests/prio_streams.py | lib/svtplay_dl/tests/prio_streams.py | #!/usr/bin/python
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil; coding: utf-8 -*-
# ex:ts=4:sw=4:sts=4:et:fenc=utf-8
from __future__ import absolute_import
import unittest
from svtplay_dl.utils import prio_streams
class Stream(object):
    """Minimal stand-in for an svtplay_dl stream: a protocol plus a bitrate."""

    def __init__(self, proto, bitrate):
        self.proto, self.bitrate = proto, bitrate

    def name(self):
        """Return the protocol identifier, mirroring the real stream API."""
        return self.proto

    def __repr__(self):
        return '%s(%d)' % (self.proto.upper(), self.bitrate)
class PrioStreamsTest(unittest.TestCase):
    """prio_streams() must return streams ordered by protocol priority
    (the built-in default, or a caller-supplied protocol_prio list)."""
    def _gen_proto_case(self, ordered, unordered, default=True, expected=None):
        """Build Stream stubs from ``unordered`` and assert prio_streams
        returns them in ``ordered`` order (compared via repr).

        default=False passes ``ordered`` as an explicit protocol_prio;
        ``expected`` overrides the expected repr list (used for the
        nothing-matches case).
        """
        streams = [Stream(x, 100) for x in unordered]
        kwargs = {}
        if not default:
            kwargs['protocol_prio'] = ordered
        if expected is None:
            expected = [str(Stream(x, 100)) for x in ordered]
        return self.assertEqual(
            [str(x) for x in prio_streams(streams, **kwargs)],
            expected
        )
    def test_default_order(self):
        # Built-in priority: hls > hds > http > rtmp.
        return self._gen_proto_case(
            ['hls', 'hds', 'http', 'rtmp'],
            ['rtmp', 'hds', 'hls', 'http']
        )
    def test_custom_order(self):
        # An explicit priority list overrides the default completely.
        return self._gen_proto_case(
            ['http', 'rtmp', 'hds', 'hls'],
            ['rtmp', 'hds', 'hls', 'http'],
            default=False,
        )
    def test_custom_order_1(self):
        # A one-entry priority keeps only that protocol.
        return self._gen_proto_case(
            ['http'],
            ['rtmp', 'hds', 'hls', 'http'],
            default=False,
        )
    def test_proto_unavail(self):
        # None of the prioritised protocols are available: empty result.
        return self._gen_proto_case(
            ['http', 'rtmp'],
            ['hds', 'hls', 'https'],
            default=False,
            expected=[],
        )
| Python | 0.000001 | |
4e4a8bbb459e6158a7c2d22c04849de9b4de2693 | Add directory.py to the directory package | classyfd/directory/directory.py | classyfd/directory/directory.py | """Contains a Directory class to represent real directories""" | Python | 0.000001 | |
0c079b7160cf635c14a016d418d2bc8d3d521f26 | add docker start tool | tools/run.py | tools/run.py | # coding=utf-8
import os
import json
# Remove any leftover containers from a previous run (failures ignored).
os.system("docker rm -f redis")
os.system("docker rm -f mysql")
os.system("docker rm -f oj_web_server")
# os.system() returns a non-zero status when the command fails.
if os.system("docker run --name mysql -v /root/data:/var/lib/mysql -v /root/data/my.cnf:/etc/my.cnf -e MYSQL_ROOT_PASSWORD=root -d mysql/mysql-server:latest"):
    print "Error start mysql"
    exit()
if os.system("docker run --name redis -d redis"):
    print "Error start redis"
    exit()
if os.system("docker run --name oj_web_server -e oj_env=server -v /root/qduoj:/code -v /root/test_case:/code/test_case -v /root/log:/code/log -v /root/upload:/code/upload -v /root/qduoj/dockerfiles/oj_web_server/supervisord.conf:/etc/supervisord.conf -v /root/qduoj/dockerfiles/oj_web_server/gunicorn.conf:/etc/gunicorn.conf -v /root/qduoj/dockerfiles/oj_web_server/mq.conf:/etc/mq.conf -d -p 127.0.0.1:8080:8080 --link mysql --link=redis oj_web_server"):
    print "Erro start oj_web_server"
    exit()
# Ask docker for each container's bridge-network IP address.
inspect_redis = json.loads(os.popen("docker inspect redis").read())
if not inspect_redis:
    print "Error when inspect redis ip"
    exit()
redis_ip = inspect_redis[0]["NetworkSettings"]["IPAddress"]
print "redis ip ", redis_ip
inspect_mysql = json.loads(os.popen("docker inspect mysql").read())
if not inspect_mysql:
    print "Error when inspect mysql ip"
    exit()
mysql_ip = inspect_mysql[0]["NetworkSettings"]["IPAddress"]
print "mysql ip ", mysql_ip
# Rewrite the matching export lines in /etc/profile with the fresh IPs,
# leaving every other line untouched.
f = open("/etc/profile", "r")
content = ""
for line in f.readlines():
    if line.startswith("export REDIS_PORT_6379_TCP_ADDR"):
        content += ("\nexport REDIS_PORT_6379_TCP_ADDR=" + redis_ip + "\n")
    elif line.startswith("export submission_db_host"):
        content += ("\nexport submission_db_host=" + mysql_ip + "\n")
    else:
        content += line
f.close()
f = open("/etc/profile", "w")
f.write(content)
f.close()
print "Please run source /etc/profile"
| Python | 0.000001 | |
4cf7f6c23bc9d01c6780afa4d27bf9e5e71fb72b | add hacky download | util/unidata_dl.py | util/unidata_dl.py | import glob
import os
import datetime
# Download window: 2016-11-21 00:00 to 03:00, one attempt per minute.
sts = datetime.datetime(2016, 11, 21, 0, 0)
ets = datetime.datetime(2016, 11, 21, 3, 0)
interval = datetime.timedelta(minutes=1)
os.chdir('data/nexrad/NIDS')
# Radar site directories are three-character identifiers.
for nexrad in glob.glob('???'):
    os.chdir(nexrad)
    for nids in ['N0Q', 'NET', 'N0R', 'EET']:
        if not os.path.isdir(nids):
            continue
        os.chdir(nids)
        now = sts
        while now < ets:
            fp = "%s_%s" % (nids, now.strftime("%Y%m%d_%H%M"))
            # Only fetch files we do not already have.
            if not os.path.isfile(fp):
                url = now.strftime(("http://motherlode.ucar.edu/native/radar/"
                                    "level3/" + nids + "/" + nexrad +
                                    "/%Y%m%d/Level3_" + nexrad + "_" + nids +
                                    "_%Y%m%d_%H%M.nids"))
                cmd = "wget -q -O %s %s" % (fp, url)
                os.system(cmd)
            now += interval
        os.chdir('..')
    os.chdir('..')
| Python | 0 | |
e8799d50dea038fe6e6bb94a4676d2f6eaf1ed1d | Update judgeapi.py | judge/judgeapi.py | judge/judgeapi.py | from django.conf import settings
import socket
import struct
import json
import logging
from judge.simple_comet_client import delete_channel, create_channel, send_message
logger = logging.getLogger('judge.judgeapi')
size_pack = struct.Struct('!I')
def judge_request(packet, reply=True):
    """Send one zlib-compressed JSON packet to the judge bridge and
    (optionally) read back a reply framed the same way.

    Frame format: 4-byte network-order length ('!I'), then the
    compressed JSON payload.  Raises ValueError when the judge does not
    answer with a complete frame.
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.connect((settings.BRIDGED_DJANGO_HOST, settings.BRIDGED_DJANGO_PORT))
    output = json.dumps(packet, separators=(',', ':'))
    # str.encode('zlib') is the Python 2 codec spelling of zlib.compress().
    output = output.encode('zlib')
    writer = sock.makefile('w', 0)
    writer.write(size_pack.pack(len(output)))
    writer.write(output)
    writer.close()
    # NOTE(review): when reply=False the socket is never close()d here;
    # it is only reclaimed by garbage collection.
    if reply:
        reader = sock.makefile('r', -1)
        input = reader.read(4)  # NOTE: shadows the builtin 'input'
        if not input:
            raise ValueError('Judge did not respond')
        length = size_pack.unpack(input)[0]
        input = reader.read(length)
        if not input:
            raise ValueError('Judge did not respond')
        reader.close()
        sock.close()
        input = input.decode('zlib')
        result = json.loads(input)
        return result
def judge_submission(submission):
    """Queue ``submission`` on the judge and announce it on the
    'submissions' comet channel.

    Returns True on success; on failure the submission is marked 'IE'
    and False is returned.  Per-run result fields are reset either way.
    """
    chan = 'sub_%d' % submission.id
    delete_channel(chan) # Delete if exist
    create_channel(chan)
    create_channel('submissions') #TODO: only attempt to create once
    try:
        response = judge_request({
            'name': 'submission-request',
            'submission-id': submission.id,
            'problem-id': submission.problem.code,
            'language': submission.language.key,
            'source': submission.source,
        })
    except BaseException:
        logger.exception('Failed to send request to judge')
        submission.status = 'IE'
        submission.save()
        success = False
    else:
        submission.status = 'QU' if (response['name'] == 'submission-received' and
                                     response['submission-id'] == submission.id) else 'IE'
        # Role flag broadcast with the message: 1=admin, 2=problem setter,
        # 0=regular user.  NOTE: 'id' shadows the builtin here.
        id = 1 if submission.user.is_admin() else (2 if submission.user.is_problem_setter() else 0)
        # Spaces inside fields are replaced by \f, presumably so the
        # space-delimited message can be split safely on the client side.
        send_message('submissions', 'submission-start %d %s %s %s %s %s %d %s' %
                     (submission.id, submission.problem.code, submission.problem.name.replace(" ", "\f"),
                      submission.status, submission.language.key,
                      submission.user.user.username, id,
                      str(submission.date).replace(" ", "\f")))
        success = True
    # Clear previous run results before the judge reports fresh ones.
    submission.time = None
    submission.memory = None
    submission.points = None
    submission.result = None
    submission.save()
    return success
def abort_submission(submission):
    """Ask the judge to terminate a running submission; no reply is read."""
    judge_request({'name': 'terminate-submission', 'submission-id': submission.id}, reply=False)
| from django.conf import settings
import socket
import struct
import json
import logging
from judge.simple_comet_client import delete_channel, create_channel, send_message
logger = logging.getLogger('judge.judgeapi')
size_pack = struct.Struct('!I')
def judge_request(packet, reply=True):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((settings.BRIDGED_DJANGO_HOST, settings.BRIDGED_DJANGO_PORT))
output = json.dumps(packet, separators=(',', ':'))
output = output.encode('zlib')
writer = sock.makefile('w', 0)
writer.write(size_pack.pack(len(output)))
writer.write(output)
writer.close()
if reply:
reader = sock.makefile('r', -1)
input = reader.read(4)
if not input:
raise ValueError('Judge did not respond')
length = size_pack.unpack(input)[0]
input = reader.read(length)
if not input:
raise ValueError('Judge did not respond')
reader.close()
sock.close()
input = input.decode('zlib')
result = json.loads(input)
return result
def judge_submission(submission):
chan = 'sub_%d' % submission.id
delete_channel(chan) # Delete if exist
create_channel(chan)
create_channel('submissions') #TODO: only attempt to create once
try:
response = judge_request({
'name': 'submission-request',
'submission-id': submission.id,
'problem-id': submission.problem.code,
'language': submission.language.key,
'source': submission.source,
})
except BaseException:
logger.exception('Failed to send request to judge')
submission.status = 'IE'
submission.save()
success = False
else:
submission.status = 'QU' if (response['name'] == 'submission-received' and
response['submission-id'] == submission.id) else 'IE'
id = 1 if submission.user.is_admin() else (2 is submission.user.is_problem_setter() else 0)
send_message('submissions', 'submission-start %d %s %s %s %s %s %d %s' %
(submission.id, submission.problem.code, submission.problem.name.replace(" ", "\f"),
submission.status, submission.language.key,
submission.user.user.username, id,
str(submission.date).replace(" ", "\f")))
success = True
submission.time = None
submission.memory = None
submission.points = None
submission.result = None
submission.save()
return success
def abort_submission(submission):
judge_request({'name': 'terminate-submission', 'submission-id': submission.id}, reply=False)
| Python | 0.000001 |
3dbf91d4d447f6dbddece040b3a9dcbeb8ebcd22 | Add missing migrations | getyourdata/data_request/migrations/0023_auto_20160716_0946.py | getyourdata/data_request/migrations/0023_auto_20160716_0946.py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-07-16 09:46
from __future__ import unicode_literals
from django.db import migrations, models
import django_extensions.db.fields
class Migration(migrations.Migration):
dependencies = [
('data_request', '0022_faqcontent_priority'),
]
operations = [
migrations.CreateModel(
name='RequestContent',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_on', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name=b'Created on')),
('updated_on', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name=b'Updated on')),
('title', models.CharField(default='Default', max_length=255, unique=True)),
('header', models.TextField(blank=True, default='Dear recipient,')),
('header_en', models.TextField(blank=True, default='Dear recipient,', null=True)),
('header_fi', models.TextField(blank=True, default='Dear recipient,', null=True)),
('content1', models.TextField(blank=True, default='content first')),
('content1_en', models.TextField(blank=True, default='content first', null=True)),
('content1_fi', models.TextField(blank=True, default='content first', null=True)),
('content2', models.TextField(blank=True, default='content second')),
('content2_en', models.TextField(blank=True, default='content second', null=True)),
('content2_fi', models.TextField(blank=True, default='content second', null=True)),
('footer', models.TextField(blank=True, default='Regards,')),
('footer_en', models.TextField(blank=True, default='Regards,', null=True)),
('footer_fi', models.TextField(blank=True, default='Regards,', null=True)),
],
options={
'abstract': False,
},
),
migrations.DeleteModel(
name='EmailContent',
),
migrations.RemoveField(
model_name='pdfcontents',
name='content1_en',
),
migrations.RemoveField(
model_name='pdfcontents',
name='content1_fi',
),
migrations.RemoveField(
model_name='pdfcontents',
name='content2_en',
),
migrations.RemoveField(
model_name='pdfcontents',
name='content2_fi',
),
migrations.RemoveField(
model_name='pdfcontents',
name='footer_en',
),
migrations.RemoveField(
model_name='pdfcontents',
name='footer_fi',
),
migrations.RemoveField(
model_name='pdfcontents',
name='header_en',
),
migrations.RemoveField(
model_name='pdfcontents',
name='header_fi',
),
]
| Python | 0.000029 | |
ddb9e1c0160f40fe60330c247906b9b41f18be1b | Create hearthstone_way_to_legend.py | hearthstone_way_to_legend.py | hearthstone_way_to_legend.py | import random,statistics
winrate = 0.51
iterations = 100
games = [0]*iterations
passwinstreak = 5*5 # below Rank 5 there is no win-streak bonus

# Monte-Carlo estimate of games needed to climb the ladder to Legend.
for x in range(iterations):
    # 1-10 11-15 => 15 rank: 70 stars between the start rank and Legend.
    ladderPosition = 5*10 + 4*5
    # Fixed: the counter used to be spelled both 'winstrek' and
    # 'winstreak', so it never advanced and the bonus never fired.
    winstreak = 0
    while True:
        games[x] = games[x] + 1
        if random.random() <= winrate:
            winstreak = winstreak + 1
            ladderPosition = ladderPosition - 1
            # From the 3rd consecutive win onward, gain a bonus star
            # (only while still below Rank 5).
            if winstreak >= 3 and ladderPosition > passwinstreak:
                ladderPosition = ladderPosition - 1
        else:
            winstreak = 0
            ladderPosition = ladderPosition + 1
        # '== 0', not 'is 0': identity on ints is a CPython accident.
        if ladderPosition == 0:
            break

print("Total games (mean of " + str(iterations) + " iterations): "+ str(statistics.mean(games)))
input()  # keep the console window open
| Python | 0.00009 | |
a801deeaa00e443b3c68c1fbcea1e6ff62d90082 | Add Python script to generate users | python/addusers.py | python/addusers.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Adds a sequential number of users into a test database
with username: newusern and password newusern
Not for production usage
"""
import MySQLdb
# Connection settings -- replace with your own test-server values.
# (The old 'hostname = # FILL IN' form was a syntax error.)
hostname = "localhost"  # FILL IN
username = "root"       # FILL IN
password = ""           # FILL IN

# Simple routine to run a query on a database and print the results:
def doQuery(conn, n_users):
    """Create n_users accounts newuser0..newuser(n-1) with matching
    passwords and full privileges.  Test databases only."""
    cur = conn.cursor()
    try:
        for i in range(0, n_users):
            cur.execute("""CREATE USER \'newuser%i\'@\'localhost\' IDENTIFIED BY \'password%i\'""" % (i, i))
            cur.execute("""GRANT ALL PRIVILEGES ON * . * TO \'newuser%i\'@\'localhost\'""" % i)
            cur.execute("""FLUSH PRIVILEGES""")
    except MySQLdb.Error as e:  # 'as' form parses on Python 2.6+ and 3
        try:
            print("MySQL Error [%d]: %s" % (e.args[0], e.args[1]))
        except IndexError:
            print("MySQL Error: %s" % str(e))

if __name__ == '__main__':
    print("Using mysql.connector…")
    # The stray ', 20' that used to sit after the keyword args inside
    # connect() (a syntax error) was the user count: it belongs in the
    # doQuery() call, which also previously lacked its second argument.
    myConnection = MySQLdb.connect(host=hostname, user=username, passwd=password)
    doQuery(myConnection, 20)
    myConnection.close()
| Python | 0.000003 | |
6602bbfa47c2523bc4d058bfa1d5a28d5fc33836 | Add tag reading | bw_read_xml.py | bw_read_xml.py | from lxml import etree
from copy import copy
class BattWarsObject(object):
def __init__(self, obj):
self._attributes = {}
self.type = obj.get("type")
self.id = obj.get("id")
self._xml_node = obj
# We will create a name for this object by putting the type and ID together.
self.name = "{0}[{1}]".format(self.type, self.id)
for attr in obj:
assert attr not in self._attributes
self._attributes[attr.get("name")] = attr
@property
def attributes(self):
return self._attributes
def has_attr(self, name):
return name in self._attributes
def get_attr(self, name):
return self._attributes[name]
def get_attr_type(self, name):
return self._attributes[name].get("type")
def get_attr_elements(self, name):
return [elem.text for elem in self._attributes[name]]
def get_attr_tag(self, name):
return self._attributes[name].tag
# Use this for attributes that have only 1 element
def get_attr_value(self, name):
return self._attributes[name][0].text
def set_attr_value(self, name, val):
self._attributes[name][0].text = val
class BattWarsLevel(object):
def __init__(self, fileobj):
self._tree = etree.parse(fileobj)
self._root = self._tree.getroot()
self.obj_map = {}
for obj in self._root:
bw_object = BattWarsObject(obj)
self.obj_map[bw_object.id] = bw_object
# The root of a BW level xml file contains the objects
# used by that level.
@property
def objects(self):
return self._root
def get_attributes(self, obj):
return []
def create_object_hierarchy(id_map):
hierarchy = {}
never_referenced = {obj_id: True for obj_id in id_map.keys()}
for obj_id, obj in id_map.items():
if obj.has_attr("mBase"):
# In the xml file mBase has the type pointer, but it's actually
# the ID of a different object in the file.
pointer = obj.get_attr_value("mBase")
#assert pointer in id_map
if obj.id not in hierarchy:
del never_referenced[obj_id]
hierarchy[obj.id] = pointer
else:
raise RuntimeError("one object shouldn't have more than 1 reference: %s" % obj.name)
return hierarchy, never_referenced
def create_ref(ref, hierarchy, id_map):
if ref.id not in hierarchy:
return ref.name
else:
return ref.name + " => " + create_ref(id_map[hierarchy[ref.id]], hierarchy, id_map)
if __name__ == "__main__":
infile = "bw2_sandbox/SP_5.3_Level.xml"
with open(infile, "r") as f:
bw_level = BattWarsLevel(f)
types = {}
id_map = {}
for obj in bw_level.objects:
bw_object = BattWarsObject(obj)
if bw_object.type not in types:
types[bw_object.type] = 1
else:
types[bw_object.type] += 1
#assert bw_object.id not in id_map
if bw_object.id in id_map:
print(bw_object.name)
id_map[bw_object.id] = bw_object
# Never referenced actually doesn't mean that it isn't referenced at all,
# but that it isn't referenced in a mBase attribute
hierarchy, never_referenced = create_object_hierarchy(id_map)
print(never_referenced)
with open("hierarchy.txt", "w") as f:
f.write("")
with open("hierarchy.txt", "a") as f:
for obj_id in sorted(id_map.keys()):
obj = id_map[obj_id]
if obj_id in hierarchy:
f.write(create_ref(obj, hierarchy, id_map)+"\n")
print("done")
| from lxml import etree
from copy import copy
class BattWarsObject(object):
def __init__(self, obj):
self._attributes = {}
self.type = obj.get("type")
self.id = obj.get("id")
self._xml_node = obj
# We will create a name for this object by putting the type and ID together.
self.name = "{0}[{1}]".format(self.type, self.id)
for attr in obj:
assert attr not in self._attributes
self._attributes[attr.get("name")] = attr
@property
def attributes(self):
return self._attributes
def has_attr(self, name):
return name in self._attributes
def get_attr(self, name):
return self._attributes[name]
def get_attr_type(self, name):
return self._attributes[name].get("type")
def get_attr_elements(self, name):
return [elem.text for elem in self._attributes[name]]
# Use this for attributes that have only 1 element
def get_attr_value(self, name):
return self._attributes[name][0].text
def set_attr_value(self, name, val):
self._attributes[name][0].text = val
class BattWarsLevel(object):
def __init__(self, fileobj):
self._tree = etree.parse(fileobj)
self._root = self._tree.getroot()
self.obj_map = {}
for obj in self._root:
bw_object = BattWarsObject(obj)
self.obj_map[bw_object.id] = bw_object
# The root of a BW level xml file contains the objects
# used by that level.
@property
def objects(self):
return self._root
def get_attributes(self, obj):
return []
def create_object_hierarchy(id_map):
hierarchy = {}
never_referenced = {obj_id: True for obj_id in id_map.keys()}
for obj_id, obj in id_map.items():
if obj.has_attr("mBase"):
# In the xml file mBase has the type pointer, but it's actually
# the ID of a different object in the file.
pointer = obj.get_attr_value("mBase")
assert pointer in id_map
if obj.id not in hierarchy:
del never_referenced[obj_id]
hierarchy[obj.id] = pointer
else:
raise RuntimeError("one object shouldn't have more than 1 reference")
return hierarchy, never_referenced
def create_ref(ref, hierarchy, id_map):
if ref.id not in hierarchy:
return ref.name
else:
return ref.name + " => " + create_ref(id_map[hierarchy[ref.id]], hierarchy, id_map)
if __name__ == "__main__":
with open("bw1_sandbox/C1_Gauntlet_Level.xml", "r") as f:
bw_level = BattWarsLevel(f)
types = {}
id_map = {}
for obj in bw_level.objects:
bw_object = BattWarsObject(obj)
if bw_object.type not in types:
types[bw_object.type] = 1
else:
types[bw_object.type] += 1
assert bw_object.id not in id_map
id_map[bw_object.id] = bw_object
# Never referenced actually doesn't mean that it isn't referenced at all,
# but that it isn't referenced in a mBase attribute
hierarchy, never_referenced = create_object_hierarchy(id_map)
print(never_referenced)
with open("hierarchy.txt", "w") as f:
f.write("")
with open("hierarchy.txt", "a") as f:
for obj_id in sorted(id_map.keys()):
obj = id_map[obj_id]
if obj_id in hierarchy:
f.write(create_ref(obj, hierarchy, id_map)+"\n")
print("done")
| Python | 0 |
821f1b83c441122b28ad2dc869576ca22a4ee642 | Create ngram_service.py | ngram_utils/ngram_service.py | ngram_utils/ngram_service.py | from thrift.transport import TSocket
from thrift.protocol import TBinaryProtocol
from thrift.transport import TTransport
from libs.hbase import Hbase
class NgramService(object):
def __init__(self, mongo_host, hbase_host):
mclient = settings.MONGO_CLIENT
unigram_db = mclient['unigrams']
bigram_db = mclient['bigrams']
trigram_db = mclient['trigrams']
unigram_col_all = unigram_db['all']
bigram_col_preps = bigram_db['preps']
trigram_col_preps = trigram_db['preps']
# No Determinatives
trigram_db_nodt = mclient['tetragrams']
bigram_db_nodt = mclient['bigrams_nodt']
trigram_preps_nodt1 = trigram_db_nodt['preps1']
trigram_preps_nodt2 = trigram_db_nodt['preps2']
bigram_col_preps_nodt = bigram_db_nodt['preps']
# HBASE
h_unigrams = 'ngrams1'
h_bigrams = 'ngrams2'
h_trigrams_skips = 'ngrams3'
transport = TTransport.TBufferedTransport(TSocket.TSocket(*settings.HBASE_HOST))
protocol = TBinaryProtocol.TBinaryProtocolAccelerated(transport)
client = Hbase.Client(protocol)
transport.open()
rate = 0
start = time.time()
| Python | 0.000003 | |
897b56183c3b30a0bc4f439e20d42ce8da2b444c | add empty unit test for viewhandler module | supvisors/tests/test_viewhandler.py | supvisors/tests/test_viewhandler.py | #!/usr/bin/python
#-*- coding: utf-8 -*-
# ======================================================================
# Copyright 2016 Julien LE CLEACH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ======================================================================
import sys
import unittest
from supvisors.tests.base import DummySupvisors
class ViewHandlerTest(unittest.TestCase):
""" Test case for the viewhandler module. """
def test_TODO(self):
""" Test the values set at construction. """
from supvisors.viewhandler import ViewHandler
handler = ViewHandler()
self.assertIsNotNone(handler)
def test_suite():
return unittest.findTestCases(sys.modules[__name__])
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
| Python | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.