commit stringlengths 40 40 | subject stringlengths 4 1.73k | repos stringlengths 5 127k | old_file stringlengths 2 751 | new_file stringlengths 2 751 | new_contents stringlengths 1 8.98k | old_contents stringlengths 0 6.59k | license stringclasses 13
values | lang stringclasses 23
values |
|---|---|---|---|---|---|---|---|---|
41af5deac1332302b478b57376053d03721bade7 | Add the missing legend module | jaj42/dyngraph,jaj42/GraPhysio,jaj42/GraPhysio | dyngraph/legend.py | dyngraph/legend.py | import pyqtgraph as pg
__all__ = ['MyLegendItem']
class MyLegendItem(pg.LegendItem):
def __init__(self, size=None, offset=(40,5)):
super().__init__(size, offset)
def paint(self, p, *args):
p.setPen(pg.mkPen(0,0,0,255))
p.setBrush(pg.mkBrush(255,255,255,255))
p.drawRect(self.boundingRect())
| isc | Python | |
3c057dcf61292bc4c268e430aa7a11b2fc0a56f8 | Create generate_cloud.py | prakhar21/SocialCops,prakhar21/SocialCops | generate_cloud.py | generate_cloud.py | '''
Author: Prakhar Mishra
Date: 11/01/2016
'''
# Importing packages
import sys
import matplotlib.pyplot as plt
from wordcloud import WordCloud, STOPWORDS
from scipy.misc import imread
import csv
# Opening and getting marker for input file
data = open(str(sys.argv[1]), 'r')
data_reader = csv.reader(data)
# Reads only the tweets and appends it into
# places list
places = []
for line in data_reader:
if line[1] != 'None':
places.append(line[1])
else:
pass
# Joins all the tweets sepeated with space
words = ' '.join(places)
# Background Image
twitter_mask = imread('twitter_mask.png', flatten=True)
# Cleans
final_tweets = " ".join([word for word in words.split()
if not word.startswith('@') and word != 'ude' and word != 'n' and word != 'udc'
])
wc = WordCloud(
font_path = 'CabinSketch-Regular.ttf',
stopwords=STOPWORDS,
background_color='black',
width=1050,
height=850,
mask=twitter_mask
).generate(final_tweets)
plt.imshow(wc)
plt.axis("off")
plt.show()
| apache-2.0 | Python | |
74388ceaacb7eedf98eb03f1263ea2ec6db596e1 | Add initial script to export feed downloads. | AchyuthIIIT/mediacloud,AchyuthIIIT/mediacloud,AchyuthIIIT/mediacloud,AchyuthIIIT/mediacloud,AchyuthIIIT/mediacloud,AchyuthIIIT/mediacloud,AchyuthIIIT/mediacloud,AchyuthIIIT/mediacloud,berkmancenter/mediacloud,berkmancenter/mediacloud,AchyuthIIIT/mediacloud,berkmancenter/mediacloud,berkmancenter/mediacloud,berkmancenter/mediacloud | python_scripts/media_export_through_api.py | python_scripts/media_export_through_api.py | # -*- coding: utf-8 -*-
import psycopg2
import psycopg2.extras
import requests
import json
import mc_database
import mediacloud
def get_download_from_api( mc_api_url, api_key, downloads_id ):
r = requests.get( mc_api_url +'/api/v2/downloads/single/' + str( downloads_id) ,
params = { 'key': api_key} )
download = r.json()[0]
return download
def add_feed_download_with_api( mc_api_url, api_key, download, raw_content ):
r = requests.put( mc_api_url + '/api/v2/crawler/add_feed_download',
params={ 'key': api_key },
data=json.dumps( { 'download': download, 'raw_content': raw_content } ),
headers={ 'Accept': 'application/json'} )
return r
local_key = '2a4cebc31101a2d3d5e60456c23ae877c2d49944068f237e1134e2c75191a2af'
local_key = '1161251f5de4f381a198eea4dc20350fd992f5eef7cb2fdc284c245ff3d4f3ca'
source_media_cloud_api_url = 'http://localhost:8000/'
dest_media_cloud_api_url = 'http://localhost:3000/'
source_api_key = 'e07cf98dd0d457351354ee520635c226acd238ecf15ec9e853346e185343bf7b'
dest_api_key = local_key
db_label = "AWS backup crawler"
conn = mc_database.connect_to_database( db_label )
cursor = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
cursor.execute( "SELECT * from downloads where type='feed' and state in ( 'success', 'feed_error') order by downloads_id limit 10" )
feed_downloads = cursor.fetchall()
for feed_download in feed_downloads:
download = get_download_from_api( source_media_cloud_api_url, source_api_key, feed_download['downloads_id'] )
#print download
#break
raw_content = download['raw_content' ]
del download['raw_content']
if download[ 'state' ] == 'feed_error':
download[ 'state' ] = 'success'
add_feed_download_with_api( dest_media_cloud_api_url, dest_api_key, download, raw_content )
| agpl-3.0 | Python | |
b9f464491a6131940ff2b88586ccdd44431ce992 | add fpga-bit-to-bin.py | pavel-demin/red-pitaya-notes,pavel-demin/red-pitaya-notes,fbalakirev/red-pitaya-notes,pavel-demin/red-pitaya-notes,pavel-demin/red-pitaya-notes,pavel-demin/red-pitaya-notes,pavel-demin/red-pitaya-notes,fbalakirev/red-pitaya-notes,pavel-demin/red-pitaya-notes,fbalakirev/red-pitaya-notes,fbalakirev/red-pitaya-notes,fbalakirev/red-pitaya-notes,pavel-demin/red-pitaya-notes,fbalakirev/red-pitaya-notes,fbalakirev/red-pitaya-notes | scripts/fpga-bit-to-bin.py | scripts/fpga-bit-to-bin.py | #!/usr/bin/python
# copied from https://github.com/topic-embedded-products/meta-topic/blob/master/recipes-bsp/fpga/fpga-bit-to-bin/fpga-bit-to-bin.py
import sys
import os
import struct
def flip32(data):
sl = struct.Struct('<I')
sb = struct.Struct('>I')
b = buffer(data)
d = bytearray(len(data))
for offset in xrange(0, len(data), 4):
sb.pack_into(d, offset, sl.unpack_from(b, offset)[0])
return d
import argparse
parser = argparse.ArgumentParser(description='Convert FPGA bit files to raw bin format suitable for flashing')
parser.add_argument('-f', '--flip', dest='flip', action='store_true', default=False, help='Flip 32-bit endianess (needed for Zynq)')
parser.add_argument("bitfile", help="Input bit file name")
parser.add_argument("binfile", help="Output bin file name")
args = parser.parse_args()
short = struct.Struct('>H')
ulong = struct.Struct('>I')
bitfile = open(args.bitfile, 'rb')
l = short.unpack(bitfile.read(2))[0]
if l != 9:
raise Exception, "Missing <0009> header (0x%x), not a bit file" % l
bitfile.read(l)
l = short.unpack(bitfile.read(2))[0]
d = bitfile.read(l)
if d != 'a':
raise Exception, "Missing <a> header, not a bit file"
l = short.unpack(bitfile.read(2))[0]
d = bitfile.read(l)
print "Design name:", d
KEYNAMES = {'b': "Partname", 'c': "Date", 'd': "Time"}
while 1:
k = bitfile.read(1)
if not k:
raise Exception, "unexpected EOF"
elif k == 'e':
l = ulong.unpack(bitfile.read(4))[0]
print "found binary data:", l
d = bitfile.read(l)
if args.flip:
d = flip32(d)
open(args.binfile, 'wb').write(d)
break
elif k in KEYNAMES:
l = short.unpack(bitfile.read(2))[0]
d = bitfile.read(l)
print KEYNAMES[k], d
else:
print "Unexpected key: ", k
l = short.unpack(bitfile.read(2))[0]
d = bitfile.read(l)
| mit | Python | |
1359a099e7019767cfd904eee0c510c3b33a7d03 | add script to generate pcap files | usi-systems/p4paxos,usi-systems/p4paxos,usi-systems/p4paxos,usi-systems/p4paxos,usi-systems/p4paxos | generate_pcap_files.py | generate_pcap_files.py | #!/usr/bin/env python
from scapy.all import *
import sys
import argparse
class Paxos(Packet):
name ="PaxosPacket "
fields_desc =[
XByteField("msgtype", 0x3),
XShortField("instance", 0x1),
XByteField("round", 0x1),
XByteField("vround", 0x0),
XLongField("acceptor", 0x0),
XBitField("value", 0x11223344, 512)
]
def paxos_packet(typ, inst, rnd, vrnd, value):
eth = Ether(dst="08:00:27:10:a8:80")
ip = IP(src="10.0.0.1", dst="10.0.0.2")
udp = UDP(sport=34951, dport=0x8888)
pkt = eth / ip / udp / Paxos(msgtype=typ, instance=inst, round=rnd, vround=vrnd, value=value)
return pkt
if __name__=='__main__':
parser = argparse.ArgumentParser(description='P4Paxos demo')
parser.add_argument('-i', '--inst', help='Paxos instance', type=int, default=0)
parser.add_argument('-r', '--rnd', help='Paxos round', type=int, default=1)
parser.add_argument('-a', '--vrnd', help='Paxos value round', type=int, default=0)
parser.add_argument('-v', '--value', help='Paxos value', type=int, default=0x11223344)
parser.add_argument('-o', '--output', help='output pcap file', type=str, required=True)
args = parser.parse_args()
p0 = paxos_packet(0, args.inst, args.rnd, args.vrnd, args.value)
p2a = paxos_packet(3, args.inst, args.rnd, args.vrnd, args.value)
p1a = paxos_packet(1, args.inst, args.rnd, args.vrnd, 0)
p1b = paxos_packet(2, args.inst, args.rnd, args.vrnd, 0xAABBCCDD)
pkts = [p0, p2a, p1a, p1b]
wrpcap("%s" % args.output, pkts)
| apache-2.0 | Python | |
8b834db0b7632c31c06a2958799fa7e29b505aaf | add robust kinematic example | hungpham2511/toppra,hungpham2511/toppra,hungpham2511/toppra | examples/robust_kinematics.py | examples/robust_kinematics.py | import toppra as ta
import toppra.constraint as constraint
import toppra.algorithm as algo
import numpy as np
import matplotlib.pyplot as plt
import argparse
def main():
parser = argparse.ArgumentParser(description="An example showcasing the usage of robust constraints."
"A velocity constraint and a robust acceleration constraint"
"are considered in this script.")
parser.add_argument("-N", "--N", type=int, help="Number of segments in the discretization.", default=100)
parser.add_argument("-v", "--verbose", action="store_true", default=False)
parser.add_argument("-du", "--du", default=1e-3, type=float)
parser.add_argument("-dx", "--dx", default=5e-2, type=float)
parser.add_argument("-dc", "--dc", default=9e-3, type=float)
parser.add_argument("-i", "--interpolation_scheme", default=1, type=int)
args = parser.parse_args()
if args.verbose:
ta.setup_logging("DEBUG")
# Parameters
N_samples = 5
dof = 7
# Random waypoints used to obtain a random geometric path.
np.random.seed(9)
way_pts = np.random.randn(N_samples, dof)
# Create velocity bounds, then velocity constraint object
vlim_ = np.random.rand(dof) * 20
vlim = np.vstack((-vlim_, vlim_)).T
# Create acceleration bounds, then acceleration constraint object
alim_ = np.random.rand(dof) * 2
alim = np.vstack((-alim_, alim_)).T
path = ta.SplineInterpolator(np.linspace(0, 1, 5), way_pts)
pc_vel = constraint.JointVelocityConstraint(vlim)
pc_acc = constraint.JointAccelerationConstraint(
alim, discretization_scheme=constraint.DiscretizationType.Interpolation)
robust_pc_acc = constraint.RobustCanonicalLinearConstraint(
pc_acc, [args.du, args.dx, args.dc], args.interpolation_scheme)
instance = algo.TOPPRA([pc_vel, robust_pc_acc], path,
gridpoints=np.linspace(0, 1, args.N + 1))
X = instance.compute_feasible_sets()
K = instance.compute_controllable_sets(0, 0)
_, sd_vec, _ = instance.compute_parameterization(0, 0)
X = np.sqrt(X)
K = np.sqrt(K)
plt.plot(X[:, 0], c='green', label="Feasible sets")
plt.plot(X[:, 1], c='green')
plt.plot(K[:, 0], '--', c='red', label="Controllable sets")
plt.plot(K[:, 1], '--', c='red')
plt.plot(sd_vec, label="Velocity profile")
plt.legend()
plt.title("Path-position path-velocity plot")
plt.show()
jnt_traj, aux_traj = instance.compute_trajectory(0, 0)
ts_sample = np.linspace(0, jnt_traj.get_duration(), 100)
qs_sample = jnt_traj.evaldd(ts_sample)
plt.plot(ts_sample, qs_sample)
plt.show()
if __name__ == '__main__':
main()
| mit | Python | |
d82d66e7210a3f628d47a1a5463d6ac244d8f1c8 | Gather product infomation | sdgdsffdsfff/Cmdb_Puppet,henry-zhang/Cmdb_Puppet | gethostinfo/product.py | gethostinfo/product.py | #!/home/python/bin/python
# -*- coding:utf-8 -*-
from subprocess import PIPE,Popen
import urllib, urllib2
def getDMI():
p = Popen('dmidecode',shell=True,stdout=PIPE)
stdout, stderr = p.communicate()
return stdout
def parserDMI(dmidata):
pd = {}
fd = {}
line_in = False
for line in dmidata.split('\n'):
if line.startswith('System Information'):
line_in = True
continue
if line.startswith('\t') and line_in:
k, v = [i.strip() for i in line.split(':')]
pd[k] = v
else:
line_in = False
name = "Manufacturer:%s ; Serial_Number:%s ; Product_Name:%s ; UUID:%s " % (pd['Manufacturer'],pd['Serial Number'],pd['Product Name'],pd['UUID'])
for i in name.split(';'):
k, v = [j.strip() for j in i.split(':')]
fd[k] = v
return fd
if __name__ == '__main__':
dmidata = getDMI()
postdata = parserDMI(dmidata)
print postdata
| epl-1.0 | Python | |
f82998254b5b93c6fafc36fa23fa4c1566d42125 | Add interpreter stump | zmbc/shakespearelang,zmbc/shakespearelang,zmbc/shakespearelang | shakespeare_interpreter.py | shakespeare_interpreter.py | from shakespeare_parser import shakespeareParser
import argparse
argparser = argparse.ArgumentParser(description = "Run files in Shakespeare Programming Language.")
argparser.add_argument('filename', type=str, help="SPL file location")
args = argparser.parse_args()
if(args.filename):
with open(args.filename, 'r') as f:
text = f.read().replace('\n', ' ')
parser = shakespeareParser()
ast = parser.parse(text, rule_name='play')
print(ast) | mit | Python | |
a11155e5df71370e2153b9ac34e34ece8c5603fd | Create separate supervisor for running under the GUI | OrganDonor/Organelle,OrganDonor/Organelle | gui-supervisor.py | gui-supervisor.py | #!/usr/bin/env python
"""Master program for the Organ Donor "Organelle"
This program is mainly responsible for monitoring the physical rotary switch
that allows users to select a major mode of operation for the Organelle.
When it detects that the switch has been moved, it asks the current program
to clean up and exit (by sending it a SIGUSR1 signal), waits for this to
complete, and launches the newly selected program.
Because Python GUI programs on the Raspberry Pi take a while to launch in
the best case, there is no way the rotary switch can be super responsive.
We'll have to settle for predictable but slow behavior. When the switch
position starts to change, we'll go ahead and signal the running program,
and continue to monitor the switch position. Once the switch reading has
been steady for a while, we will launch the new program (which might be
the same as the old program).
2015-05-25 Paul Williamson
2015-08-24 ptw Reconstructing lost work: actually launch programs and signal them,
and don't use the screen.
Added syslog logging.
2015-08-25 ptw Adjusted pin number to match as-built configuration.
"""
# If we're running on Raspbian Jessie, we can use GPIO without being root!
# Otherwise, must run as root to use the built-in GPIO package.
import RPi.GPIO as GPIO
import sys, time
import subprocess
import syslog
import signal
syslog.openlog("organelle")
syslog.syslog(syslog.LOG_INFO, "Organelle GUI supervisor started")
switch_steady_delay = 1.0 # seconds before the switch is considered stable
proc_exit_delay = 1.0 # seconds to allow the process to exit
# Pin numbers will follow the Broadcom SoC pin numbering
GPIO.setmode(GPIO.BCM)
# Mapping of pins onto programs and their command-line arguments
programs = { 4: ("xterm", "-fullscreen -e ./organelle.py p MIDI4x4_20:0 MIDIPLUS_1"),
17: ("xterm", "-fullscreen -e ./organelle.py p MIDI4x4_20:1 MIDIPLUS_2"),
27: ("xterm", "-fullscreen -e ./organelle.py p MIDI4x4_20:2 MIDIPLUS_3"),
22: ("xterm", "-fullscreen -e ./organelle.py p MIDI4x4_20:3 MIDIPLUS_4"),
5: ("xterm", "-fullscreen -e ./organelle.py 4"), # keyboards only
6: ("xterm", "-fullscreen -e ./organelle.py 4"),
13: ("xterm", "-fullscreen -e ./organelle.py 4"),
19: ("xterm", "-fullscreen -e ./jukebox.py 5"), # auto-play
26: ("xterm", "-fullscreen -e ./organelle.py t"), # theremin
23: ("xterm", "-fullscreen -e ./organelle.py g") # pitch game
}
# Extract the list of GPIO pins from the program mapping.
pins = programs.keys()
# Function that reads all the pins into a dictionary.
def rotary_switch():
return {x : GPIO.input(x) for x in pins}
# Given a dictionary levels containing the pin levels,
# and hoping that exactly one of them is 0 (because it's a rotary switch),
# return the pin number of the first one that's 0.
# If somehow none of the pins are grounded, return None.
def selected(levels):
for pin,val in levels.iteritems():
if val == 0:
return pin
return None
# Display a prompt in case the screen is unclaimed long enough to matter.
#def prompt():
# sys.stderr.write("\x1b[2J\x1b[10;1H") # row 10, column 1
# print "Select mode using rotary switch"
def prompt():
# we need not do anything to prompt; the wallpaper is the prompt.
pass
# Set all pins as inputs with pullup, so we just ground a pin to activate.
for p in pins:
GPIO.setup(p, GPIO.IN, pull_up_down=GPIO.PUD_UP)
# The rotary switch monitoring goes on forever ...
while True:
prompt()
# Here we are in between programs. Wait for a constant switch reading.
levels = rotary_switch()
waitfor = time.time() + switch_steady_delay
while time.time() < waitfor:
newlevels = rotary_switch()
if newlevels != levels:
levels.update(newlevels)
waitfor = time.time() + switch_steady_delay
# OK, the switch has been steady for long enough. Launch that program!
choice = selected(levels)
if choice is None:
continue
(prog,arg) = programs[choice]
# dummy launch for testing
#print "Here we launch %s %s" % (prog,arg)
proc = subprocess.Popen([prog]+arg.split())
if not proc:
syslog.syslog(syslog.LOG_ERROR, "Failed to launch " + prog + " " + arg)
continue
syslog.syslog(syslog.LOG_INFO, "Launched " + prog + " " + arg)
# Program is running. Continue watching the rotary switch for changes.
while levels == rotary_switch():
time.sleep(0.100)
# Switch touched! Ask the program to exit and wait for it to do so.
proc.send_signal(signal.SIGUSR1)
proc.wait()
# waitfor = time.time() + proc_exit_delay
# while time.time() < waitfor:
# if proc.poll():
# syslog.syslog(syslog.LOG_INFO, "Normal exit")
# break
# time.sleep(0.100)
# if not proc.poll():
# # uh oh, program didn't exit as requested. Terminate with prejudice.
# syslog.syslog(syslog.LOG_ERR, "Program failed to exit on request!")
# proc.kill()
# proc.wait() # if kill() doesn't work, we're hung too.
| cc0-1.0 | Python | |
d5435f2b0548f762985652b07ef047b68552b069 | add guided backprop relu | diogo149/treeano,jagill/treeano,jagill/treeano,diogo149/treeano,nsauder/treeano,nsauder/treeano,jagill/treeano,nsauder/treeano,diogo149/treeano | treeano/sandbox/nodes/guided_backprop.py | treeano/sandbox/nodes/guided_backprop.py | """
from "Striving for Simplicity - The All Convolutional Net"
http://arxiv.org/abs/1412.6806
"""
import treeano
import treeano.sandbox.utils
class _GuidedBackprop(treeano.sandbox.utils.OverwriteGrad):
"""
based on Lasagne Recipes on Guided Backpropagation
"""
def grad(self, inputs, out_grads):
(inp,) = inputs
(grd,) = out_grads
dtype = inp.dtype
return (grd * (inp > 0).astype(dtype) * (grd > 0).astype(dtype),)
guided_backprop_relu = _GuidedBackprop(treeano.utils.rectify)
| apache-2.0 | Python | |
91396ed246166f610e9cfc4519862f061af4e6b2 | Enable Django Admin for some of our data | uq-eresearch/uqam,uq-eresearch/uqam,uq-eresearch/uqam,uq-eresearch/uqam | cat/admin.py | cat/admin.py | from django.contrib import admin
from models import MuseumObject,FunctionalCategory
class MOAdmin(admin.ModelAdmin):
fields = ('registration_number','country','description','comment')
list_display = ('registration_number','country','description','comment')
list_filter = ('country','functional_category')
search_fields = ['description','comment']
admin.site.register(MuseumObject, MOAdmin)
admin.site.register(FunctionalCategory)
| bsd-3-clause | Python | |
d44edfbb3561c136c9bf53dfafa42e34f96e476b | Add tests for rhs_reuse | cgranade/qutip,qutip/qutip,cgranade/qutip,qutip/qutip | qutip/tests/test_rhs_reuse.py | qutip/tests/test_rhs_reuse.py | # This file is part of QuTiP: Quantum Toolbox in Python.
#
# Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names
# of its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
###############################################################################
import numpy as np
from numpy.testing import assert_, assert_equal, run_module_suite
import qutip as qt
from qutip.solver import config
def test_rhs_reuse():
"""
rhs_reuse : pyx filenames match for rhs_reus= True
"""
N = 10
a = qt.destroy(N)
H = [a.dag()*a, [a+a.dag(), 'sin(t)']]
psi0 = qt.fock(N,3)
tlist = np.linspace(0,10,10)
e_ops = [a.dag()*a]
c_ops = [0.25*a]
# Test sesolve
out1 = qt.mesolve(H, psi0,tlist, e_ops=e_ops)
_temp_config_name = config.tdname
out2 = qt.mesolve(H, psi0,tlist, e_ops=e_ops)
assert_(config.tdname != _temp_config_name)
_temp_config_name = config.tdname
out3 = qt.mesolve(H, psi0,tlist, e_ops=e_ops,
options=qt.Options(rhs_reuse=True))
assert_(config.tdname == _temp_config_name)
# Test mesolve
out1 = qt.mesolve(H, psi0,tlist, c_ops=c_ops, e_ops=e_ops)
_temp_config_name = config.tdname
out2 = qt.mesolve(H, psi0,tlist, c_ops=c_ops, e_ops=e_ops)
assert_(config.tdname != _temp_config_name)
_temp_config_name = config.tdname
out3 = qt.mesolve(H, psi0,tlist, e_ops=e_ops, c_ops=c_ops,
options=qt.Options(rhs_reuse=True))
assert_(config.tdname == _temp_config_name)
if __name__ == "__main__":
run_module_suite()
| bsd-3-clause | Python | |
1541259e885ee498f05e42633d1bad3bac9d054d | Implement the LOADMODULE command | Heufneutje/txircd,ElementalAlchemist/txircd | txircd/modules/core/cmd_loadmodule.py | txircd/modules/core/cmd_loadmodule.py | from twisted.plugin import IPlugin
from twisted.words.protocols import irc
from txircd.ircd import ModuleLoadError
from txircd.module_interface import Command, ICommand, IModuleData, ModuleData
from zope.interface import implements
# We're using the InspIRCd numerics, as they seem to be the only ones to actually use numerics
irc.ERR_CANTLOADMODULE = "974"
irc.RPL_LOADEDMODULE = "975"
class LoadModuleCommand(Command, ModuleData):
implements(IPlugin, ICommand, IModuleData)
name = "LoadModuleCommand"
core = True
def hookIRCd(self, ircd):
self.ircd = ircd
def actions(self):
return [ ("commandpermission-LOADMODULE", 1, self.restrictToOpers) ]
def userCommands(self):
return [ ("LOADMODULE", 1, self) ]
def restrictToOpers(self, user, command, data):
if not self.ircd.runActionUntilValue("userhasoperpermission", user, "command-loadmodule", users=[user]):
user.sendSingleError("LoadModulePermission", irc.ERR_NOPRIVILEGES, ":Permission denied - You do not have the correct operator privileges")
return False
return None
def parseParams(self, user, params, prefix, tags):
if not params:
user.sendSingleError("LoadModuleCmd", irc.ERR_NEEDMOREPARAMS, "LOADMODULE", ":Not enough parameters")
return None
return {
"modulename": params[0]
}
def execute(self, user, data):
moduleName = data["modulename"]
if moduleName in self.ircd.loadedModules:
user.sendMessage(irc.ERR_CANTLOADMODULE, moduleName, ":Module is already loaded")
else:
try:
self.ircd.loadModule(moduleName)
if moduleName in self.ircd.loadedModules:
user.sendMessage(irc.RPL_LOADEDMODULE, moduleName, ":Module successfully loaded")
else:
user.sendMessage(irc.ERR_CANTLOADMODULE, moduleName, ":No such module")
except ModuleLoadError as e:
user.sendMessage(irc.ERR_CANTLOADMODULE, moduleName, ":{}".format(e.message))
return True
loadmoduleCommand = LoadModuleCommand() | bsd-3-clause | Python | |
edc982bdfaece6aaf23b3e7f9c967de800eacbd6 | Implement links server notice type | Heufneutje/txircd | txircd/modules/extra/snotice_links.py | txircd/modules/extra/snotice_links.py | from twisted.plugin import IPlugin
from txircd.modbase import IModuleData, ModuleData
from zope.interface import implements
class SnoLinks(ModuleData):
implements(IPlugin, IModuleData)
name = "ServerNoticeLinks"
def actions(self):
return [ ("serverconnect", 1, self.announceConnect),
("serverquit", 1, self.announceQuit),
("servernoticetype", 1, self.checkSnoType) ]
def announceConnect(self, server):
message = "Server {} ({}) connected (to {})".format(server.name, server.serverID, self.ircd.name if server.nextClosest == self.ircd.serverID else self.ircd.servers[server.nextClosest].name)
self.ircd.runActionStandard("sendservernotice", "links", message)
def announceQuit(self, server, reason):
message = "Server {} ({}) disconnected (from {}) ({})".format(server.name, server.serverID, self.ircd.name if server.nextClosest == self.ircd.serverID else self.ircd.servers[server.nextClosest].name, reason)
self.ircd.runActionStandard("sendservernotice", "links", message)
def checkSnoType(self, user, typename):
if typename == "links":
return True
return False
snoLinks = SnoLinks() | bsd-3-clause | Python | |
c65f59c6a6048807d29e5ce123447afd006ce05f | Add missing migration for username length | mikkokeskinen/tunnistamo,mikkokeskinen/tunnistamo | users/migrations/0007_username_length.py | users/migrations/0007_username_length.py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-11-02 11:53
from __future__ import unicode_literals
import django.contrib.auth.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0006_auto_20160508_1407'),
]
operations = [
migrations.AlterField(
model_name='user',
name='username',
field=models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username'),
),
]
| mit | Python | |
50f33b1ae3b3ce6c1fb00a53145a7fe2fe822680 | Add redis tests | globocom/database-as-a-service,globocom/database-as-a-service,globocom/database-as-a-service,globocom/database-as-a-service | dbaas/drivers/tests/test_driver_redis.py | dbaas/drivers/tests/test_driver_redis.py | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import mock
import logging
from django.test import TestCase
from drivers import DriverFactory
from physical.tests import factory as factory_physical
from logical.tests import factory as factory_logical
from logical.models import Database
from ..redis import Redis
LOG = logging.getLogger(__name__)
class AbstractTestDriverRedis(TestCase):
def setUp(self):
self.databaseinfra = factory_physical.DatabaseInfraFactory(password="OPlpplpooi", endpoint="127.0.0.1:6379")
self.instance = factory_physical.InstanceFactory(databaseinfra=self.databaseinfra, port=6379)
self.driver = Redis(databaseinfra=self.databaseinfra)
self._redis_client = None
def tearDown(self):
if not Database.objects.filter(databaseinfra_id=self.databaseinfra.id):
self.databaseinfra.delete()
self.driver = self.databaseinfra = self._redis_client = None
@property
def redis_client(self):
if self._redis_client is None:
self._redis_client = self.driver.__redis_client__(self.instance)
return self._redis_client
class RedisEngineTestCase(AbstractTestDriverRedis):
"""
Tests Redis Engine
"""
def test_redis_app_installed(self):
self.assertTrue(DriverFactory.is_driver_available("redis"))
#test redis methods
def test_instantiate_redis_using_engine_factory(self):
self.assertEqual(Redis, type(self.driver))
self.assertEqual(self.databaseinfra, self.driver.databaseinfra)
def test_connection_string(self):
self.assertEqual("redis://:<password>@127.0.0.1:6379/0", self.driver.get_connection())
def test_get_password(self):
self.assertEqual(self.databaseinfra.password, self.driver.get_password())
def test_get_default_port(self):
self.assertEqual(6379, self.driver.default_port)
def test_connection_with_database(self):
self.database = factory_logical.DatabaseFactory(name="my_db_url_name", databaseinfra=self.databaseinfra)
self.assertEqual("redis://:<password>@127.0.0.1:6379/0", self.driver.get_connection(database=self.database))
class ManageDatabaseRedisTestCase(AbstractTestDriverRedis):
""" Test case to managing database in redis engine """
def setUp(self):
super(ManageDatabaseRedisTestCase, self).setUp()
self.database = factory_logical.DatabaseFactory(databaseinfra=self.databaseinfra)
# ensure database is dropped
#get fake driver
driver = self.databaseinfra.get_driver()
driver.remove_database(self.database)
def tearDown(self):
if not Database.objects.filter(databaseinfra_id=self.databaseinfra.id):
self.database.delete()
super(ManageDatabaseRedisTestCase, self).tearDown()
| bsd-3-clause | Python | |
e175cfda1e54586823e6bf0ad78145d175929b83 | add entrypoints analysis module | bat-serjo/vivisect,cmaruti/vivisect,bat-serjo/vivisect,atlas0fd00m/vivisect,bat-serjo/vivisect,cmaruti/vivisect,atlas0fd00m/vivisect,vivisect/vivisect,vivisect/vivisect,vivisect/vivisect,cmaruti/vivisect,atlas0fd00m/vivisect | vivisect/analysis/generic/entrypoints.py | vivisect/analysis/generic/entrypoints.py |
def analyze(vw):
'''
Analysis module which processes entries in VaSet "EntryPoints" as Functions
This is a vital analysis module, only made into a module to allow for
control over when in the process it gets executed.
'''
vw.processEntryPoints()
| apache-2.0 | Python | |
68e675f4531bf07cdc484837b02121941d4c8ad0 | add engine_config.py | tmthyjames/SQLCell | engine_config.py | engine_config.py | # default connection string info here
driver = 'postgresql'
username = 'tdobbins'
password = 'tdobbins'
host = 'localhost'
port = '5432'
default_db = 'bls' | mit | Python | |
e32e4500676f043ca137bf5f8af04848f149e067 | Add contribution.py | AwesomeTickets/Dashboard | script/contribution.py | script/contribution.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import urllib.request
import json
import sys
OWNERS = ['AwesomeTickets']
REPOS = ['Dashboard', 'Integration', 'ServiceServer', 'StaticPageServer', 'DatabaseServer', 'CacheServer']
URL = 'https://api.github.com/repos/%s/%s/stats/contributors'
def create_req(url):
return urllib.request.Request(url, data=None, headers={
'User-Agent': 'AwesomeTickets'
})
for owner in OWNERS:
tot = 0
author_tot = {}
for repo in REPOS:
print("%s/%s:" % (owner, repo))
req = create_req(URL % (owner, repo))
try:
with urllib.request.urlopen(req) as res:
json_str = res.read().decode('utf-8')
contribs = json.loads(json_str)
contribs.sort(key=lambda x: x['total'], reverse=True)
for contrib in contribs:
cnt = contrib["total"]
tot += cnt
author = contrib["author"]["login"]
if (author in author_tot):
author_tot[author] += cnt
else:
author_tot[author] = cnt
print("%-16s: %4s commits" % (author, cnt))
except urllib.error.HTTPError as err:
print(err)
print(err.read().decode('utf-8'))
sys.exit(1)
print("")
print("%s:" % owner)
author_tot = [(k, author_tot[k]) for k in sorted(author_tot, key=author_tot.get, reverse=True)]
for author, cnt in author_tot:
print("%-16s: %4d commits (%2d%%)"
% (author, cnt, cnt * 100 // tot))
| mit | Python | |
cbf3c6140aafefbaef7186e0cb97d0758b1d38b2 | add the dna algorithm (#6323) | TheAlgorithms/Python | strings/dna.py | strings/dna.py | import re
def dna(dna: str) -> str:
"""
https://en.wikipedia.org/wiki/DNA
Returns the second side of a DNA strand
>>> dna("GCTA")
'CGAT'
>>> dna("ATGC")
'TACG'
>>> dna("CTGA")
'GACT'
>>> dna("GFGG")
'Invalid Strand'
"""
r = len(re.findall("[ATCG]", dna)) != len(dna)
val = dna.translate(dna.maketrans("ATCG", "TAGC"))
return "Invalid Strand" if r else val
if __name__ == "__main__":
__import__("doctest").testmod()
| mit | Python | |
1ef84c24c60cf802aeb4bf6084f9b7fc7696f79a | Add a script to print the scheduling times of albums | barrucadu/lainonlife,barrucadu/lainonlife,barrucadu/lainonlife,barrucadu/lainonlife | scripts/album_times.py | scripts/album_times.py | #!/usr/bin/env python3
"""Radio scheduling program.
Usage:
album_times.py [--host=HOST] PORT
Options:
--host=HOST Hostname of MPD [default: localhost]
-h --help Show this text
Prints out the last scheduling time of every album.
"""
from datetime import datetime
from docopt import docopt
from mpd import MPDClient
def album_sticker_get(client, album, sticker):
"""Gets a sticker associated with an album."""
# I am pretty sure that MPD only implements stickers for songs, so
# the sticker gets attached to the first song in the album.
tracks = client.find("album", album)
if len(tracks) == 0:
return
return client.sticker_get("song", tracks[0]["file"], "album_" + sticker)
def album_sticker_set(client, album, sticker, val):
"""Sets a sticker associated with an album."""
# I am pretty sure that MPD only implements stickers for songs, so
# the sticker gets attached to the first song in the album.
tracks = client.find("album", album)
if len(tracks) == 0:
return
return client.sticker_set("song", tracks[0]["file"], "album_" + sticker, val)
def list_albums(client):
"""Lists albums sorted by last play timestamp."""
# Get all albums
albums = client.list("album")
all_albums = list(filter(lambda a: a not in ["", "Lainchan Radio Transitions"], albums))
# Group albums by when they were last scheduled
albums_by_last_scheduled = {}
last_scheduled_times = []
for album in all_albums:
# Get the last scheduled time, defaulting to 0
try:
last_scheduled = int(album_sticker_get(client, album, "last_scheduled"))
except:
last_scheduled = 0
# Put the album into the appropriate bucket
if last_scheduled in albums_by_last_scheduled:
albums_by_last_scheduled[last_scheduled].append(album)
else:
albums_by_last_scheduled[last_scheduled] = [album]
last_scheduled_times.append(last_scheduled)
# Pick the 10 oldest times
last_scheduled_times.sort()
for last_scheduled in last_scheduled_times:
dt = datetime.utcfromtimestamp(last_scheduled)
albums = albums_by_last_scheduled[last_scheduled]
print("{}: {}".format(dt.strftime('%Y-%m-%d %H:%M:%S'), albums))
if __name__ == "__main__":
args = docopt(__doc__)
try:
args["PORT"] = int(args["PORT"])
except:
print("PORT must be an integer")
exit(1)
try:
client = MPDClient()
client.connect(args["--host"], args["PORT"])
except:
print("could not connect to MPD")
exit(2)
list_albums(client)
| mit | Python | |
bab9866d6e255cfe74a5eaf325f8cc8a16648621 | Add migration | softwaresaved/fat,softwaresaved/fat,softwaresaved/fat,softwaresaved/fat | lowfat/migrations/0089_auto_20170301_2349.py | lowfat/migrations/0089_auto_20170301_2349.py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-01 23:49
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('lowfat', '0088_auto_20170301_1411'),
]
operations = [
migrations.AddField(
model_name='expense',
name='invoice',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='expense',
name='invoice_reference',
field=models.TextField(blank=True, max_length=8, null=True),
),
migrations.AddField(
model_name='historicalexpense',
name='invoice',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='historicalexpense',
name='invoice_reference',
field=models.TextField(blank=True, max_length=8, null=True),
),
]
| bsd-3-clause | Python | |
c0aa01adcf6385bb4d0b6ddd13299b6b0c3af566 | Remove accidental leftovers | toolforger/sympy,emon10005/sympy,shikil/sympy,skidzo/sympy,rahuldan/sympy,ChristinaZografou/sympy,yukoba/sympy,rahuldan/sympy,saurabhjn76/sympy,souravsingh/sympy,Arafatk/sympy,mcdaniel67/sympy,wanglongqi/sympy,sahmed95/sympy,jerli/sympy,Titan-C/sympy,Curious72/sympy,kaichogami/sympy,farhaanbukhsh/sympy,Gadal/sympy,yukoba/sympy,kumarkrishna/sympy,atsao72/sympy,maniteja123/sympy,mafiya69/sympy,abloomston/sympy,souravsingh/sympy,Shaswat27/sympy,mafiya69/sympy,wanglongqi/sympy,Vishluck/sympy,Designist/sympy,Designist/sympy,wyom/sympy,atreyv/sympy,wyom/sympy,cswiercz/sympy,drufat/sympy,Curious72/sympy,jaimahajan1997/sympy,MechCoder/sympy,emon10005/sympy,atsao72/sympy,madan96/sympy,kaichogami/sympy,yashsharan/sympy,moble/sympy,yukoba/sympy,Davidjohnwilson/sympy,farhaanbukhsh/sympy,debugger22/sympy,Titan-C/sympy,sampadsaha5/sympy,madan96/sympy,AkademieOlympia/sympy,abloomston/sympy,ahhda/sympy,grevutiu-gabriel/sympy,Designist/sympy,mafiya69/sympy,ahhda/sympy,kaushik94/sympy,shikil/sympy,abhiii5459/sympy,debugger22/sympy,iamutkarshtiwari/sympy,Arafatk/sympy,yashsharan/sympy,kevalds51/sympy,grevutiu-gabriel/sympy,AkademieOlympia/sympy,atsao72/sympy,postvakje/sympy,skidzo/sympy,bukzor/sympy,VaibhavAgarwalVA/sympy,drufat/sympy,ga7g08/sympy,aktech/sympy,VaibhavAgarwalVA/sympy,moble/sympy,Shaswat27/sympy,mcdaniel67/sympy,Vishluck/sympy,madan96/sympy,lindsayad/sympy,ga7g08/sympy,iamutkarshtiwari/sympy,Davidjohnwilson/sympy,pandeyadarsh/sympy,ga7g08/sympy,Arafatk/sympy,asm666/sympy,kumarkrishna/sympy,kevalds51/sympy,Shaswat27/sympy,bukzor/sympy,Vishluck/sympy,lindsayad/sympy,Gadal/sympy,jerli/sympy,Curious72/sympy,wanglongqi/sympy,pandeyadarsh/sympy,ahhda/sympy,hargup/sympy,jbbskinny/sympy,MechCoder/sympy,garvitr/sympy,garvitr/sympy,moble/sympy,jaimahajan1997/sympy,wyom/sympy,jbbskinny/sympy,AkademieOlympia/sympy,sampadsaha5/sympy,souravsingh/sympy,shikil/sympy,pandeyadarsh/sympy,skidzo/sympy,postvakje/sympy,cswiercz/sympy,ChristinaZografou/sympy,kaichogami/sympy,Titan-C/sympy,kumarkrishna/sympy,maniteja123/sympy,grevutiu-gabriel/sympy,lindsayad/sympy,maniteja123/sympy,sahmed95/sympy,oliverlee/sympy,saurabhjn76/sympy,jaimahajan1997/sympy,iamutkarshtiwari/sympy,saurabhjn76/sympy,asm666/sympy,jerli/sympy,jbbskinny/sympy,MechCoder/sympy,oliverlee/sympy,farhaanbukhsh/sympy,abhiii5459/sympy,ChristinaZografou/sympy,drufat/sympy,atreyv/sympy,debugger22/sympy,cswiercz/sympy,sahmed95/sympy,Davidjohnwilson/sympy,chaffra/sympy,kaushik94/sympy,aktech/sympy,aktech/sympy,abloomston/sympy,chaffra/sympy,postvakje/sympy,toolforger/sympy,hargup/sympy,emon10005/sympy,oliverlee/sympy,chaffra/sympy,abhiii5459/sympy,VaibhavAgarwalVA/sympy,kevalds51/sympy,mcdaniel67/sympy,toolforger/sympy,bukzor/sympy,Gadal/sympy,yashsharan/sympy,kaushik94/sympy,sampadsaha5/sympy,atreyv/sympy,hargup/sympy,rahuldan/sympy,garvitr/sympy,asm666/sympy | sympy/geometry/__init__.py | sympy/geometry/__init__.py | """
A geometry module for the SymPy library. This module contains all of the
entities and functions needed to construct basic geometrical data and to
perform simple informational queries.
Usage:
======
Notes:
======
Currently the geometry module supports 2-dimensional
and 3 -dimensional Euclidean space.
Examples
========
"""
from sympy.geometry.point import Point, Point2D, Point3D
from sympy.geometry.line import Line, Ray, Segment
from sympy.geometry.line3d import Line3D, Segment3D, Ray3D
from sympy.geometry.plane import Plane
from sympy.geometry.ellipse import Ellipse, Circle
from sympy.geometry.polygon import Polygon, RegularPolygon, Triangle, rad, deg
from sympy.geometry.util import are_similar, centroid, convex_hull, idiff, \
intersection
from sympy.geometry.exceptions import GeometryError
from sympy.geometry.curve import Curve
| """
A geometry module for the SymPy library. This module contains all of the
entities and functions needed to construct basic geometrical data and to
perform simple informational queries.
Usage:
======
Notes:
======
Currently the geometry module supports 2-dimensional
and 3 -dimensional Euclidean space.
Examples
========
"""
from sympy.geometry.point import Point, Point2D, Point3D
from sympy.geometry.line import Line, Ray, Segment
from sympy.geometry.line3d import Line3D, Segment3D, Ray3D
from sympy.geometry.plane import Plane
from sympy.geometry.ellipse import Ellipse, Circle
from sympy.geometry.polygon import Polygon, RegularPolygon, Triangle, rad, deg
from sympy.geometry.util import are_similar, centroid, convex_hull, idiff, \
intersection
from sympy.geometry.exceptions import GeometryError
from sympy.geometry.curve import Curve
from sympy.geometry.entity import R2, R3
| bsd-3-clause | Python |
2fadb52dae11ad0910c33f689074f65e8651a005 | add script to sniff for prohibited files | AlanCoding/Ansible-inventory-file-examples,AlanCoding/Ansible-inventory-file-examples | scripts/prohibited/general.py | scripts/prohibited/general.py | #!/usr/bin/env python
import os
import json
import re
errors = list()
my_file = __file__
my_dir = os.path.dirname(my_file)
my_filename = my_file.rsplit(os.path.sep, 1)[1]
# assert that only one tempfile is visible
for tmpdir in ('/tmp', '/var/tmp'):
for files in os.listdir(tmpdir):
matches = [f for f in files if f != my_filename]
if matches:
files = map(lambda f: os.path.join(tmpdir, f), files)
errors.append(("Found temporary files", files))
# assert that no project directories are visible
lib_dir = '/var/lib'
if os.path.isdir(lib_dir):
files = os.listdir(lib_dir)
if files:
errors.append(("Found project directories", files))
# assert that no tower conf files are visible
etc_dir = '/etc'
if os.path.isdir(etc_dir):
files = os.listdir(etc_dir)
if files:
errors.append(("Tower config files", files))
# assert that no tower log files are visible
log_dir = '/var/log'
if os.path.isdir(log_dir):
files = os.listdir(log_dir)
if files:
errors.append(("Tower log files", files))
if errors:
err_str = "The following errors were detected while running a proot-enabled inventory_update.\\n"
for (name, files) in errors:
err_str += "\\n# %s\\n" % name
err_str += " - %s" % "\\n - ".join(files)
raise Exception(err_str)
print json.dumps({})
| mit | Python | |
25561dab146c3bcd8b2ea8f0d7b3dfcf9cf0b8cc | add test with one null status coverage | stifoon/navitia,fueghan/navitia,fueghan/navitia,francois-vincent/navitia,djludo/navitia,thiphariel/navitia,Tisseo/navitia,thiphariel/navitia,Tisseo/navitia,datanel/navitia,antoine-de/navitia,antoine-de/navitia,stifoon/navitia,prhod/navitia,CanalTP/navitia,TeXitoi/navitia,prhod/navitia,is06/navitia,CanalTP/navitia,pbougue/navitia,Tisseo/navitia,lrocheWB/navitia,lrocheWB/navitia,frodrigo/navitia,francois-vincent/navitia,datanel/navitia,kadhikari/navitia,stifoon/navitia,is06/navitia,francois-vincent/navitia,patochectp/navitia,stifoon/navitia,kadhikari/navitia,TeXitoi/navitia,prhod/navitia,patochectp/navitia,kinnou02/navitia,lrocheWB/navitia,patochectp/navitia,ballouche/navitia,ballouche/navitia,antoine-de/navitia,is06/navitia,Tisseo/navitia,is06/navitia,kinnou02/navitia,djludo/navitia,prhod/navitia,frodrigo/navitia,TeXitoi/navitia,VincentCATILLON/navitia,kadhikari/navitia,kinnou02/navitia,xlqian/navitia,patochectp/navitia,frodrigo/navitia,Tisseo/navitia,xlqian/navitia,pbougue/navitia,djludo/navitia,VincentCATILLON/navitia,fueghan/navitia,VincentCATILLON/navitia,xlqian/navitia,VincentCATILLON/navitia,francois-vincent/navitia,fueghan/navitia,CanalTP/navitia,thiphariel/navitia,datanel/navitia,TeXitoi/navitia,antoine-de/navitia,lrocheWB/navitia,xlqian/navitia,datanel/navitia,djludo/navitia,ballouche/navitia,frodrigo/navitia,ballouche/navitia,kadhikari/navitia,kinnou02/navitia,CanalTP/navitia,pbougue/navitia,xlqian/navitia,pbougue/navitia,CanalTP/navitia,thiphariel/navitia | source/jormungandr/tests/coverage_tests.py | source/jormungandr/tests/coverage_tests.py | # Copyright (c) 2001-2014, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# IRC #navitia on freenode
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from tests_mechanism import AbstractTestFixture, dataset
from check_utils import *
import logging
@dataset(["main_routing_test", "null_status_test"])
class TestNullStatus(AbstractTestFixture):
"""
Test with an empty coverage
"""
def null_status_test(self):
response = self.query("/v1/coverage", display=False)
logging.info(response['regions'])
assert('regions' in response)
assert(len(response['regions']) == 1)
assert(response['regions'][0]['id'] == 'main_routing_test')
| agpl-3.0 | Python | |
f64183528d4a44d4ac0f3d97e7847f8895d58039 | implement the receiver | WheatonCS/Lexos,WheatonCS/Lexos,WheatonCS/Lexos | lexos/receivers/rolling_windows_receiver.py | lexos/receivers/rolling_windows_receiver.py | from enum import Enum
from typing import NamedTuple, Optional
from lexos.receivers.base_receiver import BaseReceiver
class RWACountType(Enum):
average = "average"
ratio = "ratio"
class RWATokenType(Enum):
string = "string"
regex = "regex"
word = "word"
class WindowUnitType(Enum):
letter = "letter"
word = "word"
line = "line"
class RWAWindowOptions(NamedTuple):
window_size: int
window_unit: WindowUnitType
class RWATokenOptions(NamedTuple):
token_type: RWATokenType
token: str
secondary_token: str
class RWAFrontEndOptions(NamedTuple):
count_type: RWACountType
token_options: RWATokenOptions
window_options: RWAWindowOptions
milestone: Optional[str]
class RollingWindowsReceiver(BaseReceiver):
def _get_count_type(self) -> RWACountType:
if self._front_end_data['counttype'] == 'ratio':
return RWACountType.ratio
elif self._front_end_data['counttype'] == 'average':
return RWACountType.average
else:
raise ValueError("invalid count type from front end")
def _get_token_options(self) -> RWATokenOptions:
if self._front_end_data['inputtype'] == 'string':
token_type = RWATokenType.string
elif self._front_end_data['inputtype'] == 'regex':
token_type = RWATokenType.regex
elif self._front_end_data['inputtype'] == 'word':
token_type = RWATokenType.word
else:
raise ValueError("invalid token type from front end")
token = self._front_end_data['rollingsearchword']
secondary_token = self._front_end_data['rollingsearchwordopt']
return RWATokenOptions(token_type=token_type, token=token,
secondary_token=secondary_token)
def _get_window_option(self) -> RWAWindowOptions:
if self._front_end_data['windowtype'] == 'letter':
window_unit = WindowUnitType.letter
elif self._front_end_data['windowtype'] == 'word':
window_unit = WindowUnitType.word
elif self._front_end_data['windowtype'] == 'lines':
window_unit = WindowUnitType.line
else:
raise ValueError("invalid window unit from front end")
window_size = self._front_end_data['rollingwindowsize']
return RWAWindowOptions(window_size=window_size,
window_unit=window_unit)
def _get_milestone(self) -> Optional[str]:
if 'rollinghasmilestone' in self._front_end_data:
return None
else:
return self._front_end_data['rollingmilestonetype']
def options_from_front_end(self) -> RWAFrontEndOptions:
return RWAFrontEndOptions(
count_type=self._get_count_type(),
token_options=self._get_token_options(),
window_options=self._get_window_option(),
milestone=self._get_milestone()
)
| mit | Python | |
408a541fc1cdd5198444db7703e0baffda493d82 | Create mp3test.py | MyRobotLab/pyrobotlab,sstocker46/pyrobotlab,sstocker46/pyrobotlab,MyRobotLab/pyrobotlab,mecax/pyrobotlab,MyRobotLab/pyrobotlab,mecax/pyrobotlab,MyRobotLab/pyrobotlab,sstocker46/pyrobotlab,MyRobotLab/pyrobotlab | home/Markus/mp3test.py | home/Markus/mp3test.py | # this is a test script
# i have a folder with the mp3 files named from music1 to music8.
# it random choses the files . no problem
# but i want to change the sleep(120) so the next starts when the previous is finished
from java.lang import String
from org.myrobotlab.service import Speech
from org.myrobotlab.service import Sphinx
from org.myrobotlab.service import Runtime
import random
mouth = Runtime.createAndStart("mouth","Speech")
def play():
for y in range(0, 8):
number = str(random.randint(1, 8))
mouth.audioFile.playFile("C:\Users\Markus\Music\Robynsfavoriter\music" + str(number) + ".mp3", False)
print number
mouth.speak("playing song number" + str(number))
sleep(120)
mouth.audioFile.silence()
play()
| apache-2.0 | Python | |
bda42a4630e8b9e720443b6785ff2e3435bfdfa6 | Add code for getting team schedule and game outcomes | jldbc/pybaseball | pybaseball/team_results.py | pybaseball/team_results.py | import pandas as pd
import requests
from bs4 import BeautifulSoup
# TODO: raise error if year > current year or < first year of a team's existence
# TODO: team validation. return error if team does not exist.
# TODO: sanitize team inputs (force to all caps)
def get_soup(season, team):
# get most recent year's schedule if year not specified
if(season is None):
season = datetime.datetime.today().strftime("%Y")
url = "http://www.baseball-reference.com/teams/{}/{}-schedule-scores.shtml".format(team, season)
s=requests.get(url).content
return BeautifulSoup(s, "html.parser")
def get_table(soup):
table = soup.find_all('table')[0]
data = []
headings = [th.get_text() for th in table.find("tr").find_all("th")]
headings = headings[1:] # the "gm#" heading doesn't have a <td> element
headings[3] = "Home_Away"
data.append(headings)
table_body = table.find('tbody')
rows = table_body.find_all('tr')
for row_index in range(len(rows)-1): #last row is a description of column meanings
row = rows[row_index]
try:
cols = row.find_all('td')
#links = row.find_all('a')
if cols[3].text == "":
cols[3].string = 'Home' # this element only has an entry if it's an away game
if cols[12].text == "":
cols[12].string = "None" # tie games won't have a pitcher win or loss
if cols[13].text == "":
cols[13].string = "None"
if cols[14].text == "":
cols[14].string = "None" # games w/o saves have blank td entry
if cols[8].text == "":
cols[8].string = "9" # entry is blank if no extra innings
cols = [ele.text.strip() for ele in cols]
data.append([ele for ele in cols if ele])
except:
# two cases will break the above: games that haven't happened yet, and BR's redundant mid-table headers
# if future games, grab the scheduling info. Otherwise do nothing.
if len(cols)>1:
cols = [ele.text.strip() for ele in cols][0:5]
data.append([ele for ele in cols if ele])
#convert to pandas dataframe. make first row the table's column names and reindex.
data = pd.DataFrame(data)
data = data.rename(columns=data.iloc[0])
data = data.reindex(data.index.drop(0))
return data
def schedule_and_record(season=None, team=None):
# retrieve html from baseball reference
soup = get_soup(season, team)
table = get_table(soup)
return table
| mit | Python | |
0cf3d1d145c4ec8aa8e5c5917d610ee37776d26f | add positve and negative particle efficiency calculation options | tuos/RpPb2015Analysis,tuos/RpPb2015Analysis,tuos/RpPb2015Analysis,tuos/RpPb2015Analysis | eff/pPb/testppchargeDepEff/run_pp_cfg.py | eff/pPb/testppchargeDepEff/run_pp_cfg.py | import FWCore.ParameterSet.Config as cms
process = cms.Process('TRACKANA')
process.load('Configuration.StandardSequences.Services_cff')
process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Configuration.StandardSequences.GeometryRecoDB_cff')
process.load('Configuration.StandardSequences.MagneticField_38T_PostLS1_cff')
process.load('Configuration.StandardSequences.ReconstructionHeavyIons_cff')
process.load('Configuration.StandardSequences.EndOfProcess_cff')
process.load('TrackingCode.HIRun2015Ana.HITrackCorrectionAnalyzer_cfi')
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(10)
)
process.options = cms.untracked.PSet(wantSummary = cms.untracked.bool(True))
process.TFileService = cms.Service("TFileService",
fileName = cms.string('trk.root')
)
process.load("SimTracker.TrackAssociation.trackingParticleRecoTrackAsssociation_cfi")
process.tpRecoAssocGeneralTracks = process.trackingParticleRecoTrackAsssociation.clone()
process.tpRecoAssocGeneralTracks.label_tr = cms.InputTag("generalTracks")
process.load("SimTracker.TrackAssociatorProducers.quickTrackAssociatorByHits_cfi")
process.quickTrackAssociatorByHits.SimToRecoDenominator = cms.string('reco')
process.load("SimTracker.TrackerHitAssociation.clusterTpAssociationProducer_cfi")
# Input source
process.source = cms.Source("PoolSource",
duplicateCheckMode = cms.untracked.string("noDuplicateCheck"),
fileNames = cms.untracked.vstring(
##pythia
#'/store/user/velicanu/MinBias_TuneCUETP8M1_5p02TeV-pythia8/MinBias_TuneCUETP8M1_5p02TeV_pythia8_pp502Fall15_MCRUN2_71_V1_ppreco_RECODEBUG_v0/151117_131129/0000/step3_100.root'
'/store/user/dgulhan/PYTHIA_QCD_TuneCUETP8M1_cfi_GEN_SIM_5020GeV/PYTHIA_QCD80_TuneCUETP8M1_cfi_RECODEBUGpp_757p1_timeslew_HcalRespCorrs_v4_00_mc/151211_111516/0000/step3_1.root'
)
)
### Track cuts ###
# input collections
process.HITrackCorrections.centralitySrc = cms.InputTag("centralityBin","HFtowers")
process.HITrackCorrections.trackSrc = cms.InputTag("generalTracks")
process.HITrackCorrections.vertexSrc = cms.InputTag("offlinePrimaryVertices")
process.HITrackCorrections.qualityString = cms.string("highPurity")
process.HITrackCorrections.pfCandSrc = cms.InputTag("particleFlow")
process.HITrackCorrections.jetSrc = cms.InputTag("ak4CaloJets")
# options
process.HITrackCorrections.useCentrality = False
process.HITrackCorrections.applyTrackCuts = True
process.HITrackCorrections.fillNTuples = False
process.HITrackCorrections.applyVertexZCut = True
process.HITrackCorrections.doVtxReweighting = False
process.HITrackCorrections.doCaloMatched = True
# cut values
process.HITrackCorrections.dxyErrMax = 3.0
process.HITrackCorrections.dzErrMax = 3.0
process.HITrackCorrections.ptErrMax = 0.3 #track trigger 0.1
process.HITrackCorrections.nhitsMin = 0 # track trigger 11
process.HITrackCorrections.chi2nMax = 9999.9 # track trigger 0.15
process.HITrackCorrections.reso = 0.2
#process.HITrackCorrections.crossSection = 1.0 #1.0 is no reweigh
# vertex reweight parameters
#pp 5TeV
process.HITrackCorrections.vtxWeightParameters = cms.vdouble(0.0028637,0.225207,5.61457,0.00296148,0.640783,5.41939)
###
process.HITrackCorrections.algoParameters = cms.vint32(4,5,6,7,8)
## pos, neg, all
process.HITrackCorrections.partileCharge = cms.int32(2)
process.HITrackCorrectionsPos = process.HITrackCorrections.clone()
process.HITrackCorrectionsPos.partileCharge = cms.int32(1)
process.HITrackCorrectionsNeg = process.HITrackCorrections.clone()
process.HITrackCorrectionsNeg.partileCharge = cms.int32(-1)
process.p = cms.Path(
process.tpClusterProducer *
process.quickTrackAssociatorByHits *
process.tpRecoAssocGeneralTracks *
process.HITrackCorrections
)
process.p2 = cms.Path(
process.tpClusterProducer *
process.quickTrackAssociatorByHits *
process.tpRecoAssocGeneralTracks *
process.HITrackCorrectionsPos
)
process.p3 = cms.Path(
process.tpClusterProducer *
process.quickTrackAssociatorByHits *
process.tpRecoAssocGeneralTracks *
process.HITrackCorrectionsNeg
)
| mit | Python | |
30d0ca9fa2c76463569362eb0f640dbbe0079068 | Add builder to compile coffeescript to javascript files | ZzCalvinzZ/ludumdare26,ZzCalvinzZ/ludumdare26 | buildGame.py | buildGame.py | #!/usr/bin/env python
import fnmatch
import os
from subprocess import call
rootPath = 'ludumdare26'
pattern = '*.coffee'
for root, dirs, files in os.walk(rootPath):
for filename in fnmatch.filter(files, pattern):
print( os.path.join(root, filename))
call( [ 'coffee', '-c', os.path.join(root, filename) ] )
| apache-2.0 | Python | |
054cfe232cd7672788e8b2b4e4af44b7cfbe99be | solve 1 problem | Shuailong/Leetcode | solutions/single-number-ii.py | solutions/single-number-ii.py | #!/usr/bin/env python
# encoding: utf-8
"""
single-number-ii.py
Created by Shuailong on 2016-03-23.
https://leetcode.com/problems/single-number-ii/.
"""
class Solution(object):
def singleNumber(self, nums):
"""
O(n) time and O(1) space, but slower than Solution1 and Solution2.
:type nums: List[int]
:rtype: int
"""
MAX_LEN = 32
bit_sums = [0]*32
for i in range(MAX_LEN):
for num in nums:
bit_sums[i] += num >> i & 1
bit_sums = [str(bit_sum % 3) for bit_sum in bit_sums] # bit_nums[:]
bit_sums.reverse()
if bit_sums[0] == '0':
res = int(''.join(bit_sums), 2)
else:
bits = ''.join(['1' if bit == '0' else '0' for bit in bit_sums])
res = -(int(bits, 2) + 1)
return res
from collections import Counter
class Solution2(object):
def singleNumber(self, nums):
"""
Use extra space. 52ms
:type nums: List[int]
:rtype: int
"""
c = Counter(nums)
for k in c:
if c[k] == 1:
return k
class Solution1(object):
def singleNumber(self, nums):
"""
Not linear time. 60ms.
:type nums: List[int]
:rtype: int
"""
nums.sort()
for i in range(0, len(nums)-2, 3):
if nums[i] != nums[i+1]:
return nums[i]
return nums[len(nums)-1]
def main():
solution = Solution()
nums = [1]
print solution.singleNumber(nums)
if __name__ == '__main__':
main()
| mit | Python | |
5e377f926e964f5d6d74ba713947f11b72534b70 | Add namelizer.py to normalize feature names for Events | nickwbarber/HILT-annotations | namelizer.py | namelizer.py | import itertools
import os
from lxml import etree as ET
import gate
conversations_path = "/home/nick/hilt/pes/conversations"
files = list(
itertools.chain.from_iterable(
(
[
os.path.join(
results[0],
x,
)
for x in results[2]
if x.lower().endswith(".xml")
]
for results in os.walk(conversations_path)
)
)
)
annotation_files = (
gate.AnnotationFile(f)
for f in files
)
for f in annotation_files:
for annotation in f.iter_annotations():
if annotation._type.lower() == "event":
for feature in annotation.get_features():
if "positive" in feature.get_name().lower():
feature.set_name("Polarity")
f.tree.write(f.filename)
# print(
# set(
# feature.get_name()
# for annotation_file in annotation_files
# for annotation in annotation_file.iter_annotations()
# for feature in annotation.get_features()
# if annotation._type.lower() == "event"
# )
# )
| mit | Python | |
9a878c96ccf2fb7316dc6e9d330840a78ffd303b | Create second_degree_equation_solver.py | V3sth4cks153/Python-Programs | second_degree_equation_solver.py | second_degree_equation_solver.py | # -*- coding: utf-8 -*-
import sys
import math
print " _____ _ _ _____ _ ___ "
print "| __|___ _ _ ___| |_|_|___ ___ | __|___| |_ _ ___ ___ _ _ |_ | "
print "| __| . | | | .'| _| | . | | |__ | . | | | | -_| _| | | |_ _| |_ "
print "|_____|_ |___|__,|_| |_|___|_|_| |_____|___|_|\_/|___|_| \_/|_|_____|"
print " |_| "
print "\nWelcome in the Equation Resolver by V3sth4cks153. Please give the values for 'a', 'b' and 'c' as follows: ax^2+bx+c = 0.\n"
a = input("Value of 'A': ")
if a != 0:
print "A = %s" % (a)
else:
sys.exit("Invalid value. Please enter only numbers other than zero.")
b = input("Value of 'B': ")
if b != 0:
print "B = %s" % (b)
else:
sys.exit("Invalid value. Please enter only numbers other than zero.")
c = input("Value of 'C': ")
if c != 0:
print "C = %s" % (c)
else:
sys.exit("Invalid value. Please enter only numbers other than zero.")
af = float(a)
bf = float(b)
cf = float(c)
discriminant = (bf * bf) - 4 * (af * cf)
if discriminant >= 0:
print "\nThe discriminant is equal to: %s.\n" % (discriminant)
else:
sys.exit("The equation is unresolvable. The discriminant is negative.")
x1 = (-bf - math.sqrt(discriminant) ) / (2 * af)
x2 = (-bf + math.sqrt(discriminant) ) / (2 * af)
x3 = (-bf) / (2 * af)
if discriminant == 0:
print "Sole solution of the equation: %s" % (x3)
else:
print "Solutions: (%s; %s) \n\nThank you for using the Equation Solver by V3sth4cks153 !" % (x1, x2)
| mit | Python | |
2c223297fa64acc853994d6970d422253ce033bd | fix http3 channel | hansroh/skitai,hansroh/skitai,hansroh/skitai | skitai/http3_server.py | skitai/http3_server.py | #!/usr/bin/env python
from . import http_server
from .counter import counter
import socket, time
from rs4 import asyncore
import ssl
from skitai import lifetime
import os, sys, errno
import skitai
from errno import EWOULDBLOCK
class http3_channel (https_server.https_channel):
def __init__ (self, server, data, addr):
http_server.http_channel.__init__(self, server, None, addr)
self.create_socket (socket.AF_INET, socket.SOCK_DGRAM)
self.connect (addr)
class http3_server (https_server.https_server):
def create_socket (self, stack):
super ().create_socket (stack, socket.SOCK_DGRAM)
def _serve (self, shutdown_phase = 2):
self.shutdown_phase = shutdown_phase
def handle_connect(self):
pass
def handle_read (self):
ret = self.recv (self.buffer_size)
if not ret:
return
data, addr = ret
if data:
http3_channel (self, data, addr)
def recv (self, buffer_size):
try:
return self.socket.recvfrom (buffer_size)
except MemoryError:
lifetime.shutdown (1, 1.0)
except ssl.SSLError as why:
if why.errno == ssl.SSL_ERROR_WANT_READ:
try:
raise BlockingIOError
except NameError:
raise socket.error (EWOULDBLOCK)
# closed connection
elif why.errno in (ssl.SSL_ERROR_ZERO_RETURN, ssl.SSL_ERROR_EOF):
self.handle_close ()
return None
else:
raise
| mit | Python | |
4f865a4249ea3e5b349f2ab57d8b50c5582bf848 | add git script | probml/pyprobml,probml/pyprobml,probml/pyprobml,probml/pyprobml | scripts/git_colab.py | scripts/git_colab.py | # test script
def git_command(cmd):
print('git command', cmd)
| mit | Python | |
54c22e1f6cb129d24f4b611cfef00ef66ecff356 | Create API for batch | ikinsella/squall,ikinsella/squall,ikinsella/squall,ikinsella/squall | flaskapp/appname/controllers/batchAPI.py | flaskapp/appname/controllers/batchAPI.py | from flask import Flask, jsonify, abort, request, session, Blueprint
from flask_restful import Resource, Api
from appname.models import (db, Batch)
from flask.ext.restless import APIManager
from flask.ext.sqlalchemy import SQLAlchemy
batchAPI = Blueprint('batchAPI', __name__)
@batchAPI.route('/addBatch', methods = ['POST'])
def add_batch():
if not request.json or not 'name' in request.json:
abort(400)
b = Batch(request.json["name"], request.json["description"])
db.session.add(b)
db.session.commit()
return jsonify( {'batch': b.serialize } ), 201
@batchAPI.route('/getBatch', methods = ['GET'])
def getBatch():
return jsonify( json_list = [bt.serialize for bt in Batch.query.all()] )
| bsd-3-clause | Python | |
7bcbd81ece25f5967248c650ee61e85e52c67203 | Add ComparisonOperator to enums | suutari/shoop,suutari/shoop,suutari-ai/shoop,suutari-ai/shoop,suutari-ai/shoop,shoopio/shoop,shoopio/shoop,shoopio/shoop,suutari/shoop | shuup/utils/enums.py | shuup/utils/enums.py | # -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2016, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from django.utils.translation import ugettext_lazy as _
from enumfields import Enum
class ComparisonOperator(Enum):
EQUALS = 0
GTE = 1
LTE = 2
class Labels:
EQUALS = _('Exactly')
GTE = _('Greater than or equal to')
LTE = _("Lower than or equal to")
| agpl-3.0 | Python | |
c58de5a29fb324b9fcd661319e4b6df9b5f189c0 | fix an error in appeaser.py | ranjinidas/Axelrod,marcharper/Axelrod,ranjinidas/Axelrod,marcharper/Axelrod | axelrod/strategies/appeaser.py | axelrod/strategies/appeaser.py | from axelrod import Player
class Appeaser(Player):
"""
A player who tries to guess what the opponent wants, switching his
behaviour every time the opponent plays 'D'.
"""
def strategy(self, opponent):
"""
Start with 'C', switch between 'C' and 'D' when opponent plays 'D'.
"""
if len(self.history) == 0:
self.str = 'C'
if len(opponent.history) > 0:
if opponent.history[-1] == 'D':
if self.str == 'C':
self.str = 'D'
else:
self.str = 'C'
return self.str
def __repr__(self):
"""
The string method for the strategy:
"""
return 'Appeaser'
| from axelrod import Player
class Appeaser(Player):
"""
A player who tries to guess what the opponent wants, switching his
behaviour every time the opponent plays 'D'.
"""
def strategy(self, opponent):
"""
Start with 'C', switch between 'C' and 'D' when opponent plays 'D'.
"""
if len(self.history) == 0:
self.str = 'C'
if opponent.history[-1] == 'D':
if self.str == 'C':
self.str = 'D'
else:
self.str = 'C'
return self.str
def __repr__(self):
"""
The string method for the strategy:
"""
return 'Appeaser'
| mit | Python |
86041833c9db3a7b6aa764a6bd449be563c2ede8 | Add mocked tests | demisto/demisto-py,demisto/demisto-py | mock_tests.py | mock_tests.py | from urllib3_mock import Responses
import demisto_client
responses = Responses('requests.packages.urllib3')
api_key = 'sample_api_key'
host = 'http://localhost:8080'
@responses.activate
def test_get_docker_images():
body = '{ "images": [{"id": "aae7b8aaba8c", "repository": ' \
'"openapitools/openapi-generator-cli", "tag": "latest", "createdSince": "3 days ago", ' \
'"createdAt": "2019-08-19 13:34:22 +0300 IDT", "size": "131MB" }]}'
responses.add('GET', '/settings/docker-images',
body=body,
status=200,
content_type='application/json')
# create an instance of the API class
api_instance = demisto_client.configure(hostname=host, api_key=api_key)
api_response = api_instance.get_docker_images()
assert api_response.images[0].created_at == '2019-08-19 13:34:22 +0300 IDT'
assert api_response.images[0].id == 'aae7b8aaba8c'
assert api_response.images[0].repository == 'openapitools/openapi-generator-cli'
assert len(responses.calls) == 1
assert responses.calls[0].request.url == '/settings/docker-images'
assert responses.calls[0].request.host == 'localhost'
assert responses.calls[0].request.scheme == 'http'
@responses.activate
def test_create_incident():
body = '{"name":"Test Incident","owner":"Admin","parent":"","phase":"",' \
'"playbookId":"playbook0","playbook_id":"playbook0","rawCategory":"",' \
'"rawCloseReason":"","rawJSON":"","rawName":"Test Incident","rawPhase":"",' \
'"rawType":"Unclassified","raw_category":"","raw_close_reason":"","raw_json":"",' \
'"raw_name":"Test Incident","raw_phase":"","raw_type":"Unclassified","reason":"",' \
'"runStatus":"","run_status":"","severity":0,"sourceBrand":"Manual",' \
'"sourceInstance":"admin","source_brand":"Manual","source_instance":"admin",' \
'"status":0,"type":"Unclassified","version":1}'
responses.add('POST', '/incident',
body=body,
status=200,
content_type='application/json')
# create an instance of the API class
api_instance = demisto_client.configure(hostname=host, api_key=api_key)
create_incident_request = demisto_client.demisto_api.CreateIncidentRequest()
create_incident_request.name = 'Test Incident'
create_incident_request.type = 'Unclassified'
create_incident_request.owner = 'Admin'
api_response = api_instance.create_incident(create_incident_request=create_incident_request)
print(api_response)
assert api_response.name == 'Test Incident'
assert api_response.type == 'Unclassified'
assert api_response.owner == 'Admin'
assert len(responses.calls) == 1
assert responses.calls[0].request.url == '/incident'
assert responses.calls[0].request.host == 'localhost'
assert responses.calls[0].request.scheme == 'http'
| apache-2.0 | Python | |
1ddadaab8086569fe4f13612710d8a47442a4bfd | Read data in pickles | alfredolainez/deep-nlp | data_handling.py | data_handling.py | import json
import pickle
import random
DEFAULT_REVIEWS_FILE = "data/yelp_academic_dataset_review.json"
DEFAULT_REVIEWS_PICKLE = "data/reviews.pickle"
def pickles_from_json(json_file=DEFAULT_REVIEWS_FILE, pickle_name=DEFAULT_REVIEWS_PICKLE, num_partitions=100):
"""
Dumps a json into a number of pickle partitions, which contain a list of python objects.
"""
print "Reading json file..."
object = []
with open(json_file) as json_data:
for line in json_data:
object.append(json.loads(line))
print "Shuffling resulting python objects"
random.shuffle(object)
length_partition = len(object)/num_partitions
remaining_to_process = len(object)
current_partition = 1
while remaining_to_process > 0:
print 'Working on partition {} of {}'.format(current_partition, num_partitions)
# All the remaining elements go to the last partition
if current_partition == num_partitions:
stop = None
num_in_partition = remaining_to_process
else:
stop = -remaining_to_process + length_partition
num_in_partition = length_partition
pickle.dump(object[-remaining_to_process:stop],
open(pickle_name + '.' + str(current_partition), "wb"),
pickle.HIGHEST_PROTOCOL)
current_partition += 1
remaining_to_process -= num_in_partition
def load_partitions(partition_list, pickle_base_name=DEFAULT_REVIEWS_PICKLE + '.'):
"""
Returns a python object being a list of dictionaries.
It reads the data from a sequence of files starting with the given base name. For instance:
partition_list = [2,4,6], pickle_base_name = "pickle." will read files pickle.2, pickle.4, pickle.6
"""
num_partition = 1
result = []
for partition in partition_list:
print 'Reading partition %d of %d' % (num_partition, len(partition_list))
with open(pickle_base_name + str(partition)) as file:
loaded_element = pickle.load(file)
result.extend(loaded_element)
num_partition += 1
print "Read a total of %d partitions for a total of %d objects" % (num_partition - 1, len(result))
return result
lol = load_partitions(range(1,101))
| apache-2.0 | Python | |
d26cc02d4ea3f625eee8deed67a42555245b977f | Implement KeOps MaternKernel | jrg365/gpytorch,jrg365/gpytorch,jrg365/gpytorch | gpytorch/kernels/keops/matern_kernel.py | gpytorch/kernels/keops/matern_kernel.py | #!/usr/bin/env python3
import torch
import math
from pykeops.torch import LazyTensor as KEOLazyTensor
from ..kernel import Kernel
from gpytorch.lazy import KeOpsLazyTensor
class MaternKernel(Kernel):
def __init__(self, nu=2.5, **kwargs):
if nu not in {0.5, 1.5, 2.5}:
raise RuntimeError("nu expected to be 0.5, 1.5, or 2.5")
super(MaternKernel, self).__init__(has_lengthscale=True, **kwargs)
self.nu = nu
def covar_func(self, x1, x2, diag=False):
# TODO: x1 / x2 size checks are a work around for a very minor bug in KeOps.
# This bug is fixed on KeOps master, and we'll remove that part of the check
# when they cut a new release.
if diag or x1.size(-2) == 1 or x2.size(-2) == 1:
distance = self.covar_dist(x1, x2, diag=diag)
exp_component = torch.exp(-math.sqrt(self.nu * 2) * distance)
if self.nu == 0.5:
constant_component = 1
elif self.nu == 1.5:
constant_component = (math.sqrt(3) * distance).add(1)
elif self.nu == 2.5:
constant_component = (math.sqrt(5) * distance).add(1).add(5.0 / 3.0 * distance ** 2)
return constant_component * exp_component
else:
with torch.autograd.enable_grad():
x1_ = KEOLazyTensor(x1[:, None, :])
x2_ = KEOLazyTensor(x2[None, :, :])
distance = ((x1_ - x2_) ** 2).sum(-1).sqrt()
exp_component = (-math.sqrt(self.nu * 2) * distance).exp()
if self.nu == 0.5:
constant_component = 1
elif self.nu == 1.5:
constant_component = (math.sqrt(3) * distance) + 1
elif self.nu == 2.5:
constant_component = (math.sqrt(5) * distance) + (1 + 5.0 / 3.0 * distance ** 2)
return constant_component * exp_component
def forward(self, x1, x2, diag=False, **params):
mean = x1.contiguous().view(-1, x1.size(-1)).mean(0)[(None,) * (x1.dim() - 1)]
x1_ = (x1 - mean).div(self.lengthscale)
x2_ = (x2 - mean).div(self.lengthscale)
if diag:
return self.covar_func(x1_, x2_, diag=True)
covar_func = lambda x1, x2, diag=False: self.covar_func(x1, x2, diag)
return KeOpsLazyTensor(x1_, x2_, covar_func)
| mit | Python | |
e6a7cafd95abb80b3bd235fd391c34685b4b5603 | Add preprocessor for Leipzig Corpus | KshitijKarthick/tvecs,KshitijKarthick/tvecs,KshitijKarthick/tvecs | modules/preprocessor/leipzig_preprocessor.py | modules/preprocessor/leipzig_preprocessor.py | #!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
"""Leipzig Preprocessor which inherits from BasePreprocessor."""
import os
import codecs
import regex as re
from collections import defaultdict
from base_preprocessor import BasePreprocessor
class LeipzigPreprocessor(BasePreprocessor):
"""Leipzig Preprocessor which preprocesses the Leipzig-Corpus."""
def __init__(
self,
corpus_fname,
corpus_dir_path='.',
encoding='utf-8',
need_preprocessing=False,
language=None,
limit=None
):
"""Constructor which initializes the BasePreprocessor constructor."""
self.language = language
# If language is not specified, regex pattern for split is default ''
self.lang_split_sent = defaultdict(lambda : u'')
# Specify language specific split regex pattern
lang_split_sent = [
('hindi', u'[।]'),
]
# Store language specific regex pattern in the defaultdict
for k,v in lang_split_sent:
self.lang_split_sent[k] = v
# < -- call function to preprocess leipzig corpus -- >
self.leipzig_corpus_preprocess(corpus_fname, corpus_dir_path, encoding)
# < -- call BasePreprocessor Constructor -- >
super(LeipzigPreprocessor, self).__init__(
corpus_fname,
corpus_dir_path=corpus_dir_path,
encoding=encoding,
need_preprocessing=False,
limit=limit
)
def leipzig_corpus_preprocess(self, corpus_fname, corpus_dir_path, encoding):
"""
Extract valid content from the Corpus
Store extracted corpus data in corpus_fname.preprocessed
"""
with codecs.open(
os.path.join(
corpus_dir_path, corpus_fname
), 'r', encoding='utf-8') as file:
line_split_list = file.read().split("\n")
tab_split_list = [line.split('\t')[1] for line in line_split_list]
extracted_corpus = "\n".join(tab_split_list)
with codecs.open(
os.path.join(
corpus_dir_path, '%s.preprocessed' % (corpus_fname)
), 'w', encoding='utf-8'
) as extracted_corpus_file:
extracted_corpus_file.write(extracted_corpus)
def _extract_corpus_data(self, data):
"""
Function not utilised for Leipzig Corpus
Executed only if need_preprocessing is set to True
"""
raise NotImplementedError(
"Base Class _extract_corpus_data() not implemented"
)
def _clean_word(self, word):
"""
Preprocess words after tokenizing words from sentences.
* Remove apostrophes ['s, s'].
* Bring to lowercase.
* Remove punctuations.
"""
return re.sub(
pattern=ur"((\p{P}+)|(\p{S}+))",
repl='',
string=word.lower()
).strip()
def _tokenize_sentences(self, data):
"""
Function to tokenize corpus data into sentences.
Function not utilised for Leipzig Corpus
"""
raise NotImplementedError(
"Base Class _tokenize_sentences() not implemented"
)
def _tokenize_words(self, sentence):
"""Tokenize Words from sentences."""
return sentence.split()
BasePreprocessor.register(LeipzigPreprocessor)
| mit | Python | |
c724e6f825c801c864c4498f6b35f2b0bb041b47 | add item shop | ivoryhuang/LOL_simple_text_version | games/Item_Shop.py | games/Item_Shop.py | from items import *
class Item_Shop():
def __init__(self):
self.items = []
def prepare_items(self):
pass | mit | Python | |
28d07102411cbd019bdac97ce6e1251af3bffd35 | Add migration for Image | ic-labs/django-icekit,ic-labs/django-icekit,ic-labs/django-icekit,ic-labs/django-icekit | icekit/migrations/0006_image_imageitem.py | icekit/migrations/0006_image_imageitem.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('fluent_contents', '0001_initial'),
('icekit', '0005_remove_layout_key'),
]
operations = [
migrations.CreateModel(
name='Image',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('image', models.ImageField(upload_to=b'uploads/images/', verbose_name='Image field')),
('alt_text', models.CharField(help_text="A description of the image for users who don't see images.", max_length=255)),
('title', models.CharField(help_text='The title is shown in the caption.', max_length=255, blank=True)),
('caption', models.TextField(blank=True)),
('is_active', models.BooleanField(default=True)),
('admin_notes', models.TextField(help_text='Internal notes for administrators only.', blank=True)),
('categories', models.ManyToManyField(related_name='icekit_image_related', to='icekit.MediaCategory', blank=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='ImageItem',
fields=[
('contentitem_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='fluent_contents.ContentItem')),
('image', models.ForeignKey(help_text='An image from the image library.', to='icekit.Image')),
],
options={
'db_table': 'contentitem_icekit_imageitem',
'verbose_name': 'Image',
'verbose_name_plural': 'Images',
},
bases=('fluent_contents.contentitem',),
),
]
| mit | Python | |
8f3b0928a7d12da534d18e14d29f504eb432de97 | Create beta_password_generator.py | Orange9000/Codewars,Orange9000/Codewars | Solutions/beta/beta_password_generator.py | Solutions/beta/beta_password_generator.py | from random import randint
import re
ABC = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ012345678901234567890123456789"
def password_gen():
password = ''.join(ABC[randint(0,81)] for i in range(0, randint(6, 20)))
return password if re.fullmatch('(?=.*[a-z])(?=.*[A-Z])(?=.*[0-9])[a-zA-Z0-9]{6,20}', password) else password_gen()
| mit | Python | |
47bb7c90b657f4f9704e354b7fc03cb126b24d75 | FIx git mv fail | tamasgal/km3pipe,tamasgal/km3pipe | examples/stats/guess_the_dist.py | examples/stats/guess_the_dist.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Guess the distribution!
=======================
Fit several distributions to angular residuals.
Note: to fit the landau distribution, you need to have ROOT and the
``rootpy`` package installed.
"""
import h5py
import matplotlib.pyplot as plt
import numpy as np
from scipy import stats
from statsmodels.distributions.empirical_distribution import ECDF
from statsmodels.nonparametric.kde import KDEUnivariate
try:
import ROOT
import rootpy.plotting
HAS_ROOT = True
except ImportError:
HAS_ROOT = False
from km3pipe.stats import bootstrap_fit
import km3pipe.style.moritz # noqa
##################################################################
DATA_FILE = '../data/residuals.h5'
with h5py.File(DATA_FILE) as h5:
resid = h5['/residuals'][:]
##################################################################
# Fit somedistributions, and obtain the confidence intervals on the
# distribution parameters through bootstrapping.
n_bs = 5
q = 95
ln_par, ln_lo, ln_up = bootstrap_fit(stats.lognorm, resid, n_iter=n_bs, quant=q)
hc_par, hc_lo, hc_up = bootstrap_fit(stats.halfcauchy, resid, n_iter=n_bs, quant=q)
gam_par, gam_lo, gam_up = bootstrap_fit(stats.gamma, resid, n_iter=n_bs, quant=q)
##################################################################
hc = stats.halfcauchy(*stats.halfcauchy.fit(resid))
lg = stats.lognorm(*stats.lognorm.fit(resid))
dens = KDEUnivariate(resid)
dens.fit()
ecdf = ECDF(resid)
##################################################################
# prepare X axes for plotting
ex = ecdf.x
x = np.linspace(min(resid), max(resid), 2000)
##################################################################
# Fit a Landau distribution with ROOT
if HAS_ROOT:
root_hist = rootpy.plotting.Hist(100, 0, np.pi)
root_hist.fill_array(resid)
root_hist /= root_hist.Integral()
land_f = ROOT.TF1('land_f', "TMath::Landau(x, [0], [1], 0)")
fr = root_hist.fit('land_f', "S").Get()
try:
p = fr.GetParams()
land = np.array([ROOT.TMath.Landau(xi, p[0], p[1], True) for xi in x])
land_cdf = np.array([ROOT.ROOT.Math.landau_cdf(k, p[0], p[1]) for k in ex])
except AttributeError:
# wtf this fails sometimes, idk, works on root6
HAS_ROOT = False
##################################################################
# ... and plot everything.
fig, axes = plt.subplots(ncols=2, nrows=2, figsize=(6 * 2, 4 * 2))
axes[0, 0].hist(resid, bins='auto', normed=True)
axes[0, 0].plot(x, lg.pdf(x), label='Log Norm')
axes[0, 0].plot(x, hc.pdf(x), label='Half Cauchy')
if HAS_ROOT:
axes[0, 0].plot(x, land, label='Landau', color='blue')
axes[0, 0].plot(x, dens.evaluate(x), label='KDE')
axes[0, 0].set_xlabel('x')
axes[0, 0].set_xlim(0, 0.3)
axes[0, 0].set_ylabel('PDF(x)')
axes[0, 0].legend()
axes[0, 1].hist(resid, bins='auto', normed=True)
axes[0, 1].plot(x, lg.pdf(x), label='Log Norm')
axes[0, 1].plot(x, hc.pdf(x), label='Half Cauchy')
if HAS_ROOT:
axes[0, 1].plot(x, land, label='Landau', color='blue')
axes[0, 1].plot(x, dens.evaluate(x), label='KDE')
axes[0, 1].set_xlabel('x')
axes[0, 1].set_ylabel('PDF(x)')
axes[0, 1].set_yscale('log')
axes[0, 1].legend()
axes[1, 0].plot(ex, 1 - lg.cdf(ex), label='Log Norm')
if HAS_ROOT:
axes[1, 0].plot(ex, 1 - land_cdf, label='Landau', color='blue')
axes[1, 0].plot(ex, 1 - hc.cdf(ex), label='Half Cauchy')
axes[1, 0].plot(ex, 1 - ecdf.y, label='Empirical CDF', linewidth=3, linestyle='--')
axes[1, 0].set_xscale('log')
axes[1, 0].set_xlabel('x')
axes[1, 0].set_ylabel('1 - CDF(x)')
axes[1, 0].legend()
axes[1, 1].loglog(ex, 1 - lg.cdf(ex), label='Log Norm')
if HAS_ROOT:
axes[1, 1].loglog(ex, 1 - land_cdf, label='Landau', color='blue')
axes[1, 1].loglog(ex, 1 - hc.cdf(ex), label='Half Cauchy')
axes[1, 1].loglog(ex, 1 - ecdf.y, label='Empirical CDF', linewidth=3, linestyle='--')
axes[1, 1].set_xlabel('x')
axes[1, 1].set_ylabel('1 - CDF(x)')
axes[1, 1].legend()
| mit | Python | |
df03481fd9b52e17bc637dacefd15bead4f07f23 | Add initial version of membership fee updater | HelsinkiHacklab/asylum,HelsinkiHacklab/asylum,HelsinkiHacklab/asylum,HelsinkiHacklab/asylum | project/creditor/management/commands/update_membershipfees.py | project/creditor/management/commands/update_membershipfees.py | # -*- coding: utf-8 -*-
import datetime
import dateutil.parser
from creditor.models import RecurringTransaction, TransactionTag
from creditor.tests.fixtures.recurring import MembershipfeeFactory
from django.core.management.base import BaseCommand, CommandError
from members.models import Member
class Command(BaseCommand):
help = 'Update membership fee RecurringTransactions'
def add_arguments(self, parser):
parser.add_argument('oldamount', type=int)
parser.add_argument('cutoffdate', type=str)
parser.add_argument('newamount', type=int)
def handle(self, *args, **options):
cutoff_dt = dateutil.parser.parse(options['cutoffdate'])
end_dt = cutoff_dt - datetime.timedelta(minutes=1)
tgt_tag = TransactionTag.objects.get(label='Membership fee', tmatch='1')
for rt in RecurringTransaction.objects.filter(
rtype=RecurringTransaction.YEARLY,
tag=tgt_tag,
end=None,
start__lt=cutoff_dt,
amount=options['oldamount']
):
rt.end = end_dt
rt.save()
newrt = MembershipfeeFactory.create(amount=options['newamount'], start=cutoff_dt, end=None, owner=rt.owner)
if options['verbosity'] > 0:
print("Generated RecurringTransaction %s" % newrt)
| mit | Python | |
d38007954c927e38112c5500ba6f8328f8d4a66d | add command `manage.py drop [user_id]` | perGENIE/pergenie,perGENIE/pergenie,knmkr/perGENIE,knmkr/perGENIE,perGENIE/pergenie-web,knmkr/perGENIE,perGENIE/pergenie-web,knmkr/perGENIE,perGENIE/pergenie-web,perGENIE/pergenie-web,perGENIE/pergenie,perGENIE/pergenie,knmkr/perGENIE,perGENIE/pergenie-web,perGENIE/pergenie-web,perGENIE/pergenie,knmkr/perGENIE | pergenie/apps/db/management/commands/drop.py | pergenie/apps/db/management/commands/drop.py | # -*- coding: utf-8 -*-
import sys, os
import glob
import shutil
from pprint import pprint
from optparse import make_option
from pymongo import MongoClient
# from termcolor import colored
from django.core.management.base import BaseCommand
from django.conf import settings
from lib.api.genomes import Genomes
genomes = Genomes()
from lib.utils import clogging
log = clogging.getColorLogger(__name__)
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option(
"--variants",
action='store_true',
help=""
),
make_option(
"--reports",
action='store_true',
help=""
),
make_option(
"--user",
action='store_true',
help=""
),
)
def handle(self, *args, **options):
if not args:
self.print_help("drop", "help")
return
log.info('Drop collections...')
with MongoClient(host=settings.MONGO_URI) as c:
db = c['pergenie']
data_info = db['data_info']
if options["variants"]:
# Drop collection `variants.file_uuid`
targets = []
for user_id in args:
targets += genomes.get_all_variants(user_id)
pprint(targets)
print '...will be deleted'
yn = raw_input('y/n > ')
if yn == 'y':
for target in targets:
db.drop_collection(target)
# Drop document in `data_info`
targets = []
for user_id in args:
targets += list(data_info.find({'user_id': user_id}))
pprint(targets)
print '...will be deleted'
yn = raw_input('y/n > ')
if yn == 'y':
for target in targets:
data_info.remove(target)
if options["reports"]:
print 'sorry, not implemented yet...'
return
if options["user"]:
print 'sorry, not implemented yet...'
return
# Remove record in Django Database
# TODO:
# Drop document in `user_info`
# TODO:
# rm `dir`
targets = []
for user_id in args:
targets += glob.glob(os.path.join(settings.UPLOAD_DIR, user_id)) # FIXME: use UUID ?
pprint(targets)
print '...will be deleted'
yn = raw_input('y/n > ')
if yn == 'y':
for target in targets:
shutil.rmtree(target) # rm -r <dir>
| agpl-3.0 | Python | |
4e8ff1f7e524d8dc843816c714e86d11e21a8562 | Add script to upload files to carto db | alejandro-mc/BDM-DDD,alejandro-mc/BDM-DDD | uploadtoCDB.py | uploadtoCDB.py | #uploadtoCDB.py
#Written By: Alejandro Morejon Cortina (Apr 2016)
#usage:python uploadtoCDB.py <username> <filecontainingkey.txt> <upload.csv>
import sys
import requests
import csv
import json
import time
#sys.argv[1] is the your cartodb user name
#sys.argv[2] is the text file containing your api key
#sys.argv[3] is the csv file to be uploaded
if __name__ == "__main__":
with open(sys.argv[2],'r') as fi:
key = fi.readline().strip('\n')
cdbusername = sys.argv[1]
#import url to cartodb account
importurl = "https://" + cdbusername +".cartodb.com/api/v1/imports/?api_key="+key
f = open(sys.argv[3],"rb")
#request to upload file to cartodb
r = requests.post(importurl,files={'file': f})
print r.text
f.close()
response = json.loads(r.text)
checkimporturl = "https://"+ cdbusername +".cartodb.com/api/v1/imports/"
status = requests.get(checkimporturl + response["item_queue_id"] + "?api_key=" + key)
#wait for upload to finish
while not json.loads(status.text)["state"] in ["complete","failure"]:
status = requests.get(checkimporturl + response["item_queue_id"] + "?api_key=" + key)
time.sleep(1)
| mit | Python | |
701d8ef1c4d97d7c58adca892992cc38bee321d5 | Update app/helpers/marshmallow/__init__.py | apipanda/openssl,apipanda/openssl,apipanda/openssl,apipanda/openssl | app/helpers/marshmallow/__init__.py | app/helpers/marshmallow/__init__.py | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from .schema import (
TableSchemaOpts,
ModelSchemaOpts,
TableSchema,
ModelSchema,
)
from .convert import (
ModelConverter,
fields_for_model,
property2field,
column2field,
field_for,
)
from .exceptions import ModelConversionError
__all__ = [
'TableSchema',
'ModelSchema',
'TableSchemaOpts',
'ModelSchemaOpts',
'ModelConverter',
'fields_for_model',
'property2field',
'column2field',
'ModelConversionError',
'field_for',
]
| mit | Python | |
fa88245727ec8dbf5ed341bb9d5d0816a33e9415 | Create Calendar.py | kejrp23/Python | Calendar.py | Calendar.py | """
Created on 3/26/27
Created for Codeacademy Training exersize.
Written by: Jason R. Pittman
Note: All comments are my own for information purpose and future building ideas.
Note: They reflect my ideas and my ideas alone
This is a calendar program for Command Line.
The program should behave in the following way:
Print a welcome message to the user
Prompt the user to view, add, update, or delete an event on the calendar
Depending on the user's input: view, add, update, or delete an event on the calendar
The program should never terminate unless the user decides to exit
"""
#This is the main structural requirements for the Calendar
from time import sleep,strftime #save time and import both at once
MY_FIRST_NAME = "Jason" #at some point let the user do this through raw_input()
calendar = {} #dictionaries use values and pairs so we will use Dates and events
#Above here we have the time elements a Consant name to work with and a dictionary
#ok from here on is the Calendar
def welcome():#This is the Splash screen or Welcome area for getting started.
#This is a possible line for adding a raw_input for the users name
print "Welcome " + MY_FIRST_NAME + "."
print "Opening your Calendar one moment please"
sleep(1)
print "Today's date is: " + strftime("%A %B %d,%Y")
print "The Current time is " + strftime("%I:%M:%S")
print "What would you like to do? "
#time to set how the calendar runs until exit
def start_calendar():
welcome()
start = True
while start:#keeps the program running unless exited by choice
user_choice = raw_input('A to Add an Event, U to Update and Existing Event, V to View an Event, D to Delete an Event, X to Exit the program: ')#This can probably be cleaned up to make more sense and take up less space. it's currently so long it's on two lines
user_choice = user_choice.upper()
#check these indentions i don't think they are right
if user_choice == 'V':
if len(calender.keys()) < 1:
print " The Calendar is Empty right now..."
start_calendar()
| artistic-2.0 | Python | |
45d097a2ac0bcc1dffaa3e13a7bfe0a7af266fd9 | Create crawl-twitter.py | JosPolfliet/snippets | crawl-twitter.py | crawl-twitter.py |
# coding: utf-8
# In[18]:
import csv, codecs, cStringIO
class UnicodeWriter:
"""
A CSV writer which will write rows to CSV file "f",
which is encoded in the given encoding.
"""
def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
# Redirect output to a queue
self.queue = cStringIO.StringIO()
self.writer = csv.writer(self.queue, dialect=dialect, **kwds)
self.stream = f
self.encoder = codecs.getincrementalencoder(encoding)()
def writerow(self, row):
self.writer.writerow([s.encode("utf-8") for s in row])
# Fetch UTF-8 output from the queue ...
data = self.queue.getvalue()
data = data.decode("utf-8")
# ... and reencode it into the target encoding
data = self.encoder.encode(data)
# write to the target stream
self.stream.write(data)
# empty queue
self.queue.truncate(0)
def writerows(self, rows):
for row in rows:
self.writerow(row)
# In[38]:
import tweepy
import csv
consumer_key = ''
consumer_secret = ''
access_token_key = ''
access_token_secret = ''
# Bounding boxes for geolocations
# Online-Tool to create boxes (c+p as raw CSV): http://boundingbox.klokantech.com/
GEOBOX_WORLD = [-180,-90,180,90]
GEOBOX_GERMANY = [5.0770049095, 47.2982950435, 15.0403900146, 54.9039819757]
GEOBOX_BELGIUM = [2.5214, 49.4753, 6.3776, 51.5087]
GEOCIRCLE_BELGIUM="50.56928286558243,4.7021484375,125km"
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token_key, access_token_secret)
api = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)
with open('tweets{}.csv'.format(time.strftime("%Y%m%d%H%M%S")), 'wb') as csvFile:
csvWriter = UnicodeWriter(csvFile)
csvWriter.writerow(["tweet_id","created_at","text","user_name",
"user_id",'user_screen_name','user_followers_count',
"favorite_count", "retweet_count", "is_quote_status", 'geo', 'lang'])
for tweet in tweepy.Cursor(api.search,q="*",geocode=GEOCIRCLE_BELGIUM).items(10):
csvWriter.writerow([tweet.id_str, str(tweet.created_at),
tweet.text, #.encode("utf-8"),
tweet.user.name,
str(tweet.user.id),
tweet.user.screen_name,
str(tweet.user.followers_count),
str(tweet.favorite_count),
str(tweet.retweet_count),
str(tweet.is_quote_status),
str(tweet.geo),
tweet.lang])
| mit | Python | |
bd312a1fd7a9633316b03d1473de84979120ae5f | Add sentiment generator | reinarduswindy/rojak,CodeRiderz/rojak,pyk/rojak,reinarduswindy/rojak,reinarduswindy/rojak,bobbypriambodo/rojak,CodeRiderz/rojak,bobbypriambodo/rojak,bobbypriambodo/rojak,rawgni/rojak,rawgni/rojak,bobbypriambodo/rojak,reinarduswindy/rojak,CodeRiderz/rojak,CodeRiderz/rojak,bobbypriambodo/rojak,bobbypriambodo/rojak,pyk/rojak,rawgni/rojak,CodeRiderz/rojak,pyk/rojak,pyk/rojak,CodeRiderz/rojak,rawgni/rojak,CodeRiderz/rojak,pyk/rojak,reinarduswindy/rojak,reinarduswindy/rojak,rawgni/rojak,pyk/rojak,bobbypriambodo/rojak,pyk/rojak,rawgni/rojak,rawgni/rojak,reinarduswindy/rojak | rojak-database/generate_sentiment_data.py | rojak-database/generate_sentiment_data.py | import MySQLdb as mysql
from faker import Factory
# Open database connection
db = mysql.connect('localhost', 'root', 'rojak', 'rojak_database')
# Create new db cursor
cursor = db.cursor()
sql = '''
INSERT INTO `sentiment`(`name`)
VALUES ('{}');
'''
MAX_SENTIMENT=10
for i in xrange(MAX_SENTIMENT):
# Generate random data for the sentiment
sentiment_name = 'sentiment_{}'.format(i + 1)
# Parse the SQL command
insert_sql = sql.format(sentiment_name)
# insert to the database
try:
cursor.execute(insert_sql)
db.commit()
except mysql.Error as err:
print("Something went wrong: {}".format(err))
db.rollback()
# Close the DB connection
db.close()
| bsd-3-clause | Python | |
46de363a67bc1bf7bd403625618bfd4b319c230b | Add dein/log source | Shougo/dein.vim,Shougo/dein.vim,Shougo/dein.vim | rplugin/python3/denite/source/dein_log.py | rplugin/python3/denite/source/dein_log.py | # ============================================================================
# FILE: dein.py
# AUTHOR: delphinus <delphinus@remora.cx>
# License: MIT license
# ============================================================================
import re
from denite.source.base import Base
HEADER_RE = re.compile(r'^\s*[a-zA-Z_]\w*://')
SPACE_RE = re.compile(r'^\s+')
DEIN_LOG_SYNTAX_HIGHLIGHT = [
{'name': 'Progress', 're': r'\[[ =]\+\]', 'link': 'String'},
{'name': 'Source', 're': r'|.\{-}|', 'link': 'Type'},
{'name': 'URI', 're': r'-> diff URI', 'link': 'Underlined'},
]
class Source(Base):
def __init__(self, vim):
super().__init__(vim)
self.name = 'dein/log'
def on_init(self, context):
context['__source_log'] = []
def gather_candidates(self, context):
dein_context = self.vim.call('dein#install#_get_context')
context['is_async'] = bool(dein_context)
if context['args'] and context['args'][0] == '!':
log_func = 'dein#install#_get_updates_log'
else:
log_func = 'dein#install#_get_log'
logs = self.vim.call(log_func)
def make_candidates(row):
return {
'word': ' -> diff URI',
'kind': 'file',
'action__path': SPACE_RE.sub('', row),
} if HEADER_RE.match(row) else {'word': row, 'kind': 'word'}
rows = len(context['__source_log'])
candidates = list(map(make_candidates, logs[rows:]))
context['__source_log'] = logs
return candidates
def highlight(self):
for syn in DEIN_LOG_SYNTAX_HIGHLIGHT:
self.vim.command(
'syntax match {0}_{1} /{2}/ contained containedin={0}'
.format(self.syntax_name, syn['name'], syn['re']))
self.vim.command(
'highlight default link {0}_{1} {2}'
.format(self.syntax_name, syn['name'], syn['link']))
| mit | Python | |
06290c7ddd164ab508207713e5dff304a1907986 | Create Pedidolistar.py | AEDA-Solutions/matweb,AEDA-Solutions/matweb,AEDA-Solutions/matweb,AEDA-Solutions/matweb,AEDA-Solutions/matweb | backend/Models/Grau/Pedidolistar.py | backend/Models/Grau/Pedidolistar.py | from Framework.Pedido import Pedido
from Framework.ErroNoHTTP import ErroNoHTTP
class PedidoListar(Pedido):
def __init__(self,variaveis_do_ambiente):
super(PedidoListar, self).__init__(variaveis_do_ambiente)
try:
self.nome = self.corpo['nome']
except:
raise ErroNoHTTP(400)
def getNome(self):
return self.nome
| mit | Python | |
deb1987bcfb04a773cbc2e826a6d61790a95759b | Create KeepHorizontalHand.py | MyRobotLab/pyrobotlab,MyRobotLab/pyrobotlab,MyRobotLab/pyrobotlab,MyRobotLab/pyrobotlab,MyRobotLab/pyrobotlab | home/juerg/KeepHorizontalHand.py | home/juerg/KeepHorizontalHand.py | i01 = Runtime.start("i01", "InMoov")
hand = i01.startRightHand("COM15")
arduino = Runtime.getService("i01.right")
keepHorizontalOutPin = 13
boolean keepHorizontal = False
def keepHorizontalStart():
arduino.digitalWrite(keepHorizontalOutPin, 1)
keepHorizontal = True;
i01.rightHand.wrist.detach()
def keepHorizontalStop():
arduino.digitalWrite(keepHorizontalOutPin, 0)
keepHorizontal = False;
i01.rightHand.wrist.attach()
| apache-2.0 | Python | |
cd44a9c7157f637ebeb289af11067d6c0d217fe9 | add errors and debugging example | nsone/nsone-python,ns1/nsone-python | examples/errors-and-debugging.py | examples/errors-and-debugging.py | #
# Copyright (c) 2014 NSONE, Inc.
#
# License under The MIT License (MIT). See LICENSE in project root.
#
from nsone import NSONE, Config
# to enable verbose logging, set 'verbosity' in the config and use
# the standard python logging system
config = Config()
config.createFromAPIKey('qACMD09OJXBxT7XOwv9v')
config['verbosity'] = 5
import logging
logging.basicConfig(level=logging.DEBUG)
print(config)
nsone = NSONE(config=config)
# now all requests will show up in the logging system
# exception handling:
# the follow exceptions may be thrown
# from nsone.rest.errors import ResourceException, \
# RateLimitException, AuthException
# ResourceException is the base exception (Auth and RateLimit extend it)
# it (and therefore they) have the properties message, response, body
# AuthException is raised when apikey is incorrect or the key doesn't
# have permission to the requested resource
# RateLimitException is raised when rate limit is exceed
# you can access the properties by, limit, and period to calculate backoff
# ResourceException is raised in any other exception situation
| mit | Python | |
70cec0f09d35c2ad202a3adc3d15bc7597de1b7a | Add monkey-patch test | reclosedev/requests-cache,YetAnotherNerd/requests-cache,femtotrader/requests-cache | tests/test_monkey_patch.py | tests/test_monkey_patch.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Path hack
import os, sys
sys.path.insert(0, os.path.abspath('..'))
import unittest
import time
import json
from collections import defaultdict
import requests
from requests import Request
import requests_cache
from requests_cache import CachedSession
from requests_cache.compat import bytes, str, is_py3
CACHE_NAME = 'requests_cache_test'
CACHE_BACKEND = 'sqlite'
FAST_SAVE = False
class MonkeyPatchTestCase(unittest.TestCase):
def test_session(self):
requests_cache.configure(name=CACHE_NAME, backend=CACHE_BACKEND)
self.assert_(isinstance(requests.Session(), CachedSession))
self.assert_(isinstance(requests.sessions.Session(), CachedSession))
self.assert_(isinstance(requests.session(), CachedSession))
if __name__ == '__main__':
unittest.main()
| bsd-2-clause | Python | |
0e725b43caae9c0f14cdc2292c6110267afaa755 | Add tests for session helpers | Candihub/pixel,Candihub/pixel,Candihub/pixel,Candihub/pixel,Candihub/pixel | apps/explorer/tests/views/test_helpers.py | apps/explorer/tests/views/test_helpers.py | from apps.explorer.views.helpers import (
get_omics_units_from_session, get_selected_pixel_sets_from_session,
set_omics_units_to_session, set_selected_pixel_sets_to_session,
)
def test_get_omics_units_from_session_returns_empty_list():
empty_session = dict()
omics_units = get_omics_units_from_session(empty_session, key='foo')
assert omics_units == []
def test_get_omics_units_from_session_returns_default_if_supplied():
empty_session = dict()
default = 'some-default-value'
omics_units = get_omics_units_from_session(
empty_session,
key='foo',
default=default
)
assert omics_units == default
def test_get_selected_pixel_sets_from_session_returns_empty_list():
empty_session = dict()
selected_pixel_sets = get_selected_pixel_sets_from_session(empty_session)
assert selected_pixel_sets == []
def test_get_selected_pixel_sets_from_session_returns_default_if_supplied():
empty_session = dict()
default = 'some-default-value'
selected_pixel_sets = get_selected_pixel_sets_from_session(
empty_session,
default=default
)
assert selected_pixel_sets == default
def test_set_omics_units_to_session():
session = dict()
omics_units = ['bar']
set_omics_units_to_session(
session,
key='foo',
omics_units=omics_units
)
assert 'explorer' in session
assert 'foo' in session['explorer']
assert session['explorer']['foo'] == omics_units
def test_set_omics_units_to_session_preserves_other_values():
session = dict()
# create a default session, without anything inside
set_omics_units_to_session(session, key='something-else')
assert 'explorer' in session
assert 'something-else' in session['explorer']
# set omics units now
omics_units = ['bar']
set_omics_units_to_session(
session,
key='foo',
omics_units=omics_units
)
assert 'explorer' in session
assert 'foo' in session['explorer']
assert 'something-else' in session['explorer']
assert session['explorer']['foo'] == omics_units
def test_set_selected_pixel_sets_to_session():
session = dict()
pixel_sets = ['bar']
set_selected_pixel_sets_to_session(
session,
pixel_sets=pixel_sets
)
assert 'explorer' in session
assert 'pixel_sets' in session['explorer']
assert session['explorer']['pixel_sets'] == pixel_sets
def test_set_selected_pixel_sets_to_session_preserves_other_values():
session = dict()
# create a default session, without anything inside
set_omics_units_to_session(session, key='something-else')
assert 'explorer' in session
assert 'something-else' in session['explorer']
# set pixel sets now
pixel_sets = ['bar']
set_selected_pixel_sets_to_session(
session,
pixel_sets=pixel_sets
)
assert 'explorer' in session
assert 'pixel_sets' in session['explorer']
assert 'something-else' in session['explorer']
assert session['explorer']['pixel_sets'] == pixel_sets
| bsd-3-clause | Python | |
7c41770adac83bcb00424649d7d248e996363e9b | add 1st test | simpeg/simpeg | tests/em/nsem/forward/test_Recursive1D_VsAnalyticHalfspace.py | tests/em/nsem/forward/test_Recursive1D_VsAnalyticHalfspace.py | import unittest
from SimPEG.electromagnetics import natural_source as nsem
from SimPEG import maps
import numpy as np
from scipy.constants import mu_0
def create_survey(freq):
receivers_list = [
nsem.receivers.AnalyticReceiver1D(component='real'),
nsem.receivers.AnalyticReceiver1D(component='imag'),
nsem.receivers.AnalyticReceiver1D(component='app_res'),
nsem.receivers.AnalyticReceiver1D(component='phase')
]
source_list = [nsem.sources.AnalyticPlanewave1D(receivers_list, freq)]
return nsem.survey.Survey1D(source_list)
def true_solution(freq, sigma_half):
# -ve sign can be removed if convention changes
soln = np.r_[
-np.sqrt(np.pi*freq*mu_0/sigma_half),
-np.sqrt(np.pi*freq*mu_0/sigma_half),
1/sigma_half,
45.
]
return soln
def compute_simulation_error(freq, sigma_half):
layer_thicknesses = np.array([100.])
conductivity_model = sigma_half*np.ones(2)
model_mapping = maps.IdentityMap()
survey = create_survey(np.array([freq]))
simulation = nsem.simulation_1d.Simulation1DRecursive(
survey=survey, thicknesses=layer_thicknesses, sigmaMap=model_mapping
)
dpred = simulation.dpred(conductivity_model)
danal = true_solution(freq, sigma_half)
return np.abs((danal - dpred)/danal)
class TestRecursiveForward(unittest.TestCase):
def test_1(self):
self.assertTrue(np.all(compute_simulation_error(0.1, 0.001) < 1e-4))
def test_2(self):
self.assertTrue(np.all(compute_simulation_error(0.1, 1.) < 1e-4))
def test_3(self):
self.assertTrue(np.all(compute_simulation_error(100., 0.001) < 1e-4))
def test_4(self):
self.assertTrue(np.all(compute_simulation_error(100., 1.) < 1e-4))
if __name__ == '__main__':
unittest.main()
| mit | Python | |
d1ca69d9b84fbcd034ee53e50de30fe48a0869a8 | add migration to grandfather odata feed privileges to advanced and above plans | dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq | corehq/apps/accounting/migrations/0044_grandfather_odata_privs.py | corehq/apps/accounting/migrations/0044_grandfather_odata_privs.py | # Generated by Django 1.11.21 on 2019-10-07 13:21
from django.core.management import call_command
from django.db import migrations
from corehq.privileges import ODATA_FEED
from corehq.util.django_migrations import skip_on_fresh_install
@skip_on_fresh_install
def _grandfather_odata_privs(apps, schema_editor):
call_command('cchq_prbac_bootstrap')
call_command(
'cchq_prbac_grandfather_privs',
ODATA_FEED,
skip_edition='Paused,Community,Standard,Pro',
noinput=True,
)
class Migration(migrations.Migration):
dependencies = [
('accounting', '0043_grandfather_case_privs'),
]
operations = [
migrations.RunPython(_grandfather_odata_privs),
]
| bsd-3-clause | Python | |
c2be8321712c46e1f94dae42f5003e960e8330c3 | Create hp.py | OpM3TA/Python-Change-HP-Printer-Display | hp.py | hp.py | import os,sys,socket
printer = socket.socket()
"""
Perl stuff ->
\e%-12345X\@PJL JOB
\@PJL RDYMSG DISPLAY="Hello"
\@PJL EOJ
\e%-12345X
UEL = chr(0x1B)+chr(0x25)+chr(0x2D)+chr(0x31)+chr(0x32)+chr(0x33)+chr(0x34)+chr(0x35)+chr(0x58)
buf = UEL + '...@PJL RDYMSG DISPLAY = "%s"\n' % (message) + UEL + "\n"
"""
def create_payload(display_msg):
if len(display_msg) >16:
print "Message is too long! Keep it under 16 characters."
print "Your message was "+str(len(display_msg))+" characters"
return ""
else:
stage1="\x1b%-12345X@PJL JOB\r\n"
stage2='@PJL RDYMSG DISPLAY="'+display_msg+'"\r\n'
stage3="@PJL EOJ\r\n"
end = "\x1b%-12345X\r\n"
return stage1+stage2+stage3+end
payload= create_payload(raw_input("Display Message: "))
print payload
# printer_host = raw_input("Enter the IP Address of the printer: ")
# printer.connect((printer_host, 9100))
# Let's hope this works
# printer.send(payload)
# printer.close()
| unlicense | Python | |
bee511bc002395b3c55e7bd7664947857fd402da | Add bst.py | kentsommer/bst-amortized-analysis | bst.py | bst.py | import random
from tabulate import tabulate
class Node:
def __init__(self, value, parent=None):
self.value = value
self.left = None
self.right = None
self.parent = parent
class BST:
def __init__(self):
self.root = None
def insert(self, value):
if self.root is None:
self.root = Node(value)
current = self.root
while current:
if value == current.value:
return
elif value < current.value:
if current.left:
current = current.left
else:
current.left = Node(value, current)
return
else:
if current.right:
current = current.right
else:
current.right = Node(value, current)
return
def gen_random_tree(self):
num_nodes = random.randint(50, 100)
for i in range(0, num_nodes):
self.insert(random.randint(0, 1000))
def findmostleft(node, stepcount=0):
stepcount = stepcount
current = node
while True:
if current.left != None:
current = current.left
stepcount += 1
continue
break
return current, stepcount
def findnext(node):
stepcount = 0
if node.right != None:
return findmostleft(node.right, 1)
else:
parent = node.parent
stepcount += 1
while parent != None:
if parent.left == node:
break
node = parent
parent = node.parent
stepcount += 1
return parent, stepcount
def inorder(node):
stepcount = 0
first = findmostleft(node)
stepcount += first[1]
current = first[0]
for x in range(1, bst_size(node)):
next = findnext(current)
stepcount += next[1]
current = next[0]
return stepcount
def bst_size(root, count=0):
if root is None:
return count
return bst_size(root.left, bst_size(root.right, count + 1))
if __name__ == '__main__':
table = []
for i in range(1, 51):
cTree = BST()
cTree.gen_random_tree()
count = inorder(cTree.root)
size = bst_size(cTree.root)
table.append(["table " + str(i), str(size), str(count), str(2 * (size-1))])
print tabulate(table, headers=["Table Number", "Table Size", "Step Count", "2(N-1)"])
| mit | Python | |
5244eaa269118036b255edfb51a3bd72dc9b0320 | Add migration for reviews subscriptions (#10) | laurenrevere/osf.io,baylee-d/osf.io,binoculars/osf.io,sloria/osf.io,crcresearch/osf.io,leb2dg/osf.io,erinspace/osf.io,cslzchen/osf.io,crcresearch/osf.io,CenterForOpenScience/osf.io,HalcyonChimera/osf.io,mattclark/osf.io,laurenrevere/osf.io,adlius/osf.io,adlius/osf.io,leb2dg/osf.io,binoculars/osf.io,adlius/osf.io,aaxelb/osf.io,saradbowman/osf.io,brianjgeiger/osf.io,mfraezz/osf.io,Johnetordoff/osf.io,aaxelb/osf.io,caseyrollins/osf.io,saradbowman/osf.io,icereval/osf.io,CenterForOpenScience/osf.io,brianjgeiger/osf.io,crcresearch/osf.io,felliott/osf.io,chennan47/osf.io,pattisdr/osf.io,HalcyonChimera/osf.io,pattisdr/osf.io,CenterForOpenScience/osf.io,sloria/osf.io,aaxelb/osf.io,adlius/osf.io,caseyrollins/osf.io,mfraezz/osf.io,sloria/osf.io,TomBaxter/osf.io,aaxelb/osf.io,Johnetordoff/osf.io,felliott/osf.io,cslzchen/osf.io,felliott/osf.io,leb2dg/osf.io,baylee-d/osf.io,HalcyonChimera/osf.io,mattclark/osf.io,leb2dg/osf.io,mattclark/osf.io,pattisdr/osf.io,mfraezz/osf.io,Johnetordoff/osf.io,CenterForOpenScience/osf.io,baylee-d/osf.io,TomBaxter/osf.io,cslzchen/osf.io,HalcyonChimera/osf.io,icereval/osf.io,icereval/osf.io,brianjgeiger/osf.io,TomBaxter/osf.io,brianjgeiger/osf.io,erinspace/osf.io,cslzchen/osf.io,chennan47/osf.io,caseyrollins/osf.io,binoculars/osf.io,felliott/osf.io,Johnetordoff/osf.io,laurenrevere/osf.io,erinspace/osf.io,mfraezz/osf.io,chennan47/osf.io | osf/migrations/0061_auto_20171002_1438.py | osf/migrations/0061_auto_20171002_1438.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-10-02 19:38
from __future__ import unicode_literals
import logging
from django.db import migrations
from osf.models import OSFUser
from osf.models import NotificationSubscription
from website.notifications.utils import to_subscription_key
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
def add_reviews_notification_setting(*args, **kwargs):
active_users = OSFUser.objects.filter(date_confirmed__isnull=False).exclude(date_disabled__isnull=False).exclude(is_active=False).order_by('id')
total_active_users = active_users.count()
reviews_notification = 'global_reviews'
logger.info('About to add a global_reviews setting for {} users.'.format(total_active_users))
total_created = 0
for user in active_users.iterator():
user_subscription_id = to_subscription_key(user._id, reviews_notification)
subscription = NotificationSubscription.load(user_subscription_id)
if not subscription:
logger.info('No {} subscription found for user {}. Subscribing...'.format(reviews_notification, user._id))
subscription = NotificationSubscription(_id=user_subscription_id, owner=user, event_name=reviews_notification)
subscription.save() # Need to save in order to access m2m fields
subscription.add_user_to_subscription(user, 'email_transactional')
else:
logger.info('User {} already has a {} subscription'.format(user._id, reviews_notification))
total_created += 1
logger.info('Added subscriptions for {}/{} users'.format(total_created, total_active_users))
class Migration(migrations.Migration):
dependencies = [
('osf', '0060_reviews'),
]
operations = [
migrations.RunPython(add_reviews_notification_setting),
]
| apache-2.0 | Python | |
8955c00cdf3715b0f6403e9d049c0e221f77f7ac | Add helper script for testing | niklasf/lila-openingexplorer,niklasf/lila-openingexplorer | client.py | client.py | #!/usr/bin/env python
import chess
import chess.pgn
import requests
import random
game = chess.pgn.Game()
node = game
board = game.board()
while not board.is_game_over(claim_draw=True):
move = random.choice(list(board.legal_moves))
node = node.add_variation(move)
board.push(move)
game.headers["Result"] = board.result(claim_draw=True)
print(game)
print()
res = requests.put("http://localhost:9000/", data=str(game))
print(res)
print(res.text)
print(res)
| agpl-3.0 | Python | |
78bc3e4cb460a7f8e7f2164418744c7ca90fd2f3 | Add a superclass for mapping with hawaii as a subclass. | ocefpaf/seapy,dalepartridge/seapy,powellb/seapy | map.py | map.py | #!/usr/bin/env python
"""
map.py
State Estimation and Analysis for PYthon
Utilities for dealing with basemap plotting. These routnes are simply
abstractions over the existing basemap to make it quicker for generating
basemap plots and figures.
Written by Brian Powell on 9/4/14
Copyright (c)2013 University of Hawaii under the BSD-License.
"""
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
import os
class map(object):
def __init__(self, llcrnrlon=-180, llcrnrlat=-40, urcrnrlon=180,
urcrnrlat=40, figsize=(8.,6.), dlat=1, dlon=2):
self.basemap = Basemap(llcrnrlon=llcrnrlon, llcrnrlat=llcrnrlat,
urcrnrlon=urcrnrlon, urcrnrlat=urcrnrlat,
projection='lcc',
lat_0=urcrnrlat-(urcrnrlat-llcrnrlat)/2.,
lon_0=urcrnrlon-(urcrnrlon-llcrnrlon)/2.,
resolution='c', area_thresh=0.0)
self.figsize=figsize
self.dlon=dlon
self.dlat=dlat
self.fig=None
self.new_figure()
def new_figure(self):
if self.fig != None:
self.ax.set_axis_off()
plt.close(self.fig)
self.fig = plt.figure(figsize=self.figsize)
self.ax = self.fig.add_axes([-0.01, 0.25, 1.01, 0.7])
self.basemap.drawmapboundary(fill_color="aqua")
# Create the lat/lon lines
self.basemap.drawmeridians(np.arange(self.basemap.llcrnrlon,
self.basemap.urcrnrlon,self.dlon),color="0.5",
linewidth=0.25, dashes=[1,1,0.1,1], labels=[0,0,0,1],fontsize=12)
self.basemap.drawparallels(np.arange(self.basemap.llcrnrlat,
self.basemap.urcrnrlat,self.dlat),color="0.5",
linewidth=0.25, dashes=[1,1,0.1,1], labels=[1,0,0,0],fontsize=12)
def land(self, color="black"):
self.basemap.drawcoastlines()
self.basemap.drawcountries()
self.basemap.fillcontinents(color=color)
def zoom(self, xrange, yrange):
x,y = self.basemap(xrange, yrange)
self.ax.set_xlim(x)
self.ax.set_ylim(y)
self.fig.canvas.draw()
def pcolor(self, lon, lat, data, **kwargs):
# Pcolor requires a modification to the locations to line up with
# the geography
dlon=lon*0;
dlat=lat*0;
dlon[:,0:-1]=lon[:,1:]-lon[:,0:-1]
dlat[0:-1,:]=lat[1:,:]-lat[0:-1,:]
x,y = self.basemap(lon-dlon*0.5,lat-dlat*0.5)
self.pc = self.ax.pcolor(x,y,data,**kwargs)
def colorbar(self, label=None, cticks=None, **kwargs):
self.cax = self.fig.add_axes([0.25, 0.16, 0.5, 0.03])
self.cb = plt.colorbar(self.pc, cax=self.cax, orientation="horizontal",
ticks=cticks)
self.basemap.set_axes_limits(ax=self.ax)
if label != None:
self.cb.set_label(label)
| mit | Python | |
92840c5dc58e78bd41d4a3c7eaec58097f166585 | add spider example, crawls entry titles | margelatu/czl-scrape,lbogdan/czl-scrape,code4romania/czl-scrape,lbogdan/czl-scrape,costibleotu/czl-scrape,lbogdan/czl-scrape,lbogdan/czl-scrape,margelatu/czl-scrape,mgax/czl-scrape,margelatu/czl-scrape,code4romania/czl-scrape,mgax/czl-scrape,mgax/czl-scrape,code4romania/czl-scrape,costibleotu/czl-scrape,costibleotu/czl-scrape,lbogdan/czl-scrape,mgax/czl-scrape,costibleotu/czl-scrape,code4romania/czl-scrape,mgax/czl-scrape,margelatu/czl-scrape | justitie/just/spiders/test.py | justitie/just/spiders/test.py | # -*- coding: utf-8 -*-
import scrapy
from scrapy.loader import ItemLoader
from items import JustPublication
class TestSpider(scrapy.Spider):
name = "test"
def start_requests(self):
yield scrapy.Request(
url="http://www.just.ro/transparenta-decizionala/acte-normative/proiecte-in-dezbatere/?lcp_page0=1",
callback=self.parse)
def parse(self, response):
for li_item in response.css('#content div.entry-content ul.lcp_catlist li'):
title = li_item.css('h3.lcp_post a::text').extract_first()
item = JustPublication(title=title)
yield item
pass
| mpl-2.0 | Python | |
1f476db6a86831b2b1462cdd3b27e8f2ae3d2187 | Drop old_content table | Code4SA/pmg-cms-2,Code4SA/pmg-cms-2,Code4SA/pmg-cms-2 | alembic/versions/3d9034ad2d7d_really_drop_old_content.py | alembic/versions/3d9034ad2d7d_really_drop_old_content.py | """really drop old content
Revision ID: 3d9034ad2d7d
Revises: 1dd2771cbf39
Create Date: 2015-03-29 12:01:54.276288
"""
# revision identifiers, used by Alembic.
revision = '3d9034ad2d7d'
down_revision = '1dd2771cbf39'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
op.drop_table(u'old_content')
def downgrade():
pass
| apache-2.0 | Python | |
380acef9c27a2445366eaec88acdf9672bdeb3c1 | test class for FeedbackFeedTimelineBuilder | devilry/devilry-django,devilry/devilry-django,devilry/devilry-django,devilry/devilry-django | devilry/devilry_group/tests/test_feedbackfeed_timeline_builder.py | devilry/devilry_group/tests/test_feedbackfeed_timeline_builder.py | import devilry
from django.test import RequestFactory, TestCase
from django.utils import timezone
import htmls
import mock
from model_mommy import mommy
from devilry.devilry_group.views.feedbackfeed_student import StudentFeedbackFeedView
from devilry.devilry_group.views.feedbackfeed_timeline_builder import FeedbackFeedTimelineBuilder
from devilry.project.develop.testhelpers.corebuilder import UserBuilder2, AssignmentGroupBuilder, FeedbackSetBuilder, \
GroupCommentBuilder
class TestFeedbackFeedTimelineBuilder(TestCase, object):
def test_get_feedbacksets_for_group(self):
timelinebuilder = FeedbackFeedTimelineBuilder(None)
assignment = mommy.make_recipe('devilry.apps.core.assignment_activeperiod_start')
assignment_group = mommy.make(
'core.AssignmentGroup',
parentnode=assignment,
name='dewey'
)
feedbackset = mommy.make(
'devilry_group.FeedbackSet',
group=assignment_group,
created_datetime=timezone.now(),
deadline_datetime=timezone.now()+timezone.timedelta(days=11)
)
feedbackset1 = mommy.make(
'devilry_group.FeedbackSet',
group=assignment_group,
created_datetime=timezone.now(),
deadline_datetime=timezone.now()+timezone.timedelta(days=10)
)
self.assertEquals(2, len(timelinebuilder.get_feedbacksets_for_group(assignment_group))) | bsd-3-clause | Python | |
80a1cc839abc23a80b511c99e6a6c03b044eaf35 | Add downloader for per-polling-station data | akx/yle-kuntavaalit-2017-data | ext_download.py | ext_download.py | import argparse
import glob
import json
from kvd_utils import download_json, get_session
ext_url_template = 'https://vaalit.yle.fi/content/kv2017/{version}/electorates/{electorate}/municipalities/{municipality}/pollingDistricts/{district}/partyAndCandidateResults.json'
def download_ext_data(version):
sess = get_session()
for muni_fn in glob.glob('data/{version}/*.json'.format(version=version)):
with open(muni_fn) as infp:
muni = json.load(infp)
name = muni['calculationStatus']['name']['fi']
perc = float(muni['calculationStatus']['calculationStatusPercent'])
if perc < 100:
print('%s: %.2f%% less than 100%% percent, skipping' % (name, perc))
continue
for district in muni['pollingDistricts']:
url = ext_url_template.format(
version=version,
electorate=muni['calculationStatus']['edid'],
municipality=muni['calculationStatus']['muid'],
district=district['pdid'],
)
output_file = 'ext_data/{version}/{name}/{district}.json'.format(
version=version,
name=name,
district=district['name']['fi'].replace(' ', '_'),
)
download_json(sess, url, output_file)
if __name__ == '__main__':
ap = argparse.ArgumentParser()
ap.add_argument('version', type=int)
args = ap.parse_args()
download_ext_data(version=args.version)
| mit | Python | |
c47ace2e77f87a2a249b8e43c4611c80e63bbe03 | add verification for util/hosts.py | alfredodeza/ceph-doctor | ceph_medic/tests/util/test_hosts.py | ceph_medic/tests/util/test_hosts.py | import pytest
from ceph_medic.util import hosts, configuration
import ceph_medic
from textwrap import dedent
class TestContainerPlatform(object):
def test_unable_to_retrieve_pods(self, stub_check):
stub_check(([], ['error from command'], 1))
with pytest.raises(SystemExit) as error:
hosts.container_platform()
assert str(error.value) == 'error from command'
def test_oc_executable_fails(self, stub_check, capsys):
stub_check((['{"items": {}}'], [], 1))
with pytest.raises(SystemExit):
hosts.container_platform()
stdout, stderr = capsys.readouterr()
assert 'Unable to retrieve the pods using command' in stdout
assert 'oc --request-timeout=5 get -n rook-ceph pods -o json' in stdout
def test_kubectl_executable_fails(self, stub_check, capsys):
stub_check((['{"items": {}}'], [], 1))
with pytest.raises(SystemExit):
hosts.container_platform('kubernetes')
stdout, stderr = capsys.readouterr()
assert 'Unable to retrieve the pods using command' in stdout
assert 'kubectl --request-timeout=5 get -n rook-ceph pods -o json' in stdout
def test_no_context(self, stub_check):
check = stub_check((['{"items": {}}'], [], 1))
with pytest.raises(SystemExit):
hosts.container_platform('kubernetes')
command = check.calls[0]['args'][1]
assert command == [
'kubectl', '--request-timeout=5', 'get', '-n',
'rook-ceph', 'pods', '-o', 'json'
]
def test_kubectl_with_context(self, stub_check):
contents = dedent("""
[kubernetes]
context = 87
""")
conf = configuration.load_string(contents)
ceph_medic.config.file = conf
check = stub_check((['{"items": {}}'], [], 1))
with pytest.raises(SystemExit):
hosts.container_platform('kubernetes')
command = check.calls[0]['args'][1]
assert command == [
'kubectl', '--context', '87', '--request-timeout=5', 'get', '-n',
'rook-ceph', 'pods', '-o', 'json'
]
def test_oc_with_context(self, stub_check):
contents = dedent("""
[openshift]
context = 87
""")
conf = configuration.load_string(contents)
ceph_medic.config.file = conf
check = stub_check((['{"items": {}}'], [], 1))
with pytest.raises(SystemExit):
hosts.container_platform()
command = check.calls[0]['args'][1]
assert command == [
'oc', '--context', '87', '--request-timeout=5', 'get', '-n',
'rook-ceph', 'pods', '-o', 'json'
]
| mit | Python | |
574b069363f74de35b75b6b28ca66976e6af45bb | Fix situation where session id is an int | dimagi/commcare-hq,qedsoftware/commcare-hq,puttarajubr/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,puttarajubr/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,puttarajubr/commcare-hq,puttarajubr/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq | corehq/apps/smsforms/management/commands/migrate_sms_sessions_to_sql.py | corehq/apps/smsforms/management/commands/migrate_sms_sessions_to_sql.py | import logging
from django.core.management.base import BaseCommand
from corehq.apps.smsforms.models import XFormsSession, sync_sql_session_from_couch_session, SQLXFormsSession
from dimagi.utils.couch.database import iter_docs
class Command(BaseCommand):
args = ""
help = ""
def handle(self, *args, **options):
db = XFormsSession.get_db()
session_ids = [row['id'] for row in db.view("smsforms/sessions_by_touchforms_id")]
errors = []
for session_doc in iter_docs(db, session_ids):
try:
# Handle the old touchforms session id convention where it was
# always an int
session_id = session_doc.get("session_id", None)
if isinstance(session_id, int):
session_doc["session_id"] = str(session_id)
couch_session = XFormsSession.wrap(session_doc)
sync_sql_session_from_couch_session(couch_session)
except Exception as e:
logging.exception('problem migrating session {}: {}'.format(session_doc['_id'], e))
errors.append(session_doc['_id'])
print 'migrated {} couch sessions. there are now {} in sql'.format(
len(session_ids) - len(errors), SQLXFormsSession.objects.count()
)
if errors:
print 'errors: {}'.format(', '.join(errors))
| import logging
from django.core.management.base import BaseCommand
from corehq.apps.smsforms.models import XFormsSession, sync_sql_session_from_couch_session, SQLXFormsSession
from dimagi.utils.couch.database import iter_docs
class Command(BaseCommand):
args = ""
help = ""
def handle(self, *args, **options):
db = XFormsSession.get_db()
session_ids = [row['id'] for row in db.view("smsforms/sessions_by_touchforms_id")]
errors = []
for session_doc in iter_docs(db, session_ids):
try:
couch_session = XFormsSession.wrap(session_doc)
sync_sql_session_from_couch_session(couch_session)
except Exception as e:
logging.exception('problem migrating session {}: {}'.format(session_doc['_id'], e))
errors.append(session_doc['_id'])
print 'migrated {} couch sessions. there are now {} in sql'.format(
len(session_ids) - len(errors), SQLXFormsSession.objects.count()
)
if errors:
print 'errors: {}'.format(', '.join(errors))
| bsd-3-clause | Python |
36c31f33818c04e9122c8a5dcd325420bef94e56 | add simplisitc test for demumble | nico/demumble,nico/demumble | demumble_test.py | demumble_test.py | from __future__ import print_function
tests = [
('demumble', ''),
('demumble hello', 'hello\n'),
('demumble _Z4funcPci', 'func(char*, int)\n'),
('demumble ?Fx_i@@YAHP6AHH@Z@Z', 'int __cdecl Fx_i(int (__cdecl*)(int))\n'),
]
import os, subprocess
for t in tests:
cmd = t[0].split()
# Assume that demumble is next to this script.
cmd[0] = os.path.join(os.path.dirname(__file__) or '.', cmd[0])
out = subprocess.check_output(cmd)
if out != t[1]:
print("`%s`: Expected '%s', got '%s'" % (t[0], t[1], out))
| apache-2.0 | Python | |
803d62323c85cd9e29176f32b1094c5daca3c2dd | add wrapper for CHOLMOD cholesky factor object | Evfro/polara | polara/lib/cholesky.py | polara/lib/cholesky.py | class CholeskyFactor:
def __init__(self, factor):
self._factor = factor
self._L = None
self._transposed = False
@property
def L(self):
if self._L is None:
self._L = self._factor.L()
return self._L
@property
def T(self):
self._transposed = True
return self
def dot(self, v):
if self._transposed:
self._transposed = False
return self.L.T.dot(self._factor.apply_P(v))
else:
return self._factor.apply_Pt(self.L.dot(v))
def solve(self, y):
x = self._factor
if self._transposed:
self._transposed = False
return x.apply_Pt(x.solve_Lt(y, use_LDLt_decomposition=False))
else:
raise NotImplementedError
def update_inplace(self, A, beta):
self._factor.cholesky_inplace(A, beta=beta)
self._L = None
| mit | Python | |
1fff1dba9ac1d2fa1a825e87d36d46ce0ddec894 | Add distributions.py | ibab/python-mle | distributions.py | distributions.py |
import theano.tensor as T
from numpy import inf
from math import pi
def alltrue(vals):
ret = 1
for c in vals:
ret = ret * (1 * c)
return ret
def bound(logp, *conditions):
return T.switch(alltrue(conditions), logp, -inf)
class Distribution:
def __init__(self):
pass
class Uniform(Distribution):
def __init__(self, lower=0, upper=1, *args, **kwargs):
super(Uniform, self).__init__(*args, **kwargs)
self.lower = lower
self.upper = upper
def logp(self, value):
upper = self.upper
lower = self.lower
return T.log(1 / (upper - lower))
class Normal(Distribution):
def __init__(self, mu=0, sigma=1, *args, **kwargs):
super(Normal, self).__init__(*args, **kwargs)
self.mu = mu
self.sigma = sigma
def logp(self, value):
mu = self.mu
sigma = self.sigma
ret = -1 / sigma**2 * (value - mu)**2 + T.log(1 / (sigma**2 * 2 * pi)) / 2.
return bound(ret, sigma > 0)
class Mix2(Distribution):
def __init__(self, frac, dist1, dist2, *args, **kwargs):
super(Mix2, self).__init__(*args, **kwargs)
self.frac = frac
self.dist1 = dist1
self.dist2 = dist2
def logp(self, value):
frac = self.frac
dist1 = self.dist1
dist2 = self.dist2
return T.log(frac * T.exp(dist1.logp(value)) + (1 - frac) * T.exp(dist2.logp(value)))
| mit | Python | |
1323086d7d948253cd26e186af7fc16752b642a9 | Add main app | seguri/json-beautifier,seguri/json-beautifier,seguri/json-beautifier | json-beautifier.py | json-beautifier.py | from google.appengine.api import memcache
from webapp2_extras.security import generate_random_string
import jinja2
import json
import os
import webapp2
MEMCACHE_EXPIRE = 5 * 60 # seconds
JINJA_ENVIRONMENT = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
)
def is_json(s):
try:
json.loads(s)
except ValueError:
return False
return True
class MainPage(webapp2.RequestHandler):
def get(self):
return webapp2.Response('Make a POST with json=your_stringified_json to have it beautified.')
def post(self):
json_ = self.request.get('json')
if json_ and is_json(json_):
random_string = generate_random_string(length=16)
if memcache.add(random_string, json_, MEMCACHE_EXPIRE):
return webapp2.Response('%s/%s' % (self.request.host, random_string))
else:
self.abort(500)
else:
return webapp2.Response('ERROR: You must provide a valid json.')
class CacheHandler(webapp2.RequestHandler):
def get(self, key):
cached = memcache.get(key)
if cached:
template = JINJA_ENVIRONMENT.get_template('templates/beautified.html')
return webapp2.Response(template.render({'json': cached}))
else:
self.abort(404)
urls = (
(r'/', MainPage),
(r'/(\w+)', CacheHandler),
)
application = webapp2.WSGIApplication(urls, debug=True)
| mit | Python | |
c0aa90403183b47cd3c40afba5267d0e305cf1ca | add new module | albertbup/DeepBeliefNet,albertbup/DeepBeliefNetwork,albertbup/deep-belief-network | dbn/utils.py | dbn/utils.py | import numpy as np
def batch_generator(batch_size, data, labels=None):
"""
Generates batches of samples
:param data: array-like, shape = (n_samples, n_features)
:param labels: array-like, shape = (n_samples, )
:return:
"""
n_batches = int(np.ceil(len(data) / float(batch_size)))
idx = np.random.permutation(len(data))
data_shuffled = data[idx]
if labels is not None:
labels_shuffled = labels[idx]
for i in range(n_batches):
start = i * batch_size
end = start + batch_size
if labels is not None:
yield data_shuffled[start:end, :], labels_shuffled[start:end]
else:
yield data_shuffled[start:end, :]
def to_categorical(labels, num_classes):
"""
Converts labels as single integer to row vectors. For instance, given a three class problem, labels would be
mapped as label_1: [1 0 0], label_2: [0 1 0], label_3: [0, 0, 1] where labels can be either int or string.
:param labels: array-like, shape = (n_samples, )
:return:
"""
new_labels = np.zeros([len(labels), num_classes])
label_to_idx_map, idx_to_label_map = dict(), dict()
idx = 0
for i, label in enumerate(labels):
if label not in label_to_idx_map:
label_to_idx_map[label] = idx
idx_to_label_map[idx] = label
idx += 1
new_labels[i][label_to_idx_map[label]] = 1
return new_labels, label_to_idx_map, idx_to_label_map
| mit | Python | |
c39aec0ae76868299fbbd72f145b4b27a5bc2d67 | Create Annual_emissions.py | architecture-building-systems/CityEnergyAnalyst,architecture-building-systems/CityEnergyAnalyst,architecture-building-systems/CityEnergyAnalyst | cea/plots/comparisons/Annual_emissions.py | cea/plots/comparisons/Annual_emissions.py | from __future__ import division
from __future__ import print_function
import plotly.graph_objs as go
import cea.plots.comparisons
from cea.plots.variable_naming import NAMING, COLOR
__author__ = "Jimeno Fonseca"
__copyright__ = "Copyright 2019, Architecture and Building Systems - ETH Zurich"
__credits__ = ["Jimeno A. Fonseca"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "Daren Thomas"
__email__ = "cea@arch.ethz.ch"
__status__ = "Production"
class ComparisonsAnnualEmissionsPlot(cea.plots.comparisons.ComparisonsPlotBase):
"""Implement the "CAPEX vs. OPEX of centralized system in generation X" plot"""
name = "Annualized emissions"
def __init__(self, project, parameters, cache):
super(ComparisonsAnnualEmissionsPlot, self).__init__(project, parameters, cache)
self.analysis_fields = ["GHG_sys_connected_tonCO2",
"GHG_sys_disconnected_tonCO2",
"GHG_sys_embodied_tonCO2",
]
self.normalization = self.parameters['normalization']
self.input_files = [(x[4].get_optimization_slave_total_performance, [x[3], x[2]]) if x[2] != "today" else
(x[4].get_costs_operation_file, []) for x in self.scenarios_and_systems]
self.titley = self.calc_titles()
def calc_titles(self):
if self.normalization == "gross floor area":
titley = 'Annual emissions [kg CO2-eq/m2.yr]'
elif self.normalization == "net floor area":
titley = 'Annual emissions [kg CO2-eq/m2.yr]'
elif self.normalization == "air conditioned floor area":
titley = 'Annual emissions [kg CO2-eq/m2.yr]'
elif self.normalization == "building occupancy":
titley = 'Annual emissions [kg CO2-eq/pax.yr]'
else:
titley = 'Annual emissions [ton CO2-eq/yr]'
return titley
@property
def title(self):
if self.normalization != "none":
return "Annual Emissions per Scenario normalized to {normalized}".format(normalized=self.normalization)
else:
return "Annual Emissions per Scenario"
@property
def output_path(self):
return self.locator.get_timeseries_plots_file('scenarios_annualized_emissions')
@property
def layout(self):
return go.Layout(barmode='relative',
yaxis=dict(title=self.titley))
def calc_graph(self):
data = self.preprocessing_annual_emissions_scenarios()
graph = []
for field in self.analysis_fields:
y = data[field].values
flag_for_unused_technologies = all(v == 0 for v in y)
if not flag_for_unused_technologies:
trace = go.Bar(x=data['scenario_name'], y=y, name=NAMING[field],
marker=dict(color=COLOR[field]))
graph.append(trace)
return graph
def main():
"""Test this plot"""
import cea.config
import cea.plots.cache
config = cea.config.Configuration()
cache = cea.plots.cache.NullPlotCache()
ComparisonsAnnualEmissionsPlot(config.project,
{'scenarios-and-systems': config.plots_comparisons.scenarios_and_systems,
'normalization': config.plots_comparisons.normalization},
cache).plot(auto_open=True)
if __name__ == '__main__':
main()
| mit | Python | |
4136eca2c09fcd3d12b147b2d1d7a11a12fbd94d | Add logcatPkg | androidyue/DroidPy | logcatPkg.py | logcatPkg.py | #!/usr/bin/env python
#coding:utf-8
#author:andrewallanwallace@gmail.com
#This script is aimed to grep logs by application(User should input a packageName and then we look up for the process ids then separate logs by process ids).
import os
import sys
packageName=str(sys.argv[1])
command = "adb shell ps | grep %s | awk '{print $2}'"%(packageName)
p = os.popen(command)
##for some applications,there are multiple processes,so we should get all the process id
pid = p.readline().strip()
filters = pid
while(pid != ""):
pid = p.readline().strip()
if (pid != ''):
filters = filters + "|" + pid
#print 'command = %s;filters=%s'%(command, filters)
if (filters != '') :
cmd = 'adb logcat | grep --color=always -E "%s" '%(filters)
os.system(cmd)
| apache-2.0 | Python | |
03735a85521f43ce7af9db7bb1762e3e395bc154 | Solve knowit2017/dec15 | matslindh/codingchallenges,matslindh/codingchallenges | knowit2017/15.py | knowit2017/15.py | def cut_trees(trees):
cut = []
while trees:
cut.append(len(trees))
lowest = min(trees)
trees = [tree - lowest for tree in trees if (tree - lowest) > 0]
return cut
def test_cut_trees():
assert [6, 4, 2, 1] == cut_trees([5, 4, 4, 2, 2, 8])
if __name__ == "__main__":
cut = cut_trees([23, 74, 26, 23, 92, 92, 44, 13, 34, 23, 69, 4, 19, 94, 94, 38, 14, 9, 51, 98, 72, 46, 17, 25, 21, 87, 99, 50, 59, 53, 82, 24, 93, 16, 88, 52, 14, 38, 27, 7, 18, 81, 13, 75, 80, 11, 29, 39, 37, 78, 55, 17, 78, 12, 77, 84, 63, 29, 68, 32, 17, 55, 31, 30, 3, 17, 99, 6, 45, 81, 75, 31, 50, 93, 66, 98, 94, 59, 68, 30, 98, 57, 83, 75, 68, 85, 98, 76, 91, 23, 53, 42, 72, 77])
cut = [str(x) for x in cut]
print(', '.join(cut))
| mit | Python | |
28a2ee23218ee767e2e0ed0e9cc7228813bbfc33 | Fix docstring lies and minor code cleanup, no functional change | AlanZatarain/py-lepton,tectronics/py-lepton,jmichelsen/py-lepton,Alwnikrotikz/py-lepton,jmichelsen/py-lepton,tectronics/py-lepton,Alwnikrotikz/py-lepton,AlanZatarain/py-lepton | lepton/system.py | lepton/system.py | #############################################################################
#
# Copyright (c) 2008 by Casey Duncan and contributors
# All Rights Reserved.
#
# This software is subject to the provisions of the MIT License
# A copy of the license should accompany this distribution.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
#
#############################################################################
"""Particle system classes"""
__version__ = '$Id$'
class ParticleSystem(object):
def __init__(self, global_controllers=()):
"""Initialize the particle system, adding the specified global
controllers, if any
"""
# Tuples are used for global controllers to prevent
# unpleasant side-affects if they are added during update or draw
self.controllers = tuple(global_controllers)
self.groups = []
def add_global_controller(self, *controllers):
"""Add a global controller applied to all groups on update"""
self.controllers += controllers
def add_group(self, group):
"""Add a particle group to the system"""
self.groups.append(group)
def remove_group(self, group):
"""Remove a particle group from the system, raise ValueError
if the group is not in the system
"""
self.groups.remove(group)
def __len__(self):
"""Return the number of particle groups in the system"""
return len(self.groups)
def __iter__(self):
"""Iterate the system's particle groups"""
# Iterate a copy of the group list to so that the groups
# can be safely changed during iteration
return iter(list(self.groups))
def __contains__(self, group):
"""Return True if the specified group is in the system"""
return group in self.groups
def update(self, time_delta):
"""Update all particle groups in the system. time_delta is the
time since the last update (in arbitrary time units).
When updating, first the global controllers are applied to
all groups. Then update(time_delta) is called for all groups.
This method can be conveniently scheduled using the Pyglet
scheduler method: pyglet.clock.schedule_interval
"""
for group in self:
group.update(time_delta)
def run_ahead(self, time, framerate):
"""Run the particle system for the specified time frame at the
specified framerate to move time forward as quickly as possible.
Useful for "warming up" the particle system to reach a steady-state
before anything is drawn or to simply "skip ahead" in time.
time -- The amount of simulation time to skip over.
framerate -- The framerate of the simulation in updates per unit
time. Higher values will increase simulation accuracy,
but will take longer to compute.
"""
if time:
td = 1.0 / framerate
update = self.update
for i in xrange(int(time / td)):
update(td)
def draw(self):
"""Draw all particle groups in the system using their renderers.
This method is convenient to call from your Pyglet window's
on_draw handler to redraw particles when needed.
"""
for group in self:
group.draw()
| #############################################################################
#
# Copyright (c) 2008 by Casey Duncan and contributors
# All Rights Reserved.
#
# This software is subject to the provisions of the MIT License
# A copy of the license should accompany this distribution.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
#
#############################################################################
"""Particle system classes"""
__version__ = '$Id$'
class ParticleSystem(object):
def __init__(self, global_controllers=()):
"""Initialize the particle system, adding the specified global
controllers and renderers, if any
"""
# Tuples are used for global controllers and renders to prevent
# unpleasant side-affects if they are added during update or draw
self.controllers = tuple(controller for controller in global_controllers)
self.groups = []
def add_global_controller(self, *controllers):
"""Add a global controller applied to all groups on update"""
self.controllers += controllers
def add_group(self, group):
"""Add a particle group to the system"""
self.groups.append(group)
def remove_group(self, group):
"""Remove a particle group from the system, raise ValueError
if the group is not in the system
"""
self.groups.remove(group)
def __len__(self):
"""Return the number of particle groups in the system"""
return len(self.groups)
def __iter__(self):
"""Iterate the system's particle groups"""
# Iterate a copy of the group list to so that the groups
# can be safely changed during iteration
return iter(list(self.groups))
def __contains__(self, group):
"""Return True if the specified group is in the system"""
return group in self.groups
def update(self, time_delta):
"""Update all particle groups in the system. time_delta is the
time since the last update (in arbitrary time units).
When updating, first the global controllers are applied to
all groups. Then update(time_delta) is called for all groups.
This method can be conveniently scheduled using the Pyglet
scheduler method: pyglet.clock.schedule_interval
"""
for group in self:
group.update(time_delta)
def run_ahead(self, time, framerate):
"""Run the particle system for the specified time frame at the
specified framerate to move time forward as quickly as possible.
Useful for "warming up" the particle system to reach a steady-state
before anything is drawn or to simply "skip ahead" in time.
time -- The amount of simulation time to skip over.
framerate -- The framerate of the simulation in updates per unit
time. Higher values will increase simulation accuracy,
but will take longer to compute.
"""
if time:
td = 1.0 / framerate
update = self.update
for i in xrange(int(time / td)):
update(td)
def draw(self):
"""Draw all particle groups in the system using their renderers.
This method is convenient to call from you Pyglet window's
on_draw handler to redraw particles when needed.
"""
for group in self:
group.draw()
| mit | Python |
a330a2a2d2a0706815c274e5a19409e0306ef26d | add script to generate a readkey | buildtimetrend/python-lib | get_read_key.py | get_read_key.py | #!/usr/bin/env python
# vim: set expandtab sw=4 ts=4:
# Generate a read key for Keen.io trends
#
# Copyright (C) 2014 Dieter Adriaenssens <ruleant@users.sourceforge.net>
#
# This file is part of buildtime-trend
# <https://github.com/ruleant/buildtime-trend/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from buildtimetrend.keenio import keen_io_generate_read_key
if __name__ == "__main__":
# define projectname
# fe. project_name = "ruleant/buildtime-trend"
project_name = "projectname"
# generate a read key
print keen_io_generate_read_key(project_name)
| agpl-3.0 | Python | |
884f2d1620ae4eefe555ed9523a4e5504cd804ed | add test for MICE | eltonlaw/impyute | test/imputations/cs/test_mice.py | test/imputations/cs/test_mice.py | """test_mice.py"""
import unittest
import numpy as np
from impyute.datasets import test_data
from impyute.imputations.cs import mice
class TestEM(unittest.TestCase):
""" Tests for Multivariate Imputation by Chained Equations"""
def setUp(self):
"""
self.data_c: Complete dataset/No missing values
self.data_m: Incommplete dataset/Has missing values
"""
mask = np.zeros((5, 5), dtype=bool)
self.data_c = test_data(mask=mask)
mask[0][0] = True
self.data_m = test_data(mask=mask)
def test_return_type(self):
""" Check return type, should return an np.ndarray"""
imputed = mice(self.data_m)
self.assertTrue(isinstance(imputed, np.ndarray))
def test_impute_missing_values(self):
""" After imputation, no NaN's should exist"""
imputed = mice(self.data_m)
self.assertFalse(np.isnan(imputed).any())
if __name__ == "__main__":
unittest.main()
| mit | Python | |
0e2cd05c8a7b876fedcb80e0ee26e9ae1c21f607 | Create extras.py | nevrrmind/VRS-Display-1024x600 | extras.py | extras.py | # -*- coding: utf-8 -*-
warnlist =["7500","7600","7700","0020","0037"]
sqkinfo = {
"7500":"ENTFÜHRUNG",
"7600":"FUNKAUSFALL",
"7700":"NOTFALL",
"7000":"VFR-zivil",
"0037":"BPO mit Restlicht",
"1000":"IFR",
"0020":"Hubschrauber-Rettungsflüge",
}
| mit | Python | |
df258636e65cce98ac0213cdf6f7f830f9cd7656 | Add sfp_instagram | smicallef/spiderfoot,smicallef/spiderfoot,smicallef/spiderfoot | modules/sfp_instagram.py | modules/sfp_instagram.py | #-------------------------------------------------------------------------------
# Name: sfp_instagram
# Purpose: Gather information from Instagram profiles.
#
# Author: <bcoles@gmail.com>
#
# Created: 2019-07-11
# Copyright: (c) bcoles 2019
# Licence: GPL
#-------------------------------------------------------------------------------
import json
import re
from sflib import SpiderFoot, SpiderFootPlugin, SpiderFootEvent
class sfp_instagram(SpiderFootPlugin):
"""Instagram:Footprint,Investigate,Passive:Social Media::Gather information from Instagram profiles."""
# Default options
opts = {
}
# Option descriptions
optdescs = {
}
results = None
def setup(self, sfc, userOpts=dict()):
self.sf = sfc
self.results = dict()
for opt in userOpts.keys():
self.opts[opt] = userOpts[opt]
# What events is this module interested in for input
def watchedEvents(self):
return [ 'SOCIAL_MEDIA' ]
# What events this module produces
def producedEvents(self):
return [ 'RAW_RIR_DATA' ]
# Extract profile JSON from HTML
def extractJson(self, html):
m = r'<script type="application/ld\+json">(.+?)</script>'
json_data = re.findall(m, html, re.MULTILINE | re.DOTALL)
if not json_data:
return None
try:
data = json.loads(json_data[0])
except BaseException as e:
self.sf.debug('Error processing JSON response: ' + str(e))
return None
return data
# Handle events sent to this module
def handleEvent(self, event):
eventName = event.eventType
srcModuleName = event.module
eventData = event.data
if eventData in self.results:
return None
self.results[eventData] = True
self.sf.debug("Received event, " + eventName + ", from " + srcModuleName)
# Parse profile URL
try:
network = eventData.split(": ")[0]
url = eventData.split(": ")[1]
except BaseException as e:
self.sf.error("Unable to parse SOCIAL_MEDIA: " +
eventData + " (" + str(e) + ")", False)
return None
if not network == 'Instagram':
self.sf.debug("Skipping social network profile, " + url + \
", as not an Instagram profile")
return None
# Retrieve profile
res = self.sf.fetchUrl(url,
timeout=self.opts['_fetchtimeout'],
useragent=self.opts['_useragent'])
if res['content'] is None:
self.sf.debug('No response from Instagram.com')
return None
# Check if the profile is valid and extract profile data as JSON
json_data = self.extractJson(res['content'])
if not json_data:
self.sf.debug(url + " is not a valid Instagram profile")
return None
e = SpiderFootEvent('RAW_RIR_DATA', str(json_data), self.__name__, event)
self.notifyListeners(e)
# End of sfp_instagram class
| mit | Python | |
978f9c92f159b687c7238cfdd3ab30b8a15f5b9e | Add the multiprocessing example | jctanner/python-examples | multiprocessing_subprocess.py | multiprocessing_subprocess.py | #!/usr/bin/env python
import os
import sys
import subprocess
from multiprocessing import Process, Queue
def run_command_live(args, cwd=None, shell=True, checkrc=False, workerid=None):
""" Show realtime output for a subprocess """
p = subprocess.Popen(args,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
cwd=cwd,
shell=shell)
pid = p.pid
so = ""
se = ""
while p.poll() is None:
lo = p.stdout.readline() # This blocks until it receives a newline.
sys.stdout.write('worker[' + str(workerid) + '] (' + str(pid) + ') ' + lo)
so += lo
print p.stdout.read()
return (p.returncode, so, "", pid)
def mp_worker(input, output, options):
""" A worker is forked per command """
for command in iter(input.get, 'STOP'):
thispid = os.getpid()
print "worker[%s] --> command: %s" % (thispid, command)
(rc, so, se, pid) = run_command_live(command, workerid=thispid)
rdict = {
'command': command,
'rc': rc,
'so': so,
'se': se,
'pid': pid
}
output.put(rdict)
def mp_processor(commands, options={}):
""" Spawn processes for each command in a list and return the results """
NUMBER_OF_PROCESSES = len(commands)
# Create queues
task_queue = Queue()
done_queue = Queue()
# Add each command to the queue
for command in commands:
task_queue.put(command)
# Fork the processes
for i in range(NUMBER_OF_PROCESSES):
Process(target=mp_worker, args=(task_queue, done_queue, options)).start()
# Collect results
results = []
for i in range(NUMBER_OF_PROCESSES):
results.append(done_queue.get())
# End the queue
for i in range(NUMBER_OF_PROCESSES):
task_queue.put('STOP')
return results
if __name__ == "__main__":
cmd1 = "whoami"
cmd2 = "uname -a"
cmd3 = "last | head"
cmd4 = "for x in $(seq 1 10); do echo $x; sleep 1; done;"
cmd5 = "for x in $(seq 1 10); do echo $x; sleep 2; done;"
cmd6 = "for x in $(seq 1 10); do echo $x; sleep 3; done;"
commands = [cmd1, cmd2, cmd3, cmd4, cmd5, cmd6]
rdata = mp_processor(commands, options={})
| apache-2.0 | Python | |
429bb8998cebe672774dd4b5b8b1188941227568 | add craftr/libs/opencl module | creator-build/craftr | src/libs/opencl/__init__.py | src/libs/opencl/__init__.py |
namespace = 'craftr/libs/opencl'
import os, sys
import craftr, {path} from 'craftr'
import cxx from 'craftr/lang/cxx'
vendor = craftr.options.get('opencl.vendor', None)
if not vendor:
raise EnvironmentError('option not set: opencl.vendor')
if vendor == 'intel':
sdk_dir = craftr.options.get('opencl.intel_sdk', None)
if not sdk_dir:
sdk_dir = 'C:\\Intel\\OpenCL\\sdk'
if os.name == 'nt':
cxx.prebuilt(
name = 'opencl',
includes = [path.join(sdk_dir, 'include')],
libpath = [path.join(sdk_dir, 'lib', cxx.compiler.arch)],
syslibs = ['OpenCL']
)
else:
raise NotImplementedError('intel on {!r}'.format(sys.platform))
elif vendor == 'nvidia':
sdk_dir = craftr.options.get('opencl.nvidia_sdk', None)
raise NotImplementedError('nvidia')
elif vendor == 'amd':
sdk_dir = craftr.options.get('opencl.amd_sdk', None)
raise NotImplementedError('amd')
else:
raise EnvironmentError('unsupported opencl.vendor: {!r}'.format(vendor))
| mit | Python | |
d03938dfbab4301c0a302df6ae8418927049b8f5 | add types.py | st-tech/zr-obp | obp/types.py | obp/types.py | # Copyright (c) ZOZO Technologies, Inc. All rights reserved.
# Licensed under the Apache 2.0 License.
"""Types."""
from typing import Union, Dict
import numpy as np
from .policy import BaseContextFreePolicy, BaseContextualPolicy
# dataset
BanditFeedback = Dict[str, Union[str, np.ndarray]]
# policy
BanditPolicy = Union[BaseContextFreePolicy, BaseContextualPolicy]
| apache-2.0 | Python | |
faeb52d76b4dd66a60d34598a79becee26dc121e | Add the page handler for static routing. | yiyangyi/cc98-tornado | handler/page.py | handler/page.py | class AboutHandler(BaseHandler):
class FaqHandler(BaseHandler):
class RobotsHandler(BaseHandler):
class ApiHandler(BaseHandler): | mit | Python | |
f61a63888f42e2e3e89c3a9c6fb26d68f0870004 | Create generate_struct.py | vnpy/vnpy,bigdig/vnpy,bigdig/vnpy,vnpy/vnpy,bigdig/vnpy,bigdig/vnpy | vnpy/api/sgit/generator/generate_struct.py | vnpy/api/sgit/generator/generate_struct.py | """"""
import importlib
class StructGenerator:
"""Struct生成器"""
def __init__(self, filename: str, prefix: str):
"""Constructor"""
self.filename = filename
self.prefix = prefix
self.typedefs = {}
self.load_constant()
def load_constant(self):
""""""
module_name = f"{self.prefix}_typedef"
module = importlib.import_module(module_name)
for name in dir(module):
if "__" not in name:
self.typedefs[name] = getattr(module, name)
def run(self):
"""运行生成"""
self.f_cpp = open(self.filename, "r")
self.f_struct = open(f"{self.prefix}_struct.py", "w")
for line in self.f_cpp:
self.process_line(line)
self.f_cpp.close()
self.f_struct.close()
print("Struct生成成功")
def process_line(self, line: str):
"""处理每行"""
line = line.replace(";", "")
line = line.replace("\n", "")
if line.startswith("struct"):
self.process_declare(line)
elif line.startswith("{"):
self.process_start(line)
elif line.startswith("}"):
self.process_end(line)
elif "\t" in line and "///" not in line:
self.process_member(line)
def process_declare(self, line: str):
"""处理声明"""
words = line.split(" ")
name = words[1]
end = "{"
new_line = f"{name} = {end}\n"
self.f_struct.write(new_line)
def process_start(self, line: str):
"""处理开始"""
pass
def process_end(self, line: str):
"""处理结束"""
new_line = "}\n\n"
self.f_struct.write(new_line)
def process_member(self, line: str):
"""处理成员"""
words = line.split("\t")
words = [word for word in words if word]
py_type = self.typedefs[words[0]]
name = words[1]
new_line = f" \"{name}\": \"{py_type}\",\n"
self.f_struct.write(new_line)
if __name__ == "__main__":
generator = StructGenerator("../include/ctp/ThostFtdcUserApiStruct.h", "ctp")
generator.run()
| mit | Python | |
814e041dc91c76da03b30d0c5e48e9a146fe6850 | add a custom decorator to implement two legged API authentication | IRI-Research/django-chunked-uploads,IRI-Research/django-chunked-uploads,IRI-Research/django-chunked-uploads | chunked_uploads/utils/decorators.py | chunked_uploads/utils/decorators.py | from django.http import HttpResponse
from django.conf import settings
import oauth2
from django.contrib.sites.models import Site
try: from functools import wraps
except ImportError: from django.utils.functional import wraps # Python 2.4 fallback.
def oauth_required(view_func):
"""
Decorator for views to ensure that the user is sending an OAuth signed request.
"""
def _checklogin(request, *args, **kwargs):
try:
key = request.REQUEST.get('oauth_consumer_key', None)
uurl = 'http://' + Site.objects.get_current().domain + request.path # if you don't use the Site framework, you just need to provide the domain of your site/API
oreq = oauth2.Request(request.method, uurl, request.REQUEST, '', False)
server = oauth2.Server()
cons = oauth2.Consumer(key, settings.OAUTH_PARTNERS[key])
server.add_signature_method(oauth2.SignatureMethod_HMAC_SHA1())
server.verify_request(oreq, cons, None)
return view_func(request, *args, **kwargs)
except:
return HttpResponse("API ERROR")
return wraps(view_func)(_checklogin) | bsd-3-clause | Python | |
82d24f4712c0c3c6bb5ffcbdcc61addd8fb66f75 | test chewie interface | faucetsdn/faucet,anarkiwi/faucet,trentindav/faucet,anarkiwi/faucet,REANNZ/faucet,trentindav/faucet,shivarammysore/faucet,shivarammysore/faucet,trungdtbk/faucet,mwutzke/faucet,mwutzke/faucet,REANNZ/faucet,trungdtbk/faucet,gizmoguy/faucet,gizmoguy/faucet,faucetsdn/faucet | tests/unit/faucet/test_chewie.py | tests/unit/faucet/test_chewie.py | #!/usr/bin/env python
"""Unit tests run as PYTHONPATH=.. python3 ./test_chewie.py."""
import unittest
from chewie.mac_address import MacAddress
from tests.unit.faucet import test_valve
DP1_CONFIG = """
dp_id: 1
dot1x:
nfv_intf: abcdef"""
CONFIG = """
acls:
eapol_to_nfv:
- rule:
dl_type: 0x888e
actions:
output:
# set_fields:
# - eth_dst: NFV_MAC
port: p2
- rule:
eth_src: ff:ff:ff:ff:ff:ff
actions:
allow: 0
- rule:
actions:
allow: 0
eapol_from_nfv:
- rule:
dl_type: 0x888e
# eth_dst: NFV_MAC
actions:
output:
# set_fields:
# - eth_dst: 01:80:c2:00:00:03
port: p1
- rule:
actions:
allow: 0
allowall:
- rule:
actions:
allow: 1
dps:
s1:
hardware: 'GenericTFM'
%s
interfaces:
p1:
number: 1
native_vlan: v100
dot1x: True
acl_in: eapol_to_nfv
p2:
number: 2
native_vlan: v100
acl_in: eapol_from_nfv
p3:
number: 3
native_vlan: v100
acl_in: allowall
vlans:
v100:
vid: 0x100
""" % DP1_CONFIG
class FaucetDot1XTest(test_valve.ValveTestBases.ValveTestSmall):
"""Test chewie api"""
def setUp(self):
self.setup_valve(CONFIG)
def test_success_dot1x(self):
"""Test success api"""
self.dot1x.reset(valves=self.valves_manager.valves)
self.assertEqual(len(self.last_flows_to_dp[1]), 0)
self.dot1x.dot1x_speaker.auth_success(MacAddress.from_string('00:00:00:00:ab:01')) #,
# MacAddress.from_string('00:00:00:00:00:01'))
# 2 = 1 FlowMod + 1 Barrier
self.assertEqual(len(self.last_flows_to_dp[1]), 2, self.last_flows_to_dp[1])
def _test_failure_dot1x(self):
"""Test failure api"""
self.dot1x.reset(valves=self.valves_manager.valves)
self.assertEqual(len(self.last_flows_to_dp[1]), 0)
self.dot1x.dot1x_speaker.auth_faliure(MacAddress.from_string('00:00:00:00:ab:01'),
MacAddress.from_string('00:00:00:00:00:01'))
def _test_logoff_dot1x(self):
"""Test logoff api"""
self.dot1x.reset(valves=self.valves_manager.valves)
self.assertEqual(len(self.last_flows_to_dp[1]), 0)
self.dot1x.dot1x_speaker.auth_logoff(MacAddress.from_string('00:00:00:00:ab:01'),
MacAddress.from_string('00:00:00:00:00:01'))
# 2 = 1 FlowMod + 1 Barrier
self.assertEqual(len(self.last_flows_to_dp[1]), 2, self.last_flows_to_dp[1])
if __name__ == "__main__":
unittest.main() # pytype: disable=module-attr
| apache-2.0 | Python | |
3f0f00a0767a6f169fed57e1c21cff7b85239464 | Create hooke_jeeves.py | bradling/direct-search-opt | hooke_jeeves.py | hooke_jeeves.py | def hooke_jeeves(evalf, x0, s, a=1, r=0.5, kmax=1e5, smin=1e-6):
import numpy as np
import scipy as sp
# Hooke and Jeeves
k = 0 # function evaulation counter
n = x0.size
# first step
xb = x0
fxb = evalf(xb); k += 1
x, fx = pattern_search(xb, fxb, s); k += (2*n)
# note: k incremented by 2n because pattern_search calls evalf() 2n times
# keep reducing step size and continuing pattern search until success
while (fx >= fxb) and (s > smin):
s = r*s # reduce step size
x, fx = pattern_search(xb, fxb, s); k += (2*n)
# if pattern search succeeded, enter main loop
delta = x - xb
xb = x; fxb = fx
while (k < kmax) and (s > smin):
# first take acceleration step
xe = xb + a*delta
fxe = evalf(xe); k += 1
# pattern search around xe
x, fx = pattern_search(xe, fxe, s); k += (2*n)
if fx < fxb:
# patten serach succeeded; take the new point and lop
delta = x-xe
xb, fxb = x, fx
else:
# pattern search about xe failed; pattern search around xb
x, fx = pattern_search(xb, fxb, s); k += (2*n)
if fx < fxb:
delta = x-xb
xb, fxb = x, fx
else:
# patten search about xb failed; reduce s and try again
s = r*s
#endif
#endif
#endwhile
return xb, fxb, k, s
# end hooke_jeeves()
# pattern search function for use with hooke_jeeves()
def pattern_search(x, fx, s):
ii = 0
n = x.size
# loop through each dimension
while ii < n:
# define current basis vector
d = np.zeros(n)
d[ii] = 1
# look at x +/- s*d, take lowest f value
y = np.array([x, x + s*d, x-s*d])
fVals = np.array([fx, evalf(y[1]), evalf(y[2]) ])
idx = np.argmin(fVals)
x = y[idx]; fx = fVals[idx]
ii += 1
return x, fx
# end pattern_search()
| mit | Python | |
acbaafaaeb7fa7e10bc39e0cc8dc4f7ad808b35b | tidy up | lsaffre/lino,khchine5/lino,lino-framework/lino,khchine5/lino,lsaffre/lino,lino-framework/lino,lsaffre/lino,lsaffre/lino,lino-framework/lino,khchine5/lino,khchine5/lino,lino-framework/lino,khchine5/lino,lino-framework/lino,lsaffre/lino | src/lino/tools/mail.py | src/lino/tools/mail.py | ## Copyright Luc Saffre 2003-2004.
## This file is part of the Lino project.
## Lino is free software; you can redistribute it and/or modify it
## under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
## Lino is distributed in the hope that it will be useful, but WITHOUT
## ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
## or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
## License for more details.
## You should have received a copy of the GNU General Public License
## along with Lino; if not, write to the Free Software Foundation,
## Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
used by :
scripts/openmail.py
tests/etc/1.py
"""
import sys,os
import urllib
import email
import webbrowser
def mailto_url(to=None,subject=None,body=None,cc=None):
"""
encodes the content as a mailto link as described on
http://www.faqs.org/rfcs/rfc2368.html
Examples partly taken from
http://selfhtml.teamone.de/html/verweise/email.htm
"""
#url = "mailto:" + urllib.quote(to.strip())
url = "mailto:" + urllib.quote(to.strip(),"@,")
sep = "?"
if cc:
url+= sep + "cc=" + urllib.quote(cc,"@,")
sep = "&"
if subject:
url+= sep + "subject=" + urllib.quote(subject,"")
sep = "&"
if body:
# Also note that line breaks in the body of a message MUST be
# encoded with "%0D%0A". (RFC 2368)
body="\r\n".join(body.splitlines())
url+= sep + "body=" + urllib.quote(body,"")
sep = "&"
# if not confirm("okay"): return
return url
## def readmail2(filename):
## "reads a real RFC2822 file"
## msg = email.message_from_file(open(filename))
## if msg.is_multipart():
## raise "%s contains a multipart message : not supported" % filename
## return msg
def readmail(filename):
"""reads a "simplified pseudo-RFC2822" file
"""
from email.Message import Message
msg = Message()
text = open(filename).read()
text = text.decode("cp850")
text = text.encode("iso-8859-1","replace")
headersDone = False
subject = None
to = None
body = ""
for line in text.splitlines():
if headersDone:
body += line + "\n"
else:
if len(line) == 0:
headersDone = True
else:
(name,value) = line.split(':')
msg[name] = value.strip()
## if name.lower() == 'subject':
## subject = value.strip()
## elif name.lower() == 'to':
## to = value.strip()
## else:
## raise "%s : invalid header field in line %s" % (
## name,repr(line))
msg.set_payload(body)
return msg
def openmail(msg):
url = mailto_url(msg.get('to'),msg.get("subject"),msg.get_payload())
webbrowser.open(url,new=1)
| unknown | Python | |
bfc968f953ca643813214adbea1301f5fcfc0533 | Create maze_wiki | peterhogan/python,peterhogan/python | maze_wiki.py | maze_wiki.py | # Code by Erik Sweet and Bill Basener
import random
import numpy as np
from matplotlib import pyplot as plt
import matplotlib.cm as cm
num_rows = int(input("Rows: ")) # number of rows
num_cols = int(input("Columns: ")) # number of columns
M = np.zeros((num_rows,num_cols,5), dtype=np.uint8)
# The array M is going to hold the array information for each cell.
# The first four coordinates tell if walls exist on those sides
# and the fifth indicates if the cell has been visited in the search.
# M(LEFT, UP, RIGHT, DOWN, CHECK_IF_VISITED)
image = np.zeros((num_rows*10,num_cols*10), dtype=np.uint8)
# The array image is going to be the output image to display
# Set starting row and column
r = 0
c = 0
history = [(r,c)] # The history is the
# Trace a path though the cells of the maze and open walls along the path.
# We do this with a while loop, repeating the loop until there is no history,
# which would mean we backtracked to the initial start.
while history:
M[r,c,4] = 1 # designate this location as visited
# check if the adjacent cells are valid for moving to
check = []
if c > 0 and M[r,c-1,4] == 0:
check.append('L')
if r > 0 and M[r-1,c,4] == 0:
check.append('U')
if c < num_cols-1 and M[r,c+1,4] == 0:
check.append('R')
if r < num_rows-1 and M[r+1,c,4] == 0:
check.append('D')
if len(check): # If there is a valid cell to move to.
# Mark the walls between cells as open if we move
history.append([r,c])
move_direction = random.choice(check)
if move_direction == 'L':
M[r,c,0] = 1
c = c-1
M[r,c,2] = 1
if move_direction == 'U':
M[r,c,1] = 1
r = r-1
M[r,c,3] = 1
if move_direction == 'R':
M[r,c,2] = 1
c = c+1
M[r,c,0] = 1
if move_direction == 'D':
M[r,c,3] = 1
r = r+1
M[r,c,1] = 1
else: # If there are no valid cells to move to.
# retrace one step back in history if no move is possible
r,c = history.pop()
# Open the walls at the start and finish
M[0,0,0] = 1
M[num_rows-1,num_cols-1,2] = 1
# Generate the image for display
for row in range(0,num_rows):
for col in range(0,num_cols):
cell_data = M[row,col]
for i in range(10*row+1,10*row+9):
image[i,range(10*col+1,10*col+9)] = 255
if cell_data[0] == 1:image[range(10*row+1,10*row+9),10*col] = 255
if cell_data[1] == 1:image[10*row,range(10*col+1,10*col+9)] = 255
if cell_data[2] == 1:image[range(10*row+1,10*row+9),10*col+9] = 255
if cell_data[3] == 1:image[10*row+9,range(10*col+1,10*col+9)] = 255
# Display the image
plt.imshow(image, cmap = cm.Greys_r, interpolation='none')
plt.show()
| mit | Python | |
c963adc2237a6cec7b0b14034d9b802b4ba57324 | add CLI stub for realtime service | quantrocket-llc/quantrocket-client,quantrocket-llc/quantrocket-client | quantrocket/cli/subcommands/realtime.py | quantrocket/cli/subcommands/realtime.py | # Copyright 2017 QuantRocket - All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def add_subparser(subparsers):
_parser = subparsers.add_parser("realtime", description="QuantRocket realtime data CLI", help="quantrocket realtime -h")
_subparsers = _parser.add_subparsers(title="subcommands", dest="subcommand")
_subparsers.required = True
parser = _subparsers.add_parser("quote", help="get realtime quotes for securities")
parser.add_argument("-g", "--groups", nargs="*", metavar="GROUP", help="limit to these groups")
parser.add_argument("-i", "--conids", nargs="*", metavar="CONID", help="limit to these IB conids")
parser.add_argument("--exclude-groups", nargs="*", metavar="GROUP", help="exclude these groups")
parser.add_argument("--exclude-conids", nargs="*", metavar="CONID", help="exclude these conids")
parser.add_argument("-f", "--fields", nargs="*", metavar="FIELD", help="limit to these fields")
parser.add_argument("-w", "--window", metavar="HH:MM:SS", help="limit to this historical window (use Pandas timedelta string)")
parser.add_argument("-s", "--snapshot", action="store_true", help="return a snapshot of the latest quotes")
parser.set_defaults(func="quantrocket.realtime.get_quotes")
parser = _subparsers.add_parser("add", help="add securities to the realtime data stream")
parser.add_argument("-g", "--groups", nargs="*", metavar="GROUP", help="limit to these groups")
parser.add_argument("-i", "--conids", nargs="*", metavar="CONID", help="limit to these IB conids")
parser.add_argument("--exclude-groups", nargs="*", metavar="GROUP", help="exclude these groups")
parser.add_argument("--exclude-conids", nargs="*", metavar="CONID", help="exclude these conids")
parser.add_argument("-c", "--cancel-in", metavar="HH:MM:SS", help="automatically cancel the securities after this much time (use Pandas timedelta string)")
parser.set_defaults(func="quantrocket.realtime.stream_securities")
parser = _subparsers.add_parser("cancel", help="remove securities from the realtime data stream")
parser.add_argument("-g", "--groups", nargs="*", metavar="GROUP", help="limit to these groups")
parser.add_argument("-i", "--conids", nargs="*", metavar="CONID", help="limit to these IB conids")
parser.add_argument("--exclude-groups", nargs="*", metavar="GROUP", help="exclude these groups")
parser.add_argument("--exclude-conids", nargs="*", metavar="CONID", help="exclude these conids")
parser.set_defaults(func="quantrocket.realtime.cancel_stream")
| apache-2.0 | Python | |
312c55f41e818c8df155c97eabcde1d7ca262961 | add a beamdyn case execution script | OpenFAST/OpenFAST,OpenFAST/OpenFAST,OpenFAST/OpenFAST | reg_tests/lib/executeBeamdynCase.py | reg_tests/lib/executeBeamdynCase.py | #
# Copyright 2017 National Renewable Energy Laboratory
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This program executes a single BeamDyn case.
Usage: `python3 executeBeamdynCase.py input_file beamdyn_executable`
- `beamdyn_executable` is an optional argument pointing to the BeamDyn executable of choice.
- if `beamdyn_executable` is not given, an attempt will be made to find one in $PATH
Example: `python3 executeBeamdynCase.py CaseDir/case01.fst`
Example: `python3 executeBeamdynCase.py CaseDir/case01.fst beamdyn`
Example: `python3 executeBeamdynCase.py CaseDir/case01.fst openfast/install/bin/beamdyn`
"""
import os
from stat import *
import sys
import shutil
import subprocess
def exitWithError(error, code=1):
print(error)
sys.exit(code)
if len(sys.argv) != 3:
exitWithError("Invalid arguments given: {}\n".format(" ".join(sys.argv)) +
"Usage: python3 executeBeamdynCase.py case_directory beamdyn_executable")
# verify that the given input file exists
caseDirectory = sys.argv[1]
caseInputFile = "bd_driver.inp"
if not os.path.isfile(os.path.join(caseDirectory, caseInputFile)):
exitWithError("The given input file, {}, does not exist.".format(caseInputFile))
# verify that the given executable exists and can be run
executable = sys.argv[2]
if not os.path.isfile(executable):
exitWithError("The given beamdyn_driver, {}, does not exist.".format(executable))
permissionsMask = oct(os.stat(executable)[ST_MODE])[-1:]
if not int(permissionsMask)%2 == 1:
exitWithError("The given beamdyn_driver, {}, does not executable permission.".format(executable))
# execute the given case
os.chdir(caseDirectory)
command = "{} {} > {}.log".format(executable, caseInputFile, caseInputFile.split(".")[0])
print("'{}' - running".format(command))
sys.stdout.flush()
return_code = subprocess.call(command, shell=True)
print("'{}' - finished with exit code {}".format(command, return_code))
sys.exit(return_code)
| apache-2.0 | Python | |
102e8c17ca25a8fbd55212482be572ec32bfefd7 | check in a second test client | r-barnes/waterviz,r-barnes/waterviz,r-barnes/waterviz,HydroLogic/waterviz,NelsonMinar/vector-river-map,NelsonMinar/vector-river-map,NelsonMinar/vector-river-map,r-barnes/waterviz,HydroLogic/waterviz,NelsonMinar/vector-river-map,HydroLogic/waterviz,HydroLogic/waterviz | slowTiles.py | slowTiles.py | #!/usr/bin/env python
"""Test times and sizes of some particularly large or difficult tiles."""
import requests, time, grequests
urlbase = 'http://127.0.0.1:8000'
for spot in ('7/25/49', '6/13/23', '5/6/11', '5/7/12', '4/3/6'):
url = "{}/mergedRivers/{}.json".format(urlbase, spot);
start = time.time()
r = requests.get(url)
l = len(r.content);
end = time.time()
j = r.json()
assert r.status_code == 200
print "{:>8}: {:6.0f} ms {:6.0f} kb {:>6} features".format(spot, 1000*(end-start), l/1024, len(j['features']))
| bsd-3-clause | Python | |
c5c7f416e5722fade968c9c65032d7f88759a08d | add model of a subject | mapix/WebHello | model/subject.py | model/subject.py | # coding: utf-8
from datetime import datetime
class Subject(object):
def __init__(self, id, text, create_time=None, update_time=None):
self.subject_id = str(id)
self.subject_text = text
self.create_time = create_time
self.update_time = update_time
@classmethod
def new(cls, text):
create_time = datetime.now()
default = {}
default.update(subject_id='2', subject_text=text,
create_time=create_time,
update_time=create_time)
print default
# TODO:存取条目
@classmethod
def get_by_id(cls, id):
# TODO:查询存储系统,然后返回
pass
@classmethod
def get_all_subject(cls):
#TODO:全部返回
pass
def update(self, text=''):
update = {}
if text:
update.update(text=text)
if update:
update.update(update_time=datetime.now())
#TODO:更新存储系统里面相应条目
| bsd-3-clause | Python | |
92f49298f22f412a11ad7e51c630107b0e191ff5 | Add CAN_DETECT info | madhukar01/coala-bears,Vamshi99/coala-bears,Asnelchristian/coala-bears,dosarudaniel/coala-bears,naveentata/coala-bears,srisankethu/coala-bears,gs0510/coala-bears,shreyans800755/coala-bears,coala-analyzer/coala-bears,SanketDG/coala-bears,chriscoyfish/coala-bears,srisankethu/coala-bears,mr-karan/coala-bears,shreyans800755/coala-bears,seblat/coala-bears,sounak98/coala-bears,madhukar01/coala-bears,Shade5/coala-bears,coala/coala-bears,seblat/coala-bears,LWJensen/coala-bears,vijeth-aradhya/coala-bears,incorrectusername/coala-bears,Shade5/coala-bears,meetmangukiya/coala-bears,shreyans800755/coala-bears,shreyans800755/coala-bears,refeed/coala-bears,Shade5/coala-bears,naveentata/coala-bears,incorrectusername/coala-bears,horczech/coala-bears,Asnelchristian/coala-bears,aptrishu/coala-bears,sounak98/coala-bears,refeed/coala-bears,gs0510/coala-bears,Vamshi99/coala-bears,chriscoyfish/coala-bears,incorrectusername/coala-bears,meetmangukiya/coala-bears,ku3o/coala-bears,arjunsinghy96/coala-bears,seblat/coala-bears,damngamerz/coala-bears,srisankethu/coala-bears,Asnelchristian/coala-bears,ankit01ojha/coala-bears,coala/coala-bears,incorrectusername/coala-bears,kaustubhhiware/coala-bears,refeed/coala-bears,Vamshi99/coala-bears,coala/coala-bears,LWJensen/coala-bears,gs0510/coala-bears,ankit01ojha/coala-bears,ankit01ojha/coala-bears,Asnelchristian/coala-bears,Shade5/coala-bears,LWJensen/coala-bears,vijeth-aradhya/coala-bears,yash-nisar/coala-bears,incorrectusername/coala-bears,aptrishu/coala-bears,LWJensen/coala-bears,kaustubhhiware/coala-bears,chriscoyfish/coala-bears,shreyans800755/coala-bears,ankit01ojha/coala-bears,refeed/coala-bears,srisankethu/coala-bears,coala/coala-bears,coala-analyzer/coala-bears,srisankethu/coala-bears,naveentata/coala-bears,yashtrivedi96/coala-bears,incorrectusername/coala-bears,coala-analyzer/coala-bears,mr-karan/coala-bears,SanketDG/coala-bears,kaustubhhiware/coala-bears,naveentata/coala-bears,srisankethu/coala-bears,meetmangukiya/coala-bears,srisankethu/coala-bears,vijeth-aradhya/coala-bears,SanketDG/coala-bears,horczech/coala-bears,ankit01ojha/coala-bears,sounak98/coala-bears,chriscoyfish/coala-bears,refeed/coala-bears,kaustubhhiware/coala-bears,arjunsinghy96/coala-bears,madhukar01/coala-bears,ankit01ojha/coala-bears,yash-nisar/coala-bears,dosarudaniel/coala-bears,SanketDG/coala-bears,horczech/coala-bears,coala/coala-bears,mr-karan/coala-bears,vijeth-aradhya/coala-bears,arjunsinghy96/coala-bears,vijeth-aradhya/coala-bears,dosarudaniel/coala-bears,yashtrivedi96/coala-bears,seblat/coala-bears,meetmangukiya/coala-bears,chriscoyfish/coala-bears,ku3o/coala-bears,dosarudaniel/coala-bears,sounak98/coala-bears,Asnelchristian/coala-bears,arjunsinghy96/coala-bears,gs0510/coala-bears,kaustubhhiware/coala-bears,naveentata/coala-bears,damngamerz/coala-bears,horczech/coala-bears,ku3o/coala-bears,yashtrivedi96/coala-bears,aptrishu/coala-bears,incorrectusername/coala-bears,LWJensen/coala-bears,coala-analyzer/coala-bears,yashtrivedi96/coala-bears,meetmangukiya/coala-bears,coala/coala-bears,aptrishu/coala-bears,Shade5/coala-bears,coala-analyzer/coala-bears,damngamerz/coala-bears,SanketDG/coala-bears,arjunsinghy96/coala-bears,Vamshi99/coala-bears,mr-karan/coala-bears,kaustubhhiware/coala-bears,coala/coala-bears,sounak98/coala-bears,ku3o/coala-bears,SanketDG/coala-bears,ku3o/coala-bears,kaustubhhiware/coala-bears,yashtrivedi96/coala-bears,refeed/coala-bears,ankit01ojha/coala-bears,naveentata/coala-bears,shreyans800755/coala-bears,gs0510/coala-bears,Asnelchristian/coala-bears,yash-nisar/coala-bears,horczech/coala-bears,Vamshi99/coala-bears,Vamshi99/coala-bears,meetmangukiya/coala-bears,Shade5/coala-bears,gs0510/coala-bears,arjunsinghy96/coala-bears,yash-nisar/coala-bears,seblat/coala-bears,refeed/coala-bears,meetmangukiya/coala-bears,gs0510/coala-bears,naveentata/coala-bears,damngamerz/coala-bears,aptrishu/coala-bears,sounak98/coala-bears,mr-karan/coala-bears,arjunsinghy96/coala-bears,srisankethu/coala-bears,meetmangukiya/coala-bears,damngamerz/coala-bears,ku3o/coala-bears,Vamshi99/coala-bears,Vamshi99/coala-bears,sounak98/coala-bears,incorrectusername/coala-bears,chriscoyfish/coala-bears,damngamerz/coala-bears,SanketDG/coala-bears,madhukar01/coala-bears,yash-nisar/coala-bears,aptrishu/coala-bears,sounak98/coala-bears,kaustubhhiware/coala-bears,mr-karan/coala-bears,dosarudaniel/coala-bears,horczech/coala-bears,srisankethu/coala-bears,damngamerz/coala-bears,coala/coala-bears,shreyans800755/coala-bears,gs0510/coala-bears,chriscoyfish/coala-bears,seblat/coala-bears,LWJensen/coala-bears,coala/coala-bears,yashtrivedi96/coala-bears,horczech/coala-bears,yash-nisar/coala-bears,Asnelchristian/coala-bears,seblat/coala-bears,mr-karan/coala-bears,gs0510/coala-bears,ankit01ojha/coala-bears,meetmangukiya/coala-bears,horczech/coala-bears,shreyans800755/coala-bears,vijeth-aradhya/coala-bears,yashtrivedi96/coala-bears,naveentata/coala-bears,dosarudaniel/coala-bears,vijeth-aradhya/coala-bears,ku3o/coala-bears,coala/coala-bears,aptrishu/coala-bears,damngamerz/coala-bears,ku3o/coala-bears,yash-nisar/coala-bears,Vamshi99/coala-bears,coala-analyzer/coala-bears,shreyans800755/coala-bears,coala-analyzer/coala-bears,SanketDG/coala-bears,ankit01ojha/coala-bears,damngamerz/coala-bears,incorrectusername/coala-bears,ku3o/coala-bears,vijeth-aradhya/coala-bears,refeed/coala-bears,refeed/coala-bears,mr-karan/coala-bears,ankit01ojha/coala-bears,arjunsinghy96/coala-bears,naveentata/coala-bears,dosarudaniel/coala-bears,madhukar01/coala-bears,seblat/coala-bears,chriscoyfish/coala-bears,Shade5/coala-bears,aptrishu/coala-bears,horczech/coala-bears,horczech/coala-bears,Vamshi99/coala-bears,madhukar01/coala-bears,yashtrivedi96/coala-bears,Shade5/coala-bears,LWJensen/coala-bears,dosarudaniel/coala-bears,srisankethu/coala-bears,shreyans800755/coala-bears,madhukar01/coala-bears,Asnelchristian/coala-bears,yashtrivedi96/coala-bears,aptrishu/coala-bears,madhukar01/coala-bears,LWJensen/coala-bears,refeed/coala-bears,vijeth-aradhya/coala-bears,Shade5/coala-bears,dosarudaniel/coala-bears,sounak98/coala-bears,SanketDG/coala-bears,damngamerz/coala-bears,yash-nisar/coala-bears,Asnelchristian/coala-bears,damngamerz/coala-bears,coala-analyzer/coala-bears,coala-analyzer/coala-bears,LWJensen/coala-bears,aptrishu/coala-bears,shreyans800755/coala-bears,yash-nisar/coala-bears,arjunsinghy96/coala-bears,aptrishu/coala-bears,madhukar01/coala-bears,yash-nisar/coala-bears,yash-nisar/coala-bears,srisankethu/coala-bears,coala/coala-bears,coala/coala-bears,Vamshi99/coala-bears,horczech/coala-bears,refeed/coala-bears,ankit01ojha/coala-bears,kaustubhhiware/coala-bears | bears/configfiles/DockerfileLintBear.py | bears/configfiles/DockerfileLintBear.py | import json
from coalib.bearlib.abstractions.Linter import linter
from coalib.bears.requirements.NpmRequirement import NpmRequirement
from coalib.results.RESULT_SEVERITY import RESULT_SEVERITY
from coalib.results.Result import Result
@linter(executable='dockerfile_lint')
class DockerfileLintBear:
"""
Check file syntax as well as arbitrary semantic and best practice
in Dockerfiles. it also checks LABEL rules against docker images.
Uses ``dockerfile_lint`` to provide the analysis.
See <https://github.com/projectatomic/dockerfile_lint#dockerfile-lint> for
more information .
"""
LANGUAGES = {"Dockerfile"}
REQUIREMENTS = {NpmRequirement('dockerfile_lint', '0')}
AUTHORS = {'The coala developers'}
AUTHORS_EMAILS = {'coala-devel@googlegroups.com'}
LICENSE = 'AGPL-3.0'
CAN_DETECT = {'Syntax', 'Smell'}
severity_map = {
"error": RESULT_SEVERITY.MAJOR,
"warn": RESULT_SEVERITY.NORMAL,
"info": RESULT_SEVERITY.INFO}
@staticmethod
def create_arguments(filename, file, config_file):
return '--json', '-f', filename
def process_output(self, output, filename, file):
output = json.loads(output)
for severity in output:
if severity == "summary":
continue
for issue in output[severity]["data"]:
yield Result.from_values(
origin=self,
message=issue["message"],
file=filename,
severity=self.severity_map[issue["level"]],
line=issue["line"])
| import json
from coalib.bearlib.abstractions.Linter import linter
from coalib.bears.requirements.NpmRequirement import NpmRequirement
from coalib.results.RESULT_SEVERITY import RESULT_SEVERITY
from coalib.results.Result import Result
@linter(executable='dockerfile_lint')
class DockerfileLintBear:
"""
Check file syntax as well as arbitrary semantic and best practice
in Dockerfiles. it also checks LABEL rules against docker images.
Uses ``dockerfile_lint`` to provide the analysis.
See <https://github.com/projectatomic/dockerfile_lint#dockerfile-lint> for
more information .
"""
LANGUAGES = {"Dockerfile"}
REQUIREMENTS = {NpmRequirement('dockerfile_lint', '0')}
AUTHORS = {'The coala developers'}
AUTHORS_EMAILS = {'coala-devel@googlegroups.com'}
LICENSE = 'AGPL-3.0'
severity_map = {
"error": RESULT_SEVERITY.MAJOR,
"warn": RESULT_SEVERITY.NORMAL,
"info": RESULT_SEVERITY.INFO}
@staticmethod
def create_arguments(filename, file, config_file):
return '--json', '-f', filename
def process_output(self, output, filename, file):
output = json.loads(output)
for severity in output:
if severity == "summary":
continue
for issue in output[severity]["data"]:
yield Result.from_values(
origin=self,
message=issue["message"],
file=filename,
severity=self.severity_map[issue["level"]],
line=issue["line"])
| agpl-3.0 | Python |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.