commit stringlengths 40 40 | subject stringlengths 4 1.73k | repos stringlengths 5 127k | old_file stringlengths 2 751 | new_file stringlengths 2 751 | new_contents stringlengths 1 8.98k | old_contents stringlengths 0 6.59k | license stringclasses 13 values | lang stringclasses 23 values |
|---|---|---|---|---|---|---|---|---|
40dd078b5e176ae5039bf20dcb50350e8f065808 | Create python script to scroll error messages | jeffstephens/pi-resto,jeffstephens/pi-resto | recognition/scrollError.py | recognition/scrollError.py | from sense_hat import SenseHat
import sys
sense = SenseHat()
sense.show_message(sys.stdin.read(), scroll_speed=.08, text_colour=[255, 0, 0])
| mit | Python | |
d6a53b1b8acbddc16006c0c8752b44f176aecb12 | add ntuple analyser | morgenst/PyAnalysisTools,morgenst/PyAnalysisTools,morgenst/PyAnalysisTools | PyAnalysisTools/AnalysisTools/NTupleAnalyser.py | PyAnalysisTools/AnalysisTools/NTupleAnalyser.py | import os
from PyAnalysisTools.base import InvalidInputError
from PyAnalysisTools.base.YAMLHandle import YAMLLoader
from PyAnalysisTools.ROOTUtils.FileHandle import FileHandle
import pathos.multiprocessing as mp
try:
import pyAMI.client
except Exception as e:
_logger.error("pyAMI not loaded")
sys.exit(1)
class NTupleAnalyser(object):
def __init__(self, **kwargs):
if not "dataset_list" in kwargs:
raise InvalidInputError("No dataset list provided")
self.datasets = YAMLLoader.read_yaml(kwargs["dataset_list"])
self.input_path = kwargs["input_path"]
def transform_dataset_list(self):
self.datasets = [ds for campaign in self.datasets.values() for ds in campaign]
self.datasets = map(lambda ds: [ds, ".".join([ds.split(".")[1], ds.split(".")[5]])], self.datasets)
def add_path(self):
processed_datasets = os.listdir(self.input_path)
for ds in self.datasets:
match = [ds[1] in pds for pds in processed_datasets]
try:
index = match.index(True)
ds.append(processed_datasets[index])
except ValueError:
ds.append(None)
def get_events(self, ds):
n_processed_events = 0
for rf in os.listdir(os.path.join(self.input_path, ds[2])):
n_processed_events += int(FileHandle(file_name=os.path.join(self.input_path, ds[2], rf),
switch_off_process_name_analysis=True).get_number_of_total_events(True))
ds.append(n_processed_events)
client = pyAMI.client.Client('atlas')
n_expected_events = int(client.execute("GetDatasetInfo -logicalDatasetName=%s" % ds[0],
format="dict_object").get_rows()[0]["totalEvents"])
ds.append(n_expected_events)
@staticmethod
def print_summary(missing, incomplete):
print "--------------- Missing datasets ---------------"
for ds in missing:
print ds[0]
print "------------------------------------------------"
print
print
print
print "--------------- Incomplete datasets ---------------"
for ds in incomplete:
print ds[2], ds[-2], ds[-1]
def run(self):
self.transform_dataset_list()
self.add_path()
missing_datasets = filter(lambda ds: ds[2] is None, self.datasets)
self.datasets = filter(lambda ds: ds not in missing_datasets, self.datasets)
mp.ThreadPool(10).map(self.get_events, self.datasets)
incomplete_datasets = filter(lambda ds: not ds[-2] ==ds[-1], self.datasets)
self.print_summary(missing_datasets, incomplete_datasets) | mit | Python | |
67d1382c5c36e4476c56a9cd5c2e841131b07e6c | add classMulInherit.py | medifle/python_6.00.1x | classMulInherit.py | classMulInherit.py | class A(object):
def __init__(self):
self.a = 1
def x(self):
print "A.x"
def y(self):
print "A.y"
def z(self):
print "A.z"
class B(A):
def __init__(self):
A.__init__(self)
self.a = 2
self.b = 3
def y(self):
print "B.y"
def z(self):
print "B.z"
class C(object):
def __init__(self):
self.a = 4
self.c = 5
def y(self):
print "C.y"
def z(self):
print "C.z"
class D(C, B):
def __init__(self):
C.__init__(self)
B.__init__(self)
self.d = 6
def z(self):
print "D.z"
# When resolving a reference to an attribute of an object
# that's an instance of class D,
# Python first searches the object's instance variables
# then uses a simple left-to-right, depth first search through the class hierarchy.
# In this case that would mean searching D's attributes, then the class C,
# followed the class B and its superclasses (ie, class A,
# and then any superclasses it may have, et cetera). | mit | Python | |
555dc74ad29b99fd4cf4c3ba97b7edfdaf8e485f | Create next-greater-element-i.py | yiwen-luo/LeetCode,yiwen-luo/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,tudennis/LeetCode---kamyu104-11-24-2015,kamyu104/LeetCode,jaredkoontz/leetcode,kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,yiwen-luo/LeetCode,kamyu104/LeetCode,jaredkoontz/leetcode,jaredkoontz/leetcode,tudennis/LeetCode---kamyu104-11-24-2015,yiwen-luo/LeetCode,jaredkoontz/leetcode,kamyu104/LeetCode,jaredkoontz/leetcode,yiwen-luo/LeetCode | Python/next-greater-element-i.py | Python/next-greater-element-i.py | # Time: O(m + n)
# Space: O(m + n)
# You are given two arrays (without duplicates) nums1 and nums2 where nums1’s elements are subset of nums2.
# Find all the next greater numbers for nums1's elements in the corresponding places of nums2.
#
# The Next Greater Number of a number x in nums1 is the first greater number to its right in nums2.
# If it does not exist, output -1 for this number.
#
# Example 1:
# Input: nums1 = [4,1,2], nums2 = [1,3,4,2].
# Output: [-1,3,-1]
# Explanation:
# For number 4 in the first array, you cannot find the next greater number for it in the second array, so output -1.
# For number 1 in the first array, the next greater number for it in the second array is 3.
# For number 2 in the first array, there is no next greater number for it in the second array, so output -1.
# Example 2:
# Input: nums1 = [2,4], nums2 = [1,2,3,4].
# Output: [3,-1]
# Explanation:
# For number 2 in the first array, the next greater number for it in the second array is 3.
# For number 4 in the first array, there is no next greater number for it in the second array, so output -1.
# Note:
# All elements in nums1 and nums2 are unique.
# The length of both nums1 and nums2 would not exceed 1000.
class Solution(object):
def nextGreaterElement(self, findNums, nums):
"""
:type findNums: List[int]
:type nums: List[int]
:rtype: List[int]
"""
stk, lookup = [], {}
for num in nums:
while stk and num > stk[-1]:
lookup[stk.pop()] = num
stk.append(num)
while stk:
lookup[stk.pop()] = -1
return map(lambda x : lookup[x], findNums)
| mit | Python | |
b0c03b86d606c85dd1cab1ad9e9678e1057d0ae1 | Add pen which draws to TrueType glyphs. | googlefonts/fonttools,fonttools/fonttools | Lib/fontTools/pens/ttGlyphPen.py | Lib/fontTools/pens/ttGlyphPen.py | from __future__ import print_function, division, absolute_import
from array import array
from fontTools.misc.py23 import *
from fontTools.pens.basePen import AbstractPen
from fontTools.ttLib.tables import ttProgram
from fontTools.ttLib.tables._g_l_y_f import Glyph
from fontTools.ttLib.tables._g_l_y_f import GlyphComponent
from fontTools.ttLib.tables._g_l_y_f import GlyphCoordinates
__all__ = ["TTGlyphPen"]
class TTGlyphPen(AbstractPen):
"""Pen used for drawing to a TrueType glyph."""
def __init__(self):
self.points = []
self.endPts = []
self.types = []
self.components = []
def _addPoint(self, pt, onCurve):
self.points.append([int(coord) for coord in pt])
self.types.append(onCurve)
def lineTo(self, pt):
self._addPoint(pt, 1)
def moveTo(self, pt):
assert (not self.points) or (self.endPts[-1] == len(self.points) - 1)
self.lineTo(pt)
def qCurveTo(self, *points):
for pt in points[:-1]:
self._addPoint(pt, 0)
self._addPoint(points[-1], 1)
def closePath(self):
endPt = len(self.points) - 1
# ignore anchors
if endPt == 0 or (self.endPts and endPt == self.endPts[-1] + 1):
self.points.pop()
self.types.pop()
return
self.endPts.append(endPt)
def endPath(self):
# TrueType contours are always "closed"
self.closePath()
def addComponent(self, glyphName, transformation):
component = GlyphComponent()
component.glyphName = glyphName
component.transform = (transformation[:2], transformation[2:4])
component.x, component.y = [int(n) for n in transformation[4:]]
component.flags = 0
self.components.append(component)
def glyph(self):
glyph = Glyph()
glyph.coordinates = GlyphCoordinates(self.points)
glyph.endPtsOfContours = self.endPts
glyph.flags = array("B", self.types)
glyph.components = self.components
# TrueType glyphs can't have both contours and components
if glyph.components:
glyph.numberOfContours = -1
else:
glyph.numberOfContours = len(glyph.endPtsOfContours)
glyph.program = ttProgram.Program()
glyph.program.fromBytecode("")
return glyph
| mit | Python | |
95e2e9af124595aae4801fc9813ee1c294d404cd | Change invalidtxrequest to use BitcoinTestFramework | kallewoof/bitcoin,jmcorgan/bitcoin,MeshCollider/bitcoin,bitcoin/bitcoin,h4x3rotab/BTCGPU,lbryio/lbrycrd,gjhiggins/vcoincore,wellenreiter01/Feathercoin,Kogser/bitcoin,achow101/bitcoin,rnicoll/bitcoin,tjps/bitcoin,JeremyRubin/bitcoin,wellenreiter01/Feathercoin,ElementsProject/elements,guncoin/guncoin,droark/bitcoin,globaltoken/globaltoken,FeatherCoin/Feathercoin,bespike/litecoin,achow101/bitcoin,sstone/bitcoin,namecoin/namecoin-core,pataquets/namecoin-core,r8921039/bitcoin,monacoinproject/monacoin,bitcoinsSG/bitcoin,randy-waterhouse/bitcoin,globaltoken/globaltoken,bespike/litecoin,globaltoken/globaltoken,jambolo/bitcoin,ericshawlinux/bitcoin,sipsorcery/bitcoin,lateminer/bitcoin,myriadcoin/myriadcoin,JeremyRubin/bitcoin,jambolo/bitcoin,dscotese/bitcoin,kazcw/bitcoin,tecnovert/particl-core,apoelstra/bitcoin,randy-waterhouse/bitcoin,wellenreiter01/Feathercoin,ericshawlinux/bitcoin,ajtowns/bitcoin,rawodb/bitcoin,Xekyo/bitcoin,OmniLayer/omnicore,fanquake/bitcoin,domob1812/namecore,GlobalBoost/GlobalBoost,bitcoinknots/bitcoin,sebrandon1/bitcoin,droark/bitcoin,EthanHeilman/bitcoin,ahmedbodi/vertcoin,vertcoin/vertcoin,TheBlueMatt/bitcoin,Christewart/bitcoin,rnicoll/bitcoin,practicalswift/bitcoin,mruddy/bitcoin,jmcorgan/bitcoin,jamesob/bitcoin,sstone/bitcoin,pstratem/bitcoin,jtimon/bitcoin,mruddy/bitcoin,jamesob/bitcoin,mitchellcash/bitcoin,yenliangl/bitcoin,GlobalBoost/GlobalBoost,domob1812/namecore,lateminer/bitcoin,jlopp/statoshi,sipsorcery/bitcoin,andreaskern/bitcoin,r8921039/bitcoin,fujicoin/fujicoin,pstratem/bitcoin,qtumproject/qtum,nikkitan/bitcoin,Flowdalic/bitcoin,TheBlueMatt/bitcoin,bitcoinsSG/bitcoin,globaltoken/globaltoken,rawodb/bitcoin,monacoinproject/monacoin,vmp32k/litecoin,tjps/bitcoin,Kogser/bitcoin,tecnovert/particl-core,achow101/bitcoin,alecalve/bitcoin,lateminer/bitcoin,Bushstar/UFO-Project,vmp32k/litecoin,namecoin/namecoin-core,domob1812/huntercore,ElementsPro
ject/elements,globaltoken/globaltoken,CryptArc/bitcoin,jtimon/bitcoin,mm-s/bitcoin,pataquets/namecoin-core,myriadteam/myriadcoin,pataquets/namecoin-core,kazcw/bitcoin,mitchellcash/bitcoin,fujicoin/fujicoin,digibyte/digibyte,Bushstar/UFO-Project,particl/particl-core,FeatherCoin/Feathercoin,RHavar/bitcoin,afk11/bitcoin,yenliangl/bitcoin,peercoin/peercoin,midnightmagic/bitcoin,thrasher-/litecoin,tecnovert/particl-core,cdecker/bitcoin,kallewoof/bitcoin,peercoin/peercoin,instagibbs/bitcoin,thrasher-/litecoin,litecoin-project/litecoin,tjps/bitcoin,andreaskern/bitcoin,guncoin/guncoin,donaloconnor/bitcoin,n1bor/bitcoin,paveljanik/bitcoin,GroestlCoin/bitcoin,ericshawlinux/bitcoin,jonasschnelli/bitcoin,afk11/bitcoin,bespike/litecoin,bespike/litecoin,jambolo/bitcoin,afk11/bitcoin,JeremyRubin/bitcoin,apoelstra/bitcoin,Bushstar/UFO-Project,rnicoll/bitcoin,RHavar/bitcoin,OmniLayer/omnicore,droark/bitcoin,midnightmagic/bitcoin,BTCGPU/BTCGPU,stamhe/bitcoin,jonasschnelli/bitcoin,MarcoFalke/bitcoin,vertcoin/vertcoin,prusnak/bitcoin,namecoin/namecoin-core,DigitalPandacoin/pandacoin,guncoin/guncoin,FeatherCoin/Feathercoin,sipsorcery/bitcoin,vertcoin/vertcoin,digibyte/digibyte,sstone/bitcoin,jnewbery/bitcoin,randy-waterhouse/bitcoin,sstone/bitcoin,ajtowns/bitcoin,myriadcoin/myriadcoin,pstratem/bitcoin,CryptArc/bitcoin,Christewart/bitcoin,jlopp/statoshi,joshrabinowitz/bitcoin,midnightmagic/bitcoin,MeshCollider/bitcoin,dscotese/bitcoin,namecoin/namecore,pstratem/bitcoin,Bushstar/UFO-Project,ajtowns/bitcoin,MarcoFalke/bitcoin,qtumproject/qtum,jlopp/statoshi,guncoin/guncoin,untrustbank/litecoin,GroestlCoin/GroestlCoin,prusnak/bitcoin,qtumproject/qtum,stamhe/bitcoin,fanquake/bitcoin,rnicoll/dogecoin,GroestlCoin/bitcoin,guncoin/guncoin,DigitalPandacoin/pandacoin,domob1812/namecore,afk11/bitcoin,namecoin/namecore,tecnovert/particl-core,joshrabinowitz/bitcoin,tecnovert/particl-core,FeatherCoin/Feathercoin,GroestlCoin/GroestlCoin,untrustbank/litecoin,mitchellcash/bitcoin,jambolo/bitcoin,vmp32k/l
itecoin,bitcoin/bitcoin,digibyte/digibyte,ryanofsky/bitcoin,digibyte/digibyte,domob1812/bitcoin,andreaskern/bitcoin,myriadteam/myriadcoin,ahmedbodi/vertcoin,qtumproject/qtum,dscotese/bitcoin,jlopp/statoshi,guncoin/guncoin,Kogser/bitcoin,Sjors/bitcoin,joshrabinowitz/bitcoin,domob1812/bitcoin,n1bor/bitcoin,MarcoFalke/bitcoin,sstone/bitcoin,bitcoinsSG/bitcoin,Kogser/bitcoin,sebrandon1/bitcoin,prusnak/bitcoin,alecalve/bitcoin,EthanHeilman/bitcoin,bitcoin/bitcoin,sipsorcery/bitcoin,alecalve/bitcoin,alecalve/bitcoin,domob1812/bitcoin,CryptArc/bitcoin,Flowdalic/bitcoin,domob1812/huntercore,peercoin/peercoin,ericshawlinux/bitcoin,bitcoinknots/bitcoin,rnicoll/bitcoin,myriadteam/myriadcoin,jamesob/bitcoin,GlobalBoost/GlobalBoost,ahmedbodi/vertcoin,mm-s/bitcoin,joshrabinowitz/bitcoin,jmcorgan/bitcoin,nikkitan/bitcoin,mruddy/bitcoin,cdecker/bitcoin,afk11/bitcoin,Kogser/bitcoin,paveljanik/bitcoin,rawodb/bitcoin,jambolo/bitcoin,MeshCollider/bitcoin,GlobalBoost/GlobalBoost,litecoin-project/litecoin,MarcoFalke/bitcoin,nikkitan/bitcoin,FeatherCoin/Feathercoin,fanquake/bitcoin,BTCGPU/BTCGPU,donaloconnor/bitcoin,untrustbank/litecoin,h4x3rotab/BTCGPU,Sjors/bitcoin,ajtowns/bitcoin,gjhiggins/vcoincore,myriadcoin/myriadcoin,stamhe/bitcoin,mm-s/bitcoin,mruddy/bitcoin,sipsorcery/bitcoin,qtumproject/qtum,jmcorgan/bitcoin,TheBlueMatt/bitcoin,fanquake/bitcoin,wellenreiter01/Feathercoin,fujicoin/fujicoin,kazcw/bitcoin,Kogser/bitcoin,bitcoin/bitcoin,AkioNak/bitcoin,lbryio/lbrycrd,jlopp/statoshi,mm-s/bitcoin,namecoin/namecore,lbryio/lbrycrd,TheBlueMatt/bitcoin,EthanHeilman/bitcoin,practicalswift/bitcoin,sebrandon1/bitcoin,JeremyRubin/bitcoin,Kogser/bitcoin,litecoin-project/litecoin,andreaskern/bitcoin,thrasher-/litecoin,CryptArc/bitcoin,domob1812/namecore,bitcoinknots/bitcoin,donaloconnor/bitcoin,BTCGPU/BTCGPU,alecalve/bitcoin,ahmedbodi/vertcoin,domob1812/huntercore,ryanofsky/bitcoin,apoelstra/bitcoin,GroestlCoin/bitcoin,namecoin/namecore,apoelstra/bitcoin,mitchellcash/bitcoin,stamhe/bitcoin,apoe
lstra/bitcoin,namecoin/namecoin-core,droark/bitcoin,ryanofsky/bitcoin,Sjors/bitcoin,monacoinproject/monacoin,particl/particl-core,JeremyRubin/bitcoin,lateminer/bitcoin,thrasher-/litecoin,anditto/bitcoin,n1bor/bitcoin,joshrabinowitz/bitcoin,tjps/bitcoin,tjps/bitcoin,Flowdalic/bitcoin,digibyte/digibyte,particl/particl-core,GroestlCoin/bitcoin,rnicoll/dogecoin,tjps/bitcoin,bitcoinsSG/bitcoin,GroestlCoin/bitcoin,apoelstra/bitcoin,mitchellcash/bitcoin,bitcoinknots/bitcoin,namecoin/namecoin-core,midnightmagic/bitcoin,Kogser/bitcoin,Christewart/bitcoin,GroestlCoin/GroestlCoin,prusnak/bitcoin,vmp32k/litecoin,nikkitan/bitcoin,Xekyo/bitcoin,DigitalPandacoin/pandacoin,jnewbery/bitcoin,namecoin/namecoin-core,myriadteam/myriadcoin,jambolo/bitcoin,paveljanik/bitcoin,qtumproject/qtum,anditto/bitcoin,peercoin/peercoin,gjhiggins/vcoincore,particl/particl-core,dscotese/bitcoin,MarcoFalke/bitcoin,GroestlCoin/GroestlCoin,Sjors/bitcoin,r8921039/bitcoin,ryanofsky/bitcoin,myriadcoin/myriadcoin,ericshawlinux/bitcoin,vmp32k/litecoin,pataquets/namecoin-core,rnicoll/dogecoin,midnightmagic/bitcoin,namecoin/namecore,digibyte/digibyte,TheBlueMatt/bitcoin,domob1812/bitcoin,jnewbery/bitcoin,jlopp/statoshi,practicalswift/bitcoin,lbryio/lbrycrd,Christewart/bitcoin,mruddy/bitcoin,EthanHeilman/bitcoin,ajtowns/bitcoin,wellenreiter01/Feathercoin,AkioNak/bitcoin,n1bor/bitcoin,cdecker/bitcoin,Flowdalic/bitcoin,yenliangl/bitcoin,sipsorcery/bitcoin,monacoinproject/monacoin,GlobalBoost/GlobalBoost,OmniLayer/omnicore,joshrabinowitz/bitcoin,mm-s/bitcoin,domob1812/huntercore,n1bor/bitcoin,jnewbery/bitcoin,particl/particl-core,domob1812/huntercore,vertcoin/vertcoin,ElementsProject/elements,OmniLayer/omnicore,Bushstar/UFO-Project,andreaskern/bitcoin,Christewart/bitcoin,ElementsProject/elements,Xekyo/bitcoin,randy-waterhouse/bitcoin,jtimon/bitcoin,CryptArc/bitcoin,ahmedbodi/vertcoin,jonasschnelli/bitcoin,yenliangl/bitcoin,lbryio/lbrycrd,AkioNak/bitcoin,sebrandon1/bitcoin,OmniLayer/omnicore,jmcorgan/bitcoin,bitcoin
sSG/bitcoin,ElementsProject/elements,paveljanik/bitcoin,domob1812/bitcoin,lbryio/lbrycrd,monacoinproject/monacoin,pataquets/namecoin-core,CryptArc/bitcoin,thrasher-/litecoin,rnicoll/dogecoin,GroestlCoin/GroestlCoin,practicalswift/bitcoin,vertcoin/vertcoin,Sjors/bitcoin,BTCGPU/BTCGPU,fanquake/bitcoin,h4x3rotab/BTCGPU,prusnak/bitcoin,monacoinproject/monacoin,fujicoin/fujicoin,pstratem/bitcoin,litecoin-project/litecoin,h4x3rotab/BTCGPU,rawodb/bitcoin,vertcoin/vertcoin,kallewoof/bitcoin,rnicoll/bitcoin,vmp32k/litecoin,Bushstar/UFO-Project,yenliangl/bitcoin,jonasschnelli/bitcoin,r8921039/bitcoin,tecnovert/particl-core,bespike/litecoin,GlobalBoost/GlobalBoost,gjhiggins/vcoincore,afk11/bitcoin,bitcoinsSG/bitcoin,EthanHeilman/bitcoin,bitcoin/bitcoin,FeatherCoin/Feathercoin,achow101/bitcoin,Kogser/bitcoin,rnicoll/bitcoin,RHavar/bitcoin,gjhiggins/vcoincore,DigitalPandacoin/pandacoin,peercoin/peercoin,Xekyo/bitcoin,jtimon/bitcoin,droark/bitcoin,Flowdalic/bitcoin,gjhiggins/vcoincore,dscotese/bitcoin,TheBlueMatt/bitcoin,fujicoin/fujicoin,Kogser/bitcoin,globaltoken/globaltoken,anditto/bitcoin,cdecker/bitcoin,mm-s/bitcoin,ajtowns/bitcoin,untrustbank/litecoin,randy-waterhouse/bitcoin,ryanofsky/bitcoin,achow101/bitcoin,rawodb/bitcoin,Xekyo/bitcoin,n1bor/bitcoin,Xekyo/bitcoin,h4x3rotab/BTCGPU,kazcw/bitcoin,rnicoll/dogecoin,jonasschnelli/bitcoin,fanquake/bitcoin,jtimon/bitcoin,RHavar/bitcoin,dscotese/bitcoin,bitcoinknots/bitcoin,qtumproject/qtum,lateminer/bitcoin,nikkitan/bitcoin,jtimon/bitcoin,instagibbs/bitcoin,anditto/bitcoin,myriadteam/myriadcoin,sstone/bitcoin,JeremyRubin/bitcoin,ericshawlinux/bitcoin,lateminer/bitcoin,practicalswift/bitcoin,peercoin/peercoin,myriadcoin/myriadcoin,randy-waterhouse/bitcoin,MarcoFalke/bitcoin,pataquets/namecoin-core,kazcw/bitcoin,domob1812/namecore,jamesob/bitcoin,andreaskern/bitcoin,anditto/bitcoin,ElementsProject/elements,litecoin-project/litecoin,rawodb/bitcoin,Kogser/bitcoin,AkioNak/bitcoin,myriadteam/myriadcoin,donaloconnor/bitcoin,paveljanik/
bitcoin,nikkitan/bitcoin,jamesob/bitcoin,RHavar/bitcoin,kazcw/bitcoin,Flowdalic/bitcoin,donaloconnor/bitcoin,litecoin-project/litecoin,prusnak/bitcoin,Kogser/bitcoin,stamhe/bitcoin,namecoin/namecore,wellenreiter01/Feathercoin,DigitalPandacoin/pandacoin,GroestlCoin/GroestlCoin,bitcoin/bitcoin,mitchellcash/bitcoin,domob1812/namecore,kallewoof/bitcoin,alecalve/bitcoin,jmcorgan/bitcoin,particl/particl-core,MeshCollider/bitcoin,kallewoof/bitcoin,MeshCollider/bitcoin,fujicoin/fujicoin,GlobalBoost/GlobalBoost,kallewoof/bitcoin,achow101/bitcoin,thrasher-/litecoin,OmniLayer/omnicore,r8921039/bitcoin,bespike/litecoin,untrustbank/litecoin,r8921039/bitcoin,cdecker/bitcoin,EthanHeilman/bitcoin,sebrandon1/bitcoin,untrustbank/litecoin,MeshCollider/bitcoin,mruddy/bitcoin,lbryio/lbrycrd,sebrandon1/bitcoin,instagibbs/bitcoin,ahmedbodi/vertcoin,practicalswift/bitcoin,ryanofsky/bitcoin,BTCGPU/BTCGPU,myriadcoin/myriadcoin,donaloconnor/bitcoin,instagibbs/bitcoin,domob1812/huntercore,jnewbery/bitcoin,AkioNak/bitcoin,domob1812/bitcoin,DigitalPandacoin/pandacoin,h4x3rotab/BTCGPU,yenliangl/bitcoin,pstratem/bitcoin,paveljanik/bitcoin,Christewart/bitcoin,cdecker/bitcoin,GroestlCoin/bitcoin,BTCGPU/BTCGPU,midnightmagic/bitcoin,stamhe/bitcoin,instagibbs/bitcoin,droark/bitcoin,instagibbs/bitcoin,anditto/bitcoin,RHavar/bitcoin,AkioNak/bitcoin,jamesob/bitcoin | test/functional/p2p_invalid_tx.py | test/functional/p2p_invalid_tx.py | #!/usr/bin/env python3
# Copyright (c) 2015-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test node responses to invalid transactions.
In this test we connect to one node over p2p, and test tx requests."""
from test_framework.blocktools import create_block, create_coinbase, create_transaction
from test_framework.messages import COIN
from test_framework.mininode import network_thread_start, P2PDataStore
from test_framework.test_framework import BitcoinTestFramework
class InvalidTxRequestTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
self.extra_args = [["-whitelist=127.0.0.1"]]
def run_test(self):
# Add p2p connection to node0
node = self.nodes[0] # convenience reference to the node
node.add_p2p_connection(P2PDataStore())
network_thread_start()
node.p2p.wait_for_verack()
best_block = self.nodes[0].getbestblockhash()
tip = int(best_block, 16)
best_block_time = self.nodes[0].getblock(best_block)['time']
block_time = best_block_time + 1
self.log.info("Create a new block with an anyone-can-spend coinbase.")
height = 1
block = create_block(tip, create_coinbase(height), block_time)
block_time += 1
block.solve()
# Save the coinbase for later
block1 = block
tip = block.sha256
height += 1
node.p2p.send_blocks_and_test([block], node, success=True)
self.log.info("Mature the block.")
self.nodes[0].generate(100)
# b'\x64' is OP_NOTIF
# Transaction will be rejected with code 16 (REJECT_INVALID)
tx1 = create_transaction(block1.vtx[0], 0, b'\x64', 50 * COIN - 12000)
node.p2p.send_txs_and_test([tx1], node, success=False, reject_code=16, reject_reason=b'mandatory-script-verify-flag-failed (Invalid OP_IF construction)')
# TODO: test further transactions...
if __name__ == '__main__':
InvalidTxRequestTest().main()
| #!/usr/bin/env python3
# Copyright (c) 2015-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test node responses to invalid transactions.
In this test we connect to one node over p2p, and test tx requests."""
import time
from test_framework.blocktools import create_block, create_coinbase, create_transaction
from test_framework.comptool import RejectResult, TestInstance, TestManager
from test_framework.messages import COIN
from test_framework.mininode import network_thread_start
from test_framework.test_framework import ComparisonTestFramework
class InvalidTxRequestTest(ComparisonTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
def run_test(self):
test = TestManager(self, self.options.tmpdir)
test.add_all_connections(self.nodes)
self.tip = None
self.block_time = None
network_thread_start()
test.run()
def get_tests(self):
self.tip = int("0x" + self.nodes[0].getbestblockhash(), 0)
self.block_time = int(time.time()) + 1
self.log.info("Create a new block with an anyone-can-spend coinbase.")
height = 1
block = create_block(self.tip, create_coinbase(height), self.block_time)
self.block_time += 1
block.solve()
# Save the coinbase for later
self.block1 = block
self.tip = block.sha256
height += 1
yield TestInstance([[block, True]])
self.log.info("Mature the block.")
test = TestInstance(sync_every_block=False)
for i in range(100):
block = create_block(self.tip, create_coinbase(height), self.block_time)
block.solve()
self.tip = block.sha256
self.block_time += 1
test.blocks_and_transactions.append([block, True])
height += 1
yield test
# b'\x64' is OP_NOTIF
# Transaction will be rejected with code 16 (REJECT_INVALID)
tx1 = create_transaction(self.block1.vtx[0], 0, b'\x64', 50 * COIN - 12000)
yield TestInstance([[tx1, RejectResult(16, b'mandatory-script-verify-flag-failed')]])
# TODO: test further transactions...
if __name__ == '__main__':
InvalidTxRequestTest().main()
| mit | Python |
08447fa344e21d6d704c6f195ad2b7405fa8f916 | Add test for total property | UITools/saleor,car3oon/saleor,itbabu/saleor,tfroehlich82/saleor,tfroehlich82/saleor,maferelo/saleor,laosunhust/saleor,jreigel/saleor,itbabu/saleor,UITools/saleor,car3oon/saleor,KenMutemi/saleor,maferelo/saleor,HyperManTT/ECommerceSaleor,itbabu/saleor,spartonia/saleor,KenMutemi/saleor,laosunhust/saleor,jreigel/saleor,rodrigozn/CW-Shop,rchav/vinerack,mociepka/saleor,jreigel/saleor,rodrigozn/CW-Shop,KenMutemi/saleor,spartonia/saleor,HyperManTT/ECommerceSaleor,mociepka/saleor,UITools/saleor,maferelo/saleor,tfroehlich82/saleor,car3oon/saleor,rodrigozn/CW-Shop,HyperManTT/ECommerceSaleor,mociepka/saleor,laosunhust/saleor,spartonia/saleor,spartonia/saleor,UITools/saleor,laosunhust/saleor,rchav/vinerack,rchav/vinerack,UITools/saleor | saleor/order/test_order.py | saleor/order/test_order.py | from .models import Order
def test_total_property():
order = Order(total_net=20, total_tax=5)
assert order.total.gross == 25
assert order.total.tax == 5
assert order.total.net == 20
| bsd-3-clause | Python | |
2b09a8d75e0d59bba41467210b7d0588eb4a09d5 | add migration for junebug channel type | tsotetsi/textily-web,pulilab/rapidpro,tsotetsi/textily-web,pulilab/rapidpro,pulilab/rapidpro,pulilab/rapidpro,tsotetsi/textily-web,tsotetsi/textily-web,tsotetsi/textily-web,pulilab/rapidpro | temba/channels/migrations/0050_add_junebug_channel_type.py | temba/channels/migrations/0050_add_junebug_channel_type.py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.12 on 2017-01-26 15:56
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('channels', '0049_auto_20170106_0910'),
]
operations = [
migrations.AlterField(
model_name='channel',
name='channel_type',
field=models.CharField(choices=[('AT', "Africa's Talking"), ('A', 'Android'), ('BM', 'Blackmyna'), ('CT', 'Clickatell'), ('DA', 'Dart Media'), ('DM', 'Dummy'), ('EX', 'External'), ('FB', 'Facebook'), ('GL', 'Globe Labs'), ('HX', 'High Connection'), ('H9', 'Hub9'), ('IB', 'Infobip'), ('JS', 'Jasmin'), ('JN', 'Junebug'), ('KN', 'Kannel'), ('LN', 'Line'), ('M3', 'M3 Tech'), ('MB', 'Mblox'), ('NX', 'Nexmo'), ('PL', 'Plivo'), ('SQ', 'Shaqodoon'), ('SC', 'SMSCentral'), ('ST', 'Start Mobile'), ('TG', 'Telegram'), ('T', 'Twilio'), ('TW', 'TwiML Rest API'), ('TMS', 'Twilio Messaging Service'), ('TT', 'Twitter'), ('VB', 'Verboice'), ('VI', 'Viber'), ('VP', 'Viber Public Channels'), ('VM', 'Vumi'), ('VMU', 'Vumi USSD'), ('YO', 'Yo!'), ('ZV', 'Zenvia')], default='A', help_text='Type of this channel, whether Android, Twilio or SMSC', max_length=3, verbose_name='Channel Type'),
),
]
| agpl-3.0 | Python | |
ace26ab5e713fabd02f4f481956c47640f50b166 | Add unit test for volume limits client | vedujoshi/tempest,openstack/tempest,cisco-openstack/tempest,cisco-openstack/tempest,Juniper/tempest,masayukig/tempest,openstack/tempest,Juniper/tempest,masayukig/tempest,vedujoshi/tempest | tempest/tests/lib/services/volume/v2/test_limits_client.py | tempest/tests/lib/services/volume/v2/test_limits_client.py | # Copyright 2017 FiberHome Telecommunication Technologies CO.,LTD
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib.services.volume.v2 import limits_client
from tempest.tests.lib import fake_auth_provider
from tempest.tests.lib.services import base
class TestLimitsClient(base.BaseServiceTest):
FAKE_LIMIT_INFO = {
"limits": {
"rate": [],
"absolute": {
"totalSnapshotsUsed": 0,
"maxTotalBackups": 10,
"maxTotalVolumeGigabytes": 1000,
"maxTotalSnapshots": 10,
"maxTotalBackupGigabytes": 1000,
"totalBackupGigabytesUsed": 0,
"maxTotalVolumes": 10,
"totalVolumesUsed": 0,
"totalBackupsUsed": 0,
"totalGigabytesUsed": 0
}
}
}
def setUp(self):
super(TestLimitsClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
self.client = limits_client.LimitsClient(fake_auth,
'volume',
'regionOne')
def _test_show_limits(self, bytes_body=False):
self.check_service_client_function(
self.client.show_limits,
'tempest.lib.common.rest_client.RestClient.get',
self.FAKE_LIMIT_INFO,
bytes_body)
def test_show_limits_with_str_body(self):
self._test_show_limits()
def test_show_limits_with_bytes_body(self):
self._test_show_limits(bytes_body=True)
| apache-2.0 | Python | |
29c268db2cbb3b4787d3e925f925a49f0df68c46 | add cache UT | maxwu/cistat,maxwu/cistat | test/test_cache.py | test/test_cache.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Logger Class
Simple encapsulation on logging functions.
- Console printing
- File handler and the mapping for multithreading file handlers are under design yet.
.. moduleauthor:: Max Wu <http://maxwu.me>
.. References::
**ReadtheDocs**: https://pythonguidecn.readthedocs.io/zh/latest/writing/logging.html
"""
import unittest
from me.maxwu.cistat.cache import CacheIt
from me.maxwu.cistat import config
from me.maxwu.cistat.reqs.circleci_request import CircleCiReq
class MyTestCase(unittest.TestCase):
@staticmethod
def get_cache_stat():
dc = CacheIt(enable=config.get_cache_enable())
hit, miss = dc.cache.stats()
dc.close()
print(">>>Test>>> Cache Stat: hit={:d} miss={:d}".format(hit, miss))
return hit, miss
def setUp(self):
self.hit_orig, self.miss_orig = self.get_cache_stat()
self.url = 'https://80-77958022-gh.circle-artifacts.com/0/tmp/circle-junit.BxjS188/junit/TEST-org.maxwu.jrefresh.HttpApi.SourceIpApiTest.xml'
def test_cache_stat(self):
hit_0, miss_0 = self.get_cache_stat()
xunit1 = CircleCiReq.get_artifact_report(url=self.url)
hit_1, miss_1 = self.get_cache_stat()
self.assertEqual(1, hit_1 + miss_1 - hit_0 - miss_0)
xunit2 = CircleCiReq.get_artifact_report(url=self.url)
hit_2, miss_2 = self.get_cache_stat()
self.assertEqual(miss_2, miss_1) # For the 2nd fetch, it won't be missed.
self.assertEqual(1, hit_2 - hit_1) # For the 2nd fetch, it shall hit at least once
def test_cache_stat(self):
hit_0, miss_0 = self.get_cache_stat()
xunit1 = CircleCiReq.get_artifact_report() # No url provided
hit_1, miss_1 = self.get_cache_stat()
self.assertEqual((hit_0, miss_0), (hit_1, miss_1))
def tearDown(self):
pass
if __name__ == '__main__':
unittest.main()
| mit | Python | |
e4b9c43d53121d2b21c4b864fcc74674b0b6dfc1 | Create class to interpolate values between indexes | thelonious/g2x,gizmo-cda/g2x,gizmo-cda/g2x,thelonious/g2x,gizmo-cda/g2x,gizmo-cda/g2x | scratchpad/Interpolator.py | scratchpad/Interpolator.py | class Interpolator:
def __init__(self):
self.data = []
def addIndexValue(self, index, value):
self.data.append((index, value))
def valueAtIndex(self, target_index):
if target_index < self.data[0][0]:
return None
elif self.data[-1][0] < target_index:
return None
else:
start = None
end = None
for (index, value) in self.data:
if index == target_index:
return value
else:
if index <= target_index:
start = (index, value)
elif target_index < index:
end = (index, value)
break
index_delta = end[0] - start[0]
percent = (target_index - start[0]) / index_delta
value_delta = end[1] - start[1]
return start[1] + value_delta * percent
| mit | Python | |
c3de9ebfa84fd93572d0a4ac991272609a593328 | Create af_renameSG.py | aaronfang/personal_scripts | scripts/af_renameSG.py | scripts/af_renameSG.py | # rename shading group name to material name but with SG ended
import pymel.core as pm
import re
selSG = pm.ls(sl=True,fl=True)
for SG in selSG:
curMat = pm.listConnections(SG,d=1)
for mat in curMat:
if pm.nodeType(mat) == 'blinn' or pm.nodeType(mat) == 'lambert':
sgNM = re.split("_mat",str(mat))[0]+"SG"
pm.rename(SG,sgNM)
| mit | Python | |
b8777453cf03b212f2b06ca0afeef6c780e39f51 | add face_classifier.py | wkentaro/keyopener,wkentaro/keyopener | scripts/face_classifier.py | scripts/face_classifier.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# face_classifier.py
# author: Kentaro Wada <www.kentaro.wada@gmail.com>
import os
import sys
import collections
from sklearn import svm
import cv2
class FaceClassifier(object):
    """Index face images laid out as <data_dir>/<person>/<image>.(png|pgm)."""
    def __init__(self, data_dir):
        """Scan data_dir and build the person -> image-path mapping."""
        self.data_dir = data_dir
        # Maps a person's directory name to the list of their image paths.
        self.img_dict = collections.defaultdict(list)
        self._lookup_imgs()
    def _lookup_imgs(self):
        """Populate self.img_dict from the directory tree under data_dir."""
        face_dirs = os.listdir(self.data_dir)
        for face_dir in face_dirs:
            face_dir_abs = os.path.join(self.data_dir, face_dir)
            # Skip stray files at the top level; each person is a directory.
            if not os.path.isdir(face_dir_abs):
                continue
            for img in os.listdir(face_dir_abs):
                base, ext = os.path.splitext(img)
                # Only collect the supported image formats.
                if ext not in ['.png', '.pgm']:
                    continue
                self.img_dict[face_dir].append(
                    os.path.join(self.data_dir, face_dir, img))
def main():
    """Show the first indexed image of each person, one at a time."""
    data_dir = '../data/cropped'
    face_clf = FaceClassifier(data_dir=data_dir)
    for person, img_path in face_clf.img_dict.items():
        # img_path is the list of that person's images; display the first.
        img = cv2.imread(img_path[0])
        print img_path[0]
        cv2.imshow('cropped img', img)
        # Block until a key press; 27 is the ESC key code.
        k = cv2.waitKey(0)
        if k == 27:
            continue
        cv2.destroyAllWindows()
if __name__ == '__main__':
main()
| mit | Python | |
ef7abdab7681e496cebd1e4655a63cafcb9163db | add gafton's migration script to scripts/ | sassoftware/conary,sassoftware/conary,sassoftware/conary,sassoftware/conary,sassoftware/conary | scripts/migrate-dbstore.py | scripts/migrate-dbstore.py | #!/usr/bin/python
import sys
import os
if 'CONARY_PATH' in os.environ:
sys.path.insert(0, os.environ['CONARY_PATH'])
from conary import dbstore
from conary.dbstore import sqlerrors
from conary.repository.netrepos import schema
if len(sys.argv) != 3:
print "Usage: migrate <sqlite_path> <mysql_spec>"
sqlite = dbstore.connect(sys.argv[1], driver = "sqlite")
cs = sqlite.cursor()
mysql = dbstore.connect(sys.argv[2], driver = "mysql")
cm = mysql.cursor()
schema.createSchema(mysql)
for t in sqlite.tables.keys():
if t in mysql.tables:
continue
print "Only in sqlite:", t
for t in mysql.tables.keys():
if t in sqlite.tables:
continue
print "Only in mysql:", t
tList = [
'Branches',
'Versions',
'Items',
'Labels',
'LabelMap',
'Flavors',
'FlavorMap',
'FlavorScores',
'UserGroups',
'Users',
'UserGroupMembers',
'Permissions',
'Instances',
'Dependencies',
'Latest',
'Metadata',
'MetadataItems',
'Nodes',
'ChangeLogs',
'PGPKeys',
'PGPFingerprints',
'Provides',
'Requires',
'FileStreams',
'TroveFiles',
'TroveInfo',
'TroveTroves',
'EntitlementGroups',
'Entitlements',
]
for t in tList:
print
print "Converting", t
count = cs.execute("select count(*) from %s" % t).fetchone()[0]
i = 0
cs.execute("select * from %s" % t)
cm.execute('alter table %s disable keys' % t)
while True:
row = cs.fetchone_dict()
if row is None:
break
if t == "Permissions":
row["canWrite"] = row["write"]
del row["write"]
if 'entGroupEdmin' in row:
del row["entGroupAdmin"]
row = row.items()
sql = "insert into %s (%s) values (%s)" % (
t, ", ".join(x[0] for x in row),
", ".join(["?"] * len(row)))
i += 1
try:
cm.execute(sql, [x[1] for x in row])
except sqlerrors.ColumnNotUnique:
print "\r%s: SKIPPING" % t, row
except:
print "ERROR - SQL", sql, "ARGS:", [x[1] for x in row]
raise
else:
if i % 1000 == 0:
sys.stdout.write("\r%s: %d/%d %d%%" % (t, i, count, i*100/count))
sys.stdout.flush()
if i % 50000 == 0:
mysql.commit()
cm.execute('alter table %s enable keys' % t)
print "\r%s: %d/%d 100%%" % (t, i, count)
mysql.commit()
| apache-2.0 | Python | |
36781fb1b04a3d2fd3162ea88969244faab22a60 | Convert GML to EWKT, via PostGIS | Open511/open511-server,Open511/open511-server,Open511/open511-server | open511/utils/postgis.py | open511/utils/postgis.py | from django.db import connection
def gml_to_ewkt(gml_string, force_2D=False):
    """Convert a GML geometry fragment to EWKT via PostGIS.

    When force_2D is true, any Z dimension is dropped with ST_Force_2D
    before serializing. Requires a PostGIS-enabled database connection.
    """
    geom_expr = 'ST_GeomFromGML(%s)'
    if force_2D:
        geom_expr = 'ST_Force_2D(' + geom_expr + ')'
    query = 'SELECT ST_AsEWKT(' + geom_expr + ')'
    cursor = connection.cursor()
    cursor.execute(query, [gml_string])
    row = cursor.fetchone()
    return row[0]
aa320244cc03fe299aa33057c8b92a6c2352a5fd | Add tracer for sqlalchemy | openstack/osprofiler,stackforge/osprofiler,openstack/osprofiler,openstack/osprofiler,stackforge/osprofiler,stackforge/osprofiler | osprofiler/sqlalchemy.py | osprofiler/sqlalchemy.py | # Copyright 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from osprofiler import profiler
def before_execute(name):
    """Add listener that will send trace info before sql executed."""
    def _before(conn, clauseelement, multiparams, params):
        current = profiler.get_profiler()
        if not current:
            return
        current.start(name,
                      info={"db.statement": str(clauseelement),
                            "db.multiparams": str(multiparams),
                            "db.params": str(params)})
    return _before


def after_execute():
    """Add listener that will send trace info after sql executed."""
    def _after(conn, clauseelement, multiparams, params, result):
        current = profiler.get_profiler()
        if not current:
            return
        current.stop(info={"db.result": str(result)})
    return _after


def add_tracing(sqlalchemy, engine, name):
    """Add tracing to all sqlalchemy calls."""
    listeners = (('before_execute', before_execute(name)),
                 ('after_execute', after_execute()))
    for event_name, listener in listeners:
        sqlalchemy.event.listen(engine, event_name, listener)
| apache-2.0 | Python | |
eefef8a5917243b75065441d46db19cbd65a7f1d | Create debounce decorator | qurben/mopidy-headless | mopidy_headless/decorator.py | mopidy_headless/decorator.py | import time
def debounce(wait):
    """Decorator factory that throttles calls to the wrapped function.

    A call is executed only if at least ``wait`` seconds have elapsed since
    the last call that actually ran; calls arriving sooner are silently
    discarded (they return None).

    :param wait: minimum number of seconds between executed calls
    :return: a decorator applying the throttling behaviour
    """
    from functools import wraps  # local import keeps the decorator self-contained

    def decorator(fn):
        @wraps(fn)  # preserve fn's name/docstring for introspection
        def wrapped(*args, **kwargs):
            now = time.time()
            if wrapped.last is not None and now - wrapped.last < wait:
                return None  # too soon after the last executed call: discard
            wrapped.last = now
            # Propagate fn's return value (the original dropped it).
            return fn(*args, **kwargs)
        wrapped.last = None  # timestamp of the last executed call
        return wrapped
    return decorator
| apache-2.0 | Python | |
b2e059ce247de4b083c059d1ffe925983c262183 | add test cases | catniplab/vLGP | tests/test_fast.py | tests/test_fast.py | from unittest import TestCase
import numpy as np
class TestFast(TestCase):
    """Unit tests for the vlgp.fast helpers."""
    def test_clip_grad(self):
        """clip_grad must bound every element to [-bound, bound]."""
        from vlgp import fast
        np.random.seed(0)  # deterministic random input
        n = 100
        x = np.random.randn(n)
        x_clipped = fast.clip_grad(x, bound=1.0)
        self.assertTrue(np.all(np.logical_and(x_clipped >= -1.0, x_clipped <= 1.0)))
    def test_cut_trial(self):
        """cut_trial should split a 100-step trial into length-10 chunks."""
        from vlgp import fast
        y = np.random.randn(100, 10)
        x = np.random.randn(100, 5)
        trial = {'y': y, 'x': x}
        fast_trials = fast.cut_trial(trial, 10)
        for each in fast_trials:
            self.assertTrue(each['y'].shape == (10, 10))
| mit | Python | |
3fc118da6cdc29f4867dc33319ca56f4f3731346 | add leetcode 121 | weixsong/algorithm,weixsong/algorithm,weixsong/algorithm | leetcode/121.py | leetcode/121.py | #!/usr/bin/env python
"""
Say you have an array for which the ith element is the price of a given stock on day i.
If you were only permitted to complete at most one transaction
(ie, buy one and sell one share of the stock), design an algorithm to find the maximum profit.
"""
class Solution(object):
    def maxProfit(self, prices):
        """
        :type prices: List[int]
        :rtype: int

        Single left-to-right pass: keep the cheapest price seen so far and
        the best profit obtainable by selling at the current price.
        Time: O(n), Space: O(1).
        """
        if not prices:
            return 0
        cheapest = prices[0]
        best_profit = 0
        for price in prices:
            gain = price - cheapest
            if gain > best_profit:
                best_profit = gain
            if price < cheapest:
                cheapest = price
        return best_profit
| mit | Python | |
16ad7991c22b4d9834a5db57912789d825a0cefb | Add unit tests | kemskems/otdet | tests/test_util.py | tests/test_util.py | import util
from nose.tools import assert_equal
class TestPick():
    """Tests for util.pick."""
    def check(self, filenames, expected, k, randomized):
        # Shared helper: run util.pick and compare against the expectation.
        result = util.pick(filenames, k, randomized)
        assert_equal(result, expected)
    def test_all_sequential(self):
        # With k=None and randomized=False the expectation is the files
        # ordered by the numeric suffix in their names.
        filenames = ['a-4.txt', 'b-2.txt', 'c-3.txt', 'd-1.txt', 'e-0.txt']
        expected = ['e-0.txt', 'd-1.txt', 'b-2.txt', 'c-3.txt', 'a-4.txt']
        self.check(filenames, expected, k=None, randomized=False)
| mit | Python | |
fcc92760db0d1dc56aca70aff69b34a29c9e8e6c | Add unit tests for the methods in util | shsmith/electrumx,thelazier/electrumx,Groestlcoin/electrumx-grs,erasmospunk/electrumx,shsmith/electrumx,bauerj/electrumx,thelazier/electrumx,erasmospunk/electrumx,Crowndev/electrumx,Groestlcoin/electrumx-grs,bauerj/electrumx,Crowndev/electrumx | tests/test_util.py | tests/test_util.py | from lib import util
def test_cachedproperty():
    # The property body must run only once; later reads hit the cache.
    class Target:
        def __init__(self):
            self.call_count = 0
        @util.cachedproperty
        def prop(self):
            self.call_count += 1
            return self.call_count
    t = Target()
    assert t.prop == t.prop == 1
def test_deep_getsizeof():
    # Containers must account for their contents, not just their own header.
    int_t = util.deep_getsizeof(1)
    assert util.deep_getsizeof([1, 1]) > 2 * int_t
    assert util.deep_getsizeof({1: 1}) > 2 * int_t
    assert util.deep_getsizeof({1: {1: 1}}) > 3 * int_t
# Small class hierarchy used by test_subclasses below.
class Base:
    pass
class A(Base):
    pass
class B(Base):
    pass
def test_subclasses():
    assert util.subclasses(Base) == [A, B]
def test_chunks():
    # The final chunk may be shorter than the requested size.
    assert list(util.chunks([1, 2, 3, 4, 5], 2)) == [[1, 2], [3, 4], [5]]
def test_increment_byte_string():
    # Overflow grows the string by one byte, like integer carry.
    assert util.increment_byte_string(b'1') == b'2'
    assert util.increment_byte_string(b'\x01\x01') == b'\x01\x02'
    assert util.increment_byte_string(b'\xff\xff') == b'\x01\x00\x00'
| mit | Python | |
a1fc7311ddc50eb43f43fc51d3290f2c91fd4fa1 | Update cheapest-flights-within-k-stops.py | kamyu104/LeetCode,kamyu104/LeetCode,kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,tudennis/LeetCode---kamyu104-11-24-2015,kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,tudennis/LeetCode---kamyu104-11-24-2015,tudennis/LeetCode---kamyu104-11-24-2015,kamyu104/LeetCode | Python/cheapest-flights-within-k-stops.py | Python/cheapest-flights-within-k-stops.py | # Time: O((|E| + |V|) * log|V|) = O(|E| * log|V|)
# Space: O(|E| + |V|) = O(|E|)
# There are n cities connected by m flights. Each fight starts from city u and arrives at v with a price w.
#
# Now given all the cities and fights, together with starting city src and the destination dst,
# your task is to find the cheapest price from src to dst with up to k stops.
# If there is no such route, output -1.
#
# Example 1:
# Input:
# n = 3, edges = [[0,1,100],[1,2,100],[0,2,500]]
# src = 0, dst = 2, k = 1
# Output: 200
# Explanation:
# The cheapest price from city 0 to city 2 with at most 1 stop costs 200, as marked red in the picture.
#
# Example 2:
# Input:
# n = 3, edges = [[0,1,100],[1,2,100],[0,2,500]]
# src = 0, dst = 2, k = 0
# Output: 500
#
# Explanation:
# The cheapest price from city 0 to city 2 with at most 0 stop costs 500, as marked blue in the picture.
# Note:
# - The number of nodes n will be in range [1, 100], with nodes labeled from 0 to n - 1.
# - The size of flights will be in range [0, n * (n - 1) / 2].
# - The format of each flight will be (src, dst, price).
# - The price of each flight will be in the range [1, 10000].
# - k is in the range of [0, n - 1].
# - There will not be any duplicated flights or self cycles.
import collections
import heapq
class Solution(object):
    def findCheapestPrice(self, n, flights, src, dst, K):
        """
        :type n: int
        :type flights: List[List[int]]
        :type src: int
        :type dst: int
        :type K: int
        :rtype: int

        Dijkstra-style search where every heap entry also carries how many
        flights may still be taken (K stops allow K + 1 flights).
        """
        graph = collections.defaultdict(list)
        for origin, dest, price in flights:
            graph[origin].append((dest, price))
        heap = [(0, src, K + 1)]
        while heap:
            cost, city, hops_left = heapq.heappop(heap)
            if city == dst:
                return cost
            if hops_left <= 0:
                continue
            for nxt, price in graph[city]:
                heapq.heappush(heap, (cost + price, nxt, hops_left - 1))
        return -1
| # Time: O((|E| + |V|) * log|V|) = O(|E| * log|V|)
# Space: O(|E| + |V|)
# There are n cities connected by m flights. Each fight starts from city u and arrives at v with a price w.
#
# Now given all the cities and fights, together with starting city src and the destination dst,
# your task is to find the cheapest price from src to dst with up to k stops.
# If there is no such route, output -1.
#
# Example 1:
# Input:
# n = 3, edges = [[0,1,100],[1,2,100],[0,2,500]]
# src = 0, dst = 2, k = 1
# Output: 200
# Explanation:
# The cheapest price from city 0 to city 2 with at most 1 stop costs 200, as marked red in the picture.
#
# Example 2:
# Input:
# n = 3, edges = [[0,1,100],[1,2,100],[0,2,500]]
# src = 0, dst = 2, k = 0
# Output: 500
#
# Explanation:
# The cheapest price from city 0 to city 2 with at most 0 stop costs 500, as marked blue in the picture.
# Note:
# - The number of nodes n will be in range [1, 100], with nodes labeled from 0 to n - 1.
# - The size of flights will be in range [0, n * (n - 1) / 2].
# - The format of each flight will be (src, dst, price).
# - The price of each flight will be in the range [1, 10000].
# - k is in the range of [0, n - 1].
# - There will not be any duplicated flights or self cycles.
import collections
import heapq
class Solution(object):
def findCheapestPrice(self, n, flights, src, dst, K):
"""
:type n: int
:type flights: List[List[int]]
:type src: int
:type dst: int
:type K: int
:rtype: int
"""
adj = collections.defaultdict(list)
for u, v, w in flights:
adj[u].append((v, w))
min_heap = [(0, src, K+1)]
while min_heap:
result, u, k = heapq.heappop(min_heap)
if u == dst:
return result
if k > 0:
for v, w in adj[u]:
heapq.heappush(min_heap, (result+w, v, k-1))
return -1
| mit | Python |
1c41bc4d06ad2209ddd6fe79621cabd210b94589 | Add __init__ | dshean/demcoreg,dshean/demcoreg | demcoreg/__init__.py | demcoreg/__init__.py | #! /usr/bin/env python
| mit | Python | |
97671650987d74c6281e56f3f4e1950f2d996d5b | upgrade version... | citrix-openstack-build/glance,jumpstarter-io/glance,kfwang/Glance-OVA-OVF,redhat-openstack/glance,klmitch/glance,stevelle/glance,takeshineshiro/glance,SUSE-Cloud/glance,scripnichenko/glance,akash1808/glance,JioCloud/glance,klmitch/glance,tanglei528/glance,openstack/glance,sigmavirus24/glance,ntt-sic/glance,tanglei528/glance,scripnichenko/glance,paramite/glance,SUSE-Cloud/glance,openstack/glance,cloudbau/glance,wkoathp/glance,kfwang/Glance-OVA-OVF,stevelle/glance,darren-wang/gl,rajalokan/glance,vuntz/glance,paramite/glance,rajalokan/glance,redhat-openstack/glance,vuntz/glance,sigmavirus24/glance,openstack/glance,darren-wang/gl,dims/glance,saeki-masaki/glance,rickerc/glance_audit,takeshineshiro/glance,jumpstarter-io/glance,dims/glance,rickerc/glance_audit,saeki-masaki/glance,ozamiatin/glance,akash1808/glance,JioCloud/glance,ntt-sic/glance,wkoathp/glance,ozamiatin/glance,cloudbau/glance,citrix-openstack-build/glance | setup.py | setup.py | #!/usr/bin/python
# Copyright (c) 2010 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup, find_packages
from setuptools.command.sdist import sdist
import os
import subprocess
class local_sdist(sdist):
    """Customized sdist hook - builds the ChangeLog file from VC first"""
    def run(self):
        """Generate ChangeLog from bzr history, then run the normal sdist."""
        if os.path.isdir('.bzr'):
            # We're in a bzr branch
            log_cmd = subprocess.Popen(["bzr", "log", "--gnu"],
                                       stdout=subprocess.PIPE)
            changelog = log_cmd.communicate()[0]
            with open("ChangeLog", "w") as changelog_file:
                changelog_file.write(changelog)
        # Delegate to the stock implementation (old-style base class, hence
        # the explicit sdist.run(self) call).
        sdist.run(self)
name = 'glance'
version = '0.1.3pre'
setup(
name=name,
version=version,
description='Glance',
license='Apache License (2.0)',
author='OpenStack, LLC.',
author_email='openstack-admins@lists.launchpad.net',
url='https://launchpad.net/glance',
packages=find_packages(exclude=['tests', 'bin']),
test_suite='nose.collector',
cmdclass={'sdist': local_sdist},
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 2.6',
'Environment :: No Input/Output (Daemon)',
],
install_requires=[], # removed for better compat
scripts=['bin/glance-api',
'bin/glance-registry'])
| #!/usr/bin/python
# Copyright (c) 2010 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup, find_packages
from setuptools.command.sdist import sdist
import os
import subprocess
class local_sdist(sdist):
"""Customized sdist hook - builds the ChangeLog file from VC first"""
def run(self):
if os.path.isdir('.bzr'):
# We're in a bzr branch
log_cmd = subprocess.Popen(["bzr", "log", "--gnu"],
stdout=subprocess.PIPE)
changelog = log_cmd.communicate()[0]
with open("ChangeLog", "w") as changelog_file:
changelog_file.write(changelog)
sdist.run(self)
name = 'glance'
version = '0.1.1'
setup(
name=name,
version=version,
description='Glance',
license='Apache License (2.0)',
author='OpenStack, LLC.',
author_email='openstack-admins@lists.launchpad.net',
url='https://launchpad.net/glance',
packages=find_packages(exclude=['tests', 'bin']),
test_suite='nose.collector',
cmdclass={'sdist': local_sdist},
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 2.6',
'Environment :: No Input/Output (Daemon)',
],
install_requires=[], # removed for better compat
scripts=['bin/glance-api',
'bin/glance-registry'])
| apache-2.0 | Python |
e123f31a2a863491bb6353336038e7d324475bc9 | Add setuptools for install | sc0tfree/netbyte | setup.py | setup.py | from setuptools import setup
setup(
name='netbyte',
version='0.4',
url='http://www.sc0tfree.com',
license='MIT License',
author='sc0tfree',
author_email='henry@sc0tfree.com',
description='Netbyte is a Netcat-style tool that facilitates probing proprietary TCP and UDP services. It is lightweight, fully interactive and provides formatted output in both hexadecimal and ASCII.',
keywords='utils cli netcat hexadecimal',
packages=['netbyte'],
install_requires=[
'colorama',
],
entry_points = {
"console_scripts" : ['netbyte = netbyte.netbyte:main']
},
)
| mit | Python | |
6e805995a165f923c1c4f71c163c64a245f9a3d5 | Add simple distutils script for modules | epierson9/multiphenotype_methods | setup.py | setup.py | from distutils.core import setup
setup(name='dimreducer',
version='1.0',
description='Dimension reduction methods',
py_modules=['dimreducer'],
)
setup(name='multiphenotype_utils',
version='1.0',
description='Utility functions for all methods',
py_modules=['multiphenotype_utils'],
)
setup(name='general_autoencoder',
version='1.0',
description='Autoencoder base class',
py_modules=['general_autoencoder'],
)
setup(name='standard_autoencoder',
version='1.0',
description='Standard autoencoder',
py_modules=['standard_autoencoder'],
)
setup(name='variational_autoencoder',
version='1.0',
description='VAE',
py_modules=['variational_autoencoder'],
)
setup(name='variational_age_autoencoder',
version='1.0',
description='VAE with age',
py_modules=['variational_age_autoencoder'],
)
| mit | Python | |
914b7cd2c94bddd1a68eb2293364633a9325506f | add a unit test | sdpython/ensae_teaching_cs,sdpython/ensae_teaching_cs,sdpython/ensae_teaching_cs,sdpython/ensae_teaching_cs,sdpython/ensae_teaching_cs,sdpython/ensae_teaching_cs | _unittests/ut_td_1a/test_diff.py | _unittests/ut_td_1a/test_diff.py | """
@brief test log(time=1s)
You should indicate a time in seconds. The program ``run_unittests.py``
will sort all test files by increasing time and run them.
"""
import sys
import os
import unittest
from difflib import SequenceMatcher
try:
import src
import pyquickhelper as skip_
except ImportError:
path = os.path.normpath(
os.path.abspath(
os.path.join(
os.path.split(__file__)[0],
"..",
"..")))
if path not in sys.path:
sys.path.append(path)
path = os.path.normpath(
os.path.abspath(
os.path.join(
os.path.split(__file__)[0],
"..",
"..",
"..",
"pyquickhelper",
"src")))
if path not in sys.path:
sys.path.append(path)
import src
import pyquickhelper.loghelper as skip_
from pyquickhelper.loghelper import fLOG
class TestDiff(unittest.TestCase):
    """Checks difflib.SequenceMatcher opcodes on two word sequences."""
    def test_diff(self):
        """The two sequences must differ in exactly 4 opcode spans."""
        fLOG(
            __file__,
            self._testMethodName,
            OutputPrint=__name__ == "__main__")
        seq1 = "ab ab abc abcd abc".split()
        seq2 = "ab ab abc abc abc adb".split()
        diff = SequenceMatcher(a=seq1, b=seq2)
        nb = 0
        # Count the opcode spans (equal/replace/insert/delete) and log each.
        for opcode in diff.get_opcodes():
            fLOG(opcode)
            nb += 1
        self.assertEqual(nb, 4)
if __name__ == "__main__":
    # NOTE(review): this demo references `diff`, `seq1` and `seq2`, which are
    # locals of TestDiff.test_diff, not module-level names -- running this
    # file directly raises NameError here. It looks pasted from an
    # interactive session; confirm before relying on it.
    from src.ensae_teaching_cs.helpers.pygame_helper import wait_event
    import pygame
    pygame.init()
    h = 20  # line height in pixels, also the base font size
    font = pygame.font.Font("freesansbold.ttf", h)
    font_small = pygame.font.Font("freesansbold.ttf", 3 * h // 4)
    size = 500, 500
    white = 255, 255, 255
    screen = pygame.display.set_mode(size)
    screen.fill(white)
    pos = 0  # current output line on screen
    for opcode in diff.get_opcodes():
        if opcode[0] == "delete":
            # Deleted words: smaller red text, taken from seq1.
            color = (200, 0, 0)
            for i in range(opcode[1], opcode[2]):
                text = seq1[i]
                text = font_small.render(text, True, color)
                screen.blit(text, (10, h * pos + h // 6))
                pos += 1
        else:
            # Equal words in black, inserted/replaced words in green,
            # taken from seq2.
            color = (0, 0, 0) if opcode[0] == "equal" else (0, 120, 0)
            for i in range(opcode[3], opcode[4]):
                text = seq2[i]
                text = font.render(text, True, color)
                screen.blit(text, (10, h * pos))
                pos += 1
    pygame.display.flip()
    wait_event(pygame)
if __name__ == "__main__":
unittest.main()
| mit | Python | |
c03411020db80b703260314236d96cc409398545 | Create variable.py | avsingh999/Learn_python | introduction/variable.py | introduction/variable.py | a = 10
A = 10  # distinct from `a` above: Python identifiers are case-sensitive
print(a)  # -> 10
print(A)  # -> 10
| mit | Python | |
3314f5d6ffb843a58e61856e726bd47e426538aa | Add spec_cleaner/__main__.py to allow running spec-cleaner without installing it. | plusky/spec-cleaner,plusky/spec-cleaner,plusky/spec-cleaner,plusky/spec-cleaner,plusky/spec-cleaner | spec_cleaner/__main__.py | spec_cleaner/__main__.py | from __future__ import absolute_import
import os
import sys
# If we are running from a wheel, add the wheel to sys.path.
if __package__ == '':
# __file__ is spec-cleaner-*.whl/spec_cleaner/__main__.py.
# First dirname call strips of '/__main__.py', second strips off '/spec_cleaner'.
# Resulting path is the name of the wheel itself.
# Add that to sys.path so we can import spec_cleaner.
path = os.path.dirname(os.path.dirname(__file__))
sys.path.insert(0, path)
import spec_cleaner
if __name__ == '__main__':
sys.exit(spec_cleaner.main())
| bsd-3-clause | Python | |
c54623d673d03d841d330e80d414a687770cc2a1 | Add setup.py | sussman/zvm,sussman/zvm | setup.py | setup.py | import setuptools
with open("README", "r", encoding="utf-8") as fh:
long_description = fh.read()
setuptools.setup(
name="zvm", # Replace with your own username
version="1.0.0",
author="Ben Collins-Sussman",
author_email="sussman@gmail.com",
description="A pure-python implementation of a Z-machine for interactive fiction",
long_description=long_description,
long_description_content_type="text/x-rst",
url="https://github.com/sussman/zvm",
project_urls={
"Bug Tracker": "https://github.com/sussman/zvm/issues",
},
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Topic :: Games/Entertainment",
],
packages=setuptools.find_packages(include=["zvm"]),
python_requires=">=3.6",
)
| bsd-3-clause | Python | |
ce5883c6a7a0c8c8f79c941f66288ce748b1b405 | Add setup.py | timothyryanwalsh/brunnhilde | setup.py | setup.py | from setuptools import setup
setup(
name = 'brunnhilde',
version = '1.4.0',
url = 'https://github.com/timothyryanwalsh/brunnhilde',
author = 'Tim Walsh',
author_email = 'timothyryanwalsh@gmail.com',
py_modules = ['brunnhilde'],
scripts = ['brunnhilde.py'],
description = 'A Siegfried-based digital archives reporting tool for directories and disk images',
keywords = 'archives reporting formats directories diskimages',
platforms = ['POSIX'],
classifiers = [
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: MIT License',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Developers',
'Natural Language :: English',
'Operating System :: MacOS',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX :: Linux',
'Topic :: Communications :: File Sharing',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.5',
'Topic :: Database',
'Topic :: System :: Archiving',
'Topic :: System :: Filesystems',
'Topic :: Utilities'
],
) | mit | Python | |
73e0bd62ac7a2d8b8322e21130ee7ec0659dc3cc | add setup.py | asseldonk/vispa-jsroot,asseldonk/vispa-jsroot,asseldonk/vispa-jsroot | setup.py | setup.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from distutils.core import setup
setup(
name = "jsroot",
version = "0.0.0",
description = "VISPA ROOT Browser - Inspect contents of root files.",
author = "VISPA Project",
author_email = "vispa@lists.rwth-aachen.de",
url = "http://vispa.physik.rwth-aachen.de/",
license = "GNU GPL v2",
packages = ["jsroot"],
package_dir = {"jsroot": "jsroot"},
package_data = {"jsroot": [
"workspace/*",
"static/*",
]},
# install_requires = ["vispa"],
)
| mit | Python | |
8e8678c2bc915e671f50bb6ea91288662053c280 | add setup file | laginha/yard,laginha/yard,laginha/yard | setup.py | setup.py | #!/usr/bin/env python
# encoding: utf-8
from setuptools import setup, find_packages
setup(
name = 'yard',
version = '0.1.0',
author = "Diogo Laginha",
url = 'https://github.com/laginha/yard',
description = "Yet Another Resftul Django-app",
packages = ['yard'],
install_requires = [],
extras_require = {},
)
| mit | Python | |
dd1810ddf1f85312c7a8b5ec23d4844b5ca63a13 | add data_filtering.py | cxmo/project-beta,berkeley-stat159/project-beta,rishizsinha/project-beta,yuchengdong/project-beta-1 | code/data_filtering.py | code/data_filtering.py | import numpy as np
import matplotlib.pyplot as plt
import os
import sys
import nitime
# Import the time-series objects:
from nitime.timeseries import TimeSeries
# Import the analysis objects:
from nitime.analysis import SpectralAnalyzer, FilterAnalyzer, NormalizationAnalyzer
os.getcwd()
os.chdir('..')
os.chdir('data')
## load data
data2d = np.load('masked_data_50k.npy')
## warning
print('Warning!! This scripts take at least 20 minutes to run.')
## plot data
plt.plot(data2d[7440,:])
## setting the TR
TR = 2
T = TimeSeries(data2d, sampling_interval=TR)
## examining the spectrum of the original data, before filtering.
# We do this by initializing a SpectralAnalyzer for the original data:
S_original = SpectralAnalyzer(T)
fig01 = plt.figure()
ax01 = fig01.add_subplot(1, 1, 1)
# ax01.plot(S_original.psd[0],
# S_original.psd[1][9],
# label='Welch PSD')
ax01.plot(S_original.spectrum_fourier[0],
np.abs(S_original.spectrum_fourier[1][9]),
label='FFT')
ax01.plot(S_original.periodogram[0],
S_original.periodogram[1][9],
label='Periodogram')
# ax01.plot(S_original.spectrum_multi_taper[0],
# S_original.spectrum_multi_taper[1][9],
# label='Multi-taper')
ax01.set_xlabel('Frequency (Hz)')
ax01.set_ylabel('Power')
plt.ylim((0,8000))
ax01.legend()
plt.savefig("../figure/FFT.jpg")
print('FFT.jpg saved')
## We start by initializing a FilterAnalyzer.
#This is initialized with the time-series containing the data
#and with the upper and lower bounds of the range into which we wish to filter
F = FilterAnalyzer(T, ub=0.15, lb=0.02)
# Initialize a figure to display the results:
fig02 = plt.figure()
ax02 = fig02.add_subplot(1, 1, 1)
# Plot the original, unfiltered data:
ax02.plot(F.data[7440], label='unfiltered')
ax02.plot(F.filtered_fourier.data[7440], label='Fourier')
ax02.legend()
ax02.set_xlabel('Time (TR)')
ax02.set_ylabel('Signal amplitude (a.u.)')
plt.savefig("../figure/data_filtering_on_smoothed_data.jpg")
print('data_filtering_on_smoothed_data.jpg')
np.save('filtered_data.npy',F.filtered_fourier.data)
print('filtered_data.npy saved')
F.filtered_fourier.data.shape
fdata = F.filtered_fourier.data
fdata.shape
v = np.var(fdata,axis=1)
plt.hist(v)
plt.xlabel("voxels")
plt.ylabel("variance")
plt.title("variance of the voxel activity filtering")
plt.savefig("../figure/voxel_variance_on_smoothed_data.jpg")
print('voxel_variance_on_smoothed_data.jpg') | bsd-3-clause | Python | |
e0fbd1d0e5e9b845ebfa6aa1739937a9974cbc87 | Add setup.py | MizukiSonoko/iroha-cli,MizukiSonoko/iroha-cli | setup.py | setup.py | #!/usr/bin/env python
from distutils.core import setup
setup(
name='iroha-ya-cli',
version='0.7',
description='Cli for hyperledger/iroha',
author='Sonoko Mizuki',
author_email='mizuki.sonoko@gmail.com',
packages=['src'],
entry_points={
'console_scripts':
'iroha-ya-cli = src.main:main'
},
) | apache-2.0 | Python | |
2484c0f9415694c99e5b1ac15ee4b64f12e839b6 | add migration to reflect schema updates to wagtailforms | torchbox/wagtaildemo,torchbox/wagtaildemo,torchbox/wagtaildemo,torchbox/wagtaildemo | demo/migrations/0005_auto_20160531_1736.py | demo/migrations/0005_auto_20160531_1736.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('demo', '0004_auto_20151019_1351'),
]
operations = [
migrations.AlterField(
model_name='formfield',
name='choices',
field=models.CharField(blank=True, max_length=512, verbose_name='choices', help_text='Comma separated list of choices. Only applicable in checkboxes, radio and dropdown.'),
),
migrations.AlterField(
model_name='formfield',
name='default_value',
field=models.CharField(blank=True, max_length=255, verbose_name='default value', help_text='Default value. Comma separated values supported for checkboxes.'),
),
migrations.AlterField(
model_name='formfield',
name='field_type',
field=models.CharField(choices=[('singleline', 'Single line text'), ('multiline', 'Multi-line text'), ('email', 'Email'), ('number', 'Number'), ('url', 'URL'), ('checkbox', 'Checkbox'), ('checkboxes', 'Checkboxes'), ('dropdown', 'Drop down'), ('radio', 'Radio buttons'), ('date', 'Date'), ('datetime', 'Date/time')], max_length=16, verbose_name='field type'),
),
migrations.AlterField(
model_name='formfield',
name='help_text',
field=models.CharField(blank=True, max_length=255, verbose_name='help text'),
),
migrations.AlterField(
model_name='formfield',
name='label',
field=models.CharField(help_text='The label of the form field', max_length=255, verbose_name='label'),
),
migrations.AlterField(
model_name='formfield',
name='required',
field=models.BooleanField(verbose_name='required', default=True),
),
migrations.AlterField(
model_name='formpage',
name='from_address',
field=models.CharField(blank=True, max_length=255, verbose_name='from address'),
),
migrations.AlterField(
model_name='formpage',
name='subject',
field=models.CharField(blank=True, max_length=255, verbose_name='subject'),
),
migrations.AlterField(
model_name='formpage',
name='to_address',
field=models.CharField(blank=True, max_length=255, verbose_name='to address', help_text='Optional - form submissions will be emailed to this address'),
),
]
| bsd-3-clause | Python | |
97d96097122ca50e84fcadd3a5c21ae51ccc8bf7 | Create Polarity_classifier.py | HaythemSahbani/Web-mining-university-project | src/Polarity_classifier.py | src/Polarity_classifier.py | import pickle
import itertools
from nltk.collocations import BigramCollocationFinder
from nltk.metrics import BigramAssocMeasures
from nltk.corpus import stopwords
class Polarity_classifier:
    """Tag tweets with a polarity label using pickled NLTK classifiers and
    one of three feature extractors (bigram, bag-of-words, stopword-filtered).
    """
    def __init__(self):
        pass
    def bigram_word_feats(self, words, score_fn=BigramAssocMeasures.chi_sq, n=200):
        """Feature dict of all words plus the n best bigrams by score_fn."""
        bigram_finder = BigramCollocationFinder.from_words(words)
        bigrams = bigram_finder.nbest(score_fn, n)
        return dict([(ngram, True) for ngram in itertools.chain(words, bigrams)])
    def bag_of_word_feats(self, words):
        """Plain bag-of-words feature dict: every word maps to True."""
        return dict([(word, True) for word in words])
    def stopword_filtered_word_feats(self, words):
        """Bag-of-words feature dict with English stopwords removed."""
        stopset = set(stopwords.words('english'))
        return dict([(word, True) for word in words if word not in stopset])
    def set_polarity_bigram_classifier(self, json_tweets):
        """Classify each tweet with the pickled bigram model; mutates
        json_tweets in place by setting tweet["polarity"]."""
        # NOTE(review): unpickling executes arbitrary code -- only load
        # classifier files from a trusted source.
        f = open('bigram_classifier.pickle')
        classifier = pickle.load(f)
        f.close()
        for tweet in json_tweets:
            tweet["polarity"] = classifier.classify(self.bigram_word_feats(tweet["text"].split()))
    def set_polarity_bag_classifier(self, json_tweets):
        """Classify each tweet with the bag-of-words model (in place)."""
        f = open('bag_classifier.pickle')
        classifier = pickle.load(f)
        f.close()
        for tweet in json_tweets:
            tweet["polarity"] = classifier.classify(self.bag_of_word_feats(tweet["text"].split()))
    def set_polarity_stop_classifier(self, json_tweets):
        """Classify each tweet with the stopword-filtered model (in place)."""
        f = open('stop_word_classifier.pickle')
        classifier = pickle.load(f)
        f.close()
        for tweet in json_tweets:
            tweet["polarity"] = classifier.classify(self.stopword_filtered_word_feats(tweet["text"].split()))
| mit | Python | |
2c39bc6e1586dcacc1d23d9be643d1f27f035eac | Add wsgi file | toulibre/agendadulibreshow | agendadulibre/agendadulibre.wsgi | agendadulibre/agendadulibre.wsgi | import sys
# Make the project importable for the WSGI server.
sys.path.insert(0, '/var/www/agendadulibre/agendadulibre')
#sys.path.insert(0, os.curdir)

# Activate the deployment virtualenv inside this interpreter
# (virtualenv's activate_this.py idiom; Python 2 execfile).
activate_this = '/home/numahell/.virtualenvs/flask/local/bin/activate_this.py'
execfile(activate_this, dict(__file__=activate_this))

# WSGI servers look for a module-level callable named ``application``.
from app import app as application
| mit | Python | |
f9c68d3c250e3a83ab1d0ed9e0760c0631dca869 | add setup.py | sephii/fabliip,liip/fabliip,sephii/fabliip,liip/fabliip | setup.py | setup.py | #!/usr/bin/env python
from setuptools import find_packages, setup
from fabliip import __version__
# Packaging metadata; the version is single-sourced from the fabliip
# package itself (imported above).
setup(
    name='fabliip',
    version=__version__,
    packages=find_packages(),
    description='Set of Fabric functions to help deploying websites.',
    author='Sylvain Fankhauser',
    author_email='sylvain.fankhauser@liip.ch',
    url='https://github.com/sephii/fabliip',
    install_requires=['fabric'],
)
| mit | Python | |
054be2f9a06c0da3b7fcf5d40985ce8055f3f447 | add setup.py | Battleroid/bark | setup.py | setup.py | from setuptools import setup, find_packages
# Packaging metadata for the bark static-site generator.
setup(
    name='bark',
    version='1.0',
    url='https://github.com/battleroid/bark',
    description='Single file static site generator.',
    license='MIT License',
    keywords='bark static site generator jinja blog python markdown',
    author='Casey Weed',
    author_email='me@caseyweed.net',
    download_url='https://github.com/battleroid/bark/tarball/master',
    packages = find_packages(),
    install_requires=[
        'Jinja2>=2.8',
        'python-frontmatter>=0.2.1',
        'python-slugify>=1.1.3',
        'python-dateutil>=2.4.2',
        'misaka>=1.0.2'
    ],
    setup_requires=[],
    # Installs the ``bark`` command line entry point.
    entry_points={
        'console_scripts': ['bark = bark.bark:main']
    },
    platforms=['any'],
    classifiers=[
        'Programming Language :: Python',
        'Topic :: Internet',
        'Topic :: Internet :: WWW/FTP',
        'Topic :: Internet :: WWW/HTTP :: Site Management',
        'Topic :: Text Processing',
        'Topic :: Text Processing :: Markup',
        'Topic :: Text Processing :: Markup :: HTML'
    ]
)
| mit | Python | |
c6c6594cda35aaa15f1efb9f336548671b0028c5 | Add generic serializer tool for plugins to use | sassoftware/rmake3,sassoftware/rmake3,sassoftware/rmake3 | rmake/lib/twisted_extras/tools.py | rmake/lib/twisted_extras/tools.py | #
# Copyright (c) rPath, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from twisted.internet import defer
class Serializer(object):
    """Runs submitted callables one at a time, in order, via a DeferredLock.

    A call submitted with collapsible=True is skipped entirely if more
    calls have queued up behind it by the time it acquires the lock (its
    Deferred then fires with None) -- useful for coalescing redundant
    refresh-style work.
    """

    def __init__(self):
        self._lock = defer.DeferredLock()
        # Maps each pending acquire() Deferred -> its "collapsible" flag;
        # len() of this dict is the queue depth the collapse test inspects.
        self._waiting = {}

    def call(self, func, collapsible=False):
        """Schedule func() under the lock; return a Deferred of its result."""
        d = self._lock.acquire()
        self._waiting[d] = collapsible
        @d.addCallback
        def _locked(_):
            if collapsible and len(self._waiting) > 1:
                # Superseded
                return
            return func()
        # addBoth: the lock must be released and the queue entry removed
        # whether func() succeeded or failed; the result/failure is passed
        # through unchanged to the caller's Deferred.
        @d.addBoth
        def _unlock(result):
            self._lock.release()
            del self._waiting[d]
            return result
        return d
| apache-2.0 | Python | |
b4f5b5da5e7a7266e7f908b6ffc975ea3f1f0657 | Add setup.py | jackromo/mathLibPy | setup.py | setup.py | from distutils.core import setup
# Minimal distutils packaging metadata for the mathlibpy package.
setup(name='MathLibPy',
      version='0.0.0',
      description='Math library for Python',
      author='Jack Romo',
      author_email='sharrackor@gmail.com',
      packages=['mathlibpy'],
      )
| mit | Python | |
49178742953cc63b066d2142d9e2b3f0f2e20e17 | Tweak setup.py so that it may run even when fired from different locations, as suggested by Maarten Damen. | sprymix/python-dateutil,sprymix/dateutil,pganssle/dateutil-test-codecov,jenshnielsen/dateutil,adamgreig/python-dateutil,mjschultz/dateutil,Bachmann1234/dateutil,emsoftware/python-dateutil,abalkin/dateutil,pganssle/dateutil-test-codecov,abalkin/dateutil | setup.py | setup.py | #!/usr/bin/python
from os.path import isfile, join
import glob
import os
import re
from setuptools import setup
# Remove a stale MANIFEST so sdist regenerates it.
if isfile("MANIFEST"):
    os.unlink("MANIFEST")

# Resolve the source tree relative to this script so the version can be
# read even when setup.py is invoked from a different working directory.
TOPDIR = os.path.dirname(__file__) or "."

# Single-source the version string from dateutil/__init__.py.
VERSION = re.search('__version__ = "([^"]+)"',
                    open(TOPDIR + "/dateutil/__init__.py").read()).group(1)

setup(name="python-dateutil",
      version = VERSION,
      description = "Extensions to the standard python 2.3+ datetime module",
      author = "Gustavo Niemeyer",
      author_email = "gustavo@niemeyer.net",
      url = "http://labix.org/python-dateutil",
      license = "PSF License",
      long_description =
"""\
The dateutil module provides powerful extensions to the standard
datetime module, available in Python 2.3+.
""",
      packages = ["dateutil", "dateutil.zoneinfo"],
      # Ship the bundled zoneinfo tarball with the package.
      package_data={"": ["*.tar.gz"]},
      include_package_data=True,
      zip_safe=False,
      )
| #!/usr/bin/python
from os.path import isfile, join
import glob
import os
import re
from setuptools import setup
# Remove a stale MANIFEST so sdist regenerates it.
if isfile("MANIFEST"):
    os.unlink("MANIFEST")

# Single-source the version string from dateutil/__init__.py.
# NOTE(review): this path is relative to the current working directory,
# so running setup.py from elsewhere fails (fixed in a later revision).
VERSION = re.search('__version__ = "([^"]+)"',
                    open("dateutil/__init__.py").read()).group(1)

setup(name="python-dateutil",
      version = VERSION,
      description = "Extensions to the standard python 2.3+ datetime module",
      author = "Gustavo Niemeyer",
      author_email = "gustavo@niemeyer.net",
      url = "http://labix.org/python-dateutil",
      license = "PSF License",
      long_description =
"""\
The dateutil module provides powerful extensions to the standard
datetime module, available in Python 2.3+.
""",
      packages = ["dateutil", "dateutil.zoneinfo"],
      package_data={"": ["*.tar.gz"]},
      include_package_data=True,
      zip_safe=False,
      )
| bsd-3-clause | Python |
65c9335775688a15b344be4762ee7c75bd66bdb2 | Add a setup.py file | arturfelipe/cities | setup.py | setup.py | import os
import codecs
from setuptools import setup, find_packages
def read(fname):
    """Return the UTF-8 decoded contents of *fname*, resolved relative to
    this file's directory."""
    file_path = os.path.join(os.path.dirname(__file__), fname)
    # Close the handle deterministically instead of leaking it until GC.
    with codecs.open(file_path, encoding='utf-8') as f:
        return f.read()
# Packaging metadata; the long description is the README, loaded via the
# read() helper above.
setup(
    name='cities',
    version='0.0.1',
    description='Load data from cities and countries all over the world',
    author='Artur Sousa',
    author_email='arturfelipe.sousa@gmail.com',
    url='https://github.com/arturfelipe/cities',
    packages=find_packages(exclude=['demo']),
    install_requires=[
        'requests',
    ],
    include_package_data=True,
    zip_safe=False,
    long_description=read('README.md'),
    license='MIT',
    keywords='cities countries'
)
| mit | Python | |
f57605c4f37fb29a93f06d165b9eb69fee2771b9 | Add fake setup.py (#1620) | encode/uvicorn,encode/uvicorn | setup.py | setup.py | import sys
from setuptools import setup
# Fail loudly for the legacy install path.  The message previously said
# "httpx" -- copy-pasted from httpx's setup.py -- but this file belongs
# to uvicorn.
sys.stderr.write(
    """
===============================
Unsupported installation method
===============================
uvicorn no longer supports installation with `python setup.py install`.
Please use `python -m pip install .` instead.
"""
)
sys.exit(1)

# The below code will never execute, however GitHub is particularly
# picky about where it finds Python packaging metadata.
# See: https://github.com/github/feedback/discussions/6456
#
# To be removed once GitHub catches up.
setup(
    name="uvicorn",
    install_requires=[
        "click>=7.0",
        "h11>=0.8",
        "typing-extensions;python_version < '3.8'",
    ],
)
| bsd-3-clause | Python | |
7c863017bd687a06c63a5c60c53c6efca80d6b0e | Add setup script | mdegreg/discord-toastlogger | setup.py | setup.py | from setuptools import setup
# Packaging metadata; ``toastbot`` is installed as an executable via the
# legacy ``scripts`` mechanism (the file must exist next to setup.py).
setup(
    name='discord-toastlogger',
    version='0.1.0',
    scripts=['toastbot'],
    url='https://github.com/mdegreg/discord-toastlogger',
    license='MIT',
    install_requires=[
        'discord'
    ]
) | mit | Python | |
1b0b91e9445e080e790571a00e767f31f5035fd1 | Add setup.py | efficios/pytsdl | setup.py | setup.py | #!/usr/bin/env python3
#
# The MIT License (MIT)
#
# Copyright (c) 2014 Philippe Proulx <eepp.ca>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import sys
from setuptools import setup
# make sure we run Python 3+ here
v = sys.version_info
if v.major < 3:
    sys.stderr.write('Sorry, pytsdl needs Python 3\n')
    sys.exit(1)

packages = [
    'pytsdl',
]

setup(name='pytsdl',
      # distutils/setuptools expect the version as a *string*; the former
      # float literal 0.1 relied on implicit stringification and breaks
      # metadata generation with modern setuptools.
      version='0.1',
      description='TSDL parser implemented entirely in Python 3',
      author='Philippe Proulx',
      author_email='eeppeliteloop@gmail.com',
      url='https://github.com/eepp/pytsdl',
      packages=packages)
| mit | Python | |
c40a65c46b075881222f5c9ccebccfb0c627aa51 | Create setup.py | Mester/demo-day-vikings,Mester/demo-day-vikings | setup.py | setup.py | unlicense | Python | ||
e87d736c83d89129f4a152163993cb5c173dddd4 | Add setup | TimeWz667/Kamanian | setup.py | setup.py | from setuptools import setup
# Minimal packaging metadata; only the ``dzdy`` package is shipped.
setup(name='Kamanian',
      version='1.00',
      packages=['dzdy'],
      install_requires=['pandas', 'numpy', 'scipy', 'pcore', 'matplotlib', 'networkx'])
| mit | Python | |
05477b14e19d1e2d0483405bf3558f7d80fb9b60 | Switch to setuptools. | misja/esmre,b-long/esmre,wharris/esmre,wharris/libesm,wharris/esmre,misja/esmre,b-long/esmre,wharris/libesm | setup.py | setup.py | # setup.py - distutils configuration for esm and esmre modules
# Copyright (C) 2007 Tideway Systems Limited.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
# USA
from setuptools import setup, Extension
# C accelerator module built from the Aho-Corasick sources.
module1 = Extension("esm",
                    #define_macros=[("HEAP_CHECK", 1)],
                    sources = ['src/esm.c',
                               'src/aho_corasick.c',
                               'src/ac_heap.c',
                               'src/ac_list.c'])

setup (name = "esmre",
       version = '0.2.1',
       description = 'Regular expression accelerator',
       long_description = " ".join("""
       Modules used to accelerate execution of a large collection of regular
       expressions using the Aho-Corasick algorithms.
       """.strip().split()),
       classifiers = [
           'Development Status :: 4 - Beta',
           'Intended Audience :: Developers',
           # Each trove classifier must be a single string; the license
           # classifier was previously split across two list items, which
           # produced two classifiers that are both invalid on PyPI.
           'License :: OSI Approved :: '
           'GNU Library or Lesser General Public License (LGPL)',
           'Operating System :: POSIX',
           'Programming Language :: C',
           'Programming Language :: Python',
           'Topic :: Software Development :: Libraries :: Python Modules',
           'Topic :: Text Processing :: Indexing'
       ],
       install_requires=['setuptools'],
       author = 'Will Harris',
       author_email = 'w.harris@tideway.com',
       url = 'http://code.google.com/p/esmre/',
       license = 'GNU LGPL',
       platforms = ['POSIX'],
       ext_modules = [module1],
       package_dir = {'': 'src'},
       py_modules = ["esmre"])
| # setup.py - distutils configuration for esm and esmre modules
# Copyright (C) 2007 Tideway Systems Limited.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
# USA
from distutils.core import setup, Extension
module1 = Extension("esm",
#define_macros=[("HEAP_CHECK", 1)],
sources = ['src/esm.c',
'src/aho_corasick.c',
'src/ac_heap.c',
'src/ac_list.c'])
setup (name = "esmre",
version = '0.2.1',
description = 'Regular expression accelerator',
long_description = " ".join("""
Modules used to accelerate execution of a large collection of regular
expressions using the Aho-Corasick algorithms.
""".strip().split()),
author = 'Will Harris',
author_email = 'w.harris@tideway.com',
url = 'http://code.google.com/p/esmre/',
license = 'GNU LGPL',
platforms = ['POSIX'],
ext_modules = [module1],
package_dir = {'': 'src'},
py_modules = ["esmre"])
| lgpl-2.1 | Python |
a896d7f6b3886a73789f8aff079ab983af38e29f | Add lava server extension loader | OSSystems/lava-server,OSSystems/lava-server,Linaro/lava-server,Linaro/lava-server,Linaro/lava-server,Linaro/lava-server,OSSystems/lava-server | lava_server/extension.py | lava_server/extension.py | # Copyright (C) 2010, 2011 Linaro Limited
#
# Author: Zygmunt Krynicki <zygmunt.krynicki@linaro.org>
#
# This file is part of LAVA Server.
#
# LAVA Server is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License version 3
# as published by the Free Software Foundation
#
# LAVA Server is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with LAVA Server. If not, see <http://www.gnu.org/licenses/>.
from abc import ABCMeta, abstractmethod
import logging
class ILavaServerExtension(object):
    """
    Interface for LAVA Server extensions.
    """

    # Python 2 metaclass hook: makes this an abstract base class so that
    # subclasses must implement the abstract methods below.
    __metaclass__ = ABCMeta

    @abstractmethod
    def contribute_to_settings(self, settings):
        """
        Add elements required to initialize this extension into the project
        settings module.
        """
# TODO: Publish API objects for xml-rpc
# TODO: Publish menu items
# TODO: Publish URLs (perhaps it's better to do that explicitly rather than
# asking everyone to shove it into contribute_to_settings()
class ExtensionImportError(Exception):
    """Raised when an extension cannot be imported or is not a valid one."""

    def __init__(self, extension, message):
        self.extension = extension
        self.message = message

    def __repr__(self):
        # The previous version referenced the bare names ``extension`` and
        # ``message``, raising NameError whenever repr() was taken.
        return "ExtensionImportError(extension={0!r}, message={1!r})".format(
            self.extension, self.message)
extension, message)
class ExtensionLoader(object):
    """
    Helper to load extensions
    """

    def __init__(self, settings):
        # Project settings module/object extensions may contribute to.
        self.settings = settings

    def find_extensions(self):
        """Yield "module:class" strings naming candidate extensions."""
        # TODO: Implement for real
        yield "demo_app.extension:DemoExtension"

    def load_extensions(self):
        """Import and install every discovered extension."""
        for name in self.find_extensions():
            self.install_extension(name)

    def install_extension(self, name):
        """Import, instantiate and apply one extension.

        Failures are logged (with traceback) instead of raised, so one
        broken extension does not prevent the others from loading.
        """
        try:
            extension_cls = self.import_extension(name)
            extension = extension_cls()
            extension.contribute_to_settings(self.settings)
        except ExtensionImportError as ex:
            logging.exception("Unable to import extension %r: %s", name, ex.message)
        except Exception:
            logging.exception("Unable to install extension %r", name)

    def import_extension(self, name):
        """
        Import extension specified by the given name.
        Name must be a string like "module:class". Module may be a
        package with dotted syntax to address specific module.
        @return Imported extension class implementing ILavaServerExtension
        @raises ExtensionImportError
        """
        try:
            module_or_package_name, class_name = name.split(":", 1)
        except ValueError:
            raise ExtensionImportError(
                name, "Unable to split extension into module and class")
        try:
            # fromlist=[''] makes __import__ return the leaf module rather
            # than the top-level package.
            module = __import__(module_or_package_name, fromlist=[''])
        except ImportError as ex:
            raise ExtensionImportError(
                name, "Unable to import required modules")
        try:
            extension_cls = getattr(module, class_name)
        except AttributeError:
            raise ExtensionImportError(
                name, "Unable to access class component")
        if not issubclass(extension_cls, ILavaServerExtension):
            raise ExtensionImportError(
                name, "Class does not implement ILavaServerExtension interface")
        return extension_cls
| agpl-3.0 | Python | |
44bdeb2d5bf8c7877eb1e92cda65f6c844a93642 | add models. | fle-internal/content-pack-maker | contentpacks/models.py | contentpacks/models.py | from peewee import Model, SqliteDatabase, CharField, TextField, BooleanField,\
ForeignKeyField, PrimaryKeyField, Using, IntegerField, \
OperationalError
class Item(Model):
    """A node in the content tree stored in the content-pack database."""

    title = CharField()
    description = TextField()
    # Availability flag; semantics defined by the code that populates it.
    available = BooleanField()
    files_complete = IntegerField(default=0)
    total_files = IntegerField(default=0)
    # Node kind string -- presumably "Topic"/"Video"/"Exercise"-like values;
    # not constrained here (verify against the writer side).
    kind = CharField()
    # Self-referential tree link; NULL for the root node.
    parent = ForeignKeyField("self", null=True, index=True, related_name="children")
    id = CharField(index=True)
    pk = PrimaryKeyField(primary_key=True)
    slug = CharField()
    # Unique path of the node within the tree.
    path = CharField(index=True, unique=True)
    extra_fields = CharField(null=True)
    youtube_id = CharField(null=True)
    size_on_disk = IntegerField(default=0)
    remote_size = IntegerField(default=0)

    def __init__(self, *args, **kwargs):
        # kwargs = parse_model_data(kwargs)
        super(Item, self).__init__(*args, **kwargs)
class AssessmentItem(Model):
    """A single assessment (exercise) item keyed by its upstream id."""

    id = CharField(max_length=50, primary_key=True)
    item_data = TextField()  # A serialized JSON blob
    author_names = CharField(max_length=200)  # A serialized JSON list
| bsd-2-clause | Python | |
d72f9f06afcf5d1c177afa418a7c4bf60af8fb75 | Support mm:ss. | denarced/since | since.py | since.py | #!/usr/bin/env python3
import datetime
import re
import sys
def main(strTime):
    """Return the elapsed time since the most recent occurrence of *strTime*.

    strTime is an "HH:MM" wall-clock string.  If that time has not yet
    happened today, it is taken to refer to yesterday, so the result is
    always a non-negative timedelta smaller than 24 hours.

    Raises ValueError if strTime does not start with an HH:MM time (the
    previous version crashed with AttributeError on ``None``), or if the
    digits are out of range for datetime.
    """
    now = datetime.datetime.now()
    pattern = r'(\d\d):(\d\d)'
    match = re.match(pattern, strTime)
    if match is None:
        raise ValueError('expected a HH:MM time, got {0!r}'.format(strTime))
    time = datetime.datetime(
        now.year,
        now.month,
        now.day,
        int(match.group(1)),
        int(match.group(2)))
    diff = now - time
    if diff.total_seconds() < 0:
        # Given time is still in the future today: count from yesterday.
        diff += datetime.timedelta(1)
    return diff
# Command-line entry point: ``since.py HH:MM`` prints the elapsed time.
if __name__ == '__main__':
    if len(sys.argv) < 2:
        print('Usage {0} {{time}}'.format(sys.argv[0]))
    else:
        print(main(sys.argv[1]))
| mit | Python | |
1d25676049994db266129b1a1c98cec3acbba0ca | Add missing file on last merge | frictionlessdata/goodtables.io,frictionlessdata/goodtables.io,frictionlessdata/goodtables.io,frictionlessdata/goodtables.io | goodtablesio/models/subscription.py | goodtablesio/models/subscription.py | import logging
import datetime
from sqlalchemy import (
Column, Unicode, DateTime, Boolean, ForeignKey)
from sqlalchemy.orm import relationship
from goodtablesio.models.base import Base, BaseModelMixin, make_uuid
log = logging.getLogger(__name__)
class Subscription(Base, BaseModelMixin):
    """A user's subscription to a billing plan."""

    __tablename__ = 'subscriptions'

    # Random UUID primary key generated at insert time.
    id = Column(Unicode, primary_key=True, default=make_uuid)
    plan_id = Column(Unicode, ForeignKey('plans.id'))
    user_id = Column(Unicode, ForeignKey('users.id'))
    # Whether the subscription is currently in force.
    active = Column(Boolean, default=True)
    # Lifecycle timestamps; ``started`` defaults to creation time.
    started = Column(DateTime(timezone=True), default=datetime.datetime.utcnow)
    expires = Column(DateTime(timezone=True))
    finished = Column(DateTime(timezone=True))

    plan = relationship(
        'Plan', primaryjoin='Subscription.plan_id == Plan.id')
| agpl-3.0 | Python | |
9b0278530c2c4f32dd2a751fb4f8b93c8c34a3ea | add arch tool for waf backend. | cournape/Bento,cournape/Bento,cournape/Bento,cournape/Bento | bento/backends/waf_tools/arch.py | bento/backends/waf_tools/arch.py | import re
from waflib.Tools.c_config import SNIP_EMPTY_PROGRAM
from waflib.Configure import conf
ARCHS = ["i386", "x86_64", "ppc", "ppc64"]
FILE_MACHO_RE = re.compile("Mach-O.*object ([a-zA-Z_0-9]+)")
@conf
def check_cc_arch(conf):
    """Record in env.ARCH_CC every architecture the C compiler can target.

    Each candidate is probed by compiling an empty program with
    ``-arch <name>``; the env is stashed/reverted around every probe so no
    flags leak into subsequent checks.
    """
    env = conf.env
    archs = []
    for arch in ARCHS:
        env.stash()
        try:
            env.append_value('CFLAGS', ['-arch', arch])
            env.append_value('LINKFLAGS', ['-arch', arch])
            try:
                # Typo fix in the user-visible message: "suport" -> "support".
                conf.check_cc(fragment=SNIP_EMPTY_PROGRAM,
                              msg="Checking for %r support" % arch)
                archs.append(arch)
            except conf.errors.ConfigurationError:
                pass
        finally:
            env.revert()
    env["ARCH_CC"] = archs
#def detect_arch(filename):
@conf
def check_cc_default_arch(conf):
    """Detect the C compiler's default target arch into DEFAULT_CC_ARCH."""
    start_msg = "Checking for default CC arch"
    fragment = SNIP_EMPTY_PROGRAM
    output_var = "DEFAULT_CC_ARCH"
    return _check_default_arch(conf, start_msg, fragment, output_var)
@conf
def check_cxx_default_arch(conf):
    """Detect the default target arch into DEFAULT_CXX_ARCH.

    NOTE(review): despite the CXX name this relies on _check_default_arch's
    defaults, which compile a C file with "c cprogram" features -- confirm
    that is intended.
    """
    start_msg = "Checking for default CXX arch"
    fragment = SNIP_EMPTY_PROGRAM
    output_var = "DEFAULT_CXX_ARCH"
    return _check_default_arch(conf, start_msg, fragment, output_var)
@conf
def check_fc_default_arch(conf):
    """Detect the Fortran compiler's default arch into DEFAULT_FC_ARCH."""
    start_msg = "Checking for default FC arch"
    # Minimal Fortran test program, reproduced verbatim.
    # NOTE(review): 'test.f' implies fixed-form Fortran, where statements
    # normally start in column 7 -- confirm compilers accept this fragment.
    fragment = """\
program main
end
"""
    output_var = "DEFAULT_FC_ARCH"
    compile_filename = 'test.f'
    features = "fc fcprogram"
    return _check_default_arch(conf, start_msg, fragment, output_var, compile_filename, features)
@conf
def _check_default_arch(conf, start_msg, fragment, output_var, compile_filename="test.c", features="c cprogram"):
    """Compile *fragment*, run file(1) on the produced object and store the
    detected Mach-O architecture name into conf.env[output_var].

    Calls conf.fatal() if file(1)'s output does not match FILE_MACHO_RE.
    """
    # Locate file(1) once and cache it in the environment.
    if "FILE_BIN" not in conf.env:
        file_bin = conf.find_program(["file"], var="FILE_BIN")
    else:
        file_bin = conf.env.FILE_BIN
    conf.start_msg(start_msg)
    # Build the probe; the resulting object file is inspected below.
    # (Unused locals ``env`` and ``ret`` from the previous version removed.)
    conf.check_cc(fragment=fragment, compile_filename=compile_filename, features=features)
    task_gen = conf.test_bld.groups[0][0]
    obj_filename = task_gen.tasks[0].outputs[0].abspath()
    out = conf.cmd_and_log([file_bin, obj_filename])
    m = FILE_MACHO_RE.search(out)
    if m is None:
        conf.fatal("Could not determine arch from output %r" % out)
    else:
        default_arch = m.group(1)
        conf.env[output_var] = default_arch
        conf.end_msg(default_arch)
| bsd-3-clause | Python | |
856f855e10588ddbe2ad5053cc5d7366c76459a8 | Implement basic perception | joshleeb/PerceptronVis | percept/perceptron.py | percept/perceptron.py | import random
def rand_w():
    """Return a random weight drawn uniformly from [-1, 1], rounded to
    three decimal places."""
    weight = random.uniform(-1, 1)
    return round(weight, 3)
class Perceptron:
    """A single linear unit with two inputs, trained with the perceptron rule.

    Unspecified weights default to fresh random values *per instance*.
    Previously the ``w0=rand_w()`` defaults were evaluated once, at class
    definition time, so every default-constructed Perceptron (and every
    run re-using the module) started from the very same weights.
    """

    def __init__(self, w0=None, w1=None, w2=None, learning_rate=0.1):
        # Draw a new random weight for each unspecified parameter at
        # construction time instead of reusing the def-time defaults.
        self.w0 = rand_w() if w0 is None else w0
        self.w1 = rand_w() if w1 is None else w1
        self.w2 = rand_w() if w2 is None else w2
        self.learning_rate = learning_rate

    def train(self, y, x1, x2):
        """Apply one perceptron-rule update for the labelled sample (x1, x2, y)."""
        y_hat = self.predict(x1, x2)
        if y_hat < y:
            self.w0 += self.learning_rate
            self.w1 += self.learning_rate * x1
            self.w2 += self.learning_rate * x2
        if y_hat > y:
            self.w0 -= self.learning_rate
            self.w1 -= self.learning_rate * x1
            self.w2 -= self.learning_rate * x2
        self.round_weights()

    def evaluate(self, x1, x2):
        """Return the raw activation w0 + w1*x1 + w2*x2."""
        return self.w0 + self.w1 * x1 + self.w2 * x2

    def activate(self, y):
        """Step activation: 1 when the raw activation is non-negative."""
        return int(y >= 0)

    def predict(self, x1, x2):
        """Return the thresholded class (0 or 1) for the input point."""
        return self.activate(self.evaluate(x1, x2))

    def round_weights(self, dp=3):
        """Round all weights to *dp* decimal places."""
        self.w0 = round(self.w0, dp)
        self.w1 = round(self.w1, dp)
        self.w2 = round(self.w2, dp)

    def get_weights(self):
        """Return the weights as a (w0, w1, w2) tuple."""
        return (self.w0, self.w1, self.w2)

    def get_plot_fn(self):
        """Return f(x) giving the y-coordinate of the decision boundary at x.

        Derived from w0 + w1*x + w2*y = 0; raises ZeroDivisionError when
        w2 == 0 (vertical boundary).
        """
        def fn(x):
            return -self.w1 / self.w2 * x - self.w0 / self.w2
        return fn
| mit | Python | |
e5f82b794ee2e6054deb15433c7dc7261146f181 | Add merge migration | Johnetordoff/osf.io,aaxelb/osf.io,baylee-d/osf.io,Johnetordoff/osf.io,saradbowman/osf.io,mfraezz/osf.io,adlius/osf.io,adlius/osf.io,caseyrollins/osf.io,Johnetordoff/osf.io,baylee-d/osf.io,mattclark/osf.io,HalcyonChimera/osf.io,HalcyonChimera/osf.io,caseyrollins/osf.io,adlius/osf.io,cslzchen/osf.io,Johnetordoff/osf.io,icereval/osf.io,brianjgeiger/osf.io,mattclark/osf.io,adlius/osf.io,brianjgeiger/osf.io,pattisdr/osf.io,cslzchen/osf.io,mattclark/osf.io,CenterForOpenScience/osf.io,cslzchen/osf.io,caseyrollins/osf.io,felliott/osf.io,saradbowman/osf.io,aaxelb/osf.io,felliott/osf.io,sloria/osf.io,sloria/osf.io,erinspace/osf.io,icereval/osf.io,erinspace/osf.io,brianjgeiger/osf.io,pattisdr/osf.io,aaxelb/osf.io,felliott/osf.io,cslzchen/osf.io,brianjgeiger/osf.io,CenterForOpenScience/osf.io,sloria/osf.io,HalcyonChimera/osf.io,erinspace/osf.io,icereval/osf.io,CenterForOpenScience/osf.io,baylee-d/osf.io,mfraezz/osf.io,CenterForOpenScience/osf.io,mfraezz/osf.io,mfraezz/osf.io,HalcyonChimera/osf.io,aaxelb/osf.io,felliott/osf.io,pattisdr/osf.io | osf/migrations/0112_merge_20180614_1454.py | osf/migrations/0112_merge_20180614_1454.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-06-14 19:54
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated *merge* migration: it carries no schema operations and
    # only joins two divergent migration branches of the ``osf`` app.

    dependencies = [
        ('osf', '0107_merge_20180604_1232'),
        ('osf', '0111_auto_20180605_1240'),
    ]

    operations = [
    ]
| apache-2.0 | Python | |
5f051f2ae1b105d6cc58d1cac760cb5d20908c3b | Support rudimentary translation service from IIT Bombay via web API. | Ezhil-Language-Foundation/open-tamil,Ezhil-Language-Foundation/open-tamil,Ezhil-Language-Foundation/open-tamil,arcturusannamalai/open-tamil,arcturusannamalai/open-tamil,arcturusannamalai/open-tamil,arcturusannamalai/open-tamil,Ezhil-Language-Foundation/open-tamil,arcturusannamalai/open-tamil,arcturusannamalai/open-tamil,Ezhil-Language-Foundation/open-tamil,arcturusannamalai/open-tamil,Ezhil-Language-Foundation/open-tamil,Ezhil-Language-Foundation/open-tamil,Ezhil-Language-Foundation/open-tamil,Ezhil-Language-Foundation/open-tamil | valai/translate.py | valai/translate.py | # * coding: utf8 *
#
# (C) 2020 Muthiah Annamalai <ezhillang@gmail.com>
#
# Uses the IIT-Bombay service on the web.
#
import json
import requests
from urllib.parse import quote
from functools import lru_cache
# lru_cache's second positional argument is the boolean ``typed`` flag; the
# previous ``@lru_cache(1024, str)`` passed the str class there by mistake.
# Only a maxsize was intended.
@lru_cache(maxsize=1024)
def en2ta(text):
    """translate from English to Tamil (memoized)"""
    return IITB_translator('en', 'ta', text)
# See en2ta: only a maxsize was intended, not the ``typed`` flag.
@lru_cache(maxsize=1024)
def ta2en(text):
    """translate from Tamil to English (memoized)"""
    return IITB_translator('ta', 'en', text)
def IITB_translator(src_lang, dest_lang, _text):
    """Call the IIT-Bombay web translation service and return the translation.

    The text is URL-quoted into the request path; the response is expected
    to be JSON keyed by the lowercase destination language code.
    """
    text = quote(_text)
    URLFMT = 'http://www.cfilt.iitb.ac.in/indicnlpweb/indicnlpws/translate/{0}/{1}/{2}/'
    url = URLFMT.format(src_lang.lower(), dest_lang.lower(), text)
    # A timeout keeps a dead/slow server from blocking the caller forever;
    # requests.get() waits indefinitely by default.
    response = requests.get(url, timeout=30)
    return response.json()[dest_lang.lower()]
if __name__ == "__main__":
    # Smoke test: one sentence in each direction (requires network access).
    print(ta2en('கவிதை மிக அழகாக இருக்கிறது'))
    print(en2ta('world is not flat'))
| mit | Python | |
25056e74093f01d68af14277da6089903b617ee6 | Create Career.py | imreeciowy/wfrp-gen | Career.py | Career.py | class Career:
def __init__(career_name, advances, skills_to_take, talents_to_take, career_trappings, race_dependent)
self.career_name = career_name
self.advances = advances
self.skills_to_take = skills_to_take
self.talents_to_take = talents_to_take
self.career_trappings = career_trappings
self.race_dependent = race_dependent
| mit | Python | |
64ab32daba1ddbe7e8b56850188dab3f8ca42286 | Add TCP check | NicolasLM/sauna,bewiwi/sauna,bewiwi/sauna,NicolasLM/sauna | sauna/plugins/ext/tcp.py | sauna/plugins/ext/tcp.py | import socket
from sauna.plugins import (Plugin, PluginRegister)
my_plugin = PluginRegister('TCP')
@my_plugin.plugin()
class Tcp(Plugin):
    """Sauna plugin that checks plain TCP connectivity to a host:port."""

    @my_plugin.check()
    def request(self, check_config):
        # Open (and immediately close) a TCP connection.  Any failure --
        # refused, unreachable, timeout -- is reported as CRITICAL with the
        # exception text; the broad ``except`` is deliberate: a monitoring
        # check must report failures, not crash on them.
        try:
            with socket.create_connection((check_config['host'],
                                           check_config['port']),
                                          timeout=check_config['timeout']):
                pass
        except Exception as e:
            return Plugin.STATUS_CRIT, "{}".format(e)
        else:
            return Plugin.STATUS_OK, "OK"

    @staticmethod
    def config_sample():
        # Example YAML configuration snippet for this plugin.
        return '''
# Tcp
- type: TCP
  checks:
    - type: request
      host: localhost
      port: 11211
      timeout: 5
'''
| bsd-2-clause | Python | |
d95ce2570989e1b18c313efb1f95f611a9a2cc80 | add color_histogram_matcher for objects | pazeshun/jsk_apc,pazeshun/jsk_apc,pazeshun/jsk_apc,pazeshun/jsk_apc,pazeshun/jsk_apc | jsk_2015_05_baxter_apc/node_scripts/color_histogram_matcher.py | jsk_2015_05_baxter_apc/node_scripts/color_histogram_matcher.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
from __future__ import division
import rospy
import cv2
import numpy as np
from sensor_msgs.msg import Image
from jsk_2014_picking_challenge.srv import ObjectMatch, ObjectMatchResponse
from jsk_recognition_msgs.msg import ColorHistogram
query_features = None
target_features = None
class ColorHistogramMatcher(object):
    """ROS node that scores how well the current query color histograms
    match pre-extracted per-object histograms.
    """

    def __init__(self):
        self.query_histogram = {}
        self.target_histograms = None
        rospy.Service('/semi/color_histogram_matcher', ObjectMatch,
                      self.handle_colorhist_matcher)
        # input is color_histograms extracted by camera_image
        rospy.Subscriber('~input/histogram/red', ColorHistogram,
                         self.cb_histogram_red)
        rospy.Subscriber('~input/histogram/green', ColorHistogram,
                         self.cb_histogram_green)
        rospy.Subscriber('~input/histogram/blue', ColorHistogram,
                         self.cb_histogram_blue)

    def handle_colorhist_matcher(self, req):
        """Handler of service request"""
        # load_target_histograms() previously took no argument although it
        # was called with req.objects here; the signatures now agree.
        self.load_target_histograms(req.objects)
        return ObjectMatchResponse(probabilities=self.get_probabilities())

    def load_target_histograms(self, objects):
        """Load extracted color histogram features for the given objects"""
        rospy.loginfo('Loading object color histogram features')
        # self.target_histograms = ...
        raise NotImplementedError

    @staticmethod
    def coefficient(query_hist, target_hist, method=0):
        """Compute coefficient of 2 histograms with several methods"""
        # Was a bare function in the class body (no self) invoked
        # unqualified from get_probabilities(), which raised NameError at
        # runtime; it is now a staticmethod called via self.
        if method == 0:
            return (1. + cv2.compareHist(query_hist, target_hist,
                                         cv2.cv.CV_COMP_CORREL)) / 2.

    def get_probabilities(self):
        """Get probabilities of color matching"""
        query_histogram = self.query_histogram
        target_histograms = self.target_histograms
        obj_coefs = []
        # Fixed loop-variable typo: it was ``target_histgram`` while the
        # body read ``target_histogram`` (NameError).
        for obj_name, target_histogram in target_histograms.iteritems():
            # loop over RGB channels and keep the best per-channel score
            coefs = []
            for q_hist, t_hist in zip(
                    query_histogram.values(), target_histogram.values()):
                coefs.append(self.coefficient(q_hist, t_hist))
            obj_coefs.append(max(coefs))
        obj_coefs = np.array(obj_coefs)
        # change coefficient array to probability array
        if obj_coefs.sum() == 0:
            return obj_coefs
        else:
            return obj_coefs / obj_coefs.sum()

    def cb_histogram_red(self, msg):
        """Get input red histogram"""
        self.query_histogram['red'] = msg.histogram

    def cb_histogram_green(self, msg):
        """Get input green histogram"""
        self.query_histogram['green'] = msg.histogram

    def cb_histogram_blue(self, msg):
        """Get input blue histogram"""
        self.query_histogram['blue'] = msg.histogram
def main():
    # Instantiate the matcher node and hand control to the ROS event loop.
    m = ColorHistogramMatcher()
    rospy.spin()


if __name__ == '__main__':
    main()
| bsd-3-clause | Python | |
74197adab35815bc1168f661d6f5cf5c829afc99 | Add example | mopemope/pykt,mopemope/pykt | example/serialize.py | example/serialize.py | from pykt import KyotoTycoon, set_serializer, set_deserializer
from cPickle import dumps, loads
# Route all values through cPickle so arbitrary Python objects can be
# stored in Kyoto Tycoon (Python 2 example script).
set_serializer(dumps)
set_deserializer(loads)

key = "A" * 12
# NOTE(review): ``val`` is defined but never used below -- looks like a
# leftover from an earlier version of the example.
val = "B" * 1024
d = dict(name="John", no=1)

# Round-trip a dict through the server and verify it deserializes intact.
db = KyotoTycoon()
db.open()
print db.set(key, d)
ret = db.get(key)
assert(d == ret)
db.close()
| bsd-3-clause | Python | |
9c0a74194e6546eac6dbaec000599a623d525909 | Create drivers.py | ariegg/webiopi-drivers,ariegg/webiopi-drivers | chips/digital/pca9698/drivers.py | chips/digital/pca9698/drivers.py |
DRIVERS["pca9698" ] = ["PCA9698"]
| apache-2.0 | Python | |
d6492629e3c837374082cac71034a7bad36291bc | Test of commit | BecquerelRecipe/Trunk | Parser.py | Parser.py | if __name__ == '__main__':
main() | apache-2.0 | Python | |
bef69c38103e8ef937fea41a0a58c934b34f4281 | add yaml syntax checker script | bigswitch/bosi,bigswitch/bosi | bosi/rhosp_resources/yamls/yaml_syntax_check.py | bosi/rhosp_resources/yamls/yaml_syntax_check.py | #!/usr/bin/env python
import os
import sys
import yaml
EXIT_ERROR = -1
YAML_FILE_EXT = ".yaml"
def help():
""" Print how to use the script """
print "Usage: %s <directory>" % sys.argv[0]
def check_yaml_syntax(f):
    """ Check the syntax of the given YAML file.
        return: True if valid, False otherwise
    """
    with open(f, 'r') as stream:
        try:
            # Full parse: any syntax problem raises YAMLError.
            # NOTE(review): yaml.load without an explicit Loader also
            # constructs objects; yaml.safe_load would suffice for a pure
            # syntax check and is safe on untrusted input.
            yaml.load(stream)
        except yaml.YAMLError as exc:
            print "%s: Invalid YAML syntax.\n%s\n" % (f, exc)
            return False
    return True
def main():
    """ Find all YAML files in the input directory and validate their syntax
    """
    if len(sys.argv) < 2:
        help()
        sys.exit(EXIT_ERROR)
    yaml_dir = sys.argv[1]
    if not os.path.isdir(yaml_dir):
        print "ERROR: Invalid directory %s" % yaml_dir
        sys.exit(EXIT_ERROR)
    all_valid = True
    for root, dirs, files in os.walk(yaml_dir):
        for f in files:
            # Substring match: also catches names like "x.yaml.bak".
            if YAML_FILE_EXT in f:
                fname = root + "/" + f
                valid = check_yaml_syntax(fname)
                if valid:
                    print "%s: Valid YAML syntax" % fname
                else:
                    all_valid = False
                    # NOTE(review): this only breaks the inner loop, so the
                    # remaining directories are still walked after a failure.
                    break
    if all_valid:
        print "All files have valid YAML syntax"
    else:
        print "Some files have invalid YAML syntax"
if __name__ == "__main__":
main()
| apache-2.0 | Python | |
f09c45cde66dd8da07511e1105af14ffd41799b0 | add a command to trigger a bulk sync | dstufft/jutils | crate_project/apps/crate/management/commands/trigger_bulk_sync.py | crate_project/apps/crate/management/commands/trigger_bulk_sync.py | from django.core.management.base import BaseCommand
from pypi.tasks import bulk_synchronize
class Command(BaseCommand):
    """Management command that queues a full PyPI bulk synchronization."""
    def handle(self, *args, **options):
        # Fire-and-forget: .delay() enqueues the task (presumably Celery)
        # and returns immediately without waiting for the sync to finish.
        bulk_synchronize.delay()
        print "Bulk Synchronize Triggered"
| bsd-2-clause | Python | |
27ed68923579c5afff0c70b025deb8b73d448aa8 | Set calculation type of all indicators to Number | toladata/TolaActivity,toladata/TolaActivity,toladata/TolaActivity,toladata/TolaActivity | indicators/migrations/0013_set_all_calculation_type_to_numeric.py | indicators/migrations/0013_set_all_calculation_type_to_numeric.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2018-07-04 09:56
from __future__ import unicode_literals
from django.db import migrations
from ..models import Indicator
def set_calculation_type(apps, schema_editor):
    # Data migration: force every existing Indicator to the numeric
    # calculation type.  NOTE(review): this uses the live model import
    # instead of the historical apps.get_model(...) -- safe only while the
    # current model still matches the schema at this migration point.
    Indicator.objects.all().update(
        calculation_type=Indicator.CALC_TYPE_NUMERIC)
class Migration(migrations.Migration):
dependencies = [
('indicators', '0012_auto_20180704_0256'),
]
operations = [
migrations.RunPython(set_calculation_type),
]
| apache-2.0 | Python | |
834516acf7b5cfbbb0f728f8b725bea120b5f5b3 | Add python version of the post-receive hook | Humbedooh/gitpubsub | post_receive.py | post_receive.py | import re
import re
import os
import sys
import json
from subprocess import Popen, PIPE
from httplib2 import Http

# git post-receive hook: extract the newly pushed commit's metadata with
# `git show` and PUT it as JSON to the local gitpubsub service.
postURL = "http://localhost:2069/json"
pwd = os.getcwd()

# git invokes the hook with <old-rev> <new-rev> <ref-name>.
if len(sys.argv) <= 3:
    print("Usage: post-receive [old] [new] [ref]")
    exit()
old, new, ref = sys.argv[1:4]

# The project name is the last path component of the repository directory.
m = re.match(r"^.*/([^/]+)$", pwd)
if not m:
    # BUG FIX: the original printed `project` here, which is not assigned
    # until after this check and therefore raised a NameError.
    print("Could not figure out which project this is :(", pwd)
    exit()
project = m.group(1)
print("Posting commit message for project " + project)

# Ask git for the pushed commit (headers, log message, changed file names).
process = Popen(["git", "show", "--name-only", new], stdout=PIPE)
exit_code = os.waitpid(process.pid, 0)
output = process.communicate()[0]
# BUG FIX: removed the hard-coded sample `output` left over from debugging,
# which silently discarded the real `git show` result.

commit = {'ref': ref, 'repository': "git", 'hash': new, 'project': project}
# Split the header block from the body at the FIRST blank line.
# BUG FIX: maxsplit must be 1 -- with 2, unpacking into two names fails
# whenever the commit body itself contains a blank line.
headers, commit['log'] = output.split("\n\n", 1)
# Parse "Name: value" header lines into a dict.
parsed = dict(re.findall(r"(?P<name>[^:\n]+): (?P<value>[^\r\n]+)", headers))
author = re.match(r"^(.+) <(.+)>$", parsed.get("Author", "?? <??@??>"))
if author:
    commit['author'] = author.group(1)
    commit['email'] = author.group(2)
else:
    commit['author'] = "Unknown"
    commit['email'] = "unknown@unknown"

data = json.dumps(commit) + "\n\n"
print(data)
Http().request(postURL, "PUT", data)
| apache-2.0 | Python | |
6e28da4e1a1d8ad794f12d9782b0e2dd54119dc4 | add mysql module | lytofb/freemarker_doc_crawler | db_mysql_module.py | db_mysql_module.py | __author__ = 'root'
import pymysql;
import sqlalchemy;
import threading;
from time import clock;
class SQLiteWraper(object):
def __init__(self):
# self.lock = threading.RLock()
self.engine = sqlalchemy.create_engine('mysql+pymysql://developer:developer@172.28.217.66/xixiche?charset=utf8')
def get_conn(self):
conn = self.engine.connect();
return conn
def conn_close(self,conn=None):
conn.close()
def time_counter(func):
def count_second(self,*args,**kwargs):
start=clock()
rs = func(self,*args,**kwargs)
finish=clock()
print("%.2f" % (finish-start))
return rs
return count_second
def conn_trans(func):
def connection(self,*args,**kwargs):
# self.lock.acquire()
conn = self.get_conn()
kwargs['conn'] = conn
rs = func(self,*args,**kwargs)
self.conn_close(conn)
# self.lock.release()
return rs
return connection
@time_counter
@conn_trans
def batch(self,sqllist,conn=None):
trans = conn.begin()
try:
for sql in sqllist:
print("executing ..."+sql)
conn.execute(sql)
trans.commit()
except pymysql.IntegrityError as e:
#print e
return -1
except Exception as e:
print (e)
return -2
return 0
@conn_trans
def execute(self,sql,conn=None):
trans = conn.begin()
try:
result = conn.execute(sql)
trans.commit()
except pymysql.IntegrityError as e:
#print e
return -1
except Exception as e:
print (e)
return -2
return result
@time_counter
def sqrt(self,a, eps=1e-10):
if a == 0.0 or a == 1.0:
return a
x = 1.0
y = x - (x*x-a)/(2*x)
while not (-eps < y-x < eps):
x = y
y = x - (x*x-a)/(2*x)
return x
if __name__=='__main__':
db = SQLiteWraper();
# data_merchant = db.execute("select * from data_merchant")
# for row in data_merchant:
# print(row.items())
print(db.sqrt(100))
testsql = [];
for i in range(1,1):
testsql.append("insert into test (name,test_bigint) values ('hehe','"+str(i)+"')")
print("sqllist prepared")
db.batch(testsql)
| mit | Python | |
35748678aaea24355d5207ae26d10dd455a47820 | implement HostTestsSuite | oVirt/ovirt-engine-sdk-tests | src/test/hosttestssuite.py | src/test/hosttestssuite.py |
from src.test.abstractovirttestssuite import AbstractOvirtTestsSuite
from ovirtsdk.xml import params
from src.infrastructure.annotations import conflicts
from src.resource.hostresourcemanager import HostResourceManager
class HostTestsSuite(AbstractOvirtTestsSuite):
    """Test suite exercising host resource operations via HostResourceManager."""
    # Shared, name-mangled class-level manager instance used by all tests.
    __hostResourceManager = HostResourceManager()
    def getHostResourceManager(self):
        return HostTestsSuite.__hostResourceManager
    ####### pre/post test run #############
    def setUp(self):
        pass
    def tearDown(self):
        pass
    ######## pre/post class run #############
    @classmethod
    def setUpClass(cls):
        pass
    @classmethod
    def tearDownClass(cls):
        pass
    # ############### test/s ###############
    @conflicts.resources([params.Host])
    def testCreate(self):
        """Create a host and verify it can be fetched back afterwards."""
        # verify add() response
        new_host = self.getHostResourceManager().add()
        self.assertNotEqual(new_host, None, 'Host create has failed!')
        # verify get of newly created cluster
        # NOTE(review): comment says "cluster" but this fetches the host.
        host = self.getHostResourceManager().get(get_only=True)
        self.assertNotEqual(host, None, 'Fetch of host post create has failed!')
| apache-2.0 | Python | |
5f22ca2b9d6c9f0e55e208c25d410fe196ef619d | add closure report tool | moyogo/tachyfont,moyogo/tachyfont,googlei18n/TachyFont,googlefonts/TachyFont,googlefonts/TachyFont,moyogo/tachyfont,bstell/TachyFont,moyogo/tachyfont,googlefonts/TachyFont,bstell/TachyFont,googlefonts/TachyFont,bstell/TachyFont,moyogo/tachyfont,googlei18n/TachyFont,googlei18n/TachyFont,googlei18n/TachyFont,googlefonts/TachyFont,googlei18n/TachyFont,bstell/TachyFont,bstell/TachyFont | build_time/src/closure_report.py | build_time/src/closure_report.py | #!/usr/bin/python
# -*- coding: UTF-8 -*-
"""
Copyright 2015 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
import codecs
import locale
import os
from os import path
from StringIO import StringIO
import zipfile
# Put parent directory of gae_server in PYTHONPATH if you have problems
from gae_server import incremental_fonts_utils
def getfile(data_path, name):
  """Return the path of *name* inside *data_path*.

  Raises ValueError if the entry does not exist or is not a regular file.
  """
  candidate = path.join(data_path, name)
  if path.isfile(candidate):
    return candidate
  raise ValueError('no %s file in %s' % (name, data_path))
def gettext(filepath):
  """Return the entire text content of *filepath*."""
  with open(filepath) as handle:
    return handle.read()
class DirFetch(object):
def __init__(self, dirpath):
self.dirpath = dirpath
def get(self, name):
return StringIO(gettext(getfile(self.dirpath, name)))
class ZipFetch(object):
def __init__(self, zippath):
self.zf = zipfile.ZipFile(zippath)
def get(self, name):
return StringIO(self.zf.open(name, 'r').read())
def get_tachy_cmap_and_creader(data_path):
  """Load the cmap and closure reader for a font build dir or TachyFont jar.

  Builds them from the 'codepoints', 'gids', 'closure_idx' and
  'closure_data' members of the data source and returns the tuple
  (cmap, ClosureReader).
  """
  # if it's a dir, assume it's the temp directory for a font
  if path.isdir(data_path):
    fetcher = DirFetch(data_path)
  elif data_path.endswith('TachyFont.jar'):
    fetcher = ZipFetch(data_path)
  else:
    raise ValueError('%s is not a dir or TachyFont jar' % data_path)
  cp_file = fetcher.get('codepoints')
  gid_file = fetcher.get('gids')
  cidx_file = fetcher.get('closure_idx')
  cdata_file = fetcher.get('closure_data')
  # _build_cmap is 'private' but it's python, so...
  cmap = incremental_fonts_utils._build_cmap(cp_file, gid_file)
  creader = incremental_fonts_utils.ClosureReader(cidx_file, cdata_file)
  return cmap, creader
def resolve_datapath(root, name):
  """Return the unique entry under *root* whose name contains *name*.

  Only directories and files ending in 'TachyFont.jar' are considered.
  Raises ValueError when *root* is not a directory or when *name* is
  ambiguous; returns None when nothing matches.
  """
  result = None
  if not path.isdir(root):
    raise ValueError('%s is not a directory' % root)
  for f in os.listdir(root):
    # Substring match against the entry name.
    if f.find(name) == -1:
      continue
    fpath = path.join(root, f)
    if not path.isdir(fpath) and not f.endswith('TachyFont.jar'):
      continue
    if result:
      raise ValueError('\'%s\' matches more than one item in %s' % (name, root))
    result = fpath
  return result
def show_closures(root, fontdir, text_list):
datapath = resolve_datapath(root, fontdir)
cmap, creader = get_tachy_cmap_and_creader(datapath)
for text in text_list:
show_closure(cmap, creader, text)
def show_closure(cmap, creader, text):
  """Print, per codepoint in *text*, the glyph-closure ids from *creader*.

  Repeated codepoints and codepoints missing from *cmap* are reported but
  not expanded.  (Python 2: uses print statements and unichr.)
  """
  # Assume text is utf-8, possibly with unicode escapes.
  text = cleanstr(text)
  cps = [ord(cp) for cp in text]
  print 'text:', text
  print 'length: ', len(text)
  seen_cps = set()
  seen_gids = set()
  n = 0
  for cp in cps:
    prefix = r'%2d] %6x (%s)' % (n, cp, unichr(cp))
    n += 1
    if cp in seen_cps:
      print '%s: (seen)' % prefix
      continue
    seen_cps.add(cp)
    if not cp in cmap:
      print '%s: <not in cmap>' % prefix
      continue
    gids = creader.read(cmap[cp])
    print '%s: %s' % (prefix, ', '.join([str(gid) for gid in sorted(gids)]))
    seen_gids.update(gids)
  # Summary counts over the whole input string.
  print 'unique cps:', len(seen_cps)
  print 'unique gids:', len(seen_gids)
def cleanstr(text):
  """Decode utf-8 *text* to unicode, interpreting literal backslash-u and
  backslash-U escape sequences that survived the first decode.
  """
  text = codecs.decode(text, 'utf-8')
  uetext = codecs.encode(text, 'unicode-escape')
  # Un-double the backslashes so literal escapes in the input are
  # interpreted by the final unicode-escape decode below.
  uetext = uetext.replace(r'\\u', r'\u')
  uetext = uetext.replace(r'\\U', r'\U') # remember, python requires 8 hex digits...
  return codecs.decode(uetext, 'unicode-escape')
def main():
default_root = path.abspath(path.relpath('..', __file__))
default_text = ('\U0001f150人類社会のすべての構成員の固有の尊厳と平等で譲るこ'
'とのできない権利とを承認することは、世界における')
parser = argparse.ArgumentParser()
parser.add_argument('-r', '--root', help='dir containing font build files (default %s)' %
default_root, default=default_root, metavar='path')
parser.add_argument('-n', '--name', help='(partial) name of font dir', required=True)
parser.add_argument('-t', '--texts', help='text to dump', nargs='*',
default=[default_text])
args = parser.parse_args()
show_closures(args.root, args.name, args.texts)
if __name__ == '__main__':
locale.setlocale(locale.LC_COLLATE, 'en_US.UTF-8')
main()
| apache-2.0 | Python | |
e20c403ff196f18dd1416eaf811b427c37b113ba | Add a table editor demo featuring a checkbox column. | burnpanck/traits,burnpanck/traits | enthought/traits/ui/demo/Advanced/Table_editor_with_checkbox_column.py | enthought/traits/ui/demo/Advanced/Table_editor_with_checkbox_column.py | """
This shows a table editor which has a checkbox column in addition to normal
data columns.
"""
# Imports:
from random \
import randint
from enthought.traits.api \
import HasStrictTraits, Str, Int, Float, List, Bool, Property
from enthought.traits.ui.api \
import View, Item, TableEditor
from enthought.traits.ui.table_column \
import ObjectColumn
from enthought.traits.ui.extras.checkbox_column \
import CheckboxColumn
# Create a specialized column to set the text color differently based upon
# whether or not the player is in the lineup:
class PlayerColumn ( ObjectColumn ):
def get_text_color ( self, object ):
return [ 'light grey', 'black' ][ object.in_lineup ]
# The 'players' trait table editor:
player_editor = TableEditor(
sortable = False,
configurable = False,
columns = [ CheckboxColumn( name = 'in_lineup', label = 'In Lineup' ),
PlayerColumn( name = 'name', editable = False ),
PlayerColumn( name = 'at_bats', label = 'AB' ),
PlayerColumn( name = 'strike_outs', label = 'SO' ),
PlayerColumn( name = 'singles', label = 'S' ),
PlayerColumn( name = 'doubles', label = 'D' ),
PlayerColumn( name = 'triples', label = 'T' ),
PlayerColumn( name = 'home_runs', label = 'HR' ),
PlayerColumn( name = 'walks', label = 'W' ),
PlayerColumn( name = 'average', label = 'Ave',
editable = False, format = '%0.3f' ) ]
)
# 'Player' class:
class Player ( HasStrictTraits ):
    """A baseball player's lineup status and batting statistics."""

    # Trait definitions:
    in_lineup = Bool( True )
    name = Str
    at_bats = Int
    strike_outs = Int
    singles = Int
    doubles = Int
    triples = Int
    home_runs = Int
    walks = Int
    # Computed batting average; Traits wires Property to _get_average below.
    average = Property( Float )
    def _get_average ( self ):
        """ Computes the player's batting average from the current statistics.
        """
        # Guard against division by zero before any at-bats are recorded.
        if self.at_bats == 0:
            return 0.0
        return float( self.singles + self.doubles +
                      self.triples + self.home_runs ) / self.at_bats
class Team ( HasStrictTraits ):
# Trait definitions:
players = List( Player )
# Trait view definitions:
traits_view = View(
Item( 'players',
show_label = False,
editor = player_editor
),
title = 'Baseball Team Roster Demo',
width = 0.5,
height = 0.5,
resizable = True
)
def random_player ( name ):
    """ Generates and returns a random player.
    """
    p = Player( name = name,
                strike_outs = randint( 0, 50 ),
                singles = randint( 0, 50 ),
                doubles = randint( 0, 20 ),
                triples = randint( 0, 5 ),
                home_runs = randint( 0, 30 ),
                walks = randint( 0, 50 ) )
    # Derive at_bats so it always exceeds the sum of all recorded outcomes.
    return p.set( at_bats = p.strike_outs + p.singles + p.doubles + p.triples +
                  p.home_runs + randint( 100, 200 ) )
# Sample team:
demo = view = Team( players = [ random_player( name ) for name in [
'Dave', 'Mike', 'Joe', 'Tom', 'Dick', 'Harry', 'Dirk', 'Fields', 'Stretch'
] ] )
if __name__ == "__main__":
demo.configure_traits()
| bsd-3-clause | Python | |
d0ebf20c9f6bbcfbb55649092b7a35ed82b05dac | Add module show_source.py in gallery (used to show source code of examples) | kikocorreoso/brython,kikocorreoso/brython,brython-dev/brython,brython-dev/brython,brython-dev/brython,kikocorreoso/brython | www/gallery/show_source.py | www/gallery/show_source.py | from browser import ajax, document, html, bind, window, highlight
btn = html.BUTTON("Show source code", Class="nice")
height = window.innerHeight
width = window.innerWidth
css = """
/* colors for highlighted Python code */
span.python-string{
color: #27d;
}
span.python-comment{
color: #019;
}
span.python-keyword{
color: #950;
}
span.python-builtin{
color: #183;
}
em {
color:#339;
font-family:courier
}
strong {
color:#339;
font-family:courier;
}
button.nice{
margin-right: 15%;
color: #fff;
background: #7ae;
border-width: 2px;
border-style: solid;
border-radius: 5px;
border-color: #45b;
text-align: center;
font-size: 15px;
padding: 6px;
}
"""
document.body <= html.STYLE(css)
state = "off"
div_style = {"position": "absolute",
"left": int(width * 0.5),
"paddingLeft": "10px",
"backgroundColor": "#ccc",
"borderStyle": "solid",
"borderColor" : "#888",
"borderWidth": "0px 0px 0px 3px",
"color": "#113",
"font-size": "12px"
}
def show_source(text):
    """Render *text* as highlighted source in a right-hand side panel DIV."""
    div = html.DIV(style=div_style, Class="show_source")
    # Find the indentation of the first non-blank line...
    indent = None
    lines = text.split("\n")
    for line in lines:
        if line.strip():
            _indent = len(line) - len(line.lstrip())
            if indent is None:
                indent = _indent
                break
    # ...and strip that common prefix from every line (a simple dedent).
    if indent:
        text = "\n".join(line[indent:] for line in lines)
    # Brython overloads <= as "append child node".
    div <= highlight.highlight(text)
    document <= div
    # Keep the panel in the right half of the window with a small margin.
    div.left = max(int(width / 2),
        width - div.offsetWidth - int(0.02 * width))
div.left = max(int(width / 2),
width - div.offsetWidth - int(0.02 * width))
@bind(btn, "click")
def show(ev):
    """Toggle the source panels: remove them all, or build one per script."""
    global state
    if state == "on":
        # Panels are visible: remove every one and flip back to "off".
        for div in document.select(".show_source"):
            div.remove()
        state = "off"
        btn.text = "Show source code"
    else:
        scripts = document.select("script")
        for script in scripts:
            if not script.src:
                # Inline script: its text is available immediately.
                show_source(script.text)
            else:
                # External Python script (excluding this helper itself):
                # fetch asynchronously and render in the Ajax callback.
                if script.src.endswith(".py") and \
                        not script.src.endswith("show_source.py"):
                    req = ajax.get(script.src, oncomplete=show_external)
        state = "on"
        btn.text = "Hide source code"
def show_external(req):
"""Used after an Ajax request for external script."""
show_source(req.text)
href = window.location.href
href = href[href.rfind("/") + 1:]
document.body.insertBefore(html.DIV(btn, style={"text-align": "right"}),
document.body.children[0])
div_style["top"] = btn.offsetTop + int(1.5 * btn.offsetHeight)
| bsd-3-clause | Python | |
0c24d31e08fe7e72745b3273eec0b5bfe7e9a07a | Add script to manage availability annotations | apple/swift-system,apple/swift-system,apple/swift-system | Utilities/expand-availability.py | Utilities/expand-availability.py | #!/usr/bin/env python3
# This script uses the file `availability-macros.def` to automatically
# add/remove `@available` attributes to declarations in Swift sources
# in this package.
#
# In order for this to work, ABI-impacting declarations need to be annotated
# with special comments in the following format:
#
# /*System 0.0.2*/
# public func greeting() -> String {
# "Hello"
# }
#
# The script adds full availability incantations to these comments. It can run
# in one of two modes:
#
# By default, `expand-availability.py` expands availability macros within the
# comments. This is useful during package development to cross-reference
# availability across `SystemPackage` and the ABI-stable `System` module that
# ships in Apple's OS releases:
#
# /*System 0.0.2, @available(macOS 12.0, iOS 15.0, watchOS 8.0, tvOS 15.0, *)*/
# public func greeting() -> String {
# "Hello"
# }
#
# `expand-availability.py --attributes` adds actual availability attributes.
# This is used by maintainers to build ABI stable releases of System on Apple's
# platforms:
#
# /*System 0.0.2*/@available(macOS 12.0, iOS 15.0, watchOS 8.0, tvOS 15.0, *)
# public func greeting() -> String {
# "Hello"
# }
#
# The script recognizes these attributes and updates/removes them on each run,
# so you can run the script to enable/disable attributes without worrying about
# duplicate attributes.
import os
import os.path
import fileinput
import re
import sys
import argparse
versions = {
"System 0.0.1": "macOS 11.0, iOS 14.0, watchOS 7.0, tvOS 14.0",
"System 0.0.2": "macOS 12.0, iOS 15.0, watchOS 8.0, tvOS 15.0",
"System 1.1.0": "macOS 9999, iOS 9999, watchOS 9999, tvOS 9999",
}
parser = argparse.ArgumentParser(description="Expand availability macros.")
parser.add_argument("--attributes", help="Add @available attributes",
action="store_true")
args = parser.parse_args()
def swift_sources_in(path):
    """Recursively collect the paths of all .swift files under *path*."""
    sources = []
    for root, _dirs, filenames in os.walk(path):
        sources.extend(
            os.path.join(root, name)
            for name in filenames
            if os.path.splitext(name)[1] == ".swift"
        )
    return sources
macro_pattern = re.compile(
r"/\*(System [^ *]+)(, @available\([^)]*\))?\*/(@available\([^)]*\))?")
sources = swift_sources_in("Sources") + swift_sources_in("Tests")
for line in fileinput.input(files=sources, inplace=True):
match = re.search(macro_pattern, line)
if match:
system_version = match.group(1)
expansion = versions[system_version]
if expansion is None:
raise ValueError("{0}:{1}: error: Unknown System version '{0}'"
.format(fileinput.filename(), fileinput.lineno(),
system_version))
if args.attributes:
replacement = "/*{0}*/@available({1}, *)".format(system_version, expansion)
else:
replacement = "/*{0}, @available({1}, *)*/".format(system_version, expansion)
line = line[:match.start()] + replacement + line[match.end():]
print(line, end="")
| apache-2.0 | Python | |
aad264f065bb07c5c811db7372f1ca981308ad45 | Create start_azure_vm.py | azureautomation/runbooks | Utility/Python/start_azure_vm.py | Utility/Python/start_azure_vm.py | #!/usr/bin/env python2
"""
Starts Azure resource manager virtual machines in a subscription.
This Azure Automation runbook runs on Azure to start Azure vms in a subscription.
If no arguments are specified, then all VMs that are currently stopped are started.
If a resource group is specified, then all VMs in the resource group are started.
If a resource group and VM are specified, then that specific VM is started.
Args:
groupname (-g) - Resource group name.
vmname (-v) - virtual machine name
Starts the virtual machines
Example 1:
start_azure_vm.py -g <resourcegroupname> -v <vmname>
start_azure_vm.py -g <resourcegroupname>
start_azure_vm.py
Changelog:
2017-09-11 AutomationTeam:
-initial script
"""
import threading
import getopt
import sys
import azure.mgmt.resource
import azure.mgmt.storage
import azure.mgmt.compute
import automationassets
# Max number of VMs to process at a time
_MAX_THREADS = 20
# Returns a credential based on an Azure Automation RunAs connection dictionary
def get_automation_runas_credential(runas_connection):
    """Return a credential for authenticating against Azure resources,
    built from the RunAs connection's service principal certificate.
    """
    from OpenSSL import crypto
    from msrestazure import azure_active_directory
    import adal
    # Get the Azure Automation RunAs service principal certificate
    cert = automationassets.get_automation_certificate("AzureRunAsCertificate")
    sp_cert = crypto.load_pkcs12(cert)
    pem_pkey = crypto.dump_privatekey(crypto.FILETYPE_PEM, sp_cert.get_privatekey())
    # Get run as connection information for the Azure Automation service principal
    application_id = runas_connection["ApplicationId"]
    thumbprint = runas_connection["CertificateThumbprint"]
    tenant_id = runas_connection["TenantId"]
    # Authenticate with service principal certificate
    resource = "https://management.core.windows.net/"
    authority_url = ("https://login.microsoftonline.com/" + tenant_id)
    context = adal.AuthenticationContext(authority_url)
    # The lambda defers token acquisition until the credential is first used.
    return azure_active_directory.AdalAuthentication(
        lambda: context.acquire_token_with_client_certificate(
            resource,
            application_id,
            pem_pkey,
            thumbprint)
    )
class StartVMThread(threading.Thread):
""" Thread class to start Azure VM """
def __init__(self, resource_group, vm_name):
threading.Thread.__init__(self)
self.resource_group = resource_group
self.vm_name = vm_name
def run(self):
print "Starting " + self.vm_name + " in resource group " + self.resource_group
start_vm(self.resource_group, self.vm_name)
def start_vm(resource_group, vm_name):
    """ Starts a vm in the specified resource group """
    # Start the VM.  Uses the module-level `compute_client` global; wait()
    # blocks until the long-running start operation completes.
    vm_start = compute_client.virtual_machines.start(resource_group, vm_name)
    vm_start.wait()
# Process any arguments sent in
resource_group_name = None
vm_name = None
opts, args = getopt.getopt(sys.argv[1:], "g:v:")
for o, a in opts:
if o == '-g': # if resource group name is passed with -g option, then use it.
resource_group_name = a
elif o == '-v': # if vm name is passed in with the -v option, then use it.
vm_name = a
# Check for correct arguments passed in
if vm_name is not None and resource_group_name is None:
raise ValueError("VM name argument passed in without a resource group specified")
# Authenticate to Azure using the Azure Automation RunAs service principal
automation_runas_connection = automationassets.get_automation_connection("AzureRunAsConnection")
azure_credential = get_automation_runas_credential(automation_runas_connection)
subscription_id = str(automation_runas_connection["SubscriptionId"])
resource_client = azure.mgmt.resource.ResourceManagementClient(
azure_credential,
subscription_id)
compute_client = azure.mgmt.compute.ComputeManagementClient(
azure_credential, subscription_id)
# Get list of resource groups
groups = []
if resource_group_name is None and vm_name is None:
# Get all resource groups
groups = resource_client.resource_groups.list()
elif resource_group_name is not None and vm_name is None:
# Get specific resource group
resource_group = resource_client.resource_groups.get(resource_group_name)
groups.append(resource_group)
elif resource_group_name is not None and vm_name is not None:
# Specific resource group and VM name passed in so start the VM
vm_detail = compute_client.virtual_machines.get(resource_group_name, vm_name, expand='instanceView')
if vm_detail.instance_view.statuses[1].code == 'PowerState/deallocated':
start_vm(resource_group_name, vm_name)
# List of threads that are used to start VMs in parallel
vm_threads_list = []
# Process any VMs that are in a group
for group in groups:
vms = compute_client.virtual_machines.list(group.name)
for vm in vms:
vm_detail = compute_client.virtual_machines.get(group.name, vm.name, expand='instanceView')
if vm_detail.instance_view.statuses[1].code == 'PowerState/deallocated':
start_vm_thread = StartVMThread(group.name, vm.name)
start_vm_thread.start()
vm_threads_list.append(start_vm_thread)
if len(vm_threads_list) > _MAX_THREADS:
for thread in vm_threads_list:
thread.join()
del vm_threads_list[:]
# Wait for all threads to complete
for thread in vm_threads_list:
thread.join()
print "Finished starting all VMs"
| mit | Python | |
264f4a827e39d55259aaa53bde967dae6befc606 | Complete Programming Experience: polysum | Kunal57/MIT_6.00.1x | pset2/grader.py | pset2/grader.py | # Grader
# 10.0 points possible (ungraded)
# A regular polygon has n number of sides. Each side has length s.
# The area of a regular polygon is: 0.25∗n∗s2tan(π/n)
# The perimeter of a polygon is: length of the boundary of the polygon
# Write a function called polysum that takes 2 arguments, n and s. This function should sum the area and square of the perimeter of the regular polygon. The function returns the sum, rounded to 4 decimal places.
from math import tan, pi
def polysum(n, s):
    """Return area + perimeter**2 of a regular n-gon with side length s.

    Area of a regular polygon: 0.25 * n * s**2 / tan(pi / n); the perimeter
    is n * s.  The result is rounded to 4 decimal places.
    """
    area = 0.25 * n * s ** 2 / tan(pi / n)
    squared_perimeter = (n * s) ** 2
    return round(area + squared_perimeter, 4)
print(polysum(52, 78)) | mit | Python | |
e8170b2f446f23771bd746747493bebbd0dc9288 | add velocity filter | OTL/otl_diff_drive | nodes/velocity_filter.py | nodes/velocity_filter.py | #! /usr/bin/env python
import rospy
import roslib
roslib.load_manifest("otl_diff_drive")
from otl_diff_drive import twist_velocities
from geometry_msgs.msg import Twist
def isStopVelocity(twist):
    """Return True when both linear.x and angular.z are within 1e-4 of zero."""
    epsilon = 0.0001
    linear_stopped = -epsilon < twist.linear.x < epsilon
    angular_stopped = -epsilon < twist.angular.z < epsilon
    return linear_stopped and angular_stopped
class VelocityFilter:
def __init__(self):
self._max_linear_velocity = rospy.get_param("~max_translational_velocity", 1.0)
self._max_angular_velocity = rospy.get_param("~max_rotational_velocity", 3.0)
self._velocity_filter = twist_velocities.VelocityFilter(self._max_linear_velocity, self._max_angular_velocity)
self._max_linear_accel = rospy.get_param("~max_translational_acceleration", 1.0)
self._max_angular_accel = rospy.get_param("~max_rotational_acceleration", 3.0)
self._accel_filter = twist_velocities.AccelFilter(self._max_linear_accel, self._max_angular_accel)
self._output_pub = rospy.Publisher('/output_vel', Twist)
self._command_sub = rospy.Subscriber("/cmd_vel", Twist, self.on_twist_command)
self._current_velocity = None
self._output_velocity = Twist()
self._last_command_stamp = None
def on_twist_command(self, command):
self._current_velocity = command
self.publish()
self._last_command_stamp = rospy.Time.now()
def get_elapsed_sec(self):
return (rospy.Time.now() - self._last_command_stamp).to_sec()
def publish(self):
if self._current_velocity:
limited_x, limited_theta = self._velocity_filter.filter(self._current_velocity.linear.x, self._current_velocity.angular.z)
if self._last_command_stamp:
duration = self.get_elapsed_sec()
else:
duration = 0.1
if duration > 0.1:
duration = 0.1
x, theta = self._accel_filter.filter(limited_x, limited_theta, duration)
self._output_velocity.linear.x = x
self._output_velocity.angular.z = theta
self._output_pub.publish(self._output_velocity)
def main(self):
r = rospy.Rate(10)
TIME_FOR_STOP = 5.0
while not rospy.is_shutdown():
if self._last_command_stamp:
# recently updated
if self.get_elapsed_sec() < TIME_FOR_STOP:
# target is stop
if isStopVelocity(self._current_velocity):
# output is not stop velocity
if not isStopVelocity(self._output_velocity):
# then repeat publish
self.publish()
r.sleep()
if __name__ == '__main__':
rospy.init_node('velocity_filter')
node = VelocityFilter()
node.main()
| bsd-3-clause | Python | |
18ed0900c22fa2ed646f08adf66e1917a6a04b43 | add collect_impression | amimoto-ami/amimoto-amazon-alexa,amimoto-ami/amimoto-amazon-alexa | amimoto_alexa/collect_message.py | amimoto_alexa/collect_message.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
for amimoto_alexa
"""
import lamvery
from helpers import *
from debugger import *
def collect_impression(intent, session):
    """Collect impression and finalize session

    Thanks the user, marks the session as ended and returns the built
    speechlet response.  NOTE(review): *intent* is currently unused and the
    two todos below (tweet, firehose) are not yet implemented.
    """
    session_attributes = build_session_attributes(session)
    card_title = "Impression"
    debug_logger(session)
    speech_output = "Thank you! You can see impressions on twitter and ,A MI MO TO Blog." \
                    "Have a nice day! "
    # todo: tweet if exist id.
    # todo: store session summary to firehose
    should_end_session = True
    return build_response(session_attributes, build_speechlet_response(
        card_title, speech_output, None, should_end_session))
| mit | Python | |
aec48fe807e4589344a9f04e13b8f0b651110917 | add package installer. | ISIFoundation/influenzanet-epidb-client | python/setup.py | python/setup.py | from setuptools import setup
import epidb_client
version = epidb_client.__version__
setup(
name = "epidb-client",
version = version,
url = 'http://www.epiwork.eu/',
description = 'EPIWork Database - Client Code',
author = 'Fajran Iman Rusadi',
packages = ['epidb_client'],
install_requires = ['setuptools'],
)
| agpl-3.0 | Python | |
437431289b25418c5acd9890b86350aa62ae0668 | add updated script with changes from @fransua | sestaton/sesbio,sestaton/sesbio,sestaton/sesbio,sestaton/sesbio | transposon_annotation/transposon_annotation_ecolopy_scripts/ecolopy.py | transposon_annotation/transposon_annotation_ecolopy_scripts/ecolopy.py | import matplotlib
matplotlib.use('Agg')
from ecolopy_dev import Community
from ecolopy_dev.utils import draw_shannon_distrib
com = Community('test_log_abund.txt')
print com
com.fit_model('ewens')
com.set_current_model('ewens')
ewens_model = com.get_model('ewens')
print ewens_model
com.fit_model('lognormal')
com.set_current_model('lognormal')
lognormal_model = com.get_model('lognormal')
print lognormal_model
com.fit_model('etienne')
com.set_current_model('etienne')
etienne_model = com.get_model('etienne')
print etienne_model
tmp = {}
likelihoods = []
for met in ['fmin', 'slsqp', 'l_bfgs_b', 'tnc']:
print 'Optimizing with %s...' % met
try:
com.fit_model(name='etienne', method=met, verbose=False)
model = com.get_model('etienne')
tmp[met] ={}
tmp[met]['model'] = model
tmp[met]['theta'] = model.theta
tmp[met]['I'] = model.I
tmp[met]['m'] = model.m
tmp[met]['lnL'] = model.lnL
# in case you reach two times the same likelyhood it may not be necessary
# to go on with other optimization strategies...
# of course if time is not limiting it is not worth to check :)
if round(model.lnL,1) in likelihoods:
break
likelihoods.append(round(model.lnL, 1))
except Exception as e:
print ' optimization failed: ' + e.args[0]
# in case optimization by fmin failed to found correct values for theta and m:
if not (1 <= tmp['fmin']['theta'] < com.S and \
1e-50 <= tmp['fmin']['m'] < 1-1e-50):
del (tmp['fmin'])
# find the model with the higher likelihood:
met = min(tmp, key=lambda x: tmp[x]['lnL'])
# load it as 'etienne' model
com.set_model(tmp[met]['model'])
lrt = com.lrt('ewens', 'etienne')
best = 'ewens' if lrt > 0.05 else 'etienne'
print 'Best model by LRT was: ' + best
com.generate_random_neutral_distribution(model=best)
pval, neut_h = com.test_neutrality (model=best, gens=10000, full=True)
#draw_shannon_distrib(neut_h, abd.shannon)
draw_shannon_distrib(neut_h, com.shannon, outfile='test_log_shannon_dist.pdf', filetype='pdf')
print 'P-value for neutrality test was: ', pval
out = open('test_log_shannon_neutral_data.tsv', 'w')
out.write('# shannon:' + str(com.shannon) + '\n')
out.write('\n'.join([str(s) for s in neut_h]) + '\n')
out.close()
com.dump_community('test_log_ecolopy.pik') | mit | Python | |
c7c3ab0a4013df99b928351040f1156b07ba6767 | Add some tests for the tokens | realityone/flaskbb,realityone/flaskbb,realityone/flaskbb | tests/unit/utils/test_tokens.py | tests/unit/utils/test_tokens.py | from flask import current_app
from itsdangerous import TimedJSONWebSignatureSerializer
from flaskbb.utils.tokens import make_token, get_token_status
def test_make_token(user):
token = make_token(user, "test")
s = TimedJSONWebSignatureSerializer(current_app.config['SECRET_KEY'])
unpacked_token = s.loads(token)
assert user.id == unpacked_token["id"]
assert "test" == unpacked_token["op"]
def test_valid_token_status(user):
token = make_token(user, "valid_test")
expired, invalid, token_user = get_token_status(token, "valid_test")
assert not expired
assert not invalid
assert token_user == user
def test_token_status_with_data(user):
token = make_token(user, "test_data")
expired, invalid, token_user, data = \
get_token_status(token, "test_data", return_data=True)
assert user.id == data["id"]
assert "test_data" == data["op"]
def test_token_operation(user):
token = make_token(user, "operation_test")
expired, invalid, token_user = get_token_status(token, "invalid_op")
assert invalid
assert not expired
assert not token_user
def test_invalid_token_status(user):
token = "this-is-not-a-token"
expired, invalid, token_user, data = \
get_token_status(token, "invalid_test", return_data=True)
assert invalid
assert not expired
assert not token_user
assert data is None
def test_expired_token_status(user):
token = make_token(user, "expired_test", -1)
expired, invalid, token_user = get_token_status(token, "expired_test")
assert expired
assert not invalid
assert not token_user
| bsd-3-clause | Python | |
0848197b3c9ff8d09575b85b5e3a2ca1aac6f6c5 | Put split and merge in own module too | glormph/msstitch | app/drivers/pycolator/splitmerge.py | app/drivers/pycolator/splitmerge.py | from app.drivers.basedrivers import PycolatorDriver
from app.preparation import pycolator as preparation
from app.readers import pycolator as readers
class SplitDriver(PycolatorDriver):
def __init__(self, **kwargs):
super(SplitDriver, self).__init__(**kwargs)
self.targetsuffix = kwargs.get('targetsuffix', '_target.xml')
self.decoysuffix = kwargs.get('decoysuffix', '_decoy.xml')
def prepare(self):
self.ns, self.static_xml = self.prepare_percolator_output(self.fn)
def run(self):
td = {'target': self.targetsuffix, 'decoy': self.decoysuffix}
for filter_type in ['target', 'decoy']:
self.prepare()
self.set_features(filter_type)
self.outsuffix = td[filter_type]
self.write(filter_type)
def set_features(self, filter_type):
""" Calls splitter to split percolator output into target/decoy elements.
Writes two new xml files with features. Currently only psms and
peptides. Proteins not here, since one cannot do protein inference
before having merged and remapped multifraction data anyway.
"""
elements_to_split = {'psm': self.allpsms, 'peptide': self.allpeps}
self.features = preparation.split_target_decoy(elements_to_split,
self.ns, filter_type)
class MergeDriver(PycolatorDriver):
"""Base class for merging multiple percolator fractions under different
sorts of filtering. It writes a single percolator out xml from multiple fractions.
Namespace and static xml come from first percolator file.
Make sure fractions are from same percolator run."""
outsuffix = '_merged.xml'
def __init__(self, **kwargs):
super(MergeDriver, self).__init__(**kwargs)
self.mergefiles = [self.fn]
self.mergefiles.extend(kwargs.get('multifile_input', None))
def prepare(self):
self.ns, self.static_xml = self.prepare_percolator_output(self.fn)
def set_features(self):
""""Merge all psms and peptides"""
allpsms_str = readers.generate_psms_multiple_fractions_strings(
self.mergefiles, self.ns)
allpeps_str = readers.generate_peptides_multiple_fractions_strings(
self.mergefiles, self.ns)
self.features = {'psm': allpsms_str, 'peptide': allpeps_str}
| mit | Python | |
4bf5d21402d5394f36eec006fd3ba03354bb8523 | Add dashboard url route | ethanperez/t4k-rms,ethanperez/t4k-rms | dashboard/urls.py | dashboard/urls.py | from django.conf.urls import patterns, url
from dashboard import views
urlpatterns = patterns('dashboard.views',
url(r'^$', views.dashboard, name = 'dashboard'),
url(r'^login/$', views.enter_gate, name = 'login'),
url(r'^logout/$', views.exit_gate, name = 'logout'),
) | mit | Python | |
5dd9cc55368e9f5bd8c79f74f3c7c1fc84a6bd8b | Add common migration (unrelated to branch) | lutris/website,lutris/website,lutris/website,lutris/website | common/migrations/0010_auto_20200529_0514.py | common/migrations/0010_auto_20200529_0514.py | # Generated by Django 2.2.12 on 2020-05-29 05:14
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('common', '0009_upload_hosting'),
]
operations = [
migrations.AlterField(
model_name='upload',
name='destination',
field=models.CharField(max_length=256, verbose_name='destination path'),
),
]
| agpl-3.0 | Python | |
f53488e3c797afb4f47f005e078d53a3bea14715 | add solution for Combination Sum III | zhyu/leetcode,zhyu/leetcode | algorithms/combinationSumIII/combinationSumIII.py | algorithms/combinationSumIII/combinationSumIII.py | class Solution:
# @param {integer} k
# @param {integer} n
# @return {integer[][]}
def combinationSum3(self, k, n):
return [arr for arr in ([j+1 for j in xrange(10) if i & (1 << j)]
for i in xrange(1, 512) if bin(i).count('1') == k)
if sum(arr) == n]
| mit | Python | |
fedb80cf8ee5859e1d8f5caccc7a67ae979e743e | Remove unnecessary grit_out_dir variable from component_strings.gyp. | jaruba/chromium.src,patrickm/chromium.src,TheTypoMaster/chromium-crosswalk,Fireblend/chromium-crosswalk,dednal/chromium.src,M4sse/chromium.src,TheTypoMaster/chromium-crosswalk,bright-sparks/chromium-spacewalk,axinging/chromium-crosswalk,jaruba/chromium.src,Fireblend/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,Fireblend/chromium-crosswalk,markYoungH/chromium.src,M4sse/chromium.src,Chilledheart/chromium,hgl888/chromium-crosswalk-efl,Just-D/chromium-1,mohamed--abdel-maksoud/chromium.src,PeterWangIntel/chromium-crosswalk,Just-D/chromium-1,crosswalk-project/chromium-crosswalk-efl,PeterWangIntel/chromium-crosswalk,Fireblend/chromium-crosswalk,hgl888/chromium-crosswalk,M4sse/chromium.src,PeterWangIntel/chromium-crosswalk,Pluto-tv/chromium-crosswalk,markYoungH/chromium.src,bright-sparks/chromium-spacewalk,littlstar/chromium.src,Jonekee/chromium.src,ltilve/chromium,Jonekee/chromium.src,patrickm/chromium.src,dednal/chromium.src,hgl888/chromium-crosswalk-efl,markYoungH/chromium.src,axinging/chromium-crosswalk,Pluto-tv/chromium-crosswalk,dednal/chromium.src,ChromiumWebApps/chromium,ltilve/chromium,jaruba/chromium.src,bright-sparks/chromium-spacewalk,ChromiumWebApps/chromium,ltilve/chromium,dushu1203/chromium.src,dednal/chromium.src,Pluto-tv/chromium-crosswalk,Chilledheart/chromium,bright-sparks/chromium-spacewalk,TheTypoMaster/chromium-crosswalk,axinging/chromium-crosswalk,bright-sparks/chromium-spacewalk,markYoungH/chromium.src,anirudhSK/chromium,ChromiumWebApps/chromium,anirudhSK/chromium,PeterWangIntel/chromium-crosswalk,M4sse/chromium.src,krieger-od/nwjs_chromium.src,bright-sparks/chromium-spacewalk,crosswalk-project/chromium-crosswalk-efl,jaruba/chromium.src,Just-D/chromium-1,krieger-od/nwjs_chromium.src,fujunwei/chromium-crosswalk,Fireblend/chromium-crosswalk,Jonekee/chromium.src,littlstar/chromium.src,mohamed--abdel-maksoud/chromium.src,Just-D/chromi
um-1,Chilledheart/chromium,bright-sparks/chromium-spacewalk,fujunwei/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,Jonekee/chromium.src,ltilve/chromium,anirudhSK/chromium,TheTypoMaster/chromium-crosswalk,patrickm/chromium.src,markYoungH/chromium.src,markYoungH/chromium.src,jaruba/chromium.src,axinging/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,dushu1203/chromium.src,Just-D/chromium-1,crosswalk-project/chromium-crosswalk-efl,anirudhSK/chromium,chuan9/chromium-crosswalk,markYoungH/chromium.src,krieger-od/nwjs_chromium.src,hgl888/chromium-crosswalk,dednal/chromium.src,hgl888/chromium-crosswalk-efl,dushu1203/chromium.src,crosswalk-project/chromium-crosswalk-efl,ondra-novak/chromium.src,jaruba/chromium.src,PeterWangIntel/chromium-crosswalk,chuan9/chromium-crosswalk,Chilledheart/chromium,Jonekee/chromium.src,jaruba/chromium.src,ltilve/chromium,ondra-novak/chromium.src,ChromiumWebApps/chromium,dednal/chromium.src,ondra-novak/chromium.src,Just-D/chromium-1,ondra-novak/chromium.src,M4sse/chromium.src,chuan9/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,Pluto-tv/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,hgl888/chromium-crosswalk-efl,markYoungH/chromium.src,ChromiumWebApps/chromium,littlstar/chromium.src,hgl888/chromium-crosswalk,anirudhSK/chromium,Fireblend/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,hgl888/chromium-crosswalk,jaruba/chromium.src,Pluto-tv/chromium-crosswalk,hgl888/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,axinging/chromium-crosswalk,dednal/chromium.src,PeterWangIntel/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,dushu1203/chromium.src,dushu1203/chromium.src,ChromiumWebApps/chromium,jaruba/chromium.src,PeterWangIntel/chromium-crosswalk,axinging/chromium-crosswalk,Just-D/chromium-1,chuan9/chromium-crosswalk,anirudhSK/chromium,littlstar/chromium.src,littlstar/chromium.src,patrickm/chromium.src,fujunwei/chromium-crosswal
k,Fireblend/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,Jonekee/chromium.src,M4sse/chromium.src,M4sse/chromium.src,axinging/chromium-crosswalk,M4sse/chromium.src,krieger-od/nwjs_chromium.src,dednal/chromium.src,fujunwei/chromium-crosswalk,patrickm/chromium.src,littlstar/chromium.src,ChromiumWebApps/chromium,dushu1203/chromium.src,Jonekee/chromium.src,markYoungH/chromium.src,anirudhSK/chromium,Jonekee/chromium.src,markYoungH/chromium.src,hgl888/chromium-crosswalk-efl,Just-D/chromium-1,M4sse/chromium.src,patrickm/chromium.src,krieger-od/nwjs_chromium.src,ondra-novak/chromium.src,dushu1203/chromium.src,chuan9/chromium-crosswalk,Jonekee/chromium.src,bright-sparks/chromium-spacewalk,ondra-novak/chromium.src,ondra-novak/chromium.src,dednal/chromium.src,ChromiumWebApps/chromium,anirudhSK/chromium,Chilledheart/chromium,crosswalk-project/chromium-crosswalk-efl,Fireblend/chromium-crosswalk,hgl888/chromium-crosswalk-efl,krieger-od/nwjs_chromium.src,Chilledheart/chromium,Pluto-tv/chromium-crosswalk,patrickm/chromium.src,Pluto-tv/chromium-crosswalk,ltilve/chromium,axinging/chromium-crosswalk,bright-sparks/chromium-spacewalk,dushu1203/chromium.src,Pluto-tv/chromium-crosswalk,ChromiumWebApps/chromium,krieger-od/nwjs_chromium.src,fujunwei/chromium-crosswalk,krieger-od/nwjs_chromium.src,M4sse/chromium.src,ltilve/chromium,fujunwei/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,hgl888/chromium-crosswalk,jaruba/chromium.src,dushu1203/chromium.src,patrickm/chromium.src,Chilledheart/chromium,hgl888/chromium-crosswalk-efl,mohamed--abdel-maksoud/chromium.src,hgl888/chromium-crosswalk,anirudhSK/chromium,hgl888/chromium-crosswalk-efl,chuan9/chromium-crosswalk,Fireblend/chromium-crosswalk,ltilve/chromium,hgl888/chromium-crosswalk,Chilledheart/chromium,Just-D/chromium-1,dednal/chromium.src,markYoungH/chromium.src,hgl888/chromium-crosswalk-efl,dednal/chromium.src,axinging/chromium-crosswalk,hgl888/chromium-crosswalk-efl,hgl888/chromium-crosswalk,axinging/chromium-crosswalk,aniru
dhSK/chromium,chuan9/chromium-crosswalk,dushu1203/chromium.src,Jonekee/chromium.src,Jonekee/chromium.src,patrickm/chromium.src,ChromiumWebApps/chromium,anirudhSK/chromium,ondra-novak/chromium.src,littlstar/chromium.src,jaruba/chromium.src,mohamed--abdel-maksoud/chromium.src,littlstar/chromium.src,fujunwei/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,Chilledheart/chromium,fujunwei/chromium-crosswalk,anirudhSK/chromium,krieger-od/nwjs_chromium.src,ondra-novak/chromium.src,krieger-od/nwjs_chromium.src,chuan9/chromium-crosswalk,Pluto-tv/chromium-crosswalk,ChromiumWebApps/chromium,M4sse/chromium.src,crosswalk-project/chromium-crosswalk-efl,crosswalk-project/chromium-crosswalk-efl,ChromiumWebApps/chromium,axinging/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,fujunwei/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,ltilve/chromium,dushu1203/chromium.src,chuan9/chromium-crosswalk,krieger-od/nwjs_chromium.src | components/component_strings.gyp | components/component_strings.gyp | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'targets': [
{
'target_name': 'component_strings',
'type': 'none',
'actions': [
{
'action_name': 'component_strings',
'variables': {
'grit_grd_file': 'component_strings.grd',
'grit_out_dir': '<(SHARED_INTERMEDIATE_DIR)/components/strings',
},
'includes': [ '../build/grit_action.gypi' ],
},
],
'direct_dependent_settings': {
'include_dirs': [
'<(SHARED_INTERMEDIATE_DIR)/components/strings',
],
},
},
],
}
| # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'variables': {
'grit_out_dir': '<(SHARED_INTERMEDIATE_DIR)/components',
},
'targets': [
{
'target_name': 'component_strings',
'type': 'none',
'actions': [
{
'action_name': 'component_strings',
'variables': {
'grit_grd_file': 'component_strings.grd',
'grit_out_dir': '<(SHARED_INTERMEDIATE_DIR)/components/strings',
},
'includes': [ '../build/grit_action.gypi' ],
},
],
'direct_dependent_settings': {
'include_dirs': [
'<(SHARED_INTERMEDIATE_DIR)/components/strings',
],
},
},
],
}
| bsd-3-clause | Python |
6307a8a813062b3faad6b0f393d1886d4ad9bed8 | add initial date for committees | dhosterman/hebrew_order_david,dhosterman/hebrew_order_david,dhosterman/hebrew_order_david | application/migrations/0019_auto_20150316_2009.py | application/migrations/0019_auto_20150316_2009.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from application.models import Committee
def add_committees(apps, schema_editor):
committees = [
'Fundraising and Social Action',
'Membership',
'Social Functions',
'Archivist and Historian',
'Treasury',
'Communication and Social Media',
'Executive Committee'
]
for committee in committees:
c = Committee(name=committee)
c.save()
class Migration(migrations.Migration):
dependencies = [
('application', '0018_auto_20150316_1920'),
]
operations = [
migrations.RunPython(add_committees),
]
| mit | Python | |
fa0afad07f34f350233ae2a4f1654faef9bc1814 | Add a python version for the phonebook benchmark | SwiftAndroid/swift,ahoppen/swift,uasys/swift,karwa/swift,gottesmm/swift,ken0nek/swift,xwu/swift,huonw/swift,natecook1000/swift,jckarter/swift,ken0nek/swift,allevato/swift,hughbe/swift,tinysun212/swift-windows,gmilos/swift,MukeshKumarS/Swift,shahmishal/swift,sschiau/swift,austinzheng/swift,adrfer/swift,uasys/swift,shajrawi/swift,bitjammer/swift,tjw/swift,allevato/swift,calebd/swift,return/swift,benlangmuir/swift,gribozavr/swift,tjw/swift,johnno1962d/swift,nathawes/swift,nathawes/swift,danielmartin/swift,nathawes/swift,bitjammer/swift,xedin/swift,calebd/swift,jtbandes/swift,kstaring/swift,lorentey/swift,tardieu/swift,dduan/swift,CodaFi/swift,jopamer/swift,gregomni/swift,shahmishal/swift,gmilos/swift,manavgabhawala/swift,shajrawi/swift,natecook1000/swift,tkremenek/swift,JGiola/swift,jckarter/swift,JGiola/swift,cbrentharris/swift,calebd/swift,karwa/swift,zisko/swift,slavapestov/swift,ahoppen/swift,ken0nek/swift,swiftix/swift.old,airspeedswift/swift,emilstahl/swift,Ivacker/swift,tinysun212/swift-windows,ben-ng/swift,danielmartin/swift,apple/swift,amraboelela/swift,gottesmm/swift,return/swift,apple/swift,swiftix/swift.old,khizkhiz/swift,gregomni/swift,LeoShimonaka/swift,Jnosh/swift,austinzheng/swift,russbishop/swift,amraboelela/swift,felix91gr/swift,kentya6/swift,JGiola/swift,xwu/swift,mightydeveloper/swift,tardieu/swift,apple/swift,frootloops/swift,jckarter/swift,brentdax/swift,felix91gr/swift,adrfer/swift,karwa/swift,practicalswift/swift,alblue/swift,CodaFi/swift,xedin/swift,LeoShimonaka/swift,huonw/swift,swiftix/swift,parkera/swift,jmgc/swift,parkera/swift,aschwaighofer/swift,dduan/swift,lorentey/swift,JaSpa/swift,tinysun212/swift-windows,cbrentharris/swift,jmgc/swift,kstaring/swift,austinzheng/swift,LeoShimonaka/swift,ben-ng/swift,therealbnut/swift,kentya6/swift,bitjammer/swift,johnno1962d/swift,gottesmm/swift,johnno1962d/swift,felix91gr/swift,kperryua/swift,johnno1962d/swift
,jopamer/swift,milseman/swift,return/swift,stephentyrone/swift,kstaring/swift,SwiftAndroid/swift,shahmishal/swift,codestergit/swift,modocache/swift,nathawes/swift,tkremenek/swift,tkremenek/swift,gregomni/swift,swiftix/swift,Jnosh/swift,rudkx/swift,JaSpa/swift,Ivacker/swift,kusl/swift,gottesmm/swift,devincoughlin/swift,frootloops/swift,jtbandes/swift,ken0nek/swift,cbrentharris/swift,shahmishal/swift,therealbnut/swift,gottesmm/swift,atrick/swift,codestergit/swift,practicalswift/swift,ken0nek/swift,shahmishal/swift,shahmishal/swift,cbrentharris/swift,huonw/swift,russbishop/swift,gmilos/swift,JGiola/swift,therealbnut/swift,IngmarStein/swift,Ivacker/swift,JaSpa/swift,frootloops/swift,tinysun212/swift-windows,harlanhaskins/swift,deyton/swift,nathawes/swift,xwu/swift,milseman/swift,LeoShimonaka/swift,gregomni/swift,SwiftAndroid/swift,mightydeveloper/swift,swiftix/swift.old,KrishMunot/swift,ahoppen/swift,therealbnut/swift,MukeshKumarS/Swift,jmgc/swift,emilstahl/swift,deyton/swift,danielmartin/swift,emilstahl/swift,zisko/swift,codestergit/swift,sdulal/swift,Ivacker/swift,therealbnut/swift,amraboelela/swift,kentya6/swift,return/swift,shajrawi/swift,alblue/swift,airspeedswift/swift,huonw/swift,JaSpa/swift,therealbnut/swift,calebd/swift,tkremenek/swift,manavgabhawala/swift,khizkhiz/swift,gribozavr/swift,tinysun212/swift-windows,KrishMunot/swift,emilstahl/swift,dduan/swift,jopamer/swift,ken0nek/swift,aschwaighofer/swift,Jnosh/swift,dduan/swift,mightydeveloper/swift,arvedviehweger/swift,zisko/swift,alblue/swift,russbishop/swift,kperryua/swift,kstaring/swift,CodaFi/swift,jckarter/swift,sschiau/swift,LeoShimonaka/swift,stephentyrone/swift,parkera/swift,swiftix/swift.old,sdulal/swift,djwbrown/swift,amraboelela/swift,Jnosh/swift,airspeedswift/swift,return/swift,adrfer/swift,rudkx/swift,kperryua/swift,natecook1000/swift,JaSpa/swift,kusl/swift,JGiola/swift,atrick/swift,CodaFi/swift,gmilos/swift,jmgc/swift,lorentey/swift,amraboelela/swift,JGiola/swift,devincoughlin/swift,allevato/swift,
tinysun212/swift-windows,modocache/swift,benlangmuir/swift,parkera/swift,hooman/swift,uasys/swift,natecook1000/swift,LeoShimonaka/swift,swiftix/swift.old,apple/swift,jopamer/swift,tjw/swift,djwbrown/swift,danielmartin/swift,allevato/swift,uasys/swift,swiftix/swift.old,ahoppen/swift,Jnosh/swift,arvedviehweger/swift,cbrentharris/swift,return/swift,alblue/swift,bitjammer/swift,modocache/swift,IngmarStein/swift,alblue/swift,harlanhaskins/swift,djwbrown/swift,modocache/swift,huonw/swift,russbishop/swift,tardieu/swift,OscarSwanros/swift,jopamer/swift,adrfer/swift,IngmarStein/swift,karwa/swift,milseman/swift,atrick/swift,glessard/swift,jtbandes/swift,xedin/swift,practicalswift/swift,hughbe/swift,slavapestov/swift,codestergit/swift,huonw/swift,felix91gr/swift,benlangmuir/swift,alblue/swift,lorentey/swift,manavgabhawala/swift,alblue/swift,practicalswift/swift,gribozavr/swift,MukeshKumarS/Swift,deyton/swift,brentdax/swift,xedin/swift,harlanhaskins/swift,brentdax/swift,aschwaighofer/swift,johnno1962d/swift,johnno1962d/swift,tardieu/swift,codestergit/swift,kperryua/swift,codestergit/swift,devincoughlin/swift,kusl/swift,shajrawi/swift,mightydeveloper/swift,swiftix/swift,sschiau/swift,IngmarStein/swift,tkremenek/swift,kusl/swift,roambotics/swift,jopamer/swift,SwiftAndroid/swift,tjw/swift,deyton/swift,felix91gr/swift,shajrawi/swift,austinzheng/swift,tjw/swift,bitjammer/swift,apple/swift,glessard/swift,ken0nek/swift,ben-ng/swift,gribozavr/swift,jmgc/swift,jckarter/swift,CodaFi/swift,airspeedswift/swift,kentya6/swift,sdulal/swift,dreamsxin/swift,hughbe/swift,shahmishal/swift,gregomni/swift,LeoShimonaka/swift,adrfer/swift,parkera/swift,jckarter/swift,practicalswift/swift,kentya6/swift,stephentyrone/swift,tinysun212/swift-windows,LeoShimonaka/swift,russbishop/swift,swiftix/swift.old,harlanhaskins/swift,apple/swift,devincoughlin/swift,uasys/swift,kusl/swift,hughbe/swift,austinzheng/swift,milseman/swift,return/swift,tardieu/swift,ahoppen/swift,rudkx/swift,sschiau/swift,swiftix/swift,ddu
an/swift,OscarSwanros/swift,milseman/swift,austinzheng/swift,kentya6/swift,IngmarStein/swift,deyton/swift,parkera/swift,CodaFi/swift,modocache/swift,tjw/swift,jtbandes/swift,karwa/swift,glessard/swift,felix91gr/swift,ben-ng/swift,shajrawi/swift,karwa/swift,lorentey/swift,zisko/swift,manavgabhawala/swift,stephentyrone/swift,khizkhiz/swift,roambotics/swift,brentdax/swift,hughbe/swift,kentya6/swift,jmgc/swift,mightydeveloper/swift,hughbe/swift,jtbandes/swift,jckarter/swift,ben-ng/swift,OscarSwanros/swift,devincoughlin/swift,Ivacker/swift,xedin/swift,deyton/swift,hooman/swift,benlangmuir/swift,kentya6/swift,SwiftAndroid/swift,slavapestov/swift,stephentyrone/swift,brentdax/swift,hooman/swift,brentdax/swift,roambotics/swift,stephentyrone/swift,aschwaighofer/swift,rudkx/swift,jtbandes/swift,tkremenek/swift,manavgabhawala/swift,calebd/swift,milseman/swift,atrick/swift,sdulal/swift,allevato/swift,manavgabhawala/swift,djwbrown/swift,devincoughlin/swift,lorentey/swift,KrishMunot/swift,hooman/swift,djwbrown/swift,KrishMunot/swift,rudkx/swift,sdulal/swift,MukeshKumarS/Swift,dreamsxin/swift,amraboelela/swift,nathawes/swift,stephentyrone/swift,gmilos/swift,danielmartin/swift,hooman/swift,roambotics/swift,rudkx/swift,JaSpa/swift,gottesmm/swift,SwiftAndroid/swift,harlanhaskins/swift,airspeedswift/swift,dduan/swift,Jnosh/swift,mightydeveloper/swift,sdulal/swift,practicalswift/swift,swiftix/swift,MukeshKumarS/Swift,johnno1962d/swift,arvedviehweger/swift,cbrentharris/swift,xwu/swift,practicalswift/swift,IngmarStein/swift,therealbnut/swift,hooman/swift,frootloops/swift,kperryua/swift,emilstahl/swift,deyton/swift,nathawes/swift,kstaring/swift,xwu/swift,kusl/swift,gmilos/swift,uasys/swift,felix91gr/swift,emilstahl/swift,arvedviehweger/swift,arvedviehweger/swift,kperryua/swift,modocache/swift,adrfer/swift,codestergit/swift,slavapestov/swift,gribozavr/swift,sdulal/swift,jopamer/swift,milseman/swift,mightydeveloper/swift,xedin/swift,khizkhiz/swift,cbrentharris/swift,kusl/swift,harlanhaskins/
swift,OscarSwanros/swift,amraboelela/swift,tardieu/swift,glessard/swift,arvedviehweger/swift,practicalswift/swift,allevato/swift,djwbrown/swift,karwa/swift,tardieu/swift,zisko/swift,xedin/swift,djwbrown/swift,sschiau/swift,xwu/swift,slavapestov/swift,tkremenek/swift,dduan/swift,kusl/swift,hughbe/swift,OscarSwanros/swift,gregomni/swift,lorentey/swift,manavgabhawala/swift,glessard/swift,calebd/swift,khizkhiz/swift,kstaring/swift,danielmartin/swift,KrishMunot/swift,parkera/swift,aschwaighofer/swift,roambotics/swift,calebd/swift,huonw/swift,OscarSwanros/swift,cbrentharris/swift,glessard/swift,adrfer/swift,parkera/swift,tjw/swift,swiftix/swift,shajrawi/swift,sschiau/swift,Jnosh/swift,emilstahl/swift,gribozavr/swift,IngmarStein/swift,frootloops/swift,mightydeveloper/swift,KrishMunot/swift,ben-ng/swift,benlangmuir/swift,aschwaighofer/swift,kstaring/swift,swiftix/swift,modocache/swift,khizkhiz/swift,atrick/swift,xwu/swift,sdulal/swift,khizkhiz/swift,frootloops/swift,devincoughlin/swift,MukeshKumarS/Swift,CodaFi/swift,zisko/swift,ahoppen/swift,hooman/swift,natecook1000/swift,airspeedswift/swift,karwa/swift,harlanhaskins/swift,roambotics/swift,slavapestov/swift,SwiftAndroid/swift,danielmartin/swift,uasys/swift,emilstahl/swift,russbishop/swift,jtbandes/swift,jmgc/swift,Ivacker/swift,OscarSwanros/swift,sschiau/swift,gmilos/swift,Ivacker/swift,kperryua/swift,frootloops/swift,xedin/swift,gottesmm/swift,lorentey/swift,natecook1000/swift,gribozavr/swift,sschiau/swift,aschwaighofer/swift,MukeshKumarS/Swift,shahmishal/swift,russbishop/swift,austinzheng/swift,gribozavr/swift,natecook1000/swift,slavapestov/swift,zisko/swift,brentdax/swift,KrishMunot/swift,ben-ng/swift,Ivacker/swift,allevato/swift,devincoughlin/swift,benlangmuir/swift,shajrawi/swift,arvedviehweger/swift,airspeedswift/swift,atrick/swift,JaSpa/swift,bitjammer/swift,swiftix/swift.old,bitjammer/swift | utils/benchmark/Strings/PySort.py | utils/benchmark/Strings/PySort.py |
words=[
u"James", u"John", u"Robert", u"Michael", u"William", u"David", u"Richard", u"Joseph",
u"Charles", u"Thomas", u"Christopher", u"Daniel", u"Matthew", u"Donald", u"Anthony",
u"Paul", u"Mark", u"George", u"Steven", u"Kenneth", u"Andrew", u"Edward", u"Brian",
u"Joshua", u"Kevin", u"Ronald", u"Timothy", u"Jason", u"Jeffrey", u"Gary", u"Ryan",
u"Nicholas", u"Eric", u"Stephen", u"Jacob", u"Larry", u"Frank", u"Jonathan", u"Scott",
u"Justin", u"Raymond", u"Brandon", u"Gregory", u"Samuel", u"Patrick", u"Benjamin",
u"Jack", u"Dennis", u"Jerry", u"Alexander", u"Tyler", u"Douglas", u"Henry", u"Peter",
u"Walter", u"Aaron", u"Jose", u"Adam", u"Harold", u"Zachary", u"Nathan", u"Carl",
u"Kyle", u"Arthur", u"Gerald", u"Lawrence", u"Roger", u"Albert", u"Keith", u"Jeremy",
u"Terry", u"Joe", u"Sean", u"Willie", u"Jesse", u"Ralph", u"Billy", u"Austin", u"Bruce",
u"Christian", u"Roy", u"Bryan", u"Eugene", u"Louis", u"Harry", u"Wayne", u"Ethan",
u"Jordan", u"Russell", u"Alan", u"Philip", u"Randy", u"Juan", u"Howard", u"Vincent",
u"Bobby", u"Dylan", u"Johnny", u"Phillip", u"Craig"]
# This is a phone book record.
class Record:
def __init__(self, firstname, lastname):
self.first = firstname
self.last = lastname
def __lt__(self, other):
if self.last < other.last:
return True
if self.last > other.last:
return False
return self.first < other.first
Records = []
for first in words:
for last in words:
Records.append(Record(first, last))
for i in xrange(100):
y = Records[:]
y = sorted(y)
#for w in y:
# print w.first, w.last
| apache-2.0 | Python | |
22e3933f6a9ff6c424d1a1f6d225f32c234359c5 | add leetcode Pascal's Triangle II | Fity/2code,Fity/2code,Fity/2code,Fity/2code,Fity/2code,Fity/2code | leetcode/PascalTriangleII/solution.py | leetcode/PascalTriangleII/solution.py | # -*- coding:utf-8 -*-
class Solution:
# @return a list of integers
def getRow(self, rowIndex):
if rowIndex == 0:
return [1]
ret = [1]
begin = 1
while rowIndex > 0:
ret.append(ret[-1] * rowIndex / begin)
rowIndex -= 1
begin += 1
return ret
if __name__ == '__main__':
s = Solution()
for x in xrange(5):
print x, s.getRow(x)
| mit | Python | |
8deb311e6196c618f9ae3f18d18c1827407b8b96 | Add a snippet. | jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets | python/pyside/pyside6/widget_QSqlTableModel_sqlite_from_file.py | python/pyside/pyside6/widget_QSqlTableModel_sqlite_from_file.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Ref: http://doc.qt.io/qt-5/modelview.html#2-1-a-read-only-table
import sys
from PySide6 import QtCore, QtWidgets
from PySide6.QtCore import Qt
from PySide6.QtWidgets import QApplication, QTableView
from PySide6.QtSql import QSqlDatabase, QSqlQuery, QSqlTableModel
# INIT THE DATABASE #############################
db = QSqlDatabase.addDatabase("QSQLITE")
db.setDatabaseName("./employee.db")
assert db.open()
# INSERT VALUES
q = QSqlQuery()
assert q.prepare("INSERT INTO employee(first_name, last_name) VALUES(?, ?)")
q.addBindValue("Jean")
q.addBindValue("Dupont")
q.exec()
q.addBindValue("Paul")
q.addBindValue("Dupond")
q.exec()
#################################################
app = QApplication(sys.argv)
table_view = QTableView()
model = QSqlTableModel()
model.setTable("employee")
model.select()
table_view.setModel(model)
table_view.show()
# The mainloop of the application. The event handling starts from this point.
exit_code = app.exec()
# The sys.exit() method ensures a clean exit.
# The environment will be informed, how the application ended.
sys.exit(exit_code)
| mit | Python | |
1fe2e3b2ed933f22ce128ca1ea8c728981009a44 | Add squashed migration | ioO/billjobs | billjobs/migrations/0002_service_is_available_squashed_0005_bill_issuer_address_default.py | billjobs/migrations/0002_service_is_available_squashed_0005_bill_issuer_address_default.py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-03-21 16:55
from __future__ import unicode_literals
from django.db import migrations, models
from billjobs.settings import BILLJOBS_BILL_ISSUER
class Migration(migrations.Migration):
replaces = [('billjobs', '0002_service_is_available'), ('billjobs', '0003_billline_note'), ('billjobs', '0004_auto_20160321_1256'), ('billjobs', '0005_bill_issuer_address_default')]
dependencies = [
('billjobs', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='service',
name='is_available',
field=models.BooleanField(default=True, verbose_name='Is available ?'),
),
migrations.AddField(
model_name='billline',
name='note',
field=models.CharField(blank=True, help_text='Write a simple note which will be added in your bill', max_length=1024, verbose_name='Note'),
),
migrations.AddField(
model_name='bill',
name='issuer_address',
field=models.CharField(default=BILLJOBS_BILL_ISSUER, max_length=1024),
),
]
| mit | Python | |
fb41c5295d867dc9ac6ec64da3646246c897e109 | add running time of algorithms | xbfool/hackerrank_xbfool | src/algorithms/arrays_and_sorting/running_time_of_algorithms.py | src/algorithms/arrays_and_sorting/running_time_of_algorithms.py | number = input()
number_array = [(int)(x) for x in raw_input().split()]
total = 0
for i in range(1, number):
for j in range(i):
ii = number_array[i]
jj = number_array[j]
if ii < jj:
total += i - j
number_array = number_array[:j] + [ii] + [jj] + number_array[j+1:i] + number_array[i+1:]
break
print total
| mit | Python | |
1f062298a68aaf6a4161279c539caed07816f1a8 | Add tests for influxdb/alarm_state_history_repository.py | stackforge/monasca-persister,openstack/monasca-persister,openstack/monasca-persister,openstack/monasca-persister,stackforge/monasca-persister,stackforge/monasca-persister | monasca_persister/tests/test_influxdb_alarm_state_history_repository.py | monasca_persister/tests/test_influxdb_alarm_state_history_repository.py | # (C) Copyright 2019 Fujitsu Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mock import Mock
from mock import patch
from oslotest import base
from monasca_persister.repositories.influxdb.alarm_state_history_repository \
import AlarmStateHistInfluxdbRepository
from monasca_persister.repositories.influxdb import abstract_repository
class TestInfluxdbAlarmStateHistoryRepo(base.BaseTestCase):
    """Unit tests for the InfluxDB alarm state history repository."""

    def setUp(self):
        super(TestInfluxdbAlarmStateHistoryRepo, self).setUp()
        # Stub out oslo.config so the repository can be constructed
        # without a real configuration file.
        with patch.object(abstract_repository.cfg, 'CONF', return_value=Mock()):
            self.alarm_state_repo = AlarmStateHistInfluxdbRepository()

    def tearDown(self):
        super(TestInfluxdbAlarmStateHistoryRepo, self).tearDown()

    def test_process_message(self):
        """An alarm-transitioned event is rendered as an InfluxDB line."""
        msg = Mock()
        msg.message.value = """{
            "alarm-transitioned": {
                "alarmId": "dummyid",
                "metrics": "dummymetrics",
                "newState": "dummynewState",
                "oldState": "dummyoldState",
                "link": "dummylink",
                "lifecycleState": "dummylifecycleState",
                "stateChangeReason": "dummystateChangeReason",
                "tenantId": "dummytenantId",
                "timestamp": "10",
                "subAlarms": {
                    "subAlarmExpression": "dummy_sub_alarm",
                    "currentValues": "dummy_values",
                    "metricDefinition": "dummy_definition",
                    "subAlarmState": "dummy_state"
                }
            }
        }"""
        # The measurement name, tag set and scalar fields must appear as
        # one contiguous prefix of the generated line.
        want_prefix = u'alarm_state_history,tenant_id=dummytenantId ' \
                      u'tenant_id="dummytenantId",alarm_id="dummyid",' \
                      u'metrics="\\"dummymetrics\\"",new_state="dummynewState"' \
                      u',old_state="dummyoldState",link="dummylink",' \
                      u'lifecycle_state="dummylifecycleState",' \
                      u'reason="dummystateChangeReason",reason_data="{}"'
        # Sub-alarm keys are serialised in no guaranteed order, so each
        # fragment is checked individually.
        want_fragments = ('\\"sub_alarm_expression\\":\\"dummy_sub_alarm\\"',
                          '\\"metric_definition\\":\\"dummy_definition\\"',
                          '\\"sub_alarm_state\\":\\"dummy_state\\"',
                          '\\"current_values\\":\\"dummy_values\\"')
        output = self.alarm_state_repo.process_message(msg)
        self.assertIn(want_prefix, output)
        for fragment in want_fragments:
            self.assertIn(fragment, output)
| apache-2.0 | Python | |
1cd457765727a0a65d02ddc9ea164af4913448c6 | Create a python file | lesparza90/TheIoTLearningInitiative,lesparza90/TheIoTLearningInitiative | InternetOfThings101/main.py | InternetOfThings101/main.py | import time
import sys
import signal
def interruptHandler(signal, frame):
    # Exit cleanly on Ctrl-C instead of raising KeyboardInterrupt.
    sys.exit(0)

if __name__ == '__main__':
    # Install the SIGINT handler, then print a heartbeat message forever.
    signal.signal(signal.SIGINT, interruptHandler)
    while True:
        print "Hello Internet of Things 101"
        time.sleep(5)  # throttle to one message every 5 seconds

# End of File
| apache-2.0 | Python | |
2e9f43d1c1679355e2d7d452137ddf7fb2bbdedf | Test Basic.Publish -> Basic.Get message passing | Zephor5/pika,vrtsystems/pika,renshawbay/pika-python3,jstnlef/pika,hugoxia/pika,vitaly-krugl/pika,skftn/pika,fkarb/pika-python3,reddec/pika,shinji-s/pika,benjamin9999/pika,pika/pika,zixiliuyue/pika,knowsis/pika,Tarsbot/pika | tests/async-send-get-test.py | tests/async-send-get-test.py | #!/usr/bin/env python
"""
Send a message and confirm you can retrieve it with Basic.Get
Test Steps:
1) Connect to broker - start_test
2) Open Channel - on_connected
3) Delcare Queue - on_channel_open
4) Send test message - on_queue_declared
5) Call basic get - on_queue_declared
6) Validate that sent message and basic get body are the same - check_message
"""
import utils.async as async
import nose
from pika.adapters import SelectConnection
# Module-level state shared between the asynchronous pika callbacks below.
channel = None      # active channel, set in on_channel_open
confirmed = False   # set True once the Basic.Get body matches what was sent
connection = None   # adapter connection, created in start_test
queue = None        # unique test queue name, chosen in on_channel_open

ADAPTER = SelectConnection  # pika adapter class under test
HOST = 'localhost'
PORT = 5672  # default AMQP broker port
@nose.tools.timed(2)
def start_test():
global confirmed, connection
confirmed = False
connection = async.connect(ADAPTER, HOST, PORT, on_connected)
connection.ioloop.start()
if not confirmed:
assert False
pass
@nose.tools.nottest
def on_connected(connection):
global connected
connected = connection.is_open()
if connected:
async.channel(connection, on_channel_open)
@nose.tools.nottest
def on_channel_open(channel_):
global channel, queue
channel = channel_
queue = async.queue_name()
async.queue_declare(channel, queue, on_queue_declared)
@nose.tools.nottest
def on_queue_declared(frame):
    """Queue declared: publish a test message, then fetch it with Basic.Get.

    The nested callback compares the fetched body with the sent message,
    sets `confirmed` on a match, and stops the ioloop either way so
    start_test() can evaluate the result.
    """
    global channel, queue  # NOTE(review): only read here; `global` is redundant
    test_message = async.send_test_message(channel, queue)

    def check_message(channel_number, method, header, body):
        # Basic.GetOk handler: confirm only if the body round-tripped intact.
        global connection, confirmed
        if body == test_message:
            confirmed = True
        connection.ioloop.stop()

    channel.basic_get(callback=check_message, queue=queue)
| bsd-3-clause | Python | |
2cb8f7b2df2583c9fdb545744adad2386b4ee7f3 | Add test for issue #2465 - tuple subsclass subscript | adafruit/circuitpython,adafruit/circuitpython,adafruit/micropython,adafruit/circuitpython,adafruit/micropython,adafruit/circuitpython,adafruit/micropython,adafruit/circuitpython,adafruit/micropython,adafruit/micropython,adafruit/circuitpython | tests/basics/subscr_tuple.py | tests/basics/subscr_tuple.py | # subscripting a subclassed tuple
# Regression test for issue #2465: subscripting an instance of a tuple
# subclass must work just like subscripting a plain tuple.
class Foo(tuple):
    pass

foo = Foo((1,2))
foo[0]  # must not raise; the result is deliberately discarded
| mit | Python | |
7d46f6e714be4b53a800c72f800d400ef6b280c4 | add tests for core functions | keeprocking/pygelf,keeprocking/pygelf | tests/test_core_functions.py | tests/test_core_functions.py | from pygelf import gelf
import json
import zlib
import struct
import pytest
@pytest.mark.parametrize('compress', [True, False])
def test_pack(compress):
    """pack() must emit UTF-8 JSON, optionally zlib-compressed, that
    round-trips back to the original message dict."""
    original = {'version': '1.1', 'short_message': 'test pack'}
    packed = gelf.pack(original, compress)
    if compress:
        raw = zlib.decompress(packed)
    else:
        raw = packed
    decoded = json.loads(raw.decode('utf-8'))
    assert decoded == original
def test_split():
    """split() must chunk the payload and prefix each chunk with the
    GELF magic bytes, a sequence number and the total chunk count."""
    payload = b'12345'
    magic = b'\x1e\x0f'
    chunks = list(gelf.split(payload, 2))
    count = struct.pack('b', 3)  # 5 bytes in chunks of 2 -> 3 chunks
    wanted = [(struct.pack('b', seq), count, data)
              for seq, data in enumerate([b'12', b'34', b'5'])]
    assert len(chunks) == len(wanted)
    for chunk, (seq, total, data) in zip(chunks, wanted):
        assert chunk[:2] == magic       # magic header
        assert chunk[10:11] == seq      # chunk sequence number
        assert chunk[11:12] == total    # total number of chunks
        assert chunk[12:] == data       # chunk payload
| mit | Python | |
2c665dbcb90785b9754f89cb6a0d3d9c3ffddc95 | Add test | ronekko/deep_metric_learning | tests/test_proxy_nca_loss.py | tests/test_proxy_nca_loss.py | # -*- coding: utf-8 -*-
"""
Created on Tue Feb 07 19:23:18 2017
@author: sakurai
"""
import unittest
import numpy as np
import chainer
from chainer import cuda
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
import chainer.functions as F
from deep_metric_learning.lib.functions.proxy_nca_loss import proxy_nca_loss
class TestProxyNcaLoss(unittest.TestCase):
    """Forward/backward checks for the proxy-NCA metric learning loss."""

    def setUp(self):
        # Random embeddings (batch_size x out_dims), one proxy vector per
        # class, and an integer class label for each embedding.
        batch_size = 5
        n_classes = 10
        out_dims = 3
        self.x_data = np.random.randn(batch_size, out_dims).astype(np.float32)
        # x_data is assumed that each vector is L2 normalized
        self.x_data /= np.linalg.norm(self.x_data, axis=1, keepdims=True)
        self.proxy_data = np.random.randn(
            n_classes, out_dims).astype(np.float32)
        self.labels_data = np.random.choice(n_classes, batch_size)

    def check_forward(self, x_data, proxy_data, labels_data):
        # The loss must evaluate without error and stay in float32.
        x = chainer.Variable(x_data)
        proxy = chainer.Variable(proxy_data)
        x = F.normalize(x)
        loss = proxy_nca_loss(x, proxy, labels_data)
        self.assertEqual(loss.dtype, np.float32)

    def test_forward_cpu(self):
        self.check_forward(self.x_data, self.proxy_data, self.labels_data)

    @attr.gpu
    def test_forward_gpu(self):
        self.check_forward(cuda.to_gpu(self.x_data),
                           cuda.to_gpu(self.proxy_data),
                           self.labels_data)

    def check_backward(self, x_data, proxy_data, labels_data):
        # Numerically verify gradients w.r.t. both embeddings and proxies.
        # NOTE(review): atol=0.1 is loose — presumably the numeric gradient
        # is noisy for this loss; confirm before tightening.
        gradient_check.check_backward(
            lambda x, p: proxy_nca_loss(x, p, labels_data),
            (x_data, proxy_data), None, atol=1.e-1)

    def test_backward_cpu(self):
        self.check_backward(self.x_data, self.proxy_data, self.labels_data)

    @attr.gpu
    def test_backward_gpu(self):
        self.check_backward(cuda.to_gpu(self.x_data),
                            cuda.to_gpu(self.proxy_data),
                            self.labels_data)


# Registers the tests with chainer's test runner when executed directly.
testing.run_module(__name__, __file__)
| mit | Python | |
703a5556174706db330c8d2e426471e490a00cef | Switch rottentomatoes to xfail rather than skip test. | qk4l/Flexget,Danfocus/Flexget,Flexget/Flexget,drwyrm/Flexget,ianstalk/Flexget,dsemi/Flexget,crawln45/Flexget,jawilson/Flexget,sean797/Flexget,crawln45/Flexget,poulpito/Flexget,malkavi/Flexget,Flexget/Flexget,JorisDeRieck/Flexget,qk4l/Flexget,jacobmetrick/Flexget,dsemi/Flexget,tobinjt/Flexget,poulpito/Flexget,qvazzler/Flexget,antivirtel/Flexget,malkavi/Flexget,OmgOhnoes/Flexget,poulpito/Flexget,Pretagonist/Flexget,jawilson/Flexget,oxc/Flexget,qk4l/Flexget,JorisDeRieck/Flexget,gazpachoking/Flexget,LynxyssCZ/Flexget,Flexget/Flexget,crawln45/Flexget,malkavi/Flexget,OmgOhnoes/Flexget,JorisDeRieck/Flexget,JorisDeRieck/Flexget,qvazzler/Flexget,tobinjt/Flexget,jacobmetrick/Flexget,Danfocus/Flexget,antivirtel/Flexget,OmgOhnoes/Flexget,LynxyssCZ/Flexget,Danfocus/Flexget,Flexget/Flexget,malkavi/Flexget,drwyrm/Flexget,antivirtel/Flexget,tarzasai/Flexget,ianstalk/Flexget,oxc/Flexget,tarzasai/Flexget,gazpachoking/Flexget,dsemi/Flexget,drwyrm/Flexget,tobinjt/Flexget,jacobmetrick/Flexget,tobinjt/Flexget,ianstalk/Flexget,sean797/Flexget,LynxyssCZ/Flexget,Danfocus/Flexget,jawilson/Flexget,Pretagonist/Flexget,sean797/Flexget,LynxyssCZ/Flexget,oxc/Flexget,tarzasai/Flexget,qvazzler/Flexget,Pretagonist/Flexget,crawln45/Flexget,jawilson/Flexget | tests/test_rottentomatoes.py | tests/test_rottentomatoes.py | from __future__ import unicode_literals, division, absolute_import
import pytest
class TestRottenTomatoesLookup(object):
    """End-to-end checks for the rottentomatoes_lookup plugin.

    Each mocked entry exercises a different lookup path: plain title
    search, direct rt_id lookup, title+year parsing, and a short title
    carrying a REPACK tag but no year.
    """

    config = """
        tasks:
          test:
            mock:
              # tests search
              - {title: 'Toy Story'}
              - {title: 'The Matrix'}
              - {title: 'Star Wars: Episode I - The Phantom Menace (3D)'}
              # tests direct id
              - {title: '[Group] Taken 720p', rt_id: 770680780}
              # tests title + year
              - {title: 'Rush.Hour[1998]1080p[Eng]-FOO'}
              # test short title, with repack and without year
              - {title: 'Up.REPACK.720p.Bluray.x264-FlexGet'}
            rottentomatoes_lookup: yes
    """

    @pytest.mark.xfail(reason='This plugin seems to be broken')
    def test_rottentomatoes_lookup(self, execute_task, use_vcr):
        # execute_task and use_vcr are fixtures supplied by the FlexGet
        # test harness; use_vcr replays recorded HTTP responses.
        task = execute_task('test')
        # check that these were created
        assert task.find_entry(rt_name='Toy Story', rt_year=1995, rt_id=9559, imdb_id='tt0114709'), \
            'Didn\'t populate RT info for Toy Story'
        assert task.find_entry(imdb_id='tt0114709'), \
            'Didn\'t populate imdb_id info for Toy Story'
        assert task.find_entry(rt_name='The Matrix', rt_year=1999, rt_id=12897, imdb_id='tt0133093'), \
            'Didn\'t populate RT info for The Matrix'
        assert task.find_entry(rt_name='Star Wars: Episode I - The Phantom Menace',
                               rt_year=1999, rt_id=10008), \
            'Didn\'t populate RT info for Star Wars: Episode I - The Phantom Menace (in 3D)'
        assert task.find_entry(rt_name='Taken', rt_year=2008, rt_id=770680780), \
            'Didn\'t populate RT info for Taken'
        assert task.find_entry(rt_name='Rush Hour', rt_year=1998, rt_id=10201), \
            'Didn\'t populate RT info for Rush Hour'
        assert task.find_entry(rt_name='Up', rt_year=2009, rt_id=770671912), \
            'Didn\'t populate RT info for Up'
| from __future__ import unicode_literals, division, absolute_import
from nose.plugins.skip import SkipTest
class TestRottenTomatoesLookup(object):
    """Pre-xfail version of the rottentomatoes_lookup plugin test.

    This variant disables itself by raising SkipTest at the top of the
    test body (the commit replaced this with a pytest.mark.xfail).
    """

    config = """
        tasks:
          test:
            mock:
              # tests search
              - {title: 'Toy Story'}
              - {title: 'The Matrix'}
              - {title: 'Star Wars: Episode I - The Phantom Menace (3D)'}
              # tests direct id
              - {title: '[Group] Taken 720p', rt_id: 770680780}
              # tests title + year
              - {title: 'Rush.Hour[1998]1080p[Eng]-FOO'}
              # test short title, with repack and without year
              - {title: 'Up.REPACK.720p.Bluray.x264-FlexGet'}
            rottentomatoes_lookup: yes
    """

    def test_rottentomatoes_lookup(self, execute_task, use_vcr):
        raise SkipTest('This plugin seems to be broken')
        # NOTE: everything below is unreachable until the SkipTest above
        # is removed; it is kept as the intended assertions.
        task = execute_task('test')
        # check that these were created
        assert task.find_entry(rt_name='Toy Story', rt_year=1995, rt_id=9559, imdb_id='tt0114709'), \
            'Didn\'t populate RT info for Toy Story'
        assert task.find_entry(imdb_id='tt0114709'), \
            'Didn\'t populate imdb_id info for Toy Story'
        assert task.find_entry(rt_name='The Matrix', rt_year=1999, rt_id=12897, imdb_id='tt0133093'), \
            'Didn\'t populate RT info for The Matrix'
        assert task.find_entry(rt_name='Star Wars: Episode I - The Phantom Menace',
                               rt_year=1999, rt_id=10008), \
            'Didn\'t populate RT info for Star Wars: Episode I - The Phantom Menace (in 3D)'
        assert task.find_entry(rt_name='Taken', rt_year=2008, rt_id=770680780), \
            'Didn\'t populate RT info for Taken'
        assert task.find_entry(rt_name='Rush Hour', rt_year=1998, rt_id=10201), \
            'Didn\'t populate RT info for Rush Hour'
        assert task.find_entry(rt_name='Up', rt_year=2009, rt_id=770671912), \
            'Didn\'t populate RT info for Up'
| mit | Python |
ecdda7cf81cb3feb353a1d62441eff92aed082af | Add aggregationtypes to routes | pwyf/IATI-Data-Quality,pwyf/IATI-Data-Quality,pwyf/IATI-Data-Quality,pwyf/IATI-Data-Quality | iatidataquality/aggregationtypes.py | iatidataquality/aggregationtypes.py |
# IATI Data Quality, tools for Data QA on IATI-formatted publications
# by Mark Brough, Martin Keegan, Ben Webb and Jennifer Smith
#
# Copyright (C) 2013 Publish What You Fund
#
# This programme is free software; you may redistribute and/or modify
# it under the terms of the GNU Affero General Public License v3.0
from flask import Flask, render_template, flash, request, Markup, \
session, redirect, url_for, escape, Response, abort, send_file
import StringIO
from flask.ext.sqlalchemy import SQLAlchemy
from flask.ext.login import (LoginManager, current_user, login_required,
login_user, logout_user, UserMixin, AnonymousUser,
confirm_login, fresh_login_required)
from sqlalchemy import func
from datetime import datetime
from iatidataquality import app
from iatidataquality import db
import os
import sys
import json
current = os.path.dirname(os.path.abspath(__file__))
parent = os.path.dirname(current)
sys.path.append(parent)
from iatidq import dqtests, dqaggregationtypes
import StringIO
import unicodecsv
@app.route("/aggregationtypes/")
@app.route("/aggregationtypes/<aggregationtype_id>/")
def aggregationtypes(aggregationtype_id=None):
ats=dqaggregationtypes.aggregationTypes()
return render_template("aggregation_types.html", aggregationtypes=ats)
@app.route("/aggregationtypes/new/", methods=['POST', 'GET'])
@app.route("/aggregationtypes/<aggregationtype_id>/edit/", methods=['POST', 'GET'])
def aggregationtypes_edit(aggregationtype_id=None):
if aggregationtype_id:
if request.method=='POST':
data = {
'name': request.form['name'],
'description': request.form['description'],
'test_id': request.form['test_id'],
'test_result': request.form['test_result']
}
if data['test_id']=="":
data['test_id'] = None
aggregationtype = dqaggregationtypes.updateAggregationType(aggregationtype_id, data)
if aggregationtype:
flash('Successfully updated your aggregation type.', 'success')
else:
aggregationtype = {}
flash('Could not update your aggregation type.', 'error')
else:
aggregationtype=dqaggregationtypes.aggregationTypes(aggregationtype_id)
else:
aggregationtype = {}
if request.method=='POST':
data = {
'name': request.form['name'],
'description': request.form['description'],
'test_id': request.form['test_id'],
'test_result': request.form['test_result']
}
if data['test_id']=="":
data['test_id'] = None
aggregationtype = dqaggregationtypes.addAggregationType(data)
if aggregationtype:
flash('Successfully added your aggregation type.', 'success')
else:
aggregationtype = {}
flash('Could not add your aggregation type.', 'error')
tests = dqtests.tests()
return render_template("aggregation_types_edit.html", aggregationtype=aggregationtype, tests=tests)
| agpl-3.0 | Python |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.